-rw-r--r--Documentation/android.txt121
-rw-r--r--Documentation/cgroups/cgroups.txt9
-rw-r--r--Documentation/cgroups/cpuacct.txt7
-rw-r--r--Documentation/cpu-freq/governors.txt38
-rw-r--r--Documentation/power/runtime_pm.txt1
-rw-r--r--Documentation/trace/tracedump.txt58
-rw-r--r--Documentation/trace/tracelevel.txt42
-rw-r--r--Documentation/video/tegra_dc_ext.txt83
-rw-r--r--arch/arm/Kconfig34
-rw-r--r--arch/arm/boot/compressed/head.S3
-rw-r--r--arch/arm/common/Kconfig59
-rw-r--r--arch/arm/common/Makefile2
-rw-r--r--arch/arm/common/fiq_debugger.c1196
-rw-r--r--arch/arm/common/fiq_debugger_ringbuf.h94
-rw-r--r--arch/arm/common/fiq_glue.S111
-rw-r--r--arch/arm/common/fiq_glue_setup.c155
-rw-r--r--arch/arm/common/gic.c205
-rw-r--r--arch/arm/configs/tegra3_android_defconfig409
-rw-r--r--arch/arm/configs/tegra3_defconfig428
-rw-r--r--arch/arm/configs/tegra_android_defconfig374
-rw-r--r--arch/arm/configs/tegra_aruba2_android_defconfig311
-rw-r--r--arch/arm/configs/tegra_cardhu_mods_defconfig149
-rw-r--r--arch/arm/configs/tegra_defconfig311
-rw-r--r--arch/arm/configs/tegra_p852_gnu_linux_defconfig303
-rw-r--r--arch/arm/include/asm/cacheflush.h51
-rw-r--r--arch/arm/include/asm/cpu_pm.h123
-rw-r--r--arch/arm/include/asm/delay.h4
-rw-r--r--arch/arm/include/asm/elf.h1
-rw-r--r--arch/arm/include/asm/fiq_debugger.h64
-rw-r--r--arch/arm/include/asm/fiq_glue.h30
-rw-r--r--arch/arm/include/asm/hardirq.h2
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h6
-rw-r--r--arch/arm/include/asm/hardware/coresight.h26
-rw-r--r--arch/arm/include/asm/hardware/gic.h9
-rw-r--r--arch/arm/include/asm/irq.h3
-rw-r--r--arch/arm/include/asm/mach/mmc.h28
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/pgalloc.h4
-rw-r--r--arch/arm/include/asm/pgtable.h25
-rw-r--r--arch/arm/include/asm/sizes.h3
-rw-r--r--arch/arm/include/asm/smp.h2
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/asm-offsets.c3
-rw-r--r--arch/arm/kernel/cpu_pm.c116
-rw-r--r--arch/arm/kernel/debug.S10
-rw-r--r--arch/arm/kernel/elf.c17
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/etm.c473
-rw-r--r--arch/arm/kernel/leds.c27
-rw-r--r--arch/arm/kernel/module.c1
-rw-r--r--arch/arm/kernel/process.c120
-rw-r--r--arch/arm/kernel/setup.c4
-rw-r--r--arch/arm/kernel/sleep.S1
-rw-r--r--arch/arm/kernel/smp.c78
-rw-r--r--arch/arm/kernel/smp_twd.c86
-rw-r--r--arch/arm/kernel/traps.c4
-rw-r--r--arch/arm/lib/Makefile6
-rw-r--r--arch/arm/mach-pxa/pm.c1
-rw-r--r--arch/arm/mach-sa1100/pm.c2
-rw-r--r--arch/arm/mach-tegra/Kconfig369
-rw-r--r--arch/arm/mach-tegra/Makefile163
-rw-r--r--arch/arm/mach-tegra/Makefile.boot4
-rw-r--r--arch/arm/mach-tegra/ahb.c218
-rw-r--r--arch/arm/mach-tegra/apbio.c158
-rw-r--r--arch/arm/mach-tegra/apbio.h19
-rw-r--r--arch/arm/mach-tegra/arb_sema.c243
-rw-r--r--arch/arm/mach-tegra/asm_macros.h72
-rw-r--r--arch/arm/mach-tegra/baseband-xmm-power.c891
-rw-r--r--arch/arm/mach-tegra/baseband-xmm-power.h111
-rw-r--r--arch/arm/mach-tegra/baseband-xmm-power2.c680
-rw-r--r--arch/arm/mach-tegra/board-aruba-panel.c253
-rw-r--r--arch/arm/mach-tegra/board-aruba-pinmux.c307
-rw-r--r--arch/arm/mach-tegra/board-aruba-power.c76
-rw-r--r--arch/arm/mach-tegra/board-aruba-sdhci.c248
-rw-r--r--arch/arm/mach-tegra/board-aruba-sensors.c109
-rw-r--r--arch/arm/mach-tegra/board-aruba.c544
-rw-r--r--arch/arm/mach-tegra/board-aruba.h26
-rw-r--r--arch/arm/mach-tegra/board-cardhu-kbc.c280
-rw-r--r--arch/arm/mach-tegra/board-cardhu-memory.c4390
-rw-r--r--arch/arm/mach-tegra/board-cardhu-panel.c1244
-rw-r--r--arch/arm/mach-tegra/board-cardhu-pinmux.c795
-rw-r--r--arch/arm/mach-tegra/board-cardhu-pm298-power-rails.c758
-rw-r--r--arch/arm/mach-tegra/board-cardhu-pm299-power-rails.c748
-rw-r--r--arch/arm/mach-tegra/board-cardhu-power.c1252
-rw-r--r--arch/arm/mach-tegra/board-cardhu-powermon.c256
-rw-r--r--arch/arm/mach-tegra/board-cardhu-sdhci.c300
-rw-r--r--arch/arm/mach-tegra/board-cardhu-sensors.c938
-rw-r--r--arch/arm/mach-tegra/board-cardhu.c1045
-rw-r--r--arch/arm/mach-tegra/board-cardhu.h254
-rw-r--r--arch/arm/mach-tegra/board-enterprise-baseband.c246
-rw-r--r--arch/arm/mach-tegra/board-enterprise-kbc.c107
-rw-r--r--arch/arm/mach-tegra/board-enterprise-memory.c629
-rw-r--r--arch/arm/mach-tegra/board-enterprise-panel.c822
-rw-r--r--arch/arm/mach-tegra/board-enterprise-pinmux.c539
-rw-r--r--arch/arm/mach-tegra/board-enterprise-power.c615
-rw-r--r--arch/arm/mach-tegra/board-enterprise-sdhci.c269
-rw-r--r--arch/arm/mach-tegra/board-enterprise-sensors.c664
-rw-r--r--arch/arm/mach-tegra/board-enterprise.c1000
-rw-r--r--arch/arm/mach-tegra/board-enterprise.h158
-rw-r--r--arch/arm/mach-tegra/board-harmony-kbc.c375
-rw-r--r--arch/arm/mach-tegra/board-harmony-panel.c273
-rw-r--r--arch/arm/mach-tegra/board-harmony-pcie.c19
-rw-r--r--arch/arm/mach-tegra/board-harmony-pinmux.c33
-rw-r--r--arch/arm/mach-tegra/board-harmony-power.c265
-rw-r--r--arch/arm/mach-tegra/board-harmony.c375
-rw-r--r--arch/arm/mach-tegra/board-harmony.h15
-rw-r--r--arch/arm/mach-tegra/board-seaboard-pinmux.c1
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c19
-rw-r--r--arch/arm/mach-tegra/board-ventana-memory.c592
-rw-r--r--arch/arm/mach-tegra/board-ventana-panel.c444
-rw-r--r--arch/arm/mach-tegra/board-ventana-pinmux.c194
-rw-r--r--arch/arm/mach-tegra/board-ventana-power.c320
-rw-r--r--arch/arm/mach-tegra/board-ventana-sdhci.c266
-rw-r--r--arch/arm/mach-tegra/board-ventana-sensors.c573
-rw-r--r--arch/arm/mach-tegra/board-ventana.c653
-rw-r--r--arch/arm/mach-tegra/board-ventana.h89
-rw-r--r--arch/arm/mach-tegra/board-whistler-baseband.c230
-rw-r--r--arch/arm/mach-tegra/board-whistler-baseband.h81
-rw-r--r--arch/arm/mach-tegra/board-whistler-kbc.c138
-rw-r--r--arch/arm/mach-tegra/board-whistler-memory.c569
-rw-r--r--arch/arm/mach-tegra/board-whistler-panel.c390
-rw-r--r--arch/arm/mach-tegra/board-whistler-pinmux.c177
-rw-r--r--arch/arm/mach-tegra/board-whistler-power.c276
-rw-r--r--arch/arm/mach-tegra/board-whistler-sdhci.c248
-rw-r--r--arch/arm/mach-tegra/board-whistler-sensors.c406
-rw-r--r--arch/arm/mach-tegra/board-whistler.c612
-rw-r--r--arch/arm/mach-tegra/board-whistler.h39
-rw-r--r--arch/arm/mach-tegra/board.h85
-rw-r--r--arch/arm/mach-tegra/clock.c833
-rw-r--r--arch/arm/mach-tegra/clock.h148
-rw-r--r--arch/arm/mach-tegra/common-t2.c192
-rw-r--r--arch/arm/mach-tegra/common-t3.c268
-rw-r--r--arch/arm/mach-tegra/common.c978
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c566
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.h79
-rw-r--r--arch/arm/mach-tegra/cpu-tegra3.c490
-rw-r--r--arch/arm/mach-tegra/cpuidle-t2.c413
-rw-r--r--arch/arm/mach-tegra/cpuidle-t3.c446
-rw-r--r--arch/arm/mach-tegra/cpuidle.c315
-rw-r--r--arch/arm/mach-tegra/cpuidle.h120
-rw-r--r--arch/arm/mach-tegra/csi.c84
-rw-r--r--arch/arm/mach-tegra/delay.S52
-rw-r--r--arch/arm/mach-tegra/devices.c1078
-rw-r--r--arch/arm/mach-tegra/devices.h75
-rw-r--r--arch/arm/mach-tegra/dma.c527
-rw-r--r--arch/arm/mach-tegra/dvfs.c806
-rw-r--r--arch/arm/mach-tegra/dvfs.h165
-rw-r--r--arch/arm/mach-tegra/edp.c387
-rw-r--r--arch/arm/mach-tegra/fiq.c99
-rw-r--r--arch/arm/mach-tegra/fuse.c413
-rw-r--r--arch/arm/mach-tegra/fuse.h43
-rw-r--r--arch/arm/mach-tegra/gic.c129
-rw-r--r--arch/arm/mach-tegra/gic.h49
-rw-r--r--arch/arm/mach-tegra/gpio-names.h24
-rw-r--r--arch/arm/mach-tegra/headsmp.S358
-rw-r--r--arch/arm/mach-tegra/hotplug.c144
-rw-r--r--arch/arm/mach-tegra/i2c_error_recovery.c103
-rw-r--r--arch/arm/mach-tegra/include/mach/arb_sema.h35
-rw-r--r--arch/arm/mach-tegra/include/mach/audio.h57
-rw-r--r--arch/arm/mach-tegra/include/mach/clk.h38
-rw-r--r--arch/arm/mach-tegra/include/mach/csi.h38
-rw-r--r--arch/arm/mach-tegra/include/mach/dc.h558
-rw-r--r--arch/arm/mach-tegra/include/mach/delay.h41
-rw-r--r--arch/arm/mach-tegra/include/mach/dma.h31
-rw-r--r--arch/arm/mach-tegra/include/mach/edp.h80
-rw-r--r--arch/arm/mach-tegra/include/mach/entry-macro.S23
-rw-r--r--arch/arm/mach-tegra/include/mach/fb.h61
-rw-r--r--arch/arm/mach-tegra/include/mach/fiq.h25
-rw-r--r--arch/arm/mach-tegra/include/mach/gpio.h26
-rw-r--r--arch/arm/mach-tegra/include/mach/gpufuse.h19
-rw-r--r--arch/arm/mach-tegra/include/mach/hardware.h52
-rw-r--r--arch/arm/mach-tegra/include/mach/hdmi-audio.h46
-rw-r--r--arch/arm/mach-tegra/include/mach/i2s.h316
-rw-r--r--arch/arm/mach-tegra/include/mach/io.h39
-rw-r--r--arch/arm/mach-tegra/include/mach/io_dpd.h25
-rw-r--r--arch/arm/mach-tegra/include/mach/iomap.h265
-rw-r--r--arch/arm/mach-tegra/include/mach/iovmm.h323
-rw-r--r--arch/arm/mach-tegra/include/mach/irqs.h219
-rw-r--r--arch/arm/mach-tegra/include/mach/kbc.h36
-rw-r--r--arch/arm/mach-tegra/include/mach/kfuse.h20
-rw-r--r--arch/arm/mach-tegra/include/mach/latency_allowance.h121
-rw-r--r--arch/arm/mach-tegra/include/mach/legacy_irq.h23
-rw-r--r--arch/arm/mach-tegra/include/mach/mc.h109
-rw-r--r--arch/arm/mach-tegra/include/mach/memory.h15
-rw-r--r--arch/arm/mach-tegra/include/mach/nand.h55
-rw-r--r--arch/arm/mach-tegra/include/mach/nvmap.h152
-rw-r--r--arch/arm/mach-tegra/include/mach/pci.h38
-rw-r--r--arch/arm/mach-tegra/include/mach/pinmux-t3.h321
-rw-r--r--arch/arm/mach-tegra/include/mach/pinmux.h277
-rw-r--r--arch/arm/mach-tegra/include/mach/powergate.h65
-rw-r--r--arch/arm/mach-tegra/include/mach/sdhci.h4
-rw-r--r--arch/arm/mach-tegra/include/mach/spdif.h392
-rw-r--r--arch/arm/mach-tegra/include/mach/spi.h42
-rw-r--r--arch/arm/mach-tegra/include/mach/suspend.h38
-rw-r--r--arch/arm/mach-tegra/include/mach/system.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra-bb-power.h61
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_aic326x_pdata.h39
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_dc_ext.h77
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_fb.h27
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h30
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_fuse.h27
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_max98088_pdata.h35
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_odm_fuses.h107
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_usb_modem_power.h47
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra_wm8753_pdata.h24
-rw-r--r--arch/arm/mach-tegra/include/mach/thermal.h62
-rw-r--r--arch/arm/mach-tegra/include/mach/tsensor.h61
-rw-r--r--arch/arm/mach-tegra/include/mach/uncompress.h76
-rw-r--r--arch/arm/mach-tegra/include/mach/usb_phy.h90
-rw-r--r--arch/arm/mach-tegra/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/w1.h84
-rw-r--r--arch/arm/mach-tegra/io.c44
-rw-r--r--arch/arm/mach-tegra/iovmm-gart.c346
-rw-r--r--arch/arm/mach-tegra/iovmm-smmu.c1351
-rw-r--r--arch/arm/mach-tegra/iovmm.c950
-rw-r--r--arch/arm/mach-tegra/irq.c95
-rw-r--r--arch/arm/mach-tegra/kfuse.c114
-rw-r--r--arch/arm/mach-tegra/latency_allowance.c593
-rw-r--r--arch/arm/mach-tegra/mc.c73
-rw-r--r--arch/arm/mach-tegra/p852/Kconfig110
-rw-r--r--arch/arm/mach-tegra/p852/Makefile39
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-gpio.c158
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-i2c.c180
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-panel.c191
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-pinmux.c439
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-power.c225
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sdhci.c199
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku1-b00.c98
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku1-c0x.c98
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku1.c89
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku13-b00.c114
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku13.c112
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku23-b00.c115
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku23-c01.c87
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku23.c113
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku3.c103
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku5-b00.c115
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku5-c01.c93
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku8-b00.c88
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku8-c01.c87
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku9-b00.c93
-rw-r--r--arch/arm/mach-tegra/p852/board-p852-sku9-c01.c92
-rw-r--r--arch/arm/mach-tegra/p852/board-p852.c765
-rw-r--r--arch/arm/mach-tegra/p852/board-p852.h301
-rw-r--r--arch/arm/mach-tegra/pcie.c967
-rw-r--r--arch/arm/mach-tegra/pinmux-t2-tables.c392
-rw-r--r--arch/arm/mach-tegra/pinmux-t3-tables.c478
-rw-r--r--arch/arm/mach-tegra/pinmux.c400
-rw-r--r--arch/arm/mach-tegra/platsmp.c274
-rw-r--r--arch/arm/mach-tegra/pm-irq.c366
-rw-r--r--arch/arm/mach-tegra/pm-irq.h33
-rw-r--r--arch/arm/mach-tegra/pm-t2.c376
-rw-r--r--arch/arm/mach-tegra/pm-t3.c522
-rw-r--r--arch/arm/mach-tegra/pm.c1244
-rw-r--r--arch/arm/mach-tegra/pm.h215
-rw-r--r--arch/arm/mach-tegra/powerdetect.c348
-rw-r--r--arch/arm/mach-tegra/powergate.c720
-rw-r--r--arch/arm/mach-tegra/pwm.c296
-rw-r--r--arch/arm/mach-tegra/reset.c116
-rw-r--r--arch/arm/mach-tegra/reset.h70
-rw-r--r--arch/arm/mach-tegra/sleep-t2.S569
-rw-r--r--arch/arm/mach-tegra/sleep-t3.S713
-rw-r--r--arch/arm/mach-tegra/sleep.S491
-rw-r--r--arch/arm/mach-tegra/sleep.h246
-rw-r--r--arch/arm/mach-tegra/syncpt.c100
-rw-r--r--arch/arm/mach-tegra/sysfs-cluster.c461
-rw-r--r--arch/arm/mach-tegra/sysfs-dcc.c249
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c801
-rw-r--r--arch/arm/mach-tegra/tegra2_dvfs.c357
-rw-r--r--arch/arm/mach-tegra/tegra2_emc.c106
-rw-r--r--arch/arm/mach-tegra/tegra2_emc.h15
-rw-r--r--arch/arm/mach-tegra/tegra2_mc.c1017
-rw-r--r--arch/arm/mach-tegra/tegra2_mc.h250
-rw-r--r--arch/arm/mach-tegra/tegra2_speedo.c140
-rw-r--r--arch/arm/mach-tegra/tegra2_statmon.c440
-rw-r--r--arch/arm/mach-tegra/tegra2_statmon.h33
-rw-r--r--arch/arm/mach-tegra/tegra2_throttle.c180
-rw-r--r--arch/arm/mach-tegra/tegra3_actmon.c848
-rw-r--r--arch/arm/mach-tegra/tegra3_clocks.c4751
-rw-r--r--arch/arm/mach-tegra/tegra3_dvfs.c893
-rw-r--r--arch/arm/mach-tegra/tegra3_emc.c1069
-rw-r--r--arch/arm/mach-tegra/tegra3_emc.h279
-rw-r--r--arch/arm/mach-tegra/tegra3_speedo.c404
-rw-r--r--arch/arm/mach-tegra/tegra3_thermal.c544
-rw-r--r--arch/arm/mach-tegra/tegra3_throttle.c367
-rw-r--r--arch/arm/mach-tegra/tegra3_tsensor.c194
-rw-r--r--arch/arm/mach-tegra/tegra_fiq_debugger.c206
-rw-r--r--arch/arm/mach-tegra/tegra_i2s_audio.c1965
-rw-r--r--arch/arm/mach-tegra/tegra_odm_fuses.c951
-rw-r--r--arch/arm/mach-tegra/tegra_smmu.h24
-rw-r--r--arch/arm/mach-tegra/tegra_spdif_audio.c1187
-rw-r--r--arch/arm/mach-tegra/tegra_usb_modem_power.c290
-rw-r--r--arch/arm/mach-tegra/timer-t2.c128
-rw-r--r--arch/arm/mach-tegra/timer-t3.c288
-rw-r--r--arch/arm/mach-tegra/timer.c138
-rw-r--r--arch/arm/mach-tegra/timer.h51
-rw-r--r--arch/arm/mach-tegra/usb_phy.c2373
-rw-r--r--arch/arm/mach-tegra/wakeups-t2.c111
-rw-r--r--arch/arm/mach-tegra/wakeups-t2.h65
-rw-r--r--arch/arm/mach-tegra/wakeups-t3.c122
-rw-r--r--arch/arm/mach-tegra/wakeups-t3.h71
-rw-r--r--arch/arm/mach-tegra/wdt-recovery.c131
-rw-r--r--arch/arm/mach-tegra/wdt-recovery.h17
-rw-r--r--arch/arm/mm/Makefile2
-rw-r--r--arch/arm/mm/cache-l2x0.c84
-rw-r--r--arch/arm/mm/cache-v6.S17
-rw-r--r--arch/arm/mm/cache-v7.S55
-rw-r--r--arch/arm/mm/dma-mapping.c7
-rw-r--r--arch/arm/mm/mmu.c12
-rw-r--r--arch/arm/mm/pageattr.c998
-rw-r--r--arch/arm/mm/pgd.c32
-rw-r--r--arch/arm/mm/proc-macros.S2
-rw-r--r--arch/arm/mm/proc-v7.S208
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/plat-samsung/pm.c4
-rw-r--r--arch/arm/tools/mach-types2
-rw-r--r--arch/arm/vfp/entry.S3
-rw-r--r--arch/arm/vfp/vfphw.S43
-rw-r--r--arch/arm/vfp/vfpmodule.c156
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/x86/include/asm/idle.h7
-rw-r--r--arch/x86/kernel/process_64.c18
-rw-r--r--arch/x86/kernel/smpboot.c13
-rw-r--r--block/genhd.c17
-rw-r--r--crypto/tcrypt.c8
-rw-r--r--crypto/testmgr.c10
-rw-r--r--crypto/testmgr.h213
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/base/power/main.c45
-rw-r--r--drivers/base/power/runtime.c14
-rw-r--r--drivers/base/regmap/regmap.c36
-rw-r--r--drivers/bluetooth/Kconfig10
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bluesleep.c864
-rw-r--r--drivers/char/Kconfig17
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/dcc_tty.c326
-rw-r--r--drivers/char/mem.c17
-rw-r--r--drivers/cpufreq/Kconfig27
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c772
-rw-r--r--drivers/cpufreq/cpufreq_stats.c56
-rw-r--r--drivers/cpuidle/governors/menu.c7
-rw-r--r--drivers/crypto/Kconfig19
-rw-r--r--drivers/crypto/Makefile4
-rw-r--r--drivers/crypto/tegra-aes.c1451
-rw-r--r--drivers/crypto/tegra-aes.h100
-rw-r--r--drivers/crypto/tegra-se.c2442
-rw-r--r--drivers/crypto/tegra-se.h235
-rw-r--r--drivers/gpio/gpio-tegra.c170
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/ion/Kconfig12
-rw-r--r--drivers/gpu/ion/Makefile2
-rw-r--r--drivers/gpu/ion/ion.c1132
-rw-r--r--drivers/gpu/ion/ion_carveout_heap.c162
-rw-r--r--drivers/gpu/ion/ion_heap.c72
-rw-r--r--drivers/gpu/ion/ion_priv.h275
-rw-r--r--drivers/gpu/ion/ion_system_heap.c198
-rw-r--r--drivers/gpu/ion/ion_system_mapper.c114
-rw-r--r--drivers/gpu/ion/tegra/Makefile1
-rw-r--r--drivers/gpu/ion/tegra/tegra_ion.c599
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-magicmouse.c13
-rw-r--r--drivers/hid/hid-multitouch.c10
-rw-r--r--drivers/hwmon/Kconfig33
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/adt7461.c809
-rw-r--r--drivers/hwmon/ina219.c414
-rw-r--r--drivers/hwmon/ina230.c561
-rw-r--r--drivers/hwmon/tegra-tsensor.c1991
-rw-r--r--drivers/i2c/Kconfig10
-rw-r--r--drivers/i2c/Makefile2
-rw-r--r--drivers/i2c/busses/Kconfig8
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-slave-tegra.c1114
-rw-r--r--drivers/i2c/busses/i2c-tegra.c443
-rwxr-xr-xdrivers/i2c/i2c-slave.c281
-rw-r--r--drivers/i2c/muxes/pca954x.c104
-rw-r--r--drivers/input/Kconfig9
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/evdev.c34
-rw-r--r--drivers/input/keyboard/Kconfig17
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/gpio_keys.c16
-rw-r--r--drivers/input/keyboard/interrupt_keys.c350
-rw-r--r--drivers/input/keyboard/tegra-kbc.c94
-rw-r--r--drivers/input/keyreset.c239
-rw-r--r--drivers/input/misc/Kconfig32
-rw-r--r--drivers/input/misc/Makefile3
-rw-r--r--drivers/input/misc/alps_gpio_scrollwheel.c428
-rw-r--r--drivers/input/misc/gpio_axis.c192
-rw-r--r--drivers/input/misc/gpio_event.c260
-rw-r--r--drivers/input/misc/gpio_input.c376
-rw-r--r--drivers/input/misc/gpio_matrix.c441
-rw-r--r--drivers/input/misc/gpio_output.c97
-rw-r--r--drivers/input/misc/keychord.c387
-rw-r--r--drivers/input/touchscreen/Kconfig19
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c875
-rw-r--r--drivers/input/touchscreen/panjit_i2c.c361
-rw-r--r--drivers/input/touchscreen/synaptics_i2c_rmi.c699
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/ledtrig-sleep.c80
-rw-r--r--drivers/media/video/Kconfig1
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/tegra/Kconfig81
-rw-r--r--drivers/media/video/tegra/Makefile19
-rw-r--r--drivers/media/video/tegra/ad5820.c231
-rw-r--r--drivers/media/video/tegra/ar0832_main.c2549
-rw-r--r--drivers/media/video/tegra/avp/Kconfig25
-rw-r--r--drivers/media/video/tegra/avp/Makefile7
-rw-r--r--drivers/media/video/tegra/avp/avp.c1949
-rw-r--r--drivers/media/video/tegra/avp/avp.h32
-rw-r--r--drivers/media/video/tegra/avp/avp_msg.h358
-rw-r--r--drivers/media/video/tegra/avp/avp_svc.c874
-rw-r--r--drivers/media/video/tegra/avp/headavp.S68
-rw-r--r--drivers/media/video/tegra/avp/headavp.h41
-rw-r--r--drivers/media/video/tegra/avp/nvavp.h53
-rw-r--r--drivers/media/video/tegra/avp/tegra_rpc.c796
-rw-r--r--drivers/media/video/tegra/avp/trpc.h80
-rw-r--r--drivers/media/video/tegra/avp/trpc_local.c419
-rw-r--r--drivers/media/video/tegra/avp/trpc_sema.c244
-rw-r--r--drivers/media/video/tegra/avp/trpc_sema.h30
-rw-r--r--drivers/media/video/tegra/mediaserver/Kconfig10
-rw-r--r--drivers/media/video/tegra/mediaserver/Makefile3
-rw-r--r--drivers/media/video/tegra/mediaserver/tegra_mediaserver.c555
-rw-r--r--drivers/media/video/tegra/nvavp/Kconfig10
-rw-r--r--drivers/media/video/tegra/nvavp/Makefile3
-rw-r--r--drivers/media/video/tegra/nvavp/nvavp_dev.c1405
-rw-r--r--drivers/media/video/tegra/nvavp/nvavp_os.h103
-rw-r--r--drivers/media/video/tegra/ov14810.c1235
-rw-r--r--drivers/media/video/tegra/ov2710.c682
-rw-r--r--drivers/media/video/tegra/ov5650.c1482
-rw-r--r--drivers/media/video/tegra/ov9726.c845
-rw-r--r--drivers/media/video/tegra/sh532u.c1688
-rw-r--r--drivers/media/video/tegra/soc380.c473
-rw-r--r--drivers/media/video/tegra/ssl3250a.c986
-rw-r--r--drivers/media/video/tegra/tegra_camera.c553
-rw-r--r--drivers/media/video/tegra/tps61050.c989
-rw-r--r--drivers/mfd/Kconfig61
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/aat2870-core.c7
-rw-r--r--drivers/mfd/max77663-core.c1402
-rw-r--r--drivers/mfd/max8907c-irq.c425
-rw-r--r--drivers/mfd/max8907c.c373
-rw-r--r--drivers/mfd/ricoh583.c1213
-rw-r--r--drivers/mfd/tps6586x.c43
-rw-r--r--drivers/mfd/tps6591x.c919
-rw-r--r--drivers/mfd/tps80031.c1272
-rw-r--r--drivers/mfd/tps8003x-gpadc.c650
-rw-r--r--drivers/mfd/twl-core.c110
-rw-r--r--drivers/misc/Kconfig68
-rw-r--r--drivers/misc/Makefile16
-rw-r--r--drivers/misc/akm8975.c732
-rw-r--r--drivers/misc/apanic.c606
-rw-r--r--drivers/misc/bcm4329_rfkill.c207
-rw-r--r--drivers/misc/inv_mpu/Kconfig77
-rw-r--r--drivers/misc/inv_mpu/Makefile45
-rw-r--r--drivers/misc/inv_mpu/accel/Kconfig133
-rw-r--r--drivers/misc/inv_mpu/accel/Makefile38
-rw-r--r--drivers/misc/inv_mpu/accel/adxl34x.c728
-rw-r--r--drivers/misc/inv_mpu/accel/bma150.c777
-rw-r--r--drivers/misc/inv_mpu/accel/bma222.c654
-rw-r--r--drivers/misc/inv_mpu/accel/bma250.c787
-rw-r--r--drivers/misc/inv_mpu/accel/cma3000.c222
-rw-r--r--drivers/misc/inv_mpu/accel/kxsd9.c264
-rw-r--r--drivers/misc/inv_mpu/accel/kxtf9.c841
-rw-r--r--drivers/misc/inv_mpu/accel/lis331.c745
-rw-r--r--drivers/misc/inv_mpu/accel/lis3dh.c728
-rw-r--r--drivers/misc/inv_mpu/accel/lsm303dlx_a.c881
-rw-r--r--drivers/misc/inv_mpu/accel/mma8450.c804
-rw-r--r--drivers/misc/inv_mpu/accel/mma845x.c713
-rw-r--r--drivers/misc/inv_mpu/accel/mpu6050.h28
-rw-r--r--drivers/misc/inv_mpu/compass/Kconfig121
-rw-r--r--drivers/misc/inv_mpu/compass/Makefile38
-rw-r--r--drivers/misc/inv_mpu/compass/ak8972.c499
-rw-r--r--drivers/misc/inv_mpu/compass/ak8975.c500
-rw-r--r--drivers/misc/inv_mpu/compass/ami306.c1020
-rw-r--r--drivers/misc/inv_mpu/compass/ami30x.c308
-rw-r--r--drivers/misc/inv_mpu/compass/ami_hw.h87
-rw-r--r--drivers/misc/inv_mpu/compass/ami_sensor_def.h144
-rw-r--r--drivers/misc/inv_mpu/compass/hmc5883.c391
-rw-r--r--drivers/misc/inv_mpu/compass/hscdtd002b.c294
-rw-r--r--drivers/misc/inv_mpu/compass/hscdtd004a.c318
-rw-r--r--drivers/misc/inv_mpu/compass/lsm303dlx_m.c395
-rw-r--r--drivers/misc/inv_mpu/compass/mmc314x.c313
-rw-r--r--drivers/misc/inv_mpu/compass/yas529-kernel.c611
-rw-r--r--drivers/misc/inv_mpu/compass/yas530.c580
-rw-r--r--drivers/misc/inv_mpu/log.h287
-rw-r--r--drivers/misc/inv_mpu/mldl_cfg.c1765
-rw-r--r--drivers/misc/inv_mpu/mldl_cfg.h380
-rw-r--r--drivers/misc/inv_mpu/mldl_print_cfg.c137
-rw-r--r--drivers/misc/inv_mpu/mldl_print_cfg.h38
-rw-r--r--drivers/misc/inv_mpu/mlsl-kernel.c420
-rw-r--r--drivers/misc/inv_mpu/mlsl.h186
-rw-r--r--drivers/misc/inv_mpu/mltypes.h234
-rw-r--r--drivers/misc/inv_mpu/mpu-dev.c1244
-rw-r--r--drivers/misc/inv_mpu/mpu-dev.h36
-rw-r--r--drivers/misc/inv_mpu/mpu3050.h251
-rw-r--r--drivers/misc/inv_mpu/mpuirq.c257
-rw-r--r--drivers/misc/inv_mpu/mpuirq.h36
-rw-r--r--drivers/misc/inv_mpu/pressure/Kconfig20
-rw-r--r--drivers/misc/inv_mpu/pressure/Makefile8
-rw-r--r--drivers/misc/inv_mpu/pressure/bma085.c367
-rw-r--r--drivers/misc/inv_mpu/slaveirq.c266
-rw-r--r--drivers/misc/inv_mpu/slaveirq.h36
-rw-r--r--drivers/misc/inv_mpu/timerirq.c296
-rw-r--r--drivers/misc/inv_mpu/timerirq.h30
-rw-r--r--drivers/misc/max1749.c118
-rw-r--r--drivers/misc/mpu3050/Kconfig65
-rw-r--r--drivers/misc/mpu3050/Makefile132
-rw-r--r--drivers/misc/mpu3050/accel/kxtf9.c669
-rw-r--r--drivers/misc/mpu3050/compass/ak8975.c258
-rw-r--r--drivers/misc/mpu3050/log.h306
-rw-r--r--drivers/misc/mpu3050/mldl_cfg.c1739
-rw-r--r--drivers/misc/mpu3050/mldl_cfg.h199
-rw-r--r--drivers/misc/mpu3050/mlos-kernel.c89
-rw-r--r--drivers/misc/mpu3050/mlos.h73
-rw-r--r--drivers/misc/mpu3050/mlsl-kernel.c331
-rw-r--r--drivers/misc/mpu3050/mlsl.h103
-rw-r--r--drivers/misc/mpu3050/mltypes.h227
-rw-r--r--drivers/misc/mpu3050/mpu-dev.c1310
-rw-r--r--drivers/misc/mpu3050/mpu-i2c.c196
-rw-r--r--drivers/misc/mpu3050/mpu-i2c.h58
-rw-r--r--drivers/misc/mpu3050/mpuirq.c319
-rw-r--r--drivers/misc/mpu3050/mpuirq.h42
-rw-r--r--drivers/misc/mpu3050/slaveirq.c273
-rw-r--r--drivers/misc/mpu3050/slaveirq.h43
-rw-r--r--drivers/misc/mpu3050/timerirq.c299
-rw-r--r--drivers/misc/mpu3050/timerirq.h30
-rw-r--r--drivers/misc/nct1008.c1027
-rw-r--r--drivers/misc/pmem.c1345
-rw-r--r--drivers/misc/tegra-baseband/Kconfig32
-rw-r--r--drivers/misc/tegra-baseband/Makefile6
-rw-r--r--drivers/misc/tegra-baseband/bb-m7400.c270
-rw-r--r--drivers/misc/tegra-baseband/bb-power.c273
-rw-r--r--drivers/misc/tegra-baseband/bb-power.h60
-rw-r--r--drivers/misc/tegra-cryptodev.c349
-rw-r--r--drivers/misc/tegra-cryptodev.h70
-rw-r--r--drivers/misc/uid_stat.c156
-rw-r--r--drivers/misc/wl127x-rfkill.c121
-rw-r--r--drivers/mmc/card/Kconfig9
-rw-r--r--drivers/mmc/card/block.c38
-rw-r--r--drivers/mmc/core/Kconfig17
-rw-r--r--drivers/mmc/core/core.c98
-rw-r--r--drivers/mmc/core/host.c10
-rw-r--r--drivers/mmc/core/sd.c92
-rw-r--r--drivers/mmc/core/sdio.c148
-rw-r--r--drivers/mmc/core/sdio_bus.c13
-rw-r--r--drivers/mmc/core/sdio_io.c33
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c30
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c595
-rw-r--r--drivers/mmc/host/sdhci.c128
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mtd/devices/Kconfig6
-rw-r--r--drivers/mtd/devices/Makefile2
-rw-r--r--drivers/mtd/devices/tegra_nand.c1782
-rw-r--r--drivers/mtd/devices/tegra_nand.h148
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/tegra_nor.c480
-rw-r--r--drivers/mtd/nand/Kconfig24
-rw-r--r--drivers/mtd/nand/nand_base.c39
-rw-r--r--drivers/net/Kconfig17
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/caif/Kconfig9
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/tegra_caif_sspi.c426
-rw-r--r--drivers/net/pppolac.c449
-rw-r--r--drivers/net/pppopns.c428
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cdc_ether.c20
-rw-r--r--drivers/net/usb/raw_ip_net.c735
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/wireless/Kconfig7
-rw-r--r--drivers/net/wireless/Makefile3
-rw-r--r--drivers/net/wireless/bcm4329/Kconfig75
-rw-r--r--drivers/net/wireless/bcm4329/Makefile52
-rw-r--r--drivers/net/wireless/bcm4329/aiutils.c686
-rw-r--r--drivers/net/wireless/bcm4329/bcmpcispi.c630
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdh.c652
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdh_linux.c735
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c1304
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c269
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdspi.c1596
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdspi_linux.c252
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdstd.c3127
-rw-r--r--drivers/net/wireless/bcm4329/bcmsdstd_linux.c251
-rw-r--r--drivers/net/wireless/bcm4329/bcmutils.c1838
-rw-r--r--drivers/net/wireless/bcm4329/bcmwifi.c199
-rw-r--r--drivers/net/wireless/bcm4329/dhd.h472
-rw-r--r--drivers/net/wireless/bcm4329/dhd_bus.h93
-rw-r--r--drivers/net/wireless/bcm4329/dhd_cdc.c535
-rw-r--r--drivers/net/wireless/bcm4329/dhd_common.c2432
-rw-r--r--drivers/net/wireless/bcm4329/dhd_custom_gpio.c272
-rw-r--r--drivers/net/wireless/bcm4329/dhd_dbg.h100
-rw-r--r--drivers/net/wireless/bcm4329/dhd_linux.c3450
-rw-r--r--drivers/net/wireless/bcm4329/dhd_linux_sched.c38
-rw-r--r--drivers/net/wireless/bcm4329/dhd_proto.h102
-rw-r--r--drivers/net/wireless/bcm4329/dhd_sdio.c5840
-rw-r--r--drivers/net/wireless/bcm4329/dngl_stats.h43
-rw-r--r--drivers/net/wireless/bcm4329/hndpmu.c131
-rw-r--r--drivers/net/wireless/bcm4329/include/Makefile21
-rw-r--r--drivers/net/wireless/bcm4329/include/aidmp.h368
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmcdc.h100
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmdefs.h114
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmdevs.h124
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmendian.h205
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmpcispi.h205
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmperf.h36
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmsdbus.h117
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmsdh.h208
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h122
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmsdpcm.h263
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmsdspi.h131
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmsdstd.h223
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmspi.h36
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmspibrcm.h134
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmutils.h637
-rw-r--r--drivers/net/wireless/bcm4329/include/bcmwifi.h154
-rw-r--r--drivers/net/wireless/bcm4329/include/dhdioctl.h123
-rw-r--r--drivers/net/wireless/bcm4329/include/epivers.h48
-rw-r--r--drivers/net/wireless/bcm4329/include/hndpmu.h34
-rw-r--r--drivers/net/wireless/bcm4329/include/hndrte_armtrap.h88
-rw-r--r--drivers/net/wireless/bcm4329/include/hndrte_cons.h63
-rw-r--r--drivers/net/wireless/bcm4329/include/hndsoc.h195
-rw-r--r--drivers/net/wireless/bcm4329/include/linux_osl.h322
-rw-r--r--drivers/net/wireless/bcm4329/include/linuxver.h447
-rw-r--r--drivers/net/wireless/bcm4329/include/miniopt.h77
-rw-r--r--drivers/net/wireless/bcm4329/include/msgtrace.h72
-rw-r--r--drivers/net/wireless/bcm4329/include/osl.h55
-rw-r--r--drivers/net/wireless/bcm4329/include/packed_section_end.h54
-rw-r--r--drivers/net/wireless/bcm4329/include/packed_section_start.h61
-rw-r--r--drivers/net/wireless/bcm4329/include/pcicfg.h52
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/802.11.h1433
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/802.11e.h131
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/802.1d.h49
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/bcmeth.h83
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/bcmevent.h212
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/bcmip.h157
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/eapol.h172
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/ethernet.h148
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/sdspi.h71
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/vlan.h63
-rw-r--r--drivers/net/wireless/bcm4329/include/proto/wpa.h159
-rw-r--r--drivers/net/wireless/bcm4329/include/sbchipc.h1026
-rw-r--r--drivers/net/wireless/bcm4329/include/sbconfig.h276
-rw-r--r--drivers/net/wireless/bcm4329/include/sbhnddma.h294
-rw-r--r--drivers/net/wireless/bcm4329/include/sbpcmcia.h109
-rw-r--r--drivers/net/wireless/bcm4329/include/sbsdio.h166
-rw-r--r--drivers/net/wireless/bcm4329/include/sbsdpcmdev.h288
-rw-r--r--drivers/net/wireless/bcm4329/include/sbsocram.h150
-rw-r--r--drivers/net/wireless/bcm4329/include/sdio.h566
-rw-r--r--drivers/net/wireless/bcm4329/include/sdioh.h299
-rw-r--r--drivers/net/wireless/bcm4329/include/sdiovar.h58
-rw-r--r--drivers/net/wireless/bcm4329/include/siutils.h235
-rw-r--r--drivers/net/wireless/bcm4329/include/spid.h153
-rw-r--r--drivers/net/wireless/bcm4329/include/trxhdr.h46
-rw-r--r--drivers/net/wireless/bcm4329/include/typedefs.h303
-rw-r--r--drivers/net/wireless/bcm4329/include/wlioctl.h1673
-rw-r--r--drivers/net/wireless/bcm4329/linux_osl.c625
-rw-r--r--drivers/net/wireless/bcm4329/miniopt.c163
-rw-r--r--drivers/net/wireless/bcm4329/sbutils.c1004
-rw-r--r--drivers/net/wireless/bcm4329/siutils.c1527
-rw-r--r--drivers/net/wireless/bcm4329/siutils_priv.h213
-rw-r--r--drivers/net/wireless/bcm4329/wl_iw.c8455
-rw-r--r--drivers/net/wireless/bcm4329/wl_iw.h309
-rw-r--r--drivers/net/wireless/bcmdhd/Kconfig48
-rw-r--r--drivers/net/wireless/bcmdhd/Makefile43
-rw-r--r--drivers/net/wireless/bcmdhd/aiutils.c675
-rw-r--r--drivers/net/wireless/bcmdhd/bcmevent.c125
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh.c690
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_linux.c741
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c1331
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c332
-rw-r--r--drivers/net/wireless/bcmdhd/bcmutils.c1967
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi.c274
-rw-r--r--drivers/net/wireless/bcmdhd/dhd.h733
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bta.c335
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bta.h39
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bus.h99
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cdc.c2530
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_common.c2306
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_custom_gpio.c293
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_dbg.h105
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux.c5079
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux_mon.c393
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux_sched.c39
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_proto.h105
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_sdio.c6289
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_wlfc.h266
-rw-r--r--drivers/net/wireless/bcmdhd/dngl_stats.h43
-rw-r--r--drivers/net/wireless/bcmdhd/dngl_wlhdr.h40
-rw-r--r--drivers/net/wireless/bcmdhd/hndpmu.c222
-rw-r--r--drivers/net/wireless/bcmdhd/include/Makefile53
-rw-r--r--drivers/net/wireless/bcmdhd/include/aidmp.h377
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmcdc.h121
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmdefs.h196
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmdevs.h182
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmendian.h279
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmpcispi.h181
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmperf.h36
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdbus.h120
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdh.h211
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h122
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdpcm.h274
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdspi.h135
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdstd.h267
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmspi.h40
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmutils.h708
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmwifi.h165
-rw-r--r--drivers/net/wireless/bcmdhd/include/dhdioctl.h129
-rw-r--r--drivers/net/wireless/bcmdhd/include/epivers.h49
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndpmu.h34
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h88
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndrte_cons.h68
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndsoc.h207
-rw-r--r--drivers/net/wireless/bcmdhd/include/htsf.h74
-rw-r--r--drivers/net/wireless/bcmdhd/include/linux_osl.h431
-rw-r--r--drivers/net/wireless/bcmdhd/include/linuxver.h593
-rw-r--r--drivers/net/wireless/bcmdhd/include/miniopt.h77
-rw-r--r--drivers/net/wireless/bcmdhd/include/msgtrace.h74
-rw-r--r--drivers/net/wireless/bcmdhd/include/osl.h66
-rw-r--r--drivers/net/wireless/bcmdhd/include/packed_section_end.h54
-rw-r--r--drivers/net/wireless/bcmdhd/include/packed_section_start.h61
-rw-r--r--drivers/net/wireless/bcmdhd/include/pcicfg.h52
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11.h1731
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h45
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11e.h131
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.1d.h49
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmeth.h83
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmevent.h312
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmip.h154
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h442
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/eapol.h173
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/ethernet.h162
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/p2p.h512
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/sdspi.h76
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/vlan.h70
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/wpa.h160
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbchipc.h1615
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbconfig.h276
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbhnddma.h327
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbpcmcia.h109
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsdio.h166
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h293
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsocram.h186
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdio.h611
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdioh.h412
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdiovar.h58
-rw-r--r--drivers/net/wireless/bcmdhd/include/siutils.h247
-rw-r--r--drivers/net/wireless/bcmdhd/include/trxhdr.h52
-rw-r--r--drivers/net/wireless/bcmdhd/include/typedefs.h309
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlfc_proto.h198
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlioctl.h2757
-rw-r--r--drivers/net/wireless/bcmdhd/linux_osl.c919
-rw-r--r--drivers/net/wireless/bcmdhd/sbutils.c992
-rw-r--r--drivers/net/wireless/bcmdhd/siutils.c1720
-rw-r--r--drivers/net/wireless/bcmdhd/siutils_priv.h235
-rw-r--r--drivers/net/wireless/bcmdhd/uamp_api.h176
-rw-r--r--drivers/net/wireless/bcmdhd/wl_android.c840
-rw-r--r--drivers/net/wireless/bcmdhd/wl_android.h57
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfg80211.c7330
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfg80211.h558
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfgp2p.c1469
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfgp2p.h247
-rw-r--r--drivers/net/wireless/bcmdhd/wl_dbg.h49
-rw-r--r--drivers/net/wireless/bcmdhd/wl_iw.c8766
-rw-r--r--drivers/net/wireless/bcmdhd/wl_iw.h306
-rw-r--r--drivers/net/wireless/bcmdhd/wldev_common.c341
-rw-r--r--drivers/net/wireless/bcmdhd/wldev_common.h110
-rw-r--r--drivers/nfc/pn544.c1049
-rw-r--r--drivers/power/Kconfig29
-rw-r--r--drivers/power/Makefile4
-rw-r--r--drivers/power/bq20z75.c29
-rw-r--r--drivers/power/bq27x00_battery.c307
-rw-r--r--drivers/power/gpio-charger.c2
-rw-r--r--drivers/power/max8907c-charger.c228
-rw-r--r--drivers/power/pda_power.c71
-rw-r--r--drivers/power/power_supply_core.c30
-rw-r--r--drivers/power/tegra_bpc_mgmt.c139
-rw-r--r--drivers/power/tps80031-charger.c471
-rw-r--r--drivers/power/tps80031_battery_gauge.c606
-rw-r--r--drivers/regulator/Kconfig64
-rw-r--r--drivers/regulator/Makefile9
-rw-r--r--drivers/regulator/aat2870-regulator.c5
-rw-r--r--drivers/regulator/core.c70
-rw-r--r--drivers/regulator/fan53555-regulator.c567
-rw-r--r--drivers/regulator/gpio-switch-regulator.c412
-rw-r--r--drivers/regulator/max77663-regulator.c895
-rw-r--r--drivers/regulator/max8907c-regulator.c421
-rw-r--r--drivers/regulator/ricoh583-regulator.c412
-rw-r--r--drivers/regulator/tps6236x-regulator.c521
-rw-r--r--drivers/regulator/tps6586x-regulator.c129
-rw-r--r--drivers/regulator/tps6591x-regulator.c955
-rw-r--r--drivers/regulator/tps80031-regulator.c1082
-rw-r--r--drivers/rtc/Kconfig77
-rw-r--r--drivers/rtc/Makefile8
-rw-r--r--drivers/rtc/alarm-dev.c286
-rw-r--r--drivers/rtc/alarm.c590
-rw-r--r--drivers/rtc/rtc-max77663.c611
-rw-r--r--drivers/rtc/rtc-max8907c.c318
-rw-r--r--drivers/rtc/rtc-ricoh583.c403
-rw-r--r--drivers/rtc/rtc-tegra.c33
-rw-r--r--drivers/rtc/rtc-tps6586x.c387
-rw-r--r--drivers/rtc/rtc-tps6591x.c546
-rw-r--r--drivers/rtc/rtc-tps80031.c452
-rw-r--r--drivers/spi/Kconfig10
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-tegra.c1345
-rw-r--r--drivers/spi/spi_slave_tegra.c1399
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/Kconfig95
-rw-r--r--drivers/staging/android/Makefile6
-rw-r--r--drivers/staging/android/binder.c3600
-rw-r--r--drivers/staging/android/binder.h330
-rw-r--r--drivers/staging/android/logger.c616
-rw-r--r--drivers/staging/android/logger.h49
-rw-r--r--drivers/staging/android/lowmemorykiller.c213
-rw-r--r--drivers/staging/android/ram_console.c443
-rw-r--r--drivers/staging/android/timed_gpio.c176
-rw-r--r--drivers/staging/android/timed_gpio.h33
-rw-r--r--drivers/staging/android/timed_output.c123
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/iio/light/Kconfig29
-rw-r--r--drivers/staging/iio/light/Makefile3
-rw-r--r--drivers/staging/iio/light/isl29028.c1269
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c13
-rw-r--r--drivers/switch/Kconfig15
-rw-r--r--drivers/switch/Makefile4
-rw-r--r--drivers/switch/switch_class.c174
-rw-r--r--drivers/switch/switch_gpio.c172
-rw-r--r--drivers/tty/serial/8250.c28
-rw-r--r--drivers/tty/serial/8250.h1
-rw-r--r--drivers/tty/serial/Kconfig8
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/serial_core.c3
-rw-r--r--drivers/tty/serial/tegra_hsuart.c1682
-rw-r--r--drivers/usb/class/cdc-acm.c52
-rw-r--r--drivers/usb/class/cdc-acm.h4
-rw-r--r--drivers/usb/gadget/Kconfig10
-rw-r--r--drivers/usb/gadget/Makefile6
-rw-r--r--drivers/usb/gadget/android.c1165
-rw-r--r--drivers/usb/gadget/composite.c62
-rw-r--r--drivers/usb/gadget/f_accessory.c788
-rw-r--r--drivers/usb/gadget/f_acm.c7
-rw-r--r--drivers/usb/gadget/f_adb.c635
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/f_mtp.c1264
-rw-r--r--drivers/usb/gadget/f_rndis.c24
-rw-r--r--drivers/usb/gadget/fsl_tegra_udc.c155
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c913
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h208
-rw-r--r--drivers/usb/gadget/rndis.c23
-rw-r--r--drivers/usb/gadget/storage_common.c6
-rw-r--r--drivers/usb/gadget/u_ether.c23
-rw-r--r--drivers/usb/gadget/u_ether.h9
-rw-r--r--drivers/usb/gadget/u_serial.c6
-rw-r--r--drivers/usb/host/Kconfig6
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-fsl.c66
-rw-r--r--drivers/usb/host/ehci-hcd.c10
-rw-r--r--drivers/usb/host/ehci-hub.c12
-rw-r--r--drivers/usb/host/ehci-q.c19
-rw-r--r--drivers/usb/host/ehci-tegra.c628
-rw-r--r--drivers/usb/host/ehci.h24
-rw-r--r--drivers/usb/otg/Kconfig16
-rw-r--r--drivers/usb/otg/Makefile4
-rw-r--r--drivers/usb/otg/otg-wakelock.c169
-rw-r--r--drivers/usb/otg/otg_id.c205
-rw-r--r--drivers/usb/otg/tegra-otg.c510
-rw-r--r--drivers/usb/serial/Kconfig13
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/baseband_usb_chr.c1105
-rw-r--r--drivers/usb/serial/baseband_usb_chr.h106
-rw-r--r--drivers/usb/storage/unusual_devs.h6
-rw-r--r--drivers/video/Kconfig3
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/backlight/Kconfig10
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/aat2870_bl.c3
-rw-r--r--drivers/video/backlight/tegra_pwm_bl.c177
-rw-r--r--drivers/video/fbmon.c33
-rw-r--r--drivers/video/modedb.c578
-rw-r--r--drivers/video/tegra/Kconfig125
-rw-r--r--drivers/video/tegra/Makefile5
-rw-r--r--drivers/video/tegra/dc/Makefile11
-rw-r--r--drivers/video/tegra/dc/dc.c2989
-rw-r--r--drivers/video/tegra/dc/dc_priv.h222
-rw-r--r--drivers/video/tegra/dc/dc_reg.h555
-rw-r--r--drivers/video/tegra/dc/dc_sysfs.c327
-rw-r--r--drivers/video/tegra/dc/dsi.c2642
-rw-r--r--drivers/video/tegra/dc/dsi.h279
-rw-r--r--drivers/video/tegra/dc/dsi_regs.h351
-rw-r--r--drivers/video/tegra/dc/edid.c611
-rw-r--r--drivers/video/tegra/dc/edid.h62
-rw-r--r--drivers/video/tegra/dc/ext/Makefile5
-rw-r--r--drivers/video/tegra/dc/ext/control.c261
-rw-r--r--drivers/video/tegra/dc/ext/cursor.c203
-rw-r--r--drivers/video/tegra/dc/ext/dev.c919
-rw-r--r--drivers/video/tegra/dc/ext/events.c197
-rw-r--r--drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h142
-rw-r--r--drivers/video/tegra/dc/ext/util.c78
-rw-r--r--drivers/video/tegra/dc/hdmi.c1838
-rw-r--r--drivers/video/tegra/dc/hdmi.h222
-rw-r--r--drivers/video/tegra/dc/hdmi_reg.h477
-rw-r--r--drivers/video/tegra/dc/nvhdcp.c1259
-rw-r--r--drivers/video/tegra/dc/nvhdcp.h46
-rw-r--r--drivers/video/tegra/dc/nvsd.c904
-rw-r--r--drivers/video/tegra/dc/nvsd.h25
-rw-r--r--drivers/video/tegra/dc/overlay.c898
-rw-r--r--drivers/video/tegra/dc/overlay.h43
-rw-r--r--drivers/video/tegra/dc/rgb.c160
-rw-r--r--drivers/video/tegra/fb.c499
-rw-r--r--drivers/video/tegra/host/Makefile19
-rw-r--r--drivers/video/tegra/host/bus.c567
-rw-r--r--drivers/video/tegra/host/chip_support.h146
-rw-r--r--drivers/video/tegra/host/debug.c159
-rw-r--r--drivers/video/tegra/host/debug.h52
-rw-r--r--drivers/video/tegra/host/dev.c1076
-rw-r--r--drivers/video/tegra/host/dev.h66
-rw-r--r--drivers/video/tegra/host/gr3d/Makefile10
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d.c154
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d.h62
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d_t20.c385
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d_t20.h30
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d_t30.c425
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d_t30.h30
-rw-r--r--drivers/video/tegra/host/gr3d/scale3d.c651
-rw-r--r--drivers/video/tegra/host/gr3d/scale3d.h49
-rw-r--r--drivers/video/tegra/host/host1x/Makefile13
-rw-r--r--drivers/video/tegra/host/host1x/host1x_cdma.c668
-rw-r--r--drivers/video/tegra/host/host1x/host1x_cdma.h43
-rw-r--r--drivers/video/tegra/host/host1x/host1x_channel.c570
-rw-r--r--drivers/video/tegra/host/host1x/host1x_channel.h46
-rw-r--r--drivers/video/tegra/host/host1x/host1x_cpuaccess.c54
-rw-r--r--drivers/video/tegra/host/host1x/host1x_debug.c404
-rw-r--r--drivers/video/tegra/host/host1x/host1x_hardware.h276
-rw-r--r--drivers/video/tegra/host/host1x/host1x_intr.c220
-rw-r--r--drivers/video/tegra/host/host1x/host1x_syncpt.c244
-rw-r--r--drivers/video/tegra/host/host1x/host1x_syncpt.h79
-rw-r--r--drivers/video/tegra/host/mpe/Makefile7
-rw-r--r--drivers/video/tegra/host/mpe/mpe.c570
-rw-r--r--drivers/video/tegra/host/mpe/mpe.h31
-rw-r--r--drivers/video/tegra/host/nvhost_acm.c510
-rw-r--r--drivers/video/tegra/host/nvhost_acm.h121
-rw-r--r--drivers/video/tegra/host/nvhost_cdma.c565
-rw-r--r--drivers/video/tegra/host/nvhost_cdma.h136
-rw-r--r--drivers/video/tegra/host/nvhost_channel.c117
-rw-r--r--drivers/video/tegra/host/nvhost_channel.h101
-rw-r--r--drivers/video/tegra/host/nvhost_cpuaccess.c120
-rw-r--r--drivers/video/tegra/host/nvhost_cpuaccess.h65
-rw-r--r--drivers/video/tegra/host/nvhost_hwctx.h78
-rw-r--r--drivers/video/tegra/host/nvhost_intr.c440
-rw-r--r--drivers/video/tegra/host/nvhost_intr.h123
-rw-r--r--drivers/video/tegra/host/nvhost_job.c321
-rw-r--r--drivers/video/tegra/host/nvhost_job.h140
-rw-r--r--drivers/video/tegra/host/nvhost_syncpt.c247
-rw-r--r--drivers/video/tegra/host/nvhost_syncpt.h162
-rw-r--r--drivers/video/tegra/host/t20/Makefile8
-rw-r--r--drivers/video/tegra/host/t20/t20.c221
-rw-r--r--drivers/video/tegra/host/t20/t20.h35
-rw-r--r--drivers/video/tegra/host/t30/Makefile8
-rw-r--r--drivers/video/tegra/host/t30/t30.c244
-rw-r--r--drivers/video/tegra/host/t30/t30.h30
-rw-r--r--drivers/video/tegra/nvmap/Makefile7
-rw-r--r--drivers/video/tegra/nvmap/nvmap.c867
-rw-r--r--drivers/video/tegra/nvmap/nvmap.h244
-rw-r--r--drivers/video/tegra/nvmap/nvmap_common.h38
-rw-r--r--drivers/video/tegra/nvmap/nvmap_dev.c1423
-rw-r--r--drivers/video/tegra/nvmap/nvmap_handle.c626
-rw-r--r--drivers/video/tegra/nvmap/nvmap_heap.c1113
-rw-r--r--drivers/video/tegra/nvmap/nvmap_heap.h68
-rw-r--r--drivers/video/tegra/nvmap/nvmap_ioctl.c749
-rw-r--r--drivers/video/tegra/nvmap/nvmap_ioctl.h159
-rw-r--r--drivers/video/tegra/nvmap/nvmap_mru.c187
-rw-r--r--drivers/video/tegra/nvmap/nvmap_mru.h84
-rw-r--r--drivers/w1/Makefile1
-rw-r--r--drivers/w1/masters/Kconfig7
-rw-r--r--drivers/w1/masters/Makefile2
-rw-r--r--drivers/w1/masters/tegra_w1.c491
-rw-r--r--drivers/watchdog/Kconfig17
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/tegra_wdt.c444
-rw-r--r--fs/Kconfig4
-rw-r--r--fs/Makefile3
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/fat/dir.c9
-rw-r--r--fs/fat/fat.h1
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fuse/dev.c6
-rw-r--r--fs/partitions/check.c11
-rw-r--r--fs/partitions/efi.c11
-rw-r--r--fs/proc/Kconfig12
-rw-r--r--fs/proc/base.c43
-rw-r--r--fs/proc/stat.c4
-rw-r--r--fs/yaffs2/Kconfig161
-rw-r--r--fs/yaffs2/Makefile17
-rw-r--r--fs/yaffs2/yaffs_allocator.c396
-rw-r--r--fs/yaffs2/yaffs_allocator.h30
-rw-r--r--fs/yaffs2/yaffs_attribs.c124
-rw-r--r--fs/yaffs2/yaffs_attribs.h28
-rw-r--r--fs/yaffs2/yaffs_bitmap.c98
-rw-r--r--fs/yaffs2/yaffs_bitmap.h33
-rw-r--r--fs/yaffs2/yaffs_checkptrw.c415
-rw-r--r--fs/yaffs2/yaffs_checkptrw.h33
-rw-r--r--fs/yaffs2/yaffs_ecc.c298
-rw-r--r--fs/yaffs2/yaffs_ecc.h44
-rw-r--r--fs/yaffs2/yaffs_getblockinfo.h35
-rw-r--r--fs/yaffs2/yaffs_guts.c5164
-rw-r--r--fs/yaffs2/yaffs_guts.h915
-rw-r--r--fs/yaffs2/yaffs_linux.h41
-rw-r--r--fs/yaffs2/yaffs_mtdif.c54
-rw-r--r--fs/yaffs2/yaffs_mtdif.h23
-rw-r--r--fs/yaffs2/yaffs_mtdif1.c330
-rw-r--r--fs/yaffs2/yaffs_mtdif1.h29
-rw-r--r--fs/yaffs2/yaffs_mtdif2.c225
-rw-r--r--fs/yaffs2/yaffs_mtdif2.h29
-rw-r--r--fs/yaffs2/yaffs_nameval.c201
-rw-r--r--fs/yaffs2/yaffs_nameval.h28
-rw-r--r--fs/yaffs2/yaffs_nand.c127
-rw-r--r--fs/yaffs2/yaffs_nand.h38
-rw-r--r--fs/yaffs2/yaffs_packedtags1.c53
-rw-r--r--fs/yaffs2/yaffs_packedtags1.h39
-rw-r--r--fs/yaffs2/yaffs_packedtags2.c196
-rw-r--r--fs/yaffs2/yaffs_packedtags2.h47
-rw-r--r--fs/yaffs2/yaffs_tagscompat.c422
-rw-r--r--fs/yaffs2/yaffs_tagscompat.h36
-rw-r--r--fs/yaffs2/yaffs_tagsvalidity.c27
-rw-r--r--fs/yaffs2/yaffs_tagsvalidity.h23
-rw-r--r--fs/yaffs2/yaffs_trace.h57
-rw-r--r--fs/yaffs2/yaffs_verify.c535
-rw-r--r--fs/yaffs2/yaffs_verify.h43
-rw-r--r--fs/yaffs2/yaffs_vfs.c2790
-rw-r--r--fs/yaffs2/yaffs_yaffs1.c433
-rw-r--r--fs/yaffs2/yaffs_yaffs1.h22
-rw-r--r--fs/yaffs2/yaffs_yaffs2.c1598
-rw-r--r--fs/yaffs2/yaffs_yaffs2.h39
-rw-r--r--fs/yaffs2/yportenv.h70
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/drm_fixed.h1
-rw-r--r--include/linux/adt7461.h41
-rw-r--r--include/linux/akm8975.h87
-rw-r--r--include/linux/amba/mmci.h12
-rw-r--r--include/linux/android_aid.h28
-rw-r--r--include/linux/android_alarm.h106
-rw-r--r--include/linux/android_pmem.h93
-rw-r--r--include/linux/ashmem.h48
-rw-r--r--include/linux/bq27x00.h30
-rw-r--r--include/linux/cgroup.h14
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpuacct.h43
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/earlysuspend.h56
-rw-r--r--include/linux/fb.h18
-rw-r--r--include/linux/fsl_devices.h11
-rw-r--r--include/linux/gpio_event.h170
-rw-r--r--include/linux/gpio_keys.h1
-rw-r--r--include/linux/gpio_scrollwheel.h46
-rw-r--r--include/linux/hid.h4
-rwxr-xr-xinclude/linux/i2c-slave.h259
-rw-r--r--include/linux/i2c-tegra.h27
-rw-r--r--include/linux/i2c.h1
-rw-r--r--include/linux/i2c/atmel_mxt_ts.h16
-rw-r--r--include/linux/i2c/panjit_ts.h30
-rw-r--r--include/linux/i2c/twl.h58
-rw-r--r--include/linux/if_pppolac.h33
-rw-r--r--include/linux/if_pppopns.h32
-rw-r--r--include/linux/if_pppox.h27
-rw-r--r--include/linux/ina219.h34
-rwxr-xr-xinclude/linux/interrupt_keys.h47
-rw-r--r--include/linux/ion.h344
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/keychord.h52
-rw-r--r--include/linux/keyreset.h28
-rw-r--r--include/linux/mfd/max77663-core.h173
-rw-r--r--include/linux/mfd/max8907c.h259
-rw-r--r--include/linux/mfd/ricoh583.h164
-rw-r--r--include/linux/mfd/tps6586x.h53
-rw-r--r--include/linux/mfd/tps6591x.h123
-rw-r--r--include/linux/mfd/tps80031.h204
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mmc/host.h35
-rw-r--r--include/linux/mmc/pm.h1
-rw-r--r--include/linux/mmc/sdhci.h69
-rw-r--r--include/linux/mmc/sdio_func.h10
-rw-r--r--include/linux/mpu.h366
-rw-r--r--include/linux/mpu3050.h255
-rw-r--r--include/linux/mpu6000.h406
-rw-r--r--include/linux/msdos_fs.h12
-rw-r--r--include/linux/nct1008.h98
-rw-r--r--include/linux/netfilter/xt_qtaguid.h13
-rw-r--r--include/linux/netfilter/xt_quota2.h25
-rw-r--r--include/linux/netfilter/xt_socket.h6
-rw-r--r--include/linux/nfc/pn544.h102
-rw-r--r--include/linux/nvhost.h73
-rw-r--r--include/linux/nvhost_ioctl.h204
-rw-r--r--include/linux/pda_power.h2
-rw-r--r--include/linux/platform_data/ina230.h32
-rw-r--r--include/linux/platform_data/ram_console.h22
-rw-r--r--include/linux/platform_data/tegra_bpc_mgmt.h25
-rw-r--r--include/linux/platform_data/tegra_nor.h37
-rw-r--r--include/linux/platform_data/tegra_usb.h16
-rw-r--r--include/linux/pm_qos_params.h17
-rw-r--r--include/linux/power/max8907c-charger.h64
-rw-r--r--include/linux/power_supply.h4
-rw-r--r--include/linux/regmap.h2
-rw-r--r--include/linux/regulator/consumer.h8
-rw-r--r--include/linux/regulator/fan53555-regulator.h63
-rw-r--r--include/linux/regulator/gpio-switch-regulator.h69
-rw-r--r--include/linux/regulator/max77663-regulator.h125
-rw-r--r--include/linux/regulator/max8907c-regulator.h46
-rw-r--r--include/linux/regulator/ricoh583-regulator.h63
-rw-r--r--include/linux/regulator/tps6236x-regulator.h54
-rw-r--r--include/linux/regulator/tps6591x-regulator.h77
-rw-r--r--include/linux/regulator/tps80031-regulator.h89
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serial_reg.h10
-rw-r--r--include/linux/sockios.h1
-rw-r--r--include/linux/spi-tegra.h50
-rw-r--r--include/linux/switch.h53
-rw-r--r--include/linux/synaptics_i2c_rmi.h55
-rw-r--r--include/linux/tegra_audio.h78
-rw-r--r--include/linux/tegra_avp.h44
-rw-r--r--include/linux/tegra_caif.h34
-rw-r--r--include/linux/tegra_ion.h98
-rw-r--r--include/linux/tegra_mediaserver.h112
-rw-r--r--include/linux/tegra_nvavp.h84
-rw-r--r--include/linux/tegra_overlay.h79
-rw-r--r--include/linux/tegra_pwm_bl.h31
-rw-r--r--include/linux/tegra_rpc.h47
-rw-r--r--include/linux/tegra_sema.h34
-rw-r--r--include/linux/tegra_spdif.h56
-rw-r--r--include/linux/tegra_uart.h43
-rw-r--r--include/linux/tps80031-charger.h62
-rw-r--r--include/linux/tracedump.h43
-rw-r--r--include/linux/tracelevel.h42
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/uid_stat.h29
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/f_accessory.h83
-rw-r--r--include/linux/usb/f_mtp.h75
-rw-r--r--include/linux/usb/otg_id.h58
-rw-r--r--include/linux/wakelock.h91
-rw-r--r--include/linux/wifi_tiwlan.h27
-rw-r--r--include/linux/wl127x-rfkill.h35
-rw-r--r--include/linux/wlan_plat.h27
-rw-r--r--include/media/ad5820.h41
-rw-r--r--include/media/ar0832_main.h106
-rw-r--r--include/media/nvc.h146
-rw-r--r--include/media/nvc_focus.h48
-rw-r--r--include/media/nvc_torch.h43
-rw-r--r--include/media/ov14810.h47
-rw-r--r--include/media/ov2710.h47
-rw-r--r--include/media/ov5650.h83
-rw-r--r--include/media/ov9726.h62
-rw-r--r--include/media/sh532u.h319
-rw-r--r--include/media/soc380.h59
-rw-r--r--include/media/ssl3250a.h38
-rw-r--r--include/media/tegra_camera.h55
-rw-r--r--include/media/tps61050.h36
-rw-r--r--include/net/activity_stats.h25
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/bluetooth/hci.h10
-rw-r--r--include/net/bluetooth/hci_core.h8
-rw-r--r--include/net/bluetooth/sco.h4
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/sound/max98088.h4
-rw-r--r--include/sound/tlv320aic326x.h23
-rw-r--r--include/trace/events/nvhost.h411
-rw-r--r--include/video/nvhdcp.h91
-rw-r--r--include/video/tegra_dc_ext.h309
-rw-r--r--include/video/tegrafb.h32
-rw-r--r--init/Kconfig15
-rw-r--r--kernel/cgroup.c212
-rw-r--r--kernel/cpu.c20
-rw-r--r--kernel/fork.c16
-rw-r--r--kernel/gcov/Kconfig8
-rw-r--r--kernel/gcov/gcc_3_4.c88
-rw-r--r--kernel/gcov/gcov.h42
-rw-r--r--kernel/irq/pm.c7
-rw-r--r--kernel/irq/resend.c19
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/panic.c13
-rw-r--r--kernel/pm_qos_params.c36
-rw-r--r--kernel/power/Kconfig74
-rw-r--r--kernel/power/Makefile6
-rw-r--r--kernel/power/consoleearlysuspend.c78
-rw-r--r--kernel/power/earlysuspend.c187
-rw-r--r--kernel/power/fbearlysuspend.c153
-rw-r--r--kernel/power/main.c20
-rw-r--r--kernel/power/power.h24
-rw-r--r--kernel/power/process.c27
-rw-r--r--kernel/power/suspend.c3
-rw-r--r--kernel/power/suspend_time.c111
-rw-r--r--kernel/power/userwakelock.c219
-rw-r--r--kernel/power/wakelock.c634
-rw-r--r--kernel/printk.c56
-rw-r--r--kernel/sched.c99
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/time/Makefile2
-rw-r--r--kernel/trace/Kconfig33
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/tracedump.c682
-rw-r--r--kernel/trace/tracelevel.c142
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--mm/Makefile1
-rw-r--r--mm/ashmem.c748
-rw-r--r--mm/page_alloc.c25
-rw-r--r--mm/shmem.c15
-rw-r--r--net/Kconfig16
-rw-r--r--net/Makefile1
-rw-r--r--net/activity_stats.c115
-rw-r--r--net/bluetooth/af_bluetooth.c38
-rw-r--r--net/bluetooth/hci_conn.c57
-rw-r--r--net/bluetooth/hci_core.c3
-rwxr-xr-x[-rw-r--r--]net/bluetooth/hci_event.c22
-rw-r--r--net/bluetooth/l2cap_core.c4
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bluetooth/rfcomm/core.c1
-rw-r--r--net/bluetooth/sco.c54
-rw-r--r--net/bluetooth/smp.c3
-rw-r--r--net/bridge/br_device.c11
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c18
-rw-r--r--net/ipv4/devinet.c8
-rw-r--r--net/ipv4/netfilter/Kconfig12
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c8
-rw-r--r--net/ipv4/sysfs_net_ipv4.c88
-rw-r--r--net/ipv4/tcp.c121
-rw-r--r--net/ipv6/addrconf.c69
-rw-r--r--net/ipv6/af_inet6.c34
-rw-r--r--net/ipv6/netfilter/Kconfig12
-rw-r--r--net/ipv6/netfilter/ip6_tables.c14
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c9
-rw-r--r--net/netfilter/Kconfig42
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/xt_qtaguid.c2785
-rw-r--r--net/netfilter/xt_qtaguid_internal.h330
-rw-r--r--net/netfilter/xt_qtaguid_print.c556
-rw-r--r--net/netfilter/xt_qtaguid_print.h120
-rw-r--r--net/netfilter/xt_quota2.c381
-rw-r--r--net/netfilter/xt_socket.c70
-rw-r--r--net/rfkill/Kconfig5
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/wireless/Kconfig11
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/wireless/sme.c6
-rw-r--r--security/Kconfig1
-rw-r--r--security/Makefile2
-rw-r--r--security/commoncap.c10
-rw-r--r--security/tf_driver/Kconfig8
-rw-r--r--security/tf_driver/Makefile36
-rw-r--r--security/tf_driver/s_version.h92
-rw-r--r--security/tf_driver/tf_comm.c1745
-rw-r--r--security/tf_driver/tf_comm.h202
-rw-r--r--security/tf_driver/tf_comm_tz.c885
-rw-r--r--security/tf_driver/tf_conn.c1574
-rw-r--r--security/tf_driver/tf_conn.h106
-rw-r--r--security/tf_driver/tf_defs.h538
-rw-r--r--security/tf_driver/tf_device.c796
-rw-r--r--security/tf_driver/tf_protocol.h690
-rw-r--r--security/tf_driver/tf_util.c1143
-rw-r--r--security/tf_driver/tf_util.h122
-rw-r--r--sound/pci/hda/Kconfig14
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/hda_intel.c616
-rw-r--r--sound/pci/hda/patch_hdmi.c62
-rw-r--r--sound/soc/codecs/Kconfig8
-rw-r--r--sound/soc/codecs/Makefile5
-rw-r--r--sound/soc/codecs/Patch_base_jazz_Rate48_pps_driver.h96
-rw-r--r--sound/soc/codecs/Patch_base_main_Rate48_pps_driver.h96
-rw-r--r--sound/soc/codecs/Patch_base_pop_Rate48_pps_driver.h96
-rw-r--r--sound/soc/codecs/Patch_base_rock_Rate48_pps_driver.h96
-rw-r--r--sound/soc/codecs/first_rate_pps_driver.h6011
-rw-r--r--sound/soc/codecs/max98088.c142
-rw-r--r--sound/soc/codecs/max98088.h22
-rw-r--r--sound/soc/codecs/max98095.c45
-rw-r--r--sound/soc/codecs/rt5640.c2509
-rw-r--r--sound/soc/codecs/rt5640.h2094
-rw-r--r--sound/soc/codecs/second_rate_pps_driver.h3153
-rw-r--r--sound/soc/codecs/spdif_transciever.c7
-rw-r--r--sound/soc/codecs/tlv320aic326x.c3689
-rw-r--r--sound/soc/codecs/tlv320aic326x.h638
-rw-r--r--sound/soc/codecs/tlv320aic326x_mini-dsp.c1587
-rw-r--r--sound/soc/codecs/tlv320aic326x_mini-dsp.h127
-rw-r--r--sound/soc/codecs/tlv320aic326x_minidsp_config.c410
-rw-r--r--sound/soc/codecs/wm8753.c89
-rw-r--r--sound/soc/codecs/wm8753.h25
-rw-r--r--sound/soc/codecs/wm8903.c9
-rw-r--r--sound/soc/tegra/Kconfig119
-rw-r--r--sound/soc/tegra/Makefile30
-rw-r--r--sound/soc/tegra/tegra20_das.c301
-rw-r--r--sound/soc/tegra/tegra20_das.h146
-rw-r--r--sound/soc/tegra/tegra20_i2s.c576
-rw-r--r--sound/soc/tegra/tegra20_i2s.h198
-rw-r--r--sound/soc/tegra/tegra20_spdif.c463
-rw-r--r--sound/soc/tegra/tegra20_spdif.h556
-rw-r--r--sound/soc/tegra/tegra30_ahub.c659
-rw-r--r--sound/soc/tegra/tegra30_ahub.h512
-rw-r--r--sound/soc/tegra/tegra30_dam.c644
-rw-r--r--sound/soc/tegra/tegra30_dam.h163
-rw-r--r--sound/soc/tegra/tegra30_i2s.c947
-rw-r--r--sound/soc/tegra/tegra30_i2s.h274
-rw-r--r--sound/soc/tegra/tegra30_spdif.c505
-rw-r--r--sound/soc/tegra/tegra30_spdif.h777
-rw-r--r--sound/soc/tegra/tegra_aic326x.c1075
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.c121
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.h6
-rw-r--r--sound/soc/tegra/tegra_das.c265
-rw-r--r--sound/soc/tegra/tegra_das.h135
-rw-r--r--sound/soc/tegra/tegra_i2s.c507
-rw-r--r--sound/soc/tegra/tegra_i2s.h165
-rw-r--r--sound/soc/tegra/tegra_max98088.c1199
-rw-r--r--sound/soc/tegra/tegra_pcm.c80
-rw-r--r--sound/soc/tegra/tegra_pcm.h8
-rw-r--r--sound/soc/tegra/tegra_spdif.c371
-rw-r--r--sound/soc/tegra/tegra_spdif.h473
-rw-r--r--sound/soc/tegra/tegra_wm8753.c685
-rw-r--r--sound/soc/tegra/tegra_wm8903.c476
1333 files changed, 426251 insertions, 6568 deletions
diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644
index 000000000000..72a62afdf202
--- /dev/null
+++ b/Documentation/android.txt
@@ -0,0 +1,121 @@
+ =============
+ A N D R O I D
+ =============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+ 1.1 Required enabled config options
+ 1.2 Required disabled config options
+ 1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+ANDROID_RAM_CONSOLE
+ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index cd67e90003c0..60d82e1e498d 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -593,6 +593,15 @@ there are not tasks in the cgroup. If pre_destroy() returns error code,
rmdir() will fail with it. From this behavior, pre_destroy() can be
called multiple times against a cgroup.
+int allow_attach(struct cgroup *cgrp, struct task_struct *task)
+(cgroup_mutex held by caller)
+
+Called prior to moving a task into a cgroup; if the subsystem
+returns an error, this will abort the attach operation. Used
+to extend the permission checks - if all subsystems in a cgroup
+return 0, the attach will be allowed to proceed, even if the
+default permission check (root or same user) fails.
+
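+A minimal sketch of a subsystem-side hook follows; the subsystem name "foo"
+and the policy it applies (same user, or CAP_SYS_NICE) are illustrative
+assumptions rather than the stock permission check, and the hook is assumed
+to be wired up through the subsystem's cgroup_subsys just like can_attach():
+
+  /* needs <linux/cgroup.h>, <linux/cred.h> and <linux/capability.h> */
+  static int foo_allow_attach(struct cgroup *cgrp, struct task_struct *task)
+  {
+          const struct cred *cred = current_cred(), *tcred;
+          int ret = 0;
+
+          rcu_read_lock();
+          tcred = __task_cred(task);
+          /* allow moving our own tasks, or anything if we have CAP_SYS_NICE */
+          if (current != task && !capable(CAP_SYS_NICE) &&
+              cred->euid != tcred->uid && cred->euid != tcred->suid)
+                  ret = -EACCES;
+          rcu_read_unlock();
+          return ret;
+  }
+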
int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *task)
(cgroup_mutex held by caller)
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 9d73cc0cadb9..e21a932059fb 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -39,6 +39,13 @@ system: Time spent by tasks of the cgroup in kernel mode.
user and system are in USER_HZ unit.
+The cpuacct.cpufreq file gives CPU time (in nanoseconds) spent at each CPU
+frequency. Platform hooks must be implemented in order to properly track
+time at each CPU frequency.
+
+The cpuacct.power file gives CPU power consumed (in milliwatt-seconds). The
+platform must provide and implement the power callback functions.
+
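+As an illustration only (the cgroup mount point below is an assumption and
+the exact file layout is platform dependent), a userspace reader can simply
+dump both accounting files for the root cpuacct group:
+
+  #include <stdio.h>
+
+  /* print every line of one cpuacct accounting file */
+  static void dump(const char *path)
+  {
+          char line[256];
+          FILE *f = fopen(path, "r");
+
+          if (!f)
+                  return;
+          while (fgets(line, sizeof(line), f))
+                  printf("%s: %s", path, line);
+          fclose(f);
+  }
+
+  int main(void)
+  {
+          dump("/sys/fs/cgroup/cpuacct/cpuacct.cpufreq");
+          dump("/sys/fs/cgroup/cpuacct/cpuacct.power");
+          return 0;
+  }
+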
cpuacct controller uses percpu_counter interface to collect user and
system times. This has two side effects:
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index e74d0a2eb1cf..51b1cd360c33 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -28,6 +28,7 @@ Contents:
2.3 Userspace
2.4 Ondemand
2.5 Conservative
+2.6 Interactive
3. The Governor Interface in the CPUfreq Core
@@ -193,6 +194,43 @@ governor but for the opposite direction. For example when set to its
default value of '20' it means that if the CPU usage needs to be below
20% between samples to have the frequency decreased.
+
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors. However,
+the governor is more aggressive about scaling the CPU speed up in
+response to CPU-intensive activity.
+
+Sampling the CPU load every X ms can lead to under-powering the CPU
+for X ms, leading to dropped frames, stuttering UI, etc. Instead of
+sampling the cpu at a specified rate, the interactive governor will
+check whether to scale the cpu frequency up soon after coming out of
+idle. When the cpu comes out of idle, a timer is configured to fire
+within 1-2 ticks. If the cpu is very busy between exiting idle and
+when the timer fires, the governor assumes the cpu is underpowered and
+ramps to MAX speed.
+
+If the cpu was not sufficiently busy to immediately ramp to MAX speed,
+the governor evaluates the cpu load since the last speed adjustment,
+choosing the higher of that longer-term load and the short-term load
+since idle exit to determine the cpu speed to ramp to.
+
+The tuneable values for this governor are:
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. This is to ensure that the governor has
+seen enough historic cpu load data to determine the appropriate
+workload. Default is 80000 usec.
+
+go_maxspeed_load: The CPU load at which to ramp to max speed. Default
+is 85.
+
+timer_rate: Sample rate for reevaluating cpu load when the system is
+not idle. Default is 30000 usec.
+
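+As a usage sketch (the sysfs path below is an assumption and may differ by
+platform and kernel version), the tunables can be adjusted from a small C
+helper:
+
+  #include <stdio.h>
+
+  /* write one interactive governor tunable; returns 0 on success */
+  static int set_tunable(const char *name, const char *value)
+  {
+          char path[128];
+          FILE *f;
+
+          snprintf(path, sizeof(path),
+                   "/sys/devices/system/cpu/cpufreq/interactive/%s", name);
+          f = fopen(path, "w");
+          if (!f)
+                  return -1;
+          fprintf(f, "%s\n", value);
+          return fclose(f) ? -1 : 0;
+  }
+
+  int main(void)
+  {
+          set_tunable("go_maxspeed_load", "95");   /* ramp to max at 95% load */
+          set_tunable("min_sample_time", "40000"); /* hold speed for 40 ms */
+          set_tunable("timer_rate", "20000");      /* resample every 20 ms */
+          return 0;
+  }
+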
3. The Governor Interface in the CPUfreq Core
=============================================
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index d3710dc6d25f..b0ee95e99ff7 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -483,6 +483,7 @@ pm_runtime_resume()
pm_runtime_get_sync()
pm_runtime_put_sync()
pm_runtime_put_sync_suspend()
+pm_runtime_put_sync_autosuspend()
5. Runtime PM Initialization, Device Probing and Removal
diff --git a/Documentation/trace/tracedump.txt b/Documentation/trace/tracedump.txt
new file mode 100644
index 000000000000..cba0decc3fc3
--- /dev/null
+++ b/Documentation/trace/tracedump.txt
@@ -0,0 +1,58 @@
+ Tracedump
+
+ Documentation written by Alon Farchy
+
+1. Overview
+============
+
+The tracedump module provides additional mechanisms to retrieve tracing data.
+It can be used to retrieve traces, in either binary or plaintext format, after
+a kernel panic or while the system is running. The dumped data is compressed
+with zlib to conserve space.
+
+2. Configuration Options
+========================
+
+CONFIG_TRACEDUMP - enable the tracedump module.
+CONFIG_TRACEDUMP_PANIC - dump to the console on kernel panic.
+CONFIG_TRACEDUMP_PROCFS - add file /proc/tracedump for userspace access.
+
+3. Module Parameters
+====================
+
+format_ascii
+
+ If 1, data will be dumped in human-readable format, ordered by time.
+ If 0, data will be dumped as raw pages from the ring buffer,
+ ordered by CPU, followed by the saved cmdlines so that the
+ raw data can be decoded. Default: 0
+
+panic_size
+
+ Maximum amount of compressed data to dump during a kernel panic
+ in kilobytes. This only applies if format_ascii == 1. In this case,
+ tracedump will compress the data, check the size, and if it is still too
+ big, discard some data and compress again, repeating until the size is
+ below panic_size. Default: 512 KB.
+
+compress_level
+
+ Determines the compression level that zlib will use. Available levels
+ are 0-9, with 0 as no compression and 9 as maximum compression.
+ Default: 9.
+
+4. Usage
+========
+
+If configured with CONFIG_TRACEDUMP_PROCFS, the tracing data can be pulled
+by reading from /proc/tracedump. For example:
+
+ # cat /proc/tracedump > my_tracedump
+
+Tracedump will surround the dump with a magic word (TRACEDUMP). Between the
+magic words is the compressed data, which can be decompressed with a standard
+zlib implementation. After decompression, if format_ascii == 1, then the
+output should be readable.
+
+If format_ascii == 0, the output is raw binary ring buffer pages ordered by
+CPU and delimited by CPU_END, followed by the saved cmdlines, delimited by |.
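+
+As a decoding sketch (illustrative only: it assumes the whole dump fits in
+the buffers below and that the magic word is the literal text TRACEDUMP), the
+compressed payload can be located and inflated with zlib's uncompress():
+
+  #define _GNU_SOURCE             /* for memmem() */
+  #include <stdio.h>
+  #include <string.h>
+  #include <zlib.h>
+
+  int main(int argc, char **argv)
+  {
+          static unsigned char in[8 << 20], out[64 << 20];
+          unsigned char *start, *end;
+          uLongf outlen = sizeof(out);
+          FILE *f = fopen(argc > 1 ? argv[1] : "my_tracedump", "rb");
+          size_t len;
+
+          if (!f)
+                  return 1;
+          len = fread(in, 1, sizeof(in), f);
+          fclose(f);
+
+          start = memmem(in, len, "TRACEDUMP", 9);        /* opening marker */
+          if (!start)
+                  return 1;
+          start += 9;
+          end = memmem(start, len - (start - in), "TRACEDUMP", 9);
+          if (!end)
+                  return 1;
+
+          if (uncompress(out, &outlen, start, end - start) != Z_OK)
+                  return 1;
+          fwrite(out, 1, outlen, stdout);
+          return 0;
+  }
+
+Build with -lz. If format_ascii was 1, the inflated output is directly
+readable; otherwise it is the binary layout described above.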
diff --git a/Documentation/trace/tracelevel.txt b/Documentation/trace/tracelevel.txt
new file mode 100644
index 000000000000..b282dd2b329b
--- /dev/null
+++ b/Documentation/trace/tracelevel.txt
@@ -0,0 +1,42 @@
+ Tracelevel
+
+ Documentation by Alon Farchy
+
+1. Overview
+===========
+
+Tracelevel allows subsystem authors to add trace priorities to
+their tracing events. High priority traces will be enabled
+automatically at boot time.
+
+This module is configured with CONFIG_TRACELEVEL.
+
+2. Usage
+=========
+
+To give an event a priority, use the function tracelevel_register
+at any time.
+
+ tracelevel_register(my_event, level);
+
+my_event corresponds directly to the event name as defined in the
+event header file. Available levels are:
+
+ TRACELEVEL_ERR 3
+ TRACELEVEL_WARN 2
+ TRACELEVEL_INFO 1
+ TRACELEVEL_DEBUG 0
+
+Any event registered at boot time as TRACELEVEL_ERR will be enabled
+by default. The header also exposes the function tracelevel_set_level
+to change the trace level at runtime. Any trace event registered with the
+specified level or higher will be enabled with this call.
+
+A userspace handle to tracelevel_set_level is available via the module
+parameter 'level'. For example,
+
+ echo 1 > /sys/module/tracelevel/parameters/level
+
+is logically equivalent to:
+
+ tracelevel_set_level(TRACELEVEL_INFO);
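+
+As a kernel-side sketch of the registration described above (the event name
+sched_switch and the init hook are illustrative assumptions; any event name
+from your event header is used the same way):
+
+  #include <linux/module.h>
+  #include <linux/tracelevel.h>
+
+  static int __init my_tracelevel_setup(void)
+  {
+          /* WARN priority: off at boot (only ERR is boot-enabled), but
+           * enabled once the level is set to TRACELEVEL_WARN or lower */
+          int ret = tracelevel_register(sched_switch, TRACELEVEL_WARN);
+
+          if (ret)
+                  pr_warn("tracelevel_register failed: %d\n", ret);
+          return 0;
+  }
+  late_initcall(my_tracelevel_setup);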
diff --git a/Documentation/video/tegra_dc_ext.txt b/Documentation/video/tegra_dc_ext.txt
new file mode 100644
index 000000000000..6fc3394c6652
--- /dev/null
+++ b/Documentation/video/tegra_dc_ext.txt
@@ -0,0 +1,83 @@
+The Tegra display controller (dc) driver has two frontends that implement
+different interfaces:
+1. The traditional fbdev interface, implemented in drivers/video/tegra/fb.c
+2. A new interface that exposes the unique capabilities of the controller,
+ implemented in drivers/video/tegra/dc/ext
+
+The Tegra fbdev capabilities are documented in fb/tegrafb.c [TODO]. This
+document will describe the new "extended" dc interface.
+
+The extended interface is only available when its frontend has been compiled
+in, i.e., CONFIG_TEGRA_DC_EXTENSIONS=y. The dc_ext frontend can coexist with
+tegrafb, but takes precedence (more on that later).
+
+The dc_ext frontend's interface to userspace is exposed through a set of
+device nodes: one for each controller (generally /dev/tegra_dc_N), and one
+"control" node (generally /dev/tegra_dc_ctrl). Communication through these
+device nodes is done with special IOCTLs. There is also an event delivery
+mechanism; userspace can wait for and receive events with read() or poll().
+
+The tegra_dc_N interface is stateful; each fresh open() of the device node
+creates a client instance. In order to prevent multiple processes from
+"fighting" for the hardware, only one client instance is permitted to control
+certain resources at a time, on a first-come, first-served basis.
+
+Overview of tegra_dc_N IOCTLs:
+SET_NVMAP_FD: This is used to associate your nvmap client with this dc_ext
+ client instance. This is necessary so that the kernel can
+ appropriately enforce permissions on nvmap buffers.
+
+GET_WINDOW: A dc_ext client must call this on each window that it wishes to
+ control. This strictly enforces a single dc_ext client on a
+ window at a time.
+
+PUT_WINDOW: A dc_ext client may call this to release a window previously
+ reserved with GET_WINDOW.
+
+FLIP: This ioctl is used to actually display an nvmap surface using one or
+ more windows. Each time a dc_ext client performs a FLIP, the request is
+ put on a flip queue and executed asynchronously (the FLIP ioctl will
+ return immediately). Various parameters are available in the
+ tegra_dc_ext_flip structure.
+ A dc_ext client may only use this on windows that it has previously
+ reserved with a successful GET_WINDOW call.
+
+GET_CURSOR: This is analogous to GET_WINDOW, but for the hardware cursor
+ instead of a window.
+
+PUT_CURSOR: This is analogous to PUT_WINDOW, but for the hardware cursor
+ instead of a window.
+
+SET_CURSOR_IMAGE: This is used to change the hardware cursor image. May only
+ be used by a client who has successfully performed a
+ GET_CURSOR call.
+
+SET_CURSOR: This is used to actually place the hardware cursor on the screen.
+ May only be used by a client who has successfully performed a
+ GET_CURSOR call.
+
+SET_CSC: This may be used to set a color space conversion matrix on a window.
+ A dc_ext client may only use this on windows that it has previously
+ reserved with a successful GET_WINDOW call.
+
+GET_STATUS: This is used to retrieve general status about the dc.
+
+GET_VBLANK_SYNCPT: This is used to retrieve the auto-incrementing vblank
+ syncpoint for the head associated with this dc.
+
+
+Overview of tegra_dc_ctrl IOCTLs:
+GET_NUM_OUTPUTS: This returns the number of available output devices on the
+ system, which may exceed the number of display controllers.
+
+GET_OUTPUT_PROPERTIES: This returns data about the given output, such as what
+ kind of output it is, whether it's currently associated
+ with a head, etc.
+
+GET_OUTPUT_EDID: This returns the binary EDID read from the device connected
+ to the given output, if any.
+
+SET_EVENT_MASK: A dc_ext client may call this ioctl with a bitmask of events
+ that it wishes to receive. These events will then be
+ available to that client on a subsequent read() on the same
+ file descriptor.
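+
+The following is a userspace sketch of the tegra_dc_N flow described above.
+The ioctl macro names (TEGRA_DC_EXT_*), the /dev/nvmap node, and passing the
+window index and nvmap fd by value are assumptions that should be checked
+against include/video/tegra_dc_ext.h:
+
+  #include <stdio.h>
+  #include <fcntl.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <video/tegra_dc_ext.h>
+
+  int main(void)
+  {
+          int dc = open("/dev/tegra_dc_0", O_RDWR);
+          int nvmap = open("/dev/nvmap", O_RDWR);
+          unsigned int win = 0;           /* window index to reserve */
+
+          if (dc < 0 || nvmap < 0)
+                  return 1;
+
+          /* associate our nvmap client, then claim window 0 */
+          if (ioctl(dc, TEGRA_DC_EXT_SET_NVMAP_FD, nvmap) < 0)
+                  perror("SET_NVMAP_FD");
+          if (ioctl(dc, TEGRA_DC_EXT_GET_WINDOW, win) < 0)
+                  perror("GET_WINDOW");
+
+          /* ... fill a struct tegra_dc_ext_flip and issue TEGRA_DC_EXT_FLIP
+           * here; the flip is queued and executed asynchronously ... */
+
+          ioctl(dc, TEGRA_DC_EXT_PUT_WINDOW, win);
+          close(nvmap);
+          close(dc);
+          return 0;
+  }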
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dfe01400cd14..cd6dd77850db 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -186,6 +186,9 @@ config FIQ
config ARCH_MTD_XIP
bool
+config ARCH_PROVIDES_UDELAY
+ bool
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -604,6 +607,9 @@ config ARCH_TEGRA
select HAVE_CLK
select HAVE_SCHED_CLOCK
select ARCH_HAS_CPUFREQ
+ select ARCH_PROVIDES_UDELAY
+ select FIQ
+ select PCI
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
Tegra 6xx and Tegra 2 series).
@@ -1297,6 +1303,25 @@ config ARM_ERRATA_764369
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
+config ARM_ERRATA_720791
+ bool "ARM errata: Dynamic high-level clock gating corrupts the Jazelle instruction stream"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 720791 Cortex-A9
+ (r1p0..r1p2) erratum. The Jazelle instruction stream may be
+ corrupted when dynamic high-level clock gating is enabled.
+ This workaround disables gating the Core clock when the Instruction
+ side is waiting for a Page Table Walk answer or linefill completion.
+
+config ARM_ERRATA_752520
+ bool "ARM errata: Faulty arbitration between PLD and Cacheable TLB requests may create a system deadlock"
+ depends on CPU_V7
+ help
+ Under rare circumstances, PLDs may interfere with a Cacheable page table walk,
+ creating a processor deadlock. The erratum can only happen when the Data Cache
+ and MMU are enabled, with the TLB descriptors marked as L1 cacheable,
+ so that Page Table Walks are performed as cache linefills.
+
config PL310_ERRATA_769419
bool "PL310 errata: no automatic Store Buffer drain"
depends on CACHE_L2X0
@@ -1745,6 +1770,15 @@ config DEPRECATED_PARAM_STRUCT
This was deprecated in 2001 and announced to live on for 5 years.
Some old boot loaders still use this way.
+config ARM_FLUSH_CONSOLE_ON_RESTART
+ bool "Force flush the console on restart"
+ help
+ If the console is locked while the system is rebooted, the messages
+ in the temporary logbuffer would not have propagated to all the
+ console drivers. This option forces the console lock to be
+ released if it failed to be acquired, which will cause all the
+ pending messages to be flushed.
+
endmenu
menu "Boot options"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e95a5989602a..24701d6f72bc 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -657,6 +657,8 @@ proc_types:
@ b __arm6_mmu_cache_off
@ b __armv3_mmu_cache_flush
+#if !defined(CONFIG_CPU_V7)
+ /* This collides with some V7 IDs, preventing correct detection */
.word 0x00000000 @ old ARM ID
.word 0x0000f000
mov pc, lr
@@ -665,6 +667,7 @@ proc_types:
THUMB( nop )
mov pc, lr
THUMB( nop )
+#endif
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 4b71766fb21d..23b2a6a98c27 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -39,3 +39,62 @@ config SHARP_PARAM
config SHARP_SCOOP
bool
+
+config FIQ_GLUE
+ bool
+ select FIQ
+
+config FIQ_DEBUGGER
+ bool "FIQ Mode Serial Debugger"
+ select FIQ
+ select FIQ_GLUE
+ default n
+ help
+ The FIQ serial debugger can accept commands even when the
+ kernel is unresponsive due to being stuck with interrupts
+ disabled.
+
+
+config FIQ_DEBUGGER_NO_SLEEP
+ bool "Keep serial debugger active"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Enables the serial debugger at boot. Passing
+ fiq_debugger.no_sleep on the kernel commandline will
+ override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+ bool "Don't disable wakeup IRQ when debugger is active"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Don't disable the wakeup irq when enabling the uart clock. This will
+ cause extra interrupts, but it makes the serial debugger usable on
+ some MSM radio builds that ignore the uart clock request in power
+ collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+ bool "Console on FIQ Serial Debugger port"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Enables a console so that printk messages are displayed on
+ the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+ bool "Put the FIQ debugger into console mode by default"
+ depends on FIQ_DEBUGGER_CONSOLE
+ default n
+ help
+ If enabled, this puts the fiq debugger into console mode by default.
+ Otherwise, the fiq debugger will start out in debug mode.
+
+config GIC_SET_MULTIPLE_CPUS
+ bool "Use affinity hint to allow multiple CPUs for IRQ"
+ depends on ARM_GIC && SMP
+ default n
+ help
+ IRQ affinity is always set by the gic to the first cpu in the requested
+ mask. If this option is enabled, affinity is also set to all cpus present
+ in both the affinity_hint and requested masks.
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ea9b6f3607a..3ab5d765fedd 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -17,3 +17,5 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
new file mode 100644
index 000000000000..3ed18ae2ed80
--- /dev/null
+++ b/arch/arm/common/fiq_debugger.c
@@ -0,0 +1,1196 @@
+/*
+ * arch/arm/common/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#include <asm/fiq_debugger.h>
+#include <asm/fiq_glue.h>
+#include <asm/stacktrace.h>
+
+#include <mach/system.h>
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+ ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_state {
+ struct fiq_glue_handler handler;
+
+ int fiq;
+ int uart_irq;
+ int signal_irq;
+ int wakeup_irq;
+ bool wakeup_irq_no_set_wake;
+ struct clk *clk;
+ struct fiq_debugger_pdata *pdata;
+ struct platform_device *pdev;
+
+ char debug_cmd[DEBUG_MAX];
+ int debug_busy;
+ int debug_abort;
+
+ char debug_buf[DEBUG_MAX];
+ int debug_count;
+
+ bool no_sleep;
+ bool debug_enable;
+ bool ignore_next_wakeup_irq;
+ struct timer_list sleep_timer;
+ spinlock_t sleep_timer_lock;
+ bool uart_enabled;
+ struct wake_lock debugger_wake_lock;
+ bool console_enable;
+ int current_cpu;
+ atomic_t unhandled_fiq_count;
+ bool in_fiq;
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+ struct console console;
+ struct tty_driver *tty_driver;
+ struct tty_struct *tty;
+ int tty_open_count;
+ struct fiq_debugger_ringbuf *tty_rbuf;
+ bool syslog_dumping;
+#endif
+
+ unsigned int last_irqs[NR_IRQS];
+ unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+ if (state->wakeup_irq < 0)
+ return;
+ enable_irq(state->wakeup_irq);
+ if (!state->wakeup_irq_no_set_wake)
+ enable_irq_wake(state->wakeup_irq);
+}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+ if (state->wakeup_irq < 0)
+ return;
+ disable_irq_nosync(state->wakeup_irq);
+ if (!state->wakeup_irq_no_set_wake)
+ disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static bool inline debug_have_fiq(struct fiq_debugger_state *state)
+{
+ return (state->fiq >= 0);
+}
+
+static void debug_force_irq(struct fiq_debugger_state *state)
+{
+ unsigned int irq = state->signal_irq;
+
+ if (WARN_ON(!debug_have_fiq(state)))
+ return;
+ if (state->pdata->force_irq) {
+ state->pdata->force_irq(state->pdev, irq);
+ } else {
+ struct irq_chip *chip = irq_get_chip(irq);
+ if (chip && chip->irq_retrigger)
+ chip->irq_retrigger(irq_get_irq_data(irq));
+ }
+}
+
+static void debug_uart_enable(struct fiq_debugger_state *state)
+{
+ if (state->clk)
+ clk_enable(state->clk);
+ if (state->pdata->uart_enable)
+ state->pdata->uart_enable(state->pdev);
+}
+
+static void debug_uart_disable(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_disable)
+ state->pdata->uart_disable(state->pdev);
+ if (state->clk)
+ clk_disable(state->clk);
+}
+
+static void debug_uart_flush(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_flush)
+ state->pdata->uart_flush(state->pdev);
+}
+
+static void debug_puts(struct fiq_debugger_state *state, char *s)
+{
+ unsigned c;
+ while ((c = *s++)) {
+ if (c == '\n')
+ state->pdata->uart_putc(state->pdev, '\r');
+ state->pdata->uart_putc(state->pdev, c);
+ }
+}
+
+static void debug_prompt(struct fiq_debugger_state *state)
+{
+ debug_puts(state, "debug> ");
+}
+
+int log_buf_copy(char *dest, int idx, int len);
+static void dump_kernel_log(struct fiq_debugger_state *state)
+{
+ char buf[1024];
+ int idx = 0;
+ int ret;
+ int saved_oip;
+
+ /* setting oops_in_progress prevents log_buf_copy()
+ * from trying to take a spinlock which will make it
+ * very unhappy in some cases...
+ */
+ saved_oip = oops_in_progress;
+ oops_in_progress = 1;
+ for (;;) {
+ ret = log_buf_copy(buf, idx, 1023);
+ if (ret <= 0)
+ break;
+ buf[ret] = 0;
+ debug_puts(state, buf);
+ idx += ret;
+ }
+ oops_in_progress = saved_oip;
+}
+
+static char *mode_name(unsigned cpsr)
+{
+ switch (cpsr & MODE_MASK) {
+ case USR_MODE: return "USR";
+ case FIQ_MODE: return "FIQ";
+ case IRQ_MODE: return "IRQ";
+ case SVC_MODE: return "SVC";
+ case ABT_MODE: return "ABT";
+ case UND_MODE: return "UND";
+ case SYSTEM_MODE: return "SYS";
+ default: return "???";
+ }
+}
+
+static int debug_printf(void *cookie, const char *fmt, ...)
+{
+ struct fiq_debugger_state *state = cookie;
+ char buf[256];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ debug_puts(state, buf);
+ return state->debug_abort;
+}
+
+/* Safe outside fiq context */
+static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+ struct fiq_debugger_state *state = cookie;
+ char buf[256];
+ va_list ap;
+ unsigned long irq_flags;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, 128, fmt, ap);
+ va_end(ap);
+
+ local_irq_save(irq_flags);
+ debug_puts(state, buf);
+ debug_uart_flush(state);
+ local_irq_restore(irq_flags);
+ return state->debug_abort;
+}
+
+static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
+{
+ debug_printf(state, " r0 %08x r1 %08x r2 %08x r3 %08x\n",
+ regs[0], regs[1], regs[2], regs[3]);
+ debug_printf(state, " r4 %08x r5 %08x r6 %08x r7 %08x\n",
+ regs[4], regs[5], regs[6], regs[7]);
+ debug_printf(state, " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n",
+ regs[8], regs[9], regs[10], regs[11],
+ mode_name(regs[16]));
+ if ((regs[16] & MODE_MASK) == USR_MODE)
+ debug_printf(state, " ip %08x sp %08x lr %08x pc %08x "
+ "cpsr %08x\n", regs[12], regs[13], regs[14],
+ regs[15], regs[16]);
+ else
+ debug_printf(state, " ip %08x sp %08x lr %08x pc %08x "
+ "cpsr %08x spsr %08x\n", regs[12], regs[13],
+ regs[14], regs[15], regs[16], regs[17]);
+}
+
+struct mode_regs {
+ unsigned long sp_svc;
+ unsigned long lr_svc;
+ unsigned long spsr_svc;
+
+ unsigned long sp_abt;
+ unsigned long lr_abt;
+ unsigned long spsr_abt;
+
+ unsigned long sp_und;
+ unsigned long lr_und;
+ unsigned long spsr_und;
+
+ unsigned long sp_irq;
+ unsigned long lr_irq;
+ unsigned long spsr_irq;
+
+ unsigned long r8_fiq;
+ unsigned long r9_fiq;
+ unsigned long r10_fiq;
+ unsigned long r11_fiq;
+ unsigned long r12_fiq;
+ unsigned long sp_fiq;
+ unsigned long lr_fiq;
+ unsigned long spsr_fiq;
+};
+
+void __naked get_mode_regs(struct mode_regs *regs)
+{
+ asm volatile (
+ "mrs r1, cpsr\n"
+ "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r8 - r14}\n"
+ "mrs r2, spsr\n"
+ "stmia r0!, {r2}\n"
+ "msr cpsr_c, r1\n"
+ "bx lr\n");
+}
+
+
+static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs)
+{
+ struct mode_regs mode_regs;
+ dump_regs(state, regs);
+ get_mode_regs(&mode_regs);
+ debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+ debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+ debug_printf(state, " und: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+ debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+ debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x "
+ "r12 %08x\n",
+ mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+ mode_regs.r11_fiq, mode_regs.r12_fiq);
+ debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+static void dump_irqs(struct fiq_debugger_state *state)
+{
+ int n;
+ unsigned int cpu;
+
+ debug_printf(state, "irqnr total since-last status name\n");
+ for (n = 0; n < NR_IRQS; n++) {
+ struct irqaction *act = irq_desc[n].action;
+ if (!act && !kstat_irqs(n))
+ continue;
+ debug_printf(state, "%5d: %10u %11u %8x %s\n", n,
+ kstat_irqs(n),
+ kstat_irqs(n) - state->last_irqs[n],
+ irq_desc[n].status_use_accessors,
+ (act && act->name) ? act->name : "???");
+ state->last_irqs[n] = kstat_irqs(n);
+ }
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+
+ debug_printf(state, "LOC %d: %10u %11u\n", cpu,
+ __IRQ_STAT(cpu, local_timer_irqs),
+ __IRQ_STAT(cpu, local_timer_irqs) -
+ state->last_local_timer_irqs[cpu]);
+ state->last_local_timer_irqs[cpu] =
+ __IRQ_STAT(cpu, local_timer_irqs);
+ }
+}
+
+struct stacktrace_state {
+ struct fiq_debugger_state *state;
+ unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+ struct stacktrace_state *sts = d;
+
+ if (sts->depth) {
+ debug_printf(sts->state,
+ " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+ frame->pc, frame->pc, frame->lr, frame->lr,
+ frame->sp, frame->fp);
+ sts->depth--;
+ return 0;
+ }
+ debug_printf(sts->state, " ...\n");
+
+ return sts->depth == 0;
+}
+
+struct frame_tail {
+ struct frame_tail *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
+ struct frame_tail *tail)
+{
+ struct frame_tail buftail[2];
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+ debug_printf(state, " invalid frame pointer %p\n", tail);
+ return NULL;
+ }
+ if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+ debug_printf(state,
+ " failed to copy frame pointer %p\n", tail);
+ return NULL;
+ }
+
+ debug_printf(state, " %p\n", buftail[0].lr);
+
+ /* frame pointers should strictly progress back up the stack
+ * (towards higher addresses) */
+ if (tail >= buftail[0].fp)
+ return NULL;
+
+ return buftail[0].fp-1;
+}
+
+void dump_stacktrace(struct fiq_debugger_state *state,
+ struct pt_regs * const regs, unsigned int depth, void *ssp)
+{
+ struct frame_tail *tail;
+ struct thread_info *real_thread_info = THREAD_INFO(ssp);
+ struct stacktrace_state sts;
+
+ sts.depth = depth;
+ sts.state = state;
+ *current_thread_info() = *real_thread_info;
+
+ if (!current)
+ debug_printf(state, "current NULL\n");
+ else
+ debug_printf(state, "pid: %d comm: %s\n",
+ current->pid, current->comm);
+ dump_regs(state, (unsigned *)regs);
+
+ if (!user_mode(regs)) {
+ struct stackframe frame;
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ debug_printf(state,
+ " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+ regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+ regs->ARM_sp, regs->ARM_fp);
+ walk_stackframe(&frame, report_trace, &sts);
+ return;
+ }
+
+ tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+ while (depth-- && tail && !((unsigned long) tail & 3))
+ tail = user_backtrace(state, tail);
+}
+
+static void do_ps(struct fiq_debugger_state *state)
+{
+ struct task_struct *g;
+ struct task_struct *p;
+ unsigned task_state;
+ static const char stat_nam[] = "RSDTtZX";
+
+ debug_printf(state, "pid ppid prio task pc\n");
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ task_state = p->state ? __ffs(p->state) + 1 : 0;
+ debug_printf(state,
+ "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+ debug_printf(state, "%-13.13s %c", p->comm,
+ task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+ if (task_state == TASK_RUNNING)
+ debug_printf(state, " running\n");
+ else
+ debug_printf(state, " %08lx\n", thread_saved_pc(p));
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = true;
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *bug, int count);
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ char buf[128];
+ int ret;
+ int idx = 0;
+
+ while (1) {
+ ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
+ if (ret <= 0)
+ break;
+ buf[ret] = 0;
+ debug_printf(state, "%s", buf);
+ idx += ret;
+ }
+}
+#endif
+
+static void do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+ begin_syslog_dump(state);
+ handle_sysrq(rq);
+ end_syslog_dump(state);
+}
+
+/* This function CANNOT be called in FIQ context */
+static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+ if (!strcmp(cmd, "ps"))
+ do_ps(state);
+ if (!strcmp(cmd, "sysrq"))
+ do_sysrq(state, 'h');
+ if (!strncmp(cmd, "sysrq ", 6))
+ do_sysrq(state, cmd[6]);
+}
+
+static void debug_help(struct fiq_debugger_state *state)
+{
+ debug_printf(state, "FIQ Debugger commands:\n"
+ " pc PC status\n"
+ " regs Register dump\n"
+ " allregs Extended Register dump\n"
+ " bt Stack trace\n"
+ " reboot Reboot\n"
+ " irqs Interrupt status\n"
+ " kmsg Kernel log\n"
+ " version Kernel version\n");
+ debug_printf(state, " sleep Allow sleep while in FIQ\n"
+ " nosleep Disable sleep while in FIQ\n"
+ " console Switch terminal to console\n"
+ " cpu Current CPU\n"
+ " cpu <number> Switch to CPU<number>\n");
+ debug_printf(state, " ps Process list\n"
+ " sysrq sysrq options\n"
+ " sysrq <param> Execute sysrq with <param>\n");
+}
+
+static void take_affinity(void *info)
+{
+ struct fiq_debugger_state *state = info;
+ struct cpumask cpumask;
+
+ cpumask_clear(&cpumask);
+ cpumask_set_cpu(get_cpu(), &cpumask);
+
+ irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+ if (!debug_have_fiq(state))
+ smp_call_function_single(cpu, take_affinity, state, false);
+ state->current_cpu = cpu;
+}
+
+static bool debug_fiq_exec(struct fiq_debugger_state *state,
+ const char *cmd, unsigned *regs, void *svc_sp)
+{
+ bool signal_helper = false;
+
+ if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+ debug_help(state);
+ } else if (!strcmp(cmd, "pc")) {
+ debug_printf(state, " pc %08x cpsr %08x mode %s\n",
+ regs[15], regs[16], mode_name(regs[16]));
+ } else if (!strcmp(cmd, "regs")) {
+ dump_regs(state, regs);
+ } else if (!strcmp(cmd, "allregs")) {
+ dump_allregs(state, regs);
+ } else if (!strcmp(cmd, "bt")) {
+ dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
+ } else if (!strcmp(cmd, "reboot")) {
+ arch_reset(0, 0);
+ } else if (!strcmp(cmd, "irqs")) {
+ dump_irqs(state);
+ } else if (!strcmp(cmd, "kmsg")) {
+ dump_kernel_log(state);
+ } else if (!strcmp(cmd, "version")) {
+ debug_printf(state, "%s\n", linux_banner);
+ } else if (!strcmp(cmd, "sleep")) {
+ state->no_sleep = false;
+ debug_printf(state, "enabling sleep\n");
+ } else if (!strcmp(cmd, "nosleep")) {
+ state->no_sleep = true;
+ debug_printf(state, "disabling sleep\n");
+ } else if (!strcmp(cmd, "console")) {
+ state->console_enable = true;
+ debug_printf(state, "console mode\n");
+ } else if (!strcmp(cmd, "cpu")) {
+ debug_printf(state, "cpu %d\n", state->current_cpu);
+ } else if (!strncmp(cmd, "cpu ", 4)) {
+ unsigned long cpu = 0;
+ if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+ switch_cpu(state, cpu);
+ else
+ debug_printf(state, "invalid cpu\n");
+ debug_printf(state, "cpu %d\n", state->current_cpu);
+ } else {
+ if (state->debug_busy) {
+ debug_printf(state,
+ "command processor busy. trying to abort.\n");
+ state->debug_abort = -1;
+ } else {
+ strcpy(state->debug_cmd, cmd);
+ state->debug_busy = 1;
+ }
+
+ return true;
+ }
+ if (!state->console_enable)
+ debug_prompt(state);
+
+ return signal_helper;
+}
+
+static void sleep_timer_expired(unsigned long data)
+{
+ struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->uart_enabled && !state->no_sleep) {
+ if (state->debug_enable && !state->console_enable) {
+ state->debug_enable = false;
+ debug_printf_nfiq(state, "suspending fiq debugger\n");
+ }
+ state->ignore_next_wakeup_irq = true;
+ debug_uart_disable(state);
+ state->uart_enabled = false;
+ enable_wakeup_irq(state);
+ }
+ wake_unlock(&state->debugger_wake_lock);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void handle_wakeup(struct fiq_debugger_state *state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+ state->ignore_next_wakeup_irq = false;
+ } else if (!state->uart_enabled) {
+ wake_lock(&state->debugger_wake_lock);
+ debug_uart_enable(state);
+ state->uart_enabled = true;
+ disable_wakeup_irq(state);
+ mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+ }
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t wakeup_irq_handler(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (!state->no_sleep)
+ debug_puts(state, "WAKEUP\n");
+ handle_wakeup(state);
+
+ return IRQ_HANDLED;
+}
+
+
+static void debug_handle_irq_context(struct fiq_debugger_state *state)
+{
+ if (!state->no_sleep) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ wake_lock(&state->debugger_wake_lock);
+ mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+ }
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ if (state->tty) {
+ int i;
+ int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+ for (i = 0; i < count; i++) {
+ int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+ tty_insert_flip_char(state->tty, c, TTY_NORMAL);
+ if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+ pr_warn("fiq tty failed to consume byte\n");
+ }
+ tty_flip_buffer_push(state->tty);
+ }
+#endif
+ if (state->debug_busy) {
+ debug_irq_exec(state, state->debug_cmd);
+ debug_prompt(state);
+ state->debug_busy = 0;
+ }
+}
+
+static int debug_getc(struct fiq_debugger_state *state)
+{
+ return state->pdata->uart_getc(state->pdev);
+}
+
+static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
+ int this_cpu, void *regs, void *svc_sp)
+{
+ int c;
+ static int last_c;
+ int count = 0;
+ bool signal_helper = false;
+
+ if (this_cpu != state->current_cpu) {
+ if (state->in_fiq)
+ return false;
+
+ if (atomic_inc_return(&state->unhandled_fiq_count) !=
+ MAX_UNHANDLED_FIQ_COUNT)
+ return false;
+
+ debug_printf(state, "fiq_debugger: cpu %d not responding, "
+ "reverting to cpu %d\n", state->current_cpu,
+ this_cpu);
+
+ atomic_set(&state->unhandled_fiq_count, 0);
+ switch_cpu(state, this_cpu);
+ return false;
+ }
+
+ state->in_fiq = true;
+
+ while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+ count++;
+ if (!state->debug_enable) {
+ if ((c == 13) || (c == 10)) {
+ state->debug_enable = true;
+ state->debug_count = 0;
+ debug_prompt(state);
+ }
+ } else if (c == FIQ_DEBUGGER_BREAK) {
+ state->console_enable = false;
+ debug_puts(state, "fiq debugger mode\n");
+ state->debug_count = 0;
+ debug_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+ } else if (state->console_enable && state->tty_rbuf) {
+ fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+ signal_helper = true;
+#endif
+ } else if ((c >= ' ') && (c < 127)) {
+ if (state->debug_count < (DEBUG_MAX - 1)) {
+ state->debug_buf[state->debug_count++] = c;
+ state->pdata->uart_putc(state->pdev, c);
+ }
+ } else if ((c == 8) || (c == 127)) {
+ if (state->debug_count > 0) {
+ state->debug_count--;
+ state->pdata->uart_putc(state->pdev, 8);
+ state->pdata->uart_putc(state->pdev, ' ');
+ state->pdata->uart_putc(state->pdev, 8);
+ }
+ } else if ((c == 13) || (c == 10)) {
+ if (c == '\r' || (c == '\n' && last_c != '\r')) {
+ state->pdata->uart_putc(state->pdev, '\r');
+ state->pdata->uart_putc(state->pdev, '\n');
+ }
+ if (state->debug_count) {
+ state->debug_buf[state->debug_count] = 0;
+ state->debug_count = 0;
+ signal_helper |=
+ debug_fiq_exec(state, state->debug_buf,
+ regs, svc_sp);
+ } else {
+ debug_prompt(state);
+ }
+ }
+ last_c = c;
+ }
+ debug_uart_flush(state);
+ if (state->pdata->fiq_ack)
+ state->pdata->fiq_ack(state->pdev, state->fiq);
+
+ /* poke sleep timer if necessary */
+ if (state->debug_enable && !state->no_sleep)
+ signal_helper = true;
+
+ atomic_set(&state->unhandled_fiq_count, 0);
+ state->in_fiq = false;
+
+ return signal_helper;
+}
+
+static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+ bool need_irq;
+
+ need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
+ if (need_irq)
+ debug_force_irq(state);
+}
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t debug_uart_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+ bool not_done;
+
+ handle_wakeup(state);
+
+ /* handle the debugger irq in regular context */
+ not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
+ get_irq_regs(),
+ current_thread_info());
+ if (not_done)
+ debug_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t debug_signal_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (state->pdata->force_irq_ack)
+ state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+ debug_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+static void debug_resume(struct fiq_glue_handler *h)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ if (state->pdata->uart_resume)
+ state->pdata->uart_resume(state->pdev);
+}
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *debug_console_device(struct console *co, int *index)
+{
+ struct fiq_debugger_state *state;
+ state = container_of(co, struct fiq_debugger_state, console);
+ *index = 0;
+ return state->tty_driver;
+}
+
+static void debug_console_write(struct console *co,
+ const char *s, unsigned int count)
+{
+ struct fiq_debugger_state *state;
+
+ state = container_of(co, struct fiq_debugger_state, console);
+
+ if (!state->console_enable && !state->syslog_dumping)
+ return;
+
+ debug_uart_enable(state);
+ while (count--) {
+ if (*s == '\n')
+ state->pdata->uart_putc(state->pdev, '\r');
+ state->pdata->uart_putc(state->pdev, *s++);
+ }
+ debug_uart_flush(state);
+ debug_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+ .name = "ttyFIQ",
+ .device = debug_console_device,
+ .write = debug_console_write,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct fiq_debugger_state *state = tty->driver->driver_state;
+ if (state->tty_open_count++)
+ return 0;
+
+ tty->driver_data = state;
+ state->tty = tty;
+ return 0;
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct fiq_debugger_state *state = tty->driver_data;
+ if (--state->tty_open_count)
+ return;
+ state->tty = NULL;
+}
+
+int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int i;
+ struct fiq_debugger_state *state = tty->driver_data;
+
+ if (!state->console_enable)
+ return count;
+
+ debug_uart_enable(state);
+ for (i = 0; i < count; i++)
+ state->pdata->uart_putc(state->pdev, *buf++);
+ debug_uart_disable(state);
+
+ return count;
+}
+
+int fiq_tty_write_room(struct tty_struct *tty)
+{
+ return 1024;
+}
+
+static const struct tty_operations fiq_tty_driver_ops = {
+ .write = fiq_tty_write,
+ .write_room = fiq_tty_write_room,
+ .open = fiq_tty_open,
+ .close = fiq_tty_close,
+};
+
+static int fiq_debugger_tty_init(struct fiq_debugger_state *state)
+{
+ int ret = -EINVAL;
+
+ state->tty_driver = alloc_tty_driver(1);
+ if (!state->tty_driver) {
+ pr_err("Failed to allocate fiq debugger tty\n");
+ return -ENOMEM;
+ }
+
+ state->tty_driver->owner = THIS_MODULE;
+ state->tty_driver->driver_name = "fiq-debugger";
+ state->tty_driver->name = "ttyFIQ";
+ state->tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ state->tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ state->tty_driver->init_termios = tty_std_termios;
+ state->tty_driver->init_termios.c_cflag =
+ B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+ state->tty_driver->init_termios.c_ispeed =
+ state->tty_driver->init_termios.c_ospeed = 115200;
+ state->tty_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(state->tty_driver, &fiq_tty_driver_ops);
+ state->tty_driver->driver_state = state;
+
+ ret = tty_register_driver(state->tty_driver);
+ if (ret) {
+ pr_err("Failed to register fiq tty: %d\n", ret);
+ goto err;
+ }
+
+ state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+ if (!state->tty_rbuf) {
+ pr_err("Failed to allocate fiq debugger ringbuf\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ pr_info("Registered FIQ tty driver %p\n", state->tty_driver);
+ return 0;
+
+err:
+ fiq_debugger_ringbuf_free(state->tty_rbuf);
+ state->tty_rbuf = NULL;
+ put_tty_driver(state->tty_driver);
+ return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_suspend)
+ return state->pdata->uart_dev_suspend(pdev);
+ return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_resume)
+ return state->pdata->uart_dev_resume(pdev);
+ return 0;
+}
+
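+/*
+ * Board-side registration sketch (illustrative: the resource and platform
+ * device names below mirror what this probe expects, but the my_uart_*
+ * callbacks are hypothetical; uart_getc and uart_putc are mandatory):
+ *
+ *      static struct fiq_debugger_pdata my_pdata = {
+ *              .uart_init  = my_uart_init,
+ *              .uart_getc  = my_uart_getc,
+ *              .uart_putc  = my_uart_putc,
+ *              .uart_flush = my_uart_flush,
+ *      };
+ *
+ *      static struct resource my_resources[] = {
+ *              { .name = "uart_irq", .start = MY_UART_IRQ,
+ *                .end = MY_UART_IRQ, .flags = IORESOURCE_IRQ },
+ *      };
+ *
+ *      static struct platform_device my_fiq_debugger = {
+ *              .name           = "fiq_debugger",
+ *              .id             = -1,
+ *              .resource       = my_resources,
+ *              .num_resources  = ARRAY_SIZE(my_resources),
+ *              .dev = { .platform_data = &my_pdata },
+ *      };
+ *
+ * registered from board init with platform_device_register(&my_fiq_debugger).
+ */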
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fiq_debugger_state *state;
+ int fiq;
+ int uart_irq;
+
+ if (!pdata->uart_getc || !pdata->uart_putc)
+ return -EINVAL;
+ if ((pdata->uart_enable && !pdata->uart_disable) ||
+ (!pdata->uart_enable && pdata->uart_disable))
+ return -EINVAL;
+
+ fiq = platform_get_irq_byname(pdev, "fiq");
+ uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+ /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+ * is required */
+ if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+ return -EINVAL;
+ if (fiq >= 0 && !pdata->fiq_enable)
+ return -EINVAL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ setup_timer(&state->sleep_timer, sleep_timer_expired,
+ (unsigned long)state);
+ state->pdata = pdata;
+ state->pdev = pdev;
+ state->no_sleep = initial_no_sleep;
+ state->debug_enable = initial_debug_enable;
+ state->console_enable = initial_console_enable;
+
+ state->fiq = fiq;
+ state->uart_irq = uart_irq;
+ state->signal_irq = platform_get_irq_byname(pdev, "signal");
+ state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+ platform_set_drvdata(pdev, state);
+
+ spin_lock_init(&state->sleep_timer_lock);
+
+ if (state->wakeup_irq < 0 && debug_have_fiq(state))
+ state->no_sleep = true;
+ state->ignore_next_wakeup_irq = !state->no_sleep;
+
+ wake_lock_init(&state->debugger_wake_lock,
+ WAKE_LOCK_SUSPEND, "serial-debug");
+
+ state->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(state->clk))
+ state->clk = NULL;
+
+ /* do not call pdata->uart_enable here since uart_init may still
+ * need to do some initialization before uart_enable can work.
+ * So, only try to manage the clock during init.
+ */
+ if (state->clk)
+ clk_enable(state->clk);
+
+ if (pdata->uart_init) {
+ ret = pdata->uart_init(pdev);
+ if (ret)
+ goto err_uart_init;
+ }
+
+ debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
+ state->no_sleep ? "" : "twice ");
+
+ if (debug_have_fiq(state)) {
+ state->handler.fiq = debug_fiq;
+ state->handler.resume = debug_resume;
+ ret = fiq_glue_register_handler(&state->handler);
+ if (ret) {
+ pr_err("%s: could not install fiq handler\n", __func__);
+ goto err_register_fiq;
+ }
+
+ pdata->fiq_enable(pdev, state->fiq, 1);
+ } else {
+ ret = request_irq(state->uart_irq, debug_uart_irq,
+ IRQF_NO_SUSPEND, "debug", state);
+ if (ret) {
+ pr_err("%s: could not install irq handler\n", __func__);
+ goto err_register_irq;
+ }
+
+ /* for irq-only mode, we want this irq to wake us up, if it
+ * can.
+ */
+ enable_irq_wake(state->uart_irq);
+ }
+
+ if (state->clk)
+ clk_disable(state->clk);
+
+ if (state->signal_irq >= 0) {
+ ret = request_irq(state->signal_irq, debug_signal_irq,
+ IRQF_TRIGGER_RISING, "debug-signal", state);
+ if (ret)
+ pr_err("serial_debugger: could not install signal_irq");
+ }
+
+ if (state->wakeup_irq >= 0) {
+ ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ "debug-wakeup", state);
+ if (ret) {
+ pr_err("serial_debugger: "
+ "could not install wakeup irq\n");
+ state->wakeup_irq = -1;
+ } else {
+ ret = enable_irq_wake(state->wakeup_irq);
+ if (ret) {
+ pr_err("serial_debugger: "
+ "could not enable wakeup\n");
+ state->wakeup_irq_no_set_wake = true;
+ }
+ }
+ }
+ if (state->no_sleep)
+ handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ state->console = fiq_debugger_console;
+ register_console(&state->console);
+ fiq_debugger_tty_init(state);
+#endif
+ return 0;
+
+err_register_irq:
+err_register_fiq:
+ if (pdata->uart_free)
+ pdata->uart_free(pdev);
+err_uart_init:
+ if (state->clk)
+ clk_disable(state->clk);
+ if (state->clk)
+ clk_put(state->clk);
+ wake_lock_destroy(&state->debugger_wake_lock);
+ platform_set_drvdata(pdev, NULL);
+ kfree(state);
+ return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+ .suspend = fiq_debugger_dev_suspend,
+ .resume = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+ .probe = fiq_debugger_probe,
+ .driver = {
+ .name = "fiq_debugger",
+ .pm = &fiq_debugger_dev_pm_ops,
+ },
+};
+
+static int __init fiq_debugger_init(void)
+{
+ return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h
new file mode 100644
index 000000000000..2649b5581088
--- /dev/null
+++ b/arch/arm/common/fiq_debugger_ringbuf.h
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/common/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
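+/*
+ * Single-producer/single-consumer ring buffer: the producer only advances
+ * head and the consumer only advances tail, so memory barriers (rather than
+ * locks) are enough to keep the two sides consistent.
+ */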
+struct fiq_debugger_ringbuf {
+ int len;
+ int head;
+ int tail;
+ u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+ struct fiq_debugger_ringbuf *rbuf;
+
+ rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+ if (rbuf == NULL)
+ return NULL;
+
+ rbuf->len = len;
+ rbuf->head = 0;
+ rbuf->tail = 0;
+ smp_mb();
+
+ return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+ kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+ int level = rbuf->head - rbuf->tail;
+
+ if (level < 0)
+ level = rbuf->len + level;
+
+ return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+ return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+ return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+ count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+ rbuf->tail = (rbuf->tail + count) % rbuf->len;
+ smp_mb();
+
+ return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+ if (fiq_debugger_ringbuf_room(rbuf) == 0)
+ return 0;
+
+ rbuf->buf[rbuf->head] = datum;
+ smp_mb();
+ rbuf->head = (rbuf->head + 1) % rbuf->len;
+ smp_mb();
+
+ return 1;
+}
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 000000000000..9e3455a09f8f
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .global fiq_glue_end
+
+ /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
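+ /*
+ * The FIQ-banked registers are preloaded by fiq_glue_setup below:
+ * r8 = handler function, r9 = handler data, sp = per-cpu FIQ stack,
+ * r10 = 1 when the handler may run (decremented here to detect re-entry).
+ */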
+ENTRY(fiq_glue)
+ /* store pc, cpsr from previous mode */
+ mrs r12, spsr
+ sub r11, lr, #4
+ subs r10, #1
+ bne nested_fiq
+
+ stmfd sp!, {r11-r12, lr}
+
+ /* store r8-r14 from previous mode */
+ sub sp, sp, #(7 * 4)
+ stmia sp, {r8-r14}^
+ nop
+
+ /* store r0-r7 from previous mode */
+ stmfd sp!, {r0-r7}
+
+ /* setup func(data,regs) arguments */
+ mov r0, r9
+ mov r1, sp
+ mov r3, r8
+
+ mov r7, sp
+
+ /* Get sp and lr from non-user modes */
+ and r4, r12, #MODE_MASK
+ cmp r4, #USR_MODE
+ beq fiq_from_usr_mode
+
+ mov r7, sp
+ orr r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+ msr cpsr_c, r4
+ str sp, [r7, #(4 * 13)]
+ str lr, [r7, #(4 * 14)]
+ mrs r5, spsr
+ str r5, [r7, #(4 * 17)]
+
+ cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+ /* use fiq stack if we reenter this mode */
+ subne sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+ msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+ mov r2, sp
+ sub sp, r7, #12
+ stmfd sp!, {r2, ip, lr}
+ /* call func(data,regs) */
+ blx r3
+ ldmfd sp, {r2, ip, lr}
+ mov sp, r2
+
+ /* restore/discard saved state */
+ cmp r4, #USR_MODE
+ beq fiq_from_usr_mode_exit
+
+ msr cpsr_c, r4
+ ldr sp, [r7, #(4 * 13)]
+ ldr lr, [r7, #(4 * 14)]
+ msr spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+ msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+ ldmfd sp!, {r0-r7}
+ add sp, sp, #(7 * 4)
+ ldmfd sp!, {r11-r12, lr}
+exit_fiq:
+ msr spsr_cxsf, r12
+ add r10, #1
+ movs pc, r11
+
+nested_fiq:
+ orr r12, r12, #(PSR_F_BIT)
+ b exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
+ mrs r3, cpsr
+ msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+ movs r8, r0
+ mov r9, r1
+ mov sp, r2
+ moveq r10, #0
+ movne r10, #1
+ msr cpsr_c, r3
+ bx lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 000000000000..59586861a636
--- /dev/null
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <asm/cpu_pm.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+ .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+ struct fiq_glue_handler *handler = info;
+ fiq_glue_setup(handler->fiq, handler,
+ __get_cpu_var(fiq_stack) + THREAD_START_SP);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+ int ret;
+ int cpu;
+
+ if (!handler || !handler->fiq)
+ return -EINVAL;
+
+ mutex_lock(&fiq_glue_lock);
+ if (fiq_stack) {
+ ret = -EBUSY;
+ goto err_busy;
+ }
+
+ for_each_possible_cpu(cpu) {
+ void *stack;
+ stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (WARN_ON(!stack)) {
+ ret = -ENOMEM;
+ goto err_alloc_fiq_stack;
+ }
+ per_cpu(fiq_stack, cpu) = stack;
+ }
+
+ ret = claim_fiq(&fiq_debugger_fiq_handler);
+ if (WARN_ON(ret))
+ goto err_claim_fiq;
+
+ current_handler = handler;
+ on_each_cpu(fiq_glue_setup_helper, handler, true);
+ set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+ mutex_unlock(&fiq_glue_lock);
+ return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+ for_each_possible_cpu(cpu) {
+ free_pages((unsigned long)per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
+ per_cpu(fiq_stack, cpu) = NULL;
+ }
+err_busy:
+ mutex_unlock(&fiq_glue_lock);
+ return ret;
+}
+
+/**
+ * fiq_glue_resume - restore FIQs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable() after returning from
+ * a power state where the FIQ mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler, it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+ if (!current_handler)
+ return;
+ fiq_glue_setup(current_handler->fiq, current_handler,
+ __get_cpu_var(fiq_stack) + THREAD_START_SP);
+ if (current_handler->resume)
+ current_handler->resume(current_handler);
+}
+
+static int fiq_glue_cpu_pm_notify(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ //pr_info("cpu pm enter %d\n", smp_processor_id());
+ local_fiq_disable();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ fiq_glue_resume();
+ local_fiq_enable();
+ //pr_info("cpu pm exit %d\n", smp_processor_id());
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fiq_glue_cpu_pm_notifier = {
+ .notifier_call = fiq_glue_cpu_pm_notify,
+};
+
+static int __init fiq_glue_cpu_pm_init(void)
+{
+ return cpu_pm_register_notifier(&fiq_glue_cpu_pm_notifier);
+}
+core_initcall(fiq_glue_cpu_pm_init);
+
+#ifdef CONFIG_PM
+static int fiq_glue_syscore_suspend(void)
+{
+ local_fiq_disable();
+ return 0;
+}
+
+static void fiq_glue_syscore_resume(void)
+{
+ fiq_glue_resume();
+ local_fiq_enable();
+}
+
+static struct syscore_ops fiq_glue_syscore_ops = {
+ .suspend = fiq_glue_syscore_suspend,
+ .resume = fiq_glue_syscore_resume,
+};
+
+static int __init fiq_glue_syscore_init(void)
+{
+ register_syscore_ops(&fiq_glue_syscore_ops);
+ return 0;
+}
+late_initcall(fiq_glue_syscore_init);
+#endif
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 3227ca952a12..05cd423c5750 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -29,6 +29,7 @@
#include <linux/cpumask.h>
#include <linux/io.h>
+#include <asm/cpu_pm.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
@@ -175,6 +176,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
unsigned int shift = (d->irq % 4) * 8;
unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
u32 val, mask, bit;
+#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
+ struct irq_desc *desc = irq_to_desc(d->irq);
+#endif
if (cpu >= 8 || cpu >= nr_cpu_ids)
return -EINVAL;
@@ -184,7 +188,15 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
+ val |= bit;
+#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
+ if (desc && desc->affinity_hint) {
+ struct cpumask mask_hint;
+ if (cpumask_and(&mask_hint, desc->affinity_hint, mask_val))
+ val |= (*cpumask_bits(&mask_hint) << shift) & mask;
+ }
+#endif
+ writel_relaxed(val, reg);
spin_unlock(&irq_controller_lock);
return IRQ_SET_MASK_OK;
@@ -276,6 +288,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
if (gic_irqs > 1020)
gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
/*
* Set all global interrupts to be level triggered, active low.
*/
@@ -343,6 +357,180 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ void __iomem *dist_base;
+ int i;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data[gic_nr].dist_base;
+
+ if (!dist_base)
+ return;
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ gic_data[gic_nr].saved_spi_conf[i] =
+ readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic_data[gic_nr].saved_spi_pri[i] =
+ readl_relaxed(dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic_data[gic_nr].saved_spi_target[i] =
+ readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic_data[gic_nr].saved_spi_enable[i] =
+ readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ unsigned int i;
+ void __iomem *dist_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data[gic_nr].dist_base;
+
+ if (!dist_base)
+ return;
+
+ writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_pri[i],
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
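+/*
+ * Saves the per-cpu banked interrupt state (SGI/PPI enable, configuration
+ * and priority registers) for the calling CPU before it enters a low power
+ * state.
+ */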
+static void gic_cpu_save(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data[gic_nr].dist_base;
+ cpu_base = gic_data[gic_nr].cpu_base;
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_pri);
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_PRI + i * 4);
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data[gic_nr].dist_base;
+ cpu_base = gic_data[gic_nr].cpu_base;
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_pri);
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_PRI + i * 4);
+
+ writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
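+/*
+ * CPU_PM events cover only the calling CPU's banked GIC state, while
+ * CPU_COMPLEX_PM events are raised when the whole CPU complex is powered
+ * down and therefore cover the shared distributor as well.
+ */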
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++) {
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(i);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ gic_cpu_restore(i);
+ break;
+ case CPU_COMPLEX_PM_ENTER:
+ gic_dist_save(i);
+ break;
+ case CPU_COMPLEX_PM_ENTER_FAILED:
+ case CPU_COMPLEX_PM_EXIT:
+ gic_dist_restore(i);
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+ .notifier_call = gic_notifier,
+};
+
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
{
@@ -358,8 +546,23 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
if (gic_nr == 0)
gic_cpu_base_addr = cpu_base;
+ gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic, irq_start);
gic_cpu_init(gic);
+
+ gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_enable);
+
+ gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_conf);
+
+ gic->saved_ppi_pri = __alloc_percpu(DIV_ROUND_UP(32, 4) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_pri);
+
+ cpu_pm_register_notifier(&gic_notifier_block);
}
void __cpuinit gic_secondary_init(unsigned int gic_nr)
diff --git a/arch/arm/configs/tegra3_android_defconfig b/arch/arm/configs/tegra3_android_defconfig
new file mode 100644
index 000000000000..f3d5155ab344
--- /dev/null
+++ b/arch/arm/configs/tegra3_android_defconfig
@@ -0,0 +1,409 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_ASHMEM=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_MACH_CARDHU=y
+CONFIG_MACH_TEGRA_ENTERPRISE=y
+CONFIG_TEGRA_PWM=y
+CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND=y
+CONFIG_USB_HOTPLUG=y
+CONFIG_TEGRA_DYNAMIC_PWRDET=y
+CONFIG_TEGRA_USB_MODEM_POWER=y
+CONFIG_TEGRA_BB_XMM_POWER=y
+CONFIG_TEGRA_BB_XMM_POWER2=m
+CONFIG_TEGRA_PLLM_RESTRICTED=y
+CONFIG_ARM_ERRATA_742230=y
+CONFIG_ARM_ERRATA_743622=y
+CONFIG_ARM_ERRATA_751472=y
+CONFIG_ARM_ERRATA_752520=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_WAKELOCK=y
+CONFIG_PM_RUNTIME=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_BLUESLEEP=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_CAIF=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_MISC_DEVICES=y
+CONFIG_AD525X_DPOT=y
+CONFIG_AD525X_DPOT_I2C=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_APDS9802ALS=y
+CONFIG_SENSORS_NCT1008=y
+CONFIG_UID_STAT=y
+CONFIG_BCM4329_RFKILL=y
+CONFIG_TEGRA_CRYPTO_DEV=y
+CONFIG_MAX1749_VIBRATOR=y
+CONFIG_MPU_SENSORS_TIMERIRQ=y
+CONFIG_MPU_SENSORS_KXTF9=y
+CONFIG_MPU_SENSORS_AK8975=y
+CONFIG_TEGRA_BB_SUPPORT=y
+CONFIG_TEGRA_BB_POWER=y
+CONFIG_TEGRA_BB_M7400=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_NETDEV_10000 is not set
+CONFIG_BCM4329=m
+CONFIG_BCM4329_FIRST_SCAN=y
+CONFIG_BCM4329_FW_PATH="/system/vendor/firmware/fw_bcm4329.bin"
+CONFIG_BCM4329_NVRAM_PATH="/system/etc/nvram.txt"
+CONFIG_BCM4329_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD=m
+CONFIG_BCMDHD_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD_HW_OOB=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_RAW_IP=m
+CONFIG_PPP=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_INTERRUPT=y
+CONFIG_KEYBOARD_TEGRA=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_TEGRA=y
+CONFIG_SPI=y
+CONFIG_SPI_TEGRA=y
+CONFIG_SPI_SLAVE_TEGRA=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_BQ20Z75=y
+CONFIG_BATTERY_BQ27x00=y
+CONFIG_CHARGER_TPS8003X=y
+CONFIG_BATTERY_GAUGE_TPS8003X=y
+CONFIG_CHARGER_GPIO=y
+CONFIG_SENSORS_TEGRA_TSENSOR=y
+CONFIG_SENSORS_INA219=y
+CONFIG_THERMAL=y
+CONFIG_MFD_MAX77663=y
+CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_TPS6591X=y
+CONFIG_MFD_TPS80031=y
+CONFIG_GPADC_TPS80031=y
+CONFIG_MFD_RICOH583=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_MAX77663=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_REGULATOR_TPS6591X=y
+CONFIG_REGULATOR_TPS6236X=y
+CONFIG_REGULATOR_TPS80031=y
+CONFIG_REGULATOR_RICOH583=y
+CONFIG_REGULATOR_GPIO_SWITCH=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_RC_CORE is not set
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_TEGRA_AVP is not set
+# CONFIG_TEGRA_MEDIASERVER is not set
+CONFIG_TEGRA_NVAVP=y
+CONFIG_VIDEO_OV5650=y
+CONFIG_VIDEO_OV9726=y
+CONFIG_VIDEO_OV2710=y
+CONFIG_VIDEO_AR0832=y
+CONFIG_TORCH_SSL3250A=y
+CONFIG_TORCH_TPS61050=y
+CONFIG_VIDEO_SH532U=y
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_USB_GSPCA is not set
+# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_TEGRA_DSI=y
+CONFIG_TEGRA_NVHDCP=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_TEGRA_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_PLATFORM_DRIVER=y
+CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA=y
+CONFIG_SND_HDA_POWER_SAVE=y
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=10
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_WM8903=y
+CONFIG_SND_SOC_TEGRA_MAX98088=y
+CONFIG_HID_SONY=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_SERIAL_BASEBAND=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_FSL_USB2=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NFC_DEVICES=y
+CONFIG_PN544_NFC=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77663=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_RTC_DRV_TPS6591x=y
+CONFIG_RTC_DRV_TPS80031=y
+CONFIG_RTC_DRV_RC5T583=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_IIO=y
+CONFIG_SENSORS_ISL29028=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_TEGRA_SE=y
diff --git a/arch/arm/configs/tegra3_defconfig b/arch/arm/configs/tegra3_defconfig
new file mode 100644
index 000000000000..0a4b42a6ad6d
--- /dev/null
+++ b/arch/arm/configs/tegra3_defconfig
@@ -0,0 +1,428 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_TEGRA_PCI=y
+CONFIG_MACH_CARDHU=y
+CONFIG_MACH_TEGRA_ENTERPRISE=y
+CONFIG_TEGRA_PWM=y
+CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_USB_HOTPLUG=y
+CONFIG_TEGRA_DYNAMIC_PWRDET=y
+CONFIG_TEGRA_USB_MODEM_POWER=y
+CONFIG_TEGRA_BB_XMM_POWER=y
+CONFIG_TEGRA_BB_XMM_POWER2=m
+CONFIG_TEGRA_PLLM_RESTRICTED=y
+CONFIG_ARM_ERRATA_742230=y
+CONFIG_ARM_ERRATA_743622=y
+CONFIG_ARM_ERRATA_751472=y
+CONFIG_ARM_ERRATA_752520=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_BLUESLEEP=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_CAIF=y
+CONFIG_NFC=y
+CONFIG_PN544_NFC=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_MISC_DEVICES=y
+CONFIG_AD525X_DPOT=y
+CONFIG_AD525X_DPOT_I2C=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_ICS932S401=y
+CONFIG_APDS9802ALS=y
+CONFIG_ISL29003=y
+CONFIG_SENSORS_AK8975=y
+CONFIG_SENSORS_NCT1008=y
+CONFIG_UID_STAT=y
+CONFIG_BCM4329_RFKILL=y
+CONFIG_TEGRA_CRYPTO_DEV=y
+CONFIG_MPU_SENSORS_TIMERIRQ=y
+CONFIG_MPU_SENSORS_KXTF9=y
+CONFIG_MPU_SENSORS_AK8975=y
+CONFIG_TEGRA_BB_SUPPORT=y
+CONFIG_TEGRA_BB_POWER=y
+CONFIG_TEGRA_BB_M7400=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_R8169=y
+# CONFIG_NETDEV_10000 is not set
+CONFIG_BCM4329=m
+CONFIG_BCM4329_FIRST_SCAN=y
+CONFIG_BCM4329_FW_PATH="/lib/firmware/bcm4329/fw_bcm4329.bin"
+CONFIG_BCM4329_NVRAM_PATH="/lib/firmware/bcm4329/nvram.txt"
+CONFIG_BCM4329_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD=m
+CONFIG_BCMDHD_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD_HW_OOB=y
+CONFIG_USB_CATC=y
+CONFIG_USB_KAWETH=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_MCS7830=y
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_RAW_IP=m
+CONFIG_PPP=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_INTERRUPT=y
+CONFIG_KEYBOARD_TEGRA=y
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_ALPS_GPIO_SCROLLWHEEL=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_TEGRA=y
+CONFIG_SPI=y
+CONFIG_SPI_TEGRA=y
+CONFIG_SPI_SLAVE_TEGRA=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_BQ20Z75=y
+CONFIG_BATTERY_BQ27x00=y
+CONFIG_CHARGER_TPS8003X=y
+CONFIG_BATTERY_GAUGE_TPS8003X=y
+CONFIG_CHARGER_GPIO=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_TEGRA_TSENSOR=y
+CONFIG_SENSORS_INA219=y
+CONFIG_THERMAL=y
+CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_MAX8907C=y
+CONFIG_MFD_MAX77663=y
+CONFIG_MFD_TPS6591X=y
+CONFIG_MFD_TPS80031=y
+CONFIG_GPADC_TPS80031=y
+CONFIG_MFD_RICOH583=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_MAX8907C=y
+CONFIG_REGULATOR_MAX77663=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_REGULATOR_TPS6591X=y
+CONFIG_REGULATOR_TPS6236X=y
+CONFIG_REGULATOR_TPS80031=y
+CONFIG_REGULATOR_RICOH583=y
+CONFIG_REGULATOR_GPIO_SWITCH=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_TEGRA_AVP is not set
+# CONFIG_TEGRA_MEDIASERVER is not set
+CONFIG_TEGRA_NVAVP=y
+CONFIG_VIDEO_OV5650=y
+CONFIG_VIDEO_OV9726=y
+CONFIG_VIDEO_OV2710=y
+CONFIG_VIDEO_AR0832=y
+CONFIG_TORCH_SSL3250A=y
+CONFIG_TORCH_TPS61050=y
+CONFIG_VIDEO_SH532U=y
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_TEGRA_DSI=y
+CONFIG_TEGRA_NVHDCP=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_TEGRA_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_PLATFORM_DRIVER=y
+CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA=y
+CONFIG_SND_HDA_POWER_SAVE=y
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=10
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_WM8903=y
+CONFIG_SND_SOC_TEGRA_MAX98088=y
+CONFIG_HID_SONY=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_SERIAL_BASEBAND=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_FSL_USB2=y
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77663=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_RTC_DRV_TPS6591x=y
+CONFIG_RTC_DRV_TPS80031=y
+CONFIG_RTC_DRV_RC5T583=y
+CONFIG_STAGING=y
+CONFIG_IIO=y
+CONFIG_SENSORS_ISL29028=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=m
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_SG=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_TEGRA_SE=y
diff --git a/arch/arm/configs/tegra_android_defconfig b/arch/arm/configs/tegra_android_defconfig
new file mode 100644
index 000000000000..f3325e991683
--- /dev/null
+++ b/arch/arm/configs/tegra_android_defconfig
@@ -0,0 +1,374 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_ASHMEM=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_MACH_HARMONY=y
+CONFIG_MACH_VENTANA=y
+CONFIG_MACH_KAEN=y
+CONFIG_MACH_PAZ00=y
+CONFIG_MACH_TRIMSLICE=y
+CONFIG_MACH_WARIO=y
+CONFIG_MACH_WHISTLER=y
+CONFIG_TEGRA_PWM=y
+CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND=y
+CONFIG_USB_HOTPLUG=y
+CONFIG_TEGRA_USB_MODEM_POWER=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_ARM_ERRATA_751472=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_WAKELOCK=y
+CONFIG_PM_RUNTIME=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_BLUESLEEP=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_MISC_DEVICES=y
+CONFIG_AD525X_DPOT=y
+CONFIG_AD525X_DPOT_I2C=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_APDS9802ALS=y
+CONFIG_UID_STAT=y
+CONFIG_BCM4329_RFKILL=y
+CONFIG_TEGRA_CRYPTO_DEV=y
+CONFIG_MAX1749_VIBRATOR=y
+CONFIG_MPU_SENSORS_TIMERIRQ=y
+CONFIG_MPU_SENSORS_KXTF9=y
+CONFIG_MPU_SENSORS_AK8975=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_R8169=y
+# CONFIG_NETDEV_10000 is not set
+CONFIG_BCM4329=m
+CONFIG_BCM4329_FIRST_SCAN=y
+CONFIG_BCM4329_FW_PATH="/system/vendor/firmware/fw_bcm4329.bin"
+CONFIG_BCM4329_NVRAM_PATH="/system/etc/nvram.txt"
+CONFIG_BCM4329_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD=y
+CONFIG_BCMDHD_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD_HW_OOB=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_TEGRA=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_PANJIT_I2C=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_ADXL34X=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_TEGRA=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_BQ20Z75=y
+CONFIG_CHARGER_GPIO=y
+CONFIG_SENSORS_ADT7461=y
+CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_MAX8907C=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_MAX8907C=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_RC_CORE is not set
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_OV5650=y
+CONFIG_VIDEO_OV2710=y
+CONFIG_VIDEO_SOC380=y
+CONFIG_TORCH_SSL3250A=y
+CONFIG_VIDEO_SH532U=y
+CONFIG_VIDEO_AD5820=y
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_USB_GSPCA is not set
+# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_TEGRA_DSI=y
+CONFIG_TEGRA_NVHDCP=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_TEGRA_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_WM8903=y
+CONFIG_SND_SOC_TEGRA_WM8753=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_ACM=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_FSL_USB2=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX8907C=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_IIO=y
+CONFIG_SENSORS_ISL29018=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_SG=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_TEGRA_AES=y
diff --git a/arch/arm/configs/tegra_aruba2_android_defconfig b/arch/arm/configs/tegra_aruba2_android_defconfig
new file mode 100644
index 000000000000..9665b2ebf516
--- /dev/null
+++ b/arch/arm/configs/tegra_aruba2_android_defconfig
@@ -0,0 +1,311 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="arm-eabi-"
+# CONFIG_SWAP is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_ASHMEM=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_MACH_ARUBA=y
+CONFIG_TEGRA_DEBUG_UARTA=y
+CONFIG_TEGRA_PWM=y
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_TEGRA_MC_PROFILE=y
+CONFIG_ARM_ERRATA_743622=y
+CONFIG_ARM_ERRATA_751472=y
+CONFIG_ARM_ERRATA_752520=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=448@2048M console=ttyS0,115200n8 earlyprintk init=/bin/ash"
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_RFKILL=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND_TEGRA=y
+CONFIG_MTD_NAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_MISC_DEVICES=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_TEGRA=y
+CONFIG_SPI=y
+CONFIG_SPI_TEGRA=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PDA_POWER=y
+CONFIG_BATTERY_BQ20Z75=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DUMMY=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_V4L_USB_DRIVERS is not set
+# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_ACM=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_ANDROID=y
+CONFIG_USB_ANDROID_ADB=y
+CONFIG_USB_ANDROID_MTP=y
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_IIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_ICEDCC=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/tegra_cardhu_mods_defconfig b/arch/arm/configs/tegra_cardhu_mods_defconfig
new file mode 100644
index 000000000000..265af9744379
--- /dev/null
+++ b/arch/arm/configs/tegra_cardhu_mods_defconfig
@@ -0,0 +1,149 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="arm-eabi-"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_ASHMEM=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_MACH_CARDHU=y
+CONFIG_TEGRA_PWM=y
+# CONFIG_TEGRA_CPU_DVFS is not set
+# CONFIG_TEGRA_IOVMM_SMMU is not set
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=448@2048M console=ttyS0,115200n8 earlyprintk init=/bin/ash"
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
+CONFIG_SUSPEND_TIME=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_TEGRA=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_BQ20Z75=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_TPS6591X=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_TPS6591X=y
+CONFIG_REGULATOR_TPS6236X=y
+CONFIG_REGULATOR_GPIO_SWITCH=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_TEGRA_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_IIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 8845f1c9925d..b73d45cb6ed5 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1,4 +1,5 @@
CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -9,10 +10,13 @@ CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS_ALL=y
# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -20,13 +24,21 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_TEGRA=y
+CONFIG_GPIO_PCA953X=y
CONFIG_MACH_HARMONY=y
+CONFIG_MACH_VENTANA=y
CONFIG_MACH_KAEN=y
CONFIG_MACH_PAZ00=y
CONFIG_MACH_TRIMSLICE=y
CONFIG_MACH_WARIO=y
-CONFIG_TEGRA_DEBUG_UARTD=y
-CONFIG_ARM_ERRATA_742230=y
+CONFIG_MACH_WHISTLER=y
+CONFIG_TEGRA_PWM=y
+CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_USB_HOTPLUG=y
+CONFIG_TEGRA_USB_MODEM_POWER=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_ARM_ERRATA_751472=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -37,22 +49,38 @@ CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
CONFIG_VFP=y
-CONFIG_PM=y
+CONFIG_PM_RUNTIME=y
+CONFIG_SUSPEND_TIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_INET_AH=m
CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
@@ -63,46 +91,255 @@ CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_TUNNEL=y
CONFIG_IPV6_MULTIPLE_TABLES=y
-# CONFIG_WIRELESS is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_BLUESLEEP=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND_TEGRA=y
+CONFIG_MTD_NAND=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_MISC_DEVICES=y
CONFIG_AD525X_DPOT=y
CONFIG_AD525X_DPOT_I2C=y
-CONFIG_ICS932S401=y
+# CONFIG_ANDROID_PMEM is not set
CONFIG_APDS9802ALS=y
CONFIG_ISL29003=y
+CONFIG_SENSORS_AK8975=y
+CONFIG_UID_STAT=y
+CONFIG_BCM4329_RFKILL=y
+CONFIG_TEGRA_CRYPTO_DEV=y
+CONFIG_MPU_SENSORS_TIMERIRQ=y
+CONFIG_MPU_SENSORS_KXTF9=y
+CONFIG_MPU_SENSORS_AK8975=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_R8169=y
# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
+CONFIG_BCM4329=m
+CONFIG_BCM4329_FIRST_SCAN=y
+CONFIG_BCM4329_FW_PATH="/lib/firmware/bcm4329/fw_bcm4329.bin"
+CONFIG_BCM4329_NVRAM_PATH="/lib/firmware/bcm4329/nvram.txt"
+CONFIG_BCM4329_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD=y
+CONFIG_BCMDHD_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD_HW_OOB=y
+CONFIG_USB_CATC=y
+CONFIG_USB_KAWETH=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_MCS7830=y
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_TEGRA=y
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_PANJIT_I2C=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_ADXL34X=y
+CONFIG_INPUT_ALPS_GPIO_SCROLLWHEEL=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_TEGRA=y
# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_TEGRA=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_BQ20Z75=y
+CONFIG_CHARGER_GPIO=y
+CONFIG_SENSORS_ADT7461=y
CONFIG_SENSORS_LM90=y
CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_MAX8907C=y
CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_MAX8907C=y
CONFIG_REGULATOR_TPS6586X=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_OV5650=y
+CONFIG_VIDEO_OV2710=y
+CONFIG_VIDEO_SOC380=y
+CONFIG_TORCH_SSL3250A=y
+CONFIG_VIDEO_SH532U=y
+CONFIG_VIDEO_AD5820=y
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_TEGRA_DSI=y
+CONFIG_TEGRA_NVHDCP=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_TEGRA_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_WM8903=y
+CONFIG_SND_SOC_TEGRA_WM8753=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_ACM=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_FSL_USB2=y
+CONFIG_USB_TEGRA_OTG=y
CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX8907C=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_RTC_DRV_TEGRA=y
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_IIO=y
CONFIG_SENSORS_ISL29018=y
-CONFIG_SENSORS_AK8975=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -111,36 +348,48 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
CONFIG_TMPFS=y
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
CONFIG_PARTITION_ADVANCED=y
CONFIG_EFI_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=m
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
+CONFIG_LOCKUP_DETECTOR=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_SLAB=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_SG=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_ARC4=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
+CONFIG_CRYPTO_DEV_TEGRA_AES=y
diff --git a/arch/arm/configs/tegra_p852_gnu_linux_defconfig b/arch/arm/configs/tegra_p852_gnu_linux_defconfig
new file mode 100644
index 000000000000..e393d437365f
--- /dev/null
+++ b/arch/arm/configs/tegra_p852_gnu_linux_defconfig
@@ -0,0 +1,303 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="arm-eabi-"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PANIC_TIMEOUT=10
+CONFIG_ASHMEM=y
+CONFIG_EMBEDDED=y
+CONFIG_ASHMEM=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_MACH_P852=y
+CONFIG_TEGRA_PWM=y
+# CONFIG_TEGRA_CPU_DVFS is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_VMSPLIT_2G=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=448M@0M console=ttyS0,115200n8 earlyprintk init=/bin/ash"
+CONFIG_VFP=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_RFKILL=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_NAND_TEGRA=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_BCM4329_WIFI_CONTROL_FUNC=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_TEGRA=y
+CONFIG_SPI=y
+CONFIG_SPI_TEGRA=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_MFD_TPS6586X=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DUMMY=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+# CONFIG_RC_MAP is not set
+# CONFIG_IR_NEC_DECODER is not set
+# CONFIG_IR_RC5_DECODER is not set
+# CONFIG_IR_RC6_DECODER is not set
+# CONFIG_IR_JVC_DECODER is not set
+# CONFIG_IR_SONY_DECODER is not set
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_TEGRA_CAMERA is not set
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_DC=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_HIDRAW=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_ACM=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c72682..f91a748d0736 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
/*
@@ -344,4 +344,53 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
flush_cache_all();
}
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write : ReadOnly, ReadWrite
+ * Presence : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ * coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ * operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee anything about mappings other than the requested one,
+ * beyond that they do not violate the rules of the CPU in your
+ * system. Do not depend on any effect on other mappings; CPUs
+ * other than the one you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_iwb(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+int set_memory_array_iwb(unsigned long *addr, int addrinarray);
+
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+int set_pages_array_iwb(struct page **pages, int addrinarray);
+
#endif
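
As a rough illustration of the call pattern described in the comment block above (a sketch, not part of this patch; the helper names, 'buf' and 'size' are assumptions), a driver holding a page-aligned kernel virtual range could temporarily switch it to write-combining and back:

  #include <linux/mm.h>
  #include <asm/cacheflush.h>

  /* Mark a page-aligned kernel virtual range write-combining; the API
   * flushes caches/TLBs for us, but exclusion against other users of
   * the same physical pages remains the caller's job. */
  static int my_range_set_wc(void *buf, size_t size)
  {
  	return set_memory_wc((unsigned long)buf, size >> PAGE_SHIFT);
  }

  /* Restore normal write-back caching before the range is freed. */
  static void my_range_set_wb(void *buf, size_t size)
  {
  	set_memory_wb((unsigned long)buf, size >> PAGE_SHIFT);
  }
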
diff --git a/arch/arm/include/asm/cpu_pm.h b/arch/arm/include/asm/cpu_pm.h
new file mode 100644
index 000000000000..07b1b6ec025c
--- /dev/null
+++ b/arch/arm/include/asm/cpu_pm.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASMARM_CPU_PM_H
+#define _ASMARM_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpu_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes, CPU notifications and CPU
+ * complex notifications.
+ *
+ * CPU notifications apply to a single CPU, and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU complex notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ *
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+ /* A single cpu is entering a low power state */
+ CPU_PM_ENTER,
+
+ /* A single cpu failed to enter a low power state */
+ CPU_PM_ENTER_FAILED,
+
+ /* A single cpu is exiting a low power state */
+ CPU_PM_EXIT,
+
+ /* A cpu power domain is entering a low power state */
+ CPU_COMPLEX_PM_ENTER,
+
+ /* A cpu power domain failed to enter a low power state */
+ CPU_COMPLEX_PM_ENTER_FAILED,
+
+ /* A cpu power domain is exiting a low power state */
+ CPU_COMPLEX_PM_EXIT,
+};
+
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * cpu_pm_enter
+ *
+ * Notifies listeners that a single cpu is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected cpu with interrupts disabled. Platform is
+ * responsible for ensuring that cpu_pm_enter is not called twice on the same
+ * cpu before cpu_pm_exit is called.
+ */
+int cpu_pm_enter(void);
+
+/*
+ * cpu_pm_exit
+ *
+ * Notifies listeners that a single cpu is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected cpu with interrupts disabled.
+ */
+int cpu_pm_exit(void);
+
+/*
+ * cpu_complex_pm_enter
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain.
+ *
+ * Must be called with interrupts disabled.
+ */
+int cpu_complex_pm_enter(void);
+
+/*
+ * cpu_complex_pm_exit
+ *
+ * Notifies listeners that all cpus in a power domain are exiting a low power
+ * state that may have caused some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_complex_pm_enter has been called, and before
+ * cpu_pm_exit has been called on any cpu in the power domain.
+ *
+ * Must be called with interrupts disabled.
+ */
+int cpu_complex_pm_exit(void);
+
+#endif
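
A minimal sketch of how a driver for a block in the CPU power domain might hook these notifications (illustrative only; the block name and callback bodies are assumptions, not code from this patch):

  #include <linux/init.h>
  #include <linux/notifier.h>
  #include <asm/cpu_pm.h>

  static int my_block_cpu_pm_notify(struct notifier_block *nb,
  				  unsigned long action, void *data)
  {
  	switch (action) {
  	case CPU_PM_ENTER:
  		/* save per-cpu state the power domain may lose */
  		break;
  	case CPU_PM_ENTER_FAILED:
  	case CPU_PM_EXIT:
  		/* restore the state saved on CPU_PM_ENTER */
  		break;
  	}
  	return NOTIFY_OK;
  }

  static struct notifier_block my_block_cpu_pm_nb = {
  	.notifier_call = my_block_cpu_pm_notify,
  };

  static int __init my_block_pm_init(void)
  {
  	return cpu_pm_register_notifier(&my_block_cpu_pm_nb);
  }
  core_initcall(my_block_pm_init);

The platform's idle or suspend path would then bracket the low power state with cpu_pm_enter()/cpu_pm_exit(), using the complex variants around the last CPU down and the first CPU up in the power domain.
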
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda181549..57f1fa0e983b 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -8,6 +8,9 @@
#include <asm/param.h> /* HZ */
+#ifdef CONFIG_ARCH_PROVIDES_UDELAY
+#include <mach/delay.h>
+#else
extern void __delay(int loops);
/*
@@ -40,5 +43,6 @@ extern void __const_udelay(unsigned long);
__const_udelay((n) * ((2199023U*HZ)>>11))) : \
__udelay(n))
+#endif /* defined(CONFIG_ARCH_PROVIDES_UDELAY) */
#endif /* defined(_ARM_DELAY_H) */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686e..0691cdce48e8 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -52,6 +52,7 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_ABS32 2
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
+#define R_ARM_TARGET1 38
#define R_ARM_V4BX 40
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
new file mode 100644
index 000000000000..4d274883ba6a
--- /dev/null
+++ b/arch/arm/include/asm/fiq_debugger.h
@@ -0,0 +1,64 @@
+/*
+ * arch/arm/include/asm/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume: used to restore uart state right before enabling
+ * the fiq.
+ * @uart_enable: Do the work necessary to communicate with the uart
+ * hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable: Do the work necessary to disable the uart hw
+ * (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend: called during PM suspend, generally not needed
+ * for real fiq mode debugger.
+ * @uart_dev_resume: called during PM resume, generally not needed
+ * for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+ int (*uart_init)(struct platform_device *pdev);
+ void (*uart_free)(struct platform_device *pdev);
+ int (*uart_resume)(struct platform_device *pdev);
+ int (*uart_getc)(struct platform_device *pdev);
+ void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+ void (*uart_flush)(struct platform_device *pdev);
+ void (*uart_enable)(struct platform_device *pdev);
+ void (*uart_disable)(struct platform_device *pdev);
+
+ int (*uart_dev_suspend)(struct platform_device *pdev);
+ int (*uart_dev_resume)(struct platform_device *pdev);
+
+ void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+ bool enable);
+ void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+ void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+ void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
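
For reference, a board might wire up this platform data roughly as follows (a hypothetical sketch; the UART helpers here are stubs and not part of this patch):

  #include <linux/platform_device.h>
  #include <asm/fiq_debugger.h>

  static int my_uart_getc(struct platform_device *pdev)
  {
  	/* return a received character, or FIQ_DEBUGGER_NO_CHAR if the
  	 * receive FIFO is empty */
  	return FIQ_DEBUGGER_NO_CHAR;
  }

  static void my_uart_putc(struct platform_device *pdev, unsigned int c)
  {
  	/* write one character to the debug UART */
  }

  static struct fiq_debugger_pdata my_fiq_debugger_pdata = {
  	.uart_getc = my_uart_getc,
  	.uart_putc = my_uart_putc,
  };
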
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644
index 000000000000..d54c29db97a8
--- /dev/null
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+ void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+ void (*resume)(struct fiq_glue_handler *h);
+};
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
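
A handler registration against this interface might look roughly like this (hypothetical sketch; the handler bodies are placeholders):

  #include <linux/init.h>
  #include <asm/fiq_glue.h>

  static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
  {
  	/* runs in FIQ mode: keep it short and avoid locks shared with
  	 * IRQ or process context */
  }

  static void my_fiq_resume(struct fiq_glue_handler *h)
  {
  	/* re-arm the FIQ source after the CPU returns from a low power
  	 * state */
  }

  static struct fiq_glue_handler my_fiq_handler = {
  	.fiq	= my_fiq,
  	.resume	= my_fiq_resume,
  };

  static int __init my_fiq_init(void)
  {
  	return fiq_glue_register_handler(&my_fiq_handler);
  }
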
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 89ad1805e579..2635c8b5bf59 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 5
+#define NR_IPI 6
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 99a6ed7e1bfd..fd04f24055fd 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -62,6 +62,7 @@
#define L2X0_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
+#define L2X0_CACHE_ID_REV_MASK (0x3f)
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
@@ -77,8 +78,11 @@
#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+#define REV_PL310_R2P0 4
+
#ifndef __ASSEMBLY__
-extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern void l2x0_enable(void);
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..6643d6c4f35e 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -17,15 +17,17 @@
#define TRACER_ACCESSED_BIT 0
#define TRACER_RUNNING_BIT 1
#define TRACER_CYCLE_ACC_BIT 2
+#define TRACER_TRACE_DATA_BIT 3
#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA BIT(TRACER_TRACE_DATA_BIT)
#define TRACER_TIMEOUT 10000
-#define etm_writel(t, v, x) \
- (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+#define etm_writel(t, id, v, x) \
+ (__raw_writel((v), (t)->etm_regs[(id)] + (x)))
+#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -113,11 +115,19 @@
#define ETMR_TRACEENCTRL 0x24
#define ETMTE_INCLEXCL BIT(24)
#define ETMR_TRACEENEVT 0x20
+
+#define ETMR_VIEWDATAEVT 0x30
+#define ETMR_VIEWDATACTRL1 0x34
+#define ETMR_VIEWDATACTRL2 0x38
+#define ETMR_VIEWDATACTRL3 0x3c
+#define ETMVDC3_EXCLONLY BIT(16)
+
#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
- ETMCTRL_DATA_DO_ADDR | \
ETMCTRL_BRANCH_OUTPUT | \
ETMCTRL_DO_CONTEXTID)
+#define ETMR_TRACEIDR 0x200
+
/* ETM management registers, "ETM Architecture", 3.5.24 */
#define ETMMR_OSLAR 0x300
#define ETMMR_OSLSR 0x304
@@ -140,14 +150,16 @@
#define ETBFF_TRIGIN BIT(8)
#define ETBFF_TRIGEVT BIT(9)
#define ETBFF_TRIGFL BIT(10)
+#define ETBFF_STOPFL BIT(12)
#define etb_writel(t, v, x) \
(__raw_writel((v), (t)->etb_regs + (x)))
#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
- do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+ do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+ do { etm_writel((t), (id), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 435d3f86c708..67d28c76365a 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -46,6 +46,15 @@ struct gic_chip_data {
unsigned int irq_offset;
void __iomem *dist_base;
void __iomem *cpu_base;
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_pri[DIV_ROUND_UP(1020, 4)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_conf;
+ u32 __percpu *saved_ppi_pri;
+
+ unsigned int gic_irqs;
};
#endif
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 5a526afb5f18..a5656333d574 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -26,6 +26,9 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
#endif
#endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644
index 000000000000..bca864ac945f
--- /dev/null
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+ struct sdio_cis cis;
+ struct sdio_cccr cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+};
+
+struct mmc_platform_data {
+ unsigned int ocr_mask; /* available voltages */
+ int built_in; /* built-in device flag */
+ int card_present; /* card detect state */
+ u32 (*translate_vdd)(struct device *, unsigned int);
+ unsigned int (*status)(struct device *);
+ struct embedded_sdio_data *embedded_sdio;
+ int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
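
A board file could fill this structure in along these lines (illustrative values only, not taken from this patch):

  #include <asm/mach/mmc.h>

  static unsigned int my_wifi_status(struct device *dev)
  {
  	return 1;	/* SDIO device is soldered down, always present */
  }

  static struct mmc_platform_data my_wifi_mmc_pdata = {
  	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
  	.built_in	= 1,
  	.card_present	= 1,
  	.status		= my_wifi_status,
  };
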
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ac75d0848889..c906a2534c88 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -201,6 +201,8 @@ typedef struct page *pgtable_t;
extern int pfn_valid(unsigned long);
#endif
+extern phys_addr_t lowmem_limit;
+
#include <asm/memory.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 22de005f159c..9a8099ed3ade 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -64,8 +64,10 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
pte_t *pte;
pte = (pte_t *)__get_free_page(PGALLOC_GFP);
+#if !defined(CONFIG_CPU_CACHE_V7) || !defined(CONFIG_SMP)
if (pte)
clean_pte_table(pte);
+#endif
return pte;
}
@@ -81,8 +83,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
pte = alloc_pages(PGALLOC_GFP, 0);
#endif
if (pte) {
+#if !defined(CONFIG_CPU_CACHE_V7) || !defined(CONFIG_SMP)
if (!PageHighMem(pte))
clean_pte_table(page_address(pte));
+#endif
pgtable_page_ctor(pte);
}
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704e0271..e6d609c2cb9b 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -189,6 +189,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+#define L_PTE_MT_INNER_WB (_AT(pteval_t, 0x05) << 2) /* 0101 (armv6, armv7) */
#ifndef __ASSEMBLY__
@@ -244,6 +245,9 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
+#define pgprot_inner_writeback(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_INNER_WB)
+
#endif /* __ASSEMBLY__ */
/*
@@ -325,6 +329,24 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
clean_pmd_entry(pmdp); \
} while (0)
+extern spinlock_t pgd_lock;
+extern struct list_head pgd_list;
+
+pte_t *lookup_address(unsigned long address, unsigned int *level);
+enum {
+ PG_LEVEL_NONE,
+ PG_LEVEL_4K,
+ PG_LEVEL_2M,
+ PG_LEVEL_NUM
+};
+
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
+
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
return __va(pmd_val(pmd) & PAGE_MASK);
@@ -354,6 +376,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+#define pmd_pfn(pmd) ((pmd_val(pmd) & SECTION_MASK) >> PAGE_SHIFT)
+#define pte_pgprot(pte) ((pgprot_t)(pte_val(pte) & ~PAGE_MASK))
+
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index 154b89b81d3e..1ec30e944084 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -18,4 +18,5 @@
*/
#include <asm-generic/sizes.h>
-#define SZ_48M (SZ_32M + SZ_16M)
+#define SZ_48M (SZ_32M + SZ_16M)
+#define SZ_160M (SZ_128M | SZ_32M)
\ No newline at end of file
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index e42d96a45d3e..74f288f4802c 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,4 +93,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
*/
extern void show_local_irqs(struct seq_file *, int);
+extern void smp_send_all_cpu_backtrace(void);
+
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index f7887dc53c1f..b6f3c9f80ea7 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+obj-y += cpu_pm.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
ifneq ($(CONFIG_ARCH_EBSA110),y)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 16baba2e4369..927522cfc12e 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -59,9 +59,6 @@ int main(void)
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
-#ifdef CONFIG_SMP
- DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
-#endif
#ifdef CONFIG_ARM_THUMBEE
DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
#endif
diff --git a/arch/arm/kernel/cpu_pm.c b/arch/arm/kernel/cpu_pm.c
new file mode 100644
index 000000000000..748af1f1f437
--- /dev/null
+++ b/arch/arm/kernel/cpu_pm.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+
+#include <asm/cpu_pm.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+ int ret;
+
+ ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
+
+ return notifier_to_errno(ret);
+}
+
+int cpu_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+int cpu_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+int cpu_complex_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_COMPLEX_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ cpu_pm_notify(CPU_COMPLEX_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_complex_pm_enter);
+
+int cpu_complex_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_COMPLEX_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_complex_pm_exit);
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index bcd66e00bdbe..9126592867f4 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -154,6 +154,11 @@ ENDPROC(printhex2)
.ltorg
ENTRY(printascii)
+#if defined(CONFIG_DEBUG_ICEDCC) && defined(CONFIG_SMP)
+ mrc p15, 0, r3, c0, c0, 5
+ ands r3, r3, #3
+ movne pc, lr
+#endif
addruart_current r3, r1, r2
b 2f
1: waituart r2, r3
@@ -170,6 +175,11 @@ ENTRY(printascii)
ENDPROC(printascii)
ENTRY(printch)
+#if defined(CONFIG_DEBUG_ICEDCC) && defined(CONFIG_SMP)
+ mrc p15, 0, r3, c0, c0, 5
+ ands r3, r3, #3
+ movne pc, lr
+#endif
addruart_current r3, r1, r2
mov r1, r0
mov r0, #0
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 9b05c6a0dcea..d4a0da1e48f4 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,22 +40,15 @@ EXPORT_SYMBOL(elf_check_arch);
void elf_set_personality(const struct elf32_hdr *x)
{
unsigned int eflags = x->e_flags;
- unsigned int personality = current->personality & ~PER_MASK;
-
- /*
- * We only support Linux ELF executables, so always set the
- * personality to LINUX.
- */
- personality |= PER_LINUX;
+ unsigned int personality = PER_LINUX_32BIT;
/*
* APCS-26 is only valid for OABI executables
*/
- if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
- (eflags & EF_ARM_APCS_26))
- personality &= ~ADDR_LIMIT_32BIT;
- else
- personality |= ADDR_LIMIT_32BIT;
+ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+ if (eflags & EF_ARM_APCS_26)
+ personality = PER_LINUX;
+ }
set_personality(personality);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a87cbf889ff4..4f8e30f183b9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -450,7 +450,7 @@ __und_usr:
blo __und_usr_unknown
3: ldrht r0, [r4]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
- orr r0, r0, r5, lsl #16
+ orr r0, r0, r5, lsl #16
#else
b __und_usr_unknown
#endif
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 1bec8b5f22f0..496b8b84e455 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/clk.h>
@@ -36,26 +37,36 @@ MODULE_AUTHOR("Alexander Shishkin");
struct tracectx {
unsigned int etb_bufsz;
void __iomem *etb_regs;
- void __iomem *etm_regs;
+ void __iomem **etm_regs;
+ int etm_regs_count;
unsigned long flags;
int ncmppairs;
int etm_portsz;
+ u32 etb_fc;
+ unsigned long range_start;
+ unsigned long range_end;
+ unsigned long data_range_start;
+ unsigned long data_range_end;
+ bool dump_initial_etb;
struct device *dev;
struct clk *emu_clk;
struct mutex mutex;
};
-static struct tracectx tracer;
+static struct tracectx tracer = {
+ .range_start = (unsigned long)_stext,
+ .range_end = (unsigned long)_etext,
+};
static inline bool trace_isrunning(struct tracectx *t)
{
return !!(t->flags & TRACER_RUNNING);
}
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
unsigned long start, unsigned long end, int exclude, int data)
{
- u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+ u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
ETMAAT_NOVALCMP;
if (n < 1 || n > t->ncmppairs)
@@ -71,95 +82,155 @@ static int etm_setup_address_range(struct tracectx *t, int n,
flags |= ETMAAT_IEXEC;
/* first comparator for the range */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
- etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+ etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+ etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
/* second comparator is right next to it */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
- etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
- flags = exclude ? ETMTE_INCLEXCL : 0;
- etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+ etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+ etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+ if (data) {
+ flags = exclude ? ETMVDC3_EXCLONLY : 0;
+ if (exclude)
+ n += 8;
+ etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+ } else {
+ flags = exclude ? ETMTE_INCLEXCL : 0;
+ etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+ }
return 0;
}
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
{
u32 v;
unsigned long timeout = TRACER_TIMEOUT;
- etb_unlock(t);
-
- etb_writel(t, 0, ETBR_FORMATTERCTRL);
- etb_writel(t, 1, ETBR_CTRL);
-
- etb_lock(t);
-
- /* configure etm */
v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
if (t->flags & TRACER_CYCLE_ACC)
v |= ETMCTRL_CYCLEACCURATE;
- etm_unlock(t);
+ if (t->flags & TRACER_TRACE_DATA)
+ v |= ETMCTRL_DATA_DO_ADDR;
+
+ etm_unlock(t, id);
- etm_writel(t, v, ETMR_CTRL);
+ etm_writel(t, id, v, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+ while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_setup_address_range(t, 1, (unsigned long)_stext,
- (unsigned long)_etext, 0, 0);
- etm_writel(t, 0, ETMR_TRACEENCTRL2);
- etm_writel(t, 0, ETMR_TRACESSCTRL);
- etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+ if (t->range_start || t->range_end)
+ etm_setup_address_range(t, id, 1,
+ t->range_start, t->range_end, 0, 0);
+ else
+ etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+ etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+ etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+ etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+ etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+ etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+ if (t->data_range_start || t->data_range_end)
+ etm_setup_address_range(t, id, 2, t->data_range_start,
+ t->data_range_end, 0, 1);
+ else
+ etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+ etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
v &= ~ETMCTRL_PROGRAM;
v |= ETMCTRL_PORTSEL;
- etm_writel(t, v, ETMR_CTRL);
+ etm_writel(t, id, v, ETMR_CTRL);
timeout = TRACER_TIMEOUT;
- while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+ while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_lock(t);
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+ int ret;
+ int id;
+ u32 etb_fc = t->etb_fc;
+
+ etb_unlock(t);
+
+ t->dump_initial_etb = false;
+ etb_writel(t, 0, ETBR_WRITEADDR);
+ etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+ etb_writel(t, 1, ETBR_CTRL);
+
+ etb_lock(t);
+
+ /* configure etm(s) */
+ for (id = 0; id < t->etm_regs_count; id++) {
+ ret = trace_start_etm(t, id);
+ if (ret)
+ return ret;
+ }
t->flags |= TRACER_RUNNING;
return 0;
}
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
{
unsigned long timeout = TRACER_TIMEOUT;
- etm_unlock(t);
+ etm_unlock(t, id);
- etm_writel(t, 0x440, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+ etm_writel(t, id, 0x441, ETMR_CTRL);
+ while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_lock(t);
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+ int id;
+ int ret;
+ unsigned long timeout = TRACER_TIMEOUT;
+ u32 etb_fc = t->etb_fc;
+
+ for (id = 0; id < t->etm_regs_count; id++) {
+ ret = trace_stop_etm(t, id);
+ if (ret)
+ return ret;
+ }
etb_unlock(t);
- etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+ if (etb_fc) {
+ etb_fc |= ETBFF_STOPFL;
+ etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+ }
+ etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
timeout = TRACER_TIMEOUT;
while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -184,24 +255,15 @@ static int trace_stop(struct tracectx *t)
static int etb_getdatalen(struct tracectx *t)
{
u32 v;
- int rp, wp;
+ int wp;
v = etb_readl(t, ETBR_STATUS);
if (v & 1)
return t->etb_bufsz;
- rp = etb_readl(t, ETBR_READADDR);
wp = etb_readl(t, ETBR_WRITEADDR);
-
- if (rp > wp) {
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- return 0;
- }
-
- return wp - rp;
+ return wp;
}
/* sysrq+v will always stop the running trace and leave it at that */
@@ -234,21 +296,18 @@ static void etm_dump(void)
printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
printk(KERN_INFO "\n--- ETB buffer end ---\n");
- /* deassert the overflow bit */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
etb_lock(t);
}
static void sysrq_etm_dump(int key)
{
+ if (!mutex_trylock(&tracer.mutex)) {
+ printk(KERN_INFO "Tracing hardware busy\n");
+ return;
+ }
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
etm_dump();
+ mutex_unlock(&tracer.mutex);
}
static struct sysrq_key_op sysrq_etm_op = {
@@ -275,6 +334,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
struct tracectx *t = file->private_data;
u32 first = 0;
u32 *buf;
+ int wpos;
+ int skip;
+ long wlength;
+ loff_t pos = *ppos;
mutex_lock(&t->mutex);
@@ -286,31 +349,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
etb_unlock(t);
total = etb_getdatalen(t);
+ if (total == 0 && t->dump_initial_etb)
+ total = t->etb_bufsz;
if (total == t->etb_bufsz)
first = etb_readl(t, ETBR_WRITEADDR);
+ if (pos > total * 4) {
+ skip = 0;
+ wpos = total;
+ } else {
+ skip = (int)pos % 4;
+ wpos = (int)pos / 4;
+ }
+ total -= wpos;
+ first = (first + wpos) % t->etb_bufsz;
+
etb_writel(t, first, ETBR_READADDR);
- length = min(total * 4, (int)len);
- buf = vmalloc(length);
+ wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+ length = min(total * 4 - skip, (int)len);
+ buf = vmalloc(wlength * 4);
- dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+ dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+ length, pos, wlength, first);
+ dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
- for (i = 0; i < length / 4; i++)
+ for (i = 0; i < wlength; i++)
buf[i] = etb_readl(t, ETBR_READMEM);
- /* the only way to deassert overflow bit in ETB status is this */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_WRITEADDR);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
etb_lock(t);
- length -= copy_to_user(data, buf, length);
+ length -= copy_to_user(data, (u8 *)buf + skip, length);
vfree(buf);
+ *ppos = pos + length;
out:
mutex_unlock(&t->mutex);
@@ -347,28 +418,17 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
if (ret)
goto out;
+ mutex_lock(&t->mutex);
t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
if (!t->etb_regs) {
ret = -ENOMEM;
goto out_release;
}
+ t->dev = &dev->dev;
+ t->dump_initial_etb = true;
amba_set_drvdata(dev, t);
- etb_miscdev.parent = &dev->dev;
-
- ret = misc_register(&etb_miscdev);
- if (ret)
- goto out_unmap;
-
- t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
- if (IS_ERR(t->emu_clk)) {
- dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
- return -EFAULT;
- }
-
- clk_enable(t->emu_clk);
-
etb_unlock(t);
t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -377,6 +437,20 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
etb_writel(t, 0, ETBR_CTRL);
etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
etb_lock(t);
+ mutex_unlock(&t->mutex);
+
+ etb_miscdev.parent = &dev->dev;
+
+ ret = misc_register(&etb_miscdev);
+ if (ret)
+ goto out_unmap;
+
+ /* Get optional clock. Currently used to select clock source on omap3 */
+ t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+ if (IS_ERR(t->emu_clk))
+ dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+ else
+ clk_enable(t->emu_clk);
dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
@@ -384,10 +458,13 @@ out:
return ret;
out_unmap:
+ mutex_lock(&t->mutex);
amba_set_drvdata(dev, NULL);
iounmap(t->etb_regs);
+ t->etb_regs = NULL;
out_release:
+ mutex_unlock(&t->mutex);
amba_release_regions(dev);
return ret;
@@ -402,8 +479,10 @@ static int etb_remove(struct amba_device *dev)
iounmap(t->etb_regs);
t->etb_regs = NULL;
- clk_disable(t->emu_clk);
- clk_put(t->emu_clk);
+ if (!IS_ERR(t->emu_clk)) {
+ clk_disable(t->emu_clk);
+ clk_put(t->emu_clk);
+ }
amba_release_regions(dev);
@@ -447,7 +526,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
return -EINVAL;
mutex_lock(&tracer.mutex);
- ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+ if (!tracer.etb_regs)
+ ret = -ENODEV;
+ else
+ ret = value ? trace_start(&tracer) : trace_stop(&tracer);
mutex_unlock(&tracer.mutex);
return ret ? : n;
@@ -462,36 +544,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
{
u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
int datalen;
+ int id;
+ int ret;
- etb_unlock(&tracer);
- datalen = etb_getdatalen(&tracer);
- etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
- etb_ra = etb_readl(&tracer, ETBR_READADDR);
- etb_st = etb_readl(&tracer, ETBR_STATUS);
- etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
- etb_lock(&tracer);
-
- etm_unlock(&tracer);
- etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
- etm_st = etm_readl(&tracer, ETMR_STATUS);
- etm_lock(&tracer);
+ mutex_lock(&tracer.mutex);
+ if (tracer.etb_regs) {
+ etb_unlock(&tracer);
+ datalen = etb_getdatalen(&tracer);
+ etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+ etb_ra = etb_readl(&tracer, ETBR_READADDR);
+ etb_st = etb_readl(&tracer, ETBR_STATUS);
+ etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+ etb_lock(&tracer);
+ } else {
+ etb_wa = etb_ra = etb_st = etb_fc = ~0;
+ datalen = -1;
+ }
- return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+ ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
"ETBR_WRITEADDR:\t%08x\n"
"ETBR_READADDR:\t%08x\n"
"ETBR_STATUS:\t%08x\n"
- "ETBR_FORMATTERCTRL:\t%08x\n"
- "ETMR_CTRL:\t%08x\n"
- "ETMR_STATUS:\t%08x\n",
+ "ETBR_FORMATTERCTRL:\t%08x\n",
datalen,
tracer.ncmppairs,
etb_wa,
etb_ra,
etb_st,
- etb_fc,
+ etb_fc
+ );
+
+ for (id = 0; id < tracer.etm_regs_count; id++) {
+ etm_unlock(&tracer, id);
+ etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+ etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+ etm_lock(&tracer, id);
+ ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+ "ETMR_STATUS:\t%08x\n",
etm_ctrl,
etm_st
);
+ }
+ mutex_unlock(&tracer.mutex);
+
+ return ret;
}
static struct kobj_attribute trace_info_attr =
@@ -530,42 +626,121 @@ static ssize_t trace_mode_store(struct kobject *kobj,
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
+static ssize_t trace_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%08lx %08lx\n",
+ tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long range_start, range_end;
+
+ if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.range_start = range_start;
+ tracer.range_end = range_end;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+
+static struct kobj_attribute trace_range_attr =
+ __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long range_start;
+ u64 range_end;
+ mutex_lock(&tracer.mutex);
+ range_start = tracer.data_range_start;
+ range_end = tracer.data_range_end;
+ if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+ range_end = 0x100000000ULL;
+ mutex_unlock(&tracer.mutex);
+ return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long range_start;
+ u64 range_end;
+
+ if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.data_range_start = range_start;
+ tracer.data_range_end = (unsigned long)range_end;
+ if (range_end)
+ tracer.flags |= TRACER_TRACE_DATA;
+ else
+ tracer.flags &= ~TRACER_TRACE_DATA;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+
+static struct kobj_attribute trace_data_range_attr =
+ __ATTR(trace_data_range, 0644,
+ trace_data_range_show, trace_data_range_store);
+
static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
+ void __iomem **new_regs;
+ int new_count;
- if (t->etm_regs) {
- dev_dbg(&dev->dev, "ETM already initialized\n");
- ret = -EBUSY;
+ mutex_lock(&t->mutex);
+ new_count = t->etm_regs_count + 1;
+ new_regs = krealloc(t->etm_regs,
+ sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
+
+ if (!new_regs) {
+ dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+ ret = -ENOMEM;
goto out;
}
+ t->etm_regs = new_regs;
ret = amba_request_regions(dev, NULL);
if (ret)
goto out;
- t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etm_regs) {
+ t->etm_regs[t->etm_regs_count] =
+ ioremap_nocache(dev->res.start, resource_size(&dev->res));
+ if (!t->etm_regs[t->etm_regs_count]) {
ret = -ENOMEM;
goto out_release;
}
- amba_set_drvdata(dev, t);
+ amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
- mutex_init(&t->mutex);
- t->dev = &dev->dev;
- t->flags = TRACER_CYCLE_ACC;
+ t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA;
t->etm_portsz = 1;
- etm_unlock(t);
- (void)etm_readl(t, ETMMR_PDSR);
+ etm_unlock(t, t->etm_regs_count);
+ (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
/* dummy first read */
- (void)etm_readl(&tracer, ETMMR_OSSRR);
+ (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
- t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
- etm_writel(t, 0x440, ETMR_CTRL);
- etm_lock(t);
+ t->ncmppairs = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE) & 0xf;
+ etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+ etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+ etm_lock(t, t->etm_regs_count);
ret = sysfs_create_file(&dev->dev.kobj,
&trace_running_attr.attr);
@@ -581,36 +756,68 @@ static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id
if (ret)
dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
- dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+ ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+ ret = sysfs_create_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_data_range in sysfs\n");
+
+ dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+ /* Enable formatter if there are multiple trace sources */
+ if (new_count > 1)
+ t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+ t->etm_regs_count = new_count;
out:
+ mutex_unlock(&t->mutex);
return ret;
out_unmap:
amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
+ iounmap(t->etm_regs[t->etm_regs_count]);
out_release:
amba_release_regions(dev);
+ mutex_unlock(&t->mutex);
return ret;
}
static int etm_remove(struct amba_device *dev)
{
- struct tracectx *t = amba_get_drvdata(dev);
+ int i;
+ struct tracectx *t = &tracer;
+ void __iomem *etm_regs = amba_get_drvdata(dev);
+
+ sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
- t->etm_regs = NULL;
+ mutex_lock(&t->mutex);
+ for (i = 0; i < t->etm_regs_count; i++)
+ if (t->etm_regs[i] == etm_regs)
+ break;
+ for (; i < t->etm_regs_count - 1; i++)
+ t->etm_regs[i] = t->etm_regs[i + 1];
+ t->etm_regs_count--;
+ if (!t->etm_regs_count) {
+ kfree(t->etm_regs);
+ t->etm_regs = NULL;
+ }
+ mutex_unlock(&t->mutex);
+ iounmap(etm_regs);
amba_release_regions(dev);
- sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
return 0;
}
@@ -619,6 +826,10 @@ static struct amba_id etm_ids[] = {
.id = 0x0003b921,
.mask = 0x0007ffff,
},
+ {
+ .id = 0x0003b950,
+ .mask = 0x0007ffff,
+ },
{ 0, 0 },
};
@@ -636,6 +847,8 @@ static int __init etm_init(void)
{
int retval;
+ mutex_init(&tracer.mutex);
+
retval = amba_driver_register(&etb_driver);
if (retval) {
printk(KERN_ERR "Failed to register etb\n");
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 0f107dcb0347..136e8376a3eb 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -9,6 +9,8 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
@@ -101,6 +103,25 @@ static struct syscore_ops leds_syscore_ops = {
.resume = leds_resume,
};
+static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ leds_event(led_idle_start);
+ break;
+ case IDLE_END:
+ leds_event(led_idle_end);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block leds_idle_nb = {
+ .notifier_call = leds_idle_notifier,
+};
+
static int __init leds_init(void)
{
int ret;
@@ -109,8 +130,12 @@ static int __init leds_init(void)
ret = sysdev_register(&leds_device);
if (ret == 0)
ret = sysdev_create_file(&leds_device, &attr_event);
- if (ret == 0)
+
+ if (ret == 0) {
register_syscore_ops(&leds_syscore_ops);
+ idle_notifier_register(&leds_idle_nb);
+ }
+
return ret;
}
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index cc2020c2c709..09326b62780d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -89,6 +89,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
break;
case R_ARM_ABS32:
+ case R_ARM_TARGET1:
*(u32 *)loc += sym->st_value;
break;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c9d11eaf4384..d33f09378458 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -31,9 +31,9 @@
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
+#include <linux/console.h>
#include <asm/cacheflush.h>
-#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
@@ -63,6 +63,18 @@ static volatile int hlt_counter;
#include <mach/system.h>
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+ smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+ dump_stack();
+}
+#endif
+
void disable_hlt(void)
{
hlt_counter++;
@@ -92,8 +104,37 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+ printk("\n");
+ pr_emerg("Restarting %s\n", linux_banner);
+ if (console_trylock()) {
+ console_unlock();
+ return;
+ }
+
+ mdelay(50);
+
+ local_irq_disable();
+ if (!console_trylock())
+ pr_emerg("arm_restart: Console was locked! Busting\n");
+ else
+ pr_emerg("arm_restart: Console was locked!\n");
+ console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
void arm_machine_restart(char mode, const char *cmd)
{
+ /* Flush the console to make sure all the relevant messages make it
+ * out to the console drivers */
+ arm_machine_flush_console();
+
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
@@ -183,8 +224,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
+ idle_notifier_call_chain(IDLE_START);
tick_nohz_stop_sched_tick(1);
- leds_event(led_idle_start);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
@@ -212,8 +253,8 @@ void cpu_idle(void)
local_irq_enable();
}
}
- leds_event(led_idle_end);
tick_nohz_restart_sched_tick();
+ idle_notifier_call_chain(IDLE_END);
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -256,6 +297,77 @@ void machine_restart(char *cmd)
arm_pm_restart(reboot_mode, cmd);
}
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+ int i, j;
+ int nlines;
+ u32 *p;
+
+ /*
+ * don't attempt to dump non-kernel addresses or
+ * values that are probably just small negative numbers
+ */
+ if (addr < PAGE_OFFSET || addr > -256UL)
+ return;
+
+ printk("\n%s: %#lx:\n", name, addr);
+
+ /*
+ * round address down to a 32 bit boundary
+ * and always dump a multiple of 32 bytes
+ */
+ p = (u32 *)(addr & ~(sizeof(u32) - 1));
+ nbytes += (addr & (sizeof(u32) - 1));
+ nlines = (nbytes + 31) / 32;
+
+
+ for (i = 0; i < nlines; i++) {
+ /*
+ * just display low 16 bits of address to keep
+ * each line of the dump < 80 characters
+ */
+ printk("%04lx ", (unsigned long)p & 0xffff);
+ for (j = 0; j < 8; j++) {
+ u32 data;
+ if (probe_kernel_address(p, data)) {
+ printk(" ********");
+ } else {
+ printk(" %08x", data);
+ }
+ ++p;
+ }
+ printk("\n");
+ }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+ show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+ show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+ show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+ show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+ show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+ show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+ show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+ show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+ show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+ show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+ show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+ show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+ show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+ show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+ show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+ set_fs(fs);
+}
+
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -315,6 +427,8 @@ void __show_regs(struct pt_regs *regs)
printk("Control: %08x%s\n", ctrl, buf);
}
#endif
+
+ show_extra_register_data(regs, 128);
}
void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3e42faf13620..5af79bd6a8bf 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1002,7 +1002,11 @@ static int c_show(struct seq_file *m, void *v)
cpu_name, read_cpuid_id() & 15, elf_platform);
#if defined(CONFIG_SMP)
+# if defined(CONFIG_REPORT_PRESENT_CPUS)
+ for_each_present_cpu(i) {
+# else
for_each_online_cpu(i) {
+# endif
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index dc902f2c6845..e87f5f243012 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -94,7 +94,6 @@ ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
- bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d88ff0230e82..d1fe8b4abbc3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -53,6 +53,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_CPU_BACKTRACE,
};
int __cpuinit __cpu_up(unsigned int cpu)
@@ -301,17 +302,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
*/
platform_secondary_init(cpu);
- /*
- * Enable local interrupts.
- */
notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
-
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
calibrate_delay();
@@ -323,8 +314,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
- while (!cpu_active(cpu))
- cpu_relax();
+
+ /*
+ * Setup the percpu timer for this CPU.
+ */
+ percpu_timer_setup();
+
+ local_irq_enable();
+ local_fiq_enable();
/*
* OK, it's off to the idle thread for us
@@ -412,6 +409,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -562,6 +560,58 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't output double cpu dump infos.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpu_clear(this_cpu, backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpu_isset(cpu, backtrace_mask)) {
+ raw_spin_lock(&backtrace_lock);
+ pr_warning("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -594,6 +644,10 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
ipi_cpu_stop(cpu);
break;
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
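The new IPI_CPU_BACKTRACE path above is driven from arch_trigger_all_cpu_backtrace(), which the process.c hunk earlier in this series wires to smp_send_all_cpu_backtrace() on SMP. A throwaway test-module sketch follows; it assumes the declaration is picked up via <asm/irq.h> (per the irq.h change in this series), which is not shown in this hunk.

/*
 * Throwaway sketch of a module exercising the all-CPU backtrace path added
 * above. Assumes arch_trigger_all_cpu_backtrace() is declared via
 * <asm/irq.h> as part of this series; the module itself is not in the patch.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <asm/irq.h>

static int __init backtrace_test_init(void)
{
	pr_info("backtrace_test: dumping all CPUs\n");
	arch_trigger_all_cpu_backtrace();
	return 0;
}

static void __exit backtrace_test_exit(void)
{
}

module_init(backtrace_test_init);
module_exit(backtrace_test_exit);
MODULE_LICENSE("GPL");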
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 01c186222f3b..1953102bec9f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,13 +10,17 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/percpu.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
@@ -24,7 +28,9 @@
/* set up by the platform code */
void __iomem *twd_base;
+static struct clk *twd_clk;
static unsigned long twd_timer_rate;
+static DEFINE_PER_CPU(struct clock_event_device *, twd_ce);
static void twd_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
@@ -80,6 +86,48 @@ int twd_timer_ack(void)
return 0;
}
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+ twd_timer_rate = clk_get_rate(twd_clk);
+
+ clockevents_update_freq(__get_cpu_var(twd_ce), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+ unsigned long state, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+
+ /*
+ * The twd clock events must be reprogrammed to account for the new
+ * frequency. The timer is local to a cpu, so cross-call to the
+ * changing cpu.
+ */
+ if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+ smp_call_function_single(freqs->cpu, twd_update_frequency,
+ NULL, 1);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+ .notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+ if (!IS_ERR_OR_NULL(twd_clk))
+ return cpufreq_register_notifier(&twd_cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return 0;
+}
+core_initcall(twd_cpufreq_init);
+
static void __cpuinit twd_calibrate_rate(void)
{
unsigned long count;
@@ -119,12 +167,39 @@ static void __cpuinit twd_calibrate_rate(void)
}
}
+static struct clk *twd_get_clock(void)
+{
+ struct clk *clk;
+ int err;
+
+ clk = clk_get_sys("smp_twd", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
+ return clk;
+ }
+
+ err = clk_enable(clk);
+ if (err) {
+ pr_err("smp_twd: clock failed to enable: %d\n", err);
+ clk_put(clk);
+ return ERR_PTR(err);
+ }
+
+ return clk;
+}
+
/*
* Setup the local clock events for a CPU.
*/
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
- twd_calibrate_rate();
+ if (!twd_clk)
+ twd_clk = twd_get_clock();
+
+ if (!IS_ERR_OR_NULL(twd_clk))
+ twd_timer_rate = clk_get_rate(twd_clk);
+ else
+ twd_calibrate_rate();
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -132,12 +207,11 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clk->rating = 350;
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
- clk->shift = 20;
- clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
- clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
- clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
- clockevents_register_device(clk);
+ __get_cpu_var(twd_ce) = clk;
+
+ clockevents_config_and_register(clk, twd_timer_rate,
+ 0xf, 0xffffffff);
/* Make sure our local interrupt controller has this enabled */
gic_enable_ppi(clk->irq);
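The deleted lines above set clk->mult and clk->shift by hand (shift = 20, mult = div_sc(twd_timer_rate, NSEC_PER_SEC, shift)); clockevents_config_and_register() now derives equivalent values from the rate it is given. A standalone sketch of that fixed-point arithmetic, with an assumed 250 MHz TWD rate purely for illustration:

/*
 * Standalone illustration of the scaling the removed code set up manually:
 * mult = (rate << shift) / NSEC_PER_SEC, then cycles = (ns * mult) >> shift.
 * Not kernel code; the 250 MHz rate is an assumed example value.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t rate = 250000000ULL;		/* assumed TWD rate: 250 MHz */
	unsigned int shift = 20;
	uint64_t mult = (rate << shift) / NSEC_PER_SEC;
	uint64_t delta_ns = 1000000ULL;		/* program a 1 ms event */
	uint64_t cycles = (delta_ns * mult) >> shift;

	printf("mult=%llu shift=%u -> %llu ns = %llu timer cycles\n",
	       (unsigned long long)mult, shift,
	       (unsigned long long)delta_ns, (unsigned long long)cycles);
	return 0;
}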
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc9f9da782cb..4ef9f0d04e5a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -466,7 +466,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end > vma->vm_end)
end = vma->vm_end;
- flush_cache_user_range(vma, start, end);
+ up_read(&mm->mmap_sem);
+ flush_cache_user_range(start, end);
+ return;
}
up_read(&mm->mmap_sem);
}
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index cf73a7f742dd..775132a100f0 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,7 +6,7 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- delay.o findbit.o memchr.o memcpy.o \
+ findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
@@ -17,6 +17,10 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
+ifneq ($(CONFIG_ARCH_PROVIDES_UDELAY),y)
+ lib-y += delay.o
+endif
+
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 37178a8559b1..51e1583265b2 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -42,6 +42,7 @@ int pxa_pm_enter(suspend_state_t state)
/* *** go zzz *** */
pxa_cpu_pm_fns->enter(state);
+ cpu_init();
if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
/* after sleeping, validate the checksum */
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index bf85b8b259d5..e4512cdb9236 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -78,6 +78,8 @@ static int sa11x0_pm_enter(suspend_state_t state)
/* go zzz */
cpu_suspend(0, sa1100_finish_suspend);
+ cpu_init();
+
/*
* Ensure not to come back here if it wasn't intended
*/
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d82ebab50e11..6778958942fc 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -2,37 +2,86 @@ if ARCH_TEGRA
comment "NVIDIA Tegra options"
-choice
- prompt "Select Tegra processor family for target system"
-
config ARCH_TEGRA_2x_SOC
- bool "Tegra 2 family"
+ bool "Tegra 2 family SOC"
+ default y
+ depends on !ARCH_TEGRA_3x_SOC
+ select ARCH_TEGRA_HAS_PCIE
select CPU_V7
select ARM_GIC
select ARCH_REQUIRE_GPIOLIB
select USB_ARCH_HAS_EHCI if USB_SUPPORT
select USB_ULPI if USB_SUPPORT
select USB_ULPI_VIEWPORT if USB_SUPPORT
+ select ARM_ERRATA_742230 if SMP
+ select ARCH_SUPPORTS_MSI if TEGRA_PCI
+ select PCI_MSI if TEGRA_PCI
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
-endchoice
+config ARCH_TEGRA_3x_SOC
+ bool "Tegra 3 family SOC"
+ select ARCH_TEGRA_HAS_PCIE
+ select ARCH_TEGRA_HAS_SATA
+ select ARCH_TEGRA_HAS_DUAL_3D
+ select ARCH_TEGRA_HAS_DUAL_CPU_CLUSTERS
+ select CPU_V7
+ select ARM_GIC
+ select GIC_SET_MULTIPLE_CPUS
+ select ARCH_REQUIRE_GPIOLIB
+ select USB_ARCH_HAS_EHCI if USB_SUPPORT
+ select USB_EHCI_TEGRA if USB_SUPPORT
+ select USB_ULPI if USB_SUPPORT
+ select USB_ULPI_VIEWPORT if USB_SUPPORT
+ select REPORT_PRESENT_CPUS if TEGRA_AUTO_HOTPLUG
+ select ARCH_SUPPORTS_MSI if TEGRA_PCI
+ select PCI_MSI if TEGRA_PCI
+ help
+ Support for NVIDIA Tegra 3 family of SoCs, based upon the
+ ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
+
+config ARCH_TEGRA_HAS_DUAL_3D
+ bool
+
+config ARCH_TEGRA_HAS_DUAL_CPU_CLUSTERS
+ bool
+
+config ARCH_TEGRA_HAS_PCIE
+ bool
+
+config ARCH_TEGRA_HAS_SATA
+ bool
config TEGRA_PCI
- bool "PCI Express support"
+ bool "PCIe host controller driver"
select PCI
+ depends on ARCH_TEGRA_HAS_PCIE
+ help
+ Adds the PCIe host controller driver for Tegra-based systems
comment "Tegra board type"
config MACH_HARMONY
bool "Harmony board"
+ depends on ARCH_TEGRA_2x_SOC
+ select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
+ help
+ Support for NVIDIA Harmony development platform
+
+config MACH_VENTANA
+ bool "Ventana board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
- Support for nVidia Harmony development platform
+ Support for NVIDIA Ventana development platform
config MACH_KAEN
bool "Kaen board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_SEABOARD
select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
@@ -40,11 +89,13 @@ config MACH_KAEN
config MACH_PAZ00
bool "Paz00 board"
+ depends on ARCH_TEGRA_2x_SOC
help
Support for the Toshiba AC100/Dynabook AZ netbook
config MACH_SEABOARD
bool "Seaboard board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
Support for nVidia Seaboard development platform. It will
@@ -59,16 +110,73 @@ config MACH_TEGRA_DT
config MACH_TRIMSLICE
bool "TrimSlice board"
+ depends on ARCH_TEGRA_2x_SOC
select TEGRA_PCI
help
Support for CompuLab TrimSlice platform
config MACH_WARIO
bool "Wario board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_SEABOARD
help
Support for the Wario version of Seaboard
+config MACH_WHISTLER
+ bool "Whistler board"
+ depends on ARCH_TEGRA_2x_SOC
+ select MACH_HAS_SND_SOC_TEGRA_WM8753 if SND_SOC
+ select MACH_HAS_SND_SOC_TEGRA_TLV320AIC326X if SND_SOC
+ help
+ Support for NVIDIA Whistler development platform
+
+config MACH_ARUBA
+ bool "Aruba board"
+ depends on ARCH_TEGRA_3x_SOC
+ select TEGRA_FPGA_PLATFORM
+ help
+ Support for NVIDIA Aruba2 FPGA development platform
+
+config MACH_CARDHU
+ bool "Cardhu board"
+ depends on ARCH_TEGRA_3x_SOC
+ select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
+ help
+ Support for NVIDIA Cardhu development platform
+
+config MACH_TEGRA_ENTERPRISE
+ bool "Enterprise board"
+ depends on ARCH_TEGRA_3x_SOC
+ select MACH_HAS_SND_SOC_TEGRA_MAX98088 if SND_SOC
+ help
+ Support for NVIDIA Enterprise development platform
+
+choice
+ prompt "Tegra platform type"
+ default TEGRA_SILICON_PLATFORM
+
+config TEGRA_SILICON_PLATFORM
+ bool "Silicon"
+ help
+ This enables support for a Tegra silicon platform.
+
+config TEGRA_SIMULATION_PLATFORM
+ bool "Simulation"
+ help
+ This enables support for a Tegra simulation platform.
+ Select this only if you are an NVIDIA developer working
+ on a simulation platform.
+
+config TEGRA_FPGA_PLATFORM
+ bool "FPGA"
+ help
+ This enables support for a Tegra FPGA platform.
+ Select this only if you are an NVIDIA developer working
+ on an FPGA platform.
+endchoice
+
+source "arch/arm/mach-tegra/p852/Kconfig"
+
choice
prompt "Low-level debug console UART"
default TEGRA_DEBUG_UART_NONE
@@ -78,19 +186,23 @@ config TEGRA_DEBUG_UART_NONE
config TEGRA_DEBUG_UARTA
bool "UART-A"
+ depends on DEBUG_LL
config TEGRA_DEBUG_UARTB
bool "UART-B"
+ depends on DEBUG_LL
config TEGRA_DEBUG_UARTC
bool "UART-C"
+ depends on DEBUG_LL
config TEGRA_DEBUG_UARTD
bool "UART-D"
+ depends on DEBUG_LL
config TEGRA_DEBUG_UARTE
bool "UART-E"
-
+ depends on DEBUG_LL
endchoice
config TEGRA_SYSTEM_DMA
@@ -100,7 +212,248 @@ config TEGRA_SYSTEM_DMA
Adds system DMA functionality for NVIDIA Tegra SoCs, used by
several Tegra device drivers
+config TEGRA_PWM
+ tristate "Enable PWM driver"
+ select HAVE_PWM
+ help
+ Enable support for the Tegra PWM controller(s).
+
+config TEGRA_FIQ_DEBUGGER
+ bool "Enable the FIQ serial debugger on Tegra"
+ default n
+ select FIQ_DEBUGGER
+ help
+ Enables the FIQ serial debugger on Tegra
+
+config TEGRA_CARDHU_DSI
+ bool "Support DSI panel on Cardhu"
+ depends on MACH_CARDHU
+ select TEGRA_DSI
+ help
+ Support for the DSI panel on NVIDIA Cardhu
+
config TEGRA_EMC_SCALING_ENABLE
bool "Enable scaling the memory frequency"
+ depends on TEGRA_SILICON_PLATFORM
+ default n
+config TEGRA_CPU_DVFS
+ bool "Enable voltage scaling on Tegra CPU"
+ depends on TEGRA_SILICON_PLATFORM
+ default y
+
+config TEGRA_CORE_DVFS
+ bool "Enable voltage scaling on Tegra core"
+ depends on TEGRA_SILICON_PLATFORM
+ depends on TEGRA_CPU_DVFS
+ default y
+
+config TEGRA_IOVMM_GART
+ bool "Enable I/O virtual memory manager for GART"
+ depends on ARCH_TEGRA_2x_SOC
+ default y
+ select TEGRA_IOVMM
+ help
+ Enables support for remapping discontiguous physical memory
+ shared with the operating system into contiguous I/O virtual
+ space through the GART (Graphics Address Relocation Table)
+ hardware included on Tegra SoCs.
+
+config TEGRA_IOVMM_SMMU
+ bool "Enable I/O virtual memory manager for SMMU"
+ depends on ARCH_TEGRA_3x_SOC
+ default y
+ select TEGRA_IOVMM
+ help
+ Enables support for remapping discontiguous physical memory
+ shared with the operating system into contiguous I/O virtual
+ space through the SMMU (System Memory Management Unit)
+ hardware included on Tegra SoCs.
+
+config TEGRA_SMMU_BASE_AT_E0000000
+ bool "Force SMMU IOVA base to 0xe0000000"
+ depends on TEGRA_IOVMM_SMMU
+ default n
+ help
+ Forces SMMU IOVA base address to 0xe0000000 for debug purposes
+ only. Select n for production systems.
+
+config TEGRA_IOVMM_SMMU_SYSFS
+ bool "Enable SMMU register access for debugging"
+ depends on TEGRA_IOVMM_SMMU
+ default n
+ help
+ Enables SMMU register access through /sys/devices/smmu/* files.
+
+config TEGRA_IOVMM
+ bool
+
+config TEGRA_AVP_KERNEL_ON_MMU
+ bool "Use AVP MMU to relocate AVP kernel"
+ depends on ARCH_TEGRA_2x_SOC
+ default y
+ help
+ Use AVP MMU to relocate AVP kernel (nvrm_avp.bin).
+
+config TEGRA_AVP_KERNEL_ON_SMMU
+ bool "Use SMMU to relocate AVP kernel"
+ depends on TEGRA_IOVMM_SMMU
+ default y
+ help
+ Use SMMU to relocate AVP kernel (nvrm_avp.bin).
+
+config TEGRA_ARB_SEMAPHORE
+ bool
+
+config TEGRA_THERMAL_THROTTLE
+ bool "Enable throttling of CPU speed on overtemp"
+ depends on TEGRA_SILICON_PLATFORM
+ depends on CPU_FREQ
+ default y
+ help
+ Also requires enabling a temperature sensor such as NCT1008.
+
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+ Enables Power/Reset/Carddetect function abstraction
+
+config TEGRA_CLOCK_DEBUG_WRITE
+ bool "Enable debugfs write access to clock tree"
+ depends on DEBUG_FS
+ default n
+
+config TEGRA_CLUSTER_CONTROL
+ bool
+ depends on ARCH_TEGRA_HAS_DUAL_CPU_CLUSTERS
+ default y if PM_SLEEP
+
+config TEGRA_AUTO_HOTPLUG
+ bool "Enable automatic CPU hot-plugging"
+ depends on HOTPLUG_CPU && CPU_FREQ && !ARCH_CPU_PROBE_RELEASE && !ARCH_TEGRA_2x_SOC
+ default y
+ help
+ This option enables turning CPUs off/on and switching tegra
+ high/low power CPU clusters automatically, corresponding to
+ CPU frequency scaling.
+
+config TEGRA_MC_PROFILE
+ tristate "Enable profiling memory controller utilization"
+ default y
+ help
+ When enabled, provides a mechanism to perform statistical
+ sampling of the memory controller usage on a client-by-client
+ basis, and reports the log through sysfs.
+
+config TEGRA_EDP_LIMITS
+ bool "Enforce electrical design limits"
+ depends on TEGRA_SILICON_PLATFORM
+ depends on CPU_FREQ
+ default y if ARCH_TEGRA_3x_SOC
+ default n
+ help
+ Limit maximum CPU frequency based on temperature and number
+ of on-line CPUs to keep CPU rail current within power supply
+ capabilities.
+
+config TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+ bool "Enable EDP and thermal throttling using internal TSensor"
+ depends on TEGRA_EDP_LIMITS && ARCH_TEGRA_3x_SOC
+ help
+ When enabled, the internal TSensor is used for EDP and
+ thermal throttling on Tegra platforms.
+
+config TEGRA_EMC_TO_DDR_CLOCK
+ int "EMC to DDR clocks ratio"
+ default "2" if ARCH_TEGRA_2x_SOC
+ default "1"
+
+config TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ bool "Use conservative cpu frequency governor when device enters early suspend"
+ depends on HAS_EARLYSUSPEND && CPU_FREQ
+ default n
+ help
+ The original cpu frequency governor is restored when the device resumes.
+
+config TEGRA_LEGACY_AUDIO
+ bool "Enable Tegra Legacy Audio APIs"
+ default n
+ help
+ Say Y if you want to add support for legacy (non-ALSA) audio APIs on
+ Tegra. This will disable ALSA (ASoC) support.
+
+config TEGRA_STAT_MON
+ bool "Enable H/W statistics monitor"
+ depends on ARCH_TEGRA_2x_SOC
+ default n
+ help
+ Enables support for hardware statistics monitor for AVP.
+
+config USB_HOTPLUG
+ bool "Enabling the USB hotplug"
+ default n
+
+config TEGRA_DYNAMIC_PWRDET
+ bool "Enable dynamic activation of IO level auto-detection"
+ depends on TEGRA_SILICON_PLATFORM
+ default n
+ help
+ This option allows turning off Tegra IO level auto-detection
+ when IO power is stable. If set, auto-detection cells are active
+ only during power transitions; otherwise, the cells are always
+ active.
+
+config TEGRA_EDP_EXACT_FREQ
+ bool "Use maximum possible cpu frequency when EDP capping"
+ depends on TEGRA_EDP_LIMITS
+ default y
+ help
+ When enabled, the cpu will run at the exact frequency
+ specified in the EDP table when EDP capping is applied; when
+ disabled, the next lower cpufreq frequency will be used.
+
+config TEGRA_USB_MODEM_POWER
+ bool "Enable tegra usb modem power management"
+ default n
+ help
+ This option enables support for out-of-band remote wakeup, selective
+ suspend, and system suspend/resume.
+
+config TEGRA_BB_XMM_POWER
+ bool "Enable power driver for XMM modem"
+ default n
+ help
+ Enables the power driver which controls GPIO signals to the XMM modem.
+
+config TEGRA_BB_XMM_POWER2
+ tristate "Enable power driver for XMM modem (flashless)"
+ default n
+ help
+ Enables the power driver which controls GPIO signals to the XMM
+ modem (in the flashless configuration). A user-mode application
+ must insert this LKM to initiate the 2nd USB enumeration power
+ sequence after the modem software has been downloaded to the
+ flashless device.
+
+config TEGRA_THERMAL_SYSFS
+ bool "Enable Thermal driver to use Thermal Sysfs infrastructure"
+ depends on THERMAL
+ default y
+
+config TEGRA_PLLM_RESTRICTED
+ bool "Restrict PLLM usage as module clock source"
+ depends on !ARCH_TEGRA_2x_SOC
+ default n
+ help
+ When enabled, PLLM usage may be restricted to modules with dividers
+ capable of dividing maximum PLLM frequency at minimum voltage. When
+ disabled, PLLM is used as a clock source with no restrictions (which
+ may effectively increase the lower limit for core voltage).
+
+config TEGRA_WDT_RECOVERY
+ bool "Enable suspend/resume watchdog recovery mechanism"
+ default n
+ help
+ Enables watchdog recovery mechanism to protect against
+ suspend/resume hangs.
endif
+
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index f11b9100114a..a4d1b217c163 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -1,30 +1,114 @@
+GCOV_PROFILE := y
+
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += ahb.o
+obj-y += apbio.o
obj-y += common.o
-obj-y += devices.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += common-t2.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += common-t3.o
obj-y += io.o
obj-y += irq.o
+obj-$(CONFIG_TEGRA_GRHOST) += syncpt.o
obj-y += clock.o
obj-y += timer.o
+ifeq ($(CONFIG_ARCH_TEGRA_2x_SOC),y)
+obj-y += tegra2_clocks.o
+obj-y += timer-t2.o
+else
+obj-y += tegra3_clocks.o
+obj-y += timer-t3.o
+endif
obj-y += pinmux.o
+obj-y += devices.o
+obj-y += delay.o
obj-y += powergate.o
-obj-y += fuse.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o
-obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o
+obj-y += pm.o
+obj-$(CONFIG_TEGRA_WDT_RECOVERY) += wdt-recovery.o
+obj-$(CONFIG_PM_SLEEP) += pm-irq.o
+obj-y += gic.o
+obj-y += sleep.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += sleep-t2.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += sleep-t3.o
+obj-y += fuse.o
+obj-y += kfuse.o
+obj-y += csi.o
+obj-$(CONFIG_TEGRA_SILICON_PLATFORM) += tegra_odm_fuses.o
+obj-y += i2c_error_recovery.o
+obj-$(CONFIG_TEGRA_LEGACY_AUDIO) += tegra_i2s_audio.o
+obj-$(CONFIG_TEGRA_LEGACY_AUDIO) += tegra_spdif_audio.o
+obj-y += mc.o
+obj-$(CONFIG_TEGRA_STAT_MON) += tegra2_statmon.o
+obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
+obj-$(CONFIG_FIQ) += fiq.o
+obj-$(CONFIG_TEGRA_FIQ_DEBUGGER) += tegra_fiq_debugger.o
+obj-$(CONFIG_TEGRA_PWM) += pwm.o
+obj-$(CONFIG_TEGRA_ARB_SEMAPHORE) += arb_sema.o
+
+ifeq ($(CONFIG_TEGRA_SILICON_PLATFORM),y)
+obj-y += dvfs.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_dvfs.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += latency_allowance.o
+obj-$(CONFIG_TEGRA_EDP_LIMITS) += edp.o
+endif
+ifeq ($(CONFIG_TEGRA_SILICON_PLATFORM),y)
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_speedo.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_speedo.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_actmon.o
+endif
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_emc.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += wakeups-t2.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += wakeups-t3.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pm-t2.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += pm-t3.o
+
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += pinmux-t3-tables.o
+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
+obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
+obj-y += headsmp.o
+obj-y += reset.o
+obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
-obj-$(CONFIG_TEGRA_PCI) += pcie.o
-obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
-
+ifeq ($(CONFIG_TEGRA_AUTO_HOTPLUG),y)
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpu-tegra3.o
+endif
+obj-$(CONFIG_TEGRA_PCI) += pcie.o
+obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
+ifeq ($(CONFIG_CPU_IDLE),y)
+obj-y += cpuidle.o
+ifeq ($(CONFIG_PM_SLEEP),y)
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += cpuidle-t2.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpuidle-t3.o
+endif
+endif
+ifeq ($(CONFIG_TEGRA_THERMAL_THROTTLE),y)
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_throttle.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_throttle.o
+endif
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_thermal.o
+obj-$(CONFIG_TEGRA_IOVMM) += iovmm.o
+obj-$(CONFIG_TEGRA_IOVMM_GART) += iovmm-gart.o
+obj-$(CONFIG_TEGRA_IOVMM_SMMU) += iovmm-smmu.o
+obj-$(CONFIG_DEBUG_ICEDCC) += sysfs-dcc.o
+obj-$(CONFIG_TEGRA_CLUSTER_CONTROL) += sysfs-cluster.o
+ifeq ($(CONFIG_TEGRA_MC_PROFILE),y)
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_mc.o
+endif
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra3_tsensor.o
+obj-$(CONFIG_TEGRA_DYNAMIC_PWRDET) += powerdetect.o
+obj-$(CONFIG_TEGRA_USB_MODEM_POWER) += tegra_usb_modem_power.o
+obj-$(CONFIG_TEGRA_PCI) += pcie.o
obj-${CONFIG_MACH_HARMONY} += board-harmony.o
+obj-${CONFIG_MACH_HARMONY} += board-harmony-kbc.o
+obj-${CONFIG_MACH_HARMONY} += board-harmony-panel.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-power.o
-obj-${CONFIG_MACH_PAZ00} += board-paz00.o
-obj-${CONFIG_MACH_PAZ00} += board-paz00-pinmux.o
+obj-${CONFIG_MACH_PAZ00} += board-paz00.o
+obj-${CONFIG_MACH_PAZ00} += board-paz00-pinmux.o
obj-${CONFIG_MACH_SEABOARD} += board-seaboard.o
obj-${CONFIG_MACH_SEABOARD} += board-seaboard-pinmux.o
@@ -34,3 +118,56 @@ obj-${CONFIG_MACH_TEGRA_DT} += board-harmony-pinmux.o
obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice.o
obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice-pinmux.o
+
+obj-${CONFIG_MACH_P852} += p852/
+
+obj-${CONFIG_MACH_VENTANA} += board-ventana.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-pinmux.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-sdhci.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-power.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-panel.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-sensors.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-memory.o
+
+obj-${CONFIG_MACH_ARUBA} += board-aruba.o
+obj-${CONFIG_MACH_ARUBA} += board-aruba-panel.o
+obj-${CONFIG_MACH_ARUBA} += board-aruba-pinmux.o
+obj-${CONFIG_MACH_ARUBA} += board-aruba-power.o
+obj-${CONFIG_MACH_ARUBA} += board-aruba-sdhci.o
+obj-${CONFIG_MACH_ARUBA} += board-aruba-sensors.o
+
+obj-${CONFIG_MACH_WHISTLER} += board-whistler.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-pinmux.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-sdhci.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-power.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-panel.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-sensors.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-kbc.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-baseband.o
+obj-${CONFIG_MACH_WHISTLER} += board-whistler-memory.o
+
+obj-${CONFIG_MACH_CARDHU} += board-cardhu.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-kbc.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-panel.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-pinmux.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-power.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-pm298-power-rails.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-pm299-power-rails.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-sdhci.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-sensors.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-memory.o
+obj-${CONFIG_MACH_CARDHU} += board-cardhu-powermon.o
+
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-panel.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-pinmux.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-sdhci.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-memory.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-power.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-baseband.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-kbc.o
+obj-${CONFIG_MACH_TEGRA_ENTERPRISE} += board-enterprise-sensors.o
+
+obj-${CONFIG_TEGRA_BB_XMM_POWER} += baseband-xmm-power.o
+obj-${CONFIG_TEGRA_BB_XMM_POWER2} += baseband-xmm-power2.o
+
diff --git a/arch/arm/mach-tegra/Makefile.boot b/arch/arm/mach-tegra/Makefile.boot
index 428ad122be03..d8cb9173cdf7 100644
--- a/arch/arm/mach-tegra/Makefile.boot
+++ b/arch/arm/mach-tegra/Makefile.boot
@@ -2,5 +2,9 @@ zreladdr-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00008000
params_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00000100
initrd_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00800000
+zreladdr-$(CONFIG_ARCH_TEGRA_3x_SOC) := 0x80008000
+params_phys-$(CONFIG_ARCH_TEGRA_3x_SOC) := 0x80000100
+initrd_phys-$(CONFIG_ARCH_TEGRA_3x_SOC) := 0x80800000
+
dtb-$(CONFIG_MACH_HARMONY) += tegra-harmony.dtb
dtb-$(CONFIG_MACH_SEABOARD) += tegra-seaboard.dtb
diff --git a/arch/arm/mach-tegra/ahb.c b/arch/arm/mach-tegra/ahb.c
new file mode 100644
index 000000000000..b7f3fb5219bb
--- /dev/null
+++ b/arch/arm/mach-tegra/ahb.c
@@ -0,0 +1,218 @@
+/*
+ * arch/arm/mach-tegra/ahb.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Jay Cheng <jacheng@nvidia.com>
+ * James Wylder <james.wylder@motorola.com>
+ * Benoit Goby <benoit@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/syscore_ops.h>
+
+#include <mach/iomap.h>
+
+#define AHB_ARBITRATION_DISABLE 0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL 0x04
+#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
+#define PRIORITY_SELECT_USB BIT(6)
+#define PRIORITY_SELECT_USB2 BIT(18)
+#define PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM 0x0c
+#define ENB_FAST_REARBITRATE BIT(2)
+#define DONT_SPLIT_AHB_WR BIT(7)
+
+#define AHB_GIZMO_APB_DMA 0x10
+#define AHB_GIZMO_IDE 0x18
+#define AHB_GIZMO_USB 0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE 0x28
+#define AHB_GIZMO_XBAR_APB_CTLR 0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30
+#define AHB_GIZMO_NAND 0x3c
+#define AHB_GIZMO_SDMMC4 0x44
+#define AHB_GIZMO_XIO 0x48
+#define AHB_GIZMO_BSEV 0x60
+#define AHB_GIZMO_BSEA 0x70
+#define AHB_GIZMO_NOR 0x74
+#define AHB_GIZMO_USB2 0x78
+#define AHB_GIZMO_USB3 0x7c
+#define IMMEDIATE BIT(18)
+
+#define AHB_GIZMO_SDMMC1 0x80
+#define AHB_GIZMO_SDMMC2 0x84
+#define AHB_GIZMO_SDMMC3 0x88
+#define AHB_MEM_PREFETCH_CFG_X 0xd8
+#define AHB_ARBITRATION_XBAR_CTRL 0xdc
+#define AHB_MEM_PREFETCH_CFG3 0xe0
+#define AHB_MEM_PREFETCH_CFG4 0xe4
+#define AHB_MEM_PREFETCH_CFG1 0xec
+#define AHB_MEM_PREFETCH_CFG2 0xf0
+#define PREFETCH_ENB BIT(31)
+#define MST_ID(x) (((x) & 0x1f) << 26)
+#define AHBDMA_MST_ID MST_ID(5)
+#define USB_MST_ID MST_ID(6)
+#define USB2_MST_ID MST_ID(18)
+#define USB3_MST_ID MST_ID(17)
+#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
+#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8
+
+
+static inline unsigned long gizmo_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(TEGRA_AHB_GIZMO_BASE + offset));
+}
+
+static inline void gizmo_writel(unsigned long value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(TEGRA_AHB_GIZMO_BASE + offset));
+}
+
+static u32 ahb_gizmo[29];
+
+#ifdef CONFIG_PM
+int tegra_ahbgizmo_suspend(void)
+{
+ ahb_gizmo[0] = gizmo_readl(AHB_ARBITRATION_DISABLE);
+ ahb_gizmo[1] = gizmo_readl(AHB_ARBITRATION_PRIORITY_CTRL);
+ ahb_gizmo[2] = gizmo_readl(AHB_GIZMO_AHB_MEM);
+ ahb_gizmo[3] = gizmo_readl(AHB_GIZMO_APB_DMA);
+ ahb_gizmo[4] = gizmo_readl(AHB_GIZMO_IDE);
+ ahb_gizmo[5] = gizmo_readl(AHB_GIZMO_USB);
+ ahb_gizmo[6] = gizmo_readl(AHB_GIZMO_AHB_XBAR_BRIDGE);
+ ahb_gizmo[7] = gizmo_readl(AHB_GIZMO_CPU_AHB_BRIDGE);
+ ahb_gizmo[8] = gizmo_readl(AHB_GIZMO_COP_AHB_BRIDGE);
+ ahb_gizmo[9] = gizmo_readl(AHB_GIZMO_XBAR_APB_CTLR);
+ ahb_gizmo[10] = gizmo_readl(AHB_GIZMO_VCP_AHB_BRIDGE);
+ ahb_gizmo[11] = gizmo_readl(AHB_GIZMO_NAND);
+ ahb_gizmo[12] = gizmo_readl(AHB_GIZMO_SDMMC4);
+ ahb_gizmo[13] = gizmo_readl(AHB_GIZMO_XIO);
+ ahb_gizmo[14] = gizmo_readl(AHB_GIZMO_BSEV);
+ ahb_gizmo[15] = gizmo_readl(AHB_GIZMO_BSEA);
+ ahb_gizmo[16] = gizmo_readl(AHB_GIZMO_NOR);
+ ahb_gizmo[17] = gizmo_readl(AHB_GIZMO_USB2);
+ ahb_gizmo[18] = gizmo_readl(AHB_GIZMO_USB3);
+ ahb_gizmo[19] = gizmo_readl(AHB_GIZMO_SDMMC1);
+ ahb_gizmo[20] = gizmo_readl(AHB_GIZMO_SDMMC2);
+ ahb_gizmo[21] = gizmo_readl(AHB_GIZMO_SDMMC3);
+ ahb_gizmo[22] = gizmo_readl(AHB_MEM_PREFETCH_CFG_X);
+ ahb_gizmo[23] = gizmo_readl(AHB_ARBITRATION_XBAR_CTRL);
+ ahb_gizmo[24] = gizmo_readl(AHB_MEM_PREFETCH_CFG3);
+ ahb_gizmo[25] = gizmo_readl(AHB_MEM_PREFETCH_CFG4);
+ ahb_gizmo[26] = gizmo_readl(AHB_MEM_PREFETCH_CFG1);
+ ahb_gizmo[27] = gizmo_readl(AHB_MEM_PREFETCH_CFG2);
+ ahb_gizmo[28] = gizmo_readl(AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID);
+ return 0;
+}
+
+void tegra_ahbgizmo_resume(void)
+{
+ gizmo_writel(ahb_gizmo[0], AHB_ARBITRATION_DISABLE);
+ gizmo_writel(ahb_gizmo[1], AHB_ARBITRATION_PRIORITY_CTRL);
+ gizmo_writel(ahb_gizmo[2], AHB_GIZMO_AHB_MEM);
+ gizmo_writel(ahb_gizmo[3], AHB_GIZMO_APB_DMA);
+ gizmo_writel(ahb_gizmo[4], AHB_GIZMO_IDE);
+ gizmo_writel(ahb_gizmo[5], AHB_GIZMO_USB);
+ gizmo_writel(ahb_gizmo[6], AHB_GIZMO_AHB_XBAR_BRIDGE);
+ gizmo_writel(ahb_gizmo[7], AHB_GIZMO_CPU_AHB_BRIDGE);
+ gizmo_writel(ahb_gizmo[8], AHB_GIZMO_COP_AHB_BRIDGE);
+ gizmo_writel(ahb_gizmo[9], AHB_GIZMO_XBAR_APB_CTLR);
+ gizmo_writel(ahb_gizmo[10], AHB_GIZMO_VCP_AHB_BRIDGE);
+ gizmo_writel(ahb_gizmo[11], AHB_GIZMO_NAND);
+ gizmo_writel(ahb_gizmo[12], AHB_GIZMO_SDMMC4);
+ gizmo_writel(ahb_gizmo[13], AHB_GIZMO_XIO);
+ gizmo_writel(ahb_gizmo[14], AHB_GIZMO_BSEV);
+ gizmo_writel(ahb_gizmo[15], AHB_GIZMO_BSEA);
+ gizmo_writel(ahb_gizmo[16], AHB_GIZMO_NOR);
+ gizmo_writel(ahb_gizmo[17], AHB_GIZMO_USB2);
+ gizmo_writel(ahb_gizmo[18], AHB_GIZMO_USB3);
+ gizmo_writel(ahb_gizmo[19], AHB_GIZMO_SDMMC1);
+ gizmo_writel(ahb_gizmo[20], AHB_GIZMO_SDMMC2);
+ gizmo_writel(ahb_gizmo[21], AHB_GIZMO_SDMMC3);
+ gizmo_writel(ahb_gizmo[22], AHB_MEM_PREFETCH_CFG_X);
+ gizmo_writel(ahb_gizmo[23], AHB_ARBITRATION_XBAR_CTRL);
+ gizmo_writel(ahb_gizmo[24], AHB_MEM_PREFETCH_CFG3);
+ gizmo_writel(ahb_gizmo[25], AHB_MEM_PREFETCH_CFG4);
+ gizmo_writel(ahb_gizmo[26], AHB_MEM_PREFETCH_CFG1);
+ gizmo_writel(ahb_gizmo[27], AHB_MEM_PREFETCH_CFG2);
+ gizmo_writel(ahb_gizmo[28], AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID);
+}
+#else
+#define tegra_ahbgizmo_suspend NULL
+#define tegra_ahbgizmo_resume NULL
+#endif
+
+static struct syscore_ops tegra_ahbgizmo_syscore_ops = {
+ .suspend = tegra_ahbgizmo_suspend,
+ .resume = tegra_ahbgizmo_resume,
+};
+
+static int __init tegra_init_ahb_gizmo_settings(void)
+{
+ unsigned long val;
+
+ val = gizmo_readl(AHB_GIZMO_AHB_MEM);
+ val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+ gizmo_writel(val, AHB_GIZMO_AHB_MEM);
+
+ val = gizmo_readl(AHB_GIZMO_USB);
+ val |= IMMEDIATE;
+ gizmo_writel(val, AHB_GIZMO_USB);
+
+ val = gizmo_readl(AHB_GIZMO_USB2);
+ val |= IMMEDIATE;
+ gizmo_writel(val, AHB_GIZMO_USB2);
+
+ val = gizmo_readl(AHB_GIZMO_USB3);
+ val |= IMMEDIATE;
+ gizmo_writel(val, AHB_GIZMO_USB3);
+
+ val = gizmo_readl(AHB_ARBITRATION_PRIORITY_CTRL);
+ val |= PRIORITY_SELECT_USB | PRIORITY_SELECT_USB2 | PRIORITY_SELECT_USB3
+ | AHB_PRIORITY_WEIGHT(7);
+ gizmo_writel(val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG1);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | AHBDMA_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG1);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG2);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | USB_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG2);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG3);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | USB3_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG3);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG4);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | USB2_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG4);
+
+ register_syscore_ops(&tegra_ahbgizmo_syscore_ops);
+
+ return 0;
+}
+postcore_initcall(tegra_init_ahb_gizmo_settings);
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
new file mode 100644
index 000000000000..41eb0aa3c738
--- /dev/null
+++ b/arch/arm/mach-tegra/apbio.c
@@ -0,0 +1,158 @@
+/*
+ * arch/arm/mach-tegra/apbio.c
+ *
+ * Copyright (C) 2010 NVIDIA Corporation.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+
+#include "apbio.h"
+
+static DEFINE_MUTEX(tegra_apb_dma_lock);
+
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+static struct tegra_dma_channel *tegra_apb_dma;
+static u32 *tegra_apb_bb;
+static dma_addr_t tegra_apb_bb_phys;
+static DECLARE_COMPLETION(tegra_apb_wait);
+
+static void apb_dma_complete(struct tegra_dma_req *req)
+{
+ complete(&tegra_apb_wait);
+}
+
+static inline u32 apb_readl(unsigned long offset)
+{
+ struct tegra_dma_req req;
+ int ret;
+
+ if (!tegra_apb_dma)
+ return readl(IO_TO_VIRT(offset));
+
+ mutex_lock(&tegra_apb_dma_lock);
+ req.complete = apb_dma_complete;
+ req.to_memory = 1;
+ req.dest_addr = tegra_apb_bb_phys;
+ req.dest_bus_width = 32;
+ req.dest_wrap = 1;
+ req.source_addr = offset;
+ req.source_bus_width = 32;
+ req.source_wrap = 4;
+ req.req_sel = 0;
+ req.size = 4;
+
+ INIT_COMPLETION(tegra_apb_wait);
+
+ tegra_dma_enqueue_req(tegra_apb_dma, &req);
+
+ ret = wait_for_completion_timeout(&tegra_apb_wait,
+ msecs_to_jiffies(400));
+
+ if (WARN(ret == 0, "apb read dma timed out")) {
+ tegra_dma_dequeue_req(tegra_apb_dma, &req);
+ *(u32 *)tegra_apb_bb = 0;
+ }
+
+ mutex_unlock(&tegra_apb_dma_lock);
+ return *((u32 *)tegra_apb_bb);
+}
+
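+/*
+ * Write-path mirror of apb_readl(): stage the value in the bounce
+ * buffer and DMA it out to the APB register, with the same 400 ms
+ * timeout.
+ */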
+static inline void apb_writel(u32 value, unsigned long offset)
+{
+ struct tegra_dma_req req;
+ int ret;
+
+ if (!tegra_apb_dma) {
+ writel(value, IO_TO_VIRT(offset));
+ return;
+ }
+
+ mutex_lock(&tegra_apb_dma_lock);
+ *((u32 *)tegra_apb_bb) = value;
+ req.complete = apb_dma_complete;
+ req.to_memory = 0;
+ req.dest_addr = offset;
+ req.dest_wrap = 4;
+ req.dest_bus_width = 32;
+ req.source_addr = tegra_apb_bb_phys;
+ req.source_bus_width = 32;
+ req.source_wrap = 1;
+ req.req_sel = 0;
+ req.size = 4;
+
+ INIT_COMPLETION(tegra_apb_wait);
+
+ tegra_dma_enqueue_req(tegra_apb_dma, &req);
+
+ ret = wait_for_completion_timeout(&tegra_apb_wait,
+ msecs_to_jiffies(400));
+
+ if (WARN(ret == 0, "apb write dma timed out"))
+ tegra_dma_dequeue_req(tegra_apb_dma, &req);
+
+ mutex_unlock(&tegra_apb_dma_lock);
+}
+#else
+static inline u32 apb_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(offset));
+}
+
+static inline void apb_writel(u32 value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(offset));
+}
+#endif
+
+u32 tegra_apb_readl(unsigned long offset)
+{
+ return apb_readl(offset);
+}
+
+void tegra_apb_writel(u32 value, unsigned long offset)
+{
+ apb_writel(value, offset);
+}
+
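+/*
+ * Allocate a shared one-shot APB DMA channel plus a coherent bounce
+ * buffer for the register helpers above.
+ */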
+static int tegra_init_apb_dma(void)
+{
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+ tegra_apb_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
+ TEGRA_DMA_SHARED, "apbio");
+ if (!tegra_apb_dma) {
+ pr_err("%s: can not allocate dma channel\n", __func__);
+ return -ENODEV;
+ }
+
+ tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
+ &tegra_apb_bb_phys, GFP_KERNEL);
+ if (!tegra_apb_bb) {
+ pr_err("%s: can not allocate bounce buffer\n", __func__);
+ tegra_dma_free_channel(tegra_apb_dma);
+ tegra_apb_dma = NULL;
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+arch_initcall(tegra_init_apb_dma);
diff --git a/arch/arm/mach-tegra/apbio.h b/arch/arm/mach-tegra/apbio.h
new file mode 100644
index 000000000000..f0c87f06a209
--- /dev/null
+++ b/arch/arm/mach-tegra/apbio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-tegra/apbio.h
+ *
+ * Copyright (C) 2010 NVIDIA Corporation.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+u32 tegra_apb_readl(unsigned long offset);
+void tegra_apb_writel(u32 value, unsigned long offset);
diff --git a/arch/arm/mach-tegra/arb_sema.c b/arch/arm/mach-tegra/arb_sema.c
new file mode 100644
index 000000000000..eecdee5967c8
--- /dev/null
+++ b/arch/arm/mach-tegra/arb_sema.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010, NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <mach/arb_sema.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+
+#define TEGRA_RPC_MAX_SEM 32
+
+/* arb_gnt ictrl */
+#define ARB_CPU_INT_EN 0x4
+
+/* arb_sema */
+#define ARB_GRANT_STATUS 0x0
+#define ARB_GRANT_REQUEST 0x4
+#define ARB_GRANT_RELEASE 0x8
+#define ARB_GRANT_PENDING 0xC
+
+struct tegra_arb_dev {
+ void __iomem *sema_base;
+ void __iomem *gnt_base;
+ spinlock_t lock;
+ struct completion arb_gnt_complete[TEGRA_RPC_MAX_SEM];
+ struct mutex mutexes[TEGRA_RPC_MAX_SEM];
+ int irq;
+ int status;
+ bool suspended;
+};
+
+static struct tegra_arb_dev *arb;
+
+static inline u32 arb_sema_read(u32 offset)
+{
+ return readl(arb->sema_base + offset);
+}
+
+static inline void arb_sema_write(u32 value, u32 offset)
+{
+ writel(value, arb->sema_base + offset);
+}
+
+static inline u32 arb_gnt_read(u32 offset)
+{
+ return readl(arb->gnt_base + offset);
+}
+
+static inline void arb_gnt_write(u32 value, u32 offset)
+{
+ writel(value, arb->gnt_base + offset);
+}
+
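+/*
+ * Request the hardware arbitration semaphore for this module and
+ * enable its grant interrupt so the ISR can complete the waiter.
+ */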
+static void request_arb_sem(enum tegra_arb_module lock)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&arb->lock, flags);
+
+ arb_sema_write(1 << lock, ARB_GRANT_REQUEST);
+ value = arb_gnt_read(ARB_CPU_INT_EN);
+ value |= (1 << lock);
+ arb_gnt_write(value, ARB_CPU_INT_EN);
+
+ spin_unlock_irqrestore(&arb->lock, flags);
+}
+
+static void cancel_arb_sem(enum tegra_arb_module lock)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&arb->lock, flags);
+
+ arb_sema_write(1 << lock, ARB_GRANT_RELEASE);
+ value = arb_gnt_read(ARB_CPU_INT_EN);
+ value &= ~(1 << lock);
+ arb_gnt_write(value, ARB_CPU_INT_EN);
+
+ spin_unlock_irqrestore(&arb->lock, flags);
+}
+
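+/*
+ * Acquire the arbitration semaphore 'lock', waiting up to 'msecs' for
+ * the hardware grant; a per-semaphore mutex serializes local callers.
+ */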
+int tegra_arb_mutex_lock_timeout(enum tegra_arb_module lock, int msecs)
+{
+ int ret;
+
+ if (!arb)
+ return -ENODEV;
+
+ if (arb->suspended) {
+ pr_err("device in suspend\n");
+ return -ETIMEDOUT;
+ }
+
+ mutex_lock(&arb->mutexes[lock]);
+ INIT_COMPLETION(arb->arb_gnt_complete[lock]);
+ request_arb_sem(lock);
+ ret = wait_for_completion_timeout(&arb->arb_gnt_complete[lock], msecs_to_jiffies(msecs));
+ if (ret == 0) {
+ pr_err("timed out. pending:0x%x\n", arb_sema_read(ARB_GRANT_PENDING));
+ cancel_arb_sem(lock);
+ mutex_unlock(&arb->mutexes[lock]);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_arb_mutex_lock_timeout);
+
+int tegra_arb_mutex_unlock(enum tegra_arb_module lock)
+{
+ if (!arb)
+ return -ENODEV;
+
+ if (arb->suspended) {
+ pr_err("device in suspend\n");
+ return -ETIMEDOUT;
+ }
+
+ cancel_arb_sem(lock);
+ mutex_unlock(&arb->mutexes[lock]);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_arb_mutex_unlock);
+
+static irqreturn_t arb_gnt_isr(int irq, void *dev_id)
+{
+ struct tegra_arb_dev *dev = dev_id;
+ unsigned long status;
+ u32 cpu_int_en;
+ unsigned int bit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arb->lock, flags);
+
+ status = arb_sema_read(ARB_GRANT_STATUS);
+ pr_debug("%s: 0x%lx\n", __func__, status);
+
+ /* disable the arb semaphores which were signalled */
+ cpu_int_en = arb_gnt_read(ARB_CPU_INT_EN);
+ arb_gnt_write((cpu_int_en & ~(status & cpu_int_en)),
+ ARB_CPU_INT_EN);
+
+ status &= cpu_int_en;
+ for_each_set_bit(bit, &status, BITS_PER_LONG)
+ complete(&dev->arb_gnt_complete[bit]);
+
+ spin_unlock_irqrestore(&arb->lock, flags);
+ return IRQ_HANDLED;
+}
+
+int tegra_arb_suspend(void)
+{
+ unsigned long status = arb_sema_read(ARB_GRANT_STATUS);
+
+ if (WARN_ON(status != 0)) {
+ pr_err("%s: suspending while holding arbitration "
+ "semaphore: %08lx\n", __func__, status);
+ }
+ arb->suspended = true;
+
+ return status ? -EBUSY : 0;
+}
+
+int tegra_arb_resume(void)
+{
+ arb->suspended = false;
+ return 0;
+}
+
+static int __init tegra_arb_init(void)
+{
+ struct tegra_arb_dev *dev = NULL;
+ int err, i;
+
+ dev = kzalloc(sizeof(struct tegra_arb_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ pr_err("%s: unable to alloc data struct.\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TEGRA_RPC_MAX_SEM; i++) {
+ mutex_init(&dev->mutexes[i]);
+ init_completion(&dev->arb_gnt_complete[i]);
+ }
+
+ dev->sema_base = IO_ADDRESS(TEGRA_ARB_SEMA_BASE);
+ if (!dev->sema_base) {
+ pr_err("%s: can't get arb sema_base\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev->gnt_base = IO_ADDRESS(TEGRA_ARBGNT_ICTLR_BASE);
+ if (!dev->gnt_base) {
+ pr_err("%s: can't ioremap gnt_base\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev->irq = INT_GNT_1;
+ err = request_irq(dev->irq, arb_gnt_isr, 0, "rpc-arbsema", dev);
+ if (err) {
+ pr_err("%s: request_irq(%d) failed(%d)\n", __func__,
+ dev->irq, err);
+ goto out;
+ }
+
+ spin_lock_init(&dev->lock);
+ arb = dev;
+
+ pr_info("%s: initialized\n", __func__);
+ return 0;
+
+out:
+ kfree(dev);
+ pr_err("%s: initialization failed.\n", __func__);
+ return err;
+}
+subsys_initcall(tegra_arb_init);
+
+MODULE_LICENSE("GPLv2");
diff --git a/arch/arm/mach-tegra/asm_macros.h b/arch/arm/mach-tegra/asm_macros.h
new file mode 100644
index 000000000000..2463d797ce39
--- /dev/null
+++ b/arch/arm/mach-tegra/asm_macros.h
@@ -0,0 +1,72 @@
+/*
+ * arch/arm/mach-tegra/include/mach/asm_macros.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_ASM_MACROS_H_
+#define _MACH_TEGRA_ASM_MACROS_H_
+
+#ifdef __ASSEMBLY__
+
+/* waits until the microsecond counter (base) ticks, for exact timing loops */
+.macro wait_for_us, rd, base, tmp
+ ldr \rd, [\base]
+1001: ldr \tmp, [\base]
+ cmp \rd, \tmp
+ beq 1001b
+ mov \tmp, \rd
+.endm
+
+/* waits until the microsecond counter (base) is > rn */
+.macro wait_until, rn, base, tmp
+ add \rn, \rn, #1
+1002: ldr \tmp, [\base]
+ sub \tmp, \tmp, \rn
+ ands \tmp, \tmp, #0x80000000
+ dmb
+ bne 1002b
+.endm
+
+/* returns the offset of the flow controller halt register for a cpu */
+.macro cpu_to_halt_reg rd, rcpu
+ cmp \rcpu, #0
+ subne \rd, \rcpu, #1
+ movne \rd, \rd, lsl #3
+ addne \rd, \rd, #0x14
+ moveq \rd, #0
+.endm
+
+/* returns the offset of the flow controller csr register for a cpu */
+.macro cpu_to_csr_reg rd, rcpu
+ cmp \rcpu, #0
+ subne \rd, \rcpu, #1
+ movne \rd, \rd, lsl #3
+ addne \rd, \rd, #0x18
+ moveq \rd, #8
+.endm
+
+/* returns the ID of the current processor */
+.macro cpu_id, rd
+ mrc p15, 0, \rd, c0, c0, 5
+ and \rd, \rd, #0xF
+.endm
+
+/* loads a 32-bit value into a register without a data access */
+.macro mov32, reg, val
+ movw \reg, #:lower16:\val
+ movt \reg, #:upper16:\val
+.endm
+
+#endif
+#endif
diff --git a/arch/arm/mach-tegra/baseband-xmm-power.c b/arch/arm/mach-tegra/baseband-xmm-power.c
new file mode 100644
index 000000000000..215f63dbb1ed
--- /dev/null
+++ b/arch/arm/mach-tegra/baseband-xmm-power.c
@@ -0,0 +1,891 @@
+/*
+ * arch/arm/mach-tegra/baseband-xmm-power.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+#include <linux/usb.h>
+#include <mach/usb_phy.h>
+#include "board.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "baseband-xmm-power.h"
+
+MODULE_LICENSE("GPL");
+
+unsigned long modem_ver = XMM_MODEM_VER_1121;
+EXPORT_SYMBOL(modem_ver);
+
+unsigned long modem_flash;
+EXPORT_SYMBOL(modem_flash);
+
+unsigned long modem_pm = 1;
+EXPORT_SYMBOL(modem_pm);
+
+unsigned long enum_delay_ms = 1000; /* ignored if !modem_flash */
+
+module_param(modem_ver, ulong, 0644);
+MODULE_PARM_DESC(modem_ver,
+ "baseband xmm power - modem software version");
+module_param(modem_flash, ulong, 0644);
+MODULE_PARM_DESC(modem_flash,
+ "baseband xmm power - modem flash (1 = flash, 0 = flashless)");
+module_param(modem_pm, ulong, 0644);
+MODULE_PARM_DESC(modem_pm,
+ "baseband xmm power - modem power management (1 = pm, 0 = no pm)");
+module_param(enum_delay_ms, ulong, 0644);
+MODULE_PARM_DESC(enum_delay_ms,
+ "baseband xmm power - delay in ms between modem on and enumeration");
+
+static struct usb_device_id xmm_pm_ids[] = {
+ { USB_DEVICE(VENDOR_ID, PRODUCT_ID),
+ .driver_info = 0 },
+ {}
+};
+
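+/* GPIO numbers are filled in from platform data at probe time. */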
+static struct gpio tegra_baseband_gpios[] = {
+ { -1, GPIOF_OUT_INIT_LOW, "BB_RSTn" },
+ { -1, GPIOF_OUT_INIT_LOW, "BB_ON" },
+ { -1, GPIOF_OUT_INIT_LOW, "IPC_BB_WAKE" },
+ { -1, GPIOF_IN, "IPC_AP_WAKE" },
+ { -1, GPIOF_OUT_INIT_HIGH, "IPC_HSIC_ACTIVE" },
+ { -1, GPIOF_IN, "IPC_HSIC_SUS_REQ" },
+};
+
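+/*
+ * IPC_AP_WAKE handshake state: the IRQ handler steps through
+ * INIT1/INIT2 during modem bring-up, then simply tracks the line level
+ * (L/H) for L2<->L0 wake signalling.
+ */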
+static enum {
+ IPC_AP_WAKE_UNINIT,
+ IPC_AP_WAKE_IRQ_READY,
+ IPC_AP_WAKE_INIT1,
+ IPC_AP_WAKE_INIT2,
+ IPC_AP_WAKE_L,
+ IPC_AP_WAKE_H,
+} ipc_ap_wake_state;
+
+enum baseband_xmm_powerstate_t baseband_xmm_powerstate;
+static struct workqueue_struct *workqueue;
+static struct work_struct init1_work;
+static struct work_struct init2_work;
+static struct work_struct L2_resume_work;
+static struct baseband_power_platform_data *baseband_power_driver_data;
+static bool register_hsic_device;
+static struct wake_lock wakelock;
+static struct usb_device *usbdev;
+static bool CP_initiated_L2toL0;
+static bool modem_power_on;
+static int power_onoff;
+static void baseband_xmm_power_L2_resume(void);
+
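+/*
+ * Modem power-on sequence: assert IPC_HSIC_ACTIVE, release BB_RSTn,
+ * then pulse BB_ON.
+ */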
+static int baseband_modem_power_on(struct baseband_power_platform_data *data)
+{
+ /* set IPC_HSIC_ACTIVE active */
+ gpio_set_value(baseband_power_driver_data->
+ modem.xmm.ipc_hsic_active, 1);
+
+ /* wait 20 ms */
+ mdelay(20);
+
+ /* reset / power on sequence */
+ mdelay(40);
+ gpio_set_value(data->modem.xmm.bb_rst, 1);
+ mdelay(1);
+ gpio_set_value(data->modem.xmm.bb_on, 1);
+ udelay(40);
+ gpio_set_value(data->modem.xmm.bb_on, 0);
+
+ return 0;
+}
+
+static int baseband_xmm_power_on(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+ pr_debug("%s {\n", __func__);
+
+ /* check for platform data */
+ if (!data) {
+ pr_err("%s: !data\n", __func__);
+ return -EINVAL;
+ }
+
+ /* reset the state machine */
+ baseband_xmm_powerstate = BBXMM_PS_INIT;
+ if (modem_ver < XMM_MODEM_VER_1130)
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
+ else
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
+
+ pr_debug("%s - %d\n", __func__, __LINE__);
+
+ /* register usb host controller */
+ if (!modem_flash) {
+ pr_debug("%s - %d\n", __func__, __LINE__);
+ /* register usb host controller only once */
+ if (register_hsic_device) {
+ pr_debug("%s: register usb host controller\n",
+ __func__);
+ modem_power_on = true;
+ if (data->hsic_register)
+ data->modem.xmm.hsic_device =
+ data->hsic_register();
+ else
+ pr_err("%s: hsic_register is missing\n",
+ __func__);
+ register_hsic_device = false;
+ } else {
+ /* register usb host controller */
+ if (data->hsic_register)
+ data->modem.xmm.hsic_device =
+ data->hsic_register();
+ /* turn on modem */
+ pr_debug("%s call baseband_modem_power_on\n", __func__);
+ baseband_modem_power_on(data);
+ }
+ }
+
+ pr_debug("%s }\n", __func__);
+
+ return 0;
+}
+
+static int baseband_xmm_power_off(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data;
+
+ pr_debug("%s {\n", __func__);
+
+ /* check for device / platform data */
+ if (!device) {
+ pr_err("%s: !device\n", __func__);
+ return -EINVAL;
+ }
+ data = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+ if (!data) {
+ pr_err("%s: !data\n", __func__);
+ return -EINVAL;
+ }
+
+ /* unregister usb host controller */
+ pr_info("%s: hsic device: %x\n", __func__, data->modem.xmm.hsic_device);
+ if (data->hsic_unregister)
+ data->hsic_unregister(data->modem.xmm.hsic_device);
+ else
+ pr_err("%s: hsic_unregister is missing\n", __func__);
+
+ /* set IPC_HSIC_ACTIVE low */
+ gpio_set_value(baseband_power_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+
+ /* wait 20 ms */
+ mdelay(20);
+
+ /* drive bb_rst low */
+ gpio_set_value(data->modem.xmm.bb_rst, 0);
+ mdelay(1);
+
+ pr_debug("%s }\n", __func__);
+
+ return 0;
+}
+
+static ssize_t baseband_xmm_onoff(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int size;
+ struct platform_device *device = to_platform_device(dev);
+
+ pr_debug("%s\n", __func__);
+
+ /* check input */
+ if (buf == NULL) {
+ pr_err("%s: buf NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s: count=%d\n", __func__, count);
+
+ /* parse input */
+ size = sscanf(buf, "%d", &power_onoff);
+ if (size != 1) {
+ pr_err("%s: size=%d -EINVAL\n", __func__, size);
+ return -EINVAL;
+ }
+ pr_debug("%s power_onoff=%d\n", __func__, power_onoff);
+
+ if (power_onoff == 0)
+ baseband_xmm_power_off(device);
+ else if (power_onoff == 1)
+ baseband_xmm_power_on(device);
+ return count;
+}
+
+static DEVICE_ATTR(xmm_onoff, S_IRUSR | S_IWUSR | S_IRGRP,
+ NULL, baseband_xmm_onoff);
+
+void baseband_xmm_set_power_status(unsigned int status)
+{
+ struct baseband_power_platform_data *data = baseband_power_driver_data;
+ int value = 0;
+
+ pr_debug("%s\n", __func__);
+
+ if (baseband_xmm_powerstate == status)
+ return;
+
+ switch (status) {
+ case BBXMM_PS_L0:
+ pr_info("L0\n");
+ value = gpio_get_value(data->modem.xmm.ipc_hsic_active);
+ pr_debug("before L0 ipc_hsic_active=%d\n", value);
+ if (!value) {
+ pr_debug("before L0 gpio set ipc_hsic_active=1 ->\n");
+ gpio_set_value(data->modem.xmm.ipc_hsic_active, 1);
+ }
+ if (modem_power_on) {
+ modem_power_on = false;
+ baseband_modem_power_on(data);
+ }
+ wake_lock(&wakelock);
+ pr_debug("gpio host active high->\n");
+ break;
+ case BBXMM_PS_L2:
+ pr_info("L2\n");
+ wake_unlock(&wakelock);
+ break;
+ case BBXMM_PS_L3:
+ pr_info("L3\n");
+ if (wake_lock_active(&wakelock)) {
+ pr_info("%s: releasing wakelock before L3\n",
+ __func__);
+ wake_unlock(&wakelock);
+ }
+ gpio_set_value(data->modem.xmm.ipc_hsic_active, 0);
+ pr_debug("gpio host active low->\n");
+ break;
+ case BBXMM_PS_L2TOL0:
+ /* do this only from L2 state */
+ if (baseband_xmm_powerstate == BBXMM_PS_L2) {
+ baseband_xmm_powerstate = status;
+ pr_debug("BB XMM POWER STATE = %d\n", status);
+ baseband_xmm_power_L2_resume();
+ }
+ break;
+ default:
+ break;
+ }
+ baseband_xmm_powerstate = status;
+ pr_debug("BB XMM POWER STATE = %d\n", status);
+}
+EXPORT_SYMBOL_GPL(baseband_xmm_set_power_status);
+
+irqreturn_t baseband_xmm_power_ipc_ap_wake_irq(int irq, void *dev_id)
+{
+ int value;
+
+ pr_debug("%s\n", __func__);
+
+ if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
+ pr_err("%s - spurious irq\n", __func__);
+ } else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
+ value = gpio_get_value(baseband_power_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - IPC_AP_WAKE_INIT1"
+ " - got falling edge\n",
+ __func__);
+ /* go to IPC_AP_WAKE_INIT1 state */
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
+ /* queue work */
+ queue_work(workqueue, &init1_work);
+ } else {
+ pr_debug("%s - IPC_AP_WAKE_INIT1"
+ " - wait for falling edge\n",
+ __func__);
+ }
+ } else if (ipc_ap_wake_state == IPC_AP_WAKE_INIT1) {
+ value = gpio_get_value(baseband_power_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - IPC_AP_WAKE_INIT2"
+ " - wait for rising edge\n",
+ __func__);
+ } else {
+ pr_debug("%s - IPC_AP_WAKE_INIT2"
+ " - got rising edge\n",
+ __func__);
+ /* go to IPC_AP_WAKE_INIT2 state */
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
+ /* queue work */
+ queue_work(workqueue, &init2_work);
+ }
+ } else {
+ value = gpio_get_value(baseband_power_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - falling\n", __func__);
+ /* [ver < 1130] gpio protocol falling edge */
+ if (modem_ver < XMM_MODEM_VER_1130) {
+ pr_debug("gpio host wakeup done <-\n");
+ value = gpio_get_value
+ (baseband_power_driver_data->
+ modem.xmm.ipc_bb_wake);
+ if (value) {
+ /* Clear the slave wakeup request */
+ gpio_set_value
+ (baseband_power_driver_data->
+ modem.xmm.ipc_bb_wake, 0);
+ pr_debug("gpio slave wakeup done ->\n");
+ }
+ }
+ /* [ver >= 1130] gpio protocol falling edge */
+ if (modem_ver >= XMM_MODEM_VER_1130) {
+ if (baseband_xmm_powerstate == BBXMM_PS_L2) {
+ CP_initiated_L2toL0 = true;
+ baseband_xmm_set_power_status
+ (BBXMM_PS_L2TOL0);
+ }
+ }
+ /* save gpio state */
+ ipc_ap_wake_state = IPC_AP_WAKE_L;
+ } else {
+ pr_debug("%s - rising\n", __func__);
+ /* [ver >= 1130] gpio protocol rising edge */
+ if (modem_ver >= XMM_MODEM_VER_1130) {
+ pr_debug("gpio host wakeup done <-\n");
+ value = gpio_get_value
+ (baseband_power_driver_data->
+ modem.xmm.ipc_bb_wake);
+ if (value) {
+ /* Clear the slave wakeup request */
+ gpio_set_value
+ (baseband_power_driver_data->
+ modem.xmm.ipc_bb_wake, 0);
+ pr_debug("gpio slave wakeup done ->\n");
+ }
+ baseband_xmm_set_power_status(BBXMM_PS_L0);
+ }
+ /* save gpio state */
+ ipc_ap_wake_state = IPC_AP_WAKE_H;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(baseband_xmm_power_ipc_ap_wake_irq);
+
+static void baseband_xmm_power_init1_work(struct work_struct *work)
+{
+ int value;
+
+ pr_debug("%s {\n", __func__);
+
+ /* check if IPC_HSIC_ACTIVE high */
+ value = gpio_get_value(baseband_power_driver_data->
+ modem.xmm.ipc_hsic_active);
+ if (value != 1) {
+ pr_err("%s - expected IPC_HSIC_ACTIVE high!\n", __func__);
+ return;
+ }
+
+ /* wait 100 ms */
+ mdelay(100);
+
+ /* set IPC_HSIC_ACTIVE low */
+ gpio_set_value(baseband_power_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+
+ /* wait 10 ms */
+ mdelay(10);
+
+ /* set IPC_HSIC_ACTIVE high */
+ gpio_set_value(baseband_power_driver_data->
+ modem.xmm.ipc_hsic_active, 1);
+
+ /* wait 20 ms */
+ mdelay(20);
+
+ pr_debug("%s }\n", __func__);
+}
+
+static void baseband_xmm_power_init2_work(struct work_struct *work)
+{
+ struct baseband_power_platform_data *data = baseband_power_driver_data;
+
+ pr_debug("%s\n", __func__);
+
+ /* check input */
+ if (!data)
+ return;
+
+ /* register usb host controller only once */
+ if (register_hsic_device) {
+ if (data->hsic_register)
+ data->modem.xmm.hsic_device = data->hsic_register();
+ else
+ pr_err("%s: hsic_register is missing\n", __func__);
+ register_hsic_device = false;
+ }
+
+}
+
+/* Do the work for AP/CP initiated L2->L0 */
+static void baseband_xmm_power_L2_resume(void)
+{
+ struct baseband_power_platform_data *data = baseband_power_driver_data;
+ int value;
+ int delay = 10000; /* maximum delay in msec */
+
+ pr_debug("%s\n", __func__);
+
+ if (!baseband_power_driver_data)
+ return;
+ if (CP_initiated_L2toL0) {
+ pr_info("CP L2->L0\n");
+ CP_initiated_L2toL0 = false;
+ queue_work(workqueue, &L2_resume_work);
+ } else {
+ /* set the slave wakeup request */
+ pr_info("AP L2->L0\n");
+ gpio_set_value(data->modem.xmm.ipc_bb_wake, 1);
+ pr_debug("waiting for host wakeup from CP...\n");
+ do {
+ mdelay(1);
+ value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
+ delay--;
+ } while ((value) && (delay));
+ if (delay)
+ pr_debug("gpio host wakeup low <-\n");
+ else
+ pr_info("!!AP L2->L0 Failed\n");
+ }
+}
+
+/* Do the work for CP initiated L2->L0 */
+static void baseband_xmm_power_L2_resume_work(struct work_struct *work)
+{
+ struct usb_interface *intf;
+
+ pr_debug("%s {\n", __func__);
+
+ if (!usbdev)
+ return;
+ usb_lock_device(usbdev);
+ intf = usb_ifnum_to_if(usbdev, 0);
+ if (usb_autopm_get_interface(intf) == 0)
+ usb_autopm_put_interface(intf);
+ usb_unlock_device(usbdev);
+
+ pr_debug("} %s\n", __func__);
+}
+
+static void baseband_xmm_power_reset_on(void)
+{
+ /* reset / power on sequence */
+ mdelay(40);
+ gpio_set_value(baseband_power_driver_data->modem.xmm.bb_rst, 1);
+ mdelay(1);
+ gpio_set_value(baseband_power_driver_data->modem.xmm.bb_on, 1);
+ udelay(40);
+ gpio_set_value(baseband_power_driver_data->modem.xmm.bb_on, 0);
+}
+
+static struct baseband_xmm_power_work_t *baseband_xmm_power_work;
+
+static void baseband_xmm_power_work_func(struct work_struct *work)
+{
+ struct baseband_xmm_power_work_t *bbxmm_work
+ = (struct baseband_xmm_power_work_t *) work;
+
+ pr_debug("%s\n", __func__);
+
+ switch (bbxmm_work->state) {
+ case BBXMM_WORK_UNINIT:
+ pr_debug("BBXMM_WORK_UNINIT\n");
+ break;
+ case BBXMM_WORK_INIT:
+ pr_debug("BBXMM_WORK_INIT\n");
+ /* go to next state */
+ bbxmm_work->state = (modem_flash && !modem_pm)
+ ? BBXMM_WORK_INIT_FLASH_STEP1
+ : (modem_flash && modem_pm)
+ ? BBXMM_WORK_INIT_FLASH_PM_STEP1
+ : (!modem_flash && modem_pm)
+ ? BBXMM_WORK_INIT_FLASHLESS_PM_STEP1
+ : BBXMM_WORK_UNINIT;
+ pr_debug("Go to next state %d\n", bbxmm_work->state);
+ queue_work(workqueue, work);
+ break;
+ case BBXMM_WORK_INIT_FLASH_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_STEP1\n");
+ /* register usb host controller */
+ pr_debug("%s: register usb host controller\n", __func__);
+ if (baseband_power_driver_data->hsic_register)
+ baseband_power_driver_data->modem.xmm.hsic_device =
+ baseband_power_driver_data->hsic_register();
+ else
+ pr_err("%s: hsic_register is missing\n", __func__);
+ break;
+ case BBXMM_WORK_INIT_FLASH_PM_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_PM_STEP1\n");
+ /* [modem ver >= 1130] start with IPC_HSIC_ACTIVE low */
+ if (modem_ver >= XMM_MODEM_VER_1130) {
+ pr_debug("%s: ver > 1130:"
+ " ipc_hsic_active -> 0\n", __func__);
+ gpio_set_value(baseband_power_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+ }
+ /* reset / power on sequence */
+ baseband_xmm_power_reset_on();
+ /* optional delay
+ * 0 = flashless
+ * ==> causes next step to enumerate modem boot rom
+ * (058b / 0041)
+ * some delay > boot rom timeout
+ * ==> causes next step to enumerate modem software
+ * (1519 / 0020)
+ * (requires modem to be flash version, not flashless
+ * version)
+ */
+ if (enum_delay_ms)
+ mdelay(enum_delay_ms);
+ /* register usb host controller */
+ pr_debug("%s: register usb host controller\n", __func__);
+ if (baseband_power_driver_data->hsic_register)
+ baseband_power_driver_data->modem.xmm.hsic_device =
+ baseband_power_driver_data->hsic_register();
+ else
+ pr_err("%s: hsic_register is missing\n", __func__);
+ /* go to next state */
+ bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
+ ? BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1
+ : BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1;
+ queue_work(workqueue, work);
+ pr_debug("Go to next state %d\n", bbxmm_work->state);
+ break;
+ case BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_STEP1\n");
+ /* go to next state */
+ bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
+ ? BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ
+ : BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1;
+ queue_work(workqueue, work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1\n");
+ break;
+ default:
+ break;
+ }
+
+}
+
+static void baseband_xmm_device_add_handler(struct usb_device *udev)
+{
+ struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
+ const struct usb_device_id *id = usb_match_id(intf, xmm_pm_ids);
+
+ if (id) {
+ pr_debug("persist_enabled: %u\n", udev->persist_enabled);
+ pr_info("Add device %d <%s %s>\n", udev->devnum,
+ udev->manufacturer, udev->product);
+ usbdev = udev;
+ usb_enable_autosuspend(udev);
+ pr_info("enable autosuspend\n");
+ }
+}
+
+static void baseband_xmm_device_remove_handler(struct usb_device *udev)
+{
+ if (usbdev == udev) {
+ pr_info("Remove device %d <%s %s>\n", udev->devnum,
+ udev->manufacturer, udev->product);
+ usbdev = NULL;
+ }
+
+}
+
+static int usb_xmm_notify(struct notifier_block *self, unsigned long action,
+ void *blob)
+{
+ switch (action) {
+ case USB_DEVICE_ADD:
+ baseband_xmm_device_add_handler(blob);
+ break;
+ case USB_DEVICE_REMOVE:
+ baseband_xmm_device_remove_handler(blob);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+
+static struct notifier_block usb_xmm_nb = {
+ .notifier_call = usb_xmm_notify,
+};
+
+static int baseband_xmm_power_driver_probe(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+ struct device *dev = &device->dev;
+ int err;
+
+ pr_debug("%s\n", __func__);
+ pr_debug("[XMM] enum_delay_ms=%d\n", enum_delay_ms);
+
+ /* check for platform data */
+ if (!data)
+ return -ENODEV;
+
+ /* check if supported modem */
+ if (data->baseband_type != BASEBAND_XMM) {
+ pr_err("unsuppported modem\n");
+ return -ENODEV;
+ }
+
+ /* save platform data */
+ baseband_power_driver_data = data;
+
+ /* create device file */
+ err = device_create_file(dev, &dev_attr_xmm_onoff);
+ if (err < 0) {
+ pr_err("%s - device_create_file failed\n", __func__);
+ return -ENODEV;
+ }
+
+ /* init wake lock */
+ wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "baseband_xmm_power");
+
+ /* request baseband gpio(s) */
+ tegra_baseband_gpios[0].gpio = baseband_power_driver_data
+ ->modem.xmm.bb_rst;
+ tegra_baseband_gpios[1].gpio = baseband_power_driver_data
+ ->modem.xmm.bb_on;
+ tegra_baseband_gpios[2].gpio = baseband_power_driver_data
+ ->modem.xmm.ipc_bb_wake;
+ tegra_baseband_gpios[3].gpio = baseband_power_driver_data
+ ->modem.xmm.ipc_ap_wake;
+ tegra_baseband_gpios[4].gpio = baseband_power_driver_data
+ ->modem.xmm.ipc_hsic_active;
+ tegra_baseband_gpios[5].gpio = baseband_power_driver_data
+ ->modem.xmm.ipc_hsic_sus_req;
+ err = gpio_request_array(tegra_baseband_gpios,
+ ARRAY_SIZE(tegra_baseband_gpios));
+ if (err < 0) {
+ pr_err("%s - request gpio(s) failed\n", __func__);
+ return -ENODEV;
+ }
+
+ /* request baseband irq(s) */
+ if (modem_flash && modem_pm) {
+ pr_debug("%s: request_irq IPC_AP_WAKE_IRQ\n", __func__);
+ ipc_ap_wake_state = IPC_AP_WAKE_UNINIT;
+ err = request_irq(gpio_to_irq(data->modem.xmm.ipc_ap_wake),
+ baseband_xmm_power_ipc_ap_wake_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "IPC_AP_WAKE_IRQ",
+ NULL);
+ if (err < 0) {
+ pr_err("%s - request irq IPC_AP_WAKE_IRQ failed\n",
+ __func__);
+ return err;
+ }
+ ipc_ap_wake_state = IPC_AP_WAKE_IRQ_READY;
+ if (modem_ver >= XMM_MODEM_VER_1130) {
+ pr_debug("%s: ver > 1130: AP_WAKE_INIT1\n", __func__);
+ /* ver 1130 or later starts in INIT1 state */
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
+ }
+ }
+
+ /* init work queue */
+ workqueue = create_singlethread_workqueue
+ ("baseband_xmm_power_workqueue");
+ if (!workqueue) {
+ pr_err("cannot create workqueue\n");
+ return -ENOMEM;
+ }
+ baseband_xmm_power_work = (struct baseband_xmm_power_work_t *)
+ kmalloc(sizeof(struct baseband_xmm_power_work_t), GFP_KERNEL);
+ if (!baseband_xmm_power_work) {
+ pr_err("cannot allocate baseband_xmm_power_work\n");
+ return -ENOMEM;
+ }
+ INIT_WORK((struct work_struct *) baseband_xmm_power_work,
+ baseband_xmm_power_work_func);
+ baseband_xmm_power_work->state = BBXMM_WORK_INIT;
+ queue_work(workqueue,
+ (struct work_struct *) baseband_xmm_power_work);
+
+ /* init work objects */
+ INIT_WORK(&init1_work, baseband_xmm_power_init1_work);
+ INIT_WORK(&init2_work, baseband_xmm_power_init2_work);
+ INIT_WORK(&L2_resume_work, baseband_xmm_power_L2_resume_work);
+
+ /* init state variables */
+ register_hsic_device = true;
+ baseband_xmm_powerstate = BBXMM_PS_UNINIT;
+ CP_initiated_L2toL0 = false;
+
+ usb_register_notify(&usb_xmm_nb);
+
+ pr_debug("%s }\n", __func__);
+ return 0;
+}
+
+static int baseband_xmm_power_driver_remove(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+ struct device *dev = &device->dev;
+
+ pr_debug("%s\n", __func__);
+
+ /* check for platform data */
+ if (!data)
+ return 0;
+
+ usb_unregister_notify(&usb_xmm_nb);
+
+ /* free work structure */
+ kfree(baseband_xmm_power_work);
+ baseband_xmm_power_work = NULL;
+
+ /* free baseband irq(s) */
+ if (modem_flash && modem_pm) {
+ free_irq(gpio_to_irq(baseband_power_driver_data
+ ->modem.xmm.ipc_ap_wake), NULL);
+ }
+
+ /* free baseband gpio(s) */
+ gpio_free_array(tegra_baseband_gpios,
+ ARRAY_SIZE(tegra_baseband_gpios));
+
+ /* destroy wake lock */
+ wake_lock_destroy(&wakelock);
+
+ /* delete device file */
+ device_remove_file(dev, &dev_attr_xmm_onoff);
+
+ /* unregister usb host controller */
+ if (data->hsic_unregister)
+ data->hsic_unregister(data->modem.xmm.hsic_device);
+ else
+ pr_err("%s: hsic_unregister is missing\n", __func__);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int baseband_xmm_power_driver_suspend(struct platform_device *device,
+ pm_message_t state)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int baseband_xmm_power_driver_resume(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+ int value;
+ int delay = 10000; /* maximum delay in msec */
+
+ pr_debug("%s\n", __func__);
+
+ /* check for platform data */
+ if (!baseband_power_driver_data)
+ return 0;
+
+ /* check if modem is on */
+ if (power_onoff == 0) {
+ pr_debug("%s - flight mode - nop\n", __func__);
+ baseband_xmm_set_power_status(BBXMM_PS_L3TOL0);
+ return 0;
+ }
+
+ /* L3->L0 */
+ baseband_xmm_set_power_status(BBXMM_PS_L3TOL0);
+ value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
+ if (value) {
+ pr_info("AP L3 -> L0\n");
+ /* wake bb */
+ gpio_set_value(data->modem.xmm.ipc_bb_wake, 1);
+
+ pr_debug("waiting for host wakeup...\n");
+ do {
+ mdelay(1);
+ value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
+ delay--;
+ } while ((value) && (delay));
+ if (delay)
+ pr_debug("gpio host wakeup low <-\n");
+ } else {
+ pr_info("CP L3 -> L0\n");
+ }
+
+ return 0;
+}
+#endif
+
+static struct platform_driver baseband_power_driver = {
+ .probe = baseband_xmm_power_driver_probe,
+ .remove = baseband_xmm_power_driver_remove,
+#ifdef CONFIG_PM
+ .suspend = baseband_xmm_power_driver_suspend,
+ .resume = baseband_xmm_power_driver_resume,
+#endif
+ .driver = {
+ .name = "baseband_xmm_power",
+ },
+};
+
+static int __init baseband_xmm_power_init(void)
+{
+ return platform_driver_register(&baseband_power_driver);
+}
+
+static void __exit baseband_xmm_power_exit(void)
+{
+ pr_debug("%s\n", __func__);
+ platform_driver_unregister(&baseband_power_driver);
+}
+
+module_init(baseband_xmm_power_init)
+module_exit(baseband_xmm_power_exit)
diff --git a/arch/arm/mach-tegra/baseband-xmm-power.h b/arch/arm/mach-tegra/baseband-xmm-power.h
new file mode 100644
index 000000000000..0768ed191b05
--- /dev/null
+++ b/arch/arm/mach-tegra/baseband-xmm-power.h
@@ -0,0 +1,111 @@
+/*
+ * arch/arm/mach-tegra/baseband-xmm-power.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef BASEBAND_XMM_POWER_H
+#define BASEBAND_XMM_POWER_H
+
+#include <linux/pm.h>
+#include <linux/suspend.h>
+
+#define VENDOR_ID 0x1519
+#define PRODUCT_ID 0x0020
+#define TEGRA_EHCI_DEVICE "/sys/devices/platform/tegra-ehci.1/ehci_power"
+
+#define XMM_MODEM_VER_1121 0x1121
+#define XMM_MODEM_VER_1130 0x1130
+
+/* shared between baseband-xmm-* modules so they can agree on same
+ * modem configuration
+ */
+extern unsigned long modem_ver;
+extern unsigned long modem_flash;
+extern unsigned long modem_pm;
+
+enum baseband_type {
+ BASEBAND_XMM,
+};
+
+struct baseband_power_platform_data {
+ enum baseband_type baseband_type;
+ struct platform_device* (*hsic_register)(void);
+ void (*hsic_unregister)(struct platform_device *);
+ union {
+ struct {
+ int mdm_reset;
+ int mdm_on;
+ int ap2mdm_ack;
+ int mdm2ap_ack;
+ int ap2mdm_ack2;
+ int mdm2ap_ack2;
+ struct platform_device *device;
+ } generic;
+ struct {
+ int bb_rst;
+ int bb_on;
+ int ipc_bb_wake;
+ int ipc_ap_wake;
+ int ipc_hsic_active;
+ int ipc_hsic_sus_req;
+ struct platform_device *hsic_device;
+ } xmm;
+ } modem;
+};
+
+enum baseband_xmm_power_work_state_t {
+ BBXMM_WORK_UNINIT,
+ BBXMM_WORK_INIT,
+ /* initialize flash modem */
+ BBXMM_WORK_INIT_FLASH_STEP1,
+ /* initialize flash (with power management support) modem */
+ BBXMM_WORK_INIT_FLASH_PM_STEP1,
+ BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1,
+ BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1,
+ /* initialize flashless (with power management support) modem */
+ BBXMM_WORK_INIT_FLASHLESS_PM_STEP1,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3,
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4,
+};
+
+struct baseband_xmm_power_work_t {
+ /* work structure must be first structure member */
+ struct work_struct work;
+ /* xmm modem state */
+ enum baseband_xmm_power_work_state_t state;
+};
+
+enum baseband_xmm_powerstate_t {
+ BBXMM_PS_UNINIT = 0,
+ BBXMM_PS_INIT = 1,
+ BBXMM_PS_L0 = 2,
+ BBXMM_PS_L0TOL2 = 3,
+ BBXMM_PS_L2 = 4,
+ BBXMM_PS_L2TOL0 = 5,
+ BBXMM_PS_L2TOL3 = 6,
+ BBXMM_PS_L3 = 7,
+ BBXMM_PS_L3TOL0 = 8,
+ BBXMM_PS_LAST = -1,
+};
+
+irqreturn_t baseband_xmm_power_ipc_ap_wake_irq(int irq, void *dev_id);
+
+void baseband_xmm_set_power_status(unsigned int status);
+
+#endif /* BASEBAND_XMM_POWER_H */
diff --git a/arch/arm/mach-tegra/baseband-xmm-power2.c b/arch/arm/mach-tegra/baseband-xmm-power2.c
new file mode 100644
index 000000000000..dd05202ba6ec
--- /dev/null
+++ b/arch/arm/mach-tegra/baseband-xmm-power2.c
@@ -0,0 +1,680 @@
+/*
+ * arch/arm/mach-tegra/baseband-xmm-power2.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+#include <mach/usb_phy.h>
+#include "baseband-xmm-power.h"
+#include "board.h"
+#include "devices.h"
+
+MODULE_LICENSE("GPL");
+
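+/*
+ * X/Y/Z bring-up delays (in ms) packed into one module parameter as
+ * X * 1000000 + Y * 1000 + Z; the default encodes X=1000, Y=800, Z=500.
+ */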
+static unsigned long XYZ = 1000 * 1000000 + 800 * 1000 + 500;
+
+module_param(modem_ver, ulong, 0644);
+MODULE_PARM_DESC(modem_ver,
+ "baseband xmm power2 - modem software version");
+module_param(modem_flash, ulong, 0644);
+MODULE_PARM_DESC(modem_flash,
+ "baseband xmm power2 - modem flash (1 = flash, 0 = flashless)");
+module_param(modem_pm, ulong, 0644);
+MODULE_PARM_DESC(modem_pm,
+ "baseband xmm power2 - modem power management (1 = pm, 0 = no pm)");
+module_param(XYZ, ulong, 0644);
+MODULE_PARM_DESC(XYZ,
+ "baseband xmm power2 - timing parameters X/Y/Z delay in ms");
+
+static struct baseband_power_platform_data *baseband_power2_driver_data;
+static struct workqueue_struct *workqueue;
+static struct baseband_xmm_power_work_t *baseband_xmm_power2_work;
+
+static enum {
+ IPC_AP_WAKE_UNINIT,
+ IPC_AP_WAKE_IRQ_READY,
+ IPC_AP_WAKE_INIT1,
+ IPC_AP_WAKE_INIT2,
+ IPC_AP_WAKE_L,
+ IPC_AP_WAKE_H,
+} ipc_ap_wake_state;
+
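+/*
+ * IPC_AP_WAKE handler for modem versions < 1130: the falling and rising
+ * edges seen during bring-up queue the flashless PM step1/step2 work
+ * items; subsequent edges are forwarded to the common handler in
+ * baseband-xmm-power.c.
+ */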
+static irqreturn_t baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
+ (int irq, void *dev_id)
+{
+ int value;
+
+ pr_debug("%s\n", __func__);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return IRQ_HANDLED;
+
+ /* IPC_AP_WAKE state machine */
+ if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
+ pr_err("%s - spurious irq\n", __func__);
+ } else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - IPC_AP_WAKE_INIT1"
+ " - got falling edge\n",
+ __func__);
+ /* go to IPC_AP_WAKE_INIT1 state */
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
+ /* queue work */
+ baseband_xmm_power2_work->state =
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1;
+ queue_work(workqueue, (struct work_struct *)
+ baseband_xmm_power2_work);
+ } else {
+ pr_debug("%s - IPC_AP_WAKE_INIT1"
+ " - wait for falling edge\n",
+ __func__);
+ }
+ } else if (ipc_ap_wake_state == IPC_AP_WAKE_INIT1) {
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - IPC_AP_WAKE_INIT2"
+ " - wait for rising edge\n",
+ __func__);
+ } else {
+ pr_debug("%s - IPC_AP_WAKE_INIT2"
+ " - got rising edge\n",
+ __func__);
+ /* go to IPC_AP_WAKE_INIT2 state */
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
+ /* queue work */
+ baseband_xmm_power2_work->state =
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2;
+ queue_work(workqueue, (struct work_struct *)
+ baseband_xmm_power2_work);
+ }
+ } else {
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - falling\n", __func__);
+ ipc_ap_wake_state = IPC_AP_WAKE_L;
+ } else {
+ pr_debug("%s - rising\n", __func__);
+ ipc_ap_wake_state = IPC_AP_WAKE_H;
+ }
+ return baseband_xmm_power_ipc_ap_wake_irq(irq, dev_id);
+ }
+
+ return IRQ_HANDLED;
+}
+
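+/*
+ * IPC_AP_WAKE handler for modem versions >= 1130: the first falling
+ * edge goes straight to INIT2 and queues step2; subsequent edges are
+ * forwarded to the common handler.
+ */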
+static irqreturn_t baseband_xmm_power2_ver_ge_1130_ipc_ap_wake_irq2
+ (int irq, void *dev_id)
+{
+ int value;
+
+ pr_debug("%s\n", __func__);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return IRQ_HANDLED;
+
+ /* IPC_AP_WAKE state machine */
+ if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
+ pr_err("%s - spurious irq\n", __func__);
+ } else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - IPC_AP_WAKE_INIT1"
+ " - got falling edge\n",
+ __func__);
+ /* go to IPC_AP_WAKE_INIT2 state */
+ ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
+ /* queue work */
+ baseband_xmm_power2_work->state =
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2;
+ queue_work(workqueue, (struct work_struct *)
+ baseband_xmm_power2_work);
+ } else {
+ pr_debug("%s - IPC_AP_WAKE_INIT1"
+ " - wait for falling edge\n",
+ __func__);
+ }
+ } else {
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_ap_wake);
+ if (!value) {
+ pr_debug("%s - falling\n", __func__);
+ ipc_ap_wake_state = IPC_AP_WAKE_L;
+ } else {
+ pr_debug("%s - rising\n", __func__);
+ ipc_ap_wake_state = IPC_AP_WAKE_H;
+ }
+ return baseband_xmm_power_ipc_ap_wake_irq(irq, dev_id);
+ }
+
+ return IRQ_HANDLED;
+}
+
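+/*
+ * Flashless PM bring-up (< 1130), step 1: with IPC_HSIC_ACTIVE still
+ * high, wait 30 ms and then drive it low.
+ */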
+static void baseband_xmm_power2_flashless_pm_ver_lt_1130_step1
+ (struct work_struct *work)
+{
+ int value;
+
+ pr_debug("%s {\n", __func__);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return;
+
+ /* check if IPC_HSIC_ACTIVE high */
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active);
+ if (value != 1) {
+ pr_err("%s - expected IPC_HSIC_ACTIVE high!\n", __func__);
+ return;
+ }
+
+ /* wait 30 ms */
+ mdelay(30);
+
+ /* set IPC_HSIC_ACTIVE low */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+
+ pr_debug("%s }\n", __func__);
+}
+
+static void baseband_xmm_power2_flashless_pm_ver_lt_1130_step2
+ (struct work_struct *work)
+{
+ int value;
+
+ pr_debug("%s {\n", __func__);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return;
+
+ /* check if IPC_HSIC_ACTIVE low */
+ value = gpio_get_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active);
+ if (value != 0) {
+ pr_err("%s - expected IPC_HSIC_ACTIVE low!\n", __func__);
+ return;
+ }
+
+ /* wait 1 ms */
+ mdelay(1);
+
+ /* unregister usb host controller */
+ if (baseband_power2_driver_data->hsic_unregister)
+ baseband_power2_driver_data->hsic_unregister(
+ baseband_power2_driver_data->modem.xmm.hsic_device);
+ else
+ pr_err("%s: hsic_unregister is missing\n", __func__);
+
+ /* set IPC_HSIC_ACTIVE high */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 1);
+
+ /* wait 20 ms */
+ mdelay(20);
+
+ /* set IPC_HSIC_ACTIVE low */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+
+ /* wait 20 ms */
+ mdelay(20);
+
+ /* set IPC_HSIC_ACTIVE high */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 1);
+
+ pr_debug("%s }\n", __func__);
+}
+
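+/*
+ * Flashless PM bring-up (>= 1130), step 1: unregister the HSIC host,
+ * wait X ms, then drive IPC_HSIC_ACTIVE low.
+ */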
+static void baseband_xmm_power2_flashless_pm_ver_ge_1130_step1
+ (struct work_struct *work)
+{
+ int X = XYZ / 1000000;
+ int Y = XYZ / 1000 - X * 1000;
+ int Z = XYZ % 1000;
+
+ pr_info("%s {\n", __func__);
+
+ pr_info("XYZ=%ld X=%d Y=%d Z=%d\n", XYZ, X, Y, Z);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return;
+
+ /* unregister usb host controller */
+ if (baseband_power2_driver_data->hsic_unregister)
+ baseband_power2_driver_data->hsic_unregister(
+ baseband_power2_driver_data->modem.xmm.hsic_device);
+ else
+ pr_err("%s: hsic_unregister is missing\n", __func__);
+
+ /* wait X ms */
+ mdelay(X);
+
+ /* set IPC_HSIC_ACTIVE low */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+
+ pr_info("%s }\n", __func__);
+}
+
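+/*
+ * Step 2: wait Y ms, re-register the HSIC host, wait Z ms, raise
+ * IPC_HSIC_ACTIVE, then schedule the enumeration check (step 3).
+ */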
+static void baseband_xmm_power2_flashless_pm_ver_ge_1130_step2
+ (struct work_struct *work)
+{
+ int X = XYZ / 1000000;
+ int Y = XYZ / 1000 - X * 1000;
+ int Z = XYZ % 1000;
+
+ pr_info("%s {\n", __func__);
+
+ pr_info("XYZ=%ld X=%d Y=%d Z=%d\n", XYZ, X, Y, Z);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return;
+
+ /* wait Y ms */
+ mdelay(Y);
+
+ /* register usb host controller */
+ if (baseband_power2_driver_data->hsic_register)
+ baseband_power2_driver_data->modem.xmm.hsic_device =
+ baseband_power2_driver_data->hsic_register();
+ else
+ pr_err("%s: hsic_register is missing\n", __func__);
+
+ /* wait Z ms */
+ mdelay(Z);
+
+ /* set IPC_HSIC_ACTIVE high */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 1);
+
+ /* queue work function to check if enumeration succeeded */
+ baseband_xmm_power2_work->state =
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3;
+ queue_work(workqueue, (struct work_struct *)
+ baseband_xmm_power2_work);
+
+ pr_info("%s }\n", __func__);
+}
+
+static void baseband_xmm_power2_flashless_pm_ver_ge_1130_step3
+ (struct work_struct *work)
+{
+ int X = XYZ / 1000000;
+ int Y = XYZ / 1000 - X * 1000;
+ int Z = XYZ % 1000;
+ int enum_success = 0;
+
+ pr_info("%s {\n", __func__);
+
+ pr_info("XYZ=%ld X=%d Y=%d Z=%d\n", XYZ, X, Y, Z);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return;
+
+ /* wait 500 ms */
+ mdelay(500);
+
+ /* check if enumeration succeeded */
+ {
+ mm_segment_t oldfs;
+ struct file *filp;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ filp = filp_open("/dev/ttyACM0",
+ O_RDONLY, 0);
+ if (IS_ERR(filp) || (filp == NULL)) {
+ pr_err("/dev/ttyACM0 %ld\n",
+ PTR_ERR(filp));
+ } else {
+ filp_close(filp, NULL);
+ enum_success = 1;
+ }
+ set_fs(oldfs);
+ }
+
+ /* if enumeration failed, attempt recovery pulse */
+ if (!enum_success) {
+ pr_info("attempting recovery pulse...\n");
+ /* wait 20 ms */
+ mdelay(20);
+ /* set IPC_HSIC_ACTIVE low */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 0);
+ /* wait 20 ms */
+ mdelay(20);
+ /* set IPC_HSIC_ACTIVE high */
+ gpio_set_value(baseband_power2_driver_data->
+ modem.xmm.ipc_hsic_active, 1);
+ /* check if recovery pulse worked */
+ baseband_xmm_power2_work->state =
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4;
+ queue_work(workqueue, (struct work_struct *)
+ baseband_xmm_power2_work);
+ }
+
+ pr_info("%s }\n", __func__);
+}
+
+static void baseband_xmm_power2_flashless_pm_ver_ge_1130_step4
+ (struct work_struct *work)
+{
+ int X = XYZ / 1000000;
+ int Y = XYZ / 1000 - X * 1000;
+ int Z = XYZ % 1000;
+ int enum_success = 0;
+
+ pr_info("%s {\n", __func__);
+
+ pr_info("XYZ=%ld X=%d Y=%d Z=%d\n", XYZ, X, Y, Z);
+
+ /* check for platform data */
+ if (!baseband_power2_driver_data)
+ return;
+
+ /* wait 500 ms */
+ mdelay(500);
+
+ /* check if enumeration succeeded */
+ {
+ mm_segment_t oldfs;
+ struct file *filp;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ filp = filp_open("/dev/ttyACM0",
+ O_RDONLY, 0);
+ if (IS_ERR(filp) || (filp == NULL)) {
+ pr_err("open /dev/ttyACM0 failed %ld\n",
+ PTR_ERR(filp));
+ } else {
+ filp_close(filp, NULL);
+ enum_success = 1;
+ }
+ set_fs(oldfs);
+ }
+
+ /* if recovery pulse did not fix enumeration, retry from beginning */
+ if (!enum_success) {
+ static int retry = 3;
+ if (!retry) {
+ pr_info("failed to enumerate modem software"
+ " - too many retry attempts\n");
+ } else {
+ pr_info("recovery pulse failed to fix modem"
+ " enumeration..."
+ " restarting from beginning"
+ " - attempt #%d\n",
+ retry);
+ --retry;
+ ipc_ap_wake_state = IPC_AP_WAKE_IRQ_READY;
+ baseband_xmm_power2_work->state =
+ BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1;
+ queue_work(workqueue, (struct work_struct *)
+ baseband_xmm_power2_work);
+ }
+ }
+
+ pr_info("%s }\n", __func__);
+}
+
+static int free_ipc_ap_wake_irq;
+
+static void baseband_xmm_power2_work_func(struct work_struct *work)
+{
+ struct baseband_xmm_power_work_t *bbxmm_work
+ = (struct baseband_xmm_power_work_t *) work;
+ int err;
+
+ pr_debug("%s bbxmm_work->state=%d\n", __func__, bbxmm_work->state);
+
+ switch (bbxmm_work->state) {
+ case BBXMM_WORK_UNINIT:
+ pr_debug("BBXMM_WORK_UNINIT\n");
+ /* free baseband irq(s) */
+ if (free_ipc_ap_wake_irq) {
+ free_irq(gpio_to_irq(baseband_power2_driver_data
+ ->modem.xmm.ipc_ap_wake), NULL);
+ free_ipc_ap_wake_irq = 0;
+ }
+ break;
+ case BBXMM_WORK_INIT:
+ pr_debug("BBXMM_WORK_INIT\n");
+ /* request baseband irq(s) */
+ ipc_ap_wake_state = IPC_AP_WAKE_UNINIT;
+ err = request_irq(gpio_to_irq(baseband_power2_driver_data
+ ->modem.xmm.ipc_ap_wake),
+ (modem_ver < XMM_MODEM_VER_1130)
+ ? baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
+ : baseband_xmm_power2_ver_ge_1130_ipc_ap_wake_irq2,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "BBXMM_POWER2_IPC_AP_WAKE_IRQ",
+ NULL);
+ if (err < 0) {
+ pr_err("%s - request irq IPC_AP_WAKE_IRQ failed\n",
+ __func__);
+ return;
+ }
+ free_ipc_ap_wake_irq = 1;
+ ipc_ap_wake_state = IPC_AP_WAKE_IRQ_READY;
+ /* go to next state */
+ bbxmm_work->state = (modem_flash && !modem_pm)
+ ? BBXMM_WORK_INIT_FLASH_STEP1
+ : (modem_flash && modem_pm)
+ ? BBXMM_WORK_INIT_FLASH_PM_STEP1
+ : (!modem_flash && modem_pm)
+ ? BBXMM_WORK_INIT_FLASHLESS_PM_STEP1
+ : BBXMM_WORK_UNINIT;
+ queue_work(workqueue, work);
+ break;
+ case BBXMM_WORK_INIT_FLASH_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_STEP1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASH_PM_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_PM_STEP1\n");
+ /* go to next state */
+ bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
+ ? BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1
+ : BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1;
+ queue_work(workqueue, work);
+ break;
+ case BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_STEP1\n");
+ /* go to next state */
+ bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
+ ? BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ
+ : BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1;
+ queue_work(workqueue, work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ"
+ " - waiting for IPC_AP_WAKE_IRQ to trigger step1\n");
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1\n");
+ baseband_xmm_power2_flashless_pm_ver_lt_1130_step1(work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2\n");
+ baseband_xmm_power2_flashless_pm_ver_lt_1130_step2(work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1\n");
+ baseband_xmm_power2_flashless_pm_ver_ge_1130_step1(work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2\n");
+ baseband_xmm_power2_flashless_pm_ver_ge_1130_step2(work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3\n");
+ baseband_xmm_power2_flashless_pm_ver_ge_1130_step3(work);
+ break;
+ case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4:
+ pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4\n");
+ baseband_xmm_power2_flashless_pm_ver_ge_1130_step4(work);
+ break;
+ }
+
+}
+
+static int baseband_xmm_power2_driver_probe(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+
+ pr_debug("%s\n", __func__);
+
+ /* save platform data */
+ baseband_power2_driver_data = data;
+
+ /* init work queue */
+ pr_debug("%s: init work queue\n", __func__);
+ workqueue = create_singlethread_workqueue
+ ("baseband_xmm_power2_workqueue");
+ if (!workqueue) {
+ pr_err("cannot create workqueue\n");
+ return -ENOMEM;
+ }
+ baseband_xmm_power2_work = (struct baseband_xmm_power_work_t *)
+ kmalloc(sizeof(struct baseband_xmm_power_work_t), GFP_KERNEL);
+ if (!baseband_xmm_power2_work) {
+ pr_err("cannot allocate baseband_xmm_power2_work\n");
+ return -ENOMEM;
+ }
+ pr_debug("%s: BBXMM_WORK_INIT\n", __func__);
+ INIT_WORK((struct work_struct *) baseband_xmm_power2_work,
+ baseband_xmm_power2_work_func);
+ baseband_xmm_power2_work->state = BBXMM_WORK_INIT;
+ queue_work(workqueue,
+ (struct work_struct *) baseband_xmm_power2_work);
+ return 0;
+}
+
+static int baseband_xmm_power2_driver_remove(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+
+ pr_debug("%s\n", __func__);
+
+ /* check for platform data */
+ if (!data)
+ return 0;
+
+ /* free irq */
+ if (free_ipc_ap_wake_irq) {
+ free_irq(gpio_to_irq(data->modem.xmm.ipc_ap_wake), NULL);
+ free_ipc_ap_wake_irq = 0;
+ }
+
+ /* free work structure */
+ destroy_workqueue(workqueue);
+ kfree(baseband_xmm_power2_work);
+ baseband_xmm_power2_work = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int baseband_xmm_power2_driver_suspend(struct platform_device *device,
+ pm_message_t state)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+
+ pr_debug("%s - nop\n", __func__);
+
+ /* check for platform data */
+ if (!data)
+ return 0;
+
+ return 0;
+}
+
+static int baseband_xmm_power2_driver_resume(struct platform_device *device)
+{
+ struct baseband_power_platform_data *data
+ = (struct baseband_power_platform_data *)
+ device->dev.platform_data;
+
+ pr_debug("%s - nop\n", __func__);
+
+ /* check for platform data */
+ if (!data)
+ return 0;
+
+ return 0;
+}
+#endif
+
+static struct platform_driver baseband_power2_driver = {
+ .probe = baseband_xmm_power2_driver_probe,
+ .remove = baseband_xmm_power2_driver_remove,
+#ifdef CONFIG_PM
+ .suspend = baseband_xmm_power2_driver_suspend,
+ .resume = baseband_xmm_power2_driver_resume,
+#endif
+ .driver = {
+ .name = "baseband_xmm_power2",
+ },
+};
+
+static int __init baseband_xmm_power2_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ return platform_driver_register(&baseband_power2_driver);
+}
+
+static void __exit baseband_xmm_power2_exit(void)
+{
+ pr_debug("%s\n", __func__);
+ platform_driver_unregister(&baseband_power2_driver);
+}
+
+module_init(baseband_xmm_power2_init)
+module_exit(baseband_xmm_power2_exit)
diff --git a/arch/arm/mach-tegra/board-aruba-panel.c b/arch/arm/mach-tegra/board-aruba-panel.c
new file mode 100644
index 000000000000..7524f821f359
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba-panel.c
@@ -0,0 +1,253 @@
+/*
+ * arch/arm/mach-tegra/board-aruba-panel.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/pwm_backlight.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "board.h"
+#include "devices.h"
+#include "gpio-names.h"
+
+#define aruba_lvds_shutdown TEGRA_GPIO_PB2
+#define aruba_bl_enb TEGRA_GPIO_PW1
+
+static int aruba_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(aruba_bl_enb, "backlight_enb");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(aruba_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(aruba_bl_enb);
+ else
+ tegra_gpio_enable(aruba_bl_enb);
+
+ return ret;
+}
+
+static void aruba_backlight_exit(struct device *dev)
+{
+ gpio_set_value(aruba_bl_enb, 0);
+ gpio_free(aruba_bl_enb);
+ tegra_gpio_disable(aruba_bl_enb);
+}
+
+static int aruba_backlight_notify(struct device *unused, int brightness)
+{
+ gpio_set_value(aruba_bl_enb, !!brightness);
+ return brightness;
+}
+
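+/* PWM backlight on PWM channel 2: 5 ms period (200 Hz), default level 224/255 */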
+static struct platform_pwm_backlight_data aruba_backlight_data = {
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .dft_brightness = 224,
+ .pwm_period_ns = 5000000,
+ .init = aruba_backlight_init,
+ .exit = aruba_backlight_exit,
+ .notify = aruba_backlight_notify,
+};
+
+static struct platform_device aruba_backlight_device = {
+ .name = "pwm-backlight",
+ .id = -1,
+ .dev = {
+ .platform_data = &aruba_backlight_data,
+ },
+};
+
+#ifdef CONFIG_TEGRA_DC
+static int aruba_panel_enable(void)
+{
+ static struct regulator *reg = NULL;
+
+ if (reg == NULL) {
+ reg = regulator_get(NULL, "avdd_lvds");
+ if (WARN_ON(IS_ERR(reg)))
+ pr_err("%s: couldn't get regulator avdd_lvds: %ld\n",
+ __func__, PTR_ERR(reg));
+ else
+ regulator_enable(reg);
+ }
+
+ gpio_set_value(aruba_lvds_shutdown, 1);
+ return 0;
+}
+
+static int aruba_panel_disable(void)
+{
+ gpio_set_value(aruba_lvds_shutdown, 0);
+ return 0;
+}
+
+static struct resource aruba_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0, /* Filled in by aruba_panel_init() */
+ .end = 0, /* Filled in by aruba_panel_init() */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
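+/* Single 480x640 RGB panel timing, 18 MHz pixel clock */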
+static struct tegra_dc_mode aruba_panel_modes[] = {
+ {
+ .pclk = 18000000,
+ .h_ref_to_sync = 8,
+ .v_ref_to_sync = 2,
+ .h_sync_width = 4,
+ .v_sync_width = 1,
+ .h_back_porch = 20,
+ .v_back_porch = 7,
+ .h_active = 480,
+ .v_active = 640,
+ .h_front_porch = 8,
+ .v_front_porch = 8,
+ },
+};
+
+static struct tegra_fb_data aruba_fb_data = {
+ .win = 0,
+ .xres = 480,
+ .yres = 640,
+ .bits_per_pixel = 16,
+};
+
+static struct tegra_dc_out aruba_disp1_out = {
+ .type = TEGRA_DC_OUT_RGB,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .modes = aruba_panel_modes,
+ .n_modes = ARRAY_SIZE(aruba_panel_modes),
+
+ .enable = aruba_panel_enable,
+ .disable = aruba_panel_disable,
+};
+
+static struct tegra_dc_platform_data aruba_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &aruba_disp1_out,
+ .fb = &aruba_fb_data,
+};
+
+static struct nvhost_device aruba_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = aruba_disp1_resources,
+ .num_resources = ARRAY_SIZE(aruba_disp1_resources),
+ .dev = {
+ .platform_data = &aruba_disp1_pdata,
+ },
+};
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static struct nvmap_platform_carveout aruba_carveouts[] = {
+ [0] = NVMAP_HEAP_CARVEOUT_IRAM_INIT,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0, /* Filled in by aruba_panel_init() */
+ .size = 0, /* Filled in by aruba_panel_init() */
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data aruba_nvmap_data = {
+ .carveouts = aruba_carveouts,
+ .nr_carveouts = ARRAY_SIZE(aruba_carveouts),
+};
+
+static struct platform_device aruba_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &aruba_nvmap_data,
+ },
+};
+#endif
+
+static struct platform_device *aruba_gfx_devices[] __initdata = {
+#if defined(CONFIG_TEGRA_NVMAP)
+ &aruba_nvmap_device,
+#endif
+#ifdef CONFIG_TEGRA_GRHOST
+ &tegra_grhost_device,
+#endif
+ &tegra_pwfm2_device,
+ &aruba_backlight_device,
+};
+
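+/*
+ * Register the graphics devices, point the display controller at the
+ * framebuffer region reserved by the bootloader, and copy the
+ * bootloader framebuffer across so the display contents survive boot.
+ */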
+int __init aruba_panel_init(void)
+{
+ int err;
+ struct resource __maybe_unused *res;
+
+#if defined(CONFIG_TEGRA_NVMAP)
+ aruba_carveouts[1].base = tegra_carveout_start;
+ aruba_carveouts[1].size = tegra_carveout_size;
+#endif
+
+ err = platform_add_devices(aruba_gfx_devices,
+ ARRAY_SIZE(aruba_gfx_devices));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ res = nvhost_get_resource_byname(&aruba_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+#endif
+
+ /* Copy the bootloader fb to the fb. */
+ tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
+ min(tegra_fb_size, tegra_bootloader_fb_size));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ if (!err)
+ err = nvhost_device_register(&aruba_disp1_device);
+#endif
+
+ return err;
+}
diff --git a/arch/arm/mach-tegra/board-aruba-pinmux.c b/arch/arm/mach-tegra/board-aruba-pinmux.c
new file mode 100644
index 000000000000..3db2ede1eb1c
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba-pinmux.c
@@ -0,0 +1,307 @@
+/*
+ * arch/arm/mach-tegra/board-aruba-pinmux.c
+ *
+ * Copyright (C) 2010 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/pinmux.h>
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+
+/* !!!FIXME!!! POPULATE THIS TABLE */
+static __initdata struct tegra_drive_pingroup_config aruba_drive_pinmux[] = {
+ /* DEFAULT_DRIVE(<pin_group>), */
+};
+
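+/* One pinmux table entry: pin group, mux function, pull, tristate, I/O direction */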
+#define DEFAULT_PINMUX(_pingroup, _mux, _pupd, _tri, _io) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ }
+
+/* !!!FIXME!!! POPULATE THIS TABLE */
+static __initdata struct tegra_pingroup_config aruba_pinmux[] = {
+ DEFAULT_PINMUX(ULPI_DATA0, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_DATA1, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA2, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA3, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_DATA4, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA5, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA6, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA7, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_CLK, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_DIR, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_NXT, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_STP, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(DAP3_FS, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_DIN, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_DOUT, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_SCLK, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_CLK, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_CMD, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT3, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT2, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT1, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT0, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PV2, OWR, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PV3, CLK12, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CLK2_OUT, EXTPERIPH2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK2_REQ, DAP, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PWR1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PWR2, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SDIN, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SDOUT, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_WR_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS0_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DC0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SCK, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PWR0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PCLK, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DE, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_HSYNC, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_VSYNC, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D2, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D3, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D4, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D5, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D6, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D7, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D8, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D9, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D10, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D11, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D12, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D13, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D14, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D15, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D16, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D17, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D18, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D19, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D20, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D21, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D22, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D23, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS1_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_M1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DC1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DDC_SCL, I2C4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DDC_SDA, I2C4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CRT_HSYNC, CRT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CRT_VSYNC, CRT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(VI_D0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D1, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D2, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D3, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D4, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D5, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D6, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D7, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D8, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D9, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D10, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D11, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_PCLK, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_MCLK, VI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_VSYNC, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_HSYNC, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART2_RXD, IRDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART2_TXD, IRDA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART2_RTS_N, GMI, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART2_CTS_N, GMI, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART3_TXD, UARTC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART3_RXD, UARTC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_CTS_N, UARTC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_RTS_N, UARTC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU1, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU2, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU3, PWM0, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU4, PWM1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU5, PWM2, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU6, PWM3, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GEN1_I2C_SDA, I2C1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GEN1_I2C_SCL, I2C1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_FS, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_DIN, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_DOUT, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_SCLK, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK3_OUT, EXTPERIPH3, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CLK3_REQ, DEV3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_WP_N, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_IORDY, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_WAIT, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_ADV_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CLK, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS0_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS1_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS2_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS3_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS4_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS6_N, NAND_ALT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS7_N, NAND_ALT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD0, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD1, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD2, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD3, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD4, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD5, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD6, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD7, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD9, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD11, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD12, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD13, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD14, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD15, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A16, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_A17, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A18, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A19, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_WR_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_OE_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_DQS, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_RST_N, RSVD3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GEN2_I2C_SCL, I2C2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GEN2_I2C_SDA, I2C2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_CLK, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_CMD, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT0, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT1, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT2, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT3, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT4, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT5, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT6, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT7, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_RST_N, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CAM_MCLK, VI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PCC1, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CAM_I2C_SCL, I2C3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CAM_I2C_SDA, I2C3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB3, VGP3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB4, VGP4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB5, VGP5, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB6, VGP6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB7, I2S4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PCC2, I2S4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(JTAG_RTCK, RTCK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PWR_I2C_SCL, I2CPWR, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PWR_I2C_SDA, I2CPWR, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW0, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW1, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW2, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW3, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW4, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW5, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW6, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW7, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW8, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW9, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW10, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW11, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW12, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW13, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW14, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW15, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL0, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL1, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL2, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL3, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL4, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL5, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL6, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL7, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK_32K_OUT, BLINK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(SYS_CLK_REQ, SYSCLK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(OWR, OWR, NORMAL, NORMAL, INPUT),
+#ifdef CONFIG_SND_HDA_TEGRA
+ DEFAULT_PINMUX(DAP1_FS, HDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DIN, HDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DOUT, HDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_SCLK, HDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK1_REQ, DAP1, NORMAL, NORMAL, INPUT),
+#else
+ DEFAULT_PINMUX(DAP1_FS, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DIN, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DOUT, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_SCLK, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK1_REQ, DAP, NORMAL, NORMAL, INPUT),
+#endif
+ DEFAULT_PINMUX(CLK1_OUT, EXTPERIPH1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPDIF_IN, SPDIF, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPDIF_OUT, SPDIF, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(DAP2_FS, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DIN, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DOUT, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_SCLK, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_MOSI, SPI6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_MISO, SPI6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_CS0_N, SPI6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_SCK, SPI6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_MOSI, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_SCK, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_CS0_N, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_MISO, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_CS1_N, SPI3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_CS2_N, SPI3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_CLK, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_CMD, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT0, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT1, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT2, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT3, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT4, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT5, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT6, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT7, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L0_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_WAKE_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L1_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L2_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(HDMI_CEC, CEC, NORMAL, NORMAL, INPUT),
+};
+
+void __init aruba_pinmux_init(void)
+{
+ tegra_pinmux_config_table(aruba_pinmux, ARRAY_SIZE(aruba_pinmux));
+ tegra_drive_pinmux_config_table(aruba_drive_pinmux,
+ ARRAY_SIZE(aruba_drive_pinmux));
+}
diff --git a/arch/arm/mach-tegra/board-aruba-power.c b/arch/arm/mach-tegra/board-aruba-power.c
new file mode 100644
index 000000000000..4391f6f19b51
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba-power.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "pm.h"
+#include "board.h"
+#include "wakeups-t3.h"
+
+static int ac_online(void)
+{
+ return 1;
+}
+
+static struct resource aruba_pda_resources[] = {
+ [0] = {
+ .name = "ac",
+ },
+};
+
+static struct pda_power_pdata aruba_pda_data = {
+ .is_ac_online = ac_online,
+};
+
+static struct platform_device aruba_pda_power_device = {
+ .name = "pda-power",
+ .id = -1,
+ .resource = aruba_pda_resources,
+ .num_resources = ARRAY_SIZE(aruba_pda_resources),
+ .dev = {
+ .platform_data = &aruba_pda_data,
+ },
+};
+
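+/* Suspend is effectively disabled here (TEGRA_SUSPEND_NONE) on the FPGA platform */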
+static struct tegra_suspend_platform_data aruba_suspend_data = {
+ .cpu_timer = 2000,
+ .cpu_off_timer = 0,
+ .suspend_mode = TEGRA_SUSPEND_NONE,
+ .core_timer = 0x7e7e,
+ .core_off_timer = 0,
+ .corereq_high = false,
+ .sysclkreq_high = true,
+};
+
+int __init aruba_regulator_init(void)
+{
+ platform_device_register(&aruba_pda_power_device);
+ tegra_init_suspend(&aruba_suspend_data);
+ return 0;
+}
+
+void __init tegra_tsensor_init(void)
+{
+ /* No tsensor on FPGAs */
+}
diff --git a/arch/arm/mach-tegra/board-aruba-sdhci.c b/arch/arm/mach-tegra/board-aruba-sdhci.c
new file mode 100644
index 000000000000..26b04a9021e1
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba-sdhci.c
@@ -0,0 +1,248 @@
+/*
+ * arch/arm/mach-tegra/board-aruba-sdhci.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/wlan_plat.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+
+#include "gpio-names.h"
+#include "board.h"
+
+#define ARUBA_WIFI 0 /* !!!FIXME!!! NOT SUPPORTED YET */
+
+#if ARUBA_WIFI
+
+#define ARUBA_WLAN_PWR TEGRA_GPIO_PK5
+#define ARUBA_WLAN_RST TEGRA_GPIO_PK6
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+static int aruba_wifi_status_register(void (*callback)(int , void *), void *);
+static struct clk *wifi_32k_clk;
+
+static int aruba_wifi_reset(int on);
+static int aruba_wifi_power(int on);
+static int aruba_wifi_set_carddetect(int val);
+
+static struct wifi_platform_data aruba_wifi_control = {
+ .set_power = aruba_wifi_power,
+ .set_reset = aruba_wifi_reset,
+ .set_carddetect = aruba_wifi_set_carddetect,
+};
+
+static struct platform_device aruba_wifi_device = {
+ .name = "bcm4329_wlan",
+ .id = 1,
+ .dev = {
+ .platform_data = &aruba_wifi_control,
+ },
+};
+#endif
+
+static struct resource sdhci_resource0[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+
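+/* No card-detect, write-protect or power GPIOs are wired up on this board */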
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data0 = {
+#if ARUBA_WIFI /* !!!FIXME!!! NOT SUPPORTED YET */
+ .register_status_notify = aruba_wifi_status_register,
+ .cccr = {
+ .sdio_vsn = 2,
+ .multi_block = 1,
+ .low_speed = 0,
+ .wide_bus = 0,
+ .high_power = 1,
+ .high_speed = 1,
+ },
+ .cis = {
+ .vendor = 0x02d0,
+ .device = 0x4329,
+ },
+#endif
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+/* .max_clk = 12000000, */
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+/* .max_clk = 12000000, */
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+/* .max_clk = 12000000, */
+};
+
+static struct platform_device tegra_sdhci_device0 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource0,
+ .num_resources = ARRAY_SIZE(sdhci_resource0),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data0,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data3,
+ },
+};
+
+#if ARUBA_WIFI /* !!!FIXME!!! NOT SUPPORTED YET */
+static int aruba_wifi_status_register(
+ void (*callback)(int card_present, void *dev_id),
+ void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static int aruba_wifi_set_carddetect(int val)
+{
+ pr_debug("%s: %d\n", __func__, val);
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ pr_warning("%s: Nobody to notify\n", __func__);
+ return 0;
+}
+
+static int aruba_wifi_power(int on)
+{
+ pr_debug("%s: %d\n", __func__, on);
+
+ gpio_set_value(ARUBA_WLAN_PWR, on);
+ mdelay(100);
+ gpio_set_value(ARUBA_WLAN_RST, on);
+ mdelay(200);
+
+ if (on)
+ clk_enable(wifi_32k_clk);
+ else
+ clk_disable(wifi_32k_clk);
+
+ return 0;
+}
+
+static int aruba_wifi_reset(int on)
+{
+ pr_debug("%s: do nothing\n", __func__);
+ return 0;
+}
+
+static int __init aruba_wifi_init(void)
+{
+ wifi_32k_clk = clk_get_sys(NULL, "blink");
+ if (IS_ERR(wifi_32k_clk)) {
+ pr_err("%s: unable to get blink clock\n", __func__);
+ return PTR_ERR(wifi_32k_clk);
+ }
+
+ gpio_request(ARUBA_WLAN_PWR, "wlan_power");
+ gpio_request(ARUBA_WLAN_RST, "wlan_rst");
+
+ tegra_gpio_enable(ARUBA_WLAN_PWR);
+ tegra_gpio_enable(ARUBA_WLAN_RST);
+
+ gpio_direction_output(ARUBA_WLAN_PWR, 0);
+ gpio_direction_output(ARUBA_WLAN_RST, 0);
+
+ platform_device_register(&aruba_wifi_device);
+ return 0;
+}
+#else
+#define aruba_wifi_init() do {} while (0)
+#endif
+
+int __init aruba_sdhci_init(void)
+{
+ platform_device_register(&tegra_sdhci_device3);
+ platform_device_register(&tegra_sdhci_device2);
+ platform_device_register(&tegra_sdhci_device0);
+
+ aruba_wifi_init();
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-aruba-sensors.c b/arch/arm/mach-tegra/board-aruba-sensors.c
new file mode 100644
index 000000000000..f5ba3d761634
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba-sensors.c
@@ -0,0 +1,109 @@
+/*
+ * arch/arm/mach-tegra/board-aruba-sensors.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/i2c.h>
+#include <mach/gpio.h>
+
+#include "gpio-names.h"
+
+#if 0 /* !!!FIXME!!! IMPLEMENT ME */
+
+#define ISL29018_IRQ_GPIO TEGRA_GPIO_PZ2
+#define AKM8975_IRQ_GPIO TEGRA_GPIO_PN5
+
+static void aruba_isl29018_init(void)
+{
+ tegra_gpio_enable(ISL29018_IRQ_GPIO);
+ gpio_request(ISL29018_IRQ_GPIO, "isl29018");
+ gpio_direction_input(ISL29018_IRQ_GPIO);
+}
+
+static void aruba_akm8975_init(void)
+{
+ tegra_gpio_enable(AKM8975_IRQ_GPIO);
+ gpio_request(AKM8975_IRQ_GPIO, "akm8975");
+ gpio_direction_input(AKM8975_IRQ_GPIO);
+}
+
+struct nct1008_platform_data aruba_nct1008_pdata = {
+ .conv_rate = 5,
+ .config = NCT1008_CONFIG_ALERT_DISABLE,
+ .thermal_threshold = 110,
+};
+
+static const struct i2c_board_info aruba_i2c0_board_info[] = {
+ {
+ I2C_BOARD_INFO("isl29018", 0x44),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PZ2),
+ },
+};
+
+static const struct i2c_board_info aruba_i2c2_board_info[] = {
+ {
+ I2C_BOARD_INFO("bq20z75-battery", 0x0B),
+ },
+};
+
+static struct i2c_board_info aruba_i2c4_board_info[] = {
+ {
+ I2C_BOARD_INFO("nct1008", 0x4C),
+ .platform_data = &aruba_nct1008_pdata,
+ },
+ {
+ I2C_BOARD_INFO("akm8975", 0x0C),
+ .irq = TEGRA_GPIO_TO_IRQ(AKM8975_IRQ_GPIO),
+ }
+};
+
+int __init aruba_sensors_init(void)
+{
+ aruba_isl29018_init();
+ aruba_akm8975_init();
+
+ i2c_register_board_info(0, aruba_i2c0_board_info,
+ ARRAY_SIZE(aruba_i2c0_board_info));
+
+ i2c_register_board_info(2, aruba_i2c2_board_info,
+ ARRAY_SIZE(aruba_i2c2_board_info));
+
+ i2c_register_board_info(4, aruba_i2c4_board_info,
+ ARRAY_SIZE(aruba_i2c4_board_info));
+
+ return 0;
+}
+#else
+int __init aruba_sensors_init(void)
+{
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-tegra/board-aruba.c b/arch/arm/mach-tegra/board-aruba.c
new file mode 100644
index 000000000000..dec2efc022aa
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba.c
@@ -0,0 +1,544 @@
+/*
+ * arch/arm/mach-tegra/board-aruba.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+#include <mach/i2s.h>
+#include <mach/audio.h>
+#include <mach/tegra_das.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/usb_phy.h>
+#include <mach/nand.h>
+#include "board.h"
+#include "clock.h"
+#include "board-aruba.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "fuse.h"
+
+#define ENABLE_OTG 0
+
+static struct plat_serial8250_port debug_uart_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTA_BASE),
+ .mapbase = TEGRA_UARTA_BASE,
+ .irq = INT_UARTA,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = 13000000,
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device debug_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uart_platform_data,
+ },
+};
+
+/* !!!FIXME!!! THESE ARE VENTANA SETTINGS */
+static struct tegra_utmip_config utmi_phy_config[] = {
+ [0] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [1] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+/* !!!FIXME!!! THESE ARE VENTANA SETTINGS */
+static struct tegra_ulpi_config ulpi_phy_config = {
+ .clk = "cdev2",
+};
+
+#ifdef CONFIG_BCM4329_RFKILL
+
+static struct resource aruba_bcm4329_rfkill_resources[] = {
+ {
+ .name = "bcm4329_nreset_gpio",
+ .start = TEGRA_GPIO_PU0,
+ .end = TEGRA_GPIO_PU0,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "bcm4329_nshutdown_gpio",
+ .start = TEGRA_GPIO_PK2,
+ .end = TEGRA_GPIO_PK2,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct platform_device aruba_bcm4329_rfkill_device = {
+ .name = "bcm4329_rfkill",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(aruba_bcm4329_rfkill_resources),
+ .resource = aruba_bcm4329_rfkill_resources,
+};
+
+static noinline void __init aruba_bt_rfkill(void)
+{
+ /*Add Clock Resource*/
+ clk_add_alias("bcm4329_32k_clk", aruba_bcm4329_rfkill_device.name, \
+ "blink", NULL);
+
+ platform_device_register(&aruba_bcm4329_rfkill_device);
+
+ return;
+}
+#else
+static inline void aruba_bt_rfkill(void) { }
+#endif
+
+static __initdata struct tegra_clk_init_table aruba_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "uarta", "clk_m", 13000000, true},
+ { "uartb", "clk_m", 13000000, true},
+ { "uartc", "clk_m", 13000000, true},
+ { "uartd", "clk_m", 13000000, true},
+ { "uarte", "clk_m", 13000000, true},
+ { "pll_m", NULL, 0, true},
+ { "blink", "clk_32k", 32768, false},
+ { "pll_p_out4", "pll_p", 24000000, true },
+ { "pwm", "clk_32k", 32768, false},
+ { "blink", "clk_32k", 32768, false},
+ { "pll_a", NULL, 56448000, true},
+ { "pll_a_out0", NULL, 11289600, true},
+ { "i2s1", "pll_a_out0", 11289600, true},
+ { "i2s2", "pll_a_out0", 11289600, true},
+ { "d_audio", "pll_a_out0", 11289600, false},
+ { "audio_2x", "audio", 22579200, true},
+ { NULL, NULL, 0, 0},
+};
+
+struct tegra_das_platform_data tegra_das_pdata = {
+ .tegra_dap_port_info_table = {
+ /* I2S0 <--> NULL */
+ [0] = {
+ .dac_port = tegra_das_port_none,
+ .codec_type = tegra_audio_codec_type_none,
+ .device_property = {
+ .num_channels = 0,
+ .bits_per_sample = 0,
+ .rate = 0,
+ .master = 0,
+ .lrck_high_left = false,
+ .dac_dap_data_comm_format = 0,
+ },
+ },
+ /* I2S1 <--> Hifi Codec */
+ [1] = {
+ .dac_port = tegra_das_port_i2s1,
+ .codec_type = tegra_audio_codec_type_hifi,
+ .device_property = {
+ .num_channels = 2,
+ .bits_per_sample = 16,
+ .rate = 48000,
+ .master = 0,
+ .lrck_high_left = false,
+ .dac_dap_data_comm_format =
+ dac_dap_data_format_i2s,
+ },
+ },
+ /* I2S2 <--> BB */
+ [2] = {
+ .dac_port = tegra_das_port_i2s2,
+ .codec_type = tegra_audio_codec_type_baseband,
+ .device_property = {
+ .num_channels = 1,
+ .bits_per_sample = 16,
+ .rate = 16000,
+ .master = 0,
+ .lrck_high_left = true,
+ .dac_dap_data_comm_format =
+ dac_dap_data_format_dsp,
+ },
+ },
+ /* I2S3 <--> BT */
+ [3] = {
+ .dac_port = tegra_das_port_i2s3,
+ .codec_type = tegra_audio_codec_type_bluetooth,
+ .device_property = {
+ .num_channels = 1,
+ .bits_per_sample = 16,
+ .rate = 8000,
+ .master = 0,
+ .lrck_high_left = false,
+ .dac_dap_data_comm_format =
+ dac_dap_data_format_dsp,
+ },
+ },
+ [4] = {
+ .dac_port = tegra_das_port_none,
+ .codec_type = tegra_audio_codec_type_none,
+ .device_property = {
+ .num_channels = 0,
+ .bits_per_sample = 0,
+ .rate = 0,
+ .master = 0,
+ .lrck_high_left = false,
+ .dac_dap_data_comm_format = 0,
+ },
+ },
+ },
+};
+
+static struct i2c_board_info __initdata aruba_i2c_bus1_board_info[] = {
+ {
+ I2C_BOARD_INFO("wm8903", 0x1a),
+ },
+};
+
+static struct tegra_i2c_platform_data aruba_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+};
+
+#if 0 /* !!!FIXME!!! THESE ARE VENTANA SETTINGS */
+static const struct tegra_pingroup_config i2c2_ddc = {
+ .pingroup = TEGRA_PINGROUP_DDC,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static const struct tegra_pingroup_config i2c2_gen2 = {
+ .pingroup = TEGRA_PINGROUP_PTA,
+ .func = TEGRA_MUX_I2C2,
+};
+#endif
+
+static struct tegra_i2c_platform_data aruba_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 2,
+ .bus_clk_rate = { 100000, 100000 },
+#if 0 /* !!!FIXME!!! THESE ARE VENTANA SETTINGS */
+ .bus_mux = { &i2c2_ddc, &i2c2_gen2 },
+ .bus_mux_len = { 1, 1 },
+#endif
+};
+
+static struct tegra_i2c_platform_data aruba_i2c3_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+};
+
+static struct tegra_i2c_platform_data aruba_i2c4_platform_data = {
+ .adapter_nr = 4,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+};
+
+static struct tegra_i2c_platform_data aruba_i2c5_platform_data = {
+ .adapter_nr = 5,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+};
+
+static void aruba_i2c_init(void)
+{
+ tegra_i2c_device1.dev.platform_data = &aruba_i2c1_platform_data;
+ tegra_i2c_device2.dev.platform_data = &aruba_i2c2_platform_data;
+ tegra_i2c_device3.dev.platform_data = &aruba_i2c3_platform_data;
+ tegra_i2c_device4.dev.platform_data = &aruba_i2c4_platform_data;
+ tegra_i2c_device5.dev.platform_data = &aruba_i2c5_platform_data;
+
+ i2c_register_board_info(0, aruba_i2c_bus1_board_info,
+ ARRAY_SIZE(aruba_i2c_bus1_board_info));
+
+ platform_device_register(&tegra_i2c_device5);
+ platform_device_register(&tegra_i2c_device4);
+ platform_device_register(&tegra_i2c_device3);
+ platform_device_register(&tegra_i2c_device2);
+ platform_device_register(&tegra_i2c_device1);
+}
+
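+/* Active-low GPIO key with 10 ms debounce; _iswake marks it as a wakeup source */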
+#define GPIO_KEY(_id, _gpio, _iswake) \
+ { \
+ .code = _id, \
+ .gpio = TEGRA_GPIO_##_gpio, \
+ .active_low = 1, \
+ .desc = #_id, \
+ .type = EV_KEY, \
+ .wakeup = _iswake, \
+ .debounce_interval = 10, \
+ }
+
+/* !!!FIXME!!! THESE ARE VENTANA DEFINITIONS */
+static struct gpio_keys_button aruba_keys[] = {
+ [0] = GPIO_KEY(KEY_MENU, PQ0, 0),
+ [1] = GPIO_KEY(KEY_HOME, PQ1, 0),
+ [2] = GPIO_KEY(KEY_BACK, PQ2, 0),
+ [3] = GPIO_KEY(KEY_VOLUMEUP, PQ3, 0),
+ [4] = GPIO_KEY(KEY_VOLUMEDOWN, PQ4, 0),
+ [5] = GPIO_KEY(KEY_POWER, PV2, 1),
+};
+
+static struct gpio_keys_platform_data aruba_keys_platform_data = {
+ .buttons = aruba_keys,
+ .nbuttons = ARRAY_SIZE(aruba_keys),
+};
+
+static struct platform_device aruba_keys_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &aruba_keys_platform_data,
+ },
+};
+
+static struct resource tegra_rtc_resources[] = {
+ [0] = {
+ .start = TEGRA_RTC_BASE,
+ .end = TEGRA_RTC_BASE + TEGRA_RTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_RTC,
+ .end = INT_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tegra_rtc_device = {
+ .name = "tegra_rtc",
+ .id = -1,
+ .resource = tegra_rtc_resources,
+ .num_resources = ARRAY_SIZE(tegra_rtc_resources),
+};
+
+#if defined(CONFIG_MTD_NAND_TEGRA)
+static struct resource nand_resources[] = {
+ [0] = {
+ .start = INT_NANDFLASH,
+ .end = INT_NANDFLASH,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_NAND_BASE,
+ .end = TEGRA_NAND_BASE + TEGRA_NAND_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct tegra_nand_chip_parms nand_chip_parms[] = {
+ /* Samsung K5E2G1GACM */
+ [0] = {
+ .vendor_id = 0xEC,
+ .device_id = 0xAA,
+ .capacity = 256,
+ .timing = {
+ .trp = 21,
+ .trh = 15,
+ .twp = 21,
+ .twh = 15,
+ .tcs = 31,
+ .twhr = 60,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 30,
+ .tadl = 100,
+ },
+ },
+ /* Hynix H5PS1GB3EFR */
+ [1] = {
+ .vendor_id = 0xAD,
+ .device_id = 0xDC,
+ .capacity = 512,
+ .timing = {
+ .trp = 12,
+ .trh = 10,
+ .twp = 12,
+ .twh = 10,
+ .tcs = 20,
+ .twhr = 80,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 20,
+ .tadl = 70,
+ },
+ },
+};
+
+struct tegra_nand_platform nand_data = {
+ .max_chips = 8,
+ .chip_parms = nand_chip_parms,
+ .nr_chip_parms = ARRAY_SIZE(nand_chip_parms),
+};
+
+struct platform_device tegra_nand_device = {
+ .name = "tegra_nand",
+ .id = -1,
+ .resource = nand_resources,
+ .num_resources = ARRAY_SIZE(nand_resources),
+ .dev = {
+ .platform_data = &nand_data,
+ },
+};
+#endif
+
+static struct platform_device *aruba_devices[] __initdata = {
+#if ENABLE_OTG
+ &tegra_otg_device,
+#endif
+ &debug_uart,
+ &tegra_uartb_device,
+ &tegra_uartc_device,
+ &tegra_uartd_device,
+ &tegra_uarte_device,
+ &tegra_pmu_device,
+ &tegra_rtc_device,
+ &tegra_udc_device,
+#if defined(CONFIG_TEGRA_IOVMM_SMMU)
+ &tegra_smmu_device,
+#endif
+ &aruba_keys_device,
+ &tegra_wdt_device,
+#if defined(CONFIG_SND_HDA_TEGRA)
+ &tegra_hda_device,
+#endif
+ &tegra_avp_device,
+#if defined(CONFIG_MTD_NAND_TEGRA)
+ &tegra_nand_device,
+#endif
+};
+
+static void aruba_keys_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aruba_keys); i++)
+ tegra_gpio_enable(aruba_keys[i].gpio);
+}
+
+static int __init aruba_touch_init(void)
+{
+ return 0;
+}
+
+
+static struct tegra_ehci_platform_data tegra_ehci_pdata[] = {
+ [0] = {
+ .phy_config = &utmi_phy_config[0],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ },
+ [1] = {
+ .phy_config = &ulpi_phy_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [2] = {
+ .phy_config = &utmi_phy_config[1],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ },
+};
+
+
+static void aruba_usb_init(void)
+{
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci_pdata[1];
+ platform_device_register(&tegra_ehci2_device);
+}
+
+#ifdef CONFIG_SATA_AHCI_TEGRA
+static void aruba_sata_init(void)
+{
+ platform_device_register(&tegra_sata_device);
+}
+#else
+static void aruba_sata_init(void) { }
+#endif
+
+static void __init tegra_aruba_init(void)
+{
+ tegra_clk_init_from_table(aruba_clk_init_table);
+ aruba_pinmux_init();
+
+ platform_add_devices(aruba_devices, ARRAY_SIZE(aruba_devices));
+
+ aruba_sdhci_init();
+ aruba_i2c_init();
+ aruba_regulator_init();
+ aruba_touch_init();
+ aruba_keys_init();
+ aruba_usb_init();
+ aruba_panel_init();
+ aruba_sensors_init();
+ aruba_bt_rfkill();
+ aruba_sata_init();
+ tegra_release_bootloader_fb();
+}
+
+static void __init tegra_aruba_reserve(void)
+{
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM)
+ tegra_reserve(0, SZ_4M, 0);
+#else
+ tegra_reserve(SZ_32M, SZ_4M, 0);
+#endif
+}
+
+MACHINE_START(ARUBA, "aruba")
+ .boot_params = 0x80000100,
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_aruba_reserve,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_aruba_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-aruba.h b/arch/arm/mach-tegra/board-aruba.h
new file mode 100644
index 000000000000..e00e0b071ffb
--- /dev/null
+++ b/arch/arm/mach-tegra/board-aruba.h
@@ -0,0 +1,26 @@
+/*
+ * arch/arm/mach-tegra/board-aruba.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_ARUBA_H
+#define _MACH_TEGRA_BOARD_ARUBA_H
+
+int aruba_regulator_init(void);
+int aruba_sdhci_init(void);
+void aruba_pinmux_init(void);
+int aruba_panel_init(void);
+int aruba_sensors_init(void);
+
+#endif
diff --git a/arch/arm/mach-tegra/board-cardhu-kbc.c b/arch/arm/mach-tegra/board-cardhu-kbc.c
new file mode 100644
index 000000000000..a693772948f0
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-kbc.c
@@ -0,0 +1,280 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-kbc.c
+ * Keys configuration for the NVIDIA Tegra3 Cardhu platform.
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/mfd/tps6591x.h>
+#include <linux/mfd/max77663-core.h>
+#include <linux/interrupt_keys.h>
+#include <linux/gpio_scrollwheel.h>
+
+#include <mach/irqs.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/kbc.h>
+#include "board.h"
+#include "board-cardhu.h"
+
+#include "gpio-names.h"
+#include "devices.h"
+
+#define CARDHU_PM269_ROW_COUNT 2
+#define CARDHU_PM269_COL_COUNT 4
+
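+/* 2x4 KBC matrix: row 0 carries the power/volume keys, row 1 the navigation keys */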
+static const u32 kbd_keymap[] = {
+ KEY(0, 0, KEY_POWER),
+ KEY(0, 1, KEY_RESERVED),
+ KEY(0, 2, KEY_VOLUMEUP),
+ KEY(0, 3, KEY_VOLUMEDOWN),
+
+ KEY(1, 0, KEY_HOME),
+ KEY(1, 1, KEY_MENU),
+ KEY(1, 2, KEY_BACK),
+ KEY(1, 3, KEY_SEARCH),
+};
+static const struct matrix_keymap_data keymap_data = {
+ .keymap = kbd_keymap,
+ .keymap_size = ARRAY_SIZE(kbd_keymap),
+};
+
+static struct tegra_kbc_wake_key cardhu_wake_cfg[] = {
+ [0] = {
+ .row = 0,
+ .col = 0,
+ },
+};
+
+static struct tegra_kbc_platform_data cardhu_kbc_platform_data = {
+ .debounce_cnt = 20,
+ .repeat_cnt = 1,
+ .scan_count = 30,
+ .wakeup = true,
+ .keymap_data = &keymap_data,
+ .wake_cnt = 1,
+ .wake_cfg = &cardhu_wake_cfg[0],
+#ifdef CONFIG_ANDROID
+ .disable_ev_rep = true,
+#endif
+};
+
+int __init cardhu_kbc_init(void)
+{
+ struct tegra_kbc_platform_data *data = &cardhu_kbc_platform_data;
+ int i;
+ struct board_info board_info;
+
+ tegra_get_board_info(&board_info);
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291))
+ return 0;
+
+ pr_info("Registering tegra-kbc\n");
+ tegra_kbc_device.dev.platform_data = &cardhu_kbc_platform_data;
+
+ for (i = 0; i < CARDHU_PM269_ROW_COUNT; i++) {
+ data->pin_cfg[i].num = i;
+ data->pin_cfg[i].is_row = true;
+ data->pin_cfg[i].en = true;
+ }
+ for (i = 0; i < CARDHU_PM269_COL_COUNT; i++) {
+ data->pin_cfg[i + KBC_PIN_GPIO_16].num = i;
+ data->pin_cfg[i + KBC_PIN_GPIO_16].en = true;
+ }
+
+ platform_device_register(&tegra_kbc_device);
+ return 0;
+}
+
+int __init cardhu_scroll_init(void)
+{
+ return 0;
+}
+
+#define GPIO_KEY(_id, _gpio, _iswake) \
+ { \
+ .code = _id, \
+ .gpio = TEGRA_GPIO_##_gpio, \
+ .active_low = 1, \
+ .desc = #_id, \
+ .type = EV_KEY, \
+ .wakeup = _iswake, \
+ .debounce_interval = 10, \
+ }
+
+static struct gpio_keys_button cardhu_keys_e1198[] = {
+ [0] = GPIO_KEY(KEY_HOME, PQ0, 0),
+ [1] = GPIO_KEY(KEY_BACK, PQ1, 0),
+ [2] = GPIO_KEY(KEY_MENU, PQ2, 0),
+ [3] = GPIO_KEY(KEY_SEARCH, PQ3, 0),
+ [4] = GPIO_KEY(KEY_VOLUMEUP, PR0, 0),
+ [5] = GPIO_KEY(KEY_VOLUMEDOWN, PR1, 0),
+ [6] = GPIO_KEY(KEY_POWER, PV0, 1),
+};
+
+static struct gpio_keys_platform_data cardhu_keys_e1198_platform_data = {
+ .buttons = cardhu_keys_e1198,
+ .nbuttons = ARRAY_SIZE(cardhu_keys_e1198),
+};
+
+static struct platform_device cardhu_keys_e1198_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &cardhu_keys_e1198_platform_data,
+ },
+};
+
+static struct gpio_keys_button cardhu_keys_e1291[] = {
+ [0] = GPIO_KEY(KEY_MENU, PR0, 0),
+ [1] = GPIO_KEY(KEY_BACK, PR1, 0),
+ [2] = GPIO_KEY(KEY_HOME, PR2, 0),
+ [3] = GPIO_KEY(KEY_SEARCH, PQ3, 0),
+ [4] = GPIO_KEY(KEY_VOLUMEUP, PQ0, 0),
+ [5] = GPIO_KEY(KEY_VOLUMEDOWN, PQ1, 0),
+};
+
+static struct gpio_keys_button cardhu_keys_e1291_a04[] = {
+ [0] = GPIO_KEY(KEY_MENU, PR0, 0),
+ [1] = GPIO_KEY(KEY_BACK, PR1, 0),
+ [2] = GPIO_KEY(KEY_HOME, PQ2, 0),
+ [3] = GPIO_KEY(KEY_SEARCH, PQ3, 0),
+ [4] = GPIO_KEY(KEY_VOLUMEUP, PQ0, 0),
+ [5] = GPIO_KEY(KEY_VOLUMEDOWN, PQ1, 0),
+};
+
+static struct gpio_keys_platform_data cardhu_keys_e1291_platform_data = {
+ .buttons = cardhu_keys_e1291,
+ .nbuttons = ARRAY_SIZE(cardhu_keys_e1291),
+};
+
+static struct platform_device cardhu_keys_e1291_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &cardhu_keys_e1291_platform_data,
+ },
+};
+
+#define INT_KEY(_id, _irq, _iswake, _deb_int) \
+ { \
+ .code = _id, \
+ .irq = _irq, \
+ .active_low = 1, \
+ .desc = #_id, \
+ .type = EV_KEY, \
+ .wakeup = _iswake, \
+ .debounce_interval = _deb_int, \
+ }
+static struct interrupt_keys_button cardhu_int_keys[] = {
+ [0] = INT_KEY(KEY_POWER, TPS6591X_IRQ_BASE + TPS6591X_INT_PWRON, 0, 100),
+};
+
+static struct interrupt_keys_button cardhu_pm298_int_keys[] = {
+ [0] = INT_KEY(KEY_POWER, MAX77663_IRQ_BASE + MAX77663_IRQ_ONOFF_EN0_FALLING, 0, 100),
+ [1] = INT_KEY(KEY_POWER, MAX77663_IRQ_BASE + MAX77663_IRQ_ONOFF_EN0_1SEC, 0, 3000),
+};
+
+static struct interrupt_keys_button cardhu_pm299_int_keys[] = {
+ [0] = INT_KEY(KEY_POWER, RICOH583_IRQ_BASE + RICOH583_IRQ_ONKEY, 0, 100),
+};
+
+static struct interrupt_keys_platform_data cardhu_int_keys_pdata = {
+ .int_buttons = cardhu_int_keys,
+ .nbuttons = ARRAY_SIZE(cardhu_int_keys),
+};
+
+static struct platform_device cardhu_int_keys_device = {
+ .name = "interrupt-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &cardhu_int_keys_pdata,
+ },
+};
+
+int __init cardhu_keys_init(void)
+{
+ int i;
+ struct board_info board_info;
+ struct board_info pmu_board_info;
+
+ tegra_get_board_info(&board_info);
+ if (!((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291) ||
+ (board_info.board_id == BOARD_E1186) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311) ||
+ (board_info.board_id == BOARD_PM269)))
+ return 0;
+
+ pr_info("Registering gpio keys\n");
+
+ if (board_info.board_id == BOARD_E1291) {
+ if (board_info.fab >= BOARD_FAB_A04) {
+ cardhu_keys_e1291_platform_data.buttons =
+ cardhu_keys_e1291_a04;
+ cardhu_keys_e1291_platform_data.nbuttons =
+ ARRAY_SIZE(cardhu_keys_e1291_a04);
+ }
+
+ /* Enable gpio mode for other pins */
+ for (i = 0; i < cardhu_keys_e1291_platform_data.nbuttons; i++)
+ tegra_gpio_enable(cardhu_keys_e1291_platform_data.
+ buttons[i].gpio);
+
+ platform_device_register(&cardhu_keys_e1291_device);
+ } else if (board_info.board_id == BOARD_E1198) {
+ /* For E1198 */
+ for (i = 0; i < ARRAY_SIZE(cardhu_keys_e1198); i++)
+ tegra_gpio_enable(cardhu_keys_e1198[i].gpio);
+
+ platform_device_register(&cardhu_keys_e1198_device);
+ }
+
+ /* Register on-key through pmu interrupt */
+ tegra_get_pmu_board_info(&pmu_board_info);
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM298) {
+ cardhu_int_keys_pdata.int_buttons = cardhu_pm298_int_keys;
+ cardhu_int_keys_pdata.nbuttons =
+ ARRAY_SIZE(cardhu_pm298_int_keys);
+ }
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM299) {
+ cardhu_int_keys_pdata.int_buttons = cardhu_pm299_int_keys;
+ cardhu_int_keys_pdata.nbuttons =
+ ARRAY_SIZE(cardhu_pm299_int_keys);
+ }
+
+ if ((board_info.board_id == BOARD_E1291) ||
+ (board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_E1186) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311) ||
+ (board_info.board_id == BOARD_PM269))
+ platform_device_register(&cardhu_int_keys_device);
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-memory.c b/arch/arm/mach-tegra/board-cardhu-memory.c
new file mode 100644
index 000000000000..c8d541fbbd61
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-memory.c
@@ -0,0 +1,4390 @@
+/*
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "board.h"
+#include "board-cardhu.h"
+#include "tegra3_emc.h"
+#include "fuse.h"
+
+
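+/*
+ * EMC/MC timing tables for the H5TC2G SDRAM part. Each entry is keyed by
+ * the SDRAM frequency in kHz and carries the raw register values that the
+ * Tegra3 EMC scaling code (see tegra3_emc.h) programs at that rate.
+ */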
+static const struct tegra_emc_table cardhu_emc_tables_h5tc2g[] = {
+ {
+ 0x30, /* Rev 3.0 */
+ 27000, /* SDRAM frequency */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x00000004, /* EMC_RFC */
+ 0x00000000, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000007, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x000000cb, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000032, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000005, /* EMC_TXSR */
+ 0x00000005, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000001, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x000000d3, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006288, /* EMC_FBIO_CFG5 */
+ 0xd0780421, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000003e0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07075504, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x0800012d, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000029e, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0x8000000d, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x0f070506, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00140905, /* MC_EMEM_ARB_DA_COVERS */
+ 0x78430306, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0001, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x00001221, /* Mode Register 0 */
+ 0x00100003, /* Mode Register 1 */
+ 0x00200008, /* Mode Register 2 */
+ },
+ {
+ 0x30, /* Rev 3.0 */
+ 54000, /* SDRAM frequency */
+ {
+ 0x00000002, /* EMC_RC */
+ 0x00000008, /* EMC_RFC */
+ 0x00000001, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000007, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x00000198, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000066, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000000a, /* EMC_TXSR */
+ 0x0000000a, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000002, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x000001a6, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006288, /* EMC_FBIO_CFG5 */
+ 0xd0780421, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000003e0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07075504, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x0800012d, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000439, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0x80000014, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x0f070506, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00140905, /* MC_EMEM_ARB_DA_COVERS */
+ 0x78430506, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0001, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x00001221, /* Mode Register 0 */
+ 0x00100003, /* Mode Register 1 */
+ 0x00200008, /* Mode Register 2 */
+ },
+ {
+ 0x30, /* Rev 3.0 */
+ 108000, /* SDRAM frequency */
+ {
+ 0x00000005, /* EMC_RC */
+ 0x00000011, /* EMC_RFC */
+ 0x00000003, /* EMC_RAS */
+ 0x00000001, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000001, /* EMC_RD_RCD */
+ 0x00000001, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000007, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x00000330, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000cc, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000013, /* EMC_TXSR */
+ 0x00000013, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000004, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000034b, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006288, /* EMC_FBIO_CFG5 */
+ 0xd0780421, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000003e0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07075504, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x0800012d, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000076e, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000003, /* MC_EMEM_ARB_CFG */
+ 0x80000027, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x0f070506, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00140906, /* MC_EMEM_ARB_DA_COVERS */
+ 0x78440a07, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0001, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x00001221, /* Mode Register 0 */
+ 0x00100003, /* Mode Register 1 */
+ 0x00200008, /* Mode Register 2 */
+ },
+ {
+ 0x30, /* Rev 3.0 */
+ 416000, /* SDRAM frequency */
+ {
+ 0x00000013, /* EMC_RC */
+ 0x00000041, /* EMC_RFC */
+ 0x0000000d, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000c, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000008, /* EMC_QUSE */
+ 0x00000006, /* EMC_QRST */
+ 0x00000008, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x00000c6c, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x0000031b, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000008, /* EMC_AR2PDEN */
+ 0x00000011, /* EMC_RW2PDEN */
+ 0x00000047, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000d, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000cad, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0xf0120441, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00010000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000006a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f50f, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x0800011d, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x01be000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10404, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x000020ae, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000006, /* MC_EMEM_ARB_CFG */
+ 0x8000004b, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000a, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000e070a, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7027130b, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x00000010, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x00001941, /* Mode Register 0 */
+ 0x00100002, /* Mode Register 1 */
+ 0x00200008, /* Mode Register 2 */
+ },
+ {
+ 0x30, /* Rev 3.0 */
+ 533000, /* SDRAM frequency */
+ {
+ 0x00000018, /* EMC_RC */
+ 0x00000054, /* EMC_RFC */
+ 0x00000011, /* EMC_RAS */
+ 0x00000006, /* EMC_RP */
+ 0x00000003, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000d, /* EMC_W2P */
+ 0x00000006, /* EMC_RD_RCD */
+ 0x00000006, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000008, /* EMC_QUSE */
+ 0x00000006, /* EMC_QRST */
+ 0x00000008, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x00000ffd, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000003ff, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000a, /* EMC_AR2PDEN */
+ 0x00000012, /* EMC_RW2PDEN */
+ 0x0000005b, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000010, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000006, /* EMC_TCLKSTOP */
+ 0x0000103e, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0xf0120441, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00010000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000006a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f50f, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x0800011d, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x01ab000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10404, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x000020ae, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000008, /* MC_EMEM_ARB_CFG */
+ 0x80000060, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000d, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x0010090d, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7028180e, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x00000010, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x00001941, /* Mode Register 0 */
+ 0x00100002, /* Mode Register 1 */
+ 0x00200008, /* Mode Register 2 */
+ },
+};
+
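+/*
+ * Rev 3.2 variant of the tables above; adds the EMC_FBIO_SPARE, EMC_CFG_RSV
+ * and EMC_CFG.DYN_SELF_REF fields to each entry.
+ */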
+static const struct tegra_emc_table cardhu_emc_tables_h5tc2g_a2[] = {
+ {
+ 0x32, /* Rev 3.2 */
+ 25500, /* SDRAM frequency */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x00000003, /* EMC_RFC */
+ 0x00000000, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000007, /* EMC_QSAFE */
+ 0x0000000c, /* EMC_RDV */
+ 0x000000bd, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x0000002f, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000005, /* EMC_TXSR */
+ 0x00000005, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000001, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x000000c3, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000280, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00020001, /* MC_EMEM_ARB_CFG */
+ 0xc0000008, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74430303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xd8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 51000, /* SDRAM frequency */
+ {
+ 0x00000002, /* EMC_RC */
+ 0x00000008, /* EMC_RFC */
+ 0x00000001, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000007, /* EMC_QSAFE */
+ 0x0000000c, /* EMC_RDV */
+ 0x00000181, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000009, /* EMC_TXSR */
+ 0x00000009, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000002, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000018e, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0xc000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73430303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xd8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 102000, /* SDRAM frequency */
+ {
+ 0x00000004, /* EMC_RC */
+ 0x00000010, /* EMC_RFC */
+ 0x00000003, /* EMC_RAS */
+ 0x00000001, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000001, /* EMC_RD_RCD */
+ 0x00000001, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000007, /* EMC_QSAFE */
+ 0x0000000c, /* EMC_RDV */
+ 0x00000303, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c0, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000012, /* EMC_TXSR */
+ 0x00000012, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000004, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000031c, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0xc0000013, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72830504, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xd8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 375000, /* SDRAM frequency */
+ {
+ 0x00000011, /* EMC_RC */
+ 0x0000003a, /* EMC_RFC */
+ 0x0000000c, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000003, /* EMC_R2W */
+ 0x00000008, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000a, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000004, /* EMC_WDV */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000008, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x00000b2d, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000002cb, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000008, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000040, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000009, /* EMC_TCKE */
+ 0x0000000c, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000b6d, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0x00200084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS0 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS1 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS2 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS3 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS4 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS5 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS6 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f508, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0184000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000174b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000005, /* MC_EMEM_ARB_CFG */
+ 0x80000044, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */
+ 0x75c6110a, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x58000000, /* EMC_FBIO_SPARE */
+ 0xff00ff88, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000521, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200000, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 400000, /* SDRAM frequency */
+ {
+ 0x00000012, /* EMC_RC */
+ 0x00000040, /* EMC_RFC */
+ 0x0000000d, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000c, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000007, /* EMC_QUSE */
+ 0x00000005, /* EMC_QRST */
+ 0x00000008, /* EMC_QSAFE */
+ 0x0000000e, /* EMC_RDV */
+ 0x00000c2e, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x0000030b, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000008, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000008, /* EMC_AR2PDEN */
+ 0x00000011, /* EMC_RW2PDEN */
+ 0x00000046, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x0000000a, /* EMC_TCKE */
+ 0x0000000d, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000c6f, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0x001c0084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00034000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f508, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x017f000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80001941, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000006, /* MC_EMEM_ARB_CFG */
+ 0x8000004a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000a, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000e070a, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7547130b, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x58000000, /* EMC_FBIO_SPARE */
+ 0xff00ff88, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000731, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 533000, /* SDRAM frequency */
+ {
+ 0x00000018, /* EMC_RC */
+ 0x00000054, /* EMC_RFC */
+ 0x00000011, /* EMC_RAS */
+ 0x00000006, /* EMC_RP */
+ 0x00000003, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000d, /* EMC_W2P */
+ 0x00000006, /* EMC_RD_RCD */
+ 0x00000006, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000008, /* EMC_QUSE */
+ 0x00000006, /* EMC_QRST */
+ 0x00000008, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x00000ffd, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000003ff, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x0000000b, /* EMC_PDEX2WR */
+ 0x0000000b, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000a, /* EMC_AR2PDEN */
+ 0x00000012, /* EMC_RW2PDEN */
+ 0x0000005b, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x0000000d, /* EMC_TCKE */
+ 0x00000010, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000006, /* EMC_TCLKSTOP */
+ 0x0000103e, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0x00120084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00010000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00010000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000006a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc084, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f508, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x01ab000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10404, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800020ae, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000008, /* MC_EMEM_ARB_CFG */
+ 0x80000060, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000d, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x0010090d, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7028180e, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000941, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 750000, /* SDRAM frequency */
+ {
+ 0x00000025, /* EMC_RC */
+ 0x0000007e, /* EMC_RFC */
+ 0x0000001a, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000d, /* EMC_W2R */
+ 0x00000004, /* EMC_R2P */
+ 0x00000013, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000007, /* EMC_WDV */
+ 0x0000000b, /* EMC_QUSE */
+ 0x00000009, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x00000011, /* EMC_RDV */
+ 0x0000169a, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000608, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000012, /* EMC_PDEX2WR */
+ 0x00000012, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000f, /* EMC_AR2PDEN */
+ 0x00000018, /* EMC_RW2PDEN */
+ 0x00000088, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000014, /* EMC_TCKE */
+ 0x00000018, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000007, /* EMC_TCLKSTABLE */
+ 0x00000008, /* EMC_TCLKSTOP */
+ 0x00001860, /* EMC_TREFBW */
+ 0x0000000c, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00005088, /* EMC_FBIO_CFG5 */
+ 0xf0080191, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00000008, /* EMC_DLL_XFORM_DQS0 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS1 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS2 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0600013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x22220000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f501, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x07000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0180000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000308c, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000c, /* MC_EMEM_ARB_CFG */
+ 0x80000090, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x08040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00160d13, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72ac2414, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xf8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff49, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000d71, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200018, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 800000, /* SDRAM frequency */
+ {
+ 0x00000025, /* EMC_RC */
+ 0x0000007e, /* EMC_RFC */
+ 0x0000001a, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000d, /* EMC_W2R */
+ 0x00000004, /* EMC_R2P */
+ 0x00000013, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000007, /* EMC_WDV */
+ 0x0000000b, /* EMC_QUSE */
+ 0x00000009, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x00000011, /* EMC_RDV */
+ 0x00001820, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000608, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000012, /* EMC_PDEX2WR */
+ 0x00000012, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000f, /* EMC_AR2PDEN */
+ 0x00000018, /* EMC_RW2PDEN */
+ 0x00000088, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000014, /* EMC_TCKE */
+ 0x00000018, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000007, /* EMC_TCLKSTABLE */
+ 0x00000008, /* EMC_TCLKSTOP */
+ 0x00001860, /* EMC_TREFBW */
+ 0x0000000c, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00005088, /* EMC_FBIO_CFG5 */
+ 0xf0070191, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0600013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x22220000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f501, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x07000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0180000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000308c, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000c, /* MC_EMEM_ARB_CFG */
+ 0x80000090, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x08040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00160d13, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72ac2414, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xf8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff49, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000d71, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200018, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+};
+
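+/* Rev 3.2 tables for the 2GB1R (likely 2 GB, single-rank) configuration */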
+static const struct tegra_emc_table cardhu_emc_tables_h5tc2g_a2_2GB1R[] = {
+ {
+ 0x32, /* Rev 3.2 */
+ 51000, /* SDRAM frequency */
+ {
+ 0x00000002, /* EMC_RC */
+ 0x0000000d, /* EMC_RFC */
+ 0x00000001, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x00000181, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000000e, /* EMC_TXSR */
+ 0x0000000e, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000002, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000018e, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS3 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS4 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS5 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS6 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00010001, /* MC_EMEM_ARB_CFG */
+ 0xc000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74630303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 102000, /* SDRAM frequency */
+ {
+ 0x00000004, /* EMC_RC */
+ 0x0000001a, /* EMC_RFC */
+ 0x00000003, /* EMC_RAS */
+ 0x00000001, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000001, /* EMC_RD_RCD */
+ 0x00000001, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x00000303, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c0, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000001c, /* EMC_TXSR */
+ 0x0000001c, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000004, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000031c, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS3 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS4 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS5 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS6 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0xc0000013, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73c30504, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 204000, /* SDRAM frequency */
+ {
+ 0x00000009, /* EMC_RC */
+ 0x00000035, /* EMC_RFC */
+ 0x00000007, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x00000607, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000038, /* EMC_TXSR */
+ 0x00000038, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000007, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000638, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x004400a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000003, /* MC_EMEM_ARB_CFG */
+ 0xc0000025, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0405, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73840a06, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 375000, /* SDRAM frequency */
+ {
+ 0x00000011, /* EMC_RC */
+ 0x0000006f, /* EMC_RFC */
+ 0x0000000c, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000003, /* EMC_R2W */
+ 0x00000008, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000a, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000004, /* EMC_WDV */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000a, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x00000b2d, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000002cb, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000075, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000c, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000b6d, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0x00200084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS0 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS1 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS2 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS3 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS4 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS5 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS6 */
+ 0x0003c000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f508, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0150000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000174b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000005, /* MC_EMEM_ARB_CFG */
+ 0x80000044, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */
+ 0x75c6110a, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x58000000, /* EMC_FBIO_SPARE */
+ 0xff00ff88, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000521, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200000, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 750000, /* SDRAM frequency */
+ {
+ 0x00000023, /* EMC_RC */
+ 0x000000df, /* EMC_RFC */
+ 0x00000019, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x0000000d, /* EMC_W2R */
+ 0x00000004, /* EMC_R2P */
+ 0x00000013, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000007, /* EMC_WDV */
+ 0x0000000b, /* EMC_QUSE */
+ 0x00000009, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x00000011, /* EMC_RDV */
+ 0x0000169a, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000005a6, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000003, /* EMC_PDEX2WR */
+ 0x00000010, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000e, /* EMC_AR2PDEN */
+ 0x00000018, /* EMC_RW2PDEN */
+ 0x000000e9, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000005, /* EMC_TCKE */
+ 0x00000017, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000007, /* EMC_TCLKSTABLE */
+ 0x00000008, /* EMC_TCLKSTOP */
+ 0x000016da, /* EMC_TREFBW */
+ 0x0000000c, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00005088, /* EMC_FBIO_CFG5 */
+ 0xf0080191, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00000008, /* EMC_DLL_XFORM_DQS0 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS1 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS2 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0600013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x22220000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f501, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x07000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x00df000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80002d93, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000b, /* MC_EMEM_ARB_CFG */
+ 0x80000087, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000012, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x08040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00160d12, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73cc2213, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xf8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff49, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000d71, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200018, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+};
+
+static const struct tegra_emc_table cardhu_emc_tables_k4b4g0846b_hyk0[] = {
+ {
+ 0x32, /* Rev 3.2 */
+ 25500, /* SDRAM frequency */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x00000006, /* EMC_RFC */
+ 0x00000000, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x000000c0, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000030, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000007, /* EMC_TXSR */
+ 0x00000007, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000001, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x000000c7, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS3 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS4 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS5 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS6 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000287, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00020001, /* MC_EMEM_ARB_CFG */
+ 0xc0000008, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x75830303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 51000, /* SDRAM frequency */
+ {
+ 0x00000002, /* EMC_RC */
+ 0x0000000d, /* EMC_RFC */
+ 0x00000001, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x00000181, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000000e, /* EMC_TXSR */
+ 0x0000000e, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000002, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000018e, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS3 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS4 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS5 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS6 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00010001, /* MC_EMEM_ARB_CFG */
+ 0xc000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74630303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 102000, /* SDRAM frequency */
+ {
+ 0x00000004, /* EMC_RC */
+ 0x0000001a, /* EMC_RFC */
+ 0x00000003, /* EMC_RAS */
+ 0x00000001, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000001, /* EMC_RD_RCD */
+ 0x00000001, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x00000303, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c0, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000001c, /* EMC_TXSR */
+ 0x0000001c, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000004, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000031c, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS3 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS4 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS5 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS6 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */
+ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0xc0000013, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73c30504, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 204000, /* SDRAM frequency */
+ {
+ 0x00000009, /* EMC_RC */
+ 0x00000035, /* EMC_RFC */
+ 0x00000007, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000002, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000b, /* EMC_RDV */
+ 0x00000607, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000038, /* EMC_TXSR */
+ 0x00000038, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000007, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000638, /* EMC_TREFBW */
+ 0x00000006, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004288, /* EMC_FBIO_CFG5 */
+ 0x004400a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00080000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00080000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800211c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000168, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000003, /* MC_EMEM_ARB_CFG */
+ 0xc0000025, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06020102, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0405, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73840a06, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 375000, /* SDRAM frequency */
+ {
+ 0x00000011, /* EMC_RC */
+ 0x00000060, /* EMC_RFC */
+ 0x0000000c, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000003, /* EMC_R2W */
+ 0x00000008, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000a, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000004, /* EMC_WDV */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000a, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x00000b2d, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000002cb, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000066, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000c, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000b6d, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0x00200084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00014000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00014000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00020000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f508, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x015f000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000174b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000005, /* MC_EMEM_ARB_CFG */
+ 0x80000044, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7086110a, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x58000000, /* EMC_FBIO_SPARE */
+ 0xff00ff88, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000521, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200000, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 400000, /* SDRAM frequency */
+ {
+ 0x00000012, /* EMC_RC */
+ 0x00000066, /* EMC_RFC */
+ 0x0000000c, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000003, /* EMC_R2W */
+ 0x00000008, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000a, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000004, /* EMC_WDV */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000a, /* EMC_QSAFE */
+ 0x0000000c, /* EMC_RDV */
+ 0x00000bf0, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000002fc, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000008, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000006c, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000c, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000c30, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00007088, /* EMC_FBIO_CFG5 */
+ 0x001d0084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00034000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00034000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00040000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0800013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f508, /* EMC_XM2COMPPADCTRL */
+ 0x05057404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0158000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800018c8, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000006, /* MC_EMEM_ARB_CFG */
+ 0x80000048, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7566120a, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff89, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000000, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000521, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200000, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 750000, /* SDRAM frequency */
+ {
+ 0x00000023, /* EMC_RC */
+ 0x000000c1, /* EMC_RFC */
+ 0x00000019, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x0000000d, /* EMC_W2R */
+ 0x00000004, /* EMC_R2P */
+ 0x00000013, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000007, /* EMC_WDV */
+ 0x0000000b, /* EMC_QUSE */
+ 0x00000009, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x00000011, /* EMC_RDV */
+ 0x0000169a, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000005a6, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000003, /* EMC_PDEX2WR */
+ 0x00000010, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000e, /* EMC_AR2PDEN */
+ 0x00000018, /* EMC_RW2PDEN */
+ 0x000000cb, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000005, /* EMC_TCKE */
+ 0x00000017, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000007, /* EMC_TCLKSTABLE */
+ 0x00000008, /* EMC_TCLKSTOP */
+ 0x000016da, /* EMC_TREFBW */
+ 0x0000000c, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00005088, /* EMC_FBIO_CFG5 */
+ 0xf0080191, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00000008, /* EMC_DLL_XFORM_DQS0 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS1 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS2 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0600013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x22220000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f501, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x08000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x00fd000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80002d93, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000b, /* MC_EMEM_ARB_CFG */
+ 0x80000087, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000012, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x08040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00160d12, /* MC_EMEM_ARB_DA_COVERS */
+ 0x710c2213, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xf8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff49, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000d71, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200018, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 800000, /* SDRAM frequency */
+ {
+ 0x00000025, /* EMC_RC */
+ 0x000000ce, /* EMC_RFC */
+ 0x0000001a, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x0000000d, /* EMC_W2R */
+ 0x00000004, /* EMC_R2P */
+ 0x00000013, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000007, /* EMC_WDV */
+ 0x0000000b, /* EMC_QUSE */
+ 0x00000009, /* EMC_QRST */
+ 0x0000000b, /* EMC_QSAFE */
+ 0x00000011, /* EMC_RDV */
+ 0x00001820, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000608, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000003, /* EMC_PDEX2WR */
+ 0x00000012, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000f, /* EMC_AR2PDEN */
+ 0x00000018, /* EMC_RW2PDEN */
+ 0x000000d8, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000005, /* EMC_TCKE */
+ 0x00000018, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000007, /* EMC_TCLKSTABLE */
+ 0x00000008, /* EMC_TCLKSTOP */
+ 0x00001860, /* EMC_TREFBW */
+ 0x0000000c, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00005088, /* EMC_FBIO_CFG5 */
+ 0xf0070191, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS3 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS4 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS5 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS6 */
+ 0x0000800a, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */
+ 0x000002a0, /* EMC_XM2CMDPADCTRL */
+ 0x0600013d, /* EMC_XM2DQSPADCTRL2 */
+ 0x22220000, /* EMC_XM2DQPADCTRL2 */
+ 0x77fff884, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f501, /* EMC_XM2COMPPADCTRL */
+ 0x07077404, /* EMC_XM2VTTGENPADCTRL */
+ 0x54000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x080001e8, /* EMC_XM2QUSEPADCTRL */
+ 0x09000021, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x00f0000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000308c, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000c, /* MC_EMEM_ARB_CFG */
+ 0x80000090, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x08040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00160d13, /* MC_EMEM_ARB_DA_COVERS */
+ 0x734c2414, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xf8000000, /* EMC_FBIO_SPARE */
+ 0xff00ff49, /* EMC_CFG_RSV */
+ },
+ 0x00000040, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x80000d71, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200018, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+};
+
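
Each array above is a complete EMC DVFS table for one specific DRAM part, and the board init code is expected to pick exactly one of them at boot and hand it to the EMC driver, which then steps through the per-frequency entries (25.5 MHz up to 800 MHz here) as the memory clock scales. The sketch below is not part of this patch; it only illustrates that selection step. The detection helper cardhu_get_dram_type() and the DRAM_* enum values are hypothetical names used for illustration, and tegra_init_emc() is assumed to be the registration hook taking a table pointer and entry count; the real plumbing should be checked against the rest of board-cardhu-memory.c.

/*
 * Minimal sketch, assuming a tegra_init_emc(table, size) registration hook
 * and a hypothetical cardhu_get_dram_type() detection helper. ARRAY_SIZE()
 * comes from <linux/kernel.h>, already included by the board file.
 */
static int __init cardhu_emc_select_table_sketch(void)
{
	/* Pick the table that matches the DRAM part fitted on this board. */
	if (cardhu_get_dram_type() == DRAM_K4B4G0846B_HYK0)
		tegra_init_emc(cardhu_emc_tables_k4b4g0846b_hyk0,
			       ARRAY_SIZE(cardhu_emc_tables_k4b4g0846b_hyk0));
	else
		tegra_init_emc(cardhu_emc_tables_h5tc2g_a2_2GB1R,
			       ARRAY_SIZE(cardhu_emc_tables_h5tc2g_a2_2GB1R));
	return 0;
}

In the real file the choice is presumably keyed off the board's memory strap/SKU information rather than a bare enum; the table that follows is selected the same way for boards populated with that part.
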
+static const struct tegra_emc_table cardhu_emc_tables_k4p8g304eb[] = {
+ {
+ 0x32, /* Rev 3.2 */
+ 25500, /* SDRAM frequency */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x00000003, /* EMC_RFC */
+ 0x00000002, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x0000005e, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000017, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x00000004, /* EMC_TXSR */
+ 0x00000004, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x00000068, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00098000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00098000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00098000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00098000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00100220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x0000000a, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800001c2, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00020001, /* MC_EMEM_ARB_CFG */
+ 0xc0000008, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74030303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000009, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 51000, /* SDRAM frequency */
+ {
+ 0x00000003, /* EMC_RC */
+ 0x00000006, /* EMC_RFC */
+ 0x00000002, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x000000c0, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000030, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x00000008, /* EMC_TXSR */
+ 0x00000008, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000000d5, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00100220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000013, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000287, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00010001, /* MC_EMEM_ARB_CFG */
+ 0xc000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72c30303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000009, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 102000, /* SDRAM frequency */
+ {
+ 0x00000006, /* EMC_RC */
+ 0x0000000d, /* EMC_RFC */
+ 0x00000004, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x00000009, /* EMC_RDV */
+ 0x00000181, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x0000000f, /* EMC_TXSR */
+ 0x0000000f, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000001a9, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00120220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000025, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0xc0000013, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060403, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72430504, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x10000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x0000000a, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 204000, /* SDRAM frequency */
+ {
+ 0x0000000c, /* EMC_RC */
+ 0x0000001a, /* EMC_RFC */
+ 0x00000008, /* EMC_RAS */
+ 0x00000003, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000006, /* EMC_W2P */
+ 0x00000003, /* EMC_RD_RCD */
+ 0x00000003, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x0000000a, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x00000303, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c0, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000003, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x0000001d, /* EMC_TXSR */
+ 0x0000001d, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000b, /* EMC_TFAW */
+ 0x00000005, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x00000351, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x004400a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00074000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00074000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00074000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00074000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00100220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x0000004a, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000003, /* MC_EMEM_ARB_CFG */
+ 0xc0000025, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02030001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00070506, /* MC_EMEM_ARB_DA_COVERS */
+ 0x71e40a07, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000013, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010042, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 533000, /* SDRAM frequency */
+ {
+ 0x0000001f, /* EMC_RC */
+ 0x00000045, /* EMC_RFC */
+ 0x00000016, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000008, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000d, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000005, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000004, /* EMC_WDV */
+ 0x00000009, /* EMC_QUSE */
+ 0x00000006, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x000007df, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000001f7, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000003, /* EMC_PDEX2WR */
+ 0x00000003, /* EMC_PDEX2RD */
+ 0x00000009, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000004b, /* EMC_TXSR */
+ 0x0000004b, /* EMC_TXSRDLL */
+ 0x00000008, /* EMC_TCKE */
+ 0x0000001b, /* EMC_TFAW */
+ 0x0000000c, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000008aa, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006282, /* EMC_FBIO_CFG5 */
+ 0xf0120091, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x000b0220, /* EMC_XM2CMDPADCTRL */
+ 0x0800003d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f408, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x000000c0, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10202, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800010d9, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000008, /* MC_EMEM_ARB_CFG */
+ 0x80000060, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000010, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000a, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000d, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x05040002, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00110b10, /* MC_EMEM_ARB_DA_COVERS */
+ 0x71c81811, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xe0000000, /* EMC_FBIO_SPARE */
+ 0xff00ff88, /* EMC_CFG_RSV */
+ },
+ 0x00000030, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x000100c2, /* Mode Register 1 */
+ 0x00020006, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+};
+
+static const struct tegra_emc_table cardhu_emc_tables_edb8132b2ma[] = {
+ {
+ 0x32, /* Rev 3.2 */
+ 25500, /* SDRAM frequency */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x00000003, /* EMC_RFC */
+ 0x00000002, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x00000060, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x00000004, /* EMC_TXSR */
+ 0x00000004, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x0000006b, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00120220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x0000000a, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00020001, /* MC_EMEM_ARB_CFG */
+ 0xc0000008, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73e30303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000009, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 51000, /* SDRAM frequency */
+ {
+ 0x00000003, /* EMC_RC */
+ 0x00000006, /* EMC_RFC */
+ 0x00000002, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x000000c0, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000030, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x00000008, /* EMC_TXSR */
+ 0x00000008, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000000d5, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00120220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000013, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000287, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00010001, /* MC_EMEM_ARB_CFG */
+ 0xc000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72c30303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000009, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 102000, /* SDRAM frequency */
+ {
+ 0x00000006, /* EMC_RC */
+ 0x0000000d, /* EMC_RFC */
+ 0x00000004, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x00000181, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x0000000f, /* EMC_TXSR */
+ 0x0000000f, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000001a9, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x007800a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS0 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS1 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS2 */
+ 0x000a0000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00080000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00120220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000025, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0xc0000013, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060403, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72430504, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x0000000a, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 204000, /* SDRAM frequency */
+ {
+ 0x0000000c, /* EMC_RC */
+ 0x0000001a, /* EMC_RFC */
+ 0x00000008, /* EMC_RAS */
+ 0x00000003, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000006, /* EMC_W2P */
+ 0x00000003, /* EMC_RD_RCD */
+ 0x00000003, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000004, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x0000000b, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x00000303, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c0, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000003, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x0000001d, /* EMC_TXSR */
+ 0x0000001d, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000b, /* EMC_TFAW */
+ 0x00000005, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x00000351, /* EMC_TREFBW */
+ 0x00000005, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x004400a4, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00070000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00070000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00070000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00070000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00078000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000d0220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x0000004a, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000003, /* MC_EMEM_ARB_CFG */
+ 0xc0000025, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02030001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00070506, /* MC_EMEM_ARB_DA_COVERS */
+ 0x71e40a07, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0xd0000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000013, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010042, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ 0x00000001, /* EMC_CFG.DYN_SELF_REF */
+ },
+ {
+ 0x32, /* Rev 3.2 */
+ 533000, /* SDRAM frequency */
+ {
+ 0x0000001f, /* EMC_RC */
+ 0x00000045, /* EMC_RFC */
+ 0x00000016, /* EMC_RAS */
+ 0x00000009, /* EMC_RP */
+ 0x00000008, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x0000000d, /* EMC_W2P */
+ 0x00000009, /* EMC_RD_RCD */
+ 0x00000009, /* EMC_WR_RCD */
+ 0x00000005, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000004, /* EMC_WDV */
+ 0x00000009, /* EMC_QUSE */
+ 0x00000006, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x000007df, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000001f7, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000003, /* EMC_PDEX2WR */
+ 0x00000003, /* EMC_PDEX2RD */
+ 0x00000009, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000004b, /* EMC_TXSR */
+ 0x0000004b, /* EMC_TXSRDLL */
+ 0x00000008, /* EMC_TCKE */
+ 0x0000001b, /* EMC_TFAW */
+ 0x0000000c, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000008aa, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006282, /* EMC_FBIO_CFG5 */
+ 0xf0120091, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x00070220, /* EMC_XM2CMDPADCTRL */
+ 0x0400003d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f408, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x000000c0, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10202, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800010d9, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000008, /* MC_EMEM_ARB_CFG */
+ 0x80000060, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000010, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000a, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000d, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x05040002, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00110b10, /* MC_EMEM_ARB_DA_COVERS */
+ 0x71c81811, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x60000000, /* EMC_FBIO_SPARE */
+ 0xff00ff88, /* EMC_CFG_RSV */
+ },
+ 0x00000030, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x000100c2, /* Mode Register 1 */
+ 0x00020006, /* Mode Register 2 */
+ 0x00000000, /* EMC_CFG.DYN_SELF_REF */
+ },
+};
+
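+/*
+ * Select the EMC frequency table that matches the detected board and
+ * memory SKU. Boards and SKUs without a matching table are skipped,
+ * so EMC scaling stays uninitialized for them.
+ */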
+int cardhu_emc_init(void)
+{
+ struct board_info board;
+
+ tegra_get_board_info(&board);
+
+ switch (board.board_id) {
+ case BOARD_PM269:
+ case BOARD_E1257:
+ if (MEMORY_TYPE(board.sku) == SKU_MEMORY_ELPIDA)
+ tegra_init_emc(cardhu_emc_tables_edb8132b2ma,
+ ARRAY_SIZE(cardhu_emc_tables_edb8132b2ma));
+ else
+ tegra_init_emc(cardhu_emc_tables_k4p8g304eb,
+ ARRAY_SIZE(cardhu_emc_tables_k4p8g304eb));
+ break;
+
+ case BOARD_PM305:
+ case BOARD_PM311:
+ break;
+ default:
+ if (tegra_get_revision() == TEGRA_REVISION_A01)
+ tegra_init_emc(cardhu_emc_tables_h5tc2g,
+ ARRAY_SIZE(cardhu_emc_tables_h5tc2g));
+ else if (MEMORY_TYPE(board.sku) == SKU_MEMORY_CARDHU_1GB_1R)
+ tegra_init_emc(cardhu_emc_tables_h5tc2g_a2,
+ ARRAY_SIZE(cardhu_emc_tables_h5tc2g_a2));
+ else if (MEMORY_TYPE(board.sku) ==
+ SKU_MEMORY_CARDHU_2GB_1R_HYK0)
+ tegra_init_emc(cardhu_emc_tables_k4b4g0846b_hyk0,
+ ARRAY_SIZE(cardhu_emc_tables_k4b4g0846b_hyk0));
+ else if (MEMORY_TYPE(board.sku) ==
+ SKU_MEMORY_CARDHU_2GB_1R_HYNIX)
+ tegra_init_emc(cardhu_emc_tables_h5tc2g_a2_2GB1R,
+ ARRAY_SIZE(cardhu_emc_tables_h5tc2g_a2_2GB1R));
+ break;
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-panel.c b/arch/arm/mach-tegra/board-cardhu-panel.c
new file mode 100644
index 000000000000..da281ecff806
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-panel.c
@@ -0,0 +1,1244 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-panel.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/ion.h>
+#include <linux/tegra_ion.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/earlysuspend.h>
+#include <linux/pwm_backlight.h>
+#include <asm/atomic.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "board.h"
+#include "board-cardhu.h"
+#include "devices.h"
+#include "gpio-names.h"
+
+/* Select DSI panel to be used. */
+#define DSI_PANEL_219 0
+#define DSI_PANEL_218 1
+#define AVDD_LCD PMU_TCA6416_GPIO_PORT17
+#define DSI_PANEL_RESET 0
+
+/* Select LVDS panel resolution. 13X7 is default */
+#define PM313_LVDS_PANEL_19X12 1
+#define PM313_LVDS_PANEL_BPP 1 /* 0:24bpp, 1:18bpp */
+
+/* PM313 display board specific pins */
+#define pm313_R_FDE TEGRA_GPIO_PW0
+#define pm313_R_FB TEGRA_GPIO_PN4
+#define pm313_MODE0 TEGRA_GPIO_PZ4
+#define pm313_MODE1 TEGRA_GPIO_PW1
+#define pm313_BPP TEGRA_GPIO_PN6 /* 0:24bpp, 1:18bpp */
+#define pm313_lvds_shutdown TEGRA_GPIO_PH1
+
+/* E1247 reworked for pm269 pins */
+#define e1247_pm269_lvds_shutdown TEGRA_GPIO_PN6
+
+/* E1247 cardhu default display board pins */
+#define cardhu_lvds_shutdown TEGRA_GPIO_PL2
+
+/* Common pins (backlight) shared by all display boards */
+#define cardhu_bl_enb TEGRA_GPIO_PH2
+#define cardhu_bl_pwm TEGRA_GPIO_PH0
+#define cardhu_hdmi_hpd TEGRA_GPIO_PN7
+
+#if defined(DSI_PANEL_219) || defined(DSI_PANEL_218)
+#define cardhu_dsia_bl_enb TEGRA_GPIO_PW1
+#define cardhu_dsib_bl_enb TEGRA_GPIO_PW0
+#define cardhu_dsi_panel_reset TEGRA_GPIO_PD2
+#endif
+
+#ifdef CONFIG_TEGRA_DC
+static struct regulator *cardhu_hdmi_reg = NULL;
+static struct regulator *cardhu_hdmi_pll = NULL;
+static struct regulator *cardhu_hdmi_vddio = NULL;
+#endif
+
+static atomic_t sd_brightness = ATOMIC_INIT(255);
+
+#ifdef CONFIG_TEGRA_CARDHU_DSI
+static struct regulator *cardhu_dsi_reg = NULL;
+#else
+static struct regulator *cardhu_lvds_reg = NULL;
+static struct regulator *cardhu_lvds_vdd_bl = NULL;
+static struct regulator *cardhu_lvds_vdd_panel = NULL;
+#endif
+
+static struct board_info board_info;
+static struct board_info display_board_info;
+
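+/*
+ * Measured backlight response table: maps a requested brightness level
+ * (0-255) to the value actually programmed. cardhu_backlight_init()
+ * warns if it does not contain exactly 256 entries.
+ */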
+static tegra_dc_bl_output cardhu_bl_output_measured = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 70, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 179, 180, 181,
+ 182, 184, 185, 186, 187, 188, 189, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255
+};
+
+static p_tegra_dc_bl_output bl_output;
+
+static int cardhu_backlight_init(struct device *dev)
+{
+ int ret;
+
+ bl_output = cardhu_bl_output_measured;
+
+ if (WARN_ON(ARRAY_SIZE(cardhu_bl_output_measured) != 256))
+ pr_err("bl_output array does not have 256 elements\n");
+
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+ tegra_gpio_disable(cardhu_bl_pwm);
+
+ ret = gpio_request(cardhu_bl_enb, "backlight_enb");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(cardhu_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(cardhu_bl_enb);
+ else
+ tegra_gpio_enable(cardhu_bl_enb);
+
+ return ret;
+#endif
+
+#if DSI_PANEL_219 || DSI_PANEL_218
+ /* Enable back light for DSIa panel */
+ pr_info("cardhu_dsi_backlight_init\n");
+ ret = gpio_request(cardhu_dsia_bl_enb, "dsia_bl_enable");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(cardhu_dsia_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(cardhu_dsia_bl_enb);
+ else
+ tegra_gpio_enable(cardhu_dsia_bl_enb);
+
+ /* Enable back light for DSIb panel */
+ ret = gpio_request(cardhu_dsib_bl_enb, "dsib_bl_enable");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(cardhu_dsib_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(cardhu_dsib_bl_enb);
+ else
+ tegra_gpio_enable(cardhu_dsib_bl_enb);
+#endif
+
+ return ret;
+}
+
+static void cardhu_backlight_exit(struct device *dev)
+{
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+ /* int ret; */
+ /*ret = gpio_request(cardhu_bl_enb, "backlight_enb");*/
+ gpio_set_value(cardhu_bl_enb, 0);
+ gpio_free(cardhu_bl_enb);
+ tegra_gpio_disable(cardhu_bl_enb);
+ return;
+#endif
+#if DSI_PANEL_219 || DSI_PANEL_218
+ /* Disable back light for DSIa panel */
+ gpio_set_value(cardhu_dsia_bl_enb, 0);
+ gpio_free(cardhu_dsia_bl_enb);
+ tegra_gpio_disable(cardhu_dsia_bl_enb);
+
+ /* Disable back light for DSIb panel */
+ gpio_set_value(cardhu_dsib_bl_enb, 0);
+ gpio_free(cardhu_dsib_bl_enb);
+ tegra_gpio_disable(cardhu_dsib_bl_enb);
+
+ gpio_set_value(cardhu_lvds_shutdown, 1);
+ mdelay(20);
+#endif
+}
+
+static int cardhu_backlight_notify(struct device *unused, int brightness)
+{
+ int cur_sd_brightness = atomic_read(&sd_brightness);
+
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+ /* Drive the backlight enable GPIO from the requested brightness */
+ gpio_set_value(cardhu_bl_enb, !!brightness);
+#elif DSI_PANEL_219 || DSI_PANEL_218
+ /* DSIa */
+ gpio_set_value(cardhu_dsia_bl_enb, !!brightness);
+
+ /* DSIb */
+ gpio_set_value(cardhu_dsib_bl_enb, !!brightness);
+#endif
+
+ /* SD brightness is an 8-bit scale factor, where 255 means 100%. */
+ brightness = (brightness * cur_sd_brightness) / 255;
+
+ /* Apply any backlight response curve */
+ if (brightness > 255) {
+ pr_err("Error: Brightness > 255!\n");
+ } else {
+ /* This value depends on the panel.
+ Current 19X12 panel with PM313 gets
+ full brightness when the output is 0. */
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313)
+ brightness = 255 - bl_output[brightness];
+ else
+ brightness = bl_output[brightness];
+ }
+
+ return brightness;
+}
+
+static int cardhu_disp1_check_fb(struct device *dev, struct fb_info *info);
+
+static struct platform_pwm_backlight_data cardhu_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = 255,
+ .dft_brightness = 224,
+ .pwm_period_ns = 1000000,
+ .init = cardhu_backlight_init,
+ .exit = cardhu_backlight_exit,
+ .notify = cardhu_backlight_notify,
+ /* Only toggle backlight on fb blank notifications for disp1 */
+ .check_fb = cardhu_disp1_check_fb,
+};
+
+static struct platform_device cardhu_backlight_device = {
+ .name = "pwm-backlight",
+ .id = -1,
+ .dev = {
+ .platform_data = &cardhu_backlight_data,
+ },
+};
+
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+static int cardhu_panel_enable(void)
+{
+ if (cardhu_lvds_reg == NULL) {
+ cardhu_lvds_reg = regulator_get(NULL, "vdd_lvds");
+ if (WARN_ON(IS_ERR(cardhu_lvds_reg)))
+ pr_err("%s: couldn't get regulator vdd_lvds: %ld\n",
+ __func__, PTR_ERR(cardhu_lvds_reg));
+ else
+ regulator_enable(cardhu_lvds_reg);
+ }
+
+ if (cardhu_lvds_vdd_bl == NULL) {
+ cardhu_lvds_vdd_bl = regulator_get(NULL, "vdd_backlight");
+ if (WARN_ON(IS_ERR(cardhu_lvds_vdd_bl)))
+ pr_err("%s: couldn't get regulator vdd_backlight: %ld\n",
+ __func__, PTR_ERR(cardhu_lvds_vdd_bl));
+ else
+ regulator_enable(cardhu_lvds_vdd_bl);
+ }
+
+ if (cardhu_lvds_vdd_panel == NULL) {
+ cardhu_lvds_vdd_panel = regulator_get(NULL, "vdd_lcd_panel");
+ if (WARN_ON(IS_ERR(cardhu_lvds_vdd_panel)))
+ pr_err("%s: couldn't get regulator vdd_lcd_panel: %ld\n",
+ __func__, PTR_ERR(cardhu_lvds_vdd_panel));
+ else
+ regulator_enable(cardhu_lvds_vdd_panel);
+ }
+
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
+ /* lvds configuration */
+ gpio_set_value(pm313_R_FDE, 1);
+ gpio_set_value(pm313_R_FB, 1);
+ gpio_set_value(pm313_MODE0, 1);
+ gpio_set_value(pm313_MODE1, 0);
+ gpio_set_value(pm313_BPP, PM313_LVDS_PANEL_BPP);
+
+ /* FIXME: the delay needed to latch these values correctly before
+    enabling RGB2LVDS may need tuning */
+ mdelay(100);
+ gpio_set_value(pm313_lvds_shutdown, 1);
+ } else if ((display_board_info.board_id == BOARD_DISPLAY_E1247 &&
+ board_info.board_id == BOARD_PM269) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311))
+ gpio_set_value(e1247_pm269_lvds_shutdown, 1);
+ else
+ gpio_set_value(cardhu_lvds_shutdown, 1);
+
+ return 0;
+}
+
+static int cardhu_panel_disable(void)
+{
+ regulator_disable(cardhu_lvds_reg);
+ regulator_put(cardhu_lvds_reg);
+ cardhu_lvds_reg = NULL;
+
+ regulator_disable(cardhu_lvds_vdd_bl);
+ regulator_put(cardhu_lvds_vdd_bl);
+ cardhu_lvds_vdd_bl = NULL;
+
+ regulator_disable(cardhu_lvds_vdd_panel);
+ regulator_put(cardhu_lvds_vdd_panel);
+ cardhu_lvds_vdd_panel = NULL;
+
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
+ gpio_set_value(pm313_lvds_shutdown, 0);
+ } else if ((display_board_info.board_id == BOARD_DISPLAY_E1247 &&
+ board_info.board_id == BOARD_PM269) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311)) {
+ gpio_set_value(e1247_pm269_lvds_shutdown, 0);
+ } else {
+ gpio_set_value(cardhu_lvds_shutdown, 0);
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_TEGRA_DC
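+/*
+ * HDMI power rails: vdd_hdmi_con is brought up from the hotplug_init
+ * hook and dropped at postsuspend, while avdd_hdmi and avdd_hdmi_pll
+ * follow the HDMI output enable/disable hooks.
+ */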
+static int cardhu_hdmi_vddio_enable(void)
+{
+ int ret;
+ if (!cardhu_hdmi_vddio) {
+ cardhu_hdmi_vddio = regulator_get(NULL, "vdd_hdmi_con");
+ if (IS_ERR_OR_NULL(cardhu_hdmi_vddio)) {
+ ret = PTR_ERR(cardhu_hdmi_vddio);
+ pr_err("hdmi: couldn't get regulator vdd_hdmi_con\n");
+ cardhu_hdmi_vddio = NULL;
+ return ret;
+ }
+ }
+ ret = regulator_enable(cardhu_hdmi_vddio);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator vdd_hdmi_con\n");
+ regulator_put(cardhu_hdmi_vddio);
+ cardhu_hdmi_vddio = NULL;
+ return ret;
+ }
+ return ret;
+}
+
+static int cardhu_hdmi_vddio_disable(void)
+{
+ if (cardhu_hdmi_vddio) {
+ regulator_disable(cardhu_hdmi_vddio);
+ regulator_put(cardhu_hdmi_vddio);
+ cardhu_hdmi_vddio = NULL;
+ }
+ return 0;
+}
+
+static int cardhu_hdmi_enable(void)
+{
+ int ret;
+ if (!cardhu_hdmi_reg) {
+ cardhu_hdmi_reg = regulator_get(NULL, "avdd_hdmi");
+ if (IS_ERR_OR_NULL(cardhu_hdmi_reg)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi\n");
+ cardhu_hdmi_reg = NULL;
+ return PTR_ERR(cardhu_hdmi_reg);
+ }
+ }
+ ret = regulator_enable(cardhu_hdmi_reg);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator avdd_hdmi\n");
+ return ret;
+ }
+ if (!cardhu_hdmi_pll) {
+ cardhu_hdmi_pll = regulator_get(NULL, "avdd_hdmi_pll");
+ if (IS_ERR_OR_NULL(cardhu_hdmi_pll)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi_pll\n");
+ cardhu_hdmi_pll = NULL;
+ regulator_put(cardhu_hdmi_reg);
+ cardhu_hdmi_reg = NULL;
+ return PTR_ERR(cardhu_hdmi_pll);
+ }
+ }
+ ret = regulator_enable(cardhu_hdmi_pll);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator avdd_hdmi_pll\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int cardhu_hdmi_disable(void)
+{
+ regulator_disable(cardhu_hdmi_reg);
+ regulator_put(cardhu_hdmi_reg);
+ cardhu_hdmi_reg = NULL;
+
+ regulator_disable(cardhu_hdmi_pll);
+ regulator_put(cardhu_hdmi_pll);
+ cardhu_hdmi_pll = NULL;
+ return 0;
+}
+
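+/*
+ * Register, IRQ and framebuffer resources for the two display
+ * controllers; the fbmem ranges are filled in by cardhu_panel_init(),
+ * and the DSI registers map to DSIB when CONFIG_TEGRA_DSI_INSTANCE_1
+ * is set.
+ */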
+static struct resource cardhu_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0, /* Filled in by cardhu_panel_init() */
+ .end = 0, /* Filled in by cardhu_panel_init() */
+ .flags = IORESOURCE_MEM,
+ },
+#ifdef CONFIG_TEGRA_DSI_INSTANCE_1
+ {
+ .name = "dsi_regs",
+ .start = TEGRA_DSIB_BASE,
+ .end = TEGRA_DSIB_BASE + TEGRA_DSIB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+#else
+ {
+ .name = "dsi_regs",
+ .start = TEGRA_DSI_BASE,
+ .end = TEGRA_DSI_BASE + TEGRA_DSI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+#endif
+};
+
+static struct resource cardhu_disp2_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_B_GENERAL,
+ .end = INT_DISPLAY_B_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0,
+ },
+ {
+ .name = "hdmi_regs",
+ .start = TEGRA_HDMI_BASE,
+ .end = TEGRA_HDMI_BASE + TEGRA_HDMI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+#endif
+
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+static struct tegra_dc_mode panel_19X12_modes[] = {
+ {
+ .pclk = 154000000,
+ .h_ref_to_sync = 11,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 32,
+ .v_sync_width = 6,
+ .h_back_porch = 80,
+ .v_back_porch = 26,
+ .h_active = 1920,
+ .v_active = 1200,
+ .h_front_porch = 48,
+ .v_front_porch = 3,
+ },
+};
+
+static struct tegra_dc_mode cardhu_panel_modes[] = {
+ {
+ /* 1366x768@60Hz */
+ .pclk = 74180000,
+ .h_ref_to_sync = 1,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 30,
+ .v_sync_width = 5,
+ .h_back_porch = 52,
+ .v_back_porch = 20,
+ .h_active = 1366,
+ .v_active = 768,
+ .h_front_porch = 64,
+ .v_front_porch = 25,
+ },
+};
+
+static struct tegra_dc_mode cardhu_panel_modes_55hz[] = {
+ {
+ /* 1366x768p 55Hz */
+ .pclk = 68000000,
+ .h_ref_to_sync = 0,
+ .v_ref_to_sync = 12,
+ .h_sync_width = 30,
+ .v_sync_width = 5,
+ .h_back_porch = 52,
+ .v_back_porch = 20,
+ .h_active = 1366,
+ .v_active = 768,
+ .h_front_porch = 64,
+ .v_front_porch = 25,
+ },
+};
+#endif
+
+static struct tegra_dc_sd_settings cardhu_sd_settings = {
+ .enable = 1, /* enabled by default. */
+ .use_auto_pwm = false,
+ .hw_update_delay = 0,
+ .bin_width = -1,
+ .aggressiveness = 1,
+ .phase_in_adjustments = true,
+ .use_vid_luma = false,
+ /* Default video coefficients */
+ .coeff = {5, 9, 2},
+ .fc = {0, 0},
+ /* Immediate backlight changes */
+ .blp = {1024, 255},
+ /* Gammas: R: 2.2 G: 2.2 B: 2.2 */
+ /* Default BL TF */
+ .bltf = {
+ {
+ {57, 65, 74, 83},
+ {93, 103, 114, 126},
+ {138, 151, 165, 179},
+ {194, 209, 225, 242},
+ },
+ {
+ {58, 66, 75, 84},
+ {94, 105, 116, 127},
+ {140, 153, 166, 181},
+ {196, 211, 227, 244},
+ },
+ {
+ {60, 68, 77, 87},
+ {97, 107, 119, 130},
+ {143, 156, 170, 184},
+ {199, 215, 231, 248},
+ },
+ {
+ {64, 73, 82, 91},
+ {102, 113, 124, 137},
+ {149, 163, 177, 192},
+ {207, 223, 240, 255},
+ },
+ },
+ /* Default LUT */
+ .lut = {
+ {
+ {250, 250, 250},
+ {194, 194, 194},
+ {149, 149, 149},
+ {113, 113, 113},
+ {82, 82, 82},
+ {56, 56, 56},
+ {34, 34, 34},
+ {15, 15, 15},
+ {0, 0, 0},
+ },
+ {
+ {246, 246, 246},
+ {191, 191, 191},
+ {147, 147, 147},
+ {111, 111, 111},
+ {80, 80, 80},
+ {55, 55, 55},
+ {33, 33, 33},
+ {14, 14, 14},
+ {0, 0, 0},
+ },
+ {
+ {239, 239, 239},
+ {185, 185, 185},
+ {142, 142, 142},
+ {107, 107, 107},
+ {77, 77, 77},
+ {52, 52, 52},
+ {30, 30, 30},
+ {12, 12, 12},
+ {0, 0, 0},
+ },
+ {
+ {224, 224, 224},
+ {173, 173, 173},
+ {133, 133, 133},
+ {99, 99, 99},
+ {70, 70, 70},
+ {46, 46, 46},
+ {25, 25, 25},
+ {7, 7, 7},
+ {0, 0, 0},
+ },
+ },
+ .sd_brightness = &sd_brightness,
+ .bl_device = &cardhu_backlight_device,
+};
+
+#ifdef CONFIG_TEGRA_DC
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+static struct tegra_fb_data cardhu_fb_data = {
+ .win = 0,
+ .xres = 1366,
+ .yres = 768,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+#endif
+
+static struct tegra_fb_data cardhu_hdmi_fb_data = {
+ .win = 0,
+ .xres = 1366,
+ .yres = 768,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_out cardhu_disp2_out = {
+ .type = TEGRA_DC_OUT_HDMI,
+ .flags = TEGRA_DC_OUT_HOTPLUG_HIGH,
+
+ .dcc_bus = 3,
+ .hotplug_gpio = cardhu_hdmi_hpd,
+
+ .max_pixclock = KHZ2PICOS(148500),
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .enable = cardhu_hdmi_enable,
+ .disable = cardhu_hdmi_disable,
+
+ .postsuspend = cardhu_hdmi_vddio_disable,
+ .hotplug_init = cardhu_hdmi_vddio_enable,
+};
+
+static struct tegra_dc_platform_data cardhu_disp2_pdata = {
+ .flags = 0,
+ .default_out = &cardhu_disp2_out,
+ .fb = &cardhu_hdmi_fb_data,
+ .emc_clk_rate = 300000000,
+};
+#endif
+
+#ifdef CONFIG_TEGRA_CARDHU_DSI
+static int cardhu_dsi_panel_enable(void)
+{
+ int ret;
+
+ if (cardhu_dsi_reg == NULL) {
+ cardhu_dsi_reg = regulator_get(NULL, "avdd_dsi_csi");
+ if (IS_ERR_OR_NULL(cardhu_dsi_reg)) {
+ pr_err("dsi: Could not get regulator avdd_dsi_csi\n");
+ cardhu_dsi_reg = NULL;
+ return PTR_ERR(cardhu_dsi_reg);
+ }
+ }
+ regulator_enable(cardhu_dsi_reg);
+
+ ret = gpio_request(TEGRA_GPIO_PJ1, "DSI TE");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_input(TEGRA_GPIO_PJ1);
+ if (ret < 0) {
+ gpio_free(TEGRA_GPIO_PJ1);
+ return ret;
+ }
+ tegra_gpio_enable(TEGRA_GPIO_PJ1);
+
+#if DSI_PANEL_219
+
+ ret = gpio_request(TEGRA_GPIO_PH0, "ph0");
+ if (ret < 0)
+ return ret;
+ ret = gpio_direction_output(TEGRA_GPIO_PH0, 0);
+ if (ret < 0) {
+ gpio_free(TEGRA_GPIO_PH0);
+ return ret;
+ }
+ else
+ tegra_gpio_enable(TEGRA_GPIO_PH0);
+
+ ret = gpio_request(TEGRA_GPIO_PH2, "ph2");
+ if (ret < 0)
+ return ret;
+ ret = gpio_direction_output(TEGRA_GPIO_PH2, 0);
+ if (ret < 0) {
+ gpio_free(TEGRA_GPIO_PH2);
+ return ret;
+ }
+ else
+ tegra_gpio_enable(TEGRA_GPIO_PH2);
+
+ ret = gpio_request(TEGRA_GPIO_PU2, "pu2");
+ if (ret < 0)
+ return ret;
+ ret = gpio_direction_output(TEGRA_GPIO_PU2, 0);
+ if (ret < 0) {
+ gpio_free(TEGRA_GPIO_PU2);
+ return ret;
+ }
+ else
+ tegra_gpio_enable(TEGRA_GPIO_PU2);
+
+ gpio_set_value(cardhu_lvds_shutdown, 1);
+ mdelay(20);
+ gpio_set_value(TEGRA_GPIO_PH0, 1);
+ mdelay(10);
+ gpio_set_value(TEGRA_GPIO_PH2, 1);
+ mdelay(15);
+ gpio_set_value(TEGRA_GPIO_PU2, 0);
+ gpio_set_value(TEGRA_GPIO_PU2, 1);
+ mdelay(10);
+ gpio_set_value(TEGRA_GPIO_PU2, 0);
+ mdelay(10);
+ gpio_set_value(TEGRA_GPIO_PU2, 1);
+ mdelay(15);
+#endif
+
+#if DSI_PANEL_218
+ printk("DSI_PANEL_218 is enabled\n");
+ ret = gpio_request(AVDD_LCD, "avdd_lcd");
+ if(ret < 0)
+ gpio_free(AVDD_LCD);
+ ret = gpio_direction_output(AVDD_LCD, 1);
+ if(ret < 0)
+ gpio_free(AVDD_LCD);
+ else
+ tegra_gpio_enable(AVDD_LCD);
+
+#if DSI_PANEL_RESET
+ ret = gpio_request(TEGRA_GPIO_PD2, "pd2");
+ if (ret < 0) {
+ return ret;
+ }
+ ret = gpio_direction_output(TEGRA_GPIO_PD2, 0);
+ if (ret < 0) {
+ gpio_free(TEGRA_GPIO_PD2);
+ return ret;
+ }
+ else
+ tegra_gpio_enable(TEGRA_GPIO_PD2);
+
+ gpio_set_value(TEGRA_GPIO_PD2, 1);
+ gpio_set_value(TEGRA_GPIO_PD2, 0);
+ mdelay(2);
+ gpio_set_value(TEGRA_GPIO_PD2, 1);
+ mdelay(2);
+#endif
+#endif
+
+ return 0;
+}
+
+static int cardhu_dsi_panel_disable(void)
+{
+ int err;
+
+ err = 0;
+ printk(KERN_INFO "DSI panel disable\n");
+
+#if DSI_PANEL_219
+ tegra_gpio_disable(TEGRA_GPIO_PU2);
+ gpio_free(TEGRA_GPIO_PU2);
+ tegra_gpio_disable(TEGRA_GPIO_PH2);
+ gpio_free(TEGRA_GPIO_PH2);
+ tegra_gpio_disable(TEGRA_GPIO_PH0);
+ gpio_free(TEGRA_GPIO_PH0);
+ tegra_gpio_disable(TEGRA_GPIO_PL2);
+ gpio_free(TEGRA_GPIO_PL2);
+#endif
+
+#if DSI_PANEL_218
+ tegra_gpio_disable(TEGRA_GPIO_PD2);
+ gpio_free(TEGRA_GPIO_PD2);
+#endif
+
+ return err;
+}
+
+static int cardhu_dsi_panel_postsuspend(void)
+{
+ int err;
+
+ err = 0;
+ printk(KERN_INFO "DSI panel postsuspend\n");
+
+ if (cardhu_dsi_reg) {
+ err = regulator_disable(cardhu_dsi_reg);
+ if (err < 0)
+ printk(KERN_ERR
+ "DSI regulator avdd_dsi_csi disable failed\n");
+ regulator_put(cardhu_dsi_reg);
+ cardhu_dsi_reg = NULL;
+ }
+
+#if DSI_PANEL_218
+ tegra_gpio_disable(AVDD_LCD);
+ gpio_free(AVDD_LCD);
+#endif
+
+ return err;
+}
+
+static struct tegra_dsi_cmd dsi_init_cmd[] = {
+ DSI_CMD_SHORT(0x05, 0x11, 0x00),
+ DSI_DLY_MS(150),
+ DSI_CMD_SHORT(0x05, 0x29, 0x00),
+ DSI_DLY_MS(20),
+};
+
+static struct tegra_dsi_cmd dsi_suspend_cmd[] = {
+ DSI_CMD_SHORT(0x05, 0x28, 0x00),
+ DSI_DLY_MS(20),
+ DSI_CMD_SHORT(0x05, 0x10, 0x00),
+ DSI_DLY_MS(5),
+};
+
+struct tegra_dsi_out cardhu_dsi = {
+ .n_data_lanes = 2,
+ .pixel_format = TEGRA_DSI_PIXEL_FORMAT_24BIT_P,
+ .refresh_rate = 60,
+ .virtual_channel = TEGRA_DSI_VIRTUAL_CHANNEL_0,
+
+ .panel_has_frame_buffer = true,
+#ifdef CONFIG_TEGRA_DSI_INSTANCE_1
+ .dsi_instance = 1,
+#else
+ .dsi_instance = 0,
+#endif
+ .panel_reset = DSI_PANEL_RESET,
+
+ .n_init_cmd = ARRAY_SIZE(dsi_init_cmd),
+ .dsi_init_cmd = dsi_init_cmd,
+
+ .n_suspend_cmd = ARRAY_SIZE(dsi_suspend_cmd),
+ .dsi_suspend_cmd = dsi_suspend_cmd,
+
+ .video_data_type = TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE,
+ .lp_cmd_mode_freq_khz = 430000,
+};
+
+static struct tegra_dc_mode cardhu_dsi_modes[] = {
+#if DSI_PANEL_219
+ {
+ .pclk = 10000000,
+ .h_ref_to_sync = 4,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 16,
+ .v_sync_width = 1,
+ .h_back_porch = 32,
+ .v_back_porch = 1,
+ .h_active = 540,
+ .v_active = 960,
+ .h_front_porch = 32,
+ .v_front_porch = 2,
+ },
+#endif
+
+#if DSI_PANEL_218
+ {
+ .pclk = 323000000,
+ .h_ref_to_sync = 11,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 16,
+ .v_sync_width = 4,
+ .h_back_porch = 16,
+ .v_back_porch = 4,
+ .h_active = 864,
+ .v_active = 480,
+ .h_front_porch = 16,
+ .v_front_porch = 4,
+ },
+#endif
+
+};
+
+
+static struct tegra_fb_data cardhu_dsi_fb_data = {
+#if DSI_PANEL_219
+ .win = 0,
+ .xres = 540,
+ .yres = 960,
+ .bits_per_pixel = 32,
+#endif
+
+#if DSI_PANEL_218
+ .win = 0,
+ .xres = 864,
+ .yres = 480,
+ .bits_per_pixel = 32,
+#endif
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+#endif
+
+static struct tegra_dc_out cardhu_disp1_out = {
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+ .sd_settings = &cardhu_sd_settings,
+ .parent_clk = "pll_p",
+
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+ .type = TEGRA_DC_OUT_RGB,
+ .depth = 18,
+ .dither = TEGRA_DC_ORDERED_DITHER,
+
+ .modes = cardhu_panel_modes,
+ .n_modes = ARRAY_SIZE(cardhu_panel_modes),
+
+ .enable = cardhu_panel_enable,
+ .disable = cardhu_panel_disable,
+#else
+ .type = TEGRA_DC_OUT_DSI,
+
+ .modes = cardhu_dsi_modes,
+ .n_modes = ARRAY_SIZE(cardhu_dsi_modes),
+
+ .dsi = &cardhu_dsi,
+
+ .enable = cardhu_dsi_panel_enable,
+ .disable = cardhu_dsi_panel_disable,
+ .postsuspend = cardhu_dsi_panel_postsuspend,
+#endif
+};
+
+#ifdef CONFIG_TEGRA_DC
+static struct tegra_dc_platform_data cardhu_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &cardhu_disp1_out,
+ .emc_clk_rate = 300000000,
+#ifndef CONFIG_TEGRA_CARDHU_DSI
+ .fb = &cardhu_fb_data,
+#else
+ .fb = &cardhu_dsi_fb_data,
+#endif
+};
+
+static struct nvhost_device cardhu_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = cardhu_disp1_resources,
+ .num_resources = ARRAY_SIZE(cardhu_disp1_resources),
+ .dev = {
+ .platform_data = &cardhu_disp1_pdata,
+ },
+};
+
+static int cardhu_disp1_check_fb(struct device *dev, struct fb_info *info)
+{
+ return info->device == &cardhu_disp1_device.dev;
+}
+
+static struct nvhost_device cardhu_disp2_device = {
+ .name = "tegradc",
+ .id = 1,
+ .resource = cardhu_disp2_resources,
+ .num_resources = ARRAY_SIZE(cardhu_disp2_resources),
+ .dev = {
+ .platform_data = &cardhu_disp2_pdata,
+ },
+};
+#else
+static int cardhu_disp1_check_fb(struct device *dev, struct fb_info *info)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static struct nvmap_platform_carveout cardhu_carveouts[] = {
+ [0] = NVMAP_HEAP_CARVEOUT_IRAM_INIT,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0, /* Filled in by cardhu_panel_init() */
+ .size = 0, /* Filled in by cardhu_panel_init() */
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data cardhu_nvmap_data = {
+ .carveouts = cardhu_carveouts,
+ .nr_carveouts = ARRAY_SIZE(cardhu_carveouts),
+};
+
+static struct platform_device cardhu_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &cardhu_nvmap_data,
+ },
+};
+#endif
+
+#if defined(CONFIG_ION_TEGRA)
+static struct ion_platform_data tegra_ion_data = {
+ .nr = 3,
+ .heaps = {
+ {
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .id = TEGRA_ION_HEAP_CARVEOUT,
+ .name = "carveout",
+ .base = 0,
+ .size = 0,
+ },
+ {
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .id = TEGRA_ION_HEAP_IRAM,
+ .name = "iram",
+ .base = TEGRA_IRAM_BASE + TEGRA_RESET_HANDLER_SIZE,
+ .size = TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE,
+ },
+ {
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .id = TEGRA_ION_HEAP_VPR,
+ .name = "vpr",
+ .base = 0,
+ .size = 0,
+ },
+ },
+};
+
+static struct platform_device tegra_ion_device = {
+ .name = "ion-tegra",
+ .id = -1,
+ .dev = {
+ .platform_data = &tegra_ion_data,
+ },
+};
+#endif
+
+static struct platform_device *cardhu_gfx_devices[] __initdata = {
+#if defined(CONFIG_TEGRA_NVMAP)
+ &cardhu_nvmap_device,
+#endif
+#if defined(CONFIG_ION_TEGRA)
+ &tegra_ion_device,
+#endif
+#ifdef CONFIG_TEGRA_GRHOST
+ &tegra_grhost_device,
+#endif
+ &tegra_pwfm0_device,
+ &cardhu_backlight_device,
+};
+
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* put early_suspend/late_resume handlers here for the display in order
+ * to keep the code out of the display driver, keeping it closer to upstream
+ */
+struct early_suspend cardhu_panel_early_suspender;
+
+static void cardhu_panel_early_suspend(struct early_suspend *h)
+{
+ /* power down LCD and use a black screen for HDMI */
+ if (num_registered_fb > 0)
+ fb_blank(registered_fb[0], FB_BLANK_POWERDOWN);
+ if (num_registered_fb > 1)
+ fb_blank(registered_fb[1], FB_BLANK_NORMAL);
+}
+
+static void cardhu_panel_late_resume(struct early_suspend *h)
+{
+ unsigned i;
+ for (i = 0; i < num_registered_fb; i++)
+ fb_blank(registered_fb[i], FB_BLANK_UNBLANK);
+}
+#endif
+
+int __init cardhu_panel_init(void)
+{
+ int err;
+ struct resource __maybe_unused *res;
+
+ tegra_get_board_info(&board_info);
+ tegra_get_display_board_info(&display_board_info);
+
+#if defined(CONFIG_TEGRA_NVMAP)
+ cardhu_carveouts[1].base = tegra_carveout_start;
+ cardhu_carveouts[1].size = tegra_carveout_size;
+#endif
+
+#if defined(CONFIG_ION_TEGRA)
+ tegra_ion_data.heaps[0].base = tegra_carveout_start;
+ tegra_ion_data.heaps[0].size = tegra_carveout_size;
+#endif
+
+ if (board_info.board_id == BOARD_E1291 &&
+ ((board_info.sku & SKU_TOUCHSCREEN_MECH_FIX) == 0)) {
+ /* use 55Hz panel timings to reduce noise on sensitive touch */
+ printk("Using cardhu_panel_modes_55hz\n");
+ cardhu_disp1_out.modes = cardhu_panel_modes_55hz;
+ cardhu_disp1_out.n_modes = ARRAY_SIZE(cardhu_panel_modes_55hz);
+ }
+
+#if defined(CONFIG_TEGRA_DC) && !defined(CONFIG_TEGRA_CARDHU_DSI)
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
+ /* initialize the values */
+#if defined(PM313_LVDS_PANEL_19X12)
+ cardhu_disp1_out.modes = panel_19X12_modes;
+ cardhu_disp1_out.n_modes = ARRAY_SIZE(panel_19X12_modes);
+ cardhu_disp1_out.parent_clk = "pll_d_out0";
+#if (PM313_LVDS_PANEL_BPP == 1)
+ cardhu_disp1_out.depth = 18;
+#else
+ cardhu_disp1_out.depth = 24;
+#endif
+ cardhu_fb_data.xres = 1920;
+ cardhu_fb_data.yres = 1200;
+
+ cardhu_disp2_out.parent_clk = "pll_d2_out0";
+ cardhu_hdmi_fb_data.xres = 1920;
+ cardhu_hdmi_fb_data.yres = 1200;
+#endif
+
+ /* lvds configuration */
+ err = gpio_request(pm313_R_FDE, "R_FDE");
+ err |= gpio_direction_output(pm313_R_FDE, 1);
+ tegra_gpio_enable(pm313_R_FDE);
+
+ err |= gpio_request(pm313_R_FB, "R_FB");
+ err |= gpio_direction_output(pm313_R_FB, 1);
+ tegra_gpio_enable(pm313_R_FB);
+
+ err |= gpio_request(pm313_MODE0, "MODE0");
+ err |= gpio_direction_output(pm313_MODE0, 1);
+ tegra_gpio_enable(pm313_MODE0);
+
+ err |= gpio_request(pm313_MODE1, "MODE1");
+ err |= gpio_direction_output(pm313_MODE1, 0);
+ tegra_gpio_enable(pm313_MODE1);
+
+ err |= gpio_request(pm313_BPP, "BPP");
+ err |= gpio_direction_output(pm313_BPP, PM313_LVDS_PANEL_BPP);
+ tegra_gpio_enable(pm313_BPP);
+
+ err |= gpio_request(pm313_lvds_shutdown, "lvds_shutdown");
+ /* free ride provided by bootloader */
+ err |= gpio_direction_output(pm313_lvds_shutdown, 1);
+ tegra_gpio_enable(pm313_lvds_shutdown);
+
+ if (err)
+ printk(KERN_ERR "ERROR(s) in LVDS configuration\n");
+ } else if ((display_board_info.board_id == BOARD_DISPLAY_E1247 &&
+ board_info.board_id == BOARD_PM269) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311)) {
+ gpio_request(e1247_pm269_lvds_shutdown, "lvds_shutdown");
+ gpio_direction_output(e1247_pm269_lvds_shutdown, 1);
+ tegra_gpio_enable(e1247_pm269_lvds_shutdown);
+ } else {
+ gpio_request(cardhu_lvds_shutdown, "lvds_shutdown");
+ gpio_direction_output(cardhu_lvds_shutdown, 1);
+ tegra_gpio_enable(cardhu_lvds_shutdown);
+ }
+#endif
+
+ tegra_gpio_enable(cardhu_hdmi_hpd);
+ gpio_request(cardhu_hdmi_hpd, "hdmi_hpd");
+ gpio_direction_input(cardhu_hdmi_hpd);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ cardhu_panel_early_suspender.suspend = cardhu_panel_early_suspend;
+ cardhu_panel_early_suspender.resume = cardhu_panel_late_resume;
+ cardhu_panel_early_suspender.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ register_early_suspend(&cardhu_panel_early_suspender);
+#endif
+
+ err = platform_add_devices(cardhu_gfx_devices,
+ ARRAY_SIZE(cardhu_gfx_devices));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ res = nvhost_get_resource_byname(&cardhu_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+#endif
+
+ /* Copy the bootloader fb to the fb. */
+ tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
+ min(tegra_fb_size, tegra_bootloader_fb_size));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ if (!err)
+ err = nvhost_device_register(&cardhu_disp1_device);
+
+ res = nvhost_get_resource_byname(&cardhu_disp2_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb2_start;
+ res->end = tegra_fb2_start + tegra_fb2_size - 1;
+ if (!err)
+ err = nvhost_device_register(&cardhu_disp2_device);
+#endif
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_NVAVP)
+ if (!err)
+ err = nvhost_device_register(&nvavp_device);
+#endif
+ return err;
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-pinmux.c b/arch/arm/mach-tegra/board-cardhu-pinmux.c
new file mode 100644
index 000000000000..6648c90e489f
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-pinmux.c
@@ -0,0 +1,795 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-pinmux.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/pinmux.h>
+#include "board.h"
+#include "board-cardhu.h"
+#include "gpio-names.h"
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+/* Setting the drive strength of pins
+ * hsm: Enable High speed mode (ENABLE/DISABLE)
+ * schmitt: Enable/disable Schmitt trigger (ENABLE/DISABLE)
+ * drive: low power mode (DIV_1, DIV_2, DIV_4, DIV_8)
+ * pulldn_drive - drive down (falling edge) - Driver Output Pull-Down drive
+ * strength code. Value from 0 to 31.
+ * pullup_drive - drive up (rising edge) - Driver Output Pull-Up drive
+ * strength code. Value from 0 to 31.
+ * pulldn_slew - Driver Output Pull-Up slew control code - 2bit code
+ * code 11 is least slewing of signal. code 00 is highest
+ * slewing of the signal.
+ * Value - FASTEST, FAST, SLOW, SLOWEST
+ * pullup_slew - Driver Output Pull-Down slew control code -
+ * code 11 is least slewing of signal. code 00 is highest
+ * slewing of the signal.
+ * Value - FASTEST, FAST, SLOW, SLOWEST
+ */
+#define SET_DRIVE(_name, _hsm, _schmitt, _drive, _pulldn_drive, _pullup_drive, _pulldn_slew, _pullup_slew) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_##_hsm, \
+ .schmitt = TEGRA_SCHMITT_##_schmitt, \
+ .drive = TEGRA_DRIVE_##_drive, \
+ .pull_down = TEGRA_PULL_##_pulldn_drive, \
+ .pull_up = TEGRA_PULL_##_pullup_drive, \
+ .slew_rising = TEGRA_SLEW_##_pulldn_slew, \
+ .slew_falling = TEGRA_SLEW_##_pullup_slew, \
+ }
+
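+/*
+ * For illustration, per the SET_DRIVE() macro above, the DAP2 entry in the
+ * table below,
+ *
+ *	SET_DRIVE(DAP2, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+ *
+ * expands to a tegra_drive_pingroup_config initializer of the form:
+ *
+ *	{
+ *		.pingroup = TEGRA_DRIVE_PINGROUP_DAP2,
+ *		.hsm = TEGRA_HSM_DISABLE,
+ *		.schmitt = TEGRA_SCHMITT_ENABLE,
+ *		.drive = TEGRA_DRIVE_DIV_1,
+ *		.pull_down = TEGRA_PULL_31,
+ *		.pull_up = TEGRA_PULL_31,
+ *		.slew_rising = TEGRA_SLEW_FASTEST,
+ *		.slew_falling = TEGRA_SLEW_FASTEST,
+ *	}
+ */
+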
+/* !!!FIXME!!!! POPULATE THIS TABLE */
+static __initdata struct tegra_drive_pingroup_config cardhu_drive_pinmux[] = {
+ /* DEFAULT_DRIVE(<pin_group>), */
+ /* SET_DRIVE(ATA, DISABLE, DISABLE, DIV_1, 31, 31, FAST, FAST) */
+ SET_DRIVE(DAP2, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* All I2C pins are driven to maximum drive strength */
+ /* GEN1 I2C */
+ SET_DRIVE(DBG, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* GEN2 I2C */
+ SET_DRIVE(AT5, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* CAM I2C */
+ SET_DRIVE(GME, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* DDC I2C */
+ SET_DRIVE(DDC, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* PWR_I2C */
+ SET_DRIVE(AO1, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* UART3 */
+ SET_DRIVE(UART3, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* SDMMC1 */
+ SET_DRIVE(SDIO1, DISABLE, DISABLE, DIV_1, 46, 42, FAST, FAST),
+
+ /* SDMMC3 */
+ SET_DRIVE(SDIO3, DISABLE, DISABLE, DIV_1, 46, 42, FAST, FAST),
+
+ /* SDMMC4 */
+ SET_DRIVE(GMA, DISABLE, DISABLE, DIV_1, 9, 9, SLOWEST, SLOWEST),
+ SET_DRIVE(GMB, DISABLE, DISABLE, DIV_1, 9, 9, SLOWEST, SLOWEST),
+ SET_DRIVE(GMC, DISABLE, DISABLE, DIV_1, 9, 9, SLOWEST, SLOWEST),
+ SET_DRIVE(GMD, DISABLE, DISABLE, DIV_1, 9, 9, SLOWEST, SLOWEST),
+
+};
+
+#define DEFAULT_PINMUX(_pingroup, _mux, _pupd, _tri, _io) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_DEFAULT, \
+ .od = TEGRA_PIN_OD_DEFAULT, \
+ .ioreset = TEGRA_PIN_IO_RESET_DEFAULT, \
+ }
+
+#define I2C_PINMUX(_pingroup, _mux, _pupd, _tri, _io, _lock, _od) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_##_lock, \
+ .od = TEGRA_PIN_OD_##_od, \
+ .ioreset = TEGRA_PIN_IO_RESET_DEFAULT, \
+ }
+
+#define VI_PINMUX(_pingroup, _mux, _pupd, _tri, _io, _lock, _ioreset) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_##_lock, \
+ .od = TEGRA_PIN_OD_DEFAULT, \
+ .ioreset = TEGRA_PIN_IO_RESET_##_ioreset \
+ }
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_common[] = {
+ /* SDMMC1 pinmux */
+ DEFAULT_PINMUX(SDMMC1_CLK, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_CMD, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT3, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT2, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT1, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT0, SDMMC1, PULL_UP, NORMAL, INPUT),
+
+ /* SDMMC3 pinmux */
+ DEFAULT_PINMUX(SDMMC3_CLK, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_CMD, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT0, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT1, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT2, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT3, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT6, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT7, SDMMC3, PULL_UP, NORMAL, INPUT),
+
+ /* SDMMC4 pinmux */
+ DEFAULT_PINMUX(SDMMC4_CLK, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_CMD, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT0, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT1, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT2, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT3, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT4, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT5, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT6, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT7, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_RST_N, RSVD1, PULL_DOWN, NORMAL, INPUT),
+
+ /* I2C1 pinmux */
+ I2C_PINMUX(GEN1_I2C_SCL, I2C1, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(GEN1_I2C_SDA, I2C1, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* I2C2 pinmux */
+ I2C_PINMUX(GEN2_I2C_SCL, I2C2, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(GEN2_I2C_SDA, I2C2, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* I2C3 pinmux */
+ I2C_PINMUX(CAM_I2C_SCL, I2C3, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(CAM_I2C_SDA, I2C3, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* I2C4 pinmux */
+ I2C_PINMUX(DDC_SCL, I2C4, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(DDC_SDA, I2C4, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* Power I2C pinmux */
+ I2C_PINMUX(PWR_I2C_SCL, I2CPWR, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(PWR_I2C_SDA, I2CPWR, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ DEFAULT_PINMUX(ULPI_DATA0, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_DATA1, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA2, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA3, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA4, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA5, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA6, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA7, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_CLK, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_DIR, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_NXT, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_STP, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(DAP3_FS, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_DIN, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_DOUT, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_SCLK, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PV2, OWR, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PV3, RSVD1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CLK2_OUT, EXTPERIPH2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK2_REQ, DAP, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PWR1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PWR2, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SDIN, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SDOUT, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_WR_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DC0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PWR0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_PCLK, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DE, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_HSYNC, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_VSYNC, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D2, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D3, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D4, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D5, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D6, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D7, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D8, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D9, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D10, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D11, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D12, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D13, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D14, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D15, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D16, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D17, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D18, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D19, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D20, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D21, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D22, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D23, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DC1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CRT_HSYNC, CRT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CRT_VSYNC, CRT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(VI_D0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D1, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D2, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D3, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D4, VI, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(VI_D5, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D7, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D10, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_MCLK, VI, PULL_UP, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(UART2_RXD, IRDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART2_TXD, IRDA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART2_RTS_N, UARTB, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART2_CTS_N, UARTB, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_TXD, UARTC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART3_RXD, UARTC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_CTS_N, UARTC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_RTS_N, UARTC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU1, RSVD1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU2, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU3, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU4, PWM1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU5, PWM2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU6, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_FS, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_DIN, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_DOUT, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_SCLK, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK3_OUT, EXTPERIPH3, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CLK3_REQ, DEV3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_WP_N, GMI, NORMAL, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(KB_ROW5, OWR, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW12, KBC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW14, KBC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW15, KBC, NORMAL, NORMAL, OUTPUT),
+
+#if 0 /* for testing on Verbier */
+ DEFAULT_PINMUX(GMI_WAIT, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_ADV_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CLK, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS0_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS1_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS3_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS4_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS6_N, NAND_ALT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS7_N, NAND_ALT, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD0, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD1, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD2, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD3, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD4, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD5, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD6, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD7, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD9, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD11, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD12, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD13, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD14, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD15, NAND, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_WR_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_OE_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_DQS, NAND, NORMAL, NORMAL, INPUT),
+#else
+ DEFAULT_PINMUX(GMI_AD8, PWM0, NORMAL, NORMAL, OUTPUT), /* LCD1_BL_PWM */
+#endif
+ DEFAULT_PINMUX(GMI_A16, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A17, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A18, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A19, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CAM_MCLK, VI_ALT2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PCC1, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB3, VGP3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB5, VGP5, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB6, VGP6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB7, I2S4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PCC2, I2S4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(JTAG_RTCK, RTCK, NORMAL, NORMAL, OUTPUT),
+
+ /* KBC keys */
+ DEFAULT_PINMUX(KB_ROW0, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW1, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW2, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW3, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL0, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL1, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL2, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL3, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL4, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL5, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PV0, RSVD, PULL_UP, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(CLK_32K_OUT, BLINK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(SYS_CLK_REQ, SYSCLK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(OWR, OWR, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_FS, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DIN, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DOUT, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_SCLK, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK1_REQ, DAP, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK1_OUT, EXTPERIPH1, NORMAL, NORMAL, INPUT),
+#if 0 /* For HDA realtek Codec */
+ DEFAULT_PINMUX(SPDIF_IN, DAP2, PULL_DOWN, NORMAL, INPUT),
+#else
+ DEFAULT_PINMUX(SPDIF_IN, SPDIF, NORMAL, NORMAL, INPUT),
+#endif
+ DEFAULT_PINMUX(SPDIF_OUT, SPDIF, NORMAL, NORMAL, OUTPUT),
+#if 0 /* For HDA realtek Codec */
+ DEFAULT_PINMUX(DAP2_FS, HDA, PULL_DOWN, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DIN, HDA, PULL_DOWN, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DOUT, HDA, PULL_DOWN, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_SCLK, HDA, PULL_DOWN, NORMAL, INPUT),
+#else
+ DEFAULT_PINMUX(DAP2_FS, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DIN, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DOUT, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_SCLK, I2S1, NORMAL, NORMAL, INPUT),
+#endif
+ DEFAULT_PINMUX(SPI2_CS1_N, SPI2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_MOSI, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_SCK, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_CS0_N, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_MISO, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L0_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_WAKE_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L1_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L2_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(HDMI_CEC, CEC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(HDMI_INT, RSVD0, NORMAL, TRISTATE, INPUT),
+
+ /* Gpios */
+ /* SDMMC1 CD gpio */
+ DEFAULT_PINMUX(GMI_IORDY, RSVD1, PULL_UP, NORMAL, INPUT),
+ /* SDMMC1 WP gpio */
+ DEFAULT_PINMUX(VI_D11, RSVD1, PULL_UP, NORMAL, INPUT),
+ /* Touch panel GPIO */
+ /* Touch IRQ */
+ DEFAULT_PINMUX(GMI_AD12, NAND, PULL_UP, NORMAL, INPUT),
+
+ /* Touch RESET */
+ DEFAULT_PINMUX(GMI_AD14, NAND, NORMAL, NORMAL, OUTPUT),
+
+
+ /* Power rails GPIO */
+ DEFAULT_PINMUX(SPI2_SCK, GMI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB4, VGP4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW8, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT5, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT4, SDMMC3, PULL_UP, NORMAL, INPUT),
+
+ VI_PINMUX(VI_D6, VI, NORMAL, NORMAL, OUTPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_D8, SDMMC2, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_D9, SDMMC2, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_PCLK, RSVD1, PULL_UP, TRISTATE, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_HSYNC, RSVD1, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_VSYNC, RSVD1, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+};
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_e118x[] = {
+ /* Power rails GPIO */
+ DEFAULT_PINMUX(SPI2_SCK, SPI2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_RST_N, RSVD3, PULL_UP, TRISTATE, INPUT),
+ DEFAULT_PINMUX(GMI_AD15, NAND, PULL_UP, TRISTATE, INPUT),
+};
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_pm311[] = {
+ /* Power rails GPIO */
+ DEFAULT_PINMUX(SPI2_SCK, SPI2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_RST_N, PCIE, PULL_UP, TRISTATE, INPUT),
+ DEFAULT_PINMUX(PEX_L2_CLKREQ_N, PCIE, PULL_UP, TRISTATE, INPUT),
+};
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_cardhu[] = {
+ DEFAULT_PINMUX(LCD_CS0_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SCK, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS1_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_M1, DISPLAYA, NORMAL, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(GMI_CS2_N, RSVD1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, PWM0, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, OUTPUT),
+
+ /* Power rails GPIO */
+ DEFAULT_PINMUX(GMI_CS2_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_RST_N, RSVD3, PULL_UP, TRISTATE, INPUT),
+ DEFAULT_PINMUX(GMI_AD15, NAND, PULL_UP, TRISTATE, INPUT),
+
+ DEFAULT_PINMUX(GMI_CS0_N, GMI, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_CS1_N, GMI, PULL_UP, TRISTATE, INPUT),
+ /*TP_IRQ*/
+ DEFAULT_PINMUX(GMI_CS4_N, GMI, PULL_UP, NORMAL, INPUT),
+ /*PCIE dock detect*/
+ DEFAULT_PINMUX(GPIO_PU4, PWM1, PULL_UP, NORMAL, INPUT),
+};
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_cardhu_a03[] = {
+ DEFAULT_PINMUX(LCD_CS0_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SCK, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS1_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_M1, DISPLAYA, NORMAL, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(GMI_CS2_N, RSVD1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, PWM0, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, OUTPUT),
+
+ /* Power rails GPIO */
+ DEFAULT_PINMUX(PEX_L0_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_CLKREQ_N, RSVD3, PULL_UP, TRISTATE, INPUT),
+ DEFAULT_PINMUX(PEX_L1_PRSNT_N, RSVD3, PULL_UP, TRISTATE, INPUT),
+};
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_e1291_a04[] = {
+ DEFAULT_PINMUX(GMI_AD15, NAND, PULL_DOWN, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_DATA5, UARTA, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA6, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(SPI2_MOSI, SPI6, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_SCLK, RSVD1, NORMAL, NORMAL, OUTPUT),
+};
+
+static __initdata struct tegra_pingroup_config cardhu_pinmux_e1198[] = {
+ DEFAULT_PINMUX(LCD_CS0_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_SCK, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS1_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_M1, DISPLAYA, NORMAL, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(GMI_CS2_N, RSVD1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, PWM0, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, OUTPUT),
+
+ /* SPI2 */
+ DEFAULT_PINMUX(SPI2_SCK, SPI2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_MOSI, SPI2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_MISO, SPI2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_CS0_N, SPI2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI2_CS2_N, SPI2, PULL_UP, NORMAL, INPUT),
+};
+
+static __initdata struct tegra_pingroup_config unused_pins_lowpower[] = {
+ DEFAULT_PINMUX(GMI_WAIT, NAND, PULL_UP, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_ADV_N, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CLK, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS3_N, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS6_N, SATA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS7_N, NAND, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD0, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD1, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD2, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD3, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD4, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD5, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD6, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD7, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD9, PWM1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD11, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD13, NAND, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_WR_N, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_OE_N, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_DQS, NAND, NORMAL, TRISTATE, OUTPUT),
+};
+
+static __initdata struct tegra_pingroup_config gmi_pins_269[] = {
+ /* Continuation of table unused_pins_lowpower only for PM269 */
+ DEFAULT_PINMUX(GMI_CS0_N, NAND, PULL_UP, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS1_N, NAND, PULL_UP, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS2_N, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_CS3_N, NAND, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS4_N, NAND, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_CS6_N, SATA, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS7_N, NAND, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, PWM0, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD9, PWM1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD11, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD13, NAND, PULL_UP, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD15, NAND, PULL_UP, TRISTATE, INPUT),
+ DEFAULT_PINMUX(GMI_A16, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A17, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A18, SPI4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A19, SPI4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_RST_N, NAND, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_WP_N, NAND, NORMAL, NORMAL, INPUT),
+};
+
+static void __init cardhu_pinmux_audio_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_CDC_IRQ);
+ gpio_request(TEGRA_GPIO_CDC_IRQ, "wm8903");
+ gpio_direction_input(TEGRA_GPIO_CDC_IRQ);
+
+ tegra_gpio_enable(TEGRA_GPIO_HP_DET);
+}
+
+#define GPIO_INIT_PIN_MODE(_gpio, _is_input, _value) \
+ { \
+ .gpio_nr = _gpio, \
+ .is_input = _is_input, \
+ .value = _value, \
+ }
+
+
+/* E1198-A01/E1291 specific fab < A03 */
+static struct gpio_init_pin_info init_gpio_mode_e1291_a02[] = {
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PH7, false, 0),
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PI4, false, 0),
+};
+
+/* E1198-A02/E1291 specific fab = A03 */
+static struct gpio_init_pin_info init_gpio_mode_e1291_a03[] = {
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PDD6, false, 0),
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PDD4, false, 0),
+};
+
+/* E1198-A02/E1291 specific fab >= A04 */
+static struct gpio_init_pin_info init_gpio_mode_e1291_a04[] = {
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PDD6, false, 0),
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PDD4, false, 0),
+ GPIO_INIT_PIN_MODE(TEGRA_GPIO_PR2, false, 0),
+};
+
+static void __init cardhu_gpio_init_configure(void)
+{
+ struct board_info board_info;
+ int len;
+ int i;
+ struct gpio_init_pin_info *pins_info;
+
+ tegra_get_board_info(&board_info);
+
+ switch (board_info.board_id) {
+ case BOARD_E1198:
+ if (board_info.fab < BOARD_FAB_A02) {
+ len = ARRAY_SIZE(init_gpio_mode_e1291_a02);
+ pins_info = init_gpio_mode_e1291_a02;
+ } else {
+ len = ARRAY_SIZE(init_gpio_mode_e1291_a03);
+ pins_info = init_gpio_mode_e1291_a03;
+ }
+ break;
+ case BOARD_E1291:
+ if (board_info.fab < BOARD_FAB_A03) {
+ len = ARRAY_SIZE(init_gpio_mode_e1291_a02);
+ pins_info = init_gpio_mode_e1291_a02;
+ } else if (board_info.fab == BOARD_FAB_A03) {
+ len = ARRAY_SIZE(init_gpio_mode_e1291_a03);
+ pins_info = init_gpio_mode_e1291_a03;
+ } else {
+ len = ARRAY_SIZE(init_gpio_mode_e1291_a04);
+ pins_info = init_gpio_mode_e1291_a04;
+ }
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < len; ++i) {
+ tegra_gpio_init_configure(pins_info->gpio_nr,
+ pins_info->is_input, pins_info->value);
+ pins_info++;
+ }
+}
+
+int __init cardhu_pinmux_init(void)
+{
+ struct board_info board_info;
+
+ cardhu_gpio_init_configure();
+
+ tegra_pinmux_config_table(cardhu_pinmux_common, ARRAY_SIZE(cardhu_pinmux_common));
+ tegra_drive_pinmux_config_table(cardhu_drive_pinmux,
+ ARRAY_SIZE(cardhu_drive_pinmux));
+
+ tegra_get_board_info(&board_info);
+ switch (board_info.board_id) {
+ case BOARD_E1198:
+ tegra_pinmux_config_table(cardhu_pinmux_e1198,
+ ARRAY_SIZE(cardhu_pinmux_e1198));
+ tegra_pinmux_config_table(unused_pins_lowpower,
+ ARRAY_SIZE(unused_pins_lowpower));
+ if (board_info.fab >= BOARD_FAB_A02)
+ tegra_pinmux_config_table(cardhu_pinmux_cardhu_a03,
+ ARRAY_SIZE(cardhu_pinmux_cardhu_a03));
+ break;
+ case BOARD_E1291:
+ if (board_info.fab < BOARD_FAB_A03) {
+ tegra_pinmux_config_table(cardhu_pinmux_cardhu,
+ ARRAY_SIZE(cardhu_pinmux_cardhu));
+ tegra_pinmux_config_table(unused_pins_lowpower,
+ ARRAY_SIZE(unused_pins_lowpower));
+ } else {
+ tegra_pinmux_config_table(cardhu_pinmux_cardhu_a03,
+ ARRAY_SIZE(cardhu_pinmux_cardhu_a03));
+ }
+ if (board_info.fab >= BOARD_FAB_A04)
+ tegra_pinmux_config_table(cardhu_pinmux_e1291_a04,
+ ARRAY_SIZE(cardhu_pinmux_e1291_a04));
+ break;
+
+ case BOARD_PM269:
+ case BOARD_PM305:
+ case BOARD_PM311:
+ case BOARD_E1257:
+ if (board_info.board_id == BOARD_PM311 || board_info.board_id == BOARD_PM305) {
+ tegra_pinmux_config_table(cardhu_pinmux_pm311,
+ ARRAY_SIZE(cardhu_pinmux_pm311));
+ } else {
+ tegra_pinmux_config_table(cardhu_pinmux_e118x,
+ ARRAY_SIZE(cardhu_pinmux_e118x));
+ }
+ tegra_pinmux_config_table(unused_pins_lowpower,
+ ARRAY_SIZE(unused_pins_lowpower));
+ tegra_pinmux_config_table(gmi_pins_269,
+ ARRAY_SIZE(gmi_pins_269));
+ break;
+ default:
+ tegra_pinmux_config_table(cardhu_pinmux_e118x,
+ ARRAY_SIZE(cardhu_pinmux_e118x));
+ break;
+ }
+
+ cardhu_pinmux_audio_init();
+
+ return 0;
+}
+
+#define PIN_GPIO_LPM(_name, _gpio, _is_input, _value) \
+ { \
+ .name = _name, \
+ .gpio_nr = _gpio, \
+ .is_gpio = true, \
+ .is_input = _is_input, \
+ .value = _value, \
+ }
+
+struct gpio_init_pin_info pin_lpm_cardhu_common[] = {
+ PIN_GPIO_LPM("GMI_CS3_N", TEGRA_GPIO_PK4, 0, 0),
+ PIN_GPIO_LPM("GMI_CS4_N", TEGRA_GPIO_PK2, 1, 0),
+ PIN_GPIO_LPM("GMI_CS7", TEGRA_GPIO_PI6, 1, 0),
+ PIN_GPIO_LPM("GMI_CS0", TEGRA_GPIO_PJ0, 1, 0),
+ PIN_GPIO_LPM("GMI_CS1", TEGRA_GPIO_PJ2, 1, 0),
+ PIN_GPIO_LPM("GMI_WP_N", TEGRA_GPIO_PC7, 1, 0),
+};
+
+/* E1198 without PM313 display board */
+struct gpio_init_pin_info pin_lpm_cardhu_common_wo_pm313[] = {
+ PIN_GPIO_LPM("GMI_AD9", TEGRA_GPIO_PH1, 0, 0),
+ PIN_GPIO_LPM("GMI_AD11", TEGRA_GPIO_PH3, 0, 0),
+};
+
+struct gpio_init_pin_info vddio_gmi_pins_pm269[] = {
+ PIN_GPIO_LPM("GMI_CS3_N", TEGRA_GPIO_PK4, 0, 0),
+ PIN_GPIO_LPM("GMI_CS4_N", TEGRA_GPIO_PK2, 1, 0),
+ PIN_GPIO_LPM("GMI_CS7", TEGRA_GPIO_PI6, 1, 0),
+ PIN_GPIO_LPM("GMI_CS0", TEGRA_GPIO_PJ0, 1, 0),
+ PIN_GPIO_LPM("GMI_CS1", TEGRA_GPIO_PJ2, 1, 0),
+ PIN_GPIO_LPM("GMI_WP_N", TEGRA_GPIO_PC7, 1, 0),
+ PIN_GPIO_LPM("GMI_A16", TEGRA_GPIO_PJ7, 0, 0),
+ PIN_GPIO_LPM("GMI_A17", TEGRA_GPIO_PB0, 0, 0),
+ PIN_GPIO_LPM("GMI_A18", TEGRA_GPIO_PB1, 1, 0),
+ PIN_GPIO_LPM("GMI_A19", TEGRA_GPIO_PK7, 0, 0),
+};
+
+/* PM269 without PM313 display board */
+struct gpio_init_pin_info vddio_gmi_pins_pm269_wo_pm313[] = {
+ PIN_GPIO_LPM("GMI_CS2", TEGRA_GPIO_PK3, 1, 0),
+ PIN_GPIO_LPM("GMI_AD9", TEGRA_GPIO_PH1, 0, 0),
+};
+
+static void set_unused_pin_gpio(struct gpio_init_pin_info *lpm_pin_info,
+ int list_count)
+{
+ int i;
+ struct gpio_init_pin_info *pin_info;
+ int ret;
+
+ for (i = 0; i < list_count; ++i) {
+ pin_info = &lpm_pin_info[i];
+ if (!pin_info->is_gpio)
+ continue;
+
+ ret = gpio_request(pin_info->gpio_nr, pin_info->name);
+ if (ret < 0) {
+ pr_err("%s() Error in gpio_request() for gpio %d\n",
+ __func__, pin_info->gpio_nr);
+ continue;
+ }
+ if (pin_info->is_input)
+ ret = gpio_direction_input(pin_info->gpio_nr);
+ else
+ ret = gpio_direction_output(pin_info->gpio_nr,
+ pin_info->value);
+ if (ret < 0) {
+ pr_err("%s() Error in setting gpio %d to in/out\n",
+ __func__, pin_info->gpio_nr);
+ gpio_free(pin_info->gpio_nr);
+ continue;
+ }
+ tegra_gpio_enable(pin_info->gpio_nr);
+ }
+}
+
+/* Initialize the pins to the desired state as per the power/asic/system-eng
+ * recommendation */
+int __init cardhu_pins_state_init(void)
+{
+ struct board_info board_info;
+ struct board_info display_board_info;
+
+ tegra_get_board_info(&board_info);
+ tegra_get_display_board_info(&display_board_info);
+ if ((board_info.board_id == BOARD_E1291) ||
+ (board_info.board_id == BOARD_E1198)) {
+ set_unused_pin_gpio(&pin_lpm_cardhu_common[0],
+ ARRAY_SIZE(pin_lpm_cardhu_common));
+
+ if (display_board_info.board_id != BOARD_DISPLAY_PM313) {
+ set_unused_pin_gpio(&pin_lpm_cardhu_common_wo_pm313[0],
+ ARRAY_SIZE(pin_lpm_cardhu_common_wo_pm313));
+ }
+ }
+
+ if ((board_info.board_id == BOARD_PM269) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311)) {
+ set_unused_pin_gpio(&vddio_gmi_pins_pm269[0],
+ ARRAY_SIZE(vddio_gmi_pins_pm269));
+
+ if (display_board_info.board_id != BOARD_DISPLAY_PM313) {
+ set_unused_pin_gpio(&vddio_gmi_pins_pm269_wo_pm313[0],
+ ARRAY_SIZE(vddio_gmi_pins_pm269_wo_pm313));
+ }
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-pm298-power-rails.c b/arch/arm/mach-tegra/board-cardhu-pm298-power-rails.c
new file mode 100644
index 000000000000..9839249d197b
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-pm298-power-rails.c
@@ -0,0 +1,758 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-pm298-power-rails.c
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/regulator/gpio-switch-regulator.h>
+#include <linux/mfd/max77663-core.h>
+#include <linux/regulator/max77663-regulator.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/edp.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-cardhu.h"
+#include "pm.h"
+#include "wakeups-t3.h"
+#include "mach/tsensor.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW BIT(17)
+
+static struct regulator_consumer_supply max77663_sd0_supply[] = {
+ REGULATOR_SUPPLY("vdd_cpu_pmu", NULL),
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+ REGULATOR_SUPPLY("vdd_sys", NULL),
+};
+
+static struct regulator_consumer_supply max77663_sd1_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+ REGULATOR_SUPPLY("en_vddio_ddr_1v2", NULL),
+};
+
+static struct regulator_consumer_supply max77663_sd2_supply[] = {
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vdd1v8_satelite", NULL),
+ REGULATOR_SUPPLY("vddio_uart", NULL),
+ REGULATOR_SUPPLY("pwrdet_uart", NULL),
+ REGULATOR_SUPPLY("vddio_audio", NULL),
+ REGULATOR_SUPPLY("pwrdet_audio", NULL),
+ REGULATOR_SUPPLY("vddio_bb", NULL),
+ REGULATOR_SUPPLY("pwrdet_bb", NULL),
+ REGULATOR_SUPPLY("vddio_lcd_pmu", NULL),
+ REGULATOR_SUPPLY("pwrdet_lcd", NULL),
+ REGULATOR_SUPPLY("vddio_cam", NULL),
+ REGULATOR_SUPPLY("pwrdet_cam", NULL),
+ REGULATOR_SUPPLY("vddio_vi", NULL),
+ REGULATOR_SUPPLY("pwrdet_vi", NULL),
+ REGULATOR_SUPPLY("ldo6", NULL),
+ REGULATOR_SUPPLY("ldo7", NULL),
+ REGULATOR_SUPPLY("ldo8", NULL),
+ REGULATOR_SUPPLY("vcore_audio", NULL),
+ REGULATOR_SUPPLY("avcore_audio", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.2"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc3", NULL),
+ REGULATOR_SUPPLY("vcore1_lpddr2", NULL),
+ REGULATOR_SUPPLY("vcom_1v8", NULL),
+ REGULATOR_SUPPLY("pmuio_1v8", NULL),
+ REGULATOR_SUPPLY("avdd_ic_usb", NULL),
+ REGULATOR_SUPPLY("vdd_gen1v8", NULL),
+};
+
+static struct regulator_consumer_supply max77663_sd3_supply[] = {
+ REGULATOR_SUPPLY("vdd_gen1v5", NULL),
+ REGULATOR_SUPPLY("vcore_lcd", NULL),
+ REGULATOR_SUPPLY("track_ldo1", NULL),
+ REGULATOR_SUPPLY("external_ldo_1v2", NULL),
+ REGULATOR_SUPPLY("vcore_cam1", NULL),
+ REGULATOR_SUPPLY("vcore_cam2", NULL),
+ REGULATOR_SUPPLY("avdd_pexb", NULL),
+ REGULATOR_SUPPLY("vdd_pexb", NULL),
+ REGULATOR_SUPPLY("avdd_pex_pll", NULL),
+ REGULATOR_SUPPLY("avdd_pexa", NULL),
+ REGULATOR_SUPPLY("vdd_pexa", NULL),
+ REGULATOR_SUPPLY("vcom_1v2", NULL),
+ REGULATOR_SUPPLY("vdio_hsic", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo0_supply[] = {
+ REGULATOR_SUPPLY("vdd_ddr_hs", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo1_supply[] = {
+ REGULATOR_SUPPLY("avdd_plla_p_c_s", NULL),
+ REGULATOR_SUPPLY("avdd_pllm", NULL),
+ REGULATOR_SUPPLY("avdd_pllu_d", NULL),
+ REGULATOR_SUPPLY("avdd_pllu_d2", NULL),
+ REGULATOR_SUPPLY("avdd_pllx", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo2_supply[] = {
+ REGULATOR_SUPPLY("avdd_dsi_csi", NULL),
+ REGULATOR_SUPPLY("pwrdet_mipi", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo3_supply[] = {
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.3"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc4", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo4_supply[] = {
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo5_supply[] = {
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.0"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc1", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo6_supply[] = {
+ REGULATOR_SUPPLY("vddio_sys", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo7_supply[] = {
+ REGULATOR_SUPPLY("unused_ldo7", NULL),
+};
+
+static struct regulator_consumer_supply max77663_ldo8_supply[] = {
+ REGULATOR_SUPPLY("vcore_mmc", NULL),
+};
+
+static struct max77663_regulator_fps_cfg max77663_fps_cfgs[] = {
+ {
+ .src = FPS_SRC_0,
+ .en_src = FPS_EN_SRC_EN0,
+ .time_period = FPS_TIME_PERIOD_DEF,
+ },
+ {
+ .src = FPS_SRC_1,
+ .en_src = FPS_EN_SRC_EN1,
+ .time_period = FPS_TIME_PERIOD_DEF,
+ },
+ {
+ .src = FPS_SRC_2,
+ .en_src = FPS_EN_SRC_EN0,
+ .time_period = FPS_TIME_PERIOD_DEF,
+ },
+};
+
+#define MAX77663_PDATA_INIT(_id, _min_uV, _max_uV, _supply_reg, \
+ _always_on, _boot_on, _apply_uV, \
+ _init_apply, _init_enable, _init_uV, \
+ _fps_src, _fps_pu_period, _fps_pd_period, _flags) \
+ static struct max77663_regulator_platform_data max77663_regulator_pdata_##_id = \
+ { \
+ .init_data = { \
+ .constraints = { \
+ .min_uV = _min_uV, \
+ .max_uV = _max_uV, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ .apply_uV = _apply_uV, \
+ }, \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(max77663_##_id##_supply), \
+ .consumer_supplies = max77663_##_id##_supply, \
+ .supply_regulator = _supply_reg, \
+ }, \
+ .init_apply = _init_apply, \
+ .init_enable = _init_enable, \
+ .init_uV = _init_uV, \
+ .fps_src = _fps_src, \
+ .fps_pu_period = _fps_pu_period, \
+ .fps_pd_period = _fps_pd_period, \
+ .fps_cfgs = max77663_fps_cfgs, \
+ .flags = _flags, \
+ }
+
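+/*
+ * MAX77663_PDATA_INIT() argument order, as used below:
+ * id, min_uV, max_uV, supply regulator, always_on, boot_on, apply_uV,
+ * init_apply, init_enable, init_uV, FPS source, FPS power-up period,
+ * FPS power-down period, flags.
+ */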
+MAX77663_PDATA_INIT(sd0, 600000, 3387500, NULL, 1, 0, 0,
+ 0, 0, -1, FPS_SRC_NONE, -1, -1, EN2_CTRL_SD0 | SD_FSRADE_DISABLE);
+
+MAX77663_PDATA_INIT(sd1, 800000, 1587500, NULL, 1, 0, 0,
+ 1, 1, -1, FPS_SRC_1, -1, -1, SD_FSRADE_DISABLE);
+
+MAX77663_PDATA_INIT(sd2, 600000, 3387500, NULL, 1, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(sd3, 600000, 3387500, NULL, 0, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo0, 800000, 2350000, max77663_rails(sd2), 0, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo1, 800000, 2350000, max77663_rails(sd2), 0, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo2, 800000, 3950000, max77663_rails(sd2), 0, 0, 0,
+ 0, 0, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo3, 800000, 3950000, NULL, 0, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo4, 800000, 1587500, NULL, 0, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo5, 800000, 3950000, NULL, 0, 0, 0,
+ 0, 0, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo6, 800000, 3950000, NULL, 1, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo7, 800000, 3950000, NULL, 0, 0, 0,
+ 0, 0, -1, FPS_SRC_NONE, -1, -1, 0);
+
+MAX77663_PDATA_INIT(ldo8, 800000, 3950000, NULL, 0, 0, 0,
+ 1, 1, -1, FPS_SRC_NONE, -1, -1, 0);
+
+#define MAX77663_REG(_id, _data) \
+ { \
+ .name = "max77663-regulator", \
+ .id = MAX77663_REGULATOR_ID_##_id, \
+ .platform_data = &max77663_regulator_pdata_##_data, \
+ .pdata_size = sizeof(max77663_regulator_pdata_##_data), \
+ }
+
+#define MAX77663_RTC() \
+ { \
+ .name = "max77663-rtc", \
+ .id = 0, \
+ }
+
+static struct mfd_cell max77663_subdevs[] = {
+ MAX77663_REG(SD0, sd0),
+ MAX77663_REG(SD1, sd1),
+ MAX77663_REG(SD2, sd2),
+ MAX77663_REG(SD3, sd3),
+ MAX77663_REG(LDO0, ldo0),
+ MAX77663_REG(LDO1, ldo1),
+ MAX77663_REG(LDO2, ldo2),
+ MAX77663_REG(LDO3, ldo3),
+ MAX77663_REG(LDO4, ldo4),
+ MAX77663_REG(LDO5, ldo5),
+ MAX77663_REG(LDO6, ldo6),
+ MAX77663_REG(LDO7, ldo7),
+ MAX77663_REG(LDO8, ldo8),
+ MAX77663_RTC(),
+};
+
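+/*
+ * Board-level MAX77663 GPIO defaults: every pin is configured as an output
+ * except GPIO4, which is switched to its alternate function.
+ */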
+struct max77663_gpio_config max77663_gpio_cfgs[] = {
+ {
+ .gpio = MAX77663_GPIO0,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_LOW,
+ .out_drv = GPIO_OUT_DRV_PUSH_PULL,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO1,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_HIGH,
+ .out_drv = GPIO_OUT_DRV_OPEN_DRAIN,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO2,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_HIGH,
+ .out_drv = GPIO_OUT_DRV_OPEN_DRAIN,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO3,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_HIGH,
+ .out_drv = GPIO_OUT_DRV_OPEN_DRAIN,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO4,
+ .out_drv = GPIO_OUT_DRV_PUSH_PULL,
+ .alternate = GPIO_ALT_ENABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO5,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_LOW,
+ .out_drv = GPIO_OUT_DRV_PUSH_PULL,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO6,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_LOW,
+ .out_drv = GPIO_OUT_DRV_PUSH_PULL,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+ {
+ .gpio = MAX77663_GPIO7,
+ .dir = GPIO_DIR_OUT,
+ .dout = GPIO_DOUT_LOW,
+ .out_drv = GPIO_OUT_DRV_PUSH_PULL,
+ .alternate = GPIO_ALT_DISABLE,
+ },
+};
+
+static struct max77663_platform_data max77663_pdata = {
+ .irq_base = MAX77663_IRQ_BASE,
+ .gpio_base = MAX77663_GPIO_BASE,
+
+ .num_gpio_cfgs = ARRAY_SIZE(max77663_gpio_cfgs),
+ .gpio_cfgs = max77663_gpio_cfgs,
+
+ .num_subdevs = ARRAY_SIZE(max77663_subdevs),
+ .sub_devices = max77663_subdevs,
+};
+
+static struct i2c_board_info __initdata max77663_regulators[] = {
+ {
+ /* The I2C address is determined by the OTP factory setting */
+ I2C_BOARD_INFO("max77663", 0x1C),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &max77663_pdata,
+ },
+};
+
+int __init cardhu_pm298_regulator_init(void)
+{
+ struct board_info board_info;
+ struct board_info pmu_board_info;
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ u32 pmc_ctrl;
+
+ /* configure the power management controller to trigger PMU
+ * interrupts when low */
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ /* The regulator details have complete constraints */
+ tegra_get_board_info(&board_info);
+ tegra_get_pmu_board_info(&pmu_board_info);
+ if (pmu_board_info.board_id != BOARD_PMU_PM298) {
+ pr_err("%s(): Board ID is not proper\n", __func__);
+ return -ENODEV;
+ }
+
+ i2c_register_board_info(4, max77663_regulators,
+ ARRAY_SIZE(max77663_regulators));
+
+ return 0;
+}
+
+static struct regulator_consumer_supply gpio_switch_en_track_ldo2_supply[] = {
+ REGULATOR_SUPPLY("avdd_sata", NULL),
+ REGULATOR_SUPPLY("vdd_sata", NULL),
+ REGULATOR_SUPPLY("avdd_sata_pll", NULL),
+ REGULATOR_SUPPLY("avdd_plle", NULL),
+};
+static int gpio_switch_en_track_ldo2_voltages[] = { 3300};
+
+static struct regulator_consumer_supply gpio_switch_en_5v0_supply[] = {
+ REGULATOR_SUPPLY("vdd_5v0_sys", NULL),
+ REGULATOR_SUPPLY("vdd_5v0_sby", NULL),
+ REGULATOR_SUPPLY("vdd_hall", NULL),
+ REGULATOR_SUPPLY("vterm_ddr", NULL),
+ REGULATOR_SUPPLY("v2ref_ddr", NULL),
+};
+static int gpio_switch_en_5v0_voltages[] = { 5000};
+
+static struct regulator_consumer_supply gpio_switch_en_ddr_supply[] = {
+ REGULATOR_SUPPLY("mem_vddio_ddr", NULL),
+ REGULATOR_SUPPLY("t30_vddio_ddr", NULL),
+};
+static int gpio_switch_en_ddr_voltages[] = { 1500};
+
+static struct regulator_consumer_supply gpio_switch_en_3v3_sys_supply[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+ REGULATOR_SUPPLY("vdd_lvds", NULL),
+ REGULATOR_SUPPLY("vdd_pnl", NULL),
+ REGULATOR_SUPPLY("vcom_3v3", NULL),
+ REGULATOR_SUPPLY("vdd_3v3", NULL),
+ REGULATOR_SUPPLY("vddio_pex_ctl", NULL),
+ REGULATOR_SUPPLY("pwrdet_pex_ctl", NULL),
+ REGULATOR_SUPPLY("hvdd_pex_pmu", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+ REGULATOR_SUPPLY("vpp_fuse", NULL),
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("vcore_nand", NULL),
+ REGULATOR_SUPPLY("hvdd_sata", NULL),
+ REGULATOR_SUPPLY("vddio_gmi_pmu", NULL),
+ REGULATOR_SUPPLY("pwrdet_nand", NULL),
+ REGULATOR_SUPPLY("avdd_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_af", NULL),
+ REGULATOR_SUPPLY("avdd_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_acc", NULL),
+ REGULATOR_SUPPLY("vdd_phtl", NULL),
+ REGULATOR_SUPPLY("vddio_tp", NULL),
+ REGULATOR_SUPPLY("vdd_led", NULL),
+ REGULATOR_SUPPLY("vddio_cec", NULL),
+ REGULATOR_SUPPLY("vdd_cmps", NULL),
+ REGULATOR_SUPPLY("vdd_temp", NULL),
+ REGULATOR_SUPPLY("vpp_kfuse", NULL),
+ REGULATOR_SUPPLY("vddio_ts", NULL),
+ REGULATOR_SUPPLY("vdd_ir_led", NULL),
+ REGULATOR_SUPPLY("vddio_1wire", NULL),
+ REGULATOR_SUPPLY("avddio_audio", NULL),
+ REGULATOR_SUPPLY("vdd_ec", NULL),
+ REGULATOR_SUPPLY("vcom_pa", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_devices", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_dock", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_edid", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_hdmi_cec", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_gmi", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_spk_amp", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_sensor", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_cam", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_als", NULL),
+ REGULATOR_SUPPLY("debug_cons", NULL),
+ REGULATOR_SUPPLY("vdd", "4-004c"),
+};
+static int gpio_switch_en_3v3_sys_voltages[] = { 3300};
+
+/* DIS_5V_SWITCH from AP SPI2_SCK X02 */
+static struct regulator_consumer_supply gpio_switch_dis_5v_switch_supply[] = {
+ REGULATOR_SUPPLY("master_5v_switch", NULL),
+};
+static int gpio_switch_dis_5v_switch_voltages[] = { 5000};
+
+/* EN_VDD_BL */
+static struct regulator_consumer_supply gpio_switch_en_vdd_bl_supply[] = {
+ REGULATOR_SUPPLY("vdd_backlight", NULL),
+ REGULATOR_SUPPLY("vdd_backlight1", NULL),
+};
+static int gpio_switch_en_vdd_bl_voltages[] = { 5000};
+
+/* EN_3V3_MODEM from AP GPIO VI_VSYNCH D06*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_modem_supply[] = {
+ REGULATOR_SUPPLY("vdd_3v3_mini_card", NULL),
+ REGULATOR_SUPPLY("vdd_mini_card", NULL),
+};
+static int gpio_switch_en_3v3_modem_voltages[] = { 3300};
+
+/* EN_USB1_VBUS_OC*/
+static struct regulator_consumer_supply gpio_switch_en_usb1_vbus_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbus_micro_usb", NULL),
+};
+static int gpio_switch_en_usb1_vbus_oc_voltages[] = { 5000};
+
+/*EN_USB3_VBUS_OC*/
+static struct regulator_consumer_supply gpio_switch_en_usb3_vbus_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbus_typea_usb", NULL),
+};
+static int gpio_switch_en_usb3_vbus_oc_voltages[] = { 5000};
+
+/* EN_VDDIO_VID_OC from AP GPIO VI_PCLK T00*/
+static struct regulator_consumer_supply gpio_switch_en_vddio_vid_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_hdmi_con", NULL),
+};
+static int gpio_switch_en_vddio_vid_oc_voltages[] = { 5000};
+
+/* EN_VDD_PNL1 from AP GPIO VI_D6 L04*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_pnl1_supply[] = {
+ REGULATOR_SUPPLY("vdd_lcd_panel", NULL),
+};
+static int gpio_switch_en_vdd_pnl1_voltages[] = { 3300};
+
+/* CAM1_LDO_EN from AP GPIO KB_ROW6 R06*/
+static struct regulator_consumer_supply gpio_switch_cam1_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_2v8_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_2v8_cam1_af", NULL),
+};
+static int gpio_switch_cam1_ldo_en_voltages[] = { 2800};
+
+/* CAM2_LDO_EN from AP GPIO KB_ROW7 R07*/
+static struct regulator_consumer_supply gpio_switch_cam2_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_2v8_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_2v8_cam2_af", NULL),
+};
+static int gpio_switch_cam2_ldo_en_voltages[] = { 2800};
+
+/* CAM3_LDO_EN from AP GPIO KB_ROW8 S00*/
+static struct regulator_consumer_supply gpio_switch_cam3_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_cam3", NULL),
+};
+static int gpio_switch_cam3_ldo_en_voltages[] = { 3300};
+
+/* EN_VDD_COM from AP GPIO SDMMC3_DAT5 D00*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_com_supply[] = {
+ REGULATOR_SUPPLY("vdd_com_bd", NULL),
+};
+static int gpio_switch_en_vdd_com_voltages[] = { 3300};
+
+/* EN_VDD_SDMMC1 from AP GPIO VI_HSYNC D07*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_sdmmc1_supply[] = {
+ REGULATOR_SUPPLY("vddio_sd_slot", "sdhci-tegra.0"),
+};
+static int gpio_switch_en_vdd_sdmmc1_voltages[] = { 3300};
+
+/* EN_3V3_EMMC from AP GPIO SDMMC3_DAT4 D01*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_emmc_supply[] = {
+ REGULATOR_SUPPLY("vdd_emmc_core", NULL),
+};
+static int gpio_switch_en_3v3_emmc_voltages[] = { 3300};
+
+/* EN_3V3_PEX_HVDD from AP GPIO VI_D09 L07*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_pex_hvdd_supply[] = {
+ REGULATOR_SUPPLY("hvdd_pex", NULL),
+};
+static int gpio_switch_en_3v3_pex_hvdd_voltages[] = { 3300};
+
+/* EN_3v3_FUSE from AP GPIO VI_D08 L06*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_fuse_supply[] = {
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+static int gpio_switch_en_3v3_fuse_voltages[] = { 3300};
+
+/* EN_1V8_CAM from AP GPIO GPIO_PBB4 PBB04*/
+static struct regulator_consumer_supply gpio_switch_en_1v8_cam_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v8_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_1v8_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_1v8_cam3", NULL),
+};
+static int gpio_switch_en_1v8_cam_voltages[] = { 1800};
+
+static struct regulator_consumer_supply gpio_switch_en_vbrtr_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbrtr", NULL),
+};
+static int gpio_switch_en_vbrtr_voltages[] = { 3300};
+
+static int enable_load_switch_rail(
+ struct gpio_switch_regulator_subdev_data *psubdev_data)
+{
+ int ret;
+
+ if (psubdev_data->pin_group <= 0)
+ return -EINVAL;
+
+ /* Tristate the pin and make it an input */
+ ret = tegra_pinmux_set_tristate(psubdev_data->pin_group,
+ TEGRA_TRI_TRISTATE);
+ if (ret < 0)
+ return ret;
+ return gpio_direction_input(psubdev_data->gpio_nr);
+}
+
+static int disable_load_switch_rail(
+ struct gpio_switch_regulator_subdev_data *psubdev_data)
+{
+ int ret;
+
+ if (psubdev_data->pin_group <= 0)
+ return -EINVAL;
+
+ /* Un-tristate the pin and drive it low */
+ ret = tegra_pinmux_set_tristate(psubdev_data->pin_group,
+ TEGRA_TRI_NORMAL);
+ if (ret < 0)
+ return ret;
+ return gpio_direction_output(psubdev_data->gpio_nr, 0);
+}
+
+
+/* Macro for defining gpio switch regulator sub device data */
+#define GREG_INIT(_id, _var, _name, _input_supply, _always_on, _boot_on, \
+ _gpio_nr, _active_low, _init_state, _pg, _enable, _disable) \
+ static struct gpio_switch_regulator_subdev_data gpio_pdata_##_var = \
+ { \
+ .regulator_name = "gpio-switch-"#_name, \
+ .input_supply = _input_supply, \
+ .id = _id, \
+ .gpio_nr = _gpio_nr, \
+ .pin_group = _pg, \
+ .active_low = _active_low, \
+ .init_state = _init_state, \
+ .voltages = gpio_switch_##_name##_voltages, \
+ .n_voltages = ARRAY_SIZE(gpio_switch_##_name##_voltages), \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(gpio_switch_##_name##_supply), \
+ .consumer_supplies = gpio_switch_##_name##_supply, \
+ .constraints = { \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ }, \
+ .enable_rail = _enable, \
+ .disable_rail = _disable, \
+ }
+
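+/*
+ * GREG_INIT() argument order: id, variable suffix, rail name, input supply,
+ * always_on, boot_on, GPIO number, active_low, initial state, pin group,
+ * enable callback, disable callback.
+ */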
+/* Common to most boards */
+GREG_INIT(0, en_track_ldo2, en_track_ldo2, NULL, 0, 0, MAX77663_GPIO_BASE + MAX77663_GPIO0, false, 0, 0, 0, 0);
+GREG_INIT(1, en_5v0, en_5v0, NULL, 1, 0, MAX77663_GPIO_BASE + MAX77663_GPIO2, false, 1, 0, 0, 0);
+GREG_INIT(2, en_ddr, en_ddr, NULL, 1, 0, MAX77663_GPIO_BASE + MAX77663_GPIO3, false, 1, 0, 0, 0);
+GREG_INIT(3, en_3v3_sys, en_3v3_sys, NULL, 1, 0, MAX77663_GPIO_BASE + MAX77663_GPIO1, false, 1, 0, 0, 0);
+GREG_INIT(4, en_vdd_bl, en_vdd_bl, NULL, 0, 0, TEGRA_GPIO_PK3, false, 1, 0, 0, 0);
+GREG_INIT(5, en_3v3_modem, en_3v3_modem, NULL, 1, 0, TEGRA_GPIO_PD6, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1, en_vdd_pnl1, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PL4, false, 1, 0, 0, 0);
+GREG_INIT(7, cam3_ldo_en, cam3_ldo_en, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PS0, false, 0, 0, 0, 0);
+GREG_INIT(8, en_vdd_com, en_vdd_com, "vdd_3v3_devices", 1, 0, TEGRA_GPIO_PD0, false, 1, 0, 0, 0);
+GREG_INIT(9, en_3v3_fuse, en_3v3_fuse, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PL6, false, 0, 0, 0, 0);
+GREG_INIT(10, en_3v3_emmc, en_3v3_emmc, "vdd_3v3_devices", 1, 0, TEGRA_GPIO_PD1, false, 1, 0, 0, 0);
+GREG_INIT(11, en_vdd_sdmmc1, en_vdd_sdmmc1, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PD7, false, 1, 0, 0, 0);
+GREG_INIT(12, en_3v3_pex_hvdd, en_3v3_pex_hvdd, "hvdd_pex_pmu", 0, 0, TEGRA_GPIO_PL7, false, 0, 0, 0, 0);
+GREG_INIT(13, en_1v8_cam, en_1v8_cam, "vdd_gen1v8", 0, 0, TEGRA_GPIO_PBB4, false, 0, 0, 0, 0);
+
+/* Specific to PM269 */
+GREG_INIT(4, en_vdd_bl_pm269, en_vdd_bl, NULL,
+ 0, 0, TEGRA_GPIO_PH3, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1_pm269, en_vdd_pnl1, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PW1, false, 1, 0, 0, 0);
+GREG_INIT(9, en_3v3_fuse_pm269, en_3v3_fuse, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PC1, false, 0, 0, 0, 0);
+GREG_INIT(12, en_3v3_pex_hvdd_pm269, en_3v3_pex_hvdd, "hvdd_pex_pmu",
+ 0, 0, TEGRA_GPIO_PC6, false, 0, 0, 0, 0);
+GREG_INIT(17, en_vddio_vid_oc_pm269, en_vddio_vid_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PP2, false, 0, TEGRA_PINGROUP_DAP3_DOUT,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* Specific to E1187/E1186/E1256 */
+GREG_INIT(14, dis_5v_switch_e118x, dis_5v_switch, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PX2, true, 0, 0, 0, 0);
+GREG_INIT(15, en_usb1_vbus_oc_e118x, en_usb1_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PI4, false, 0, TEGRA_PINGROUP_GMI_RST_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(16, en_usb3_vbus_oc_e118x, en_usb3_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PH7, false, 0, TEGRA_PINGROUP_GMI_AD15,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(17, en_vddio_vid_oc_e118x, en_vddio_vid_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PT0, false, 0, TEGRA_PINGROUP_VI_PCLK,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* E1198/E1291 specific */
+GREG_INIT(18, cam1_ldo_en, cam1_ldo_en, "vdd_3v3_cam", 0, 0, TEGRA_GPIO_PR6, false, 0, 0, 0, 0);
+GREG_INIT(19, cam2_ldo_en, cam2_ldo_en, "vdd_3v3_cam", 0, 0, TEGRA_GPIO_PR7, false, 0, 0, 0, 0);
+
+GREG_INIT(22, en_vbrtr, en_vbrtr, "vdd_3v3_devices", 0, 0, PMU_TCA6416_GPIO_PORT12, false, 0, 0, 0, 0);
+
+#define ADD_GPIO_REG(_name) &gpio_pdata_##_name
+
+#define COMMON_GPIO_REG \
+ ADD_GPIO_REG(en_track_ldo2), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(en_vdd_pnl1), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_vdd_sdmmc1), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd), \
+ ADD_GPIO_REG(en_1v8_cam),
+
+#define PM269_GPIO_REG \
+ ADD_GPIO_REG(en_track_ldo2), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_vdd_bl_pm269), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(en_vdd_pnl1_pm269), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse_pm269), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd_pm269), \
+ ADD_GPIO_REG(en_1v8_cam), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_vddio_vid_oc_pm269),
+
+#define E118x_GPIO_REG \
+ ADD_GPIO_REG(en_vdd_bl), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_vddio_vid_oc_e118x), \
+ ADD_GPIO_REG(en_vbrtr),
+
+/* GPIO switch regulator platform data for E1186/E1187/E1256 */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e118x[] = {
+ COMMON_GPIO_REG
+ E118x_GPIO_REG
+};
+
+/* GPIO switch regulator platform data for PM269 */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_pm269[] = {
+ PM269_GPIO_REG
+};
+
+static struct gpio_switch_regulator_platform_data gswitch_pdata;
+static struct platform_device gswitch_regulator_pdata = {
+ .name = "gpio-switch-regulator",
+ .id = -1,
+ .dev = {
+ .platform_data = &gswitch_pdata,
+ },
+};
+
+int __init cardhu_pm298_gpio_switch_regulator_init(void)
+{
+ int i;
+ struct board_info board_info;
+ tegra_get_board_info(&board_info);
+
+ switch (board_info.board_id) {
+ case BOARD_PM269:
+ case BOARD_PM305:
+ case BOARD_PM311:
+ case BOARD_E1257:
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_pm269);
+ gswitch_pdata.subdevs = gswitch_subdevs_pm269;
+ break;
+
+ default:
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_e118x);
+ gswitch_pdata.subdevs = gswitch_subdevs_e118x;
+ break;
+ }
+
+ for (i = 0; i < gswitch_pdata.num_subdevs; ++i) {
+ struct gpio_switch_regulator_subdev_data *gswitch_data =
+ gswitch_pdata.subdevs[i];
+ if (gswitch_data->gpio_nr <= TEGRA_NR_GPIOS)
+ tegra_gpio_enable(gswitch_data->gpio_nr);
+ }
+
+ return platform_device_register(&gswitch_regulator_pdata);
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-pm299-power-rails.c b/arch/arm/mach-tegra/board-cardhu-pm299-power-rails.c
new file mode 100644
index 000000000000..866c3c0ff506
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-pm299-power-rails.c
@@ -0,0 +1,748 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-pm299-power-rails.c
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/ricoh583.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/regulator/gpio-switch-regulator.h>
+#include <linux/regulator/ricoh583-regulator.h>
+#include <linux/regulator/tps6236x-regulator.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/edp.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-cardhu.h"
+#include "wakeups-t3.h"
+#include "mach/tsensor.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+static struct regulator_consumer_supply ricoh583_dc1_supply_skubit0_0[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+ REGULATOR_SUPPLY("en_vddio_ddr_1v2", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_dc1_supply_skubit0_1[] = {
+ REGULATOR_SUPPLY("en_vddio_ddr_1v2", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_dc3_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_gen1v5", NULL),
+ REGULATOR_SUPPLY("vcore_lcd", NULL),
+ REGULATOR_SUPPLY("track_ldo1", NULL),
+ REGULATOR_SUPPLY("external_ldo_1v2", NULL),
+ REGULATOR_SUPPLY("vcore_cam1", NULL),
+ REGULATOR_SUPPLY("vcore_cam2", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_dc0_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_cpu_pmu", NULL),
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+ REGULATOR_SUPPLY("vdd_sys", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_dc2_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_gen1v8", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vddio_sys", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.3"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc4", NULL),
+ REGULATOR_SUPPLY("vdd1v8_satelite", NULL),
+ REGULATOR_SUPPLY("vddio_uart", NULL),
+ REGULATOR_SUPPLY("pwrdet_uart", NULL),
+ REGULATOR_SUPPLY("vddio_audio", NULL),
+ REGULATOR_SUPPLY("pwrdet_audio", NULL),
+ REGULATOR_SUPPLY("vddio_bb", NULL),
+ REGULATOR_SUPPLY("pwrdet_bb", NULL),
+ REGULATOR_SUPPLY("vddio_lcd_pmu", NULL),
+ REGULATOR_SUPPLY("pwrdet_lcd", NULL),
+ REGULATOR_SUPPLY("vddio_cam", NULL),
+ REGULATOR_SUPPLY("pwrdet_cam", NULL),
+ REGULATOR_SUPPLY("vddio_vi", NULL),
+ REGULATOR_SUPPLY("pwrdet_vi", NULL),
+ REGULATOR_SUPPLY("ldo6", NULL),
+ REGULATOR_SUPPLY("ldo7", NULL),
+ REGULATOR_SUPPLY("ldo8", NULL),
+ REGULATOR_SUPPLY("vcore_audio", NULL),
+ REGULATOR_SUPPLY("avcore_audio", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.2"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc3", NULL),
+ REGULATOR_SUPPLY("vcore1_lpddr2", NULL),
+ REGULATOR_SUPPLY("vcom_1v8", NULL),
+ REGULATOR_SUPPLY("pmuio_1v8", NULL),
+ REGULATOR_SUPPLY("avdd_ic_usb", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo0_supply_0[] = {
+ REGULATOR_SUPPLY("unused_ldo0", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo1_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_pexb", NULL),
+ REGULATOR_SUPPLY("vdd_pexb", NULL),
+ REGULATOR_SUPPLY("avdd_pex_pll", NULL),
+ REGULATOR_SUPPLY("avdd_pexa", NULL),
+ REGULATOR_SUPPLY("vdd_pexa", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo2_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_sata", NULL),
+ REGULATOR_SUPPLY("vdd_sata", NULL),
+ REGULATOR_SUPPLY("avdd_sata_pll", NULL),
+ REGULATOR_SUPPLY("avdd_plle", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo3_supply_0[] = {
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.0"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc1", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo4_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo5_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo6_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_dsi_csi", NULL),
+ REGULATOR_SUPPLY("pwrdet_mipi", NULL),
+};
+static struct regulator_consumer_supply ricoh583_ldo7_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_plla_p_c_s", NULL),
+ REGULATOR_SUPPLY("avdd_pllm", NULL),
+ REGULATOR_SUPPLY("avdd_pllu_d", NULL),
+ REGULATOR_SUPPLY("avdd_pllu_d2", NULL),
+ REGULATOR_SUPPLY("avdd_pllx", NULL),
+};
+
+static struct regulator_consumer_supply ricoh583_ldo8_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_ddr_hs", NULL),
+};
+
+#define RICOH_PDATA_INIT(_name, _sname, _minmv, _maxmv, _supply_reg, _always_on, \
+ _boot_on, _apply_uv, _init_uV, _init_enable, _init_apply, _flags, \
+ _ext_control, _ds_slots) \
+ static struct ricoh583_regulator_platform_data pdata_##_name##_##_sname = \
+ { \
+ .regulator = { \
+ .constraints = { \
+ .min_uV = (_minmv)*1000, \
+ .max_uV = (_maxmv)*1000, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ .apply_uV = _apply_uv, \
+ }, \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(ricoh583_##_name##_supply_##_sname), \
+ .consumer_supplies = ricoh583_##_name##_supply_##_sname, \
+ .supply_regulator = _supply_reg, \
+ }, \
+ .init_uV = _init_uV * 1000, \
+ .init_enable = _init_enable, \
+ .init_apply = _init_apply, \
+ .deepsleep_slots = _ds_slots, \
+ .flags = _flags, \
+ .ext_pwr_req = _ext_control, \
+ }
+
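+/*
+ * RICOH_PDATA_INIT() argument order: name, sub-name, min mV, max mV,
+ * supply regulator, always_on, boot_on, apply_uV, init_uV (mV),
+ * init_enable, init_apply, flags, external power request, deep-sleep slots.
+ */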
+RICOH_PDATA_INIT(dc0, 0, 700, 1500, 0, 1, 1, 0, -1, 0, 0, 0,
+ RICOH583_EXT_PWRREQ2_CONTROL, 0);
+RICOH_PDATA_INIT(dc1, skubit0_0, 700, 1500, 0, 1, 1, 0, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(dc2, 0, 900, 2400, 0, 1, 1, 0, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(dc3, 0, 900, 2400, 0, 1, 1, 0, -1, 0, 0, 0, 0, 0);
+
+RICOH_PDATA_INIT(ldo0, 0, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(ldo1, 0, 1000, 3300, ricoh583_rails(DC1), 0, 0, 0, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(ldo2, 0, 1050, 1050, ricoh583_rails(DC1), 0, 0, 1, -1, 0, 0, 0, 0, 0);
+
+RICOH_PDATA_INIT(ldo3, 0, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(ldo4, 0, 750, 1500, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(ldo5, 0, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0);
+
+RICOH_PDATA_INIT(ldo6, 0, 1200, 1200, ricoh583_rails(DC2), 0, 0, 1, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(ldo7, 0, 1200, 1200, ricoh583_rails(DC2), 1, 1, 1, -1, 0, 0, 0, 0, 0);
+RICOH_PDATA_INIT(ldo8, 0, 900, 3400, ricoh583_rails(DC2), 1, 0, 0, -1, 0, 0, 0, 0, 0);
+
+static struct ricoh583_rtc_platform_data rtc_data = {
+ .irq = TEGRA_NR_IRQS + RICOH583_IRQ_YALE,
+ .time = {
+ .tm_year = 2011,
+ .tm_mon = 0,
+ .tm_mday = 1,
+ .tm_hour = 0,
+ .tm_min = 0,
+ .tm_sec = 0,
+ },
+};
+
+#define RICOH_RTC_REG() \
+{ \
+ .id = 0, \
+ .name = "rtc_ricoh583", \
+ .platform_data = &rtc_data, \
+}
+
+#define RICOH_REG(_id, _name, _sname) \
+{ \
+ .id = RICOH583_ID_##_id, \
+ .name = "ricoh583-regulator", \
+ .platform_data = &pdata_##_name##_##_sname, \
+}
+
+#define RICOH583_DEV_COMMON_E118X \
+ RICOH_REG(DC0, dc0, 0), \
+ RICOH_REG(DC1, dc1, skubit0_0), \
+ RICOH_REG(DC2, dc2, 0), \
+ RICOH_REG(DC3, dc3, 0), \
+ RICOH_REG(LDO0, ldo8, 0), \
+ RICOH_REG(LDO1, ldo7, 0), \
+ RICOH_REG(LDO2, ldo6, 0), \
+ RICOH_REG(LDO3, ldo5, 0), \
+ RICOH_REG(LDO4, ldo4, 0), \
+ RICOH_REG(LDO5, ldo3, 0), \
+ RICOH_REG(LDO6, ldo0, 0), \
+ RICOH_REG(LDO7, ldo1, 0), \
+ RICOH_REG(LDO8, ldo2, 0), \
+ RICOH_RTC_REG()
+
+static struct ricoh583_subdev_info ricoh_devs_e118x_dcdc[] = {
+ RICOH583_DEV_COMMON_E118X,
+};
+
+#define RICOH_GPIO_INIT(_init_apply, _pulldn, _output_mode, _output_val) \
+ { \
+ .pulldn_en = _pulldn, \
+ .output_mode_en = _output_mode, \
+ .output_val = _output_val, \
+ .init_apply = _init_apply, \
+ }
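+/*
+ * RICOH_GPIO_INIT() argument order: init_apply, pull-down enable,
+ * output-mode enable, output value.
+ */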
+struct ricoh583_gpio_init_data ricoh_gpio_data[] = {
+ RICOH_GPIO_INIT(false, false, false, 0),
+ RICOH_GPIO_INIT(false, false, false, 0),
+ RICOH_GPIO_INIT(false, false, false, 0),
+ RICOH_GPIO_INIT(true, false, true, 1),
+ RICOH_GPIO_INIT(true, false, true, 1),
+ RICOH_GPIO_INIT(false, false, false, 0),
+ RICOH_GPIO_INIT(false, false, false, 0),
+ RICOH_GPIO_INIT(false, false, false, 0),
+};
+
+static struct ricoh583_platform_data ricoh_platform = {
+ .irq_base = RICOH583_IRQ_BASE,
+ .gpio_base = RICOH583_GPIO_BASE,
+ .gpio_init_data = ricoh_gpio_data,
+ .num_gpioinit_data = ARRAY_SIZE(ricoh_gpio_data),
+ .enable_shutdown_pin = true,
+};
+
+static struct i2c_board_info __initdata ricoh583_regulators[] = {
+ {
+ I2C_BOARD_INFO("ricoh583", 0x34),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &ricoh_platform,
+ },
+};
+
+/* TPS62361B DC-DC converter */
+static struct regulator_consumer_supply tps6236x_dcdc_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+};
+
+static struct tps6236x_regulator_platform_data tps6236x_pdata = {
+ .reg_init_data = { \
+ .constraints = { \
+ .min_uV = 500000, \
+ .max_uV = 1770000, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = 1, \
+ .boot_on = 1, \
+ .apply_uV = 0, \
+ }, \
+ .num_consumer_supplies = ARRAY_SIZE(tps6236x_dcdc_supply), \
+ .consumer_supplies = tps6236x_dcdc_supply, \
+ }, \
+ .internal_pd_enable = 0, \
+ .vsel = 3, \
+ .init_uV = -1, \
+ .init_apply = 0, \
+};
+
+static struct i2c_board_info __initdata tps6236x_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("tps62361B", 0x60),
+ .platform_data = &tps6236x_pdata,
+ },
+};
+
+int __init cardhu_pm299_regulator_init(void)
+{
+ struct board_info board_info;
+ struct board_info pmu_board_info;
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ u32 pmc_ctrl;
+
+ /* configure the power management controller to trigger PMU
+ * interrupts when low */
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ /* The regulator details have complete constraints */
+ tegra_get_board_info(&board_info);
+ tegra_get_pmu_board_info(&pmu_board_info);
+ if (pmu_board_info.board_id != BOARD_PMU_PM299) {
+ pr_err("%s(): Board ID is not proper\n", __func__);
+ return -ENODEV;
+ }
+
+ /* If a TPS6236x DC-DC converter is present, the dc1 consumer
+ * list must not include vdd_core */
+ if ((board_info.sku & SKU_DCDC_TPS62361_SUPPORT) ||
+ (pmu_board_info.sku & SKU_DCDC_TPS62361_SUPPORT)) {
+ pdata_dc1_skubit0_0.regulator.consumer_supplies =
+ ricoh583_dc1_supply_skubit0_1;
+ pdata_dc1_skubit0_0.regulator.num_consumer_supplies =
+ ARRAY_SIZE(ricoh583_dc1_supply_skubit0_1);
+ }
+
+ ricoh_platform.num_subdevs = ARRAY_SIZE(ricoh_devs_e118x_dcdc);
+ ricoh_platform.subdevs = ricoh_devs_e118x_dcdc;
+
+ i2c_register_board_info(4, ricoh583_regulators, 1);
+
+ /* Register the TPS6236x for all boards whose sku bit 0 is set. */
+ if ((board_info.sku & SKU_DCDC_TPS62361_SUPPORT) ||
+ (pmu_board_info.sku & SKU_DCDC_TPS62361_SUPPORT)) {
+ pr_info("Registering the device TPS62361B\n");
+ i2c_register_board_info(4, tps6236x_boardinfo, 1);
+ }
+ return 0;
+}
+
+/* EN_5V_CP from PMU GP0 */
+static struct regulator_consumer_supply gpio_switch_en_5v_cp_supply[] = {
+ REGULATOR_SUPPLY("vdd_5v0_sby", NULL),
+ REGULATOR_SUPPLY("vdd_hall", NULL),
+ REGULATOR_SUPPLY("vterm_ddr", NULL),
+ REGULATOR_SUPPLY("v2ref_ddr", NULL),
+};
+static int gpio_switch_en_5v_cp_voltages[] = { 5000};
+
+/* EN_5V0 From PMU GP2 */
+static struct regulator_consumer_supply gpio_switch_en_5v0_supply[] = {
+ REGULATOR_SUPPLY("vdd_5v0_sys", NULL),
+};
+static int gpio_switch_en_5v0_voltages[] = { 5000};
+
+/* EN_DDR From PMU GP6 */
+static struct regulator_consumer_supply gpio_switch_en_ddr_supply[] = {
+ REGULATOR_SUPPLY("mem_vddio_ddr", NULL),
+ REGULATOR_SUPPLY("t30_vddio_ddr", NULL),
+};
+static int gpio_switch_en_ddr_voltages[] = { 1500};
+
+/* EN_3V3_SYS From PMU GP7 */
+static struct regulator_consumer_supply gpio_switch_en_3v3_sys_supply[] = {
+ REGULATOR_SUPPLY("vdd_lvds", NULL),
+ REGULATOR_SUPPLY("vdd_pnl", NULL),
+ REGULATOR_SUPPLY("vcom_3v3", NULL),
+ REGULATOR_SUPPLY("vdd_3v3", NULL),
+ REGULATOR_SUPPLY("vcore_mmc", NULL),
+ REGULATOR_SUPPLY("vddio_pex_ctl", NULL),
+ REGULATOR_SUPPLY("pwrdet_pex_ctl", NULL),
+ REGULATOR_SUPPLY("hvdd_pex_pmu", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+ REGULATOR_SUPPLY("vpp_fuse", NULL),
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("vcore_nand", NULL),
+ REGULATOR_SUPPLY("hvdd_sata", NULL),
+ REGULATOR_SUPPLY("vddio_gmi_pmu", NULL),
+ REGULATOR_SUPPLY("pwrdet_nand", NULL),
+ REGULATOR_SUPPLY("avdd_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_af", NULL),
+ REGULATOR_SUPPLY("avdd_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_acc", NULL),
+ REGULATOR_SUPPLY("vdd_phtl", NULL),
+ REGULATOR_SUPPLY("vddio_tp", NULL),
+ REGULATOR_SUPPLY("vdd_led", NULL),
+ REGULATOR_SUPPLY("vddio_cec", NULL),
+ REGULATOR_SUPPLY("vdd_cmps", NULL),
+ REGULATOR_SUPPLY("vdd_temp", NULL),
+ REGULATOR_SUPPLY("vpp_kfuse", NULL),
+ REGULATOR_SUPPLY("vddio_ts", NULL),
+ REGULATOR_SUPPLY("vdd_ir_led", NULL),
+ REGULATOR_SUPPLY("vddio_1wire", NULL),
+ REGULATOR_SUPPLY("avddio_audio", NULL),
+ REGULATOR_SUPPLY("vdd_ec", NULL),
+ REGULATOR_SUPPLY("vcom_pa", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_devices", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_dock", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_edid", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_hdmi_cec", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_gmi", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_spk_amp", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_sensor", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_cam", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_als", NULL),
+ REGULATOR_SUPPLY("debug_cons", NULL),
+};
+static int gpio_switch_en_3v3_sys_voltages[] = { 3300};
+
+/* DIS_5V_SWITCH from AP SPI2_SCK X02 */
+static struct regulator_consumer_supply gpio_switch_dis_5v_switch_supply[] = {
+ REGULATOR_SUPPLY("master_5v_switch", NULL),
+};
+static int gpio_switch_dis_5v_switch_voltages[] = { 5000};
+
+/* EN_VDD_BL */
+static struct regulator_consumer_supply gpio_switch_en_vdd_bl_supply[] = {
+ REGULATOR_SUPPLY("vdd_backlight", NULL),
+ REGULATOR_SUPPLY("vdd_backlight1", NULL),
+};
+static int gpio_switch_en_vdd_bl_voltages[] = { 5000};
+
+/* EN_3V3_MODEM from AP GPIO VI_VSYNCH D06*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_modem_supply[] = {
+ REGULATOR_SUPPLY("vdd_3v3_mini_card", NULL),
+ REGULATOR_SUPPLY("vdd_mini_card", NULL),
+};
+static int gpio_switch_en_3v3_modem_voltages[] = { 3300};
+
+/* EN_USB1_VBUS_OC*/
+static struct regulator_consumer_supply gpio_switch_en_usb1_vbus_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbus_micro_usb", NULL),
+};
+static int gpio_switch_en_usb1_vbus_oc_voltages[] = { 5000};
+
+/*EN_USB3_VBUS_OC*/
+static struct regulator_consumer_supply gpio_switch_en_usb3_vbus_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbus_typea_usb", NULL),
+};
+static int gpio_switch_en_usb3_vbus_oc_voltages[] = { 5000};
+
+/* EN_VDDIO_VID_OC from AP GPIO VI_PCLK T00*/
+static struct regulator_consumer_supply gpio_switch_en_vddio_vid_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_hdmi_con", NULL),
+};
+static int gpio_switch_en_vddio_vid_oc_voltages[] = { 5000};
+
+/* EN_VDD_PNL1 from AP GPIO VI_D6 L04*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_pnl1_supply[] = {
+ REGULATOR_SUPPLY("vdd_lcd_panel", NULL),
+};
+static int gpio_switch_en_vdd_pnl1_voltages[] = { 3300};
+
+/* CAM1_LDO_EN from AP GPIO KB_ROW6 R06*/
+static struct regulator_consumer_supply gpio_switch_cam1_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_2v8_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_2v8_cam1_af", NULL),
+};
+static int gpio_switch_cam1_ldo_en_voltages[] = { 2800};
+
+/* CAM2_LDO_EN from AP GPIO KB_ROW7 R07*/
+static struct regulator_consumer_supply gpio_switch_cam2_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_2v8_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_2v8_cam2_af", NULL),
+};
+static int gpio_switch_cam2_ldo_en_voltages[] = { 2800};
+
+/* CAM3_LDO_EN from AP GPIO KB_ROW8 S00*/
+static struct regulator_consumer_supply gpio_switch_cam3_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_cam3", NULL),
+};
+static int gpio_switch_cam3_ldo_en_voltages[] = { 3300};
+
+/* EN_VDD_COM from AP GPIO SDMMC3_DAT5 D00*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_com_supply[] = {
+ REGULATOR_SUPPLY("vdd_com_bd", NULL),
+};
+static int gpio_switch_en_vdd_com_voltages[] = { 3300};
+
+/* EN_VDD_SDMMC1 from AP GPIO VI_HSYNC D07*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_sdmmc1_supply[] = {
+ REGULATOR_SUPPLY("vddio_sd_slot", "sdhci-tegra.0"),
+};
+static int gpio_switch_en_vdd_sdmmc1_voltages[] = { 3300};
+
+/* EN_3V3_EMMC from AP GPIO SDMMC3_DAT4 D01*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_emmc_supply[] = {
+ REGULATOR_SUPPLY("vdd_emmc_core", NULL),
+};
+static int gpio_switch_en_3v3_emmc_voltages[] = { 3300};
+
+/* EN_3V3_PEX_HVDD from AP GPIO VI_D09 L07*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_pex_hvdd_supply[] = {
+ REGULATOR_SUPPLY("hvdd_pex", NULL),
+};
+static int gpio_switch_en_3v3_pex_hvdd_voltages[] = { 3300};
+
+/* EN_3v3_FUSE from AP GPIO VI_D08 L06*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_fuse_supply[] = {
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+static int gpio_switch_en_3v3_fuse_voltages[] = { 3300};
+
+/* EN_1V8_CAM from AP GPIO GPIO_PBB4 PBB04*/
+static struct regulator_consumer_supply gpio_switch_en_1v8_cam_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v8_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_1v8_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_1v8_cam3", NULL),
+};
+static int gpio_switch_en_1v8_cam_voltages[] = { 1800};
+
+static struct regulator_consumer_supply gpio_switch_en_vbrtr_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbrtr", NULL),
+};
+static int gpio_switch_en_vbrtr_voltages[] = { 3300};
+
+static int enable_load_switch_rail(
+ struct gpio_switch_regulator_subdev_data *psubdev_data)
+{
+ int ret;
+
+ if (psubdev_data->pin_group <= 0)
+ return -EINVAL;
+
+ /* Tristate the pin and make it an input */
+ ret = tegra_pinmux_set_tristate(psubdev_data->pin_group,
+ TEGRA_TRI_TRISTATE);
+ if (ret < 0)
+ return ret;
+ return gpio_direction_input(psubdev_data->gpio_nr);
+}
+
+static int disable_load_switch_rail(
+ struct gpio_switch_regulator_subdev_data *psubdev_data)
+{
+ int ret;
+
+ if (psubdev_data->pin_group <= 0)
+ return -EINVAL;
+
+ /* Un-tristate the pin and drive it low */
+ ret = tegra_pinmux_set_tristate(psubdev_data->pin_group,
+ TEGRA_TRI_NORMAL);
+ if (ret < 0)
+ return ret;
+ return gpio_direction_output(psubdev_data->gpio_nr, 0);
+}
+
+
+/* Macro for defining gpio switch regulator sub device data */
+#define GREG_INIT(_id, _var, _name, _input_supply, _always_on, _boot_on, \
+ _gpio_nr, _active_low, _init_state, _pg, _enable, _disable) \
+ static struct gpio_switch_regulator_subdev_data gpio_pdata_##_var = \
+ { \
+ .regulator_name = "gpio-switch-"#_name, \
+ .input_supply = _input_supply, \
+ .id = _id, \
+ .gpio_nr = _gpio_nr, \
+ .pin_group = _pg, \
+ .active_low = _active_low, \
+ .init_state = _init_state, \
+ .voltages = gpio_switch_##_name##_voltages, \
+ .n_voltages = ARRAY_SIZE(gpio_switch_##_name##_voltages), \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(gpio_switch_##_name##_supply), \
+ .consumer_supplies = gpio_switch_##_name##_supply, \
+ .constraints = { \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ }, \
+ .enable_rail = _enable, \
+ .disable_rail = _disable, \
+ }
+
+/* Common to most boards */
+GREG_INIT(0, en_5v_cp, en_5v_cp, NULL, 1, 0, TPS6591X_GPIO_0, false, 1, 0, 0, 0);
+GREG_INIT(1, en_5v0, en_5v0, NULL, 0, 0, TPS6591X_GPIO_4, false, 0, 0, 0, 0);
+GREG_INIT(2, en_ddr, en_ddr, NULL, 0, 0, TPS6591X_GPIO_3, false, 1, 0, 0, 0);
+GREG_INIT(3, en_3v3_sys, en_3v3_sys, NULL, 0, 0, TPS6591X_GPIO_1, false, 0, 0, 0, 0);
+GREG_INIT(4, en_vdd_bl, en_vdd_bl, NULL, 0, 0, TEGRA_GPIO_PK3, false, 1, 0, 0, 0);
+GREG_INIT(5, en_3v3_modem, en_3v3_modem, NULL, 1, 0, TEGRA_GPIO_PD6, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1, en_vdd_pnl1, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PL4, false, 1, 0, 0, 0);
+GREG_INIT(7, cam3_ldo_en, cam3_ldo_en, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PS0, false, 0, 0, 0, 0);
+GREG_INIT(8, en_vdd_com, en_vdd_com, "vdd_3v3_devices", 1, 0, TEGRA_GPIO_PD0, false, 1, 0, 0, 0);
+GREG_INIT(9, en_3v3_fuse, en_3v3_fuse, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PL6, false, 0, 0, 0, 0);
+GREG_INIT(10, en_3v3_emmc, en_3v3_emmc, "vdd_3v3_devices", 1, 0, TEGRA_GPIO_PD1, false, 1, 0, 0, 0);
+GREG_INIT(11, en_vdd_sdmmc1, en_vdd_sdmmc1, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PD7, false, 1, 0, 0, 0);
+GREG_INIT(12, en_3v3_pex_hvdd, en_3v3_pex_hvdd, "hvdd_pex_pmu", 0, 0, TEGRA_GPIO_PL7, false, 0, 0, 0, 0);
+GREG_INIT(13, en_1v8_cam, en_1v8_cam, "vdd_gen1v8", 0, 0, TEGRA_GPIO_PBB4, false, 0, 0, 0, 0);
+
+/* Specific to PM269 */
+GREG_INIT(4, en_vdd_bl_pm269, en_vdd_bl, NULL,
+ 0, 0, TEGRA_GPIO_PH3, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1_pm269, en_vdd_pnl1, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PW1, false, 1, 0, 0, 0);
+GREG_INIT(9, en_3v3_fuse_pm269, en_3v3_fuse, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PC1, false, 0, 0, 0, 0);
+GREG_INIT(11, en_vdd_sdmmc1_pm269, en_vdd_sdmmc1, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PP1, false, 1, 0, 0, 0);
+GREG_INIT(12, en_3v3_pex_hvdd_pm269, en_3v3_pex_hvdd, "hvdd_pex_pmu",
+ 0, 0, TEGRA_GPIO_PC6, false, 0, 0, 0, 0);
+GREG_INIT(17, en_vddio_vid_oc_pm269, en_vddio_vid_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PP2, false, 0, TEGRA_PINGROUP_DAP3_DOUT,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* Specific to E1187/E1186/E1256 */
+GREG_INIT(14, dis_5v_switch_e118x, dis_5v_switch, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PX2, true, 0, 0, 0, 0);
+GREG_INIT(15, en_usb1_vbus_oc_e118x, en_usb1_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PI4, false, 0, TEGRA_PINGROUP_GMI_RST_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(16, en_usb3_vbus_oc_e118x, en_usb3_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PH7, false, 0, TEGRA_PINGROUP_GMI_AD15,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(17, en_vddio_vid_oc_e118x, en_vddio_vid_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PT0, false, 0, TEGRA_PINGROUP_VI_PCLK,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* E1198/E1291 specific */
+GREG_INIT(18, cam1_ldo_en, cam1_ldo_en, "vdd_3v3_cam", 0, 0, TEGRA_GPIO_PR6, false, 0, 0, 0, 0);
+GREG_INIT(19, cam2_ldo_en, cam2_ldo_en, "vdd_3v3_cam", 0, 0, TEGRA_GPIO_PR7, false, 0, 0, 0, 0);
+
+GREG_INIT(22, en_vbrtr, en_vbrtr, "vdd_3v3_devices", 0, 0, PMU_TCA6416_GPIO_PORT12, false, 0, 0, 0, 0);
+
+#define ADD_GPIO_REG(_name) &gpio_pdata_##_name
+
+#define COMMON_GPIO_REG \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(en_vdd_pnl1), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_vdd_sdmmc1), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd), \
+ ADD_GPIO_REG(en_1v8_cam),
+
+#define PM269_GPIO_REG \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_vdd_bl_pm269), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(en_vdd_pnl1_pm269), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse_pm269), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_vdd_sdmmc1_pm269), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd_pm269), \
+ ADD_GPIO_REG(en_1v8_cam), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_vddio_vid_oc_pm269),
+
+#define E118x_GPIO_REG \
+ ADD_GPIO_REG(en_vdd_bl), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_vddio_vid_oc_e118x), \
+ ADD_GPIO_REG(en_vbrtr),
+
+/* GPIO switch regulator platform data for E1186/E1187/E1256 */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e118x[] = {
+ COMMON_GPIO_REG
+ E118x_GPIO_REG
+};
+
+/* GPIO switch regulator platform data for PM269 */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_pm269[] = {
+ PM269_GPIO_REG
+};
+
+static struct gpio_switch_regulator_platform_data gswitch_pdata;
+static struct platform_device gswitch_regulator_pdata = {
+ .name = "gpio-switch-regulator",
+ .id = -1,
+ .dev = {
+ .platform_data = &gswitch_pdata,
+ },
+};
+
+int __init cardhu_pm299_gpio_switch_regulator_init(void)
+{
+ int i;
+ struct board_info board_info;
+ tegra_get_board_info(&board_info);
+
+ switch (board_info.board_id) {
+ case BOARD_PM269:
+ case BOARD_PM305:
+ case BOARD_PM311:
+ case BOARD_E1257:
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_pm269);
+ gswitch_pdata.subdevs = gswitch_subdevs_pm269;
+ break;
+
+ default:
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_e118x);
+ gswitch_pdata.subdevs = gswitch_subdevs_e118x;
+ break;
+ }
+
+ for (i = 0; i < gswitch_pdata.num_subdevs; ++i) {
+ struct gpio_switch_regulator_subdev_data *gswitch_data =
+ gswitch_pdata.subdevs[i];
+ if (gswitch_data->gpio_nr <= TEGRA_NR_GPIOS)
+ tegra_gpio_enable(gswitch_data->gpio_nr);
+ }
+
+ return platform_device_register(&gswitch_regulator_pdata);
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-power.c b/arch/arm/mach-tegra/board-cardhu-power.c
new file mode 100644
index 000000000000..e38c83f9e919
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-power.c
@@ -0,0 +1,1252 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-power.c
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6591x.h>
+#include <linux/mfd/max77663-core.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/regulator/gpio-switch-regulator.h>
+#include <linux/regulator/tps6591x-regulator.h>
+#include <linux/regulator/tps6236x-regulator.h>
+#include <linux/power/gpio-charger.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/edp.h>
+#include <mach/tsensor.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-cardhu.h"
+#include "pm.h"
+#include "wakeups-t3.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+static struct regulator_consumer_supply tps6591x_vdd1_supply_skubit0_0[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+ REGULATOR_SUPPLY("en_vddio_ddr_1v2", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_vdd1_supply_skubit0_1[] = {
+ REGULATOR_SUPPLY("en_vddio_ddr_1v2", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_vdd2_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_gen1v5", NULL),
+ REGULATOR_SUPPLY("vcore_lcd", NULL),
+ REGULATOR_SUPPLY("track_ldo1", NULL),
+ REGULATOR_SUPPLY("external_ldo_1v2", NULL),
+ REGULATOR_SUPPLY("vcore_cam1", NULL),
+ REGULATOR_SUPPLY("vcore_cam2", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_vddctrl_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_cpu_pmu", NULL),
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+ REGULATOR_SUPPLY("vdd_sys", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_vio_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_gen1v8", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vddio_sys", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.3"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc4", NULL),
+ REGULATOR_SUPPLY("vdd1v8_satelite", NULL),
+ REGULATOR_SUPPLY("vddio_uart", NULL),
+ REGULATOR_SUPPLY("pwrdet_uart", NULL),
+ REGULATOR_SUPPLY("vddio_audio", NULL),
+ REGULATOR_SUPPLY("pwrdet_audio", NULL),
+ REGULATOR_SUPPLY("vddio_bb", NULL),
+ REGULATOR_SUPPLY("pwrdet_bb", NULL),
+ REGULATOR_SUPPLY("vddio_lcd_pmu", NULL),
+ REGULATOR_SUPPLY("pwrdet_lcd", NULL),
+ REGULATOR_SUPPLY("vddio_cam", NULL),
+ REGULATOR_SUPPLY("pwrdet_cam", NULL),
+ REGULATOR_SUPPLY("vddio_vi", NULL),
+ REGULATOR_SUPPLY("pwrdet_vi", NULL),
+ REGULATOR_SUPPLY("ldo6", NULL),
+ REGULATOR_SUPPLY("ldo7", NULL),
+ REGULATOR_SUPPLY("ldo8", NULL),
+ REGULATOR_SUPPLY("vcore_audio", NULL),
+ REGULATOR_SUPPLY("avcore_audio", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.2"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc3", NULL),
+ REGULATOR_SUPPLY("vcore1_lpddr2", NULL),
+ REGULATOR_SUPPLY("vcom_1v8", NULL),
+ REGULATOR_SUPPLY("pmuio_1v8", NULL),
+ REGULATOR_SUPPLY("avdd_ic_usb", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo1_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_pexb", NULL),
+ REGULATOR_SUPPLY("vdd_pexb", NULL),
+ REGULATOR_SUPPLY("avdd_pex_pll", NULL),
+ REGULATOR_SUPPLY("avdd_pexa", NULL),
+ REGULATOR_SUPPLY("vdd_pexa", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo2_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_sata", NULL),
+ REGULATOR_SUPPLY("vdd_sata", NULL),
+ REGULATOR_SUPPLY("avdd_sata_pll", NULL),
+ REGULATOR_SUPPLY("avdd_plle", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo3_supply_e118x[] = {
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.0"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc1", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo3_supply_e1198[] = {
+ REGULATOR_SUPPLY("unused_rail_ldo3", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo4_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo5_supply_e118x[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo5_supply_e1198[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc", "sdhci-tegra.0"),
+ REGULATOR_SUPPLY("pwrdet_sdmmc1", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo6_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_dsi_csi", NULL),
+ REGULATOR_SUPPLY("pwrdet_mipi", NULL),
+};
+static struct regulator_consumer_supply tps6591x_ldo7_supply_0[] = {
+ REGULATOR_SUPPLY("avdd_plla_p_c_s", NULL),
+ REGULATOR_SUPPLY("avdd_pllm", NULL),
+ REGULATOR_SUPPLY("avdd_pllu_d", NULL),
+ REGULATOR_SUPPLY("avdd_pllu_d2", NULL),
+ REGULATOR_SUPPLY("avdd_pllx", NULL),
+};
+
+static struct regulator_consumer_supply tps6591x_ldo8_supply_0[] = {
+ REGULATOR_SUPPLY("vdd_ddr_hs", NULL),
+};
+
+#define TPS_PDATA_INIT(_name, _sname, _minmv, _maxmv, _supply_reg, _always_on, \
+ _boot_on, _apply_uv, _init_uV, _init_enable, _init_apply, _ectrl, _flags) \
+ static struct tps6591x_regulator_platform_data pdata_##_name##_##_sname = \
+ { \
+ .regulator = { \
+ .constraints = { \
+ .min_uV = (_minmv)*1000, \
+ .max_uV = (_maxmv)*1000, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ .apply_uV = _apply_uv, \
+ }, \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(tps6591x_##_name##_supply_##_sname), \
+ .consumer_supplies = tps6591x_##_name##_supply_##_sname, \
+ .supply_regulator = _supply_reg, \
+ }, \
+ .init_uV = _init_uV * 1000, \
+ .init_enable = _init_enable, \
+ .init_apply = _init_apply, \
+ .ectrl = _ectrl, \
+ .flags = _flags, \
+ }
+
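+/*
+ * TPS_PDATA_INIT() argument order: name, sub-name, min mV, max mV,
+ * supply regulator, always_on, boot_on, apply_uV, init_uV (mV),
+ * init_enable, init_apply, external sleep control, flags.
+ */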
+TPS_PDATA_INIT(vdd1, skubit0_0, 600, 1500, 0, 1, 1, 0, -1, 0, 0, EXT_CTRL_SLEEP_OFF, 0);
+TPS_PDATA_INIT(vdd1, skubit0_1, 600, 1500, 0, 1, 1, 0, -1, 0, 0, EXT_CTRL_SLEEP_OFF, 0);
+TPS_PDATA_INIT(vdd2, 0, 600, 1500, 0, 1, 1, 0, -1, 0, 0, 0, 0);
+TPS_PDATA_INIT(vddctrl, 0, 600, 1400, 0, 1, 1, 0, -1, 0, 0, EXT_CTRL_EN1, 0);
+TPS_PDATA_INIT(vio, 0, 1500, 3300, 0, 1, 1, 0, -1, 0, 0, 0, 0);
+
+TPS_PDATA_INIT(ldo1, 0, 1000, 3300, tps6591x_rails(VDD_2), 0, 0, 0, -1, 0, 1, 0, 0);
+TPS_PDATA_INIT(ldo2, 0, 1050, 1050, tps6591x_rails(VDD_2), 0, 0, 1, -1, 0, 1, 0, 0);
+
+TPS_PDATA_INIT(ldo3, e118x, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo3, e1198, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo4, 0, 1000, 3300, 0, 1, 0, 0, -1, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo5, e118x, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo5, e1198, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0);
+
+TPS_PDATA_INIT(ldo6, 0, 1200, 1200, tps6591x_rails(VIO), 0, 0, 1, -1, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo7, 0, 1200, 1200, tps6591x_rails(VIO), 1, 1, 1, -1, 0, 0, EXT_CTRL_SLEEP_OFF, LDO_LOW_POWER_ON_SUSPEND);
+TPS_PDATA_INIT(ldo8, 0, 1000, 3300, tps6591x_rails(VIO), 1, 0, 0, -1, 0, 0, EXT_CTRL_SLEEP_OFF, LDO_LOW_POWER_ON_SUSPEND);
+
+#if defined(CONFIG_RTC_DRV_TPS6591x)
+static struct tps6591x_rtc_platform_data rtc_data = {
+ .irq = TEGRA_NR_IRQS + TPS6591X_INT_RTC_ALARM,
+ .time = {
+ .tm_year = 2000,
+ .tm_mon = 0,
+ .tm_mday = 1,
+ .tm_hour = 0,
+ .tm_min = 0,
+ .tm_sec = 0,
+ },
+};
+
+#define TPS_RTC_REG() \
+ { \
+ .id = 0, \
+ .name = "rtc_tps6591x", \
+ .platform_data = &rtc_data, \
+ }
+#endif
+
+#define TPS_REG(_id, _name, _sname) \
+ { \
+ .id = TPS6591X_ID_##_id, \
+ .name = "tps6591x-regulator", \
+ .platform_data = &pdata_##_name##_##_sname, \
+ }
+
+#define TPS6591X_DEV_COMMON_E118X \
+ TPS_REG(VDD_2, vdd2, 0), \
+ TPS_REG(VDDCTRL, vddctrl, 0), \
+ TPS_REG(LDO_1, ldo1, 0), \
+ TPS_REG(LDO_2, ldo2, 0), \
+ TPS_REG(LDO_3, ldo3, e118x), \
+ TPS_REG(LDO_4, ldo4, 0), \
+ TPS_REG(LDO_5, ldo5, e118x), \
+ TPS_REG(LDO_6, ldo6, 0), \
+ TPS_REG(LDO_7, ldo7, 0), \
+ TPS_REG(LDO_8, ldo8, 0)
+
+static struct tps6591x_subdev_info tps_devs_e118x_skubit0_0[] = {
+ TPS_REG(VIO, vio, 0),
+ TPS_REG(VDD_1, vdd1, skubit0_0),
+ TPS6591X_DEV_COMMON_E118X,
+#if defined(CONFIG_RTC_DRV_TPS6591x)
+ TPS_RTC_REG(),
+#endif
+};
+
+static struct tps6591x_subdev_info tps_devs_e118x_skubit0_1[] = {
+ TPS_REG(VIO, vio, 0),
+ TPS_REG(VDD_1, vdd1, skubit0_1),
+ TPS6591X_DEV_COMMON_E118X,
+#if defined(CONFIG_RTC_DRV_TPS6591x)
+ TPS_RTC_REG(),
+#endif
+};
+
+#define TPS6591X_DEV_COMMON_CARDHU \
+ TPS_REG(VDD_2, vdd2, 0), \
+ TPS_REG(VDDCTRL, vddctrl, 0), \
+ TPS_REG(LDO_1, ldo1, 0), \
+ TPS_REG(LDO_2, ldo2, 0), \
+ TPS_REG(LDO_3, ldo3, e1198), \
+ TPS_REG(LDO_4, ldo4, 0), \
+ TPS_REG(LDO_5, ldo5, e1198), \
+ TPS_REG(LDO_6, ldo6, 0), \
+ TPS_REG(LDO_7, ldo7, 0), \
+ TPS_REG(LDO_8, ldo8, 0)
+
+static struct tps6591x_subdev_info tps_devs_e1198_skubit0_0[] = {
+ TPS_REG(VIO, vio, 0),
+ TPS_REG(VDD_1, vdd1, skubit0_0),
+ TPS6591X_DEV_COMMON_CARDHU,
+#if defined(CONFIG_RTC_DRV_TPS6591x)
+ TPS_RTC_REG(),
+#endif
+};
+
+static struct tps6591x_subdev_info tps_devs_e1198_skubit0_1[] = {
+ TPS_REG(VIO, vio, 0),
+ TPS_REG(VDD_1, vdd1, skubit0_1),
+ TPS6591X_DEV_COMMON_CARDHU,
+#if defined(CONFIG_RTC_DRV_TPS6591x)
+ TPS_RTC_REG(),
+#endif
+};
+
+#define TPS_GPIO_INIT_PDATA(gpio_nr, _init_apply, _sleep_en, _pulldn_en, _output_en, _output_val) \
+ [gpio_nr] = { \
+ .sleep_en = _sleep_en, \
+ .pulldn_en = _pulldn_en, \
+ .output_mode_en = _output_en, \
+ .output_val = _output_val, \
+ .init_apply = _init_apply, \
+ }
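+/*
+ * TPS_GPIO_INIT_PDATA() argument order: GPIO number, init_apply, sleep
+ * enable, pull-down enable, output-mode enable, output value. Only the
+ * GPIO2 entry below is applied: sleep enabled, configured as an output
+ * driven high.
+ */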
+static struct tps6591x_gpio_init_data tps_gpio_pdata_e1291_a04[] = {
+ TPS_GPIO_INIT_PDATA(0, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(1, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(2, 1, 1, 0, 1, 1),
+ TPS_GPIO_INIT_PDATA(3, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(4, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(5, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(6, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(7, 0, 0, 0, 0, 0),
+ TPS_GPIO_INIT_PDATA(8, 0, 0, 0, 0, 0),
+};
+
+static struct tps6591x_sleep_keepon_data tps_slp_keepon = {
+ .clkout32k_keepon = 1,
+};
+
+static struct tps6591x_platform_data tps_platform = {
+ .irq_base = TPS6591X_IRQ_BASE,
+ .gpio_base = TPS6591X_GPIO_BASE,
+ .dev_slp_en = true,
+ .slp_keepon = &tps_slp_keepon,
+};
+
+static struct i2c_board_info __initdata cardhu_regulators[] = {
+ {
+ I2C_BOARD_INFO("tps6591x", 0x2D),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &tps_platform,
+ },
+};
+
+/* TPS62361B DC-DC converter */
+static struct regulator_consumer_supply tps6236x_dcdc_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+};
+
+static struct tps6236x_regulator_platform_data tps6236x_pdata = {
+ .reg_init_data = {
+ .constraints = {
+ .min_uV = 500000,
+ .max_uV = 1770000,
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_STANDBY),
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_VOLTAGE),
+ .always_on = 1,
+ .boot_on = 1,
+ .apply_uV = 0,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps6236x_dcdc_supply),
+ .consumer_supplies = tps6236x_dcdc_supply,
+ },
+ .internal_pd_enable = 0,
+ .enable_discharge = true,
+ .vsel = 3,
+ .init_uV = -1,
+ .init_apply = 0,
+};
+
+static struct i2c_board_info __initdata tps6236x_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("tps62361B", 0x60),
+ .platform_data = &tps6236x_pdata,
+ },
+};
+
+int __init cardhu_regulator_init(void)
+{
+ struct board_info board_info;
+ struct board_info pmu_board_info;
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ u32 pmc_ctrl;
+
+ /* Configure the power management controller (PMC) to treat the
+ * PMU interrupt as active-low */
+
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ tegra_get_board_info(&board_info);
+ tegra_get_pmu_board_info(&pmu_board_info);
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM298)
+ return cardhu_pm298_regulator_init();
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM299)
+ return cardhu_pm299_regulator_init();
+
+ /* All regulator constraints are fully specified for this board */
+ regulator_has_full_constraints();
+
+ /* On PMU board E1208, ldo2 must be set to 1200mV */
+ if (pmu_board_info.board_id == BOARD_E1208) {
+ pdata_ldo2_0.regulator.constraints.min_uV = 1200000;
+ pdata_ldo2_0.regulator.constraints.max_uV = 1200000;
+ }
+
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+ if (board_info.sku & SKU_DCDC_TPS62361_SUPPORT) {
+ tps_platform.num_subdevs =
+ ARRAY_SIZE(tps_devs_e1198_skubit0_1);
+ tps_platform.subdevs = tps_devs_e1198_skubit0_1;
+ } else {
+ tps_platform.num_subdevs =
+ ARRAY_SIZE(tps_devs_e1198_skubit0_0);
+ tps_platform.subdevs = tps_devs_e1198_skubit0_0;
+ }
+ } else {
+ if (board_info.board_id == BOARD_PM269)
+ pdata_ldo3_e118x.slew_rate_uV_per_us = 250;
+
+ if (pmu_board_info.sku & SKU_DCDC_TPS62361_SUPPORT) {
+ tps_platform.num_subdevs = ARRAY_SIZE(tps_devs_e118x_skubit0_1);
+ tps_platform.subdevs = tps_devs_e118x_skubit0_1;
+ } else {
+ tps_platform.num_subdevs = ARRAY_SIZE(tps_devs_e118x_skubit0_0);
+ tps_platform.subdevs = tps_devs_e118x_skubit0_0;
+ }
+ }
+
+ /* E1291-A04/A05: Enable DEV_SLP and enable sleep on GPIO2 */
+ if ((board_info.board_id == BOARD_E1291) &&
+ ((board_info.fab == BOARD_FAB_A04) ||
+ (board_info.fab == BOARD_FAB_A05))) {
+ tps_platform.dev_slp_en = true;
+ tps_platform.gpio_init_data = tps_gpio_pdata_e1291_a04;
+ tps_platform.num_gpioinit_data =
+ ARRAY_SIZE(tps_gpio_pdata_e1291_a04);
+ }
+
+ i2c_register_board_info(4, cardhu_regulators, 1);
+
+ /* Register the TPS6236x for all boards whose SKU bit 0 is set. */
+ if ((board_info.sku & SKU_DCDC_TPS62361_SUPPORT) ||
+ (pmu_board_info.sku & SKU_DCDC_TPS62361_SUPPORT)) {
+ pr_info("Registering the device TPS62361B\n");
+ i2c_register_board_info(4, tps6236x_boardinfo, 1);
+ }
+ return 0;
+}
+
+/* EN_5V_CP from PMU GP0 */
+static struct regulator_consumer_supply gpio_switch_en_5v_cp_supply[] = {
+ REGULATOR_SUPPLY("vdd_5v0_sby", NULL),
+ REGULATOR_SUPPLY("vdd_hall", NULL),
+ REGULATOR_SUPPLY("vterm_ddr", NULL),
+ REGULATOR_SUPPLY("v2ref_ddr", NULL),
+};
+static int gpio_switch_en_5v_cp_voltages[] = { 5000};
+
+/* EN_5V0 From PMU GP2 */
+static struct regulator_consumer_supply gpio_switch_en_5v0_supply[] = {
+ REGULATOR_SUPPLY("vdd_5v0_sys", NULL),
+};
+static int gpio_switch_en_5v0_voltages[] = { 5000};
+
+/* EN_DDR From PMU GP6 */
+static struct regulator_consumer_supply gpio_switch_en_ddr_supply[] = {
+ REGULATOR_SUPPLY("mem_vddio_ddr", NULL),
+ REGULATOR_SUPPLY("t30_vddio_ddr", NULL),
+};
+static int gpio_switch_en_ddr_voltages[] = { 1500};
+
+/* EN_3V3_SYS From PMU GP7 */
+static struct regulator_consumer_supply gpio_switch_en_3v3_sys_supply[] = {
+ REGULATOR_SUPPLY("vdd_lvds", NULL),
+ REGULATOR_SUPPLY("vdd_pnl", NULL),
+ REGULATOR_SUPPLY("vcom_3v3", NULL),
+ REGULATOR_SUPPLY("vdd_3v3", NULL),
+ REGULATOR_SUPPLY("vcore_mmc", NULL),
+ REGULATOR_SUPPLY("vddio_pex_ctl", NULL),
+ REGULATOR_SUPPLY("pwrdet_pex_ctl", NULL),
+ REGULATOR_SUPPLY("hvdd_pex_pmu", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+ REGULATOR_SUPPLY("vpp_fuse", NULL),
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("vcore_nand", NULL),
+ REGULATOR_SUPPLY("hvdd_sata", NULL),
+ REGULATOR_SUPPLY("vddio_gmi_pmu", NULL),
+ REGULATOR_SUPPLY("pwrdet_nand", NULL),
+ REGULATOR_SUPPLY("avdd_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_af", NULL),
+ REGULATOR_SUPPLY("avdd_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_acc", NULL),
+ REGULATOR_SUPPLY("vdd_phtl", NULL),
+ REGULATOR_SUPPLY("vddio_tp", NULL),
+ REGULATOR_SUPPLY("vdd_led", NULL),
+ REGULATOR_SUPPLY("vddio_cec", NULL),
+ REGULATOR_SUPPLY("vdd_cmps", NULL),
+ REGULATOR_SUPPLY("vdd_temp", NULL),
+ REGULATOR_SUPPLY("vpp_kfuse", NULL),
+ REGULATOR_SUPPLY("vddio_ts", NULL),
+ REGULATOR_SUPPLY("vdd_ir_led", NULL),
+ REGULATOR_SUPPLY("vddio_1wire", NULL),
+ REGULATOR_SUPPLY("avddio_audio", NULL),
+ REGULATOR_SUPPLY("vdd_ec", NULL),
+ REGULATOR_SUPPLY("vcom_pa", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_devices", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_dock", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_edid", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_hdmi_cec", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_gmi", NULL),
+ REGULATOR_SUPPLY("vdd_spk_amp", "tegra-snd-wm8903"),
+ REGULATOR_SUPPLY("vdd_3v3_sensor", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_cam", NULL),
+ REGULATOR_SUPPLY("vdd_3v3_als", NULL),
+ REGULATOR_SUPPLY("debug_cons", NULL),
+ REGULATOR_SUPPLY("vdd", "4-004c"),
+};
+static int gpio_switch_en_3v3_sys_voltages[] = { 3300};
+
+/* DIS_5V_SWITCH from AP SPI2_SCK X02 */
+static struct regulator_consumer_supply gpio_switch_dis_5v_switch_supply[] = {
+ REGULATOR_SUPPLY("master_5v_switch", NULL),
+};
+static int gpio_switch_dis_5v_switch_voltages[] = { 5000};
+
+/* EN_VDD_BL */
+static struct regulator_consumer_supply gpio_switch_en_vdd_bl_supply[] = {
+ REGULATOR_SUPPLY("vdd_backlight", NULL),
+ REGULATOR_SUPPLY("vdd_backlight1", NULL),
+};
+static int gpio_switch_en_vdd_bl_voltages[] = { 5000};
+
+/* EN_VDD_BL2 (E1291-A03) from AP PEX_L0_PRSNT_N DD.00 */
+static struct regulator_consumer_supply gpio_switch_en_vdd_bl2_supply[] = {
+ REGULATOR_SUPPLY("vdd_backlight2", NULL),
+};
+static int gpio_switch_en_vdd_bl2_voltages[] = { 5000};
+
+/* EN_3V3_MODEM from AP GPIO VI_VSYNCH D06*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_modem_supply[] = {
+ REGULATOR_SUPPLY("vdd_3v3_mini_card", NULL),
+ REGULATOR_SUPPLY("vdd_mini_card", NULL),
+};
+static int gpio_switch_en_3v3_modem_voltages[] = { 3300};
+
+/* EN_USB1_VBUS_OC*/
+static struct regulator_consumer_supply gpio_switch_en_usb1_vbus_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbus_micro_usb", NULL),
+};
+static int gpio_switch_en_usb1_vbus_oc_voltages[] = { 5000};
+
+/*EN_USB3_VBUS_OC*/
+static struct regulator_consumer_supply gpio_switch_en_usb3_vbus_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbus_typea_usb", NULL),
+};
+static int gpio_switch_en_usb3_vbus_oc_voltages[] = { 5000};
+
+/* EN_VDDIO_VID_OC from AP GPIO VI_PCLK T00*/
+static struct regulator_consumer_supply gpio_switch_en_vddio_vid_oc_supply[] = {
+ REGULATOR_SUPPLY("vdd_hdmi_con", NULL),
+};
+static int gpio_switch_en_vddio_vid_oc_voltages[] = { 5000};
+
+/* EN_VDD_PNL1 from AP GPIO VI_D6 L04*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_pnl1_supply[] = {
+ REGULATOR_SUPPLY("vdd_lcd_panel", NULL),
+};
+static int gpio_switch_en_vdd_pnl1_voltages[] = { 3300};
+
+/* CAM1_LDO_EN from AP GPIO KB_ROW6 R06*/
+static struct regulator_consumer_supply gpio_switch_cam1_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_2v8_cam1", NULL),
+ REGULATOR_SUPPLY("vdd", "6-0072"),
+};
+static int gpio_switch_cam1_ldo_en_voltages[] = { 2800};
+
+/* CAM2_LDO_EN from AP GPIO KB_ROW7 R07*/
+static struct regulator_consumer_supply gpio_switch_cam2_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_2v8_cam2", NULL),
+ REGULATOR_SUPPLY("vdd", "7-0072"),
+};
+static int gpio_switch_cam2_ldo_en_voltages[] = { 2800};
+
+/* CAM3_LDO_EN from AP GPIO KB_ROW8 S00*/
+static struct regulator_consumer_supply gpio_switch_cam3_ldo_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_cam3", NULL),
+};
+static int gpio_switch_cam3_ldo_en_voltages[] = { 3300};
+
+/* EN_VDD_COM from AP GPIO SDMMC3_DAT5 D00*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_com_supply[] = {
+ REGULATOR_SUPPLY("vdd_com_bd", NULL),
+};
+static int gpio_switch_en_vdd_com_voltages[] = { 3300};
+
+/* EN_VDD_SDMMC1 from AP GPIO VI_HSYNC D07*/
+static struct regulator_consumer_supply gpio_switch_en_vdd_sdmmc1_supply[] = {
+ REGULATOR_SUPPLY("vddio_sd_slot", "sdhci-tegra.0"),
+};
+static int gpio_switch_en_vdd_sdmmc1_voltages[] = { 3300};
+
+/* EN_3V3_EMMC from AP GPIO SDMMC3_DAT4 D01*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_emmc_supply[] = {
+ REGULATOR_SUPPLY("vdd_emmc_core", NULL),
+};
+static int gpio_switch_en_3v3_emmc_voltages[] = { 3300};
+
+/* EN_3V3_PEX_HVDD from AP GPIO VI_D09 L07*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_pex_hvdd_supply[] = {
+ REGULATOR_SUPPLY("hvdd_pex", NULL),
+};
+static int gpio_switch_en_3v3_pex_hvdd_voltages[] = { 3300};
+
+/* EN_3v3_FUSE from AP GPIO VI_D08 L06*/
+static struct regulator_consumer_supply gpio_switch_en_3v3_fuse_supply[] = {
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+static int gpio_switch_en_3v3_fuse_voltages[] = { 3300};
+
+/* EN_1V8_CAM from AP GPIO GPIO_PBB4 PBB04*/
+static struct regulator_consumer_supply gpio_switch_en_1v8_cam_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v8_cam1", NULL),
+ REGULATOR_SUPPLY("vdd_1v8_cam2", NULL),
+ REGULATOR_SUPPLY("vdd_1v8_cam3", NULL),
+ REGULATOR_SUPPLY("vdd_i2c", "6-0072"),
+ REGULATOR_SUPPLY("vdd_i2c", "7-0072"),
+ REGULATOR_SUPPLY("vdd_i2c", "2-0033"),
+};
+static int gpio_switch_en_1v8_cam_voltages[] = { 1800};
+
+static struct regulator_consumer_supply gpio_switch_en_vbrtr_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbrtr", NULL),
+};
+static int gpio_switch_en_vbrtr_voltages[] = { 3300};
+
+static int enable_load_switch_rail(
+ struct gpio_switch_regulator_subdev_data *psubdev_data)
+{
+ int ret;
+
+ if (psubdev_data->pin_group <= 0)
+ return -EINVAL;
+
+ /* Tristate the pin group and make the pin an input */
+ ret = tegra_pinmux_set_tristate(psubdev_data->pin_group,
+ TEGRA_TRI_TRISTATE);
+ if (ret < 0)
+ return ret;
+ return gpio_direction_input(psubdev_data->gpio_nr);
+}
+
+static int disable_load_switch_rail(
+ struct gpio_switch_regulator_subdev_data *psubdev_data)
+{
+ int ret;
+
+ if (psubdev_data->pin_group <= 0)
+ return -EINVAL;
+
+ /* Un-tristate the pin group and drive the pin low */
+ ret = tegra_pinmux_set_tristate(psubdev_data->pin_group,
+ TEGRA_TRI_NORMAL);
+ if (ret < 0)
+ return ret;
+ return gpio_direction_output(psubdev_data->gpio_nr, 0);
+}
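+/*
+ * Sketch of the load-switch scheme used by the two helpers above: to enable
+ * a rail the pin group is tristated and the GPIO is turned into an input, so
+ * an external pull presumably takes the enable line high; to disable it the
+ * pin group is un-tristated and the GPIO actively drives the line low.
+ */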
+
+
+/* Macro for defining gpio switch regulator sub device data */
+#define GREG_INIT(_id, _var, _name, _input_supply, _always_on, _boot_on, \
+ _gpio_nr, _active_low, _init_state, _pg, _enable, _disable) \
+ static struct gpio_switch_regulator_subdev_data gpio_pdata_##_var = \
+ { \
+ .regulator_name = "gpio-switch-"#_name, \
+ .input_supply = _input_supply, \
+ .id = _id, \
+ .gpio_nr = _gpio_nr, \
+ .pin_group = _pg, \
+ .active_low = _active_low, \
+ .init_state = _init_state, \
+ .voltages = gpio_switch_##_name##_voltages, \
+ .n_voltages = ARRAY_SIZE(gpio_switch_##_name##_voltages), \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(gpio_switch_##_name##_supply), \
+ .consumer_supplies = gpio_switch_##_name##_supply, \
+ .constraints = { \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ }, \
+ .enable_rail = _enable, \
+ .disable_rail = _disable, \
+ }
+
+/* Common to most boards */
+GREG_INIT(0, en_5v_cp, en_5v_cp, NULL, 1, 0, TPS6591X_GPIO_0, false, 1, 0, 0, 0);
+GREG_INIT(1, en_5v0, en_5v0, NULL, 0, 0, TPS6591X_GPIO_2, false, 0, 0, 0, 0);
+GREG_INIT(2, en_ddr, en_ddr, NULL, 1, 0, TPS6591X_GPIO_6, false, 1, 0, 0, 0);
+GREG_INIT(3, en_3v3_sys, en_3v3_sys, NULL, 0, 0, TPS6591X_GPIO_7, false, 1, 0, 0, 0);
+GREG_INIT(4, en_vdd_bl, en_vdd_bl, NULL, 0, 0, TEGRA_GPIO_PK3, false, 1, 0, 0, 0);
+GREG_INIT(5, en_3v3_modem, en_3v3_modem, NULL, 1, 0, TEGRA_GPIO_PD6, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1, en_vdd_pnl1, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PL4, false, 1, 0, 0, 0);
+GREG_INIT(7, cam3_ldo_en, cam3_ldo_en, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PS0, false, 0, 0, 0, 0);
+GREG_INIT(8, en_vdd_com, en_vdd_com, "vdd_3v3_devices", 1, 0, TEGRA_GPIO_PD0, false, 1, 0, 0, 0);
+GREG_INIT(9, en_3v3_fuse, en_3v3_fuse, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PL6, false, 0, 0, 0, 0);
+GREG_INIT(10, en_3v3_emmc, en_3v3_emmc, "vdd_3v3_devices", 1, 0, TEGRA_GPIO_PD1, false, 1, 0, 0, 0);
+GREG_INIT(11, en_vdd_sdmmc1, en_vdd_sdmmc1, "vdd_3v3_devices", 0, 0, TEGRA_GPIO_PD7, false, 1, 0, 0, 0);
+GREG_INIT(12, en_3v3_pex_hvdd, en_3v3_pex_hvdd, "hvdd_pex_pmu", 0, 0, TEGRA_GPIO_PL7, false, 0, 0, 0, 0);
+GREG_INIT(13, en_1v8_cam, en_1v8_cam, "vdd_gen1v8", 0, 0, TEGRA_GPIO_PBB4, false, 0, 0, 0, 0);
+
+/* E1291-A04/A05 specific */
+GREG_INIT(1, en_5v0_a04, en_5v0, NULL, 0, 0, TPS6591X_GPIO_8, false, 0, 0, 0, 0);
+GREG_INIT(2, en_ddr_a04, en_ddr, NULL, 1, 0, TPS6591X_GPIO_7, false, 1, 0, 0, 0);
+GREG_INIT(3, en_3v3_sys_a04, en_3v3_sys, NULL, 0, 0, TPS6591X_GPIO_6, false, 1, 0, 0, 0);
+
+
+/* Specific to PM269 */
+GREG_INIT(4, en_vdd_bl_pm269, en_vdd_bl, NULL,
+ 0, 0, TEGRA_GPIO_PH3, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1_pm269, en_vdd_pnl1, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PW1, false, 1, 0, 0, 0);
+GREG_INIT(9, en_3v3_fuse_pm269, en_3v3_fuse, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PC1, false, 0, 0, 0, 0);
+GREG_INIT(12, en_3v3_pex_hvdd_pm269, en_3v3_pex_hvdd, "hvdd_pex_pmu",
+ 0, 0, TEGRA_GPIO_PC6, false, 0, 0, 0, 0);
+GREG_INIT(17, en_vddio_vid_oc_pm269, en_vddio_vid_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PP2, false, 0, TEGRA_PINGROUP_DAP3_DOUT,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* Specific to pm311 */
+GREG_INIT(15, en_usb1_vbus_oc_pm311, en_usb1_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PCC7, false, 0, TEGRA_PINGROUP_GMI_RST_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(16, en_usb3_vbus_oc_pm311, en_usb3_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PCC6, false, 0, TEGRA_PINGROUP_GMI_AD15,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* Specific to E1187/E1186/E1256 */
+GREG_INIT(14, dis_5v_switch_e118x, dis_5v_switch, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PX2, true, 0, 0, 0, 0);
+GREG_INIT(15, en_usb1_vbus_oc_e118x, en_usb1_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PI4, false, 0, TEGRA_PINGROUP_GMI_RST_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(16, en_usb3_vbus_oc_e118x, en_usb3_vbus_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PH7, false, 0, TEGRA_PINGROUP_GMI_AD15,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(17, en_vddio_vid_oc_e118x, en_vddio_vid_oc, "master_5v_switch",
+ 0, 0, TEGRA_GPIO_PT0, false, 0, TEGRA_PINGROUP_VI_PCLK,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* E1198/E1291 specific fab < A03 */
+GREG_INIT(15, en_usb1_vbus_oc, en_usb1_vbus_oc, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PI4, false, 0, TEGRA_PINGROUP_GMI_RST_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(16, en_usb3_vbus_oc, en_usb3_vbus_oc, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PH7, false, 0, TEGRA_PINGROUP_GMI_AD15,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* E1198/E1291 specific fab >= A03 */
+GREG_INIT(15, en_usb1_vbus_oc_a03, en_usb1_vbus_oc, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PDD6, false, 0, TEGRA_PINGROUP_PEX_L1_CLKREQ_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+GREG_INIT(16, en_usb3_vbus_oc_a03, en_usb3_vbus_oc, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PDD4, false, 0, TEGRA_PINGROUP_PEX_L1_PRSNT_N,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* E1198/E1291 specific */
+GREG_INIT(17, en_vddio_vid_oc, en_vddio_vid_oc, "vdd_5v0_sys",
+ 0, 0, TEGRA_GPIO_PT0, false, 0, TEGRA_PINGROUP_VI_PCLK,
+ enable_load_switch_rail, disable_load_switch_rail);
+
+/* E1198/E1291 specific camera LDO enables */
+GREG_INIT(18, cam1_ldo_en, cam1_ldo_en, "vdd_3v3_cam", 0, 0, TEGRA_GPIO_PR6, false, 0, 0, 0, 0);
+GREG_INIT(19, cam2_ldo_en, cam2_ldo_en, "vdd_3v3_cam", 0, 0, TEGRA_GPIO_PR7, false, 0, 0, 0, 0);
+
+/* E1291 A03 specific */
+GREG_INIT(20, en_vdd_bl1_a03, en_vdd_bl, NULL, 0, 0, TEGRA_GPIO_PDD2, false, 1, 0, 0, 0);
+GREG_INIT(21, en_vdd_bl2_a03, en_vdd_bl2, NULL, 0, 0, TEGRA_GPIO_PDD0, false, 1, 0, 0, 0);
+
+GREG_INIT(22, en_vbrtr, en_vbrtr, "vdd_3v3_devices", 0, 0, PMU_TCA6416_GPIO_PORT12, false, 0, 0, 0, 0);
+
+/* PM313 display board specific */
+GREG_INIT(4, en_vdd_bl_pm313, en_vdd_bl, NULL,
+ 0, 0, TEGRA_GPIO_PK3, false, 1, 0, 0, 0);
+GREG_INIT(6, en_vdd_pnl1_pm313, en_vdd_pnl1, "vdd_3v3_devices",
+ 0, 0, TEGRA_GPIO_PH3, false, 1, 0, 0, 0);
+
+#define ADD_GPIO_REG(_name) &gpio_pdata_##_name
+
+#define COMMON_GPIO_REG \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(en_vdd_pnl1), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_vdd_sdmmc1), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd), \
+ ADD_GPIO_REG(en_1v8_cam),
+
+#define COMMON_GPIO_REG_E1291_A04 \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0_a04), \
+ ADD_GPIO_REG(en_ddr_a04), \
+ ADD_GPIO_REG(en_3v3_sys_a04), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(en_vdd_pnl1), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_vdd_sdmmc1), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd), \
+ ADD_GPIO_REG(en_1v8_cam),
+
+#define PM269_GPIO_REG \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse_pm269), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd_pm269), \
+ ADD_GPIO_REG(en_1v8_cam), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_vddio_vid_oc_pm269),
+
+#define PM311_GPIO_REG \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse_pm269), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd_pm269), \
+ ADD_GPIO_REG(en_1v8_cam), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_pm311), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_pm311), \
+ ADD_GPIO_REG(en_vddio_vid_oc_pm269),
+
+#define E1247_DISPLAY_GPIO_REG \
+ ADD_GPIO_REG(en_vdd_bl_pm269), \
+ ADD_GPIO_REG(en_vdd_pnl1_pm269),
+
+#define PM313_DISPLAY_GPIO_REG \
+ ADD_GPIO_REG(en_vdd_bl_pm313), \
+ ADD_GPIO_REG(en_vdd_pnl1_pm313),
+
+#define E118x_GPIO_REG \
+ ADD_GPIO_REG(en_5v_cp), \
+ ADD_GPIO_REG(en_5v0), \
+ ADD_GPIO_REG(en_ddr), \
+ ADD_GPIO_REG(en_3v3_sys), \
+ ADD_GPIO_REG(en_3v3_modem), \
+ ADD_GPIO_REG(cam3_ldo_en), \
+ ADD_GPIO_REG(en_vdd_com), \
+ ADD_GPIO_REG(en_3v3_fuse), \
+ ADD_GPIO_REG(en_3v3_emmc), \
+ ADD_GPIO_REG(en_vdd_sdmmc1), \
+ ADD_GPIO_REG(en_3v3_pex_hvdd), \
+ ADD_GPIO_REG(en_1v8_cam), \
+ ADD_GPIO_REG(dis_5v_switch_e118x), \
+ ADD_GPIO_REG(en_usb1_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_e118x), \
+ ADD_GPIO_REG(en_vddio_vid_oc_e118x), \
+ ADD_GPIO_REG(en_vbrtr),
+
+#define E1198_GPIO_REG \
+ ADD_GPIO_REG(en_vddio_vid_oc), \
+ ADD_GPIO_REG(cam1_ldo_en), \
+ ADD_GPIO_REG(cam2_ldo_en),
+
+#define E1291_1198_A00_GPIO_REG \
+ ADD_GPIO_REG(en_usb1_vbus_oc), \
+ ADD_GPIO_REG(en_usb3_vbus_oc), \
+ ADD_GPIO_REG(en_vdd_bl),
+
+#define E1291_A03_GPIO_REG \
+ ADD_GPIO_REG(en_usb1_vbus_oc_a03), \
+ ADD_GPIO_REG(en_usb3_vbus_oc_a03), \
+ ADD_GPIO_REG(en_vdd_bl1_a03), \
+ ADD_GPIO_REG(en_vdd_bl2_a03),
+
+/* Gpio switch regulator platform data for E1186/E1187/E1256*/
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e118x[] = {
+ E118x_GPIO_REG
+ E1247_DISPLAY_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for E1186/E1187/E1256 with the PM313 display board */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e118x_pm313[] = {
+ E118x_GPIO_REG
+ PM313_DISPLAY_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for E1198 and E1291*/
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e1198_base[] = {
+ COMMON_GPIO_REG
+ E1291_1198_A00_GPIO_REG
+ E1198_GPIO_REG
+};
+
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e1198_a02[] = {
+ ADD_GPIO_REG(en_5v_cp),
+ ADD_GPIO_REG(en_5v0),
+ ADD_GPIO_REG(en_ddr_a04),
+ ADD_GPIO_REG(en_3v3_sys_a04),
+ ADD_GPIO_REG(en_3v3_modem),
+ ADD_GPIO_REG(en_vdd_pnl1),
+ ADD_GPIO_REG(cam3_ldo_en),
+ ADD_GPIO_REG(en_vdd_com),
+ ADD_GPIO_REG(en_3v3_fuse),
+ ADD_GPIO_REG(en_3v3_emmc),
+ ADD_GPIO_REG(en_vdd_sdmmc1),
+ ADD_GPIO_REG(en_3v3_pex_hvdd),
+ ADD_GPIO_REG(en_1v8_cam),
+ ADD_GPIO_REG(en_usb1_vbus_oc_a03),
+ ADD_GPIO_REG(en_usb3_vbus_oc_a03),
+ ADD_GPIO_REG(en_vdd_bl1_a03),
+ ADD_GPIO_REG(en_vdd_bl2_a03),
+ ADD_GPIO_REG(en_vddio_vid_oc),
+ ADD_GPIO_REG(cam1_ldo_en),
+ ADD_GPIO_REG(cam2_ldo_en),
+};
+
+/* Gpio switch regulator platform data for PM269*/
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_pm269[] = {
+ PM269_GPIO_REG
+ E1247_DISPLAY_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for PM269 with the PM313 display board */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_pm269_pm313[] = {
+ PM269_GPIO_REG
+ PM313_DISPLAY_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for PM311*/
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_pm311[] = {
+ PM311_GPIO_REG
+ E1247_DISPLAY_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for PM311 with the PM313 display board */
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_pm311_pm313[] = {
+ PM311_GPIO_REG
+ PM313_DISPLAY_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for E1291 A03*/
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e1291_a03[] = {
+ COMMON_GPIO_REG
+ E1291_A03_GPIO_REG
+ E1198_GPIO_REG
+};
+
+/* Gpio switch regulator platform data for E1291 A04/A05*/
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs_e1291_a04[] = {
+ COMMON_GPIO_REG_E1291_A04
+ E1291_A03_GPIO_REG
+ E1198_GPIO_REG
+};
+
+
+static struct gpio_switch_regulator_platform_data gswitch_pdata;
+static struct platform_device gswitch_regulator_pdata = {
+ .name = "gpio-switch-regulator",
+ .id = -1,
+ .dev = {
+ .platform_data = &gswitch_pdata,
+ },
+};
+
+int __init cardhu_gpio_switch_regulator_init(void)
+{
+ int i;
+ struct board_info board_info;
+ struct board_info pmu_board_info;
+ struct board_info display_board_info;
+
+ tegra_get_board_info(&board_info);
+ tegra_get_pmu_board_info(&pmu_board_info);
+ tegra_get_display_board_info(&display_board_info);
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM298)
+ return cardhu_pm298_gpio_switch_regulator_init();
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM299)
+ return cardhu_pm299_gpio_switch_regulator_init();
+
+ switch (board_info.board_id) {
+ case BOARD_E1198:
+ if (board_info.fab <= BOARD_FAB_A01) {
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_e1198_base);
+ gswitch_pdata.subdevs = gswitch_subdevs_e1198_base;
+ } else {
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_e1198_a02);
+ gswitch_pdata.subdevs = gswitch_subdevs_e1198_a02;
+ }
+ break;
+
+ case BOARD_E1291:
+ if (board_info.fab == BOARD_FAB_A03) {
+ gswitch_pdata.num_subdevs =
+ ARRAY_SIZE(gswitch_subdevs_e1291_a03);
+ gswitch_pdata.subdevs = gswitch_subdevs_e1291_a03;
+ } else if ((board_info.fab == BOARD_FAB_A04) ||
+ (board_info.fab == BOARD_FAB_A05)) {
+ gswitch_pdata.num_subdevs =
+ ARRAY_SIZE(gswitch_subdevs_e1291_a04);
+ gswitch_pdata.subdevs = gswitch_subdevs_e1291_a04;
+ } else {
+ gswitch_pdata.num_subdevs =
+ ARRAY_SIZE(gswitch_subdevs_e1198_base);
+ gswitch_pdata.subdevs = gswitch_subdevs_e1198_base;
+ }
+ break;
+
+ case BOARD_PM311:
+ case BOARD_PM305:
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_pm311);
+ gswitch_pdata.subdevs = gswitch_subdevs_pm311;
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_pm311_pm313);
+ gswitch_pdata.subdevs = gswitch_subdevs_pm311_pm313;
+ }
+ break;
+
+ case BOARD_PM269:
+ case BOARD_E1257:
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_pm269);
+ gswitch_pdata.subdevs = gswitch_subdevs_pm269;
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_pm269_pm313);
+ gswitch_pdata.subdevs = gswitch_subdevs_pm269_pm313;
+ }
+ break;
+ default:
+ if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_e118x_pm313);
+ gswitch_pdata.subdevs = gswitch_subdevs_e118x_pm313;
+ } else {
+ gswitch_pdata.num_subdevs = ARRAY_SIZE(gswitch_subdevs_e118x);
+ gswitch_pdata.subdevs = gswitch_subdevs_e118x;
+ }
+ break;
+ }
+
+ for (i = 0; i < gswitch_pdata.num_subdevs; ++i) {
+ struct gpio_switch_regulator_subdev_data *gswitch_data = gswitch_pdata.subdevs[i];
+ if (gswitch_data->gpio_nr < TEGRA_NR_GPIOS)
+ tegra_gpio_enable(gswitch_data->gpio_nr);
+ }
+
+ return platform_device_register(&gswitch_regulator_pdata);
+}
+
+static void cardhu_board_suspend(int lp_state, enum suspend_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_SUSPEND_BEFORE_CPU))
+ tegra_console_uart_suspend();
+}
+
+static void cardhu_board_resume(int lp_state, enum resume_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_RESUME_AFTER_CPU))
+ tegra_console_uart_resume();
+}
+
+static struct tegra_suspend_platform_data cardhu_suspend_data = {
+ .cpu_timer = 2000,
+ .cpu_off_timer = 200,
+ .suspend_mode = TEGRA_SUSPEND_LP0,
+ .core_timer = 0x7e7e,
+ .core_off_timer = 0,
+ .corereq_high = true,
+ .sysclkreq_high = true,
+ .cpu_lp2_min_residency = 2000,
+ .board_suspend = cardhu_board_suspend,
+ .board_resume = cardhu_board_resume,
+};
+
+int __init cardhu_suspend_init(void)
+{
+ struct board_info board_info;
+ struct board_info pmu_board_info;
+
+ tegra_get_board_info(&board_info);
+ tegra_get_pmu_board_info(&pmu_board_info);
+
+ /* For PMU fab A03, A04 and A05, drive core_pwr_req high */
+ if ((pmu_board_info.fab == BOARD_FAB_A03) ||
+ (pmu_board_info.fab == BOARD_FAB_A04) ||
+ (pmu_board_info.fab == BOARD_FAB_A05))
+ cardhu_suspend_data.corereq_high = true;
+
+ /* CORE_PWR_REQ must be high for any processor/PMU board whose SKU bit 0
+ * is set. This is required to enable the TPS62361x DC-DC converter */
+ if ((board_info.sku & SKU_DCDC_TPS62361_SUPPORT) || (pmu_board_info.sku & SKU_DCDC_TPS62361_SUPPORT))
+ cardhu_suspend_data.corereq_high = true;
+
+ switch (board_info.board_id) {
+ case BOARD_E1291:
+ /* CORE_PWR_REQ must be high for E1291-A03 */
+ if (board_info.fab == BOARD_FAB_A03)
+ cardhu_suspend_data.corereq_high = true;
+ break;
+ case BOARD_E1198:
+ case BOARD_PM269:
+ case BOARD_PM305:
+ case BOARD_PM311:
+ break;
+ case BOARD_E1187:
+ case BOARD_E1186:
+ case BOARD_E1256:
+ case BOARD_E1257:
+ cardhu_suspend_data.cpu_timer = 5000;
+ cardhu_suspend_data.cpu_off_timer = 5000;
+ break;
+ default:
+ break;
+ }
+
+ tegra_init_suspend(&cardhu_suspend_data);
+ return 0;
+}
+
+static void cardhu_power_off(void)
+{
+ int ret;
+ pr_err("cardhu: Powering off the device\n");
+ ret = tps6591x_power_off();
+ if (ret)
+ pr_err("cardhu: failed to power off\n");
+
+ while (1);
+}
+
+static void cardhu_pm298_power_off(void)
+{
+ int ret;
+ pr_err("cardhu-pm298: Powering off the device\n");
+ ret = max77663_power_off();
+ if (ret)
+ pr_err("cardhu-pm298: failed to power off\n");
+
+ while (1);
+}
+
+int __init cardhu_power_off_init(void)
+{
+ struct board_info pmu_board_info;
+
+ tegra_get_pmu_board_info(&pmu_board_info);
+
+ if (pmu_board_info.board_id == BOARD_PMU_PM298)
+ pm_power_off = cardhu_pm298_power_off;
+ else
+ pm_power_off = cardhu_power_off;
+
+ return 0;
+}
+
+static struct tegra_tsensor_pmu_data tpdata = {
+ .poweroff_reg_addr = 0x3F,
+ .poweroff_reg_data = 0x80,
+ .reset_tegra = 1,
+ .controller_type = 0,
+ .i2c_controller_id = 4,
+ .pinmux = 0,
+ .pmu_16bit_ops = 0,
+ .pmu_i2c_addr = 0x2D,
+};
+
+void __init cardhu_tsensor_init(void)
+{
+ tegra3_tsensor_init(&tpdata);
+}
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+
+int __init cardhu_edp_init(void)
+{
+ unsigned int regulator_mA;
+
+ regulator_mA = get_maximum_cpu_current_supported();
+ if (!regulator_mA) {
+ regulator_mA = 6000; /* default for regular T30 SKUs */
+ }
+ pr_info("%s: CPU regulator %d mA\n", __func__, regulator_mA);
+
+ tegra_init_cpu_edp_limits(regulator_mA);
+ return 0;
+}
+#endif
+
+static char *cardhu_battery[] = {
+ "bq27510-0",
+};
+
+static struct gpio_charger_platform_data cardhu_charger_pdata = {
+ .name = "ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .gpio = AC_PRESENT_GPIO,
+ .gpio_active_low = 0,
+ .supplied_to = cardhu_battery,
+ .num_supplicants = ARRAY_SIZE(cardhu_battery),
+};
+
+static struct platform_device cardhu_charger_device = {
+ .name = "gpio-charger",
+ .dev = {
+ .platform_data = &cardhu_charger_pdata,
+ },
+};
+
+static int __init cardhu_charger_late_init(void)
+{
+ if (!machine_is_cardhu())
+ return 0;
+
+ platform_device_register(&cardhu_charger_device);
+ return 0;
+}
+
+late_initcall(cardhu_charger_late_init);
+
diff --git a/arch/arm/mach-tegra/board-cardhu-powermon.c b/arch/arm/mach-tegra/board-cardhu-powermon.c
new file mode 100644
index 000000000000..a637b68286a5
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-powermon.c
@@ -0,0 +1,256 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-powermon.c
+ *
+ * Copyright (c) 2011, NVIDIA, All Rights Reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/ina219.h>
+
+#include "board.h"
+#include "board-cardhu.h"
+
+enum {
+ VDD_AC_BAT,
+ VDD_DRAM_IN,
+ VDD_BACKLIGHT_IN,
+ VDD_CPU_IN,
+ VDD_CORE_IN,
+ VDD_DISPLAY_IN,
+ VDD_3V3_TEGRA,
+ VDD_OTHER_PMU_IN,
+ VDD_1V8_TEGRA,
+ VDD_1V8_OTHER,
+ UNUSED_RAIL,
+};
+
+static struct ina219_platform_data power_mon_info[] = {
+ [VDD_AC_BAT] = {
+ .calibration_data = 0xa000,
+ .power_lsb = 2,
+ .rail_name = "VDD_AC_BAT",
+ .divisor = 20,
+ },
+
+ [VDD_DRAM_IN] = {
+ .calibration_data = 0xa000,
+ .power_lsb = 2,
+ .rail_name = "VDD_DRAM_IN",
+ .divisor = 20,
+ },
+
+ [VDD_BACKLIGHT_IN] = {
+ .calibration_data = 0x6aaa,
+ .power_lsb = 1,
+ .rail_name = "VDD_BACKLIGHT_IN",
+ .divisor = 20,
+ },
+
+ [VDD_CPU_IN] = {
+ .calibration_data = 0xa000,
+ .power_lsb = 1,
+ .rail_name = "VDD_CPU_IN",
+ .divisor = 20,
+ },
+
+ [VDD_CORE_IN] = {
+ .calibration_data = 0x6aaa,
+ .power_lsb = 1,
+ .rail_name = "VDD_CORE_IN",
+ .divisor = 20,
+ },
+
+ [VDD_DISPLAY_IN] = {
+ .calibration_data = 0x4000,
+ .power_lsb = 1,
+ .rail_name = "VDD_DISPLAY_IN",
+ .divisor = 20,
+ },
+
+ [VDD_3V3_TEGRA] = {
+ .calibration_data = 0x6aaa,
+ .power_lsb = 1,
+ .rail_name = "VDD_3V3_TEGRA",
+ .divisor = 20,
+ },
+
+ [VDD_OTHER_PMU_IN] = {
+ .calibration_data = 0xa000,
+ .power_lsb = 1,
+ .rail_name = "VDD_OTHER_PMU_IN",
+ .divisor = 20,
+ },
+
+ [VDD_1V8_TEGRA] = {
+ .calibration_data = 0x4000,
+ .power_lsb = 1,
+ .rail_name = "VDD_1V8_TEGRA",
+ .divisor = 20,
+ },
+
+ [VDD_1V8_OTHER] = {
+ .calibration_data = 0xa000,
+ .power_lsb = 1,
+ .rail_name = "VDD_1V8_OTHER",
+ .divisor = 20,
+ },
+
+ /* All unused INA219 devices use the data below */
+ [UNUSED_RAIL] = {
+ .calibration_data = 0x4000,
+ .power_lsb = 1,
+ .rail_name = "unused_rail",
+ .divisor = 20,
+ },
+};
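+/*
+ * The calibration_data and power_lsb values above are the raw INA219
+ * calibration register contents and power LSBs consumed by the ina219
+ * driver for each rail. Per the INA219 datasheet the calibration value is
+ * trunc(0.04096 / (current_LSB * R_shunt)) with the power LSB equal to
+ * 20 * current_LSB, so the per-rail differences presumably reflect the
+ * different shunt resistor values fitted on the board.
+ */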
+
+enum {
+ INA_I2C_ADDR_40,
+ INA_I2C_ADDR_41,
+ INA_I2C_ADDR_42,
+ INA_I2C_ADDR_43,
+ INA_I2C_ADDR_44,
+ INA_I2C_ADDR_45,
+ INA_I2C_ADDR_46,
+ INA_I2C_ADDR_47,
+ INA_I2C_ADDR_48,
+ INA_I2C_ADDR_49,
+ INA_I2C_ADDR_4A,
+ INA_I2C_ADDR_4B,
+ INA_I2C_ADDR_4C,
+ INA_I2C_ADDR_4D,
+ INA_I2C_ADDR_4E,
+ INA_I2C_ADDR_4F,
+};
+
+static struct i2c_board_info cardhu_i2c0_ina219_board_info[] = {
+ [INA_I2C_ADDR_40] = {
+ I2C_BOARD_INFO("ina219", 0x40),
+ .platform_data = &power_mon_info[VDD_AC_BAT],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_41] = {
+ I2C_BOARD_INFO("ina219", 0x41),
+ .platform_data = &power_mon_info[VDD_DRAM_IN],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_42] = {
+ I2C_BOARD_INFO("ina219", 0x42),
+ .platform_data = &power_mon_info[VDD_BACKLIGHT_IN],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_43] = {
+ I2C_BOARD_INFO("ina219", 0x43),
+ .platform_data = &power_mon_info[VDD_CPU_IN],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_44] = {
+ I2C_BOARD_INFO("ina219", 0x44),
+ .platform_data = &power_mon_info[VDD_CORE_IN],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_45] = {
+ I2C_BOARD_INFO("ina219", 0x45),
+ .platform_data = &power_mon_info[VDD_DISPLAY_IN],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_46] = {
+ I2C_BOARD_INFO("ina219", 0x46),
+ .platform_data = &power_mon_info[VDD_3V3_TEGRA],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_47] = {
+ I2C_BOARD_INFO("ina219", 0x47),
+ .platform_data = &power_mon_info[VDD_OTHER_PMU_IN],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_48] = {
+ I2C_BOARD_INFO("ina219", 0x48),
+ .platform_data = &power_mon_info[VDD_1V8_TEGRA],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_49] = {
+ I2C_BOARD_INFO("ina219", 0x49),
+ .platform_data = &power_mon_info[VDD_1V8_OTHER],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_4A] = {
+ I2C_BOARD_INFO("ina219", 0x4A),
+ .platform_data = &power_mon_info[UNUSED_RAIL],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_4B] = {
+ I2C_BOARD_INFO("ina219", 0x4B),
+ .platform_data = &power_mon_info[UNUSED_RAIL],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_4C] = {
+ I2C_BOARD_INFO("ina219", 0x4C),
+ .platform_data = &power_mon_info[UNUSED_RAIL],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_4D] = {
+ I2C_BOARD_INFO("ina219", 0x4D),
+ .platform_data = &power_mon_info[UNUSED_RAIL],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_4E] = {
+ I2C_BOARD_INFO("ina219", 0x4E),
+ .platform_data = &power_mon_info[UNUSED_RAIL],
+ .irq = -1,
+ },
+
+ [INA_I2C_ADDR_4F] = {
+ I2C_BOARD_INFO("ina219", 0x4F),
+ .platform_data = &power_mon_info[UNUSED_RAIL],
+ .irq = -1,
+ },
+};
+
+int __init cardhu_pmon_init(void)
+{
+ struct board_info bi;
+
+ tegra_get_board_info(&bi);
+
+ /* For fab A04, VDD_CORE_IN moved from the INA219 at 0x44 to the one at 0x4A */
+ if (bi.fab == BOARD_FAB_A04) {
+ cardhu_i2c0_ina219_board_info[INA_I2C_ADDR_44].platform_data =
+ &power_mon_info[UNUSED_RAIL];
+ cardhu_i2c0_ina219_board_info[INA_I2C_ADDR_4A].platform_data =
+ &power_mon_info[VDD_CORE_IN];
+ }
+
+ i2c_register_board_info(0, cardhu_i2c0_ina219_board_info,
+ ARRAY_SIZE(cardhu_i2c0_ina219_board_info));
+ return 0;
+}
+
diff --git a/arch/arm/mach-tegra/board-cardhu-sdhci.c b/arch/arm/mach-tegra/board-cardhu-sdhci.c
new file mode 100644
index 000000000000..fddf01f3e39d
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-sdhci.c
@@ -0,0 +1,300 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-sdhci.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/wlan_plat.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-cardhu.h"
+
+#define CARDHU_WLAN_PWR TEGRA_GPIO_PD4
+#define CARDHU_WLAN_RST TEGRA_GPIO_PD3
+#define CARDHU_WLAN_WOW TEGRA_GPIO_PO4
+#define CARDHU_SD_CD TEGRA_GPIO_PI5
+#define CARDHU_SD_WP TEGRA_GPIO_PT3
+#define PM269_SD_WP -1
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+static int cardhu_wifi_status_register(void (*callback)(int , void *), void *);
+
+static int cardhu_wifi_reset(int on);
+static int cardhu_wifi_power(int on);
+static int cardhu_wifi_set_carddetect(int val);
+
+static struct wifi_platform_data cardhu_wifi_control = {
+ .set_power = cardhu_wifi_power,
+ .set_reset = cardhu_wifi_reset,
+ .set_carddetect = cardhu_wifi_set_carddetect,
+};
+
+static struct resource wifi_resource[] = {
+ [0] = {
+ .name = "bcm4329_wlan_irq",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PO4),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PO4),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device cardhu_wifi_device = {
+ .name = "bcm4329_wlan",
+ .id = 1,
+ .num_resources = 1,
+ .resource = wifi_resource,
+ .dev = {
+ .platform_data = &cardhu_wifi_control,
+ },
+};
+
+static struct resource sdhci_resource0[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct embedded_sdio_data embedded_sdio_data2 = {
+ .cccr = {
+ .sdio_vsn = 2,
+ .multi_block = 1,
+ .low_speed = 0,
+ .wide_bus = 0,
+ .high_power = 1,
+ .high_speed = 1,
+ },
+ .cis = {
+ .vendor = 0x02d0,
+ .device = 0x4329,
+ },
+};
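+/*
+ * The CIS vendor/device pair above (0x02d0 / 0x4329) identifies a Broadcom
+ * BCM4329 SDIO WLAN device, matching the "bcm4329_wlan" platform device
+ * used elsewhere in this file.
+ */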
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+ .mmc_data = {
+ .register_status_notify = cardhu_wifi_status_register,
+ .embedded_sdio = &embedded_sdio_data2,
+ .built_in = 1,
+ },
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+/* .tap_delay = 6,
+ .is_voltage_switch_supported = false,
+ .vdd_rail_name = NULL,
+ .slot_rail_name = NULL,
+ .vdd_max_uv = -1,
+ .vdd_min_uv = -1,
+ .max_clk = 0,
+ .is_8bit_supported = false, */
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data0 = {
+ .cd_gpio = CARDHU_SD_CD,
+ .wp_gpio = CARDHU_SD_WP,
+ .power_gpio = -1,
+/* .tap_delay = 6,
+ .is_voltage_switch_supported = true,
+ .vdd_rail_name = "vddio_sdmmc1",
+ .slot_rail_name = "vddio_sd_slot",
+ .vdd_max_uv = 3320000,
+ .vdd_min_uv = 3280000,
+ .max_clk = 208000000,
+ .is_8bit_supported = false, */
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .is_8bit = 1,
+ .tap_delay = 0x0F,
+ .mmc_data = {
+ .built_in = 1,
+ }
+/* .tap_delay = 6,
+ .is_voltage_switch_supported = false,
+ .vdd_rail_name = NULL,
+ .slot_rail_name = NULL,
+ .vdd_max_uv = -1,
+ .vdd_min_uv = -1,
+ .max_clk = 48000000,
+ .is_8bit_supported = true, */
+};
+
+static struct platform_device tegra_sdhci_device0 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource0,
+ .num_resources = ARRAY_SIZE(sdhci_resource0),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data0,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data3,
+ },
+};
+
+static int cardhu_wifi_status_register(
+ void (*callback)(int card_present, void *dev_id),
+ void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static int cardhu_wifi_set_carddetect(int val)
+{
+ pr_debug("%s: %d\n", __func__, val);
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ pr_warning("%s: Nobody to notify\n", __func__);
+ return 0;
+}
+
+static int cardhu_wifi_power(int on)
+{
+ pr_debug("%s: %d\n", __func__, on);
+ gpio_set_value(CARDHU_WLAN_PWR, on);
+ mdelay(100);
+ gpio_set_value(CARDHU_WLAN_RST, on);
+ mdelay(200);
+
+ return 0;
+}
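+/*
+ * Power sequencing sketch for the helper above: the WLAN power GPIO is
+ * switched first and, after a settle delay, the reset GPIO follows; power-off
+ * uses the same order with both lines driven low. The 100ms/200ms delays are
+ * taken from the original code and are presumably conservative values for
+ * the BCM4329.
+ */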
+
+static int cardhu_wifi_reset(int on)
+{
+ pr_debug("%s: do nothing\n", __func__);
+ return 0;
+}
+
+static int __init cardhu_wifi_init(void)
+{
+ int rc;
+
+ rc = gpio_request(CARDHU_WLAN_PWR, "wlan_power");
+ if (rc)
+ pr_err("WLAN_PWR gpio request failed:%d\n", rc);
+ rc = gpio_request(CARDHU_WLAN_RST, "wlan_rst");
+ if (rc)
+ pr_err("WLAN_RST gpio request failed:%d\n", rc);
+ rc = gpio_request(CARDHU_WLAN_WOW, "bcmsdh_sdmmc");
+ if (rc)
+ pr_err("WLAN_WOW gpio request failed:%d\n", rc);
+
+ tegra_gpio_enable(CARDHU_WLAN_PWR);
+ tegra_gpio_enable(CARDHU_WLAN_RST);
+ tegra_gpio_enable(CARDHU_WLAN_WOW);
+
+ rc = gpio_direction_output(CARDHU_WLAN_PWR, 0);
+ if (rc)
+ pr_err("WLAN_PWR gpio direction configuration failed:%d\n", rc);
+ rc = gpio_direction_output(CARDHU_WLAN_RST, 0);
+ if (rc)
+ pr_err("WLAN_RST gpio direction configuration failed:%d\n", rc);
+ rc = gpio_direction_input(CARDHU_WLAN_WOW);
+ if (rc)
+ pr_err("WLAN_WOW gpio direction configuration failed:%d\n", rc);
+
+ platform_device_register(&cardhu_wifi_device);
+ return 0;
+}
+
+int __init cardhu_sdhci_init(void)
+{
+ struct board_info board_info;
+ tegra_get_board_info(&board_info);
+ if ((board_info.board_id == BOARD_PM269) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311)) {
+ tegra_sdhci_platform_data0.wp_gpio = PM269_SD_WP;
+ tegra_sdhci_platform_data2.max_clk_limit = 12000000;
+ }
+
+ platform_device_register(&tegra_sdhci_device3);
+ platform_device_register(&tegra_sdhci_device2);
+ platform_device_register(&tegra_sdhci_device0);
+
+ cardhu_wifi_init();
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-cardhu-sensors.c b/arch/arm/mach-tegra/board-cardhu-sensors.c
new file mode 100644
index 000000000000..e464ffe32aa1
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu-sensors.c
@@ -0,0 +1,938 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu-sensors.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c/pca954x.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/nct1008.h>
+#include <mach/fb.h>
+#include <mach/gpio.h>
+#include <media/ov5650.h>
+#include <media/ov14810.h>
+#include <media/ov2710.h>
+#include <media/tps61050.h>
+#include <generated/mach-types.h>
+#include "gpio-names.h"
+#include "board.h"
+#include <linux/mpu.h>
+#include <media/sh532u.h>
+#include <linux/bq27x00.h>
+#include <mach/gpio.h>
+#include <mach/edp.h>
+#include <mach/thermal.h>
+
+#include "gpio-names.h"
+#include "board-cardhu.h"
+#include "cpu-tegra.h"
+
+static struct regulator *cardhu_1v8_cam1 = NULL;
+static struct regulator *cardhu_1v8_cam2 = NULL;
+static struct regulator *cardhu_1v8_cam3 = NULL;
+static struct regulator *cardhu_vdd_2v8_cam1 = NULL;
+static struct regulator *cardhu_vdd_2v8_cam2 = NULL;
+static struct regulator *cardhu_vdd_cam3 = NULL;
+
+static struct board_info board_info;
+
+static struct pca954x_platform_mode cardhu_pca954x_modes[] = {
+ { .adap_id = PCA954x_I2C_BUS0, .deselect_on_exit = true, },
+ { .adap_id = PCA954x_I2C_BUS1, .deselect_on_exit = true, },
+ { .adap_id = PCA954x_I2C_BUS2, .deselect_on_exit = true, },
+ { .adap_id = PCA954x_I2C_BUS3, .deselect_on_exit = true, },
+};
+
+static struct pca954x_platform_data cardhu_pca954x_data = {
+ .modes = cardhu_pca954x_modes,
+ .num_modes = ARRAY_SIZE(cardhu_pca954x_modes),
+};
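+/*
+ * The PCA9546 mux described here (registered at 0x70 further down) fans the
+ * camera I2C segment out to the PCA954x_I2C_BUS0..BUS3 adapters; the ov5650,
+ * sh532u and ov2710 board infos below are presumably registered behind those
+ * sub-buses later in this file, with deselect_on_exit set so each channel is
+ * released after use.
+ */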
+
+static int cardhu_camera_init(void)
+{
+ int ret;
+
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+ tegra_gpio_enable(CAM1_POWER_DWN_GPIO);
+ ret = gpio_request(CAM1_POWER_DWN_GPIO, "camera_power_en");
+ if (ret < 0)
+ pr_err("%s: gpio_request failed for gpio %s\n",
+ __func__, "CAM1_POWER_DWN_GPIO");
+ tegra_gpio_enable(CAM3_POWER_DWN_GPIO);
+ ret = gpio_request(CAM3_POWER_DWN_GPIO, "cam3_power_en");
+ if (ret < 0)
+ pr_err("%s: gpio_request failed for gpio %s\n",
+ __func__, "CAM3_POWER_DWN_GPIO");
+
+ tegra_gpio_enable(CAM2_POWER_DWN_GPIO);
+ ret = gpio_request(CAM2_POWER_DWN_GPIO, "camera2_power_en");
+ if (ret < 0)
+ pr_err("%s: gpio_request failed for gpio %s\n",
+ __func__, "CAM2_POWER_DWN_GPIO");
+
+ tegra_gpio_enable(OV5650_RESETN_GPIO);
+ ret = gpio_request(OV5650_RESETN_GPIO, "camera_reset");
+ if (ret < 0)
+ pr_err("%s: gpio_request failed for gpio %s\n",
+ __func__, "OV5650_RESETN_GPIO");
+
+ gpio_direction_output(CAM3_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 1);
+ mdelay(10);
+
+ gpio_direction_output(OV5650_RESETN_GPIO, 1);
+ mdelay(5);
+ gpio_direction_output(OV5650_RESETN_GPIO, 0);
+ mdelay(5);
+ gpio_direction_output(OV5650_RESETN_GPIO, 1);
+ mdelay(5);
+ }
+
+ /* Select the CSI-B mux for either cam2 or cam3 */
+ tegra_gpio_enable(CAMERA_CSI_MUX_SEL_GPIO);
+ ret = gpio_request(CAMERA_CSI_MUX_SEL_GPIO, "camera_csi_sel");
+ if (ret < 0)
+ pr_err("%s: gpio_request failed for gpio %s\n",
+ __func__, "CAMERA_CSI_MUX_SEL_GPIO");
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 0);
+ gpio_export(CAMERA_CSI_MUX_SEL_GPIO, false);
+
+ return 0;
+}
+
+static int cardhu_left_ov5650_power_on(void)
+{
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+
+ if (cardhu_vdd_2v8_cam1 == NULL) {
+ cardhu_vdd_2v8_cam1 = regulator_get(NULL, "vdd_2v8_cam1");
+ if (WARN_ON(IS_ERR(cardhu_vdd_2v8_cam1))) {
+ pr_err("%s: couldn't get regulator vdd_2v8_cam1: %ld\n",
+ __func__, PTR_ERR(cardhu_vdd_2v8_cam1));
+ goto reg_alloc_fail;
+ }
+ }
+ regulator_enable(cardhu_vdd_2v8_cam1);
+ mdelay(5);
+ }
+
+ /* Enable VDD_1V8_Cam1 */
+ if (cardhu_1v8_cam1 == NULL) {
+ cardhu_1v8_cam1 = regulator_get(NULL, "vdd_1v8_cam1");
+ if (WARN_ON(IS_ERR(cardhu_1v8_cam1))) {
+ pr_err("%s: couldn't get regulator vdd_1v8_cam1: %ld\n",
+ __func__, PTR_ERR(cardhu_1v8_cam1));
+ goto reg_alloc_fail;
+ }
+ }
+ regulator_enable(cardhu_1v8_cam1);
+
+ mdelay(5);
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 0);
+ mdelay(20);
+ gpio_direction_output(OV5650_RESETN_GPIO, 0);
+ mdelay(100);
+ gpio_direction_output(OV5650_RESETN_GPIO, 1);
+ }
+
+ if (board_info.board_id == BOARD_PM269) {
+ gpio_direction_output(CAM1_RST_L_GPIO, 0);
+ mdelay(100);
+ gpio_direction_output(CAM1_RST_L_GPIO, 1);
+ }
+
+ return 0;
+
+reg_alloc_fail:
+ if (cardhu_1v8_cam1) {
+ regulator_put(cardhu_1v8_cam1);
+ cardhu_1v8_cam1 = NULL;
+ }
+ if (cardhu_vdd_2v8_cam1) {
+ regulator_put(cardhu_vdd_2v8_cam1);
+ cardhu_vdd_2v8_cam1 = NULL;
+ }
+
+ return -ENODEV;
+
+}
+
+static int cardhu_left_ov5650_power_off(void)
+{
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM3_POWER_DWN_GPIO, 1);
+ }
+ if (cardhu_1v8_cam1)
+ regulator_disable(cardhu_1v8_cam1);
+ if (cardhu_vdd_2v8_cam1)
+ regulator_disable(cardhu_vdd_2v8_cam1);
+
+ return 0;
+}
+
+struct ov5650_platform_data cardhu_left_ov5650_data = {
+ .power_on = cardhu_left_ov5650_power_on,
+ .power_off = cardhu_left_ov5650_power_off,
+};
+
+#ifdef CONFIG_VIDEO_OV14810
+static int cardhu_ov14810_power_on(void)
+{
+ if (board_info.board_id == BOARD_E1198) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ mdelay(20);
+ gpio_direction_output(OV14810_RESETN_GPIO, 0);
+ mdelay(100);
+ gpio_direction_output(OV14810_RESETN_GPIO, 1);
+ }
+
+ return 0;
+}
+
+static int cardhu_ov14810_power_off(void)
+{
+ if (board_info.board_id == BOARD_E1198) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM3_POWER_DWN_GPIO, 1);
+ }
+
+ return 0;
+}
+
+struct ov14810_platform_data cardhu_ov14810_data = {
+ .power_on = cardhu_ov14810_power_on,
+ .power_off = cardhu_ov14810_power_off,
+};
+
+struct ov14810_platform_data cardhu_ov14810uC_data = {
+ .power_on = NULL,
+ .power_off = NULL,
+};
+
+struct ov14810_platform_data cardhu_ov14810SlaveDev_data = {
+ .power_on = NULL,
+ .power_off = NULL,
+};
+
+static struct i2c_board_info cardhu_i2c_board_info_e1214[] = {
+ {
+ I2C_BOARD_INFO("ov14810", 0x36),
+ .platform_data = &cardhu_ov14810_data,
+ },
+ {
+ I2C_BOARD_INFO("ov14810uC", 0x67),
+ .platform_data = &cardhu_ov14810uC_data,
+ },
+ {
+ I2C_BOARD_INFO("ov14810SlaveDev", 0x69),
+ .platform_data = &cardhu_ov14810SlaveDev_data,
+ }
+};
+#endif
+
+static int cardhu_right_ov5650_power_on(void)
+{
+ /* CSI-B and front sensor are muxed on cardhu */
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 0);
+
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 0);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 0);
+ mdelay(10);
+
+ if (cardhu_vdd_2v8_cam2 == NULL) {
+ cardhu_vdd_2v8_cam2 = regulator_get(NULL, "vdd_2v8_cam2");
+ if (WARN_ON(IS_ERR(cardhu_vdd_2v8_cam2))) {
+ pr_err("%s: couldn't get regulator vdd_2v8_cam2: %ld\n",
+ __func__, PTR_ERR(cardhu_vdd_2v8_cam2));
+ goto reg_alloc_fail;
+ }
+ }
+ regulator_enable(cardhu_vdd_2v8_cam2);
+ mdelay(5);
+ }
+
+ /* Enable VDD_1V8_Cam2 */
+ if (cardhu_1v8_cam2 == NULL) {
+ cardhu_1v8_cam2 = regulator_get(NULL, "vdd_1v8_cam2");
+ if (WARN_ON(IS_ERR(cardhu_1v8_cam2))) {
+ pr_err("%s: couldn't get regulator vdd_1v8_cam2: %ld\n",
+ __func__, PTR_ERR(cardhu_1v8_cam2));
+ goto reg_alloc_fail;
+ }
+ }
+ regulator_enable(cardhu_1v8_cam2);
+
+ mdelay(5);
+
+ if (board_info.board_id == BOARD_PM269) {
+ gpio_direction_output(CAM2_RST_L_GPIO, 0);
+ mdelay(100);
+ gpio_direction_output(CAM2_RST_L_GPIO, 1);
+ }
+
+ return 0;
+
+reg_alloc_fail:
+ if (cardhu_1v8_cam2) {
+ regulator_put(cardhu_1v8_cam2);
+ cardhu_1v8_cam2 = NULL;
+ }
+ if (cardhu_vdd_2v8_cam2) {
+ regulator_put(cardhu_vdd_2v8_cam2);
+ cardhu_vdd_2v8_cam2 = NULL;
+ }
+
+ return -ENODEV;
+
+}
+
+static int cardhu_right_ov5650_power_off(void)
+{
+ /* CSI-B and front sensor are muxed on cardhu */
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 0);
+
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM3_POWER_DWN_GPIO, 1);
+ }
+
+ if (cardhu_1v8_cam2)
+ regulator_disable(cardhu_1v8_cam2);
+ if (cardhu_vdd_2v8_cam2)
+ regulator_disable(cardhu_vdd_2v8_cam2);
+
+ return 0;
+}
+
+static void cardhu_ov5650_synchronize_sensors(void)
+{
+ if (board_info.board_id == BOARD_E1198) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ mdelay(50);
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 0);
+ mdelay(50);
+ } else if (board_info.board_id == BOARD_E1291) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 1);
+ mdelay(50);
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 0);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 0);
+ mdelay(50);
+ } else {
+ pr_err("%s: unsupported board id\n", __func__);
+ }
+}
+
+struct ov5650_platform_data cardhu_right_ov5650_data = {
+ .power_on = cardhu_right_ov5650_power_on,
+ .power_off = cardhu_right_ov5650_power_off,
+ .synchronize_sensors = cardhu_ov5650_synchronize_sensors,
+};
+
+static int cardhu_ov2710_power_on(void)
+{
+ /* CSI-B and front sensor are muxed on cardhu */
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 1);
+
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 0);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 0);
+ gpio_direction_output(CAM3_POWER_DWN_GPIO, 0);
+ mdelay(10);
+
+ if (cardhu_vdd_cam3 == NULL) {
+ cardhu_vdd_cam3 = regulator_get(NULL, "vdd_cam3");
+ if (WARN_ON(IS_ERR(cardhu_vdd_cam3))) {
+ pr_err("%s: couldn't get regulator vdd_cam3: %ld\n",
+ __func__, PTR_ERR(cardhu_vdd_cam3));
+ goto reg_alloc_fail;
+ }
+ }
+ regulator_enable(cardhu_vdd_cam3);
+ }
+
+ /* Enable VDD_1V8_Cam3 */
+ if (cardhu_1v8_cam3 == NULL) {
+ cardhu_1v8_cam3 = regulator_get(NULL, "vdd_1v8_cam3");
+ if (WARN_ON(IS_ERR(cardhu_1v8_cam3))) {
+ pr_err("%s: couldn't get regulator vdd_1v8_cam3: %ld\n",
+ __func__, PTR_ERR(cardhu_1v8_cam3));
+ goto reg_alloc_fail;
+ }
+ }
+ regulator_enable(cardhu_1v8_cam3);
+ mdelay(5);
+
+ return 0;
+
+reg_alloc_fail:
+ if (cardhu_1v8_cam3) {
+ regulator_put(cardhu_1v8_cam3);
+ cardhu_1v8_cam3 = NULL;
+ }
+ if (cardhu_vdd_cam3) {
+ regulator_put(cardhu_vdd_cam3);
+ cardhu_vdd_cam3 = NULL;
+ }
+
+ return -ENODEV;
+}
+
+static int cardhu_ov2710_power_off(void)
+{
+ /* CSI-B and front sensor are muxed on cardhu */
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 1);
+
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291)) {
+ gpio_direction_output(CAM1_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM2_POWER_DWN_GPIO, 1);
+ gpio_direction_output(CAM3_POWER_DWN_GPIO, 1);
+ }
+
+ if (cardhu_1v8_cam3)
+ regulator_disable(cardhu_1v8_cam3);
+ if (cardhu_vdd_cam3)
+ regulator_disable(cardhu_vdd_cam3);
+
+ return 0;
+}
+
+struct ov2710_platform_data cardhu_ov2710_data = {
+ .power_on = cardhu_ov2710_power_on,
+ .power_off = cardhu_ov2710_power_off,
+};
+
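+ /* PCA9546 4-channel I2C switch on bus 2; the camera sensors below are
+ * registered on its downstream channels (PCA954x_I2C_BUS0..BUS2). */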
+static const struct i2c_board_info cardhu_i2c3_board_info[] = {
+ {
+ I2C_BOARD_INFO("pca9546", 0x70),
+ .platform_data = &cardhu_pca954x_data,
+ },
+};
+
+static struct sh532u_platform_data sh532u_left_pdata = {
+ .num = 1,
+ .sync = 2,
+ .dev_name = "focuser",
+ .gpio_reset = TEGRA_GPIO_PBB0,
+};
+
+static struct sh532u_platform_data sh532u_right_pdata = {
+ .num = 2,
+ .sync = 1,
+ .dev_name = "focuser",
+ .gpio_reset = TEGRA_GPIO_PBB0,
+};
+
+static struct nvc_torch_pin_state cardhu_tps61050_pinstate = {
+ .mask = 0x0008, /*VGP3*/
+ .values = 0x0008,
+};
+
+static struct tps61050_platform_data cardhu_tps61050_pdata = {
+ .dev_name = "torch",
+ .pinstate = &cardhu_tps61050_pinstate,
+};
+
+static const struct i2c_board_info cardhu_i2c_board_info_tps61050[] = {
+ {
+ I2C_BOARD_INFO("tps61050", 0x33),
+ .platform_data = &cardhu_tps61050_pdata,
+ },
+};
+
+static struct i2c_board_info cardhu_i2c6_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov5650L", 0x36),
+ .platform_data = &cardhu_left_ov5650_data,
+ },
+ {
+ I2C_BOARD_INFO("sh532u", 0x72),
+ .platform_data = &sh532u_left_pdata,
+ },
+};
+
+static struct i2c_board_info cardhu_i2c7_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov5650R", 0x36),
+ .platform_data = &cardhu_right_ov5650_data,
+ },
+ {
+ I2C_BOARD_INFO("sh532u", 0x72),
+ .platform_data = &sh532u_right_pdata,
+ },
+};
+
+static struct i2c_board_info cardhu_i2c8_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov2710", 0x36),
+ .platform_data = &cardhu_ov2710_data,
+ },
+};
+
+#ifndef CONFIG_TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+static int nct_get_temp(void *_data, long *temp)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_get_temp(data, temp);
+}
+
+static int nct_get_temp_low(void *_data, long *temp)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_get_temp_low(data, temp);
+}
+
+static int nct_set_limits(void *_data,
+ long lo_limit_milli,
+ long hi_limit_milli)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_set_limits(data,
+ lo_limit_milli,
+ hi_limit_milli);
+}
+
+static int nct_set_alert(void *_data,
+ void (*alert_func)(void *),
+ void *alert_data)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_set_alert(data, alert_func, alert_data);
+}
+
+static int nct_set_shutdown_temp(void *_data, long shutdown_temp)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_set_shutdown_temp(data, shutdown_temp);
+}
+
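+ /* Invoked by the NCT1008 driver after a successful probe: wrap the chip
+ * in a tegra_thermal_device and hand it to the Tegra thermal framework. */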
+static void nct1008_probe_callback(struct nct1008_data *data)
+{
+ struct tegra_thermal_device *thermal_device;
+
+ thermal_device = kzalloc(sizeof(struct tegra_thermal_device),
+ GFP_KERNEL);
+ if (!thermal_device) {
+ pr_err("unable to allocate thermal device\n");
+ return;
+ }
+
+ thermal_device->name = "nct1008";
+ thermal_device->data = data;
+ thermal_device->offset = TDIODE_OFFSET;
+ thermal_device->get_temp = nct_get_temp;
+ thermal_device->get_temp_low = nct_get_temp_low;
+ thermal_device->set_limits = nct_set_limits;
+ thermal_device->set_alert = nct_set_alert;
+ thermal_device->set_shutdown_temp = nct_set_shutdown_temp;
+
+ tegra_thermal_set_device(thermal_device);
+}
+#endif
+
+static struct nct1008_platform_data cardhu_nct1008_pdata = {
+ .supported_hwrev = true,
+ .ext_range = true,
+ .conv_rate = 0x08,
+ .offset = 8, /* 4 * 2C. Bug 844025 - 1C for device accuracies */
+#ifndef CONFIG_TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+ .probe_callback = nct1008_probe_callback,
+#endif
+};
+
+static struct i2c_board_info cardhu_i2c4_bq27510_board_info[] = {
+ {
+ I2C_BOARD_INFO("bq27510", 0x55),
+ },
+};
+
+static struct i2c_board_info cardhu_i2c4_nct1008_board_info[] = {
+ {
+ I2C_BOARD_INFO("nct1008", 0x4C),
+ .platform_data = &cardhu_nct1008_pdata,
+ .irq = -1,
+ }
+};
+
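+ /* Select the NCT1008 temperature-alert GPIO for the current board,
+ * configure it as an input and store its IRQ in the board info. */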
+static int cardhu_nct1008_init(void)
+{
+ int nct1008_port = -1;
+ int ret = 0;
+
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291) ||
+ (board_info.board_id == BOARD_E1257) ||
+ (board_info.board_id == BOARD_PM269) ||
+ (board_info.board_id == BOARD_PM305) ||
+ (board_info.board_id == BOARD_PM311)) {
+ nct1008_port = TEGRA_GPIO_PCC2;
+ } else if ((board_info.board_id == BOARD_E1186) ||
+ (board_info.board_id == BOARD_E1187) ||
+ (board_info.board_id == BOARD_E1256)) {
+ /* FIXME: seems to be conflicting with usb3 vbus on E1186 */
+ /* nct1008_port = TEGRA_GPIO_PH7; */
+ }
+
+ if (nct1008_port >= 0) {
+ /* FIXME: enable irq when throttling is supported */
+ cardhu_i2c4_nct1008_board_info[0].irq = TEGRA_GPIO_TO_IRQ(nct1008_port);
+
+ ret = gpio_request(nct1008_port, "temp_alert");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_input(nct1008_port);
+ if (ret < 0)
+ gpio_free(nct1008_port);
+ else
+ tegra_gpio_enable(nct1008_port);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_GPIO_PCA953X)
+static struct pca953x_platform_data cardhu_pmu_tca6416_data = {
+ .gpio_base = PMU_TCA6416_GPIO_BASE,
+};
+
+static const struct i2c_board_info cardhu_i2c4_board_info_tca6416[] = {
+ {
+ I2C_BOARD_INFO("tca6416", 0x20),
+ .platform_data = &cardhu_pmu_tca6416_data,
+ },
+};
+
+static struct pca953x_platform_data cardhu_cam_tca6416_data = {
+ .gpio_base = CAM_TCA6416_GPIO_BASE,
+};
+
+static const struct i2c_board_info cardhu_i2c2_board_info_tca6416[] = {
+ {
+ I2C_BOARD_INFO("tca6416", 0x20),
+ .platform_data = &cardhu_cam_tca6416_data,
+ },
+};
+
+static int __init pmu_tca6416_init(void)
+{
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291))
+ return 0;
+
+ pr_info("Registering pmu pca6416\n");
+ i2c_register_board_info(4, cardhu_i2c4_board_info_tca6416,
+ ARRAY_SIZE(cardhu_i2c4_board_info_tca6416));
+ return 0;
+}
+
+static int __init cam_tca6416_init(void)
+{
+ /* Boards E1198 and E1291 are of the Cardhu personality
+ * and do not have a TCA6416 expander for the camera */
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291))
+ return 0;
+
+ pr_info("Registering cam pca6416\n");
+ i2c_register_board_info(2, cardhu_i2c2_board_info_tca6416,
+ ARRAY_SIZE(cardhu_i2c2_board_info_tca6416));
+ return 0;
+}
+#else
+static int __init pmu_tca6416_init(void)
+{
+ return 0;
+}
+
+static int __init cam_tca6416_init(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_MPU_SENSORS_MPU3050
+static struct mpu_platform_data mpu3050_data = {
+ .int_config = 0x10,
+ .level_shifter = 0,
+ .orientation = MPU_GYRO_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct ext_slave_platform_data mpu3050_accel_data = {
+ .address = MPU_ACCEL_ADDR,
+ .irq = 0,
+ .adapt_num = MPU_ACCEL_BUS_NUM,
+ .bus = EXT_SLAVE_BUS_SECONDARY,
+ .orientation = MPU_ACCEL_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct ext_slave_platform_data mpu_compass_data = {
+ .address = MPU_COMPASS_ADDR,
+ .irq = 0,
+ .adapt_num = MPU_COMPASS_BUS_NUM,
+ .bus = EXT_SLAVE_BUS_PRIMARY,
+ .orientation = MPU_COMPASS_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct i2c_board_info __initdata inv_mpu_i2c2_board_info[] = {
+ {
+ I2C_BOARD_INFO(MPU_GYRO_NAME, MPU_GYRO_ADDR),
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_GYRO_IRQ_GPIO),
+ .platform_data = &mpu3050_data,
+ },
+ {
+ I2C_BOARD_INFO(MPU_ACCEL_NAME, MPU_ACCEL_ADDR),
+#if MPU_ACCEL_IRQ_GPIO
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_ACCEL_IRQ_GPIO),
+#endif
+ .platform_data = &mpu3050_accel_data,
+ },
+ {
+ I2C_BOARD_INFO(MPU_COMPASS_NAME, MPU_COMPASS_ADDR),
+#if MPU_COMPASS_IRQ_GPIO
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_COMPASS_IRQ_GPIO),
+#endif
+ .platform_data = &mpu_compass_data,
+ },
+};
+
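+ /* Request and configure the MPU3050 (and optional accelerometer) interrupt
+ * GPIOs, then register the Invensense devices on the gyro I2C bus. */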
+static void mpuirq_init(void)
+{
+ int ret = 0;
+
+ pr_info("*** MPU START *** mpuirq_init...\n");
+
+#if MPU_ACCEL_IRQ_GPIO
+ /* ACCEL-IRQ assignment */
+ tegra_gpio_enable(MPU_ACCEL_IRQ_GPIO);
+ ret = gpio_request(MPU_ACCEL_IRQ_GPIO, MPU_ACCEL_NAME);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(MPU_ACCEL_IRQ_GPIO);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(MPU_ACCEL_IRQ_GPIO);
+ return;
+ }
+#endif
+
+ /* MPU-IRQ assignment */
+ tegra_gpio_enable(MPU_GYRO_IRQ_GPIO);
+ ret = gpio_request(MPU_GYRO_IRQ_GPIO, MPU_GYRO_NAME);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(MPU_GYRO_IRQ_GPIO);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(MPU_GYRO_IRQ_GPIO);
+ return;
+ }
+ pr_info("*** MPU END *** mpuirq_init...\n");
+
+ i2c_register_board_info(MPU_GYRO_BUS_NUM, inv_mpu_i2c2_board_info,
+ ARRAY_SIZE(inv_mpu_i2c2_board_info));
+}
+#endif
+
+
+static struct i2c_board_info cardhu_i2c2_isl_board_info[] = {
+ {
+ I2C_BOARD_INFO("isl29028", 0x44),
+ }
+};
+
+int __init cardhu_sensors_init(void)
+{
+ int err;
+
+ tegra_get_board_info(&board_info);
+
+ cardhu_camera_init();
+ cam_tca6416_init();
+
+ i2c_register_board_info(2, cardhu_i2c3_board_info,
+ ARRAY_SIZE(cardhu_i2c3_board_info));
+
+ i2c_register_board_info(2, cardhu_i2c_board_info_tps61050,
+ ARRAY_SIZE(cardhu_i2c_board_info_tps61050));
+
+#ifdef CONFIG_VIDEO_OV14810
+ /* This is disabled by default; enable it through Kconfig.
+ * There should be a way to detect dynamically which board
+ * (E1211/E1214) is connected; until then the sensor selection
+ * logic is static.
+ * E1214 corresponds to the OV14810 sensor. */
+ i2c_register_board_info(2, cardhu_i2c_board_info_e1214,
+ ARRAY_SIZE(cardhu_i2c_board_info_e1214));
+#else
+ /* The left camera is on PCA954x I2C BUS0, the right camera on BUS1
+ * and the front camera on BUS2 */
+ i2c_register_board_info(PCA954x_I2C_BUS0, cardhu_i2c6_board_info,
+ ARRAY_SIZE(cardhu_i2c6_board_info));
+
+ i2c_register_board_info(PCA954x_I2C_BUS1, cardhu_i2c7_board_info,
+ ARRAY_SIZE(cardhu_i2c7_board_info));
+
+ i2c_register_board_info(PCA954x_I2C_BUS2, cardhu_i2c8_board_info,
+ ARRAY_SIZE(cardhu_i2c8_board_info));
+
+#endif
+ pmu_tca6416_init();
+
+ if (board_info.board_id == BOARD_E1291)
+ i2c_register_board_info(4, cardhu_i2c4_bq27510_board_info,
+ ARRAY_SIZE(cardhu_i2c4_bq27510_board_info));
+
+ i2c_register_board_info(2, cardhu_i2c2_isl_board_info,
+ ARRAY_SIZE(cardhu_i2c2_isl_board_info));
+
+ err = cardhu_nct1008_init();
+ if (err)
+ return err;
+
+ i2c_register_board_info(4, cardhu_i2c4_nct1008_board_info,
+ ARRAY_SIZE(cardhu_i2c4_nct1008_board_info));
+
+#ifdef CONFIG_MPU_SENSORS_MPU3050
+ mpuirq_init();
+#endif
+ return 0;
+}
+
+#if defined(CONFIG_GPIO_PCA953X)
+struct ov5650_gpios {
+ const char *name;
+ int gpio;
+ int enabled;
+};
+
+#define OV5650_GPIO(_name, _gpio, _enabled) \
+ { \
+ .name = _name, \
+ .gpio = _gpio, \
+ .enabled = _enabled, \
+ }
+
+static struct ov5650_gpios ov5650_gpio_keys[] = {
+ [0] = OV5650_GPIO("cam1_pwdn", CAM1_PWR_DN_GPIO, 0),
+ [1] = OV5650_GPIO("cam1_rst_lo", CAM1_RST_L_GPIO, 1),
+ [2] = OV5650_GPIO("cam1_af_pwdn_lo", CAM1_AF_PWR_DN_L_GPIO, 0),
+ [3] = OV5650_GPIO("cam1_ldo_shdn_lo", CAM1_LDO_SHUTDN_L_GPIO, 1),
+ [4] = OV5650_GPIO("cam2_pwdn", CAM2_PWR_DN_GPIO, 0),
+ [5] = OV5650_GPIO("cam2_rst_lo", CAM2_RST_L_GPIO, 1),
+ [6] = OV5650_GPIO("cam2_af_pwdn_lo", CAM2_AF_PWR_DN_L_GPIO, 0),
+ [7] = OV5650_GPIO("cam2_ldo_shdn_lo", CAM2_LDO_SHUTDN_L_GPIO, 1),
+ [8] = OV5650_GPIO("cam3_pwdn", CAM_FRONT_PWR_DN_GPIO, 0),
+ [9] = OV5650_GPIO("cam3_rst_lo", CAM_FRONT_RST_L_GPIO, 1),
+ [10] = OV5650_GPIO("cam3_af_pwdn_lo", CAM_FRONT_AF_PWR_DN_L_GPIO, 0),
+ [11] = OV5650_GPIO("cam3_ldo_shdn_lo", CAM_FRONT_LDO_SHUTDN_L_GPIO, 1),
+ [12] = OV5650_GPIO("cam_led_exp", CAM_FRONT_LED_EXP, 1),
+ [13] = OV5650_GPIO("cam_led_rear_exp", CAM_SNN_LED_REAR_EXP, 1),
+ [14] = OV5650_GPIO("cam_i2c_mux_rst", CAM_I2C_MUX_RST_EXP, 1),
+};
+
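+ /* On boards that use the camera TCA6416 expander, request all camera
+ * control GPIOs, drive them to their default levels and export them
+ * to sysfs. */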
+int __init cardhu_ov5650_late_init(void)
+{
+ int ret;
+ int i;
+
+ if (!machine_is_cardhu())
+ return 0;
+
+ if ((board_info.board_id == BOARD_E1198) ||
+ (board_info.board_id == BOARD_E1291))
+ return 0;
+
+ printk("%s: \n", __func__);
+ for (i = 0; i < ARRAY_SIZE(ov5650_gpio_keys); i++) {
+ ret = gpio_request(ov5650_gpio_keys[i].gpio,
+ ov5650_gpio_keys[i].name);
+ if (ret < 0) {
+ printk("%s: gpio_request failed for gpio #%d\n",
+ __func__, i);
+ goto fail;
+ }
+ printk("%s: enable - %d\n", __func__, i);
+ gpio_direction_output(ov5650_gpio_keys[i].gpio,
+ ov5650_gpio_keys[i].enabled);
+ gpio_export(ov5650_gpio_keys[i].gpio, false);
+ }
+
+ return 0;
+
+fail:
+ while (i--)
+ gpio_free(ov5650_gpio_keys[i].gpio);
+ return ret;
+}
+
+late_initcall(cardhu_ov5650_late_init);
+#endif
diff --git a/arch/arm/mach-tegra/board-cardhu.c b/arch/arm/mach-tegra/board-cardhu.c
new file mode 100644
index 000000000000..81129d1a2762
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu.c
@@ -0,0 +1,1045 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/tegra_uart.h>
+#include <linux/memblock.h>
+#include <linux/spi-tegra.h>
+#include <linux/nfc/pn544.h>
+
+#include <sound/wm8903.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+#include <mach/i2s.h>
+#include <mach/tegra_wm8903_pdata.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/usb_phy.h>
+#include <mach/thermal.h>
+#include <mach/pci.h>
+
+#include "board.h"
+#include "clock.h"
+#include "board-cardhu.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "fuse.h"
+#include "pm.h"
+#include "baseband-xmm-power.h"
+#include "wdt-recovery.h"
+
+/* All units are in millicelsius */
+static struct tegra_thermal_data thermal_data = {
+ .temp_throttle = 85000,
+ .temp_shutdown = 90000,
+ .temp_offset = TDIODE_OFFSET, /* temps based on tdiode */
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ .edp_offset = TDIODE_OFFSET, /* edp based on tdiode */
+ .hysteresis_edp = 3000,
+#endif
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ .tc1 = 0,
+ .tc2 = 1,
+ .passive_delay = 2000,
+#else
+ .hysteresis_throttle = 1000,
+#endif
+};
+
+/* !!!TODO: Change for cardhu (Taken from Ventana) */
+static struct tegra_utmip_config utmi_phy_config[] = {
+ [0] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [1] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [2] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+static struct resource cardhu_bcm4329_rfkill_resources[] = {
+ {
+ .name = "bcm4329_nshutdown_gpio",
+ .start = TEGRA_GPIO_PU0,
+ .end = TEGRA_GPIO_PU0,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct platform_device cardhu_bcm4329_rfkill_device = {
+ .name = "bcm4329_rfkill",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(cardhu_bcm4329_rfkill_resources),
+ .resource = cardhu_bcm4329_rfkill_resources,
+};
+
+static struct resource cardhu_bluesleep_resources[] = {
+ [0] = {
+ .name = "gpio_host_wake",
+ .start = TEGRA_GPIO_PU6,
+ .end = TEGRA_GPIO_PU6,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .name = "gpio_ext_wake",
+ .start = TEGRA_GPIO_PU1,
+ .end = TEGRA_GPIO_PU1,
+ .flags = IORESOURCE_IO,
+ },
+ [2] = {
+ .name = "host_wake",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ },
+};
+
+static struct platform_device cardhu_bluesleep_device = {
+ .name = "bluesleep",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(cardhu_bluesleep_resources),
+ .resource = cardhu_bluesleep_resources,
+};
+
+static noinline void __init cardhu_setup_bluesleep(void)
+{
+ platform_device_register(&cardhu_bluesleep_device);
+ tegra_gpio_enable(TEGRA_GPIO_PU6);
+ tegra_gpio_enable(TEGRA_GPIO_PU1);
+ return;
+}
+
+static __initdata struct tegra_clk_init_table cardhu_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "pll_m", NULL, 0, false},
+ { "hda", "pll_p", 108000000, false},
+ { "hda2codec_2x","pll_p", 48000000, false},
+ { "pwm", "pll_p", 3187500, false},
+ { "blink", "clk_32k", 32768, true},
+ { "i2s1", "pll_a_out0", 0, false},
+ { "i2s3", "pll_a_out0", 0, false},
+ { "spdif_out", "pll_a_out0", 0, false},
+ { "d_audio", "pll_a_out0", 0, false},
+ { "dam0", "pll_a_out0", 0, false},
+ { "dam1", "pll_a_out0", 0, false},
+ { "dam2", "pll_a_out0", 0, false},
+ { "audio1", "i2s1_sync", 0, false},
+ { "audio3", "i2s3_sync", 0, false},
+ { "vi_sensor", "pll_p", 150000000, false},
+ { "i2c1", "pll_p", 3200000, false},
+ { "i2c2", "pll_p", 3200000, false},
+ { "i2c3", "pll_p", 3200000, false},
+ { "i2c4", "pll_p", 3200000, false},
+ { "i2c5", "pll_p", 3200000, false},
+ { NULL, NULL, 0, 0},
+};
+
+static struct pn544_i2c_platform_data nfc_pdata = {
+ .irq_gpio = TEGRA_GPIO_PX0,
+ .ven_gpio = TEGRA_GPIO_PP3,
+ .firm_gpio = TEGRA_GPIO_PO7,
+ };
+
+static struct i2c_board_info __initdata cardhu_i2c_bus3_board_info[] = {
+ {
+ I2C_BOARD_INFO("pn544", 0x28),
+ .platform_data = &nfc_pdata,
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PX0),
+ },
+};
+static struct tegra_i2c_platform_data cardhu_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PC4, 0},
+ .sda_gpio = {TEGRA_GPIO_PC5, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data cardhu_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .is_clkon_always = true,
+ .scl_gpio = {TEGRA_GPIO_PT5, 0},
+ .sda_gpio = {TEGRA_GPIO_PT6, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data cardhu_i2c3_platform_data = {
+ .adapter_nr = 2,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PBB1, 0},
+ .sda_gpio = {TEGRA_GPIO_PBB2, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data cardhu_i2c4_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PV4, 0},
+ .sda_gpio = {TEGRA_GPIO_PV5, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data cardhu_i2c5_platform_data = {
+ .adapter_nr = 4,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PZ6, 0},
+ .sda_gpio = {TEGRA_GPIO_PZ7, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+
+#if 0
+struct tegra_wired_jack_conf audio_wr_jack_conf = {
+ .hp_det_n = TEGRA_GPIO_PW2,
+ .en_mic_ext = TEGRA_GPIO_PX1,
+ .en_mic_int = TEGRA_GPIO_PX0,
+};
+#endif
+
+static struct wm8903_platform_data cardhu_wm8903_pdata = {
+ .irq_active_low = 0,
+ .micdet_cfg = 0,
+ .micdet_delay = 100,
+ .gpio_base = CARDHU_GPIO_WM8903(0),
+ .gpio_cfg = {
+ (WM8903_GPn_FN_DMIC_LR_CLK_OUTPUT << WM8903_GP1_FN_SHIFT),
+ (WM8903_GPn_FN_DMIC_LR_CLK_OUTPUT << WM8903_GP2_FN_SHIFT) |
+ WM8903_GP2_DIR,
+ 0,
+ WM8903_GPIO_NO_CONFIG,
+ WM8903_GPIO_NO_CONFIG,
+ },
+};
+
+static struct i2c_board_info __initdata wm8903_board_info = {
+ I2C_BOARD_INFO("wm8903", 0x1a),
+ .platform_data = &cardhu_wm8903_pdata,
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_CDC_IRQ),
+};
+
+static void cardhu_i2c_init(void)
+{
+ tegra_i2c_device1.dev.platform_data = &cardhu_i2c1_platform_data;
+ tegra_i2c_device2.dev.platform_data = &cardhu_i2c2_platform_data;
+ tegra_i2c_device3.dev.platform_data = &cardhu_i2c3_platform_data;
+ tegra_i2c_device4.dev.platform_data = &cardhu_i2c4_platform_data;
+ tegra_i2c_device5.dev.platform_data = &cardhu_i2c5_platform_data;
+
+ platform_device_register(&tegra_i2c_device5);
+ platform_device_register(&tegra_i2c_device4);
+ platform_device_register(&tegra_i2c_device3);
+ platform_device_register(&tegra_i2c_device2);
+ platform_device_register(&tegra_i2c_device1);
+
+ i2c_register_board_info(4, &wm8903_board_info, 1);
+ i2c_register_board_info(2, cardhu_i2c_bus3_board_info, 1);
+}
+
+static struct platform_device *cardhu_uart_devices[] __initdata = {
+ &tegra_uarta_device,
+ &tegra_uartb_device,
+ &tegra_uartc_device,
+ &tegra_uartd_device,
+ &tegra_uarte_device,
+};
+static struct uart_clk_parent uart_parent_clk[] = {
+ [0] = {.name = "clk_m"},
+ [1] = {.name = "pll_p"},
+#ifndef CONFIG_TEGRA_PLLM_RESTRICTED
+ [2] = {.name = "pll_m"},
+#endif
+};
+
+static struct tegra_uart_platform_data cardhu_uart_pdata;
+
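+ /* Pick the low-speed debug UART for this board, swap the matching
+ * debug_uartX platform device into the UART list and record its clock
+ * and MMIO base. */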
+static void __init uart_debug_init(void)
+{
+ struct board_info board_info;
+ int debug_port_id;
+
+ tegra_get_board_info(&board_info);
+
+ debug_port_id = get_tegra_uart_debug_port_id();
+ if (debug_port_id < 0) {
+ debug_port_id = 0;
+ /* UARTB is debug port
+ * for SLT - E1186/E1187/PM269
+ * for E1256/E1257
+ */
+ if (((board_info.sku & SKU_SLT_ULPI_SUPPORT) &&
+ ((board_info.board_id == BOARD_E1186) ||
+ (board_info.board_id == BOARD_E1187) ||
+ (board_info.board_id == BOARD_PM269))) ||
+ (board_info.board_id == BOARD_E1256) ||
+ (board_info.board_id == BOARD_E1257))
+ debug_port_id = 1;
+ }
+ switch (debug_port_id) {
+ case 0:
+ /* UARTA is the debug port. */
+ pr_info("Selecting UARTA as the debug console\n");
+ cardhu_uart_devices[0] = &debug_uarta_device;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uarta");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uarta_device.dev.platform_data))->mapbase;
+ break;
+
+ case 1:
+ /* UARTB is the debug port. */
+ pr_info("Selecting UARTB as the debug console\n");
+ cardhu_uart_devices[1] = &debug_uartb_device;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartb");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uartb_device.dev.platform_data))->mapbase;
+ break;
+
+ case 2:
+ /* UARTC is the debug port. */
+ pr_info("Selecting UARTC as the debug console\n");
+ cardhu_uart_devices[2] = &debug_uartc_device;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartc");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uartc_device.dev.platform_data))->mapbase;
+ break;
+
+ case 3:
+ /* UARTD is the debug port. */
+ pr_info("Selecting UARTD as the debug console\n");
+ cardhu_uart_devices[3] = &debug_uartd_device;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartd");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uartd_device.dev.platform_data))->mapbase;
+ break;
+
+ case 4:
+ /* UARTE is the debug port. */
+ pr_info("Selecting UARTE as the debug console\n");
+ cardhu_uart_devices[4] = &debug_uarte_device;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uarte");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uarte_device.dev.platform_data))->mapbase;
+ break;
+
+ default:
+ pr_info("The debug console id %d is invalid, Assuming UARTA", debug_port_id);
+ cardhu_uart_devices[0] = &debug_uarta_device;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uarta");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uarta_device.dev.platform_data))->mapbase;
+ break;
+ }
+ return;
+}
+
+static void __init cardhu_uart_init(void)
+{
+ struct clk *c;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(uart_parent_clk); ++i) {
+ c = tegra_get_clock_by_name(uart_parent_clk[i].name);
+ if (IS_ERR_OR_NULL(c)) {
+ pr_err("Not able to get the clock for %s\n",
+ uart_parent_clk[i].name);
+ continue;
+ }
+ uart_parent_clk[i].parent_clk = c;
+ uart_parent_clk[i].fixed_clk_rate = clk_get_rate(c);
+ }
+ cardhu_uart_pdata.parent_clk_list = uart_parent_clk;
+ cardhu_uart_pdata.parent_clk_count = ARRAY_SIZE(uart_parent_clk);
+ tegra_uarta_device.dev.platform_data = &cardhu_uart_pdata;
+ tegra_uartb_device.dev.platform_data = &cardhu_uart_pdata;
+ tegra_uartc_device.dev.platform_data = &cardhu_uart_pdata;
+ tegra_uartd_device.dev.platform_data = &cardhu_uart_pdata;
+ tegra_uarte_device.dev.platform_data = &cardhu_uart_pdata;
+
+ /* Register low speed only if it is selected */
+ if (!is_tegra_debug_uartport_hs()) {
+ uart_debug_init();
+ /* Clock enable for the debug channel */
+ if (!IS_ERR_OR_NULL(debug_uart_clk)) {
+ pr_info("The debug console clock name is %s\n",
+ debug_uart_clk->name);
+ c = tegra_get_clock_by_name("pll_p");
+ if (IS_ERR_OR_NULL(c)) {
+ pr_err("Not getting the parent clock pll_p\n");
+ } else {
+ clk_set_parent(debug_uart_clk, c);
+ clk_set_rate(debug_uart_clk, clk_get_rate(c));
+ }
+
+ clk_enable(debug_uart_clk);
+ } else {
+ pr_err("Not getting the clock for debug console\n");
+ }
+ }
+
+ platform_add_devices(cardhu_uart_devices,
+ ARRAY_SIZE(cardhu_uart_devices));
+}
+
+static struct platform_device tegra_camera = {
+ .name = "tegra_camera",
+ .id = -1,
+};
+
+static struct platform_device *cardhu_spi_devices[] __initdata = {
+ &tegra_spi_device4,
+};
+
+struct spi_clk_parent spi_parent_clk[] = {
+ [0] = {.name = "pll_p"},
+#ifndef CONFIG_TEGRA_PLLM_RESTRICTED
+ [1] = {.name = "pll_m"},
+ [2] = {.name = "clk_m"},
+#else
+ [1] = {.name = "clk_m"},
+#endif
+};
+
+static struct tegra_spi_platform_data cardhu_spi_pdata = {
+ .is_dma_based = true,
+ .max_dma_buffer = (16 * 1024),
+ .is_clkon_always = false,
+ .max_rate = 100000000,
+};
+
+static void __init cardhu_spi_init(void)
+{
+ int i;
+ struct clk *c;
+ struct board_info board_info;
+
+ tegra_get_board_info(&board_info);
+
+ for (i = 0; i < ARRAY_SIZE(spi_parent_clk); ++i) {
+ c = tegra_get_clock_by_name(spi_parent_clk[i].name);
+ if (IS_ERR_OR_NULL(c)) {
+ pr_err("Not able to get the clock for %s\n",
+ spi_parent_clk[i].name);
+ continue;
+ }
+ spi_parent_clk[i].parent_clk = c;
+ spi_parent_clk[i].fixed_clk_rate = clk_get_rate(c);
+ }
+ cardhu_spi_pdata.parent_clk_list = spi_parent_clk;
+ cardhu_spi_pdata.parent_clk_count = ARRAY_SIZE(spi_parent_clk);
+ tegra_spi_device4.dev.platform_data = &cardhu_spi_pdata;
+ platform_add_devices(cardhu_spi_devices,
+ ARRAY_SIZE(cardhu_spi_devices));
+
+ if (board_info.board_id == BOARD_E1198) {
+ tegra_spi_device2.dev.platform_data = &cardhu_spi_pdata;
+ platform_device_register(&tegra_spi_device2);
+ tegra_spi_slave_device1.dev.platform_data = &cardhu_spi_pdata;
+ platform_device_register(&tegra_spi_slave_device1);
+ }
+}
+
+static struct resource tegra_rtc_resources[] = {
+ [0] = {
+ .start = TEGRA_RTC_BASE,
+ .end = TEGRA_RTC_BASE + TEGRA_RTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_RTC,
+ .end = INT_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tegra_rtc_device = {
+ .name = "tegra_rtc",
+ .id = -1,
+ .resource = tegra_rtc_resources,
+ .num_resources = ARRAY_SIZE(tegra_rtc_resources),
+};
+
+static struct tegra_wm8903_platform_data cardhu_audio_pdata = {
+ .gpio_spkr_en = TEGRA_GPIO_SPKR_EN,
+ .gpio_hp_det = TEGRA_GPIO_HP_DET,
+ .gpio_hp_mute = -1,
+ .gpio_int_mic_en = -1,
+ .gpio_ext_mic_en = -1,
+};
+
+static struct platform_device cardhu_audio_device = {
+ .name = "tegra-snd-wm8903",
+ .id = 0,
+ .dev = {
+ .platform_data = &cardhu_audio_pdata,
+ },
+};
+
+static struct resource ram_console_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ram_console_device = {
+ .name = "ram_console",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ram_console_resources),
+ .resource = ram_console_resources,
+};
+
+static struct platform_device *cardhu_devices[] __initdata = {
+ &tegra_pmu_device,
+ &tegra_rtc_device,
+ &tegra_udc_device,
+#if defined(CONFIG_TEGRA_IOVMM_SMMU)
+ &tegra_smmu_device,
+#endif
+ &tegra_wdt_device,
+#if defined(CONFIG_TEGRA_AVP)
+ &tegra_avp_device,
+#endif
+ &tegra_camera,
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_SE)
+ &tegra_se_device,
+#endif
+ &tegra_ahub_device,
+ &tegra_dam_device0,
+ &tegra_dam_device1,
+ &tegra_dam_device2,
+ &tegra_i2s_device1,
+ &tegra_i2s_device3,
+ &tegra_spdif_device,
+ &spdif_dit_device,
+ &bluetooth_dit_device,
+ &cardhu_bcm4329_rfkill_device,
+ &tegra_pcm_device,
+ &cardhu_audio_device,
+ &tegra_hda_device,
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_AES)
+ &tegra_aes_device,
+#endif
+ &ram_console_device,
+};
+
+#define MXT_CONFIG_CRC 0xD62DE8
+static const u8 config[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0x32, 0x0A, 0x00, 0x14, 0x14, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x00, 0x00,
+ 0x1B, 0x2A, 0x00, 0x20, 0x3C, 0x04, 0x05, 0x00,
+ 0x02, 0x01, 0x00, 0x0A, 0x0A, 0x0A, 0x0A, 0xFF,
+ 0x02, 0x55, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x64, 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
+ 0x00, 0x00, 0x00, 0x05, 0x0A, 0x15, 0x1E, 0x00,
+ 0x00, 0x04, 0xFF, 0x03, 0x3F, 0x64, 0x64, 0x01,
+ 0x0A, 0x14, 0x28, 0x4B, 0x00, 0x02, 0x00, 0x64,
+ 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x10, 0x3C, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+#define MXT_CONFIG_CRC_SKU2000 0xA24D9A
+static const u8 config_sku2000[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0x32, 0x0A, 0x00, 0x14, 0x14, 0x19,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x00, 0x00,
+ 0x1B, 0x2A, 0x00, 0x20, 0x3A, 0x04, 0x05, 0x00, //23=thr 2 di
+ 0x04, 0x04, 0x41, 0x0A, 0x0A, 0x0A, 0x0A, 0xFF,
+ 0x02, 0x55, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, //0A=limit
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
+ 0x00, 0x00, 0x00, 0x05, 0x0A, 0x15, 0x1E, 0x00,
+ 0x00, 0x04, 0x00, 0x03, 0x3F, 0x64, 0x64, 0x01,
+ 0x0A, 0x14, 0x28, 0x4B, 0x00, 0x02, 0x00, 0x64,
+ 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x10, 0x3C, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static struct mxt_platform_data atmel_mxt_info = {
+ .x_line = 27,
+ .y_line = 42,
+ .x_size = 768,
+ .y_size = 1366,
+ .blen = 0x20,
+ .threshold = 0x3C,
+ .voltage = 3300000, /* 3.3V */
+ .orient = 5,
+ .config = config,
+ .config_length = 157,
+ .config_crc = MXT_CONFIG_CRC,
+ .irqflags = IRQF_TRIGGER_FALLING,
+/* .read_chg = &read_chg, */
+ .read_chg = NULL,
+};
+
+static struct i2c_board_info __initdata atmel_i2c_info[] = {
+ {
+ I2C_BOARD_INFO("atmel_mxt_ts", 0x5A),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PH4),
+ .platform_data = &atmel_mxt_info,
+ }
+};
+
+static int __init cardhu_touch_init(void)
+{
+ struct board_info BoardInfo;
+
+ tegra_gpio_enable(TEGRA_GPIO_PH4);
+ tegra_gpio_enable(TEGRA_GPIO_PH6);
+
+ gpio_request(TEGRA_GPIO_PH4, "atmel-irq");
+ gpio_direction_input(TEGRA_GPIO_PH4);
+
+ gpio_request(TEGRA_GPIO_PH6, "atmel-reset");
+ gpio_direction_output(TEGRA_GPIO_PH6, 0);
+ msleep(1);
+ gpio_set_value(TEGRA_GPIO_PH6, 1);
+ msleep(100);
+
+ tegra_get_board_info(&BoardInfo);
+ if ((BoardInfo.sku & SKU_TOUCH_MASK) == SKU_TOUCH_2000) {
+ atmel_mxt_info.config = config_sku2000;
+ atmel_mxt_info.config_crc = MXT_CONFIG_CRC_SKU2000;
+ }
+
+ i2c_register_board_info(1, atmel_i2c_info, 1);
+
+ return 0;
+}
+
+static struct tegra_uhsic_config uhsic_phy_config = {
+ .enable_gpio = EN_HSIC_GPIO,
+ .reset_gpio = -1,
+ .sync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .term_range_adj = 0,
+ .elastic_underrun_limit = 16,
+ .elastic_overrun_limit = 16,
+};
+
+static struct tegra_ehci_platform_data tegra_ehci_uhsic_pdata = {
+ .phy_type = TEGRA_USB_PHY_TYPE_HSIC,
+ .phy_config = &uhsic_phy_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+};
+
+static struct tegra_ehci_platform_data tegra_ehci_pdata[] = {
+ [0] = {
+ .phy_config = &utmi_phy_config[0],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [1] = {
+ .phy_config = &utmi_phy_config[1],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [2] = {
+ .phy_config = &utmi_phy_config[2],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ .hotplug = 1,
+ },
+};
+
+static struct tegra_otg_platform_data tegra_otg_pdata = {
+ .ehci_device = &tegra_ehci1_device,
+ .ehci_pdata = &tegra_ehci_pdata[0],
+};
+
+#ifdef CONFIG_USB_SUPPORT
+static struct usb_phy_plat_data tegra_usb_phy_pdata[] = {
+ [0] = {
+ .instance = 0,
+ .vbus_gpio = -1,
+ .vbus_reg_supply = "vdd_vbus_micro_usb",
+ },
+ [1] = {
+ .instance = 1,
+ .vbus_gpio = -1,
+ },
+ [2] = {
+ .instance = 2,
+ .vbus_gpio = -1,
+ .vbus_reg_supply = "vdd_vbus_typea_usb",
+ },
+};
+
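+ /* HSIC PHY callbacks: report suspend/resume/power transitions to the
+ * XMM baseband power driver when CONFIG_TEGRA_BB_XMM_POWER is set. */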
+static int cardhu_usb_hsic_postsupend(void)
+{
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L2);
+#endif
+ return 0;
+}
+
+static int cardhu_usb_hsic_preresume(void)
+{
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L2TOL0);
+#endif
+ return 0;
+}
+
+static int cardhu_usb_hsic_phy_ready(void)
+{
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L0);
+#endif
+ return 0;
+}
+
+static int cardhu_usb_hsic_phy_off(void)
+{
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L3);
+#endif
+ return 0;
+}
+
+static void cardhu_usb_init(void)
+{
+ struct board_info bi;
+
+ tegra_get_board_info(&bi);
+
+ tegra_usb_phy_init(tegra_usb_phy_pdata,
+ ARRAY_SIZE(tegra_usb_phy_pdata));
+
+ tegra_otg_device.dev.platform_data = &tegra_otg_pdata;
+ platform_device_register(&tegra_otg_device);
+ if (bi.board_id == BOARD_PM267) {
+ uhsic_phy_config.reset_gpio =
+ PM267_SMSC4640_HSIC_HUB_RESET_GPIO;
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci_uhsic_pdata;
+ platform_device_register(&tegra_ehci2_device);
+ } else if (bi.board_id == BOARD_E1256) {
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci_uhsic_pdata;
+ platform_device_register(&tegra_ehci2_device);
+ } else if (bi.board_id == BOARD_E1186) {
+ /* for baseband devices do not switch off phy during suspend */
+ tegra_ehci_uhsic_pdata.power_down_on_bus_suspend = 0;
+ uhsic_phy_config.postsuspend = cardhu_usb_hsic_postsupend;
+ uhsic_phy_config.preresume = cardhu_usb_hsic_preresume;
+ uhsic_phy_config.usb_phy_ready = cardhu_usb_hsic_phy_ready;
+ uhsic_phy_config.post_phy_off = cardhu_usb_hsic_phy_off;
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci_uhsic_pdata;
+ /* baseband registration happens in baseband-xmm-power */
+ } else {
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci_pdata[1];
+ platform_device_register(&tegra_ehci2_device);
+ }
+
+ tegra_ehci3_device.dev.platform_data = &tegra_ehci_pdata[2];
+ platform_device_register(&tegra_ehci3_device);
+
+}
+#else
+static void cardhu_usb_init(void) { }
+#endif
+
+static void cardhu_gps_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PU2);
+ tegra_gpio_enable(TEGRA_GPIO_PU3);
+}
+
+static void cardhu_nfc_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PX0);
+ tegra_gpio_enable(TEGRA_GPIO_PP3);
+ tegra_gpio_enable(TEGRA_GPIO_PO7);
+}
+
+static struct baseband_power_platform_data tegra_baseband_power_data = {
+ .baseband_type = BASEBAND_XMM,
+ .modem = {
+ .xmm = {
+ .bb_rst = XMM_GPIO_BB_RST,
+ .bb_on = XMM_GPIO_BB_ON,
+ .ipc_bb_wake = XMM_GPIO_IPC_BB_WAKE,
+ .ipc_ap_wake = XMM_GPIO_IPC_AP_WAKE,
+ .ipc_hsic_active = XMM_GPIO_IPC_HSIC_ACTIVE,
+ .ipc_hsic_sus_req = XMM_GPIO_IPC_HSIC_SUS_REQ,
+ .hsic_device = &tegra_ehci2_device,
+ },
+ },
+};
+
+static struct platform_device tegra_baseband_power_device = {
+ .name = "baseband_xmm_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &tegra_baseband_power_data,
+ },
+};
+
+static struct platform_device tegra_baseband_power2_device = {
+ .name = "baseband_xmm_power2",
+ .id = -1,
+ .dev = {
+ .platform_data = &tegra_baseband_power_data,
+ },
+};
+
+
+static struct tegra_pci_platform_data cardhu_pci_platform_data = {
+ .port_status[0] = 1,
+ .port_status[1] = 1,
+ .port_status[2] = 1,
+ .use_dock_detect = 0,
+ .gpio = 0,
+};
+
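+ /* E1291 uses only the third PCIe port and gates it on dock detection;
+ * all other boards enable all three ports. */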
+static void cardhu_pci_init(void)
+{
+ struct board_info board_info;
+
+ tegra_get_board_info(&board_info);
+ if (board_info.board_id == BOARD_E1291) {
+ cardhu_pci_platform_data.port_status[0] = 0;
+ cardhu_pci_platform_data.port_status[1] = 0;
+ cardhu_pci_platform_data.port_status[2] = 1;
+ cardhu_pci_platform_data.use_dock_detect = 1;
+ cardhu_pci_platform_data.gpio = DOCK_DETECT_GPIO;
+ }
+ tegra_pci_device.dev.platform_data = &cardhu_pci_platform_data;
+ platform_device_register(&tegra_pci_device);
+}
+
+static void cardhu_modem_init(void)
+{
+ struct board_info board_info;
+ int w_disable_gpio, ret;
+
+ tegra_get_board_info(&board_info);
+ switch (board_info.board_id) {
+ case BOARD_E1291:
+ case BOARD_E1198:
+ if (((board_info.board_id == BOARD_E1291) &&
+ (board_info.fab < BOARD_FAB_A03)) ||
+ ((board_info.board_id == BOARD_E1198) &&
+ (board_info.fab < BOARD_FAB_A02))) {
+ w_disable_gpio = TEGRA_GPIO_PH5;
+ } else {
+ w_disable_gpio = TEGRA_GPIO_PDD5;
+ }
+ tegra_gpio_enable(w_disable_gpio);
+ ret = gpio_request(w_disable_gpio, "w_disable_gpio");
+ if (ret < 0)
+ pr_err("%s: gpio_request failed for gpio %d\n",
+ __func__, w_disable_gpio);
+ else
+ gpio_direction_input(w_disable_gpio);
+
+ /* E1291-A04 & E1198:A02: Set PERST signal to high */
+ if (((board_info.board_id == BOARD_E1291) &&
+ (board_info.fab >= BOARD_FAB_A04)) ||
+ ((board_info.board_id == BOARD_E1198) &&
+ (board_info.fab >= BOARD_FAB_A02))) {
+ ret = gpio_request(TEGRA_GPIO_PH7, "modem_perst");
+ if (ret < 0) {
+ pr_err("%s(): Error in allocating gpio "
+ "TEGRA_GPIO_PH7\n", __func__);
+ break;
+ }
+ gpio_direction_output(TEGRA_GPIO_PH7, 1);
+ tegra_gpio_enable(TEGRA_GPIO_PH7);
+ }
+ break;
+ case BOARD_E1186:
+ tegra_gpio_enable(
+ tegra_baseband_power_data.modem.xmm.bb_rst);
+ tegra_gpio_enable(
+ tegra_baseband_power_data.modem.xmm.bb_on);
+ tegra_gpio_enable(
+ tegra_baseband_power_data.modem.xmm.ipc_bb_wake);
+ tegra_gpio_enable(
+ tegra_baseband_power_data.modem.xmm.ipc_ap_wake);
+ tegra_gpio_enable(
+ tegra_baseband_power_data.modem.xmm.ipc_hsic_active);
+ tegra_gpio_enable(
+ tegra_baseband_power_data.modem.xmm.ipc_hsic_sus_req);
+ platform_device_register(&tegra_baseband_power_device);
+ platform_device_register(&tegra_baseband_power2_device);
+ break;
+ default:
+ break;
+ }
+
+}
+
+#ifdef CONFIG_SATA_AHCI_TEGRA
+static void cardhu_sata_init(void)
+{
+ platform_device_register(&tegra_sata_device);
+}
+#else
+static void cardhu_sata_init(void) { }
+#endif
+
+static void __init tegra_cardhu_init(void)
+{
+ tegra_thermal_init(&thermal_data);
+ tegra_clk_init_from_table(cardhu_clk_init_table);
+ cardhu_pinmux_init();
+ cardhu_i2c_init();
+ cardhu_spi_init();
+ cardhu_usb_init();
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ cardhu_edp_init();
+#endif
+ cardhu_uart_init();
+ cardhu_tsensor_init();
+ platform_add_devices(cardhu_devices, ARRAY_SIZE(cardhu_devices));
+ cardhu_sdhci_init();
+ cardhu_regulator_init();
+ cardhu_gpio_switch_regulator_init();
+ cardhu_suspend_init();
+ cardhu_power_off_init();
+ cardhu_touch_init();
+ cardhu_gps_init();
+ cardhu_modem_init();
+ cardhu_kbc_init();
+ cardhu_scroll_init();
+ cardhu_keys_init();
+ cardhu_panel_init();
+ cardhu_pmon_init();
+ cardhu_sensors_init();
+ cardhu_setup_bluesleep();
+ cardhu_sata_init();
+ //audio_wired_jack_init();
+ cardhu_pins_state_init();
+ cardhu_emc_init();
+ tegra_release_bootloader_fb();
+ cardhu_nfc_init();
+ cardhu_pci_init();
+#ifdef CONFIG_TEGRA_WDT_RECOVERY
+ tegra_wdt_recovery_init();
+#endif
+}
+
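+ /* Carve the top 'size' bytes of DRAM out of the kernel's memory map and
+ * point the persistent RAM console device at that region. */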
+static void __init cardhu_ramconsole_reserve(unsigned long size)
+{
+ struct resource *res;
+ long ret;
+
+ res = platform_get_resource(&ram_console_device, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("Failed to find memory resource for ram console\n");
+ return;
+ }
+ res->start = memblock_end_of_DRAM() - size;
+ res->end = res->start + size - 1;
+ ret = memblock_remove(res->start, size);
+ if (ret) {
+ ram_console_device.resource = NULL;
+ ram_console_device.num_resources = 0;
+ pr_err("Failed to reserve memory block for ram console\n");
+ }
+}
+
+static void __init tegra_cardhu_reserve(void)
+{
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM)
+ /* support 1920X1200 with 24bpp */
+ tegra_reserve(0, SZ_8M + SZ_1M, SZ_8M + SZ_1M);
+#else
+ tegra_reserve(SZ_128M, SZ_8M, SZ_8M);
+#endif
+ cardhu_ramconsole_reserve(SZ_1M);
+}
+
+MACHINE_START(CARDHU, "cardhu")
+ .boot_params = 0x80000100,
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_cardhu_reserve,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_cardhu_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-cardhu.h b/arch/arm/mach-tegra/board-cardhu.h
new file mode 100644
index 000000000000..47b946ca5641
--- /dev/null
+++ b/arch/arm/mach-tegra/board-cardhu.h
@@ -0,0 +1,254 @@
+/*
+ * arch/arm/mach-tegra/board-cardhu.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_BOARD_CARDHU_H
+#define _MACH_TEGRA_BOARD_CARDHU_H
+
+#include <mach/gpio.h>
+#include <mach/irqs.h>
+#include <linux/mfd/tps6591x.h>
+#include <linux/mfd/ricoh583.h>
+
+/* Processor Board ID */
+#define BOARD_E1187 0x0B57
+#define BOARD_E1186 0x0B56
+#define BOARD_E1198 0x0B62
+#define BOARD_E1256 0x0C38
+#define BOARD_E1257 0x0C39
+#define BOARD_E1291 0x0C5B
+#define BOARD_PM267 0x0243
+#define BOARD_PM269 0x0245
+#define BOARD_E1208 0x0C08
+#define BOARD_PM305 0x0305
+#define BOARD_PM311 0x030B
+#define BOARD_PMU_PM298 0x0262
+#define BOARD_PMU_PM299 0x0263
+
+/* SKU Information */
+#define SKU_DCDC_TPS62361_SUPPORT 0x1
+#define SKU_SLT_ULPI_SUPPORT 0x2
+#define SKU_T30S_SUPPORT 0x4
+#define SKU_TOUCHSCREEN_MECH_FIX 0x0100
+
+#define SKU_TOUCH_MASK 0xFF00
+#define SKU_TOUCH_2000 0x0B00
+
+#define SKU_MEMORY_TYPE_BIT 0x3
+#define SKU_MEMORY_TYPE_MASK 0x7
+/* If BOARD_PM269 */
+#define SKU_MEMORY_SAMSUNG_EC 0x0
+#define SKU_MEMORY_ELPIDA 0x2
+#define SKU_MEMORY_SAMSUNG_EB 0x4
+/* If BOARD_PM272 */
+#define SKU_MEMORY_1GB_1R_HYNIX 0x0
+#define SKU_MEMORY_2GB_2R_HYH9 0x2
+/* If other BOARD_ variants */
+#define SKU_MEMORY_CARDHU_1GB_1R 0x0
+#define SKU_MEMORY_CARDHU_2GB_2R 0x2
+#define SKU_MEMORY_CARDHU_2GB_1R_HYK0 0x4
+#define SKU_MEMORY_CARDHU_2GB_1R_HYH9 0x6
+#define SKU_MEMORY_CARDHU_2GB_1R_HYNIX 0x1
+#define MEMORY_TYPE(sku) (((sku) >> SKU_MEMORY_TYPE_BIT) & SKU_MEMORY_TYPE_MASK)
+
+/* Board Fab version */
+#define BOARD_FAB_A00 0x0
+#define BOARD_FAB_A01 0x1
+#define BOARD_FAB_A02 0x2
+#define BOARD_FAB_A03 0x3
+#define BOARD_FAB_A04 0x4
+#define BOARD_FAB_A05 0x5
+
+/* Display Board ID */
+#define BOARD_DISPLAY_PM313 0x030D
+#define BOARD_DISPLAY_E1247 0x0C2F
+
+/* External peripheral act as gpio */
+/* TPS6591x GPIOs */
+#define TPS6591X_GPIO_BASE TEGRA_NR_GPIOS
+#define TPS6591X_GPIO_0 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP0)
+#define TPS6591X_GPIO_1 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP1)
+#define TPS6591X_GPIO_2 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP2)
+#define TPS6591X_GPIO_3 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP3)
+#define TPS6591X_GPIO_4 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP4)
+#define TPS6591X_GPIO_5 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP5)
+#define TPS6591X_GPIO_6 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP6)
+#define TPS6591X_GPIO_7 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP7)
+#define TPS6591X_GPIO_8 (TPS6591X_GPIO_BASE + TPS6591X_GPIO_GP8)
+#define TPS6591X_GPIO_END (TPS6591X_GPIO_BASE + TPS6591X_GPIO_NR)
+
+/* RICOH583 GPIO */
+#define RICOH583_GPIO_BASE TEGRA_NR_GPIOS
+#define RICOH583_GPIO_END (RICOH583_GPIO_BASE + 8)
+
+/* MAX77663 GPIO */
+#define MAX77663_GPIO_BASE TEGRA_NR_GPIOS
+#define MAX77663_GPIO_END (MAX77663_GPIO_BASE + MAX77663_GPIO_NR)
+
+/* PMU_TCA6416 GPIOs */
+#define PMU_TCA6416_GPIO_BASE (TPS6591X_GPIO_END)
+#define PMU_TCA6416_GPIO_PORT00 (PMU_TCA6416_GPIO_BASE + 0)
+#define PMU_TCA6416_GPIO_PORT01 (PMU_TCA6416_GPIO_BASE + 1)
+#define PMU_TCA6416_GPIO_PORT02 (PMU_TCA6416_GPIO_BASE + 2)
+#define PMU_TCA6416_GPIO_PORT03 (PMU_TCA6416_GPIO_BASE + 3)
+#define PMU_TCA6416_GPIO_PORT04 (PMU_TCA6416_GPIO_BASE + 4)
+#define PMU_TCA6416_GPIO_PORT05 (PMU_TCA6416_GPIO_BASE + 5)
+#define PMU_TCA6416_GPIO_PORT06 (PMU_TCA6416_GPIO_BASE + 6)
+#define PMU_TCA6416_GPIO_PORT07 (PMU_TCA6416_GPIO_BASE + 7)
+#define PMU_TCA6416_GPIO_PORT10 (PMU_TCA6416_GPIO_BASE + 8)
+#define PMU_TCA6416_GPIO_PORT11 (PMU_TCA6416_GPIO_BASE + 9)
+#define PMU_TCA6416_GPIO_PORT12 (PMU_TCA6416_GPIO_BASE + 10)
+#define PMU_TCA6416_GPIO_PORT13 (PMU_TCA6416_GPIO_BASE + 11)
+#define PMU_TCA6416_GPIO_PORT14 (PMU_TCA6416_GPIO_BASE + 12)
+#define PMU_TCA6416_GPIO_PORT15 (PMU_TCA6416_GPIO_BASE + 13)
+#define PMU_TCA6416_GPIO_PORT16 (PMU_TCA6416_GPIO_BASE + 14)
+#define PMU_TCA6416_GPIO_PORT17 (PMU_TCA6416_GPIO_BASE + 15)
+#define PMU_TCA6416_GPIO_END (PMU_TCA6416_GPIO_BASE + 16)
+
+/* PMU_TCA6416 GPIO assignment */
+#define EN_HSIC_GPIO PMU_TCA6416_GPIO_PORT11 /* PMU_GPIO25 */
+#define PM267_SMSC4640_HSIC_HUB_RESET_GPIO PMU_TCA6416_GPIO_PORT17 /* PMU_GPIO31 */
+
+/* CAM_TCA6416 GPIOs */
+#define CAM_TCA6416_GPIO_BASE PMU_TCA6416_GPIO_END
+#define CAM1_PWR_DN_GPIO CAM_TCA6416_GPIO_BASE + 0
+#define CAM1_RST_L_GPIO CAM_TCA6416_GPIO_BASE + 1
+#define CAM1_AF_PWR_DN_L_GPIO CAM_TCA6416_GPIO_BASE + 2
+#define CAM1_LDO_SHUTDN_L_GPIO CAM_TCA6416_GPIO_BASE + 3
+#define CAM2_PWR_DN_GPIO CAM_TCA6416_GPIO_BASE + 4
+#define CAM2_RST_L_GPIO CAM_TCA6416_GPIO_BASE + 5
+#define CAM2_AF_PWR_DN_L_GPIO CAM_TCA6416_GPIO_BASE + 6
+#define CAM2_LDO_SHUTDN_L_GPIO CAM_TCA6416_GPIO_BASE + 7
+#define CAM_FRONT_PWR_DN_GPIO CAM_TCA6416_GPIO_BASE + 8
+#define CAM_FRONT_RST_L_GPIO CAM_TCA6416_GPIO_BASE + 9
+#define CAM_FRONT_AF_PWR_DN_L_GPIO CAM_TCA6416_GPIO_BASE + 10
+#define CAM_FRONT_LDO_SHUTDN_L_GPIO CAM_TCA6416_GPIO_BASE + 11
+#define CAM_FRONT_LED_EXP CAM_TCA6416_GPIO_BASE + 12
+#define CAM_SNN_LED_REAR_EXP CAM_TCA6416_GPIO_BASE + 13
+/* PIN 19 NOT USED and is reserved */
+#define CAM_NOT_USED CAM_TCA6416_GPIO_BASE + 14
+#define CAM_I2C_MUX_RST_EXP CAM_TCA6416_GPIO_BASE + 15
+#define CAM_TCA6416_GPIO_END CAM_TCA6416_GPIO_BASE + 16
+
+/* WM8903 GPIOs */
+#define CARDHU_GPIO_WM8903(_x_) (CAM_TCA6416_GPIO_END + (_x_))
+#define CARDHU_GPIO_WM8903_END CARDHU_GPIO_WM8903(4)
+
+/* Audio-related GPIOs */
+#define TEGRA_GPIO_CDC_IRQ TEGRA_GPIO_PW3
+#define TEGRA_GPIO_SPKR_EN CARDHU_GPIO_WM8903(2)
+#define TEGRA_GPIO_HP_DET TEGRA_GPIO_PW2
+
+/* CAMERA RELATED GPIOs on CARDHU */
+#define OV5650_RESETN_GPIO TEGRA_GPIO_PBB0
+#define CAM1_POWER_DWN_GPIO TEGRA_GPIO_PBB5
+#define CAM2_POWER_DWN_GPIO TEGRA_GPIO_PBB6
+#define CAM3_POWER_DWN_GPIO TEGRA_GPIO_PBB7
+#define CAMERA_CSI_CAM_SEL_GPIO TEGRA_GPIO_PBB4
+#define CAMERA_CSI_MUX_SEL_GPIO TEGRA_GPIO_PCC1
+#define CAM1_LDO_EN_GPIO TEGRA_GPIO_PR6
+#define CAM2_LDO_EN_GPIO TEGRA_GPIO_PR7
+#define CAM3_LDO_EN_GPIO TEGRA_GPIO_PS0
+#define OV14810_RESETN_GPIO TEGRA_GPIO_PBB0
+
+#define CAMERA_FLASH_SYNC_GPIO TEGRA_GPIO_PBB3
+#define CAMERA_FLASH_MAX_TORCH_AMP 7
+#define CAMERA_FLASH_MAX_FLASH_AMP 7
+
+/* PCA954x I2C bus expander bus addresses */
+#define PCA954x_I2C_BUS_BASE 6
+#define PCA954x_I2C_BUS0 (PCA954x_I2C_BUS_BASE + 0)
+#define PCA954x_I2C_BUS1 (PCA954x_I2C_BUS_BASE + 1)
+#define PCA954x_I2C_BUS2 (PCA954x_I2C_BUS_BASE + 2)
+#define PCA954x_I2C_BUS3 (PCA954x_I2C_BUS_BASE + 3)
+
+#define AC_PRESENT_GPIO TPS6591X_GPIO_4
+
+/*****************Interrupt tables ******************/
+/* External peripheral act as interrupt controller */
+/* TPS6591x IRQs */
+#define TPS6591X_IRQ_BASE TEGRA_NR_IRQS
+#define TPS6591X_IRQ_END (TPS6591X_IRQ_BASE + 18)
+#define DOCK_DETECT_GPIO TEGRA_GPIO_PU4
+
+/* RICOH583 IRQs */
+#define RICOH583_IRQ_BASE TEGRA_NR_IRQS
+#define RICOH583_IRQ_END (RICOH583_IRQ_BASE + RICOH583_NR_IRQS)
+
+/* MAX77663 IRQs */
+#define MAX77663_IRQ_BASE TEGRA_NR_IRQS
+#define MAX77663_IRQ_END (MAX77663_IRQ_BASE + MAX77663_IRQ_NR)
+
+int cardhu_charge_init(void);
+int cardhu_regulator_init(void);
+int cardhu_suspend_init(void);
+int cardhu_sdhci_init(void);
+int cardhu_pinmux_init(void);
+int cardhu_panel_init(void);
+int cardhu_sensors_init(void);
+int cardhu_kbc_init(void);
+int cardhu_scroll_init(void);
+int cardhu_keys_init(void);
+int cardhu_gpio_switch_regulator_init(void);
+int cardhu_pins_state_init(void);
+int cardhu_emc_init(void);
+int cardhu_power_off_init(void);
+int cardhu_edp_init(void);
+int cardhu_pmon_init(void);
+int cardhu_pm298_gpio_switch_regulator_init(void);
+int cardhu_pm298_regulator_init(void);
+int cardhu_pm299_gpio_switch_regulator_init(void);
+int cardhu_pm299_regulator_init(void);
+void __init cardhu_tsensor_init(void);
+
+/* Invensense MPU Definitions */
+#define MPU_GYRO_NAME "mpu3050"
+#define MPU_GYRO_IRQ_GPIO TEGRA_GPIO_PX1
+#define MPU_GYRO_ADDR 0x68
+#define MPU_GYRO_BUS_NUM 2
+#define MPU_GYRO_ORIENTATION { 0, -1, 0, -1, 0, 0, 0, 0, -1 }
+#define MPU_ACCEL_NAME "kxtf9"
+#define MPU_ACCEL_IRQ_GPIO TEGRA_GPIO_PL1
+#define MPU_ACCEL_ADDR 0x0F
+#define MPU_ACCEL_BUS_NUM 2
+#define MPU_ACCEL_ORIENTATION { 0, -1, 0, -1, 0, 0, 0, 0, -1 }
+#define MPU_COMPASS_NAME "ak8975"
+#define MPU_COMPASS_IRQ_GPIO 0
+#define MPU_COMPASS_ADDR 0x0C
+#define MPU_COMPASS_BUS_NUM 2
+#define MPU_COMPASS_ORIENTATION { 1, 0, 0, 0, 1, 0, 0, 0, 1 }
+
+/* Baseband GPIO addresses */
+#define BB_GPIO_BB_EN TEGRA_GPIO_PR5
+#define BB_GPIO_BB_RST TEGRA_GPIO_PS4
+#define BB_GPIO_SPI_INT TEGRA_GPIO_PS6
+#define BB_GPIO_SPI_SS TEGRA_GPIO_PV0
+#define BB_GPIO_AWR TEGRA_GPIO_PS7
+#define BB_GPIO_CWR TEGRA_GPIO_PU5
+
+#define XMM_GPIO_BB_ON BB_GPIO_BB_EN
+#define XMM_GPIO_BB_RST BB_GPIO_BB_RST
+#define XMM_GPIO_IPC_HSIC_ACTIVE BB_GPIO_SPI_INT
+#define XMM_GPIO_IPC_HSIC_SUS_REQ BB_GPIO_SPI_SS
+#define XMM_GPIO_IPC_BB_WAKE BB_GPIO_AWR
+#define XMM_GPIO_IPC_AP_WAKE BB_GPIO_CWR
+
+#define TDIODE_OFFSET (10000) /* in millicelsius */
+
+#endif
diff --git a/arch/arm/mach-tegra/board-enterprise-baseband.c b/arch/arm/mach-tegra/board-enterprise-baseband.c
new file mode 100644
index 000000000000..1463a066fca5
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-baseband.c
@@ -0,0 +1,246 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-baseband.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/wakelock.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/pinmux.h>
+#include <mach/usb_phy.h>
+#include <mach/tegra_usb_modem_power.h>
+#include "devices.h"
+#include "gpio-names.h"
+
+/* Tegra3 BB GPIO */
+#define MODEM_PWR_ON TEGRA_GPIO_PE0
+#define MODEM_RESET TEGRA_GPIO_PE1
+#define BB_RST_OUT TEGRA_GPIO_PV1
+
+/* Icera BB GPIO */
+#define AP2MDM_ACK TEGRA_GPIO_PE3
+#define MDM2AP_ACK TEGRA_GPIO_PU5
+#define AP2MDM_ACK2 TEGRA_GPIO_PE2
+#define MDM2AP_ACK2 TEGRA_GPIO_PV0
+
+/* ULPI GPIO */
+#define ULPI_STP TEGRA_GPIO_PY3
+#define ULPI_DIR TEGRA_GPIO_PY1
+#define ULPI_D0 TEGRA_GPIO_PO1
+#define ULPI_D1 TEGRA_GPIO_PO2
+
+static struct wake_lock mdm_wake_lock;
+
+static struct gpio modem_gpios[] = {
+ {MODEM_PWR_ON, GPIOF_OUT_INIT_LOW, "MODEM PWR ON"},
+ {MODEM_RESET, GPIOF_IN, "MODEM RESET"},
+ {BB_RST_OUT, GPIOF_IN, "BB RST OUT"},
+ {MDM2AP_ACK, GPIOF_IN, "MDM2AP_ACK"},
+ {AP2MDM_ACK2, GPIOF_OUT_INIT_HIGH, "AP2MDM ACK2"},
+ {AP2MDM_ACK, GPIOF_OUT_INIT_LOW, "AP2MDM ACK"},
+ {ULPI_STP, GPIOF_IN, "ULPI_STP"},
+ {ULPI_DIR, GPIOF_OUT_INIT_LOW, "ULPI_DIR"},
+ {ULPI_D0, GPIOF_OUT_INIT_LOW, "ULPI_D0"},
+ {ULPI_D1, GPIOF_OUT_INIT_LOW, "ULPI_D1"},
+};
+
+static int baseband_phy_on(void);
+static int baseband_phy_off(void);
+static void baseband_phy_restore_start(void);
+static void baseband_phy_restore_end(void);
+
+static struct tegra_ulpi_trimmer e1219_trimmer = { 10, 1, 1, 1 };
+
+static struct tegra_ulpi_config ehci2_null_ulpi_phy_config = {
+ .trimmer = &e1219_trimmer,
+ .post_phy_on = baseband_phy_on,
+ .pre_phy_off = baseband_phy_off,
+ .phy_restore_start = baseband_phy_restore_start,
+ .phy_restore_end = baseband_phy_restore_end,
+ .phy_restore_gpio = MDM2AP_ACK,
+ .ulpi_dir_gpio = ULPI_DIR,
+ .ulpi_d0_gpio = ULPI_D0,
+ .ulpi_d1_gpio = ULPI_D1,
+};
+
+static struct tegra_ehci_platform_data ehci2_null_ulpi_platform_data = {
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ .phy_config = &ehci2_null_ulpi_phy_config,
+ .phy_type = TEGRA_USB_PHY_TYPE_NULL_ULPI,
+};
+
+static int __init tegra_null_ulpi_init(void)
+{
+ tegra_ehci2_device.dev.platform_data = &ehci2_null_ulpi_platform_data;
+ platform_device_register(&tegra_ehci2_device);
+ return 0;
+}
+
+static irqreturn_t mdm_start_thread(int irq, void *data)
+{
+ if (gpio_get_value(BB_RST_OUT)) {
+ pr_info("BB_RST_OUT high\n");
+ } else {
+ pr_info("BB_RST_OUT low\n");
+ /* hold wait lock to complete the enumeration */
+ wake_lock_timeout(&mdm_wake_lock, HZ * 10);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int baseband_phy_on(void)
+{
+ static bool phy_init = false;
+
+ if (!phy_init) {
+ /* set AP2MDM_ACK2 low */
+ gpio_set_value(AP2MDM_ACK2, 0);
+ phy_init = true;
+ }
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static int baseband_phy_off(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static void baseband_phy_restore_start(void)
+{
+ /* set AP2MDM_ACK2 high */
+ gpio_set_value(AP2MDM_ACK2, 1);
+}
+
+static void baseband_phy_restore_end(void)
+{
+ /* set AP2MDM_ACK2 low */
+ gpio_set_value(AP2MDM_ACK2, 0);
+}
+
+static void baseband_start(void)
+{
+ /*
+ * Leave baseband powered OFF.
+ * User-space daemons will take care of powering it up.
+ */
+ pr_info("%s\n", __func__);
+ gpio_set_value(MODEM_PWR_ON, 0);
+}
+
+static void baseband_reset(void)
+{
+ /* Initiate power cycle on baseband sub system */
+ pr_info("%s\n", __func__);
+ gpio_set_value(MODEM_PWR_ON, 0);
+ mdelay(200);
+ gpio_set_value(MODEM_PWR_ON, 1);
+}
+
+static int baseband_init(void)
+{
+ int irq;
+ int ret;
+
+ ret = gpio_request_array(modem_gpios, ARRAY_SIZE(modem_gpios));
+ if (ret)
+ return ret;
+
+ /* enable pull-up for ULPI STP */
+ tegra_pinmux_set_pullupdown(TEGRA_PINGROUP_ULPI_STP,
+ TEGRA_PUPD_PULL_UP);
+
+ /* enable pull-up for MDM2AP_ACK2 */
+ tegra_pinmux_set_pullupdown(TEGRA_PINGROUP_GPIO_PV0,
+ TEGRA_PUPD_PULL_UP);
+
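+ /*
+ * tegra_gpio_enable() switches each of these pads from its special-function
+ * (SFIO) assignment to GPIO mode so that the lines requested above are
+ * actually driven/read as GPIOs.
+ */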
+ tegra_gpio_enable(MODEM_PWR_ON);
+ tegra_gpio_enable(MODEM_RESET);
+ tegra_gpio_enable(AP2MDM_ACK2);
+ tegra_gpio_enable(BB_RST_OUT);
+ tegra_gpio_enable(AP2MDM_ACK);
+ tegra_gpio_enable(MDM2AP_ACK);
+ tegra_gpio_enable(TEGRA_GPIO_PY3);
+ tegra_gpio_enable(TEGRA_GPIO_PO1);
+ tegra_gpio_enable(TEGRA_GPIO_PO2);
+
+ /* export GPIO for user space access through sysfs */
+ gpio_export(MODEM_PWR_ON, false);
+
+ /* phy init */
+ tegra_null_ulpi_init();
+
+ wake_lock_init(&mdm_wake_lock, WAKE_LOCK_SUSPEND, "mdm_lock");
+
+ /* enable IRQ for BB_RST_OUT */
+ irq = gpio_to_irq(BB_RST_OUT);
+
+ ret = request_threaded_irq(irq, NULL, mdm_start_thread,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "mdm_start", NULL);
+ if (ret < 0) {
+ pr_err("%s: request_threaded_irq error\n", __func__);
+ return ret;
+ }
+
+ ret = enable_irq_wake(irq);
+ if (ret) {
+ pr_err("%s: enable_irq_wake error\n", __func__);
+ free_irq(irq, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct tegra_modem_operations baseband_operations = {
+ .init = baseband_init,
+ .start = baseband_start,
+ .reset = baseband_reset,
+};
+
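+/*
+ * Platform data for the tegra_usb_modem_power driver: MDM2AP_ACK2 is handed
+ * over as the modem wake line, and IRQF_TRIGGER_FALLING means a falling edge
+ * from the modem is treated as a remote-wakeup request (an assumption based
+ * on the field names; the driver owns the actual semantics).
+ */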
+static struct tegra_usb_modem_power_platform_data baseband_pdata = {
+ .ops = &baseband_operations,
+ .wake_gpio = MDM2AP_ACK2,
+ .flags = IRQF_TRIGGER_FALLING,
+};
+
+static struct platform_device icera_baseband_device = {
+ .name = "tegra_usb_modem_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &baseband_pdata,
+ },
+};
+
+int __init enterprise_modem_init(void)
+{
+ platform_device_register(&icera_baseband_device);
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-enterprise-kbc.c b/arch/arm/mach-tegra/board-enterprise-kbc.c
new file mode 100644
index 000000000000..982d0e474f24
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-kbc.c
@@ -0,0 +1,107 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-kbc.c
+ * Key matrix configuration for the NVIDIA Tegra3 Enterprise platform.
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/kbc.h>
+
+#include "board.h"
+#include "board-enterprise.h"
+#include "devices.h"
+
+#define ENTERPRISE_ROW_COUNT 3
+#define ENTERPRISE_COL_COUNT 3
+
+static const u32 kbd_keymap[] = {
+ KEY(0, 0, KEY_POWER),
+
+ KEY(1, 0, KEY_HOME),
+ KEY(1, 1, KEY_BACK),
+ KEY(1, 2, KEY_VOLUMEDOWN),
+
+ KEY(2, 0, KEY_MENU),
+ KEY(2, 1, KEY_SEARCH),
+ KEY(2, 2, KEY_VOLUMEUP),
+};
+
+static const struct matrix_keymap_data keymap_data = {
+ .keymap = kbd_keymap,
+ .keymap_size = ARRAY_SIZE(kbd_keymap),
+};
+
+static struct tegra_kbc_wake_key enterprise_wake_cfg[] = {
+ [0] = {
+ .row = 0,
+ .col = 0,
+ },
+ [1] = {
+ .row = 1,
+ .col = 0,
+ },
+ [2] = {
+ .row = 1,
+ .col = 1,
+ },
+ [3] = {
+ .row = 2,
+ .col = 0,
+ },
+};
+
+static struct tegra_kbc_platform_data enterprise_kbc_platform_data = {
+ .debounce_cnt = 20,
+ .repeat_cnt = 1,
+ .scan_count = 30,
+ .wakeup = true,
+ .keymap_data = &keymap_data,
+ .wake_cnt = 4,
+ .wake_cfg = &enterprise_wake_cfg[0],
+#ifdef CONFIG_ANDROID
+ .disable_ev_rep = true,
+#endif
+};
+
+int __init enterprise_kbc_init(void)
+{
+ struct tegra_kbc_platform_data *data = &enterprise_kbc_platform_data;
+ int i;
+ tegra_kbc_device.dev.platform_data = &enterprise_kbc_platform_data;
+ pr_info("Registering tegra-kbc\n");
+
+ BUG_ON((KBC_MAX_ROW + KBC_MAX_COL) > KBC_MAX_GPIO);
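+ /*
+ * The first ENTERPRISE_ROW_COUNT pin_cfg entries are programmed as KBC row
+ * pins; the column pins are configured starting at the KBC_PIN_GPIO_16
+ * offset of the same array.
+ */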
+ for (i = 0; i < ENTERPRISE_ROW_COUNT; i++) {
+ data->pin_cfg[i].num = i;
+ data->pin_cfg[i].is_row = true;
+ data->pin_cfg[i].en = true;
+ }
+ for (i = 0; i < ENTERPRISE_COL_COUNT; i++) {
+ data->pin_cfg[i + KBC_PIN_GPIO_16].num = i;
+ data->pin_cfg[i + KBC_PIN_GPIO_16].en = true;
+ }
+
+ platform_device_register(&tegra_kbc_device);
+ pr_info("Registering successful tegra-kbc\n");
+ return 0;
+}
+
diff --git a/arch/arm/mach-tegra/board-enterprise-memory.c b/arch/arm/mach-tegra/board-enterprise-memory.c
new file mode 100644
index 000000000000..3212894cb049
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-memory.c
@@ -0,0 +1,629 @@
+/*
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "board-enterprise.h"
+#include "tegra3_emc.h"
+
+
+static const struct tegra_emc_table enterprise_emc_tables_h5tc2g[] = {
+ {
+ 0x31, /* Rev 3.1 */
+ 25500, /* SDRAM frequency */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x00000003, /* EMC_RFC */
+ 0x00000002, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x0000005e, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000017, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x00000004, /* EMC_TXSR */
+ 0x00000004, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x00000068, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x00780084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00090000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00100220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x0000000a, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x800001c2, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00020001, /* MC_EMEM_ARB_CFG */
+ 0x80000008, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74030303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000009, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ },
+ {
+ 0x31, /* Rev 3.1 */
+ 51000, /* SDRAM frequency */
+ {
+ 0x00000003, /* EMC_RC */
+ 0x00000006, /* EMC_RFC */
+ 0x00000002, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x000000c0, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000030, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x00000008, /* EMC_TXSR */
+ 0x00000008, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000000d5, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000004, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x00780084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00090000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00100220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000013, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000287, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00010001, /* MC_EMEM_ARB_CFG */
+ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060402, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72c30303, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000009, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ },
+ {
+ 0x31, /* Rev 3.1 */
+ 102000, /* SDRAM frequency */
+ {
+ 0x00000006, /* EMC_RC */
+ 0x0000000d, /* EMC_RFC */
+ 0x00000004, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000005, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000001, /* EMC_RRD */
+ 0x00000001, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x00000009, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x00000181, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000002, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x0000000f, /* EMC_TXSR */
+ 0x0000000f, /* EMC_TXSRDLL */
+ 0x00000003, /* EMC_TCKE */
+ 0x00000008, /* EMC_TFAW */
+ 0x00000004, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x000001a9, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x00780084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00090000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00090000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00100220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000025, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0x80000013, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02020001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00060403, /* MC_EMEM_ARB_DA_COVERS */
+ 0x72430504, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x0000000a, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010022, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ },
+ {
+ 0x31, /* Rev 3.1 */
+ 204000, /* SDRAM frequency */
+ {
+ 0x0000000c, /* EMC_RC */
+ 0x0000001a, /* EMC_RFC */
+ 0x00000008, /* EMC_RAS */
+ 0x00000003, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x00000004, /* EMC_W2R */
+ 0x00000001, /* EMC_R2P */
+ 0x00000006, /* EMC_W2P */
+ 0x00000003, /* EMC_RD_RCD */
+ 0x00000003, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000001, /* EMC_WDV */
+ 0x00000003, /* EMC_QUSE */
+ 0x00000001, /* EMC_QRST */
+ 0x0000000b, /* EMC_QSAFE */
+ 0x0000000a, /* EMC_RDV */
+ 0x00000303, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c0, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000001, /* EMC_PDEX2RD */
+ 0x00000003, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x00000007, /* EMC_RW2PDEN */
+ 0x0000001d, /* EMC_TXSR */
+ 0x0000001d, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x0000000b, /* EMC_TFAW */
+ 0x00000005, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x00000351, /* EMC_TREFBW */
+ 0x00000004, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00004282, /* EMC_FBIO_CFG5 */
+ 0x00440084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0007c000, /* EMC_DLL_XFORM_DQS0 */
+ 0x0007c000, /* EMC_DLL_XFORM_DQS1 */
+ 0x0007c000, /* EMC_DLL_XFORM_DQS2 */
+ 0x0007c000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000018, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00088000, /* EMC_DLL_XFORM_DQ3 */
+ 0x000f0220, /* EMC_XM2CMDPADCTRL */
+ 0x0800201c, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f008, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x0000004a, /* EMC_ZCAL_WAIT_CNT */
+ 0x00090009, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000003, /* MC_EMEM_ARB_CFG */
+ 0x80000025, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x02030001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00070506, /* MC_EMEM_ARB_DA_COVERS */
+ 0x71e40a07, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x50000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000013, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010042, /* Mode Register 1 */
+ 0x00020001, /* Mode Register 2 */
+ },
+ {
+ 0x31, /* Rev 3.1 */
+ 400000, /* SDRAM frequency */
+ {
+ 0x00000017, /* EMC_RC */
+ 0x00000033, /* EMC_RFC */
+ 0x00000010, /* EMC_RAS */
+ 0x00000007, /* EMC_RP */
+ 0x00000007, /* EMC_R2W */
+ 0x00000007, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000a, /* EMC_W2P */
+ 0x00000007, /* EMC_RD_RCD */
+ 0x00000007, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000003, /* EMC_WDV */
+ 0x00000007, /* EMC_QUSE */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000b, /* EMC_QSAFE */
+ 0x0000000e, /* EMC_RDV */
+ 0x000005e9, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x0000017a, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000007, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000001, /* EMC_AR2PDEN */
+ 0x0000000c, /* EMC_RW2PDEN */
+ 0x00000038, /* EMC_TXSR */
+ 0x00000038, /* EMC_TXSRDLL */
+ 0x00000006, /* EMC_TCKE */
+ 0x00000014, /* EMC_TFAW */
+ 0x00000009, /* EMC_TRPAB */
+ 0x00000004, /* EMC_TCLKSTABLE */
+ 0x00000002, /* EMC_TCLKSTOP */
+ 0x00000680, /* EMC_TREFBW */
+ 0x00000000, /* EMC_QUSE_EXTRA */
+ 0x00000006, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x00006282, /* EMC_FBIO_CFG5 */
+ 0x001d0084, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00024000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00024000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00024000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00024000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS4 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS5 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS6 */
+ 0x00000010, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000008, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00060220, /* EMC_XM2CMDPADCTRL */
+ 0x0800003d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x77ffc004, /* EMC_XM2CLKPADCTRL */
+ 0x01f1f408, /* EMC_XM2COMPPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000007, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x08000068, /* EMC_XM2QUSEPADCTRL */
+ 0x08000000, /* EMC_XM2DQSPADCTRL3 */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x00064000, /* EMC_ZCAL_INTERVAL */
+ 0x00000090, /* EMC_ZCAL_WAIT_CNT */
+ 0x000c000c, /* EMC_MRS_WAIT_CNT */
+ 0xa0f10000, /* EMC_AUTO_CAL_CONFIG */
+ 0x00000000, /* EMC_CTT */
+ 0x00000000, /* EMC_CTT_DURATION */
+ 0x80000ce6, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000006, /* MC_EMEM_ARB_CFG */
+ 0x80000048, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x04040001, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000d090c, /* MC_EMEM_ARB_DA_COVERS */
+ 0x71c6120d, /* MC_EMEM_ARB_MISC0 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ 0x10000000, /* EMC_FBIO_SPARE */
+ 0xff00ff00, /* EMC_CFG_RSV */
+ },
+ 0x00000024, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000001, /* EMC_CFG.PERIODIC_QRST */
+ 0x00000000, /* Mode Register 0 */
+ 0x00010082, /* Mode Register 1 */
+ 0x00020004, /* Mode Register 2 */
+ },
+};
+
+int enterprise_emc_init(void)
+{
+ tegra_init_emc(enterprise_emc_tables_h5tc2g,
+ ARRAY_SIZE(enterprise_emc_tables_h5tc2g));
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-enterprise-panel.c b/arch/arm/mach-tegra/board-enterprise-panel.c
new file mode 100644
index 000000000000..1d5507004913
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-panel.c
@@ -0,0 +1,822 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-panel.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/earlysuspend.h>
+#include <linux/tegra_pwm_bl.h>
+#include <asm/atomic.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/hardware.h>
+
+#include "board.h"
+#include "board-enterprise.h"
+#include "devices.h"
+#include "gpio-names.h"
+
+#define DC_CTRL_MODE TEGRA_DC_OUT_ONE_SHOT_MODE
+
+/* Select panel to be used. */
+#define AVDD_LCD PMU_TCA6416_GPIO_PORT17
+#define DSI_PANEL_RESET 1
+
+#define enterprise_lvds_shutdown TEGRA_GPIO_PL2
+#define enterprise_hdmi_hpd TEGRA_GPIO_PN7
+
+#define enterprise_dsi_panel_reset TEGRA_GPIO_PW0
+
+#define enterprise_lcd_2d_3d TEGRA_GPIO_PH1
+#define ENTERPRISE_STEREO_3D 0
+#define ENTERPRISE_STEREO_2D 1
+
+#define enterprise_lcd_swp_pl TEGRA_GPIO_PH2
+#define ENTERPRISE_STEREO_LANDSCAPE 0
+#define ENTERPRISE_STEREO_PORTRAIT 1
+
+#define enterprise_lcd_te TEGRA_GPIO_PJ1
+
+#ifdef CONFIG_TEGRA_DC
+static struct regulator *enterprise_dsi_reg = NULL;
+
+static struct regulator *enterprise_hdmi_reg;
+static struct regulator *enterprise_hdmi_pll;
+static struct regulator *enterprise_hdmi_vddio;
+#endif
+
+static atomic_t sd_brightness = ATOMIC_INIT(255);
+
+static tegra_dc_bl_output enterprise_bl_output_measured = {
+ 1, 5, 9, 10, 11, 12, 12, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 21, 21,
+ 22, 22, 23, 24, 24, 25, 26, 26,
+ 27, 27, 28, 29, 29, 31, 31, 32,
+ 32, 33, 34, 35, 36, 36, 37, 38,
+ 39, 39, 40, 41, 41, 42, 43, 43,
+ 44, 45, 45, 46, 47, 47, 48, 49,
+ 49, 50, 51, 51, 52, 53, 53, 54,
+ 55, 56, 56, 57, 58, 59, 60, 61,
+ 61, 62, 63, 64, 65, 65, 66, 67,
+ 67, 68, 69, 69, 70, 71, 71, 72,
+ 73, 73, 74, 74, 75, 76, 76, 77,
+ 77, 78, 79, 79, 80, 81, 82, 83,
+ 83, 84, 85, 85, 86, 86, 88, 89,
+ 90, 91, 91, 92, 93, 93, 94, 95,
+ 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 103, 103, 104, 105, 105,
+ 107, 107, 108, 109, 110, 111, 111, 112,
+ 113, 113, 114, 115, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 122, 123, 124,
+ 124, 125, 126, 126, 127, 128, 129, 129,
+ 130, 131, 131, 132, 133, 133, 134, 135,
+ 135, 136, 137, 137, 138, 139, 139, 140,
+ 142, 142, 143, 144, 145, 146, 147, 147,
+ 148, 149, 149, 150, 151, 152, 153, 153,
+ 153, 154, 155, 156, 157, 158, 158, 159,
+ 160, 161, 162, 163, 163, 164, 165, 165,
+ 166, 166, 167, 168, 169, 169, 170, 170,
+ 171, 172, 173, 173, 174, 175, 175, 176,
+ 176, 178, 178, 179, 180, 181, 182, 182,
+ 183, 184, 185, 186, 186, 187, 188, 188
+};
+
+static p_tegra_dc_bl_output bl_output;
+
+static bool kernel_1st_panel_init = true;
+
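+/*
+ * Backlight notifier: the requested brightness is first scaled by the
+ * current smart-dimmer value published through sd_brightness, then mapped
+ * through the measured bl_output curve (presumably so that perceived
+ * brightness tracks the request roughly linearly).
+ */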
+static int enterprise_backlight_notify(struct device *unused, int brightness)
+{
+ int cur_sd_brightness = atomic_read(&sd_brightness);
+ int orig_brightness = brightness;
+
+ /* SD brightness is an 8-bit scale factor (255 == 100%). */
+ brightness = (brightness * cur_sd_brightness) / 255;
+
+ /* Apply any backlight response curve */
+ if (brightness > 255)
+ pr_info("Error: Brightness > 255!\n");
+ else
+ brightness = bl_output[brightness];
+
+ return brightness;
+}
+
+static int enterprise_disp1_check_fb(struct device *dev, struct fb_info *info);
+
+/*
+ * In case which_pwm is TEGRA_PWM_PM0,
+ * gpio_conf_to_sfio should be TEGRA_GPIO_PW0: set LCD_CS1_N pin to SFIO
+ * In case which_pwm is TEGRA_PWM_PM1,
+ * gpio_conf_to_sfio should be TEGRA_GPIO_PW1: set LCD_M1 pin to SFIO
+ */
+static struct platform_tegra_pwm_backlight_data enterprise_disp1_backlight_data = {
+ .which_dc = 0,
+ .which_pwm = TEGRA_PWM_PM1,
+ .gpio_conf_to_sfio = TEGRA_GPIO_PW1,
+ .switch_to_sfio = &tegra_gpio_disable,
+ .max_brightness = 255,
+ .dft_brightness = 224,
+ .notify = enterprise_backlight_notify,
+ .period = 0xFF,
+ .clk_div = 0x3FF,
+ .clk_select = 0,
+ /* Only toggle backlight on fb blank notifications for disp1 */
+ .check_fb = enterprise_disp1_check_fb,
+};
+
+static struct platform_device enterprise_disp1_backlight_device = {
+ .name = "tegra-pwm-bl",
+ .id = -1,
+ .dev = {
+ .platform_data = &enterprise_disp1_backlight_data,
+ },
+};
+
+#ifdef CONFIG_TEGRA_DC
+static int enterprise_hdmi_vddio_enable(void)
+{
+ int ret;
+ if (!enterprise_hdmi_vddio) {
+ enterprise_hdmi_vddio = regulator_get(NULL, "hdmi_5v0");
+ if (IS_ERR_OR_NULL(enterprise_hdmi_vddio)) {
+ ret = PTR_ERR(enterprise_hdmi_vddio);
+ pr_err("hdmi: couldn't get regulator hdmi_5v0\n");
+ enterprise_hdmi_vddio = NULL;
+ return ret;
+ }
+ }
+ ret = regulator_enable(enterprise_hdmi_vddio);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator hdmi_5v0\n");
+ regulator_put(enterprise_hdmi_vddio);
+ enterprise_hdmi_vddio = NULL;
+ return ret;
+ }
+ return ret;
+}
+
+static int enterprise_hdmi_vddio_disable(void)
+{
+ if (enterprise_hdmi_vddio) {
+ regulator_disable(enterprise_hdmi_vddio);
+ regulator_put(enterprise_hdmi_vddio);
+ enterprise_hdmi_vddio = NULL;
+ }
+ return 0;
+}
+
+static int enterprise_hdmi_enable(void)
+{
+ int ret;
+ if (!enterprise_hdmi_reg) {
+ enterprise_hdmi_reg = regulator_get(NULL, "avdd_hdmi");
+ if (IS_ERR_OR_NULL(enterprise_hdmi_reg)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi\n");
+ enterprise_hdmi_reg = NULL;
+ return PTR_ERR(enterprise_hdmi_reg);
+ }
+ }
+ ret = regulator_enable(enterprise_hdmi_reg);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator avdd_hdmi\n");
+ return ret;
+ }
+ if (!enterprise_hdmi_pll) {
+ enterprise_hdmi_pll = regulator_get(NULL, "avdd_hdmi_pll");
+ if (IS_ERR_OR_NULL(enterprise_hdmi_pll)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi_pll\n");
+ enterprise_hdmi_pll = NULL;
+ regulator_put(enterprise_hdmi_reg);
+ enterprise_hdmi_reg = NULL;
+ return PTR_ERR(enterprise_hdmi_pll);
+ }
+ }
+ ret = regulator_enable(enterprise_hdmi_pll);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator avdd_hdmi_pll\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int enterprise_hdmi_disable(void)
+{
+
+ regulator_disable(enterprise_hdmi_reg);
+ regulator_put(enterprise_hdmi_reg);
+ enterprise_hdmi_reg = NULL;
+
+ regulator_disable(enterprise_hdmi_pll);
+ regulator_put(enterprise_hdmi_pll);
+ enterprise_hdmi_pll = NULL;
+
+ return 0;
+}
+
+static struct resource enterprise_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0, /* Filled in by enterprise_panel_init() */
+ .end = 0, /* Filled in by enterprise_panel_init() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "dsi_regs",
+ .start = TEGRA_DSI_BASE,
+ .end = TEGRA_DSI_BASE + TEGRA_DSI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource enterprise_disp2_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_B_GENERAL,
+ .end = INT_DISPLAY_B_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0,
+ },
+ {
+ .name = "hdmi_regs",
+ .start = TEGRA_HDMI_BASE,
+ .end = TEGRA_HDMI_BASE + TEGRA_HDMI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct tegra_dc_sd_settings enterprise_sd_settings = {
+ .enable = 1, /* Normal mode operation */
+ .use_auto_pwm = false,
+ .hw_update_delay = 0,
+ .bin_width = -1,
+ .aggressiveness = 1,
+ .phase_in_adjustments = true,
+ .use_vid_luma = false,
+ /* Default video coefficients */
+ .coeff = {5, 9, 2},
+ .fc = {0, 0},
+ /* Immediate backlight changes */
+ .blp = {1024, 255},
+ /* Gammas: R: 2.2 G: 2.2 B: 2.2 */
+ /* Default BL TF */
+ .bltf = {
+ {
+ {57, 65, 74, 83},
+ {93, 103, 114, 126},
+ {138, 151, 165, 179},
+ {194, 209, 225, 242},
+ },
+ {
+ {58, 66, 75, 84},
+ {94, 105, 116, 127},
+ {140, 153, 166, 181},
+ {196, 211, 227, 244},
+ },
+ {
+ {60, 68, 77, 87},
+ {97, 107, 119, 130},
+ {143, 156, 170, 184},
+ {199, 215, 231, 248},
+ },
+ {
+ {64, 73, 82, 91},
+ {102, 113, 124, 137},
+ {149, 163, 177, 192},
+ {207, 223, 240, 255},
+ },
+ },
+ /* Default LUT */
+ .lut = {
+ {
+ {250, 250, 250},
+ {194, 194, 194},
+ {149, 149, 149},
+ {113, 113, 113},
+ {82, 82, 82},
+ {56, 56, 56},
+ {34, 34, 34},
+ {15, 15, 15},
+ {0, 0, 0},
+ },
+ {
+ {246, 246, 246},
+ {191, 191, 191},
+ {147, 147, 147},
+ {111, 111, 111},
+ {80, 80, 80},
+ {55, 55, 55},
+ {33, 33, 33},
+ {14, 14, 14},
+ {0, 0, 0},
+ },
+ {
+ {239, 239, 239},
+ {185, 185, 185},
+ {142, 142, 142},
+ {107, 107, 107},
+ {77, 77, 77},
+ {52, 52, 52},
+ {30, 30, 30},
+ {12, 12, 12},
+ {0, 0, 0},
+ },
+ {
+ {224, 224, 224},
+ {173, 173, 173},
+ {133, 133, 133},
+ {99, 99, 99},
+ {70, 70, 70},
+ {46, 46, 46},
+ {25, 25, 25},
+ {7, 7, 7},
+ {0, 0, 0},
+ },
+ },
+ .sd_brightness = &sd_brightness,
+ .bl_device = &enterprise_disp1_backlight_device,
+};
+
+static struct tegra_fb_data enterprise_hdmi_fb_data = {
+ .win = 0,
+ .xres = 1366,
+ .yres = 768,
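+/*
+ * EMC/MC timing tables for the "h5tc2g" SDRAM fitted on Enterprise (the
+ * suffix suggests a Hynix H5TC2G part).  One entry is provided per supported
+ * SDRAM frequency (25.5, 51, 102, 204 and 400 MHz); tegra_init_emc() hands
+ * the set to the Tegra3 EMC driver, which switches between the entries when
+ * the memory clock is scaled.
+ */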
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_out enterprise_disp2_out = {
+ .type = TEGRA_DC_OUT_HDMI,
+ .flags = TEGRA_DC_OUT_HOTPLUG_HIGH,
+ .parent_clk = "pll_d2_out0",
+
+ .dcc_bus = 3,
+ .hotplug_gpio = enterprise_hdmi_hpd,
+
+ .max_pixclock = KHZ2PICOS(148500),
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .enable = enterprise_hdmi_enable,
+ .disable = enterprise_hdmi_disable,
+ .postsuspend = enterprise_hdmi_vddio_disable,
+ .hotplug_init = enterprise_hdmi_vddio_enable,
+};
+
+static struct tegra_dc_platform_data enterprise_disp2_pdata = {
+ .flags = 0,
+ .default_out = &enterprise_disp2_out,
+ .fb = &enterprise_hdmi_fb_data,
+ .emc_clk_rate = 300000000,
+};
+
+static int enterprise_dsi_panel_enable(void)
+{
+ int ret;
+
+ if (enterprise_dsi_reg == NULL) {
+ enterprise_dsi_reg = regulator_get(NULL, "avdd_dsi_csi");
+ if (IS_ERR_OR_NULL(enterprise_dsi_reg)) {
+ pr_err("dsi: Could not get regulator avdd_dsi_csi\n");
+ enterprise_dsi_reg = NULL;
+ return PTR_ERR(enterprise_dsi_reg);
+ }
+ }
+ ret = regulator_enable(enterprise_dsi_reg);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "DSI regulator avdd_dsi_csi could not be enabled\n");
+ return ret;
+ }
+
+#if DSI_PANEL_RESET
+ if (kernel_1st_panel_init != true) {
+ ret = gpio_request(enterprise_dsi_panel_reset, "panel reset");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(enterprise_dsi_panel_reset, 0);
+ if (ret < 0) {
+ gpio_free(enterprise_dsi_panel_reset);
+ return ret;
+ }
+ tegra_gpio_enable(enterprise_dsi_panel_reset);
+
+ gpio_set_value(enterprise_dsi_panel_reset, 0);
+ udelay(2000);
+ gpio_set_value(enterprise_dsi_panel_reset, 1);
+ mdelay(20);
+ }
+#endif
+
+ return ret;
+}
+
+static int enterprise_dsi_panel_disable(void)
+{
+#if DSI_PANEL_RESET
+ if (kernel_1st_panel_init != true) {
+ tegra_gpio_disable(enterprise_dsi_panel_reset);
+ gpio_free(enterprise_dsi_panel_reset);
+ } else
+ kernel_1st_panel_init = false;
+#endif
+ return 0;
+}
+#endif
+
+static void enterprise_stereo_set_mode(int mode)
+{
+ switch (mode) {
+ case TEGRA_DC_STEREO_MODE_2D:
+ gpio_set_value(TEGRA_GPIO_PH1, ENTERPRISE_STEREO_2D);
+ break;
+ case TEGRA_DC_STEREO_MODE_3D:
+ gpio_set_value(TEGRA_GPIO_PH1, ENTERPRISE_STEREO_3D);
+ break;
+ }
+}
+
+static void enterprise_stereo_set_orientation(int mode)
+{
+ switch (mode) {
+ case TEGRA_DC_STEREO_LANDSCAPE:
+ gpio_set_value(TEGRA_GPIO_PH2, ENTERPRISE_STEREO_LANDSCAPE);
+ break;
+ case TEGRA_DC_STEREO_PORTRAIT:
+ gpio_set_value(TEGRA_GPIO_PH2, ENTERPRISE_STEREO_PORTRAIT);
+ break;
+ }
+}
+
+#ifdef CONFIG_TEGRA_DC
+static int enterprise_dsi_panel_postsuspend(void)
+{
+ /* Do nothing for enterprise dsi panel */
+ return 0;
+}
+#endif
+
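+/*
+ * The panel command sequences below use standard MIPI DCS opcodes:
+ * 0x11 exit_sleep_mode, 0x29 set_display_on, 0x28 set_display_off,
+ * 0x10 enter_sleep_mode, and 0x35/0x34 set_tear_on/off.  The tear-effect
+ * commands are only sent in one-shot mode (see the DC_CTRL_MODE checks).
+ */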
+static struct tegra_dsi_cmd dsi_init_cmd[] = {
+ DSI_CMD_SHORT(0x05, 0x11, 0x00),
+ DSI_DLY_MS(20),
+#if(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ DSI_CMD_SHORT(0x15, 0x35, 0x00),
+#endif
+ DSI_CMD_SHORT(0x05, 0x29, 0x00),
+ DSI_DLY_MS(20),
+};
+
+static struct tegra_dsi_cmd dsi_early_suspend_cmd[] = {
+ DSI_CMD_SHORT(0x05, 0x28, 0x00),
+ DSI_DLY_MS(20),
+#if(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ DSI_CMD_SHORT(0x05, 0x34, 0x00),
+#endif
+};
+
+static struct tegra_dsi_cmd dsi_late_resume_cmd[] = {
+#if(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ DSI_CMD_SHORT(0x15, 0x35, 0x00),
+#endif
+ DSI_CMD_SHORT(0x05, 0x29, 0x00),
+ DSI_DLY_MS(20),
+};
+
+static struct tegra_dsi_cmd dsi_suspend_cmd[] = {
+ DSI_CMD_SHORT(0x05, 0x28, 0x00),
+ DSI_DLY_MS(20),
+#if(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ DSI_CMD_SHORT(0x05, 0x34, 0x00),
+#endif
+ DSI_CMD_SHORT(0x05, 0x10, 0x00),
+ DSI_DLY_MS(5),
+};
+
+struct tegra_dsi_out enterprise_dsi = {
+ .n_data_lanes = 2,
+ .pixel_format = TEGRA_DSI_PIXEL_FORMAT_24BIT_P,
+#if(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ /* In one-shot mode a mismatch between the DC frequency and the TE
+ * signal can cause frame drops.  Bump the refresh rate slightly above
+ * the target value so TE pulses are not missed.
+ */
+ .refresh_rate = 66,
+#else
+ .refresh_rate = 60,
+#endif
+ .virtual_channel = TEGRA_DSI_VIRTUAL_CHANNEL_0,
+
+ .panel_has_frame_buffer = true,
+ .dsi_instance = 0,
+
+ .panel_reset = DSI_PANEL_RESET,
+ .power_saving_suspend = true,
+ .n_init_cmd = ARRAY_SIZE(dsi_init_cmd),
+ .dsi_init_cmd = dsi_init_cmd,
+
+ .n_early_suspend_cmd = ARRAY_SIZE(dsi_early_suspend_cmd),
+ .dsi_early_suspend_cmd = dsi_early_suspend_cmd,
+
+ .n_late_resume_cmd = ARRAY_SIZE(dsi_late_resume_cmd),
+ .dsi_late_resume_cmd = dsi_late_resume_cmd,
+
+ .n_suspend_cmd = ARRAY_SIZE(dsi_suspend_cmd),
+ .dsi_suspend_cmd = dsi_suspend_cmd,
+
+ .video_data_type = TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE,
+ .lp_cmd_mode_freq_khz = 20000,
+
+ /* TODO: Get the vendor-recommended frequency */
+ .lp_read_cmd_mode_freq_khz = 200000,
+};
+
+static struct tegra_stereo_out enterprise_stereo = {
+ .set_mode = &enterprise_stereo_set_mode,
+ .set_orientation = &enterprise_stereo_set_orientation,
+};
+
+#ifdef CONFIG_TEGRA_DC
+static struct tegra_dc_mode enterprise_dsi_modes[] = {
+ {
+ .pclk = 10000000,
+ .h_ref_to_sync = 4,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 16,
+ .v_sync_width = 1,
+ .h_back_porch = 32,
+ .v_back_porch = 1,
+ .h_active = 540,
+ .v_active = 960,
+ .h_front_porch = 32,
+ .v_front_porch = 2,
+ },
+};
+
+static struct tegra_fb_data enterprise_dsi_fb_data = {
+ .win = 0,
+ .xres = 540,
+ .yres = 960,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_out enterprise_disp1_out = {
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+ .sd_settings = &enterprise_sd_settings,
+
+ .flags = DC_CTRL_MODE,
+
+ .type = TEGRA_DC_OUT_DSI,
+
+ .modes = enterprise_dsi_modes,
+ .n_modes = ARRAY_SIZE(enterprise_dsi_modes),
+
+ .dsi = &enterprise_dsi,
+ .stereo = &enterprise_stereo,
+
+ .enable = enterprise_dsi_panel_enable,
+ .disable = enterprise_dsi_panel_disable,
+ .postsuspend = enterprise_dsi_panel_postsuspend,
+
+ .width = 53,
+ .height = 95,
+};
+
+static struct tegra_dc_platform_data enterprise_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &enterprise_disp1_out,
+ .emc_clk_rate = 204000000,
+ .fb = &enterprise_dsi_fb_data,
+};
+
+static struct nvhost_device enterprise_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = enterprise_disp1_resources,
+ .num_resources = ARRAY_SIZE(enterprise_disp1_resources),
+ .dev = {
+ .platform_data = &enterprise_disp1_pdata,
+ },
+};
+
+static int enterprise_disp1_check_fb(struct device *dev, struct fb_info *info)
+{
+ return info->device == &enterprise_disp1_device.dev;
+}
+
+static struct nvhost_device enterprise_disp2_device = {
+ .name = "tegradc",
+ .id = 1,
+ .resource = enterprise_disp2_resources,
+ .num_resources = ARRAY_SIZE(enterprise_disp2_resources),
+ .dev = {
+ .platform_data = &enterprise_disp2_pdata,
+ },
+};
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static struct nvmap_platform_carveout enterprise_carveouts[] = {
+ [0] = NVMAP_HEAP_CARVEOUT_IRAM_INIT,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0, /* Filled in by enterprise_panel_init() */
+ .size = 0, /* Filled in by enterprise_panel_init() */
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data enterprise_nvmap_data = {
+ .carveouts = enterprise_carveouts,
+ .nr_carveouts = ARRAY_SIZE(enterprise_carveouts),
+};
+
+static struct platform_device enterprise_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &enterprise_nvmap_data,
+ },
+};
+#endif
+
+static struct platform_device *enterprise_gfx_devices[] __initdata = {
+#if defined(CONFIG_TEGRA_NVMAP)
+ &enterprise_nvmap_device,
+#endif
+#ifdef CONFIG_TEGRA_GRHOST
+ &tegra_grhost_device,
+#endif
+ &tegra_pwfm0_device,
+};
+
+static struct platform_device *enterprise_bl_devices[] = {
+ &enterprise_disp1_backlight_device,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* The display's early_suspend/late_resume handlers are kept here, out of the
+ * display driver itself, so that the display driver stays closer to upstream.
+ */
+struct early_suspend enterprise_panel_early_suspender;
+
+static void enterprise_panel_early_suspend(struct early_suspend *h)
+{
+ unsigned i;
+
+ /* power down the LCD and show a black screen on HDMI */
+ if (num_registered_fb > 0)
+ fb_blank(registered_fb[0], FB_BLANK_POWERDOWN);
+ if (num_registered_fb > 1)
+ fb_blank(registered_fb[1], FB_BLANK_NORMAL);
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ cpufreq_save_default_governor();
+ cpufreq_set_conservative_governor();
+ cpufreq_set_conservative_governor_param(
+ SET_CONSERVATIVE_GOVERNOR_UP_THRESHOLD,
+ SET_CONSERVATIVE_GOVERNOR_DOWN_THRESHOLD);
+#endif
+}
+
+static void enterprise_panel_late_resume(struct early_suspend *h)
+{
+ unsigned i;
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ cpufreq_restore_default_governor();
+#endif
+ for (i = 0; i < num_registered_fb; i++)
+ fb_blank(registered_fb[i], FB_BLANK_UNBLANK);
+}
+#endif
+
+int __init enterprise_panel_init(void)
+{
+ int err;
+ struct resource __maybe_unused *res;
+
+ bl_output = enterprise_bl_output_measured;
+
+ if (WARN_ON(ARRAY_SIZE(enterprise_bl_output_measured) != 256))
+ pr_err("bl_output array does not have 256 elements\n");
+
+ enterprise_dsi.chip_id = tegra_get_chipid();
+ enterprise_dsi.chip_rev = tegra_get_revision();
+
+#if defined(CONFIG_TEGRA_NVAVP)
+ enterprise_carveouts[1].base = tegra_carveout_start;
+ enterprise_carveouts[1].size = tegra_carveout_size;
+#endif
+
+ tegra_gpio_enable(enterprise_hdmi_hpd);
+ gpio_request(enterprise_hdmi_hpd, "hdmi_hpd");
+ gpio_direction_input(enterprise_hdmi_hpd);
+
+ tegra_gpio_enable(enterprise_lcd_2d_3d);
+ gpio_request(enterprise_lcd_2d_3d, "lcd_2d_3d");
+ gpio_direction_output(enterprise_lcd_2d_3d, 0);
+ enterprise_stereo_set_mode(enterprise_stereo.mode_2d_3d);
+
+ tegra_gpio_enable(enterprise_lcd_swp_pl);
+ gpio_request(enterprise_lcd_swp_pl, "lcd_swp_pl");
+ gpio_direction_output(enterprise_lcd_swp_pl, 0);
+ enterprise_stereo_set_orientation(enterprise_stereo.orientation);
+
+#if !(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ tegra_gpio_enable(enterprise_lcd_te);
+ gpio_request(enterprise_lcd_te, "lcd_te");
+ gpio_direction_input(enterprise_lcd_te);
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ enterprise_panel_early_suspender.suspend = enterprise_panel_early_suspend;
+ enterprise_panel_early_suspender.resume = enterprise_panel_late_resume;
+ enterprise_panel_early_suspender.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ register_early_suspend(&enterprise_panel_early_suspender);
+#endif
+
+ err = platform_add_devices(enterprise_gfx_devices,
+ ARRAY_SIZE(enterprise_gfx_devices));
+
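+ /*
+ * The "fbmem" resources were left zeroed in the static resource tables;
+ * fill them in here from the framebuffer ranges reserved at early boot
+ * (tegra_fb_start / tegra_fb2_start).
+ */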
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ res = nvhost_get_resource_byname(&enterprise_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+#endif
+
+ /* Copy the bootloader fb to the fb. */
+ tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
+ min(tegra_fb_size, tegra_bootloader_fb_size));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ if (!err)
+ err = nvhost_device_register(&enterprise_disp1_device);
+
+ res = nvhost_get_resource_byname(&enterprise_disp2_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb2_start;
+ res->end = tegra_fb2_start + tegra_fb2_size - 1;
+ if (!err)
+ err = nvhost_device_register(&enterprise_disp2_device);
+#endif
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_NVAVP)
+ if (!err)
+ err = nvhost_device_register(&nvavp_device);
+#endif
+
+ if (!err)
+ err = platform_add_devices(enterprise_bl_devices,
+ ARRAY_SIZE(enterprise_bl_devices));
+ return err;
+}
diff --git a/arch/arm/mach-tegra/board-enterprise-pinmux.c b/arch/arm/mach-tegra/board-enterprise-pinmux.c
new file mode 100644
index 000000000000..5b3fbbcf0966
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-pinmux.c
@@ -0,0 +1,539 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-pinmux.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/pinmux.h>
+#include "board.h"
+#include "board-enterprise.h"
+#include "gpio-names.h"
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+/* Setting the drive strength of pins
+ * hsm: Enable High speed mode (ENABLE/DISABLE)
+ * schmitt: Enable/disable the Schmitt trigger (ENABLE/DISABLE)
+ * drive: low power mode (DIV_1, DIV_2, DIV_4, DIV_8)
+ * pulldn_drive - drive down (falling edge) - Driver Output Pull-Down drive
+ * strength code. Value from 0 to 31.
+ * pullup_drive - drive up (rising edge) - Driver Output Pull-Up drive
+ * strength code. Value from 0 to 31.
+ * pulldn_slew - Driver Output Pull-Up slew control code - 2bit code
+ * code 11 is least slewing of signal. code 00 is highest
+ * slewing of the signal.
+ * Value - FASTEST, FAST, SLOW, SLOWEST
+ * pullup_slew - Driver Output Pull-Down slew control code -
+ * code 11 is least slewing of signal. code 00 is highest
+ * slewing of the signal.
+ * Value - FASTEST, FAST, SLOW, SLOWEST
+ */
+#define SET_DRIVE(_name, _hsm, _schmitt, _drive, _pulldn_drive, _pullup_drive, _pulldn_slew, _pullup_slew) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_##_hsm, \
+ .schmitt = TEGRA_SCHMITT_##_schmitt, \
+ .drive = TEGRA_DRIVE_##_drive, \
+ .pull_down = TEGRA_PULL_##_pulldn_drive, \
+ .pull_up = TEGRA_PULL_##_pullup_drive, \
+ .slew_rising = TEGRA_SLEW_##_pulldn_slew, \
+ .slew_falling = TEGRA_SLEW_##_pullup_slew, \
+ }
+
+/* !!!FIXME!!!! POPULATE THIS TABLE */
+static __initdata struct tegra_drive_pingroup_config enterprise_drive_pinmux[] = {
+ /* DEFAULT_DRIVE(<pin_group>), */
+ /* SET_DRIVE(ATA, DISABLE, DISABLE, DIV_1, 31, 31, FAST, FAST) */
+
+ /* All I2C pins are driven to maximum drive strength */
+ /* GEN1 I2C */
+ SET_DRIVE(DBG, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* GEN2 I2C */
+ SET_DRIVE(AT5, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* CAM I2C */
+ SET_DRIVE(GME, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* DDC I2C */
+ SET_DRIVE(DDC, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* PWR_I2C */
+ SET_DRIVE(AO1, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+
+ /* UART3 */
+ SET_DRIVE(UART3, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+};
+
+#define DEFAULT_PINMUX(_pingroup, _mux, _pupd, _tri, _io) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_DEFAULT, \
+ .od = TEGRA_PIN_OD_DEFAULT, \
+ .ioreset = TEGRA_PIN_IO_RESET_DEFAULT, \
+ }
+
+#define I2C_PINMUX(_pingroup, _mux, _pupd, _tri, _io, _lock, _od) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_##_lock, \
+ .od = TEGRA_PIN_OD_##_od, \
+ .ioreset = TEGRA_PIN_IO_RESET_DEFAULT, \
+ }
+
+#define CEC_PINMUX(_pingroup, _mux, _pupd, _tri, _io, _lock, _od) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_##_lock, \
+ .od = TEGRA_PIN_OD_##_od, \
+ .ioreset = TEGRA_PIN_IO_RESET_DEFAULT, \
+ }
+
+#define VI_PINMUX(_pingroup, _mux, _pupd, _tri, _io, _lock, _ioreset) \
+ { \
+ .pingroup = TEGRA_PINGROUP_##_pingroup, \
+ .func = TEGRA_MUX_##_mux, \
+ .pupd = TEGRA_PUPD_##_pupd, \
+ .tristate = TEGRA_TRI_##_tri, \
+ .io = TEGRA_PIN_##_io, \
+ .lock = TEGRA_PIN_LOCK_##_lock, \
+ .od = TEGRA_PIN_OD_DEFAULT, \
+ .ioreset = TEGRA_PIN_IO_RESET_##_ioreset \
+ }
+
+static __initdata struct tegra_pingroup_config enterprise_pinmux[] = {
+ /* SDMMC1 pinmux */
+ DEFAULT_PINMUX(SDMMC1_CLK, SDMMC1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_CMD, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT3, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT2, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT1, SDMMC1, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC1_DAT0, SDMMC1, PULL_UP, NORMAL, INPUT),
+
+ /* SDMMC3 pinmux */
+ DEFAULT_PINMUX(SDMMC3_CLK, SDMMC3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_CMD, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT0, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT1, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT2, SDMMC3, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT3, SDMMC3, PULL_UP, NORMAL, INPUT),
+
+ /* SDMMC4 pinmux */
+ DEFAULT_PINMUX(SDMMC4_CLK, SDMMC4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_CMD, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT0, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT1, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT2, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT3, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT4, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT5, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT6, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_DAT7, SDMMC4, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SDMMC4_RST_N, RSVD1, PULL_DOWN, NORMAL, INPUT),
+
+ /* I2C1 pinmux */
+ I2C_PINMUX(GEN1_I2C_SCL, I2C1, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(GEN1_I2C_SDA, I2C1, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* I2C2 pinmux */
+ I2C_PINMUX(GEN2_I2C_SCL, I2C2, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(GEN2_I2C_SDA, I2C2, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* I2C3 pinmux */
+ I2C_PINMUX(CAM_I2C_SCL, I2C3, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(CAM_I2C_SDA, I2C3, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ /* I2C4 pinmux */
+ I2C_PINMUX(DDC_SCL, I2C4, PULL_UP, NORMAL, INPUT, DISABLE, DISABLE),
+ I2C_PINMUX(DDC_SDA, I2C4, PULL_UP, NORMAL, INPUT, DISABLE, DISABLE),
+
+ /* Power I2C pinmux */
+ I2C_PINMUX(PWR_I2C_SCL, I2CPWR, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+ I2C_PINMUX(PWR_I2C_SDA, I2CPWR, NORMAL, NORMAL, INPUT, DISABLE, ENABLE),
+
+ DEFAULT_PINMUX(ULPI_DATA0, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA1, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA2, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA3, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA4, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA5, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA6, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DATA7, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_CLK, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(ULPI_DIR, ULPI, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_NXT, ULPI, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(ULPI_STP, ULPI, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_FS, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_DIN, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_DOUT, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP3_SCLK, I2S2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PV2, RSVD1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PV3, RSVD1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_PWR1, DISPLAYA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_PWR2, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS0_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DC0, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_DE, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D0, DISPLAYA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_D1, DISPLAYA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_D2, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D3, DISPLAYA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_D4, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D5, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D6, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D7, RSVD1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_D8, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D9, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D11, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D12, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D13, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D14, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D15, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D16, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D17, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D18, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D19, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D20, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D21, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D22, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_D23, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_CS1_N, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(LCD_M1, DISPLAYA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(LCD_DC1, DISPLAYA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D1, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D2, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D3, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D4, VI, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(VI_D5, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D7, SDMMC2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_D10, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(VI_MCLK, VI, PULL_UP, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(UART2_RXD, IRDA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART2_TXD, IRDA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART2_RTS_N, UARTB, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART2_CTS_N, UARTB, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_TXD, UARTC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(UART3_RXD, UARTC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_CTS_N, UARTC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(UART3_RTS_N, UARTC, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU0, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU1, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU2, UARTA, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU3, UARTA, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU4, PWM1, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PU5, PWM2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PU6, PWM3, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(DAP4_FS, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_DIN, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_DOUT, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP4_SCLK, I2S3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_AD8, PWM0, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD9, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD10, NAND, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_A16, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(GMI_A17, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A18, UARTD, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GMI_A19, UARTD, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(CAM_MCLK, VI_ALT2, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PCC1, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB0, RSVD1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB3, VGP3, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PBB7, I2S4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PCC2, I2S4, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(JTAG_RTCK, RTCK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW0, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW1, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW2, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW3, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW10, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_ROW12, KBC, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL0, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL1, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL2, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL3, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL4, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(KB_COL5, KBC, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(GPIO_PV0, RSVD, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK_32K_OUT, BLINK, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SYS_CLK_REQ, SYSCLK, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(OWR, OWR, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_FS, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DIN, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_DOUT, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP1_SCLK, I2S0, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK1_REQ, DAP, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(CLK1_OUT, EXTPERIPH1, NORMAL, NORMAL, INPUT),
+#if 0 /* For HDA Realtek codec */
+ DEFAULT_PINMUX(SPDIF_IN, DAP2, PULL_DOWN, NORMAL, INPUT),
+#else
+ DEFAULT_PINMUX(SPDIF_IN, SPDIF, NORMAL, NORMAL, INPUT),
+#endif
+#if 0 /* For HDA Realtek codec */
+ DEFAULT_PINMUX(DAP2_FS, HDA, PULL_DOWN, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DIN, HDA, PULL_DOWN, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DOUT, HDA, PULL_DOWN, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_SCLK, HDA, PULL_DOWN, NORMAL, INPUT),
+#else
+ DEFAULT_PINMUX(DAP2_FS, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DIN, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_DOUT, I2S1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(DAP2_SCLK, I2S1, NORMAL, NORMAL, INPUT),
+#endif
+ DEFAULT_PINMUX(SPI2_CS1_N, SPI2, PULL_UP, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_MOSI, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_SCK, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(SPI1_MISO, SPI1, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L0_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L0_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_WAKE_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L1_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L1_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_PRSNT_N, PCIE, NORMAL, NORMAL, INPUT),
+ DEFAULT_PINMUX(PEX_L2_RST_N, PCIE, NORMAL, NORMAL, OUTPUT),
+ DEFAULT_PINMUX(PEX_L2_CLKREQ_N, PCIE, NORMAL, NORMAL, INPUT),
+ CEC_PINMUX(HDMI_CEC, CEC, NORMAL, TRISTATE, OUTPUT, DEFAULT, DISABLE),
+ DEFAULT_PINMUX(HDMI_INT, RSVD0, NORMAL, TRISTATE, INPUT),
+
+ /* GPIOs */
+ /* SDMMC1 CD gpio */
+ DEFAULT_PINMUX(GMI_IORDY, RSVD1, PULL_UP, NORMAL, INPUT),
+ /* SDMMC1 WP gpio */
+ DEFAULT_PINMUX(VI_D11, RSVD1, PULL_UP, NORMAL, INPUT),
+
+ /* Touch panel GPIO */
+ /* Touch IRQ */
+ DEFAULT_PINMUX(GMI_AD12, NAND, NORMAL, NORMAL, INPUT),
+
+ /* Touch RESET */
+ DEFAULT_PINMUX(GMI_AD14, NAND, NORMAL, NORMAL, INPUT),
+
+ DEFAULT_PINMUX(GMI_AD15, NAND, PULL_UP, TRISTATE, INPUT),
+
+ /* Power rails GPIO */
+ DEFAULT_PINMUX(KB_ROW8, KBC, PULL_UP, NORMAL, INPUT),
+
+ VI_PINMUX(VI_D6, VI, NORMAL, NORMAL, OUTPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_D8, SDMMC2, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_D9, SDMMC2, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_PCLK, RSVD1, PULL_UP, TRISTATE, INPUT, DISABLE, ENABLE),
+ VI_PINMUX(VI_HSYNC, RSVD1, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+ VI_PINMUX(VI_VSYNC, RSVD1, NORMAL, NORMAL, INPUT, DISABLE, DISABLE),
+};
+
+static __initdata struct tegra_pingroup_config enterprise_unused_pinmux[] = {
+ DEFAULT_PINMUX(CLK2_OUT, EXTPERIPH2, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(CLK2_REQ, DAP, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(CLK3_OUT, EXTPERIPH3, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(CLK3_REQ, DEV3, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(CLK_32K_OUT, BLINK, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PBB4, VGP4, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PBB5, VGP5, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GPIO_PBB6, VGP6, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD0, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD1, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD2, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD3, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD4, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD5, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD6, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD7, GMI, NORMAL, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_AD11, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS0_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS2_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS3_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS6_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_CS7_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_DQS, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_RST_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_WAIT, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(GMI_WP_N, GMI, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW6, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW7, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW9, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW11, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW13, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW14, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(KB_ROW15, KBC, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_PCLK, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_WR_N, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_HSYNC, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_VSYNC, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_D10, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_PWR0, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_SCK, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_SDOUT, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(LCD_SDIN, DISPLAYA, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(CRT_HSYNC, CRT, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(CRT_VSYNC, CRT, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT4, SDMMC3, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT5, SDMMC3, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT6, SDMMC3, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SDMMC3_DAT7, SDMMC3, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SPDIF_OUT, SPDIF, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SPI1_CS0_N, SPI1, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SPI2_SCK, SPI2, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SPI2_CS0_N, SPI2, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SPI2_MOSI, SPI2, PULL_DOWN, TRISTATE, OUTPUT),
+ DEFAULT_PINMUX(SPI2_MISO, SPI2, PULL_DOWN, TRISTATE, OUTPUT),
+};
+
+static struct tegra_gpio_table gpio_table[] = {
+ { .gpio = TEGRA_GPIO_HP_DET, .enable = true },
+};
+
+struct pin_info_low_power_mode {
+ char name[16];
+ int gpio_nr;
+ bool is_gpio;
+ bool is_input;
+ int value; /* Value if it is output */
+};
+
+#define PIN_GPIO_LPM(_name, _gpio, _is_input, _value) \
+ { \
+ .name = _name, \
+ .gpio_nr = _gpio, \
+ .is_gpio = true, \
+ .is_input = _is_input, \
+ .value = _value, \
+ }
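+
+/*
+ * For illustration: an entry below such as
+ * PIN_GPIO_LPM("CLK2_OUT", TEGRA_GPIO_PW5, 0, 0) marks the CLK2_OUT pad
+ * as a GPIO (is_gpio = true) to be configured as an output (is_input = 0)
+ * driven to 0, which is how enterprise_set_unused_pin_gpio() parks the
+ * unused pins for low power.
+ */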
+static __initdata struct pin_info_low_power_mode enterprise_unused_gpio_pins[] = {
+ PIN_GPIO_LPM("CLK2_OUT", TEGRA_GPIO_PW5, 0, 0),
+ PIN_GPIO_LPM("CLK2_REQ", TEGRA_GPIO_PCC5, 0, 0),
+ PIN_GPIO_LPM("CLK3_OUT", TEGRA_GPIO_PEE0, 0, 0),
+ PIN_GPIO_LPM("CLK3_REQ", TEGRA_GPIO_PEE1, 0, 0),
+ PIN_GPIO_LPM("CLK_32K_OUT", TEGRA_GPIO_PA0, 0, 0),
+ PIN_GPIO_LPM("GPIO_PBB4", TEGRA_GPIO_PBB4, 0, 0),
+ PIN_GPIO_LPM("GPIO_PBB5", TEGRA_GPIO_PBB5, 0, 0),
+ PIN_GPIO_LPM("GPIO_PBB6", TEGRA_GPIO_PBB6, 0, 0),
+ PIN_GPIO_LPM("GMI_AD0", TEGRA_GPIO_PG0, 0, 0),
+ PIN_GPIO_LPM("GMI_AD1", TEGRA_GPIO_PG1, 0, 0),
+ PIN_GPIO_LPM("GMI_AD2", TEGRA_GPIO_PG2, 0, 0),
+ PIN_GPIO_LPM("GMI_AD3", TEGRA_GPIO_PG3, 0, 0),
+ PIN_GPIO_LPM("GMI_AD4", TEGRA_GPIO_PG4, 0, 0),
+ PIN_GPIO_LPM("GMI_AD5", TEGRA_GPIO_PG5, 0, 0),
+ PIN_GPIO_LPM("GMI_AD6", TEGRA_GPIO_PG6, 0, 0),
+ PIN_GPIO_LPM("GMI_AD7", TEGRA_GPIO_PG7, 0, 0),
+ PIN_GPIO_LPM("GMI_AD11", TEGRA_GPIO_PH3, 0, 0),
+ PIN_GPIO_LPM("GMI_CS0_N", TEGRA_GPIO_PJ0, 0, 0),
+ PIN_GPIO_LPM("GMI_CS2_N", TEGRA_GPIO_PK3, 0, 0),
+ PIN_GPIO_LPM("GMI_CS3_N", TEGRA_GPIO_PK4, 0, 0),
+ PIN_GPIO_LPM("GMI_CS6_N", TEGRA_GPIO_PI3, 0, 0),
+ PIN_GPIO_LPM("GMI_CS7_N", TEGRA_GPIO_PI6, 0, 0),
+ PIN_GPIO_LPM("GMI_DQS", TEGRA_GPIO_PI2, 0, 0),
+ PIN_GPIO_LPM("GMI_RST_N", TEGRA_GPIO_PI4, 0, 0),
+ PIN_GPIO_LPM("GMI_WAIT", TEGRA_GPIO_PI7, 0, 0),
+ PIN_GPIO_LPM("GMI_WP_N", TEGRA_GPIO_PC7, 0, 0),
+ PIN_GPIO_LPM("KB_ROW6", TEGRA_GPIO_PR6, 0, 0),
+ PIN_GPIO_LPM("KB_ROW7", TEGRA_GPIO_PR7, 0, 0),
+ PIN_GPIO_LPM("KB_ROW9", TEGRA_GPIO_PS1, 0, 0),
+ PIN_GPIO_LPM("KB_ROW11", TEGRA_GPIO_PS3, 0, 0),
+ PIN_GPIO_LPM("KB_ROW13", TEGRA_GPIO_PS5, 0, 0),
+ PIN_GPIO_LPM("KB_ROW14", TEGRA_GPIO_PS6, 0, 0),
+ PIN_GPIO_LPM("KB_ROW15", TEGRA_GPIO_PS7, 0, 0),
+ PIN_GPIO_LPM("LCD_PCLK", TEGRA_GPIO_PB3, 0, 0),
+ PIN_GPIO_LPM("LCD_WR_N", TEGRA_GPIO_PZ3, 0, 0),
+ PIN_GPIO_LPM("LCD_HSYNC", TEGRA_GPIO_PJ3, 0, 0),
+ PIN_GPIO_LPM("LCD_VSYNC", TEGRA_GPIO_PJ4, 0, 0),
+ PIN_GPIO_LPM("LCD_D10", TEGRA_GPIO_PF2, 0, 0),
+ PIN_GPIO_LPM("LCD_PWR0", TEGRA_GPIO_PB2, 0, 0),
+ PIN_GPIO_LPM("LCD_SCK", TEGRA_GPIO_PZ4, 0, 0),
+ PIN_GPIO_LPM("LCD_SDOUT", TEGRA_GPIO_PN5, 0, 0),
+ PIN_GPIO_LPM("LCD_SDIN", TEGRA_GPIO_PZ2, 0, 0),
+ PIN_GPIO_LPM("CRT_HSYNC", TEGRA_GPIO_PV6, 0, 0),
+ PIN_GPIO_LPM("CRT_VSYNC", TEGRA_GPIO_PV7, 0, 0),
+ PIN_GPIO_LPM("SDMMC3_DAT4", TEGRA_GPIO_PD1, 0, 0),
+ PIN_GPIO_LPM("SDMMC3_DAT5", TEGRA_GPIO_PD0, 0, 0),
+ PIN_GPIO_LPM("SDMMC3_DAT6", TEGRA_GPIO_PD3, 0, 0),
+ PIN_GPIO_LPM("SDMMC3_DAT7", TEGRA_GPIO_PD4, 0, 0),
+ PIN_GPIO_LPM("SPDIF_OUT", TEGRA_GPIO_PK5, 0, 0),
+ PIN_GPIO_LPM("SPI1_CS0_N", TEGRA_GPIO_PX6, 0, 0),
+ PIN_GPIO_LPM("SPI2_SCK", TEGRA_GPIO_PX2, 0, 0),
+ PIN_GPIO_LPM("SPI2_CS0_N", TEGRA_GPIO_PX3, 0, 0),
+ PIN_GPIO_LPM("SPI2_MOSI", TEGRA_GPIO_PX0, 0, 0),
+ PIN_GPIO_LPM("SPI2_MISO", TEGRA_GPIO_PX1, 0, 0),
+};
+
+static void enterprise_set_unused_pin_gpio(struct pin_info_low_power_mode *lpm_pin_info,
+ int list_count)
+{
+ int i;
+ struct pin_info_low_power_mode *pin_info;
+ int ret;
+
+ for (i = 0; i < list_count; ++i) {
+ pin_info = &lpm_pin_info[i];
+ if (!pin_info->is_gpio)
+ continue;
+
+ ret = gpio_request(pin_info->gpio_nr, pin_info->name);
+ if (ret < 0) {
+ pr_err("%s() Error in gpio_request() for gpio %d\n",
+ __func__, pin_info->gpio_nr);
+ continue;
+ }
+ if (pin_info->is_input)
+ ret = gpio_direction_input(pin_info->gpio_nr);
+ else
+ ret = gpio_direction_output(pin_info->gpio_nr,
+ pin_info->value);
+ if (ret < 0) {
+ pr_err("%s() Error in setting gpio %d to in/out\n",
+ __func__, pin_info->gpio_nr);
+ gpio_free(pin_info->gpio_nr);
+ continue;
+ }
+ tegra_gpio_enable(pin_info->gpio_nr);
+ }
+}
+
+int __init enterprise_pinmux_init(void)
+{
+ tegra_pinmux_config_table(enterprise_pinmux, ARRAY_SIZE(enterprise_pinmux));
+ tegra_drive_pinmux_config_table(enterprise_drive_pinmux,
+ ARRAY_SIZE(enterprise_drive_pinmux));
+ tegra_pinmux_config_table(enterprise_unused_pinmux,
+ ARRAY_SIZE(enterprise_unused_pinmux));
+
+ tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ enterprise_set_unused_pin_gpio(enterprise_unused_gpio_pins,
+ ARRAY_SIZE(enterprise_unused_gpio_pins));
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-enterprise-power.c b/arch/arm/mach-tegra/board-enterprise-power.c
new file mode 100644
index 000000000000..98f653a8c668
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-power.c
@@ -0,0 +1,615 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-power.c
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/gpio-switch-regulator.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/regulator/tps80031-regulator.h>
+#include <linux/tps80031-charger.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/cpumask.h>
+#include <linux/platform_data/tegra_bpc_mgmt.h>
+
+#include <mach/edp.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/tsensor.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-enterprise.h"
+#include "pm.h"
+#include "wakeups-t3.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+#define PMC_DPD_PADS_ORIDE 0x01c
+#define PMC_DPD_PADS_ORIDE_BLINK (1 << 20)
+
+/************************ TPS80031 based regulator ****************/
+static struct regulator_consumer_supply tps80031_vio_supply[] = {
+ REGULATOR_SUPPLY("vio_1v8", NULL),
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vddio_sys", NULL),
+ REGULATOR_SUPPLY("vddio_uart", NULL),
+ REGULATOR_SUPPLY("pwrdet_uart", NULL),
+ REGULATOR_SUPPLY("vddio_lcd", NULL),
+ REGULATOR_SUPPLY("pwrdet_lcd", NULL),
+ REGULATOR_SUPPLY("vddio_audio", NULL),
+ REGULATOR_SUPPLY("pwrdet_audio", NULL),
+ REGULATOR_SUPPLY("vddio_bb", NULL),
+ REGULATOR_SUPPLY("pwrdet_bb", NULL),
+ REGULATOR_SUPPLY("vddio_gmi", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+ REGULATOR_SUPPLY("vddio_cam", NULL),
+ REGULATOR_SUPPLY("pwrdet_cam", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc1", NULL),
+ REGULATOR_SUPPLY("pwrdet_sdmmc1", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc4", NULL),
+ REGULATOR_SUPPLY("pwrdet_sdmmc4", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+ REGULATOR_SUPPLY("vddio_gps", NULL),
+ REGULATOR_SUPPLY("vdd_lcd_buffered", NULL),
+ REGULATOR_SUPPLY("vddio_nand", NULL),
+ REGULATOR_SUPPLY("pwrdet_nand", NULL),
+ REGULATOR_SUPPLY("vddio_sd", NULL),
+ REGULATOR_SUPPLY("vdd_bat", NULL),
+ REGULATOR_SUPPLY("vdd_io", NULL),
+ REGULATOR_SUPPLY("pwrdet_pex_ctl", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_smps1_supply[] = {
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_smps2_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_smps3_supply[] = {
+ REGULATOR_SUPPLY("en_vddio_ddr_1v2", NULL),
+ REGULATOR_SUPPLY("vddio_ddr", NULL),
+ REGULATOR_SUPPLY("vdd_lpddr", NULL),
+ REGULATOR_SUPPLY("ddr_comp_pu", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_smps4_supply[] = {
+ REGULATOR_SUPPLY("vddio_sdmmc_2v85", NULL),
+ REGULATOR_SUPPLY("pwrdet_sdmmc3", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_vana_supply[] = {
+ REGULATOR_SUPPLY("unused_vana", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo1_supply[] = {
+ REGULATOR_SUPPLY("avdd_dsi_csi", NULL),
+ REGULATOR_SUPPLY("pwrdet_mipi", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo2_supply[] = {
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo3_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbrtr", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo4_supply[] = {
+ REGULATOR_SUPPLY("avdd_lcd", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo5_supply[] = {
+ REGULATOR_SUPPLY("vdd_sensor", NULL),
+ REGULATOR_SUPPLY("vdd_compass", NULL),
+ REGULATOR_SUPPLY("vdd_als", NULL),
+ REGULATOR_SUPPLY("vdd_gyro", NULL),
+ REGULATOR_SUPPLY("vdd_touch", NULL),
+ REGULATOR_SUPPLY("vdd_proxim_diode", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo6_supply[] = {
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("vddf_core_emmc", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldo7_supply[] = {
+ REGULATOR_SUPPLY("vdd_plla_p_c_s", NULL),
+ REGULATOR_SUPPLY("vdd_pllm", NULL),
+ REGULATOR_SUPPLY("vdd_pllu_d", NULL),
+ REGULATOR_SUPPLY("vdd_pllx", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldoln_supply[] = {
+ REGULATOR_SUPPLY("vdd_ddr_hs", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_ldousb_supply[] = {
+ REGULATOR_SUPPLY("unused_ldousb", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_vbus_supply[] = {
+ REGULATOR_SUPPLY("usb_vbus", NULL),
+};
+
+static struct regulator_consumer_supply tps80031_battery_charge_supply[] = {
+ REGULATOR_SUPPLY("usb_bat_chg", NULL),
+};
+
+#define TPS_PDATA_INIT(_id, _minmv, _maxmv, _supply_reg, _always_on, \
+ _boot_on, _apply_uv, _init_uV, _init_enable, _init_apply, \
+ _flags, _ectrl, _delay) \
+ static struct tps80031_regulator_platform_data pdata_##_id = { \
+ .regulator = { \
+ .constraints = { \
+ .min_uV = (_minmv)*1000, \
+ .max_uV = (_maxmv)*1000, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _always_on, \
+ .boot_on = _boot_on, \
+ .apply_uV = _apply_uv, \
+ }, \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(tps80031_##_id##_supply), \
+ .consumer_supplies = tps80031_##_id##_supply, \
+ .supply_regulator = _supply_reg, \
+ }, \
+ .init_uV = _init_uV * 1000, \
+ .init_enable = _init_enable, \
+ .init_apply = _init_apply, \
+ .flags = _flags, \
+ .ext_ctrl_flag = _ectrl, \
+ .delay_us = _delay, \
+ }
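+
+/*
+ * For illustration, reading one of the entries below against the macro
+ * arguments above: TPS_PDATA_INIT(ldo2, 1000, 3300, 0, 1, 1, 1, 1000, 1,
+ * 1, 0, 0, 0) constrains LDO2 to 1000..3300 mV, marks it always_on and
+ * boot_on with apply_uV set, and requests 1000 mV to be applied and the
+ * rail enabled at init; flags, ext_ctrl_flag and delay_us are all 0.
+ */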
+
+TPS_PDATA_INIT(vio, 600, 2100, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0);
+TPS_PDATA_INIT(smps1, 600, 2100, 0, 0, 0, 0, -1, 0, 0, 0, PWR_REQ_INPUT_PREQ2 | PWR_OFF_ON_SLEEP, 0);
+TPS_PDATA_INIT(smps2, 600, 2100, 0, 0, 0, 0, -1, 0, 0, 0, PWR_REQ_INPUT_PREQ1, 0);
+TPS_PDATA_INIT(smps3, 600, 2100, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0);
+TPS_PDATA_INIT(smps4, 600, 2100, 0, 0, 0, 0, -1, 0, 0, 0, PWR_REQ_INPUT_PREQ1, 0);
+TPS_PDATA_INIT(ldo1, 1000, 3300, tps80031_rails(VIO), 0, 0, 0, -1, 0, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo2, 1000, 3300, 0, 1, 1, 1, 1000, 1, 1, 0, 0, 0);
+TPS_PDATA_INIT(ldo3, 1000, 3300, tps80031_rails(VIO), 0, 0, 0, -1, 0, 0, 0, PWR_OFF_ON_SLEEP, 0);
+TPS_PDATA_INIT(ldo4, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo5, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0);
+TPS_PDATA_INIT(ldo6, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, PWR_REQ_INPUT_PREQ1, 0);
+TPS_PDATA_INIT(ldo7, 1000, 3300, tps80031_rails(VIO), 0, 0, 0, -1, 0, 0, 0, PWR_REQ_INPUT_PREQ1, 0);
+TPS_PDATA_INIT(ldoln, 1000, 3300, tps80031_rails(SMPS3), 0, 0, 0, -1, 0, 0, 0, PWR_REQ_INPUT_PREQ1, 0);
+TPS_PDATA_INIT(ldousb, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, USBLDO_INPUT_VSYS, PWR_OFF_ON_SLEEP, 0);
+TPS_PDATA_INIT(vana, 1000, 3300, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0);
+TPS_PDATA_INIT(vbus, 0, 5000, 0, 0, 0, 0, -1, 0, 0, (VBUS_SW_ONLY | VBUS_DISCHRG_EN_PDN), 0, 100000);
+
+static struct tps80031_rtc_platform_data rtc_data = {
+ .irq = ENT_TPS80031_IRQ_BASE + TPS80031_INT_RTC_ALARM,
+ .time = {
+ .tm_year = 2011,
+ .tm_mon = 0,
+ .tm_mday = 1,
+ .tm_hour = 1,
+ .tm_min = 2,
+ .tm_sec = 3,
+ },
+};
+
+static int battery_charger_init(void *board_data)
+{
+ int ret;
+ ret = gpio_request(TEGRA_GPIO_PF6, "lcd_d14-bat_charge");
+ if (ret < 0)
+ pr_err("%s() gpio_request for battery charger failed\n",
+ __func__);
+ gpio_direction_output(TEGRA_GPIO_PF6, 1);
+ tegra_gpio_enable(TEGRA_GPIO_PF6);
+ return 0;
+}
+
+static struct tps80031_charger_platform_data bcharger_pdata = {
+ .max_charge_volt_mV = 4100,
+ .max_charge_current_mA = 1000,
+ .charging_term_current_mA = 100,
+ .watch_time_sec = 100,
+ .irq_base = ENT_TPS80031_IRQ_BASE,
+ .consumer_supplies = tps80031_battery_charge_supply,
+ .num_consumer_supplies = ARRAY_SIZE(tps80031_battery_charge_supply),
+ .board_init = battery_charger_init,
+ .board_data = NULL,
+};
+
+static struct tps80031_bg_platform_data battery_gauge_data = {
+ .irq_base = ENT_TPS80031_IRQ_BASE,
+ .battery_present = 1,
+};
+
+#define TPS_RTC() \
+ { \
+ .id = 0, \
+ .name = "rtc_tps80031", \
+ .platform_data = &rtc_data, \
+ }
+
+#define TPS_REG(_id, _data) \
+ { \
+ .id = TPS80031_ID_##_id, \
+ .name = "tps80031-regulator", \
+ .platform_data = &pdata_##_data, \
+ }
+#define TPS_BATTERY() \
+ { \
+ .name = "tps80031-charger", \
+ .platform_data = &bcharger_pdata, \
+ }
+#define TPS_BATTERY_GAUGE() \
+ { \
+ .name = "tps80031-battery-gauge", \
+ .platform_data = &battery_gauge_data, \
+ }
+#define TPS_GPADC() \
+ { \
+ .name = "tps80031-gpadc", \
+ }
+
+static struct tps80031_subdev_info tps80031_devs[] = {
+ TPS_REG(VIO, vio),
+ TPS_REG(SMPS1, smps1),
+ TPS_REG(SMPS2, smps2),
+ TPS_REG(SMPS3, smps3),
+ TPS_REG(SMPS4, smps4),
+ TPS_REG(LDO1, ldo1),
+ TPS_REG(LDO2, ldo2),
+ TPS_REG(LDO3, ldo3),
+ TPS_REG(LDO4, ldo4),
+ TPS_REG(LDO5, ldo5),
+ TPS_REG(LDO6, ldo6),
+ TPS_REG(LDO7, ldo7),
+ TPS_REG(LDOLN, ldoln),
+ TPS_REG(LDOUSB, ldousb),
+ TPS_REG(VANA, vana),
+ TPS_REG(VBUS, vbus),
+ TPS_RTC(),
+ TPS_BATTERY(),
+ TPS_BATTERY_GAUGE(),
+ TPS_GPADC(),
+};
+
+struct tps80031_clk32k_init_data clk32k_idata[] = {
+ {
+ .clk32k_nr = TPS80031_CLOCK32K_G,
+ .enable = true,
+ .ext_ctrl_flag = 0,
+ },
+ {
+ .clk32k_nr = TPS80031_CLOCK32K_AUDIO,
+ .enable = true,
+ .ext_ctrl_flag = PWR_REQ_INPUT_PREQ1,
+ },
+};
+
+static struct tps80031_platform_data tps_platform = {
+ .num_subdevs = ARRAY_SIZE(tps80031_devs),
+ .subdevs = tps80031_devs,
+ .irq_base = ENT_TPS80031_IRQ_BASE,
+ .gpio_base = ENT_TPS80031_GPIO_BASE,
+ .clk32k_init_data = clk32k_idata,
+ .clk32k_init_data_size = ARRAY_SIZE(clk32k_idata),
+};
+
+static struct i2c_board_info __initdata enterprise_regulators[] = {
+ {
+ I2C_BOARD_INFO("tps80031", 0x4A),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &tps_platform,
+ },
+};
+
+/************************ GPIO based switch regulator ****************/
+
+/* REGEN1 from PMU */
+static struct regulator_consumer_supply gpio_switch_pmu_5v15_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_5v15", NULL),
+};
+static int gpio_switch_pmu_5v15_en_voltages[] = {5000};
+
+/* REGEN2 from PMU */
+static struct regulator_consumer_supply gpio_switch_pmu_3v3_en_supply[] = {
+ REGULATOR_SUPPLY("avdd_usb_hdmi_3v3", NULL),
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+ REGULATOR_SUPPLY("vdd", "4-004c"),
+};
+static int gpio_switch_pmu_3v3_en_voltages[] = {3300};
+
+/* SYSEN from PMU */
+static struct regulator_consumer_supply gpio_switch_pmu_hdmi_5v0_en_supply[] = {
+ REGULATOR_SUPPLY("hdmi_5v0", NULL),
+};
+static int gpio_switch_pmu_hdmi_5v0_en_voltages[] = {5000};
+
+/* LCD-D16 (GPIO M0) from T30 */
+static struct regulator_consumer_supply gpio_switch_vdd_fuse_en_supply[] = {
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+static int gpio_switch_vdd_fuse_en_voltages[] = {3300};
+
+/* LCD-D17 (GPIO M1) from T30 */
+static struct regulator_consumer_supply gpio_switch_sdmmc3_vdd_sel_supply[] = {
+ REGULATOR_SUPPLY("vddio_sdmmc3_2v85_1v8", NULL),
+ REGULATOR_SUPPLY("sdmmc3_compu_pu", NULL),
+ REGULATOR_SUPPLY("vddio_sdmmc3", NULL),
+ REGULATOR_SUPPLY("vsys_3v7", NULL),
+};
+static int gpio_switch_sdmmc3_vdd_sel_voltages[] = {2850};
+
+/* LCD-D23 (GPIO M7) from T30 */
+/* 2-0036 is dev_name of ar0832 in Enterprise A01 */
+/* 2-0032 is an alternative dev_name of ar0832 in Enterprise A01 */
+/* 2-0010 is dev_name of ov9726 */
+/* 2-0070 is dev_name of PCA9546 in Enterprise A02 */
+/* 6-0036 is dev_name of ar0832 in Enterprise A02 */
+/* 7-0036 is dev_name of ar0832 in Enterprise A02 */
+static struct regulator_consumer_supply gpio_switch_cam_ldo_2v8_en_supply[] = {
+ REGULATOR_SUPPLY("vaa", "2-0036"),
+ REGULATOR_SUPPLY("vaa", "2-0032"),
+ REGULATOR_SUPPLY("avdd", "2-0010"),
+ REGULATOR_SUPPLY("vdd_2v8_cam", NULL),
+ REGULATOR_SUPPLY("vcc", "2-0070"),
+ REGULATOR_SUPPLY("vaa", "6-0036"),
+ REGULATOR_SUPPLY("vaa", "7-0036"),
+};
+static int gpio_switch_cam_ldo_2v8_en_voltages[] = {2800};
+
+/* LCD-D9 (GPIO F1) from T30 */
+/* 2-0036 is dev_name of ar0832 in Enterprise A01 */
+/* 2-0032 is an alternative dev_name of ar0832 in Enterprise A01 */
+/* 2-0010 is dev_name of ov9726 */
+/* 2-0033 is dev_name of tps61050 */
+/* 2-0070 is dev_name of PCA9546 in Enterprise A02 */
+/* 6-0036 is dev_name of ar0832 in Enterprise A02 */
+/* 7-0036 is dev_name of ar0832 in Enterprise A02 */
+static struct regulator_consumer_supply gpio_switch_cam_ldo_1v8_en_supply[] = {
+ REGULATOR_SUPPLY("vdd", "2-0036"),
+ REGULATOR_SUPPLY("vdd", "2-0032"),
+ REGULATOR_SUPPLY("dovdd", "2-0010"),
+ REGULATOR_SUPPLY("vdd_1v8_cam", NULL),
+ REGULATOR_SUPPLY("vdd_i2c", "2-0033"),
+ REGULATOR_SUPPLY("vcc_i2c", "2-0070"),
+ REGULATOR_SUPPLY("vdd", "6-0036"),
+ REGULATOR_SUPPLY("vdd", "7-0036"),
+};
+static int gpio_switch_cam_ldo_1v8_en_voltages[] = {1800};
+
+/* Macro for defining gpio switch regulator sub device data */
+#define GREG_INIT(_id, _name, _input_supply, _gpio_nr, _active_low, \
+ _init_state, _pg, _enable, _disable) \
+ static struct gpio_switch_regulator_subdev_data gpio_pdata_##_name = \
+ { \
+ .regulator_name = "gpio-switch-"#_name, \
+ .input_supply = _input_supply, \
+ .id = _id, \
+ .gpio_nr = _gpio_nr, \
+ .pin_group = _pg, \
+ .active_low = _active_low, \
+ .init_state = _init_state, \
+ .voltages = gpio_switch_##_name##_voltages, \
+ .n_voltages = ARRAY_SIZE(gpio_switch_##_name##_voltages), \
+ .num_consumer_supplies = \
+ ARRAY_SIZE(gpio_switch_##_name##_supply), \
+ .consumer_supplies = gpio_switch_##_name##_supply, \
+ .constraints = { \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ }, \
+ .enable_rail = _enable, \
+ .disable_rail = _disable, \
+ }
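+
+/*
+ * For illustration, reading one entry against the macro arguments above:
+ * GREG_INIT(1, pmu_3v3_en, "vdd_5v15", ENT_TPS80031_GPIO_REGEN2, false,
+ * 0, 0, 0, 0) defines sub device 1 as regulator "gpio-switch-pmu_3v3_en",
+ * supplied from "vdd_5v15" and switched by the PMU REGEN2 GPIO, active
+ * high, with init_state 0 and no pin group or enable/disable rail hooks.
+ */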
+
+GREG_INIT(0, pmu_5v15_en, NULL, ENT_TPS80031_GPIO_REGEN1, false, 0, 0, 0, 0);
+GREG_INIT(1, pmu_3v3_en, "vdd_5v15", ENT_TPS80031_GPIO_REGEN2, false, 0, 0, 0, 0);
+GREG_INIT(2, pmu_hdmi_5v0_en, "vdd_5v15", ENT_TPS80031_GPIO_SYSEN, false, 0, 0, 0, 0);
+
+GREG_INIT(3, vdd_fuse_en, "avdd_usb_hdmi_3v3", TEGRA_GPIO_PM0, false, 0, 0, 0, 0);
+GREG_INIT(4, sdmmc3_vdd_sel, "vddio_sdmmc_2v85", TEGRA_GPIO_PM1, false, 0, 0, 0, 0);
+GREG_INIT(5, cam_ldo_2v8_en, NULL, TEGRA_GPIO_PM7, false, 0, 0, 0, 0);
+GREG_INIT(6, cam_ldo_1v8_en, NULL, TEGRA_GPIO_PF1, false, 0, 0, 0, 0);
+
+#define ADD_GPIO_REG(_name) (&gpio_pdata_##_name)
+static struct gpio_switch_regulator_subdev_data *gswitch_subdevs[] = {
+ ADD_GPIO_REG(pmu_5v15_en),
+ ADD_GPIO_REG(pmu_3v3_en),
+ ADD_GPIO_REG(pmu_hdmi_5v0_en),
+ ADD_GPIO_REG(vdd_fuse_en),
+ ADD_GPIO_REG(sdmmc3_vdd_sel),
+ ADD_GPIO_REG(cam_ldo_2v8_en),
+ ADD_GPIO_REG(cam_ldo_1v8_en),
+};
+
+static struct gpio_switch_regulator_platform_data gswitch_pdata = {
+ .num_subdevs = ARRAY_SIZE(gswitch_subdevs),
+ .subdevs = gswitch_subdevs,
+};
+
+static struct platform_device gswitch_regulator_pdata = {
+ .name = "gpio-switch-regulator",
+ .id = -1,
+ .dev = {
+ .platform_data = &gswitch_pdata,
+ },
+};
+
+static int __init enterprise_gpio_switch_regulator_init(void)
+{
+ int i;
+ for (i = 0; i < gswitch_pdata.num_subdevs; ++i) {
+ struct gpio_switch_regulator_subdev_data *gswitch_data =
+ gswitch_pdata.subdevs[i];
+ if (gswitch_data->gpio_nr < TEGRA_NR_GPIOS)
+ tegra_gpio_enable(gswitch_data->gpio_nr);
+ }
+ return platform_device_register(&gswitch_regulator_pdata);
+}
+
+static void enterprise_power_off(void)
+{
+ int ret;
+ pr_info("enterprise: Powering off the device\n");
+ ret = tps80031_power_off();
+ if (ret)
+ pr_err("enterprise: failed to power off\n");
+ while (1);
+}
+
+void __init enterprise_tsensor_init(void)
+{
+ tegra3_tsensor_init(NULL);
+}
+
+int __init enterprise_regulator_init(void)
+{
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ u32 pmc_ctrl;
+ u32 pmc_dpd_pads;
+
+ /* configure the power management controller to trigger PMU
+ * interrupts when low */
+
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ pmc_dpd_pads = readl(pmc + PMC_DPD_PADS_ORIDE);
+ writel(pmc_dpd_pads & ~PMC_DPD_PADS_ORIDE_BLINK, pmc + PMC_DPD_PADS_ORIDE);
+
+ /* Disable battery charging if power adapter is connected. */
+ if (get_power_supply_type() == POWER_SUPPLY_TYPE_MAINS) {
+ bcharger_pdata.num_consumer_supplies = 0;
+ bcharger_pdata.consumer_supplies = NULL;
+ battery_gauge_data.battery_present = 0;
+ }
+
+ i2c_register_board_info(4, enterprise_regulators,
+ ARRAY_SIZE(enterprise_regulators));
+ enterprise_gpio_switch_regulator_init();
+ pm_power_off = enterprise_power_off;
+
+ return 0;
+}
+
+static void enterprise_board_suspend(int lp_state, enum suspend_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_SUSPEND_BEFORE_CPU))
+ tegra_console_uart_suspend();
+}
+
+static void enterprise_board_resume(int lp_state, enum resume_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_RESUME_AFTER_CPU))
+ tegra_console_uart_resume();
+}
+
+static struct tegra_suspend_platform_data enterprise_suspend_data = {
+ .cpu_timer = 2000,
+ .cpu_off_timer = 200,
+ .suspend_mode = TEGRA_SUSPEND_LP0,
+ .core_timer = 0x7e7e,
+ .core_off_timer = 0,
+ .corereq_high = true,
+ .sysclkreq_high = true,
+ .board_suspend = enterprise_board_suspend,
+ .board_resume = enterprise_board_resume,
+};
+
+static void enterprise_init_deep_sleep_mode(void)
+{
+ struct board_info bi;
+ tegra_get_board_info(&bi);
+
+ if (bi.board_id == BOARD_E1205 && bi.fab == BOARD_FAB_A01)
+ enterprise_suspend_data.suspend_mode = TEGRA_SUSPEND_LP1;
+
+ if ((bi.board_id == BOARD_E1205 && (bi.sku & BOARD_SKU_VF_BIT) == 0) ||
+ (bi.board_id == BOARD_E1197 && (bi.sku & BOARD_SKU_VF_BIT)))
+ enterprise_suspend_data.cpu_timer = 8000;
+}
+
+int __init enterprise_suspend_init(void)
+{
+ enterprise_init_deep_sleep_mode();
+ tegra_init_suspend(&enterprise_suspend_data);
+ return 0;
+}
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+
+int __init enterprise_edp_init(void)
+{
+ unsigned int regulator_mA;
+
+ regulator_mA = get_maximum_cpu_current_supported();
+ if (!regulator_mA) {
+ regulator_mA = 2500; /* regular AP30 */
+ }
+ pr_info("%s: CPU regulator %d mA\n", __func__, regulator_mA);
+
+ tegra_init_cpu_edp_limits(regulator_mA);
+ tegra_init_system_edp_limits(TEGRA_BPC_CPU_PWR_LIMIT);
+ return 0;
+}
+#endif
+
+static struct tegra_bpc_mgmt_platform_data bpc_mgmt_platform_data = {
+ .gpio_trigger = TEGRA_BPC_TRIGGER,
+ .bpc_mgmt_timeout = TEGRA_BPC_TIMEOUT,
+};
+
+static struct platform_device enterprise_bpc_mgmt_device = {
+ .name = "tegra-bpc-mgmt",
+ .id = -1,
+ .dev = {
+ .platform_data = &bpc_mgmt_platform_data,
+ },
+};
+
+void __init enterprise_bpc_mgmt_init(void)
+{
+ int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);
+
+ tegra_gpio_enable(TEGRA_BPC_TRIGGER);
+
+#ifdef CONFIG_SMP
+ cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));
+ irq_set_affinity_hint(int_gpio,
+ &(bpc_mgmt_platform_data.affinity_mask));
+ irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
+#endif
+ platform_device_register(&enterprise_bpc_mgmt_device);
+}
diff --git a/arch/arm/mach-tegra/board-enterprise-sdhci.c b/arch/arm/mach-tegra/board-enterprise-sdhci.c
new file mode 100644
index 000000000000..af1a9ea3dd59
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-sdhci.c
@@ -0,0 +1,269 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-sdhci.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/wlan_plat.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+
+#include "gpio-names.h"
+#include "board.h"
+
+
+#define ENTERPRISE_WLAN_PWR TEGRA_GPIO_PV2
+#define ENTERPRISE_WLAN_RST TEGRA_GPIO_PV3
+#define ENTERPRISE_WLAN_WOW TEGRA_GPIO_PU6
+#define ENTERPRISE_SD_CD TEGRA_GPIO_PI5
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+static int enterprise_wifi_status_register(void (*callback)(int, void *), void *);
+
+static int enterprise_wifi_reset(int on);
+static int enterprise_wifi_power(int on);
+static int enterprise_wifi_set_carddetect(int val);
+
+static struct wifi_platform_data enterprise_wifi_control = {
+ .set_power = enterprise_wifi_power,
+ .set_reset = enterprise_wifi_reset,
+ .set_carddetect = enterprise_wifi_set_carddetect,
+};
+
+static struct resource wifi_resource[] = {
+ [0] = {
+ .name = "bcm4329_wlan_irq",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device enterprise_wifi_device = {
+ .name = "bcm4329_wlan",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wifi_resource),
+ .resource = wifi_resource,
+ .dev = {
+ .platform_data = &enterprise_wifi_control,
+ },
+};
+
+static struct resource sdhci_resource0[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct embedded_sdio_data embedded_sdio_data0 = {
+ .cccr = {
+ .sdio_vsn = 2,
+ .multi_block = 1,
+ .low_speed = 0,
+ .wide_bus = 0,
+ .high_power = 1,
+ .high_speed = 1,
+ },
+ .cis = {
+ .vendor = 0x02d0,
+ .device = 0x4329,
+ },
+};
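+
+/*
+ * The CIS vendor/device pair above (0x02d0/0x4329) matches the Broadcom
+ * BCM4329 SDIO WLAN chip behind the bcm4329_wlan device defined above.
+ */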
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data0 = {
+ .mmc_data = {
+ .register_status_notify = enterprise_wifi_status_register,
+ .embedded_sdio = &embedded_sdio_data0,
+ /* FIXME: revert the built_in change once we get the
+ * signal strength fix of the bcmdhd driver from
+ * Broadcom for the bcm4329 chipset */
+ .built_in = 0,
+ },
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .max_clk_limit = 45000000,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .is_8bit = 1,
+ .mmc_data = {
+ .built_in = 1,
+ }
+};
+
+static struct platform_device tegra_sdhci_device0 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource0,
+ .num_resources = ARRAY_SIZE(sdhci_resource0),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data0,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data3,
+ },
+};
+
+static int enterprise_wifi_status_register(
+ void (*callback)(int card_present, void *dev_id),
+ void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static int enterprise_wifi_set_carddetect(int val)
+{
+ pr_debug("%s: %d\n", __func__, val);
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ pr_warning("%s: Nobody to notify\n", __func__);
+ return 0;
+}
+
+static int enterprise_wifi_power(int on)
+{
+ pr_debug("%s: %d\n", __func__, on);
+ gpio_set_value(ENTERPRISE_WLAN_PWR, on);
+ mdelay(100);
+ gpio_set_value(ENTERPRISE_WLAN_RST, on);
+ mdelay(200);
+
+ return 0;
+}
+
+static int enterprise_wifi_reset(int on)
+{
+ pr_debug("%s: do nothing\n", __func__);
+ return 0;
+}
+
+static int __init enterprise_wifi_init(void)
+{
+ int rc;
+
+ rc = gpio_request(ENTERPRISE_WLAN_PWR, "wlan_power");
+ if (rc)
+ pr_err("WLAN_PWR gpio request failed:%d\n", rc);
+ rc = gpio_request(ENTERPRISE_WLAN_RST, "wlan_rst");
+ if (rc)
+ pr_err("WLAN_RST gpio request failed:%d\n", rc);
+ rc = gpio_request(ENTERPRISE_WLAN_WOW, "bcmsdh_sdmmc");
+ if (rc)
+ pr_err("WLAN_WOW gpio request failed:%d\n", rc);
+
+ tegra_gpio_enable(ENTERPRISE_WLAN_PWR);
+ tegra_gpio_enable(ENTERPRISE_WLAN_RST);
+ tegra_gpio_enable(ENTERPRISE_WLAN_WOW);
+
+ rc = gpio_direction_output(ENTERPRISE_WLAN_PWR, 0);
+ if (rc)
+ pr_err("WLAN_PWR gpio direction configuration failed:%d\n", rc);
+ rc = gpio_direction_output(ENTERPRISE_WLAN_RST, 0);
+ if (rc)
+ pr_err("WLAN_RST gpio direction configuration failed:%d\n", rc);
+ rc = gpio_direction_input(ENTERPRISE_WLAN_WOW);
+ if (rc)
+ pr_err("WLAN_WOW gpio direction configuration failed:%d\n", rc);
+
+ platform_device_register(&enterprise_wifi_device);
+ return 0;
+}
+
+int __init enterprise_sdhci_init(void)
+{
+ platform_device_register(&tegra_sdhci_device3);
+
+ tegra_gpio_enable(ENTERPRISE_SD_CD);
+ tegra_sdhci_platform_data2.cd_gpio = ENTERPRISE_SD_CD;
+ platform_device_register(&tegra_sdhci_device2);
+
+ platform_device_register(&tegra_sdhci_device0);
+ enterprise_wifi_init();
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-enterprise-sensors.c b/arch/arm/mach-tegra/board-enterprise-sensors.c
new file mode 100644
index 000000000000..68544692bcee
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise-sensors.c
@@ -0,0 +1,664 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise-sensors.c
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/i2c/pca954x.h>
+#include <linux/nct1008.h>
+#include <linux/err.h>
+#include <linux/mpu.h>
+#include <linux/platform_data/ina230.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <mach/gpio.h>
+#include <media/ar0832_main.h>
+#include <media/tps61050.h>
+#include <media/ov9726.h>
+#include <mach/edp.h>
+#include <mach/thermal.h>
+#include "cpu-tegra.h"
+#include "gpio-names.h"
+#include "board-enterprise.h"
+#include "board.h"
+
+#ifndef CONFIG_TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+static int nct_get_temp(void *_data, long *temp)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_get_temp(data, temp);
+}
+
+static int nct_get_temp_low(void *_data, long *temp)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_get_temp_low(data, temp);
+}
+
+static int nct_set_limits(void *_data,
+ long lo_limit_milli,
+ long hi_limit_milli)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_set_limits(data,
+ lo_limit_milli,
+ hi_limit_milli);
+}
+
+static int nct_set_alert(void *_data,
+ void (*alert_func)(void *),
+ void *alert_data)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_set_alert(data, alert_func, alert_data);
+}
+
+static int nct_set_shutdown_temp(void *_data, long shutdown_temp)
+{
+ struct nct1008_data *data = _data;
+ return nct1008_thermal_set_shutdown_temp(data,
+ shutdown_temp);
+}
+
+static void nct1008_probe_callback(struct nct1008_data *data)
+{
+ struct tegra_thermal_device *thermal_device;
+
+ thermal_device = kzalloc(sizeof(struct tegra_thermal_device),
+ GFP_KERNEL);
+ if (!thermal_device) {
+ pr_err("unable to allocate thermal device\n");
+ return;
+ }
+
+ thermal_device->name = "nct1008";
+ thermal_device->data = data;
+ thermal_device->offset = TDIODE_OFFSET;
+ thermal_device->get_temp = nct_get_temp;
+ thermal_device->get_temp_low = nct_get_temp_low;
+ thermal_device->set_limits = nct_set_limits;
+ thermal_device->set_alert = nct_set_alert;
+ thermal_device->set_shutdown_temp = nct_set_shutdown_temp;
+
+ tegra_thermal_set_device(thermal_device);
+}
+#endif
+
+static struct nct1008_platform_data enterprise_nct1008_pdata = {
+ .supported_hwrev = true,
+ .ext_range = true,
+ .conv_rate = 0x08,
+ .offset = 8, /* 4 * 2C. Bug 844025 - 1C for device accuracies */
+#ifndef CONFIG_TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+ .probe_callback = nct1008_probe_callback,
+#endif
+};
+
+static struct i2c_board_info enterprise_i2c4_nct1008_board_info[] = {
+ {
+ I2C_BOARD_INFO("nct1008", 0x4C),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PH7),
+ .platform_data = &enterprise_nct1008_pdata,
+ }
+};
+
+static void enterprise_nct1008_init(void)
+{
+ int ret;
+
+ tegra_gpio_enable(TEGRA_GPIO_PH7);
+ ret = gpio_request(TEGRA_GPIO_PH7, "temp_alert");
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(TEGRA_GPIO_PH7);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(TEGRA_GPIO_PH7);
+ return;
+ }
+
+ i2c_register_board_info(4, enterprise_i2c4_nct1008_board_info,
+ ARRAY_SIZE(enterprise_i2c4_nct1008_board_info));
+}
+
+static struct mpu_platform_data mpu3050_data = {
+ .int_config = 0x10,
+ .level_shifter = 0,
+ .orientation = MPU_GYRO_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct ext_slave_platform_data mpu3050_accel_data = {
+ .address = MPU_ACCEL_ADDR,
+ .irq = 0,
+ .adapt_num = MPU_ACCEL_BUS_NUM,
+ .bus = EXT_SLAVE_BUS_SECONDARY,
+ .orientation = MPU_ACCEL_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct ext_slave_platform_data mpu_compass_data = {
+ .address = MPU_COMPASS_ADDR,
+ .irq = 0,
+ .adapt_num = MPU_COMPASS_BUS_NUM,
+ .bus = EXT_SLAVE_BUS_PRIMARY,
+ .orientation = MPU_COMPASS_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct i2c_board_info __initdata inv_mpu_i2c2_board_info[] = {
+ {
+ I2C_BOARD_INFO(MPU_GYRO_NAME, MPU_GYRO_ADDR),
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_GYRO_IRQ_GPIO),
+ .platform_data = &mpu3050_data,
+ },
+ {
+ I2C_BOARD_INFO(MPU_ACCEL_NAME, MPU_ACCEL_ADDR),
+#if MPU_ACCEL_IRQ_GPIO
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_ACCEL_IRQ_GPIO),
+#endif
+ .platform_data = &mpu3050_accel_data,
+ },
+ {
+ I2C_BOARD_INFO(MPU_COMPASS_NAME, MPU_COMPASS_ADDR),
+#if MPU_COMPASS_IRQ_GPIO
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_COMPASS_IRQ_GPIO),
+#endif
+ .platform_data = &mpu_compass_data,
+ },
+};
+
+static void mpuirq_init(void)
+{
+ int ret = 0;
+
+ pr_info("*** MPU START *** mpuirq_init...\n");
+
+#if MPU_ACCEL_IRQ_GPIO
+ /* ACCEL-IRQ assignment */
+ tegra_gpio_enable(MPU_ACCEL_IRQ_GPIO);
+ ret = gpio_request(MPU_ACCEL_IRQ_GPIO, MPU_ACCEL_NAME);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(MPU_ACCEL_IRQ_GPIO);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(MPU_ACCEL_IRQ_GPIO);
+ return;
+ }
+#endif
+
+ /* MPU-IRQ assignment */
+ tegra_gpio_enable(MPU_GYRO_IRQ_GPIO);
+ ret = gpio_request(MPU_GYRO_IRQ_GPIO, MPU_GYRO_NAME);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(MPU_GYRO_IRQ_GPIO);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(MPU_GYRO_IRQ_GPIO);
+ return;
+ }
+ pr_info("*** MPU END *** mpuirq_init...\n");
+
+ i2c_register_board_info(MPU_GYRO_BUS_NUM, inv_mpu_i2c2_board_info,
+ ARRAY_SIZE(inv_mpu_i2c2_board_info));
+}
+
+static inline void enterprise_msleep(u32 t)
+{
+ /*
+ * For delays between 10us and 20ms, usleep_range() is recommended;
+ * see Documentation/timers/timers-howto.txt.
+ */
+ usleep_range(t * 1000, t * 1000 + 500);
+}
+
+static struct i2c_board_info enterprise_i2c0_isl_board_info[] = {
+ {
+ I2C_BOARD_INFO("isl29028", 0x44),
+ }
+};
+
+static void enterprise_isl_init(void)
+{
+ i2c_register_board_info(0, enterprise_i2c0_isl_board_info,
+ ARRAY_SIZE(enterprise_i2c0_isl_board_info));
+}
+
+enum CAMERA_INDEX {
+ CAM_REAR_LEFT,
+ CAM_REAR_RIGHT,
+ CAM_FRONT,
+ NUM_OF_CAM
+};
+
+struct enterprise_power_rail {
+ struct regulator *cam_reg;
+ struct regulator *csi_reg;
+};
+
+static struct enterprise_power_rail ent_vicsi_pwr[NUM_OF_CAM];
+
+static int enterprise_cam_pwr(enum CAMERA_INDEX cam, bool pwr_on)
+{
+ struct enterprise_power_rail *reg_cam = &ent_vicsi_pwr[cam];
+ int ret = 0;
+
+ /*
+ * SW must turn on 1.8V first then 2.8V
+ * SW must turn off 2.8V first then 1.8V
+ */
+ if (pwr_on) {
+ if (reg_cam->csi_reg == NULL) {
+ reg_cam->csi_reg = regulator_get(NULL,
+ "avdd_dsi_csi");
+ if (IS_ERR_OR_NULL(reg_cam->csi_reg)) {
+ pr_err("%s: csi pwr err\n", __func__);
+ ret = PTR_ERR(reg_cam->csi_reg);
+ goto enterprise_cam_pwr_fail;
+ }
+ }
+
+ ret = regulator_enable(reg_cam->csi_reg);
+ if (ret) {
+ pr_err("%s: enable csi pwr err\n", __func__);
+ goto enterprise_cam_pwr_fail;
+ }
+
+ if (reg_cam->cam_reg == NULL) {
+ reg_cam->cam_reg = regulator_get(NULL,
+ "vddio_cam");
+ if (IS_ERR_OR_NULL(reg_cam->cam_reg)) {
+ pr_err("%s: vddio pwr err\n", __func__);
+ ret = PTR_ERR(reg_cam->cam_reg);
+ regulator_disable(reg_cam->csi_reg);
+ goto enterprise_cam_pwr_fail;
+ }
+ }
+
+ ret = regulator_enable(reg_cam->cam_reg);
+ if (ret) {
+ pr_err("%s: enable vddio pwr err\n", __func__);
+ regulator_disable(reg_cam->csi_reg);
+ goto enterprise_cam_pwr_fail;
+ }
+ } else {
+ if (reg_cam->cam_reg)
+ regulator_disable(reg_cam->cam_reg);
+
+ if (reg_cam->csi_reg)
+ regulator_disable(reg_cam->csi_reg);
+ }
+ return 0;
+
+enterprise_cam_pwr_fail:
+ if (!IS_ERR_OR_NULL(reg_cam->cam_reg))
+ regulator_put(reg_cam->cam_reg);
+ reg_cam->cam_reg = NULL;
+
+ if (!IS_ERR_OR_NULL(reg_cam->csi_reg))
+ regulator_put(reg_cam->csi_reg);
+ reg_cam->csi_reg = NULL;
+
+ return ret;
+}
+
+static int enterprise_ar0832_ri_power_on(int is_stereo)
+{
+ int ret = 0;
+
+ pr_info("%s: ++\n", __func__);
+ ret = enterprise_cam_pwr(CAM_REAR_RIGHT, true);
+
+ /* Release Reset */
+ if (is_stereo) {
+ gpio_set_value(CAM1_RST_L_GPIO, 1);
+ gpio_set_value(CAM2_RST_L_GPIO, 1);
+ } else {
+ gpio_set_value(CAM1_RST_L_GPIO, 1);
+ }
+ /*
+ * It takes 2400 EXTCLK cycles for the ar0832 to be ready for I2C.
+ * EXTCLK is 10-24 MHz, so 1 ms is enough to cover at least
+ * 2400 EXTCLK cycles across that frequency range.
+ */
+ enterprise_msleep(1);
+
+ return ret;
+}
+
+static int enterprise_ar0832_le_power_on(int is_stereo)
+{
+ int ret = 0;
+
+ pr_info("%s: ++\n", __func__);
+ ret = enterprise_cam_pwr(CAM_REAR_LEFT, true);
+
+ /* Release Reset */
+ gpio_set_value(CAM2_RST_L_GPIO, 1);
+
+ /*
+ * It takes 2400 EXTCLK cycles for the ar0832 to be ready for I2C.
+ * EXTCLK is 10-24 MHz, so 1 ms is enough to cover at least
+ * 2400 EXTCLK cycles across that frequency range.
+ */
+ enterprise_msleep(1);
+
+ /* CSI B is shared between Front camera and Rear Left camera */
+ gpio_set_value(CAM_CSI_MUX_SEL_GPIO, 1);
+
+ return ret;
+}
+
+static int enterprise_ar0832_ri_power_off(int is_stereo)
+{
+ int ret;
+
+ pr_info("%s: ++\n", __func__);
+ ret = enterprise_cam_pwr(CAM_REAR_RIGHT, false);
+
+ /* Assert Reset */
+ if (is_stereo) {
+ gpio_set_value(CAM1_RST_L_GPIO, 0);
+ gpio_set_value(CAM2_RST_L_GPIO, 0);
+ } else {
+ gpio_set_value(CAM1_RST_L_GPIO, 0);
+ }
+
+ return ret;
+}
+
+static int enterprise_ar0832_le_power_off(int is_stereo)
+{
+ int ret;
+
+ pr_info("%s: ++\n", __func__);
+ ret = enterprise_cam_pwr(CAM_REAR_LEFT, false);
+
+ /* Assert Reset */
+ gpio_set_value(CAM2_RST_L_GPIO, 0);
+
+ return ret;
+}
+
+static int enterprise_ov9726_power_on(void)
+{
+ pr_info("ov9726 power on\n");
+
+ /* switch mipi mux to front camera */
+ gpio_set_value(CAM_CSI_MUX_SEL_GPIO, CAM_CSI_MUX_SEL_FRONT);
+ enterprise_cam_pwr(CAM_FRONT, true);
+
+ return 0;
+}
+
+static int enterprise_ov9726_power_off(void)
+{
+ pr_info("ov9726 power off\n");
+
+ enterprise_cam_pwr(CAM_FRONT, false);
+
+ return 0;
+}
+
+struct ov9726_platform_data enterprise_ov9726_data = {
+ .power_on = enterprise_ov9726_power_on,
+ .power_off = enterprise_ov9726_power_off,
+ .gpio_rst = CAM3_RST_L_GPIO,
+ .rst_low_active = true,
+ .gpio_pwdn = CAM3_PWDN_GPIO,
+ .pwdn_low_active = false,
+};
+
+static struct nvc_torch_pin_state enterprise_tps61050_pinstate = {
+ .mask = 0x0008, /*VGP3*/
+ .values = 0x0008,
+};
+
+static struct tps61050_platform_data enterprise_tps61050_pdata = {
+ .dev_name = "torch",
+ .pinstate = &enterprise_tps61050_pinstate,
+};
+
+
+struct enterprise_cam_gpio {
+ int gpio;
+ const char *label;
+ int value;
+};
+
+#define TEGRA_CAMERA_GPIO(_gpio, _label, _value) \
+ { \
+ .gpio = _gpio, \
+ .label = _label, \
+ .value = _value, \
+ }
+
+static struct enterprise_cam_gpio enterprise_cam_gpio_data[] = {
+ [0] = TEGRA_CAMERA_GPIO(CAM_CSI_MUX_SEL_GPIO, "cam_csi_sel", 1),
+ [1] = TEGRA_CAMERA_GPIO(CAM1_RST_L_GPIO, "cam1_rst_lo", 0),
+ [2] = TEGRA_CAMERA_GPIO(CAM2_RST_L_GPIO, "cam2_rst_lo", 0),
+ [3] = TEGRA_CAMERA_GPIO(CAM3_RST_L_GPIO, "cam3_rst_lo", 0),
+ [4] = TEGRA_CAMERA_GPIO(CAM3_PWDN_GPIO, "cam3_pwdn", 1),
+ [5] = TEGRA_CAMERA_GPIO(CAM_FLASH_EN_GPIO, "flash_en", 1),
+ [6] = TEGRA_CAMERA_GPIO(CAM_I2C_MUX_RST_EXP, "cam_i2c_mux_rst", 1),
+};
+
+static struct pca954x_platform_mode enterprise_pca954x_modes[] = {
+ { .adap_id = PCA954x_I2C_BUS0, .deselect_on_exit = true, },
+ { .adap_id = PCA954x_I2C_BUS1, .deselect_on_exit = true, },
+ { .adap_id = PCA954x_I2C_BUS2, .deselect_on_exit = true, },
+ { .adap_id = PCA954x_I2C_BUS3, .deselect_on_exit = true, },
+};
+
+static struct pca954x_platform_data enterprise_pca954x_data = {
+ .modes = enterprise_pca954x_modes,
+ .num_modes = ARRAY_SIZE(enterprise_pca954x_modes),
+};
+
+static struct ar0832_platform_data enterprise_ar0832_ri_data = {
+ .power_on = enterprise_ar0832_ri_power_on,
+ .power_off = enterprise_ar0832_ri_power_off,
+ .id = "right",
+};
+
+static struct ar0832_platform_data enterprise_ar0832_le_data = {
+ .power_on = enterprise_ar0832_le_power_on,
+ .power_off = enterprise_ar0832_le_power_off,
+ .id = "left",
+};
+
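
A minimal usage sketch (not taken from this patch) of how a sensor driver could invoke the board power hooks handed over in the two ar0832 platform-data structs above; the callback names and signatures are assumed from the assignments made in this file:

    static int example_ar0832_set_power(struct ar0832_platform_data *pdata,
                                        bool on, int is_stereo)
    {
            /* pdata->power_on/power_off are the board callbacks defined above */
            if (on)
                    return pdata->power_on ? pdata->power_on(is_stereo) : 0;

            return pdata->power_off ? pdata->power_off(is_stereo) : 0;
    }
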
+static const struct i2c_board_info enterprise_i2c2_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("pca9546", 0x70),
+ .platform_data = &enterprise_pca954x_data,
+ },
+ {
+ I2C_BOARD_INFO("tps61050", 0x33),
+ .platform_data = &enterprise_tps61050_pdata,
+ },
+ {
+ I2C_BOARD_INFO("ov9726", OV9726_I2C_ADDR >> 1),
+ .platform_data = &enterprise_ov9726_data,
+ },
+};
+
+/*
+ * Since the ar0832 driver supports multiple devices, the slave
+ * address is changed after the device is opened. The default slave
+ * address of the ar0832 is 0x36; it is switched to the alternate
+ * addresses defined below once the device has been opened.
+ */
+static struct i2c_board_info ar0832_i2c2_boardinfo[] = {
+ {
+		/* 0x36: default slave address */
+ I2C_BOARD_INFO("ar0832", 0x36),
+ .platform_data = &enterprise_ar0832_ri_data,
+ },
+ {
+ /* 0x32: alternative slave address */
+ I2C_BOARD_INFO("ar0832", 0x32),
+ .platform_data = &enterprise_ar0832_le_data,
+ },
+ {
+ I2C_BOARD_INFO("tps61050", 0x33),
+ .platform_data = &enterprise_tps61050_pdata,
+ },
+ {
+ I2C_BOARD_INFO("ov9726", OV9726_I2C_ADDR >> 1),
+ .platform_data = &enterprise_ov9726_data,
+ },
+};
+
+static struct i2c_board_info enterprise_i2c6_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("ar0832", 0x36),
+ .platform_data = &enterprise_ar0832_le_data,
+ },
+};
+
+static struct i2c_board_info enterprise_i2c7_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("ar0832", 0x36),
+ .platform_data = &enterprise_ar0832_ri_data,
+ },
+};
+
+static int enterprise_cam_init(void)
+{
+ int ret;
+ int i;
+ struct board_info bi;
+ struct board_info cam_bi;
+ bool i2c_mux = false;
+
+ pr_info("%s:++\n", __func__);
+ memset(ent_vicsi_pwr, 0, sizeof(ent_vicsi_pwr));
+ for (i = 0; i < ARRAY_SIZE(enterprise_cam_gpio_data); i++) {
+ ret = gpio_request(enterprise_cam_gpio_data[i].gpio,
+ enterprise_cam_gpio_data[i].label);
+ if (ret < 0) {
+			pr_err("%s: gpio_request failed for gpio %d (%s)\n",
+				__func__, enterprise_cam_gpio_data[i].gpio,
+				enterprise_cam_gpio_data[i].label);
+ goto fail_free_gpio;
+ }
+ gpio_direction_output(enterprise_cam_gpio_data[i].gpio,
+ enterprise_cam_gpio_data[i].value);
+ gpio_export(enterprise_cam_gpio_data[i].gpio, false);
+ tegra_gpio_enable(enterprise_cam_gpio_data[i].gpio);
+ }
+
+ tegra_get_board_info(&bi);
+ tegra_get_camera_board_info(&cam_bi);
+
+ if (bi.board_id == BOARD_E1205) {
+ if (bi.fab == BOARD_FAB_A00 || bi.fab == BOARD_FAB_A01)
+ i2c_mux = false;
+ else if (bi.fab == BOARD_FAB_A02)
+ i2c_mux = true;
+ } else if (bi.board_id == BOARD_E1197) {
+ if (cam_bi.fab == BOARD_FAB_A00)
+ i2c_mux = false;
+ else if (cam_bi.fab == BOARD_FAB_A01)
+ i2c_mux = true;
+ }
+
+ if (!i2c_mux)
+ i2c_register_board_info(2, ar0832_i2c2_boardinfo,
+ ARRAY_SIZE(ar0832_i2c2_boardinfo));
+ else {
+ i2c_register_board_info(2, enterprise_i2c2_boardinfo,
+ ARRAY_SIZE(enterprise_i2c2_boardinfo));
+ /*
+ * Right camera is on PCA954x's I2C BUS1,
+ * Left camera is on BUS0
+ */
+ i2c_register_board_info(PCA954x_I2C_BUS0, enterprise_i2c6_boardinfo,
+ ARRAY_SIZE(enterprise_i2c6_boardinfo));
+ i2c_register_board_info(PCA954x_I2C_BUS1, enterprise_i2c7_boardinfo,
+ ARRAY_SIZE(enterprise_i2c7_boardinfo));
+ }
+ return 0;
+
+fail_free_gpio:
+	pr_err("%s failed!\n", __func__);
+ while (i--)
+ gpio_free(enterprise_cam_gpio_data[i].gpio);
+ return ret;
+}
+
+#define ENTERPRISE_INA230_ENABLED 0
+
+#if ENTERPRISE_INA230_ENABLED
+static struct ina230_platform_data ina230_platform = {
+ .rail_name = "VDD_AC_BAT",
+ .current_threshold = TEGRA_CUR_MON_THRESHOLD,
+ .resistor = TEGRA_CUR_MON_RESISTOR,
+ .min_cores_online = TEGRA_CUR_MON_MIN_CORES,
+};
+
+static struct i2c_board_info enterprise_i2c0_ina230_info[] = {
+ {
+ I2C_BOARD_INFO("ina230", 0x42),
+ .platform_data = &ina230_platform,
+ .irq = -1,
+ },
+};
+
+static int __init enterprise_ina230_init(void)
+{
+ return i2c_register_board_info(0, enterprise_i2c0_ina230_info,
+ ARRAY_SIZE(enterprise_i2c0_ina230_info));
+}
+#endif
+
+int __init enterprise_sensors_init(void)
+{
+ int ret;
+
+ enterprise_isl_init();
+ enterprise_nct1008_init();
+ mpuirq_init();
+#if ENTERPRISE_INA230_ENABLED
+ enterprise_ina230_init();
+#endif
+ ret = enterprise_cam_init();
+
+ return ret;
+}
+
diff --git a/arch/arm/mach-tegra/board-enterprise.c b/arch/arm/mach-tegra/board-enterprise.c
new file mode 100644
index 000000000000..dcfbf708b9fc
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise.c
@@ -0,0 +1,1000 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/spi/spi.h>
+#include <linux/tegra_uart.h>
+#include <linux/fsl_devices.h>
+#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/memblock.h>
+
+#include <linux/nfc/pn544.h>
+#include <sound/max98088.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/usb_phy.h>
+#include <mach/i2s.h>
+#include <mach/tegra_max98088_pdata.h>
+#include <mach/thermal.h>
+#include <mach/tegra-bb-power.h>
+#include "board.h"
+#include "clock.h"
+#include "board-enterprise.h"
+#include "baseband-xmm-power.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "fuse.h"
+#include "pm.h"
+
+/* All units are in millicelsius */
+static struct tegra_thermal_data thermal_data = {
+ .temp_throttle = 85000,
+ .temp_shutdown = 90000,
+ .temp_offset = TDIODE_OFFSET, /* temps based on tdiode */
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ .edp_offset = TDIODE_OFFSET, /* edp based on tdiode */
+ .hysteresis_edp = 3000,
+#endif
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ .tc1 = 0,
+ .tc2 = 1,
+ .passive_delay = 2000,
+#else
+ .hysteresis_throttle = 1000,
+#endif
+};
+
+/* !!!TODO: Change for enterprise (Taken from Cardhu) */
+static struct tegra_utmip_config utmi_phy_config[] = {
+ [0] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [1] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [2] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+static struct resource enterprise_bcm4329_rfkill_resources[] = {
+ {
+ .name = "bcm4329_nshutdown_gpio",
+ .start = TEGRA_GPIO_PE6,
+ .end = TEGRA_GPIO_PE6,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct platform_device enterprise_bcm4329_rfkill_device = {
+ .name = "bcm4329_rfkill",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(enterprise_bcm4329_rfkill_resources),
+ .resource = enterprise_bcm4329_rfkill_resources,
+};
+
+static struct resource enterprise_bluesleep_resources[] = {
+ [0] = {
+ .name = "gpio_host_wake",
+ .start = TEGRA_GPIO_PS2,
+ .end = TEGRA_GPIO_PS2,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .name = "gpio_ext_wake",
+ .start = TEGRA_GPIO_PE7,
+ .end = TEGRA_GPIO_PE7,
+ .flags = IORESOURCE_IO,
+ },
+ [2] = {
+ .name = "host_wake",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS2),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS2),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ },
+};
+
+static struct platform_device enterprise_bluesleep_device = {
+ .name = "bluesleep",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(enterprise_bluesleep_resources),
+ .resource = enterprise_bluesleep_resources,
+};
+
+static void __init enterprise_setup_bluesleep(void)
+{
+ platform_device_register(&enterprise_bluesleep_device);
+ tegra_gpio_enable(TEGRA_GPIO_PS2);
+ tegra_gpio_enable(TEGRA_GPIO_PE7);
+ return;
+}
+
+static __initdata struct tegra_clk_init_table enterprise_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "pll_m", NULL, 0, false},
+ { "hda", "pll_p", 108000000, false},
+ { "hda2codec_2x","pll_p", 48000000, false},
+ { "pwm", "clk_32k", 32768, false},
+ { "blink", "clk_32k", 32768, true},
+ { "pll_a", NULL, 564480000, false},
+ { "pll_a_out0", NULL, 11289600, false},
+ { "i2s0", "pll_a_out0", 0, false},
+ { "i2s1", "pll_a_out0", 0, false},
+ { "i2s2", "pll_a_out0", 0, false},
+ { "i2s3", "pll_a_out0", 0, false},
+ { "spdif_out", "pll_a_out0", 0, false},
+ { "d_audio", "pll_a_out0", 0, false},
+ { "dam0", "pll_a_out0", 0, false},
+ { "dam1", "pll_a_out0", 0, false},
+ { "dam2", "pll_a_out0", 0, false},
+ { "audio0", "i2s0_sync", 0, false},
+ { "audio1", "i2s1_sync", 0, false},
+ { "audio2", "i2s2_sync", 0, false},
+ { "audio3", "i2s3_sync", 0, false},
+ { NULL, NULL, 0, 0},
+};
+
+static struct tegra_i2c_platform_data enterprise_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PC4, 0},
+ .sda_gpio = {TEGRA_GPIO_PC5, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data enterprise_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .is_clkon_always = true,
+ .scl_gpio = {TEGRA_GPIO_PT5, 0},
+ .sda_gpio = {TEGRA_GPIO_PT6, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data enterprise_i2c3_platform_data = {
+ .adapter_nr = 2,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PBB1, 0},
+ .sda_gpio = {TEGRA_GPIO_PBB2, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data enterprise_i2c4_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PV4, 0},
+ .sda_gpio = {TEGRA_GPIO_PV5, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data enterprise_i2c5_platform_data = {
+ .adapter_nr = 4,
+ .bus_count = 1,
+ .bus_clk_rate = { 100000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PZ6, 0},
+ .sda_gpio = {TEGRA_GPIO_PZ7, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+/*
+ * Equalizer filter coefficients generated with the Maxim MAX98088
+ * EVKIT software tool.
+ */
+static struct max98088_eq_cfg max98088_eq_cfg[] = {
+ {
+ .name = "FLAT",
+ .rate = 44100,
+ .band1 = {0x2000, 0xC002, 0x4000, 0x00E9, 0x0000},
+ .band2 = {0x2000, 0xC00F, 0x4000, 0x02BC, 0x0000},
+ .band3 = {0x2000, 0xC0A7, 0x4000, 0x0916, 0x0000},
+ .band4 = {0x2000, 0xC5C2, 0x4000, 0x1A87, 0x0000},
+ .band5 = {0x2000, 0xF6B0, 0x4000, 0x3F51, 0x0000},
+ },
+ {
+ .name = "LOWPASS1K",
+ .rate = 44100,
+ .band1 = {0x205D, 0xC001, 0x3FEF, 0x002E, 0x02E0},
+ .band2 = {0x5B9A, 0xC093, 0x3AB2, 0x088B, 0x1981},
+ .band3 = {0x0D22, 0xC170, 0x26EA, 0x0D79, 0x32CF},
+ .band4 = {0x0894, 0xC612, 0x01B3, 0x1B34, 0x3FFA},
+ .band5 = {0x0815, 0x3FFF, 0xCF78, 0x0000, 0x29B7},
+ },
+	{ /* BASS=-12dB, TREBLE=+9dB, Fc=5 kHz */
+ .name = "HIBOOST",
+ .rate = 44100,
+ .band1 = {0x0815, 0xC001, 0x3AA4, 0x0003, 0x19A2},
+ .band2 = {0x0815, 0xC103, 0x092F, 0x0B55, 0x3F56},
+ .band3 = {0x0E0A, 0xC306, 0x1E5C, 0x136E, 0x3856},
+ .band4 = {0x2459, 0xF665, 0x0CAA, 0x3F46, 0x3EBB},
+ .band5 = {0x5BBB, 0x3FFF, 0xCEB0, 0x0000, 0x28CA},
+ },
+ { /* BASS=12dB, TREBLE=+12dB */
+ .name = "LOUD12DB",
+ .rate = 44100,
+ .band1 = {0x7FC1, 0xC001, 0x3EE8, 0x0020, 0x0BC7},
+ .band2 = {0x51E9, 0xC016, 0x3C7C, 0x033F, 0x14E9},
+ .band3 = {0x1745, 0xC12C, 0x1680, 0x0C2F, 0x3BE9},
+ .band4 = {0x4536, 0xD7E2, 0x0ED4, 0x31DD, 0x3E42},
+ .band5 = {0x7FEF, 0x3FFF, 0x0BAB, 0x0000, 0x3EED},
+ },
+ {
+ .name = "FLAT",
+ .rate = 16000,
+ .band1 = {0x2000, 0xC004, 0x4000, 0x0141, 0x0000},
+ .band2 = {0x2000, 0xC033, 0x4000, 0x0505, 0x0000},
+ .band3 = {0x2000, 0xC268, 0x4000, 0x115F, 0x0000},
+ .band4 = {0x2000, 0xDA62, 0x4000, 0x33C6, 0x0000},
+ .band5 = {0x2000, 0x4000, 0x4000, 0x0000, 0x0000},
+ },
+ {
+ .name = "LOWPASS1K",
+ .rate = 16000,
+ .band1 = {0x2000, 0xC004, 0x4000, 0x0141, 0x0000},
+ .band2 = {0x5BE8, 0xC3E0, 0x3307, 0x15ED, 0x26A0},
+ .band3 = {0x0F71, 0xD15A, 0x08B3, 0x2BD0, 0x3F67},
+ .band4 = {0x0815, 0x3FFF, 0xCF78, 0x0000, 0x29B7},
+ .band5 = {0x0815, 0x3FFF, 0xCF78, 0x0000, 0x29B7},
+ },
+	{ /* BASS=-12dB, TREBLE=+9dB, Fc=2 kHz */
+ .name = "HIBOOST",
+ .rate = 16000,
+ .band1 = {0x0815, 0xC001, 0x3BD2, 0x0009, 0x16BF},
+ .band2 = {0x080E, 0xC17E, 0xF653, 0x0DBD, 0x3F43},
+ .band3 = {0x0F80, 0xDF45, 0xEE33, 0x36FE, 0x3D79},
+ .band4 = {0x590B, 0x3FF0, 0xE882, 0x02BD, 0x3B87},
+ .band5 = {0x4C87, 0xF3D0, 0x063F, 0x3ED4, 0x3FB1},
+ },
+ { /* BASS=12dB, TREBLE=+12dB */
+ .name = "LOUD12DB",
+ .rate = 16000,
+ .band1 = {0x7FC1, 0xC001, 0x3D07, 0x0058, 0x1344},
+ .band2 = {0x2DA6, 0xC013, 0x3CF1, 0x02FF, 0x138B},
+ .band3 = {0x18F1, 0xC08E, 0x244D, 0x0863, 0x34B5},
+ .band4 = {0x2BE0, 0xF385, 0x04FD, 0x3EC5, 0x3FCE},
+ .band5 = {0x7FEF, 0x4000, 0x0BAB, 0x0000, 0x3EED},
+ },
+};
+
+
+static struct max98088_pdata enterprise_max98088_pdata = {
+ /* equalizer configuration */
+ .eq_cfg = max98088_eq_cfg,
+ .eq_cfgcnt = ARRAY_SIZE(max98088_eq_cfg),
+
+ /* debounce time */
+ .debounce_time_ms = 200,
+
+ /* microphone configuration */
+ .digmic_left_mode = 1,
+ .digmic_right_mode = 1,
+
+ /* receiver output configuration */
+ .receiver_mode = 0, /* 0 = amplifier, 1 = line output */
+};
+
+static struct pn544_i2c_platform_data nfc_pdata = {
+ .irq_gpio = TEGRA_GPIO_PS4,
+ .ven_gpio = TEGRA_GPIO_PM6,
+ .firm_gpio = 0,
+};
+
+
+static struct i2c_board_info __initdata max98088_board_info = {
+ I2C_BOARD_INFO("max98088", 0x10),
+ .platform_data = &enterprise_max98088_pdata,
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_HP_DET),
+};
+
+static struct i2c_board_info __initdata nfc_board_info = {
+ I2C_BOARD_INFO("pn544", 0x28),
+ .platform_data = &nfc_pdata,
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS4),
+};
+
+static void enterprise_i2c_init(void)
+{
+ tegra_i2c_device1.dev.platform_data = &enterprise_i2c1_platform_data;
+ tegra_i2c_device2.dev.platform_data = &enterprise_i2c2_platform_data;
+ tegra_i2c_device3.dev.platform_data = &enterprise_i2c3_platform_data;
+ tegra_i2c_device4.dev.platform_data = &enterprise_i2c4_platform_data;
+ tegra_i2c_device5.dev.platform_data = &enterprise_i2c5_platform_data;
+
+ platform_device_register(&tegra_i2c_device5);
+ platform_device_register(&tegra_i2c_device4);
+ platform_device_register(&tegra_i2c_device3);
+ platform_device_register(&tegra_i2c_device2);
+ platform_device_register(&tegra_i2c_device1);
+
+ i2c_register_board_info(0, &max98088_board_info, 1);
+ i2c_register_board_info(0, &nfc_board_info, 1);
+}
+
+static struct platform_device *enterprise_uart_devices[] __initdata = {
+ &tegra_uarta_device,
+ &tegra_uartb_device,
+ &tegra_uartc_device,
+ &tegra_uartd_device,
+ &tegra_uarte_device,
+};
+
+static struct uart_clk_parent uart_parent_clk[] = {
+ [0] = {.name = "clk_m"},
+ [1] = {.name = "pll_p"},
+#ifndef CONFIG_TEGRA_PLLM_RESTRICTED
+ [2] = {.name = "pll_m"},
+#endif
+};
+static struct tegra_uart_platform_data enterprise_uart_pdata;
+
+static void __init uart_debug_init(void)
+{
+ unsigned long rate;
+ struct clk *c;
+
+ /* UARTD is the debug port. */
+ pr_info("Selecting UARTD as the debug console\n");
+ enterprise_uart_devices[3] = &debug_uartd_device;
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uartd_device.dev.platform_data))->mapbase;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartd");
+
+ /* Clock enable for the debug channel */
+ if (!IS_ERR_OR_NULL(debug_uart_clk)) {
+ rate = ((struct plat_serial8250_port *)(
+ debug_uartd_device.dev.platform_data))->uartclk;
+ pr_info("The debug console clock name is %s\n",
+ debug_uart_clk->name);
+ c = tegra_get_clock_by_name("pll_p");
+ if (IS_ERR_OR_NULL(c))
+			pr_err("Unable to get the parent clock pll_p\n");
+ else
+ clk_set_parent(debug_uart_clk, c);
+
+ clk_enable(debug_uart_clk);
+ clk_set_rate(debug_uart_clk, rate);
+	} else {
+		/* debug_uart_clk is an error pointer here; do not dereference it */
+		pr_err("Unable to get clock \"uartd\" for the debug console\n");
+	}
+}
+
+static void __init enterprise_uart_init(void)
+{
+ int i;
+ struct clk *c;
+
+ for (i = 0; i < ARRAY_SIZE(uart_parent_clk); ++i) {
+ c = tegra_get_clock_by_name(uart_parent_clk[i].name);
+ if (IS_ERR_OR_NULL(c)) {
+			pr_err("Unable to get parent clock %s\n",
+					uart_parent_clk[i].name);
+ continue;
+ }
+ uart_parent_clk[i].parent_clk = c;
+ uart_parent_clk[i].fixed_clk_rate = clk_get_rate(c);
+ }
+ enterprise_uart_pdata.parent_clk_list = uart_parent_clk;
+ enterprise_uart_pdata.parent_clk_count = ARRAY_SIZE(uart_parent_clk);
+ tegra_uarta_device.dev.platform_data = &enterprise_uart_pdata;
+ tegra_uartb_device.dev.platform_data = &enterprise_uart_pdata;
+ tegra_uartc_device.dev.platform_data = &enterprise_uart_pdata;
+ tegra_uartd_device.dev.platform_data = &enterprise_uart_pdata;
+ tegra_uarte_device.dev.platform_data = &enterprise_uart_pdata;
+
+ /* Register low speed only if it is selected */
+ if (!is_tegra_debug_uartport_hs())
+ uart_debug_init();
+
+ platform_add_devices(enterprise_uart_devices,
+ ARRAY_SIZE(enterprise_uart_devices));
+}
+
+
+
+static struct resource tegra_rtc_resources[] = {
+ [0] = {
+ .start = TEGRA_RTC_BASE,
+ .end = TEGRA_RTC_BASE + TEGRA_RTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_RTC,
+ .end = INT_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tegra_rtc_device = {
+ .name = "tegra_rtc",
+ .id = -1,
+ .resource = tegra_rtc_resources,
+ .num_resources = ARRAY_SIZE(tegra_rtc_resources),
+};
+
+static struct platform_device tegra_camera = {
+ .name = "tegra_camera",
+ .id = -1,
+};
+
+static struct tegra_max98088_platform_data enterprise_audio_pdata = {
+ .gpio_spkr_en = -1,
+ .gpio_hp_det = TEGRA_GPIO_HP_DET,
+ .gpio_hp_mute = -1,
+ .gpio_int_mic_en = -1,
+ .gpio_ext_mic_en = -1,
+ .audio_port_id = {
+ [HIFI_CODEC] = 0,
+ [BASEBAND] = 2,
+ [BT_SCO] = 3,
+ },
+ .baseband_param = {
+ .rate = 8000,
+ .channels = 1,
+ },
+};
+
+static struct platform_device enterprise_audio_device = {
+ .name = "tegra-snd-max98088",
+ .id = 0,
+ .dev = {
+ .platform_data = &enterprise_audio_pdata,
+ },
+};
+
+static struct resource ram_console_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ram_console_device = {
+ .name = "ram_console",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ram_console_resources),
+ .resource = ram_console_resources,
+};
+
+static struct platform_device *enterprise_devices[] __initdata = {
+ &tegra_pmu_device,
+ &tegra_rtc_device,
+ &tegra_udc_device,
+#if defined(CONFIG_TEGRA_IOVMM_SMMU)
+ &tegra_smmu_device,
+#endif
+ &tegra_wdt_device,
+#if defined(CONFIG_TEGRA_AVP)
+ &tegra_avp_device,
+#endif
+ &tegra_camera,
+ &enterprise_bcm4329_rfkill_device,
+ &tegra_spi_device4,
+ &tegra_hda_device,
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_SE)
+ &tegra_se_device,
+#endif
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_AES)
+ &tegra_aes_device,
+#endif
+ &ram_console_device,
+};
+
+#define MXT_CONFIG_CRC 0x62F903
+/*
+ * Config converted from a memory-mapped cfg-file with the
+ * following version information:
+ *
+ * FAMILY_ID=128
+ * VARIANT=1
+ * VERSION=32
+ * BUILD=170
+ * VENDOR_ID=255
+ * PRODUCT_ID=TBD
+ * CHECKSUM=0xC189B6
+ */
+
+static const u8 config[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0x32, 0x0A, 0x00, 0x05, 0x01, 0x00,
+ 0x00, 0x1E, 0x0A, 0x8B, 0x00, 0x00, 0x13, 0x0B,
+ 0x00, 0x10, 0x32, 0x03, 0x03, 0x00, 0x03, 0x01,
+ 0x00, 0x0A, 0x0A, 0x0A, 0x0A, 0xBF, 0x03, 0x1B,
+ 0x02, 0x00, 0x00, 0x37, 0x37, 0x00, 0x00, 0x00,
+ 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xA9, 0x7F, 0x9A, 0x0E, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x23, 0x00, 0x00, 0x00, 0x0A,
+ 0x0F, 0x14, 0x19, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x08, 0x10,
+ 0x00
+};
+
+static struct mxt_platform_data atmel_mxt_info = {
+ .x_line = 19,
+ .y_line = 11,
+ .x_size = 960,
+ .y_size = 540,
+ .blen = 0x10,
+ .threshold = 0x32,
+ .voltage = 3300000, /* 3.3V */
+ .orient = 3,
+ .config = config,
+ .config_length = 168,
+ .config_crc = MXT_CONFIG_CRC,
+ .irqflags = IRQF_TRIGGER_FALLING,
+/* .read_chg = &read_chg, */
+ .read_chg = NULL,
+};
+
+static struct i2c_board_info __initdata atmel_i2c_info[] = {
+ {
+ I2C_BOARD_INFO("atmel_mxt_ts", MXT224_I2C_ADDR1),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PH6),
+ .platform_data = &atmel_mxt_info,
+ }
+};
+
+static int __init enterprise_touch_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PH6);
+ tegra_gpio_enable(TEGRA_GPIO_PF5);
+
+ gpio_request(TEGRA_GPIO_PH6, "atmel-irq");
+ gpio_direction_input(TEGRA_GPIO_PH6);
+
+ gpio_request(TEGRA_GPIO_PF5, "atmel-reset");
+ gpio_direction_output(TEGRA_GPIO_PF5, 0);
+ msleep(1);
+ gpio_set_value(TEGRA_GPIO_PF5, 1);
+ msleep(100);
+
+ i2c_register_board_info(1, atmel_i2c_info, 1);
+
+ return 0;
+}
+
+static struct usb_phy_plat_data tegra_usb_phy_pdata[] = {
+ [0] = {
+ .instance = 0,
+ .vbus_gpio = -1,
+ .vbus_reg_supply = "usb_vbus",
+ .vbus_irq = ENT_TPS80031_IRQ_BASE +
+ TPS80031_INT_VBUS_DET,
+ },
+ [1] = {
+ .instance = 1,
+ .vbus_gpio = -1,
+ },
+ [2] = {
+ .instance = 2,
+ .vbus_gpio = -1,
+ },
+};
+
+static struct tegra_uhsic_config uhsic_phy_config = {
+ .enable_gpio = -1,
+ .reset_gpio = -1,
+ .sync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .term_range_adj = 0,
+ .elastic_underrun_limit = 16,
+ .elastic_overrun_limit = 16,
+};
+
+static struct tegra_ehci_platform_data tegra_ehci_uhsic_pdata = {
+ .phy_type = TEGRA_USB_PHY_TYPE_HSIC,
+ .phy_config = &uhsic_phy_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+};
+
+static struct tegra_ehci_platform_data tegra_ehci_pdata[] = {
+ [0] = {
+ .phy_config = &utmi_phy_config[0],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [1] = {
+ .phy_config = &utmi_phy_config[1],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [2] = {
+ .phy_config = &utmi_phy_config[2],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+};
+
+static struct tegra_otg_platform_data tegra_otg_pdata = {
+ .ehci_device = &tegra_ehci1_device,
+ .ehci_pdata = &tegra_ehci_pdata[0],
+};
+
+struct platform_device *tegra_usb_hsic_host_register(void)
+{
+ struct platform_device *pdev;
+ void *platform_data;
+ int val;
+
+ pdev = platform_device_alloc(tegra_ehci2_device.name,
+ tegra_ehci2_device.id);
+ if (!pdev)
+ return NULL;
+
+ val = platform_device_add_resources(pdev, tegra_ehci2_device.resource,
+ tegra_ehci2_device.num_resources);
+ if (val)
+ goto error;
+
+ pdev->dev.dma_mask = tegra_ehci2_device.dev.dma_mask;
+ pdev->dev.coherent_dma_mask = tegra_ehci2_device.dev.coherent_dma_mask;
+
+ platform_data = kmalloc(sizeof(struct tegra_ehci_platform_data),
+ GFP_KERNEL);
+ if (!platform_data)
+ goto error;
+
+ memcpy(platform_data, &tegra_ehci_uhsic_pdata,
+ sizeof(struct tegra_ehci_platform_data));
+ pdev->dev.platform_data = platform_data;
+
+ val = platform_device_add(pdev);
+ if (val)
+ goto error_add;
+
+ return pdev;
+
+error_add:
+ kfree(platform_data);
+error:
+	pr_err("%s: failed to add the host controller device\n", __func__);
+ platform_device_put(pdev);
+ return NULL;
+}
+
+void tegra_usb_hsic_host_unregister(struct platform_device *pdev)
+{
+ platform_device_unregister(pdev);
+}
+
+static int enterprise_usb_hsic_postsuspend(void)
+{
+ pr_debug("%s\n", __func__);
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L2);
+#endif
+ return 0;
+}
+
+static int enterprise_usb_hsic_preresume(void)
+{
+ pr_debug("%s\n", __func__);
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L2TOL0);
+#endif
+ return 0;
+}
+
+static int enterprise_usb_hsic_phy_ready(void)
+{
+ pr_debug("%s\n", __func__);
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L0);
+#endif
+ return 0;
+}
+
+static int enterprise_usb_hsic_phy_off(void)
+{
+ pr_debug("%s\n", __func__);
+#ifdef CONFIG_TEGRA_BB_XMM_POWER
+ baseband_xmm_set_power_status(BBXMM_PS_L3);
+#endif
+ return 0;
+}
+
+static void enterprise_usb_init(void)
+{
+ struct fsl_usb2_platform_data *udc_pdata;
+
+ tegra_usb_phy_init(tegra_usb_phy_pdata, ARRAY_SIZE(tegra_usb_phy_pdata));
+
+ tegra_otg_device.dev.platform_data = &tegra_otg_pdata;
+ platform_device_register(&tegra_otg_device);
+
+ udc_pdata = tegra_udc_device.dev.platform_data;
+}
+
+static struct platform_device *enterprise_audio_devices[] __initdata = {
+ &tegra_ahub_device,
+ &tegra_dam_device0,
+ &tegra_dam_device1,
+ &tegra_dam_device2,
+ &tegra_i2s_device2,
+ &tegra_i2s_device3,
+ &tegra_spdif_device,
+ &spdif_dit_device,
+ &bluetooth_dit_device,
+ &baseband_dit_device,
+ &tegra_pcm_device,
+ &enterprise_audio_device,
+};
+
+static void enterprise_audio_init(void)
+{
+ struct board_info board_info;
+
+ tegra_get_board_info(&board_info);
+ if (board_info.board_id == BOARD_E1197) {
+ platform_device_register(&tegra_i2s_device1);
+ enterprise_audio_pdata.audio_port_id[HIFI_CODEC] = 1;
+ } else
+ platform_device_register(&tegra_i2s_device0);
+
+ platform_add_devices(enterprise_audio_devices,
+ ARRAY_SIZE(enterprise_audio_devices));
+}
+
+static void enterprise_gps_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PE4);
+ tegra_gpio_enable(TEGRA_GPIO_PE5);
+}
+
+static struct baseband_power_platform_data tegra_baseband_power_data = {
+ .baseband_type = BASEBAND_XMM,
+ .modem = {
+ .xmm = {
+ .bb_rst = XMM_GPIO_BB_RST,
+ .bb_on = XMM_GPIO_BB_ON,
+ .ipc_bb_wake = XMM_GPIO_IPC_BB_WAKE,
+ .ipc_ap_wake = XMM_GPIO_IPC_AP_WAKE,
+ .ipc_hsic_active = XMM_GPIO_IPC_HSIC_ACTIVE,
+ .ipc_hsic_sus_req = XMM_GPIO_IPC_HSIC_SUS_REQ,
+ },
+ },
+};
+
+static struct platform_device tegra_baseband_power_device = {
+ .name = "baseband_xmm_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &tegra_baseband_power_data,
+ },
+};
+
+static struct platform_device tegra_baseband_power2_device = {
+ .name = "baseband_xmm_power2",
+ .id = -1,
+ .dev = {
+ .platform_data = &tegra_baseband_power_data,
+ },
+};
+
+#ifdef CONFIG_TEGRA_BB_M7400
+static union tegra_bb_gpio_id m7400_gpio_id = {
+ .m7400 = {
+ .pwr_status = GPIO_BB_RESET,
+ .pwr_on = GPIO_BB_PWRON,
+ .uart_awr = GPIO_BB_APACK,
+ .uart_cwr = GPIO_BB_CPACK,
+ .usb_awr = GPIO_BB_APACK2,
+ .usb_cwr = GPIO_BB_CPACK2,
+ .service = GPIO_BB_RSVD2,
+ .resout2 = GPIO_BB_RSVD1,
+ },
+};
+
+static struct tegra_bb_pdata m7400_pdata = {
+ .id = &m7400_gpio_id,
+ .device = &tegra_ehci2_device,
+ .ehci_register = tegra_usb_hsic_host_register,
+ .ehci_unregister = tegra_usb_hsic_host_unregister,
+ .bb_id = TEGRA_BB_M7400,
+};
+
+static struct platform_device tegra_baseband_m7400_device = {
+ .name = "tegra_baseband_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &m7400_pdata,
+ },
+};
+#endif
+
+static void enterprise_baseband_init(void)
+{
+ int modem_id = tegra_get_modem_id();
+
+ switch (modem_id) {
+ case TEGRA_BB_PH450: /* PH450 ULPI */
+ enterprise_modem_init();
+ break;
+ case TEGRA_BB_XMM6260: /* XMM6260 HSIC */
+ /* xmm baseband - do not switch off phy during suspend */
+ tegra_ehci_uhsic_pdata.power_down_on_bus_suspend = 0;
+		uhsic_phy_config.postsuspend = enterprise_usb_hsic_postsuspend;
+ uhsic_phy_config.preresume = enterprise_usb_hsic_preresume;
+ uhsic_phy_config.usb_phy_ready = enterprise_usb_hsic_phy_ready;
+ uhsic_phy_config.post_phy_off = enterprise_usb_hsic_phy_off;
+ /* enable XMM6260 baseband gpio(s) */
+ tegra_gpio_enable(tegra_baseband_power_data.modem.generic
+ .mdm_reset);
+ tegra_gpio_enable(tegra_baseband_power_data.modem.generic
+ .mdm_on);
+ tegra_gpio_enable(tegra_baseband_power_data.modem.generic
+ .ap2mdm_ack);
+ tegra_gpio_enable(tegra_baseband_power_data.modem.generic
+ .mdm2ap_ack);
+ tegra_gpio_enable(tegra_baseband_power_data.modem.generic
+ .ap2mdm_ack2);
+ tegra_gpio_enable(tegra_baseband_power_data.modem.generic
+ .mdm2ap_ack2);
+ tegra_baseband_power_data.hsic_register =
+ &tegra_usb_hsic_host_register;
+ tegra_baseband_power_data.hsic_unregister =
+ &tegra_usb_hsic_host_unregister;
+ platform_device_register(&tegra_baseband_power_device);
+ platform_device_register(&tegra_baseband_power2_device);
+ break;
+#ifdef CONFIG_TEGRA_BB_M7400
+ case TEGRA_BB_M7400: /* M7400 HSIC */
+ tegra_ehci2_device.dev.platform_data
+ = &tegra_ehci_uhsic_pdata;
+ platform_device_register(&tegra_baseband_m7400_device);
+ break;
+#endif
+ }
+}
+
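
A minimal sketch (assumed, not part of this patch) of how the XMM power driver on the receiving end of tegra_baseband_power_data could use the hsic_register/hsic_unregister hooks assigned above to bring the HSIC host controller up only while the modem link is active; the hook signatures are inferred from tegra_usb_hsic_host_register()/_unregister():

    static struct platform_device *example_hsic_host;

    static int example_xmm_link_up(struct baseband_power_platform_data *pdata)
    {
            if (!pdata->hsic_register)
                    return -EINVAL;

            /* Clones and registers the EHCI2/HSIC host defined in this file */
            example_hsic_host = pdata->hsic_register();
            return example_hsic_host ? 0 : -ENODEV;
    }

    static void example_xmm_link_down(struct baseband_power_platform_data *pdata)
    {
            /* Tear the host back down so the HSIC PHY can be powered off */
            if (example_hsic_host && pdata->hsic_unregister) {
                    pdata->hsic_unregister(example_hsic_host);
                    example_hsic_host = NULL;
            }
    }
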
+static void enterprise_nfc_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PS4);
+ tegra_gpio_enable(TEGRA_GPIO_PM6);
+}
+
+static void __init tegra_enterprise_init(void)
+{
+ tegra_thermal_init(&thermal_data);
+ tegra_clk_init_from_table(enterprise_clk_init_table);
+ enterprise_pinmux_init();
+ enterprise_i2c_init();
+ enterprise_uart_init();
+ enterprise_usb_init();
+ enterprise_tsensor_init();
+ platform_add_devices(enterprise_devices, ARRAY_SIZE(enterprise_devices));
+ enterprise_regulator_init();
+ enterprise_sdhci_init();
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ enterprise_edp_init();
+#endif
+ enterprise_kbc_init();
+ enterprise_touch_init();
+ enterprise_audio_init();
+ enterprise_gps_init();
+ enterprise_baseband_init();
+ enterprise_panel_init();
+ enterprise_setup_bluesleep();
+ enterprise_emc_init();
+ enterprise_sensors_init();
+ enterprise_suspend_init();
+ enterprise_bpc_mgmt_init();
+ tegra_release_bootloader_fb();
+ enterprise_nfc_init();
+}
+
+static void __init tegra_enterprise_ramconsole_reserve(unsigned long size)
+{
+ struct resource *res;
+ long ret;
+
+ res = platform_get_resource(&ram_console_device, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("Failed to find memory resource for ram console\n");
+ return;
+ }
+ res->start = memblock_end_of_DRAM() - size;
+ res->end = res->start + size - 1;
+ ret = memblock_remove(res->start, size);
+ if (ret) {
+ ram_console_device.resource = NULL;
+ ram_console_device.num_resources = 0;
+ pr_err("Failed to reserve memory block for ram console\n");
+ }
+}
+
+static void __init tegra_enterprise_reserve(void)
+{
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM)
+ tegra_reserve(0, SZ_4M, SZ_8M);
+#else
+ tegra_reserve(SZ_128M, SZ_4M, SZ_8M);
+#endif
+ tegra_enterprise_ramconsole_reserve(SZ_1M);
+}
+
+MACHINE_START(TEGRA_ENTERPRISE, "tegra_enterprise")
+ .boot_params = 0x80000100,
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_enterprise_reserve,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_enterprise_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-enterprise.h b/arch/arm/mach-tegra/board-enterprise.h
new file mode 100644
index 000000000000..4829e301c8c8
--- /dev/null
+++ b/arch/arm/mach-tegra/board-enterprise.h
@@ -0,0 +1,158 @@
+/*
+ * arch/arm/mach-tegra/board-enterprise.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_BOARD_ENTERPRISE_H
+#define _MACH_TEGRA_BOARD_ENTERPRISE_H
+
+#include <mach/gpio.h>
+#include <mach/irqs.h>
+#include <linux/mfd/tps80031.h>
+
+/* Processor Board ID */
+#define BOARD_E1205 0x0C05
+#define BOARD_E1197 0x0B61
+#define SKU_BATTERY_SUPPORT 0x1
+
+/* Board Fab version */
+#define BOARD_FAB_A00 0x0
+#define BOARD_FAB_A01 0x1
+#define BOARD_FAB_A02 0x2
+
+/* vdd_cpu voltage follower */
+#define BOARD_SKU_VF_BIT 0x0400
+
+int enterprise_charge_init(void);
+int enterprise_sdhci_init(void);
+int enterprise_pinmux_init(void);
+int enterprise_panel_init(void);
+int enterprise_sensors_init(void);
+int touch_init(void);
+int enterprise_kbc_init(void);
+int enterprise_emc_init(void);
+int enterprise_regulator_init(void);
+int enterprise_modem_init(void);
+int enterprise_suspend_init(void);
+int enterprise_edp_init(void);
+void __init enterprise_tsensor_init(void);
+void enterprise_bpc_mgmt_init(void);
+
+/* Invensense MPU Definitions */
+#define MPU_GYRO_NAME "mpu3050"
+#define MPU_GYRO_IRQ_GPIO TEGRA_GPIO_PH4
+#define MPU_GYRO_ADDR 0x68
+#define MPU_GYRO_BUS_NUM 0
+#define MPU_GYRO_ORIENTATION { -1, 0, 0, 0, -1, 0, 0, 0, 1 }
+#define MPU_ACCEL_NAME "kxtf9"
+#define MPU_ACCEL_IRQ_GPIO 0 /* DISABLE ACCELIRQ: TEGRA_GPIO_PJ2 */
+#define MPU_ACCEL_ADDR 0x0F
+#define MPU_ACCEL_BUS_NUM 0
+#define MPU_ACCEL_ORIENTATION { 0, 1, 0, -1, 0, 0, 0, 0, 1 }
+#define MPU_COMPASS_NAME "ak8975"
+#define MPU_COMPASS_IRQ_GPIO 0
+#define MPU_COMPASS_ADDR 0x0C
+#define MPU_COMPASS_BUS_NUM 0
+#define MPU_COMPASS_ORIENTATION { 0, 1, 0, -1, 0, 0, 0, 0, 1 }
+
+/* PCA954x I2C bus expander bus addresses */
+#define PCA954x_I2C_BUS_BASE 6
+#define PCA954x_I2C_BUS0 (PCA954x_I2C_BUS_BASE + 0)
+#define PCA954x_I2C_BUS1 (PCA954x_I2C_BUS_BASE + 1)
+#define PCA954x_I2C_BUS2 (PCA954x_I2C_BUS_BASE + 2)
+#define PCA954x_I2C_BUS3 (PCA954x_I2C_BUS_BASE + 3)
+
+/*****************External GPIO tables ******************/
+/* External peripheral gpio base. */
+#define ENT_TPS80031_GPIO_BASE TEGRA_NR_GPIOS
+#define ENT_TPS80031_GPIO_REGEN1 (ENT_TPS80031_GPIO_BASE + TPS80031_GPIO_REGEN1)
+#define ENT_TPS80031_GPIO_REGEN2 (ENT_TPS80031_GPIO_BASE + TPS80031_GPIO_REGEN2)
+#define ENT_TPS80031_GPIO_SYSEN (ENT_TPS80031_GPIO_BASE + TPS80031_GPIO_SYSEN)
+#define ENT_TPS80031_GPIO_END (ENT_TPS80031_GPIO_BASE + TPS80031_GPIO_NR)
+
+/*****************External Interrupt tables ******************/
+/* External peripheral irq base */
+#define ENT_TPS80031_IRQ_BASE TEGRA_NR_IRQS
+#define ENT_TPS80031_IRQ_END (ENT_TPS80031_IRQ_BASE + TPS80031_INT_NR)
+
+/*****************Camera GPIOs ******************/
+#define CAM_CSI_MUX_SEL_GPIO TEGRA_GPIO_PM3
+#define CAM_CSI_MUX_SEL_REAR 1
+#define CAM_CSI_MUX_SEL_FRONT 0
+
+#define CAM1_RST_L_GPIO TEGRA_GPIO_PM5 /*REAR RIGHT*/
+#define CAM2_RST_L_GPIO TEGRA_GPIO_PF4 /*REAR LEFT*/
+#define CAM3_RST_L_GPIO TEGRA_GPIO_PM2 /*FRONT*/
+#define CAM3_RST_L_TRUE 0
+#define CAM3_RST_L_FALSE 1
+#define CAM3_PWDN_GPIO TEGRA_GPIO_PN4 /*FRONT*/
+#define CAM3_PWDN_TRUE 1
+#define CAM3_PWDN_FALSE 0
+#define CAM_FLASH_EN_GPIO TEGRA_GPIO_PBB3
+#define CAM_FLASH_MAX_TORCH_AMP 7
+#define CAM_FLASH_MAX_FLASH_AMP 7
+#define CAM_I2C_MUX_RST_EXP TEGRA_GPIO_PF3 /*I2C Mux Reset*/
+
+/* Audio-related GPIOs */
+#define TEGRA_GPIO_HP_DET TEGRA_GPIO_PW3
+
+/* Baseband GPIO addresses */
+
+#define GPIO_BB_RESET TEGRA_GPIO_PE1
+#define GPIO_BB_PWRON TEGRA_GPIO_PE0
+#define GPIO_BB_APACK TEGRA_GPIO_PE3
+#define GPIO_BB_APACK2 TEGRA_GPIO_PE2
+#define GPIO_BB_CPACK TEGRA_GPIO_PU5
+#define GPIO_BB_CPACK2 TEGRA_GPIO_PV0
+#define GPIO_BB_RSVD1 TEGRA_GPIO_PV1
+#define GPIO_BB_RSVD2 TEGRA_GPIO_PU4
+
+#define BB_GPIO_MDM_PWRON_AP2BB TEGRA_GPIO_PE0 /* LCD_D0 */
+#define BB_GPIO_RESET_AP2BB TEGRA_GPIO_PE1 /* LCD_D1 */
+#define BB_GPIO_LCD_PWR1 TEGRA_GPIO_PC1
+#define BB_GPIO_LCD_PWR2 TEGRA_GPIO_PC6
+#define BB_GPIO_HS1_AP2BB TEGRA_GPIO_PE3 /* LCD_D3 */
+#define BB_GPIO_HS1_BB2AP TEGRA_GPIO_PU5
+
+#define XMM_GPIO_BB_ON BB_GPIO_MDM_PWRON_AP2BB
+#define XMM_GPIO_BB_RST BB_GPIO_RESET_AP2BB
+#define XMM_GPIO_IPC_HSIC_ACTIVE BB_GPIO_LCD_PWR1
+#define XMM_GPIO_IPC_HSIC_SUS_REQ BB_GPIO_LCD_PWR2
+#define XMM_GPIO_IPC_BB_WAKE BB_GPIO_HS1_AP2BB
+#define XMM_GPIO_IPC_AP_WAKE BB_GPIO_HS1_BB2AP
+
+#define TDIODE_OFFSET (9000) /* in millicelsius */
+
+/* Battery Peak Current Management */
+#define TEGRA_BPC_TRIGGER TEGRA_GPIO_PR3
+#define TEGRA_BPC_TIMEOUT 100 /* ms */
+#define TEGRA_BPC_CPU_PWR_LIMIT 0 /* in mW, (0 disables) */
+
+#define TEGRA_CUR_MON_THRESHOLD -2000
+#define TEGRA_CUR_MON_RESISTOR 20
+#define TEGRA_CUR_MON_MIN_CORES 2
+
+/* Baseband IDs */
+
+enum tegra_bb_type {
+ TEGRA_BB_PH450 = 1,
+ TEGRA_BB_XMM6260,
+ TEGRA_BB_M7400,
+};
+
+#endif /*_MACH_TEGRA_BOARD_ENTERPRISE_H */
diff --git a/arch/arm/mach-tegra/board-harmony-kbc.c b/arch/arm/mach-tegra/board-harmony-kbc.c
new file mode 100644
index 000000000000..156da22bfece
--- /dev/null
+++ b/arch/arm/mach-tegra/board-harmony-kbc.c
@@ -0,0 +1,375 @@
+/*
+ * arch/arm/mach-tegra/board-harmony-kbc.c
+ * Key configuration for the NVIDIA Tegra2 Harmony platform.
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/kbc.h>
+
+#include "board.h"
+#include "board-harmony.h"
+#include "devices.h"
+
+#define HARMONY_ROW_COUNT 16
+#define HARMONY_COL_COUNT 8
+
+static const u32 kbd_keymap[] = {
+ KEY(0, 0, KEY_RESERVED),
+ KEY(0, 1, KEY_RESERVED),
+ KEY(0, 2, KEY_W),
+ KEY(0, 3, KEY_S),
+ KEY(0, 4, KEY_A),
+ KEY(0, 5, KEY_Z),
+ KEY(0, 6, KEY_RESERVED),
+ KEY(0, 7, KEY_FN),
+
+ KEY(1, 0, KEY_RESERVED),
+ KEY(1, 1, KEY_RESERVED),
+ KEY(1, 2, KEY_RESERVED),
+ KEY(1, 3, KEY_RESERVED),
+ KEY(1, 4, KEY_RESERVED),
+ KEY(1, 5, KEY_RESERVED),
+ KEY(1, 6, KEY_RESERVED),
+ KEY(1, 7, KEY_MENU),
+
+ KEY(2, 0, KEY_RESERVED),
+ KEY(2, 1, KEY_RESERVED),
+ KEY(2, 2, KEY_RESERVED),
+ KEY(2, 3, KEY_RESERVED),
+ KEY(2, 4, KEY_RESERVED),
+ KEY(2, 5, KEY_RESERVED),
+ KEY(2, 6, KEY_LEFTALT),
+ KEY(2, 7, KEY_RIGHTALT),
+
+ KEY(3, 0, KEY_5),
+ KEY(3, 1, KEY_4),
+ KEY(3, 2, KEY_R),
+ KEY(3, 3, KEY_E),
+ KEY(3, 4, KEY_F),
+ KEY(3, 5, KEY_D),
+ KEY(3, 6, KEY_X),
+ KEY(3, 7, KEY_RESERVED),
+
+ KEY(4, 0, KEY_7),
+ KEY(4, 1, KEY_6),
+ KEY(4, 2, KEY_T),
+ KEY(4, 3, KEY_H),
+ KEY(4, 4, KEY_G),
+ KEY(4, 5, KEY_V),
+ KEY(4, 6, KEY_C),
+ KEY(4, 7, KEY_SPACE),
+
+ KEY(5, 0, KEY_9),
+ KEY(5, 1, KEY_8),
+ KEY(5, 2, KEY_U),
+ KEY(5, 3, KEY_Y),
+ KEY(5, 4, KEY_J),
+ KEY(5, 5, KEY_N),
+ KEY(5, 6, KEY_B),
+ KEY(5, 7, KEY_BACKSLASH),
+
+ KEY(6, 0, KEY_MINUS),
+ KEY(6, 1, KEY_0),
+ KEY(6, 2, KEY_O),
+ KEY(6, 3, KEY_I),
+ KEY(6, 4, KEY_L),
+ KEY(6, 5, KEY_K),
+ KEY(6, 6, KEY_COMMA),
+ KEY(6, 7, KEY_M),
+
+ KEY(7, 0, KEY_RESERVED),
+ KEY(7, 1, KEY_EQUAL),
+ KEY(7, 2, KEY_RIGHTBRACE),
+ KEY(7, 3, KEY_ENTER),
+ KEY(7, 4, KEY_RESERVED),
+ KEY(7, 5, KEY_RESERVED),
+ KEY(7, 6, KEY_RESERVED),
+ KEY(7, 7, KEY_MENU),
+
+ KEY(8, 0, KEY_RESERVED),
+ KEY(8, 1, KEY_RESERVED),
+ KEY(8, 2, KEY_RESERVED),
+ KEY(8, 3, KEY_RESERVED),
+ KEY(8, 4, KEY_LEFTSHIFT),
+ KEY(8, 5, KEY_RIGHTSHIFT),
+ KEY(8, 6, KEY_RESERVED),
+ KEY(8, 7, KEY_RESERVED),
+
+ KEY(9, 0, KEY_RESERVED),
+ KEY(9, 1, KEY_RESERVED),
+ KEY(9, 2, KEY_RESERVED),
+ KEY(9, 3, KEY_RESERVED),
+ KEY(9, 4, KEY_RESERVED),
+ KEY(9, 5, KEY_LEFTCTRL),
+ KEY(9, 6, KEY_RESERVED),
+ KEY(9, 7, KEY_RIGHTCTRL),
+
+ KEY(10, 0, KEY_RESERVED),
+ KEY(10, 1, KEY_RESERVED),
+ KEY(10, 2, KEY_RESERVED),
+ KEY(10, 3, KEY_RESERVED),
+ KEY(10, 4, KEY_RESERVED),
+ KEY(10, 5, KEY_RESERVED),
+ KEY(10, 6, KEY_RESERVED),
+ KEY(10, 7, KEY_RESERVED),
+
+ KEY(11, 0, KEY_LEFTBRACE),
+ KEY(11, 1, KEY_P),
+ KEY(11, 2, KEY_APOSTROPHE),
+ KEY(11, 3, KEY_SEMICOLON),
+ KEY(11, 4, KEY_SLASH),
+ KEY(11, 5, KEY_DOT),
+ KEY(11, 6, KEY_RESERVED),
+ KEY(11, 7, KEY_RESERVED),
+
+ KEY(12, 0, KEY_F10),
+ KEY(12, 1, KEY_F9),
+ KEY(12, 2, KEY_BACKSPACE),
+ KEY(12, 3, KEY_3),
+ KEY(12, 4, KEY_2),
+ KEY(12, 5, KEY_UP),
+ KEY(12, 6, KEY_PRINT),
+ KEY(12, 7, KEY_PAUSE),
+
+ KEY(13, 0, KEY_INSERT),
+ KEY(13, 1, KEY_DELETE),
+ KEY(13, 2, KEY_RESERVED),
+ KEY(13, 3, KEY_PAGEUP),
+ KEY(13, 4, KEY_PAGEDOWN),
+ KEY(13, 5, KEY_RIGHT),
+ KEY(13, 6, KEY_DOWN),
+ KEY(13, 7, KEY_LEFT),
+
+ KEY(14, 0, KEY_F11),
+ KEY(14, 1, KEY_F12),
+ KEY(14, 2, KEY_F8),
+ KEY(14, 3, KEY_Q),
+ KEY(14, 4, KEY_F4),
+ KEY(14, 5, KEY_F3),
+ KEY(14, 6, KEY_1),
+ KEY(14, 7, KEY_F7),
+
+ KEY(15, 0, KEY_ESC),
+ KEY(15, 1, KEY_GRAVE),
+ KEY(15, 2, KEY_F5),
+ KEY(15, 3, KEY_TAB),
+ KEY(15, 4, KEY_F1),
+ KEY(15, 5, KEY_F2),
+ KEY(15, 6, KEY_CAPSLOCK),
+ KEY(15, 7, KEY_F6),
+
+ KEY(16, 0, KEY_RESERVED),
+ KEY(16, 1, KEY_RESERVED),
+ KEY(16, 2, KEY_RESERVED),
+ KEY(16, 3, KEY_RESERVED),
+ KEY(16, 4, KEY_RESERVED),
+ KEY(16, 5, KEY_RESERVED),
+ KEY(16, 6, KEY_RESERVED),
+ KEY(16, 7, KEY_RESERVED),
+
+ KEY(17, 0, KEY_RESERVED),
+ KEY(17, 1, KEY_RESERVED),
+ KEY(17, 2, KEY_RESERVED),
+ KEY(17, 3, KEY_RESERVED),
+ KEY(17, 4, KEY_RESERVED),
+ KEY(17, 5, KEY_RESERVED),
+ KEY(17, 6, KEY_RESERVED),
+ KEY(17, 7, KEY_RESERVED),
+
+ KEY(18, 0, KEY_RESERVED),
+ KEY(18, 1, KEY_RESERVED),
+ KEY(18, 2, KEY_RESERVED),
+ KEY(18, 3, KEY_RESERVED),
+ KEY(18, 4, KEY_RESERVED),
+ KEY(18, 5, KEY_RESERVED),
+ KEY(18, 6, KEY_RESERVED),
+ KEY(18, 7, KEY_RESERVED),
+
+ KEY(19, 0, KEY_RESERVED),
+ KEY(19, 1, KEY_RESERVED),
+ KEY(19, 2, KEY_RESERVED),
+ KEY(19, 3, KEY_RESERVED),
+ KEY(19, 4, KEY_RESERVED),
+ KEY(19, 5, KEY_RESERVED),
+ KEY(19, 6, KEY_RESERVED),
+ KEY(19, 7, KEY_RESERVED),
+
+ KEY(20, 0, KEY_7),
+ KEY(20, 1, KEY_RESERVED),
+ KEY(20, 2, KEY_RESERVED),
+ KEY(20, 3, KEY_RESERVED),
+ KEY(20, 4, KEY_RESERVED),
+ KEY(20, 5, KEY_RESERVED),
+ KEY(20, 6, KEY_RESERVED),
+ KEY(20, 7, KEY_RESERVED),
+
+ KEY(21, 0, KEY_9),
+ KEY(21, 1, KEY_8),
+ KEY(21, 2, KEY_4),
+ KEY(21, 3, KEY_RESERVED),
+ KEY(21, 4, KEY_1),
+ KEY(21, 5, KEY_RESERVED),
+ KEY(21, 6, KEY_RESERVED),
+ KEY(21, 7, KEY_RESERVED),
+
+ KEY(22, 0, KEY_RESERVED),
+ KEY(22, 1, KEY_SLASH),
+ KEY(22, 2, KEY_6),
+ KEY(22, 3, KEY_5),
+ KEY(22, 4, KEY_3),
+ KEY(22, 5, KEY_2),
+ KEY(22, 6, KEY_RESERVED),
+ KEY(22, 7, KEY_0),
+
+ KEY(23, 0, KEY_RESERVED),
+ KEY(23, 1, KEY_RESERVED),
+ KEY(23, 2, KEY_RESERVED),
+ KEY(23, 3, KEY_RESERVED),
+ KEY(23, 4, KEY_RESERVED),
+ KEY(23, 5, KEY_RESERVED),
+ KEY(23, 6, KEY_RESERVED),
+ KEY(23, 7, KEY_RESERVED),
+
+ KEY(24, 0, KEY_RESERVED),
+ KEY(24, 1, KEY_RESERVED),
+ KEY(24, 2, KEY_RESERVED),
+ KEY(24, 3, KEY_RESERVED),
+ KEY(24, 4, KEY_RESERVED),
+ KEY(24, 5, KEY_RESERVED),
+ KEY(24, 6, KEY_RESERVED),
+ KEY(24, 7, KEY_RESERVED),
+
+ KEY(25, 0, KEY_RESERVED),
+ KEY(25, 1, KEY_RESERVED),
+ KEY(25, 2, KEY_RESERVED),
+ KEY(25, 3, KEY_RESERVED),
+ KEY(25, 4, KEY_RESERVED),
+ KEY(25, 5, KEY_RESERVED),
+ KEY(25, 6, KEY_RESERVED),
+ KEY(25, 7, KEY_RESERVED),
+
+ KEY(26, 0, KEY_RESERVED),
+ KEY(26, 1, KEY_RESERVED),
+ KEY(26, 2, KEY_RESERVED),
+ KEY(26, 3, KEY_RESERVED),
+ KEY(26, 4, KEY_RESERVED),
+ KEY(26, 5, KEY_RESERVED),
+ KEY(26, 6, KEY_RESERVED),
+ KEY(26, 7, KEY_RESERVED),
+
+ KEY(27, 0, KEY_RESERVED),
+ KEY(27, 1, KEY_KPASTERISK),
+ KEY(27, 2, KEY_RESERVED),
+ KEY(27, 3, KEY_KPMINUS),
+ KEY(27, 4, KEY_KPPLUS),
+ KEY(27, 5, KEY_DOT),
+ KEY(27, 6, KEY_RESERVED),
+ KEY(27, 7, KEY_RESERVED),
+
+ KEY(28, 0, KEY_RESERVED),
+ KEY(28, 1, KEY_RESERVED),
+ KEY(28, 2, KEY_RESERVED),
+ KEY(28, 3, KEY_RESERVED),
+ KEY(28, 4, KEY_RESERVED),
+ KEY(28, 5, KEY_VOLUMEUP),
+ KEY(28, 6, KEY_RESERVED),
+ KEY(28, 7, KEY_RESERVED),
+
+ KEY(29, 0, KEY_RESERVED),
+ KEY(29, 1, KEY_RESERVED),
+ KEY(29, 2, KEY_RESERVED),
+ KEY(29, 3, KEY_HOME),
+ KEY(29, 4, KEY_END),
+ KEY(29, 5, KEY_BRIGHTNESSUP),
+ KEY(29, 6, KEY_VOLUMEDOWN),
+ KEY(29, 7, KEY_BRIGHTNESSDOWN),
+
+ KEY(30, 0, KEY_NUMLOCK),
+ KEY(30, 1, KEY_SCROLLLOCK),
+ KEY(30, 2, KEY_MUTE),
+ KEY(30, 3, KEY_RESERVED),
+ KEY(30, 4, KEY_RESERVED),
+ KEY(30, 5, KEY_RESERVED),
+ KEY(30, 6, KEY_RESERVED),
+ KEY(30, 7, KEY_RESERVED),
+
+ KEY(31, 0, KEY_RESERVED),
+ KEY(31, 1, KEY_RESERVED),
+ KEY(31, 2, KEY_RESERVED),
+ KEY(31, 3, KEY_RESERVED),
+ KEY(31, 4, KEY_QUESTION),
+ KEY(31, 5, KEY_RESERVED),
+ KEY(31, 6, KEY_RESERVED),
+ KEY(31, 7, KEY_RESERVED),
+};
+
+static const struct matrix_keymap_data keymap_data = {
+ .keymap = kbd_keymap,
+ .keymap_size = ARRAY_SIZE(kbd_keymap),
+};
+
+static struct tegra_kbc_wake_key harmony_wake_cfg[] = {
+ [0] = {
+ .row = 1,
+ .col = 7,
+ },
+ [1] = {
+ .row = 15,
+ .col = 0,
+ },
+};
+
+static struct tegra_kbc_platform_data harmony_kbc_platform_data = {
+ .debounce_cnt = 2,
+ .repeat_cnt = 5 * 32,
+ .wakeup = true,
+ .keymap_data = &keymap_data,
+ .use_fn_map = true,
+ .wake_cnt = 2,
+ .wake_cfg = &harmony_wake_cfg[0],
+#ifdef CONFIG_ANDROID
+ .disable_ev_rep = true,
+#endif
+};
+
+int __init harmony_kbc_init(void)
+{
+ struct tegra_kbc_platform_data *data = &harmony_kbc_platform_data;
+	int i;
+
+	tegra_kbc_device.dev.platform_data = &harmony_kbc_platform_data;
+	pr_info("Registering tegra-kbc\n");
+
+ BUG_ON((KBC_MAX_ROW + KBC_MAX_COL) > KBC_MAX_GPIO);
+ for (i = 0; i < KBC_MAX_ROW; i++) {
+ data->pin_cfg[i].num = i;
+ data->pin_cfg[i].is_row = true;
+ }
+
+ for (i = 0; i < KBC_MAX_COL; i++)
+ data->pin_cfg[i + KBC_MAX_ROW].num = i;
+
+ platform_device_register(&tegra_kbc_device);
+	pr_info("Registered tegra-kbc successfully\n");
+ return 0;
+}
+
diff --git a/arch/arm/mach-tegra/board-harmony-panel.c b/arch/arm/mach-tegra/board-harmony-panel.c
new file mode 100644
index 000000000000..d0eecfc9917a
--- /dev/null
+++ b/arch/arm/mach-tegra/board-harmony-panel.c
@@ -0,0 +1,273 @@
+/*
+ * arch/arm/mach-tegra/board-harmony-panel.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <linux/nvhost.h>
+#include <linux/gpio.h>
+#include <linux/pwm_backlight.h>
+
+#include <mach/dc.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/nvmap.h>
+#include <mach/tegra_fb.h>
+#include <mach/fb.h>
+
+#include "devices.h"
+#include "gpio-names.h"
+#include "board.h"
+
+#define harmony_bl_enb TEGRA_GPIO_PB5
+#define harmony_lvds_shutdown TEGRA_GPIO_PB2
+#define harmony_en_vdd_pnl TEGRA_GPIO_PC6
+#define harmony_bl_vdd TEGRA_GPIO_PW0
+#define harmony_bl_pwm TEGRA_GPIO_PB4
+
+/* panel power on sequence timing */
+#define harmony_pnl_to_lvds_ms 0
+#define harmony_lvds_to_bl_ms 200
+
+static int harmony_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(harmony_bl_enb, "backlight_enb");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(harmony_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(harmony_bl_enb);
+ else
+ tegra_gpio_enable(harmony_bl_enb);
+
+ return ret;
+}
+
+static void harmony_backlight_exit(struct device *dev)
+{
+ gpio_set_value(harmony_bl_enb, 0);
+ gpio_free(harmony_bl_enb);
+ tegra_gpio_disable(harmony_bl_enb);
+}
+
+static int harmony_backlight_notify(struct device *unused, int brightness)
+{
+ gpio_set_value(harmony_en_vdd_pnl, !!brightness);
+ gpio_set_value(harmony_lvds_shutdown, !!brightness);
+ gpio_set_value(harmony_bl_enb, !!brightness);
+ return brightness;
+}
+
+static int harmony_disp1_check_fb(struct device *dev, struct fb_info *info);
+
+static struct platform_pwm_backlight_data harmony_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = 255,
+ .dft_brightness = 224,
+ .pwm_period_ns = 5000000,
+ .init = harmony_backlight_init,
+ .exit = harmony_backlight_exit,
+ .notify = harmony_backlight_notify,
+ /* Only toggle backlight on fb blank notifications for disp1 */
+ .check_fb = harmony_disp1_check_fb,
+};
+
+static struct platform_device harmony_backlight_device = {
+ .name = "pwm-backlight",
+ .id = -1,
+ .dev = {
+ .platform_data = &harmony_backlight_data,
+ },
+};
+
+static int harmony_panel_enable(void)
+{
+ gpio_set_value(harmony_en_vdd_pnl, 1);
+ mdelay(harmony_pnl_to_lvds_ms);
+ gpio_set_value(harmony_lvds_shutdown, 1);
+ mdelay(harmony_lvds_to_bl_ms);
+ return 0;
+}
+
+static int harmony_panel_disable(void)
+{
+ gpio_set_value(harmony_lvds_shutdown, 0);
+ gpio_set_value(harmony_en_vdd_pnl, 0);
+ return 0;
+}
+
+static struct resource harmony_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct tegra_dc_mode harmony_panel_modes[] = {
+ {
+ .pclk = 42430000,
+ .h_ref_to_sync = 4,
+ .v_ref_to_sync = 2,
+ .h_sync_width = 136,
+ .v_sync_width = 4,
+ .h_back_porch = 138,
+ .v_back_porch = 21,
+ .h_active = 1024,
+ .v_active = 600,
+ .h_front_porch = 34,
+ .v_front_porch = 4,
+ },
+};
+
+static struct tegra_fb_data harmony_fb_data = {
+ .win = 0,
+ .xres = 1024,
+ .yres = 600,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_out harmony_disp1_out = {
+ .type = TEGRA_DC_OUT_RGB,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+ .depth = 18,
+ .dither = TEGRA_DC_ORDERED_DITHER,
+
+ .modes = harmony_panel_modes,
+ .n_modes = ARRAY_SIZE(harmony_panel_modes),
+
+ .enable = harmony_panel_enable,
+ .disable = harmony_panel_disable,
+};
+
+static struct tegra_dc_platform_data harmony_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &harmony_disp1_out,
+ .fb = &harmony_fb_data,
+};
+
+static struct nvhost_device harmony_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = harmony_disp1_resources,
+ .num_resources = ARRAY_SIZE(harmony_disp1_resources),
+ .dev = {
+ .platform_data = &harmony_disp1_pdata,
+ },
+};
+
+static int harmony_disp1_check_fb(struct device *dev, struct fb_info *info)
+{
+ return info->device == &harmony_disp1_device.dev;
+}
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static struct nvmap_platform_carveout harmony_carveouts[] = {
+ [0] = NVMAP_HEAP_CARVEOUT_IRAM_INIT,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data harmony_nvmap_data = {
+ .carveouts = harmony_carveouts,
+ .nr_carveouts = ARRAY_SIZE(harmony_carveouts),
+};
+
+static struct platform_device harmony_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &harmony_nvmap_data,
+ },
+};
+#endif
+
+static struct platform_device *harmony_gfx_devices[] __initdata = {
+#if defined(CONFIG_TEGRA_NVMAP)
+ &harmony_nvmap_device,
+#endif
+ &tegra_grhost_device,
+ &tegra_pwfm0_device,
+ &harmony_backlight_device,
+};
+
+int __init harmony_panel_init(void)
+{
+ int err;
+ struct resource *res;
+
+ gpio_request(harmony_en_vdd_pnl, "en_vdd_pnl");
+ gpio_direction_output(harmony_en_vdd_pnl, 1);
+ tegra_gpio_enable(harmony_en_vdd_pnl);
+
+ gpio_request(harmony_bl_vdd, "bl_vdd");
+ gpio_direction_output(harmony_bl_vdd, 1);
+ tegra_gpio_enable(harmony_bl_vdd);
+
+ gpio_request(harmony_lvds_shutdown, "lvds_shdn");
+ gpio_direction_output(harmony_lvds_shutdown, 1);
+ tegra_gpio_enable(harmony_lvds_shutdown);
+
+#if defined(CONFIG_TEGRA_NVMAP)
+ harmony_carveouts[1].base = tegra_carveout_start;
+ harmony_carveouts[1].size = tegra_carveout_size;
+#endif
+
+ err = platform_add_devices(harmony_gfx_devices,
+ ARRAY_SIZE(harmony_gfx_devices));
+ if (err)
+ return err;
+
+ res = nvhost_get_resource_byname(&harmony_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+ if (res) {
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+
+ /* Copy the bootloader fb to the fb. */
+ if (tegra_bootloader_fb_start)
+ tegra_move_framebuffer(tegra_fb_start,
+ tegra_bootloader_fb_start,
+ min(tegra_fb_size, tegra_bootloader_fb_size));
+
+ err = nvhost_device_register(&harmony_disp1_device);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-tegra/board-harmony-pcie.c b/arch/arm/mach-tegra/board-harmony-pcie.c
index 9c27b95b8d86..f3db0eeba2c9 100644
--- a/arch/arm/mach-tegra/board-harmony-pcie.c
+++ b/arch/arm/mach-tegra/board-harmony-pcie.c
@@ -23,6 +23,8 @@
#include <asm/mach-types.h>
#include <mach/pinmux.h>
+#include <mach/pci.h>
+#include "devices.h"
#include "board.h"
#ifdef CONFIG_TEGRA_PCI
@@ -30,7 +32,14 @@
/* GPIO 3 of the PMIC */
#define EN_VDD_1V05_GPIO (TEGRA_NR_GPIOS + 2)
-static int __init harmony_pcie_init(void)
+static struct tegra_pci_platform_data harmony_pci_platform_data = {
+ .port_status[0] = 1,
+ .port_status[1] = 1,
+ .use_dock_detect = 0,
+ .gpio = 0,
+};
+
+int __init harmony_pcie_init(void)
{
struct regulator *regulator = NULL;
int err;
@@ -54,9 +63,8 @@ static int __init harmony_pcie_init(void)
tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXA, TEGRA_TRI_NORMAL);
tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXK, TEGRA_TRI_NORMAL);
- err = tegra_pcie_init(true, true);
- if (err)
- goto err_pcie;
+ tegra_pci_device.dev.platform_data = &harmony_pci_platform_data;
+ platform_device_register(&tegra_pci_device);
return 0;
@@ -73,7 +81,4 @@ err_reg:
return err;
}
-/* PCI should be initialized after I2C, mfd and regulators */
-subsys_initcall_sync(harmony_pcie_init);
-
#endif
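
With subsys_initcall_sync() removed above and harmony_pcie_init() no longer static, some board code is now expected to call it explicitly once I2C, the MFD and the regulators are up. A hypothetical caller sketch (not part of this diff; the machine_is_harmony() guard and the call site are assumptions):

    static int __init example_harmony_pcie_initcall(void)
    {
            if (!machine_is_harmony())
                    return 0;

            /* Regulators and I2C must already be registered at this point */
            return harmony_pcie_init();
    }
    subsys_initcall_sync(example_harmony_pcie_initcall);
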
diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c
index 4d63e2e97a8d..2fa64cff58f3 100644
--- a/arch/arm/mach-tegra/board-harmony-pinmux.c
+++ b/arch/arm/mach-tegra/board-harmony-pinmux.c
@@ -16,12 +16,29 @@
#include <linux/kernel.h>
#include <linux/gpio.h>
+#include <linux/init.h>
#include <mach/pinmux.h>
#include "gpio-names.h"
#include "board-harmony.h"
-static struct tegra_pingroup_config harmony_pinmux[] = {
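+/* Conservative pad drive defaults: schmitt trigger on, div-1 drive, slowest slew */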
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+static __initdata struct tegra_drive_pingroup_config harmony_drive_pinmux[] = {
+ DEFAULT_DRIVE(SDIO1),
+};
+
+static __initdata struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -41,7 +58,7 @@ static struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -102,18 +119,18 @@ static struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
@@ -153,9 +170,11 @@ static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TEGRA_GPIO_EXT_MIC_EN, .enable = true },
};
-void harmony_pinmux_init(void)
+void __init harmony_pinmux_init(void)
{
tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
+ tegra_drive_pinmux_config_table(harmony_drive_pinmux,
+ ARRAY_SIZE(harmony_drive_pinmux));
tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
}
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index 5ad8b2f94f8d..a841b37dbe2a 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 NVIDIA, Inc.
+ * Copyright (C) 2010-2011 NVIDIA, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,35 +20,176 @@
#include <linux/gpio.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
#include <linux/mfd/tps6586x.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
#include <mach/irqs.h>
#include "board-harmony.h"
+#include "pm.h"
#define PMC_CTRL 0x0
#define PMC_CTRL_INTR_LOW (1 << 17)
+static struct regulator_consumer_supply tps658621_sm0_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_sm1_supply[] = {
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_sm2_supply[] = {
+ REGULATOR_SUPPLY("vdd_sm2", NULL),
+};
+
static struct regulator_consumer_supply tps658621_ldo0_supply[] = {
- REGULATOR_SUPPLY("pex_clk", NULL),
+ REGULATOR_SUPPLY("p_cam_avdd", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_ldo1_supply[] = {
+ REGULATOR_SUPPLY("avdd_pll", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_ldo2_supply[] = {
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
};
-static struct regulator_init_data ldo0_data = {
+static struct regulator_consumer_supply tps658621_ldo3_supply[] = {
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+ REGULATOR_SUPPLY("avdd_lvds", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_ldo4_supply[] = {
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vddio_sys", "panjit_touch"),
+};
+
+static struct regulator_consumer_supply tps658621_ldo5_supply[] = {
+ REGULATOR_SUPPLY("vcore_mmc", "sdhci-tegra.1"),
+ REGULATOR_SUPPLY("vcore_mmc", "sdhci-tegra.3"),
+};
+
+static struct regulator_consumer_supply tps658621_ldo6_supply[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_ldo7_supply[] = {
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_ldo8_supply[] = {
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+};
+
+static struct regulator_consumer_supply tps658621_ldo9_supply[] = {
+ REGULATOR_SUPPLY("avdd_2v85", NULL),
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("avdd_amp", NULL),
+};
+
+/* regulator supplies power to WWAN - disabled by default */
+static struct regulator_consumer_supply vdd_1v5_consumer_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v5", NULL),
+};
+
+static struct regulator_init_data vdd_1v5_initdata = {
+ .consumer_supplies = vdd_1v5_consumer_supply,
+ .num_consumer_supplies = 1,
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .always_on = 0,
+ },
+};
+
+static struct fixed_voltage_config vdd_1v5 = {
+ .supply_name = "vdd_1v5",
+ .microvolts = 1500000, /* Enable 1.5V */
+ .gpio = TPS_GPIO_EN_1V5, /* GPIO BASE+0 */
+ .startup_delay = 0,
+ .enable_high = 0,
+ .enabled_at_boot = 0,
+ .init_data = &vdd_1v5_initdata,
+};
+
+/* regulator supplies power to WLAN - enable here, to satisfy SDIO probing */
+static struct regulator_consumer_supply vdd_1v2_consumer_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v2", NULL),
+};
+
+static struct regulator_init_data vdd_1v2_initdata = {
+ .consumer_supplies = vdd_1v2_consumer_supply,
+ .num_consumer_supplies = 1,
.constraints = {
- .min_uV = 1250 * 1000,
- .max_uV = 3300 * 1000,
- .valid_modes_mask = (REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_STANDBY),
- .valid_ops_mask = (REGULATOR_CHANGE_MODE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_VOLTAGE),
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .always_on = 1,
},
- .num_consumer_supplies = ARRAY_SIZE(tps658621_ldo0_supply),
- .consumer_supplies = tps658621_ldo0_supply,
};
-#define HARMONY_REGULATOR_INIT(_id, _minmv, _maxmv) \
- static struct regulator_init_data _id##_data = { \
+static struct fixed_voltage_config vdd_1v2 = {
+ .supply_name = "vdd_1v2",
+ .microvolts = 1200000, /* Enable 1.2V */
+ .gpio = TPS_GPIO_EN_1V2, /* GPIO BASE+1 */
+ .startup_delay = 0,
+ .enable_high = 1,
+ .enabled_at_boot = 1,
+ .init_data = &vdd_1v2_initdata,
+};
+
+/* regulator supplies power to PLL - enable here */
+static struct regulator_consumer_supply vdd_1v05_consumer_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v05", NULL),
+};
+
+static struct regulator_init_data vdd_1v05_initdata = {
+ .consumer_supplies = vdd_1v05_consumer_supply,
+ .num_consumer_supplies = 1,
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .always_on = 1,
+ },
+};
+
+static struct fixed_voltage_config vdd_1v05 = {
+ .supply_name = "vdd_1v05",
+ .microvolts = 1050000, /* Enable 1.05V */
+ .gpio = TPS_GPIO_EN_1V05, /* BASE+2 */
+ .startup_delay = 0,
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &vdd_1v05_initdata,
+};
+
+/* mode pin for 1.05V regulator - enable here */
+static struct regulator_consumer_supply vdd_1v05_mode_consumer_supply[] = {
+ REGULATOR_SUPPLY("vdd_1v05_mode", NULL),
+};
+
+static struct regulator_init_data vdd_1v05_mode_initdata = {
+ .consumer_supplies = vdd_1v05_mode_consumer_supply,
+ .num_consumer_supplies = 1,
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .always_on = 1,
+ },
+};
+
+static struct fixed_voltage_config vdd_1v05_mode = {
+ .supply_name = "vdd_1v05_mode",
+ .microvolts = 1050000, /* Enable 1.05V */
+ .gpio = TPS_GPIO_MODE_1V05, /* BASE+3 */
+ .startup_delay = 0,
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &vdd_1v05_mode_initdata,
+};
+
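+/* Build regulator_init_data for a TPS6586x rail; voltage limits are given in mV */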
+#define REGULATOR_INIT(_id, _minmv, _maxmv) \
+ { \
.constraints = { \
.min_uV = (_minmv)*1000, \
.max_uV = (_maxmv)*1000, \
@@ -58,20 +199,33 @@ static struct regulator_init_data ldo0_data = {
REGULATOR_CHANGE_STATUS | \
REGULATOR_CHANGE_VOLTAGE), \
}, \
+ .num_consumer_supplies = ARRAY_SIZE(tps658621_##_id##_supply),\
+ .consumer_supplies = tps658621_##_id##_supply, \
}
-HARMONY_REGULATOR_INIT(sm0, 725, 1500);
-HARMONY_REGULATOR_INIT(sm1, 725, 1500);
-HARMONY_REGULATOR_INIT(sm2, 3000, 4550);
-HARMONY_REGULATOR_INIT(ldo1, 725, 1500);
-HARMONY_REGULATOR_INIT(ldo2, 725, 1500);
-HARMONY_REGULATOR_INIT(ldo3, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo4, 1700, 2475);
-HARMONY_REGULATOR_INIT(ldo5, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo6, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo7, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo8, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo9, 1250, 3300);
+static struct regulator_init_data sm0_data = REGULATOR_INIT(sm0, 725, 1500);
+static struct regulator_init_data sm1_data = REGULATOR_INIT(sm1, 725, 1500);
+static struct regulator_init_data sm2_data = REGULATOR_INIT(sm2, 3000, 4550);
+static struct regulator_init_data ldo0_data = REGULATOR_INIT(ldo0, 1250, 3300);
+static struct regulator_init_data ldo1_data = REGULATOR_INIT(ldo1, 725, 1500);
+static struct regulator_init_data ldo2_data = REGULATOR_INIT(ldo2, 725, 1500);
+static struct regulator_init_data ldo3_data = REGULATOR_INIT(ldo3, 1250, 3300);
+static struct regulator_init_data ldo4_data = REGULATOR_INIT(ldo4, 1700, 2475);
+static struct regulator_init_data ldo5_data = REGULATOR_INIT(ldo5, 1250, 3300);
+static struct regulator_init_data ldo6_data = REGULATOR_INIT(ldo6, 1250, 3300);
+static struct regulator_init_data ldo7_data = REGULATOR_INIT(ldo7, 1250, 3300);
+static struct regulator_init_data ldo8_data = REGULATOR_INIT(ldo8, 1250, 3300);
+static struct regulator_init_data ldo9_data = REGULATOR_INIT(ldo9, 1250, 3300);
+
+static struct tps6586x_rtc_platform_data rtc_data = {
+ .irq = TEGRA_NR_IRQS + TPS6586X_INT_RTC_ALM1,
+ .start = {
+ .year = 2009,
+ .month = 1,
+ .day = 1,
+ },
+ .cl_sel = TPS6586X_RTC_CL_SEL_1_5PF /* use lowest (external 20pF cap) */
+};
#define TPS_REG(_id, _data) \
{ \
@@ -80,6 +234,13 @@ HARMONY_REGULATOR_INIT(ldo9, 1250, 3300);
.platform_data = _data, \
}
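+/* Fixed-voltage regulator subdevice switched by a TPS6586x GPIO */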
+#define TPS_GPIO_FIXED_REG(_id, _data) \
+ { \
+ .id = _id, \
+ .name = "reg-fixed-voltage", \
+ .platform_data = _data, \
+ }
+
static struct tps6586x_subdev_info tps_devs[] = {
TPS_REG(SM_0, &sm0_data),
TPS_REG(SM_1, &sm1_data),
@@ -94,6 +255,15 @@ static struct tps6586x_subdev_info tps_devs[] = {
TPS_REG(LDO_7, &ldo7_data),
TPS_REG(LDO_8, &ldo8_data),
TPS_REG(LDO_9, &ldo9_data),
+ TPS_GPIO_FIXED_REG(0, &vdd_1v5),
+ TPS_GPIO_FIXED_REG(1, &vdd_1v2),
+ TPS_GPIO_FIXED_REG(2, &vdd_1v05),
+ TPS_GPIO_FIXED_REG(3, &vdd_1v05_mode),
+ {
+ .id = 0,
+ .name = "tps6586x-rtc",
+ .platform_data = &rtc_data,
+ },
};
static struct tps6586x_platform_data tps_platform = {
@@ -111,9 +281,50 @@ static struct i2c_board_info __initdata harmony_regulators[] = {
},
};
+static void harmony_board_suspend(int lp_state, enum suspend_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_SUSPEND_BEFORE_CPU))
+ tegra_console_uart_suspend();
+}
+
+static void harmony_board_resume(int lp_state, enum resume_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_RESUME_AFTER_CPU))
+ tegra_console_uart_resume();
+}
+
+static struct tegra_suspend_platform_data harmony_suspend_data = {
+ /*
+ * Check power on time and crystal oscillator start time
+ * for appropriate settings.
+ */
+ .cpu_timer = 5000,
+ .cpu_off_timer = 5000,
+ .suspend_mode = TEGRA_SUSPEND_LP0,
+ .core_timer = 0x7e7e,
+ .core_off_timer = 0x7f,
+ .corereq_high = false,
+ .sysclkreq_high = true,
+ .board_suspend = harmony_board_suspend,
+ .board_resume = harmony_board_resume,
+};
+
+int __init harmony_suspend_init(void)
+{
+ tegra_init_suspend(&harmony_suspend_data);
+ return 0;
+}
+
int __init harmony_regulator_init(void)
{
- i2c_register_board_info(3, harmony_regulators, 1);
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ u32 pmc_ctrl;
+
+ /* configure the power management controller to trigger PMU
+ * interrupts when low */
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ i2c_register_board_info(4, harmony_regulators, 1);
return 0;
}
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index 846cd7d69e3e..f3f15a1bd3db 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -20,11 +20,19 @@
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/pda_power.h>
+#include <linux/input.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
#include <linux/i2c.h>
+#include <linux/i2c-tegra.h>
+#include <linux/memblock.h>
+#include <linux/delay.h>
+#include <linux/mfd/tps6586x.h>
#include <sound/wm8903.h>
@@ -37,35 +45,157 @@
#include <mach/iomap.h>
#include <mach/irqs.h>
#include <mach/sdhci.h>
+#include <mach/nand.h>
+#include <mach/clk.h>
+#include <mach/usb_phy.h>
+#include "clock.h"
#include "board.h"
#include "board-harmony.h"
#include "clock.h"
#include "devices.h"
#include "gpio-names.h"
+#include "pm.h"
-static struct plat_serial8250_port debug_uart_platform_data[] = {
- {
- .membase = IO_ADDRESS(TEGRA_UARTD_BASE),
- .mapbase = TEGRA_UARTD_BASE,
- .irq = INT_UARTD,
- .flags = UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = 216000000,
- }, {
- .flags = 0
- }
+/* NVidia bootloader tags */
+#define ATAG_NVIDIA 0x41000801
+
+#define ATAG_NVIDIA_RM 0x1
+#define ATAG_NVIDIA_DISPLAY 0x2
+#define ATAG_NVIDIA_FRAMEBUFFER 0x3
+#define ATAG_NVIDIA_CHIPSHMOO 0x4
+#define ATAG_NVIDIA_CHIPSHMOOPHYS 0x5
+#define ATAG_NVIDIA_PRESERVED_MEM_0 0x10000
+#define ATAG_NVIDIA_PRESERVED_MEM_N 2
+#define ATAG_NVIDIA_FORCE_32 0x7fffffff
+
+struct tag_tegra {
+ __u32 bootarg_key;
+ __u32 bootarg_len;
+ char bootarg[1];
+};
+
+static int __init parse_tag_nvidia(const struct tag *tag)
+{
+
+ return 0;
+}
+__tagtable(ATAG_NVIDIA, parse_tag_nvidia);
+
+static struct tegra_utmip_config utmi_phy_config = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 9,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
};
-static struct platform_device debug_uart = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
+static struct tegra_ehci_platform_data tegra_ehci_pdata = {
+ .phy_config = &utmi_phy_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+};
+
+static struct tegra_nand_chip_parms nand_chip_parms[] = {
+ /* Samsung K5E2G1GACM */
+ [0] = {
+ .vendor_id = 0xEC,
+ .device_id = 0xAA,
+ .read_id_fourth_byte = 0x15,
+ .capacity = 256,
+ .timing = {
+ .trp = 21,
+ .trh = 15,
+ .twp = 21,
+ .twh = 15,
+ .tcs = 31,
+ .twhr = 60,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 30,
+ .tadl = 100,
+ },
+ },
+ /* Hynix H5PS1GB3EFR */
+ [1] = {
+ .vendor_id = 0xAD,
+ .device_id = 0xDC,
+ .read_id_fourth_byte = 0x95,
+ .capacity = 512,
+ .timing = {
+ .trp = 12,
+ .trh = 10,
+ .twp = 12,
+ .twh = 10,
+ .tcs = 20,
+ .twhr = 80,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 20,
+ .tadl = 70,
+ },
+ },
+};
+
+struct tegra_nand_platform harmony_nand_data = {
+ .max_chips = 8,
+ .chip_parms = nand_chip_parms,
+ .nr_chip_parms = ARRAY_SIZE(nand_chip_parms),
+ .wp_gpio = TEGRA_GPIO_PC7,
+};
+
+static struct resource resources_nand[] = {
+ [0] = {
+ .start = INT_NANDFLASH,
+ .end = INT_NANDFLASH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_nand_device = {
+ .name = "tegra_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(resources_nand),
+ .resource = resources_nand,
.dev = {
- .platform_data = debug_uart_platform_data,
+ .platform_data = &harmony_nand_data,
+ },
+};
+
+static struct gpio_keys_button harmony_gpio_keys_buttons[] = {
+ {
+ .code = KEY_POWER,
+ .gpio = TEGRA_GPIO_POWERKEY,
+ .active_low = 1,
+ .desc = "Power",
+ .type = EV_KEY,
+ .wakeup = 1,
},
};
+static struct gpio_keys_platform_data harmony_gpio_keys = {
+ .buttons = harmony_gpio_keys_buttons,
+ .nbuttons = ARRAY_SIZE(harmony_gpio_keys_buttons),
+};
+
+static struct platform_device harmony_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &harmony_gpio_keys,
+ }
+};
+
+static void harmony_keys_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(harmony_gpio_keys_buttons); i++)
+ tegra_gpio_enable(harmony_gpio_keys_buttons[i].gpio);
+}
+
static struct tegra_wm8903_platform_data harmony_audio_pdata = {
.gpio_spkr_en = TEGRA_GPIO_SPKR_EN,
.gpio_hp_det = TEGRA_GPIO_HP_DET,
@@ -82,6 +212,43 @@ static struct platform_device harmony_audio_device = {
},
};
+static struct tegra_i2c_platform_data harmony_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+};
+
+static const struct tegra_pingroup_config i2c2_ddc = {
+ .pingroup = TEGRA_PINGROUP_DDC,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static const struct tegra_pingroup_config i2c2_gen2 = {
+ .pingroup = TEGRA_PINGROUP_PTA,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static struct tegra_i2c_platform_data harmony_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 2,
+ .bus_clk_rate = { 100000, 100000 },
+ .bus_mux = { &i2c2_ddc, &i2c2_gen2 },
+ .bus_mux_len = { 1, 1 },
+};
+
+static struct tegra_i2c_platform_data harmony_i2c3_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+};
+
+static struct tegra_i2c_platform_data harmony_dvc_platform_data = {
+ .adapter_nr = 4,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .is_dvc = true,
+};
+
static struct wm8903_platform_data harmony_wm8903_pdata = {
.irq_active_low = 0,
.micdet_cfg = 0,
@@ -104,6 +271,11 @@ static struct i2c_board_info __initdata wm8903_board_info = {
static void __init harmony_i2c_init(void)
{
+ tegra_i2c_device1.dev.platform_data = &harmony_i2c1_platform_data;
+ tegra_i2c_device2.dev.platform_data = &harmony_i2c2_platform_data;
+ tegra_i2c_device3.dev.platform_data = &harmony_i2c3_platform_data;
+ tegra_i2c_device4.dev.platform_data = &harmony_dvc_platform_data;
+
platform_device_register(&tegra_i2c_device1);
platform_device_register(&tegra_i2c_device2);
platform_device_register(&tegra_i2c_device3);
@@ -112,15 +284,100 @@ static void __init harmony_i2c_init(void)
i2c_register_board_info(0, &wm8903_board_info, 1);
}
+/* OTG gadget device */
+/*static u64 tegra_otg_dmamask = DMA_BIT_MASK(32);
+
+
+static struct resource tegra_otg_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct fsl_usb2_platform_data tegra_otg_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_UTMI,
+};
+
+static struct platform_device tegra_otg = {
+ .name = "fsl-tegra-udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = &tegra_otg_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &tegra_otg_pdata,
+ },
+ .resource = tegra_otg_resources,
+ .num_resources = ARRAY_SIZE(tegra_otg_resources),
+};*/
+
+/* PDA power */
+static struct pda_power_pdata pda_power_pdata = {
+};
+
+static struct platform_device pda_power_device = {
+ .name = "pda_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &pda_power_pdata,
+ },
+};
+
+static void harmony_debug_uart_init(void)
+{
+ struct clk *c;
+
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartd");
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uartd_device.dev.platform_data))->mapbase;
+
+ if (!IS_ERR_OR_NULL(debug_uart_clk)) {
+ pr_info("The debug console clock name is %s\n",
+ debug_uart_clk->name);
+ c = tegra_get_clock_by_name("pll_p");
+ if (IS_ERR_OR_NULL(c))
+ pr_err("Not getting the parent clock pll_p\n");
+ else
+ clk_set_parent(debug_uart_clk, c);
+
+ clk_enable(debug_uart_clk);
+ clk_set_rate(debug_uart_clk, clk_get_rate(c));
+ } else {
+ pr_err("Not getting the clock %s for debug console\n",
+ debug_uart_clk->name);
+ }
+}
+
static struct platform_device *harmony_devices[] __initdata = {
- &debug_uart,
+ &debug_uartd_device,
&tegra_sdhci_device1,
&tegra_sdhci_device2,
&tegra_sdhci_device4,
&tegra_i2s_device1,
+ &tegra_spdif_device,
&tegra_das_device,
+ &spdif_dit_device,
&tegra_pcm_device,
&harmony_audio_device,
+ &tegra_pmu_device,
+ &tegra_nand_device,
+ &tegra_udc_device,
+ &harmony_gpio_keys_device,
+ &pda_power_device,
+ &tegra_ehci3_device,
+ &tegra_spi_device1,
+ &tegra_spi_device2,
+ &tegra_spi_device3,
+ &tegra_spi_device4,
+ &tegra_gart_device,
};
static void __init tegra_harmony_fixup(struct machine_desc *desc,
@@ -136,10 +393,13 @@ static void __init tegra_harmony_fixup(struct machine_desc *desc,
static __initdata struct tegra_clk_init_table harmony_clk_init_table[] = {
/* name parent rate enabled */
{ "uartd", "pll_p", 216000000, true },
- { "pll_a", "pll_p_out1", 56448000, true },
- { "pll_a_out0", "pll_a", 11289600, true },
- { "cdev1", NULL, 0, true },
- { "i2s1", "pll_a_out0", 11289600, false},
+ { "i2s1", "pll_a_out0", 0, false},
+ { "spdif_out", "pll_a_out0", 0, false},
+ { "sdmmc1", "clk_m", 48000000, true },
+ { "sdmmc2", "clk_m", 48000000, true },
+ { "sdmmc4", "clk_m", 48000000, true },
+ { "ndflash", "pll_p", 108000000, true},
+ { "pwm", "clk_32k", 32768, false},
{ NULL, NULL, 0, 0},
};
@@ -163,25 +423,98 @@ static struct tegra_sdhci_platform_data sdhci_pdata4 = {
.is_8bit = 1,
};
+static int __init harmony_wifi_init(void)
+{
+ int gpio_pwr, gpio_rst;
+
+ if (!machine_is_harmony())
+ return 0;
+
+ /* WLAN - Power up (low) and Reset (low) */
+ gpio_pwr = gpio_request(TEGRA_GPIO_WLAN_PWR_LOW, "wlan_pwr");
+ gpio_rst = gpio_request(TEGRA_GPIO_WLAN_RST_LOW, "wlan_rst");
+ if (gpio_pwr < 0 || gpio_rst < 0)
+ pr_warning("Unable to get gpio for WLAN Power and Reset\n");
+ else {
+ tegra_gpio_enable(TEGRA_GPIO_WLAN_PWR_LOW);
+ tegra_gpio_enable(TEGRA_GPIO_WLAN_RST_LOW);
+ /* toggle in this order as per spec */
+ gpio_direction_output(TEGRA_GPIO_WLAN_PWR_LOW, 0);
+ gpio_direction_output(TEGRA_GPIO_WLAN_RST_LOW, 0);
+ udelay(5);
+ gpio_direction_output(TEGRA_GPIO_WLAN_PWR_LOW, 1);
+ gpio_direction_output(TEGRA_GPIO_WLAN_RST_LOW, 1);
+ }
+
+ return 0;
+}
+
+/*
+ * subsys_initcall_sync is a good sync point to call harmony_wifi_init.
+ * It makes sure that the required regulators (the LDO3 supply of the
+ * external PMU and the 1.2V regulator) are properly enabled, and that
+ * the mmc driver has not yet probed for a device on the SDIO bus.
+ */
+subsys_initcall_sync(harmony_wifi_init);
+
+static void harmony_power_off(void)
+{
+ int ret;
+
+ ret = tps6586x_power_off();
+ if (ret)
+ pr_err("harmony: failed to power off\n");
+
+ while (1);
+}
+
+static void __init harmony_power_off_init(void)
+{
+ pm_power_off = harmony_power_off;
+}
+
static void __init tegra_harmony_init(void)
{
tegra_clk_init_from_table(harmony_clk_init_table);
harmony_pinmux_init();
+ harmony_keys_init();
+
+ harmony_debug_uart_init();
+
tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1;
tegra_sdhci_device2.dev.platform_data = &sdhci_pdata2;
tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4;
+ tegra_ehci3_device.dev.platform_data = &tegra_ehci_pdata;
+
platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices));
harmony_i2c_init();
harmony_regulator_init();
+ harmony_suspend_init();
+ harmony_panel_init();
+#ifdef CONFIG_KEYBOARD_TEGRA
+ harmony_kbc_init();
+#endif
+ harmony_pcie_init();
+ harmony_power_off_init();
+}
+
+void __init tegra_harmony_reserve(void)
+{
+ if (memblock_reserve(0x0, 4096) < 0)
+ pr_warn("Cannot reserve first 4K of memory for safety\n");
+
+ tegra_reserve(SZ_128M, SZ_8M, 0);
}
MACHINE_START(HARMONY, "harmony")
.boot_params = 0x00000100,
.fixup = tegra_harmony_fixup,
.map_io = tegra_map_common_io,
+ .reserve = tegra_harmony_reserve,
.init_early = tegra_init_early,
.init_irq = tegra_init_irq,
.timer = &tegra_timer,
diff --git a/arch/arm/mach-tegra/board-harmony.h b/arch/arm/mach-tegra/board-harmony.h
index d85142edaf6b..edd161669904 100644
--- a/arch/arm/mach-tegra/board-harmony.h
+++ b/arch/arm/mach-tegra/board-harmony.h
@@ -31,8 +31,23 @@
#define TEGRA_GPIO_HP_DET TEGRA_GPIO_PW2
#define TEGRA_GPIO_INT_MIC_EN TEGRA_GPIO_PX0
#define TEGRA_GPIO_EXT_MIC_EN TEGRA_GPIO_PX1
+/* fixed voltage regulator enable/mode gpios */
+#define TPS_GPIO_EN_1V5 (HARMONY_GPIO_TPS6586X(0))
+#define TPS_GPIO_EN_1V2 (HARMONY_GPIO_TPS6586X(1))
+#define TPS_GPIO_EN_1V05 (HARMONY_GPIO_TPS6586X(2))
+#define TPS_GPIO_MODE_1V05 (HARMONY_GPIO_TPS6586X(3))
+
+/* WLAN pwr and reset gpio */
+#define TEGRA_GPIO_WLAN_PWR_LOW TEGRA_GPIO_PK5
+#define TEGRA_GPIO_WLAN_RST_LOW TEGRA_GPIO_PK6
+
+#define TEGRA_GPIO_POWERKEY TEGRA_GPIO_PV2
void harmony_pinmux_init(void);
int harmony_regulator_init(void);
+int harmony_suspend_init(void);
+int harmony_panel_init(void);
+int harmony_kbc_init(void);
+int harmony_pcie_init(void);
#endif
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
index 0bda495e9742..56dc2c279289 100644
--- a/arch/arm/mach-tegra/board-seaboard-pinmux.c
+++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c
@@ -17,7 +17,6 @@
#include <linux/gpio.h>
#include <mach/pinmux.h>
-#include <mach/pinmux-t2.h>
#include "gpio-names.h"
#include "board-seaboard.h"
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index 89a6d2adc1de..823060ec478f 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -33,6 +33,7 @@
#include <mach/iomap.h>
#include <mach/sdhci.h>
#include <mach/gpio.h>
+#include <mach/pci.h>
#include "board.h"
#include "clock.h"
@@ -79,6 +80,13 @@ static struct platform_device trimslice_audio_device = {
.id = 0,
};
+static struct tegra_pci_platform_data trimslice_pci_platform_data = {
+ .port_status[0] = 1,
+ .port_status[1] = 1,
+ .use_dock_detect = 0,
+ .gpio = 0,
+};
+
static struct platform_device *trimslice_devices[] __initdata = {
&debug_uart,
&tegra_sdhci_device1,
@@ -87,6 +95,7 @@ static struct platform_device *trimslice_devices[] __initdata = {
&tegra_das_device,
&tegra_pcm_device,
&trimslice_audio_device,
+ &trimslice_pci_platform_data,
};
static struct i2c_board_info trimslice_i2c3_board_info[] = {
@@ -146,15 +155,6 @@ static __initdata struct tegra_clk_init_table trimslice_clk_init_table[] = {
{ NULL, NULL, 0, 0},
};
-static int __init tegra_trimslice_pci_init(void)
-{
- if (!machine_is_trimslice())
- return 0;
-
- return tegra_pcie_init(true, true);
-}
-subsys_initcall(tegra_trimslice_pci_init);
-
static void __init tegra_trimslice_init(void)
{
tegra_clk_init_from_table(trimslice_clk_init_table);
@@ -163,6 +163,7 @@ static void __init tegra_trimslice_init(void)
tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1;
tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4;
+ tegra_pci_device.dev.platform_data = &trimslice_pci_platform_data;
platform_add_devices(trimslice_devices, ARRAY_SIZE(trimslice_devices));
diff --git a/arch/arm/mach-tegra/board-ventana-memory.c b/arch/arm/mach-tegra/board-ventana-memory.c
new file mode 100644
index 000000000000..9ef7c7797341
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana-memory.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "board-ventana.h"
+#include "tegra2_emc.h"
+#include "board.h"
+
+static const struct tegra_emc_table ventana_emc_tables_elpida_300Mhz[] = {
+ {
+ .rate = 25000, /* SDRAM frequency */
+ .regs = {
+ 0x00000002, /* RC */
+ 0x00000006, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000004, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x0000004d, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x00000004, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000068, /* TREFBW */
+ 0x00000003, /* QUSE_EXTRA */
+ 0x00000003, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06a04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000003, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 50000, /* SDRAM frequency */
+ .regs = {
+ 0x00000003, /* RC */
+ 0x00000007, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x0000009f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x00000007, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x000000d0, /* TREFBW */
+ 0x00000004, /* QUSE_EXTRA */
+ 0x00000000, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06a04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000005, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 75000, /* SDRAM frequency */
+ .regs = {
+ 0x00000005, /* RC */
+ 0x0000000a, /* RFC */
+ 0x00000004, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x000000ff, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x0000000b, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000138, /* TREFBW */
+ 0x00000004, /* QUSE_EXTRA */
+ 0x00000000, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06a04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000007, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 150000, /* SDRAM frequency */
+ .regs = {
+ 0x00000009, /* RC */
+ 0x00000014, /* RFC */
+ 0x00000007, /* RAS */
+ 0x00000004, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x0000021f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000004, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x00000015, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000270, /* TREFBW */
+ 0x00000000, /* QUSE_EXTRA */
+ 0x00000001, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xA04C04AE, /* CFG_DIG_DLL */
+ 0x007FC010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x0000000e, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 300000, /* SDRAM frequency */
+ .regs = {
+ 0x00000012, /* RC */
+ 0x00000027, /* RFC */
+ 0x0000000D, /* RAS */
+ 0x00000007, /* RP */
+ 0x00000007, /* R2W */
+ 0x00000005, /* W2R */
+ 0x00000003, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000006, /* RD_RCD */
+ 0x00000006, /* WR_RCD */
+ 0x00000003, /* RRD */
+ 0x00000003, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000009, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x0000045f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000004, /* PDEX2WR */
+ 0x00000004, /* PDEX2RD */
+ 0x00000007, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000e, /* RW2PDEN */
+ 0x0000002A, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x0000000F, /* TFAW */
+ 0x00000008, /* TRPAB */
+ 0x00000005, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x000004E1, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000002, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000282, /* FBIO_CFG5 */
+ 0xE03C048B, /* CFG_DIG_DLL */
+ 0x007FC010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x0000001B, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ }
+};
+
+static const struct tegra_emc_table ventana_emc_tables_elpida_400Mhz[] = {
+ {
+ .rate = 23750, /* SDRAM frequency */
+ .regs = {
+ 0x00000002, /* RC */
+ 0x00000006, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x00000047, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000b, /* RW2PDEN */
+ 0x00000004, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000060, /* TREFBW */
+ 0x00000004, /* QUSE_EXTRA */
+ 0x00000003, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa0ae04ae, /* CFG_DIG_DLL */
+ 0x0001f800, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000003, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 63333, /* SDRAM frequency */
+ .regs = {
+ 0x00000004, /* RC */
+ 0x00000009, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x000000c4, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000b, /* RW2PDEN */
+ 0x00000009, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000107, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000000, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa0ae04ae, /* CFG_DIG_DLL */
+ 0x0001f800, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000006, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 95000, /* SDRAM frequency */
+ .regs = {
+ 0x00000006, /* RC */
+ 0x0000000d, /* RFC */
+ 0x00000004, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x0000013f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000b, /* RW2PDEN */
+ 0x0000000e, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x0000018c, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000001, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa0ae04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000009, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 190000, /* SDRAM frequency */
+ .regs = {
+ 0x0000000c, /* RC */
+ 0x00000019, /* RFC */
+ 0x00000008, /* RAS */
+ 0x00000004, /* RP */
+ 0x00000007, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000004, /* RD_RCD */
+ 0x00000004, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000003, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000009, /* QSAFE */
+ 0x0000000d, /* RDV */
+ 0x000002bf, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000004, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000c, /* RW2PDEN */
+ 0x0000001b, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x0000000a, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000317, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000002, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06204ae, /* CFG_DIG_DLL */
+ 0x007f7010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000012, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 380000, /* SDRAM frequency */
+ .regs = {
+ 0x00000017, /* RC */
+ 0x00000032, /* RFC */
+ 0x00000010, /* RAS */
+ 0x00000007, /* RP */
+ 0x00000008, /* R2W */
+ 0x00000005, /* W2R */
+ 0x00000003, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000007, /* RD_RCD */
+ 0x00000007, /* WR_RCD */
+ 0x00000004, /* RRD */
+ 0x00000003, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000007, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x0000000a, /* QSAFE */
+ 0x0000000e, /* RDV */
+ 0x0000059f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000004, /* PDEX2WR */
+ 0x00000004, /* PDEX2RD */
+ 0x00000007, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x00000011, /* RW2PDEN */
+ 0x00000036, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000013, /* TFAW */
+ 0x00000008, /* TRPAB */
+ 0x00000007, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x0000062d, /* TREFBW */
+ 0x00000006, /* QUSE_EXTRA */
+ 0x00000003, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000282, /* FBIO_CFG5 */
+ 0xe044048b, /* CFG_DIG_DLL */
+ 0x007fb010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000023, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ }
+};
+
+static const struct tegra_emc_chip ventana_emc_chips[] = {
+ {
+ .description = "Elpida 300MHz",
+ .mem_manufacturer_id = 0x0303,
+ .mem_revision_id1 = -1,
+ .mem_revision_id2 = -1,
+ .mem_pid = -1,
+ .table = ventana_emc_tables_elpida_300Mhz,
+ .table_size = ARRAY_SIZE(ventana_emc_tables_elpida_300Mhz)
+ },
+};
+
+static const struct tegra_emc_chip ventana_t25_emc_chips[] = {
+ {
+ .description = "Elpida 400MHz",
+ .mem_manufacturer_id = 0x0303,
+ .mem_revision_id1 = -1,
+ .mem_revision_id2 = -1,
+ .mem_pid = -1,
+ .table = ventana_emc_tables_elpida_400Mhz,
+ .table_size = ARRAY_SIZE(ventana_emc_tables_elpida_400Mhz)
+ },
+};
+
+static const struct tegra_emc_chip ventana_siblings_emc_chips[] = {
+};
+
+#define TEGRA25_SKU 0x0B00
+#define board_is_ventana(bi) (bi.board_id == 0x24b || bi.board_id == 0x252)
+
+int ventana_emc_init(void)
+{
+ struct board_info BoardInfo;
+
+ tegra_get_board_info(&BoardInfo);
+
+ if (board_is_ventana(BoardInfo)) {
+ if (BoardInfo.sku == TEGRA25_SKU)
+ tegra_init_emc(ventana_t25_emc_chips,
+ ARRAY_SIZE(ventana_t25_emc_chips));
+ else
+ tegra_init_emc(ventana_emc_chips,
+ ARRAY_SIZE(ventana_emc_chips));
+ } else {
+ pr_info("ventana_emc_init: using ventana_siblings_emc_chips\n");
+ tegra_init_emc(ventana_siblings_emc_chips,
+ ARRAY_SIZE(ventana_siblings_emc_chips));
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-ventana-panel.c b/arch/arm/mach-tegra/board-ventana-panel.c
new file mode 100644
index 000000000000..5290a3da02ca
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana-panel.c
@@ -0,0 +1,444 @@
+/*
+ * arch/arm/mach-tegra/board-ventana-panel.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/earlysuspend.h>
+#include <linux/pwm_backlight.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "devices.h"
+#include "gpio-names.h"
+#include "board.h"
+
+#define ventana_pnl_pwr_enb TEGRA_GPIO_PC6
+#define ventana_bl_enb TEGRA_GPIO_PD4
+#define ventana_lvds_shutdown TEGRA_GPIO_PB2
+#define ventana_hdmi_hpd TEGRA_GPIO_PN7
+#define ventana_hdmi_enb TEGRA_GPIO_PV5
+
+/*panel power on sequence timing*/
+#define ventana_pnl_to_lvds_ms 0
+#define ventana_lvds_to_bl_ms 200
+
+#ifdef CONFIG_TEGRA_DC
+static struct regulator *ventana_hdmi_reg = NULL;
+static struct regulator *ventana_hdmi_pll = NULL;
+#endif
+
+static int ventana_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(ventana_bl_enb, "backlight_enb");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(ventana_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(ventana_bl_enb);
+ else
+ tegra_gpio_enable(ventana_bl_enb);
+
+ return ret;
+}
+
+static void ventana_backlight_exit(struct device *dev)
+{
+ gpio_set_value(ventana_bl_enb, 0);
+ gpio_free(ventana_bl_enb);
+ tegra_gpio_disable(ventana_bl_enb);
+}
+
+static int ventana_backlight_notify(struct device *unused, int brightness)
+{
+ gpio_set_value(ventana_bl_enb, !!brightness);
+ return brightness;
+}
+
+static int ventana_disp1_check_fb(struct device *dev, struct fb_info *info);
+
+static struct platform_pwm_backlight_data ventana_backlight_data = {
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .dft_brightness = 224,
+ .pwm_period_ns = 5000000,
+ .init = ventana_backlight_init,
+ .exit = ventana_backlight_exit,
+ .notify = ventana_backlight_notify,
+ /* Only toggle backlight on fb blank notifications for disp1 */
+ .check_fb = ventana_disp1_check_fb,
+};
+
+static struct platform_device ventana_backlight_device = {
+ .name = "pwm-backlight",
+ .id = -1,
+ .dev = {
+ .platform_data = &ventana_backlight_data,
+ },
+};
+
+#ifdef CONFIG_TEGRA_DC
+static int ventana_panel_enable(void)
+{
+ struct regulator *reg = regulator_get(NULL, "vdd_ldo4");
+
+ if (!IS_ERR_OR_NULL(reg)) {
+ regulator_enable(reg);
+ regulator_put(reg);
+ }
+
+ gpio_set_value(ventana_pnl_pwr_enb, 1);
+ mdelay(ventana_pnl_to_lvds_ms);
+ gpio_set_value(ventana_lvds_shutdown, 1);
+ mdelay(ventana_lvds_to_bl_ms);
+ return 0;
+}
+
+static int ventana_panel_disable(void)
+{
+ gpio_set_value(ventana_lvds_shutdown, 0);
+ gpio_set_value(ventana_pnl_pwr_enb, 0);
+ return 0;
+}
+
+static int ventana_hdmi_enable(void)
+{
+ if (!ventana_hdmi_reg) {
+ ventana_hdmi_reg = regulator_get(NULL, "avdd_hdmi"); /* LDO7 */
+ if (IS_ERR_OR_NULL(ventana_hdmi_reg)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi\n");
+ ventana_hdmi_reg = NULL;
+ return -ENODEV; /* PTR_ERR(NULL) would wrongly return 0 */
+ }
+ }
+ regulator_enable(ventana_hdmi_reg);
+
+ if (!ventana_hdmi_pll) {
+ ventana_hdmi_pll = regulator_get(NULL, "avdd_hdmi_pll"); /* LDO8 */
+ if (IS_ERR_OR_NULL(ventana_hdmi_pll)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi_pll\n");
+ ventana_hdmi_pll = NULL;
+ regulator_disable(ventana_hdmi_reg);
+ ventana_hdmi_reg = NULL;
+ return -ENODEV; /* PTR_ERR(NULL) would wrongly return 0 */
+ }
+ }
+ regulator_enable(ventana_hdmi_pll);
+ return 0;
+}
+
+static int ventana_hdmi_disable(void)
+{
+ regulator_disable(ventana_hdmi_reg);
+ regulator_disable(ventana_hdmi_pll);
+ return 0;
+}
+
+static struct resource ventana_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource ventana_disp2_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_B_GENERAL,
+ .end = INT_DISPLAY_B_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "hdmi_regs",
+ .start = TEGRA_HDMI_BASE,
+ .end = TEGRA_HDMI_BASE + TEGRA_HDMI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct tegra_dc_mode ventana_panel_modes[] = {
+ {
+ .pclk = 72072000,
+ .h_ref_to_sync = 11,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 58,
+ .v_sync_width = 4,
+ .h_back_porch = 58,
+ .v_back_porch = 4,
+ .h_active = 1366,
+ .v_active = 768,
+ .h_front_porch = 58,
+ .v_front_porch = 4,
+ },
+};
+
+static struct tegra_fb_data ventana_fb_data = {
+ .win = 0,
+ .xres = 1366,
+ .yres = 768,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_fb_data ventana_hdmi_fb_data = {
+ .win = 0,
+ .xres = 1366,
+ .yres = 768,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_out ventana_disp1_out = {
+ .type = TEGRA_DC_OUT_RGB,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+ .depth = 18,
+ .dither = TEGRA_DC_ORDERED_DITHER,
+
+ .modes = ventana_panel_modes,
+ .n_modes = ARRAY_SIZE(ventana_panel_modes),
+
+ .enable = ventana_panel_enable,
+ .disable = ventana_panel_disable,
+};
+
+static struct tegra_dc_out ventana_disp2_out = {
+ .type = TEGRA_DC_OUT_HDMI,
+ .flags = TEGRA_DC_OUT_HOTPLUG_HIGH,
+
+ .dcc_bus = 1,
+ .hotplug_gpio = ventana_hdmi_hpd,
+
+ .max_pixclock = KHZ2PICOS(148500),
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .enable = ventana_hdmi_enable,
+ .disable = ventana_hdmi_disable,
+};
+
+static struct tegra_dc_platform_data ventana_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &ventana_disp1_out,
+ .fb = &ventana_fb_data,
+};
+
+static struct tegra_dc_platform_data ventana_disp2_pdata = {
+ .flags = 0,
+ .default_out = &ventana_disp2_out,
+ .fb = &ventana_hdmi_fb_data,
+};
+
+static struct nvhost_device ventana_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = ventana_disp1_resources,
+ .num_resources = ARRAY_SIZE(ventana_disp1_resources),
+ .dev = {
+ .platform_data = &ventana_disp1_pdata,
+ },
+};
+
+static int ventana_disp1_check_fb(struct device *dev, struct fb_info *info)
+{
+ return info->device == &ventana_disp1_device.dev;
+}
+
+static struct nvhost_device ventana_disp2_device = {
+ .name = "tegradc",
+ .id = 1,
+ .resource = ventana_disp2_resources,
+ .num_resources = ARRAY_SIZE(ventana_disp2_resources),
+ .dev = {
+ .platform_data = &ventana_disp2_pdata,
+ },
+};
+#else
+static int ventana_disp1_check_fb(struct device *dev, struct fb_info *info)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static struct nvmap_platform_carveout ventana_carveouts[] = {
+ [0] = NVMAP_HEAP_CARVEOUT_IRAM_INIT,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data ventana_nvmap_data = {
+ .carveouts = ventana_carveouts,
+ .nr_carveouts = ARRAY_SIZE(ventana_carveouts),
+};
+
+static struct platform_device ventana_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &ventana_nvmap_data,
+ },
+};
+#endif
+
+static struct platform_device *ventana_gfx_devices[] __initdata = {
+#if defined(CONFIG_TEGRA_NVMAP)
+ &ventana_nvmap_device,
+#endif
+#ifdef CONFIG_TEGRA_GRHOST
+ &tegra_grhost_device,
+#endif
+ &tegra_pwfm2_device,
+ &ventana_backlight_device,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* put early_suspend/late_resume handlers here for the display in order
+ * to keep the code out of the display driver, keeping it closer to upstream
+ */
+struct early_suspend ventana_panel_early_suspender;
+
+static void ventana_panel_early_suspend(struct early_suspend *h)
+{
+ /* power down LCD, and use a black screen for HDMI */
+ if (num_registered_fb > 0)
+ fb_blank(registered_fb[0], FB_BLANK_POWERDOWN);
+ if (num_registered_fb > 1)
+ fb_blank(registered_fb[1], FB_BLANK_NORMAL);
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ cpufreq_save_default_governor();
+ cpufreq_set_conservative_governor();
+ cpufreq_set_conservative_governor_param(
+ SET_CONSERVATIVE_GOVERNOR_UP_THRESHOLD,
+ SET_CONSERVATIVE_GOVERNOR_DOWN_THRESHOLD);
+#endif
+}
+
+static void ventana_panel_late_resume(struct early_suspend *h)
+{
+ unsigned i;
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ cpufreq_restore_default_governor();
+#endif
+ for (i = 0; i < num_registered_fb; i++)
+ fb_blank(registered_fb[i], FB_BLANK_UNBLANK);
+}
+#endif
+
+int __init ventana_panel_init(void)
+{
+ int err;
+ struct resource __maybe_unused *res;
+
+ gpio_request(ventana_pnl_pwr_enb, "pnl_pwr_enb");
+ gpio_direction_output(ventana_pnl_pwr_enb, 1);
+ tegra_gpio_enable(ventana_pnl_pwr_enb);
+
+ gpio_request(ventana_lvds_shutdown, "lvds_shdn");
+ gpio_direction_output(ventana_lvds_shutdown, 1);
+ tegra_gpio_enable(ventana_lvds_shutdown);
+
+ tegra_gpio_enable(ventana_hdmi_enb);
+ gpio_request(ventana_hdmi_enb, "hdmi_5v_en");
+ gpio_direction_output(ventana_hdmi_enb, 1);
+
+ tegra_gpio_enable(ventana_hdmi_hpd);
+ gpio_request(ventana_hdmi_hpd, "hdmi_hpd");
+ gpio_direction_input(ventana_hdmi_hpd);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ventana_panel_early_suspender.suspend = ventana_panel_early_suspend;
+ ventana_panel_early_suspender.resume = ventana_panel_late_resume;
+ ventana_panel_early_suspender.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ register_early_suspend(&ventana_panel_early_suspender);
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+ ventana_carveouts[1].base = tegra_carveout_start;
+ ventana_carveouts[1].size = tegra_carveout_size;
+#endif
+
+ err = platform_add_devices(ventana_gfx_devices,
+ ARRAY_SIZE(ventana_gfx_devices));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ res = nvhost_get_resource_byname(&ventana_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+
+ res = nvhost_get_resource_byname(&ventana_disp2_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb2_start;
+ res->end = tegra_fb2_start + tegra_fb2_size - 1;
+#endif
+
+ /* Copy the bootloader fb to the fb. */
+ tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
+ min(tegra_fb_size, tegra_bootloader_fb_size));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ if (!err)
+ err = nvhost_device_register(&ventana_disp1_device);
+
+ if (!err)
+ err = nvhost_device_register(&ventana_disp2_device);
+#endif
+
+ return err;
+}
+
diff --git a/arch/arm/mach-tegra/board-ventana-pinmux.c b/arch/arm/mach-tegra/board-ventana-pinmux.c
new file mode 100644
index 000000000000..9f3447264599
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana-pinmux.c
@@ -0,0 +1,194 @@
+/*
+ * arch/arm/mach-tegra/board-ventana-pinmux.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <mach/pinmux.h>
+
+#include "board-ventana.h"
+#include "gpio-names.h"
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+#define SET_DRIVE(_name, _hsm, _schmitt, _drive, _pulldn_drive, _pullup_drive, _pulldn_slew, _pullup_slew) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_##_hsm, \
+ .schmitt = TEGRA_SCHMITT_##_schmitt, \
+ .drive = TEGRA_DRIVE_##_drive, \
+ .pull_down = TEGRA_PULL_##_pulldn_drive, \
+ .pull_up = TEGRA_PULL_##_pullup_drive, \
+ .slew_rising = TEGRA_SLEW_##_pulldn_slew, \
+ .slew_falling = TEGRA_SLEW_##_pullup_slew, \
+ }
+
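+/* Pad drive/slew settings: DDC, VI1 and SDIO1 keep the defaults (slowest
+ * slew), while DBG, VI2, AT1 and AO1 use the fastest slew rates. */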
+static __initdata struct tegra_drive_pingroup_config ventana_drive_pinmux[] = {
+ DEFAULT_DRIVE(DDC),
+ DEFAULT_DRIVE(VI1),
+ DEFAULT_DRIVE(SDIO1),
+
+ SET_DRIVE(DBG, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+ SET_DRIVE(VI2, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+ SET_DRIVE(AT1, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+ SET_DRIVE(AO1, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+};
+
+static __initdata struct tegra_pingroup_config ventana_pinmux[] = {
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LCSN, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDI, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
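+/* GPIOs used by the WM8903 audio codec and sound card: codec IRQ,
+ * headphone detect and the internal/external mic enables. */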
+static struct tegra_gpio_table gpio_table[] = {
+ { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true },
+ { .gpio = TEGRA_GPIO_HP_DET, .enable = true },
+ { .gpio = TEGRA_GPIO_INT_MIC_EN, .enable = true },
+ { .gpio = TEGRA_GPIO_EXT_MIC_EN, .enable = true },
+};
+
+int __init ventana_pinmux_init(void)
+{
+ tegra_pinmux_config_table(ventana_pinmux, ARRAY_SIZE(ventana_pinmux));
+ tegra_drive_pinmux_config_table(ventana_drive_pinmux,
+ ARRAY_SIZE(ventana_drive_pinmux));
+
+ tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-ventana-power.c b/arch/arm/mach-tegra/board-ventana-power.c
new file mode 100644
index 000000000000..6d8ea8db3894
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana-power.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/power/gpio-charger.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include <generated/mach-types.h>
+
+#include "gpio-names.h"
+#include "fuse.h"
+#include "pm.h"
+#include "wakeups-t2.h"
+#include "board.h"
+#include "board-ventana.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+#define CHARGING_DISABLE TEGRA_GPIO_PR6
+
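+/* De-assert the charge-disable GPIO at boot so battery charging stays enabled. */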
+int __init ventana_charge_init(void)
+{
+ gpio_request(CHARGING_DISABLE, "chg_disable");
+ gpio_direction_output(CHARGING_DISABLE, 0);
+ tegra_gpio_enable(CHARGING_DISABLE);
+ return 0;
+}
+
+static struct regulator_consumer_supply tps658621_sm0_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+};
+static struct regulator_consumer_supply tps658621_sm1_supply[] = {
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+};
+static struct regulator_consumer_supply tps658621_sm2_supply[] = {
+ REGULATOR_SUPPLY("vdd_sm2", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo0_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo0", NULL),
+ REGULATOR_SUPPLY("p_cam_avdd", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo1_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo1", NULL),
+ REGULATOR_SUPPLY("avdd_pll", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo2_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo2", NULL),
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
+ REGULATOR_SUPPLY("vdd_aon", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo3_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo3", NULL),
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo4_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo4", NULL),
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vddio_sys", "panjit_touch"),
+};
+static struct regulator_consumer_supply tps658621_ldo5_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo5", NULL),
+ REGULATOR_SUPPLY("vmmc", "sdhci-tegra.3"),
+};
+static struct regulator_consumer_supply tps658621_ldo6_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo6", NULL),
+ REGULATOR_SUPPLY("vcsi", "tegra_camera"),
+ REGULATOR_SUPPLY("vdd_dmic", "tegra-snd-wm8903"),
+ REGULATOR_SUPPLY("vdd_i2c", "3-0030"),
+ REGULATOR_SUPPLY("vdd_i2c", "6-0072"),
+ REGULATOR_SUPPLY("vdd_i2c", "7-0072"),
+};
+static struct regulator_consumer_supply tps658621_ldo7_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo7", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo8_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo8", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo9_supply[] = {
+ REGULATOR_SUPPLY("vdd_ldo9", NULL),
+ REGULATOR_SUPPLY("avdd_2v85", NULL),
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("vdd_spk_amp", "tegra-snd-wm8903"),
+};
+
+static struct tps6586x_settings sm0_config = {
+ .sm_pwm_mode = PWM_DEFAULT_VALUE,
+ .slew_rate = SLEW_RATE_3520UV_PER_SEC,
+};
+
+static struct tps6586x_settings sm1_config = {
+ /*
+	 * The TPS6586x is known to glitch the CPU Vdd line when the load current
+	 * jumps from low to high in auto PWM/PFM mode, so force PWM-only mode here.
+ */
+ .sm_pwm_mode = PWM_ONLY,
+ .slew_rate = SLEW_RATE_3520UV_PER_SEC,
+};
+
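+/* Build regulator_init_data entries: voltage limits are given in mV, 'on'
+ * selects always_on, and 'config' passes optional TPS6586x SM settings
+ * through driver_data. */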
+#define REGULATOR_INIT(_id, _minmv, _maxmv, on, config) \
+ { \
+ .constraints = { \
+ .min_uV = (_minmv)*1000, \
+ .max_uV = (_maxmv)*1000, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = on, \
+ .apply_uV = 1, \
+ }, \
+ .num_consumer_supplies = ARRAY_SIZE(tps658621_##_id##_supply),\
+ .consumer_supplies = tps658621_##_id##_supply, \
+ .driver_data = config, \
+ }
+
+#define ON 1
+#define OFF 0
+
+static struct regulator_init_data sm0_data = REGULATOR_INIT(sm0, 725, 1500, ON, &sm0_config);
+static struct regulator_init_data sm1_data = REGULATOR_INIT(sm1, 725, 1500, ON, &sm1_config);
+static struct regulator_init_data sm2_data = REGULATOR_INIT(sm2, 3000, 4550, ON, NULL);
+static struct regulator_init_data ldo0_data = REGULATOR_INIT(ldo0, 1250, 3300, OFF, NULL);
+static struct regulator_init_data ldo1_data = REGULATOR_INIT(ldo1, 725, 1500, ON, NULL);
+static struct regulator_init_data ldo2_data = REGULATOR_INIT(ldo2, 725, 1500, OFF, NULL);
+static struct regulator_init_data ldo3_data = REGULATOR_INIT(ldo3, 1250, 3300, OFF, NULL);
+static struct regulator_init_data ldo4_data = REGULATOR_INIT(ldo4, 1700, 2475, ON, NULL);
+static struct regulator_init_data ldo5_data = REGULATOR_INIT(ldo5, 1250, 3300, ON, NULL);
+static struct regulator_init_data ldo6_data = REGULATOR_INIT(ldo6, 1800, 1800, OFF, NULL);
+static struct regulator_init_data ldo7_data = REGULATOR_INIT(ldo7, 1250, 3300, OFF, NULL);
+static struct regulator_init_data ldo8_data = REGULATOR_INIT(ldo8, 1250, 3300, OFF, NULL);
+static struct regulator_init_data ldo9_data = REGULATOR_INIT(ldo9, 1250, 3300, OFF, NULL);
+
+static struct tps6586x_rtc_platform_data rtc_data = {
+ .irq = TEGRA_NR_IRQS + TPS6586X_INT_RTC_ALM1,
+ .start = {
+ .year = 2009,
+ .month = 1,
+ .day = 1,
+ },
+ .cl_sel = TPS6586X_RTC_CL_SEL_1_5PF /* use lowest (external 20pF cap) */
+};
+
+#define TPS_REG(_id, _data) \
+ { \
+ .id = TPS6586X_ID_##_id, \
+ .name = "tps6586x-regulator", \
+ .platform_data = _data, \
+ }
+
+static struct tps6586x_subdev_info tps_devs[] = {
+ TPS_REG(SM_0, &sm0_data),
+ TPS_REG(SM_1, &sm1_data),
+ TPS_REG(SM_2, &sm2_data),
+ TPS_REG(LDO_0, &ldo0_data),
+ TPS_REG(LDO_1, &ldo1_data),
+ TPS_REG(LDO_2, &ldo2_data),
+ TPS_REG(LDO_3, &ldo3_data),
+ TPS_REG(LDO_4, &ldo4_data),
+ TPS_REG(LDO_5, &ldo5_data),
+ TPS_REG(LDO_6, &ldo6_data),
+ TPS_REG(LDO_7, &ldo7_data),
+ TPS_REG(LDO_8, &ldo8_data),
+ TPS_REG(LDO_9, &ldo9_data),
+ {
+ .id = 0,
+ .name = "tps6586x-rtc",
+ .platform_data = &rtc_data,
+ },
+};
+
+static struct tps6586x_platform_data tps_platform = {
+ .irq_base = TPS6586X_INT_BASE,
+ .num_subdevs = ARRAY_SIZE(tps_devs),
+ .subdevs = tps_devs,
+ .gpio_base = TPS6586X_GPIO_BASE,
+};
+
+static struct i2c_board_info __initdata ventana_regulators[] = {
+ {
+ I2C_BOARD_INFO("tps6586x", 0x34),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &tps_platform,
+ },
+};
+
+static void ventana_board_suspend(int lp_state, enum suspend_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_SUSPEND_BEFORE_CPU))
+ tegra_console_uart_suspend();
+}
+
+static void ventana_board_resume(int lp_state, enum resume_stage stg)
+{
+ if ((lp_state == TEGRA_SUSPEND_LP1) && (stg == TEGRA_RESUME_AFTER_CPU))
+ tegra_console_uart_resume();
+}
+
+static struct tegra_suspend_platform_data ventana_suspend_data = {
+ /*
+	 * The timer values below depend on the power-on time and the crystal
+	 * oscillator start-up time; check them for appropriate settings.
+ */
+ .cpu_timer = 2000,
+ .cpu_off_timer = 100,
+ .suspend_mode = TEGRA_SUSPEND_LP0,
+ .core_timer = 0x7e7e,
+ .core_off_timer = 0xf,
+ .corereq_high = false,
+ .sysclkreq_high = true,
+ .board_suspend = ventana_board_suspend,
+ .board_resume = ventana_board_resume,
+};
+
+int __init ventana_regulator_init(void)
+{
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ void __iomem *chip_id = IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804;
+ u32 pmc_ctrl;
+ u32 minor;
+
+ minor = (readl(chip_id) >> 16) & 0xf;
+ /* A03 (but not A03p) chips do not support LP0 */
+ if (minor == 3 && !(tegra_spare_fuse(18) || tegra_spare_fuse(19)))
+ ventana_suspend_data.suspend_mode = TEGRA_SUSPEND_LP1;
+
+	/* Configure the power management controller to treat the PMU
+	 * interrupt line as active-low. */
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ i2c_register_board_info(4, ventana_regulators, 1);
+
+// regulator_has_full_constraints();
+
+ tegra_init_suspend(&ventana_suspend_data);
+
+ return 0;
+}
+
+static char *ventana_battery[] = {
+ "battery",
+};
+
+static struct gpio_charger_platform_data ventana_charger_pdata = {
+ .name = "ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .gpio = AC_PRESENT_GPIO,
+ .gpio_active_low = 1,
+ .supplied_to = ventana_battery,
+ .num_supplicants = ARRAY_SIZE(ventana_battery),
+};
+
+static struct platform_device ventana_charger_device = {
+ .name = "gpio-charger",
+ .dev = {
+ .platform_data = &ventana_charger_pdata,
+ },
+};
+
+int __init ventana_charger_init(void)
+{
+ tegra_gpio_enable(AC_PRESENT_GPIO);
+ platform_device_register(&ventana_charger_device);
+ return 0;
+}
+
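+/* The PCIe supply is switched through the first TPS6586x PMU GPIO: request
+ * it, drive it high and export it so userspace can inspect it. */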
+static int __init ventana_pcie_init(void)
+{
+ int ret;
+
+ if (!machine_is_ventana())
+ return 0;
+
+ ret = gpio_request(TPS6586X_GPIO_BASE, "pcie_vdd");
+ if (ret < 0)
+ goto fail;
+
+ ret = gpio_direction_output(TPS6586X_GPIO_BASE, 1);
+ if (ret < 0)
+ goto fail;
+
+ gpio_export(TPS6586X_GPIO_BASE, false);
+ return 0;
+
+fail:
+ pr_err("%s: gpio_request failed #%d\n", __func__, TPS6586X_GPIO_BASE);
+ gpio_free(TPS6586X_GPIO_BASE);
+ return ret;
+}
+
+late_initcall(ventana_pcie_init);
diff --git a/arch/arm/mach-tegra/board-ventana-sdhci.c b/arch/arm/mach-tegra/board-ventana-sdhci.c
new file mode 100644
index 000000000000..d6ec6edb0b0f
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana-sdhci.c
@@ -0,0 +1,266 @@
+/*
+ * arch/arm/mach-tegra/board-ventana-sdhci.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/wlan_plat.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+
+#include "gpio-names.h"
+#include "board.h"
+
+#define VENTANA_WLAN_PWR TEGRA_GPIO_PK5
+#define VENTANA_WLAN_RST TEGRA_GPIO_PK6
+#define VENTANA_WLAN_WOW TEGRA_GPIO_PS0
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+static int ventana_wifi_status_register(void (*callback)(int, void *), void *);
+static struct clk *wifi_32k_clk;
+
+static int ventana_wifi_reset(int on);
+static int ventana_wifi_power(int on);
+static int ventana_wifi_set_carddetect(int val);
+
+static struct wifi_platform_data ventana_wifi_control = {
+ .set_power = ventana_wifi_power,
+ .set_reset = ventana_wifi_reset,
+ .set_carddetect = ventana_wifi_set_carddetect,
+};
+
+static struct resource wifi_resource[] = {
+ [0] = {
+ .name = "bcm4329_wlan_irq",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS0),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device ventana_wifi_device = {
+ .name = "bcm4329_wlan",
+ .id = 1,
+ .num_resources = 1,
+ .resource = wifi_resource,
+ .dev = {
+ .platform_data = &ventana_wifi_control,
+ },
+};
+
+static struct resource sdhci_resource0[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct embedded_sdio_data embedded_sdio_data0 = {
+ .cccr = {
+ .sdio_vsn = 2,
+ .multi_block = 1,
+ .low_speed = 0,
+ .wide_bus = 0,
+ .high_power = 1,
+ .high_speed = 1,
+ },
+ .cis = {
+ .vendor = 0x02d0,
+ .device = 0x4329,
+ },
+};
+
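+/* SDMMC1 carries the BCM4329 WLAN module: no card-detect, write-protect or
+ * power GPIOs, card presence is reported through the status-notify callback
+ * instead. */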
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data0 = {
+ .mmc_data = {
+ .register_status_notify = ventana_wifi_status_register,
+ .embedded_sdio = &embedded_sdio_data0,
+ .built_in = 1,
+ },
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+ .cd_gpio = TEGRA_GPIO_PI5,
+ .wp_gpio = TEGRA_GPIO_PH1,
+ .power_gpio = TEGRA_GPIO_PT3,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = TEGRA_GPIO_PI6,
+ .mmc_data = {
+ .built_in = 1,
+ }
+};
+
+static struct platform_device tegra_sdhci_device0 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource0,
+ .num_resources = ARRAY_SIZE(sdhci_resource0),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data0,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data3,
+ },
+};
+
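+/* Registered through mmc_data.register_status_notify above; only a single
+ * callback is supported, later callers get -EAGAIN. */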
+static int ventana_wifi_status_register(
+ void (*callback)(int card_present, void *dev_id),
+ void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static int ventana_wifi_set_carddetect(int val)
+{
+ pr_debug("%s: %d\n", __func__, val);
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ pr_warning("%s: Nobody to notify\n", __func__);
+ return 0;
+}
+
+static int ventana_wifi_power(int on)
+{
+ pr_debug("%s: %d\n", __func__, on);
+
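+	/* Toggle the power-enable line, then the reset line, with settling
+	 * delays in between, before switching the 32 kHz clock. */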
+ gpio_set_value(VENTANA_WLAN_PWR, on);
+ mdelay(100);
+ gpio_set_value(VENTANA_WLAN_RST, on);
+ mdelay(200);
+
+ if (on)
+ clk_enable(wifi_32k_clk);
+ else
+ clk_disable(wifi_32k_clk);
+
+ return 0;
+}
+
+static int ventana_wifi_reset(int on)
+{
+ pr_debug("%s: do nothing\n", __func__);
+ return 0;
+}
+
+static int __init ventana_wifi_init(void)
+{
+ wifi_32k_clk = clk_get_sys(NULL, "blink");
+ if (IS_ERR(wifi_32k_clk)) {
+ pr_err("%s: unable to get blink clock\n", __func__);
+ return PTR_ERR(wifi_32k_clk);
+ }
+
+ gpio_request(VENTANA_WLAN_PWR, "wlan_power");
+ gpio_request(VENTANA_WLAN_RST, "wlan_rst");
+ gpio_request(VENTANA_WLAN_WOW, "bcmsdh_sdmmc");
+
+ tegra_gpio_enable(VENTANA_WLAN_PWR);
+ tegra_gpio_enable(VENTANA_WLAN_RST);
+ tegra_gpio_enable(VENTANA_WLAN_WOW);
+
+ gpio_direction_output(VENTANA_WLAN_PWR, 0);
+ gpio_direction_output(VENTANA_WLAN_RST, 0);
+ gpio_direction_input(VENTANA_WLAN_WOW);
+
+ platform_device_register(&ventana_wifi_device);
+
+ device_init_wakeup(&ventana_wifi_device.dev, 1);
+ device_set_wakeup_enable(&ventana_wifi_device.dev, 0);
+
+ return 0;
+}
+int __init ventana_sdhci_init(void)
+{
+ tegra_gpio_enable(tegra_sdhci_platform_data2.power_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data2.cd_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data2.wp_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data3.power_gpio);
+
+ platform_device_register(&tegra_sdhci_device3);
+ platform_device_register(&tegra_sdhci_device2);
+ platform_device_register(&tegra_sdhci_device0);
+
+ ventana_wifi_init();
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-ventana-sensors.c b/arch/arm/mach-tegra/board-ventana-sensors.c
new file mode 100644
index 000000000000..c9c2f5441612
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana-sensors.c
@@ -0,0 +1,573 @@
+/*
+ * arch/arm/mach-tegra/board-ventana-sensors.c
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/mpu.h>
+#include <linux/i2c/pca954x.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/nct1008.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/gpio.h>
+
+#include <media/ov5650.h>
+#include <media/ov2710.h>
+#include <media/sh532u.h>
+#include <media/ssl3250a.h>
+#include <generated/mach-types.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-ventana.h"
+#include "cpu-tegra.h"
+
+#define ISL29018_IRQ_GPIO TEGRA_GPIO_PZ2
+#define AKM8975_IRQ_GPIO TEGRA_GPIO_PN5
+#define CAMERA_POWER_GPIO TEGRA_GPIO_PV4
+#define CAMERA_CSI_MUX_SEL_GPIO TEGRA_GPIO_PBB4
+#define CAMERA_FLASH_ACT_GPIO TEGRA_GPIO_PD2
+#define NCT1008_THERM2_GPIO TEGRA_GPIO_PN6
+
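+/* Set up the board-level camera GPIOs: power enable, CSI mux select and the
+ * flash/torch activation line. */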
+static int ventana_camera_init(void)
+{
+ int err;
+
+ tegra_gpio_enable(CAMERA_POWER_GPIO);
+ gpio_request(CAMERA_POWER_GPIO, "camera_power_en");
+ gpio_direction_output(CAMERA_POWER_GPIO, 1);
+ gpio_export(CAMERA_POWER_GPIO, false);
+
+ tegra_gpio_enable(CAMERA_CSI_MUX_SEL_GPIO);
+ gpio_request(CAMERA_CSI_MUX_SEL_GPIO, "camera_csi_sel");
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 0);
+ gpio_export(CAMERA_CSI_MUX_SEL_GPIO, false);
+
+ err = gpio_request(CAMERA_FLASH_ACT_GPIO, "torch_gpio_act");
+ if (err < 0) {
+ pr_err("gpio_request failed for gpio %d\n",
+ CAMERA_FLASH_ACT_GPIO);
+ } else {
+ tegra_gpio_enable(CAMERA_FLASH_ACT_GPIO);
+ gpio_direction_output(CAMERA_FLASH_ACT_GPIO, 0);
+ gpio_export(CAMERA_FLASH_ACT_GPIO, false);
+ }
+ return 0;
+}
+
+/* left ov5650 is CAM2 which is on csi_a */
+static int ventana_left_ov5650_power_on(void)
+{
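+	/* Select CSI A, enable the CSI/DSI rail and the sensor LDO, then
+	 * release power-down and pulse reset. */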
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 0);
+ gpio_direction_output(AVDD_DSI_CSI_ENB_GPIO, 1);
+ gpio_direction_output(CAM2_LDO_SHUTDN_L_GPIO, 1);
+ mdelay(5);
+ gpio_direction_output(CAM2_PWR_DN_GPIO, 0);
+ mdelay(5);
+ gpio_direction_output(CAM2_RST_L_GPIO, 0);
+ mdelay(1);
+ gpio_direction_output(CAM2_RST_L_GPIO, 1);
+ mdelay(20);
+ return 0;
+}
+
+static int ventana_left_ov5650_power_off(void)
+{
+ gpio_direction_output(AVDD_DSI_CSI_ENB_GPIO, 0);
+ gpio_direction_output(CAM2_RST_L_GPIO, 0);
+ gpio_direction_output(CAM2_PWR_DN_GPIO, 1);
+ gpio_direction_output(CAM2_LDO_SHUTDN_L_GPIO, 0);
+ return 0;
+}
+
+struct ov5650_platform_data ventana_left_ov5650_data = {
+ .power_on = ventana_left_ov5650_power_on,
+ .power_off = ventana_left_ov5650_power_off,
+};
+
+/* right ov5650 is CAM1 which is on csi_b */
+static int ventana_right_ov5650_power_on(void)
+{
+ gpio_direction_output(AVDD_DSI_CSI_ENB_GPIO, 1);
+ gpio_direction_output(CAM1_LDO_SHUTDN_L_GPIO, 1);
+ mdelay(5);
+ gpio_direction_output(CAM1_PWR_DN_GPIO, 0);
+ mdelay(5);
+ gpio_direction_output(CAM1_RST_L_GPIO, 0);
+ mdelay(1);
+ gpio_direction_output(CAM1_RST_L_GPIO, 1);
+ mdelay(20);
+ return 0;
+}
+
+static int ventana_right_ov5650_power_off(void)
+{
+ gpio_direction_output(AVDD_DSI_CSI_ENB_GPIO, 0);
+ gpio_direction_output(CAM1_RST_L_GPIO, 0);
+ gpio_direction_output(CAM1_PWR_DN_GPIO, 1);
+ gpio_direction_output(CAM1_LDO_SHUTDN_L_GPIO, 0);
+ return 0;
+}
+
+struct ov5650_platform_data ventana_right_ov5650_data = {
+ .power_on = ventana_right_ov5650_power_on,
+ .power_off = ventana_right_ov5650_power_off,
+};
+
+static int ventana_ov2710_power_on(void)
+{
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 1);
+ gpio_direction_output(AVDD_DSI_CSI_ENB_GPIO, 1);
+ gpio_direction_output(CAM3_LDO_SHUTDN_L_GPIO, 1);
+ mdelay(5);
+ gpio_direction_output(CAM3_PWR_DN_GPIO, 0);
+ mdelay(5);
+ gpio_direction_output(CAM3_RST_L_GPIO, 0);
+ mdelay(1);
+ gpio_direction_output(CAM3_RST_L_GPIO, 1);
+ mdelay(20);
+ return 0;
+}
+
+static int ventana_ov2710_power_off(void)
+{
+ gpio_direction_output(CAM3_RST_L_GPIO, 0);
+ gpio_direction_output(CAM3_PWR_DN_GPIO, 1);
+ gpio_direction_output(CAM3_LDO_SHUTDN_L_GPIO, 0);
+ gpio_direction_output(AVDD_DSI_CSI_ENB_GPIO, 0);
+ gpio_direction_output(CAMERA_CSI_MUX_SEL_GPIO, 0);
+ return 0;
+}
+
+struct ov2710_platform_data ventana_ov2710_data = {
+ .power_on = ventana_ov2710_power_on,
+ .power_off = ventana_ov2710_power_off,
+};
+
+
+static struct sh532u_platform_data sh532u_left_pdata = {
+ .num = 1,
+ .sync = 2,
+ .dev_name = "focuser",
+ .gpio_reset = CAM2_RST_L_GPIO,
+ .gpio_en = CAM2_LDO_SHUTDN_L_GPIO,
+};
+
+static struct sh532u_platform_data sh532u_right_pdata = {
+ .num = 2,
+ .sync = 1,
+ .dev_name = "focuser",
+ .gpio_reset = CAM1_RST_L_GPIO,
+ .gpio_en = CAM1_LDO_SHUTDN_L_GPIO,
+};
+
+
+static struct nvc_torch_pin_state ventana_ssl3250a_pinstate = {
+ .mask = 0x0040, /* VGP6 */
+ .values = 0x0040,
+};
+
+static struct ssl3250a_platform_data ventana_ssl3250a_pdata = {
+ .dev_name = "torch",
+ .pinstate = &ventana_ssl3250a_pinstate,
+ .gpio_act = CAMERA_FLASH_ACT_GPIO,
+};
+
+
+static void ventana_isl29018_init(void)
+{
+ tegra_gpio_enable(ISL29018_IRQ_GPIO);
+ gpio_request(ISL29018_IRQ_GPIO, "isl29018");
+ gpio_direction_input(ISL29018_IRQ_GPIO);
+}
+
+#ifdef CONFIG_SENSORS_AK8975
+static void ventana_akm8975_init(void)
+{
+ tegra_gpio_enable(AKM8975_IRQ_GPIO);
+ gpio_request(AKM8975_IRQ_GPIO, "akm8975");
+ gpio_direction_input(AKM8975_IRQ_GPIO);
+}
+#endif
+
+static void ventana_nct1008_init(void)
+{
+ tegra_gpio_enable(NCT1008_THERM2_GPIO);
+ gpio_request(NCT1008_THERM2_GPIO, "temp_alert");
+ gpio_direction_input(NCT1008_THERM2_GPIO);
+}
+
+static struct nct1008_platform_data ventana_nct1008_pdata = {
+ .supported_hwrev = true,
+ .ext_range = false,
+ .conv_rate = 0x08,
+ .offset = 0,
+ .hysteresis = 0,
+ .shutdown_ext_limit = 115,
+ .shutdown_local_limit = 120,
+ .throttling_ext_limit = 90,
+ .alarm_fn = tegra_throttling_enable,
+};
+
+static const struct i2c_board_info ventana_i2c0_board_info[] = {
+ {
+ I2C_BOARD_INFO("isl29018", 0x44),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PZ2),
+ },
+};
+
+static const struct i2c_board_info ventana_i2c2_board_info[] = {
+ {
+ I2C_BOARD_INFO("bq20z75", 0x0B),
+ },
+};
+
+static struct pca953x_platform_data ventana_tca6416_data = {
+ .gpio_base = TEGRA_NR_GPIOS + 4, /* 4 gpios are already requested by tps6586x */
+};
+
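+/* The PCA9546 mux on I2C bus 3 fans out to adapters 6-8, one per camera module. */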
+static struct pca954x_platform_mode ventana_pca9546_modes[] = {
+ { .adap_id = 6, .deselect_on_exit = 1 }, /* REAR CAM1 */
+ { .adap_id = 7, .deselect_on_exit = 1 }, /* REAR CAM2 */
+ { .adap_id = 8, .deselect_on_exit = 1 }, /* FRONT CAM3 */
+};
+
+static struct pca954x_platform_data ventana_pca9546_data = {
+ .modes = ventana_pca9546_modes,
+ .num_modes = ARRAY_SIZE(ventana_pca9546_modes),
+};
+
+static const struct i2c_board_info ventana_i2c3_board_info_tca6416[] = {
+ {
+ I2C_BOARD_INFO("tca6416", 0x20),
+ .platform_data = &ventana_tca6416_data,
+ },
+};
+
+static const struct i2c_board_info ventana_i2c3_board_info_pca9546[] = {
+ {
+ I2C_BOARD_INFO("pca9546", 0x70),
+ .platform_data = &ventana_pca9546_data,
+ },
+};
+
+static const struct i2c_board_info ventana_i2c3_board_info_ssl3250a[] = {
+ {
+ I2C_BOARD_INFO("ssl3250a", 0x30),
+ .platform_data = &ventana_ssl3250a_pdata,
+ },
+};
+
+static struct i2c_board_info ventana_i2c4_board_info[] = {
+ {
+ I2C_BOARD_INFO("nct1008", 0x4C),
+ .irq = TEGRA_GPIO_TO_IRQ(NCT1008_THERM2_GPIO),
+ .platform_data = &ventana_nct1008_pdata,
+ },
+
+#ifdef CONFIG_SENSORS_AK8975
+ {
+ I2C_BOARD_INFO("akm8975", 0x0C),
+ .irq = TEGRA_GPIO_TO_IRQ(AKM8975_IRQ_GPIO),
+ },
+#endif
+};
+
+static struct i2c_board_info ventana_i2c6_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov5650R", 0x36),
+ .platform_data = &ventana_right_ov5650_data,
+ },
+ {
+ I2C_BOARD_INFO("sh532u", 0x72),
+ .platform_data = &sh532u_right_pdata,
+ },
+};
+
+static struct i2c_board_info ventana_i2c7_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov5650L", 0x36),
+ .platform_data = &ventana_left_ov5650_data,
+ },
+ {
+ I2C_BOARD_INFO("sh532u", 0x72),
+ .platform_data = &sh532u_left_pdata,
+ },
+};
+
+static struct i2c_board_info ventana_i2c8_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov2710", 0x36),
+ .platform_data = &ventana_ov2710_data,
+ },
+};
+
+#ifdef CONFIG_MPU_SENSORS_MPU3050
+static struct mpu_platform_data mpu3050_data = {
+ .int_config = 0x10,
+ .level_shifter = 0,
+ .orientation = MPU_GYRO_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct ext_slave_platform_data mpu3050_accel_data = {
+ .address = MPU_ACCEL_ADDR,
+ .irq = 0,
+ .adapt_num = MPU_ACCEL_BUS_NUM,
+ .bus = EXT_SLAVE_BUS_SECONDARY,
+ .orientation = MPU_ACCEL_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct ext_slave_platform_data mpu_compass_data = {
+ .address = MPU_COMPASS_ADDR,
+ .irq = 0,
+ .adapt_num = MPU_COMPASS_BUS_NUM,
+ .bus = EXT_SLAVE_BUS_PRIMARY,
+ .orientation = MPU_COMPASS_ORIENTATION, /* Located in board_[platformname].h */
+};
+
+static struct i2c_board_info __initdata inv_mpu_i2c2_board_info[] = {
+ {
+ I2C_BOARD_INFO(MPU_GYRO_NAME, MPU_GYRO_ADDR),
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_GYRO_IRQ_GPIO),
+ .platform_data = &mpu3050_data,
+ },
+ {
+ I2C_BOARD_INFO(MPU_ACCEL_NAME, MPU_ACCEL_ADDR),
+#if MPU_ACCEL_IRQ_GPIO
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_ACCEL_IRQ_GPIO),
+#endif
+ .platform_data = &mpu3050_accel_data,
+ },
+};
+
+static struct i2c_board_info __initdata inv_mpu_i2c4_board_info[] = {
+ {
+ I2C_BOARD_INFO(MPU_COMPASS_NAME, MPU_COMPASS_ADDR),
+#if MPU_COMPASS_IRQ_GPIO
+ .irq = TEGRA_GPIO_TO_IRQ(MPU_COMPASS_IRQ_GPIO),
+#endif
+ .platform_data = &mpu_compass_data,
+ },
+};
+
+static void mpuirq_init(void)
+{
+ int ret = 0;
+
+ pr_info("*** MPU START *** mpuirq_init...\n");
+
+#if MPU_ACCEL_IRQ_GPIO
+ /* ACCEL-IRQ assignment */
+ tegra_gpio_enable(MPU_ACCEL_IRQ_GPIO);
+ ret = gpio_request(MPU_ACCEL_IRQ_GPIO, MPU_ACCEL_NAME);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(MPU_ACCEL_IRQ_GPIO);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(MPU_ACCEL_IRQ_GPIO);
+ return;
+ }
+#endif
+
+ /* MPU-IRQ assignment */
+ tegra_gpio_enable(MPU_GYRO_IRQ_GPIO);
+ ret = gpio_request(MPU_GYRO_IRQ_GPIO, MPU_GYRO_NAME);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed %d\n", __func__, ret);
+ return;
+ }
+
+ ret = gpio_direction_input(MPU_GYRO_IRQ_GPIO);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed %d\n", __func__, ret);
+ gpio_free(MPU_GYRO_IRQ_GPIO);
+ return;
+ }
+ pr_info("*** MPU END *** mpuirq_init...\n");
+
+ i2c_register_board_info(MPU_GYRO_BUS_NUM, inv_mpu_i2c2_board_info,
+ ARRAY_SIZE(inv_mpu_i2c2_board_info));
+ i2c_register_board_info(MPU_COMPASS_BUS_NUM, inv_mpu_i2c4_board_info,
+ ARRAY_SIZE(inv_mpu_i2c4_board_info));
+}
+#endif
+
+int __init ventana_sensors_init(void)
+{
+ struct board_info BoardInfo;
+
+ ventana_isl29018_init();
+#ifdef CONFIG_SENSORS_AK8975
+ ventana_akm8975_init();
+#endif
+#ifdef CONFIG_MPU_SENSORS_MPU3050
+ mpuirq_init();
+#endif
+ ventana_camera_init();
+ ventana_nct1008_init();
+
+ i2c_register_board_info(0, ventana_i2c0_board_info,
+ ARRAY_SIZE(ventana_i2c0_board_info));
+
+ tegra_get_board_info(&BoardInfo);
+
+ /*
+	 * The battery driver is only supported on FAB.D and later boards,
+	 * which have the necessary hardware rework.
+ */
+ if (BoardInfo.sku > 0) {
+ i2c_register_board_info(2, ventana_i2c2_board_info,
+ ARRAY_SIZE(ventana_i2c2_board_info));
+ }
+
+ i2c_register_board_info(3, ventana_i2c3_board_info_ssl3250a,
+ ARRAY_SIZE(ventana_i2c3_board_info_ssl3250a));
+
+ i2c_register_board_info(4, ventana_i2c4_board_info,
+ ARRAY_SIZE(ventana_i2c4_board_info));
+
+ i2c_register_board_info(6, ventana_i2c6_board_info,
+ ARRAY_SIZE(ventana_i2c6_board_info));
+
+ i2c_register_board_info(7, ventana_i2c7_board_info,
+ ARRAY_SIZE(ventana_i2c7_board_info));
+
+ i2c_register_board_info(8, ventana_i2c8_board_info,
+ ARRAY_SIZE(ventana_i2c8_board_info));
+
+ return 0;
+}
+
+#ifdef CONFIG_TEGRA_CAMERA
+
+struct tegra_camera_gpios {
+ const char *name;
+ int gpio;
+ int enabled;
+};
+
+#define TEGRA_CAMERA_GPIO(_name, _gpio, _enabled) \
+ { \
+ .name = _name, \
+ .gpio = _gpio, \
+ .enabled = _enabled, \
+ }
+
+static struct tegra_camera_gpios ventana_camera_gpio_keys[] = {
+ [0] = TEGRA_CAMERA_GPIO("en_avdd_csi", AVDD_DSI_CSI_ENB_GPIO, 1),
+ [1] = TEGRA_CAMERA_GPIO("cam_i2c_mux_rst_lo", CAM_I2C_MUX_RST_GPIO, 1),
+
+ [2] = TEGRA_CAMERA_GPIO("cam2_ldo_shdn_lo", CAM2_LDO_SHUTDN_L_GPIO, 0),
+ [3] = TEGRA_CAMERA_GPIO("cam2_af_pwdn_lo", CAM2_AF_PWR_DN_L_GPIO, 0),
+ [4] = TEGRA_CAMERA_GPIO("cam2_pwdn", CAM2_PWR_DN_GPIO, 0),
+ [5] = TEGRA_CAMERA_GPIO("cam2_rst_lo", CAM2_RST_L_GPIO, 1),
+
+ [6] = TEGRA_CAMERA_GPIO("cam3_ldo_shdn_lo", CAM3_LDO_SHUTDN_L_GPIO, 0),
+ [7] = TEGRA_CAMERA_GPIO("cam3_af_pwdn_lo", CAM3_AF_PWR_DN_L_GPIO, 0),
+ [8] = TEGRA_CAMERA_GPIO("cam3_pwdn", CAM3_PWR_DN_GPIO, 0),
+ [9] = TEGRA_CAMERA_GPIO("cam3_rst_lo", CAM3_RST_L_GPIO, 1),
+
+ [10] = TEGRA_CAMERA_GPIO("cam1_ldo_shdn_lo", CAM1_LDO_SHUTDN_L_GPIO, 0),
+ [11] = TEGRA_CAMERA_GPIO("cam1_af_pwdn_lo", CAM1_AF_PWR_DN_L_GPIO, 0),
+ [12] = TEGRA_CAMERA_GPIO("cam1_pwdn", CAM1_PWR_DN_GPIO, 0),
+ [13] = TEGRA_CAMERA_GPIO("cam1_rst_lo", CAM1_RST_L_GPIO, 1),
+};
+
+int __init ventana_camera_late_init(void)
+{
+ int ret;
+ int i;
+ struct regulator *cam_ldo6 = NULL;
+
+ if (!machine_is_ventana())
+ return 0;
+
+ cam_ldo6 = regulator_get(NULL, "vdd_ldo6");
+ if (IS_ERR_OR_NULL(cam_ldo6)) {
+ pr_err("%s: Couldn't get regulator ldo6\n", __func__);
+ return PTR_ERR(cam_ldo6);
+ }
+
+ ret = regulator_enable(cam_ldo6);
+	if (ret) {
+ pr_err("%s: Failed to enable ldo6\n", __func__);
+ goto fail_put_regulator;
+ }
+
+ i2c_new_device(i2c_get_adapter(3), ventana_i2c3_board_info_tca6416);
+
+ for (i = 0; i < ARRAY_SIZE(ventana_camera_gpio_keys); i++) {
+ ret = gpio_request(ventana_camera_gpio_keys[i].gpio,
+ ventana_camera_gpio_keys[i].name);
+ if (ret < 0) {
+ pr_err("%s: gpio_request failed for gpio #%d\n",
+ __func__, i);
+ goto fail_free_gpio;
+ }
+ gpio_direction_output(ventana_camera_gpio_keys[i].gpio,
+ ventana_camera_gpio_keys[i].enabled);
+ gpio_export(ventana_camera_gpio_keys[i].gpio, false);
+ }
+
+ i2c_new_device(i2c_get_adapter(3), ventana_i2c3_board_info_pca9546);
+
+ ventana_ov2710_power_off();
+ ventana_left_ov5650_power_off();
+ ventana_right_ov5650_power_off();
+
+ ret = regulator_disable(cam_ldo6);
+	if (ret) {
+ pr_err("%s: Failed to disable ldo6\n", __func__);
+ goto fail_free_gpio;
+ }
+
+ regulator_put(cam_ldo6);
+ return 0;
+
+fail_free_gpio:
+ while (i--)
+ gpio_free(ventana_camera_gpio_keys[i].gpio);
+
+fail_put_regulator:
+ regulator_put(cam_ldo6);
+ return ret;
+}
+
+late_initcall(ventana_camera_late_init);
+
+#endif /* CONFIG_TEGRA_CAMERA */
diff --git a/arch/arm/mach-tegra/board-ventana.c b/arch/arm/mach-tegra/board-ventana.c
new file mode 100644
index 000000000000..e0ca2c65f143
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana.c
@@ -0,0 +1,653 @@
+/*
+ * arch/arm/mach-tegra/board-ventana.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/memblock.h>
+#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/tegra_uart.h>
+
+#include <sound/wm8903.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/io.h>
+#include <mach/i2s.h>
+#include <mach/tegra_wm8903_pdata.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/usb_phy.h>
+
+#include "board.h"
+#include "clock.h"
+#include "board-ventana.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "fuse.h"
+#include "wakeups-t2.h"
+#include "pm.h"
+
+static struct tegra_utmip_config utmi_phy_config[] = {
+ [0] = {
+ .hssync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [1] = {
+ .hssync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+static struct tegra_ulpi_config ulpi_phy_config = {
+ .reset_gpio = TEGRA_GPIO_PG2,
+ .clk = "cdev2",
+};
+
+static struct resource ventana_bcm4329_rfkill_resources[] = {
+ {
+ .name = "bcm4329_nshutdown_gpio",
+ .start = TEGRA_GPIO_PU0,
+ .end = TEGRA_GPIO_PU0,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct platform_device ventana_bcm4329_rfkill_device = {
+ .name = "bcm4329_rfkill",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ventana_bcm4329_rfkill_resources),
+ .resource = ventana_bcm4329_rfkill_resources,
+};
+
+static void __init ventana_bt_rfkill(void)
+{
+	/* Add the 32 kHz "blink" clock alias used by the bcm4329 rfkill driver */
+	clk_add_alias("bcm4329_32k_clk", ventana_bcm4329_rfkill_device.name,
+				"blink", NULL);
+ return;
+}
+
+static struct resource ventana_bluesleep_resources[] = {
+ [0] = {
+ .name = "gpio_host_wake",
+ .start = TEGRA_GPIO_PU6,
+ .end = TEGRA_GPIO_PU6,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .name = "gpio_ext_wake",
+ .start = TEGRA_GPIO_PU1,
+ .end = TEGRA_GPIO_PU1,
+ .flags = IORESOURCE_IO,
+ },
+ [2] = {
+ .name = "host_wake",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ },
+};
+
+static struct platform_device ventana_bluesleep_device = {
+ .name = "bluesleep",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ventana_bluesleep_resources),
+ .resource = ventana_bluesleep_resources,
+};
+
+static void __init ventana_setup_bluesleep(void)
+{
+ platform_device_register(&ventana_bluesleep_device);
+ tegra_gpio_enable(TEGRA_GPIO_PU6);
+ tegra_gpio_enable(TEGRA_GPIO_PU1);
+ return;
+}
+
+static __initdata struct tegra_clk_init_table ventana_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "blink", "clk_32k", 32768, false},
+ { "pll_p_out4", "pll_p", 24000000, true },
+ { "pwm", "clk_32k", 32768, false},
+ { "i2s1", "pll_a_out0", 0, false},
+ { "i2s2", "pll_a_out0", 0, false},
+ { "spdif_out", "pll_a_out0", 0, false},
+ { NULL, NULL, 0, 0},
+};
+
+static struct tegra_ulpi_config ventana_ehci2_ulpi_phy_config = {
+ .reset_gpio = TEGRA_GPIO_PV1,
+ .clk = "cdev2",
+};
+
+static struct tegra_ehci_platform_data ventana_ehci2_ulpi_platform_data = {
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ .phy_config = &ventana_ehci2_ulpi_phy_config,
+ .phy_type = TEGRA_USB_PHY_TYPE_LINK_ULPI,
+};
+
+static struct tegra_i2c_platform_data ventana_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .slave_addr = 0x00FC,
+ .scl_gpio = {TEGRA_GPIO_PC4, 0},
+ .sda_gpio = {TEGRA_GPIO_PC5, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static const struct tegra_pingroup_config i2c2_ddc = {
+ .pingroup = TEGRA_PINGROUP_DDC,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static const struct tegra_pingroup_config i2c2_gen2 = {
+ .pingroup = TEGRA_PINGROUP_PTA,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static struct tegra_i2c_platform_data ventana_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 2,
+ .bus_clk_rate = { 100000, 10000 },
+ .bus_mux = { &i2c2_ddc, &i2c2_gen2 },
+ .bus_mux_len = { 1, 1 },
+ .slave_addr = 0x00FC,
+ .scl_gpio = {0, TEGRA_GPIO_PT5},
+ .sda_gpio = {0, TEGRA_GPIO_PT6},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data ventana_i2c3_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .slave_addr = 0x00FC,
+ .scl_gpio = {TEGRA_GPIO_PBB2, 0},
+ .sda_gpio = {TEGRA_GPIO_PBB3, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data ventana_dvc_platform_data = {
+ .adapter_nr = 4,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .is_dvc = true,
+ .scl_gpio = {TEGRA_GPIO_PZ6, 0},
+ .sda_gpio = {TEGRA_GPIO_PZ7, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct wm8903_platform_data ventana_wm8903_pdata = {
+ .irq_active_low = 0,
+ .micdet_cfg = 0,
+ .micdet_delay = 100,
+ .gpio_base = VENTANA_GPIO_WM8903(0),
+ .gpio_cfg = {
+ (WM8903_GPn_FN_DMIC_LR_CLK_OUTPUT << WM8903_GP1_FN_SHIFT),
+ (WM8903_GPn_FN_DMIC_LR_CLK_OUTPUT << WM8903_GP2_FN_SHIFT) |
+ WM8903_GP2_DIR,
+ 0,
+ WM8903_GPIO_NO_CONFIG,
+ WM8903_GPIO_NO_CONFIG,
+ },
+};
+
+static struct i2c_board_info __initdata wm8903_board_info = {
+ I2C_BOARD_INFO("wm8903", 0x1a),
+ .platform_data = &ventana_wm8903_pdata,
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_CDC_IRQ),
+};
+
+static void ventana_i2c_init(void)
+{
+ tegra_i2c_device1.dev.platform_data = &ventana_i2c1_platform_data;
+ tegra_i2c_device2.dev.platform_data = &ventana_i2c2_platform_data;
+ tegra_i2c_device3.dev.platform_data = &ventana_i2c3_platform_data;
+ tegra_i2c_device4.dev.platform_data = &ventana_dvc_platform_data;
+
+ platform_device_register(&tegra_i2c_device1);
+ platform_device_register(&tegra_i2c_device2);
+ platform_device_register(&tegra_i2c_device3);
+ platform_device_register(&tegra_i2c_device4);
+
+ i2c_register_board_info(0, &wm8903_board_info, 1);
+}
+static struct platform_device *ventana_uart_devices[] __initdata = {
+ &tegra_uartb_device,
+ &tegra_uartc_device,
+ &tegra_uartd_device,
+};
+
+static struct uart_clk_parent uart_parent_clk[] = {
+ [0] = {.name = "pll_p"},
+ [1] = {.name = "pll_m"},
+ [2] = {.name = "clk_m"},
+};
+
+static struct tegra_uart_platform_data ventana_uart_pdata;
+
+static void __init uart_debug_init(void)
+{
+ unsigned long rate;
+ struct clk *c;
+
+ /* UARTD is the debug port. */
+ pr_info("Selecting UARTD as the debug console\n");
+ ventana_uart_devices[2] = &debug_uartd_device;
+ debug_uart_port_base = ((struct plat_serial8250_port *)(
+ debug_uartd_device.dev.platform_data))->mapbase;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartd");
+
+ /* Clock enable for the debug channel */
+ if (!IS_ERR_OR_NULL(debug_uart_clk)) {
+ rate = ((struct plat_serial8250_port *)(
+ debug_uartd_device.dev.platform_data))->uartclk;
+ pr_info("The debug console clock name is %s\n",
+ debug_uart_clk->name);
+ c = tegra_get_clock_by_name("pll_p");
+ if (IS_ERR_OR_NULL(c))
+ pr_err("Not getting the parent clock pll_p\n");
+ else
+ clk_set_parent(debug_uart_clk, c);
+
+ clk_enable(debug_uart_clk);
+ clk_set_rate(debug_uart_clk, rate);
+ } else {
+ pr_err("Not getting the clock %s for debug console\n",
+ debug_uart_clk->name);
+ }
+}
+
+static void __init ventana_uart_init(void)
+{
+ int i;
+ struct clk *c;
+
+ for (i = 0; i < ARRAY_SIZE(uart_parent_clk); ++i) {
+ c = tegra_get_clock_by_name(uart_parent_clk[i].name);
+ if (IS_ERR_OR_NULL(c)) {
+ pr_err("Not able to get the clock for %s\n",
+ uart_parent_clk[i].name);
+ continue;
+ }
+ uart_parent_clk[i].parent_clk = c;
+ uart_parent_clk[i].fixed_clk_rate = clk_get_rate(c);
+ }
+ ventana_uart_pdata.parent_clk_list = uart_parent_clk;
+ ventana_uart_pdata.parent_clk_count = ARRAY_SIZE(uart_parent_clk);
+ tegra_uartb_device.dev.platform_data = &ventana_uart_pdata;
+ tegra_uartc_device.dev.platform_data = &ventana_uart_pdata;
+ tegra_uartd_device.dev.platform_data = &ventana_uart_pdata;
+
+ /* Register low speed only if it is selected */
+ if (!is_tegra_debug_uartport_hs())
+ uart_debug_init();
+
+ platform_add_devices(ventana_uart_devices,
+ ARRAY_SIZE(ventana_uart_devices));
+}
+
+#ifdef CONFIG_KEYBOARD_GPIO
+#define GPIO_KEY(_id, _gpio, _iswake) \
+ { \
+ .code = _id, \
+ .gpio = TEGRA_GPIO_##_gpio, \
+ .active_low = 1, \
+ .desc = #_id, \
+ .type = EV_KEY, \
+ .wakeup = _iswake, \
+ .debounce_interval = 10, \
+ }
+
+static struct gpio_keys_button ventana_keys[] = {
+ [0] = GPIO_KEY(KEY_FIND, PQ3, 0),
+ [1] = GPIO_KEY(KEY_HOME, PQ1, 0),
+ [2] = GPIO_KEY(KEY_BACK, PQ2, 0),
+ [3] = GPIO_KEY(KEY_VOLUMEUP, PQ5, 0),
+ [4] = GPIO_KEY(KEY_VOLUMEDOWN, PQ4, 0),
+ [5] = GPIO_KEY(KEY_POWER, PV2, 1),
+ [6] = GPIO_KEY(KEY_MENU, PC7, 0),
+};
+
+#define PMC_WAKE_STATUS 0x14
+
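+/* Report KEY_POWER as the wakeup key when the PMC wake status shows the PV2
+ * wake pad fired. */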
+static int ventana_wakeup_key(void)
+{
+ unsigned long status =
+ readl(IO_ADDRESS(TEGRA_PMC_BASE) + PMC_WAKE_STATUS);
+
+ return status & TEGRA_WAKE_GPIO_PV2 ? KEY_POWER : KEY_RESERVED;
+}
+
+static struct gpio_keys_platform_data ventana_keys_platform_data = {
+ .buttons = ventana_keys,
+ .nbuttons = ARRAY_SIZE(ventana_keys),
+ .wakeup_key = ventana_wakeup_key,
+};
+
+static struct platform_device ventana_keys_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &ventana_keys_platform_data,
+ },
+};
+
+static void ventana_keys_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ventana_keys); i++)
+ tegra_gpio_enable(ventana_keys[i].gpio);
+}
+#endif
+
+static struct platform_device tegra_camera = {
+ .name = "tegra_camera",
+ .id = -1,
+};
+
+static struct tegra_wm8903_platform_data ventana_audio_pdata = {
+ .gpio_spkr_en = TEGRA_GPIO_SPKR_EN,
+ .gpio_hp_det = TEGRA_GPIO_HP_DET,
+ .gpio_hp_mute = -1,
+ .gpio_int_mic_en = TEGRA_GPIO_INT_MIC_EN,
+ .gpio_ext_mic_en = TEGRA_GPIO_EXT_MIC_EN,
+};
+
+static struct platform_device ventana_audio_device = {
+ .name = "tegra-snd-wm8903",
+ .id = 0,
+ .dev = {
+ .platform_data = &ventana_audio_pdata,
+ },
+};
+
+static struct platform_device *ventana_devices[] __initdata = {
+ &tegra_pmu_device,
+ &tegra_gart_device,
+ &tegra_aes_device,
+#ifdef CONFIG_KEYBOARD_GPIO
+ &ventana_keys_device,
+#endif
+ &tegra_wdt_device,
+ &tegra_avp_device,
+ &tegra_camera,
+ &tegra_i2s_device1,
+ &tegra_i2s_device2,
+ &tegra_spdif_device,
+ &tegra_das_device,
+ &spdif_dit_device,
+ &bluetooth_dit_device,
+ &ventana_bcm4329_rfkill_device,
+ &tegra_pcm_device,
+ &ventana_audio_device,
+};
+
+static struct mxt_platform_data atmel_mxt_info = {
+ .x_line = 27,
+ .y_line = 42,
+ .x_size = 768,
+ .y_size = 1366,
+ .blen = 0x20,
+ .threshold = 0x3C,
+ .voltage = 3300000,
+ .orient = MXT_ROTATED_90,
+ .irqflags = IRQF_TRIGGER_FALLING,
+};
+
+static struct i2c_board_info __initdata i2c_info[] = {
+ {
+ I2C_BOARD_INFO("atmel_mxt_ts", 0x5A),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV6),
+ .platform_data = &atmel_mxt_info,
+ },
+};
+
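+/*
+ * Set up the Atmel maXTouch controller: configure the IRQ and reset GPIOs,
+ * pulse reset, then register the device on I2C bus 0.
+ */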
+static int __init ventana_touch_init_atmel(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PV6);
+ tegra_gpio_enable(TEGRA_GPIO_PQ7);
+
+ gpio_request(TEGRA_GPIO_PV6, "atmel-irq");
+ gpio_direction_input(TEGRA_GPIO_PV6);
+
+ gpio_request(TEGRA_GPIO_PQ7, "atmel-reset");
+ gpio_direction_output(TEGRA_GPIO_PQ7, 0);
+ msleep(1);
+ gpio_set_value(TEGRA_GPIO_PQ7, 1);
+ msleep(100);
+
+ i2c_register_board_info(0, i2c_info, 1);
+
+ return 0;
+}
+
+static struct panjit_i2c_ts_platform_data panjit_data = {
+ .gpio_reset = TEGRA_GPIO_PQ7,
+};
+
+static struct i2c_board_info __initdata ventana_i2c_bus1_touch_info[] = {
+ {
+ I2C_BOARD_INFO("panjit_touch", 0x3),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV6),
+ .platform_data = &panjit_data,
+ },
+};
+
+static int __init ventana_touch_init_panjit(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PV6);
+
+ tegra_gpio_enable(TEGRA_GPIO_PQ7);
+ i2c_register_board_info(0, ventana_i2c_bus1_touch_info, 1);
+
+ return 0;
+}
+
+static struct usb_phy_plat_data tegra_usb_phy_pdata[] = {
+ [0] = {
+ .instance = 0,
+ .vbus_irq = TPS6586X_INT_BASE + TPS6586X_INT_USB_DET,
+ .vbus_gpio = TEGRA_GPIO_PD0,
+ },
+ [1] = {
+ .instance = 1,
+ .vbus_gpio = -1,
+ },
+ [2] = {
+ .instance = 2,
+ .vbus_gpio = TEGRA_GPIO_PD3,
+ },
+};
+
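+/*
+ * All three EHCI controllers run in host mode; instance 1 uses a link ULPI
+ * PHY and instance 2 supports hotplug.
+ */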
+static struct tegra_ehci_platform_data tegra_ehci_pdata[] = {
+ [0] = {
+ .phy_config = &utmi_phy_config[0],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [1] = {
+ .phy_config = &ulpi_phy_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ .phy_type = TEGRA_USB_PHY_TYPE_LINK_ULPI,
+ },
+ [2] = {
+ .phy_config = &utmi_phy_config[1],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ .hotplug = 1,
+ },
+};
+
+static struct tegra_otg_platform_data tegra_otg_pdata = {
+ .ehci_device = &tegra_ehci1_device,
+ .ehci_pdata = &tegra_ehci_pdata[0],
+};
+
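+/* Enable the "blink" clock and GPIO PZ3 used by the GPS module. */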
+static int __init ventana_gps_init(void)
+{
+ struct clk *clk32 = clk_get_sys(NULL, "blink");
+ if (!IS_ERR(clk32)) {
+ clk_set_rate(clk32, clk32->parent->rate);
+ clk_enable(clk32);
+ }
+
+ tegra_gpio_enable(TEGRA_GPIO_PZ3);
+ return 0;
+}
+
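+/* Power off the board through the TPS6586x PMIC; spin if the call fails. */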
+static void ventana_power_off(void)
+{
+ int ret;
+
+ ret = tps6586x_power_off();
+ if (ret)
+ pr_err("ventana: failed to power off\n");
+
+ while (1);
+}
+
+static void __init ventana_power_off_init(void)
+{
+ pm_power_off = ventana_power_off;
+}
+
+static void ventana_usb_init(void)
+{
+ tegra_usb_phy_init(tegra_usb_phy_pdata, ARRAY_SIZE(tegra_usb_phy_pdata));
+ /* OTG should be the first to be registered */
+ tegra_otg_device.dev.platform_data = &tegra_otg_pdata;
+ platform_device_register(&tegra_otg_device);
+
+ platform_device_register(&tegra_udc_device);
+ platform_device_register(&tegra_ehci2_device);
+
+ tegra_ehci3_device.dev.platform_data = &tegra_ehci_pdata[2];
+ platform_device_register(&tegra_ehci3_device);
+}
+
+static void __init tegra_ventana_init(void)
+{
+ struct board_info BoardInfo;
+
+ tegra_clk_init_from_table(ventana_clk_init_table);
+ ventana_pinmux_init();
+ ventana_i2c_init();
+ ventana_uart_init();
+ tegra_ehci2_device.dev.platform_data
+ = &ventana_ehci2_ulpi_platform_data;
+ platform_add_devices(ventana_devices, ARRAY_SIZE(ventana_devices));
+
+ ventana_sdhci_init();
+ ventana_charge_init();
+ ventana_regulator_init();
+ ventana_charger_init();
+
+ tegra_get_board_info(&BoardInfo);
+
+ /* boards with sku > 0 have atmel touch panels */
+ if (BoardInfo.sku) {
+ pr_info("Initializing Atmel touch driver\n");
+ ventana_touch_init_atmel();
+ } else {
+ pr_info("Initializing Panjit touch driver\n");
+ ventana_touch_init_panjit();
+ }
+
+#ifdef CONFIG_KEYBOARD_GPIO
+ ventana_keys_init();
+#endif
+
+ ventana_usb_init();
+ ventana_gps_init();
+ ventana_panel_init();
+ ventana_sensors_init();
+ ventana_bt_rfkill();
+ ventana_power_off_init();
+ ventana_emc_init();
+
+ ventana_setup_bluesleep();
+ tegra_release_bootloader_fb();
+}
+
+int __init tegra_ventana_protected_aperture_init(void)
+{
+ if (!machine_is_ventana())
+ return 0;
+
+ tegra_protected_aperture_init(tegra_grhost_aperture);
+ return 0;
+}
+late_initcall(tegra_ventana_protected_aperture_init);
+
+void __init tegra_ventana_reserve(void)
+{
+ if (memblock_reserve(0x0, 4096) < 0)
+ pr_warn("Cannot reserve first 4K of memory for safety\n");
+
+ tegra_reserve(SZ_256M, SZ_8M + SZ_1M, SZ_16M);
+}
+
+MACHINE_START(VENTANA, "ventana")
+ .boot_params = 0x00000100,
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_ventana_reserve,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_ventana_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-ventana.h b/arch/arm/mach-tegra/board-ventana.h
new file mode 100644
index 000000000000..61b75363a19c
--- /dev/null
+++ b/arch/arm/mach-tegra/board-ventana.h
@@ -0,0 +1,89 @@
+/*
+ * arch/arm/mach-tegra/board-ventana.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_VENTANA_H
+#define _MACH_TEGRA_BOARD_VENTANA_H
+
+int ventana_charge_init(void);
+int ventana_regulator_init(void);
+int ventana_sdhci_init(void);
+int ventana_pinmux_init(void);
+int ventana_panel_init(void);
+int ventana_sensors_init(void);
+int ventana_kbc_init(void);
+int ventana_emc_init(void);
+int ventana_charger_init(void);
+
+/* external gpios */
+
+/* TPS6586X gpios */
+#define TPS6586X_GPIO_BASE TEGRA_NR_GPIOS
+#define AVDD_DSI_CSI_ENB_GPIO (TPS6586X_GPIO_BASE + 1) /* gpio2 */
+
+/* TCA6416 gpios */
+#define TCA6416_GPIO_BASE (TEGRA_NR_GPIOS + 4)
+#define CAM1_PWR_DN_GPIO (TCA6416_GPIO_BASE + 0) /* gpio0 */
+#define CAM1_RST_L_GPIO (TCA6416_GPIO_BASE + 1) /* gpio1 */
+#define CAM1_AF_PWR_DN_L_GPIO (TCA6416_GPIO_BASE + 2) /* gpio2 */
+#define CAM1_LDO_SHUTDN_L_GPIO (TCA6416_GPIO_BASE + 3) /* gpio3 */
+#define CAM2_PWR_DN_GPIO (TCA6416_GPIO_BASE + 4) /* gpio4 */
+#define CAM2_RST_L_GPIO (TCA6416_GPIO_BASE + 5) /* gpio5 */
+#define CAM2_AF_PWR_DN_L_GPIO (TCA6416_GPIO_BASE + 6) /* gpio6 */
+#define CAM2_LDO_SHUTDN_L_GPIO (TCA6416_GPIO_BASE + 7) /* gpio7 */
+#define CAM3_PWR_DN_GPIO (TCA6416_GPIO_BASE + 8) /* gpio8 */
+#define CAM3_RST_L_GPIO (TCA6416_GPIO_BASE + 9) /* gpio9 */
+#define CAM3_AF_PWR_DN_L_GPIO (TCA6416_GPIO_BASE + 10) /* gpio10 */
+#define CAM3_LDO_SHUTDN_L_GPIO (TCA6416_GPIO_BASE + 11) /* gpio11 */
+#define CAM_I2C_MUX_RST_GPIO (TCA6416_GPIO_BASE + 15) /* gpio15 */
+#define TCA6416_GPIO_END (TCA6416_GPIO_BASE + 31)
+
+/* WM8903 GPIOs */
+#define VENTANA_GPIO_WM8903(_x_) (TCA6416_GPIO_END + 1 + (_x_))
+#define VENTANA_GPIO_WM8903_END VENTANA_GPIO_WM8903(4)
+
+/* Audio-related GPIOs */
+#define TEGRA_GPIO_CDC_IRQ TEGRA_GPIO_PX3
+#define TEGRA_GPIO_SPKR_EN VENTANA_GPIO_WM8903(2)
+#define TEGRA_GPIO_HP_DET TEGRA_GPIO_PW2
+#define TEGRA_GPIO_INT_MIC_EN TEGRA_GPIO_PX0
+#define TEGRA_GPIO_EXT_MIC_EN TEGRA_GPIO_PX1
+
+/* AC detect GPIO */
+#define AC_PRESENT_GPIO TEGRA_GPIO_PV3
+
+/* Interrupt numbers from external peripherals */
+#define TPS6586X_INT_BASE TEGRA_NR_IRQS
+#define TPS6586X_INT_END (TPS6586X_INT_BASE + 32)
+
+/* Invensense MPU Definitions */
+#define MPU_GYRO_NAME "mpu3050"
+#define MPU_GYRO_IRQ_GPIO TEGRA_GPIO_PZ4
+#define MPU_GYRO_ADDR 0x68
+#define MPU_GYRO_BUS_NUM 0
+#define MPU_GYRO_ORIENTATION { 0, -1, 0, -1, 0, 0, 0, 0, -1 }
+#define MPU_ACCEL_NAME "kxtf9"
+#define MPU_ACCEL_IRQ_GPIO 0 /* Disable ACCELIRQ: TEGRA_GPIO_PN4 */
+#define MPU_ACCEL_ADDR 0x0F
+#define MPU_ACCEL_BUS_NUM 0
+#define MPU_ACCEL_ORIENTATION { 0, -1, 0, -1, 0, 0, 0, 0, -1 }
+#define MPU_COMPASS_NAME "ak8975"
+#define MPU_COMPASS_IRQ_GPIO TEGRA_GPIO_PN5
+#define MPU_COMPASS_ADDR 0x0C
+#define MPU_COMPASS_BUS_NUM 4
+#define MPU_COMPASS_ORIENTATION { 1, 0, 0, 0, 1, 0, 0, 0, 1 }
+
+#endif
diff --git a/arch/arm/mach-tegra/board-whistler-baseband.c b/arch/arm/mach-tegra/board-whistler-baseband.c
new file mode 100644
index 000000000000..143d14a8721d
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-baseband.c
@@ -0,0 +1,230 @@
+/*
+ * arch/arm/mach-tegra/board-whistler-baseband.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tegra_caif.h>
+#include <mach/tegra_usb_modem_power.h>
+
+#include "board.h"
+#include "board-whistler-baseband.h"
+
+static int baseband_phy_on(void);
+static int baseband_phy_off(void);
+static void baseband_phy_restore_start(void);
+static void baseband_phy_restore_end(void);
+
+static struct wake_lock mdm_wake_lock;
+
+static struct gpio modem_gpios[] = {
+ {MODEM_PWR_ON, GPIOF_OUT_INIT_LOW, "MODEM PWR ON"},
+ {MODEM_RESET, GPIOF_IN, "MODEM RESET"},
+ {BB_RST_OUT, GPIOF_IN, "BB RST OUT"},
+ {MDM2AP_ACK, GPIOF_IN, "MDM2AP_ACK"},
+ {AP2MDM_ACK2, GPIOF_OUT_INIT_HIGH, "AP2MDM ACK2"},
+ {AP2MDM_ACK, GPIOF_OUT_INIT_LOW, "AP2MDM ACK"},
+ {ULPI_STP, GPIOF_IN, "ULPI_STP"},
+ {ULPI_DIR, GPIOF_OUT_INIT_LOW, "ULPI_DIR"},
+ {ULPI_D0, GPIOF_OUT_INIT_LOW, "ULPI_D0"},
+ {ULPI_D1, GPIOF_OUT_INIT_LOW, "ULPI_D1"},
+};
+
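+/* Pinmux for the NULL-ULPI link to the modem; UARTA is routed to SDIO1 pins. */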
+static __initdata struct tegra_pingroup_config whistler_null_ulpi_pinmux[] = {
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL,
+ TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL,
+ TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL,
+ TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL,
+ TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_UARTA, TEGRA_PUPD_PULL_UP,
+ TEGRA_TRI_NORMAL},
+};
+
+static struct tegra_ulpi_trimmer e1219_trimmer = { 10, 1, 1, 1 };
+
+static struct tegra_ulpi_config ehci2_null_ulpi_phy_config = {
+ .trimmer = &e1219_trimmer,
+ .post_phy_on = baseband_phy_on,
+ .pre_phy_off = baseband_phy_off,
+ .phy_restore_start = baseband_phy_restore_start,
+ .phy_restore_end = baseband_phy_restore_end,
+ .phy_restore_gpio = MDM2AP_ACK,
+ .ulpi_dir_gpio = ULPI_DIR,
+ .ulpi_d0_gpio = ULPI_D0,
+ .ulpi_d1_gpio = ULPI_D1,
+};
+
+static struct tegra_ehci_platform_data ehci2_null_ulpi_platform_data = {
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ .phy_config = &ehci2_null_ulpi_phy_config,
+ .phy_type = TEGRA_USB_PHY_TYPE_NULL_ULPI,
+};
+
+static int __init tegra_null_ulpi_init(void)
+{
+ tegra_ehci2_device.dev.platform_data = &ehci2_null_ulpi_platform_data;
+ platform_device_register(&tegra_ehci2_device);
+ return 0;
+}
+
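+/*
+ * Threaded IRQ handler for BB_RST_OUT: when the modem comes out of reset,
+ * hold a wake lock long enough for USB enumeration to complete.
+ */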
+static irqreturn_t mdm_start_thread(int irq, void *data)
+{
+ if (gpio_get_value(BB_RST_OUT)) {
+ pr_info("BB_RST_OUT high\n");
+ } else {
+ pr_info("BB_RST_OUT low\n");
+ /* hold wake lock to complete the enumeration */
+ wake_lock_timeout(&mdm_wake_lock, HZ * 10);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int baseband_phy_on(void)
+{
+ static bool phy_init;
+
+ if (!phy_init) {
+ /* set AP2MDM_ACK2 low */
+ gpio_set_value(AP2MDM_ACK2, 0);
+ phy_init = true;
+ }
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static int baseband_phy_off(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static void baseband_phy_restore_start(void)
+{
+ /* set AP2MDM_ACK2 high */
+ gpio_set_value(AP2MDM_ACK2, 1);
+}
+
+static void baseband_phy_restore_end(void)
+{
+ /* set AP2MDM_ACK2 low */
+ gpio_set_value(AP2MDM_ACK2, 0);
+}
+
+static void baseband_start(void)
+{
+ /*
+ * Leave baseband powered OFF.
+ * User-space daemons will take care of powering it up.
+ */
+ pr_info("%s\n", __func__);
+ gpio_set_value(MODEM_PWR_ON, 0);
+}
+
+static void baseband_reset(void)
+{
+ /* Initiate power cycle on baseband sub system */
+ pr_info("%s\n", __func__);
+ gpio_set_value(MODEM_PWR_ON, 0);
+ mdelay(200);
+ gpio_set_value(MODEM_PWR_ON, 1);
+}
+
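+/*
+ * Request the modem control GPIOs, register the NULL-ULPI EHCI controller
+ * and arm BB_RST_OUT as a wakeup interrupt.
+ */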
+static int baseband_init(void)
+{
+ int irq;
+ int ret;
+
+ ret = gpio_request_array(modem_gpios, ARRAY_SIZE(modem_gpios));
+ if (ret)
+ return ret;
+
+ /* enable pull-up for BB_RST_OUT */
+ tegra_pinmux_set_pullupdown(TEGRA_PINGROUP_UAC,
+ TEGRA_PUPD_PULL_UP);
+
+ tegra_gpio_enable(MODEM_PWR_ON);
+ tegra_gpio_enable(MODEM_RESET);
+ tegra_gpio_enable(AP2MDM_ACK2);
+ tegra_gpio_enable(BB_RST_OUT);
+ tegra_gpio_enable(AP2MDM_ACK);
+ tegra_gpio_enable(MDM2AP_ACK);
+ tegra_gpio_enable(TEGRA_GPIO_PY3);
+ tegra_gpio_enable(TEGRA_GPIO_PY1);
+ tegra_gpio_enable(TEGRA_GPIO_PO1);
+ tegra_gpio_enable(TEGRA_GPIO_PO2);
+
+ /* export GPIO for user space access through sysfs */
+ gpio_export(MODEM_PWR_ON, false);
+
+ /* phy init */
+ tegra_null_ulpi_init();
+
+ wake_lock_init(&mdm_wake_lock, WAKE_LOCK_SUSPEND, "mdm_lock");
+
+ /* enable IRQ for BB_RST_OUT */
+ irq = gpio_to_irq(BB_RST_OUT);
+
+ ret = request_threaded_irq(irq, NULL, mdm_start_thread,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "mdm_start", NULL);
+ if (ret < 0) {
+ pr_err("%s: request_threaded_irq error\n", __func__);
+ return ret;
+ }
+
+ ret = enable_irq_wake(irq);
+ if (ret) {
+ pr_err("%s: enable_irq_wake error\n", __func__);
+ free_irq(irq, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct tegra_modem_operations baseband_operations = {
+ .init = baseband_init,
+ .start = baseband_start,
+ .reset = baseband_reset,
+};
+
+static struct tegra_usb_modem_power_platform_data baseband_pdata = {
+ .ops = &baseband_operations,
+ .wake_gpio = MDM2AP_ACK2,
+ .flags = IRQF_TRIGGER_FALLING,
+};
+
+static struct platform_device icera_baseband_device = {
+ .name = "tegra_usb_modem_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &baseband_pdata,
+ },
+};
+
+int __init whistler_baseband_init(void)
+{
+ tegra_pinmux_config_table(whistler_null_ulpi_pinmux,
+ ARRAY_SIZE(whistler_null_ulpi_pinmux));
+ platform_device_register(&icera_baseband_device);
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-baseband.h b/arch/arm/mach-tegra/board-whistler-baseband.h
new file mode 100644
index 000000000000..aceef6cd9676
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-baseband.h
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/mach-tegra/board-whistler-baseband.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef BOARD_WHISTLER_BASEBAND_H
+#define BOARD_WHISTLER_BASEBAND_H
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <mach/usb_phy.h>
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/wakelock.h>
+#include <asm/mach-types.h>
+#include <mach/pinmux.h>
+#include <mach/spi.h>
+#include "clock.h"
+#include "devices.h"
+#include "gpio-names.h"
+
+#define BOARD_WHISTLER_BASEBAND_U3XX 0
+#define BOARD_WHISTLER_BASEBAND_N731 1
+#define BOARD_WHISTLER_BASEBAND_SPI_LOOPBACK 2
+#define BOARD_WHISTLER_BASEBAND_HSIC 3
+
+#define TEGRA_CAIF_SSPI_GPIO_RESET TEGRA_GPIO_PV0
+#define TEGRA_CAIF_SSPI_GPIO_POWER TEGRA_GPIO_PV1
+#define TEGRA_CAIF_SSPI_GPIO_AWR TEGRA_GPIO_PZ0
+#define TEGRA_CAIF_SSPI_GPIO_CWR TEGRA_GPIO_PY6
+#define TEGRA_CAIF_SSPI_GPIO_SPI_INT TEGRA_GPIO_PO6
+#define TEGRA_CAIF_SSPI_GPIO_SS TEGRA_GPIO_PV2
+
+#define MODEM_PWR_ON TEGRA_GPIO_PV1
+#define MODEM_RESET TEGRA_GPIO_PV0
+
+/* Rainbow1 and 570 */
+#define AWR TEGRA_GPIO_PZ0
+#define CWR TEGRA_GPIO_PY6
+#define SPI_INT TEGRA_GPIO_PO6
+#define SPI_SLAVE_SEL TEGRA_GPIO_PV2
+
+/* Icera 450 GPIO */
+#define AP2MDM_ACK TEGRA_GPIO_PZ0
+#define MDM2AP_ACK TEGRA_GPIO_PY6
+#define AP2MDM_ACK2 TEGRA_GPIO_PU2
+#define MDM2AP_ACK2 TEGRA_GPIO_PV2
+#define BB_RST_OUT TEGRA_GPIO_PV3
+
+/* ULPI GPIO */
+#define ULPI_STP TEGRA_GPIO_PY3
+#define ULPI_DIR TEGRA_GPIO_PY1
+#define ULPI_D0 TEGRA_GPIO_PO1
+#define ULPI_D1 TEGRA_GPIO_PO2
+
+struct whistler_baseband {
+ struct tegra_clk_init_table *clk_init;
+ struct platform_device **platform_device;
+ int platform_device_size;
+ struct spi_board_info *spi_board_info;
+ int spi_board_info_size;
+};
+
+int whistler_baseband_init(void);
+#endif /* BOARD_WHISTLER_BASEBAND_H */
diff --git a/arch/arm/mach-tegra/board-whistler-kbc.c b/arch/arm/mach-tegra/board-whistler-kbc.c
new file mode 100644
index 000000000000..0dbcbbcdc313
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-kbc.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/device.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/io.h>
+#include <mach/kbc.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+/*
+ * The scrollwheel is connected to KBC pins but has its own
+ * driver that uses those pins as GPIOs.
+ * When the scrollwheel is used, Row3 and Col3/4/5
+ * must NOT be configured as KBC pins.
+ */
+#ifdef CONFIG_INPUT_ALPS_GPIO_SCROLLWHEEL
+#define WHISTLER_ROW_COUNT 3
+#define WHISTLER_COL_COUNT 2
+#else
+#define WHISTLER_ROW_COUNT 4
+#define WHISTLER_COL_COUNT 2
+#endif
+
+#ifdef CONFIG_INPUT_ALPS_GPIO_SCROLLWHEEL
+static const u32 whistler_keymap[] = {
+ KEY(0, 0, KEY_POWER),
+ KEY(0, 1, KEY_RESERVED),
+ KEY(1, 0, KEY_HOME),
+ KEY(1, 1, KEY_BACK),
+ KEY(2, 0, KEY_RESERVED),
+ KEY(2, 1, KEY_MENU),
+};
+#else
+static const u32 whistler_keymap[] = {
+ KEY(0, 0, KEY_POWER),
+ KEY(0, 1, KEY_RESERVED),
+ KEY(1, 0, KEY_HOME),
+ KEY(1, 1, KEY_BACK),
+ KEY(2, 0, KEY_RESERVED),
+ KEY(2, 1, KEY_MENU),
+ KEY(3, 0, KEY_RESERVED),
+ KEY(3, 1, KEY_RESERVED),
+};
+#endif
+
+static const struct matrix_keymap_data whistler_keymap_data = {
+ .keymap = whistler_keymap,
+ .keymap_size = ARRAY_SIZE(whistler_keymap),
+};
+
+static struct tegra_kbc_wake_key whistler_wake_cfg[] = {
+ [0] = {
+ .row = 0,
+ .col = 0,
+ },
+};
+
+static struct tegra_kbc_platform_data whistler_kbc_platform_data = {
+ .debounce_cnt = 20,
+ .repeat_cnt = 50 * 32,
+ .wake_cnt = 1,
+ .wake_cfg = &whistler_wake_cfg[0],
+ .keymap_data = &whistler_keymap_data,
+ .use_fn_map = false,
+ .wakeup = true,
+#ifdef CONFIG_ANDROID
+ .disable_ev_rep = true,
+#endif
+};
+
+static struct resource whistler_kbc_resources[] = {
+ [0] = {
+ .start = TEGRA_KBC_BASE,
+ .end = TEGRA_KBC_BASE + TEGRA_KBC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_KBC,
+ .end = INT_KBC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device whistler_kbc_device = {
+ .name = "tegra-kbc",
+ .id = -1,
+ .dev = {
+ .platform_data = &whistler_kbc_platform_data,
+ },
+ .resource = whistler_kbc_resources,
+ .num_resources = ARRAY_SIZE(whistler_kbc_resources),
+};
+
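+/* Configure the KBC row/column pins and register the keyboard controller. */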
+int __init whistler_kbc_init(void)
+{
+ struct tegra_kbc_platform_data *data = &whistler_kbc_platform_data;
+ int i;
+
+ pr_info("KBC: whistler_kbc_init\n");
+ for (i = 0; i < WHISTLER_ROW_COUNT; i++) {
+ data->pin_cfg[i].num = i;
+ data->pin_cfg[i].is_row = true;
+ data->pin_cfg[i].en = true;
+ }
+ for (i = 0; i < WHISTLER_COL_COUNT; i++) {
+ data->pin_cfg[i + KBC_PIN_GPIO_16].num = i;
+ data->pin_cfg[i + KBC_PIN_GPIO_16].en = true;
+ }
+
+ platform_device_register(&whistler_kbc_device);
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-memory.c b/arch/arm/mach-tegra/board-whistler-memory.c
new file mode 100644
index 000000000000..918e96d5c463
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-memory.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "board-whistler.h"
+#include "tegra2_emc.h"
+#include "board.h"
+
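+/* EMC timing tables for the Elpida part, one entry per SDRAM rate (kHz). */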
+static const struct tegra_emc_table whistler_emc_tables_elpida_300Mhz[] = {
+ {
+ .rate = 25000, /* SDRAM frequency */
+ .regs = {
+ 0x00000002, /* RC */
+ 0x00000006, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000004, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x0000004d, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x00000004, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000068, /* TREFBW */
+ 0x00000003, /* QUSE_EXTRA */
+ 0x00000003, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06a04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000003, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 50000, /* SDRAM frequency */
+ .regs = {
+ 0x00000003, /* RC */
+ 0x00000007, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x0000009f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x00000007, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x000000d0, /* TREFBW */
+ 0x00000004, /* QUSE_EXTRA */
+ 0x00000000, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06a04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000005, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 75000, /* SDRAM frequency */
+ .regs = {
+ 0x00000005, /* RC */
+ 0x0000000a, /* RFC */
+ 0x00000004, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x000000ff, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x0000000b, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000138, /* TREFBW */
+ 0x00000004, /* QUSE_EXTRA */
+ 0x00000000, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06a04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000007, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 150000, /* SDRAM frequency */
+ .regs = {
+ 0x00000009, /* RC */
+ 0x00000014, /* RFC */
+ 0x00000007, /* RAS */
+ 0x00000004, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000b, /* RDV */
+ 0x0000021f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000004, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000a, /* RW2PDEN */
+ 0x00000015, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000006, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000270, /* TREFBW */
+ 0x00000000, /* QUSE_EXTRA */
+ 0x00000001, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xA04C04AE, /* CFG_DIG_DLL */
+ 0x007FC010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x0000000e, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 300000, /* SDRAM frequency */
+ .regs = {
+ 0x00000012, /* RC */
+ 0x00000027, /* RFC */
+ 0x0000000D, /* RAS */
+ 0x00000007, /* RP */
+ 0x00000007, /* R2W */
+ 0x00000005, /* W2R */
+ 0x00000003, /* R2P */
+ 0x00000009, /* W2P */
+ 0x00000006, /* RD_RCD */
+ 0x00000006, /* WR_RCD */
+ 0x00000003, /* RRD */
+ 0x00000003, /* REXT */
+ 0x00000002, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000003, /* QRST */
+ 0x00000009, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x0000045f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000004, /* PDEX2WR */
+ 0x00000004, /* PDEX2RD */
+ 0x00000007, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000e, /* RW2PDEN */
+ 0x0000002A, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x0000000F, /* TFAW */
+ 0x00000008, /* TRPAB */
+ 0x00000005, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x000004E1, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000002, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000282, /* FBIO_CFG5 */
+ 0xE03C048B, /* CFG_DIG_DLL */
+ 0x007FC010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x0000001B, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ }
+};
+
+static const struct tegra_emc_table whistler_emc_tables_elpida_400Mhz[] = {
+ {
+ .rate = 23750, /* SDRAM frequency */
+ .regs = {
+ 0x00000002, /* RC */
+ 0x00000006, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000005, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x00000047, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000b, /* RW2PDEN */
+ 0x00000004, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000060, /* TREFBW */
+ 0x00000004, /* QUSE_EXTRA */
+ 0x00000003, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa0ae04ae, /* CFG_DIG_DLL */
+ 0x0001f800, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000003, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 63333, /* SDRAM frequency */
+ .regs = {
+ 0x00000004, /* RC */
+ 0x00000009, /* RFC */
+ 0x00000003, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x000000c4, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000b, /* RW2PDEN */
+ 0x00000009, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000107, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000000, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa0ae04ae, /* CFG_DIG_DLL */
+ 0x0001f800, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000006, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 95000, /* SDRAM frequency */
+ .regs = {
+ 0x00000006, /* RC */
+ 0x0000000d, /* RFC */
+ 0x00000004, /* RAS */
+ 0x00000003, /* RP */
+ 0x00000006, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000003, /* RD_RCD */
+ 0x00000003, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000002, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000008, /* QSAFE */
+ 0x0000000c, /* RDV */
+ 0x0000013f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000003, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000b, /* RW2PDEN */
+ 0x0000000e, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000008, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x0000018c, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000001, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa0ae04ae, /* CFG_DIG_DLL */
+ 0x0001f000, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000009, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 190000, /* SDRAM frequency */
+ .regs = {
+ 0x0000000c, /* RC */
+ 0x00000019, /* RFC */
+ 0x00000008, /* RAS */
+ 0x00000004, /* RP */
+ 0x00000007, /* R2W */
+ 0x00000004, /* W2R */
+ 0x00000002, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000004, /* RD_RCD */
+ 0x00000004, /* WR_RCD */
+ 0x00000002, /* RRD */
+ 0x00000003, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000006, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x00000009, /* QSAFE */
+ 0x0000000d, /* RDV */
+ 0x000002bf, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000003, /* PDEX2WR */
+ 0x00000003, /* PDEX2RD */
+ 0x00000004, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x0000000c, /* RW2PDEN */
+ 0x0000001b, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x0000000a, /* TFAW */
+ 0x00000004, /* TRPAB */
+ 0x00000008, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x00000317, /* TREFBW */
+ 0x00000005, /* QUSE_EXTRA */
+ 0x00000002, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000082, /* FBIO_CFG5 */
+ 0xa06204ae, /* CFG_DIG_DLL */
+ 0x007f7010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000012, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ },
+ {
+ .rate = 380000, /* SDRAM frequency */
+ .regs = {
+ 0x00000017, /* RC */
+ 0x00000032, /* RFC */
+ 0x00000010, /* RAS */
+ 0x00000007, /* RP */
+ 0x00000008, /* R2W */
+ 0x00000005, /* W2R */
+ 0x00000003, /* R2P */
+ 0x0000000b, /* W2P */
+ 0x00000007, /* RD_RCD */
+ 0x00000007, /* WR_RCD */
+ 0x00000004, /* RRD */
+ 0x00000003, /* REXT */
+ 0x00000003, /* WDV */
+ 0x00000007, /* QUSE */
+ 0x00000004, /* QRST */
+ 0x0000000a, /* QSAFE */
+ 0x0000000e, /* RDV */
+ 0x0000059f, /* REFRESH */
+ 0x00000000, /* BURST_REFRESH_NUM */
+ 0x00000004, /* PDEX2WR */
+ 0x00000004, /* PDEX2RD */
+ 0x00000007, /* PCHG2PDEN */
+ 0x00000008, /* ACT2PDEN */
+ 0x00000001, /* AR2PDEN */
+ 0x00000011, /* RW2PDEN */
+ 0x00000036, /* TXSR */
+ 0x00000003, /* TCKE */
+ 0x00000013, /* TFAW */
+ 0x00000008, /* TRPAB */
+ 0x00000007, /* TCLKSTABLE */
+ 0x00000002, /* TCLKSTOP */
+ 0x0000062d, /* TREFBW */
+ 0x00000006, /* QUSE_EXTRA */
+ 0x00000003, /* FBIO_CFG6 */
+ 0x00000000, /* ODT_WRITE */
+ 0x00000000, /* ODT_READ */
+ 0x00000282, /* FBIO_CFG5 */
+ 0xe044048b, /* CFG_DIG_DLL */
+ 0x007fb010, /* DLL_XFORM_DQS */
+ 0x00000000, /* DLL_XFORM_QUSE */
+ 0x00000000, /* ZCAL_REF_CNT */
+ 0x00000023, /* ZCAL_WAIT_CNT */
+ 0x00000000, /* AUTO_CAL_INTERVAL */
+ 0x00000000, /* CFG_CLKTRIM_0 */
+ 0x00000000, /* CFG_CLKTRIM_1 */
+ 0x00000000, /* CFG_CLKTRIM_2 */
+ }
+ }
+};
+
+static const struct tegra_emc_chip whistler_emc_chips[] = {
+ {
+ .description = "Elpida 300MHz",
+ .mem_manufacturer_id = 0x0303,
+ .mem_revision_id1 = 0,
+ .mem_revision_id2 = 0,
+ .mem_pid = 0x1414,
+ .table = whistler_emc_tables_elpida_300Mhz,
+ .table_size = ARRAY_SIZE(whistler_emc_tables_elpida_300Mhz)
+ },
+ {
+ .description = "Elpida 300MHz",
+ .mem_manufacturer_id = 0x0303,
+ .mem_revision_id1 = 0,
+ .mem_revision_id2 = 0,
+ .mem_pid = 0x5454,
+ .table = whistler_emc_tables_elpida_300Mhz,
+ .table_size = ARRAY_SIZE(whistler_emc_tables_elpida_300Mhz)
+ },
+};
+
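+/* Register the per-chip EMC timing tables with the Tegra EMC driver. */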
+int __init whistler_emc_init(void)
+{
+ tegra_init_emc(whistler_emc_chips,
+ ARRAY_SIZE(whistler_emc_chips));
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-panel.c b/arch/arm/mach-tegra/board-whistler-panel.c
new file mode 100644
index 000000000000..e68d6bf40292
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-panel.c
@@ -0,0 +1,390 @@
+/*
+ * arch/arm/mach-tegra/board-whistler-panel.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/earlysuspend.h>
+#include <linux/kernel.h>
+#include <linux/pwm_backlight.h>
+#include <linux/tegra_pwm_bl.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "devices.h"
+#include "gpio-names.h"
+#include "board.h"
+
+#define whistler_hdmi_hpd TEGRA_GPIO_PN7
+
+#ifdef CONFIG_TEGRA_DC
+static struct regulator *whistler_hdmi_reg;
+static struct regulator *whistler_hdmi_pll;
+#endif
+
+/*
+ * In case which_pwm is TEGRA_PWM_PM0,
+ * gpio_conf_to_sfio should be TEGRA_GPIO_PW0: set LCD_CS1_N pin to SFIO
+ * In case which_pwm is TEGRA_PWM_PM1,
+ * gpio_conf_to_sfio should be TEGRA_GPIO_PW1: set LCD_M1 pin to SFIO
+ */
+static struct platform_tegra_pwm_backlight_data whistler_disp1_backlight_data = {
+ .which_dc = 0,
+ .which_pwm = TEGRA_PWM_PM1,
+ .max_brightness = 256,
+ .dft_brightness = 77,
+ .gpio_conf_to_sfio = TEGRA_GPIO_PW1,
+ .switch_to_sfio = &tegra_gpio_disable,
+ .period = 0x1F,
+ .clk_div = 3,
+ .clk_select = 2,
+};
+
+static struct platform_device whistler_disp1_backlight_device = {
+ .name = "tegra-pwm-bl",
+ .id = -1,
+ .dev = {
+ .platform_data = &whistler_disp1_backlight_data,
+ },
+};
+
+#ifdef CONFIG_TEGRA_DC
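+/* Enable the HDMI AV and PLL supplies when the HDMI output is turned on. */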
+static int whistler_hdmi_enable(void)
+{
+ if (!whistler_hdmi_reg) {
+ whistler_hdmi_reg = regulator_get(NULL, "avdd_hdmi"); /* LDO11 */
+ if (IS_ERR_OR_NULL(whistler_hdmi_reg)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi\n");
+ whistler_hdmi_reg = NULL;
+ return PTR_ERR(whistler_hdmi_reg);
+ }
+ }
+ regulator_enable(whistler_hdmi_reg);
+
+ if (!whistler_hdmi_pll) {
+ whistler_hdmi_pll = regulator_get(NULL, "avdd_hdmi_pll"); /* LDO6 */
+ if (IS_ERR_OR_NULL(whistler_hdmi_pll)) {
+ pr_err("hdmi: couldn't get regulator avdd_hdmi_pll\n");
+ whistler_hdmi_pll = NULL;
+ regulator_disable(whistler_hdmi_reg);
+ whistler_hdmi_reg = NULL;
+ return PTR_ERR(whistler_hdmi_pll);
+ }
+ }
+ regulator_enable(whistler_hdmi_pll);
+ return 0;
+}
+
+static int whistler_hdmi_disable(void)
+{
+ regulator_disable(whistler_hdmi_reg);
+ regulator_disable(whistler_hdmi_pll);
+ return 0;
+}
+
+static struct resource whistler_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource whistler_disp2_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_B_GENERAL,
+ .end = INT_DISPLAY_B_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "hdmi_regs",
+ .start = TEGRA_HDMI_BASE,
+ .end = TEGRA_HDMI_BASE + TEGRA_HDMI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct tegra_dc_mode whistler_panel_modes[] = {
+ {
+ .pclk = 27000000,
+ .h_ref_to_sync = 4,
+ .v_ref_to_sync = 2,
+ .h_sync_width = 10,
+ .v_sync_width = 3,
+ .h_back_porch = 20,
+ .v_back_porch = 3,
+ .h_active = 800,
+ .v_active = 480,
+ .h_front_porch = 70,
+ .v_front_porch = 3,
+ },
+};
+
+static struct tegra_dc_out_pin whistler_dc_out_pins[] = {
+ {
+ .name = TEGRA_DC_OUT_PIN_H_SYNC,
+ .pol = TEGRA_DC_OUT_PIN_POL_LOW,
+ },
+ {
+ .name = TEGRA_DC_OUT_PIN_V_SYNC,
+ .pol = TEGRA_DC_OUT_PIN_POL_LOW,
+ },
+ {
+ .name = TEGRA_DC_OUT_PIN_PIXEL_CLOCK,
+ .pol = TEGRA_DC_OUT_PIN_POL_LOW,
+ },
+};
+
+static u8 whistler_dc_out_pin_sel_config[] = {
+ TEGRA_PIN_OUT_CONFIG_SEL_LM1_PM1,
+};
+
+static struct tegra_dc_out whistler_disp1_out = {
+ .type = TEGRA_DC_OUT_RGB,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .height = 54, /* mm */
+ .width = 90, /* mm */
+
+ .modes = whistler_panel_modes,
+ .n_modes = ARRAY_SIZE(whistler_panel_modes),
+
+ .out_pins = whistler_dc_out_pins,
+ .n_out_pins = ARRAY_SIZE(whistler_dc_out_pins),
+
+ .out_sel_configs = whistler_dc_out_pin_sel_config,
+ .n_out_sel_configs = ARRAY_SIZE(whistler_dc_out_pin_sel_config),
+};
+
+static struct tegra_dc_out whistler_disp2_out = {
+ .type = TEGRA_DC_OUT_HDMI,
+ .flags = TEGRA_DC_OUT_HOTPLUG_HIGH,
+
+ .dcc_bus = 1,
+ .hotplug_gpio = whistler_hdmi_hpd,
+
+ .max_pixclock = KHZ2PICOS(148500),
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .enable = whistler_hdmi_enable,
+ .disable = whistler_hdmi_disable,
+};
+
+static struct tegra_fb_data whistler_fb_data = {
+ .win = 0,
+ .xres = 800,
+ .yres = 480,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_fb_data whistler_hdmi_fb_data = {
+ .win = 0,
+ .xres = 800,
+ .yres = 480,
+ .bits_per_pixel = 32,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_platform_data whistler_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &whistler_disp1_out,
+ .fb = &whistler_fb_data,
+};
+
+static struct nvhost_device whistler_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = whistler_disp1_resources,
+ .num_resources = ARRAY_SIZE(whistler_disp1_resources),
+ .dev = {
+ .platform_data = &whistler_disp1_pdata,
+ },
+};
+
+static struct tegra_dc_platform_data whistler_disp2_pdata = {
+ .flags = 0,
+ .default_out = &whistler_disp2_out,
+ .fb = &whistler_hdmi_fb_data,
+};
+
+static struct nvhost_device whistler_disp2_device = {
+ .name = "tegradc",
+ .id = 1,
+ .resource = whistler_disp2_resources,
+ .num_resources = ARRAY_SIZE(whistler_disp2_resources),
+ .dev = {
+ .platform_data = &whistler_disp2_pdata,
+ },
+};
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static struct nvmap_platform_carveout whistler_carveouts[] = {
+ [0] = NVMAP_HEAP_CARVEOUT_IRAM_INIT,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0x18C00000,
+ .size = SZ_128M - 0xC00000,
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data whistler_nvmap_data = {
+ .carveouts = whistler_carveouts,
+ .nr_carveouts = ARRAY_SIZE(whistler_carveouts),
+};
+
+static struct platform_device whistler_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &whistler_nvmap_data,
+ },
+};
+#endif
+
+static struct platform_device *whistler_gfx_devices[] __initdata = {
+#if defined(CONFIG_TEGRA_NVMAP)
+ &whistler_nvmap_device,
+#endif
+#ifdef CONFIG_TEGRA_GRHOST
+ &tegra_grhost_device,
+#endif
+ &whistler_disp1_backlight_device,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * Put the early_suspend/late_resume handlers here for the display in order
+ * to keep the code out of the display driver, keeping it closer to upstream.
+ */
+struct early_suspend whistler_panel_early_suspender;
+
+static void whistler_panel_early_suspend(struct early_suspend *h)
+{
+ unsigned i;
+ for (i = 0; i < num_registered_fb; i++)
+ fb_blank(registered_fb[i], FB_BLANK_POWERDOWN);
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ cpufreq_save_default_governor();
+ cpufreq_set_conservative_governor();
+ cpufreq_set_conservative_governor_param(
+ SET_CONSERVATIVE_GOVERNOR_UP_THRESHOLD,
+ SET_CONSERVATIVE_GOVERNOR_DOWN_THRESHOLD);
+#endif
+}
+
+static void whistler_panel_late_resume(struct early_suspend *h)
+{
+ unsigned i;
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+ cpufreq_restore_default_governor();
+#endif
+ for (i = 0; i < num_registered_fb; i++)
+ fb_blank(registered_fb[i], FB_BLANK_UNBLANK);
+}
+#endif
+
+int __init whistler_panel_init(void)
+{
+ int err;
+ struct resource __maybe_unused *res;
+
+ tegra_gpio_enable(whistler_hdmi_hpd);
+ gpio_request(whistler_hdmi_hpd, "hdmi_hpd");
+ gpio_direction_input(whistler_hdmi_hpd);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ whistler_panel_early_suspender.suspend = whistler_panel_early_suspend;
+ whistler_panel_early_suspender.resume = whistler_panel_late_resume;
+ whistler_panel_early_suspender.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ register_early_suspend(&whistler_panel_early_suspender);
+#endif
+
+#if defined(CONFIG_TEGRA_NVMAP)
+ whistler_carveouts[1].base = tegra_carveout_start;
+ whistler_carveouts[1].size = tegra_carveout_size;
+#endif
+
+ err = platform_add_devices(whistler_gfx_devices,
+ ARRAY_SIZE(whistler_gfx_devices));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ res = nvhost_get_resource_byname(&whistler_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+#endif
+
+ /* Copy the bootloader fb to the fb. */
+ tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
+ min(tegra_fb_size, tegra_bootloader_fb_size));
+
+#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
+ res = nvhost_get_resource_byname(&whistler_disp2_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb2_start;
+ res->end = tegra_fb2_start + tegra_fb2_size - 1;
+
+ if (!err)
+ err = nvhost_device_register(&whistler_disp1_device);
+
+ if (!err)
+ err = nvhost_device_register(&whistler_disp2_device);
+#endif
+
+ return err;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-pinmux.c b/arch/arm/mach-tegra/board-whistler-pinmux.c
new file mode 100644
index 000000000000..12e9975061ca
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-pinmux.c
@@ -0,0 +1,177 @@
+/*
+ * arch/arm/mach-tegra/board-whistler-pinmux.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <mach/pinmux.h>
+
+#include "board-whistler.h"
+#include "gpio-names.h"
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+
+static __initdata struct tegra_drive_pingroup_config whistler_drive_pinmux[] = {
+ DEFAULT_DRIVE(DBG),
+ DEFAULT_DRIVE(DDC),
+ DEFAULT_DRIVE(VI1),
+ DEFAULT_DRIVE(VI2),
+ DEFAULT_DRIVE(SDIO1),
+};
+
+static __initdata struct tegra_pingroup_config whistler_pinmux[] = {
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_SDIO4, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_OSC, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GME, TEGRA_MUX_DAP5, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCB, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LCSN, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDI, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_OWC, TEGRA_MUX_OWR, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_UARTA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_UARTA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_OWR, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+static struct tegra_gpio_table gpio_table[] = {
+ { .gpio = TEGRA_GPIO_HP_DET, .enable = true },
+};
+
+int __init whistler_pinmux_init(void)
+{
+ tegra_pinmux_config_table(whistler_pinmux, ARRAY_SIZE(whistler_pinmux));
+ tegra_drive_pinmux_config_table(whistler_drive_pinmux,
+ ARRAY_SIZE(whistler_drive_pinmux));
+
+ tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-power.c b/arch/arm/mach-tegra/board-whistler-power.c
new file mode 100644
index 000000000000..89ca697b92ba
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-power.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max8907c.h>
+#include <linux/regulator/max8907c-regulator.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "gpio-names.h"
+#include "fuse.h"
+#include "pm.h"
+#include "wakeups-t2.h"
+#include "board.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+static struct regulator_consumer_supply max8907c_SD1_supply[] = {
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_SD2_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+ REGULATOR_SUPPLY("vdd_aon", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_SD3_supply[] = {
+ REGULATOR_SUPPLY("vddio_sys", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO1_supply[] = {
+ REGULATOR_SUPPLY("vddio_rx_ddr", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO2_supply[] = {
+ REGULATOR_SUPPLY("avdd_plla", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO3_supply[] = {
+ REGULATOR_SUPPLY("vdd_vcom_1v8b", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO4_supply[] = {
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO5_supply[] = {
+};
+
+static struct regulator_consumer_supply max8907c_LDO6_supply[] = {
+ REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO7_supply[] = {
+ REGULATOR_SUPPLY("avddio_audio", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO8_supply[] = {
+ REGULATOR_SUPPLY("vdd_vcom_3v0", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO9_supply[] = {
+ REGULATOR_SUPPLY("vdd_cam1", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO10_supply[] = {
+ REGULATOR_SUPPLY("avdd_usb_ic", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO11_supply[] = {
+ REGULATOR_SUPPLY("vddio_pex_clk", NULL),
+ REGULATOR_SUPPLY("avdd_hdmi", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO12_supply[] = {
+ REGULATOR_SUPPLY("vddio_sdio", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO13_supply[] = {
+ REGULATOR_SUPPLY("vdd_vcore_phtn", NULL),
+ REGULATOR_SUPPLY("vdd_vcore_af", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO14_supply[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO15_supply[] = {
+ REGULATOR_SUPPLY("vdd_vcore_temp", NULL),
+ REGULATOR_SUPPLY("vdd_vcore_hdcp", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO16_supply[] = {
+ REGULATOR_SUPPLY("vdd_vbrtr", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO17_supply[] = {
+ REGULATOR_SUPPLY("vddio_mipi", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO18_supply[] = {
+ REGULATOR_SUPPLY("vddio_vi", NULL),
+ REGULATOR_SUPPLY("vcsi", "tegra_camera"),
+};
+
+static struct regulator_consumer_supply max8907c_LDO19_supply[] = {
+ REGULATOR_SUPPLY("vddio_lx", NULL),
+};
+
+static struct regulator_consumer_supply max8907c_LDO20_supply[] = {
+ REGULATOR_SUPPLY("vddio_ddr_1v2", NULL),
+ REGULATOR_SUPPLY("vddio_hsic", NULL),
+};
+
+#define MAX8907C_REGULATOR_DEVICE(_id, _minmv, _maxmv) \
+static struct regulator_init_data max8907c_##_id##_data = { \
+ .constraints = { \
+ .min_uV = (_minmv), \
+ .max_uV = (_maxmv), \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ }, \
+ .num_consumer_supplies = ARRAY_SIZE(max8907c_##_id##_supply), \
+ .consumer_supplies = max8907c_##_id##_supply, \
+}; \
+static struct platform_device max8907c_##_id##_device = { \
+ .name = "max8907c-regulator", \
+ .id = MAX8907C_##_id, \
+ .dev = { \
+ .platform_data = &max8907c_##_id##_data, \
+ }, \
+}
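
For reference, a single invocation below, e.g. MAX8907C_REGULATOR_DEVICE(SD1, 637500, 1425000), expands to roughly the following pair of definitions (note that, despite the _minmv/_maxmv parameter names, the values are microvolts and feed the regulator core's min_uV/max_uV constraints):

static struct regulator_init_data max8907c_SD1_data = {
	.constraints = {
		.min_uV = 637500,
		.max_uV = 1425000,
		.valid_modes_mask = (REGULATOR_MODE_NORMAL |
				     REGULATOR_MODE_STANDBY),
		.valid_ops_mask = (REGULATOR_CHANGE_MODE |
				   REGULATOR_CHANGE_STATUS |
				   REGULATOR_CHANGE_VOLTAGE),
	},
	.num_consumer_supplies = ARRAY_SIZE(max8907c_SD1_supply),
	.consumer_supplies = max8907c_SD1_supply,
};
static struct platform_device max8907c_SD1_device = {
	.name = "max8907c-regulator",
	.id = MAX8907C_SD1,
	.dev = {
		.platform_data = &max8907c_SD1_data,
	},
};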
+
+MAX8907C_REGULATOR_DEVICE(SD1, 637500, 1425000);
+MAX8907C_REGULATOR_DEVICE(SD2, 637500, 1425000);
+MAX8907C_REGULATOR_DEVICE(SD3, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO1, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO2, 650000, 2225000);
+MAX8907C_REGULATOR_DEVICE(LDO3, 650000, 2225000);
+MAX8907C_REGULATOR_DEVICE(LDO4, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO5, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO6, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO7, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO8, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO9, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO10, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO11, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO12, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO13, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO14, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO15, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO16, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO17, 650000, 2225000);
+MAX8907C_REGULATOR_DEVICE(LDO18, 650000, 2225000);
+MAX8907C_REGULATOR_DEVICE(LDO19, 750000, 3900000);
+MAX8907C_REGULATOR_DEVICE(LDO20, 750000, 3900000);
+
+static struct platform_device *whistler_max8907c_power_devices[] = {
+ &max8907c_SD1_device,
+ &max8907c_SD2_device,
+ &max8907c_SD3_device,
+ &max8907c_LDO1_device,
+ &max8907c_LDO2_device,
+ &max8907c_LDO3_device,
+ &max8907c_LDO4_device,
+ &max8907c_LDO5_device,
+ &max8907c_LDO6_device,
+ &max8907c_LDO7_device,
+ &max8907c_LDO8_device,
+ &max8907c_LDO9_device,
+ &max8907c_LDO10_device,
+ &max8907c_LDO11_device,
+ &max8907c_LDO12_device,
+ &max8907c_LDO13_device,
+ &max8907c_LDO14_device,
+ &max8907c_LDO15_device,
+ &max8907c_LDO16_device,
+ &max8907c_LDO17_device,
+ &max8907c_LDO18_device,
+ &max8907c_LDO19_device,
+ &max8907c_LDO20_device,
+};
+
+static int whistler_max8907c_setup(void)
+{
+ int ret;
+
+ /*
+ * Configure PWREN, and attach CPU V1 rail to it.
+ * TODO: h/w events (power cycle, reset, battery low) auto-disable PWREN.
+ * Only soft reset (not supported) requires s/w to disable PWREN explicitly.
+ */
+ ret = max8907c_pwr_en_config();
+ if (ret != 0)
+ return ret;
+
+ return max8907c_pwr_en_attach();
+}
+
+static struct max8907c_platform_data max8907c_pdata = {
+ .num_subdevs = ARRAY_SIZE(whistler_max8907c_power_devices),
+ .subdevs = whistler_max8907c_power_devices,
+ .irq_base = TEGRA_NR_IRQS,
+ .max8907c_setup = whistler_max8907c_setup,
+};
+
+static struct i2c_board_info __initdata whistler_regulators[] = {
+ {
+ I2C_BOARD_INFO("max8907c", 0x3C),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &max8907c_pdata,
+ },
+};
+
+static struct tegra_suspend_platform_data whistler_suspend_data = {
+ .cpu_timer = 2000,
+ .cpu_off_timer = 1000,
+ .suspend_mode = TEGRA_SUSPEND_LP0,
+ .core_timer = 0x7e,
+ .core_off_timer = 0xc00,
+ .corereq_high = true,
+ .sysclkreq_high = true,
+ .combined_req = true,
+};
+
+int __init whistler_regulator_init(void)
+{
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ void __iomem *chip_id = IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804;
+ u32 pmc_ctrl;
+ u32 minor;
+
+ minor = (readl(chip_id) >> 16) & 0xf;
+ /* A03 (but not A03p) chips do not support LP0 */
+ if (minor == 3 && !(tegra_spare_fuse(18) || tegra_spare_fuse(19)))
+ whistler_suspend_data.suspend_mode = TEGRA_SUSPEND_LP1;
+
+ /* configure the power management controller to treat the
+ * PMU interrupt as active low */
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+
+ i2c_register_board_info(4, whistler_regulators, 1);
+
+ tegra_deep_sleep = max8907c_deep_sleep;
+
+ tegra_init_suspend(&whistler_suspend_data);
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-sdhci.c b/arch/arm/mach-tegra/board-whistler-sdhci.c
new file mode 100644
index 000000000000..08ebe33ae8b0
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-sdhci.c
@@ -0,0 +1,248 @@
+/*
+ * arch/arm/mach-tegra/board-whistler-sdhci.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/wlan_plat.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+
+#include "gpio-names.h"
+#include "board.h"
+
+#define WHISTLER_WLAN_PWR TEGRA_GPIO_PK5
+#define WHISTLER_WLAN_RST TEGRA_GPIO_PK6
+#define WHISTLER_WLAN_WOW TEGRA_GPIO_PU5
+
+#define WHISTLER_EXT_SDCARD_DETECT TEGRA_GPIO_PI5
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+
+static int whistler_wifi_status_register(
+ void (*sdhcicallback)(int card_present, void *dev_id),
+ void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = sdhcicallback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static int whistler_wifi_set_carddetect(int val)
+{
+ pr_debug("%s: %d\n", __func__, val);
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ pr_warning("%s: Nobody to notify\n", __func__);
+ return 0;
+}
+
+static int whistler_wifi_power(int on)
+{
+ gpio_set_value(WHISTLER_WLAN_PWR, on);
+ mdelay(100);
+ gpio_set_value(WHISTLER_WLAN_RST, on);
+ mdelay(200);
+
+ return 0;
+}
+
+static int whistler_wifi_reset(int on)
+{
+ pr_debug("%s: do nothing\n", __func__);
+ return 0;
+}
+
+
+static struct wifi_platform_data whistler_wifi_control = {
+ .set_power = whistler_wifi_power,
+ .set_reset = whistler_wifi_reset,
+ .set_carddetect = whistler_wifi_set_carddetect,
+};
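
A hypothetical consumer-side sketch (not part of this patch) of how the pieces above are meant to fit together: the WLAN driver powers the module through the platform data and then asserts card detect, which invokes whatever callback the SDHCI host registered through register_status_notify further below.

static void example_wifi_bringup(struct wifi_platform_data *pdata)
{
	/* whistler_wifi_power(): raises PWR, then RST, with delays */
	pdata->set_power(1);
	/* whistler_wifi_set_carddetect(): calls wifi_status_cb() if registered */
	pdata->set_carddetect(1);
}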
+
+static struct resource wifi_resource[] = {
+ [0] = {
+ .name = "bcm4329_wlan_irq",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU5),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU5),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device whistler_wifi_device = {
+ .name = "bcm4329_wlan",
+ .id = 1,
+ .num_resources = 1,
+ .resource = wifi_resource,
+ .dev = {
+ .platform_data = &whistler_wifi_control,
+ },
+};
+
+static struct resource sdhci_resource1[] = {
+ [0] = {
+ .start = INT_SDMMC2,
+ .end = INT_SDMMC2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC2_BASE,
+ .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct embedded_sdio_data embedded_sdio_data1 = {
+ .cccr = {
+ .sdio_vsn = 2,
+ .multi_block = 1,
+ .low_speed = 0,
+ .wide_bus = 0,
+ .high_power = 1,
+ .high_speed = 1,
+ },
+ .cis = {
+ .vendor = 0x02d0,
+ .device = 0x4329,
+ },
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data1 = {
+ .mmc_data = {
+ .register_status_notify = whistler_wifi_status_register,
+ .embedded_sdio = &embedded_sdio_data1,
+ .built_in = 1,
+ },
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .max_clk_limit = 25000000,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+ .cd_gpio = WHISTLER_EXT_SDCARD_DETECT,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .mmc_data = {
+ .built_in = 1,
+ }
+};
+
+static struct platform_device tegra_sdhci_device1 = {
+ .name = "sdhci-tegra",
+ .id = 1,
+ .resource = sdhci_resource1,
+ .num_resources = ARRAY_SIZE(sdhci_resource1),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data1,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data3,
+ },
+};
+
+static int __init whistler_wifi_init(void)
+{
+ gpio_request(WHISTLER_WLAN_PWR, "wlan_power");
+ gpio_request(WHISTLER_WLAN_RST, "wlan_rst");
+ gpio_request(WHISTLER_WLAN_WOW, "bcmsdh_sdmmc");
+
+ tegra_gpio_enable(WHISTLER_WLAN_PWR);
+ tegra_gpio_enable(WHISTLER_WLAN_RST);
+ tegra_gpio_enable(WHISTLER_WLAN_WOW);
+
+ gpio_direction_output(WHISTLER_WLAN_PWR, 0);
+ gpio_direction_output(WHISTLER_WLAN_RST, 0);
+ gpio_direction_input(WHISTLER_WLAN_WOW);
+
+ platform_device_register(&whistler_wifi_device);
+ return 0;
+}
+int __init whistler_sdhci_init(void)
+{
+ int ret;
+
+ tegra_gpio_enable(WHISTLER_EXT_SDCARD_DETECT);
+
+ platform_device_register(&tegra_sdhci_device3);
+ platform_device_register(&tegra_sdhci_device2);
+ platform_device_register(&tegra_sdhci_device1);
+
+ whistler_wifi_init();
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-whistler-sensors.c b/arch/arm/mach-tegra/board-whistler-sensors.c
new file mode 100644
index 000000000000..5177770f4cbe
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler-sensors.c
@@ -0,0 +1,406 @@
+/*
+ * arch/arm/mach-tegra/board-whistler-sensors.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <mach/gpio.h>
+#include <media/ov5650.h>
+#include <media/soc380.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/adt7461.h>
+#include <generated/mach-types.h>
+#include <linux/gpio.h>
+#include <linux/i2c/pca953x.h>
+
+#include <mach/tegra_odm_fuses.h>
+
+#include "gpio-names.h"
+#include "cpu-tegra.h"
+#include "board-whistler.h"
+
+#define CAMERA1_PWDN_GPIO TEGRA_GPIO_PT2
+#define CAMERA1_RESET_GPIO TEGRA_GPIO_PD2
+#define CAMERA2_PWDN_GPIO TEGRA_GPIO_PBB5
+#define CAMERA2_RESET_GPIO TEGRA_GPIO_PBB1
+#define CAMERA_AF_PD_GPIO TEGRA_GPIO_PT3
+#define CAMERA_FLASH_EN1_GPIO TEGRA_GPIO_PBB4
+#define CAMERA_FLASH_EN2_GPIO TEGRA_GPIO_PA0
+
+#define FUSE_POWER_EN_GPIO (TCA6416_GPIO_BASE + 2)
+
+#define ADXL34X_IRQ_GPIO TEGRA_GPIO_PAA1
+#define ISL29018_IRQ_GPIO TEGRA_GPIO_PK2
+#define ADT7461_IRQ_GPIO TEGRA_GPIO_PI2
+
+static struct regulator *reg_avdd_cam1; /* LDO9 */
+static struct regulator *reg_vdd_af; /* LDO13 */
+static struct regulator *reg_vdd_mipi; /* LDO17 */
+static struct regulator *reg_vddio_vi; /* LDO18 */
+
+static int whistler_camera_init(void)
+{
+ tegra_gpio_enable(CAMERA1_PWDN_GPIO);
+ gpio_request(CAMERA1_PWDN_GPIO, "camera1_powerdown");
+ gpio_direction_output(CAMERA1_PWDN_GPIO, 0);
+ gpio_export(CAMERA1_PWDN_GPIO, false);
+
+ tegra_gpio_enable(CAMERA1_RESET_GPIO);
+ gpio_request(CAMERA1_RESET_GPIO, "camera1_reset");
+ gpio_direction_output(CAMERA1_RESET_GPIO, 0);
+ gpio_export(CAMERA1_RESET_GPIO, false);
+
+ tegra_gpio_enable(CAMERA2_PWDN_GPIO);
+ gpio_request(CAMERA2_PWDN_GPIO, "camera2_powerdown");
+ gpio_direction_output(CAMERA2_PWDN_GPIO, 0);
+ gpio_export(CAMERA2_PWDN_GPIO, false);
+
+ tegra_gpio_enable(CAMERA2_RESET_GPIO);
+ gpio_request(CAMERA2_RESET_GPIO, "camera2_reset");
+ gpio_direction_output(CAMERA2_RESET_GPIO, 0);
+ gpio_export(CAMERA2_RESET_GPIO, false);
+
+ tegra_gpio_enable(CAMERA_AF_PD_GPIO);
+ gpio_request(CAMERA_AF_PD_GPIO, "camera_autofocus");
+ gpio_direction_output(CAMERA_AF_PD_GPIO, 0);
+ gpio_export(CAMERA_AF_PD_GPIO, false);
+
+ tegra_gpio_enable(CAMERA_FLASH_EN1_GPIO);
+ gpio_request(CAMERA_FLASH_EN1_GPIO, "camera_flash_en1");
+ gpio_direction_output(CAMERA_FLASH_EN1_GPIO, 0);
+ gpio_export(CAMERA_FLASH_EN1_GPIO, false);
+
+ tegra_gpio_enable(CAMERA_FLASH_EN2_GPIO);
+ gpio_request(CAMERA_FLASH_EN2_GPIO, "camera_flash_en2");
+ gpio_direction_output(CAMERA_FLASH_EN2_GPIO, 0);
+ gpio_export(CAMERA_FLASH_EN2_GPIO, false);
+
+ gpio_set_value(CAMERA1_PWDN_GPIO, 1);
+ mdelay(5);
+
+ return 0;
+}
+
+static int whistler_ov5650_power_on(void)
+{
+ gpio_set_value(CAMERA1_PWDN_GPIO, 0);
+
+ if (!reg_avdd_cam1) {
+ reg_avdd_cam1 = regulator_get(NULL, "vdd_cam1");
+ if (IS_ERR_OR_NULL(reg_avdd_cam1)) {
+ int err = IS_ERR(reg_avdd_cam1) ?
+ PTR_ERR(reg_avdd_cam1) : -ENODEV;
+ pr_err("whistler_ov5650_power_on: vdd_cam1 failed\n");
+ reg_avdd_cam1 = NULL;
+ return err; /* PTR_ERR(NULL) would wrongly report success */
+ }
+ regulator_enable(reg_avdd_cam1);
+ }
+ mdelay(5);
+
+ if (!reg_vdd_mipi) {
+ reg_vdd_mipi = regulator_get(NULL, "vddio_mipi");
+ if (IS_ERR_OR_NULL(reg_vdd_mipi)) {
+ int err = IS_ERR(reg_vdd_mipi) ?
+ PTR_ERR(reg_vdd_mipi) : -ENODEV;
+ pr_err("whistler_ov5650_power_on: vddio_mipi failed\n");
+ reg_vdd_mipi = NULL;
+ return err; /* PTR_ERR(NULL) would wrongly report success */
+ }
+ regulator_enable(reg_vdd_mipi);
+ }
+ mdelay(5);
+
+ if (!reg_vdd_af) {
+ reg_vdd_af = regulator_get(NULL, "vdd_vcore_af");
+ if (IS_ERR_OR_NULL(reg_vdd_af)) {
+ int err = IS_ERR(reg_vdd_af) ?
+ PTR_ERR(reg_vdd_af) : -ENODEV;
+ pr_err("whistler_ov5650_power_on: vdd_vcore_af failed\n");
+ reg_vdd_af = NULL;
+ return err; /* PTR_ERR(NULL) would wrongly report success */
+ }
+ regulator_enable(reg_vdd_af);
+ }
+ mdelay(5);
+
+ gpio_set_value(CAMERA1_RESET_GPIO, 1);
+ mdelay(10);
+ gpio_set_value(CAMERA1_RESET_GPIO, 0);
+ mdelay(5);
+ gpio_set_value(CAMERA1_RESET_GPIO, 1);
+ mdelay(20);
+ gpio_set_value(CAMERA_AF_PD_GPIO, 1);
+
+ return 0;
+}
+
+static int whistler_ov5650_power_off(void)
+{
+ gpio_set_value(CAMERA_AF_PD_GPIO, 0);
+ gpio_set_value(CAMERA1_PWDN_GPIO, 1);
+ gpio_set_value(CAMERA1_RESET_GPIO, 0);
+
+ if (reg_avdd_cam1) {
+ regulator_disable(reg_avdd_cam1);
+ regulator_put(reg_avdd_cam1);
+ reg_avdd_cam1 = NULL;
+ }
+
+ if (reg_vdd_mipi) {
+ regulator_disable(reg_vdd_mipi);
+ regulator_put(reg_vdd_mipi);
+ reg_vdd_mipi = NULL;
+ }
+
+ if (reg_vdd_af) {
+ regulator_disable(reg_vdd_af);
+ regulator_put(reg_vdd_af);
+ reg_vdd_af = NULL;
+ }
+
+ return 0;
+}
+
+static int whistler_soc380_power_on(void)
+{
+ gpio_set_value(CAMERA2_PWDN_GPIO, 0);
+
+ if (!reg_vddio_vi) {
+ reg_vddio_vi = regulator_get(NULL, "vddio_vi");
+ if (IS_ERR_OR_NULL(reg_vddio_vi)) {
+ int err = IS_ERR(reg_vddio_vi) ?
+ PTR_ERR(reg_vddio_vi) : -ENODEV;
+ pr_err("whistler_soc380_power_on: vddio_vi failed\n");
+ reg_vddio_vi = NULL;
+ return err; /* PTR_ERR(NULL) would wrongly report success */
+ }
+ regulator_set_voltage(reg_vddio_vi, 1800*1000, 1800*1000);
+ mdelay(5);
+ regulator_enable(reg_vddio_vi);
+ }
+
+ if (!reg_avdd_cam1) {
+ reg_avdd_cam1 = regulator_get(NULL, "vdd_cam1");
+ if (IS_ERR_OR_NULL(reg_avdd_cam1)) {
+ int err = IS_ERR(reg_avdd_cam1) ?
+ PTR_ERR(reg_avdd_cam1) : -ENODEV;
+ pr_err("whistler_soc380_power_on: vdd_cam1 failed\n");
+ reg_avdd_cam1 = NULL;
+ return err; /* PTR_ERR(NULL) would wrongly report success */
+ }
+ regulator_enable(reg_avdd_cam1);
+ }
+ mdelay(5);
+
+ gpio_set_value(CAMERA2_RESET_GPIO, 1);
+ mdelay(10);
+ gpio_set_value(CAMERA2_RESET_GPIO, 0);
+ mdelay(5);
+ gpio_set_value(CAMERA2_RESET_GPIO, 1);
+ mdelay(20);
+
+ return 0;
+
+}
+
+static int whistler_soc380_power_off(void)
+{
+ gpio_set_value(CAMERA2_PWDN_GPIO, 1);
+ gpio_set_value(CAMERA2_RESET_GPIO, 0);
+
+ if (reg_avdd_cam1) {
+ regulator_disable(reg_avdd_cam1);
+ regulator_put(reg_avdd_cam1);
+ reg_avdd_cam1 = NULL;
+ }
+ if (reg_vddio_vi) {
+ regulator_disable(reg_vddio_vi);
+ regulator_put(reg_vddio_vi);
+ reg_vddio_vi = NULL;
+ }
+
+ return 0;
+}
+
+struct ov5650_platform_data whistler_ov5650_data = {
+ .power_on = whistler_ov5650_power_on,
+ .power_off = whistler_ov5650_power_off,
+};
+
+struct soc380_platform_data whistler_soc380_data = {
+ .power_on = whistler_soc380_power_on,
+ .power_off = whistler_soc380_power_off,
+};
+
+static int whistler_fuse_power_en(int enb)
+{
+ int ret;
+
+ ret = gpio_request(FUSE_POWER_EN_GPIO, "fuse_power_en");
+ if (ret) {
+ pr_err("%s: gpio_request fail (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ ret = gpio_direction_output(FUSE_POWER_EN_GPIO, enb);
+ if (ret) {
+ pr_err("%s: gpio_direction_output fail (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ gpio_free(FUSE_POWER_EN_GPIO);
+ return 0;
+}
+
+static struct pca953x_platform_data whistler_tca6416_data = {
+ .gpio_base = TCA6416_GPIO_BASE,
+};
+
+static struct i2c_board_info whistler_i2c3_board_info[] = {
+ {
+ I2C_BOARD_INFO("ov5650", 0x36),
+ .platform_data = &whistler_ov5650_data,
+ },
+ {
+ I2C_BOARD_INFO("ad5820", 0x0c),
+ },
+ {
+ I2C_BOARD_INFO("soc380", 0x3C),
+ .platform_data = &whistler_soc380_data,
+ },
+};
+
+static void whistler_adxl34x_init(void)
+{
+ tegra_gpio_enable(ADXL34X_IRQ_GPIO);
+ gpio_request(ADXL34X_IRQ_GPIO, "adxl34x");
+ gpio_direction_input(ADXL34X_IRQ_GPIO);
+}
+
+static void whistler_isl29018_init(void)
+{
+ tegra_gpio_enable(ISL29018_IRQ_GPIO);
+ gpio_request(ISL29018_IRQ_GPIO, "isl29018");
+ gpio_direction_input(ISL29018_IRQ_GPIO);
+}
+
+static struct i2c_board_info whistler_i2c1_board_info[] = {
+ {
+ I2C_BOARD_INFO("adxl34x", 0x1D),
+ .irq = TEGRA_GPIO_TO_IRQ(ADXL34X_IRQ_GPIO),
+ },
+ {
+ I2C_BOARD_INFO("isl29018", 0x44),
+ .irq = TEGRA_GPIO_TO_IRQ(ISL29018_IRQ_GPIO),
+ },
+};
+
+static void whistler_adt7461_init(void)
+{
+ tegra_gpio_enable(ADT7461_IRQ_GPIO);
+ gpio_request(ADT7461_IRQ_GPIO, "adt7461");
+ gpio_direction_input(ADT7461_IRQ_GPIO);
+}
+
+static struct adt7461_platform_data whistler_adt7461_pdata = {
+ .supported_hwrev = true,
+ .ext_range = false,
+ .therm2 = true,
+ .conv_rate = 0x05,
+ .offset = 0,
+ .hysteresis = 0,
+ .shutdown_ext_limit = 115,
+ .shutdown_local_limit = 120,
+ .throttling_ext_limit = 90,
+ .alarm_fn = tegra_throttling_enable,
+};
+
+static struct i2c_board_info whistler_i2c4_board_info[] = {
+ {
+ I2C_BOARD_INFO("adt7461", 0x4C),
+ .irq = TEGRA_GPIO_TO_IRQ(ADT7461_IRQ_GPIO),
+ .platform_data = &whistler_adt7461_pdata,
+ },
+ {
+ I2C_BOARD_INFO("tca6416", 0x20),
+ .platform_data = &whistler_tca6416_data,
+ },
+};
+
+int __init whistler_sensors_init(void)
+{
+ whistler_camera_init();
+
+ whistler_adxl34x_init();
+
+ whistler_isl29018_init();
+
+ whistler_adt7461_init();
+
+ i2c_register_board_info(0, whistler_i2c1_board_info,
+ ARRAY_SIZE(whistler_i2c1_board_info));
+
+ i2c_register_board_info(4, whistler_i2c4_board_info,
+ ARRAY_SIZE(whistler_i2c4_board_info));
+
+ i2c_register_board_info(3, whistler_i2c3_board_info,
+ ARRAY_SIZE(whistler_i2c3_board_info));
+
+ tegra_fuse_regulator_en = whistler_fuse_power_en;
+
+ return 0;
+}
+
+int __init whistler_sensor_late_init(void)
+{
+ int ret;
+
+ if (!machine_is_whistler())
+ return 0;
+
+ reg_vddio_vi = regulator_get(NULL, "vddio_vi");
+ if (IS_ERR_OR_NULL(reg_vddio_vi)) {
+ pr_err("%s: Couldn't get regulator vddio_vi\n", __func__);
+ return PTR_ERR(reg_vddio_vi);
+ }
+
+ /* set vddio_vi voltage to 1.8v */
+ ret = regulator_set_voltage(reg_vddio_vi, 1800*1000, 1800*1000);
+ if (ret) {
+ pr_err("%s: Failed to set vddio_vi to 1.8v\n", __func__);
+ goto fail_put_regulator;
+ }
+
+ regulator_put(reg_vddio_vi);
+ reg_vddio_vi = NULL;
+ return 0;
+
+fail_put_regulator:
+ regulator_put(reg_vddio_vi);
+ reg_vddio_vi = NULL;
+ return ret;
+}
+
+late_initcall(whistler_sensor_late_init);
+
diff --git a/arch/arm/mach-tegra/board-whistler.c b/arch/arm/mach-tegra/board-whistler.c
new file mode 100644
index 000000000000..a1b61eae0eb3
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler.c
@@ -0,0 +1,612 @@
+/*
+ * arch/arm/mach-tegra/board-whistler.c
+ *
+ * Copyright (c) 2010 - 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/synaptics_i2c_rmi.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/gpio_scrollwheel.h>
+#include <linux/input.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/mfd/max8907c.h>
+#include <linux/memblock.h>
+#include <linux/tegra_uart.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+#include <mach/i2s.h>
+#include <mach/tegra_wm8753_pdata.h>
+#include <sound/tlv320aic326x.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/usb_phy.h>
+
+#include "board.h"
+#include "clock.h"
+#include "board-whistler.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "fuse.h"
+#include "pm.h"
+#include "board-whistler-baseband.h"
+
+#define USB1_VBUS_GPIO TCA6416_GPIO_BASE
+
+static struct plat_serial8250_port debug_uartb_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTB_BASE),
+ .mapbase = TEGRA_UARTB_BASE,
+ .irq = INT_UARTB,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = 216000000,
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device debug_uartb = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uartb_platform_data,
+ },
+};
+
+static struct plat_serial8250_port debug_uarta_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTA_BASE),
+ .mapbase = TEGRA_UARTA_BASE,
+ .irq = INT_UARTA,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = 216000000,
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device debug_uarta = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uarta_platform_data,
+ },
+};
+
+static struct platform_device *whistler_uart_devices[] __initdata = {
+ &tegra_uarta_device,
+ &tegra_uartb_device,
+ &tegra_uartc_device,
+};
+
+struct uart_clk_parent uart_parent_clk[] = {
+ [0] = {.name = "pll_p"},
+ [1] = {.name = "pll_m"},
+ [2] = {.name = "clk_m"},
+};
+
+static struct tegra_uart_platform_data whistler_uart_pdata;
+
+static void __init uart_debug_init(void)
+{
+ unsigned long rate;
+ struct clk *debug_uart_clk;
+ struct clk *c;
+ int modem_id = tegra_get_modem_id();
+
+ if (modem_id == 0x1) {
+ /* UARTB is the debug port. */
+ pr_info("Selecting UARTB as the debug console\n");
+ whistler_uart_devices[1] = &debug_uartb;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uartb");
+
+ /* Clock enable for the debug channel */
+ if (!IS_ERR_OR_NULL(debug_uart_clk)) {
+ rate = debug_uartb_platform_data[0].uartclk;
+ pr_info("The debug console clock name is %s\n",
+ debug_uart_clk->name);
+ c = tegra_get_clock_by_name("pll_p");
+ if (IS_ERR_OR_NULL(c))
+ pr_err("Not getting the parent clock pll_p\n");
+ else
+ clk_set_parent(debug_uart_clk, c);
+
+ clk_enable(debug_uart_clk);
+ clk_set_rate(debug_uart_clk, rate);
+ } else {
+ /* debug_uart_clk is NULL or an error pointer here */
+ pr_err("Not getting the clock uartb for debug console\n");
+ }
+ } else {
+ /* UARTA is the debug port. */
+ pr_info("Selecting UARTA as the debug console\n");
+ whistler_uart_devices[0] = &debug_uarta;
+ debug_uart_clk = clk_get_sys("serial8250.0", "uarta");
+
+ /* Clock enable for the debug channel */
+ if (!IS_ERR_OR_NULL(debug_uart_clk)) {
+ rate = debug_uarta_platform_data[0].uartclk;
+ pr_info("The debug console clock name is %s\n",
+ debug_uart_clk->name);
+ c = tegra_get_clock_by_name("pll_p");
+ if (IS_ERR_OR_NULL(c))
+ pr_err("Not getting the parent clock pll_p\n");
+ else
+ clk_set_parent(debug_uart_clk, c);
+
+ clk_enable(debug_uart_clk);
+ clk_set_rate(debug_uart_clk, rate);
+ } else {
+ /* debug_uart_clk is NULL or an error pointer here */
+ pr_err("Not getting the clock uarta for debug console\n");
+ }
+ }
+}
+
+static void __init whistler_uart_init(void)
+{
+ int i;
+ struct clk *c;
+
+ for (i = 0; i < ARRAY_SIZE(uart_parent_clk); ++i) {
+ c = tegra_get_clock_by_name(uart_parent_clk[i].name);
+ if (IS_ERR_OR_NULL(c)) {
+ pr_err("Not able to get the clock for %s\n",
+ uart_parent_clk[i].name);
+ continue;
+ }
+ uart_parent_clk[i].parent_clk = c;
+ uart_parent_clk[i].fixed_clk_rate = clk_get_rate(c);
+ }
+ whistler_uart_pdata.parent_clk_list = uart_parent_clk;
+ whistler_uart_pdata.parent_clk_count = ARRAY_SIZE(uart_parent_clk);
+
+ tegra_uarta_device.dev.platform_data = &whistler_uart_pdata;
+ tegra_uartb_device.dev.platform_data = &whistler_uart_pdata;
+ tegra_uartc_device.dev.platform_data = &whistler_uart_pdata;
+
+ if (!is_tegra_debug_uartport_hs())
+ uart_debug_init();
+
+ platform_add_devices(whistler_uart_devices,
+ ARRAY_SIZE(whistler_uart_devices));
+}
+
+static struct resource whistler_bcm4329_rfkill_resources[] = {
+ {
+ .name = "bcm4329_nshutdown_gpio",
+ .start = TEGRA_GPIO_PU0,
+ .end = TEGRA_GPIO_PU0,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct platform_device whistler_bcm4329_rfkill_device = {
+ .name = "bcm4329_rfkill",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(whistler_bcm4329_rfkill_resources),
+ .resource = whistler_bcm4329_rfkill_resources,
+};
+
+static struct resource whistler_bluesleep_resources[] = {
+ [0] = {
+ .name = "gpio_host_wake",
+ .start = TEGRA_GPIO_PU6,
+ .end = TEGRA_GPIO_PU6,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .name = "gpio_ext_wake",
+ .start = TEGRA_GPIO_PU1,
+ .end = TEGRA_GPIO_PU1,
+ .flags = IORESOURCE_IO,
+ },
+ [2] = {
+ .name = "host_wake",
+ .start = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .end = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ },
+};
+
+static struct platform_device whistler_bluesleep_device = {
+ .name = "bluesleep",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(whistler_bluesleep_resources),
+ .resource = whistler_bluesleep_resources,
+};
+
+static void __init whistler_setup_bluesleep(void)
+{
+ platform_device_register(&whistler_bluesleep_device);
+ tegra_gpio_enable(TEGRA_GPIO_PU6);
+ tegra_gpio_enable(TEGRA_GPIO_PU1);
+ return;
+}
+
+static struct tegra_utmip_config utmi_phy_config[] = {
+ [0] = {
+ .hssync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [1] = {
+ .hssync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+static struct tegra_ulpi_config ulpi_phy_config = {
+ .reset_gpio = TEGRA_GPIO_PG2,
+ .clk = "cdev2",
+};
+
+static __initdata struct tegra_clk_init_table whistler_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "pwm", "clk_32k", 32768, false},
+ { "kbc", "clk_32k", 32768, true},
+ { "sdmmc2", "pll_p", 25000000, false},
+ { "i2s1", "pll_a_out0", 0, false},
+ { "i2s2", "pll_a_out0", 0, false},
+ { "spdif_out", "pll_a_out0", 0, false},
+ { NULL, NULL, 0, 0},
+};
+
+static struct tegra_i2c_platform_data whistler_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PC4, 0},
+ .sda_gpio = {TEGRA_GPIO_PC5, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static const struct tegra_pingroup_config i2c2_ddc = {
+ .pingroup = TEGRA_PINGROUP_DDC,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static const struct tegra_pingroup_config i2c2_gen2 = {
+ .pingroup = TEGRA_PINGROUP_PTA,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static struct tegra_i2c_platform_data whistler_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 2,
+ .bus_clk_rate = { 100000, 100000 },
+ .bus_mux = { &i2c2_ddc, &i2c2_gen2 },
+ .bus_mux_len = { 1, 1 },
+ .scl_gpio = {0, TEGRA_GPIO_PT5},
+ .sda_gpio = {0, TEGRA_GPIO_PT6},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data whistler_i2c3_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .scl_gpio = {TEGRA_GPIO_PBB2, 0},
+ .sda_gpio = {TEGRA_GPIO_PBB3, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct tegra_i2c_platform_data whistler_dvc_platform_data = {
+ .adapter_nr = 4,
+ .bus_count = 1,
+ .bus_clk_rate = { 400000, 0 },
+ .is_dvc = true,
+ .scl_gpio = {TEGRA_GPIO_PZ6, 0},
+ .sda_gpio = {TEGRA_GPIO_PZ7, 0},
+ .arb_recovery = arb_lost_recovery,
+};
+
+static struct aic326x_pdata whistler_aic3262_pdata = {
+ /* debounce time */
+ .debounce_time_ms = 512,
+};
+
+static struct i2c_board_info __initdata wm8753_board_info[] = {
+ {
+ I2C_BOARD_INFO("wm8753", 0x1a),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_HP_DET),
+ },
+ {
+ I2C_BOARD_INFO("tlv320aic3262", 0x18),
+ .platform_data = &whistler_aic3262_pdata,
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_HP_DET),
+ },
+};
+
+static void whistler_i2c_init(void)
+{
+ tegra_i2c_device1.dev.platform_data = &whistler_i2c1_platform_data;
+ tegra_i2c_device2.dev.platform_data = &whistler_i2c2_platform_data;
+ tegra_i2c_device3.dev.platform_data = &whistler_i2c3_platform_data;
+ tegra_i2c_device4.dev.platform_data = &whistler_dvc_platform_data;
+
+ platform_device_register(&tegra_i2c_device4);
+ platform_device_register(&tegra_i2c_device3);
+ platform_device_register(&tegra_i2c_device2);
+ platform_device_register(&tegra_i2c_device1);
+
+ i2c_register_board_info(4, wm8753_board_info,
+ ARRAY_SIZE(wm8753_board_info));
+}
+
+#define GPIO_SCROLL(_pinaction, _gpio, _desc) \
+{ \
+ .pinaction = GPIO_SCROLLWHEEL_PIN_##_pinaction, \
+ .gpio = TEGRA_GPIO_##_gpio, \
+ .desc = _desc, \
+ .active_low = 1, \
+ .debounce_interval = 2, \
+}
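
For reference, the first table entry below, GPIO_SCROLL(ONOFF, PR3, "sw_onoff"), expands to the following initializer:

{
	.pinaction = GPIO_SCROLLWHEEL_PIN_ONOFF,
	.gpio = TEGRA_GPIO_PR3,
	.desc = "sw_onoff",
	.active_low = 1,
	.debounce_interval = 2,
}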
+
+static struct gpio_scrollwheel_button scroll_keys[] = {
+ [0] = GPIO_SCROLL(ONOFF, PR3, "sw_onoff"),
+ [1] = GPIO_SCROLL(PRESS, PQ5, "sw_press"),
+ [2] = GPIO_SCROLL(ROT1, PQ3, "sw_rot1"),
+ [3] = GPIO_SCROLL(ROT2, PQ4, "sw_rot2"),
+};
+
+static struct gpio_scrollwheel_platform_data whistler_scroll_platform_data = {
+ .buttons = scroll_keys,
+ .nbuttons = ARRAY_SIZE(scroll_keys),
+};
+
+static struct platform_device whistler_scroll_device = {
+ .name = "alps-gpio-scrollwheel",
+ .id = 0,
+ .dev = {
+ .platform_data = &whistler_scroll_platform_data,
+ },
+};
+
+static struct platform_device tegra_camera = {
+ .name = "tegra_camera",
+ .id = -1,
+};
+
+static struct tegra_wm8753_platform_data whistler_audio_pdata = {
+ .gpio_spkr_en = -1,
+ .gpio_hp_det = TEGRA_GPIO_HP_DET,
+ .gpio_hp_mute = -1,
+ .gpio_int_mic_en = -1,
+ .gpio_ext_mic_en = -1,
+ .debounce_time_hp = 200,
+};
+
+static struct platform_device whistler_audio_device1 = {
+ .name = "tegra-snd-aic326x",
+ .id = 0,
+ .dev = {
+ .platform_data = &whistler_audio_pdata,
+ },
+};
+
+static struct platform_device whistler_audio_device2 = {
+ .name = "tegra-snd-wm8753",
+ .id = 0,
+ .dev = {
+ .platform_data = &whistler_audio_pdata,
+ },
+};
+
+static struct platform_device *whistler_devices[] __initdata = {
+ &tegra_pmu_device,
+ &tegra_udc_device,
+ &tegra_gart_device,
+ &tegra_wdt_device,
+ &tegra_avp_device,
+ &whistler_scroll_device,
+ &tegra_camera,
+ &tegra_i2s_device1,
+ &tegra_i2s_device2,
+ &tegra_spdif_device,
+ &tegra_das_device,
+ &spdif_dit_device,
+ &bluetooth_dit_device,
+ &baseband_dit_device,
+ &whistler_bcm4329_rfkill_device,
+ &tegra_pcm_device,
+ &whistler_audio_device1,
+ &whistler_audio_device2,
+};
+
+static struct synaptics_i2c_rmi_platform_data synaptics_pdata = {
+ .flags = SYNAPTICS_FLIP_X | SYNAPTICS_FLIP_Y | SYNAPTICS_SWAP_XY,
+ .irqflags = IRQF_TRIGGER_LOW,
+};
+
+static const struct i2c_board_info whistler_i2c_touch_info[] = {
+ {
+ I2C_BOARD_INFO("synaptics-rmi-ts", 0x20),
+ .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PC6),
+ .platform_data = &synaptics_pdata,
+ },
+};
+
+static int __init whistler_touch_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PC6);
+ i2c_register_board_info(0, whistler_i2c_touch_info, 1);
+
+ return 0;
+}
+
+static int __init whistler_scroll_init(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(scroll_keys); i++)
+ tegra_gpio_enable(scroll_keys[i].gpio);
+
+ return 0;
+}
+
+static struct usb_phy_plat_data tegra_usb_phy_pdata[] = {
+ [0] = {
+ .instance = 0,
+ .vbus_irq = MAX8907C_INT_BASE + MAX8907C_IRQ_VCHG_DC_R,
+ .vbus_gpio = USB1_VBUS_GPIO,
+ },
+ [1] = {
+ .instance = 1,
+ .vbus_gpio = -1,
+ },
+ [2] = {
+ .instance = 2,
+ .vbus_gpio = -1,
+ },
+};
+
+static struct tegra_ehci_platform_data tegra_ehci_pdata[] = {
+ [0] = {
+ .phy_config = &utmi_phy_config[0],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [1] = {
+ .phy_config = &ulpi_phy_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+ [2] = {
+ .phy_config = &utmi_phy_config[1],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 1,
+ },
+};
+
+static struct tegra_otg_platform_data tegra_otg_pdata = {
+ .ehci_device = &tegra_ehci1_device,
+ .ehci_pdata = &tegra_ehci_pdata[0],
+};
+
+static int __init whistler_gps_init(void)
+{
+ tegra_gpio_enable(TEGRA_GPIO_PU4);
+ return 0;
+}
+
+static void whistler_power_off(void)
+{
+ int ret;
+
+ ret = max8907c_power_off();
+ if (ret)
+ pr_err("whistler: failed to power off\n");
+
+ while (1);
+}
+
+static void __init whistler_power_off_init(void)
+{
+ pm_power_off = whistler_power_off;
+}
+
+static void whistler_usb_init(void)
+{
+ tegra_usb_phy_init(tegra_usb_phy_pdata, ARRAY_SIZE(tegra_usb_phy_pdata));
+
+ tegra_otg_device.dev.platform_data = &tegra_otg_pdata;
+ platform_device_register(&tegra_otg_device);
+
+ tegra_ehci3_device.dev.platform_data = &tegra_ehci_pdata[2];
+ platform_device_register(&tegra_ehci3_device);
+}
+
+static void __init tegra_whistler_init(void)
+{
+ int modem_id = tegra_get_modem_id();
+ tegra_clk_init_from_table(whistler_clk_init_table);
+ whistler_pinmux_init();
+ whistler_i2c_init();
+ whistler_uart_init();
+ platform_add_devices(whistler_devices, ARRAY_SIZE(whistler_devices));
+
+ whistler_sdhci_init();
+ whistler_regulator_init();
+ whistler_panel_init();
+ whistler_sensors_init();
+ whistler_touch_init();
+ whistler_kbc_init();
+ whistler_gps_init();
+ whistler_usb_init();
+ whistler_scroll_init();
+ whistler_power_off_init();
+ whistler_emc_init();
+ if (modem_id == 0x1)
+ whistler_baseband_init();
+ whistler_setup_bluesleep();
+ tegra_release_bootloader_fb();
+}
+
+int __init tegra_whistler_protected_aperture_init(void)
+{
+ tegra_protected_aperture_init(tegra_grhost_aperture);
+ return 0;
+}
+
+void __init tegra_whistler_reserve(void)
+{
+ if (memblock_reserve(0x0, 4096) < 0)
+ pr_warn("Cannot reserve first 4K of memory for safety\n");
+
+ tegra_reserve(SZ_160M, SZ_8M, SZ_16M);
+}
+
+MACHINE_START(WHISTLER, "whistler")
+ .boot_params = 0x00000100,
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_whistler_reserve,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_whistler_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-whistler.h b/arch/arm/mach-tegra/board-whistler.h
new file mode 100644
index 000000000000..a31be96e915b
--- /dev/null
+++ b/arch/arm/mach-tegra/board-whistler.h
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/mach-tegra/board-whistler.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_WHISTLER_H
+#define _MACH_TEGRA_BOARD_WHISTLER_H
+
+int whistler_regulator_init(void);
+int whistler_sdhci_init(void);
+int whistler_pinmux_init(void);
+int whistler_panel_init(void);
+int whistler_kbc_init(void);
+int whistler_sensors_init(void);
+int whistler_baseband_init(void);
+int whistler_emc_init(void);
+
+/* Interrupt numbers from external peripherals */
+#define MAX8907C_INT_BASE TEGRA_NR_IRQS
+#define MAX8907C_INT_END (MAX8907C_INT_BASE + 31)
+
+/* Audio-related GPIOs */
+#define TEGRA_GPIO_HP_DET TEGRA_GPIO_PW3
+
+/* TCA6416 GPIO expander */
+#define TCA6416_GPIO_BASE (TEGRA_NR_GPIOS)
+
+#endif
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 1d14df7eb7de..bb533458238f 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/board.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -22,14 +23,96 @@
#define __MACH_TEGRA_BOARD_H
#include <linux/types.h>
+#include <linux/power_supply.h>
+
+#if defined(CONFIG_TEGRA_NVMAP)
+#define NVMAP_HEAP_CARVEOUT_IRAM_INIT \
+ { .name = "iram", \
+ .usage_mask = NVMAP_HEAP_CARVEOUT_IRAM, \
+ .base = TEGRA_IRAM_BASE + TEGRA_RESET_HANDLER_SIZE, \
+ .size = TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE, \
+ .buddy_size = 0, /* no buddy allocation for IRAM */ \
+ }
+#endif
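
A hypothetical usage sketch of the carveout initializer above; the carveout array type is defined by the nvmap driver rather than this header, so the nvmap_platform_carveout name is an assumption for illustration:

static struct nvmap_platform_carveout board_carveouts[] = {
	/* IRAM heap, skipping the region reserved for the reset handler */
	NVMAP_HEAP_CARVEOUT_IRAM_INIT,
	/* board-specific generic (DRAM) carveout entries would follow */
};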
void tegra_assert_system_reset(char mode, const char *cmd);
void __init tegra_init_early(void);
+void __init tegra_mc_init(void);
void __init tegra_map_common_io(void);
void __init tegra_init_irq(void);
void __init tegra_init_clock(void);
-int __init tegra_pcie_init(bool init_port0, bool init_port1);
+void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size,
+ unsigned long fb2_size);
+void tegra_init_cache(bool init);
+void __init tegra_release_bootloader_fb(void);
+void __init tegra_protected_aperture_init(unsigned long aperture);
+void tegra_move_framebuffer(unsigned long to, unsigned long from,
+ unsigned long size);
+bool is_tegra_debug_uartport_hs(void);
+int get_tegra_uart_debug_port_id(void);
+int arb_lost_recovery(int scl_gpio, int sda_gpio);
+
+extern unsigned long tegra_bootloader_fb_start;
+extern unsigned long tegra_bootloader_fb_size;
+extern unsigned long tegra_fb_start;
+extern unsigned long tegra_fb_size;
+extern unsigned long tegra_fb2_start;
+extern unsigned long tegra_fb2_size;
+extern unsigned long tegra_carveout_start;
+extern unsigned long tegra_carveout_size;
+extern unsigned long tegra_vpr_start;
+extern unsigned long tegra_vpr_size;
+extern unsigned long tegra_lp0_vec_start;
+extern unsigned long tegra_lp0_vec_size;
+extern bool tegra_lp0_vec_relocate;
+extern unsigned long tegra_grhost_aperture;
extern struct sys_timer tegra_timer;
+
+enum board_fab {
+ BOARD_FAB_A = 0,
+ BOARD_FAB_B,
+ BOARD_FAB_C,
+ BOARD_FAB_D,
+};
+
+struct board_info {
+ u16 board_id;
+ u16 sku;
+ u8 fab;
+ u8 major_revision;
+ u8 minor_revision;
+};
+
+enum panel_type {
+ panel_type_lvds = 0,
+ panel_type_dsi,
+};
+
+enum audio_codec_type {
+ audio_codec_none,
+ audio_codec_wm8903,
+};
+
+void tegra_get_board_info(struct board_info *);
+void tegra_get_pmu_board_info(struct board_info *bi);
+void tegra_get_display_board_info(struct board_info *bi);
+void tegra_get_camera_board_info(struct board_info *bi);
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+#define SET_CONSERVATIVE_GOVERNOR_UP_THRESHOLD 95
+#define SET_CONSERVATIVE_GOVERNOR_DOWN_THRESHOLD 50
+
+void cpufreq_save_default_governor(void);
+void cpufreq_restore_default_governor(void);
+void cpufreq_set_conservative_governor(void);
+void cpufreq_set_conservative_governor_param(int up_th, int down_th);
+#endif
+int get_core_edp(void);
+enum panel_type get_panel_type(void);
+int tegra_get_modem_id(void);
+enum power_supply_type get_power_supply_type(void);
+enum audio_codec_type get_audio_codec_type(void);
+int get_maximum_cpu_current_supported(void);
+
#endif
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index f8d41ffc0ca9..ec483c840672 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -5,6 +5,8 @@
* Author:
* Colin Cross <ccross@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -24,34 +26,52 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <trace/events/power.h>
#include <mach/clk.h>
#include "board.h"
#include "clock.h"
+#include "dvfs.h"
+
+#define DISABLE_BOOT_CLOCKS 1
/*
* Locking:
*
- * Each struct clk has a spinlock.
+ * Each struct clk has a lock. Depending on the cansleep flag, that lock
+ * may be a spinlock or a mutex. For most clocks, the spinlock is sufficient,
+ * and using the spinlock allows the clock to be manipulated from an interrupt
+ * or while holding a spinlock. Some clocks may need to adjust a regulator
+ * in order to maintain the required voltage for a new frequency. Those
+ * clocks set the cansleep flag, and take a mutex so that the regulator api
+ * can be used while holding the lock.
*
* To avoid AB-BA locking problems, locks must always be traversed from child
* clock to parent clock. For example, when enabling a clock, the clock's lock
* is taken, and then clk_enable is called on the parent, which take's the
- * parent clock's lock. There is one exceptions to this ordering: When dumping
- * the clock tree through debugfs. In this case, clk_lock_all is called,
- * which attemps to iterate through the entire list of clocks and take every
- * clock lock. If any call to spin_trylock fails, all locked clocks are
- * unlocked, and the process is retried. When all the locks are held,
- * the only clock operation that can be called is clk_get_rate_all_locked.
+ * parent clock's lock. There are two exceptions to this ordering:
+ * 1. When setting a clock as cansleep, in which case the entire list of clocks
+ * is traversed to set the children as cansleep as well. This must occur
+ * during init, before any calls to clk_get, so no other clock locks can
+ * get taken.
+ * 2. When dumping the clock tree through debugfs. In this case, clk_lock_all
+ * is called, which attempts to iterate through the entire list of clocks
+ * and take every clock lock. If any call to clk_trylock fails, all locked
+ * clocks are unlocked, and the process is retried. When all the locks
+ * are held, the only clock operation that can be called is
+ * clk_get_rate_all_locked.
*
* Within a single clock, no clock operation can call another clock operation
- * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any
- * clock operation can call any other clock operation on any of it's possible
- * parents.
+ * on itself, except for clk_xxx_locked. Any clock operation can call any other
+ * clock operation on any of its possible parents.
+ *
+ * clk_set_cansleep is used to mark a clock as sleeping. It is called during
+ * dvfs (Dynamic Voltage and Frequency Scaling) init on any clock that has a
+ * dvfs requirement, and is propagated to all possible children of the
+ * sleeping clock.
*
* An additional mutex, clock_list_lock, is used to protect the list of all
* clocks.
@@ -59,6 +79,10 @@
* The clock operations must lock internally to protect against
* read-modify-write on registers that are shared by multiple clocks
*/
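
As a rough sketch of the cansleep-aware locking described above: the clk_lock_save()/clk_unlock_restore() helpers used throughout this file are defined in clock.h (not shown in this diff), so the bodies below are an assumption for illustration only.

/* Sketch only; the real helpers live in arch/arm/mach-tegra/clock.h */
static inline void clk_lock_save(struct clk *c, unsigned long *flags)
{
	if (c->cansleep) {
		*flags = 0;
		mutex_lock(&c->mutex);	/* may sleep, e.g. for regulator calls */
	} else {
		spin_lock_irqsave(&c->spinlock, *flags);
	}
}

static inline void clk_unlock_restore(struct clk *c, unsigned long *flags)
{
	if (c->cansleep)
		mutex_unlock(&c->mutex);
	else
		spin_unlock_irqrestore(&c->spinlock, *flags);
}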
+
+/* FIXME: remove and never ignore overclock */
+#define IGNORE_PARENT_OVERCLOCK 0
+
static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);
@@ -76,8 +100,21 @@ struct clk *tegra_get_clock_by_name(const char *name)
mutex_unlock(&clock_list_lock);
return ret;
}
+EXPORT_SYMBOL(tegra_get_clock_by_name);
+
+static void clk_stats_update(struct clk *c)
+{
+ u64 cur_jiffies = get_jiffies_64();
+
+ if (c->refcnt) {
+ c->stats.time_on = cputime64_add(c->stats.time_on,
+ cputime64_sub(cur_jiffies, c->stats.last_update));
+ }
+
+ c->stats.last_update = cur_jiffies;
+}
-/* Must be called with c->spinlock held */
+/* Must be called with clk_lock(c) held */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
u64 rate;
@@ -93,7 +130,17 @@ static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
return rate;
}
-/* Must be called with c->spinlock held */
+unsigned long clk_get_max_rate(struct clk *c)
+{
+ return c->max_rate;
+}
+
+unsigned long clk_get_min_rate(struct clk *c)
+{
+ return c->min_rate;
+}
+
+/* Must be called with clk_lock(c) held */
unsigned long clk_get_rate_locked(struct clk *c)
{
unsigned long rate;
@@ -111,16 +158,54 @@ unsigned long clk_get_rate(struct clk *c)
unsigned long flags;
unsigned long rate;
- spin_lock_irqsave(&c->spinlock, flags);
+ clk_lock_save(c, &flags);
rate = clk_get_rate_locked(c);
- spin_unlock_irqrestore(&c->spinlock, flags);
+ clk_unlock_restore(c, &flags);
return rate;
}
EXPORT_SYMBOL(clk_get_rate);
+static void __clk_set_cansleep(struct clk *c)
+{
+ struct clk *child;
+ int i;
+ BUG_ON(mutex_is_locked(&c->mutex));
+ BUG_ON(spin_is_locked(&c->spinlock));
+
+ /* Make sure that all possible descendants of the sleeping clock are
+ marked as sleeping (to eliminate any "sleeping parent - non-sleeping
+ child" relationship) */
+ list_for_each_entry(child, &clocks, node) {
+ bool possible_parent = (child->parent == c);
+
+ if (!possible_parent && child->inputs) {
+ for (i = 0; child->inputs[i].input; i++) {
+ if (child->inputs[i].input == c) {
+ possible_parent = true;
+ break;
+ }
+ }
+ }
+
+ if (possible_parent)
+ __clk_set_cansleep(child);
+ }
+
+ c->cansleep = true;
+}
+
+/* Must be called before any clk_get calls */
+void clk_set_cansleep(struct clk *c)
+{
+
+ mutex_lock(&clock_list_lock);
+ __clk_set_cansleep(c);
+ mutex_unlock(&clock_list_lock);
+}
+
int clk_reparent(struct clk *c, struct clk *parent)
{
c->parent = parent;
@@ -129,7 +214,7 @@ int clk_reparent(struct clk *c, struct clk *parent)
void clk_init(struct clk *c)
{
- spin_lock_init(&c->spinlock);
+ clk_lock_init(c);
if (c->ops && c->ops->init)
c->ops->init(c);
@@ -142,78 +227,117 @@ void clk_init(struct clk *c)
else
c->state = ON;
}
+ c->stats.last_update = get_jiffies_64();
mutex_lock(&clock_list_lock);
list_add(&c->node, &clocks);
mutex_unlock(&clock_list_lock);
}
-int clk_enable(struct clk *c)
+static int clk_enable_locked(struct clk *c)
{
int ret = 0;
- unsigned long flags;
+ int rate = clk_get_rate_locked(c);
+ bool set_rate = false;
- spin_lock_irqsave(&c->spinlock, flags);
+ if (rate > c->max_rate) {
+ rate = c->max_rate;
+ set_rate = true;
+ }
+
+ if (clk_is_auto_dvfs(c)) {
+ ret = tegra_dvfs_set_rate(c, rate);
+ if (ret)
+ return ret;
+ }
if (c->refcnt == 0) {
if (c->parent) {
ret = clk_enable(c->parent);
if (ret)
- goto out;
+ return ret;
}
+ if (set_rate)
+ clk_set_rate_locked(c, rate);
+
if (c->ops && c->ops->enable) {
ret = c->ops->enable(c);
+ trace_clock_enable(c->name, 1, 0);
if (ret) {
if (c->parent)
clk_disable(c->parent);
- goto out;
+ return ret;
}
c->state = ON;
c->set = true;
}
+ clk_stats_update(c);
}
c->refcnt++;
-out:
- spin_unlock_irqrestore(&c->spinlock, flags);
+
return ret;
}
-EXPORT_SYMBOL(clk_enable);
-void clk_disable(struct clk *c)
+
+int clk_enable(struct clk *c)
{
+ int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&c->spinlock, flags);
+ clk_lock_save(c, &flags);
+ ret = clk_enable_locked(c);
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+static void clk_disable_locked(struct clk *c)
+{
if (c->refcnt == 0) {
WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
- spin_unlock_irqrestore(&c->spinlock, flags);
return;
}
if (c->refcnt == 1) {
- if (c->ops && c->ops->disable)
+ if (c->ops && c->ops->disable) {
+ trace_clock_disable(c->name, 0, 0);
c->ops->disable(c);
-
+ }
if (c->parent)
clk_disable(c->parent);
c->state = OFF;
+ clk_stats_update(c);
}
c->refcnt--;
- spin_unlock_irqrestore(&c->spinlock, flags);
+ if (clk_is_auto_dvfs(c) && c->refcnt == 0)
+ tegra_dvfs_set_rate(c, 0);
}
-EXPORT_SYMBOL(clk_disable);
-int clk_set_parent(struct clk *c, struct clk *parent)
+void clk_disable(struct clk *c)
{
- int ret;
unsigned long flags;
+
+ clk_lock_save(c, &flags);
+ clk_disable_locked(c);
+ clk_unlock_restore(c, &flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
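
For context, the refcounting above follows the usual clk API contract: the first clk_enable() turns the hardware on (enabling the parent chain and raising DVFS), and the matching final clk_disable() turns it off again. A hedged consumer sketch; the device and connection id are invented:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_power_on(struct device *dev)
{
	struct clk *clk = clk_get(dev, "sdmmc");	/* hypothetical connection id */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* refcnt 0 -> 1: parent enabled, DVFS raised */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... use the hardware ... */

	clk_disable(clk);		/* refcnt 1 -> 0: h/w gated, DVFS dropped to 0 */
	clk_put(clk);
	return 0;
}
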
+static int clk_rate_change_notify(struct clk *c, unsigned long rate)
+{
+ if (!c->rate_change_nh)
+ return -ENOSYS;
+ return raw_notifier_call_chain(c->rate_change_nh, rate, NULL);
+}
+
+int clk_set_parent_locked(struct clk *c, struct clk *parent)
+{
+ int ret = 0;
unsigned long new_rate;
unsigned long old_rate;
-
- spin_lock_irqsave(&c->spinlock, flags);
+ bool disable = false;
if (!c->ops || !c->ops->set_parent) {
ret = -ENOSYS;
@@ -223,12 +347,64 @@ int clk_set_parent(struct clk *c, struct clk *parent)
new_rate = clk_predict_rate_from_parent(c, parent);
old_rate = clk_get_rate_locked(c);
+ if (new_rate > clk_get_max_rate(c)) {
+
+ pr_err("Failed to set parent %s for %s (violates clock limit"
+ " %lu)\n", parent->name, c->name, clk_get_max_rate(c));
+#if !IGNORE_PARENT_OVERCLOCK
+ ret = -EINVAL;
+ goto out;
+#endif
+ }
+
+	/* The new clock control register setting does not take effect while
+	 * the clock is disabled. Later, when the clock is enabled, it would run
+	 * for several cycles on the old parent, which may hang the h/w if that
+	 * parent is already disabled. To guarantee the h/w switches to the new
+	 * setting, enable the clock while setting the parent.
+	 */
+ if ((c->refcnt == 0) && (c->flags & MUX)) {
+ pr_debug("Setting parent of clock %s with refcnt 0\n", c->name);
+ ret = clk_enable_locked(c);
+ if (ret)
+ goto out;
+ disable = true;
+ }
+
+ if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
+ (!c->parent || new_rate > old_rate)) {
+ ret = tegra_dvfs_set_rate(c, new_rate);
+ if (ret)
+ goto out;
+ }
+
ret = c->ops->set_parent(c, parent);
if (ret)
goto out;
+ if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
+ new_rate < old_rate)
+ ret = tegra_dvfs_set_rate(c, new_rate);
+
+ if (new_rate != old_rate)
+ clk_rate_change_notify(c, new_rate);
+
out:
- spin_unlock_irqrestore(&c->spinlock, flags);
+ if (disable)
+ clk_disable_locked(c);
+ return ret;
+}
+
+
+int clk_set_parent(struct clk *c, struct clk *parent)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ clk_lock_save(c, &flags);
+ ret = clk_set_parent_locked(c, parent);
+ clk_unlock_restore(c, &flags);
+
return ret;
}
EXPORT_SYMBOL(clk_set_parent);
@@ -241,42 +417,84 @@ EXPORT_SYMBOL(clk_get_parent);
int clk_set_rate_locked(struct clk *c, unsigned long rate)
{
+ int ret = 0;
+ unsigned long old_rate, max_rate;
long new_rate;
+ bool disable = false;
- if (!c->ops || !c->ops->set_rate)
- return -ENOSYS;
+ old_rate = clk_get_rate_locked(c);
- if (rate > c->max_rate)
- rate = c->max_rate;
+ max_rate = clk_get_max_rate(c);
+ if (rate > max_rate)
+ rate = max_rate;
if (c->ops && c->ops->round_rate) {
new_rate = c->ops->round_rate(c, rate);
- if (new_rate < 0)
- return new_rate;
+ if (new_rate < 0) {
+ ret = new_rate;
+ return ret;
+ }
rate = new_rate;
}
- return c->ops->set_rate(c, rate);
+	/* The new clock control register setting does not take effect while
+	 * the clock is disabled. Later, when the clock is enabled, it would run
+	 * for several cycles at the old rate, which may over-clock the module
+	 * at the given voltage. To guarantee the h/w switches to the new
+	 * setting, enable the clock while setting the rate.
+	 */
+ if ((c->refcnt == 0) && (c->flags & (DIV_U71 | DIV_U16)) &&
+ clk_is_auto_dvfs(c)) {
+ pr_debug("Setting rate of clock %s with refcnt 0\n", c->name);
+ ret = clk_enable_locked(c);
+ if (ret)
+ goto out;
+ disable = true;
+ }
+
+ if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
+ ret = tegra_dvfs_set_rate(c, rate);
+ if (ret)
+ goto out;
+ }
+
+ trace_clock_set_rate(c->name, rate, 0);
+ ret = c->ops->set_rate(c, rate);
+ if (ret)
+ goto out;
+
+ if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
+ ret = tegra_dvfs_set_rate(c, rate);
+
+ if (rate != old_rate)
+ clk_rate_change_notify(c, rate);
+
+out:
+ if (disable)
+ clk_disable_locked(c);
+ return ret;
}
int clk_set_rate(struct clk *c, unsigned long rate)
{
- int ret;
unsigned long flags;
+ int ret;
- spin_lock_irqsave(&c->spinlock, flags);
+ if (!c->ops || !c->ops->set_rate)
+ return -ENOSYS;
+
+ clk_lock_save(c, &flags);
ret = clk_set_rate_locked(c, rate);
- spin_unlock_irqrestore(&c->spinlock, flags);
+ clk_unlock_restore(c, &flags);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
-
/* Must be called with clocks lock and all individual clock locks held */
unsigned long clk_get_rate_all_locked(struct clk *c)
{
@@ -303,27 +521,50 @@ unsigned long clk_get_rate_all_locked(struct clk *c)
long clk_round_rate(struct clk *c, unsigned long rate)
{
- unsigned long flags;
+ unsigned long flags, max_rate;
long ret;
- spin_lock_irqsave(&c->spinlock, flags);
+ clk_lock_save(c, &flags);
if (!c->ops || !c->ops->round_rate) {
ret = -ENOSYS;
goto out;
}
- if (rate > c->max_rate)
- rate = c->max_rate;
+ max_rate = clk_get_max_rate(c);
+ if (rate > max_rate)
+ rate = max_rate;
ret = c->ops->round_rate(c, rate);
out:
- spin_unlock_irqrestore(&c->spinlock, flags);
+ clk_unlock_restore(c, &flags);
return ret;
}
EXPORT_SYMBOL(clk_round_rate);
+static int tegra_clk_clip_rate_for_parent(struct clk *c, struct clk *p)
+{
+ unsigned long flags, max_rate, old_rate, new_rate;
+
+ clk_lock_save(c, &flags);
+
+ max_rate = clk_get_max_rate(c);
+ new_rate = clk_predict_rate_from_parent(c, p);
+ old_rate = clk_get_rate_locked(c);
+
+ clk_unlock_restore(c, &flags);
+
+ if (new_rate > max_rate) {
+ u64 rate = max_rate;
+ rate *= old_rate;
+ do_div(rate, new_rate);
+
+ return clk_set_rate(c, (unsigned long)rate);
+ }
+ return 0;
+}
+
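
To make the clipping arithmetic above concrete (figures purely illustrative): if a clock currently runs at 200 MHz, clk_predict_rate_from_parent() says it would run at 400 MHz under the new parent, and its limit is 300 MHz, the rate is first set to 300 MHz * 200 MHz / 400 MHz = 150 MHz under the old parent; the subsequent parent switch then scales it by the same 2x ratio back up to the 300 MHz ceiling instead of an out-of-spec 400 MHz.
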
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
struct clk *c;
@@ -348,6 +589,14 @@ static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
}
if (c->parent != p) {
+ ret = tegra_clk_clip_rate_for_parent(c, p);
+ if (ret) {
+ pr_warning("Unable to clip rate for parent %s"
+ " of clock %s: %d\n",
+ table->parent, table->name, ret);
+ return -EINVAL;
+ }
+
ret = clk_set_parent(c, p);
if (ret) {
pr_warning("Unable to set parent %s of clock %s: %d\n",
@@ -387,60 +636,272 @@ EXPORT_SYMBOL(tegra_clk_init_from_table);
void tegra_periph_reset_deassert(struct clk *c)
{
- tegra2_periph_reset_deassert(c);
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, false);
}
EXPORT_SYMBOL(tegra_periph_reset_deassert);
void tegra_periph_reset_assert(struct clk *c)
{
- tegra2_periph_reset_assert(c);
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, true);
}
EXPORT_SYMBOL(tegra_periph_reset_assert);
+int tegra_is_clk_enabled(struct clk *c)
+{
+ return c->refcnt;
+}
+EXPORT_SYMBOL(tegra_is_clk_enabled);
+
+int tegra_clk_shared_bus_update(struct clk *c)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ clk_lock_save(c, &flags);
+
+ if (c->ops && c->ops->shared_bus_update)
+ ret = c->ops->shared_bus_update(c);
+
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+
+/* dvfs initialization may lower default maximum rate */
+void __init tegra_init_max_rate(struct clk *c, unsigned long max_rate)
+{
+ struct clk *shared_bus_user;
+
+ if (c->max_rate <= max_rate)
+ return;
+
+ pr_warning("Lowering %s maximum rate from %lu to %lu\n",
+ c->name, c->max_rate, max_rate);
+
+ c->max_rate = max_rate;
+ list_for_each_entry(shared_bus_user,
+ &c->shared_bus_list, u.shared_bus_user.node) {
+ shared_bus_user->u.shared_bus_user.rate = max_rate;
+ shared_bus_user->max_rate = max_rate;
+ }
+}
+
void __init tegra_init_clock(void)
{
- tegra2_init_clocks();
+ int ret;
+ struct clk *cpu;
+ struct clk *twd;
+
+ tegra_soc_init_clocks();
+ tegra_soc_init_dvfs();
+
+	/* The twd clock is a detached child of the CPU complex clock.
+	   Force an update of the twd clock after DVFS has updated the
+	   CPU clock rate. */
+ cpu = tegra_get_clock_by_name("cpu");
+ twd = tegra_get_clock_by_name("twd");
+ ret = clk_set_rate(twd, clk_get_rate(cpu));
+ if (ret)
+ pr_err("Failed to set twd clock rate: %d\n", ret);
+ else
+ pr_debug("TWD clock rate: %ld\n", clk_get_rate(twd));
+}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+/* On the Tegra 2 SoC the SDMMC clock source registers have extra bits that
+ * adjust the SDMMC controller delay between the clock and data to
+ * compensate for delays on the PCB. */
+void tegra_sdmmc_tap_delay(struct clk *c, int delay)
+{
+ unsigned long flags;
+
+ clk_lock_save(c, &flags);
+ tegra2_sdmmc_tap_delay(c, delay);
+
+ clk_unlock_restore(c, &flags);
+}
+#endif
+
+static bool tegra_keep_boot_clocks = false;
+static int __init tegra_keep_boot_clocks_setup(char *__unused)
+{
+ tegra_keep_boot_clocks = true;
+ return 1;
+}
+__setup("tegra_keep_boot_clocks", tegra_keep_boot_clocks_setup);
+
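
In practice this means booting with "tegra_keep_boot_clocks" on the kernel command line turns the boot-clock cleanup further below (tegra_init_disable_boot_clocks()) into a report-only pass: clocks the bootloader left on with a zero refcount are listed but not gated.
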
+/*
+ * Bootloader may not match kernel restrictions on CPU clock sources.
+ * Make sure CPU clock is sourced from either main or backup parent.
+ */
+static int tegra_sync_cpu_clock(void)
+{
+ int ret;
+ unsigned long rate;
+ struct clk *c = tegra_get_clock_by_name("cpu");
+
+ BUG_ON(!c);
+ rate = clk_get_rate(c);
+ ret = clk_set_rate(c, rate);
+ if (ret)
+ pr_err("%s: Failed to sync CPU at rate %lu\n", __func__, rate);
+ else
+ pr_info("CPU rate: %lu MHz\n", clk_get_rate(c) / 1000000);
+ return ret;
}
+late_initcall(tegra_sync_cpu_clock);
/*
- * The SDMMC controllers have extra bits in the clock source register that
- * adjust the delay between the clock and data to compenstate for delays
- * on the PCB.
+ * Iterate through all clocks, disabling any whose refcount is 0 but
+ * which clock init detected the bootloader had left enabled.
*/
-void tegra_sdmmc_tap_delay(struct clk *c, int delay)
+static int __init tegra_init_disable_boot_clocks(void)
{
+#if DISABLE_BOOT_CLOCKS
unsigned long flags;
+ struct clk *c;
- spin_lock_irqsave(&c->spinlock, flags);
- tegra2_sdmmc_tap_delay(c, delay);
- spin_unlock_irqrestore(&c->spinlock, flags);
+ mutex_lock(&clock_list_lock);
+
+ list_for_each_entry(c, &clocks, node) {
+ clk_lock_save(c, &flags);
+ if (c->refcnt == 0 && c->state == ON &&
+ c->ops && c->ops->disable) {
+ pr_warn_once("%s clocks left on by bootloader:\n",
+ tegra_keep_boot_clocks ?
+ "Prevented disabling" :
+ "Disabling");
+
+ pr_warn(" %s\n", c->name);
+
+ if (!tegra_keep_boot_clocks) {
+ c->ops->disable(c);
+ c->state = OFF;
+ }
+ }
+ clk_unlock_restore(c, &flags);
+ }
+
+ mutex_unlock(&clock_list_lock);
+#endif
+ return 0;
+}
+late_initcall(tegra_init_disable_boot_clocks);
+
+/* Several extended clock configuration bits (e.g., clock routing, clock
+ * phase control) are included in PLL and peripheral clock source
+ * registers. */
+int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ clk_lock_save(c, &flags);
+
+ if (!c->ops || !c->ops->clk_cfg_ex) {
+ ret = -ENOSYS;
+ goto out;
+ }
+ ret = c->ops->clk_cfg_ex(c, p, setting);
+
+out:
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+
+int tegra_register_clk_rate_notifier(struct clk *c, struct notifier_block *nb)
+{
+ int ret;
+ unsigned long flags;
+
+ if (!c->rate_change_nh)
+ return -ENOSYS;
+
+ clk_lock_save(c, &flags);
+ ret = raw_notifier_chain_register(c->rate_change_nh, nb);
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+
+void tegra_unregister_clk_rate_notifier(
+ struct clk *c, struct notifier_block *nb)
+{
+ unsigned long flags;
+
+ if (!c->rate_change_nh)
+ return;
+
+ clk_lock_save(c, &flags);
+ raw_notifier_chain_unregister(c->rate_change_nh, nb);
+ clk_unlock_restore(c, &flags);
}
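
A hedged sketch of a client for the two notifier helpers above; the callback and wrapper names are invented, and the clock passed in must be one that was created with a rate_change_nh chain:

#include <linux/kernel.h>
#include <linux/notifier.h>

#include "clock.h"	/* struct clk, tegra_{register,unregister}_clk_rate_notifier() */

static int example_rate_cb(struct notifier_block *nb,
			   unsigned long new_rate, void *data)
{
	/* The new rate is passed as the notifier "action" value. */
	pr_info("bus rate changed to %lu Hz\n", new_rate);
	return NOTIFY_OK;
}

static struct notifier_block example_rate_nb = {
	.notifier_call = example_rate_cb,
};

static int example_watch_bus_rate(struct clk *bus)
{
	/* Returns -ENOSYS if the clock has no rate_change_nh chain. */
	return tegra_register_clk_rate_notifier(bus, &example_rate_nb);
}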
#ifdef CONFIG_DEBUG_FS
+/*
+ * Attempt to lock all the clocks that are marked cansleep
+ * Must be called with irqs enabled
+ */
+static int __clk_lock_all_mutexes(void)
+{
+ struct clk *c;
+
+ might_sleep();
+
+ list_for_each_entry(c, &clocks, node)
+ if (clk_cansleep(c))
+ if (!mutex_trylock(&c->mutex))
+ goto unlock_mutexes;
+
+ return 0;
+
+unlock_mutexes:
+ list_for_each_entry_continue_reverse(c, &clocks, node)
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+
+ return -EAGAIN;
+}
+
+/*
+ * Attempt to lock all the clocks that are not marked cansleep
+ * Must be called with irqs disabled
+ */
static int __clk_lock_all_spinlocks(void)
{
struct clk *c;
list_for_each_entry(c, &clocks, node)
- if (!spin_trylock(&c->spinlock))
- goto unlock_spinlocks;
+ if (!clk_cansleep(c))
+ if (!spin_trylock(&c->spinlock))
+ goto unlock_spinlocks;
return 0;
unlock_spinlocks:
list_for_each_entry_continue_reverse(c, &clocks, node)
- spin_unlock(&c->spinlock);
+ if (!clk_cansleep(c))
+ spin_unlock(&c->spinlock);
return -EAGAIN;
}
+static void __clk_unlock_all_mutexes(void)
+{
+ struct clk *c;
+
+ list_for_each_entry_reverse(c, &clocks, node)
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+}
+
static void __clk_unlock_all_spinlocks(void)
{
struct clk *c;
list_for_each_entry_reverse(c, &clocks, node)
- spin_unlock(&c->spinlock);
+ if (!clk_cansleep(c))
+ spin_unlock(&c->spinlock);
}
/*
@@ -453,6 +914,10 @@ static void clk_lock_all(void)
{
int ret;
retry:
+ ret = __clk_lock_all_mutexes();
+ if (ret)
+ goto failed_mutexes;
+
local_irq_disable();
ret = __clk_lock_all_spinlocks();
@@ -464,7 +929,9 @@ retry:
failed_spinlocks:
local_irq_enable();
- yield();
+ __clk_unlock_all_mutexes();
+failed_mutexes:
+ msleep(1);
goto retry;
}
@@ -478,16 +945,28 @@ static void clk_unlock_all(void)
__clk_unlock_all_spinlocks();
local_irq_enable();
+
+ __clk_unlock_all_mutexes();
}
static struct dentry *clk_debugfs_root;
+static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
+{
+ seq_printf(s, "%*s %-*s%21s%d mV\n",
+ level * 3 + 1, "",
+ 30 - level * 3, d->dvfs_rail->reg_id,
+ "",
+ d->cur_millivolts);
+}
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
const char *state = "uninit";
char div[8] = {0};
+ unsigned long rate = clk_get_rate_all_locked(c);
+	unsigned long max_rate = clk_get_max_rate(c);
if (c->state == ON)
state = "on";
@@ -511,12 +990,19 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
}
}
- seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
+ seq_printf(s, "%*s%c%c%-*s%c %-6s %-3d %-8s %-10lu",
level * 3 + 1, "",
- c->rate > c->max_rate ? '!' : ' ',
+ rate > max_rate ? '!' : ' ',
!c->set ? '*' : ' ',
30 - level * 3, c->name,
- state, c->refcnt, div, clk_get_rate_all_locked(c));
+ c->cansleep ? '$' : ' ',
+ state, c->refcnt, div, rate);
+ if (c->parent && !list_empty(&c->parent->shared_bus_list))
+ seq_printf(s, " (%lu)", c->u.shared_bus_user.rate);
+ seq_printf(s, "\n");
+
+ if (c->dvfs)
+ dvfs_show_one(s, c->dvfs, level + 1);
list_for_each_entry(child, &clocks, node) {
if (child->parent != c)
@@ -529,8 +1015,8 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
- seq_printf(s, " clock state ref div rate\n");
- seq_printf(s, "--------------------------------------------------------------\n");
+ seq_printf(s, " clock state ref div rate (shared rate)\n");
+ seq_printf(s, "------------------------------------------------------------------------------\n");
mutex_lock(&clock_list_lock);
@@ -558,6 +1044,61 @@ static const struct file_operations clock_tree_fops = {
.release = single_release,
};
+static void syncevent_one(struct clk *c)
+{
+ struct clk *child;
+
+ if (c->state == ON)
+ trace_clock_enable(c->name, 1, smp_processor_id());
+ else
+ trace_clock_disable(c->name, 0, smp_processor_id());
+
+ trace_clock_set_rate(c->name, clk_get_rate_all_locked(c),
+ smp_processor_id());
+
+ list_for_each_entry(child, &clocks, node) {
+ if (child->parent != c)
+ continue;
+
+ syncevent_one(child);
+ }
+}
+
+static int syncevent_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct clk *c;
+ char buffer[40];
+ int buf_size;
+
+ memset(buffer, 0, sizeof(buffer));
+ buf_size = min(count, (sizeof(buffer)-1));
+
+ if (copy_from_user(buffer, user_buf, buf_size))
+ return -EFAULT;
+
+ if (!strnicmp("all", buffer, 3)) {
+ mutex_lock(&clock_list_lock);
+
+ clk_lock_all();
+
+ list_for_each_entry(c, &clocks, node) {
+ if (c->parent == NULL)
+ syncevent_one(c);
+ }
+
+ clk_unlock_all();
+
+ mutex_unlock(&clock_list_lock);
+ }
+
+ return count;
+}
+
+static const struct file_operations syncevent_fops = {
+ .write = syncevent_write,
+};
+
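
For reference, the node this backs is registered further below by clk_debugfs_init() as "syncevents" under the clock debugfs root; writing "all" to it replays the current tree state as clock trace events, e.g. echo all > /sys/kernel/debug/clock/syncevents (assuming the usual debugfs mount point and a root directory named "clock").
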
static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk *c = s->private;
@@ -583,6 +1124,124 @@ static const struct file_operations possible_parents_fops = {
.release = single_release,
};
+static int parent_show(struct seq_file *s, void *data)
+{
+ struct clk *c = s->private;
+ struct clk *p = clk_get_parent(c);
+
+ seq_printf(s, "%s\n", p ? p->name : "clk_root");
+ return 0;
+}
+
+static int parent_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, parent_show, inode->i_private);
+}
+
+static int rate_get(void *data, u64 *val)
+{
+ struct clk *c = (struct clk *)data;
+ *val = (u64)clk_get_rate(c);
+ return 0;
+}
+
+static int state_get(void *data, u64 *val)
+{
+ struct clk *c = (struct clk *)data;
+ *val = (u64)((c->state == ON) ? 1 : 0);
+ return 0;
+}
+
+#ifdef CONFIG_TEGRA_CLOCK_DEBUG_WRITE
+
+static const mode_t parent_rate_mode = S_IRUGO | S_IWUSR;
+
+static ssize_t parent_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct clk *c = s->private;
+ struct clk *p = NULL;
+ char buf[32];
+
+ if (sizeof(buf) <= count)
+ return -EINVAL;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ /* terminate buffer and trim - white spaces may be appended
+ * at the end when invoked from shell command line */
+ buf[count]='\0';
+ strim(buf);
+
+ p = tegra_get_clock_by_name(buf);
+ if (!p)
+ return -EINVAL;
+
+ if (clk_set_parent(c, p))
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations parent_fops = {
+ .open = parent_open,
+ .read = seq_read,
+ .write = parent_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int rate_set(void *data, u64 val)
+{
+ struct clk *c = (struct clk *)data;
+ return clk_set_rate(c, (unsigned long)val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, rate_set, "%llu\n");
+
+static int state_set(void *data, u64 val)
+{
+ struct clk *c = (struct clk *)data;
+
+ if (val)
+ return clk_enable(c);
+ else {
+ clk_disable(c);
+ return 0;
+ }
+}
+DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
+
+#else
+
+static const mode_t parent_rate_mode = S_IRUGO;
+
+static const struct file_operations parent_fops = {
+ .open = parent_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, NULL, "%llu\n");
+#endif
+
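
With CONFIG_TEGRA_CLOCK_DEBUG_WRITE enabled, the per-clock parent, rate and state nodes defined above become writable, which is handy during bring-up, e.g. echo pll_p > /sys/kernel/debug/clock/sdmmc1/parent or echo 48000000 > /sys/kernel/debug/clock/sdmmc1/rate (clock and parent names are illustrative, and the path prefix depends on where debugfs is mounted).
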
+static int time_on_get(void *data, u64 *val)
+{
+ unsigned long flags;
+ struct clk *c = (struct clk *)data;
+
+ clk_lock_save(c, &flags);
+ clk_stats_update(c);
+ *val = cputime64_to_clock_t(c->stats.time_on);
+ clk_unlock_restore(c, &flags);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(time_on_fops, time_on_get, NULL, "%llu\n");
+
static int clk_debugfs_register_one(struct clk *c)
{
struct dentry *d;
@@ -596,11 +1255,31 @@ static int clk_debugfs_register_one(struct clk *c)
if (!d)
goto err_out;
- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+ d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
if (!d)
goto err_out;
- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+ d = debugfs_create_u32("max", S_IRUGO, c->dent, (u32 *)&c->max_rate);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file(
+ "parent", parent_rate_mode, c->dent, c, &parent_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file(
+ "rate", parent_rate_mode, c->dent, c, &rate_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file(
+ "state", parent_rate_mode, c->dent, c, &state_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file(
+ "time_on", S_IRUGO, c->dent, c, &time_on_fops);
if (!d)
goto err_out;
@@ -653,6 +1332,12 @@ static int __init clk_debugfs_init(void)
if (!d)
goto err_out;
+ d = debugfs_create_file("syncevents", S_IRUGO|S_IWUSR, clk_debugfs_root, NULL,
+ &syncevent_fops);
+
+ if (dvfs_debugfs_init(clk_debugfs_root))
+ goto err_out;
+
list_for_each_entry(c, &clocks, node) {
err = clk_debugfs_register(c);
if (err)
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h
index 688316abc64e..25ba88bc4893 100644
--- a/arch/arm/mach-tegra/clock.h
+++ b/arch/arm/mach-tegra/clock.h
@@ -6,6 +6,8 @@
* Author:
* Colin Cross <ccross@google.com>
*
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -20,9 +22,13 @@
#ifndef __MACH_TEGRA_CLOCK_H
#define __MACH_TEGRA_CLOCK_H
-#include <linux/clkdev.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define USE_PLL_LOCK_BITS 0 /* Never use lock bits on Tegra2 */
+#else
+#define USE_PLL_LOCK_BITS 1	/* Use lock bits for PLL stabilization */
+#define USE_PLLE_SS 1 /* Use spread spectrum coefficients for PLLE */
+#define PLL_POST_LOCK_DELAY 50 /* Safety delay after lock is detected */
+#endif
#define DIV_BUS (1 << 0)
#define DIV_U71 (1 << 1)
@@ -39,7 +45,29 @@
#define PERIPH_MANUAL_RESET (1 << 12)
#define PLL_ALT_MISC_REG (1 << 13)
#define PLLU (1 << 14)
+#define PLLX (1 << 15)
+#define MUX_PWM (1 << 16)
+#define MUX8 (1 << 17)
+#define DIV_U151_UART (1 << 18)
+#define MUX_CLK_OUT (1 << 19)
+#define PLLM (1 << 20)
+#define DIV_U71_INT (1 << 21)
+#define DIV_U71_IDLE (1 << 22)
+#define DIV_U151 (1 << 23)
#define ENABLE_ON_INIT (1 << 28)
+#define PERIPH_ON_APB (1 << 29)
+#define PERIPH_ON_CBUS (1 << 30)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <asm/cputime.h>
+
+#include <mach/clk.h>
+#define MAX_SAME_LIMIT_SKU_IDS 16
struct clk;
@@ -64,7 +92,26 @@ struct clk_ops {
int (*set_parent)(struct clk *, struct clk *);
int (*set_rate)(struct clk *, unsigned long);
long (*round_rate)(struct clk *, unsigned long);
+ int (*clk_cfg_ex)(struct clk *, enum tegra_clk_ex_param, u32);
void (*reset)(struct clk *, bool);
+ int (*shared_bus_update)(struct clk *);
+};
+
+struct clk_stats {
+ cputime64_t time_on;
+ u64 last_update;
+};
+
+enum cpu_mode {
+ MODE_G = 0,
+ MODE_LP,
+};
+
+enum shared_bus_users_mode {
+ SHARED_FLOOR = 0,
+ SHARED_BW,
+ SHARED_CEILING,
+ SHARED_AUTO,
};
enum clk_state {
@@ -76,6 +123,7 @@ enum clk_state {
struct clk {
/* node for master clocks list */
struct list_head node; /* node for list of all clocks */
+ struct dvfs *dvfs;
struct clk_lookup lookup;
#ifdef CONFIG_DEBUG_FS
@@ -83,9 +131,12 @@ struct clk {
#endif
bool set;
struct clk_ops *ops;
+ unsigned long dvfs_rate;
unsigned long rate;
unsigned long max_rate;
unsigned long min_rate;
+ bool auto_dvfs;
+ bool cansleep;
u32 flags;
const char *name;
@@ -94,12 +145,14 @@ struct clk {
struct clk *parent;
u32 div;
u32 mul;
+ struct clk_stats stats;
const struct clk_mux_sel *inputs;
u32 reg;
u32 reg_shift;
struct list_head shared_bus_list;
+ struct clk_mux_sel shared_bus_backup;
union {
struct {
@@ -114,6 +167,7 @@ struct clk {
unsigned long vco_max;
const struct clk_pll_freq_table *freq_table;
int lock_delay;
+ unsigned long fixed_rate;
} pll;
struct {
u32 sel;
@@ -122,14 +176,29 @@ struct clk {
struct {
struct clk *main;
struct clk *backup;
+ enum cpu_mode mode;
} cpu;
struct {
+ struct clk *pclk;
+ struct clk *hclk;
+ struct clk *sclk_low;
+ struct clk *sclk_high;
+ unsigned long threshold;
+ } system;
+ struct {
struct list_head node;
bool enabled;
unsigned long rate;
+ const char *client_id;
+ struct clk *client;
+ u32 client_div;
+ enum shared_bus_users_mode mode;
} shared_bus_user;
} u;
+ struct raw_notifier_head *rate_change_nh;
+
+ struct mutex mutex;
spinlock_t spinlock;
};
@@ -145,16 +214,83 @@ struct tegra_clk_init_table {
bool enabled;
};
-void tegra2_init_clocks(void);
-void tegra2_periph_reset_deassert(struct clk *c);
-void tegra2_periph_reset_assert(struct clk *c);
+struct tegra_sku_rate_limit {
+ const char *clk_name;
+ unsigned long max_rate;
+ int sku_ids[MAX_SAME_LIMIT_SKU_IDS];
+};
+
+void tegra_soc_init_clocks(void);
+void tegra_init_max_rate(struct clk *c, unsigned long max_rate);
void clk_init(struct clk *clk);
struct clk *tegra_get_clock_by_name(const char *name);
unsigned long clk_measure_input_freq(void);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
+void clk_set_cansleep(struct clk *c);
+unsigned long clk_get_max_rate(struct clk *c);
+unsigned long clk_get_min_rate(struct clk *c);
unsigned long clk_get_rate_locked(struct clk *c);
int clk_set_rate_locked(struct clk *c, unsigned long rate);
+int clk_set_parent_locked(struct clk *c, struct clk *parent);
+int tegra_clk_shared_bus_update(struct clk *c);
void tegra2_sdmmc_tap_delay(struct clk *c, int delay);
+int tegra_emc_set_rate(unsigned long rate);
+long tegra_emc_round_rate(unsigned long rate);
+struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value);
+void tegra_emc_timing_invalidate(void);
+
+static inline bool clk_is_auto_dvfs(struct clk *c)
+{
+ return c->auto_dvfs;
+}
+
+static inline bool clk_is_dvfs(struct clk *c)
+{
+ return (c->dvfs != NULL);
+}
+
+static inline bool clk_cansleep(struct clk *c)
+{
+ return c->cansleep;
+}
+static inline void clk_lock_save(struct clk *c, unsigned long *flags)
+{
+ if (clk_cansleep(c)) {
+ *flags = 0;
+ mutex_lock(&c->mutex);
+ } else {
+ spin_lock_irqsave(&c->spinlock, *flags);
+ }
+}
+
+static inline void clk_unlock_restore(struct clk *c, unsigned long *flags)
+{
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+ else
+ spin_unlock_irqrestore(&c->spinlock, *flags);
+}
+
+static inline void clk_lock_init(struct clk *c)
+{
+ mutex_init(&c->mutex);
+ spin_lock_init(&c->spinlock);
+}
+
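Every accessor in clock.c follows the same pattern against these helpers: save/lock, call the *_locked variant, unlock/restore. A minimal, hypothetical accessor showing the idiom (it mirrors clk_get_rate() above):

static unsigned long example_clk_read_rate(struct clk *c)
{
	unsigned long flags;
	unsigned long rate;

	clk_lock_save(c, &flags);	/* mutex if c->cansleep, else spin_lock_irqsave */
	rate = clk_get_rate_locked(c);
	clk_unlock_restore(c, &flags);	/* releases whichever lock was taken */

	return rate;
}
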
+#ifdef CONFIG_CPU_FREQ
+struct cpufreq_frequency_table;
+
+struct tegra_cpufreq_table_data {
+ struct cpufreq_frequency_table *freq_table;
+ int throttle_lowest_index;
+ int throttle_highest_index;
+ int suspend_index;
+};
+struct tegra_cpufreq_table_data *tegra_cpufreq_table_get(void);
+unsigned long tegra_emc_to_cpu_ratio(unsigned long cpu_rate);
+#endif
+
+#endif
#endif
diff --git a/arch/arm/mach-tegra/common-t2.c b/arch/arm/mach-tegra/common-t2.c
new file mode 100644
index 000000000000..6f9b177892ce
--- /dev/null
+++ b/arch/arm/mach-tegra/common-t2.c
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/mach-tegra/common-t2.c
+ *
+ * Tegra 2 SoC-specific initialization (memory controller, etc.)
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#define MC_INT_STATUS 0x0
+#define MC_INT_MASK 0x4
+#define MC_INT_DECERR_EMEM_OTHERS (1<<6)
+#define MC_INT_INVALID_GART_PAGE (1<<7)
+#define MC_INT_SECURITY_VIOLATION (1<<8)
+
+#define MC_GART_ERROR_STATUS 0x30
+#define MC_GART_ERROR_ADDRESS 0x34
+
+#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
+#define MC_DECERR_EMEM_OTHERS_ADDRESS 0x5c
+
+#define MC_SECURITY_VIOLATION_STATUS 0x74
+#define MC_SECURITY_VIOLATION_ADDRESS 0x78
+
+struct mc_client {
+ bool write;
+ const char *name;
+};
+
+#define client(_name,_write) \
+ { \
+ .write = _write, \
+ .name = _name, \
+ }
+
+static const struct mc_client mc_clients[] = {
+ client("display0_wina", false), client("display1_wina", false),
+ client("display0_winb", false), client("display1_winb", false),
+ client("display0_winc", false), client("display1_winc", false),
+ client("display0_winb_vfilter", false),
+ client("display1_winb_vfilter", false),
+ client("epp", false), client("gr2d_pat", false),
+ client("gr2d_src", false), client("mpe_unified", false),
+ client("vi_chroma_filter", false), client("cop", false),
+ client("display0_cursor", false), client("display1_cursor", false),
+ client("gr3d_fdc", false), client("gr2d_dst", false),
+ client("host1x_dma", false), client("host1x_generic", false),
+ client("gr3d_idx", false), client("cpu_uncached", false),
+ client("mpe_intrapred", false), client("mpe_mpea", false),
+ client("mpe_mpec", false), client("ahb_dma", false),
+ client("ahb_slave", false), client("gr3d_tex", false),
+ client("vde_bsev", false), client("vde_mbe", false),
+ client("vde_mce", false), client("vde_tpe", false),
+ client("epp_u", true), client("epp_v", true),
+ client("epp_y", true), client("mpe_unified", true),
+ client("vi_sb", true), client("vi_u", true),
+ client("vi_v", true), client("vi_y", true),
+ client("gr2d_dst", true), client("gr3d_fdc", true),
+ client("host1x", true), client("isp", true),
+ client("cpu_uncached", true), client("mpe_mpec", true),
+ client("ahb_dma", true), client("ahb_slave", true),
+ client("avp_bsev", true), client("avp_mbe", true),
+ client("avp_tpm", true),
+};
+
+static DEFINE_SPINLOCK(mc_lock);
+static unsigned long error_count = 0;
+#define MAX_PRINTS 5
+
+static void unthrottle_prints(struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc_lock, flags);
+ error_count = 0;
+ spin_unlock_irqrestore(&mc_lock, flags);
+}
+
+static DECLARE_DELAYED_WORK(unthrottle_prints_work, unthrottle_prints);
+
+static irqreturn_t tegra_mc_error_isr(int irq, void *data)
+{
+ void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+ unsigned long count;
+ u32 stat;
+
+ stat = readl(mc + MC_INT_STATUS);
+ stat &= (MC_INT_SECURITY_VIOLATION |
+ MC_INT_INVALID_GART_PAGE |
+ MC_INT_DECERR_EMEM_OTHERS);
+
+ __cancel_delayed_work(&unthrottle_prints_work);
+
+ spin_lock(&mc_lock);
+ count = ++error_count;
+ spin_unlock(&mc_lock);
+
+ if (count >= MAX_PRINTS) {
+ if (count == MAX_PRINTS)
+ pr_err("Too many MC errors; throttling prints\n");
+ schedule_delayed_work(&unthrottle_prints_work, HZ/2);
+ goto out;
+ }
+
+ if (stat & MC_INT_DECERR_EMEM_OTHERS) {
+ const struct mc_client *client = NULL;
+ u32 addr, req;
+
+ req = readl(mc + MC_DECERR_EMEM_OTHERS_STATUS);
+ addr = readl(mc + MC_DECERR_EMEM_OTHERS_ADDRESS);
+ req &= 0x3f;
+ if (req < ARRAY_SIZE(mc_clients))
+ client = &mc_clients[req];
+
+ pr_err("MC_DECERR: %p %s (%s)\n", (void*)addr,
+ (client) ? client->name : "unknown",
+ (client && client->write) ? "write" : "read");
+ }
+
+ if (stat & MC_INT_INVALID_GART_PAGE) {
+ const struct mc_client *client = NULL;
+ u32 addr, req;
+
+ req = readl(mc + MC_GART_ERROR_STATUS);
+ addr = readl(mc + MC_GART_ERROR_ADDRESS);
+ req = (req >> 1) & 0x3f;
+
+ if (req < ARRAY_SIZE(mc_clients))
+ client = &mc_clients[req];
+
+ pr_err("MC_GART_ERR: %p %s (%s)\n", (void*)addr,
+ (client) ? client->name : "unknown",
+ (client && client->write) ? "write" : "read");
+ }
+
+ if (stat & MC_INT_SECURITY_VIOLATION) {
+ const struct mc_client *client = NULL;
+ const char *type = NULL;
+ u32 addr, req;
+
+ req = readl(mc + MC_SECURITY_VIOLATION_STATUS);
+ addr = readl(mc + MC_SECURITY_VIOLATION_ADDRESS);
+
+ type = (req & (1<<30)) ? "carveout" : "trustzone";
+
+ req &= 0x3f;
+ if (req < ARRAY_SIZE(mc_clients))
+ client = &mc_clients[req];
+
+ pr_err("MC_SECURITY_ERR (%s): %p %s (%s)\n", type, (void*)addr,
+ (client) ? client->name : "unknown",
+ (client && client->write) ? "write" : "read");
+ }
+out:
+ writel(stat, mc + MC_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+void __init tegra_mc_init(void)
+{
+ if (request_irq(INT_MC_GENERAL, tegra_mc_error_isr, 0,
+ "mc_status", NULL)) {
+ pr_err("%s: unable to register MC error interrupt\n", __func__);
+ } else {
+ void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+ u32 reg = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE |
+ MC_INT_DECERR_EMEM_OTHERS;
+ writel(reg, mc + MC_INT_MASK);
+ }
+}
+arch_initcall(tegra_mc_init);
diff --git a/arch/arm/mach-tegra/common-t3.c b/arch/arm/mach-tegra/common-t3.c
new file mode 100644
index 000000000000..d65e44779f14
--- /dev/null
+++ b/arch/arm/mach-tegra/common-t3.c
@@ -0,0 +1,268 @@
+/*
+ * arch/arm/mach-tegra/common-t3.c
+ *
+ * Tegra 3 SoC-specific initialization (memory controller, etc.)
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "tegra3_emc.h"
+
+#define MC_INT_STATUS 0x0
+#define MC_INT_MASK 0x4
+#define MC_INT_DECERR_EMEM (1<<6)
+#define MC_INT_SECURITY_VIOLATION (1<<8)
+#define MC_INT_ARBITRATION_EMEM (1<<9)
+#define MC_INT_INVALID_SMMU_PAGE (1<<10)
+
+#define MC_ERROR_STATUS 0x8
+#define MC_ERROR_ADDRESS 0xC
+
+#define MC_TIMING_REG_NUM1 \
+ ((MC_EMEM_ARB_TIMING_W2R - MC_EMEM_ARB_CFG) / 4 + 1)
+#define MC_TIMING_REG_NUM2 \
+ ((MC_EMEM_ARB_MISC1 - MC_EMEM_ARB_DA_TURNS) / 4 + 1)
+
+struct mc_client {
+ const char *name;
+};
+
+#define client(_name) \
+ { \
+ .name = _name, \
+ }
+
+
+static void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+
+
+#ifdef CONFIG_PM_SLEEP
+static u32 mc_boot_timing[MC_TIMING_REG_NUM1 + MC_TIMING_REG_NUM2 + 4];
+
+static void tegra_mc_timing_save(void)
+{
+ u32 off;
+ u32 *ctx = mc_boot_timing;
+
+ for (off = MC_EMEM_ARB_CFG; off <= MC_EMEM_ARB_TIMING_W2R; off += 4)
+ *ctx++ = readl((u32)mc + off);
+
+ for (off = MC_EMEM_ARB_DA_TURNS; off <= MC_EMEM_ARB_MISC1; off += 4)
+ *ctx++ = readl((u32)mc + off);
+
+ *ctx++ = readl((u32)mc + MC_EMEM_ARB_RING3_THROTTLE);
+ *ctx++ = readl((u32)mc + MC_EMEM_ARB_OVERRIDE);
+ *ctx++ = readl((u32)mc + MC_RESERVED_RSV);
+
+ *ctx++ = readl((u32)mc + MC_INT_MASK);
+}
+
+void tegra_mc_timing_restore(void)
+{
+ u32 off;
+ u32 *ctx = mc_boot_timing;
+
+ for (off = MC_EMEM_ARB_CFG; off <= MC_EMEM_ARB_TIMING_W2R; off += 4)
+ __raw_writel(*ctx++, (u32)mc + off);
+
+ for (off = MC_EMEM_ARB_DA_TURNS; off <= MC_EMEM_ARB_MISC1; off += 4)
+ __raw_writel(*ctx++, (u32)mc + off);
+
+ __raw_writel(*ctx++, (u32)mc + MC_EMEM_ARB_RING3_THROTTLE);
+ __raw_writel(*ctx++, (u32)mc + MC_EMEM_ARB_OVERRIDE);
+ __raw_writel(*ctx++, (u32)mc + MC_RESERVED_RSV);
+
+ writel(*ctx++, (u32)mc + MC_INT_MASK);
+ off = readl((u32)mc + MC_INT_MASK);
+
+ writel(0x1, (u32)mc + MC_TIMING_CONTROL);
+ off = readl((u32)mc + MC_TIMING_CONTROL);
+}
+#else
+#define tegra_mc_timing_save()
+#endif
+
+
+static const struct mc_client mc_clients[] = {
+ client("ptc"),
+ client("display0_wina"), client("display1_wina"),
+ client("display0_winb"), client("display1_winb"),
+ client("display0_winc"), client("display1_winc"),
+ client("display0_winb_vfilter"),
+ client("display1_winb_vfilter"),
+ client("epp"), client("gr2d_pat"),
+ client("gr2d_src"), client("mpe_unified"),
+ client("vi_chroma_filter"), client("pcie"),
+ client("avp"),
+ client("display0_cursor"), client("display1_cursor"),
+ client("gr3d0_fdc"), client("gr3d1_fdc"),
+ client("gr2d_dst"), client("hda"),
+ client("host1x_dma"), client("host1x_generic"),
+ client("gr3d0_idx"), client("gr3d1_idx"),
+ client("mpe_intrapred"), client("mpe_mpea"),
+ client("mpe_mpec"), client("ahb_dma"),
+ client("ahb_slave"), client("sata"),
+ client("gr3d0_tex"), client("gr3d1_tex"),
+ client("vde_bsev"), client("vde_mbe"),
+ client("vde_mce"), client("vde_tpe"),
+ client("cpu_lp"), client("cpu"),
+ client("epp_u"), client("epp_v"),
+ client("epp_y"), client("mpe_unified"),
+ client("vi_sb"), client("vi_u"),
+ client("vi_v"), client("vi_y"),
+ client("gr2d_dst"), client("pcie"),
+ client("avp"), client("gr3d0_fdc"),
+ client("gr3d1_fdc"), client("hda"),
+ client("host1x"), client("isp"),
+ client("cpu_lp"), client("cpu"),
+ client("mpe_mpec"), client("ahb_dma"),
+ client("ahb_slave"), client("sata"),
+ client("vde_bsev"), client("vde_dbg"),
+ client("vde_mbe"), client("vde_tpm"),
+};
+
+static const char *smmu_page_attrib[] = {
+ "SMMU: nr-nw-s",
+ "SMMU: nr-nw-ns",
+ "SMMU: nr-wr-s",
+ "SMMU: nr-wr-ns",
+ "SMMU: rd-nw-s",
+ "SMMU: rd-nw-ns",
+ "SMMU: rd-wr-s",
+ "SMMU: rd-wr-ns"
+};
+
+static DEFINE_SPINLOCK(mc_lock);
+static unsigned long error_count = 0;
+#define MAX_PRINTS 5
+
+static void unthrottle_prints(struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc_lock, flags);
+ error_count = 0;
+ spin_unlock_irqrestore(&mc_lock, flags);
+}
+
+static DECLARE_DELAYED_WORK(unthrottle_prints_work, unthrottle_prints);
+
+static irqreturn_t tegra_mc_error_isr(int irq, void *data)
+{
+ const struct mc_client *client = NULL;
+ const char *mc_err;
+ const char *mc_err_info;
+ unsigned long count;
+ u32 stat;
+ u32 addr;
+ u32 err;
+ u32 type;
+ u32 is_write;
+ u32 is_secure;
+ u32 client_id;
+
+ stat = readl(mc + MC_INT_STATUS);
+ stat &= (MC_INT_DECERR_EMEM |
+ MC_INT_SECURITY_VIOLATION |
+ MC_INT_INVALID_SMMU_PAGE);
+
+ __cancel_delayed_work(&unthrottle_prints_work);
+
+ spin_lock(&mc_lock);
+ count = ++error_count;
+ spin_unlock(&mc_lock);
+
+ if (count >= MAX_PRINTS) {
+ if (count == MAX_PRINTS)
+ pr_err("Too many MC errors; throttling prints\n");
+ schedule_delayed_work(&unthrottle_prints_work, HZ/2);
+ goto out;
+ }
+
+ err = readl(mc + MC_ERROR_STATUS);
+ addr = readl(mc + MC_ERROR_ADDRESS);
+ is_write = err & (1<<16);
+ is_secure = err & (1<<17);
+ type = (err >> 28) & 7;
+ client_id = err & 0x7f;
+ if (client_id < ARRAY_SIZE(mc_clients))
+ client = &mc_clients[client_id];
+
+ if (stat & MC_INT_DECERR_EMEM)
+ mc_err = "MC_DECERR";
+ else if (stat & MC_INT_SECURITY_VIOLATION)
+ mc_err = "MC_SECURITY_ERR";
+ else if (stat & MC_INT_INVALID_SMMU_PAGE)
+ mc_err = "MC_SMMU_ERR";
+ else
+ mc_err = "unknown";
+
+ mc_err_info = "";
+ if (type == 3) {
+ mc_err_info = "SECURITY_TRUSTZONE";
+ } else if (type == 4) {
+ mc_err_info = "SECURITY_CARVEOUT";
+ } else if (type == 6) {
+ u32 attrib = (err >> 25) & 7;
+ mc_err_info = smmu_page_attrib[attrib];
+ }
+
+ pr_err("%s (0x%08X): %p %s (%s %s %s)\n", mc_err, err, (void*)addr,
+ (client) ? client->name : "unknown",
+ (is_secure)? "secure" : "non-secure",
+ (is_write) ? "write" : "read",
+ mc_err_info);
+
+out:
+ writel(stat, mc + MC_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
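As a worked example of the field decoding above (value invented): an MC_ERROR_STATUS of 0x60010002 reported with MC_INT_INVALID_SMMU_PAGE gives type = (err >> 28) & 7 = 6, so the attribute string comes from (err >> 25) & 7 = 0 ("SMMU: nr-nw-s"); bit 16 set and bit 17 clear mark it a non-secure write, and client_id = err & 0x7f = 2 selects "display1_wina" from the table.
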
+int __init tegra_mc_init(void)
+{
+ u32 reg;
+ int ret = 0;
+
+ reg = 0x0A7F1010;
+ writel(reg, mc + MC_RESERVED_RSV);
+
+ reg = readl(mc + MC_EMEM_ARB_OVERRIDE);
+ reg |= 3;
+ writel(reg, mc + MC_EMEM_ARB_OVERRIDE);
+
+ if (request_irq(INT_MC_GENERAL, tegra_mc_error_isr, 0,
+ "mc_status", NULL)) {
+ pr_err("%s: unable to register MC error interrupt\n", __func__);
+ ret = -ENXIO;
+ } else {
+ reg = MC_INT_DECERR_EMEM | MC_INT_SECURITY_VIOLATION |
+ MC_INT_INVALID_SMMU_PAGE;
+ writel(reg, mc + MC_INT_MASK);
+ }
+ tegra_mc_timing_save();
+
+ return ret;
+}
+arch_initcall(tegra_mc_init);
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index d5e3f89b05af..dd281d8d4f69 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -1,7 +1,8 @@
/*
- * arch/arm/mach-tegra/board-harmony.c
+ * arch/arm/mach-tegra/common.c
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010-2011 NVIDIA Corporation
*
* Author:
* Colin Cross <ccross@android.com>
@@ -17,67 +18,1030 @@
*
*/
+#include <linux/platform_device.h>
+#include <linux/console.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/system.h>
+#include <mach/gpio.h>
#include <mach/iomap.h>
+#include <mach/pinmux.h>
+#include <mach/powergate.h>
#include <mach/system.h>
+#include "apbio.h"
#include "board.h"
#include "clock.h"
#include "fuse.h"
+#include "pm.h"
+#include "reset.h"
+#include "tegra_smmu.h"
+
+#define MC_SECURITY_CFG2 0x7c
+
+#define AHB_ARBITRATION_PRIORITY_CTRL 0x4
+#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
+#define PRIORITY_SELECT_USB BIT(6)
+#define PRIORITY_SELECT_USB2 BIT(18)
+#define PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM 0xc
+#define ENB_FAST_REARBITRATE BIT(2)
+#define DONT_SPLIT_AHB_WR BIT(7)
+
+#define AHB_GIZMO_USB 0x1c
+#define AHB_GIZMO_USB2 0x78
+#define AHB_GIZMO_USB3 0x7c
+#define IMMEDIATE BIT(18)
+
+#define AHB_MEM_PREFETCH_CFG3 0xe0
+#define AHB_MEM_PREFETCH_CFG4 0xe4
+#define AHB_MEM_PREFETCH_CFG1 0xec
+#define AHB_MEM_PREFETCH_CFG2 0xf0
+#define PREFETCH_ENB BIT(31)
+#define MST_ID(x) (((x) & 0x1f) << 26)
+#define AHBDMA_MST_ID MST_ID(5)
+#define USB_MST_ID MST_ID(6)
+#define USB2_MST_ID MST_ID(18)
+#define USB3_MST_ID MST_ID(17)
+#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
+#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
+
+unsigned long tegra_bootloader_fb_start;
+unsigned long tegra_bootloader_fb_size;
+unsigned long tegra_fb_start;
+unsigned long tegra_fb_size;
+unsigned long tegra_fb2_start;
+unsigned long tegra_fb2_size;
+unsigned long tegra_carveout_start;
+unsigned long tegra_carveout_size;
+unsigned long tegra_vpr_start;
+unsigned long tegra_vpr_size;
+unsigned long tegra_lp0_vec_start;
+unsigned long tegra_lp0_vec_size;
+bool tegra_lp0_vec_relocate;
+unsigned long tegra_grhost_aperture = ~0ul;
+static bool is_tegra_debug_uart_hsport;
+static struct board_info pmu_board_info;
+static struct board_info display_board_info;
+static struct board_info camera_board_info;
+
+static int pmu_core_edp = 1200; /* default 1.2V EDP limit */
+static int board_panel_type;
+static enum power_supply_type pow_supply_type = POWER_SUPPLY_TYPE_MAINS;
void (*arch_reset)(char mode, const char *cmd) = tegra_assert_system_reset;
+#define NEVER_RESET 0
+
void tegra_assert_system_reset(char mode, const char *cmd)
{
- void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04);
+#if defined(CONFIG_TEGRA_FPGA_PLATFORM) || NEVER_RESET
+ printk("tegra_assert_system_reset() ignored.....");
+ do { } while (1);
+#else
+ void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0x00);
u32 reg;
/* use *_related to avoid spinlock since caches are off */
reg = readl_relaxed(reset);
- reg |= 0x04;
+ reg |= 0x10;
writel_relaxed(reg, reset);
+#endif
}
+static int modem_id;
+static int debug_uart_port_id;
+static enum audio_codec_type audio_codec_name;
+static int max_cpu_current;
+/* WARNING: i2c, uart, dsi and other blocks are implicit clients of
+ * pllp_out3, so this clock should never be disabled.
+ */
static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
/* name parent rate enabled */
{ "clk_m", NULL, 0, true },
- { "pll_p", "clk_m", 216000000, true },
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { "pll_p", NULL, 216000000, true },
{ "pll_p_out1", "pll_p", 28800000, true },
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
{ "pll_p_out4", "pll_p", 108000000, true },
+ { "pll_m", "clk_m", 0, true },
+ { "pll_m_out1", "pll_m", 120000000, true },
+ { "sclk", "pll_c_out1", 40000000, true },
+ { "hclk", "sclk", 40000000, true },
+ { "pclk", "hclk", 40000000, true },
+ { "mpe", "pll_c", 0, false },
+ { "epp", "pll_c", 0, false },
+ { "vi_sensor", "pll_c", 0, false },
+ { "vi", "pll_c", 0, false },
+ { "2d", "pll_c", 0, false },
+ { "3d", "pll_c", 0, false },
+#else
+ { "pll_p", NULL, 408000000, true },
+ { "pll_p_out1", "pll_p", 9600000, true },
+ { "pll_p_out2", "pll_p", 48000000, true },
+ { "pll_p_out3", "pll_p", 102000000, true },
+ { "pll_m_out1", "pll_m", 275000000, false },
+ { "pll_p_out4", "pll_p", 102000000, true },
+ { "sclk", "pll_p_out4", 102000000, true },
+ { "hclk", "sclk", 102000000, true },
+ { "pclk", "hclk", 51000000, true },
+#endif
+#else
+ { "pll_p", NULL, 216000000, true },
+ { "pll_p_out1", "pll_p", 28800000, true },
+ { "pll_p_out2", "pll_p", 48000000, true },
+ { "pll_p_out3", "pll_p", 72000000, true },
+ { "pll_m_out1", "pll_m", 275000000, true },
+ { "pll_c", NULL, ULONG_MAX, false },
+ { "pll_c_out1", "pll_c", 208000000, false },
+ { "pll_p_out4", "pll_p", 108000000, true },
{ "sclk", "pll_p_out4", 108000000, true },
{ "hclk", "sclk", 108000000, true },
{ "pclk", "hclk", 54000000, true },
+#endif
{ "csite", NULL, 0, true },
{ "emc", NULL, 0, true },
{ "cpu", NULL, 0, true },
+ { "kfuse", NULL, 0, true },
+ { "fuse", NULL, 0, true },
+ { "pll_u", NULL, 480000000, false },
+ { "sdmmc1", "pll_p", 48000000, false},
+ { "sdmmc3", "pll_p", 48000000, false},
+ { "sdmmc4", "pll_p", 48000000, false},
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ { "cbus", "pll_c", 416000000, false },
+ { "pll_c_out1", "pll_c", 208000000, false },
+#endif
{ NULL, NULL, 0, 0},
};
-void __init tegra_init_cache(void)
+#if defined(CONFIG_TRUSTED_FOUNDATIONS) && defined(CONFIG_CACHE_L2X0)
+static void tegra_cache_smc(bool enable, u32 arg)
+{
+ void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+ bool need_affinity_switch;
+ bool can_switch_affinity;
+ bool l2x0_enabled;
+ cpumask_t local_cpu_mask;
+ cpumask_t saved_cpu_mask;
+ unsigned long flags;
+ long ret;
+
+	/*
+	 * ISSUE: Some registers of the PL310 controller must be written
+	 *              from Secure context (and from CPU0)!
+	 *
+	 * When called from Normal we either get an abort or the write is
+	 * silently ignored. Operations that must be performed in Secure:
+	 *      - Write to Control register (L2X0_CTRL==0x100)
+	 *      - Write to Auxiliary control register (L2X0_AUX_CTRL==0x104)
+	 *      - Invalidate all entries (L2X0_INV_WAY==0x77C),
+	 *        mandatory at boot time.
+	 *      - Tag and Data RAM Latency Control Registers
+	 *        (0x108 & 0x10C) must be written in Secure.
+	 */
+ need_affinity_switch = (smp_processor_id() != 0);
+ can_switch_affinity = !irqs_disabled();
+
+ WARN_ON(need_affinity_switch && !can_switch_affinity);
+ if (need_affinity_switch && can_switch_affinity) {
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret = sched_setaffinity(0, &local_cpu_mask);
+ WARN_ON(ret != 0);
+ }
+
+ local_irq_save(flags);
+ l2x0_enabled = readl_relaxed(p + L2X0_CTRL) & 1;
+ if (enable && !l2x0_enabled)
+ tegra_generic_smc(0xFFFFF100, 0x00000001, arg);
+ else if (!enable && l2x0_enabled)
+ tegra_generic_smc(0xFFFFF100, 0x00000002, arg);
+ local_irq_restore(flags);
+
+ if (need_affinity_switch && can_switch_affinity) {
+ ret = sched_setaffinity(0, &saved_cpu_mask);
+ WARN_ON(ret != 0);
+ }
+}
+
+static void tegra_l2x0_disable(void)
+{
+ unsigned long flags;
+ static u32 l2x0_way_mask;
+
+ if (!l2x0_way_mask) {
+ void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+ u32 aux_ctrl;
+ u32 ways;
+
+ aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
+ ways = (aux_ctrl & (1 << 16)) ? 16 : 8;
+ l2x0_way_mask = (1 << ways) - 1;
+ }
+
+ local_irq_save(flags);
+ tegra_cache_smc(false, l2x0_way_mask);
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS && defined(CONFIG_CACHE_L2X0) */
+
+void tegra_init_cache(bool init)
{
#ifdef CONFIG_CACHE_L2X0
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+ u32 aux_ctrl;
+ u32 speedo;
+ u32 tmp;
+
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ /* issue the SMC to enable the L2 */
+ aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
+ tegra_cache_smc(true, aux_ctrl);
+ /* after init, reread aux_ctrl and register handlers */
+ aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
+ l2x0_init(p, aux_ctrl, 0xFFFFFFFF);
+
+ /* override outer_disable() with our disable */
+ outer_cache.disable = tegra_l2x0_disable;
+#else
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);
- l2x0_init(p, 0x6C080001, 0x8200c3fe);
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ /* PL310 RAM latency is CPU dependent. NOTE: Changes here
+ must also be reflected in __cortex_a9_l2x0_restart */
+
+ if (is_lp_cluster()) {
+ writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
+ writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
+ } else {
+ /* relax l2-cache latency for speedos 4,5,6 (T33's chips) */
+ speedo = tegra_cpu_speedo_id();
+ if (speedo == 4 || speedo == 5 || speedo == 6) {
+ writel(0x442, p + L2X0_TAG_LATENCY_CTRL);
+ writel(0x552, p + L2X0_DATA_LATENCY_CTRL);
+ } else {
+ writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
+ writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
+ }
+ }
+#else
+ writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
+ writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
+#endif
+#endif
+ aux_ctrl = readl(p + L2X0_CACHE_TYPE);
+ aux_ctrl = (aux_ctrl & 0x700) << (17-8);
+ aux_ctrl |= 0x7C000001;
+ if (init) {
+ l2x0_init(p, aux_ctrl, 0x8200c3fe);
+ } else {
+ tmp = aux_ctrl;
+ aux_ctrl = readl(p + L2X0_AUX_CTRL);
+ aux_ctrl &= 0x8200c3fe;
+ aux_ctrl |= tmp;
+ writel(aux_ctrl, p + L2X0_AUX_CTRL);
+ }
+ l2x0_enable();
#endif
+#endif
+}
+static void __init tegra_init_power(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_HAS_SATA
+ tegra_powergate_partition_with_clk_off(TEGRA_POWERGATE_SATA);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_HAS_PCIE
+ tegra_powergate_partition_with_clk_off(TEGRA_POWERGATE_PCIE);
+#endif
+}
+
+static inline unsigned long gizmo_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(TEGRA_AHB_GIZMO_BASE + offset));
+}
+
+static inline void gizmo_writel(unsigned long value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(TEGRA_AHB_GIZMO_BASE + offset));
+}
+
+static void __init tegra_init_ahb_gizmo_settings(void)
+{
+ unsigned long val;
+
+ val = gizmo_readl(AHB_GIZMO_AHB_MEM);
+ val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+ gizmo_writel(val, AHB_GIZMO_AHB_MEM);
+
+ val = gizmo_readl(AHB_GIZMO_USB);
+ val |= IMMEDIATE;
+ gizmo_writel(val, AHB_GIZMO_USB);
+
+ val = gizmo_readl(AHB_GIZMO_USB2);
+ val |= IMMEDIATE;
+ gizmo_writel(val, AHB_GIZMO_USB2);
+
+ val = gizmo_readl(AHB_GIZMO_USB3);
+ val |= IMMEDIATE;
+ gizmo_writel(val, AHB_GIZMO_USB3);
+
+ val = gizmo_readl(AHB_ARBITRATION_PRIORITY_CTRL);
+ val |= PRIORITY_SELECT_USB | PRIORITY_SELECT_USB2 | PRIORITY_SELECT_USB3
+ | AHB_PRIORITY_WEIGHT(7);
+ gizmo_writel(val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG1);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | AHBDMA_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG1);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG2);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | USB_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG2);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG3);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | USB3_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG3);
+
+ val = gizmo_readl(AHB_MEM_PREFETCH_CFG4);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB | USB2_MST_ID | ADDR_BNDRY(0xc) | INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static bool console_flushed;
+
+static void tegra_pm_flush_console(void)
+{
+ if (console_flushed)
+ return;
+ console_flushed = true;
+
+ pr_emerg("Restarting %s\n", linux_banner);
+ if (console_trylock()) {
+ console_unlock();
+ return;
+ }
+
+ mdelay(50);
+
+ local_irq_disable();
+ if (!console_trylock())
+ pr_emerg("%s: Console was locked! Busting\n", __func__);
+ else
+ pr_emerg("%s: Console was locked!\n", __func__);
+ console_unlock();
+}
+
+static void tegra_pm_restart(char mode, const char *cmd)
+{
+ tegra_pm_flush_console();
+ arm_machine_restart(mode, cmd);
}
void __init tegra_init_early(void)
{
+ arm_pm_restart = tegra_pm_restart;
+#ifndef CONFIG_SMP
+	/* For SMP systems, initializing the reset handler here is too
+	   late. For non-SMP systems, the function that calls the reset
+	   handler initializer is not called, so do it here for non-SMP. */
+ tegra_cpu_reset_handler_init();
+#endif
tegra_init_fuse();
+ tegra_gpio_resume_init();
tegra_init_clock();
+ tegra_init_pinmux();
tegra_clk_init_from_table(common_clk_init_table);
- tegra_init_cache();
+ tegra_init_power();
+ tegra_init_cache(true);
+ tegra_init_ahb_gizmo_settings();
+}
+
+static int __init tegra_lp0_vec_arg(char *options)
+{
+ char *p = options;
+
+ tegra_lp0_vec_size = memparse(p, &p);
+ if (*p == '@')
+ tegra_lp0_vec_start = memparse(p+1, &p);
+ if (!tegra_lp0_vec_size || !tegra_lp0_vec_start) {
+ tegra_lp0_vec_size = 0;
+ tegra_lp0_vec_start = 0;
+ }
+
+ return 0;
+}
+early_param("lp0_vec", tegra_lp0_vec_arg);
+
+static int __init tegra_bootloader_fb_arg(char *options)
+{
+ char *p = options;
+
+ tegra_bootloader_fb_size = memparse(p, &p);
+ if (*p == '@')
+ tegra_bootloader_fb_start = memparse(p+1, &p);
+
+ pr_info("Found tegra_fbmem: %08lx@%08lx\n",
+ tegra_bootloader_fb_size, tegra_bootloader_fb_start);
+
+ return 0;
+}
+early_param("tegra_fbmem", tegra_bootloader_fb_arg);
+
+static int __init tegra_vpr_arg(char *options)
+{
+ char *p = options;
+
+ tegra_vpr_size = memparse(p, &p);
+ if (*p == '@')
+ tegra_vpr_start = memparse(p+1, &p);
+ pr_info("Found vpr, start=0x%lx size=%lx",
+ tegra_vpr_start, tegra_vpr_size);
+ return 0;
+}
+early_param("vpr", tegra_vpr_arg);
+
+enum panel_type get_panel_type(void)
+{
+ return board_panel_type;
+}
+static int __init tegra_board_panel_type(char *options)
+{
+ if (!strcmp(options, "lvds"))
+ board_panel_type = panel_type_lvds;
+ else if (!strcmp(options, "dsi"))
+ board_panel_type = panel_type_dsi;
+ else
+ return 0;
+ return 1;
+}
+__setup("panel=", tegra_board_panel_type);
+
+enum power_supply_type get_power_supply_type(void)
+{
+ return pow_supply_type;
+}
+static int __init tegra_board_power_supply_type(char *options)
+{
+	if (!strcmp(options, "Adapter"))
+		pow_supply_type = POWER_SUPPLY_TYPE_MAINS;
+	else if (!strcmp(options, "Mains"))
+ pow_supply_type = POWER_SUPPLY_TYPE_MAINS;
+ else if (!strcmp(options, "Battery"))
+ pow_supply_type = POWER_SUPPLY_TYPE_BATTERY;
+ else
+ return 0;
+ return 1;
+}
+__setup("power_supply=", tegra_board_power_supply_type);
+
+int get_core_edp(void)
+{
+ return pmu_core_edp;
+}
+static int __init tegra_pmu_core_edp(char *options)
+{
+ char *p = options;
+ int core_edp = memparse(p, &p);
+ if (core_edp != 0)
+ pmu_core_edp = core_edp;
+ return 0;
+}
+early_param("core_edp_mv", tegra_pmu_core_edp);
+
+int get_maximum_cpu_current_supported(void)
+{
+ return max_cpu_current;
+}
+static int __init tegra_max_cpu_current(char *options)
+{
+ char *p = options;
+ max_cpu_current = memparse(p, &p);
+ return 1;
+}
+__setup("max_cpu_cur_ma=", tegra_max_cpu_current);
+
+static int __init tegra_debug_uartport(char *info)
+{
+ char *p = info;
+ unsigned long long port_id;
+ if (!strncmp(p, "hsport", 6))
+ is_tegra_debug_uart_hsport = true;
+ else if (!strncmp(p, "lsport", 6))
+ is_tegra_debug_uart_hsport = false;
+
+ if (p[6] == ',') {
+ if (p[7] == '-') {
+ debug_uart_port_id = -1;
+ } else {
+ port_id = memparse(p + 7, &p);
+ debug_uart_port_id = (int) port_id;
+ }
+ } else {
+ debug_uart_port_id = -1;
+ }
+
+ return 1;
+}
+
+bool is_tegra_debug_uartport_hs(void)
+{
+ return is_tegra_debug_uart_hsport;
+}
+
+int get_tegra_uart_debug_port_id(void)
+{
+ return debug_uart_port_id;
+}
+__setup("debug_uartport=", tegra_debug_uartport);
+
+static int __init tegra_audio_codec_type(char *info)
+{
+ char *p = info;
+ if (!strncmp(p, "wm8903", 6))
+ audio_codec_name = audio_codec_wm8903;
+ else
+ audio_codec_name = audio_codec_none;
+
+ return 1;
+}
+
+enum audio_codec_type get_audio_codec_type(void)
+{
+ return audio_codec_name;
+}
+__setup("audio_codec=", tegra_audio_codec_type);
+
+
+void tegra_get_board_info(struct board_info *bi)
+{
+ bi->board_id = (system_serial_high >> 16) & 0xFFFF;
+ bi->sku = (system_serial_high) & 0xFFFF;
+ bi->fab = (system_serial_low >> 24) & 0xFF;
+ bi->major_revision = (system_serial_low >> 16) & 0xFF;
+ bi->minor_revision = (system_serial_low >> 8) & 0xFF;
+}
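[Editor's note: the board identification above is unpacked from the ATAG-provided serial number words. A standalone sketch with invented serial values shows the field layout: 16-bit board ID and SKU in system_serial_high, 8-bit fab and revisions in system_serial_low.]

    #include <stdio.h>

    int main(void)
    {
            /* Invented values standing in for system_serial_high/low. */
            unsigned int serial_high = 0x0C5B0A00;
            unsigned int serial_low  = 0x03410200;

            printf("board_id = 0x%04x\n", (serial_high >> 16) & 0xFFFF);
            printf("sku      = 0x%04x\n", serial_high & 0xFFFF);
            printf("fab      = 0x%02x\n", (serial_low >> 24) & 0xFF);
            printf("major    = 0x%02x\n", (serial_low >> 16) & 0xFF);
            printf("minor    = 0x%02x\n", (serial_low >> 8) & 0xFF);
            return 0;
    }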
+
+static int __init tegra_pmu_board_info(char *info)
+{
+ char *p = info;
+ pmu_board_info.board_id = memparse(p, &p);
+ pmu_board_info.sku = memparse(p+1, &p);
+ pmu_board_info.fab = memparse(p+1, &p);
+ pmu_board_info.major_revision = memparse(p+1, &p);
+ pmu_board_info.minor_revision = memparse(p+1, &p);
+ return 1;
+}
+
+void tegra_get_pmu_board_info(struct board_info *bi)
+{
+ memcpy(bi, &pmu_board_info, sizeof(struct board_info));
+}
+
+__setup("pmuboard=", tegra_pmu_board_info);
+
+static int __init tegra_display_board_info(char *info)
+{
+ char *p = info;
+ display_board_info.board_id = memparse(p, &p);
+ display_board_info.sku = memparse(p+1, &p);
+ display_board_info.fab = memparse(p+1, &p);
+ display_board_info.major_revision = memparse(p+1, &p);
+ display_board_info.minor_revision = memparse(p+1, &p);
+ return 1;
+}
+
+void tegra_get_display_board_info(struct board_info *bi)
+{
+ memcpy(bi, &display_board_info, sizeof(struct board_info));
+}
+
+__setup("displayboard=", tegra_display_board_info);
+
+static int __init tegra_camera_board_info(char *info)
+{
+ char *p = info;
+ camera_board_info.board_id = memparse(p, &p);
+ camera_board_info.sku = memparse(p+1, &p);
+ camera_board_info.fab = memparse(p+1, &p);
+ camera_board_info.major_revision = memparse(p+1, &p);
+ camera_board_info.minor_revision = memparse(p+1, &p);
+ return 1;
+}
+
+void tegra_get_camera_board_info(struct board_info *bi)
+{
+ memcpy(bi, &camera_board_info, sizeof(struct board_info));
+}
+
+__setup("cameraboard=", tegra_camera_board_info);
+
+static int __init tegra_modem_id(char *id)
+{
+ char *p = id;
+
+ modem_id = memparse(p, &p);
+ return 1;
+}
+
+int tegra_get_modem_id(void)
+{
+ return modem_id;
+}
+
+__setup("modem_id=", tegra_modem_id);
+
+
+
+/*
+ * Tegra has a protected aperture that prevents access by most non-CPU
+ * memory masters to addresses above the aperture value. Enabling it
+ * secures the CPU's memory from the GPU, except through the GART.
+ */
+void __init tegra_protected_aperture_init(unsigned long aperture)
+{
+#ifndef CONFIG_NVMAP_ALLOW_SYSMEM
+ void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
+ pr_info("Enabling Tegra protected aperture at 0x%08lx\n", aperture);
+ writel(aperture, mc_base + MC_SECURITY_CFG2);
+#else
+ pr_err("Tegra protected aperture disabled because nvmap is using "
+ "system memory\n");
+#endif
+}
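[Editor's note: illustrative usage, not part of this patch: a board file would typically call tegra_protected_aperture_init(tegra_grhost_aperture) after tegra_reserve() has placed the framebuffers and carveout at the top of DRAM, so the protected region begins exactly where those reservations start.]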
+
+/*
+ * Due to conflicting restrictions on the placement of the framebuffer,
+ * the bootloader is likely to leave the framebuffer pointed at a location
+ * in memory that is outside the grhost aperture. This function will move
+ * the framebuffer contents from a physical address that is anywhere (lowmem,
+ * highmem, or outside the memory map) to a physical address that is outside
+ * the memory map.
+ */
+void tegra_move_framebuffer(unsigned long to, unsigned long from,
+ unsigned long size)
+{
+ struct page *page;
+ void __iomem *to_io;
+ void *from_virt;
+ unsigned long i;
+
+ BUG_ON(PAGE_ALIGN((unsigned long)to) != (unsigned long)to);
+ BUG_ON(PAGE_ALIGN(from) != from);
+ BUG_ON(PAGE_ALIGN(size) != size);
+
+ to_io = ioremap(to, size);
+ if (!to_io) {
+ pr_err("%s: Failed to map target framebuffer\n", __func__);
+ return;
+ }
+
+ if (pfn_valid(page_to_pfn(phys_to_page(from)))) {
+ for (i = 0 ; i < size; i += PAGE_SIZE) {
+ page = phys_to_page(from + i);
+ from_virt = kmap(page);
+ memcpy(to_io + i, from_virt, PAGE_SIZE);
+ kunmap(page);
+ }
+ } else {
+ void __iomem *from_io = ioremap(from, size);
+ if (!from_io) {
+ pr_err("%s: Failed to map source framebuffer\n",
+ __func__);
+ goto out;
+ }
+
+ for (i = 0; i < size; i += 4)
+ writel(readl(from_io + i), to_io + i);
+
+ iounmap(from_io);
+ }
+out:
+ iounmap(to_io);
+}
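[Editor's note: illustrative usage, not part of this patch: a board panel file would typically call tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start, min(tegra_fb_size, tegra_bootloader_fb_size)) to preserve the bootloader splash image, and later release the source region via tegra_release_bootloader_fb().]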
+
+#ifdef CONFIG_TEGRA_SMMU_BASE_AT_E0000000
+#define FORCE_SMMU_BASE_FOR_TEGRA3_A01 1
+#else
+#define FORCE_SMMU_BASE_FOR_TEGRA3_A01 0
+#endif
+#if FORCE_SMMU_BASE_FOR_TEGRA3_A01 || \
+ (defined(CONFIG_TEGRA_IOVMM_SMMU) && defined(CONFIG_ARCH_TEGRA_3x_SOC))
+/* Support for Tegra3 A01 chip mask that needs to have SMMU IOVA reside in
+ * the upper half of 4GB IOVA space. A02 and after use the bottom 1GB and
+ * do not need to reserve memory.
+ */
+#define SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
+#endif
+
+void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size,
+ unsigned long fb2_size)
+{
+#ifdef SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
+ int smmu_reserved = 0;
+ struct tegra_smmu_window *smmu_window = tegra_smmu_window(0);
+#endif
+
+ if (carveout_size) {
+ tegra_carveout_start = memblock_end_of_DRAM() - carveout_size;
+ if (memblock_remove(tegra_carveout_start, carveout_size)) {
+ pr_err("Failed to remove carveout %08lx@%08lx "
+ "from memory map\n",
+ carveout_size, tegra_carveout_start);
+ tegra_carveout_start = 0;
+ tegra_carveout_size = 0;
+ } else
+ tegra_carveout_size = carveout_size;
+ }
+
+ if (fb2_size) {
+ tegra_fb2_start = memblock_end_of_DRAM() - fb2_size;
+ if (memblock_remove(tegra_fb2_start, fb2_size)) {
+ pr_err("Failed to remove second framebuffer "
+ "%08lx@%08lx from memory map\n",
+ fb2_size, tegra_fb2_start);
+ tegra_fb2_start = 0;
+ tegra_fb2_size = 0;
+ } else
+ tegra_fb2_size = fb2_size;
+ }
+
+ if (fb_size) {
+ tegra_fb_start = memblock_end_of_DRAM() - fb_size;
+ if (memblock_remove(tegra_fb_start, fb_size)) {
+ pr_err("Failed to remove framebuffer %08lx@%08lx "
+ "from memory map\n",
+ fb_size, tegra_fb_start);
+ tegra_fb_start = 0;
+ tegra_fb_size = 0;
+ } else
+ tegra_fb_size = fb_size;
+ }
+
+ if (tegra_fb_size)
+ tegra_grhost_aperture = tegra_fb_start;
+
+ if (tegra_fb2_size && tegra_fb2_start < tegra_grhost_aperture)
+ tegra_grhost_aperture = tegra_fb2_start;
+
+ if (tegra_carveout_size && tegra_carveout_start < tegra_grhost_aperture)
+ tegra_grhost_aperture = tegra_carveout_start;
+
+#ifdef SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
+ if (!smmu_window) {
+ pr_err("No SMMU resource\n");
+ } else {
+ size_t smmu_window_size;
+
+ if (FORCE_SMMU_BASE_FOR_TEGRA3_A01 ||
+ (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3 &&
+ tegra_get_revision() == TEGRA_REVISION_A01)) {
+ smmu_window->start = TEGRA_SMMU_BASE_TEGRA3_A01;
+ smmu_window->end = TEGRA_SMMU_BASE_TEGRA3_A01 +
+ TEGRA_SMMU_SIZE_TEGRA3_A01 - 1;
+ }
+ smmu_window_size = smmu_window->end + 1 - smmu_window->start;
+ if (smmu_window->start >= 0x80000000) {
+ if (memblock_reserve(smmu_window->start,
+ smmu_window_size))
+ pr_err(
+ "Failed to reserve SMMU I/O VA window %08lx@%08lx\n",
+ (unsigned long)smmu_window_size,
+ (unsigned long)smmu_window->start);
+ else
+ smmu_reserved = 1;
+ }
+ }
+#endif
+
+ if (tegra_lp0_vec_size &&
+ (tegra_lp0_vec_start < memblock_end_of_DRAM())) {
+ if (memblock_reserve(tegra_lp0_vec_start, tegra_lp0_vec_size)) {
+ pr_err("Failed to reserve lp0_vec %08lx@%08lx\n",
+ tegra_lp0_vec_size, tegra_lp0_vec_start);
+ tegra_lp0_vec_start = 0;
+ tegra_lp0_vec_size = 0;
+ }
+ tegra_lp0_vec_relocate = false;
+ } else
+ tegra_lp0_vec_relocate = true;
+
+ /*
+ * We copy the bootloader's framebuffer to the framebuffer allocated
+ * above, and then free this one.
+ */
+ if (tegra_bootloader_fb_size) {
+ tegra_bootloader_fb_size = PAGE_ALIGN(tegra_bootloader_fb_size);
+ if (memblock_reserve(tegra_bootloader_fb_start,
+ tegra_bootloader_fb_size)) {
+ pr_err("Failed to reserve bootloader frame buffer "
+ "%08lx@%08lx\n", tegra_bootloader_fb_size,
+ tegra_bootloader_fb_start);
+ tegra_bootloader_fb_start = 0;
+ tegra_bootloader_fb_size = 0;
+ }
+ }
+
+ pr_info("Tegra reserved memory:\n"
+ "LP0: %08lx - %08lx\n"
+ "Bootloader framebuffer: %08lx - %08lx\n"
+ "Framebuffer: %08lx - %08lx\n"
+ "2nd Framebuffer: %08lx - %08lx\n"
+ "Carveout: %08lx - %08lx\n"
+ "Vpr: %08lx - %08lx\n",
+ tegra_lp0_vec_start,
+ tegra_lp0_vec_size ?
+ tegra_lp0_vec_start + tegra_lp0_vec_size - 1 : 0,
+ tegra_bootloader_fb_start,
+ tegra_bootloader_fb_size ?
+ tegra_bootloader_fb_start + tegra_bootloader_fb_size - 1 : 0,
+ tegra_fb_start,
+ tegra_fb_size ?
+ tegra_fb_start + tegra_fb_size - 1 : 0,
+ tegra_fb2_start,
+ tegra_fb2_size ?
+ tegra_fb2_start + tegra_fb2_size - 1 : 0,
+ tegra_carveout_start,
+ tegra_carveout_size ?
+ tegra_carveout_start + tegra_carveout_size - 1 : 0,
+ tegra_vpr_start,
+ tegra_vpr_size ?
+ tegra_vpr_start + tegra_vpr_size - 1 : 0);
+
+#ifdef SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
+ if (smmu_reserved)
+ pr_info("SMMU: %08lx - %08lx\n",
+ smmu_window->start, smmu_window->end);
+#endif
+}
+
+void __init tegra_release_bootloader_fb(void)
+{
+ /* Since bootloader fb is reserved in common.c, it is freed here. */
+ if (tegra_bootloader_fb_size)
+ if (memblock_free(tegra_bootloader_fb_start,
+ tegra_bootloader_fb_size))
+ pr_err("Failed to free bootloader fb.\n");
+}
+
+#ifdef CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND
+static char cpufreq_gov_default[32];
+static char *cpufreq_gov_conservative = "conservative";
+static char *cpufreq_sysfs_place_holder = "/sys/devices/system/cpu/cpu%i/cpufreq/scaling_governor";
+static char *cpufreq_gov_conservative_param = "/sys/devices/system/cpu/cpufreq/conservative/%s";
+
+static void cpufreq_set_governor(char *governor)
+{
+ struct file *scaling_gov = NULL;
+ mm_segment_t old_fs;
+ char buf[128];
+ int i = 0;
+ loff_t offset = 0;
+
+ if (governor == NULL)
+ return;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+#ifndef CONFIG_TEGRA_AUTO_HOTPLUG
+ for_each_online_cpu(i)
+#endif
+ {
+ sprintf(buf, cpufreq_sysfs_place_holder, i);
+ scaling_gov = filp_open(buf, O_RDWR, 0);
+ if (IS_ERR_OR_NULL(scaling_gov)) {
+ pr_err("%s. Can't open %s\n", __func__, buf);
+ } else {
+ if (scaling_gov->f_op != NULL &&
+ scaling_gov->f_op->write != NULL)
+ scaling_gov->f_op->write(scaling_gov,
+ governor,
+ strlen(governor),
+ &offset);
+ else
+ pr_err("f_op might be null\n");
+
+ filp_close(scaling_gov, NULL);
+ }
+ }
+ set_fs(old_fs);
+}
+
+void cpufreq_save_default_governor(void)
+{
+ struct file *scaling_gov = NULL;
+ mm_segment_t old_fs;
+ char buf[128];
+ loff_t offset = 0;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ buf[127] = 0;
+ sprintf(buf, cpufreq_sysfs_place_holder, 0);
+ scaling_gov = filp_open(buf, O_RDONLY, 0);
+ if (IS_ERR_OR_NULL(scaling_gov)) {
+ pr_err("%s. Can't open %s\n", __func__, buf);
+ } else {
+ if (scaling_gov->f_op != NULL &&
+ scaling_gov->f_op->read != NULL)
+ scaling_gov->f_op->read(scaling_gov,
+ cpufreq_gov_default,
+ 32,
+ &offset);
+ else
+ pr_err("f_op might be null\n");
+
+ filp_close(scaling_gov, NULL);
+ }
+ set_fs(old_fs);
+}
+
+void cpufreq_restore_default_governor(void)
+{
+ cpufreq_set_governor(cpufreq_gov_default);
+}
+
+void cpufreq_set_conservative_governor_param(int up_th, int down_th)
+{
+ struct file *gov_param = NULL;
+ static char buf[128],parm[8];
+ loff_t offset = 0;
+ mm_segment_t old_fs;
+
+ if (up_th <= down_th) {
+ printk(KERN_ERR "%s: up_th(%d) must be greater than down_th(%d)\n",
+ __func__, up_th, down_th);
+ return;
+ }
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ sprintf(parm, "%d", up_th);
+ sprintf(buf, cpufreq_gov_conservative_param ,"up_threshold");
+ gov_param = filp_open(buf, O_RDONLY, 0);
+ if (IS_ERR_OR_NULL(gov_param)) {
+ pr_err("%s. Can't open %s\n", __func__, buf);
+ } else {
+ if (gov_param->f_op != NULL &&
+ gov_param->f_op->write != NULL)
+ gov_param->f_op->write(gov_param,
+ parm,
+ strlen(parm),
+ &offset);
+ else
+ pr_err("f_op might be null\n");
+
+ filp_close(gov_param, NULL);
+ }
+
+ sprintf(parm, "%d", down_th);
+ sprintf(buf, cpufreq_gov_conservative_param ,"down_threshold");
+ gov_param = filp_open(buf, O_RDONLY, 0);
+ if (IS_ERR_OR_NULL(gov_param)) {
+ pr_err("%s. Can't open %s\n", __func__, buf);
+ } else {
+ if (gov_param->f_op != NULL &&
+ gov_param->f_op->write != NULL)
+ gov_param->f_op->write(gov_param,
+ parm,
+ strlen(parm),
+ &offset);
+ else
+ pr_err("f_op might be null\n");
+
+ filp_close(gov_param, NULL);
+ }
+ set_fs(old_fs);
+}
+
+void cpufreq_set_conservative_governor(void)
+{
+ cpufreq_set_governor(cpufreq_gov_conservative);
}
+#endif /* CONFIG_TEGRA_CONVSERVATIVE_GOV_ON_EARLYSUPSEND */
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 0e0fd4d889bd..5599c298ac5b 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -7,6 +7,8 @@
* Colin Cross <ccross@google.com>
* Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -29,32 +31,408 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/cpu.h>
#include <asm/system.h>
#include <mach/clk.h>
+#include <mach/edp.h>
-/* Frequency table index must be sequential starting at 0 */
-static struct cpufreq_frequency_table freq_table[] = {
- { 0, 216000 },
- { 1, 312000 },
- { 2, 456000 },
- { 3, 608000 },
- { 4, 760000 },
- { 5, 816000 },
- { 6, 912000 },
- { 7, 1000000 },
- { 8, CPUFREQ_TABLE_END },
-};
+#include "clock.h"
+#include "cpu-tegra.h"
-#define NUM_CPUS 2
+/* tegra throttling and edp governors require frequencies in the table
+ to be in ascending order */
+static struct cpufreq_frequency_table *freq_table;
static struct clk *cpu_clk;
static struct clk *emc_clk;
-static unsigned long target_cpu_speed[NUM_CPUS];
+static unsigned long policy_max_speed[CONFIG_NR_CPUS];
+static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
+static int suspend_index;
+
+static bool force_policy_max;
+
+static int force_policy_max_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+ bool old_policy = force_policy_max;
+
+ mutex_lock(&tegra_cpu_lock);
+
+ ret = param_set_bool(arg, kp);
+ if ((ret == 0) && (old_policy != force_policy_max))
+ tegra_cpu_set_speed_cap(NULL);
+
+ mutex_unlock(&tegra_cpu_lock);
+ return ret;
+}
+
+static int force_policy_max_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops policy_ops = {
+ .set = force_policy_max_set,
+ .get = force_policy_max_get,
+};
+module_param_cb(force_policy_max, &policy_ops, &force_policy_max, 0644);
+
+
+static unsigned int cpu_user_cap;
+
+static int cpu_user_cap_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+
+ mutex_lock(&tegra_cpu_lock);
+
+ ret = param_set_uint(arg, kp);
+ if (ret == 0) {
+#ifndef CONFIG_TEGRA_CPU_CAP_EXACT_FREQ
+ if (cpu_user_cap != 0) {
+ int i;
+ for (i = 0; freq_table[i].frequency !=
+ CPUFREQ_TABLE_END; i++) {
+ if (freq_table[i].frequency > cpu_user_cap)
+ break;
+ }
+ i = (i == 0) ? 0 : i - 1;
+ cpu_user_cap = freq_table[i].frequency;
+ }
+#endif
+ tegra_cpu_set_speed_cap(NULL);
+ }
+
+ mutex_unlock(&tegra_cpu_lock);
+ return ret;
+}
+
+static int cpu_user_cap_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_uint(buffer, kp);
+}
+
+static struct kernel_param_ops cap_ops = {
+ .set = cpu_user_cap_set,
+ .get = cpu_user_cap_get,
+};
+module_param_cb(cpu_user_cap, &cap_ops, &cpu_user_cap, 0644);
+
+static unsigned int user_cap_speed(unsigned int requested_speed)
+{
+ if ((cpu_user_cap) && (requested_speed > cpu_user_cap))
+ return cpu_user_cap;
+ return requested_speed;
+}
+
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+
+static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", tegra_is_throttling());
+}
+
+cpufreq_freq_attr_ro(throttle);
+#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+
+static const struct tegra_edp_limits *cpu_edp_limits;
+static int cpu_edp_limits_size;
+
+static const unsigned int *system_edp_limits;
+static bool system_edp_alarm;
+
+static int edp_thermal_index;
+static cpumask_t edp_cpumask;
+static unsigned int edp_limit;
+
+unsigned int tegra_get_edp_limit(void)
+{
+ return edp_limit;
+}
+
+static unsigned int edp_predict_limit(unsigned int cpus)
+{
+ unsigned int limit = 0;
+
+ BUG_ON(cpus == 0);
+ if (cpu_edp_limits) {
+ BUG_ON(edp_thermal_index >= cpu_edp_limits_size);
+ limit = cpu_edp_limits[edp_thermal_index].freq_limits[cpus - 1];
+ }
+ if (system_edp_limits && system_edp_alarm)
+ limit = min(limit, system_edp_limits[cpus - 1]);
+
+ return limit;
+}
+
+static void edp_update_limit(void)
+{
+ unsigned int limit = edp_predict_limit(cpumask_weight(&edp_cpumask));
+
+#ifdef CONFIG_TEGRA_EDP_EXACT_FREQ
+ edp_limit = limit;
+#else
+ unsigned int i;
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (freq_table[i].frequency > limit) {
+ break;
+ }
+ }
+ BUG_ON(i == 0); /* min freq above the limit or table empty */
+ edp_limit = freq_table[i-1].frequency;
+#endif
+}
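[Editor's note: when CONFIG_TEGRA_EDP_EXACT_FREQ is not set, the predicted limit is rounded down to the nearest entry of the ascending frequency table. A standalone sketch with a made-up table (using 0 as its own end marker instead of CPUFREQ_TABLE_END) is shown below.]

    #include <stdio.h>

    #define TABLE_END 0u

    int main(void)
    {
            unsigned int freq_table[] = { 216000, 456000, 760000, 1000000, TABLE_END };
            unsigned int limit = 800000;    /* kHz, hypothetical EDP prediction */
            unsigned int i;

            /* Walk up the table until an entry exceeds the limit... */
            for (i = 0; freq_table[i] != TABLE_END; i++)
                    if (freq_table[i] > limit)
                            break;

            /* ...then take the previous one; i == 0 would mean even the
             * lowest entry violates the limit. */
            printf("edp_limit = %u kHz\n", i ? freq_table[i - 1] : freq_table[0]);
            return 0;
    }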
+
+static unsigned int edp_governor_speed(unsigned int requested_speed)
+{
+ if ((!edp_limit) || (requested_speed <= edp_limit))
+ return requested_speed;
+ else
+ return edp_limit;
+}
+
+int tegra_edp_update_thermal_zone(int temperature)
+{
+ int i;
+ int ret = 0;
+ int nlimits = cpu_edp_limits_size;
+ int index;
+
+ if (!cpu_edp_limits)
+ return -EINVAL;
+
+ index = nlimits - 1;
+
+ if (temperature < cpu_edp_limits[0].temperature) {
+ index = 0;
+ } else {
+ for (i = 0; i < (nlimits - 1); i++) {
+ if (temperature >= cpu_edp_limits[i].temperature &&
+ temperature < cpu_edp_limits[i + 1].temperature) {
+ index = i + 1;
+ break;
+ }
+ }
+ }
+
+ mutex_lock(&tegra_cpu_lock);
+ edp_thermal_index = index;
+
+ /* Update cpu rate if cpufreq (at least on cpu0) is already started */
+ if (target_cpu_speed[0]) {
+ edp_update_limit();
+ tegra_cpu_set_speed_cap(NULL);
+ }
+ mutex_unlock(&tegra_cpu_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tegra_edp_update_thermal_zone);
+
+int tegra_system_edp_alarm(bool alarm)
+{
+ int ret = -ENODEV;
+
+ mutex_lock(&tegra_cpu_lock);
+ system_edp_alarm = alarm;
+
+ /* Update the cpu rate if cpufreq (at least on cpu0) has already started,
+ and cancel emergency throttling once the edp limit has been applied */
+ if (target_cpu_speed[0]) {
+ edp_update_limit();
+ ret = tegra_cpu_set_speed_cap(NULL);
+ if (!ret && alarm)
+ tegra_edp_throttle_cpu_now(0);
+ }
+ mutex_unlock(&tegra_cpu_lock);
+
+ return ret;
+}
+
+bool tegra_cpu_edp_favor_up(unsigned int n, int mp_overhead)
+{
+ unsigned int current_limit, next_limit;
+
+ if (n == 0)
+ return true;
+
+ if (n >= ARRAY_SIZE(cpu_edp_limits->freq_limits))
+ return false;
+
+ current_limit = edp_predict_limit(n);
+ next_limit = edp_predict_limit(n + 1);
+
+ return ((next_limit * (n + 1)) >=
+ (current_limit * n * (100 + mp_overhead) / 100));
+}
+
+bool tegra_cpu_edp_favor_down(unsigned int n, int mp_overhead)
+{
+ unsigned int current_limit, next_limit;
+
+ if (n <= 1)
+ return false;
+
+ if (n > ARRAY_SIZE(cpu_edp_limits->freq_limits))
+ return true;
+
+ current_limit = edp_predict_limit(n);
+ next_limit = edp_predict_limit(n - 1);
+
+ return ((next_limit * (n - 1) * (100 + mp_overhead) / 100)) >
+ (current_limit * n);
+}
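[Editor's note: the favor-up/favor-down tests above weigh aggregate capacity at the per-core EDP limits against the current capacity inflated by the assumed MP overhead. A standalone sketch of the favor-up arithmetic with invented limits (mp_overhead of 10 matches the module default) is shown below.]

    #include <stdio.h>

    int main(void)
    {
            unsigned int n = 2;                 /* cores currently online */
            unsigned int cur_limit = 1000000;   /* kHz cap with n cores (made up) */
            unsigned int next_limit = 760000;   /* kHz cap with n + 1 cores (made up) */
            int mp_overhead = 10;               /* percent */

            /* Favor bringing a core online when the capacity at the lower
             * per-core limit still beats the current capacity plus overhead. */
            int favor_up = (next_limit * (n + 1)) >=
                           (cur_limit * n * (100 + mp_overhead) / 100);

            printf("favor_up = %d (%u * 3 vs %u * 2 * 1.10)\n",
                   favor_up, next_limit, cur_limit);
            return 0;
    }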
+
+static int tegra_cpu_edp_notify(
+ struct notifier_block *nb, unsigned long event, void *hcpu)
+{
+ int ret = 0;
+ unsigned int cpu_speed, new_speed;
+ int cpu = (long)hcpu;
+
+ switch (event) {
+ case CPU_UP_PREPARE:
+ mutex_lock(&tegra_cpu_lock);
+ cpu_set(cpu, edp_cpumask);
+ edp_update_limit();
+
+ cpu_speed = tegra_getspeed(0);
+ new_speed = edp_governor_speed(cpu_speed);
+ if (new_speed < cpu_speed) {
+ ret = tegra_cpu_set_speed_cap(NULL);
+ if (ret) {
+ cpu_clear(cpu, edp_cpumask);
+ edp_update_limit();
+ }
+
+ printk(KERN_DEBUG "tegra CPU:%sforce EDP limit %u kHz"
+ "\n", ret ? " failed to " : " ", new_speed);
+ }
+ mutex_unlock(&tegra_cpu_lock);
+ break;
+ case CPU_DEAD:
+ mutex_lock(&tegra_cpu_lock);
+ cpu_clear(cpu, edp_cpumask);
+ edp_update_limit();
+ tegra_cpu_set_speed_cap(NULL);
+ mutex_unlock(&tegra_cpu_lock);
+ break;
+ }
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block tegra_cpu_edp_notifier = {
+ .notifier_call = tegra_cpu_edp_notify,
+};
+
+static void tegra_cpu_edp_init(bool resume)
+{
+ tegra_get_system_edp_limits(&system_edp_limits);
+ tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size);
+
+ if (!(cpu_edp_limits || system_edp_limits)) {
+ if (!resume)
+ pr_info("cpu-tegra: no EDP table is provided\n");
+ return;
+ }
+
+ /* FIXME: use the highest temperature limits if the sensor is not on-line?
+ * If the thermal zone has not been set by the sensor yet, edp_thermal_index
+ * stays at 0. The boot frequency got the SoC this far, so this should be
+ * safe until the sensor is initialized.
+ */
+ edp_cpumask = *cpu_online_mask;
+ edp_update_limit();
+
+ if (!resume) {
+ register_hotcpu_notifier(&tegra_cpu_edp_notifier);
+ pr_info("cpu-tegra: init EDP limit: %u MHz\n", edp_limit/1000);
+ }
+}
+
+static void tegra_cpu_edp_exit(void)
+{
+ if (!(cpu_edp_limits || system_edp_limits))
+ return;
+
+ unregister_hotcpu_notifier(&tegra_cpu_edp_notifier);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int system_edp_alarm_get(void *data, u64 *val)
+{
+ *val = (u64)system_edp_alarm;
+ return 0;
+}
+static int system_edp_alarm_set(void *data, u64 val)
+{
+ if (val > 1) { /* emulate emergency throttling */
+ tegra_edp_throttle_cpu_now(val);
+ return 0;
+ }
+ return tegra_system_edp_alarm((bool)val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(system_edp_alarm_fops,
+ system_edp_alarm_get, system_edp_alarm_set, "%llu\n");
+
+static int __init tegra_edp_debug_init(struct dentry *cpu_tegra_debugfs_root)
+{
+ if (!debugfs_create_file("edp_alarm", 0644, cpu_tegra_debugfs_root,
+ NULL, &system_edp_alarm_fops))
+ return -ENOMEM;
+
+ return 0;
+}
+#endif
+
+#else /* CONFIG_TEGRA_EDP_LIMITS */
+#define edp_governor_speed(requested_speed) (requested_speed)
+#define tegra_cpu_edp_init(resume)
+#define tegra_cpu_edp_exit()
+#define tegra_edp_debug_init(cpu_tegra_debugfs_root) (0)
+#endif /* CONFIG_TEGRA_EDP_LIMITS */
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *cpu_tegra_debugfs_root;
+
+static int __init tegra_cpu_debug_init(void)
+{
+ cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);
+
+ if (!cpu_tegra_debugfs_root)
+ return -ENOMEM;
+
+ if (tegra_throttle_debug_init(cpu_tegra_debugfs_root))
+ goto err_out;
+
+ if (tegra_edp_debug_init(cpu_tegra_debugfs_root))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(cpu_tegra_debugfs_root);
+ return -ENOMEM;
+}
+
+static void __exit tegra_cpu_debug_exit(void)
+{
+ debugfs_remove_recursive(cpu_tegra_debugfs_root);
+}
+
+late_initcall(tegra_cpu_debug_init);
+module_exit(tegra_cpu_debug_exit);
+#endif /* CONFIG_DEBUG_FS */
int tegra_verify_speed(struct cpufreq_policy *policy)
{
@@ -65,7 +443,7 @@ unsigned int tegra_getspeed(unsigned int cpu)
{
unsigned long rate;
- if (cpu >= NUM_CPUS)
+ if (cpu >= CONFIG_NR_CPUS)
return 0;
rate = clk_get_rate(cpu_clk) / 1000;
@@ -80,6 +458,10 @@ static int tegra_update_cpu_speed(unsigned long rate)
freqs.old = tegra_getspeed(0);
freqs.new = rate;
+ rate = clk_round_rate(cpu_clk, rate * 1000);
+ if (!IS_ERR_VALUE(rate))
+ freqs.new = rate / 1000;
+
if (freqs.old == freqs.new)
return ret;
@@ -87,12 +469,8 @@ static int tegra_update_cpu_speed(unsigned long rate)
* Vote on memory bus frequency based on cpu frequency
* This sets the minimum frequency, display or avp may request higher
*/
- if (rate >= 816000)
- clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
- else if (rate >= 456000)
- clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */
- else
- clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
+ if (freqs.old < freqs.new)
+ clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
for_each_online_cpu(freqs.cpu)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -112,48 +490,104 @@ static int tegra_update_cpu_speed(unsigned long rate)
for_each_online_cpu(freqs.cpu)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ if (freqs.old > freqs.new)
+ clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
+
return 0;
}
-static unsigned long tegra_cpu_highest_speed(void)
+unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
{
- unsigned long rate = 0;
+ unsigned int cnt = 0;
int i;
for_each_online_cpu(i)
+ if (target_cpu_speed[i] <= speed_limit)
+ cnt++;
+ return cnt;
+}
+
+unsigned int tegra_get_slowest_cpu_n(void)
+{
+ unsigned int cpu = nr_cpu_ids;
+ unsigned long rate = ULONG_MAX;
+ int i;
+
+ for_each_online_cpu(i)
+ if ((i > 0) && (rate > target_cpu_speed[i])) {
+ cpu = i;
+ rate = target_cpu_speed[i];
+ }
+ return cpu;
+}
+
+unsigned long tegra_cpu_lowest_speed(void)
+{
+ unsigned long rate = ULONG_MAX;
+ int i;
+
+ for_each_online_cpu(i)
+ rate = min(rate, target_cpu_speed[i]);
+ return rate;
+}
+
+unsigned long tegra_cpu_highest_speed(void)
+{
+ unsigned long policy_max = ULONG_MAX;
+ unsigned long rate = 0;
+ int i;
+
+ for_each_online_cpu(i) {
+ if (force_policy_max)
+ policy_max = min(policy_max, policy_max_speed[i]);
rate = max(rate, target_cpu_speed[i]);
+ }
+ rate = min(rate, policy_max);
return rate;
}
+int tegra_cpu_set_speed_cap(unsigned int *speed_cap)
+{
+ int ret = 0;
+ unsigned int new_speed = tegra_cpu_highest_speed();
+
+ if (is_suspended)
+ return -EBUSY;
+
+ new_speed = tegra_throttle_governor_speed(new_speed);
+ new_speed = edp_governor_speed(new_speed);
+ new_speed = user_cap_speed(new_speed);
+ if (speed_cap)
+ *speed_cap = new_speed;
+
+ ret = tegra_update_cpu_speed(new_speed);
+ if (ret == 0)
+ tegra_auto_hotplug_governor(new_speed, false);
+ return ret;
+}
+
static int tegra_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int idx;
unsigned int freq;
+ unsigned int new_speed;
int ret = 0;
mutex_lock(&tegra_cpu_lock);
- if (is_suspended) {
- ret = -EBUSY;
- goto out;
- }
-
cpufreq_frequency_table_target(policy, freq_table, target_freq,
relation, &idx);
freq = freq_table[idx].frequency;
target_cpu_speed[policy->cpu] = freq;
+ ret = tegra_cpu_set_speed_cap(&new_speed);
- ret = tegra_update_cpu_speed(tegra_cpu_highest_speed());
-
-out:
mutex_unlock(&tegra_cpu_lock);
+
return ret;
}
+
static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
void *dummy)
{
@@ -161,10 +595,17 @@ static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
if (event == PM_SUSPEND_PREPARE) {
is_suspended = true;
pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
- freq_table[0].frequency);
- tegra_update_cpu_speed(freq_table[0].frequency);
+ freq_table[suspend_index].frequency);
+ tegra_update_cpu_speed(freq_table[suspend_index].frequency);
+ tegra_auto_hotplug_governor(
+ freq_table[suspend_index].frequency, true);
} else if (event == PM_POST_SUSPEND) {
+ unsigned int freq;
is_suspended = false;
+ tegra_cpu_edp_init(true);
+ tegra_cpu_set_speed_cap(&freq);
+ pr_info("Tegra cpufreq resume: restoring frequency to %d kHz\n",
+ freq);
}
mutex_unlock(&tegra_cpu_lock);
@@ -177,7 +618,7 @@ static struct notifier_block tegra_cpu_pm_notifier = {
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu >= NUM_CPUS)
+ if (policy->cpu >= CONFIG_NR_CPUS)
return -EINVAL;
cpu_clk = clk_get_sys(NULL, "cpu");
@@ -204,8 +645,9 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->related_cpus, cpu_possible_mask);
- if (policy->cpu == 0)
+ if (policy->cpu == 0) {
register_pm_notifier(&tegra_cpu_pm_notifier);
+ }
return 0;
}
@@ -219,8 +661,30 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
+static int tegra_cpufreq_policy_notifier(
+ struct notifier_block *nb, unsigned long event, void *data)
+{
+ int i, ret;
+ struct cpufreq_policy *policy = data;
+
+ if (event == CPUFREQ_NOTIFY) {
+ ret = cpufreq_frequency_table_target(policy, freq_table,
+ policy->max, CPUFREQ_RELATION_H, &i);
+ policy_max_speed[policy->cpu] =
+ ret ? policy->max : freq_table[i].frequency;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpufreq_policy_nb = {
+ .notifier_call = tegra_cpufreq_policy_notifier,
+};
+
static struct freq_attr *tegra_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+ &throttle,
+#endif
NULL,
};
@@ -236,12 +700,42 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
static int __init tegra_cpufreq_init(void)
{
+ int ret = 0;
+
+ struct tegra_cpufreq_table_data *table_data =
+ tegra_cpufreq_table_get();
+ if (IS_ERR_OR_NULL(table_data))
+ return -EINVAL;
+
+ suspend_index = table_data->suspend_index;
+
+ ret = tegra_throttle_init(&tegra_cpu_lock);
+ if (ret)
+ return ret;
+
+ ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
+ if (ret)
+ return ret;
+
+ freq_table = table_data->freq_table;
+ tegra_cpu_edp_init(false);
+
+ ret = cpufreq_register_notifier(
+ &tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
+ return ret;
+
return cpufreq_register_driver(&tegra_cpufreq_driver);
}
static void __exit tegra_cpufreq_exit(void)
{
- cpufreq_unregister_driver(&tegra_cpufreq_driver);
+ tegra_throttle_exit();
+ tegra_cpu_edp_exit();
+ tegra_auto_hotplug_exit();
+ cpufreq_unregister_driver(&tegra_cpufreq_driver);
+ cpufreq_unregister_notifier(
+ &tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
}
diff --git a/arch/arm/mach-tegra/cpu-tegra.h b/arch/arm/mach-tegra/cpu-tegra.h
new file mode 100644
index 000000000000..a89ccd32d463
--- /dev/null
+++ b/arch/arm/mach-tegra/cpu-tegra.h
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/mach-tegra/cpu-tegra.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_CPU_TEGRA_H
+#define __MACH_TEGRA_CPU_TEGRA_H
+
+unsigned int tegra_getspeed(unsigned int cpu);
+int tegra_cpu_set_speed_cap(unsigned int *speed_cap);
+unsigned int tegra_count_slow_cpus(unsigned long speed_limit);
+unsigned int tegra_get_slowest_cpu_n(void);
+unsigned long tegra_cpu_lowest_speed(void);
+unsigned long tegra_cpu_highest_speed(void);
+
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+int tegra_throttle_init(struct mutex *cpu_lock);
+void tegra_throttle_exit(void);
+bool tegra_is_throttling(void);
+unsigned int tegra_throttle_governor_speed(unsigned int requested_speed);
+int tegra_throttle_debug_init(struct dentry *cpu_tegra_debugfs_root);
+void tegra_throttling_enable(bool enable);
+#else
+static inline int tegra_throttle_init(struct mutex *cpu_lock)
+{ return 0; }
+static inline void tegra_throttle_exit(void)
+{}
+static inline bool tegra_is_throttling(void)
+{ return false; }
+static inline unsigned int tegra_throttle_governor_speed(
+ unsigned int requested_speed)
+{ return requested_speed; }
+static inline int tegra_throttle_debug_init(
+ struct dentry *cpu_tegra_debugfs_root)
+{ return 0; }
+static inline void tegra_throttling_enable(bool enable)
+{}
+#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
+
+#if defined(CONFIG_TEGRA_AUTO_HOTPLUG) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+int tegra_auto_hotplug_init(struct mutex *cpu_lock);
+void tegra_auto_hotplug_exit(void);
+void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend);
+#else
+static inline int tegra_auto_hotplug_init(struct mutex *cpu_lock)
+{ return 0; }
+static inline void tegra_auto_hotplug_exit(void)
+{ }
+static inline void tegra_auto_hotplug_governor(unsigned int cpu_freq,
+ bool suspend)
+{ }
+#endif
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+bool tegra_cpu_edp_favor_up(unsigned int n, int mp_overhead);
+bool tegra_cpu_edp_favor_down(unsigned int n, int mp_overhead);
+#else
+static inline bool tegra_cpu_edp_favor_up(unsigned int n, int mp_overhead)
+{ return true; }
+static inline bool tegra_cpu_edp_favor_down(unsigned int n, int mp_overhead)
+{ return false; }
+#endif
+
+#endif /* __MACH_TEGRA_CPU_TEGRA_H */
diff --git a/arch/arm/mach-tegra/cpu-tegra3.c b/arch/arm/mach-tegra/cpu-tegra3.c
new file mode 100644
index 000000000000..78425f4f7643
--- /dev/null
+++ b/arch/arm/mach-tegra/cpu-tegra3.c
@@ -0,0 +1,490 @@
+/*
+ * arch/arm/mach-tegra/cpu-tegra3.c
+ *
+ * CPU auto-hotplug for Tegra3 CPUs
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_qos_params.h>
+
+#include "pm.h"
+#include "cpu-tegra.h"
+#include "clock.h"
+
+#define INITIAL_STATE TEGRA_HP_DISABLED
+#define UP2G0_DELAY_MS 200
+#define UP2Gn_DELAY_MS 1000
+#define DOWN_DELAY_MS 2000
+
+static struct mutex *tegra3_cpu_lock;
+
+static struct workqueue_struct *hotplug_wq;
+static struct delayed_work hotplug_work;
+
+static bool no_lp;
+module_param(no_lp, bool, 0644);
+
+static unsigned long up2gn_delay;
+static unsigned long up2g0_delay;
+static unsigned long down_delay;
+module_param(up2gn_delay, ulong, 0644);
+module_param(up2g0_delay, ulong, 0644);
+module_param(down_delay, ulong, 0644);
+
+static unsigned int idle_top_freq;
+static unsigned int idle_bottom_freq;
+module_param(idle_top_freq, uint, 0644);
+module_param(idle_bottom_freq, uint, 0644);
+
+static int mp_overhead = 10;
+module_param(mp_overhead, int, 0644);
+
+static int balance_level = 75;
+module_param(balance_level, int, 0644);
+
+static struct clk *cpu_clk;
+static struct clk *cpu_g_clk;
+static struct clk *cpu_lp_clk;
+
+static struct {
+ cputime64_t time_up_total;
+ u64 last_update;
+ unsigned int up_down_count;
+} hp_stats[CONFIG_NR_CPUS + 1]; /* Append LP CPU entry at the end */
+
+static void hp_init_stats(void)
+{
+ int i;
+ u64 cur_jiffies = get_jiffies_64();
+
+ for (i = 0; i <= CONFIG_NR_CPUS; i++) {
+ hp_stats[i].time_up_total = 0;
+ hp_stats[i].last_update = cur_jiffies;
+
+ hp_stats[i].up_down_count = 0;
+ if (is_lp_cluster()) {
+ if (i == CONFIG_NR_CPUS)
+ hp_stats[i].up_down_count = 1;
+ } else {
+ if ((i < nr_cpu_ids) && cpu_online(i))
+ hp_stats[i].up_down_count = 1;
+ }
+ }
+
+}
+
+static void hp_stats_update(unsigned int cpu, bool up)
+{
+ u64 cur_jiffies = get_jiffies_64();
+ bool was_up = hp_stats[cpu].up_down_count & 0x1;
+
+ if (was_up)
+ hp_stats[cpu].time_up_total = cputime64_add(
+ hp_stats[cpu].time_up_total, cputime64_sub(
+ cur_jiffies, hp_stats[cpu].last_update));
+
+ if (was_up != up) {
+ hp_stats[cpu].up_down_count++;
+ if ((hp_stats[cpu].up_down_count & 0x1) != up) {
+ /* FIXME: sysfs user space CPU control breaks stats */
+ pr_err("tegra hotplug stats out of sync with %s CPU%d",
+ (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
+ (cpu < CONFIG_NR_CPUS) ? cpu : 0);
+ hp_stats[cpu].up_down_count ^= 0x1;
+ }
+ }
+ hp_stats[cpu].last_update = cur_jiffies;
+}
+
+
+enum {
+ TEGRA_HP_DISABLED = 0,
+ TEGRA_HP_IDLE,
+ TEGRA_HP_DOWN,
+ TEGRA_HP_UP,
+};
+static int hp_state;
+
+static int hp_state_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ int old_state;
+
+ if (!tegra3_cpu_lock)
+ return ret;
+
+ mutex_lock(tegra3_cpu_lock);
+
+ old_state = hp_state;
+ ret = param_set_bool(arg, kp); /* set idle or disabled only */
+
+ if (ret == 0) {
+ if ((hp_state == TEGRA_HP_DISABLED) &&
+ (old_state != TEGRA_HP_DISABLED))
+ pr_info("Tegra auto-hotplug disabled\n");
+ else if (hp_state != TEGRA_HP_DISABLED) {
+ if (old_state == TEGRA_HP_DISABLED) {
+ pr_info("Tegra auto-hotplug enabled\n");
+ hp_init_stats();
+ }
+ /* catch-up with governor target speed */
+ tegra_cpu_set_speed_cap(NULL);
+ }
+ } else
+ pr_warn("%s: unable to set tegra hotplug state %s\n",
+ __func__, arg);
+
+ mutex_unlock(tegra3_cpu_lock);
+ return ret;
+}
+
+static int hp_state_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_int(buffer, kp);
+}
+
+static struct kernel_param_ops tegra_hp_state_ops = {
+ .set = hp_state_set,
+ .get = hp_state_get,
+};
+module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644);
+
+
+enum {
+ TEGRA_CPU_SPEED_BALANCED,
+ TEGRA_CPU_SPEED_BIASED,
+ TEGRA_CPU_SPEED_SKEWED,
+};
+
+static noinline int tegra_cpu_speed_balance(void)
+{
+ unsigned long highest_speed = tegra_cpu_highest_speed();
+ unsigned long balanced_speed = highest_speed * balance_level / 100;
+ unsigned long skewed_speed = balanced_speed / 2;
+ unsigned int nr_cpus = num_online_cpus();
+ unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
+
+ /* balanced: freq targets for all CPUs are above balanced_speed
+ (balance_level% of the highest speed, 75% by default)
+ biased: freq target for at least one CPU is below balanced_speed
+ skewed: freq targets for at least 2 CPUs are below skewed_speed
+ (half of balanced_speed) */
+ if ((tegra_count_slow_cpus(skewed_speed) >= 2) ||
+ tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
+ (nr_cpus > max_cpus))
+ return TEGRA_CPU_SPEED_SKEWED;
+
+ if ((tegra_count_slow_cpus(balanced_speed) >= 1) ||
+ (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
+ (nr_cpus == max_cpus))
+ return TEGRA_CPU_SPEED_BIASED;
+
+ return TEGRA_CPU_SPEED_BALANCED;
+}
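[Editor's note: a standalone sketch of the classification above, with invented per-CPU target speeds and the default balance_level of 75; the EDP and PM QoS max-CPU checks of the real function are omitted.]

    #include <stdio.h>

    int main(void)
    {
            unsigned long speeds[] = { 1000000, 300000, 900000 }; /* kHz, made up */
            unsigned long highest = 1000000;
            unsigned long balanced = highest * 75 / 100;   /* balanced_speed */
            unsigned long skewed = balanced / 2;           /* skewed_speed */
            unsigned int slow_bal = 0, slow_skew = 0, i;

            for (i = 0; i < 3; i++) {
                    if (speeds[i] <= balanced)
                            slow_bal++;
                    if (speeds[i] <= skewed)
                            slow_skew++;
            }

            if (slow_skew >= 2)
                    printf("SKEWED: take a core offline\n");
            else if (slow_bal >= 1)
                    printf("BIASED: hold the current core count\n");
            else
                    printf("BALANCED: bring one more core online\n");
            return 0;
    }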
+
+static void tegra_auto_hotplug_work_func(struct work_struct *work)
+{
+ bool up = false;
+ unsigned int cpu = nr_cpu_ids;
+
+ mutex_lock(tegra3_cpu_lock);
+
+ switch (hp_state) {
+ case TEGRA_HP_DISABLED:
+ case TEGRA_HP_IDLE:
+ break;
+ case TEGRA_HP_DOWN:
+ cpu = tegra_get_slowest_cpu_n();
+ if (cpu < nr_cpu_ids) {
+ up = false;
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, down_delay);
+ hp_stats_update(cpu, false);
+ } else if (!is_lp_cluster() && !no_lp) {
+ if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
+ hp_stats_update(CONFIG_NR_CPUS, true);
+ hp_stats_update(0, false);
+ /* catch-up with governor target speed */
+ tegra_cpu_set_speed_cap(NULL);
+ } else
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, down_delay);
+ }
+ break;
+ case TEGRA_HP_UP:
+ if (is_lp_cluster() && !no_lp) {
+ if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
+ hp_stats_update(CONFIG_NR_CPUS, false);
+ hp_stats_update(0, true);
+ /* catch-up with governor target speed */
+ tegra_cpu_set_speed_cap(NULL);
+ }
+ } else {
+ switch (tegra_cpu_speed_balance()) {
+ /* cpu speed is up and balanced - one more on-line */
+ case TEGRA_CPU_SPEED_BALANCED:
+ cpu = cpumask_next_zero(0, cpu_online_mask);
+ if (cpu < nr_cpu_ids) {
+ up = true;
+ hp_stats_update(cpu, true);
+ }
+ break;
+ /* cpu speed is up, but skewed - remove one core */
+ case TEGRA_CPU_SPEED_SKEWED:
+ cpu = tegra_get_slowest_cpu_n();
+ if (cpu < nr_cpu_ids) {
+ up = false;
+ hp_stats_update(cpu, false);
+ }
+ break;
+ /* cpu speed is up, but under-utilized - do nothing */
+ case TEGRA_CPU_SPEED_BIASED:
+ default:
+ break;
+ }
+ }
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, up2gn_delay);
+ break;
+ default:
+ pr_err("%s: invalid tegra hotplug state %d\n",
+ __func__, hp_state);
+ }
+ mutex_unlock(tegra3_cpu_lock);
+
+ if (cpu < nr_cpu_ids) {
+ if (up)
+ cpu_up(cpu);
+ else
+ cpu_down(cpu);
+ }
+}
+
+void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
+{
+ unsigned long up_delay, top_freq, bottom_freq;
+
+ if (!is_g_cluster_present())
+ return;
+
+ if (suspend && (hp_state != TEGRA_HP_DISABLED)) {
+ hp_state = TEGRA_HP_IDLE;
+ return;
+ }
+
+ if (is_lp_cluster()) {
+ up_delay = up2g0_delay;
+ top_freq = idle_top_freq;
+ bottom_freq = 0;
+ } else {
+ up_delay = up2gn_delay;
+ top_freq = idle_bottom_freq;
+ bottom_freq = idle_bottom_freq;
+ }
+
+ switch (hp_state) {
+ case TEGRA_HP_DISABLED:
+ break;
+ case TEGRA_HP_IDLE:
+ if (cpu_freq > top_freq) {
+ hp_state = TEGRA_HP_UP;
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, up_delay);
+ } else if (cpu_freq <= bottom_freq) {
+ hp_state = TEGRA_HP_DOWN;
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, down_delay);
+ }
+ break;
+ case TEGRA_HP_DOWN:
+ if (cpu_freq > top_freq) {
+ hp_state = TEGRA_HP_UP;
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, up_delay);
+ } else if (cpu_freq > bottom_freq) {
+ hp_state = TEGRA_HP_IDLE;
+ }
+ break;
+ case TEGRA_HP_UP:
+ if (cpu_freq <= bottom_freq) {
+ hp_state = TEGRA_HP_DOWN;
+ queue_delayed_work(
+ hotplug_wq, &hotplug_work, down_delay);
+ } else if (cpu_freq <= top_freq) {
+ hp_state = TEGRA_HP_IDLE;
+ }
+ break;
+ default:
+ pr_err("%s: invalid tegra hotplug state %d\n",
+ __func__, hp_state);
+ BUG();
+ }
+}
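[Editor's note: the net effect of the switch above is a hysteresis loop. On the LP cluster only an up transition can be armed (after UP2G0_DELAY_MS = 200 ms) once the target speed exceeds the LP clock's maximum; on the G cluster both thresholds collapse to the G clock's minimum rate, so sitting at that floor arms core removal or a switch back to LP after DOWN_DELAY_MS = 2000 ms, while anything above it arms further core additions after UP2Gn_DELAY_MS = 1000 ms.]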
+
+int tegra_auto_hotplug_init(struct mutex *cpu_lock)
+{
+ /*
+ * Not bound to the issuer CPU (=> high-priority), has rescue worker
+ * task, single-threaded, freezable.
+ */
+ hotplug_wq = alloc_workqueue(
+ "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
+ if (!hotplug_wq)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);
+
+ cpu_clk = clk_get_sys(NULL, "cpu");
+ cpu_g_clk = clk_get_sys(NULL, "cpu_g");
+ cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
+ if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk))
+ return -ENOENT;
+
+ idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
+ idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;
+
+ up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
+ up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
+ down_delay = msecs_to_jiffies(DOWN_DELAY_MS);
+
+ tegra3_cpu_lock = cpu_lock;
+ hp_state = INITIAL_STATE;
+ hp_init_stats();
+ pr_info("Tegra auto-hotplug initialized: %s\n",
+ (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *hp_debugfs_root;
+
+struct pm_qos_request_list max_cpu_req;
+
+static int hp_stats_show(struct seq_file *s, void *data)
+{
+ int i;
+ u64 cur_jiffies = get_jiffies_64();
+
+ mutex_lock(tegra3_cpu_lock);
+ if (hp_state != TEGRA_HP_DISABLED) {
+ for (i = 0; i <= CONFIG_NR_CPUS; i++) {
+ bool was_up = (hp_stats[i].up_down_count & 0x1);
+ hp_stats_update(i, was_up);
+ }
+ }
+ mutex_unlock(tegra3_cpu_lock);
+
+ seq_printf(s, "%-15s ", "cpu:");
+ for (i = 0; i < CONFIG_NR_CPUS; i++) {
+ seq_printf(s, "G%-9d ", i);
+ }
+ seq_printf(s, "LP\n");
+
+ seq_printf(s, "%-15s ", "transitions:");
+ for (i = 0; i <= CONFIG_NR_CPUS; i++) {
+ seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
+ }
+ seq_printf(s, "\n");
+
+ seq_printf(s, "%-15s ", "time plugged:");
+ for (i = 0; i <= CONFIG_NR_CPUS; i++) {
+ seq_printf(s, "%-10llu ",
+ cputime64_to_clock_t(hp_stats[i].time_up_total));
+ }
+ seq_printf(s, "\n");
+
+ seq_printf(s, "%-15s %llu\n", "time-stamp:",
+ cputime64_to_clock_t(cur_jiffies));
+
+ return 0;
+}
+
+static int hp_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hp_stats_show, inode->i_private);
+}
+
+static const struct file_operations hp_stats_fops = {
+ .open = hp_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int max_cpus_get(void *data, u64 *val)
+{
+ *val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
+ return 0;
+}
+static int max_cpus_set(void *data, u64 val)
+{
+ pm_qos_update_request(&max_cpu_req, (s32)val);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n");
+
+static int __init tegra_auto_hotplug_debug_init(void)
+{
+ if (!tegra3_cpu_lock)
+ return -ENOENT;
+
+ hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
+ if (!hp_debugfs_root)
+ return -ENOMEM;
+
+ pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
+ PM_QOS_DEFAULT_VALUE);
+
+ if (!debugfs_create_file(
+ "max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
+ goto err_out;
+
+ if (!debugfs_create_file(
+ "stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(hp_debugfs_root);
+ pm_qos_remove_request(&max_cpu_req);
+ return -ENOMEM;
+}
+
+late_initcall(tegra_auto_hotplug_debug_init);
+#endif
+
+void tegra_auto_hotplug_exit(void)
+{
+ destroy_workqueue(hotplug_wq);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(hp_debugfs_root);
+ pm_qos_remove_request(&max_cpu_req);
+#endif
+}
diff --git a/arch/arm/mach-tegra/cpuidle-t2.c b/arch/arm/mach-tegra/cpuidle-t2.c
new file mode 100644
index 000000000000..d95d0e712652
--- /dev/null
+++ b/arch/arm/mach-tegra/cpuidle-t2.c
@@ -0,0 +1,413 @@
+/*
+ * arch/arm/mach-tegra/cpuidle-t2.c
+ *
+ * CPU idle driver for Tegra2 CPUs
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ * Copyright (c) 2011 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+
+#include <asm/cpu_pm.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "cpuidle.h"
+#include "gic.h"
+#include "pm.h"
+#include "sleep.h"
+#include "timer.h"
+
+static struct {
+ unsigned int cpu_ready_count[2];
+ unsigned long long cpu_wants_lp2_time[2];
+ unsigned long long in_lp2_time;
+ unsigned int both_idle_count;
+ unsigned int tear_down_count;
+ unsigned int lp2_count;
+ unsigned int lp2_completed_count;
+ unsigned int lp2_count_bin[32];
+ unsigned int lp2_completed_count_bin[32];
+ unsigned int lp2_int_count[NR_IRQS];
+ unsigned int last_lp2_int_count[NR_IRQS];
+} idle_stats;
+
+static inline unsigned int time_to_bin(unsigned int time)
+{
+ return fls(time);
+}
+
+#ifdef CONFIG_SMP
+
+#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4C
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+
+static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+static u64 tegra_cpu1_wake_by_time = LLONG_MAX;
+
+static int tegra2_reset_sleeping_cpu(int cpu)
+{
+ int ret = 0;
+
+ BUG_ON(cpu == 0);
+ BUG_ON(cpu == smp_processor_id());
+ tegra_pen_lock();
+
+ if (readl(pmc + PMC_SCRATCH41) == CPU_RESETTABLE)
+ tegra2_cpu_reset(cpu);
+ else
+ ret = -EINVAL;
+
+ tegra_pen_unlock();
+
+ return ret;
+}
+
+static void tegra2_wake_reset_cpu(int cpu)
+{
+ u32 reg;
+
+ BUG_ON(cpu == 0);
+ BUG_ON(cpu == smp_processor_id());
+
+ tegra_pen_lock();
+
+ tegra2_cpu_clear_resettable();
+
+ /* enable cpu clock on cpu */
+ reg = readl(clk_rst + 0x4c);
+ writel(reg & ~(1 << (8 + cpu)),
+ clk_rst + CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+
+ /* take the CPU out of reset */
+ reg = 0x1111 << cpu;
+ writel(reg, clk_rst +
+ CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+
+ /* unhalt the cpu */
+ flowctrl_writel(0, FLOW_CTRL_HALT_CPU(1));
+
+ tegra_pen_unlock();
+}
+
+static int tegra2_reset_other_cpus(int cpu)
+{
+ int i;
+ int ret = 0;
+
+ BUG_ON(cpu != 0);
+
+ for_each_online_cpu(i) {
+ if (i != cpu) {
+ if (tegra2_reset_sleeping_cpu(i)) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for_each_online_cpu(i) {
+ if (i != cpu)
+ tegra2_wake_reset_cpu(i);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+#else
+static void tegra2_wake_reset_cpu(int cpu)
+{
+}
+
+static int tegra2_reset_other_cpus(int cpu)
+{
+ return 0;
+}
+#endif
+
+bool tegra2_lp2_is_allowed(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ u64 request = ktime_to_us(tick_nohz_get_sleep_length());
+
+ if (request < state->target_residency) {
+ /* Not enough time left to enter LP2 */
+ return false;
+ }
+
+ return true;
+}
+
+static inline void tegra2_lp3_fall_back(struct cpuidle_device *dev)
+{
+ /* Not enough time left to enter LP2 */
+ tegra_cpu_wfi();
+
+ /* fall back here from LP2 path - tell cpuidle governor */
+ dev->last_state = &dev->states[0];
+}
+
+static int tegra2_idle_lp2_cpu_0(struct cpuidle_device *dev,
+ struct cpuidle_state *state, unsigned int request)
+{
+ ktime_t entry_time;
+ ktime_t exit_time;
+ u64 wake_time;
+ bool sleep_completed = false;
+ int bin;
+ int i;
+
+ while (tegra2_cpu_is_resettable_soon())
+ cpu_relax();
+
+ if (tegra2_reset_other_cpus(dev->cpu))
+ return 0;
+
+ idle_stats.both_idle_count++;
+
+ if (request < state->target_residency) {
+ tegra2_lp3_fall_back(dev);
+ return -EBUSY;
+ }
+
+ /* LP2 entry time */
+ entry_time = ktime_get();
+
+ /* LP2 initial targeted wake time */
+ wake_time = ktime_to_us(entry_time) + request;
+
+ /* CPU0 must wake up before CPU1. */
+ smp_rmb();
+ wake_time = min_t(u64, wake_time, tegra_cpu1_wake_by_time);
+
+ /* LP2 actual targeted wake time */
+ request = wake_time - ktime_to_us(entry_time);
+ BUG_ON(wake_time < 0LL);
+
+ idle_stats.tear_down_count++;
+ entry_time = ktime_get();
+
+ if (request > state->target_residency) {
+ u64 sleep_time = request - tegra_lp2_exit_latency;
+
+ bin = time_to_bin(request / 1000);
+ idle_stats.lp2_count++;
+ idle_stats.lp2_count_bin[bin]++;
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+
+ if (tegra_idle_lp2_last(sleep_time, 0) == 0)
+ sleep_completed = true;
+ else {
+ int irq = tegra_gic_pending_interrupt();
+ idle_stats.lp2_int_count[irq]++;
+ }
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ }
+
+ for_each_online_cpu(i) {
+ if (i != dev->cpu)
+ tegra2_wake_reset_cpu(i);
+ }
+
+ exit_time = ktime_get();
+ if (sleep_completed) {
+ /*
+ * Stayed in LP2 for the full time until the next tick,
+ * adjust the exit latency based on measurement
+ */
+ u64 actual_time = ktime_to_us(ktime_sub(exit_time, entry_time));
+ unsigned int offset = actual_time - request;
+ unsigned int latency = tegra_lp2_exit_latency + offset / 16;
+ latency = clamp(latency, 0, 10000);
+ tegra_lp2_exit_latency = latency;
+ smp_wmb();
+
+ idle_stats.lp2_completed_count++;
+ idle_stats.lp2_completed_count_bin[bin]++;
+ idle_stats.in_lp2_time += actual_time;
+
+ pr_debug("%d %lld %ld %d\n", request, actual_time,
+ offset, bin);
+ }
+
+ return 0;
+}
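[Editor's note: the exit-latency adjustment above acts as a slow low-pass filter: only 1/16 of the measured overshoot is folded into the estimate each time LP2 completes, clamped to 10 ms. A standalone sketch with invented timings is shown below.]

    #include <stdio.h>

    int main(void)
    {
            unsigned int exit_latency = 2000;   /* us, current estimate (made up) */
            unsigned int requested = 10000;     /* us of sleep requested */
            unsigned int actual = 10480;        /* us actually spent (made up) */

            unsigned int offset = actual - requested;
            exit_latency += offset / 16;        /* 2000 + 30 = 2030 us */
            if (exit_latency > 10000)
                    exit_latency = 10000;

            printf("new exit latency estimate: %u us\n", exit_latency);
            return 0;
    }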
+
+static void tegra2_idle_lp2_cpu_1(struct cpuidle_device *dev,
+ struct cpuidle_state *state, unsigned int request)
+{
+#ifdef CONFIG_SMP
+ struct tegra_twd_context twd_context;
+
+ if (request < tegra_lp2_exit_latency) {
+ tegra2_cpu_clear_resettable();
+ tegra2_lp3_fall_back(dev);
+ return;
+ }
+
+ /* Save time this CPU must be awakened by. */
+ tegra_cpu1_wake_by_time = ktime_to_us(ktime_get()) + request;
+ smp_wmb();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+
+ tegra_twd_suspend(&twd_context);
+
+ tegra2_sleep_wfi(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+
+ tegra2_cpu_clear_resettable();
+
+ tegra_cpu1_wake_by_time = LLONG_MAX;
+
+ tegra_twd_resume(&twd_context);
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+#endif
+}
+
+void tegra2_idle_lp2(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ u64 request = ktime_to_us(tick_nohz_get_sleep_length());
+ bool last_cpu = tegra_set_cpu_in_lp2(dev->cpu);
+
+ cpu_pm_enter();
+
+ if (dev->cpu == 0) {
+ if (last_cpu) {
+ if (tegra2_idle_lp2_cpu_0(dev, state, request) < 0) {
+ int i;
+ for_each_online_cpu(i) {
+ if (i != dev->cpu)
+ tegra2_wake_reset_cpu(i);
+ }
+ }
+ } else {
+ tegra2_lp3_fall_back(dev);
+ }
+ } else {
+ BUG_ON(last_cpu);
+ tegra2_idle_lp2_cpu_1(dev, state, request);
+ }
+
+ cpu_pm_exit();
+ tegra_clear_cpu_in_lp2(dev->cpu);
+}
+
+void tegra2_cpu_idle_stats_lp2_ready(unsigned int cpu)
+{
+ idle_stats.cpu_ready_count[cpu]++;
+}
+
+void tegra2_cpu_idle_stats_lp2_time(unsigned int cpu, s64 us)
+{
+ idle_stats.cpu_wants_lp2_time[cpu] += us;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int tegra2_lp2_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ int i;
+ seq_printf(s, " cpu0 cpu1\n");
+ seq_printf(s, "-------------------------------------------------\n");
+ seq_printf(s, "cpu ready: %8u %8u\n",
+ idle_stats.cpu_ready_count[0],
+ idle_stats.cpu_ready_count[1]);
+ seq_printf(s, "both idle: %8u %7u%% %7u%%\n",
+ idle_stats.both_idle_count,
+ idle_stats.both_idle_count * 100 /
+ (idle_stats.cpu_ready_count[0] ?: 1),
+ idle_stats.both_idle_count * 100 /
+ (idle_stats.cpu_ready_count[1] ?: 1));
+ seq_printf(s, "tear down: %8u %7u%%\n", idle_stats.tear_down_count,
+ idle_stats.tear_down_count * 100 /
+ (idle_stats.both_idle_count ?: 1));
+ seq_printf(s, "lp2: %8u %7u%%\n", idle_stats.lp2_count,
+ idle_stats.lp2_count * 100 /
+ (idle_stats.both_idle_count ?: 1));
+ seq_printf(s, "lp2 completed: %8u %7u%%\n",
+ idle_stats.lp2_completed_count,
+ idle_stats.lp2_completed_count * 100 /
+ (idle_stats.lp2_count ?: 1));
+
+ seq_printf(s, "\n");
+ seq_printf(s, "cpu ready time: %8llu %8llu ms\n",
+ div64_u64(idle_stats.cpu_wants_lp2_time[0], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[1], 1000));
+ seq_printf(s, "lp2 time: %8llu ms %7d%% %7d%%\n",
+ div64_u64(idle_stats.in_lp2_time, 1000),
+ (int)div64_u64(idle_stats.in_lp2_time * 100,
+ idle_stats.cpu_wants_lp2_time[0] ?: 1),
+ (int)div64_u64(idle_stats.in_lp2_time * 100,
+ idle_stats.cpu_wants_lp2_time[1] ?: 1));
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%19s %8s %8s %8s\n", "", "lp2", "comp", "%");
+ seq_printf(s, "-------------------------------------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (idle_stats.lp2_count_bin[bin] == 0)
+ continue;
+ seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
+ 1 << (bin - 1), 1 << bin,
+ idle_stats.lp2_count_bin[bin],
+ idle_stats.lp2_completed_count_bin[bin],
+ idle_stats.lp2_completed_count_bin[bin] * 100 /
+ idle_stats.lp2_count_bin[bin]);
+ }
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%3s %20s %6s %10s\n",
+ "int", "name", "count", "last count");
+ seq_printf(s, "--------------------------------------------\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ if (idle_stats.lp2_int_count[i] == 0)
+ continue;
+ seq_printf(s, "%3d %20s %6d %10d\n",
+ i, irq_to_desc(i)->action ?
+ irq_to_desc(i)->action->name ?: "???" : "???",
+ idle_stats.lp2_int_count[i],
+ idle_stats.lp2_int_count[i] -
+ idle_stats.last_lp2_int_count[i]);
+ idle_stats.last_lp2_int_count[i] = idle_stats.lp2_int_count[i];
+ };
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-tegra/cpuidle-t3.c b/arch/arm/mach-tegra/cpuidle-t3.c
new file mode 100644
index 000000000000..14d818e06e92
--- /dev/null
+++ b/arch/arm/mach-tegra/cpuidle-t3.c
@@ -0,0 +1,446 @@
+/*
+ * arch/arm/mach-tegra/cpuidle-t3.c
+ *
+ * CPU idle driver for Tegra3 CPUs
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+#include <linux/clk.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_pm.h>
+#include <asm/hardware/gic.h>
+#include <asm/localtimer.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include <trace/events/power.h>
+
+#include "clock.h"
+#include "cpuidle.h"
+#include "dvfs.h"
+#include "fuse.h"
+#include "gic.h"
+#include "pm.h"
+#include "reset.h"
+#include "sleep.h"
+#include "timer.h"
+
+#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
+
+#ifdef CONFIG_SMP
+static u64 tegra_cpu_wake_by_time[4] = {
+ LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
+#endif
+
+static bool lp2_0_in_idle = true;
+module_param(lp2_0_in_idle, bool, 0644);
+
+static bool lp2_n_in_idle = true;
+module_param(lp2_n_in_idle, bool, 0644);
+
+static struct clk *cpu_clk_for_dvfs;
+static struct clk *twd_clk;
+
+static struct {
+ unsigned int cpu_ready_count[5];
+ unsigned int tear_down_count[5];
+ unsigned long long cpu_wants_lp2_time[5];
+ unsigned long long in_lp2_time[5];
+ unsigned int lp2_count;
+ unsigned int lp2_completed_count;
+ unsigned int lp2_count_bin[32];
+ unsigned int lp2_completed_count_bin[32];
+ unsigned int lp2_int_count[NR_IRQS];
+ unsigned int last_lp2_int_count[NR_IRQS];
+} idle_stats;
+
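+/* Bucket an LP2 residency request (in ms) into a power-of-two histogram bin via fls(). */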
+static inline unsigned int time_to_bin(unsigned int time)
+{
+ return fls(time);
+}
+
+static inline void tegra_irq_unmask(int irq)
+{
+ struct irq_data *data = irq_get_irq_data(irq);
+ data->chip->irq_unmask(data);
+}
+
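+/* Statistics are kept per CPU; slot 4 holds the counters for the LP (companion) cluster. */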
+static inline unsigned int cpu_number(unsigned int n)
+{
+ return is_lp_cluster() ? 4 : n;
+}
+
+void tegra3_cpu_idle_stats_lp2_ready(unsigned int cpu)
+{
+ idle_stats.cpu_ready_count[cpu_number(cpu)]++;
+}
+
+void tegra3_cpu_idle_stats_lp2_time(unsigned int cpu, s64 us)
+{
+ idle_stats.cpu_wants_lp2_time[cpu_number(cpu)] += us;
+}
+
+bool tegra3_lp2_is_allowed(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ unsigned int request;
+
+ if (!tegra_all_cpus_booted)
+ return false;
+
+ if ((!lp2_0_in_idle && !dev->cpu) || (!lp2_n_in_idle && dev->cpu))
+ return false;
+
+ /* On A01, LP2 on slave CPUs causes random CPU hangs.
+ * Refer to Bug 804085.
+ */
+ if ((tegra_get_revision() == TEGRA_REVISION_A01) &&
+ num_online_cpus() > 1)
+ return false;
+
+ /* FIXME: Entering LP2 on all CPUs simultaneously does not work yet.
+ * Don't let CPU0 enter LP2 when any secondary CPU is online.
+ */
+ if ((dev->cpu == 0) && (num_online_cpus() > 1))
+ return false;
+
+ if (dev->cpu == 0) {
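+ /* CPU0 may enter LP2 only when the CPU complex status reports all three secondary CPUs down (bits 1-3 set). */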
+ u32 reg = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ if ((reg & 0xE) != 0xE)
+ return false;
+
+ if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
+ return false;
+ }
+
+ request = ktime_to_us(tick_nohz_get_sleep_length());
+ if (request < state->target_residency) {
+ /* Not enough time left to enter LP2 */
+ return false;
+ }
+
+ return true;
+}
+
+static inline void tegra3_lp3_fall_back(struct cpuidle_device *dev)
+{
+ tegra_cpu_wfi();
+ /* fall back here from LP2 path - tell cpuidle governor */
+ dev->last_state = &dev->states[0];
+}
+
+static void tegra3_idle_enter_lp2_cpu_0(struct cpuidle_device *dev,
+ struct cpuidle_state *state, unsigned int request)
+{
+ ktime_t entry_time;
+ ktime_t exit_time;
+ bool sleep_completed = false;
+ int bin;
+
+ /* LP2 entry time */
+ entry_time = ktime_get();
+
+ if (request < state->target_residency) {
+ /* Not enough time left to enter LP2 */
+ tegra3_lp3_fall_back(dev);
+ return;
+ }
+
+#ifdef CONFIG_SMP
+ if (!is_lp_cluster() && (num_online_cpus() > 1)) {
+ u64 wake_time;
+ unsigned int i;
+
+ /* Disable the distributor -- this is the only way to
+ prevent the other CPUs from responding to interrupts
+ and potentially fiddling with the distributor
+ registers while we're fiddling with them. */
+ tegra_gic_dist_disable();
+
+ /* Did an interrupt come in for another CPU before we
+ could disable the distributor? */
+ if (!tegra3_lp2_is_allowed(dev, state)) {
+ /* Yes, re-enable the distributor and LP3. */
+ tegra_gic_dist_enable();
+ tegra3_lp3_fall_back(dev);
+ return;
+ }
+
+ /* Save and disable the affinity setting for the other
+ CPUs and route all interrupts to CPU0. */
+ tegra_gic_disable_affinity();
+
+ /* Re-enable the distributor. */
+ tegra_gic_dist_enable();
+
+ /* LP2 initial targeted wake time */
+ wake_time = ktime_to_us(entry_time) + request;
+
+ /* CPU0 must wake up before any of the other CPUs. */
+ smp_rmb();
+ for (i = 1; i < CONFIG_NR_CPUS; i++)
+ wake_time = min_t(u64, wake_time,
+ tegra_cpu_wake_by_time[i]);
+
+ /* LP2 actual targeted wake time */
+ request = wake_time - ktime_to_us(entry_time);
+ BUG_ON(wake_time < 0LL);
+ }
+#endif
+
+ if (request > state->target_residency) {
+ u64 sleep_time = request - tegra_lp2_exit_latency;
+
+ bin = time_to_bin(request / 1000);
+ idle_stats.tear_down_count[cpu_number(dev->cpu)]++;
+ idle_stats.lp2_count++;
+ idle_stats.lp2_count_bin[bin]++;
+
+ trace_power_start(POWER_CSTATE, 2, dev->cpu);
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ if (!is_lp_cluster())
+ tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
+
+ if (tegra_idle_lp2_last(sleep_time, 0) == 0)
+ sleep_completed = true;
+ else {
+ int irq = tegra_gic_pending_interrupt();
+ idle_stats.lp2_int_count[irq]++;
+ }
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ exit_time = ktime_get();
+ if (!is_lp_cluster())
+ tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);
+ } else
+ exit_time = ktime_get();
+
+
+#ifdef CONFIG_SMP
+ if (!is_lp_cluster() && (num_online_cpus() > 1)) {
+
+ /* Disable the distributor. */
+ tegra_gic_dist_disable();
+
+ /* Restore the other CPU's interrupt affinity. */
+ tegra_gic_restore_affinity();
+
+ /* Re-enable the distributor. */
+ tegra_gic_dist_enable();
+ }
+#endif
+
+ if (sleep_completed) {
+ /*
+ * Stayed in LP2 for the full time until the next tick,
+ * adjust the exit latency based on measurement
+ */
+ unsigned int offset = ktime_to_us(
+ ktime_sub(exit_time, entry_time))
+ - request;
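+ /* Blend 1/16 of the measured overshoot into the exit-latency estimate, capped at 10 ms below. */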
+ unsigned int latency = tegra_lp2_exit_latency + offset / 16;
+ latency = clamp(latency, 0, 10000);
+ tegra_lp2_exit_latency = latency;
+ smp_wmb();
+
+ idle_stats.lp2_completed_count++;
+ idle_stats.lp2_completed_count_bin[bin]++;
+ idle_stats.in_lp2_time[cpu_number(dev->cpu)] +=
+ ktime_to_us(ktime_sub(exit_time, entry_time));
+
+ pr_debug("%d %lld %d %d\n", request,
+ ktime_to_us(ktime_sub(exit_time, entry_time)),
+ offset, bin);
+ }
+}
+
+static void tegra3_idle_enter_lp2_cpu_n(struct cpuidle_device *dev,
+ struct cpuidle_state *state, unsigned int request)
+{
+#ifdef CONFIG_SMP
+ ktime_t entry_time;
+ u32 twd_cnt;
+ u32 twd_ctrl = readl(twd_base + TWD_TIMER_CONTROL);
+ unsigned long twd_rate = clk_get_rate(twd_clk);
+
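+ /* If the local (TWD) timer is armed with its interrupt enabled, bound the request by its remaining count converted to microseconds. */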
+ if ((twd_ctrl & TWD_TIMER_CONTROL_ENABLE) &&
+ (twd_ctrl & TWD_TIMER_CONTROL_IT_ENABLE)) {
+ twd_cnt = readl(twd_base + TWD_TIMER_COUNTER);
+ request = div_u64((u64)twd_cnt * 1000000, twd_rate);
+ }
+
+ if (request < tegra_lp2_exit_latency) {
+ /*
+ * Not enough time left to enter LP2
+ */
+ tegra3_lp3_fall_back(dev);
+ return;
+ }
+
+ idle_stats.tear_down_count[cpu_number(dev->cpu)]++;
+
+ trace_power_start(POWER_CSTATE, 2, dev->cpu);
+
+ entry_time = ktime_get();
+
+ /* Save time this CPU must be awakened by. */
+ tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(ktime_get()) + request;
+ smp_wmb();
+
+ tegra3_sleep_cpu_secondary(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+
+ tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;
+
+ idle_stats.in_lp2_time[cpu_number(dev->cpu)] +=
+ ktime_to_us(ktime_sub(ktime_get(), entry_time));
+#endif
+}
+
+void tegra3_idle_lp2(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ unsigned int request = ktime_to_us(tick_nohz_get_sleep_length());
+ bool last_cpu = tegra_set_cpu_in_lp2(dev->cpu);
+
+ cpu_pm_enter();
+
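+ /* Only the last CPU to idle, and only if it is CPU0, may power down the whole cluster; any other CPU takes the per-CPU LP2 path. */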
+ if (last_cpu && (dev->cpu == 0))
+ tegra3_idle_enter_lp2_cpu_0(dev, state, request);
+ else
+ tegra3_idle_enter_lp2_cpu_n(dev, state, request);
+
+ cpu_pm_exit();
+ tegra_clear_cpu_in_lp2(dev->cpu);
+}
+
+int tegra3_cpuidle_init_soc(void)
+{
+ cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");
+ twd_clk = tegra_get_clock_by_name("twd");
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int tegra3_lp2_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ int i;
+ seq_printf(s, " cpu0 cpu1 cpu2 cpu3 cpulp\n");
+ seq_printf(s, "-----------------------------------------------------------------------------\n");
+ seq_printf(s, "cpu ready: %8u %8u %8u %8u %8u\n",
+ idle_stats.cpu_ready_count[0],
+ idle_stats.cpu_ready_count[1],
+ idle_stats.cpu_ready_count[2],
+ idle_stats.cpu_ready_count[3],
+ idle_stats.cpu_ready_count[4]);
+ seq_printf(s, "tear down: %8u %8u %8u %8u %8u\n",
+ idle_stats.tear_down_count[0],
+ idle_stats.tear_down_count[1],
+ idle_stats.tear_down_count[2],
+ idle_stats.tear_down_count[3],
+ idle_stats.tear_down_count[4]);
+ seq_printf(s, "lp2: %8u\n", idle_stats.lp2_count);
+ seq_printf(s, "lp2 completed: %8u %7u%%\n",
+ idle_stats.lp2_completed_count,
+ idle_stats.lp2_completed_count * 100 /
+ (idle_stats.lp2_count ?: 1));
+
+ seq_printf(s, "\n");
+ seq_printf(s, "cpu ready time: %8llu %8llu %8llu %8llu %8llu ms\n",
+ div64_u64(idle_stats.cpu_wants_lp2_time[0], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[1], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[2], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[3], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[4], 1000));
+
+ seq_printf(s, "lp2 time: %8llu %8llu %8llu %8llu %8llu ms\n",
+ div64_u64(idle_stats.in_lp2_time[0], 1000),
+ div64_u64(idle_stats.in_lp2_time[1], 1000),
+ div64_u64(idle_stats.in_lp2_time[2], 1000),
+ div64_u64(idle_stats.in_lp2_time[3], 1000),
+ div64_u64(idle_stats.in_lp2_time[4], 1000));
+
+ seq_printf(s, "lp2 %%: %7d%% %7d%% %7d%% %7d%% %7d%%\n",
+ (int)(idle_stats.cpu_wants_lp2_time[0] ?
+ div64_u64(idle_stats.in_lp2_time[0] * 100,
+ idle_stats.cpu_wants_lp2_time[0]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[1] ?
+ div64_u64(idle_stats.in_lp2_time[1] * 100,
+ idle_stats.cpu_wants_lp2_time[1]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[2] ?
+ div64_u64(idle_stats.in_lp2_time[2] * 100,
+ idle_stats.cpu_wants_lp2_time[2]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[3] ?
+ div64_u64(idle_stats.in_lp2_time[3] * 100,
+ idle_stats.cpu_wants_lp2_time[3]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[4] ?
+ div64_u64(idle_stats.in_lp2_time[4] * 100,
+ idle_stats.cpu_wants_lp2_time[4]) : 0));
+ seq_printf(s, "\n");
+
+ seq_printf(s, "%19s %8s %8s %8s\n", "", "lp2", "comp", "%");
+ seq_printf(s, "-------------------------------------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (idle_stats.lp2_count_bin[bin] == 0)
+ continue;
+ seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
+ 1 << (bin - 1), 1 << bin,
+ idle_stats.lp2_count_bin[bin],
+ idle_stats.lp2_completed_count_bin[bin],
+ idle_stats.lp2_completed_count_bin[bin] * 100 /
+ idle_stats.lp2_count_bin[bin]);
+ }
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%3s %20s %6s %10s\n",
+ "int", "name", "count", "last count");
+ seq_printf(s, "--------------------------------------------\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ if (idle_stats.lp2_int_count[i] == 0)
+ continue;
+ seq_printf(s, "%3d %20s %6d %10d\n",
+ i, irq_to_desc(i)->action ?
+ irq_to_desc(i)->action->name ?: "???" : "???",
+ idle_stats.lp2_int_count[i],
+ idle_stats.lp2_int_count[i] -
+ idle_stats.last_lp2_int_count[i]);
+ idle_stats.last_lp2_int_count[i] = idle_stats.lp2_int_count[i];
+ }
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-tegra/cpuidle.c b/arch/arm/mach-tegra/cpuidle.c
new file mode 100644
index 000000000000..91d140141a50
--- /dev/null
+++ b/arch/arm/mach-tegra/cpuidle.c
@@ -0,0 +1,315 @@
+/*
+ * arch/arm/mach-tegra/cpuidle.c
+ *
+ * CPU idle driver for Tegra CPUs
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ * Copyright (c) 2011 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+
+#include <asm/cpu_pm.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include <trace/events/power.h>
+
+#include "cpuidle.h"
+#include "pm.h"
+#include "sleep.h"
+
+unsigned int tegra_lp2_exit_latency;
+static int tegra_lp2_power_off_time;
+static unsigned int tegra_lp2_min_residency;
+
+struct cpuidle_driver tegra_idle = {
+ .name = "tegra_idle",
+ .owner = THIS_MODULE,
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device *, idle_devices);
+
+static int tegra_idle_enter_lp3(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ ktime_t enter, exit;
+ s64 us;
+
+ trace_power_start(POWER_CSTATE, 1, dev->cpu);
+
+ local_irq_disable();
+ local_fiq_disable();
+
+ enter = ktime_get();
+
+ tegra_cpu_wfi();
+
+ exit = ktime_sub(ktime_get(), enter);
+ us = ktime_to_us(exit);
+
+ local_fiq_enable();
+ local_irq_enable();
+ return (int)us;
+}
+
+static bool lp2_in_idle __read_mostly = false;
+
+#ifdef CONFIG_PM_SLEEP
+static bool lp2_in_idle_modifiable __read_mostly = true;
+static bool lp2_disabled_by_suspend;
+
+void tegra_lp2_in_idle(bool enable)
+{
+ /* If LP2 in idle is permanently disabled it can't be re-enabled. */
+ if (lp2_in_idle_modifiable) {
+ lp2_in_idle = enable;
+ lp2_in_idle_modifiable = enable;
+ if (!enable)
+ pr_warn("LP2 in idle disabled\n");
+ }
+}
+
+static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ ktime_t enter, exit;
+ s64 us;
+
+ if (!lp2_in_idle || lp2_disabled_by_suspend ||
+ !tegra_lp2_is_allowed(dev, state))
+ return tegra_idle_enter_lp3(dev, state);
+
+ local_irq_disable();
+ enter = ktime_get();
+
+ tegra_cpu_idle_stats_lp2_ready(dev->cpu);
+ tegra_idle_lp2(dev, state);
+
+ exit = ktime_sub(ktime_get(), enter);
+ us = ktime_to_us(exit);
+
+ local_irq_enable();
+
+ /* cpu clockevents may have been reset by powerdown */
+ hrtimer_peek_ahead_timers();
+
+ smp_rmb();
+
+ /* Update LP2 latency provided no fall back to LP3 */
+ if (state == dev->last_state) {
+ state->exit_latency = tegra_lp2_exit_latency;
+ state->target_residency = tegra_lp2_exit_latency +
+ tegra_lp2_power_off_time;
+ if (state->target_residency < tegra_lp2_min_residency)
+ state->target_residency = tegra_lp2_min_residency;
+ }
+ tegra_cpu_idle_stats_lp2_time(dev->cpu, us);
+
+ return (int)us;
+}
+#endif
+
+static int tegra_idle_prepare(struct cpuidle_device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ if (lp2_in_idle)
+ dev->states[1].flags &= ~CPUIDLE_FLAG_IGNORE;
+ else
+ dev->states[1].flags |= CPUIDLE_FLAG_IGNORE;
+#endif
+
+ return 0;
+}
+
+static int tegra_cpuidle_register_device(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+ struct cpuidle_state *state;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->state_count = 0;
+ dev->cpu = cpu;
+
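+ /* State 0 (LP3): flow-controlled WFI, registered as the safe fallback state. */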
+ state = &dev->states[0];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "LP3");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU flow-controlled");
+ state->exit_latency = 10;
+ state->target_residency = 10;
+ state->power_usage = 600;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ state->enter = tegra_idle_enter_lp3;
+ dev->safe_state = state;
+ dev->state_count++;
+
+#ifdef CONFIG_PM_SLEEP
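+ /* State 1 (LP2): CPU power-gate; latency and residency are seeded from the power-good/power-off times and refined at run time. */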
+ state = &dev->states[1];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "LP2");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power-gate");
+ state->exit_latency = tegra_cpu_power_good_time();
+
+ state->target_residency = tegra_cpu_power_off_time() +
+ tegra_cpu_power_good_time();
+ if (state->target_residency < tegra_lp2_min_residency)
+ state->target_residency = tegra_lp2_min_residency;
+ state->power_usage = 0;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ state->enter = tegra_idle_enter_lp2;
+
+ dev->power_specified = 1;
+ dev->safe_state = state;
+ dev->state_count++;
+#endif
+
+ dev->prepare = tegra_idle_prepare;
+
+ if (cpuidle_register_device(dev)) {
+ pr_err("CPU%u: failed to register idle device\n", cpu);
+ kfree(dev);
+ return -EIO;
+ }
+ per_cpu(idle_devices, cpu) = dev;
+ return 0;
+}
+
+static int tegra_cpuidle_pm_notify(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+#ifdef CONFIG_PM_SLEEP
+ if (event == PM_SUSPEND_PREPARE)
+ lp2_disabled_by_suspend = true;
+ else if (event == PM_POST_SUSPEND)
+ lp2_disabled_by_suspend = false;
+#endif
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpuidle_pm_notifier = {
+ .notifier_call = tegra_cpuidle_pm_notify,
+};
+
+static int __init tegra_cpuidle_init(void)
+{
+ unsigned int cpu;
+ int ret;
+
+ ret = cpuidle_register_driver(&tegra_idle);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_PM_SLEEP
+ tegra_lp2_min_residency = tegra_cpu_lp2_min_residency();
+ tegra_lp2_exit_latency = tegra_cpu_power_good_time();
+ tegra_lp2_power_off_time = tegra_cpu_power_off_time();
+
+ ret = tegra_cpuidle_init_soc();
+ if (ret)
+ return ret;
+#endif
+
+ for_each_possible_cpu(cpu) {
+ if (tegra_cpuidle_register_device(cpu))
+ pr_err("CPU%u: error initializing idle loop\n", cpu);
+ }
+
+ register_pm_notifier(&tegra_cpuidle_pm_notifier);
+ return 0;
+}
+
+static void __exit tegra_cpuidle_exit(void)
+{
+ unregister_pm_notifier(&tegra_cpuidle_pm_notifier);
+ cpuidle_unregister_driver(&tegra_idle);
+}
+
+module_init(tegra_cpuidle_init);
+module_exit(tegra_cpuidle_exit);
+
+static int lp2_in_idle_set(const char *arg, const struct kernel_param *kp)
+{
+#ifdef CONFIG_PM_SLEEP
+ int ret;
+
+ /* If LP2 in idle is permanently disabled it can't be re-enabled. */
+ if (lp2_in_idle_modifiable) {
+ ret = param_set_bool(arg, kp);
+ return ret;
+ }
+#endif
+ return -ENODEV;
+}
+
+static int lp2_in_idle_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops lp2_in_idle_ops = {
+ .set = lp2_in_idle_set,
+ .get = lp2_in_idle_get,
+};
+module_param_cb(lp2_in_idle, &lp2_in_idle_ops, &lp2_in_idle, 0644);
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PM_SLEEP)
+static int tegra_lp2_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_lp2_debug_show, inode->i_private);
+}
+
+static const struct file_operations tegra_lp2_debug_ops = {
+ .open = tegra_lp2_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_cpuidle_debug_init(void)
+{
+ struct dentry *dir;
+ struct dentry *d;
+
+ dir = debugfs_create_dir("cpuidle", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ d = debugfs_create_file("lp2", S_IRUGO, dir, NULL,
+ &tegra_lp2_debug_ops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+late_initcall(tegra_cpuidle_debug_init);
+#endif
diff --git a/arch/arm/mach-tegra/cpuidle.h b/arch/arm/mach-tegra/cpuidle.h
new file mode 100644
index 000000000000..9055250251ee
--- /dev/null
+++ b/arch/arm/mach-tegra/cpuidle.h
@@ -0,0 +1,120 @@
+/*
+ * arch/arm/mach-tegra/cpuidle.h
+ *
+ * Declarations for power state transition code
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_CPUIDLE_H
+#define __MACH_TEGRA_CPUIDLE_H
+
+#include <linux/cpuidle.h>
+
+#ifdef CONFIG_PM_SLEEP
+
+extern unsigned int tegra_lp2_exit_latency;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra2_idle_lp2(struct cpuidle_device *dev, struct cpuidle_state *state);
+void tegra2_cpu_idle_stats_lp2_ready(unsigned int cpu);
+void tegra2_cpu_idle_stats_lp2_time(unsigned int cpu, s64 us);
+bool tegra2_lp2_is_allowed(struct cpuidle_device *dev,
+ struct cpuidle_state *state);
+#ifdef CONFIG_DEBUG_FS
+int tegra2_lp2_debug_show(struct seq_file *s, void *data);
+#endif
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra3_idle_lp2(struct cpuidle_device *dev, struct cpuidle_state *state);
+void tegra3_cpu_idle_stats_lp2_ready(unsigned int cpu);
+void tegra3_cpu_idle_stats_lp2_time(unsigned int cpu, s64 us);
+bool tegra3_lp2_is_allowed(struct cpuidle_device *dev,
+ struct cpuidle_state *state);
+int tegra3_cpuidle_init_soc(void);
+#ifdef CONFIG_DEBUG_FS
+int tegra3_lp2_debug_show(struct seq_file *s, void *data);
+#endif
+#endif
+
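+/* The inline wrappers below dispatch to whichever SoC implementation was selected at build time. */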
+static inline int tegra_cpuidle_init_soc(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ return 0;
+#else
+ return tegra3_cpuidle_init_soc();
+#endif
+}
+
+static inline void tegra_cpu_idle_stats_lp2_ready(unsigned int cpu)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_cpu_idle_stats_lp2_ready(cpu);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ tegra3_cpu_idle_stats_lp2_ready(cpu);
+#endif
+}
+
+static inline void tegra_cpu_idle_stats_lp2_time(unsigned int cpu, s64 us)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_cpu_idle_stats_lp2_time(cpu, us);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ tegra3_cpu_idle_stats_lp2_time(cpu, us);
+#endif
+}
+
+static inline void tegra_idle_lp2(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_idle_lp2(dev, state);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ tegra3_idle_lp2(dev, state);
+#endif
+}
+
+static inline bool tegra_lp2_is_allowed(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ return tegra2_lp2_is_allowed(dev, state);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ return tegra3_lp2_is_allowed(dev, state);
+#endif
+}
+
+#ifdef CONFIG_DEBUG_FS
+static inline int tegra_lp2_debug_show(struct seq_file *s, void *data)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ return tegra2_lp2_debug_show(s, data);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ return tegra3_lp2_debug_show(s, data);
+#endif
+}
+#endif
+#endif /* CONFIG_PM_SLEEP */
+
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_PM_SLEEP)
+void tegra_lp2_in_idle(bool enable);
+#else
+static inline void tegra_lp2_in_idle(bool enable) {}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/csi.c b/arch/arm/mach-tegra/csi.c
new file mode 100644
index 000000000000..3b26c7ae2233
--- /dev/null
+++ b/arch/arm/mach-tegra/csi.c
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/mach-tegra/csi.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+#include <mach/csi.h>
+
+#include "clock.h"
+
+static struct clk *vi_clk;
+static struct clk *csi_clk;
+
+int tegra_vi_csi_writel(u32 val, u32 offset)
+{
+ if (vi_clk == NULL) {
+ vi_clk = tegra_get_clock_by_name("vi");
+ if (IS_ERR_OR_NULL(vi_clk)) {
+ pr_err("vi: can't get vi clock\n");
+ return -EINVAL;
+ }
+ }
+ clk_enable(vi_clk);
+
+ if (csi_clk == NULL) {
+ csi_clk = tegra_get_clock_by_name("csi");
+ if (IS_ERR_OR_NULL(csi_clk)) {
+ pr_err("csi: can't get csi clock\n");
+ clk_disable(vi_clk); /* don't leak the vi clock enable on the error path */
+ return -EINVAL;
+ }
+ }
+ clk_enable(csi_clk);
+
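+ /* 'offset' is a 32-bit word index into the VI/CSI register aperture. */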
+ writel(val, IO_TO_VIRT(TEGRA_VI_BASE) + offset * 4);
+
+ clk_disable(csi_clk);
+ clk_disable(vi_clk);
+ return 0;
+}
+
+int tegra_vi_csi_readl(u32 offset, u32 *val)
+{
+ if (vi_clk == NULL) {
+ vi_clk = tegra_get_clock_by_name("vi");
+ if (IS_ERR_OR_NULL(vi_clk)) {
+ pr_err("vi: can't get vi clock\n");
+ return -EINVAL;
+ }
+ }
+ clk_enable(vi_clk);
+
+ if (csi_clk == NULL) {
+ csi_clk = tegra_get_clock_by_name("csi");
+ if (IS_ERR_OR_NULL(csi_clk)) {
+ pr_err("csi: can't get csi clock\n");
+ clk_disable(vi_clk); /* don't leak the vi clock enable on the error path */
+ return -EINVAL;
+ }
+ }
+ clk_enable(csi_clk);
+
+ *val = readl(IO_TO_VIRT(TEGRA_VI_BASE) + offset * 4);
+
+ clk_disable(csi_clk);
+ clk_disable(vi_clk);
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/delay.S b/arch/arm/mach-tegra/delay.S
new file mode 100644
index 000000000000..76bfafa76ebe
--- /dev/null
+++ b/arch/arm/mach-tegra/delay.S
@@ -0,0 +1,52 @@
+/*
+ * arch/arm/mach-tegra/delay.S
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "asm_macros.h"
+
+ .text
+
+ENTRY(__udelay)
+ENTRY(__const_udelay)
+ mov32 r3, (IO_PPSB_VIRT + TEGRA_TMRUS_BASE - IO_PPSB_PHYS)
+ ldr r1, [r3]
+
+/* r0 - usecs to wait
+ * r1 - initial value of the counter
+ */
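+@ Poll the free-running microsecond counter until the elapsed count exceeds the requested delay.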
+loop:
+ ldr r2, [r3]
+ sub r2, r2, r1
+ cmp r2, r0
+ bls loop
+ mov pc, lr
+ENDPROC(__const_udelay)
+ENDPROC(__udelay)
+
+
+@ Delay routine
+ENTRY(__delay)
+ subs r0, r0, #1
+ bhi __delay
+ mov pc, lr
+ENDPROC(__delay)
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 57e35d20c24c..c301654426b5 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -5,6 +5,8 @@
* Colin Cross <ccross@android.com>
* Erik Gilling <ccross@android.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -24,12 +26,21 @@
#include <linux/serial_8250.h>
#include <linux/i2c-tegra.h>
#include <linux/platform_data/tegra_usb.h>
+#include <linux/tegra_avp.h>
+#include <linux/nvhost.h>
#include <asm/pmu.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/dma.h>
#include <mach/usb_phy.h>
#include "gpio-names.h"
+#include "tegra_smmu.h"
+
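+/* Rate of the UART source clock, used as the 8250 uartclk value below. */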
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define UART_SOURCE_RATE 408000000
+#else
+#define UART_SOURCE_RATE 216000000
+#endif
static struct resource i2c_resource1[] = {
[0] = {
@@ -70,6 +81,7 @@ static struct resource i2c_resource3[] = {
},
};
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
static struct resource i2c_resource4[] = {
[0] = {
.start = INT_DVC,
@@ -83,20 +95,48 @@ static struct resource i2c_resource4[] = {
},
};
+#else
+static struct resource i2c_resource4[] = {
+ [0] = {
+ .start = INT_I2C4,
+ .end = INT_I2C4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C4_BASE,
+ .end = TEGRA_I2C4_BASE + TEGRA_I2C4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource5[] = {
+ [0] = {
+ .start = INT_I2C5,
+ .end = INT_I2C5,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C5_BASE,
+ .end = TEGRA_I2C5_BASE + TEGRA_I2C5_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+#endif
+
static struct tegra_i2c_platform_data tegra_i2c1_platform_data = {
- .bus_clk_rate = 400000,
+ .bus_clk_rate = { 400000 },
};
static struct tegra_i2c_platform_data tegra_i2c2_platform_data = {
- .bus_clk_rate = 400000,
+ .bus_clk_rate = { 400000 },
};
static struct tegra_i2c_platform_data tegra_i2c3_platform_data = {
- .bus_clk_rate = 400000,
+ .bus_clk_rate = { 400000 },
};
static struct tegra_i2c_platform_data tegra_dvc_platform_data = {
- .bus_clk_rate = 400000,
+ .bus_clk_rate = { 400000 },
};
struct platform_device tegra_i2c_device1 = {
@@ -139,10 +179,22 @@ struct platform_device tegra_i2c_device4 = {
},
};
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+struct platform_device tegra_i2c_device5 = {
+ .name = "tegra-i2c",
+ .id = 4,
+ .resource = i2c_resource5,
+ .num_resources = ARRAY_SIZE(i2c_resource5),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+#endif
+
static struct resource spi_resource1[] = {
[0] = {
- .start = INT_S_LINK1,
- .end = INT_S_LINK1,
+ .start = INT_SPI_1,
+ .end = INT_SPI_1,
.flags = IORESOURCE_IRQ,
},
[1] = {
@@ -190,6 +242,33 @@ static struct resource spi_resource4[] = {
.flags = IORESOURCE_MEM,
},
};
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static struct resource spi_resource5[] = {
+ [0] = {
+ .start = INT_SPI_5,
+ .end = INT_SPI_5,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI5_BASE,
+ .end = TEGRA_SPI5_BASE + TEGRA_SPI5_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource6[] = {
+ [0] = {
+ .start = INT_SPI_6,
+ .end = INT_SPI_6,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI6_BASE,
+ .end = TEGRA_SPI6_BASE + TEGRA_SPI6_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+#endif
struct platform_device tegra_spi_device1 = {
.name = "spi_tegra",
@@ -230,7 +309,118 @@ struct platform_device tegra_spi_device4 = {
.coherent_dma_mask = 0xffffffff,
},
};
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+struct platform_device tegra_spi_device5 = {
+ .name = "spi_tegra",
+ .id = 4,
+ .resource = spi_resource5,
+ .num_resources = ARRAY_SIZE(spi_resource5),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+struct platform_device tegra_spi_device6 = {
+ .name = "spi_tegra",
+ .id = 5,
+ .resource = spi_resource6,
+ .num_resources = ARRAY_SIZE(spi_resource6),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+#endif
+
+struct platform_device tegra_spi_slave_device1 = {
+ .name = "spi_slave_tegra",
+ .id = 0,
+ .resource = spi_resource1,
+ .num_resources = ARRAY_SIZE(spi_resource1),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_slave_device2 = {
+ .name = "spi_slave_tegra",
+ .id = 1,
+ .resource = spi_resource2,
+ .num_resources = ARRAY_SIZE(spi_resource2),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_slave_device3 = {
+ .name = "spi_slave_tegra",
+ .id = 2,
+ .resource = spi_resource3,
+ .num_resources = ARRAY_SIZE(spi_resource3),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_slave_device4 = {
+ .name = "spi_slave_tegra",
+ .id = 3,
+ .resource = spi_resource4,
+ .num_resources = ARRAY_SIZE(spi_resource4),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+struct platform_device tegra_spi_slave_device5 = {
+ .name = "spi_slave_tegra",
+ .id = 4,
+ .resource = spi_resource5,
+ .num_resources = ARRAY_SIZE(spi_resource5),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_slave_device6 = {
+ .name = "spi_slave_tegra",
+ .id = 5,
+ .resource = spi_resource6,
+ .num_resources = ARRAY_SIZE(spi_resource6),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+#endif
+
+static struct resource resources_nor[] = {
+ [0] = {
+ .start = INT_SNOR,
+ .end = INT_SNOR,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ /* Map SNOR Controller */
+ .start = TEGRA_SNOR_BASE,
+ .end = TEGRA_SNOR_BASE + TEGRA_SNOR_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ /* Map the NOR flash aperture */
+ .start = TEGRA_NOR_FLASH_BASE,
+ .end = TEGRA_NOR_FLASH_BASE + TEGRA_NOR_FLASH_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+struct platform_device tegra_nor_device = {
+ .name = "tegra-nor",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(resources_nor),
+ .resource = resources_nor,
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
static struct resource sdhci_resource1[] = {
[0] = {
@@ -284,6 +474,16 @@ static struct resource sdhci_resource4[] = {
},
};
+struct platform_device tegra_pci_device = {
+ .name = "tegra-pcie",
+ .id = 0,
+ .resource = 0,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
/* board files should fill in platform_data register the devices themselvs.
* See board-harmony.c for an example
*/
@@ -425,6 +625,18 @@ static struct resource tegra_pmu_resources[] = {
.end = INT_CPU1_PMU_INTR,
.flags = IORESOURCE_IRQ,
},
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ [2] = {
+ .start = INT_CPU2_PMU_INTR,
+ .end = INT_CPU2_PMU_INTR,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = INT_CPU3_PMU_INTR,
+ .end = INT_CPU3_PMU_INTR,
+ .flags = IORESOURCE_IRQ,
+ },
+#endif
};
struct platform_device tegra_pmu_device = {
@@ -549,6 +761,131 @@ struct platform_device tegra_uarte_device = {
},
};
+static struct plat_serial8250_port debug_uarta_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTA_BASE),
+ .mapbase = TEGRA_UARTA_BASE,
+ .irq = INT_UARTA,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = UART_SOURCE_RATE,
+ },
+ {
+ .flags = 0,
+ },
+};
+
+static struct plat_serial8250_port debug_uartb_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTB_BASE),
+ .mapbase = TEGRA_UARTB_BASE,
+ .irq = INT_UARTB,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = UART_SOURCE_RATE,
+ },
+ {
+ .flags = 0,
+ },
+};
+
+static struct plat_serial8250_port debug_uartc_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTC_BASE),
+ .mapbase = TEGRA_UARTC_BASE,
+ .irq = INT_UARTC,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = UART_SOURCE_RATE,
+ },
+ {
+ .flags = 0,
+ },
+};
+
+static struct plat_serial8250_port debug_uartd_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTD_BASE),
+ .mapbase = TEGRA_UARTD_BASE,
+ .irq = INT_UARTD,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = UART_SOURCE_RATE,
+ },
+ {
+ .flags = 0,
+ },
+};
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static struct plat_serial8250_port debug_uarte_platform_data[] = {
+ {
+ .membase = IO_ADDRESS(TEGRA_UARTE_BASE),
+ .mapbase = TEGRA_UARTE_BASE,
+ .irq = INT_UARTE,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .type = PORT_TEGRA,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = UART_SOURCE_RATE,
+ },
+ {
+ .flags = 0,
+ },
+};
+#endif
+
+struct platform_device debug_uarta_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uarta_platform_data,
+ },
+};
+
+struct platform_device debug_uartb_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uartb_platform_data,
+ },
+};
+
+struct platform_device debug_uartc_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uartc_platform_data,
+ },
+};
+
+struct platform_device debug_uartd_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uartd_platform_data,
+ },
+};
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+struct platform_device debug_uarte_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uarte_platform_data,
+ },
+};
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
static struct resource i2s_resource1[] = {
[0] = {
.start = INT_I2S1,
@@ -567,6 +904,13 @@ static struct resource i2s_resource1[] = {
}
};
+struct platform_device tegra_i2s_device1 = {
+ .name = "tegra20-i2s",
+ .id = 0,
+ .resource = i2s_resource1,
+ .num_resources = ARRAY_SIZE(i2s_resource1),
+};
+
static struct resource i2s_resource2[] = {
[0] = {
.start = INT_I2S2,
@@ -585,36 +929,734 @@ static struct resource i2s_resource2[] = {
}
};
-struct platform_device tegra_i2s_device1 = {
- .name = "tegra-i2s",
+struct platform_device tegra_i2s_device2 = {
+ .name = "tegra20-i2s",
+ .id = 1,
+ .resource = i2s_resource2,
+ .num_resources = ARRAY_SIZE(i2s_resource2),
+};
+#else
+static struct resource i2s_resource0[] = {
+ [0] = {
+ .start = TEGRA_I2S0_BASE,
+ .end = TEGRA_I2S0_BASE + TEGRA_I2S0_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_i2s_device0 = {
+ .name = "tegra30-i2s",
.id = 0,
+ .resource = i2s_resource0,
+ .num_resources = ARRAY_SIZE(i2s_resource0),
+};
+
+static struct resource i2s_resource1[] = {
+ [0] = {
+ .start = TEGRA_I2S1_BASE,
+ .end = TEGRA_I2S1_BASE + TEGRA_I2S1_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_i2s_device1 = {
+ .name = "tegra30-i2s",
+ .id = 1,
.resource = i2s_resource1,
.num_resources = ARRAY_SIZE(i2s_resource1),
};
+static struct resource i2s_resource2[] = {
+ [0] = {
+ .start = TEGRA_I2S2_BASE,
+ .end = TEGRA_I2S2_BASE + TEGRA_I2S2_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
struct platform_device tegra_i2s_device2 = {
- .name = "tegra-i2s",
- .id = 1,
+ .name = "tegra30-i2s",
+ .id = 2,
.resource = i2s_resource2,
.num_resources = ARRAY_SIZE(i2s_resource2),
};
-static struct resource tegra_das_resources[] = {
+static struct resource i2s_resource3[] = {
[0] = {
- .start = TEGRA_APB_MISC_DAS_BASE,
- .end = TEGRA_APB_MISC_DAS_BASE + TEGRA_APB_MISC_DAS_SIZE - 1,
- .flags = IORESOURCE_MEM,
+ .start = TEGRA_I2S3_BASE,
+ .end = TEGRA_I2S3_BASE + TEGRA_I2S3_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_i2s_device3 = {
+ .name = "tegra30-i2s",
+ .id = 3,
+ .resource = i2s_resource3,
+ .num_resources = ARRAY_SIZE(i2s_resource3),
+};
+
+static struct resource i2s_resource4[] = {
+ [0] = {
+ .start = TEGRA_I2S4_BASE,
+ .end = TEGRA_I2S4_BASE + TEGRA_I2S4_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_i2s_device4 = {
+ .name = "tegra30-i2s",
+ .id = 4,
+ .resource = i2s_resource4,
+ .num_resources = ARRAY_SIZE(i2s_resource4),
+};
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static struct resource spdif_resource[] = {
+ [0] = {
+ .start = INT_SPDIF,
+ .end = INT_SPDIF,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_DMA_REQ_SEL_SPD_I,
+ .end = TEGRA_DMA_REQ_SEL_SPD_I,
+ .flags = IORESOURCE_DMA
},
+ [2] = {
+ .start = TEGRA_SPDIF_BASE,
+ .end = TEGRA_SPDIF_BASE + TEGRA_SPDIF_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
};
-struct platform_device tegra_das_device = {
- .name = "tegra-das",
+struct platform_device tegra_spdif_device = {
+ .name = "tegra20-spdif",
+ .id = -1,
+ .resource = spdif_resource,
+ .num_resources = ARRAY_SIZE(spdif_resource),
+};
+#else
+static struct resource spdif_resource[] = {
+ [0] = {
+ .start = TEGRA_SPDIF_BASE,
+ .end = TEGRA_SPDIF_BASE + TEGRA_SPDIF_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_spdif_device = {
+ .name = "tegra30-spdif",
+ .id = -1,
+ .resource = spdif_resource,
+ .num_resources = ARRAY_SIZE(spdif_resource),
+};
+#endif
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static struct resource ahub_resource[] = {
+ [0] = {
+ .start = TEGRA_APBIF0_BASE,
+ .end = TEGRA_APBIF3_BASE + TEGRA_APBIF3_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = TEGRA_AHUB_BASE,
+ .end = TEGRA_AHUB_BASE + TEGRA_AHUB_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_ahub_device = {
+ .name = "tegra30-ahub",
+ .id = -1,
+ .resource = ahub_resource,
+ .num_resources = ARRAY_SIZE(ahub_resource),
+};
+
+static struct resource dam_resource0[] = {
+ [0] = {
+ .start = TEGRA_DAM0_BASE,
+ .end = TEGRA_DAM0_BASE + TEGRA_DAM0_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_dam_device0 = {
+ .name = "tegra30-dam",
+ .id = 0,
+ .resource = dam_resource0,
+ .num_resources = ARRAY_SIZE(dam_resource0),
+};
+
+static struct resource dam_resource1[] = {
+ [0] = {
+ .start = TEGRA_DAM1_BASE,
+ .end = TEGRA_DAM1_BASE + TEGRA_DAM1_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_dam_device1 = {
+ .name = "tegra30-dam",
+ .id = 1,
+ .resource = dam_resource1,
+ .num_resources = ARRAY_SIZE(dam_resource1),
+};
+
+static struct resource dam_resource2[] = {
+ [0] = {
+ .start = TEGRA_DAM2_BASE,
+ .end = TEGRA_DAM2_BASE + TEGRA_DAM2_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_dam_device2 = {
+ .name = "tegra30-dam",
+ .id = 2,
+ .resource = dam_resource2,
+ .num_resources = ARRAY_SIZE(dam_resource2),
+};
+
+static u64 tegra_hda_dma_mask = DMA_BIT_MASK(32);
+static struct resource hda_platform_resources[] = {
+ [0] = {
+ .start = TEGRA_HDA_BASE,
+ .end = TEGRA_HDA_BASE + TEGRA_HDA_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = INT_HDA,
+ .end = INT_HDA,
+ .flags = IORESOURCE_IRQ
+ },
+};
+
+struct platform_device tegra_hda_device = {
+ .name = "tegra30-hda",
.id = -1,
- .num_resources = ARRAY_SIZE(tegra_das_resources),
- .resource = tegra_das_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &tegra_hda_dma_mask,
+ },
+ .resource = hda_platform_resources,
+ .num_resources = ARRAY_SIZE(hda_platform_resources),
+};
+#endif
+
+struct platform_device spdif_dit_device = {
+ .name = "spdif-dit",
+ .id = 0,
+};
+
+struct platform_device bluetooth_dit_device = {
+ .name = "spdif-dit",
+ .id = 1,
+};
+
+struct platform_device baseband_dit_device = {
+ .name = "spdif-dit",
+ .id = 2,
};
struct platform_device tegra_pcm_device = {
.name = "tegra-pcm-audio",
.id = -1,
};
+
+static struct resource w1_resources[] = {
+ [0] = {
+ .start = INT_OWR,
+ .end = INT_OWR,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_OWR_BASE,
+ .end = TEGRA_OWR_BASE + TEGRA_OWR_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_w1_device = {
+ .name = "tegra_w1",
+ .id = -1,
+ .resource = w1_resources,
+ .num_resources = ARRAY_SIZE(w1_resources),
+};
+
+static struct resource tegra_udc_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 tegra_udc_dmamask = DMA_BIT_MASK(32);
+
+static struct fsl_usb2_platform_data tegra_udc_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_UTMI,
+};
+
+struct platform_device tegra_udc_device = {
+ .name = "fsl-tegra-udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = &tegra_udc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &tegra_udc_pdata,
+ },
+ .resource = tegra_udc_resources,
+ .num_resources = ARRAY_SIZE(tegra_udc_resources),
+};
+
+static struct resource tegra_otg_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_otg_device = {
+ .name = "tegra-otg",
+ .id = -1,
+ .resource = tegra_otg_resources,
+ .num_resources = ARRAY_SIZE(tegra_otg_resources),
+};
+
+#ifdef CONFIG_SATA_AHCI_TEGRA
+static u64 tegra_sata_dma_mask = DMA_BIT_MASK(32);
+
+static struct resource tegra_sata_resources[] = {
+ [0] = {
+ .start = TEGRA_SATA_BAR5_BASE,
+ .end = TEGRA_SATA_BAR5_BASE + TEGRA_SATA_BAR5_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = TEGRA_SATA_CONFIG_BASE,
+ .end = TEGRA_SATA_CONFIG_BASE + TEGRA_SATA_CONFIG_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = INT_SATA_CTL,
+ .end = INT_SATA_CTL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_sata_device = {
+ .name = "tegra-sata",
+ .id = 0,
+ .dev = {
+ .platform_data = 0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &tegra_sata_dma_mask,
+ },
+ .resource = tegra_sata_resources,
+ .num_resources = ARRAY_SIZE(tegra_sata_resources),
+};
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static struct resource das_resource[] = {
+ [0] = {
+ .start = TEGRA_APB_MISC_DAS_BASE,
+ .end = TEGRA_APB_MISC_DAS_BASE + TEGRA_APB_MISC_DAS_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_das_device = {
+ .name = "tegra20-das",
+ .id = -1,
+ .resource = das_resource,
+ .num_resources = ARRAY_SIZE(das_resource),
+};
+#endif
+
+#if defined(CONFIG_TEGRA_IOVMM_GART)
+static struct resource tegra_gart_resources[] = {
+ [0] = {
+ .name = "mc",
+ .flags = IORESOURCE_MEM,
+ .start = TEGRA_MC_BASE,
+ .end = TEGRA_MC_BASE + TEGRA_MC_SIZE - 1,
+ },
+ [1] = {
+ .name = "gart",
+ .flags = IORESOURCE_MEM,
+ .start = TEGRA_GART_BASE,
+ .end = TEGRA_GART_BASE + TEGRA_GART_SIZE - 1,
+ }
+};
+
+struct platform_device tegra_gart_device = {
+ .name = "tegra_gart",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_gart_resources),
+ .resource = tegra_gart_resources
+};
+#endif
+
+#if defined(CONFIG_TEGRA_IOVMM_SMMU)
+static struct resource tegra_smmu_resources[] = {
+ [0] = {
+ .name = "mc",
+ .flags = IORESOURCE_MEM,
+ .start = TEGRA_MC_BASE,
+ .end = TEGRA_MC_BASE + TEGRA_MC_SIZE - 1,
+ },
+ [1] = {
+ .name = "ahbarb",
+ .flags = IORESOURCE_MEM,
+ .start = TEGRA_AHB_ARB_BASE,
+ .end = TEGRA_AHB_ARB_BASE + TEGRA_AHB_ARB_SIZE - 1,
+ }
+};
+
+struct platform_device tegra_smmu_device = {
+ .name = "tegra_smmu",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_smmu_resources),
+ .resource = tegra_smmu_resources
+};
+
+
+static struct tegra_smmu_window tegra_smmu[] = {
+ [0] = {
+ .start = TEGRA_SMMU_BASE,
+ .end = TEGRA_SMMU_BASE + TEGRA_SMMU_SIZE - 1,
+ },
+};
+
+struct tegra_smmu_window *tegra_smmu_window(int wnum)
+{
+ return &tegra_smmu[wnum];
+}
+
+int tegra_smmu_window_count(void)
+{
+ return ARRAY_SIZE(tegra_smmu);
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define CLK_RESET_RST_SOURCE 0x0
+static struct resource tegra_wdt_resources[] = {
+ [0] = {
+ .start = TEGRA_CLK_RESET_BASE + CLK_RESET_RST_SOURCE,
+ .end = TEGRA_CLK_RESET_BASE + CLK_RESET_RST_SOURCE + 4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = TEGRA_TMR1_BASE,
+ .end = TEGRA_TMR1_BASE + TEGRA_TMR1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = INT_TMR1,
+ .end = INT_TMR1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+#else
+static struct resource tegra_wdt_resources[] = {
+ [0] = {
+ .start = TEGRA_WDT0_BASE,
+ .end = TEGRA_WDT0_BASE + TEGRA_WDT0_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = TEGRA_TMR10_BASE,
+ .end = TEGRA_TMR10_BASE + TEGRA_TMR10_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = INT_WDT_CPU,
+ .end = INT_WDT_CPU,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+#endif
+
+struct platform_device tegra_wdt_device = {
+ .name = "tegra_wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_wdt_resources),
+ .resource = tegra_wdt_resources,
+};
+
+static struct resource tegra_pwfm0_resource = {
+ .start = TEGRA_PWFM0_BASE,
+ .end = TEGRA_PWFM0_BASE + TEGRA_PWFM0_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource tegra_pwfm1_resource = {
+ .start = TEGRA_PWFM1_BASE,
+ .end = TEGRA_PWFM1_BASE + TEGRA_PWFM1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource tegra_pwfm2_resource = {
+ .start = TEGRA_PWFM2_BASE,
+ .end = TEGRA_PWFM2_BASE + TEGRA_PWFM2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource tegra_pwfm3_resource = {
+ .start = TEGRA_PWFM3_BASE,
+ .end = TEGRA_PWFM3_BASE + TEGRA_PWFM3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device tegra_pwfm0_device = {
+ .name = "tegra_pwm",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &tegra_pwfm0_resource,
+};
+
+struct platform_device tegra_pwfm1_device = {
+ .name = "tegra_pwm",
+ .id = 1,
+ .num_resources = 1,
+ .resource = &tegra_pwfm1_resource,
+};
+
+struct platform_device tegra_pwfm2_device = {
+ .name = "tegra_pwm",
+ .id = 2,
+ .num_resources = 1,
+ .resource = &tegra_pwfm2_resource,
+};
+
+struct platform_device tegra_pwfm3_device = {
+ .name = "tegra_pwm",
+ .id = 3,
+ .num_resources = 1,
+ .resource = &tegra_pwfm3_resource,
+};
+
+static struct resource tegra_grhost_resources[] = {
+ {
+ .start = TEGRA_HOST1X_BASE,
+ .end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_VI_BASE,
+ .end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_ISP_BASE,
+ .end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_MPE_BASE,
+ .end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SYNCPT_THRESH_BASE,
+ .end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_HOST1X_MPCORE_GENERAL,
+ .end = INT_HOST1X_MPCORE_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_grhost_device = {
+ .name = "tegra_grhost",
+ .id = -1,
+ .resource = tegra_grhost_resources,
+ .num_resources = ARRAY_SIZE(tegra_grhost_resources),
+};
+
+static struct tegra_avp_platform_data tegra_avp_pdata = {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ .emc_clk_rate = ULONG_MAX,
+#else
+ .emc_clk_rate = 200000000,
+#endif
+};
+
+struct resource tegra_nvavp_resources[] = {
+ [0] = {
+ .start = INT_SHR_SEM_INBOX_IBF,
+ .end = INT_SHR_SEM_INBOX_IBF,
+ .flags = IORESOURCE_IRQ,
+ .name = "mbox_from_nvavp_pending",
+ },
+};
+
+struct nvhost_device nvavp_device = {
+ .name = "nvavp",
+ .id = -1,
+ .resource = tegra_nvavp_resources,
+ .num_resources = ARRAY_SIZE(tegra_nvavp_resources),
+};
+
+static struct resource tegra_avp_resources[] = {
+ [0] = {
+ .start = INT_SHR_SEM_INBOX_IBF,
+ .end = INT_SHR_SEM_INBOX_IBF,
+ .flags = IORESOURCE_IRQ,
+ .name = "mbox_from_avp_pending",
+ },
+};
+
+struct platform_device tegra_avp_device = {
+ .name = "tegra-avp",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_avp_resources),
+ .resource = tegra_avp_resources,
+ .dev = {
+ .coherent_dma_mask = 0xffffffffULL,
+ .platform_data = &tegra_avp_pdata,
+ },
+};
+
+static struct resource tegra_aes_resources[] = {
+ {
+ .start = TEGRA_VDE_BASE,
+ .end = TEGRA_VDE_BASE + TEGRA_VDE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_BSEA_BASE,
+ .end = TEGRA_BSEA_BASE + TEGRA_BSEA_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static u64 tegra_aes_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device tegra_aes_device = {
+ .name = "tegra-aes",
+ .id = -1,
+ .resource = tegra_aes_resources,
+ .num_resources = ARRAY_SIZE(tegra_aes_resources),
+ .dev = {
+ .dma_mask = &tegra_aes_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource tegra_kbc_resources[] = {
+ [0] = {
+ .start = TEGRA_KBC_BASE,
+ .end = TEGRA_KBC_BASE + TEGRA_KBC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_KBC,
+ .end = INT_KBC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_kbc_device = {
+ .name = "tegra-kbc",
+ .id = -1,
+ .resource = tegra_kbc_resources,
+ .num_resources = ARRAY_SIZE(tegra_kbc_resources),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
+static struct resource tegra_tsensor_resources[]= {
+ {
+ .start = TEGRA_TSENSOR_BASE,
+ .end = TEGRA_TSENSOR_BASE + TEGRA_TSENSOR_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_TSENSOR,
+ .end = INT_TSENSOR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = TEGRA_PMC_BASE + 0x1B0,
+ /* 2 pmc registers mapped */
+ .end = TEGRA_PMC_BASE + 0x1B0 + (2 * 4),
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device tegra_tsensor_device = {
+ .name = "tegra-tsensor",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_tsensor_resources),
+ .resource = tegra_tsensor_resources,
+ .dev = {
+ .platform_data = 0,
+ },
+};
+#endif
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static u64 tegra_se_dma_mask = DMA_BIT_MASK(32);
+
+struct resource tegra_se_resources[] = {
+ [0] = {
+ .start = TEGRA_SE_BASE,
+ .end = TEGRA_SE_BASE + TEGRA_SE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = TEGRA_PMC_BASE,
+ .end = TEGRA_PMC_BASE + SZ_256 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = INT_SE,
+ .end = INT_SE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_se_device = {
+ .name = "tegra-se",
+ .id = -1,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &tegra_se_dma_mask,
+ },
+ .resource = tegra_se_resources,
+ .num_resources = ARRAY_SIZE(tegra_se_resources),
+};
+#endif
+
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 4a7dc0a097d6..0facd56ce3df 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -5,6 +5,8 @@
* Colin Cross <ccross@android.com>
* Erik Gilling <ccross@android.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -29,10 +31,25 @@ extern struct platform_device tegra_i2c_device1;
extern struct platform_device tegra_i2c_device2;
extern struct platform_device tegra_i2c_device3;
extern struct platform_device tegra_i2c_device4;
+extern struct platform_device tegra_kbc_device;
+extern struct platform_device tegra_pci_device;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+extern struct platform_device tegra_i2c_device5;
+#endif
extern struct platform_device tegra_spi_device1;
extern struct platform_device tegra_spi_device2;
extern struct platform_device tegra_spi_device3;
extern struct platform_device tegra_spi_device4;
+extern struct platform_device tegra_spi_slave_device1;
+extern struct platform_device tegra_spi_slave_device2;
+extern struct platform_device tegra_spi_slave_device3;
+extern struct platform_device tegra_spi_slave_device4;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+extern struct platform_device tegra_spi_device5;
+extern struct platform_device tegra_spi_device6;
+extern struct platform_device tegra_spi_slave_device5;
+extern struct platform_device tegra_spi_slave_device6;
+#endif
extern struct platform_device tegra_ehci1_device;
extern struct platform_device tegra_ehci2_device;
extern struct platform_device tegra_ehci3_device;
@@ -44,7 +61,65 @@ extern struct platform_device tegra_uarte_device;
extern struct platform_device tegra_pmu_device;
extern struct platform_device tegra_i2s_device1;
extern struct platform_device tegra_i2s_device2;
+extern struct platform_device tegra_spdif_device;
extern struct platform_device tegra_das_device;
+extern struct platform_device spdif_dit_device;
+extern struct platform_device bluetooth_dit_device;
+extern struct platform_device baseband_dit_device;
extern struct platform_device tegra_pcm_device;
+extern struct platform_device tegra_w1_device;
+extern struct platform_device tegra_udc_device;
+extern struct platform_device tegra_ehci1_device;
+extern struct platform_device tegra_ehci2_device;
+extern struct platform_device tegra_ehci3_device;
+extern struct platform_device tegra_i2s_device1;
+extern struct platform_device tegra_i2s_device2;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+extern struct platform_device tegra_i2s_device0;
+extern struct platform_device tegra_i2s_device3;
+extern struct platform_device tegra_i2s_device4;
+extern struct platform_device tegra_ahub_device;
+extern struct platform_device tegra_apbif0_device;
+extern struct platform_device tegra_apbif1_device;
+extern struct platform_device tegra_apbif2_device;
+extern struct platform_device tegra_apbif3_device;
+extern struct platform_device tegra_dam_device0;
+extern struct platform_device tegra_dam_device1;
+extern struct platform_device tegra_dam_device2;
+extern struct platform_device tegra_hda_device;
+extern struct platform_device tegra_sata_device;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern struct platform_device tegra_gart_device;
+#else
+extern struct platform_device tegra_smmu_device;
+#endif
+extern struct platform_device tegra_wdt_device;
+extern struct platform_device tegra_pwfm0_device;
+extern struct platform_device tegra_pwfm1_device;
+extern struct platform_device tegra_pwfm2_device;
+extern struct platform_device tegra_pwfm3_device;
+extern struct platform_device tegra_otg_device;
+extern struct platform_device tegra_uarta_device;
+extern struct platform_device tegra_uartb_device;
+extern struct platform_device tegra_uartc_device;
+extern struct platform_device tegra_uartd_device;
+extern struct platform_device tegra_uarte_device;
+extern struct platform_device tegra_grhost_device;
+extern struct platform_device tegra_avp_device;
+extern struct nvhost_device nvavp_device;
+extern struct platform_device tegra_aes_device;
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern struct platform_device tegra_tsensor_device;
+#endif
+extern struct platform_device tegra_nor_device;
+extern struct platform_device debug_uarta_device;
+extern struct platform_device debug_uartb_device;
+extern struct platform_device debug_uartc_device;
+extern struct platform_device debug_uartd_device;
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern struct platform_device tegra_se_device;
+extern struct platform_device debug_uarte_device;
+#endif
#endif
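
A board file consumes these declarations by collecting the devices it needs into an array and registering them in one call. The example below is a hedged illustration only; the board name and the particular device selection are made up:

#include <linux/platform_device.h>
#include "devices.h"

static struct platform_device *example_board_devices[] __initdata = {
	&tegra_uartd_device,
	&tegra_i2c_device1,
	&tegra_spi_device1,
	&tegra_wdt_device,
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	&tegra_se_device,
#endif
};

static void __init example_board_devices_init(void)
{
	platform_add_devices(example_board_devices,
			     ARRAY_SIZE(example_board_devices));
}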
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index f4ef5eb317bd..c36c31d6ec01 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -3,7 +3,7 @@
*
* System DMA driver for NVIDIA Tegra SoCs
*
- * Copyright (c) 2008-2009, NVIDIA Corporation.
+ * Copyright (c) 2008-2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,10 +28,11 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/syscore_ops.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
-#include <mach/suspend.h>
+#include <mach/clk.h>
#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)
@@ -51,7 +52,6 @@
#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC
@@ -97,14 +97,17 @@
#define APB_SEQ_WRAP_SHIFT 16
#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
#define TEGRA_SYSTEM_DMA_CH_NR 16
+#else
+#define TEGRA_SYSTEM_DMA_CH_NR 32
+#endif
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
#define TEGRA_SYSTEM_DMA_CH_MIN 0
#define TEGRA_SYSTEM_DMA_CH_MAX \
(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
-#define NV_DMA_MAX_TRASFER_SIZE 0x10000
-
+static struct clk *dma_clk;
const unsigned int ahb_addr_wrap_table[8] = {
0, 32, 64, 128, 256, 512, 1024, 2048
};
@@ -119,6 +122,7 @@ struct tegra_dma_channel {
int id;
spinlock_t lock;
char name[TEGRA_DMA_NAME_SIZE];
+ char client_name[TEGRA_DMA_NAME_SIZE];
void __iomem *addr;
int mode;
int irq;
@@ -129,6 +133,7 @@ struct tegra_dma_channel {
static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -174,91 +179,126 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
writel(status, ch->addr + APB_DMA_CHAN_STA);
}
+bool tegra_dma_is_stopped(struct tegra_dma_channel *ch)
+{
+ return !!(readl(ch->addr + APB_DMA_CHAN_STA) & CSR_ENB);
+}
+
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
- u32 csr;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
while (!list_empty(&ch->list))
list_del(ch->list.next);
- csr = readl(ch->addr + APB_DMA_CHAN_CSR);
- csr &= ~CSR_REQ_SEL_MASK;
- csr |= CSR_REQ_SEL_INVALID;
- writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
tegra_dma_stop(ch);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return 0;
}
+EXPORT_SYMBOL(tegra_dma_cancel);
-int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
- struct tegra_dma_req *_req)
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma)
{
- unsigned int csr;
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
unsigned int status;
- struct tegra_dma_req *req = NULL;
- int found = 0;
- unsigned long irq_flags;
- int to_transfer;
- int req_transfer_count;
- spin_lock_irqsave(&ch->lock, irq_flags);
- list_for_each_entry(req, &ch->list, node) {
- if (req == _req) {
- list_del(&req->node);
- found = 1;
- break;
+ if (is_stop_dma) {
+ /* STOP the DMA and get the transfer count.
+ * Getting the transfer count is tricky.
+ * - Globally disable DMA on all channels
+ * - Read the channel's status register to know the number
+ * of pending bytes to be transferred.
+ * - Stop the dma channel
+ * - Globally re-enable DMA to resume other transfers
+ */
+ spin_lock(&enable_lock);
+ writel(0, addr + APB_DMA_GEN);
+ udelay(20);
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ tegra_dma_stop(ch);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ spin_unlock(&enable_lock);
+ if (status & STA_ISE_EOC) {
+ pr_err("Got Dma Int here clearing");
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
}
+ req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+ } else {
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
}
- if (!found) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return 0;
- }
+ return status;
+}
- /* STOP the DMA and get the transfer count.
- * Getting the transfer count is tricky.
- * - Change the source selector to invalid to stop the DMA from
- * FIFO to memory.
- * - Read the status register to know the number of pending
- * bytes to be transferred.
- * - Finally stop or program the DMA to the next buffer in the
- * list.
- */
- csr = readl(ch->addr + APB_DMA_CHAN_CSR);
- csr &= ~CSR_REQ_SEL_MASK;
- csr |= CSR_REQ_SEL_INVALID;
- writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, unsigned int status)
+{
+ unsigned int to_transfer;
+ unsigned int req_transfer_count;
+
+ unsigned int bytes_transferred;
- /* Get the transfer count */
- status = readl(ch->addr + APB_DMA_CHAN_STA);
to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
req_transfer_count = ch->req_transfer_count;
req_transfer_count += 1;
to_transfer += 1;
- req->bytes_transferred = req_transfer_count;
+ bytes_transferred = req_transfer_count;
if (status & STA_BUSY)
- req->bytes_transferred -= to_transfer;
+ bytes_transferred -= to_transfer;
/* In continuous transfer mode, DMA only tracks the count of the
* half DMA buffer. So, if the DMA already finished half the DMA
* then add the half buffer to the completed count.
- *
- * FIXME: There can be a race here. What if the req to
- * dequue happens at the same time as the DMA just moved to
- * the new buffer and SW didn't yet received the interrupt?
*/
- if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
+ if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
- req->bytes_transferred += req_transfer_count;
+ bytes_transferred += req_transfer_count;
- req->bytes_transferred *= 4;
+ if (status & STA_ISE_EOC)
+ bytes_transferred += req_transfer_count;
+
+ bytes_transferred *= 4;
+
+ return bytes_transferred;
+}
+
+int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req)
+{
+ struct tegra_dma_req *req = NULL;
+ int found = 0;
+ unsigned int status;
+ unsigned long irq_flags;
+ int stop = 0;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+ stop = 1;
+
+ list_for_each_entry(req, &ch->list, node) {
+ if (req == _req) {
+ list_del(&req->node);
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return 0;
+ }
+
+ if (!stop)
+ goto skip_status;
+
+ status = get_channel_status(ch, req, true);
+ req->bytes_transferred = dma_active_count(ch, req, status);
- tegra_dma_stop(ch);
if (!list_empty(&ch->list)) {
/* if the list is not empty, queue the next request */
struct tegra_dma_req *next_req;
@@ -266,6 +306,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
typeof(*next_req), node);
tegra_dma_update_hw(ch, next_req);
}
+skip_status:
req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
spin_unlock_irqrestore(&ch->lock, irq_flags);
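
The word-count arithmetic in dma_active_count() above is easier to follow in isolation. The helper below is illustrative only; it mirrors the plain one-shot case and ignores the continuous-double and EOC adjustments. For a 64-byte request (req_transfer_count = 15) whose status count reads 3 with STA_BUSY set, it yields (16 - 4) * 4 = 48 bytes transferred.

/* Illustrative sketch of the transfer accounting, not driver code. */
static unsigned int bytes_done(unsigned int req_transfer_count,
			       unsigned int sta_count, bool busy)
{
	unsigned int words = req_transfer_count + 1;	/* programmed words */

	if (busy)
		words -= sta_count + 1;		/* words still outstanding */

	return words * 4;			/* counters are in 32-bit words */
}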
@@ -308,6 +349,36 @@ bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
+int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma)
+{
+ unsigned int status;
+ unsigned long irq_flags;
+ int bytes_transferred = 0;
+
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_debug("The dma request is not the head req\n");
+ return req->bytes_transferred;
+ }
+
+ if (req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_debug("The dma request is not running\n");
+ return req->bytes_transferred;
+ }
+
+ status = get_channel_status(ch, req, is_stop_dma);
+ bytes_transferred = dma_active_count(ch, req, status);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return bytes_transferred;
+}
+EXPORT_SYMBOL(tegra_dma_get_transfer_count);
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
@@ -316,7 +387,7 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *_req;
int start_dma = 0;
- if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
+ if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
req->source_addr & 0x3 || req->dest_addr & 0x3) {
pr_err("Invalid DMA request for channel %d\n", ch->id);
return -EINVAL;
@@ -326,14 +397,15 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
list_for_each_entry(_req, &ch->list, node) {
if (req == _req) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return -EEXIST;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return -EEXIST;
}
}
req->bytes_transferred = 0;
req->status = 0;
- req->buffer_status = 0;
+ /* STATUS_EMPTY just means the DMA hasn't processed the buf yet. */
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
if (list_empty(&ch->list))
start_dma = 1;
@@ -341,6 +413,34 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
if (start_dma)
tegra_dma_update_hw(ch, req);
+ /* Check to see if this request needs to be pushed immediately.
+ * For continuous single-buffer DMA:
+ * The first buffer is always in-flight. The 2nd buffer should
+ * also be in-flight. The 3rd buffer becomes in-flight when the
+ * first is completed in the interrupt.
+ */
+ else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE) {
+ struct tegra_dma_req *first_req, *second_req;
+ first_req = list_entry(ch->list.next,
+ typeof(*first_req), node);
+ second_req = list_entry(first_req->node.next,
+ typeof(*second_req), node);
+ if (second_req == req) {
+ unsigned long status =
+ readl(ch->addr + APB_DMA_CHAN_STA);
+ if (!(status & STA_ISE_EOC))
+ tegra_dma_update_hw_partial(ch, req);
+ /* Handle the case where the IRQ fired while we
+ * were programming the transfer.
+ */
+ if (status & STA_ISE_EOC) {
+ /* Interrupt fired, let the IRQ stop/restart
+ * the DMA with this buffer in a clean way.
+ */
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ }
+ }
+ }
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -348,10 +448,23 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
-struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
+static void tegra_dma_dump_channel_usage(void)
+{
+ int i;
+ pr_info("DMA channel allocation dump:\n");
+ for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+ struct tegra_dma_channel *ch = &dma_channels[i];
+ pr_warn("dma %d used by %s\n", i, ch->client_name);
+ }
+ return;
+}
+
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode,
+ const char namefmt[], ...)
{
int channel;
struct tegra_dma_channel *ch = NULL;
+ va_list args;
if (WARN_ON(!tegra_dma_initialized))
return NULL;
@@ -364,12 +477,18 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
} else {
channel = find_first_zero_bit(channel_usage,
ARRAY_SIZE(dma_channels));
- if (channel >= ARRAY_SIZE(dma_channels))
+ if (channel >= ARRAY_SIZE(dma_channels)) {
+ tegra_dma_dump_channel_usage();
goto out;
+ }
}
__set_bit(channel, channel_usage);
ch = &dma_channels[channel];
ch->mode = mode;
+ va_start(args, namefmt);
+ vsnprintf(ch->client_name, sizeof(ch->client_name),
+ namefmt, args);
+ va_end(args);
out:
mutex_unlock(&tegra_dma_lock);
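
Since the allocator now takes a printf-style client name, callers are expected to pass a string that identifies them in the allocation dump and in debugfs. A typical (hypothetical) client would do roughly the following; "spi_tegra.%d" and pdev are illustrative:

	struct tegra_dma_channel *ch;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
					"spi_tegra.%d", pdev->id);
	if (!ch)
		return -ENODEV;

	/* ... build and enqueue tegra_dma_req structures ... */

	tegra_dma_free_channel(ch);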
@@ -384,6 +503,7 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch)
tegra_dma_cancel(ch);
mutex_lock(&tegra_dma_lock);
__clear_bit(ch->id, channel_usage);
+ memset(ch->client_name, 0, sizeof(ch->client_name));
mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
@@ -393,6 +513,7 @@ static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
{
u32 apb_ptr;
u32 ahb_ptr;
+ u32 csr;
if (req->to_memory) {
apb_ptr = req->source_addr;
@@ -404,6 +525,15 @@ static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+ if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+ ch->req_transfer_count = (req->size >> 3) - 1;
+ else
+ ch->req_transfer_count = (req->size >> 2) - 1;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
+ csr &= ~CSR_WCOUNT_MASK;
+ csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
req->status = TEGRA_DMA_REQ_INFLIGHT;
return;
}
@@ -424,26 +554,70 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
u32 csr;
csr = CSR_IE_EOC | CSR_FLOW;
- ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+ ahb_seq = AHB_SEQ_INTR_ENB;
+
+ switch (req->req_sel) {
+ case TEGRA_DMA_REQ_SEL_SL2B1:
+ case TEGRA_DMA_REQ_SEL_SL2B2:
+ case TEGRA_DMA_REQ_SEL_SL2B3:
+ case TEGRA_DMA_REQ_SEL_SL2B4:
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ case TEGRA_DMA_REQ_SEL_SL2B5:
+ case TEGRA_DMA_REQ_SEL_SL2B6:
+ case TEGRA_DMA_REQ_SEL_APBIF_CH0:
+ case TEGRA_DMA_REQ_SEL_APBIF_CH1:
+ case TEGRA_DMA_REQ_SEL_APBIF_CH2:
+ case TEGRA_DMA_REQ_SEL_APBIF_CH3:
+#endif
+ case TEGRA_DMA_REQ_SEL_SPI:
+ /* For SPI/SLINK the burst size depends on the transfer size:
+ * a multiple of 32 bytes gets an 8-word burst, a multiple of
+ * 16 bytes gets a 4-word burst, and anything else uses a
+ * 1-word burst. */
+ if (req->size & 0xF)
+ ahb_seq |= AHB_SEQ_BURST_1;
+ else if ((req->size >> 4) & 0x1)
+ ahb_seq |= AHB_SEQ_BURST_4;
+ else
+ ahb_seq |= AHB_SEQ_BURST_8;
+ break;
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ case TEGRA_DMA_REQ_SEL_I2S_2:
+ case TEGRA_DMA_REQ_SEL_I2S_1:
+ case TEGRA_DMA_REQ_SEL_SPD_I:
+ case TEGRA_DMA_REQ_SEL_UI_I:
+ case TEGRA_DMA_REQ_SEL_I2S2_2:
+ case TEGRA_DMA_REQ_SEL_I2S2_1:
+ /* For ARCH_2x i2s/spdif the burst size is 4 words */
+ ahb_seq |= AHB_SEQ_BURST_4;
+ break;
+#endif
+
+ default:
+ ahb_seq |= AHB_SEQ_BURST_1;
+ break;
+ }
+
apb_seq = 0;
csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
- /* One shot mode is always single buffered,
- * continuous mode is always double buffered
- * */
+ ch->req_transfer_count = (req->size >> 2) - 1;
+
+ /* One shot mode is always single buffered. Continuous mode could
+ * support either.
+ */
if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
csr |= CSR_ONCE;
- ch->req_transfer_count = (req->size >> 2) - 1;
- } else {
+ } else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) {
ahb_seq |= AHB_SEQ_DBL_BUF;
-
- /* In double buffered mode, we set the size to half the
- * requested size and interrupt when half the buffer
- * is full */
+ /* We want an interrupt halfway through, then on the
+ * completion. The double buffer means 2 interrupts
+ * pass before the DMA HW latches a new AHB_PTR etc.
+ */
ch->req_transfer_count = (req->size >> 3) - 1;
}
-
csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
if (req->to_memory) {
@@ -528,14 +702,8 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
req = list_entry(ch->list.next, typeof(*req), node);
if (req) {
- int bytes_transferred;
-
- bytes_transferred = ch->req_transfer_count;
- bytes_transferred += 1;
- bytes_transferred <<= 2;
-
list_del(&req->node);
- req->bytes_transferred = bytes_transferred;
+ req->bytes_transferred = req->size;
req->status = TEGRA_DMA_REQ_SUCCESS;
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -556,9 +724,10 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
-static void handle_continuous_dma(struct tegra_dma_channel *ch)
+static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ struct tegra_dma_req *next_req;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -588,8 +757,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
tegra_dma_stop(ch);
if (!list_is_last(&req->node, &ch->list)) {
- struct tegra_dma_req *next_req;
-
next_req = list_entry(req->node.next,
typeof(*next_req), node);
tegra_dma_update_hw(ch, next_req);
@@ -605,14 +772,12 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
/* Load the next request into the hardware, if available
* */
if (!list_is_last(&req->node, &ch->list)) {
- struct tegra_dma_req *next_req;
-
next_req = list_entry(req->node.next,
typeof(*next_req), node);
tegra_dma_update_hw_partial(ch, next_req);
}
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
- req->status = TEGRA_DMA_REQ_SUCCESS;
+ req->bytes_transferred = req->size >> 1;
/* DMA lock is NOT held when callback is called */
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (likely(req->threshold))
@@ -623,15 +788,23 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
/* Callback when the buffer is completely full (i.e on
* the second interrupt */
- int bytes_transferred;
-
- bytes_transferred = ch->req_transfer_count;
- bytes_transferred += 1;
- bytes_transferred <<= 3;
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
- req->bytes_transferred = bytes_transferred;
+ req->bytes_transferred = req->size;
req->status = TEGRA_DMA_REQ_SUCCESS;
+ if (list_is_last(&req->node, &ch->list))
+ tegra_dma_stop(ch);
+ else {
+ /* The request may have been queued after the
+ * half-buffer interrupt, so it needs to be
+ * started immediately. */
+ next_req = list_entry(req->node.next, typeof(*next_req), node);
+ if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ }
+ }
+
list_del(&req->node);
/* DMA lock is NOT held when callback is called */
@@ -640,12 +813,65 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
return;
} else {
+ tegra_dma_stop(ch);
+ /* The DMA should have been stopped much earlier */
BUG();
}
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
+static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
+{
+ struct tegra_dma_req *req;
+ struct tegra_dma_req *next_req;
+ struct tegra_dma_req *next_next_req;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (list_empty(&ch->list)) {
+ tegra_dma_stop(ch);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("%s: No requests in the list.\n", __func__);
+ return;
+ }
+ req = list_entry(ch->list.next, typeof(*req), node);
+ if (!req || (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_FULL)) {
+ tegra_dma_stop(ch);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("%s: DMA complete irq without corresponding req\n",
+ __func__);
+ return;
+ }
+
+ /* Handle the case when buffer is completely full */
+ req->bytes_transferred = req->size;
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ if (list_is_last(&req->node, &ch->list)) {
+ pr_debug("%s: stop\n", __func__);
+ tegra_dma_stop(ch);
+ } else {
+ /* The next entry should have already been queued and is now
+ * in the middle of xfer. We can then write the next->next one
+ * if it exists.
+ */
+ next_req = list_entry(req->node.next, typeof(*next_req), node);
+ if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ pr_debug("%s: interrupt during enqueue\n", __func__);
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ } else if (!list_is_last(&next_req->node, &ch->list)) {
+ next_next_req = list_entry(next_req->node.next,
+ typeof(*next_next_req), node);
+ tegra_dma_update_hw_partial(ch, next_next_req);
+ }
+ }
+ list_del(&req->node);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ req->complete(req);
+}
+
static irqreturn_t dma_isr(int irq, void *data)
{
struct tegra_dma_channel *ch = data;
@@ -658,19 +884,15 @@ static irqreturn_t dma_isr(int irq, void *data)
pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
return IRQ_HANDLED;
}
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t dma_thread_fn(int irq, void *data)
-{
- struct tegra_dma_channel *ch = data;
if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
handle_oneshot_dma(ch);
+ else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+ handle_continuous_dbl_dma(ch);
+ else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
+ handle_continuous_sngl_dma(ch);
else
- handle_continuous_dma(ch);
-
-
+ pr_err("Bad channel mode for DMA ISR to handle\n");
return IRQ_HANDLED;
}
@@ -696,11 +918,20 @@ int __init tegra_dma_init(void)
goto fail;
}
+ dma_clk = clk_get_sys("apbdma", "apbdma");
+ if (!IS_ERR_OR_NULL(dma_clk)) {
+ clk_enable(dma_clk);
+ tegra_periph_reset_assert(dma_clk);
+ udelay(10);
+ tegra_periph_reset_deassert(dma_clk);
+ udelay(10);
+ }
+
addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
writel(GEN_ENABLE, addr + APB_DMA_GEN);
writel(0, addr + APB_DMA_CNTRL);
writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
- addr + APB_DMA_IRQ_MASK_SET);
+ addr + APB_DMA_IRQ_MASK_SET);
for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
struct tegra_dma_channel *ch = &dma_channels[i];
@@ -708,15 +939,21 @@ int __init tegra_dma_init(void)
ch->id = i;
snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
+ memset(ch->client_name, 0, sizeof(ch->client_name));
+
ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
TEGRA_APB_DMA_CH0_SIZE * i);
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->list);
- irq = INT_APB_DMA_CH0 + i;
- ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
- dma_channels[i].name, ch);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (i >= 16)
+ irq = INT_APB_DMA_CH16 + i - 16;
+ else
+#endif
+ irq = INT_APB_DMA_CH0 + i;
+ ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
if (ret) {
pr_err("Failed to register IRQ %d for DMA %d\n",
irq, i);
@@ -743,10 +980,11 @@ fail:
}
postcore_initcall(tegra_dma_init);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
-void tegra_dma_suspend(void)
+static int tegra_dma_suspend(void)
{
void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
u32 *ctx = apb_dma;
@@ -766,9 +1004,11 @@ void tegra_dma_suspend(void)
*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
}
+
+ return 0;
}
-void tegra_dma_resume(void)
+static void tegra_dma_resume(void)
{
void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
u32 *ctx = apb_dma;
@@ -790,4 +1030,79 @@ void tegra_dma_resume(void)
}
}
+static struct syscore_ops tegra_dma_syscore_ops = {
+ .suspend = tegra_dma_suspend,
+ .resume = tegra_dma_resume,
+};
+
+static int tegra_dma_syscore_init(void)
+{
+ register_syscore_ops(&tegra_dma_syscore_ops);
+
+ return 0;
+}
+subsys_initcall(tegra_dma_syscore_init);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_dma_show(struct seq_file *s, void *unused)
+{
+ int i;
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+
+ seq_printf(s, " APBDMA global register\n");
+ seq_printf(s, "DMA_GEN: 0x%08x\n", __raw_readl(addr + APB_DMA_GEN));
+ seq_printf(s, "DMA_CNTRL: 0x%08x\n", __raw_readl(addr + APB_DMA_CNTRL));
+ seq_printf(s, "IRQ_MASK: 0x%08x\n",
+ __raw_readl(addr + APB_DMA_IRQ_MASK));
+
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
+ addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ seq_printf(s, " APBDMA channel %02d register\n", i);
+ seq_printf(s, "0x00: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ __raw_readl(addr + 0x0),
+ __raw_readl(addr + 0x4),
+ __raw_readl(addr + 0x8),
+ __raw_readl(addr + 0xC));
+ seq_printf(s, "0x10: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ __raw_readl(addr + 0x10),
+ __raw_readl(addr + 0x14),
+ __raw_readl(addr + 0x18),
+ __raw_readl(addr + 0x1C));
+ }
+ seq_printf(s, "\nAPB DMA users\n");
+ seq_printf(s, "-------------\n");
+ for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+ struct tegra_dma_channel *ch = &dma_channels[i];
+ if (strlen(ch->client_name) > 0)
+ seq_printf(s, "dma %d -> %s\n", i, ch->client_name);
+ }
+ return 0;
+}
+
+static int dbg_dma_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_dma_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_dma_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_dma_debuginit(void)
+{
+ (void) debugfs_create_file("tegra_dma", S_IRUGO,
+ NULL, NULL, &debug_fops);
+ return 0;
+}
+late_initcall(tegra_dma_debuginit);
#endif
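
Putting the dma.c changes together, the new TEGRA_DMA_MODE_CONTINUOUS_SINGLE path expects a client to keep at least two requests queued and to re-enqueue each buffer from its completion callback, as the enqueue-time comment above describes. The following is a hedged sketch of that pattern using only the APIs visible in this file; the buffer setup, FIFO address, and request selector are illustrative:

static struct tegra_dma_channel *my_ch;		/* hypothetical client state */
static struct tegra_dma_req my_req[2];
static dma_addr_t my_buf_phys[2];		/* DMA-able buffers, mapped elsewhere */

static void my_rx_complete(struct tegra_dma_req *req)
{
	/* req->bytes_transferred == req->size here. */
	/* ... consume the data behind req->dest_addr ... */
	tegra_dma_enqueue_req(my_ch, req);	/* recycle the buffer */
}

static int my_start_rx(void)
{
	int i;

	my_ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
					   "my_rx");
	if (!my_ch)
		return -ENODEV;

	for (i = 0; i < 2; i++) {
		my_req[i].to_memory = 1;
		my_req[i].source_addr = MY_FIFO_PHYS;	/* hypothetical FIFO address */
		my_req[i].dest_addr = my_buf_phys[i];
		my_req[i].size = 4096;
		my_req[i].req_sel = TEGRA_DMA_REQ_SEL_SL2B1;
		my_req[i].complete = my_rx_complete;
		tegra_dma_enqueue_req(my_ch, &my_req[i]);
	}
	return 0;
}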
diff --git a/arch/arm/mach-tegra/dvfs.c b/arch/arm/mach-tegra/dvfs.c
new file mode 100644
index 000000000000..22c666081c90
--- /dev/null
+++ b/arch/arm/mach-tegra/dvfs.c
@@ -0,0 +1,806 @@
+/*
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+
+#include <mach/clk.h>
+
+#include "board.h"
+#include "clock.h"
+#include "dvfs.h"
+
+#define DVFS_RAIL_STATS_BIN 25
+#define DVFS_RAIL_STATS_SCALE 2
+#define DVFS_RAIL_STATS_RANGE ((DVFS_RAIL_STATS_TOP_BIN - 1) * \
+ DVFS_RAIL_STATS_BIN / DVFS_RAIL_STATS_SCALE)
+
+static LIST_HEAD(dvfs_rail_list);
+static DEFINE_MUTEX(dvfs_lock);
+static DEFINE_MUTEX(rail_disable_lock);
+
+static int dvfs_rail_update(struct dvfs_rail *rail);
+
+void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n)
+{
+ int i;
+ struct dvfs_relationship *rel;
+
+ mutex_lock(&dvfs_lock);
+
+ for (i = 0; i < n; i++) {
+ rel = &rels[i];
+ list_add_tail(&rel->from_node, &rel->to->relationships_from);
+ list_add_tail(&rel->to_node, &rel->from->relationships_to);
+ }
+
+ mutex_unlock(&dvfs_lock);
+}
+
+int tegra_dvfs_init_rails(struct dvfs_rail *rails[], int n)
+{
+ int i;
+
+ mutex_lock(&dvfs_lock);
+
+ for (i = 0; i < n; i++) {
+ INIT_LIST_HEAD(&rails[i]->dvfs);
+ INIT_LIST_HEAD(&rails[i]->relationships_from);
+ INIT_LIST_HEAD(&rails[i]->relationships_to);
+ rails[i]->millivolts = rails[i]->nominal_millivolts;
+ rails[i]->new_millivolts = rails[i]->nominal_millivolts;
+ if (!rails[i]->step)
+ rails[i]->step = rails[i]->max_millivolts;
+
+ list_add_tail(&rails[i]->node, &dvfs_rail_list);
+ }
+
+ mutex_unlock(&dvfs_lock);
+
+ return 0;
+};
+
+static int dvfs_solve_relationship(struct dvfs_relationship *rel)
+{
+ return rel->solve(rel->from, rel->to);
+}
+
+/* rail statistics - called during rail init, or under dvfs_lock, or with
+   CPU0 only on-line and interrupts disabled */
+static void dvfs_rail_stats_init(struct dvfs_rail *rail, int millivolts)
+{
+ rail->stats.last_update = ktime_get();
+ if (millivolts >= rail->min_millivolts) {
+ int i = 1 + (2 * (millivolts - rail->min_millivolts) *
+ DVFS_RAIL_STATS_SCALE + DVFS_RAIL_STATS_BIN) /
+ (2 * DVFS_RAIL_STATS_BIN);
+ rail->stats.last_index = min(i, DVFS_RAIL_STATS_TOP_BIN);
+ }
+
+ if (rail->max_millivolts >
+ rail->min_millivolts + DVFS_RAIL_STATS_RANGE)
+ pr_warn("tegra_dvfs: %s: stats above %d mV will be squashed\n",
+ rail->reg_id,
+ rail->min_millivolts + DVFS_RAIL_STATS_RANGE);
+}
+
+static void dvfs_rail_stats_update(
+ struct dvfs_rail *rail, int millivolts, ktime_t now)
+{
+ rail->stats.time_at_mv[rail->stats.last_index] = ktime_add(
+ rail->stats.time_at_mv[rail->stats.last_index], ktime_sub(
+ now, rail->stats.last_update));
+ rail->stats.last_update = now;
+
+ if (rail->stats.off)
+ return;
+
+ if (millivolts >= rail->min_millivolts) {
+ int i = 1 + (2 * (millivolts - rail->min_millivolts) *
+ DVFS_RAIL_STATS_SCALE + DVFS_RAIL_STATS_BIN) /
+ (2 * DVFS_RAIL_STATS_BIN);
+ rail->stats.last_index = min(i, DVFS_RAIL_STATS_TOP_BIN);
+ } else if (millivolts == 0)
+ rail->stats.last_index = 0;
+}
+
+static void dvfs_rail_stats_pause(struct dvfs_rail *rail,
+ ktime_t delta, bool on)
+{
+ int i = on ? rail->stats.last_index : 0;
+ rail->stats.time_at_mv[i] = ktime_add(rail->stats.time_at_mv[i], delta);
+}
+
+void tegra_dvfs_rail_off(struct dvfs_rail *rail, ktime_t now)
+{
+ if (rail) {
+ dvfs_rail_stats_update(rail, 0, now);
+ rail->stats.off = true;
+ }
+}
+
+void tegra_dvfs_rail_on(struct dvfs_rail *rail, ktime_t now)
+{
+ if (rail) {
+ rail->stats.off = false;
+ dvfs_rail_stats_update(rail, rail->millivolts, now);
+ }
+}
+
+void tegra_dvfs_rail_pause(struct dvfs_rail *rail, ktime_t delta, bool on)
+{
+ if (rail)
+ dvfs_rail_stats_pause(rail, delta, on);
+}
+
+/* Sets the voltage on a dvfs rail to a specific value, and updates any
+ * rails that depend on this rail. */
+static int dvfs_rail_set_voltage(struct dvfs_rail *rail, int millivolts)
+{
+ int ret = 0;
+ struct dvfs_relationship *rel;
+ int step = (millivolts > rail->millivolts) ? rail->step : -rail->step;
+ int i;
+ int steps;
+ bool jmp_to_zero;
+
+ if (!rail->reg) {
+ if (millivolts == rail->millivolts)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (rail->disabled)
+ return 0;
+
+ rail->resolving_to = true;
+ jmp_to_zero = rail->jmp_to_zero &&
+ ((millivolts == 0) || (rail->millivolts == 0));
+ steps = jmp_to_zero ? 1 :
+ DIV_ROUND_UP(abs(millivolts - rail->millivolts), rail->step);
+
+ for (i = 0; i < steps; i++) {
+ if (!jmp_to_zero &&
+ (abs(millivolts - rail->millivolts) > rail->step))
+ rail->new_millivolts = rail->millivolts + step;
+ else
+ rail->new_millivolts = millivolts;
+
+ /* Before changing the voltage, tell each rail that depends
+ * on this rail that the voltage will change.
+ * This rail will be the "from" rail in the relationship,
+ * the rail that depends on this rail will be the "to" rail.
+ * from->millivolts will be the old voltage
+ * from->new_millivolts will be the new voltage */
+ list_for_each_entry(rel, &rail->relationships_to, to_node) {
+ ret = dvfs_rail_update(rel->to);
+ if (ret)
+ goto out;
+ }
+
+ if (!rail->disabled) {
+ rail->updating = true;
+ ret = regulator_set_voltage(rail->reg,
+ rail->new_millivolts * 1000,
+ rail->max_millivolts * 1000);
+ rail->updating = false;
+ }
+ if (ret) {
+ pr_err("Failed to set dvfs regulator %s\n", rail->reg_id);
+ goto out;
+ }
+
+ rail->millivolts = rail->new_millivolts;
+ dvfs_rail_stats_update(rail, rail->millivolts, ktime_get());
+
+ /* After changing the voltage, tell each rail that depends
+ * on this rail that the voltage has changed.
+ * from->millivolts and from->new_millivolts will be the
+ * new voltage */
+ list_for_each_entry(rel, &rail->relationships_to, to_node) {
+ ret = dvfs_rail_update(rel->to);
+ if (ret)
+ goto out;
+ }
+ }
+
+ if (unlikely(rail->millivolts != millivolts)) {
+ pr_err("%s: rail didn't reach target %d in %d steps (%d)\n",
+ __func__, millivolts, steps, rail->millivolts);
+ ret = -EINVAL;
+ }
+
+out:
+ rail->resolving_to = false;
+ return ret;
+}
+
+/* Determine the minimum valid voltage for a rail, taking into account
+ * the dvfs clocks and any rails that this rail depends on. Calls
+ * dvfs_rail_set_voltage with the new voltage, which will call
+ * dvfs_rail_update on any rails that depend on this rail. */
+static int dvfs_rail_update(struct dvfs_rail *rail)
+{
+ int millivolts = 0;
+ struct dvfs *d;
+ struct dvfs_relationship *rel;
+ int ret = 0;
+ int steps;
+
+ /* if dvfs is suspended, return and handle it during resume */
+ if (rail->suspended)
+ return 0;
+
+ /* if regulators are not connected yet, return and handle it later */
+ if (!rail->reg)
+ return 0;
+
+ /* if rail update is entered while resolving circular dependencies,
+ abort recursion */
+ if (rail->resolving_to)
+ return 0;
+
+ /* Find the maximum voltage requested by any clock */
+ list_for_each_entry(d, &rail->dvfs, reg_node)
+ millivolts = max(d->cur_millivolts, millivolts);
+
+ /* retry update if limited by from-relationship to account for
+ circular dependencies */
+ steps = DIV_ROUND_UP(abs(millivolts - rail->millivolts), rail->step);
+ for (; steps >= 0; steps--) {
+ rail->new_millivolts = millivolts;
+
+ /* Check any rails that this rail depends on */
+ list_for_each_entry(rel, &rail->relationships_from, from_node)
+ rail->new_millivolts = dvfs_solve_relationship(rel);
+
+ if (rail->new_millivolts == rail->millivolts)
+ break;
+
+ ret = dvfs_rail_set_voltage(rail, rail->new_millivolts);
+ }
+
+ return ret;
+}
+
+static int dvfs_rail_connect_to_regulator(struct dvfs_rail *rail)
+{
+ struct regulator *reg;
+ int v;
+
+ if (!rail->reg) {
+ reg = regulator_get(NULL, rail->reg_id);
+ if (IS_ERR(reg)) {
+ pr_err("tegra_dvfs: failed to connect %s rail\n",
+ rail->reg_id);
+ return -EINVAL;
+ }
+ rail->reg = reg;
+ }
+
+ v = regulator_get_voltage(rail->reg);
+ if (v < 0) {
+ pr_err("tegra_dvfs: failed initial get %s voltage\n",
+ rail->reg_id);
+ return v;
+ }
+ rail->millivolts = v / 1000;
+ rail->new_millivolts = rail->millivolts;
+ dvfs_rail_stats_init(rail, rail->millivolts);
+ return 0;
+}
+
+static int
+__tegra_dvfs_set_rate(struct dvfs *d, unsigned long rate)
+{
+ int i = 0;
+ int ret;
+
+ if (d->freqs == NULL || d->millivolts == NULL)
+ return -ENODEV;
+
+ if (rate > d->freqs[d->num_freqs - 1]) {
+ pr_warn("tegra_dvfs: rate %lu too high for dvfs on %s\n", rate,
+ d->clk_name);
+ return -EINVAL;
+ }
+
+ if (rate == 0) {
+ d->cur_millivolts = 0;
+ } else {
+ while (i < d->num_freqs && rate > d->freqs[i])
+ i++;
+
+ if ((d->max_millivolts) &&
+ (d->millivolts[i] > d->max_millivolts)) {
+ pr_warn("tegra_dvfs: voltage %d too high for dvfs on"
+ " %s\n", d->millivolts[i], d->clk_name);
+ return -EINVAL;
+ }
+ d->cur_millivolts = d->millivolts[i];
+ }
+
+ d->cur_rate = rate;
+
+ ret = dvfs_rail_update(d->dvfs_rail);
+ if (ret)
+ pr_err("Failed to set regulator %s for clock %s to %d mV\n",
+ d->dvfs_rail->reg_id, d->clk_name, d->cur_millivolts);
+
+ return ret;
+}
+
+int tegra_dvfs_predict_millivolts(struct clk *c, unsigned long rate)
+{
+ int i;
+
+ if (!rate || !c->dvfs)
+ return 0;
+
+ if (!c->dvfs->millivolts)
+ return -ENODEV;
+
+ for (i = 0; i < c->dvfs->num_freqs; i++) {
+ if (rate <= c->dvfs->freqs[i])
+ break;
+ }
+
+ if (i == c->dvfs->num_freqs)
+ return -EINVAL;
+
+ return c->dvfs->millivolts[i];
+}
+
+int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+
+ if (!c->dvfs)
+ return -EINVAL;
+
+ mutex_lock(&dvfs_lock);
+ ret = __tegra_dvfs_set_rate(c->dvfs, rate);
+ mutex_unlock(&dvfs_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(tegra_dvfs_set_rate);
+
+/* May only be called during clock init, does not take any locks on clock c. */
+int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
+{
+ int i;
+
+ if (c->dvfs) {
+ pr_err("Error when enabling dvfs on %s for clock %s:\n",
+ d->dvfs_rail->reg_id, c->name);
+ pr_err("DVFS already enabled for %s\n",
+ c->dvfs->dvfs_rail->reg_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ if (d->millivolts[i] == 0)
+ break;
+
+ d->freqs[i] *= d->freqs_mult;
+
+ /* If final frequencies are 0, pad with previous frequency */
+ if (d->freqs[i] == 0 && i > 1)
+ d->freqs[i] = d->freqs[i - 1];
+ }
+ d->num_freqs = i;
+
+ if (d->auto_dvfs) {
+ c->auto_dvfs = true;
+ clk_set_cansleep(c);
+ }
+
+ c->dvfs = d;
+
+ mutex_lock(&dvfs_lock);
+ list_add_tail(&d->reg_node, &d->dvfs_rail->dvfs);
+ mutex_unlock(&dvfs_lock);
+
+ return 0;
+}
+
+static bool tegra_dvfs_all_rails_suspended(void)
+{
+ struct dvfs_rail *rail;
+ bool all_suspended = true;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ if (!rail->suspended && !rail->disabled)
+ all_suspended = false;
+
+ return all_suspended;
+}
+
+static bool tegra_dvfs_from_rails_suspended_or_solved(struct dvfs_rail *to)
+{
+ struct dvfs_relationship *rel;
+ bool all_suspended = true;
+
+ list_for_each_entry(rel, &to->relationships_from, from_node)
+ if (!rel->from->suspended && !rel->from->disabled &&
+ !rel->solved_at_nominal)
+ all_suspended = false;
+
+ return all_suspended;
+}
+
+static int tegra_dvfs_suspend_one(void)
+{
+ struct dvfs_rail *rail;
+ int ret;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ if (!rail->suspended && !rail->disabled &&
+ tegra_dvfs_from_rails_suspended_or_solved(rail)) {
+ ret = dvfs_rail_set_voltage(rail,
+ rail->nominal_millivolts);
+ if (ret)
+ return ret;
+ rail->suspended = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void tegra_dvfs_resume(void)
+{
+ struct dvfs_rail *rail;
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ rail->suspended = false;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ dvfs_rail_update(rail);
+
+ mutex_unlock(&dvfs_lock);
+}
+
+static int tegra_dvfs_suspend(void)
+{
+ int ret = 0;
+
+ mutex_lock(&dvfs_lock);
+
+ while (!tegra_dvfs_all_rails_suspended()) {
+ ret = tegra_dvfs_suspend_one();
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&dvfs_lock);
+
+ if (ret)
+ tegra_dvfs_resume();
+
+ return ret;
+}
+
+static int tegra_dvfs_pm_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ if (tegra_dvfs_suspend())
+ return NOTIFY_STOP;
+ break;
+ case PM_POST_SUSPEND:
+ tegra_dvfs_resume();
+ break;
+ }
+
+ return NOTIFY_OK;
+};
+
+static struct notifier_block tegra_dvfs_nb = {
+ .notifier_call = tegra_dvfs_pm_notify,
+};
+
+static int tegra_dvfs_reboot_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case SYS_RESTART:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ tegra_dvfs_suspend();
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra_dvfs_reboot_nb = {
+ .notifier_call = tegra_dvfs_reboot_notify,
+};
+
+/* must be called with dvfs lock held */
+static void __tegra_dvfs_rail_disable(struct dvfs_rail *rail)
+{
+ int ret;
+
+ ret = dvfs_rail_set_voltage(rail, rail->nominal_millivolts);
+ if (ret)
+ pr_info("dvfs: failed to set regulator %s to disable "
+ "voltage %d\n", rail->reg_id,
+ rail->nominal_millivolts);
+ rail->disabled = true;
+}
+
+/* must be called with dvfs lock held */
+static void __tegra_dvfs_rail_enable(struct dvfs_rail *rail)
+{
+ rail->disabled = false;
+ dvfs_rail_update(rail);
+}
+
+void tegra_dvfs_rail_enable(struct dvfs_rail *rail)
+{
+ mutex_lock(&rail_disable_lock);
+
+ if (rail->disabled) {
+ mutex_lock(&dvfs_lock);
+ __tegra_dvfs_rail_enable(rail);
+ mutex_unlock(&dvfs_lock);
+
+ tegra_dvfs_rail_post_enable(rail);
+ }
+ mutex_unlock(&rail_disable_lock);
+
+}
+
+void tegra_dvfs_rail_disable(struct dvfs_rail *rail)
+{
+ mutex_lock(&rail_disable_lock);
+ if (rail->disabled)
+ goto out;
+
+ /* rail disable will set it to nominal voltage underneath clock
+ framework - need to re-configure clock rates that are not safe
+ at nominal (yes, unsafe at nominal is ugly, but possible). Rate
+ change must be done outside of dvfs lock. */
+ if (tegra_dvfs_rail_disable_prepare(rail)) {
+ pr_info("dvfs: failed to prepare regulator %s to disable\n",
+ rail->reg_id);
+ goto out;
+ }
+
+ mutex_lock(&dvfs_lock);
+ __tegra_dvfs_rail_disable(rail);
+ mutex_unlock(&dvfs_lock);
+out:
+ mutex_unlock(&rail_disable_lock);
+}
+
+int tegra_dvfs_rail_disable_by_name(const char *reg_id)
+{
+ struct dvfs_rail *rail = tegra_dvfs_get_rail_by_name(reg_id);
+ if (!rail)
+ return -EINVAL;
+
+ tegra_dvfs_rail_disable(rail);
+ return 0;
+}
+
+struct dvfs_rail *tegra_dvfs_get_rail_by_name(const char *reg_id)
+{
+ struct dvfs_rail *rail;
+
+ mutex_lock(&dvfs_lock);
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ if (!strcmp(reg_id, rail->reg_id)) {
+ mutex_unlock(&dvfs_lock);
+ return rail;
+ }
+ }
+ mutex_unlock(&dvfs_lock);
+ return NULL;
+}
+
+bool tegra_dvfs_rail_updating(struct clk *clk)
+{
+ return (!clk ? false :
+ (!clk->dvfs ? false :
+ (!clk->dvfs->dvfs_rail ? false :
+ (clk->dvfs->dvfs_rail->updating))));
+}
+
+/*
+ * Iterate through all the dvfs regulators, finding the regulator exported
+ * by the regulator api for each one. Must be called in late init, after
+ * all the regulator api's regulators are initialized.
+ */
+int __init tegra_dvfs_late_init(void)
+{
+ bool connected = true;
+ struct dvfs_rail *rail;
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ if (dvfs_rail_connect_to_regulator(rail))
+ connected = false;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ if (connected)
+ dvfs_rail_update(rail);
+ else
+ __tegra_dvfs_rail_disable(rail);
+
+ mutex_unlock(&dvfs_lock);
+
+ register_pm_notifier(&tegra_dvfs_nb);
+ register_reboot_notifier(&tegra_dvfs_reboot_nb);
+
+ return 0;
+}
+late_initcall(tegra_dvfs_late_init);
+
+#ifdef CONFIG_DEBUG_FS
+static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
+{
+ struct dvfs *da = list_entry(a, struct dvfs, reg_node);
+ struct dvfs *db = list_entry(b, struct dvfs, reg_node);
+ int ret;
+
+ ret = strcmp(da->dvfs_rail->reg_id, db->dvfs_rail->reg_id);
+ if (ret != 0)
+ return ret;
+
+ if (da->cur_millivolts < db->cur_millivolts)
+ return 1;
+ if (da->cur_millivolts > db->cur_millivolts)
+ return -1;
+
+ return strcmp(da->clk_name, db->clk_name);
+}
+
+static int dvfs_tree_show(struct seq_file *s, void *data)
+{
+ struct dvfs *d;
+ struct dvfs_rail *rail;
+ struct dvfs_relationship *rel;
+
+ seq_printf(s, " clock rate mV\n");
+ seq_printf(s, "--------------------------------\n");
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ seq_printf(s, "%s %d mV%s:\n", rail->reg_id,
+ rail->millivolts, rail->disabled ? " disabled" : "");
+ list_for_each_entry(rel, &rail->relationships_from, from_node) {
+ seq_printf(s, " %-10s %-7d mV %-4d mV\n",
+ rel->from->reg_id,
+ rel->from->millivolts,
+ dvfs_solve_relationship(rel));
+ }
+
+ list_sort(NULL, &rail->dvfs, dvfs_tree_sort_cmp);
+
+ list_for_each_entry(d, &rail->dvfs, reg_node) {
+ seq_printf(s, " %-10s %-10lu %-4d mV\n", d->clk_name,
+ d->cur_rate, d->cur_millivolts);
+ }
+ }
+
+ mutex_unlock(&dvfs_lock);
+
+ return 0;
+}
+
+static int dvfs_tree_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dvfs_tree_show, inode->i_private);
+}
+
+static const struct file_operations dvfs_tree_fops = {
+ .open = dvfs_tree_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int rail_stats_show(struct seq_file *s, void *data)
+{
+ int i;
+ struct dvfs_rail *rail;
+
+ seq_printf(s, "%-12s %-10s (bin: %d.%dmV)\n", "millivolts", "time",
+ DVFS_RAIL_STATS_BIN / DVFS_RAIL_STATS_SCALE,
+ ((DVFS_RAIL_STATS_BIN * 100) / DVFS_RAIL_STATS_SCALE) % 100);
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ seq_printf(s, "%s\n", rail->reg_id);
+ dvfs_rail_stats_update(rail, -1, ktime_get());
+
+ seq_printf(s, "%-12d %-10llu\n", 0,
+ cputime64_to_clock_t(msecs_to_jiffies(
+ ktime_to_ms(rail->stats.time_at_mv[0]))));
+
+ for (i = 1; i <= DVFS_RAIL_STATS_TOP_BIN; i++) {
+ ktime_t ktime_zero = ktime_set(0, 0);
+ if (ktime_equal(rail->stats.time_at_mv[i], ktime_zero))
+ continue;
+ seq_printf(s, "%-12d %-10llu\n",
+ rail->min_millivolts + (i - 1) *
+ DVFS_RAIL_STATS_BIN / DVFS_RAIL_STATS_SCALE,
+ cputime64_to_clock_t(msecs_to_jiffies(
+ ktime_to_ms(rail->stats.time_at_mv[i])))
+ );
+ }
+ }
+ mutex_unlock(&dvfs_lock);
+ return 0;
+}
+
+static int rail_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rail_stats_show, inode->i_private);
+}
+
+static const struct file_operations rail_stats_fops = {
+ .open = rail_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init dvfs_debugfs_init(struct dentry *clk_debugfs_root)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("dvfs", S_IRUGO, clk_debugfs_root, NULL,
+ &dvfs_tree_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file("rails", S_IRUGO, clk_debugfs_root, NULL,
+ &rail_stats_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+#endif
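
The core of dvfs.c is the stepping loop in dvfs_rail_set_voltage(): a rail walks toward its target in increments of rail->step, except when jmp_to_zero lets it jump in one go. The sketch below is illustrative only and strips out relationships, regulator calls, and error handling; for example, moving from 1000 mV to 1100 mV with a 50 mV step takes two writes (1050 mV, then 1100 mV).

/* Illustrative sketch of the voltage walk, not the driver function. */
static int walk_to_target(int cur_mv, int target_mv, int step_mv)
{
	int steps = DIV_ROUND_UP(abs(target_mv - cur_mv), step_mv);
	int dir = (target_mv > cur_mv) ? step_mv : -step_mv;

	while (steps--) {
		if (abs(target_mv - cur_mv) > step_mv)
			cur_mv += dir;
		else
			cur_mv = target_mv;
		/* here the real code calls regulator_set_voltage() and
		 * updates any dependent rails before the next step */
	}
	return cur_mv;		/* equals target_mv on success */
}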
diff --git a/arch/arm/mach-tegra/dvfs.h b/arch/arm/mach-tegra/dvfs.h
new file mode 100644
index 000000000000..462eef645a4f
--- /dev/null
+++ b/arch/arm/mach-tegra/dvfs.h
@@ -0,0 +1,165 @@
+/*
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _TEGRA_DVFS_H_
+#define _TEGRA_DVFS_H_
+
+#define MAX_DVFS_FREQS 18
+#define DVFS_RAIL_STATS_TOP_BIN 40
+
+struct clk;
+struct dvfs_rail;
+
+/*
+ * A dvfs_relationship is between two rails, "from" and "to".
+ * When the "from" rail changes, it will call dvfs_rail_update on the
+ * rails in its relationship_to list.
+ * When determining the voltage to set a rail to, each rail in its
+ * relationship_from list is considered.
+ */
+struct dvfs_relationship {
+ struct dvfs_rail *to;
+ struct dvfs_rail *from;
+ int (*solve)(struct dvfs_rail *, struct dvfs_rail *);
+
+ struct list_head to_node; /* node in relationship_to list */
+ struct list_head from_node; /* node in relationship_from list */
+ bool solved_at_nominal;
+};
+
+struct rail_stats {
+ ktime_t time_at_mv[DVFS_RAIL_STATS_TOP_BIN + 1];
+ ktime_t last_update;
+ int last_index;
+ bool off;
+};
+
+struct dvfs_rail {
+ const char *reg_id;
+ int min_millivolts;
+ int max_millivolts;
+ int nominal_millivolts;
+ int step;
+ bool jmp_to_zero;
+ bool disabled;
+ bool updating;
+ bool resolving_to;
+
+ struct list_head node; /* node in dvfs_rail_list */
+ struct list_head dvfs; /* list head of attached dvfs clocks */
+ struct list_head relationships_to;
+ struct list_head relationships_from;
+ struct regulator *reg;
+ int millivolts;
+ int new_millivolts;
+ bool suspended;
+ struct rail_stats stats;
+};
+
+struct dvfs {
+ /* Used only by tegra2_clock.c */
+ const char *clk_name;
+ int speedo_id;
+ int process_id;
+
+ /* Must be initialized before tegra_dvfs_init */
+ int freqs_mult;
+ unsigned long freqs[MAX_DVFS_FREQS];
+ const int *millivolts;
+ struct dvfs_rail *dvfs_rail;
+ bool auto_dvfs;
+
+ /* Filled in by tegra_dvfs_init */
+ int max_millivolts;
+ int num_freqs;
+
+ int cur_millivolts;
+ unsigned long cur_rate;
+ struct list_head node;
+ struct list_head debug_node;
+ struct list_head reg_node;
+};
+
+extern struct dvfs_rail *tegra_cpu_rail;
+
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+void tegra_soc_init_dvfs(void);
+int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d);
+int dvfs_debugfs_init(struct dentry *clk_debugfs_root);
+int tegra_dvfs_late_init(void);
+int tegra_dvfs_init_rails(struct dvfs_rail *dvfs_rails[], int n);
+void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n);
+void tegra_dvfs_rail_enable(struct dvfs_rail *rail);
+void tegra_dvfs_rail_disable(struct dvfs_rail *rail);
+bool tegra_dvfs_rail_updating(struct clk *clk);
+void tegra_dvfs_rail_off(struct dvfs_rail *rail, ktime_t now);
+void tegra_dvfs_rail_on(struct dvfs_rail *rail, ktime_t now);
+void tegra_dvfs_rail_pause(struct dvfs_rail *rail, ktime_t delta, bool on);
+struct dvfs_rail *tegra_dvfs_get_rail_by_name(const char *reg_id);
+int tegra_dvfs_predict_millivolts(struct clk *c, unsigned long rate);
+void tegra_dvfs_core_cap_enable(bool enable);
+void tegra_dvfs_core_cap_level_set(int level);
+#else
+static inline void tegra_soc_init_dvfs(void)
+{}
+static inline int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
+{ return 0; }
+static inline int dvfs_debugfs_init(struct dentry *clk_debugfs_root)
+{ return 0; }
+static inline int tegra_dvfs_late_init(void)
+{ return 0; }
+static inline int tegra_dvfs_init_rails(struct dvfs_rail *dvfs_rails[], int n)
+{ return 0; }
+static inline void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n)
+{}
+static inline void tegra_dvfs_rail_enable(struct dvfs_rail *rail)
+{}
+static inline void tegra_dvfs_rail_disable(struct dvfs_rail *rail)
+{}
+static inline bool tegra_dvfs_rail_updating(struct clk *clk)
+{ return false; }
+static inline void tegra_dvfs_rail_off(struct dvfs_rail *rail, ktime_t now)
+{}
+static inline void tegra_dvfs_rail_on(struct dvfs_rail *rail, ktime_t now)
+{}
+static inline void tegra_dvfs_rail_pause(
+ struct dvfs_rail *rail, ktime_t delta, bool on)
+{}
+static inline struct dvfs_rail *tegra_dvfs_get_rail_by_name(const char *reg_id)
+{ return NULL; }
+static inline int tegra_dvfs_predict_millivolts(struct clk *c, unsigned long rate)
+{ return 0; }
+static inline void tegra_dvfs_core_cap_enable(bool enable)
+{}
+static inline void tegra_dvfs_core_cap_level_set(int level)
+{}
+#endif
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+int tegra_dvfs_rail_disable_prepare(struct dvfs_rail *rail);
+int tegra_dvfs_rail_post_enable(struct dvfs_rail *rail);
+#else
+static inline int tegra_dvfs_rail_disable_prepare(struct dvfs_rail *rail)
+{ return 0; }
+static inline int tegra_dvfs_rail_post_enable(struct dvfs_rail *rail)
+{ return 0; }
+#endif
+
+#endif
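
SoC-specific code fills these structures in. As a hedged illustration of the intended shape only, with made-up names and voltage/frequency numbers that are not real Tegra limits, and assuming the clock lookup helper from clock.c:

static struct dvfs_rail example_vdd_core = {	/* hypothetical */
	.reg_id			= "vdd_core",
	.min_millivolts		= 950,
	.max_millivolts		= 1300,
	.nominal_millivolts	= 1200,
	.step			= 100,
};

static struct dvfs_rail *example_rails[] = {
	&example_vdd_core,
};

static const int example_core_mv[MAX_DVFS_FREQS] = { 950, 1000, 1100 };

static struct dvfs example_sbus_dvfs = {	/* hypothetical */
	.clk_name	= "sbus",
	.freqs_mult	= 1000000,		/* freqs[] below are in MHz */
	.freqs		= { 100, 150, 200 },
	.millivolts	= example_core_mv,	/* zero-terminated by the array */
	.dvfs_rail	= &example_vdd_core,
	.auto_dvfs	= true,
};

static void __init example_soc_init_dvfs(void)
{
	struct clk *c = tegra_get_clock_by_name("sbus");	/* from clock.c */

	tegra_dvfs_init_rails(example_rails, ARRAY_SIZE(example_rails));
	if (c)
		tegra_enable_dvfs_on_clk(c, &example_sbus_dvfs);
}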
diff --git a/arch/arm/mach-tegra/edp.c b/arch/arm/mach-tegra/edp.c
new file mode 100644
index 000000000000..3808978565bd
--- /dev/null
+++ b/arch/arm/mach-tegra/edp.c
@@ -0,0 +1,387 @@
+/*
+ * arch/arm/mach-tegra/edp.c
+ *
+ * Copyright (C) 2011 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <mach/edp.h>
+
+#include "fuse.h"
+
+static const struct tegra_edp_limits *edp_limits;
+static int edp_limits_size;
+
+static const unsigned int *system_edp_limits;
+
+/*
+ * The temperature step size cannot be less than 4C because of the
+ * hysteresis delta.
+ * The code assumes that different temperatures for the same speedo_id /
+ * regulator_cur are adjacent in the table, and that the highest
+ * regulator_cur comes first.
+ */
+static char __initdata tegra_edp_map[] = {
+ 0x00, 0x2f, 0x2d, 0x82, 0x78, 0x78, 0x78, 0x00,
+ 0x2f, 0x3c, 0x82, 0x78, 0x78, 0x78, 0x00, 0x2f,
+ 0x4b, 0x82, 0x78, 0x78, 0x78, 0x00, 0x2f, 0x55,
+ 0x82, 0x78, 0x78, 0x78, 0x00, 0x28, 0x2d, 0x82,
+ 0x78, 0x78, 0x78, 0x00, 0x28, 0x3c, 0x82, 0x78,
+ 0x78, 0x78, 0x00, 0x28, 0x4b, 0x82, 0x78, 0x78,
+ 0x73, 0x00, 0x28, 0x55, 0x82, 0x78, 0x78, 0x69,
+ 0x00, 0x23, 0x2d, 0x82, 0x78, 0x78, 0x78, 0x00,
+ 0x23, 0x3c, 0x82, 0x78, 0x78, 0x6e, 0x00, 0x23,
+ 0x4b, 0x82, 0x78, 0x78, 0x64, 0x00, 0x23, 0x55,
+ 0x82, 0x78, 0x6e, 0x5a, 0x00, 0x1e, 0x2d, 0x82,
+ 0x78, 0x78, 0x69, 0x00, 0x1e, 0x3c, 0x82, 0x78,
+ 0x78, 0x64, 0x00, 0x1e, 0x4b, 0x82, 0x78, 0x6e,
+ 0x5a, 0x00, 0x1e, 0x55, 0x82, 0x78, 0x64, 0x50,
+ 0x00, 0x19, 0x2d, 0x82, 0x78, 0x6e, 0x5a, 0x00,
+ 0x19, 0x3c, 0x82, 0x78, 0x69, 0x55, 0x00, 0x19,
+ 0x4b, 0x82, 0x78, 0x5f, 0x4b, 0x00, 0x19, 0x55,
+ 0x82, 0x73, 0x55, 0x3c, 0x01, 0x2f, 0x2d, 0x82,
+ 0x78, 0x78, 0x78, 0x01, 0x2f, 0x3c, 0x82, 0x78,
+ 0x78, 0x78, 0x01, 0x2f, 0x4b, 0x82, 0x78, 0x78,
+ 0x78, 0x01, 0x2f, 0x55, 0x82, 0x78, 0x78, 0x78,
+ 0x01, 0x28, 0x2d, 0x82, 0x78, 0x78, 0x78, 0x01,
+ 0x28, 0x3c, 0x82, 0x78, 0x78, 0x78, 0x01, 0x28,
+ 0x4b, 0x82, 0x78, 0x78, 0x73, 0x01, 0x28, 0x55,
+ 0x82, 0x78, 0x78, 0x69, 0x01, 0x23, 0x2d, 0x82,
+ 0x78, 0x78, 0x78, 0x01, 0x23, 0x3c, 0x82, 0x78,
+ 0x78, 0x6e, 0x01, 0x23, 0x4b, 0x82, 0x78, 0x78,
+ 0x64, 0x01, 0x23, 0x55, 0x82, 0x78, 0x6e, 0x5a,
+ 0x01, 0x1e, 0x2d, 0x82, 0x78, 0x78, 0x69, 0x01,
+ 0x1e, 0x3c, 0x82, 0x78, 0x78, 0x64, 0x01, 0x1e,
+ 0x4b, 0x82, 0x78, 0x6e, 0x5a, 0x01, 0x1e, 0x55,
+ 0x82, 0x78, 0x64, 0x50, 0x01, 0x19, 0x2d, 0x82,
+ 0x78, 0x6e, 0x5a, 0x01, 0x19, 0x3c, 0x82, 0x78,
+ 0x69, 0x55, 0x01, 0x19, 0x4b, 0x82, 0x78, 0x5f,
+ 0x4b, 0x01, 0x19, 0x55, 0x82, 0x73, 0x55, 0x3c,
+ 0x02, 0x3d, 0x2d, 0x8c, 0x82, 0x82, 0x82, 0x02,
+ 0x3d, 0x3c, 0x8c, 0x82, 0x82, 0x82, 0x02, 0x3d,
+ 0x4b, 0x8c, 0x82, 0x82, 0x82, 0x02, 0x3d, 0x55,
+ 0x8c, 0x82, 0x82, 0x82, 0x02, 0x32, 0x2d, 0x8c,
+ 0x82, 0x82, 0x82, 0x02, 0x32, 0x3c, 0x8c, 0x82,
+ 0x82, 0x82, 0x02, 0x32, 0x4b, 0x8c, 0x82, 0x82,
+ 0x78, 0x02, 0x32, 0x55, 0x8c, 0x82, 0x82, 0x6e,
+ 0x02, 0x28, 0x2d, 0x8c, 0x82, 0x82, 0x78, 0x02,
+ 0x28, 0x3c, 0x8c, 0x82, 0x82, 0x73, 0x02, 0x28,
+ 0x4b, 0x8c, 0x82, 0x78, 0x69, 0x02, 0x28, 0x55,
+ 0x8c, 0x82, 0x6e, 0x5a, 0x02, 0x23, 0x2d, 0x8c,
+ 0x82, 0x82, 0x6e, 0x02, 0x23, 0x3c, 0x8c, 0x82,
+ 0x78, 0x69, 0x02, 0x23, 0x4b, 0x8c, 0x82, 0x6e,
+ 0x5a, 0x02, 0x23, 0x55, 0x8c, 0x82, 0x64, 0x50,
+ 0x03, 0x3d, 0x2d, 0x8c, 0x82, 0x82, 0x82, 0x03,
+ 0x3d, 0x3c, 0x8c, 0x82, 0x82, 0x82, 0x03, 0x3d,
+ 0x4b, 0x8c, 0x82, 0x82, 0x82, 0x03, 0x3d, 0x55,
+ 0x8c, 0x82, 0x82, 0x82, 0x03, 0x32, 0x2d, 0x8c,
+ 0x82, 0x82, 0x82, 0x03, 0x32, 0x3c, 0x8c, 0x82,
+ 0x82, 0x82, 0x03, 0x32, 0x4b, 0x8c, 0x82, 0x82,
+ 0x78, 0x03, 0x32, 0x55, 0x8c, 0x82, 0x82, 0x6e,
+ 0x03, 0x28, 0x2d, 0x8c, 0x82, 0x82, 0x78, 0x03,
+ 0x28, 0x3c, 0x8c, 0x82, 0x82, 0x73, 0x03, 0x28,
+ 0x4b, 0x8c, 0x82, 0x78, 0x69, 0x03, 0x28, 0x55,
+ 0x8c, 0x82, 0x6e, 0x5a, 0x03, 0x23, 0x2d, 0x8c,
+ 0x82, 0x82, 0x6e, 0x03, 0x23, 0x3c, 0x8c, 0x82,
+ 0x78, 0x69, 0x03, 0x23, 0x4b, 0x8c, 0x82, 0x6e,
+ 0x5a, 0x03, 0x23, 0x55, 0x8c, 0x82, 0x64, 0x50,
+ 0x04, 0x32, 0x2d, 0x96, 0x8c, 0x8c, 0x8c, 0x04,
+ 0x32, 0x3c, 0x96, 0x8c, 0x8c, 0x8c, 0x04, 0x32,
+ 0x46, 0x96, 0x8c, 0x8c, 0x8c, 0x04, 0x32, 0x4b,
+ 0x82, 0x78, 0x78, 0x78, 0x04, 0x32, 0x55, 0x82,
+ 0x78, 0x78, 0x78, 0x04, 0x2f, 0x2d, 0x96, 0x8c,
+ 0x8c, 0x8c, 0x04, 0x2f, 0x3c, 0x96, 0x8c, 0x8c,
+ 0x8c, 0x04, 0x2f, 0x46, 0x96, 0x8c, 0x8c, 0x82,
+ 0x04, 0x2f, 0x4b, 0x82, 0x78, 0x78, 0x78, 0x04,
+ 0x2f, 0x55, 0x82, 0x78, 0x78, 0x78, 0x04, 0x28,
+ 0x2d, 0x96, 0x8c, 0x8c, 0x82, 0x04, 0x28, 0x3c,
+ 0x96, 0x8c, 0x8c, 0x82, 0x04, 0x28, 0x46, 0x96,
+ 0x8c, 0x8c, 0x78, 0x04, 0x28, 0x4b, 0x82, 0x78,
+ 0x78, 0x78, 0x04, 0x28, 0x55, 0x82, 0x78, 0x78,
+ 0x6e, 0x04, 0x23, 0x2d, 0x96, 0x8c, 0x8c, 0x78,
+ 0x04, 0x23, 0x3c, 0x96, 0x8c, 0x82, 0x78, 0x04,
+ 0x23, 0x46, 0x96, 0x8c, 0x82, 0x6e, 0x04, 0x23,
+ 0x4b, 0x82, 0x78, 0x78, 0x6e, 0x04, 0x23, 0x55,
+ 0x82, 0x78, 0x78, 0x64, 0x04, 0x1e, 0x2d, 0x96,
+ 0x8c, 0x82, 0x6e, 0x04, 0x1e, 0x3c, 0x96, 0x8c,
+ 0x78, 0x64, 0x04, 0x1e, 0x46, 0x96, 0x8c, 0x78,
+ 0x5a, 0x04, 0x1e, 0x4b, 0x82, 0x78, 0x78, 0x5a,
+ 0x04, 0x1e, 0x55, 0x82, 0x78, 0x69, 0x50, 0x04,
+ 0x19, 0x2d, 0x96, 0x8c, 0x6e, 0x5a, 0x04, 0x19,
+ 0x3c, 0x96, 0x82, 0x6e, 0x55, 0x04, 0x19, 0x46,
+ 0x96, 0x82, 0x64, 0x50, 0x04, 0x19, 0x4b, 0x82,
+ 0x78, 0x64, 0x50, 0x04, 0x19, 0x55, 0x82, 0x78,
+ 0x55, 0x3c, 0x05, 0x64, 0x3c, 0xaa, 0xa0, 0xa0,
+ 0xa0, 0x05, 0x64, 0x55, 0x8c, 0x82, 0x82, 0x82,
+ 0x05, 0x3c, 0x3c, 0x8c, 0x82, 0x82, 0x82, 0x05,
+ 0x3c, 0x55, 0x8c, 0x82, 0x82, 0x82, 0x06, 0x64,
+ 0x3c, 0xaa, 0xa0, 0x82, 0x82, 0x06, 0x64, 0x55,
+ 0x8c, 0x82, 0x82, 0x82, 0x06, 0x3c, 0x3c, 0x8c,
+ 0x82, 0x82, 0x82, 0x06, 0x3c, 0x55, 0x8c, 0x82,
+ 0x82, 0x82, 0x07, 0x3b, 0x2d, 0x82, 0x78, 0x78,
+ 0x78, 0x07, 0x3b, 0x3c, 0x82, 0x78, 0x78, 0x78,
+ 0x07, 0x3b, 0x4b, 0x82, 0x78, 0x78, 0x78, 0x07,
+ 0x3b, 0x5a, 0x82, 0x78, 0x78, 0x78, 0x07, 0x32,
+ 0x2d, 0x82, 0x78, 0x78, 0x78, 0x07, 0x32, 0x3c,
+ 0x82, 0x78, 0x78, 0x78, 0x07, 0x32, 0x4b, 0x82,
+ 0x78, 0x78, 0x78, 0x07, 0x32, 0x5a, 0x82, 0x78,
+ 0x6e, 0x64, 0x07, 0x28, 0x2d, 0x82, 0x78, 0x78,
+ 0x6e, 0x07, 0x28, 0x3c, 0x82, 0x78, 0x78, 0x64,
+ 0x07, 0x28, 0x4b, 0x82, 0x78, 0x78, 0x64, 0x07,
+ 0x28, 0x5a, 0x82, 0x78, 0x64, 0x50, 0x07, 0x23,
+ 0x2d, 0x82, 0x78, 0x78, 0x64, 0x07, 0x23, 0x3c,
+ 0x82, 0x78, 0x78, 0x64, 0x07, 0x23, 0x4b, 0x82,
+ 0x78, 0x64, 0x50, 0x07, 0x23, 0x5a, 0x82, 0x78,
+ 0x5a, 0x46, 0x08, 0x3b, 0x2d, 0x82, 0x78, 0x78,
+ 0x78, 0x08, 0x3b, 0x3c, 0x82, 0x78, 0x78, 0x78,
+ 0x08, 0x3b, 0x4b, 0x82, 0x78, 0x78, 0x78, 0x08,
+ 0x3b, 0x5a, 0x82, 0x78, 0x78, 0x78, 0x08, 0x32,
+ 0x2d, 0x82, 0x78, 0x78, 0x78, 0x08, 0x32, 0x3c,
+ 0x82, 0x78, 0x78, 0x78, 0x08, 0x32, 0x4b, 0x82,
+ 0x78, 0x78, 0x78, 0x08, 0x32, 0x5a, 0x82, 0x78,
+ 0x6e, 0x64, 0x08, 0x28, 0x2d, 0x82, 0x78, 0x78,
+ 0x6e, 0x08, 0x28, 0x3c, 0x82, 0x78, 0x78, 0x64,
+ 0x08, 0x28, 0x4b, 0x82, 0x78, 0x78, 0x64, 0x08,
+ 0x28, 0x5a, 0x82, 0x78, 0x64, 0x50, 0x08, 0x23,
+ 0x2d, 0x82, 0x78, 0x78, 0x64, 0x08, 0x23, 0x3c,
+ 0x82, 0x78, 0x78, 0x64, 0x08, 0x23, 0x4b, 0x82,
+ 0x78, 0x64, 0x50, 0x08, 0x23, 0x5a, 0x82, 0x78,
+ 0x5a, 0x46,
+};
+
+
+static struct system_edp_entry __initdata tegra_system_edp_map[] = {
+
+/* { SKU, power limit (in units of 100 mW), { frequency limits (in units of 10 MHz) } } */
+
+ { 1, 49, {130, 120, 120, 120} },
+ { 1, 44, {130, 120, 120, 110} },
+ { 1, 37, {130, 120, 110, 100} },
+ { 1, 35, {130, 120, 110, 90} },
+ { 1, 29, {130, 120, 100, 80} },
+ { 1, 27, {130, 120, 90, 80} },
+ { 1, 25, {130, 110, 80, 60} },
+ { 1, 21, {130, 100, 80, 40} },
+};
+
+/*
+ * "Safe entry" to be used when no match for speedo_id /
+ * regulator_cur is found; must be the last one
+ */
+static struct tegra_edp_limits edp_default_limits[] = {
+ {85, {1000000, 1000000, 1000000, 1000000} },
+};
+
+
+
+/*
+ * Specify regulator current in mA, e.g. 5000mA
+ * Use 0 for default
+ */
+void __init tegra_init_cpu_edp_limits(unsigned int regulator_mA)
+{
+ int cpu_speedo_id = tegra_cpu_speedo_id();
+ int i, j;
+ struct tegra_edp_limits *e;
+ struct tegra_edp_entry *t = (struct tegra_edp_entry *)tegra_edp_map;
+ int tsize = sizeof(tegra_edp_map)/sizeof(struct tegra_edp_entry);
+
+ if (!regulator_mA) {
+ edp_limits = edp_default_limits;
+ edp_limits_size = ARRAY_SIZE(edp_default_limits);
+ return;
+ }
+
+ for (i = 0; i < tsize; i++) {
+ if (t[i].speedo_id == cpu_speedo_id &&
+ t[i].regulator_100mA <= regulator_mA / 100)
+ break;
+ }
+
+ /* No entry found in tegra_edp_map */
+ if (i >= tsize) {
+ edp_limits = edp_default_limits;
+ edp_limits_size = ARRAY_SIZE(edp_default_limits);
+ return;
+ }
+
+ /* Find all rows for this entry */
+ for (j = i + 1; j < tsize; j++) {
+ if (t[i].speedo_id != t[j].speedo_id ||
+ t[i].regulator_100mA != t[j].regulator_100mA)
+ break;
+ }
+
+ edp_limits_size = j - i;
+ e = kmalloc(sizeof(struct tegra_edp_limits) * edp_limits_size,
+ GFP_KERNEL);
+ BUG_ON(!e);
+
+ for (j = 0; j < edp_limits_size; j++) {
+ e[j].temperature = (int)t[i+j].temperature;
+ e[j].freq_limits[0] = (unsigned int)t[i+j].freq_limits[0] * 10000;
+ e[j].freq_limits[1] = (unsigned int)t[i+j].freq_limits[1] * 10000;
+ e[j].freq_limits[2] = (unsigned int)t[i+j].freq_limits[2] * 10000;
+ e[j].freq_limits[3] = (unsigned int)t[i+j].freq_limits[3] * 10000;
+ }
+
+ if (edp_limits != edp_default_limits)
+ kfree(edp_limits);
+
+ edp_limits = e;
+}
+
+
+void __init tegra_init_system_edp_limits(unsigned int power_limit_mW)
+{
+ int cpu_speedo_id = tegra_cpu_speedo_id();
+ int i;
+ unsigned int *e;
+ struct system_edp_entry *t =
+ (struct system_edp_entry *)tegra_system_edp_map;
+ int tsize = sizeof(tegra_system_edp_map) /
+ sizeof(struct system_edp_entry);
+
+ if (!power_limit_mW) {
+ e = NULL;
+ goto out;
+ }
+
+ for (i = 0; i < tsize; i++)
+ if (t[i].speedo_id == cpu_speedo_id)
+ break;
+
+ if (i >= tsize) {
+ e = NULL;
+ goto out;
+ }
+
+ do {
+ if (t[i].power_limit_100mW <= power_limit_mW / 100)
+ break;
+ i++;
+ } while (i < tsize && t[i].speedo_id == cpu_speedo_id);
+
+ if (i >= tsize || t[i].speedo_id != cpu_speedo_id)
+ i--; /* No low enough entry in the table, use best possible */
+
+ e = kmalloc(sizeof(unsigned int) * 4, GFP_KERNEL);
+ BUG_ON(!e);
+
+ e[0] = (unsigned int)t[i].freq_limits[0] * 10000;
+ e[1] = (unsigned int)t[i].freq_limits[1] * 10000;
+ e[2] = (unsigned int)t[i].freq_limits[2] * 10000;
+ e[3] = (unsigned int)t[i].freq_limits[3] * 10000;
+
+out:
+ kfree(system_edp_limits);
+
+ system_edp_limits = e;
+}
+
+
+void tegra_get_cpu_edp_limits(const struct tegra_edp_limits **limits, int *size)
+{
+ *limits = edp_limits;
+ *size = edp_limits_size;
+}
+
+void tegra_get_system_edp_limits(const unsigned int **limits)
+{
+ *limits = system_edp_limits;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int edp_limit_debugfs_show(struct seq_file *s, void *data)
+{
+ seq_printf(s, "%u\n", tegra_get_edp_limit());
+ return 0;
+}
+
+static int edp_debugfs_show(struct seq_file *s, void *data)
+{
+ int i;
+
+ seq_printf(s, "-- CPU EDP table --\n");
+ for (i = 0; i < edp_limits_size; i++) {
+ seq_printf(s, "%4dC: %10u %10u %10u %10u\n",
+ edp_limits[i].temperature,
+ edp_limits[i].freq_limits[0],
+ edp_limits[i].freq_limits[1],
+ edp_limits[i].freq_limits[2],
+ edp_limits[i].freq_limits[3]);
+ }
+
+ if (system_edp_limits) {
+ seq_printf(s, "\n-- System EDP table --\n");
+ seq_printf(s, "%10u %10u %10u %10u\n",
+ system_edp_limits[0],
+ system_edp_limits[1],
+ system_edp_limits[2],
+ system_edp_limits[3]);
+ }
+
+ return 0;
+}
+
+
+static int edp_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edp_debugfs_show, inode->i_private);
+}
+
+static int edp_limit_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edp_limit_debugfs_show, inode->i_private);
+}
+
+
+static const struct file_operations edp_debugfs_fops = {
+ .open = edp_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations edp_limit_debugfs_fops = {
+ .open = edp_limit_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_edp_debugfs_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("edp", S_IRUGO, NULL, NULL,
+ &edp_debugfs_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file("edp_limit", S_IRUGO, NULL, NULL,
+ &edp_limit_debugfs_fops);
+
+ return 0;
+}
+
+late_initcall(tegra_edp_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
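For reference, a minimal sketch of how the tables above are typically consumed. The board-init caller, the 6000 mA budget, and the helper name are illustrative assumptions, not part of this change; the resulting frequency values are in kHz given the x10000 scaling applied in tegra_init_cpu_edp_limits(), and the struct/function declarations are assumed to be visible through the EDP header.

	#include <linux/init.h>
	#include <linux/kernel.h>

	/* Board init: hand the VDD_CPU regulator budget to the EDP code. */
	static void __init board_init_edp(void)
	{
		tegra_init_cpu_edp_limits(6000);	/* 6 A budget -- assumed */
	}

	/* cpufreq-side helper: per-core frequency cap (kHz) for a temperature. */
	static unsigned int edp_freq_cap(int temp_c, int online_cpus)
	{
		const struct tegra_edp_limits *limits;
		int i, size;

		tegra_get_cpu_edp_limits(&limits, &size);
		/* Table rows are sorted by rising temperature threshold. */
		for (i = 0; i < size - 1; i++)
			if (temp_c <= limits[i].temperature)
				break;
		return limits[i].freq_limits[online_cpus - 1];	/* kHz */
	}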
diff --git a/arch/arm/mach-tegra/fiq.c b/arch/arm/mach-tegra/fiq.c
new file mode 100644
index 000000000000..19f9c059d100
--- /dev/null
+++ b/arch/arm/mach-tegra/fiq.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Brian Swetland <swetland@google.com>
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/iomap.h>
+#include <mach/fiq.h>
+
+#include "board.h"
+
+#define ICTLR_CPU_IER 0x20
+#define ICTLR_CPU_IER_SET 0x24
+#define ICTLR_CPU_IER_CLR 0x28
+#define ICTLR_CPU_IEP_CLASS 0x2C
+
+#define FIRST_LEGACY_IRQ 32
+
+static void __iomem *ictlr_reg_base[] = {
+ IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
+};
+
+static void tegra_legacy_select_fiq(unsigned int irq, bool fiq)
+{
+ void __iomem *base;
+ pr_debug("%s: %d\n", __func__, irq);
+
+ irq -= FIRST_LEGACY_IRQ;
+ base = ictlr_reg_base[irq>>5];
+ writel(fiq << (irq & 31), base + ICTLR_CPU_IEP_CLASS);
+}
+
+static void tegra_fiq_mask(struct irq_data *d)
+{
+ void __iomem *base;
+ int leg_irq;
+
+ if (d->irq < FIRST_LEGACY_IRQ)
+ return;
+
+ leg_irq = d->irq - FIRST_LEGACY_IRQ;
+ base = ictlr_reg_base[leg_irq >> 5];
+ writel(1 << (leg_irq & 31), base + ICTLR_CPU_IER_CLR);
+}
+
+static void tegra_fiq_unmask(struct irq_data *d)
+{
+ void __iomem *base;
+ int leg_irq;
+
+ if (d->irq < FIRST_LEGACY_IRQ)
+ return;
+
+ leg_irq = d->irq - FIRST_LEGACY_IRQ;
+ base = ictlr_reg_base[leg_irq >> 5];
+ writel(1 << (leg_irq & 31), base + ICTLR_CPU_IER_SET);
+}
+
+void tegra_fiq_enable(int irq)
+{
+ void __iomem *base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100);
+ /* enable FIQ */
+ u32 val = readl(base + GIC_CPU_CTRL);
+ val &= ~8; /* pass FIQs through */
+ val |= 2; /* enableNS */
+ writel(val, base + GIC_CPU_CTRL);
+ tegra_legacy_select_fiq(irq, true);
+ tegra_fiq_unmask(irq_get_irq_data(irq));
+}
+
+void tegra_fiq_disable(int irq)
+{
+ tegra_fiq_mask(irq_get_irq_data(irq));
+ tegra_legacy_select_fiq(irq, false);
+}
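A hedged usage sketch for the helpers above, e.g. from a FIQ debugger port driver. The IRQ number and function names are placeholders, not defined by this patch; <mach/fiq.h> is assumed to declare tegra_fiq_enable()/tegra_fiq_disable().

	#include <mach/fiq.h>

	#define MY_UART_IRQ	122	/* board-specific, illustrative */

	static void my_debugger_setup(void)
	{
		/* Route the UART interrupt to the FIQ line and unmask it. */
		tegra_fiq_enable(MY_UART_IRQ);
	}

	static void my_debugger_teardown(void)
	{
		/* Mask it again and return it to the normal IRQ class. */
		tegra_fiq_disable(MY_UART_IRQ);
	}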
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index 1fa26d9a1a68..fcc10afb07a1 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/fuse.c
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010-2011 NVIDIA Corp.
*
* Author:
* Colin Cross <ccross@android.com>
@@ -19,24 +20,85 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <mach/iomap.h>
+#include <mach/tegra_fuse.h>
#include "fuse.h"
+#include "apbio.h"
+#define FUSE_SKU_INFO 0x110
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
#define FUSE_UID_LOW 0x108
#define FUSE_UID_HIGH 0x10c
-#define FUSE_SKU_INFO 0x110
#define FUSE_SPARE_BIT 0x200
+#else
+#define FUSE_VENDOR_CODE 0x200
+#define FUSE_VENDOR_CODE_MASK 0xf
+#define FUSE_FAB_CODE 0x204
+#define FUSE_FAB_CODE_MASK 0x3f
+#define FUSE_LOT_CODE_0 0x208
+#define FUSE_LOT_CODE_1 0x20c
+#define FUSE_WAFER_ID 0x210
+#define FUSE_WAFER_ID_MASK 0x3f
+#define FUSE_X_COORDINATE 0x214
+#define FUSE_X_COORDINATE_MASK 0x1ff
+#define FUSE_Y_COORDINATE 0x218
+#define FUSE_Y_COORDINATE_MASK 0x1ff
+#define FUSE_GPU_INFO 0x390
+#define FUSE_GPU_INFO_MASK (1<<2)
+#define FUSE_SPARE_BIT 0x244
+/* fuse registers used in public fuse data read API */
+#define FUSE_TEST_PROGRAM_REVISION_0 0x128
+/* fuse spare bits are used to get Tj-ADT values */
+#define FUSE_SPARE_BIT_0_0 0x244
+#define NUM_TSENSOR_SPARE_BITS 28
+/* tsensor calibration register */
+#define FUSE_TSENSOR_CALIB_0 0x198
+
+#endif
+
+struct tegra_id {
+ enum tegra_chipid chipid;
+ unsigned int major, minor, netlist, patch;
+ enum tegra_revision revision;
+ char *priv;
+};
+
+static struct tegra_id tegra_id;
+static unsigned int tegra_chip_id;
+static unsigned int tegra_chip_rev;
+
+static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
+ [TEGRA_REVISION_UNKNOWN] = "unknown",
+ [TEGRA_REVISION_A01] = "A01",
+ [TEGRA_REVISION_A02] = "A02",
+ [TEGRA_REVISION_A03] = "A03",
+ [TEGRA_REVISION_A03p] = "A03 prime",
+};
+
+u32 tegra_fuse_readl(unsigned long offset)
+{
+ return tegra_apb_readl(TEGRA_FUSE_BASE + offset);
+}
+
+void tegra_fuse_writel(u32 value, unsigned long offset)
+{
+ tegra_apb_writel(value, TEGRA_FUSE_BASE + offset);
+}
-static inline u32 fuse_readl(unsigned long offset)
+static inline bool get_spare_fuse(int bit)
{
- return readl(IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
+ return tegra_fuse_readl(FUSE_SPARE_BIT + bit * 4);
}
-static inline void fuse_writel(u32 value, unsigned long offset)
+const char *tegra_get_revision_name(void)
{
- writel(value, IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
+ return tegra_revision_name[tegra_get_revision()];
}
void tegra_init_fuse(void)
@@ -44,41 +106,350 @@ void tegra_init_fuse(void)
u32 reg = readl(IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
reg |= 1 << 28;
writel(reg, IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
+ tegra_init_speedo_data();
+
+ pr_info("Tegra Revision: %s "
+ "SKU: 0x%x CPU Process: %d Core Process: %d\n",
+ tegra_get_revision_name(), tegra_sku_id(),
+ tegra_cpu_process_id(), tegra_core_process_id());
+}
- pr_info("Tegra SKU: %d CPU Process: %d Core Process: %d\n",
- tegra_sku_id(), tegra_cpu_process_id(),
- tegra_core_process_id());
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+int tegra_fuse_get_revision(u32 *rev)
+{
+ return -ENOENT;
}
+EXPORT_SYMBOL(tegra_fuse_get_revision);
+
+int tegra_fuse_get_tsensor_calibration_data(u32 *calib)
+{
+ return -ENOENT;
+}
+EXPORT_SYMBOL(tegra_fuse_get_tsensor_calibration_data);
+
+int tegra_fuse_get_tsensor_spare_bits(u32 *spare_bits)
+{
+ return -ENOENT;
+}
+EXPORT_SYMBOL(tegra_fuse_get_tsensor_spare_bits);
+
+#else
+
+int tegra_fuse_get_revision(u32 *rev)
+{
+ /* fuse revision */
+ *rev = tegra_fuse_readl(FUSE_TEST_PROGRAM_REVISION_0);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_get_revision);
+
+int tegra_fuse_get_tsensor_calibration_data(u32 *calib)
+{
+ /* tsensor calibration fuse */
+ *calib = tegra_fuse_readl(FUSE_TSENSOR_CALIB_0);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_get_tsensor_calibration_data);
+
+int tegra_fuse_get_tsensor_spare_bits(u32 *spare_bits)
+{
+ u32 value;
+ int i;
+
+ BUG_ON(NUM_TSENSOR_SPARE_BITS > (sizeof(u32) * 8));
+ if (!spare_bits)
+ return -ENOMEM;
+ *spare_bits = 0;
+ /* spare bits 0-27 */
+ for (i = 0; i < NUM_TSENSOR_SPARE_BITS; i++) {
+ value = tegra_fuse_readl(FUSE_SPARE_BIT_0_0 +
+ (i << 2));
+ if (value)
+ *spare_bits |= BIT(i);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_get_tsensor_spare_bits);
+#endif
unsigned long long tegra_chip_uid(void)
{
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
unsigned long long lo, hi;
- lo = fuse_readl(FUSE_UID_LOW);
- hi = fuse_readl(FUSE_UID_HIGH);
+ lo = tegra_fuse_readl(FUSE_UID_LOW);
+ hi = tegra_fuse_readl(FUSE_UID_HIGH);
return (hi << 32ull) | lo;
+#else
+ u64 uid = 0ull;
+ u32 reg;
+ u32 cid;
+ u32 vendor;
+ u32 fab;
+ u32 lot;
+ u32 wafer;
+ u32 x;
+ u32 y;
+ u32 i;
+
+ /* This used to be so much easier in prior chips. Unfortunately, there
+ is no one-stop shopping for the unique id anymore. It must be
+ constructed from various bits of information burned into the fuses
+ during the manufacturing process. The 64-bit unique id is formed
+ by concatenating several bit fields. The notation used for the
+ various fields is <fieldname:size_in_bits> with the UID composed
+ thusly:
+
+ <CID:4><VENDOR:4><FAB:6><LOT:26><WAFER:6><X:9><Y:9>
+
+ Where:
+
+ Field Bits Position Data
+ ------- ---- -------- ----------------------------------------
+ CID 4 60 Chip id (encoded as zero for T30)
+ VENDOR 4 56 Vendor code
+ FAB 6 50 FAB code
+ LOT 26 24 Lot code (5-digit base-36-coded-decimal,
+ re-encoded to 26 bits binary)
+ WAFER 6 18 Wafer id
+ X 9 9 Wafer X-coordinate
+ Y 9 0 Wafer Y-coordinate
+ ------- ----
+ Total 64
+ */
+
+ /* Get the chip id and encode each chip variant as a unique value. */
+ reg = readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE + 0x804));
+ reg = (reg & 0xFF00) >> 8;
+
+ switch (reg) {
+ case TEGRA_CHIPID_TEGRA3:
+ cid = 0;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ vendor = tegra_fuse_readl(FUSE_VENDOR_CODE) & FUSE_VENDOR_CODE_MASK;
+ fab = tegra_fuse_readl(FUSE_FAB_CODE) & FUSE_FAB_CODE_MASK;
+
+ /* Lot code must be re-encoded from a 5 digit base-36 'BCD' number
+ to a binary number. */
+ lot = 0;
+ reg = tegra_fuse_readl(FUSE_LOT_CODE_0) << 2;
+
+ for (i = 0; i < 5; ++i) {
+ u32 digit = (reg & 0xFC000000) >> 26;
+ BUG_ON(digit >= 36);
+ lot *= 36;
+ lot += digit;
+ reg <<= 6;
+ }
+
+ wafer = tegra_fuse_readl(FUSE_WAFER_ID) & FUSE_WAFER_ID_MASK;
+ x = tegra_fuse_readl(FUSE_X_COORDINATE) & FUSE_X_COORDINATE_MASK;
+ y = tegra_fuse_readl(FUSE_Y_COORDINATE) & FUSE_Y_COORDINATE_MASK;
+
+ uid = ((unsigned long long)cid << 60ull)
+ | ((unsigned long long)vendor << 56ull)
+ | ((unsigned long long)fab << 50ull)
+ | ((unsigned long long)lot << 24ull)
+ | ((unsigned long long)wafer << 18ull)
+ | ((unsigned long long)x << 9ull)
+ | ((unsigned long long)y << 0ull);
+ return uid;
+#endif
+}
+
+unsigned int tegra_spare_fuse(int bit)
+{
+ BUG_ON(bit < 0 || bit > 61);
+ return tegra_fuse_readl(FUSE_SPARE_BIT + bit * 4);
}
int tegra_sku_id(void)
{
int sku_id;
- u32 reg = fuse_readl(FUSE_SKU_INFO);
+ u32 reg = tegra_fuse_readl(FUSE_SKU_INFO);
sku_id = reg & 0xFF;
return sku_id;
}
-int tegra_cpu_process_id(void)
+int tegra_gpu_register_sets(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_HAS_DUAL_3D
+ u32 reg = readl(IO_TO_VIRT(TEGRA_CLK_RESET_BASE + FUSE_GPU_INFO));
+ if (reg & FUSE_GPU_INFO_MASK)
+ return 1;
+ else
+ return 2;
+#else
+ return 1;
+#endif
+}
+
+struct chip_revision {
+ enum tegra_chipid chipid;
+ unsigned int major;
+ unsigned int minor;
+ char prime;
+ enum tegra_revision revision;
+};
+
+#define CHIP_REVISION(id, m, n, p, rev) { \
+ .chipid = TEGRA_CHIPID_##id, \
+ .major = m, \
+ .minor = n, \
+ .prime = p, \
+ .revision = TEGRA_REVISION_##rev }
+
+static struct chip_revision tegra_chip_revisions[] = {
+ CHIP_REVISION(TEGRA2, 1, 2, 0, A02),
+ CHIP_REVISION(TEGRA2, 1, 3, 0, A03),
+ CHIP_REVISION(TEGRA2, 1, 3, 'p', A03p),
+ CHIP_REVISION(TEGRA3, 1, 1, 0, A01),
+ CHIP_REVISION(TEGRA3, 1, 2, 0, A02),
+ CHIP_REVISION(TEGRA3, 1, 3, 0, A03),
+};
+
+static enum tegra_revision tegra_decode_revision(const struct tegra_id *id)
+{
+ enum tegra_revision revision = TEGRA_REVISION_UNKNOWN;
+
+#if defined(CONFIG_TEGRA_SILICON_PLATFORM)
+ int i ;
+ char prime;
+
+ if (id->priv == NULL)
+ prime = 0;
+ else
+ prime = *(id->priv);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_chip_revisions); i++) {
+ if ((id->chipid != tegra_chip_revisions[i].chipid) ||
+ (id->minor != tegra_chip_revisions[i].minor) ||
+ (id->major != tegra_chip_revisions[i].major) ||
+ (prime != tegra_chip_revisions[i].prime))
+ continue;
+
+ revision = tegra_chip_revisions[i].revision;
+ break;
+ }
+
+#elif defined(CONFIG_TEGRA_FPGA_PLATFORM)
+ if ((id->chipid & 0xf0) == TEGRA_CHIPID_TEGRA3) {
+ if ((id->major == 0) && (id->minor == 1)) {
+ unsigned int patch = id->patch & 0xF;
+ if ((id->netlist == 12) && (patch == 12))
+ revision = TEGRA_REVISION_A01;
+ else if ((id->netlist == 12) && (patch > 12))
+ revision = TEGRA_REVISION_A02;
+ else if (id->netlist > 12)
+ revision = TEGRA_REVISION_A02;
+ }
+ }
+#endif
+
+ return revision;
+}
+
+static void tegra_set_tegraid(u32 chipid,
+ u32 major, u32 minor,
+ u32 nlist, u32 patch, const char *priv)
+{
+ tegra_id.chipid = (enum tegra_chipid) chipid;
+ tegra_id.major = major;
+ tegra_id.minor = minor;
+ tegra_id.netlist = nlist;
+ tegra_id.patch = patch;
+ tegra_id.priv = (char *)priv;
+ tegra_id.revision = tegra_decode_revision(&tegra_id);
+}
+
+static void tegra_get_tegraid_from_hw(void)
{
- int cpu_process_id;
- u32 reg = fuse_readl(FUSE_SPARE_BIT);
- cpu_process_id = (reg >> 6) & 3;
- return cpu_process_id;
+ void __iomem *chip_id = IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804;
+ void __iomem *netlist = IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x860;
+ u32 cid = readl(chip_id);
+ u32 nlist = readl(netlist);
+ char *priv = NULL;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (get_spare_fuse(18) || get_spare_fuse(19))
+ priv = "p";
+#endif
+ tegra_set_tegraid((cid >> 8) & 0xff,
+ (cid >> 4) & 0xf,
+ (cid >> 16) & 0xf,
+ (nlist >> 0) & 0xffff,
+ (nlist >> 16) & 0xffff,
+ priv);
}
-int tegra_core_process_id(void)
+enum tegra_chipid tegra_get_chipid(void)
{
- int core_process_id;
- u32 reg = fuse_readl(FUSE_SPARE_BIT);
- core_process_id = (reg >> 12) & 3;
- return core_process_id;
+ if (tegra_id.chipid == TEGRA_CHIPID_UNKNOWN) {
+ /* Boot loader did not pass a valid chip ID.
+ * Get it from hardware */
+ tegra_get_tegraid_from_hw();
+ }
+
+ return tegra_id.chipid;
}
+
+enum tegra_revision tegra_get_revision(void)
+{
+ if (tegra_id.chipid == TEGRA_CHIPID_UNKNOWN) {
+ /* Boot loader did not pass a valid chip ID.
+ * Get it from hardware */
+ tegra_get_tegraid_from_hw();
+ }
+
+ return tegra_id.revision;
+}
+
+static char chippriv[16]; /* Permanent buffer for private string */
+static int __init tegra_bootloader_tegraid(char *str)
+{
+ u32 id[5];
+ int i = 0;
+ char *priv = NULL;
+
+ do {
+ id[i++] = simple_strtoul(str, &str, 16);
+ } while (*str++ && i < ARRAY_SIZE(id));
+
+ if (*(str - 1) == '.') {
+ strncpy(chippriv, str, sizeof(chippriv) - 1);
+ priv = chippriv;
+ if (strlen(str) > sizeof(chippriv) - 1)
+ pr_err("### tegraid.priv in kernel arg truncated\n");
+ }
+
+ while (i < ARRAY_SIZE(id))
+ id[i++] = 0;
+
+ tegra_set_tegraid(id[0], id[1], id[2], id[3], id[4], priv);
+ return 0;
+}
+
+static unsigned int get_chip_id(char *val, struct kernel_param *kp)
+{
+ tegra_chip_id = (unsigned int)tegra_get_chipid();
+ return param_get_uint(val, kp);
+}
+static unsigned int get_chip_rev(char *val, struct kernel_param *kp)
+{
+ tegra_chip_rev = (unsigned int)tegra_get_revision();
+ return param_get_uint(val, kp);
+}
+
+module_param_call(tegra_chip_id, NULL, get_chip_id, &tegra_chip_id, 0444);
+__MODULE_PARM_TYPE(tegra_chip_id, "uint");
+module_param_call(tegra_chip_rev, NULL, get_chip_rev, &tegra_chip_rev, 0444);
+__MODULE_PARM_TYPE(tegra_chip_rev, "uint");
+
+/* tegraid=chipid.major.minor.netlist.patch[.priv] */
+early_param("tegraid", tegra_bootloader_tegraid);
diff --git a/arch/arm/mach-tegra/fuse.h b/arch/arm/mach-tegra/fuse.h
index 584b2e27dbda..88ea8402978a 100644
--- a/arch/arm/mach-tegra/fuse.h
+++ b/arch/arm/mach-tegra/fuse.h
@@ -1,7 +1,8 @@
/*
- * arch/arm/mach-tegra/fuse.c
+ * arch/arm/mach-tegra/fuse.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010-2011 NVIDIA Corp.
*
* Author:
* Colin Cross <ccross@android.com>
@@ -17,8 +18,46 @@
*
*/
+#include <mach/hardware.h>
+
+#define INVALID_PROCESS_ID 99 /* don't expect to have 100 process IDs */
+
unsigned long long tegra_chip_uid(void);
+unsigned int tegra_spare_fuse(int bit);
int tegra_sku_id(void);
+void tegra_init_fuse(void);
+u32 tegra_fuse_readl(unsigned long offset);
+void tegra_fuse_writel(u32 value, unsigned long offset);
+const char *tegra_get_revision_name(void);
+
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+
int tegra_cpu_process_id(void);
int tegra_core_process_id(void);
-void tegra_init_fuse(void);
+int tegra_soc_speedo_id(void);
+void tegra_init_speedo_data(void);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+int tegra_package_id(void);
+int tegra_cpu_speedo_id(void);
+int tegra_cpu_speedo_mv(void);
+int tegra_core_speedo_mv(void);
+#else
+static inline int tegra_package_id(void) { return -1; }
+static inline int tegra_cpu_speedo_id(void) { return 0; }
+static inline int tegra_cpu_speedo_mv(void) { return 1000; }
+static inline int tegra_core_speedo_mv(void) { return 1200; }
+#endif
+
+#else
+
+static inline int tegra_cpu_process_id(void) { return 0; }
+static inline int tegra_core_process_id(void) { return 0; }
+static inline int tegra_cpu_speedo_id(void) { return 0; }
+static inline int tegra_soc_speedo_id(void) { return 0; }
+static inline int tegra_package_id(void) { return -1; }
+static inline int tegra_cpu_speedo_mv(void) { return 1000; }
+static inline int tegra_core_speedo_mv(void) { return 1200; }
+static inline void tegra_init_speedo_data(void) { }
+
+#endif
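A hedged sketch of how the speedo accessors declared above are typically used to index per-bin tables; the table contents and the surrounding function are made up for illustration.

	#include <linux/kernel.h>

	/* Illustrative per-speedo-bin nominal voltages (mV); values are made up. */
	static const int cpu_mv_by_speedo[] = { 1000, 1025, 1050, 1075 };

	static int pick_cpu_nominal_mv(void)
	{
		int id = tegra_cpu_speedo_id();

		if (id < 0 || id >= ARRAY_SIZE(cpu_mv_by_speedo))
			return tegra_cpu_speedo_mv();	/* fused fallback */
		return cpu_mv_by_speedo[id];
	}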
diff --git a/arch/arm/mach-tegra/gic.c b/arch/arm/mach-tegra/gic.c
new file mode 100644
index 000000000000..50cecc4eed64
--- /dev/null
+++ b/arch/arm/mach-tegra/gic.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2010-2011, NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpumask.h> /* Required by asm/hardware/gic.h */
+#include <linux/io.h>
+#include <linux/irqnr.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "gic.h"
+#include "pm.h"
+
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP)
+static void __iomem *gic_cpu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100);
+
+void tegra_gic_cpu_disable(void)
+{
+ writel(0, gic_cpu_base + GIC_CPU_CTRL);
+}
+
+void tegra_gic_cpu_enable(void)
+{
+ writel(1, gic_cpu_base + GIC_CPU_CTRL);
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+void tegra_gic_pass_through_disable(void)
+{
+ u32 val = readl(gic_cpu_base + GIC_CPU_CTRL);
+ val |= 2; /* enableNS = disable GIC pass through */
+ writel(val, gic_cpu_base + GIC_CPU_CTRL);
+}
+
+#endif
+#endif
+
+#if defined(CONFIG_PM_SLEEP)
+
+int tegra_gic_pending_interrupt(void)
+{
+ u32 irq = readl(gic_cpu_base + GIC_CPU_HIGHPRI);
+ irq &= 0x3FF;
+
+ return irq;
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+static void __iomem *gic_dist_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
+static u32 gic_affinity[INT_GIC_NR/4];
+
+void tegra_gic_dist_disable(void)
+{
+ writel(0, gic_dist_base + GIC_DIST_CTRL);
+}
+
+void tegra_gic_dist_enable(void)
+{
+ writel(1, gic_dist_base + GIC_DIST_CTRL);
+}
+
+void tegra_gic_disable_affinity(void)
+{
+ unsigned int i;
+
+ BUG_ON(is_lp_cluster());
+
+ /* The GIC distributor TARGET register is one byte per IRQ. */
+ for (i = 32; i < INT_GIC_NR; i += 4) {
+ /* Save the affinity. */
+ gic_affinity[i/4] = __raw_readl(gic_dist_base +
+ GIC_DIST_TARGET + i);
+
+ /* Force this interrupt to CPU0. */
+ __raw_writel(0x01010101, gic_dist_base + GIC_DIST_TARGET + i);
+ }
+
+ wmb();
+}
+
+void tegra_gic_restore_affinity(void)
+{
+ unsigned int i;
+
+ BUG_ON(is_lp_cluster());
+
+ /* The GIC distributor TARGET register is one byte per IRQ. */
+ for (i = 32; i < INT_GIC_NR; i += 4) {
+#ifdef CONFIG_BUG
+ u32 reg = __raw_readl(gic_dist_base + GIC_DIST_TARGET + i);
+ if (reg & 0xFEFEFEFE)
+ panic("GIC affinity changed!");
+#endif
+ /* Restore this interrupt's affinity. */
+ __raw_writel(gic_affinity[i/4], gic_dist_base +
+ GIC_DIST_TARGET + i);
+ }
+
+ wmb();
+}
+
+void tegra_gic_affinity_to_cpu0(void)
+{
+ unsigned int i;
+
+ BUG_ON(is_lp_cluster());
+
+ for (i = 32; i < INT_GIC_NR; i += 4)
+ __raw_writel(0x01010101, gic_dist_base + GIC_DIST_TARGET + i);
+ wmb();
+}
+#endif
+#endif
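A hedged sketch of how the affinity helpers above pair up around a cluster power-gating sequence; the surrounding function is hypothetical.

	static void example_cluster_power_down(void)
	{
		tegra_gic_disable_affinity();	/* park all SPIs on CPU0, save targets */

		/* ... power-gate secondary CPUs, enter LP2 on CPU0, resume ... */

		tegra_gic_restore_affinity();	/* restore the saved SPI targets */
	}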
diff --git a/arch/arm/mach-tegra/gic.h b/arch/arm/mach-tegra/gic.h
new file mode 100644
index 000000000000..94dab6e581af
--- /dev/null
+++ b/arch/arm/mach-tegra/gic.h
@@ -0,0 +1,49 @@
+/*
+ * arch/arm/mach-tegra/include/mach/gic.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_GIC_H_
+#define _MACH_TEGRA_GIC_H_
+
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP)
+
+void tegra_gic_cpu_disable(void);
+void tegra_gic_cpu_enable(void);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+void tegra_gic_pass_through_disable(void);
+
+#endif
+#endif
+
+
+#if defined(CONFIG_PM_SLEEP)
+
+int tegra_gic_pending_interrupt(void);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+void tegra_gic_dist_disable(void);
+void tegra_gic_dist_enable(void);
+
+void tegra_gic_disable_affinity(void);
+void tegra_gic_restore_affinity(void);
+void tegra_gic_affinity_to_cpu0(void);
+
+#endif
+#endif
+
+#endif /* _MACH_TEGRA_GIC_H_ */
diff --git a/arch/arm/mach-tegra/gpio-names.h b/arch/arm/mach-tegra/gpio-names.h
index f28220a641b2..cb3c5ce29c0f 100644
--- a/arch/arm/mach-tegra/gpio-names.h
+++ b/arch/arm/mach-tegra/gpio-names.h
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/include/mach/gpio-names.h
*
* Copyright (c) 2010 Google, Inc
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Erik Gilling <konkers@google.com>
@@ -243,5 +244,26 @@
#define TEGRA_GPIO_PBB5 221
#define TEGRA_GPIO_PBB6 222
#define TEGRA_GPIO_PBB7 223
-
+#define TEGRA_GPIO_PCC0 224
+#define TEGRA_GPIO_PCC1 225
+#define TEGRA_GPIO_PCC2 226
+#define TEGRA_GPIO_PCC3 227
+#define TEGRA_GPIO_PCC4 228
+#define TEGRA_GPIO_PCC5 229
+#define TEGRA_GPIO_PCC6 230
+#define TEGRA_GPIO_PCC7 231
+#define TEGRA_GPIO_PDD0 232
+#define TEGRA_GPIO_PDD1 233
+#define TEGRA_GPIO_PDD2 234
+#define TEGRA_GPIO_PDD3 235
+#define TEGRA_GPIO_PDD4 236
+#define TEGRA_GPIO_PDD5 237
+#define TEGRA_GPIO_PDD6 238
+#define TEGRA_GPIO_PDD7 239
+#define TEGRA_GPIO_PEE0 240
+#define TEGRA_GPIO_PEE1 241
+#define TEGRA_GPIO_PEE2 242
+#define TEGRA_GPIO_PEE3 243
+#define TEGRA_GPIO_INVALID 244
+#define TEGRA_MAX_GPIO 245
#endif
diff --git a/arch/arm/mach-tegra/headsmp.S b/arch/arm/mach-tegra/headsmp.S
index b5349b2f13d2..4763528a5f16 100644
--- a/arch/arm/mach-tegra/headsmp.S
+++ b/arch/arm/mach-tegra/headsmp.S
@@ -1,61 +1,313 @@
+/*
+ * arch/arm/mach-tegra/headsmp.S
+ *
+ * CPU initialization routines for Tegra SoCs
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ * Copyright (c) 2011 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
#include <linux/linkage.h>
#include <linux/init.h>
- .section ".text.head", "ax"
- __CPUINIT
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "asm_macros.h"
+#include "reset.h"
+#include "sleep.h"
+
+#define DEBUG_CPU_RESET_HANDLER 0 /* Non-zero enables debug code */
+
+#define PMC_SCRATCH41 0x140
+#define RESET_DATA(x) ((TEGRA_RESET_##x)*4)
+
+
+#ifdef CONFIG_SMP
/*
- * Tegra specific entry point for secondary CPUs.
- * The secondary kernel init calls v7_flush_dcache_all before it enables
- * the L1; however, the L1 comes out of reset in an undefined state, so
- * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
- * of cache lines with uninitialized data and uninitialized tags to get
- * written out to memory, which does really unpleasant things to the main
- * processor. We fix this by performing an invalidate, rather than a
- * clean + invalidate, before jumping into the kernel.
+ * tegra_secondary_startup
+ *
+ * Initial secondary processor boot vector; jumps to kernel's
+ * secondary_startup routine. Used for initial boot and hotplug
+ * of secondary CPUs.
*/
-ENTRY(v7_invalidate_l1)
- mov r0, #0
- mcr p15, 2, r0, c0, c0, 0
- mrc p15, 1, r0, c0, c0, 0
-
- ldr r1, =0x7fff
- and r2, r1, r0, lsr #13
-
- ldr r1, =0x3ff
-
- and r3, r1, r0, lsr #3 @ NumWays - 1
- add r2, r2, #1 @ NumSets
-
- and r0, r0, #0x7
- add r0, r0, #4 @ SetShift
-
- clz r1, r3 @ WayShift
- add r4, r3, #1 @ NumWays
-1: sub r2, r2, #1 @ NumSets--
- mov r3, r4 @ Temp = NumWays
-2: subs r3, r3, #1 @ Temp--
- mov r5, r3, lsl r1
- mov r6, r2, lsl r0
- orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
- mcr p15, 0, r5, c7, c6, 2
- bgt 2b
- cmp r2, #0
- bgt 1b
- dsb
- isb
- mov pc, lr
-ENDPROC(v7_invalidate_l1)
-
ENTRY(tegra_secondary_startup)
- msr cpsr_fsxc, #0xd3
- bl v7_invalidate_l1
- mrc p15, 0, r0, c0, c0, 5
- and r0, r0, #15
- ldr r1, =0x6000f100
- str r0, [r1]
-1: ldr r2, [r1]
- cmp r0, r2
- beq 1b
- b secondary_startup
+ bl tegra_invalidate_l1
+ bl tegra_enable_coresite
+ b secondary_startup
ENDPROC(tegra_secondary_startup)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * tegra_resume
+ *
+ * CPU boot vector used when restarting a CPU following
+ * an LP2 transition. Also branched to by LP0 and LP1 resume after
+ * re-enabling SDRAM.
+ */
+ENTRY(tegra_resume)
+ bl tegra_enable_coresite
+ bl tegra_invalidate_l1
+
+ cpu_id r0
+ cmp r0, #0 @ CPU0?
+ bne tegra_cpu_resume_phys @ no
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ @ Clear the flow controller flags for this CPU.
+ mov32 r2, TEGRA_FLOW_CTRL_BASE+8 @ CPU0 CSR
+ ldr r1, [r2]
+ orr r1, r1, #(1 << 15) | (1 << 14) @ write to clear event & intr
+ movw r0, #0x0FFD @ enable, cluster_switch, immed, & bitmaps
+ bic r1, r1, r0
+ str r1, [r2]
+#endif
+
+ /* enable SCU */
+ mov32 r0, TEGRA_ARM_PERIF_BASE
+ ldr r1, [r0]
+ orr r1, r1, #1
+ str r1, [r0]
+
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ /* wake up (should have specified args?) */
+ bl tegra_generic_smc
+#endif
+
+ b tegra_cpu_resume_phys
+ENDPROC(tegra_resume)
+#endif
+
+/*
+ * tegra_invalidate_l1
+ *
+ * Invalidates the L1 data cache (no clean) during initial boot of a CPU
+ *
+ * Corrupted registers: r0-r6
+ */
+tegra_invalidate_l1:
+ mov r0, #0
+ mcr p15, 2, r0, c0, c0, 0
+ mrc p15, 1, r0, c0, c0, 0
+
+ movw r1, #0x7fff
+ and r2, r1, r0, lsr #13
+
+ movw r1, #0x3ff
+
+ and r3, r1, r0, lsr #3 @ NumWays - 1
+ add r2, r2, #1 @ NumSets
+
+ and r0, r0, #0x7
+ add r0, r0, #4 @ SetShift
+
+ clz r1, r3 @ WayShift
+ add r4, r3, #1 @ NumWays
+1: sub r2, r2, #1 @ NumSets--
+ mov r3, r4 @ Temp = NumWays
+2: subs r3, r3, #1 @ Temp--
+ mov r5, r3, lsl r1
+ mov r6, r2, lsl r0
+ orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+ mcr p15, 0, r5, c7, c6, 2
+ bgt 2b
+ cmp r2, #0
+ bgt 1b
+ dsb
+ isb
+ mov pc, lr
+
+ /* Enable CoreSight access on this CPU */
+tegra_enable_coresite:
+ mov32 r0, 0xC5ACCE55
+ mcr p14, 0, r0, c7, c12, 6
+ mov pc, lr
+
+/*
+ * __tegra_cpu_reset_handler_halt_failed:
+ *
+ * Alternate entry point for reset handler for cases where the
+ * WFI halt failed to take effect.
+ *
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(__tegra_cpu_reset_handler_start)
+
+/*
+ * __tegra_cpu_reset_handler:
+ *
+ * Common handler for all CPU reset events.
+ *
+ * Register usage within the reset handler:
+ *
+ * R7 = CPU present (to the OS) mask
+ * R8 = CPU in LP1 state mask
+ * R9 = CPU in LP2 state mask
+ * R10 = CPU number
+ * R11 = CPU mask
+ * R12 = pointer to reset handler data
+ *
+ * NOTE: This code is copied to IRAM. All code and data accesses
+ * must be position-independent.
+ */
+
+ .align L1_CACHE_SHIFT
+ENTRY(__tegra_cpu_reset_handler)
+
+#if DEBUG_CPU_RESET_HANDLER
+ mov32 r0, 0xC5ACCE55
+ mcr p14, 0, r0, c7, c12, 6 @ Enable CoreSight access
+ b .
+#endif
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ cpsid aif, 0x13 @ SVC mode, interrupts disabled
+ mrc p15, 0, r0, c0, c0, 0 @ read main ID register
+ and r5, r0, #0x00f00000 @ variant
+ and r6, r0, #0x0000000f @ revision
+ orr r6, r6, r5, lsr #20-4 @ combine variant and revision
+#ifdef CONFIG_ARM_ERRATA_743622
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ teqne r6, #0x27 @ present in r2p7
+ teqne r6, #0x29 @ present in r2p9
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 6 @ set bit #6
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#endif
+ mrc p15, 0, r10, c0, c0, 5 @ MPIDR
+ and r10, r10, #0x3 @ R10 = CPU number
+ mov r11, #1
+ mov r11, r11, lsl r10 @ R11 = CPU mask
+ adr r12, __tegra_cpu_reset_handler_data
+
+#ifdef CONFIG_SMP
+ /* Does the OS know about this CPU? */
+ ldr r7, [r12, #RESET_DATA(MASK_PRESENT)]
+ tst r7, r11 @ if !present
+ bleq __die @ CPU not present (to OS)
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* If CPU1, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
+ mov32 r6, TEGRA_PMC_BASE
+ mov r0, #0
+ cmp r10, #0
+ strne r0, [r6, #PMC_SCRATCH41]
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+ /* Waking up from LP1? */
+ ldr r8, [r12, #RESET_DATA(MASK_LP1)]
+ tst r8, r11 @ if in_lp1
+ beq __is_not_lp1
+ cmp r10, #0
+ bne __die @ only CPU0 can be here
+ ldr lr, [r12, #RESET_DATA(STARTUP_LP1)]
+ cmp lr, #0
+ bleq __die @ no LP1 startup handler
+ bx lr
+__is_not_lp1:
+#endif
+
+ /* Waking up from LP2? */
+ ldr r9, [r12, #RESET_DATA(MASK_LP2)]
+ tst r9, r11 @ if in_lp2
+ beq __is_not_lp2
+ ldr lr, [r12, #RESET_DATA(STARTUP_LP2)]
+ cmp lr, #0
+ bleq __die @ no LP2 startup handler
+ bx lr
+
+__is_not_lp2:
+
+#ifdef CONFIG_SMP
+ /* This must be a secondary CPU boot (initial or hotplug); CPU0
+ cannot be here. */
+ cmp r10, #0
+ bleq __die @ CPU0 cannot be here
+ ldr lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
+ cmp lr, #0
+ bleq __die @ no secondary startup handler
+ bx lr
+#endif
+
+/*
+ * We don't know why the CPU reset. Just kill it.
+ * The LR register will contain the address we died at + 4.
+ */
+
+__die:
+ sub lr, lr, #4
+ mov32 r7, TEGRA_PMC_BASE
+ str lr, [r7, #PMC_SCRATCH41]
+
+ mov32 r7, TEGRA_CLK_RESET_BASE
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ mov32 r0, 0x1111
+ mov r1, r0, lsl r10
+ str r1, [r7, #0x340] @ CLK_RST_CPU_CMPLX_SET
+#else
+ mov32 r6, TEGRA_FLOW_CTRL_BASE
+
+ cmp r10, #0
+ moveq r1, #FLOW_CTRL_HALT_CPU0_EVENTS
+ moveq r2, #FLOW_CTRL_CPU0_CSR
+ movne r1, r10, lsl #3
+ addne r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
+ addne r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)
+
+ /* Clear CPU "event" and "interrupt" flags and power gate
+ it when halting but not before it is in the "WFI" state. */
+ ldr r0, [r6, +r2]
+ orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+ orr r0, r0, #FLOW_CTRL_CSR_ENABLE
+ str r0, [r6, +r2]
+
+ /* Unconditionally halt this CPU */
+ mov r0, #FLOW_CTRL_WAITEVENT
+ str r0, [r6, +r1]
+ ldr r0, [r6, +r1] @ memory barrier
+
+ dsb
+ isb
+ wfi @ CPU should be power gated here
+
+ /* If the CPU didn't power-gate above, just kill its clock. */
+
+ mov r0, r11, lsl #8
+ str r0, [r7, #0x348] @ CLK_CPU_CMPLX_SET
+#endif
+ /* If the CPU still isn't dead, just spin here. */
+ b .
+
+ENDPROC(__tegra_cpu_reset_handler)
+ .align L1_CACHE_SHIFT
+ .type __tegra_cpu_reset_handler_data, %object
+ .globl __tegra_cpu_reset_handler_data
+__tegra_cpu_reset_handler_data:
+ .rept TEGRA_RESET_DATA_SIZE
+ .long 0
+ .endr
+ .size __tegra_cpu_reset_handler_data, . - __tegra_cpu_reset_handler_data
+ .align L1_CACHE_SHIFT
+ENTRY(__tegra_cpu_reset_handler_end)
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index f3294040d357..d44e457e7182 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -1,119 +1,83 @@
/*
- * linux/arch/arm/mach-realview/hotplug.c
+ * arch/arm/mach-tegra/hotplug.c
*
- * Copyright (C) 2002 ARM Ltd.
- * All Rights Reserved
+ * Copyright (C) 2010-2011 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
-#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/smp.h>
+#include <asm/cpu_pm.h>
#include <asm/cacheflush.h>
-static inline void cpu_enter_lowpower(void)
-{
- unsigned int v;
+#include <mach/iomap.h>
- flush_cache_all();
- asm volatile(
- " mcr p15, 0, %1, c7, c5, 0\n"
- " mcr p15, 0, %1, c7, c10, 4\n"
- /*
- * Turn off coherency
- */
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, #0x20\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
- : "r" (0), "Ir" (CR_C)
- : "cc");
-}
+#include "gic.h"
+#include "sleep.h"
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, #0x20\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C)
- : "cc");
-}
+#define CPU_CLOCK(cpu) (0x1<<(8+cpu))
-static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
-{
- /*
- * there is no power-control hardware on this platform, so all
- * we can do is put the core into WFI; this is safe as the calling
- * code will have already disabled interrupts
- */
- for (;;) {
- /*
- * here's the WFI
- */
- asm(".word 0xe320f003\n"
- :
- :
- : "memory", "cc");
-
- /*if (pen_release == cpu) {*/
- /*
- * OK, proper wakeup, we're done
- */
- break;
- /*}*/
-
- /*
- * Getting here, means that we have come out of WFI without
- * having been woken up - this shouldn't happen
- *
- * Just note it happening - when we're woken, we can report
- * its occurrence.
- */
- (*spurious)++;
- }
-}
+#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+/* For Tegra2 use the software-written value of the reset register for status.*/
+#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET
+#else
+#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
+#endif
int platform_cpu_kill(unsigned int cpu)
{
+ unsigned int reg;
+
+ do {
+ reg = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_relax();
+ } while (!(reg & (1<<cpu)));
+
+ reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+
return 1;
}
-/*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
- */
void platform_cpu_die(unsigned int cpu)
{
- int spurious = 0;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Flush the L1 data cache. */
+ flush_cache_all();
- /*
- * we're ready for shutdown now, so do it
- */
- cpu_enter_lowpower();
- platform_do_lowpower(cpu, &spurious);
+ /* Place the current CPU in reset. */
+ tegra2_hotplug_shutdown();
+#else
+ /* Disable GIC CPU interface for this CPU. */
+ tegra_gic_cpu_disable();
- /*
- * bring this CPU back into the world of cache
- * coherency, and then restore interrupts
- */
- cpu_leave_lowpower();
+ /* Tegra3 enters low-power states via WFI. Keep legacy IRQs from
+ propagating to the CPU core so they cannot cause a fall-through of
+ the WFI; the GIC output is enabled afterwards, but at this point the
+ CPU is dying and no interrupt should have affinity to it. */
+ tegra_gic_pass_through_disable();
+
+ /* Flush the L1 data cache. */
+ flush_cache_all();
+
+ /* Shut down the current CPU. */
+ tegra3_hotplug_shutdown();
+#endif
- if (spurious)
- pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+ /* Should never return here. */
+ BUG();
}
int platform_cpu_disable(unsigned int cpu)
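For context, these hooks are reached through the generic CPU-hotplug path: the dying CPU lands in platform_cpu_die() while a surviving CPU calls platform_cpu_kill(). A hedged sketch, assuming cpu_down() is available to the caller in this kernel:

	#include <linux/cpu.h>

	/* Offlining CPU1 from kernel code; the sysfs path
	 * "echo 0 > /sys/devices/system/cpu/cpu1/online" is equivalent. */
	static int offline_cpu1_example(void)
	{
		return cpu_down(1);
	}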
diff --git a/arch/arm/mach-tegra/i2c_error_recovery.c b/arch/arm/mach-tegra/i2c_error_recovery.c
new file mode 100644
index 000000000000..a3ac4e122a8f
--- /dev/null
+++ b/arch/arm/mach-tegra/i2c_error_recovery.c
@@ -0,0 +1,103 @@
+/*
+ * arch/arm/mach-tegra/i2c_error_recovery.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include "board.h"
+
+#define RETRY_MAX_COUNT (9*8+1) /* I2C controller supports eight-byte burst transfers */
+
+int arb_lost_recovery(int scl_gpio, int sda_gpio)
+{
+ int ret;
+ int retry = RETRY_MAX_COUNT;
+ int recovered_successfully = 0;
+ int val;
+
+ if ((!scl_gpio) || (!sda_gpio)) {
+ pr_err("not proper input:scl_gpio 0x%08x,"
+ "sda_gpio 0x%08x\n", scl_gpio, sda_gpio);
+ return -EINVAL;;
+ }
+
+ ret = gpio_request(scl_gpio, "scl_gpio");
+ if (ret < 0) {
+ pr_err("error in gpio 0x%08x request 0x%08x\n",
+ scl_gpio, ret);
+ return -EINVAL;
+ }
+ tegra_gpio_enable(scl_gpio);
+
+ ret = gpio_request(sda_gpio, "sda_gpio");
+ if (ret < 0) {
+ pr_err("error in gpio 0x%08x request 0x%08x\n",
+ sda_gpio, ret);
+ goto err;
+ }
+ tegra_gpio_enable(sda_gpio);
+ gpio_direction_input(sda_gpio);
+
+ while (retry--) {
+ gpio_direction_output(scl_gpio,0);
+ udelay(5);
+ gpio_direction_output(scl_gpio,1);
+ udelay(5);
+
+ /* check whether the SDA stuck-low condition has been released */
+ val = gpio_get_value(sda_gpio);
+ if (val) {
+ /* send START */
+ gpio_direction_output(sda_gpio,0);
+ udelay(5);
+
+ /* send STOP in next clock cycle */
+ gpio_direction_output(scl_gpio,0);
+ udelay(5);
+ gpio_direction_output(scl_gpio,1);
+ udelay(5);
+ gpio_direction_output(sda_gpio,1);
+ udelay(5);
+
+ recovered_successfully = 1;
+ break;
+ }
+ }
+
+ gpio_free(scl_gpio);
+ tegra_gpio_disable(scl_gpio);
+ gpio_free(sda_gpio);
+ tegra_gpio_disable(sda_gpio);
+
+ if (likely(recovered_successfully)) {
+ pr_err("arbitration lost recovered by re-try-count 0x%08x\n",
+ RETRY_MAX_COUNT - retry);
+ return 0;
+ } else {
+ pr_err("Un-recovered arbitration lost.\n");
+ return -EINVAL;
+ }
+
+err:
+ gpio_free(scl_gpio);
+ tegra_gpio_disable(scl_gpio);
+ return ret;
+}
+
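A hypothetical caller of the recovery helper above, e.g. an I2C bus driver that detected arbitration loss; the GPIO choices are board-specific placeholders and the declaration of arb_lost_recovery() is assumed to come from "board.h".

	#include <linux/errno.h>

	#include "board.h"		/* arb_lost_recovery() declaration assumed */
	#include "gpio-names.h"		/* TEGRA_GPIO_* definitions */

	#define BOARD_I2C1_SCL_GPIO	TEGRA_GPIO_PC4	/* placeholder */
	#define BOARD_I2C1_SDA_GPIO	TEGRA_GPIO_PC5	/* placeholder */

	static int i2c1_recover_bus(void)
	{
		int ret = arb_lost_recovery(BOARD_I2C1_SCL_GPIO,
					    BOARD_I2C1_SDA_GPIO);

		if (ret)
			return -EIO;	/* bus still stuck; fail the transfer */
		return 0;		/* safe to retry the transfer */
	}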
diff --git a/arch/arm/mach-tegra/include/mach/arb_sema.h b/arch/arm/mach-tegra/include/mach/arb_sema.h
new file mode 100644
index 000000000000..9283f079cf61
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/arb_sema.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/mach-tegra/include/mach/arb_sema.h
+ *
+ * Hardware arbitration semaphore interface
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_ARB_SEMA_H
+#define __MACH_TEGRA_ARB_SEMA_H
+
+enum tegra_arb_module {
+ TEGRA_ARB_BSEV = 0,
+ TEGRA_ARB_BSEA,
+};
+
+int tegra_arb_mutex_lock_timeout(enum tegra_arb_module lock, int msecs);
+
+int tegra_arb_mutex_unlock(enum tegra_arb_module lock);
+
+#endif
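A hedged usage sketch for the arbitration-semaphore API declared above; the 500 ms timeout and the protected operation are illustrative.

	#include <mach/arb_sema.h>

	static int do_bsea_operation(void)
	{
		int ret = tegra_arb_mutex_lock_timeout(TEGRA_ARB_BSEA, 500);

		if (ret)
			return ret;	/* could not win the hardware arbitration */

		/* ... access the BSEA engine here ... */

		return tegra_arb_mutex_unlock(TEGRA_ARB_BSEA);
	}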
diff --git a/arch/arm/mach-tegra/include/mach/audio.h b/arch/arm/mach-tegra/include/mach/audio.h
new file mode 100644
index 000000000000..5950ececae00
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/audio.h
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/mach-tegra/include/mach/audio.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_TEGRA_AUDIO_H
+#define __ARCH_ARM_MACH_TEGRA_AUDIO_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/i2s.h>
+
+#define FIFO1 0
+#define FIFO2 1
+
+/* FIXME: this is not enforced by the hardware. */
+#define I2S_FIFO_TX FIFO1
+#define I2S_FIFO_RX FIFO2
+
+#define TEGRA_AUDIO_ENABLE_TX 1
+#define TEGRA_AUDIO_ENABLE_RX 2
+
+struct tegra_audio_platform_data {
+ bool i2s_master;
+ bool dsp_master;
+ int i2s_master_clk; /* When I2S mode and master, the framesync rate. */
+ int dsp_master_clk; /* When DSP mode and master, the framesync rate. */
+ bool dma_on;
+ unsigned long i2s_clk_rate;
+ const char *dap_clk;
+ const char *audio_sync_clk;
+
+ int mode; /* I2S, LJM, RJM, etc. */
+ int fifo_fmt;
+ int bit_size;
+ int i2s_bus_width; /* 32-bit for 16-bit packed I2S */
+ int dsp_bus_width; /* 16-bit for DSP data format */
+ int mask; /* enable tx and rx? */
+ bool stereo_capture; /* True if hardware supports stereo */
+ void *driver_data;
+};
+
+#endif /* __ARCH_ARM_MACH_TEGRA_AUDIO_H */
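An illustrative board-side instance of the platform data above; every value is a placeholder (the mode/format/bit-size fields would normally be filled with constants from <mach/i2s.h>), not taken from this patch.

	#include <mach/audio.h>

	static struct tegra_audio_platform_data board_audio_pdata = {
		.i2s_master	= true,
		.dma_on		= true,
		.i2s_master_clk	= 44100,	/* frame-sync rate, assumed */
		.i2s_clk_rate	= 11289600,	/* 256 * fs, assumed */
		.dap_clk	= "clk_dev1",	/* clock name assumed */
		.audio_sync_clk	= "audio_2x",	/* clock name assumed */
		.mode		= 0,		/* I2S; constants from <mach/i2s.h> */
		.bit_size	= 16,
		.i2s_bus_width	= 32,		/* 16-bit packed I2S */
		.dsp_bus_width	= 16,
		.mask		= TEGRA_AUDIO_ENABLE_TX | TEGRA_AUDIO_ENABLE_RX,
		.stereo_capture	= true,
	};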
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index c8baf8f80d23..abd7b11dc8b9 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -6,6 +6,8 @@
* Author:
* Erik Gilling <konkers@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -21,11 +23,47 @@
#define __MACH_CLK_H
struct clk;
+struct dvfs;
+struct notifier_block;
+
+enum tegra_clk_ex_param {
+ TEGRA_CLK_VI_INP_SEL,
+ TEGRA_CLK_DTV_INVERT,
+ TEGRA_CLK_NAND_PAD_DIV2_ENB,
+ TEGRA_CLK_PLLD_CSI_OUT_ENB,
+ TEGRA_CLK_PLLD_DSI_OUT_ENB,
+ TEGRA_CLK_PLLD_MIPI_MUX_SEL,
+};
void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+int tegra_dvfs_set_rate(struct clk *c, unsigned long rate);
+#else
+static inline int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
+{ return 0; }
+#endif
unsigned long clk_get_rate_all_locked(struct clk *c);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void tegra_sdmmc_tap_delay(struct clk *c, int delay);
+#else
+static inline void tegra_sdmmc_tap_delay(struct clk *c, int delay)
+{
+}
+#endif
+int tegra_dvfs_rail_disable_by_name(const char *reg_id);
+int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting);
+int tegra_register_clk_rate_notifier(struct clk *c, struct notifier_block *nb);
+void tegra_unregister_clk_rate_notifier(
+ struct clk *c, struct notifier_block *nb);
+
+/**
+ * tegra_is_clk_enabled - check whether the clock is enabled
+ * @clk: clock source
+ *
+ * Returns the clock's enable reference count.
+ */
+int tegra_is_clk_enabled(struct clk *clk);
#endif
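A hedged sketch of the extended clock controls declared above; the clock name, the device pointer, and the warning are illustrative.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <mach/clk.h>

	static int route_plld_to_dsi(struct device *dev)
	{
		struct clk *plld = clk_get(dev, "pll_d");	/* name assumed */

		if (IS_ERR(plld))
			return PTR_ERR(plld);

		/* Enable the PLLD output that feeds the DSI block. */
		tegra_clk_cfg_ex(plld, TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);

		if (!tegra_is_clk_enabled(plld))
			pr_warn("pll_d not enabled yet\n");

		clk_put(plld);
		return 0;
	}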
diff --git a/arch/arm/mach-tegra/include/mach/csi.h b/arch/arm/mach-tegra/include/mach/csi.h
new file mode 100644
index 000000000000..8797f98f3cc4
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/csi.h
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/mach-tegra/include/mach/csi.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_CSI_H
+#define __MACH_TEGRA_CSI_H
+
+#define CSI_CILA_MIPI_CAL_CONFIG_0 0x22a
+#define MIPI_CAL_TERMOSA(x) (((x) & 0x1f) << 0)
+
+#define CSI_CILB_MIPI_CAL_CONFIG_0 0x22b
+#define MIPI_CAL_TERMOSB(x) (((x) & 0x1f) << 0)
+
+
+#define CSI_DSI_MIPI_CAL_CONFIG 0x234
+#define MIPI_CAL_HSPDOSD(x) (((x) & 0x1f) << 16)
+#define MIPI_CAL_HSPUOSD(x) (((x) & 0x1f) << 8)
+
+#define CSI_MIPIBIAS_PAD_CONFIG 0x235
+#define PAD_DRIV_DN_REF(x) (((x) & 0x7) << 16)
+#define PAD_DRIV_UP_REF(x) (((x) & 0x7) << 8)
+
+int tegra_vi_csi_readl(u32 offset, u32 *val);
+int tegra_vi_csi_writel(u32 value, u32 offset);
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/dc.h b/arch/arm/mach-tegra/include/mach/dc.h
new file mode 100644
index 000000000000..caca9d84c1be
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/dc.h
@@ -0,0 +1,558 @@
+/*
+ * arch/arm/mach-tegra/include/mach/dc.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_DC_H
+#define __MACH_TEGRA_DC_H
+
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <drm/drm_fixed.h>
+
+#define TEGRA_MAX_DC 2
+#define DC_N_WINDOWS 3
+
+
+/* DSI pixel data format */
+enum {
+ TEGRA_DSI_PIXEL_FORMAT_16BIT_P,
+ TEGRA_DSI_PIXEL_FORMAT_18BIT_P,
+ TEGRA_DSI_PIXEL_FORMAT_18BIT_NP,
+ TEGRA_DSI_PIXEL_FORMAT_24BIT_P,
+};
+
+/* DSI virtual channel number */
+enum {
+ TEGRA_DSI_VIRTUAL_CHANNEL_0,
+ TEGRA_DSI_VIRTUAL_CHANNEL_1,
+ TEGRA_DSI_VIRTUAL_CHANNEL_2,
+ TEGRA_DSI_VIRTUAL_CHANNEL_3,
+};
+
+/* DSI transmit method for video data */
+enum {
+ TEGRA_DSI_VIDEO_TYPE_VIDEO_MODE,
+ TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE,
+};
+
+/* DSI HS clock mode */
+enum {
+ TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS,
+ TEGRA_DSI_VIDEO_CLOCK_TX_ONLY,
+};
+
+/* DSI burst mode setting in video mode. Each mode is assigned a fixed
+ * value; these values must not change, since the DSI clock calculation
+ * depends on them. */
+enum {
+ TEGRA_DSI_VIDEO_NONE_BURST_MODE = 0,
+ TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END = 1,
+ TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED = 2,
+ TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED = 3,
+ TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED = 4,
+ TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED = 5,
+ TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED = 6,
+};
+
+enum {
+ TEGRA_DSI_PACKET_CMD,
+ TEGRA_DSI_DELAY_MS,
+};
+
+struct tegra_dsi_cmd {
+ u8 cmd_type;
+ u8 data_id;
+ union {
+ u16 data_len;
+ u16 delay_ms;
+ struct{
+ u8 data0;
+ u8 data1;
+ }sp;
+ }sp_len_dly;
+ u8 *pdata;
+};
+
+#define DSI_CMD_SHORT(di, p0, p1) { \
+ .cmd_type = TEGRA_DSI_PACKET_CMD, \
+ .data_id = di, \
+ .sp_len_dly.sp.data0 = p0, \
+ .sp_len_dly.sp.data1 = p1, \
+ }
+#define DSI_DLY_MS(ms) { \
+ .cmd_type = TEGRA_DSI_DELAY_MS, \
+ .sp_len_dly.delay_ms = ms, \
+ }
+
+#define DSI_CMD_LONG(di, ptr) { \
+ .cmd_type = TEGRA_DSI_PACKET_CMD, \
+ .data_id = di, \
+ .sp_len_dly.data_len = ARRAY_SIZE(ptr), \
+ .pdata = ptr, \
+ }
+
+struct dsi_phy_timing_ns {
+ u16 t_hsdexit_ns;
+ u16 t_hstrail_ns;
+ u16 t_hsprepr_ns;
+ u16 t_datzero_ns;
+
+ u16 t_clktrail_ns;
+ u16 t_clkpost_ns;
+ u16 t_clkzero_ns;
+ u16 t_tlpx_ns;
+};
+
+struct tegra_dsi_out {
+ u8 n_data_lanes; /* required */
+ u8 pixel_format; /* required */
+ u8 refresh_rate; /* required */
+ u8 panel_reset; /* required */
+ u8 virtual_channel; /* required */
+ u8 dsi_instance;
+ u8 chip_id;
+ u8 chip_rev;
+
+ bool panel_has_frame_buffer; /* required*/
+
+ struct tegra_dsi_cmd* dsi_init_cmd; /* required */
+ u16 n_init_cmd; /* required */
+
+ struct tegra_dsi_cmd* dsi_early_suspend_cmd;
+ u16 n_early_suspend_cmd;
+
+ struct tegra_dsi_cmd* dsi_late_resume_cmd;
+ u16 n_late_resume_cmd;
+
+ struct tegra_dsi_cmd* dsi_suspend_cmd; /* required */
+ u16 n_suspend_cmd; /* required */
+
+ u8 video_data_type; /* required */
+ u8 video_clock_mode;
+ u8 video_burst_mode;
+
+ u16 panel_buffer_size_byte;
+ u16 panel_reset_timeout_msec;
+
+ bool hs_cmd_mode_supported;
+ bool hs_cmd_mode_on_blank_supported;
+ bool enable_hs_clock_on_lp_cmd_mode;
+	bool no_pkt_seq_eot; /* 1st generation panels may not
+					* support EOT. Don't set this for
+					* most panels. */
+ bool te_polarity_low;
+ bool power_saving_suspend;
+
+ u32 max_panel_freq_khz;
+ u32 lp_cmd_mode_freq_khz;
+ u32 lp_read_cmd_mode_freq_khz;
+ u32 hs_clk_in_lp_cmd_mode_freq_khz;
+ u32 burst_mode_freq_khz;
+
+ struct dsi_phy_timing_ns phy_timing;
+};
+
+enum {
+ TEGRA_DC_STEREO_MODE_2D,
+ TEGRA_DC_STEREO_MODE_3D
+};
+
+enum {
+ TEGRA_DC_STEREO_LANDSCAPE,
+ TEGRA_DC_STEREO_PORTRAIT
+};
+
+struct tegra_stereo_out {
+ int mode_2d_3d;
+ int orientation;
+
+ void (*set_mode)(int mode);
+ void (*set_orientation)(int orientation);
+};
+
+struct tegra_dc_mode {
+ int pclk;
+ int h_ref_to_sync;
+ int v_ref_to_sync;
+ int h_sync_width;
+ int v_sync_width;
+ int h_back_porch;
+ int v_back_porch;
+ int h_active;
+ int v_active;
+ int h_front_porch;
+ int v_front_porch;
+ int stereo_mode;
+ u32 flags;
+};
+
+#define TEGRA_DC_MODE_FLAG_NEG_V_SYNC (1 << 0)
+#define TEGRA_DC_MODE_FLAG_NEG_H_SYNC (1 << 1)
+
+enum {
+ TEGRA_DC_OUT_RGB,
+ TEGRA_DC_OUT_HDMI,
+ TEGRA_DC_OUT_DSI,
+};
+
+struct tegra_dc_out_pin {
+ int name;
+ int pol;
+};
+
+enum {
+ TEGRA_DC_OUT_PIN_DATA_ENABLE,
+ TEGRA_DC_OUT_PIN_H_SYNC,
+ TEGRA_DC_OUT_PIN_V_SYNC,
+ TEGRA_DC_OUT_PIN_PIXEL_CLOCK,
+};
+
+enum {
+ TEGRA_DC_OUT_PIN_POL_LOW,
+ TEGRA_DC_OUT_PIN_POL_HIGH,
+};
+
+enum {
+ TEGRA_DC_DISABLE_DITHER = 1,
+ TEGRA_DC_ORDERED_DITHER,
+ TEGRA_DC_ERRDIFF_DITHER,
+};
+
+typedef u8 tegra_dc_bl_output[256];
+typedef u8 *p_tegra_dc_bl_output;
+
+struct tegra_dc_sd_blp {
+ u16 time_constant;
+ u8 step;
+};
+
+struct tegra_dc_sd_fc {
+ u8 time_limit;
+ u8 threshold;
+};
+
+struct tegra_dc_sd_rgb {
+ u8 r;
+ u8 g;
+ u8 b;
+};
+
+struct tegra_dc_sd_agg_priorities {
+ u8 pri_lvl;
+ u8 agg[4];
+};
+
+struct tegra_dc_sd_settings {
+ unsigned enable;
+ bool use_auto_pwm;
+ u8 hw_update_delay;
+ u8 aggressiveness;
+ short bin_width;
+ u8 phase_in_settings;
+ u8 phase_in_adjustments;
+ u8 cmd;
+ u8 final_agg;
+ u16 cur_agg_step;
+ u16 phase_settings_step;
+ u16 phase_adj_step;
+ u16 num_phase_in_steps;
+
+ struct tegra_dc_sd_agg_priorities agg_priorities;
+
+ bool use_vid_luma;
+ struct tegra_dc_sd_rgb coeff;
+
+ struct tegra_dc_sd_fc fc;
+ struct tegra_dc_sd_blp blp;
+ u8 bltf[4][4][4];
+ struct tegra_dc_sd_rgb lut[4][9];
+
+ atomic_t *sd_brightness;
+ struct platform_device *bl_device;
+};
+
+enum {
+ NO_CMD = 0x0,
+ ENABLE = 0x1,
+ DISABLE = 0x2,
+ PHASE_IN = 0x4,
+ AGG_CHG = 0x8,
+};
+
+enum {
+ TEGRA_PIN_OUT_CONFIG_SEL_LHP0_LD21,
+ TEGRA_PIN_OUT_CONFIG_SEL_LHP1_LD18,
+ TEGRA_PIN_OUT_CONFIG_SEL_LHP2_LD19,
+ TEGRA_PIN_OUT_CONFIG_SEL_LVP0_LVP0_Out,
+ TEGRA_PIN_OUT_CONFIG_SEL_LVP1_LD20,
+
+ TEGRA_PIN_OUT_CONFIG_SEL_LM1_M1,
+ TEGRA_PIN_OUT_CONFIG_SEL_LM1_LD21,
+ TEGRA_PIN_OUT_CONFIG_SEL_LM1_PM1,
+
+ TEGRA_PIN_OUT_CONFIG_SEL_LDI_LD22,
+ TEGRA_PIN_OUT_CONFIG_SEL_LPP_LD23,
+ TEGRA_PIN_OUT_CONFIG_SEL_LDC_SDC,
+ TEGRA_PIN_OUT_CONFIG_SEL_LSPI_DE,
+};
+
+struct tegra_dc_out {
+ int type;
+ unsigned flags;
+
+ /* size in mm */
+ unsigned h_size;
+ unsigned v_size;
+
+ int dcc_bus;
+ int hotplug_gpio;
+ const char *parent_clk;
+
+ unsigned max_pixclock;
+ unsigned order;
+ unsigned align;
+ unsigned depth;
+ unsigned dither;
+
+ struct tegra_dc_mode *modes;
+ int n_modes;
+
+ struct tegra_dsi_out *dsi;
+ struct tegra_stereo_out *stereo;
+
+ unsigned height; /* mm */
+ unsigned width; /* mm */
+
+ struct tegra_dc_out_pin *out_pins;
+ unsigned n_out_pins;
+
+ struct tegra_dc_sd_settings *sd_settings;
+
+ u8 *out_sel_configs;
+ unsigned n_out_sel_configs;
+
+ int (*enable)(void);
+ int (*postpoweron)(void);
+ int (*disable)(void);
+
+ int (*hotplug_init)(void);
+ int (*postsuspend)(void);
+};
+
+/* bits for tegra_dc_out.flags */
+#define TEGRA_DC_OUT_HOTPLUG_HIGH (0 << 1)
+#define TEGRA_DC_OUT_HOTPLUG_LOW (1 << 1)
+#define TEGRA_DC_OUT_HOTPLUG_MASK (1 << 1)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_ALWAYS_ON (0 << 2)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND (1 << 2)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_MASK (1 << 2)
+#define TEGRA_DC_OUT_CONTINUOUS_MODE (0 << 3)
+#define TEGRA_DC_OUT_ONE_SHOT_MODE (1 << 3)
+#define TEGRA_DC_OUT_N_SHOT_MODE (1 << 4)
+
+#define TEGRA_DC_ALIGN_MSB 0
+#define TEGRA_DC_ALIGN_LSB 1
+
+#define TEGRA_DC_ORDER_RED_BLUE 0
+#define TEGRA_DC_ORDER_BLUE_RED 1
+
+struct tegra_dc;
+struct nvmap_handle_ref;
+
+struct tegra_dc_csc {
+ unsigned short yof;
+ unsigned short kyrgb;
+ unsigned short kur;
+ unsigned short kvr;
+ unsigned short kug;
+ unsigned short kvg;
+ unsigned short kub;
+ unsigned short kvb;
+};
+
+/* palette lookup table */
+struct tegra_dc_lut {
+ u8 r[256];
+ u8 g[256];
+ u8 b[256];
+};
+
+struct tegra_dc_win {
+ u8 idx;
+ u8 fmt;
+ u8 ppflags; /* see TEGRA_WIN_PPFLAG* */
+ u32 flags;
+
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ dma_addr_t phys_addr_u;
+ dma_addr_t phys_addr_v;
+ unsigned stride;
+ unsigned stride_uv;
+ fixed20_12 x;
+ fixed20_12 y;
+ fixed20_12 w;
+ fixed20_12 h;
+ unsigned out_x;
+ unsigned out_y;
+ unsigned out_w;
+ unsigned out_h;
+ unsigned z;
+
+ struct tegra_dc_csc csc;
+
+ int dirty;
+ int underflows;
+ struct tegra_dc *dc;
+
+ struct nvmap_handle_ref *cur_handle;
+ unsigned bandwidth;
+ unsigned new_bandwidth;
+ struct tegra_dc_lut lut;
+};
+
+#define TEGRA_WIN_PPFLAG_CP_ENABLE (1 << 0) /* enable RGB color lut */
+#define TEGRA_WIN_PPFLAG_CP_FBOVERRIDE (1 << 1) /* override fbdev color lut */
+
+#define TEGRA_WIN_FLAG_ENABLED (1 << 0)
+#define TEGRA_WIN_FLAG_BLEND_PREMULT (1 << 1)
+#define TEGRA_WIN_FLAG_BLEND_COVERAGE (1 << 2)
+#define TEGRA_WIN_FLAG_INVERT_H (1 << 3)
+#define TEGRA_WIN_FLAG_INVERT_V (1 << 4)
+#define TEGRA_WIN_FLAG_TILED (1 << 5)
+#define TEGRA_WIN_FLAG_H_FILTER (1 << 6)
+#define TEGRA_WIN_FLAG_V_FILTER (1 << 7)
+
+
+#define TEGRA_WIN_BLEND_FLAGS_MASK \
+ (TEGRA_WIN_FLAG_BLEND_PREMULT | TEGRA_WIN_FLAG_BLEND_COVERAGE)
+
+/* Note: These are the actual values written to the DC_WIN_COLOR_DEPTH register
+ * and may change in new tegra architectures.
+ */
+#define TEGRA_WIN_FMT_P1 0
+#define TEGRA_WIN_FMT_P2 1
+#define TEGRA_WIN_FMT_P4 2
+#define TEGRA_WIN_FMT_P8 3
+#define TEGRA_WIN_FMT_B4G4R4A4 4
+#define TEGRA_WIN_FMT_B5G5R5A 5
+#define TEGRA_WIN_FMT_B5G6R5 6
+#define TEGRA_WIN_FMT_AB5G5R5 7
+#define TEGRA_WIN_FMT_B8G8R8A8 12
+#define TEGRA_WIN_FMT_R8G8B8A8 13
+#define TEGRA_WIN_FMT_B6x2G6x2R6x2A8 14
+#define TEGRA_WIN_FMT_R6x2G6x2B6x2A8 15
+#define TEGRA_WIN_FMT_YCbCr422 16
+#define TEGRA_WIN_FMT_YUV422 17
+#define TEGRA_WIN_FMT_YCbCr420P 18
+#define TEGRA_WIN_FMT_YUV420P 19
+#define TEGRA_WIN_FMT_YCbCr422P 20
+#define TEGRA_WIN_FMT_YUV422P 21
+#define TEGRA_WIN_FMT_YCbCr422R 22
+#define TEGRA_WIN_FMT_YUV422R 23
+#define TEGRA_WIN_FMT_YCbCr422RA 24
+#define TEGRA_WIN_FMT_YUV422RA 25
+
+struct tegra_fb_data {
+ int win;
+
+ int xres;
+ int yres;
+ int bits_per_pixel; /* -1 means autodetect */
+
+ unsigned long flags;
+};
+
+#define TEGRA_FB_FLIP_ON_PROBE (1 << 0)
+
+struct tegra_dc_platform_data {
+ unsigned long flags;
+ unsigned long emc_clk_rate;
+ struct tegra_dc_out *default_out;
+ struct tegra_fb_data *fb;
+};
+
+#define TEGRA_DC_FLAG_ENABLED (1 << 0)
+
+struct tegra_dc *tegra_dc_get_dc(unsigned idx);
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win);
+bool tegra_dc_get_connected(struct tegra_dc *);
+
+void tegra_dc_blank(struct tegra_dc *dc);
+
+void tegra_dc_enable(struct tegra_dc *dc);
+void tegra_dc_disable(struct tegra_dc *dc);
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc, int i);
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc, int i);
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, int i, u32 val);
+
+/* tegra_dc_update_windows and tegra_dc_sync_windows do not support windows
+ * with different dcs in one call
+ */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n);
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n);
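+
+/*
+ * Illustrative sketch (the "dc" pointer and window indices below are
+ * hypothetical): a simple flip of two windows on the same dc would be
+ *
+ *	struct tegra_dc_win *wins[2];
+ *
+ *	wins[0] = tegra_dc_get_window(dc, 0);
+ *	wins[1] = tegra_dc_get_window(dc, 1);
+ *	tegra_dc_update_windows(wins, ARRAY_SIZE(wins));
+ *	tegra_dc_sync_windows(wins, ARRAY_SIZE(wins));
+ */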
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode);
+struct fb_videomode;
+int tegra_dc_set_fb_mode(struct tegra_dc *dc, const struct fb_videomode *fbmode,
+ bool stereo_mode);
+
+unsigned tegra_dc_get_out_height(const struct tegra_dc *dc);
+unsigned tegra_dc_get_out_width(const struct tegra_dc *dc);
+unsigned tegra_dc_get_out_max_pixclock(const struct tegra_dc *dc);
+
+/* PM0 and PM1 signal control */
+#define TEGRA_PWM_PM0 0
+#define TEGRA_PWM_PM1 1
+
+struct tegra_dc_pwm_params {
+ int which_pwm;
+ void (*switch_to_sfio)(int);
+ int gpio_conf_to_sfio;
+ unsigned int period;
+ unsigned int clk_div;
+ unsigned int clk_select;
+ unsigned int duty_cycle;
+};
+
+void tegra_dc_config_pwm(struct tegra_dc *dc, struct tegra_dc_pwm_params *cfg);
+
+int tegra_dsi_send_panel_short_cmd(struct tegra_dc *dc, u8 *pdata, u8 data_len);
+
+int tegra_dc_update_csc(struct tegra_dc *dc, int win_index);
+
+int tegra_dc_update_lut(struct tegra_dc *dc, int win_index, int fboverride);
+
+/*
+ * In order to get a dc's current EDID, first call tegra_dc_get_edid() from an
+ * interruptible context. The returned value (if non-NULL) points to a
+ * snapshot of the current state; after copying data from it, call
+ * tegra_dc_put_edid() on that pointer. Do not dereference anything through
+ * that pointer after calling tegra_dc_put_edid().
+ */
+struct tegra_dc_edid {
+ size_t len;
+ u8 buf[0];
+};
+struct tegra_dc_edid *tegra_dc_get_edid(struct tegra_dc *dc);
+void tegra_dc_put_edid(struct tegra_dc_edid *edid);
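+
+/*
+ * Illustrative sketch of the sequence described above (error handling is
+ * omitted; "dc", "buf" and "buf_len" are hypothetical):
+ *
+ *	struct tegra_dc_edid *edid = tegra_dc_get_edid(dc);
+ *
+ *	if (edid) {
+ *		memcpy(buf, edid->buf, min(buf_len, edid->len));
+ *		tegra_dc_put_edid(edid);
+ *	}
+ */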
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/delay.h b/arch/arm/mach-tegra/include/mach/delay.h
new file mode 100644
index 000000000000..2defb7b9b658
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/delay.h
@@ -0,0 +1,41 @@
+/*
+ * arch/arm/mach-tegra/include/mach/delay.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MACH_TEGRA_DELAY_H
+#define __MACH_TEGRA_DELAY_H
+
+/* needed by loops_per_jiffy calculations */
+extern void __delay(int loops);
+
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long usecs);
+
+/* we don't have any restrictions on maximum udelay length, but we'll enforce
+ * the same restriction as the ARM default so we don't introduce any
+ * incompatibilities in drivers.
+ */
+extern void __bad_udelay(void);
+
+#define MAX_UDELAY_MS 2
+
+#define udelay(n) \
+ ((__builtin_constant_p(n) && (n) > (MAX_UDELAY_MS * 1000)) ? \
+ __bad_udelay() : \
+ __udelay(n))
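+
+/*
+ * For example (illustrative only): udelay(100) expands to a plain
+ * __udelay(100) call, while udelay(5000) with a compile-time constant
+ * argument fails at link time via __bad_udelay(); busy waits that long
+ * should use mdelay(), or msleep()/usleep_range() where sleeping is
+ * allowed.
+ */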
+
+#endif /* defined(__MACH_TEGRA_DELAY_H) */
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index d0132e8031a1..42084b697b7f 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -1,7 +1,7 @@
/*
* arch/arm/mach-tegra/include/mach/dma.h
*
- * Copyright (c) 2008-2009, NVIDIA Corporation.
+ * Copyright (c) 2008-2010, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,9 +30,13 @@ struct tegra_dma_channel;
#define TEGRA_DMA_REQ_SEL_CNTR 0
#define TEGRA_DMA_REQ_SEL_I2S_2 1
+#define TEGRA_DMA_REQ_SEL_APBIF_CH0 TEGRA_DMA_REQ_SEL_I2S_2
#define TEGRA_DMA_REQ_SEL_I2S_1 2
+#define TEGRA_DMA_REQ_SEL_APBIF_CH1 TEGRA_DMA_REQ_SEL_I2S_1
#define TEGRA_DMA_REQ_SEL_SPD_I 3
+#define TEGRA_DMA_REQ_SEL_APBIF_CH2 TEGRA_DMA_REQ_SEL_SPD_I
#define TEGRA_DMA_REQ_SEL_UI_I 4
+#define TEGRA_DMA_REQ_SEL_APBIF_CH3 TEGRA_DMA_REQ_SEL_UI_I
#define TEGRA_DMA_REQ_SEL_MIPI 5
#define TEGRA_DMA_REQ_SEL_I2S2_2 6
#define TEGRA_DMA_REQ_SEL_I2S2_1 7
@@ -40,6 +44,7 @@ struct tegra_dma_channel;
#define TEGRA_DMA_REQ_SEL_UARTB 9
#define TEGRA_DMA_REQ_SEL_UARTC 10
#define TEGRA_DMA_REQ_SEL_SPI 11
+#define TEGRA_DMA_REQ_SEL_DTV TEGRA_DMA_REQ_SEL_SPI
#define TEGRA_DMA_REQ_SEL_AC97 12
#define TEGRA_DMA_REQ_SEL_ACMODEM 13
#define TEGRA_DMA_REQ_SEL_SL4B 14
@@ -54,12 +59,20 @@ struct tegra_dma_channel;
#define TEGRA_DMA_REQ_SEL_I2C3 23
#define TEGRA_DMA_REQ_SEL_DVC_I2C 24
#define TEGRA_DMA_REQ_SEL_OWR 25
+#define TEGRA_DMA_REQ_SEL_I2C4 26
+#define TEGRA_DMA_REQ_SEL_SL2B5 27
+#define TEGRA_DMA_REQ_SEL_SL2B6 28
#define TEGRA_DMA_REQ_SEL_INVALID 31
+#define TEGRA_DMA_MAX_TRANSFER_SIZE 0x10000
+
enum tegra_dma_mode {
TEGRA_DMA_SHARED = 1,
- TEGRA_DMA_MODE_CONTINOUS = 2,
- TEGRA_DMA_MODE_ONESHOT = 4,
+ TEGRA_DMA_MODE_CONTINUOUS = 2,
+ TEGRA_DMA_MODE_CONTINUOUS_DOUBLE = TEGRA_DMA_MODE_CONTINUOUS,
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE = 4,
+ TEGRA_DMA_MODE_ONESHOT = 8,
};
enum tegra_dma_req_error {
@@ -143,13 +156,23 @@ void tegra_dma_flush(struct tegra_dma_channel *ch);
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
+int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
+bool tegra_dma_is_stopped(struct tegra_dma_channel *ch);
-struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode, const char namefmt[], ...);
void tegra_dma_free_channel(struct tegra_dma_channel *ch);
+int tegra_dma_cancel(struct tegra_dma_channel *ch);
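+
+/*
+ * Illustrative sketch of the new allocation interface (the mode, the
+ * printf-style channel name and the "pdev" below are hypothetical):
+ *
+ *	struct tegra_dma_channel *ch;
+ *
+ *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
+ *					"dma-%s", dev_name(&pdev->dev));
+ *	...
+ *	tegra_dma_free_channel(ch);
+ */
+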
int __init tegra_dma_init(void);
+#else /* !defined(CONFIG_TEGRA_SYSTEM_DMA) */
+static inline int tegra_dma_init(void)
+{
+ return 0;
+}
+
#endif
#endif
diff --git a/arch/arm/mach-tegra/include/mach/edp.h b/arch/arm/mach-tegra/include/mach/edp.h
new file mode 100644
index 000000000000..48321cae4959
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/edp.h
@@ -0,0 +1,80 @@
+/*
+ * arch/arm/mach-tegra/include/mach/edp.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_EDP_H
+#define __MACH_EDP_H
+
+#include <linux/debugfs.h>
+
+struct tegra_edp_entry {
+ char speedo_id;
+ char regulator_100mA;
+ char temperature;
+ char freq_limits[4];
+};
+
+struct tegra_edp_limits {
+ int temperature;
+ unsigned int freq_limits[4];
+};
+
+struct system_edp_entry {
+ char speedo_id;
+ char power_limit_100mW;
+ char freq_limits[4];
+};
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+
+
+int tegra_edp_update_thermal_zone(int temperature);
+void tegra_init_cpu_edp_limits(unsigned int regulator_mA);
+void tegra_init_system_edp_limits(unsigned int power_limit_mW);
+void tegra_get_cpu_edp_limits(const struct tegra_edp_limits **limits, int *size);
+unsigned int tegra_get_edp_limit(void);
+void tegra_get_system_edp_limits(const unsigned int **limits);
+int tegra_system_edp_alarm(bool alarm);
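+
+/*
+ * Illustrative sketch (hypothetical caller; "temp" and "max_freq" are the
+ * caller's own variables): a thermal driver would typically fetch the
+ * table once and pick the row matching the current temperature, e.g.
+ *
+ *	const struct tegra_edp_limits *limits;
+ *	int i, n;
+ *
+ *	tegra_get_cpu_edp_limits(&limits, &n);
+ *	for (i = 0; i < n - 1; i++)
+ *		if (temp <= limits[i].temperature)
+ *			break;
+ *	max_freq = limits[i].freq_limits[num_online_cpus() - 1];
+ */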
+
+#else
+static inline void tegra_init_cpu_edp_limits(int regulator_mA)
+{}
+static inline void tegra_init_system_edp_limits(int power_limit_mW)
+{}
+static inline int tegra_edp_update_thermal_zone(int temperature)
+{ return -1; }
+static inline void tegra_get_cpu_edp_limits(struct tegra_edp_limits **limits,
+ int *size)
+{}
+static inline unsigned int tegra_get_edp_limit(void)
+{ return -1; }
+static inline void tegra_get_system_edp_limits(unsigned int **limits)
+{}
+static inline int tegra_system_edp_alarm(bool alarm)
+{ return -1; }
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static inline void tegra_edp_throttle_cpu_now(u8 factor)
+{}
+#else
+void tegra_edp_throttle_cpu_now(u8 factor);
+#endif
+
+#endif /* __MACH_EDP_H */
diff --git a/arch/arm/mach-tegra/include/mach/entry-macro.S b/arch/arm/mach-tegra/include/mach/entry-macro.S
index dd165c53889d..50d1b212da2b 100644
--- a/arch/arm/mach-tegra/include/mach/entry-macro.S
+++ b/arch/arm/mach-tegra/include/mach/entry-macro.S
@@ -20,7 +20,7 @@
#include <asm/hardware/entry-macro-gic.S>
/* Uses the GIC interrupt controller built into the cpu */
-#define ICTRL_BASE (IO_CPU_VIRT + 0x100)
+#define ICTRL_BASE (IO_CPU_VIRT + 0x40100)
.macro disable_fiq
.endm
@@ -33,24 +33,5 @@
.macro arch_ret_to_user, tmp1, tmp2
.endm
#else
- /* legacy interrupt controller for AP16 */
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- @ enable imprecise aborts
- cpsie a
- @ EVP base at 0xf010f000
- mov \base, #0xf0000000
- orr \base, #0x00100000
- orr \base, #0x0000f000
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base, #0x20] @ EVT_IRQ_STS
- cmp \irqnr, #0x80
- .endm
+#error "Unsupported configuration"
#endif
diff --git a/arch/arm/mach-tegra/include/mach/fb.h b/arch/arm/mach-tegra/include/mach/fb.h
new file mode 100644
index 000000000000..ced6f9c2cb44
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/fb.h
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/mach-tegra/include/mach/fb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_FB_H
+#define __MACH_TEGRA_FB_H
+
+#include <linux/fb.h>
+
+struct nvhost_device;
+struct tegra_dc;
+struct tegra_fb_data;
+struct tegra_fb_info;
+struct resource;
+
+#ifdef CONFIG_FB_TEGRA
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc,
+ struct tegra_fb_data *fb_data,
+ struct resource *fb_mem);
+void tegra_fb_unregister(struct tegra_fb_info *fb_info);
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+ struct fb_monspecs *specs,
+ bool (*mode_filter)(const struct tegra_dc *dc,
+ struct fb_videomode *mode));
+#else
+static inline struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc,
+ struct tegra_fb_data *fb_data,
+ struct resource *fb_mem)
+{
+ return NULL;
+}
+
+static inline void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+}
+
+static inline void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+					    struct fb_monspecs *specs,
+					    bool (*mode_filter)(const struct tegra_dc *dc,
+						    struct fb_videomode *mode))
+{
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/fiq.h b/arch/arm/mach-tegra/include/mach/fiq.h
new file mode 100644
index 000000000000..17625facf627
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/fiq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_TEGRA_FIQ_H
+#define __ASM_ARCH_TEGRA_FIQ_H
+
+/* enable/disable an interrupt that is an FIQ (safe from FIQ context?) */
+void tegra_fiq_enable(int n);
+void tegra_fiq_disable(int n);
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/gpio.h b/arch/arm/mach-tegra/include/mach/gpio.h
index 196f114dc241..b7357ab0c4dd 100644
--- a/arch/arm/mach-tegra/include/mach/gpio.h
+++ b/arch/arm/mach-tegra/include/mach/gpio.h
@@ -24,8 +24,18 @@
#include <mach/irqs.h>
#define TEGRA_NR_GPIOS INT_GPIO_NR
+#define ARCH_NR_GPIOS (TEGRA_NR_GPIOS + 128)
#include <asm-generic/gpio.h>
+#include "pinmux.h"
+
+struct gpio_init_pin_info {
+ char name[16];
+ int gpio_nr;
+ bool is_gpio;
+ bool is_input;
+	int value;	/* Value if it is output */
+};
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
@@ -36,16 +46,23 @@
static inline int gpio_to_irq(unsigned int gpio)
{
+ /* SOC gpio */
if (gpio < TEGRA_NR_GPIOS)
return INT_GPIO_BASE + gpio;
- return -EINVAL;
+
+	/* For non-SoC GPIOs, the external peripheral driver needs to
+	 * provide the implementation */
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
{
+ /* SOC gpio */
if ((irq >= INT_GPIO_BASE) && (irq < INT_GPIO_BASE + INT_GPIO_NR))
return irq - INT_GPIO_BASE;
- return -EINVAL;
+
+ /* we don't supply reverse mappings for non-SOC gpios */
+ return -EIO;
}
struct tegra_gpio_table {
@@ -56,5 +73,8 @@ struct tegra_gpio_table {
void tegra_gpio_config(struct tegra_gpio_table *table, int num);
void tegra_gpio_enable(int gpio);
void tegra_gpio_disable(int gpio);
-
+int tegra_gpio_resume_init(void);
+void tegra_gpio_init_configure(unsigned gpio, bool is_input, int value);
+void tegra_gpio_set_tristate(int gpio, enum tegra_tristate ts);
+int tegra_gpio_get_bank_int_nr(int gpio);
#endif
diff --git a/arch/arm/mach-tegra/include/mach/gpufuse.h b/arch/arm/mach-tegra/include/mach/gpufuse.h
new file mode 100644
index 000000000000..4aa6cb66d5d9
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/gpufuse.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-tegra/include/mach/gpufuse.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Number of register sets to handle in host context switching */
+int tegra_gpu_register_sets(void);
+
diff --git a/arch/arm/mach-tegra/include/mach/hardware.h b/arch/arm/mach-tegra/include/mach/hardware.h
new file mode 100644
index 000000000000..da0bc95eec5b
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/hardware.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corp.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MACH_TEGRA_HARDWARE_H
+#define MACH_TEGRA_HARDWARE_H
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0
+#define pcibios_assign_all_busses() 1
+
+#else
+
+#define PCIBIOS_MIN_IO 0x03000000ul
+#define PCIBIOS_MIN_MEM 0x10000000ul
+#define pcibios_assign_all_busses() 0
+#endif
+
+enum tegra_chipid {
+ TEGRA_CHIPID_UNKNOWN = 0,
+ TEGRA_CHIPID_TEGRA2 = 0x20,
+ TEGRA_CHIPID_TEGRA3 = 0x30,
+};
+
+enum tegra_revision {
+ TEGRA_REVISION_UNKNOWN = 0,
+ TEGRA_REVISION_A01,
+ TEGRA_REVISION_A02,
+ TEGRA_REVISION_A03,
+ TEGRA_REVISION_A03p,
+ TEGRA_REVISION_MAX,
+};
+
+enum tegra_chipid tegra_get_chipid(void);
+enum tegra_revision tegra_get_revision(void);
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/hdmi-audio.h b/arch/arm/mach-tegra/include/mach/hdmi-audio.h
new file mode 100644
index 000000000000..7d760690081a
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/hdmi-audio.h
@@ -0,0 +1,46 @@
+/*
+ * arch/arm/mach-tegra/include/mach/hdmi-audio.h
+ *
+ * Copyright (c) 2008-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_HDMI_AUDIO_H
+#define __MACH_TEGRA_HDMI_AUDIO_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+enum {
+ AUDIO_FREQ_32K = 32000,
+ AUDIO_FREQ_44_1K = 44100,
+ AUDIO_FREQ_48K = 48000,
+ AUDIO_FREQ_88_2K = 88200,
+ AUDIO_FREQ_96K = 96000,
+ AUDIO_FREQ_176_4K = 176400,
+ AUDIO_FREQ_192K = 192000,
+};
+
+enum {
+ AUTO = 0,
+ SPDIF,
+ HDA,
+};
+
+int tegra_hdmi_setup_audio_freq_source(unsigned audio_freq, unsigned audio_source);
+int tegra_hdmi_setup_hda_presence(void);
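+
+/*
+ * Illustrative sketch (hypothetical call site): an audio driver switching
+ * the stream to 44.1 kHz over SPDIF would do something like
+ *
+ *	err = tegra_hdmi_setup_audio_freq_source(AUDIO_FREQ_44_1K, SPDIF);
+ *	if (err)
+ *		return err;
+ */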
+
+#endif /* __MACH_TEGRA_HDMI_AUDIO_H */
diff --git a/arch/arm/mach-tegra/include/mach/i2s.h b/arch/arm/mach-tegra/include/mach/i2s.h
new file mode 100644
index 000000000000..42cce885cdac
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/i2s.h
@@ -0,0 +1,316 @@
+/*
+ * arch/arm/mach-tegra/include/mach/i2s.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_TEGRA_I2S_H
+#define __ARCH_ARM_MACH_TEGRA_I2S_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+/* Offsets from TEGRA_I2S1_BASE and TEGRA_I2S2_BASE */
+
+#define I2S_I2S_CTRL_0 0
+#define I2S_I2S_STATUS_0 4
+#define I2S_I2S_TIMING_0 8
+#define I2S_I2S_FIFO_SCR_0 0x0c
+#define I2S_I2S_PCM_CTRL_0 0x10
+#define I2S_I2S_NW_CTRL_0 0x14
+#define I2S_I2S_TDM_CTRL_0 0x20
+#define I2S_I2S_TDM_TX_RX_CTRL_0 0x24
+#define I2S_I2S_FIFO1_0 0x40
+#define I2S_I2S_FIFO2_0 0x80
+
+/*
+ * I2S_I2S_CTRL_0
+ */
+
+#define I2S_I2S_CTRL_FIFO2_TX_ENABLE (1<<30)
+#define I2S_I2S_CTRL_FIFO1_ENABLE (1<<29)
+#define I2S_I2S_CTRL_FIFO2_ENABLE (1<<28)
+#define I2S_I2S_CTRL_FIFO1_RX_ENABLE (1<<27)
+#define I2S_I2S_CTRL_FIFO_LPBK_ENABLE (1<<26)
+#define I2S_I2S_CTRL_MASTER_ENABLE (1<<25)
+#define I2S_I2S_CTRL_L_R_CTRL (1<<24) /* 0 = L/R: low/high */
+
+#define I2S_BIT_FORMAT_I2S 0
+#define I2S_BIT_FORMAT_RJM 1
+#define I2S_BIT_FORMAT_LJM 2
+#define I2S_BIT_FORMAT_DSP 3
+#define I2S_BIT_FORMAT_SHIFT 10
+
+#define I2S_I2S_CTRL_BIT_FORMAT_MASK (3<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_I2S (I2S_BIT_FORMAT_I2S<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_RJM (I2S_BIT_FORMAT_RJM<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_LJM (I2S_BIT_FORMAT_LJM<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_DSP (I2S_BIT_FORMAT_DSP<<10)
+
+#define I2S_BIT_SIZE_16 0
+#define I2S_BIT_SIZE_20 1
+#define I2S_BIT_SIZE_24 2
+#define I2S_BIT_SIZE_32 3
+#define I2S_BIT_SIZE_SHIFT 8
+
+#define I2S_I2S_CTRL_BIT_SIZE_MASK (3 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_16 (I2S_BIT_SIZE_16 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_20 (I2S_BIT_SIZE_20 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_24 (I2S_BIT_SIZE_24 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_32 (I2S_BIT_SIZE_32 << I2S_BIT_SIZE_SHIFT)
+
+#define I2S_FIFO_16_LSB 0
+#define I2S_FIFO_20_LSB 1
+#define I2S_FIFO_24_LSB 2
+#define I2S_FIFO_32 3
+#define I2S_FIFO_PACKED 7
+#define I2S_FIFO_SHIFT 4
+
+#define I2S_I2S_CTRL_FIFO_FORMAT_MASK (7<<4)
+#define I2S_I2S_CTRL_FIFO_FORMAT_16_LSB \
+ (I2S_FIFO_16_LSB << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_20_LSB \
+ (I2S_FIFO_20_LSB << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_24_LSB \
+ (I2S_FIFO_24_LSB << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_32 \
+ (I2S_FIFO_32 << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_PACKED \
+ (I2S_FIFO_PACKED << I2S_FIFO_SHIFT)
+
+#define I2S_I2S_IE_FIFO1_ERR (1<<3)
+#define I2S_I2S_IE_FIFO2_ERR (1<<2)
+#define I2S_I2S_QE_FIFO1 (1<<1)
+#define I2S_I2S_QE_FIFO2 (1<<0)
+
+/*
+ * I2S_I2S_STATUS_0
+ */
+
+#define I2S_I2S_STATUS_FIFO1_RDY (1<<31)
+#define I2S_I2S_STATUS_FIFO2_RDY (1<<30)
+#define I2S_I2S_STATUS_FIFO1_BSY (1<<29)
+#define I2S_I2S_STATUS_FIFO2_BSY (1<<28)
+#define I2S_I2S_STATUS_FIFO1_ERR (1<<3)
+#define I2S_I2S_STATUS_FIFO2_ERR (1<<2)
+#define I2S_I2S_STATUS_QS_FIFO1 (1<<1)
+#define I2S_I2S_STATUS_QS_FIFO2 (1<<0)
+
+/*
+ * I2S_I2S_TIMING_0
+ */
+
+#define I2S_I2S_TIMING_NON_SYM_ENABLE (1<<12)
+#define I2S_I2S_TIMING_CHANNEL_BIT_COUNT_MASK 0x7ff
+#define I2S_I2S_TIMING_CHANNEL_BIT_COUNT (1<<0)
+
+/*
+ * I2S_I2S_FIFO_SCR_0
+ */
+
+#define I2S_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK 0x3f
+#define I2S_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT 24
+#define I2S_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT 16
+
+#define I2S_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_MASK (0x3f<<24)
+#define I2S_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_MASK (0x3f<<16)
+
+#define I2S_I2S_FIFO_SCR_FIFO2_CLR (1<<12)
+#define I2S_I2S_FIFO_SCR_FIFO1_CLR (1<<8)
+
+#define I2S_FIFO_ATN_LVL_ONE_SLOT 0
+#define I2S_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define I2S_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define I2S_FIFO_ATN_LVL_TWELVE_SLOTS 3
+#define I2S_FIFO2_ATN_LVL_SHIFT 4
+#define I2S_FIFO1_ATN_LVL_SHIFT 0
+
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK \
+ (3 << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_ONE_SLOT \
+ (I2S_FIFO_ATN_LVL_ONE_SLOT << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS \
+ (I2S_FIFO_ATN_LVL_FOUR_SLOTS << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_EIGHT_SLOTS \
+ (I2S_FIFO_ATN_LVL_EIGHT_SLOTS << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_TWELVE_SLOTS \
+ (I2S_FIFO_ATN_LVL_TWELVE_SLOTS << I2S_FIFO2_ATN_LVL_SHIFT)
+
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK \
+ (3 << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_ONE_SLOT \
+ (I2S_FIFO_ATN_LVL_ONE_SLOT << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS \
+ (I2S_FIFO_ATN_LVL_FOUR_SLOTS << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_EIGHT_SLOTS \
+ (I2S_FIFO_ATN_LVL_EIGHT_SLOTS << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_TWELVE_SLOTS \
+ (I2S_FIFO_ATN_LVL_TWELVE_SLOTS << I2S_FIFO1_ATN_LVL_SHIFT)
+/*
+ * I2S_I2S_PCM_CTRL_0
+ */
+#define I2S_PCM_TRM_EDGE_POS_EDGE_NO_HIGHZ 0
+#define I2S_PCM_TRM_EDGE_POS_EDGE_HIGHZ 1
+#define I2S_PCM_TRM_EDGE_NEG_EDGE_NO_HIGHZ 2
+#define I2S_PCM_TRM_EDGE_NEG_EDGE_HIGHZ 3
+#define I2S_PCM_TRM_EDGE_CTRL_SHIFT 9
+
+#define I2S_I2S_PCM_TRM_EDGE_CTRL_MASK		\
+		(3 << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_POS_EDGE_NO_HIGHZ	\
+		(I2S_PCM_TRM_EDGE_POS_EDGE_NO_HIGHZ	\
+		<< I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_POS_EDGE_HIGHZ	\
+		(I2S_PCM_TRM_EDGE_POS_EDGE_HIGHZ	\
+		<< I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_NEG_EDGE_NO_HIGHZ \
+ (I2S_PCM_TRM_EDGE_NEG_EDGE_NO_HIGHZ \
+ << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_NEG_EDGE_HIGHZ \
+ (I2S_PCM_TRM_EDGE_NEG_EDGE_HIGHZ \
+ << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+
+#define I2S_PCM_TRM_MASK_BITS_ZERO 0
+#define I2S_PCM_TRM_MASK_BITS_ONE 1
+#define I2S_PCM_TRM_MASK_BITS_TWO 2
+#define I2S_PCM_TRM_MASK_BITS_THREE 3
+#define I2S_PCM_TRM_MASK_BITS_FOUR 4
+#define I2S_PCM_TRM_MASK_BITS_FIVE 5
+#define I2S_PCM_TRM_MASK_BITS_SIX 6
+#define I2S_PCM_TRM_MASK_BITS_SEVEN 7
+#define I2S_PCM_TRM_MASK_BITS_SHIFT 6
+
+#define I2S_I2S_PCM_TRM_MASK_BITS_MASK \
+ (7 << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_ZERO \
+ (I2S_PCM_TRM_MASK_BITS_ZERO \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_ONE \
+ (I2S_PCM_TRM_MASK_BITS_ONE \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_TWO \
+ (I2S_PCM_TRM_MASK_BITS_TWO \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_THREE \
+ (I2S_PCM_TRM_MASK_BITS_THREE \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_FOUR \
+ (I2S_PCM_TRM_MASK_BITS_FOUR \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_FIVE \
+ (I2S_PCM_TRM_MASK_BITS_FIVE \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_SIX \
+ (I2S_PCM_TRM_MASK_BITS_SIX \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_SEVEN \
+ (I2S_PCM_TRM_MASK_BITS_SEVEN \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+
+#define I2S_I2S_PCM_CTRL_FSYNC_PCM_CTRL (1<<5)
+#define I2S_I2S_PCM_CTRL_TRM_MODE (1<<4)
+
+#define I2S_PCM_RCV_MASK_BITS_ZERO 0
+#define I2S_PCM_RCV_MASK_BITS_ONE 1
+#define I2S_PCM_RCV_MASK_BITS_TWO 2
+#define I2S_PCM_RCV_MASK_BITS_THREE 3
+#define I2S_PCM_RCV_MASK_BITS_FOUR 4
+#define I2S_PCM_RCV_MASK_BITS_FIVE 5
+#define I2S_PCM_RCV_MASK_BITS_SIX 6
+#define I2S_PCM_RCV_MASK_BITS_SEVEN 7
+#define I2S_PCM_RCV_MASK_BITS_SHIFT 1
+
+#define I2S_I2S_PCM_RCV_MASK_BITS_MASK \
+ (7 << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_ZERO \
+ (I2S_PCM_RCV_MASK_BITS_ZERO \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_ONE \
+ (I2S_PCM_RCV_MASK_BITS_ONE \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_TWO \
+ (I2S_PCM_RCV_MASK_BITS_TWO \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_THREE \
+ (I2S_PCM_RCV_MASK_BITS_THREE \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_FOUR \
+ (I2S_PCM_RCV_MASK_BITS_FOUR \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_FIVE \
+ (I2S_PCM_RCV_MASK_BITS_FIVE \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_SIX \
+ (I2S_PCM_RCV_MASK_BITS_SIX \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_SEVEN \
+ (I2S_PCM_RCV_MASK_BITS_SEVEN \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+
+#define I2S_I2S_PCM_CTRL_RCV_MODE (1<<0)
+
+/*
+ * I2S_I2S_NW_CTRL_0
+ */
+
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT1 0
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT2 1
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT3 2
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT4 3
+#define I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT 4
+
+#define I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_MASK \
+ (3 << I2S_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT1 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT1 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT2 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT2 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT3 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT3 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT4 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT4 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+
+#define I2S_I2S_NW_CTRL_TRM_TLPHY_MODE (1<<3)
+
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT1 0
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT2 1
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT3 2
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT4 3
+#define I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT 1
+
+#define I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_MASK \
+ (3 << I2S_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT1 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT1 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT2 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT2 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT3 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT3 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT4 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT4 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+
+#define I2S_I2S_NW_CTRL_RCV_TLPHY_MODE (1<<0)
+
+#endif /* __ARCH_ARM_MACH_TEGRA_I2S_H */
diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
index 4cea2230c8dc..0cc4982969b9 100644
--- a/arch/arm/mach-tegra/include/mach/io.h
+++ b/arch/arm/mach-tegra/include/mach/io.h
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/include/mach/io.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -21,7 +22,11 @@
#ifndef __MACH_TEGRA_IO_H
#define __MACH_TEGRA_IO_H
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
#define IO_SPACE_LIMIT 0xffff
+#else
+#define IO_SPACE_LIMIT 0xffffffff
+#endif
/* On TEGRA, many peripherals are very closely packed in
* two 256MB io windows (that actually only use about 64KB
@@ -37,9 +42,9 @@
#define IO_IRAM_VIRT 0xFE400000
#define IO_IRAM_SIZE SZ_256K
-#define IO_CPU_PHYS 0x50040000
-#define IO_CPU_VIRT 0xFE000000
-#define IO_CPU_SIZE SZ_16K
+#define IO_CPU_PHYS 0x50000000
+#define IO_CPU_VIRT 0xFE000000
+#define IO_CPU_SIZE SZ_1M
#define IO_PPSB_PHYS 0x60000000
#define IO_PPSB_VIRT 0xFE200000
@@ -49,6 +54,26 @@
#define IO_APB_VIRT 0xFE300000
#define IO_APB_SIZE SZ_1M
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define IO_USB_PHYS 0xC5000000
+#else
+#define IO_USB_PHYS 0x7D000000
+#endif
+#define IO_USB_VIRT 0xFE500000
+#define IO_USB_SIZE SZ_1M
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define IO_SDMMC_PHYS 0xC8000000
+#else
+#define IO_SDMMC_PHYS 0x78000000
+#endif
+#define IO_SDMMC_VIRT 0xFE600000
+#define IO_SDMMC_SIZE SZ_1M
+
+#define IO_HOST1X_PHYS 0x54000000
+#define IO_HOST1X_VIRT 0xFE700000
+#define IO_HOST1X_SIZE SZ_4M
+
#define IO_TO_VIRT_BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
#define IO_TO_VIRT_XLATE(p, pst, vst) (((p) - (pst) + (vst)))
@@ -61,6 +86,12 @@
IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_HOST1X_PHYS, IO_HOST1X_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_HOST1X_PHYS, IO_HOST1X_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_USB_PHYS, IO_USB_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_USB_PHYS, IO_USB_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_SDMMC_PHYS, IO_SDMMC_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_SDMMC_PHYS, IO_SDMMC_VIRT) : \
0)
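+
+/*
+ * Worked example (illustrative): with CONFIG_ARCH_TEGRA_2x_SOC unset,
+ * IO_USB_PHYS is 0x7D000000, so IO_TO_VIRT(0x7D008000) falls into the USB
+ * aperture and becomes 0x7D008000 - IO_USB_PHYS + IO_USB_VIRT, i.e.
+ * 0xFE508000.
+ */
+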
#ifndef __ASSEMBLER__
@@ -73,7 +104,7 @@ void tegra_iounmap(volatile void __iomem *addr);
#define IO_ADDRESS(n) ((void __iomem *) IO_TO_VIRT(n))
-#ifdef CONFIG_TEGRA_PCI
+#if (defined(CONFIG_TEGRA_PCI) && defined(CONFIG_ARCH_TEGRA_2x_SOC))
extern void __iomem *tegra_pcie_io_base;
static inline void __iomem *__io(unsigned long addr)
diff --git a/arch/arm/mach-tegra/include/mach/io_dpd.h b/arch/arm/mach-tegra/include/mach/io_dpd.h
new file mode 100644
index 000000000000..16385b463d77
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/io_dpd.h
@@ -0,0 +1,25 @@
+/*
+ * arch/arm/mach-tegra/include/mach/io_dpd.h
+ *
+ * Copyright (C) 2012 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_IO_DPD_H
+#define __MACH_TEGRA_IO_DPD_H
+
+/* Tegra io dpd APIs */
+struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev); /* get handle */
+void tegra_io_dpd_enable(struct tegra_io_dpd *hnd); /* enable dpd */
+void tegra_io_dpd_disable(struct tegra_io_dpd *hnd); /* disable dpd */
+
+#endif /* end __MACH_TEGRA_IO_DPD_H */
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h
index 19dec3ac0854..c491abafb8b9 100644
--- a/arch/arm/mach-tegra/include/mach/iomap.h
+++ b/arch/arm/mach-tegra/include/mach/iomap.h
@@ -2,11 +2,14 @@
* arch/arm/mach-tegra/include/mach/iomap.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
* Erik Gilling <konkers@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -23,15 +26,38 @@
#include <asm/sizes.h>
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define TEGRA_NOR_FLASH_BASE 0xD0000000
+#define TEGRA_NOR_FLASH_SIZE SZ_256M
+#else
+#define TEGRA_NOR_FLASH_BASE 0x48000000
+#define TEGRA_NOR_FLASH_SIZE SZ_128M
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define TEGRA_DRAM_BASE 0x00000000
+#define TEGRA_DRAM_SIZE SZ_1G /* Maximum size */
+#else
+#define TEGRA_DRAM_BASE 0x80000000
+#define TEGRA_DRAM_SIZE (SZ_2G - SZ_1M) /* Maximum size */
+#endif
+
#define TEGRA_IRAM_BASE 0x40000000
#define TEGRA_IRAM_SIZE SZ_256K
+/* First 1K of IRAM is reserved for cpu reset handler. */
+#define TEGRA_RESET_HANDLER_BASE TEGRA_IRAM_BASE
+#define TEGRA_RESET_HANDLER_SIZE SZ_1K
+
#define TEGRA_HOST1X_BASE 0x50000000
#define TEGRA_HOST1X_SIZE 0x24000
#define TEGRA_ARM_PERIF_BASE 0x50040000
#define TEGRA_ARM_PERIF_SIZE SZ_8K
+#define TEGRA_MSELECT_BASE 0x50042000
+#define TEGRA_MSELECT_SIZE 80
+
#define TEGRA_ARM_PL310_BASE 0x50043000
#define TEGRA_ARM_PL310_SIZE SZ_4K
@@ -56,23 +82,53 @@
#define TEGRA_HDMI_BASE 0x54280000
#define TEGRA_HDMI_SIZE SZ_256K
+#define TEGRA_DSI_BASE 0x54300000
+#define TEGRA_DSI_SIZE SZ_256K
+
+#define TEGRA_DSIB_BASE 0x54400000
+#define TEGRA_DSIB_SIZE SZ_256K
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
#define TEGRA_GART_BASE 0x58000000
#define TEGRA_GART_SIZE SZ_32M
-#define TEGRA_RES_SEMA_BASE 0x60001000
+#else
+
+#define TEGRA_SMMU_BASE_TEGRA3_A01 0xe0000000
+#define TEGRA_SMMU_SIZE_TEGRA3_A01 SZ_256M
+#define TEGRA_SMMU_BASE 0x00001000
+#define TEGRA_SMMU_SIZE (SZ_1G - SZ_4K * 2)
+
+#endif
+
#define TEGRA_RES_SEMA_SIZE SZ_4K
+#define TEGRA_RES_SEMA_BASE 0x60001000
+
+#define TEGRA_ARB_SEMA_BASE 0x60002000
+#define TEGRA_ARB_SEMA_SIZE SZ_4K
#define TEGRA_PRIMARY_ICTLR_BASE 0x60004000
-#define TEGRA_PRIMARY_ICTLR_SIZE SZ_64
+#define TEGRA_PRIMARY_ICTLR_SIZE 64
+
+#define TEGRA_ARBGNT_ICTLR_BASE 0x60004040
+#define TEGRA_ARBGNT_ICTLR_SIZE 192
#define TEGRA_SECONDARY_ICTLR_BASE 0x60004100
-#define TEGRA_SECONDARY_ICTLR_SIZE SZ_64
+#define TEGRA_SECONDARY_ICTLR_SIZE 64
#define TEGRA_TERTIARY_ICTLR_BASE 0x60004200
-#define TEGRA_TERTIARY_ICTLR_SIZE SZ_64
+#define TEGRA_TERTIARY_ICTLR_SIZE 64
#define TEGRA_QUATERNARY_ICTLR_BASE 0x60004300
-#define TEGRA_QUATERNARY_ICTLR_SIZE SZ_64
+#define TEGRA_QUATERNARY_ICTLR_SIZE 64
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+#define TEGRA_QUINARY_ICTLR_BASE 0x60004400
+#define TEGRA_QUINARY_ICTLR_SIZE SZ_64
+
+#endif
#define TEGRA_TMR1_BASE 0x60005000
#define TEGRA_TMR1_SIZE SZ_8
@@ -81,7 +137,7 @@
#define TEGRA_TMR2_SIZE SZ_8
#define TEGRA_TMRUS_BASE 0x60005010
-#define TEGRA_TMRUS_SIZE SZ_64
+#define TEGRA_TMRUS_SIZE 64
#define TEGRA_TMR3_BASE 0x60005050
#define TEGRA_TMR3_SIZE SZ_8
@@ -89,6 +145,43 @@
#define TEGRA_TMR4_BASE 0x60005058
#define TEGRA_TMR4_SIZE SZ_8
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+#define TEGRA_TMR5_BASE 0x60005060
+#define TEGRA_TMR5_SIZE 8
+
+#define TEGRA_TMR6_BASE 0x60005068
+#define TEGRA_TMR6_SIZE 8
+
+#define TEGRA_TMR7_BASE 0x60005070
+#define TEGRA_TMR7_SIZE 8
+
+#define TEGRA_TMR8_BASE 0x60005078
+#define TEGRA_TMR8_SIZE 8
+
+#define TEGRA_TMR9_BASE 0x60005080
+#define TEGRA_TMR9_SIZE 8
+
+#define TEGRA_TMR10_BASE 0x60005088
+#define TEGRA_TMR10_SIZE 8
+
+#define TEGRA_WDT0_BASE 0x60005100
+#define TEGRA_WDT0_SIZE 32
+
+#define TEGRA_WDT1_BASE 0x60005120
+#define TEGRA_WDT1_SIZE 32
+
+#define TEGRA_WDT2_BASE 0x60005140
+#define TEGRA_WDT2_SIZE 32
+
+#define TEGRA_WDT3_BASE 0x60005160
+#define TEGRA_WDT3_SIZE 32
+
+#define TEGRA_WDT4_BASE 0x60005180
+#define TEGRA_WDT4_SIZE 32
+
+#endif
+
#define TEGRA_CLK_RESET_BASE 0x60006000
#define TEGRA_CLK_RESET_SIZE SZ_4K
@@ -107,24 +200,49 @@
#define TEGRA_APB_DMA_CH0_BASE 0x6000B000
#define TEGRA_APB_DMA_CH0_SIZE 32
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+#define TEGRA_AHB_ARB_BASE 0x6000C000
+#define TEGRA_AHB_ARB_SIZE 768 /* Overlaps with GIZMO */
+
+#endif
+
#define TEGRA_AHB_GIZMO_BASE 0x6000C004
#define TEGRA_AHB_GIZMO_SIZE 0x10C
+#define TEGRA_SB_BASE 0x6000C200
+#define TEGRA_SB_SIZE 256
+
#define TEGRA_STATMON_BASE 0x6000C400
#define TEGRA_STATMON_SIZE SZ_1K
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
+#define TEGRA_ACTMON_BASE 0x6000C800
+#define TEGRA_ACTMON_SIZE SZ_1K
+
+#endif
+
#define TEGRA_GPIO_BASE 0x6000D000
#define TEGRA_GPIO_SIZE SZ_4K
#define TEGRA_EXCEPTION_VECTORS_BASE 0x6000F000
#define TEGRA_EXCEPTION_VECTORS_SIZE SZ_4K
+#define TEGRA_BSEA_BASE 0x60010000
+#define TEGRA_BSEA_SIZE SZ_4K
+
+#define TEGRA_VDE_BASE 0x6001A000
+#define TEGRA_VDE_SIZE 0x3c00
+
#define TEGRA_APB_MISC_BASE 0x70000000
#define TEGRA_APB_MISC_SIZE SZ_4K
#define TEGRA_APB_MISC_DAS_BASE 0x70000c00
#define TEGRA_APB_MISC_DAS_SIZE SZ_128
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
#define TEGRA_AC97_BASE 0x70002000
#define TEGRA_AC97_SIZE SZ_512
@@ -137,11 +255,72 @@
#define TEGRA_I2S2_BASE 0x70002A00
#define TEGRA_I2S2_SIZE SZ_256
+#define TEGRA_PCIE_BASE 0x80000000
+#define TEGRA_PCIE_SIZE SZ_1G
+
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+
+#define TEGRA_TSENSOR_BASE 0x70014000
+#define TEGRA_TSENSOR_SIZE SZ_4K
+
+#define TEGRA_HDA_BASE 0x70030000
+#define TEGRA_HDA_SIZE SZ_64K
+
+#define TEGRA_AUDIO_CLUSTER_BASE 0x70080000
+#define TEGRA_AUDIO_CLUSTER_SIZE SZ_4K
+
+#define TEGRA_APBIF0_BASE TEGRA_AUDIO_CLUSTER_BASE
+#define TEGRA_APBIF0_SIZE 32
+
+#define TEGRA_APBIF1_BASE 0x70080020
+#define TEGRA_APBIF1_SIZE 32
+
+#define TEGRA_APBIF2_BASE 0x70080040
+#define TEGRA_APBIF2_SIZE 32
+
+#define TEGRA_APBIF3_BASE 0x70080060
+#define TEGRA_APBIF3_SIZE 32
+
+#define TEGRA_AHUB_BASE 0x70080200
+#define TEGRA_AHUB_SIZE SZ_256
+
+#define TEGRA_I2S0_BASE 0x70080300
+#define TEGRA_I2S0_SIZE SZ_256
+
+#define TEGRA_I2S1_BASE 0x70080400
+#define TEGRA_I2S1_SIZE SZ_256
+
+#define TEGRA_I2S2_BASE 0x70080500
+#define TEGRA_I2S2_SIZE SZ_256
+
+#define TEGRA_I2S3_BASE 0x70080600
+#define TEGRA_I2S3_SIZE SZ_256
+
+#define TEGRA_I2S4_BASE 0x70080700
+#define TEGRA_I2S4_SIZE SZ_256
+
+#define TEGRA_DAM0_BASE 0x70080800
+#define TEGRA_DAM0_SIZE SZ_256
+
+#define TEGRA_DAM1_BASE 0x70080900
+#define TEGRA_DAM1_SIZE SZ_256
+
+#define TEGRA_DAM2_BASE 0x70080A00
+#define TEGRA_DAM2_SIZE SZ_256
+
+#define TEGRA_SPDIF_BASE 0x70080B00
+#define TEGRA_SPDIF_SIZE SZ_256
+
+#define TEGRA_PCIE_BASE 0x00000000
+#define TEGRA_PCIE_SIZE SZ_1G
+
+#endif
+
#define TEGRA_UARTA_BASE 0x70006000
-#define TEGRA_UARTA_SIZE SZ_64
+#define TEGRA_UARTA_SIZE 64
#define TEGRA_UARTB_BASE 0x70006040
-#define TEGRA_UARTB_SIZE SZ_64
+#define TEGRA_UARTB_SIZE 64
#define TEGRA_UARTC_BASE 0x70006200
#define TEGRA_UARTC_SIZE SZ_256
@@ -185,9 +364,18 @@
#define TEGRA_TWC_BASE 0x7000C100
#define TEGRA_TWC_SIZE SZ_256
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
#define TEGRA_SPI_BASE 0x7000C380
#define TEGRA_SPI_SIZE 48
+#else
+
+#define TEGRA_DTV_BASE 0x7000C300
+#define TEGRA_DTV_SIZE SZ_256
+
+#endif
+
#define TEGRA_I2C2_BASE 0x7000C400
#define TEGRA_I2C2_SIZE SZ_256
@@ -197,9 +385,21 @@
#define TEGRA_OWR_BASE 0x7000C600
#define TEGRA_OWR_SIZE 80
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
#define TEGRA_DVC_BASE 0x7000D000
#define TEGRA_DVC_SIZE SZ_512
+#else
+
+#define TEGRA_I2C4_BASE 0x7000C700
+#define TEGRA_I2C4_SIZE SZ_512
+
+#define TEGRA_I2C5_BASE 0x7000D000
+#define TEGRA_I2C5_SIZE SZ_512
+
+#endif
+
#define TEGRA_SPI1_BASE 0x7000D400
#define TEGRA_SPI1_SIZE SZ_512
@@ -212,6 +412,16 @@
#define TEGRA_SPI4_BASE 0x7000DA00
#define TEGRA_SPI4_SIZE SZ_512
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+
+#define TEGRA_SPI5_BASE 0x7000DC00
+#define TEGRA_SPI5_SIZE SZ_512
+
+#define TEGRA_SPI6_BASE 0x7000DE00
+#define TEGRA_SPI6_SIZE SZ_512
+
+#endif
+
#define TEGRA_RTC_BASE 0x7000E000
#define TEGRA_RTC_SIZE SZ_256
@@ -236,6 +446,8 @@
#define TEGRA_CSITE_BASE 0x70040000
#define TEGRA_CSITE_SIZE SZ_256K
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
#define TEGRA_USB_BASE 0xC5000000
#define TEGRA_USB_SIZE SZ_16K
@@ -257,6 +469,43 @@
#define TEGRA_SDMMC4_BASE 0xC8000600
#define TEGRA_SDMMC4_SIZE SZ_512
+#else
+
+#define TEGRA_SATA_BASE 0x70020000
+#define TEGRA_SATA_SIZE SZ_64K
+
+#define TEGRA_SATA_CONFIG_BASE 0x70021000
+#define TEGRA_SATA_CONFIG_SIZE SZ_4K
+
+#define TEGRA_SATA_BAR5_BASE 0x70027000
+#define TEGRA_SATA_BAR5_SIZE SZ_8K
+
+#define TEGRA_SDMMC1_BASE 0x78000000
+#define TEGRA_SDMMC1_SIZE SZ_512
+
+#define TEGRA_SDMMC2_BASE 0x78000200
+#define TEGRA_SDMMC2_SIZE SZ_512
+
+#define TEGRA_SDMMC3_BASE 0x78000400
+#define TEGRA_SDMMC3_SIZE SZ_512
+
+#define TEGRA_SDMMC4_BASE 0x78000600
+#define TEGRA_SDMMC4_SIZE SZ_512
+
+#define TEGRA_USB_BASE 0x7D000000
+#define TEGRA_USB_SIZE SZ_16K
+
+#define TEGRA_USB2_BASE 0x7D004000
+#define TEGRA_USB2_SIZE SZ_16K
+
+#define TEGRA_USB3_BASE 0x7D008000
+#define TEGRA_USB3_SIZE SZ_16K
+
+#define TEGRA_SE_BASE 0x70012000
+#define TEGRA_SE_SIZE SZ_8K
+
+#endif
+
#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
# define TEGRA_DEBUG_UART_BASE 0
#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
diff --git a/arch/arm/mach-tegra/include/mach/iovmm.h b/arch/arm/mach-tegra/include/mach/iovmm.h
new file mode 100644
index 000000000000..fd83a326e129
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/iovmm.h
@@ -0,0 +1,323 @@
+/*
+ * arch/arm/mach-tegra/include/mach/iovmm.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#ifndef _MACH_TEGRA_IOVMM_H_
+#define _MACH_TEGRA_IOVMM_H_
+
+typedef u32 tegra_iovmm_addr_t;
+
+struct tegra_iovmm_device_ops;
+
+/*
+ * each I/O virtual memory manager unit should register a device with
+ * the iovmm system
+ */
+struct tegra_iovmm_device {
+ struct tegra_iovmm_device_ops *ops;
+ const char *name;
+ struct list_head list;
+ int pgsize_bits;
+};
+
+/*
+ * tegra_iovmm_domain serves a purpose analogous to mm_struct as defined in
+ * <linux/mm_types.h> - it defines a virtual address space within which
+ * tegra_iovmm_areas can be created.
+ */
+struct tegra_iovmm_domain {
+ atomic_t clients;
+ atomic_t locks;
+ spinlock_t block_lock; /* RB-tree for iovmm_area blocks */
+ unsigned long flags;
+ wait_queue_head_t delay_lock; /* when lock_client fails */
+ struct rw_semaphore map_lock;
+ struct rb_root all_blocks; /* ordered by address */
+ struct rb_root free_blocks; /* ordered by size */
+ struct tegra_iovmm_device *dev;
+};
+
+/*
+ * tegra_iovmm_client is analogous to an individual task in the task group
+ * which owns an mm_struct.
+ */
+
+struct iovmm_share_group;
+
+struct tegra_iovmm_client {
+ const char *name;
+ unsigned long flags;
+ struct iovmm_share_group *group;
+ struct tegra_iovmm_domain *domain;
+ struct miscdevice *misc_dev;
+ struct list_head list;
+};
+
+/*
+ * tegra_iovmm_area serves a purpose analogous to vm_area_struct as defined
+ * in <linux/mm_types.h> - it defines a virtual memory area which can be
+ * mapped to physical memory by a client-provided mapping function. */
+
+struct tegra_iovmm_area {
+ struct tegra_iovmm_domain *domain;
+ tegra_iovmm_addr_t iovm_start;
+ size_t iovm_length;
+ pgprot_t pgprot;
+ struct tegra_iovmm_area_ops *ops;
+};
+
+struct tegra_iovmm_device_ops {
+ /* maps a VMA using the page residency functions provided by the VMA */
+ int (*map)(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *io_vma);
+	/* marks all PTEs in a VMA as invalid; decommits the virtual address
+	 * space (potentially freeing PDEs when decommit is true.) */
+ void (*unmap)(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *io_vma, bool decommit);
+ void (*map_pfn)(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *io_vma,
+ unsigned long offs, unsigned long pfn);
+ /*
+ * ensures that a domain is resident in the hardware's mapping region
+ * so that it may be used by a client
+ */
+ int (*lock_domain)(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_client *client);
+ void (*unlock_domain)(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_client *client);
+ /*
+ * allocates a vmm_domain for the specified client; may return the same
+ * domain for multiple clients
+ */
+ struct tegra_iovmm_domain* (*alloc_domain)(
+ struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_client *client);
+ void (*free_domain)(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_client *client);
+ int (*suspend)(struct tegra_iovmm_device *dev);
+ void (*resume)(struct tegra_iovmm_device *dev);
+};
+
+struct tegra_iovmm_area_ops {
+ /*
+ * ensures that the page of data starting at the specified offset
+ * from the start of the iovma is resident and pinned for use by
+ * DMA, returns the system pfn, or an invalid pfn if the
+ * operation fails.
+ */
+ unsigned long (*lock_makeresident)(struct tegra_iovmm_area *area,
+ tegra_iovmm_addr_t offs);
+ /* called when the page is unmapped from the I/O VMA */
+ void (*release)(struct tegra_iovmm_area *area, tegra_iovmm_addr_t offs);
+};
+
+#ifdef CONFIG_TEGRA_IOVMM
+/*
+ * called by clients to allocate an I/O VMM client mapping context which
+ * will be shared by all clients in the same share_group
+ */
+struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
+ const char *share_group, struct miscdevice *misc_dev);
+
+size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client);
+
+void tegra_iovmm_free_client(struct tegra_iovmm_client *client);
+
+/*
+ * called by clients to ensure that their mapping context is resident
+ * before performing any DMA operations addressing I/O VMM regions.
+ * client_lock may return -EINTR.
+ */
+int tegra_iovmm_client_lock(struct tegra_iovmm_client *client);
+int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client);
+
+/* called by clients after DMA operations are complete */
+void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client);
+
+/*
+ * called by clients to allocate a new iovmm_area and reserve I/O virtual
+ * address space for it. if ops is NULL, clients should subsequently call
+ * tegra_iovmm_vm_map_pages and/or tegra_iovmm_vm_insert_pfn to explicitly
+ * map the I/O virtual address to an OS-allocated page or physical address,
+ * respectively. VM operations may be called before this call returns
+ */
+struct tegra_iovmm_area *tegra_iovmm_create_vm(
+ struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+ size_t size, size_t align, pgprot_t pgprot, unsigned long iovm_start);
+
+/*
+ * called by clients to "zap" an iovmm_area, and replace all mappings
+ * in it with invalid ones, without freeing the virtual address range
+ */
+void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm);
+
+/*
+ * after zapping a demand-loaded iovmm_area, the client should unzap it
+ * to allow the VMM device to remap the page range.
+ */
+void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm);
+
+/* called by clients to return an iovmm_area to the free pool for the domain */
+void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm);
+
+/* returns size of largest free iovm block */
+size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client);
+
+/*
+ * called by client software to map the page-aligned I/O address vaddr to
+ * a specific physical address pfn. I/O VMA should have been created with
+ * a NULL tegra_iovmm_area_ops structure.
+ */
+void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+ tegra_iovmm_addr_t vaddr, unsigned long pfn);
+
+/*
+ * called by clients to return the iovmm_area containing addr, or NULL if
+ * addr has not been allocated. caller should call tegra_iovmm_area_put when
+ * finished using the returned pointer
+ */
+struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+ struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr);
+
+struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm);
+void tegra_iovmm_area_put(struct tegra_iovmm_area *vm);
+
+/* called by drivers to initialize a tegra_iovmm_domain structure */
+int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+ tegra_iovmm_addr_t end);
+
+/* called by drivers to register an I/O VMM device with the system */
+int tegra_iovmm_register(struct tegra_iovmm_device *dev);
+
+/* called by drivers to remove an I/O VMM device from the system */
+int tegra_iovmm_unregister(struct tegra_iovmm_device *dev);
+
+#else /* CONFIG_TEGRA_IOVMM */
+
+static inline struct tegra_iovmm_client *tegra_iovmm_alloc_client(
+ const char *name, const char *share_group, struct miscdevice *misc_dev)
+{
+ return NULL;
+}
+
+static inline size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
+{
+}
+
+static inline int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
+{
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_create_vm(
+ struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+ size_t size, size_t align, pgprot_t pgprot, unsigned long iovm_start)
+{
+ return NULL;
+}
+
+static inline void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+ tegra_iovmm_addr_t vaddr, unsigned long pfn)
+{
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+ struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
+{
+ return NULL;
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_area_get(
+ struct tegra_iovmm_area *vm)
+{
+ return NULL;
+}
+
+static inline void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+ tegra_iovmm_addr_t end)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_register(struct tegra_iovmm_device *dev)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_suspend(void)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_resume(void)
+{
+}
+
+#endif /* CONFIG_TEGRA_IOVMM */
+#endif /* _MACH_TEGRA_IOVMM_H_ */
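
The client-facing half of the API above maps onto a lock/create/insert/free sequence. The sketch below is illustrative only and is not part of the patch: the function name, the use of pgprot_kernel and PAGE_SIZE, and the abbreviated error handling are assumptions about how a driver might consume these declarations.

#include <linux/err.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <mach/iovmm.h>

/* Hypothetical helper: map one physical page into a freshly reserved IOVA. */
static int example_iovmm_map_one_page(unsigned long pfn)
{
        struct tegra_iovmm_client *client;
        struct tegra_iovmm_area *area;
        int err;

        client = tegra_iovmm_alloc_client("example", NULL, NULL);
        if (!client)
                return -ENOMEM;

        /* make the mapping context resident before touching any I/O VMA */
        err = tegra_iovmm_client_lock(client);
        if (err)
                goto out_free_client;

        /* NULL ops => caller maps explicitly via insert_pfn/map_pages */
        area = tegra_iovmm_create_vm(client, NULL, PAGE_SIZE, PAGE_SIZE,
                                     pgprot_kernel, 0);
        if (!area) {
                err = -ENOMEM;
                goto out_unlock;
        }

        /* back the reserved I/O virtual address with the given pfn */
        tegra_iovmm_vm_insert_pfn(area, area->iovm_start, pfn);

        /* ... program DMA against area->iovm_start here ... */

        tegra_iovmm_free_vm(area);
out_unlock:
        tegra_iovmm_client_unlock(client);
out_free_client:
        tegra_iovmm_free_client(client);
        return err;
}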
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h
index 73265af4dda3..986820fa5ff0 100644
--- a/arch/arm/mach-tegra/include/mach/irqs.h
+++ b/arch/arm/mach-tegra/include/mach/irqs.h
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/include/mach/irqs.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -90,7 +91,7 @@
#define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24)
#define INT_CPU1_PMU_INTR (INT_SEC_BASE + 25)
#define INT_SEC_RES_26 (INT_SEC_BASE + 26)
-#define INT_S_LINK1 (INT_SEC_BASE + 27)
+#define INT_SPI_1 (INT_SEC_BASE + 27)
#define INT_APB_DMA_COP (INT_SEC_BASE + 28)
#define INT_AHB_DMA_COP (INT_SEC_BASE + 29)
#define INT_DMA_TX (INT_SEC_BASE + 30)
@@ -166,18 +167,224 @@
#define INT_QUAD_RES_30 (INT_QUAD_BASE + 30)
#define INT_QUAD_RES_31 (INT_QUAD_BASE + 31)
-#define INT_MAIN_NR (INT_QUAD_BASE + 32 - INT_PRI_BASE)
+#define INT_GIC_NR (INT_QUAD_BASE + 32)
-#define INT_GPIO_BASE (INT_PRI_BASE + INT_MAIN_NR)
+#define INT_MAIN_NR (INT_GIC_NR - INT_PRI_BASE)
+#define INT_SYNCPT_THRESH_BASE (INT_QUAD_BASE + 32)
+#define INT_SYNCPT_THRESH_NR 32
+
+#define INT_GPIO_BASE (INT_SYNCPT_THRESH_BASE + \
+ INT_SYNCPT_THRESH_NR)
#define INT_GPIO_NR (28 * 8)
-#define TEGRA_NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
+#define INT_PCI_MSI_BASE (INT_GPIO_BASE + \
+ INT_GPIO_NR)
+#define INT_PCI_MSI_NR (0)
+
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+
+/* Primary Interrupt Controller */
+#define INT_PRI_BASE (INT_GIC_BASE + 32)
+#define INT_TMR1 (INT_PRI_BASE + 0)
+#define INT_TMR2 (INT_PRI_BASE + 1)
+#define INT_RTC (INT_PRI_BASE + 2)
+#define INT_CEC (INT_PRI_BASE + 3)
+#define INT_SHR_SEM_INBOX_IBF (INT_PRI_BASE + 4)
+#define INT_SHR_SEM_INBOX_IBE (INT_PRI_BASE + 5)
+#define INT_SHR_SEM_OUTBOX_IBF (INT_PRI_BASE + 6)
+#define INT_SHR_SEM_OUTBOX_IBE (INT_PRI_BASE + 7)
+#define INT_VDE_UCQ_ERROR (INT_PRI_BASE + 8)
+#define INT_VDE_SYNC_TOKEN (INT_PRI_BASE + 9)
+#define INT_VDE_BSE_V (INT_PRI_BASE + 10)
+#define INT_VDE_BSE_A (INT_PRI_BASE + 11)
+#define INT_VDE_SXE (INT_PRI_BASE + 12)
+#define INT_SATA_RX_STAT (INT_PRI_BASE + 13)
+#define INT_SDMMC1 (INT_PRI_BASE + 14)
+#define INT_SDMMC2 (INT_PRI_BASE + 15)
+#define INT_XIO (INT_PRI_BASE + 16)
+#define INT_VDE (INT_PRI_BASE + 17)
+#define INT_AVP_UCQ (INT_PRI_BASE + 18)
+#define INT_SDMMC3 (INT_PRI_BASE + 19)
+#define INT_USB (INT_PRI_BASE + 20)
+#define INT_USB2 (INT_PRI_BASE + 21)
+#define INT_HSMMC (INT_PRI_BASE + 22)
+#define INT_SATA_CTL (INT_PRI_BASE + 23)
+#define INT_NANDFLASH (INT_PRI_BASE + 24)
+#define INT_VCP (INT_PRI_BASE + 25)
+#define INT_APB_DMA (INT_PRI_BASE + 26)
+#define INT_AHB_DMA (INT_PRI_BASE + 27)
+#define INT_GNT_0 (INT_PRI_BASE + 28)
+#define INT_GNT_1 (INT_PRI_BASE + 29)
+#define INT_OWR (INT_PRI_BASE + 30)
+#define INT_SDMMC4 (INT_PRI_BASE + 31)
+
+/* Secondary Interrupt Controller */
+#define INT_SEC_BASE (INT_PRI_BASE + 32)
+#define INT_GPIO1 (INT_SEC_BASE + 0)
+#define INT_GPIO2 (INT_SEC_BASE + 1)
+#define INT_GPIO3 (INT_SEC_BASE + 2)
+#define INT_GPIO4 (INT_SEC_BASE + 3)
+#define INT_UARTA (INT_SEC_BASE + 4)
+#define INT_UARTB (INT_SEC_BASE + 5)
+#define INT_I2C (INT_SEC_BASE + 6)
+#define INT_SPI (INT_SEC_BASE + 7)
+#define INT_TWC (INT_SEC_BASE + 8)
+#define INT_TMR3 (INT_SEC_BASE + 9)
+#define INT_TMR4 (INT_SEC_BASE + 10)
+#define INT_FLOW_RSM0 (INT_SEC_BASE + 11)
+#define INT_FLOW_RSM1 (INT_SEC_BASE + 12)
+#define INT_ACTMON (INT_SEC_BASE + 13)
+#define INT_UARTC (INT_SEC_BASE + 14)
+#define INT_MIPI (INT_SEC_BASE + 15)
+#define INT_EVENTA (INT_SEC_BASE + 16)
+#define INT_EVENTB (INT_SEC_BASE + 17)
+#define INT_EVENTC (INT_SEC_BASE + 18)
+#define INT_EVENTD (INT_SEC_BASE + 19)
+#define INT_VFIR (INT_SEC_BASE + 20)
+#define INT_I2C5 (INT_SEC_BASE + 21)
+#define INT_SYS_STATS_MON (INT_SEC_BASE + 22)
+#define INT_GPIO5 (INT_SEC_BASE + 23)
+#define INT_SPEEDO_PMON_0 (INT_SEC_BASE + 24)
+#define INT_SPEEDO_PMON_1 (INT_SEC_BASE + 25)
+#define INT_SE (INT_SEC_BASE + 26)
+#define INT_SPI_1 (INT_SEC_BASE + 27)
+#define INT_APB_DMA_COP (INT_SEC_BASE + 28)
+#define INT_AHB_DMA_COP (INT_SEC_BASE + 29)
+#define INT_DMA_TX (INT_SEC_BASE + 30)
+#define INT_DMA_RX (INT_SEC_BASE + 31)
+
+/* Tertiary Interrupt Controller */
+#define INT_TRI_BASE (INT_SEC_BASE + 32)
+#define INT_HOST1X_COP_SYNCPT (INT_TRI_BASE + 0)
+#define INT_HOST1X_MPCORE_SYNCPT (INT_TRI_BASE + 1)
+#define INT_HOST1X_COP_GENERAL (INT_TRI_BASE + 2)
+#define INT_HOST1X_MPCORE_GENERAL (INT_TRI_BASE + 3)
+#define INT_MPE_GENERAL (INT_TRI_BASE + 4)
+#define INT_VI_GENERAL (INT_TRI_BASE + 5)
+#define INT_EPP_GENERAL (INT_TRI_BASE + 6)
+#define INT_ISP_GENERAL (INT_TRI_BASE + 7)
+#define INT_2D_GENERAL (INT_TRI_BASE + 8)
+#define INT_DISPLAY_GENERAL (INT_TRI_BASE + 9)
+#define INT_DISPLAY_B_GENERAL (INT_TRI_BASE + 10)
+#define INT_HDMI (INT_TRI_BASE + 11)
+#define INT_TVO_GENERAL (INT_TRI_BASE + 12)
+#define INT_MC_GENERAL (INT_TRI_BASE + 13)
+#define INT_EMC_GENERAL (INT_TRI_BASE + 14)
+#define INT_SPI_6 (INT_TRI_BASE + 15)
+#define INT_NOR_FLASH (INT_TRI_BASE + 16)
+#define INT_HDA (INT_TRI_BASE + 17)
+#define INT_SPI_2 (INT_TRI_BASE + 18)
+#define INT_SPI_3 (INT_TRI_BASE + 19)
+#define INT_I2C2 (INT_TRI_BASE + 20)
+#define INT_KBC (INT_TRI_BASE + 21)
+#define INT_EXTERNAL_PMU (INT_TRI_BASE + 22)
+#define INT_GPIO6 (INT_TRI_BASE + 23)
+#define INT_TVDAC (INT_TRI_BASE + 24)
+#define INT_GPIO7 (INT_TRI_BASE + 25)
+#define INT_UARTD (INT_TRI_BASE + 26)
+#define INT_UARTE (INT_TRI_BASE + 27)
+#define INT_I2C3 (INT_TRI_BASE + 28)
+#define INT_SPI_4 (INT_TRI_BASE + 29)
+#define INT_SPI_5 (INT_TRI_BASE + 30)
+#define INT_SW_RESERVED (INT_TRI_BASE + 31)
+
+/* Quaternary Interrupt Controller */
+#define INT_QUAD_BASE (INT_TRI_BASE + 32)
+#define INT_SNOR (INT_QUAD_BASE + 0)
+#define INT_USB3 (INT_QUAD_BASE + 1)
+#define INT_PCIE_INTR (INT_QUAD_BASE + 2)
+#define INT_PCIE_MSI (INT_QUAD_BASE + 3)
+#define INT_PCIE (INT_QUAD_BASE + 4)
+#define INT_AVP_CACHE (INT_QUAD_BASE + 5)
+#define INT_TSENSOR (INT_QUAD_BASE + 6)
+#define INT_AUDIO_CLUSTER (INT_QUAD_BASE + 7)
+#define INT_APB_DMA_CH0 (INT_QUAD_BASE + 8)
+#define INT_APB_DMA_CH1 (INT_QUAD_BASE + 9)
+#define INT_APB_DMA_CH2 (INT_QUAD_BASE + 10)
+#define INT_APB_DMA_CH3 (INT_QUAD_BASE + 11)
+#define INT_APB_DMA_CH4 (INT_QUAD_BASE + 12)
+#define INT_APB_DMA_CH5 (INT_QUAD_BASE + 13)
+#define INT_APB_DMA_CH6 (INT_QUAD_BASE + 14)
+#define INT_APB_DMA_CH7 (INT_QUAD_BASE + 15)
+#define INT_APB_DMA_CH8 (INT_QUAD_BASE + 16)
+#define INT_APB_DMA_CH9 (INT_QUAD_BASE + 17)
+#define INT_APB_DMA_CH10 (INT_QUAD_BASE + 18)
+#define INT_APB_DMA_CH11 (INT_QUAD_BASE + 19)
+#define INT_APB_DMA_CH12 (INT_QUAD_BASE + 20)
+#define INT_APB_DMA_CH13 (INT_QUAD_BASE + 21)
+#define INT_APB_DMA_CH14 (INT_QUAD_BASE + 22)
+#define INT_APB_DMA_CH15 (INT_QUAD_BASE + 23)
+#define INT_I2C4 (INT_QUAD_BASE + 24)
+#define INT_TMR5 (INT_QUAD_BASE + 25)
+#define INT_TMR_SHARED (INT_QUAD_BASE + 26) /* Deprecated */
+#define INT_WDT_CPU (INT_QUAD_BASE + 27)
+#define INT_WDT_AVP (INT_QUAD_BASE + 28)
+#define INT_GPIO8 (INT_QUAD_BASE + 29)
+#define INT_CAR (INT_QUAD_BASE + 30)
+#define INT_QUAD_RES_31 (INT_QUAD_BASE + 31)
+
+/* Quintary Interrupt Controller */
+#define INT_QUINT_BASE (INT_QUAD_BASE + 32)
+#define INT_APB_DMA_CH16 (INT_QUINT_BASE + 0)
+#define INT_APB_DMA_CH17 (INT_QUINT_BASE + 1)
+#define INT_APB_DMA_CH18 (INT_QUINT_BASE + 2)
+#define INT_APB_DMA_CH19 (INT_QUINT_BASE + 3)
+#define INT_APB_DMA_CH20 (INT_QUINT_BASE + 4)
+#define INT_APB_DMA_CH21 (INT_QUINT_BASE + 5)
+#define INT_APB_DMA_CH22 (INT_QUINT_BASE + 6)
+#define INT_APB_DMA_CH23 (INT_QUINT_BASE + 7)
+#define INT_APB_DMA_CH24 (INT_QUINT_BASE + 8)
+#define INT_APB_DMA_CH25 (INT_QUINT_BASE + 9)
+#define INT_APB_DMA_CH26 (INT_QUINT_BASE + 10)
+#define INT_APB_DMA_CH27 (INT_QUINT_BASE + 11)
+#define INT_APB_DMA_CH28 (INT_QUINT_BASE + 12)
+#define INT_APB_DMA_CH29 (INT_QUINT_BASE + 13)
+#define INT_APB_DMA_CH30 (INT_QUINT_BASE + 14)
+#define INT_APB_DMA_CH31 (INT_QUINT_BASE + 15)
+#define INT_CPU0_PMU_INTR (INT_QUINT_BASE + 16)
+#define INT_CPU1_PMU_INTR (INT_QUINT_BASE + 17)
+#define INT_CPU2_PMU_INTR (INT_QUINT_BASE + 18)
+#define INT_CPU3_PMU_INTR (INT_QUINT_BASE + 19)
+#define INT_CPU4_PMU_INTR (INT_QUINT_BASE + 20)
+#define INT_CPU5_PMU_INTR (INT_QUINT_BASE + 21)
+#define INT_CPU6_PMU_INTR (INT_QUINT_BASE + 22)
+#define INT_CPU7_PMU_INTR (INT_QUINT_BASE + 23)
+#define INT_TMR6 (INT_QUINT_BASE + 24)
+#define INT_TMR7 (INT_QUINT_BASE + 25)
+#define INT_TMR8 (INT_QUINT_BASE + 26)
+#define INT_TMR9 (INT_QUINT_BASE + 27)
+#define INT_TMR10 (INT_QUINT_BASE + 28)
+#define INT_QUINT_RES_29 (INT_QUINT_BASE + 29)
+#define INT_QUINT_RES_30 (INT_QUINT_BASE + 30)
+#define INT_QUINT_RES_31 (INT_QUINT_BASE + 31)
+
+#define INT_GIC_NR (INT_QUINT_BASE + 32)
+
+#define INT_MAIN_NR (INT_GIC_NR - INT_PRI_BASE)
+
+#define INT_SYNCPT_THRESH_BASE (INT_QUINT_BASE + 32)
+#define INT_SYNCPT_THRESH_NR 32
+
+#define INT_GPIO_BASE (INT_SYNCPT_THRESH_BASE + \
+ INT_SYNCPT_THRESH_NR)
+#define INT_GPIO_NR (32 * 8)
+
+#define INT_PCI_MSI_BASE (INT_GPIO_BASE + \
+ INT_GPIO_NR)
+#define INT_PCI_MSI_NR (32 * 8)
+
+#endif
+
+#define FIQ_START INT_GIC_BASE
+
+#define TEGRA_NR_IRQS (INT_PCI_MSI_BASE + \
+ INT_PCI_MSI_NR)
#define INT_BOARD_BASE TEGRA_NR_IRQS
-#define NR_BOARD_IRQS 32
+
+#define NR_BOARD_IRQS 64
#define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS)
-#endif
#endif
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 4f3572a1c684..7b68baa04f11 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -24,20 +24,41 @@
#include <linux/types.h>
#include <linux/input/matrix_keypad.h>
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
#define KBC_MAX_GPIO 24
#define KBC_MAX_KPENT 8
-#else
-#define KBC_MAX_GPIO 20
-#define KBC_MAX_KPENT 7
-#endif
#define KBC_MAX_ROW 16
#define KBC_MAX_COL 8
#define KBC_MAX_KEY (KBC_MAX_ROW * KBC_MAX_COL)
+#define KBC_PIN_GPIO_0 0
+#define KBC_PIN_GPIO_1 1
+#define KBC_PIN_GPIO_2 2
+#define KBC_PIN_GPIO_3 3
+#define KBC_PIN_GPIO_4 4
+#define KBC_PIN_GPIO_5 5
+#define KBC_PIN_GPIO_6 6
+#define KBC_PIN_GPIO_7 7
+#define KBC_PIN_GPIO_8 8
+#define KBC_PIN_GPIO_9 9
+#define KBC_PIN_GPIO_10 10
+#define KBC_PIN_GPIO_11 11
+#define KBC_PIN_GPIO_12 12
+#define KBC_PIN_GPIO_13 13
+#define KBC_PIN_GPIO_14 14
+#define KBC_PIN_GPIO_15 15
+#define KBC_PIN_GPIO_16 16
+#define KBC_PIN_GPIO_17 17
+#define KBC_PIN_GPIO_18 18
+#define KBC_PIN_GPIO_19 19
+#define KBC_PIN_GPIO_20 20
+#define KBC_PIN_GPIO_21 21
+#define KBC_PIN_GPIO_22 22
+#define KBC_PIN_GPIO_23 23
+
struct tegra_kbc_pin_cfg {
bool is_row;
+ bool en;
unsigned char num;
};
@@ -49,6 +70,10 @@ struct tegra_kbc_wake_key {
struct tegra_kbc_platform_data {
unsigned int debounce_cnt;
unsigned int repeat_cnt;
+ unsigned int scan_count;
+
+ unsigned int wake_cnt; /* 0: wake on any key, >1: wake on wake_cfg */
+ const struct tegra_kbc_wake_key *wake_cfg;
struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
const struct matrix_keymap_data *keymap_data;
@@ -56,5 +81,6 @@ struct tegra_kbc_platform_data {
bool wakeup;
bool use_fn_map;
bool use_ghost_filter;
+ bool disable_ev_rep;
};
#endif
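
For context, the new fields slot into board files roughly as below. This is a sketch with placeholder values, not taken from any board in the patch; the keymap and wake-key tables are assumed to be defined elsewhere in the same board file.

static struct tegra_kbc_platform_data example_kbc_pdata = {
        .debounce_cnt     = 20,
        .repeat_cnt       = 1,
        .scan_count       = 30,
        .wake_cnt         = 0,           /* 0: wake on any key */
        .wakeup           = true,
        .use_fn_map       = true,
        .use_ghost_filter = true,
        .disable_ev_rep   = false,
        /* one row pin and one column pin shown; real boards fill more */
        .pin_cfg = {
                [KBC_PIN_GPIO_0]  = { .is_row = true,  .en = true, .num = 0 },
                [KBC_PIN_GPIO_16] = { .is_row = false, .en = true, .num = 0 },
        },
        /* .keymap_data = &example_keymap_data,  -- defined elsewhere */
};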
diff --git a/arch/arm/mach-tegra/include/mach/kfuse.h b/arch/arm/mach-tegra/include/mach/kfuse.h
new file mode 100644
index 000000000000..cfe85cc86ff2
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/kfuse.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-tegra/include/mach/kfuse.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* there are 144 32-bit values in total */
+#define KFUSE_DATA_SZ (144 * 4)
+
+int tegra_kfuse_read(void *dest, size_t len);
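
A minimal consumer of the interface above might look like the sketch below (illustrative only; the buffer handling and choice of GFP flags are assumptions).

#include <linux/slab.h>
#include <mach/kfuse.h>

static int example_read_kfuse(void)
{
        void *buf;
        int err;

        buf = kmalloc(KFUSE_DATA_SZ, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* copy the 144-word fuse block into the local buffer */
        err = tegra_kfuse_read(buf, KFUSE_DATA_SZ);

        /* ... parse the key data on success ... */
        kfree(buf);
        return err;
}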
diff --git a/arch/arm/mach-tegra/include/mach/latency_allowance.h b/arch/arm/mach-tegra/include/mach/latency_allowance.h
new file mode 100644
index 000000000000..f0d27f0b8ba9
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/latency_allowance.h
@@ -0,0 +1,121 @@
+/*
+ * arch/arm/mach-tegra/include/mach/latency_allowance.h
+ *
+ * Copyright (C) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_LATENCY_ALLOWANCE_H_
+#define _MACH_TEGRA_LATENCY_ALLOWANCE_H_
+
+enum tegra_la_id {
+ TEGRA_LA_AFIR = 0,
+ TEGRA_LA_AFIW,
+ TEGRA_LA_AVPC_ARM7R,
+ TEGRA_LA_AVPC_ARM7W,
+ TEGRA_LA_DISPLAY_0A,
+ TEGRA_LA_DISPLAY_0B,
+ TEGRA_LA_DISPLAY_0C,
+ TEGRA_LA_DISPLAY_1B,
+ TEGRA_LA_DISPLAY_HC,
+ TEGRA_LA_DISPLAY_0AB,
+ TEGRA_LA_DISPLAY_0BB,
+ TEGRA_LA_DISPLAY_0CB,
+ TEGRA_LA_DISPLAY_1BB,
+ TEGRA_LA_DISPLAY_HCB,
+ TEGRA_LA_EPPUP,
+ TEGRA_LA_EPPU,
+ TEGRA_LA_EPPV,
+ TEGRA_LA_EPPY,
+ TEGRA_LA_G2PR,
+ TEGRA_LA_G2SR,
+ TEGRA_LA_G2DR,
+ TEGRA_LA_G2DW,
+ TEGRA_LA_HOST1X_DMAR,
+ TEGRA_LA_HOST1XR,
+ TEGRA_LA_HOST1XW,
+ TEGRA_LA_HDAR,
+ TEGRA_LA_HDAW,
+ TEGRA_LA_ISPW,
+ TEGRA_LA_MPCORER,
+ TEGRA_LA_MPCOREW,
+ TEGRA_LA_MPCORE_LPR,
+ TEGRA_LA_MPCORE_LPW,
+ TEGRA_LA_MPE_UNIFBR,
+ TEGRA_LA_MPE_IPRED,
+ TEGRA_LA_MPE_AMEMRD,
+ TEGRA_LA_MPE_CSRD,
+ TEGRA_LA_MPE_UNIFBW,
+ TEGRA_LA_MPE_CSWR,
+ TEGRA_LA_FDCDRD,
+ TEGRA_LA_IDXSRD,
+ TEGRA_LA_TEXSRD,
+ TEGRA_LA_FDCDWR,
+ TEGRA_LA_FDCDRD2,
+ TEGRA_LA_IDXSRD2,
+ TEGRA_LA_TEXSRD2,
+ TEGRA_LA_FDCDWR2,
+ TEGRA_LA_PPCS_AHBDMAR,
+ TEGRA_LA_PPCS_AHBSLVR,
+ TEGRA_LA_PPCS_AHBDMAW,
+ TEGRA_LA_PPCS_AHBSLVW,
+ TEGRA_LA_PTCR,
+ TEGRA_LA_SATAR,
+ TEGRA_LA_SATAW,
+ TEGRA_LA_VDE_BSEVR,
+ TEGRA_LA_VDE_MBER,
+ TEGRA_LA_VDE_MCER,
+ TEGRA_LA_VDE_TPER,
+ TEGRA_LA_VDE_BSEVW,
+ TEGRA_LA_VDE_DBGW,
+ TEGRA_LA_VDE_MBEW,
+ TEGRA_LA_VDE_TPMW,
+ TEGRA_LA_VI_RUV,
+ TEGRA_LA_VI_WSB,
+ TEGRA_LA_VI_WU,
+ TEGRA_LA_VI_WV,
+ TEGRA_LA_VI_WY,
+ TEGRA_LA_MAX_ID
+};
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_TEGRA_FPGA_PLATFORM)
+static inline int tegra_set_latency_allowance(enum tegra_la_id id,
+ unsigned int bandwidth_in_mbps)
+{
+ return 0;
+}
+
+static inline int tegra_enable_latency_scaling(enum tegra_la_id id,
+ unsigned int threshold_low,
+ unsigned int threshold_mid,
+ unsigned int threshold_high)
+{
+ return 0;
+}
+
+static inline void tegra_disable_latency_scaling(enum tegra_la_id id)
+{
+}
+#else
+int tegra_set_latency_allowance(enum tegra_la_id id,
+ unsigned int bandwidth_in_mbps);
+
+int tegra_enable_latency_scaling(enum tegra_la_id id,
+ unsigned int threshold_low,
+ unsigned int threshold_mid,
+ unsigned int threshold_high);
+
+void tegra_disable_latency_scaling(enum tegra_la_id id);
+#endif
+
+#endif /* _MACH_TEGRA_LATENCY_ALLOWANCE_H_ */
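
An illustrative call sequence for the non-stub variants above; the client ID, bandwidth figure and threshold triplet are arbitrary placeholders, not values from the patch.

static int example_configure_display_la(void)
{
        int err;

        /* reserve latency allowance for display window A reads */
        err = tegra_set_latency_allowance(TEGRA_LA_DISPLAY_0A, 300);
        if (err)
                return err;

        /* let the allowance scale between low/mid/high thresholds */
        return tegra_enable_latency_scaling(TEGRA_LA_DISPLAY_0A, 20, 50, 80);
}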
diff --git a/arch/arm/mach-tegra/include/mach/legacy_irq.h b/arch/arm/mach-tegra/include/mach/legacy_irq.h
new file mode 100644
index 000000000000..86f1ff7d06b0
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/legacy_irq.h
@@ -0,0 +1,23 @@
+/*
+ * arch/arm/mach-tegra/include/mach/legacy_irq.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_LEGACY_IRQ_H
+#define _ARCH_ARM_MACH_TEGRA_LEGACY_IRQ_H
+
+void tegra_init_legacy_irq_cop(void);
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/mc.h b/arch/arm/mach-tegra/include/mach/mc.h
new file mode 100644
index 000000000000..576153ad08d6
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/mc.h
@@ -0,0 +1,109 @@
+/*
+ * arch/arm/mach-tegra/include/mach/mc.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_MC_H
+#define __MACH_TEGRA_MC_H
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define TEGRA_MC_FPRI_CTRL_AVPC 0x17c
+#define TEGRA_MC_FPRI_CTRL_DC 0x180
+#define TEGRA_MC_FPRI_CTRL_DCB 0x184
+#define TEGRA_MC_FPRI_CTRL_EPP 0x188
+#define TEGRA_MC_FPRI_CTRL_G2 0x18c
+#define TEGRA_MC_FPRI_CTRL_HC 0x190
+#define TEGRA_MC_FPRI_CTRL_ISP 0x194
+#define TEGRA_MC_FPRI_CTRL_MPCORE 0x198
+#define TEGRA_MC_FPRI_CTRL_MPEA 0x19c
+#define TEGRA_MC_FPRI_CTRL_MPEB 0x1a0
+#define TEGRA_MC_FPRI_CTRL_MPEC 0x1a4
+#define TEGRA_MC_FPRI_CTRL_NV 0x1a8
+#define TEGRA_MC_FPRI_CTRL_PPCS 0x1ac
+#define TEGRA_MC_FPRI_CTRL_VDE 0x1b0
+#define TEGRA_MC_FPRI_CTRL_VI 0x1b4
+
+#define TEGRA_MC_CLIENT_AVPCARM7R ((TEGRA_MC_FPRI_CTRL_AVPC << 8) | 0)
+#define TEGRA_MC_CLIENT_AVPCARM7W ((TEGRA_MC_FPRI_CTRL_AVPC << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0A ((TEGRA_MC_FPRI_CTRL_DC << 8) | 0)
+#define TEGRA_MC_CLIENT_DISPLAY0B ((TEGRA_MC_FPRI_CTRL_DC << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0C ((TEGRA_MC_FPRI_CTRL_DC << 8) | 4)
+#define TEGRA_MC_CLIENT_DISPLAY1B ((TEGRA_MC_FPRI_CTRL_DC << 8) | 6)
+#define TEGRA_MC_CLIENT_DISPLAYHC ((TEGRA_MC_FPRI_CTRL_DC << 8) | 8)
+#define TEGRA_MC_CLIENT_DISPLAY0AB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 0)
+#define TEGRA_MC_CLIENT_DISPLAY0BB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0CB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 4)
+#define TEGRA_MC_CLIENT_DISPLAY1BB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 6)
+#define TEGRA_MC_CLIENT_DISPLAYHCB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 8)
+#define TEGRA_MC_CLIENT_EPPUP ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 0)
+#define TEGRA_MC_CLIENT_EPPU ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 2)
+#define TEGRA_MC_CLIENT_EPPV ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 4)
+#define TEGRA_MC_CLIENT_EPPY ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 6)
+#define TEGRA_MC_CLIENT_G2PR ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 0)
+#define TEGRA_MC_CLIENT_G2SR ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 2)
+#define TEGRA_MC_CLIENT_G2DR ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 4)
+#define TEGRA_MC_CLIENT_G2DW ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 6)
+#define TEGRA_MC_CLIENT_HOST1XDMAR ((TEGRA_MC_FPRI_CTRL_HC << 8) | 0)
+#define TEGRA_MC_CLIENT_HOST1XR ((TEGRA_MC_FPRI_CTRL_HC << 8) | 2)
+#define TEGRA_MC_CLIENT_HOST1XW ((TEGRA_MC_FPRI_CTRL_HC << 8) | 4)
+#define TEGRA_MC_CLIENT_ISPW ((TEGRA_MC_FPRI_CTRL_ISP << 8) | 0)
+#define TEGRA_MC_CLIENT_MPCORER ((TEGRA_MC_FPRI_CTRL_MPCORE << 8) | 0)
+#define TEGRA_MC_CLIENT_MPCOREW ((TEGRA_MC_FPRI_CTRL_MPCORE << 8) | 2)
+#define TEGRA_MC_CLIENT_MPEAMEMRD ((TEGRA_MC_FPRI_CTRL_MPEA << 8) | 0)
+#define TEGRA_MC_CLIENT_MPEUNIFBR ((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 0)
+#define TEGRA_MC_CLIENT_MPE_IPRED ((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 2)
+#define TEGRA_MC_CLIENT_MPEUNIFBW ((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 4)
+#define TEGRA_MC_CLIENT_MPECSRD ((TEGRA_MC_FPRI_CTRL_MPEC << 8) | 0)
+#define TEGRA_MC_CLIENT_MPECSWR ((TEGRA_MC_FPRI_CTRL_MPEC << 8) | 2)
+#define TEGRA_MC_CLIENT_FDCDRD ((TEGRA_MC_FPRI_CTRL_NV << 8) | 0)
+#define TEGRA_MC_CLIENT_IDXSRD ((TEGRA_MC_FPRI_CTRL_NV << 8) | 2)
+#define TEGRA_MC_CLIENT_TEXSRD ((TEGRA_MC_FPRI_CTRL_NV << 8) | 4)
+#define TEGRA_MC_CLIENT_FDCDWR ((TEGRA_MC_FPRI_CTRL_NV << 8) | 6)
+#define TEGRA_MC_CLIENT_PPCSAHBDMAR ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 0)
+#define TEGRA_MC_CLIENT_PPCSAHBSLVR ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 2)
+#define TEGRA_MC_CLIENT_PPCSAHBDMAW ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 4)
+#define TEGRA_MC_CLIENT_PPCSAHBSLVW ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 6)
+#define TEGRA_MC_CLIENT_VDEBSEVR ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 0)
+#define TEGRA_MC_CLIENT_VDEMBER ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 2)
+#define TEGRA_MC_CLIENT_VDEMCER ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 4)
+#define TEGRA_MC_CLIENT_VDETPER ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 6)
+#define TEGRA_MC_CLIENT_VDEBSEVW ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 8)
+#define TEGRA_MC_CLIENT_VDEMBEW ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 10)
+#define TEGRA_MC_CLIENT_VDETPMW ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 12)
+#define TEGRA_MC_CLIENT_VIRUV ((TEGRA_MC_FPRI_CTRL_VI << 8) | 0)
+#define TEGRA_MC_CLIENT_VIWSB ((TEGRA_MC_FPRI_CTRL_VI << 8) | 2)
+#define TEGRA_MC_CLIENT_VIWU ((TEGRA_MC_FPRI_CTRL_VI << 8) | 4)
+#define TEGRA_MC_CLIENT_VIWV ((TEGRA_MC_FPRI_CTRL_VI << 8) | 6)
+#define TEGRA_MC_CLIENT_VIWY ((TEGRA_MC_FPRI_CTRL_VI << 8) | 8)
+
+#define TEGRA_MC_PRIO_LOWEST 0
+#define TEGRA_MC_PRIO_LOW 1
+#define TEGRA_MC_PRIO_MED 2
+#define TEGRA_MC_PRIO_HIGH 3
+#define TEGRA_MC_PRIO_MASK 3
+
+void tegra_mc_set_priority(unsigned long client, unsigned long prio);
+
+#else
+ /* !!!FIXME!!! IMPLEMENT ME */
+#define tegra_mc_set_priority(client, prio) \
+ do { /* nothing for now */ } while (0)
+#endif
+
+int tegra_mc_get_tiled_memory_bandwidth_multiplier(void);
+
+#endif
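
On Tegra2 (the only case where the call above is not a no-op), raising a client's arbitration priority is a one-liner; the sketch below is illustrative.

static void example_boost_display(void)
{
        /* give display window A reads the highest memory arbitration priority */
        tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A, TEGRA_MC_PRIO_HIGH);
}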
diff --git a/arch/arm/mach-tegra/include/mach/memory.h b/arch/arm/mach-tegra/include/mach/memory.h
index 537db3aa81a7..fb7a1f2877a4 100644
--- a/arch/arm/mach-tegra/include/mach/memory.h
+++ b/arch/arm/mach-tegra/include/mach/memory.h
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/include/mach/memory.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -22,7 +23,21 @@
#define __MACH_TEGRA_MEMORY_H
/* physical offset of RAM */
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
#define PLAT_PHYS_OFFSET UL(0)
+#else
+#define PLAT_PHYS_OFFSET UL(0x80000000)
+#endif
+
+/*
+ * Unaligned DMA causes the Tegra DMA engine to place data on the 4-byte
+ * boundary after the expected address. The call to skb_reserve(skb,
+ * NET_IP_ALIGN) was causing skb buffers in usbnet.c to become unaligned.
+ */
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
+
+#define CONSISTENT_DMA_SIZE (14 * SZ_1M)
#endif
diff --git a/arch/arm/mach-tegra/include/mach/nand.h b/arch/arm/mach-tegra/include/mach/nand.h
new file mode 100644
index 000000000000..91ad7d1c9ae5
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/nand.h
@@ -0,0 +1,55 @@
+/*
+ * arch/arm/mach-tegra/include/mach/nand.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Dima Zavin <dmitriyz@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_NAND_H
+#define __MACH_TEGRA_NAND_H
+
+struct tegra_nand_chip_parms {
+ uint8_t vendor_id;
+ uint8_t device_id;
+ uint32_t flags;
+ uint8_t read_id_fourth_byte;
+ uint32_t capacity;
+
+ /* all timing info is in nanoseconds */
+ struct {
+ uint32_t trp;
+ uint32_t trh;
+ uint32_t twp;
+ uint32_t twh;
+ uint32_t tcs;
+ uint32_t twhr;
+ uint32_t tcr_tar_trr;
+ uint32_t twb;
+ uint32_t trp_resp;
+ uint32_t tadl;
+ } timing;
+};
+
+struct tegra_nand_platform {
+ uint8_t max_chips;
+ struct tegra_nand_chip_parms *chip_parms;
+ unsigned int nr_chip_parms;
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+ int wp_gpio;
+};
+
+#endif
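
A board would describe its parts with entries along the lines of the sketch below; every value here is a placeholder, not a real part's ID or timing, and ARRAY_SIZE comes from linux/kernel.h.

static struct tegra_nand_chip_parms example_nand_chip_parms[] = {
        {
                .vendor_id = 0xec,              /* placeholder ID bytes */
                .device_id = 0xd5,
                .capacity  = 2048,              /* placeholder */
                .timing = {                     /* nanoseconds, placeholders */
                        .trp = 12,  .trh = 10, .twp = 12, .twh = 10,
                        .tcs = 20,  .twhr = 80, .tcr_tar_trr = 20,
                        .twb = 100, .trp_resp = 12, .tadl = 70,
                },
        },
};

static struct tegra_nand_platform example_nand_pdata = {
        .max_chips     = 1,
        .chip_parms    = example_nand_chip_parms,
        .nr_chip_parms = ARRAY_SIZE(example_nand_chip_parms),
        .wp_gpio       = -1,                    /* no write-protect GPIO */
};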
diff --git a/arch/arm/mach-tegra/include/mach/nvmap.h b/arch/arm/mach-tegra/include/mach/nvmap.h
new file mode 100644
index 000000000000..79f5a881681f
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/nvmap.h
@@ -0,0 +1,152 @@
+/*
+ * arch/arm/mach-tegra/include/mach/nvmap.h
+ *
+ * structure declarations for nvmem and nvmap user-space ioctls
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+#include <linux/rbtree.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#ifndef __NVMAP_H
+#define __NVMAP_H
+
+#define NVMAP_HEAP_SYSMEM (1ul<<31)
+#define NVMAP_HEAP_IOVMM (1ul<<30)
+
+/* common carveout heaps */
+#define NVMAP_HEAP_CARVEOUT_IRAM (1ul<<29)
+#define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
+#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
+
+#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
+
+/* allocation flags */
+#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
+#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
+#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
+#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
+#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
+
+#define NVMAP_HANDLE_SECURE (0x1ul << 2)
+
+
+#if defined(__KERNEL__)
+
+#if defined(CONFIG_TEGRA_NVMAP)
+struct nvmap_handle;
+struct nvmap_client;
+struct nvmap_device;
+#define nvmap_ref_to_handle(_ref) (*(struct nvmap_handle **)(_ref))
+/* Convert User space handle to Kernel. */
+#define nvmap_convert_handle_u2k(h) (h)
+#elif defined(CONFIG_ION_TEGRA)
+/* For Ion Mem Manager support through nvmap_* API's. */
+#include "../../../../../drivers/gpu/ion/ion_priv.h"
+
+#define nvmap_client ion_client
+#define nvmap_device ion_device
+#define nvmap_handle ion_handle
+#define nvmap_handle_ref ion_handle
+#define nvmap_ref_to_handle(_ref) ((struct ion_handle *)(_ref))
+/* Convert a user-space handle to a kernel handle. */
+#define nvmap_convert_handle_u2k(h) (*((u32 *)h))
+#endif
+
+#define nvmap_id_to_handle(_id) ((struct nvmap_handle *)(_id))
+
+
+struct nvmap_pinarray_elem {
+ __u32 patch_mem;
+ __u32 patch_offset;
+ __u32 pin_mem;
+ __u32 pin_offset;
+ __u32 reloc_shift;
+};
+
+#if defined(CONFIG_TEGRA_NVMAP)
+/* handle_ref objects are client-local references to an nvmap_handle;
+ * they are distinct objects so that handles can be unpinned and
+ * unreferenced the correct number of times when a client abnormally
+ * terminates */
+struct nvmap_handle_ref {
+ struct nvmap_handle *handle;
+ struct rb_node node;
+ atomic_t dupes; /* number of times to free on file close */
+ atomic_t pin; /* number of times to unpin on free */
+};
+#endif
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name);
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags);
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+void *nvmap_mmap(struct nvmap_handle_ref *r);
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr);
+
+struct nvmap_client *nvmap_client_get_file(int fd);
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
+
+void nvmap_client_put(struct nvmap_client *c);
+
+phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r);
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id);
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique);
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr);
+
+int nvmap_patch_word(struct nvmap_client *client,
+ struct nvmap_handle *patch,
+ u32 patch_offset, u32 patch_value);
+
+struct nvmap_platform_carveout {
+ const char *name;
+ unsigned int usage_mask;
+ phys_addr_t base;
+ size_t size;
+ size_t buddy_size;
+};
+
+struct nvmap_platform_data {
+ const struct nvmap_platform_carveout *carveouts;
+ unsigned int nr_carveouts;
+};
+
+extern struct nvmap_device *nvmap_dev;
+
+#endif
+
+#endif
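
Kernel-side users of the declarations above typically pair an allocation with a pin for the device address and an mmap for the CPU address. The sketch below is an assumption-laden illustration: it presumes nvmap_dev is already initialized, guesses that nvmap_alloc may return either NULL or an ERR_PTR, assumes nvmap_client_put releases a created client, and ignores cache maintenance.

#include <linux/err.h>
#include <mach/nvmap.h>

static int example_nvmap_buffer(size_t size)
{
        struct nvmap_client *client;
        struct nvmap_handle_ref *ref;
        phys_addr_t dma_addr;
        void *cpu_addr;

        client = nvmap_create_client(nvmap_dev, "example");
        if (!client)
                return -ENOMEM;

        ref = nvmap_alloc(client, size, 32, NVMAP_HANDLE_WRITE_COMBINE);
        if (IS_ERR_OR_NULL(ref)) {
                nvmap_client_put(client);
                return -ENOMEM;
        }

        cpu_addr = nvmap_mmap(ref);             /* CPU view of the buffer */
        dma_addr = nvmap_pin(client, ref);      /* pinned device address */

        /* ... fill via cpu_addr, hand dma_addr to the hardware ... */

        nvmap_unpin(client, ref);
        nvmap_munmap(ref, cpu_addr);
        nvmap_free(client, ref);
        nvmap_client_put(client);
        return 0;
}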
diff --git a/arch/arm/mach-tegra/include/mach/pci.h b/arch/arm/mach-tegra/include/mach/pci.h
new file mode 100644
index 000000000000..388ad320775e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/pci.h
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/mach-tegra/include/mach/pci.h
+ *
+ * Header file containing constants for the tegra PCIe driver.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_PCI_H
+#define __MACH_PCI_H
+
+#include <linux/pci.h>
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ #define MAX_PCIE_SUPPORTED_PORTS 2
+#else
+ #define MAX_PCIE_SUPPORTED_PORTS 3
+#endif
+
+struct tegra_pci_platform_data {
+ int port_status[MAX_PCIE_SUPPORTED_PORTS];
+ bool use_dock_detect;
+ int gpio;
+};
+#endif
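
Board files feed the structure above to the PCIe driver with data along these lines (a sketch; the enabled-port layout and the unused dock-detect GPIO are placeholders).

static struct tegra_pci_platform_data example_pci_pdata = {
        /* enable the first two root ports, leave any third port disabled */
        .port_status = { [0] = 1, [1] = 1 },
        .use_dock_detect = false,
        .gpio = -1,
};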
diff --git a/arch/arm/mach-tegra/include/mach/pinmux-t3.h b/arch/arm/mach-tegra/include/mach/pinmux-t3.h
new file mode 100644
index 000000000000..12fd5e8ffc1d
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/pinmux-t3.h
@@ -0,0 +1,321 @@
+/*
+ * linux/arch/arm/mach-tegra/include/mach/pinmux-t3.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_PINMUX_T3_H
+#define __MACH_TEGRA_PINMUX_T3_H
+
+#define TEGRA_PINMUX_HAS_IO_DIRECTION 1
+
+enum tegra_pingroup {
+ TEGRA_PINGROUP_ULPI_DATA0 = 0,
+ TEGRA_PINGROUP_ULPI_DATA1,
+ TEGRA_PINGROUP_ULPI_DATA2,
+ TEGRA_PINGROUP_ULPI_DATA3,
+ TEGRA_PINGROUP_ULPI_DATA4,
+ TEGRA_PINGROUP_ULPI_DATA5,
+ TEGRA_PINGROUP_ULPI_DATA6,
+ TEGRA_PINGROUP_ULPI_DATA7,
+ TEGRA_PINGROUP_ULPI_CLK,
+ TEGRA_PINGROUP_ULPI_DIR,
+ TEGRA_PINGROUP_ULPI_NXT,
+ TEGRA_PINGROUP_ULPI_STP,
+ TEGRA_PINGROUP_DAP3_FS,
+ TEGRA_PINGROUP_DAP3_DIN,
+ TEGRA_PINGROUP_DAP3_DOUT,
+ TEGRA_PINGROUP_DAP3_SCLK,
+ TEGRA_PINGROUP_GPIO_PV0,
+ TEGRA_PINGROUP_GPIO_PV1,
+ TEGRA_PINGROUP_SDMMC1_CLK,
+ TEGRA_PINGROUP_SDMMC1_CMD,
+ TEGRA_PINGROUP_SDMMC1_DAT3,
+ TEGRA_PINGROUP_SDMMC1_DAT2,
+ TEGRA_PINGROUP_SDMMC1_DAT1,
+ TEGRA_PINGROUP_SDMMC1_DAT0,
+ TEGRA_PINGROUP_GPIO_PV2,
+ TEGRA_PINGROUP_GPIO_PV3,
+ TEGRA_PINGROUP_CLK2_OUT,
+ TEGRA_PINGROUP_CLK2_REQ,
+ TEGRA_PINGROUP_LCD_PWR1,
+ TEGRA_PINGROUP_LCD_PWR2,
+ TEGRA_PINGROUP_LCD_SDIN,
+ TEGRA_PINGROUP_LCD_SDOUT,
+ TEGRA_PINGROUP_LCD_WR_N,
+ TEGRA_PINGROUP_LCD_CS0_N,
+ TEGRA_PINGROUP_LCD_DC0,
+ TEGRA_PINGROUP_LCD_SCK,
+ TEGRA_PINGROUP_LCD_PWR0,
+ TEGRA_PINGROUP_LCD_PCLK,
+ TEGRA_PINGROUP_LCD_DE,
+ TEGRA_PINGROUP_LCD_HSYNC,
+ TEGRA_PINGROUP_LCD_VSYNC,
+ TEGRA_PINGROUP_LCD_D0,
+ TEGRA_PINGROUP_LCD_D1,
+ TEGRA_PINGROUP_LCD_D2,
+ TEGRA_PINGROUP_LCD_D3,
+ TEGRA_PINGROUP_LCD_D4,
+ TEGRA_PINGROUP_LCD_D5,
+ TEGRA_PINGROUP_LCD_D6,
+ TEGRA_PINGROUP_LCD_D7,
+ TEGRA_PINGROUP_LCD_D8,
+ TEGRA_PINGROUP_LCD_D9,
+ TEGRA_PINGROUP_LCD_D10,
+ TEGRA_PINGROUP_LCD_D11,
+ TEGRA_PINGROUP_LCD_D12,
+ TEGRA_PINGROUP_LCD_D13,
+ TEGRA_PINGROUP_LCD_D14,
+ TEGRA_PINGROUP_LCD_D15,
+ TEGRA_PINGROUP_LCD_D16,
+ TEGRA_PINGROUP_LCD_D17,
+ TEGRA_PINGROUP_LCD_D18,
+ TEGRA_PINGROUP_LCD_D19,
+ TEGRA_PINGROUP_LCD_D20,
+ TEGRA_PINGROUP_LCD_D21,
+ TEGRA_PINGROUP_LCD_D22,
+ TEGRA_PINGROUP_LCD_D23,
+ TEGRA_PINGROUP_LCD_CS1_N,
+ TEGRA_PINGROUP_LCD_M1,
+ TEGRA_PINGROUP_LCD_DC1,
+ TEGRA_PINGROUP_HDMI_INT,
+ TEGRA_PINGROUP_DDC_SCL,
+ TEGRA_PINGROUP_DDC_SDA,
+ TEGRA_PINGROUP_CRT_HSYNC,
+ TEGRA_PINGROUP_CRT_VSYNC,
+ TEGRA_PINGROUP_VI_D0,
+ TEGRA_PINGROUP_VI_D1,
+ TEGRA_PINGROUP_VI_D2,
+ TEGRA_PINGROUP_VI_D3,
+ TEGRA_PINGROUP_VI_D4,
+ TEGRA_PINGROUP_VI_D5,
+ TEGRA_PINGROUP_VI_D6,
+ TEGRA_PINGROUP_VI_D7,
+ TEGRA_PINGROUP_VI_D8,
+ TEGRA_PINGROUP_VI_D9,
+ TEGRA_PINGROUP_VI_D10,
+ TEGRA_PINGROUP_VI_D11,
+ TEGRA_PINGROUP_VI_PCLK,
+ TEGRA_PINGROUP_VI_MCLK,
+ TEGRA_PINGROUP_VI_VSYNC,
+ TEGRA_PINGROUP_VI_HSYNC,
+ TEGRA_PINGROUP_UART2_RXD,
+ TEGRA_PINGROUP_UART2_TXD,
+ TEGRA_PINGROUP_UART2_RTS_N,
+ TEGRA_PINGROUP_UART2_CTS_N,
+ TEGRA_PINGROUP_UART3_TXD,
+ TEGRA_PINGROUP_UART3_RXD,
+ TEGRA_PINGROUP_UART3_CTS_N,
+ TEGRA_PINGROUP_UART3_RTS_N,
+ TEGRA_PINGROUP_GPIO_PU0,
+ TEGRA_PINGROUP_GPIO_PU1,
+ TEGRA_PINGROUP_GPIO_PU2,
+ TEGRA_PINGROUP_GPIO_PU3,
+ TEGRA_PINGROUP_GPIO_PU4,
+ TEGRA_PINGROUP_GPIO_PU5,
+ TEGRA_PINGROUP_GPIO_PU6,
+ TEGRA_PINGROUP_GEN1_I2C_SDA,
+ TEGRA_PINGROUP_GEN1_I2C_SCL,
+ TEGRA_PINGROUP_DAP4_FS,
+ TEGRA_PINGROUP_DAP4_DIN,
+ TEGRA_PINGROUP_DAP4_DOUT,
+ TEGRA_PINGROUP_DAP4_SCLK,
+ TEGRA_PINGROUP_CLK3_OUT,
+ TEGRA_PINGROUP_CLK3_REQ,
+ TEGRA_PINGROUP_GMI_WP_N,
+ TEGRA_PINGROUP_GMI_IORDY,
+ TEGRA_PINGROUP_GMI_WAIT,
+ TEGRA_PINGROUP_GMI_ADV_N,
+ TEGRA_PINGROUP_GMI_CLK,
+ TEGRA_PINGROUP_GMI_CS0_N,
+ TEGRA_PINGROUP_GMI_CS1_N,
+ TEGRA_PINGROUP_GMI_CS2_N,
+ TEGRA_PINGROUP_GMI_CS3_N,
+ TEGRA_PINGROUP_GMI_CS4_N,
+ TEGRA_PINGROUP_GMI_CS6_N,
+ TEGRA_PINGROUP_GMI_CS7_N,
+ TEGRA_PINGROUP_GMI_AD0,
+ TEGRA_PINGROUP_GMI_AD1,
+ TEGRA_PINGROUP_GMI_AD2,
+ TEGRA_PINGROUP_GMI_AD3,
+ TEGRA_PINGROUP_GMI_AD4,
+ TEGRA_PINGROUP_GMI_AD5,
+ TEGRA_PINGROUP_GMI_AD6,
+ TEGRA_PINGROUP_GMI_AD7,
+ TEGRA_PINGROUP_GMI_AD8,
+ TEGRA_PINGROUP_GMI_AD9,
+ TEGRA_PINGROUP_GMI_AD10,
+ TEGRA_PINGROUP_GMI_AD11,
+ TEGRA_PINGROUP_GMI_AD12,
+ TEGRA_PINGROUP_GMI_AD13,
+ TEGRA_PINGROUP_GMI_AD14,
+ TEGRA_PINGROUP_GMI_AD15,
+ TEGRA_PINGROUP_GMI_A16,
+ TEGRA_PINGROUP_GMI_A17,
+ TEGRA_PINGROUP_GMI_A18,
+ TEGRA_PINGROUP_GMI_A19,
+ TEGRA_PINGROUP_GMI_WR_N,
+ TEGRA_PINGROUP_GMI_OE_N,
+ TEGRA_PINGROUP_GMI_DQS,
+ TEGRA_PINGROUP_GMI_RST_N,
+ TEGRA_PINGROUP_GEN2_I2C_SCL,
+ TEGRA_PINGROUP_GEN2_I2C_SDA,
+ TEGRA_PINGROUP_SDMMC4_CLK,
+ TEGRA_PINGROUP_SDMMC4_CMD,
+ TEGRA_PINGROUP_SDMMC4_DAT0,
+ TEGRA_PINGROUP_SDMMC4_DAT1,
+ TEGRA_PINGROUP_SDMMC4_DAT2,
+ TEGRA_PINGROUP_SDMMC4_DAT3,
+ TEGRA_PINGROUP_SDMMC4_DAT4,
+ TEGRA_PINGROUP_SDMMC4_DAT5,
+ TEGRA_PINGROUP_SDMMC4_DAT6,
+ TEGRA_PINGROUP_SDMMC4_DAT7,
+ TEGRA_PINGROUP_SDMMC4_RST_N,
+ TEGRA_PINGROUP_CAM_MCLK,
+ TEGRA_PINGROUP_GPIO_PCC1,
+ TEGRA_PINGROUP_GPIO_PBB0,
+ TEGRA_PINGROUP_CAM_I2C_SCL,
+ TEGRA_PINGROUP_CAM_I2C_SDA,
+ TEGRA_PINGROUP_GPIO_PBB3,
+ TEGRA_PINGROUP_GPIO_PBB4,
+ TEGRA_PINGROUP_GPIO_PBB5,
+ TEGRA_PINGROUP_GPIO_PBB6,
+ TEGRA_PINGROUP_GPIO_PBB7,
+ TEGRA_PINGROUP_GPIO_PCC2,
+ TEGRA_PINGROUP_JTAG_RTCK,
+ TEGRA_PINGROUP_PWR_I2C_SCL,
+ TEGRA_PINGROUP_PWR_I2C_SDA,
+ TEGRA_PINGROUP_KB_ROW0,
+ TEGRA_PINGROUP_KB_ROW1,
+ TEGRA_PINGROUP_KB_ROW2,
+ TEGRA_PINGROUP_KB_ROW3,
+ TEGRA_PINGROUP_KB_ROW4,
+ TEGRA_PINGROUP_KB_ROW5,
+ TEGRA_PINGROUP_KB_ROW6,
+ TEGRA_PINGROUP_KB_ROW7,
+ TEGRA_PINGROUP_KB_ROW8,
+ TEGRA_PINGROUP_KB_ROW9,
+ TEGRA_PINGROUP_KB_ROW10,
+ TEGRA_PINGROUP_KB_ROW11,
+ TEGRA_PINGROUP_KB_ROW12,
+ TEGRA_PINGROUP_KB_ROW13,
+ TEGRA_PINGROUP_KB_ROW14,
+ TEGRA_PINGROUP_KB_ROW15,
+ TEGRA_PINGROUP_KB_COL0,
+ TEGRA_PINGROUP_KB_COL1,
+ TEGRA_PINGROUP_KB_COL2,
+ TEGRA_PINGROUP_KB_COL3,
+ TEGRA_PINGROUP_KB_COL4,
+ TEGRA_PINGROUP_KB_COL5,
+ TEGRA_PINGROUP_KB_COL6,
+ TEGRA_PINGROUP_KB_COL7,
+ TEGRA_PINGROUP_CLK_32K_OUT,
+ TEGRA_PINGROUP_SYS_CLK_REQ,
+ TEGRA_PINGROUP_CORE_PWR_REQ,
+ TEGRA_PINGROUP_CPU_PWR_REQ,
+ TEGRA_PINGROUP_PWR_INT_N,
+ TEGRA_PINGROUP_CLK_32K_IN,
+ TEGRA_PINGROUP_OWR,
+ TEGRA_PINGROUP_DAP1_FS,
+ TEGRA_PINGROUP_DAP1_DIN,
+ TEGRA_PINGROUP_DAP1_DOUT,
+ TEGRA_PINGROUP_DAP1_SCLK,
+ TEGRA_PINGROUP_CLK1_REQ,
+ TEGRA_PINGROUP_CLK1_OUT,
+ TEGRA_PINGROUP_SPDIF_IN,
+ TEGRA_PINGROUP_SPDIF_OUT,
+ TEGRA_PINGROUP_DAP2_FS,
+ TEGRA_PINGROUP_DAP2_DIN,
+ TEGRA_PINGROUP_DAP2_DOUT,
+ TEGRA_PINGROUP_DAP2_SCLK,
+ TEGRA_PINGROUP_SPI2_MOSI,
+ TEGRA_PINGROUP_SPI2_MISO,
+ TEGRA_PINGROUP_SPI2_CS0_N,
+ TEGRA_PINGROUP_SPI2_SCK,
+ TEGRA_PINGROUP_SPI1_MOSI,
+ TEGRA_PINGROUP_SPI1_SCK,
+ TEGRA_PINGROUP_SPI1_CS0_N,
+ TEGRA_PINGROUP_SPI1_MISO,
+ TEGRA_PINGROUP_SPI2_CS1_N,
+ TEGRA_PINGROUP_SPI2_CS2_N,
+ TEGRA_PINGROUP_SDMMC3_CLK,
+ TEGRA_PINGROUP_SDMMC3_CMD,
+ TEGRA_PINGROUP_SDMMC3_DAT0,
+ TEGRA_PINGROUP_SDMMC3_DAT1,
+ TEGRA_PINGROUP_SDMMC3_DAT2,
+ TEGRA_PINGROUP_SDMMC3_DAT3,
+ TEGRA_PINGROUP_SDMMC3_DAT4,
+ TEGRA_PINGROUP_SDMMC3_DAT5,
+ TEGRA_PINGROUP_SDMMC3_DAT6,
+ TEGRA_PINGROUP_SDMMC3_DAT7,
+ TEGRA_PINGROUP_PEX_L0_PRSNT_N,
+ TEGRA_PINGROUP_PEX_L0_RST_N,
+ TEGRA_PINGROUP_PEX_L0_CLKREQ_N,
+ TEGRA_PINGROUP_PEX_WAKE_N,
+ TEGRA_PINGROUP_PEX_L1_PRSNT_N,
+ TEGRA_PINGROUP_PEX_L1_RST_N,
+ TEGRA_PINGROUP_PEX_L1_CLKREQ_N,
+ TEGRA_PINGROUP_PEX_L2_PRSNT_N,
+ TEGRA_PINGROUP_PEX_L2_RST_N,
+ TEGRA_PINGROUP_PEX_L2_CLKREQ_N,
+ TEGRA_PINGROUP_HDMI_CEC,
+ TEGRA_MAX_PINGROUP,
+};
+
+enum tegra_drive_pingroup {
+ TEGRA_DRIVE_PINGROUP_AO1 = 0,
+ TEGRA_DRIVE_PINGROUP_AO2,
+ TEGRA_DRIVE_PINGROUP_AT1,
+ TEGRA_DRIVE_PINGROUP_AT2,
+ TEGRA_DRIVE_PINGROUP_AT3,
+ TEGRA_DRIVE_PINGROUP_AT4,
+ TEGRA_DRIVE_PINGROUP_AT5,
+ TEGRA_DRIVE_PINGROUP_CDEV1,
+ TEGRA_DRIVE_PINGROUP_CDEV2,
+ TEGRA_DRIVE_PINGROUP_CSUS,
+ TEGRA_DRIVE_PINGROUP_DAP1,
+ TEGRA_DRIVE_PINGROUP_DAP2,
+ TEGRA_DRIVE_PINGROUP_DAP3,
+ TEGRA_DRIVE_PINGROUP_DAP4,
+ TEGRA_DRIVE_PINGROUP_DBG,
+ TEGRA_DRIVE_PINGROUP_LCD1,
+ TEGRA_DRIVE_PINGROUP_LCD2,
+ TEGRA_DRIVE_PINGROUP_SDIO2,
+ TEGRA_DRIVE_PINGROUP_SDIO3,
+ TEGRA_DRIVE_PINGROUP_SPI,
+ TEGRA_DRIVE_PINGROUP_UAA,
+ TEGRA_DRIVE_PINGROUP_UAB,
+ TEGRA_DRIVE_PINGROUP_UART2,
+ TEGRA_DRIVE_PINGROUP_UART3,
+ TEGRA_DRIVE_PINGROUP_VI1,
+ TEGRA_DRIVE_PINGROUP_SDIO1,
+ TEGRA_DRIVE_PINGROUP_CRT,
+ TEGRA_DRIVE_PINGROUP_DDC,
+ TEGRA_DRIVE_PINGROUP_GMA,
+ TEGRA_DRIVE_PINGROUP_GMB,
+ TEGRA_DRIVE_PINGROUP_GMC,
+ TEGRA_DRIVE_PINGROUP_GMD,
+ TEGRA_DRIVE_PINGROUP_GME,
+ TEGRA_DRIVE_PINGROUP_GMF,
+ TEGRA_DRIVE_PINGROUP_GMG,
+ TEGRA_DRIVE_PINGROUP_GMH,
+ TEGRA_DRIVE_PINGROUP_OWR,
+ TEGRA_DRIVE_PINGROUP_UAD,
+ TEGRA_DRIVE_PINGROUP_GPV,
+ TEGRA_DRIVE_PINGROUP_DEV3,
+ TEGRA_DRIVE_PINGROUP_CEC,
+ TEGRA_MAX_DRIVE_PINGROUP,
+};
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/pinmux.h b/arch/arm/mach-tegra/include/mach/pinmux.h
index defd8775defa..f485d0bb0729 100644
--- a/arch/arm/mach-tegra/include/mach/pinmux.h
+++ b/arch/arm/mach-tegra/include/mach/pinmux.h
@@ -2,6 +2,7 @@
* linux/arch/arm/mach-tegra/include/mach/pinmux.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -19,79 +20,144 @@
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
#include "pinmux-t2.h"
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+#include "pinmux-t3.h"
#else
#error "Undefined Tegra architecture"
#endif
+#define TEGRA_MUX_LIST \
+ TEGRA_MUX(NONE) \
+ TEGRA_MUX(AHB_CLK) \
+ TEGRA_MUX(APB_CLK) \
+ TEGRA_MUX(AUDIO_SYNC) \
+ TEGRA_MUX(CRT) \
+ TEGRA_MUX(DAP1) \
+ TEGRA_MUX(DAP2) \
+ TEGRA_MUX(DAP3) \
+ TEGRA_MUX(DAP4) \
+ TEGRA_MUX(DAP5) \
+ TEGRA_MUX(DISPLAYA) \
+ TEGRA_MUX(DISPLAYB) \
+ TEGRA_MUX(EMC_TEST0_DLL) \
+ TEGRA_MUX(EMC_TEST1_DLL) \
+ TEGRA_MUX(GMI) \
+ TEGRA_MUX(GMI_INT) \
+ TEGRA_MUX(HDMI) \
+ TEGRA_MUX(I2C1) \
+ TEGRA_MUX(I2C2) \
+ TEGRA_MUX(I2C3) \
+ TEGRA_MUX(IDE) \
+ TEGRA_MUX(IRDA) \
+ TEGRA_MUX(KBC) \
+ TEGRA_MUX(MIO) \
+ TEGRA_MUX(MIPI_HS) \
+ TEGRA_MUX(NAND) \
+ TEGRA_MUX(OSC) \
+ TEGRA_MUX(OWR) \
+ TEGRA_MUX(PCIE) \
+ TEGRA_MUX(PLLA_OUT) \
+ TEGRA_MUX(PLLC_OUT1) \
+ TEGRA_MUX(PLLM_OUT1) \
+ TEGRA_MUX(PLLP_OUT2) \
+ TEGRA_MUX(PLLP_OUT3) \
+ TEGRA_MUX(PLLP_OUT4) \
+ TEGRA_MUX(PWM) \
+ TEGRA_MUX(PWR_INTR) \
+ TEGRA_MUX(PWR_ON) \
+ TEGRA_MUX(RTCK) \
+ TEGRA_MUX(SDIO1) \
+ TEGRA_MUX(SDIO2) \
+ TEGRA_MUX(SDIO3) \
+ TEGRA_MUX(SDIO4) \
+ TEGRA_MUX(SFLASH) \
+ TEGRA_MUX(SPDIF) \
+ TEGRA_MUX(SPI1) \
+ TEGRA_MUX(SPI2) \
+ TEGRA_MUX(SPI2_ALT) \
+ TEGRA_MUX(SPI3) \
+ TEGRA_MUX(SPI4) \
+ TEGRA_MUX(TRACE) \
+ TEGRA_MUX(TWC) \
+ TEGRA_MUX(UARTA) \
+ TEGRA_MUX(UARTB) \
+ TEGRA_MUX(UARTC) \
+ TEGRA_MUX(UARTD) \
+ TEGRA_MUX(UARTE) \
+ TEGRA_MUX(ULPI) \
+ TEGRA_MUX(VI) \
+ TEGRA_MUX(VI_SENSOR_CLK) \
+ TEGRA_MUX(XIO) \
+ /* End of Tegra2 MUX selectors */ \
+ TEGRA_MUX(BLINK) \
+ TEGRA_MUX(CEC) \
+ TEGRA_MUX(CLK12) \
+ TEGRA_MUX(DAP) \
+ TEGRA_MUX(DAPSDMMC2) \
+ TEGRA_MUX(DDR) \
+ TEGRA_MUX(DEV3) \
+ TEGRA_MUX(DTV) \
+ TEGRA_MUX(VI_ALT1) \
+ TEGRA_MUX(VI_ALT2) \
+ TEGRA_MUX(VI_ALT3) \
+ TEGRA_MUX(EMC_DLL) \
+ TEGRA_MUX(EXTPERIPH1) \
+ TEGRA_MUX(EXTPERIPH2) \
+ TEGRA_MUX(EXTPERIPH3) \
+ TEGRA_MUX(GMI_ALT) \
+ TEGRA_MUX(HDA) \
+ TEGRA_MUX(HSI) \
+ TEGRA_MUX(I2C4) \
+ TEGRA_MUX(I2C5) \
+ TEGRA_MUX(I2CPWR) \
+ TEGRA_MUX(I2S0) \
+ TEGRA_MUX(I2S1) \
+ TEGRA_MUX(I2S2) \
+ TEGRA_MUX(I2S3) \
+ TEGRA_MUX(I2S4) \
+ TEGRA_MUX(NAND_ALT) \
+ TEGRA_MUX(POPSDIO4) \
+ TEGRA_MUX(POPSDMMC4) \
+ TEGRA_MUX(PWM0) \
+ TEGRA_MUX(PWM1) \
+ TEGRA_MUX(PWM2) \
+ TEGRA_MUX(PWM3) \
+ TEGRA_MUX(SATA) \
+ TEGRA_MUX(SPI5) \
+ TEGRA_MUX(SPI6) \
+ TEGRA_MUX(SYSCLK) \
+ TEGRA_MUX(VGP1) \
+ TEGRA_MUX(VGP2) \
+ TEGRA_MUX(VGP3) \
+ TEGRA_MUX(VGP4) \
+ TEGRA_MUX(VGP5) \
+ TEGRA_MUX(VGP6) \
+ /* End of Tegra3 MUX selectors */
+
enum tegra_mux_func {
- TEGRA_MUX_RSVD = 0x8000,
- TEGRA_MUX_RSVD1 = 0x8000,
- TEGRA_MUX_RSVD2 = 0x8001,
- TEGRA_MUX_RSVD3 = 0x8002,
- TEGRA_MUX_RSVD4 = 0x8003,
- TEGRA_MUX_NONE = -1,
- TEGRA_MUX_AHB_CLK,
- TEGRA_MUX_APB_CLK,
- TEGRA_MUX_AUDIO_SYNC,
- TEGRA_MUX_CRT,
- TEGRA_MUX_DAP1,
- TEGRA_MUX_DAP2,
- TEGRA_MUX_DAP3,
- TEGRA_MUX_DAP4,
- TEGRA_MUX_DAP5,
- TEGRA_MUX_DISPLAYA,
- TEGRA_MUX_DISPLAYB,
- TEGRA_MUX_EMC_TEST0_DLL,
- TEGRA_MUX_EMC_TEST1_DLL,
- TEGRA_MUX_GMI,
- TEGRA_MUX_GMI_INT,
- TEGRA_MUX_HDMI,
- TEGRA_MUX_I2C,
- TEGRA_MUX_I2C2,
- TEGRA_MUX_I2C3,
- TEGRA_MUX_IDE,
- TEGRA_MUX_IRDA,
- TEGRA_MUX_KBC,
- TEGRA_MUX_MIO,
- TEGRA_MUX_MIPI_HS,
- TEGRA_MUX_NAND,
- TEGRA_MUX_OSC,
- TEGRA_MUX_OWR,
- TEGRA_MUX_PCIE,
- TEGRA_MUX_PLLA_OUT,
- TEGRA_MUX_PLLC_OUT1,
- TEGRA_MUX_PLLM_OUT1,
- TEGRA_MUX_PLLP_OUT2,
- TEGRA_MUX_PLLP_OUT3,
- TEGRA_MUX_PLLP_OUT4,
- TEGRA_MUX_PWM,
- TEGRA_MUX_PWR_INTR,
- TEGRA_MUX_PWR_ON,
- TEGRA_MUX_RTCK,
- TEGRA_MUX_SDIO1,
- TEGRA_MUX_SDIO2,
- TEGRA_MUX_SDIO3,
- TEGRA_MUX_SDIO4,
- TEGRA_MUX_SFLASH,
- TEGRA_MUX_SPDIF,
- TEGRA_MUX_SPI1,
- TEGRA_MUX_SPI2,
- TEGRA_MUX_SPI2_ALT,
- TEGRA_MUX_SPI3,
- TEGRA_MUX_SPI4,
- TEGRA_MUX_TRACE,
- TEGRA_MUX_TWC,
- TEGRA_MUX_UARTA,
- TEGRA_MUX_UARTB,
- TEGRA_MUX_UARTC,
- TEGRA_MUX_UARTD,
- TEGRA_MUX_UARTE,
- TEGRA_MUX_ULPI,
- TEGRA_MUX_VI,
- TEGRA_MUX_VI_SENSOR_CLK,
- TEGRA_MUX_XIO,
- TEGRA_MUX_SAFE,
- TEGRA_MAX_MUX,
+#define TEGRA_MUX(mux) TEGRA_MUX_##mux,
+ TEGRA_MUX_LIST
+#undef TEGRA_MUX
+ TEGRA_MUX_SAFE, /* "Safe" default mux selector */
+ TEGRA_MAX_MUX, /* Number of mux selectors */
+ TEGRA_MUX_TEGRA2_LAST = TEGRA_MUX_XIO,
+ TEGRA_MUX_TEGRA3_LAST = TEGRA_MUX_VGP6,
+
+ /* Mux selector aliases */
+ TEGRA_MUX_I2C = TEGRA_MUX_I2C1,
+ TEGRA_MUX_SDMMC1 = TEGRA_MUX_SDIO1,
+ TEGRA_MUX_SDMMC2 = TEGRA_MUX_SDIO2,
+ TEGRA_MUX_SDMMC3 = TEGRA_MUX_SDIO3,
+ TEGRA_MUX_SDMMC4 = TEGRA_MUX_SDIO4,
+
+ /* Special mux selector values */
+ TEGRA_MUX_INVALID = 0x4000,
+ TEGRA_MUX_RSVD = 0x8000,
+ TEGRA_MUX_RSVD0 = TEGRA_MUX_RSVD,
+ TEGRA_MUX_RSVD1 = 0x8001,
+ TEGRA_MUX_RSVD2 = 0x8002,
+ TEGRA_MUX_RSVD3 = 0x8003,
+ TEGRA_MUX_RSVD4 = 0x8004,
};
enum tegra_pullupdown {
@@ -105,6 +171,29 @@ enum tegra_tristate {
TEGRA_TRI_TRISTATE = 1,
};
+enum tegra_pin_io {
+ TEGRA_PIN_OUTPUT = 0,
+ TEGRA_PIN_INPUT = 1,
+};
+
+enum tegra_pin_lock {
+ TEGRA_PIN_LOCK_DEFAULT = 0,
+ TEGRA_PIN_LOCK_DISABLE,
+ TEGRA_PIN_LOCK_ENABLE,
+};
+
+enum tegra_pin_od {
+ TEGRA_PIN_OD_DEFAULT = 0,
+ TEGRA_PIN_OD_DISABLE,
+ TEGRA_PIN_OD_ENABLE,
+};
+
+enum tegra_pin_ioreset {
+ TEGRA_PIN_IO_RESET_DEFAULT = 0,
+ TEGRA_PIN_IO_RESET_DISABLE,
+ TEGRA_PIN_IO_RESET_ENABLE,
+};
+
enum tegra_vddio {
TEGRA_VDDIO_BB = 0,
TEGRA_VDDIO_LCD,
@@ -115,6 +204,14 @@ enum tegra_vddio {
TEGRA_VDDIO_SYS,
TEGRA_VDDIO_AUDIO,
TEGRA_VDDIO_SD,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ TEGRA_VDDIO_CAM,
+ TEGRA_VDDIO_GMI,
+ TEGRA_VDDIO_PEXCTL,
+ TEGRA_VDDIO_SDMMC1,
+ TEGRA_VDDIO_SDMMC3,
+ TEGRA_VDDIO_SDMMC4,
+#endif
};
struct tegra_pingroup_config {
@@ -122,6 +219,10 @@ struct tegra_pingroup_config {
enum tegra_mux_func func;
enum tegra_pullupdown pupd;
enum tegra_tristate tristate;
+ enum tegra_pin_io io;
+ enum tegra_pin_lock lock;
+ enum tegra_pin_od od;
+ enum tegra_pin_ioreset ioreset;
};
enum tegra_slew {
@@ -165,6 +266,21 @@ enum tegra_pull_strength {
TEGRA_PULL_29,
TEGRA_PULL_30,
TEGRA_PULL_31,
+ TEGRA_PULL_32,
+ TEGRA_PULL_33,
+ TEGRA_PULL_34,
+ TEGRA_PULL_35,
+ TEGRA_PULL_36,
+ TEGRA_PULL_37,
+ TEGRA_PULL_38,
+ TEGRA_PULL_39,
+ TEGRA_PULL_40,
+ TEGRA_PULL_41,
+ TEGRA_PULL_42,
+ TEGRA_PULL_43,
+ TEGRA_PULL_44,
+ TEGRA_PULL_45,
+ TEGRA_PULL_46,
TEGRA_MAX_PULL,
};
@@ -200,6 +316,14 @@ struct tegra_drive_pingroup_config {
struct tegra_drive_pingroup_desc {
const char *name;
s16 reg;
+ u8 drvup_offset;
+ u16 drvup_mask;
+ u8 drvdown_offset;
+ u16 drvdown_mask;
+ u8 slewrise_offset;
+ u16 slewrise_mask;
+ u8 slewfall_offset;
+ u16 slewfall_mask;
};
struct tegra_pingroup_desc {
@@ -207,19 +331,29 @@ struct tegra_pingroup_desc {
int funcs[4];
int func_safe;
int vddio;
- s16 tri_reg; /* offset into the TRISTATE_REG_* register bank */
+ s16 tri_reg; /* offset into the TRISTATE_REG_* register bank */
s16 mux_reg; /* offset into the PIN_MUX_CTL_* register bank */
s16 pupd_reg; /* offset into the PULL_UPDOWN_REG_* register bank */
- s8 tri_bit; /* offset into the TRISTATE_REG_* register bit */
+ s8 tri_bit; /* offset into the TRISTATE_REG_* register bit */
s8 mux_bit; /* offset into the PIN_MUX_CTL_* register bit */
s8 pupd_bit; /* offset into the PULL_UPDOWN_REG_* register bit */
+ s8 lock_bit; /* offset of the LOCK bit in the mux register */
+ s8 od_bit; /* offset of the OD bit in the mux register */
+ s8 ioreset_bit; /* offset of the IO_RESET bit in the mux register */
+ s8 io_default;
+ int gpionr;
};
extern const struct tegra_pingroup_desc tegra_soc_pingroups[];
extern const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[];
+extern const int gpio_to_pingroup[];
+int tegra_pinmux_get_func(enum tegra_pingroup pg);
int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
enum tegra_tristate tristate);
+int tegra_pinmux_set_io(enum tegra_pingroup pg,
+ enum tegra_pin_io input);
+int tegra_pinmux_get_pingroup(int gpio_nr);
int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
enum tegra_pullupdown pupd);
@@ -236,5 +370,6 @@ void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *conf
int len, enum tegra_tristate tristate);
void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
int len, enum tegra_pullupdown pupd);
-#endif
+void __init tegra_init_pinmux(void);
+#endif
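
The TEGRA_MUX_LIST X-macro above exists so that tables elsewhere can be generated from the same list and never fall out of sync with enum tegra_mux_func. As a hedged illustration (not code from this patch), a name table keyed by mux selector could be produced like this:

static const char *example_mux_names[TEGRA_MAX_MUX] = {
#define TEGRA_MUX(mux) [TEGRA_MUX_##mux] = #mux,
        TEGRA_MUX_LIST
#undef TEGRA_MUX
        [TEGRA_MUX_SAFE] = "<safe>",    /* selector with no register encoding */
};

Redefining TEGRA_MUX() before each expansion is what lets one list drive both the enum and any derived tables.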
diff --git a/arch/arm/mach-tegra/include/mach/powergate.h b/arch/arm/mach-tegra/include/mach/powergate.h
index 401d1b725291..a8587ec2d360 100644
--- a/arch/arm/mach-tegra/include/mach/powergate.h
+++ b/arch/arm/mach-tegra/include/mach/powergate.h
@@ -2,6 +2,7 @@
* drivers/regulator/tegra-regulator.c
*
* Copyright (c) 2010 Google, Inc
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -21,20 +22,76 @@
#define _MACH_TEGRA_POWERGATE_H_
#define TEGRA_POWERGATE_CPU 0
+#define TEGRA_POWERGATE_CPU0 TEGRA_POWERGATE_CPU
#define TEGRA_POWERGATE_3D 1
+#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
#define TEGRA_POWERGATE_VENC 2
#define TEGRA_POWERGATE_PCIE 3
#define TEGRA_POWERGATE_VDEC 4
#define TEGRA_POWERGATE_L2 5
#define TEGRA_POWERGATE_MPE 6
+#define TEGRA_POWERGATE_HEG 7
+#define TEGRA_POWERGATE_SATA 8
+#define TEGRA_POWERGATE_CPU1 9
+#define TEGRA_POWERGATE_CPU2 10
+#define TEGRA_POWERGATE_CPU3 11
+#define TEGRA_POWERGATE_CELP 12
+#define TEGRA_POWERGATE_3D1 13
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
#define TEGRA_NUM_POWERGATE 7
+#define TEGRA_CPU_POWERGATE_ID(cpu) (TEGRA_POWERGATE_CPU)
+#define TEGRA_IS_CPU_POWERGATE_ID(id) ((id) == TEGRA_POWERGATE_CPU)
+#else
+#define TEGRA_NUM_POWERGATE 14
+#define TEGRA_CPU_POWERGATE_ID(cpu) ((cpu == 0) ? TEGRA_POWERGATE_CPU0 : \
+ (cpu + TEGRA_POWERGATE_CPU1 - 1))
+#define TEGRA_IS_CPU_POWERGATE_ID(id) (((id) == TEGRA_POWERGATE_CPU0) || \
+ ((id) == TEGRA_POWERGATE_CPU1) || \
+ ((id) == TEGRA_POWERGATE_CPU2) || \
+ ((id) == TEGRA_POWERGATE_CPU3))
+#endif
+
+struct clk;
-int tegra_powergate_power_on(int id);
-int tegra_powergate_power_off(int id);
bool tegra_powergate_is_powered(int id);
+int tegra_powergate_mc_disable(int id);
+int tegra_powergate_mc_enable(int id);
+int tegra_powergate_mc_flush(int id);
+int tegra_powergate_mc_flush_done(int id);
int tegra_powergate_remove_clamping(int id);
+const char *tegra_powergate_get_name(int id);
+
+/*
+ * Functions to powergate/un-powergate partitions.
+ * Clk management is handled inside these APIs.
+ *
+ * tegra_powergate_partition_with_clk_off() can be called with
+ * clks ON. It disables all required clks.
+ *
+ * tegra_unpowergate_partition_with_clk_on() can be called with
+ * all required clks OFF. Returns with all clks ON.
+ *
+ * Warning: In general drivers should take care of the module
+ * clks and use tegra_powergate_partition() &
+ * tegra_unpowergate_partition() APIs.
+ */
+int tegra_powergate_partition_with_clk_off(int id);
+int tegra_unpowergate_partition_with_clk_on(int id);
-/* Must be called with clk disabled, and returns with clk enabled */
-int tegra_powergate_sequence_power_up(int id, struct clk *clk);
+/*
+ * Functions to powergate/un-powergate partitions.
+ * Drivers are responsible for clk enable-disable.
+ *
+ * tegra_powergate_partition() should be called with all
+ * required clks OFF. Drivers should disable clks BEFORE
+ * calling this function.
+ *
+ * tegra_unpowergate_partition() should be called with all
+ * required clks OFF. Returns with all clks OFF. Drivers
+ * should enable all clks AFTER this function.
+ */
+int tegra_powergate_partition(int id);
+int tegra_unpowergate_partition(int id);
#endif /* _MACH_TEGRA_POWERGATE_H_ */
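
Putting the comments above into code, a driver that manages its own clocks would bracket the partition calls roughly as below (a sketch: the MPE partition and the single-clock assumption are illustrative).

#include <linux/clk.h>
#include <mach/powergate.h>

static int example_power_cycle_mpe(struct clk *mpe_clk)
{
        int err;

        /* clocks must already be OFF when tegra_powergate_partition() runs */
        clk_disable(mpe_clk);
        err = tegra_powergate_partition(TEGRA_POWERGATE_MPE);
        if (err)
                return err;

        /* un-powergate also expects clocks OFF and leaves them OFF */
        err = tegra_unpowergate_partition(TEGRA_POWERGATE_MPE);
        if (err)
                return err;

        /* only now may the driver turn its module clocks back on */
        return clk_enable(mpe_clk);
}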
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
index 4231bc7b8652..d360f6f04949 100644
--- a/arch/arm/mach-tegra/include/mach/sdhci.h
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -18,6 +18,7 @@
#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
#include <linux/mmc/host.h>
+#include <asm/mach/mmc.h>
struct tegra_sdhci_platform_data {
int cd_gpio;
@@ -25,6 +26,9 @@ struct tegra_sdhci_platform_data {
int power_gpio;
int is_8bit;
int pm_flags;
+ unsigned int max_clk_limit;
+ unsigned int tap_delay;
+ struct mmc_platform_data mmc_data;
};
#endif
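
The two new fields are consumed straight from platform data; a board entry might look like the sketch below (placeholder values; the embedded mmc_data is left default-initialized).

static struct tegra_sdhci_platform_data example_sdhci_pdata = {
        .cd_gpio       = -1,            /* no card-detect GPIO */
        .power_gpio    = -1,
        .is_8bit       = 1,
        .max_clk_limit = 48000000,      /* cap the interface clock (assumed Hz) */
        .tap_delay     = 0x0f,          /* placeholder tuning value */
};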
diff --git a/arch/arm/mach-tegra/include/mach/spdif.h b/arch/arm/mach-tegra/include/mach/spdif.h
new file mode 100644
index 000000000000..96103fae91b1
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/spdif.h
@@ -0,0 +1,392 @@
+/*
+ * arch/arm/mach-tegra/include/mach/spdif.h
+ *
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+
+#ifndef __ARCH_ARM_MACH_TEGRA_SPDIF_H
+#define __ARCH_ARM_MACH_TEGRA_SPDIF_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+/* Offsets from TEGRA_SPDIF_BASE */
+
+#define SPDIF_CTRL_0 0x0
+#define SPDIF_STATUS_0 0x4
+#define SPDIF_STROBE_CTRL_0 0x8
+#define SPDIF_DATA_FIFO_CSR_0 0x0C
+#define SPDIF_DATA_OUT_0 0x40
+#define SPDIF_DATA_IN_0 0x80
+#define SPDIF_CH_STA_RX_A_0 0x100
+#define SPDIF_CH_STA_RX_B_0 0x104
+#define SPDIF_CH_STA_RX_C_0 0x108
+#define SPDIF_CH_STA_RX_D_0 0x10C
+#define SPDIF_CH_STA_RX_E_0 0x110
+#define SPDIF_CH_STA_RX_F_0 0x114
+#define SPDIF_CH_STA_TX_A_0 0x140
+#define SPDIF_CH_STA_TX_B_0 0x144
+#define SPDIF_CH_STA_TX_C_0 0x148
+#define SPDIF_CH_STA_TX_D_0 0x14C
+#define SPDIF_CH_STA_TX_E_0 0x150
+#define SPDIF_CH_STA_TX_F_0 0x154
+#define SPDIF_USR_STA_RX_A_0 0x180
+#define SPDIF_USR_DAT_TX_A_0 0x1C0
+
+/*
+ * Register SPDIF_CTRL_0
+ */
+
+/*
+ * 1=start capturing from left channel, 0=start
+ * capturing from right channel.
+ */
+#define SPDIF_CTRL_0_CAP_LC (1<<30)
+
+/* SPDIF receiver(RX): 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_RX_EN (1<<29)
+
+/* SPDIF Transmitter(TX): 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TX_EN (1<<28)
+
+/* Transmit Channel status: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TC_EN (1<<27)
+
+/* Transmit user Data: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TU_EN (1<<26)
+
+/* Interrupt on transmit error: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_TXE (1<<25)
+
+/* Interrupt on receive error: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_RXE (1<<24)
+
+/* Interrupt on invalid preamble: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_P (1<<23)
+
+/* Interrupt on "B" preamble: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_B (1<<22)
+
+/*
+ * Interrupt when block of channel status received:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_IE_C (1<<21)
+
+/*
+ * Interrupt when a valid information unit (IU) is received:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_IE_U (1<<20)
+
+/*
+ * Interrupt when RX user FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_RU (1<<19)
+
+/*
+ * Interrupt when TX user FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_TU (1<<18)
+
+/*
+ * Interrupt when RX data FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_RX (1<<17)
+
+/*
+ * Interrupt when TX data FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_TX (1<<16)
+
+/* Loopback test mode: 1=enable internal loopback, 0=Normal mode. */
+#define SPDIF_CTRL_0_LBK_EN (1<<15)
+
+/*
+ * Pack data mode:
+ * 1=Packeted left/right channel data into a single word,
+ * 0=Single data (16 bit needs to be padded to match the
+ * interface data bit size)
+ */
+#define SPDIF_CTRL_0_PACK (1<<14)
+
+/*
+ * 00=16bit data
+ * 01=20bit data
+ * 10=24bit data
+ * 11=raw data
+ */
+#define SPDIF_BIT_MODE_MODE16BIT (0)
+#define SPDIF_BIT_MODE_MODE20BIT (1)
+#define SPDIF_BIT_MODE_MODE24BIT (2)
+#define SPDIF_BIT_MODE_MODERAW (3)
+#define SPDIF_CTRL_0_BIT_MODE_SHIFT (12)
+
+#define SPDIF_CTRL_0_BIT_MODE_MASK \
+ ((0x3) << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE16BIT \
+ (SPDIF_BIT_MODE_MODE16BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE20BIT \
+ (SPDIF_BIT_MODE_MODE20BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE24BIT \
+ (SPDIF_BIT_MODE_MODE24BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODERAW \
+ (SPDIF_BIT_MODE_MODERAW << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+
+
+/*
+ * SPDIF Status Register
+ * -------------------------
+ * Note: IS_P, IS_B, IS_C, and IS_U are sticky bits.
+ * Software must write a 1 to the corresponding bit location
+ * to clear the status.
+ */
+
+/* Register SPDIF_STATUS_0 */
+
+/*
+ * Receiver(RX) shifter is busy receiving data. 1=busy, 0=not busy.
+ * This bit is asserted when the receiver first locked onto the
+ * preamble of the data stream after RX_EN is asserted. This bit is
+ * deasserted when either,
+ * (a) the end of a frame is reached after RX_EN is deasserted, or
+ * (b) the SPDIF data stream becomes inactive.
+ */
+#define SPDIF_STATUS_0_RX_BSY (1<<29)
+
+
+/*
+ * Transmitter(TX) shifter is busy transmitting data.
+ * 1=busy, 0=not busy.
+ * This bit is asserted when TX_EN is asserted.
+ * This bit is deasserted when the end of a frame is reached after
+ * TX_EN is deasserted.
+ */
+#define SPDIF_STATUS_0_TX_BSY (1<<28)
+
+/*
+ * TX is busy shifting out channel status. 1=busy, 0=not busy.
+ * This bit is asserted when both TX_EN and TC_EN are asserted and
+ * data from CH_STA_TX_A register is loaded into the internal shifter.
+ * This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) CH_STA_TX_F register is loaded into the internal shifter.
+ */
+#define SPDIF_STATUS_0_TC_BSY (1<<27)
+
+/*
+ * TX User data FIFO busy. 1=busy, 0=not busy.
+ * This bit is asserted when TX_EN and TXU_EN are asserted and
+ * there's data in the TX user FIFO. This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) there's no data left in the TX user FIFO.
+ */
+#define SPDIF_STATUS_0_TU_BSY (1<<26)
+
+/* Tx FIFO Underrun error status: 1=error, 0=no error */
+#define SPDIF_STATUS_0_TX_ERR (1<<25)
+
+/* Rx FIFO Overrun error status: 1=error, 0=no error */
+#define SPDIF_STATUS_0_RX_ERR (1<<24)
+
+/* Preamble status: 1=bad/missing preamble, 0=Preamble ok */
+#define SPDIF_STATUS_0_IS_P (1<<23)
+
+/* B-preamble detection status: 0=not detected, 1=B-preamble detected */
+#define SPDIF_STATUS_0_IS_B (1<<22)
+
+/*
+ * RX channel block data receive status:
+ * 1=received entire block of channel status,
+ * 0=entire block not received yet.
+ */
+#define SPDIF_STATUS_0_IS_C (1<<21)
+
+/* RX User Data Valid flag: 1=valid IU detected, 0 = no IU detected. */
+#define SPDIF_STATUS_0_IS_U (1<<20)
+
+/*
+ * RX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_RU (1<<19)
+
+/*
+ * TX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_TU (1<<18)
+
+/*
+ * RX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_RX (1<<17)
+
+/*
+ * TX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_TX (1<<16)
+
+
+/* SPDIF FIFO Configuration and Status Register */
+
+/* Register SPDIF_DATA_FIFO_CSR_0 */
+
+#define SPDIF_FIFO_ATN_LVL_ONE_SLOT 0
+#define SPDIF_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS 3
+
+
+/* Clear Receiver User FIFO (RX USR.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_RU_CLR (1<<31)
+
+/*
+ * RX USR.FIFO Attention Level:
+ * 00=1-slot-full, 01=2-slots-full, 10=3-slots-full, 11=4-slots-full.
+ */
+
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1 (0)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2 (1)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3 (2)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4 (3)
+
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT (29)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+
+/* Number of RX USR.FIFO levels with valid data. */
+#define SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_SHIFT (24)
+#define SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_MASK \
+ (0x1f << SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter User FIFO (TX USR.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_TU_CLR (1<<23)
+
+/*
+ * TxUSR.FIFO Attention Level:
+ * 11=4-slots-empty, 10=3-slots-empty, 01=2-slots-empty, 00=1-slot-empty.
+ */
+
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1 (0)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2 (1)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3 (2)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4 (3)
+
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT (21)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+
+/* Number of Tx USR.FIFO levels that could be filled. */
+#define SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_SHIFT (16)
+#define SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_FIELD \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_SHIFT)
+
+/* Clear Receiver Data FIFO (RX DATA.FIFO). */
+#define SPDIF_DATA_FIFO_CSR_0_RX_CLR (1<<15)
+
+/*
+ * Rx FIFO Attention Level:
+ * 11=12-slots-full, 10=8-slots-full, 01=4-slots-full, 00=1-slot-full.
+ */
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT (13)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX1_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_ONE_SLOT << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX4_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_FOUR_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX8_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX12_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+
+
+/* Number of RX DATA.FIFO levels with valid data */
+#define SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_SHIFT (8)
+#define SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_FIELD \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter Data FIFO (TX DATA.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_TX_CLR (1<<7)
+
+/*
+ * Tx FIFO Attention Level:
+ * 11=12-slots-empty, 10=8-slots-empty, 01=4-slots-empty, 00=1-slot-empty
+ */
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT (5)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX1_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_ONE_SLOT << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX4_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_FOUR_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX8_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX12_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+
+
+/* Number of Tx DATA.FIFO levels that could be filled. */
+#define SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT (0)
+#define SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_MASK \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT)
+
+
+#endif /* __ARCH_ARM_MACH_TEGRA_SPDIF_H */
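Two details in this header are easy to trip over: multi-bit fields are programmed through their *_SHIFT/*_MASK pairs, and the sticky IS_* status bits are write-1-to-clear. A minimal sketch, assuming the controller registers have already been mapped from TEGRA_SPDIF_BASE:

    #include <linux/io.h>
    #include <mach/spdif.h>

    /* spdif_base is assumed to be an ioremap() of TEGRA_SPDIF_BASE. */
    static void example_spdif_setup(void __iomem *spdif_base)
    {
            u32 val;

            /* Program the multi-bit BIT_MODE field via its mask/shift pair. */
            val = readl(spdif_base + SPDIF_CTRL_0);
            val &= ~SPDIF_CTRL_0_BIT_MODE_MASK;
            val |= SPDIF_CTRL_0_BIT_MODE_MODE16BIT;
            writel(val, spdif_base + SPDIF_CTRL_0);

            /* Sticky IS_* bits are cleared by writing 1 back to them. */
            val = readl(spdif_base + SPDIF_STATUS_0);
            if (val & SPDIF_STATUS_0_IS_P)
                    writel(SPDIF_STATUS_0_IS_P, spdif_base + SPDIF_STATUS_0);
    }
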
diff --git a/arch/arm/mach-tegra/include/mach/spi.h b/arch/arm/mach-tegra/include/mach/spi.h
new file mode 100644
index 000000000000..171e4007b4bc
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/spi.h
@@ -0,0 +1,42 @@
+/*
+ * arch/arm/mach-tegra/include/mach/spi.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_SPI_H
+#define __MACH_TEGRA_SPI_H
+
+#include <linux/types.h>
+#include <linux/spi/spi.h>
+
+typedef int (*callback)(void *client_data);
+
+/**
+ * spi_tegra_register_callback - registers a notification callback provided
+ * by the client.
+ * The callback indicates that the controller is ready to receive/transfer
+ * data.
+ * @spi: struct spi_device - refer to linux/spi/spi.h
+ * @func: callback function
+ * @client_data: data to be passed to the callback
+ * Context: cannot sleep
+ */
+int spi_tegra_register_callback(struct spi_device *spi, callback func,
+ void *client_data);
+
+#endif
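Usage is a single registration call from the client driver during probe. The completion-based callback below is only a sketch; the example_* names are placeholders.

    #include <linux/completion.h>
    #include <linux/spi/spi.h>
    #include <mach/spi.h>

    static DECLARE_COMPLETION(example_controller_ready);

    static int example_ready_cb(void *client_data)
    {
            /* Controller is configured; release the waiting transfer path. */
            complete(&example_controller_ready);
            return 0;
    }

    /* Called from the slave device's probe(). */
    static int example_register_ready_cb(struct spi_device *spi)
    {
            return spi_tegra_register_callback(spi, example_ready_cb, NULL);
    }
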
diff --git a/arch/arm/mach-tegra/include/mach/suspend.h b/arch/arm/mach-tegra/include/mach/suspend.h
deleted file mode 100644
index 5af8715d2e1e..000000000000
--- a/arch/arm/mach-tegra/include/mach/suspend.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * arch/arm/mach-tegra/include/mach/suspend.h
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-
-#ifndef _MACH_TEGRA_SUSPEND_H_
-#define _MACH_TEGRA_SUSPEND_H_
-
-void tegra_pinmux_suspend(void);
-void tegra_irq_suspend(void);
-void tegra_gpio_suspend(void);
-void tegra_clk_suspend(void);
-void tegra_dma_suspend(void);
-void tegra_timer_suspend(void);
-
-void tegra_pinmux_resume(void);
-void tegra_irq_resume(void);
-void tegra_gpio_resume(void);
-void tegra_clk_resume(void);
-void tegra_dma_resume(void);
-void tegra_timer_resume(void);
-
-#endif /* _MACH_TEGRA_SUSPEND_H_ */
diff --git a/arch/arm/mach-tegra/include/mach/system.h b/arch/arm/mach-tegra/include/mach/system.h
index 027c4215d313..7bc605e5dc84 100644
--- a/arch/arm/mach-tegra/include/mach/system.h
+++ b/arch/arm/mach-tegra/include/mach/system.h
@@ -7,6 +7,8 @@
* Colin Cross <ccross@google.com>
* Erik Gilling <konkers@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
diff --git a/arch/arm/mach-tegra/include/mach/tegra-bb-power.h b/arch/arm/mach-tegra/include/mach/tegra-bb-power.h
new file mode 100644
index 000000000000..e0b7e3de326f
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra-bb-power.h
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra-bb-power.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define GPIO_INVALID UINT_MAX
+
+union tegra_bb_gpio_id {
+ struct {
+ int mdm_reset;
+ int mdm_on;
+ int ap2mdm_ack;
+ int mdm2ap_ack;
+ int ap2mdm_ack2;
+ int mdm2ap_ack2;
+ int rsvd1;
+ int rsvd2;
+ } generic;
+ struct {
+ int bb_rst;
+ int bb_on;
+ int ipc_bb_wake;
+ int ipc_ap_wake;
+ int ipc_hsic_active;
+ int ipc_hsic_sus_req;
+ int rsvd1;
+ int rsvd2;
+ } xmm;
+ struct {
+ int pwr_status;
+ int pwr_on;
+ int uart_awr;
+ int uart_cwr;
+ int usb_awr;
+ int usb_cwr;
+ int service;
+ int resout2;
+ } m7400;
+};
+
+typedef struct platform_device* (*ehci_register_cb)(void);
+typedef void (*ehci_unregister_cb)(struct platform_device *);
+
+struct tegra_bb_pdata {
+ union tegra_bb_gpio_id *id;
+ struct platform_device *device;
+ ehci_register_cb ehci_register;
+ ehci_unregister_cb ehci_unregister;
+ int bb_id;
+};
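A board would pick one member of the gpio union and supply the EHCI register/unregister hooks. Everything prefixed example_ below is a placeholder, not a real device.

    #include <linux/platform_device.h>
    #include <mach/tegra-bb-power.h>

    static struct platform_device example_hsic_ehci_device;    /* placeholder */

    static struct platform_device *example_ehci_register(void)
    {
            platform_device_register(&example_hsic_ehci_device);
            return &example_hsic_ehci_device;
    }

    static void example_ehci_unregister(struct platform_device *pdev)
    {
            platform_device_unregister(pdev);
    }

    static union tegra_bb_gpio_id example_bb_gpios;     /* filled in per board */

    static struct tegra_bb_pdata example_bb_pdata = {
            .id              = &example_bb_gpios,
            .ehci_register   = example_ehci_register,
            .ehci_unregister = example_ehci_unregister,
            .bb_id           = 0,           /* board-specific baseband id */
    };
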
diff --git a/arch/arm/mach-tegra/include/mach/tegra_aic326x_pdata.h b/arch/arm/mach-tegra/include/mach/tegra_aic326x_pdata.h
new file mode 100644
index 000000000000..a47ef1982e4d
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_aic326x_pdata.h
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_aic326x_pdata.h
+ *
+ * Copyright 2011 NVIDIA, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MACH_TEGRA_TLVAIC326X_H
+#define __MACH_TEGRA_TLVAIC326X_H
+
+#define HIFI_CODEC 0
+#define BASEBAND 1
+#define BT_SCO 2
+#define NUM_I2S_DEVICES 3
+
+struct baseband_config {
+ int rate;
+ int channels;
+};
+
+struct tegra_aic326x_platform_data {
+ int gpio_spkr_en;
+ int gpio_hp_det;
+ int gpio_hp_mute;
+ int gpio_int_mic_en;
+ int gpio_ext_mic_en;
+ int audio_port_id[NUM_I2S_DEVICES];
+ struct baseband_config baseband_param;
+};
+#endif
+
diff --git a/arch/arm/mach-tegra/include/mach/tegra_dc_ext.h b/arch/arm/mach-tegra/include/mach/tegra_dc_ext.h
new file mode 100644
index 000000000000..521039283d8c
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_dc_ext.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_dc_ext.h
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MACH_TEGRA_DC_EXT_H
+#define __MACH_TEGRA_DC_EXT_H
+
+#include <linux/nvhost.h>
+
+struct tegra_dc_ext;
+
+#ifdef CONFIG_TEGRA_DC_EXTENSIONS
+int __init tegra_dc_ext_module_init(void);
+void __exit tegra_dc_ext_module_exit(void);
+
+struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc);
+void tegra_dc_ext_unregister(struct tegra_dc_ext *dc_ext);
+
+/* called by display controller on enable/disable */
+void tegra_dc_ext_enable(struct tegra_dc_ext *dc_ext);
+void tegra_dc_ext_disable(struct tegra_dc_ext *dc_ext);
+
+int tegra_dc_ext_process_hotplug(int output);
+
+#else /* CONFIG_TEGRA_DC_EXTENSIONS */
+
+static inline
+int tegra_dc_ext_module_init(void)
+{
+ return 0;
+}
+static inline
+void tegra_dc_ext_module_exit(void)
+{
+}
+
+static inline
+struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc)
+{
+ return NULL;
+}
+static inline
+void tegra_dc_ext_unregister(struct tegra_dc_ext *dc_ext)
+{
+}
+static inline
+void tegra_dc_ext_enable(struct tegra_dc_ext *dc_ext)
+{
+}
+static inline
+void tegra_dc_ext_disable(struct tegra_dc_ext *dc_ext)
+{
+}
+static inline
+int tegra_dc_ext_process_hotplug(int output)
+{
+ return 0;
+}
+#endif /* CONFIG_TEGRA_DC_EXTENSIONS */
+
+#endif /* __MACH_TEGRA_DC_EXT_H */
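Because the disabled-configuration stubs return benign values (NULL or 0), callers can use this API without their own #ifdefs. A hedged sketch of the intended pattern, assuming the register call yields NULL or an error pointer when extensions are unavailable:

    /* Inside the display controller driver (sketch only): */
    dc->ext = tegra_dc_ext_register(ndev, dc);
    if (IS_ERR_OR_NULL(dc->ext))
            dc->ext = NULL;                 /* extensions simply unavailable */

    /* ... later, when the head is enabled ... */
    if (dc->ext)
            tegra_dc_ext_enable(dc->ext);
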
diff --git a/arch/arm/mach-tegra/include/mach/tegra_fb.h b/arch/arm/mach-tegra/include/mach/tegra_fb.h
new file mode 100644
index 000000000000..84ae8869b247
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_fb.h
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_fb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Platform data structure to be passed to the driver */
+struct tegra_fb_lcd_data {
+ int fb_xres;
+ int fb_yres;
+ /* Resolution of the output to the LCD. If different from the
+ framebuffer resolution, the Tegra display block will scale it */
+ int lcd_xres;
+ int lcd_yres;
+ int bits_per_pixel;
+};
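As the comment notes, the display block scales whenever the LCD and framebuffer resolutions differ. A hypothetical configuration where a 720p framebuffer is upscaled to a 1080p panel:

    static struct tegra_fb_lcd_data example_fb_lcd_data = {
            .fb_xres        = 1280,
            .fb_yres        = 720,
            .lcd_xres       = 1920,         /* panel is larger: hardware upscales */
            .lcd_yres       = 1080,
            .bits_per_pixel = 32,
    };
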
diff --git a/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h b/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h
new file mode 100644
index 000000000000..4d1a0b54f2ae
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h
@@ -0,0 +1,30 @@
+/*
+ * linux/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_FIQ_DEBUGGER_H
+#define __MACH_TEGRA_FIQ_DEBUGGER_H
+
+#ifdef CONFIG_TEGRA_FIQ_DEBUGGER
+void tegra_serial_debug_init(unsigned int base, int irq,
+ struct clk *clk, int signal_irq, int wakeup_irq);
+#else
+static inline void tegra_serial_debug_init(unsigned int base, int irq,
+ struct clk *clk, int signal_irq, int wakeup_irq)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/tegra_fuse.h b/arch/arm/mach-tegra/include/mach/tegra_fuse.h
new file mode 100644
index 000000000000..d264745c70c0
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_fuse.h
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_fuse.h
+ *
+ * Tegra Public Fuse header file
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_PUBLIC_FUSE_H_
+#define _MACH_TEGRA_PUBLIC_FUSE_H_
+
+int tegra_fuse_get_revision(u32 *rev);
+int tegra_fuse_get_tsensor_calibration_data(u32 *calib);
+int tegra_fuse_get_tsensor_spare_bits(u32 *spare_bits);
+
+#endif /* _MACH_TEGRA_PUBLIC_FUSE_H_*/
+
diff --git a/arch/arm/mach-tegra/include/mach/tegra_max98088_pdata.h b/arch/arm/mach-tegra/include/mach/tegra_max98088_pdata.h
new file mode 100644
index 000000000000..eb59cf0962bd
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_max98088_pdata.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_max98088_pdata.h
+ *
+ * Copyright 2011 NVIDIA, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define HIFI_CODEC 0
+#define BASEBAND 1
+#define BT_SCO 2
+#define NUM_I2S_DEVICES 3
+
+struct baseband_config {
+ int rate;
+ int channels;
+};
+
+struct tegra_max98088_platform_data {
+ int gpio_spkr_en;
+ int gpio_hp_det;
+ int gpio_hp_mute;
+ int gpio_int_mic_en;
+ int gpio_ext_mic_en;
+ int audio_port_id[NUM_I2S_DEVICES];
+ struct baseband_config baseband_param;
+};
diff --git a/arch/arm/mach-tegra/include/mach/tegra_odm_fuses.h b/arch/arm/mach-tegra/include/mach/tegra_odm_fuses.h
new file mode 100644
index 000000000000..4ab8433dbbff
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_odm_fuses.h
@@ -0,0 +1,107 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_odm_fuses.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_ODM_FUSES_H
+#define __MACH_TEGRA_ODM_FUSES_H
+
+#define SBK_DEVKEY_STATUS_SZ sizeof(u32)
+
+/*
+ * fuse io parameters: params with sizes less than a byte are
+ * explicitly mentioned
+ */
+enum fuse_io_param {
+ DEVKEY,
+ JTAG_DIS, /* 1 bit long */
+ /*
+ * Programming the odm production fuse at the same
+ * time as the sbk or dev_key is not allowed as it is not possible to
+ * verify that the sbk or dev_key were programmed correctly.
+ */
+ ODM_PROD_MODE, /* 1 bit long */
+ SEC_BOOT_DEV_CFG,
+ SEC_BOOT_DEV_SEL, /* 3 bits long */
+ SBK,
+ SW_RSVD, /* 4 bits long */
+ IGNORE_DEV_SEL_STRAPS, /* 1 bit long */
+ ODM_RSVD,
+ SBK_DEVKEY_STATUS,
+ _PARAMS_U32 = 0x7FFFFFFF
+};
+
+#define MAX_PARAMS SBK_DEVKEY_STATUS
+
+/* The member order is fixed; please do not change it. */
+struct fuse_data {
+ u32 devkey;
+ u32 jtag_dis;
+ u32 odm_prod_mode;
+ u32 bootdev_cfg;
+ u32 bootdev_sel;
+ u32 sbk[4];
+ u32 sw_rsvd;
+ u32 ignore_devsel_straps;
+ u32 odm_rsvd[8];
+};
+
+/* secondary boot device options */
+enum {
+ SECBOOTDEV_SDMMC,
+ SECBOOTDEV_NOR,
+ SECBOOTDEV_SPI,
+ SECBOOTDEV_NAND,
+ SECBOOTDEV_LBANAND,
+ SECBOOTDEV_MUXONENAND,
+ _SECBOOTDEV_MAX,
+ _SECBOOTDEV_U32 = 0x7FFFFFFF
+};
+
+/*
+ * read the fuse settings
+ * @param: io_param_type - param type enum
+ * @param: data - buffer that receives the fuse value
+ * @param: size - read size in bytes
+ */
+int tegra_fuse_read(enum fuse_io_param io_param_type, u32 *data, int size);
+
+#define FLAGS_DEVKEY BIT(DEVKEY)
+#define FLAGS_JTAG_DIS BIT(JTAG_DIS)
+#define FLAGS_SBK_DEVKEY_STATUS BIT(SBK_DEVKEY_STATUS)
+#define FLAGS_ODM_PROD_MODE BIT(ODM_PROD_MODE)
+#define FLAGS_SEC_BOOT_DEV_CFG BIT(SEC_BOOT_DEV_CFG)
+#define FLAGS_SEC_BOOT_DEV_SEL BIT(SEC_BOOT_DEV_SEL)
+#define FLAGS_SBK BIT(SBK)
+#define FLAGS_SW_RSVD BIT(SW_RSVD)
+#define FLAGS_IGNORE_DEV_SEL_STRAPS BIT(IGNORE_DEV_SEL_STRAPS)
+#define FLAGS_ODMRSVD BIT(ODM_RSVD)
+
+/*
+ * Prior to invoking this routine, the caller is responsible for supplying
+ * valid fuse programming voltage.
+ *
+ * @param: pgm_data - entire data to be programmed
+ * @param: flags - program flags (e.g. FLAGS_DEVKEY)
+ */
+int tegra_fuse_program(struct fuse_data *pgm_data, u32 flags);
+
+/* Disables the fuse programming until the next system reset */
+void tegra_fuse_program_disable(void);
+
+extern int (*tegra_fuse_regulator_en)(int);
+#endif
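A sketch of reading and programming fuses with this API, assuming the caller has already applied fuse programming voltage and that the flags argument selects which members of fuse_data are burned (note the ODM_PROD_MODE restriction above). The devkey value is illustrative only.

    #include <mach/tegra_odm_fuses.h>

    static int example_fuse_ops(void)
    {
            u32 rsvd[8];
            struct fuse_data pgm = { 0 };
            int err;

            /* Read back the ODM reserved field (8 x 32 bits). */
            err = tegra_fuse_read(ODM_RSVD, rsvd, sizeof(rsvd));
            if (err)
                    return err;

            /* Burn only the device key; other members are left alone
             * because their FLAGS_* bits are not set. */
            pgm.devkey = 0x12345678;        /* illustrative value only */
            err = tegra_fuse_program(&pgm, FLAGS_DEVKEY);

            /* Optionally lock out further programming until reset. */
            tegra_fuse_program_disable();
            return err;
    }
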
diff --git a/arch/arm/mach-tegra/include/mach/tegra_usb_modem_power.h b/arch/arm/mach-tegra/include/mach/tegra_usb_modem_power.h
new file mode 100644
index 000000000000..0ce7fa40eb2e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_usb_modem_power.h
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_usb_modem_power.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_USB_MODEM_POWER_H
+#define __MACH_TEGRA_USB_MODEM_POWER_H
+
+#include <linux/interrupt.h>
+
+/* modem capabilities */
+#define TEGRA_MODEM_AUTOSUSPEND 0x01
+#define TEGRA_MODEM_RECOVERY 0x02
+
+/* modem operations */
+struct tegra_modem_operations {
+ int (*init) (void); /* modem init */
+ void (*start) (void); /* modem start */
+ void (*stop) (void); /* modem stop */
+ void (*suspend) (void); /* send L3 hint during system suspend */
+ void (*resume) (void); /* send L3->0 hint during system resume */
+ void (*reset) (void); /* modem reset */
+};
+
+/* tegra usb modem power platform data */
+struct tegra_usb_modem_power_platform_data {
+ const struct tegra_modem_operations *ops;
+ unsigned int wake_gpio; /* remote wakeup gpio */
+ unsigned int flags; /* remote wakeup irq flags */
+};
+
+#endif /* __MACH_TEGRA_USB_MODEM_POWER_H */
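A board supplies the operations table and the remote-wakeup GPIO roughly as follows. The example_modem_* callbacks are stubs, the wake GPIO is board specific, and treating unset hooks as optional is an assumption of this sketch.

    #include <linux/interrupt.h>
    #include <mach/tegra_usb_modem_power.h>

    static int  example_modem_init(void)  { return 0; }
    static void example_modem_start(void) { }
    static void example_modem_stop(void)  { }

    static const struct tegra_modem_operations example_modem_ops = {
            .init  = example_modem_init,
            .start = example_modem_start,
            .stop  = example_modem_stop,
            /* .suspend/.resume/.reset left unset in this sketch */
    };

    static struct tegra_usb_modem_power_platform_data example_modem_pdata = {
            .ops       = &example_modem_ops,
            .wake_gpio = 0,                         /* board-specific wake GPIO */
            .flags     = IRQF_TRIGGER_FALLING,      /* remote-wake irq trigger */
    };
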
diff --git a/arch/arm/mach-tegra/include/mach/tegra_wm8753_pdata.h b/arch/arm/mach-tegra/include/mach/tegra_wm8753_pdata.h
new file mode 100644
index 000000000000..944e410b4aec
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra_wm8753_pdata.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_wm8753_pdata.h
+ *
+ * Copyright 2011 NVIDIA, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+struct tegra_wm8753_platform_data {
+ int gpio_spkr_en;
+ int gpio_hp_det;
+ int gpio_hp_mute;
+ int gpio_int_mic_en;
+ int gpio_ext_mic_en;
+ unsigned int debounce_time_hp;
+};
diff --git a/arch/arm/mach-tegra/include/mach/thermal.h b/arch/arm/mach-tegra/include/mach/thermal.h
new file mode 100644
index 000000000000..ab7b34492d9e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/thermal.h
@@ -0,0 +1,62 @@
+/*
+ * arch/arm/mach-tegra/include/mach/thermal.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_THERMAL_H
+#define __MACH_THERMAL_H
+
+/* All units in millicelsius */
+struct tegra_thermal_data {
+ long temp_throttle;
+ long temp_shutdown;
+ long temp_offset;
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ long edp_offset;
+ long hysteresis_edp;
+#endif
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ int tc1;
+ int tc2;
+ long passive_delay;
+#else
+ long hysteresis_throttle;
+#endif
+};
+
+struct tegra_thermal_device {
+ char *name;
+ void *data;
+ long offset;
+ int (*get_temp) (void *, long *);
+ int (*get_temp_low)(void *, long *);
+ int (*set_limits) (void *, long, long);
+ int (*set_alert)(void *, void (*)(void *), void *);
+ int (*set_shutdown_temp)(void *, long);
+};
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+int tegra_thermal_init(struct tegra_thermal_data *data);
+int tegra_thermal_set_device(struct tegra_thermal_device *device);
+int tegra_thermal_exit(void);
+#else
+static inline int tegra_thermal_init(struct tegra_thermal_data *data)
+{ return 0; }
+static inline int tegra_thermal_set_device(struct tegra_thermal_device *dev)
+{ return 0; }
+static inline int tegra_thermal_exit(void)
+{ return 0; }
+#endif
+
+#endif /* __MACH_THERMAL_H */
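Typical use is to hand the trip data to tegra_thermal_init() and then attach a sensor with tegra_thermal_set_device(). The callbacks, sensor state, and temperature values below are placeholders for illustration.

    #include <mach/thermal.h>

    /* example_get_temp() etc. would be provided by the sensor driver. */
    static struct tegra_thermal_device example_thermal_dev = {
            .name              = "example-tsensor",
            .data              = &example_sensor_state,
            .get_temp          = example_get_temp,
            .set_limits        = example_set_limits,
            .set_alert         = example_set_alert,
            .set_shutdown_temp = example_set_shutdown_temp,
    };

    /* All temperatures are in millicelsius; values are illustrative. */
    static struct tegra_thermal_data example_thermal_data = {
            .temp_throttle = 85000,
            .temp_shutdown = 90000,
            .temp_offset   = 0,
    };

    static int __init example_thermal_setup(void)
    {
            int err = tegra_thermal_init(&example_thermal_data);

            if (!err)
                    err = tegra_thermal_set_device(&example_thermal_dev);
            return err;
    }
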
diff --git a/arch/arm/mach-tegra/include/mach/tsensor.h b/arch/arm/mach-tegra/include/mach/tsensor.h
new file mode 100644
index 000000000000..10a33217bc98
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tsensor.h
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tsensor.h
+ *
+ * Tegra tsensor header file
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_TSENSOR_H
+#define __MACH_TEGRA_TSENSOR_H
+
+#include <linux/types.h>
+
+#include <mach/edp.h>
+
+#define MAX_ZONES 16
+
+struct tegra_tsensor_pmu_data {
+ u8 poweroff_reg_data;
+ u8 poweroff_reg_addr;
+ u8 reset_tegra;
+ u8 controller_type;
+ u8 i2c_controller_id;
+ u8 pinmux;
+ u8 pmu_16bit_ops;
+ u8 pmu_i2c_addr;
+};
+
+struct tegra_tsensor_data;
+
+struct tegra_tsensor_platform_data {
+ void (*probe_callback)(struct tegra_tsensor_data *);
+};
+
+void __init tegra3_tsensor_init(struct tegra_tsensor_pmu_data *);
+
+int tsensor_thermal_get_temp(struct tegra_tsensor_data *data,
+ long *milli_temp);
+int tsensor_thermal_get_temp_low(struct tegra_tsensor_data *data,
+ long *milli_temp);
+int tsensor_thermal_set_limits(struct tegra_tsensor_data *data,
+ long lo_limit_milli,
+ long hi_limit_milli);
+int tsensor_thermal_set_alert(struct tegra_tsensor_data *data,
+ void (*alert_func)(void *),
+ void *alert_data);
+int tsensor_thermal_set_shutdown_temp(struct tegra_tsensor_data *data,
+ long shutdown_temp_milli);
+
+#endif /* __MACH_TEGRA_TSENSOR_H */
+
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 4e8323770c79..9665858ab11d 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -7,6 +7,8 @@
* Colin Cross <ccross@google.com>
* Erik Gilling <konkers@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -26,6 +28,50 @@
#include <mach/iomap.h>
+#if defined(CONFIG_TEGRA_DEBUG_UARTA)
+#define DEBUG_UART_CLK_SRC (TEGRA_CLK_RESET_BASE + 0x178)
+#define DEBUG_UART_CLK_ENB_SET_REG (TEGRA_CLK_RESET_BASE + 0x320)
+#define DEBUG_UART_CLK_ENB_SET_BIT (1 << 6)
+#define DEBUG_UART_RST_CLR_REG (TEGRA_CLK_RESET_BASE + 0x304)
+#define DEBUG_UART_RST_CLR_BIT (1 << 6)
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+#define DEBUG_UART_CLK_SRC (TEGRA_CLK_RESET_BASE + 0x17c)
+#define DEBUG_UART_CLK_ENB_SET_REG (TEGRA_CLK_RESET_BASE + 0x320)
+#define DEBUG_UART_CLK_ENB_SET_BIT (1 << 7)
+#define DEBUG_UART_RST_CLR_REG (TEGRA_CLK_RESET_BASE + 0x304)
+#define DEBUG_UART_RST_CLR_BIT (1 << 7)
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+#define DEBUG_UART_CLK_SRC (TEGRA_CLK_RESET_BASE + 0x1a0)
+#define DEBUG_UART_CLK_ENB_SET_REG (TEGRA_CLK_RESET_BASE + 0x328)
+#define DEBUG_UART_CLK_ENB_SET_BIT (1 << 23)
+#define DEBUG_UART_RST_CLR_REG (TEGRA_CLK_RESET_BASE + 0x30C)
+#define DEBUG_UART_RST_CLR_BIT (1 << 23)
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+#define DEBUG_UART_CLK_SRC (TEGRA_CLK_RESET_BASE + 0x1c0)
+#define DEBUG_UART_CLK_ENB_SET_REG (TEGRA_CLK_RESET_BASE + 0x330)
+#define DEBUG_UART_CLK_ENB_SET_BIT (1 << 1)
+#define DEBUG_UART_RST_CLR_REG (TEGRA_CLK_RESET_BASE + 0x314)
+#define DEBUG_UART_RST_CLR_BIT (1 << 1)
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+#define DEBUG_UART_CLK_SRC (TEGRA_CLK_RESET_BASE + 0x1c4)
+#define DEBUG_UART_CLK_ENB_SET_REG (TEGRA_CLK_RESET_BASE + 0x330)
+#define DEBUG_UART_CLK_ENB_SET_BIT (1 << 2)
+#define DEBUG_UART_RST_CLR_REG (TEGRA_CLK_RESET_BASE + 0x314)
+#define DEBUG_UART_RST_CLR_BIT (1 << 2)
+#else
+#define DEBUG_UART_CLK_SRC 0
+#define DEBUG_UART_CLK_ENB_SET_REG 0
+#define DEBUG_UART_CLK_ENB_SET_BIT 0
+#define DEBUG_UART_RST_CLR_REG 0
+#define DEBUG_UART_RST_CLR_BIT 0
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define DEBUG_UART_DLL 0x75
+#else
+#define DEBUG_UART_DLL 0xdd
+#endif
+
static void putc(int c)
{
volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
@@ -43,16 +89,44 @@ static inline void flush(void)
{
}
+static inline void konk_delay(int delay)
+{
+ int i;
+
+ for (i = 0; i < (1000 * delay); i++) {
+ barrier();
+ }
+}
+
+
static inline void arch_decomp_setup(void)
{
volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
int shift = 2;
+ volatile u32 *addr;
if (uart == NULL)
return;
+ /* Debug UART clock source is PLLP_OUT0. */
+ addr = (volatile u32 *)DEBUG_UART_CLK_SRC;
+ *addr = 0;
+
+ /* Enable clock to debug UART. */
+ addr = (volatile u32 *)DEBUG_UART_CLK_ENB_SET_REG;
+ *addr = DEBUG_UART_CLK_ENB_SET_BIT;
+
+ konk_delay(5);
+
+ /* Deassert reset to debug UART. */
+ addr = (volatile u32 *)DEBUG_UART_RST_CLR_REG;
+ *addr = DEBUG_UART_RST_CLR_BIT;
+
+ konk_delay(5);
+
+ /* Set up debug UART. */
uart[UART_LCR << shift] |= UART_LCR_DLAB;
- uart[UART_DLL << shift] = 0x75;
+ uart[UART_DLL << shift] = DEBUG_UART_DLL;
uart[UART_DLM << shift] = 0x0;
uart[UART_LCR << shift] = 3;
}
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h
index d4b8f9e298a8..6e3a9f90bdd9 100644
--- a/arch/arm/mach-tegra/include/mach/usb_phy.h
+++ b/arch/arm/mach-tegra/include/mach/usb_phy.h
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/include/mach/usb_phy.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -18,7 +19,9 @@
#define __MACH_USB_PHY_H
#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
#include <linux/usb/otg.h>
+#include <linux/platform_data/tegra_usb.h>
struct tegra_utmip_config {
u8 hssync_start_delay;
@@ -26,13 +29,48 @@ struct tegra_utmip_config {
u8 idle_wait_delay;
u8 term_range_adj;
u8 xcvr_setup;
+ u8 xcvr_setup_offset;
+ u8 xcvr_use_fuses;
u8 xcvr_lsfslew;
u8 xcvr_lsrslew;
};
+struct tegra_ulpi_trimmer {
+ u8 shadow_clk_delay; /* 0 ~ 31 */
+ u8 clock_out_delay; /* 0 ~ 31 */
+ u8 data_trimmer; /* 0 ~ 7 */
+ u8 stpdirnxt_trimmer; /* 0 ~ 7 */
+};
+
struct tegra_ulpi_config {
+ int enable_gpio;
int reset_gpio;
const char *clk;
+ const struct tegra_ulpi_trimmer *trimmer;
+ int (*pre_phy_on)(void);
+ int (*post_phy_on)(void);
+ int (*pre_phy_off)(void);
+ int (*post_phy_off)(void);
+ void (*phy_restore_start)(void);
+ void (*phy_restore_end)(void);
+ int phy_restore_gpio; /* null phy restore ack from device */
+ int ulpi_dir_gpio; /* ulpi dir */
+ int ulpi_d0_gpio; /* usb linestate[0] */
+ int ulpi_d1_gpio; /* usb linestate[1] */
+};
+
+struct tegra_uhsic_config {
+ int enable_gpio;
+ int reset_gpio;
+ u8 sync_start_delay;
+ u8 idle_wait_delay;
+ u8 term_range_adj;
+ u8 elastic_underrun_limit;
+ u8 elastic_overrun_limit;
+ int (*postsuspend)(void);
+ int (*preresume)(void);
+ int (*usb_phy_ready)(void);
+ int (*post_phy_off)(void);
};
enum tegra_usb_phy_port_speed {
@@ -46,6 +84,13 @@ enum tegra_usb_phy_mode {
TEGRA_USB_PHY_MODE_HOST,
};
+struct usb_phy_plat_data {
+ int instance;
+ int vbus_irq;
+ int vbus_gpio;
+ char *vbus_reg_supply;
+};
+
struct tegra_xtal_freq;
struct tegra_usb_phy {
@@ -58,23 +103,44 @@ struct tegra_usb_phy {
struct clk *pad_clk;
enum tegra_usb_phy_mode mode;
void *config;
+ struct regulator *reg_vdd;
+ struct regulator *reg_vbus;
+ enum tegra_usb_phy_type usb_phy_type;
+ bool regulator_on;
struct otg_transceiver *ulpi;
+ int initialized;
+ bool power_on;
+ bool remote_wakeup;
+ int hotplug;
+ unsigned int xcvr_setup_value;
};
+typedef int (*tegra_phy_fp)(struct tegra_usb_phy *phy, bool is_dpd);
+typedef void (*tegra_phy_restore_start_fp)(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed);
+typedef void (*tegra_phy_restore_end_fp)(struct tegra_usb_phy *phy);
+
struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
- void *config, enum tegra_usb_phy_mode phy_mode);
+ void *config, enum tegra_usb_phy_mode phy_mode,
+ enum tegra_usb_phy_type usb_phy_type);
-int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
+int tegra_usb_phy_power_on(struct tegra_usb_phy *phy, bool is_dpd);
void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy);
void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy);
-void tegra_usb_phy_power_off(struct tegra_usb_phy *phy);
+void tegra_usb_phy_power_off(struct tegra_usb_phy *phy, bool is_dpd);
-void tegra_usb_phy_preresume(struct tegra_usb_phy *phy);
+void tegra_usb_phy_postsuspend(struct tegra_usb_phy *phy, bool is_dpd);
-void tegra_usb_phy_postresume(struct tegra_usb_phy *phy);
+void tegra_usb_phy_preresume(struct tegra_usb_phy *phy, bool is_dpd);
+
+void tegra_usb_phy_postresume(struct tegra_usb_phy *phy, bool is_dpd);
+
+void tegra_ehci_pre_reset(struct tegra_usb_phy *phy, bool is_dpd);
+
+void tegra_ehci_post_reset(struct tegra_usb_phy *phy, bool is_dpd);
void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed);
@@ -83,4 +149,18 @@ void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy);
void tegra_usb_phy_close(struct tegra_usb_phy *phy);
+int tegra_usb_phy_bus_connect(struct tegra_usb_phy *phy);
+
+int tegra_usb_phy_bus_reset(struct tegra_usb_phy *phy);
+
+int tegra_usb_phy_bus_idle(struct tegra_usb_phy *phy);
+
+bool tegra_usb_phy_is_device_connected(struct tegra_usb_phy *phy);
+
+bool tegra_usb_phy_charger_detect(struct tegra_usb_phy *phy);
+
+int __init tegra_usb_phy_init(struct usb_phy_plat_data *pdata, int size);
+
+bool tegra_usb_phy_is_remotewake_detected(struct tegra_usb_phy *phy);
+
#endif /* __MACH_USB_PHY_H */
diff --git a/arch/arm/mach-tegra/include/mach/vmalloc.h b/arch/arm/mach-tegra/include/mach/vmalloc.h
index fd6aa65b2dc6..db488e890b9e 100644
--- a/arch/arm/mach-tegra/include/mach/vmalloc.h
+++ b/arch/arm/mach-tegra/include/mach/vmalloc.h
@@ -23,6 +23,6 @@
#include <asm/sizes.h>
-#define VMALLOC_END 0xFE000000UL
+#define VMALLOC_END 0xF8000000UL
#endif
diff --git a/arch/arm/mach-tegra/include/mach/w1.h b/arch/arm/mach-tegra/include/mach/w1.h
new file mode 100644
index 000000000000..c96df7abdf96
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/w1.h
@@ -0,0 +1,84 @@
+/*
+ * include/mach/w1.h
+ *
+ * Copyright (C) 2010 Motorola, Inc
+ * Author: Andrei Warkentin <andreiw@motorola.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_W1_H
+#define __ASM_ARM_ARCH_TEGRA_W1_H
+
+struct tegra_w1_timings {
+
+ /* tsu, trelease, trdv, tlow0, tlow1 and tslot are formed
+ into the value written into OWR_RST_PRESENCE_TCTL_0 register. */
+
+ /* Read data setup, Tsu = N owr clks, Range = tsu < 1,
+ Typical value = 0x1 */
+ uint32_t tsu;
+
+ /* Release 1-wire time, Trelease = N owr clks,
+ Range = 0 <= trelease < 45, Typical value = 0xf */
+ uint32_t trelease;
+
+ /* Read data valid time, Trdv = N+1 owr clks, Range = Exactly 15 */
+ uint32_t trdv;
+
+ /* Write zero time low, Tlow0 = N+1 owr clks,
+ Range = 60 <= tlow0 < tslot < 120, typical value = 0x3c. */
+ uint32_t tlow0;
+
+ /* Write one time low, or TLOWR both are same Tlow1 = N+1 owr clks,
+ Range = 1 <= tlow1 < 15 TlowR = N+1 owr clks,
+ Range = 1 <= tlowR < 15, Typical value = 0x1. */
+ uint32_t tlow1;
+
+ /* Active time slot for write or read data, Tslot = N+1 owr clks,
+ Range = 60 <= tslot < 120, Typical value = 0x77. */
+ uint32_t tslot;
+
+ /* tpdl, tpdh, trstl, trsth are formed into the value written
+ into the OWR_RST_PRESENCE_TCTL_0 register. */
+
+ /* Tpdl = N owr clks, Range = 60 <= tpdl < 240,
+ Typical value = 0x78. */
+ uint32_t tpdl;
+
+ /* Tpdh = N+1 owr clks, Range = 15 <= tpdh < 60.
+ Typical value = 0x1e. */
+ uint32_t tpdh;
+
+ /* Trstl = N+1 owr clks, Range = 480 <= trstl < infinity,
+ Typical value = 0x1df. */
+ uint32_t trstl;
+
+ /* Trsth = N+1 owr clks, Range = 480 <= trsth < infinity,
+ Typical value = 0x1df. */
+ uint32_t trsth;
+
+ /* Read data sample clock. Should be <= (tlow1 - 6) clks;
+ 6 clks are used for deglitch. If deglitch is bypassed it
+ is 3 clks. Typical value = 0x7. */
+ uint32_t rdsclk;
+
+ /* Presence sample clock. Should be <= (tpdl - 6) clks;
+ 6 clks are used for deglitch. If deglitch is bypassed it is 3 clks.
+ Typical value = 0x50. */
+ uint32_t psclk;
+};
+
+struct tegra_w1_platform_data {
+ const char *clk_id;
+ struct tegra_w1_timings *timings;
+};
+
+#endif
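Filling the timing structure with the "typical" values quoted in the comments above gives a reasonable starting point; trdv is derived from the "exactly 15" requirement (Trdv = N+1, so N = 0xe), and the clock name is a placeholder.

    #include <mach/w1.h>

    static struct tegra_w1_timings example_w1_timings = {
            .tsu      = 0x1,
            .trelease = 0xf,
            .trdv     = 0xe,        /* Trdv = N+1 clks, giving exactly 15 */
            .tlow0    = 0x3c,
            .tlow1    = 0x1,
            .tslot    = 0x77,
            .tpdl     = 0x78,
            .tpdh     = 0x1e,
            .trstl    = 0x1df,
            .trsth    = 0x1df,
            .rdsclk   = 0x7,
            .psclk    = 0x50,
    };

    static struct tegra_w1_platform_data example_w1_pdata = {
            .clk_id  = "example_w1_clk",    /* placeholder clock name */
            .timings = &example_w1_timings,
    };
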
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index ea50fe28cf6a..7089123c6f0a 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -7,6 +7,8 @@
* Colin Cross <ccross@google.com>
* Erik Gilling <konkers@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -24,6 +26,7 @@
#include <linux/mm.h>
#include <linux/io.h>
+#include <mach/iomap.h>
#include <asm/page.h>
#include <asm/mach/map.h>
@@ -54,6 +57,24 @@ static struct map_desc tegra_io_desc[] __initdata = {
.length = IO_IRAM_SIZE,
.type = MT_DEVICE,
},
+ {
+ .virtual = IO_HOST1X_VIRT,
+ .pfn = __phys_to_pfn(IO_HOST1X_PHYS),
+ .length = IO_HOST1X_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = IO_USB_VIRT,
+ .pfn = __phys_to_pfn(IO_USB_PHYS),
+ .length = IO_USB_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = IO_SDMMC_VIRT,
+ .pfn = __phys_to_pfn(IO_SDMMC_PHYS),
+ .length = IO_SDMMC_SIZE,
+ .type = MT_DEVICE,
+ },
};
void __init tegra_map_common_io(void)
@@ -67,8 +88,27 @@ void __init tegra_map_common_io(void)
void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
{
void __iomem *v = IO_ADDRESS(p);
- if (v == NULL)
- v = __arm_ioremap(p, size, type);
+
+ /*
+ * __arm_ioremap fails to set the domain of ioremapped memory
+ * correctly, only use it on physical memory.
+ */
+ if (v == NULL) {
+ if ((p >= TEGRA_DRAM_BASE &&
+ (p + size) <= (TEGRA_DRAM_BASE + TEGRA_DRAM_SIZE)) ||
+ (p >= TEGRA_NOR_FLASH_BASE &&
+ (p + size) <= (TEGRA_NOR_FLASH_BASE + TEGRA_NOR_FLASH_SIZE)) ||
+ (p >= TEGRA_PCIE_BASE &&
+ (p + size) <= (TEGRA_PCIE_BASE + TEGRA_PCIE_SIZE)))
+ v = __arm_ioremap(p, size, type);
+ }
+
+ /*
+ * If the physical address was not physical memory or statically
+ * mapped, there's nothing we can do to map it safely.
+ */
+ BUG_ON(v == NULL);
+
return v;
}
EXPORT_SYMBOL(tegra_ioremap);
diff --git a/arch/arm/mach-tegra/iovmm-gart.c b/arch/arm/mach-tegra/iovmm-gart.c
new file mode 100644
index 000000000000..9616af6672a1
--- /dev/null
+++ b/arch/arm/mach-tegra/iovmm-gart.c
@@ -0,0 +1,346 @@
+/*
+ * arch/arm/mach-tegra/iovmm-gart.c
+ *
+ * Tegra I/O VMM implementation for GART devices in Tegra and Tegra 2 series
+ * systems-on-a-chip.
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/iovmm.h>
+
+#define GART_CONFIG 0x24
+#define GART_ENTRY_ADDR 0x28
+#define GART_ENTRY_DATA 0x2c
+
+#define VMM_NAME "iovmm-gart"
+#define DRIVER_NAME "tegra_gart"
+
+#define GART_PAGE_SHIFT (12)
+#define GART_PAGE_MASK (~((1<<GART_PAGE_SHIFT)-1))
+
+struct gart_device {
+ void __iomem *regs;
+ u32 *savedata;
+ u32 page_count; /* total remappable size */
+ tegra_iovmm_addr_t iovmm_base; /* offset to apply to vmm_area */
+ spinlock_t pte_lock;
+ struct tegra_iovmm_device iovmm;
+ struct tegra_iovmm_domain domain;
+ bool enable;
+};
+
+static int gart_map(struct tegra_iovmm_domain *, struct tegra_iovmm_area *);
+static void gart_unmap(struct tegra_iovmm_domain *,
+ struct tegra_iovmm_area *, bool);
+static void gart_map_pfn(struct tegra_iovmm_domain *,
+ struct tegra_iovmm_area *, tegra_iovmm_addr_t, unsigned long);
+static struct tegra_iovmm_domain *gart_alloc_domain(
+ struct tegra_iovmm_device *, struct tegra_iovmm_client *);
+
+static int gart_probe(struct platform_device *);
+static int gart_remove(struct platform_device *);
+static int gart_suspend(struct tegra_iovmm_device *dev);
+static void gart_resume(struct tegra_iovmm_device *dev);
+
+
+static struct tegra_iovmm_device_ops tegra_iovmm_gart_ops = {
+ .map = gart_map,
+ .unmap = gart_unmap,
+ .map_pfn = gart_map_pfn,
+ .alloc_domain = gart_alloc_domain,
+ .suspend = gart_suspend,
+ .resume = gart_resume,
+};
+
+static struct platform_driver tegra_iovmm_gart_drv = {
+ .probe = gart_probe,
+ .remove = gart_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int gart_suspend(struct tegra_iovmm_device *dev)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+ unsigned int i;
+ unsigned long reg;
+
+ if (!gart)
+ return -ENODEV;
+
+ if (!gart->enable)
+ return 0;
+
+ spin_lock(&gart->pte_lock);
+ reg = gart->iovmm_base;
+ for (i = 0; i < gart->page_count; i++) {
+ writel(reg, gart->regs + GART_ENTRY_ADDR);
+ gart->savedata[i] = readl(gart->regs + GART_ENTRY_DATA);
+ dmb();
+ reg += 1 << GART_PAGE_SHIFT;
+ }
+ spin_unlock(&gart->pte_lock);
+ return 0;
+}
+
+static void do_gart_setup(struct gart_device *gart, const u32 *data)
+{
+ unsigned long reg;
+ unsigned int i;
+
+ writel(1, gart->regs + GART_CONFIG);
+
+ reg = gart->iovmm_base;
+ for (i = 0; i < gart->page_count; i++) {
+ writel(reg, gart->regs + GART_ENTRY_ADDR);
+ writel((data) ? data[i] : 0, gart->regs + GART_ENTRY_DATA);
+ wmb();
+ reg += 1 << GART_PAGE_SHIFT;
+ }
+}
+
+static void gart_resume(struct tegra_iovmm_device *dev)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+
+ if (!gart || !gart->enable || !gart->savedata)
+ return;
+
+ spin_lock(&gart->pte_lock);
+ do_gart_setup(gart, gart->savedata);
+ spin_unlock(&gart->pte_lock);
+}
+
+static int gart_remove(struct platform_device *pdev)
+{
+ struct gart_device *gart = platform_get_drvdata(pdev);
+
+ if (!gart)
+ return 0;
+
+ if (gart->enable)
+ writel(0, gart->regs + GART_CONFIG);
+
+ gart->enable = 0;
+ platform_set_drvdata(pdev, NULL);
+ tegra_iovmm_unregister(&gart->iovmm);
+ if (gart->savedata)
+ vfree(gart->savedata);
+ if (gart->regs)
+ iounmap(gart->regs);
+ kfree(gart);
+ return 0;
+}
+
+static int gart_probe(struct platform_device *pdev)
+{
+ struct gart_device *gart;
+ struct resource *res, *res_remap;
+ void __iomem *gart_regs;
+ int e;
+
+ if (!pdev) {
+ pr_err(DRIVER_NAME ": platform_device required\n");
+ return -ENODEV;
+ }
+
+ if (PAGE_SHIFT != GART_PAGE_SHIFT) {
+ pr_err(DRIVER_NAME ": GART and CPU page size must match\n");
+ return -ENXIO;
+ }
+
+ /* the GART memory aperture is required */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ if (!res || !res_remap) {
+ pr_err(DRIVER_NAME ": GART memory aperture expected\n");
+ return -ENXIO;
+ }
+ gart = kzalloc(sizeof(*gart), GFP_KERNEL);
+ if (!gart) {
+ pr_err(DRIVER_NAME ": failed to allocate tegra_iovmm_device\n");
+ return -ENOMEM;
+ }
+
+ gart_regs = ioremap_wc(res->start, resource_size(res));
+ if (!gart_regs) {
+ pr_err(DRIVER_NAME ": failed to remap GART registers\n");
+ e = -ENXIO;
+ goto fail;
+ }
+
+ gart->iovmm.name = VMM_NAME;
+ gart->iovmm.ops = &tegra_iovmm_gart_ops;
+ gart->iovmm.pgsize_bits = GART_PAGE_SHIFT;
+ spin_lock_init(&gart->pte_lock);
+
+ platform_set_drvdata(pdev, gart);
+
+ e = tegra_iovmm_register(&gart->iovmm);
+ if (e)
+ goto fail;
+
+ e = tegra_iovmm_domain_init(&gart->domain, &gart->iovmm,
+ (tegra_iovmm_addr_t)res_remap->start,
+ (tegra_iovmm_addr_t)res_remap->end+1);
+ if (e)
+ goto fail;
+
+ gart->regs = gart_regs;
+ gart->iovmm_base = (tegra_iovmm_addr_t)res_remap->start;
+ gart->page_count = resource_size(res_remap);
+ gart->page_count >>= GART_PAGE_SHIFT;
+
+ gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
+ if (!gart->savedata) {
+ pr_err(DRIVER_NAME ": failed to allocate context save area\n");
+ e = -ENOMEM;
+ goto fail;
+ }
+
+ do_gart_setup(gart, NULL);
+ gart->enable = 1;
+
+ return 0;
+
+fail:
+ if (gart_regs)
+ iounmap(gart_regs);
+ if (gart && gart->savedata)
+ vfree(gart->savedata);
+ kfree(gart);
+ return e;
+}
+
+static int __devinit gart_init(void)
+{
+ return platform_driver_register(&tegra_iovmm_gart_drv);
+}
+
+static void __exit gart_exit(void)
+{
+ platform_driver_unregister(&tegra_iovmm_gart_drv);
+}
+
+#define GART_PTE(_pfn) (0x80000000ul | ((_pfn)<<PAGE_SHIFT))
+
+
+static int gart_map(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *iovma)
+{
+ struct gart_device *gart =
+ container_of(domain, struct gart_device, domain);
+ unsigned long gart_page, count;
+ unsigned int i;
+
+ gart_page = iovma->iovm_start;
+ count = iovma->iovm_length >> GART_PAGE_SHIFT;
+
+ for (i = 0; i < count; i++) {
+ unsigned long pfn;
+
+ pfn = iovma->ops->lock_makeresident(iovma, i<<PAGE_SHIFT);
+ if (!pfn_valid(pfn))
+ goto fail;
+
+ spin_lock(&gart->pte_lock);
+
+ writel(gart_page, gart->regs + GART_ENTRY_ADDR);
+ writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
+ wmb();
+ gart_page += 1 << GART_PAGE_SHIFT;
+
+ spin_unlock(&gart->pte_lock);
+ }
+
+ return 0;
+
+fail:
+ spin_lock(&gart->pte_lock);
+ while (i--) {
+ iovma->ops->release(iovma, i << PAGE_SHIFT);
+ gart_page -= 1 << GART_PAGE_SHIFT;
+ writel(gart_page, gart->regs + GART_ENTRY_ADDR);
+ writel(0, gart->regs + GART_ENTRY_DATA);
+ wmb();
+ }
+ spin_unlock(&gart->pte_lock);
+
+ return -ENOMEM;
+}
+
+static void gart_unmap(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *iovma, bool decommit)
+{
+ struct gart_device *gart =
+ container_of(domain, struct gart_device, domain);
+ unsigned long gart_page, count;
+ unsigned int i;
+
+ count = iovma->iovm_length >> GART_PAGE_SHIFT;
+ gart_page = iovma->iovm_start;
+
+ spin_lock(&gart->pte_lock);
+ for (i = 0; i < count; i++) {
+ if (iovma->ops && iovma->ops->release)
+ iovma->ops->release(iovma, i << PAGE_SHIFT);
+
+ writel(gart_page, gart->regs + GART_ENTRY_ADDR);
+ writel(0, gart->regs + GART_ENTRY_DATA);
+ wmb();
+ gart_page += 1 << GART_PAGE_SHIFT;
+ }
+ spin_unlock(&gart->pte_lock);
+}
+
+static void gart_map_pfn(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t offs,
+ unsigned long pfn)
+{
+ struct gart_device *gart =
+ container_of(domain, struct gart_device, domain);
+
+ BUG_ON(!pfn_valid(pfn));
+ spin_lock(&gart->pte_lock);
+ writel(offs, gart->regs + GART_ENTRY_ADDR);
+ writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
+ wmb();
+ spin_unlock(&gart->pte_lock);
+}
+
+static struct tegra_iovmm_domain *gart_alloc_domain(
+ struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+ return &gart->domain;
+}
+
+subsys_initcall(gart_init);
+module_exit(gart_exit);
diff --git a/arch/arm/mach-tegra/iovmm-smmu.c b/arch/arm/mach-tegra/iovmm-smmu.c
new file mode 100644
index 000000000000..1f67096736a2
--- /dev/null
+++ b/arch/arm/mach-tegra/iovmm-smmu.c
@@ -0,0 +1,1351 @@
+/*
+ * arch/arm/mach-tegra/iovmm-smmu.c
+ *
+ * Tegra I/O VMM implementation for SMMU devices for Tegra 3 series
+ * systems-on-a-chip.
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/iomap.h>
+
+#include "tegra_smmu.h"
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+/*
+ * ALL-CAP macros copied from armc.h
+ */
+#define MC_SMMU_CONFIG_0 0x10
+#define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0
+#define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1
+
+#define MC_SMMU_TLB_CONFIG_0 0x14
+#define MC_SMMU_TLB_CONFIG_0_TLB_STATS__MASK (1 << 31)
+#define MC_SMMU_TLB_CONFIG_0_TLB_STATS__ENABLE (1 << 31)
+#define MC_SMMU_TLB_CONFIG_0_TLB_HIT_UNDER_MISS__ENABLE (1 << 29)
+#define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE 0x10
+#define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010
+
+#define MC_SMMU_PTC_CONFIG_0 0x18
+#define MC_SMMU_PTC_CONFIG_0_PTC_STATS__MASK (1 << 31)
+#define MC_SMMU_PTC_CONFIG_0_PTC_STATS__ENABLE (1 << 31)
+#define MC_SMMU_PTC_CONFIG_0_PTC_CACHE__ENABLE (1 << 29)
+#define MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP__PATTERN 0x3f
+#define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003f
+
+#define MC_SMMU_PTB_ASID_0 0x1c
+#define MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT 0
+
+#define MC_SMMU_PTB_DATA_0 0x20
+#define MC_SMMU_PTB_DATA_0_RESET_VAL 0
+#define MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT 29
+#define MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT 30
+#define MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT 31
+
+#define MC_SMMU_TLB_FLUSH_0 0x30
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL 0
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_SECTION 2
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP 3
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT 29
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE 0
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE 1
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT 31
+
+#define MC_SMMU_PTC_FLUSH_0 0x34
+#define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL 0
+#define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR 1
+#define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_ADR_SHIFT 4
+
+#define MC_SMMU_ASID_SECURITY_0 0x38
+
+#define MC_SMMU_STATS_TLB_HIT_COUNT_0 0x1f0
+#define MC_SMMU_STATS_TLB_MISS_COUNT_0 0x1f4
+#define MC_SMMU_STATS_PTC_HIT_COUNT_0 0x1f8
+#define MC_SMMU_STATS_PTC_MISS_COUNT_0 0x1fc
+
+#define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228
+#define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22c
+#define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230
+
+#define MC_SMMU_AFI_ASID_0 0x238 /* PCIE */
+#define MC_SMMU_AVPC_ASID_0 0x23c /* AVP */
+#define MC_SMMU_DC_ASID_0 0x240 /* Display controller */
+#define MC_SMMU_DCB_ASID_0 0x244 /* Display controller B */
+#define MC_SMMU_EPP_ASID_0 0x248 /* Encoder pre-processor */
+#define MC_SMMU_G2_ASID_0 0x24c /* 2D engine */
+#define MC_SMMU_HC_ASID_0 0x250 /* Host1x */
+#define MC_SMMU_HDA_ASID_0 0x254 /* High-def audio */
+#define MC_SMMU_ISP_ASID_0 0x258 /* Image signal processor */
+#define MC_SMMU_MPE_ASID_0 0x264 /* MPEG encoder */
+#define MC_SMMU_NV_ASID_0 0x268 /* (3D) */
+#define MC_SMMU_NV2_ASID_0 0x26c /* (3D) */
+#define MC_SMMU_PPCS_ASID_0 0x270 /* AHB */
+#define MC_SMMU_SATA_ASID_0 0x278 /* SATA */
+#define MC_SMMU_VDE_ASID_0 0x27c /* Video decoder */
+#define MC_SMMU_VI_ASID_0 0x280 /* Video input */
+
+#define SMMU_PDE_NEXT_SHIFT 28
+
+/* Copied from arahb_arbc.h */
+#define AHB_ARBITRATION_XBAR_CTRL_0 0xe0
+#define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE 1
+#define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT 17
+
+#endif
+
+#define MC_SMMU_NUM_ASIDS 4
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, which) \
+ ((((iova) & MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__MASK) >> \
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__SHIFT) | \
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_##which)
+#define MC_SMMU_PTB_ASID_0_CURRENT_ASID(n) \
+ ((n) << MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT)
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__DISABLE \
+ (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE << \
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
+#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE \
+ (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE << \
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
+
+#define VMM_NAME "iovmm-smmu"
+#define DRIVER_NAME "tegra_smmu"
+
+#define SMMU_PAGE_SHIFT 12
+#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
+
+#define SMMU_PDIR_COUNT 1024
+#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
+#define SMMU_PTBL_COUNT 1024
+#define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT)
+#define SMMU_PDIR_SHIFT 12
+#define SMMU_PDE_SHIFT 12
+#define SMMU_PTE_SHIFT 12
+#define SMMU_PFN_MASK 0x000fffff
+
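+/*
+ * An iova splits into a page-directory index (bits 31:22) and a page-table
+ * index: each of the 1024 PDEs covers 4MB worth of 4KB pages.
+ */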
+#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
+#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
+#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
+
+#define _READABLE (1 << MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT)
+#define _WRITABLE (1 << MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT)
+#define _NONSECURE (1 << MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT)
+#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
+#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
+
+#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
+
+#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
+#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
+#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
+
+#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
+#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
+
+#define SMMU_MK_PDIR(page, attr) \
+ ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
+#define SMMU_MK_PDE(page, attr) \
+ (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
+#define SMMU_EX_PTBL_PAGE(pde) \
+ pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
+#define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
+
+#define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31))
+#define SMMU_ASID_DISABLE 0
+#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
+
+/* Keep this as a "natural" enumeration (no assignments) */
+enum smmu_hwclient {
+ HWC_AFI,
+ HWC_AVPC,
+ HWC_DC,
+ HWC_DCB,
+ HWC_EPP,
+ HWC_G2,
+ HWC_HC,
+ HWC_HDA,
+ HWC_ISP,
+ HWC_MPE,
+ HWC_NV,
+ HWC_NV2,
+ HWC_PPCS,
+ HWC_SATA,
+ HWC_VDE,
+ HWC_VI,
+
+ HWC_COUNT
+};
+
+struct smmu_hwc_state {
+ unsigned long reg;
+ unsigned long enable_disable;
+};
+
+/* Hardware client mapping initializer */
+#define HWC_INIT(client) \
+ [HWC_##client] = {MC_SMMU_##client##_ASID_0, SMMU_ASID_DISABLE},
+
+static const struct smmu_hwc_state smmu_hwc_state_init[] = {
+ HWC_INIT(AFI)
+ HWC_INIT(AVPC)
+ HWC_INIT(DC)
+ HWC_INIT(DCB)
+ HWC_INIT(EPP)
+ HWC_INIT(G2)
+ HWC_INIT(HC)
+ HWC_INIT(HDA)
+ HWC_INIT(ISP)
+ HWC_INIT(MPE)
+ HWC_INIT(NV)
+ HWC_INIT(NV2)
+ HWC_INIT(PPCS)
+ HWC_INIT(SATA)
+ HWC_INIT(VDE)
+ HWC_INIT(VI)
+};
+
+
+struct domain_hwc_map {
+ const char *dev_name;
+ const enum smmu_hwclient *hwcs;
+ const unsigned int nr_hwcs;
+};
+
+/* Enable all hardware clients for SMMU translation */
+static const enum smmu_hwclient nvmap_hwcs[] = {
+ HWC_AFI,
+ HWC_AVPC,
+ HWC_DC,
+ HWC_DCB,
+ HWC_EPP,
+ HWC_G2,
+ HWC_HC,
+ HWC_HDA,
+ HWC_ISP,
+ HWC_MPE,
+ HWC_NV,
+ HWC_NV2,
+ HWC_PPCS,
+ HWC_SATA,
+ HWC_VDE,
+ HWC_VI
+};
+
+static const struct domain_hwc_map smmu_hwc_map[] = {
+ {
+ .dev_name = "nvmap",
+ .hwcs = nvmap_hwcs,
+ .nr_hwcs = ARRAY_SIZE(nvmap_hwcs),
+ },
+};
+
+/*
+ * Per address space
+ */
+struct smmu_as {
+ struct smmu_device *smmu; /* back pointer to container */
+ unsigned int asid;
+ const struct domain_hwc_map *hwclients;
+ struct mutex lock; /* for pagetable */
+ struct tegra_iovmm_domain domain;
+ struct page *pdir_page;
+ unsigned long pdir_attr;
+ unsigned long pde_attr;
+ unsigned long pte_attr;
+ unsigned int *pte_count;
+ struct device sysfs_dev;
+ int sysfs_use_count;
+};
+
+/*
+ * Per SMMU device
+ */
+struct smmu_device {
+ void __iomem *regs, *regs_ahbarb;
+ tegra_iovmm_addr_t iovmm_base; /* remappable base address */
+ unsigned long page_count; /* total remappable size */
+ spinlock_t lock;
+ char *name;
+ struct tegra_iovmm_device iovmm_dev;
+ int num_ases;
+ struct smmu_as *as; /* Run-time allocated array */
+ struct smmu_hwc_state hwc_state[HWC_COUNT];
+ struct device sysfs_dev;
+ int sysfs_use_count;
+ bool enable;
+ struct page *avp_vector_page; /* dummy page shared by all AS's */
+
+ /*
+ * Register image savers for suspend/resume
+ */
+ unsigned long translation_enable_0_0;
+ unsigned long translation_enable_1_0;
+ unsigned long translation_enable_2_0;
+ unsigned long asid_security_0;
+
+ unsigned long lowest_asid; /* Variables for hardware testing */
+ unsigned long debug_asid;
+ unsigned long signature_pid; /* For debugging aid */
+};
+
+#define VA_PAGE_TO_PA(va, page) \
+ (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))
+
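+/*
+ * The SMMU walks page tables in physical memory, so CPU-side updates must
+ * be cleaned out of both the L1 and the outer cache before the PTC/TLB are
+ * flushed.
+ */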
+#define FLUSH_CPU_DCACHE(va, page, size) \
+ do { \
+ unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \
+ __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
+ outer_flush_range(_pa_, _pa_+(size_t)(size)); \
+ } while (0)
+
+/*
+ * Any interaction between any block on PPSB and a block on APB or AHB
+ * must have these read-back to ensure the APB/AHB bus transaction is
+ * complete before initiating activity on the PPSB block.
+ */
+#define FLUSH_SMMU_REGS(smmu) (void)readl((smmu)->regs + MC_SMMU_CONFIG_0)
+
+/*
+ * Flush all TLB entries and all PTC entries
+ * Caller must lock smmu
+ */
+static void smmu_flush_regs(struct smmu_device *smmu, int enable)
+{
+ writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL,
+ smmu->regs + MC_SMMU_PTC_FLUSH_0);
+ FLUSH_SMMU_REGS(smmu);
+ writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__DISABLE,
+ smmu->regs + MC_SMMU_TLB_FLUSH_0);
+
+ if (enable)
+ writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE,
+ smmu->regs + MC_SMMU_CONFIG_0);
+ FLUSH_SMMU_REGS(smmu);
+}
+
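+/*
+ * Program (or restore) the SMMU: per-ASID page directory pointers, per-client
+ * ASID assignments, translation-enable/security registers and the TLB/PTC
+ * configuration, then signal SMMU_INIT_DONE to the AHB arbiter.
+ * Used both at probe time and on resume.
+ */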
+static void smmu_setup_regs(struct smmu_device *smmu)
+{
+ int i;
+
+ if (smmu->as) {
+ int asid;
+
+ /* Set/restore page directory for each AS */
+ for (asid = 0; asid < smmu->num_ases; asid++) {
+ struct smmu_as *as = &smmu->as[asid];
+
+ writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
+ as->smmu->regs + MC_SMMU_PTB_ASID_0);
+ writel(as->pdir_page
+ ? SMMU_MK_PDIR(as->pdir_page, as->pdir_attr)
+ : MC_SMMU_PTB_DATA_0_RESET_VAL,
+ as->smmu->regs + MC_SMMU_PTB_DATA_0);
+ }
+ }
+
+ /* Set/restore ASID for each hardware client */
+ for (i = 0; i < HWC_COUNT; i++) {
+ struct smmu_hwc_state *hwcst = &smmu->hwc_state[i];
+ writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
+ }
+
+ writel(smmu->translation_enable_0_0,
+ smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
+ writel(smmu->translation_enable_1_0,
+ smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
+ writel(smmu->translation_enable_2_0,
+ smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
+ writel(smmu->asid_security_0,
+ smmu->regs + MC_SMMU_ASID_SECURITY_0);
+ writel(MC_SMMU_TLB_CONFIG_0_RESET_VAL,
+ smmu->regs + MC_SMMU_TLB_CONFIG_0);
+ writel(MC_SMMU_PTC_CONFIG_0_RESET_VAL,
+ smmu->regs + MC_SMMU_PTC_CONFIG_0);
+
+ smmu_flush_regs(smmu, 1);
+ writel(
+ readl(smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0) |
+ (AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE <<
+ AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT),
+ smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0);
+}
+
+static int smmu_suspend(struct tegra_iovmm_device *dev)
+{
+ struct smmu_device *smmu =
+ container_of(dev, struct smmu_device, iovmm_dev);
+
+ smmu->translation_enable_0_0 =
+ readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
+ smmu->translation_enable_1_0 =
+ readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
+ smmu->translation_enable_2_0 =
+ readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
+ smmu->asid_security_0 =
+ readl(smmu->regs + MC_SMMU_ASID_SECURITY_0);
+ return 0;
+}
+
+static void smmu_resume(struct tegra_iovmm_device *dev)
+{
+ struct smmu_device *smmu =
+ container_of(dev, struct smmu_device, iovmm_dev);
+
+ if (!smmu->enable)
+ return;
+
+ spin_lock(&smmu->lock);
+ smmu_setup_regs(smmu);
+ spin_unlock(&smmu->lock);
+}
+
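+/*
+ * Invalidate the PTC entry caching the given PTE (by its physical address)
+ * and the TLB entries covering iova, matched against this AS's ASID at
+ * section granularity for PDE updates or group granularity for PTE updates.
+ */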
+static void flush_ptc_and_tlb(struct smmu_device *smmu,
+ struct smmu_as *as, unsigned long iova,
+ unsigned long *pte, struct page *ptpage, int is_pde)
+{
+ unsigned long tlb_flush_va = is_pde
+ ? MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, SECTION)
+ : MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, GROUP);
+
+ writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
+ VA_PAGE_TO_PA(pte, ptpage),
+ smmu->regs + MC_SMMU_PTC_FLUSH_0);
+ FLUSH_SMMU_REGS(smmu);
+ writel(tlb_flush_va |
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
+ (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
+ smmu->regs + MC_SMMU_TLB_FLUSH_0);
+ FLUSH_SMMU_REGS(smmu);
+}
+
+static void free_ptbl(struct smmu_as *as, unsigned long iova)
+{
+ unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
+ unsigned long *pdir = (unsigned long *)kmap(as->pdir_page);
+
+ if (pdir[pdn] != _PDE_VACANT(pdn)) {
+ pr_debug("%s:%d pdn=%lx\n", __func__, __LINE__, pdn);
+
+ ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
+ __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
+ pdir[pdn] = _PDE_VACANT(pdn);
+ FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
+ flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
+ as->pdir_page, 1);
+ }
+ kunmap(as->pdir_page);
+}
+
+static void free_pdir(struct smmu_as *as)
+{
+ if (as->pdir_page) {
+ unsigned addr = as->smmu->iovmm_base;
+ int count = as->smmu->page_count;
+
+ while (count-- > 0) {
+ free_ptbl(as, addr);
+ addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
+ }
+ ClearPageReserved(as->pdir_page);
+ __free_page(as->pdir_page);
+ as->pdir_page = NULL;
+ kfree(as->pte_count);
+ as->pte_count = NULL;
+ }
+}
+
+static int smmu_remove(struct platform_device *pdev)
+{
+ struct smmu_device *smmu = platform_get_drvdata(pdev);
+
+ if (!smmu)
+ return 0;
+
+ if (smmu->enable) {
+ writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE,
+ smmu->regs + MC_SMMU_CONFIG_0);
+ smmu->enable = 0;
+ }
+ platform_set_drvdata(pdev, NULL);
+
+ if (smmu->as) {
+ int asid;
+
+ for (asid = 0; asid < smmu->num_ases; asid++)
+ free_pdir(&smmu->as[asid]);
+ kfree(smmu->as);
+ }
+
+ if (smmu->avp_vector_page)
+ __free_page(smmu->avp_vector_page);
+ if (smmu->regs)
+ iounmap(smmu->regs);
+ if (smmu->regs_ahbarb)
+ iounmap(smmu->regs_ahbarb);
+ tegra_iovmm_unregister(&smmu->iovmm_dev);
+ kfree(smmu);
+ return 0;
+}
+
+/*
+ * Maps the page table for the given iova and returns the PTE address.
+ * The caller must kunmap() the page table page returned in *ptbl_page_p.
+ */
+static unsigned long *locate_pte(struct smmu_as *as,
+ unsigned long iova, bool allocate,
+ struct page **ptbl_page_p,
+ unsigned int **pte_counter)
+{
+ unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
+ unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
+ unsigned long *pdir = kmap(as->pdir_page);
+ unsigned long *ptbl;
+
+ if (pdir[pdn] != _PDE_VACANT(pdn)) {
+ /* Mapped entry table already exists */
+ *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
+ ptbl = kmap(*ptbl_page_p);
+ } else if (!allocate) {
+ kunmap(as->pdir_page);
+ return NULL;
+ } else {
+ /* Vacant - allocate a new page table */
+ pr_debug("%s:%d new PTBL pdn=%lx\n", __func__, __LINE__, pdn);
+
+ *ptbl_page_p = alloc_page(GFP_KERNEL | __GFP_DMA);
+ if (!*ptbl_page_p) {
+ kunmap(as->pdir_page);
+ pr_err(DRIVER_NAME
+ ": failed to allocate tegra_iovmm_device page table\n");
+ return NULL;
+ }
+ SetPageReserved(*ptbl_page_p);
+ ptbl = (unsigned long *)kmap(*ptbl_page_p);
+ {
+ int pn;
+ unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
+ for (pn = 0; pn < SMMU_PTBL_COUNT;
+ pn++, addr += SMMU_PAGE_SIZE) {
+ ptbl[pn] = _PTE_VACANT(addr);
+ }
+ }
+ FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
+ pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
+ as->pde_attr | _PDE_NEXT);
+ FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
+ flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
+ as->pdir_page, 1);
+ }
+ *pte_counter = &as->pte_count[pdn];
+
+ kunmap(as->pdir_page);
+ return &ptbl[ptn % SMMU_PTBL_COUNT];
+}
+
+static void put_signature(struct smmu_as *as,
+ unsigned long addr, unsigned long pfn)
+{
+ if (as->smmu->signature_pid == current->pid) {
+ struct page *page = pfn_to_page(pfn);
+ unsigned long *vaddr = kmap(page);
+ if (vaddr) {
+ vaddr[0] = addr;
+ vaddr[1] = pfn << PAGE_SHIFT;
+ FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
+ kunmap(page);
+ }
+ }
+}
+
+static int smmu_map(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *iovma)
+{
+ struct smmu_as *as = container_of(domain, struct smmu_as, domain);
+ unsigned long addr = iovma->iovm_start;
+ unsigned long pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
+ int i;
+
+ pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
+ addr, as - as->smmu->as);
+
+ for (i = 0; i < pcount; i++) {
+ unsigned long pfn;
+ unsigned long *pte;
+ unsigned int *pte_counter;
+ struct page *ptpage;
+
+ pfn = iovma->ops->lock_makeresident(iovma, i << PAGE_SHIFT);
+ if (!pfn_valid(pfn))
+ goto fail;
+
+ mutex_lock(&as->lock);
+
+ pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
+ if (!pte)
+ goto fail2;
+
+ pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n",
+ __func__, __LINE__, addr, pfn, as - as->smmu->as);
+
+ if (*pte == _PTE_VACANT(addr))
+ (*pte_counter)++;
+ *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
+ if (unlikely((*pte == _PTE_VACANT(addr))))
+ (*pte_counter)--;
+ FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
+ flush_ptc_and_tlb(as->smmu, as, addr, pte, ptpage, 0);
+ kunmap(ptpage);
+ mutex_unlock(&as->lock);
+ put_signature(as, addr, pfn);
+ addr += SMMU_PAGE_SIZE;
+ }
+ return 0;
+
+fail:
+ mutex_lock(&as->lock);
+fail2:
+
+ while (i-- > 0) {
+ unsigned long *pte;
+ unsigned int *pte_counter;
+ struct page *page;
+
+ iovma->ops->release(iovma, i<<PAGE_SHIFT);
+ addr -= SMMU_PAGE_SIZE;
+ pte = locate_pte(as, addr, false, &page, &pte_counter);
+ if (pte) {
+ if (*pte != _PTE_VACANT(addr)) {
+ *pte = _PTE_VACANT(addr);
+ FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
+ flush_ptc_and_tlb(as->smmu, as, addr, pte,
+ page, 0);
+ kunmap(page);
+ if (!--(*pte_counter))
+ free_ptbl(as, addr);
+ } else {
+ kunmap(page);
+ }
+ }
+ }
+ mutex_unlock(&as->lock);
+ return -ENOMEM;
+}
+
+static void smmu_unmap(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *iovma, bool decommit)
+{
+ struct smmu_as *as = container_of(domain, struct smmu_as, domain);
+ unsigned long addr = iovma->iovm_start;
+ unsigned int pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
+ unsigned int i, *pte_counter;
+
+ pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
+ addr, as - as->smmu->as);
+
+ mutex_lock(&as->lock);
+ for (i = 0; i < pcount; i++) {
+ unsigned long *pte;
+ struct page *page;
+
+ if (iovma->ops && iovma->ops->release)
+ iovma->ops->release(iovma, i << PAGE_SHIFT);
+
+ pte = locate_pte(as, addr, false, &page, &pte_counter);
+ if (pte) {
+ if (*pte != _PTE_VACANT(addr)) {
+ *pte = _PTE_VACANT(addr);
+ FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
+ flush_ptc_and_tlb(as->smmu, as, addr, pte,
+ page, 0);
+ kunmap(page);
+ if (!--(*pte_counter) && decommit) {
+ free_ptbl(as, addr);
+ smmu_flush_regs(as->smmu, 0);
+ }
+ } else {
+ kunmap(page);
+ }
+ }
+ addr += SMMU_PAGE_SIZE;
+ }
+ mutex_unlock(&as->lock);
+}
+
+static void smmu_map_pfn(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t addr,
+ unsigned long pfn)
+{
+ struct smmu_as *as = container_of(domain, struct smmu_as, domain);
+ struct smmu_device *smmu = as->smmu;
+ unsigned long *pte;
+ unsigned int *pte_counter;
+ struct page *ptpage;
+
+ pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n", __func__, __LINE__,
+ (unsigned long)addr, pfn, as - as->smmu->as);
+
+ BUG_ON(!pfn_valid(pfn));
+ mutex_lock(&as->lock);
+ pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
+ if (pte) {
+ if (*pte == _PTE_VACANT(addr))
+ (*pte_counter)++;
+ *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
+ if (unlikely((*pte == _PTE_VACANT(addr))))
+ (*pte_counter)--;
+ FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
+ flush_ptc_and_tlb(smmu, as, addr, pte, ptpage, 0);
+ kunmap(ptpage);
+ put_signature(as, addr, pfn);
+ }
+ mutex_unlock(&as->lock);
+}
+
+/*
+ * Caller must lock/unlock as
+ */
+static int alloc_pdir(struct smmu_as *as)
+{
+ unsigned long *pdir;
+ int pdn;
+
+ if (as->pdir_page)
+ return 0;
+
+ as->pte_count = kzalloc(sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT,
+ GFP_KERNEL);
+ if (!as->pte_count) {
+ pr_err(DRIVER_NAME
+ ": failed to allocate tegra_iovmm_device PTE cunters\n");
+ return -ENOMEM;
+ }
+ as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ if (!as->pdir_page) {
+ pr_err(DRIVER_NAME
+ ": failed to allocate tegra_iovmm_device page directory\n");
+ kfree(as->pte_count);
+ as->pte_count = NULL;
+ return -ENOMEM;
+ }
+ SetPageReserved(as->pdir_page);
+ pdir = kmap(as->pdir_page);
+
+ for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
+ pdir[pdn] = _PDE_VACANT(pdn);
+ FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
+ writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
+ VA_PAGE_TO_PA(pdir, as->pdir_page),
+ as->smmu->regs + MC_SMMU_PTC_FLUSH_0);
+ FLUSH_SMMU_REGS(as->smmu);
+ writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
+ MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
+ (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
+ as->smmu->regs + MC_SMMU_TLB_FLUSH_0);
+ FLUSH_SMMU_REGS(as->smmu);
+ kunmap(as->pdir_page);
+
+ return 0;
+}
+
+static void _sysfs_create(struct smmu_as *as, struct device *sysfs_parent);
+
+/*
+ * Allocate resources for an AS
+ * TODO: split into "alloc" and "lock"
+ */
+static struct tegra_iovmm_domain *smmu_alloc_domain(
+ struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
+{
+ struct smmu_device *smmu =
+ container_of(dev, struct smmu_device, iovmm_dev);
+ struct smmu_as *as = NULL;
+ const struct domain_hwc_map *map = NULL;
+ int asid, i;
+
+ /* Look for a free AS */
+ for (asid = smmu->lowest_asid; asid < smmu->num_ases; asid++) {
+ mutex_lock(&smmu->as[asid].lock);
+ if (!smmu->as[asid].hwclients) {
+ as = &smmu->as[asid];
+ break;
+ }
+ mutex_unlock(&smmu->as[asid].lock);
+ }
+
+ if (!as) {
+ pr_err(DRIVER_NAME ": no free AS\n");
+ return NULL;
+ }
+
+ if (alloc_pdir(as) < 0)
+ goto bad3;
+
+ /* Look for a matching hardware client group */
+ for (i = 0; i < ARRAY_SIZE(smmu_hwc_map); i++) {
+ if (!strcmp(smmu_hwc_map[i].dev_name, client->misc_dev->name)) {
+ map = &smmu_hwc_map[i];
+ break;
+ }
+ }
+
+ if (!map) {
+ pr_err(DRIVER_NAME ": no SMMU resource for %s (%s)\n",
+ client->name, client->misc_dev->name);
+ goto bad2;
+ }
+
+ spin_lock(&smmu->lock);
+ /* Update PDIR register */
+ writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
+ as->smmu->regs + MC_SMMU_PTB_ASID_0);
+ writel(SMMU_MK_PDIR(as->pdir_page, as->pdir_attr),
+ as->smmu->regs + MC_SMMU_PTB_DATA_0);
+ FLUSH_SMMU_REGS(smmu);
+
+ /* Put each hardware client in the group into the address space */
+ for (i = 0; i < map->nr_hwcs; i++) {
+ struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
+
+ /* Is the hardware client busy? */
+ if (hwcst->enable_disable != SMMU_ASID_DISABLE &&
+ hwcst->enable_disable != SMMU_ASID_ENABLE(as->asid)) {
+ pr_err(DRIVER_NAME
+ ": HW 0x%lx busy for ASID %ld (client!=%s)\n",
+ hwcst->reg,
+ SMMU_ASID_ASID(hwcst->enable_disable),
+ client->name);
+ goto bad;
+ }
+ hwcst->enable_disable = SMMU_ASID_ENABLE(as->asid);
+ writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
+ }
+ FLUSH_SMMU_REGS(smmu);
+ spin_unlock(&smmu->lock);
+ as->hwclients = map;
+ _sysfs_create(as, client->misc_dev->this_device);
+ mutex_unlock(&as->lock);
+
+ /* Reserve "page zero" for AVP vectors using a common dummy page */
+ smmu_map_pfn(&as->domain, NULL, 0,
+ page_to_phys(as->smmu->avp_vector_page) >> SMMU_PAGE_SHIFT);
+ return &as->domain;
+
+bad:
+ /* Reset hardware clients that have been enabled */
+ while (--i >= 0) {
+ struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
+
+ hwcst->enable_disable = SMMU_ASID_DISABLE;
+ writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
+ }
+ FLUSH_SMMU_REGS(smmu);
+ spin_unlock(&as->smmu->lock);
+bad2:
+ free_pdir(as);
+bad3:
+ mutex_unlock(&as->lock);
+ return NULL;
+
+}
+
+/*
+ * Release resources for an AS
+ * TODO: split into "unlock" and "free"
+ */
+static void smmu_free_domain(
+ struct tegra_iovmm_domain *domain, struct tegra_iovmm_client *client)
+{
+ struct smmu_as *as = container_of(domain, struct smmu_as, domain);
+ struct smmu_device *smmu = as->smmu;
+ const struct domain_hwc_map *map = NULL;
+ int i;
+
+ mutex_lock(&as->lock);
+ map = as->hwclients;
+
+ spin_lock(&smmu->lock);
+ for (i = 0; i < map->nr_hwcs; i++) {
+ struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
+
+ hwcst->enable_disable = SMMU_ASID_DISABLE;
+ writel(SMMU_ASID_DISABLE, smmu->regs + hwcst->reg);
+ }
+ FLUSH_SMMU_REGS(smmu);
+ spin_unlock(&smmu->lock);
+
+ as->hwclients = NULL;
+ if (as->pdir_page) {
+ spin_lock(&smmu->lock);
+ writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
+ smmu->regs + MC_SMMU_PTB_ASID_0);
+ writel(MC_SMMU_PTB_DATA_0_RESET_VAL,
+ smmu->regs + MC_SMMU_PTB_DATA_0);
+ FLUSH_SMMU_REGS(smmu);
+ spin_unlock(&smmu->lock);
+
+ free_pdir(as);
+ }
+ mutex_unlock(&as->lock);
+}
+
+static struct tegra_iovmm_device_ops tegra_iovmm_smmu_ops = {
+ .map = smmu_map,
+ .unmap = smmu_unmap,
+ .map_pfn = smmu_map_pfn,
+ .alloc_domain = smmu_alloc_domain,
+ .free_domain = smmu_free_domain,
+ .suspend = smmu_suspend,
+ .resume = smmu_resume,
+};
+
+static int smmu_probe(struct platform_device *pdev)
+{
+ struct smmu_device *smmu;
+ struct resource *regs, *regs2;
+ struct tegra_smmu_window *window;
+ int e, asid;
+
+ BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
+ BUILD_BUG_ON(ARRAY_SIZE(smmu_hwc_state_init) != HWC_COUNT);
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mc");
+ regs2 = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahbarb");
+ window = tegra_smmu_window(0);
+
+ if (!regs || !regs2 || !window) {
+ pr_err(DRIVER_NAME ": No SMMU resources\n");
+ return -ENODEV;
+ }
+
+ smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
+ if (!smmu) {
+ pr_err(DRIVER_NAME ": failed to allocate smmu_device\n");
+ return -ENOMEM;
+ }
+
+ smmu->num_ases = MC_SMMU_NUM_ASIDS;
+ smmu->iovmm_base = (tegra_iovmm_addr_t)window->start;
+ smmu->page_count = (window->end + 1 - window->start) >> SMMU_PAGE_SHIFT;
+ smmu->regs = ioremap(regs->start, resource_size(regs));
+ smmu->regs_ahbarb = ioremap(regs2->start, resource_size(regs2));
+ if (!smmu->regs || !smmu->regs_ahbarb) {
+ pr_err(DRIVER_NAME ": failed to remap SMMU registers\n");
+ e = -ENXIO;
+ goto fail;
+ }
+
+ smmu->translation_enable_0_0 = ~0;
+ smmu->translation_enable_1_0 = ~0;
+ smmu->translation_enable_2_0 = ~0;
+ smmu->asid_security_0 = 0;
+
+ memcpy(smmu->hwc_state, smmu_hwc_state_init, sizeof(smmu->hwc_state));
+
+ smmu->iovmm_dev.name = VMM_NAME;
+ smmu->iovmm_dev.ops = &tegra_iovmm_smmu_ops;
+ smmu->iovmm_dev.pgsize_bits = SMMU_PAGE_SHIFT;
+
+ e = tegra_iovmm_register(&smmu->iovmm_dev);
+ if (e)
+ goto fail;
+
+ smmu->as = kzalloc(sizeof(smmu->as[0]) * smmu->num_ases, GFP_KERNEL);
+ if (!smmu->as) {
+ pr_err(DRIVER_NAME ": failed to allocate smmu_as\n");
+ e = -ENOMEM;
+ goto fail;
+ }
+
+ /* Initialize address space structure array */
+ for (asid = 0; asid < smmu->num_ases; asid++) {
+ struct smmu_as *as = &smmu->as[asid];
+
+ as->smmu = smmu;
+ as->asid = asid;
+ as->pdir_attr = _PDIR_ATTR;
+ as->pde_attr = _PDE_ATTR;
+ as->pte_attr = _PTE_ATTR;
+
+ mutex_init(&as->lock);
+
+ e = tegra_iovmm_domain_init(&as->domain, &smmu->iovmm_dev,
+ smmu->iovmm_base,
+ smmu->iovmm_base +
+ (smmu->page_count << SMMU_PAGE_SHIFT));
+ if (e)
+ goto fail;
+ }
+ spin_lock_init(&smmu->lock);
+ smmu_setup_regs(smmu);
+ smmu->enable = 1;
+ platform_set_drvdata(pdev, smmu);
+
+ smmu->avp_vector_page = alloc_page(GFP_KERNEL);
+ if (!smmu->avp_vector_page) {
+ e = -ENOMEM;
+ goto fail;
+ }
+ return 0;
+
+fail:
+ if (smmu->avp_vector_page)
+ __free_page(smmu->avp_vector_page);
+ if (smmu->regs)
+ iounmap(smmu->regs);
+ if (smmu->regs_ahbarb)
+ iounmap(smmu->regs_ahbarb);
+ if (smmu && smmu->as) {
+ for (asid = 0; asid < smmu->num_ases; asid++) {
+ if (smmu->as[asid].pdir_page) {
+ ClearPageReserved(smmu->as[asid].pdir_page);
+ __free_page(smmu->as[asid].pdir_page);
+ }
+ }
+ kfree(smmu->as);
+ }
+ kfree(smmu);
+ return e;
+}
+
+static struct platform_driver tegra_iovmm_smmu_drv = {
+ .probe = smmu_probe,
+ .remove = smmu_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init smmu_init(void)
+{
+ return platform_driver_register(&tegra_iovmm_smmu_drv);
+}
+
+static void __exit smmu_exit(void)
+{
+ platform_driver_unregister(&tegra_iovmm_smmu_drv);
+}
+
+subsys_initcall(smmu_init);
+module_exit(smmu_exit);
+
+/*
+ * SMMU-global sysfs interface for debugging
+ */
+static ssize_t _sysfs_show_reg(struct device *d,
+ struct device_attribute *da, char *buf);
+static ssize_t _sysfs_store_reg(struct device *d,
+ struct device_attribute *da, const char *buf,
+ size_t count);
+
+#define _NAME_MAP(_name) { \
+ .name = __stringify(_name), \
+ .offset = _name##_0, \
+ .dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR, \
+ _sysfs_show_reg, _sysfs_store_reg) \
+}
+
+static
+struct _reg_name_map {
+ const char *name;
+ unsigned offset;
+ struct device_attribute dev_attr;
+} _smmu_reg_name_map[] = {
+ _NAME_MAP(MC_SMMU_CONFIG),
+ _NAME_MAP(MC_SMMU_TLB_CONFIG),
+ _NAME_MAP(MC_SMMU_PTC_CONFIG),
+ _NAME_MAP(MC_SMMU_PTB_ASID),
+ _NAME_MAP(MC_SMMU_PTB_DATA),
+ _NAME_MAP(MC_SMMU_TLB_FLUSH),
+ _NAME_MAP(MC_SMMU_PTC_FLUSH),
+ _NAME_MAP(MC_SMMU_ASID_SECURITY),
+ _NAME_MAP(MC_SMMU_STATS_TLB_HIT_COUNT),
+ _NAME_MAP(MC_SMMU_STATS_TLB_MISS_COUNT),
+ _NAME_MAP(MC_SMMU_STATS_PTC_HIT_COUNT),
+ _NAME_MAP(MC_SMMU_STATS_PTC_MISS_COUNT),
+ _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_0),
+ _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_1),
+ _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_2),
+ _NAME_MAP(MC_SMMU_AFI_ASID),
+ _NAME_MAP(MC_SMMU_AVPC_ASID),
+ _NAME_MAP(MC_SMMU_DC_ASID),
+ _NAME_MAP(MC_SMMU_DCB_ASID),
+ _NAME_MAP(MC_SMMU_EPP_ASID),
+ _NAME_MAP(MC_SMMU_G2_ASID),
+ _NAME_MAP(MC_SMMU_HC_ASID),
+ _NAME_MAP(MC_SMMU_HDA_ASID),
+ _NAME_MAP(MC_SMMU_ISP_ASID),
+ _NAME_MAP(MC_SMMU_MPE_ASID),
+ _NAME_MAP(MC_SMMU_NV_ASID),
+ _NAME_MAP(MC_SMMU_NV2_ASID),
+ _NAME_MAP(MC_SMMU_PPCS_ASID),
+ _NAME_MAP(MC_SMMU_SATA_ASID),
+ _NAME_MAP(MC_SMMU_VDE_ASID),
+ _NAME_MAP(MC_SMMU_VI_ASID),
+};
+
+static ssize_t lookup_reg(struct device_attribute *da)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
+ if (!strcmp(_smmu_reg_name_map[i].name, da->attr.name))
+ return _smmu_reg_name_map[i].offset;
+ }
+ return -ENODEV;
+}
+
+static ssize_t _sysfs_show_reg(struct device *d,
+ struct device_attribute *da, char *buf)
+{
+ struct smmu_device *smmu =
+ container_of(d, struct smmu_device, sysfs_dev);
+ ssize_t offset = lookup_reg(da);
+
+ if (offset < 0)
+ return offset;
+ return sprintf(buf, "%08lx\n",
+ (unsigned long)readl(smmu->regs + offset));
+}
+
+static ssize_t _sysfs_store_reg(struct device *d,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct smmu_device *smmu =
+ container_of(d, struct smmu_device, sysfs_dev);
+ ssize_t offset = lookup_reg(da);
+ u32 value;
+ int err;
+
+ if (offset < 0)
+ return offset;
+
+ err = kstrtou32(buf, 16, &value);
+ if (err)
+ return err;
+
+#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
+ writel(value, smmu->regs + offset);
+#else
+ /* Allow writing to reg only for TLB/PTC stats enabling/disabling */
+ {
+ unsigned long mask = 0;
+ switch (offset) {
+ case MC_SMMU_TLB_CONFIG_0:
+ mask = MC_SMMU_TLB_CONFIG_0_TLB_STATS__MASK;
+ break;
+ case MC_SMMU_PTC_CONFIG_0:
+ mask = MC_SMMU_PTC_CONFIG_0_PTC_STATS__MASK;
+ break;
+ default:
+ break;
+ }
+
+ if (mask) {
+ unsigned long currval = readl(smmu->regs + offset);
+ currval &= ~mask;
+ value &= mask;
+ value |= currval;
+ writel(value, smmu->regs + offset);
+ }
+ }
+#endif
+ return count;
+}
+
+static ssize_t _sysfs_show_smmu(struct device *d,
+ struct device_attribute *da, char *buf)
+{
+ struct smmu_device *smmu =
+ container_of(d, struct smmu_device, sysfs_dev);
+ ssize_t rv = 0;
+
+ rv += sprintf(buf + rv , " regs: %p\n", smmu->regs);
+ rv += sprintf(buf + rv , "iovmm_base: %p\n", (void *)smmu->iovmm_base);
+ rv += sprintf(buf + rv , "page_count: %lx\n", smmu->page_count);
+ rv += sprintf(buf + rv , " num_ases: %d\n", smmu->num_ases);
+ rv += sprintf(buf + rv , " as: %p\n", smmu->as);
+ rv += sprintf(buf + rv , " enable: %s\n",
+ smmu->enable ? "yes" : "no");
+ return rv;
+}
+
+static struct device_attribute _attr_show_smmu
+ = __ATTR(show_smmu, S_IRUGO, _sysfs_show_smmu, NULL);
+
+#define _SYSFS_SHOW_VALUE(name, field, fmt) \
+static ssize_t _sysfs_show_##name(struct device *d, \
+ struct device_attribute *da, char *buf) \
+{ \
+ struct smmu_device *smmu = \
+ container_of(d, struct smmu_device, sysfs_dev); \
+ ssize_t rv = 0; \
+ rv += sprintf(buf + rv, fmt "\n", smmu->field); \
+ return rv; \
+}
+
+static void (*_sysfs_null_callback)(struct smmu_device *, unsigned long *) =
+ NULL;
+
+#define _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback) \
+static ssize_t _sysfs_set_##name(struct device *d, \
+ struct device_attribute *da, const char *buf, size_t count) \
+{ \
+ int err; \
+ u32 value; \
+ struct smmu_device *smmu = \
+ container_of(d, struct smmu_device, sysfs_dev); \
+ err = kstrtou32(buf, base, &value); \
+ if (err) \
+ return err; \
+ if (value < ceil) { \
+ smmu->field = value; \
+ if (callback) \
+ callback(smmu, &smmu->field); \
+ } \
+ return count; \
+}
+#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
+#define _SYSFS_SET_VALUE _SYSFS_SET_VALUE_DO
+#else
+#define _SYSFS_SET_VALUE(name, field, base, ceil, callback) \
+static ssize_t _sysfs_set_##name(struct device *d, \
+ struct device_attribute *da, const char *buf, size_t count) \
+{ \
+ return count; \
+}
+#endif
+
+_SYSFS_SHOW_VALUE(lowest_asid, lowest_asid, "%lu")
+_SYSFS_SET_VALUE(lowest_asid, lowest_asid, 10,
+ MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
+_SYSFS_SHOW_VALUE(debug_asid, debug_asid, "%lu")
+_SYSFS_SET_VALUE(debug_asid, debug_asid, 10,
+ MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
+_SYSFS_SHOW_VALUE(signature_pid, signature_pid, "%lu")
+_SYSFS_SET_VALUE_DO(signature_pid, signature_pid, 10, PID_MAX_LIMIT + 1,
+ _sysfs_null_callback)
+
+#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
+static void _sysfs_mask_attr(struct smmu_device *smmu, unsigned long *field)
+{
+ *field &= _MASK_ATTR;
+}
+
+static void _sysfs_mask_pdir_attr(struct smmu_device *smmu,
+ unsigned long *field)
+{
+ unsigned long pdir;
+
+ _sysfs_mask_attr(smmu, field);
+ writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(smmu->debug_asid),
+ smmu->regs + MC_SMMU_PTB_ASID_0);
+ pdir = readl(smmu->regs + MC_SMMU_PTB_DATA_0);
+ pdir &= ~_MASK_ATTR;
+ pdir |= *field;
+ writel(pdir, smmu->regs + MC_SMMU_PTB_DATA_0);
+ FLUSH_SMMU_REGS(smmu);
+}
+
+static void (*_sysfs_mask_attr_callback)(struct smmu_device *,
+ unsigned long *field) = &_sysfs_mask_attr;
+static void (*_sysfs_mask_pdir_attr_callback)(struct smmu_device *,
+ unsigned long *field) = &_sysfs_mask_pdir_attr;
+#endif
+
+_SYSFS_SHOW_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, "%lx")
+_SYSFS_SET_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, 16,
+ _PDIR_ATTR + 1, _sysfs_mask_pdir_attr_callback)
+_SYSFS_SHOW_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, "%lx")
+_SYSFS_SET_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, 16,
+ _PDE_ATTR + 1, _sysfs_mask_attr_callback)
+_SYSFS_SHOW_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, "%lx")
+_SYSFS_SET_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, 16,
+ _PTE_ATTR + 1, _sysfs_mask_attr_callback)
+
+static struct device_attribute _attr_values[] = {
+ __ATTR(lowest_asid, S_IRUGO | S_IWUSR,
+ _sysfs_show_lowest_asid, _sysfs_set_lowest_asid),
+ __ATTR(debug_asid, S_IRUGO | S_IWUSR,
+ _sysfs_show_debug_asid, _sysfs_set_debug_asid),
+ __ATTR(signature_pid, S_IRUGO | S_IWUSR,
+ _sysfs_show_signature_pid, _sysfs_set_signature_pid),
+
+ __ATTR(pdir_attr, S_IRUGO | S_IWUSR,
+ _sysfs_show_pdir_attr, _sysfs_set_pdir_attr),
+ __ATTR(pde_attr, S_IRUGO | S_IWUSR,
+ _sysfs_show_pde_attr, _sysfs_set_pde_attr),
+ __ATTR(pte_attr, S_IRUGO | S_IWUSR,
+ _sysfs_show_pte_attr, _sysfs_set_pte_attr),
+};
+
+static struct attribute *_smmu_attrs[
+ ARRAY_SIZE(_smmu_reg_name_map) + ARRAY_SIZE(_attr_values) + 3];
+static struct attribute_group _smmu_attr_group = {
+ .attrs = _smmu_attrs
+};
+
+static void _sysfs_smmu(struct smmu_device *smmu, struct device *parent)
+{
+ int i, j;
+
+ if (smmu->sysfs_use_count++ > 0)
+ return;
+ for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++)
+ _smmu_attrs[i] = &_smmu_reg_name_map[i].dev_attr.attr;
+ for (j = 0; j < ARRAY_SIZE(_attr_values); j++)
+ _smmu_attrs[i++] = &_attr_values[j].attr;
+ _smmu_attrs[i++] = &_attr_show_smmu.attr;
+ _smmu_attrs[i] = NULL;
+
+ dev_set_name(&smmu->sysfs_dev, "smmu");
+ smmu->sysfs_dev.parent = parent;
+ smmu->sysfs_dev.driver = NULL;
+ smmu->sysfs_dev.release = NULL;
+ if (device_register(&smmu->sysfs_dev)) {
+ pr_err("%s: failed to register smmu_sysfs_dev\n", __func__);
+ smmu->sysfs_use_count--;
+ return;
+ }
+ if (sysfs_create_group(&smmu->sysfs_dev.kobj, &_smmu_attr_group)) {
+ pr_err("%s: failed to create group for smmu_sysfs_dev\n",
+ __func__);
+ smmu->sysfs_use_count--;
+ return;
+ }
+}
+
+static void _sysfs_create(struct smmu_as *as, struct device *parent)
+{
+ _sysfs_smmu(as->smmu, parent);
+}
diff --git a/arch/arm/mach-tegra/iovmm.c b/arch/arm/mach-tegra/iovmm.c
new file mode 100644
index 000000000000..1b900bf49dd0
--- /dev/null
+++ b/arch/arm/mach-tegra/iovmm.c
@@ -0,0 +1,950 @@
+/*
+ * arch/arm/mach-tegra/iovmm.c
+ *
+ * Tegra I/O VM manager
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <mach/iovmm.h>
+
+/*
+ * after the best-fit block is located, the remaining pages not needed
+ * for the allocation will be split into a new free block if the
+ * number of remaining pages is >= MIN_SPLIT_PAGE.
+ */
+#define MIN_SPLIT_PAGE 4
+#define MIN_SPLIT_BYTES(_d) (MIN_SPLIT_PAGE << (_d)->dev->pgsize_bits)
+#define NO_SPLIT(m) ((m) < MIN_SPLIT_BYTES(domain))
+#define DO_SPLIT(m) ((m) >= MIN_SPLIT_BYTES(domain))
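+/*
+ * e.g. with 4KB device pages MIN_SPLIT_BYTES is 16KB: a 12KB leftover stays
+ * attached to the allocation, while a 20KB leftover becomes a new free block.
+ */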
+
+#define iovmm_start(_b) ((_b)->vm_area.iovm_start)
+#define iovmm_length(_b) ((_b)->vm_area.iovm_length)
+#define iovmm_end(_b) (iovmm_start(_b) + iovmm_length(_b))
+
+/* flags for the block */
+#define BK_free 0 /* indicates free mappings */
+#define BK_map_dirty 1 /* used by demand-loaded mappings */
+
+/* flags for the client */
+#define CL_locked 0
+
+/* flags for the domain */
+#define DM_map_dirty 0
+
+struct tegra_iovmm_block {
+ struct tegra_iovmm_area vm_area;
+ tegra_iovmm_addr_t start;
+ size_t length;
+ atomic_t ref;
+ unsigned long flags;
+ unsigned long poison;
+ struct rb_node free_node;
+ struct rb_node all_node;
+};
+
+struct iovmm_share_group {
+ const char *name;
+ struct tegra_iovmm_domain *domain;
+ struct list_head client_list;
+ struct list_head group_list;
+ spinlock_t lock; /* for client_list */
+};
+
+static LIST_HEAD(iovmm_devices);
+static LIST_HEAD(iovmm_groups);
+static DEFINE_MUTEX(iovmm_group_list_lock);
+static struct kmem_cache *iovmm_cache;
+
+static tegra_iovmm_addr_t iovmm_align_up(struct tegra_iovmm_device *dev,
+ tegra_iovmm_addr_t addr)
+{
+ addr += (1<<dev->pgsize_bits);
+ addr--;
+ addr &= ~((1<<dev->pgsize_bits)-1);
+ return addr;
+}
+
+static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
+ tegra_iovmm_addr_t addr)
+{
+ addr &= ~((1<<dev->pgsize_bits)-1);
+ return addr;
+}
+
+#define SIMALIGN(b, a) (((b)->start % (a)) ? ((a) - ((b)->start % (a))) : 0)
+
+size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client)
+{
+ struct rb_node *n;
+ struct tegra_iovmm_block *b;
+ struct tegra_iovmm_domain *domain = client->domain;
+ tegra_iovmm_addr_t max_free = 0;
+
+ spin_lock(&domain->block_lock);
+ n = rb_first(&domain->all_blocks);
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, all_node);
+ n = rb_next(n);
+ if (test_bit(BK_free, &b->flags)) {
+ max_free = max_t(tegra_iovmm_addr_t,
+ max_free, iovmm_length(b));
+ }
+ }
+ spin_unlock(&domain->block_lock);
+ return max_free;
+}
+
+
+static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
+ unsigned int *num_blocks, unsigned int *num_free,
+ tegra_iovmm_addr_t *total, size_t *total_free, size_t *max_free)
+{
+ struct rb_node *n;
+ struct tegra_iovmm_block *b;
+
+ *num_blocks = 0;
+ *num_free = 0;
+ *total = 0;
+ *total_free = 0;
+ *max_free = 0;
+
+ spin_lock(&domain->block_lock);
+ n = rb_first(&domain->all_blocks);
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, all_node);
+ n = rb_next(n);
+ (*num_blocks)++;
+ *total += b->length;
+ if (test_bit(BK_free, &b->flags)) {
+ (*num_free)++;
+ *total_free += b->length;
+ *max_free = max_t(size_t, *max_free, b->length);
+ }
+ }
+ spin_unlock(&domain->block_lock);
+}
+
+static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct iovmm_share_group *grp;
+ size_t max_free, total_free, total;
+ unsigned int num, num_free;
+
+ int len = 0;
+
+ mutex_lock(&iovmm_group_list_lock);
+ len += snprintf(page + len, count - len, "\ngroups\n");
+ if (list_empty(&iovmm_groups))
+ len += snprintf(page + len, count - len, "\t<empty>\n");
+ else {
+ list_for_each_entry(grp, &iovmm_groups, group_list) {
+ len += snprintf(page + len, count - len,
+ "\t%s (device: %s)\n",
+ grp->name ? grp->name : "<unnamed>",
+ grp->domain->dev->name);
+ tegra_iovmm_block_stats(grp->domain, &num,
+ &num_free, &total, &total_free, &max_free);
+ total >>= 10;
+ total_free >>= 10;
+ max_free >>= 10;
+ len += snprintf(page + len, count - len,
+ "\t\tsize: %uKiB free: %uKiB "
+ "largest: %uKiB (%u free / %u total blocks)\n",
+ total, total_free, max_free, num_free, num);
+ }
+ }
+ mutex_unlock(&iovmm_group_list_lock);
+
+ *eof = 1;
+ return len;
+}
+
+static void iovmm_block_put(struct tegra_iovmm_block *b)
+{
+ BUG_ON(b->poison);
+ BUG_ON(atomic_read(&b->ref) == 0);
+ if (!atomic_dec_return(&b->ref)) {
+ b->poison = 0xa5a5a5a5;
+ kmem_cache_free(iovmm_cache, b);
+ }
+}
+
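+/*
+ * Return a block to the free pool, merging it with free address-order
+ * neighbours before reinserting it into the size-sorted free tree.
+ */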
+static void iovmm_free_block(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_block *block)
+{
+ struct tegra_iovmm_block *pred = NULL; /* address-order predecessor */
+ struct tegra_iovmm_block *succ = NULL; /* address-order successor */
+ struct rb_node **p;
+ struct rb_node *parent = NULL, *temp;
+ int pred_free = 0, succ_free = 0;
+
+ iovmm_block_put(block);
+
+ spin_lock(&domain->block_lock);
+ temp = rb_prev(&block->all_node);
+ if (temp)
+ pred = rb_entry(temp, struct tegra_iovmm_block, all_node);
+ temp = rb_next(&block->all_node);
+ if (temp)
+ succ = rb_entry(temp, struct tegra_iovmm_block, all_node);
+
+ if (pred)
+ pred_free = test_bit(BK_free, &pred->flags);
+ if (succ)
+ succ_free = test_bit(BK_free, &succ->flags);
+
+ if (pred_free && succ_free) {
+ pred->length += block->length;
+ pred->length += succ->length;
+ rb_erase(&block->all_node, &domain->all_blocks);
+ rb_erase(&succ->all_node, &domain->all_blocks);
+ rb_erase(&succ->free_node, &domain->free_blocks);
+ rb_erase(&pred->free_node, &domain->free_blocks);
+ iovmm_block_put(block);
+ iovmm_block_put(succ);
+ block = pred;
+ } else if (pred_free) {
+ pred->length += block->length;
+ rb_erase(&block->all_node, &domain->all_blocks);
+ rb_erase(&pred->free_node, &domain->free_blocks);
+ iovmm_block_put(block);
+ block = pred;
+ } else if (succ_free) {
+ block->length += succ->length;
+ rb_erase(&succ->all_node, &domain->all_blocks);
+ rb_erase(&succ->free_node, &domain->free_blocks);
+ iovmm_block_put(succ);
+ }
+
+ p = &domain->free_blocks.rb_node;
+ while (*p) {
+ struct tegra_iovmm_block *b;
+ parent = *p;
+ b = rb_entry(parent, struct tegra_iovmm_block, free_node);
+ if (block->length >= b->length)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&block->free_node, parent, p);
+ rb_insert_color(&block->free_node, &domain->free_blocks);
+ set_bit(BK_free, &block->flags);
+ spin_unlock(&domain->block_lock);
+}
+
+/*
+ * if the best-fit block is larger than the requested size, a remainder
+ * block will be created and inserted into the free list in its place.
+ * since all free blocks are stored in two trees the new block needs to be
+ * linked into both.
+ */
+static struct tegra_iovmm_block *iovmm_split_free_block(
+ struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_block *block, unsigned long size)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct tegra_iovmm_block *rem;
+ struct tegra_iovmm_block *b;
+
+ rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
+ if (!rem)
+ return NULL;
+
+ spin_lock(&domain->block_lock);
+ p = &domain->free_blocks.rb_node;
+
+ rem->start = block->start + size;
+ rem->length = block->length - size;
+ atomic_set(&rem->ref, 1);
+ block->length = size;
+
+ while (*p) {
+ parent = *p;
+ b = rb_entry(parent, struct tegra_iovmm_block, free_node);
+ if (rem->length >= b->length)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ set_bit(BK_free, &rem->flags);
+ rb_link_node(&rem->free_node, parent, p);
+ rb_insert_color(&rem->free_node, &domain->free_blocks);
+
+ p = &domain->all_blocks.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ b = rb_entry(parent, struct tegra_iovmm_block, all_node);
+ if (rem->start >= b->start)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&rem->all_node, parent, p);
+ rb_insert_color(&rem->all_node, &domain->all_blocks);
+
+ return rem;
+}
+
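+/*
+ * Set while a split temporarily drops block_lock; allocators spin (with
+ * schedule()) until splitting completes so the trees stay consistent.
+ */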
+static int iovmm_block_splitting;
+static struct tegra_iovmm_block *iovmm_alloc_block(
+ struct tegra_iovmm_domain *domain, size_t size, size_t align)
+{
+ struct rb_node *n;
+ struct tegra_iovmm_block *b, *best;
+ size_t simalign;
+
+ BUG_ON(!size);
+ size = iovmm_align_up(domain->dev, size);
+ align = iovmm_align_up(domain->dev, align);
+ for (;;) {
+ spin_lock(&domain->block_lock);
+ if (!iovmm_block_splitting)
+ break;
+ spin_unlock(&domain->block_lock);
+ schedule();
+ }
+ n = domain->free_blocks.rb_node;
+ best = NULL;
+ while (n) {
+ tegra_iovmm_addr_t aligned_start, block_ceil;
+
+ b = rb_entry(n, struct tegra_iovmm_block, free_node);
+ simalign = SIMALIGN(b, align);
+ aligned_start = b->start + simalign;
+ block_ceil = b->start + b->length;
+
+ if (block_ceil >= aligned_start + size) {
+ /* Block has enough size */
+ best = b;
+ if (NO_SPLIT(simalign) &&
+ NO_SPLIT(block_ceil - (aligned_start + size)))
+ break;
+ n = n->rb_left;
+ } else {
+ n = n->rb_right;
+ }
+ }
+ if (!best) {
+ spin_unlock(&domain->block_lock);
+ return NULL;
+ }
+
+ simalign = SIMALIGN(best, align);
+ if (DO_SPLIT(simalign)) {
+ iovmm_block_splitting = 1;
+ spin_unlock(&domain->block_lock);
+
+ /* Split off misalignment */
+ b = best;
+ best = iovmm_split_free_block(domain, b, simalign);
+ if (best)
+ simalign = 0;
+ else
+ best = b;
+ }
+
+ /* Take the chosen block off the free tree */
+ rb_erase(&best->free_node, &domain->free_blocks);
+ clear_bit(BK_free, &best->flags);
+ atomic_inc(&best->ref);
+
+ iovmm_start(best) = best->start + simalign;
+ iovmm_length(best) = size;
+
+ if (DO_SPLIT((best->start + best->length) - iovmm_end(best))) {
+ iovmm_block_splitting = 1;
+ spin_unlock(&domain->block_lock);
+
+ /* Split off excess */
+ (void)iovmm_split_free_block(domain, best, size + simalign);
+ }
+
+ iovmm_block_splitting = 0;
+ spin_unlock(&domain->block_lock);
+
+ return best;
+}
+
+static struct tegra_iovmm_block *iovmm_allocate_vm(
+ struct tegra_iovmm_domain *domain, size_t size,
+ size_t align, unsigned long iovm_start)
+{
+ struct rb_node *n;
+ struct tegra_iovmm_block *b, *best;
+
+ BUG_ON(iovm_start % align);
+ BUG_ON(!size);
+
+ size = iovmm_align_up(domain->dev, size);
+ for (;;) {
+ spin_lock(&domain->block_lock);
+ if (!iovmm_block_splitting)
+ break;
+ spin_unlock(&domain->block_lock);
+ schedule();
+ }
+
+ n = rb_first(&domain->free_blocks);
+ best = NULL;
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, free_node);
+ if ((b->start <= iovm_start) &&
+ (b->start + b->length) >= (iovm_start + size)) {
+ best = b;
+ break;
+ }
+ n = rb_next(n);
+ }
+
+ if (!best)
+ goto fail;
+
+ /* split the mem before iovm_start. */
+ if (DO_SPLIT(iovm_start - best->start)) {
+ iovmm_block_splitting = 1;
+ spin_unlock(&domain->block_lock);
+ best = iovmm_split_free_block(domain, best,
+ (iovm_start - best->start));
+ }
+ if (!best)
+ goto fail;
+
+ /* remove the desired block from free list. */
+ rb_erase(&best->free_node, &domain->free_blocks);
+ clear_bit(BK_free, &best->flags);
+ atomic_inc(&best->ref);
+
+ iovmm_start(best) = iovm_start;
+ iovmm_length(best) = size;
+
+ BUG_ON(best->start > iovmm_start(best));
+ BUG_ON((best->start + best->length) < iovmm_end(best));
+ /* split the mem after iovm_start+size. */
+ if (DO_SPLIT(best->start + best->length - iovmm_end(best))) {
+ iovmm_block_splitting = 1;
+ spin_unlock(&domain->block_lock);
+ (void)iovmm_split_free_block(domain, best,
+ (iovmm_start(best) - best->start + size));
+ }
+fail:
+ iovmm_block_splitting = 0;
+ spin_unlock(&domain->block_lock);
+ return best;
+}
+
+int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+ tegra_iovmm_addr_t end)
+{
+ struct tegra_iovmm_block *b;
+
+ b = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ domain->dev = dev;
+ atomic_set(&domain->clients, 0);
+ atomic_set(&domain->locks, 0);
+ atomic_set(&b->ref, 1);
+ spin_lock_init(&domain->block_lock);
+ init_rwsem(&domain->map_lock);
+ init_waitqueue_head(&domain->delay_lock);
+ b->start = iovmm_align_up(dev, start);
+ b->length = iovmm_align_down(dev, end) - b->start;
+ set_bit(BK_free, &b->flags);
+ rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
+ rb_insert_color(&b->free_node, &domain->free_blocks);
+ rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
+ rb_insert_color(&b->all_node, &domain->all_blocks);
+ return 0;
+}
+
+/*
+ * If iovm_start != 0, try to allocate the area at that exact iova;
+ * the allocation fails if that range is not free.
+ */
+struct tegra_iovmm_area *tegra_iovmm_create_vm(
+ struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+ size_t size, size_t align, pgprot_t pgprot, unsigned long iovm_start)
+{
+ struct tegra_iovmm_block *b;
+ struct tegra_iovmm_domain *domain;
+
+ if (!client)
+ return NULL;
+
+ domain = client->domain;
+
+ if (iovm_start)
+ b = iovmm_allocate_vm(domain, size, align, iovm_start);
+ else
+ b = iovmm_alloc_block(domain, size, align);
+ if (!b)
+ return NULL;
+
+ b->vm_area.domain = domain;
+ b->vm_area.pgprot = pgprot;
+ b->vm_area.ops = ops;
+
+ down_read(&b->vm_area.domain->map_lock);
+ if (ops && !test_bit(CL_locked, &client->flags)) {
+ set_bit(BK_map_dirty, &b->flags);
+ set_bit(DM_map_dirty, &client->domain->flags);
+ } else if (ops) {
+ if (domain->dev->ops->map(domain, &b->vm_area))
+ pr_err("%s failed to map locked domain\n", __func__);
+ }
+ up_read(&b->vm_area.domain->map_lock);
+
+ return &b->vm_area;
+}
+
+void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *vm,
+ tegra_iovmm_addr_t vaddr, unsigned long pfn)
+{
+ struct tegra_iovmm_domain *domain = vm->domain;
+ BUG_ON(vaddr & ((1<<domain->dev->pgsize_bits)-1));
+ BUG_ON(vaddr >= vm->iovm_start + vm->iovm_length);
+ BUG_ON(vaddr < vm->iovm_start);
+ BUG_ON(vm->ops);
+
+ domain->dev->ops->map_pfn(domain, vm, vaddr, pfn);
+}
+
+void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
+{
+ struct tegra_iovmm_block *b;
+ struct tegra_iovmm_domain *domain;
+
+ b = container_of(vm, struct tegra_iovmm_block, vm_area);
+ domain = vm->domain;
+ /*
+ * if the vm area mapping was deferred, don't unmap it since
+ * the memory for the page tables it uses may not be allocated
+ */
+ down_read(&domain->map_lock);
+ if (!test_and_clear_bit(BK_map_dirty, &b->flags))
+ domain->dev->ops->unmap(domain, vm, false);
+ up_read(&domain->map_lock);
+}
+
+void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
+{
+ struct tegra_iovmm_block *b;
+ struct tegra_iovmm_domain *domain;
+
+ b = container_of(vm, struct tegra_iovmm_block, vm_area);
+ domain = vm->domain;
+ if (!vm->ops)
+ return;
+
+ down_read(&domain->map_lock);
+ if (vm->ops) {
+ if (atomic_read(&domain->locks))
+ domain->dev->ops->map(domain, vm);
+ else {
+ set_bit(BK_map_dirty, &b->flags);
+ set_bit(DM_map_dirty, &domain->flags);
+ }
+ }
+ up_read(&domain->map_lock);
+}
+
+void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
+{
+ struct tegra_iovmm_block *b;
+ struct tegra_iovmm_domain *domain;
+
+ if (!vm)
+ return;
+
+ b = container_of(vm, struct tegra_iovmm_block, vm_area);
+ domain = vm->domain;
+ down_read(&domain->map_lock);
+ if (!test_and_clear_bit(BK_map_dirty, &b->flags))
+ domain->dev->ops->unmap(domain, vm, true);
+ iovmm_free_block(domain, b);
+ up_read(&domain->map_lock);
+}
+
+struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm)
+{
+ struct tegra_iovmm_block *b;
+
+ BUG_ON(!vm);
+ b = container_of(vm, struct tegra_iovmm_block, vm_area);
+
+ atomic_inc(&b->ref);
+ return &b->vm_area;
+}
+
+void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
+{
+ struct tegra_iovmm_block *b;
+ BUG_ON(!vm);
+ b = container_of(vm, struct tegra_iovmm_block, vm_area);
+ iovmm_block_put(b);
+}
+
+struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+ struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
+{
+ struct rb_node *n;
+ struct tegra_iovmm_block *b = NULL;
+
+ if (!client)
+ return NULL;
+
+ spin_lock(&client->domain->block_lock);
+ n = client->domain->all_blocks.rb_node;
+
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, all_node);
+ if (iovmm_start(b) <= addr && addr <= iovmm_end(b)) {
+ if (test_bit(BK_free, &b->flags))
+ b = NULL;
+ break;
+ }
+ if (addr > iovmm_start(b))
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ b = NULL;
+ }
+ if (b)
+ atomic_inc(&b->ref);
+ spin_unlock(&client->domain->block_lock);
+ if (!b)
+ return NULL;
+ return &b->vm_area;
+}
+
+static int _iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+ struct tegra_iovmm_device *dev;
+ struct tegra_iovmm_domain *domain;
+ int v;
+
+ if (unlikely(!client))
+ return -ENODEV;
+
+ if (unlikely(test_bit(CL_locked, &client->flags))) {
+ pr_err("attempting to relock client %s\n", client->name);
+ return 0;
+ }
+
+ domain = client->domain;
+ dev = domain->dev;
+ down_write(&domain->map_lock);
+ v = atomic_inc_return(&domain->locks);
+ /*
+ * if the device doesn't export the lock_domain function, the
+ * device must guarantee that any valid domain will be locked.
+ */
+ if (v == 1 && dev->ops->lock_domain) {
+ if (dev->ops->lock_domain(domain, client)) {
+ atomic_dec(&domain->locks);
+ up_write(&domain->map_lock);
+ return -EAGAIN;
+ }
+ }
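+ /*
+ * Now that the domain is locked, replay any mappings that were
+ * deferred (marked map-dirty) while it was unlocked.
+ */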
+ if (test_and_clear_bit(DM_map_dirty, &domain->flags)) {
+ struct rb_node *n;
+ struct tegra_iovmm_block *b;
+
+ spin_lock(&domain->block_lock);
+ n = rb_first(&domain->all_blocks);
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, all_node);
+ n = rb_next(n);
+ if (test_bit(BK_free, &b->flags))
+ continue;
+
+ if (test_and_clear_bit(BK_map_dirty, &b->flags)) {
+ if (!b->vm_area.ops) {
+ pr_err("%s: "
+ "vm_area ops must exist for lazy maps\n",
+ __func__);
+ continue;
+ }
+ dev->ops->map(domain, &b->vm_area);
+ }
+ }
+ }
+ set_bit(CL_locked, &client->flags);
+ up_write(&domain->map_lock);
+ return 0;
+}
+
+int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
+{
+ return _iovmm_client_lock(client);
+}
+
+int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+ int ret;
+
+ if (!client)
+ return -ENODEV;
+
+ ret = wait_event_interruptible(client->domain->delay_lock,
+ _iovmm_client_lock(client) != -EAGAIN);
+
+ if (ret == -ERESTARTSYS)
+ return -EINTR;
+
+ return ret;
+}
+
+void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
+{
+ struct tegra_iovmm_device *dev;
+ struct tegra_iovmm_domain *domain;
+ int do_wake = 0;
+
+ if (!client)
+ return;
+
+ if (!test_and_clear_bit(CL_locked, &client->flags)) {
+ pr_err("unlocking unlocked client %s\n", client->name);
+ return;
+ }
+
+ domain = client->domain;
+ dev = domain->dev;
+ down_write(&domain->map_lock);
+ if (!atomic_dec_return(&domain->locks)) {
+ if (dev->ops->unlock_domain)
+ dev->ops->unlock_domain(domain, client);
+ do_wake = 1;
+ }
+ up_write(&domain->map_lock);
+ if (do_wake)
+ wake_up(&domain->delay_lock);
+}
+
+size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
+{
+ struct tegra_iovmm_domain *domain;
+ struct rb_node *n;
+ struct tegra_iovmm_block *b;
+ size_t size = 0;
+
+ if (!client)
+ return 0;
+
+ domain = client->domain;
+
+ spin_lock(&domain->block_lock);
+ n = rb_first(&domain->all_blocks);
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, all_node);
+ n = rb_next(n);
+ size += b->length;
+ }
+ spin_unlock(&domain->block_lock);
+
+ return size;
+}
+
+void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
+{
+ struct tegra_iovmm_device *dev;
+ struct tegra_iovmm_domain *domain;
+
+ if (!client)
+ return;
+
+ BUG_ON(!client->domain || !client->domain->dev);
+
+ domain = client->domain;
+ dev = domain->dev;
+
+ if (test_and_clear_bit(CL_locked, &client->flags)) {
+ pr_err("freeing locked client %s\n", client->name);
+ if (!atomic_dec_return(&domain->locks)) {
+ down_write(&domain->map_lock);
+ if (dev->ops->unlock_domain)
+ dev->ops->unlock_domain(domain, client);
+ up_write(&domain->map_lock);
+ wake_up(&domain->delay_lock);
+ }
+ }
+ mutex_lock(&iovmm_group_list_lock);
+ if (!atomic_dec_return(&domain->clients))
+ if (dev->ops->free_domain)
+ dev->ops->free_domain(domain, client);
+ list_del(&client->list);
+ if (list_empty(&client->group->client_list)) {
+ list_del(&client->group->group_list);
+ kfree(client->group->name);
+ kfree(client->group);
+ }
+ kfree(client->name);
+ kfree(client);
+ mutex_unlock(&iovmm_group_list_lock);
+}
+
+struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
+ const char *share_group, struct miscdevice *misc_dev)
+{
+ struct tegra_iovmm_client *c = kzalloc(sizeof(*c), GFP_KERNEL);
+ struct iovmm_share_group *grp = NULL;
+ struct tegra_iovmm_device *dev;
+
+ if (!c)
+ return NULL;
+ c->name = kstrdup(name, GFP_KERNEL);
+ if (!c->name)
+ goto fail;
+ c->misc_dev = misc_dev;
+
+ mutex_lock(&iovmm_group_list_lock);
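+ /* Clients passing the same share_group name share a single domain. */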
+ if (share_group) {
+ list_for_each_entry(grp, &iovmm_groups, group_list) {
+ if (grp->name && !strcmp(grp->name, share_group))
+ break;
+ }
+ }
+ if (!grp || strcmp(grp->name, share_group)) {
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ goto fail_lock;
+ grp->name =
+ share_group ? kstrdup(share_group, GFP_KERNEL) : NULL;
+ if (share_group && !grp->name) {
+ kfree(grp);
+ goto fail_lock;
+ }
+ list_for_each_entry(dev, &iovmm_devices, list) {
+ grp->domain = dev->ops->alloc_domain(dev, c);
+ if (grp->domain)
+ break;
+ }
+ if (!grp->domain) {
+ pr_err("%s: alloc_domain failed for %s\n",
+ __func__, c->name);
+ dump_stack();
+ kfree(grp->name);
+ kfree(grp);
+ grp = NULL;
+ goto fail_lock;
+ }
+ spin_lock_init(&grp->lock);
+ INIT_LIST_HEAD(&grp->client_list);
+ list_add_tail(&grp->group_list, &iovmm_groups);
+ }
+
+ atomic_inc(&grp->domain->clients);
+ c->group = grp;
+ c->domain = grp->domain;
+ spin_lock(&grp->lock);
+ list_add_tail(&c->list, &grp->client_list);
+ spin_unlock(&grp->lock);
+ mutex_unlock(&iovmm_group_list_lock);
+ return c;
+
+fail_lock:
+ mutex_unlock(&iovmm_group_list_lock);
+fail:
+ if (c)
+ kfree(c->name);
+ kfree(c);
+ return NULL;
+}
+
+int tegra_iovmm_register(struct tegra_iovmm_device *dev)
+{
+ BUG_ON(!dev);
+ mutex_lock(&iovmm_group_list_lock);
+ if (list_empty(&iovmm_devices)) {
+ iovmm_cache = KMEM_CACHE(tegra_iovmm_block, 0);
+ if (!iovmm_cache) {
+ pr_err("%s: failed to make kmem cache\n", __func__);
+ mutex_unlock(&iovmm_group_list_lock);
+ return -ENOMEM;
+ }
+ create_proc_read_entry("iovmminfo", S_IRUGO, NULL,
+ tegra_iovmm_read_proc, NULL);
+ }
+ list_add_tail(&dev->list, &iovmm_devices);
+ mutex_unlock(&iovmm_group_list_lock);
+ pr_info("%s: added %s\n", __func__, dev->name);
+ return 0;
+}
+
+int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
+{
+ mutex_lock(&iovmm_group_list_lock);
+ list_del(&dev->list);
+ mutex_unlock(&iovmm_group_list_lock);
+ return 0;
+}
+
+static int tegra_iovmm_suspend(void)
+{
+ int rc = 0;
+ struct tegra_iovmm_device *dev;
+
+ list_for_each_entry(dev, &iovmm_devices, list) {
+ if (!dev->ops->suspend)
+ continue;
+
+ rc = dev->ops->suspend(dev);
+ if (rc) {
+ pr_err("%s: %s suspend returned %d\n",
+ __func__, dev->name, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void tegra_iovmm_resume(void)
+{
+ struct tegra_iovmm_device *dev;
+
+ list_for_each_entry(dev, &iovmm_devices, list) {
+ if (dev->ops->resume)
+ dev->ops->resume(dev);
+ }
+}
+
+static struct syscore_ops tegra_iovmm_syscore_ops = {
+ .suspend = tegra_iovmm_suspend,
+ .resume = tegra_iovmm_resume,
+};
+
+static __init int tegra_iovmm_syscore_init(void)
+{
+ register_syscore_ops(&tegra_iovmm_syscore_ops);
+ return 0;
+}
+subsys_initcall(tegra_iovmm_syscore_init);
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 4956c3cea731..e989d19a2fa4 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -4,7 +4,7 @@
* Author:
* Colin Cross <ccross@android.com>
*
- * Copyright (C) 2010, NVIDIA Corporation
+ * Copyright (C) 2010-2011, NVIDIA Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -21,12 +21,15 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/syscore_ops.h>
#include <asm/hardware/gic.h>
#include <mach/iomap.h>
+#include <mach/legacy_irq.h>
#include "board.h"
+#include "pm-irq.h"
#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
@@ -47,7 +50,7 @@
#define ICTLR_COP_IER_CLR 0x38
#define ICTLR_COP_IEP_CLASS 0x3c
-#define NUM_ICTLRS 4
+#define NUM_ICTLRS (INT_MAIN_NR/32)
#define FIRST_LEGACY_IRQ 32
static void __iomem *ictlr_reg_base[] = {
@@ -55,8 +58,17 @@ static void __iomem *ictlr_reg_base[] = {
IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
+#if (NUM_ICTLRS > 4)
+ IO_ADDRESS(TEGRA_QUINARY_ICTLR_BASE),
+#endif
};
+#ifdef CONFIG_PM_SLEEP
+static u32 cop_ier[NUM_ICTLRS];
+static u32 cpu_ier[NUM_ICTLRS];
+static u32 cpu_iep[NUM_ICTLRS];
+#endif
+
static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
{
void __iomem *base;
@@ -113,6 +125,70 @@ static int tegra_retrigger(struct irq_data *d)
return 1;
}
+static int tegra_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ return tegra_pm_irq_set_wake_type(d->irq, flow_type);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_set_wake(struct irq_data *d, unsigned int enable)
+{
+ return tegra_pm_irq_set_wake(d->irq, enable);
+}
+
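+/*
+ * Save the per-controller CPU enable/class state and the COP enables,
+ * then mask all COP interrupts across suspend; resume restores them.
+ */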
+static int tegra_legacy_irq_suspend(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ void __iomem *ictlr = ictlr_reg_base[i];
+ cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
+ cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS);
+ cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
+ writel(~0, ictlr + ICTLR_COP_IER_CLR);
+ }
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static void tegra_legacy_irq_resume(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ void __iomem *ictlr = ictlr_reg_base[i];
+ writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
+ writel(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
+ writel(0, ictlr + ICTLR_COP_IEP_CLASS);
+ writel(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
+ }
+ local_irq_restore(flags);
+}
+
+static struct syscore_ops tegra_legacy_irq_syscore_ops = {
+ .suspend = tegra_legacy_irq_suspend,
+ .resume = tegra_legacy_irq_resume,
+};
+
+static int tegra_legacy_irq_syscore_init(void)
+{
+ register_syscore_ops(&tegra_legacy_irq_syscore_ops);
+
+ return 0;
+}
+subsys_initcall(tegra_legacy_irq_syscore_init);
+#else
+#define tegra_set_wake NULL
+#endif
+
void __init tegra_init_irq(void)
{
int i;
@@ -121,6 +197,7 @@ void __init tegra_init_irq(void)
void __iomem *ictlr = ictlr_reg_base[i];
writel(~0, ictlr + ICTLR_CPU_IER_CLR);
writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
+ writel(~0, ictlr + ICTLR_CPU_IEP_FIR_CLR);
}
gic_arch_extn.irq_ack = tegra_ack;
@@ -128,7 +205,21 @@ void __init tegra_init_irq(void)
gic_arch_extn.irq_mask = tegra_mask;
gic_arch_extn.irq_unmask = tegra_unmask;
gic_arch_extn.irq_retrigger = tegra_retrigger;
+ gic_arch_extn.irq_set_type = tegra_set_type;
+ gic_arch_extn.irq_set_wake = tegra_set_wake;
+ gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND;
gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
}
+
+void tegra_init_legacy_irq_cop(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_ICTLRS; i++) {
+ void __iomem *ictlr = ictlr_reg_base[i];
+ writel(~0, ictlr + ICTLR_COP_IER_CLR);
+ writel(0, ictlr + ICTLR_COP_IEP_CLASS);
+ }
+}
diff --git a/arch/arm/mach-tegra/kfuse.c b/arch/arm/mach-tegra/kfuse.c
new file mode 100644
index 000000000000..9e4b482e4691
--- /dev/null
+++ b/arch/arm/mach-tegra/kfuse.c
@@ -0,0 +1,114 @@
+/*
+ * arch/arm/mach-tegra/kfuse.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* The kfuse block stores downstream and upstream HDCP keys for use by the
+ * HDMI module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <mach/iomap.h>
+#include <mach/kfuse.h>
+
+#include "clock.h"
+#include "apbio.h"
+
+static struct clk *kfuse_clk = NULL;
+
+/* register definition */
+#define KFUSE_STATE 0x80
+#define KFUSE_STATE_DONE (1u << 16)
+#define KFUSE_STATE_CRCPASS (1u << 17)
+#define KFUSE_KEYADDR 0x88
+#define KFUSE_KEYADDR_AUTOINC (1u << 16)
+#define KFUSE_KEYS 0x8c
+
+static inline u32 tegra_kfuse_readl(unsigned long offset)
+{
+ return tegra_apb_readl(TEGRA_KFUSE_BASE + offset);
+}
+
+static inline void tegra_kfuse_writel(u32 value, unsigned long offset)
+{
+ tegra_apb_writel(value, TEGRA_KFUSE_BASE + offset);
+}
+
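+/* Poll KFUSE_STATE until the DONE bit is set, for up to ~500 ms. */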
+static int wait_for_done(void)
+{
+ u32 reg;
+ int retries = 50;
+ do {
+ reg = tegra_kfuse_readl(KFUSE_STATE);
+ if (reg & KFUSE_STATE_DONE)
+ return 0;
+ msleep(10);
+ } while (--retries);
+ return -ETIMEDOUT;
+}
+
+/* Read up to KFUSE_DATA_SZ bytes into dest.
+ * Reads always start at the first kfuse.
+ */
+int tegra_kfuse_read(void *dest, size_t len)
+{
+ int err;
+ u32 v;
+ unsigned cnt;
+
+ if (len > KFUSE_DATA_SZ)
+ return -EINVAL;
+
+ if (kfuse_clk == NULL) {
+ kfuse_clk = tegra_get_clock_by_name("kfuse");
+ if (IS_ERR_OR_NULL(kfuse_clk)) {
+ pr_err("kfuse: can't get kfuse clock\n");
+ return -EINVAL;
+ }
+ }
+
+ err = clk_enable(kfuse_clk);
+ if (err)
+ return err;
+
+ tegra_kfuse_writel(KFUSE_KEYADDR_AUTOINC, KFUSE_KEYADDR);
+
+ err = wait_for_done();
+ if (err) {
+ pr_err("kfuse: read timeout\n");
+ clk_disable(kfuse_clk);
+ return err;
+ }
+
+ if ((tegra_kfuse_readl(KFUSE_STATE) & KFUSE_STATE_CRCPASS) == 0) {
+ pr_err("kfuse: crc failed\n");
+ clk_disable(kfuse_clk);
+ return -EIO;
+ }
+
+ for (cnt = 0; cnt < len; cnt += 4) {
+ v = tegra_kfuse_readl(KFUSE_KEYS);
+ memcpy(dest + cnt, &v, sizeof v);
+ }
+
+ clk_disable(kfuse_clk);
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/latency_allowance.c b/arch/arm/mach-tegra/latency_allowance.c
new file mode 100644
index 000000000000..ab2459bc4ca8
--- /dev/null
+++ b/arch/arm/mach-tegra/latency_allowance.c
@@ -0,0 +1,593 @@
+/*
+ * arch/arm/mach-tegra/latency_allowance.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
+#include <linux/stringify.h>
+#include <asm/bug.h>
+#include <asm/io.h>
+#include <asm/string.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+#include <mach/latency_allowance.h>
+
+#define MC_ARB_OVERRIDE 0xe8
+#define GLOBAL_LATENCY_SCALING_ENABLE_BIT 7
+
+#define MC_LA_AFI_0 0x2e0
+#define MC_LA_AVPC_ARM7_0 0x2e4
+#define MC_LA_DC_0 0x2e8
+#define MC_LA_DC_1 0x2ec
+#define MC_LA_DC_2 0x2f0
+#define MC_LA_DCB_0 0x2f4
+#define MC_LA_DCB_1 0x2f8
+#define MC_LA_DCB_2 0x2fc
+#define MC_LA_EPP_0 0x300
+#define MC_LA_EPP_1 0x304
+#define MC_LA_G2_0 0x308
+#define MC_LA_G2_1 0x30c
+#define MC_LA_HC_0 0x310
+#define MC_LA_HC_1 0x314
+#define MC_LA_HDA_0 0x318
+#define MC_LA_ISP_0 0x31C
+#define MC_LA_MPCORE_0 0x320
+#define MC_LA_MPCORELP_0 0x324
+#define MC_LA_MPE_0 0x328
+#define MC_LA_MPE_1 0x32c
+#define MC_LA_MPE_2 0x330
+#define MC_LA_NV_0 0x334
+#define MC_LA_NV_1 0x338
+#define MC_LA_NV2_0 0x33c
+#define MC_LA_NV2_1 0x340
+#define MC_LA_PPCS_0 0x344
+#define MC_LA_PPCS_1 0x348
+#define MC_LA_PTC_0 0x34c
+#define MC_LA_SATA_0 0x350
+#define MC_LA_VDE_0 0x354
+#define MC_LA_VDE_1 0x358
+#define MC_LA_VDE_2 0x35c
+#define MC_LA_VDE_3 0x360
+#define MC_LA_VI_0 0x364
+#define MC_LA_VI_1 0x368
+#define MC_LA_VI_2 0x36c
+
+#define DS_DISP_MCCIF_DISPLAY0A_HYST (0x481 * 4)
+#define DS_DISP_MCCIF_DISPLAY0B_HYST (0x482 * 4)
+#define DS_DISP_MCCIF_DISPLAY0C_HYST (0x483 * 4)
+#define DS_DISP_MCCIF_DISPLAY1B_HYST (0x484 * 4)
+
+#define DS_DISP_MCCIF_DISPLAY0AB_HYST (0x481 * 4)
+#define DS_DISP_MCCIF_DISPLAY0BB_HYST (0x482 * 4)
+#define DS_DISP_MCCIF_DISPLAY0CB_HYST (0x483 * 4)
+#define DS_DISP_MCCIF_DISPLAY1BB_HYST (0x484 * 4)
+
+#define VI_MCCIF_VIWSB_HYST (0x9a * 4)
+#define VI_MCCIF_VIWU_HYST (0x9b * 4)
+#define VI_MCCIF_VIWV_HYST (0x9c * 4)
+#define VI_MCCIF_VIWY_HYST (0x9d * 4)
+
+#define VI_TIMEOUT_WOCAL_VI (0x70 * 4)
+#define VI_RESERVE_3 (0x97 * 4)
+#define VI_RESERVE_4 (0x98 * 4)
+
+/* maximum valid value for latency allowance */
+#define MC_LA_MAX_VALUE 255
+
+#define ENABLE_LA_DEBUG 0
+#define TEST_LA_CODE 0
+
+#define la_debug(fmt, ...) \
+ do { \
+ if (ENABLE_LA_DEBUG) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+
+static struct dentry *latency_debug_dir;
+
+struct la_client_info {
+ unsigned int fifo_size_in_atoms;
+ unsigned int expiration_in_ns; /* worst case expiration value */
+ unsigned long reg_addr;
+ unsigned long mask;
+ unsigned long shift;
+ enum tegra_la_id id;
+ char *name;
+ bool scaling_supported;
+};
+
+static DEFINE_SPINLOCK(safety_lock);
+
+static const int ns_per_tick = 30;
+/* fifo atom size in bytes for non-fdc clients*/
+static const int normal_atom_size = 16;
+/* fifo atom size in bytes for fdc clients*/
+static const int fdc_atom_size = 32;
+
+#define MC_RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_MC_BASE) + (MC_##r))
+#define RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_MC_BASE) + (MC_LA_##r))
+
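+/*
+ * The bitfield arguments below are written as "msb : lsb"; (1 ? x) picks
+ * the msb and (0 ? x) picks the lsb via the conditional operator.
+ */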
+#define MASK(x) \
+ ((0xFFFFFFFFUL >> (31 - (1 ? x) + (0 ? x))) << (0 ? x))
+#define SHIFT(x) \
+ (0 ? x)
+#define ID(id) \
+ TEGRA_LA_##id
+
+#define LA_INFO(f, e, a, r, id, ss) \
+{f, e, RA(a), MASK(r), SHIFT(r), ID(id), __stringify(id), ss}
+
+/*
+ * The rule for getting fifo_size_in_atoms is:
+ * 1. If REORDER_DEPTH exists, use it (the default is overridden).
+ * 2. Else, if it is a write client, use RFIFO_DEPTH.
+ * 3. Else (read client), use RDFIFO_DEPTH.
+ * Refer to the project.h file.
+ */
+struct la_client_info la_info[] = {
+ LA_INFO(32, 150, AFI_0, 7 : 0, AFIR, false),
+ LA_INFO(32, 150, AFI_0, 23 : 16, AFIW, false),
+ LA_INFO(2, 150, AVPC_ARM7_0, 7 : 0, AVPC_ARM7R, false),
+ LA_INFO(2, 150, AVPC_ARM7_0, 23 : 16, AVPC_ARM7W, false),
+ LA_INFO(128, 1050, DC_0, 7 : 0, DISPLAY_0A, true),
+ LA_INFO(64, 1050, DC_0, 23 : 16, DISPLAY_0B, true),
+ LA_INFO(128, 1050, DC_1, 7 : 0, DISPLAY_0C, true),
+ LA_INFO(64, 1050, DC_1, 23 : 16, DISPLAY_1B, true),
+ LA_INFO(2, 1050, DC_2, 7 : 0, DISPLAY_HC, false),
+ LA_INFO(128, 1050, DCB_0, 7 : 0, DISPLAY_0AB, true),
+ LA_INFO(64, 1050, DCB_0, 23 : 16, DISPLAY_0BB, true),
+ LA_INFO(128, 1050, DCB_1, 7 : 0, DISPLAY_0CB, true),
+ LA_INFO(64, 1050, DCB_1, 23 : 16, DISPLAY_1BB, true),
+ LA_INFO(2, 1050, DCB_2, 7 : 0, DISPLAY_HCB, false),
+ LA_INFO(8, 150, EPP_0, 7 : 0, EPPUP, false),
+ LA_INFO(64, 150, EPP_0, 23 : 16, EPPU, false),
+ LA_INFO(64, 150, EPP_1, 7 : 0, EPPV, false),
+ LA_INFO(64, 150, EPP_1, 23 : 16, EPPY, false),
+ LA_INFO(64, 150, G2_0, 7 : 0, G2PR, false),
+ LA_INFO(64, 150, G2_0, 23 : 16, G2SR, false),
+ LA_INFO(48, 150, G2_1, 7 : 0, G2DR, false),
+ LA_INFO(128, 150, G2_1, 23 : 16, G2DW, false),
+ LA_INFO(16, 150, HC_0, 7 : 0, HOST1X_DMAR, false),
+ LA_INFO(8, 150, HC_0, 23 : 16, HOST1XR, false),
+ LA_INFO(32, 150, HC_1, 7 : 0, HOST1XW, false),
+ LA_INFO(16, 150, HDA_0, 7 : 0, HDAR, false),
+ LA_INFO(16, 150, HDA_0, 23 : 16, HDAW, false),
+ LA_INFO(64, 150, ISP_0, 7 : 0, ISPW, false),
+ LA_INFO(14, 150, MPCORE_0, 7 : 0, MPCORER, false),
+ LA_INFO(24, 150, MPCORE_0, 23 : 16, MPCOREW, false),
+ LA_INFO(14, 150, MPCORELP_0, 7 : 0, MPCORE_LPR, false),
+ LA_INFO(24, 150, MPCORELP_0, 23 : 16, MPCORE_LPW, false),
+ LA_INFO(8, 150, MPE_0, 7 : 0, MPE_UNIFBR, false),
+ LA_INFO(2, 150, MPE_0, 23 : 16, MPE_IPRED, false),
+ LA_INFO(64, 150, MPE_1, 7 : 0, MPE_AMEMRD, false),
+ LA_INFO(8, 150, MPE_1, 23 : 16, MPE_CSRD, false),
+ LA_INFO(8, 150, MPE_2, 7 : 0, MPE_UNIFBW, false),
+ LA_INFO(8, 150, MPE_2, 23 : 16, MPE_CSWR, false),
+ LA_INFO(48, 150, NV_0, 7 : 0, FDCDRD, false),
+ LA_INFO(64, 150, NV_0, 23 : 16, IDXSRD, false),
+ LA_INFO(64, 150, NV_1, 7 : 0, TEXSRD, false),
+ LA_INFO(48, 150, NV_1, 23 : 16, FDCDWR, false),
+ LA_INFO(48, 150, NV2_0, 7 : 0, FDCDRD2, false),
+ LA_INFO(64, 150, NV2_0, 23 : 16, IDXSRD2, false),
+ LA_INFO(64, 150, NV2_1, 7 : 0, TEXSRD2, false),
+ LA_INFO(48, 150, NV2_1, 23 : 16, FDCDWR2, false),
+ LA_INFO(2, 150, PPCS_0, 7 : 0, PPCS_AHBDMAR, false),
+ LA_INFO(8, 150, PPCS_0, 23 : 16, PPCS_AHBSLVR, false),
+ LA_INFO(2, 150, PPCS_1, 7 : 0, PPCS_AHBDMAW, false),
+ LA_INFO(4, 150, PPCS_1, 23 : 16, PPCS_AHBSLVW, false),
+ LA_INFO(2, 150, PTC_0, 7 : 0, PTCR, false),
+ LA_INFO(32, 150, SATA_0, 7 : 0, SATAR, false),
+ LA_INFO(32, 150, SATA_0, 23 : 16, SATAW, false),
+ LA_INFO(8, 150, VDE_0, 7 : 0, VDE_BSEVR, false),
+ LA_INFO(4, 150, VDE_0, 23 : 16, VDE_MBER, false),
+ LA_INFO(16, 150, VDE_1, 7 : 0, VDE_MCER, false),
+ LA_INFO(16, 150, VDE_1, 23 : 16, VDE_TPER, false),
+ LA_INFO(4, 150, VDE_2, 7 : 0, VDE_BSEVW, false),
+ LA_INFO(16, 150, VDE_2, 23 : 16, VDE_DBGW, false),
+ LA_INFO(2, 150, VDE_3, 7 : 0, VDE_MBEW, false),
+ LA_INFO(16, 150, VDE_3, 23 : 16, VDE_TPMW, false),
+ LA_INFO(8, 1050, VI_0, 7 : 0, VI_RUV, false),
+ LA_INFO(64, 1050, VI_0, 23 : 16, VI_WSB, true),
+ LA_INFO(64, 1050, VI_1, 7 : 0, VI_WU, true),
+ LA_INFO(64, 1050, VI_1, 23 : 16, VI_WV, true),
+ LA_INFO(64, 1050, VI_2, 7 : 0, VI_WY, true),
+
+/* end of list. */
+ LA_INFO(0, 0, AFI_0, 0 : 0, MAX_ID, false)
+};
+
+struct la_scaling_info {
+ unsigned int threshold_low;
+ unsigned int threshold_mid;
+ unsigned int threshold_high;
+ int scaling_ref_count;
+ int actual_la_to_set;
+ int la_set;
+};
+
+struct la_scaling_reg_info {
+ enum tegra_la_id id;
+ unsigned int tl_reg_addr;
+ unsigned int tl_mask;
+ unsigned int tl_shift;
+ unsigned int tm_reg_addr;
+ unsigned int tm_mask;
+ unsigned int tm_shift;
+ unsigned int th_reg_addr;
+ unsigned int th_mask;
+ unsigned int th_shift;
+};
+
+#define DISP1_RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_DISPLAY_BASE) + DS_DISP_MCCIF_##r##_HYST)
+#define DISP2_RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_DISPLAY2_BASE) + DS_DISP_MCCIF_##r##_HYST)
+
+#define DISP_SCALING_REG_INFO(id, r, ra) \
+ { \
+ ID(id), \
+ ra(r), MASK(15 : 8), SHIFT(15 : 8), \
+ ra(r), MASK(23 : 16), SHIFT(23 : 16), \
+ ra(r), MASK(7 : 0), SHIFT(7 : 0) \
+ }
+
+struct la_scaling_reg_info disp_info[] = {
+ DISP_SCALING_REG_INFO(DISPLAY_0A, DISPLAY0A, DISP1_RA),
+ DISP_SCALING_REG_INFO(DISPLAY_0B, DISPLAY0B, DISP1_RA),
+ DISP_SCALING_REG_INFO(DISPLAY_0C, DISPLAY0C, DISP1_RA),
+ DISP_SCALING_REG_INFO(DISPLAY_1B, DISPLAY1B, DISP1_RA),
+ DISP_SCALING_REG_INFO(MAX_ID, DISPLAY1B, DISP1_RA), /*dummy entry*/
+ DISP_SCALING_REG_INFO(DISPLAY_0AB, DISPLAY0AB, DISP2_RA),
+ DISP_SCALING_REG_INFO(DISPLAY_0BB, DISPLAY0BB, DISP2_RA),
+ DISP_SCALING_REG_INFO(DISPLAY_0CB, DISPLAY0CB, DISP2_RA),
+ DISP_SCALING_REG_INFO(DISPLAY_1BB, DISPLAY1BB, DISP2_RA),
+};
+
+#define VI_TH_RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_VI_BASE) + VI_MCCIF_##r##_HYST)
+#define VI_TM_RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_VI_BASE) + VI_TIMEOUT_WOCAL_VI)
+#define VI_TL_RA(r) \
+ ((u32)IO_ADDRESS(TEGRA_VI_BASE) + VI_RESERVE_##r)
+
+struct la_scaling_reg_info vi_info[] = {
+ {
+ ID(VI_WSB),
+ VI_TL_RA(4), MASK(7 : 0), SHIFT(7 : 0),
+ VI_TM_RA(0), MASK(7 : 0), SHIFT(7 : 0),
+ VI_TH_RA(VIWSB), MASK(7 : 0), SHIFT(7 : 0)
+ },
+ {
+ ID(VI_WU),
+ VI_TL_RA(3), MASK(15 : 8), SHIFT(15 : 8),
+ VI_TM_RA(0), MASK(15 : 8), SHIFT(15 : 8),
+ VI_TH_RA(VIWU), MASK(7 : 0), SHIFT(7 : 0)
+ },
+ {
+ ID(VI_WV),
+ VI_TL_RA(3), MASK(7 : 0), SHIFT(7 : 0),
+ VI_TM_RA(0), MASK(23 : 16), SHIFT(23 : 16),
+ VI_TH_RA(VIWV), MASK(7 : 0), SHIFT(7 : 0)
+ },
+ {
+ ID(VI_WY),
+ VI_TL_RA(4), MASK(15 : 8), SHIFT(15 : 8),
+ VI_TM_RA(0), MASK(31 : 24), SHIFT(31 : 24),
+ VI_TH_RA(VIWY), MASK(7 : 0), SHIFT(7 : 0)
+ }
+};
+
+static struct la_scaling_info scaling_info[TEGRA_LA_MAX_ID];
+static int la_scaling_enable_count;
+
+#define VALIDATE_ID(id) \
+ do { \
+ if (id >= TEGRA_LA_MAX_ID) \
+ return -EINVAL; \
+ BUG_ON(la_info[id].id != id); \
+ } while (0)
+
+#define VALIDATE_BW(bw_in_mbps) \
+ do { \
+ if (bw_in_mbps >= 4096) \
+ return -EINVAL; \
+ } while (0)
+
+#define VALIDATE_THRESHOLDS(tl, tm, th) \
+ do { \
+ if (tl > 100 || tm > 100 || th > 100) \
+ return -EINVAL; \
+ } while (0)
+
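+/*
+ * Program a client's low/mid/high scaling thresholds, expressed as a
+ * percentage of the latency allowance value currently programmed for it.
+ */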
+static void set_thresholds(struct la_scaling_reg_info *info,
+ enum tegra_la_id id)
+{
+ unsigned long reg_read;
+ unsigned long reg_write;
+ unsigned int thresh_low;
+ unsigned int thresh_mid;
+ unsigned int thresh_high;
+ int la_set;
+
+ reg_read = readl(la_info[id].reg_addr);
+ la_set = (reg_read & la_info[id].mask) >> la_info[id].shift;
+ /* la should be set before enabling scaling. */
+ BUG_ON(la_set != scaling_info[id].la_set);
+
+ thresh_low = (scaling_info[id].threshold_low * la_set) / 100;
+ thresh_mid = (scaling_info[id].threshold_mid * la_set) / 100;
+ thresh_high = (scaling_info[id].threshold_high * la_set) / 100;
+ la_debug("%s: la_set=%d, thresh_low=%d(%d%%), thresh_mid=%d(%d%%),"
+ " thresh_high=%d(%d%%) ", __func__, la_set,
+ thresh_low, scaling_info[id].threshold_low,
+ thresh_mid, scaling_info[id].threshold_mid,
+ thresh_high, scaling_info[id].threshold_high);
+
+ reg_read = readl(info->tl_reg_addr);
+ reg_write = (reg_read & ~info->tl_mask) |
+ (thresh_low << info->tl_shift);
+ writel(reg_write, info->tl_reg_addr);
+ la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
+ (u32)info->tl_reg_addr, (u32)reg_read, (u32)reg_write);
+
+ reg_read = readl(info->tm_reg_addr);
+ reg_write = (reg_read & ~info->tm_mask) |
+ (thresh_mid << info->tm_shift);
+ writel(reg_write, info->tm_reg_addr);
+ la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
+ (u32)info->tm_reg_addr, (u32)reg_read, (u32)reg_write);
+
+ reg_read = readl(info->th_reg_addr);
+ reg_write = (reg_read & ~info->th_mask) |
+ (thresh_high << info->th_shift);
+ writel(reg_write, info->th_reg_addr);
+ la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
+ (u32)info->th_reg_addr, (u32)reg_read, (u32)reg_write);
+}
+
+static void set_disp_latency_thresholds(enum tegra_la_id id)
+{
+ set_thresholds(&disp_info[id - ID(DISPLAY_0A)], id);
+}
+
+static void set_vi_latency_thresholds(enum tegra_la_id id)
+{
+ set_thresholds(&vi_info[id - ID(VI_WSB)], id);
+}
+
+/* Sets the latency allowance based on a client's memory bandwidth requirement.
+ * The bandwidth passed in is in megabytes per second.
+ */
+int tegra_set_latency_allowance(enum tegra_la_id id,
+ unsigned int bandwidth_in_mbps)
+{
+ int ideal_la;
+ int la_to_set;
+ unsigned long reg_read;
+ unsigned long reg_write;
+ int bytes_per_atom = normal_atom_size;
+ struct la_client_info *ci;
+
+ VALIDATE_ID(id);
+ VALIDATE_BW(bandwidth_in_mbps);
+ if (id == ID(FDCDRD) || id == ID(FDCDWR) ||
+ id == ID(FDCDRD2) || id == ID(FDCDWR2))
+ bytes_per_atom = fdc_atom_size;
+
+ ci = &la_info[id];
+
+ if (bandwidth_in_mbps == 0) {
+ la_to_set = MC_LA_MAX_VALUE;
+ } else {
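+ /*
+ * ideal_la is the time, in ns_per_tick (30 ns) units, needed to
+ * drain the client's fifo at the requested bandwidth; subtracting
+ * the worst-case expiration time and one tick of margin gives the
+ * value to program. E.g. DISPLAY_0A (128 atoms * 16 bytes) at
+ * 1000 MB/s: 2048000/30000 = 68 ticks, 68 - 1050/30 - 1 = 32.
+ */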
+ ideal_la = (ci->fifo_size_in_atoms * bytes_per_atom * 1000) /
+ (bandwidth_in_mbps * ns_per_tick);
+ la_to_set = ideal_la - (ci->expiration_in_ns/ns_per_tick) - 1;
+ }
+
+ la_debug("\n%s:id=%d,bw=%dmbps, la_to_set=%d",
+ __func__, id, bandwidth_in_mbps, la_to_set);
+ la_to_set = (la_to_set < 0) ? 0 : la_to_set;
+ la_to_set = (la_to_set > MC_LA_MAX_VALUE) ? MC_LA_MAX_VALUE : la_to_set;
+ scaling_info[id].actual_la_to_set = la_to_set;
+
+ /* until display can use latency allowance scaling, use a more
+ * aggressive LA setting. Bug 862709 */
+ if (id >= ID(DISPLAY_0A) && id <= ID(DISPLAY_HCB))
+ la_to_set /= 3;
+
+ spin_lock(&safety_lock);
+ reg_read = readl(ci->reg_addr);
+ reg_write = (reg_read & ~ci->mask) |
+ (la_to_set << ci->shift);
+ writel(reg_write, ci->reg_addr);
+ scaling_info[id].la_set = la_to_set;
+ la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
+ (u32)ci->reg_addr, (u32)reg_read, (u32)reg_write);
+ spin_unlock(&safety_lock);
+ return 0;
+}
+
+/* Thresholds for scaling are specified as a percentage of fifo freeness.
+ * If threshold_low is specified as 20%, then when the fifo is between
+ * 0 and 20% free, la is used as programmed_la.
+ * If threshold_mid is specified as 50%, then when the fifo is between
+ * 20 and 50% free, la is used as programmed_la/2.
+ * If threshold_high is specified as 80%, then when the fifo is between
+ * 50 and 80% free, la is used as programmed_la/4.
+ * When the fifo is between 80 and 100% free, la is used as 0 (highest
+ * priority).
+ */
+int tegra_enable_latency_scaling(enum tegra_la_id id,
+ unsigned int threshold_low,
+ unsigned int threshold_mid,
+ unsigned int threshold_high)
+{
+ unsigned long reg;
+ unsigned long scaling_enable_reg = MC_RA(ARB_OVERRIDE);
+
+ VALIDATE_ID(id);
+ VALIDATE_THRESHOLDS(threshold_low, threshold_mid, threshold_high);
+
+ if (la_info[id].scaling_supported == false)
+ goto exit;
+
+ spin_lock(&safety_lock);
+
+ la_debug("\n%s: id=%d, tl=%d, tm=%d, th=%d", __func__,
+ id, threshold_low, threshold_mid, threshold_high);
+ scaling_info[id].threshold_low = threshold_low;
+ scaling_info[id].threshold_mid = threshold_mid;
+ scaling_info[id].threshold_high = threshold_high;
+ scaling_info[id].scaling_ref_count++;
+
+ if (id >= ID(DISPLAY_0A) && id <= ID(DISPLAY_1BB))
+ set_disp_latency_thresholds(id);
+ else if (id >= ID(VI_WSB) && id <= ID(VI_WY))
+ set_vi_latency_thresholds(id);
+ if (!la_scaling_enable_count++) {
+ reg = readl(scaling_enable_reg);
+ reg |= (1 << GLOBAL_LATENCY_SCALING_ENABLE_BIT);
+ writel(reg, scaling_enable_reg);
+ la_debug("enabled scaling.");
+ }
+ spin_unlock(&safety_lock);
+exit:
+ return 0;
+}
+
+void tegra_disable_latency_scaling(enum tegra_la_id id)
+{
+ unsigned long reg;
+ unsigned long scaling_enable_reg = MC_RA(ARB_OVERRIDE);
+
+ if (id >= TEGRA_LA_MAX_ID)
+ return;
+ BUG_ON(la_info[id].id != id);
+
+ if (la_info[id].scaling_supported == false)
+ return;
+ spin_lock(&safety_lock);
+ la_debug("\n%s: id=%d", __func__, id);
+ scaling_info[id].scaling_ref_count--;
+ BUG_ON(scaling_info[id].scaling_ref_count < 0);
+
+ if (!--la_scaling_enable_count) {
+ reg = readl(scaling_enable_reg);
+ reg = reg & ~(1 << GLOBAL_LATENCY_SCALING_ENABLE_BIT);
+ writel(reg, scaling_enable_reg);
+ la_debug("disabled scaling.");
+ }
+ spin_unlock(&safety_lock);
+}
+
+static int la_regs_show(struct seq_file *s, void *unused)
+{
+ unsigned i;
+ unsigned long la;
+
+ /* iterate the list, but don't print MAX_ID */
+ for (i = 0; i < ARRAY_SIZE(la_info) - 1; i++) {
+ la = (readl(la_info[i].reg_addr) & la_info[i].mask)
+ >> la_info[i].shift;
+ seq_printf(s, "%-16s: %4lu\n", la_info[i].name, la);
+ }
+
+ return 0;
+}
+
+static int dbg_la_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, la_regs_show, inode->i_private);
+}
+
+static const struct file_operations regs_fops = {
+ .open = dbg_la_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_latency_allowance_debugfs_init(void)
+{
+ if (latency_debug_dir)
+ return 0;
+
+ latency_debug_dir = debugfs_create_dir("tegra_latency", NULL);
+
+ debugfs_create_file("la_info", S_IRUGO, latency_debug_dir, NULL,
+ &regs_fops);
+
+ return 0;
+}
+
+late_initcall(tegra_latency_allowance_debugfs_init);
+
+static int __init tegra_latency_allowance_init(void)
+{
+ la_scaling_enable_count = 0;
+ return 0;
+}
+
+core_initcall(tegra_latency_allowance_init);
+
+#if TEST_LA_CODE
+static int __init test_la(void)
+{
+ int err;
+ enum tegra_la_id id = 0;
+ int repeat_count = 5;
+
+ do {
+ for (id = 0; id < TEGRA_LA_MAX_ID; id++) {
+ err = tegra_set_latency_allowance(id, 200);
+ if (err)
+ la_debug("\n***tegra_set_latency_allowance,"
+ " err=%d", err);
+ }
+
+ for (id = 0; id < TEGRA_LA_MAX_ID; id++) {
+ if (id >= ID(DISPLAY_0AB) && id <= ID(DISPLAY_HCB))
+ continue;
+ if (id >= ID(VI_WSB) && id <= ID(VI_WY))
+ continue;
+ err = tegra_enable_latency_scaling(id, 20, 50, 80);
+ if (err)
+ la_debug("\n***tegra_enable_latency_scaling,"
+ " err=%d", err);
+ }
+
+ la_debug("la_scaling_enable_count =%d",
+ la_scaling_enable_count);
+ for (id = 0; id < TEGRA_LA_MAX_ID; id++) {
+ if (id >= ID(DISPLAY_0AB) && id <= ID(DISPLAY_HCB))
+ continue;
+ if (id >= ID(VI_WSB) && id <= ID(VI_WY))
+ continue;
+ tegra_disable_latency_scaling(id);
+ }
+ la_debug("la_scaling_enable_count=%d",
+ la_scaling_enable_count);
+ } while (--repeat_count);
+ return 0;
+}
+
+late_initcall(test_la);
+#endif
diff --git a/arch/arm/mach-tegra/mc.c b/arch/arm/mach-tegra/mc.c
new file mode 100644
index 000000000000..0dff50461a3c
--- /dev/null
+++ b/arch/arm/mach-tegra/mc.c
@@ -0,0 +1,73 @@
+/*
+ * arch/arm/mach-tegra/mc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <mach/iomap.h>
+#include <mach/mc.h>
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static DEFINE_SPINLOCK(tegra_mc_lock);
+
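+/*
+ * 'client' encodes the MC register offset in its upper bits and the bit
+ * position of that client's priority field in its low byte.
+ */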
+void tegra_mc_set_priority(unsigned long client, unsigned long prio)
+{
+ unsigned long mc_base = IO_TO_VIRT(TEGRA_MC_BASE);
+ unsigned long reg = client >> 8;
+ int field = client & 0xff;
+ unsigned long val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tegra_mc_lock, flags);
+ val = readl(mc_base + reg);
+ val &= ~(TEGRA_MC_PRIO_MASK << field);
+ val |= prio << field;
+ writel(val, mc_base + reg);
+ spin_unlock_irqrestore(&tegra_mc_lock, flags);
+}
+
+int tegra_mc_get_tiled_memory_bandwidth_multiplier(void)
+{
+ return 1;
+}
+
+#else
+ /* !!!FIXME!!! IMPLEMENT tegra_mc_set_priority() */
+
+#include "tegra3_emc.h"
+
+/*
+ * On T30 with DDR3, the second 16-byte half of a DDR3 atom falls on the
+ * second line and is discarded in tiling mode.
+ */
+int tegra_mc_get_tiled_memory_bandwidth_multiplier(void)
+{
+ int type;
+
+ type = tegra_emc_get_dram_type();
+ WARN_ONCE(type == -1, "unknown DRAM type because DVFS is disabled\n");
+
+ if (type == DRAM_TYPE_DDR3)
+ return 2;
+ else
+ return 1;
+}
+#endif
diff --git a/arch/arm/mach-tegra/p852/Kconfig b/arch/arm/mach-tegra/p852/Kconfig
new file mode 100644
index 000000000000..ca44f9543be2
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/Kconfig
@@ -0,0 +1,110 @@
+config MACH_P852
+ bool "P852 board"
+ depends on ARCH_TEGRA_2x_SOC
+ help
+ Support for NVIDIA P852 platform
+
+config P852_SKU1
+ bool "P852 SKU1 board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU1 platform
+
+config P852_SKU1_B00
+ bool "P852 SKU1 rev B board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU1 B00 platform
+
+config P852_SKU1_C0x
+ bool "P852 SKU1 rev C boards"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU1 C0x platform
+
+config P852_SKU3
+ bool "P852 SKU3 board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU3 platform
+
+config P852_SKU5_B00
+ bool "P852 SKU5 rev B board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU5 B00 platform
+
+config P852_SKU5_C01
+ bool "P852 SKU5 rev C board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU5 C01 platform
+
+config P852_SKU8_B00
+ bool "P852 SKU8 rev B board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU8 B00 platform
+
+config P852_SKU8_C01
+ bool "P852 SKU8 rev C board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU8 C01 platform
+
+config P852_SKU9_B00
+ bool "P852 SKU9 rev B board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU9 B00 platform
+
+config P852_SKU9_C01
+ bool "P852 SKU9 rev C board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU9 C01 platform
+
+config P852_SKU13
+ bool "P852 SKU13 board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU13 platform
+
+config P852_SKU13_B00
+ bool "P852 SKU13 rev B board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU13 B00 platform
+
+config P852_SKU23
+ bool "P852 SKU23 board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU23 platform
+
+config P852_SKU23_B00
+ bool "P852 SKU23 rev B board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU23 B00 platform
+
+config P852_SKU23_C01
+ bool "P852 SKU23 rev C board"
+ depends on MACH_P852
+ default MACH_P852
+ help
+ Support for NVIDIA P852 SKU23 C01 platform
diff --git a/arch/arm/mach-tegra/p852/Makefile b/arch/arm/mach-tegra/p852/Makefile
new file mode 100644
index 000000000000..2f04ba08f71f
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/Makefile
@@ -0,0 +1,39 @@
+#
+# arch/arm/mach-tegra/p852/Makefile
+#
+# Copyright (c) 2010-2011, NVIDIA Corporation.
+#
+# This software is licensed under the terms of the GNU General Public
+# License version 2, as published by the Free Software Foundation, and
+# may be copied, distributed, and modified under those terms.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+#
+
+obj-${CONFIG_MACH_P852} += board-p852.o
+obj-${CONFIG_MACH_P852} += board-p852-sdhci.o
+obj-${CONFIG_MACH_P852} += board-p852-i2c.o
+obj-${CONFIG_MACH_P852} += board-p852-power.o
+obj-${CONFIG_MACH_P852} += board-p852-pinmux.o
+obj-${CONFIG_MACH_P852} += board-p852-gpio.o
+obj-${CONFIG_MACH_P852} += board-p852-panel.o
+
+obj-${CONFIG_P852_SKU1} += board-p852-sku1.o
+obj-${CONFIG_P852_SKU1_B00} += board-p852-sku1-b00.o
+obj-${CONFIG_P852_SKU1_C0x} += board-p852-sku1-c0x.o
+obj-${CONFIG_P852_SKU3} += board-p852-sku3.o
+obj-${CONFIG_P852_SKU5_B00} += board-p852-sku5-b00.o
+obj-${CONFIG_P852_SKU5_C01} += board-p852-sku5-c01.o
+obj-${CONFIG_P852_SKU8_B00} += board-p852-sku8-b00.o
+obj-${CONFIG_P852_SKU8_C01} += board-p852-sku8-c01.o
+obj-${CONFIG_P852_SKU9_B00} += board-p852-sku9-b00.o
+obj-${CONFIG_P852_SKU9_C01} += board-p852-sku9-c01.o
+obj-${CONFIG_P852_SKU13} += board-p852-sku13.o
+obj-${CONFIG_P852_SKU13_B00} += board-p852-sku13-b00.o
+obj-${CONFIG_P852_SKU23} += board-p852-sku23.o
+obj-${CONFIG_P852_SKU23_B00} += board-p852-sku23-b00.o
+obj-${CONFIG_P852_SKU23_C01} += board-p852-sku23-c01.o
diff --git a/arch/arm/mach-tegra/p852/board-p852-gpio.c b/arch/arm/mach-tegra/p852/board-p852-gpio.c
new file mode 100644
index 000000000000..71f568087c5d
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-gpio.c
@@ -0,0 +1,158 @@
+/*
+ * arch/arm/mach-tegra/board-p852-gpio.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+
+#include "board-p852.h"
+
+static struct gpio p852_sku23_gpios[] = {
+ {TEGRA_GPIO_PW1, GPIOF_OUT_INIT_LOW, "usbpwr0_ena"},
+ {TEGRA_GPIO_PB2, GPIOF_OUT_INIT_LOW, "usbpwr1_ena"},
+ {TEGRA_GPIO_PA0, GPIOF_OUT_INIT_LOW, "a0"},
+ {TEGRA_GPIO_PV2, GPIOF_OUT_INIT_HIGH, "v2"},
+ {TEGRA_GPIO_PT4, GPIOF_OUT_INIT_LOW, "t4"},
+ {TEGRA_GPIO_PD6, GPIOF_OUT_INIT_HIGH, "d6"},
+ {TEGRA_GPIO_PI3, GPIOF_OUT_INIT_LOW, "i3"},
+ {TEGRA_GPIO_PV3, GPIOF_OUT_INIT_HIGH, "v3"},
+ {TEGRA_GPIO_PW4, GPIOF_IN, "w4"},
+ {TEGRA_GPIO_PW5, GPIOF_IN, "w5"},
+ {TEGRA_GPIO_PT1, GPIOF_OUT_INIT_LOW, "t1"},
+ {TEGRA_GPIO_PW3, GPIOF_OUT_INIT_HIGH, "w3"},
+ {TEGRA_GPIO_PD5, GPIOF_IN, "d5"},
+ {TEGRA_GPIO_PBB1, GPIOF_OUT_INIT_LOW, "bb1"},
+};
+
+static struct gpio p852_sku23_b00_gpios[] = {
+ {TEGRA_GPIO_PW1, GPIOF_OUT_INIT_HIGH, "usbpwr0_ena"},
+ {TEGRA_GPIO_PB2, GPIOF_OUT_INIT_HIGH, "usbpwr1_ena"},
+ {TEGRA_GPIO_PA0, GPIOF_OUT_INIT_LOW, "a0"},
+ {TEGRA_GPIO_PV2, GPIOF_OUT_INIT_HIGH, "v2"},
+ {TEGRA_GPIO_PT4, GPIOF_OUT_INIT_LOW, "t4"},
+ {TEGRA_GPIO_PD6, GPIOF_OUT_INIT_HIGH, "d6"},
+ {TEGRA_GPIO_PI3, GPIOF_OUT_INIT_LOW, "i3"},
+ {TEGRA_GPIO_PV3, GPIOF_OUT_INIT_HIGH, "v3"},
+ {TEGRA_GPIO_PW4, GPIOF_IN, "w4"},
+ {TEGRA_GPIO_PW5, GPIOF_IN, "w5"},
+ {TEGRA_GPIO_PT1, GPIOF_OUT_INIT_LOW, "t1"},
+ {TEGRA_GPIO_PW3, GPIOF_OUT_INIT_HIGH, "w3"},
+ {TEGRA_GPIO_PD5, GPIOF_IN, "d5"},
+ {TEGRA_GPIO_PBB1, GPIOF_OUT_INIT_LOW, "bb1"},
+};
+
+static struct gpio p852_sku5_gpios[] = {
+ {TEGRA_GPIO_PW1, GPIOF_OUT_INIT_HIGH, "usbpwr0_ena"},
+ {TEGRA_GPIO_PB2, GPIOF_OUT_INIT_HIGH, "usbpwr1_ena"},
+ {TEGRA_GPIO_PA0, GPIOF_OUT_INIT_LOW, "a0"},
+ {TEGRA_GPIO_PV2, GPIOF_OUT_INIT_HIGH, "v2"},
+ {TEGRA_GPIO_PT4, GPIOF_OUT_INIT_LOW, "t4"},
+ {TEGRA_GPIO_PD6, GPIOF_OUT_INIT_HIGH, "d6"},
+ {TEGRA_GPIO_PI3, GPIOF_OUT_INIT_LOW, "i3"},
+ {TEGRA_GPIO_PV3, GPIOF_OUT_INIT_HIGH, "v3"},
+ {TEGRA_GPIO_PW4, GPIOF_IN, "w4"},
+ {TEGRA_GPIO_PW5, GPIOF_IN, "w5"},
+ {TEGRA_GPIO_PT1, GPIOF_OUT_INIT_LOW, "t1"},
+ {TEGRA_GPIO_PW3, GPIOF_OUT_INIT_HIGH, "w3"},
+ {TEGRA_GPIO_PD5, GPIOF_IN, "d5"},
+ {TEGRA_GPIO_PBB1, GPIOF_OUT_INIT_LOW, "bb1"},
+ {TEGRA_GPIO_PS3, GPIOF_IN, "s3"},
+};
+
+static struct gpio p852_sku8_gpios[] = {
+ {TEGRA_GPIO_PW1, GPIOF_OUT_INIT_HIGH, "w1"},
+ {TEGRA_GPIO_PB2, GPIOF_OUT_INIT_HIGH, "b2"},
+};
+
+static struct gpio p852_sku13_b00_gpios[] = {
+ {TEGRA_GPIO_PW1, GPIOF_OUT_INIT_HIGH, "w1"},
+ {TEGRA_GPIO_PB2, GPIOF_OUT_INIT_HIGH, "b2"},
+ {TEGRA_GPIO_PW2, GPIOF_IN, "w2"},
+ {TEGRA_GPIO_PW3, GPIOF_IN, "w3"},
+ {TEGRA_GPIO_PD5, GPIOF_OUT_INIT_LOW, "d5"},
+ {TEGRA_GPIO_PBB1, GPIOF_OUT_INIT_LOW, "bb1"},
+ {TEGRA_GPIO_PN7, GPIOF_OUT_INIT_LOW, "n7"},
+ {TEGRA_GPIO_PA6, GPIOF_OUT_INIT_HIGH, "a6"},
+ {TEGRA_GPIO_PA7, GPIOF_OUT_INIT_HIGH, "a7"},
+};
+
+static struct gpio p852_gpios[] = {
+ {TEGRA_GPIO_PW1, GPIOF_OUT_INIT_LOW, "w1"},
+ {TEGRA_GPIO_PB2, GPIOF_OUT_INIT_LOW, "b2"},
+ {TEGRA_GPIO_PW2, GPIOF_IN, "w2"},
+ {TEGRA_GPIO_PW3, GPIOF_IN, "w3"},
+ {TEGRA_GPIO_PD5, GPIOF_OUT_INIT_LOW, "d5"},
+ {TEGRA_GPIO_PBB1, GPIOF_OUT_INIT_LOW, "bb1"},
+ {TEGRA_GPIO_PN7, GPIOF_OUT_INIT_LOW, "n7"},
+ {TEGRA_GPIO_PA6, GPIOF_OUT_INIT_HIGH, "a6"},
+ {TEGRA_GPIO_PA7, GPIOF_OUT_INIT_HIGH, "a7"},
+};
+
+void __init p852_gpio_init(void)
+{
+ int pin_count = 0;
+ int i;
+ struct gpio *gpios_info = NULL;
+
+ switch (system_rev) {
+ case P852_SKU23:
+ {
+ gpios_info = p852_sku23_gpios;
+ pin_count = ARRAY_SIZE(p852_sku23_gpios);
+ }
+ break;
+ case P852_SKU23_B00:
+ case P852_SKU23_C01:
+ {
+ gpios_info = p852_sku23_b00_gpios;
+ pin_count = ARRAY_SIZE(p852_sku23_b00_gpios);
+ }
+ break;
+ case P852_SKU5_B00:
+ case P852_SKU5_C01:
+ {
+ gpios_info = p852_sku5_gpios;
+ pin_count = ARRAY_SIZE(p852_sku5_gpios);
+ }
+ break;
+ case P852_SKU8_B00:
+ case P852_SKU8_C01:
+ case P852_SKU9_B00:
+ case P852_SKU9_C01:
+ {
+ gpios_info = p852_sku8_gpios;
+ pin_count = ARRAY_SIZE(p852_sku8_gpios);
+ }
+ break;
+ case P852_SKU13_B00:
+ {
+ gpios_info = p852_sku13_b00_gpios;
+ pin_count = ARRAY_SIZE(p852_sku13_b00_gpios);
+ }
+ break;
+ default:
+ {
+ gpios_info = p852_gpios;
+ pin_count = ARRAY_SIZE(p852_gpios);
+ }
+ }
+
+ gpio_request_array(gpios_info, pin_count);
+ for (i = 0; i < pin_count; i++) {
+ tegra_gpio_enable(gpios_info[i].gpio);
+ gpio_export(gpios_info[i].gpio, true);
+ }
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-i2c.c b/arch/arm/mach-tegra/p852/board-p852-i2c.c
new file mode 100644
index 000000000000..041ec252b6c1
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-i2c.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-tegra/board-p852-i2c.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <linux/i2c.h>
+#include <mach/pinmux.h>
+
+#include "board-p852.h"
+
+static struct resource i2c_resource1[] = {
+ [0] = {
+ .start = INT_I2C,
+ .end = INT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C_BASE,
+ .end = TEGRA_I2C_BASE + TEGRA_I2C_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource2[] = {
+ [0] = {
+ .start = INT_I2C2,
+ .end = INT_I2C2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C2_BASE,
+ .end = TEGRA_I2C2_BASE + TEGRA_I2C2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource3[] = {
+ [0] = {
+ .start = INT_I2C3,
+ .end = INT_I2C3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C3_BASE,
+ .end = TEGRA_I2C3_BASE + TEGRA_I2C3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource4[] = {
+ [0] = {
+ .start = INT_DVC,
+ .end = INT_DVC,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_DVC_BASE,
+ .end = TEGRA_DVC_BASE + TEGRA_DVC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static const struct tegra_pingroup_config i2c2_ddc = {
+ .pingroup = TEGRA_PINGROUP_DDC,
+ .func = TEGRA_MUX_I2C2,
+};
+
+static const struct tegra_pingroup_config i2c_i2cp = {
+ .pingroup = TEGRA_PINGROUP_I2CP,
+ .func = TEGRA_MUX_I2C,
+};
+
+static struct tegra_i2c_platform_data p852_i2c1_platform_data = {
+ .adapter_nr = 0,
+ .bus_count = 1,
+ .bus_clk_rate = {400000},
+};
+
+static struct tegra_i2c_platform_data p852_i2c2_platform_data = {
+ .adapter_nr = 1,
+ .bus_count = 1,
+ .bus_clk_rate = {100000},
+ .bus_mux = {&i2c2_ddc},
+ .bus_mux_len = {1},
+};
+
+static struct tegra_i2c_platform_data p852_i2c3_platform_data = {
+ .adapter_nr = 2,
+ .bus_count = 1,
+ .bus_clk_rate = {400000},
+};
+
+static struct tegra_i2c_platform_data p852_dvc_platform_data = {
+ .adapter_nr = 3,
+ .bus_count = 1,
+ .bus_clk_rate = {100000},
+ .bus_mux = {&i2c_i2cp},
+ .bus_mux_len = {1},
+ .is_dvc = true,
+};
+
+struct platform_device tegra_i2c_device[] = {
+ {
+ .name = "tegra-i2c",
+ .id = 0,
+ .resource = i2c_resource1,
+ .num_resources = ARRAY_SIZE(i2c_resource1),
+ .dev = {
+ .platform_data = &p852_i2c1_platform_data,
+ },
+ },
+ {
+ .name = "tegra-i2c",
+ .id = 1,
+ .resource = i2c_resource2,
+ .num_resources = ARRAY_SIZE(i2c_resource2),
+ .dev = {
+ .platform_data = &p852_i2c2_platform_data,
+ },
+ },
+ {
+ .name = "tegra-i2c",
+ .id = 2,
+ .resource = i2c_resource3,
+ .num_resources = ARRAY_SIZE(i2c_resource3),
+ .dev = {
+ .platform_data = &p852_i2c3_platform_data,
+ },
+ },
+ {
+ .name = "tegra-i2c",
+ .id = 3,
+ .resource = i2c_resource4,
+ .num_resources = ARRAY_SIZE(i2c_resource4),
+ .dev = {
+ .platform_data = &p852_dvc_platform_data,
+ },
+ }
+};
+
+void __init p852_i2c_set_default_clock(int adapter, unsigned long clock)
+{
+ if (adapter >= 0 && adapter < ARRAY_SIZE(tegra_i2c_device))
+ ((struct tegra_i2c_platform_data *)tegra_i2c_device[adapter].
+ dev.platform_data)->bus_clk_rate[0] = clock;
+}
+
+void __init p852_i2c_init(void)
+{
+ int i;
+ unsigned int i2c_config = 0;
+ if (p852_sku_peripherals & P852_SKU_I2C_ENABLE) {
+ for (i = 0; i < P852_MAX_I2C; i++) {
+ i2c_config =
+ (p852_i2c_peripherals >> (P852_I2C_SHIFT * i));
+ if (i2c_config & P852_I2C_ENABLE)
+ platform_device_register(&tegra_i2c_device[i]);
+ }
+ }
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-panel.c b/arch/arm/mach-tegra/p852/board-p852-panel.c
new file mode 100644
index 000000000000..bfd35fa5da53
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-panel.c
@@ -0,0 +1,191 @@
+/*
+ * arch/arm/mach-tegra/board-p852-panel.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/nvhost.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "board-p852.h"
+
+#define CARVEOUT_IRAM {\
+ .name = "iram",\
+ .usage_mask = NVMAP_HEAP_CARVEOUT_IRAM,\
+ .base = TEGRA_IRAM_BASE,\
+ .size = TEGRA_IRAM_SIZE,\
+ .buddy_size = 0, /* no buddy allocation for IRAM */\
+}
+
+static int p852_panel_enable(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static int p852_panel_disable(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static struct resource p852_disp_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct tegra_dc_mode p852_panel_modes[] = {
+/* Timings for the LG LB070WV4 panel */
+ {
+ .pclk = 33230769,
+
+ .h_ref_to_sync = 1,
+ .v_ref_to_sync = 1,
+
+ .h_sync_width = 128,
+ .v_sync_width = 2,
+
+ .h_back_porch = 88,
+ .v_back_porch = 30,
+
+ .h_front_porch = 40,
+ .v_front_porch = 13,
+
+ .h_active = 800,
+ .v_active = 480,
+ },
+};
+
+static struct tegra_fb_data p852_fb_data = {
+ .win = 0,
+ .xres = 800,
+ .yres = 480,
+ .bits_per_pixel = 16,
+};
+
+static struct tegra_dc_out p852_disp_out = {
+ .type = TEGRA_DC_OUT_RGB,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .modes = p852_panel_modes,
+ .n_modes = ARRAY_SIZE(p852_panel_modes),
+
+ .enable = p852_panel_enable,
+ .disable = p852_panel_disable,
+};
+
+static struct tegra_dc_platform_data p852_disp_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &p852_disp_out,
+ .fb = &p852_fb_data,
+};
+
+static struct nvhost_device p852_disp_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = p852_disp_resources,
+ .num_resources = ARRAY_SIZE(p852_disp_resources),
+ .dev = {
+ .platform_data = &p852_disp_pdata,
+ },
+};
+
+static struct nvmap_platform_carveout p852_carveouts[] = {
+ [0] = CARVEOUT_IRAM,
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0,
+ .size = 0,
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data p852_nvmap_data = {
+ .carveouts = p852_carveouts,
+ .nr_carveouts = ARRAY_SIZE(p852_carveouts),
+};
+
+static struct platform_device p852_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &p852_nvmap_data,
+ },
+};
+
+static struct platform_device *p852_gfx_devices[] __initdata = {
+ &tegra_grhost_device,
+ &tegra_pwfm2_device,
+};
+
+int __init p852_panel_init(void)
+{
+ int err;
+ struct resource *res;
+
+ pr_info("%s\n", __func__);
+
+ p852_carveouts[1].base = tegra_carveout_start;
+ p852_carveouts[1].size = tegra_carveout_size;
+
+ err = platform_device_register(&p852_nvmap_device);
+ if (err)
+ return err;
+
+ err = platform_add_devices(p852_gfx_devices,
+ ARRAY_SIZE(p852_gfx_devices));
+
+ res = nvhost_get_resource_byname(&p852_disp_device,
+ IORESOURCE_MEM, "fbmem");
+
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+
+ if (!err)
+ err = nvhost_device_register(&p852_disp_device);
+
+ return err;
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-pinmux.c b/arch/arm/mach-tegra/p852/board-p852-pinmux.c
new file mode 100644
index 000000000000..0ded989f7a13
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-pinmux.c
@@ -0,0 +1,439 @@
+/*
+ * arch/arm/mach-tegra/board-p852-pinmux.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/pinmux.h>
+#include <asm/mach-types.h>
+
+#include "board-p852.h"
+
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+#define P852_PAD_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_DISABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_18, \
+ .pull_up = TEGRA_PULL_22, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+#define P852_PAD_DRIVE_HSM(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_ENABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+#define DAP_PAD_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_3, \
+ .pull_up = TEGRA_PULL_3, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+static __initdata struct tegra_drive_pingroup_config p852_drive_pinmux[] = {
+ DEFAULT_DRIVE(DBG),
+ DEFAULT_DRIVE(DDC),
+ DEFAULT_DRIVE(VI1),
+ DEFAULT_DRIVE(VI2),
+ DEFAULT_DRIVE(SDIO1),
+ P852_PAD_DRIVE(SPI),
+ DAP_PAD_DRIVE(DAP1),
+ DAP_PAD_DRIVE(DAP2),
+ DEFAULT_DRIVE(CDEV1),
+ DEFAULT_DRIVE(CDEV2),
+};
+
+static __initdata struct tegra_drive_pingroup_config
+ p852_drive_pinmux_sku8_sku9[] = {
+ DAP_PAD_DRIVE(DAP3),
+};
+
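+/* Pinmux common to every P852 SKU, applied first by p852_pinmux_init(). */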
+static __initdata struct tegra_pingroup_config p852_common_pinmux[] = {
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSDI, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LCSN, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_SPI3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTE, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ /* IRDA is the same as the UART2 option for this pingroup */
+ {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCB, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_SPDIF, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_OWR, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_nand_pinmux[] = {
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_UARTA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+};
+
+static __initdata struct tegra_pingroup_config p852_sdio3_pinmux[] = {
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_uarta_pinmux[] = {
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+};
+
+static __initdata struct tegra_pingroup_config p852_ulpi_pinmux[] = {
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_uarta_1_pinmux[] = {
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_UARTA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+};
+
+static __initdata struct tegra_pingroup_config p852_uartd_pinmux[] = {
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_spi4_pinmux[] = {
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_SPI4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPI4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_spi4_1_pinmux[] = {
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_SPI4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_SPI4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPI4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPI4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_nor_pinmux[] = {
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_display_a_pinmux[] = {
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+};
+
+static __initdata struct tegra_pingroup_config p852_display_b_pinmux[] = {
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_drive_pingroup_config
+ p852_drive_pinmux_sku23[] = {
+ P852_PAD_DRIVE_HSM(SDMMC3),
+};
+
+static __initdata struct tegra_drive_pingroup_config
+ p852_drive_pinmux_sku13[] = {
+ P852_PAD_DRIVE_HSM(SDMMC3),
+};
+
+static __initdata struct tegra_pingroup_config p852_pupd_sku23[] = {
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_pupd_sku13[] = {
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+};
+
+static __initdata struct tegra_pingroup_config p852_pupd_sku5[] = {
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_NONE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+};
+
+static void tegra_pinmux_config_pupd_table(
+ const struct tegra_pingroup_config *config,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ /* config[i].func (the pinmux setting) is not used here; only the pull-up/down value is applied */
+ tegra_pinmux_config_pullupdown_table(&config[i], 1,
+ config[i].pupd);
+ }
+}
+
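+/*
+ * Apply the common pinmux, select the UART/ULPI, SDIO3, SPI4/UARTD,
+ * NOR/NAND and display A/B alternates from the peripheral masks set by
+ * the SKU file, then apply the drive and pull-up/down tables for the
+ * specific board revision.
+ */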
+void __init p852_pinmux_init(void)
+{
+ unsigned int sdio3_config = (p852_sdhci_peripherals >>
+ P852_SDHCI3_SHIFT) & P852_SDHCI_MASK;
+ unsigned int uartd_config = (p852_uart_peripherals >> P852_UARTD_SHIFT)
+ & P852_UART_MASK;
+ unsigned int uarta_config = (p852_uart_peripherals >> P852_UARTA_SHIFT)
+ & P852_UART_MASK;
+ unsigned int spi4_config = (p852_spi_peripherals >> P852_SPI4_SHIFT)
+ & P852_SPI_MASK;
+ unsigned int displayb_config = (p852_display_peripherals >>
+ P852_DISPB_SHIFT) & P852_DISP_MASK;
+
+ tegra_pinmux_config_table(p852_common_pinmux,
+ ARRAY_SIZE(p852_common_pinmux));
+
+ if ((uarta_config & P852_UART_ENABLE)
+ && (uarta_config & P852_UART_ALT_PIN_CFG)) {
+ tegra_pinmux_config_table(p852_uarta_1_pinmux,
+ ARRAY_SIZE(p852_uarta_1_pinmux));
+ } else {
+ tegra_pinmux_config_table(p852_ulpi_pinmux,
+ ARRAY_SIZE(p852_ulpi_pinmux));
+ }
+
+ if (sdio3_config & P852_SDHCI_ENABLE) {
+ tegra_pinmux_config_table(p852_sdio3_pinmux,
+ ARRAY_SIZE(p852_sdio3_pinmux));
+ } else {
+ tegra_pinmux_config_table(p852_uarta_pinmux,
+ ARRAY_SIZE(p852_uarta_pinmux));
+ }
+
+ if ((uartd_config & P852_UART_ENABLE) &&
+ (spi4_config & P852_SPI_ENABLE)) {
+ tegra_pinmux_config_table(p852_uartd_pinmux,
+ ARRAY_SIZE(p852_uartd_pinmux));
+ tegra_pinmux_config_table(p852_spi4_1_pinmux,
+ ARRAY_SIZE(p852_spi4_1_pinmux));
+ } else {
+ tegra_pinmux_config_table(p852_spi4_pinmux,
+ ARRAY_SIZE(p852_spi4_pinmux));
+ }
+
+ if (p852_sku_peripherals & P852_SKU_NOR_ENABLE) {
+ tegra_pinmux_config_table(p852_nor_pinmux,
+ ARRAY_SIZE(p852_nor_pinmux));
+ } else {
+ tegra_pinmux_config_table(p852_nand_pinmux,
+ ARRAY_SIZE(p852_nand_pinmux));
+ }
+
+ if (p852_sku_peripherals & P852_SKU_DISPLAY_ENABLE) {
+ if (displayb_config) {
+ tegra_pinmux_config_table(p852_display_b_pinmux,
+ ARRAY_SIZE(p852_display_b_pinmux));
+ } else {
+ tegra_pinmux_config_table(p852_display_a_pinmux,
+ ARRAY_SIZE(p852_display_a_pinmux));
+ }
+ }
+
+ tegra_drive_pinmux_config_table(p852_drive_pinmux,
+ ARRAY_SIZE(p852_drive_pinmux));
+
+ if (system_rev == P852_SKU23 ||
+ system_rev == P852_SKU23_B00 ||
+ system_rev == P852_SKU23_C01) {
+ tegra_drive_pinmux_config_table(p852_drive_pinmux_sku23,
+ ARRAY_SIZE(p852_drive_pinmux_sku23));
+
+ tegra_pinmux_config_pupd_table(p852_pupd_sku23,
+ ARRAY_SIZE(p852_pupd_sku23));
+ } else if (system_rev == P852_SKU13 ||
+ system_rev == P852_SKU13_B00) {
+ tegra_drive_pinmux_config_table(p852_drive_pinmux_sku13,
+ ARRAY_SIZE(p852_drive_pinmux_sku13));
+
+ tegra_pinmux_config_pupd_table(p852_pupd_sku13,
+ ARRAY_SIZE(p852_pupd_sku13));
+ } else if (system_rev == P852_SKU5_B00 || system_rev == P852_SKU5_C01) {
+ tegra_pinmux_config_pupd_table(p852_pupd_sku5,
+ ARRAY_SIZE(p852_pupd_sku5));
+ } else if (system_rev == P852_SKU8_B00 || system_rev == P852_SKU9_B00 ||
+ system_rev == P852_SKU8_C01 || system_rev == P852_SKU9_C01) {
+ tegra_drive_pinmux_config_table(p852_drive_pinmux_sku8_sku9,
+ ARRAY_SIZE(p852_drive_pinmux_sku8_sku9));
+ }
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-power.c b/arch/arm/mach-tegra/p852/board-p852-power.c
new file mode 100644
index 000000000000..dce9bd7e83e2
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-power.c
@@ -0,0 +1,225 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-power.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include "board-p852.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+static struct regulator_consumer_supply tps658621_sm0_supply[] = {
+ REGULATOR_SUPPLY("vdd_core", NULL),
+};
+static struct regulator_consumer_supply tps658621_sm1_supply[] = {
+ REGULATOR_SUPPLY("vdd_cpu", NULL),
+};
+static struct regulator_consumer_supply tps658621_sm2_supply[] = {
+ REGULATOR_SUPPLY("vdd_sm2", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo0_supply[] = {
+ REGULATOR_SUPPLY("vddio_pex_clk", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo1_supply[] = {
+ REGULATOR_SUPPLY("avdd_pll", NULL),
+ REGULATOR_SUPPLY("avdd_plla_pc", NULL),
+ REGULATOR_SUPPLY("avdd_pllm", NULL),
+ REGULATOR_SUPPLY("avdd_pllu", NULL),
+ REGULATOR_SUPPLY("avdd_pllx6", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo2_supply[] = {
+ REGULATOR_SUPPLY("vdd_rtc", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo3_supply[] = {
+ REGULATOR_SUPPLY("avdd_usb", NULL),
+ REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+ REGULATOR_SUPPLY("avdd_lvds", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo4_supply[] = {
+ REGULATOR_SUPPLY("avdd_osc", NULL),
+ REGULATOR_SUPPLY("vddio_sys", "panjit_touch"),
+};
+static struct regulator_consumer_supply tps658621_ldo5_supply[] = {
+ REGULATOR_SUPPLY("vddio_lcd", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo6_supply[] = {
+ REGULATOR_SUPPLY("avdd_vdac", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo7_supply[] = {
+ REGULATOR_SUPPLY("vddio_vi", NULL),
+ REGULATOR_SUPPLY("vdd_fuse", NULL),
+ REGULATOR_SUPPLY("vspi", "spi_tegra.0"),
+};
+static struct regulator_consumer_supply tps658621_ldo8_supply[] = {
+ REGULATOR_SUPPLY("vddio_bb", NULL),
+ REGULATOR_SUPPLY("vmmc", "sdhci-tegra.0"),
+ REGULATOR_SUPPLY("vmmc", "sdhci-tegra.2"),
+};
+static struct regulator_consumer_supply tps658621_ldo9_supply[] = {
+ REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+ REGULATOR_SUPPLY("vmmc", "sdhci-tegra.3"),
+};
+
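+/*
+ * Build regulator_init_data for one TPS658621 rail: voltage limits are
+ * given in mV, and every rail allows mode, status and voltage changes.
+ */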
+#define REGULATOR_INIT(_id, _minmv, _maxmv) \
+ { \
+ .constraints = { \
+ .min_uV = (_minmv)*1000, \
+ .max_uV = (_maxmv)*1000, \
+ .valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+ REGULATOR_MODE_STANDBY), \
+ .valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+ REGULATOR_CHANGE_STATUS | \
+ REGULATOR_CHANGE_VOLTAGE), \
+ }, \
+ .num_consumer_supplies = ARRAY_SIZE(tps658621_##_id##_supply),\
+ .consumer_supplies = tps658621_##_id##_supply, \
+ }
+
+static struct regulator_init_data sm0_data = REGULATOR_INIT(sm0, 725, 1500);
+static struct regulator_init_data sm1_data = REGULATOR_INIT(sm1, 725, 1500);
+static struct regulator_init_data sm2_data = REGULATOR_INIT(sm2, 3000, 4550);
+static struct regulator_init_data ldo0_data = REGULATOR_INIT(ldo0, 1250, 3300);
+static struct regulator_init_data ldo1_data = REGULATOR_INIT(ldo1, 725, 1500);
+static struct regulator_init_data ldo2_data = REGULATOR_INIT(ldo2, 725, 1225);
+static struct regulator_init_data ldo3_data = REGULATOR_INIT(ldo3, 1250, 3300);
+static struct regulator_init_data ldo4_data = REGULATOR_INIT(ldo4, 1700, 2475);
+static struct regulator_init_data ldo5_data = REGULATOR_INIT(ldo5, 1250, 3300);
+static struct regulator_init_data ldo6_data = REGULATOR_INIT(ldo6, 1250, 3300);
+static struct regulator_init_data ldo7_data = REGULATOR_INIT(ldo7, 1250, 3300);
+static struct regulator_init_data ldo8_data = REGULATOR_INIT(ldo8, 1250, 3300);
+static struct regulator_init_data ldo9_data = REGULATOR_INIT(ldo9, 1250, 3300);
+
+static struct tps6586x_rtc_platform_data rtc_data = {
+ .irq = TEGRA_NR_IRQS + TPS6586X_INT_RTC_ALM1,
+ .cl_sel = 0,
+};
+
+#define TPS_REG(_id, _data) \
+ { \
+ .id = TPS6586X_ID_##_id, \
+ .name = "tps6586x-regulator", \
+ .platform_data = _data, \
+ }
+
+static struct tps6586x_subdev_info tps_devs[] = {
+ TPS_REG(SM_0, &sm0_data),
+ TPS_REG(SM_1, &sm1_data),
+ TPS_REG(SM_2, &sm2_data),
+ TPS_REG(LDO_0, &ldo0_data),
+ TPS_REG(LDO_1, &ldo1_data),
+ TPS_REG(LDO_2, &ldo2_data),
+ TPS_REG(LDO_3, &ldo3_data),
+ TPS_REG(LDO_4, &ldo4_data),
+ TPS_REG(LDO_5, &ldo5_data),
+ TPS_REG(LDO_6, &ldo6_data),
+ TPS_REG(LDO_7, &ldo7_data),
+ TPS_REG(LDO_8, &ldo8_data),
+ TPS_REG(LDO_9, &ldo9_data),
+ {
+ .id = 0,
+ .name = "tps6586x-rtc",
+ .platform_data = &rtc_data,
+ },
+};
+
+static struct tps6586x_platform_data tps_platform = {
+ .irq_base = TEGRA_NR_IRQS,
+ .num_subdevs = ARRAY_SIZE(tps_devs),
+ .subdevs = tps_devs,
+ .gpio_base = TEGRA_NR_GPIOS,
+};
+
+static struct i2c_board_info __initdata p852_regulators[] = {
+ {
+ I2C_BOARD_INFO("tps6586x", 0x34),
+ .irq = INT_EXTERNAL_PMU,
+ .platform_data = &tps_platform,
+ },
+};
+
+static struct tegra_suspend_platform_data p852_suspend_data = {
+ .cpu_timer = 2000,
+ .cpu_off_timer = 0,
+ .suspend_mode = TEGRA_SUSPEND_LP1,
+ .core_timer = 0x7e7e,
+ .core_off_timer = 0,
+ .corereq_high = false,
+ .sysclkreq_high = true,
+};
+
+static void p852_power_off(void)
+{
+ int ret;
+
+ ret = tps6586x_power_off();
+ if (ret)
+ pr_err("p852: failed to power off\n");
+
+ while (1)
+ ;
+}
+
+void __init p852_power_off_init(void)
+{
+ pm_power_off = p852_power_off;
+}
+
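+/*
+ * B00/C01 revision boards use the 1.5pF RTC oscillator load setting, so
+ * patch cl_sel in the tps6586x-rtc subdevice platform data.
+ */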
+static void __init tps6586x_rtc_preinit(void)
+{
+ int i;
+ struct tps6586x_rtc_platform_data *rtc_pdata = NULL;
+
+ if (system_rev == P852_SKU23_B00 ||
+ system_rev == P852_SKU23_C01 ||
+ system_rev == P852_SKU13_B00 ||
+ system_rev == P852_SKU5_B00 ||
+ system_rev == P852_SKU5_C01) {
+ for (i = 0; i < tps_platform.num_subdevs; ++i)
+ if (!strcmp(tps_platform.subdevs[i].name,
+ "tps6586x-rtc"))
+ rtc_pdata = tps_platform.subdevs[i].platform_data;
+
+ if (rtc_pdata)
+ rtc_pdata->cl_sel = TPS6586X_RTC_CL_SEL_1_5PF;
+ }
+}
+
+int __init p852_regulator_init(void)
+{
+ void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+ u32 pmc_ctrl;
+
+ /* configure the power management controller to trigger PMU
+ * interrupts when low */
+ pmc_ctrl = readl(pmc + PMC_CTRL);
+ writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+ i2c_register_board_info(3, p852_regulators, 1);
+ tegra_init_suspend(&p852_suspend_data);
+
+ tps6586x_rtc_preinit();
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-sdhci.c b/arch/arm/mach-tegra/p852/board-p852-sdhci.c
new file mode 100644
index 000000000000..dc5b81fa3727
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sdhci.c
@@ -0,0 +1,199 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sdhci.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+#include <mach/pinmux.h>
+#include <asm/mach-types.h>
+
+#include "board-p852.h"
+
+static struct resource sdhci_resource1[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC2,
+ .end = INT_SDMMC2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC2_BASE,
+ .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource4[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
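+/*
+ * Card-detect, write-protect and power GPIOs default to -1 (unused); the
+ * per-SKU init code fills in real GPIO numbers where applicable.
+ */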
+struct tegra_sdhci_platform_data p852_sdhci_platform_data[] = {
+ {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ },
+ {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ },
+ {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ },
+ {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ },
+};
+
+static struct platform_device tegra_sdhci_device[] = {
+ {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource1,
+ .num_resources = ARRAY_SIZE(sdhci_resource1),
+ .dev = {
+ .platform_data = &p852_sdhci_platform_data[0],
+ },
+ },
+ {
+ .name = "sdhci-tegra",
+ .id = 1,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &p852_sdhci_platform_data[1],
+ },
+ },
+ {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .platform_data = &p852_sdhci_platform_data[2],
+ },
+ },
+ {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource4,
+ .num_resources = ARRAY_SIZE(sdhci_resource4),
+ .dev = {
+ .platform_data = &p852_sdhci_platform_data[3],
+ },
+ },
+};
+
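+/*
+ * For each controller enabled in p852_sdhci_peripherals, request and
+ * enable the CD/WP/power GPIOs selected by the SKU file and register the
+ * corresponding sdhci-tegra platform device.
+ */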
+void __init p852_sdhci_init(void)
+{
+ int i, count = 10;
+ int cd = 0, wp = 0, pw = 0;
+ static char gpio_name[12][10];
+ unsigned int sdhci_config = 0;
+ struct tegra_sdhci_platform_data *pdata;
+
+ if (!(p852_sku_peripherals & P852_SKU_SDHCI_ENABLE))
+ return;
+
+ for (i = 0; i < P852_MAX_SDHCI; i++) {
+ sdhci_config =
+ (p852_sdhci_peripherals >> (P852_SDHCI_SHIFT * i));
+ if (!(sdhci_config & P852_SDHCI_ENABLE))
+ continue;
+
+ pdata = &p852_sdhci_platform_data[i];
+ cd = i * 3;
+ wp = cd + 1;
+ pw = wp + 1;
+
+ if (sdhci_config & P852_SDHCI_CD_EN) {
+ snprintf(gpio_name[cd], count, "sdhci%d_cd", i);
+ gpio_request(pdata->cd_gpio, gpio_name[cd]);
+ tegra_gpio_enable(pdata->cd_gpio);
+ }
+
+ if (sdhci_config & P852_SDHCI_WP_EN) {
+ snprintf(gpio_name[wp], count, "sdhci%d_wp", i);
+ gpio_request(pdata->wp_gpio, gpio_name[wp]);
+ tegra_gpio_enable(pdata->wp_gpio);
+ }
+
+ if (sdhci_config & P852_SDHCI_PW_EN) {
+ snprintf(gpio_name[pw], count, "sdhci%d_pw", i);
+ gpio_request(pdata->power_gpio, gpio_name[pw]);
+ tegra_gpio_enable(pdata->power_gpio);
+ }
+
+ platform_device_register(&tegra_sdhci_device[i]);
+ }
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku1-b00.c b/arch/arm/mach-tegra/p852/board-p852-sku1-b00.c
new file mode 100644
index 000000000000..1cd89c5dfd76
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku1-b00.c
@@ -0,0 +1,98 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku1-b00.c
+ *
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
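+/*
+ * SKU 1 (B00): each helper ORs this SKU's enable bits into the global
+ * p852_*_peripherals masks read by the common P852 setup code.
+ */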
+static inline void p852_sku1_b00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku1_b00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku1_b00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE)
+ << P852_SDHCI4_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku1_b00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS | P852_UART_ALT_PIN_CFG)
+ << P852_UARTA_SHIFT);
+}
+
+static inline void p852_sku1_b00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPB_SHIFT);
+}
+
+static inline void p852_sku1_b00_ulpi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_ULPI_DISABLE;
+}
+
+static inline void p852_sku1_b00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+void __init p852_sku1_b00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku1_b00_spi_init();
+ p852_sku1_b00_i2s_init();
+ p852_sku1_b00_uart_init();
+ p852_sku1_b00_sdhci_init();
+ p852_sku1_b00_i2c_init();
+ p852_sku1_b00_display_init();
+ p852_sku1_b00_ulpi_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku1-c0x.c b/arch/arm/mach-tegra/p852/board-p852-sku1-c0x.c
new file mode 100644
index 000000000000..4a783fb9b635
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku1-c0x.c
@@ -0,0 +1,98 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku1-c0x.c
+ *
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku1_c0x_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku1_c0x_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku1_c0x_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE)
+ << P852_SDHCI4_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku1_c0x_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS | P852_UART_ALT_PIN_CFG)
+ << P852_UARTA_SHIFT);
+}
+
+static inline void p852_sku1_c0x_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPB_SHIFT);
+}
+
+static inline void p852_sku1_c0x_ulpi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_ULPI_DISABLE;
+}
+
+static inline void p852_sku1_c0x_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+void __init p852_sku1_c0x_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku1_c0x_spi_init();
+ p852_sku1_c0x_i2s_init();
+ p852_sku1_c0x_uart_init();
+ p852_sku1_c0x_sdhci_init();
+ p852_sku1_c0x_i2c_init();
+ p852_sku1_c0x_display_init();
+ p852_sku1_c0x_ulpi_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku1.c b/arch/arm/mach-tegra/p852/board-p852-sku1.c
new file mode 100644
index 000000000000..387ba054bd84
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku1.c
@@ -0,0 +1,89 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku1.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku1_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku1_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku1_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI4_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku1_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT);
+}
+
+static inline void p852_sku1_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPB_SHIFT);
+}
+
+static inline void p852_sku1_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+void __init p852_sku1_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku1_spi_init();
+ p852_sku1_i2s_init();
+ p852_sku1_uart_init();
+ p852_sku1_sdhci_init();
+ p852_sku1_i2c_init();
+ p852_sku1_display_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku13-b00.c b/arch/arm/mach-tegra/p852/board-p852-sku13-b00.c
new file mode 100644
index 000000000000..39e01f660eaf
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku13-b00.c
@@ -0,0 +1,114 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku13-b00.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku13_b00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku13_b00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= (P852_I2S_ENABLE << P852_I2S1_SHIFT) |
+ (P852_I2S_ENABLE << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku13_b00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ (P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT);
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+}
+
+static inline void p852_sku13_b00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku13_b00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku13_b00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+ p852_i2c_set_default_clock(0, 40000);
+}
+
+#ifdef CONFIG_TEGRA_SPI_I2S
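+/* CPLD direction GPIOs and switch timeout for this SKU's SPI/I2S port. */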
+static struct tegra_spi_i2s_platform_data spi_i2s_data = {
+ .gpio_i2s = {
+ .gpio_no = TEGRA_GPIO_PT5,
+ .active_state = 1,
+ },
+ .gpio_spi = {
+ .gpio_no = TEGRA_GPIO_PV7,
+ .active_state = 1,
+ },
+ .spi_i2s_timeout_ms = 25,
+};
+
+static inline void p852_sku13_b00_spi_i2s_init(void)
+{
+ tegra_spi_i2s_device.platform_data = &spi_i2s_data;
+ /* cpld_gpio_dir1 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_PTA, TEGRA_TRI_NORMAL);
+ /* cpld_gpio_dir2 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LVP0, TEGRA_TRI_NORMAL);
+ p852_spi_i2s_init();
+}
+#endif
+
+void __init p852_sku13_b00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku13_b00_spi_init();
+ p852_sku13_b00_i2s_init();
+ p852_sku13_b00_uart_init();
+ p852_sku13_b00_sdhci_init();
+ p852_sku13_b00_i2c_init();
+ p852_sku13_b00_display_init();
+
+ p852_common_init();
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+ p852_sku13_b00_spi_i2s_init();
+#endif
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku13.c b/arch/arm/mach-tegra/p852/board-p852-sku13.c
new file mode 100644
index 000000000000..92d917e6e2c1
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku13.c
@@ -0,0 +1,112 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku13.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku13_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku13_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= (P852_I2S_ENABLE << P852_I2S1_SHIFT) |
+ (P852_I2S_ENABLE << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku13_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ (P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT);
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+}
+
+static inline void p852_sku13_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku13_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku13_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+ p852_i2c_set_default_clock(0, 40000);
+}
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+static struct tegra_spi_i2s_platform_data spi_i2s_data = {
+ .gpio_i2s = {
+ .gpio_no = TEGRA_GPIO_PS3,
+ .active_state = 0,
+ },
+ .gpio_spi = {
+ .gpio_no = TEGRA_GPIO_PS4,
+ .active_state = 0,
+ },
+ .spi_i2s_timeout_ms = 25,
+};
+
+static inline void p852_sku13_spi_i2s_init(void)
+{
+ tegra_spi_i2s_device.platform_data = &spi_i2s_data;
+ /* cpld_gpio_dir1 and cpld_gpio_dir2*/
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_KBCB, TEGRA_TRI_NORMAL);
+ p852_spi_i2s_init();
+}
+#endif
+
+void __init p852_sku13_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku13_spi_init();
+ p852_sku13_i2s_init();
+ p852_sku13_uart_init();
+ p852_sku13_sdhci_init();
+ p852_sku13_i2c_init();
+ p852_sku13_display_init();
+
+ p852_common_init();
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+ p852_sku13_spi_i2s_init();
+#endif
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku23-b00.c b/arch/arm/mach-tegra/p852/board-p852-sku23-b00.c
new file mode 100644
index 000000000000..6f464ec3620f
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku23-b00.c
@@ -0,0 +1,115 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku23-b00.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku23_b00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku23_b00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |=
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S1_SHIFT) |
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku23_b00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |= (P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN) << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+}
+
+static inline void p852_sku23_b00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku23_b00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku23_b00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+static struct tegra_spi_i2s_platform_data spi_i2s_data = {
+ .gpio_i2s = {
+ .gpio_no = TEGRA_GPIO_PT5,
+ .active_state = 1,
+ },
+ .gpio_spi = {
+ .gpio_no = TEGRA_GPIO_PV7,
+ .active_state = 1,
+ },
+ .spi_i2s_timeout_ms = 25,
+};
+
+static inline void p852_sku23_b00_spi_i2s_init(void)
+{
+ tegra_spi_i2s_device.platform_data = &spi_i2s_data;
+ /* cpld_gpio_dir1 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_PTA, TEGRA_TRI_NORMAL);
+ /* cpld_gpio_dir2 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LVP0, TEGRA_TRI_NORMAL);
+ p852_spi_i2s_init();
+}
+#endif
+
+void __init p852_sku23_b00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku23_b00_spi_init();
+ p852_sku23_b00_i2s_init();
+ p852_sku23_b00_uart_init();
+ p852_sku23_b00_sdhci_init();
+ p852_sku23_b00_i2c_init();
+ p852_sku23_b00_display_init();
+
+ p852_common_init();
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+ p852_sku23_b00_spi_i2s_init();
+#endif
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku23-c01.c b/arch/arm/mach-tegra/p852/board-p852-sku23-c01.c
new file mode 100644
index 000000000000..f946e0ed35ee
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku23-c01.c
@@ -0,0 +1,87 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku23-c01.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku23_c01_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku23_c01_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |=
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S1_SHIFT) |
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku23_c01_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |= (P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN) << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+}
+
+static inline void p852_sku23_c01_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku23_c01_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku23_c01_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+void __init p852_sku23_c01_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku23_c01_spi_init();
+ p852_sku23_c01_i2s_init();
+ p852_sku23_c01_uart_init();
+ p852_sku23_c01_sdhci_init();
+ p852_sku23_c01_i2c_init();
+ p852_sku23_c01_display_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku23.c b/arch/arm/mach-tegra/p852/board-p852-sku23.c
new file mode 100644
index 000000000000..a2bc9b4ca0b6
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku23.c
@@ -0,0 +1,113 @@
+/*
+ * arch/arm/mach-tegra/p852/board-p852-sku23.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void __init p852_sku23_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku23_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |= (P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN) << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+}
+
+static inline void p852_sku23_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |=
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S1_SHIFT) |
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku23_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku23_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku23_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+static struct tegra_spi_i2s_platform_data spi_i2s_data = {
+ .gpio_i2s = {
+ .gpio_no = TEGRA_GPIO_PS3,
+ .active_state = 0,
+ },
+ .gpio_spi = {
+ .gpio_no = TEGRA_GPIO_PS4,
+ .active_state = 0,
+ },
+ .spi_i2s_timeout_ms = 25,
+};
+
+static inline void p852_sku23_spi_i2s_init(void)
+{
+ tegra_spi_i2s_device.platform_data = &spi_i2s_data;
+ /* cpld_gpio_dir1 and cpld_gpio_dir2 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_KBCB, TEGRA_TRI_NORMAL);
+ p852_spi_i2s_init();
+}
+#endif
+
+void __init p852_sku23_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku23_spi_init();
+ p852_sku23_i2s_init();
+ p852_sku23_uart_init();
+ p852_sku23_sdhci_init();
+ p852_sku23_i2c_init();
+ p852_sku23_display_init();
+
+ p852_common_init();
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+ p852_sku23_spi_i2s_init();
+#endif
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku3.c b/arch/arm/mach-tegra/p852/board-p852-sku3.c
new file mode 100644
index 000000000000..380df9a7439a
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku3.c
@@ -0,0 +1,103 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku3.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku3_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku3_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= (P852_I2S_ENABLE << P852_I2S1_SHIFT) |
+ (P852_I2S_ENABLE << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku3_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ (P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT);
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+}
+
+static inline void p852_sku3_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku3_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+ p852_i2c_set_default_clock(0, 40000);
+}
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+static struct tegra_spi_i2s_platform_data spi_i2s_data = {
+ .gpio_i2s = {
+ .gpio_no = TEGRA_GPIO_PS3,
+ .active_state = 0,
+ },
+ .gpio_spi = {
+ .gpio_no = TEGRA_GPIO_PS4,
+ .active_state = 0,
+ },
+ .spi_i2s_timeout_ms = 25,
+};
+
+static inline void p852_sku3_spi_i2s_init(void)
+{
+ tegra_spi_i2s_device.platform_data = &spi_i2s_data;
+ /* cpld_gpio_dir1 and cpld_gpio_dir2 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_KBCB, TEGRA_TRI_NORMAL);
+ p852_spi_i2s_init();
+}
+#endif
+
+void __init p852_sku3_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku3_spi_init();
+ p852_sku3_i2s_init();
+ p852_sku3_uart_init();
+ p852_sku3_sdhci_init();
+ p852_sku3_i2c_init();
+
+ p852_common_init();
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+ p852_sku3_spi_i2s_init();
+#endif
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku5-b00.c b/arch/arm/mach-tegra/p852/board-p852-sku5-b00.c
new file mode 100644
index 000000000000..59f6f13f7729
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku5-b00.c
@@ -0,0 +1,115 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku5-b00.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku5_b00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku5_b00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |=
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S1_SHIFT) |
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku5_b00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE << P852_SDHCI1_SHIFT) |
+ (P852_SDHCI_ENABLE << P852_SDHCI2_SHIFT));
+
+ p852_sdhci_platform_data[1].is_8bit = true;
+}
+
+static inline void p852_sku5_b00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku5_b00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku5_b00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+static struct tegra_spi_i2s_platform_data spi_i2s_data = {
+ .gpio_i2s = {
+ .gpio_no = TEGRA_GPIO_PT5,
+ .active_state = 1,
+ },
+ .gpio_spi = {
+ .gpio_no = TEGRA_GPIO_PV7,
+ .active_state = 1,
+ },
+ .spi_i2s_timeout_ms = 25,
+};
+
+static inline void p852_sku5_b00_spi_i2s_init(void)
+{
+ tegra_spi_i2s_device.platform_data = &spi_i2s_data;
+ /* cpld_gpio_dir1 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_PTA, TEGRA_TRI_NORMAL);
+ /* cpld_gpio_dir2 */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LVP0, TEGRA_TRI_NORMAL);
+ p852_spi_i2s_init();
+}
+#endif
+
+void __init p852_sku5_b00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku5_b00_spi_init();
+ p852_sku5_b00_i2s_init();
+ p852_sku5_b00_uart_init();
+ p852_sku5_b00_sdhci_init();
+ p852_sku5_b00_i2c_init();
+ p852_sku5_b00_display_init();
+
+ p852_common_init();
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+ p852_sku5_b00_spi_i2s_init();
+#endif
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku5-c01.c b/arch/arm/mach-tegra/p852/board-p852-sku5-c01.c
new file mode 100644
index 000000000000..f9c8e72911b6
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku5-c01.c
@@ -0,0 +1,93 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku5-c01.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku5_c01_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI2_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI3_SHIFT) |
+ ((P852_SPI_SLAVE | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku5_c01_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |=
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S1_SHIFT) |
+ ((P852_I2S_TDM | P852_I2S_ENABLE) << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku5_c01_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+}
+
+static inline void p852_sku5_c01_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE) << P852_UARTA_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTC_SHIFT);
+}
+
+static inline void p852_sku5_c01_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+ p852_display_peripherals |=
+ (P852_DISP_ENABLE << P852_DISPA_SHIFT);
+}
+
+static inline void p852_sku5_c01_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+static inline void p852_sku5_c01_ulpi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_ULPI_DISABLE;
+}
+
+void __init p852_sku5_c01_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NAND_ENABLE;
+
+ p852_sku5_c01_spi_init();
+ p852_sku5_c01_i2s_init();
+ p852_sku5_c01_uart_init();
+ p852_sku5_c01_sdhci_init();
+ p852_sku5_c01_i2c_init();
+ p852_sku5_c01_display_init();
+ p852_sku5_c01_ulpi_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku8-b00.c b/arch/arm/mach-tegra/p852/board-p852-sku8-b00.c
new file mode 100644
index 000000000000..4cc4d53d980f
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku8-b00.c
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku8-b00.c
+ *
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku8_b00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku8_b00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku8_b00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE)
+ << P852_SDHCI4_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku8_b00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT);
+}
+
+static inline void p852_sku8_b00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+}
+
+static inline void p852_sku8_b00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+
+void __init p852_sku8_b00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku8_b00_spi_init();
+ p852_sku8_b00_i2s_init();
+ p852_sku8_b00_uart_init();
+ p852_sku8_b00_sdhci_init();
+ p852_sku8_b00_display_init();
+ p852_sku8_b00_i2c_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku8-c01.c b/arch/arm/mach-tegra/p852/board-p852-sku8-c01.c
new file mode 100644
index 000000000000..71210cd12b90
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku8-c01.c
@@ -0,0 +1,87 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku8-c01.c
+ *
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku8_c00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku8_c00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku8_c00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE)
+ << P852_SDHCI4_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku8_c00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT);
+}
+
+static inline void p852_sku8_c00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+}
+
+static inline void p852_sku8_c00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+
+void __init p852_sku8_c00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku8_c00_spi_init();
+ p852_sku8_c00_i2s_init();
+ p852_sku8_c00_uart_init();
+ p852_sku8_c00_sdhci_init();
+ p852_sku8_c00_display_init();
+ p852_sku8_c00_i2c_init();
+
+ p852_common_init();
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku9-b00.c b/arch/arm/mach-tegra/p852/board-p852-sku9-b00.c
new file mode 100644
index 000000000000..7c3d9c3d9a3d
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku9-b00.c
@@ -0,0 +1,93 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku9-b00.c
+ *
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku9_b00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku9_b00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku9_b00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE)
+ << P852_SDHCI4_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku9_b00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT);
+}
+
+static inline void p852_sku9_b00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+}
+
+static inline void p852_sku9_b00_ulpi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_ULPI_DISABLE;
+}
+
+static inline void p852_sku9_b00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+void __init p852_sku9_b00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku9_b00_spi_init();
+ p852_sku9_b00_i2s_init();
+ p852_sku9_b00_uart_init();
+ p852_sku9_b00_sdhci_init();
+ p852_sku9_b00_display_init();
+ p852_sku9_b00_ulpi_init();
+ p852_sku9_b00_i2c_init();
+
+ p852_common_init();
+}
+
diff --git a/arch/arm/mach-tegra/p852/board-p852-sku9-c01.c b/arch/arm/mach-tegra/p852/board-p852-sku9-c01.c
new file mode 100644
index 000000000000..94c79294fb47
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852-sku9-c01.c
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/mach-tegra/board-p852-sku9-c01.c
+ *
+ * Copyright (C) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+
+static inline void p852_sku9_c00_spi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
+ p852_spi_peripherals |=
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
+ ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
+}
+
+static inline void p852_sku9_c00_i2s_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
+ p852_i2s_peripherals |= ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S1_SHIFT) | ((P852_I2S_ENABLE | P852_I2S_TDM)
+ << P852_I2S2_SHIFT);
+}
+
+static inline void p852_sku9_c00_sdhci_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
+ p852_sdhci_peripherals |=
+ ((P852_SDHCI_ENABLE)
+ << P852_SDHCI4_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI1_SHIFT) |
+ ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
+ << P852_SDHCI3_SHIFT);
+
+ p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
+ p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
+ p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
+ p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
+}
+
+static inline void p852_sku9_c00_uart_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_UART_ENABLE;
+ p852_uart_peripherals |=
+ ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
+ ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT);
+}
+
+static inline void p852_sku9_c00_display_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
+}
+
+static inline void p852_sku9_c00_ulpi_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_ULPI_DISABLE;
+}
+
+static inline void p852_sku9_c00_i2c_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
+ p852_i2c_peripherals |=
+ ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
+ ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
+}
+
+void __init p852_sku9_c00_init(void)
+{
+ p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
+
+ p852_sku9_c00_spi_init();
+ p852_sku9_c00_i2s_init();
+ p852_sku9_c00_uart_init();
+ p852_sku9_c00_sdhci_init();
+ p852_sku9_c00_display_init();
+ p852_sku9_c00_ulpi_init();
+ p852_sku9_c00_i2c_init();
+
+ p852_common_init();
+}
diff --git a/arch/arm/mach-tegra/p852/board-p852.c b/arch/arm/mach-tegra/p852/board-p852.c
new file mode 100644
index 000000000000..44d86bee50e1
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852.c
@@ -0,0 +1,765 @@
+/*
+ * arch/arm/mach-tegra/board-p852.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "board-p852.h"
+#include <mach/spdif.h>
+
+unsigned int p852_sku_peripherals;
+unsigned int p852_spi_peripherals;
+unsigned int p852_i2s_peripherals;
+unsigned int p852_uart_peripherals;
+unsigned int p852_i2c_peripherals;
+unsigned int p852_sdhci_peripherals;
+unsigned int p852_display_peripherals;
+
+/* enable_usb3 can take two options: ehci3=eth or ehci3=usb */
+static char enable_usb3[4];
+
+int __init parse_enable_usb3(char *arg)
+{
+ if (!arg)
+ return 0;
+
+ strlcpy(enable_usb3, arg, sizeof(enable_usb3));
+ return 0;
+}
+
+early_param("ehci3", parse_enable_usb3);
+
+static __initdata struct tegra_clk_init_table p852_clk_init_table[] = {
+ /* name parent rate enabled */
+ {"uarta", "pll_p", 216000000, true},
+ {"uartb", "pll_p", 216000000, true},
+ {"uartc", "pll_p", 216000000, true},
+ {"uartd", "pll_p", 216000000, true},
+ {"pll_m", "clk_m", 600000000, true},
+ {"pll_m_out1", "pll_m", 240000000, true},
+ {"pll_p_out4", "pll_p", 240000000, true},
+ {"host1x", "pll_p", 166000000, true},
+ {"disp1", "pll_p", 216000000, true},
+ {"vi", "pll_m", 100000000, true},
+ {"csus", "pll_m", 100000000, true},
+ {"emc", "pll_m", 600000000, true},
+ {"pll_c", "clk_m", 600000000, true},
+ {"pll_c_out1", "pll_c", 240000000, true},
+ {"pwm", "clk_32k", 32768, false},
+ {"clk_32k", NULL, 32768, true},
+ {"pll_a", NULL, 56448000, true},
+ {"pll_a_out0", "pll_a", 11289600, true},
+ {"audio", "pll_a_out0", 11289600, true},
+ {"audio_2x", "audio", 22579200, false},
+ {"vde", "pll_c", 240000000, false},
+ {"vi_sensor", "pll_m", 111000000, true},
+ {"epp", "pll_m", 111000000, true},
+ {"mpe", "pll_m", 111000000, true},
+ {"i2s1", "pll_a_out0", 11289600, true},
+ {"i2s2", "pll_a_out0", 11289600, true},
+ {"ndflash", "pll_p", 86500000, true},
+ {"sbc1", "pll_p", 12000000, false},
+ {"spdif_in", "pll_m", 22579000, true},
+ {"spdif_out", "pll_a_out0", 5644800, true},
+ {"sbc2", "pll_p", 12000000, false},
+ {"sbc3", "pll_p", 12000000, false},
+ {"sbc4", "pll_p", 12000000, false},
+ {"nor", "pll_p", 86500000, true},
+ {NULL, NULL, 0, 0},
+};
+
+static struct tegra_nand_chip_parms nand_chip_parms[] = {
+ /* Micron 29F4G08ABADA */
+ [0] = {
+ .vendor_id = 0x2C,
+ .device_id = 0xDC,
+ .capacity = 512,
+ .read_id_fourth_byte = 0x95,
+ .timing = {
+ .trp = 1,
+ .trh = 1,
+ .twp = 12,
+ .twh = 12,
+ .tcs = 24,
+ .twhr = 58,
+ .tcr_tar_trr = 12,
+ .twb = 116,
+ .trp_resp = 24,
+ .tadl = 24,
+ },
+ },
+ /* Micron 29F4G16ABADA */
+ [1] = {
+ .vendor_id = 0x2C,
+ .device_id = 0xCC,
+ .capacity = 512,
+ .read_id_fourth_byte = 0xD5,
+ .timing = {
+ .trp = 10,
+ .trh = 7,
+ .twp = 10,
+ .twh = 7,
+ .tcs = 15,
+ .twhr = 60,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 20,
+ .tadl = 70,
+ },
+ },
+ /* Hynix HY27UF084G2B */
+ [2] = {
+ .vendor_id = 0xAD,
+ .device_id = 0xDC,
+ .read_id_fourth_byte = 0x95,
+ .capacity = 512,
+ .timing = {
+ .trp = 12,
+ .trh = 1,
+ .twp = 12,
+ .twh = 0,
+ .tcs = 24,
+ .twhr = 58,
+ .tcr_tar_trr = 0,
+ .twb = 116,
+ .trp_resp = 24,
+ .tadl = 24,
+ },
+ },
+};
+
+struct tegra_nand_platform p852_nand_data = {
+ .max_chips = 8,
+ .chip_parms = nand_chip_parms,
+ .nr_chip_parms = ARRAY_SIZE(nand_chip_parms),
+ .wp_gpio = TEGRA_GPIO_PC7,
+};
+
+static struct resource resources_nand[] = {
+ [0] = {
+ .start = INT_NANDFLASH,
+ .end = INT_NANDFLASH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device p852_nand_device = {
+ .name = "tegra_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(resources_nand),
+ .resource = resources_nand,
+ .dev = {
+ .platform_data = &p852_nand_data,
+ },
+};
+
+unsigned int p852_uart_irqs[] = {
+ INT_UARTA,
+ INT_UARTB,
+ INT_UARTC,
+ INT_UARTD,
+};
+
+unsigned int p852_uart_bases[] = {
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+};
+
+static struct platform_device *p852_spi_devices[] __initdata = {
+ &tegra_spi_device1,
+ &tegra_spi_device2,
+ &tegra_spi_device3,
+ &tegra_spi_device4,
+};
+
+static struct plat_serial8250_port debug_uart_platform_data[] = {
+ {
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = 216000000,
+ },
+ {
+ .flags = 0,
+ }
+};
+
+#define DEF_8250_PLATFORM_DATA(_base, _irq) { \
+ .flags = UPF_BOOT_AUTOCONF, \
+ .iotype = UPIO_MEM, \
+ .membase = IO_ADDRESS(_base), \
+ .mapbase = _base, \
+ .irq = _irq, \
+ .regshift = 2, \
+ .uartclk = 216000000, \
+}
+
+static struct plat_serial8250_port tegra_8250_uarta_platform_data[] = {
+ DEF_8250_PLATFORM_DATA(TEGRA_UARTA_BASE, INT_UARTA),
+ {
+ .flags = 0,
+ }
+};
+
+static struct plat_serial8250_port tegra_8250_uartb_platform_data[] = {
+ DEF_8250_PLATFORM_DATA(TEGRA_UARTB_BASE, INT_UARTB),
+ {
+ .flags = 0,
+ }
+};
+
+static struct plat_serial8250_port tegra_8250_uartc_platform_data[] = {
+ DEF_8250_PLATFORM_DATA(TEGRA_UARTC_BASE, INT_UARTC),
+ {
+ .flags = 0,
+ }
+};
+
+static struct plat_serial8250_port tegra_8250_uartd_platform_data[] = {
+ DEF_8250_PLATFORM_DATA(TEGRA_UARTD_BASE, INT_UARTD),
+ {
+ .flags = 0,
+ }
+};
+
+static struct plat_serial8250_port tegra_8250_uarte_platform_data[] = {
+ DEF_8250_PLATFORM_DATA(TEGRA_UARTE_BASE, INT_UARTE),
+ {
+ .flags = 0,
+ }
+};
+
+struct platform_device tegra_8250_uarta_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = tegra_8250_uarta_platform_data,
+ },
+};
+
+struct platform_device tegra_8250_uartb_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM1,
+ .dev = {
+ .platform_data = tegra_8250_uartb_platform_data,
+ },
+};
+
+struct platform_device tegra_8250_uartc_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM2,
+ .dev = {
+ .platform_data = tegra_8250_uartc_platform_data,
+ },
+};
+
+struct platform_device tegra_8250_uartd_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_FOURPORT,
+ .dev = {
+ .platform_data = tegra_8250_uartd_platform_data,
+ },
+};
+
+struct platform_device tegra_8250_uarte_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_ACCENT,
+ .dev = {
+ .platform_data = tegra_8250_uarte_platform_data,
+ },
+};
+
+static struct platform_device debug_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = debug_uart_platform_data,
+ },
+};
+
+static struct tegra_utmip_config utmi_phy_config[] = {
+ [0] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+ [1] = {
+ .hssync_start_delay = 0,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+static struct tegra_ulpi_config ulpi_usb2_config = {
+ .reset_gpio = TEGRA_GPIO_PI5,
+};
+
+static struct tegra_ehci_platform_data tegra_ehci_pdata[] = {
+ [0] = {
+ .phy_config = &utmi_phy_config[0],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ },
+ [1] = {
+ .phy_config = &ulpi_usb2_config,
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ .phy_type = TEGRA_USB_PHY_TYPE_LINK_ULPI,
+ },
+ [2] = {
+ .phy_config = &utmi_phy_config[1],
+ .operating_mode = TEGRA_USB_HOST,
+ .power_down_on_bus_suspend = 0,
+ },
+};
+
+static void p852_usb_gpio_config(void)
+{
+ unsigned int usbeth_mux_gpio = 0, usb_ena_val;
+ unsigned int has_onboard_ethernet = 0;
+ unsigned int p852_eth_reset = TEGRA_GPIO_PD3;
+
+ switch (system_rev) {
+ case P852_SKU13_B00:
+ case P852_SKU23_B00:
+ case P852_SKU23_C01:
+ case P852_SKU8_B00:
+ case P852_SKU8_C01:
+ case P852_SKU9_B00:
+ case P852_SKU9_C01:
+ {
+ usbeth_mux_gpio = TEGRA_GPIO_PS3;
+ has_onboard_ethernet = 1;
+ usb_ena_val = 1;
+ }
+ break;
+ case P852_SKU5_B00:
+ case P852_SKU5_C01:
+ {
+ usb_ena_val = 1;
+ has_onboard_ethernet = 0;
+ }
+ break;
+ case P852_SKU1:
+ {
+ has_onboard_ethernet = 0;
+ usb_ena_val = 0;
+ strncpy(enable_usb3, "usb", sizeof(enable_usb3));
+ }
+ break;
+ case P852_SKU1_B00:
+ case P852_SKU1_C0X:
+ {
+ has_onboard_ethernet = 0;
+ usb_ena_val = 1;
+ strncpy(enable_usb3, "usb", sizeof(enable_usb3));
+ }
+ break;
+ default:
+ {
+ usbeth_mux_gpio = TEGRA_GPIO_PD4;
+ has_onboard_ethernet = 1;
+ usb_ena_val = 0;
+ }
+ }
+
+ if (has_onboard_ethernet) {
+ gpio_request_one(usbeth_mux_gpio, GPIOF_OUT_INIT_LOW,
+ "eth_ena");
+ tegra_gpio_enable(usbeth_mux_gpio);
+
+ /* eth reset */
+ gpio_request_one(p852_eth_reset, GPIOF_OUT_INIT_LOW,
+ "eth_reset");
+ tegra_gpio_enable(p852_eth_reset);
+ udelay(1);
+ gpio_direction_output(p852_eth_reset, 1);
+
+ if (!strcmp(enable_usb3, "eth"))
+ gpio_direction_output(usbeth_mux_gpio, 1);
+
+ /* exporting usbeth_mux_gpio */
+ gpio_export(usbeth_mux_gpio, true);
+ }
+
+ if (!strcmp(enable_usb3, "usb")) {
+ gpio_direction_output(TEGRA_GPIO_PB2, usb_ena_val);
+ gpio_direction_output(TEGRA_GPIO_PW1, usb_ena_val);
+ }
+}
+
+static struct platform_device *p852_uart_devices[] __initdata = {
+ &tegra_uarta_device,
+ &tegra_uartb_device,
+ &tegra_uartc_device,
+ &tegra_uartd_device,
+};
+
+static struct platform_device *p852_8250_uart_devices[] __initdata = {
+ &tegra_8250_uarta_device,
+ &tegra_8250_uartb_device,
+ &tegra_8250_uartc_device,
+ &tegra_8250_uartd_device,
+ &tegra_8250_uarte_device,
+};
+
+static struct platform_device tegra_itu656 = {
+ .name = "tegra_itu656",
+ .id = -1,
+};
+
+static struct platform_device *p852_devices[] __initdata = {
+ &tegra_gart_device,
+ &tegra_avp_device,
+ &tegra_itu656,
+};
+
+static struct tegra_nor_platform_data p852_nor_data = {
+ .flash = {
+ .map_name = "cfi_probe",
+ .width = 2,
+ },
+ .chip_parms = {
+ /* FIXME: use characterized clock freq */
+ .timing_default = {
+ .timing0 = 0xA0300243,
+ .timing1 = 0x00040406,
+ },
+ .timing_read = {
+ .timing0 = 0xA0300243,
+ .timing1 = 0x00000A00,
+ },
+ },
+};
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+struct spi_board_info tegra_spi_i2s_device __initdata = {
+ .modalias = "spi_i2s_pcm",
+ .bus_num = 2,
+ .chip_select = 2,
+ .mode = SPI_MODE_0,
+ .max_speed_hz = 18000000,
+ .platform_data = NULL,
+ .irq = 0,
+};
+
+void __init p852_spi_i2s_init(void)
+{
+ struct tegra_spi_i2s_platform_data *pdata;
+
+ pdata = (struct tegra_spi_i2s_platform_data *)
+ tegra_spi_i2s_device.platform_data;
+ if (pdata->gpio_i2s.active_state) {
+ gpio_request_one(pdata->gpio_i2s.gpio_no, GPIOF_OUT_INIT_LOW,
+ "i2s_cpld_dir1");
+ } else {
+ gpio_request_one(pdata->gpio_i2s.gpio_no, GPIOF_OUT_INIT_HIGH,
+ "i2s_cpld_dir1");
+ }
+ tegra_gpio_enable(pdata->gpio_i2s.gpio_no);
+ if (pdata->gpio_spi.active_state) {
+ gpio_request_one(pdata->gpio_spi.gpio_no, GPIOF_OUT_INIT_LOW,
+ "spi_cpld_dir2");
+ } else {
+ gpio_request_one(pdata->gpio_spi.gpio_no, GPIOF_OUT_INIT_HIGH,
+ "spi_cpld_dir2");
+ }
+
+ tegra_gpio_enable(pdata->gpio_spi.gpio_no);
+ spi_register_board_info(&tegra_spi_i2s_device, 1);
+}
+#endif
+
+#if defined(CONFIG_SPI_TEGRA) && defined(CONFIG_SPI_SPIDEV)
+static struct spi_board_info tegra_spi_devices[] __initdata = {
+ {
+ .modalias = "spidev",
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ .max_speed_hz = 18000000,
+ .platform_data = NULL,
+ .irq = 0,
+ },
+ {
+ .modalias = "spidev",
+ .bus_num = 1,
+ .chip_select = 1,
+ .mode = SPI_MODE_0,
+ .max_speed_hz = 18000000,
+ .platform_data = NULL,
+ .irq = 0,
+ },
+ {
+ .modalias = "spidev",
+ .bus_num = 3,
+ .chip_select = 1,
+ .mode = SPI_MODE_0,
+ .max_speed_hz = 18000000,
+ .platform_data = NULL,
+ .irq = 0,
+ },
+};
+
+static void __init p852_register_spidev(void)
+{
+ spi_register_board_info(tegra_spi_devices,
+ ARRAY_SIZE(tegra_spi_devices));
+}
+#else
+#define p852_register_spidev() do {} while (0)
+#endif
+
+static void __init p852_usb_init(void)
+{
+
+ p852_usb_gpio_config();
+ /*
+ if (system_rev == P852_SKU8)
+ {
+ platform_device_register(&tegra_udc_device);
+ }
+ else
+ */
+ {
+ tegra_ehci1_device.dev.platform_data = &tegra_ehci_pdata[0];
+ platform_device_register(&tegra_ehci1_device);
+ }
+
+ if (!(p852_sku_peripherals & P852_SKU_ULPI_DISABLE)) {
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci_pdata[1];
+ platform_device_register(&tegra_ehci2_device);
+ }
+
+ tegra_ehci3_device.dev.platform_data = &tegra_ehci_pdata[2];
+ platform_device_register(&tegra_ehci3_device);
+}
+
+static void __init spi3_pingroup_clear_tristate(void)
+{
+ /* spi3 mosi, miso, cs, clk */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LSDI, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LSDA, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LCSN, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_LSCK, TEGRA_TRI_NORMAL);
+}
+
+static void __init p852_spi_init(void)
+{
+ if (p852_sku_peripherals & P852_SKU_SPI_ENABLE) {
+ int i = 0;
+ unsigned int spi_config = 0;
+ unsigned int spi3_config =
+ (p852_spi_peripherals >> P852_SPI3_SHIFT) & P852_SPI_MASK;
+
+ for (i = 0; i < P852_MAX_SPI; i++) {
+ spi_config =
+ (p852_spi_peripherals >> (P852_SPI_SHIFT * i)) &
+ P852_SPI_MASK;
+ if (spi_config & P852_SPI_ENABLE) {
+ if (spi_config & P852_SPI_SLAVE)
+ p852_spi_devices[i]->name =
+ "tegra_spi_slave";
+ platform_device_register(p852_spi_devices[i]);
+ }
+ }
+ /* Default spi3 pingroups are in tristate */
+ if (spi3_config & P852_SPI_ENABLE)
+ spi3_pingroup_clear_tristate();
+ }
+}
+
+static void __init p852_uart_init(void)
+{
+ if (p852_sku_peripherals & P852_SKU_UART_ENABLE) {
+ int i = 0;
+ unsigned int uart_config = 0, uart8250Id = 0;
+ int debug_console = -1;
+
+ /* register the debug console as the first serial console */
+ for (i = 0; i < P852_MAX_UART; i++) {
+ uart_config =
+ (p852_uart_peripherals >> (P852_UART_SHIFT * i));
+ if (uart_config & P852_UART_DB) {
+ debug_console = i;
+ debug_uart_platform_data[0].membase =
+ IO_ADDRESS(p852_uart_bases[i]);
+ debug_uart_platform_data[0].mapbase =
+ p852_uart_bases[i];
+ debug_uart_platform_data[0].irq =
+ p852_uart_irqs[i];
+ uart8250Id++;
+ platform_device_register(&debug_uart);
+ break;
+ }
+ }
+
+ /* register remaining UARTS */
+ for (i = 0; i < P852_MAX_UART; i++) {
+ uart_config =
+ (p852_uart_peripherals >> (P852_UART_SHIFT * i)) &
+ P852_UART_MASK;
+ if ((uart_config & P852_UART_ENABLE)
+ && i != debug_console) {
+ if (uart_config & P852_UART_HS) {
+ platform_device_register
+ (p852_uart_devices[i]);
+ } else {
+ p852_8250_uart_devices[i]->id =
+ uart8250Id++;
+ platform_device_register
+ (p852_8250_uart_devices[i]);
+ }
+ }
+ }
+ }
+}
+
+static struct platform_device generic_codec_driver = {
+ .name = "generic-dit",
+};
+
+static void __init p852_flash_init(void)
+{
+ if (p852_sku_peripherals & P852_SKU_NAND_ENABLE)
+ platform_device_register(&p852_nand_device);
+
+ if (p852_sku_peripherals & P852_SKU_NOR_ENABLE) {
+ tegra_nor_device.resource[2].end = TEGRA_NOR_FLASH_BASE + SZ_64M - 1;
+ tegra_nor_device.dev.platform_data = &p852_nor_data;
+ platform_device_register(&tegra_nor_device);
+ }
+}
+
+void __init p852_common_init(void)
+{
+ tegra_clk_init_from_table(p852_clk_init_table);
+
+ p852_pinmux_init();
+
+ p852_i2c_init();
+
+ p852_regulator_init();
+
+ p852_uart_init();
+
+ p852_flash_init();
+
+ platform_add_devices(p852_devices, ARRAY_SIZE(p852_devices));
+
+ //p852_panel_init();
+
+ p852_spi_init();
+
+ p852_register_spidev();
+
+ p852_usb_init();
+
+ p852_sdhci_init();
+
+ p852_gpio_init();
+
+ p852_power_off_init();
+}
+
+void __init tegra_p852_init(void)
+{
+ switch (system_rev) {
+ case P852_SKU3:
+ p852_sku3_init();
+ break;
+ case P852_SKU13:
+ p852_sku13_init();
+ break;
+ case P852_SKU13_B00:
+ case P852_SKU13_C01:
+ p852_sku13_b00_init();
+ break;
+ case P852_SKU23:
+ p852_sku23_init();
+ break;
+ case P852_SKU23_B00:
+ p852_sku23_b00_init();
+ break;
+ case P852_SKU23_C01:
+ p852_sku23_c01_init();
+ break;
+ case P852_SKU1:
+ p852_sku1_init();
+ break;
+ case P852_SKU11:
+ case P852_SKU1_B00:
+ p852_sku1_b00_init();
+ break;
+ case P852_SKU1_C0X:
+ p852_sku1_c0x_init();
+ break;
+ case P852_SKU5_B00:
+ p852_sku5_b00_init();
+ break;
+ case P852_SKU5_C01:
+ p852_sku5_c01_init();
+ break;
+ case P852_SKU8_B00:
+ p852_sku8_b00_init();
+ break;
+ case P852_SKU8_C01:
+ p852_sku8_c00_init();
+ break;
+ case P852_SKU9_B00:
+ p852_sku9_b00_init();
+ break;
+ case P852_SKU9_C01:
+ p852_sku9_c00_init();
+ break;
+ default:
+ printk(KERN_ERR "Unknow Board Revision\n");
+ break;
+ }
+}
+
+static void __init tegra_p852_reserve(void)
+{
+ switch (system_rev) {
+ case P852_SKU3:
+ case P852_SKU5_B00:
+ case P852_SKU5_C01:
+ case P852_SKU9_B00:
+ case P852_SKU9_C01:
+ tegra_reserve(SZ_64M + SZ_16M, SZ_8M, 0);
+ break;
+ default:
+ tegra_reserve(SZ_128M, SZ_8M, 0);
+ break;
+ }
+}
+
+MACHINE_START(P852, "Tegra P852")
+ .boot_params = 0x00000100,
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_p852_reserve,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_p852_init,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/p852/board-p852.h b/arch/arm/mach-tegra/p852/board-p852.h
new file mode 100644
index 000000000000..6be80ca14d8f
--- /dev/null
+++ b/arch/arm/mach-tegra/p852/board-p852.h
@@ -0,0 +1,301 @@
+/*
+ * arch/arm/mach-tegra/board-p852.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_P852M_H
+#define _MACH_TEGRA_BOARD_P852M_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c-tegra.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/platform_data/tegra_nor.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/flash.h>
+
+#include <mach/sdhci.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/nand.h>
+#include <mach/usb_phy.h>
+#include <mach/clk.h>
+#include <mach/i2s.h>
+#include <mach/audio.h>
+
+#include "../clock.h"
+#include "../board.h"
+#include "../pm.h"
+#include "../devices.h"
+#include "../gpio-names.h"
+#include "../wakeups-t2.h"
+
+
+#define P852_SKU3 0x030000UL
+#define P852_SKU13 0x130000UL
+#define P852_SKU13_B00 0x130200UL
+#define P852_SKU13_C01 0x130401UL
+#define P852_SKU23 0x230000UL
+#define P852_SKU23_B00 0x230200UL
+#define P852_SKU23_C01 0x230401UL
+#define P852_SKU1 0x010000UL
+#define P852_SKU1_B00 0x010200UL
+#define P852_SKU1_C0X 0x010400UL
+#define P852_SKU11 0x110000UL
+#define P852_SKU5_B00 0x040200UL
+#define P852_SKU5_C01 0x050401UL
+#define P852_SKU8_B00 0x080200UL
+#define P852_SKU8_C01 0x080401UL
+#define P852_SKU9_B00 0x090200UL
+#define P852_SKU9_C01 0x090401UL
+
+int p852_regulator_init(void);
+int p852_panel_init(void);
+void p852_sdhci_init(void);
+void p852_i2c_init(void);
+void p852_i2c_set_default_clock(int adapter, unsigned long clock);
+void p852_pinmux_init(void);
+void p852_gpio_init(void);
+void p852_power_off_init(void);
+
+void p852_sku1_init(void);
+void p852_sku1_b00_init(void);
+void p852_sku1_c0x_init(void);
+void p852_sku3_init(void);
+void p852_sku5_b00_init(void);
+void p852_sku5_c01_init(void);
+void p852_sku8_b00_init(void);
+void p852_sku8_c00_init(void);
+void p852_sku9_b00_init(void);
+void p852_sku9_c00_init(void);
+void p852_sku13_init(void);
+void p852_sku13_b00_init(void);
+void p852_sku23_init(void);
+void p852_sku23_b00_init(void);
+void p852_sku23_c01_init(void);
+
+#ifndef CONFIG_P852_SKU1
+void p852_sku1_init(void);
+#endif
+#ifndef CONFIG_P852_SKU1_B00
+void p852_sku1_b00_init(void);
+#endif
+#ifndef CONFIG_P852_SKU1_C0x
+void p852_sku1_c0x_init(void);
+#endif
+#ifndef CONFIG_P852_SKU3
+void p852_sku3_init(void);
+#endif
+#ifndef CONFIG_P852_SKU5_B00
+void p852_sku5_b00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU5_C01
+void p852_sku5_c01_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU8_B00
+void p852_sku8_b00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU8_C01
+void p852_sku8_c00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU9_B00
+void p852_sku9_b00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU9_C01
+void p852_sku9_c00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU13
+void p852_sku13_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU13_B00
+void p852_sku13_b00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU23
+void p852_sku23_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU23_B00
+void p852_sku23_b00_init(void){};
+#endif
+#ifndef CONFIG_P852_SKU23_C01
+void p852_sku23_c01_init(void){};
+#endif
+
+extern unsigned int system_rev;
+extern unsigned int p852_sku_peripherals;
+extern unsigned int p852_spi_peripherals;
+extern unsigned int p852_i2s_peripherals;
+extern unsigned int p852_uart_peripherals;
+extern unsigned int p852_sdhci_peripherals;
+extern unsigned int p852_display_peripherals;
+extern unsigned int p852_i2c_peripherals;
+extern struct tegra_sdhci_platform_data p852_sdhci_platform_data[];
+extern struct platform_device tegra_8250_uarta_device;
+extern struct platform_device tegra_8250_uartb_device;
+extern struct platform_device tegra_8250_uartc_device;
+extern struct platform_device tegra_8250_uartd_device;
+extern struct platform_device tegra_8250_uarte_device;
+
+#ifdef CONFIG_TEGRA_SPI_I2S
+extern void p852_spi_i2s_init(void);
+extern struct spi_board_info tegra_spi_i2s_device;
+#endif
+
+void tegra_p852_fixup(struct machine_desc *desc,
+ struct tag *tags, char **cmdline, struct meminfo *mi);
+
+void p852_common_init(void);
+
+#define P852_SDIO3_PINMUX_ENABLE 0x01
+
+#define P852_SKU_SPI_SHIFT 0x00
+#define P852_SKU_SPI_ENABLE (1 << P852_SKU_SPI_SHIFT)
+#define P852_SKU_SPI_MASK (1 << P852_SKU_SPI_SHIFT)
+
+#define P852_SKU_I2S_SHIFT 0x01
+#define P852_SKU_I2S_ENABLE (1 << P852_SKU_I2S_SHIFT)
+#define P852_SKU_I2S_MASK (1 << P852_SKU_I2S_SHIFT)
+
+#define P852_SKU_SDHCI_SHIFT 0x02
+#define P852_SKU_SDHCI_ENABLE (1 << P852_SKU_SDHCI_SHIFT)
+#define P852_SKU_SDHCI_MASK (1 << P852_SKU_SDHCI_SHIFT)
+
+#define P852_SKU_UART_SHIFT 0x03
+#define P852_SKU_UART_ENABLE (1 << P852_SKU_UART_SHIFT)
+#define P852_SKU_UART_MASK (1 << P852_SKU_UART_SHIFT)
+
+#define P852_SKU_NAND_SHIFT 0x04
+#define P852_SKU_NAND_ENABLE (1 << P852_SKU_NAND_SHIFT)
+#define P852_SKU_NAND_MASK (1 << P852_SKU_NAND_SHIFT)
+
+#define P852_SKU_NOR_SHIFT 0x05
+#define P852_SKU_NOR_ENABLE (1 << P852_SKU_NOR_SHIFT)
+#define P852_SKU_NOR_MASK (1 << P852_SKU_NOR_SHIFT)
+
+#define P852_SKU_DISPLAY_SHIFT 0x06
+#define P852_SKU_DISPLAY_ENABLE (1 << P852_SKU_DISPLAY_SHIFT)
+#define P852_SKU_DISPLAY_MASK (1 << P852_SKU_DISPLAY_SHIFT)
+
+#define P852_SKU_ULPI_SHIFT 0x07
+#define P852_SKU_ULPI_DISABLE (1 << P852_SKU_ULPI_SHIFT)
+
+#define P852_SKU_I2C_SHIFT 0x08
+#define P852_SKU_I2C_ENABLE (1 << P852_SKU_I2C_SHIFT)
+#define P852_SKU_I2C_MASK (1 << P852_SKU_I2C_SHIFT)
+
+#define P852_MAX_DISP 0x2
+#define P852_DISP_SHIFT 0x16
+#define P852_DISPA_SHIFT 0x0
+#define P852_DISPB_SHIFT 0x16
+
+#define P852_DISP_MASK 0x1
+#define P852_DISP_ENABLE 0x1
+#define P852_DISPA_MASK (P852_DISP_MASK << P852_DISPA_SHIFT)
+#define P852_DISPB_MASK (P852_DISP_MASK << P852_DISPB_SHIFT)
+
+#define P852_MAX_SPI 0x04
+#define P852_SPI_SHIFT 0x03
+#define P852_SPI1_SHIFT 0x00
+#define P852_SPI2_SHIFT 0x03
+#define P852_SPI3_SHIFT 0x06
+#define P852_SPI4_SHIFT 0x09
+
+#define P852_SPI_MASK 0x07
+#define P852_SPI1_MASK (P852_SPI_MASK << P852_SPI1_SHIFT)
+#define P852_SPI2_MASK (P852_SPI_MASK << P852_SPI2_SHIFT)
+#define P852_SPI3_MASK (P852_SPI_MASK << P852_SPI3_SHIFT)
+#define P852_SPI4_MASK (P852_SPI_MASK << P852_SPI4_SHIFT)
+
+#define P852_SPI_ENABLE 0x01
+#define P852_SPI_MASTER 0x02
+#define P852_SPI_SLAVE 0x04
+
+#define P852_I2S_SHIFT 0x05
+#define P852_I2S1_SHIFT 0x00
+#define P852_I2S2_SHIFT 0x05
+
+#define P852_I2S_MASK 0x1F
+#define P852_I2S1_MASK (P852_I2S_MASK << P852_I2S1_SHIFT)
+#define P852_I2S2_MASK (P852_I2S_MASK << P852_I2S2_SHIFT)
+
+#define P852_I2S_ENABLE 0x10
+#define P852_I2S_TDM 0x08
+#define P852_MAX_SDHCI 0x04
+#define P852_SDHCI_SHIFT 0x04
+#define P852_SDHCI1_SHIFT 0x00
+#define P852_SDHCI2_SHIFT 0x04
+#define P852_SDHCI3_SHIFT 0x08
+#define P852_SDHCI4_SHIFT 0x0C
+
+#define P852_SDHCI_MASK 0x0F
+#define P852_SDHCI1_MASK (P852_SDHCI_MASK << P852_SDHCI1_SHIFT)
+#define P852_SDHCI2_MASK (P852_SDHCI_MASK << P852_SDHCI2_SHIFT)
+#define P852_SDHCI3_MASK (P852_SDHCI_MASK << P852_SDHCI3_SHIFT)
+#define P852_SDHCI4_MASK (P852_SDHCI_MASK << P852_SDHCI4_SHIFT)
+
+#define P852_SDHCI_ENABLE 0x01
+#define P852_SDHCI_CD_EN 0x02
+#define P852_SDHCI_WP_EN 0x04
+#define P852_SDHCI_PW_EN 0x08
+
+#define P852_UART_SHIFT 0x04
+#define P852_UARTA_SHIFT 0x00
+#define P852_UARTB_SHIFT 0x04
+#define P852_UARTC_SHIFT 0x08
+#define P852_UARTD_SHIFT 0x0C
+
+#define P852_UART_MASK 0x0F
+#define P852_UARTA_MASK (P852_UART_MASK << P852_UARTA_SHIFT)
+#define P852_UARTB_MASK (P852_UART_MASK << P852_UARTB_SHIFT)
+#define P852_UARTC_MASK (P852_UART_MASK << P852_UARTC_SHIFT)
+#define P852_UARTD_MASK (P852_UART_MASK << P852_UARTD_SHIFT)
+
+#define P852_MAX_UART 0x4
+#define P852_UART_ALT_PIN_CFG 0x8
+#define P852_UART_ENABLE 0x4
+#define P852_UART_DB 0x1
+#define P852_UART_HS 0x2
+
+#define P852_MAX_I2C 0x4
+#define P852_I2C_SHIFT 0x01
+#define P852_I2C1_SHIFT 0x00
+#define P852_I2C2_SHIFT 0x01
+#define P852_I2C3_SHIFT 0x02
+#define P852_I2C4_SHIFT 0x03
+
+
+#define P852_I2C_MASK 0x01
+#define P852_I2C1_MASK (P852_I2C_MASK << P852_I2C1_SHIFT)
+#define P852_I2C2_MASK (P852_I2C_MASK << P852_I2C2_SHIFT)
+#define P852_I2C3_MASK (P852_I2C_MASK << P852_I2C3_SHIFT)
+#define P852_I2C4_MASK (P852_I2C_MASK << P852_I2C4_SHIFT)
+
+#define P852_I2C_ENABLE 0x01
+
+#endif
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index f1f699d86c32..c665220f5516 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -1,13 +1,13 @@
/*
- * arch/arm/mach-tegra/pci.c
+ * arch/arm/mach-tegra/pcie.c
*
- * PCIe host controller driver for TEGRA(2) SOCs
+ * PCIe host controller driver for TEGRA SOCs
*
* Copyright (c) 2010, CompuLab, Ltd.
* Author: Mike Rapoport <mike@compulab.co.il>
*
* Based on NVIDIA PCIe driver
- * Copyright (c) 2008-2009, NVIDIA Corporation.
+ * Copyright (c) 2008-2011, NVIDIA Corporation.
*
* Bits taken from arch/arm/mach-dove/pcie.c
*
@@ -32,6 +32,9 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <asm/sizes.h>
#include <asm/mach/pci.h>
@@ -40,120 +43,172 @@
#include <mach/iomap.h>
#include <mach/clk.h>
#include <mach/powergate.h>
+#include <mach/pci.h>
-/* register definitions */
-#define AFI_OFFSET 0x3800
-#define PADS_OFFSET 0x3000
-#define RP0_OFFSET 0x0000
-#define RP1_OFFSET 0x1000
-
-#define AFI_AXI_BAR0_SZ 0x00
-#define AFI_AXI_BAR1_SZ 0x04
-#define AFI_AXI_BAR2_SZ 0x08
-#define AFI_AXI_BAR3_SZ 0x0c
-#define AFI_AXI_BAR4_SZ 0x10
-#define AFI_AXI_BAR5_SZ 0x14
-
-#define AFI_AXI_BAR0_START 0x18
-#define AFI_AXI_BAR1_START 0x1c
-#define AFI_AXI_BAR2_START 0x20
-#define AFI_AXI_BAR3_START 0x24
-#define AFI_AXI_BAR4_START 0x28
-#define AFI_AXI_BAR5_START 0x2c
-
-#define AFI_FPCI_BAR0 0x30
-#define AFI_FPCI_BAR1 0x34
-#define AFI_FPCI_BAR2 0x38
-#define AFI_FPCI_BAR3 0x3c
-#define AFI_FPCI_BAR4 0x40
-#define AFI_FPCI_BAR5 0x44
-
-#define AFI_CACHE_BAR0_SZ 0x48
-#define AFI_CACHE_BAR0_ST 0x4c
-#define AFI_CACHE_BAR1_SZ 0x50
-#define AFI_CACHE_BAR1_ST 0x54
-
-#define AFI_MSI_BAR_SZ 0x60
-#define AFI_MSI_FPCI_BAR_ST 0x64
-#define AFI_MSI_AXI_BAR_ST 0x68
-
-#define AFI_CONFIGURATION 0xac
-#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
-
-#define AFI_FPCI_ERROR_MASKS 0xb0
-
-#define AFI_INTR_MASK 0xb4
-#define AFI_INTR_MASK_INT_MASK (1 << 0)
-#define AFI_INTR_MASK_MSI_MASK (1 << 8)
-
-#define AFI_INTR_CODE 0xb8
-#define AFI_INTR_CODE_MASK 0xf
-#define AFI_INTR_MASTER_ABORT 4
-#define AFI_INTR_LEGACY 6
-
-#define AFI_INTR_SIGNATURE 0xbc
-#define AFI_SM_INTR_ENABLE 0xc4
-
-#define AFI_AFI_INTR_ENABLE 0xc8
-#define AFI_INTR_EN_INI_SLVERR (1 << 0)
-#define AFI_INTR_EN_INI_DECERR (1 << 1)
-#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
-#define AFI_INTR_EN_TGT_DECERR (1 << 3)
-#define AFI_INTR_EN_TGT_WRERR (1 << 4)
-#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
-#define AFI_INTR_EN_AXI_DECERR (1 << 6)
-#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
-
-#define AFI_PCIE_CONFIG 0x0f8
-#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
-#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
-#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
-#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
-#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
-
-#define AFI_FUSE 0x104
-#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
-
-#define AFI_PEX0_CTRL 0x110
-#define AFI_PEX1_CTRL 0x118
-#define AFI_PEX_CTRL_RST (1 << 0)
-#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
-
-#define RP_VEND_XP 0x00000F00
-#define RP_VEND_XP_DL_UP (1 << 30)
-
-#define RP_LINK_CONTROL_STATUS 0x00000090
-#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
-
-#define PADS_CTL_SEL 0x0000009C
-
-#define PADS_CTL 0x000000A0
-#define PADS_CTL_IDDQ_1L (1 << 0)
-#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
-#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
-
-#define PADS_PLL_CTL 0x000000B8
-#define PADS_PLL_CTL_RST_B4SM (1 << 1)
-#define PADS_PLL_CTL_LOCKDET (1 << 8)
-#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
-#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
-#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
-#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
-#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
-#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
-#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
+#define MSELECT_CONFIG_0_ENABLE_PCIE_APERTURE 5
-/* PMC access is required for PCIE xclk (un)clamping */
-#define PMC_SCRATCH42 0x144
-#define PMC_SCRATCH42_PCX_CLAMP (1 << 0)
+#define PINMUX_AUX_PEX_L0_RST_N_0 0x33bc
+#define PINMUX_AUX_PEX_L0_RST_N_0_E_INPUT 5
+#define PINMUX_AUX_PEX_L0_RST_N_0_E_INPUT_ENABLE 1
-static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+#define PINMUX_AUX_PEX_L1_RST_N_0 0x33cc
+#define PINMUX_AUX_PEX_L1_RST_N_0_E_INPUT 5
+#define PINMUX_AUX_PEX_L1_RST_N_0_E_INPUT_ENABLE 1
-#define pmc_writel(value, reg) \
- __raw_writel(value, (u32)reg_pmc_base + (reg))
-#define pmc_readl(reg) \
- __raw_readl((u32)reg_pmc_base + (reg))
+#define PINMUX_AUX_PEX_L2_RST_N_0 0x33d8
+#define PINMUX_AUX_PEX_L2_RST_N_0_E_INPUT 5
+#define PINMUX_AUX_PEX_L2_RST_N_0_E_INPUT_ENABLE 1
+#define AFI_PEX0_CTRL_0_PEX0_CLKREQ_EN 1
+#define NV_PCIE2_PADS_REFCLK_CFG1 0x000000cc
+#define APBDEV_PMC_SCRATCH42_0_PCX_CLAMP_MASK 0x1
+
+
+#define AFI_MSI_VEC0_0 0x6c
+#define AFI_MSI_VEC1_0 0x70
+#define AFI_MSI_VEC2_0 0x74
+#define AFI_MSI_VEC3_0 0x78
+#define AFI_MSI_VEC4_0 0x7c
+#define AFI_MSI_VEC5_0 0x80
+#define AFI_MSI_VEC6_0 0x84
+#define AFI_MSI_VEC7_0 0x88
+
+#define AFI_MSI_EN_VEC0_0 0x8c
+#define AFI_MSI_EN_VEC1_0 0x90
+#define AFI_MSI_EN_VEC2_0 0x94
+#define AFI_MSI_EN_VEC3_0 0x98
+#define AFI_MSI_EN_VEC4_0 0x9c
+#define AFI_MSI_EN_VEC5_0 0xa0
+#define AFI_MSI_EN_VEC6_0 0xa4
+#define AFI_MSI_EN_VEC7_0 0xa8
+
+#define AFI_MSI_FPCI_BAR_ST_0 0x64
+#define AFI_MSI_BAR_SZ_0 0x60
+#define AFI_MSI_AXI_BAR_ST_0 0x68
+#define AFI_INTR_MASK_0 0xb4
+#define AFI_INTR_MASK_0_INT_MASK 0
+#define AFI_INTR_MASK_0_MSI_MASK 8
+
+
+#define AFI_PEXBIAS_CTRL_0 0x168
+
+/* register definitions */
+#define AFI_OFFSET 0x3800
+#define PADS_OFFSET 0x3000
+#define RP0_OFFSET 0x0000
+#define RP1_OFFSET 0x1000
+#define RP2_OFFSET 0x4000
+
+#define AFI_AXI_BAR0_SZ 0x00
+#define AFI_AXI_BAR1_SZ 0x04
+#define AFI_AXI_BAR2_SZ 0x08
+#define AFI_AXI_BAR3_SZ 0x0c
+#define AFI_AXI_BAR4_SZ 0x10
+#define AFI_AXI_BAR5_SZ 0x14
+
+#define AFI_AXI_BAR0_START 0x18
+#define AFI_AXI_BAR1_START 0x1c
+#define AFI_AXI_BAR2_START 0x20
+#define AFI_AXI_BAR3_START 0x24
+#define AFI_AXI_BAR4_START 0x28
+#define AFI_AXI_BAR5_START 0x2c
+
+#define AFI_FPCI_BAR0 0x30
+#define AFI_FPCI_BAR1 0x34
+#define AFI_FPCI_BAR2 0x38
+#define AFI_FPCI_BAR3 0x3c
+#define AFI_FPCI_BAR4 0x40
+#define AFI_FPCI_BAR5 0x44
+
+#define AFI_CACHE_BAR0_SZ 0x48
+#define AFI_CACHE_BAR0_ST 0x4c
+#define AFI_CACHE_BAR1_SZ 0x50
+#define AFI_CACHE_BAR1_ST 0x54
+
+#define AFI_MSI_BAR_SZ 0x60
+#define AFI_MSI_FPCI_BAR_ST 0x64
+#define AFI_MSI_AXI_BAR_ST 0x68
+
+#define AFI_CONFIGURATION 0xac
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+
+#define AFI_FPCI_ERROR_MASKS 0xb0
+
+#define AFI_INTR_MASK 0xb4
+#define AFI_INTR_MASK_INT_MASK (1 << 0)
+#define AFI_INTR_MASK_MSI_MASK (1 << 8)
+
+#define AFI_INTR_CODE 0xb8
+#define AFI_INTR_CODE_MASK 0xf
+#define AFI_INTR_MASTER_ABORT 4
+#define AFI_INTR_LEGACY 6
+
+#define AFI_INTR_SIGNATURE 0xbc
+#define AFI_SM_INTR_ENABLE 0xc4
+
+#define AFI_AFI_INTR_ENABLE 0xc8
+#define AFI_INTR_EN_INI_SLVERR (1 << 0)
+#define AFI_INTR_EN_INI_DECERR (1 << 1)
+#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
+#define AFI_INTR_EN_TGT_DECERR (1 << 3)
+#define AFI_INTR_EN_TGT_WRERR (1 << 4)
+#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
+#define AFI_INTR_EN_AXI_DECERR (1 << 6)
+#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
+#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
+
+#define AFI_PCIE_CONFIG 0x0f8
+#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
+#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
+#define AFI_PCIE_CONFIG_PCIEC2_DISABLE_DEVICE (1 << 3)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+
+#define AFI_FUSE 0x104
+#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
+
+#define AFI_PEX0_CTRL 0x110
+#define AFI_PEX1_CTRL 0x118
+#define AFI_PEX2_CTRL 0x128
+#define AFI_PEX_CTRL_RST (1 << 0)
+#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+
+#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP_DL_UP (1 << 30)
+
+#define RP_LINK_CONTROL_STATUS 0x00000090
+#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+#define PADS_CTL_SEL 0x0000009C
+
+#define PADS_CTL 0x000000A0
+#define PADS_CTL_IDDQ_1L (1 << 0)
+#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
+#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define PADS_PLL_CTL 0x000000B8
+#else
+#define PADS_PLL_CTL 0x000000B4
+#endif
+#define PADS_PLL_CTL_RST_B4SM (1 << 1)
+#define PADS_PLL_CTL_LOCKDET (1 << 8)
+#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
+#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
+#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
+#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
+
+/* PMC access is required for PCIE xclk (un)clamping */
+#define PMC_SCRATCH42 0x144
+#define PMC_SCRATCH42_PCX_CLAMP (1 << 0)
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/*
* Tegra2 defines 1GB in the AXI address map for PCIe.
*
@@ -184,14 +239,61 @@ static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
#define MEM_SIZE_0 SZ_128M
#define MEM_BASE_1 (MEM_BASE_0 + MEM_SIZE_0)
#define MEM_SIZE_1 SZ_128M
+#define MEM_SIZE (MEM_SIZE_0 + MEM_SIZE_1)
#define PREFETCH_MEM_BASE_0 (MEM_BASE_1 + MEM_SIZE_1)
#define PREFETCH_MEM_SIZE_0 SZ_128M
#define PREFETCH_MEM_BASE_1 (PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
#define PREFETCH_MEM_SIZE_1 SZ_128M
+#define PREFETCH_MEM_SIZE (PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1)
+
+#else
-#define PCIE_CONF_BUS(b) ((b) << 16)
-#define PCIE_CONF_DEV(d) ((d) << 11)
-#define PCIE_CONF_FUNC(f) ((f) << 8)
+/*
+ * AXI address map for the PCIe aperture; defines 1GB in the AXI
+ * address map for PCIe.
+ *
+ * That address space is split into different regions, with sizes and
+ * offsets as follows. Except for the Register space, SW is free to slice the
+ * regions as it chooses.
+ *
+ * The split below seems to work fine for now.
+ *
+ * 0x0000_0000 to 0x00ff_ffff - Register space 16MB.
+ * 0x0100_0000 to 0x01ff_ffff - Config space 16MB.
+ * 0x0200_0000 to 0x02ff_ffff - Extended config space 16MB.
+ * 0x0300_0000 to 0x03ff_ffff - Downstream IO space
+ * ... will be filled with other BARs like MSI/upstream IO etc.
+ * 0x1000_0000 to 0x1fff_ffff - non-prefetchable memory aperture
+ * 0x2000_0000 to 0x3fff_ffff - Prefetchable memory aperture
+ *
+ * The Config and Extended config sizes are chosen to support a
+ * maximum of 256 devices, which is good enough for all the current
+ * use cases.
+ *
+ */
+#define TEGRA_PCIE_BASE 0x00000000
+
+#define PCIE_REGS_SZ SZ_16M
+#define PCIE_CFG_OFF PCIE_REGS_SZ
+#define PCIE_CFG_SZ SZ_16M
+#define PCIE_EXT_CFG_OFF (PCIE_CFG_SZ + PCIE_CFG_OFF)
+#define PCIE_EXT_CFG_SZ SZ_16M
+/* During boot, only the register/config and extended config apertures are
+ * mapped. The rest are mapped on demand by the PCI device drivers.
+ */
+#define PCIE_IOMAP_SZ (PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)
+
+#define MMIO_BASE (TEGRA_PCIE_BASE + SZ_48M)
+#define MMIO_SIZE SZ_1M
+#define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M)
+#define MEM_SIZE SZ_256M
+#define PREFETCH_MEM_BASE_0 (MEM_BASE_0 + MEM_SIZE)
+#define PREFETCH_MEM_SIZE SZ_512M
+#endif
+
+#define PCIE_CONF_BUS(b) ((b) << 16)
+#define PCIE_CONF_DEV(d) ((d) << 11)
+#define PCIE_CONF_FUNC(f) ((f) << 8)
#define PCIE_CONF_REG(r) \
(((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))
@@ -209,18 +311,32 @@ struct tegra_pcie_port {
};
struct tegra_pcie_info {
- struct tegra_pcie_port port[2];
+ struct tegra_pcie_port port[MAX_PCIE_SUPPORTED_PORTS];
int num_ports;
+ void __iomem *reg_clk_base;
void __iomem *regs;
struct resource res_mmio;
+ int power_rails_enabled;
+ int pcie_power_enabled;
- struct clk *pex_clk;
- struct clk *afi_clk;
+ struct regulator *regulator_hvdd;
+ struct regulator *regulator_pexio;
+ struct regulator *regulator_avdd_plle;
struct clk *pcie_xclk;
struct clk *pll_e;
+ struct clk *clk_cml0;
+ struct clk *clk_tera_pcie_cml;
+ struct tegra_pci_platform_data *plat_data;
};
+#define pmc_writel(value, reg) \
+ __raw_writel(value, (u32)reg_pmc_base + (reg))
+#define pmc_readl(reg) \
+ __raw_readl((u32)reg_pmc_base + (reg))
+
+static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+
static struct tegra_pcie_info tegra_pcie = {
.res_mmio = {
.name = "PCI IO",
@@ -304,6 +420,19 @@ static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
u32 mask;
u32 tmp;
+ /* The PCIe core is supposed to enable bus mastering and I/O/memory
+ * responses; if they are not already set, enable the corresponding
+ * bits in PCI_COMMAND.
+ */
+ if (where == PCI_COMMAND) {
+ if (!(val & PCI_COMMAND_IO))
+ val |= PCI_COMMAND_IO;
+ if (!(val & PCI_COMMAND_MEMORY))
+ val |= PCI_COMMAND_MEMORY;
+ if (!(val & PCI_COMMAND_MASTER))
+ val |= PCI_COMMAND_MASTER;
+ if (!(val & PCI_COMMAND_SERR))
+ val |= PCI_COMMAND_SERR;
+ }
if (pp) {
if (devfn != 0)
@@ -359,8 +488,14 @@ static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
+#else
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
+#endif
/* Tegra PCIE requires relaxed ordering */
static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
@@ -404,8 +539,11 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
pp->res[0].end = IO_SPACE_LIMIT;
}
pp->res[0].flags = IORESOURCE_IO;
- if (request_resource(&ioport_resource, &pp->res[0]))
- panic("Request PCIe IO resource failed\n");
+ if (request_resource(&ioport_resource, &pp->res[0])) {
+ pr_err("Request PCIe IO resource failed\n");
+ /* return failure */
+ return -EBUSY;
+ }
sys->resource[0] = &pp->res[0];
/*
@@ -415,16 +553,14 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
"PCIe %d MEM", pp->index);
pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
pp->res[1].name = pp->mem_space_name;
- if (pp->index == 0) {
- pp->res[1].start = MEM_BASE_0;
- pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
- } else {
- pp->res[1].start = MEM_BASE_1;
- pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
- }
+ pp->res[1].start = MEM_BASE_0;
+ pp->res[1].end = pp->res[1].start + MEM_SIZE - 1;
pp->res[1].flags = IORESOURCE_MEM;
- if (request_resource(&iomem_resource, &pp->res[1]))
- panic("Request PCIe Memory resource failed\n");
+ if (request_resource(&iomem_resource, &pp->res[1])) {
+ pr_err("Request PCIe Memory resource failed\n");
+ /* return failure */
+ return -EBUSY;
+ }
sys->resource[1] = &pp->res[1];
/*
@@ -434,18 +570,15 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
"PCIe %d PREFETCH MEM", pp->index);
pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
pp->res[2].name = pp->prefetch_space_name;
- if (pp->index == 0) {
- pp->res[2].start = PREFETCH_MEM_BASE_0;
- pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
- } else {
- pp->res[2].start = PREFETCH_MEM_BASE_1;
- pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
- }
+ pp->res[2].start = PREFETCH_MEM_BASE_0;
+ pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE - 1;
pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (request_resource(&iomem_resource, &pp->res[2]))
- panic("Request PCIe Prefetch Memory resource failed\n");
+ if (request_resource(&iomem_resource, &pp->res[2])) {
+ pr_err("Request PCIe Prefetch Memory resource failed\n");
+ /* return failure */
+ return -EBUSY;
+ }
sys->resource[2] = &pp->res[2];
-
return 1;
}
@@ -469,7 +602,7 @@ static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
}
static struct hw_pci tegra_pcie_hw __initdata = {
- .nr_controllers = 2,
+ .nr_controllers = MAX_PCIE_SUPPORTED_PORTS,
.setup = tegra_pcie_setup,
.scan = tegra_pcie_scan_bus,
.swizzle = pci_std_swizzle,
@@ -508,13 +641,17 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* happen a lot during enumeration
*/
if (code == AFI_INTR_MASTER_ABORT)
- pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature);
+ pr_debug("PCIE: %s, signature: %08x\n",
+ err_msg[code], signature);
else
pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);
return IRQ_HANDLED;
}
+/*
+ * PCIe support functions
+ */
static void tegra_pcie_setup_translations(void)
{
u32 fpci_bar;
@@ -547,7 +684,7 @@ static void tegra_pcie_setup_translations(void)
/* Bar 3: prefetchable memory BAR */
fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
- size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1;
+ size = PREFETCH_MEM_SIZE;
axi_address = PREFETCH_MEM_BASE_0;
afi_writel(axi_address, AFI_AXI_BAR3_START);
afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
@@ -555,7 +692,7 @@ static void tegra_pcie_setup_translations(void)
/* Bar 4: non prefetchable memory BAR */
fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1;
- size = MEM_SIZE_0 + MEM_SIZE_1;
+ size = MEM_SIZE;
axi_address = MEM_BASE_0;
afi_writel(axi_address, AFI_AXI_BAR4_START);
afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
@@ -586,10 +723,21 @@ static void tegra_pcie_enable_controller(void)
{
u32 val, reg;
int i;
+ void __iomem *reg_apb_misc_base;
+ void __iomem *reg_mselect_base;
+ reg_apb_misc_base = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+ reg_mselect_base = IO_ADDRESS(TEGRA_MSELECT_BASE);
+
+ /* select the PCIE APERTURE in MSELECT config */
+ reg = readl(reg_mselect_base);
+ reg |= 1 << MSELECT_CONFIG_0_ENABLE_PCIE_APERTURE;
+ writel(reg, reg_mselect_base);
/* Enable slot clock and pulse the reset signals */
- for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) {
- val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN;
+ for (i = 0, reg = AFI_PEX0_CTRL; i < MAX_PCIE_SUPPORTED_PORTS;
+ i++, reg += (i*8)) {
+ val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN |
+ (1 << AFI_PEX0_CTRL_0_PEX0_CLKREQ_EN);
afi_writel(val, reg);
val &= ~AFI_PEX_CTRL_RST;
afi_writel(val, reg);
@@ -597,13 +745,19 @@ static void tegra_pcie_enable_controller(void)
val = afi_readl(reg) | AFI_PEX_CTRL_RST;
afi_writel(val, reg);
}
+ afi_writel(0, AFI_PEXBIAS_CTRL_0);
/* Enable dual controller and both ports */
val = afi_readl(AFI_PCIE_CONFIG);
val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
+ AFI_PCIE_CONFIG_PCIEC2_DISABLE_DEVICE |
AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+#else
+ val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
+#endif
afi_writel(val, AFI_PCIE_CONFIG);
val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
@@ -622,7 +776,12 @@ static void tegra_pcie_enable_controller(void)
*/
val = pads_readl(PADS_PLL_CTL);
val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
+#else
+ val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML |
+ PADS_PLL_CTL_TXCLKREF_BUF_EN);
+#endif
pads_writel(val, PADS_PLL_CTL);
/* take PLL out of reset */
@@ -634,6 +793,7 @@ static void tegra_pcie_enable_controller(void)
* This doesn't exist in the documentation
*/
pads_writel(0xfa5cfa5c, 0xc8);
+ pads_writel(0x0000FA5C, NV_PCIE2_PADS_REFCLK_CFG1);
/* Wait for the PLL to lock */
do {
@@ -658,7 +818,8 @@ static void tegra_pcie_enable_controller(void)
val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
- AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR);
+ AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR |
+ AFI_INTR_EN_PRSNT_SENSE);
afi_writel(val, AFI_AFI_INTR_ENABLE);
afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);
@@ -683,93 +844,199 @@ static void tegra_pcie_xclk_clamp(bool clamp)
pmc_writel(reg, PMC_SCRATCH42);
}
-static void tegra_pcie_power_off(void)
+static int tegra_pci_enable_regulators(void)
{
- tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
- tegra_periph_reset_assert(tegra_pcie.afi_clk);
- tegra_periph_reset_assert(tegra_pcie.pex_clk);
+ if (tegra_pcie.power_rails_enabled)
+ return 0;
+ if (tegra_pcie.regulator_hvdd == NULL) {
+ printk(KERN_INFO "PCIE.C: %s : regulator hvdd_pex\n",
+ __func__);
+ tegra_pcie.regulator_hvdd =
+ regulator_get(NULL, "hvdd_pex");
+ if (IS_ERR_OR_NULL(tegra_pcie.regulator_hvdd)) {
+ pr_err("%s: unable to get hvdd_pex regulator\n",
+ __func__);
+ tegra_pcie.regulator_hvdd = 0;
+ }
+ }
+
+ if (tegra_pcie.regulator_pexio == NULL) {
+ printk(KERN_INFO "PCIE.C: %s : regulator pexio\n", __func__);
+ tegra_pcie.regulator_pexio =
+ regulator_get(NULL, "vdd_pexb");
+ if (IS_ERR_OR_NULL(tegra_pcie.regulator_pexio)) {
+ pr_err("%s: unable to get pexio regulator\n", __func__);
+ tegra_pcie.regulator_pexio = 0;
+ }
+ }
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
- tegra_pcie_xclk_clamp(true);
+ /* SATA and PCIe share the same PLLE. In the default configuration,
+ * AVDD_PLLE is set up together with SATA, so on the default board
+ * (LDO2) AVDD_PLLE has to be turned on.
+ */
+ if (tegra_pcie.regulator_avdd_plle == NULL) {
+ printk(KERN_INFO "PCIE.C: %s : regulator avdd_plle\n",
+ __func__);
+ tegra_pcie.regulator_avdd_plle = regulator_get(NULL,
+ "avdd_plle");
+ if (IS_ERR_OR_NULL(tegra_pcie.regulator_avdd_plle)) {
+ pr_err("%s: unable to get avdd_plle regulator\n",
+ __func__);
+ tegra_pcie.regulator_avdd_plle = 0;
+ }
+ }
+ if (tegra_pcie.regulator_hvdd)
+ regulator_enable(tegra_pcie.regulator_hvdd);
+ if (tegra_pcie.regulator_pexio)
+ regulator_enable(tegra_pcie.regulator_pexio);
+ if (tegra_pcie.regulator_avdd_plle)
+ regulator_enable(tegra_pcie.regulator_avdd_plle);
+
+ tegra_pcie.power_rails_enabled = 1;
+
+ return 0;
}
-static int tegra_pcie_power_regate(void)
+static int tegra_pci_disable_regulators(void)
{
- int err;
-
- tegra_pcie_power_off();
+ int err = 0;
+ if (tegra_pcie.power_rails_enabled == 0)
+ goto err_exit;
+ if (tegra_pcie.regulator_hvdd)
+ err = regulator_disable(tegra_pcie.regulator_hvdd);
+ if (err)
+ goto err_exit;
+ if (tegra_pcie.regulator_pexio)
+ err = regulator_disable(tegra_pcie.regulator_pexio);
+ if (err)
+ goto err_exit;
+ if (tegra_pcie.regulator_avdd_plle)
+ err = regulator_disable(tegra_pcie.regulator_avdd_plle);
+ tegra_pcie.power_rails_enabled = 0;
+err_exit:
+ return err;
+}
- tegra_pcie_xclk_clamp(true);
+static int tegra_pcie_power_on(void)
+{
+ int err = 0;
+ if (tegra_pcie.pcie_power_enabled)
+ return 0;
+ err = tegra_pci_enable_regulators();
+ if (err)
+ goto err_exit;
+ err = tegra_unpowergate_partition_with_clk_on(TEGRA_POWERGATE_PCIE);
+ if (err)
+ goto err_exit;
+ if (tegra_pcie.clk_cml0)
+ clk_enable(tegra_pcie.clk_cml0);
+ if (tegra_pcie.clk_tera_pcie_cml)
+ clk_enable(tegra_pcie.clk_tera_pcie_cml);
+ if (tegra_pcie.pll_e)
+ clk_enable(tegra_pcie.pll_e);
+
+ tegra_pcie.pcie_power_enabled = 1;
+err_exit:
+ return err;
+}
- tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
- tegra_periph_reset_assert(tegra_pcie.afi_clk);
+static int tegra_pcie_power_off(void)
+{
+ int err = 0;
+ if (tegra_pcie.pcie_power_enabled == 0)
+ return 0;
+ err = tegra_powergate_partition_with_clk_off(TEGRA_POWERGATE_PCIE);
+ if (err)
+ goto err_exit;
+ if (tegra_pcie.clk_cml0)
+ clk_disable(tegra_pcie.clk_cml0);
+ if (tegra_pcie.clk_tera_pcie_cml)
+ clk_disable(tegra_pcie.clk_tera_pcie_cml);
+ if (tegra_pcie.pll_e)
+ clk_disable(tegra_pcie.pll_e);
+ err = tegra_pci_disable_regulators();
+
+ tegra_pcie.pcie_power_enabled = 0;
+err_exit:
+ return err;
+}
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- tegra_pcie.pex_clk);
+static int tegra_pcie_power_regate(void)
+{
+ int err;
+ err = tegra_unpowergate_partition_with_clk_on(TEGRA_POWERGATE_PCIE);
if (err) {
pr_err("PCIE: powerup sequence failed: %d\n", err);
return err;
}
-
- tegra_periph_reset_deassert(tegra_pcie.afi_clk);
-
- tegra_pcie_xclk_clamp(false);
-
- clk_enable(tegra_pcie.afi_clk);
- clk_enable(tegra_pcie.pex_clk);
+ tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
return clk_enable(tegra_pcie.pll_e);
}
static int tegra_pcie_clocks_get(void)
{
- int err;
-
- tegra_pcie.pex_clk = clk_get(NULL, "pex");
- if (IS_ERR(tegra_pcie.pex_clk))
- return PTR_ERR(tegra_pcie.pex_clk);
-
- tegra_pcie.afi_clk = clk_get(NULL, "afi");
- if (IS_ERR(tegra_pcie.afi_clk)) {
- err = PTR_ERR(tegra_pcie.afi_clk);
- goto err_afi_clk;
+ /* get the PCIEXCLK */
+ tegra_pcie.pcie_xclk = clk_get(NULL, "pciex");
+ if (IS_ERR_OR_NULL(tegra_pcie.pcie_xclk)) {
+ pr_err("%s: unable to get PCIE Xclock\n", __func__);
+ goto error_exit;
}
+ tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
+ if (IS_ERR_OR_NULL(tegra_pcie.pll_e)) {
+ pr_err("%s: unable to get PLLE\n", __func__);
+ goto error_exit;
+ }
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
- tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
- if (IS_ERR(tegra_pcie.pcie_xclk)) {
- err = PTR_ERR(tegra_pcie.pcie_xclk);
- goto err_pcie_xclk;
+ tegra_pcie.clk_cml0 = clk_get_sys(NULL, "cml0");
+ if (IS_ERR_OR_NULL(tegra_pcie.clk_cml0)) {
+ pr_err("%s: unable to get cml0\n", __func__);
+ goto error_exit;
}
- tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
- if (IS_ERR(tegra_pcie.pll_e)) {
- err = PTR_ERR(tegra_pcie.pll_e);
- goto err_pll_e;
+ tegra_pcie.clk_tera_pcie_cml = clk_get_sys("tegra_pcie", "cml");
+ if (IS_ERR_OR_NULL(tegra_pcie.clk_tera_pcie_cml)) {
+ pr_err("%s: unable to get cml\n", __func__);
+ goto error_exit;
}
+ clk_enable(tegra_pcie.clk_cml0);
+ clk_enable(tegra_pcie.clk_tera_pcie_cml);
+#endif
return 0;
-
-err_pll_e:
- clk_put(tegra_pcie.pcie_xclk);
-err_pcie_xclk:
- clk_put(tegra_pcie.afi_clk);
-err_afi_clk:
- clk_put(tegra_pcie.pex_clk);
-
- return err;
+error_exit:
+ if (tegra_pcie.clk_cml0)
+ clk_put(tegra_pcie.clk_cml0);
+ if (tegra_pcie.clk_tera_pcie_cml)
+ clk_put(tegra_pcie.clk_tera_pcie_cml);
+ if (tegra_pcie.pcie_xclk)
+ clk_put(tegra_pcie.pcie_xclk);
+ if (tegra_pcie.pll_e)
+ clk_put(tegra_pcie.pll_e);
+ return -EINVAL;
}
static void tegra_pcie_clocks_put(void)
{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ clk_put(tegra_pcie.clk_cml0);
+ clk_put(tegra_pcie.clk_tera_pcie_cml);
+#endif
clk_put(tegra_pcie.pll_e);
clk_put(tegra_pcie.pcie_xclk);
- clk_put(tegra_pcie.afi_clk);
- clk_put(tegra_pcie.pex_clk);
}
static int __init tegra_pcie_get_resources(void)
{
- struct resource *res_mmio = &tegra_pcie.res_mmio;
+ struct resource *res_mmio = 0;
int err;
+ tegra_pcie.power_rails_enabled = 0;
+ tegra_unpowergate_partition(TEGRA_POWERGATE_PCIE);
+ err = tegra_pci_enable_regulators();
+ if (err) {
+ pr_err("PCIE: failed to enable power rails %d\n", err);
+ goto err_pwr_on_rail;
+ }
err = tegra_pcie_clocks_get();
if (err) {
@@ -789,7 +1056,8 @@ static int __init tegra_pcie_get_resources(void)
err = -ENOMEM;
goto err_map_reg;
}
-
+ res_mmio = &tegra_pcie.res_mmio;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
err = request_resource(&iomem_resource, res_mmio);
if (err) {
pr_err("PCIE: Failed to request resources: %d\n", err);
@@ -803,7 +1071,7 @@ static int __init tegra_pcie_get_resources(void)
err = -ENOMEM;
goto err_map_io;
}
-
+#endif
err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
IRQF_SHARED, "PCIE", &tegra_pcie);
if (err) {
@@ -815,16 +1083,19 @@ static int __init tegra_pcie_get_resources(void)
return 0;
err_irq:
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
iounmap(tegra_pcie_io_base);
err_map_io:
release_resource(&tegra_pcie.res_mmio);
err_req_io:
+#endif
iounmap(tegra_pcie.regs);
err_map_reg:
tegra_pcie_power_off();
err_pwr_on:
tegra_pcie_clocks_put();
-
+err_pwr_on_rail:
+ tegra_pci_disable_regulators();
return err;
}
@@ -905,31 +1176,303 @@ static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
memset(pp->res, 0, sizeof(pp->res));
}
-int __init tegra_pcie_init(bool init_port0, bool init_port1)
+static int tegra_pcie_init(void)
{
- int err;
-
- if (!(init_port0 || init_port1))
- return -ENODEV;
+ int err = 0;
+ int port;
+ int rp_offset = 0;
+ int ctrl_offset = AFI_PEX0_CTRL;
pcibios_min_mem = 0;
err = tegra_pcie_get_resources();
if (err)
return err;
-
tegra_pcie_enable_controller();
/* setup the AFI address translations */
tegra_pcie_setup_translations();
+ for (port = 0; port < MAX_PCIE_SUPPORTED_PORTS; port++) {
+ ctrl_offset += (port * 8);
+ rp_offset = (rp_offset + 0x1000) * port;
+ if (tegra_pcie.plat_data->port_status[port])
+ tegra_pcie_add_port(port, rp_offset, ctrl_offset);
+ }
- if (init_port0)
- tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL);
+ tegra_pcie.pcie_power_enabled = 1;
+ if (tegra_pcie.num_ports)
+ pci_common_init(&tegra_pcie_hw);
+ else
+ err = tegra_pcie_power_off();
+
+ return err;
+}
+
+static int tegra_pci_probe(struct platform_device *pdev)
+{
+ tegra_pcie.plat_data = pdev->dev.platform_data;
+ dev_dbg(&pdev->dev, "PCIE.C: %s : _port_status[0] %d\n",
+ __func__, tegra_pcie.plat_data->port_status[0]);
+ dev_dbg(&pdev->dev, "PCIE.C: %s : _port_status[1] %d\n",
+ __func__, tegra_pcie.plat_data->port_status[1]);
+ dev_dbg(&pdev->dev, "PCIE.C: %s : _port_status[2] %d\n",
+ __func__, tegra_pcie.plat_data->port_status[2]);
+
+ return tegra_pcie_init();
+}
- if (init_port1)
- tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL);
+static int tegra_pci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return tegra_pcie_power_off();
+}
- pci_common_init(&tegra_pcie_hw);
+static int tegra_pci_resume(struct platform_device *pdev)
+{
+ return tegra_pcie_power_on();
+}
+static int tegra_pci_remove(struct platform_device *pdev)
+{
return 0;
}
+
+static struct platform_driver tegra_pci_driver = {
+ .probe = tegra_pci_probe,
+ .remove = tegra_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_pci_suspend,
+ .resume = tegra_pci_resume,
+#endif
+ .driver = {
+ .name = "tegra-pcie",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_pci_init_driver(void)
+{
+ return platform_driver_register(&tegra_pci_driver);
+}
+
+static void __exit tegra_pci_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_pci_driver);
+}
+
+module_init(tegra_pci_init_driver);
+module_exit(tegra_pci_exit_driver);
+
+static struct irq_chip tegra_irq_chip_msi_pcie = {
+ .name = "PCIe-MSI",
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+};
+
+/*
+ * 1:1 mapping of these entries to the MSI vectors, one per bit; each
+ * mapping corresponds to one of the available interrupts.
+ * irq should equal INT_PCI_MSI_BASE + index.
+ */
+struct msi_map_entry {
+ bool used;
+ u8 index;
+ int irq;
+};
+
+/* the hardware supports at most 256 MSI vectors */
+#if (INT_PCI_MSI_NR > 256)
+#error "INT_PCI_MSI_NR too big"
+#endif
+
+#define MSI_MAP_SIZE (INT_PCI_MSI_NR)
+static struct msi_map_entry msi_map[MSI_MAP_SIZE];
+
+static void msi_map_init(void)
+{
+ int i;
+
+ for (i = 0; i < MSI_MAP_SIZE; i++) {
+ msi_map[i].used = false;
+ msi_map[i].index = i;
+ msi_map[i].irq = 0;
+ }
+}
+
+/* returns a free entry from the map, or NULL if none is available */
+static struct msi_map_entry *msi_map_get(void)
+{
+ struct msi_map_entry *retval = NULL;
+ int i;
+
+ for (i = 0; i < MSI_MAP_SIZE; i++) {
+ if (!msi_map[i].used) {
+ retval = msi_map + i;
+ retval->irq = INT_PCI_MSI_BASE + i;
+ retval->used = true;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+void msi_map_release(struct msi_map_entry *entry)
+{
+ if (entry) {
+ entry->used = false;
+ entry->irq = 0;
+ }
+}
+
+static irqreturn_t pci_tegra_msi_isr(int irq, void *arg)
+{
+ int i;
+ int offset;
+ int index;
+ u32 reg;
+
+ for (i = 0; i < 8; i++) {
+ reg = afi_readl(AFI_MSI_VEC0_0 + i * 4);
+ while (reg != 0x00000000) {
+ offset = find_first_bit((unsigned long int *)&reg, 32);
+ index = i * 32 + offset;
+ if (index < MSI_MAP_SIZE) {
+ if (msi_map[index].used)
+ generic_handle_irq(msi_map[index].irq);
+ else
+ printk(KERN_INFO "unexpected MSI (1)\n");
+ } else {
+ /* that's weird; who triggered this? just clear it */
+ printk(KERN_INFO "unexpected MSI (2)\n");
+ }
+ /* clear the interrupt */
+ afi_writel(1ul << index, AFI_MSI_VEC0_0 + i * 4);
+ /* see if there's any more pending in this vector */
+ reg = afi_readl(AFI_MSI_VEC0_0 + i * 4);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool pci_tegra_enable_msi(void)
+{
+ bool retval = false;
+ static bool already_done;
+ u32 reg;
+ u32 msi_base = 0;
+ u32 msi_aligned = 0;
+
+ /* enable MSI interrupts; this only happens once */
+ if (already_done) {
+ retval = true;
+ goto exit;
+ }
+
+ msi_map_init();
+
+ if (request_irq(INT_PCIE_MSI, pci_tegra_msi_isr,
+ IRQF_SHARED, "PCIe-MSI",
+ pci_tegra_msi_isr)) {
+ pr_err("%s: Cannot register IRQ %u\n",
+ __func__, INT_PCIE_MSI);
+ goto exit;
+ }
+
+ /* setup AFI/FPCI range */
+ /* FIXME do this better! should be based on PAGE_SIZE */
+ msi_base = __get_free_pages(GFP_KERNEL, 3);
+ msi_aligned = ((msi_base + ((1<<12) - 1)) & ~((1<<12) - 1));
+ msi_aligned = virt_to_phys((void *)msi_aligned);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ afi_writel(msi_aligned, AFI_MSI_FPCI_BAR_ST_0);
+#else
+ /* different from T20! */
+ afi_writel(msi_aligned>>8, AFI_MSI_FPCI_BAR_ST_0);
+#endif
+ afi_writel(msi_aligned, AFI_MSI_AXI_BAR_ST_0);
+ /* this register is in 4K increments */
+ afi_writel(1, AFI_MSI_BAR_SZ_0);
+
+ /* enable all MSI vectors */
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC0_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC1_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC2_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC3_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC4_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC5_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC6_0);
+ afi_writel(0xffffffff, AFI_MSI_EN_VEC7_0);
+
+ /* and unmask the MSI interrupt */
+ reg = 0;
+ reg |= ((1 << AFI_INTR_MASK_0_INT_MASK) |
+ (1 << AFI_INTR_MASK_0_MSI_MASK));
+ afi_writel(reg, AFI_INTR_MASK_0);
+
+ set_irq_flags(INT_PCIE_MSI, IRQF_VALID);
+
+ already_done = true;
+ retval = true;
+exit:
+ if (!retval) {
+ if (msi_base)
+ free_pages(msi_base, 3);
+ }
+ return retval;
+}
+
+
+/* called by arch_setup_msi_irqs in drivers/pci/msi.c */
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+ int retval = -EINVAL;
+ struct msi_msg msg;
+ struct msi_map_entry *map_entry = NULL;
+
+ if (!pci_tegra_enable_msi())
+ goto exit;
+
+ map_entry = msi_map_get();
+ if (map_entry == NULL)
+ goto exit;
+
+ irq_alloc_desc(map_entry->irq);
+ irq_set_chip_and_handler(map_entry->irq,
+ &tegra_irq_chip_msi_pcie,
+ handle_simple_irq);
+
+ irq_set_msi_desc(map_entry->irq, desc);
+ set_irq_flags(map_entry->irq, IRQF_VALID);
+
+ msg.address_lo = afi_readl(AFI_MSI_AXI_BAR_ST_0);
+ /* 32 bit address only */
+ msg.address_hi = 0;
+ msg.data = map_entry->index;
+
+ write_msi_msg(map_entry->irq, &msg);
+
+ retval = 0;
+exit:
+ if (retval != 0) {
+ if (map_entry)
+ msi_map_release(map_entry);
+ }
+
+ return retval;
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ int i;
+ for (i = 0; i < MSI_MAP_SIZE; i++) {
+ if ((msi_map[i].used) && (msi_map[i].irq == irq)) {
+ irq_free_desc(msi_map[i].irq);
+ msi_map_release(msi_map + i);
+ break;
+ }
+ }
+}
diff --git a/arch/arm/mach-tegra/pinmux-t2-tables.c b/arch/arm/mach-tegra/pinmux-t2-tables.c
index a475367befa3..3a39f45cb57e 100644
--- a/arch/arm/mach-tegra/pinmux-t2-tables.c
+++ b/arch/arm/mach-tegra/pinmux-t2-tables.c
@@ -26,59 +26,83 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/syscore_ops.h>
#include <mach/iomap.h>
#include <mach/pinmux.h>
-#include <mach/suspend.h>
+#include "gpio-names.h"
-#define DRIVE_PINGROUP(pg_name, r) \
- [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
- .name = #pg_name, \
- .reg = r \
+#define SET_DRIVE_PINGROUP(pg_name, r, drv_down_offset, drv_down_mask, drv_up_offset, drv_up_mask, \
+ slew_rise_offset, slew_rise_mask, slew_fall_offset, slew_fall_mask) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg = r, \
+ .drvup_offset = drv_up_offset, \
+ .drvup_mask = drv_up_mask, \
+ .drvdown_offset = drv_down_offset, \
+ .drvdown_mask = drv_down_mask, \
+ .slewrise_offset = slew_rise_offset, \
+ .slewrise_mask = slew_rise_mask, \
+ .slewfall_offset = slew_fall_offset, \
+ .slewfall_mask = slew_fall_mask, \
+ }
+
+#define DEFAULT_DRIVE_PINGROUP(pg_name, r) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg = r, \
+ .drvup_offset = 20, \
+ .drvup_mask = 0x1f, \
+ .drvdown_offset = 12, \
+ .drvdown_mask = 0x1f, \
+ .slewrise_offset = 28, \
+ .slewrise_mask = 0x3, \
+ .slewfall_offset = 30, \
+ .slewfall_mask = 0x3, \
}
const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
- DRIVE_PINGROUP(AO1, 0x868),
- DRIVE_PINGROUP(AO2, 0x86c),
- DRIVE_PINGROUP(AT1, 0x870),
- DRIVE_PINGROUP(AT2, 0x874),
- DRIVE_PINGROUP(CDEV1, 0x878),
- DRIVE_PINGROUP(CDEV2, 0x87c),
- DRIVE_PINGROUP(CSUS, 0x880),
- DRIVE_PINGROUP(DAP1, 0x884),
- DRIVE_PINGROUP(DAP2, 0x888),
- DRIVE_PINGROUP(DAP3, 0x88c),
- DRIVE_PINGROUP(DAP4, 0x890),
- DRIVE_PINGROUP(DBG, 0x894),
- DRIVE_PINGROUP(LCD1, 0x898),
- DRIVE_PINGROUP(LCD2, 0x89c),
- DRIVE_PINGROUP(SDMMC2, 0x8a0),
- DRIVE_PINGROUP(SDMMC3, 0x8a4),
- DRIVE_PINGROUP(SPI, 0x8a8),
- DRIVE_PINGROUP(UAA, 0x8ac),
- DRIVE_PINGROUP(UAB, 0x8b0),
- DRIVE_PINGROUP(UART2, 0x8b4),
- DRIVE_PINGROUP(UART3, 0x8b8),
- DRIVE_PINGROUP(VI1, 0x8bc),
- DRIVE_PINGROUP(VI2, 0x8c0),
- DRIVE_PINGROUP(XM2A, 0x8c4),
- DRIVE_PINGROUP(XM2C, 0x8c8),
- DRIVE_PINGROUP(XM2D, 0x8cc),
- DRIVE_PINGROUP(XM2CLK, 0x8d0),
- DRIVE_PINGROUP(MEMCOMP, 0x8d4),
- DRIVE_PINGROUP(SDIO1, 0x8e0),
- DRIVE_PINGROUP(CRT, 0x8ec),
- DRIVE_PINGROUP(DDC, 0x8f0),
- DRIVE_PINGROUP(GMA, 0x8f4),
- DRIVE_PINGROUP(GMB, 0x8f8),
- DRIVE_PINGROUP(GMC, 0x8fc),
- DRIVE_PINGROUP(GMD, 0x900),
- DRIVE_PINGROUP(GME, 0x904),
- DRIVE_PINGROUP(OWR, 0x908),
- DRIVE_PINGROUP(UAD, 0x90c),
+ DEFAULT_DRIVE_PINGROUP(AO1, 0x868),
+ DEFAULT_DRIVE_PINGROUP(AO2, 0x86c),
+ DEFAULT_DRIVE_PINGROUP(AT1, 0x870),
+ DEFAULT_DRIVE_PINGROUP(AT2, 0x874),
+ DEFAULT_DRIVE_PINGROUP(CDEV1, 0x878),
+ DEFAULT_DRIVE_PINGROUP(CDEV2, 0x87c),
+ DEFAULT_DRIVE_PINGROUP(CSUS, 0x880),
+ DEFAULT_DRIVE_PINGROUP(DAP1, 0x884),
+ DEFAULT_DRIVE_PINGROUP(DAP2, 0x888),
+ DEFAULT_DRIVE_PINGROUP(DAP3, 0x88c),
+ DEFAULT_DRIVE_PINGROUP(DAP4, 0x890),
+ DEFAULT_DRIVE_PINGROUP(DBG, 0x894),
+ DEFAULT_DRIVE_PINGROUP(LCD1, 0x898),
+ DEFAULT_DRIVE_PINGROUP(LCD2, 0x89c),
+ DEFAULT_DRIVE_PINGROUP(SDMMC2, 0x8a0),
+ DEFAULT_DRIVE_PINGROUP(SDMMC3, 0x8a4),
+ DEFAULT_DRIVE_PINGROUP(SPI, 0x8a8),
+ DEFAULT_DRIVE_PINGROUP(UAA, 0x8ac),
+ DEFAULT_DRIVE_PINGROUP(UAB, 0x8b0),
+ DEFAULT_DRIVE_PINGROUP(UART2, 0x8b4),
+ DEFAULT_DRIVE_PINGROUP(UART3, 0x8b8),
+ DEFAULT_DRIVE_PINGROUP(VI1, 0x8bc),
+ DEFAULT_DRIVE_PINGROUP(VI2, 0x8c0),
+ DEFAULT_DRIVE_PINGROUP(XM2A, 0x8c4),
+ DEFAULT_DRIVE_PINGROUP(XM2C, 0x8c8),
+ DEFAULT_DRIVE_PINGROUP(XM2D, 0x8cc),
+ DEFAULT_DRIVE_PINGROUP(XM2CLK, 0x8d0),
+ DEFAULT_DRIVE_PINGROUP(MEMCOMP, 0x8d4),
+ DEFAULT_DRIVE_PINGROUP(SDIO1, 0x8e0),
+ DEFAULT_DRIVE_PINGROUP(CRT, 0x8ec),
+ DEFAULT_DRIVE_PINGROUP(DDC, 0x8f0),
+ DEFAULT_DRIVE_PINGROUP(GMA, 0x8f4),
+ DEFAULT_DRIVE_PINGROUP(GMB, 0x8f8),
+ DEFAULT_DRIVE_PINGROUP(GMC, 0x8fc),
+ DEFAULT_DRIVE_PINGROUP(GMD, 0x900),
+ DEFAULT_DRIVE_PINGROUP(GME, 0x904),
+ DEFAULT_DRIVE_PINGROUP(OWR, 0x908),
+ DEFAULT_DRIVE_PINGROUP(UAD, 0x90c),
};
-#define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \
+#define PINGROUP(pg_name, gpio_nr, vdd, f0, f1, f2, f3, f_safe, \
tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
[TEGRA_PINGROUP_ ## pg_name] = { \
.name = #pg_name, \
@@ -88,7 +112,8 @@ const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE
TEGRA_MUX_ ## f1, \
TEGRA_MUX_ ## f2, \
TEGRA_MUX_ ## f3, \
- }, \
+ }, \
+ .gpionr = TEGRA_GPIO_ ## gpio_nr, \
.func_safe = TEGRA_MUX_ ## f_safe, \
.tri_reg = tri_r, \
.tri_bit = tri_b, \
@@ -96,129 +121,148 @@ const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE
.mux_bit = mux_b, \
.pupd_reg = pupd_r, \
.pupd_bit = pupd_b, \
- }
+ .io_default = 0, \
+ .od_bit = -1, \
+ .lock_bit = -1, \
+ .ioreset_bit = -1, \
+}
+
+#define PINGROUPS \
+ /* pg_name,gpio_nr, vdd, f0, f1, f2, f3, f_safe, tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b*/\
+ PINGROUP(ATA, PI3, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24, 0xA0, 0),\
+ PINGROUP(ATB, PI2, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 1, 0x80, 16, 0xA0, 2),\
+ PINGROUP(ATC, PI5, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 2, 0x80, 22, 0xA0, 4),\
+ PINGROUP(ATD, PH0, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 3, 0x80, 20, 0xA0, 6),\
+ PINGROUP(ATE, PH4, NAND, IDE, NAND, GMI, RSVD, IDE, 0x18, 25, 0x80, 12, 0xA0, 8),\
+ PINGROUP(CDEV1, PW4, AUDIO, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, OSC, 0x14, 4, 0x88, 2, 0xA8, 0),\
+ PINGROUP(CDEV2, PW5, AUDIO, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, OSC, 0x14, 5, 0x88, 4, 0xA8, 2),\
+ PINGROUP(CRTP, INVALID, LCD, CRT, RSVD, RSVD, RSVD, RSVD, 0x20, 14, 0x98, 20, 0xA4, 24),\
+ PINGROUP(CSUS, PT1, VI, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, PLLC_OUT1, 0x14, 6, 0x88, 6, 0xAC, 24),\
+ PINGROUP(DAP1, PN0, AUDIO, DAP1, RSVD, GMI, SDIO2, DAP1, 0x14, 7, 0x88, 20, 0xA0, 10),\
+ PINGROUP(DAP2, PA2, AUDIO, DAP2, TWC, RSVD, GMI, DAP2, 0x14, 8, 0x88, 22, 0xA0, 12),\
+ PINGROUP(DAP3, PP0, BB, DAP3, RSVD, RSVD, RSVD, DAP3, 0x14, 9, 0x88, 24, 0xA0, 14),\
+ PINGROUP(DAP4, PP4, UART, DAP4, RSVD, GMI, RSVD, DAP4, 0x14, 10, 0x88, 26, 0xA0, 16),\
+ PINGROUP(DDC, INVALID, LCD, I2C2, RSVD, RSVD, RSVD, RSVD, 0x18, 31, 0x88, 0, 0xB0, 28),\
+ PINGROUP(DTA, PT4, VI, RSVD, SDIO2, VI, RSVD, RSVD4, 0x14, 11, 0x84, 20, 0xA0, 18),\
+ PINGROUP(DTB, PT2, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 12, 0x84, 22, 0xA0, 20),\
+ PINGROUP(DTC, PD6, VI, RSVD, RSVD, VI, RSVD, RSVD1, 0x14, 13, 0x84, 26, 0xA0, 22),\
+ PINGROUP(DTD, PT0, VI, RSVD, SDIO2, VI, RSVD, RSVD1, 0x14, 14, 0x84, 28, 0xA0, 24),\
+ PINGROUP(DTE, PBB1, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 15, 0x84, 30, 0xA0, 26),\
+ PINGROUP(DTF, PBB2, VI, I2C3, RSVD, VI, RSVD, RSVD4, 0x20, 12, 0x98, 30, 0xA0, 28),\
+ PINGROUP(GMA, PAA0, NAND, UARTE, SPI3, GMI, SDIO4, SPI3, 0x14, 28, 0x84, 0, 0xB0, 20),\
+ PINGROUP(GMB, PC7, NAND, IDE, NAND, GMI, GMI_INT, GMI, 0x18, 29, 0x88, 28, 0xB0, 22),\
+ PINGROUP(GMC, PJ7, NAND, UARTD, SPI4, GMI, SFLASH, SPI4, 0x14, 29, 0x84, 2, 0xB0, 24),\
+ PINGROUP(GMD, PJ0, NAND, RSVD, NAND, GMI, SFLASH, GMI, 0x18, 30, 0x88, 30, 0xB0, 26),\
+ PINGROUP(GME, PAA4, NAND, RSVD, DAP5, GMI, SDIO4, GMI, 0x18, 0, 0x8C, 0, 0xA8, 24),\
+ PINGROUP(GPU, PU0, UART, PWM, UARTA, GMI, RSVD, RSVD4, 0x14, 16, 0x8C, 4, 0xA4, 20),\
+ PINGROUP(GPU7, PU7, SYS, RTCK, RSVD, RSVD, RSVD, RTCK, 0x20, 11, 0x98, 28, 0xA4, 6),\
+ PINGROUP(GPV, PV4, SD, PCIE, RSVD, RSVD, RSVD, PCIE, 0x14, 17, 0x8C, 2, 0xA0, 30),\
+ PINGROUP(HDINT, PN7, LCD, HDMI, RSVD, RSVD, RSVD, HDMI, 0x1C, 23, 0x84, 4, 0xAC, 22),\
+ PINGROUP(I2CP, PZ6, SYS, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 18, 0x88, 8, 0xA4, 2),\
+ PINGROUP(IRRX, PJ6, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 20, 0x88, 18, 0xA8, 22),\
+ PINGROUP(IRTX, PJ5, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 19, 0x88, 16, 0xA8, 20),\
+ PINGROUP(KBCA, PR0, SYS, KBC, NAND, SDIO2, EMC_TEST0_DLL, KBC, 0x14, 22, 0x88, 10, 0xA4, 8),\
+ PINGROUP(KBCB, PR7, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x14, 21, 0x88, 12, 0xA4, 10),\
+ PINGROUP(KBCC, PQ0, SYS, KBC, NAND, TRACE, EMC_TEST1_DLL, KBC, 0x18, 26, 0x88, 14, 0xA4, 12),\
+ PINGROUP(KBCD, PR3, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x20, 10, 0x98, 26, 0xA4, 14),\
+ PINGROUP(KBCE, PQ7, SYS, KBC, NAND, OWR, RSVD, KBC, 0x14, 26, 0x80, 28, 0xB0, 2),\
+ PINGROUP(KBCF, PQ2, SYS, KBC, NAND, TRACE, MIO, KBC, 0x14, 27, 0x80, 26, 0xB0, 0),\
+ PINGROUP(LCSN, PN4, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 31, 0x90, 12, 0xAC, 20),\
+ PINGROUP(LD0, PE0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 0, 0x94, 0, 0xAC, 12),\
+ PINGROUP(LD1, PE1, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 1, 0x94, 2, 0xAC, 12),\
+ PINGROUP(LD10, PF2, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 10, 0x94, 20, 0xAC, 12),\
+ PINGROUP(LD11, PF3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 11, 0x94, 22, 0xAC, 12),\
+ PINGROUP(LD12, PF4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 12, 0x94, 24, 0xAC, 12),\
+ PINGROUP(LD13, PF5, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 13, 0x94, 26, 0xAC, 12),\
+ PINGROUP(LD14, PF6, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 14, 0x94, 28, 0xAC, 12),\
+ PINGROUP(LD15, PF7, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 15, 0x94, 30, 0xAC, 12),\
+ PINGROUP(LD16, PM0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 16, 0x98, 0, 0xAC, 12),\
+ PINGROUP(LD17, PM1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 17, 0x98, 2, 0xAC, 12),\
+ PINGROUP(LD2, PE2, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 2, 0x94, 4, 0xAC, 12),\
+ PINGROUP(LD3, PE3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 3, 0x94, 6, 0xAC, 12),\
+ PINGROUP(LD4, PE4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 4, 0x94, 8, 0xAC, 12),\
+ PINGROUP(LD5, PE5, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 5, 0x94, 10, 0xAC, 12),\
+ PINGROUP(LD6, PE6, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 6, 0x94, 12, 0xAC, 12),\
+ PINGROUP(LD7, PE7, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 7, 0x94, 14, 0xAC, 12),\
+ PINGROUP(LD8, PF0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 8, 0x94, 16, 0xAC, 12),\
+ PINGROUP(LD9, PF1, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 9, 0x94, 18, 0xAC, 12),\
+ PINGROUP(LDC, PN6, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 30, 0x90, 14, 0xAC, 20),\
+ PINGROUP(LDI, PM6, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 6, 0x98, 16, 0xAC, 18),\
+ PINGROUP(LHP0, PM5, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 18, 0x98, 10, 0xAC, 16),\
+ PINGROUP(LHP1, PM2, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 19, 0x98, 4, 0xAC, 14),\
+ PINGROUP(LHP2, PM3, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 20, 0x98, 6, 0xAC, 14),\
+ PINGROUP(LHS, PJ3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x20, 7, 0x90, 22, 0xAC, 22),\
+ PINGROUP(LM0, PW0, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 24, 0x90, 26, 0xAC, 22),\
+ PINGROUP(LM1, PW1, LCD, DISPLAYA, DISPLAYB, RSVD, CRT, RSVD3, 0x1C, 25, 0x90, 28, 0xAC, 22),\
+ PINGROUP(LPP, PM7, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 8, 0x98, 14, 0xAC, 18),\
+ PINGROUP(LPW0, PB2, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 3, 0x90, 0, 0xAC, 20),\
+ PINGROUP(LPW1, PC1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 4, 0x90, 2, 0xAC, 20),\
+ PINGROUP(LPW2, PC6, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 5, 0x90, 4, 0xAC, 20),\
+ PINGROUP(LSC0, PB3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 27, 0x90, 18, 0xAC, 22),\
+ PINGROUP(LSC1, PZ3, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 28, 0x90, 20, 0xAC, 20),\
+ PINGROUP(LSCK, PZ4, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 29, 0x90, 16, 0xAC, 20),\
+ PINGROUP(LSDA, PN5, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 1, 0x90, 8, 0xAC, 20),\
+ PINGROUP(LSDI, PZ2, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, DISPLAYA, 0x20, 2, 0x90, 6, 0xAC, 20),\
+ PINGROUP(LSPI, PJ1, LCD, DISPLAYA, DISPLAYB, XIO, HDMI, DISPLAYA, 0x20, 0, 0x90, 10, 0xAC, 22),\
+ PINGROUP(LVP0, PV7, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 21, 0x90, 30, 0xAC, 22),\
+ PINGROUP(LVP1, PM4, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 22, 0x98, 8, 0xAC, 16),\
+ PINGROUP(LVS, PJ4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 26, 0x90, 24, 0xAC, 22),\
+ PINGROUP(OWC, INVALID, SYS, OWR, RSVD, RSVD, RSVD, OWR, 0x14, 31, 0x84, 8, 0xB0, 30),\
+ PINGROUP(PMC, PBB0, SYS, PWR_ON, PWR_INTR, RSVD, RSVD, PWR_ON, 0x14, 23, 0x98, 18, -1, -1),\
+ PINGROUP(PTA, PT5, NAND, I2C2, HDMI, GMI, RSVD, RSVD, 0x14, 24, 0x98, 22, 0xA4, 4),\
+ PINGROUP(RM, PC5, UART, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 25, 0x80, 14, 0xA4, 0),\
+ PINGROUP(SDB, PA7, SD, UARTA, PWM, SDIO3, SPI2, PWM, 0x20, 15, 0x8C, 10, -1, -1),\
+ PINGROUP(SDC, PB7, SD, PWM, TWC, SDIO3, SPI3, TWC, 0x18, 1, 0x8C, 12, 0xAC, 28),\
+ PINGROUP(SDD, PA6, SD, UARTA, PWM, SDIO3, SPI3, PWM, 0x18, 2, 0x8C, 14, 0xAC, 30),\
+ PINGROUP(SDIO1, PZ0, BB, SDIO1, RSVD, UARTE, UARTA, RSVD2, 0x14, 30, 0x80, 30, 0xB0, 18),\
+ PINGROUP(SLXA, PD1, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 3, 0x84, 6, 0xA4, 22),\
+ PINGROUP(SLXC, PD3, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 5, 0x84, 10, 0xA4, 26),\
+ PINGROUP(SLXD, PD4, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 6, 0x84, 12, 0xA4, 28),\
+ PINGROUP(SLXK, PD0, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 7, 0x84, 14, 0xA4, 30),\
+ PINGROUP(SPDI, PK6, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 8, 0x8C, 8, 0xA4, 16),\
+ PINGROUP(SPDO, PK5, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 9, 0x8C, 6, 0xA4, 18),\
+ PINGROUP(SPIA, PX0, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 10, 0x8C, 30, 0xA8, 4),\
+ PINGROUP(SPIB, PX1, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 11, 0x8C, 28, 0xA8, 6),\
+ PINGROUP(SPIC, PX2, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 12, 0x8C, 26, 0xA8, 8),\
+ PINGROUP(SPID, PX4, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 13, 0x8C, 24, 0xA8, 10),\
+ PINGROUP(SPIE, PX5, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 14, 0x8C, 22, 0xA8, 12),\
+ PINGROUP(SPIF, PX7, AUDIO, SPI3, SPI1, SPI2, RSVD, RSVD4, 0x18, 15, 0x8C, 20, 0xA8, 14),\
+ PINGROUP(SPIG, PW2, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 16, 0x8C, 18, 0xA8, 16),\
+ PINGROUP(SPIH, PW3, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 17, 0x8C, 16, 0xA8, 18),\
+ PINGROUP(UAA, PO1, BB, SPI3, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 18, 0x80, 0, 0xAC, 0),\
+ PINGROUP(UAB, PO5, BB, SPI2, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 19, 0x80, 2, 0xAC, 2),\
+ PINGROUP(UAC, PV0, BB, OWR, RSVD, RSVD, RSVD, RSVD4, 0x18, 20, 0x80, 4, 0xAC, 4),\
+ PINGROUP(UAD, PC2, UART, IRDA, SPDIF, UARTA, SPI4, SPDIF, 0x18, 21, 0x80, 6, 0xAC, 6),\
+ PINGROUP(UCA, PW6, UART, UARTC, RSVD, GMI, RSVD, RSVD4, 0x18, 22, 0x84, 16, 0xAC, 8),\
+ PINGROUP(UCB, PC0, UART, UARTC, PWM, GMI, RSVD, RSVD4, 0x18, 23, 0x84, 18, 0xAC, 10),\
+ PINGROUP(UDA, PY0, BB, SPI1, RSVD, UARTD, ULPI, RSVD2, 0x20, 13, 0x80, 8, 0xB0, 16),\
+ /* these pin groups only have pullup and pull down control */\
+ PINGROUP(CK32, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 14),\
+ PINGROUP(DDRC, INVALID, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xAC, 26),\
+ PINGROUP(PMCA, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 4),\
+ PINGROUP(PMCB, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 6),\
+ PINGROUP(PMCC, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 8),\
+ PINGROUP(PMCD, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 10),\
+ PINGROUP(PMCE, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 12),\
+ PINGROUP(XM2C, INVALID, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 30),\
+ PINGROUP(XM2D, INVALID, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 28),\
+ /* END OF LIST */
const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
- PINGROUP(ATA, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24, 0xA0, 0),
- PINGROUP(ATB, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 1, 0x80, 16, 0xA0, 2),
- PINGROUP(ATC, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 2, 0x80, 22, 0xA0, 4),
- PINGROUP(ATD, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 3, 0x80, 20, 0xA0, 6),
- PINGROUP(ATE, NAND, IDE, NAND, GMI, RSVD, IDE, 0x18, 25, 0x80, 12, 0xA0, 8),
- PINGROUP(CDEV1, AUDIO, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, OSC, 0x14, 4, 0x88, 2, 0xA8, 0),
- PINGROUP(CDEV2, AUDIO, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, OSC, 0x14, 5, 0x88, 4, 0xA8, 2),
- PINGROUP(CRTP, LCD, CRT, RSVD, RSVD, RSVD, RSVD, 0x20, 14, 0x98, 20, 0xA4, 24),
- PINGROUP(CSUS, VI, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, PLLC_OUT1, 0x14, 6, 0x88, 6, 0xAC, 24),
- PINGROUP(DAP1, AUDIO, DAP1, RSVD, GMI, SDIO2, DAP1, 0x14, 7, 0x88, 20, 0xA0, 10),
- PINGROUP(DAP2, AUDIO, DAP2, TWC, RSVD, GMI, DAP2, 0x14, 8, 0x88, 22, 0xA0, 12),
- PINGROUP(DAP3, BB, DAP3, RSVD, RSVD, RSVD, DAP3, 0x14, 9, 0x88, 24, 0xA0, 14),
- PINGROUP(DAP4, UART, DAP4, RSVD, GMI, RSVD, DAP4, 0x14, 10, 0x88, 26, 0xA0, 16),
- PINGROUP(DDC, LCD, I2C2, RSVD, RSVD, RSVD, RSVD4, 0x18, 31, 0x88, 0, 0xB0, 28),
- PINGROUP(DTA, VI, RSVD, SDIO2, VI, RSVD, RSVD4, 0x14, 11, 0x84, 20, 0xA0, 18),
- PINGROUP(DTB, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 12, 0x84, 22, 0xA0, 20),
- PINGROUP(DTC, VI, RSVD, RSVD, VI, RSVD, RSVD1, 0x14, 13, 0x84, 26, 0xA0, 22),
- PINGROUP(DTD, VI, RSVD, SDIO2, VI, RSVD, RSVD1, 0x14, 14, 0x84, 28, 0xA0, 24),
- PINGROUP(DTE, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 15, 0x84, 30, 0xA0, 26),
- PINGROUP(DTF, VI, I2C3, RSVD, VI, RSVD, RSVD4, 0x20, 12, 0x98, 30, 0xA0, 28),
- PINGROUP(GMA, NAND, UARTE, SPI3, GMI, SDIO4, SPI3, 0x14, 28, 0x84, 0, 0xB0, 20),
- PINGROUP(GMB, NAND, IDE, NAND, GMI, GMI_INT, GMI, 0x18, 29, 0x88, 28, 0xB0, 22),
- PINGROUP(GMC, NAND, UARTD, SPI4, GMI, SFLASH, SPI4, 0x14, 29, 0x84, 2, 0xB0, 24),
- PINGROUP(GMD, NAND, RSVD, NAND, GMI, SFLASH, GMI, 0x18, 30, 0x88, 30, 0xB0, 26),
- PINGROUP(GME, NAND, RSVD, DAP5, GMI, SDIO4, GMI, 0x18, 0, 0x8C, 0, 0xA8, 24),
- PINGROUP(GPU, UART, PWM, UARTA, GMI, RSVD, RSVD4, 0x14, 16, 0x8C, 4, 0xA4, 20),
- PINGROUP(GPU7, SYS, RTCK, RSVD, RSVD, RSVD, RTCK, 0x20, 11, 0x98, 28, 0xA4, 6),
- PINGROUP(GPV, SD, PCIE, RSVD, RSVD, RSVD, PCIE, 0x14, 17, 0x8C, 2, 0xA0, 30),
- PINGROUP(HDINT, LCD, HDMI, RSVD, RSVD, RSVD, HDMI, 0x1C, 23, 0x84, 4, 0xAC, 22),
- PINGROUP(I2CP, SYS, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 18, 0x88, 8, 0xA4, 2),
- PINGROUP(IRRX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 20, 0x88, 18, 0xA8, 22),
- PINGROUP(IRTX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 19, 0x88, 16, 0xA8, 20),
- PINGROUP(KBCA, SYS, KBC, NAND, SDIO2, EMC_TEST0_DLL, KBC, 0x14, 22, 0x88, 10, 0xA4, 8),
- PINGROUP(KBCB, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x14, 21, 0x88, 12, 0xA4, 10),
- PINGROUP(KBCC, SYS, KBC, NAND, TRACE, EMC_TEST1_DLL, KBC, 0x18, 26, 0x88, 14, 0xA4, 12),
- PINGROUP(KBCD, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x20, 10, 0x98, 26, 0xA4, 14),
- PINGROUP(KBCE, SYS, KBC, NAND, OWR, RSVD, KBC, 0x14, 26, 0x80, 28, 0xB0, 2),
- PINGROUP(KBCF, SYS, KBC, NAND, TRACE, MIO, KBC, 0x14, 27, 0x80, 26, 0xB0, 0),
- PINGROUP(LCSN, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 31, 0x90, 12, 0xAC, 20),
- PINGROUP(LD0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 0, 0x94, 0, 0xAC, 12),
- PINGROUP(LD1, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 1, 0x94, 2, 0xAC, 12),
- PINGROUP(LD10, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 10, 0x94, 20, 0xAC, 12),
- PINGROUP(LD11, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 11, 0x94, 22, 0xAC, 12),
- PINGROUP(LD12, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 12, 0x94, 24, 0xAC, 12),
- PINGROUP(LD13, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 13, 0x94, 26, 0xAC, 12),
- PINGROUP(LD14, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 14, 0x94, 28, 0xAC, 12),
- PINGROUP(LD15, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 15, 0x94, 30, 0xAC, 12),
- PINGROUP(LD16, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 16, 0x98, 0, 0xAC, 12),
- PINGROUP(LD17, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 17, 0x98, 2, 0xAC, 12),
- PINGROUP(LD2, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 2, 0x94, 4, 0xAC, 12),
- PINGROUP(LD3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 3, 0x94, 6, 0xAC, 12),
- PINGROUP(LD4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 4, 0x94, 8, 0xAC, 12),
- PINGROUP(LD5, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 5, 0x94, 10, 0xAC, 12),
- PINGROUP(LD6, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 6, 0x94, 12, 0xAC, 12),
- PINGROUP(LD7, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 7, 0x94, 14, 0xAC, 12),
- PINGROUP(LD8, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 8, 0x94, 16, 0xAC, 12),
- PINGROUP(LD9, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 9, 0x94, 18, 0xAC, 12),
- PINGROUP(LDC, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 30, 0x90, 14, 0xAC, 20),
- PINGROUP(LDI, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 6, 0x98, 16, 0xAC, 18),
- PINGROUP(LHP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 18, 0x98, 10, 0xAC, 16),
- PINGROUP(LHP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 19, 0x98, 4, 0xAC, 14),
- PINGROUP(LHP2, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 20, 0x98, 6, 0xAC, 14),
- PINGROUP(LHS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x20, 7, 0x90, 22, 0xAC, 22),
- PINGROUP(LM0, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 24, 0x90, 26, 0xAC, 22),
- PINGROUP(LM1, LCD, DISPLAYA, DISPLAYB, RSVD, CRT, RSVD3, 0x1C, 25, 0x90, 28, 0xAC, 22),
- PINGROUP(LPP, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 8, 0x98, 14, 0xAC, 18),
- PINGROUP(LPW0, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 3, 0x90, 0, 0xAC, 20),
- PINGROUP(LPW1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 4, 0x90, 2, 0xAC, 20),
- PINGROUP(LPW2, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 5, 0x90, 4, 0xAC, 20),
- PINGROUP(LSC0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 27, 0x90, 18, 0xAC, 22),
- PINGROUP(LSC1, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 28, 0x90, 20, 0xAC, 20),
- PINGROUP(LSCK, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 29, 0x90, 16, 0xAC, 20),
- PINGROUP(LSDA, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 1, 0x90, 8, 0xAC, 20),
- PINGROUP(LSDI, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, DISPLAYA, 0x20, 2, 0x90, 6, 0xAC, 20),
- PINGROUP(LSPI, LCD, DISPLAYA, DISPLAYB, XIO, HDMI, DISPLAYA, 0x20, 0, 0x90, 10, 0xAC, 22),
- PINGROUP(LVP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 21, 0x90, 30, 0xAC, 22),
- PINGROUP(LVP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 22, 0x98, 8, 0xAC, 16),
- PINGROUP(LVS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 26, 0x90, 24, 0xAC, 22),
- PINGROUP(OWC, SYS, OWR, RSVD, RSVD, RSVD, OWR, 0x14, 31, 0x84, 8, 0xB0, 30),
- PINGROUP(PMC, SYS, PWR_ON, PWR_INTR, RSVD, RSVD, PWR_ON, 0x14, 23, 0x98, 18, -1, -1),
- PINGROUP(PTA, NAND, I2C2, HDMI, GMI, RSVD, RSVD4, 0x14, 24, 0x98, 22, 0xA4, 4),
- PINGROUP(RM, UART, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 25, 0x80, 14, 0xA4, 0),
- PINGROUP(SDB, SD, UARTA, PWM, SDIO3, SPI2, PWM, 0x20, 15, 0x8C, 10, -1, -1),
- PINGROUP(SDC, SD, PWM, TWC, SDIO3, SPI3, TWC, 0x18, 1, 0x8C, 12, 0xAC, 28),
- PINGROUP(SDD, SD, UARTA, PWM, SDIO3, SPI3, PWM, 0x18, 2, 0x8C, 14, 0xAC, 30),
- PINGROUP(SDIO1, BB, SDIO1, RSVD, UARTE, UARTA, RSVD2, 0x14, 30, 0x80, 30, 0xB0, 18),
- PINGROUP(SLXA, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 3, 0x84, 6, 0xA4, 22),
- PINGROUP(SLXC, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 5, 0x84, 10, 0xA4, 26),
- PINGROUP(SLXD, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 6, 0x84, 12, 0xA4, 28),
- PINGROUP(SLXK, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 7, 0x84, 14, 0xA4, 30),
- PINGROUP(SPDI, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 8, 0x8C, 8, 0xA4, 16),
- PINGROUP(SPDO, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 9, 0x8C, 6, 0xA4, 18),
- PINGROUP(SPIA, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 10, 0x8C, 30, 0xA8, 4),
- PINGROUP(SPIB, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 11, 0x8C, 28, 0xA8, 6),
- PINGROUP(SPIC, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 12, 0x8C, 26, 0xA8, 8),
- PINGROUP(SPID, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 13, 0x8C, 24, 0xA8, 10),
- PINGROUP(SPIE, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 14, 0x8C, 22, 0xA8, 12),
- PINGROUP(SPIF, AUDIO, SPI3, SPI1, SPI2, RSVD, RSVD4, 0x18, 15, 0x8C, 20, 0xA8, 14),
- PINGROUP(SPIG, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 16, 0x8C, 18, 0xA8, 16),
- PINGROUP(SPIH, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 17, 0x8C, 16, 0xA8, 18),
- PINGROUP(UAA, BB, SPI3, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 18, 0x80, 0, 0xAC, 0),
- PINGROUP(UAB, BB, SPI2, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 19, 0x80, 2, 0xAC, 2),
- PINGROUP(UAC, BB, OWR, RSVD, RSVD, RSVD, RSVD4, 0x18, 20, 0x80, 4, 0xAC, 4),
- PINGROUP(UAD, UART, IRDA, SPDIF, UARTA, SPI4, SPDIF, 0x18, 21, 0x80, 6, 0xAC, 6),
- PINGROUP(UCA, UART, UARTC, RSVD, GMI, RSVD, RSVD4, 0x18, 22, 0x84, 16, 0xAC, 8),
- PINGROUP(UCB, UART, UARTC, PWM, GMI, RSVD, RSVD4, 0x18, 23, 0x84, 18, 0xAC, 10),
- PINGROUP(UDA, BB, SPI1, RSVD, UARTD, ULPI, RSVD2, 0x20, 13, 0x80, 8, 0xB0, 16),
- /* these pin groups only have pullup and pull down control */
- PINGROUP(CK32, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 14),
- PINGROUP(DDRC, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xAC, 26),
- PINGROUP(PMCA, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 4),
- PINGROUP(PMCB, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 6),
- PINGROUP(PMCC, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 8),
- PINGROUP(PMCD, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 10),
- PINGROUP(PMCE, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 12),
- PINGROUP(XM2C, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 30),
- PINGROUP(XM2D, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 28),
+ PINGROUPS
+};
+
+#undef PINGROUP
+
+#define PINGROUP(pg_name, gpio_nr, vdd, f0, f1, f2, f3, f_safe, \
+ tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
+ [TEGRA_GPIO_##gpio_nr] = TEGRA_PINGROUP_ ##pg_name\
+
+const int gpio_to_pingroup[TEGRA_MAX_GPIO] = {
+ PINGROUPS
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
#define TRISTATE_REG_A 0x14
#define TRISTATE_REG_NUM 4
#define PIN_MUX_CTL_REG_A 0x80
@@ -240,7 +284,7 @@ static inline void pg_writel(unsigned long value, unsigned long offset)
writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
}
-void tegra_pinmux_suspend(void)
+static int tegra_pinmux_suspend(void)
{
unsigned int i;
u32 *ctx = pinmux_reg;
@@ -256,9 +300,11 @@ void tegra_pinmux_suspend(void)
for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
*ctx++ = pg_readl(tegra_soc_drive_pingroups[i].reg);
+
+ return 0;
}
-void tegra_pinmux_resume(void)
+static void tegra_pinmux_resume(void)
{
unsigned int i;
u32 *ctx = pinmux_reg;
@@ -275,4 +321,18 @@ void tegra_pinmux_resume(void)
for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
pg_writel(*ctx++, tegra_soc_drive_pingroups[i].reg);
}
+
+static struct syscore_ops tegra_pinmux_syscore_ops = {
+ .suspend = tegra_pinmux_suspend,
+ .resume = tegra_pinmux_resume,
+};
+
+void __init tegra_init_pinmux(void)
+{
+ register_syscore_ops(&tegra_pinmux_syscore_ops);
+}
+#else
+void __init tegra_init_pinmux(void)
+{
+}
#endif
diff --git a/arch/arm/mach-tegra/pinmux-t3-tables.c b/arch/arm/mach-tegra/pinmux-t3-tables.c
new file mode 100644
index 000000000000..aaf1390933ed
--- /dev/null
+++ b/arch/arm/mach-tegra/pinmux-t3-tables.c
@@ -0,0 +1,478 @@
+/*
+ * linux/arch/arm/mach-tegra/pinmux-t3-tables.c
+ *
+ * Common pinmux configurations for Tegra 3 SoCs
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/syscore_ops.h>
+
+#include <mach/iomap.h>
+#include <mach/pinmux.h>
+#include "gpio-names.h"
+
+#define SET_DRIVE_PINGROUP(pg_name, r, drv_down_offset, drv_down_mask, drv_up_offset, drv_up_mask, \
+ slew_rise_offset, slew_rise_mask, slew_fall_offset, slew_fall_mask) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg = r, \
+ .drvup_offset = drv_up_offset, \
+ .drvup_mask = drv_up_mask, \
+ .drvdown_offset = drv_down_offset, \
+ .drvdown_mask = drv_down_mask, \
+ .slewrise_offset = slew_rise_offset, \
+ .slewrise_mask = slew_rise_mask, \
+ .slewfall_offset = slew_fall_offset, \
+ .slewfall_mask = slew_fall_mask, \
+ }
+
+#define DEFAULT_DRIVE_PINGROUP(pg_name, r) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg = r, \
+ .drvup_offset = 20, \
+ .drvup_mask = 0x1f, \
+ .drvdown_offset = 12, \
+ .drvdown_mask = 0x1f, \
+ .slewrise_offset = 28, \
+ .slewrise_mask = 0x3, \
+ .slewfall_offset = 30, \
+ .slewfall_mask = 0x3, \
+ }
+
+const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
+ DEFAULT_DRIVE_PINGROUP(AO1, 0x868),
+ DEFAULT_DRIVE_PINGROUP(AO2, 0x86c),
+ DEFAULT_DRIVE_PINGROUP(AT1, 0x870),
+ DEFAULT_DRIVE_PINGROUP(AT2, 0x874),
+ DEFAULT_DRIVE_PINGROUP(AT3, 0x878),
+ DEFAULT_DRIVE_PINGROUP(AT4, 0x87c),
+ DEFAULT_DRIVE_PINGROUP(AT5, 0x880),
+ DEFAULT_DRIVE_PINGROUP(CDEV1, 0x884),
+ DEFAULT_DRIVE_PINGROUP(CDEV2, 0x888),
+ DEFAULT_DRIVE_PINGROUP(CSUS, 0x88c),
+ DEFAULT_DRIVE_PINGROUP(DAP1, 0x890),
+ DEFAULT_DRIVE_PINGROUP(DAP2, 0x894),
+ DEFAULT_DRIVE_PINGROUP(DAP3, 0x898),
+ DEFAULT_DRIVE_PINGROUP(DAP4, 0x89c),
+ DEFAULT_DRIVE_PINGROUP(DBG, 0x8a0),
+ DEFAULT_DRIVE_PINGROUP(LCD1, 0x8a4),
+ DEFAULT_DRIVE_PINGROUP(LCD2, 0x8a8),
+ SET_DRIVE_PINGROUP(SDIO2, 0x8ac, 12, 0x7f, 20, 0x7f,
+ 28, 0x3, 30, 0x3),
+ SET_DRIVE_PINGROUP(SDIO3, 0x8b0, 12, 0x7f, 20, 0x7f,
+ 28, 0x3, 30, 0x3),
+ DEFAULT_DRIVE_PINGROUP(SPI, 0x8b4),
+ DEFAULT_DRIVE_PINGROUP(UAA, 0x8b8),
+ DEFAULT_DRIVE_PINGROUP(UAB, 0x8bc),
+ DEFAULT_DRIVE_PINGROUP(UART2, 0x8c0),
+ DEFAULT_DRIVE_PINGROUP(UART3, 0x8c4),
+ DEFAULT_DRIVE_PINGROUP(VI1, 0x8c8),
+ SET_DRIVE_PINGROUP(SDIO1, 0x8ec, 12, 0x7f, 20, 0x7f,
+ 28, 0x3, 30, 0x3),
+ DEFAULT_DRIVE_PINGROUP(CRT, 0x8f8),
+ DEFAULT_DRIVE_PINGROUP(DDC, 0x8fc),
+ SET_DRIVE_PINGROUP(GMA, 0x900, 14, 0x1f, 19, 0x1f,
+ 24, 0xf, 28, 0xf),
+ SET_DRIVE_PINGROUP(GMB, 0x904, 14, 0x1f, 19, 0x1f,
+ 24, 0xf, 28, 0xf),
+ SET_DRIVE_PINGROUP(GMC, 0x908, 14, 0x1f, 19, 0x1f,
+ 24, 0xf, 28, 0xf),
+ SET_DRIVE_PINGROUP(GMD, 0x90c, 14, 0x1f, 19, 0x1f,
+ 24, 0xf, 28, 0xf),
+ DEFAULT_DRIVE_PINGROUP(GME, 0x910),
+ DEFAULT_DRIVE_PINGROUP(GMF, 0x914),
+ DEFAULT_DRIVE_PINGROUP(GMG, 0x918),
+ DEFAULT_DRIVE_PINGROUP(GMH, 0x91c),
+ DEFAULT_DRIVE_PINGROUP(OWR, 0x920),
+ DEFAULT_DRIVE_PINGROUP(UAD, 0x924),
+ DEFAULT_DRIVE_PINGROUP(GPV, 0x928),
+ DEFAULT_DRIVE_PINGROUP(DEV3, 0x92c),
+ DEFAULT_DRIVE_PINGROUP(CEC, 0x938),
+};
+
+#define PINGROUP(pg_name, gpio_nr, vdd, f0, f1, f2, f3, fs, iod, reg) \
+ [TEGRA_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .vddio = TEGRA_VDDIO_ ## vdd, \
+ .funcs = { \
+ TEGRA_MUX_ ## f0, \
+ TEGRA_MUX_ ## f1, \
+ TEGRA_MUX_ ## f2, \
+ TEGRA_MUX_ ## f3, \
+ }, \
+ .gpionr = TEGRA_GPIO_ ## gpio_nr, \
+ .func_safe = TEGRA_MUX_ ## fs, \
+ .tri_reg = reg, \
+ .tri_bit = 4, \
+ .mux_reg = reg, \
+ .mux_bit = 0, \
+ .pupd_reg = reg, \
+ .pupd_bit = 2, \
+ .io_default = TEGRA_PIN_ ## iod, \
+ .od_bit = 6, \
+ .lock_bit = 7, \
+ .ioreset_bit = 8, \
+ }
+
+/* !!!FIXME!!! FILL IN fSafe COLUMN IN TABLE ....... */
+
+#define PINGROUPS \
+ /* NAME GPIO VDD f0 f1 f2 f3 fSafe io reg */\
+ PINGROUP(ULPI_DATA0, PO1, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x3000),\
+ PINGROUP(ULPI_DATA1, PO2, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x3004),\
+ PINGROUP(ULPI_DATA2, PO3, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x3008),\
+ PINGROUP(ULPI_DATA3, PO4, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x300c),\
+ PINGROUP(ULPI_DATA4, PO5, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x3010),\
+ PINGROUP(ULPI_DATA5, PO6, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x3014),\
+ PINGROUP(ULPI_DATA6, PO7, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x3018),\
+ PINGROUP(ULPI_DATA7, PO0, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x301c),\
+ PINGROUP(ULPI_CLK, PY0, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x3020),\
+ PINGROUP(ULPI_DIR, PY1, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x3024),\
+ PINGROUP(ULPI_NXT, PY2, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x3028),\
+ PINGROUP(ULPI_STP, PY3, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x302c),\
+ PINGROUP(DAP3_FS, PP0, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x3030),\
+ PINGROUP(DAP3_DIN, PP1, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x3034),\
+ PINGROUP(DAP3_DOUT, PP2, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x3038),\
+ PINGROUP(DAP3_SCLK, PP3, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x303c),\
+ PINGROUP(GPIO_PV0, PV0, BB, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3040),\
+ PINGROUP(GPIO_PV1, PV1, BB, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3044),\
+ PINGROUP(SDMMC1_CLK, PZ0, SDMMC1, SDMMC1, RSVD1, RSVD2, INVALID, RSVD, INPUT, 0x3048),\
+ PINGROUP(SDMMC1_CMD, PZ1, SDMMC1, SDMMC1, RSVD1, RSVD2, INVALID, RSVD, INPUT, 0x304c),\
+ PINGROUP(SDMMC1_DAT3, PY4, SDMMC1, SDMMC1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x3050),\
+ PINGROUP(SDMMC1_DAT2, PY5, SDMMC1, SDMMC1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x3054),\
+ PINGROUP(SDMMC1_DAT1, PY6, SDMMC1, SDMMC1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x3058),\
+ PINGROUP(SDMMC1_DAT0, PY7, SDMMC1, SDMMC1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x305c),\
+ PINGROUP(GPIO_PV2, PV2, SDMMC1, OWR, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3060),\
+ PINGROUP(GPIO_PV3, PV3, SDMMC1, INVALID, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3064),\
+ PINGROUP(CLK2_OUT, PW5, SDMMC1, EXTPERIPH2, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3068),\
+ PINGROUP(CLK2_REQ, PCC5, SDMMC1, DAP, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x306c),\
+ PINGROUP(LCD_PWR1, PC1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3070),\
+ PINGROUP(LCD_PWR2, PC6, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x3074),\
+ PINGROUP(LCD_SDIN, PZ2, LCD, DISPLAYA, DISPLAYB, SPI5, RSVD, RSVD, OUTPUT, 0x3078),\
+ PINGROUP(LCD_SDOUT, PN5, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x307c),\
+ PINGROUP(LCD_WR_N, PZ3, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x3080),\
+ PINGROUP(LCD_CS0_N, PN4, LCD, DISPLAYA, DISPLAYB, SPI5, RSVD, RSVD, OUTPUT, 0x3084),\
+ PINGROUP(LCD_DC0, PN6, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3088),\
+ PINGROUP(LCD_SCK, PZ4, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x308c),\
+ PINGROUP(LCD_PWR0, PB2, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x3090),\
+ PINGROUP(LCD_PCLK, PB3, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3094),\
+ PINGROUP(LCD_DE, PJ1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3098),\
+ PINGROUP(LCD_HSYNC, PJ3, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x309c),\
+ PINGROUP(LCD_VSYNC, PJ4, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30a0),\
+ PINGROUP(LCD_D0, PE0, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30a4),\
+ PINGROUP(LCD_D1, PE1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30a8),\
+ PINGROUP(LCD_D2, PE2, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30ac),\
+ PINGROUP(LCD_D3, PE3, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30b0),\
+ PINGROUP(LCD_D4, PE4, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30b4),\
+ PINGROUP(LCD_D5, PE5, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30b8),\
+ PINGROUP(LCD_D6, PE6, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30bc),\
+ PINGROUP(LCD_D7, PE7, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30c0),\
+ PINGROUP(LCD_D8, PF0, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30c4),\
+ PINGROUP(LCD_D9, PF1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30c8),\
+ PINGROUP(LCD_D10, PF2, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30cc),\
+ PINGROUP(LCD_D11, PF3, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30d0),\
+ PINGROUP(LCD_D12, PF4, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30d4),\
+ PINGROUP(LCD_D13, PF5, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30d8),\
+ PINGROUP(LCD_D14, PF6, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30dc),\
+ PINGROUP(LCD_D15, PF7, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30e0),\
+ PINGROUP(LCD_D16, PM0, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30e4),\
+ PINGROUP(LCD_D17, PM1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30e8),\
+ PINGROUP(LCD_D18, PM2, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30ec),\
+ PINGROUP(LCD_D19, PM3, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30f0),\
+ PINGROUP(LCD_D20, PM4, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30f4),\
+ PINGROUP(LCD_D21, PM5, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30f8),\
+ PINGROUP(LCD_D22, PM6, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30fc),\
+ PINGROUP(LCD_D23, PM7, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3100),\
+ PINGROUP(LCD_CS1_N, PW0, LCD, DISPLAYA, DISPLAYB, SPI5, RSVD2, RSVD, OUTPUT, 0x3104),\
+ PINGROUP(LCD_M1, PW1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3108),\
+ PINGROUP(LCD_DC1, PD2, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x310c),\
+ PINGROUP(HDMI_INT, PN7, LCD, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3110),\
+ PINGROUP(DDC_SCL, PV4, LCD, I2C4, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3114),\
+ PINGROUP(DDC_SDA, PV5, LCD, I2C4, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3118),\
+ PINGROUP(CRT_HSYNC, PV6, LCD, CRT, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x311c),\
+ PINGROUP(CRT_VSYNC, PV7, LCD, CRT, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3120),\
+ PINGROUP(VI_D0, PT4, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x3124),\
+ PINGROUP(VI_D1, PD5, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3128),\
+ PINGROUP(VI_D2, PL0, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x312c),\
+ PINGROUP(VI_D3, PL1, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3130),\
+ PINGROUP(VI_D4, PL2, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3134),\
+ PINGROUP(VI_D5, PL3, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3138),\
+ PINGROUP(VI_D6, PL4, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x313c),\
+ PINGROUP(VI_D7, PL5, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3140),\
+ PINGROUP(VI_D8, PL6, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3144),\
+ PINGROUP(VI_D9, PL7, VI, INVALID, SDMMC2, VI, RSVD1, RSVD, INPUT, 0x3148),\
+ PINGROUP(VI_D10, PT2, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x314c),\
+ PINGROUP(VI_D11, PT3, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x3150),\
+ PINGROUP(VI_PCLK, PT0, VI, RSVD1, SDMMC2, VI, RSVD2, RSVD, INPUT, 0x3154),\
+ PINGROUP(VI_MCLK, PT1, VI, INVALID, INVALID, INVALID, VI, RSVD, INPUT, 0x3158),\
+ PINGROUP(VI_VSYNC, PD6, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x315c),\
+ PINGROUP(VI_HSYNC, PD7, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x3160),\
+ PINGROUP(UART2_RXD, PC3, UART, IRDA, SPDIF, UARTA, SPI4, RSVD, INPUT, 0x3164),\
+ PINGROUP(UART2_TXD, PC2, UART, IRDA, SPDIF, UARTA, SPI4, RSVD, INPUT, 0x3168),\
+ PINGROUP(UART2_RTS_N, PJ6, UART, UARTA, UARTB, GMI, SPI4, RSVD, INPUT, 0x316c),\
+ PINGROUP(UART2_CTS_N, PJ5, UART, UARTA, UARTB, GMI, SPI4, RSVD, INPUT, 0x3170),\
+ PINGROUP(UART3_TXD, PW6, UART, UARTC, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x3174),\
+ PINGROUP(UART3_RXD, PW7, UART, UARTC, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x3178),\
+ PINGROUP(UART3_CTS_N, PA1, UART, UARTC, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x317c),\
+ PINGROUP(UART3_RTS_N, PC0, UART, UARTC, PWM0, GMI, RSVD2, RSVD, INPUT, 0x3180),\
+ PINGROUP(GPIO_PU0, PU0, UART, OWR, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3184),\
+ PINGROUP(GPIO_PU1, PU1, UART, RSVD1, UARTA, GMI, RSVD2, RSVD, INPUT, 0x3188),\
+ PINGROUP(GPIO_PU2, PU2, UART, RSVD1, UARTA, GMI, RSVD2, RSVD, INPUT, 0x318c),\
+ PINGROUP(GPIO_PU3, PU3, UART, PWM0, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3190),\
+ PINGROUP(GPIO_PU4, PU4, UART, PWM1, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3194),\
+ PINGROUP(GPIO_PU5, PU5, UART, PWM2, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3198),\
+ PINGROUP(GPIO_PU6, PU6, UART, PWM3, UARTA, GMI, RSVD1, RSVD, INPUT, 0x319c),\
+ PINGROUP(GEN1_I2C_SDA, PC5, UART, I2C1, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31a0),\
+ PINGROUP(GEN1_I2C_SCL, PC4, UART, I2C1, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31a4),\
+ PINGROUP(DAP4_FS, PP4, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31a8),\
+ PINGROUP(DAP4_DIN, PP5, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31ac),\
+ PINGROUP(DAP4_DOUT, PP6, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31b0),\
+ PINGROUP(DAP4_SCLK, PP7, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31b4),\
+ PINGROUP(CLK3_OUT, PEE0, UART, EXTPERIPH3, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31b8),\
+ PINGROUP(CLK3_REQ, PEE1, UART, DEV3, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31bc),\
+ PINGROUP(GMI_WP_N, PC7, GMI, RSVD1, NAND, GMI, GMI_ALT, RSVD, INPUT, 0x31c0),\
+ PINGROUP(GMI_IORDY, PI5, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31c4),\
+ PINGROUP(GMI_WAIT, PI7, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31c8),\
+ PINGROUP(GMI_ADV_N, PK0, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31cc),\
+ PINGROUP(GMI_CLK, PK1, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31d0),\
+ PINGROUP(GMI_CS0_N, PJ0, GMI, RSVD1, NAND, GMI, INVALID, RSVD, INPUT, 0x31d4),\
+ PINGROUP(GMI_CS1_N, PJ2, GMI, RSVD1, NAND, GMI, DTV, RSVD, INPUT, 0x31d8),\
+ PINGROUP(GMI_CS2_N, PK3, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31dc),\
+ PINGROUP(GMI_CS3_N, PK4, GMI, RSVD1, NAND, GMI, GMI_ALT, RSVD, INPUT, 0x31e0),\
+ PINGROUP(GMI_CS4_N, PK2, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31e4),\
+ PINGROUP(GMI_CS6_N, PI3, GMI, NAND, NAND_ALT, GMI, SATA, RSVD, INPUT, 0x31e8),\
+ PINGROUP(GMI_CS7_N, PI6, GMI, NAND, NAND_ALT, GMI, GMI_ALT, RSVD, INPUT, 0x31ec),\
+ PINGROUP(GMI_AD0, PG0, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31f0),\
+ PINGROUP(GMI_AD1, PG1, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31f4),\
+ PINGROUP(GMI_AD2, PG2, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31f8),\
+ PINGROUP(GMI_AD3, PG3, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31fc),\
+ PINGROUP(GMI_AD4, PG4, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3200),\
+ PINGROUP(GMI_AD5, PG5, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3204),\
+ PINGROUP(GMI_AD6, PG6, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3208),\
+ PINGROUP(GMI_AD7, PG7, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x320c),\
+ PINGROUP(GMI_AD8, PH0, GMI, PWM0, NAND, GMI, RSVD2, RSVD, INPUT, 0x3210),\
+ PINGROUP(GMI_AD9, PH1, GMI, PWM1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3214),\
+ PINGROUP(GMI_AD10, PH2, GMI, PWM2, NAND, GMI, RSVD2, RSVD, INPUT, 0x3218),\
+ PINGROUP(GMI_AD11, PH3, GMI, PWM3, NAND, GMI, RSVD2, RSVD, INPUT, 0x321c),\
+ PINGROUP(GMI_AD12, PH4, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3220),\
+ PINGROUP(GMI_AD13, PH5, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3224),\
+ PINGROUP(GMI_AD14, PH6, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3228),\
+ PINGROUP(GMI_AD15, PH7, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x322c),\
+ PINGROUP(GMI_A16, PJ7, GMI, UARTD, SPI4, GMI, GMI_ALT, RSVD, INPUT, 0x3230),\
+ PINGROUP(GMI_A17, PB0, GMI, UARTD, SPI4, GMI, INVALID, RSVD, INPUT, 0x3234),\
+ PINGROUP(GMI_A18, PB1, GMI, UARTD, SPI4, GMI, INVALID, RSVD, INPUT, 0x3238),\
+ PINGROUP(GMI_A19, PK7, GMI, UARTD, SPI4, GMI, RSVD3, RSVD, INPUT, 0x323c),\
+ PINGROUP(GMI_WR_N, PI0, GMI, RSVD1, NAND, GMI, RSVD3, RSVD, INPUT, 0x3240),\
+ PINGROUP(GMI_OE_N, PI1, GMI, RSVD1, NAND, GMI, RSVD3, RSVD, INPUT, 0x3244),\
+ PINGROUP(GMI_DQS, PI2, GMI, RSVD1, NAND, GMI, RSVD3, RSVD, INPUT, 0x3248),\
+ PINGROUP(GMI_RST_N, PI4, GMI, NAND, NAND_ALT, GMI, RSVD3, RSVD, INPUT, 0x324c),\
+ PINGROUP(GEN2_I2C_SCL, PT5, GMI, I2C2, INVALID, GMI, RSVD3, RSVD, INPUT, 0x3250),\
+ PINGROUP(GEN2_I2C_SDA, PT6, GMI, I2C2, INVALID, GMI, RSVD3, RSVD, INPUT, 0x3254),\
+ PINGROUP(SDMMC4_CLK, PCC4, SDMMC4, INVALID, NAND, GMI, SDMMC4, RSVD, INPUT, 0x3258),\
+ PINGROUP(SDMMC4_CMD, PT7, SDMMC4, I2C3, NAND, GMI, SDMMC4, RSVD, INPUT, 0x325c),\
+ PINGROUP(SDMMC4_DAT0, PAA0, SDMMC4, UARTE, SPI3, GMI, SDMMC4, RSVD, INPUT, 0x3260),\
+ PINGROUP(SDMMC4_DAT1, PAA1, SDMMC4, UARTE, SPI3, GMI, SDMMC4, RSVD, INPUT, 0x3264),\
+ PINGROUP(SDMMC4_DAT2, PAA2, SDMMC4, UARTE, SPI3, GMI, SDMMC4, RSVD, INPUT, 0x3268),\
+ PINGROUP(SDMMC4_DAT3, PAA3, SDMMC4, UARTE, SPI3, GMI, SDMMC4, RSVD, INPUT, 0x326c),\
+ PINGROUP(SDMMC4_DAT4, PAA4, SDMMC4, I2C3, I2S4, GMI, SDMMC4, RSVD, INPUT, 0x3270),\
+ PINGROUP(SDMMC4_DAT5, PAA5, SDMMC4, VGP3, I2S4, GMI, SDMMC4, RSVD, INPUT, 0x3274),\
+ PINGROUP(SDMMC4_DAT6, PAA6, SDMMC4, VGP4, I2S4, GMI, SDMMC4, RSVD, INPUT, 0x3278),\
+ PINGROUP(SDMMC4_DAT7, PAA7, SDMMC4, VGP5, I2S4, GMI, SDMMC4, RSVD, INPUT, 0x327c),\
+ PINGROUP(SDMMC4_RST_N, PCC3, SDMMC4, VGP6, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3280),\
+ PINGROUP(CAM_MCLK, PCC0, CAM, VI, INVALID, VI_ALT2, POPSDMMC4, RSVD, INPUT, 0x3284),\
+ PINGROUP(GPIO_PCC1, PCC1, CAM, I2S4, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3288),\
+ PINGROUP(GPIO_PBB0, PBB0, CAM, I2S4, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x328c),\
+ PINGROUP(CAM_I2C_SCL, PBB1, CAM, INVALID, I2C3, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3290),\
+ PINGROUP(CAM_I2C_SDA, PBB2, CAM, INVALID, I2C3, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3294),\
+ PINGROUP(GPIO_PBB3, PBB3, CAM, VGP3, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x3298),\
+ PINGROUP(GPIO_PBB4, PBB4, CAM, VGP4, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x329c),\
+ PINGROUP(GPIO_PBB5, PBB5, CAM, VGP5, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x32a0),\
+ PINGROUP(GPIO_PBB6, PBB6, CAM, VGP6, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x32a4),\
+ PINGROUP(GPIO_PBB7, PBB7, CAM, I2S4, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x32a8),\
+ PINGROUP(GPIO_PCC2, PCC2, CAM, I2S4, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32ac),\
+ PINGROUP(JTAG_RTCK, PU7, SYS, RTCK, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32b0),\
+ PINGROUP(PWR_I2C_SCL, PZ6, SYS, I2CPWR, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32b4),\
+ PINGROUP(PWR_I2C_SDA, PZ7, SYS, I2CPWR, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32b8),\
+ PINGROUP(KB_ROW0, PR0, SYS, KBC, INVALID, RSVD2, RSVD3, RSVD, INPUT, 0x32bc),\
+ PINGROUP(KB_ROW1, PR1, SYS, KBC, INVALID, RSVD2, RSVD3, RSVD, INPUT, 0x32c0),\
+ PINGROUP(KB_ROW2, PR2, SYS, KBC, INVALID, RSVD2, RSVD3, RSVD, INPUT, 0x32c4),\
+ PINGROUP(KB_ROW3, PR3, SYS, KBC, INVALID, RSVD2, INVALID, RSVD, INPUT, 0x32c8),\
+ PINGROUP(KB_ROW4, PR4, SYS, KBC, INVALID, TRACE, RSVD3, RSVD, INPUT, 0x32cc),\
+ PINGROUP(KB_ROW5, PR5, SYS, KBC, INVALID, TRACE, OWR, RSVD, INPUT, 0x32d0),\
+ PINGROUP(KB_ROW6, PR6, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32d4),\
+ PINGROUP(KB_ROW7, PR7, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32d8),\
+ PINGROUP(KB_ROW8, PS0, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32dc),\
+ PINGROUP(KB_ROW9, PS1, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32e0),\
+ PINGROUP(KB_ROW10, PS2, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32e4),\
+ PINGROUP(KB_ROW11, PS3, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32e8),\
+ PINGROUP(KB_ROW12, PS4, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32ec),\
+ PINGROUP(KB_ROW13, PS5, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32f0),\
+ PINGROUP(KB_ROW14, PS6, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32f4),\
+ PINGROUP(KB_ROW15, PS7, SYS, KBC, INVALID, SDMMC2, INVALID, RSVD, INPUT, 0x32f8),\
+ PINGROUP(KB_COL0, PQ0, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x32fc),\
+ PINGROUP(KB_COL1, PQ1, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x3300),\
+ PINGROUP(KB_COL2, PQ2, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x3304),\
+ PINGROUP(KB_COL3, PQ3, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x3308),\
+ PINGROUP(KB_COL4, PQ4, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x330c),\
+ PINGROUP(KB_COL5, PQ5, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x3310),\
+ PINGROUP(KB_COL6, PQ6, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x3314),\
+ PINGROUP(KB_COL7, PQ7, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x3318),\
+ PINGROUP(CLK_32K_OUT, PA0, SYS, BLINK, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x331c),\
+ PINGROUP(SYS_CLK_REQ, PZ5, SYS, SYSCLK, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3320),\
+ PINGROUP(CORE_PWR_REQ, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3324),\
+ PINGROUP(CPU_PWR_REQ, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3328),\
+ PINGROUP(PWR_INT_N, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x332c),\
+ PINGROUP(CLK_32K_IN, INVALID, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3330),\
+ PINGROUP(OWR, INVALID, SYS, OWR, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3334),\
+ PINGROUP(DAP1_FS, PN0, AUDIO, I2S0, HDA, GMI, SDMMC2, RSVD, INPUT, 0x3338),\
+ PINGROUP(DAP1_DIN, PN1, AUDIO, I2S0, HDA, GMI, SDMMC2, RSVD, INPUT, 0x333c),\
+ PINGROUP(DAP1_DOUT, PN2, AUDIO, I2S0, HDA, GMI, SDMMC2, RSVD, INPUT, 0x3340),\
+ PINGROUP(DAP1_SCLK, PN3, AUDIO, I2S0, HDA, GMI, SDMMC2, RSVD, INPUT, 0x3344),\
+ PINGROUP(CLK1_REQ, PEE2, AUDIO, DAP, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x3348),\
+ PINGROUP(CLK1_OUT, PW4, AUDIO, EXTPERIPH1, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x334c),\
+ PINGROUP(SPDIF_IN, PK6, AUDIO, SPDIF, HDA, INVALID, DAPSDMMC2, RSVD, INPUT, 0x3350),\
+ PINGROUP(SPDIF_OUT, PK5, AUDIO, SPDIF, RSVD1, INVALID, DAPSDMMC2, RSVD, INPUT, 0x3354),\
+ PINGROUP(DAP2_FS, PA2, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x3358),\
+ PINGROUP(DAP2_DIN, PA4, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x335c),\
+ PINGROUP(DAP2_DOUT, PA5, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x3360),\
+ PINGROUP(DAP2_SCLK, PA3, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x3364),\
+ PINGROUP(SPI2_MOSI, PX0, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x3368),\
+ PINGROUP(SPI2_MISO, PX1, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x336c),\
+ PINGROUP(SPI2_CS0_N, PX3, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x3370),\
+ PINGROUP(SPI2_SCK, PX2, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x3374),\
+ PINGROUP(SPI1_MOSI, PX4, AUDIO, SPI2, SPI1, INVALID, GMI, RSVD, INPUT, 0x3378),\
+ PINGROUP(SPI1_SCK, PX5, AUDIO, SPI2, SPI1, INVALID, GMI, RSVD, INPUT, 0x337c),\
+ PINGROUP(SPI1_CS0_N, PX6, AUDIO, SPI2, SPI1, INVALID, GMI, RSVD, INPUT, 0x3380),\
+ PINGROUP(SPI1_MISO, PX7, AUDIO, INVALID, SPI1, INVALID, RSVD3, RSVD, INPUT, 0x3384),\
+ PINGROUP(SPI2_CS1_N, PW2, AUDIO, INVALID, SPI2, INVALID, INVALID, RSVD, INPUT, 0x3388),\
+ PINGROUP(SPI2_CS2_N, PW3, AUDIO, INVALID, SPI2, INVALID, INVALID, RSVD, INPUT, 0x338c),\
+ PINGROUP(SDMMC3_CLK, PA6, SDMMC3, UARTA, PWM2, SDMMC3, INVALID, RSVD, INPUT, 0x3390),\
+ PINGROUP(SDMMC3_CMD, PA7, SDMMC3, UARTA, PWM3, SDMMC3, INVALID, RSVD, INPUT, 0x3394),\
+ PINGROUP(SDMMC3_DAT0, PB7, SDMMC3, RSVD0, RSVD1, SDMMC3, INVALID, RSVD, INPUT, 0x3398),\
+ PINGROUP(SDMMC3_DAT1, PB6, SDMMC3, RSVD0, RSVD1, SDMMC3, INVALID, RSVD, INPUT, 0x339c),\
+ PINGROUP(SDMMC3_DAT2, PB5, SDMMC3, RSVD0, PWM1, SDMMC3, INVALID, RSVD, INPUT, 0x33a0),\
+ PINGROUP(SDMMC3_DAT3, PB4, SDMMC3, RSVD0, PWM0, SDMMC3, INVALID, RSVD, INPUT, 0x33a4),\
+ PINGROUP(SDMMC3_DAT4, PD1, SDMMC3, PWM1, INVALID, SDMMC3, INVALID, RSVD, INPUT, 0x33a8),\
+ PINGROUP(SDMMC3_DAT5, PD0, SDMMC3, PWM0, INVALID, SDMMC3, INVALID, RSVD, INPUT, 0x33ac),\
+ PINGROUP(SDMMC3_DAT6, PD3, SDMMC3, SPDIF, INVALID, SDMMC3, INVALID, RSVD, INPUT, 0x33b0),\
+ PINGROUP(SDMMC3_DAT7, PD4, SDMMC3, SPDIF, INVALID, SDMMC3, INVALID, RSVD, INPUT, 0x33b4),\
+ PINGROUP(PEX_L0_PRSNT_N, PDD0, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33b8),\
+ PINGROUP(PEX_L0_RST_N, PDD1, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33bc),\
+ PINGROUP(PEX_L0_CLKREQ_N, PDD2, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33c0),\
+ PINGROUP(PEX_WAKE_N, PDD3, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33c4),\
+ PINGROUP(PEX_L1_PRSNT_N, PDD4, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33c8),\
+ PINGROUP(PEX_L1_RST_N, PDD5, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33cc),\
+ PINGROUP(PEX_L1_CLKREQ_N, PDD6, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33d0),\
+ PINGROUP(PEX_L2_PRSNT_N, PDD7, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33d4),\
+ PINGROUP(PEX_L2_RST_N, PCC6, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33d8),\
+ PINGROUP(PEX_L2_CLKREQ_N, PCC7, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33dc),\
+ PINGROUP(HDMI_CEC, PEE3, SYS, CEC, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x33e0),\
+ /* END OF LIST */
+
+const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
+ PINGROUPS
+};
+
+#undef PINGROUP
+
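+/* Re-expand PINGROUPS with a new PINGROUP() definition to build the
+ * GPIO-number to pingroup reverse-lookup table. */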
+#define PINGROUP(pg_name, gpio_nr, vdd, f0, f1, f2, f3, fs, iod, reg) \
+ [TEGRA_GPIO_##gpio_nr] = TEGRA_PINGROUP_ ##pg_name\
+
+const int gpio_to_pingroup[TEGRA_MAX_GPIO] = {
+ PINGROUPS
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+static u32 pinmux_reg[TEGRA_MAX_PINGROUP +
+ ARRAY_SIZE(tegra_soc_drive_pingroups)];
+
+static inline unsigned long pg_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+}
+
+static inline void pg_writel(unsigned long value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+}
+
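+/* Save every pinmux and drive-strength register across suspend; the resume
+ * path below restores them in the same order. */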
+static int tegra_pinmux_suspend(void)
+{
+ unsigned int i;
+ u32 *ctx = pinmux_reg;
+
+ for (i = 0; i < TEGRA_MAX_PINGROUP; i++)
+ *ctx++ = pg_readl(tegra_soc_pingroups[i].mux_reg);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
+ *ctx++ = pg_readl(tegra_soc_drive_pingroups[i].reg);
+
+ return 0;
+}
+
+static void tegra_pinmux_resume(void)
+{
+ unsigned int i;
+ u32 *ctx = pinmux_reg;
+
+ for (i = 0; i < TEGRA_MAX_PINGROUP; i++)
+ pg_writel(*ctx++, tegra_soc_pingroups[i].mux_reg);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
+ pg_writel(*ctx++, tegra_soc_drive_pingroups[i].reg);
+}
+
+static struct syscore_ops tegra_pinmux_syscore_ops = {
+ .suspend = tegra_pinmux_suspend,
+ .resume = tegra_pinmux_resume,
+};
+#endif
+
+#define SET_DRIVE(_name, _hsm, _schmitt, _drive, _pulldn_drive, _pullup_drive, _pulldn_slew, _pullup_slew) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_##_hsm, \
+ .schmitt = TEGRA_SCHMITT_##_schmitt, \
+ .drive = TEGRA_DRIVE_##_drive, \
+ .pull_down = TEGRA_PULL_##_pulldn_drive, \
+ .pull_up = TEGRA_PULL_##_pullup_drive, \
+ .slew_rising = TEGRA_SLEW_##_pulldn_slew, \
+ .slew_falling = TEGRA_SLEW_##_pullup_slew, \
+ }
+
+static __initdata struct tegra_drive_pingroup_config t30_def_drive_pinmux[] = {
+ SET_DRIVE(DAP2, DISABLE, ENABLE, DIV_1, 31, 31, FASTEST, FASTEST),
+};
+
+void __init tegra_init_pinmux(void)
+{
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&tegra_pinmux_syscore_ops);
+#endif
+
+ tegra_drive_pinmux_config_table(t30_def_drive_pinmux,
+ ARRAY_SIZE(t30_def_drive_pinmux));
+}
diff --git a/arch/arm/mach-tegra/pinmux.c b/arch/arm/mach-tegra/pinmux.c
index f80d507671bc..bf8627fb2c94 100644
--- a/arch/arm/mach-tegra/pinmux.c
+++ b/arch/arm/mach-tegra/pinmux.c
@@ -2,6 +2,7 @@
* linux/arch/arm/mach-tegra/pinmux.c
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -34,68 +35,12 @@
static const struct tegra_pingroup_desc *const pingroups = tegra_soc_pingroups;
static const struct tegra_drive_pingroup_desc *const drive_pingroups = tegra_soc_drive_pingroups;
+static const int *gpio_to_pingroups_map = gpio_to_pingroup;
static char *tegra_mux_names[TEGRA_MAX_MUX] = {
- [TEGRA_MUX_AHB_CLK] = "AHB_CLK",
- [TEGRA_MUX_APB_CLK] = "APB_CLK",
- [TEGRA_MUX_AUDIO_SYNC] = "AUDIO_SYNC",
- [TEGRA_MUX_CRT] = "CRT",
- [TEGRA_MUX_DAP1] = "DAP1",
- [TEGRA_MUX_DAP2] = "DAP2",
- [TEGRA_MUX_DAP3] = "DAP3",
- [TEGRA_MUX_DAP4] = "DAP4",
- [TEGRA_MUX_DAP5] = "DAP5",
- [TEGRA_MUX_DISPLAYA] = "DISPLAYA",
- [TEGRA_MUX_DISPLAYB] = "DISPLAYB",
- [TEGRA_MUX_EMC_TEST0_DLL] = "EMC_TEST0_DLL",
- [TEGRA_MUX_EMC_TEST1_DLL] = "EMC_TEST1_DLL",
- [TEGRA_MUX_GMI] = "GMI",
- [TEGRA_MUX_GMI_INT] = "GMI_INT",
- [TEGRA_MUX_HDMI] = "HDMI",
- [TEGRA_MUX_I2C] = "I2C",
- [TEGRA_MUX_I2C2] = "I2C2",
- [TEGRA_MUX_I2C3] = "I2C3",
- [TEGRA_MUX_IDE] = "IDE",
- [TEGRA_MUX_IRDA] = "IRDA",
- [TEGRA_MUX_KBC] = "KBC",
- [TEGRA_MUX_MIO] = "MIO",
- [TEGRA_MUX_MIPI_HS] = "MIPI_HS",
- [TEGRA_MUX_NAND] = "NAND",
- [TEGRA_MUX_OSC] = "OSC",
- [TEGRA_MUX_OWR] = "OWR",
- [TEGRA_MUX_PCIE] = "PCIE",
- [TEGRA_MUX_PLLA_OUT] = "PLLA_OUT",
- [TEGRA_MUX_PLLC_OUT1] = "PLLC_OUT1",
- [TEGRA_MUX_PLLM_OUT1] = "PLLM_OUT1",
- [TEGRA_MUX_PLLP_OUT2] = "PLLP_OUT2",
- [TEGRA_MUX_PLLP_OUT3] = "PLLP_OUT3",
- [TEGRA_MUX_PLLP_OUT4] = "PLLP_OUT4",
- [TEGRA_MUX_PWM] = "PWM",
- [TEGRA_MUX_PWR_INTR] = "PWR_INTR",
- [TEGRA_MUX_PWR_ON] = "PWR_ON",
- [TEGRA_MUX_RTCK] = "RTCK",
- [TEGRA_MUX_SDIO1] = "SDIO1",
- [TEGRA_MUX_SDIO2] = "SDIO2",
- [TEGRA_MUX_SDIO3] = "SDIO3",
- [TEGRA_MUX_SDIO4] = "SDIO4",
- [TEGRA_MUX_SFLASH] = "SFLASH",
- [TEGRA_MUX_SPDIF] = "SPDIF",
- [TEGRA_MUX_SPI1] = "SPI1",
- [TEGRA_MUX_SPI2] = "SPI2",
- [TEGRA_MUX_SPI2_ALT] = "SPI2_ALT",
- [TEGRA_MUX_SPI3] = "SPI3",
- [TEGRA_MUX_SPI4] = "SPI4",
- [TEGRA_MUX_TRACE] = "TRACE",
- [TEGRA_MUX_TWC] = "TWC",
- [TEGRA_MUX_UARTA] = "UARTA",
- [TEGRA_MUX_UARTB] = "UARTB",
- [TEGRA_MUX_UARTC] = "UARTC",
- [TEGRA_MUX_UARTD] = "UARTD",
- [TEGRA_MUX_UARTE] = "UARTE",
- [TEGRA_MUX_ULPI] = "ULPI",
- [TEGRA_MUX_VI] = "VI",
- [TEGRA_MUX_VI_SENSOR_CLK] = "VI_SENSOR_CLK",
- [TEGRA_MUX_XIO] = "XIO",
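+/* Generate the mux-name strings from TEGRA_MUX_LIST with an X-macro so the
+ * table stays in sync with enum tegra_mux_func. */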
+#define TEGRA_MUX(mux) [TEGRA_MUX_##mux] = #mux,
+ TEGRA_MUX_LIST
+#undef TEGRA_MUX
[TEGRA_MUX_SAFE] = "<safe>",
};
@@ -137,8 +82,8 @@ static const char *func_name(enum tegra_mux_func func)
if (func == TEGRA_MUX_RSVD4)
return "RSVD4";
- if (func == TEGRA_MUX_NONE)
- return "NONE";
+ if (func == TEGRA_MUX_INVALID)
+ return "INVALID";
if (func < 0 || func >= TEGRA_MAX_MUX)
return "<UNKNOWN>";
@@ -169,21 +114,93 @@ static const char *pupd_name(unsigned long val)
}
}
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static const char *lock_name(unsigned long val)
+{
+ switch (val) {
+ case TEGRA_PIN_LOCK_DEFAULT:
+		return "LOCK_DEFAULT";
+
+ case TEGRA_PIN_LOCK_DISABLE:
+ return "LOCK_DISABLE";
+
+ case TEGRA_PIN_LOCK_ENABLE:
+ return "LOCK_ENABLE";
+ default:
+ return "LOCK_DEFAULT";
+ }
+}
+
+static const char *od_name(unsigned long val)
+{
+ switch (val) {
+ case TEGRA_PIN_OD_DEFAULT:
+ return "OD_DEFAULT";
+
+ case TEGRA_PIN_OD_DISABLE:
+ return "OD_DISABLE";
+
+ case TEGRA_PIN_OD_ENABLE:
+ return "OD_ENABLE";
+ default:
+ return "OD_DEFAULT";
+ }
+}
+
+static const char *ioreset_name(unsigned long val)
+{
+ switch (val) {
+ case TEGRA_PIN_IO_RESET_DEFAULT:
+ return "IO_RESET_DEFAULT";
+
+ case TEGRA_PIN_IO_RESET_DISABLE:
+ return "IO_RESET_DISABLE";
+
+ case TEGRA_PIN_IO_RESET_ENABLE:
+ return "IO_RESET_ENABLE";
+ default:
+ return "IO_RESET_DEFAULT";
+ }
+}
+#endif
+
+#if defined(TEGRA_PINMUX_HAS_IO_DIRECTION)
+static const char *io_name(unsigned long val)
+{
+ switch (val) {
+ case 0:
+ return "OUTPUT";
+
+ case 1:
+ return "INPUT";
+
+ default:
+ return "RSVD";
+ }
+}
+#endif
static inline unsigned long pg_readl(unsigned long offset)
{
- return readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+ return readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE) + offset);
}
static inline void pg_writel(unsigned long value, unsigned long offset)
{
- writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+ writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE) + offset);
}
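+/* Map a GPIO number to the pingroup that owns it, using the table generated
+ * from the PINGROUPS list. */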
+int tegra_pinmux_get_pingroup(int gpio_nr)
+{
+ return gpio_to_pingroups_map[gpio_nr];
+}
+EXPORT_SYMBOL_GPL(tegra_pinmux_get_pingroup);
+
static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
{
int mux = -1;
int i;
+ int find = 0;
unsigned long reg;
unsigned long flags;
enum tegra_pingroup pg = config->pingroup;
@@ -192,8 +209,15 @@ static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].mux_reg < 0)
+ if (pingroups[pg].mux_reg <= 0)
+ return -EINVAL;
+
+ if (func == TEGRA_MUX_INVALID) {
+ pr_err("The pingroup %s is not recommended for option %s\n",
+ pingroup_name(pg), func_name(func));
+ WARN_ON(1);
return -EINVAL;
+ }
if (func < 0)
return -ERANGE;
@@ -202,24 +226,47 @@ static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
func = pingroups[pg].func_safe;
if (func & TEGRA_MUX_RSVD) {
- mux = func & 0x3;
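+		/* A reserved option was requested: use the exact RSVDn entry
+		 * if the pingroup has one, otherwise fall back to any reserved
+		 * slot (a mismatch warning is printed below). */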
+ for (i = 0; i < 4; i++) {
+ if (pingroups[pg].funcs[i] & TEGRA_MUX_RSVD)
+ mux = i;
+
+ if (pingroups[pg].funcs[i] == func) {
+ mux = i;
+ find = 1;
+ break;
+ }
+ }
} else {
for (i = 0; i < 4; i++) {
if (pingroups[pg].funcs[i] == func) {
mux = i;
+ find = 1;
break;
}
}
}
- if (mux < 0)
+ if (mux < 0) {
+		pr_err("The pingroup %s does not support option %s\n",
+ pingroup_name(pg), func_name(func));
+ WARN_ON(1);
return -EINVAL;
+ }
+
+ if (!find)
+ pr_warn("The pingroup %s was configured to %s instead of %s\n",
+ pingroup_name(pg), func_name(pingroups[pg].funcs[mux]),
+ func_name(func));
spin_lock_irqsave(&mux_lock, flags);
reg = pg_readl(pingroups[pg].mux_reg);
reg &= ~(0x3 << pingroups[pg].mux_bit);
reg |= mux << pingroups[pg].mux_bit;
+#if defined(TEGRA_PINMUX_HAS_IO_DIRECTION)
+ reg &= ~(0x1 << 5);
+ reg |= ((config->io & 0x1) << 5);
+#endif
pg_writel(reg, pingroups[pg].mux_reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -227,6 +274,28 @@ static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
return 0;
}
+int tegra_pinmux_get_func(enum tegra_pingroup pg)
+{
+ int mux = -1;
+ unsigned long reg;
+ unsigned long flags;
+
+ if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ return -ERANGE;
+
+ if (pingroups[pg].mux_reg <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&mux_lock, flags);
+
+ reg = pg_readl(pingroups[pg].mux_reg);
+ mux = (reg >> pingroups[pg].mux_bit) & 0x3;
+
+ spin_unlock_irqrestore(&mux_lock, flags);
+
+ return mux;
+}
+
int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
enum tegra_tristate tristate)
{
@@ -236,7 +305,7 @@ int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].tri_reg < 0)
+ if (pingroups[pg].tri_reg <= 0)
return -EINVAL;
spin_lock_irqsave(&mux_lock, flags);
@@ -252,6 +321,114 @@ int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
return 0;
}
+int tegra_pinmux_set_io(enum tegra_pingroup pg,
+ enum tegra_pin_io input)
+{
+#if defined(TEGRA_PINMUX_HAS_IO_DIRECTION)
+ unsigned long io;
+
+ if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ return -ERANGE;
+
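+	/* Bit 5 of the pinmux register selects the pad I/O direction
+	 * (1 = input) on SoCs that support it. */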
+ io = pg_readl(pingroups[pg].mux_reg);
+ if (input)
+ io |= 0x20;
+ else
+ io &= ~(1 << 5);
+ pg_writel(io, pingroups[pg].mux_reg);
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_pinmux_set_io);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static int tegra_pinmux_set_lock(enum tegra_pingroup pg,
+ enum tegra_pin_lock lock)
+{
+ unsigned long reg;
+ unsigned long flags;
+
+ if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ return -ERANGE;
+
+ if (pingroups[pg].mux_reg <= 0)
+ return -EINVAL;
+
+ if ((lock == TEGRA_PIN_LOCK_DEFAULT) || (pingroups[pg].lock_bit < 0))
+ return 0;
+
+ spin_lock_irqsave(&mux_lock, flags);
+
+ reg = pg_readl(pingroups[pg].mux_reg);
+ reg &= ~(0x1 << pingroups[pg].lock_bit);
+ if (lock == TEGRA_PIN_LOCK_ENABLE)
+ reg |= (0x1 << pingroups[pg].lock_bit);
+
+ pg_writel(reg, pingroups[pg].mux_reg);
+
+ spin_unlock_irqrestore(&mux_lock, flags);
+ return 0;
+}
+
+static int tegra_pinmux_set_od(enum tegra_pingroup pg,
+ enum tegra_pin_od od)
+{
+ unsigned long reg;
+ unsigned long flags;
+
+ if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ return -ERANGE;
+
+ if (pingroups[pg].mux_reg <= 0)
+ return -EINVAL;
+
+ if ((od == TEGRA_PIN_OD_DEFAULT) || (pingroups[pg].od_bit < 0))
+ return 0;
+
+ spin_lock_irqsave(&mux_lock, flags);
+
+ reg = pg_readl(pingroups[pg].mux_reg);
+ reg &= ~(0x1 << pingroups[pg].od_bit);
+ if (od == TEGRA_PIN_OD_ENABLE)
+ reg |= 1 << pingroups[pg].od_bit;
+
+ pg_writel(reg, pingroups[pg].mux_reg);
+
+ spin_unlock_irqrestore(&mux_lock, flags);
+
+ return 0;
+}
+
+static int tegra_pinmux_set_ioreset(enum tegra_pingroup pg,
+ enum tegra_pin_ioreset ioreset)
+{
+ unsigned long reg;
+ unsigned long flags;
+
+ if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ return -ERANGE;
+
+ if (pingroups[pg].mux_reg <= 0)
+ return -EINVAL;
+
+ if ((ioreset == TEGRA_PIN_IO_RESET_DEFAULT) || (pingroups[pg].ioreset_bit < 0))
+ return 0;
+
+ spin_lock_irqsave(&mux_lock, flags);
+
+ reg = pg_readl(pingroups[pg].mux_reg);
+ reg &= ~(0x1 << pingroups[pg].ioreset_bit);
+ if (ioreset == TEGRA_PIN_IO_RESET_ENABLE)
+ reg |= 1 << pingroups[pg].ioreset_bit;
+
+ pg_writel(reg, pingroups[pg].mux_reg);
+
+ spin_unlock_irqrestore(&mux_lock, flags);
+
+ return 0;
+}
+#endif
+
int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
enum tegra_pullupdown pupd)
{
@@ -261,7 +438,7 @@ int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].pupd_reg < 0)
+ if (pingroups[pg].pupd_reg <= 0)
return -EINVAL;
if (pupd != TEGRA_PUPD_NORMAL &&
@@ -288,28 +465,56 @@ static void tegra_pinmux_config_pingroup(const struct tegra_pingroup_config *con
enum tegra_mux_func func = config->func;
enum tegra_pullupdown pupd = config->pupd;
enum tegra_tristate tristate = config->tristate;
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ enum tegra_pin_lock lock = config->lock;
+ enum tegra_pin_od od = config->od;
+ enum tegra_pin_ioreset ioreset = config->ioreset;
+#endif
int err;
- if (pingroups[pingroup].mux_reg >= 0) {
+ if (pingroups[pingroup].mux_reg > 0) {
err = tegra_pinmux_set_func(config);
if (err < 0)
pr_err("pinmux: can't set pingroup %s func to %s: %d\n",
pingroup_name(pingroup), func_name(func), err);
}
- if (pingroups[pingroup].pupd_reg >= 0) {
+ if (pingroups[pingroup].pupd_reg > 0) {
err = tegra_pinmux_set_pullupdown(pingroup, pupd);
if (err < 0)
pr_err("pinmux: can't set pingroup %s pullupdown to %s: %d\n",
pingroup_name(pingroup), pupd_name(pupd), err);
}
- if (pingroups[pingroup].tri_reg >= 0) {
+ if (pingroups[pingroup].tri_reg > 0) {
err = tegra_pinmux_set_tristate(pingroup, tristate);
if (err < 0)
pr_err("pinmux: can't set pingroup %s tristate to %s: %d\n",
pingroup_name(pingroup), tri_name(func), err);
}
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ if (pingroups[pingroup].mux_reg > 0) {
+ err = tegra_pinmux_set_lock(pingroup, lock);
+ if (err < 0)
+ pr_err("pinmux: can't set pingroup %s lock to %s: %d\n",
+				pingroup_name(pingroup), lock_name(lock), err);
+ }
+
+ if (pingroups[pingroup].mux_reg > 0) {
+ err = tegra_pinmux_set_od(pingroup, od);
+ if (err < 0)
+ pr_err("pinmux: can't set pingroup %s od to %s: %d\n",
+				pingroup_name(pingroup), od_name(od), err);
+ }
+
+ if (pingroups[pingroup].mux_reg > 0) {
+ err = tegra_pinmux_set_ioreset(pingroup, ioreset);
+ if (err < 0)
+ pr_err("pinmux: can't set pingroup %s ioreset to %s: %d\n",
+				pingroup_name(pingroup), ioreset_name(ioreset), err);
+ }
+#endif
}
void tegra_pinmux_config_table(const struct tegra_pingroup_config *config, int len)
@@ -319,6 +524,7 @@ void tegra_pinmux_config_table(const struct tegra_pingroup_config *config, int l
for (i = 0; i < len; i++)
tegra_pinmux_config_pingroup(&config[i]);
}
+EXPORT_SYMBOL(tegra_pinmux_config_table);
static const char *drive_pinmux_name(enum tegra_drive_pingroup pg)
{
@@ -427,6 +633,7 @@ static int tegra_drive_pinmux_set_pull_down(enum tegra_drive_pingroup pg,
{
unsigned long flags;
u32 reg;
+
if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
return -ERANGE;
@@ -436,8 +643,9 @@ static int tegra_drive_pinmux_set_pull_down(enum tegra_drive_pingroup pg,
spin_lock_irqsave(&mux_lock, flags);
reg = pg_readl(drive_pingroups[pg].reg);
- reg &= ~(0x1f << 12);
- reg |= pull_down << 12;
+ reg &= ~(drive_pingroups[pg].drvdown_mask <<
+ drive_pingroups[pg].drvdown_offset);
+ reg |= pull_down << drive_pingroups[pg].drvdown_offset;
pg_writel(reg, drive_pingroups[pg].reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -450,6 +658,7 @@ static int tegra_drive_pinmux_set_pull_up(enum tegra_drive_pingroup pg,
{
unsigned long flags;
u32 reg;
+
if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
return -ERANGE;
@@ -459,8 +668,9 @@ static int tegra_drive_pinmux_set_pull_up(enum tegra_drive_pingroup pg,
spin_lock_irqsave(&mux_lock, flags);
reg = pg_readl(drive_pingroups[pg].reg);
- reg &= ~(0x1f << 12);
- reg |= pull_up << 12;
+ reg &= ~(drive_pingroups[pg].drvup_mask <<
+ drive_pingroups[pg].drvup_offset);
+ reg |= pull_up << drive_pingroups[pg].drvup_offset;
pg_writel(reg, drive_pingroups[pg].reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -482,8 +692,9 @@ static int tegra_drive_pinmux_set_slew_rising(enum tegra_drive_pingroup pg,
spin_lock_irqsave(&mux_lock, flags);
reg = pg_readl(drive_pingroups[pg].reg);
- reg &= ~(0x3 << 28);
- reg |= slew_rising << 28;
+ reg &= ~(drive_pingroups[pg].slewrise_mask <<
+ drive_pingroups[pg].slewrise_offset);
+ reg |= slew_rising << drive_pingroups[pg].slewrise_offset;
pg_writel(reg, drive_pingroups[pg].reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -505,8 +716,9 @@ static int tegra_drive_pinmux_set_slew_falling(enum tegra_drive_pingroup pg,
spin_lock_irqsave(&mux_lock, flags);
reg = pg_readl(drive_pingroups[pg].reg);
- reg &= ~(0x3 << 30);
- reg |= slew_falling << 30;
+ reg &= ~(drive_pingroups[pg].slewfall_mask <<
+ drive_pingroups[pg].slewfall_offset);
+ reg |= slew_falling << drive_pingroups[pg].slewfall_offset;
pg_writel(reg, drive_pingroups[pg].reg);
spin_unlock_irqrestore(&mux_lock, flags);
@@ -636,7 +848,7 @@ void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *conf
for (i = 0; i < len; i++) {
pingroup = config[i].pingroup;
- if (pingroups[pingroup].tri_reg >= 0) {
+ if (pingroups[pingroup].tri_reg > 0) {
err = tegra_pinmux_set_tristate(pingroup, tristate);
if (err < 0)
pr_err("pinmux: can't set pingroup %s tristate"
@@ -655,7 +867,7 @@ void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *co
for (i = 0; i < len; i++) {
pingroup = config[i].pingroup;
- if (pingroups[pingroup].pupd_reg >= 0) {
+ if (pingroups[pingroup].pupd_reg > 0) {
err = tegra_pinmux_set_pullupdown(pingroup, pupd);
if (err < 0)
pr_err("pinmux: can't set pingroup %s pullupdown"
@@ -690,18 +902,23 @@ static int dbg_pinmux_show(struct seq_file *s, void *unused)
seq_printf(s, "\t{TEGRA_PINGROUP_%s", pingroups[i].name);
len = strlen(pingroups[i].name);
- dbg_pad_field(s, 5 - len);
+ dbg_pad_field(s, 15 - len);
- if (pingroups[i].mux_reg < 0) {
+ if (pingroups[i].mux_reg <= 0) {
seq_printf(s, "TEGRA_MUX_NONE");
len = strlen("NONE");
} else {
mux = (pg_readl(pingroups[i].mux_reg) >>
pingroups[i].mux_bit) & 0x3;
- if (pingroups[i].funcs[mux] == TEGRA_MUX_RSVD) {
+ BUG_ON(pingroups[i].funcs[mux] == 0);
+ if (pingroups[i].funcs[mux] == TEGRA_MUX_INVALID) {
+ seq_printf(s, "TEGRA_MUX_INVALID");
+ len = 7;
+ } else if (pingroups[i].funcs[mux] & TEGRA_MUX_RSVD) {
seq_printf(s, "TEGRA_MUX_RSVD%1lu", mux+1);
len = 5;
} else {
+ BUG_ON(!tegra_mux_names[pingroups[i].funcs[mux]]);
seq_printf(s, "TEGRA_MUX_%s",
tegra_mux_names[pingroups[i].funcs[mux]]);
len = strlen(tegra_mux_names[pingroups[i].funcs[mux]]);
@@ -709,7 +926,16 @@ static int dbg_pinmux_show(struct seq_file *s, void *unused)
}
dbg_pad_field(s, 13-len);
- if (pingroups[i].pupd_reg < 0) {
+#if defined(TEGRA_PINMUX_HAS_IO_DIRECTION)
+ {
+ unsigned long io;
+ io = (pg_readl(pingroups[i].mux_reg) >> 5) & 0x1;
+ seq_printf(s, "TEGRA_PIN_%s", io_name(io));
+ len = strlen(io_name(io));
+ dbg_pad_field(s, 6 - len);
+ }
+#endif
+ if (pingroups[i].pupd_reg <= 0) {
seq_printf(s, "TEGRA_PUPD_NORMAL");
len = strlen("NORMAL");
} else {
@@ -720,7 +946,7 @@ static int dbg_pinmux_show(struct seq_file *s, void *unused)
}
dbg_pad_field(s, 9 - len);
- if (pingroups[i].tri_reg < 0) {
+ if (pingroups[i].tri_reg <= 0) {
seq_printf(s, "TEGRA_TRI_NORMAL");
} else {
tri = (pg_readl(pingroups[i].tri_reg) >>
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 7d2b5d03c1df..582810c96fc2 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -7,103 +7,240 @@
* Copyright (C) 2009 Palm
* All Rights Reserved
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/jiffies.h>
-#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpumask.h>
-#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
-#include <asm/mach-types.h>
#include <asm/smp_scu.h>
#include <mach/iomap.h>
+#include <mach/powergate.h>
-extern void tegra_secondary_startup(void);
+#include "pm.h"
+#include "clock.h"
+#include "reset.h"
+#include "sleep.h"
-static DEFINE_SPINLOCK(boot_lock);
-static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
+bool tegra_all_cpus_booted;
+
+static DECLARE_BITMAP(tegra_cpu_init_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const tegra_cpu_init_mask = to_cpumask(tegra_cpu_init_bits);
+#define tegra_cpu_init_map (*(cpumask_t *)tegra_cpu_init_mask)
-#define EVP_CPU_RESET_VECTOR \
- (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100)
#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)
-void __cpuinit platform_secondary_init(unsigned int cpu)
+#define CPU_CLOCK(cpu) (0x1<<(8+cpu))
+#define CPU_RESET(cpu) (0x1111ul<<(cpu))
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x34c)
+#define CAR_BOND_OUT_V \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
+#define CAR_BOND_OUT_V_CPU_G (1<<0)
+#endif
+
+static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
+
+static unsigned int available_cpus(void)
{
- /*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
+ static unsigned int ncores;
- /*
- * Synchronise with the boot thread.
- */
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ if (ncores == 0) {
+ ncores = scu_get_core_count(scu_base);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
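+		/* Some Tegra3 SKUs fuse off CPU cores; subtract the disabled
+		 * cores from the SCU count. */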
+ if (ncores > 1) {
+ u32 fuse_sku = readl(FUSE_SKU_DIRECT_CONFIG);
+ ncores -= FUSE_SKU_NUM_DISABLED_CPUS(fuse_sku);
+ BUG_ON((int)ncores <= 0);
+ }
+#endif
+ }
+ return ncores;
}
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int is_g_cluster_available(unsigned int cpu)
+{
+#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
+ u32 fuse_sku = readl(FUSE_SKU_DIRECT_CONFIG);
+ u32 bond_out = readl(CAR_BOND_OUT_V);
+
+ /* Does the G CPU complex exist at all? */
+ if ((fuse_sku & FUSE_SKU_DISABLE_ALL_CPUS) ||
+ (bond_out & CAR_BOND_OUT_V_CPU_G))
+ return -EPERM;
+
+ if (cpu >= available_cpus())
+ return -EPERM;
+
+ /* FIXME: The G CPU can be unavailable for a number of reasons
+ * (e.g., low battery, over temperature, etc.). Add checks for
+ * these conditions. */
+ return 0;
+#else
+ return -EPERM;
+#endif
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static bool is_cpu_powered(unsigned int cpu)
+{
+ if (is_lp_cluster())
+ return true;
+ else
+ return tegra_powergate_is_powered(TEGRA_CPU_POWERGATE_ID(cpu));
+}
+#endif
+
+static int power_up_cpu(unsigned int cpu)
{
- unsigned long old_boot_vector;
- unsigned long boot_vector;
- unsigned long timeout;
u32 reg;
+ int ret = 0;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long timeout;
+
+ BUG_ON(cpu == smp_processor_id());
+ BUG_ON(is_lp_cluster());
- /*
- * set synchronisation state between this boot processor
- * and the secondary one
+	/* If this CPU has booted before, this function is entered after the
+	 * flow controller has already un-gated the CPU. Wait for confirmation
+	 * that the CPU is powered, then remove the clamps.
+	 * On first boot do not wait; go straight to the direct un-gate path.
*/
- spin_lock(&boot_lock);
+ if (cpu_isset(cpu, tegra_cpu_init_map)) {
+ timeout = jiffies + 5;
+ do {
+ if (is_cpu_powered(cpu))
+ goto remove_clamps;
+ udelay(10);
+ } while (time_before(jiffies, timeout));
+ }
+	/* First boot, or the flow controller did not work as expected. Try to
+	   toggle the power gates directly; error out if that also fails. */
+ if (!is_cpu_powered(cpu)) {
+ ret = tegra_unpowergate_partition(TEGRA_CPU_POWERGATE_ID(cpu));
+ if (ret)
+ goto fail;
- /* set the reset vector to point to the secondary_startup routine */
+ /* Wait for the power to come up. */
+ timeout = jiffies + 10*HZ;
- boot_vector = virt_to_phys(tegra_secondary_startup);
- old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
- writel(boot_vector, EVP_CPU_RESET_VECTOR);
+ do {
+ if (is_cpu_powered(cpu))
+ goto remove_clamps;
+ udelay(10);
+ } while (time_before(jiffies, timeout));
+ ret = -ETIMEDOUT;
+ goto fail;
+ }
- /* enable cpu clock on cpu1 */
+remove_clamps:
+ /* CPU partition is powered. Enable the CPU clock. */
+ writel(CPU_CLOCK(cpu), CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+ reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+ udelay(10);
+
+ /* Remove I/O clamps. */
+ ret = tegra_powergate_remove_clamping(TEGRA_CPU_POWERGATE_ID(cpu));
+ udelay(10);
+fail:
+#else
+ /* Enable the CPU clock. */
+ reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg & ~CPU_CLOCK(cpu), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ barrier();
reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
- writel(reg & ~(1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+#endif
+ /* Clear flow controller CSR. */
+ flowctrl_writel(0, FLOW_CTRL_CPU_CSR(cpu));
+ return ret;
+}
- reg = (1<<13) | (1<<9) | (1<<5) | (1<<1);
- writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ gic_secondary_init(0);
- smp_wmb();
- flush_cache_all();
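+	/* Record this CPU as booted; once every present CPU has checked in,
+	 * mark the whole complex as up. */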
+ cpumask_set_cpu(cpu, to_cpumask(tegra_cpu_init_bits));
+ if (!tegra_all_cpus_booted)
+ if (cpumask_equal(tegra_cpu_init_mask, cpu_present_mask))
+ tegra_all_cpus_booted = true;
+}
- /* unhalt the cpu */
- writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14);
+int boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ int status;
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
- break;
- udelay(10);
+ /* Avoid timer calibration on slave cpus. Use the value calibrated
+ * on master cpu. This reduces the bringup time for each slave cpu
+ * by around 260ms.
+ */
+ preset_lpj = loops_per_jiffy;
+ if (is_lp_cluster()) {
+ struct clk *cpu_clk, *cpu_g_clk;
+
+ /* The G CPU may not be available for a variety of reasons. */
+ status = is_g_cluster_available(cpu);
+ if (status)
+ goto done;
+
+ cpu_clk = tegra_get_clock_by_name("cpu");
+ cpu_g_clk = tegra_get_clock_by_name("cpu_g");
+
+ /* Switch to G CPU before continuing. */
+ if (!cpu_clk || !cpu_g_clk) {
+			/* Early boot: the clock infrastructure is not yet
+			   initialized, so a CPU mode switch is not allowed */
+ status = -EINVAL;
+ } else
+ status = clk_set_parent(cpu_clk, cpu_g_clk);
+
+ if (status)
+ goto done;
}
- /* put the old boot vector back */
- writel(old_boot_vector, EVP_CPU_RESET_VECTOR);
+ smp_wmb();
- /*
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- spin_unlock(&boot_lock);
+ /* Force the CPU into reset. The CPU must remain in reset when the
+ flow controller state is cleared (which will cause the flow
+ controller to stop driving reset if the CPU has been power-gated
+ via the flow controller). This will have no effect on first boot
+ of the CPU since it should already be in reset. */
+ writel(CPU_RESET(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
- return 0;
+ /* Unhalt the CPU. If the flow controller was used to power-gate the
+ CPU this will cause the flow controller to stop driving reset.
+ The CPU will remain in reset because the clock and reset block
+ is now driving reset. */
+ flowctrl_writel(0, FLOW_CTRL_HALT_CPU(cpu));
+
+ status = power_up_cpu(cpu);
+ if (status)
+ goto done;
+
+ /* Take the CPU out of reset. */
+ writel(CPU_RESET(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+done:
+ return status;
}
/*
@@ -112,7 +249,8 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
*/
void __init smp_init_cpus(void)
{
- unsigned int i, ncores = scu_get_core_count(scu_base);
+ unsigned int ncores = available_cpus();
+ unsigned int i;
if (ncores > nr_cpu_ids) {
pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
@@ -123,11 +261,31 @@ void __init smp_init_cpus(void)
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
+ /* If only one CPU is possible, platform_smp_prepare_cpus() will
+ never get called. We must therefore initialize the reset handler
+ here. If there is more than one CPU, we must wait until after
+ the cpu_present_mask has been updated with all present CPUs in
+ platform_smp_prepare_cpus() before initializing the reset handler. */
+ if (ncores == 1) {
+ tegra_cpu_reset_handler_init();
+ tegra_all_cpus_booted = true;
+ }
+
set_smp_cross_call(gic_raise_softirq);
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
+ /* Always mark the boot CPU as initialized. */
+ cpumask_set_cpu(0, to_cpumask(tegra_cpu_init_bits));
+
+ if (max_cpus == 1)
+ tegra_all_cpus_booted = true;
+
+	/* If we're here, smp_init_cpus() found more than one CPU, which also
+	   means it did not initialize the reset handler. Do that now, before
+	   the secondary CPUs are started. */
+ tegra_cpu_reset_handler_init();
scu_enable(scu_base);
}
diff --git a/arch/arm/mach-tegra/pm-irq.c b/arch/arm/mach-tegra/pm-irq.c
new file mode 100644
index 000000000000..57d21361ca14
--- /dev/null
+++ b/arch/arm/mach-tegra/pm-irq.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+
+#include <mach/iomap.h>
+
+#include "pm-irq.h"
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
+#define PMC_WAKE_MASK 0xc
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+#define PMC_WAKE2_MASK 0x160
+#define PMC_WAKE2_LEVEL 0x164
+#define PMC_WAKE2_STATUS 0x168
+#define PMC_SW_WAKE2_STATUS 0x16C
+#endif
+
+#define PMC_MAX_WAKE_COUNT 64
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+
+static u64 tegra_lp0_wake_enb;
+static u64 tegra_lp0_wake_level;
+static u64 tegra_lp0_wake_level_any;
+static int tegra_prevent_lp0;
+
+static unsigned int tegra_wake_irq_count[PMC_MAX_WAKE_COUNT];
+
+static bool debug_lp0;
+module_param(debug_lp0, bool, S_IRUGO | S_IWUSR);
+
+static bool warn_prevent_lp0;
+module_param(warn_prevent_lp0, bool, S_IRUGO | S_IWUSR);
+
+bool tegra_pm_irq_lp0_allowed(void)
+{
+ return (tegra_prevent_lp0 == 0);
+}
+
+/* Ensure that enough time passes for a register write to serialize into
+ * the 32 kHz domain. */
+static void pmc_32kwritel(u32 val, unsigned long offs)
+{
+ writel(val, pmc + offs);
+ udelay(130);
+}
+
+static inline void write_pmc_wake_mask(u64 value)
+{
+ pr_info("Wake[31-0] enable=0x%x\n", (u32)(value & 0xFFFFFFFF));
+ writel((u32)value, pmc + PMC_WAKE_MASK);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ pr_info("Tegra3 wake[63-32] enable=0x%x\n", (u32)((value >> 32) &
+ 0xFFFFFFFF));
+ __raw_writel((u32)(value >> 32), pmc + PMC_WAKE2_MASK);
+#endif
+}
+
+static inline u64 read_pmc_wake_level(void)
+{
+ u64 reg;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ reg = readl(pmc + PMC_WAKE_LEVEL);
+#else
+ reg = __raw_readl(pmc + PMC_WAKE_LEVEL);
+ reg |= ((u64)readl(pmc + PMC_WAKE2_LEVEL)) << 32;
+#endif
+ return reg;
+}
+
+static inline void write_pmc_wake_level(u64 value)
+{
+ pr_info("Wake[31-0] level=0x%x\n", (u32)(value & 0xFFFFFFFF));
+ writel((u32)value, pmc + PMC_WAKE_LEVEL);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ pr_info("Tegra3 wake[63-32] level=0x%x\n", (u32)((value >> 32) &
+ 0xFFFFFFFF));
+ __raw_writel((u32)(value >> 32), pmc + PMC_WAKE2_LEVEL);
+#endif
+}
+
+static inline u64 read_pmc_wake_status(void)
+{
+ u64 reg;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ reg = readl(pmc + PMC_WAKE_STATUS);
+#else
+ reg = __raw_readl(pmc + PMC_WAKE_STATUS);
+ reg |= ((u64)readl(pmc + PMC_WAKE2_STATUS)) << 32;
+#endif
+ return reg;
+}
+
+static inline u64 read_pmc_sw_wake_status(void)
+{
+ u64 reg;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ reg = readl(pmc + PMC_SW_WAKE_STATUS);
+#else
+ reg = __raw_readl(pmc + PMC_SW_WAKE_STATUS);
+ reg |= ((u64)readl(pmc + PMC_SW_WAKE2_STATUS)) << 32;
+#endif
+ return reg;
+}
+
+static inline void clear_pmc_sw_wake_status(void)
+{
+ pmc_32kwritel(0, PMC_SW_WAKE_STATUS);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ pmc_32kwritel(0, PMC_SW_WAKE2_STATUS);
+#endif
+}
+
+int tegra_pm_irq_set_wake(int irq, int enable)
+{
+ int wake = tegra_irq_to_wake(irq);
+
+ if (wake == -EALREADY) {
+ /* EALREADY means wakeup event already accounted for */
+ return 0;
+ } else if (wake == -ENOTSUPP) {
+ /* ENOTSUPP means LP0 not supported with this wake source */
+ WARN(enable && warn_prevent_lp0, "irq %d prevents lp0\n", irq);
+ if (enable)
+ tegra_prevent_lp0++;
+ else if (!WARN_ON(tegra_prevent_lp0 == 0))
+ tegra_prevent_lp0--;
+ return 0;
+ } else if (wake < 0) {
+ return -EINVAL;
+ }
+
+ if (enable) {
+ tegra_lp0_wake_enb |= 1ull << wake;
+ pr_info("Enabling wake%d\n", wake);
+ } else {
+ tegra_lp0_wake_enb &= ~(1ull << wake);
+ pr_info("Disabling wake%d\n", wake);
+ }
+
+ return 0;
+}
+
+int tegra_pm_irq_set_wake_type(int irq, int flow_type)
+{
+ int wake = tegra_irq_to_wake(irq);
+
+ if (wake < 0)
+ return 0;
+
+ switch (flow_type) {
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ tegra_lp0_wake_level &= ~(1ull << wake);
+ tegra_lp0_wake_level_any &= ~(1ull << wake);
+ break;
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ tegra_lp0_wake_level |= (1ull << wake);
+ tegra_lp0_wake_level_any &= ~(1ull << wake);
+ break;
+
+ case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+ tegra_lp0_wake_level_any |= (1ull << wake);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* translate lp0 wake sources back into irqs to catch edge triggered wakeups */
+static void tegra_pm_irq_syscore_resume_helper(
+ unsigned long wake_status,
+ unsigned int index)
+{
+ int wake;
+ int irq;
+ struct irq_desc *desc;
+
+ for_each_set_bit(wake, &wake_status, sizeof(wake_status) * 8) {
+ irq = tegra_wake_to_irq(wake + 32 * index);
+ if (!irq) {
+ pr_info("Resume caused by WAKE%d\n",
+ (wake + 32 * index));
+ continue;
+ }
+
+ desc = irq_to_desc(irq);
+ if (!desc || !desc->action || !desc->action->name) {
+ pr_info("Resume caused by WAKE%d, irq %d\n",
+ (wake + 32 * index), irq);
+ continue;
+ }
+
+ pr_info("Resume caused by WAKE%d, %s\n", (wake + 32 * index),
+ desc->action->name);
+
+ tegra_wake_irq_count[wake + 32 * index]++;
+
+ generic_handle_irq(irq);
+ }
+}
+
+static void tegra_pm_irq_syscore_resume(void)
+{
+ unsigned long long wake_status = read_pmc_wake_status();
+
+ pr_info(" legacy wake status=0x%x\n", (u32)wake_status);
+ tegra_pm_irq_syscore_resume_helper((unsigned long)wake_status, 0);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ pr_info(" tegra3 wake status=0x%x\n", (u32)(wake_status >> 32));
+ tegra_pm_irq_syscore_resume_helper(
+ (unsigned long)(wake_status >> 32), 1);
+#endif
+}
+
+/* set up lp0 wake sources */
+static int tegra_pm_irq_syscore_suspend(void)
+{
+ u32 temp;
+ u64 status;
+ u64 lvl;
+ u64 wake_level;
+ u64 wake_enb;
+
+ clear_pmc_sw_wake_status();
+
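+	/* Pulse LATCH_WAKEUPS so that the current state of the wake pads is
+	 * captured into PMC_SW_WAKE_STATUS before it is read below. */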
+ temp = readl(pmc + PMC_CTRL);
+ temp |= PMC_CTRL_LATCH_WAKEUPS;
+ pmc_32kwritel(temp, PMC_CTRL);
+
+ temp &= ~PMC_CTRL_LATCH_WAKEUPS;
+ pmc_32kwritel(temp, PMC_CTRL);
+
+ status = read_pmc_sw_wake_status();
+
+ lvl = read_pmc_wake_level();
+
+ /* flip the wakeup trigger for any-edge triggered pads
+ * which are currently asserting as wakeups */
+ lvl ^= status;
+
+ lvl &= tegra_lp0_wake_level_any;
+
+ wake_level = lvl | tegra_lp0_wake_level;
+ wake_enb = tegra_lp0_wake_enb;
+
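+	/* The debug_lp0 module parameter overrides the computed masks:
+	 * enable the first 32 wake sources and derive the wake levels from
+	 * the latched pad status. */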
+ if (debug_lp0) {
+ wake_level = lvl ^ status;
+ wake_enb = 0xffffffff;
+ }
+
+ /* Clear PMC Wake Status register while going to suspend */
+ temp = readl(pmc + PMC_WAKE_STATUS);
+ if (temp)
+ pmc_32kwritel(temp, PMC_WAKE_STATUS);
+
+ write_pmc_wake_level(wake_level);
+
+ write_pmc_wake_mask(wake_enb);
+
+ return 0;
+}
+
+static struct syscore_ops tegra_pm_irq_syscore_ops = {
+ .suspend = tegra_pm_irq_syscore_suspend,
+ .resume = tegra_pm_irq_syscore_resume,
+};
+
+static int tegra_pm_irq_syscore_init(void)
+{
+ register_syscore_ops(&tegra_pm_irq_syscore_ops);
+
+ return 0;
+}
+subsys_initcall(tegra_pm_irq_syscore_init);
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_pm_irq_debug_show(struct seq_file *s, void *data)
+{
+ int wake;
+ int irq;
+ struct irq_desc *desc;
+ const char *irq_name;
+
+ seq_printf(s, "wake irq count name\n");
+ seq_printf(s, "----------------------\n");
+ for (wake = 0; wake < PMC_MAX_WAKE_COUNT; wake++) {
+ irq = tegra_wake_to_irq(wake);
+ if (irq < 0)
+ continue;
+
+ desc = irq_to_desc(irq);
+ if (tegra_wake_irq_count[wake] == 0 && desc->action == NULL)
+ continue;
+
+ irq_name = (desc->action && desc->action->name) ?
+ desc->action->name : "???";
+
+ seq_printf(s, "%4d %3d %5d %s\n",
+ wake, irq, tegra_wake_irq_count[wake], irq_name);
+ }
+ return 0;
+}
+
+static int tegra_pm_irq_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_pm_irq_debug_show, NULL);
+}
+
+static const struct file_operations tegra_pm_irq_debug_fops = {
+ .open = tegra_pm_irq_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_pm_irq_debug_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("wake_irq", S_IRUGO, NULL, NULL,
+ &tegra_pm_irq_debug_fops);
+ if (!d) {
+ pr_err("Failed to create suspend_mode debug file\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+late_initcall(tegra_pm_irq_debug_init);
+#endif
diff --git a/arch/arm/mach-tegra/pm-irq.h b/arch/arm/mach-tegra/pm-irq.h
new file mode 100644
index 000000000000..8e87b4bba246
--- /dev/null
+++ b/arch/arm/mach-tegra/pm-irq.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_PM_IRQ_H_
+#define _MACH_TEGRA_PM_IRQ_H_
+
+#ifdef CONFIG_PM_SLEEP
+int tegra_pm_irq_set_wake(int irq, int enable);
+int tegra_pm_irq_set_wake_type(int irq, int flow_type);
+bool tegra_pm_irq_lp0_allowed(void);
+int tegra_irq_to_wake(int irq);
+int tegra_wake_to_irq(int wake);
+#else
+static inline int tegra_pm_irq_set_wake_type(int irq, int flow_type)
+{
+ return 0;
+}
+#endif
+#endif
diff --git a/arch/arm/mach-tegra/pm-t2.c b/arch/arm/mach-tegra/pm-t2.c
new file mode 100644
index 000000000000..7ddbb2125595
--- /dev/null
+++ b/arch/arm/mach-tegra/pm-t2.c
@@ -0,0 +1,376 @@
+/*
+ * arch/arm/mach-tegra/pm-t2.c
+ *
+ * Tegra 2 LP0 scratch register preservation
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "pm.h"
+
+#define PMC_SCRATCH3 0x5c
+#define PMC_SCRATCH5 0x64
+#define PMC_SCRATCH6 0x68
+#define PMC_SCRATCH7 0x6c
+#define PMC_SCRATCH8 0x70
+#define PMC_SCRATCH9 0x74
+#define PMC_SCRATCH10 0x78
+#define PMC_SCRATCH11 0x7c
+#define PMC_SCRATCH12 0x80
+#define PMC_SCRATCH13 0x84
+#define PMC_SCRATCH14 0x88
+#define PMC_SCRATCH15 0x8c
+#define PMC_SCRATCH16 0x90
+#define PMC_SCRATCH17 0x94
+#define PMC_SCRATCH18 0x98
+#define PMC_SCRATCH19 0x9c
+#define PMC_SCRATCH20 0xa0
+#define PMC_SCRATCH21 0xa4
+#define PMC_SCRATCH22 0xa8
+#define PMC_SCRATCH23 0xac
+#define PMC_SCRATCH25 0x100
+#define PMC_SCRATCH35 0x128
+#define PMC_SCRATCH36 0x12c
+#define PMC_SCRATCH40 0x13c
+
+struct pmc_scratch_field {
+ unsigned long addr;
+ unsigned int mask;
+ int shift_src;
+ int shift_dst;
+};
+
+#define field(reg, start, end, dst) \
+ { \
+ .addr = (reg), \
+ .mask = 0xfffffffful >> (31 - ((end) - (start))), \
+ .shift_src = (start), \
+ .shift_dst = (dst), \
+ }
+
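+/*
+ * Each field() entry describes one bit-field copy: bits [end:start] of the
+ * register at .addr are extracted and packed into the PMC scratch register
+ * starting at bit .shift_dst (see tegra2_lp0_suspend_init() below). For
+ * example, field(TEGRA_CLK_RESET_BASE + 0xe0, 8, 17, 5) copies PLLX_DIVN
+ * (bits 17:8) into bits 14:5 of the scratch word.
+ */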
+static const struct pmc_scratch_field pllx[] __initdata = {
+ field(TEGRA_CLK_RESET_BASE + 0xe0, 20, 22, 15), /* PLLX_DIVP */
+ field(TEGRA_CLK_RESET_BASE + 0xe0, 8, 17, 5), /* PLLX_DIVN */
+ field(TEGRA_CLK_RESET_BASE + 0xe0, 0, 4, 0), /* PLLX_DIVM */
+ field(TEGRA_CLK_RESET_BASE + 0xe4, 8, 11, 22), /* PLLX_CPCON */
+ field(TEGRA_CLK_RESET_BASE + 0xe4, 4, 7, 18), /* PLLX_LFCON */
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 24, 27, 27), /* XM2CFGC_VREF_DQ */
+ field(TEGRA_APB_MISC_BASE + 0x8c8, 3, 3, 26), /* XM2CFGC_SCHMT_EN */
+ field(TEGRA_APB_MISC_BASE + 0x8d0, 2, 2, 31), /* XM2CLKCFG_PREEMP_EN */
+};
+
+static const struct pmc_scratch_field emc_0[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x3c, 0, 4, 27), /* R2W */
+ field(TEGRA_EMC_BASE + 0x34, 0, 5, 15), /* RAS */
+ field(TEGRA_EMC_BASE + 0x2c, 0, 5, 0), /* RC */
+ field(TEGRA_EMC_BASE + 0x30, 0, 8, 6), /* RFC */
+ field(TEGRA_EMC_BASE + 0x38, 0, 5, 21), /* RP */
+};
+
+static const struct pmc_scratch_field emc_1[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x44, 0, 4, 5), /* R2P */
+ field(TEGRA_EMC_BASE + 0x4c, 0, 5, 15), /* RD_RCD */
+ field(TEGRA_EMC_BASE + 0x54, 0, 3, 27), /* RRD */
+ field(TEGRA_EMC_BASE + 0x48, 0, 4, 10), /* W2P */
+ field(TEGRA_EMC_BASE + 0x40, 0, 4, 0), /* W2R */
+ field(TEGRA_EMC_BASE + 0x50, 0, 5, 21), /* WR_RCD */
+};
+
+static const struct pmc_scratch_field emc_2[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2b8, 2, 2, 31), /* CLKCHANGE_SR_ENABLE */
+ field(TEGRA_EMC_BASE + 0x2b8, 10, 10, 30), /* USE_ADDR_CLK */
+ field(TEGRA_EMC_BASE + 0x80, 0, 4, 25), /* PCHG2PDEN */
+ field(TEGRA_EMC_BASE + 0x64, 0, 3, 12), /* QRST */
+ field(TEGRA_EMC_BASE + 0x68, 0, 3, 16), /* QSAFE */
+ field(TEGRA_EMC_BASE + 0x60, 0, 3, 8), /* QUSE */
+ field(TEGRA_EMC_BASE + 0x6c, 0, 4, 20), /* RDV */
+ field(TEGRA_EMC_BASE + 0x58, 0, 3, 0), /* REXT */
+ field(TEGRA_EMC_BASE + 0x5c, 0, 3, 4), /* WDV */
+};
+
+static const struct pmc_scratch_field emc_3[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x74, 0, 3, 16), /* BURST_REFRESH_NUM */
+ field(TEGRA_EMC_BASE + 0x7c, 0, 3, 24), /* PDEX2RD */
+ field(TEGRA_EMC_BASE + 0x78, 0, 3, 20), /* PDEX2WR */
+ field(TEGRA_EMC_BASE + 0x70, 0, 4, 0), /* REFRESH_LO */
+ field(TEGRA_EMC_BASE + 0x70, 5, 15, 5), /* REFRESH */
+ field(TEGRA_EMC_BASE + 0xa0, 0, 3, 28), /* TCLKSTABLE */
+};
+
+static const struct pmc_scratch_field emc_4[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x84, 0, 4, 0), /* ACT2PDEN */
+ field(TEGRA_EMC_BASE + 0x88, 0, 4, 5), /* AR2PDEN */
+ field(TEGRA_EMC_BASE + 0x8c, 0, 5, 10), /* RW2PDEN */
+ field(TEGRA_EMC_BASE + 0x94, 0, 3, 28), /* TCKE */
+ field(TEGRA_EMC_BASE + 0x90, 0, 11, 16), /* TXSR */
+};
+
+static const struct pmc_scratch_field emc_5[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x8, 10, 10, 30), /* AP_REQ_BUSY_CTRL */
+ field(TEGRA_EMC_BASE + 0x8, 24, 24, 31), /* CFG_PRIORITY */
+ field(TEGRA_EMC_BASE + 0x8, 2, 2, 26), /* FORCE_UPDATE */
+ field(TEGRA_EMC_BASE + 0x8, 4, 4, 27), /* MRS_WAIT */
+ field(TEGRA_EMC_BASE + 0x8, 5, 5, 28), /* PERIODIC_QRST */
+ field(TEGRA_EMC_BASE + 0x8, 9, 9, 29), /* READ_DQM_CTRL */
+ field(TEGRA_EMC_BASE + 0x8, 0, 0, 24), /* READ_MUX */
+ field(TEGRA_EMC_BASE + 0x8, 1, 1, 25), /* WRITE_MUX */
+ field(TEGRA_EMC_BASE + 0xa4, 0, 3, 6), /* TCLKSTOP */
+ field(TEGRA_EMC_BASE + 0xa8, 0, 13, 10), /* TREFBW */
+ field(TEGRA_EMC_BASE + 0x9c, 0, 5, 0), /* TRPAB */
+};
+
+static const struct pmc_scratch_field emc_6[] __initdata = {
+ field(TEGRA_EMC_BASE + 0xfc, 0, 1, 0), /* DQSIB_DLY_MSB_BYTE_0 */
+ field(TEGRA_EMC_BASE + 0xfc, 8, 9, 2), /* DQSIB_DLY_MSB_BYTE_1 */
+ field(TEGRA_EMC_BASE + 0xfc, 16, 17, 4), /* DQSIB_DLY_MSB_BYTE_2 */
+ field(TEGRA_EMC_BASE + 0xfc, 24, 25, 6), /* DQSIB_DLY_MSB_BYTE_3 */
+ field(TEGRA_EMC_BASE + 0x110, 0, 1, 8), /* QUSE_DLY_MSB_BYTE_0 */
+ field(TEGRA_EMC_BASE + 0x110, 8, 9, 10), /* QUSE_DLY_MSB_BYTE_1 */
+ field(TEGRA_EMC_BASE + 0x110, 16, 17, 12), /* QUSE_DLY_MSB_BYTE_2 */
+ field(TEGRA_EMC_BASE + 0x110, 24, 25, 14), /* QUSE_DLY_MSB_BYTE_3 */
+ field(TEGRA_EMC_BASE + 0xac, 0, 3, 22), /* QUSE_EXTRA */
+ field(TEGRA_EMC_BASE + 0x98, 0, 5, 16), /* TFAW */
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 5, 5, 30), /* XM2CFGC_VREF_DQ_EN */
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 16, 19, 26), /* XM2CFGC_VREF_DQS */
+};
+
+static const struct pmc_scratch_field emc_dqsib_dly[] __initdata = {
+ field(TEGRA_EMC_BASE + 0xf8, 0, 31, 0), /* DQSIB_DLY_BYTE_0 - DQSIB_DLY_BYTE_3*/
+};
+
+static const struct pmc_scratch_field emc_quse_dly[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x10c, 0, 31, 0), /* QUSE_DLY_BYTE_0 - QUSE_DLY_BYTE_3*/
+};
+
+static const struct pmc_scratch_field emc_clktrim[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2d0, 0, 29, 0), /* DATA0_CLKTRIM - DATA3_CLKTRIM +
+ * MCLK_ADDR_CLKTRIM */
+};
+
+static const struct pmc_scratch_field emc_autocal_fbio[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2a4, 29, 29, 29), /* AUTO_CAL_ENABLE */
+ field(TEGRA_EMC_BASE + 0x2a4, 30, 30, 30), /* AUTO_CAL_OVERRIDE */
+ field(TEGRA_EMC_BASE + 0x2a4, 8, 12, 14), /* AUTO_CAL_PD_OFFSET */
+ field(TEGRA_EMC_BASE + 0x2a4, 0, 4, 9), /* AUTO_CAL_PU_OFFSET */
+ field(TEGRA_EMC_BASE + 0x2a4, 16, 25, 19), /* AUTO_CAL_STEP */
+ field(TEGRA_EMC_BASE + 0xf4, 16, 16, 0), /* CFG_DEN_EARLY */
+ field(TEGRA_EMC_BASE + 0x104, 8, 8, 8), /* CTT_TERMINATION */
+ field(TEGRA_EMC_BASE + 0x104, 7, 7, 7), /* DIFFERENTIAL_DQS */
+ field(TEGRA_EMC_BASE + 0x104, 9, 9, 31), /* DQS_PULLD */
+ field(TEGRA_EMC_BASE + 0x104, 0, 1, 4), /* DRAM_TYPE */
+ field(TEGRA_EMC_BASE + 0x104, 4, 4, 6), /* DRAM_WIDTH */
+ field(TEGRA_EMC_BASE + 0x114, 0, 2, 1), /* CFG_QUSE_LATE */
+};
+
+static const struct pmc_scratch_field emc_autocal_interval[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2a8, 0, 27, 0), /* AUTOCAL_INTERVAL */
+ field(TEGRA_EMC_BASE + 0x2b8, 1, 1, 29), /* CLKCHANGE_PD_ENABLE */
+ field(TEGRA_EMC_BASE + 0x2b8, 0, 0, 28), /* CLKCHANGE_REQ_ENABLE */
+ field(TEGRA_EMC_BASE + 0x2b8, 8, 9, 30), /* PIN_CONFIG */
+};
+
+static const struct pmc_scratch_field emc_cfgs[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x10, 8, 9, 3), /* EMEM_BANKWIDTH */
+ field(TEGRA_EMC_BASE + 0x10, 0, 2, 0), /* EMEM_COLWIDTH */
+ field(TEGRA_EMC_BASE + 0x10, 16, 19, 5), /* EMEM_DEVSIZE */
+ field(TEGRA_EMC_BASE + 0x10, 24, 25, 9), /* EMEM_NUMDEV */
+ field(TEGRA_EMC_BASE + 0xc, 24, 24, 21), /* AUTO_PRE_RD */
+ field(TEGRA_EMC_BASE + 0xc, 25, 25, 22), /* AUTO_PRE_WR */
+ field(TEGRA_EMC_BASE + 0xc, 16, 16, 20), /* CLEAR_AP_PREV_SPREQ */
+ field(TEGRA_EMC_BASE + 0xc, 29, 29, 23), /* DRAM_ACPD */
+ field(TEGRA_EMC_BASE + 0xc, 30, 30, 24), /* DRAM_CLKSTOP_PDSR_ONLY */
+ field(TEGRA_EMC_BASE + 0xc, 31, 31, 25), /* DRAM_CLKSTOP */
+ field(TEGRA_EMC_BASE + 0xc, 8, 15, 12), /* PRE_IDLE_CYCLES */
+ field(TEGRA_EMC_BASE + 0xc, 0, 0, 11), /* PRE_IDLE_EN */
+ field(TEGRA_EMC_BASE + 0x2bc, 28, 29, 28), /* CFG_DLL_LOCK_LIMIT */
+ field(TEGRA_EMC_BASE + 0x2bc, 6, 7, 30), /* CFG_DLL_MODE */
+ field(TEGRA_MC_BASE + 0x10c, 0, 0, 26), /* LL_CTRL */
+ field(TEGRA_MC_BASE + 0x10c, 1, 1, 27), /* LL_SEND_BOTH */
+};
+
+static const struct pmc_scratch_field emc_adr_cfg1[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x14, 8, 9, 8), /* EMEM1_BANKWIDTH */
+ field(TEGRA_EMC_BASE + 0x14, 0, 2, 5), /* EMEM1_COLWIDTH */
+ field(TEGRA_EMC_BASE + 0x14, 16, 19, 10), /* EMEM1_DEVSIZE */
+ field(TEGRA_EMC_BASE + 0x2dc, 24, 28, 0), /* TERM_DRVUP */
+ field(TEGRA_APB_MISC_BASE + 0x8d4, 0, 3, 14), /* XM2COMP_VREF_SEL */
+ field(TEGRA_APB_MISC_BASE + 0x8d8, 16, 18, 21), /* XM2VTTGEN_CAL_DRVDN */
+ field(TEGRA_APB_MISC_BASE + 0x8d8, 24, 26, 18), /* XM2VTTGEN_CAL_DRVUP */
+ field(TEGRA_APB_MISC_BASE + 0x8d8, 1, 1, 30), /* XM2VTTGEN_SHORT_PWRGND */
+ field(TEGRA_APB_MISC_BASE + 0x8d8, 0, 0, 31), /* XM2VTTGEN_SHORT */
+ field(TEGRA_APB_MISC_BASE + 0x8d8, 12, 14, 24), /* XM2VTTGEN_VAUXP_LEVEL */
+ field(TEGRA_APB_MISC_BASE + 0x8d8, 8, 10, 27), /* XM2VTTGEN_VCLAMP_LEVEL */
+};
+
+static const struct pmc_scratch_field emc_digital_dll[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2bc, 1, 1, 23), /* DLI_TRIMMER_EN */
+ field(TEGRA_EMC_BASE + 0x2bc, 0, 0, 22), /* DLL_EN */
+ field(TEGRA_EMC_BASE + 0x2bc, 5, 5, 27), /* DLL_LOWSPEED */
+ field(TEGRA_EMC_BASE + 0x2bc, 2, 2, 24), /* DLL_OVERRIDE_EN */
+ field(TEGRA_EMC_BASE + 0x2bc, 8, 11, 28), /* DLL_UDSET */
+ field(TEGRA_EMC_BASE + 0x2bc, 4, 4, 26), /* PERBYTE_TRIMMER_OVERRIDE */
+ field(TEGRA_EMC_BASE + 0x2bc, 3, 3, 25), /* USE_SINGLE_DLL */
+ field(TEGRA_MC_BASE + 0xc, 0, 21, 0), /* EMEM_SIZE_KB */
+};
+
+static const struct pmc_scratch_field emc_dqs_clktrim[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2d4, 0, 29, 0), /* DQS0_CLKTRIM - DQS3 + MCLK*/
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 3, 3, 31), /* XM2CFGC_CTT_HIZ_EN */
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 4, 4, 30), /* XM2CFGC_VREF_DQS_EN */
+};
+
+static const struct pmc_scratch_field emc_dq_clktrim[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2d8, 0, 29, 0),
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 2, 2, 30), /* XM2CFGC_PREEMP_EN */
+ field(TEGRA_APB_MISC_BASE + 0x8e4, 0, 0, 31), /* XM2CFGC_RX_FT_REC_EN */
+};
+
+static const struct pmc_scratch_field emc_dll_xform_dqs[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2bc, 16, 25, 20), /* CFG_DLL_OVERRIDE_VAL */
+ field(TEGRA_EMC_BASE + 0x2c0, 0, 4, 0), /* DQS_MULT */
+ field(TEGRA_EMC_BASE + 0x2c0, 8, 22, 5), /* DQS_OFFS */
+ field(TEGRA_MC_BASE + 0x10c, 31, 31, 30), /* LL_DRAM_INTERLEAVE */
+};
+
+static const struct pmc_scratch_field emc_odt_rw[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2c4, 0, 4, 0), /* QUSE_MULT */
+ field(TEGRA_EMC_BASE + 0x2c4, 8, 22, 5), /* QUSE_OFF */
+ field(TEGRA_EMC_BASE + 0xb4, 31, 31, 29), /* DISABLE_ODT_DURING_READ */
+ field(TEGRA_EMC_BASE + 0xb4, 30, 30, 28), /* B4_READ */
+ field(TEGRA_EMC_BASE + 0xb4, 0, 2, 25), /* RD_DELAY */
+ field(TEGRA_EMC_BASE + 0xb0, 31, 31, 24), /* ENABLE_ODT_DURING_WRITE */
+ field(TEGRA_EMC_BASE + 0xb0, 30, 30, 23), /* B4_WRITE */
+ field(TEGRA_EMC_BASE + 0xb0, 0, 2, 20), /* WR_DELAY */
+};
+
+static const struct pmc_scratch_field arbitration_xbar[] __initdata = {
+ field(TEGRA_AHB_GIZMO_BASE + 0xdc, 0, 31, 0),
+};
+
+static const struct pmc_scratch_field emc_zcal[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2e0, 0, 23, 0), /* ZCAL_REF_INTERVAL */
+ field(TEGRA_EMC_BASE + 0x2e4, 0, 7, 24), /* ZCAL_WAIT_CNT */
+};
+
+static const struct pmc_scratch_field emc_ctt_term[] __initdata = {
+ field(TEGRA_EMC_BASE + 0x2dc, 15, 19, 26), /* TERM_DRVDN */
+ field(TEGRA_EMC_BASE + 0x2dc, 8, 12, 21), /* TERM_OFFSET */
+ field(TEGRA_EMC_BASE + 0x2dc, 31, 31, 31), /* TERM_OVERRIDE */
+ field(TEGRA_EMC_BASE + 0x2dc, 0, 2, 18), /* TERM_SLOPE */
+ field(TEGRA_EMC_BASE + 0x2e8, 16, 23, 8), /* ZQ_MRW_MA */
+ field(TEGRA_EMC_BASE + 0x2e8, 0, 7, 0), /* ZQ_MRW_OP */
+};
+
+static const struct pmc_scratch_field xm2_cfgd[] __initdata = {
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 16, 18, 9), /* CFGD0_DLYIN_TRM */
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 20, 22, 6), /* CFGD1_DLYIN_TRM */
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 24, 26, 3), /* CFGD2_DLYIN_TRM */
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 28, 30, 0), /* CFGD3_DLYIN_TRM */
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 3, 3, 12), /* XM2CFGD_CTT_HIZ_EN */
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 2, 2, 13), /* XM2CFGD_PREEMP_EN */
+ field(TEGRA_APB_MISC_BASE + 0x8e8, 0, 0, 14), /* CM2CFGD_RX_FT_REC_EN */
+};
+
+struct pmc_scratch_reg {
+ const struct pmc_scratch_field *fields;
+ void __iomem *scratch_addr;
+ int num_fields;
+};
+
+#define scratch(offs, field_list) \
+ { \
+ .scratch_addr = IO_ADDRESS(TEGRA_PMC_BASE) + offs, \
+ .fields = field_list, \
+ .num_fields = ARRAY_SIZE(field_list), \
+ }
+
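+/* Each entry below pairs a PMC scratch register with the list of hardware
+ * fields packed into it. The packed values are written once at init time by
+ * tegra2_lp0_suspend_init() and are presumably consumed by the warm-boot
+ * code when clocks and the EMC are restored on LP0 exit. */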
+static const struct pmc_scratch_reg scratch[] __initdata = {
+ scratch(PMC_SCRATCH3, pllx),
+ scratch(PMC_SCRATCH5, emc_0),
+ scratch(PMC_SCRATCH6, emc_1),
+ scratch(PMC_SCRATCH7, emc_2),
+ scratch(PMC_SCRATCH8, emc_3),
+ scratch(PMC_SCRATCH9, emc_4),
+ scratch(PMC_SCRATCH10, emc_5),
+ scratch(PMC_SCRATCH11, emc_6),
+ scratch(PMC_SCRATCH12, emc_dqsib_dly),
+ scratch(PMC_SCRATCH13, emc_quse_dly),
+ scratch(PMC_SCRATCH14, emc_clktrim),
+ scratch(PMC_SCRATCH15, emc_autocal_fbio),
+ scratch(PMC_SCRATCH16, emc_autocal_interval),
+ scratch(PMC_SCRATCH17, emc_cfgs),
+ scratch(PMC_SCRATCH18, emc_adr_cfg1),
+ scratch(PMC_SCRATCH19, emc_digital_dll),
+ scratch(PMC_SCRATCH20, emc_dqs_clktrim),
+ scratch(PMC_SCRATCH21, emc_dq_clktrim),
+ scratch(PMC_SCRATCH22, emc_dll_xform_dqs),
+ scratch(PMC_SCRATCH23, emc_odt_rw),
+ scratch(PMC_SCRATCH25, arbitration_xbar),
+ scratch(PMC_SCRATCH35, emc_zcal),
+ scratch(PMC_SCRATCH36, emc_ctt_term),
+ scratch(PMC_SCRATCH40, xm2_cfgd),
+};
+
+void __init tegra2_lp0_suspend_init(void)
+{
+ int i;
+ int j;
+ unsigned int v;
+ unsigned int r;
+
+ for (i = 0; i < ARRAY_SIZE(scratch); i++) {
+ r = 0;
+
+ for (j = 0; j < scratch[i].num_fields; j++) {
+ v = readl(IO_ADDRESS(scratch[i].fields[j].addr));
+ v >>= scratch[i].fields[j].shift_src;
+ v &= scratch[i].fields[j].mask;
+ v <<= scratch[i].fields[j].shift_dst;
+ r |= v;
+ }
+
+ __raw_writel(r, scratch[i].scratch_addr);
+ }
+ wmb();
+}
+
+struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(tegra_io_dpd_get);
+
+void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
+{
+ return;
+}
+EXPORT_SYMBOL(tegra_io_dpd_enable);
+
+void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
+{
+ return;
+}
+EXPORT_SYMBOL(tegra_io_dpd_disable);
diff --git a/arch/arm/mach-tegra/pm-t3.c b/arch/arm/mach-tegra/pm-t3.c
new file mode 100644
index 000000000000..2de6f8770ba8
--- /dev/null
+++ b/arch/arm/mach-tegra/pm-t3.c
@@ -0,0 +1,522 @@
+/*
+ * arch/arm/mach-tegra/pm-t3.c
+ *
+ * Tegra3 SOC-specific power and cluster management
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <mach/gpio.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include <asm/cpu_pm.h>
+#include <asm/hardware/gic.h>
+
+#include <trace/events/power.h>
+
+#include "clock.h"
+#include "cpuidle.h"
+#include "pm.h"
+#include "sleep.h"
+#include "tegra3_emc.h"
+#include "dvfs.h"
+
+#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
+#define CAR_CCLK_BURST_POLICY \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
+
+#define CAR_SUPER_CCLK_DIVIDER \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
+
+#define CAR_CCLKG_BURST_POLICY \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
+
+#define CAR_SUPER_CCLKG_DIVIDER \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
+
+#define CAR_CCLKLP_BURST_POLICY \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
+#define PLLX_DIV2_BYPASS_LP (1<<16)
+
+#define CAR_SUPER_CCLKLP_DIVIDER \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
+
+#define CAR_BOND_OUT_V \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
+#define CAR_BOND_OUT_V_CPU_G (1<<0)
+#define CAR_BOND_OUT_V_CPU_LP (1<<1)
+
+#define CAR_CLK_ENB_V_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
+#define CAR_CLK_ENB_V_CPU_G (1<<0)
+#define CAR_CLK_ENB_V_CPU_LP (1<<1)
+
+#define CAR_RST_CPUG_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
+
+#define CAR_RST_CPUG_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
+
+#define CAR_RST_CPULP_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
+
+#define CAR_RST_CPULP_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
+
+#define CAR_CLK_CPUG_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
+
+#define CAR_CLK_CPUG_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
+
+#define CAR_CLK_CPULP_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
+
+#define CAR_CLK_CPULP_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
+
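+/* CPU_CLOCK(cpu) selects the per-CPU clock-stop bit in the CLK_CPU*_CMPLX
+ * registers; CPU_RESET(cpu) sets the matching bit in each of the four
+ * per-CPU reset fields of the RST_CPU*_CMPLX registers. */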
+#define CPU_CLOCK(cpu) (0x1<<(8+cpu))
+#define CPU_RESET(cpu) (0x1111ul<<(cpu))
+
+static int cluster_switch_prolog_clock(unsigned int flags)
+{
+ u32 reg;
+ u32 CclkBurstPolicy;
+	u32 SuperCclkDivider;
+
+ /* Read the bond out register containing the G and LP CPUs. */
+ reg = readl(CAR_BOND_OUT_V);
+
+	/* Sync G-PLLX divider bypass with LP (no effect on G; just prevents
+	   the LP settings from being overwritten by the save/restore code) */
+ CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
+ CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
+ writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
+
+ /* Switching to G? */
+ if (flags & TEGRA_POWER_CLUSTER_G) {
+ /* Do the G CPUs exist? */
+ if (reg & CAR_BOND_OUT_V_CPU_G)
+ return -ENXIO;
+
+		/* Keep G CPU clock policy set by the upper layer, with the
+		   exception of the transition via LP1 */
+ if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
+ /* In LP1 power mode come up on CLKM (oscillator) */
+ CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
+ CclkBurstPolicy &= ~0xF;
+			SuperCclkDivider = 0;
+
+ writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
+			writel(SuperCclkDivider, CAR_SUPER_CCLKG_DIVIDER);
+ }
+
+ /* Hold G CPUs 1-3 in reset after the switch */
+ reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
+ writel(reg, CAR_RST_CPUG_CMPLX_SET);
+
+ /* Take G CPU 0 out of reset after the switch */
+ reg = CPU_RESET(0);
+ writel(reg, CAR_RST_CPUG_CMPLX_CLR);
+
+ /* Disable the clocks on G CPUs 1-3 after the switch */
+ reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
+ writel(reg, CAR_CLK_CPUG_CMPLX_SET);
+
+ /* Enable the clock on G CPU 0 after the switch */
+ reg = CPU_CLOCK(0);
+ writel(reg, CAR_CLK_CPUG_CMPLX_CLR);
+
+ /* Enable the G CPU complex clock after the switch */
+ reg = CAR_CLK_ENB_V_CPU_G;
+ writel(reg, CAR_CLK_ENB_V_SET);
+ }
+ /* Switching to LP? */
+ else if (flags & TEGRA_POWER_CLUSTER_LP) {
+ /* Does the LP CPU exist? */
+ if (reg & CAR_BOND_OUT_V_CPU_LP)
+ return -ENXIO;
+
+ /* Keep LP CPU clock policy set by upper layer, with the
+ exception of the transition via LP1 */
+ if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
+ /* In LP1 power mode come up on CLKM (oscillator) */
+ CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
+ CclkBurstPolicy &= ~0xF;
+			SuperCclkDivider = 0;
+
+ writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
+			writel(SuperCclkDivider, CAR_SUPER_CCLKLP_DIVIDER);
+ }
+
+		/* Take the LP CPU out of reset after the switch */
+ reg = CPU_RESET(0);
+ writel(reg, CAR_RST_CPULP_CMPLX_CLR);
+
+ /* Enable the clock on the LP CPU after the switch */
+ reg = CPU_CLOCK(0);
+ writel(reg, CAR_CLK_CPULP_CMPLX_CLR);
+
+ /* Enable the LP CPU complex clock after the switch */
+ reg = CAR_CLK_ENB_V_CPU_LP;
+ writel(reg, CAR_CLK_ENB_V_SET);
+ }
+
+ return 0;
+}
+
+void tegra_cluster_switch_prolog(unsigned int flags)
+{
+ unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
+ unsigned int current_cluster = is_lp_cluster()
+ ? TEGRA_POWER_CLUSTER_LP
+ : TEGRA_POWER_CLUSTER_G;
+ u32 reg;
+
+	/* Read the flow controller CSR register and clear the CPU switch
+	   and immediate flags. If an actual CPU switch is to be performed,
+	   re-write the CSR register with the desired values. */
+ reg = readl(FLOW_CTRL_CPU_CSR(0));
+ reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
+ FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);
+
+ /* Program flow controller for immediate wake if requested */
+ if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
+ reg |= FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE;
+
+ /* Do nothing if no switch actions requested */
+ if (!target_cluster)
+ goto done;
+
+ if ((current_cluster != target_cluster) ||
+ (flags & TEGRA_POWER_CLUSTER_FORCE)) {
+ if (current_cluster != target_cluster) {
+			/* Set up the clocks for the target CPU. */
+ if (cluster_switch_prolog_clock(flags)) {
+ /* The target CPU does not exist */
+ goto done;
+ }
+
+ /* Set up the flow controller to switch CPUs. */
+ reg |= FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER;
+ }
+ }
+
+done:
+ writel(reg, FLOW_CTRL_CPU_CSR(0));
+}
+
+
+static void cluster_switch_epilog_actlr(void)
+{
+ u32 actlr;
+
+ /* TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
+ as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
+ switch by CPU save/restore code, but SMP bit is restored correctly.
+ Synchronize these two bits here after LP=>G transition. Note that
+ only CPU0 core is powered on before and after the switch. See also
+ bug 807595. */
+
+ __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+
+ if (actlr & (0x1 << 6)) {
+ actlr |= 0x1;
+ __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
+ }
+}
+
+static void cluster_switch_epilog_gic(void)
+{
+ unsigned int max_irq, i;
+ void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
+
+	/* Reprogram the interrupt affinity because, on the LP CPU,
+	   the interrupt distributor affinity registers are stubbed out
+	   by ARM (reads as zero, writes ignored). So when the LP CPU
+ context save code runs, the affinity registers will read
+ as all zero. This causes all interrupts to be effectively
+ disabled when back on the G CPU because they aren't routable
+ to any CPU. See bug 667720 for details. */
+
+ max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
+ max_irq = (max_irq + 1) * 32;
+
+ for (i = 32; i < max_irq; i += 4) {
+ u32 val = 0x01010101;
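+		/* Default: route each of the four interrupts covered by this
+		 * register to CPU0 (one target byte per interrupt). */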
+#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
+ unsigned int irq;
+ for (irq = i; irq < (i + 4); irq++) {
+ struct cpumask mask;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && desc->affinity_hint &&
+ desc->irq_data.affinity) {
+ if (cpumask_and(&mask, desc->affinity_hint,
+ desc->irq_data.affinity))
+ val |= (*cpumask_bits(&mask) & 0xff) <<
+ ((irq & 3) * 8);
+ }
+ }
+#endif
+ writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
+ }
+}
+
+void tegra_cluster_switch_epilog(unsigned int flags)
+{
+ u32 reg;
+
+ /* Make sure the switch and immediate flags are cleared in
+ the flow controller to prevent undesirable side-effects
+ for future users of the flow controller. */
+ reg = readl(FLOW_CTRL_CPU_CSR(0));
+ reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
+ FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);
+ writel(reg, FLOW_CTRL_CPU_CSR(0));
+
+ /* Perform post-switch LP=>G clean-up */
+ if (!is_lp_cluster()) {
+ cluster_switch_epilog_actlr();
+ cluster_switch_epilog_gic();
+ }
+
+ #if DEBUG_CLUSTER_SWITCH
+ {
+ /* FIXME: clock functions below are taking mutex */
+ struct clk *c = tegra_get_clock_by_name(
+ is_lp_cluster() ? "cpu_lp" : "cpu_g");
+ DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
+ is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
+ }
+ #endif
+}
+
+int tegra_cluster_control(unsigned int us, unsigned int flags)
+{
+ static ktime_t last_g2lp;
+
+ unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
+ unsigned int current_cluster = is_lp_cluster()
+ ? TEGRA_POWER_CLUSTER_LP
+ : TEGRA_POWER_CLUSTER_G;
+ unsigned long irq_flags;
+
+ if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
+ return -EINVAL;
+
+ if (num_online_cpus() > 1)
+ return -EBUSY;
+
+ if ((current_cluster == target_cluster)
+ && !(flags & TEGRA_POWER_CLUSTER_FORCE))
+ return -EEXIST;
+
+ if (target_cluster == TEGRA_POWER_CLUSTER_G)
+ if (!is_g_cluster_present())
+ return -EPERM;
+
+ trace_power_start(POWER_PSTATE, target_cluster, 0);
+
+ if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
+ us = 0;
+
+ DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
+ (flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
+ is_lp_cluster() ? "LP" : "G",
+ (target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
+ (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
+ (flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
+ us));
+
+ local_irq_save(irq_flags);
+
+ if (current_cluster != target_cluster && !timekeeping_suspended) {
+ ktime_t now = ktime_get();
+ if (target_cluster == TEGRA_POWER_CLUSTER_G) {
+ s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
+ s64 t_off = tegra_cpu_power_off_time();
+ if (t_off > t)
+ udelay((unsigned int)(t_off - t));
+
+ tegra_dvfs_rail_on(tegra_cpu_rail, now);
+
+ } else {
+ last_g2lp = now;
+ tegra_dvfs_rail_off(tegra_cpu_rail, now);
+ }
+ }
+
+ if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
+ if (us)
+ tegra_lp2_set_trigger(us);
+
+ tegra_cluster_switch_prolog(flags);
+ tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
+ tegra_cluster_switch_epilog(flags);
+
+ if (us)
+ tegra_lp2_set_trigger(0);
+ } else {
+ tegra_set_cpu_in_lp2(0);
+ cpu_pm_enter();
+ tegra_idle_lp2_last(0, flags);
+ cpu_pm_exit();
+ tegra_clear_cpu_in_lp2(0);
+ }
+ local_irq_restore(irq_flags);
+
+ DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+void tegra_lp0_suspend_mc(void)
+{
+	/* Since the memory frequency after LP0 is restored to the boot rate,
+	   MC timing is saved during init, not on entry to LP0. Keep
+	   this hook just in case, anyway. */
+}
+
+void tegra_lp0_resume_mc(void)
+{
+ tegra_mc_timing_restore();
+}
+
+void tegra_lp0_cpu_mode(bool enter)
+{
+ static bool entered_on_g = false;
+ unsigned int flags;
+
+ if (enter)
+ entered_on_g = !is_lp_cluster();
+
+ if (entered_on_g) {
+ flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
+ flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
+ tegra_cluster_control(0, flags);
+ }
+}
+#endif
+
+#define IO_DPD_INFO(_name, _index, _bit) \
+ { \
+ .name = _name, \
+ .io_dpd_reg_index = _index, \
+ .io_dpd_bit = _bit, \
+ }
+
+/* PMC IO DPD register offsets */
+#define APBDEV_PMC_IO_DPD_REQ_0 0x1b8
+#define APBDEV_PMC_IO_DPD_STATUS_0 0x1bc
+#define APBDEV_PMC_SEL_DPD_TIM_0 0x1c8
+#define APBDEV_DPD_ENABLE_LSB 30
+#define APBDEV_DPD2_ENABLE_LSB 5
+#define PMC_DPD_SAMPLE 0x20
+
+struct tegra_io_dpd tegra_list_io_dpd[] = {
+ /* sd dpd bits in dpd2 register */
+ IO_DPD_INFO("sdhci-tegra.0", 1, 1), /* SDMMC1 */
+ IO_DPD_INFO("sdhci-tegra.2", 1, 2), /* SDMMC3 */
+ IO_DPD_INFO("sdhci-tegra.3", 1, 3), /* SDMMC4 */
+};
+
+struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
+{
+ int i;
+ const char *name = dev ? dev_name(dev) : NULL;
+ if (name) {
+ for (i = 0; i < (sizeof(tegra_list_io_dpd) /
+ sizeof(struct tegra_io_dpd)); i++) {
+ if (!(strncmp(tegra_list_io_dpd[i].name, name,
+ strlen(name)))) {
+ return &tegra_list_io_dpd[i];
+ }
+ }
+ }
+ dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
+ ((name) ? name : "NULL"));
+ return NULL;
+}
+EXPORT_SYMBOL(tegra_io_dpd_get);
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+static DEFINE_SPINLOCK(tegra_io_dpd_lock);
+
+void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
+{
+ unsigned int enable_mask;
+ unsigned int dpd_status;
+ unsigned int dpd_enable_lsb;
+
+ if (WARN_ON(!hnd))
+ return;
+ spin_lock(&tegra_io_dpd_lock);
+ dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
+ APBDEV_DPD_ENABLE_LSB;
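+	/* Enable pad sampling and program the DPD timer, then issue the
+	 * deep power down request; this enable path writes 2 into the
+	 * per-register enable field, while the disable path below writes 1. */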
+ writel(0x1, pmc + PMC_DPD_SAMPLE);
+ writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
+ enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
+ writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
+ hnd->io_dpd_reg_index * 8));
+ udelay(1);
+ dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
+ hnd->io_dpd_reg_index * 8));
+ if (!(dpd_status & (1 << hnd->io_dpd_bit)))
+ pr_info("Error: dpd%d enable failed, status=%#x\n",
+ (hnd->io_dpd_reg_index + 1), dpd_status);
+ /* Sample register must be reset before next sample operation */
+ writel(0x0, pmc + PMC_DPD_SAMPLE);
+ spin_unlock(&tegra_io_dpd_lock);
+ return;
+}
+EXPORT_SYMBOL(tegra_io_dpd_enable);
+
+void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
+{
+ unsigned int enable_mask;
+ unsigned int dpd_status;
+ unsigned int dpd_enable_lsb;
+
+ if (WARN_ON(!hnd))
+ return;
+ spin_lock(&tegra_io_dpd_lock);
+ dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
+ APBDEV_DPD_ENABLE_LSB;
+ enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
+ writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
+ hnd->io_dpd_reg_index * 8));
+ dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
+ hnd->io_dpd_reg_index * 8));
+ if (dpd_status & (1 << hnd->io_dpd_bit))
+ pr_info("Error: dpd%d disable failed, status=%#x\n",
+ (hnd->io_dpd_reg_index + 1), dpd_status);
+ spin_unlock(&tegra_io_dpd_lock);
+ return;
+}
+EXPORT_SYMBOL(tegra_io_dpd_disable);
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
new file mode 100644
index 000000000000..edcb28304d41
--- /dev/null
+++ b/arch/arm/mach-tegra/pm.c
@@ -0,0 +1,1244 @@
+/*
+ * arch/arm/mach-tegra/pm.c
+ *
+ * CPU complex suspend & resume functions for Tegra SoCs
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/syscore_ops.h>
+#include <linux/vmalloc.h>
+#include <linux/memblock.h>
+#include <linux/console.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_pm.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/localtimer.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/powergate.h>
+
+#include "board.h"
+#include "clock.h"
+#include "cpuidle.h"
+#include "fuse.h"
+#include "gic.h"
+#include "pm.h"
+#include "pm-irq.h"
+#include "reset.h"
+#include "sleep.h"
+#include "timer.h"
+#include "dvfs.h"
+
+struct suspend_context {
+ /*
+ * The next 7 values are referenced by offset in __restart_plls
+ * in headsmp-t2.S, and should not be moved
+ */
+ u32 pllx_misc;
+ u32 pllx_base;
+ u32 pllp_misc;
+ u32 pllp_base;
+ u32 pllp_outa;
+ u32 pllp_outb;
+ u32 pll_timeout;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+
+ u32 mc[3];
+ u8 uart[5];
+
+ struct tegra_twd_context twd;
+};
+
+#ifdef CONFIG_PM_SLEEP
+#if USE_TEGRA_CPU_SUSPEND
+void *tegra_cpu_context; /* non-cacheable page for CPU context */
+#endif
+phys_addr_t tegra_pgd_phys; /* pgd used by hotplug & LP2 bootup */
+static pgd_t *tegra_pgd;
+static DEFINE_SPINLOCK(tegra_lp2_lock);
+static cpumask_t tegra_in_lp2;
+static cpumask_t *iram_cpu_lp2_mask;
+static unsigned long *iram_cpu_lp1_mask;
+static u8 *iram_save;
+static unsigned long iram_save_size;
+static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
+static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+static int tegra_last_pclk;
+#endif
+
+struct suspend_context tegra_sctx;
+
+#define TEGRA_POWER_PWRREQ_POLARITY (1 << 8) /* core power request polarity */
+#define TEGRA_POWER_PWRREQ_OE (1 << 9) /* core power request enable */
+#define TEGRA_POWER_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
+#define TEGRA_POWER_SYSCLK_OE (1 << 11) /* system clock enable */
+#define TEGRA_POWER_PWRGATE_DIS (1 << 12) /* power gate disabled */
+#define TEGRA_POWER_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */
+#define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15) /* CPU power request polarity */
+#define TEGRA_POWER_CPU_PWRREQ_OE (1 << 16) /* CPU power request enable */
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
+#define PMC_WAKE_MASK 0xc
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_DPAD_ORIDE 0x1C
+#define PMC_WAKE_DELAY 0xe0
+#define PMC_DPD_SAMPLE 0x20
+
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_COREPWRGOOD_TIMER 0x3c
+#define PMC_SCRATCH0 0x50
+#define PMC_SCRATCH1 0x54
+#define PMC_SCRATCH4 0x60
+#define PMC_CPUPWRGOOD_TIMER 0xc8
+#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_COREPWROFF_TIMER PMC_WAKE_DELAY
+
+#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
+#define PMC_SCRATCH4_WAKE_CLUSTER_MASK (1<<31)
+#endif
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_PLLC_BASE 0x80
+#define CLK_RESET_PLLM_BASE 0x90
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+#define CLK_RESET_PLLP_BASE 0xa0
+#define CLK_RESET_PLLP_OUTA 0xa4
+#define CLK_RESET_PLLP_OUTB 0xa8
+#define CLK_RESET_PLLP_MISC 0xac
+
+#define CLK_RESET_SOURCE_CSITE 0x1d4
+
+#define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
+#define CLK_RESET_CCLK_BURST_POLICY_PLLM 3
+#define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
+
+#define EMC_MRW_0 0x0e8
+#define EMC_MRW_DEV_SELECTN 30
+#define EMC_MRW_DEV_NONE (3 << EMC_MRW_DEV_SELECTN)
+
+#define MC_SECURITY_START 0x6c
+#define MC_SECURITY_SIZE 0x70
+#define MC_SECURITY_CFG2 0x7c
+
+struct dvfs_rail *tegra_cpu_rail;
+static struct dvfs_rail *tegra_core_rail;
+static struct clk *tegra_pclk;
+static const struct tegra_suspend_platform_data *pdata;
+static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
+
+static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
+ [TEGRA_SUSPEND_NONE] = "none",
+ [TEGRA_SUSPEND_LP2] = "lp2",
+ [TEGRA_SUSPEND_LP1] = "lp1",
+ [TEGRA_SUSPEND_LP0] = "lp0",
+};
+
+#if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
+enum tegra_cluster_switch_time_id {
+ tegra_cluster_switch_time_id_start = 0,
+ tegra_cluster_switch_time_id_prolog,
+ tegra_cluster_switch_time_id_switch,
+ tegra_cluster_switch_time_id_epilog,
+ tegra_cluster_switch_time_id_max
+};
+
+static unsigned long
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
+#define tegra_cluster_switch_time(flags, id) \
+ do { \
+ barrier(); \
+ if (flags & TEGRA_POWER_CLUSTER_MASK) { \
+ void __iomem *timer_us = \
+ IO_ADDRESS(TEGRA_TMRUS_BASE); \
+ if (id < tegra_cluster_switch_time_id_max) \
+ tegra_cluster_switch_times[id] = \
+ readl(timer_us); \
+ wmb(); \
+ } \
+ barrier(); \
+ } while(0)
+#else
+#define tegra_cluster_switch_time(flags, id) do {} while(0)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+unsigned long tegra_cpu_power_good_time(void)
+{
+ if (WARN_ON_ONCE(!pdata))
+ return 5000;
+
+ return pdata->cpu_timer;
+}
+
+unsigned long tegra_cpu_power_off_time(void)
+{
+ if (WARN_ON_ONCE(!pdata))
+ return 5000;
+
+ return pdata->cpu_off_timer;
+}
+
+unsigned long tegra_cpu_lp2_min_residency(void)
+{
+ if (WARN_ON_ONCE(!pdata))
+ return 2000;
+
+ return pdata->cpu_lp2_min_residency;
+}
+
+/*
+ * create_suspend_pgtable
+ *
+ * Creates a page table with identity mappings of physical memory and IRAM
+ * for use when the MMU is off, in addition to all the regular kernel mappings.
+ */
+static __init int create_suspend_pgtable(void)
+{
+ tegra_pgd = pgd_alloc(&init_mm);
+ if (!tegra_pgd)
+ return -ENOMEM;
+
+ identity_mapping_add(tegra_pgd, PLAT_PHYS_OFFSET,
+ PLAT_PHYS_OFFSET + memblock_phys_mem_size());
+ identity_mapping_add(tegra_pgd, IO_IRAM_PHYS,
+ IO_IRAM_PHYS + SECTION_SIZE);
+
+ /* inner/outer write-back/write-allocate, sharable */
+ tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
+
+ return 0;
+}
+
+/*
+ * alloc_suspend_context
+ *
+ * Allocate a non-cacheable page to hold the CPU contexts.
+ * The standard ARM CPU context save functions don't work if there's
+ * an external L2 cache controller (like a PL310) in the system.
+ */
+static __init int alloc_suspend_context(void)
+{
+#if USE_TEGRA_CPU_SUSPEND
+ pgprot_t prot = __pgprot_modify(pgprot_kernel, L_PTE_MT_MASK,
+ L_PTE_MT_BUFFERABLE | L_PTE_XN);
+ struct page *ctx_page;
+ unsigned long ctx_virt = 0;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ ctx_page = alloc_pages(GFP_KERNEL, 0);
+ if (IS_ERR_OR_NULL(ctx_page))
+ goto fail;
+
+ tegra_cpu_context = vm_map_ram(&ctx_page, 1, -1, prot);
+ if (IS_ERR_OR_NULL(tegra_cpu_context))
+ goto fail;
+
+ /* Add the context page to our private pgd. */
+ ctx_virt = (unsigned long)tegra_cpu_context;
+
+ pgd = tegra_pgd + pgd_index(ctx_virt);
+ if (!pgd_present(*pgd))
+ goto fail;
+ pmd = pmd_offset(pgd, ctx_virt);
+ if (!pmd_none(*pmd))
+ goto fail;
+ pte = pte_alloc_kernel(pmd, ctx_virt);
+ if (!pte)
+ goto fail;
+
+ set_pte_ext(pte, mk_pte(ctx_page, prot), 0);
+
+ outer_clean_range(__pa(pmd), __pa(pmd + 1));
+
+ return 0;
+
+fail:
+ if (ctx_page)
+ __free_page(ctx_page);
+ if (ctx_virt)
+ vm_unmap_ram((void*)ctx_virt, 1);
+ tegra_cpu_context = NULL;
+ return -ENOMEM;
+#else
+ return 0;
+#endif
+}
+
+/* Ensure that sufficient time has passed for a register write to
+ * serialize into the 32 kHz domain (one 32 kHz period is ~30.5 us, so a
+ * 130 us delay spans several clock edges). */
+static void pmc_32kwritel(u32 val, unsigned long offs)
+{
+ writel(val, pmc + offs);
+ udelay(130);
+}
+
+static void set_power_timers(unsigned long us_on, unsigned long us_off,
+ long rate)
+{
+ static unsigned long last_us_off = 0;
+ unsigned long long ticks;
+ unsigned long long pclk;
+
+ if (WARN_ON_ONCE(rate <= 0))
+ pclk = 100000000;
+ else
+ pclk = rate;
+
+ if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
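+		/* Convert the power-good/power-off times from microseconds to
+		 * pclk ticks, rounding up. */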
+ ticks = (us_on * pclk) + 999999ull;
+ do_div(ticks, 1000000);
+ writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
+
+ ticks = (us_off * pclk) + 999999ull;
+ do_div(ticks, 1000000);
+ writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
+ wmb();
+ }
+ tegra_last_pclk = pclk;
+ last_us_off = us_off;
+}
+
+/*
+ * restore_cpu_complex
+ *
+ * restores cpu clock setting, clears flow controller
+ *
+ * Always called on CPU 0.
+ */
+static void restore_cpu_complex(u32 mode)
+{
+ int cpu = smp_processor_id();
+ unsigned int reg;
+
+ BUG_ON(cpu != 0);
+
+ /* restore original PLL settings */
+ writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
+ writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);
+ writel(tegra_sctx.pllp_misc, clk_rst + CLK_RESET_PLLP_MISC);
+ writel(tegra_sctx.pllp_base, clk_rst + CLK_RESET_PLLP_BASE);
+ writel(tegra_sctx.pllp_outa, clk_rst + CLK_RESET_PLLP_OUTA);
+ writel(tegra_sctx.pllp_outb, clk_rst + CLK_RESET_PLLP_OUTB);
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
+ reg &= 0xF;
+ if (reg != 0x8) {
+ /* restore original burst policy setting; PLLX state restored
+ * by CPU boot-up code - wait for PLL stabilization if PLLX
+ * was enabled */
+
+ reg = readl(clk_rst + CLK_RESET_PLLX_BASE);
+ /* mask out bit 27 - not to check PLL lock bit */
+ BUG_ON((reg & (~(1 << 27))) !=
+ (tegra_sctx.pllx_base & (~(1 << 27))));
+
+ if (tegra_sctx.pllx_base & (1<<30)) {
+#if USE_PLL_LOCK_BITS
+ /* Enable lock detector */
+ reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
+ reg |= 1<<18;
+ writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
+			while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
+				 (1<<27)))
+ cpu_relax();
+#else
+ udelay(300);
+#endif
+ }
+ writel(tegra_sctx.cclk_divider, clk_rst +
+ CLK_RESET_CCLK_DIVIDER);
+ writel(tegra_sctx.cpu_burst, clk_rst +
+ CLK_RESET_CCLK_BURST);
+ }
+
+ writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
+
+ /* Do not power-gate CPU 0 when flow controlled */
+ reg = readl(FLOW_CTRL_CPU_CSR(cpu));
+ reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
+ reg &= ~FLOW_CTRL_CSR_WFI_BITMAP; /* clear wfi bitmap */
+ reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
+ flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
+
+	/* If an immediate cluster switch is being performed, restore the
+	   local timer registers. For calls resulting from CPU LP2 in
+ idle or system suspend, the local timer was shut down and
+ timekeeping switched over to the global system timer. In this
+ case keep local timer disabled, and restore only periodic load. */
+ if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
+ TEGRA_POWER_CLUSTER_IMMEDIATE)))
+ tegra_sctx.twd.twd_ctrl = 0;
+ tegra_twd_resume(&tegra_sctx.twd);
+}
+
+/*
+ * suspend_cpu_complex
+ *
+ * saves pll state for use by restart_plls, prepares flow controller for
+ * transition to suspend state
+ *
+ * Must always be called on cpu 0.
+ */
+static void suspend_cpu_complex(u32 mode)
+{
+ int cpu = smp_processor_id();
+ unsigned int reg;
+ int i;
+
+ BUG_ON(cpu != 0);
+
+ /* switch coresite to clk_m, save off original source */
+ tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
+ writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
+
+ tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
+ tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
+ tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
+ tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
+ tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
+ tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
+ tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
+ tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
+
+ tegra_twd_suspend(&tegra_sctx.twd);
+
+ reg = readl(FLOW_CTRL_CPU_CSR(cpu));
+ reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
+ reg &= ~FLOW_CTRL_CSR_WFI_BITMAP; /* clear wfi bitmap */
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu; /* enable power gating on wfe */
+#else
+ reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu; /* enable power gating on wfi */
+#endif
+ reg |= FLOW_CTRL_CSR_ENABLE; /* enable power gating */
+ flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
+
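+	/* The EVENT and INTR flags are cleared by writing 1 (see the restore
+	 * path above); clear any latched flags on the other CPUs as well. */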
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (i == cpu)
+ continue;
+ reg = readl(FLOW_CTRL_CPU_CSR(i));
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG;
+ reg |= FLOW_CTRL_CSR_INTR_FLAG;
+ flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
+ }
+
+ tegra_gic_cpu_disable();
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Tegra3 enters LPx states via WFI - do not propagate legacy IRQs
+ to CPU core to avoid fall through WFI (IRQ-to-flow controller wake
+ path is not affected). */
+ tegra_gic_pass_through_disable();
+#endif
+}
+
+void tegra_clear_cpu_in_lp2(int cpu)
+{
+ spin_lock(&tegra_lp2_lock);
+ BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
+ cpumask_clear_cpu(cpu, &tegra_in_lp2);
+
+	/* Update the IRAM copy used by the reset handler. The IRAM copy
+	   can't be used directly by cpumask_clear_cpu() because it uses
+	   LDREX/STREX, which require the addressed location to be inner
+	   cacheable and shareable, which IRAM isn't. */
+ writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
+ dsb();
+
+ spin_unlock(&tegra_lp2_lock);
+}
+
+bool tegra_set_cpu_in_lp2(int cpu)
+{
+ bool last_cpu = false;
+
+ spin_lock(&tegra_lp2_lock);
+ BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
+ cpumask_set_cpu(cpu, &tegra_in_lp2);
+
+	/* Update the IRAM copy used by the reset handler. The IRAM copy
+	   can't be used directly by cpumask_set_cpu() because it uses
+	   LDREX/STREX, which require the addressed location to be inner
+	   cacheable and shareable, which IRAM isn't. */
+ writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
+ dsb();
+
+ if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
+ last_cpu = true;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ else
+ tegra2_cpu_set_resettable_soon();
+#endif
+
+ spin_unlock(&tegra_lp2_lock);
+ return last_cpu;
+}
+
+static void tegra_sleep_core(enum tegra_suspend_mode mode,
+ unsigned long v2p)
+{
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ if (mode == TEGRA_SUSPEND_LP0) {
+ tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE3,
+ virt_to_phys(tegra_resume));
+ } else {
+ tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE6,
+ (TEGRA_RESET_HANDLER_BASE +
+ tegra_cpu_reset_handler_offset));
+ }
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_sleep_core(v2p);
+#else
+ tegra3_sleep_core(v2p);
+#endif
+}
+
+static inline void tegra_sleep_cpu(unsigned long v2p)
+{
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE4,
+ (TEGRA_RESET_HANDLER_BASE +
+ tegra_cpu_reset_handler_offset));
+#endif
+ tegra_sleep_cpu_save(v2p);
+}
+
+unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
+{
+ u32 mode; /* hardware + software power mode flags */
+ unsigned int remain;
+
+ /* Only the last cpu down does the final suspend steps */
+ mode = readl(pmc + PMC_CTRL);
+ mode |= TEGRA_POWER_CPU_PWRREQ_OE;
+ if (pdata->combined_req)
+ mode &= ~TEGRA_POWER_PWRREQ_OE;
+ else
+ mode |= TEGRA_POWER_PWRREQ_OE;
+ mode &= ~TEGRA_POWER_EFFECT_LP0;
+ pmc_32kwritel(mode, PMC_CTRL);
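+	/* Fold the caller's flags into 'mode' only after the hardware control
+	 * bits have been written to PMC_CTRL; the combined value is what the
+	 * cluster-switch prolog/epilog below operate on. */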
+ mode |= flags;
+
+ tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);
+
+ /*
+ * We can use clk_get_rate_all_locked() here, because all other cpus
+ * are in LP2 state and irqs are disabled
+ */
+ if (flags & TEGRA_POWER_CLUSTER_MASK) {
+ set_power_timers(pdata->cpu_timer, 0,
+ clk_get_rate_all_locked(tegra_pclk));
+ tegra_cluster_switch_prolog(mode);
+ } else {
+ set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
+ clk_get_rate_all_locked(tegra_pclk));
+ }
+
+ if (sleep_time)
+ tegra_lp2_set_trigger(sleep_time);
+
+ cpu_complex_pm_enter();
+ suspend_cpu_complex(mode);
+ tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
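+	/* Clean the caches and disable the outer cache before power gating the
+	 * CPU so that no dirty lines are lost while the core is off. */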
+ flush_cache_all();
+ outer_disable();
+
+ tegra_sleep_cpu(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+
+#ifdef CONFIG_CACHE_L2X0
+ tegra_init_cache(false);
+#endif
+ tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
+ restore_cpu_complex(mode);
+ cpu_complex_pm_exit();
+
+ remain = tegra_lp2_timer_remain();
+ if (sleep_time)
+ tegra_lp2_set_trigger(0);
+
+ if (flags & TEGRA_POWER_CLUSTER_MASK)
+ tegra_cluster_switch_epilog(mode);
+
+ tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
+
+#if INSTRUMENT_CLUSTER_SWITCH
+ if (flags & TEGRA_POWER_CLUSTER_MASK) {
+ pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
+ is_lp_cluster() ? "G=>LP" : "LP=>G",
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
+ tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
+ }
+#endif
+ return remain;
+}
+
+static int tegra_common_suspend(void)
+{
+ void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+
+ tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
+ tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
+ tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
+
+ /* copy the reset vector and SDRAM shutdown code into IRAM */
+ memcpy(iram_save, iram_code, iram_save_size);
+ memcpy(iram_code, tegra_iram_start(), iram_save_size);
+
+ return 0;
+}
+
+static void tegra_common_resume(void)
+{
+ void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
+#endif
+
+ /* Clear DPD sample */
+ writel(0x0, pmc + PMC_DPD_SAMPLE);
+
+ writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
+ writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
+ writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* trigger emc mode write */
+ writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
+#endif
+ /* clear scratch registers shared by suspend and the reset pen */
+ writel(0x0, pmc + PMC_SCRATCH39);
+ writel(0x0, pmc + PMC_SCRATCH41);
+
+ /* restore IRAM */
+ memcpy(iram_code, iram_save, iram_save_size);
+}
+
+static int tegra_suspend_prepare_late(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ disable_irq(INT_SYS_STATS_MON);
+#endif
+ return 0;
+}
+
+static void tegra_suspend_wake(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ enable_irq(INT_SYS_STATS_MON);
+#endif
+}
+
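+/*
+ * Program the PMC for the requested suspend mode: power request
+ * polarity/enable bits, the warmboot flag and LP0 vector for LP0, the
+ * resume address in SCRATCH41 for LP0/LP1, and the CPU power timers.
+ */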
+static void tegra_pm_set(enum tegra_suspend_mode mode)
+{
+ u32 reg, boot_flag;
+ unsigned long rate = 32768;
+
+ reg = readl(pmc + PMC_CTRL);
+ reg |= TEGRA_POWER_CPU_PWRREQ_OE;
+ if (pdata->combined_req)
+ reg &= ~TEGRA_POWER_PWRREQ_OE;
+ else
+ reg |= TEGRA_POWER_PWRREQ_OE;
+ reg &= ~TEGRA_POWER_EFFECT_LP0;
+
+ switch (mode) {
+ case TEGRA_SUSPEND_LP0:
+ if (pdata->combined_req) {
+ reg |= TEGRA_POWER_PWRREQ_OE;
+ reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
+ }
+
+ /*
+ * LP0 boots through the AVP, which then resumes the AVP to
+ * the address in scratch 39, and the cpu to the address in
+ * scratch 41 to tegra_resume
+ */
+ writel(0x0, pmc + PMC_SCRATCH39);
+
+ /* Enable DPD sampling to capture each pad's data and the direction
+  * in which the pad will be driven during LP0 mode */
+ writel(0x1, pmc + PMC_DPD_SAMPLE);
+
+ /* Set warmboot flag */
+ boot_flag = readl(pmc + PMC_SCRATCH0);
+ pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
+
+ pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);
+
+ reg |= TEGRA_POWER_EFFECT_LP0;
+ /* No break here. LP0 code falls through to write SCRATCH41 */
+ case TEGRA_SUSPEND_LP1:
+ __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
+ wmb();
+ break;
+ case TEGRA_SUSPEND_LP2:
+ rate = clk_get_rate(tegra_pclk);
+ break;
+ case TEGRA_SUSPEND_NONE:
+ return;
+ default:
+ BUG();
+ }
+
+ set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
+
+ pmc_32kwritel(reg, PMC_CTRL);
+}
+
+static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
+ [TEGRA_SUSPEND_NONE] = "none",
+ [TEGRA_SUSPEND_LP2] = "LP2",
+ [TEGRA_SUSPEND_LP1] = "LP1",
+ [TEGRA_SUSPEND_LP0] = "LP0",
+};
+
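+/*
+ * suspend_ops .enter callback: runs the board hooks around
+ * tegra_suspend_dram() and reports the time spent suspended to the
+ * DVFS rail layer via tegra_dvfs_rail_pause(), using the persistent
+ * clock delta across the suspend.
+ */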
+static int tegra_suspend_enter(suspend_state_t state)
+{
+ int ret;
+ ktime_t delta;
+ struct timespec ts_entry, ts_exit;
+
+ if (pdata && pdata->board_suspend)
+ pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);
+
+ read_persistent_clock(&ts_entry);
+
+ ret = tegra_suspend_dram(current_suspend_mode, 0);
+
+ read_persistent_clock(&ts_exit);
+
+ if (timespec_compare(&ts_exit, &ts_entry) > 0) {
+ delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));
+
+ tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
+ if (current_suspend_mode == TEGRA_SUSPEND_LP0)
+ tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
+ else
+ tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
+ }
+
+ if (pdata && pdata->board_resume)
+ pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);
+
+ return ret;
+}
+
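+/* Warn about partitions (other than the CPUs and L2, which are powered
+   off later) that are still powered when entering LP0/LP1. */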
+static void tegra_suspend_check_pwr_stats(void)
+{
+ /* cpus and l2 are powered off later */
+ unsigned long pwrgate_partid_mask =
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ (1 << TEGRA_POWERGATE_HEG) |
+ (1 << TEGRA_POWERGATE_SATA) |
+ (1 << TEGRA_POWERGATE_3D1) |
+#endif
+ (1 << TEGRA_POWERGATE_3D) |
+ (1 << TEGRA_POWERGATE_VENC) |
+ (1 << TEGRA_POWERGATE_PCIE) |
+ (1 << TEGRA_POWERGATE_VDEC) |
+ (1 << TEGRA_POWERGATE_MPE);
+
+ int partid;
+
+ for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
+ if ((1 << partid) & pwrgate_partid_mask)
+ if (tegra_powergate_is_powered(partid))
+ pr_warning("partition %s is left on before suspend\n",
+ tegra_powergate_get_name(partid));
+
+ return;
+}
+
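+/*
+ * Core suspend path: falls back from LP0 to LP1 if the configured wake
+ * events cannot be handled in LP0, then runs the CPU/cluster PM
+ * notifiers, saves state and hands off to the mode-specific low-level
+ * sleep routine before restoring everything on resume.
+ */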
+int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
+{
+ BUG_ON(mode < 0 || mode >= TEGRA_MAX_SUSPEND_MODE);
+
+ if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
+ pr_info("LP0 not used due to unsupported wakeup events\n");
+ mode = TEGRA_SUSPEND_LP1;
+ }
+
+ if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
+ tegra_suspend_check_pwr_stats();
+
+ tegra_common_suspend();
+
+ tegra_pm_set(mode);
+
+ if (pdata && pdata->board_suspend)
+ pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);
+
+ local_fiq_disable();
+
+ cpu_pm_enter();
+ cpu_complex_pm_enter();
+
+ if (mode == TEGRA_SUSPEND_LP0) {
+#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
+ u32 reg = readl(pmc + PMC_SCRATCH4);
+ if (is_lp_cluster())
+ reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
+ else
+ reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
+ pmc_32kwritel(reg, PMC_SCRATCH4);
+#endif
+ tegra_lp0_suspend_mc();
+ tegra_cpu_reset_handler_save();
+
+ }
+ else if (mode == TEGRA_SUSPEND_LP1)
+ *iram_cpu_lp1_mask = 1;
+
+ suspend_cpu_complex(flags);
+
+ flush_cache_all();
+ outer_flush_all();
+ outer_disable();
+
+ if (mode == TEGRA_SUSPEND_LP2)
+ tegra_sleep_cpu(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ else
+ tegra_sleep_core(mode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+
+ tegra_init_cache(true);
+
+ if (mode == TEGRA_SUSPEND_LP0) {
+ tegra_cpu_reset_handler_restore();
+ tegra_lp0_resume_mc();
+ } else if (mode == TEGRA_SUSPEND_LP1)
+ *iram_cpu_lp1_mask = 0;
+
+ restore_cpu_complex(flags);
+
+ /* for platforms where the core & CPU power requests are
+ * combined as a single request to the PMU, transition out
+ * of LP0 state by temporarily enabling both requests
+ */
+ if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
+ u32 reg;
+ reg = readl(pmc + PMC_CTRL);
+ reg |= TEGRA_POWER_CPU_PWRREQ_OE;
+ pmc_32kwritel(reg, PMC_CTRL);
+ reg &= ~TEGRA_POWER_PWRREQ_OE;
+ pmc_32kwritel(reg, PMC_CTRL);
+ }
+
+ cpu_complex_pm_exit();
+ cpu_pm_exit();
+
+ if (pdata && pdata->board_resume)
+ pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
+
+ local_fiq_enable();
+
+ tegra_common_resume();
+
+ return 0;
+}
+
+/*
+ * Function pointers to optional board specific function
+ */
+void (*tegra_deep_sleep)(int);
+EXPORT_SYMBOL(tegra_deep_sleep);
+
+static int tegra_suspend_prepare(void)
+{
+ if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
+ tegra_deep_sleep(1);
+ return 0;
+}
+
+static void tegra_suspend_finish(void)
+{
+ if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
+ tegra_deep_sleep(0);
+}
+
+static const struct platform_suspend_ops tegra_suspend_ops = {
+ .valid = suspend_valid_only_mem,
+ .prepare = tegra_suspend_prepare,
+ .finish = tegra_suspend_finish,
+ .prepare_late = tegra_suspend_prepare_late,
+ .wake = tegra_suspend_wake,
+ .enter = tegra_suspend_enter,
+};
+
+static ssize_t suspend_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *start = buf;
+ char *end = buf + PAGE_SIZE;
+
+ start += scnprintf(start, end - start, "%s ",
+ tegra_suspend_name[current_suspend_mode]);
+ start += scnprintf(start, end - start, "\n");
+
+ return start - buf;
+}
+
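+/*
+ * Parse the first whitespace-delimited token and, if it matches one of
+ * the entries in tegra_suspend_name[], switch current_suspend_mode;
+ * unrecognized strings are silently ignored.
+ */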
+static ssize_t suspend_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int len;
+ const char *name_ptr;
+ enum tegra_suspend_mode new_mode;
+
+ name_ptr = buf;
+ while (*name_ptr && !isspace(*name_ptr))
+ name_ptr++;
+ len = name_ptr - buf;
+ if (!len)
+ goto bad_name;
+
+ for (new_mode = TEGRA_SUSPEND_NONE;
+ new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
+ if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
+ current_suspend_mode = new_mode;
+ break;
+ }
+ }
+
+bad_name:
+ return n;
+}
+
+static struct kobj_attribute suspend_mode_attribute =
+ __ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
+
+static struct kobject *suspend_kobj;
+#endif
+
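+/*
+ * Syscore ops run late in suspend, after device drivers and with only
+ * one CPU online and interrupts disabled; this is where the LP0 CPU
+ * mode switch is applied and the entry/exit messages are logged.
+ */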
+#ifdef CONFIG_PM_SLEEP
+static int tegra_pm_enter_suspend(void)
+{
+ pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
+ if (current_suspend_mode == TEGRA_SUSPEND_LP0)
+ tegra_lp0_cpu_mode(true);
+ return 0;
+}
+
+static void tegra_pm_enter_resume(void)
+{
+ if (current_suspend_mode == TEGRA_SUSPEND_LP0)
+ tegra_lp0_cpu_mode(false);
+ pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
+}
+
+static struct syscore_ops tegra_pm_enter_syscore_ops = {
+ .suspend = tegra_pm_enter_suspend,
+ .resume = tegra_pm_enter_resume,
+};
+
+static __init int tegra_pm_enter_syscore_init(void)
+{
+ register_syscore_ops(&tegra_pm_enter_syscore_ops);
+ return 0;
+}
+subsys_initcall(tegra_pm_enter_syscore_init);
+#endif
+
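+/*
+ * Board-level init: validate the requested suspend mode against the
+ * chip revision and the LP0 vector passed by the bootloader, allocate
+ * the suspend page tables, CPU context area and IRAM save buffer, and
+ * program the PMC power request polarities and timers.
+ */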
+void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
+{
+ u32 reg;
+ u32 mode;
+
+ tegra_cpu_rail = tegra_dvfs_get_rail_by_name("vdd_cpu");
+ tegra_core_rail = tegra_dvfs_get_rail_by_name("vdd_core");
+ tegra_pclk = clk_get_sys(NULL, "pclk");
+ BUG_ON(IS_ERR(tegra_pclk));
+ pdata = plat;
+ (void)reg;
+ (void)mode;
+
+#ifndef CONFIG_PM_SLEEP
+ if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
+ pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
+ "disabling suspend\n", __func__);
+ plat->suspend_mode = TEGRA_SUSPEND_NONE;
+ }
+#else
+ if (create_suspend_pgtable() < 0) {
+ pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
+ __func__);
+ plat->suspend_mode = TEGRA_SUSPEND_NONE;
+ goto fail;
+ }
+
+ if (alloc_suspend_context() < 0) {
+ pr_err("%s: CPU context alloc failed -- LP0/LP1/LP2 unavailable\n",
+ __func__);
+ plat->suspend_mode = TEGRA_SUSPEND_NONE;
+ goto fail;
+ }
+
+ if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
+ (tegra_get_revision() == TEGRA_REVISION_A01) &&
+ (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
+ /* Tegra 3 A01 supports only LP1 */
+ pr_warning("%s: Suspend mode LP0 is not supported on A01 "
+ "-- disabling LP0\n", __func__);
+ plat->suspend_mode = TEGRA_SUSPEND_LP1;
+ }
+ if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
+ tegra_lp0_vec_relocate) {
+ unsigned char *reloc_lp0;
+ unsigned long tmp;
+ void __iomem *orig;
+ reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
+ GFP_KERNEL);
+ WARN_ON(!reloc_lp0);
+ if (!reloc_lp0) {
+ pr_err("%s: Failed to allocate reloc_lp0\n",
+ __func__);
+ goto out;
+ }
+
+ orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
+ WARN_ON(!orig);
+ if (!orig) {
+ pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
+ __func__, tegra_lp0_vec_start);
+ kfree(reloc_lp0);
+ goto out;
+ }
+
+ tmp = (unsigned long) reloc_lp0;
+ tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+ reloc_lp0 = (unsigned char *)tmp;
+ memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
+ iounmap(orig);
+ tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
+ }
+
+out:
+ if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
+ pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
+ "provided by bootlader -- disabling LP0\n",
+ __func__);
+ plat->suspend_mode = TEGRA_SUSPEND_LP1;
+ }
+
+ iram_save_size = tegra_iram_end() - tegra_iram_start();
+
+ iram_save = kmalloc(iram_save_size, GFP_KERNEL);
+ if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
+ pr_err("%s: unable to allocate memory for SDRAM self-refresh "
+ "-- LP0/LP1 unavailable\n", __func__);
+ plat->suspend_mode = TEGRA_SUSPEND_LP2;
+ }
+
+ /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
+ /* Initialize scratch registers used for CPU LP2 synchronization */
+ writel(0, pmc + PMC_SCRATCH37);
+ writel(0, pmc + PMC_SCRATCH38);
+ writel(0, pmc + PMC_SCRATCH39);
+ writel(0, pmc + PMC_SCRATCH41);
+
+ /* Always enable CPU power request; only normal polarity is supported */
+ reg = readl(pmc + PMC_CTRL);
+ BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
+ reg |= TEGRA_POWER_CPU_PWRREQ_OE;
+ pmc_32kwritel(reg, PMC_CTRL);
+
+ /* Configure core power request and system clock control if LP0
+ is supported */
+ __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
+ __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
+
+ reg = readl(pmc + PMC_CTRL);
+
+ if (!pdata->sysclkreq_high)
+ reg |= TEGRA_POWER_SYSCLK_POLARITY;
+ else
+ reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
+
+ if (!pdata->corereq_high)
+ reg |= TEGRA_POWER_PWRREQ_POLARITY;
+ else
+ reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
+
+ /* configure output inverters while the request is tristated */
+ pmc_32kwritel(reg, PMC_CTRL);
+
+ /* now enable requests */
+ reg |= TEGRA_POWER_SYSCLK_OE;
+ if (!pdata->combined_req)
+ reg |= TEGRA_POWER_PWRREQ_OE;
+ pmc_32kwritel(reg, PMC_CTRL);
+
+ if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
+ tegra_lp0_suspend_init();
+
+ suspend_set_ops(&tegra_suspend_ops);
+
+ /* Create /sys/power/suspend/mode */
+ suspend_kobj = kobject_create_and_add("suspend", power_kobj);
+ if (suspend_kobj) {
+ if (sysfs_create_file(suspend_kobj,
+ &suspend_mode_attribute.attr))
+ pr_err("%s: sysfs_create_file suspend mode failed!\n",
+ __func__);
+ }
+
+ iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
+ iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
+fail:
+#endif
+ if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
+ tegra_lp2_in_idle(false);
+
+ current_suspend_mode = plat->suspend_mode;
+}
+
+unsigned long debug_uart_port_base = 0;
+EXPORT_SYMBOL(debug_uart_port_base);
+
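+/*
+ * Save/restore the debug UART (8250-compatible, 32-bit register stride,
+ * hence the "* 4" offsets) across suspend, presumably so early console
+ * output keeps working after LP0/LP1 resume.
+ */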
+static int tegra_debug_uart_suspend(void)
+{
+ void __iomem *uart;
+ u32 lcr;
+
+ if (!debug_uart_port_base)
+ return 0;
+
+ uart = IO_ADDRESS(debug_uart_port_base);
+
+ lcr = readb(uart + UART_LCR * 4);
+
+ tegra_sctx.uart[0] = lcr;
+ tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);
+
+ /* DLAB = 0 */
+ writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
+
+ tegra_sctx.uart[2] = readb(uart + UART_IER * 4);
+
+ /* DLAB = 1 */
+ writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
+
+ tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
+ tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);
+
+ writeb(lcr, uart + UART_LCR * 4);
+
+ return 0;
+}
+
+static void tegra_debug_uart_resume(void)
+{
+ void __iomem *uart;
+ u32 lcr;
+
+ if (!debug_uart_port_base)
+ return;
+
+ uart = IO_ADDRESS(debug_uart_port_base);
+
+ lcr = tegra_sctx.uart[0];
+
+ writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);
+
+ /* DLAB = 0 */
+ writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
+
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
+ uart + UART_FCR * 4);
+
+ writeb(tegra_sctx.uart[2], uart + UART_IER * 4);
+
+ /* DLAB = 1 */
+ writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
+
+ writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
+ writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);
+
+ writeb(lcr, uart + UART_LCR * 4);
+}
+
+static struct syscore_ops tegra_debug_uart_syscore_ops = {
+ .suspend = tegra_debug_uart_suspend,
+ .resume = tegra_debug_uart_resume,
+};
+
+struct clk *debug_uart_clk = NULL;
+EXPORT_SYMBOL(debug_uart_clk);
+
+void tegra_console_uart_suspend(void)
+{
+ if (console_suspend_enabled && debug_uart_clk)
+ clk_disable(debug_uart_clk);
+}
+
+void tegra_console_uart_resume(void)
+{
+ if (console_suspend_enabled && debug_uart_clk)
+ clk_enable(debug_uart_clk);
+}
+
+static int tegra_debug_uart_syscore_init(void)
+{
+ register_syscore_ops(&tegra_debug_uart_syscore_ops);
+ return 0;
+}
+arch_initcall(tegra_debug_uart_syscore_init);
diff --git a/arch/arm/mach-tegra/pm.h b/arch/arm/mach-tegra/pm.h
new file mode 100644
index 000000000000..0ad1f24612cc
--- /dev/null
+++ b/arch/arm/mach-tegra/pm.h
@@ -0,0 +1,215 @@
+/*
+ * arch/arm/mach-tegra/pm.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#ifndef _MACH_TEGRA_PM_H_
+#define _MACH_TEGRA_PM_H_
+
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/clkdev.h>
+
+#include <mach/iomap.h>
+
+enum tegra_suspend_mode {
+ TEGRA_SUSPEND_NONE = 0,
+ TEGRA_SUSPEND_LP2, /* CPU voltage off */
+ TEGRA_SUSPEND_LP1, /* CPU voltage off, DRAM self-refresh */
+ TEGRA_SUSPEND_LP0, /* CPU + core voltage off, DRAM self-refresh */
+ TEGRA_MAX_SUSPEND_MODE,
+};
+
+enum suspend_stage {
+ TEGRA_SUSPEND_BEFORE_PERIPHERAL,
+ TEGRA_SUSPEND_BEFORE_CPU,
+};
+
+enum resume_stage {
+ TEGRA_RESUME_AFTER_PERIPHERAL,
+ TEGRA_RESUME_AFTER_CPU,
+};
+
+struct tegra_suspend_platform_data {
+ unsigned long cpu_timer; /* CPU power good time in us, LP2/LP1 */
+ unsigned long cpu_off_timer; /* CPU power off time us, LP2/LP1 */
+ unsigned long core_timer; /* core power good time in ticks, LP0 */
+ unsigned long core_off_timer; /* core power off time ticks, LP0 */
+ bool corereq_high; /* Core power request active-high */
+ bool sysclkreq_high; /* System clock request is active-high */
+ bool combined_req; /* if core & CPU power requests are combined */
+ enum tegra_suspend_mode suspend_mode;
+ unsigned long cpu_lp2_min_residency; /* Min LP2 state residency in us */
+ void (*board_suspend)(int lp_state, enum suspend_stage stg);
+ /* lp_state is the enum tegra_suspend_mode value being entered/exited */
+ void (*board_resume)(int lp_state, enum resume_stage stg);
+};
+
+/* Tegra io dpd entry - for each supported driver */
+struct tegra_io_dpd {
+ const char *name; /* driver name */
+ u8 io_dpd_reg_index; /* io dpd register index */
+ u8 io_dpd_bit; /* bit position for driver in dpd register */
+};
+
+unsigned long tegra_cpu_power_good_time(void);
+unsigned long tegra_cpu_power_off_time(void);
+unsigned long tegra_cpu_lp2_min_residency(void);
+void tegra_clear_cpu_in_lp2(int cpu);
+bool tegra_set_cpu_in_lp2(int cpu);
+
+int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags);
+
+#define FLOW_CTRL_CLUSTER_CONTROL \
+ (IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x2c)
+#define FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE (1<<3)
+#define FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER (1<<2)
+
+#define FUSE_SKU_DIRECT_CONFIG \
+ (IO_ADDRESS(TEGRA_FUSE_BASE) + 0x1F4)
+#define FUSE_SKU_DISABLE_ALL_CPUS (1<<5)
+#define FUSE_SKU_NUM_DISABLED_CPUS(x) (((x) >> 3) & 3)
+
+void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat);
+
+u64 tegra_rtc_read_ms(void);
+
+/*
+ * Callbacks for platform drivers to implement.
+ */
+extern void (*tegra_deep_sleep)(int);
+
+unsigned int tegra_idle_lp2_last(unsigned int us, unsigned int flags);
+
+#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+void tegra_lp0_suspend_mc(void);
+void tegra_lp0_resume_mc(void);
+void tegra_lp0_cpu_mode(bool enter);
+#else
+static inline void tegra_lp0_suspend_mc(void) {}
+static inline void tegra_lp0_resume_mc(void) {}
+static inline void tegra_lp0_cpu_mode(bool enter) {}
+#endif
+
+#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
+#define INSTRUMENT_CLUSTER_SWITCH 0 /* Should be zero for shipping code */
+#define DEBUG_CLUSTER_SWITCH 0 /* Should be zero for shipping code */
+#define PARAMETERIZE_CLUSTER_SWITCH 1 /* Should be zero for shipping code */
+
+static inline bool is_g_cluster_present(void)
+{
+ u32 fuse_sku = readl(FUSE_SKU_DIRECT_CONFIG);
+ if (fuse_sku & FUSE_SKU_DISABLE_ALL_CPUS)
+ return false;
+ return true;
+}
+static inline unsigned int is_lp_cluster(void)
+{
+ unsigned int reg;
+ reg = readl(FLOW_CTRL_CLUSTER_CONTROL);
+ return (reg & 1); /* 0 == G, 1 == LP */
+}
+int tegra_cluster_control(unsigned int us, unsigned int flags);
+void tegra_cluster_switch_prolog(unsigned int flags);
+void tegra_cluster_switch_epilog(unsigned int flags);
+#else
+#define INSTRUMENT_CLUSTER_SWITCH 0 /* Must be zero for ARCH_TEGRA_2x_SOC */
+#define DEBUG_CLUSTER_SWITCH 0 /* Must be zero for ARCH_TEGRA_2x_SOC */
+#define PARAMETERIZE_CLUSTER_SWITCH 0 /* Must be zero for ARCH_TEGRA_2x_SOC */
+
+static inline bool is_g_cluster_present(void) { return true; }
+static inline unsigned int is_lp_cluster(void) { return 0; }
+static inline int tegra_cluster_control(unsigned int us, unsigned int flags)
+{
+ return -EPERM;
+}
+static inline void tegra_cluster_switch_prolog(unsigned int flags) {}
+static inline void tegra_cluster_switch_epilog(unsigned int flags) {}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra2_lp0_suspend_init(void);
+void tegra2_lp2_set_trigger(unsigned long cycles);
+unsigned long tegra2_lp2_timer_remain(void);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra3_lp2_set_trigger(unsigned long cycles);
+unsigned long tegra3_lp2_timer_remain(void);
+#endif
+
+static inline void tegra_lp0_suspend_init(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_lp0_suspend_init();
+#endif
+}
+
+static inline void tegra_lp2_set_trigger(unsigned long cycles)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_lp2_set_trigger(cycles);
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ tegra3_lp2_set_trigger(cycles);
+#endif
+}
+
+static inline unsigned long tegra_lp2_timer_remain(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ return tegra2_lp2_timer_remain();
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ return tegra3_lp2_timer_remain();
+#endif
+}
+
+#if DEBUG_CLUSTER_SWITCH && 0 /* !!!FIXME!!! THIS IS BROKEN */
+extern unsigned int tegra_cluster_debug;
+#define DEBUG_CLUSTER(x) do { if (tegra_cluster_debug) printk x; } while (0)
+#else
+#define DEBUG_CLUSTER(x) do { } while (0)
+#endif
+#if PARAMETERIZE_CLUSTER_SWITCH
+void tegra_cluster_switch_set_parameters(unsigned int us, unsigned int flags);
+#else
+static inline void tegra_cluster_switch_set_parameters(
+ unsigned int us, unsigned int flags)
+{ }
+#endif
+
+#ifdef CONFIG_SMP
+extern bool tegra_all_cpus_booted __read_mostly;
+#else
+#define tegra_all_cpus_booted (true)
+#endif
+
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+void tegra_generic_smc(u32 type, u32 subtype, u32 arg);
+#endif
+
+/* The debug channel uart base physical address */
+extern unsigned long debug_uart_port_base;
+
+extern struct clk *debug_uart_clk;
+void tegra_console_uart_suspend(void);
+void tegra_console_uart_resume(void);
+
+#endif /* _MACH_TEGRA_PM_H_ */
diff --git a/arch/arm/mach-tegra/powerdetect.c b/arch/arm/mach-tegra/powerdetect.c
new file mode 100644
index 000000000000..6a2a57794589
--- /dev/null
+++ b/arch/arm/mach-tegra/powerdetect.c
@@ -0,0 +1,348 @@
+/*
+ * arch/arm/mach-tegra/powerdetect.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/iomap.h>
+
+#include "board.h"
+#include "fuse.h"
+
+#define PMC_PWR_IO_DISABLE 0x44
+#define PMC_PWR_DET_ENABLE 0x48
+#define PMC_PWR_DET_LATCH 0x4C
+#define PMC_PWR_DET_VAL 0xE4
+
+struct pwr_detect_cell {
+ const char *reg_id;
+ u32 pwrdet_mask;
+ u32 pwrio_mask;
+ u32 package_mask;
+
+ struct notifier_block regulator_nb;
+};
+
+static bool pwrdet_rails_found;
+static bool pwrdet_always_on;
+static bool pwrio_always_on;
+static u32 pwrdet_val;
+static u32 pwrio_val;
+static u32 pwrio_disabled_mask;
+
+static DEFINE_SPINLOCK(pwr_lock);
+
+static void __iomem *pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+
+static inline void pmc_writel(u32 val, unsigned long addr)
+{
+ writel(val, pmc_base + addr);
+}
+static inline u32 pmc_readl(unsigned long addr)
+{
+ return readl(pmc_base + addr);
+}
+
+
+#define POWER_CELL(_reg_id, _pwrdet_mask, _pwrio_mask, _package_mask) \
+ { \
+ .reg_id = _reg_id, \
+ .pwrdet_mask = _pwrdet_mask, \
+ .pwrio_mask = _pwrio_mask, \
+ .package_mask = _package_mask, \
+ }
+
+/* Some IO pads do not have power detect cells, but can/should still be
+ * turned off when unpowered - set pwrdet_mask=0 for such pads */
+static struct pwr_detect_cell pwr_detect_cells[] = {
+ POWER_CELL("pwrdet_nand", (0x1 << 1), (0x1 << 1), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_uart", (0x1 << 2), (0x1 << 2), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_bb", (0x1 << 3), (0x1 << 3), 0xFFFFFFFF),
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ POWER_CELL("pwrdet_vi", 0, (0x1 << 4), 0xFFFFFFFF),
+#else
+ /* Tegra3 VI is connected on MID package only (id = 1, mask = 0x2) */
+ POWER_CELL("pwrdet_vi", 0, (0x1 << 4), 0x00000002),
+#endif
+ POWER_CELL("pwrdet_audio", (0x1 << 5), (0x1 << 5), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_lcd", (0x1 << 6), (0x1 << 6), 0xFFFFFFFF),
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ POWER_CELL("pwrdet_sd", 0, (0x1 << 8), 0xFFFFFFFF),
+#endif
+ POWER_CELL("pwrdet_mipi", 0, (0x1 << 9), 0xFFFFFFFF),
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ POWER_CELL("pwrdet_cam", (0x1 << 10), (0x1 << 10), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_pex_ctl", (0x1 << 11), (0x1 << 11), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_sdmmc1", (0x1 << 12), (0x1 << 12), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_sdmmc3", (0x1 << 13), (0x1 << 13), 0xFFFFFFFF),
+ POWER_CELL("pwrdet_sdmmc4", 0, (0x1 << 14), 0xFFFFFFFF),
+#endif
+};
+
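+/*
+ * PMC power-detect sequence: enable the selected detect cells, give the
+ * sampled rail levels time to settle, latch the result into
+ * PMC_PWR_DET_VAL, then disable the cells again. The read-backs flush
+ * the posted register writes.
+ */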
+static void pwr_detect_reset(u32 pwrdet_mask)
+{
+ pmc_writel(pwrdet_mask, PMC_PWR_DET_ENABLE);
+ barrier();
+ pmc_writel(pwrdet_mask, PMC_PWR_DET_VAL);
+
+ pmc_readl(PMC_PWR_DET_VAL);
+ pmc_writel(0, PMC_PWR_DET_ENABLE);
+}
+
+static void pwr_detect_start(u32 pwrdet_mask)
+{
+ pmc_writel(pwrdet_mask, PMC_PWR_DET_ENABLE);
+ udelay(4);
+
+ pmc_writel(1, PMC_PWR_DET_LATCH);
+ pmc_readl(PMC_PWR_DET_LATCH);
+}
+
+static void pwr_detect_latch(void)
+{
+ pmc_writel(0, PMC_PWR_DET_LATCH);
+
+ pmc_readl(PMC_PWR_DET_VAL);
+ pmc_writel(0, PMC_PWR_DET_ENABLE);
+}
+
+static void pwr_io_enable(u32 pwrio_mask)
+{
+ u32 val = pmc_readl(PMC_PWR_IO_DISABLE);
+ val &= ~pwrio_mask;
+ pmc_writel(val, PMC_PWR_IO_DISABLE);
+}
+
+static void pwr_io_disable(u32 pwrio_mask)
+{
+ u32 val = pmc_readl(PMC_PWR_IO_DISABLE);
+ val |= pwrio_mask;
+ pmc_writel(val, PMC_PWR_IO_DISABLE);
+}
+
+static int pwrdet_always_on_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pwr_lock, flags);
+
+ ret = param_set_bool(arg, kp);
+ if (ret) {
+ spin_unlock_irqrestore(&pwr_lock, flags);
+ return ret;
+ }
+
+ if (pwrdet_always_on)
+ pwr_detect_start(0xFFFFFFFF);
+ else
+ pwr_detect_latch();
+
+ spin_unlock_irqrestore(&pwr_lock, flags);
+ return 0;
+}
+
+static int pwrio_always_on_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pwr_lock, flags);
+
+ ret = param_set_bool(arg, kp);
+ if (ret) {
+ spin_unlock_irqrestore(&pwr_lock, flags);
+ return ret;
+ }
+
+ if (pwrio_always_on)
+ pwr_io_enable(0xFFFFFFFF);
+ else
+ pwr_io_disable(pwrio_disabled_mask);
+
+ spin_unlock_irqrestore(&pwr_lock, flags);
+ return 0;
+}
+
+static int pwrdet_always_on_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops pwrdet_always_on_ops = {
+ .set = pwrdet_always_on_set,
+ .get = pwrdet_always_on_get,
+};
+static struct kernel_param_ops pwrio_always_on_ops = {
+ .set = pwrio_always_on_set,
+ .get = pwrdet_always_on_get,
+};
+module_param_cb(pwrdet_always_on, &pwrdet_always_on_ops,
+ &pwrdet_always_on, 0644);
+module_param_cb(pwrio_always_on, &pwrio_always_on_ops,
+ &pwrio_always_on, 0644);
+
+static int pwrdet_val_get(char *buffer, const struct kernel_param *kp)
+{
+ pwrdet_val = pmc_readl(PMC_PWR_DET_VAL);
+ return param_get_ulong(buffer, kp);
+}
+static struct kernel_param_ops pwrdet_val_ops = {
+ .get = pwrdet_val_get,
+};
+module_param_cb(pwrdet_val, &pwrdet_val_ops, &pwrdet_val, 0444);
+
+static int pwrio_val_get(char *buffer, const struct kernel_param *kp)
+{
+ pwrio_val = pmc_readl(PMC_PWR_IO_DISABLE);
+ return param_get_ulong(buffer, kp);
+}
+static struct kernel_param_ops pwrio_val_ops = {
+ .get = pwrio_val_get,
+};
+module_param_cb(pwrio_val, &pwrio_val_ops, &pwrio_val, 0444);
+
+
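+/*
+ * Regulator notifier: before a rail is enabled, re-enable the pads'
+ * I/O; before an enable or voltage change, reset detection; after the
+ * change completes, re-run detection and latch the new level; when the
+ * rail is disabled, disable the pads' I/O again.
+ */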
+static int pwrdet_notify_cb(
+ struct notifier_block *nb, unsigned long event, void *v)
+{
+ unsigned long flags;
+ struct pwr_detect_cell *cell;
+
+ if (!pwrdet_rails_found)
+ return NOTIFY_OK;
+
+ cell = container_of(nb, struct pwr_detect_cell, regulator_nb);
+
+ spin_lock_irqsave(&pwr_lock, flags);
+
+ if (event & REGULATOR_EVENT_PRE_ENABLE) {
+ pwrio_disabled_mask &= ~cell->pwrio_mask;
+ if (!pwrio_always_on)
+ pwr_io_enable(cell->pwrio_mask);
+ }
+ if (event & (REGULATOR_EVENT_PRE_ENABLE |
+ REGULATOR_EVENT_OUT_PRECHANGE)) {
+ if (!pwrdet_always_on && cell->pwrdet_mask)
+ pwr_detect_reset(cell->pwrdet_mask);
+ }
+ if (event & (REGULATOR_EVENT_POST_ENABLE |
+ REGULATOR_EVENT_OUT_POSTCHANGE)) {
+ if (!pwrdet_always_on && cell->pwrdet_mask) {
+ pwr_detect_start(cell->pwrdet_mask);
+ pwr_detect_latch();
+ }
+ }
+ if (event & (REGULATOR_EVENT_DISABLE |
+ REGULATOR_EVENT_FORCE_DISABLE)) {
+ pwrio_disabled_mask |= cell->pwrio_mask;
+ if (!pwrio_always_on)
+ pwr_io_disable(cell->pwrio_mask);
+ }
+
+ pr_debug("tegra: %s: event %lu, pwrdet 0x%x, pwrio 0x%x\n",
+ cell->reg_id, event,
+ pmc_readl(PMC_PWR_DET_VAL), pmc_readl(PMC_PWR_IO_DISABLE));
+ spin_unlock_irqrestore(&pwr_lock, flags);
+
+ return NOTIFY_OK;
+}
+
+static int __init pwr_detect_cell_init_one(
+ struct pwr_detect_cell *cell, u32 *disabled_mask)
+{
+ int ret;
+ struct regulator *regulator = regulator_get(NULL, cell->reg_id);
+
+ if (IS_ERR(regulator))
+ return PTR_ERR(regulator);
+
+ cell->regulator_nb.notifier_call = pwrdet_notify_cb;
+ ret = regulator_register_notifier(regulator, &cell->regulator_nb);
+ if (ret) {
+ regulator_put(regulator);
+ return ret;
+ }
+
+ if (!regulator_is_enabled(regulator))
+ *disabled_mask |= cell->pwrio_mask;
+
+ regulator_put(regulator);
+ return 0;
+}
+
+int __init tegra_pwr_detect_cell_init(void)
+{
+ int i, ret;
+ u32 package_mask;
+ unsigned long flags;
+ bool rails_found = true;
+
+ i = tegra_package_id();
+ if ((i != -1) && (i & (~0x1F))) {
+ pr_err("tegra: not supported package id %d - io power detection"
+ " is left always on\n", i);
+ return 0;
+ }
+ package_mask = (i == -1) ? i : (0x1 << i);
+
+ for (i = 0; i < ARRAY_SIZE(pwr_detect_cells); i++) {
+ struct pwr_detect_cell *cell = &pwr_detect_cells[i];
+
+ if (!(cell->package_mask & package_mask)) {
+ pwrio_disabled_mask |= cell->pwrio_mask;
+ continue;
+ }
+
+ ret = pwr_detect_cell_init_one(cell, &pwrio_disabled_mask);
+ if (ret) {
+ pr_err("tegra: failed to map regulator to power detect"
+ " cell %s(%d)\n", cell->reg_id, ret);
+ rails_found = false;
+ }
+ }
+
+ if (!rails_found) {
+ pr_err("tegra: failed regulators mapping - io power detection"
+ " is left always on\n");
+ return 0;
+ }
+ pwrdet_rails_found = true;
+
+ /* Latch the initial i/o power levels, disable all detection cells
+    and any interfaces that are not powered */
+ spin_lock_irqsave(&pwr_lock, flags);
+ if (!pwrdet_always_on)
+ pwr_detect_latch();
+ if (!pwrio_always_on)
+ pwr_io_disable(pwrio_disabled_mask);
+ spin_unlock_irqrestore(&pwr_lock, flags);
+
+ pr_info("tegra: started io power detection dynamic control\n");
+ pr_info("tegra: NO_IO_POWER setting 0x%x\n", pwrio_disabled_mask);
+
+ return 0;
+}
+
+fs_initcall(tegra_pwr_detect_cell_init);
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index 3cee9aa1f2c8..8efdf44dd3e8 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -2,6 +2,7 @@
* drivers/powergate/tegra-powergate.c
*
* Copyright (c) 2010 Google, Inc
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -31,15 +32,135 @@
#include <mach/iomap.h>
#include <mach/powergate.h>
+#include "clock.h"
+
#define PWRGATE_TOGGLE 0x30
-#define PWRGATE_TOGGLE_START (1 << 8)
+#define PWRGATE_TOGGLE_START (1 << 8)
#define REMOVE_CLAMPING 0x34
#define PWRGATE_STATUS 0x38
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+enum mc_client {
+ MC_CLIENT_AFI = 0,
+ MC_CLIENT_AVPC = 1,
+ MC_CLIENT_DC = 2,
+ MC_CLIENT_DCB = 3,
+ MC_CLIENT_EPP = 4,
+ MC_CLIENT_G2 = 5,
+ MC_CLIENT_HC = 6,
+ MC_CLIENT_HDA = 7,
+ MC_CLIENT_ISP = 8,
+ MC_CLIENT_MPCORE = 9,
+ MC_CLIENT_MPCORELP = 10,
+ MC_CLIENT_MPE = 11,
+ MC_CLIENT_NV = 12,
+ MC_CLIENT_NV2 = 13,
+ MC_CLIENT_PPCS = 14,
+ MC_CLIENT_SATA = 15,
+ MC_CLIENT_VDE = 16,
+ MC_CLIENT_VI = 17,
+ MC_CLIENT_LAST = -1,
+};
+#else
+enum mc_client {
+ MC_CLIENT_AVPC = 0,
+ MC_CLIENT_DC = 1,
+ MC_CLIENT_DCB = 2,
+ MC_CLIENT_EPP = 3,
+ MC_CLIENT_G2 = 4,
+ MC_CLIENT_HC = 5,
+ MC_CLIENT_ISP = 6,
+ MC_CLIENT_MPCORE = 7,
+ MC_CLIENT_MPEA = 8,
+ MC_CLIENT_MPEB = 9,
+ MC_CLIENT_MPEC = 10,
+ MC_CLIENT_NV = 11,
+ MC_CLIENT_PPCS = 12,
+ MC_CLIENT_VDE = 13,
+ MC_CLIENT_VI = 14,
+ MC_CLIENT_LAST = -1,
+ MC_CLIENT_AFI = MC_CLIENT_LAST,
+};
+#endif
+
+#define MAX_CLK_EN_NUM 4
+
static DEFINE_SPINLOCK(tegra_powergate_lock);
+#define MAX_HOTRESET_CLIENT_NUM 4
+
+enum clk_type {
+ CLK_AND_RST,
+ RST_ONLY,
+ CLK_ONLY,
+};
+
+struct partition_clk_info {
+ const char *clk_name;
+ enum clk_type clk_type;
+ /* RST_ONLY clocks are used only for reset assert/deassert, not for enable/disable */
+ struct clk *clk_ptr;
+};
+
+struct powergate_partition {
+ const char *name;
+ enum mc_client hot_reset_clients[MAX_HOTRESET_CLIENT_NUM];
+ struct partition_clk_info clk_info[MAX_CLK_EN_NUM];
+};
+
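+/*
+ * Per-partition table: partition name, the memory-controller clients
+ * that must be flushed/hot-reset around a power transition (terminated
+ * by MC_CLIENT_LAST), and the clocks/resets that belong to the
+ * partition.
+ */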
+static struct powergate_partition powergate_partition_info[TEGRA_NUM_POWERGATE] = {
+ [TEGRA_POWERGATE_CPU] = { "cpu0", {MC_CLIENT_LAST}, },
+ [TEGRA_POWERGATE_L2] = { "l2", {MC_CLIENT_LAST}, },
+ [TEGRA_POWERGATE_3D] = { "3d0",
+ {MC_CLIENT_NV, MC_CLIENT_LAST},
+ {{"3d", CLK_AND_RST} }, },
+ [TEGRA_POWERGATE_PCIE] = { "pcie",
+ {MC_CLIENT_AFI, MC_CLIENT_LAST},
+ {{"afi", CLK_AND_RST},
+ {"pcie", CLK_AND_RST},
+ {"pciex", RST_ONLY} }, },
+ [TEGRA_POWERGATE_VDEC] = { "vde",
+ {MC_CLIENT_VDE, MC_CLIENT_LAST},
+ {{"vde", CLK_AND_RST} }, },
+ [TEGRA_POWERGATE_MPE] = { "mpe",
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ {MC_CLIENT_MPE, MC_CLIENT_LAST},
+#else
+ {MC_CLIENT_MPEA, MC_CLIENT_MPEB,
+ MC_CLIENT_MPEC, MC_CLIENT_LAST},
+#endif
+ {{"mpe", CLK_AND_RST} }, },
+ [TEGRA_POWERGATE_VENC] = { "ve",
+ {MC_CLIENT_ISP, MC_CLIENT_VI, MC_CLIENT_LAST},
+ {{"isp", CLK_AND_RST},
+ {"vi", CLK_AND_RST},
+ {"csi", CLK_AND_RST} }, },
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ [TEGRA_POWERGATE_CPU1] = { "cpu1", {MC_CLIENT_LAST}, },
+ [TEGRA_POWERGATE_CPU2] = { "cpu2", {MC_CLIENT_LAST}, },
+ [TEGRA_POWERGATE_CPU3] = { "cpu3", {MC_CLIENT_LAST}, },
+ [TEGRA_POWERGATE_CELP] = { "celp", {MC_CLIENT_LAST}, },
+ [TEGRA_POWERGATE_SATA] = { "sata", {MC_CLIENT_SATA, MC_CLIENT_LAST},
+ {{"sata", CLK_AND_RST},
+ {"sata_oob", CLK_AND_RST},
+ {"cml1", CLK_ONLY},
+ {"sata_cold", RST_ONLY} }, },
+ [TEGRA_POWERGATE_3D1] = { "3d1",
+ {MC_CLIENT_NV2, MC_CLIENT_LAST},
+ {{"3d2", CLK_AND_RST} }, },
+ [TEGRA_POWERGATE_HEG] = { "heg",
+ {MC_CLIENT_G2, MC_CLIENT_EPP,
+ MC_CLIENT_HC,
+ MC_CLIENT_LAST},
+ {{"2d", CLK_AND_RST},
+ {"epp", CLK_AND_RST},
+ {"host1x", CLK_AND_RST},
+ {"3d", RST_ONLY} }, },
+#endif
+};
+
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
static u32 pmc_read(unsigned long reg)
@@ -52,40 +173,306 @@ static void pmc_write(u32 val, unsigned long reg)
writel(val, pmc + reg);
}
+static void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+
+static u32 mc_read(unsigned long reg)
+{
+ return readl(mc + reg);
+}
+
+static void mc_write(u32 val, unsigned long reg)
+{
+ writel(val, mc + reg);
+}
+
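+/*
+ * Tegra3+ hot reset: mc_flush() asserts hot reset for each of the
+ * partition's MC clients and polls until the hardware reports the
+ * client as flushed; mc_flush_done() releases the clients again.
+ */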
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
+#define MC_CLIENT_HOTRESET_CTRL 0x200
+#define MC_CLIENT_HOTRESET_STAT 0x204
+
+static void mc_flush(int id)
+{
+ u32 idx, rst_ctrl, rst_stat;
+ enum mc_client mcClientBit;
+ unsigned long flags;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+ mcClientBit = powergate_partition_info[id].hot_reset_clients[idx];
+ if (mcClientBit == MC_CLIENT_LAST)
+ break;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+ rst_ctrl = mc_read(MC_CLIENT_HOTRESET_CTRL);
+ rst_ctrl |= (1 << mcClientBit);
+ mc_write(rst_ctrl, MC_CLIENT_HOTRESET_CTRL);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+
+ do {
+ udelay(10);
+ rst_stat = mc_read(MC_CLIENT_HOTRESET_STAT);
+ } while (!(rst_stat & (1 << mcClientBit)));
+ }
+}
+
+static void mc_flush_done(int id)
+{
+ u32 idx, rst_ctrl;
+ enum mc_client mcClientBit;
+ unsigned long flags;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+ mcClientBit = powergate_partition_info[id].hot_reset_clients[idx];
+ if (mcClientBit == MC_CLIENT_LAST)
+ break;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+ rst_ctrl = mc_read(MC_CLIENT_HOTRESET_CTRL);
+ rst_ctrl &= ~(1 << mcClientBit);
+ mc_write(rst_ctrl, MC_CLIENT_HOTRESET_CTRL);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ }
+
+ wmb();
+}
+
+int tegra_powergate_mc_flush(int id)
+{
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return -EINVAL;
+ mc_flush(id);
+ return 0;
+}
+
+int tegra_powergate_mc_flush_done(int id)
+{
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return -EINVAL;
+ mc_flush_done(id);
+ return 0;
+}
+
+int tegra_powergate_mc_disable(int id)
+{
+ return 0;
+}
+
+int tegra_powergate_mc_enable(int id)
+{
+ return 0;
+}
+
+#else
+
+#define MC_CLIENT_CTRL 0x100
+#define MC_CLIENT_HOTRESETN 0x104
+#define MC_CLIENT_ORRC_BASE 0x140
+
+int tegra_powergate_mc_disable(int id)
+{
+ u32 idx, clt_ctrl, orrc_reg;
+ enum mc_client mcClientBit;
+ unsigned long flags;
+
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+ mcClientBit =
+ powergate_partition_info[id].hot_reset_clients[idx];
+ if (mcClientBit == MC_CLIENT_LAST)
+ break;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+ /* clear client enable bit */
+ clt_ctrl = mc_read(MC_CLIENT_CTRL);
+ clt_ctrl &= ~(1 << mcClientBit);
+ mc_write(clt_ctrl, MC_CLIENT_CTRL);
+
+ /* read back to flush write */
+ clt_ctrl = mc_read(MC_CLIENT_CTRL);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+
+ /* wait for outstanding requests to reach 0 */
+ orrc_reg = MC_CLIENT_ORRC_BASE + (mcClientBit * 4);
+ while (mc_read(orrc_reg) != 0)
+ udelay(10);
+ }
+ return 0;
+}
+
+int tegra_powergate_mc_flush(int id)
+{
+ u32 idx, hot_rstn;
+ enum mc_client mcClientBit;
+ unsigned long flags;
+
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+ mcClientBit =
+ powergate_partition_info[id].hot_reset_clients[idx];
+ if (mcClientBit == MC_CLIENT_LAST)
+ break;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+ /* assert hotreset (client module is currently in reset) */
+ hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+ hot_rstn &= ~(1 << mcClientBit);
+ mc_write(hot_rstn, MC_CLIENT_HOTRESETN);
+
+ /* read back to flush write */
+ hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ }
+ return 0;
+}
+
+int tegra_powergate_mc_flush_done(int id)
+{
+ u32 idx, hot_rstn;
+ enum mc_client mcClientBit;
+ unsigned long flags;
+
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+ mcClientBit =
+ powergate_partition_info[id].hot_reset_clients[idx];
+ if (mcClientBit == MC_CLIENT_LAST)
+ break;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+ /* deassert hotreset */
+ hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+ hot_rstn |= (1 << mcClientBit);
+ mc_write(hot_rstn, MC_CLIENT_HOTRESETN);
+
+ /* read back to flush write */
+ hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ }
+ return 0;
+}
+
+int tegra_powergate_mc_enable(int id)
+{
+ u32 idx, clt_ctrl;
+ enum mc_client mcClientBit;
+ unsigned long flags;
+
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+ mcClientBit =
+ powergate_partition_info[id].hot_reset_clients[idx];
+ if (mcClientBit == MC_CLIENT_LAST)
+ break;
+
+ spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+ /* enable client */
+ clt_ctrl = mc_read(MC_CLIENT_CTRL);
+ clt_ctrl |= (1 << mcClientBit);
+ mc_write(clt_ctrl, MC_CLIENT_CTRL);
+
+ /* read back to flush write */
+ clt_ctrl = mc_read(MC_CLIENT_CTRL);
+
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ }
+ return 0;
+}
+
+static void mc_flush(int id) {}
+static void mc_flush_done(int id) {}
+#endif
+
static int tegra_powergate_set(int id, bool new_state)
{
bool status;
unsigned long flags;
+ /* 10us timeout for the toggle operation to take effect */
+ int toggle_timeout = 10;
+ /* 100 * 10 = 1000us timeout for the toggle command to take effect
+    in case of contention with h/w initiated CPU power gating */
+ int contention_timeout = 100;
spin_lock_irqsave(&tegra_powergate_lock, flags);
- status = pmc_read(PWRGATE_STATUS) & (1 << id);
+ status = !!(pmc_read(PWRGATE_STATUS) & (1 << id));
if (status == new_state) {
spin_unlock_irqrestore(&tegra_powergate_lock, flags);
- return -EINVAL;
+ return 0;
+ }
+
+ if (TEGRA_IS_CPU_POWERGATE_ID(id)) {
+ /* CPUs are ungated in s/w only during boot/resume, where the
+    caller polls for completion and there is no contention from
+    other CPUs */
+ pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ return 0;
}
- pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ do {
+ pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ do {
+ udelay(1);
+ status = !!(pmc_read(PWRGATE_STATUS) & (1 << id));
+
+ toggle_timeout--;
+ } while ((status != new_state) && (toggle_timeout > 0));
+
+ contention_timeout--;
+ } while ((status != new_state) && (contention_timeout > 0));
spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+ if (status != new_state) {
+ WARN(1, "Could not set powergate %d to %d", id, new_state);
+ return -EBUSY;
+ }
+
return 0;
}
-int tegra_powergate_power_on(int id)
+static int unpowergate_module(int id)
{
if (id < 0 || id >= TEGRA_NUM_POWERGATE)
return -EINVAL;
-
return tegra_powergate_set(id, true);
}
-int tegra_powergate_power_off(int id)
+static int powergate_module(int id)
{
if (id < 0 || id >= TEGRA_NUM_POWERGATE)
return -EINVAL;
+ mc_flush(id);
return tegra_powergate_set(id, false);
}
@@ -94,7 +481,7 @@ bool tegra_powergate_is_powered(int id)
u32 status;
if (id < 0 || id >= TEGRA_NUM_POWERGATE)
- return -EINVAL;
+ return false;
status = pmc_read(PWRGATE_STATUS) & (1 << id);
return !!status;
@@ -103,17 +490,16 @@ bool tegra_powergate_is_powered(int id)
int tegra_powergate_remove_clamping(int id)
{
u32 mask;
-
if (id < 0 || id >= TEGRA_NUM_POWERGATE)
return -EINVAL;
/*
- * Tegra 2 has a bug where PCIE and VDE clamping masks are
- * swapped relatively to the partition ids
+ * PCIE and VDE clamping masks are swapped with respect to their
+ * partition ids
*/
if (id == TEGRA_POWERGATE_VDEC)
mask = (1 << TEGRA_POWERGATE_PCIE);
- else if (id == TEGRA_POWERGATE_PCIE)
+ else if (id == TEGRA_POWERGATE_PCIE)
mask = (1 << TEGRA_POWERGATE_VDEC);
else
mask = (1 << id);
@@ -123,20 +509,184 @@ int tegra_powergate_remove_clamping(int id)
return 0;
}
-/* Must be called with clk disabled, and returns with clk enabled */
-int tegra_powergate_sequence_power_up(int id, struct clk *clk)
+static void get_clk_info(int id)
+{
+ int idx;
+
+ for (idx = 0; idx < MAX_CLK_EN_NUM; idx++) {
+ if (!powergate_partition_info[id].clk_info[idx].clk_name)
+ break;
+ powergate_partition_info[id].
+ clk_info[idx].clk_ptr =
+ tegra_get_clock_by_name(
+ powergate_partition_info[id].clk_info[idx].clk_name);
+ }
+}
+
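+/*
+ * Enable every clock listed for the partition except RST_ONLY entries;
+ * on failure, roll back the clocks that were already enabled.
+ */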
+static int partition_clk_enable(int id)
+{
+ int ret;
+ u32 idx;
+ struct clk *clk;
+ struct partition_clk_info *clk_info;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_CLK_EN_NUM; idx++) {
+ clk_info = &powergate_partition_info[id].clk_info[idx];
+ clk = clk_info->clk_ptr;
+ if (!clk)
+ break;
+
+ if (clk_info->clk_type != RST_ONLY) {
+ ret = clk_enable(clk);
+ if (ret)
+ goto err_clk_en;
+ }
+ }
+
+ return 0;
+
+err_clk_en:
+ WARN(1, "Could not enable clk %s", clk->name);
+ while (idx--) {
+ clk_info = &powergate_partition_info[id].clk_info[idx];
+ if (clk_info->clk_type != RST_ONLY)
+ clk_disable(clk_info->clk_ptr);
+ }
+
+ return ret;
+}
+
+static int is_partition_clk_disabled(int id)
+{
+ u32 idx;
+ struct clk *clk;
+ struct partition_clk_info *clk_info;
+ int ret = 0;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_CLK_EN_NUM; idx++) {
+ clk_info = &powergate_partition_info[id].clk_info[idx];
+ clk = clk_info->clk_ptr;
+ if (!clk)
+ break;
+
+ if (clk_info->clk_type != RST_ONLY) {
+ if (tegra_is_clk_enabled(clk)) {
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void partition_clk_disable(int id)
+{
+ u32 idx;
+ struct clk *clk;
+ struct partition_clk_info *clk_info;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_CLK_EN_NUM; idx++) {
+ clk_info = &powergate_partition_info[id].clk_info[idx];
+ clk = clk_info->clk_ptr;
+ if (!clk)
+ break;
+
+ if (clk_info->clk_type != RST_ONLY)
+ clk_disable(clk);
+ }
+}
+
+static void powergate_partition_assert_reset(int id)
+{
+ u32 idx;
+ struct clk *clk_ptr;
+ struct partition_clk_info *clk_info;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_CLK_EN_NUM; idx++) {
+ clk_info = &powergate_partition_info[id].clk_info[idx];
+ clk_ptr = clk_info->clk_ptr;
+ if (!clk_ptr)
+ break;
+ if (clk_info->clk_type != CLK_ONLY)
+ tegra_periph_reset_assert(clk_ptr);
+ }
+}
+
+static void powergate_partition_deassert_reset(int id)
+{
+ u32 idx;
+ struct clk *clk_ptr;
+ struct partition_clk_info *clk_info;
+
+ BUG_ON(id < 0 || id >= TEGRA_NUM_POWERGATE);
+
+ for (idx = 0; idx < MAX_CLK_EN_NUM; idx++) {
+ clk_info = &powergate_partition_info[id].clk_info[idx];
+ clk_ptr = clk_info->clk_ptr;
+ if (!clk_ptr)
+ break;
+ if (clk_info->clk_type != CLK_ONLY)
+ tegra_periph_reset_deassert(clk_ptr);
+ }
+}
+
+/* Must be called with clk disabled, and returns with clk disabled */
+static int tegra_powergate_reset_module(int id)
+{
+ int ret;
+
+ powergate_partition_assert_reset(id);
+
+ udelay(10);
+
+ ret = partition_clk_enable(id);
+ if (ret)
+ return ret;
+
+ udelay(10);
+
+ powergate_partition_deassert_reset(id);
+
+ partition_clk_disable(id);
+
+ return 0;
+}
+
+/*
+ * Must be called with clk disabled, and returns with clk disabled
+ * Drivers should enable clks for partition. Unpowergates only the
+ * partition.
+ */
+int tegra_unpowergate_partition(int id)
{
int ret;
- tegra_periph_reset_assert(clk);
+ /* If first clk_ptr is null, fill clk info for the partition */
+ if (!powergate_partition_info[id].clk_info[0].clk_ptr)
+ get_clk_info(id);
- ret = tegra_powergate_power_on(id);
+ if (tegra_powergate_is_powered(id))
+ return tegra_powergate_reset_module(id);
+
+ ret = unpowergate_module(id);
if (ret)
goto err_power;
- ret = clk_enable(clk);
+ powergate_partition_assert_reset(id);
+
+ /* Un-powergating fails unless all of the partition's clks can be enabled */
+ ret = partition_clk_enable(id);
if (ret)
- goto err_clk;
+ goto err_clk_on;
udelay(10);
@@ -145,29 +695,130 @@ int tegra_powergate_sequence_power_up(int id, struct clk *clk)
goto err_clamp;
udelay(10);
- tegra_periph_reset_deassert(clk);
+ powergate_partition_deassert_reset(id);
+
+ mc_flush_done(id);
+
+ /* Disable all clks enabled earlier. Drivers should enable clks */
+ partition_clk_disable(id);
return 0;
err_clamp:
- clk_disable(clk);
-err_clk:
- tegra_powergate_power_off(id);
+ partition_clk_disable(id);
+err_clk_on:
+ powergate_module(id);
err_power:
+ WARN(1, "Could not Un-Powergate %d", id);
return ret;
}
-#ifdef CONFIG_DEBUG_FS
+/*
+ * Must be called with clk disabled, and returns with clk enabled
+ * Unpowergates the partition and enables all required clks.
+ */
+int tegra_unpowergate_partition_with_clk_on(int id)
+{
+ int ret = 0;
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Restrict this function's use to a few partitions */
+ BUG_ON(id != TEGRA_POWERGATE_SATA && id != TEGRA_POWERGATE_PCIE);
+#else
+ /* Restrict this function's use to a few partitions */
+ BUG_ON(id != TEGRA_POWERGATE_PCIE);
+#endif
-static const char * const powergate_name[] = {
- [TEGRA_POWERGATE_CPU] = "cpu",
- [TEGRA_POWERGATE_3D] = "3d",
- [TEGRA_POWERGATE_VENC] = "venc",
- [TEGRA_POWERGATE_VDEC] = "vdec",
- [TEGRA_POWERGATE_PCIE] = "pcie",
- [TEGRA_POWERGATE_L2] = "l2",
- [TEGRA_POWERGATE_MPE] = "mpe",
-};
+ ret = tegra_unpowergate_partition(id);
+ if (ret)
+ goto err_unpowergating;
+
+ /* Enable clks for the partition */
+ ret = partition_clk_enable(id);
+ if (ret)
+ goto err_unpowergate_clk;
+
+ return ret;
+
+err_unpowergate_clk:
+ tegra_powergate_partition(id);
+ WARN(1, "Could not Un-Powergate %d, err in enabling clk", id);
+err_unpowergating:
+ WARN(1, "Could not Un-Powergate %d", id);
+ return ret;
+}
+
+/*
+ * Must be called with clk disabled. Powergates the partition only
+ */
+int tegra_powergate_partition(int id)
+{
+ int ret;
+
+ /* If first clk_ptr is null, fill clk info for the partition */
+ if (!powergate_partition_info[id].clk_info[0].clk_ptr)
+ get_clk_info(id);
+ powergate_partition_assert_reset(id);
+
+ /* Powergating is done only if refcnt of all clks is 0 */
+ ret = is_partition_clk_disabled(id);
+ if (ret)
+ goto err_clk_off;
+
+ ret = powergate_module(id);
+ if (ret)
+ goto err_power_off;
+
+ return 0;
+
+err_power_off:
+ WARN(1, "Could not Powergate Partition %d", id);
+err_clk_off:
+ WARN(1, "Could not Powergate Partition %d, all clks not disabled", id);
+ return ret;
+}
+
+int tegra_powergate_partition_with_clk_off(int id)
+{
+ int ret = 0;
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Restrict this function's use to selected partitions */
+ BUG_ON(id != TEGRA_POWERGATE_PCIE && id != TEGRA_POWERGATE_SATA);
+#else
+ /* Restrict this function's use to selected partitions */
+ BUG_ON(id != TEGRA_POWERGATE_PCIE);
+#endif
+ /* Disable clks for the partition */
+ partition_clk_disable(id);
+
+ ret = is_partition_clk_disabled(id);
+ if (ret)
+ goto err_powergate_clk;
+
+ ret = tegra_powergate_partition(id);
+ if (ret)
+ goto err_powergating;
+
+ return ret;
+
+err_powergate_clk:
+ WARN(1, "Could not Powergate Partition %d, all clks not disabled", id);
+err_powergating:
+ partition_clk_enable(id);
+ WARN(1, "Could not Powergate Partition %d", id);
+ return ret;
+}
+
+const char *tegra_powergate_get_name(int id)
+{
+ if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+ return "invalid";
+
+ return powergate_partition_info[id].name;
+}
+
+#ifdef CONFIG_DEBUG_FS
static int powergate_show(struct seq_file *s, void *data)
{
@@ -177,7 +828,7 @@ static int powergate_show(struct seq_file *s, void *data)
seq_printf(s, "------------------\n");
for (i = 0; i < TEGRA_NUM_POWERGATE; i++)
- seq_printf(s, " %9s %7s\n", powergate_name[i],
+ seq_printf(s, " %9s %7s\n", powergate_partition_info[i].name,
tegra_powergate_is_powered(i) ? "yes" : "no");
return 0;
}
@@ -197,14 +848,13 @@ static const struct file_operations powergate_fops = {
static int __init powergate_debugfs_init(void)
{
struct dentry *d;
- int err = -ENOMEM;
d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
&powergate_fops);
if (!d)
return -ENOMEM;
- return err;
+ return 0;
}
late_initcall(powergate_debugfs_init);
diff --git a/arch/arm/mach-tegra/pwm.c b/arch/arm/mach-tegra/pwm.c
new file mode 100644
index 000000000000..a268c391cb27
--- /dev/null
+++ b/arch/arm/mach-tegra/pwm.c
@@ -0,0 +1,296 @@
+/*
+ * arch/arm/mach-tegra/pwm.c
+ *
+ * Tegra pulse-width-modulation controller driver
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ * Based on arch/arm/plat-mxc/pwm.c by Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define PWM_ENABLE (1 << 31)
+#define PWM_DUTY_WIDTH 8
+#define PWM_DUTY_SHIFT 16
+#define PWM_SCALE_WIDTH 13
+#define PWM_SCALE_SHIFT 0
+
+struct pwm_device {
+ struct list_head node;
+ struct platform_device *pdev;
+
+ const char *label;
+ struct clk *clk;
+
+ int clk_enb;
+ void __iomem *mmio_base;
+
+ unsigned int in_use;
+ unsigned int id;
+};
+
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_list);
+
+static inline int pwm_writel(struct pwm_device *pwm, unsigned long val)
+{
+ int rc;
+
+ rc = clk_enable(pwm->clk);
+ if (WARN_ON(rc))
+ return rc;
+ writel(val, pwm->mmio_base);
+ clk_disable(pwm->clk);
+ return 0;
+}
+
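+/*
+ * The PWM control register packs an 8-bit duty cycle at bits 16..23, a
+ * 13-bit frequency divider at bits 0..12 and the enable bit at bit 31;
+ * pwm_config() computes the duty and divider fields from the requested
+ * duty_ns/period_ns.
+ */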
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long long c;
+ unsigned long rate, hz;
+ u32 val = 0;
+
+ /* convert from duty_ns / period_ns to a fixed number of duty
+ * ticks per (1 << PWM_DUTY_WIDTH) cycles. */
+ c = duty_ns * ((1 << PWM_DUTY_WIDTH) - 1);
+ do_div(c, period_ns);
+
+ val = (u32)c << PWM_DUTY_SHIFT;
+
+ /* compute the prescaler value for which (1 << PWM_DUTY_WIDTH)
+ * cycles at the PWM clock rate will take period_ns nanoseconds. */
+ rate = clk_get_rate(pwm->clk) >> PWM_DUTY_WIDTH;
+ hz = 1000000000ul / period_ns;
+
+ rate = (rate + (hz / 2)) / hz;
+
+ if (rate >> PWM_SCALE_WIDTH)
+ return -EINVAL;
+ /* The PWM divider is zero-based, so subtract 1 to get the
+    desired frequency */
+ if (rate > 0)
+ rate--;
+
+ val |= (rate << PWM_SCALE_SHIFT);
+
+ /* the struct clk may be shared across multiple PWM devices, so
+ * only enable the PWM if this device has been enabled */
+ if (pwm->clk_enb)
+ val |= PWM_ENABLE;
+
+ return pwm_writel(pwm, val);
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ int rc = 0;
+
+ mutex_lock(&pwm_lock);
+ if (!pwm->clk_enb) {
+ rc = clk_enable(pwm->clk);
+ if (!rc) {
+ u32 val = readl(pwm->mmio_base);
+ writel(val | PWM_ENABLE, pwm->mmio_base);
+ pwm->clk_enb = 1;
+ }
+ }
+ mutex_unlock(&pwm_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+ if (pwm->clk_enb) {
+ u32 val = readl(pwm->mmio_base);
+ writel(val & ~PWM_ENABLE, pwm->mmio_base);
+ clk_disable(pwm->clk);
+ pwm->clk_enb = 0;
+ } else
+ dev_warn(&pwm->pdev->dev, "%s called on disabled PWM\n",
+ __func__);
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_disable);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int found = 0;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->id == pwm_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ if (!pwm->in_use) {
+ pwm->in_use = 1;
+ pwm->label = label;
+ } else
+ pwm = ERR_PTR(-EBUSY);
+ } else
+ pwm = ERR_PTR(-ENOENT);
+
+ mutex_unlock(&pwm_lock);
+
+ return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+ if (pwm->in_use) {
+ pwm->in_use = 0;
+ pwm->label = NULL;
+ } else
+ dev_warn(&pwm->pdev->dev, "PWM device already freed\n");
+
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_free);
+
+static int tegra_pwm_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct resource *r;
+ int ret;
+
+ pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
+ if (!pwm) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ pwm->clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(pwm->clk)) {
+ ret = PTR_ERR(pwm->clk);
+ goto err_free;
+ }
+
+ pwm->clk_enb = 0;
+ pwm->in_use = 0;
+ pwm->id = pdev->id;
+ pwm->pdev = pdev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resources defined\n");
+ ret = -ENODEV;
+ goto err_put_clk;
+ }
+
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
+ if (!r) {
+ dev_err(&pdev->dev, "failed to request memory\n");
+ ret = -EBUSY;
+ goto err_put_clk;
+ }
+
+ pwm->mmio_base = ioremap(r->start, resource_size(r));
+ if (!pwm->mmio_base) {
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
+ ret = -ENODEV;
+ goto err_free_mem;
+ }
+
+ platform_set_drvdata(pdev, pwm);
+
+ mutex_lock(&pwm_lock);
+ list_add_tail(&pwm->node, &pwm_list);
+ mutex_unlock(&pwm_lock);
+
+ return 0;
+
+err_free_mem:
+ release_mem_region(r->start, resource_size(r));
+err_put_clk:
+ clk_put(pwm->clk);
+err_free:
+ kfree(pwm);
+ return ret;
+}
+
+static int __devexit tegra_pwm_remove(struct platform_device *pdev)
+{
+ struct pwm_device *pwm = platform_get_drvdata(pdev);
+ struct resource *r;
+ int rc;
+
+ if (WARN_ON(!pwm))
+ return -ENODEV;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mutex_lock(&pwm_lock);
+ if (pwm->in_use) {
+ mutex_unlock(&pwm_lock);
+ return -EBUSY;
+ }
+ list_del(&pwm->node);
+ mutex_unlock(&pwm_lock);
+
+ rc = pwm_writel(pwm, 0);
+
+ iounmap(pwm->mmio_base);
+ release_mem_region(r->start, resource_size(r));
+
+ if (pwm->clk_enb)
+ clk_disable(pwm->clk);
+
+ clk_put(pwm->clk);
+
+ kfree(pwm);
+ return rc;
+}
+
+static struct platform_driver tegra_pwm_driver = {
+ .driver = {
+ .name = "tegra_pwm",
+ },
+ .probe = tegra_pwm_probe,
+ .remove = __devexit_p(tegra_pwm_remove),
+};
+
+static int __init tegra_pwm_init(void)
+{
+ return platform_driver_register(&tegra_pwm_driver);
+}
+subsys_initcall(tegra_pwm_init);
+
+static void __exit tegra_pwm_exit(void)
+{
+ platform_driver_unregister(&tegra_pwm_driver);
+}
+module_exit(tegra_pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("NVIDIA Corporation");
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
new file mode 100644
index 000000000000..c44a3de07873
--- /dev/null
+++ b/arch/arm/mach-tegra/reset.c
@@ -0,0 +1,116 @@
+/*
+ * arch/arm/mach-tegra/reset.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <mach/iomap.h>
+
+#include "reset.h"
+#include "sleep.h"
+#include "pm.h"
+
+static bool is_enabled;
+
+static void tegra_cpu_reset_handler_enable(void)
+{
+ void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_BASE);
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ void __iomem *evp_cpu_reset =
+ IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE + 0x100);
+ void __iomem *sb_ctrl = IO_ADDRESS(TEGRA_SB_BASE);
+ unsigned long reg;
+#endif
+ BUG_ON(is_enabled);
+ BUG_ON(tegra_cpu_reset_handler_size > TEGRA_RESET_HANDLER_SIZE);
+
+ memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start,
+ tegra_cpu_reset_handler_size);
+
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+ tegra_generic_smc(0xFFFFF200,
+ TEGRA_RESET_HANDLER_BASE + tegra_cpu_reset_handler_offset, 0);
+#else
+ /* NOTE: This must be the one and only write to the EVP CPU reset
+ vector in the entire system. */
+ writel(TEGRA_RESET_HANDLER_BASE + tegra_cpu_reset_handler_offset,
+ evp_cpu_reset);
+ wmb();
+ reg = readl(evp_cpu_reset);
+
+ /* Prevent further modifications to the physical reset vector.
+ NOTE: Has no effect on chips prior to Tegra3. */
+ reg = readl(sb_ctrl);
+ reg |= 2;
+ writel(reg, sb_ctrl);
+ wmb();
+#endif
+ is_enabled = true;
+}
+
+#ifdef CONFIG_PM_SLEEP
+void tegra_cpu_reset_handler_save(void)
+{
+ unsigned int i;
+ BUG_ON(!is_enabled);
+ for (i = 0; i < TEGRA_RESET_DATA_SIZE; i++)
+ __tegra_cpu_reset_handler_data[i] =
+ tegra_cpu_reset_handler_ptr[i];
+ is_enabled = false;
+}
+
+void tegra_cpu_reset_handler_restore(void)
+{
+ unsigned int i;
+ BUG_ON(is_enabled);
+ tegra_cpu_reset_handler_enable();
+ for (i = 0; i < TEGRA_RESET_DATA_SIZE; i++)
+ tegra_cpu_reset_handler_ptr[i] =
+ __tegra_cpu_reset_handler_data[i];
+ is_enabled = true;
+}
+#endif
+
+void __init tegra_cpu_reset_handler_init(void)
+{
+#ifdef CONFIG_SMP
+ __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
+ *((u32 *)cpu_present_mask);
+ __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
+ virt_to_phys((void *)tegra_secondary_startup);
+#endif
+#ifdef CONFIG_PM_SLEEP
+ __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
+ TEGRA_IRAM_CODE_AREA;
+ __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
+ virt_to_phys((void *)tegra_resume);
+#endif
+
+ /* Push all of reset handler data out to the L3 memory system. */
+ __cpuc_coherent_kern_range(
+ (unsigned long)&__tegra_cpu_reset_handler_data[0],
+ (unsigned long)&__tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]);
+
+ outer_clean_range(__pa(&__tegra_cpu_reset_handler_data[0]),
+ __pa(&__tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]));
+
+ tegra_cpu_reset_handler_enable();
+}
diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
new file mode 100644
index 000000000000..44a671160e57
--- /dev/null
+++ b/arch/arm/mach-tegra/reset.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/mach-tegra/reset.h
+ *
+ * CPU reset dispatcher.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_RESET_H
+#define __MACH_TEGRA_RESET_H
+
+#define TEGRA_RESET_MASK_PRESENT 0
+#define TEGRA_RESET_MASK_LP1 1
+#define TEGRA_RESET_MASK_LP2 2
+#define TEGRA_RESET_STARTUP_SECONDARY 3
+#define TEGRA_RESET_STARTUP_LP2 4
+#define TEGRA_RESET_STARTUP_LP1 5
+#define TEGRA_RESET_DATA_SIZE 6
+
+#ifndef __ASSEMBLY__
+
+#include <linux/cpumask.h>
+
+extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
+
+void __tegra_cpu_reset_handler_start(void);
+void __tegra_cpu_reset_handler(void);
+void __tegra_cpu_reset_handler_end(void);
+void tegra_secondary_startup(void);
+
+#ifdef CONFIG_PM_SLEEP
+#define tegra_cpu_lp1_mask ((unsigned long *)(IO_ADDRESS(TEGRA_RESET_HANDLER_BASE + \
+ ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP1] - \
+ (u32)__tegra_cpu_reset_handler_start))))
+
+#define tegra_cpu_reset_handler_ptr ((u32 *)(IO_ADDRESS(TEGRA_RESET_HANDLER_BASE + \
+ ((u32)__tegra_cpu_reset_handler_data - \
+ (u32)__tegra_cpu_reset_handler_start))))
+
+#define tegra_cpu_lp2_mask ((cpumask_t *)(IO_ADDRESS(TEGRA_RESET_HANDLER_BASE + \
+ ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
+ (u32)__tegra_cpu_reset_handler_start))))
+#endif
+
+#define tegra_cpu_reset_handler_offset \
+ ((u32)__tegra_cpu_reset_handler - \
+ (u32)__tegra_cpu_reset_handler_start)
+
+#define tegra_cpu_reset_handler_size \
+ (__tegra_cpu_reset_handler_end - \
+ __tegra_cpu_reset_handler_start)
+
+void __init tegra_cpu_reset_handler_init(void);
+
+#ifdef CONFIG_PM_SLEEP
+void tegra_cpu_reset_handler_save(void);
+void tegra_cpu_reset_handler_restore(void);
+#endif
+#endif
+#endif
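
Since the accessors in this header hide some pointer arithmetic, here is the idea behind tegra_cpu_reset_handler_ptr spelled out as a function. This is an illustrative re-expression only (it assumes IO_ADDRESS() and TEGRA_RESET_HANDLER_BASE from the mach headers), not an addition to the patch:

/* Sketch only: the reset-handler blob is copied to TEGRA_RESET_HANDLER_BASE in
 * IRAM, so a symbol keeps its offset within the blob and its IRAM alias is
 * simply base + offset. */
static inline u32 *reset_handler_data_in_iram(void)
{
	u32 offset = (u32)__tegra_cpu_reset_handler_data -
		     (u32)__tegra_cpu_reset_handler_start;

	return (u32 *)IO_ADDRESS(TEGRA_RESET_HANDLER_BASE + offset);
}

tegra_cpu_reset_handler_save() and _restore() in reset.c use exactly this alias (via tegra_cpu_reset_handler_ptr) to copy the data words between the kernel image and the live IRAM copy.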
diff --git a/arch/arm/mach-tegra/sleep-t2.S b/arch/arm/mach-tegra/sleep-t2.S
new file mode 100644
index 000000000000..01791439426b
--- /dev/null
+++ b/arch/arm/mach-tegra/sleep-t2.S
@@ -0,0 +1,569 @@
+/*
+ * arch/arm/mach-tegra/sleep-t2.S
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/const.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "asm_macros.h"
+#include "sleep.h"
+
+#define EMC_CFG 0xc
+#define EMC_ADR_CFG 0x10
+#define EMC_REFRESH 0x70
+#define EMC_NOP 0xdc
+#define EMC_SELF_REF 0xe0
+#define EMC_REQ_CTRL 0x2b0
+#define EMC_EMC_STATUS 0x2b4
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_SCLK_BURST 0x28
+#define CLK_RESET_SCLK_DIVIDER 0x2c
+
+#define CLK_RESET_PLLC_BASE 0x80
+#define CLK_RESET_PLLM_BASE 0x90
+#define CLK_RESET_PLLP_BASE 0xa0
+#define CLK_RESET_PLLP_OUTA 0xa4
+#define CLK_RESET_PLLP_OUTB 0xa8
+#define CLK_RESET_PLLP_MISC 0xac
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+
+#define CLK_RESET_RST_CPU_CMPLX_SET 0x340
+
+#define TEGRA_PMC_VIRT (TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
+#define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS + IO_CPU_VIRT)
+#define TEGRA_CLK_RESET_VIRT (TEGRA_CLK_RESET_BASE - IO_PPSB_PHYS + IO_PPSB_VIRT)
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * tegra2_hotplug_shutdown(void)
+ *
+ * puts the current cpu in reset
+ * should never return
+ */
+ENTRY(tegra2_hotplug_shutdown)
+ mov r6, lr
+ bl tegra_cpu_exit_coherency
+
+ /* Put this CPU into reset. */
+ cpu_id r0
+ bl tegra2_cpu_reset
+ mov pc, r6
+ENDPROC(tegra2_hotplug_shutdown)
+#endif
+
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP)
+/*
+ * tegra2_cpu_reset(int cpu)
+ *
+ * r0 is cpu to reset
+ *
+ * puts the specified CPU in wait-for-event mode on the flow controller
+ * and puts the CPU in reset
+ * can be called on the current cpu or another cpu
+ * if called on the current cpu, does not return
+ * MUST NOT BE CALLED FOR CPU 0.
+ *
+ * corrupts r0-r3, r12
+ */
+ENTRY(tegra2_cpu_reset)
+ cmp r0, #0
+ moveq pc, lr @ must not be called for CPU 0
+
+ mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ mov r12, #CPU_RESETTABLE
+ str r12, [r1]
+
+ cpu_to_halt_reg r1, r0
+ mov32 r3, TEGRA_FLOW_CTRL_VIRT
+ mov r2, #FLOW_CTRL_WAITEVENT | FLOW_CTRL_JTAG_RESUME
+ str r2, [r3, r1] @ put flow controller in wait event mode
+ ldr r2, [r3, r1]
+ isb
+ dsb
+ movw r1, 0x1011
+ mov r1, r1, lsl r0
+ mov32 r3, TEGRA_CLK_RESET_VIRT
+ str r1, [r3, #CLK_RESET_RST_CPU_CMPLX_SET] @ put slave CPU in reset
+ isb
+ dsb
+ cpu_id r3
+ cmp r3, r0
+ beq .
+ mov pc, lr
+ENDPROC(tegra2_cpu_reset)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * tegra2_cpu_clear_resettable(void)
+ *
+ * Called to clear the "resettable" flag in PMC_SCRATCH41 once the
+ * secondary CPU is no longer expected to be idle soon.
+ */
+ENTRY(tegra2_cpu_clear_resettable)
+ mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ mov r12, #CPU_NOT_RESETTABLE
+ str r12, [r1]
+ mov pc, lr
+ENDPROC(tegra2_cpu_clear_resettable)
+
+/*
+ * tegra2_cpu_set_resettable_soon(void)
+ *
+ * Called to set the "resettable soon" flag in PMC_SCRATCH41 when
+ * it is expected that the secondary CPU will be idle soon.
+ */
+ENTRY(tegra2_cpu_set_resettable_soon)
+ mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ mov r12, #CPU_RESETTABLE_SOON
+ str r12, [r1]
+ mov pc, lr
+ENDPROC(tegra2_cpu_set_resettable_soon)
+
+/*
+ * tegra2_cpu_is_resettable_soon(void)
+ *
+ * Returns true if the "resettable soon" flag in PMC_SCRATCH41 has been
+ * set because it is expected that the secondary CPU will be idle soon.
+ */
+ENTRY(tegra2_cpu_is_resettable_soon)
+ mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ ldr r12, [r1]
+ cmp r12, #CPU_RESETTABLE_SOON
+ moveq r0, #1
+ movne r0, #0
+ mov pc, lr
+ENDPROC(tegra2_cpu_is_resettable_soon)
+
+/*
+ * tegra2_sleep_core(unsigned long v2p)
+ *
+ * enters suspend in LP0 or LP1 by turning off the mmu and jumping to
+ * tegra2_tear_down_core in IRAM
+ */
+ENTRY(tegra2_sleep_core)
+ mov r12, pc @ return here via r12
+ b tegra_cpu_save
+ mov32 r1, tegra2_tear_down_core
+ mov32 r2, tegra2_iram_start
+ sub r1, r1, r2
+ mov32 r2, TEGRA_IRAM_CODE_AREA
+ add r1, r1, r2
+ b tegra_turn_off_mmu
+ENDPROC(tegra2_sleep_core)
+
+/*
+ * tegra2_sleep_wfi(unsigned long v2p)
+ */
+ENTRY(tegra2_sleep_wfi)
+ mrc p15, 0, r2, c1, c0, 1 @ save actlr before exiting coherency
+ mov r12, pc @ return here via r12
+ b tegra_cpu_save
+ mov r11, r2
+
+ mov32 r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ mov r3, #CPU_RESETTABLE
+ str r3, [r0]
+
+ bl tegra_cpu_wfi
+
+ mov32 r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ mov r3, #CPU_NOT_RESETTABLE
+ str r3, [r0]
+
+ /*
+ * cpu may be reset while in wfi, which will return through
+ * tegra_resume to tegra_cpu_resume_phys to tegra_cpu_resume
+ * or interrupt may wake wfi, which will return here
+ * cpu state is unchanged - the MMU is on, coherency is off,
+ * and the data cache is off
+ *
+ * r11 contains the original actlr
+ */
+
+ mov sp, r7 @ restore SP for aborted suspend
+ bl tegra_pen_lock
+
+ mov32 r3, TEGRA_PMC_VIRT
+ add r0, r3, #PMC_SCRATCH41
+ mov r3, #CPU_NOT_RESETTABLE
+ str r3, [r0]
+
+ bl tegra_pen_unlock
+
+#if USE_TEGRA_CPU_SUSPEND
+ /* Enable the data cache and SMP coherency */
+ mrc p15, 0, r10, c1, c0, 0
+ orr r10, r10, #CR_C
+ dsb
+ mcr p15, 0, r10, c1, c0, 0
+ isb
+ mcr p15, 0, r11, c1, c0, 1 @ reenable coherency
+
+#else
+ mcr p15, 0, r11, c1, c0, 1 @ reenable coherency
+#endif
+ /* Invalidate the TLBs & BTAC */
+ mov r1, #0
+ mcr p15, 0, r1, c8, c3, 0 @ invalidate shared TLBs
+ mcr p15, 0, r1, c7, c1, 6 @ invalidate shared BTAC
+ dsb
+ isb
+
+ @ the cpu was running with coherency disabled, caches may be out of date
+#ifdef MULTI_CACHE
+ mov32 r10, cpu_cache
+ mov lr, pc
+ ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+ bl __cpuc_flush_kern_all
+#endif
+
+#ifdef CONFIG_CACHE_L2X0
+ /* Issue a PL310 cache sync operation */
+ dsb
+ mov32 r2, TEGRA_PL310_VIRT
+ movw r1, 0x730 @ cache sync
+ add r2, r2, r1
+ mov r1, #0
+ str r1, [r2]
+#endif
+
+ pop_ctx_regs r0, r1 @ restore context registers
+ mov pc, lr
+ENDPROC(tegra2_sleep_wfi)
+
+/*
+ * tegra2_tear_down_cpu
+ *
+ * Switches the CPU cluster to PLL-P and enters sleep.
+ */
+ENTRY(tegra2_tear_down_cpu)
+ bl tegra_cpu_pllp
+ b tegra2_enter_sleep
+ENDPROC(tegra2_tear_down_cpu)
+
+/* START OF ROUTINES COPIED TO IRAM */
+ .align L1_CACHE_SHIFT
+ .globl tegra2_iram_start
+tegra2_iram_start:
+
+/*
+ * tegra2_lp1_reset
+ *
+ * reset vector for LP1 restore; copied into IRAM during suspend.
+ * brings the system back up to a safe starting point (SDRAM out of
+ * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLP,
+ * system clock running on the same PLL that it suspended at), and
+ * jumps to tegra_lp2_startup to restore PLLX and virtual addressing.
+ * physical address of tegra_lp2_startup expected to be stored in
+ * PMC_SCRATCH41
+ *
+ * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_CODE_AREA AND MUST BE FIRST.
+ */
+ENTRY(tegra2_lp1_reset)
+ /*
+ * the CPU and system bus are running at 32KHz and executing from
+ * IRAM when this code is executed; immediately switch to CLKM and
+ * enable PLLP.
+ */
+ mov32 r0, TEGRA_CLK_RESET_BASE
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ /* secure code handles 32KHz to CLKM/OSC clock switch */
+ mov r1, #(1 << 28)
+ str r1, [r0, #CLK_RESET_SCLK_BURST]
+ str r1, [r0, #CLK_RESET_CCLK_BURST]
+ mov r1, #0
+ str r1, [r0, #CLK_RESET_SCLK_DIVIDER]
+ str r1, [r0, #CLK_RESET_CCLK_DIVIDER]
+#endif
+ ldr r1, [r0, #CLK_RESET_PLLM_BASE]
+ tst r1, #(1 << 30)
+ orreq r1, r1, #(1 << 30)
+ streq r1, [r0, #CLK_RESET_PLLM_BASE]
+ ldr r1, [r0, #CLK_RESET_PLLP_BASE]
+ tst r1, #(1 << 30)
+ orreq r1, r1, #(1 << 30)
+ streq r1, [r0, #CLK_RESET_PLLP_BASE]
+ ldr r1, [r0, #CLK_RESET_PLLC_BASE]
+ tst r1, #(1 << 30)
+ orreq r1, r1, #(1 << 30)
+ streq r1, [r0, #CLK_RESET_PLLC_BASE]
+
+ adr r2, tegra2_sdram_pad_address
+ adr r4, tegra2_sdram_pad_save
+ mov r5, #0
+
+padload:
+ ldr r0, [r2, r5] @ r0 is emc register address
+
+ ldr r1, [r4, r5]
+ str r1, [r0] @ set emc register to safe vals
+
+ add r5, r5, #4
+ ldr r0, tegra2_sdram_pad_size
+ cmp r0, r5
+ bne padload
+
+padload_done:
+ mov32 r7, TEGRA_TMRUS_BASE
+ ldr r1, [r7]
+ add r1, r1, #0xff @ 255uS delay for PLL stabilization
+
+1: ldr r0, [r7]
+ cmp r0, r1
+ dmb
+ bmi 1b
+
+ adr r4, tegra2_sclk_save
+ ldr r4, [r4]
+ mov32 r0, TEGRA_CLK_RESET_BASE
+ str r4, [r0, #CLK_RESET_SCLK_BURST]
+ ldr r4, =((1 << 28) | (4)) @ burst policy is PLLP
+ str r4, [r0, #CLK_RESET_CCLK_BURST]
+
+ mov32 r0, TEGRA_EMC_BASE
+ ldr r1, [r0, #EMC_CFG]
+ bic r1, r1, #(1 << 31) @ disable DRAM_CLK_STOP
+ str r1, [r0, #EMC_CFG]
+
+ mov r1, #0
+ str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh
+ mov r1, #1
+ str r1, [r0, #EMC_NOP]
+ str r1, [r0, #EMC_NOP]
+ str r1, [r0, #EMC_REFRESH]
+
+ ldr r1, [r0, #EMC_ADR_CFG]
+ tst r1, #(0x3 << 24)
+ moveq r1, #(0x1 << 8) @ just 1 device
+ movne r1, #(0x3 << 8) @ 2 devices
+
+exit_selfrefresh_loop:
+ ldr r2, [r0, #EMC_EMC_STATUS]
+ ands r2, r2, r1
+ bne exit_selfrefresh_loop
+
+ mov r1, #0
+ str r1, [r0, #EMC_REQ_CTRL]
+
+ mov32 r0, TEGRA_PMC_BASE
+ ldr r0, [r0, #PMC_SCRATCH41]
+ mov pc, r0
+ENDPROC(tegra2_lp1_reset)
+
+/*
+ * tegra2_tear_down_core
+ *
+ * copied into and executed from IRAM
+ * puts memory in self-refresh for LP0 and LP1
+ */
+tegra2_tear_down_core:
+ bl tegra2_sdram_self_refresh
+ bl tegra2_cpu_clk32k
+ b tegra2_enter_sleep
+
+/*
+ * tegra2_cpu_clk32k
+ *
+ * In LP0 and LP1 all PLLs will be turned off. Switch the CPU and system clock
+ * to the 32KHz clock (clks).
+ */
+tegra2_cpu_clk32k:
+ /* start by jumping to clkm to safely disable PLLs, then jump
+ * to clks */
+ mov r0, #(1 << 28)
+ str r0, [r5, #CLK_RESET_SCLK_BURST]
+ str r0, [r5, #CLK_RESET_CCLK_BURST]
+ mov r0, #0
+ str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
+ str r0, [r5, #CLK_RESET_SCLK_DIVIDER]
+
+ /* 2 us delay between changing sclk and disabling PLLs */
+ mov32 r7, TEGRA_TMRUS_BASE
+ ldr r1, [r7]
+ add r1, r1, #3
+
+1: ldr r0, [r7]
+ cmp r0, r1
+ dmb
+ bmi 1b
+
+ /* switch to CLKS */
+ mov r0, #0 /* burst policy = 32KHz */
+ str r0, [r5, #CLK_RESET_SCLK_BURST]
+
+ /* disable PLLP, PLLM, PLLC in LP0 and LP1 states */
+ ldr r0, [r5, #CLK_RESET_PLLM_BASE]
+ bic r0, r0, #(1 << 30)
+ str r0, [r5, #CLK_RESET_PLLM_BASE]
+ ldr r0, [r5, #CLK_RESET_PLLP_BASE]
+ bic r0, r0, #(1 << 30)
+ str r0, [r5, #CLK_RESET_PLLP_BASE]
+ ldr r0, [r5, #CLK_RESET_PLLC_BASE]
+ bic r0, r0, #(1 << 30)
+ str r0, [r5, #CLK_RESET_PLLC_BASE]
+ mov pc, lr
+
+/*
+ * tegra2_enter_sleep
+ *
+ * uses flow controller to enter sleep state
+ * executes from IRAM with SDRAM in self-refresh when the target state is LP0 or LP1
+ * executes from SDRAM when the target state is LP2
+ */
+tegra2_enter_sleep:
+ mov32 r7, TEGRA_TMRUS_BASE
+ ldr r1, [r7]
+ mov32 r4, TEGRA_PMC_BASE
+ str r1, [r4, #PMC_SCRATCH38]
+ dsb
+ mov32 r6, TEGRA_FLOW_CTRL_BASE
+
+ mov r0, #FLOW_CTRL_WAIT_FOR_INTERRUPT
+ orr r0, r0, #FLOW_CTRL_HALT_CPU_IRQ | FLOW_CTRL_HALT_CPU_FIQ
+ cpu_id r1
+ cpu_to_halt_reg r1, r1
+ str r0, [r6, r1]
+ dsb
+ ldr r0, [r6, r1] /* memory barrier */
+
+halted: dsb
+ wfe /* CPU should be power gated here */
+ isb
+ b halted
+
+/*
+ * tegra2_sdram_self_refresh
+ *
+ * called with MMU off and caches disabled
+ * puts sdram in self refresh
+ * must execute from IRAM
+ */
+tegra2_sdram_self_refresh:
+ mov32 r1, TEGRA_EMC_BASE
+ mov r2, #3
+ str r2, [r1, #EMC_REQ_CTRL] @ stall incoming DRAM requests
+
+emcidle:ldr r2, [r1, #EMC_EMC_STATUS]
+ tst r2, #4
+ beq emcidle
+
+ mov r2, #1
+ str r2, [r1, #EMC_SELF_REF]
+
+ ldr r2, [r1, #EMC_ADR_CFG]
+ tst r2, #(0x3 << 24)
+ moveq r2, #(0x1 << 8) @ just 1 device
+ movne r2, #(0x3 << 8) @ 2 devices
+
+emcself:ldr r3, [r1, #EMC_EMC_STATUS]
+ and r3, r3, r2
+ cmp r3, r2
+ bne emcself @ loop until DDR in self-refresh
+
+ adr r2, tegra2_sdram_pad_address
+ adr r3, tegra2_sdram_pad_safe
+ adr r4, tegra2_sdram_pad_save
+ mov r5, #0
+
+padsave:
+ ldr r0, [r2, r5] @ r0 is emc register address
+
+ ldr r1, [r0]
+ str r1, [r4, r5] @ save emc register
+
+ ldr r1, [r3, r5]
+ str r1, [r0] @ set emc register to safe vals
+
+ add r5, r5, #4
+ ldr r0, tegra2_sdram_pad_size
+ cmp r0, r5
+ bne padsave
+padsave_done:
+
+ mov32 r5, TEGRA_CLK_RESET_BASE
+ ldr r0, [r5, #CLK_RESET_SCLK_BURST]
+ adr r2, tegra2_sclk_save
+ str r0, [r2]
+ dsb
+ mov pc, lr
+
+tegra2_sdram_pad_address:
+ .word TEGRA_APB_MISC_BASE + 0x8c8 /* XM2CFGCPADCTRL */
+ .word TEGRA_APB_MISC_BASE + 0x8cc /* XM2CFGDPADCTRL */
+ .word TEGRA_APB_MISC_BASE + 0x8d0 /* XM2CLKCFGPADCTRL */
+ .word TEGRA_APB_MISC_BASE + 0x8d4 /* XM2COMPPADCTRL */
+ .word TEGRA_APB_MISC_BASE + 0x8d8 /* XM2VTTGENPADCTRL */
+ .word TEGRA_APB_MISC_BASE + 0x8e4 /* XM2CFGCPADCTRL2 */
+ .word TEGRA_APB_MISC_BASE + 0x8e8 /* XM2CFGDPADCTRL2 */
+
+tegra2_sdram_pad_size:
+ .word tegra2_sdram_pad_size - tegra2_sdram_pad_address
+
+tegra2_sdram_pad_safe:
+ .word 0x8
+ .word 0x8
+ .word 0x0
+ .word 0x8
+ .word 0x5500
+ .word 0x08080040
+ .word 0x0
+
+tegra2_sclk_save:
+ .word 0x0
+
+tegra2_sdram_pad_save:
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+
+ .ltorg
+/* dummy symbol for end of IRAM */
+ .align L1_CACHE_SHIFT
+ .globl tegra2_iram_end
+tegra2_iram_end:
+ b .
+#endif
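
For readers who do not want to trace the assembly, the heart of tegra2_sdram_self_refresh above is the register sequence below, sketched in kernel-style C. It is illustrative only: the real routine must run from IRAM with the MMU and caches off, which is why it cannot live in C; 'emc' stands in for an assumed mapping of TEGRA_EMC_BASE, and the offsets are the EMC_* constants defined at the top of this file.

/* Illustrative sketch of the self-refresh entry sequence, not part of the patch. */
static void emc_enter_self_refresh_sketch(void __iomem *emc)
{
	u32 devmask;

	writel(3, emc + EMC_REQ_CTRL);              /* stall incoming DRAM requests */
	while (!(readl(emc + EMC_EMC_STATUS) & 4))
		;                                   /* wait for the EMC to go idle */

	writel(1, emc + EMC_SELF_REF);              /* request self-refresh */

	/* one or two DRAM devices populated? */
	devmask = (readl(emc + EMC_ADR_CFG) & (0x3 << 24)) ? (0x3 << 8) : (0x1 << 8);
	while ((readl(emc + EMC_EMC_STATUS) & devmask) != devmask)
		;                                   /* loop until DDR is in self-refresh */
}

On resume, tegra2_lp1_reset undoes this in the opposite order: it restores the pad controls, clears EMC_SELF_REF, issues NOP and REFRESH commands, and polls EMC_EMC_STATUS until the self-refresh bits clear.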
diff --git a/arch/arm/mach-tegra/sleep-t3.S b/arch/arm/mach-tegra/sleep-t3.S
new file mode 100644
index 000000000000..4e634099d53f
--- /dev/null
+++ b/arch/arm/mach-tegra/sleep-t3.S
@@ -0,0 +1,713 @@
+/*
+ * arch/arm/mach-tegra/sleep-t3.S
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/const.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "asm_macros.h"
+#include "sleep.h"
+#include "clock.h"
+
+#define EMC_CFG 0xc
+#define EMC_ADR_CFG 0x10
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_REFRESH 0x70
+#define EMC_NOP 0xdc
+#define EMC_SELF_REF 0xe0
+#define EMC_MRW 0xe8
+#define EMC_REQ_CTRL 0x2b0
+#define EMC_EMC_STATUS 0x2b4
+#define EMC_FBIO_CFG5 0x104
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */
+
+#define PMC_PWRGATE_TOGGLE 0x30
+#define PMC_REMOVE_CLAMPING_CMD 0x34
+#define PMC_PWRGATE_STATUS 0x38
+
+#define PMC_PWRGATE_PARTID_L2C (0x5)
+
+#define PMC_IO_DPD_REQ 0x1b8
+#define PMC_IO_DPD_STATUS 0x1bc
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_SCLK_BURST 0x28
+#define CLK_RESET_SCLK_DIVIDER 0x2c
+
+#define CLK_RESET_PLLC_BASE 0x80
+#define CLK_RESET_PLLM_BASE 0x90
+#define CLK_RESET_PLLP_BASE 0xa0
+#define CLK_RESET_PLLA_BASE 0xb0
+#define CLK_RESET_PLLX_BASE 0xe0
+
+#define CLK_RESET_PLLC_MISC 0x8c
+#define CLK_RESET_PLLM_MISC 0x9c
+#define CLK_RESET_PLLP_MISC 0xac
+#define CLK_RESET_PLLA_MISC 0xbc
+#define CLK_RESET_PLLX_MISC 0xe4
+
+#define CLK_RESET_PLLP_OUTA 0xa4
+#define CLK_RESET_PLLP_OUTB 0xa8
+
+#define PMC_PLLP_WB0_OVERRIDE 0xf8
+
+#define CLK_RESET_CLK_SOURCE_MSELECT 0x3b4
+
+#define MSELECT_CLKM (0x3 << 30)
+
+#if USE_PLL_LOCK_BITS
+#define LOCK_DELAY PLL_POST_LOCK_DELAY
+#else
+#define LOCK_DELAY 0xff /* 255uS delay for PLL stabilization */
+#endif
+
+#define USE_PLLP_ON_SLEEP_ENTRY 0
+
+.macro emc_device_mask, rd, base
+ ldr \rd, [\base, #EMC_ADR_CFG]
+ tst \rd, #0x1
+ moveq \rd, #(0x1<<8) @ just 1 device
+ movne \rd, #(0x3<<8) @ 2 devices
+.endm
+
+.macro emc_timing_update, rd, base
+ mov \rd, #1
+ str \rd, [\base, #EMC_TIMING_CONTROL]
+1001:
+ ldr \rd, [\base, #EMC_EMC_STATUS]
+ tst \rd, #(0x1<<23) @ wait until EMC_STATUS_TIMING_UPDATE_STALLED is clear
+ bne 1001b
+.endm
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * tegra3_hotplug_shutdown(void)
+ *
+ * Powergates the current CPU.
+ * Should never return.
+ */
+ENTRY(tegra3_hotplug_shutdown)
+ mov r6, lr
+ bl tegra_cpu_exit_coherency
+
+ /* Powergate this CPU. */
+ mov r0, #TEGRA_POWER_HOTPLUG_SHUTDOWN
+ bl tegra3_cpu_reset
+ mov pc, r6 @ should never get here
+ENDPROC(tegra3_hotplug_shutdown)
+#endif
+
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP)
+/*
+ * tegra3_cpu_reset(unsigned long flags)
+ *
+ * Puts the current CPU in wait-for-event mode on the flow controller
+ * and powergates it -- flags (in R0) indicate the request type.
+ * Must never be called for CPU 0.
+ *
+ * corrupts r0-r4, r12
+ */
+ENTRY(tegra3_cpu_reset)
+ cpu_id r3
+ cmp r3, #0
+ moveq pc, lr @ Must never be called for CPU 0
+
+ mov32 r12, TEGRA_FLOW_CTRL_VIRT
+ cpu_to_csr_reg r1, r3
+ add r1, r1, r12 @ virtual CSR address for this CPU
+ cpu_to_halt_reg r2, r3
+ add r2, r2, r12 @ virtual HALT_EVENTS address for this CPU
+
+ /* Clear this CPU's "event" and "interrupt" flags and power gate
+ it when halting but not before it is in the "WFE" state. */
+ movw r12, FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG | FLOW_CTRL_CSR_ENABLE
+ mov r4, #(1 << 4)
+ orr r12, r12, r4, lsl r3
+ str r12, [r1]
+
+ /* Halt this CPU. */
+ mov r3, #0x400
+delay_1:
+ subs r3, r3, #1 @ delay as part of the wfe workaround
+ bge delay_1;
+ cpsid a @ disable imprecise aborts.
+ ldr r3, [r1] @ read CSR
+ str r3, [r1] @ clear CSR
+ tst r0, #TEGRA_POWER_HOTPLUG_SHUTDOWN
+ moveq r3, #FLOW_CTRL_WAIT_FOR_INTERRUPT @ For LP2
+ movne r3, #FLOW_CTRL_WAITEVENT @ For hotplug
+ str r3, [r2]
+ ldr r0, [r2]
+ b wfe_war
+
+__cpu_reset_again:
+ dsb
+ .align 5
+ wfe @ CPU should be power gated here
+wfe_war:
+ b __cpu_reset_again
+
+ /* 38 nops, which fill the rest of the wfe cache line and 4 more cache lines */
+ .rept 38
+ nop
+ .endr
+ b . @ should never get here
+
+ENDPROC(tegra3_cpu_reset)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * tegra3_sleep_core(unsigned long v2p)
+ *
+ * enters suspend in LP0 or LP1 by turning off the mmu and jumping to
+ * tegra3_tear_down_core in IRAM
+ */
+ENTRY(tegra3_sleep_core)
+ mov r12, pc @ return here via r12
+ b tegra_cpu_save
+
+ /* preload all the address literals that are needed for the
+ * CPU power-gating process, to avoid loads from SDRAM (which are
+ * not supported once SDRAM is put into self-refresh).
+ * LP0 / LP1 use physical addresses, since the MMU needs to be
+ * disabled before putting SDRAM into self-refresh to avoid
+ * memory accesses due to page table walks */
+ mov32 r4, TEGRA_PMC_BASE
+ mov32 r5, TEGRA_CLK_RESET_BASE
+ mov32 r6, TEGRA_FLOW_CTRL_BASE
+ mov32 r7, TEGRA_TMRUS_BASE
+
+ mov32 r1, tegra3_tear_down_core
+ mov32 r2, tegra3_iram_start
+ sub r1, r1, r2
+ mov32 r2, TEGRA_IRAM_CODE_AREA
+ add r1, r1, r2
+ b tegra_turn_off_mmu
+ENDPROC(tegra3_sleep_core)
+
+/*
+ * tegra3_sleep_cpu_secondary(unsigned long v2p)
+ *
+ * Enters LP2 on secondary CPU by exiting coherency and powergating the CPU.
+ */
+ENTRY(tegra3_sleep_cpu_secondary)
+ mov r12, pc @ return here via r12
+ b tegra_cpu_save
+
+ /* Powergate this CPU. */
+ mov r0, #0 @ power mode flags (!hotplug)
+ bl tegra3_cpu_reset
+ b . @ should never get here
+ENDPROC(tegra3_sleep_cpu_secondary)
+
+/*
+ * tegra3_tear_down_cpu
+ *
+ * Switches the CPU cluster to PLL-P and enters sleep.
+ */
+ENTRY(tegra3_tear_down_cpu)
+ mov32 r4, TEGRA_PMC_BASE
+ mov32 r5, TEGRA_CLK_RESET_BASE
+ mov32 r6, TEGRA_FLOW_CTRL_BASE
+ mov32 r7, TEGRA_TMRUS_BASE
+#if USE_PLLP_ON_SLEEP_ENTRY
+ bl tegra_cpu_pllp
+#endif
+ b tegra3_enter_sleep
+ENDPROC(tegra3_tear_down_cpu)
+
+/* START OF ROUTINES COPIED TO IRAM */
+ .align L1_CACHE_SHIFT
+ .globl tegra3_iram_start
+tegra3_iram_start:
+
+/*
+ * tegra3_lp1_reset
+ *
+ * reset vector for LP1 restore; copied into IRAM during suspend.
+ * brings the system back up to a safe starting point (SDRAM out of
+ * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLP,
+ * system clock running on the same PLL that it suspended at), and
+ * jumps to tegra_lp2_startup to restore PLLX and virtual addressing.
+ * physical address of tegra_lp2_startup expected to be stored in
+ * PMC_SCRATCH41
+ *
+ * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_CODE_AREA AND MUST BE FIRST.
+ */
+.macro pll_enable, rd, car, base, misc
+ ldr \rd, [\car, #\base]
+ tst \rd, #(1<<30)
+ orreq \rd, \rd, #(1<<30)
+ streq \rd, [\car, #\base]
+#if USE_PLL_LOCK_BITS
+ ldr \rd, [\car, #\misc]
+ orr \rd, \rd, #(1<<18)
+ str \rd, [\car, #\misc]
+#endif
+.endm
+
+.macro pll_locked, rd, car, base
+#if USE_PLL_LOCK_BITS
+1:
+ ldr \rd, [\car, #\base]
+ tst \rd, #(1<<27)
+ beq 1b
+#endif
+.endm
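
The pll_enable/pll_locked macros above encode a small "power up, then wait for lock" pattern. A C rendering of the same logic, for reference only (bit positions are taken from the assembly: bit 30 enables the PLL, bit 18 of the MISC register enables lock detection, bit 27 reports lock; 'car' is an assumed mapping of TEGRA_CLK_RESET_BASE):

/* Illustrative sketch, not part of the patch. */
static void pll_enable_and_wait_sketch(void __iomem *car, u32 base, u32 misc)
{
	u32 val = readl(car + base);

	if (!(val & (1u << 30)))
		writel(val | (1u << 30), car + base);        /* power the PLL up */
#if USE_PLL_LOCK_BITS
	writel(readl(car + misc) | (1u << 18), car + misc);  /* enable lock detect */
	while (!(readl(car + base) & (1u << 27)))
		;                                            /* spin until the PLL locks */
#endif
}

When USE_PLL_LOCK_BITS is not set, the code instead waits a fixed LOCK_DELAY microseconds on the TMRUS counter, as tegra3_lp1_reset does right after its pll_locked calls.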
+
+ENTRY(tegra3_lp1_reset)
+ /* the CPU and system bus are running at 32KHz and executing from
+ * IRAM when this code is executed; immediately switch to CLKM and
+ * enable PLLP, PLLM, PLLC, PLLA and PLLX. */
+ mov32 r0, TEGRA_CLK_RESET_BASE
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ /* secure code handles 32KHz to CLKM/OSC clock switch */
+ mov r1, #(1<<28)
+ str r1, [r0, #CLK_RESET_SCLK_BURST]
+ str r1, [r0, #CLK_RESET_CCLK_BURST]
+ mov r1, #0
+ str r1, [r0, #CLK_RESET_SCLK_DIVIDER]
+ str r1, [r0, #CLK_RESET_CCLK_DIVIDER]
+#endif
+ /* enable PLLM via PMC */
+ mov32 r2, TEGRA_PMC_BASE
+ ldr r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
+ orr r1, r1, #(1<<12)
+ str r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
+
+ pll_enable r1, r0, CLK_RESET_PLLM_BASE, CLK_RESET_PLLM_MISC
+ pll_enable r1, r0, CLK_RESET_PLLP_BASE, CLK_RESET_PLLP_MISC
+ pll_enable r1, r0, CLK_RESET_PLLA_BASE, CLK_RESET_PLLA_MISC
+ pll_enable r1, r0, CLK_RESET_PLLC_BASE, CLK_RESET_PLLC_MISC
+ pll_enable r1, r0, CLK_RESET_PLLX_BASE, CLK_RESET_PLLX_MISC
+
+ pll_locked r1, r0, CLK_RESET_PLLM_BASE
+ pll_locked r1, r0, CLK_RESET_PLLP_BASE
+ pll_locked r1, r0, CLK_RESET_PLLA_BASE
+ pll_locked r1, r0, CLK_RESET_PLLC_BASE
+ pll_locked r1, r0, CLK_RESET_PLLX_BASE
+
+ mov32 r7, TEGRA_TMRUS_BASE
+ ldr r1, [r7]
+ add r1, r1, #LOCK_DELAY
+ wait_until r1, r7, r3
+
+ add r5, pc, #tegra3_sdram_pad_save-(.+8) @ r5 reserved for pad base
+
+ ldr r4, [r5, #0x18]
+ str r4, [r0, #CLK_RESET_CLK_SOURCE_MSELECT]
+
+ ldr r4, [r5, #0x1C]
+ str r4, [r0, #CLK_RESET_SCLK_BURST]
+
+ mov32 r4, ((1<<28) | (8)) @ burst policy is PLLX
+ str r4, [r0, #CLK_RESET_CCLK_BURST]
+
+#if defined (CONFIG_CACHE_L2X0)
+ /* power up L2 */
+ ldr r0, [r2, #PMC_PWRGATE_STATUS]
+ tst r0, #(1<<PMC_PWRGATE_PARTID_L2C)
+ bne powerup_l2_done
+ movw r0, #(1<<8) | PMC_PWRGATE_PARTID_L2C
+ str r0, [r2, #PMC_PWRGATE_TOGGLE]
+powerup_l2_wait:
+ ldr r0, [r2, #PMC_PWRGATE_STATUS]
+ tst r0, #(1<<PMC_PWRGATE_PARTID_L2C)
+ beq powerup_l2_wait
+powerup_l2_done:
+ mov r0, #PMC_PWRGATE_PARTID_L2C
+ str r0, [r2, #PMC_REMOVE_CLAMPING_CMD]
+#endif
+
+ mov32 r0, TEGRA_EMC_BASE @ r0 reserved for emc base
+
+ ldr r1, [r5, #0x14] @ PMC_IO_DPD_STATUS
+ mvn r1, r1
+ bic r1, r1, #(0x1<<31)
+ orr r1, r1, #(0x1<<30)
+ str r1, [r2, #PMC_IO_DPD_REQ]
+ ldr r1, [r5, #0xC]
+ str r1, [r0, #EMC_XM2VTTGENPADCTRL]
+ ldr r1, [r5, #0x10]
+ str r1, [r0, #EMC_XM2VTTGENPADCTRL2]
+ ldr r1, [r5, #0x8]
+ str r1, [r0, #EMC_AUTO_CAL_INTERVAL]
+
+ ldr r1, [r0, #EMC_CFG_DIG_DLL]
+ orr r1, r1, #(0x1<<30) @ set DLL_RESET
+ str r1, [r0, #EMC_CFG_DIG_DLL]
+
+ emc_timing_update r1, r0
+
+ ldr r1, [r0, #EMC_AUTO_CAL_CONFIG]
+ orr r1, r1, #(0x1<<31) @ set AUTO_CAL_ACTIVE
+ str r1, [r0, #EMC_AUTO_CAL_CONFIG]
+
+emc_wait_auto_cal_onetime:
+ ldr r1, [r0, #EMC_AUTO_CAL_STATUS]
+ tst r1, #(0x1<<31) @ wait until AUTO_CAL_ACTIVE is clear
+ bne emc_wait_auto_cal_onetime
+
+ ldr r1, [r0, #EMC_CFG]
+ bic r1, r1, #(1<<31) @ disable DRAM_CLK_STOP
+ str r1, [r0, #EMC_CFG]
+
+ mov r1, #0
+ str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh
+ mov r1, #1
+ str r1, [r0, #EMC_NOP]
+ str r1, [r0, #EMC_NOP]
+ str r1, [r0, #EMC_REFRESH]
+
+ emc_device_mask r1, r0
+
+exit_selfrefresh_loop:
+ ldr r2, [r0, #EMC_EMC_STATUS]
+ ands r2, r2, r1
+ bne exit_selfrefresh_loop
+
+ lsr r1, r1, #8 @ devSel, bit0:dev0 bit1:dev1
+
+ mov32 r7, TEGRA_TMRUS_BASE
+ ldr r2, [r0, #EMC_FBIO_CFG5]
+
+ and r2, r2, #3
+ cmp r2, #2
+ beq emc_lpddr2
+
+ mov32 r2, 0x80000011
+ str r2, [r0, #EMC_ZQ_CAL]
+ ldr r2, [r7]
+ add r2, r2, #10
+ wait_until r2, r7, r3
+
+ tst r1, #2
+ beq zcal_done
+
+ mov32 r2, 0x40000011
+ str r2, [r0, #EMC_ZQ_CAL]
+ ldr r2, [r7]
+ add r2, r2, #10
+ wait_until r2, r7, r3
+ b zcal_done
+
+emc_lpddr2:
+
+ mov32 r2, 0x800A00AB
+ str r2, [r0, #EMC_MRW]
+ ldr r2, [r7]
+ add r2, r2, #1
+ wait_until r2, r7, r3
+
+ tst r1, #2
+ beq zcal_done
+
+ mov32 r2, 0x400A00AB
+ str r2, [r0, #EMC_MRW]
+ ldr r2, [r7]
+ add r2, r2, #1
+ wait_until r2, r7, r3
+
+zcal_done:
+
+ mov r1, #0
+ str r1, [r0, #EMC_REQ_CTRL]
+ ldr r1, [r5, #0x4]
+ str r1, [r0, #EMC_ZCAL_INTERVAL]
+ ldr r1, [r5, #0x0]
+ str r1, [r0, #EMC_CFG]
+
+ mov32 r0, TEGRA_PMC_BASE
+ ldr r0, [r0, #PMC_SCRATCH41]
+ mov pc, r0
+ENDPROC(tegra3_lp1_reset)
+
+ .align L1_CACHE_SHIFT
+ .type tegra3_sdram_pad_save, %object
+tegra3_sdram_pad_save:
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+
+tegra3_sdram_pad_address:
+ .word TEGRA_EMC_BASE + EMC_CFG @0x0
+ .word TEGRA_EMC_BASE + EMC_ZCAL_INTERVAL @0x4
+ .word TEGRA_EMC_BASE + EMC_AUTO_CAL_INTERVAL @0x8
+ .word TEGRA_EMC_BASE + EMC_XM2VTTGENPADCTRL @0xc
+ .word TEGRA_EMC_BASE + EMC_XM2VTTGENPADCTRL2 @0x10
+ .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
+ .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
+ .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
+
+tegra3_sdram_pad_size:
+ .word tegra3_sdram_pad_address - tegra3_sdram_pad_save
+
+/*
+ * tegra3_tear_down_core
+ *
+ * copied into and executed from IRAM
+ * puts memory in self-refresh for LP0 and LP1
+ */
+tegra3_tear_down_core:
+ bl tegra3_sdram_self_refresh
+ bl tegra3_cpu_clk32k
+ b tegra3_enter_sleep
+
+/*
+ * tegra3_cpu_clk32k
+ *
+ * In LP0 and LP1 all PLLs will be turned off. Switch the CPU and system clock
+ * to the 32KHz clock (clks).
+ * r4 = TEGRA_PMC_BASE
+ * r5 = TEGRA_CLK_RESET_BASE
+ * r6 = TEGRA_FLOW_CTRL_BASE
+ * r7 = TEGRA_TMRUS_BASE
+ */
+tegra3_cpu_clk32k:
+ /* start by jumping to clkm to safely disable PLLs, then jump
+ * to clks */
+ mov r0, #(1 << 28)
+ str r0, [r5, #CLK_RESET_SCLK_BURST]
+ str r0, [r5, #CLK_RESET_CCLK_BURST]
+ mov r0, #0
+ str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
+ str r0, [r5, #CLK_RESET_SCLK_DIVIDER]
+
+ /* switch the clock source for mselect to be CLK_M */
+ ldr r0, [r5, #CLK_RESET_CLK_SOURCE_MSELECT]
+ orr r0, r0, #MSELECT_CLKM
+ str r0, [r5, #CLK_RESET_CLK_SOURCE_MSELECT]
+
+ /* 2 us delay between changing sclk and disabling PLLs */
+ wait_for_us r1, r7, r9
+ add r1, r1, #2
+ wait_until r1, r7, r9
+
+#if 1
+ /* switch to CLKS */
+ mov r0, #0 /* burst policy = 32KHz */
+ str r0, [r5, #CLK_RESET_SCLK_BURST]
+#endif
+
+ /* disable PLLM via PMC in LP1 */
+ ldr r0, [r4, #PMC_CTRL]
+ tst r0, #PMC_CTRL_SIDE_EFFECT_LP0
+ bne enable_pllm_lp0
+ ldr r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
+ bic r0, r0, #(1<<12)
+ str r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
+ b powerdown_pll_pcx
+
+enable_pllm_lp0:
+ /* enable PLLM via PMC in LP0 */
+ ldr r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
+ orr r0, r0, #((1<<12) | (1 << 11))
+ str r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
+
+powerdown_pll_pcx:
+ /* disable PLLP, PLLA, PLLC, and PLLX in LP0 and LP1 states */
+ ldr r0, [r4, #PMC_CTRL]
+ tst r0, #PMC_CTRL_SIDE_EFFECT_LP0
+ beq powerdown_pll_cx
+ ldr r0, [r5, #CLK_RESET_PLLP_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLP_BASE]
+ ldr r0, [r5, #CLK_RESET_PLLA_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLA_BASE]
+powerdown_pll_cx:
+ ldr r0, [r5, #CLK_RESET_PLLC_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLC_BASE]
+ ldr r0, [r5, #CLK_RESET_PLLX_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLX_BASE]
+
+ mov pc, lr
+
+/*
+ * tegra3_enter_sleep
+ *
+ * uses flow controller to enter sleep state
+ * executes from IRAM with SDRAM in self-refresh when the target state is LP0 or LP1
+ * executes from SDRAM when the target state is LP2
+ * r4 = TEGRA_PMC_BASE
+ * r5 = TEGRA_CLK_RESET_BASE
+ * r6 = TEGRA_FLOW_CTRL_BASE
+ * r7 = TEGRA_TMRUS_BASE
+ */
+tegra3_enter_sleep:
+ ldr r1, [r7]
+ str r1, [r4, #PMC_SCRATCH38]
+ dsb
+ cpu_id r1
+
+ cpu_to_csr_reg r2, r1
+ ldr r0, [r6, r2]
+ orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+ orr r0, r0, #FLOW_CTRL_CSR_ENABLE
+ str r0, [r6, r2]
+
+ mov r0, #FLOW_CTRL_WAIT_FOR_INTERRUPT
+ orr r0, r0, #FLOW_CTRL_HALT_CPU_IRQ | FLOW_CTRL_HALT_CPU_FIQ
+ cpu_to_halt_reg r2, r1
+ str r0, [r6, r2]
+ dsb
+ ldr r0, [r6, r2] /* memory barrier */
+
+halted:
+ isb
+ dsb
+ wfi /* CPU should be power gated here */
+
+ /* !!!FIXME!!! Implement halt failure handler */
+ b halted
+
+/*
+ * tegra3_sdram_self_refresh
+ *
+ * called with MMU off and caches disabled
+ * puts sdram in self-refresh
+ * must execute from IRAM
+ * r4 = TEGRA_PMC_BASE
+ * r5 = TEGRA_CLK_RESET_BASE
+ * r6 = TEGRA_FLOW_CTRL_BASE
+ * r7 = TEGRA_TMRUS_BASE
+ */
+
+tegra3_sdram_self_refresh:
+
+ adr r2, tegra3_sdram_pad_address
+ adr r8, tegra3_sdram_pad_save
+ mov r9, #0
+
+padsave:
+ ldr r0, [r2, r9] @ r0 is emc register address
+
+ ldr r1, [r0]
+ str r1, [r8, r9] @ save emc register
+
+ add r9, r9, #4
+ ldr r0, tegra3_sdram_pad_size
+ cmp r0, r9
+ bne padsave
+padsave_done:
+
+ dsb
+
+ mov32 r0, TEGRA_EMC_BASE @ r0 reserved for emc base
+
+ mov r1, #0
+ str r1, [r0, #EMC_ZCAL_INTERVAL]
+ str r1, [r0, #EMC_AUTO_CAL_INTERVAL]
+ ldr r1, [r0, #EMC_CFG]
+ bic r1, r1, #(1<<28)
+ str r1, [r0, #EMC_CFG] @ disable DYN_SELF_REF
+
+ emc_timing_update r1, r0
+
+ ldr r1, [r7]
+ add r1, r1, #5
+ wait_until r1, r7, r2
+
+emc_wait_auto_cal:
+ ldr r1, [r0, #EMC_AUTO_CAL_STATUS]
+ tst r1, #(0x1<<31) @ wait until AUTO_CAL_ACTIVE is clear
+ bne emc_wait_auto_cal
+
+ mov r1, #3
+ str r1, [r0, #EMC_REQ_CTRL] @ stall incoming DRAM requests
+
+emcidle:
+ ldr r1, [r0, #EMC_EMC_STATUS]
+ tst r1, #4
+ beq emcidle
+
+ mov r1, #1
+ str r1, [r0, #EMC_SELF_REF]
+
+ emc_device_mask r1, r0
+
+emcself:
+ ldr r2, [r0, #EMC_EMC_STATUS]
+ and r2, r2, r1
+ cmp r2, r1
+ bne emcself @ loop until DDR in self-refresh
+
+ ldr r1, [r0, #EMC_XM2VTTGENPADCTRL]
+ mov32 r2, 0xF8F8FFFF @ clear XM2VTTGEN_DRVUP and XM2VTTGEN_DRVDN
+ and r1, r1, r2
+ str r1, [r0, #EMC_XM2VTTGENPADCTRL]
+ ldr r1, [r0, #EMC_XM2VTTGENPADCTRL2]
+ orr r1, r1, #7 @ set E_NO_VTTGEN
+ str r1, [r0, #EMC_XM2VTTGENPADCTRL2]
+
+ emc_timing_update r1, r0
+
+ ldr r1, [r4, #PMC_CTRL]
+ tst r1, #PMC_CTRL_SIDE_EFFECT_LP0
+ bne pmc_io_dpd_skip
+ mov32 r1, 0x8EC00000
+ str r1, [r4, #PMC_IO_DPD_REQ]
+pmc_io_dpd_skip:
+
+ dsb
+
+ mov pc, lr
+
+ .ltorg
+/* dummy symbol for end of IRAM */
+ .align L1_CACHE_SHIFT
+ .globl tegra3_iram_end
+tegra3_iram_end:
+ b .
+#endif
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
new file mode 100644
index 000000000000..38e5f69a3437
--- /dev/null
+++ b/arch/arm/mach-tegra/sleep.S
@@ -0,0 +1,491 @@
+/*
+ * arch/arm/mach-tegra/sleep.S
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/const.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "asm_macros.h"
+#include "sleep.h"
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+
+#define TEGRA_PMC_VIRT (TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
+#define TEGRA_CLK_RESET_VIRT (TEGRA_CLK_RESET_BASE - IO_PPSB_PHYS + IO_PPSB_VIRT)
+
+/*
+ * tegra_pen_lock
+ *
+ * spinlock implementation with no atomic test-and-set and no coherence
+ * using Peterson's algorithm on strongly-ordered registers
+ * used to synchronize a cpu waking up from wfi with entering lp2 on idle
+ *
+ * SCRATCH37 = r1 = !turn (inverted from Peterson's algorithm)
+ * on cpu 0:
+ * SCRATCH38 = r2 = flag[0]
+ * SCRATCH39 = r3 = flag[1]
+ * on cpu1:
+ * SCRATCH39 = r2 = flag[1]
+ * SCRATCH38 = r3 = flag[0]
+ *
+ * must be called with MMU on
+ * corrupts r0-r3, r12
+ */
+ENTRY(tegra_pen_lock)
+ mov32 r3, TEGRA_PMC_VIRT
+ cpu_id r0
+ add r1, r3, #PMC_SCRATCH37
+ cmp r0, #0
+ addeq r2, r3, #PMC_SCRATCH38
+ addeq r3, r3, #PMC_SCRATCH39
+ addne r2, r3, #PMC_SCRATCH39
+ addne r3, r3, #PMC_SCRATCH38
+
+ mov r12, #1
+ str r12, [r2] @ flag[cpu] = 1
+ dsb
+ str r12, [r1] @ !turn = cpu
+1: dsb
+ ldr r12, [r3]
+ cmp r12, #1 @ flag[!cpu] == 1?
+ ldreq r12, [r1]
+ cmpeq r12, r0 @ !turn == cpu?
+ beq 1b @ while !turn == cpu && flag[!cpu] == 1
+
+ mov pc, lr @ locked
+ENDPROC(tegra_pen_lock)
+
+ENTRY(tegra_pen_unlock)
+ dsb
+ mov32 r3, TEGRA_PMC_VIRT
+ cpu_id r0
+ cmp r0, #0
+ addeq r2, r3, #PMC_SCRATCH38
+ addne r2, r3, #PMC_SCRATCH39
+ mov r12, #0
+ str r12, [r2]
+ mov pc, lr
+ENDPROC(tegra_pen_unlock)
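
The comment above describes tegra_pen_lock/tegra_pen_unlock as a two-CPU Peterson lock over strongly-ordered PMC scratch registers, so no atomic test-and-set or cache coherency is needed. For reference, the textbook form of that protocol in plain C11 (a userspace model, not part of the patch; ordinary sequentially-consistent atomics stand in for the scratch registers and the dsb barriers):

#include <stdatomic.h>

static atomic_int flag[2];    /* models PMC_SCRATCH38/39 */
static atomic_int not_turn;   /* models PMC_SCRATCH37 ("!turn") */

static void pen_lock(int cpu)
{
	int other = 1 - cpu;

	atomic_store(&flag[cpu], 1);
	atomic_store(&not_turn, cpu);            /* i.e. turn = the other CPU */
	while (atomic_load(&flag[other]) == 1 && atomic_load(&not_turn) == cpu)
		;                                /* spin while the other CPU holds its flag and it is its turn */
}

static void pen_unlock(int cpu)
{
	atomic_store(&flag[cpu], 0);
}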
+
+/*
+ * tegra_cpu_wfi
+ *
+ * puts current CPU in clock-gated wfi using the flow controller
+ *
+ * corrupts r0-r3
+ * must be called with MMU on
+ */
+ENTRY(tegra_cpu_wfi)
+ cpu_id r0
+ cpu_to_halt_reg r1, r0
+ cpu_to_csr_reg r2, r0
+ mov32 r0, TEGRA_FLOW_CTRL_VIRT
+ mov r3, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+ str r3, [r0, r2] @ clear event & interrupt status
+ mov r3, #FLOW_CTRL_WAIT_FOR_INTERRUPT | FLOW_CTRL_JTAG_RESUME
+ str r3, [r0, r1] @ put flow controller in wait irq mode
+ dsb
+ wfi
+ mov r3, #0
+ str r3, [r0, r1] @ clear flow controller halt status
+ mov r3, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+ str r3, [r0, r2] @ clear event & interrupt status
+ dsb
+ mov pc, lr
+ENDPROC(tegra_cpu_wfi)
+
+/*
+ * tegra_cpu_exit_coherency
+ *
+ * Exits SMP coherency.
+ * corrupts r4-r5
+ */
+ENTRY(tegra_cpu_exit_coherency)
+ exit_smp r4, r5
+ mov pc, lr
+ENDPROC(tegra_cpu_exit_coherency)
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Restore CPU state for a suspend
+ *
+ * NOTE: This is a copy of cpu_resume in arch/arm/kernel/sleep.S that has been
+ * modified to work with an L2 cache.
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(tegra_cpu_resume_phys)
+#if USE_TEGRA_CPU_SUSPEND
+#ifdef CONFIG_SMP
+ adr r0, tegra_phys_sleep_sp
+ ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+ ALT_UP(mov r1, #0)
+ and r1, r1, #15
+ ldr r0, [r0, r1, lsl #2] @ stack phys addr
+#else
+ ldr r0, tegra_phys_sleep_sp @ stack phys addr
+#endif
+ setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
+#ifdef MULTI_CPU
+ @ load v:p, stack, resume fn
+ ARM( ldmia r0!, {r1, sp, pc} )
+THUMB( ldmia r0!, {r1, r2, r3} )
+THUMB( mov sp, r2 )
+THUMB( bx r3 )
+#else
+ @ load v:p, stack, return fn
+ ARM( ldmia r0!, {r1, sp, lr} )
+THUMB( ldmia r0!, {r1, r2, lr} )
+THUMB( mov sp, r2 )
+ b cpu_do_resume
+#endif
+#else
+ /* Use the standard cpu_resume. */
+ b cpu_resume
+#endif
+ENDPROC(tegra_cpu_resume_phys)
+
+#if USE_TEGRA_CPU_SUSPEND
+ .align L1_CACHE_SHIFT
+ .globl tegra_phys_sleep_sp
+tegra_phys_sleep_sp:
+ .rept 4
+ .long 0 @ preserve stack phys ptr here
+ .endr
+ .align L1_CACHE_SHIFT @ nothing else must be in this cache line
+#endif
+
+/*
+ * tegra_cpu_suspend
+ *
+ * Save CPU suspend state
+ * NOTE: This is a copy of cpu_suspend in arch/arm/kernel/sleep.S that has been
+ * modified to work with an L2 cache.
+ *
+ * Input:
+ * r1 = v:p offset
+ * r3 = virtual return function
+ * Output:
+ * sp is decremented to allocate space for CPU state on stack
+ * r0-r3,ip,lr corrupted
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(tegra_cpu_suspend)
+#if USE_TEGRA_CPU_SUSPEND
+ stmfd sp!, {r3}
+ stmfd sp!, {r4 - r11}
+ mov r9, lr
+#ifdef MULTI_CPU
+ mov32 r10, processor
+ mov r2, sp @ current virtual SP
+ ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+ ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+ sub sp, sp, r0 @ allocate CPU state on stack
+ mov r0, sp @ save pointer
+ add ip, ip, r1 @ convert resume fn to phys
+ stmfd sp!, {r1, r2, ip} @ save v:p, virt SP, phys resume fn
+ mov lr, pc
+ ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
+#else
+ mov r2, sp @ current virtual SP
+ mov32 r0, cpu_suspend_size
+ sub sp, sp, r0 @ allocate CPU state on stack
+ mov r0, sp @ save pointer
+ stmfd sp!, {r1, r2, ip} @ save v:p, virt SP, phys resume fn
+ bl cpu_do_suspend
+#endif
+ dsb
+
+ /* Disable the data cache */
+ mrc p15, 0, r10, c1, c0, 0
+ bic r10, r10, #CR_C
+ dsb
+ mcr p15, 0, r10, c1, c0, 0
+ isb
+
+ /* Flush data cache */
+#ifdef MULTI_CACHE
+ mov32 r10, cpu_cache
+ mov lr, pc
+ ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+ bl __cpuc_flush_kern_all
+#endif
+#ifdef CONFIG_CACHE_L2X0
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ cpu_id r2
+ cmp r2, #0
+ bne no_l2_sync
+#endif
+ /* Issue a PL310 cache sync operation */
+ dsb
+ mov32 r2, TEGRA_PL310_VIRT
+ movw r1, 0x730 @ cache sync
+ add r2, r2, r1
+ mov r1, #0
+ str r1, [r2]
+#endif
+
+no_l2_sync:
+ /* Invalidate the TLBs & BTAC */
+ mov r1, #0
+ mcr p15, 0, r1, c8, c3, 0 @ invalidate shared TLBs
+ mcr p15, 0, r1, c7, c1, 6 @ invalidate shared BTAC
+ dsb
+ isb
+
+ /* Turn off SMP coherency */
+ exit_smp r1, r2
+
+ /* Convert SP from virtual to physical address. */
+ movw r1, #0xFFF
+ bic r2, sp, r1 @ VA & 0xFFFFF000
+ mcr p15, 0, r2, c7, c8, 0 @ V2PPRPC
+ mrc p15, 0, r2, c7, c4, 0 @ PAR
+ bic r2, r2, r1 @ PA & 0xFFFFF000
+ and r0, sp, r1 @ VA & 0x00000FFF
+ orr r2, r0, r2 @ (PA & 0xFFFFF000) | (VA & 0x00000FFF)
+
+ mov32 r3, tegra_phys_sleep_sp @ per-CPU phys SP save area
+
+#ifdef CONFIG_SMP
+ ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+ ALT_UP(mov lr, #0)
+ and lr, lr, #15
+#else
+ mov lr, #0
+#endif
+
+ /* Save the normal PRRR value */
+ mrc p15, 0, r0, c10, c2, 0 @ PRRR
+
+ /* Override all remappings to strongly ordered */
+ mov r1, #0
+ mcr p15, 0, r1, c10, c2, 0 @ PRRR
+ mcr p15, 0, r1, c8, c7, 0 @ invalidate local TLBs
+ dsb
+ isb
+
+ /* Save the physical stack pointer */
+ str r2, [r3, lr, lsl #2] @ save phys SP
+
+ /* Restore the regular remappings */
+ mcr p15, 0, r0, c10, c2, 0 @ PRRR
+ mcr p15, 0, r1, c8, c7, 0 @ invalidate local TLBs
+ dsb
+ isb
+
+ mov pc, r9
+#else
+ /* Use the standard cpu_suspend. */
+ mov r8, lr
+ bl cpu_suspend
+ exit_smp r0, r2
+ mov pc, r8
+#endif
+ENDPROC(tegra_cpu_suspend)
+
+/*
+ * tegra_cpu_save
+ *
+ * Input:
+ * r0 = v:p offset
+ * r12 = return to the caller of this function
+ * lr = resume address
+ * Output:
+ * r0 = v:p offset
+ * r7 = SP after saving the registers but before cpu_suspend, suitable
+ * for restoring an aborted suspend
+ * sp = SP after tegra_cpu_suspend (the 'real' SP)
+ * Saves r4-r11 on the stack
+ * Corrupts r1, r3-r10
+ */
+
+ENTRY(tegra_cpu_save)
+ push_ctx_regs r1 @ save context registers
+
+ adr r3, tegra_cpu_resume
+
+ mov r7, sp @ SP after reg save, before suspend
+
+#if USE_TEGRA_CPU_SUSPEND
+ cpu_id r4
+ mov32 r5, tegra_cpu_context @ address of non-cacheable context page
+ ldr r5, [r5] @ non-cacheable context save area
+ mov r6, #0x400 @ size of one CPU context stack area
+ add r4, r4, #1
+ smlabb sp, r6, r4, r5 @ context area for this CPU
+ push_stack_token r4 @ debug check word
+ stmfd sp!, {r7} @ save the real stack pointer
+ push_stack_token r4 @ debug check word
+#endif
+
+ mov r4, r12
+ mov r5, r0
+ mov r6, r2
+ mov r1, r0
+ bl tegra_cpu_suspend
+ mov r0, r5
+ mov r2, r6
+ mov pc, r4
+ENDPROC(tegra_cpu_save)
+
+/*
+ * tegra_sleep_cpu_save(unsigned long v2p)
+ *
+ * enters suspend in LP2 by turning off the mmu and jumping to
+ * tegra?_tear_down_cpu
+ */
+ENTRY(tegra_sleep_cpu_save)
+ mov r12, pc @ return here via r12
+ b tegra_cpu_save
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ mov32 r1, tegra2_tear_down_cpu
+#else
+ mov32 r1, tegra3_tear_down_cpu
+#endif
+ add r1, r1, r0
+ b tegra_turn_off_mmu
+ENDPROC(tegra_sleep_cpu_save)
+
+/*
+ * tegra_cpu_resume
+ *
+ * reloads the volatile CPU state from the context area
+ * initializes the processor mode stacks
+ * the mmu should be on and the CPU should be coherent before this is called
+ */
+ .align L1_CACHE_SHIFT
+tegra_cpu_resume:
+ mov r0, #0
+ mcr p15, 0, r0, c8, c3, 0 @ invalidate TLB
+ mcr p15, 0, r0, c7, c5, 6 @ flush BTAC
+ mcr p15, 0, r0, c7, c5, 0 @ flush instruction cache
+ dsb
+ isb
+
+#if USE_TEGRA_CPU_SUSPEND
+ pop_stack_token r4, r5 @ check stack debug token
+ ldmfd sp!, {r0} @ get the real stack pointer
+ pop_stack_token r4, r5 @ check stack debug token
+ mov sp, r0 @ switch to the real stack pointer
+#endif
+
+ bl cpu_init
+
+ pop_ctx_regs r1, r2 @ restore context registers
+ mov pc, lr
+
+/*
+ * tegra_turn_off_mmu
+ *
+ * r0 = v2p
+ * r1 = physical address to jump to with mmu off
+ */
+ENTRY(tegra_turn_off_mmu)
+ mov32 r3, tegra_shut_off_mmu
+ add r3, r3, r0
+ mov r0, r1
+ mov pc, r3
+ENDPROC(tegra_turn_off_mmu)
+
+tegra_pgd_phys_address:
+ .word tegra_pgd_phys
+
+/*
+ * tegra_shut_off_mmu
+ *
+ * r0 = physical address to jump to with mmu off
+ *
+ * called with VA=PA mapping
+ * turns off MMU, icache, dcache and branch prediction
+ */
+ .align L1_CACHE_SHIFT
+tegra_shut_off_mmu:
+ mrc p15, 0, r3, c1, c0, 0
+ movw r2, #CR_I | CR_Z | CR_C | CR_M
+ bic r3, r3, r2
+ dsb
+ mcr p15, 0, r3, c1, c0, 0
+ isb
+ mov pc, r0
+
+/*
+ * tegra_cpu_pllp
+ *
+ * In LP2 the normal CPU clock (PLLX) will be turned off. Switch the CPU to PLLP.
+ */
+ENTRY(tegra_cpu_pllp)
+ /* in LP2 idle (SDRAM active), set the CPU burst policy to PLLP */
+ mov32 r5, TEGRA_CLK_RESET_BASE
+ mov r0, #(2 << 28) @ burst policy = run mode
+ orr r0, r0, #(4 << 4) @ use PLLP in run mode burst
+ str r0, [r5, #CLK_RESET_CCLK_BURST]
+ mov r0, #0
+ str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
+ mov pc, lr
+ENDPROC(tegra_cpu_pllp)
+#endif
+
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+/*
+ * tegra_generic_smc
+ *
+ * r0 = smc type
+ * r1 = smc subtype
+ * r2 = argument passed to smc
+ *
+ * issues SMC (secure monitor call) instruction with
+ * the specified parameters.
+ */
+ENTRY(tegra_generic_smc)
+ adr r3, __tegra_smc_stack
+ stmia r3, {r4-r12, lr}
+ mov r3, #0
+ mov r4, #0
+ dsb
+ smc #0
+ adr r3, __tegra_smc_stack
+ ldmia r3, {r4-r12, pc}
+ENDPROC(tegra_generic_smc)
+ .type __tegra_smc_stack, %object
+__tegra_smc_stack:
+ .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .size __tegra_smc_stack, . - __tegra_smc_stack
+#endif
diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
new file mode 100644
index 000000000000..7b8f84d61699
--- /dev/null
+++ b/arch/arm/mach-tegra/sleep.h
@@ -0,0 +1,246 @@
+/*
+ * arch/arm/mach-tegra/sleep.h
+ *
+ * Declarations for power state transition code
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_SLEEP_H
+#define __MACH_TEGRA_SLEEP_H
+
+#include <mach/iomap.h>
+
+#ifdef CONFIG_CACHE_L2X0
+#define USE_TEGRA_CPU_SUSPEND 1
+#else
+#define USE_TEGRA_CPU_SUSPEND 0
+#endif
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+/* FIXME: The code associated with this should be removed if our change to
+ save the diagnostic register in the CPU context is accepted. */
+#define USE_TEGRA_DIAG_REG_SAVE 1
+#else
+#define USE_TEGRA_DIAG_REG_SAVE 0
+#endif
+
+#define TEGRA_POWER_SDRAM_SELFREFRESH (1 << 26) /* SDRAM is in self-refresh */
+#define TEGRA_POWER_HOTPLUG_SHUTDOWN (1 << 27) /* Hotplug shutdown */
+#define TEGRA_POWER_CLUSTER_G (1 << 28) /* G CPU */
+#define TEGRA_POWER_CLUSTER_LP (1 << 29) /* LP CPU */
+#define TEGRA_POWER_CLUSTER_MASK (TEGRA_POWER_CLUSTER_G | \
+ TEGRA_POWER_CLUSTER_LP)
+#define TEGRA_POWER_CLUSTER_IMMEDIATE (1 << 30) /* Immediate wake */
+#define TEGRA_POWER_CLUSTER_FORCE (1 << 31) /* Force switch */
+
+#define TEGRA_IRAM_CODE_AREA (TEGRA_IRAM_BASE + SZ_4K)
+
+/* PMC_SCRATCH37-39 and 41 are used for tegra_pen_lock in Tegra2 idle */
+#define PMC_SCRATCH37 0x130
+#define PMC_SCRATCH38 0x134
+/* PMC_SCRATCH39 stores the reset vector of the AVP (always 0) after LP0 */
+#define PMC_SCRATCH39 0x138
+/* PMC_SCRATCH41 stores the reset vector of the CPU after LP0 and LP1 */
+#define PMC_SCRATCH41 0x140
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define CPU_RESETTABLE 2
+#define CPU_RESETTABLE_SOON 1
+#define CPU_NOT_RESETTABLE 0
+#endif
+
+#define FLOW_CTRL_HALT_CPU0_EVENTS 0x0
+#define FLOW_CTRL_WAITEVENT (2 << 29)
+#define FLOW_CTRL_WAIT_FOR_INTERRUPT (4 << 29)
+#define FLOW_CTRL_JTAG_RESUME (1 << 28)
+#define FLOW_CTRL_HALT_CPU_IRQ (1 << 10)
+#define FLOW_CTRL_HALT_CPU_FIQ (1 << 8)
+#define FLOW_CTRL_CPU0_CSR 0x8
+#define FLOW_CTRL_CSR_INTR_FLAG (1 << 15)
+#define FLOW_CTRL_CSR_EVENT_FLAG (1 << 14)
+#define FLOW_CTRL_CSR_ENABLE (1 << 0)
+#define FLOW_CTRL_HALT_CPU1_EVENTS 0x14
+#define FLOW_CTRL_CPU1_CSR 0x18
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define FLOW_CTRL_CSR_WFE_CPU0 (1 << 4)
+#define FLOW_CTRL_CSR_WFE_BITMAP (3 << 4)
+#define FLOW_CTRL_CSR_WFI_BITMAP 0
+#else
+#define FLOW_CTRL_CSR_WFE_BITMAP (0xF << 4)
+#define FLOW_CTRL_CSR_WFI_CPU0 (1 << 8)
+#define FLOW_CTRL_CSR_WFI_BITMAP (0xF << 8)
+#endif
+
+#define TEGRA_PL310_VIRT (TEGRA_ARM_PL310_BASE - IO_CPU_PHYS + IO_CPU_VIRT)
+#define TEGRA_FLOW_CTRL_VIRT (TEGRA_FLOW_CTRL_BASE - IO_PPSB_PHYS + IO_PPSB_VIRT)
+#define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS + IO_CPU_VIRT)
+
+#ifdef __ASSEMBLY__
+
+/* Macro to exit SMP coherency. */
+.macro exit_smp, tmp1, tmp2
+ mrc p15, 0, \tmp1, c1, c0, 1 @ ACTLR
+ bic \tmp1, \tmp1, #(1<<6) | (1<<0) @ clear ACTLR.SMP | ACTLR.FW
+ mcr p15, 0, \tmp1, c1, c0, 1 @ ACTLR
+ isb
+ cpu_id \tmp1
+ mov \tmp1, \tmp1, lsl #2
+ mov \tmp2, #0xf
+ mov \tmp2, \tmp2, lsl \tmp1
+ mov32 \tmp1, TEGRA_ARM_PERIF_VIRT + 0xC
+ str \tmp2, [\tmp1] @ invalidate SCU tags for CPU
+ dsb
+.endm
+
+#define DEBUG_CONTEXT_STACK 0
+
+/* pops a debug check token from the stack */
+.macro pop_stack_token tmp1, tmp2
+#if DEBUG_CONTEXT_STACK
+ mov32 \tmp1, 0xBAB1F00D
+ ldmfd sp!, {\tmp2}
+ cmp \tmp1, \tmp2
+ movne pc, #0
+#endif
+.endm
+
+/* pushes a debug check token onto the stack */
+.macro push_stack_token tmp1
+#if DEBUG_CONTEXT_STACK
+ mov32 \tmp1, 0xBAB1F00D
+ stmfd sp!, {\tmp1}
+#endif
+.endm
+
+.macro push_ctx_regs, tmp1
+ push_stack_token \tmp1 @ debug check word
+ stmfd sp!, {r4 - r11, lr}
+ /* Save the current TTB0 and CONTEXTID registers. */
+ mrc p15, 0, r5, c2, c0, 0 @ TTB 0
+ mrc p15, 0, r6, c13, c0, 1 @ CONTEXTID
+#if USE_TEGRA_DIAG_REG_SAVE
+ mrc p15, 0, r4, c15, c0, 1 @ read diagnostic register
+ stmfd sp!, {r4-r6}
+#else
+ stmfd sp!, {r5-r6}
+#endif
+ /* Switch to the tegra_pgd so that IRAM and the MMU shut-off code
+ will be flat mapped (VA==PA). We also do this because the common
+ ARM CPU state save/restore code doesn't support an external L2
+ cache controller. If the current PGD is left active, the common
+ ARM MMU restore may (and eventually will) damage the currently
+ running page tables by adding a temporary flat section mapping
+ that could be picked up by other CPUs from the L2 cache
+ resulting in a kernel panic. */
+ ldr r6, tegra_pgd_phys_address
+ ldr r6, [r6]
+ mov r7, #0
+ dsb
+ mcr p15, 0, r7, c13, c0, 1 @ CONTEXTID = reserved context
+ isb
+ mcr p15, 0, r6, c2, c0, 0 @ TTB 0
+ isb
+ mcr p15, 0, r7, c8, c3, 0 @ invalidate TLB
+ mcr p15, 0, r7, c7, c5, 6 @ flush BTAC
+ mcr p15, 0, r7, c7, c5, 0 @ flush instruction cache
+ dsb
+.endm
+
+.macro pop_ctx_regs, tmp1, tmp2
+#if USE_TEGRA_DIAG_REG_SAVE
+ ldmfd sp!, {r4-r6}
+ mcr p15, 0, r4, c15, c0, 1 @ write diagnostic register
+#else
+ ldmfd sp!, {r5-r6}
+#endif
+ dsb
+ mcr p15, 0, r5, c2, c0, 0 @ TTB 0
+ isb
+ mcr p15, 0, r6, c13, c0, 1 @ CONTEXTID = reserved context
+ isb
+ mov r7, #0
+ mcr p15, 0, r7, c8, c3, 0 @ invalidate TLB
+ mcr p15, 0, r7, c7, c5, 6 @ flush BTAC
+ mcr p15, 0, r7, c7, c5, 0 @ flush instruction cache
+ dsb
+ ldmfd sp!, {r4 - r11, lr}
+ pop_stack_token \tmp1, \tmp2 @ pop debug check token
+.endm
+
+#else /* !defined(__ASSEMBLY__) */
+
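+/* Summary of the two accessor macros below: CPU0 uses the dedicated
+ * HALT_CPU0_EVENTS/CPU0_CSR offsets, while CPU1 and higher are packed at an
+ * 8-byte stride starting from the CPU1 offsets. */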
+#define FLOW_CTRL_HALT_CPU(cpu) (IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + \
+ ((cpu) ? (FLOW_CTRL_HALT_CPU1_EVENTS + 8 * ((cpu) - 1)) : \
+ FLOW_CTRL_HALT_CPU0_EVENTS))
+
+#define FLOW_CTRL_CPU_CSR(cpu) (IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + \
+ ((cpu) ? (FLOW_CTRL_CPU1_CSR + 8 * ((cpu) - 1)) : \
+ FLOW_CTRL_CPU0_CSR))
+
+static inline void flowctrl_writel(unsigned long val, void __iomem *addr)
+{
+ writel(val, addr);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ wmb();
+#endif
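+ /* Read back to ensure the write has reached the flow controller. */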
+ (void)__raw_readl(addr);
+}
+
+void tegra_pen_lock(void);
+void tegra_pen_unlock(void);
+void tegra_cpu_wfi(void);
+void tegra_sleep_cpu_save(unsigned long v2p);
+void tegra_resume(void);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern void tegra2_iram_start;
+extern void tegra2_iram_end;
+int tegra2_cpu_is_resettable_soon(void);
+void tegra2_cpu_reset(int cpu);
+void tegra2_cpu_set_resettable_soon(void);
+void tegra2_cpu_clear_resettable(void);
+void tegra2_sleep_core(unsigned long v2p);
+void tegra2_hotplug_shutdown(void);
+void tegra2_sleep_wfi(unsigned long v2p);
+#else
+extern void tegra3_iram_start;
+extern void tegra3_iram_end;
+void tegra3_sleep_core(unsigned long v2p);
+void tegra3_sleep_cpu_secondary(unsigned long v2p);
+void tegra3_hotplug_shutdown(void);
+#endif
+
+static inline void *tegra_iram_start(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ return &tegra2_iram_start;
+#else
+ return &tegra3_iram_start;
+#endif
+}
+
+static inline void *tegra_iram_end(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ return &tegra2_iram_end;
+#else
+ return &tegra3_iram_end;
+#endif
+}
+#endif
+#endif
diff --git a/arch/arm/mach-tegra/syncpt.c b/arch/arm/mach-tegra/syncpt.c
new file mode 100644
index 000000000000..8ebab3801a8a
--- /dev/null
+++ b/arch/arm/mach-tegra/syncpt.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010, NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/mach/irq.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#define HOST1X_SYNC_OFFSET 0x3000
+#define HOST1X_SYNC_SIZE 0x800
+enum {
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60
+};
+
+static void syncpt_thresh_mask(struct irq_data *data)
+{
+ (void)data;
+}
+
+static void syncpt_thresh_unmask(struct irq_data *data)
+{
+ (void)data;
+}
+
+static void syncpt_thresh_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *sync_regs = irq_desc_get_handler_data(desc);
+ unsigned long reg;
+ int id;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ reg = readl(sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ for_each_set_bit(id, &reg, 32)
+ generic_handle_irq(id + INT_SYNCPT_THRESH_BASE);
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip syncpt_thresh_irq = {
+ .name = "syncpt",
+ .irq_mask = syncpt_thresh_mask,
+ .irq_unmask = syncpt_thresh_unmask
+};
+
+static int __init syncpt_init_irq(void)
+{
+ void __iomem *sync_regs;
+ unsigned int i;
+ int irq;
+
+ sync_regs = ioremap(TEGRA_HOST1X_BASE + HOST1X_SYNC_OFFSET,
+ HOST1X_SYNC_SIZE);
+ BUG_ON(!sync_regs);
+
+ writel(0xffffffffUL,
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+ writel(0xffffffffUL,
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ for (i = 0; i < INT_SYNCPT_THRESH_NR; i++) {
+ irq = INT_SYNCPT_THRESH_BASE + i;
+ irq_set_chip_and_handler(irq, &syncpt_thresh_irq,
+ handle_simple_irq);
+ irq_set_chip_data(irq, sync_regs);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ irq_set_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
+ syncpt_thresh_cascade);
+ irq_set_handler_data(INT_HOST1X_MPCORE_SYNCPT, sync_regs);
+
+ return 0;
+}
+
+core_initcall(syncpt_init_irq);
diff --git a/arch/arm/mach-tegra/sysfs-cluster.c b/arch/arm/mach-tegra/sysfs-cluster.c
new file mode 100644
index 000000000000..49c3abcf32b9
--- /dev/null
+++ b/arch/arm/mach-tegra/sysfs-cluster.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2010-2011 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This driver creates the /sys/kernel/cluster node and attributes for CPU
+ * switch testing. Node attributes:
+ *
+ * active: currently active CPU (G or LP)
+ * write: 'g' = switch to G CPU
+ * 'lp' = switch to LP CPU
+ * 'toggle' = switch to the other CPU
+ * read: returns the currently active CPU (g or lp)
+ *
+ * force: force switch even if already on target CPU
+ * write: '0' = do not perform switch if
+ * active CPU == target CPU (default)
+ * '1' = force switch regardless of
+ * currently active CPU
+ * read: returns the current status of the force flag
+ *
+ * immediate: request immediate wake-up from switch request
+ * write: '0' = non-immediate wake-up on next interrupt (default)
+ * '1' = immediate wake-up
+ * read: returns the current status of the immediate flag
+ *
+ * power_mode: power mode to use for switch (LP1 or LP2)
+ * write: '1' = use LP1 power mode
+ * '2' = use LP2 power mode (default)
+ * read: returns the current power_mode value
+ *
+ * wake_ms: wake time (in milliseconds) -- ignored if immediate==1
+ * write: '0' = wake up at the next non-timer interrupt
+ * 'n' = (n > 0) wake-up after 'n' milliseconds or the
+ * next non-timer interrupt (whichever comes first)
+ * read: returns the current wake_ms value
+ *
+ * Writing the force, immediate and wake_ms attributes simply updates the
+ * state of internal variables that will be used for the next switch request.
+ * Writing to the active attribute initiates a switch request using the
+ * current values of the force, immediate, and wake_ms attributes.
+ *
+ * The OS tick timer is not a valid interrupt source for waking up following
+ * a switch request. This is because the kernel uses local timers that are
+ * part of the CPU complex. These get shut down when the CPU complex is
+ * placed into reset by the switch request. If you want a timed wake up
+ * from a switch, you must specify a positive wake_ms value. This will
+ * ensure that a non-local timer is programmed to fire an interrupt
+ * after the desired interval.
+ *
+ */
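+
+/* Example usage (illustrative): request a switch to the LP CPU with a
+ * timed wake-up of 100 ms.
+ *
+ *	echo 0   > /sys/kernel/cluster/immediate
+ *	echo 100 > /sys/kernel/cluster/wake_ms
+ *	echo lp  > /sys/kernel/cluster/active
+ *	cat /sys/kernel/cluster/active
+ */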
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <mach/iomap.h>
+#include "clock.h"
+#include "sleep.h"
+#include "pm.h"
+
+#define SYSFS_CLUSTER_PRINTS 1 /* Nonzero: enable status prints */
+#define SYSFS_CLUSTER_TRACE_PRINTS 0 /* Nonzero: enable trace prints */
+#define SYSFS_CLUSTER_POWER_MODE 1 /* Nonzero: use power modes other than LP2 */
+
+#if SYSFS_CLUSTER_TRACE_PRINTS
+#define TRACE_CLUSTER(x) printk x
+#else
+#define TRACE_CLUSTER(x)
+#endif
+
+#if SYSFS_CLUSTER_PRINTS
+#define PRINT_CLUSTER(x) printk x
+#else
+#define PRINT_CLUSTER(x)
+#endif
+
+static struct kobject *cluster_kobj;
+static spinlock_t cluster_lock;
+static unsigned int flags = 0;
+static unsigned int wake_ms = 0;
+
+static ssize_t sysfscluster_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
+static ssize_t sysfscluster_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count);
+
+/* Active CPU: "G", "LP", "toggle" */
+static struct kobj_attribute cluster_active_attr =
+ __ATTR(active, 0640, sysfscluster_show, sysfscluster_store);
+
+/* Immediate wake-up when performing switch: 0, 1 */
+static struct kobj_attribute cluster_immediate_attr =
+ __ATTR(immediate, 0640, sysfscluster_show, sysfscluster_store);
+
+/* Force power transition even if already on the desired CPU: 0, 1 */
+static struct kobj_attribute cluster_force_attr =
+ __ATTR(force, 0640, sysfscluster_show, sysfscluster_store);
+
+/* Wake time (in milliseconds) */
+static struct kobj_attribute cluster_wake_ms_attr =
+ __ATTR(wake_ms, 0640, sysfscluster_show, sysfscluster_store);
+
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+/* LPx power mode to use when switching CPUs: 1=LP1, 2=LP2 */
+static unsigned int power_mode = 2;
+static struct kobj_attribute cluster_powermode_attr =
+ __ATTR(power_mode, 0640, sysfscluster_show, sysfscluster_store);
+#endif
+
+#if DEBUG_CLUSTER_SWITCH
+unsigned int tegra_cluster_debug = 0;
+static struct kobj_attribute cluster_debug_attr =
+ __ATTR(debug, 0640, sysfscluster_show, sysfscluster_store);
+#endif
+
+typedef enum
+{
+ ClusterAttr_Invalid = 0,
+ ClusterAttr_Active,
+ ClusterAttr_Immediate,
+ ClusterAttr_Force,
+ ClusterAttr_WakeMs,
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ ClusterAttr_PowerMode,
+#endif
+#if DEBUG_CLUSTER_SWITCH
+ ClusterAttr_Debug
+#endif
+} ClusterAttr;
+
+static ClusterAttr GetClusterAttr(const char *name)
+{
+ if (!strcmp(name, "active"))
+ return ClusterAttr_Active;
+ if (!strcmp(name, "immediate"))
+ return ClusterAttr_Immediate;
+ if (!strcmp(name, "force"))
+ return ClusterAttr_Force;
+ if (!strcmp(name, "wake_ms"))
+ return ClusterAttr_WakeMs;
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ if (!strcmp(name, "power_mode"))
+ return ClusterAttr_PowerMode;
+#endif
+#if DEBUG_CLUSTER_SWITCH
+ if (!strcmp(name, "debug"))
+ return ClusterAttr_Debug;
+#endif
+ TRACE_CLUSTER(("GetClusterAttr(%s): invalid\n", name));
+ return ClusterAttr_Invalid;
+}
+
+static ssize_t sysfscluster_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ClusterAttr type;
+ ssize_t len;
+
+ TRACE_CLUSTER(("+sysfscluster_show\n"));
+
+ type = GetClusterAttr(attr->attr.name);
+ switch (type) {
+ case ClusterAttr_Active:
+ len = sprintf(buf, "%s\n", is_lp_cluster() ? "LP" : "G");
+ break;
+
+ case ClusterAttr_Immediate:
+ len = sprintf(buf, "%d\n",
+ ((flags & TEGRA_POWER_CLUSTER_IMMEDIATE) != 0));
+ break;
+
+ case ClusterAttr_Force:
+ len = sprintf(buf, "%d\n",
+ ((flags & TEGRA_POWER_CLUSTER_FORCE) != 0));
+ break;
+
+ case ClusterAttr_WakeMs:
+ len = sprintf(buf, "%d\n", wake_ms);
+ break;
+
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ case ClusterAttr_PowerMode:
+ len = sprintf(buf, "%d\n", power_mode);
+ break;
+#endif
+
+#if DEBUG_CLUSTER_SWITCH
+ case ClusterAttr_Debug:
+ len = sprintf(buf, "%d\n", tegra_cluster_debug);
+ break;
+#endif
+
+ default:
+ len = sprintf(buf, "invalid\n");
+ break;
+ }
+
+ TRACE_CLUSTER(("-sysfscluster_show\n"));
+ return len;
+}
+
+static ssize_t sysfscluster_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ ClusterAttr type;
+ ssize_t ret = count--;
+ unsigned request;
+ int e;
+ int tmp;
+ int cnt;
+ struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
+ struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g");
+ struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp");
+ struct clk *new_parent = NULL;
+
+ if (!cpu_clk || !cpu_g_clk || !cpu_lp_clk) {
+ ret = -ENOSYS;
+ goto fail;
+ }
+
+ TRACE_CLUSTER(("+sysfscluster_store: %p, %d\n", buf, count));
+
+ /* The count includes the data bytes followed by a line feed character. */
+ if (!buf || (count < 1)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ type = GetClusterAttr(attr->attr.name);
+
+ spin_lock(&cluster_lock);
+
+ switch (type) {
+ case ClusterAttr_Active:
+ if (!strncasecmp(buf, "g", count)) {
+ flags &= ~TEGRA_POWER_CLUSTER_MASK;
+ flags |= TEGRA_POWER_CLUSTER_G;
+ } else if (!strncasecmp(buf, "lp", count)) {
+ flags &= ~TEGRA_POWER_CLUSTER_MASK;
+ flags |= TEGRA_POWER_CLUSTER_LP;
+ } else if (!strncasecmp(buf, "toggle", count)) {
+ flags &= ~TEGRA_POWER_CLUSTER_MASK;
+ if (is_lp_cluster())
+ flags |= TEGRA_POWER_CLUSTER_G;
+ else
+ flags |= TEGRA_POWER_CLUSTER_LP;
+ } else {
+ PRINT_CLUSTER(("cluster/active: '%*.*s' invalid, "
+ " must be g, lp, or toggle\n",
+ count, count, buf));
+ ret = -EINVAL;
+ break;
+ }
+ PRINT_CLUSTER(("cluster/active -> %s\n",
+ (flags & TEGRA_POWER_CLUSTER_G) ? "G" : "LP"));
+
+ request = flags;
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ if (power_mode == 1) {
+ request |= TEGRA_POWER_SDRAM_SELFREFRESH;
+ }
+#endif
+ tegra_cluster_switch_set_parameters(wake_ms * 1000, request);
+ new_parent = (flags & TEGRA_POWER_CLUSTER_LP) ?
+ cpu_lp_clk : cpu_g_clk;
+ break;
+
+ case ClusterAttr_Immediate:
+ if ((count == 1) && (*buf == '0'))
+ flags &= ~TEGRA_POWER_CLUSTER_IMMEDIATE;
+ else if ((count == 1) && *buf == '1')
+ flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
+ else {
+ PRINT_CLUSTER(("cluster/immediate: '%*.*s' invalid, "
+ "must be 0 or 1\n", count, count, buf));
+ ret = -EINVAL;
+ break;
+ }
+ PRINT_CLUSTER(("cluster/immediate -> %c\n",
+ (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? '1' : '0'));
+ break;
+
+ case ClusterAttr_Force:
+ if ((count == 1) && (*buf == '0'))
+ flags &= ~TEGRA_POWER_CLUSTER_FORCE;
+ else if ((count == 1) && (*buf == '1'))
+ flags |= TEGRA_POWER_CLUSTER_FORCE;
+ else {
+ PRINT_CLUSTER(("cluster/force: '%*.*s' invalid, "
+ "must be 0 or 1\n", count, count, buf));
+ ret = -EINVAL;
+ break;
+ }
+ PRINT_CLUSTER(("cluster/force -> %c\n",
+ (flags & TEGRA_POWER_CLUSTER_FORCE) ? '1' : '0'));
+ break;
+
+ case ClusterAttr_WakeMs:
+ tmp = 0;
+ cnt = sscanf(buf, "%d\n", &tmp);
+ if ((cnt != 1) || (tmp < 0)) {
+ PRINT_CLUSTER(("cluster/wake_ms: '%*.*s' is invalid\n",
+ count, count, buf));
+ ret = -EINVAL;
+ break;
+ }
+ wake_ms = tmp;
+ PRINT_CLUSTER(("cluster/wake_ms -> %d\n", wake_ms));
+ break;
+
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ case ClusterAttr_PowerMode:
+ if ((count == 1) && (*buf == '2'))
+ power_mode = 2;
+ else if ((count == 1) && *buf == '1')
+ power_mode = 1;
+ else {
+ PRINT_CLUSTER(("cluster/power_mode: '%*.*s' invalid, "
+ "must be 2 or 1\n", count, count, buf));
+ ret = -EINVAL;
+ break;
+ }
+ PRINT_CLUSTER(("cluster/power_mode -> %d\n", power_mode));
+ break;
+#endif
+
+#if DEBUG_CLUSTER_SWITCH
+ case ClusterAttr_Debug:
+ if ((count == 1) && (*buf == '0'))
+ tegra_cluster_debug = 0;
+ else if ((count == 1) && (*buf == '1'))
+ tegra_cluster_debug = 1;
+ else {
+ PRINT_CLUSTER(("cluster/debug: '%*.*s' invalid, "
+ "must be 0 or 1\n", count, count, buf));
+ ret = -EINVAL;
+ break;
+ }
+ PRINT_CLUSTER(("cluster/debug -> %d\n",tegra_cluster_debug));
+ break;
+#endif
+
+ default:
+ ret = -ENOENT;
+ break;
+ }
+
+ spin_unlock(&cluster_lock);
+
+ if (new_parent) {
+ e = clk_set_parent(cpu_clk, new_parent);
+ if (e) {
+ PRINT_CLUSTER(("cluster/active: request failed (%d)\n",
+ e));
+ ret = e;
+ }
+ }
+fail:
+ TRACE_CLUSTER(("-sysfscluster_store: %d\n", count));
+ return ret;
+}
+
+#define CREATE_FILE(x) \
+ do { \
+ e = sysfs_create_file(cluster_kobj, &cluster_##x##_attr.attr); \
+ if (e) { \
+ TRACE_CLUSTER(("cluster/" __stringify(x) \
+ ": sysfs_create_file failed!\n")); \
+ goto fail; \
+ } \
+ } while (0)
+
+static int __init sysfscluster_init(void)
+{
+ int e;
+
+ TRACE_CLUSTER(("+sysfscluster_init\n"));
+
+ spin_lock_init(&cluster_lock);
+ cluster_kobj = kobject_create_and_add("cluster", kernel_kobj);
+
+ CREATE_FILE(active);
+ CREATE_FILE(immediate);
+ CREATE_FILE(force);
+ CREATE_FILE(wake_ms);
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ CREATE_FILE(powermode);
+#endif
+#if DEBUG_CLUSTER_SWITCH
+ CREATE_FILE(debug);
+#endif
+
+ spin_lock(&cluster_lock);
+ if (is_lp_cluster())
+ flags |= TEGRA_POWER_CLUSTER_LP;
+ else
+ flags |= TEGRA_POWER_CLUSTER_G;
+ spin_unlock(&cluster_lock);
+
+fail:
+ TRACE_CLUSTER(("-sysfscluster_init\n"));
+ return e;
+}
+
+#define REMOVE_FILE(x) \
+ sysfs_remove_file(cluster_kobj, &cluster_##x##_attr.attr)
+
+static void __exit sysfscluster_exit(void)
+{
+ TRACE_CLUSTER(("+sysfscluster_exit\n"));
+#if DEBUG_CLUSTER_SWITCH
+ REMOVE_FILE(debug);
+#endif
+#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
+ REMOVE_FILE(powermode);
+#endif
+ REMOVE_FILE(wake_ms);
+ REMOVE_FILE(force);
+ REMOVE_FILE(immediate);
+ REMOVE_FILE(active);
+ kobject_del(cluster_kobj);
+ TRACE_CLUSTER(("-sysfscluster_exit\n"));
+}
+
+module_init(sysfscluster_init);
+module_exit(sysfscluster_exit);
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-tegra/sysfs-dcc.c b/arch/arm/mach-tegra/sysfs-dcc.c
new file mode 100644
index 000000000000..a4dc9a721354
--- /dev/null
+++ b/arch/arm/mach-tegra/sysfs-dcc.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2010-2011 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+
+#define DCC_TIMEOUT_US 100000 /* Delay time for DCC timeout (in uS) */
+#define CP14_DSCR_WDTRFULL 0x20000000 /* Write Data Transfer Register Full */
+#define SYSFS_DCC_DEBUG_PRINTS 0 /* Set non-zero to enable debug prints */
+
+#if SYSFS_DCC_DEBUG_PRINTS
+#define DEBUG_DCC(x) printk x
+#else
+#define DEBUG_DCC(x)
+#endif
+
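+/* This driver creates /sys/kernel/dcc/dcc0. Strings written to that
+ * attribute are queued and pushed to the ARM Debug Communications Channel
+ * (via CP14) from CPU0 so an attached JTAG debugger can read them. If no
+ * debugger drains the channel within DCC_TIMEOUT_US, further DCC output is
+ * disabled. */
+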
+static int DebuggerConnected = 0; /* -1=not connected, 0=unknown, 1=connected */
+static struct kobject *nvdcc_kobj;
+static spinlock_t dcc_lock;
+static struct list_head dcc_list;
+
+static ssize_t sysfsdcc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
+static ssize_t sysfsdcc_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count);
+
+
+static struct kobj_attribute nvdcc_attr =
+ __ATTR(dcc0, 0222, sysfsdcc_show, sysfsdcc_store);
+
+static int write_to_dcc(u32 c)
+{
+ volatile u32 dscr;
+
+ /* Have we already determined that there is no debugger connected? */
+ if (DebuggerConnected < 0)
+ {
+ return -ENXIO;
+ }
+
+ /* Read the DSCR. */
+ asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr) : : "cc");
+
+ /* If DSCR Bit 29 (wDTRFull) is set there is data in the write
+ * register. If it stays there for longer than the DCC_TIMEOUT_US
+ * period, ignore this write and disable further DCC accesses. */
+ if (dscr & CP14_DSCR_WDTRFULL)
+ {
+ ktime_t end = ktime_add_ns(ktime_get(), DCC_TIMEOUT_US * 1000);
+ ktime_t now;
+
+ for (;;)
+ {
+ /* Re-read the DSCR. */
+ asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr) : : "cc");
+
+ /* Previous data still there? */
+ if (dscr & CP14_DSCR_WDTRFULL)
+ {
+ now = ktime_get();
+
+ if (ktime_to_ns(now) >= ktime_to_ns(end))
+ {
+ goto fail;
+ }
+ }
+ else
+ {
+ if (DebuggerConnected == 0) {
+ /* Debugger connected */
+ spin_lock(&dcc_lock);
+ DebuggerConnected = 1;
+ spin_unlock(&dcc_lock);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Write the data into the DCC output register. */
+ asm volatile ("mcr p14, 0, %0, c0, c5, 0" : : "r" (c) : "cc");
+ return 0;
+
+fail:
+ /* No debugger connected -- disable DCC */
+ spin_lock(&dcc_lock);
+ DebuggerConnected = -1;
+ spin_unlock(&dcc_lock);
+ return -ENXIO;
+}
+
+
+struct tegra_dcc_req {
+ struct list_head node;
+
+ const char *pBuf;
+ unsigned int size;
+};
+
+struct dcc_action {
+ struct tegra_dcc_req req;
+ struct work_struct work;
+ struct list_head node;
+};
+
+
+static void dcc_writer(struct work_struct *work)
+{
+ struct dcc_action *action = container_of(work, struct dcc_action, work);
+ const char *p;
+
+ DEBUG_DCC(("+dcc_writer\n"));
+
+ spin_lock(&dcc_lock);
+ list_del(&action->req.node);
+ spin_unlock(&dcc_lock);
+
+ p = action->req.pBuf;
+ if (p)
+ while ((p < &(action->req.pBuf[action->req.size])) && (*p))
+ if (write_to_dcc(*p++))
+ break;
+
+ kfree(action->req.pBuf);
+ kfree(action);
+
+ DEBUG_DCC(("-dcc_writer\n"));
+}
+
+static ssize_t sysfsdcc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ DEBUG_DCC(("!sysfsdcc_show\n"));
+ return -EACCES;
+}
+
+static ssize_t sysfsdcc_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct dcc_action *action;
+ char *pBuf;
+ ssize_t ret = count;
+
+ DEBUG_DCC(("+sysfsdcc_store: %p, %d\n", buf, count));
+
+ if (!buf || !count) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ pBuf = kmalloc(count+1, GFP_KERNEL);
+ if (!pBuf) {
+ pr_debug("%s: insufficient memory\n", __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ action = kzalloc(sizeof(*action), GFP_KERNEL);
+ if (!action) {
+ kfree(pBuf);
+ pr_debug("%s: insufficient memory\n", __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ strncpy(pBuf, buf, count);
+ pBuf[count] = '\0';
+ action->req.pBuf = pBuf;
+ action->req.size = count;
+
+ INIT_WORK(&action->work, dcc_writer);
+
+ spin_lock(&dcc_lock);
+ list_add_tail(&action->req.node, &dcc_list);
+ spin_unlock(&dcc_lock);
+
+ /* DCC writes can only be performed from CPU0 */
+ schedule_work_on(0, &action->work);
+
+fail:
+ DEBUG_DCC(("-sysfsdcc_store: %d\n", count));
+ return ret;
+}
+
+static int __init sysfsdcc_init(void)
+{
+ spin_lock_init(&dcc_lock);
+ INIT_LIST_HEAD(&dcc_list);
+
+ DEBUG_DCC(("+sysfsdcc_init\n"));
+ nvdcc_kobj = kobject_create_and_add("dcc", kernel_kobj);
+
+ if (sysfs_create_file(nvdcc_kobj, &nvdcc_attr.attr))
+ {
+ DEBUG_DCC(("DCC: sysfs_create_file failed!\n"));
+ return -ENXIO;
+ }
+
+ DEBUG_DCC(("-sysfsdcc_init\n"));
+ return 0;
+}
+
+static void __exit sysfsdcc_exit(void)
+{
+ DEBUG_DCC(("+sysfsdcc_exit\n"));
+ sysfs_remove_file(nvdcc_kobj, &nvdcc_attr.attr);
+ kobject_del(nvdcc_kobj);
+ DEBUG_DCC(("-sysfsdcc_exit\n"));
+}
+
+module_init(sysfsdcc_init);
+module_exit(sysfsdcc_exit);
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 0fe9b3ee2947..3457d951f6c7 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -6,6 +6,8 @@
* Author:
* Colin Cross <ccross@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -25,13 +27,16 @@
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
+#include <linux/syscore_ops.h>
+#include <linux/cpufreq.h>
#include <mach/iomap.h>
-#include <mach/suspend.h>
+#include <mach/pinmux.h>
#include "clock.h"
#include "fuse.h"
#include "tegra2_emc.h"
+#include "tegra2_statmon.h"
#define RST_DEVICES 0x004
#define RST_DEVICES_SET 0x300
@@ -149,8 +154,17 @@
#define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16
#define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff
+#define AP25_EMC_BRIDGE_RATE 380000000
+#define AP25_EMC_INTERMEDIATE_RATE 760000000
+#define AP25_EMC_SCALING_STEP 600000000
+
static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+static void __iomem *misc_gp_hidrev_base = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+
+#define MISC_GP_HIDREV 0x804
+
+static int tegra2_clk_shared_bus_update(struct clk *bus);
/*
* Some clocks share a register with other clocks. Any clock op that
@@ -173,6 +187,8 @@ static int tegra_periph_clk_enable_refcount[3 * 32];
__raw_writel(value, (u32)reg_pmc_base + (reg))
#define pmc_readl(reg) \
__raw_readl((u32)reg_pmc_base + (reg))
+#define chipid_readl() \
+ __raw_readl((u32)misc_gp_hidrev_base + MISC_GP_HIDREV)
unsigned long clk_measure_input_freq(void)
{
@@ -221,12 +237,17 @@ static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate)
if (divider_u16 - 1 < 0)
return 0;
- if (divider_u16 - 1 > 255)
+ if (divider_u16 - 1 > 0xFFFF)
return -EINVAL;
return divider_u16 - 1;
}
+static inline int clk_set_div(struct clk *c, int n)
+{
+ return clk_set_rate(c, (clk_get_rate(c->parent) + n-1) / n);
+}
+
/* clk_m functions */
static unsigned long tegra2_clk_m_autodetect_rate(struct clk *c)
{
@@ -278,18 +299,6 @@ static struct clk_ops tegra_clk_m_ops = {
.disable = tegra2_clk_m_disable,
};
-void tegra2_periph_reset_assert(struct clk *c)
-{
- BUG_ON(!c->ops->reset);
- c->ops->reset(c, true);
-}
-
-void tegra2_periph_reset_deassert(struct clk *c)
-{
- BUG_ON(!c->ops->reset);
- c->ops->reset(c, false);
-}
-
/* super clock functions */
/* "super clocks" on tegra have two-stage muxes and a clock skipping
* super divider. We will ignore the clock skipping divider, since we
@@ -382,6 +391,29 @@ static struct clk_ops tegra_super_ops = {
.set_rate = tegra2_super_clk_set_rate,
};
+static int tegra2_twd_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ /* The input value 'rate' is the clock rate of the CPU complex. */
+ c->rate = (rate * c->mul) / c->div;
+ return 0;
+}
+
+static struct clk_ops tegra2_twd_ops = {
+ .set_rate = tegra2_twd_clk_set_rate,
+};
+
+static struct clk tegra2_clk_twd = {
+ /* NOTE: The twd clock must have *NO* parent. Its rate is directly
+ updated by tegra2_cpu_clk_set_rate() because the
+ frequency change notifier for the twd is called in an
+ atomic context which cannot take a mutex. */
+ .name = "twd",
+ .ops = &tegra2_twd_ops,
+ .max_rate = 1000000000, /* Same as tegra_clk_virtual_cpu.max_rate */
+ .mul = 1,
+ .div = 4,
+};
+
/* virtual cpu clock functions */
/* some clocks can not be stopped (cpu, memory bus) while the SoC is running.
To change the frequency of these clocks, the parent pll may need to be
@@ -436,6 +468,12 @@ static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate)
goto out;
}
+ /* We can't parent the twd directly to the CPU complex because
+ the TWD frequency update notifier is called in an atomic context
+ and the CPU frequency update requires a mutex. Update the twd
+ clock rate with the new CPU complex rate. */
+ clk_set_rate(&tegra2_clk_twd, clk_get_rate_locked(c));
+
out:
clk_disable(c->u.cpu.main);
return ret;
@@ -448,6 +486,55 @@ static struct clk_ops tegra_cpu_ops = {
.set_rate = tegra2_cpu_clk_set_rate,
};
+static void tegra2_virtual_sclk_init(struct clk *c)
+{
+ c->max_rate = c->parent->max_rate;
+ c->min_rate = c->parent->min_rate;
+}
+
+static long tegra2_virtual_sclk_round_rate(struct clk *c, unsigned long rate)
+{
+ long new_rate = rate;
+ return new_rate;
+}
+
+static int tegra2_virtual_sclk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+
+ if (rate >= c->u.system.pclk->min_rate * 2) {
+ ret = clk_set_div(c->u.system.pclk, 2);
+ if (ret) {
+ pr_err("Failed to set 1 : 2 pclk divider\n");
+ return ret;
+ }
+ }
+
+ ret = clk_set_rate(c->parent, rate);
+ if (ret) {
+ pr_err("Failed to set sclk source %s to %lu\n",
+ c->parent->name, rate);
+ return ret;
+ }
+
+ if (rate < c->u.system.pclk->min_rate * 2) {
+ ret = clk_set_div(c->u.system.pclk, 1);
+ if (ret) {
+ pr_err("Failed to set 1 : 1 pclk divider\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct clk_ops tegra_virtual_sclk_ops = {
+ .init = tegra2_virtual_sclk_init,
+ .set_rate = tegra2_virtual_sclk_set_rate,
+ .round_rate = tegra2_virtual_sclk_round_rate,
+ .shared_bus_update = tegra2_clk_shared_bus_update,
+};
+
/* virtual cop clock functions. Used to acquire the fake 'cop' clock to
* reset the COP block (i.e. AVP) */
static void tegra2_cop_clk_reset(struct clk *c, bool assert)
@@ -513,7 +600,7 @@ static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate)
val = clk_readl(c->reg);
for (i = 1; i <= 4; i++) {
- if (rate == parent_rate / i) {
+ if (rate >= parent_rate / i) {
val &= ~(BUS_CLK_DIV_MASK << c->reg_shift);
val |= (i - 1) << c->reg_shift;
clk_writel(val, c->reg);
@@ -659,6 +746,12 @@ static int tegra2_pll_clk_enable(struct clk *c)
val |= PLL_BASE_ENABLE;
clk_writel(val, c->reg + PLL_BASE);
+ if (c->flags & PLLD) {
+ val = clk_readl(c->reg + PLL_MISC(c) + PLL_BASE);
+ val |= PLLD_MISC_CLKENABLE;
+ clk_writel(val, c->reg + PLL_MISC(c) + PLL_BASE);
+ }
+
tegra2_pll_clk_wait_for_lock(c);
return 0;
@@ -672,6 +765,12 @@ static void tegra2_pll_clk_disable(struct clk *c)
val = clk_readl(c->reg);
val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
clk_writel(val, c->reg);
+
+ if (c->flags & PLLD) {
+ val = clk_readl(c->reg + PLL_MISC(c) + PLL_BASE);
+ val &= ~PLLD_MISC_CLKENABLE;
+ clk_writel(val, c->reg + PLL_MISC(c) + PLL_BASE);
+ }
}
static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate)
@@ -695,13 +794,25 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate)
PLL_BASE_DIVM_MASK);
val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
(sel->n << PLL_BASE_DIVN_SHIFT);
- BUG_ON(sel->p < 1 || sel->p > 2);
+ BUG_ON(sel->p < 1 || sel->p > 128);
if (c->flags & PLLU) {
if (sel->p == 1)
val |= PLLU_BASE_POST_DIV;
} else {
if (sel->p == 2)
val |= 1 << PLL_BASE_DIVP_SHIFT;
+ else if (sel->p == 4)
+ val |= 2 << PLL_BASE_DIVP_SHIFT;
+ else if (sel->p == 8)
+ val |= 3 << PLL_BASE_DIVP_SHIFT;
+ else if (sel->p == 16)
+ val |= 4 << PLL_BASE_DIVP_SHIFT;
+ else if (sel->p == 32)
+ val |= 5 << PLL_BASE_DIVP_SHIFT;
+ else if (sel->p == 64)
+ val |= 6 << PLL_BASE_DIVP_SHIFT;
+ else if (sel->p == 128)
+ val |= 7 << PLL_BASE_DIVP_SHIFT;
}
clk_writel(val, c->reg + PLL_BASE);
@@ -728,21 +839,6 @@ static struct clk_ops tegra_pll_ops = {
.set_rate = tegra2_pll_clk_set_rate,
};
-static void tegra2_pllx_clk_init(struct clk *c)
-{
- tegra2_pll_clk_init(c);
-
- if (tegra_sku_id() == 7)
- c->max_rate = 750000000;
-}
-
-static struct clk_ops tegra_pllx_ops = {
- .init = tegra2_pllx_clk_init,
- .enable = tegra2_pll_clk_enable,
- .disable = tegra2_pll_clk_disable,
- .set_rate = tegra2_pll_clk_set_rate,
-};
-
static int tegra2_plle_clk_enable(struct clk *c)
{
u32 val;
@@ -999,6 +1095,7 @@ out:
static void tegra2_periph_clk_disable(struct clk *c)
{
unsigned long flags;
+ unsigned long val;
pr_debug("%s on clock %s\n", __func__, c->name);
@@ -1010,9 +1107,16 @@ static void tegra2_periph_clk_disable(struct clk *c)
if (c->refcnt)
tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--;
- if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0)
+ if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0) {
+ /* If peripheral is in the APB bus then read the APB bus to
+ * flush the write operation in apb bus. This will avoid the
+ * peripheral access after disabling clock*/
+ if (c->flags & PERIPH_ON_APB)
+ val = chipid_readl();
+
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
+ }
spin_unlock_irqrestore(&clock_register_lock, flags);
}
@@ -1020,15 +1124,23 @@ static void tegra2_periph_clk_disable(struct clk *c)
static void tegra2_periph_clk_reset(struct clk *c, bool assert)
{
unsigned long base = assert ? RST_DEVICES_SET : RST_DEVICES_CLR;
+ unsigned long val;
pr_debug("%s %s on clock %s\n", __func__,
assert ? "assert" : "deassert", c->name);
BUG_ON(!c->u.periph.clk_num);
- if (!(c->flags & PERIPH_NO_RESET))
+ if (!(c->flags & PERIPH_NO_RESET)) {
+ /* If the peripheral is on the APB bus, read the APB bus to
+ * flush the pending write operation. This avoids accessing
+ * the peripheral after its clock has been disabled. */
+ if (c->flags & PERIPH_ON_APB)
+ val = chipid_readl();
+
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
base + PERIPH_CLK_TO_ENB_SET_REG(c));
+ }
}
static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
@@ -1039,8 +1151,8 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
for (sel = c->inputs; sel->input != NULL; sel++) {
if (sel->input == p) {
val = clk_readl(c->reg);
- val &= ~PERIPH_CLK_SOURCE_MASK;
- val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
+ val &= ~((c->reg_shift >> 8) << (c->reg_shift & 0xFF));
+ val |= (sel->value) << (c->reg_shift & 0xFF);
if (c->refcnt)
clk_enable(p);
@@ -1156,14 +1268,35 @@ static long tegra2_emc_clk_round_rate(struct clk *c, unsigned long rate)
if (new_rate < 0)
return c->max_rate;
- BUG_ON(new_rate != tegra2_periph_clk_round_rate(c, new_rate));
-
return new_rate;
}
static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate)
{
int ret;
+ int divider;
+ struct clk *p = NULL;
+ unsigned long inp_rate;
+ unsigned long new_rate;
+ const struct clk_mux_sel *sel;
+
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ inp_rate = clk_get_rate(sel->input);
+
+ divider = clk_div71_get_divider(inp_rate, rate);
+ if (divider < 0)
+ return divider;
+
+ new_rate = DIV_ROUND_UP(inp_rate * 2, divider + 2);
+ if ((abs(rate - new_rate)) < 2000) {
+ p = sel->input;
+ break;
+ }
+ }
+
+ BUG_ON(!p);
+ BUG_ON(divider & 0x1);
+
/*
* The Tegra2 memory controller has an interlock with the clock
* block that allows memory shadowed registers to be updated,
@@ -1174,6 +1307,13 @@ static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate)
if (ret < 0)
return ret;
+ if (c->parent != p) {
+ BUG_ON(divider != 0);
+ ret = clk_set_parent_locked(c, p);
+ udelay(1);
+ return ret;
+ }
+
ret = tegra2_periph_clk_set_rate(c, rate);
udelay(1);
@@ -1188,6 +1328,7 @@ static struct clk_ops tegra_emc_clk_ops = {
.set_rate = &tegra2_emc_clk_set_rate,
.round_rate = &tegra2_emc_clk_round_rate,
.reset = &tegra2_periph_clk_reset,
+ .shared_bus_update = &tegra2_clk_shared_bus_update,
};
/* Clock doubler ops */
@@ -1280,10 +1421,38 @@ static struct clk_ops tegra_audio_sync_clk_ops = {
.set_parent = tegra2_audio_sync_clk_set_parent,
};
-/* cdev1 and cdev2 (dap_mclk1 and dap_mclk2) ops */
+/* call this function after pinmux configuration */
+static void tegra2_cdev_clk_set_parent(struct clk *c)
+{
+ const struct clk_mux_sel *mux = 0;
+ const struct clk_mux_sel *sel;
+ enum tegra_pingroup pg = TEGRA_PINGROUP_CDEV1;
+ int val;
+
+ /* Get pinmux setting for cdev1 and cdev2 from APB_MISC register */
+ if (!strcmp(c->name, "cdev2"))
+ pg = TEGRA_PINGROUP_CDEV2;
+
+ val = tegra_pinmux_get_func(pg);
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (val == sel->value)
+ mux = sel;
+ }
+ BUG_ON(!mux);
+
+ c->parent = mux->input;
+}
+/* cdev1 and cdev2 (dap_mclk1 and dap_mclk2) ops */
static void tegra2_cdev_clk_init(struct clk *c)
{
+ const struct clk_mux_sel *sel;
+
+ /* Find max rate from inputs */
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ c->max_rate = max(sel->input->max_rate, c->max_rate);
+ }
+
/* We could un-tristate the cdev1 or cdev2 pingroup here; this is
* currently done in the pinmux code. */
c->state = ON;
@@ -1299,6 +1468,12 @@ static int tegra2_cdev_clk_enable(struct clk *c)
{
BUG_ON(!c->u.periph.clk_num);
+ if (!c->parent) {
+ /* Set parent from inputs */
+ tegra2_cdev_clk_set_parent(c);
+ clk_enable(c->parent);
+ }
+
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
return 0;
@@ -1326,18 +1501,41 @@ static struct clk_ops tegra_cdev_clk_ops = {
* enabled shared_bus_user clock, with a minimum value set by the
* shared bus.
*/
-static int tegra_clk_shared_bus_update(struct clk *bus)
+static int tegra2_clk_shared_bus_update(struct clk *bus)
{
struct clk *c;
+ unsigned long old_rate;
unsigned long rate = bus->min_rate;
+ int sku_id = tegra_sku_id();
- list_for_each_entry(c, &bus->shared_bus_list, u.shared_bus_user.node)
+ list_for_each_entry(c, &bus->shared_bus_list,
+ u.shared_bus_user.node) {
if (c->u.shared_bus_user.enabled)
rate = max(c->u.shared_bus_user.rate, rate);
+ }
+
+ old_rate = clk_get_rate_locked(bus);
- if (rate == clk_get_rate_locked(bus))
+ if (rate == old_rate)
return 0;
+ /* WAR: For AP25 EMC scaling */
+ if ((sku_id == 0x17) && (bus->flags & PERIPH_EMC_ENB)) {
+ if (old_rate == AP25_EMC_SCALING_STEP &&
+ rate != AP25_EMC_INTERMEDIATE_RATE)
+ clk_set_rate_locked(bus, AP25_EMC_INTERMEDIATE_RATE);
+
+ if (((old_rate > AP25_EMC_BRIDGE_RATE) &&
+ (rate < AP25_EMC_BRIDGE_RATE)) ||
+ ((old_rate < AP25_EMC_BRIDGE_RATE) &&
+ (rate > AP25_EMC_BRIDGE_RATE)))
+ clk_set_rate_locked(bus, AP25_EMC_BRIDGE_RATE);
+
+ if (rate == AP25_EMC_SCALING_STEP &&
+ old_rate != AP25_EMC_INTERMEDIATE_RATE)
+ clk_set_rate_locked(bus, AP25_EMC_INTERMEDIATE_RATE);
+ }
+
return clk_set_rate_locked(bus, rate);
};
@@ -1350,17 +1548,16 @@ static void tegra_clk_shared_bus_init(struct clk *c)
c->state = OFF;
c->set = true;
- spin_lock_irqsave(&c->parent->spinlock, flags);
+ clk_lock_save(c->parent, &flags);
list_add_tail(&c->u.shared_bus_user.node,
&c->parent->shared_bus_list);
- spin_unlock_irqrestore(&c->parent->spinlock, flags);
+ clk_unlock_restore(c->parent, &flags);
}
static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
{
- unsigned long flags;
int ret;
long new_rate = rate;
@@ -1368,13 +1565,9 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
if (new_rate < 0)
return new_rate;
- spin_lock_irqsave(&c->parent->spinlock, flags);
-
c->u.shared_bus_user.rate = new_rate;
ret = tegra_clk_shared_bus_update(c->parent);
- spin_unlock_irqrestore(&c->parent->spinlock, flags);
-
return ret;
}
@@ -1385,31 +1578,25 @@ static long tegra_clk_shared_bus_round_rate(struct clk *c, unsigned long rate)
static int tegra_clk_shared_bus_enable(struct clk *c)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&c->parent->spinlock, flags);
-
c->u.shared_bus_user.enabled = true;
ret = tegra_clk_shared_bus_update(c->parent);
-
- spin_unlock_irqrestore(&c->parent->spinlock, flags);
+ if (strcmp(c->name, "avp.sclk") == 0)
+ tegra2_statmon_start();
return ret;
}
static void tegra_clk_shared_bus_disable(struct clk *c)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&c->parent->spinlock, flags);
-
+ if (strcmp(c->name, "avp.sclk") == 0)
+ tegra2_statmon_stop();
c->u.shared_bus_user.enabled = false;
ret = tegra_clk_shared_bus_update(c->parent);
WARN_ON_ONCE(ret);
-
- spin_unlock_irqrestore(&c->parent->spinlock, flags);
}
static struct clk_ops tegra_clk_shared_bus_ops = {
@@ -1473,6 +1660,14 @@ static struct clk tegra_clk_m = {
};
static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
+ { 12000000, 522000000, 348, 8, 1, 8},
+ { 13000000, 522000000, 522, 13, 1, 8},
+ { 19200000, 522000000, 435, 16, 1, 8},
+ { 26000000, 522000000, 522, 26, 1, 8},
+ { 12000000, 598000000, 598, 12, 1, 8},
+ { 13000000, 598000000, 598, 13, 1, 8},
+ { 19200000, 598000000, 375, 12, 1, 6},
+ { 26000000, 598000000, 598, 26, 1, 8},
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1659,6 +1854,11 @@ static struct clk_pll_freq_table tegra_pll_d_freq_table[] = {
{ 19200000, 216000000, 135, 12, 1, 3},
{ 26000000, 216000000, 216, 26, 1, 4},
+ { 12000000, 5000000, 10, 24, 1, 4},
+ { 12000000, 10000000, 10, 12, 1, 4},
+ { 12000000, 161500000, 323, 24, 1, 4},
+ { 12000000, 162000000, 162, 12, 1, 4},
+
{ 12000000, 594000000, 594, 12, 1, 8},
{ 13000000, 594000000, 594, 13, 1, 8},
{ 19200000, 594000000, 495, 16, 1, 8},
@@ -1669,6 +1869,11 @@ static struct clk_pll_freq_table tegra_pll_d_freq_table[] = {
{ 19200000, 1000000000, 625, 12, 1, 8},
{ 26000000, 1000000000, 1000, 26, 1, 12},
+ { 12000000, 504000000, 504, 12, 1, 8},
+ { 13000000, 504000000, 504, 13, 1, 8},
+ { 19200000, 504000000, 420, 16, 1, 8},
+ { 26000000, 504000000, 504, 26, 1, 8},
+
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1727,6 +1932,12 @@ static struct clk tegra_pll_u = {
};
static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
+ /* 1.2 GHz */
+ { 12000000, 1200000000, 600, 6, 1, 12},
+ { 13000000, 1200000000, 923, 10, 1, 12},
+ { 19200000, 1200000000, 750, 12, 1, 8},
+ { 26000000, 1200000000, 600, 13, 1, 12},
+
/* 1 GHz */
{ 12000000, 1000000000, 1000, 12, 1, 12},
{ 13000000, 1000000000, 1000, 13, 1, 12},
@@ -1751,6 +1962,12 @@ static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
{ 19200000, 760000000, 950, 24, 1, 8},
{ 26000000, 760000000, 760, 26, 1, 12},
+ /* 750 MHz */
+ { 12000000, 750000000, 750, 12, 1, 12},
+ { 13000000, 750000000, 750, 13, 1, 12},
+ { 19200000, 750000000, 625, 16, 1, 8},
+ { 26000000, 750000000, 750, 26, 1, 12},
+
/* 608 MHz */
{ 12000000, 608000000, 608, 12, 1, 12},
{ 13000000, 608000000, 608, 13, 1, 12},
@@ -1775,7 +1992,7 @@ static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
static struct clk tegra_pll_x = {
.name = "pll_x",
.flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG,
- .ops = &tegra_pllx_ops,
+ .ops = &tegra_pll_ops,
.reg = 0xe0,
.parent = &tegra_clk_m,
.max_rate = 1000000000,
@@ -1823,28 +2040,6 @@ static struct clk tegra_clk_d = {
},
};
-/* dap_mclk1, belongs to the cdev1 pingroup. */
-static struct clk tegra_clk_cdev1 = {
- .name = "cdev1",
- .ops = &tegra_cdev_clk_ops,
- .rate = 26000000,
- .max_rate = 26000000,
- .u.periph = {
- .clk_num = 94,
- },
-};
-
-/* dap_mclk2, belongs to the cdev2 pingroup. */
-static struct clk tegra_clk_cdev2 = {
- .name = "cdev2",
- .ops = &tegra_cdev_clk_ops,
- .rate = 26000000,
- .max_rate = 26000000,
- .u.periph = {
- .clk_num = 93,
- },
-};
-
/* initialized before peripheral clocks */
static struct clk_mux_sel mux_audio_sync_clk[8+1];
static const struct audio_sources {
@@ -1955,7 +2150,7 @@ static struct clk tegra_clk_sclk = {
.reg = 0x28,
.ops = &tegra_super_ops,
.max_rate = 240000000,
- .min_rate = 120000000,
+ .min_rate = 40000000,
};
static struct clk tegra_clk_virtual_cpu = {
@@ -1984,6 +2179,7 @@ static struct clk tegra_clk_hclk = {
.reg_shift = 4,
.ops = &tegra_bus_ops,
.max_rate = 240000000,
+ .min_rate = 36000000,
};
static struct clk tegra_clk_pclk = {
@@ -1994,6 +2190,16 @@ static struct clk tegra_clk_pclk = {
.reg_shift = 0,
.ops = &tegra_bus_ops,
.max_rate = 120000000,
+ .min_rate = 36000000,
+};
+
+static struct clk tegra_clk_virtual_sclk = {
+ .name = "virt_sclk",
+ .parent = &tegra_clk_sclk,
+ .ops = &tegra_virtual_sclk_ops,
+ .u.system = {
+ .pclk = &tegra_clk_pclk,
+ },
};
static struct clk tegra_clk_blink = {
@@ -2003,6 +2209,43 @@ static struct clk tegra_clk_blink = {
.ops = &tegra_blink_clk_ops,
.max_rate = 32768,
};
+static struct clk_mux_sel mux_dev1_clk[] = {
+ { .input = &tegra_clk_m, .value = 0 },
+ { .input = &tegra_pll_a_out0, .value = 1 },
+ { .input = &tegra_pll_m_out1, .value = 2 },
+ { .input = &tegra_clk_audio, .value = 3 },
+ { 0, 0 }
+};
+
+static struct clk_mux_sel mux_dev2_clk[] = {
+ { .input = &tegra_clk_m, .value = 0 },
+ { .input = &tegra_clk_hclk, .value = 1 },
+ { .input = &tegra_clk_pclk, .value = 2 },
+ { .input = &tegra_pll_p_out4, .value = 3 },
+ { 0, 0 }
+};
+
+/* dap_mclk1, belongs to the cdev1 pingroup. */
+static struct clk tegra_clk_cdev1 = {
+ .name = "cdev1",
+ .ops = &tegra_cdev_clk_ops,
+ .inputs = mux_dev1_clk,
+ .u.periph = {
+ .clk_num = 94,
+ },
+ .flags = MUX,
+};
+
+/* dap_mclk2, belongs to the cdev2 pingroup. */
+static struct clk tegra_clk_cdev2 = {
+ .name = "cdev2",
+ .ops = &tegra_cdev_clk_ops,
+ .inputs = mux_dev2_clk,
+ .u.periph = {
+ .clk_num = 93,
+ },
+ .flags = MUX,
+};
static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = {
{ .input = &tegra_pll_m, .value = 0},
@@ -2070,8 +2313,8 @@ static struct clk_mux_sel mux_pllp_out3[] = {
{ 0, 0},
};
-static struct clk_mux_sel mux_plld[] = {
- { .input = &tegra_pll_d, .value = 0},
+static struct clk_mux_sel mux_plld_out0[] = {
+ { .input = &tegra_pll_d_out0, .value = 0},
{ 0, 0},
};
@@ -2097,7 +2340,7 @@ static struct clk tegra_clk_emc = {
},
};
-#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \
+#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _reg_shift, _max, _inputs, _flags) \
{ \
.name = _name, \
.lookup = { \
@@ -2106,6 +2349,7 @@ static struct clk tegra_clk_emc = {
}, \
.ops = &tegra_periph_clk_ops, \
.reg = _reg, \
+ .reg_shift = _reg_shift, \
.inputs = _inputs, \
.flags = _flags, \
.max_rate = _max, \
@@ -2125,83 +2369,92 @@ static struct clk tegra_clk_emc = {
.parent = _parent, \
}
-struct clk tegra_list_clks[] = {
- PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 108000000, mux_pclk, 0),
- PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET),
- PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
- PERIPH_CLK("i2s1", "tegra-i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2s2", "tegra-i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71),
- PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
- PERIPH_CLK("spi", "spi", NULL, 43, 0x114, 40000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("xio", "xio", NULL, 45, 0x120, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0),
- PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0),
- PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0),
- PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
+struct clk tegra_list_periph_clks[] = {
+ PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 0x31E, 108000000, mux_pclk, 0),
+ PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 0x31E, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB),
+ PERIPH_CLK("kbc", "tegra-kbc", NULL, 36, 0, 0x31E, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB),
+ PERIPH_CLK("timer", "timer", NULL, 5, 0, 0x31E, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("i2s1", "tegra20-i2s.0", NULL, 11, 0x100, 0x31E, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("i2s2", "tegra20-i2s.1", NULL, 18, 0x104, 0x31E, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("kfuse", "kfuse-tegra", NULL, 40, 0, 0x31E, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("spdif_out", "tegra20-spdif", "spdif_out", 10, 0x108, 0x31E, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("spdif_in", "tegra20-spdif", "spdif_in", 10, 0x10c, 0x31E, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 0x71C, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("spi", "spi", NULL, 43, 0x114, 0x31E, 40000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("xio", "xio", NULL, 45, 0x120, 0x31E, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, 0x31E, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, 0x31E, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, 0x31E, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, 0x31E, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 0x31E, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 0x31E, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 0x31E, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 0x31E, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 0x31E, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 0x31E, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 0x31E, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 0x31E, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 0x31E, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 0x31E, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 0x31E, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 0x31E, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 0x31E, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
/* FIXME: what is la? */
- PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("owr", "tegra_w1", NULL, 71, 0x1cc, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("nor", "nor", NULL, 42, 0x1d0, 92000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, 60000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
- PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
- PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
- PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
- PERIPH_CLK("dvc", "tegra-i2c.3", NULL, 47, 0x128, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
- PERIPH_CLK("i2c1_i2c", "tegra-i2c.0", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */
- PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */
- PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
- PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
- PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_plld_pllc_clkm, MUX), /* scales with voltage and process_id */
- PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_plld_pllc_clkm, MUX), /* scales with voltage and process_id */
- PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
- PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
- PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
- PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */
- PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */
- PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET),
- PERIPH_CLK("pex", NULL, "pex", 70, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
- PERIPH_CLK("afi", NULL, "afi", 72, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
- PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
-
- SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sclk),
+ PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 0x31E, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("owr", "tegra_w1", NULL, 71, 0x1cc, 0x31E, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("nor", "tegra-nor", NULL, 42, 0x1d0, 0x31E, 92000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, 0x31E, 60000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), /* scales with voltage */
+ PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, 0x31E, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, 0x31E, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, 0x31E, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("dvc", "tegra-i2c.3", NULL, 47, 0x128, 0x31E, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("uarta", "tegra_uart.0", NULL, 6, 0x178, 0x31E, 600000000, mux_pllp_pllc_pllm_clkm, MUX | PERIPH_ON_APB),
+ PERIPH_CLK("uartb", "tegra_uart.1", NULL, 7, 0x17c, 0x31E, 600000000, mux_pllp_pllc_pllm_clkm, MUX | PERIPH_ON_APB),
+ PERIPH_CLK("uartc", "tegra_uart.2", NULL, 55, 0x1a0, 0x31E, 600000000, mux_pllp_pllc_pllm_clkm, MUX | PERIPH_ON_APB),
+ PERIPH_CLK("uartd", "tegra_uart.3", NULL, 65, 0x1c0, 0x31E, 600000000, mux_pllp_pllc_pllm_clkm, MUX | PERIPH_ON_APB),
+ PERIPH_CLK("uarte", "tegra_uart.4", NULL, 66, 0x1c4, 0x31E, 600000000, mux_pllp_pllc_pllm_clkm, MUX | PERIPH_ON_APB),
+ PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 0x31E, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 0x31E, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 0x31E, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 0x31E, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 0x31E, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 0x31E, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 0x31E, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 0x31E, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 0x31E, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 0x31E, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 0x31E, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 0x31E, 600000000, mux_pllp_plld_pllc_clkm, MUX), /* scales with voltage and process_id */
+ PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 0x31E, 600000000, mux_pllp_plld_pllc_clkm, MUX), /* scales with voltage and process_id */
+ PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 0x31E, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 0x31E, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 0x31E, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("dsia", "tegradc.0", "dsia", 48, 0, 0x31E, 500000000, mux_plld_out0, 0), /* scales with voltage */
+ PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 0x31E, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 0x31E, 150000000, mux_clk_m, 0), /* same frequency as VI */
+ PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 0x31E, 150000000, mux_clk_m, PERIPH_NO_RESET),
+ PERIPH_CLK("pex", NULL, "pex", 70, 0, 0x31E, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
+ PERIPH_CLK("afi", NULL, "afi", 72, 0, 0x31E, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
+ PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 0x31E, 26000000, mux_clk_m, PERIPH_MANUAL_RESET),
+ PERIPH_CLK("stat_mon", "tegra-stat-mon", NULL, 37, 0, 0x31E, 26000000, mux_clk_m, 0),
+};
+
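+/*
+ * Shared bus user clocks: each entry gives a device its own handle on a
+ * common parent (virtual sclk or emc); the parent is expected to run at the
+ * highest rate requested by its enabled users.
+ */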
+struct clk tegra_list_shared_clks[] = {
+ SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_virtual_sclk),
+ SHARED_CLK("mon.sclk", "tegra-stat-mon", "sclk", &tegra_clk_virtual_sclk),
+ SHARED_CLK("bsea.sclk", "tegra-aes", "sclk", &tegra_clk_virtual_sclk),
+ SHARED_CLK("usbd.sclk", "fsl-tegra-udc", "sclk", &tegra_clk_virtual_sclk),
+ SHARED_CLK("usb1.sclk", "tegra-ehci.0", "sclk", &tegra_clk_virtual_sclk),
+ SHARED_CLK("usb2.sclk", "tegra-ehci.1", "sclk", &tegra_clk_virtual_sclk),
+ SHARED_CLK("usb3.sclk", "tegra-ehci.2", "sclk", &tegra_clk_virtual_sclk),
SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc),
SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc),
SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc),
SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc),
SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc),
- SHARED_CLK("host.emc", "tegra_grhost", "emc", &tegra_clk_emc),
+ SHARED_CLK("3d.emc", "tegra_gr3d", "emc", &tegra_clk_emc),
+ SHARED_CLK("2d.emc", "tegra_gr2d", "emc", &tegra_clk_emc),
+ SHARED_CLK("mpe.emc", "tegra_mpe", "emc", &tegra_clk_emc),
SHARED_CLK("usbd.emc", "fsl-tegra-udc", "emc", &tegra_clk_emc),
SHARED_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc),
SHARED_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc),
@@ -2222,27 +2475,30 @@ struct clk tegra_list_clks[] = {
* table under two names.
*/
struct clk_duplicate tegra_clk_duplicates[] = {
- CLK_DUPLICATE("uarta", "tegra_uart.0", NULL),
- CLK_DUPLICATE("uartb", "tegra_uart.1", NULL),
- CLK_DUPLICATE("uartc", "tegra_uart.2", NULL),
- CLK_DUPLICATE("uartd", "tegra_uart.3", NULL),
- CLK_DUPLICATE("uarte", "tegra_uart.4", NULL),
+ CLK_DUPLICATE("uarta", "serial8250.0", "uarta"),
+ CLK_DUPLICATE("uartb", "serial8250.0", "uartb"),
+ CLK_DUPLICATE("uartc", "serial8250.0", "uartc"),
+ CLK_DUPLICATE("uartd", "serial8250.0", "uartd"),
+ CLK_DUPLICATE("uarte", "serial8250.0", "uarte"),
CLK_DUPLICATE("usbd", "utmip-pad", NULL),
CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
CLK_DUPLICATE("usbd", "tegra-otg", NULL),
CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"),
CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
+ CLK_DUPLICATE("dsia", "tegradc.1", "dsia"),
CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL),
CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL),
CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL),
CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL),
- CLK_DUPLICATE("host1x", "tegra_grhost", "host1x"),
- CLK_DUPLICATE("2d", "tegra_grhost", "gr2d"),
- CLK_DUPLICATE("3d", "tegra_grhost", "gr3d"),
- CLK_DUPLICATE("epp", "tegra_grhost", "epp"),
- CLK_DUPLICATE("mpe", "tegra_grhost", "mpe"),
+ CLK_DUPLICATE("host1x", "tegra_host1x", "host1x"),
+ CLK_DUPLICATE("2d", "tegra_gr2d", "gr2d"),
+ CLK_DUPLICATE("3d", "tegra_gr3d", "gr3d"),
+ CLK_DUPLICATE("epp", "tegra_gr2d", "epp"),
+ CLK_DUPLICATE("mpe", "tegra_mpe", "mpe"),
CLK_DUPLICATE("cop", "tegra-avp", "cop"),
CLK_DUPLICATE("vde", "tegra-aes", "vde"),
+ CLK_DUPLICATE("twd", "smp_twd", NULL),
+ CLK_DUPLICATE("bsea", "tegra-aes", "bsea"),
};
#define CLK(dev, con, ck) \
@@ -2280,11 +2536,80 @@ struct clk *tegra_ptr_clks[] = {
&tegra_clk_cdev1,
&tegra_clk_cdev2,
&tegra_clk_virtual_cpu,
+ &tegra_clk_virtual_sclk,
&tegra_clk_blink,
&tegra_clk_cop,
&tegra_clk_emc,
+ &tegra2_clk_twd,
+};
+
+/* For some clocks, the maximum rate limit depends on the tegra2 SKU */
+#define RATE_LIMIT(_name, _max_rate, _skus...) \
+ { \
+ .clk_name = _name, \
+ .max_rate = _max_rate, \
+ .sku_ids = {_skus} \
+ }
+
+static struct tegra_sku_rate_limit sku_limits[] =
+{
+ RATE_LIMIT("cpu", 750000000, 0x07, 0x10),
+ RATE_LIMIT("cclk", 750000000, 0x07, 0x10),
+ RATE_LIMIT("pll_x", 750000000, 0x07, 0x10),
+
+ RATE_LIMIT("cpu", 1000000000, 0x04, 0x08, 0x0F),
+ RATE_LIMIT("cclk", 1000000000, 0x04, 0x08, 0x0F),
+ RATE_LIMIT("pll_x", 1000000000, 0x04, 0x08, 0x0F),
+
+ RATE_LIMIT("cpu", 1200000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("cclk", 1200000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("pll_x", 1200000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+
+ RATE_LIMIT("sclk", 240000000, 0x04, 0x7, 0x08, 0x0F, 0x10),
+ RATE_LIMIT("hclk", 240000000, 0x04, 0x7, 0x08, 0x0F, 0x10),
+ RATE_LIMIT("vde", 240000000, 0x04, 0x7, 0x08, 0x0F, 0x10),
+ RATE_LIMIT("3d", 300000000, 0x04, 0x7, 0x08, 0x0F, 0x10),
+
+ RATE_LIMIT("host1x", 108000000, 0x0F),
+
+ RATE_LIMIT("sclk", 300000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("virt_sclk", 300000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("hclk", 300000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("pclk", 150000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("vde", 300000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("3d", 400000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+
+ RATE_LIMIT("uarta", 800000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("uartb", 800000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("uartc", 800000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("uartd", 800000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
+ RATE_LIMIT("uarte", 800000000, 0x14, 0x17, 0x18, 0x1B, 0x1C),
};
+static void tegra2_init_sku_limits(void)
+{
+ int i, j;
+ struct clk *c;
+ int sku_id = tegra_sku_id();
+
+ for (i = 0; i < ARRAY_SIZE(sku_limits); i++) {
+ struct tegra_sku_rate_limit *limit = &sku_limits[i];
+
+ for (j = 0; (j < MAX_SAME_LIMIT_SKU_IDS) &&
+ (limit->sku_ids[j] != 0); j++) {
+ if (limit->sku_ids[j] == sku_id) {
+ c = tegra_get_clock_by_name(limit->clk_name);
+ if (!c) {
+ pr_err("%s: Unknown sku clock %s\n",
+ __func__, limit->clk_name);
+ continue;
+ }
+ c->max_rate = limit->max_rate;
+ }
+ }
+ }
+}
+
static void tegra2_init_one_clock(struct clk *c)
{
clk_init(c);
@@ -2295,37 +2620,92 @@ static void tegra2_init_one_clock(struct clk *c)
clkdev_add(&c->lookup);
}
-void __init tegra2_init_clocks(void)
-{
- int i;
- struct clk *c;
+#ifdef CONFIG_CPU_FREQ
- for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++)
- tegra2_init_one_clock(tegra_ptr_clks[i]);
-
- for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++)
- tegra2_init_one_clock(&tegra_list_clks[i]);
-
- for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) {
- c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name);
- if (!c) {
- pr_err("%s: Unknown duplicate clock %s\n", __func__,
- tegra_clk_duplicates[i].name);
- continue;
- }
+/*
+ * Frequency table index must be sequential starting at 0 and frequencies
+ * must be ascending.
+ */
- tegra_clk_duplicates[i].lookup.clk = c;
- clkdev_add(&tegra_clk_duplicates[i].lookup);
+static struct cpufreq_frequency_table freq_table_750MHz[] = {
+ { 0, 216000 },
+ { 1, 312000 },
+ { 2, 456000 },
+ { 3, 608000 },
+ { 4, 750000 },
+ { 5, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p0GHz[] = {
+ { 0, 216000 },
+ { 1, 312000 },
+ { 2, 456000 },
+ { 3, 608000 },
+ { 4, 760000 },
+ { 5, 816000 },
+ { 6, 912000 },
+ { 7, 1000000 },
+ { 8, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p2GHz[] = {
+ { 0, 216000 },
+ { 1, 312000 },
+ { 2, 456000 },
+ { 3, 608000 },
+ { 4, 760000 },
+ { 5, 816000 },
+ { 6, 912000 },
+ { 7, 1000000 },
+ { 8, 1200000 },
+ { 9, CPUFREQ_TABLE_END },
+};
+
+static struct tegra_cpufreq_table_data cpufreq_tables[] = {
+ { freq_table_750MHz, 1, 4 },
+ { freq_table_1p0GHz, 2, 6 },
+ { freq_table_1p2GHz, 2, 7 },
+};
+
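+/*
+ * Pick the frequency table whose top entry matches the SKU-limited maximum
+ * CPU clock rate (policy.max is in kHz, clk max_rate in Hz).
+ */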
+struct tegra_cpufreq_table_data *tegra_cpufreq_table_get(void)
+{
+ int i, ret;
+ struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
+
+ for (i = 0; i < ARRAY_SIZE(cpufreq_tables); i++) {
+ struct cpufreq_policy policy;
+ ret = cpufreq_frequency_table_cpuinfo(
+ &policy, cpufreq_tables[i].freq_table);
+ BUG_ON(ret);
+ if ((policy.max * 1000) == cpu_clk->max_rate)
+ return &cpufreq_tables[i];
}
+ pr_err("%s: No cpufreq table matching cpu range", __func__);
+ BUG();
+ return &cpufreq_tables[0];
+}
- init_audio_sync_clock_mux();
+unsigned long tegra_emc_to_cpu_ratio(unsigned long cpu_rate)
+{
+ /* Vote on memory bus frequency based on cpu frequency */
+ if (cpu_rate >= 816000)
+ return 600000000; /* cpu 816 MHz, emc max */
+ else if (cpu_rate >= 608000)
+ return 300000000; /* cpu 608 MHz, emc 150 MHz */
+ else if (cpu_rate >= 456000)
+ return 150000000; /* cpu 456 MHz, emc 75 MHz */
+ else if (cpu_rate >= 312000)
+ return 100000000; /* cpu 312 MHz, emc 50 MHz */
+ else
+ return 50000000; /* emc 25 MHz */
}
+#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM +
PERIPH_CLK_SOURCE_NUM + 22];
-void tegra_clk_suspend(void)
+static int tegra_clk_suspend(void)
{
unsigned long off, i;
u32 *ctx = clk_rst_suspend;
@@ -2374,9 +2754,11 @@ void tegra_clk_suspend(void)
*ctx++ = clk_readl(CLK_MASK_ARM);
BUG_ON(ctx - clk_rst_suspend != ARRAY_SIZE(clk_rst_suspend));
+
+ return 0;
}
-void tegra_clk_resume(void)
+static void tegra_clk_resume(void)
{
unsigned long off, i;
const u32 *ctx = clk_rst_suspend;
@@ -2438,4 +2820,45 @@ void tegra_clk_resume(void)
clk_writel(*ctx++, MISC_CLK_ENB);
clk_writel(*ctx++, CLK_MASK_ARM);
}
+
+#else
+#define tegra_clk_suspend NULL
+#define tegra_clk_resume NULL
#endif
+
+static struct syscore_ops tegra_clk_syscore_ops = {
+ .suspend = tegra_clk_suspend,
+ .resume = tegra_clk_resume,
+};
+
+void __init tegra_soc_init_clocks(void)
+{
+ int i;
+ struct clk *c;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++)
+ tegra2_init_one_clock(tegra_ptr_clks[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_list_periph_clks); i++)
+ tegra2_init_one_clock(&tegra_list_periph_clks[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) {
+ c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name);
+ if (!c) {
+ pr_err("%s: Unknown duplicate clock %s\n", __func__,
+ tegra_clk_duplicates[i].name);
+ continue;
+ }
+
+ tegra_clk_duplicates[i].lookup.clk = c;
+ clkdev_add(&tegra_clk_duplicates[i].lookup);
+ }
+
+ init_audio_sync_clock_mux();
+ tegra2_init_sku_limits();
+
+ for (i = 0; i < ARRAY_SIZE(tegra_list_shared_clks); i++)
+ tegra2_init_one_clock(&tegra_list_shared_clks[i]);
+
+ register_syscore_ops(&tegra_clk_syscore_ops);
+}
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c
new file mode 100644
index 000000000000..a864184140e6
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_dvfs.c
@@ -0,0 +1,357 @@
+/*
+ * arch/arm/mach-tegra/tegra2_dvfs.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+#include "clock.h"
+#include "dvfs.h"
+#include "fuse.h"
+
+#ifdef CONFIG_TEGRA_CORE_DVFS
+static bool tegra_dvfs_core_disabled;
+#else
+static bool tegra_dvfs_core_disabled = true;
+#endif
+#ifdef CONFIG_TEGRA_CPU_DVFS
+static bool tegra_dvfs_cpu_disabled;
+#else
+static bool tegra_dvfs_cpu_disabled = true;
+#endif
+
+static const int core_millivolts[MAX_DVFS_FREQS] =
+ {950, 1000, 1100, 1200, 1225, 1275, 1300};
+static const int cpu_millivolts[MAX_DVFS_FREQS] =
+ {750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1100, 1125};
+
+static const int cpu_speedo_nominal_millivolts[] =
+/* speedo_id 0, 1, 2 */
+ { 1100, 1025, 1125 };
+
+static const int core_speedo_nominal_millivolts[] =
+/* speedo_id 0, 1, 2 */
+ { 1225, 1225, 1300 };
+
+#define KHZ 1000
+#define MHZ 1000000
+
+static struct dvfs_rail tegra2_dvfs_rail_vdd_cpu = {
+ .reg_id = "vdd_cpu",
+ .max_millivolts = 1125,
+ .min_millivolts = 750,
+ .nominal_millivolts = 1125,
+};
+
+static struct dvfs_rail tegra2_dvfs_rail_vdd_core = {
+ .reg_id = "vdd_core",
+ .max_millivolts = 1300,
+ .min_millivolts = 950,
+ .nominal_millivolts = 1225,
+ .step = 150, /* step vdd_core by 150 mV to allow vdd_aon to follow */
+};
+
+static struct dvfs_rail tegra2_dvfs_rail_vdd_aon = {
+ .reg_id = "vdd_aon",
+ .max_millivolts = 1300,
+ .min_millivolts = 950,
+ .nominal_millivolts = 1225,
+#ifndef CONFIG_TEGRA_CORE_DVFS
+ .disabled = true,
+#endif
+};
+
+/* vdd_core and vdd_aon must be 120 mV higher than vdd_cpu */
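+/*
+ * If vdd_cpu is about to rise, raise vdd_core first so the 120 mV margin
+ * holds during the transition; otherwise just keep vdd_core at least 120 mV
+ * above the current vdd_cpu level.
+ */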
+static int tegra2_dvfs_rel_vdd_cpu_vdd_core(struct dvfs_rail *vdd_cpu,
+ struct dvfs_rail *vdd_core)
+{
+ if (vdd_cpu->new_millivolts > vdd_cpu->millivolts &&
+ vdd_core->new_millivolts < vdd_cpu->new_millivolts + 120)
+ return vdd_cpu->new_millivolts + 120;
+
+ if (vdd_core->new_millivolts < vdd_cpu->millivolts + 120)
+ return vdd_cpu->millivolts + 120;
+
+ return vdd_core->new_millivolts;
+}
+
+/* vdd_aon must be within 170 mV of vdd_core */
+static int tegra2_dvfs_rel_vdd_core_vdd_aon(struct dvfs_rail *vdd_core,
+ struct dvfs_rail *vdd_aon)
+{
+ BUG_ON(abs(vdd_aon->millivolts - vdd_core->millivolts) >
+ vdd_aon->step);
+ return vdd_core->millivolts;
+}
+
+static struct dvfs_relationship tegra2_dvfs_relationships[] = {
+ {
+ /* vdd_core must be 120 mV higher than vdd_cpu */
+ .from = &tegra2_dvfs_rail_vdd_cpu,
+ .to = &tegra2_dvfs_rail_vdd_core,
+ .solve = tegra2_dvfs_rel_vdd_cpu_vdd_core,
+ },
+ {
+ /* vdd_aon must be 120 mV higher than vdd_cpu */
+ .from = &tegra2_dvfs_rail_vdd_cpu,
+ .to = &tegra2_dvfs_rail_vdd_aon,
+ .solve = tegra2_dvfs_rel_vdd_cpu_vdd_core,
+ },
+ {
+ /* vdd_aon must be within 170 mV of vdd_core */
+ .from = &tegra2_dvfs_rail_vdd_core,
+ .to = &tegra2_dvfs_rail_vdd_aon,
+ .solve = tegra2_dvfs_rel_vdd_core_vdd_aon,
+ },
+};
+
+static struct dvfs_rail *tegra2_dvfs_rails[] = {
+ &tegra2_dvfs_rail_vdd_cpu,
+ &tegra2_dvfs_rail_vdd_core,
+ &tegra2_dvfs_rail_vdd_aon,
+};
+
+#define CPU_DVFS(_clk_name, _speedo_id, _process_id, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .speedo_id = _speedo_id, \
+ .process_id = _process_id, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .millivolts = cpu_millivolts, \
+ .auto_dvfs = true, \
+ .dvfs_rail = &tegra2_dvfs_rail_vdd_cpu, \
+ }
+
+#define CORE_DVFS(_clk_name, _process_id, _auto, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .speedo_id = -1, \
+ .process_id = _process_id, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .millivolts = core_millivolts, \
+ .auto_dvfs = _auto, \
+ .dvfs_rail = &tegra2_dvfs_rail_vdd_core, \
+ }
+
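+/*
+ * Each frequency below is the maximum rate supported at the voltage in the
+ * same position of cpu_millivolts or core_millivolts; a 0 entry indicates
+ * the clock is not expected to run at that voltage level.
+ */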
+static struct dvfs dvfs_init[] = {
+ /* CPU voltages (mV): 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1100, 1125 */
+ CPU_DVFS("cpu", 0, 0, MHZ, 314, 314, 314, 456, 456, 456, 608, 608, 608, 760, 817, 817, 912, 1000),
+ CPU_DVFS("cpu", 0, 1, MHZ, 314, 314, 314, 456, 456, 456, 618, 618, 618, 770, 827, 827, 922, 1000),
+ CPU_DVFS("cpu", 0, 2, MHZ, 494, 494, 494, 675, 675, 817, 817, 922, 922, 1000),
+ CPU_DVFS("cpu", 0, 3, MHZ, 730, 760, 845, 845, 940, 1000),
+
+ CPU_DVFS("cpu", 1, 0, MHZ, 380, 380, 503, 503, 655, 655, 798, 798, 902, 902, 960, 1000),
+ CPU_DVFS("cpu", 1, 1, MHZ, 389, 389, 503, 503, 655, 760, 798, 798, 950, 950, 1000),
+ CPU_DVFS("cpu", 1, 2, MHZ, 598, 598, 750, 750, 893, 893, 1000),
+ CPU_DVFS("cpu", 1, 3, MHZ, 730, 760, 845, 845, 940, 1000),
+
+ CPU_DVFS("cpu", 2, 0, MHZ, 0, 0, 0, 0, 655, 655, 798, 798, 902, 902, 960, 1000, 1100, 1100, 1200),
+ CPU_DVFS("cpu", 2, 1, MHZ, 0, 0, 0, 0, 655, 760, 798, 798, 950, 950, 1015, 1015, 1100, 1200),
+ CPU_DVFS("cpu", 2, 2, MHZ, 0, 0, 0, 0, 769, 769, 902, 902, 1026, 1026, 1140, 1140, 1200),
+ CPU_DVFS("cpu", 2, 3, MHZ, 0, 0, 0, 0, 940, 1000, 1000, 1000, 1130, 1130, 1200),
+
+ /* Core voltages (mV): 950, 1000, 1100, 1200, 1225, 1275, 1300 */
+ CORE_DVFS("emc", -1, 1, KHZ, 57000, 333000, 380000, 666000, 666000, 666000, 760000),
+
+#if 0
+ /*
+ * The sdhci core calls the clock ops with a spinlock held, which
+ * conflicts with the sleeping dvfs api.
+ * For now, boards must ensure that the core voltage does not drop
+ * below 1V, or that the sdmmc busses are set to 44 MHz or less.
+ */
+ CORE_DVFS("sdmmc1", -1, 1, KHZ, 44000, 52000, 52000, 52000, 52000, 52000, 52000),
+ CORE_DVFS("sdmmc2", -1, 1, KHZ, 44000, 52000, 52000, 52000, 52000, 52000, 52000),
+ CORE_DVFS("sdmmc3", -1, 1, KHZ, 44000, 52000, 52000, 52000, 52000, 52000, 52000),
+ CORE_DVFS("sdmmc4", -1, 1, KHZ, 44000, 52000, 52000, 52000, 52000, 52000, 52000),
+#endif
+
+ CORE_DVFS("ndflash", -1, 1, KHZ, 130000, 150000, 158000, 164000, 164000, 164000, 164000),
+ CORE_DVFS("nor", -1, 1, KHZ, 0, 92000, 92000, 92000, 92000, 92000, 92000),
+ CORE_DVFS("ide", -1, 1, KHZ, 0, 0, 100000, 100000, 100000, 100000, 100000),
+ CORE_DVFS("mipi", -1, 1, KHZ, 0, 40000, 40000, 40000, 40000, 60000, 60000),
+ CORE_DVFS("usbd", -1, 1, KHZ, 0, 0, 480000, 480000, 480000, 480000, 480000),
+ CORE_DVFS("usb2", -1, 1, KHZ, 0, 0, 480000, 480000, 480000, 480000, 480000),
+ CORE_DVFS("usb3", -1, 1, KHZ, 0, 0, 480000, 480000, 480000, 480000, 480000),
+ CORE_DVFS("pcie", -1, 1, KHZ, 0, 0, 0, 250000, 250000, 250000, 250000),
+ CORE_DVFS("dsi", -1, 1, KHZ, 100000, 100000, 100000, 500000, 500000, 500000, 500000),
+ CORE_DVFS("tvo", -1, 1, KHZ, 0, 0, 0, 250000, 250000, 250000, 250000),
+
+ /*
+ * The clock rate for the display controllers that determines the
+ * necessary core voltage depends on a divider that is internal
+ * to the display block. Disable auto-dvfs on the display clocks,
+ * and let the display driver call tegra_dvfs_set_rate manually
+ */
+ CORE_DVFS("disp1", -1, 0, KHZ, 158000, 158000, 190000, 190000, 190000, 190000, 190000),
+ CORE_DVFS("disp2", -1, 0, KHZ, 158000, 158000, 190000, 190000, 190000, 190000, 190000),
+ CORE_DVFS("hdmi", -1, 0, KHZ, 0, 0, 0, 148500, 148500, 148500, 148500),
+
+ /*
+ * Clocks below depend on the core process id. Define per process_id
+ * tables for SCLK/VDE/3D clocks (maximum rate for these clocks is
+ * increased depending on tegra2 sku). Use the worst case value for
+ * other clocks for now.
+ */
+ CORE_DVFS("host1x", -1, 1, KHZ, 104500, 133000, 166000, 166000, 166000, 166000, 166000),
+ CORE_DVFS("epp", -1, 1, KHZ, 133000, 171000, 247000, 300000, 300000, 300000, 300000),
+ CORE_DVFS("2d", -1, 1, KHZ, 133000, 171000, 247000, 300000, 300000, 300000, 300000),
+
+ CORE_DVFS("3d", 0, 1, KHZ, 114000, 161500, 247000, 304000, 304000, 333500, 333500),
+ CORE_DVFS("3d", 1, 1, KHZ, 161500, 209000, 285000, 333500, 333500, 361000, 361000),
+ CORE_DVFS("3d", 2, 1, KHZ, 218500, 256500, 323000, 380000, 380000, 400000, 400000),
+ CORE_DVFS("3d", 3, 1, KHZ, 247000, 285000, 351500, 400000, 400000, 400000, 400000),
+
+ CORE_DVFS("mpe", 0, 1, KHZ, 104500, 152000, 228000, 300000, 300000, 300000, 300000),
+ CORE_DVFS("mpe", 1, 1, KHZ, 142500, 190000, 275500, 300000, 300000, 300000, 300000),
+ CORE_DVFS("mpe", 2, 1, KHZ, 190000, 237500, 300000, 300000, 300000, 300000, 300000),
+ CORE_DVFS("mpe", 3, 1, KHZ, 228000, 266000, 300000, 300000, 300000, 300000, 300000),
+
+ CORE_DVFS("vi", -1, 1, KHZ, 85000, 100000, 150000, 150000, 150000, 150000, 150000),
+
+ CORE_DVFS("sclk", 0, 1, KHZ, 95000, 133000, 190000, 222500, 240000, 247000, 262000),
+ CORE_DVFS("sclk", 1, 1, KHZ, 123500, 159500, 207000, 240000, 240000, 264000, 277500),
+ CORE_DVFS("sclk", 2, 1, KHZ, 152000, 180500, 229500, 260000, 260000, 285000, 300000),
+ CORE_DVFS("sclk", 3, 1, KHZ, 171000, 218500, 256500, 292500, 292500, 300000, 300000),
+
+ CORE_DVFS("vde", 0, 1, KHZ, 95000, 123500, 209000, 275500, 275500, 300000, 300000),
+ CORE_DVFS("vde", 1, 1, KHZ, 123500, 152000, 237500, 300000, 300000, 300000, 300000),
+ CORE_DVFS("vde", 2, 1, KHZ, 152000, 209000, 285000, 300000, 300000, 300000, 300000),
+ CORE_DVFS("vde", 3, 1, KHZ, 171000, 218500, 300000, 300000, 300000, 300000, 300000),
+ /* What is this? */
+ CORE_DVFS("NVRM_DEVID_CLK_SRC", -1, 1, MHZ, 480, 600, 800, 1067, 1067, 1067, 1067),
+};
+
+int tegra_dvfs_disable_core_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(arg, kp);
+ if (ret)
+ return ret;
+
+ if (tegra_dvfs_core_disabled)
+ tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core);
+ else
+ tegra_dvfs_rail_enable(&tegra2_dvfs_rail_vdd_core);
+
+ return 0;
+}
+
+int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(arg, kp);
+ if (ret)
+ return ret;
+
+ if (tegra_dvfs_cpu_disabled)
+ tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu);
+ else
+ tegra_dvfs_rail_enable(&tegra2_dvfs_rail_vdd_cpu);
+
+ return 0;
+}
+
+int tegra_dvfs_disable_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops tegra_dvfs_disable_core_ops = {
+ .set = tegra_dvfs_disable_core_set,
+ .get = tegra_dvfs_disable_get,
+};
+
+static struct kernel_param_ops tegra_dvfs_disable_cpu_ops = {
+ .set = tegra_dvfs_disable_cpu_set,
+ .get = tegra_dvfs_disable_get,
+};
+
+module_param_cb(disable_core, &tegra_dvfs_disable_core_ops,
+ &tegra_dvfs_core_disabled, 0644);
+module_param_cb(disable_cpu, &tegra_dvfs_disable_cpu_ops,
+ &tegra_dvfs_cpu_disabled, 0644);
+
+void __init tegra_soc_init_dvfs(void)
+{
+ int i;
+ struct clk *c;
+ struct dvfs *d;
+ int process_id;
+ int ret;
+ int cpu_process_id = tegra_cpu_process_id();
+ int core_process_id = tegra_core_process_id();
+ int speedo_id = tegra_soc_speedo_id();
+
+ BUG_ON(speedo_id >= ARRAY_SIZE(cpu_speedo_nominal_millivolts));
+ tegra2_dvfs_rail_vdd_cpu.nominal_millivolts =
+ cpu_speedo_nominal_millivolts[speedo_id];
+ BUG_ON(speedo_id >= ARRAY_SIZE(core_speedo_nominal_millivolts));
+ tegra2_dvfs_rail_vdd_core.nominal_millivolts =
+ core_speedo_nominal_millivolts[speedo_id];
+ tegra2_dvfs_rail_vdd_aon.nominal_millivolts =
+ core_speedo_nominal_millivolts[speedo_id];
+
+ tegra_dvfs_init_rails(tegra2_dvfs_rails, ARRAY_SIZE(tegra2_dvfs_rails));
+ tegra_dvfs_add_relationships(tegra2_dvfs_relationships,
+ ARRAY_SIZE(tegra2_dvfs_relationships));
+ /*
+ * VDD_CORE must always be at least 50 mV higher than VDD_CPU
+ * Fill out cpu_core_millivolts based on cpu_millivolts
+ */
+ for (i = 0; i < ARRAY_SIZE(dvfs_init); i++) {
+ d = &dvfs_init[i];
+
+ process_id = strcmp(d->clk_name, "cpu") ?
+ core_process_id : cpu_process_id;
+ if ((d->process_id != -1 && d->process_id != process_id) ||
+ (d->speedo_id != -1 && d->speedo_id != speedo_id)) {
+ pr_debug("tegra_dvfs: rejected %s speedo %d,"
+ " process %d\n", d->clk_name, d->speedo_id,
+ d->process_id);
+ continue;
+ }
+
+ c = tegra_get_clock_by_name(d->clk_name);
+
+ if (!c) {
+ pr_debug("tegra_dvfs: no clock found for %s\n",
+ d->clk_name);
+ continue;
+ }
+
+ ret = tegra_enable_dvfs_on_clk(c, d);
+ if (ret)
+ pr_err("tegra_dvfs: failed to enable dvfs on %s\n",
+ c->name);
+ }
+
+ if (tegra_dvfs_core_disabled)
+ tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core);
+
+ if (tegra_dvfs_cpu_disabled)
+ tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu);
+}
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
index 0f7ae6e90b55..f1ac82ad5c15 100644
--- a/arch/arm/mach-tegra/tegra2_emc.c
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -25,6 +25,11 @@
#include "tegra2_emc.h"
+#define TEGRA_MRR_DIVLD (1<<20)
+#define TEGRA_EMC_STATUS 0x02b4
+#define TEGRA_EMC_MRR 0x00ec
+static DEFINE_MUTEX(tegra_emc_mrr_lock);
+
#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
@@ -36,6 +41,9 @@ static void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
static const struct tegra_emc_table *tegra_emc_table;
static int tegra_emc_table_size;
+static unsigned long tegra_emc_max_bus_rate; /* 2 * 1000 * maximum emc_clock rate */
+static unsigned long tegra_emc_min_bus_rate; /* 2 * 1000 * minimum emc_clock rate */
+
static inline void emc_writel(u32 val, unsigned long addr)
{
writel(val, emc + addr);
@@ -46,6 +54,35 @@ static inline u32 emc_readl(unsigned long addr)
return readl(emc + addr);
}
+/*
+ * Read an LPDDR2 mode register: issue an MRR command for the given mode
+ * register address and poll EMC_STATUS for DIVLD to signal that the data
+ * has been latched.
+ */
+static int tegra_emc_read_mrr(unsigned long addr)
+{
+ u32 value;
+ int count = 100;
+
+ mutex_lock(&tegra_emc_mrr_lock);
+ do {
+ emc_readl(TEGRA_EMC_MRR);
+ } while (--count && (emc_readl(TEGRA_EMC_STATUS) & TEGRA_MRR_DIVLD));
+ if (count == 0) {
+ pr_err("%s: Failed to read memory type\n", __func__);
+ BUG();
+ }
+ value = (1 << 30) | (addr << 16);
+ emc_writel(value, TEGRA_EMC_MRR);
+
+ count = 100;
+ while (--count && !(emc_readl(TEGRA_EMC_STATUS) & TEGRA_MRR_DIVLD));
+ if (count == 0) {
+ pr_err("%s: Failed to read memory type\n", __func__);
+ BUG();
+ }
+ value = emc_readl(TEGRA_EMC_MRR) & 0xFFFF;
+ mutex_unlock(&tegra_emc_mrr_lock);
+
+ return value;
+}
+
static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = {
0x2c, /* RC */
0x30, /* RFC */
@@ -108,6 +145,14 @@ long tegra_emc_round_rate(unsigned long rate)
if (!emc_enable)
return -EINVAL;
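+
+	/* Clamp out-of-range requests to the slowest or fastest table entry */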
+ if (rate >= tegra_emc_max_bus_rate) {
+ best = tegra_emc_table_size - 1;
+ goto round_out;
+ } else if (rate <= tegra_emc_min_bus_rate) {
+ best = 0;
+ goto round_out;
+ }
+
pr_debug("%s: %lu\n", __func__, rate);
/*
@@ -126,7 +171,7 @@ long tegra_emc_round_rate(unsigned long rate)
if (best < 0)
return -EINVAL;
-
+round_out:
pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate);
return tegra_emc_table[best].rate * 2 * 1000;
@@ -154,11 +199,11 @@ int tegra_emc_set_rate(unsigned long rate)
*/
rate = rate / 2 / 1000;
- for (i = 0; i < tegra_emc_table_size; i++)
+ for (i = tegra_emc_table_size - 1; i >= 0; i--)
if (tegra_emc_table[i].rate == rate)
break;
- if (i >= tegra_emc_table_size)
+ if (i < 0)
return -EINVAL;
pr_debug("%s: setting to %lu\n", __func__, rate);
@@ -171,8 +216,57 @@ int tegra_emc_set_rate(unsigned long rate)
return 0;
}
-void tegra_init_emc(const struct tegra_emc_table *table, int table_size)
+void tegra_init_emc(const struct tegra_emc_chip *chips, int chips_size)
{
- tegra_emc_table = table;
- tegra_emc_table_size = table_size;
+ int i;
+ int vid;
+ int rev_id1;
+ int rev_id2;
+ int pid;
+ int chip_matched = -1;
+
+ vid = tegra_emc_read_mrr(5);
+ rev_id1 = tegra_emc_read_mrr(6);
+ rev_id2 = tegra_emc_read_mrr(7);
+ pid = tegra_emc_read_mrr(8);
+
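+ /*
+ * Match the detected LPDDR2 device against the board-supplied chip
+ * table; fields set to -1 in tegra_emc_chip act as wildcards.
+ */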
+ for (i = 0; i < chips_size; i++) {
+ if (chips[i].mem_manufacturer_id >= 0) {
+ if (chips[i].mem_manufacturer_id != vid)
+ continue;
+ }
+ if (chips[i].mem_revision_id1 >= 0) {
+ if (chips[i].mem_revision_id1 != rev_id1)
+ continue;
+ }
+ if (chips[i].mem_revision_id2 >= 0) {
+ if (chips[i].mem_revision_id2 != rev_id2)
+ continue;
+ }
+ if (chips[i].mem_pid >= 0) {
+ if (chips[i].mem_pid != pid)
+ continue;
+ }
+
+ chip_matched = i;
+ break;
+ }
+
+ if (chip_matched >= 0) {
+ pr_info("%s: %s memory found\n", __func__,
+ chips[chip_matched].description);
+ tegra_emc_table = chips[chip_matched].table;
+ tegra_emc_table_size = chips[chip_matched].table_size;
+
+ tegra_emc_min_bus_rate = tegra_emc_table[0].rate * 2 * 1000;
+ tegra_emc_max_bus_rate = tegra_emc_table[tegra_emc_table_size - 1].rate * 2 * 1000;
+
+ } else {
+ pr_err("%s: Memory not recognized, memory scaling disabled\n",
+ __func__);
+ pr_info("%s: Memory vid = 0x%04x", __func__, vid);
+ pr_info("%s: Memory rev_id1 = 0x%04x", __func__, rev_id1);
+ pr_info("%s: Memory rev_id2 = 0x%04x", __func__, rev_id2);
+ pr_info("%s: Memory pid = 0x%04x", __func__, pid);
+ }
}
diff --git a/arch/arm/mach-tegra/tegra2_emc.h b/arch/arm/mach-tegra/tegra2_emc.h
index 19f08cb31603..a40937dd7fcf 100644
--- a/arch/arm/mach-tegra/tegra2_emc.h
+++ b/arch/arm/mach-tegra/tegra2_emc.h
@@ -22,6 +22,15 @@ struct tegra_emc_table {
u32 regs[TEGRA_EMC_NUM_REGS];
};
-int tegra_emc_set_rate(unsigned long rate);
-long tegra_emc_round_rate(unsigned long rate);
-void tegra_init_emc(const struct tegra_emc_table *table, int table_size);
+struct tegra_emc_chip {
+ const char *description;
+ int mem_manufacturer_id; /* LPDDR2 MR5 or -1 to ignore */
+ int mem_revision_id1; /* LPDDR2 MR6 or -1 to ignore */
+ int mem_revision_id2; /* LPDDR2 MR7 or -1 to ignore */
+ int mem_pid; /* LPDDR2 MR8 or -1 to ignore */
+
+ const struct tegra_emc_table *table;
+ int table_size;
+};
+
+void tegra_init_emc(const struct tegra_emc_chip *chips, int chips_size);
diff --git a/arch/arm/mach-tegra/tegra2_mc.c b/arch/arm/mach-tegra/tegra2_mc.c
new file mode 100644
index 000000000000..6df9c232c02f
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_mc.c
@@ -0,0 +1,1017 @@
+/*
+ * arch/arm/mach-tegra/tegra2_mc.c
+ *
+ * Memory controller bandwidth profiling interface
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/sysdev.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/parser.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <mach/iomap.h>
+
+#include <asm/uaccess.h>
+
+#include "clock.h"
+#include "tegra2_mc.h"
+
+static void stat_start(void);
+static void stat_stop(void);
+static void stat_log(void);
+
+static struct hrtimer sample_timer;
+
+#define MC_COUNTER_INITIALIZER() \
+ { \
+ .enabled = false, \
+ .period = 10, \
+ .mode = FILTER_CLIENT, \
+ .address_low = 0, \
+ .address_length = 0xfffffffful, \
+ .sample_data = { \
+ .signature = 0xdeadbeef, \
+ } \
+ }
+
+static struct tegra_mc_counter mc_counter0 = MC_COUNTER_INITIALIZER();
+static struct tegra_mc_counter mc_counter1 = MC_COUNTER_INITIALIZER();
+static struct tegra_mc_counter emc_llp_counter = MC_COUNTER_INITIALIZER();
+
+/* /sys/devices/system/tegra_mc */
+static bool sample_enable = SAMPLE_ENABLE_DEFAULT;
+static u16 sample_quantum = SAMPLE_QUANTUM_DEFAULT;
+static u8 sample_log[SAMPLE_LOG_SIZE];
+
+static DEFINE_SPINLOCK(sample_enable_lock);
+static DEFINE_SPINLOCK(sample_log_lock);
+
+static u8 *sample_log_wptr = sample_log, *sample_log_rptr = sample_log;
+static int sample_log_size = SAMPLE_LOG_SIZE - 1;
+static struct clk *emc_clock = NULL;
+
+static bool sampling(void)
+{
+ bool ret;
+
+ spin_lock_bh(&sample_enable_lock);
+ ret = sample_enable;
+ spin_unlock_bh(&sample_enable_lock);
+
+ return ret;
+}
+
+static struct sysdev_class tegra_mc_sysclass = {
+ .name = "tegra_mc",
+};
+
+static ssize_t tegra_mc_enable_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", sample_enable);
+}
+
+static ssize_t tegra_mc_enable_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value, i;
+ struct tegra_mc_counter *counters[] = {
+ &mc_counter0,
+ &mc_counter1,
+ &emc_llp_counter
+ };
+
+ sscanf(buf, "%d", &value);
+
+ if (value == 0 || value == 1)
+ sample_enable = value;
+ else
+ return -EINVAL;
+
+ if (!sample_enable) {
+ stat_stop();
+ hrtimer_cancel(&sample_timer);
+ return count;
+ }
+
+ hrtimer_cancel(&sample_timer);
+
+ /* we need to initialize variables that change during sampling */
+ sample_log_wptr = sample_log_rptr = sample_log;
+ sample_log_size = SAMPLE_LOG_SIZE - 1;
+
+ for (i = 0; i < ARRAY_SIZE(counters); i++) {
+ struct tegra_mc_counter *c = counters[i];
+
+ if (!c->enabled)
+ continue;
+
+ c->current_client_index = 0;
+ }
+
+ stat_start();
+
+ hrtimer_start(&sample_timer,
+ ktime_add_ns(ktime_get(), (u64)sample_quantum * 1000000),
+ HRTIMER_MODE_ABS);
+
+ return count;
+}
+
+static ssize_t tegra_mc_log_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ int index = 0, count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sample_log_lock, flags);
+
+ while (sample_log_rptr != sample_log_wptr) {
+ if (sample_log_rptr < sample_log_wptr) {
+ count = sample_log_wptr - sample_log_rptr;
+ memcpy(buf + index, sample_log_rptr, count);
+ sample_log_rptr = sample_log_wptr;
+ sample_log_size += count;
+ } else {
+ count = SAMPLE_LOG_SIZE -
+ (sample_log_rptr - sample_log);
+ memcpy(buf + index, sample_log_rptr, count);
+ sample_log_rptr = sample_log;
+ sample_log_size += count;
+ }
+ index += count;
+ }
+
+ spin_unlock_irqrestore(&sample_log_lock, flags);
+
+ return index;
+}
+
+static ssize_t tegra_mc_log_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ return -EPERM;
+}
+
+static ssize_t tegra_mc_quantum_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", sample_quantum);
+}
+
+static ssize_t tegra_mc_quantum_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+
+ if (sampling())
+ return -EINVAL;
+
+ sscanf(buf, "%d", &value);
+ sample_quantum = value;
+
+ return count;
+}
+
+#define TEGRA_MC_EXPAND(_attr,_mode) \
+ static SYSDEV_CLASS_ATTR( \
+ _attr, _mode, tegra_mc_##_attr##_show, tegra_mc_##_attr##_store);
+
+#define TEGRA_MC_ATTRIBUTES(_attr1,_mode1,_attr2,_mode2,_attr3,_mode3) \
+ TEGRA_MC_EXPAND(_attr1,_mode1) \
+ TEGRA_MC_EXPAND(_attr2,_mode2) \
+ TEGRA_MC_EXPAND(_attr3,_mode3)
+
+TEGRA_MC_ATTRIBUTES(enable,0666,log,0444,quantum,0666)
+
+#undef TEGRA_MC_EXPAND
+
+#define TEGRA_MC_EXPAND(_attr,_mode) \
+ &attr_##_attr,
+
+static struct sysdev_class_attribute *tegra_mc_attrs[] = {
+ TEGRA_MC_ATTRIBUTES(enable,0666,log,0444,quantum,0666)
+ NULL
+};
+
+/* /sys/devices/system/tegra_mc/client */
+static bool tegra_mc_client_0_enabled = CLIENT_ENABLED_DEFAULT;
+static u8 tegra_mc_client_0_on_schedule_buffer[CLIENT_ON_SCHEDULE_LENGTH];
+static struct kobject *tegra_mc_client_kobj, *tegra_mc_client_0_kobj;
+
+struct match_mode {
+ const char *name;
+ int mode;
+};
+
+static const struct match_mode mode_list[] = {
+ [0] = {
+ .name = "none",
+ .mode = FILTER_NONE,
+ },
+ [1] = {
+ .name = "address",
+ .mode = FILTER_ADDR,
+ },
+ [2] = {
+ .name = "client",
+ .mode = FILTER_CLIENT,
+ },
+};
+
+static int tegra_mc_parse_mode(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mode_list); i++) {
+ if (!strncmp(str, mode_list[i].name, strlen(mode_list[i].name)))
+ return mode_list[i].mode;
+ }
+ return -EINVAL;
+}
+
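+/*
+ * Parse an on_schedule string of the form
+ *   "period=<n> mode=<none|address|client> client=<id,...>
+ *    address_low=<addr> address_length=<len>"
+ * and configure the MC counters and the EMC LLP counter accordingly.
+ */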
+static int tegra_mc_client_parse(const char *buf, size_t count,
+ tegra_mc_counter_t *counter0, tegra_mc_counter_t *counter1,
+ tegra_mc_counter_t *llp)
+{
+ char *options, *p, *ptr;
+ tegra_mc_counter_t *counter;
+ substring_t args[MAX_OPT_ARGS];
+ enum {
+ opt_period,
+ opt_mode,
+ opt_client,
+ opt_address_low,
+ opt_address_length,
+ opt_err,
+ };
+ const match_table_t tokens = {
+ {opt_period, "period=%s"},
+ {opt_mode, "mode=%s"},
+ {opt_client, "client=%s"},
+ {opt_address_low, "address_low=%s"},
+ {opt_address_length, "address_length=%s"},
+ {opt_err, NULL},
+ };
+ int ret = 0, i, token, index = 0;
+ bool aggregate = false;
+ int period, *client_ids, mode;
+ u64 address_low = 0;
+ u64 address_length = 1ull << 32;
+
+ client_ids = kmalloc(sizeof(int) * (MC_COUNTER_CLIENT_SIZE + 1),
+ GFP_KERNEL);
+ if (!client_ids)
+ return -ENOMEM;
+
+ memset(client_ids, -1, (sizeof(int) * (MC_COUNTER_CLIENT_SIZE + 1)));
+
+ options = kstrdup(buf, GFP_KERNEL);
+ if (!options) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ while ((p = strsep(&options, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ pr_debug("\t %s\n", p);
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case opt_period:
+ if (match_int(&args[0], &period) || period <= 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case opt_mode:
+ mode = tegra_mc_parse_mode(args[0].from);
+ if (mode < 0) {
+ ret = mode;
+ goto end;
+ }
+ break;
+
+ case opt_client:
+ ptr = get_options(args[0].from,
+ MC_COUNTER_CLIENT_SIZE + 1, client_ids);
+
+ if (client_ids[1] == MC_STAT_AGGREGATE) {
+ aggregate = true;
+ break;
+ }
+ break;
+
+ case opt_address_low:
+ address_low = simple_strtoull(args[0].from, NULL, 0);
+ break;
+
+ case opt_address_length:
+ address_length = simple_strtoull(args[0].from, NULL, 0);
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ address_low &= PAGE_MASK;
+ address_length += PAGE_SIZE - 1;
+ address_length &= ~((1ull << PAGE_SHIFT) - 1ull);
+
+ if (mode == FILTER_CLIENT) {
+ counter = counter0;
+ llp->enabled = false;
+ counter1->enabled = false;
+ } else if (mode == FILTER_ADDR || mode == FILTER_NONE) {
+ if (aggregate) {
+ counter = counter1;
+ llp->enabled = false;
+ counter0->enabled = false;
+ } else {
+ counter = counter0;
+ counter1->enabled = false;
+ llp->enabled = false;
+ }
+ } else {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ counter->mode = mode;
+ counter->enabled = true;
+ counter->address_low = (u32)address_low;
+ counter->address_length = (u32)(address_length - 1);
+
+ for (i = 1; i < MC_COUNTER_CLIENT_SIZE; i++) {
+ if (client_ids[i] != -1)
+ counter->clients[index++] = client_ids[i];
+ }
+
+ counter->total_clients = index;
+
+ if (llp->enabled) {
+ llp->mode = counter->mode;
+ llp->period = counter->period;
+ llp->address_low = counter->address_low;
+ llp->address_length = counter->address_length;
+ }
+
+end:
+ if (options)
+ kfree(options);
+ if (client_ids)
+ kfree(client_ids);
+
+ return ret;
+}
+
+static ssize_t tegra_mc_client_0_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ if (strcmp(attr->attr.name, "enable") == 0)
+ return sprintf(buf, "%d\n", tegra_mc_client_0_enabled);
+ else if (strcmp(attr->attr.name, "on_schedule") == 0)
+ return sprintf(buf, "%s", tegra_mc_client_0_on_schedule_buffer);
+ else
+ return -EINVAL;
+}
+
+static ssize_t tegra_mc_client_0_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+
+ if (sampling())
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "enable") == 0) {
+ sscanf(buf, "%d\n", &value);
+ if (value == 0 || value == 1)
+ tegra_mc_client_0_enabled = value;
+ else
+ return -EINVAL;
+
+ return count;
+ } else if (strcmp(attr->attr.name, "on_schedule") == 0) {
+ if (tegra_mc_client_parse(buf, count,
+ &mc_counter0, &mc_counter1,
+ &emc_llp_counter) == 0) {
+
+ strncpy(tegra_mc_client_0_on_schedule_buffer,
+ buf, count);
+
+ return count;
+ } else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+}
+
+static struct kobj_attribute tegra_mc_client_0_enable =
+ __ATTR(enable, 0660, tegra_mc_client_0_show, tegra_mc_client_0_store);
+
+static struct kobj_attribute tegra_mc_client_0_on_schedule =
+ __ATTR(on_schedule, 0660, tegra_mc_client_0_show, tegra_mc_client_0_store);
+
+static struct attribute *tegra_mc_client_0_attrs[] = {
+ &tegra_mc_client_0_enable.attr,
+ &tegra_mc_client_0_on_schedule.attr,
+ NULL,
+};
+
+static struct attribute_group tegra_mc_client_0_attr_group = {
+ .attrs = tegra_mc_client_0_attrs
+};
+
+/* /sys/devices/system/tegra_mc/dram */
+#define dram_counters(_x) \
+ _x(activate_cnt, ACTIVATE_CNT) \
+ _x(read_cnt, READ_CNT) \
+ _x(write_cnt, WRITE_CNT) \
+ _x(ref_cnt, REF_CNT) \
+ _x(cumm_banks_active_cke_eq1, CUMM_BANKS_ACTIVE_CKE_EQ1) \
+ _x(cumm_banks_active_cke_eq0, CUMM_BANKS_ACTIVE_CKE_EQ0) \
+ _x(cke_eq1_clks, CKE_EQ1_CLKS) \
+ _x(extclks_cke_eq1, EXTCLKS_CKE_EQ1) \
+ _x(extclks_cke_eq0, EXTCLKS_CKE_EQ0) \
+ _x(no_banks_active_cke_eq1, NO_BANKS_ACTIVE_CKE_EQ1) \
+ _x(no_banks_active_cke_eq0, NO_BANKS_ACTIVE_CKE_EQ0)
+
+#define DEFINE_COUNTER(_name, _val) { .enabled = false, .device_mask = 0, },
+
+static tegra_emc_dram_counter_t dram_counters[] = {
+ dram_counters(DEFINE_COUNTER)
+};
+
+#define DEFINE_SYSFS(_name, _val) \
+ \
+static struct kobject *tegra_mc_dram_##_name##_kobj; \
+ \
+static ssize_t tegra_mc_dram_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return tegra_mc_dram_show(kobj, attr, buf, \
+ _val - EMC_DRAM_STAT_BEGIN); \
+} \
+ \
+static ssize_t tegra_mc_dram_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+ if (sampling()) \
+ return 0; \
+ \
+ return tegra_mc_dram_store(kobj, attr, buf, count, \
+ _val - EMC_DRAM_STAT_BEGIN); \
+} \
+ \
+ \
+static struct kobj_attribute tegra_mc_dram_##_name##_enable = \
+ __ATTR(enable, 0660, tegra_mc_dram_##_name##_show, \
+ tegra_mc_dram_##_name##_store); \
+ \
+static struct kobj_attribute tegra_mc_dram_##_name##_device_mask = \
+ __ATTR(device_mask, 0660, tegra_mc_dram_##_name##_show, \
+ tegra_mc_dram_##_name##_store); \
+ \
+static struct attribute *tegra_mc_dram_##_name##_attrs[] = { \
+ &tegra_mc_dram_##_name##_enable.attr, \
+ &tegra_mc_dram_##_name##_device_mask.attr, \
+ NULL, \
+}; \
+ \
+static struct attribute_group tegra_mc_dram_##_name##_attr_group = { \
+ .attrs = tegra_mc_dram_##_name##_attrs, \
+};
+
+static struct kobject *tegra_mc_dram_kobj;
+
+static ssize_t tegra_mc_dram_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf, int index)
+{
+ if (index >= EMC_DRAM_STAT_END - EMC_DRAM_STAT_BEGIN)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "enable") == 0)
+ return sprintf(buf, "%d\n", dram_counters[index].enabled);
+ else if (strcmp(attr->attr.name, "device_mask") == 0)
+ return sprintf(buf, "%d\n", dram_counters[index].device_mask);
+ else
+ return -EINVAL;
+}
+static ssize_t tegra_mc_dram_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count, int index)
+{
+ int value;
+
+ if (index >= EMC_DRAM_STAT_END - EMC_DRAM_STAT_BEGIN)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "enable") == 0) {
+ sscanf(buf, "%d\n", &value);
+ if (value == 0 || value == 1)
+ dram_counters[index].enabled = value;
+ else
+ return -EINVAL;
+
+ return count;
+ } else if (strcmp(attr->attr.name, "device_mask") == 0) {
+ sscanf(buf, "%d\n", &value);
+ dram_counters[index].device_mask = (u8)value;
+
+ return count;
+ } else
+ return -EINVAL;
+}
+
+dram_counters(DEFINE_SYSFS)
+
+/* Tegra Statistics */
+typedef struct {
+ void __iomem *mmio;
+} tegra_device_t;
+
+static tegra_device_t mc = {
+ .mmio = IO_ADDRESS(TEGRA_MC_BASE),
+};
+
+static tegra_device_t emc = {
+ .mmio = IO_ADDRESS(TEGRA_EMC_BASE),
+};
+
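+/*
+ * Program one MC bandwidth counter: gathering is disabled while the control
+ * and address-filter registers are rewritten, then the counters are cleared
+ * and gathering is re-enabled.
+ */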
+void mc_stat_start(tegra_mc_counter_t *counter0, tegra_mc_counter_t *counter1)
+{
+ struct tegra_mc_counter *c;
+ u32 filter_client = ARMC_STAT_CONTROL_FILTER_CLIENT_DISABLE;
+ u32 filter_addr = ARMC_STAT_CONTROL_FILTER_ADDR_DISABLE;
+
+ if (!tegra_mc_client_0_enabled)
+ return;
+
+ c = (counter0->enabled) ? counter0 : counter1;
+
+ /* disable statistics */
+ writel((MC_STAT_CONTROL_0_EMC_GATHER_DISABLE << MC_STAT_CONTROL_0_EMC_GATHER_SHIFT),
+ mc.mmio + MC_STAT_CONTROL_0);
+
+ if (c->enabled && c->mode == FILTER_ADDR)
+ filter_addr = ARMC_STAT_CONTROL_FILTER_ADDR_ENABLE;
+ else if (c->enabled && c->mode == FILTER_CLIENT)
+ filter_client = ARMC_STAT_CONTROL_FILTER_CLIENT_ENABLE;
+
+ filter_addr <<= ARMC_STAT_CONTROL_FILTER_ADDR_SHIFT;
+ filter_client <<= ARMC_STAT_CONTROL_FILTER_CLIENT_SHIFT;
+
+ if (c->enabled) {
+ u32 reg = 0;
+ reg |= (ARMC_STAT_CONTROL_MODE_BANDWIDTH <<
+ ARMC_STAT_CONTROL_MODE_SHIFT);
+ reg |= (ARMC_STAT_CONTROL_EVENT_QUALIFIED <<
+ ARMC_STAT_CONTROL_EVENT_SHIFT);
+ reg |= (ARMC_STAT_CONTROL_FILTER_PRI_DISABLE <<
+ ARMC_STAT_CONTROL_FILTER_PRI_SHIFT);
+ reg |= (ARMC_STAT_CONTROL_FILTER_COALESCED_DISABLE <<
+ ARMC_STAT_CONTROL_FILTER_COALESCED_SHIFT);
+ reg |= filter_client;
+ reg |= filter_addr;
+ reg |= (c->clients[c->current_client_index] <<
+ ARMC_STAT_CONTROL_CLIENT_ID_SHIFT);
+
+ /* note these registers are shared */
+ writel(c->address_low,
+ mc.mmio + MC_STAT_EMC_ADDR_LOW_0);
+ writel((c->address_low + c->address_length),
+ mc.mmio + MC_STAT_EMC_ADDR_HIGH_0);
+ writel(0xFFFFFFFF, mc.mmio + MC_STAT_EMC_CLOCK_LIMIT_0);
+
+ writel(reg, mc.mmio + MC_STAT_EMC_CONTROL_0_0);
+ }
+
+ /* reset then enable statistics */
+ writel((MC_STAT_CONTROL_0_EMC_GATHER_CLEAR << MC_STAT_CONTROL_0_EMC_GATHER_SHIFT),
+ mc.mmio + MC_STAT_CONTROL_0);
+
+ writel((MC_STAT_CONTROL_0_EMC_GATHER_ENABLE << MC_STAT_CONTROL_0_EMC_GATHER_SHIFT),
+ mc.mmio + MC_STAT_CONTROL_0);
+}
+
+void mc_stat_stop(tegra_mc_counter_t *counter0,
+ tegra_mc_counter_t *counter1)
+{
+ u32 total_counts = readl(mc.mmio + MC_STAT_EMC_CLOCKS_0);
+
+ /* Disable statistics */
+ writel((MC_STAT_CONTROL_0_EMC_GATHER_DISABLE << MC_STAT_CONTROL_0_EMC_GATHER_SHIFT),
+ mc.mmio + MC_STAT_CONTROL_0);
+
+ if (counter0->enabled) {
+ counter0->sample_data.client_counts = readl(mc.mmio + MC_STAT_EMC_COUNT_0_0);
+ counter0->sample_data.total_counts = total_counts;
+ counter0->sample_data.emc_clock_rate = clk_get_rate(emc_clock);
+ }
+ else {
+ counter1->sample_data.client_counts = readl(mc.mmio + MC_STAT_EMC_COUNT_1_0);
+ counter1->sample_data.total_counts = total_counts;
+ counter1->sample_data.emc_clock_rate = clk_get_rate(emc_clock);
+ }
+}
+
+void emc_stat_start(tegra_mc_counter_t *llp_counter,
+ tegra_emc_dram_counter_t *dram_counter)
+{
+ u32 llmc_stat = 0;
+ u32 llmc_ctrl =
+ (AREMC_STAT_CONTROL_MODE_BANDWIDTH <<
+ AREMC_STAT_CONTROL_MODE_SHIFT) |
+ (AREMC_STAT_CONTROL_CLIENT_TYPE_MPCORER <<
+ AREMC_STAT_CONTROL_CLIENT_TYPE_SHIFT) |
+ (AREMC_STAT_CONTROL_EVENT_QUALIFIED <<
+ AREMC_STAT_CONTROL_EVENT_SHIFT);
+
+ /* disable statistics */
+ llmc_stat |= (EMC_STAT_CONTROL_0_LLMC_GATHER_DISABLE <<
+ EMC_STAT_CONTROL_0_LLMC_GATHER_SHIFT);
+ llmc_stat |= (EMC_STAT_CONTROL_0_DRAM_GATHER_DISABLE <<
+ EMC_STAT_CONTROL_0_DRAM_GATHER_SHIFT);
+ writel(llmc_stat, emc.mmio + EMC_STAT_CONTROL_0);
+
+ if (tegra_mc_client_0_enabled && llp_counter->enabled) {
+ if (llp_counter->mode == FILTER_ADDR) {
+ llmc_ctrl |=
+ (AREMC_STAT_CONTROL_FILTER_ADDR_ENABLE <<
+ AREMC_STAT_CONTROL_FILTER_ADDR_SHIFT);
+ llmc_ctrl |=
+ (AREMC_STAT_CONTROL_FILTER_CLIENT_DISABLE <<
+ AREMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+ } else if (llp_counter->mode == FILTER_CLIENT) {
+ /* the aggregate client is not allowed in client mode */
+ llmc_ctrl |=
+ (AREMC_STAT_CONTROL_FILTER_ADDR_DISABLE <<
+ AREMC_STAT_CONTROL_FILTER_ADDR_SHIFT);
+ llmc_ctrl |=
+ (AREMC_STAT_CONTROL_FILTER_CLIENT_DISABLE <<
+ AREMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+ } else if (llp_counter->mode == FILTER_NONE) {
+ llmc_ctrl |=
+ (AREMC_STAT_CONTROL_FILTER_ADDR_DISABLE <<
+ AREMC_STAT_CONTROL_FILTER_ADDR_SHIFT);
+ llmc_ctrl |=
+ (AREMC_STAT_CONTROL_FILTER_CLIENT_DISABLE <<
+ AREMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+ }
+
+ writel(llp_counter->address_low,
+ emc.mmio + EMC_STAT_LLMC_ADDR_LOW_0);
+ writel( (llp_counter->address_low + llp_counter->address_length),
+ emc.mmio + EMC_STAT_LLMC_ADDR_HIGH_0);
+ writel(0xFFFFFFFF, emc.mmio + EMC_STAT_LLMC_CLOCK_LIMIT_0);
+ writel(llmc_ctrl, emc.mmio + EMC_STAT_LLMC_CONTROL_0_0);
+ }
+
+ writel(0xFFFFFFFF, emc.mmio + EMC_STAT_DRAM_CLOCK_LIMIT_LO_0);
+ writel(0xFF, emc.mmio + EMC_STAT_DRAM_CLOCK_LIMIT_HI_0);
+
+ llmc_stat = 0;
+ /* Reset then enable statistics */
+ llmc_stat |= (EMC_STAT_CONTROL_0_LLMC_GATHER_CLEAR <<
+ EMC_STAT_CONTROL_0_LLMC_GATHER_SHIFT);
+ llmc_stat |= (EMC_STAT_CONTROL_0_DRAM_GATHER_CLEAR <<
+ EMC_STAT_CONTROL_0_DRAM_GATHER_SHIFT);
+ writel(llmc_stat, emc.mmio + EMC_STAT_CONTROL_0);
+
+ llmc_stat = 0;
+ llmc_stat |= (EMC_STAT_CONTROL_0_LLMC_GATHER_ENABLE <<
+ EMC_STAT_CONTROL_0_LLMC_GATHER_SHIFT);
+ llmc_stat |= (EMC_STAT_CONTROL_0_DRAM_GATHER_ENABLE <<
+ EMC_STAT_CONTROL_0_DRAM_GATHER_SHIFT);
+ writel(llmc_stat, emc.mmio + EMC_STAT_CONTROL_0);
+}
+
+void emc_stat_stop(tegra_mc_counter_t *llp_counter,
+ tegra_emc_dram_counter_t *dram_counter)
+{
+ u32 llmc_stat = 0;
+ int i;
+ int dev0_offsets_lo[] = {
+ EMC_STAT_DRAM_DEV0_ACTIVATE_CNT_LO_0,
+ EMC_STAT_DRAM_DEV0_READ_CNT_LO_0,
+ EMC_STAT_DRAM_DEV0_WRITE_CNT_LO_0,
+ EMC_STAT_DRAM_DEV0_REF_CNT_LO_0,
+ EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ1_LO_0,
+ EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ0_LO_0,
+ EMC_STAT_DRAM_DEV0_CKE_EQ1_CLKS_LO_0,
+ EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ1_LO_0,
+ EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ0_LO_0,
+ EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ1_LO_0,
+ EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ0_LO_0,
+ };
+ int dev0_offsets_hi[] = {
+ EMC_STAT_DRAM_DEV0_ACTIVATE_CNT_HI_0,
+ EMC_STAT_DRAM_DEV0_READ_CNT_HI_0,
+ EMC_STAT_DRAM_DEV0_WRITE_CNT_HI_0,
+ EMC_STAT_DRAM_DEV0_REF_CNT_HI_0,
+ EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ1_HI_0,
+ EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ0_HI_0,
+ EMC_STAT_DRAM_DEV0_CKE_EQ1_CLKS_HI_0,
+ EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ1_HI_0,
+ EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ0_HI_0,
+ EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ1_HI_0,
+ EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ0_HI_0,
+ };
+ int dev1_offsets_lo[] = {
+ EMC_STAT_DRAM_DEV1_ACTIVATE_CNT_LO_0,
+ EMC_STAT_DRAM_DEV1_READ_CNT_LO_0,
+ EMC_STAT_DRAM_DEV1_WRITE_CNT_LO_0,
+ EMC_STAT_DRAM_DEV1_REF_CNT_LO_0,
+ EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ1_LO_0,
+ EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ0_LO_0,
+ EMC_STAT_DRAM_DEV1_CKE_EQ1_CLKS_LO_0,
+ EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ1_LO_0,
+ EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ0_LO_0,
+ EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ1_LO_0,
+ EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ0_LO_0,
+ };
+ int dev1_offsets_hi[] = {
+ EMC_STAT_DRAM_DEV1_ACTIVATE_CNT_HI_0,
+ EMC_STAT_DRAM_DEV1_READ_CNT_HI_0,
+ EMC_STAT_DRAM_DEV1_WRITE_CNT_HI_0,
+ EMC_STAT_DRAM_DEV1_REF_CNT_HI_0,
+ EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ1_HI_0,
+ EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ0_HI_0,
+ EMC_STAT_DRAM_DEV1_CKE_EQ1_CLKS_HI_0,
+ EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ1_HI_0,
+ EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ0_HI_0,
+ EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ1_HI_0,
+ EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ0_HI_0,
+ };
+
+ /* Disable statistics */
+ llmc_stat |= (EMC_STAT_CONTROL_0_LLMC_GATHER_DISABLE <<
+ EMC_STAT_CONTROL_0_LLMC_GATHER_SHIFT);
+ llmc_stat |= (EMC_STAT_CONTROL_0_DRAM_GATHER_DISABLE <<
+ EMC_STAT_CONTROL_0_DRAM_GATHER_SHIFT);
+ writel(llmc_stat, emc.mmio + EMC_STAT_CONTROL_0);
+
+ if (tegra_mc_client_0_enabled == true && llp_counter->enabled) {
+ u32 total_counts = readl(mc.mmio + MC_STAT_EMC_CLOCKS_0);
+ llp_counter->sample_data.client_counts = readl(emc.mmio + EMC_STAT_LLMC_COUNT_0_0);
+ llp_counter->sample_data.total_counts = total_counts;
+ llp_counter->sample_data.emc_clock_rate = clk_get_rate(emc_clock);
+ }
+
+ for (i = 0; i < EMC_DRAM_STAT_END - EMC_DRAM_STAT_BEGIN; i++) {
+ if (dram_counter[i].enabled) {
+
+ dram_counter[i].sample_data.client_counts = 0;
+ dram_counter[i].sample_data.emc_clock_rate = clk_get_rate(emc_clock);
+
+ if (!(dram_counter[i].device_mask & 0x1)) {
+ if (readl(emc.mmio + dev0_offsets_hi[i]) != 0) {
+ dram_counter[i].sample_data.client_counts = 0xFFFFFFFF;
+ continue;
+ }
+ dram_counter[i].sample_data.client_counts +=
+ readl(emc.mmio + dev0_offsets_lo[i]);
+ }
+
+ if (!(dram_counter[i].device_mask & 0x2)) {
+ if (readl(emc.mmio + dev1_offsets_hi[i]) != 0) {
+ dram_counter[i].sample_data.client_counts = 0xFFFFFFFF;
+ continue;
+ }
+ dram_counter[i].sample_data.client_counts +=
+ readl(emc.mmio + dev1_offsets_lo[i]);
+ }
+ }
+ }
+}
+
+static void stat_start(void)
+{
+ mc_stat_start(&mc_counter0, &mc_counter1);
+ emc_stat_start(&emc_llp_counter, dram_counters);
+}
+
+static void stat_stop(void)
+{
+ mc_stat_stop(&mc_counter0, &mc_counter1);
+ emc_stat_stop(&emc_llp_counter, dram_counters);
+}
+
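+/* Append one sample record to the circular sample log; the write pointer
+ * wraps back to the start of the buffer when it reaches the end.
+ */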
+#define statcpy(_buf, _bufstart, _buflen, _elem) \
+ do { \
+ size_t s = sizeof(_elem); \
+ memcpy(_buf, &_elem, s); \
+ _buf += s; \
+ if (_buf >= _bufstart + _buflen) \
+ _buf = _bufstart; \
+ } while (0)
+
+static void stat_log(void)
+{
+ int i;
+ unsigned long flags;
+
+ struct tegra_mc_counter *counters[] = {
+ &mc_counter0,
+ &mc_counter1,
+ &emc_llp_counter
+ };
+
+ spin_lock_irqsave(&sample_log_lock, flags);
+
+ if (tegra_mc_client_0_enabled) {
+ for (i = 0; i < ARRAY_SIZE(counters); i++) {
+ struct tegra_mc_counter *c = counters[i];
+
+ if (!c->enabled)
+ continue;
+
+ c->sample_data.client_number = c->clients[c->current_client_index];
+
+ c->current_client_index++;
+ if (c->current_client_index == c->total_clients)
+ c->current_client_index = 0;
+
+ statcpy(sample_log_wptr, sample_log,
+ SAMPLE_LOG_SIZE, c->sample_data);
+ }
+ }
+
+ for (i = 0; i < EMC_DRAM_STAT_END - EMC_DRAM_STAT_BEGIN; i++) {
+ if (dram_counters[i].enabled) {
+ statcpy(sample_log_wptr, sample_log,
+ SAMPLE_LOG_SIZE, dram_counters[i].sample_data);
+ }
+ }
+
+ spin_unlock_irqrestore(&sample_log_lock, flags);
+}
+
+static enum hrtimer_restart sample_timer_function(struct hrtimer *handle)
+{
+ stat_stop();
+ stat_log();
+
+ if (!sample_enable)
+ return HRTIMER_NORESTART;
+
+ stat_start();
+
+ hrtimer_add_expires_ns(&sample_timer, (u64)sample_quantum * 1000000);
+ return HRTIMER_RESTART;
+}
+
+/* module init */
+#define REGISTER_SYSFS(_name, _val) \
+ tegra_mc_dram_##_name##_kobj = \
+ kobject_create_and_add(#_name, tegra_mc_dram_kobj); \
+ sysfs_create_group(tegra_mc_dram_##_name##_kobj, \
+ &tegra_mc_dram_##_name##_attr_group);
+
+static int tegra_mc_init(void)
+{
+ int i;
+ int rc;
+
+ /* /sys/devices/system/tegra_mc */
+ rc = sysdev_class_register(&tegra_mc_sysclass);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_mc_attrs)-1; i++) {
+ rc = sysdev_class_create_file(&tegra_mc_sysclass,
+ tegra_mc_attrs[i]);
+ if (rc) {
+ pr_err("%s: sysdev_class_create_file failed\n", __func__);
+ goto out_unreg_class;
+ }
+ }
+
+ /* /sys/devices/system/tegra_mc/client */
+ tegra_mc_client_kobj = kobject_create_and_add("client",
+ &tegra_mc_sysclass.kset.kobj);
+ if (!tegra_mc_client_kobj) {
+ rc = -ENOMEM;
+ goto out_remove_sysdev_files;
+ }
+
+ tegra_mc_client_0_kobj = kobject_create_and_add("0",
+ tegra_mc_client_kobj);
+ if (!tegra_mc_client_0_kobj) {
+ rc = -ENOMEM;
+ goto out_put_kobject_client;
+ }
+
+ rc = sysfs_create_group(tegra_mc_client_0_kobj,
+ &tegra_mc_client_0_attr_group);
+ if (rc)
+ goto out_put_kobject_client_0;
+
+ /* /sys/devices/system/tegra_mc/dram */
+ tegra_mc_dram_kobj = kobject_create_and_add("dram",
+ &tegra_mc_sysclass.kset.kobj);
+ if (!tegra_mc_dram_kobj) {
+ rc = -ENOMEM;
+ goto out_remove_group_client_0;
+ }
+
+ dram_counters(REGISTER_SYSFS)
+
+ /* hrtimer */
+ hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ sample_timer.function = sample_timer_function;
+
+ for (i = 0; i < EMC_DRAM_STAT_END - EMC_DRAM_STAT_BEGIN; i++) {
+ dram_counters[i].sample_data.client_number = EMC_DRAM_STAT_BEGIN + i;
+ dram_counters[i].sample_data.signature = 0xdeadbeef;
+ }
+
+ emc_clock = clk_get_sys(NULL, "emc");
+ if (IS_ERR_OR_NULL(emc_clock)) {
+ pr_err("Could not get EMC clock\n");
+ rc = -ENODEV;
+ goto out_remove_group_client_0;
+ }
+
+ return 0;
+
+out_remove_group_client_0:
+ sysfs_remove_group(tegra_mc_client_0_kobj, &tegra_mc_client_0_attr_group);
+
+out_put_kobject_client_0:
+ kobject_put(tegra_mc_client_0_kobj);
+
+out_put_kobject_client:
+ kobject_put(tegra_mc_client_kobj);
+
+out_remove_sysdev_files:
+ for (i = 0; i < ARRAY_SIZE(tegra_mc_attrs)-1; i++) {
+ sysdev_class_remove_file(&tegra_mc_sysclass, tegra_mc_attrs[i]);
+ }
+
+out_unreg_class:
+ sysdev_class_unregister(&tegra_mc_sysclass);
+
+out:
+ return rc;
+}
+
+/* module deinit */
+#define REMOVE_SYSFS(_name, _val) \
+ sysfs_remove_group(tegra_mc_dram_##_name##_kobj, \
+ &tegra_mc_dram_##_name##_attr_group); \
+ kobject_put(tegra_mc_dram_##_name##_kobj);
+
+static void tegra_mc_exit(void)
+{
+ int i;
+
+ stat_stop();
+
+ /* hrtimer */
+ hrtimer_cancel(&sample_timer);
+
+ /* /sys/devices/system/tegra_mc/client */
+ sysfs_remove_group(tegra_mc_client_0_kobj,
+ &tegra_mc_client_0_attr_group);
+ kobject_put(tegra_mc_client_0_kobj);
+ kobject_put(tegra_mc_client_kobj);
+
+ /* /sys/devices/system/tegra_mc/dram */
+ dram_counters(REMOVE_SYSFS)
+ kobject_put(tegra_mc_dram_kobj);
+
+ /* /sys/devices/system/tegra_mc */
+ for (i = 0; i < ARRAY_SIZE(tegra_mc_attrs)-1; i++) {
+ sysdev_class_remove_file(&tegra_mc_sysclass, tegra_mc_attrs[i]);
+ }
+ sysdev_class_unregister(&tegra_mc_sysclass);
+}
+
+module_init(tegra_mc_init);
+module_exit(tegra_mc_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/arm/mach-tegra/tegra2_mc.h b/arch/arm/mach-tegra/tegra2_mc.h
new file mode 100644
index 000000000000..211213c5f585
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_mc.h
@@ -0,0 +1,250 @@
+/*
+ * arch/arm/mach-tegra/tegra2_mc.h
+ *
+ * Memory controller bandwidth profiling interface
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _INCLUDE_TEGRA2_MC_H_
+#define _INCLUDE_TEGRA2_MC_H_
+
+#define SAMPLE_ENABLE_DEFAULT 0
+#define SAMPLE_LOG_SIZE 1024 /* needs to be DWORD aligned */
+#define SAMPLE_QUANTUM_DEFAULT 1 /* in milliseconds */
+#define CLIENT_ENABLED_DEFAULT false
+#define CLIENT_ON_SCHEDULE_LENGTH 256
+#define SHIFT_4K 12
+
+typedef enum {
+ FILTER_NONE,
+ FILTER_ADDR,
+ FILTER_CLIENT,
+} FILTER_MODE;
+
+#define MC_COUNTER_CLIENT_SIZE 256
+
+#define MC_STAT_CONTROL_0 0x90
+#define MC_STAT_CONTROL_0_EMC_GATHER_SHIFT 8
+#define MC_STAT_CONTROL_0_EMC_GATHER_CLEAR 1
+#define MC_STAT_CONTROL_0_EMC_GATHER_DISABLE 2
+#define MC_STAT_CONTROL_0_EMC_GATHER_ENABLE 3
+
+#define MC_STAT_EMC_ADDR_LOW_0 0x98
+#define MC_STAT_EMC_ADDR_HIGH_0 0x9c
+#define MC_STAT_EMC_CLOCK_LIMIT_0 0xa0
+#define MC_STAT_EMC_CLOCKS_0 0xa4
+#define MC_STAT_EMC_CONTROL_0_0 0xa8
+#define MC_STAT_EMC_COUNT_0_0 0xb8
+#define MC_STAT_EMC_COUNT_1_0 0xbc
+
+#define ARMC_STAT_CONTROL_FILTER_ADDR_SHIFT 27
+#define ARMC_STAT_CONTROL_FILTER_ADDR_DISABLE 0
+#define ARMC_STAT_CONTROL_FILTER_ADDR_ENABLE 1
+#define ARMC_STAT_CONTROL_FILTER_CLIENT_SHIFT 26
+#define ARMC_STAT_CONTROL_FILTER_CLIENT_DISABLE 0
+#define ARMC_STAT_CONTROL_FILTER_CLIENT_ENABLE 1
+#define ARMC_STAT_CONTROL_FILTER_PRI_SHIFT 28
+#define ARMC_STAT_CONTROL_FILTER_PRI_DISABLE 0
+#define ARMC_STAT_CONTROL_FILTER_COALESCED_SHIFT 30
+#define ARMC_STAT_CONTROL_FILTER_COALESCED_DISABLE 0
+#define ARMC_STAT_CONTROL_CLIENT_ID_SHIFT 8
+#define ARMC_STAT_CONTROL_MODE_SHIFT 0
+#define ARMC_STAT_CONTROL_MODE_BANDWIDTH 0
+#define ARMC_STAT_CONTROL_EVENT_SHIFT 16
+#define ARMC_STAT_CONTROL_EVENT_QUALIFIED 0
+
+#define EMC_STAT_CONTROL_0 0x160
+#define EMC_STAT_CONTROL_0_LLMC_GATHER_SHIFT 0
+#define EMC_STAT_CONTROL_0_LLMC_GATHER_CLEAR 1
+#define EMC_STAT_CONTROL_0_LLMC_GATHER_DISABLE 2
+#define EMC_STAT_CONTROL_0_LLMC_GATHER_ENABLE 3
+#define EMC_STAT_CONTROL_0_DRAM_GATHER_SHIFT 16
+#define EMC_STAT_CONTROL_0_DRAM_GATHER_CLEAR 1
+#define EMC_STAT_CONTROL_0_DRAM_GATHER_DISABLE 2
+#define EMC_STAT_CONTROL_0_DRAM_GATHER_ENABLE 3
+
+#define AREMC_STAT_CONTROL_MODE_SHIFT 0
+#define AREMC_STAT_CONTROL_MODE_BANDWIDTH 0
+#define AREMC_STAT_CONTROL_FILTER_ADDR_SHIFT 27
+#define AREMC_STAT_CONTROL_FILTER_ADDR_ENABLE 1
+#define AREMC_STAT_CONTROL_CLIENT_TYPE_SHIFT 8
+#define AREMC_STAT_CONTROL_CLIENT_TYPE_MPCORER 0
+#define AREMC_STAT_CONTROL_FILTER_CLIENT_SHIFT 26
+#define AREMC_STAT_CONTROL_FILTER_CLIENT_DISABLE 0
+#define AREMC_STAT_CONTROL_FILTER_ADDR_DISABLE 0
+#define AREMC_STAT_CONTROL_EVENT_SHIFT 16
+#define AREMC_STAT_CONTROL_EVENT_QUALIFIED 0
+
+#define EMC_STAT_LLMC_ADDR_LOW_0 0x168
+#define EMC_STAT_LLMC_ADDR_HIGH_0 0x16c
+#define EMC_STAT_LLMC_CLOCK_LIMIT_0 0x170
+#define EMC_STAT_LLMC_CONTROL_0_0 0x178
+#define EMC_STAT_LLMC_COUNT_0_0 0x188
+
+#define EMC_STAT_DRAM_CLOCK_LIMIT_LO_0 0x1a4
+#define EMC_STAT_DRAM_CLOCK_LIMIT_HI_0 0x1a8
+#define EMC_STAT_DRAM_DEV0_ACTIVATE_CNT_LO_0 0x1b4
+#define EMC_STAT_DRAM_DEV0_ACTIVATE_CNT_HI_0 0x1b8
+#define EMC_STAT_DRAM_DEV0_READ_CNT_LO_0 0x1bc
+#define EMC_STAT_DRAM_DEV0_READ_CNT_HI_0 0x1c0
+#define EMC_STAT_DRAM_DEV0_WRITE_CNT_LO_0 0x1c4
+#define EMC_STAT_DRAM_DEV0_WRITE_CNT_HI_0 0x1c8
+#define EMC_STAT_DRAM_DEV0_REF_CNT_LO_0 0x1cc
+#define EMC_STAT_DRAM_DEV0_REF_CNT_HI_0 0x1d0
+#define EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ1_LO_0 0x1d4
+#define EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ1_HI_0 0x1d8
+#define EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ0_LO_0 0x1dc
+#define EMC_STAT_DRAM_DEV0_CUMM_BANKS_ACTIVE_CKE_EQ0_HI_0 0x1e0
+#define EMC_STAT_DRAM_DEV0_CKE_EQ1_CLKS_LO_0 0x1e4
+#define EMC_STAT_DRAM_DEV0_CKE_EQ1_CLKS_HI_0 0x1e8
+#define EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ1_LO_0 0x1ec
+#define EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ1_HI_0 0x1f0
+#define EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ0_LO_0 0x1f4
+#define EMC_STAT_DRAM_DEV0_EXTCLKS_CKE_EQ0_HI_0 0x1f8
+#define EMC_STAT_DRAM_DEV1_ACTIVATE_CNT_LO_0 0x1fc
+#define EMC_STAT_DRAM_DEV1_ACTIVATE_CNT_HI_0 0x200
+#define EMC_STAT_DRAM_DEV1_READ_CNT_LO_0 0x204
+#define EMC_STAT_DRAM_DEV1_READ_CNT_HI_0 0x208
+#define EMC_STAT_DRAM_DEV1_WRITE_CNT_LO_0 0x20c
+#define EMC_STAT_DRAM_DEV1_WRITE_CNT_HI_0 0x210
+#define EMC_STAT_DRAM_DEV1_REF_CNT_LO_0 0x214
+#define EMC_STAT_DRAM_DEV1_REF_CNT_HI_0 0x218
+#define EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ1_LO_0 0x21c
+#define EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ1_HI_0 0x220
+#define EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ0_LO_0 0x224
+#define EMC_STAT_DRAM_DEV1_CUMM_BANKS_ACTIVE_CKE_EQ0_HI_0 0x228
+#define EMC_STAT_DRAM_DEV1_CKE_EQ1_CLKS_LO_0 0x22c
+#define EMC_STAT_DRAM_DEV1_CKE_EQ1_CLKS_HI_0 0x230
+#define EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ1_LO_0 0x234
+#define EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ1_HI_0 0x238
+#define EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ0_LO_0 0x23c
+#define EMC_STAT_DRAM_DEV1_EXTCLKS_CKE_EQ0_HI_0 0x240
+#define EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ1_LO_0 0x244
+#define EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ1_HI_0 0x248
+#define EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ0_LO_0 0x24c
+#define EMC_STAT_DRAM_DEV0_NO_BANKS_ACTIVE_CKE_EQ0_HI_0 0x250
+#define EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ1_LO_0 0x254
+#define EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ1_HI_0 0x258
+#define EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ0_LO_0 0x25c
+#define EMC_STAT_DRAM_DEV1_NO_BANKS_ACTIVE_CKE_EQ0_HI_0 0x260
+
+#pragma pack(push)
+#pragma pack(1)
+
+typedef struct {
+ u32 signature;
+ u32 client_number;
+ u32 client_counts;
+ u32 total_counts;
+ u32 emc_clock_rate;
+} sample_data_t;
+
+#pragma pack(pop)
+
+typedef struct tegra_mc_counter {
+ bool enabled;
+ u32 period;
+ FILTER_MODE mode;
+ u32 address_low;
+ u32 address_length;
+ u32 current_client_index;
+ u32 total_clients;
+ u8 clients[MC_COUNTER_CLIENT_SIZE];
+ sample_data_t sample_data;
+} tegra_mc_counter_t;
+
+typedef struct tegra_emc_dram_counter {
+ bool enabled;
+ u8 device_mask;
+
+ sample_data_t sample_data;
+} tegra_emc_dram_counter_t;
+
+/* client ids of mc/emc */
+typedef enum {
+ MC_STAT_BEGIN = 0,
+ CBR_DISPLAY0A = 0,
+ CBR_DISPLAY0AB,
+ CBR_DISPLAY0B,
+ CBR_DISPLAY0BB,
+ CBR_DISPLAY0C,
+ CBR_DISPLAY0CB,
+ CBR_DISPLAY1B,
+ CBR_DISPLAY1BB,
+ CBR_EPPUP,
+ CBR_G2PR,
+ CBR_G2SR,
+ CBR_MPEUNIFBR,
+ CBR_VIRUV,
+ CSR_AVPCARM7R,
+ CSR_DISPLAYHC,
+ CSR_DISPLAYHCB,
+ CSR_FDCDRD,
+ CSR_G2DR,
+ CSR_HOST1XDMAR,
+ CSR_HOST1XR,
+ CSR_IDXSRD,
+ CSR_MPCORER,
+ CSR_MPE_IPRED,
+ CSR_MPEAMEMRD,
+ CSR_MPECSRD,
+ CSR_PPCSAHBDMAR,
+ CSR_PPCSAHBSLVR,
+ CSR_TEXSRD,
+ CSR_VDEBSEVR,
+ CSR_VDEMBER,
+ CSR_VDEMCER,
+ CSR_VDETPER,
+ CBW_EPPU,
+ CBW_EPPV,
+ CBW_EPPY,
+ CBW_MPEUNIFBW,
+ CBW_VIWSB,
+ CBW_VIWU,
+ CBW_VIWV,
+ CBW_VIWY,
+ CCW_G2DW,
+ CSW_AVPCARM7W,
+ CSW_FDCDWR,
+ CSW_HOST1XW,
+ CSW_ISPW,
+ CSW_MPCOREW,
+ CSW_MPECSWR,
+ CSW_PPCSAHBDMAW,
+ CSW_PPCSAHBSLVW,
+ CSW_VDEBSEVW,
+ CSW_VDEMBEW,
+ CSW_VDETPMW,
+ MC_STAT_END,
+ EMC_DRAM_STAT_BEGIN = 128,
+ ACTIVATE_CNT = 128,
+ READ_CNT,
+ WRITE_CNT,
+ REF_CNT,
+ CUMM_BANKS_ACTIVE_CKE_EQ1,
+ CUMM_BANKS_ACTIVE_CKE_EQ0,
+ CKE_EQ1_CLKS,
+ EXTCLKS_CKE_EQ1,
+ EXTCLKS_CKE_EQ0,
+ NO_BANKS_ACTIVE_CKE_EQ1,
+ NO_BANKS_ACTIVE_CKE_EQ0,
+ EMC_DRAM_STAT_END,
+ MC_STAT_AGGREGATE = 255,
+} device_id;
+
+#endif
diff --git a/arch/arm/mach-tegra/tegra2_speedo.c b/arch/arm/mach-tegra/tegra2_speedo.c
new file mode 100644
index 000000000000..1e5fa26a5c41
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_speedo.c
@@ -0,0 +1,140 @@
+/*
+ * arch/arm/mach-tegra/tegra2_speedo.c
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <mach/iomap.h>
+
+#include "fuse.h"
+
+#define CPU_SPEEDO_LSBIT 20
+#define CPU_SPEEDO_MSBIT 29
+#define CPU_SPEEDO_REDUND_LSBIT 30
+#define CPU_SPEEDO_REDUND_MSBIT 39
+#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
+
+#define CORE_SPEEDO_LSBIT 40
+#define CORE_SPEEDO_MSBIT 47
+#define CORE_SPEEDO_REDUND_LSBIT 48
+#define CORE_SPEEDO_REDUND_MSBIT 55
+#define CORE_SPEEDO_REDUND_OFFS (CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT)
+
+#define SPEEDO_MULT 4
+
+#define CHIP_ID 0x804
+#define CHIP_MINOR_SHIFT 16
+#define CHIP_MINOR_MASK (0xF << CHIP_MINOR_SHIFT)
+
+#define PROCESS_CORNERS_NUM 4
+
+#define SPEEDO_ID_SELECT_0(rev) ((rev) <= 2)
+#define SPEEDO_ID_SELECT_1(sku) \
+ (((sku) != 20) && ((sku) != 23) && ((sku) != 24) && \
+ ((sku) != 27) && ((sku) != 28))
+
+/* Maximum speedo levels for each CPU process corner */
+static const u32 cpu_process_speedos[][PROCESS_CORNERS_NUM] = {
+/* proc_id 0 1 2 3 */
+ {315, 366, 420, UINT_MAX}, /* speedo_id 0 */
+ {303, 368, 419, UINT_MAX}, /* speedo_id 1 */
+ {316, 331, 383, UINT_MAX}, /* speedo_id 2 */
+};
+
+/* Maximum speedo levels for each core process corner */
+static const u32 core_process_speedos[][PROCESS_CORNERS_NUM] = {
+/* proc_id 0 1 2 3 */
+ {165, 195, 224, UINT_MAX}, /* speedo_id 0 */
+ {165, 195, 224, UINT_MAX}, /* speedo_id 1 */
+ {165, 195, 224, UINT_MAX}, /* speedo_id 2 */
+};
+
+static int cpu_process_id;
+static int core_process_id;
+static int soc_speedo_id;
+
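+/* Read the speedo values from the spare fuses: each value is assembled
+ * MSB-first from its primary fuse bits OR'ed with the redundant copy,
+ * scaled by SPEEDO_MULT, and then binned against the per-speedo-ID
+ * process-corner tables above to derive the CPU and core process IDs.
+ */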
+void tegra_init_speedo_data(void)
+{
+ u32 reg, val;
+ int i, bit, rev;
+ int sku = tegra_sku_id();
+ void __iomem *apb_misc = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+
+ reg = readl(apb_misc + CHIP_ID);
+ rev = (reg & CHIP_MINOR_MASK) >> CHIP_MINOR_SHIFT;
+ if (SPEEDO_ID_SELECT_0(rev))
+ soc_speedo_id = 0;
+ else if (SPEEDO_ID_SELECT_1(sku))
+ soc_speedo_id = 1;
+ else
+ soc_speedo_id = 2;
+ BUG_ON(soc_speedo_id >= ARRAY_SIZE(cpu_process_speedos));
+ BUG_ON(soc_speedo_id >= ARRAY_SIZE(core_process_speedos));
+
+ val = 0;
+ for (bit = CPU_SPEEDO_MSBIT; bit >= CPU_SPEEDO_LSBIT; bit--) {
+ reg = tegra_spare_fuse(bit) |
+ tegra_spare_fuse(bit + CPU_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("%s CPU speedo level %u\n", __func__, val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= cpu_process_speedos[soc_speedo_id][i])
+ break;
+ }
+ cpu_process_id = i;
+
+ val = 0;
+ for (bit = CORE_SPEEDO_MSBIT; bit >= CORE_SPEEDO_LSBIT; bit--) {
+ reg = tegra_spare_fuse(bit) |
+ tegra_spare_fuse(bit + CORE_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("%s Core speedo level %u\n", __func__, val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= core_process_speedos[soc_speedo_id][i])
+ break;
+ }
+ core_process_id = i;
+
+ pr_info("Tegra SKU: %d Rev: A%.2d CPU Process: %d Core Process: %d"
+ " Speedo ID: %d\n", sku, rev, cpu_process_id, core_process_id,
+ soc_speedo_id);
+}
+
+int tegra_cpu_process_id(void)
+{
+ return cpu_process_id;
+}
+
+int tegra_core_process_id(void)
+{
+ return core_process_id;
+}
+
+int tegra_soc_speedo_id(void)
+{
+ return soc_speedo_id;
+}
diff --git a/arch/arm/mach-tegra/tegra2_statmon.c b/arch/arm/mach-tegra/tegra2_statmon.c
new file mode 100644
index 000000000000..92f9b883f93d
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_statmon.c
@@ -0,0 +1,440 @@
+/*
+ * arch/arm/mach-tegra/tegra2_statmon.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/sysdev.h>
+#include <linux/bitops.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/io.h>
+#include <mach/clk.h>
+
+#include "clock.h"
+#include "tegra2_statmon.h"
+
+#define COP_MON_CTRL 0x120
+#define COP_MON_STATUS 0x124
+
+#define SAMPLE_PERIOD_SHIFT 20
+#define SAMPLE_PERIOD_MASK (0xFF << SAMPLE_PERIOD_SHIFT)
+#define INT_STATUS BIT(29) /* write 1 to clear */
+#define INT_ENABLE BIT(30)
+#define MON_ENABLE BIT(31)
+
+#define WINDOW_SIZE 128
+#define FREQ_MULT 1000
+#define UPPER_BAND 1000
+#define LOWER_BAND 1000
+#define BOOST_FRACTION_BITS 8
+
+struct sampler {
+ struct clk *clock;
+ unsigned long active_cycles[WINDOW_SIZE];
+ unsigned long total_active_cycles;
+ unsigned long avg_freq;
+ unsigned long *last_sample;
+ unsigned long idle_cycles;
+ unsigned long boost_freq;
+ unsigned long bumped_freq;
+ unsigned long *table;
+ int table_size;
+ u32 sample_count;
+ bool enable;
+ int sample_time;
+ int window_ms;
+ int min_samples;
+ unsigned long boost_step;
+ u8 boost_inc_coef;
+ u8 boost_dec_coef;
+};
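+/* The sampler keeps a sliding window of WINDOW_SIZE per-period active-cycle
+ * samples; total_active_cycles is maintained incrementally as old samples
+ * are replaced, and avg_freq (in kHz) is total_active_cycles / window_ms.
+ */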
+
+struct tegra2_stat_mon {
+ void __iomem *stat_mon_base;
+ void __iomem *vde_mon_base;
+ struct clk *stat_mon_clock;
+ struct mutex stat_mon_lock;
+ struct sampler avp_sampler;
+};
+
+static unsigned long sclk_table[] = {
+ 300000,
+ 240000,
+ 200000,
+ 150000,
+ 120000,
+ 100000,
+ 80000,
+ 75000,
+ 60000,
+ 50000,
+ 48000,
+ 40000
+};
+
+static struct tegra2_stat_mon *stat_mon;
+
+static inline u32 tegra2_stat_mon_read(u32 offset)
+{
+ return readl(stat_mon->stat_mon_base + offset);
+}
+
+static inline void tegra2_stat_mon_write(u32 value, u32 offset)
+{
+ writel(value, stat_mon->stat_mon_base + offset);
+}
+
+static inline u32 tegra2_vde_mon_read(u32 offset)
+{
+ return readl(stat_mon->vde_mon_base + offset);
+}
+
+static inline void tegra2_vde_mon_write(u32 value, u32 offset)
+{
+ writel(value, stat_mon->vde_mon_base + offset);
+}
+
+/* read the ticks in ISR and store */
+static irqreturn_t stat_mon_isr(int irq, void *data)
+{
+ u32 reg_val;
+
+ /* acknowledge the AVP monitor interrupt (INT_STATUS is write-1-to-clear) */
+ reg_val = tegra2_stat_mon_read(COP_MON_CTRL);
+ reg_val |= INT_STATUS;
+ tegra2_stat_mon_write(reg_val, COP_MON_CTRL);
+
+ stat_mon->avp_sampler.idle_cycles =
+ tegra2_stat_mon_read(COP_MON_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+
+static void add_active_sample(struct sampler *s, unsigned long cycles)
+{
+ if (s->last_sample == &s->active_cycles[WINDOW_SIZE - 1])
+ s->last_sample = &s->active_cycles[0];
+ else
+ s->last_sample++;
+
+ s->total_active_cycles -= *s->last_sample;
+ *s->last_sample = cycles;
+ s->total_active_cycles += *s->last_sample;
+}
+
+static unsigned long round_rate(struct sampler *s, unsigned long rate)
+{
+ int i;
+ unsigned long *table = s->table;
+
+ if (rate >= table[0])
+ return table[0];
+
+ for (i = 1; i < s->table_size; i++) {
+ if (rate > table[i])
+ return table[i - 1];
+ }
+ if (rate <= table[s->table_size - 1])
+ return table[s->table_size - 1];
+ return rate;
+}
+
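+/* Derive the next SCLK target from the last sample: active cycles for the
+ * period are (period_ms * clk_kHz) minus the idle cycles reported by the
+ * monitor; the windowed average gives a baseline frequency, a geometric
+ * boost term is grown on busy samples and decayed on idle ones, and the
+ * result is rounded up to the next available sclk_table entry (clamped at
+ * the table bounds) before being applied with clk_set_rate().
+ */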
+static void set_target_freq(struct sampler *s)
+{
+ unsigned long clock_rate;
+ unsigned long target_freq;
+ unsigned long active_count;
+
+ clock_rate = clk_get_rate(s->clock) / FREQ_MULT;
+ active_count = (s->sample_time + 1) * clock_rate;
+ active_count = (active_count > s->idle_cycles) ?
+ (active_count - s->idle_cycles) : (0);
+
+ s->sample_count++;
+
+ add_active_sample(s, active_count);
+
+ s->avg_freq = s->total_active_cycles / s->window_ms;
+
+ if ((s->idle_cycles >= (1 + (active_count >> 3))) &&
+ (s->bumped_freq >= s->avg_freq)) {
+ s->boost_freq = (s->boost_freq *
+ ((0x1 << BOOST_FRACTION_BITS) - s->boost_dec_coef))
+ >> BOOST_FRACTION_BITS;
+ if (s->boost_freq < s->boost_step)
+ s->boost_freq = 0;
+ } else if (s->sample_count < s->min_samples) {
+ s->sample_count++;
+ } else {
+ s->boost_freq = ((s->boost_freq *
+ ((0x1 << BOOST_FRACTION_BITS) + s->boost_inc_coef))
+ >> BOOST_FRACTION_BITS) + s->boost_step;
+ if (s->boost_freq > s->clock->max_rate)
+ s->boost_freq = s->clock->max_rate;
+ }
+
+ if ((s->avg_freq + LOWER_BAND) < s->bumped_freq)
+ s->bumped_freq = s->avg_freq + LOWER_BAND;
+ else if (s->avg_freq > (s->bumped_freq + UPPER_BAND))
+ s->bumped_freq = s->avg_freq - UPPER_BAND;
+
+ s->bumped_freq += (s->bumped_freq >> 3);
+
+ target_freq = max(s->bumped_freq, s->clock->min_rate);
+ target_freq += s->boost_freq;
+
+ target_freq = round_rate(s, target_freq) * FREQ_MULT;
+ clk_set_rate(s->clock, target_freq);
+}
+
+/* - process ticks in thread context
+ */
+static irqreturn_t stat_mon_isr_thread_fn(int irq, void *data)
+{
+ u32 reg_val = 0;
+
+ mutex_lock(&stat_mon->stat_mon_lock);
+ set_target_freq(&stat_mon->avp_sampler);
+ mutex_unlock(&stat_mon->stat_mon_lock);
+
+ /* start AVP sampler */
+ reg_val = tegra2_stat_mon_read(COP_MON_CTRL);
+ reg_val |= MON_ENABLE;
+ tegra2_stat_mon_write(reg_val, COP_MON_CTRL);
+ return IRQ_HANDLED;
+}
+
+void tegra2_statmon_stop(void)
+{
+ u32 reg_val = 0;
+
+ /* disable AVP monitor */
+ reg_val |= INT_STATUS;
+ tegra2_stat_mon_write(reg_val, COP_MON_CTRL);
+
+ clk_disable(stat_mon->stat_mon_clock);
+ clk_disable(stat_mon->avp_sampler.clock);
+}
+
+int tegra2_statmon_start(void)
+{
+ u32 reg_val = 0;
+
+ clk_enable(stat_mon->avp_sampler.clock);
+ clk_enable(stat_mon->stat_mon_clock);
+
+ /* disable AVP monitor */
+ reg_val |= INT_STATUS;
+ tegra2_stat_mon_write(reg_val, COP_MON_CTRL);
+
+ /* start AVP sampler. also enable INT to CPU */
+ reg_val = 0;
+ reg_val |= MON_ENABLE;
+ reg_val |= INT_ENABLE;
+ reg_val |= ((stat_mon->avp_sampler.sample_time \
+ << SAMPLE_PERIOD_SHIFT) & SAMPLE_PERIOD_MASK);
+ tegra2_stat_mon_write(reg_val, COP_MON_CTRL);
+ return 0;
+}
+
+static ssize_t tegra2_statmon_enable_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", stat_mon->avp_sampler.enable);
+}
+
+static ssize_t tegra2_statmon_enable_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+
+ mutex_lock(&stat_mon->stat_mon_lock);
+ sscanf(buf, "%d", &value);
+
+ if (value == 0 || value == 1)
+ stat_mon->avp_sampler.enable = value;
+ else {
+ mutex_unlock(&stat_mon->stat_mon_lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&stat_mon->stat_mon_lock);
+
+ if (stat_mon->avp_sampler.enable)
+ tegra2_statmon_start();
+ else
+ tegra2_statmon_stop();
+
+ return count;
+}
+
+static ssize_t tegra2_statmon_sample_time_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", stat_mon->avp_sampler.sample_time);
+}
+
+static ssize_t tegra2_statmon_sample_time_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+
+ mutex_lock(&stat_mon->stat_mon_lock);
+ sscanf(buf, "%d", &value);
+ stat_mon->avp_sampler.sample_time = value;
+ mutex_unlock(&stat_mon->stat_mon_lock);
+
+ return count;
+}
+
+static struct sysdev_class tegra2_statmon_sysclass = {
+ .name = "tegra2_statmon",
+};
+
+#define TEGRA2_STATMON_ATTRIBUTE_EXPAND(_attr, _mode) \
+ static SYSDEV_CLASS_ATTR(_attr, _mode, \
+ tegra2_statmon_##_attr##_show, tegra2_statmon_##_attr##_store)
+
+TEGRA2_STATMON_ATTRIBUTE_EXPAND(enable, 0666);
+TEGRA2_STATMON_ATTRIBUTE_EXPAND(sample_time, 0666);
+
+#define TEGRA2_STATMON_ATTRIBUTE(_name) (&attr_##_name)
+
+static struct sysdev_class_attribute *tegra2_statmon_attrs[] = {
+ TEGRA2_STATMON_ATTRIBUTE(enable),
+ TEGRA2_STATMON_ATTRIBUTE(sample_time),
+ NULL,
+};
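+/* The attributes above are expected to appear as
+ * /sys/devices/system/tegra2_statmon/enable and
+ * /sys/devices/system/tegra2_statmon/sample_time, e.g.
+ * "echo 1 > /sys/devices/system/tegra2_statmon/enable" to start sampling
+ * (path assumed from the sysdev class name registered below).
+ */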
+
+static int sampler_init(struct sampler *s)
+{
+ int i;
+ struct clk *clock;
+ unsigned long clock_rate;
+ unsigned long active_count;
+
+ s->enable = false;
+ s->sample_time = 9;
+
+ clock = tegra_get_clock_by_name("mon.sclk");
+ if (IS_ERR(clock)) {
+ pr_err("%s: Couldn't get mon.sckl\n", __func__);
+ return -1;
+ }
+
+ if (clk_set_rate(clock, clock->min_rate)) {
+ pr_err("%s: Failed to set rate\n", __func__);
+ return -1;
+ }
+ clock_rate = clk_get_rate(clock) / FREQ_MULT;
+ active_count = clock_rate * (s->sample_time + 1);
+
+ for (i = 0; i < WINDOW_SIZE; i++)
+ s->active_cycles[i] = active_count;
+
+ s->clock = clock;
+ s->last_sample = &s->active_cycles[0];
+ s->total_active_cycles = active_count << 7;
+ s->window_ms = (s->sample_time + 1) << 7;
+ s->avg_freq = s->total_active_cycles / s->window_ms;
+ s->bumped_freq = s->avg_freq;
+ s->boost_freq = 0;
+
+ return 0;
+}
+
+static int tegra2_stat_mon_init(void)
+{
+ int rc, i;
+ int ret_val = 0;
+
+ stat_mon = kzalloc(sizeof(struct tegra2_stat_mon), GFP_KERNEL);
+ if (stat_mon == NULL) {
+ pr_err("%s: unable to alloc data struct.\n", __func__);
+ return -ENOMEM;
+ }
+
+ stat_mon->stat_mon_base = IO_ADDRESS(TEGRA_STATMON_BASE);
+ stat_mon->vde_mon_base = IO_ADDRESS(TEGRA_VDE_BASE);
+
+ stat_mon->stat_mon_clock = tegra_get_clock_by_name("stat_mon");
+ if (stat_mon->stat_mon_clock == NULL) {
+ pr_err("%s: Failed to get stat_mon clock\n", __func__);
+ kfree(stat_mon);
+ return -ENODEV;
+ }
+
+ if (sampler_init(&stat_mon->avp_sampler)) {
+ kfree(stat_mon);
+ return -ENODEV;
+ }
+
+ stat_mon->avp_sampler.table = sclk_table;
+ stat_mon->avp_sampler.table_size = ARRAY_SIZE(sclk_table);
+ stat_mon->avp_sampler.boost_step = 1000;
+ stat_mon->avp_sampler.boost_inc_coef = 255;
+ stat_mon->avp_sampler.boost_dec_coef = 128;
+ stat_mon->avp_sampler.min_samples = 3;
+
+ mutex_init(&stat_mon->stat_mon_lock);
+
+ /* /sys/devices/system/tegra2_statmon */
+ rc = sysdev_class_register(&tegra2_statmon_sysclass);
+ if (rc) {
+ pr_err("%s : Couldn't create statmon sysfs entry\n", __func__);
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra2_statmon_attrs) - 1; i++) {
+ rc = sysdev_class_create_file(&tegra2_statmon_sysclass,
+ tegra2_statmon_attrs[i]);
+ if (rc) {
+ pr_err("%s: Failed to create sys class\n", __func__);
+ sysdev_class_unregister(&tegra2_statmon_sysclass);
+ kfree(stat_mon);
+ return 0;
+ }
+ }
+
+ ret_val = request_threaded_irq(INT_SYS_STATS_MON, stat_mon_isr,
+ stat_mon_isr_thread_fn, 0, "stat_mon_int", NULL);
+ if (ret_val) {
+ pr_err("%s: cannot register INT_SYS_STATS_MON handler, \
+ ret_val = 0x%x\n", __func__, ret_val);
+ tegra2_statmon_stop();
+ stat_mon->avp_sampler.enable = false;
+ kfree(stat_mon);
+ return ret_val;
+ }
+
+ return 0;
+}
+
+late_initcall(tegra2_stat_mon_init);
diff --git a/arch/arm/mach-tegra/tegra2_statmon.h b/arch/arm/mach-tegra/tegra2_statmon.h
new file mode 100644
index 000000000000..ae1094eb1c33
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_statmon.h
@@ -0,0 +1,33 @@
+/*
+ * arch/arm/mach-tegra/tegra2_statmon.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifdef CONFIG_TEGRA_STAT_MON
+int tegra2_statmon_start(void);
+void tegra2_statmon_stop(void);
+#else
+static inline int tegra2_statmon_start(void)
+{
+ return 0;
+}
+
+static inline void tegra2_statmon_stop(void)
+{
+}
+#endif
diff --git a/arch/arm/mach-tegra/tegra2_throttle.c b/arch/arm/mach-tegra/tegra2_throttle.c
new file mode 100644
index 000000000000..6114b20c6f5c
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra2_throttle.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-tegra/tegra2_throttle.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+
+#include "clock.h"
+#include "cpu-tegra.h"
+
+/* Tegra throttling requires frequencies in the table to be in ascending order */
+static struct cpufreq_frequency_table *throttle_table;
+static struct mutex *cpu_throttle_lock;
+
+/* CPU frequency is gradually lowered when throttling is enabled */
+#define THROTTLE_DELAY msecs_to_jiffies(2000)
+
+static int is_throttling;
+static int throttle_lowest_index;
+static int throttle_highest_index;
+static int throttle_index;
+static int throttle_next_index;
+static struct delayed_work throttle_work;
+static struct workqueue_struct *workqueue;
+static DEFINE_MUTEX(tegra_throttle_lock);
+
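+/* Periodic throttle worker: every THROTTLE_DELAY it enforces the current
+ * throttle-table cap and then steps throttle_next_index one entry lower
+ * (towards throttle_lowest_index), gradually reducing the CPU frequency
+ * while throttling stays enabled.
+ */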
+static void tegra_throttle_work_func(struct work_struct *work)
+{
+ unsigned int current_freq;
+
+ mutex_lock(cpu_throttle_lock);
+ if (!is_throttling)
+ goto out;
+
+ current_freq = tegra_getspeed(0);
+ throttle_index = throttle_next_index;
+
+ if (throttle_table[throttle_index].frequency < current_freq)
+ tegra_cpu_set_speed_cap(NULL);
+
+ if (throttle_index > throttle_lowest_index) {
+ throttle_next_index = throttle_index - 1;
+ queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
+ }
+out:
+ mutex_unlock(cpu_throttle_lock);
+}
+
+/*
+ * tegra_throttling_enable
+ * Reference-counted enable/disable of CPU frequency throttling.
+ * This function may sleep.
+ */
+void tegra_throttling_enable(bool enable)
+{
+ mutex_lock(&tegra_throttle_lock);
+ mutex_lock(cpu_throttle_lock);
+
+ if (enable && !(is_throttling++)) {
+ unsigned int current_freq = tegra_getspeed(0);
+
+ for (throttle_index = throttle_highest_index;
+ throttle_index >= throttle_lowest_index;
+ throttle_index--)
+ if (throttle_table[throttle_index].frequency
+ < current_freq)
+ break;
+
+ throttle_index = max(throttle_index, throttle_lowest_index);
+ throttle_next_index = throttle_index;
+ queue_delayed_work(workqueue, &throttle_work, 0);
+ } else if (!enable && is_throttling) {
+ if (!(--is_throttling)) {
+ /* restore speed requested by governor */
+ tegra_cpu_set_speed_cap(NULL);
+
+ mutex_unlock(cpu_throttle_lock);
+ cancel_delayed_work_sync(&throttle_work);
+ mutex_unlock(&tegra_throttle_lock);
+ return;
+ }
+ }
+ mutex_unlock(cpu_throttle_lock);
+ mutex_unlock(&tegra_throttle_lock);
+}
+EXPORT_SYMBOL_GPL(tegra_throttling_enable);
+
+unsigned int tegra_throttle_governor_speed(unsigned int requested_speed)
+{
+ return is_throttling ?
+ min(requested_speed, throttle_table[throttle_index].frequency) :
+ requested_speed;
+}
+
+bool tegra_is_throttling(void)
+{
+ return is_throttling;
+}
+
+int __init tegra_throttle_init(struct mutex *cpu_lock)
+{
+ struct tegra_cpufreq_table_data *table_data =
+ tegra_cpufreq_table_get();
+ if (IS_ERR_OR_NULL(table_data))
+ return -EINVAL;
+
+ /*
+ * High-priority, other flags default: not bound to a specific
+ * CPU, has rescue worker task (in case of allocation deadlock,
+ * etc.). Single-threaded.
+ */
+ workqueue = alloc_workqueue("cpu-tegra",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
+ if (!workqueue)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);
+
+ throttle_lowest_index = table_data->throttle_lowest_index;
+ throttle_highest_index = table_data->throttle_highest_index;
+ throttle_table = table_data->freq_table;
+ cpu_throttle_lock = cpu_lock;
+
+ return 0;
+}
+
+void tegra_throttle_exit(void)
+{
+ destroy_workqueue(workqueue);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int throttle_debug_set(void *data, u64 val)
+{
+ tegra_throttling_enable(val);
+ return 0;
+}
+static int throttle_debug_get(void *data, u64 *val)
+{
+ *val = (u64) is_throttling;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set,
+ "%llu\n");
+
+int __init tegra_throttle_debug_init(struct dentry *cpu_tegra_debugfs_root)
+{
+ if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root,
+ NULL, &throttle_fops))
+ return -ENOMEM;
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
diff --git a/arch/arm/mach-tegra/tegra3_actmon.c b/arch/arm/mach-tegra/tegra3_actmon.c
new file mode 100644
index 000000000000..05cdc1f86465
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_actmon.c
@@ -0,0 +1,848 @@
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/clk.h>
+
+#include "clock.h"
+
+#define ACTMON_GLB_STATUS 0x00
+#define ACTMON_GLB_PERIOD_CTRL 0x04
+
+#define ACTMON_DEV_CTRL 0x00
+#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
+#define ACTMON_DEV_CTRL_UP_WMARK_ENB (0x1 << 30)
+#define ACTMON_DEV_CTRL_DOWN_WMARK_ENB (0x1 << 29)
+#define ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT 26
+#define ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK (0x7 << 26)
+#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT 23
+#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK (0x7 << 23)
+#define ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB (0x1 << 21)
+#define ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB (0x1 << 20)
+#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 18)
+#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
+#define ACTMON_DEV_CTRL_K_VAL_MASK (0x7 << 10)
+
+#define ACTMON_DEV_UP_WMARK 0x04
+#define ACTMON_DEV_DOWN_WMARK 0x08
+#define ACTMON_DEV_INIT_AVG 0x0c
+#define ACTMON_DEV_AVG_UP_WMARK 0x10
+#define ACTMON_DEV_AVG_DOWN_WMARK 0x14
+
+#define ACTMON_DEV_COUNT_WEGHT 0x18
+#define ACTMON_DEV_COUNT 0x1c
+#define ACTMON_DEV_AVG_COUNT 0x20
+
+#define ACTMON_DEV_INTR_STATUS 0x24
+#define ACTMON_DEV_INTR_UP_WMARK (0x1 << 31)
+#define ACTMON_DEV_INTR_DOWN_WMARK (0x1 << 30)
+#define ACTMON_DEV_INTR_AVG_DOWN_WMARK (0x1 << 25)
+#define ACTMON_DEV_INTR_AVG_UP_WMARK (0x1 << 24)
+
+#define ACTMON_DEFAULT_AVG_WINDOW_LOG2 6
+#define ACTMON_DEFAULT_AVG_BAND 6 /* 1/10 of % */
+
+enum actmon_type {
+ ACTMON_LOAD_SAMPLER,
+ ACTMON_FREQ_SAMPLER,
+};
+
+enum actmon_state {
+ ACTMON_UNINITIALIZED = -1,
+ ACTMON_OFF = 0,
+ ACTMON_ON = 1,
+ ACTMON_SUSPENDED = 2,
+};
+
+#define ACTMON_DEFAULT_SAMPLING_PERIOD 12
+static u8 actmon_sampling_period;
+
+static unsigned long actmon_clk_freq;
+
+
+/* Units:
+ * - frequency in kHz
+ * - coefficients, and thresholds in %
+ * - sampling period in ms
+ * - window in sample periods (value = setting + 1)
+ */
+struct actmon_dev {
+ u32 reg;
+ u32 glb_status_irq_mask;
+ const char *dev_id;
+ const char *con_id;
+ struct clk *clk;
+
+ unsigned long max_freq;
+ unsigned long target_freq;
+ unsigned long cur_freq;
+
+ unsigned long avg_actv_freq;
+ unsigned long avg_band_freq;
+ unsigned int avg_sustain_coef;
+ u32 avg_count;
+
+ unsigned long boost_freq;
+ unsigned long boost_freq_step;
+ unsigned int boost_up_coef;
+ unsigned int boost_down_coef;
+ unsigned int boost_up_threshold;
+ unsigned int boost_down_threshold;
+
+ u8 up_wmark_window;
+ u8 down_wmark_window;
+ u8 avg_window_log2;
+ u32 count_weight;
+
+ enum actmon_type type;
+ enum actmon_state state;
+ enum actmon_state saved_state;
+
+ spinlock_t lock;
+
+ struct notifier_block rate_change_nb;
+};
+
+static void __iomem *actmon_base = IO_ADDRESS(TEGRA_ACTMON_BASE);
+
+static inline u32 actmon_readl(u32 offset)
+{
+ return __raw_readl(actmon_base + offset);
+}
+static inline void actmon_writel(u32 val, u32 offset)
+{
+ __raw_writel(val, actmon_base + offset);
+}
+static inline void actmon_wmb(void)
+{
+ wmb();
+ actmon_readl(ACTMON_GLB_STATUS);
+}
+
+#define offs(x) (dev->reg + x)
+
+static inline unsigned long do_percent(unsigned long val, unsigned int pct)
+{
+ return val * pct / 100;
+}
+
+static inline void actmon_dev_up_wmark_set(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
+ dev->cur_freq : actmon_clk_freq;
+
+ val = freq * actmon_sampling_period;
+ actmon_writel(do_percent(val, dev->boost_up_threshold),
+ offs(ACTMON_DEV_UP_WMARK));
+}
+
+static inline void actmon_dev_down_wmark_set(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
+ dev->cur_freq : actmon_clk_freq;
+
+ val = freq * actmon_sampling_period;
+ actmon_writel(do_percent(val, dev->boost_down_threshold),
+ offs(ACTMON_DEV_DOWN_WMARK));
+}
+
+static inline void actmon_dev_wmark_set(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
+ dev->cur_freq : actmon_clk_freq;
+
+ val = freq * actmon_sampling_period;
+ actmon_writel(do_percent(val, dev->boost_up_threshold),
+ offs(ACTMON_DEV_UP_WMARK));
+ actmon_writel(do_percent(val, dev->boost_down_threshold),
+ offs(ACTMON_DEV_DOWN_WMARK));
+}
+
+static inline void actmon_dev_avg_wmark_set(struct actmon_dev *dev)
+{
+ u32 avg = dev->avg_count;
+ u32 band = dev->avg_band_freq * actmon_sampling_period;
+
+ actmon_writel(avg + band, offs(ACTMON_DEV_AVG_UP_WMARK));
+ avg = max(avg, band);
+ actmon_writel(avg - band, offs(ACTMON_DEV_AVG_DOWN_WMARK));
+}
+
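+/* Convert the hardware average count to an activity frequency in kHz:
+ * for a frequency sampler the count already accumulates device clocks per
+ * sampling period, so avg = count / period; for a load sampler the busy
+ * fraction of actmon clocks is rescaled to the device clock, i.e.
+ * avg = count * cur_freq / (actmon_clk_freq * period).
+ */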
+static unsigned long actmon_dev_avg_freq_get(struct actmon_dev *dev)
+{
+ u64 val;
+
+ if (dev->type == ACTMON_FREQ_SAMPLER)
+ return dev->avg_count / actmon_sampling_period;
+
+ val = (u64)dev->avg_count * dev->cur_freq;
+ do_div(val, actmon_clk_freq * actmon_sampling_period);
+ return (u32)val;
+}
+
+/* Activity monitor sampling operations */
+irqreturn_t actmon_dev_isr(int irq, void *dev_id)
+{
+ u32 val;
+ unsigned long flags;
+ struct actmon_dev *dev = (struct actmon_dev *)dev_id;
+
+ val = actmon_readl(ACTMON_GLB_STATUS) & dev->glb_status_irq_mask;
+ if (!val)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
+ actmon_dev_avg_wmark_set(dev);
+
+ val = actmon_readl(offs(ACTMON_DEV_INTR_STATUS));
+ if (val & ACTMON_DEV_INTR_UP_WMARK) {
+ val = actmon_readl(offs(ACTMON_DEV_CTRL)) |
+ ACTMON_DEV_CTRL_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_DOWN_WMARK_ENB;
+
+ dev->boost_freq = dev->boost_freq_step +
+ do_percent(dev->boost_freq, dev->boost_up_coef);
+ if (dev->boost_freq >= dev->max_freq) {
+ dev->boost_freq = dev->max_freq;
+ val &= ~ACTMON_DEV_CTRL_UP_WMARK_ENB;
+ }
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ } else if (val & ACTMON_DEV_INTR_DOWN_WMARK) {
+ val = actmon_readl(offs(ACTMON_DEV_CTRL)) |
+ ACTMON_DEV_CTRL_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_DOWN_WMARK_ENB;
+
+ dev->boost_freq =
+ do_percent(dev->boost_freq, dev->boost_down_coef);
+ if (dev->boost_freq < (dev->boost_freq_step >> 1)) {
+ dev->boost_freq = 0;
+ val &= ~ACTMON_DEV_CTRL_DOWN_WMARK_ENB;
+ }
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ }
+
+ actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS)); /* clr all */
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t actmon_dev_fn(int irq, void *dev_id)
+{
+ unsigned long flags, freq;
+ struct actmon_dev *dev = (struct actmon_dev *)dev_id;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state != ACTMON_ON) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ freq = actmon_dev_avg_freq_get(dev);
+ dev->avg_actv_freq = freq;
+ freq = do_percent(freq, dev->avg_sustain_coef);
+ freq += dev->boost_freq;
+ dev->target_freq = freq;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ pr_debug("%s.%s(kHz): avg: %lu, target: %lu current: %lu\n",
+ dev->dev_id, dev->con_id, dev->avg_actv_freq,
+ dev->target_freq, dev->cur_freq);
+ clk_set_rate(dev->clk, freq * 1000);
+
+ return IRQ_HANDLED;
+}
+
+static int actmon_rate_notify_cb(
+ struct notifier_block *nb, unsigned long rate, void *v)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = container_of(
+ nb, struct actmon_dev, rate_change_nb);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ dev->cur_freq = rate / 1000;
+ if (dev->type == ACTMON_FREQ_SAMPLER) {
+ actmon_dev_wmark_set(dev);
+ actmon_wmb();
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return NOTIFY_OK;
+};
+
+/* Activity monitor configuration and control */
+static void actmon_dev_configure(struct actmon_dev *dev, unsigned long freq)
+{
+ u32 val;
+
+ dev->cur_freq = freq;
+ dev->target_freq = freq;
+ dev->avg_actv_freq = freq;
+
+ if (dev->type == ACTMON_FREQ_SAMPLER) {
+ dev->avg_count = dev->cur_freq * actmon_sampling_period;
+ dev->avg_band_freq = dev->max_freq *
+ ACTMON_DEFAULT_AVG_BAND / 1000;
+ } else {
+ dev->avg_count = actmon_clk_freq * actmon_sampling_period;
+ dev->avg_band_freq = actmon_clk_freq *
+ ACTMON_DEFAULT_AVG_BAND / 1000;
+ }
+ actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
+
+ BUG_ON(!dev->boost_up_threshold);
+ dev->avg_sustain_coef = 100 * 100 / dev->boost_up_threshold;
+ actmon_dev_avg_wmark_set(dev);
+ actmon_dev_wmark_set(dev);
+
+ actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
+ actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS)); /* clr all */
+
+ val = ACTMON_DEV_CTRL_PERIODIC_ENB | ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB;
+ val |= ((dev->avg_window_log2 - 1) << ACTMON_DEV_CTRL_K_VAL_SHIFT) &
+ ACTMON_DEV_CTRL_K_VAL_MASK;
+ val |= ((dev->down_wmark_window - 1) <<
+ ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT) &
+ ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK;
+ val |= ((dev->up_wmark_window - 1) <<
+ ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT) &
+ ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK;
+ val |= ACTMON_DEV_CTRL_DOWN_WMARK_ENB | ACTMON_DEV_CTRL_UP_WMARK_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_wmb();
+}
+
+static void actmon_dev_enable(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state == ACTMON_OFF) {
+ dev->state = ACTMON_ON;
+
+ val = actmon_readl(offs(ACTMON_DEV_CTRL));
+ val |= ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_wmb();
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void actmon_dev_disable(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state == ACTMON_ON) {
+ dev->state = ACTMON_OFF;
+
+ val = actmon_readl(offs(ACTMON_DEV_CTRL));
+ val &= ~ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS));
+ actmon_wmb();
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void actmon_dev_suspend(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if ((dev->state == ACTMON_ON) || (dev->state == ACTMON_OFF)) {
+ dev->saved_state = dev->state;
+ dev->state = ACTMON_SUSPENDED;
+
+ val = actmon_readl(offs(ACTMON_DEV_CTRL));
+ val &= ~ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS));
+ actmon_wmb();
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void actmon_dev_resume(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long flags;
+ unsigned long freq = clk_get_rate(dev->clk) / 1000;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state == ACTMON_SUSPENDED) {
+ actmon_dev_configure(dev, freq);
+ dev->state = dev->saved_state;
+ if (dev->state == ACTMON_ON) {
+ val = actmon_readl(offs(ACTMON_DEV_CTRL));
+ val |= ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_wmb();
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static int __init actmon_dev_init(struct actmon_dev *dev)
+{
+ int ret;
+ struct clk *p;
+ unsigned long freq;
+
+ spin_lock_init(&dev->lock);
+
+ dev->clk = clk_get_sys(dev->dev_id, dev->con_id);
+ if (IS_ERR(dev->clk)) {
+ pr_err("Failed to find %s.%s clock\n",
+ dev->dev_id, dev->con_id);
+ return -ENODEV;
+ }
+ dev->max_freq = clk_round_rate(dev->clk, ULONG_MAX);
+ clk_set_rate(dev->clk, dev->max_freq);
+ dev->max_freq /= 1000;
+ freq = clk_get_rate(dev->clk) / 1000;
+ actmon_dev_configure(dev, freq);
+
+ /* actmon device controls shared bus user clock, but rate
+ change notification should come from bus clock itself */
+ p = clk_get_parent(dev->clk);
+ BUG_ON(!p);
+
+ if (dev->rate_change_nb.notifier_call) {
+ ret = tegra_register_clk_rate_notifier(p, &dev->rate_change_nb);
+ if (ret) {
+ pr_err("Failed to register %s rate change notifier"
+ " for %s\n", p->name, dev->dev_id);
+ return ret;
+ }
+ }
+
+ ret = request_threaded_irq(INT_ACTMON, actmon_dev_isr, actmon_dev_fn,
+ IRQF_SHARED, dev->dev_id, dev);
+ if (ret) {
+ pr_err("Failed irq %d request for %s.%s\n",
+ INT_ACTMON, dev->dev_id, dev->con_id);
+ tegra_unregister_clk_rate_notifier(p, &dev->rate_change_nb);
+ return ret;
+ }
+
+ dev->state = ACTMON_OFF;
+ actmon_dev_enable(dev);
+ clk_enable(dev->clk);
+ return 0;
+}
+
+/* EMC activity monitor: frequency sampling device:
+ * activity counter is incremented every 256 memory transactions, and
+ * each transaction takes 2 EMC clocks; count_weight = 512.
+ */
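+/* With count_weight = 0x200 each increment of the raw counter is scaled to
+ * roughly 256 x 2 = 512 EMC clocks of activity, so the averaged count is
+ * directly comparable to the watermarks, which are programmed in device
+ * clocks per sampling period (see actmon_dev_wmark_set()).
+ */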
+static struct actmon_dev actmon_dev_emc = {
+ .reg = 0x1c0,
+ .glb_status_irq_mask = (0x1 << 26),
+ .dev_id = "tegra_actmon",
+ .con_id = "emc",
+
+ .boost_freq_step = 16000,
+ .boost_up_coef = 200,
+ .boost_down_coef = 50,
+ .boost_up_threshold = 60,
+ .boost_down_threshold = 40,
+
+ .up_wmark_window = 1,
+ .down_wmark_window = 3,
+ .avg_window_log2 = ACTMON_DEFAULT_AVG_WINDOW_LOG2,
+ .count_weight = 0x200,
+
+ .type = ACTMON_FREQ_SAMPLER,
+ .state = ACTMON_UNINITIALIZED,
+
+ .rate_change_nb = {
+ .notifier_call = actmon_rate_notify_cb,
+ },
+};
+
+/* AVP activity monitor: load sampling device:
+ * activity counter is incremented on every actmon clock pulse while
+ * AVP is not halted by flow controller; count_weight = 1.
+ */
+static struct actmon_dev actmon_dev_avp = {
+ .reg = 0x0c0,
+ .glb_status_irq_mask = (0x1 << 30),
+ .dev_id = "tegra_actmon",
+ .con_id = "avp",
+
+ .boost_freq_step = 8000,
+ .boost_up_coef = 200,
+ .boost_down_coef = 50,
+ .boost_up_threshold = 75,
+ .boost_down_threshold = 50,
+
+ .up_wmark_window = 1,
+ .down_wmark_window = 3,
+ .avg_window_log2 = ACTMON_DEFAULT_AVG_WINDOW_LOG2,
+ .count_weight = 0x1,
+
+ .type = ACTMON_LOAD_SAMPLER,
+ .state = ACTMON_UNINITIALIZED,
+
+ .rate_change_nb = {
+ .notifier_call = actmon_rate_notify_cb,
+ },
+};
+
+static struct actmon_dev *actmon_devices[] = {
+ &actmon_dev_emc,
+ &actmon_dev_avp,
+};
+
+/* Activity monitor suspend/resume */
+static int actmon_pm_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ int i;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++)
+ actmon_dev_suspend(actmon_devices[i]);
+ break;
+ case PM_POST_SUSPEND:
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++)
+ actmon_dev_resume(actmon_devices[i]);
+ break;
+ }
+
+ return NOTIFY_OK;
+};
+
+static struct notifier_block actmon_pm_nb = {
+ .notifier_call = actmon_pm_notify,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define RW_MODE (S_IWUSR | S_IRUGO)
+#define RO_MODE S_IRUGO
+
+static struct dentry *clk_debugfs_root;
+
+static int type_show(struct seq_file *s, void *data)
+{
+ struct actmon_dev *dev = s->private;
+
+ seq_printf(s, "%s\n", (dev->type == ACTMON_LOAD_SAMPLER) ?
+ "Load Activity Monitor" : "Frequency Activity Monitor");
+ return 0;
+}
+static int type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, type_show, inode->i_private);
+}
+static const struct file_operations type_fops = {
+ .open = type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int actv_get(void *data, u64 *val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ *val = actmon_dev_avg_freq_get(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(actv_fops, actv_get, NULL, "%llu\n");
+
+static int step_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->boost_freq_step * 100 / dev->max_freq;
+ return 0;
+}
+static int step_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+
+ if (val > 100)
+ val = 100;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->boost_freq_step = do_percent(dev->max_freq, (unsigned int)val);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(step_fops, step_get, step_set, "%llu\n");
+
+static int up_threshold_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->boost_up_threshold;
+ return 0;
+}
+static int up_threshold_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+ unsigned int up_threshold = (unsigned int)val;
+
+ if (up_threshold > 100)
+ up_threshold = 100;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (up_threshold <= dev->boost_down_threshold)
+ up_threshold = dev->boost_down_threshold;
+ if (up_threshold)
+ dev->avg_sustain_coef = 100 * 100 / up_threshold;
+ dev->boost_up_threshold = up_threshold;
+
+ actmon_dev_up_wmark_set(dev);
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops, up_threshold_get,
+ up_threshold_set, "%llu\n");
+
+static int down_threshold_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->boost_down_threshold;
+ return 0;
+}
+static int down_threshold_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+ unsigned int down_threshold = (unsigned int)val;
+
+ if (down_threshold > 100)
+ down_threshold = 100;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (down_threshold >= dev->boost_up_threshold)
+ down_threshold = dev->boost_up_threshold;
+ dev->boost_down_threshold = down_threshold;
+
+ actmon_dev_down_wmark_set(dev);
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(down_threshold_fops, down_threshold_get,
+ down_threshold_set, "%llu\n");
+
+static int state_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->state;
+ return 0;
+}
+static int state_set(void *data, u64 val)
+{
+ struct actmon_dev *dev = data;
+
+ if (val)
+ actmon_dev_enable(dev);
+ else
+ actmon_dev_disable(dev);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
+
+static int period_get(void *data, u64 *val)
+{
+ *val = actmon_sampling_period;
+ return 0;
+}
+static int period_set(void *data, u64 val)
+{
+ int i;
+ unsigned long flags;
+ u8 period = (u8)val;
+
+ if (period) {
+ actmon_sampling_period = period;
+ actmon_writel(period - 1, ACTMON_GLB_PERIOD_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ struct actmon_dev *dev = actmon_devices[i];
+ spin_lock_irqsave(&dev->lock, flags);
+ actmon_dev_wmark_set(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ actmon_wmb();
+ return 0;
+ }
+ return -EINVAL;
+}
+DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
+
+
+static int actmon_debugfs_create_dev(struct actmon_dev *dev)
+{
+ struct dentry *dir, *d;
+
+ if (dev->state == ACTMON_UNINITIALIZED)
+ return 0;
+
+ dir = debugfs_create_dir(dev->con_id, clk_debugfs_root);
+ if (!dir)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "actv_type", RO_MODE, dir, dev, &type_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "avg_activity", RO_MODE, dir, dev, &actv_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "boost_step", RW_MODE, dir, dev, &step_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_u32(
+ "boost_rate_dec", RW_MODE, dir, (u32 *)&dev->boost_down_coef);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_u32(
+ "boost_rate_inc", RW_MODE, dir, (u32 *)&dev->boost_up_coef);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "boost_threshold_dn", RW_MODE, dir, dev, &down_threshold_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "boost_threshold_up", RW_MODE, dir, dev, &up_threshold_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "state", RW_MODE, dir, dev, &state_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __init actmon_debugfs_init(void)
+{
+ int i;
+ int ret = -ENOMEM;
+ struct dentry *d;
+
+ d = debugfs_create_dir("tegra_actmon", NULL);
+ if (!d)
+ return ret;
+ clk_debugfs_root = d;
+
+ d = debugfs_create_file("period", RW_MODE, d, NULL, &period_fops);
+ if (!d)
+ goto err_out;
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ ret = actmon_debugfs_create_dev(actmon_devices[i]);
+ if (ret)
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(clk_debugfs_root);
+ return ret;
+}
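+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the knobs
+ * above appear under /sys/kernel/debug/tegra_actmon/, with the global
+ * "period" file at the top level and per-device files such as
+ * "boost_threshold_up" and "state" in a sub-directory named after the
+ * device's con_id.
+ */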
+
+#endif
+
+static int __init tegra_actmon_init(void)
+{
+ int i, ret;
+ struct clk *c = tegra_get_clock_by_name("actmon");
+
+ if (!c) {
+ pr_err("%s: Failed to find actmon clock\n", __func__);
+ return 0;
+ }
+ actmon_clk_freq = clk_get_rate(c) / 1000;
+ ret = clk_enable(c);
+ if (ret) {
+ pr_err("%s: Failed to enable actmon clock\n", __func__);
+ return 0;
+ }
+ actmon_sampling_period = ACTMON_DEFAULT_SAMPLING_PERIOD;
+ actmon_writel(actmon_sampling_period - 1, ACTMON_GLB_PERIOD_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ ret = actmon_dev_init(actmon_devices[i]);
+ pr_info("%s.%s: %s initialization (%d)\n",
+ actmon_devices[i]->dev_id, actmon_devices[i]->con_id,
+ ret ? "Failed" : "Completed", ret);
+ }
+ register_pm_notifier(&actmon_pm_nb);
+
+#ifdef CONFIG_DEBUG_FS
+ actmon_debugfs_init();
+#endif
+ return 0;
+}
+late_initcall(tegra_actmon_init);
diff --git a/arch/arm/mach-tegra/tegra3_clocks.c b/arch/arm/mach-tegra/tegra3_clocks.c
new file mode 100644
index 000000000000..39a1712c7225
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_clocks.c
@@ -0,0 +1,4751 @@
+/*
+ * arch/arm/mach-tegra/tegra3_clocks.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/clkdev.h>
+
+#include <mach/iomap.h>
+#include <mach/edp.h>
+
+#include "clock.h"
+#include "fuse.h"
+#include "dvfs.h"
+#include "pm.h"
+#include "sleep.h"
+#include "tegra3_emc.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00C
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35C
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_NUM 5
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_NUM 5
+
+#define RST_DEVICES_V_SWR_CPULP_RST_DIS (0x1 << 1)
+#define CLK_OUT_ENB_V_CLK_ENB_CPULP_EN (0x1 << 1)
+
+#define PERIPH_CLK_TO_BIT(c) (1 << (c->u.periph.clk_num % 32))
+#define PERIPH_CLK_TO_RST_REG(c) \
+ periph_clk_to_reg((c), RST_DEVICES_L, RST_DEVICES_V, 4)
+#define PERIPH_CLK_TO_RST_SET_REG(c) \
+ periph_clk_to_reg((c), RST_DEVICES_SET_L, RST_DEVICES_SET_V, 8)
+#define PERIPH_CLK_TO_RST_CLR_REG(c) \
+ periph_clk_to_reg((c), RST_DEVICES_CLR_L, RST_DEVICES_CLR_V, 8)
+
+#define PERIPH_CLK_TO_ENB_REG(c) \
+ periph_clk_to_reg((c), CLK_OUT_ENB_L, CLK_OUT_ENB_V, 4)
+#define PERIPH_CLK_TO_ENB_SET_REG(c) \
+ periph_clk_to_reg((c), CLK_OUT_ENB_SET_L, CLK_OUT_ENB_SET_V, 8)
+#define PERIPH_CLK_TO_ENB_CLR_REG(c) \
+ periph_clk_to_reg((c), CLK_OUT_ENB_CLR_L, CLK_OUT_ENB_CLR_V, 8)
+
+#define CLK_MASK_ARM 0x44
+#define MISC_CLK_ENB 0x48
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (0xF<<28)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0x0<<28)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (0x4<<28)
+#define OSC_CTRL_OSC_FREQ_12MHZ (0x8<<28)
+#define OSC_CTRL_OSC_FREQ_26MHZ (0xC<<28)
+#define OSC_CTRL_OSC_FREQ_16_8MHZ (0x1<<28)
+#define OSC_CTRL_OSC_FREQ_38_4MHZ (0x5<<28)
+#define OSC_CTRL_OSC_FREQ_48MHZ (0x9<<28)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<26)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<26)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<26)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<26)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG (1<<31)
+
+#define OSC_FREQ_DET_STATUS 0x5C
+#define OSC_FREQ_DET_BUSY (1<<31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+
+#define PERIPH_CLK_SOURCE_I2S1 0x100
+#define PERIPH_CLK_SOURCE_EMC 0x19c
+#define PERIPH_CLK_SOURCE_OSC 0x1fc
+#define PERIPH_CLK_SOURCE_NUM1 \
+ ((PERIPH_CLK_SOURCE_OSC - PERIPH_CLK_SOURCE_I2S1) / 4)
+
+#define PERIPH_CLK_SOURCE_G3D2 0x3b0
+#define PERIPH_CLK_SOURCE_SE 0x42c
+#define PERIPH_CLK_SOURCE_NUM2 \
+ ((PERIPH_CLK_SOURCE_SE - PERIPH_CLK_SOURCE_G3D2) / 4 + 1)
+
+#define AUDIO_DLY_CLK 0x49c
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+#define PERIPH_CLK_SOURCE_NUM3 \
+ ((AUDIO_SYNC_CLK_SPDIF - AUDIO_DLY_CLK) / 4 + 1)
+
+#define PERIPH_CLK_SOURCE_NUM (PERIPH_CLK_SOURCE_NUM1 + \
+ PERIPH_CLK_SOURCE_NUM2 + \
+ PERIPH_CLK_SOURCE_NUM3)
+
+#define CPU_SOFTRST_CTRL 0x380
+
+#define PERIPH_CLK_SOURCE_DIVU71_MASK 0xFF
+#define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF
+#define PERIPH_CLK_SOURCE_DIV_SHIFT 0
+#define PERIPH_CLK_SOURCE_DIVIDLE_SHIFT 8
+#define PERIPH_CLK_SOURCE_DIVIDLE_VAL 50
+#define PERIPH_CLK_UART_DIV_ENB (1<<24)
+#define PERIPH_CLK_VI_SEL_EX_SHIFT 24
+#define PERIPH_CLK_VI_SEL_EX_MASK (0x3<<PERIPH_CLK_VI_SEL_EX_SHIFT)
+#define PERIPH_CLK_NAND_DIV_EX_ENB (1<<8)
+#define PERIPH_CLK_DTV_POLARITY_INV (1<<25)
+
+#define AUDIO_SYNC_SOURCE_MASK 0x0F
+#define AUDIO_SYNC_DISABLE_BIT 0x10
+#define AUDIO_SYNC_TAP_NIBBLE_SHIFT(c) ((c->reg_shift - 24) * 4)
+
+#define PLL_BASE 0x0
+#define PLL_BASE_BYPASS (1<<31)
+#define PLL_BASE_ENABLE (1<<30)
+#define PLL_BASE_REF_ENABLE (1<<29)
+#define PLL_BASE_OVERRIDE (1<<28)
+#define PLL_BASE_LOCK (1<<27)
+#define PLL_BASE_DIVP_MASK (0x7<<20)
+#define PLL_BASE_DIVP_SHIFT 20
+#define PLL_BASE_DIVN_MASK (0x3FF<<8)
+#define PLL_BASE_DIVN_SHIFT 8
+#define PLL_BASE_DIVM_MASK (0x1F)
+#define PLL_BASE_DIVM_SHIFT 0
+
+#define PLL_OUT_RATIO_MASK (0xFF<<8)
+#define PLL_OUT_RATIO_SHIFT 8
+#define PLL_OUT_OVERRIDE (1<<2)
+#define PLL_OUT_CLKEN (1<<1)
+#define PLL_OUT_RESET_DISABLE (1<<0)
+
+#define PLL_MISC(c) \
+ (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc)
+#define PLL_MISC_LOCK_ENABLE(c) \
+ (((c)->flags & (PLLU | PLLD)) ? (1<<22) : (1<<18))
+
+#define PLL_MISC_DCCON_SHIFT 20
+#define PLL_MISC_CPCON_SHIFT 8
+#define PLL_MISC_CPCON_MASK (0xF<<PLL_MISC_CPCON_SHIFT)
+#define PLL_MISC_LFCON_SHIFT 4
+#define PLL_MISC_LFCON_MASK (0xF<<PLL_MISC_LFCON_SHIFT)
+#define PLL_MISC_VCOCON_SHIFT 0
+#define PLL_MISC_VCOCON_MASK (0xF<<PLL_MISC_VCOCON_SHIFT)
+#define PLLD_MISC_CLKENABLE (1<<30)
+
+#define PLLU_BASE_POST_DIV (1<<20)
+
+#define PLLD_BASE_DSIB_MUX_SHIFT 25
+#define PLLD_BASE_DSIB_MUX_MASK (1<<PLLD_BASE_DSIB_MUX_SHIFT)
+#define PLLD_BASE_CSI_CLKENABLE (1<<26)
+#define PLLD_MISC_DSI_CLKENABLE (1<<30)
+#define PLLD_MISC_DIV_RST (1<<23)
+#define PLLD_MISC_DCCON_SHIFT 12
+
+#define PLLDU_LFCON_SET_DIVN 600
+
+/* FIXME: OUT_OF_TABLE_CPCON per pll */
+#define OUT_OF_TABLE_CPCON 0x8
+
+#define SUPER_CLK_MUX 0x00
+#define SUPER_STATE_SHIFT 28
+#define SUPER_STATE_MASK (0xF << SUPER_STATE_SHIFT)
+#define SUPER_STATE_STANDBY (0x0 << SUPER_STATE_SHIFT)
+#define SUPER_STATE_IDLE (0x1 << SUPER_STATE_SHIFT)
+#define SUPER_STATE_RUN (0x2 << SUPER_STATE_SHIFT)
+#define SUPER_STATE_IRQ (0x3 << SUPER_STATE_SHIFT)
+#define SUPER_STATE_FIQ (0x4 << SUPER_STATE_SHIFT)
+#define SUPER_LP_DIV2_BYPASS (0x1 << 16)
+#define SUPER_SOURCE_MASK 0xF
+#define SUPER_FIQ_SOURCE_SHIFT 12
+#define SUPER_IRQ_SOURCE_SHIFT 8
+#define SUPER_RUN_SOURCE_SHIFT 4
+#define SUPER_IDLE_SOURCE_SHIFT 0
+
+#define SUPER_CLK_DIVIDER 0x04
+#define SUPER_CLOCK_SKIP_ENABLE (0x1 << 31)
+#define SUPER_CLOCK_DIV_U71_SHIFT 16
+#define SUPER_CLOCK_DIV_U71_MASK (0xff << SUPER_CLOCK_DIV_U71_SHIFT)
+/* guarantees safe cpu backup */
+#define SUPER_CLOCK_DIV_U71_MIN 0x2
+#define SUPER_CLOCK_SKIP_NOMIN_SHIFT 8
+#define SUPER_CLOCK_SKIP_DENOM_SHIFT 0
+#define SUPER_CLOCK_SKIP_MASK (0xffff << SUPER_CLOCK_SKIP_DENOM_SHIFT)
+
+#define BUS_CLK_DISABLE (1<<3)
+#define BUS_CLK_DIV_MASK 0x3
+
+#define PMC_CTRL 0x0
+ #define PMC_CTRL_BLINK_ENB (1 << 7)
+
+#define PMC_DPD_PADS_ORIDE 0x1c
+ #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20)
+
+#define PMC_BLINK_TIMER_DATA_ON_SHIFT 0
+#define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff
+#define PMC_BLINK_TIMER_ENB (1 << 15)
+#define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16
+#define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff
+
+#define PMC_PLLP_WB0_OVERRIDE 0xf8
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE (1 << 12)
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN (1 << 0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN (1 << 2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN (1 << 4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN (1 << 14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN (1 << 12)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN (1 << 16)
+
+#define PLLE_BASE_CML_ENABLE (1<<31)
+#define PLLE_BASE_ENABLE (1<<30)
+#define PLLE_BASE_DIVCML_SHIFT 24
+#define PLLE_BASE_DIVCML_MASK (0xf<<PLLE_BASE_DIVCML_SHIFT)
+#define PLLE_BASE_DIVP_SHIFT 16
+#define PLLE_BASE_DIVP_MASK (0x3f<<PLLE_BASE_DIVP_SHIFT)
+#define PLLE_BASE_DIVN_SHIFT 8
+#define PLLE_BASE_DIVN_MASK (0xFF<<PLLE_BASE_DIVN_SHIFT)
+#define PLLE_BASE_DIVM_SHIFT 0
+#define PLLE_BASE_DIVM_MASK (0xFF<<PLLE_BASE_DIVM_SHIFT)
+#define PLLE_BASE_DIV_MASK \
+ (PLLE_BASE_DIVCML_MASK | PLLE_BASE_DIVP_MASK | \
+ PLLE_BASE_DIVN_MASK | PLLE_BASE_DIVM_MASK)
+#define PLLE_BASE_DIV(m, n, p, cml) \
+ (((cml)<<PLLE_BASE_DIVCML_SHIFT) | ((p)<<PLLE_BASE_DIVP_SHIFT) | \
+ ((n)<<PLLE_BASE_DIVN_SHIFT) | ((m)<<PLLE_BASE_DIVM_SHIFT))
+
+#define PLLE_MISC_SETUP_BASE_SHIFT 16
+#define PLLE_MISC_SETUP_BASE_MASK (0xFFFF<<PLLE_MISC_SETUP_BASE_SHIFT)
+#define PLLE_MISC_READY (1<<15)
+#define PLLE_MISC_LOCK (1<<11)
+#define PLLE_MISC_LOCK_ENABLE (1<<9)
+#define PLLE_MISC_SETUP_EX_SHIFT 2
+#define PLLE_MISC_SETUP_EX_MASK (0x3<<PLLE_MISC_SETUP_EX_SHIFT)
+#define PLLE_MISC_SETUP_MASK \
+ (PLLE_MISC_SETUP_BASE_MASK | PLLE_MISC_SETUP_EX_MASK)
+#define PLLE_MISC_SETUP_VALUE \
+ ((0x7<<PLLE_MISC_SETUP_BASE_SHIFT) | (0x0<<PLLE_MISC_SETUP_EX_SHIFT))
+
+#define PLLE_SS_CTRL 0x68
+#define PLLE_SS_INCINTRV_SHIFT 24
+#define PLLE_SS_INCINTRV_MASK (0x3f<<PLLE_SS_INCINTRV_SHIFT)
+#define PLLE_SS_INC_SHIFT 16
+#define PLLE_SS_INC_MASK (0xff<<PLLE_SS_INC_SHIFT)
+#define PLLE_SS_MAX_SHIFT 0
+#define PLLE_SS_MAX_MASK (0x1ff<<PLLE_SS_MAX_SHIFT)
+#define PLLE_SS_COEFFICIENTS_MASK \
+ (PLLE_SS_INCINTRV_MASK | PLLE_SS_INC_MASK | PLLE_SS_MAX_MASK)
+#define PLLE_SS_COEFFICIENTS_12MHZ \
+ ((0x18<<PLLE_SS_INCINTRV_SHIFT) | (0x1<<PLLE_SS_INC_SHIFT) | \
+ (0x24<<PLLE_SS_MAX_SHIFT))
+#define PLLE_SS_DISABLE ((1<<12) | (1<<11) | (1<<10))
+
+#define PLLE_AUX 0x48c
+#define PLLE_AUX_PLLP_SEL (1<<2)
+#define PLLE_AUX_CML_SATA_ENABLE (1<<1)
+#define PLLE_AUX_CML_PCIE_ENABLE (1<<0)
+
+#define PMC_SATA_PWRGT 0x1ac
+#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE (1<<5)
+#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL (1<<4)
+
+#define ROUND_DIVIDER_UP 0
+#define ROUND_DIVIDER_DOWN 1
+
+static bool tegra3_clk_is_parent_allowed(struct clk *c, struct clk *p);
+
+static int tegra3_clk_shared_bus_update(struct clk *bus);
+
+static struct clk *emc_bridge;
+
+static bool detach_shared_bus;
+module_param(detach_shared_bus, bool, 0644);
+
+/*
+ * Structure defining the fields for the USB UTMI clock parameters.
+ */
+struct utmi_clk_param {
+ /* Oscillator Frequency in KHz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+/* OSC_FREQUENCY, ENABLE_DLY, STABLE_CNT, ACTIVE_DLY, XTAL_FREQ_CNT */
+ {13000000, 0x02, 0x33, 0x05, 0x7F},
+ {19200000, 0x03, 0x4B, 0x06, 0xBB},
+ {12000000, 0x02, 0x2F, 0x04, 0x76},
+ {26000000, 0x04, 0x66, 0x09, 0xFE},
+ {16800000, 0x03, 0x41, 0x0A, 0xA4},
+};
+
+static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+static void __iomem *misc_gp_hidrev_base = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+
+#define MISC_GP_HIDREV 0x804
+
+/*
+ * Some peripheral clocks share an enable bit, so refcount the enable bits
+ * in registers CLK_ENABLE_L, ... CLK_ENABLE_W
+ */
+static int tegra_periph_clk_enable_refcount[CLK_OUT_ENB_NUM * 32];
+
+#define clk_writel(value, reg) \
+ __raw_writel(value, (u32)reg_clk_base + (reg))
+#define clk_readl(reg) \
+ __raw_readl((u32)reg_clk_base + (reg))
+#define pmc_writel(value, reg) \
+ __raw_writel(value, (u32)reg_pmc_base + (reg))
+#define pmc_readl(reg) \
+ __raw_readl((u32)reg_pmc_base + (reg))
+#define chipid_readl() \
+ __raw_readl((u32)misc_gp_hidrev_base + MISC_GP_HIDREV)
+
+#define clk_writel_delay(value, reg) \
+ do { \
+ __raw_writel((value), (u32)reg_clk_base + (reg)); \
+ udelay(2); \
+ } while (0)
+
+
+static inline int clk_set_div(struct clk *c, u32 n)
+{
+ return clk_set_rate(c, (clk_get_rate(c->parent) + n-1) / n);
+}
+
+static inline u32 periph_clk_to_reg(
+ struct clk *c, u32 reg_L, u32 reg_V, int offs)
+{
+ u32 reg = c->u.periph.clk_num / 32;
+ BUG_ON(reg >= RST_DEVICES_NUM);
+ if (reg < 3) {
+ reg = reg_L + (reg * offs);
+ } else {
+ reg = reg_V + ((reg - 3) * offs);
+ }
+ return reg;
+}
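+/*
+ * Example (illustrative): a peripheral with clk_num 37 lives in bank 1, so
+ * PERIPH_CLK_TO_ENB_REG() resolves to CLK_OUT_ENB_H (offs = 4 between the
+ * L/H/U status registers), while clk_num 100 lives in bank 3 and resolves to
+ * CLK_OUT_ENB_V; the SET/CLR variants use offs = 8 since each bank has a
+ * set/clear register pair.
+ */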
+
+unsigned long clk_measure_input_freq(void)
+{
+ u32 clock_autodetect;
+ clk_writel(OSC_FREQ_DET_TRIG | 1, OSC_FREQ_DET);
+ do {} while (clk_readl(OSC_FREQ_DET_STATUS) & OSC_FREQ_DET_BUSY);
+ clock_autodetect = clk_readl(OSC_FREQ_DET_STATUS);
+ if (clock_autodetect >= 732 - 3 && clock_autodetect <= 732 + 3) {
+ return 12000000;
+ } else if (clock_autodetect >= 794 - 3 && clock_autodetect <= 794 + 3) {
+ return 13000000;
+ } else if (clock_autodetect >= 1172 - 3 && clock_autodetect <= 1172 + 3) {
+ return 19200000;
+ } else if (clock_autodetect >= 1587 - 3 && clock_autodetect <= 1587 + 3) {
+ return 26000000;
+ } else if (clock_autodetect >= 1025 - 3 && clock_autodetect <= 1025 + 3) {
+ return 16800000;
+ } else if (clock_autodetect >= 2344 - 3 && clock_autodetect <= 2344 + 3) {
+ return 38400000;
+ } else if (clock_autodetect >= 2928 - 3 && clock_autodetect <= 2928 + 3) {
+ return 48000000;
+ } else {
+ pr_err("%s: Unexpected clock autodetect value %d", __func__, clock_autodetect);
+ BUG();
+ return 0;
+ }
+}
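+/*
+ * The expected autodetect counts above are, to a good approximation, the
+ * oscillator frequency divided by the 32.768 kHz reference over the
+ * programmed window: e.g. 12 MHz / 32.768 kHz = 366.2 per reference period,
+ * which matches the ~732 count used for 12 MHz detection (a two-period
+ * window is assumed here purely for illustration).
+ */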
+
+static int clk_div_x1_get_divider(unsigned long parent_rate, unsigned long rate,
+ u32 max_x, u32 flags, u32 round_mode)
+{
+ s64 divider_ux1 = parent_rate;
+ if (!rate)
+ return -EINVAL;
+
+ if (!(flags & DIV_U71_INT))
+ divider_ux1 *= 2;
+
+ if (round_mode == ROUND_DIVIDER_UP)
+ divider_ux1 += rate - 1;
+ do_div(divider_ux1, rate);
+
+ if (flags & DIV_U71_INT)
+ divider_ux1 *= 2;
+
+ if (divider_ux1 - 2 < 0)
+ return 0;
+
+ if (divider_ux1 - 2 > max_x)
+ return -EINVAL;
+
+ return divider_ux1 - 2;
+}
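+/*
+ * Worked example (values assumed for illustration): with a 408 MHz parent, a
+ * 102 MHz target, ROUND_DIVIDER_UP and no DIV_U71_INT flag, the function
+ * returns 2 * 408 MHz / 102 MHz - 2 = 6, and the resulting clock rate is
+ * parent * 2 / (divider + 2) = 102 MHz.
+ */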
+
+static int clk_div71_get_divider(unsigned long parent_rate, unsigned long rate,
+ u32 flags, u32 round_mode)
+{
+ return clk_div_x1_get_divider(parent_rate, rate, 0xFF,
+ flags, round_mode);
+}
+
+static int clk_div151_get_divider(unsigned long parent_rate, unsigned long rate,
+ u32 flags, u32 round_mode)
+{
+ return clk_div_x1_get_divider(parent_rate, rate, 0xFFFF,
+ flags, round_mode);
+}
+
+static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate)
+{
+ s64 divider_u16;
+
+ divider_u16 = parent_rate;
+ if (!rate)
+ return -EINVAL;
+ divider_u16 += rate - 1;
+ do_div(divider_u16, rate);
+
+ if (divider_u16 - 1 < 0)
+ return 0;
+
+ if (divider_u16 - 1 > 0xFFFF)
+ return -EINVAL;
+
+ return divider_u16 - 1;
+}
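+/*
+ * Example (illustrative): for a 408 MHz parent and a 24 MHz target the u16
+ * divider returned is DIV_ROUND_UP(408, 24) - 1 = 16, giving
+ * parent / (divider + 1) = 24 MHz.
+ */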
+
+/* clk_m functions */
+static unsigned long tegra3_clk_m_autodetect_rate(struct clk *c)
+{
+ u32 osc_ctrl = clk_readl(OSC_CTRL);
+ u32 auto_clock_control = osc_ctrl & ~OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+
+ c->rate = clk_measure_input_freq();
+ switch (c->rate) {
+ case 12000000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_12MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ break;
+ case 13000000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_13MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ break;
+ case 19200000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_19_2MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ break;
+ case 26000000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_26MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ break;
+ case 16800000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_16_8MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ break;
+ case 38400000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_38_4MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2);
+ break;
+ case 48000000:
+ auto_clock_control |= OSC_CTRL_OSC_FREQ_48MHZ;
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4);
+ break;
+ default:
+ pr_err("%s: Unexpected clock rate %ld", __func__, c->rate);
+ BUG();
+ }
+ clk_writel(auto_clock_control, OSC_CTRL);
+ return c->rate;
+}
+
+static void tegra3_clk_m_init(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+ tegra3_clk_m_autodetect_rate(c);
+}
+
+static int tegra3_clk_m_enable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+ return 0;
+}
+
+static void tegra3_clk_m_disable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+ WARN(1, "Attempting to disable main SoC clock\n");
+}
+
+static struct clk_ops tegra_clk_m_ops = {
+ .init = tegra3_clk_m_init,
+ .enable = tegra3_clk_m_enable,
+ .disable = tegra3_clk_m_disable,
+};
+
+static struct clk_ops tegra_clk_m_div_ops = {
+ .enable = tegra3_clk_m_enable,
+};
+
+/* PLL reference divider functions */
+static void tegra3_pll_ref_init(struct clk *c)
+{
+ u32 pll_ref_div = clk_readl(OSC_CTRL) & OSC_CTRL_PLL_REF_DIV_MASK;
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ c->div = 1;
+ break;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ c->div = 2;
+ break;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ c->div = 4;
+ break;
+ default:
+ pr_err("%s: Invalid pll ref divider %d", __func__, pll_ref_div);
+ BUG();
+ }
+ c->mul = 1;
+ c->state = ON;
+}
+
+static struct clk_ops tegra_pll_ref_ops = {
+ .init = tegra3_pll_ref_init,
+ .enable = tegra3_clk_m_enable,
+ .disable = tegra3_clk_m_disable,
+};
+
+/* super clock functions */
+/* "super clocks" on tegra3 have two-stage muxes, fractional 7.1 divider and
+ * clock skipping super divider. We will ignore the clock skipping divider,
+ * since we can't lower the voltage when using the clock skip, but we can if
+ * we lower the PLL frequency. We will use 7.1 divider for CPU super-clock
+ * only when its parent is a fixed rate PLL, since we can't change PLL rate
+ * in this case.
+ */
+static void tegra3_super_clk_init(struct clk *c)
+{
+ u32 val;
+ int source;
+ int shift;
+ const struct clk_mux_sel *sel;
+
+ val = clk_readl(c->reg + SUPER_CLK_MUX);
+ c->state = ON;
+ BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) &&
+ ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE));
+ shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ?
+ SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT;
+ source = (val >> shift) & SUPER_SOURCE_MASK;
+ if (c->flags & DIV_2)
+ source |= val & SUPER_LP_DIV2_BYPASS;
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->value == source)
+ break;
+ }
+ BUG_ON(sel->input == NULL);
+ c->parent = sel->input;
+
+ if (c->flags & DIV_U71) {
+ /* Init safe 7.1 divider value (does not affect the PLLX path).
+ The super skipper is enabled to be ready for emergency throttle,
+ but set to 1:1 */
+ val = SUPER_CLOCK_SKIP_ENABLE |
+ (SUPER_CLOCK_DIV_U71_MIN << SUPER_CLOCK_DIV_U71_SHIFT);
+ clk_writel(val, c->reg + SUPER_CLK_DIVIDER);
+ c->mul = 2;
+ c->div = 2;
+ if (!(c->parent->flags & PLLX))
+ c->div += SUPER_CLOCK_DIV_U71_MIN;
+ } else {
+ clk_writel(0, c->reg + SUPER_CLK_DIVIDER);
+ }
+}
+
+static int tegra3_super_clk_enable(struct clk *c)
+{
+ return 0;
+}
+
+static void tegra3_super_clk_disable(struct clk *c)
+{
+ /* since tegra 3 has 2 CPU super clocks - the low power lp-mode clock
+ and the geared up g-mode super clock - a mode switch may request to
+ disable either of them; accept the request with no effect on h/w */
+}
+
+static int tegra3_super_clk_set_parent(struct clk *c, struct clk *p)
+{
+ u32 val;
+ const struct clk_mux_sel *sel;
+ int shift;
+
+ val = clk_readl(c->reg + SUPER_CLK_MUX);
+ BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) &&
+ ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE));
+ shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ?
+ SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT;
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ /* For the LP mode super-clock, switching between the PLLX
+ direct and divided-by-2 outputs is allowed only when the
+ current parent is a clock source other than PLLX */
+ if ((c->flags & DIV_2) && (p->flags & PLLX) &&
+ ((sel->value ^ val) & SUPER_LP_DIV2_BYPASS)) {
+ if (c->parent->flags & PLLX)
+ return -EINVAL;
+ val ^= SUPER_LP_DIV2_BYPASS;
+ clk_writel_delay(val, c->reg);
+ }
+ val &= ~(SUPER_SOURCE_MASK << shift);
+ val |= (sel->value & SUPER_SOURCE_MASK) << shift;
+
+ /* 7.1 divider for CPU super-clock does not affect
+ PLLX path */
+ if (c->flags & DIV_U71) {
+ u32 div = 0;
+ if (!(p->flags & PLLX)) {
+ div = clk_readl(c->reg +
+ SUPER_CLK_DIVIDER);
+ div &= SUPER_CLOCK_DIV_U71_MASK;
+ div >>= SUPER_CLOCK_DIV_U71_SHIFT;
+ }
+ c->div = div + 2;
+ c->mul = 2;
+ }
+
+ if (c->refcnt)
+ clk_enable(p);
+
+ clk_writel_delay(val, c->reg);
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static DEFINE_SPINLOCK(super_divider_lock);
+
+static void tegra3_super_clk_divider_update(struct clk *c, u8 div)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&super_divider_lock, flags);
+ val = clk_readl(c->reg + SUPER_CLK_DIVIDER);
+ val &= ~SUPER_CLOCK_DIV_U71_MASK;
+ val |= div << SUPER_CLOCK_DIV_U71_SHIFT;
+ clk_writel(val, c->reg + SUPER_CLK_DIVIDER);
+ spin_unlock_irqrestore(&super_divider_lock, flags);
+ udelay(2);
+}
+
+static void tegra3_super_clk_skipper_update(struct clk *c, u8 nomin, u8 denom)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&super_divider_lock, flags);
+ val = clk_readl(c->reg + SUPER_CLK_DIVIDER);
+ val &= ~SUPER_CLOCK_SKIP_MASK;
+ val |= (nomin << SUPER_CLOCK_SKIP_NOMIN_SHIFT) |
+ (denom << SUPER_CLOCK_SKIP_DENOM_SHIFT);
+ clk_writel(val, c->reg + SUPER_CLK_DIVIDER);
+ spin_unlock_irqrestore(&super_divider_lock, flags);
+}
+
+/*
+ * Do not use super clock "skippers", since dividing using a clock skipper
+ * does not allow the voltage to be scaled down. Instead adjust the rate of
+ * the parent clock. This requires that the parent of a super clock have no
+ * other children, otherwise the rate will change underneath the other
+ * children. Special case: if a fixed rate PLL is the CPU super clock parent,
+ * the rate of this PLL can't be changed, and it has many other children. In
+ * this case use the 7.1 fractional divider to adjust the super clock rate.
+ */
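+/*
+ * Example for the fixed-PLL path below (rates assumed for illustration): with
+ * a 408 MHz fixed parent and a 204 MHz target, clk_div71_get_divider()
+ * returns 2, which is already SUPER_CLOCK_DIV_U71_MIN, so the super clock
+ * runs at 408 MHz * 2 / (2 + 2) = 204 MHz.
+ */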
+static int tegra3_super_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ if ((c->flags & DIV_U71) && (c->parent->flags & PLL_FIXED)) {
+ int div = clk_div71_get_divider(c->parent->u.pll.fixed_rate,
+ rate, c->flags, ROUND_DIVIDER_DOWN);
+ div = max(div, SUPER_CLOCK_DIV_U71_MIN);
+ tegra3_super_clk_divider_update(c, div);
+ c->div = div + 2;
+ c->mul = 2;
+ return 0;
+ }
+ return clk_set_rate(c->parent, rate);
+}
+
+static struct clk_ops tegra_super_ops = {
+ .init = tegra3_super_clk_init,
+ .enable = tegra3_super_clk_enable,
+ .disable = tegra3_super_clk_disable,
+ .set_parent = tegra3_super_clk_set_parent,
+ .set_rate = tegra3_super_clk_set_rate,
+};
+
+static int tegra3_twd_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ /* The input value 'rate' is the clock rate of the CPU complex. */
+ c->rate = (rate * c->mul) / c->div;
+ return 0;
+}
+
+static struct clk_ops tegra3_twd_ops = {
+ .set_rate = tegra3_twd_clk_set_rate,
+};
+
+static struct clk tegra3_clk_twd = {
+ /* NOTE: The twd clock must have *NO* parent. Its rate is directly
+ updated by tegra3_cpu_cmplx_clk_set_rate() because the
+ frequency change notifier for the twd is called in an
+ atomic context which cannot take a mutex. */
+ .name = "twd",
+ .ops = &tegra3_twd_ops,
+ .max_rate = 1400000000, /* Same as tegra_clk_cpu_cmplx.max_rate */
+ .mul = 1,
+ .div = 2,
+};
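+/*
+ * With .mul = 1 and .div = 2 above, the twd clock simply tracks half of the
+ * CPU complex rate, e.g. a 1.4 GHz CPU complex rate yields a 700 MHz twd
+ * rate (illustrative numbers).
+ */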
+
+/* virtual cpu clock functions */
+/* some clocks can not be stopped (cpu, memory bus) while the SoC is running.
+ To change the frequency of these clocks, the parent pll may need to be
+ reprogrammed, so the clock must be moved off the pll, the pll reprogrammed,
+ and then the clock moved back to the pll. To hide this sequence, a virtual
+ clock handles it.
+ */
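+/*
+ * Sketch of the hidden sequence (see tegra3_cpu_clk_set_rate() below): take a
+ * reference on the main PLL, move the super-clock mux to the backup source,
+ * reprogram the main PLL to the new rate, then move the mux back to the main
+ * PLL and drop the extra reference.
+ */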
+static void tegra3_cpu_clk_init(struct clk *c)
+{
+ c->state = (!is_lp_cluster() == (c->u.cpu.mode == MODE_G)) ? ON : OFF;
+}
+
+static int tegra3_cpu_clk_enable(struct clk *c)
+{
+ return 0;
+}
+
+static void tegra3_cpu_clk_disable(struct clk *c)
+{
+ /* since tegra 3 has 2 virtual CPU clocks - the low power lp-mode clock
+ and the geared up g-mode clock - a mode switch may request to disable
+ either of them; accept the request with no effect on h/w */
+}
+
+static int tegra3_cpu_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret = 0;
+
+ /* Hardware clock control is not possible on FPGA platforms.
+ Report success so that upper level layers don't complain
+ needlessly. */
+#ifndef CONFIG_TEGRA_FPGA_PLATFORM
+ if (c->dvfs) {
+ if (!c->dvfs->dvfs_rail)
+ return -ENOSYS;
+ else if ((!c->dvfs->dvfs_rail->reg) &&
+ (clk_get_rate_locked(c) < rate)) {
+ WARN(1, "Increasing CPU rate while regulator is not"
+ " ready may overclock CPU\n");
+ return -ENOSYS;
+ }
+ }
+
+ /*
+ * Take an extra reference to the main pll so it doesn't turn
+ * off when we move the cpu off of it
+ */
+ clk_enable(c->u.cpu.main);
+
+ ret = clk_set_parent(c->parent, c->u.cpu.backup);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name);
+ goto out;
+ }
+
+ ret = clk_set_rate(c->parent, rate);
+ if (!ret && (rate <= clk_get_rate(c->parent)))
+ goto out;
+
+ if (rate != clk_get_rate(c->u.cpu.main)) {
+ ret = clk_set_rate(c->u.cpu.main, rate);
+ if (ret) {
+ pr_err("Failed to change cpu pll to %lu\n", rate);
+ goto out;
+ }
+ }
+
+ ret = clk_set_parent(c->parent, c->u.cpu.main);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name);
+ goto out;
+ }
+
+out:
+ clk_disable(c->u.cpu.main);
+#endif
+ return ret;
+}
+
+static struct clk_ops tegra_cpu_ops = {
+ .init = tegra3_cpu_clk_init,
+ .enable = tegra3_cpu_clk_enable,
+ .disable = tegra3_cpu_clk_disable,
+ .set_rate = tegra3_cpu_clk_set_rate,
+};
+
+
+static void tegra3_cpu_cmplx_clk_init(struct clk *c)
+{
+ int i = !!is_lp_cluster();
+
+ BUG_ON(c->inputs[0].input->u.cpu.mode != MODE_G);
+ BUG_ON(c->inputs[1].input->u.cpu.mode != MODE_LP);
+ c->parent = c->inputs[i].input;
+}
+
+/* The cpu complex clock provides a second level of virtualization (on top of
+ the virtual cpu rate control above) in order to hide the CPU mode switch
+ sequence */
+#if PARAMETERIZE_CLUSTER_SWITCH
+static unsigned int switch_delay;
+static unsigned int switch_flags;
+static DEFINE_SPINLOCK(parameters_lock);
+
+void tegra_cluster_switch_set_parameters(unsigned int us, unsigned int flags)
+{
+ spin_lock(&parameters_lock);
+ switch_delay = us;
+ switch_flags = flags;
+ spin_unlock(&parameters_lock);
+}
+#endif
+
+static int tegra3_cpu_cmplx_clk_enable(struct clk *c)
+{
+ return 0;
+}
+
+static void tegra3_cpu_cmplx_clk_disable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ /* oops - don't disable the CPU complex clock! */
+ BUG();
+}
+
+static int tegra3_cpu_cmplx_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ unsigned long flags;
+ int ret;
+ struct clk *parent = c->parent;
+
+ if (!parent->ops || !parent->ops->set_rate)
+ return -ENOSYS;
+
+ clk_lock_save(parent, &flags);
+
+ ret = clk_set_rate_locked(parent, rate);
+
+ /* We can't parent the twd directly to the CPU complex because
+ the TWD frequency update notifier is called in an atomic context
+ and the CPU frequency update requires a mutex. Update the twd
+ clock rate with the new CPU complex rate. */
+ clk_set_rate(&tegra3_clk_twd, clk_get_rate_locked(parent));
+
+ clk_unlock_restore(parent, &flags);
+
+ return ret;
+}
+
+static int tegra3_cpu_cmplx_clk_set_parent(struct clk *c, struct clk *p)
+{
+ int ret;
+ unsigned int flags, delay;
+ const struct clk_mux_sel *sel;
+ unsigned long rate = clk_get_rate(c->parent);
+
+ pr_debug("%s: %s %s\n", __func__, c->name, p->name);
+ BUG_ON(c->parent->u.cpu.mode != (is_lp_cluster() ? MODE_LP : MODE_G));
+
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p)
+ break;
+ }
+ if (!sel->input)
+ return -EINVAL;
+
+#if PARAMETERIZE_CLUSTER_SWITCH
+ spin_lock(&parameters_lock);
+ flags = switch_flags;
+ delay = switch_delay;
+ switch_flags = 0;
+ spin_unlock(&parameters_lock);
+
+ if (flags) {
+ /* over-clocking after the switch - allow, but lower rate */
+ if (rate > p->max_rate) {
+ rate = p->max_rate;
+ ret = clk_set_rate(c->parent, rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate %lu for %s\n",
+ __func__, rate, p->name);
+ return ret;
+ }
+ }
+ } else
+#endif
+ {
+ if (p == c->parent) /* already switched - exit */
+ return 0;
+
+ if (rate > p->max_rate) { /* over-clocking - no switch */
+ pr_warn("%s: No %s mode switch to %s at rate %lu\n",
+ __func__, c->name, p->name, rate);
+ return -ECANCELED;
+ }
+ flags = TEGRA_POWER_CLUSTER_IMMEDIATE;
+ delay = 0;
+ }
+ flags |= (p->u.cpu.mode == MODE_LP) ? TEGRA_POWER_CLUSTER_LP :
+ TEGRA_POWER_CLUSTER_G;
+
+ /* Since in both LP and G mode the CPU main and backup sources are the
+ same, setting the rate on the new parent just synchronizes the
+ super-clock muxes before the mode switch with no PLL re-locking */
+ ret = clk_set_rate(p, rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate %lu for %s\n",
+ __func__, rate, p->name);
+ return ret;
+ }
+
+ /* Enabling the new parent scales the new mode's voltage rail in
+ advance, before the switch happens */
+ if (c->refcnt)
+ clk_enable(p);
+
+ /* switch CPU mode */
+ ret = tegra_cluster_control(delay, flags);
+ if (ret) {
+ if (c->refcnt)
+ clk_disable(p);
+ pr_err("%s: Failed to switch %s mode to %s\n",
+ __func__, c->name, p->name);
+ return ret;
+ }
+
+ /* Disabling old parent scales old mode voltage rail */
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+}
+
+static long tegra3_cpu_cmplx_round_rate(struct clk *c,
+ unsigned long rate)
+{
+ if (rate > c->parent->max_rate)
+ rate = c->parent->max_rate;
+ else if (rate < c->parent->min_rate)
+ rate = c->parent->min_rate;
+ return rate;
+}
+
+static struct clk_ops tegra_cpu_cmplx_ops = {
+ .init = tegra3_cpu_cmplx_clk_init,
+ .enable = tegra3_cpu_cmplx_clk_enable,
+ .disable = tegra3_cpu_cmplx_clk_disable,
+ .set_rate = tegra3_cpu_cmplx_clk_set_rate,
+ .set_parent = tegra3_cpu_cmplx_clk_set_parent,
+ .round_rate = tegra3_cpu_cmplx_round_rate,
+};
+
+/* virtual cop clock functions. Used to acquire the fake 'cop' clock to
+ * reset the COP block (i.e. AVP) */
+static void tegra3_cop_clk_reset(struct clk *c, bool assert)
+{
+ unsigned long reg = assert ? RST_DEVICES_SET_L : RST_DEVICES_CLR_L;
+
+ pr_debug("%s %s\n", __func__, assert ? "assert" : "deassert");
+ clk_writel(1 << 1, reg);
+}
+
+static struct clk_ops tegra_cop_ops = {
+ .reset = tegra3_cop_clk_reset,
+};
+
+/* bus clock functions */
+static void tegra3_bus_clk_init(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ c->state = ((val >> c->reg_shift) & BUS_CLK_DISABLE) ? OFF : ON;
+ c->div = ((val >> c->reg_shift) & BUS_CLK_DIV_MASK) + 1;
+ c->mul = 1;
+}
+
+static int tegra3_bus_clk_enable(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ val &= ~(BUS_CLK_DISABLE << c->reg_shift);
+ clk_writel(val, c->reg);
+ return 0;
+}
+
+static void tegra3_bus_clk_disable(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ val |= BUS_CLK_DISABLE << c->reg_shift;
+ clk_writel(val, c->reg);
+}
+
+static int tegra3_bus_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ u32 val = clk_readl(c->reg);
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ int i;
+ for (i = 1; i <= 4; i++) {
+ if (rate >= parent_rate / i) {
+ val &= ~(BUS_CLK_DIV_MASK << c->reg_shift);
+ val |= (i - 1) << c->reg_shift;
+ clk_writel(val, c->reg);
+ c->div = i;
+ c->mul = 1;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_bus_ops = {
+ .init = tegra3_bus_clk_init,
+ .enable = tegra3_bus_clk_enable,
+ .disable = tegra3_bus_clk_disable,
+ .set_rate = tegra3_bus_clk_set_rate,
+};
+
+/* The virtual system bus complex clock is used to hide the sequence of
+ changing sclk/hclk/pclk parents and dividers needed to configure the
+ requested sclk target rate. */
+static void tegra3_sbus_cmplx_init(struct clk *c)
+{
+ unsigned long rate;
+
+ c->max_rate = c->parent->max_rate;
+ c->min_rate = c->parent->min_rate;
+
+ /* Threshold must be an exact proper factor of low range parent,
+ and both low/high range parents have 7.1 fractional dividers */
+ rate = clk_get_rate(c->u.system.sclk_low->parent);
+ if (c->u.system.threshold) {
+ BUG_ON(c->u.system.threshold > rate);
+ BUG_ON((rate % c->u.system.threshold) != 0);
+ }
+ BUG_ON(!(c->u.system.sclk_low->flags & DIV_U71));
+ BUG_ON(!(c->u.system.sclk_high->flags & DIV_U71));
+}
+
+/* This special sbus round function is implemented because:
+ *
+ * (a) fractional dividers can not be used to derive the system bus clock,
+ * with one exception: a 1 : 2.5 divider is allowed at 1.2V and above (and we
+ * do need this divider to reach top sbus frequencies from a high frequency
+ * source).
+ *
+ * (b) since sbus is a shared bus, and its frequency is set to the highest
+ * enabled shared_bus_user clock, the target rate should be rounded up the
+ * divider ladder (if the max limit allows it) - for pll_div and peripheral_div
+ * the common practice is rounding down - a special case again.
+ *
+ * Note that final rate is trimmed (not rounded up) to avoid spiraling up in
+ * recursive calls. Lost 1Hz is added in tegra3_sbus_cmplx_set_rate before
+ * actually setting divider rate.
+ */
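+/*
+ * Example of case (a) above (source rate assumed for illustration): with a
+ * 408 MHz high-range source, the special 1 : 2.5 ratio gives
+ * 2 * 408 MHz / 5 = 163.2 MHz, which is used only when DVFS predicts at
+ * least 1.2V for that rate and it fits under max_rate.
+ */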
+static unsigned long sclk_high_2_5_rate;
+static bool sclk_high_2_5_valid;
+
+static long tegra3_sbus_cmplx_round_rate(struct clk *c, unsigned long rate)
+{
+ int i, divider;
+ unsigned long source_rate, round_rate;
+ struct clk *new_parent;
+
+ rate = max(rate, c->min_rate);
+
+ if (!sclk_high_2_5_rate) {
+ source_rate = clk_get_rate(c->u.system.sclk_high->parent);
+ sclk_high_2_5_rate = 2 * source_rate / 5;
+ i = tegra_dvfs_predict_millivolts(c, sclk_high_2_5_rate);
+ if (!IS_ERR_VALUE(i) && (i >= 1200) &&
+ (sclk_high_2_5_rate <= c->max_rate))
+ sclk_high_2_5_valid = true;
+ }
+
+ new_parent = (rate <= c->u.system.threshold) ?
+ c->u.system.sclk_low : c->u.system.sclk_high;
+ source_rate = clk_get_rate(new_parent->parent);
+
+ divider = clk_div71_get_divider(source_rate, rate,
+ new_parent->flags | DIV_U71_INT, ROUND_DIVIDER_DOWN);
+ if (divider < 0)
+ return divider;
+
+ round_rate = source_rate * 2 / (divider + 2);
+ if (round_rate > c->max_rate) {
+ divider += 2;
+ round_rate = source_rate * 2 / (divider + 2);
+ }
+
+ if (new_parent == c->u.system.sclk_high) {
+ /* Check if 1 : 2.5 ratio provides better approximation */
+ if (sclk_high_2_5_valid) {
+ if (((sclk_high_2_5_rate < round_rate) &&
+ (sclk_high_2_5_rate >= rate)) ||
+ ((round_rate < sclk_high_2_5_rate) &&
+ (round_rate < rate)))
+ round_rate = sclk_high_2_5_rate;
+ }
+
+ if (round_rate <= c->u.system.threshold)
+ round_rate = c->u.system.threshold;
+ }
+ return round_rate;
+}
+
+static int tegra3_sbus_cmplx_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+ struct clk *new_parent;
+
+ /* - select the appropriate sclk parent
+ - keep hclk at the same rate as sclk
+ - set pclk at 1:2 rate of hclk unless pclk minimum is violated,
+ in the latter case switch to 1:1 ratio */
+
+ if (rate >= c->u.system.pclk->min_rate * 2) {
+ ret = clk_set_div(c->u.system.pclk, 2);
+ if (ret) {
+ pr_err("Failed to set 1 : 2 pclk divider\n");
+ return ret;
+ }
+ }
+
+ new_parent = (rate <= c->u.system.threshold) ?
+ c->u.system.sclk_low : c->u.system.sclk_high;
+
+ ret = clk_set_rate(new_parent, rate + 1);
+ if (ret) {
+ pr_err("Failed to set sclk source %s to %lu\n",
+ new_parent->name, rate);
+ return ret;
+ }
+
+ if (new_parent != clk_get_parent(c->parent)) {
+ ret = clk_set_parent(c->parent, new_parent);
+ if (ret) {
+ pr_err("Failed to switch sclk source to %s\n",
+ new_parent->name);
+ return ret;
+ }
+ }
+
+ if (rate < c->u.system.pclk->min_rate * 2) {
+ ret = clk_set_div(c->u.system.pclk, 1);
+ if (ret) {
+ pr_err("Failed to set 1 : 1 pclk divider\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct clk_ops tegra_sbus_cmplx_ops = {
+ .init = tegra3_sbus_cmplx_init,
+ .set_rate = tegra3_sbus_cmplx_set_rate,
+ .round_rate = tegra3_sbus_cmplx_round_rate,
+ .shared_bus_update = tegra3_clk_shared_bus_update,
+};
+
+/* Blink output functions */
+
+static void tegra3_blink_clk_init(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_CTRL);
+ c->state = (val & PMC_CTRL_BLINK_ENB) ? ON : OFF;
+ c->mul = 1;
+ val = pmc_readl(c->reg);
+
+ if (val & PMC_BLINK_TIMER_ENB) {
+ unsigned int on_off;
+
+ on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) &
+ PMC_BLINK_TIMER_DATA_ON_MASK;
+ val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT;
+ val &= PMC_BLINK_TIMER_DATA_OFF_MASK;
+ on_off += val;
+ /* each tick in the blink timer is 4 32KHz clocks */
+ c->div = on_off * 4;
+ } else {
+ c->div = 1;
+ }
+}
+
+static int tegra3_blink_clk_enable(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_DPD_PADS_ORIDE);
+ pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE);
+
+ val = pmc_readl(PMC_CTRL);
+ pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL);
+
+ return 0;
+}
+
+static void tegra3_blink_clk_disable(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_CTRL);
+ pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL);
+
+ val = pmc_readl(PMC_DPD_PADS_ORIDE);
+ pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE);
+}
+
+static int tegra3_blink_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ if (rate >= parent_rate) {
+ c->div = 1;
+ pmc_writel(0, c->reg);
+ } else {
+ unsigned int on_off;
+ u32 val;
+
+ on_off = DIV_ROUND_UP(parent_rate / 8, rate);
+ c->div = on_off * 8;
+
+ val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) <<
+ PMC_BLINK_TIMER_DATA_ON_SHIFT;
+ on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK;
+ on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT;
+ val |= on_off;
+ val |= PMC_BLINK_TIMER_ENB;
+ pmc_writel(val, c->reg);
+ }
+
+ return 0;
+}
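+/*
+ * Example (illustrative): with the 32.768 kHz parent and a requested rate of
+ * 1 Hz, on_off = DIV_ROUND_UP(32768 / 8, 1) = 4096, so c->div becomes 32768
+ * and the effective blink rate is parent / div = 1 Hz.
+ */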
+
+static struct clk_ops tegra_blink_clk_ops = {
+ .init = &tegra3_blink_clk_init,
+ .enable = &tegra3_blink_clk_enable,
+ .disable = &tegra3_blink_clk_disable,
+ .set_rate = &tegra3_blink_clk_set_rate,
+};
+
+/* PLL Functions */
+static int tegra3_pll_clk_wait_for_lock(struct clk *c, u32 lock_reg, u32 lock_bit)
+{
+#if USE_PLL_LOCK_BITS
+ int i;
+ for (i = 0; i < c->u.pll.lock_delay; i++) {
+ if (clk_readl(lock_reg) & lock_bit) {
+ udelay(PLL_POST_LOCK_DELAY);
+ return 0;
+ }
+ udelay(2); /* timeout = 2 * lock time */
+ }
+ pr_err("Timed out waiting for lock bit on pll %s", c->name);
+ return -1;
+#endif
+ udelay(c->u.pll.lock_delay);
+
+ return 0;
+}
+
+
+static void tegra3_utmi_param_configure(struct clk *c)
+{
+ u32 reg;
+ int i;
+ unsigned long main_rate =
+ clk_get_rate(c->parent->parent);
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (main_rate == utmi_parameters[i].osc_frequency) {
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected main rate %lu\n", __func__, main_rate);
+ return;
+ }
+
+ reg = clk_readl(UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(
+ utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(
+ utmi_parameters[i].active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ clk_writel(reg, UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = clk_readl(UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(
+ utmi_parameters[i].enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(
+ utmi_parameters[i].xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+
+ clk_writel(reg, UTMIP_PLL_CFG1);
+}
+
+static void tegra3_pll_clk_init(struct clk *c)
+{
+ u32 val = clk_readl(c->reg + PLL_BASE);
+
+ c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;
+
+ if (c->flags & PLL_FIXED && !(val & PLL_BASE_OVERRIDE)) {
+ const struct clk_pll_freq_table *sel;
+ unsigned long input_rate = clk_get_rate(c->parent);
+ for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
+ if (sel->input_rate == input_rate &&
+ sel->output_rate == c->u.pll.fixed_rate) {
+ c->mul = sel->n;
+ c->div = sel->m * sel->p;
+ return;
+ }
+ }
+ pr_err("Clock %s has unknown fixed frequency\n", c->name);
+ BUG();
+ } else if (val & PLL_BASE_BYPASS) {
+ c->mul = 1;
+ c->div = 1;
+ } else {
+ c->mul = (val & PLL_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
+ c->div = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
+ if (c->flags & PLLU)
+ c->div *= (val & PLLU_BASE_POST_DIV) ? 1 : 2;
+ else
+ c->div *= (0x1 << ((val & PLL_BASE_DIVP_MASK) >>
+ PLL_BASE_DIVP_SHIFT));
+ if (c->flags & PLL_FIXED) {
+ unsigned long rate = clk_get_rate_locked(c);
+ BUG_ON(rate != c->u.pll.fixed_rate);
+ }
+ }
+
+ if (c->flags & PLLU) {
+ tegra3_utmi_param_configure(c);
+ }
+}
+
+static int tegra3_pll_clk_enable(struct clk *c)
+{
+ u32 val;
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+#if USE_PLL_LOCK_BITS
+ val = clk_readl(c->reg + PLL_MISC(c));
+ val |= PLL_MISC_LOCK_ENABLE(c);
+ clk_writel(val, c->reg + PLL_MISC(c));
+#endif
+ val = clk_readl(c->reg + PLL_BASE);
+ val &= ~PLL_BASE_BYPASS;
+ val |= PLL_BASE_ENABLE;
+ clk_writel(val, c->reg + PLL_BASE);
+
+ if (c->flags & PLLM) {
+ val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);
+ val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ pmc_writel(val, PMC_PLLP_WB0_OVERRIDE);
+ }
+
+ tegra3_pll_clk_wait_for_lock(c, c->reg + PLL_BASE, PLL_BASE_LOCK);
+
+ return 0;
+}
+
+static void tegra3_pll_clk_disable(struct clk *c)
+{
+ u32 val;
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ val = clk_readl(c->reg);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ clk_writel(val, c->reg);
+
+ if (c->flags & PLLM) {
+ val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);
+ val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ pmc_writel(val, PMC_PLLP_WB0_OVERRIDE);
+ }
+}
+
+static int tegra3_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ u32 val, p_div, old_base;
+ unsigned long input_rate;
+ const struct clk_pll_freq_table *sel;
+ struct clk_pll_freq_table cfg;
+
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & PLL_FIXED) {
+ int ret = 0;
+ if (rate != c->u.pll.fixed_rate) {
+ pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
+ __func__, c->name, c->u.pll.fixed_rate, rate);
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+
+ if (c->flags & PLLM) {
+ if (rate != clk_get_rate_locked(c)) {
+ pr_err("%s: Can not change memory %s rate in flight\n",
+ __func__, c->name);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ p_div = 0;
+ input_rate = clk_get_rate(c->parent);
+
+ /* Check if the target rate is tabulated */
+ for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
+ if (sel->input_rate == input_rate && sel->output_rate == rate) {
+ if (c->flags & PLLU) {
+ BUG_ON(sel->p < 1 || sel->p > 2);
+ if (sel->p == 1)
+ p_div = PLLU_BASE_POST_DIV;
+ } else {
+ BUG_ON(sel->p < 1);
+ for (val = sel->p; val > 1; val >>= 1, p_div++);
+ p_div <<= PLL_BASE_DIVP_SHIFT;
+ }
+ break;
+ }
+ }
+
+ /* Configure out-of-table rate */
+ if (sel->input_rate == 0) {
+ unsigned long cfreq;
+ BUG_ON(c->flags & PLLU);
+ sel = &cfg;
+
+ switch (input_rate) {
+ case 12000000:
+ case 26000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000;
+ break;
+ case 13000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000;
+ break;
+ case 16800000:
+ case 19200000:
+ cfreq = (rate <= 1200000 * 1000) ? 1200000 : 2400000;
+ break;
+ default:
+ pr_err("%s: Unexpected reference rate %lu\n",
+ __func__, input_rate);
+ BUG();
+ }
+
+ /* Raise VCO to guarantee 0.5% accuracy */
+ for (cfg.output_rate = rate; cfg.output_rate < 200 * cfreq;
+ cfg.output_rate <<= 1, p_div++);
+
+ cfg.p = 0x1 << p_div;
+ cfg.m = input_rate / cfreq;
+ cfg.n = cfg.output_rate / cfreq;
+ cfg.cpcon = OUT_OF_TABLE_CPCON;
+
+ if ((cfg.m > (PLL_BASE_DIVM_MASK >> PLL_BASE_DIVM_SHIFT)) ||
+ (cfg.n > (PLL_BASE_DIVN_MASK >> PLL_BASE_DIVN_SHIFT)) ||
+ (p_div > (PLL_BASE_DIVP_MASK >> PLL_BASE_DIVP_SHIFT)) ||
+ (cfg.output_rate > c->u.pll.vco_max)) {
+ pr_err("%s: Failed to set %s out-of-table rate %lu\n",
+ __func__, c->name, rate);
+ return -EINVAL;
+ }
+ p_div <<= PLL_BASE_DIVP_SHIFT;
+ }
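+ /*
+ * Example of the out-of-table path (illustrative): for a 12 MHz
+ * reference and a 600 MHz target, cfreq = 1 MHz, the VCO-raising loop
+ * leaves p = 1 (600 MHz >= 200 * cfreq), and the resulting settings
+ * are m = 12, n = 600, cpcon = OUT_OF_TABLE_CPCON - assuming the
+ * target is within this PLL's vco_max.
+ */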
+
+ c->mul = sel->n;
+ c->div = sel->m * sel->p;
+
+ old_base = val = clk_readl(c->reg + PLL_BASE);
+ val &= ~(PLL_BASE_DIVM_MASK | PLL_BASE_DIVN_MASK |
+ ((c->flags & PLLU) ? PLLU_BASE_POST_DIV : PLL_BASE_DIVP_MASK));
+ val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
+ (sel->n << PLL_BASE_DIVN_SHIFT) | p_div;
+ if (val == old_base)
+ return 0;
+
+ if (c->state == ON) {
+ tegra3_pll_clk_disable(c);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ }
+ clk_writel(val, c->reg + PLL_BASE);
+
+ if (c->flags & PLL_HAS_CPCON) {
+ val = clk_readl(c->reg + PLL_MISC(c));
+ val &= ~PLL_MISC_CPCON_MASK;
+ val |= sel->cpcon << PLL_MISC_CPCON_SHIFT;
+ if (c->flags & (PLLU | PLLD)) {
+ val &= ~PLL_MISC_LFCON_MASK;
+ if (sel->n >= PLLDU_LFCON_SET_DIVN)
+ val |= 0x1 << PLL_MISC_LFCON_SHIFT;
+ } else if (c->flags & (PLLX | PLLM)) {
+ val &= ~(0x1 << PLL_MISC_DCCON_SHIFT);
+ if (rate >= (c->u.pll.vco_max >> 1))
+ val |= 0x1 << PLL_MISC_DCCON_SHIFT;
+ }
+ clk_writel(val, c->reg + PLL_MISC(c));
+ }
+
+ if (c->state == ON)
+ tegra3_pll_clk_enable(c);
+
+ return 0;
+}
+
+static struct clk_ops tegra_pll_ops = {
+ .init = tegra3_pll_clk_init,
+ .enable = tegra3_pll_clk_enable,
+ .disable = tegra3_pll_clk_disable,
+ .set_rate = tegra3_pll_clk_set_rate,
+};
+
+static int
+tegra3_plld_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+{
+ u32 val, mask, reg;
+
+ switch (p) {
+ case TEGRA_CLK_PLLD_CSI_OUT_ENB:
+ mask = PLLD_BASE_CSI_CLKENABLE;
+ reg = c->reg + PLL_BASE;
+ break;
+ case TEGRA_CLK_PLLD_DSI_OUT_ENB:
+ mask = PLLD_MISC_DSI_CLKENABLE;
+ reg = c->reg + PLL_MISC(c);
+ break;
+ case TEGRA_CLK_PLLD_MIPI_MUX_SEL:
+ if (!(c->flags & PLL_ALT_MISC_REG)) {
+ mask = PLLD_BASE_DSIB_MUX_MASK;
+ reg = c->reg + PLL_BASE;
+ break;
+ }
+ /* fall through - error since PLLD2 does not have MUX_SEL control */
+ default:
+ return -EINVAL;
+ }
+
+ val = clk_readl(reg);
+ if (setting)
+ val |= mask;
+ else
+ val &= ~mask;
+ clk_writel(val, reg);
+ return 0;
+}
+
+static struct clk_ops tegra_plld_ops = {
+ .init = tegra3_pll_clk_init,
+ .enable = tegra3_pll_clk_enable,
+ .disable = tegra3_pll_clk_disable,
+ .set_rate = tegra3_pll_clk_set_rate,
+ .clk_cfg_ex = tegra3_plld_clk_cfg_ex,
+};
+
+static void tegra3_plle_clk_init(struct clk *c)
+{
+ u32 val;
+
+ val = clk_readl(PLLE_AUX);
+ c->parent = (val & PLLE_AUX_PLLP_SEL) ?
+ tegra_get_clock_by_name("pll_p") :
+ tegra_get_clock_by_name("pll_ref");
+
+ val = clk_readl(c->reg + PLL_BASE);
+ c->state = (val & PLLE_BASE_ENABLE) ? ON : OFF;
+ c->mul = (val & PLLE_BASE_DIVN_MASK) >> PLLE_BASE_DIVN_SHIFT;
+ c->div = (val & PLLE_BASE_DIVM_MASK) >> PLLE_BASE_DIVM_SHIFT;
+ c->div *= (val & PLLE_BASE_DIVP_MASK) >> PLLE_BASE_DIVP_SHIFT;
+}
+
+static void tegra3_plle_clk_disable(struct clk *c)
+{
+ u32 val;
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ val = clk_readl(c->reg + PLL_BASE);
+ val &= ~(PLLE_BASE_CML_ENABLE | PLLE_BASE_ENABLE);
+ clk_writel(val, c->reg + PLL_BASE);
+}
+
+static void tegra3_plle_training(struct clk *c)
+{
+ u32 val;
+
+ /* PLLE is already disabled, and setup cleared;
+ * create falling edge on PLLE IDDQ input */
+ val = pmc_readl(PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ pmc_writel(val, PMC_SATA_PWRGT);
+
+ val = pmc_readl(PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL;
+ pmc_writel(val, PMC_SATA_PWRGT);
+
+ val = pmc_readl(PMC_SATA_PWRGT);
+ val &= ~PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ pmc_writel(val, PMC_SATA_PWRGT);
+
+ do {
+ val = clk_readl(c->reg + PLL_MISC(c));
+ } while (!(val & PLLE_MISC_READY));
+}
+
+static int tegra3_plle_configure(struct clk *c, bool force_training)
+{
+ u32 val;
+ const struct clk_pll_freq_table *sel;
+ unsigned long rate = c->u.pll.fixed_rate;
+ unsigned long input_rate = clk_get_rate(c->parent);
+
+ for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
+ if (sel->input_rate == input_rate && sel->output_rate == rate)
+ break;
+ }
+
+ if (sel->input_rate == 0)
+ return -ENOSYS;
+
+ /* disable PLLE, clear setup fields */
+ tegra3_plle_clk_disable(c);
+
+ val = clk_readl(c->reg + PLL_MISC(c));
+ val &= ~(PLLE_MISC_LOCK_ENABLE | PLLE_MISC_SETUP_MASK);
+ clk_writel(val, c->reg + PLL_MISC(c));
+
+ /* training */
+ val = clk_readl(c->reg + PLL_MISC(c));
+ if (force_training || (!(val & PLLE_MISC_READY)))
+ tegra3_plle_training(c);
+
+ /* configure dividers, setup, disable SS */
+ val = clk_readl(c->reg + PLL_BASE);
+ val &= ~PLLE_BASE_DIV_MASK;
+ val |= PLLE_BASE_DIV(sel->m, sel->n, sel->p, sel->cpcon);
+ clk_writel(val, c->reg + PLL_BASE);
+ c->mul = sel->n;
+ c->div = sel->m * sel->p;
+
+ val = clk_readl(c->reg + PLL_MISC(c));
+ val |= PLLE_MISC_SETUP_VALUE;
+ val |= PLLE_MISC_LOCK_ENABLE;
+ clk_writel(val, c->reg + PLL_MISC(c));
+
+ val = clk_readl(PLLE_SS_CTRL);
+ val |= PLLE_SS_DISABLE;
+ clk_writel(val, PLLE_SS_CTRL);
+
+ /* enable and lock PLLE */
+ val = clk_readl(c->reg + PLL_BASE);
+ val |= (PLLE_BASE_CML_ENABLE | PLLE_BASE_ENABLE);
+ clk_writel(val, c->reg + PLL_BASE);
+
+ tegra3_pll_clk_wait_for_lock(c, c->reg + PLL_MISC(c), PLLE_MISC_LOCK);
+
+#if USE_PLLE_SS
+ /* configure spread spectrum coefficients */
+ /* FIXME: coefficients for 216MHZ input? */
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ if (input_rate == 12000000)
+#endif
+ {
+ val = clk_readl(PLLE_SS_CTRL);
+ val &= ~(PLLE_SS_COEFFICIENTS_MASK | PLLE_SS_DISABLE);
+ val |= PLLE_SS_COEFFICIENTS_12MHZ;
+ clk_writel(val, PLLE_SS_CTRL);
+ }
+#endif
+ return 0;
+}
+
+static int tegra3_plle_clk_enable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+ return tegra3_plle_configure(c, !c->set);
+}
+
+static struct clk_ops tegra_plle_ops = {
+ .init = tegra3_plle_clk_init,
+ .enable = tegra3_plle_clk_enable,
+ .disable = tegra3_plle_clk_disable,
+};
+
+/* Clock divider ops */
+static void tegra3_pll_div_clk_init(struct clk *c)
+{
+ if (c->flags & DIV_U71) {
+ u32 divu71;
+ u32 val = clk_readl(c->reg);
+ val >>= c->reg_shift;
+ c->state = (val & PLL_OUT_CLKEN) ? ON : OFF;
+ if (!(val & PLL_OUT_RESET_DISABLE))
+ c->state = OFF;
+
+ divu71 = (val & PLL_OUT_RATIO_MASK) >> PLL_OUT_RATIO_SHIFT;
+ c->div = (divu71 + 2);
+ c->mul = 2;
+ } else if (c->flags & DIV_2) {
+ c->state = ON;
+ if (c->flags & (PLLD | PLLX)) {
+ c->div = 2;
+ c->mul = 1;
+ } else {
+ BUG();
+ }
+ } else {
+ c->state = ON;
+ c->div = 1;
+ c->mul = 1;
+ }
+}
+
+static int tegra3_pll_div_clk_enable(struct clk *c)
+{
+ u32 val;
+ u32 new_val;
+
+ pr_debug("%s: %s\n", __func__, c->name);
+ if (c->flags & DIV_U71) {
+ val = clk_readl(c->reg);
+ new_val = val >> c->reg_shift;
+ new_val &= 0xFFFF;
+
+ new_val |= PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE;
+
+ val &= ~(0xFFFF << c->reg_shift);
+ val |= new_val << c->reg_shift;
+ clk_writel_delay(val, c->reg);
+ return 0;
+ } else if (c->flags & DIV_2) {
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void tegra3_pll_div_clk_disable(struct clk *c)
+{
+ u32 val;
+ u32 new_val;
+
+ pr_debug("%s: %s\n", __func__, c->name);
+ if (c->flags & DIV_U71) {
+ val = clk_readl(c->reg);
+ new_val = val >> c->reg_shift;
+ new_val &= 0xFFFF;
+
+ new_val &= ~(PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE);
+
+ val &= ~(0xFFFF << c->reg_shift);
+ val |= new_val << c->reg_shift;
+ clk_writel_delay(val, c->reg);
+ }
+}
+
+static int tegra3_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ u32 val;
+ u32 new_val;
+ int divider_u71;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+ if (c->flags & DIV_U71) {
+ divider_u71 = clk_div71_get_divider(
+ parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
+ if (divider_u71 >= 0) {
+ val = clk_readl(c->reg);
+ new_val = val >> c->reg_shift;
+ new_val &= 0xFFFF;
+ if (c->flags & DIV_U71_FIXED)
+ new_val |= PLL_OUT_OVERRIDE;
+ new_val &= ~PLL_OUT_RATIO_MASK;
+ new_val |= divider_u71 << PLL_OUT_RATIO_SHIFT;
+
+ val &= ~(0xFFFF << c->reg_shift);
+ val |= new_val << c->reg_shift;
+ clk_writel_delay(val, c->reg);
+ c->div = divider_u71 + 2;
+ c->mul = 2;
+ return 0;
+ }
+ } else if (c->flags & DIV_2)
+ return clk_set_rate(c->parent, rate * 2);
+
+ return -EINVAL;
+}
+
+static long tegra3_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(
+ parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
+ if (divider < 0)
+ return divider;
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
+ } else if (c->flags & DIV_2)
+ /* no rounding - fixed DIV_2 dividers pass rate to parent PLL */
+ return rate;
+
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_pll_div_ops = {
+ .init = tegra3_pll_div_clk_init,
+ .enable = tegra3_pll_div_clk_enable,
+ .disable = tegra3_pll_div_clk_disable,
+ .set_rate = tegra3_pll_div_clk_set_rate,
+ .round_rate = tegra3_pll_div_clk_round_rate,
+};
+
+/* Periph clk ops */
+static inline u32 periph_clk_source_mask(struct clk *c)
+{
+ if (c->flags & MUX8)
+ return 7 << 29;
+ else if (c->flags & MUX_PWM)
+ return 3 << 28;
+ else if (c->flags & MUX_CLK_OUT)
+ return 3 << (c->u.periph.clk_num + 4);
+ else if (c->flags & PLLD)
+ return PLLD_BASE_DSIB_MUX_MASK;
+ else
+ return 3 << 30;
+}
+
+static inline u32 periph_clk_source_shift(struct clk *c)
+{
+ if (c->flags & MUX8)
+ return 29;
+ else if (c->flags & MUX_PWM)
+ return 28;
+ else if (c->flags & MUX_CLK_OUT)
+ return c->u.periph.clk_num + 4;
+ else if (c->flags & PLLD)
+ return PLLD_BASE_DSIB_MUX_SHIFT;
+ else
+ return 30;
+}
+
+static void tegra3_periph_clk_init(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ const struct clk_mux_sel *mux = NULL;
+ const struct clk_mux_sel *sel;
+ if (c->flags & MUX) {
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (((val & periph_clk_source_mask(c)) >>
+ periph_clk_source_shift(c)) == sel->value)
+ mux = sel;
+ }
+ BUG_ON(!mux);
+
+ c->parent = mux->input;
+ } else {
+ c->parent = c->inputs[0].input;
+ }
+
+ if (c->flags & DIV_U71) {
+ u32 divu71 = val & PERIPH_CLK_SOURCE_DIVU71_MASK;
+ if (c->flags & DIV_U71_IDLE) {
+ val &= ~(PERIPH_CLK_SOURCE_DIVU71_MASK <<
+ PERIPH_CLK_SOURCE_DIVIDLE_SHIFT);
+ val |= (PERIPH_CLK_SOURCE_DIVIDLE_VAL <<
+ PERIPH_CLK_SOURCE_DIVIDLE_SHIFT);
+ clk_writel(val, c->reg);
+ }
+ c->div = divu71 + 2;
+ c->mul = 2;
+ } else if (c->flags & DIV_U151) {
+ u32 divu151 = val & PERIPH_CLK_SOURCE_DIVU16_MASK;
+ if ((c->flags & DIV_U151_UART) &&
+ (!(val & PERIPH_CLK_UART_DIV_ENB))) {
+ divu151 = 0;
+ }
+ c->div = divu151 + 2;
+ c->mul = 2;
+ } else if (c->flags & DIV_U16) {
+ u32 divu16 = val & PERIPH_CLK_SOURCE_DIVU16_MASK;
+ c->div = divu16 + 1;
+ c->mul = 1;
+ } else {
+ c->div = 1;
+ c->mul = 1;
+ }
+
+ c->state = ON;
+ if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_BIT(c)))
+ c->state = OFF;
+ if (!(c->flags & PERIPH_NO_RESET))
+ if (clk_readl(PERIPH_CLK_TO_RST_REG(c)) & PERIPH_CLK_TO_BIT(c))
+ c->state = OFF;
+}
+
+static int tegra3_periph_clk_enable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++;
+ if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] > 1)
+ return 0;
+
+ clk_writel_delay(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_ENB_SET_REG(c));
+ if (!(c->flags & PERIPH_NO_RESET) && !(c->flags & PERIPH_MANUAL_RESET)) {
+ if (clk_readl(PERIPH_CLK_TO_RST_REG(c)) & PERIPH_CLK_TO_BIT(c)) {
+ udelay(5); /* reset propagation delay */
+ clk_writel(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_RST_CLR_REG(c));
+ }
+ }
+ return 0;
+}
+
+static void tegra3_periph_clk_disable(struct clk *c)
+{
+ unsigned long val;
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ if (c->refcnt)
+ tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--;
+
+ if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0) {
+ /* If the peripheral is on the APB bus, read the APB bus to
+ * flush the outstanding write before the clock is disabled;
+ * this prevents a late access to the peripheral afterwards. */
+ if (c->flags & PERIPH_ON_APB)
+ val = chipid_readl();
+
+ clk_writel_delay(
+ PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_ENB_CLR_REG(c));
+ }
+}
+
+static void tegra3_periph_clk_reset(struct clk *c, bool assert)
+{
+ unsigned long val;
+ pr_debug("%s %s on clock %s\n", __func__,
+ assert ? "assert" : "deassert", c->name);
+
+ if (!(c->flags & PERIPH_NO_RESET)) {
+ if (assert) {
+ /* If the peripheral is on the APB bus, read the APB
+ * bus to flush the outstanding write before reset is
+ * asserted; this prevents a late access to the
+ * peripheral afterwards. */
+ if (c->flags & PERIPH_ON_APB)
+ val = chipid_readl();
+
+ clk_writel(PERIPH_CLK_TO_BIT(c),
+ PERIPH_CLK_TO_RST_SET_REG(c));
+ } else
+ clk_writel(PERIPH_CLK_TO_BIT(c),
+ PERIPH_CLK_TO_RST_CLR_REG(c));
+ }
+}
+
+static int tegra3_periph_clk_set_parent(struct clk *c, struct clk *p)
+{
+ u32 val;
+ const struct clk_mux_sel *sel;
+ pr_debug("%s: %s %s\n", __func__, c->name, p->name);
+
+ if (!(c->flags & MUX))
+ return (p == c->parent) ? 0 : (-EINVAL);
+
+ if (!tegra3_clk_is_parent_allowed(c, p))
+ return -EINVAL;
+
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ val = clk_readl(c->reg);
+ val &= ~periph_clk_source_mask(c);
+ val |= (sel->value << periph_clk_source_shift(c));
+
+ if (c->refcnt)
+ clk_enable(p);
+
+ clk_writel_delay(val, c->reg);
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tegra3_periph_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ u32 val;
+ int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(
+ parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
+ if (divider >= 0) {
+ val = clk_readl(c->reg);
+ val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK;
+ val |= divider;
+ clk_writel_delay(val, c->reg);
+ c->div = divider + 2;
+ c->mul = 2;
+ return 0;
+ }
+ } else if (c->flags & DIV_U151) {
+ divider = clk_div151_get_divider(
+ parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
+ if (divider >= 0) {
+ val = clk_readl(c->reg);
+ val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
+ val |= divider;
+ if (c->flags & DIV_U151_UART) {
+ if (divider)
+ val |= PERIPH_CLK_UART_DIV_ENB;
+ else
+ val &= ~PERIPH_CLK_UART_DIV_ENB;
+ }
+ clk_writel_delay(val, c->reg);
+ c->div = divider + 2;
+ c->mul = 2;
+ return 0;
+ }
+ } else if (c->flags & DIV_U16) {
+ divider = clk_div16_get_divider(parent_rate, rate);
+ if (divider >= 0) {
+ val = clk_readl(c->reg);
+ val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
+ val |= divider;
+ clk_writel_delay(val, c->reg);
+ c->div = divider + 1;
+ c->mul = 1;
+ return 0;
+ }
+ } else if (parent_rate <= rate) {
+ c->div = 1;
+ c->mul = 1;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static long tegra3_periph_clk_round_rate(struct clk *c,
+ unsigned long rate)
+{
+ int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(
+ parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
+ if (divider < 0)
+ return divider;
+
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
+ } else if (c->flags & DIV_U151) {
+ divider = clk_div151_get_divider(
+ parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
+ if (divider < 0)
+ return divider;
+
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
+ } else if (c->flags & DIV_U16) {
+ divider = clk_div16_get_divider(parent_rate, rate);
+ if (divider < 0)
+ return divider;
+ return DIV_ROUND_UP(parent_rate, divider + 1);
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_periph_clk_ops = {
+ .init = &tegra3_periph_clk_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_parent = &tegra3_periph_clk_set_parent,
+ .set_rate = &tegra3_periph_clk_set_rate,
+ .round_rate = &tegra3_periph_clk_round_rate,
+ .reset = &tegra3_periph_clk_reset,
+};
+
+
+/* Periph extended clock configuration ops */
+static int
+tegra3_vi_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+{
+ if (p == TEGRA_CLK_VI_INP_SEL) {
+ u32 val = clk_readl(c->reg);
+ val &= ~PERIPH_CLK_VI_SEL_EX_MASK;
+ val |= (setting << PERIPH_CLK_VI_SEL_EX_SHIFT) &
+ PERIPH_CLK_VI_SEL_EX_MASK;
+ clk_writel(val, c->reg);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_vi_clk_ops = {
+ .init = &tegra3_periph_clk_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_parent = &tegra3_periph_clk_set_parent,
+ .set_rate = &tegra3_periph_clk_set_rate,
+ .round_rate = &tegra3_periph_clk_round_rate,
+ .clk_cfg_ex = &tegra3_vi_clk_cfg_ex,
+ .reset = &tegra3_periph_clk_reset,
+};
+
+static int
+tegra3_nand_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+{
+ if (p == TEGRA_CLK_NAND_PAD_DIV2_ENB) {
+ u32 val = clk_readl(c->reg);
+ if (setting)
+ val |= PERIPH_CLK_NAND_DIV_EX_ENB;
+ else
+ val &= ~PERIPH_CLK_NAND_DIV_EX_ENB;
+ clk_writel(val, c->reg);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_nand_clk_ops = {
+ .init = &tegra3_periph_clk_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_parent = &tegra3_periph_clk_set_parent,
+ .set_rate = &tegra3_periph_clk_set_rate,
+ .round_rate = &tegra3_periph_clk_round_rate,
+ .clk_cfg_ex = &tegra3_nand_clk_cfg_ex,
+ .reset = &tegra3_periph_clk_reset,
+};
+
+
+static int
+tegra3_dtv_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+{
+ if (p == TEGRA_CLK_DTV_INVERT) {
+ u32 val = clk_readl(c->reg);
+ if (setting)
+ val |= PERIPH_CLK_DTV_POLARITY_INV;
+ else
+ val &= ~PERIPH_CLK_DTV_POLARITY_INV;
+ clk_writel(val, c->reg);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_dtv_clk_ops = {
+ .init = &tegra3_periph_clk_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_parent = &tegra3_periph_clk_set_parent,
+ .set_rate = &tegra3_periph_clk_set_rate,
+ .round_rate = &tegra3_periph_clk_round_rate,
+ .clk_cfg_ex = &tegra3_dtv_clk_cfg_ex,
+ .reset = &tegra3_periph_clk_reset,
+};
+
+static int tegra3_dsib_clk_set_parent(struct clk *c, struct clk *p)
+{
+ const struct clk_mux_sel *sel;
+ struct clk *d = tegra_get_clock_by_name("pll_d");
+
+ pr_debug("%s: %s %s\n", __func__, c->name, p->name);
+
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ if (c->refcnt)
+ clk_enable(p);
+
+ /* The DSIB parent selection bit lives in the PLLD base
+ * register: it cannot be updated with a plain
+ * read-modify-write here and must be changed under the
+ * PLLD lock */
+ tegra_clk_cfg_ex(
+ d, TEGRA_CLK_PLLD_MIPI_MUX_SEL, sel->value);
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_dsib_clk_ops = {
+ .init = &tegra3_periph_clk_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_parent = &tegra3_dsib_clk_set_parent,
+ .set_rate = &tegra3_periph_clk_set_rate,
+ .round_rate = &tegra3_periph_clk_round_rate,
+ .reset = &tegra3_periph_clk_reset,
+};
+
+/* pciex clock support only reset function */
+static struct clk_ops tegra_pciex_clk_ops = {
+ .reset = tegra3_periph_clk_reset,
+};
+
+/* Output clock ops */
+
+static DEFINE_SPINLOCK(clk_out_lock);
+
+static void tegra3_clk_out_init(struct clk *c)
+{
+ const struct clk_mux_sel *mux = 0;
+ const struct clk_mux_sel *sel;
+ u32 val = pmc_readl(c->reg);
+
+ c->state = (val & (0x1 << c->u.periph.clk_num)) ? ON : OFF;
+ c->mul = 1;
+ c->div = 1;
+
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (((val & periph_clk_source_mask(c)) >>
+ periph_clk_source_shift(c)) == sel->value)
+ mux = sel;
+ }
+ BUG_ON(!mux);
+ c->parent = mux->input;
+}
+
+static int tegra3_clk_out_enable(struct clk *c)
+{
+ u32 val;
+ unsigned long flags;
+
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ spin_lock_irqsave(&clk_out_lock, flags);
+ val = pmc_readl(c->reg);
+ val |= (0x1 << c->u.periph.clk_num);
+ pmc_writel(val, c->reg);
+ spin_unlock_irqrestore(&clk_out_lock, flags);
+
+ return 0;
+}
+
+static void tegra3_clk_out_disable(struct clk *c)
+{
+ u32 val;
+ unsigned long flags;
+
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ spin_lock_irqsave(&clk_out_lock, flags);
+ val = pmc_readl(c->reg);
+ val &= ~(0x1 << c->u.periph.clk_num);
+ pmc_writel(val, c->reg);
+ spin_unlock_irqrestore(&clk_out_lock, flags);
+}
+
+static int tegra3_clk_out_set_parent(struct clk *c, struct clk *p)
+{
+ u32 val;
+ unsigned long flags;
+ const struct clk_mux_sel *sel;
+
+ pr_debug("%s: %s %s\n", __func__, c->name, p->name);
+
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ if (c->refcnt)
+ clk_enable(p);
+
+ spin_lock_irqsave(&clk_out_lock, flags);
+ val = pmc_readl(c->reg);
+ val &= ~periph_clk_source_mask(c);
+ val |= (sel->value << periph_clk_source_shift(c));
+ pmc_writel(val, c->reg);
+ spin_unlock_irqrestore(&clk_out_lock, flags);
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_clk_out_ops = {
+ .init = &tegra3_clk_out_init,
+ .enable = &tegra3_clk_out_enable,
+ .disable = &tegra3_clk_out_disable,
+ .set_parent = &tegra3_clk_out_set_parent,
+};
+
+
+/* External memory controller clock ops */
+static void tegra3_emc_clk_init(struct clk *c)
+{
+ tegra3_periph_clk_init(c);
+ tegra_emc_dram_type_init(c);
+
+ /* On A01 limit EMC maximum rate to boot frequency;
+ starting with A02 full PLLM range should be supported */
+ if (tegra_get_revision() == TEGRA_REVISION_A01)
+ c->max_rate = clk_get_rate_locked(c);
+ else
+ c->max_rate = clk_get_rate(c->parent);
+}
+
+static long tegra3_emc_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ long new_rate = max(rate, c->min_rate);
+
+ new_rate = tegra_emc_round_rate(new_rate);
+ if (new_rate < 0)
+ new_rate = c->max_rate;
+
+ return new_rate;
+}
+
+static int tegra3_emc_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+ u32 div_value;
+ struct clk *p;
+
+ /* The tegra3 memory controller has an interlock with the clock
+ * block that allows shadowed memory registers to be updated and
+ * then transferred to the main registers at the same time as the
+ * clock update, without glitches. During a clock change both the
+ * clock parent and the divider may change simultaneously to
+ * achieve the requested rate. */
+ p = tegra_emc_predict_parent(rate, &div_value);
+ if (!p)
+ return -EINVAL;
+ div_value += 2; /* emc has fractional DIV_U71 divider */
+
+ if (p == c->parent) {
+ if (div_value == c->div)
+ return 0;
+ } else if (c->refcnt)
+ clk_enable(p);
+
+ ret = tegra_emc_set_rate(rate);
+ if (ret < 0)
+ return ret;
+
+ if (p != c->parent) {
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+ clk_reparent(c, p);
+ }
+ c->div = div_value;
+ c->mul = 2;
+ return 0;
+}
+
+static struct clk_ops tegra_emc_clk_ops = {
+ .init = &tegra3_emc_clk_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_rate = &tegra3_emc_clk_set_rate,
+ .round_rate = &tegra3_emc_clk_round_rate,
+ .reset = &tegra3_periph_clk_reset,
+ .shared_bus_update = &tegra3_clk_shared_bus_update,
+};
+
+/* Clock doubler ops */
+static void tegra3_clk_double_init(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ c->mul = val & (0x1 << c->reg_shift) ? 1 : 2;
+ c->div = 1;
+ c->state = ON;
+ if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_BIT(c)))
+ c->state = OFF;
+}
+
+static int tegra3_clk_double_set_rate(struct clk *c, unsigned long rate)
+{
+ u32 val;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ if (rate == parent_rate) {
+ val = clk_readl(c->reg) | (0x1 << c->reg_shift);
+ clk_writel(val, c->reg);
+ c->mul = 1;
+ c->div = 1;
+ return 0;
+ } else if (rate == 2 * parent_rate) {
+ val = clk_readl(c->reg) & (~(0x1 << c->reg_shift));
+ clk_writel(val, c->reg);
+ c->mul = 2;
+ c->div = 1;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_clk_double_ops = {
+ .init = &tegra3_clk_double_init,
+ .enable = &tegra3_periph_clk_enable,
+ .disable = &tegra3_periph_clk_disable,
+ .set_rate = &tegra3_clk_double_set_rate,
+};
+
+/* Audio sync clock ops */
+static int tegra3_sync_source_set_rate(struct clk *c, unsigned long rate)
+{
+ c->rate = rate;
+ return 0;
+}
+
+static struct clk_ops tegra_sync_source_ops = {
+ .set_rate = &tegra3_sync_source_set_rate,
+};
+
+static void tegra3_audio_sync_clk_init(struct clk *c)
+{
+ int source;
+ const struct clk_mux_sel *sel;
+ u32 val = clk_readl(c->reg);
+ c->state = (val & AUDIO_SYNC_DISABLE_BIT) ? OFF : ON;
+ source = val & AUDIO_SYNC_SOURCE_MASK;
+ for (sel = c->inputs; sel->input != NULL; sel++)
+ if (sel->value == source)
+ break;
+ BUG_ON(sel->input == NULL);
+ c->parent = sel->input;
+}
+
+static int tegra3_audio_sync_clk_enable(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ clk_writel((val & (~AUDIO_SYNC_DISABLE_BIT)), c->reg);
+ return 0;
+}
+
+static void tegra3_audio_sync_clk_disable(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ clk_writel((val | AUDIO_SYNC_DISABLE_BIT), c->reg);
+}
+
+static int tegra3_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
+{
+ u32 val;
+ const struct clk_mux_sel *sel;
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ val = clk_readl(c->reg);
+ val &= ~AUDIO_SYNC_SOURCE_MASK;
+ val |= sel->value;
+
+ if (c->refcnt)
+ clk_enable(p);
+
+ clk_writel(val, c->reg);
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_audio_sync_clk_ops = {
+ .init = tegra3_audio_sync_clk_init,
+ .enable = tegra3_audio_sync_clk_enable,
+ .disable = tegra3_audio_sync_clk_disable,
+ .set_parent = tegra3_audio_sync_clk_set_parent,
+};
+
+/* cml0 (pcie), and cml1 (sata) clock ops */
+static void tegra3_cml_clk_init(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ c->state = val & (0x1 << c->u.periph.clk_num) ? ON : OFF;
+}
+
+static int tegra3_cml_clk_enable(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ val |= (0x1 << c->u.periph.clk_num);
+ clk_writel(val, c->reg);
+ return 0;
+}
+
+static void tegra3_cml_clk_disable(struct clk *c)
+{
+ u32 val = clk_readl(c->reg);
+ val &= ~(0x1 << c->u.periph.clk_num);
+ clk_writel(val, c->reg);
+}
+
+static struct clk_ops tegra_cml_clk_ops = {
+ .init = &tegra3_cml_clk_init,
+ .enable = &tegra3_cml_clk_enable,
+ .disable = &tegra3_cml_clk_disable,
+};
+
+
+/* cbus ops */
+/*
+ * Some clocks require dynamic re-locking of source PLL in order to
+ * achieve frequency scaling granularity that matches characterized
+ * core voltage steps. The cbus clock creates a shared bus that
+ * provides a virtual root for such clocks to hide and synchronize
+ * parent PLL re-locking as well as backup operations.
+ */
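+
+/*
+ * Sketch of how a cbus user is expected to be driven (comment only; the "3d"
+ * client name and the 300 MHz rate are purely illustrative):
+ *
+ *	c = clk_get_sys("3d", NULL);	<- a virtual clock parented on cbus
+ *	clk_enable(c);
+ *	clk_set_rate(c, 300000000);
+ *
+ * The request propagates to the cbus: cbus_backup() parks every enabled
+ * client on the backup source, the parent PLL is re-locked at the new rate,
+ * and cbus_restore() moves the clients back, so users never run from an
+ * unlocked PLL.
+ */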
+
+static void tegra3_clk_cbus_init(struct clk *c)
+{
+ c->state = OFF;
+ c->set = true;
+}
+
+static int tegra3_clk_cbus_enable(struct clk *c)
+{
+ return 0;
+}
+
+static long tegra3_clk_cbus_round_rate(struct clk *c, unsigned long rate)
+{
+ int i;
+
+ if (!c->dvfs)
+ return rate;
+
+ /* update min now, since no dvfs table was available during init */
+ if (!c->min_rate)
+ c->min_rate = c->dvfs->freqs[0];
+
+ for (i = 0; i < (c->dvfs->num_freqs - 1); i++) {
+ unsigned long f = c->dvfs->freqs[i];
+ if (f >= rate)
+ break;
+ }
+ return c->dvfs->freqs[i];
+}
+
+static int cbus_switch_one(struct clk *c, struct clk *p, u32 div, bool abort)
+{
+ int ret = 0;
+
+ /* set new divider if it is bigger than the current one */
+ if (c->div < c->mul * div) {
+ ret = clk_set_div(c, div);
+ if (ret) {
+ pr_err("%s: failed to set %s clock divider %u: %d\n",
+ __func__, c->name, div, ret);
+ if (abort)
+ return ret;
+ }
+ }
+
+ ret = clk_set_parent(c, p);
+ if (ret) {
+ pr_err("%s: failed to set %s clock parent %s: %d\n",
+ __func__, c->name, p->name, ret);
+ if (abort)
+ return ret;
+ }
+
+ /* set new divider if it is smaller than the current one */
+ if (c->div > c->mul * div) {
+ ret = clk_set_div(c, div);
+ if (ret)
+ pr_err("%s: failed to set %s clock divider %u: %d\n",
+ __func__, c->name, div, ret);
+ }
+
+ return ret;
+}
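+
+/*
+ * Note on the ordering in cbus_switch_one() (comment only, numbers
+ * illustrative): the divider is only raised before the parent switch and
+ * only lowered after it, so a client can briefly run slow but never faster
+ * than either the old or the new target.  E.g. moving a client from a
+ * 600 MHz parent at /2 (300 MHz) to a 408 MHz backup at /1: the divider is
+ * dropped to /1 only after the mux change, so the transient rate is 204 MHz
+ * rather than a 600 MHz spike.
+ */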
+
+static int cbus_backup(struct clk *c)
+{
+ int ret;
+ struct clk *user;
+
+ list_for_each_entry(user, &c->shared_bus_list,
+ u.shared_bus_user.node) {
+ bool enabled = user->u.shared_bus_user.client &&
+ (user->u.shared_bus_user.enabled ||
+ user->u.shared_bus_user.client->refcnt);
+ if (enabled) {
+ ret = cbus_switch_one(user->u.shared_bus_user.client,
+ c->shared_bus_backup.input,
+ c->shared_bus_backup.value *
+ user->div, true);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void cbus_restore(struct clk *c)
+{
+ struct clk *user;
+
+ list_for_each_entry(user, &c->shared_bus_list,
+ u.shared_bus_user.node) {
+ bool back = user->u.shared_bus_user.client && (c->parent !=
+ user->u.shared_bus_user.client->parent);
+ if (back)
+ cbus_switch_one(user->u.shared_bus_user.client,
+ c->parent, c->div * user->div, false);
+ }
+}
+
+static int tegra3_clk_cbus_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+
+ if (rate == 0)
+ return 0;
+
+ ret = clk_enable(c->parent);
+ if (ret) {
+ pr_err("%s: failed to enable %s clock: %d\n",
+ __func__, c->name, ret);
+ return ret;
+ }
+
+ ret = cbus_backup(c);
+ if (ret)
+ goto out;
+
+ ret = clk_set_rate(c->parent, rate * c->div);
+ if (ret) {
+ pr_err("%s: failed to set %s clock rate %lu: %d\n",
+ __func__, c->name, rate, ret);
+ goto out;
+ }
+
+ cbus_restore(c);
+
+out:
+ clk_disable(c->parent);
+ return ret;
+}
+
+static struct clk_ops tegra_clk_cbus_ops = {
+ .init = tegra3_clk_cbus_init,
+ .enable = tegra3_clk_cbus_enable,
+ .set_rate = tegra3_clk_cbus_set_rate,
+ .round_rate = tegra3_clk_cbus_round_rate,
+ .shared_bus_update = tegra3_clk_shared_bus_update,
+};
+
+/* shared bus ops */
+/*
+ * Some clocks may have multiple downstream users that need to request a
+ * higher clock rate. Shared bus clocks provide a unique shared_bus_user
+ * clock to each user. The frequency of the bus is set to the highest
+ * enabled shared_bus_user clock, with a minimum value set by the
+ * shared bus.
+ */
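+
+/*
+ * Aggregation example (comment only, numbers illustrative): with a bus
+ * min_rate of 40 MHz, one SHARED_FLOOR user at 150 MHz, SHARED_BW users at
+ * 60 MHz and 50 MHz, and a SHARED_CEILING user at 200 MHz,
+ * tegra3_clk_shared_bus_update() below computes
+ *
+ *   rate = min(max(max(40, 150), 60 + 50), 200) = 150 MHz
+ *
+ * i.e. the larger of the highest floor and the bandwidth sum, clipped by the
+ * lowest ceiling (and by the bus max_rate).
+ */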
+
+static noinline int shared_bus_set_rate(struct clk *bus, unsigned long rate,
+ unsigned long old_rate)
+{
+ int ret, mv, old_mv;
+ unsigned long bridge_rate = emc_bridge->u.shared_bus_user.rate;
+
+ /* If bridge is not needed (LPDDR2) just set bus rate */
+ if (tegra_emc_get_dram_type() == DRAM_TYPE_LPDDR2)
+ return clk_set_rate_locked(bus, rate);
+
+ mv = tegra_dvfs_predict_millivolts(bus, rate);
+ old_mv = tegra_dvfs_predict_millivolts(bus, old_rate);
+ if (IS_ERR_VALUE(mv) || IS_ERR_VALUE(old_mv)) {
+ pr_err("%s: Failed to predict %s voltage for %lu => %lu\n",
+ __func__, bus->name, old_rate, rate);
+ return -EINVAL;
+ }
+
+ /* emc bus: set bridge rate as intermediate step when crossing
+ * bridge threshold in any direction
+ */
+ if (bus->flags & PERIPH_EMC_ENB) {
+ if (((mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN) &&
+ (old_rate < bridge_rate)) ||
+ ((old_mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN) &&
+ (rate < bridge_rate))) {
+ ret = clk_set_rate_locked(bus, bridge_rate);
+ if (ret) {
+ pr_err("%s: Failed to set emc bridge rate %lu\n",
+ __func__, bridge_rate);
+ return ret;
+ }
+ }
+ return clk_set_rate_locked(bus, rate);
+ }
+
+ /* sbus and cbus: enable/disable emc bridge user when crossing voltage
+ * threshold up/down respectively; hence, emc rate is kept above the
+ * bridge rate as long as any sbus or cbus user requires high voltage
+ */
+ if ((mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN) &&
+ (old_mv <= TEGRA_EMC_BRIDGE_MVOLTS_MIN)) {
+ ret = clk_enable(emc_bridge);
+ if (ret) {
+ pr_err("%s: Failed to enable emc bridge\n", __func__);
+ return ret;
+ }
+ }
+
+ ret = clk_set_rate_locked(bus, rate);
+ if (ret)
+ return ret;
+
+ if ((mv <= TEGRA_EMC_BRIDGE_MVOLTS_MIN) &&
+ (old_mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN))
+ clk_disable(emc_bridge);
+
+ return 0;
+}
+
+static int tegra3_clk_shared_bus_update(struct clk *bus)
+{
+ struct clk *c;
+ unsigned long old_rate;
+ unsigned long rate = bus->min_rate;
+ unsigned long bw = 0;
+ unsigned long ceiling = bus->max_rate;
+
+ if (detach_shared_bus)
+ return 0;
+
+ list_for_each_entry(c, &bus->shared_bus_list,
+ u.shared_bus_user.node) {
+ /* Ignore requests from disabled users and from users with
+ fixed bus-to-client ratio */
+ if (c->u.shared_bus_user.enabled) {
+ switch (c->u.shared_bus_user.mode) {
+ case SHARED_BW:
+ bw += c->u.shared_bus_user.rate;
+ break;
+ case SHARED_CEILING:
+ ceiling = min(c->u.shared_bus_user.rate,
+ ceiling);
+ break;
+ case SHARED_AUTO:
+ case SHARED_FLOOR:
+ default:
+ rate = max(c->u.shared_bus_user.rate, rate);
+ }
+ }
+ }
+ rate = min(max(rate, bw), ceiling);
+
+ old_rate = clk_get_rate_locked(bus);
+ if (rate == old_rate)
+ return 0;
+
+ return shared_bus_set_rate(bus, rate, old_rate);
+}
+
+static void tegra_clk_shared_bus_init(struct clk *c)
+{
+ c->max_rate = c->parent->max_rate;
+ c->u.shared_bus_user.rate = c->parent->max_rate;
+ c->state = OFF;
+ c->set = true;
+
+ if (c->u.shared_bus_user.client_id) {
+ c->u.shared_bus_user.client =
+ tegra_get_clock_by_name(c->u.shared_bus_user.client_id);
+ if (!c->u.shared_bus_user.client) {
+ pr_err("%s: could not find clk %s\n", __func__,
+ c->u.shared_bus_user.client_id);
+ return;
+ }
+ c->u.shared_bus_user.client->flags |=
+ c->parent->flags & PERIPH_ON_CBUS;
+ c->flags |= c->parent->flags & PERIPH_ON_CBUS;
+ c->div = c->u.shared_bus_user.client_div ? : 1;
+ c->mul = 1;
+ }
+
+ list_add_tail(&c->u.shared_bus_user.node,
+ &c->parent->shared_bus_list);
+}
+
+static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
+{
+ c->u.shared_bus_user.rate = rate;
+ tegra_clk_shared_bus_update(c->parent);
+ return 0;
+}
+
+static long tegra_clk_shared_bus_round_rate(struct clk *c, unsigned long rate)
+{
+ /* an auto user follows the other users; by itself it runs at the minimum bus rate */
+ if (c->u.shared_bus_user.mode == SHARED_AUTO)
+ rate = 0;
+
+ return clk_round_rate(c->parent, rate);
+}
+
+static int tegra_clk_shared_bus_enable(struct clk *c)
+{
+ c->u.shared_bus_user.enabled = true;
+ tegra_clk_shared_bus_update(c->parent);
+ if (c->u.shared_bus_user.client) {
+ return clk_enable(c->u.shared_bus_user.client);
+ }
+ return 0;
+}
+
+static void tegra_clk_shared_bus_disable(struct clk *c)
+{
+ if (c->u.shared_bus_user.client)
+ clk_disable(c->u.shared_bus_user.client);
+ c->u.shared_bus_user.enabled = false;
+ tegra_clk_shared_bus_update(c->parent);
+}
+
+static void tegra_clk_shared_bus_reset(struct clk *c, bool assert)
+{
+ if (c->u.shared_bus_user.client) {
+ if (c->u.shared_bus_user.client->ops &&
+ c->u.shared_bus_user.client->ops->reset)
+ c->u.shared_bus_user.client->ops->reset(
+ c->u.shared_bus_user.client, assert);
+ }
+}
+
+static struct clk_ops tegra_clk_shared_bus_ops = {
+ .init = tegra_clk_shared_bus_init,
+ .enable = tegra_clk_shared_bus_enable,
+ .disable = tegra_clk_shared_bus_disable,
+ .set_rate = tegra_clk_shared_bus_set_rate,
+ .round_rate = tegra_clk_shared_bus_round_rate,
+ .reset = tegra_clk_shared_bus_reset,
+};
+
+/* emc bridge ops */
+/* On Tegra3 platforms, emc configurations for low DDR3 rates cannot be used
+ * at high core voltage; the intermediate step (bridge) is mandatory whenever
+ * core voltage is crossing the threshold: TEGRA_EMC_BRIDGE_MVOLTS_MIN (fixed
+ * for the entire Tegra3 arch); also emc must run above the bridge rate if any
+ * other than emc clock requires high voltage. LP CPU, memory, sbus and cbus
+ * together include all clocks that may require core voltage above threshold
+ * (other peripherals can reach their maximum rates below threshold). LP CPU
+ * dependency is taken care of via tegra_emc_to_cpu_ratio() api. Memory clock
+ * transitions are forced to step through bridge rate; sbus and cbus control
+ * emc bridge to set emc clock floor as necessary.
+ *
+ * EMC bridge is implemented as a special emc shared bus user: initialized at
+ * minimum rate until updated once by emc dvfs setup; then it is only enabled
+ * or disabled when sbus and/or cbus voltage is crossing the threshold.
+ */
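+
+/*
+ * Illustrative sequence (comment only, the 450 MHz bridge rate is purely
+ * hypothetical): when a cbus or sbus request would push core voltage above
+ * TEGRA_EMC_BRIDGE_MVOLTS_MIN, shared_bus_set_rate() first enables this
+ * bridge user, which floors the emc shared bus at the bridge rate (say
+ * 450 MHz); only then is the higher bus rate applied.  When the last
+ * high-voltage request is gone the bridge is disabled again and emc may
+ * drop below that floor.
+ */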
+static void tegra3_clk_emc_bridge_init(struct clk *c)
+{
+ tegra_clk_shared_bus_init(c);
+ c->u.shared_bus_user.rate = 0;
+}
+
+static int tegra3_clk_emc_bridge_set_rate(struct clk *c, unsigned long rate)
+{
+ if (c->u.shared_bus_user.rate == 0)
+ c->u.shared_bus_user.rate = rate;
+ return 0;
+}
+
+static struct clk_ops tegra_clk_emc_bridge_ops = {
+ .init = tegra3_clk_emc_bridge_init,
+ .enable = tegra_clk_shared_bus_enable,
+ .disable = tegra_clk_shared_bus_disable,
+ .set_rate = tegra3_clk_emc_bridge_set_rate,
+ .round_rate = tegra_clk_shared_bus_round_rate,
+};
+
+/* Clock definitions */
+static struct clk tegra_clk_32k = {
+ .name = "clk_32k",
+ .rate = 32768,
+ .ops = NULL,
+ .max_rate = 32768,
+};
+
+static struct clk tegra_clk_m = {
+ .name = "clk_m",
+ .flags = ENABLE_ON_INIT,
+ .ops = &tegra_clk_m_ops,
+ .reg = 0x1fc,
+ .reg_shift = 28,
+ .max_rate = 48000000,
+};
+
+static struct clk tegra_clk_m_div2 = {
+ .name = "clk_m_div2",
+ .ops = &tegra_clk_m_div_ops,
+ .parent = &tegra_clk_m,
+ .mul = 1,
+ .div = 2,
+ .state = ON,
+ .max_rate = 24000000,
+};
+
+static struct clk tegra_clk_m_div4 = {
+ .name = "clk_m_div4",
+ .ops = &tegra_clk_m_div_ops,
+ .parent = &tegra_clk_m,
+ .mul = 1,
+ .div = 4,
+ .state = ON,
+ .max_rate = 12000000,
+};
+
+static struct clk tegra_pll_ref = {
+ .name = "pll_ref",
+ .flags = ENABLE_ON_INIT,
+ .ops = &tegra_pll_ref_ops,
+ .parent = &tegra_clk_m,
+ .max_rate = 26000000,
+};
+
+static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
+ { 12000000, 1040000000, 520, 6, 1, 8},
+ { 13000000, 1040000000, 480, 6, 1, 8},
+ { 16800000, 1040000000, 495, 8, 1, 8}, /* actual: 1039.5 MHz */
+ { 19200000, 1040000000, 325, 6, 1, 6},
+ { 26000000, 1040000000, 520, 13, 1, 8},
+
+ { 12000000, 832000000, 416, 6, 1, 8},
+ { 13000000, 832000000, 832, 13, 1, 8},
+ { 16800000, 832000000, 396, 8, 1, 8}, /* actual: 831.6 MHz */
+ { 19200000, 832000000, 260, 6, 1, 8},
+ { 26000000, 832000000, 416, 13, 1, 8},
+
+ { 12000000, 624000000, 624, 12, 1, 8},
+ { 13000000, 624000000, 624, 13, 1, 8},
+ { 16800000, 624000000, 520, 14, 1, 8},
+ { 19200000, 624000000, 520, 16, 1, 8},
+ { 26000000, 624000000, 624, 26, 1, 8},
+
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+
+ { 12000000, 520000000, 520, 12, 1, 8},
+ { 13000000, 520000000, 520, 13, 1, 8},
+ { 16800000, 520000000, 495, 16, 1, 8}, /* actual: 519.75 MHz */
+ { 19200000, 520000000, 325, 12, 1, 6},
+ { 26000000, 520000000, 520, 26, 1, 8},
+
+ { 12000000, 416000000, 416, 12, 1, 8},
+ { 13000000, 416000000, 416, 13, 1, 8},
+ { 16800000, 416000000, 396, 16, 1, 8}, /* actual: 415.8 MHz */
+ { 19200000, 416000000, 260, 12, 1, 6},
+ { 26000000, 416000000, 416, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_c = {
+ .name = "pll_c",
+ .flags = PLL_HAS_CPCON,
+ .ops = &tegra_pll_ops,
+ .reg = 0x80,
+ .parent = &tegra_pll_ref,
+ .max_rate = 1400000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_c_freq_table,
+ .lock_delay = 300,
+ },
+};
+
+static struct clk tegra_pll_c_out1 = {
+ .name = "pll_c_out1",
+ .ops = &tegra_pll_div_ops,
+ .flags = DIV_U71,
+ .parent = &tegra_pll_c,
+ .reg = 0x84,
+ .reg_shift = 0,
+ .max_rate = 700000000,
+};
+
+static struct clk_pll_freq_table tegra_pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 16800000, 666000000, 555, 14, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_m = {
+ .name = "pll_m",
+ .flags = PLL_HAS_CPCON | PLLM,
+ .ops = &tegra_pll_ops,
+ .reg = 0x90,
+ .parent = &tegra_pll_ref,
+ .max_rate = 800000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .freq_table = tegra_pll_m_freq_table,
+ .lock_delay = 300,
+ },
+};
+
+static struct clk tegra_pll_m_out1 = {
+ .name = "pll_m_out1",
+ .ops = &tegra_pll_div_ops,
+ .flags = DIV_U71,
+ .parent = &tegra_pll_m,
+ .reg = 0x94,
+ .reg_shift = 0,
+ .max_rate = 600000000,
+};
+
+static struct clk_pll_freq_table tegra_pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 16800000, 216000000, 360, 14, 2, 8},
+ { 19200000, 216000000, 360, 16, 2, 8},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_p = {
+ .name = "pll_p",
+ .flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON,
+ .ops = &tegra_pll_ops,
+ .reg = 0xa0,
+ .parent = &tegra_pll_ref,
+ .max_rate = 432000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_p_freq_table,
+ .lock_delay = 300,
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ .fixed_rate = 408000000,
+#else
+ .fixed_rate = 216000000,
+#endif
+ },
+};
+
+static struct clk tegra_pll_p_out1 = {
+ .name = "pll_p_out1",
+ .ops = &tegra_pll_div_ops,
+ .flags = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
+ .parent = &tegra_pll_p,
+ .reg = 0xa4,
+ .reg_shift = 0,
+ .max_rate = 432000000,
+};
+
+static struct clk tegra_pll_p_out2 = {
+ .name = "pll_p_out2",
+ .ops = &tegra_pll_div_ops,
+ .flags = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
+ .parent = &tegra_pll_p,
+ .reg = 0xa4,
+ .reg_shift = 16,
+ .max_rate = 432000000,
+};
+
+static struct clk tegra_pll_p_out3 = {
+ .name = "pll_p_out3",
+ .ops = &tegra_pll_div_ops,
+ .flags = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
+ .parent = &tegra_pll_p,
+ .reg = 0xa8,
+ .reg_shift = 0,
+ .max_rate = 432000000,
+};
+
+static struct clk tegra_pll_p_out4 = {
+ .name = "pll_p_out4",
+ .ops = &tegra_pll_div_ops,
+ .flags = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
+ .parent = &tegra_pll_p,
+ .reg = 0xa8,
+ .reg_shift = 16,
+ .max_rate = 432000000,
+};
+
+static struct clk_pll_freq_table tegra_pll_a_freq_table[] = {
+ { 9600000, 564480000, 294, 5, 1, 4},
+ { 9600000, 552960000, 288, 5, 1, 4},
+ { 9600000, 24000000, 5, 2, 1, 1},
+
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_a = {
+ .name = "pll_a",
+ .flags = PLL_HAS_CPCON,
+ .ops = &tegra_pll_ops,
+ .reg = 0xb0,
+ .parent = &tegra_pll_p_out1,
+ .max_rate = 700000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_a_freq_table,
+ .lock_delay = 300,
+ },
+};
+
+static struct clk tegra_pll_a_out0 = {
+ .name = "pll_a_out0",
+ .ops = &tegra_pll_div_ops,
+ .flags = DIV_U71,
+ .parent = &tegra_pll_a,
+ .reg = 0xb4,
+ .reg_shift = 0,
+ .max_rate = 100000000,
+};
+
+static struct clk_pll_freq_table tegra_pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 16800000, 216000000, 180, 14, 1, 4},
+ { 19200000, 216000000, 180, 16, 1, 4},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 16800000, 594000000, 495, 14, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_d = {
+ .name = "pll_d",
+ .flags = PLL_HAS_CPCON | PLLD,
+ .ops = &tegra_plld_ops,
+ .reg = 0xd0,
+ .parent = &tegra_pll_ref,
+ .max_rate = 1000000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .freq_table = tegra_pll_d_freq_table,
+ .lock_delay = 1000,
+ },
+};
+
+static struct clk tegra_pll_d_out0 = {
+ .name = "pll_d_out0",
+ .ops = &tegra_pll_div_ops,
+ .flags = DIV_2 | PLLD,
+ .parent = &tegra_pll_d,
+ .max_rate = 500000000,
+};
+
+static struct clk tegra_pll_d2 = {
+ .name = "pll_d2",
+ .flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG | PLLD,
+ .ops = &tegra_plld_ops,
+ .reg = 0x4b8,
+ .parent = &tegra_pll_ref,
+ .max_rate = 1000000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .freq_table = tegra_pll_d_freq_table,
+ .lock_delay = 1000,
+ },
+};
+
+static struct clk tegra_pll_d2_out0 = {
+ .name = "pll_d2_out0",
+ .ops = &tegra_pll_div_ops,
+ .flags = DIV_2 | PLLD,
+ .parent = &tegra_pll_d2,
+ .max_rate = 500000000,
+};
+
+static struct clk_pll_freq_table tegra_pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 12},
+ { 13000000, 480000000, 960, 13, 2, 12},
+ { 16800000, 480000000, 400, 7, 2, 5},
+ { 19200000, 480000000, 200, 4, 2, 3},
+ { 26000000, 480000000, 960, 26, 2, 12},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_u = {
+ .name = "pll_u",
+ .flags = PLL_HAS_CPCON | PLLU,
+ .ops = &tegra_pll_ops,
+ .reg = 0xc0,
+ .parent = &tegra_pll_ref,
+ .max_rate = 480000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 480000000,
+ .vco_max = 960000000,
+ .freq_table = tegra_pll_u_freq_table,
+ .lock_delay = 1000,
+ },
+};
+
+static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
+ /* 1.7 GHz */
+ { 12000000, 1700000000, 850, 6, 1, 8},
+ { 13000000, 1700000000, 915, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 16800000, 1700000000, 708, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 19200000, 1700000000, 885, 10, 1, 8}, /* actual: 1699.2 MHz */
+ { 26000000, 1700000000, 850, 13, 1, 8},
+
+ /* 1.6 GHz */
+ { 12000000, 1600000000, 800, 6, 1, 8},
+ { 13000000, 1600000000, 738, 6, 1, 8}, /* actual: 1599.0 MHz */
+ { 16800000, 1600000000, 857, 9, 1, 8}, /* actual: 1599.7 MHz */
+ { 19200000, 1600000000, 500, 6, 1, 8},
+ { 26000000, 1600000000, 800, 13, 1, 8},
+
+ /* 1.5 GHz */
+ { 12000000, 1500000000, 750, 6, 1, 8},
+ { 13000000, 1500000000, 923, 8, 1, 8}, /* actual: 1499.8 MHz */
+ { 16800000, 1500000000, 625, 7, 1, 8},
+ { 19200000, 1500000000, 625, 8, 1, 8},
+ { 26000000, 1500000000, 750, 13, 1, 8},
+
+ /* 1.4 GHz */
+ { 12000000, 1400000000, 700, 6, 1, 8},
+ { 13000000, 1400000000, 969, 9, 1, 8}, /* actual: 1399.7 MHz */
+ { 16800000, 1400000000, 1000, 12, 1, 8},
+ { 19200000, 1400000000, 875, 12, 1, 8},
+ { 26000000, 1400000000, 700, 13, 1, 8},
+
+ /* 1.3 GHz */
+ { 12000000, 1300000000, 975, 9, 1, 8},
+ { 13000000, 1300000000, 1000, 10, 1, 8},
+ { 16800000, 1300000000, 928, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 19200000, 1300000000, 812, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 26000000, 1300000000, 650, 13, 1, 8},
+
+ /* 1.2 GHz */
+ { 12000000, 1200000000, 1000, 10, 1, 8},
+ { 13000000, 1200000000, 923, 10, 1, 8}, /* actual: 1199.9 MHz */
+ { 16800000, 1200000000, 1000, 14, 1, 8},
+ { 19200000, 1200000000, 1000, 16, 1, 8},
+ { 26000000, 1200000000, 600, 13, 1, 8},
+
+ /* 1.1 GHz */
+ { 12000000, 1100000000, 825, 9, 1, 8},
+ { 13000000, 1100000000, 846, 10, 1, 8}, /* actual: 1099.8 MHz */
+ { 16800000, 1100000000, 982, 15, 1, 8}, /* actual: 1099.8 MHz */
+ { 19200000, 1100000000, 859, 15, 1, 8}, /* actual: 1099.5 MHz */
+ { 26000000, 1100000000, 550, 13, 1, 8},
+
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 8},
+ { 13000000, 1000000000, 1000, 13, 1, 8},
+ { 16800000, 1000000000, 833, 14, 1, 8}, /* actual: 999.6 MHz */
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 8},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_x = {
+ .name = "pll_x",
+ .flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG | PLLX,
+ .ops = &tegra_pll_ops,
+ .reg = 0xe0,
+ .parent = &tegra_pll_ref,
+ .max_rate = 1700000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1700000000,
+ .freq_table = tegra_pll_x_freq_table,
+ .lock_delay = 300,
+ },
+};
+
+static struct clk tegra_pll_x_out0 = {
+ .name = "pll_x_out0",
+ .ops = &tegra_pll_div_ops,
+ .flags = DIV_2 | PLLX,
+ .parent = &tegra_pll_x,
+ .max_rate = 850000000,
+};
+
+
+static struct clk_pll_freq_table tegra_pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ { 12000000, 100000000, 150, 1, 18, 11},
+ { 216000000, 100000000, 200, 18, 24, 13},
+#ifndef CONFIG_TEGRA_SILICON_PLATFORM
+ { 13000000, 100000000, 200, 1, 26, 13},
+#endif
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct clk tegra_pll_e = {
+ .name = "pll_e",
+ .flags = PLL_ALT_MISC_REG,
+ .ops = &tegra_plle_ops,
+ .reg = 0xe8,
+ .max_rate = 100000000,
+ .u.pll = {
+ .input_min = 12000000,
+ .input_max = 216000000,
+ .cf_min = 12000000,
+ .cf_max = 12000000,
+ .vco_min = 1200000000,
+ .vco_max = 2400000000U,
+ .freq_table = tegra_pll_e_freq_table,
+ .lock_delay = 300,
+ .fixed_rate = 100000000,
+ },
+};
+
+static struct clk tegra_cml0_clk = {
+ .name = "cml0",
+ .parent = &tegra_pll_e,
+ .ops = &tegra_cml_clk_ops,
+ .reg = PLLE_AUX,
+ .max_rate = 100000000,
+ .u.periph = {
+ .clk_num = 0,
+ },
+};
+
+static struct clk tegra_cml1_clk = {
+ .name = "cml1",
+ .parent = &tegra_pll_e,
+ .ops = &tegra_cml_clk_ops,
+ .reg = PLLE_AUX,
+ .max_rate = 100000000,
+ .u.periph = {
+ .clk_num = 1,
+ },
+};
+
+static struct clk tegra_pciex_clk = {
+ .name = "pciex",
+ .parent = &tegra_pll_e,
+ .ops = &tegra_pciex_clk_ops,
+ .max_rate = 100000000,
+ .u.periph = {
+ .clk_num = 74,
+ },
+};
+
+/* Audio sync clocks */
+#define SYNC_SOURCE(_id, _dev) \
+ { \
+ .name = #_id "_sync", \
+ .lookup = { \
+ .dev_id = #_dev , \
+ .con_id = "ext_audio_sync", \
+ }, \
+ .rate = 24000000, \
+ .max_rate = 24000000, \
+ .ops = &tegra_sync_source_ops \
+ }
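+/*
+ * For reference (comment only), SYNC_SOURCE(i2s0, tegra30-i2s.0) expands to
+ * a clk named "i2s0_sync", looked up as { "tegra30-i2s.0", "ext_audio_sync" },
+ * with a fixed 24 MHz rate/max_rate and tegra_sync_source_ops.
+ */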
+static struct clk tegra_sync_source_list[] = {
+ SYNC_SOURCE(spdif_in, tegra30-spdif),
+ SYNC_SOURCE(i2s0, tegra30-i2s.0),
+ SYNC_SOURCE(i2s1, tegra30-i2s.1),
+ SYNC_SOURCE(i2s2, tegra30-i2s.2),
+ SYNC_SOURCE(i2s3, tegra30-i2s.3),
+ SYNC_SOURCE(i2s4, tegra30-i2s.4),
+ SYNC_SOURCE(vimclk, vimclk),
+};
+
+static struct clk_mux_sel mux_audio_sync_clk[] = {
+ { .input = &tegra_sync_source_list[0], .value = 0},
+ { .input = &tegra_sync_source_list[1], .value = 1},
+ { .input = &tegra_sync_source_list[2], .value = 2},
+ { .input = &tegra_sync_source_list[3], .value = 3},
+ { .input = &tegra_sync_source_list[4], .value = 4},
+ { .input = &tegra_sync_source_list[5], .value = 5},
+ { .input = &tegra_pll_a_out0, .value = 6},
+ { .input = &tegra_sync_source_list[6], .value = 7},
+ { 0, 0 }
+};
+
+#define AUDIO_SYNC_CLK(_id, _dev, _index) \
+ { \
+ .name = #_id, \
+ .lookup = { \
+ .dev_id = #_dev, \
+ .con_id = "audio_sync", \
+ }, \
+ .inputs = mux_audio_sync_clk, \
+ .reg = 0x4A0 + (_index) * 4, \
+ .max_rate = 24000000, \
+ .ops = &tegra_audio_sync_clk_ops \
+ }
+static struct clk tegra_clk_audio_list[] = {
+ AUDIO_SYNC_CLK(audio0, tegra30-i2s.0, 0),
+ AUDIO_SYNC_CLK(audio1, tegra30-i2s.1, 1),
+ AUDIO_SYNC_CLK(audio2, tegra30-i2s.2, 2),
+ AUDIO_SYNC_CLK(audio3, tegra30-i2s.3, 3),
+ AUDIO_SYNC_CLK(audio4, tegra30-i2s.4, 4),
+ AUDIO_SYNC_CLK(audio, tegra30-spdif, 5),
+};
+
+#define AUDIO_SYNC_2X_CLK(_id, _dev, _index) \
+ { \
+ .name = #_id "_2x", \
+ .lookup = { \
+ .dev_id = #_dev, \
+ .con_id = "audio_sync_2x" \
+ }, \
+ .flags = PERIPH_NO_RESET, \
+ .max_rate = 48000000, \
+ .ops = &tegra_clk_double_ops, \
+ .reg = 0x49C, \
+ .reg_shift = 24 + (_index), \
+ .parent = &tegra_clk_audio_list[(_index)], \
+ .u.periph = { \
+ .clk_num = 113 + (_index), \
+ }, \
+ }
+static struct clk tegra_clk_audio_2x_list[] = {
+ AUDIO_SYNC_2X_CLK(audio0, tegra30-i2s.0, 0),
+ AUDIO_SYNC_2X_CLK(audio1, tegra30-i2s.1, 1),
+ AUDIO_SYNC_2X_CLK(audio2, tegra30-i2s.2, 2),
+ AUDIO_SYNC_2X_CLK(audio3, tegra30-i2s.3, 3),
+ AUDIO_SYNC_2X_CLK(audio4, tegra30-i2s.4, 4),
+ AUDIO_SYNC_2X_CLK(audio, tegra30-spdif, 5),
+};
+
+#define MUX_I2S_SPDIF(_id, _index) \
+static struct clk_mux_sel mux_pllaout0_##_id##_2x_pllp_clkm[] = { \
+ {.input = &tegra_pll_a_out0, .value = 0}, \
+ {.input = &tegra_clk_audio_2x_list[(_index)], .value = 1}, \
+ {.input = &tegra_pll_p, .value = 2}, \
+ {.input = &tegra_clk_m, .value = 3}, \
+ { 0, 0}, \
+}
+MUX_I2S_SPDIF(audio0, 0);
+MUX_I2S_SPDIF(audio1, 1);
+MUX_I2S_SPDIF(audio2, 2);
+MUX_I2S_SPDIF(audio3, 3);
+MUX_I2S_SPDIF(audio4, 4);
+MUX_I2S_SPDIF(audio, 5); /* SPDIF */
+
+/* External clock outputs (through PMC) */
+#define MUX_EXTERN_OUT(_id) \
+static struct clk_mux_sel mux_clkm_clkm2_clkm4_extern##_id[] = { \
+ {.input = &tegra_clk_m, .value = 0}, \
+ {.input = &tegra_clk_m_div2, .value = 1}, \
+ {.input = &tegra_clk_m_div4, .value = 2}, \
+ {.input = NULL, .value = 3}, /* placeholder */ \
+ { 0, 0}, \
+}
+MUX_EXTERN_OUT(1);
+MUX_EXTERN_OUT(2);
+MUX_EXTERN_OUT(3);
+
+static struct clk_mux_sel *mux_extern_out_list[] = {
+ mux_clkm_clkm2_clkm4_extern1,
+ mux_clkm_clkm2_clkm4_extern2,
+ mux_clkm_clkm2_clkm4_extern3,
+};
+
+#define CLK_OUT_CLK(_id) \
+ { \
+ .name = "clk_out_" #_id, \
+ .lookup = { \
+ .dev_id = "clk_out_" #_id, \
+ .con_id = "extern" #_id, \
+ }, \
+ .ops = &tegra_clk_out_ops, \
+ .reg = 0x1a8, \
+ .inputs = mux_clkm_clkm2_clkm4_extern##_id, \
+ .flags = MUX_CLK_OUT, \
+ .max_rate = 216000000, \
+ .u.periph = { \
+ .clk_num = (_id - 1) * 8 + 2, \
+ }, \
+ }
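+/*
+ * For reference (comment only): clk_num = (_id - 1) * 8 + 2 spaces the three
+ * outputs a byte apart in the PMC register at 0x1a8.  With the MUX_CLK_OUT
+ * handling in periph_clk_source_shift()/mask() above, clk_out_1 uses enable
+ * bit 2 and source mux bits [7:6], clk_out_2 bit 10 and [15:14], and
+ * clk_out_3 bit 18 and [23:22].
+ */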
+static struct clk tegra_clk_out_list[] = {
+ CLK_OUT_CLK(1),
+ CLK_OUT_CLK(2),
+ CLK_OUT_CLK(3),
+};
+
+/* called after peripheral external clocks are initialized */
+static void init_clk_out_mux(void)
+{
+ int i;
+ struct clk *c;
+
+ /* The output clock con_id is the name of the peripheral
+ * external clock connected to input 3 of the output mux */
+ for (i = 0; i < ARRAY_SIZE(tegra_clk_out_list); i++) {
+ c = tegra_get_clock_by_name(
+ tegra_clk_out_list[i].lookup.con_id);
+ if (!c)
+ pr_err("%s: could not find clk %s\n", __func__,
+ tegra_clk_out_list[i].lookup.con_id);
+ mux_extern_out_list[i][3].input = c;
+ }
+}
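+
+/*
+ * Example (comment only): tegra_clk_out_list[0] carries con_id "extern1", so
+ * the loop above looks up the peripheral clock registered under that name
+ * and wires it into slot 3 of mux_clkm_clkm2_clkm4_extern1, replacing the
+ * NULL placeholder.
+ */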
+
+/* Peripheral muxes */
+static struct clk_mux_sel mux_cclk_g[] = {
+ { .input = &tegra_clk_m, .value = 0},
+ { .input = &tegra_pll_c, .value = 1},
+ { .input = &tegra_clk_32k, .value = 2},
+ { .input = &tegra_pll_m, .value = 3},
+ { .input = &tegra_pll_p, .value = 4},
+ { .input = &tegra_pll_p_out4, .value = 5},
+ { .input = &tegra_pll_p_out3, .value = 6},
+ /* { .input = &tegra_clk_d, .value = 7}, - no use on tegra3 */
+ { .input = &tegra_pll_x, .value = 8},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_cclk_lp[] = {
+ { .input = &tegra_clk_m, .value = 0},
+ { .input = &tegra_pll_c, .value = 1},
+ { .input = &tegra_clk_32k, .value = 2},
+ { .input = &tegra_pll_m, .value = 3},
+ { .input = &tegra_pll_p, .value = 4},
+ { .input = &tegra_pll_p_out4, .value = 5},
+ { .input = &tegra_pll_p_out3, .value = 6},
+ /* { .input = &tegra_clk_d, .value = 7}, - no use on tegra3 */
+ { .input = &tegra_pll_x_out0, .value = 8},
+ { .input = &tegra_pll_x, .value = 8 | SUPER_LP_DIV2_BYPASS},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_sclk[] = {
+ { .input = &tegra_clk_m, .value = 0},
+ { .input = &tegra_pll_c_out1, .value = 1},
+ { .input = &tegra_pll_p_out4, .value = 2},
+ { .input = &tegra_pll_p_out3, .value = 3},
+ { .input = &tegra_pll_p_out2, .value = 4},
+ /* { .input = &tegra_clk_d, .value = 5}, - no use on tegra3 */
+ { .input = &tegra_clk_32k, .value = 6},
+ { .input = &tegra_pll_m_out1, .value = 7},
+ { 0, 0},
+};
+
+static struct clk tegra_clk_cclk_g = {
+ .name = "cclk_g",
+ .flags = DIV_U71 | DIV_U71_INT,
+ .inputs = mux_cclk_g,
+ .reg = 0x368,
+ .ops = &tegra_super_ops,
+ .max_rate = 1700000000,
+};
+
+static struct clk tegra_clk_cclk_lp = {
+ .name = "cclk_lp",
+ .flags = DIV_2 | DIV_U71 | DIV_U71_INT,
+ .inputs = mux_cclk_lp,
+ .reg = 0x370,
+ .ops = &tegra_super_ops,
+ .max_rate = 620000000,
+};
+
+static struct clk tegra_clk_sclk = {
+ .name = "sclk",
+ .inputs = mux_sclk,
+ .reg = 0x28,
+ .ops = &tegra_super_ops,
+ .max_rate = 334000000,
+ .min_rate = 40000000,
+};
+
+static struct clk tegra_clk_virtual_cpu_g = {
+ .name = "cpu_g",
+ .parent = &tegra_clk_cclk_g,
+ .ops = &tegra_cpu_ops,
+ .max_rate = 1700000000,
+ .u.cpu = {
+ .main = &tegra_pll_x,
+ .backup = &tegra_pll_p,
+ .mode = MODE_G,
+ },
+};
+
+static struct clk tegra_clk_virtual_cpu_lp = {
+ .name = "cpu_lp",
+ .parent = &tegra_clk_cclk_lp,
+ .ops = &tegra_cpu_ops,
+ .max_rate = 620000000,
+ .u.cpu = {
+ .main = &tegra_pll_x,
+ .backup = &tegra_pll_p,
+ .mode = MODE_LP,
+ },
+};
+
+static struct clk_mux_sel mux_cpu_cmplx[] = {
+ { .input = &tegra_clk_virtual_cpu_g, .value = 0},
+ { .input = &tegra_clk_virtual_cpu_lp, .value = 1},
+ { 0, 0},
+};
+
+static struct clk tegra_clk_cpu_cmplx = {
+ .name = "cpu",
+ .inputs = mux_cpu_cmplx,
+ .ops = &tegra_cpu_cmplx_ops,
+ .max_rate = 1700000000,
+};
+
+static struct clk tegra_clk_cop = {
+ .name = "cop",
+ .parent = &tegra_clk_sclk,
+ .ops = &tegra_cop_ops,
+ .max_rate = 334000000,
+};
+
+static struct clk tegra_clk_hclk = {
+ .name = "hclk",
+ .flags = DIV_BUS,
+ .parent = &tegra_clk_sclk,
+ .reg = 0x30,
+ .reg_shift = 4,
+ .ops = &tegra_bus_ops,
+ .max_rate = 334000000,
+ .min_rate = 40000000,
+};
+
+static struct clk tegra_clk_pclk = {
+ .name = "pclk",
+ .flags = DIV_BUS,
+ .parent = &tegra_clk_hclk,
+ .reg = 0x30,
+ .reg_shift = 0,
+ .ops = &tegra_bus_ops,
+ .max_rate = 167000000,
+ .min_rate = 40000000,
+};
+
+static struct raw_notifier_head sbus_rate_change_nh;
+
+static struct clk tegra_clk_sbus_cmplx = {
+ .name = "sbus",
+ .parent = &tegra_clk_sclk,
+ .ops = &tegra_sbus_cmplx_ops,
+ .u.system = {
+ .pclk = &tegra_clk_pclk,
+ .hclk = &tegra_clk_hclk,
+ .sclk_low = &tegra_pll_p_out4,
+ .sclk_high = &tegra_pll_m_out1,
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ .threshold = 204000000, /* exact factor of low range pll_p */
+#else
+ .threshold = 108000000, /* exact factor of low range pll_p */
+#endif
+ },
+ .rate_change_nh = &sbus_rate_change_nh,
+};
+
+static struct clk tegra_clk_blink = {
+ .name = "blink",
+ .parent = &tegra_clk_32k,
+ .reg = 0x40,
+ .ops = &tegra_blink_clk_ops,
+ .max_rate = 32768,
+};
+
+static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = {
+ { .input = &tegra_pll_m, .value = 0},
+ { .input = &tegra_pll_c, .value = 1},
+ { .input = &tegra_pll_p, .value = 2},
+ { .input = &tegra_pll_a_out0, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllm_pllc_pllp_clkm[] = {
+ { .input = &tegra_pll_m, .value = 0},
+ /* { .input = &tegra_pll_c, .value = 1}, not used on tegra3 */
+ { .input = &tegra_pll_p, .value = 2},
+ { .input = &tegra_clk_m, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_pllc_pllm_clkm[] = {
+ { .input = &tegra_pll_p, .value = 0},
+ { .input = &tegra_pll_c, .value = 1},
+#ifndef CONFIG_TEGRA_PLLM_RESTRICTED
+ { .input = &tegra_pll_m, .value = 2},
+#endif
+ { .input = &tegra_clk_m, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_clkm[] = {
+ { .input = &tegra_pll_p, .value = 0},
+ { .input = &tegra_clk_m, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_plld_pllc_clkm[] = {
+ {.input = &tegra_pll_p, .value = 0},
+ {.input = &tegra_pll_d_out0, .value = 1},
+ {.input = &tegra_pll_c, .value = 2},
+ {.input = &tegra_clk_m, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
+ {.input = &tegra_pll_p, .value = 0},
+#ifndef CONFIG_TEGRA_PLLM_RESTRICTED
+ {.input = &tegra_pll_m, .value = 1},
+#endif
+ {.input = &tegra_pll_d_out0, .value = 2},
+ {.input = &tegra_pll_a_out0, .value = 3},
+ {.input = &tegra_pll_c, .value = 4},
+ {.input = &tegra_pll_d2_out0, .value = 5},
+ {.input = &tegra_clk_m, .value = 6},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_plla_pllc_pllp_clkm[] = {
+ { .input = &tegra_pll_a_out0, .value = 0},
+ /* { .input = &tegra_pll_c, .value = 1}, no use on tegra3 */
+ { .input = &tegra_pll_p, .value = 2},
+ { .input = &tegra_clk_m, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_pllc_clk32_clkm[] = {
+ {.input = &tegra_pll_p, .value = 0},
+ {.input = &tegra_pll_c, .value = 1},
+ {.input = &tegra_clk_32k, .value = 2},
+ {.input = &tegra_clk_m, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_pllc_clkm_clk32[] = {
+ {.input = &tegra_pll_p, .value = 0},
+ {.input = &tegra_pll_c, .value = 1},
+ {.input = &tegra_clk_m, .value = 2},
+ {.input = &tegra_clk_32k, .value = 3},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_pllc_pllm[] = {
+ {.input = &tegra_pll_p, .value = 0},
+ {.input = &tegra_pll_c, .value = 1},
+#ifndef CONFIG_TEGRA_PLLM_RESTRICTED
+ {.input = &tegra_pll_m, .value = 2},
+#endif
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_clk_m[] = {
+ { .input = &tegra_clk_m, .value = 0},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_pllp_out3[] = {
+ { .input = &tegra_pll_p_out3, .value = 0},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_plld_out0[] = {
+ { .input = &tegra_pll_d_out0, .value = 0},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_plld_out0_plld2_out0[] = {
+ { .input = &tegra_pll_d_out0, .value = 0},
+ { .input = &tegra_pll_d2_out0, .value = 1},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_clk_32k[] = {
+ { .input = &tegra_clk_32k, .value = 0},
+ { 0, 0},
+};
+
+static struct clk_mux_sel mux_plla_clk32_pllp_clkm_plle[] = {
+ { .input = &tegra_pll_a_out0, .value = 0},
+ { .input = &tegra_clk_32k, .value = 1},
+ { .input = &tegra_pll_p, .value = 2},
+ { .input = &tegra_clk_m, .value = 3},
+ { .input = &tegra_pll_e, .value = 4},
+ { 0, 0},
+};
+
+static struct raw_notifier_head emc_rate_change_nh;
+
+static struct clk tegra_clk_emc = {
+ .name = "emc",
+ .ops = &tegra_emc_clk_ops,
+ .reg = 0x19c,
+ .max_rate = 800000000,
+ .min_rate = 25000000,
+ .inputs = mux_pllm_pllc_pllp_clkm,
+ .flags = MUX | DIV_U71 | PERIPH_EMC_ENB,
+ .u.periph = {
+ .clk_num = 57,
+ },
+ .rate_change_nh = &emc_rate_change_nh,
+};
+
+static struct clk tegra_clk_emc_bridge = {
+ .name = "bridge.emc",
+ .ops = &tegra_clk_emc_bridge_ops,
+ .parent = &tegra_clk_emc,
+};
+
+static struct clk tegra_clk_cbus = {
+ .name = "cbus",
+ .parent = &tegra_pll_c,
+ .ops = &tegra_clk_cbus_ops,
+ .max_rate = 700000000,
+ .mul = 1,
+ .div = 2,
+ .flags = PERIPH_ON_CBUS,
+ .shared_bus_backup = {
+ .input = &tegra_pll_p,
+ .value = 2,
+ }
+};
+
+#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \
+ { \
+ .name = _name, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ .ops = &tegra_periph_clk_ops, \
+ .reg = _reg, \
+ .inputs = _inputs, \
+ .flags = _flags, \
+ .max_rate = _max, \
+ .u.periph = { \
+ .clk_num = _clk_num, \
+ }, \
+ }
+
+#define PERIPH_CLK_EX(_name, _dev, _con, _clk_num, _reg, _max, _inputs, \
+ _flags, _ops) \
+ { \
+ .name = _name, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ .ops = _ops, \
+ .reg = _reg, \
+ .inputs = _inputs, \
+ .flags = _flags, \
+ .max_rate = _max, \
+ .u.periph = { \
+ .clk_num = _clk_num, \
+ }, \
+ }
+
+#define SHARED_CLK(_name, _dev, _con, _parent, _id, _div, _mode)\
+ { \
+ .name = _name, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ .ops = &tegra_clk_shared_bus_ops, \
+ .parent = _parent, \
+ .u.shared_bus_user = { \
+ .client_id = _id, \
+ .client_div = _div, \
+ .mode = _mode, \
+ }, \
+ }
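+/*
+ * For reference (comment only), the first entry in the table below,
+ * PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 26000000, mux_clk_m, 0),
+ * expands to a clk named "apbdma" looked up as { "tegra-dma", NULL }, using
+ * tegra_periph_clk_ops, peripheral clock number 34 (its enable/reset bit in
+ * the CAR banks), no source register (reg 0), a 26 MHz max_rate, clk_m as
+ * its only input and no flags.
+ */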
+struct clk tegra_list_clks[] = {
+ PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB),
+ PERIPH_CLK("kbc", "tegra-kbc", NULL, 36, 0, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB),
+ PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("kfuse", "kfuse-tegra", NULL, 40, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("fuse", "fuse-tegra", "fuse", 39, 0, 26000000, mux_clk_m, PERIPH_ON_APB),
+ PERIPH_CLK("fuse_burn", "fuse-tegra", "fuse_burn", 39, 0, 26000000, mux_clk_m, PERIPH_ON_APB),
+ PERIPH_CLK("apbif", "tegra30-ahub", "apbif", 107, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("i2s0", "tegra30-i2s.0", "i2s", 30, 0x1d8, 26000000, mux_pllaout0_audio0_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("i2s1", "tegra30-i2s.1", "i2s", 11, 0x100, 26000000, mux_pllaout0_audio1_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("i2s2", "tegra30-i2s.2", "i2s", 18, 0x104, 26000000, mux_pllaout0_audio2_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("i2s3", "tegra30-i2s.3", "i2s", 101, 0x3bc, 26000000, mux_pllaout0_audio3_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("i2s4", "tegra30-i2s.4", "i2s", 102, 0x3c0, 26000000, mux_pllaout0_audio4_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("spdif_out", "tegra30-spdif", "spdif_out", 10, 0x108, 100000000, mux_pllaout0_audio_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("spdif_in", "tegra30-spdif", "spdif_in", 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_clk32_clkm, MUX | MUX_PWM | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("d_audio", "tegra30-ahub", "d_audio", 106, 0x3d0, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("dam0", "tegra30-dam.0", NULL, 108, 0x3d8, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("dam1", "tegra30-dam.1", NULL, 109, 0x3dc, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("dam2", "tegra30-dam.2", NULL, 110, 0x3e0, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("hda", "tegra30-hda", "hda", 125, 0x428, 108000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("hda2codec_2x", "tegra30-hda", "hda2codec", 111, 0x3e4, 48000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("hda2hdmi", "tegra30-hda", "hda2hdmi", 128, 0, 48000000, mux_clk_m, 0),
+ PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc5", "spi_tegra.4", NULL, 104, 0x3c8, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sbc6", "spi_tegra.5", NULL, 105, 0x3cc, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sata_oob", "tegra_sata_oob", NULL, 123, 0x420, 216000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sata", "tegra_sata", NULL, 124, 0x424, 216000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sata_cold", "tegra_sata_cold", NULL, 129, 0, 48000000, mux_clk_m, 0),
+ PERIPH_CLK_EX("ndflash","tegra_nand", NULL, 13, 0x160, 240000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71, &tegra_nand_clk_ops),
+ PERIPH_CLK("ndspeed", "tegra_nand_speed", NULL, 80, 0x3f8, 240000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 208000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 104000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 208000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 104000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 520000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_INT),
+ PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
+ PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("owr", "tegra_w1", NULL, 71, 0x1cc, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("nor", "tegra-nor", NULL, 42, 0x1d0, 127000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, 60000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), /* scales with voltage */
+ PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("i2c4", "tegra-i2c.3", NULL, 103, 0x3c4, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("i2c5", "tegra-i2c.4", NULL, 47, 0x128, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
+ PERIPH_CLK("uarta", "tegra_uart.0", NULL, 6, 0x178, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartb", "tegra_uart.1", NULL, 7, 0x17c, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartc", "tegra_uart.2", NULL, 55, 0x1a0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartd", "tegra_uart.3", NULL, 65, 0x1c0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uarte", "tegra_uart.4", NULL, 66, 0x1c4, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uarta_dbg", "serial8250.0", "uarta",6, 0x178, 800000000, mux_pllp_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartb_dbg", "serial8250.0", "uartb",7, 0x17c, 800000000, mux_pllp_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartc_dbg", "serial8250.0", "uartc",55, 0x1a0, 800000000, mux_pllp_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartd_dbg", "serial8250.0", "uartd",65, 0x1c0, 800000000, mux_pllp_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uarte_dbg", "serial8250.0", "uarte",66, 0x1c4, 800000000, mux_pllp_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB),
+ PERIPH_CLK_EX("vi", "tegra_camera", "vi", 20, 0x148, 425000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT, &tegra_vi_clk_ops),
+ PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE | PERIPH_MANUAL_RESET),
+ PERIPH_CLK("3d2", "3d2", NULL, 98, 0x3b0, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE | PERIPH_MANUAL_RESET),
+ PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE),
+ PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET),
+ PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT),
+ PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT),
+ PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 260000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT),
+ PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK_EX("dtv", "dtv", NULL, 79, 0x1dc, 250000000, mux_clk_m, 0, &tegra_dtv_clk_ops),
+ PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 148500000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | MUX8 | DIV_U71),
+ PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 220000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | MUX8),
+ PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | MUX8),
+ PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("dsia", "tegradc.0", "dsia", 48, 0, 500000000, mux_plld_out0, 0),
+ PERIPH_CLK_EX("dsib", "tegradc.1", "dsib", 82, 0xd0, 500000000, mux_plld_out0_plld2_out0, MUX | PLLD, &tegra_dsib_clk_ops),
+ PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 102000000, mux_pllp_out3, 0),
+ PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */
+ PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET),
+
+ PERIPH_CLK("tsensor", "tegra-tsensor", NULL, 100, 0x3b8, 216000000, mux_pllp_pllc_clkm_clk32, MUX | DIV_U71),
+ PERIPH_CLK("actmon", "actmon", NULL, 119, 0x3e8, 216000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71),
+ PERIPH_CLK("extern1", "extern1", NULL, 120, 0x3ec, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | MUX8 | DIV_U71),
+ PERIPH_CLK("extern2", "extern2", NULL, 121, 0x3f0, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | MUX8 | DIV_U71),
+ PERIPH_CLK("extern3", "extern3", NULL, 122, 0x3f4, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | MUX8 | DIV_U71),
+ PERIPH_CLK("i2cslow", "i2cslow", NULL, 81, 0x3fc, 26000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("pcie", "tegra-pcie", "pcie", 70, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("afi", "tegra-pcie", "afi", 72, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("se", "se", NULL, 127, 0x42c, 520000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71| DIV_U71_INT),
+
+ SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("bsea.sclk", "tegra-aes", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("usbd.sclk", "fsl-tegra-udc", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("usb1.sclk", "tegra-ehci.0", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("usb2.sclk", "tegra-ehci.1", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("usb3.sclk", "tegra-ehci.2", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("mon.avp", "tegra_actmon", "avp", &tegra_clk_sbus_cmplx, NULL, 0, 0),
+ SHARED_CLK("cap.sclk", "cap_sclk", NULL, &tegra_clk_sbus_cmplx, NULL, 0, SHARED_CEILING),
+ SHARED_CLK("floor.sclk", "floor_sclk", NULL, &tegra_clk_sbus_cmplx, NULL, 0, 0),
+
+ SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc, NULL, 0, SHARED_BW),
+ SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc, NULL, 0, SHARED_BW),
+ SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("usbd.emc", "fsl-tegra-udc", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("usb3.emc", "tegra-ehci.2", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("mon.emc", "tegra_actmon", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("cap.emc", "cap.emc", NULL, &tegra_clk_emc, NULL, 0, SHARED_CEILING),
+ SHARED_CLK("3d.emc", "tegra_gr3d", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("2d.emc", "tegra_gr2d", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("mpe.emc", "tegra_mpe", "emc", &tegra_clk_emc, NULL, 0, 0),
+ SHARED_CLK("floor.emc", "floor.emc", NULL, &tegra_clk_emc, NULL, 0, 0),
+
+ SHARED_CLK("host1x.cbus", "tegra_host1x", "host1x", &tegra_clk_cbus, "host1x", 2, SHARED_AUTO),
+ SHARED_CLK("3d.cbus", "tegra_gr3d", "gr3d", &tegra_clk_cbus, "3d", 0, 0),
+ SHARED_CLK("3d2.cbus", "tegra_gr3d", "gr3d2", &tegra_clk_cbus, "3d2", 0, 0),
+ SHARED_CLK("2d.cbus", "tegra_gr2d", "gr2d", &tegra_clk_cbus, "2d", 0, 0),
+ SHARED_CLK("epp.cbus", "tegra_gr2d", "epp", &tegra_clk_cbus, "epp", 0, 0),
+ SHARED_CLK("mpe.cbus", "tegra_mpe", "mpe", &tegra_clk_cbus, "mpe", 0, 0),
+ SHARED_CLK("vde.cbus", "tegra-avp", "vde", &tegra_clk_cbus, "vde", 0, 0),
+ SHARED_CLK("se.cbus", "tegra-se", NULL, &tegra_clk_cbus, "se", 0, 0),
+ SHARED_CLK("cap.cbus", "cap.cbus", NULL, &tegra_clk_cbus, NULL, 0, SHARED_CEILING),
+ SHARED_CLK("floor.cbus", "floor.cbus", NULL, &tegra_clk_cbus, NULL, 0, 0),
+};
+
+#define CLK_DUPLICATE(_name, _dev, _con) \
+ { \
+ .name = _name, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ }
+
+/* Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+struct clk_duplicate tegra_clk_duplicates[] = {
+ CLK_DUPLICATE("usbd", "utmip-pad", NULL),
+ CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
+ CLK_DUPLICATE("usbd", "tegra-otg", NULL),
+ CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"),
+ CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
+ CLK_DUPLICATE("dsib", "tegradc.0", "dsib"),
+ CLK_DUPLICATE("dsia", "tegradc.1", "dsia"),
+ CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL),
+ CLK_DUPLICATE("cop", "tegra-avp", "cop"),
+ CLK_DUPLICATE("bsev", "tegra-avp", "bsev"),
+ CLK_DUPLICATE("cop", "nvavp", "cop"),
+ CLK_DUPLICATE("bsev", "nvavp", "bsev"),
+ CLK_DUPLICATE("vde", "tegra-aes", "vde"),
+ CLK_DUPLICATE("bsea", "tegra-aes", "bsea"),
+ CLK_DUPLICATE("bsea", "nvavp", "bsea"),
+ CLK_DUPLICATE("cml1", "tegra_sata_cml", NULL),
+ CLK_DUPLICATE("cml0", "tegra_pcie", "cml"),
+ CLK_DUPLICATE("pciex", "tegra_pcie", "pciex"),
+ CLK_DUPLICATE("i2c1", "tegra-i2c-slave.0", NULL),
+ CLK_DUPLICATE("i2c2", "tegra-i2c-slave.1", NULL),
+ CLK_DUPLICATE("i2c3", "tegra-i2c-slave.2", NULL),
+ CLK_DUPLICATE("i2c4", "tegra-i2c-slave.3", NULL),
+ CLK_DUPLICATE("i2c5", "tegra-i2c-slave.4", NULL),
+ CLK_DUPLICATE("sbc1", "spi_slave_tegra.0", NULL),
+ CLK_DUPLICATE("sbc2", "spi_slave_tegra.1", NULL),
+ CLK_DUPLICATE("sbc3", "spi_slave_tegra.2", NULL),
+ CLK_DUPLICATE("sbc4", "spi_slave_tegra.3", NULL),
+ CLK_DUPLICATE("sbc5", "spi_slave_tegra.4", NULL),
+ CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
+ CLK_DUPLICATE("twd", "smp_twd", NULL),
+ CLK_DUPLICATE("vcp", "nvavp", "vcp"),
+ CLK_DUPLICATE("avp.sclk", "nvavp", "sclk"),
+ CLK_DUPLICATE("avp.emc", "nvavp", "emc"),
+ CLK_DUPLICATE("vde.cbus", "nvavp", "vde"),
+};
+
+struct clk *tegra_ptr_clks[] = {
+ &tegra_clk_32k,
+ &tegra_clk_m,
+ &tegra_clk_m_div2,
+ &tegra_clk_m_div4,
+ &tegra_pll_ref,
+ &tegra_pll_m,
+ &tegra_pll_m_out1,
+ &tegra_pll_c,
+ &tegra_pll_c_out1,
+ &tegra_pll_p,
+ &tegra_pll_p_out1,
+ &tegra_pll_p_out2,
+ &tegra_pll_p_out3,
+ &tegra_pll_p_out4,
+ &tegra_pll_a,
+ &tegra_pll_a_out0,
+ &tegra_pll_d,
+ &tegra_pll_d_out0,
+ &tegra_pll_d2,
+ &tegra_pll_d2_out0,
+ &tegra_pll_u,
+ &tegra_pll_x,
+ &tegra_pll_x_out0,
+ &tegra_pll_e,
+ &tegra_cml0_clk,
+ &tegra_cml1_clk,
+ &tegra_pciex_clk,
+ &tegra_clk_cclk_g,
+ &tegra_clk_cclk_lp,
+ &tegra_clk_sclk,
+ &tegra_clk_hclk,
+ &tegra_clk_pclk,
+ &tegra_clk_virtual_cpu_g,
+ &tegra_clk_virtual_cpu_lp,
+ &tegra_clk_cpu_cmplx,
+ &tegra_clk_blink,
+ &tegra_clk_cop,
+ &tegra_clk_sbus_cmplx,
+ &tegra_clk_emc,
+ &tegra3_clk_twd,
+ &tegra_clk_emc_bridge,
+ &tegra_clk_cbus,
+};
+
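+/*
+ * PLL_M is dedicated to memory (and, presumably, to the restricted-PLLM
+ * configurations handled in tegra3_dvfs.c), so peripherals on the graphics
+ * clock bus (PERIPH_ON_CBUS) may not select it as a parent.
+ */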
+static bool tegra3_clk_is_parent_allowed(struct clk *c, struct clk *p)
+{
+ if (c->flags & PERIPH_ON_CBUS)
+ return p != &tegra_pll_m;
+ return true;
+}
+
+static void tegra3_init_one_clock(struct clk *c)
+{
+ clk_init(c);
+ INIT_LIST_HEAD(&c->shared_bus_list);
+ if (!c->lookup.dev_id && !c->lookup.con_id)
+ c->lookup.con_id = c->name;
+ c->lookup.clk = c;
+ clkdev_add(&c->lookup);
+}
+
+/*
+ * Emergency throttle of the G-CPU by programming the G-super clock skipper
+ * underneath the clock framework, DVFS, and cpufreq driver s/w layers. May
+ * be called in ISR context for EDP events. When the throttle is released,
+ * the LP skipper is cleared as well, just in case it was set as a result of
+ * save/restore operations across a cluster switch (which should not happen).
+ */
+void tegra_edp_throttle_cpu_now(u8 factor)
+{
+ if (factor > 1) {
+ if (!is_lp_cluster())
+ tegra3_super_clk_skipper_update(
+ &tegra_clk_cclk_g, 0, factor - 1);
+ } else {
+ tegra3_super_clk_skipper_update(&tegra_clk_cclk_g, 0, 0);
+ tegra3_super_clk_skipper_update(&tegra_clk_cclk_lp, 0, 0);
+ }
+}
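+
+/*
+ * Hypothetical usage: an EDP event handler may call
+ * tegra_edp_throttle_cpu_now(2) to engage the G-CPU skipper (presumably
+ * throttling the clock by that factor), and tegra_edp_throttle_cpu_now(0)
+ * later to release the throttle and clear both the G and LP skippers.
+ */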
+
+#ifdef CONFIG_CPU_FREQ
+
+/*
+ * Frequency table index must be sequential starting at 0 and frequencies
+ * must be ascending.
+ */
+
+static struct cpufreq_frequency_table freq_table_300MHz[] = {
+ { 0, 204000 },
+ { 1, 300000 },
+ { 2, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p0GHz[] = {
+ { 0, 102000 },
+ { 1, 204000 },
+ { 2, 312000 },
+ { 3, 456000 },
+ { 4, 608000 },
+ { 5, 760000 },
+ { 6, 816000 },
+ { 7, 912000 },
+ { 8, 1000000 },
+ { 9, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p3GHz[] = {
+ { 0, 102000 },
+ { 1, 204000 },
+ { 2, 340000 },
+ { 3, 475000 },
+ { 4, 640000 },
+ { 5, 760000 },
+ { 6, 880000 },
+ { 7, 1000000 },
+ { 8, 1100000 },
+ { 9, 1200000 },
+ {10, 1300000 },
+ {11, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p4GHz[] = {
+ { 0, 102000 },
+ { 1, 204000 },
+ { 2, 370000 },
+ { 3, 475000 },
+ { 4, 620000 },
+ { 5, 760000 },
+ { 6, 880000 },
+ { 7, 1000000 },
+ { 8, 1100000 },
+ { 9, 1200000 },
+ {10, 1300000 },
+ {11, 1400000 },
+ {12, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p5GHz[] = {
+ { 0, 102000 },
+ { 1, 204000 },
+ { 2, 340000 },
+ { 3, 475000 },
+ { 4, 640000 },
+ { 5, 760000 },
+ { 6, 880000 },
+ { 7, 1000000 },
+ { 8, 1100000 },
+ { 9, 1200000 },
+ {10, 1300000 },
+ {11, 1400000 },
+ {12, 1500000 },
+ {13, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table freq_table_1p7GHz[] = {
+ { 0, 102000 },
+ { 1, 204000 },
+ { 2, 370000 },
+ { 3, 475000 },
+ { 4, 620000 },
+ { 5, 800000 },
+ { 6, 1000000 },
+ { 7, 1150000 },
+ { 8, 1300000 },
+ { 9, 1400000 },
+ {10, 1500000 },
+ {11, 1600000 },
+ {12, 1700000 },
+ {13, CPUFREQ_TABLE_END },
+};
+
+static struct tegra_cpufreq_table_data cpufreq_tables[] = {
+ { freq_table_300MHz, 0, 1 },
+ { freq_table_1p0GHz, 1, 7, 2},
+ { freq_table_1p3GHz, 1, 9, 2},
+ { freq_table_1p4GHz, 1, 10, 2},
+ { freq_table_1p5GHz, 1, 11, 2},
+ { freq_table_1p7GHz, 1, 11, 2},
+};
+
+static int clip_cpu_rate_limits(
+ struct cpufreq_frequency_table *freq_table,
+ struct cpufreq_policy *policy,
+ struct clk *cpu_clk_g,
+ struct clk *cpu_clk_lp)
+{
+ int idx, ret;
+
+ /* clip CPU G mode maximum frequency to table entry */
+ ret = cpufreq_frequency_table_target(policy, freq_table,
+ cpu_clk_g->max_rate / 1000, CPUFREQ_RELATION_H, &idx);
+ if (ret) {
+ pr_err("%s: G CPU max rate %lu outside of cpufreq table",
+ __func__, cpu_clk_g->max_rate);
+ return ret;
+ }
+ cpu_clk_g->max_rate = freq_table[idx].frequency * 1000;
+ if (cpu_clk_g->max_rate < cpu_clk_lp->max_rate) {
+ pr_err("%s: G CPU max rate %lu is below LP CPU max rate %lu",
+ __func__, cpu_clk_g->max_rate, cpu_clk_lp->max_rate);
+ return -EINVAL;
+ }
+
+ /* clip CPU LP mode maximum frequency to table entry, and
+ set CPU G mode minimum frequency one table step below */
+ ret = cpufreq_frequency_table_target(policy, freq_table,
+ cpu_clk_lp->max_rate / 1000, CPUFREQ_RELATION_H, &idx);
+ if (ret || !idx) {
+ pr_err("%s: LP CPU max rate %lu %s of cpufreq table", __func__,
+ cpu_clk_lp->max_rate, ret ? "outside" : "at the bottom");
+ return ret;
+ }
+ cpu_clk_lp->max_rate = freq_table[idx].frequency * 1000;
+ cpu_clk_g->min_rate = freq_table[idx-1].frequency * 1000;
+ return 0;
+}
+
+struct tegra_cpufreq_table_data *tegra_cpufreq_table_get(void)
+{
+ int i, ret;
+ unsigned long selection_rate;
+ struct clk *cpu_clk_g = tegra_get_clock_by_name("cpu_g");
+ struct clk *cpu_clk_lp = tegra_get_clock_by_name("cpu_lp");
+
+ /* For table selection, use the top cpu_g rate in the DVFS ladder; the
+ selection rate may exceed cpu max_rate (e.g., because of EDP limits on
+ cpu voltage) - in any case max_rate is clipped to the table */
+ if (cpu_clk_g->dvfs && cpu_clk_g->dvfs->num_freqs)
+ selection_rate =
+ cpu_clk_g->dvfs->freqs[cpu_clk_g->dvfs->num_freqs - 1];
+ else
+ selection_rate = cpu_clk_g->max_rate;
+
+ for (i = 0; i < ARRAY_SIZE(cpufreq_tables); i++) {
+ struct cpufreq_policy policy;
+ policy.cpu = 0; /* any on-line cpu */
+ ret = cpufreq_frequency_table_cpuinfo(
+ &policy, cpufreq_tables[i].freq_table);
+ if (!ret) {
+ if ((policy.max * 1000) == selection_rate) {
+ ret = clip_cpu_rate_limits(
+ cpufreq_tables[i].freq_table,
+ &policy, cpu_clk_g, cpu_clk_lp);
+ if (!ret)
+ return &cpufreq_tables[i];
+ }
+ }
+ }
+ WARN(1, "%s: No cpufreq table matching G & LP cpu ranges", __func__);
+ return NULL;
+}
+
+/* On DDR3 platforms there is an implicit dependency in this mapping: when the
+ * cpu rate exceeds the maximum DVFS level of the LP CPU clock at
+ * TEGRA_EMC_BRIDGE_MVOLTS_MIN, the respective emc rate should be above
+ * TEGRA_EMC_BRIDGE_RATE_MIN.
+ */
+/* FIXME: explicitly check this dependency */
+unsigned long tegra_emc_to_cpu_ratio(unsigned long cpu_rate)
+{
+ static unsigned long emc_max_rate = 0;
+
+ if (emc_max_rate == 0)
+ emc_max_rate = clk_round_rate(
+ tegra_get_clock_by_name("emc"), ULONG_MAX);
+
+ /* Vote on memory bus frequency based on cpu frequency;
+ cpu rate is in kHz, emc rate is in Hz */
+ if (cpu_rate >= 750000)
+ return emc_max_rate; /* cpu >= 750 MHz, emc max */
+ else if (cpu_rate >= 450000)
+ return emc_max_rate/2; /* cpu >= 450 MHz, emc max/2 */
+ else if (cpu_rate >= 250000)
+ return 100000000; /* cpu >= 250 MHz, emc 100 MHz */
+ else
+ return 0; /* emc min */
+}
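+
+/*
+ * For illustration, assuming emc_max_rate rounds to 800 MHz: a 1.2 GHz cpu
+ * request votes for 800 MHz emc, 600 MHz cpu for 400 MHz emc, 300 MHz cpu
+ * for 100 MHz emc, and anything below 250 MHz leaves emc at its minimum.
+ */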
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM +
+ PERIPH_CLK_SOURCE_NUM + 22];
+
+static int tegra_clk_suspend(void)
+{
+ unsigned long off;
+ u32 *ctx = clk_rst_suspend;
+
+ *ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK;
+ *ctx++ = clk_readl(CPU_SOFTRST_CTRL);
+ *ctx++ = clk_readl(tegra_pll_c.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_c.reg + PLL_MISC(&tegra_pll_c));
+ *ctx++ = clk_readl(tegra_pll_a.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
+ *ctx++ = clk_readl(tegra_pll_d.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
+ *ctx++ = clk_readl(tegra_pll_d2.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_d2.reg + PLL_MISC(&tegra_pll_d2));
+
+ *ctx++ = clk_readl(tegra_pll_m_out1.reg);
+ *ctx++ = clk_readl(tegra_pll_a_out0.reg);
+ *ctx++ = clk_readl(tegra_pll_c_out1.reg);
+
+ *ctx++ = clk_readl(tegra_clk_cclk_g.reg);
+ *ctx++ = clk_readl(tegra_clk_cclk_g.reg + SUPER_CLK_DIVIDER);
+ *ctx++ = clk_readl(tegra_clk_cclk_lp.reg);
+ *ctx++ = clk_readl(tegra_clk_cclk_lp.reg + SUPER_CLK_DIVIDER);
+
+ *ctx++ = clk_readl(tegra_clk_sclk.reg);
+ *ctx++ = clk_readl(tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
+ *ctx++ = clk_readl(tegra_clk_pclk.reg);
+
+ for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
+ off += 4) {
+ if (off == PERIPH_CLK_SOURCE_EMC)
+ continue;
+ *ctx++ = clk_readl(off);
+ }
+ for (off = PERIPH_CLK_SOURCE_G3D2; off <= PERIPH_CLK_SOURCE_SE;
+ off+=4) {
+ *ctx++ = clk_readl(off);
+ }
+ for (off = AUDIO_DLY_CLK; off <= AUDIO_SYNC_CLK_SPDIF; off+=4) {
+ *ctx++ = clk_readl(off);
+ }
+
+ *ctx++ = clk_readl(RST_DEVICES_L);
+ *ctx++ = clk_readl(RST_DEVICES_H);
+ *ctx++ = clk_readl(RST_DEVICES_U);
+ *ctx++ = clk_readl(RST_DEVICES_V);
+ *ctx++ = clk_readl(RST_DEVICES_W);
+
+ *ctx++ = clk_readl(CLK_OUT_ENB_L);
+ *ctx++ = clk_readl(CLK_OUT_ENB_H);
+ *ctx++ = clk_readl(CLK_OUT_ENB_U);
+ *ctx++ = clk_readl(CLK_OUT_ENB_V);
+ *ctx++ = clk_readl(CLK_OUT_ENB_W);
+
+ *ctx++ = clk_readl(MISC_CLK_ENB);
+ *ctx++ = clk_readl(CLK_MASK_ARM);
+
+ return 0;
+}
+
+static void tegra_clk_resume(void)
+{
+ unsigned long off;
+ const u32 *ctx = clk_rst_suspend;
+ u32 val;
+ u32 pllc_base;
+ u32 plla_base;
+ u32 plld_base;
+ u32 plld2_base;
+ struct clk *p;
+
+ val = clk_readl(OSC_CTRL) & ~OSC_CTRL_MASK;
+ val |= *ctx++;
+ clk_writel(val, OSC_CTRL);
+ clk_writel(*ctx++, CPU_SOFTRST_CTRL);
+
+ /* Since we are going to reset devices in this function, pllc/a (as well
+ * as plld/d2) are required to be enabled. The actual base values are
+ * restored later.
+ */
+ pllc_base = *ctx++;
+ clk_writel(pllc_base | PLL_BASE_ENABLE, tegra_pll_c.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_c.reg + PLL_MISC(&tegra_pll_c));
+
+ plla_base = *ctx++;
+ clk_writel(plla_base | PLL_BASE_ENABLE, tegra_pll_a.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
+
+ plld_base = *ctx++;
+ clk_writel(plld_base | PLL_BASE_ENABLE, tegra_pll_d.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
+
+ plld2_base = *ctx++;
+ clk_writel(plld2_base | PLL_BASE_ENABLE, tegra_pll_d2.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_d2.reg + PLL_MISC(&tegra_pll_d2));
+
+ udelay(1000);
+
+ clk_writel(*ctx++, tegra_pll_m_out1.reg);
+ clk_writel(*ctx++, tegra_pll_a_out0.reg);
+ clk_writel(*ctx++, tegra_pll_c_out1.reg);
+
+ clk_writel(*ctx++, tegra_clk_cclk_g.reg);
+ clk_writel(*ctx++, tegra_clk_cclk_g.reg + SUPER_CLK_DIVIDER);
+ clk_writel(*ctx++, tegra_clk_cclk_lp.reg);
+ clk_writel(*ctx++, tegra_clk_cclk_lp.reg + SUPER_CLK_DIVIDER);
+
+ clk_writel(*ctx++, tegra_clk_sclk.reg);
+ clk_writel(*ctx++, tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
+ clk_writel(*ctx++, tegra_clk_pclk.reg);
+
+ /* enable all clocks before configuring clock sources */
+ clk_writel(0xfdfffff1ul, CLK_OUT_ENB_L);
+ clk_writel(0xfefff7f7ul, CLK_OUT_ENB_H);
+ clk_writel(0x75f79bfful, CLK_OUT_ENB_U);
+ clk_writel(0xfffffffful, CLK_OUT_ENB_V);
+ clk_writel(0x00003ffful, CLK_OUT_ENB_W);
+ wmb();
+
+ for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
+ off += 4) {
+ if (off == PERIPH_CLK_SOURCE_EMC)
+ continue;
+ clk_writel(*ctx++, off);
+ }
+ for (off = PERIPH_CLK_SOURCE_G3D2; off <= PERIPH_CLK_SOURCE_SE;
+ off += 4) {
+ clk_writel(*ctx++, off);
+ }
+ for (off = AUDIO_DLY_CLK; off <= AUDIO_SYNC_CLK_SPDIF; off+=4) {
+ clk_writel(*ctx++, off);
+ }
+ wmb();
+
+ clk_writel(*ctx++, RST_DEVICES_L);
+ clk_writel(*ctx++, RST_DEVICES_H);
+ clk_writel(*ctx++, RST_DEVICES_U);
+
+ /* For LP0 resume, don't reset lpcpu, since we are running from it */
+ val = *ctx++;
+ val &= ~RST_DEVICES_V_SWR_CPULP_RST_DIS;
+ clk_writel(val, RST_DEVICES_V);
+
+ clk_writel(*ctx++, RST_DEVICES_W);
+ wmb();
+
+ clk_writel(*ctx++, CLK_OUT_ENB_L);
+ clk_writel(*ctx++, CLK_OUT_ENB_H);
+ clk_writel(*ctx++, CLK_OUT_ENB_U);
+
+ /* For LP0 resume, clk to lpcpu is required to be on */
+ val = *ctx++;
+ val |= CLK_OUT_ENB_V_CLK_ENB_CPULP_EN;
+ clk_writel(val, CLK_OUT_ENB_V);
+
+ clk_writel(*ctx++, CLK_OUT_ENB_W);
+ wmb();
+
+ clk_writel(*ctx++, MISC_CLK_ENB);
+ clk_writel(*ctx++, CLK_MASK_ARM);
+
+ /* Restore the saved pllc/a base values */
+ /* FIXME: need to root cause why pllc is required to be on
+ * clk_writel(pllc_base, tegra_pll_c.reg + PLL_BASE);
+ */
+ clk_writel(plla_base, tegra_pll_a.reg + PLL_BASE);
+ clk_writel(plld_base, tegra_pll_d.reg + PLL_BASE);
+ clk_writel(plld2_base, tegra_pll_d2.reg + PLL_BASE);
+
+ /* Since EMC clock is not restored, and may not preserve parent across
+ suspend, update current state, and mark EMC DFS as out of sync */
+ p = tegra_clk_emc.parent;
+ tegra3_periph_clk_init(&tegra_clk_emc);
+
+ if (p != tegra_clk_emc.parent) {
+ /* FIXME: old parent is left enabled here even if EMC was its
+ only child before suspend (never happens on Tegra3) */
+ pr_debug("EMC parent(refcount) across suspend: %s(%d) : %s(%d)",
+ p->name, p->refcnt, tegra_clk_emc.parent->name,
+ tegra_clk_emc.parent->refcnt);
+
+ BUG_ON(!p->refcnt);
+ p->refcnt--;
+
+ /* the new parent is enabled by low level code, but the ref count
+ needs to be updated up to the root */
+ p = tegra_clk_emc.parent;
+ while (p && ((p->refcnt++) == 0))
+ p = p->parent;
+ }
+ tegra_emc_timing_invalidate();
+
+ tegra3_pll_clk_init(&tegra_pll_u); /* Re-init utmi parameters */
+ tegra3_pll_clk_init(&tegra_pll_p); /* Fire a bug if not restored */
+}
+#else
+#define tegra_clk_suspend NULL
+#define tegra_clk_resume NULL
+#endif
+
+static struct syscore_ops tegra_clk_syscore_ops = {
+ .suspend = tegra_clk_suspend,
+ .resume = tegra_clk_resume,
+};
+
+void __init tegra_soc_init_clocks(void)
+{
+ int i;
+ struct clk *c;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++)
+ tegra3_init_one_clock(tegra_ptr_clks[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++)
+ tegra3_init_one_clock(&tegra_list_clks[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) {
+ c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name);
+ if (!c) {
+ pr_err("%s: Unknown duplicate clock %s\n", __func__,
+ tegra_clk_duplicates[i].name);
+ continue;
+ }
+
+ tegra_clk_duplicates[i].lookup.clk = c;
+ clkdev_add(&tegra_clk_duplicates[i].lookup);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_sync_source_list); i++)
+ tegra3_init_one_clock(&tegra_sync_source_list[i]);
+ for (i = 0; i < ARRAY_SIZE(tegra_clk_audio_list); i++)
+ tegra3_init_one_clock(&tegra_clk_audio_list[i]);
+ for (i = 0; i < ARRAY_SIZE(tegra_clk_audio_2x_list); i++)
+ tegra3_init_one_clock(&tegra_clk_audio_2x_list[i]);
+
+ init_clk_out_mux();
+ for (i = 0; i < ARRAY_SIZE(tegra_clk_out_list); i++)
+ tegra3_init_one_clock(&tegra_clk_out_list[i]);
+
+ emc_bridge = &tegra_clk_emc_bridge;
+
+ /* Initialize to default */
+ tegra_init_cpu_edp_limits(0);
+
+ register_syscore_ops(&tegra_clk_syscore_ops);
+}
diff --git a/arch/arm/mach-tegra/tegra3_dvfs.c b/arch/arm/mach-tegra/tegra3_dvfs.c
new file mode 100644
index 000000000000..f1f2e8fbf007
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_dvfs.c
@@ -0,0 +1,893 @@
+/*
+ * arch/arm/mach-tegra/tegra3_dvfs.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/kobject.h>
+#include <linux/err.h>
+
+#include "clock.h"
+#include "dvfs.h"
+#include "fuse.h"
+#include "board.h"
+#include "tegra3_emc.h"
+
+static bool tegra_dvfs_cpu_disabled;
+static bool tegra_dvfs_core_disabled;
+
+static const int cpu_millivolts[MAX_DVFS_FREQS] =
+ {800, 825, 850, 875, 900, 912, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1200, 1237};
+
+static const int core_millivolts[MAX_DVFS_FREQS] =
+ {1000, 1050, 1100, 1150, 1200, 1250, 1300};
+
+#define KHZ 1000
+#define MHZ 1000000
+
+/* VDD_CPU >= (VDD_CORE - cpu_below_core) */
+/* VDD_CORE >= min_level(VDD_CPU), see tegra3_get_core_floor_mv() below */
+#define VDD_CPU_BELOW_VDD_CORE 300
+static int cpu_below_core = VDD_CPU_BELOW_VDD_CORE;
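+
+/*
+ * For example, with VDD_CORE at 1200 mV the coupling above keeps VDD_CPU at
+ * or above 900 mV, while tegra3_get_core_floor_mv() below keeps VDD_CORE at
+ * or above the floor implied by the current VDD_CPU level.
+ */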
+
+#define VDD_SAFE_STEP 100
+
+static struct dvfs_rail tegra3_dvfs_rail_vdd_cpu = {
+ .reg_id = "vdd_cpu",
+ .max_millivolts = 1250,
+ .min_millivolts = 850,
+ .step = VDD_SAFE_STEP,
+ .jmp_to_zero = true,
+};
+
+static struct dvfs_rail tegra3_dvfs_rail_vdd_core = {
+ .reg_id = "vdd_core",
+ .max_millivolts = 1300,
+ .min_millivolts = 1000,
+ .step = VDD_SAFE_STEP,
+};
+
+static struct dvfs_rail *tegra3_dvfs_rails[] = {
+ &tegra3_dvfs_rail_vdd_cpu,
+ &tegra3_dvfs_rail_vdd_core,
+};
+
+static int tegra3_get_core_floor_mv(int cpu_mv)
+{
+ if (cpu_mv <= 825)
+ return 1000;
+ if (cpu_mv <= 975)
+ return 1100;
+ if ((tegra_cpu_speedo_id() < 2) ||
+ (tegra_cpu_speedo_id() == 4))
+ return 1200;
+ if (cpu_mv <= 1075)
+ return 1200;
+ if (cpu_mv <= 1250)
+ return 1300;
+ BUG();
+}
+
+/* vdd_core must be >= min_level as a function of vdd_cpu */
+static int tegra3_dvfs_rel_vdd_cpu_vdd_core(struct dvfs_rail *vdd_cpu,
+ struct dvfs_rail *vdd_core)
+{
+ int core_floor = max(vdd_cpu->new_millivolts, vdd_cpu->millivolts);
+ core_floor = tegra3_get_core_floor_mv(core_floor);
+ return max(vdd_core->new_millivolts, core_floor);
+}
+
+/* vdd_cpu must be >= (vdd_core - cpu_below_core) */
+static int tegra3_dvfs_rel_vdd_core_vdd_cpu(struct dvfs_rail *vdd_core,
+ struct dvfs_rail *vdd_cpu)
+{
+ int cpu_floor;
+
+ if (vdd_cpu->new_millivolts == 0)
+ return 0; /* If G CPU is off, core relations can be ignored */
+
+ cpu_floor = max(vdd_core->new_millivolts, vdd_core->millivolts) -
+ cpu_below_core;
+ return max(vdd_cpu->new_millivolts, cpu_floor);
+}
+
+static struct dvfs_relationship tegra3_dvfs_relationships[] = {
+ {
+ .from = &tegra3_dvfs_rail_vdd_cpu,
+ .to = &tegra3_dvfs_rail_vdd_core,
+ .solve = tegra3_dvfs_rel_vdd_cpu_vdd_core,
+ .solved_at_nominal = true,
+ },
+ {
+ .from = &tegra3_dvfs_rail_vdd_core,
+ .to = &tegra3_dvfs_rail_vdd_cpu,
+ .solve = tegra3_dvfs_rel_vdd_core_vdd_cpu,
+ },
+};
+
+#define CPU_DVFS(_clk_name, _speedo_id, _process_id, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .speedo_id = _speedo_id, \
+ .process_id = _process_id, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .millivolts = cpu_millivolts, \
+ .auto_dvfs = true, \
+ .dvfs_rail = &tegra3_dvfs_rail_vdd_cpu, \
+ }
+
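+/*
+ * Each CPU_DVFS row below lists, per entry of cpu_millivolts[] above, the
+ * maximum cpu_g rate in MHz for one speedo_id/process_id combination; rows
+ * with fewer entries simply stop scaling at the last listed voltage (the
+ * remaining freqs[] slots stay zero and terminate the ladder).
+ */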
+static struct dvfs cpu_dvfs_table[] = {
+ /* Cpu voltages (mV): 800, 825, 850, 875, 900, 912, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1200, 1237 */
+ CPU_DVFS("cpu_g", 0, 0, MHZ, 1, 1, 684, 684, 817, 817, 817, 817, 1026, 1102, 1149, 1187, 1225, 1282, 1300),
+ CPU_DVFS("cpu_g", 0, 1, MHZ, 1, 1, 807, 807, 948, 948, 948, 948, 1117, 1171, 1206, 1300),
+ CPU_DVFS("cpu_g", 0, 2, MHZ, 1, 1, 883, 883, 1039, 1039, 1039, 1039, 1178, 1206, 1300),
+ CPU_DVFS("cpu_g", 0, 3, MHZ, 1, 1, 931, 931, 1102, 1102, 1102, 1102, 1216, 1300),
+
+ CPU_DVFS("cpu_g", 1, 0, MHZ, 1, 1, 550, 550, 680, 680, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1280, 1300),
+ CPU_DVFS("cpu_g", 1, 1, MHZ, 1, 1, 650, 650, 820, 820, 820, 820, 1000, 1060, 1100, 1200, 1300),
+ CPU_DVFS("cpu_g", 1, 2, MHZ, 1, 1, 720, 720, 880, 880, 880, 880, 1090, 1180, 1200, 1300),
+ CPU_DVFS("cpu_g", 1, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1230, 1300),
+
+ CPU_DVFS("cpu_g", 2, 1, MHZ, 1, 1, 650, 650, 820, 820, 820, 820, 1000, 1060, 1100, 1200, 1250, 1300, 1330, 1400),
+ CPU_DVFS("cpu_g", 2, 2, MHZ, 1, 1, 720, 720, 880, 880, 880, 880, 1090, 1180, 1200, 1300, 1310, 1350, 1400),
+ CPU_DVFS("cpu_g", 2, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1230, 1300, 1320, 1350, 1400),
+
+ CPU_DVFS("cpu_g", 3, 1, MHZ, 1, 1, 650, 650, 820, 820, 820, 820, 1000, 1060, 1100, 1200, 1250, 1300, 1330, 1400),
+ CPU_DVFS("cpu_g", 3, 2, MHZ, 1, 1, 720, 720, 880, 880, 880, 880, 1090, 1180, 1200, 1300, 1310, 1350, 1400),
+ CPU_DVFS("cpu_g", 3, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1230, 1300, 1320, 1350, 1400),
+
+ CPU_DVFS("cpu_g", 4, 0, MHZ, 1, 1, 550, 550, 680, 680, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1280, 1350, 1400, 1500),
+ CPU_DVFS("cpu_g", 4, 1, MHZ, 1, 1, 650, 650, 820, 820, 820, 820, 1000, 1060, 1100, 1200, 1250, 1300, 1360, 1400, 1500),
+ CPU_DVFS("cpu_g", 4, 2, MHZ, 1, 1, 720, 720, 880, 880, 880, 880, 1090, 1180, 1200, 1300, 1310, 1380, 1400, 1500),
+ CPU_DVFS("cpu_g", 4, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1230, 1300, 1330, 1380, 1400, 1500),
+
+ CPU_DVFS("cpu_g", 5, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1230, 1300, 1330, 1380, 1400, 1470, 1500, 1540, 1700),
+ CPU_DVFS("cpu_g", 5, 4, MHZ, 1, 1, 840, 840, 1000, 1000, 1000, 1000, 1200, 1280, 1330, 1380, 1400, 1480, 1500, 1520, 1700),
+
+ CPU_DVFS("cpu_g", 6, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1230, 1300, 1330, 1380, 1400, 1470, 1500, 1540, 1700),
+ CPU_DVFS("cpu_g", 6, 4, MHZ, 1, 1, 840, 840, 1000, 1000, 1000, 1000, 1200, 1280, 1330, 1380, 1400, 1480, 1500, 1520, 1700),
+
+ CPU_DVFS("cpu_g", 7, 0, MHZ, 1, 1, 550, 550, 680, 680, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1280, 1300),
+ CPU_DVFS("cpu_g", 7, 1, MHZ, 1, 1, 650, 650, 820, 820, 820, 820, 1000, 1060, 1100, 1200, 1300),
+ CPU_DVFS("cpu_g", 7, 2, MHZ, 1, 1, 720, 720, 880, 880, 880, 880, 1090, 1180, 1200, 1300),
+ CPU_DVFS("cpu_g", 7, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1200, 1300),
+ CPU_DVFS("cpu_g", 7, 4, MHZ, 1, 1, 840, 840, 1000, 1000, 1000, 1000, 1200, 1300),
+
+ CPU_DVFS("cpu_g", 8, 0, MHZ, 1, 1, 550, 550, 680, 680, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1280, 1300),
+ CPU_DVFS("cpu_g", 8, 1, MHZ, 1, 1, 650, 650, 820, 820, 820, 820, 1000, 1060, 1100, 1200, 1300),
+ CPU_DVFS("cpu_g", 8, 2, MHZ, 1, 1, 720, 720, 880, 880, 880, 880, 1090, 1180, 1200, 1300),
+ CPU_DVFS("cpu_g", 8, 3, MHZ, 1, 1, 800, 800, 1000, 1000, 1000, 1000, 1180, 1200, 1300),
+ CPU_DVFS("cpu_g", 8, 4, MHZ, 1, 1, 840, 840, 1000, 1000, 1000, 1000, 1200, 1300),
+
+ CPU_DVFS("cpu_g", 9, -1, MHZ, 1, 1, 1, 1, 1, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900),
+ CPU_DVFS("cpu_g", 10, -1, MHZ, 1, 1, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900),
+ CPU_DVFS("cpu_g", 11, -1, MHZ, 1, 1, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600),
+
+ /*
+ * "Safe entry" used when no match for the chip's speedo/process corner is
+ * found (just to boot at a low rate); must be the last entry
+ */
+ CPU_DVFS("cpu_g", -1, -1, MHZ, 1, 1, 216, 216, 300),
+};
+
+#define CORE_DVFS(_clk_name, _speedo_id, _auto, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .speedo_id = _speedo_id, \
+ .process_id = -1, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .millivolts = core_millivolts, \
+ .auto_dvfs = _auto, \
+ .dvfs_rail = &tegra3_dvfs_rail_vdd_core, \
+ }
+
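+/*
+ * Core DVFS rows are indexed by core_millivolts[] above, with rates in kHz;
+ * entries of 1 appear to be placeholders for rates that are not usable at
+ * that voltage step rather than real 1 kHz limits.
+ */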
+static struct dvfs core_dvfs_table[] = {
+ /* Core voltages (mV): 1000, 1050, 1100, 1150, 1200, 1250, 1300 */
+ /* Clock limits for internal blocks, PLLs */
+ CORE_DVFS("cpu_lp", 0, 1, KHZ, 294000, 342000, 427000, 475000, 500000, 500000, 500000),
+ CORE_DVFS("cpu_lp", 1, 1, KHZ, 294000, 342000, 427000, 475000, 500000, 500000, 500000),
+ CORE_DVFS("cpu_lp", 2, 1, KHZ, 295000, 370000, 428000, 475000, 513000, 579000, 620000),
+ CORE_DVFS("cpu_lp", 3, 1, KHZ, 1, 1, 1, 1, 1, 450000, 450000),
+
+ CORE_DVFS("emc", 0, 1, KHZ, 266500, 266500, 266500, 266500, 533000, 533000, 533000),
+ CORE_DVFS("emc", 1, 1, KHZ, 408000, 408000, 408000, 408000, 667000, 667000, 667000),
+ CORE_DVFS("emc", 2, 1, KHZ, 408000, 408000, 408000, 408000, 667000, 667000, 800000),
+ CORE_DVFS("emc", 3, 1, KHZ, 1, 1, 1, 1, 1, 625000, 625000),
+
+ CORE_DVFS("sbus", 0, 1, KHZ, 136000, 164000, 191000, 216000, 216000, 216000, 216000),
+ CORE_DVFS("sbus", 1, 1, KHZ, 205000, 205000, 227000, 227000, 267000, 267000, 267000),
+ CORE_DVFS("sbus", 2, 1, KHZ, 205000, 205000, 227000, 227000, 267000, 334000, 334000),
+ CORE_DVFS("sbus", 3, 1, KHZ, 1, 1, 1, 1, 1, 216000, 216000),
+
+ CORE_DVFS("vi", 0, 1, KHZ, 216000, 285000, 300000, 300000, 300000, 300000, 300000),
+ CORE_DVFS("vi", 1, 1, KHZ, 216000, 267000, 300000, 371000, 409000, 409000, 409000),
+ CORE_DVFS("vi", 2, 1, KHZ, 219000, 267000, 300000, 371000, 409000, 425000, 425000),
+ CORE_DVFS("vi", 3, 1, KHZ, 1, 1, 1, 1, 1, 300000, 300000),
+
+ CORE_DVFS("vde", 0, 1, KHZ, 228000, 275000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("mpe", 0, 1, KHZ, 234000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("2d", 0, 1, KHZ, 267000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("epp", 0, 1, KHZ, 267000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("3d", 0, 1, KHZ, 234000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("3d2", 0, 1, KHZ, 234000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("se", 0, 1, KHZ, 267000, 285000, 332000, 380000, 416000, 416000, 416000),
+
+ CORE_DVFS("vde", 1, 1, KHZ, 228000, 275000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("mpe", 1, 1, KHZ, 234000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("2d", 1, 1, KHZ, 267000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("epp", 1, 1, KHZ, 267000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("3d", 1, 1, KHZ, 234000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("3d2", 1, 1, KHZ, 234000, 285000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("se", 1, 1, KHZ, 267000, 285000, 332000, 380000, 416000, 416000, 416000),
+
+ CORE_DVFS("vde", 2, 1, KHZ, 247000, 304000, 352000, 400000, 437000, 484000, 520000),
+ CORE_DVFS("mpe", 2, 1, KHZ, 247000, 304000, 361000, 408000, 446000, 484000, 520000),
+ CORE_DVFS("2d", 2, 1, KHZ, 267000, 304000, 361000, 408000, 446000, 484000, 520000),
+ CORE_DVFS("epp", 2, 1, KHZ, 267000, 304000, 361000, 408000, 446000, 484000, 520000),
+ CORE_DVFS("3d", 2, 1, KHZ, 247000, 304000, 361000, 408000, 446000, 484000, 520000),
+ CORE_DVFS("3d2", 2, 1, KHZ, 247000, 304000, 361000, 408000, 446000, 484000, 520000),
+ CORE_DVFS("se", 2, 1, KHZ, 267000, 304000, 361000, 408000, 446000, 484000, 520000),
+
+ CORE_DVFS("vde", 3, 1, KHZ, 1, 1, 1, 1, 1, 484000, 484000),
+ CORE_DVFS("mpe", 3, 1, KHZ, 1, 1, 1, 1, 1, 484000, 484000),
+ CORE_DVFS("2d", 3, 1, KHZ, 1, 1, 1, 1, 1, 484000, 484000),
+ CORE_DVFS("epp", 3, 1, KHZ, 1, 1, 1, 1, 1, 484000, 484000),
+ CORE_DVFS("3d", 3, 1, KHZ, 1, 1, 1, 1, 1, 484000, 484000),
+ CORE_DVFS("3d2", 3, 1, KHZ, 1, 1, 1, 1, 1, 484000, 484000),
+ CORE_DVFS("se", 3, 1, KHZ, 1, 1, 1, 1, 1, 650000, 650000),
+
+ CORE_DVFS("host1x", 0, 1, KHZ, 152000, 188000, 222000, 254000, 267000, 267000, 267000),
+ CORE_DVFS("host1x", 1, 1, KHZ, 152000, 188000, 222000, 254000, 267000, 267000, 267000),
+ CORE_DVFS("host1x", 2, 1, KHZ, 152000, 188000, 222000, 254000, 267000, 267000, 267000),
+ CORE_DVFS("host1x", 3, 1, KHZ, 1, 1, 1, 1, 1, 300000, 300000),
+
+ CORE_DVFS("cbus", 0, 1, KHZ, 228000, 275000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("cbus", 1, 1, KHZ, 228000, 275000, 332000, 380000, 416000, 416000, 416000),
+ CORE_DVFS("cbus", 2, 1, KHZ, 247000, 304000, 352000, 400000, 437000, 484000, 520000),
+ CORE_DVFS("cbus", 3, 1, KHZ, 484000, 484000, 484000, 484000, 484000, 484000, 484000),
+
+ CORE_DVFS("pll_c", -1, 1, KHZ, 667000, 667000, 800000, 800000, 1066000, 1066000, 1066000),
+ CORE_DVFS("pll_m", -1, 1, KHZ, 667000, 667000, 800000, 800000, 1066000, 1066000, 1066000),
+
+ /* Core voltages (mV): 1000, 1050, 1100, 1150, 1200, 1250, 1300 */
+ /* Clock limits for I/O peripherals */
+ CORE_DVFS("mipi", 0, 1, KHZ, 1, 1, 1, 1, 1, 1, 1),
+ CORE_DVFS("mipi", 1, 1, KHZ, 1, 1, 1, 1, 60000, 60000, 60000),
+ CORE_DVFS("mipi", 2, 1, KHZ, 1, 1, 1, 1, 60000, 60000, 60000),
+ CORE_DVFS("mipi", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 1),
+
+ CORE_DVFS("fuse_burn", -1, 1, KHZ, 1, 1, 1, 26000, 26000, 26000, 26000),
+ CORE_DVFS("sdmmc1",-1, 1, KHZ, 104000, 104000, 104000, 104000, 208000, 208000, 208000),
+ CORE_DVFS("sdmmc3",-1, 1, KHZ, 104000, 104000, 104000, 104000, 208000, 208000, 208000),
+ CORE_DVFS("ndflash", -1, 1, KHZ, 120000, 120000, 120000, 200000, 200000, 200000, 200000),
+
+ CORE_DVFS("nor", 0, 1, KHZ, 115000, 130000, 130000, 133000, 133000, 133000, 133000),
+ CORE_DVFS("nor", 1, 1, KHZ, 115000, 130000, 130000, 133000, 133000, 133000, 133000),
+ CORE_DVFS("nor", 2, 1, KHZ, 115000, 130000, 130000, 133000, 133000, 133000, 133000),
+ CORE_DVFS("nor", 3, 1, KHZ, 1, 1, 1, 1, 1, 108000, 108000),
+
+ CORE_DVFS("sbc1", -1, 1, KHZ, 40000, 60000, 60000, 60000, 100000, 100000, 100000),
+ CORE_DVFS("sbc2", -1, 1, KHZ, 40000, 60000, 60000, 60000, 100000, 100000, 100000),
+ CORE_DVFS("sbc3", -1, 1, KHZ, 40000, 60000, 60000, 60000, 100000, 100000, 100000),
+ CORE_DVFS("sbc4", -1, 1, KHZ, 40000, 60000, 60000, 60000, 100000, 100000, 100000),
+ CORE_DVFS("sbc5", -1, 1, KHZ, 40000, 60000, 60000, 60000, 100000, 100000, 100000),
+ CORE_DVFS("sbc6", -1, 1, KHZ, 40000, 60000, 60000, 60000, 100000, 100000, 100000),
+
+ CORE_DVFS("tvo", -1, 1, KHZ, 1, 297000, 297000, 297000, 297000, 297000, 297000),
+ CORE_DVFS("cve", -1, 1, KHZ, 1, 297000, 297000, 297000, 297000, 297000, 297000),
+ CORE_DVFS("dsia", -1, 1, KHZ, 275000, 275000, 275000, 275000, 275000, 275000, 275000),
+ CORE_DVFS("dsib", -1, 1, KHZ, 275000, 275000, 275000, 275000, 275000, 275000, 275000),
+
+ /*
+ * The clock rate for the display controllers that determines the
+ * necessary core voltage depends on a divider that is internal
+ * to the display block. Disable auto-dvfs on the display clocks,
+ * and let the display driver call tegra_dvfs_set_rate manually
+ */
+ CORE_DVFS("disp1", 0, 0, KHZ, 120000, 120000, 120000, 120000, 190000, 190000, 190000),
+ CORE_DVFS("disp1", 1, 0, KHZ, 151000, 268000, 268000, 268000, 268000, 268000, 268000),
+ CORE_DVFS("disp1", 2, 0, KHZ, 151000, 268000, 268000, 268000, 268000, 268000, 268000),
+ CORE_DVFS("disp1", 3, 0, KHZ, 120000, 120000, 120000, 120000, 190000, 190000, 190000),
+
+ CORE_DVFS("disp2", 0, 0, KHZ, 120000, 120000, 120000, 120000, 190000, 190000, 190000),
+ CORE_DVFS("disp2", 1, 0, KHZ, 151000, 268000, 268000, 268000, 268000, 268000, 268000),
+ CORE_DVFS("disp2", 2, 0, KHZ, 151000, 268000, 268000, 268000, 268000, 268000, 268000),
+ CORE_DVFS("disp2", 3, 0, KHZ, 120000, 120000, 120000, 120000, 190000, 190000, 190000),
+};
+
+
+int tegra_dvfs_disable_core_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(arg, kp);
+ if (ret)
+ return ret;
+
+ if (tegra_dvfs_core_disabled)
+ tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_core);
+ else
+ tegra_dvfs_rail_enable(&tegra3_dvfs_rail_vdd_core);
+
+ return 0;
+}
+
+int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(arg, kp);
+ if (ret)
+ return ret;
+
+ if (tegra_dvfs_cpu_disabled)
+ tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_cpu);
+ else
+ tegra_dvfs_rail_enable(&tegra3_dvfs_rail_vdd_cpu);
+
+ return 0;
+}
+
+int tegra_dvfs_disable_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops tegra_dvfs_disable_core_ops = {
+ .set = tegra_dvfs_disable_core_set,
+ .get = tegra_dvfs_disable_get,
+};
+
+static struct kernel_param_ops tegra_dvfs_disable_cpu_ops = {
+ .set = tegra_dvfs_disable_cpu_set,
+ .get = tegra_dvfs_disable_get,
+};
+
+module_param_cb(disable_core, &tegra_dvfs_disable_core_ops,
+ &tegra_dvfs_core_disabled, 0644);
+module_param_cb(disable_cpu, &tegra_dvfs_disable_cpu_ops,
+ &tegra_dvfs_cpu_disabled, 0644);
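+
+/*
+ * With the default KBUILD_MODNAME these knobs are typically exposed as
+ * /sys/module/tegra3_dvfs/parameters/disable_cpu and .../disable_core;
+ * writing 1 disables DVFS on the corresponding rail at run time.
+ */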
+
+static bool __init is_pllm_dvfs(struct clk *c, struct dvfs *d)
+{
+#ifdef CONFIG_TEGRA_PLLM_RESTRICTED
+ /* Restricting PLLM usage on T30 and T33, rev A02+, allows the maximum
+ PLLM frequency to be applied to the clock tree at minimum core voltage;
+ there is no need to enable dvfs on PLLM in this case */
+ if ((tegra_cpu_speedo_id() == 2) || (tegra_cpu_speedo_id() == 5))
+ return false;
+#endif
+ /* Check if PLLM boot frequency can be applied to clock tree at
+ minimum voltage. If yes, no need to enable dvfs on PLLM */
+ if (clk_get_rate_all_locked(c) <= d->freqs[0] * d->freqs_mult)
+ return false;
+
+ return true;
+}
+
+static void __init init_dvfs_one(struct dvfs *d, int nominal_mv_index)
+{
+ int ret;
+ struct clk *c = tegra_get_clock_by_name(d->clk_name);
+
+ if (!c) {
+ pr_debug("tegra3_dvfs: no clock found for %s\n",
+ d->clk_name);
+ return;
+ }
+
+ /*
+ * Update max rate for auto-dvfs clocks, except EMC.
+ * EMC is a special case, since EMC dvfs is board dependent: max rate
+ * and EMC scaling frequencies are determined by the tegra BCT (flashed
+ * together with the image) and a board-specific EMC DFS table; we will
+ * check the scaling ladder against nominal core voltage when the table is
+ * loaded (and if the table is not loaded on a particular board, EMC
+ * scaling is disabled).
+ */
+ if (!(c->flags & PERIPH_EMC_ENB) && d->auto_dvfs) {
+ BUG_ON(!d->freqs[nominal_mv_index]);
+ tegra_init_max_rate(
+ c, d->freqs[nominal_mv_index] * d->freqs_mult);
+ }
+ d->max_millivolts = d->dvfs_rail->nominal_millivolts;
+
+ /*
+ * Check if we may skip enabling dvfs on PLLM. PLLM is a special case,
+ * since its frequency never exceeds boot rate, and configuration with
+ * restricted PLLM usage is possible.
+ */
+ if (!(c->flags & PLLM) || is_pllm_dvfs(c, d)) {
+ ret = tegra_enable_dvfs_on_clk(c, d);
+ if (ret)
+ pr_err("tegra3_dvfs: failed to enable dvfs on %s\n",
+ c->name);
+ }
+}
+
+static bool __init match_dvfs_one(struct dvfs *d, int speedo_id, int process_id)
+{
+ if ((d->process_id != -1 && d->process_id != process_id) ||
+ (d->speedo_id != -1 && d->speedo_id != speedo_id)) {
+ pr_debug("tegra3_dvfs: rejected %s speedo %d,"
+ " process %d\n", d->clk_name, d->speedo_id,
+ d->process_id);
+ return false;
+ }
+ return true;
+}
+
+static int __init get_cpu_nominal_mv_index(
+ int speedo_id, int process_id, struct dvfs **cpu_dvfs)
+{
+ int i, j, mv;
+ struct dvfs *d;
+ struct clk *c;
+
+ /*
+ * Find maximum cpu voltage that satisfies cpu_to_core dependency for
+ * nominal core voltage ("solve from cpu to core at nominal"). Clip
+ * result to the nominal cpu level for the chips with this speedo_id.
+ */
+ mv = tegra3_dvfs_rail_vdd_core.nominal_millivolts;
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ if ((cpu_millivolts[i] == 0) ||
+ tegra3_get_core_floor_mv(cpu_millivolts[i]) > mv)
+ break;
+ }
+ BUG_ON(i == 0);
+ mv = cpu_millivolts[i - 1];
+ BUG_ON(mv < tegra3_dvfs_rail_vdd_cpu.min_millivolts);
+ mv = min(mv, tegra_cpu_speedo_mv());
+
+ /*
+ * Find matching cpu dvfs entry, and use it to determine index to the
+ * final nominal voltage, that satisfies the following requirements:
+ * - allows CPU to run at minimum of the maximum rates specified in
+ * the dvfs entry and clock tree
+ * - does not violate cpu_to_core dependency as determined above
+ */
+ for (i = 0, j = 0; j < ARRAY_SIZE(cpu_dvfs_table); j++) {
+ d = &cpu_dvfs_table[j];
+ if (match_dvfs_one(d, speedo_id, process_id)) {
+ c = tegra_get_clock_by_name(d->clk_name);
+ BUG_ON(!c);
+
+ for (; i < MAX_DVFS_FREQS; i++) {
+ if ((d->freqs[i] == 0) ||
+ (cpu_millivolts[i] == 0) ||
+ (mv < cpu_millivolts[i]))
+ break;
+
+ if (c->max_rate <= d->freqs[i]*d->freqs_mult) {
+ i++;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ BUG_ON(i == 0);
+ if (j == (ARRAY_SIZE(cpu_dvfs_table) - 1))
+ pr_err("tegra3_dvfs: WARNING!!!\n"
+ "tegra3_dvfs: no cpu dvfs table found for chip speedo_id"
+ " %d and process_id %d: set CPU rate limit at %lu\n"
+ "tegra3_dvfs: WARNING!!!\n",
+ speedo_id, process_id, d->freqs[i-1] * d->freqs_mult);
+
+ *cpu_dvfs = d;
+ return (i - 1);
+}
+
+static int __init get_core_nominal_mv_index(int speedo_id)
+{
+ int i;
+ int mv = tegra_core_speedo_mv();
+ int core_edp_limit = get_core_edp();
+
+ /*
+ * Start with nominal level for the chips with this speedo_id. Then,
+ * make sure core nominal voltage is below edp limit for the board
+ * (if edp limit is set).
+ */
+ if (core_edp_limit)
+ mv = min(mv, core_edp_limit);
+
+ /* Round nominal level down to the nearest core scaling step */
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ if ((core_millivolts[i] == 0) || (mv < core_millivolts[i]))
+ break;
+ }
+
+ if (i == 0) {
+ pr_err("tegra3_dvfs: unable to adjust core dvfs table to"
+ " nominal voltage %d\n", mv);
+ return -ENOSYS;
+ }
+ return (i - 1);
+}
+
+void __init tegra_soc_init_dvfs(void)
+{
+ int cpu_speedo_id = tegra_cpu_speedo_id();
+ int soc_speedo_id = tegra_soc_speedo_id();
+ int cpu_process_id = tegra_cpu_process_id();
+ int core_process_id = tegra_core_process_id();
+
+ int i;
+ int core_nominal_mv_index;
+ int cpu_nominal_mv_index;
+ struct dvfs *cpu_dvfs = NULL;
+
+#ifndef CONFIG_TEGRA_CORE_DVFS
+ tegra_dvfs_core_disabled = true;
+#endif
+#ifndef CONFIG_TEGRA_CPU_DVFS
+ tegra_dvfs_cpu_disabled = true;
+#endif
+
+ /*
+ * Find nominal voltages for core (1st) and cpu rails before rail
+ * init. Nominal voltage index in the scaling ladder will also be
+ * used to determine max dvfs frequency for the respective domains.
+ */
+ core_nominal_mv_index = get_core_nominal_mv_index(soc_speedo_id);
+ if (core_nominal_mv_index < 0) {
+ tegra3_dvfs_rail_vdd_core.disabled = true;
+ tegra_dvfs_core_disabled = true;
+ core_nominal_mv_index = 0;
+ }
+ tegra3_dvfs_rail_vdd_core.nominal_millivolts =
+ core_millivolts[core_nominal_mv_index];
+
+ cpu_nominal_mv_index = get_cpu_nominal_mv_index(
+ cpu_speedo_id, cpu_process_id, &cpu_dvfs);
+ BUG_ON((cpu_nominal_mv_index < 0) || (!cpu_dvfs));
+ tegra3_dvfs_rail_vdd_cpu.nominal_millivolts =
+ cpu_millivolts[cpu_nominal_mv_index];
+
+ /* Init rail structures and dependencies */
+ tegra_dvfs_init_rails(tegra3_dvfs_rails, ARRAY_SIZE(tegra3_dvfs_rails));
+ tegra_dvfs_add_relationships(tegra3_dvfs_relationships,
+ ARRAY_SIZE(tegra3_dvfs_relationships));
+
+ /* Search core dvfs table for speedo/process matching entries and
+ initialize dvfs-ed clocks */
+ for (i = 0; i < ARRAY_SIZE(core_dvfs_table); i++) {
+ struct dvfs *d = &core_dvfs_table[i];
+ if (!match_dvfs_one(d, soc_speedo_id, core_process_id))
+ continue;
+ init_dvfs_one(d, core_nominal_mv_index);
+ }
+
+ /* Initialize matching cpu dvfs entry already found when nominal
+ voltage was determined */
+ init_dvfs_one(cpu_dvfs, cpu_nominal_mv_index);
+
+ /* Finally disable dvfs on rails if necessary */
+ if (tegra_dvfs_core_disabled)
+ tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_core);
+ if (tegra_dvfs_cpu_disabled)
+ tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_cpu);
+
+ pr_info("tegra dvfs: VDD_CPU nominal %dmV, scaling %s\n",
+ tegra3_dvfs_rail_vdd_cpu.nominal_millivolts,
+ tegra_dvfs_cpu_disabled ? "disabled" : "enabled");
+ pr_info("tegra dvfs: VDD_CORE nominal %dmV, scaling %s\n",
+ tegra3_dvfs_rail_vdd_core.nominal_millivolts,
+ tegra_dvfs_core_disabled ? "disabled" : "enabled");
+}
+
+int tegra_dvfs_rail_disable_prepare(struct dvfs_rail *rail)
+{
+ int ret = 0;
+
+ if (tegra_emc_get_dram_type() != DRAM_TYPE_DDR3)
+ return ret;
+
+ if (((&tegra3_dvfs_rail_vdd_core == rail) &&
+ (rail->nominal_millivolts > TEGRA_EMC_BRIDGE_MVOLTS_MIN)) ||
+ ((&tegra3_dvfs_rail_vdd_cpu == rail) &&
+ (tegra3_get_core_floor_mv(rail->nominal_millivolts) >
+ TEGRA_EMC_BRIDGE_MVOLTS_MIN))) {
+ struct clk *bridge = tegra_get_clock_by_name("bridge.emc");
+ BUG_ON(!bridge);
+
+ ret = clk_enable(bridge);
+ pr_info("%s: %s: %s bridge.emc\n", __func__,
+ rail->reg_id, ret ? "failed to enable" : "enabled");
+ }
+ return ret;
+}
+
+int tegra_dvfs_rail_post_enable(struct dvfs_rail *rail)
+{
+ if (tegra_emc_get_dram_type() != DRAM_TYPE_DDR3)
+ return 0;
+
+ if (((&tegra3_dvfs_rail_vdd_core == rail) &&
+ (rail->nominal_millivolts > TEGRA_EMC_BRIDGE_MVOLTS_MIN)) ||
+ ((&tegra3_dvfs_rail_vdd_cpu == rail) &&
+ (tegra3_get_core_floor_mv(rail->nominal_millivolts) >
+ TEGRA_EMC_BRIDGE_MVOLTS_MIN))) {
+ struct clk *bridge = tegra_get_clock_by_name("bridge.emc");
+ BUG_ON(!bridge);
+
+ clk_disable(bridge);
+ pr_info("%s: %s: disabled bridge.emc\n",
+ __func__, rail->reg_id);
+ }
+ return 0;
+}
+
+/*
+ * sysfs and dvfs interfaces to cap tegra core domains frequencies
+ */
+static DEFINE_MUTEX(core_cap_lock);
+
+struct core_cap {
+ int refcnt;
+ int level;
+};
+static struct core_cap tegra3_core_cap;
+static struct core_cap kdvfs_core_cap;
+static struct core_cap user_core_cap;
+
+static struct kobject *cap_kobj;
+
+/* Arranged in order required for enabling/lowering the cap */
+static struct {
+ const char *cap_name;
+ struct clk *cap_clk;
+ unsigned long freqs[MAX_DVFS_FREQS];
+} core_cap_table[] = {
+ { .cap_name = "cap.cbus" },
+ { .cap_name = "cap.sclk" },
+ { .cap_name = "cap.emc" },
+};
+
+
+static void core_cap_level_set(int level)
+{
+ int i, j;
+
+ for (j = 0; j < ARRAY_SIZE(core_millivolts); j++) {
+ int v = core_millivolts[j];
+ if ((v == 0) || (level < v))
+ break;
+ }
+ j = (j == 0) ? 0 : j - 1;
+ level = core_millivolts[j];
+
+ if (level < tegra3_core_cap.level) {
+ for (i = 0; i < ARRAY_SIZE(core_cap_table); i++)
+ if (core_cap_table[i].cap_clk)
+ clk_set_rate(core_cap_table[i].cap_clk,
+ core_cap_table[i].freqs[j]);
+ } else if (level > tegra3_core_cap.level) {
+ for (i = ARRAY_SIZE(core_cap_table) - 1; i >= 0; i--)
+ if (core_cap_table[i].cap_clk)
+ clk_set_rate(core_cap_table[i].cap_clk,
+ core_cap_table[i].freqs[j]);
+ }
+ tegra3_core_cap.level = level;
+}
+
+static void core_cap_update(void)
+{
+ int new_level = tegra3_dvfs_rail_vdd_core.max_millivolts;
+
+ if (kdvfs_core_cap.refcnt)
+ new_level = min(new_level, kdvfs_core_cap.level);
+ if (user_core_cap.refcnt)
+ new_level = min(new_level, user_core_cap.level);
+
+ if (tegra3_core_cap.level != new_level)
+ core_cap_level_set(new_level);
+}
+
+static void core_cap_enable(bool enable)
+{
+ int i;
+
+ if (enable) {
+ tegra3_core_cap.refcnt++;
+ if (tegra3_core_cap.refcnt == 1)
+ for (i = 0; i < ARRAY_SIZE(core_cap_table); i++)
+ if (core_cap_table[i].cap_clk)
+ clk_enable(core_cap_table[i].cap_clk);
+ } else if (tegra3_core_cap.refcnt) {
+ tegra3_core_cap.refcnt--;
+ if (tegra3_core_cap.refcnt == 0)
+ for (i = ARRAY_SIZE(core_cap_table) - 1; i >= 0; i--)
+ if (core_cap_table[i].cap_clk)
+ clk_disable(core_cap_table[i].cap_clk);
+ }
+ core_cap_update();
+}
+
+static ssize_t
+core_cap_state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d (%d)\n", tegra3_core_cap.refcnt ? 1 : 0,
+ user_core_cap.refcnt ? 1 : 0);
+}
+static ssize_t
+core_cap_state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int state;
+
+ if (sscanf(buf, "%d", &state) != 1)
+ return -1;
+
+ mutex_lock(&core_cap_lock);
+
+ if (state) {
+ user_core_cap.refcnt++;
+ if (user_core_cap.refcnt == 1)
+ core_cap_enable(true);
+ } else if (user_core_cap.refcnt) {
+ user_core_cap.refcnt--;
+ if (user_core_cap.refcnt == 0)
+ core_cap_enable(false);
+ }
+
+ mutex_unlock(&core_cap_lock);
+ return count;
+}
+
+static ssize_t
+core_cap_level_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d (%d)\n", tegra3_core_cap.level,
+ user_core_cap.level);
+}
+static ssize_t
+core_cap_level_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int level;
+
+ if (sscanf(buf, "%d", &level) != 1)
+ return -1;
+
+ mutex_lock(&core_cap_lock);
+ user_core_cap.level = level;
+ core_cap_update();
+ mutex_unlock(&core_cap_lock);
+ return count;
+}
+
+static struct kobj_attribute cap_state_attribute =
+ __ATTR(core_cap_state, 0644, core_cap_state_show, core_cap_state_store);
+static struct kobj_attribute cap_level_attribute =
+ __ATTR(core_cap_level, 0644, core_cap_level_show, core_cap_level_store);
+
+const struct attribute *cap_attributes[] = {
+ &cap_state_attribute.attr,
+ &cap_level_attribute.attr,
+ NULL,
+};
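+
+/*
+ * Once tegra_dvfs_init_core_cap() has run, the files above live under
+ * /sys/kernel/tegra_cap/, so user space can, for instance, do
+ *   echo 1 > /sys/kernel/tegra_cap/core_cap_state
+ *   echo 1150 > /sys/kernel/tegra_cap/core_cap_level
+ * to enable the cap and limit the shared buses to the rates allowed at
+ * 1150 mV on the core rail.
+ */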
+
+void tegra_dvfs_core_cap_enable(bool enable)
+{
+ mutex_lock(&core_cap_lock);
+
+ if (enable) {
+ kdvfs_core_cap.refcnt++;
+ if (kdvfs_core_cap.refcnt == 1)
+ core_cap_enable(true);
+ } else if (kdvfs_core_cap.refcnt) {
+ kdvfs_core_cap.refcnt--;
+ if (kdvfs_core_cap.refcnt == 0)
+ core_cap_enable(false);
+ }
+ mutex_unlock(&core_cap_lock);
+}
+
+void tegra_dvfs_core_cap_level_set(int level)
+{
+ mutex_lock(&core_cap_lock);
+ kdvfs_core_cap.level = level;
+ core_cap_update();
+ mutex_unlock(&core_cap_lock);
+}
+
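+/*
+ * Build the frequency table for one cap clock: for each core voltage step
+ * walk the clock's rate ladder via clk_round_rate() and record the highest
+ * rate whose predicted parent voltage still fits within that step.
+ */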
+static int __init init_core_cap_one(struct clk *c, unsigned long *freqs)
+{
+ int i, v, next_v;
+ unsigned long rate, next_rate = 0;
+
+ for (i = 0; i < ARRAY_SIZE(core_millivolts); i++) {
+ v = core_millivolts[i];
+ if (v == 0)
+ break;
+
+ for (;;) {
+ rate = next_rate;
+ next_rate = clk_round_rate(c, rate + 1000);
+ if (IS_ERR_VALUE(next_rate)) {
+ pr_debug("tegra3_dvfs: failed to round %s"
+ " rate %lu", c->name, rate);
+ return -EINVAL;
+ }
+ if (rate == next_rate)
+ break;
+
+ next_v = tegra_dvfs_predict_millivolts(
+ c->parent, next_rate);
+ if (IS_ERR_VALUE(next_v)) {
+ pr_debug("tegra3_dvfs: failed to predict %s mV"
+ " for rate %lu", c->name, next_rate);
+ return -EINVAL;
+ }
+ if (next_v > v)
+ break;
+ }
+
+ if (rate == 0) {
+ rate = next_rate;
+ pr_warn("tegra3_dvfs: minimum %s cap %lu requires"
+ " %d mV", c->name, rate, next_v);
+ }
+ freqs[i] = rate;
+ next_rate = rate;
+ }
+ return 0;
+}
+
+static int __init tegra_dvfs_init_core_cap(void)
+{
+ int i;
+ struct clk *c = NULL;
+
+ tegra3_core_cap.level = kdvfs_core_cap.level = user_core_cap.level =
+ tegra3_dvfs_rail_vdd_core.max_millivolts;
+
+ for (i = 0; i < ARRAY_SIZE(core_cap_table); i++) {
+ c = tegra_get_clock_by_name(core_cap_table[i].cap_name);
+ if (!c || !c->parent ||
+ init_core_cap_one(c, core_cap_table[i].freqs)) {
+ pr_err("tegra3_dvfs: failed to initialize %s frequency"
+ " table", core_cap_table[i].cap_name);
+ continue;
+ }
+ core_cap_table[i].cap_clk = c;
+ }
+
+ cap_kobj = kobject_create_and_add("tegra_cap", kernel_kobj);
+ if (!cap_kobj) {
+ pr_err("tegra3_dvfs: failed to create sysfs cap object");
+ return 0;
+ }
+
+ if (sysfs_create_files(cap_kobj, cap_attributes)) {
+ pr_err("tegra3_dvfs: failed to create sysfs cap interface");
+ return 0;
+ }
+ pr_info("tegra dvfs: tegra sysfs cap interface is initialized\n");
+
+ return 0;
+}
+late_initcall(tegra_dvfs_init_core_cap);
diff --git a/arch/arm/mach-tegra/tegra3_emc.c b/arch/arm/mach-tegra/tegra3_emc.c
new file mode 100644
index 000000000000..1a6dd8194328
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_emc.c
@@ -0,0 +1,1069 @@
+/*
+ * arch/arm/mach-tegra/tegra3_emc.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/cputime.h>
+
+#include <mach/iomap.h>
+
+#include "clock.h"
+#include "dvfs.h"
+#include "tegra3_emc.h"
+
+#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
+static bool emc_enable = true;
+#else
+static bool emc_enable;
+#endif
+module_param(emc_enable, bool, 0644);
+
+#define EMC_MIN_RATE_DDR3 50000000
+#define EMC_STATUS_UPDATE_TIMEOUT 100
+#define TEGRA_EMC_TABLE_MAX_SIZE 16
+
+enum {
+ DLL_CHANGE_NONE = 0,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF,
+};
+
+#define EMC_CLK_DIV_SHIFT 0
+#define EMC_CLK_DIV_MASK (0xFF << EMC_CLK_DIV_SHIFT)
+#define EMC_CLK_SOURCE_SHIFT 30
+#define EMC_CLK_SOURCE_MASK (0x3 << EMC_CLK_SOURCE_SHIFT)
+#define EMC_CLK_LOW_JITTER_ENABLE (0x1 << 29)
+#define EMC_CLK_MC_SAME_FREQ (0x1 << 16)
+
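+/*
+ * BURST_REG_LIST is an X-macro: it is expanded twice with different
+ * DEFINE_REG() definitions below, once to build burst_reg_addr[] (the
+ * virtual register addresses written in a single burst; a 0 entry is
+ * skipped), and once to generate the matching *_INDEX enum used to index
+ * the timing tables.
+ */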
+#define BURST_REG_LIST \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RC), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RP), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2), \
+ DEFINE_REG(0, EMC_XM2CLKPADCTRL), /* skipped in burst loop; step 8.1 */ \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2QUSEPADCTRL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL), \
+ \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0), \
+ DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE), \
+ \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE), \
+ DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_RSV),
+
+#define DEFINE_REG(base, reg) ((base) ? ((u32)IO_ADDRESS((base)) + (reg)) : 0)
+static const u32 burst_reg_addr[TEGRA_EMC_NUM_REGS] = {
+ BURST_REG_LIST
+};
+#undef DEFINE_REG
+
+#define DEFINE_REG(base, reg) reg##_INDEX
+enum {
+ BURST_REG_LIST
+};
+#undef DEFINE_REG
+
+static int emc_num_burst_regs;
+
+static struct clk_mux_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
+static int emc_last_sel;
+static struct tegra_emc_table start_timing;
+static bool emc_timing_in_sync;
+
+static const struct tegra_emc_table *tegra_emc_table;
+static int tegra_emc_table_size;
+
+static u32 dram_dev_num;
+static u32 emc_cfg_saved;
+static u32 dram_type = -1;
+
+static struct clk *emc;
+static struct clk *bridge;
+
+static struct {
+ cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
+ u64 last_update;
+ u64 clkchange_count;
+ spinlock_t spinlock;
+} emc_stats;
+
+static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
+static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
+static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+
+static inline void emc_writel(u32 val, unsigned long addr)
+{
+ writel(val, (u32)emc_base + addr);
+ barrier();
+}
+static inline u32 emc_readl(unsigned long addr)
+{
+ return readl((u32)emc_base + addr);
+}
+static inline void mc_writel(u32 val, unsigned long addr)
+{
+ writel(val, (u32)mc_base + addr);
+ barrier();
+}
+static inline u32 mc_readl(unsigned long addr)
+{
+ return readl((u32)mc_base + addr);
+}
+
+static void emc_last_stats_update(int last_sel)
+{
+ unsigned long flags;
+ u64 cur_jiffies = get_jiffies_64();
+
+ spin_lock_irqsave(&emc_stats.spinlock, flags);
+
+ emc_stats.time_at_clock[emc_last_sel] = cputime64_add(
+ emc_stats.time_at_clock[emc_last_sel], cputime64_sub(
+ cur_jiffies, emc_stats.last_update));
+
+ emc_stats.last_update = cur_jiffies;
+
+ if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
+ emc_stats.clkchange_count++;
+ emc_last_sel = last_sel;
+ }
+ spin_unlock_irqrestore(&emc_stats.spinlock, flags);
+}
+
+static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
+{
+ int i;
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
+ if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static inline void emc_timing_update(void)
+{
+ int err;
+
+ emc_writel(0x1, EMC_TIMING_CONTROL);
+ err = wait_for_update(EMC_STATUS,
+ EMC_STATUS_TIMING_UPDATE_STALLED, false);
+ if (err) {
+ pr_err("%s: timing update error: %d", __func__, err);
+ BUG();
+ }
+}
+
+static inline void auto_cal_disable(void)
+{
+ int err;
+
+ emc_writel(0, EMC_AUTO_CAL_INTERVAL);
+ err = wait_for_update(EMC_AUTO_CAL_STATUS,
+ EMC_AUTO_CAL_STATUS_ACTIVE, false);
+ if (err) {
+ pr_err("%s: disable auto-cal error: %d", __func__, err);
+ BUG();
+ }
+}
+
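+/*
+ * If the MC arbiter holdoff override is not already in effect with an
+ * outstanding-request limit of at most 0x50, program it and latch the new
+ * arbiter setting via MC_TIMING_CONTROL.
+ */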
+static inline void set_mc_arbiter_limits(void)
+{
+ u32 reg = mc_readl(MC_EMEM_ARB_OUTSTANDING_REQ);
+ u32 max_val = 0x50 << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
+
+ if (!(reg & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
+ ((reg & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > max_val)) {
+ reg = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
+ MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | max_val;
+ mc_writel(reg, MC_EMEM_ARB_OUTSTANDING_REQ);
+ mc_writel(0x1, MC_TIMING_CONTROL);
+ }
+}
+
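+/*
+ * If the next timing enables a DQS/QUSE pad VREF that the current timing
+ * has disabled, enable it ahead of the clock change and return true so the
+ * caller inserts the VREF settling delay.
+ */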
+static inline bool dqs_preset(const struct tegra_emc_table *next_timing,
+ const struct tegra_emc_table *last_timing)
+{
+ bool ret = false;
+
+#define DQS_SET(reg, bit) \
+ do { \
+ if ((next_timing->burst_regs[EMC_##reg##_INDEX] & \
+ EMC_##reg##_##bit##_ENABLE) && \
+ (!(last_timing->burst_regs[EMC_##reg##_INDEX] & \
+ EMC_##reg##_##bit##_ENABLE))) { \
+ emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
+ | EMC_##reg##_##bit##_ENABLE, EMC_##reg); \
+ ret = true; \
+ } \
+ } while (0)
+
+ DQS_SET(XM2DQSPADCTRL2, VREF);
+ DQS_SET(XM2DQSPADCTRL3, VREF);
+ DQS_SET(XM2QUSEPADCTRL, IVREF);
+
+ return ret;
+}
+
+static inline void overwrite_mrs_wait_cnt(
+ const struct tegra_emc_table *next_timing,
+ bool zcal_long)
+{
+ u32 reg;
+ u32 cnt = 512;
+
+ /* For DDR3, when the DLL is re-started: overwrite the EMC DFS table
+ setting for MRS_WAIT_LONG with the maximum of the MRS_WAIT_SHORT
+ setting and the expected operation length. Reduce the latter by the
+ overlapping ZQ calibration, if any. */
+ if (zcal_long)
+ cnt -= dram_dev_num * 256;
+
+ reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
+ if (cnt < reg)
+ cnt = reg;
+
+ reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
+ (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
+ reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
+ EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ emc_writel(reg, EMC_MRS_WAIT_CNT);
+}
+
+static inline bool need_qrst(const struct tegra_emc_table *next_timing,
+ const struct tegra_emc_table *last_timing,
+ u32 emc_dpd_reg)
+{
+ u32 last_mode = (last_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
+ EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
+ u32 next_mode = (next_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
+ EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
+
+ /* QUSE DPD is disabled */
+ bool ret = !(emc_dpd_reg & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) &&
+
+ /* QUSE uses external mode before or after clock change */
+ (((last_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
+ (last_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)) ||
+ ((next_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
+ (next_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))) &&
+
+ /* QUSE pad switches from schmitt to vref mode */
+ (((last_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
+ EMC_XM2QUSEPADCTRL_IVREF_ENABLE) == 0) &&
+ ((next_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
+ EMC_XM2QUSEPADCTRL_IVREF_ENABLE) != 0));
+
+ return ret;
+}
+
+static inline void periodic_qrst_enable(u32 emc_cfg_reg, u32 emc_dbg_reg)
+{
+ /* enable write mux => enable periodic QRST => restore mux */
+ emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+ emc_writel(emc_cfg_reg | EMC_CFG_PERIODIC_QRST, EMC_CFG);
+ emc_writel(emc_dbg_reg, EMC_DBG);
+}
+
+static inline int get_dll_change(const struct tegra_emc_table *next_timing,
+ const struct tegra_emc_table *last_timing)
+{
+ bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
+ bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
+
+ if (next_dll_enabled == last_dll_enabled)
+ return DLL_CHANGE_NONE;
+ else if (next_dll_enabled)
+ return DLL_CHANGE_ON;
+ else
+ return DLL_CHANGE_OFF;
+}
+
+static inline void set_dram_mode(const struct tegra_emc_table *next_timing,
+ const struct tegra_emc_table *last_timing,
+ int dll_change)
+{
+ if (dram_type == DRAM_TYPE_DDR3) {
+ /* first mode_1, then mode_2, then mode_reset*/
+ if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
+ emc_writel(next_timing->emc_mode_1, EMC_EMRS);
+ if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
+ emc_writel(next_timing->emc_mode_2, EMC_EMRS);
+
+ if ((next_timing->emc_mode_reset !=
+ last_timing->emc_mode_reset) ||
+ (dll_change == DLL_CHANGE_ON)) {
+ u32 reg = next_timing->emc_mode_reset &
+ (~EMC_MODE_SET_DLL_RESET);
+ if (dll_change == DLL_CHANGE_ON) {
+ reg |= EMC_MODE_SET_DLL_RESET;
+ reg |= EMC_MODE_SET_LONG_CNT;
+ }
+ emc_writel(reg, EMC_MRS);
+ }
+ } else {
+ /* first mode_2, then mode_1; mode_reset is not applicable */
+ if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
+ emc_writel(next_timing->emc_mode_2, EMC_MRW);
+ if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
+ emc_writel(next_timing->emc_mode_1, EMC_MRW);
+ }
+}
+
+static inline void do_clock_change(u32 clk_setting)
+{
+ int err;
+
+ mc_readl(MC_EMEM_ADR_CFG); /* completes prev writes */
+ writel(clk_setting, (u32)clk_base + emc->reg);
+
+ err = wait_for_update(EMC_INTSTATUS,
+ EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
+ if (err) {
+ pr_err("%s: clock change completion error: %d", __func__, err);
+ BUG();
+ }
+}
+
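+/*
+ * Execute the EMC clock change sequence for one table entry: program the
+ * shadow (burst) registers for the next timing, stall memory traffic,
+ * switch the EMC clock source, then restore refresh, calibration and
+ * self-refresh state. Rate changes are serialized by the EMC clock lock
+ * held by the caller (see the comment above tegra_emc_set_rate()).
+ */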
+static noinline void emc_set_clock(const struct tegra_emc_table *next_timing,
+ const struct tegra_emc_table *last_timing,
+ u32 clk_setting)
+{
+ int i, dll_change, pre_wait;
+ bool dyn_sref_enabled, vref_cal_toggle, qrst_used, zcal_long;
+
+ u32 emc_cfg_reg = emc_readl(EMC_CFG);
+ u32 emc_dbg_reg = emc_readl(EMC_DBG);
+
+ dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
+ dll_change = get_dll_change(next_timing, last_timing);
+ zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
+ (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);
+
+ /* FIXME: remove steps enumeration below? */
+
+ /* 1. clear clkchange_complete interrupts */
+ emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
+
+ /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
+ possible self-refresh entry/exit and/or dqs vref settled - waiting
+ before the clock change decreases worst case change stall time */
+ pre_wait = 0;
+ if (dyn_sref_enabled) {
+ emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
+ emc_writel(emc_cfg_reg, EMC_CFG);
+ pre_wait = 5; /* 5us+ for self-refresh entry/exit */
+ }
+
+ /* 2.25 update MC arbiter settings */
+ set_mc_arbiter_limits();
+
+ /* 2.5 check dq/dqs vref delay */
+ if (dqs_preset(next_timing, last_timing)) {
+ if (pre_wait < 3)
+ pre_wait = 3; /* 3us+ for dqs vref settled */
+ }
+ if (pre_wait) {
+ emc_timing_update();
+ udelay(pre_wait);
+ }
+
+ /* 3. disable auto-cal if vref mode is switching */
+ vref_cal_toggle = (next_timing->emc_acal_interval != 0) &&
+ ((next_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX] ^
+ last_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX]) &
+ EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE);
+ if (vref_cal_toggle)
+ auto_cal_disable();
+
+ /* 4. program burst shadow registers */
+ for (i = 0; i < emc_num_burst_regs; i++) {
+ if (!burst_reg_addr[i])
+ continue;
+ __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
+ }
+ wmb();
+ barrier();
+
+ /* On ddr3 when DLL is re-started predict MRS long wait count and
+ overwrite DFS table setting */
+ if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
+ overwrite_mrs_wait_cnt(next_timing, zcal_long);
+
+ /* the last read below makes sure prev writes are completed */
+ qrst_used = need_qrst(next_timing, last_timing,
+ emc_readl(EMC_SEL_DPD_CTRL));
+
+ /* 5. flow control marker 1 (no EMC read access after this) */
+ emc_writel(1, EMC_STALL_BEFORE_CLKCHANGE);
+
+ /* 6. enable periodic QRST */
+ if (qrst_used)
+ periodic_qrst_enable(emc_cfg_reg, emc_dbg_reg);
+
+ /* 6.1 disable auto-refresh to save time after clock change */
+ emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
+
+ /* 7. turn Off dll and enter self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (dll_change == DLL_CHANGE_OFF)
+ emc_writel(next_timing->emc_mode_1, EMC_EMRS);
+ emc_writel(DRAM_BROADCAST(dram_dev_num) |
+ EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
+ }
+
+ /* 8. flow control marker 2 */
+ emc_writel(1, EMC_STALL_AFTER_CLKCHANGE);
+
+ /* 8.1 enable write mux, update unshadowed pad control */
+ emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+ emc_writel(next_timing->burst_regs[EMC_XM2CLKPADCTRL_INDEX],
+ EMC_XM2CLKPADCTRL);
+
+ /* 9. restore periodic QRST, and disable write mux */
+ if ((qrst_used) || (next_timing->emc_periodic_qrst !=
+ last_timing->emc_periodic_qrst)) {
+ emc_cfg_reg = next_timing->emc_periodic_qrst ?
+ emc_cfg_reg | EMC_CFG_PERIODIC_QRST :
+ emc_cfg_reg & (~EMC_CFG_PERIODIC_QRST);
+ emc_writel(emc_cfg_reg, EMC_CFG);
+ }
+ emc_writel(emc_dbg_reg, EMC_DBG);
+
+ /* 10. exit self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3)
+ emc_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
+
+ /* 11. set dram mode registers */
+ set_dram_mode(next_timing, last_timing, dll_change);
+
+ /* 12. issue zcal command if turning zcal On */
+ if (zcal_long) {
+ emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
+ if (dram_dev_num > 1)
+ emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
+ }
+
+ /* 13. flow control marker 3 */
+ emc_writel(1, EMC_UNSTALL_RW_AFTER_CLKCHANGE);
+
+ /* 14. read any MC register to ensure the programming is done
+ change EMC clock source register (EMC read access restored)
+ wait for clk change completion */
+ do_clock_change(clk_setting);
+
+ /* 14.1 re-enable auto-refresh */
+ emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
+
+ /* 15. restore auto-cal */
+ if (vref_cal_toggle)
+ emc_writel(next_timing->emc_acal_interval,
+ EMC_AUTO_CAL_INTERVAL);
+
+ /* 16. restore dynamic self-refresh */
+ if (next_timing->rev >= 0x32)
+ dyn_sref_enabled = next_timing->emc_dsr;
+ if (dyn_sref_enabled) {
+ emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
+ emc_writel(emc_cfg_reg, EMC_CFG);
+ }
+
+ /* 17. set zcal wait count */
+ if (zcal_long)
+ emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);
+
+ /* 18. update restored timing */
+ udelay(2);
+ emc_timing_update();
+}
+
+static inline void emc_get_timing(struct tegra_emc_table *timing)
+{
+ int i;
+
+ for (i = 0; i < emc_num_burst_regs; i++) {
+ if (burst_reg_addr[i])
+ timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
+ else
+ timing->burst_regs[i] = 0;
+ }
+ timing->emc_acal_interval = 0;
+ timing->emc_zcal_cnt_long = 0;
+ timing->emc_mode_reset = 0;
+ timing->emc_mode_1 = 0;
+ timing->emc_mode_2 = 0;
+ timing->emc_periodic_qrst = (emc_readl(EMC_CFG) &
+ EMC_CFG_PERIODIC_QRST) ? 1 : 0;
+}
+
+/* After deep sleep the EMC power features are not restored.
+ * Restore them at run time, after the first clock change.
+ */
+static inline void emc_cfg_power_restore(void)
+{
+ u32 reg = emc_readl(EMC_CFG);
+ u32 pwr_mask = EMC_CFG_PWR_MASK;
+
+ if (tegra_emc_table[0].rev >= 0x32)
+ pwr_mask &= ~EMC_CFG_DYN_SREF_ENABLE;
+
+ if ((reg ^ emc_cfg_saved) & pwr_mask) {
+ reg = (reg & (~pwr_mask)) | (emc_cfg_saved & pwr_mask);
+ emc_writel(reg, EMC_CFG);
+ emc_timing_update();
+ }
+}
+
+/* The EMC registers have shadow registers. When the EMC clock is updated
+ * in the clock controller, the shadow registers are copied to the active
+ * registers, allowing glitchless memory bus frequency changes.
+ * This function updates the shadow registers for a new clock frequency,
+ * and relies on the clock lock on the emc clock to avoid races between
+ * multiple frequency changes */
+int tegra_emc_set_rate(unsigned long rate)
+{
+ int i;
+ u32 clk_setting;
+ const struct tegra_emc_table *last_timing;
+
+ if (!tegra_emc_table)
+ return -EINVAL;
+
+ /* Table entries specify rate in kHz */
+ rate = rate / 1000;
+
+ for (i = 0; i < tegra_emc_table_size; i++) {
+ if (tegra_emc_clk_sel[i].input == NULL)
+ continue; /* invalid entry */
+
+ if (tegra_emc_table[i].rate == rate)
+ break;
+ }
+
+ if (i >= tegra_emc_table_size)
+ return -EINVAL;
+
+ if (!emc_timing_in_sync) {
+ /* cannot assume that the boot timing matches a DFS table
+ entry even if the boot frequency matches one of the
+ table nodes */
+ emc_get_timing(&start_timing);
+ last_timing = &start_timing;
+ } else {
+ last_timing = &tegra_emc_table[emc_last_sel];
+ }
+
+ clk_setting = tegra_emc_clk_sel[i].value;
+ emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
+ if (!emc_timing_in_sync)
+ emc_cfg_power_restore();
+ emc_timing_in_sync = true;
+ emc_last_stats_update(i);
+
+ pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);
+
+ return 0;
+}
+
+/* Select the closest EMC rate that is higher than the requested rate */
+long tegra_emc_round_rate(unsigned long rate)
+{
+ int i;
+ int best = -1;
+ unsigned long distance = ULONG_MAX;
+
+ if (!tegra_emc_table)
+ return clk_get_rate_locked(emc); /* no table - no rate change */
+
+ if (!emc_enable)
+ return -EINVAL;
+
+ pr_debug("%s: %lu\n", __func__, rate);
+
+ /* Table entries specify rate in kHz */
+ rate = rate / 1000;
+
+ for (i = 0; i < tegra_emc_table_size; i++) {
+ if (tegra_emc_clk_sel[i].input == NULL)
+ continue; /* invalid entry */
+
+ if (tegra_emc_table[i].rate >= rate &&
+ (tegra_emc_table[i].rate - rate) < distance) {
+ distance = tegra_emc_table[i].rate - rate;
+ best = i;
+ }
+ }
+
+ if (best < 0)
+ return -EINVAL;
+
+ pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate);
+
+ return tegra_emc_table[best].rate * 1000;
+}
+
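+/* Return the clock input (and fill *div_value) that the DFS table selects
+ * for the given rate, or NULL if the rate has no table entry. */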
+struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
+{
+ int i;
+
+ if (!tegra_emc_table)
+ return NULL;
+
+ pr_debug("%s: %lu\n", __func__, rate);
+
+ /* Table entries specify rate in kHz */
+ rate = rate / 1000;
+
+ for (i = 0; i < tegra_emc_table_size; i++) {
+ if (tegra_emc_table[i].rate == rate) {
+ *div_value = (tegra_emc_clk_sel[i].value &
+ EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
+ return tegra_emc_clk_sel[i].input;
+ }
+ }
+
+ return NULL;
+}
+
+static const struct clk_mux_sel *find_matching_input(
+ unsigned long table_rate,
+ u32 *div_value)
+{
+ unsigned long inp_rate;
+ const struct clk_mux_sel *sel;
+
+ for (sel = emc->inputs; sel->input != NULL; sel++) {
+ /* Table entries specify rate in kHz */
+ inp_rate = clk_get_rate(sel->input) / 1000;
+
+ if ((inp_rate >= table_rate) &&
+ (inp_rate % table_rate == 0)) {
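+ /* divider is encoded in half steps (likely Tegra's U7.1 format):
+ out = 2 * in / (div + 2), hence div = 2 * in / out - 2 */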
+ *div_value = 2 * inp_rate / table_rate - 2;
+ return sel;
+ }
+ }
+ return NULL;
+}
+
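+/*
+ * For table revisions 0x33 and later each entry carries a minimum voltage
+ * (emc_min_mv); rebuild the EMC dvfs frequency ladder so that each dvfs
+ * voltage maps to the highest table rate supported at that voltage
+ * (1 MHz placeholder if no entry qualifies).
+ */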
+static void adjust_emc_dvfs_table(const struct tegra_emc_table *table,
+ int table_size)
+{
+ int i, j;
+ unsigned long rate;
+
+ if (table[0].rev < 0x33)
+ return;
+
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ int mv = emc->dvfs->millivolts[i];
+ if (!mv)
+ break;
+
+ /* For each dvfs voltage find maximum supported rate;
+ use 1MHz placeholder if not found */
+ for (rate = 1000, j = 0; j < table_size; j++) {
+ if (tegra_emc_clk_sel[j].input == NULL)
+ continue; /* invalid entry */
+
+ if ((mv >= table[j].emc_min_mv) &&
+ (rate < table[j].rate))
+ rate = table[j].rate;
+ }
+ /* Table entries specify rate in kHz */
+ emc->dvfs->freqs[i] = rate * 1000;
+ }
+}
+
+static bool is_emc_bridge(void)
+{
+ int mv;
+ unsigned long rate;
+
+ bridge = tegra_get_clock_by_name("bridge.emc");
+ BUG_ON(!bridge);
+
+ /* LPDDR2 does not need a bridge entry in DFS table: just lock bridge
+ rate at minimum so it won't interfere with emc bus operations */
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ clk_set_rate(bridge, 0);
+ return true;
+ }
+
+ /* DDR3 requires EMC DFS table to include a bridge entry with frequency
+ above minimum bridge threshold, and voltage below bridge threshold */
+ rate = clk_round_rate(bridge, TEGRA_EMC_BRIDGE_RATE_MIN);
+ if (IS_ERR_VALUE(rate))
+ return false;
+
+ mv = tegra_dvfs_predict_millivolts(emc, rate);
+ if (IS_ERR_VALUE(mv) || (mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN))
+ return false;
+
+ if (clk_set_rate(bridge, rate))
+ return false;
+
+ return true;
+}
+
+static int tegra_emc_suspend_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ if (event != PM_SUSPEND_PREPARE)
+ return NOTIFY_OK;
+
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (clk_enable(bridge)) {
+ pr_info("Tegra emc suspend:"
+ " failed to enable bridge.emc\n");
+ return NOTIFY_STOP;
+ }
+ pr_info("Tegra emc suspend: enabled bridge.emc\n");
+ }
+ return NOTIFY_OK;
+}
+static struct notifier_block tegra_emc_suspend_nb = {
+ .notifier_call = tegra_emc_suspend_notify,
+ .priority = 2,
+};
+
+static int tegra_emc_resume_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ if (event != PM_POST_SUSPEND)
+ return NOTIFY_OK;
+
+ if (dram_type == DRAM_TYPE_DDR3) {
+ clk_disable(bridge);
+ pr_info("Tegra emc resume: disabled bridge.emc\n");
+ }
+ return NOTIFY_OK;
+}
+static struct notifier_block tegra_emc_resume_nb = {
+ .notifier_call = tegra_emc_resume_notify,
+ .priority = -1,
+};
+
+void tegra_init_emc(const struct tegra_emc_table *table, int table_size)
+{
+ int i, mv;
+ u32 reg, div_value;
+ bool max_entry = false;
+ unsigned long boot_rate, max_rate;
+ const struct clk_mux_sel *sel;
+
+ emc_stats.clkchange_count = 0;
+ spin_lock_init(&emc_stats.spinlock);
+ emc_stats.last_update = get_jiffies_64();
+
+ boot_rate = clk_get_rate(emc) / 1000;
+ max_rate = clk_get_max_rate(emc) / 1000;
+
+ if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
+ pr_err("tegra: not supported DRAM type %u\n", dram_type);
+ return;
+ }
+
+ if (emc->parent != tegra_get_clock_by_name("pll_m")) {
+ pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
+ emc->parent->name);
+ return;
+ }
+
+ if (!table || !table_size) {
+ pr_err("tegra: EMC DFS table is empty\n");
+ return;
+ }
+
+ tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
+ switch (table[0].rev) {
+ case 0x30:
+ emc_num_burst_regs = 105;
+ break;
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ emc_num_burst_regs = 107;
+ break;
+ default:
+ pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
+ table[0].rev);
+ return;
+ }
+
+ /* Match EMC source/divider settings with table entries */
+ for (i = 0; i < tegra_emc_table_size; i++) {
+ unsigned long table_rate = table[i].rate;
+ if (!table_rate)
+ continue;
+
+ BUG_ON(table[i].rev != table[0].rev);
+
+ sel = find_matching_input(table_rate, &div_value);
+ if (!sel)
+ continue;
+
+ if (table_rate == boot_rate)
+ emc_last_sel = i;
+
+ if (table_rate == max_rate)
+ max_entry = true;
+
+ tegra_emc_clk_sel[i] = *sel;
+ BUG_ON(div_value >
+ (EMC_CLK_DIV_MASK >> EMC_CLK_DIV_SHIFT));
+ tegra_emc_clk_sel[i].value <<= EMC_CLK_SOURCE_SHIFT;
+ tegra_emc_clk_sel[i].value |= (div_value << EMC_CLK_DIV_SHIFT);
+
+ if ((div_value == 0) &&
+ (tegra_emc_clk_sel[i].input == emc->parent)) {
+ tegra_emc_clk_sel[i].value |= EMC_CLK_LOW_JITTER_ENABLE;
+ }
+
+ if (table[i].burst_regs[MC_EMEM_ARB_MISC0_INDEX] &
+ MC_EMEM_ARB_MISC0_EMC_SAME_FREQ)
+ tegra_emc_clk_sel[i].value |= EMC_CLK_MC_SAME_FREQ;
+ }
+
+ /* Validate EMC rate and voltage limits */
+ if (!max_entry) {
+ pr_err("tegra: invalid EMC DFS table: entry for max rate"
+ " %lu kHz is not found\n", max_rate);
+ return;
+ }
+
+ tegra_emc_table = table;
+
+ adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
+ mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
+ if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
+ tegra_emc_table = NULL;
+ pr_err("tegra: invalid EMC DFS table: maximum rate %lu kHz does"
+ " not match nominal voltage %d\n",
+ max_rate, emc->dvfs->max_millivolts);
+ return;
+ }
+
+ if (!is_emc_bridge()) {
+ tegra_emc_table = NULL;
+ pr_err("tegra: invalid EMC DFS table: emc bridge not found");
+ return;
+ }
+ pr_info("tegra: validated EMC DFS table\n");
+
+ /* Configure clock change mode according to dram type */
+ reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
+ reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
+ EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
+ emc_writel(reg, EMC_CFG_2);
+
+ register_pm_notifier(&tegra_emc_suspend_nb);
+ register_pm_notifier(&tegra_emc_resume_nb);
+}
+
+void tegra_emc_timing_invalidate(void)
+{
+ emc_timing_in_sync = false;
+}
+
+void tegra_emc_dram_type_init(struct clk *c)
+{
+ emc = c;
+
+ dram_type = (emc_readl(EMC_FBIO_CFG5) &
+ EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
+ if (dram_type == DRAM_TYPE_DDR3)
+ emc->min_rate = EMC_MIN_RATE_DDR3;
+
+ dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
+ emc_cfg_saved = emc_readl(EMC_CFG);
+}
+
+int tegra_emc_get_dram_type(void)
+{
+ return dram_type;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *emc_debugfs_root;
+
+static int emc_stats_show(struct seq_file *s, void *data)
+{
+ int i;
+
+ emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
+
+ seq_printf(s, "%-10s %-10s \n", "rate kHz", "time");
+ for (i = 0; i < tegra_emc_table_size; i++) {
+ if (tegra_emc_clk_sel[i].input == NULL)
+ continue; /* invalid entry */
+
+ seq_printf(s, "%-10lu %-10llu \n", tegra_emc_table[i].rate,
+ cputime64_to_clock_t(emc_stats.time_at_clock[i]));
+ }
+ seq_printf(s, "%-15s %llu\n", "transitions:",
+ emc_stats.clkchange_count);
+ seq_printf(s, "%-15s %llu\n", "time-stamp:",
+ cputime64_to_clock_t(emc_stats.last_update));
+
+ return 0;
+}
+
+static int emc_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, emc_stats_show, inode->i_private);
+}
+
+static const struct file_operations emc_stats_fops = {
+ .open = emc_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_emc_debug_init(void)
+{
+ if (!tegra_emc_table)
+ return 0;
+
+ emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
+ if (!emc_debugfs_root)
+ return -ENOMEM;
+
+ if (!debugfs_create_file(
+ "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(emc_debugfs_root);
+ return -ENOMEM;
+}
+
+late_initcall(tegra_emc_debug_init);
+#endif
diff --git a/arch/arm/mach-tegra/tegra3_emc.h b/arch/arm/mach-tegra/tegra3_emc.h
new file mode 100644
index 000000000000..cc0abc680166
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_emc.h
@@ -0,0 +1,279 @@
+/*
+ * arch/arm/mach-tegra/tegra3_emc.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef _MACH_TEGRA_TEGRA3_EMC_H
+#define _MACH_TEGRA_TEGRA3_EMC_H
+
+#define TEGRA_EMC_NUM_REGS 110
+
+#define TEGRA_EMC_BRIDGE_RATE_MIN 300000000
+#define TEGRA_EMC_BRIDGE_MVOLTS_MIN 1200
+
+struct tegra_emc_table {
+ u8 rev;
+ unsigned long rate;
+
+ /* unconditionally updated in one burst shot */
+ u32 burst_regs[TEGRA_EMC_NUM_REGS];
+
+ /* updated separately under some conditions */
+ u32 emc_zcal_cnt_long;
+ u32 emc_acal_interval;
+ u32 emc_periodic_qrst;
+ u32 emc_mode_reset;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_dsr;
+ int emc_min_mv;
+};
+
+struct clk;
+
+void tegra_init_emc(const struct tegra_emc_table *table, int table_size);
+
+void tegra_emc_dram_type_init(struct clk *c);
+int tegra_emc_get_dram_type(void);
+
+#ifdef CONFIG_PM_SLEEP
+void tegra_mc_timing_restore(void);
+#else
+static inline void tegra_mc_timing_restore(void)
+{ }
+#endif
+
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE (0x1 << 4)
+
+#define EMC_DBG 0x8
+#define EMC_DBG_WRITE_MUX_ACTIVE (0x1 << 1)
+
+#define EMC_CFG 0xc
+#define EMC_CFG_PERIODIC_QRST (0x1 << 21)
+#define EMC_CFG_DYN_SREF_ENABLE (0x1 << 28)
+#define EMC_CFG_PWR_MASK (0xF << 28)
+
+#define EMC_REFCTRL 0x20
+#define EMC_REFCTRL_DEV_SEL_SHIFT 0
+#define EMC_REFCTRL_DEV_SEL_MASK (0x3 << EMC_REFCTRL_DEV_SEL_SHIFT)
+#define EMC_REFCTRL_ENABLE (0x1 << 31)
+#define EMC_REFCTRL_ENABLE_ALL(num) \
+ ((((num > 1) ? 0 : 2) << EMC_REFCTRL_DEV_SEL_SHIFT) \
+ | EMC_REFCTRL_ENABLE)
+#define EMC_REFCTRL_DISABLE_ALL(num) \
+ (((num > 1) ? 0 : 2) << EMC_REFCTRL_DEV_SEL_SHIFT)
+
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_QUSE_EXTRA 0xac
+#define EMC_ODT_WRITE 0xb0
+#define EMC_ODT_READ 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_CTT 0xbc
+
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_MODE_SET_DLL_RESET (0x1 << 8)
+#define EMC_MODE_SET_LONG_CNT (0x1 << 26)
+#define EMC_EMRS 0xd0
+
+#define EMC_SELF_REF 0xe0
+#define EMC_SELF_REF_CMD_ENABLED (0x1 << 0)
+#define EMC_SELF_REF_DEV_SEL_SHIFT 30
+#define EMC_SELF_REF_DEV_SEL_MASK (0x3 << EMC_SELF_REF_DEV_SEL_SHIFT)
+enum {
+ DRAM_DEV_SEL_ALL = 0,
+ DRAM_DEV_SEL_0 = (2 << EMC_SELF_REF_DEV_SEL_SHIFT),
+ DRAM_DEV_SEL_1 = (1 << EMC_SELF_REF_DEV_SEL_SHIFT),
+};
+#define DRAM_BROADCAST(num) \
+ (((num) > 1) ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
+
+#define EMC_MRW 0xe8
+#define EMC_MRR 0xec
+#define EMC_XM2DQSPADCTRL3 0xf8
+#define EMC_XM2DQSPADCTRL3_VREF_ENABLE (0x1 << 5)
+#define EMC_FBIO_SPARE 0x100
+
+#define EMC_FBIO_CFG5 0x104
+#define EMC_CFG5_TYPE_SHIFT 0x0
+#define EMC_CFG5_TYPE_MASK (0x3 << EMC_CFG5_TYPE_SHIFT)
+enum {
+ DRAM_TYPE_DDR3 = 0,
+ DRAM_TYPE_LPDDR2 = 2,
+};
+#define EMC_CFG5_QUSE_MODE_SHIFT 13
+#define EMC_CFG5_QUSE_MODE_MASK (0x7 << EMC_CFG5_QUSE_MODE_SHIFT)
+enum {
+ EMC_CFG5_QUSE_MODE_NORMAL = 0,
+ EMC_CFG5_QUSE_MODE_ALWAYS_ON,
+ EMC_CFG5_QUSE_MODE_INTERNAL_LPBK,
+ EMC_CFG5_QUSE_MODE_PULSE_INTERN,
+ EMC_CFG5_QUSE_MODE_PULSE_EXTERN,
+};
+
+#define EMC_FBIO_CFG6 0x114
+#define EMC_CFG_RSV 0x120
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_AUTO_CAL_STATUS_ACTIVE (0x1 << 31)
+#define EMC_STATUS 0x2b4
+#define EMC_STATUS_TIMING_UPDATE_STALLED (0x1 << 23)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_2_MODE_SHIFT 0
+#define EMC_CFG_2_MODE_MASK (0x7 << EMC_CFG_2_MODE_SHIFT)
+#define EMC_CFG_2_SREF_MODE 0x1
+#define EMC_CFG_2_PD_MODE 0x3
+
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_CMD (0x1 << 0)
+#define EMC_ZQ_CAL_LONG (0x1 << 4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE (0x1 << 5)
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE (0x1 << 10)
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2QUSEPADCTRL 0x318
+#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE (0x1 << 4)
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_BEFORE_CLKCHANGE 0x3c8
+#define EMC_STALL_AFTER_CLKCHANGE 0x3cc
+#define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE (0x1 << 9)
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+
+#define MC_EMEM_ADR_CFG 0x54
+#define MC_EMEM_ARB_CFG 0x90
+#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
+#define MC_EMEM_ARB_OUTSTANDING_REQ_MAX_SHIFT 0
+#define MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK \
+ (0x1FF << MC_EMEM_ARB_OUTSTANDING_REQ_MAX_SHIFT)
+#define MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE (0x1 << 30)
+#define MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE (0x1 << 31)
+#define MC_EMEM_ARB_TIMING_RCD 0x98
+#define MC_EMEM_ARB_TIMING_RP 0x9c
+#define MC_EMEM_ARB_TIMING_RC 0xa0
+#define MC_EMEM_ARB_TIMING_RAS 0xa4
+#define MC_EMEM_ARB_TIMING_FAW 0xa8
+#define MC_EMEM_ARB_TIMING_RRD 0xac
+#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
+#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
+#define MC_EMEM_ARB_TIMING_R2R 0xb8
+#define MC_EMEM_ARB_TIMING_W2W 0xbc
+#define MC_EMEM_ARB_TIMING_R2W 0xc0
+#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_DA_TURNS 0xd0
+#define MC_EMEM_ARB_DA_COVERS 0xd4
+#define MC_EMEM_ARB_MISC0 0xd8
+#define MC_EMEM_ARB_MISC0_EMC_SAME_FREQ (0x1 << 27)
+#define MC_EMEM_ARB_MISC1 0xdc
+#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
+#define MC_EMEM_ARB_RING3_THROTTLE 0xe4
+#define MC_EMEM_ARB_OVERRIDE 0xe8
+#define MC_TIMING_CONTROL 0xfc
+#define MC_RESERVED_RSV 0x3fc
+
+#endif
diff --git a/arch/arm/mach-tegra/tegra3_speedo.c b/arch/arm/mach-tegra/tegra3_speedo.c
new file mode 100644
index 000000000000..a9dfb7344b1e
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_speedo.c
@@ -0,0 +1,404 @@
+/*
+ * arch/arm/mach-tegra/tegra3_speedo.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <mach/iomap.h>
+
+#include "fuse.h"
+
+#define CORE_PROCESS_CORNERS_NUM 1
+#define CPU_PROCESS_CORNERS_NUM 7
+
+#define FUSE_SPEEDO_CALIB_0 0x114
+#define FUSE_PACKAGE_INFO 0x1FC
+
+/* Maximum speedo levels for each core process corner */
+static const u32 core_process_speedos[][CORE_PROCESS_CORNERS_NUM] = {
+/* proc_id 0 */
+ {180}, /* [0]: soc_speedo_id 0: any A01 */
+
+/* T30 family */
+ {180}, /* [1]: soc_speedo_id 1: AP30 */
+ {204}, /* [2]: soc_speedo_id 2: T30 */
+ {192}, /* [3]: soc_speedo_id 2: T30S */
+
+/* Characterization SKUs */
+ {168}, /* [4]: soc_speedo_id 1: AP30 char */
+ {192}, /* [5]: soc_speedo_id 2: T30 char */
+ {184}, /* [6]: soc_speedo_id 2: T30S char */
+
+/* T33 family */
+ {180}, /* [7]: soc_speedo_id = 1 - AP33 */
+ {208}, /* [8]: soc_speedo_id = 2 - T33 */
+ {192}, /* [9]: soc_speedo_id = 2 - T33S */
+
+/* T30 'L' family */
+ {192}, /* [10]: soc_speedo_id 1: T30L */
+ {192}, /* [11]: soc_speedo_id 1: T30SL */
+
+/* T30 Automotives */
+ {185}, /* [12]: soc_speedo_id = 3 - Automotives */
+ {185}, /* [13]: soc_speedo_id = 3 - Automotives */
+};
+
+/* Maximum speedo levels for each CPU process corner */
+static const u32 cpu_process_speedos[][CPU_PROCESS_CORNERS_NUM] = {
+/* proc_id 0 1 2 3 4 */
+ {306, 338, 360, 376, UINT_MAX}, /* [0]: cpu_speedo_id 0: any A01 */
+
+/* T30 family */
+ {304, 336, 359, 375, UINT_MAX}, /* [1]: cpu_speedo_id 1: AP30 */
+ {336, 336, 359, 375, UINT_MAX}, /* [2]: cpu_speedo_id 2: T30 */
+ {336, 336, 359, 375, UINT_MAX}, /* [3]: cpu_speedo_id 3: T30S */
+
+/* Characterization SKUs */
+ {292, 324, 348, 364, UINT_MAX}, /* [4]: cpu_speedo_id 1: AP30char */
+ {324, 324, 348, 364, UINT_MAX}, /* [5]: cpu_speedo_id 2: T30char */
+ {324, 324, 348, 364, UINT_MAX}, /* [6]: cpu_speedo_id 3: T30Schar */
+
+/* T33 family */
+ {305, 337, 359, 376, UINT_MAX}, /* [7]: cpu_speedo_id = 4 - AP33 */
+ {368, 368, 368, 368, 392}, /* [8]: cpu_speedo_id = 5 - T33 */
+ {376, 376, 376, 376, 392}, /* [9]: cpu_speedo_id = 6 - T33S */
+
+/* T30 'L' family */
+ {305, 337, 359, 376, 392}, /* [10]: cpu_speedo_id 7: T30L */
+ {305, 337, 359, 376, 392}, /* [11]: cpu_speedo_id 8: T30SL */
+
+/* T30 Automotives */
+ /* threshold_index 12: cpu_speedo_id 9 & 10
+ * 0,1,2 values correspond to speedo_id 9
+ * 3,4,5 values correspond to speedo_id 10
+ */
+ {300, 311, 360, 371, 381, 415, 431},
+ {300, 311, 410, 431}, /* threshold_index 13: cpu_speedo_id = 11 */
+};
+
+/*
+ * Common threshold index into both the core_process_speedos and
+ * cpu_process_speedos arrays. Keep the two arrays in sync.
+ */
+static int threshold_index;
+
+static int cpu_process_id;
+static int core_process_id;
+static int cpu_speedo_id;
+static int soc_speedo_id;
+static int package_id;
+
+static void fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
+{
+ u32 reg;
+
+ BUG_ON(!speedo_g || !speedo_lp);
+ reg = tegra_fuse_readl(FUSE_SPEEDO_CALIB_0);
+
+ /* Speedo LP = Lower 16-bits Multiplied by 4 */
+ *speedo_lp = (reg & 0xFFFF) * 4;
+
+ /* Speedo G = Upper 16-bits Multiplied by 4 */
+ *speedo_g = ((reg >> 16) & 0xFFFF) * 4;
+}
+
+static void rev_sku_to_speedo_ids(int rev, int sku)
+{
+ switch (rev) {
+ case TEGRA_REVISION_A01: /* any A01 */
+ cpu_speedo_id = 0;
+ soc_speedo_id = 0;
+ threshold_index = 0;
+ break;
+
+ case TEGRA_REVISION_A02:
+ case TEGRA_REVISION_A03:
+ switch (sku) {
+ case 0x87: /* AP30 */
+ case 0x82: /* T30V */
+ cpu_speedo_id = 1;
+ soc_speedo_id = 1;
+ threshold_index = 1;
+ break;
+
+ case 0x81: /* T30 */
+ switch (package_id) {
+ case 1: /* MID => T30 */
+ cpu_speedo_id = 2;
+ soc_speedo_id = 2;
+ threshold_index = 2;
+ break;
+ case 2: /* DSC => AP33 */
+ cpu_speedo_id = 4;
+ soc_speedo_id = 1;
+ threshold_index = 7;
+ break;
+ default:
+ pr_err("Tegra3 Rev-A02: Reserved pkg: %d\n",
+ package_id);
+ BUG();
+ break;
+ }
+ break;
+
+ case 0x80: /* T33 or T33S */
+ switch (package_id) {
+ case 1: /* MID => T33 */
+ cpu_speedo_id = 5;
+ soc_speedo_id = 2;
+ threshold_index = 8;
+ break;
+ case 2: /* DSC => T33S */
+ cpu_speedo_id = 6;
+ soc_speedo_id = 2;
+ threshold_index = 9;
+ break;
+ default:
+ pr_err("Tegra3 Rev-A02: Reserved pkg: %d\n",
+ package_id);
+ BUG();
+ break;
+ }
+ break;
+
+ case 0x83: /* T30L or T30S */
+ switch (package_id) {
+ case 1: /* MID => T30L */
+ cpu_speedo_id = 7;
+ soc_speedo_id = 1;
+ threshold_index = 10;
+ break;
+ case 2: /* DSC => T30S */
+ cpu_speedo_id = 3;
+ soc_speedo_id = 2;
+ threshold_index = 3;
+ break;
+ default:
+ pr_err("Tegra3 Rev-A02: Reserved pkg: %d\n",
+ package_id);
+ BUG();
+ break;
+ }
+ break;
+
+ case 0x8F: /* T30SL */
+ cpu_speedo_id = 8;
+ soc_speedo_id = 1;
+ threshold_index = 11;
+ break;
+
+/* Characterization SKUs */
+ case 0x08: /* AP30 char */
+ cpu_speedo_id = 1;
+ soc_speedo_id = 1;
+ threshold_index = 4;
+ break;
+ case 0x02: /* T30 char */
+ cpu_speedo_id = 2;
+ soc_speedo_id = 2;
+ threshold_index = 5;
+ break;
+ case 0x04: /* T30S char */
+ cpu_speedo_id = 3;
+ soc_speedo_id = 2;
+ threshold_index = 6;
+ break;
+
+ case 0x91: /* T30AGS-Ax */
+ case 0xb0: /* T30IQS-Ax */
+ case 0xb1: /* T30MQS-Ax */
+ case 0x90: /* T30AQS-Ax */
+ soc_speedo_id = 3;
+ threshold_index = 12;
+ break;
+ case 0x93: /* T30AG-Ax */
+ cpu_speedo_id = 11;
+ soc_speedo_id = 3;
+ threshold_index = 13;
+ break;
+ case 0: /* ENG - check package_id */
+ pr_info("Tegra3 ENG SKU: Checking package_id\n");
+ switch (package_id) {
+ case 1: /* MID => assume T30 */
+ cpu_speedo_id = 2;
+ soc_speedo_id = 2;
+ threshold_index = 2;
+ break;
+ case 2: /* DSC => assume T30S */
+ cpu_speedo_id = 3;
+ soc_speedo_id = 2;
+ threshold_index = 3;
+ break;
+ default:
+ pr_err("Tegra3 Rev-A02: Reserved pkg: %d\n",
+ package_id);
+ BUG();
+ break;
+ }
+ break;
+
+ default:
+ /* FIXME: replace with BUG() once all SKUs are valid */
+ pr_err("Tegra3 Rev-A02: Unknown SKU %d\n", sku);
+ cpu_speedo_id = 0;
+ soc_speedo_id = 0;
+ threshold_index = 0;
+ break;
+ }
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
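+/*
+ * Boot-time speedo initialization: read the package and SKU fuses, map
+ * them to cpu/soc speedo IDs and a threshold index, then bin the fused
+ * speedo values against the per-corner thresholds to derive the cpu and
+ * core process IDs.
+ */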
+void tegra_init_speedo_data(void)
+{
+ u32 cpu_speedo_val, core_speedo_val;
+ int iv;
+
+ /* Package info: 4 bits - 0,3:reserved 1:MID 2:DSC */
+ package_id = tegra_fuse_readl(FUSE_PACKAGE_INFO) & 0x0F;
+
+ /* Arrays must be of equal size - each index corresponds to a SKU */
+ BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ ARRAY_SIZE(core_process_speedos));
+
+ rev_sku_to_speedo_ids(tegra_get_revision(), tegra_sku_id());
+ BUG_ON(threshold_index >= ARRAY_SIZE(cpu_process_speedos));
+
+ fuse_speedo_calib(&cpu_speedo_val, &core_speedo_val);
+ pr_debug("%s CPU speedo value %u\n", __func__, cpu_speedo_val);
+ pr_debug("%s Core speedo value %u\n", __func__, core_speedo_val);
+
+ for (iv = 0; iv < CPU_PROCESS_CORNERS_NUM; iv++) {
+ if (cpu_speedo_val <
+ cpu_process_speedos[threshold_index][iv]) {
+ break;
+ }
+ }
+ cpu_process_id = iv - 1;
+
+ if (cpu_process_id == -1) {
+ pr_err("****************************************************");
+ pr_err("****************************************************");
+ pr_err("* tegra3_speedo: CPU speedo value %3d out of range *",
+ cpu_speedo_val);
+ pr_err("****************************************************");
+ pr_err("****************************************************");
+
+ cpu_process_id = INVALID_PROCESS_ID;
+ cpu_speedo_id = 1;
+ }
+
+ for (iv = 0; iv < CORE_PROCESS_CORNERS_NUM; iv++) {
+ if (core_speedo_val <
+ core_process_speedos[threshold_index][iv]) {
+ break;
+ }
+ }
+ core_process_id = iv - 1;
+
+ if (core_process_id == -1) {
+ pr_err("****************************************************");
+ pr_err("****************************************************");
+ pr_err("* tegra3_speedo: CORE speedo value %3d out of range *",
+ core_speedo_val);
+ pr_err("****************************************************");
+ pr_err("****************************************************");
+
+ core_process_id = INVALID_PROCESS_ID;
+ soc_speedo_id = 1;
+ }
+ if (threshold_index == 12 && cpu_process_id != INVALID_PROCESS_ID) {
+ if (cpu_process_id <= 2)
+ cpu_speedo_id = 9;
+ else if (cpu_process_id >= 3 && cpu_process_id < 6)
+ cpu_speedo_id = 10;
+ }
+ pr_info("Tegra3: CPU Speedo ID %d, Soc Speedo ID %d",
+ cpu_speedo_id, soc_speedo_id);
+}
+
+int tegra_cpu_process_id(void)
+{
+ /* FIXME: remove when ready to deprecate invalid process-id boards */
+ if (cpu_process_id == INVALID_PROCESS_ID)
+ return 0;
+ else
+ return cpu_process_id;
+}
+
+int tegra_core_process_id(void)
+{
+ /* FIXME: remove when ready to deprecate invalid process-id boards */
+ if (core_process_id == INVALID_PROCESS_ID)
+ return 0;
+ else
+ return core_process_id;
+}
+
+int tegra_cpu_speedo_id(void)
+{
+ return cpu_speedo_id;
+}
+
+int tegra_soc_speedo_id(void)
+{
+ return soc_speedo_id;
+}
+
+int tegra_package_id(void)
+{
+ return package_id;
+}
+
+/*
+ * CPU and core nominal voltage levels as determined by chip SKU and speedo
+ * (not final - can be lowered by dvfs tables and rail dependencies; the
+ * latter is resolved by the dvfs code)
+ */
+static const int cpu_speedo_nominal_millivolts[] =
+/* speedo_id 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 */
+ { 1125, 1150, 1150, 1150, 1237, 1237, 1237, 1150, 1150, 912, 850, 850};
+
+int tegra_cpu_speedo_mv(void)
+{
+ BUG_ON(cpu_speedo_id >= ARRAY_SIZE(cpu_speedo_nominal_millivolts));
+ return cpu_speedo_nominal_millivolts[cpu_speedo_id];
+}
+
+int tegra_core_speedo_mv(void)
+{
+ switch (soc_speedo_id) {
+ case 0:
+ return 1200;
+ case 1:
+ if ((cpu_speedo_id != 7) && (cpu_speedo_id != 8))
+ return 1200;
+ /* fall thru for T30L or T30SL */
+ case 2:
+ return 1300;
+ case 3:
+ return 1250;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/arm/mach-tegra/tegra3_thermal.c b/arch/arm/mach-tegra/tegra3_thermal.c
new file mode 100644
index 000000000000..8ad7bd5b670f
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_thermal.c
@@ -0,0 +1,544 @@
+/*
+ * arch/arm/mach-tegra/tegra3_thermal.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/thermal.h>
+#include <mach/thermal.h>
+#include <mach/edp.h>
+#include <linux/slab.h>
+
+#include "clock.h"
+#include "cpu-tegra.h"
+#include "dvfs.h"
+
+#define MAX_ZONES (16)
+
+struct tegra_thermal {
+ struct tegra_thermal_device *device;
+ long temp_throttle_tj;
+ long temp_shutdown_tj;
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ struct thermal_zone_device *thz;
+ int tc1;
+ int tc2;
+ long passive_delay;
+#else
+ long temp_throttle_low_tj;
+#endif
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ int edp_thermal_zone_val;
+ long edp_offset;
+ long hysteresis_edp;
+#endif
+ struct mutex mutex;
+};
+
+static struct tegra_thermal thermal_state = {
+ .device = NULL,
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ .edp_thermal_zone_val = -1,
+#endif
+};
+
+#ifndef CONFIG_TEGRA_THERMAL_SYSFS
+static bool throttle_enb;
+#endif
+
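+/*
+ * All throttling and EDP decisions below are made in junction temperature
+ * (tj); the helpers that follow convert sensor and EDP table temperatures
+ * to and from tj using the configured offsets.
+ */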
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+static inline long edp2tj(struct tegra_thermal *thermal,
+ long edp_temp)
+{
+ return edp_temp + thermal->edp_offset;
+}
+
+static inline long tj2edp(struct tegra_thermal *thermal,
+ long temp_tj)
+{
+ return temp_tj - thermal->edp_offset;
+}
+#endif
+
+static inline long dev2tj(struct tegra_thermal_device *dev,
+ long dev_temp)
+{
+ return dev_temp + dev->offset;
+}
+
+static inline long tj2dev(struct tegra_thermal_device *dev,
+ long tj_temp)
+{
+ return tj_temp - dev->offset;
+}
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+
+static int tegra_thermal_zone_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdevice) {
+ /* Support only Thermal Throttling (1 trip) for now */
+ return thermal_zone_bind_cooling_device(thermal, 0, cdevice);
+}
+
+static int tegra_thermal_zone_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdevice) {
+ /* Support only Thermal Throttling (1 trip) for now */
+ return thermal_zone_unbind_cooling_device(thermal, 0, cdevice);
+}
+
+static int tegra_thermal_zone_get_temp(struct thermal_zone_device *thz,
+ long *temp)
+{
+ struct tegra_thermal *thermal = thz->devdata;
+ thermal->device->get_temp(thermal->device->data, temp);
+
+ return 0;
+}
+
+static int tegra_thermal_zone_get_trip_type(
+ struct thermal_zone_device *thermal,
+ int trip,
+ enum thermal_trip_type *type) {
+
+ /* Support only Thermal Throttling (1 trip) for now */
+ if (trip != 0)
+ return -EINVAL;
+
+ *type = THERMAL_TRIP_PASSIVE;
+
+ return 0;
+}
+
+static int tegra_thermal_zone_get_trip_temp(struct thermal_zone_device *thz,
+ int trip,
+ long *temp) {
+ struct tegra_thermal *thermal = thz->devdata;
+
+ /* Support only Thermal Throttling (1 trip) for now */
+ if (trip != 0)
+ return -EINVAL;
+
+ *temp = tj2dev(thermal->device, thermal->temp_throttle_tj);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tegra_thermal_zone_ops = {
+ .bind = tegra_thermal_zone_bind,
+ .unbind = tegra_thermal_zone_unbind,
+ .get_temp = tegra_thermal_zone_get_temp,
+ .get_trip_type = tegra_thermal_zone_get_trip_type,
+ .get_trip_temp = tegra_thermal_zone_get_trip_temp,
+};
+#endif
+
+/* The thermal sysfs handles notifying the throttling
+ * cooling device */
+#ifndef CONFIG_TEGRA_THERMAL_SYSFS
+static void tegra_therm_throttle(bool enable)
+{
+ if (throttle_enb != enable) {
+ mutex_lock(&thermal_state.mutex);
+ tegra_throttling_enable(enable);
+ throttle_enb = enable;
+ mutex_unlock(&thermal_state.mutex);
+ }
+}
+#endif
+
+/* Make sure this function remains stateless */
+void tegra_thermal_alert(void *data)
+{
+ struct tegra_thermal *thermal = data;
+ int err;
+ long temp_dev, temp_tj;
+ long lo_limit_throttle_tj, hi_limit_throttle_tj;
+ long lo_limit_edp_tj = 0, hi_limit_edp_tj = 0;
+ long temp_low_dev, temp_low_tj;
+ int lo_limit_tj = 0, hi_limit_tj = 0;
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ const struct tegra_edp_limits *z;
+ int zones_sz;
+ int i;
+#endif
+
+ if (thermal != &thermal_state)
+ BUG();
+
+ mutex_lock(&thermal_state.mutex);
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ if (thermal->thz) {
+ if (!thermal->thz->passive)
+ thermal_zone_device_update(thermal->thz);
+ }
+#endif
+
+ err = thermal->device->get_temp(thermal->device->data, &temp_dev);
+ if (err) {
+ pr_err("%s: get temp fail(%d)", __func__, err);
+ goto done;
+ }
+
+ /* Convert all temps to tj and then do all work/logic in terms of
+ tj in order to avoid confusion */
+ temp_tj = dev2tj(thermal->device, temp_dev);
+ thermal->device->get_temp_low(thermal->device, &temp_low_dev);
+ temp_low_tj = dev2tj(thermal->device, temp_low_dev);
+
+ lo_limit_throttle_tj = temp_low_tj;
+ hi_limit_throttle_tj = thermal->temp_throttle_tj;
+
+#ifndef CONFIG_TEGRA_THERMAL_SYSFS
+ /* Check to see if we are currently throttling */
+ if ((tegra_is_throttling() &&
+ (temp_tj > thermal->temp_throttle_low_tj))
+ || (temp_tj >= thermal->temp_throttle_tj)) {
+ lo_limit_throttle_tj = thermal->temp_throttle_low_tj;
+ hi_limit_throttle_tj = thermal->temp_shutdown_tj;
+ }
+#else
+ if (temp_tj > thermal->temp_throttle_tj) {
+ lo_limit_throttle_tj = thermal->temp_throttle_tj;
+ hi_limit_throttle_tj = thermal->temp_shutdown_tj;
+ }
+#endif
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ tegra_get_cpu_edp_limits(&z, &zones_sz);
+
+/* edp table based off of tdiode measurements */
+#define EDP_TEMP_TJ(_index) edp2tj(thermal, z[_index].temperature * 1000)
+
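+ /* Pick the EDP alert window [lo, hi] around the zone containing the
+ * current temperature; hysteresis is applied to the lower bound so a
+ * small dip does not immediately drop back to the previous zone. */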
+ if (temp_tj < EDP_TEMP_TJ(0)) {
+ lo_limit_edp_tj = temp_low_tj;
+ hi_limit_edp_tj = EDP_TEMP_TJ(0);
+ } else if (temp_tj >= EDP_TEMP_TJ(zones_sz-1)) {
+ lo_limit_edp_tj = EDP_TEMP_TJ(zones_sz-1) -
+ thermal->hysteresis_edp;
+ hi_limit_edp_tj = thermal->temp_shutdown_tj;
+ } else {
+ for (i = 0; (i + 1) < zones_sz; i++) {
+ if ((temp_tj >= EDP_TEMP_TJ(i)) &&
+ (temp_tj < EDP_TEMP_TJ(i+1))) {
+ lo_limit_edp_tj = EDP_TEMP_TJ(i) -
+ thermal->hysteresis_edp;
+ hi_limit_edp_tj = EDP_TEMP_TJ(i+1);
+ break;
+ }
+ }
+ }
+#undef EDP_TEMP_TJ
+#else
+ lo_limit_edp_tj = temp_low_tj;
+ hi_limit_edp_tj = thermal->temp_shutdown_tj;
+#endif
+
+ /* Get smallest window size */
+ lo_limit_tj = max(lo_limit_throttle_tj, lo_limit_edp_tj);
+ hi_limit_tj = min(hi_limit_throttle_tj, hi_limit_edp_tj);
+
+ thermal->device->set_limits(thermal->device->data,
+ tj2dev(thermal->device, lo_limit_tj),
+ tj2dev(thermal->device, hi_limit_tj));
+
+#ifndef CONFIG_TEGRA_THERMAL_SYSFS
+ if (temp_tj >= thermal->temp_throttle_tj) {
+ /* start throttling */
+ if (!tegra_is_throttling())
+ tegra_therm_throttle(true);
+ } else if (temp_tj <= thermal->temp_throttle_low_tj) {
+ /* switch off throttling */
+ if (tegra_is_throttling())
+ tegra_therm_throttle(false);
+ }
+#endif
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ /* inform edp governor */
+ if (thermal->edp_thermal_zone_val != temp_tj)
+ tegra_edp_update_thermal_zone(tj2edp(thermal, temp_tj)/1000);
+
+ thermal->edp_thermal_zone_val = temp_tj;
+#endif
+
+done:
+ mutex_unlock(&thermal_state.mutex);
+}
+
+int tegra_thermal_set_device(struct tegra_thermal_device *device)
+{
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ struct thermal_zone_device *thz;
+#endif
+
+ /* only support one device */
+ if (thermal_state.device)
+ return -EINVAL;
+
+ thermal_state.device = device;
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ thz = thermal_zone_device_register(thermal_state.device->name,
+ 1, /* trips */
+ &thermal_state,
+ &tegra_thermal_zone_ops,
+ thermal_state.tc1, /* dT/dt */
+ thermal_state.tc2, /* throttle */
+ thermal_state.passive_delay,
+ 0); /* polling delay */
+
+ if (IS_ERR(thz)) {
+ thz = NULL;
+ return -ENODEV;
+ }
+
+ thermal_state.thz = thz;
+#endif
+ thermal_state.device->set_alert(thermal_state.device->data,
+ tegra_thermal_alert,
+ &thermal_state);
+
+ thermal_state.device->set_shutdown_temp(thermal_state.device->data,
+ tj2dev(device, thermal_state.temp_shutdown_tj));
+
+ /* initialize limits */
+ tegra_thermal_alert(&thermal_state);
+
+ return 0;
+}
+
+int __init tegra_thermal_init(struct tegra_thermal_data *data)
+{
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ thermal_state.tc1 = data->tc1;
+ thermal_state.tc2 = data->tc2;
+ thermal_state.passive_delay = data->passive_delay;
+#else
+ thermal_state.temp_throttle_low_tj = data->temp_throttle +
+ data->temp_offset -
+ data->hysteresis_throttle;
+#endif
+ mutex_init(&thermal_state.mutex);
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+ thermal_state.edp_offset = data->edp_offset;
+ thermal_state.hysteresis_edp = data->hysteresis_edp;
+#endif
+ thermal_state.temp_throttle_tj = data->temp_throttle +
+ data->temp_offset;
+ thermal_state.temp_shutdown_tj = data->temp_shutdown +
+ data->temp_offset;
+
+ return 0;
+}
+
+int tegra_thermal_exit(void)
+{
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ if (thermal_state.thz)
+ thermal_zone_device_unregister(thermal_state.thz);
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int tegra_thermal_throttle_temp_tj_set(void *data, u64 val)
+{
+#ifndef CONFIG_TEGRA_THERMAL_SYSFS
+ long throttle_hysteresis = thermal_state.temp_throttle_tj -
+ thermal_state.temp_throttle_low_tj;
+#endif
+
+ mutex_lock(&thermal_state.mutex);
+ thermal_state.temp_throttle_tj = val;
+#ifndef CONFIG_TEGRA_THERMAL_SYSFS
+ thermal_state.temp_throttle_low_tj = thermal_state.temp_throttle_tj -
+ throttle_hysteresis;
+#endif
+ mutex_unlock(&thermal_state.mutex);
+
+ tegra_thermal_alert(&thermal_state);
+
+ return 0;
+}
+
+static int tegra_thermal_throttle_temp_tj_get(void *data, u64 *val)
+{
+ *val = (u64)thermal_state.temp_throttle_tj;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(throttle_temp_tj_fops,
+ tegra_thermal_throttle_temp_tj_get,
+ tegra_thermal_throttle_temp_tj_set,
+ "%llu\n");
+
+static int tegra_thermal_shutdown_temp_tj_set(void *data, u64 val)
+{
+ thermal_state.temp_shutdown_tj = val;
+
+ if (thermal_state.device)
+ thermal_state.device->set_shutdown_temp(
+ thermal_state.device->data,
+ tj2dev(thermal_state.device,
+ thermal_state.temp_shutdown_tj));
+
+ tegra_thermal_alert(&thermal_state);
+
+ return 0;
+}
+
+static int tegra_thermal_shutdown_temp_tj_get(void *data, u64 *val)
+{
+ *val = (u64)thermal_state.temp_shutdown_tj;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(shutdown_temp_tj_fops,
+ tegra_thermal_shutdown_temp_tj_get,
+ tegra_thermal_shutdown_temp_tj_set,
+ "%llu\n");
+
+
+static int tegra_thermal_temp_tj_get(void *data, u64 *val)
+{
+ long temp_tj, temp_dev;
+
+ if (thermal_state.device) {
+ thermal_state.device->get_temp(thermal_state.device->data,
+ &temp_dev);
+
+ /* Convert all temps to tj and then do all work/logic in
+ terms of tj in order to avoid confusion */
+ temp_tj = dev2tj(thermal_state.device, temp_dev);
+ } else {
+ temp_tj = -1;
+ }
+
+ *val = (u64)temp_tj;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(temp_tj_fops,
+ tegra_thermal_temp_tj_get,
+ NULL,
+ "%llu\n");
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+static int tegra_thermal_tc1_set(void *data, u64 val)
+{
+ thermal_state.thz->tc1 = val;
+ return 0;
+}
+
+static int tegra_thermal_tc1_get(void *data, u64 *val)
+{
+ *val = (u64)thermal_state.thz->tc1;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tc1_fops,
+ tegra_thermal_tc1_get,
+ tegra_thermal_tc1_set,
+ "%llu\n");
+
+static int tegra_thermal_tc2_set(void *data, u64 val)
+{
+ thermal_state.thz->tc2 = val;
+ return 0;
+}
+
+static int tegra_thermal_tc2_get(void *data, u64 *val)
+{
+ *val = (u64)thermal_state.thz->tc2;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tc2_fops,
+ tegra_thermal_tc2_get,
+ tegra_thermal_tc2_set,
+ "%llu\n");
+
+static int tegra_thermal_passive_delay_set(void *data, u64 val)
+{
+ thermal_state.thz->passive_delay = val;
+ return 0;
+}
+
+static int tegra_thermal_passive_delay_get(void *data, u64 *val)
+{
+ *val = (u64)thermal_state.thz->passive_delay;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(passive_delay_fops,
+ tegra_thermal_passive_delay_get,
+ tegra_thermal_passive_delay_set,
+ "%llu\n");
+#endif
+
+
+static struct dentry *thermal_debugfs_root;
+
+static int __init tegra_thermal_debug_init(void)
+{
+ thermal_debugfs_root = debugfs_create_dir("tegra_thermal", NULL);
+
+ if (!debugfs_create_file("throttle_temp_tj", 0644, thermal_debugfs_root,
+ NULL, &throttle_temp_tj_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("shutdown_temp_tj", 0644, thermal_debugfs_root,
+ NULL, &shutdown_temp_tj_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("temp_tj", 0644, thermal_debugfs_root,
+ NULL, &temp_tj_fops))
+ goto err_out;
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ if (!debugfs_create_file("tc1", 0644, thermal_debugfs_root,
+ NULL, &tc1_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("tc2", 0644, thermal_debugfs_root,
+ NULL, &tc2_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("passive_delay", 0644, thermal_debugfs_root,
+ NULL, &passive_delay_fops))
+ goto err_out;
+#endif
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(thermal_debugfs_root);
+ return -ENOMEM;
+}
+
+late_initcall(tegra_thermal_debug_init);
+#endif
diff --git a/arch/arm/mach-tegra/tegra3_throttle.c b/arch/arm/mach-tegra/tegra3_throttle.c
new file mode 100644
index 000000000000..f927be7800d6
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_throttle.c
@@ -0,0 +1,367 @@
+/*
+ * arch/arm/mach-tegra/tegra3_throttle.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/thermal.h>
+
+#include "clock.h"
+#include "cpu-tegra.h"
+#include "dvfs.h"
+
+/* tegra throttling requires the frequencies in the table to be in ascending order */
+static struct cpufreq_frequency_table *cpu_freq_table;
+static struct mutex *cpu_throttle_lock;
+
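+/* Throttle table rows: cpu frequency cap in kHz, core voltage cap level
+ * in mV (passed to tegra_dvfs_core_cap_level_set()), and the time in ms
+ * to spend on the row before stepping down to the next one.  Row 0 is a
+ * placeholder filled at init with the cpufreq table's throttle floor. */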
+static struct {
+ unsigned int cpu_freq;
+ int core_cap_level;
+ int ms;
+} throttle_table[] = {
+ { 0, 1000, 2000 }, /* placeholder for cpu floor rate */
+ { 640000, 1000, 2000 },
+ { 640000, 1000, 2000 },
+ { 640000, 1000, 2000 },
+ { 640000, 1000, 2000 },
+ { 640000, 1000, 2000 },
+ { 760000, 1000, 2000 },
+ { 760000, 1050, 2000 },
+ {1000000, 1050, 2000 },
+ {1000000, 1100, 2000 },
+};
+
+static int is_throttling;
+static int throttle_index;
+static struct delayed_work throttle_work;
+static struct workqueue_struct *workqueue;
+static DEFINE_MUTEX(tegra_throttle_lock);
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+static struct thermal_cooling_device *cdev;
+#endif
+
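+/* Round cpu_freq down to the nearest entry in the cpufreq table;
+ * frequencies below the lowest entry are rounded up to it instead. */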
+static unsigned int clip_to_table(unsigned int cpu_freq)
+{
+ int i;
+
+ for (i = 0; cpu_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (cpu_freq_table[i].frequency > cpu_freq)
+ break;
+ }
+ i = (i == 0) ? 0 : i-1;
+ return cpu_freq_table[i].frequency;
+}
+
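+/* Each work period steps throttle_index one row toward row 0 (the cpu
+ * floor), tightens the cpu frequency cap if the current speed exceeds
+ * the row's limit, applies the row's core voltage cap, and re-arms
+ * itself until the floor row's cap is in effect. */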
+static void tegra_throttle_work_func(struct work_struct *work)
+{
+ unsigned int cpu_freq;
+ int core_level;
+
+ mutex_lock(cpu_throttle_lock);
+ if (!is_throttling) {
+ mutex_unlock(cpu_throttle_lock);
+ return;
+ }
+
+ cpu_freq = tegra_getspeed(0);
+ throttle_index -= throttle_index ? 1 : 0;
+
+ core_level = throttle_table[throttle_index].core_cap_level;
+ if (throttle_table[throttle_index].cpu_freq < cpu_freq)
+ tegra_cpu_set_speed_cap(NULL);
+
+ if (throttle_index || (throttle_table[0].cpu_freq < cpu_freq))
+ queue_delayed_work(workqueue, &throttle_work,
+ msecs_to_jiffies(throttle_table[throttle_index].ms));
+
+ mutex_unlock(cpu_throttle_lock);
+
+ tegra_dvfs_core_cap_level_set(core_level);
+}
+
+/*
+ * tegra_throttling_enable
+ * This function may sleep
+ */
+void tegra_throttling_enable(bool enable)
+{
+ mutex_lock(&tegra_throttle_lock);
+ mutex_lock(cpu_throttle_lock);
+
+ if (enable && !(is_throttling++)) {
+ int core_level;
+ unsigned int cpu_freq = tegra_getspeed(0);
+ throttle_index = ARRAY_SIZE(throttle_table) - 1;
+
+ core_level = throttle_table[throttle_index].core_cap_level;
+ if (throttle_table[throttle_index].cpu_freq < cpu_freq)
+ tegra_cpu_set_speed_cap(NULL);
+
+ queue_delayed_work(workqueue, &throttle_work,
+ msecs_to_jiffies(throttle_table[throttle_index].ms));
+
+ mutex_unlock(cpu_throttle_lock);
+
+ tegra_dvfs_core_cap_level_set(core_level);
+ tegra_dvfs_core_cap_enable(true);
+
+ mutex_unlock(&tegra_throttle_lock);
+ return;
+ }
+
+ if (!enable && is_throttling) {
+ if (!(--is_throttling)) {
+ /* restore speed requested by governor */
+ tegra_cpu_set_speed_cap(NULL);
+ mutex_unlock(cpu_throttle_lock);
+
+ tegra_dvfs_core_cap_enable(false);
+ cancel_delayed_work_sync(&throttle_work);
+ mutex_unlock(&tegra_throttle_lock);
+ return;
+ }
+ }
+
+ mutex_unlock(cpu_throttle_lock);
+ mutex_unlock(&tegra_throttle_lock);
+}
+EXPORT_SYMBOL_GPL(tegra_throttling_enable);
+
+unsigned int tegra_throttle_governor_speed(unsigned int requested_speed)
+{
+ return is_throttling ?
+ min(requested_speed, throttle_table[throttle_index].cpu_freq) :
+ requested_speed;
+}
+
+bool tegra_is_throttling(void)
+{
+ return is_throttling;
+}
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+
+static int
+tegra_throttle_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *max_state)
+{
+ *max_state = ARRAY_SIZE(throttle_table);
+ return 0;
+}
+
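+/* Cooling state 0 means no throttling; state N (1..table size) selects
+ * throttle table row (table size - N), so higher cooling states map to
+ * lower, more aggressive rows. */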
+static int
+tegra_throttle_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *cur_state)
+{
+ mutex_lock(cpu_throttle_lock);
+ *cur_state = is_throttling ?
+ (ARRAY_SIZE(throttle_table) - throttle_index) :
+ 0;
+ mutex_unlock(cpu_throttle_lock);
+
+ return 0;
+}
+
+static int
+tegra_throttle_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long cur_state)
+{
+ int core_level;
+
+ mutex_lock(cpu_throttle_lock);
+ if (cur_state == 0) {
+ /* restore speed requested by governor */
+ if (is_throttling) {
+ tegra_dvfs_core_cap_enable(false);
+ is_throttling = false;
+ }
+
+ tegra_cpu_set_speed_cap(NULL);
+ } else {
+ if (!is_throttling) {
+ tegra_dvfs_core_cap_enable(true);
+ is_throttling = true;
+ }
+
+ throttle_index = ARRAY_SIZE(throttle_table) - cur_state;
+ core_level = throttle_table[throttle_index].core_cap_level;
+ tegra_dvfs_core_cap_level_set(core_level);
+
+ tegra_cpu_set_speed_cap(NULL);
+ }
+
+ mutex_unlock(cpu_throttle_lock);
+
+ return 0;
+}
+
+struct thermal_cooling_device_ops tegra_throttle_cooling_ops = {
+ .get_max_state = tegra_throttle_get_max_state,
+ .get_cur_state = tegra_throttle_get_cur_state,
+ .set_cur_state = tegra_throttle_set_cur_state,
+};
+#endif
+
+int __init tegra_throttle_init(struct mutex *cpu_lock)
+{
+ int i;
+ struct tegra_cpufreq_table_data *table_data =
+ tegra_cpufreq_table_get();
+ if (IS_ERR_OR_NULL(table_data))
+ return -EINVAL;
+
+ /*
+ * High-priority, other flags default: not bound to a specific
+ * CPU, has rescue worker task (in case of allocation deadlock,
+ * etc.). Single-threaded.
+ */
+ workqueue = alloc_workqueue("cpu-tegra",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
+ if (!workqueue)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);
+
+ cpu_throttle_lock = cpu_lock;
+ cpu_freq_table = table_data->freq_table;
+ throttle_table[0].cpu_freq =
+ cpu_freq_table[table_data->throttle_lowest_index].frequency;
+
+ for (i = 0; i < ARRAY_SIZE(throttle_table); i++) {
+ unsigned int cpu_freq = throttle_table[i].cpu_freq;
+ throttle_table[i].cpu_freq = clip_to_table(cpu_freq);
+ }
+
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ cdev = thermal_cooling_device_register("Throttle", NULL,
+ &tegra_throttle_cooling_ops);
+
+ if (IS_ERR(cdev)) {
+ cdev = NULL;
+ return -ENODEV;
+ }
+#endif
+
+ return 0;
+}
+
+void tegra_throttle_exit(void)
+{
+#ifdef CONFIG_TEGRA_THERMAL_SYSFS
+ if (cdev) {
+ thermal_cooling_device_unregister(cdev);
+ cdev = NULL;
+ }
+#endif
+ destroy_workqueue(workqueue);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int throttle_debug_set(void *data, u64 val)
+{
+ tegra_throttling_enable(val);
+ return 0;
+}
+static int throttle_debug_get(void *data, u64 *val)
+{
+ *val = (u64) is_throttling;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set,
+ "%llu\n");
+static int table_show(struct seq_file *s, void *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(throttle_table); i++)
+ seq_printf(s, "[%d] = %7u %4d %5d\n",
+ i, throttle_table[i].cpu_freq,
+ throttle_table[i].core_cap_level, throttle_table[i].ms);
+ return 0;
+}
+
+static int table_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, table_show, inode->i_private);
+}
+
+static ssize_t table_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ char buf[80];
+ int table_idx;
+ unsigned int cpu_freq;
+ int core_cap_level;
+ int ms;
+
+ if (sizeof(buf) <= count)
+ return -EINVAL;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ /* terminate buffer and trim - white spaces may be appended
+ * at the end when invoked from shell command line */
+ buf[count] = '\0';
+ strim(buf);
+
+ if (sscanf(buf, "[%d] = %u %d %d",
+ &table_idx, &cpu_freq, &core_cap_level, &ms) != 4)
+ return -EINVAL;
+
+ if ((table_idx < 0) || (table_idx >= ARRAY_SIZE(throttle_table)))
+ return -EINVAL;
+
+ /* round new settings before updating table */
+ throttle_table[table_idx].cpu_freq = clip_to_table(cpu_freq);
+ throttle_table[table_idx].core_cap_level = (core_cap_level / 50) * 50;
+ throttle_table[table_idx].ms = jiffies_to_msecs(msecs_to_jiffies(ms));
+
+ return count;
+}
+
+static const struct file_operations table_fops = {
+ .open = table_open,
+ .read = seq_read,
+ .write = table_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+int __init tegra_throttle_debug_init(struct dentry *cpu_tegra_debugfs_root)
+{
+ if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root,
+ NULL, &throttle_fops))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("throttle_table", 0644, cpu_tegra_debugfs_root,
+ NULL, &table_fops))
+ return -ENOMEM;
+
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
diff --git a/arch/arm/mach-tegra/tegra3_tsensor.c b/arch/arm/mach-tegra/tegra3_tsensor.c
new file mode 100644
index 000000000000..a19a99785ae7
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_tsensor.c
@@ -0,0 +1,194 @@
+/*
+ * arch/arm/mach-tegra/tegra3_tsensor.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_SENSORS_TEGRA_TSENSOR
+#include <mach/tsensor.h>
+#include <mach/tegra_fuse.h>
+#include "devices.h"
+#include <mach/iomap.h>
+#include <mach/thermal.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+/* fuse revision constants used for tsensor */
+#define TSENSOR_FUSE_REVISION_DECIMAL 8
+#define TSENSOR_FUSE_REVISION_INTEGER 0
+
+/* scratch register offsets needed for powering off PMU */
+#define SCRATCH54_OFFSET 0x258
+#define SCRATCH55_OFFSET 0x25C
+
+/* scratch 54 register bit field offsets */
+#define PMU_OFF_DATA_OFFSET 8
+
+/* scratch 55 register bit field offsets */
+#define RESET_TEGRA_OFFSET 31
+#define CONTROLLER_TYPE_OFFSET 30
+#define I2C_CONTROLLER_ID_OFFSET 27
+#define PINMUX_OFFSET 24
+#define CHECKSUM_OFFSET 16
+#define PMU_16BIT_SUPPORT_OFFSET 15
+/* scratch 55 register bit field masks */
+#define RESET_TEGRA_MASK 0x1
+#define CONTROLLER_TYPE_MASK 0x1
+#define I2C_CONTROLLER_ID_MASK 0x7
+#define PINMUX_MASK 0x7
+#define CHECKSUM_MASK 0xff
+#define PMU_16BIT_SUPPORT_MASK 0x1
+
+#define TSENSOR_OFFSET (4000 + 5000)
+
+#ifdef CONFIG_TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+static int tsensor_get_temp(void *vdata, long *milli_temp)
+{
+ struct tegra_tsensor_data *data = vdata;
+ return tsensor_thermal_get_temp(data, milli_temp);
+}
+
+static int tsensor_get_temp_low(void *vdata, long *milli_temp)
+{
+ struct tegra_tsensor_data *data = vdata;
+ return tsensor_thermal_get_temp_low(data, milli_temp);
+}
+
+static int tsensor_set_limits(void *vdata,
+ long lo_limit_milli,
+ long hi_limit_milli)
+{
+ struct tegra_tsensor_data *data = vdata;
+ return tsensor_thermal_set_limits(data,
+ lo_limit_milli,
+ hi_limit_milli);
+}
+
+static int tsensor_set_alert(void *vdata,
+ void (*alert_func)(void *),
+ void *alert_data)
+{
+ struct tegra_tsensor_data *data = vdata;
+ return tsensor_thermal_set_alert(data, alert_func, alert_data);
+}
+
+static int tsensor_set_shutdown_temp(void *vdata, long shutdown_temp_milli)
+{
+ struct tegra_tsensor_data *data = vdata;
+ return tsensor_thermal_set_shutdown_temp(data, shutdown_temp_milli);
+}
+
+static void tegra3_tsensor_probe_callback(struct tegra_tsensor_data *data)
+{
+ struct tegra_thermal_device *thermal_device;
+
+ thermal_device = kzalloc(sizeof(struct tegra_thermal_device),
+ GFP_KERNEL);
+
+ if (!thermal_device) {
+ pr_err("unable to allocate thermal device\n");
+ return;
+ }
+
+ thermal_device->name = "tsensor";
+ thermal_device->data = data;
+ thermal_device->offset = TSENSOR_OFFSET;
+ thermal_device->get_temp = tsensor_get_temp;
+ thermal_device->get_temp_low = tsensor_get_temp_low;
+ thermal_device->set_limits = tsensor_set_limits;
+ thermal_device->set_alert = tsensor_set_alert;
+ thermal_device->set_shutdown_temp = tsensor_set_shutdown_temp;
+
+ if (tegra_thermal_set_device(thermal_device)) /* This should not fail */
+ BUG();
+}
+#endif
+
+static struct tegra_tsensor_platform_data tsensor_data = {
+#ifdef CONFIG_TEGRA_INTERNAL_TSENSOR_EDP_SUPPORT
+ .probe_callback = tegra3_tsensor_probe_callback,
+#endif
+};
+
+void __init tegra3_tsensor_init(struct tegra_tsensor_pmu_data *data)
+{
+ unsigned int reg;
+ int err;
+ u32 val, checksum;
+ void __iomem *pMem = NULL;
+ /* tsensor driver is instantiated based on fuse revision */
+ err = tegra_fuse_get_revision(&reg);
+ if (err)
+ goto labelEnd;
+ pr_info("\nTegra3 fuse revision %d ", reg);
+ if (reg < TSENSOR_FUSE_REVISION_DECIMAL)
+ goto labelEnd;
+
+ if (!data)
+ goto labelSkipPowerOff;
+
+ if (!request_mem_region(TEGRA_PMC_BASE +
+ SCRATCH54_OFFSET, 8, "tegra-tsensor"))
+ pr_err(" [%s, line=%d]: Error mem busy\n",
+ __func__, __LINE__);
+
+ pMem = ioremap(TEGRA_PMC_BASE + SCRATCH54_OFFSET, 8);
+ if (!pMem) {
+ pr_err(" [%s, line=%d]: can't ioremap "
+ "pmc iomem\n", __FILE__, __LINE__);
+ goto labelEnd;
+ }
+
+ /*
+ * Fill scratch registers to power off the device
+ * in case if temperature crosses threshold TH3
+ */
+ val = (data->poweroff_reg_data << PMU_OFF_DATA_OFFSET) |
+ data->poweroff_reg_addr;
+ writel(val, pMem);
+
+ val = ((data->reset_tegra & RESET_TEGRA_MASK) << RESET_TEGRA_OFFSET) |
+ ((data->controller_type & CONTROLLER_TYPE_MASK) <<
+ CONTROLLER_TYPE_OFFSET) |
+ ((data->i2c_controller_id & I2C_CONTROLLER_ID_MASK) <<
+ I2C_CONTROLLER_ID_OFFSET) |
+ ((data->pinmux & PINMUX_MASK) << PINMUX_OFFSET) |
+ ((data->pmu_16bit_ops & PMU_16BIT_SUPPORT_MASK) <<
+ PMU_16BIT_SUPPORT_OFFSET) | data->pmu_i2c_addr;
+
+ checksum = data->poweroff_reg_addr +
+ data->poweroff_reg_data + (val & 0xFF) +
+ ((val >> 8) & 0xFF) + ((val >> 24) & 0xFF);
+ checksum &= 0xFF;
+ checksum = 0x100 - checksum;
+
+ val |= (checksum << CHECKSUM_OFFSET);
+ writel(val, pMem + 4);
+
+labelSkipPowerOff:
+ /* set platform data for device before register */
+ tegra_tsensor_device.dev.platform_data = &tsensor_data;
+ platform_device_register(&tegra_tsensor_device);
+
+labelEnd:
+ return;
+}
+
+#else
+void __init tegra3_tsensor_init(struct tegra_tsensor_pmu_data *data) { }
+#endif
+
diff --git a/arch/arm/mach-tegra/tegra_fiq_debugger.c b/arch/arm/mach-tegra/tegra_fiq_debugger.c
new file mode 100644
index 000000000000..2a19a214acb5
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_fiq_debugger.c
@@ -0,0 +1,206 @@
+/*
+ * arch/arm/mach-tegra/fiq_debugger.c
+ *
+ * Serial Debugger Interface for Tegra
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <asm/fiq_debugger.h>
+#include <mach/tegra_fiq_debugger.h>
+#include <mach/system.h>
+#include <mach/fiq.h>
+
+#include <linux/uaccess.h>
+
+struct tegra_fiq_debugger {
+ struct fiq_debugger_pdata pdata;
+ void __iomem *debug_port_base;
+ bool break_seen;
+};
+
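+/* The Tegra UART registers are spaced 32 bits apart, so the standard
+ * 8250 register offsets are scaled by 4 and accessed as single bytes. */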
+static inline void tegra_write(struct tegra_fiq_debugger *t,
+ unsigned int val, unsigned int off)
+{
+ __raw_writeb(val, t->debug_port_base + off * 4);
+}
+
+static inline unsigned int tegra_read(struct tegra_fiq_debugger *t,
+ unsigned int off)
+{
+ return __raw_readb(t->debug_port_base + off * 4);
+}
+
+static inline unsigned int tegra_read_lsr(struct tegra_fiq_debugger *t)
+{
+ unsigned int lsr;
+
+ lsr = tegra_read(t, UART_LSR);
+ if (lsr & UART_LSR_BI)
+ t->break_seen = true;
+
+ return lsr;
+}
+
+static int debug_port_init(struct platform_device *pdev)
+{
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ if (tegra_read(t, UART_LSR) & UART_LSR_DR)
+ (void)tegra_read(t, UART_RX);
+ /* enable rx and lsr interrupt */
+ tegra_write(t, UART_IER_RLSI | UART_IER_RDI, UART_IER);
+ /* interrupt on every character */
+ tegra_write(t, 0, UART_IIR);
+
+ return 0;
+}
+
+static int debug_getc(struct platform_device *pdev)
+{
+ unsigned int lsr;
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ lsr = tegra_read_lsr(t);
+
+ if (lsr & UART_LSR_BI || t->break_seen) {
+ t->break_seen = false;
+ return FIQ_DEBUGGER_BREAK;
+ }
+
+ if (lsr & UART_LSR_DR)
+ return tegra_read(t, UART_RX);
+
+ return FIQ_DEBUGGER_NO_CHAR;
+}
+
+static void debug_putc(struct platform_device *pdev, unsigned int c)
+{
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ while (!(tegra_read_lsr(t) & UART_LSR_THRE))
+ cpu_relax();
+
+ tegra_write(t, c, UART_TX);
+}
+
+static void debug_flush(struct platform_device *pdev)
+{
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ while (!(tegra_read_lsr(t) & UART_LSR_TEMT))
+ cpu_relax();
+}
+
+static void fiq_enable(struct platform_device *pdev, unsigned int irq, bool on)
+{
+ if (on)
+ tegra_fiq_enable(irq);
+ else
+ tegra_fiq_disable(irq);
+}
+
+static int tegra_fiq_debugger_id;
+
+void tegra_serial_debug_init(unsigned int base, int irq,
+ struct clk *clk, int signal_irq, int wakeup_irq)
+{
+ struct tegra_fiq_debugger *t;
+ struct platform_device *pdev;
+ struct resource *res;
+ int res_count;
+
+ t = kzalloc(sizeof(struct tegra_fiq_debugger), GFP_KERNEL);
+ if (!t) {
+ pr_err("Failed to allocate for fiq debugger\n");
+ return;
+ }
+
+ t->pdata.uart_init = debug_port_init;
+ t->pdata.uart_getc = debug_getc;
+ t->pdata.uart_putc = debug_putc;
+ t->pdata.uart_flush = debug_flush;
+ t->pdata.fiq_enable = fiq_enable;
+
+ t->debug_port_base = ioremap(base, PAGE_SIZE);
+ if (!t->debug_port_base) {
+ pr_err("Failed to ioremap for fiq debugger\n");
+ goto out1;
+ }
+
+ res = kzalloc(sizeof(struct resource) * 3, GFP_KERNEL);
+ if (!res) {
+ pr_err("Failed to alloc fiq debugger resources\n");
+ goto out2;
+ }
+
+ pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!pdev) {
+ pr_err("Failed to alloc fiq debugger platform device\n");
+ goto out3;
+ }
+
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].start = irq;
+ res[0].end = irq;
+ res[0].name = "fiq";
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = signal_irq;
+ res[1].end = signal_irq;
+ res[1].name = "signal";
+ res_count = 2;
+
+ if (wakeup_irq >= 0) {
+ res[2].flags = IORESOURCE_IRQ;
+ res[2].start = wakeup_irq;
+ res[2].end = wakeup_irq;
+ res[2].name = "wakeup";
+ res_count++;
+ }
+
+ pdev->name = "fiq_debugger";
+ pdev->id = tegra_fiq_debugger_id++;
+ pdev->dev.platform_data = &t->pdata;
+ pdev->resource = res;
+ pdev->num_resources = res_count;
+
+ if (platform_device_register(pdev)) {
+ pr_err("Failed to register fiq debugger\n");
+ goto out4;
+ }
+
+ return;
+
+out4:
+ kfree(pdev);
+out3:
+ kfree(res);
+out2:
+ iounmap(t->debug_port_base);
+out1:
+ kfree(t);
+}
diff --git a/arch/arm/mach-tegra/tegra_i2s_audio.c b/arch/arm/mach-tegra/tegra_i2s_audio.c
new file mode 100644
index 000000000000..fc790066694a
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_i2s_audio.c
@@ -0,0 +1,1965 @@
+/*
+ * arch/arm/mach-tegra/tegra_i2s_audio.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* TODO:
+ -- make I2S_MAX_NUM_BUFS configurable through an ioctl
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/sysfs.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/tegra_audio.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+#include <mach/i2s.h>
+#include <mach/audio.h>
+#include <mach/irqs.h>
+
+#include "clock.h"
+
+#define PCM_BUFFER_MAX_SIZE_ORDER PAGE_SHIFT
+
+#define TEGRA_AUDIO_DSP_NONE 0
+#define TEGRA_AUDIO_DSP_PCM 1
+#define TEGRA_AUDIO_DSP_NETWORK 2
+#define TEGRA_AUDIO_DSP_TDM 3
+
+#define I2S_MAX_NUM_BUFS 4
+#define I2S_DEFAULT_TX_NUM_BUFS 2
+#define I2S_DEFAULT_RX_NUM_BUFS 2
+
+/* per stream (input/output) */
+struct audio_stream {
+ int opened;
+ struct mutex lock;
+
+ bool active; /* is DMA in progress? */
+ int num_bufs;
+ void *buffer[I2S_MAX_NUM_BUFS];
+ dma_addr_t buf_phy[I2S_MAX_NUM_BUFS];
+ struct completion comp[I2S_MAX_NUM_BUFS];
+ struct tegra_dma_req dma_req[I2S_MAX_NUM_BUFS];
+ int last_queued;
+
+ int i2s_fifo_atn_level;
+
+ struct tegra_dma_channel *dma_chan;
+ bool stop;
+ struct completion stop_completion;
+ spinlock_t dma_req_lock;
+
+ struct work_struct allow_suspend_work;
+ struct wake_lock wake_lock;
+ char wake_lock_name[100];
+};
+
+/* per i2s controller */
+struct audio_driver_state {
+ struct list_head next;
+
+ struct platform_device *pdev;
+ struct tegra_audio_platform_data *pdata;
+ phys_addr_t i2s_phys;
+ unsigned long i2s_base;
+
+ unsigned long dma_req_sel;
+
+ int irq;
+ struct tegra_audio_in_config in_config;
+
+ struct miscdevice misc_out;
+ struct miscdevice misc_out_ctl;
+ struct audio_stream out;
+
+ struct miscdevice misc_in;
+ struct miscdevice misc_in_ctl;
+ struct audio_stream in;
+
+ /* Control for whole I2S (Data format, etc.) */
+ struct miscdevice misc_ctl;
+ unsigned int bit_format;
+};
+
+static inline bool pending_buffer_requests(struct audio_stream *stream)
+{
+ int i;
+ for (i = 0; i < stream->num_bufs; i++)
+ if (!completion_done(&stream->comp[i]))
+ return true;
+ return false;
+}
+
+static inline int buf_size(struct audio_stream *s __attribute__((unused)))
+{
+ return 1 << PCM_BUFFER_MAX_SIZE_ORDER;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_out);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_out_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_in(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_in);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_in_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_in_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_out(
+ struct audio_stream *aos)
+{
+ return container_of(aos, struct audio_driver_state, out);
+}
+
+static inline struct audio_driver_state *ads_from_in(
+ struct audio_stream *ais)
+{
+ return container_of(ais, struct audio_driver_state, in);
+}
+
+static inline void prevent_suspend(struct audio_stream *as)
+{
+ pr_debug("%s\n", __func__);
+ cancel_work_sync(&as->allow_suspend_work);
+ wake_lock(&as->wake_lock);
+}
+
+static void allow_suspend_worker(struct work_struct *w)
+{
+ struct audio_stream *as = container_of(w,
+ struct audio_stream, allow_suspend_work);
+ pr_debug("%s\n", __func__);
+ wake_unlock(&as->wake_lock);
+}
+
+static inline void allow_suspend(struct audio_stream *as)
+{
+ schedule_work(&as->allow_suspend_work);
+}
+
+#define I2S_I2S_FIFO_TX_BUSY I2S_I2S_STATUS_FIFO1_BSY
+#define I2S_I2S_FIFO_TX_QS I2S_I2S_STATUS_QS_FIFO1
+#define I2S_I2S_FIFO_TX_ERR I2S_I2S_STATUS_FIFO1_ERR
+
+#define I2S_I2S_FIFO_RX_BUSY I2S_I2S_STATUS_FIFO2_BSY
+#define I2S_I2S_FIFO_RX_QS I2S_I2S_STATUS_QS_FIFO2
+#define I2S_I2S_FIFO_RX_ERR I2S_I2S_STATUS_FIFO2_ERR
+
+#define I2S_FIFO_ERR (I2S_I2S_STATUS_FIFO1_ERR | I2S_I2S_STATUS_FIFO2_ERR)
+
+static inline void i2s_writel(unsigned long base, u32 val, u32 reg)
+{
+ writel(val, base + reg);
+}
+
+static inline u32 i2s_readl(unsigned long base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void i2s_fifo_write(unsigned long base, int fifo, u32 data)
+{
+ i2s_writel(base, data, fifo ? I2S_I2S_FIFO2_0 : I2S_I2S_FIFO1_0);
+}
+
+static inline u32 i2s_fifo_read(unsigned long base, int fifo)
+{
+ return i2s_readl(base, fifo ? I2S_I2S_FIFO2_0 : I2S_I2S_FIFO1_0);
+}
+
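+/* The I2S timing register holds bitclk / (2 * fs) - 1, i.e. the number
+ * of bit clocks per channel of a frame minus one; non-symmetric mode is
+ * enabled when bitclk is not an exact multiple of 2 * fs. */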
+static int i2s_set_channel_bit_count(unsigned long base,
+ int sampling, int bitclk)
+{
+ u32 val;
+ int bitcnt = bitclk / (2 * sampling) - 1;
+
+ if (bitcnt < 0 || bitcnt >= 1<<11) {
+ pr_err("%s: bit count %d is out of bounds\n", __func__,
+ bitcnt);
+ return -EINVAL;
+ }
+
+ val = bitcnt;
+ if (bitclk % (2 * sampling)) {
+ pr_info("%s: enabling non-symmetric mode\n", __func__);
+ val |= I2S_I2S_TIMING_NON_SYM_ENABLE;
+ }
+
+ pr_debug("%s: I2S_I2S_TIMING_0 = %08x\n", __func__, val);
+ i2s_writel(base, val, I2S_I2S_TIMING_0);
+ return 0;
+}
+
+static void i2s_set_fifo_mode(unsigned long base, int fifo, int tx)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (fifo == 0) {
+ val &= ~I2S_I2S_CTRL_FIFO1_RX_ENABLE;
+ val |= (!tx) ? I2S_I2S_CTRL_FIFO1_RX_ENABLE : 0;
+ } else {
+ val &= ~I2S_I2S_CTRL_FIFO2_TX_ENABLE;
+ val |= tx ? I2S_I2S_CTRL_FIFO2_TX_ENABLE : 0;
+ }
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static int i2s_fifo_set_attention_level(unsigned long base,
+ int fifo, unsigned level)
+{
+ u32 val;
+
+ if (level > I2S_FIFO_ATN_LVL_TWELVE_SLOTS) {
+ pr_err("%s: invalid fifo level selector %d\n", __func__,
+ level);
+ return -EINVAL;
+ }
+
+ val = i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+
+ if (!fifo) {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK;
+ val |= level << I2S_FIFO1_ATN_LVL_SHIFT;
+ } else {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK;
+ val |= level << I2S_FIFO2_ATN_LVL_SHIFT;
+ }
+
+ i2s_writel(base, val, I2S_I2S_FIFO_SCR_0);
+ return 0;
+}
+
+static void i2s_fifo_enable(unsigned long base, int fifo, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_CTRL_FIFO1_ENABLE;
+ val |= on ? I2S_I2S_CTRL_FIFO1_ENABLE : 0;
+ } else {
+ val &= ~I2S_I2S_CTRL_FIFO2_ENABLE;
+ val |= on ? I2S_I2S_CTRL_FIFO2_ENABLE : 0;
+ }
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+#if 0
+static bool i2s_is_fifo_enabled(unsigned long base, int fifo)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo)
+ return !!(val & I2S_I2S_CTRL_FIFO1_ENABLE);
+ return !!(val & I2S_I2S_CTRL_FIFO2_ENABLE);
+}
+#endif
+
+static void i2s_fifo_clear(unsigned long base, int fifo)
+{
+ u32 val = i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO1_CLR;
+ val |= I2S_I2S_FIFO_SCR_FIFO1_CLR;
+#if 0
+ /* Per Nvidia, reduces pop on the next run. */
+ if (!(val & I2S_I2S_CTRL_FIFO1_RX_ENABLE)) {
+ int cnt = 16;
+ while (cnt--)
+ i2s_writel(base, 0, I2S_I2S_FIFO1_0);
+ }
+#endif
+ } else {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO2_CLR;
+ val |= I2S_I2S_FIFO_SCR_FIFO2_CLR;
+ }
+
+ i2s_writel(base, val, I2S_I2S_FIFO_SCR_0);
+}
+
+static void i2s_set_master(unsigned long base, int master)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_MASTER_ENABLE;
+ val |= master ? I2S_I2S_CTRL_MASTER_ENABLE : 0;
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static int i2s_set_dsp_mode(unsigned long base, unsigned int mode)
+{
+ u32 val;
+ if (mode > TEGRA_AUDIO_DSP_TDM) {
+ pr_err("%s: invalid mode %d.\n", __func__, mode);
+ return -EINVAL;
+ }
+ if (mode == TEGRA_AUDIO_DSP_TDM) {
+ pr_err("TEGRA_AUDIO_DSP_TDM not implemented.\n");
+ return -EINVAL;
+ }
+
+ /* Disable unused modes */
+ if (mode != TEGRA_AUDIO_DSP_PCM) {
+ /* Disable PCM mode */
+ val = i2s_readl(base, I2S_I2S_PCM_CTRL_0);
+ val &= ~(I2S_I2S_PCM_CTRL_TRM_MODE |
+ I2S_I2S_PCM_CTRL_RCV_MODE);
+ i2s_writel(base, val, I2S_I2S_PCM_CTRL_0);
+ }
+ if (mode != TEGRA_AUDIO_DSP_NETWORK) {
+ /* Disable Network mode */
+ val = i2s_readl(base, I2S_I2S_NW_CTRL_0);
+ val &= ~(I2S_I2S_NW_CTRL_TRM_TLPHY_MODE |
+ I2S_I2S_NW_CTRL_RCV_TLPHY_MODE);
+ i2s_writel(base, val, I2S_I2S_NW_CTRL_0);
+ }
+
+ /* Enable the selected mode. */
+ switch (mode) {
+ case TEGRA_AUDIO_DSP_NETWORK:
+ /* Set DSP Network (Telephony) Mode */
+ val = i2s_readl(base, I2S_I2S_NW_CTRL_0);
+ val |= I2S_I2S_NW_CTRL_TRM_TLPHY_MODE |
+ I2S_I2S_NW_CTRL_RCV_TLPHY_MODE;
+ i2s_writel(base, val, I2S_I2S_NW_CTRL_0);
+ break;
+ case TEGRA_AUDIO_DSP_PCM:
+ /* Set DSP PCM Mode */
+ val = i2s_readl(base, I2S_I2S_PCM_CTRL_0);
+ val |= I2S_I2S_PCM_CTRL_TRM_MODE |
+ I2S_I2S_PCM_CTRL_RCV_MODE;
+ i2s_writel(base, val, I2S_I2S_PCM_CTRL_0);
+ break;
+ }
+
+ return 0;
+}
+
+static int i2s_set_bit_format(unsigned long base, unsigned fmt)
+{
+ u32 val;
+
+ if (fmt > I2S_BIT_FORMAT_DSP) {
+ pr_err("%s: invalid bit-format selector %d\n", __func__, fmt);
+ return -EINVAL;
+ }
+
+ val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_BIT_FORMAT_MASK;
+ val |= fmt << I2S_BIT_FORMAT_SHIFT;
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+ /* For DSP format, select DSP PCM mode. */
+ /* PCM mode and Network Mode slot 0 are effectively identical. */
+ if (fmt == I2S_BIT_FORMAT_DSP)
+ i2s_set_dsp_mode(base, TEGRA_AUDIO_DSP_PCM);
+ else
+ i2s_set_dsp_mode(base, TEGRA_AUDIO_DSP_NONE);
+
+ return 0;
+}
+
+static int i2s_set_bit_size(unsigned long base, unsigned bit_size)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_BIT_SIZE_MASK;
+
+ if (bit_size > I2S_BIT_SIZE_32) {
+ pr_err("%s: invalid bit_size selector %d\n", __func__,
+ bit_size);
+ return -EINVAL;
+ }
+
+ val |= bit_size << I2S_BIT_SIZE_SHIFT;
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+ return 0;
+}
+
+static int i2s_set_fifo_format(unsigned long base, unsigned fmt)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_FIFO_FORMAT_MASK;
+
+ if (fmt > I2S_FIFO_32 && fmt != I2S_FIFO_PACKED) {
+ pr_err("%s: invalid fmt selector %d\n", __func__, fmt);
+ return -EINVAL;
+ }
+
+ val |= fmt << I2S_FIFO_SHIFT;
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+ return 0;
+}
+
+static void i2s_set_left_right_control_polarity(unsigned long base,
+ int high_low)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_L_R_CTRL;
+ val |= high_low ? I2S_I2S_CTRL_L_R_CTRL : 0;
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+#if 0
+static void i2s_set_fifo_irq_on_err(unsigned long base, int fifo, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_IE_FIFO1_ERR;
+ val |= on ? I2S_I2S_IE_FIFO1_ERR : 0;
+ } else {
+ val &= ~I2S_I2S_IE_FIFO2_ERR;
+ val |= on ? I2S_I2S_IE_FIFO2_ERR : 0;
+ }
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static void i2s_set_fifo_irq_on_qe(unsigned long base, int fifo, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_QE_FIFO1;
+ val |= on ? I2S_I2S_QE_FIFO1 : 0;
+ } else {
+ val &= ~I2S_I2S_QE_FIFO2;
+ val |= on ? I2S_I2S_QE_FIFO2 : 0;
+ }
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+#endif
+
+static void i2s_enable_fifos(unsigned long base, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (on)
+ val |= I2S_I2S_QE_FIFO1 | I2S_I2S_QE_FIFO2 |
+ I2S_I2S_IE_FIFO1_ERR | I2S_I2S_IE_FIFO2_ERR;
+ else
+ val &= ~(I2S_I2S_QE_FIFO1 | I2S_I2S_QE_FIFO2 |
+ I2S_I2S_IE_FIFO1_ERR | I2S_I2S_IE_FIFO2_ERR);
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static inline u32 i2s_get_status(unsigned long base)
+{
+ return i2s_readl(base, I2S_I2S_STATUS_0);
+}
+
+static inline u32 i2s_get_control(unsigned long base)
+{
+ return i2s_readl(base, I2S_I2S_CTRL_0);
+}
+
+static inline void i2s_ack_status(unsigned long base)
+{
+ return i2s_writel(base, i2s_readl(base, I2S_I2S_STATUS_0),
+ I2S_I2S_STATUS_0);
+}
+
+static inline u32 i2s_get_fifo_scr(unsigned long base)
+{
+ return i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+}
+
+static inline phys_addr_t i2s_get_fifo_phy_base(unsigned long phy_base,
+ int fifo)
+{
+ return phy_base + (fifo ? I2S_I2S_FIFO2_0 : I2S_I2S_FIFO1_0);
+}
+
+static inline u32 i2s_get_fifo_full_empty_count(unsigned long base, int fifo)
+{
+ u32 val = i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+
+ if (!fifo)
+ val = val >> I2S_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT;
+ else
+ val = val >> I2S_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT;
+
+ return val & I2S_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK;
+}
+
+static int i2s_configure(struct platform_device *pdev)
+{
+ struct tegra_audio_platform_data *pdata = pdev->dev.platform_data;
+ struct audio_driver_state *state = pdata->driver_data;
+ bool master;
+ struct clk *i2s_clk;
+ int master_clk;
+
+ /* dev_info(&pdev->dev, "%s\n", __func__); */
+
+ if (!state)
+ return -ENOMEM;
+
+ /* disable interrupts from I2S */
+ i2s_enable_fifos(state->i2s_base, 0);
+ i2s_fifo_clear(state->i2s_base, I2S_FIFO_TX);
+ i2s_fifo_clear(state->i2s_base, I2S_FIFO_RX);
+ i2s_set_left_right_control_polarity(state->i2s_base, 0); /* default */
+
+ i2s_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(i2s_clk)) {
+ dev_err(&pdev->dev, "%s: could not get i2s clock\n",
+ __func__);
+ return -EIO;
+ }
+
+ master = state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP ?
+ state->pdata->dsp_master : state->pdata->i2s_master;
+
+
+ master_clk = state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP ?
+ state->pdata->dsp_master_clk :
+ state->pdata->i2s_master_clk;
+#define I2S_CLK_TO_BITCLK_RATIO 2 /* Todo, Bitclk based on 2X clock? */
+ if (master)
+ i2s_set_channel_bit_count(state->i2s_base, master_clk,
+ clk_get_rate(i2s_clk)*I2S_CLK_TO_BITCLK_RATIO);
+ i2s_set_master(state->i2s_base, master);
+
+ i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_TX, 1);
+ i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_RX, 0);
+
+ if (state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+ i2s_set_bit_format(state->i2s_base, I2S_BIT_FORMAT_DSP);
+ else
+ i2s_set_bit_format(state->i2s_base, state->pdata->mode);
+ i2s_set_bit_size(state->i2s_base, state->pdata->bit_size);
+ i2s_set_fifo_format(state->i2s_base, state->pdata->fifo_fmt);
+
+ return 0;
+}
+
+static int init_stream_buffer(struct audio_stream *, int);
+
+static int setup_dma(struct audio_driver_state *, int);
+static void tear_down_dma(struct audio_driver_state *, int);
+static void stop_dma_playback(struct audio_stream *);
+static int start_dma_recording(struct audio_stream *, int);
+static void stop_dma_recording(struct audio_stream *);
+
+struct sound_ops {
+ int (*setup)(struct audio_driver_state *, int);
+ void (*tear_down)(struct audio_driver_state *, int);
+ void (*stop_playback)(struct audio_stream *);
+ int (*start_recording)(struct audio_stream *, int);
+ void (*stop_recording)(struct audio_stream *);
+};
+
+static const struct sound_ops dma_sound_ops = {
+ .setup = setup_dma,
+ .tear_down = tear_down_dma,
+ .stop_playback = stop_dma_playback,
+ .start_recording = start_dma_recording,
+ .stop_recording = stop_dma_recording,
+};
+
+static const struct sound_ops *sound_ops = &dma_sound_ops;
+
+static int start_recording_if_necessary(struct audio_stream *ais, int size)
+{
+ int rc = 0;
+ unsigned long flags;
+ prevent_suspend(ais);
+ spin_lock_irqsave(&ais->dma_req_lock, flags);
+ if (!ais->stop && !pending_buffer_requests(ais)) {
+ /* pr_debug("%s: starting recording\n", __func__); */
+ rc = sound_ops->start_recording(ais, size);
+ if (rc) {
+ pr_err("%s start_recording() failed\n", __func__);
+ allow_suspend(ais);
+ }
+ }
+ spin_unlock_irqrestore(&ais->dma_req_lock, flags);
+ return rc;
+}
+
+static bool stop_playback_if_necessary(struct audio_stream *aos)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+ pr_debug("%s\n", __func__);
+ if (!pending_buffer_requests(aos)) {
+ pr_debug("%s: no more data to play back\n", __func__);
+ sound_ops->stop_playback(aos);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+ allow_suspend(aos);
+ return true;
+ }
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+ return false;
+}
+
+/* playback and recording */
+static bool wait_till_stopped(struct audio_stream *as)
+{
+ int rc;
+ pr_debug("%s: wait for completion\n", __func__);
+ rc = wait_for_completion_timeout(
+ &as->stop_completion, HZ);
+ if (!rc)
+ pr_err("%s: wait timed out", __func__);
+ if (rc < 0)
+ pr_err("%s: wait error %d\n", __func__, rc);
+ allow_suspend(as);
+ pr_debug("%s: done: %d\n", __func__, rc);
+ return true;
+}
+
+/* Ask for playback and recording to stop. The _nosync means that
+ * as->lock has to be locked by the caller.
+ */
+static void request_stop_nosync(struct audio_stream *as)
+{
+ int i;
+ pr_debug("%s\n", __func__);
+ if (!as->stop) {
+ as->stop = true;
+ if (pending_buffer_requests(as))
+ wait_till_stopped(as);
+ for (i = 0; i < as->num_bufs; i++) {
+ init_completion(&as->comp[i]);
+ complete(&as->comp[i]);
+ }
+ }
+ if (!tegra_dma_is_empty(as->dma_chan))
+ pr_err("%s: DMA not empty!\n", __func__);
+ /* Stop the DMA then dequeue anything that's in progress. */
+ tegra_dma_cancel(as->dma_chan);
+ as->active = false; /* applies to recording only */
+ pr_debug("%s: done\n", __func__);
+}
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos);
+
+static void setup_dma_rx_request(struct tegra_dma_req *req,
+ struct audio_stream *ais);
+
+static int setup_dma(struct audio_driver_state *ads, int mask)
+{
+ int rc, i;
+ pr_info("%s\n", __func__);
+
+ if (mask & TEGRA_AUDIO_ENABLE_TX) {
+ /* setup audio playback */
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ ads->out.buf_phy[i] = dma_map_single(&ads->pdev->dev,
+ ads->out.buffer[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_TO_DEVICE);
+ BUG_ON(!ads->out.buf_phy[i]);
+ setup_dma_tx_request(&ads->out.dma_req[i], &ads->out);
+ ads->out.dma_req[i].source_addr = ads->out.buf_phy[i];
+ }
+ ads->out.dma_chan = tegra_dma_allocate_channel(
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
+ "i2s_tx_req_%d", ads->dma_req_sel);
+ if (!ads->out.dma_chan) {
+ pr_err("%s: error alloc output DMA channel: %ld\n",
+ __func__, PTR_ERR(ads->out.dma_chan));
+ rc = -ENODEV;
+ goto fail_tx;
+ }
+ }
+
+ if (mask & TEGRA_AUDIO_ENABLE_RX) {
+ /* setup audio recording */
+ for (i = 0; i < ads->in.num_bufs; i++) {
+ ads->in.buf_phy[i] = dma_map_single(&ads->pdev->dev,
+ ads->in.buffer[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_FROM_DEVICE);
+ BUG_ON(!ads->in.buf_phy[i]);
+ setup_dma_rx_request(&ads->in.dma_req[i], &ads->in);
+ ads->in.dma_req[i].dest_addr = ads->in.buf_phy[i];
+ }
+ ads->in.dma_chan = tegra_dma_allocate_channel(
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
+ "i2s_rx_req_%d", ads->dma_req_sel);
+ if (!ads->in.dma_chan) {
+ pr_err("%s: error allocating input DMA channel: %ld\n",
+ __func__, PTR_ERR(ads->in.dma_chan));
+ rc = -ENODEV;
+ goto fail_rx;
+ }
+ }
+
+ return 0;
+
+fail_rx:
+ if (mask & TEGRA_AUDIO_ENABLE_RX) {
+ for (i = 0; i < ads->in.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->in.buf_phy[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_FROM_DEVICE);
+ ads->in.buf_phy[i] = 0;
+ }
+ tegra_dma_free_channel(ads->in.dma_chan);
+ ads->in.dma_chan = 0;
+ }
+fail_tx:
+ if (mask & TEGRA_AUDIO_ENABLE_TX) {
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_TO_DEVICE);
+ ads->out.buf_phy[i] = 0;
+ }
+ tegra_dma_free_channel(ads->out.dma_chan);
+ ads->out.dma_chan = 0;
+ }
+
+ return rc;
+}
+
+static void tear_down_dma(struct audio_driver_state *ads, int mask)
+{
+ int i;
+ pr_info("%s\n", __func__);
+
+ if (mask & TEGRA_AUDIO_ENABLE_TX) {
+ tegra_dma_free_channel(ads->out.dma_chan);
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+ buf_size(&ads->out),
+ DMA_TO_DEVICE);
+ ads->out.buf_phy[i] = 0;
+ }
+ }
+ ads->out.dma_chan = NULL;
+
+ if (mask & TEGRA_AUDIO_ENABLE_RX) {
+ tegra_dma_free_channel(ads->in.dma_chan);
+ for (i = 0; i < ads->in.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->in.buf_phy[i],
+ buf_size(&ads->in),
+ DMA_FROM_DEVICE);
+ ads->in.buf_phy[i] = 0;
+ }
+ }
+ ads->in.dma_chan = NULL;
+}
+
+static void dma_tx_complete_callback(struct tegra_dma_req *req)
+{
+ unsigned long flags;
+ struct audio_stream *aos = req->dev;
+ unsigned req_num;
+
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+
+ req_num = req - aos->dma_req;
+ pr_debug("%s: completed buffer %d size %d\n", __func__,
+ req_num, req->bytes_transferred);
+ BUG_ON(req_num >= aos->num_bufs);
+
+ complete(&aos->comp[req_num]);
+
+ if (!pending_buffer_requests(aos)) {
+ pr_debug("%s: Playback underflow\n", __func__);
+ complete(&aos->stop_completion);
+ }
+
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+}
+
+static void dma_rx_complete_callback(struct tegra_dma_req *req)
+{
+ unsigned long flags;
+ struct audio_stream *ais = req->dev;
+ unsigned req_num;
+
+ spin_lock_irqsave(&ais->dma_req_lock, flags);
+
+ req_num = req - ais->dma_req;
+ pr_debug("%s: completed buffer %d size %d\n", __func__,
+ req_num, req->bytes_transferred);
+ BUG_ON(req_num >= ais->num_bufs);
+
+ complete(&ais->comp[req_num]);
+
+ if (!pending_buffer_requests(ais))
+ pr_debug("%s: Capture overflow\n", __func__);
+
+ spin_unlock_irqrestore(&ais->dma_req_lock, flags);
+}
+
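+/* For playback the DMA source walks linearly through the audio buffer
+ * (source_wrap = 0) while the destination wraps on the 4-byte I2S TX
+ * FIFO register (dest_wrap = 4); the capture request mirrors this. */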
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos)
+{
+ struct audio_driver_state *ads = ads_from_out(aos);
+
+ memset(req, 0, sizeof(*req));
+
+ req->complete = dma_tx_complete_callback;
+ req->dev = aos;
+ req->to_memory = false;
+ req->dest_addr = i2s_get_fifo_phy_base(ads->i2s_phys, I2S_FIFO_TX);
+ req->dest_wrap = 4;
+ if (ads->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+ req->dest_bus_width = ads->pdata->dsp_bus_width;
+ else
+ req->dest_bus_width = ads->pdata->i2s_bus_width;
+ req->source_bus_width = 32;
+ req->source_wrap = 0;
+ req->req_sel = ads->dma_req_sel;
+}
+
+static void setup_dma_rx_request(struct tegra_dma_req *req,
+ struct audio_stream *ais)
+{
+ struct audio_driver_state *ads = ads_from_in(ais);
+
+ memset(req, 0, sizeof(*req));
+
+ req->complete = dma_rx_complete_callback;
+ req->dev = ais;
+ req->to_memory = true;
+ req->source_addr = i2s_get_fifo_phy_base(ads->i2s_phys, I2S_FIFO_RX);
+ req->source_wrap = 4;
+ if (ads->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+ req->source_bus_width = ads->pdata->dsp_bus_width;
+ else
+ req->source_bus_width = ads->pdata->i2s_bus_width;
+ req->dest_bus_width = 32;
+ req->dest_wrap = 0;
+ req->req_sel = ads->dma_req_sel;
+}
+
+static int start_playback(struct audio_stream *aos,
+ struct tegra_dma_req *req)
+{
+ int rc;
+ unsigned long flags;
+ struct audio_driver_state *ads = ads_from_out(aos);
+
+ pr_debug("%s: (writing %d)\n",
+ __func__, req->size);
+
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+#if 0
+ i2s_fifo_clear(ads->i2s_base, I2S_FIFO_TX);
+#endif
+ i2s_fifo_set_attention_level(ads->i2s_base,
+ I2S_FIFO_TX, aos->i2s_fifo_atn_level);
+
+ i2s_fifo_enable(ads->i2s_base, I2S_FIFO_TX, 1);
+
+ rc = tegra_dma_enqueue_req(aos->dma_chan, req);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+ if (rc)
+ pr_err("%s: could not enqueue TX DMA req\n", __func__);
+ return rc;
+}
+
+/* Called with aos->dma_req_lock taken. */
+static void stop_dma_playback(struct audio_stream *aos)
+{
+ int spin = 0;
+ struct audio_driver_state *ads = ads_from_out(aos);
+ pr_debug("%s\n", __func__);
+ i2s_fifo_enable(ads->i2s_base, I2S_FIFO_TX, 0);
+ while ((i2s_get_status(ads->i2s_base) & I2S_I2S_FIFO_TX_BUSY) &&
+ spin < 100) {
+ udelay(10);
+ if (spin++ > 50)
+ pr_info("%s: spin %d\n", __func__, spin);
+ }
+ if (spin == 100)
+ pr_warn("%s: spinny\n", __func__);
+}
+
+/* This function may be called from either interrupt or process context. */
+/* Called with ais->dma_req_lock taken. */
+static int start_dma_recording(struct audio_stream *ais, int size)
+{
+ int i;
+ struct audio_driver_state *ads = ads_from_in(ais);
+
+ pr_debug("%s\n", __func__);
+
+ BUG_ON(pending_buffer_requests(ais));
+
+ for (i = 0; i < ais->num_bufs; i++) {
+ init_completion(&ais->comp[i]);
+ ais->dma_req[i].dest_addr = ais->buf_phy[i];
+ ais->dma_req[i].size = size;
+ tegra_dma_enqueue_req(ais->dma_chan, &ais->dma_req[i]);
+ }
+
+ ais->last_queued = ais->num_bufs - 1;
+
+#if 0
+ i2s_fifo_clear(ads->i2s_base, I2S_FIFO_RX);
+#endif
+ i2s_fifo_set_attention_level(ads->i2s_base,
+ I2S_FIFO_RX, ais->i2s_fifo_atn_level);
+ i2s_fifo_enable(ads->i2s_base, I2S_FIFO_RX, 1);
+ return 0;
+}
+
+static void stop_dma_recording(struct audio_stream *ais)
+{
+ int spin = 0;
+ struct audio_driver_state *ads = ads_from_in(ais);
+ pr_debug("%s\n", __func__);
+ tegra_dma_cancel(ais->dma_chan);
+ i2s_fifo_enable(ads->i2s_base, I2S_FIFO_RX, 0);
+ i2s_fifo_clear(ads->i2s_base, I2S_FIFO_RX);
+ while ((i2s_get_status(ads->i2s_base) & I2S_I2S_FIFO_RX_BUSY) &&
+ spin < 100) {
+ udelay(10);
+ if (spin++ > 50)
+ pr_info("%s: spin %d\n", __func__, spin);
+ }
+ if (spin == 100)
+ pr_warn("%s: spinny\n", __func__);
+}
+
+static irqreturn_t i2s_interrupt(int irq, void *data)
+{
+ struct audio_driver_state *ads = data;
+ u32 status = i2s_get_status(ads->i2s_base);
+
+ pr_debug("%s: %08x\n", __func__, status);
+
+ if (status & I2S_FIFO_ERR)
+ i2s_ack_status(ads->i2s_base);
+
+ pr_debug("%s: done %08x\n", __func__, i2s_get_status(ads->i2s_base));
+ return IRQ_HANDLED;
+}
+
+static ssize_t tegra_audio_write(struct file *file,
+ const char __user *buf, size_t size, loff_t *off)
+{
+ ssize_t rc = 0;
+ int out_buf;
+ struct tegra_dma_req *req;
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ mutex_lock(&ads->out.lock);
+
+ if (!IS_ALIGNED(size, 4) || size < 4 || size > buf_size(&ads->out)) {
+ pr_err("%s: invalid user size %d\n", __func__, size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("%s: write %d bytes\n", __func__, size);
+
+ if (ads->out.stop) {
+ pr_debug("%s: playback has been cancelled\n", __func__);
+ goto done;
+ }
+
+ /* Decide which buf is next. */
+ out_buf = (ads->out.last_queued + 1) % ads->out.num_bufs;
+ req = &ads->out.dma_req[out_buf];
+
+ /* Wait for the buffer to be emptied (complete). The maximum timeout
+ * value could be calculated dynamically based on buf_size(&ads->out).
+ * For a buffer size of 16k, at 44.1kHz/stereo/16-bit PCM, you would
+ * have ~93ms.
+ */
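+ /*
+ * Worked out for the example above: 16384 bytes / (44100 Hz * 2 ch *
+ * 2 bytes per sample) ~= 93 ms, so the fixed 1 s (HZ jiffies) timeout
+ * used below is a comfortable upper bound.
+ */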
+ pr_debug("%s: waiting for buffer %d\n", __func__, out_buf);
+ rc = wait_for_completion_interruptible_timeout(
+ &ads->out.comp[out_buf], HZ);
+ if (!rc) {
+ pr_err("%s: timeout", __func__);
+ rc = -ETIMEDOUT;
+ goto done;
+ } else if (rc < 0) {
+ pr_err("%s: wait error %d", __func__, rc);
+ goto done;
+ }
+
+ /* Fill the buffer and enqueue it. */
+ pr_debug("%s: acquired buffer %d, copying data\n", __func__, out_buf);
+ rc = copy_from_user(ads->out.buffer[out_buf], buf, size);
+ if (rc) {
+ rc = -EFAULT;
+ goto done;
+ }
+
+ prevent_suspend(&ads->out);
+
+ req->size = size;
+ dma_sync_single_for_device(NULL,
+ req->source_addr, req->size, DMA_TO_DEVICE);
+ ads->out.last_queued = out_buf;
+ init_completion(&ads->out.stop_completion);
+
+ rc = start_playback(&ads->out, req);
+ if (!rc)
+ rc = size;
+ else
+ allow_suspend(&ads->out);
+
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static long tegra_audio_out_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct audio_driver_state *ads = ads_from_misc_out_ctl(file);
+ struct audio_stream *aos = &ads->out;
+
+ mutex_lock(&aos->lock);
+
+ switch (cmd) {
+ case TEGRA_AUDIO_OUT_FLUSH:
+ if (pending_buffer_requests(aos)) {
+ pr_debug("%s: flushing\n", __func__);
+ request_stop_nosync(aos);
+ pr_debug("%s: flushed\n", __func__);
+ }
+ if (stop_playback_if_necessary(aos))
+ pr_debug("%s: done (stopped)\n", __func__);
+ aos->stop = false;
+ break;
+ case TEGRA_AUDIO_OUT_SET_NUM_BUFS: {
+ unsigned int num;
+ if (copy_from_user(&num, (const void __user *)arg,
+ sizeof(num))) {
+ rc = -EFAULT;
+ break;
+ }
+ if (!num || num > I2S_MAX_NUM_BUFS) {
+ pr_err("%s: invalid buffer count %d\n", __func__, num);
+ rc = -EINVAL;
+ break;
+ }
+ if (pending_buffer_requests(aos)) {
+ pr_err("%s: playback in progress\n", __func__);
+ rc = -EBUSY;
+ break;
+ }
+ rc = init_stream_buffer(aos, num);
+ if (rc < 0)
+ break;
+ aos->num_bufs = num;
+ sound_ops->tear_down(ads, TEGRA_AUDIO_ENABLE_TX);
+ sound_ops->setup(ads, TEGRA_AUDIO_ENABLE_TX);
+ pr_debug("%s: num buf set to %d\n", __func__, num);
+ }
+ break;
+ case TEGRA_AUDIO_OUT_GET_NUM_BUFS:
+ if (copy_to_user((void __user *)arg,
+ &aos->num_bufs, sizeof(aos->num_bufs)))
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ mutex_unlock(&aos->lock);
+ return rc;
+}
+
+static long tegra_audio_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct audio_driver_state *ads = ads_from_misc_ctl(file);
+ unsigned int mode;
+ bool dma_restart = false;
+
+ mutex_lock(&ads->out.lock);
+ mutex_lock(&ads->in.lock);
+
+ switch (cmd) {
+ case TEGRA_AUDIO_SET_BIT_FORMAT:
+ if (copy_from_user(&mode, (const void __user *)arg,
+ sizeof(mode))) {
+ rc = -EFAULT;
+ goto done;
+ }
+ dma_restart = (mode != ads->bit_format);
+ switch (mode) {
+ case TEGRA_AUDIO_BIT_FORMAT_DEFAULT:
+ i2s_set_bit_format(ads->i2s_base, ads->pdata->mode);
+ ads->bit_format = mode;
+ break;
+ case TEGRA_AUDIO_BIT_FORMAT_DSP:
+ i2s_set_bit_format(ads->i2s_base, I2S_BIT_FORMAT_DSP);
+ ads->bit_format = mode;
+ break;
+ default:
+ pr_err("%s: Invald PCM mode %d", __func__, mode);
+ rc = -EINVAL;
+ goto done;
+ }
+ break;
+ case TEGRA_AUDIO_GET_BIT_FORMAT:
+ if (copy_to_user((void __user *)arg, &ads->bit_format,
+ sizeof(mode)))
+ rc = -EFAULT;
+ goto done;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (dma_restart) {
+ pr_debug("%s: Restarting DMA due to configuration change.\n",
+ __func__);
+ if (pending_buffer_requests(&ads->out) || ads->in.active) {
+ pr_err("%s: dma busy, cannot restart.\n", __func__);
+ rc = -EBUSY;
+ goto done;
+ }
+ sound_ops->tear_down(ads, ads->pdata->mask);
+ i2s_configure(ads->pdev);
+ sound_ops->setup(ads, ads->pdata->mask);
+ }
+
+done:
+ mutex_unlock(&ads->in.lock);
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static long tegra_audio_in_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct audio_driver_state *ads = ads_from_misc_in_ctl(file);
+ struct audio_stream *ais = &ads->in;
+
+ mutex_lock(&ais->lock);
+
+ switch (cmd) {
+ case TEGRA_AUDIO_IN_START:
+ pr_debug("%s: start recording\n", __func__);
+ ais->stop = false;
+ break;
+ case TEGRA_AUDIO_IN_STOP:
+ pr_debug("%s: stop recording\n", __func__);
+ if (ais->active) {
+ /* Clean up DMA/I2S, and complete the completion */
+ sound_ops->stop_recording(ais);
+ complete(&ais->stop_completion);
+ /* Set stop flag and allow suspend. */
+ request_stop_nosync(ais);
+ }
+ break;
+ case TEGRA_AUDIO_IN_SET_CONFIG: {
+ struct tegra_audio_in_config cfg;
+
+ if (ais->active) {
+ pr_err("%s: recording in progress\n", __func__);
+ rc = -EBUSY;
+ break;
+ }
+ if (copy_from_user(&cfg, (const void __user *)arg,
+ sizeof(cfg))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (cfg.stereo && !ads->pdata->stereo_capture) {
+ pr_err("%s: not capable of stereo capture.",
+ __func__);
+ rc = -EINVAL;
+ }
+ if (!rc) {
+ pr_info("%s: setting input sampling rate to %d, %s\n",
+ __func__, cfg.rate,
+ cfg.stereo ? "stereo" : "mono");
+ ads->in_config = cfg;
+ ads->in_config.stereo = !!ads->in_config.stereo;
+ }
+ }
+ break;
+ case TEGRA_AUDIO_IN_GET_CONFIG:
+ if (copy_to_user((void __user *)arg, &ads->in_config,
+ sizeof(ads->in_config)))
+ rc = -EFAULT;
+ break;
+ case TEGRA_AUDIO_IN_SET_NUM_BUFS: {
+ unsigned int num;
+ if (copy_from_user(&num, (const void __user *)arg,
+ sizeof(num))) {
+ rc = -EFAULT;
+ break;
+ }
+ if (!num || num > I2S_MAX_NUM_BUFS) {
+ pr_err("%s: invalid buffer count %d\n", __func__,
+ num);
+ rc = -EINVAL;
+ break;
+ }
+ if (ais->active || pending_buffer_requests(ais)) {
+ pr_err("%s: recording in progress\n", __func__);
+ rc = -EBUSY;
+ break;
+ }
+ rc = init_stream_buffer(ais, num);
+ if (rc < 0)
+ break;
+ ais->num_bufs = num;
+ sound_ops->tear_down(ads, TEGRA_AUDIO_ENABLE_RX);
+ sound_ops->setup(ads, TEGRA_AUDIO_ENABLE_RX);
+ }
+ break;
+ case TEGRA_AUDIO_IN_GET_NUM_BUFS:
+ if (copy_to_user((void __user *)arg,
+ &ais->num_bufs, sizeof(ais->num_bufs)))
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ mutex_unlock(&ais->lock);
+ return rc;
+}
+
+static ssize_t tegra_audio_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ ssize_t rc;
+ ssize_t nr = 0;
+ int in_buf;
+ struct tegra_dma_req *req;
+ struct audio_driver_state *ads = ads_from_misc_in(file);
+
+ mutex_lock(&ads->in.lock);
+
+ if (!IS_ALIGNED(size, 4) || size < 4 || size > buf_size(&ads->in)) {
+ pr_err("%s: invalid size %d.\n", __func__, size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("%s: size %d\n", __func__, size);
+
+ /* If we want recording to stop immediately after it gets cancelled,
+ * then we do not want to wait for the fifo to get drained.
+ */
+ if (ads->in.stop) {
+ pr_debug("%s: recording has been cancelled\n", __func__);
+ rc = 0;
+ goto done;
+ }
+
+ /* This function calls prevent_suspend() internally */
+ rc = start_recording_if_necessary(&ads->in, size);
+ if (rc < 0 && rc != -EALREADY) {
+ pr_err("%s: could not start recording\n", __func__);
+ goto done;
+ }
+
+ ads->in.active = true;
+
+ /* Note that when tegra_audio_read() is called for the first time (or
+ * when all the buffers are empty), then it queues up all
+ * ads->in.num_bufs buffers, and in_buf is set to zero below.
+ */
+ in_buf = (ads->in.last_queued + 1) % ads->in.num_bufs;
+
+ /* Wait for the buffer to be filled (complete). The maximum timeout
+ * value could be calculated dynamically based on buf_size(&ads->in).
+ * For a buffer size of 16k, at 44.1kHz/stereo/16-bit PCM, you would
+ * have ~93ms.
+ */
+ rc = wait_for_completion_interruptible_timeout(
+ &ads->in.comp[in_buf], HZ);
+ if (!rc) {
+ pr_err("%s: timeout", __func__);
+ rc = -ETIMEDOUT;
+ goto done;
+ } else if (rc < 0) {
+ pr_err("%s: wait error %d", __func__, rc);
+ goto done;
+ }
+
+ req = &ads->in.dma_req[in_buf];
+
+ nr = size > req->size ? req->size : size;
+ req->size = size;
+ dma_sync_single_for_cpu(NULL, ads->in.dma_req[in_buf].dest_addr,
+ ads->in.dma_req[in_buf].size, DMA_FROM_DEVICE);
+ rc = copy_to_user(buf, ads->in.buffer[in_buf], nr);
+ if (rc) {
+ rc = -EFAULT;
+ goto done;
+ }
+
+ init_completion(&ads->in.stop_completion);
+
+ ads->in.last_queued = in_buf;
+ rc = tegra_dma_enqueue_req(ads->in.dma_chan, req);
+ /* We've successfully enqueued this request before. */
+ BUG_ON(rc);
+
+ rc = nr;
+ *off += nr;
+done:
+ mutex_unlock(&ads->in.lock);
+ pr_debug("%s: done %d\n", __func__, rc);
+ return rc;
+}
+
+static int tegra_audio_out_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ int i;
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&ads->out.lock);
+
+ if (ads->out.opened) {
+ rc = -EBUSY;
+ goto done;
+ }
+
+ ads->out.opened = 1;
+ ads->out.stop = false;
+
+ for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+ init_completion(&ads->out.comp[i]);
+ /* TX buf rest state is unqueued, complete. */
+ complete(&ads->out.comp[i]);
+ }
+
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static int tegra_audio_out_release(struct inode *inode, struct file *file)
+{
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&ads->out.lock);
+ ads->out.opened = 0;
+ request_stop_nosync(&ads->out);
+ if (stop_playback_if_necessary(&ads->out))
+ pr_debug("%s: done (stopped)\n", __func__);
+ allow_suspend(&ads->out);
+ mutex_unlock(&ads->out.lock);
+ pr_debug("%s: done\n", __func__);
+ return 0;
+}
+
+static int tegra_audio_in_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ int i;
+ struct audio_driver_state *ads = ads_from_misc_in(file);
+
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&ads->in.lock);
+ if (ads->in.opened) {
+ rc = -EBUSY;
+ goto done;
+ }
+
+ ads->in.opened = 1;
+ ads->in.stop = false;
+
+ for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+ init_completion(&ads->in.comp[i]);
+ /* RX buf rest state is unqueued, complete. */
+ complete(&ads->in.comp[i]);
+ }
+
+done:
+ mutex_unlock(&ads->in.lock);
+ return rc;
+}
+
+static int tegra_audio_in_release(struct inode *inode, struct file *file)
+{
+ struct audio_driver_state *ads = ads_from_misc_in(file);
+
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&ads->in.lock);
+ ads->in.opened = 0;
+ if (ads->in.active) {
+ sound_ops->stop_recording(&ads->in);
+ complete(&ads->in.stop_completion);
+ request_stop_nosync(&ads->in);
+ }
+ allow_suspend(&ads->in);
+ mutex_unlock(&ads->in.lock);
+ pr_debug("%s: done\n", __func__);
+ return 0;
+}
+
+static const struct file_operations tegra_audio_out_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_out_open,
+ .release = tegra_audio_out_release,
+ .write = tegra_audio_write,
+};
+
+static const struct file_operations tegra_audio_in_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_in_open,
+ .read = tegra_audio_read,
+ .release = tegra_audio_in_release,
+};
+
+static int tegra_audio_ctl_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int tegra_audio_ctl_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations tegra_audio_out_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_ctl_open,
+ .release = tegra_audio_ctl_release,
+ .unlocked_ioctl = tegra_audio_out_ioctl,
+};
+
+static const struct file_operations tegra_audio_in_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_ctl_open,
+ .release = tegra_audio_ctl_release,
+ .unlocked_ioctl = tegra_audio_in_ioctl,
+};
+
+static const struct file_operations tegra_audio_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_ctl_open,
+ .release = tegra_audio_ctl_release,
+ .unlocked_ioctl = tegra_audio_ioctl,
+};
+
+static int init_stream_buffer(struct audio_stream *s, int num)
+{
+ int i, j;
+ pr_debug("%s (num %d)\n", __func__, num);
+
+ for (i = 0; i < num; i++) {
+ kfree(s->buffer[i]);
+ s->buffer[i] =
+ kmalloc((1 << PCM_BUFFER_MAX_SIZE_ORDER),
+ GFP_KERNEL | GFP_DMA);
+ if (!s->buffer[i]) {
+ pr_err("%s: could not allocate buffer\n", __func__);
+ for (j = i - 1; j >= 0; j--) {
+ kfree(s->buffer[j]);
+ s->buffer[j] = 0;
+ }
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+
+static int setup_misc_device(struct miscdevice *misc,
+ const struct file_operations *fops,
+ const char *fmt, ...)
+{
+ int rc = 0;
+ va_list args;
+ const int sz = 64;
+
+ va_start(args, fmt);
+
+ memset(misc, 0, sizeof(*misc));
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = kmalloc(sz, GFP_KERNEL);
+ if (!misc->name) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ vsnprintf((char *)misc->name, sz, fmt, args);
+ misc->fops = fops;
+ if (misc_register(misc)) {
+ pr_err("%s: could not register %s\n", __func__, misc->name);
+ kfree(misc->name);
+ rc = -EIO;
+ goto done;
+ }
+
+done:
+ va_end(args);
+ return rc;
+}
+
+static ssize_t dma_toggle_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "dma\n");
+}
+
+static ssize_t dma_toggle_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ pr_err("%s: Not implemented.", __func__);
+ return 0;
+}
+
+static DEVICE_ATTR(dma_toggle, 0644, dma_toggle_show, dma_toggle_store);
+
+static ssize_t __attr_fifo_atn_read(char *buf, int atn_lvl)
+{
+ switch (atn_lvl) {
+ case I2S_FIFO_ATN_LVL_ONE_SLOT:
+ strncpy(buf, "1\n", 2);
+ return 2;
+ case I2S_FIFO_ATN_LVL_FOUR_SLOTS:
+ strncpy(buf, "4\n", 2);
+ return 2;
+ case I2S_FIFO_ATN_LVL_EIGHT_SLOTS:
+ strncpy(buf, "8\n", 2);
+ return 2;
+ case I2S_FIFO_ATN_LVL_TWELVE_SLOTS:
+ strncpy(buf, "12\n", 3);
+ return 3;
+ default:
+ BUG();
+ return -EIO;
+ }
+}
+
+static ssize_t __attr_fifo_atn_write(struct audio_driver_state *ads,
+ struct audio_stream *as,
+ int *fifo_lvl,
+ const char *buf, size_t size)
+{
+ int lvl;
+
+ if (size > 3) {
+ pr_err("%s: buffer size %d too big\n", __func__, size);
+ return -EINVAL;
+ }
+
+ if (sscanf(buf, "%d", &lvl) != 1) {
+ pr_err("%s: invalid input string [%s]\n", __func__, buf);
+ return -EINVAL;
+ }
+
+ switch (lvl) {
+ case 1:
+ lvl = I2S_FIFO_ATN_LVL_ONE_SLOT;
+ break;
+ case 4:
+ lvl = I2S_FIFO_ATN_LVL_FOUR_SLOTS;
+ break;
+ case 8:
+ lvl = I2S_FIFO_ATN_LVL_EIGHT_SLOTS;
+ break;
+ case 12:
+ lvl = I2S_FIFO_ATN_LVL_TWELVE_SLOTS;
+ break;
+ default:
+ pr_err("%s: invalid attention level %d\n", __func__, lvl);
+ return -EINVAL;
+ }
+
+ *fifo_lvl = lvl;
+ pr_info("%s: fifo level %d\n", __func__, *fifo_lvl);
+
+ return size;
+}
+
+static ssize_t tx_fifo_atn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return __attr_fifo_atn_read(buf, ads->out.i2s_fifo_atn_level);
+}
+
+static ssize_t tx_fifo_atn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t rc;
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ mutex_lock(&ads->out.lock);
+ if (pending_buffer_requests(&ads->out)) {
+ pr_err("%s: playback in progress.\n", __func__);
+ rc = -EBUSY;
+ goto done;
+ }
+ rc = __attr_fifo_atn_write(ads, &ads->out,
+ &ads->out.i2s_fifo_atn_level,
+ buf, count);
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static DEVICE_ATTR(tx_fifo_atn, 0644, tx_fifo_atn_show, tx_fifo_atn_store);
+
+static ssize_t rx_fifo_atn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return __attr_fifo_atn_read(buf, ads->in.i2s_fifo_atn_level);
+}
+
+static ssize_t rx_fifo_atn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t rc;
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ mutex_lock(&ads->in.lock);
+ if (ads->in.active) {
+ pr_err("%s: recording in progress.\n", __func__);
+ rc = -EBUSY;
+ goto done;
+ }
+ rc = __attr_fifo_atn_write(ads, &ads->in,
+ &ads->in.i2s_fifo_atn_level,
+ buf, count);
+done:
+ mutex_unlock(&ads->in.lock);
+ return rc;
+}
+
+static DEVICE_ATTR(rx_fifo_atn, 0644, rx_fifo_atn_show, rx_fifo_atn_store);
+
+static int tegra_audio_probe(struct platform_device *pdev)
+{
+ int rc, i;
+ struct resource *res;
+ struct clk *i2s_clk, *dap_mclk;
+ struct audio_driver_state *state;
+
+ pr_info("%s\n", __func__);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->pdev = pdev;
+ state->pdata = pdev->dev.platform_data;
+ BUG_ON(!state->pdata);
+ state->pdata->driver_data = state;
+
+ if (!(state->pdata->mask &
+ (TEGRA_AUDIO_ENABLE_TX | TEGRA_AUDIO_ENABLE_RX))) {
+ dev_err(&pdev->dev, "neither tx nor rx is enabled!\n");
+ return -EIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource!\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "memory region already claimed!\n");
+ return -ENOMEM;
+ }
+
+ state->i2s_phys = res->start;
+ state->i2s_base = (unsigned long)ioremap(res->start,
+ res->end - res->start + 1);
+ if (!state->i2s_base) {
+ dev_err(&pdev->dev, "cannot remap iomem!\n");
+ return -EIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no dma resource!\n");
+ return -ENODEV;
+ }
+ state->dma_req_sel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource!\n");
+ return -ENODEV;
+ }
+ state->irq = res->start;
+
+ i2s_clk = clk_get(&pdev->dev, NULL);
+ if (!i2s_clk) {
+ dev_err(&pdev->dev, "%s: could not get i2s clock\n",
+ __func__);
+ return -EIO;
+ }
+
+ clk_set_rate(i2s_clk, state->pdata->i2s_clk_rate);
+ if (clk_enable(i2s_clk)) {
+ dev_err(&pdev->dev, "%s: failed to enable i2s clock\n",
+ __func__);
+ return -EIO;
+ }
+ pr_info("%s: i2s_clk rate %ld\n", __func__, clk_get_rate(i2s_clk));
+
+ dap_mclk = tegra_get_clock_by_name(state->pdata->dap_clk);
+ if (!dap_mclk) {
+ dev_err(&pdev->dev, "%s: could not get DAP clock\n",
+ __func__);
+ return -EIO;
+ }
+ clk_enable(dap_mclk);
+
+ rc = i2s_configure(pdev);
+ if (rc < 0)
+ return rc;
+
+ if ((state->pdata->mask & TEGRA_AUDIO_ENABLE_TX)) {
+ state->out.opened = 0;
+ state->out.active = false;
+ mutex_init(&state->out.lock);
+ init_completion(&state->out.stop_completion);
+ spin_lock_init(&state->out.dma_req_lock);
+ state->out.dma_chan = NULL;
+ state->out.i2s_fifo_atn_level = I2S_FIFO_ATN_LVL_FOUR_SLOTS;
+ state->out.num_bufs = I2S_DEFAULT_TX_NUM_BUFS;
+ for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+ init_completion(&state->out.comp[i]);
+ /* TX buf rest state is unqueued, complete. */
+ complete(&state->out.comp[i]);
+ state->out.buffer[i] = 0;
+ state->out.buf_phy[i] = 0;
+ }
+ state->out.last_queued = 0;
+ rc = init_stream_buffer(&state->out, state->out.num_bufs);
+ if (rc < 0)
+ return rc;
+
+ INIT_WORK(&state->out.allow_suspend_work, allow_suspend_worker);
+
+ snprintf(state->out.wake_lock_name,
+ sizeof(state->out.wake_lock_name),
+ "i2s.%d-audio-out", state->pdev->id);
+ wake_lock_init(&state->out.wake_lock, WAKE_LOCK_SUSPEND,
+ state->out.wake_lock_name);
+
+ rc = setup_misc_device(&state->misc_out,
+ &tegra_audio_out_fops,
+ "audio%d_out", state->pdev->id);
+ if (rc < 0)
+ return rc;
+
+ rc = setup_misc_device(&state->misc_out_ctl,
+ &tegra_audio_out_ctl_fops,
+ "audio%d_out_ctl", state->pdev->id);
+ if (rc < 0)
+ return rc;
+ }
+
+ if ((state->pdata->mask & TEGRA_AUDIO_ENABLE_RX)) {
+ state->in.opened = 0;
+ state->in.active = false;
+ mutex_init(&state->in.lock);
+ init_completion(&state->in.stop_completion);
+ spin_lock_init(&state->in.dma_req_lock);
+ state->in.dma_chan = NULL;
+ state->in.i2s_fifo_atn_level = I2S_FIFO_ATN_LVL_FOUR_SLOTS;
+ state->in.num_bufs = I2S_DEFAULT_RX_NUM_BUFS;
+ for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+ init_completion(&state->in.comp[i]);
+ /* RX buf rest state is unqueued, complete. */
+ complete(&state->in.comp[i]);
+ state->in.buffer[i] = 0;
+ state->in.buf_phy[i] = 0;
+ }
+ state->in.last_queued = 0;
+ rc = init_stream_buffer(&state->in, state->in.num_bufs);
+ if (rc < 0)
+ return rc;
+
+ INIT_WORK(&state->in.allow_suspend_work, allow_suspend_worker);
+
+ snprintf(state->in.wake_lock_name,
+ sizeof(state->in.wake_lock_name),
+ "i2s.%d-audio-in", state->pdev->id);
+ wake_lock_init(&state->in.wake_lock, WAKE_LOCK_SUSPEND,
+ state->in.wake_lock_name);
+
+ rc = setup_misc_device(&state->misc_in,
+ &tegra_audio_in_fops,
+ "audio%d_in", state->pdev->id);
+ if (rc < 0)
+ return rc;
+
+ rc = setup_misc_device(&state->misc_in_ctl,
+ &tegra_audio_in_ctl_fops,
+ "audio%d_in_ctl", state->pdev->id);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (request_irq(state->irq, i2s_interrupt,
+ IRQF_DISABLED, state->pdev->name, state) < 0) {
+ dev_err(&pdev->dev,
+ "%s: could not register handler for irq %d\n",
+ __func__, state->irq);
+ return -EIO;
+ }
+
+ rc = setup_misc_device(&state->misc_ctl,
+ &tegra_audio_ctl_fops,
+ "audio%d_ctl", state->pdev->id);
+ if (rc < 0)
+ return rc;
+
+ sound_ops->setup(state, state->pdata->mask);
+
+ rc = device_create_file(&pdev->dev, &dev_attr_dma_toggle);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_dma_toggle.attr.name, rc);
+ return rc;
+ }
+
+ rc = device_create_file(&pdev->dev, &dev_attr_tx_fifo_atn);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_tx_fifo_atn.attr.name, rc);
+ return rc;
+ }
+
+ rc = device_create_file(&pdev->dev, &dev_attr_rx_fifo_atn);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_rx_fifo_atn.attr.name, rc);
+ return rc;
+ }
+
+ state->in_config.rate = 11025;
+ state->in_config.stereo = false;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_audio_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ /* dev_info(&pdev->dev, "%s\n", __func__); */
+ return 0;
+}
+
+static int tegra_audio_resume(struct platform_device *pdev)
+{
+ return i2s_configure(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver tegra_audio_driver = {
+ .driver = {
+ .name = "tegra-i2s",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_audio_probe,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = tegra_audio_suspend,
+ .resume = tegra_audio_resume,
+#endif
+};
+
+static int __init tegra_audio_init(void)
+{
+ return platform_driver_register(&tegra_audio_driver);
+}
+
+module_init(tegra_audio_init);
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-tegra/tegra_odm_fuses.c b/arch/arm/mach-tegra/tegra_odm_fuses.c
new file mode 100644
index 000000000000..6f6d22362fd1
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_odm_fuses.c
@@ -0,0 +1,951 @@
+/*
+ * arch/arm/mach-tegra/tegra_odm_fuses.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Fuses are one time programmable bits on the chip which are used by
+ * the chip manufacturer and device manufacturers to store chip/device
+ * configurations. The fuse bits are encapsulated in a 32 x 64 array.
+ * If a fuse bit is programmed to 1, it cannot be reverted to 0. Either
+ * another fuse bit has to be used for the same purpose or a new chip
+ * needs to be used.
+ *
+ * Every fuse word has its own shadow word, which resides adjacent to it;
+ * e.g. fuse words 0-1 form a fuse-shadow pair. So in effect there are
+ * only 32 fuse words to work with.
+ * The shadow word is a mirror of the actual fuse word at all times,
+ * and this is maintained while programming a particular fuse.
+ */
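+
+/*
+ * Put differently (given the layout described above): even-numbered fuse
+ * words hold the data and the adjacent odd-numbered word holds the shadow
+ * copy, which is why the helpers below walk the fuse addresses two at a
+ * time and size their program/mask arrays as NFUSES / 2.
+ */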
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/regulator/consumer.h>
+#include <linux/ctype.h>
+#include <linux/wakelock.h>
+#include <linux/clk.h>
+
+#include <mach/tegra_odm_fuses.h>
+#include <mach/iomap.h>
+
+#include "fuse.h"
+
+#define NFUSES 64
+#define STATE_IDLE (0x4 << 16)
+
+/* since fuse burning is irreversible, use this for testing */
+#define ENABLE_FUSE_BURNING 1
+
+/* fuse registers */
+#define FUSE_CTRL 0x000
+#define FUSE_REG_ADDR 0x004
+#define FUSE_REG_READ 0x008
+#define FUSE_REG_WRITE 0x00C
+#define FUSE_TIME_PGM 0x01C
+#define FUSE_PRIV2INTFC 0x020
+#define FUSE_DIS_PGM 0x02C
+#define FUSE_WRITE_ACCESS 0x030
+#define FUSE_PWR_GOOD_SW 0x034
+
+static struct kobject *fuse_kobj;
+
+static ssize_t fuse_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf);
+static ssize_t fuse_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+
+static struct kobj_attribute devkey_attr =
+ __ATTR(device_key, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute jtagdis_attr =
+ __ATTR(jtag_disable, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute odm_prod_mode_attr =
+ __ATTR(odm_production_mode, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute sec_boot_dev_cfg_attr =
+ __ATTR(sec_boot_dev_cfg, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute sec_boot_dev_sel_attr =
+ __ATTR(sec_boot_dev_sel, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute sbk_attr =
+ __ATTR(secure_boot_key, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute sw_rsvd_attr =
+ __ATTR(sw_reserved, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute ignore_dev_sel_straps_attr =
+ __ATTR(ignore_dev_sel_straps, 0440, fuse_show, fuse_store);
+
+static struct kobj_attribute odm_rsvd_attr =
+ __ATTR(odm_reserved, 0440, fuse_show, fuse_store);
+
+static u32 fuse_pgm_data[NFUSES / 2];
+static u32 fuse_pgm_mask[NFUSES / 2];
+static u32 tmp_fuse_pgm_data[NFUSES / 2];
+
+DEFINE_MUTEX(fuse_lock);
+
+static struct fuse_data fuse_info;
+struct regulator *vdd_fuse;
+struct clk *clk_fuse;
+
+#define FUSE_NAME_LEN 30
+
+struct param_info {
+ u32 *addr;
+ int sz;
+ u32 start_off;
+ int start_bit;
+ int nbits;
+ int data_offset;
+ char sysfs_name[FUSE_NAME_LEN];
+};
+
+static struct param_info fuse_info_tbl[] = {
+ [DEVKEY] = {
+ .addr = &fuse_info.devkey,
+ .sz = sizeof(fuse_info.devkey),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x12,
+ .start_bit = 8,
+#else
+ .start_off = 0x16,
+ .start_bit = 22,
+#endif
+ .nbits = 32,
+ .data_offset = 0,
+ .sysfs_name = "device_key",
+ },
+ [JTAG_DIS] = {
+ .addr = &fuse_info.jtag_dis,
+ .sz = sizeof(fuse_info.jtag_dis),
+ .start_off = 0x0,
+ .start_bit = 24,
+ .nbits = 1,
+ .data_offset = 1,
+ .sysfs_name = "jtag_disable",
+ },
+ [ODM_PROD_MODE] = {
+ .addr = &fuse_info.odm_prod_mode,
+ .sz = sizeof(fuse_info.odm_prod_mode),
+ .start_off = 0x0,
+ .start_bit = 23,
+ .nbits = 1,
+ .data_offset = 2,
+ .sysfs_name = "odm_production_mode",
+ },
+ [SEC_BOOT_DEV_CFG] = {
+ .addr = &fuse_info.bootdev_cfg,
+ .sz = sizeof(fuse_info.bootdev_cfg),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x14,
+ .start_bit = 8,
+#else
+ .start_off = 0x18,
+ .start_bit = 22,
+#endif
+ .nbits = 16,
+ .data_offset = 3,
+ .sysfs_name = "sec_boot_dev_cfg",
+ },
+ [SEC_BOOT_DEV_SEL] = {
+ .addr = &fuse_info.bootdev_sel,
+ .sz = sizeof(fuse_info.bootdev_sel),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x14,
+ .start_bit = 24,
+#else
+ .start_off = 0x1A,
+ .start_bit = 6,
+#endif
+ .nbits = 3,
+ .data_offset = 4,
+ .sysfs_name = "sec_boot_dev_sel",
+ },
+ [SBK] = {
+ .addr = fuse_info.sbk,
+ .sz = sizeof(fuse_info.sbk),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x0A,
+ .start_bit = 8,
+#else
+ .start_off = 0x0E,
+ .start_bit = 22,
+#endif
+ .nbits = 128,
+ .data_offset = 5,
+ .sysfs_name = "secure_boot_key",
+ },
+ [SW_RSVD] = {
+ .addr = &fuse_info.sw_rsvd,
+ .sz = sizeof(fuse_info.sw_rsvd),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x14,
+ .start_bit = 28,
+#else
+ .start_off = 0x1A,
+ .start_bit = 10,
+#endif
+ .nbits = 4,
+ .data_offset = 9,
+ .sysfs_name = "sw_reserved",
+ },
+ [IGNORE_DEV_SEL_STRAPS] = {
+ .addr = &fuse_info.ignore_devsel_straps,
+ .sz = sizeof(fuse_info.ignore_devsel_straps),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x14,
+ .start_bit = 27,
+#else
+ .start_off = 0x1A,
+ .start_bit = 9,
+#endif
+ .nbits = 1,
+ .data_offset = 10,
+ .sysfs_name = "ignore_dev_sel_straps",
+ },
+ [ODM_RSVD] = {
+ .addr = fuse_info.odm_rsvd,
+ .sz = sizeof(fuse_info.odm_rsvd),
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ .start_off = 0x16,
+ .start_bit = 4,
+#else
+ .start_off = 0x1A,
+ .start_bit = 14,
+#endif
+ .nbits = 256,
+ .data_offset = 11,
+ .sysfs_name = "odm_reserved",
+ },
+ [SBK_DEVKEY_STATUS] = {
+ .sz = SBK_DEVKEY_STATUS_SZ,
+ },
+};
+
+static void wait_for_idle(void)
+{
+ u32 reg;
+
+ do {
+ udelay(1);
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ } while ((reg & (0xF << 16)) != STATE_IDLE);
+}
+
+#define FUSE_READ 0x1
+#define FUSE_WRITE 0x2
+#define FUSE_SENSE 0x3
+#define FUSE_CMD_MASK 0x3
+
+static u32 fuse_cmd_read(u32 addr)
+{
+ u32 reg;
+
+ wait_for_idle();
+ tegra_fuse_writel(addr, FUSE_REG_ADDR);
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ reg &= ~FUSE_CMD_MASK;
+ reg |= FUSE_READ;
+ tegra_fuse_writel(reg, FUSE_CTRL);
+ wait_for_idle();
+
+ reg = tegra_fuse_readl(FUSE_REG_READ);
+ return reg;
+}
+
+static void fuse_cmd_write(u32 value, u32 addr)
+{
+ u32 reg;
+
+ wait_for_idle();
+ tegra_fuse_writel(addr, FUSE_REG_ADDR);
+ tegra_fuse_writel(value, FUSE_REG_WRITE);
+
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ reg &= ~FUSE_CMD_MASK;
+ reg |= FUSE_WRITE;
+ tegra_fuse_writel(reg, FUSE_CTRL);
+ wait_for_idle();
+}
+
+static void fuse_cmd_sense(void)
+{
+ u32 reg;
+
+ wait_for_idle();
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ reg &= ~FUSE_CMD_MASK;
+ reg |= FUSE_SENSE;
+ tegra_fuse_writel(reg, FUSE_CTRL);
+ wait_for_idle();
+}
+
+static void fuse_reg_hide(void)
+{
+ u32 reg = tegra_fuse_readl(0x48);
+ reg &= ~(1 << 28);
+ tegra_fuse_writel(reg, 0x48);
+}
+
+static void fuse_reg_unhide(void)
+{
+ u32 reg = tegra_fuse_readl(0x48);
+ reg |= (1 << 28);
+ tegra_fuse_writel(reg, 0x48);
+}
+
+static void get_fuse(enum fuse_io_param io_param, u32 *out)
+{
+ int start_bit = fuse_info_tbl[io_param].start_bit;
+ int nbits = fuse_info_tbl[io_param].nbits;
+ int offset = fuse_info_tbl[io_param].start_off;
+ u32 *dst = fuse_info_tbl[io_param].addr;
+ int dst_bit = 0;
+ int i;
+ u32 val;
+ int loops;
+
+ if (out)
+ dst = out;
+
+ do {
+ val = fuse_cmd_read(offset);
+ loops = min(nbits, 32 - start_bit);
+ for (i = 0; i < loops; i++) {
+ if (val & (BIT(start_bit + i)))
+ *dst |= BIT(dst_bit);
+ else
+ *dst &= ~BIT(dst_bit);
+ dst_bit++;
+ if (dst_bit == 32) {
+ dst++;
+ dst_bit = 0;
+ }
+ }
+ nbits -= loops;
+ offset += 2;
+ start_bit = 0;
+ } while (nbits > 0);
+}
+
+int tegra_fuse_read(enum fuse_io_param io_param, u32 *data, int size)
+{
+ int nbits;
+ u32 sbk[4], devkey = 0;
+
+ if (IS_ERR_OR_NULL(clk_fuse)) {
+ pr_err("fuse read disabled");
+ return -ENODEV;
+ }
+
+ if (!data)
+ return -EINVAL;
+
+ if (size != fuse_info_tbl[io_param].sz) {
+ pr_err("%s: size mismatch(%d), %d vs %d\n", __func__,
+ (int)io_param, size, fuse_info_tbl[io_param].sz);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fuse_lock);
+
+ clk_enable(clk_fuse);
+ fuse_reg_unhide();
+ fuse_cmd_sense();
+
+ if (io_param == SBK_DEVKEY_STATUS) {
+ *data = 0;
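+ /* report non-zero if either the SBK or the device key has any bit programmed */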
+
+ get_fuse(SBK, sbk);
+ get_fuse(DEVKEY, &devkey);
+ nbits = sizeof(sbk) * BITS_PER_BYTE;
+ if (find_first_bit((unsigned long *)sbk, nbits) != nbits)
+ *data = 1;
+ else if (devkey)
+ *data = 1;
+ } else {
+ get_fuse(io_param, data);
+ }
+
+ fuse_reg_hide();
+ clk_disable(clk_fuse);
+ mutex_unlock(&fuse_lock);
+
+ return 0;
+}
+
+static bool fuse_odm_prod_mode(void)
+{
+ u32 odm_prod_mode = 0;
+
+ clk_enable(clk_fuse);
+ get_fuse(ODM_PROD_MODE, &odm_prod_mode);
+ clk_disable(clk_fuse);
+ return (odm_prod_mode ? true : false);
+}
+
+static void set_fuse(enum fuse_io_param io_param, u32 *data)
+{
+ int i, start_bit = fuse_info_tbl[io_param].start_bit;
+ int nbits = fuse_info_tbl[io_param].nbits, loops;
+ int offset = fuse_info_tbl[io_param].start_off >> 1;
+ int src_bit = 0;
+ u32 val;
+
+ do {
+ val = *data;
+ loops = min(nbits, 32 - start_bit);
+ for (i = 0; i < loops; i++) {
+ fuse_pgm_mask[offset] |= BIT(start_bit + i);
+ if (val & BIT(src_bit))
+ fuse_pgm_data[offset] |= BIT(start_bit + i);
+ else
+ fuse_pgm_data[offset] &= ~BIT(start_bit + i);
+ src_bit++;
+ if (src_bit == 32) {
+ data++;
+ val = *data;
+ src_bit = 0;
+ }
+ }
+ nbits -= loops;
+ offset++;
+ start_bit = 0;
+ } while (nbits > 0);
+}
+
+static void populate_fuse_arrs(struct fuse_data *info, u32 flags)
+{
+ u32 *src = (u32 *)info;
+ int i;
+
+ memset(fuse_pgm_data, 0, sizeof(fuse_pgm_data));
+ memset(fuse_pgm_mask, 0, sizeof(fuse_pgm_mask));
+
+ if ((flags & FLAGS_ODMRSVD)) {
+ set_fuse(ODM_RSVD, info->odm_rsvd);
+ flags &= ~FLAGS_ODMRSVD;
+ }
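+ /* ODM reserved bits are handled before the secure-mode check below,
+ * so they stay programmable even in ODM production mode. */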
+
+ /* do not burn any more if secure mode is set */
+ if (fuse_odm_prod_mode())
+ goto out;
+
+ for_each_set_bit(i, (unsigned long *)&flags, MAX_PARAMS)
+ set_fuse(i, src + fuse_info_tbl[i].data_offset);
+
+out:
+ pr_debug("ready to program");
+}
+
+static void fuse_power_enable(void)
+{
+#if ENABLE_FUSE_BURNING
+ tegra_fuse_writel(0x1, FUSE_PWR_GOOD_SW);
+ udelay(1);
+#endif
+}
+
+static void fuse_power_disable(void)
+{
+#if ENABLE_FUSE_BURNING
+ tegra_fuse_writel(0, FUSE_PWR_GOOD_SW);
+ udelay(1);
+#endif
+}
+
+static void fuse_program_array(int pgm_cycles)
+{
+ u32 reg, fuse_val[2];
+ u32 *data = tmp_fuse_pgm_data, addr = 0, *mask = fuse_pgm_mask;
+ int i = 0;
+
+ fuse_cmd_sense();
+
+ /* get the first 2 fuse bytes */
+ fuse_val[0] = fuse_cmd_read(0);
+ fuse_val[1] = fuse_cmd_read(1);
+
+ fuse_power_enable();
+
+ /*
+ * The fuse macro is a high-density macro. Fuses are burned using an
+ * addressing mechanism, so there is no need to prepare the full list,
+ * but more writes to the control registers are needed.
+ * The only bit that can be written at first is bit 0, a special
+ * write-protection bit; by assumption all other bits are still 0.
+ *
+ * The programming pulse must have a precise width in the range
+ * [9000, 11000] ns.
+ */
+ if (pgm_cycles > 0) {
+ reg = pgm_cycles;
+ tegra_fuse_writel(reg, FUSE_TIME_PGM);
+ }
+ fuse_val[0] = (0x1 & ~fuse_val[0]);
+ fuse_val[1] = (0x1 & ~fuse_val[1]);
+ fuse_cmd_write(fuse_val[0], 0);
+ fuse_cmd_write(fuse_val[1], 1);
+
+ fuse_power_disable();
+
+ /*
+ * this will allow programming of other fuses
+ * and the reading of the existing fuse values
+ */
+ fuse_cmd_sense();
+
+ /* Clear out all bits that have already been burned or masked out */
+ memcpy(data, fuse_pgm_data, sizeof(fuse_pgm_data));
+
+ for (addr = 0; addr < NFUSES; addr += 2, data++, mask++) {
+ reg = fuse_cmd_read(addr);
+ pr_debug("%d: 0x%x 0x%x 0x%x\n", addr, (u32)(*data),
+ ~reg, (u32)(*mask));
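+ /* keep only the bits that were requested (*mask) and not already burned (reg) */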
+ *data = (*data & ~reg) & *mask;
+ }
+
+ fuse_power_enable();
+
+ /*
+ * Finally, loop over all the fuses and program the non-zero ones.
+ * Words 0 and 1 are written last and they contain control fuses. We
+ * need to invalidate after writing to a control word (with the exception
+ * of the master enable). This is also the reason we write them last.
+ */
+ for (i = ARRAY_SIZE(fuse_pgm_data) - 1; i >= 0; i--) {
+ if (tmp_fuse_pgm_data[i]) {
+ fuse_cmd_write(tmp_fuse_pgm_data[i], i * 2);
+ fuse_cmd_write(tmp_fuse_pgm_data[i], (i * 2) + 1);
+ }
+
+ if (i < 2) {
+ wait_for_idle();
+ fuse_power_disable();
+ fuse_cmd_sense();
+ fuse_power_enable();
+ }
+ }
+
+ fuse_power_disable();
+}
+
+static int fuse_set(enum fuse_io_param io_param, u32 *param, int size)
+{
+ int i, nwords = size / sizeof(u32);
+ u32 *data;
+
+ if (io_param > MAX_PARAMS)
+ return -EINVAL;
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (!data) {
+ pr_err("failed to alloc %d bytes\n", size);
+ return -ENOMEM;
+ }
+
+ get_fuse(io_param, data);
+
+ /* set only new fuse bits */
+ for (i = 0; i < nwords; i++) {
+ param[i] = (~data[i] & param[i]);
+ }
+
+ kfree(data);
+ return 0;
+}
+
+/*
+ * Function pointer to optional board specific function
+ */
+int (*tegra_fuse_regulator_en)(int);
+EXPORT_SYMBOL(tegra_fuse_regulator_en);
+
+#define CAR_OSC_CTRL 0x50
+#define PMC_PLLP_OVERRIDE 0xF8
+#define PMC_OSC_OVERRIDE BIT(0)
+#define PMC_OSC_FREQ_MASK (BIT(2) | BIT(3))
+#define PMC_OSC_FREQ_SHIFT 2
+#define CAR_OSC_FREQ_SHIFT 30
+
+#define FUSE_SENSE_DONE_BIT BIT(30)
+#define START_DATA BIT(0)
+#define SKIP_RAMREPAIR BIT(1)
+#define FUSE_PGM_TIMEOUT_MS 50
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+/* cycles corresponding to 13MHz, 19.2MHz, 12MHz, 26MHz */
+static int fuse_pgm_cycles[] = {130, 192, 120, 260};
+#else
+/* cycles corresponding to 13MHz, 16.8MHz, 19.2MHz, 38.4MHz, 12MHz, 48MHz, 26MHz */
+static int fuse_pgm_cycles[] = {130, 168, 0, 0, 192, 384, 0, 0, 120, 480, 0, 0, 260};
+#endif
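+
+/*
+ * Each non-zero entry above works out to oscillator_frequency * 10 us
+ * (e.g. 130 cycles at 13 MHz, 120 cycles at 12 MHz), which lands the
+ * programming pulse inside the [9000, 11000] ns window required by the
+ * fuse macro; the zero entries appear to be unsupported frequencies.
+ */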
+
+int tegra_fuse_program(struct fuse_data *pgm_data, u32 flags)
+{
+ u32 reg;
+ int i = 0;
+ int index;
+ int ret;
+ int delay = FUSE_PGM_TIMEOUT_MS;
+
+ if (!pgm_data || !flags) {
+ pr_err("invalid parameter");
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(clk_fuse) ||
+ (!tegra_fuse_regulator_en && IS_ERR_OR_NULL(vdd_fuse))) {
+ pr_err("fuse write disabled");
+ return -ENODEV;
+ }
+
+ if (fuse_odm_prod_mode() && (flags != FLAGS_ODMRSVD)) {
+ pr_err("reserved odm fuses aren't allowed in secure mode");
+ return -EPERM;
+ }
+
+ if ((flags & FLAGS_ODM_PROD_MODE) &&
+ (flags & (FLAGS_SBK | FLAGS_DEVKEY))) {
+ pr_err("odm production mode and sbk/devkey not allowed");
+ return -EPERM;
+ }
+
+ clk_enable(clk_fuse);
+
+ /* make all the fuse registers visible */
+ fuse_reg_unhide();
+
+ /* check that fuse options write access hasn't been disabled */
+ mutex_lock(&fuse_lock);
+ reg = tegra_fuse_readl(FUSE_DIS_PGM);
+ mutex_unlock(&fuse_lock);
+ if (reg) {
+ pr_err("fuse programming disabled");
+ fuse_reg_hide();
+ clk_disable(clk_fuse);
+ return -EACCES;
+ }
+
+ /* enable software writes to the fuse registers */
+ tegra_fuse_writel(0, FUSE_WRITE_ACCESS);
+
+ mutex_lock(&fuse_lock);
+ memcpy(&fuse_info, pgm_data, sizeof(fuse_info));
+ for_each_set_bit(i, (unsigned long *)&flags, MAX_PARAMS) {
+ fuse_set((u32)i, fuse_info_tbl[i].addr,
+ fuse_info_tbl[i].sz);
+ }
+
+#if ENABLE_FUSE_BURNING
+ if (tegra_fuse_regulator_en)
+ ret = tegra_fuse_regulator_en(1);
+ else
+ ret = regulator_enable(vdd_fuse);
+
+ if (ret) {
+ pr_err("failed to enable fuse regulator\n");
+ BUG();
+ }
+
+ populate_fuse_arrs(&fuse_info, flags);
+
+ /* calculate the number of program cycles from the oscillator freq */
+ reg = readl(IO_ADDRESS(TEGRA_PMC_BASE) + PMC_PLLP_OVERRIDE);
+ if (reg & PMC_OSC_OVERRIDE) {
+ index = (reg & PMC_OSC_FREQ_MASK) >> PMC_OSC_FREQ_SHIFT;
+ } else {
+ reg = readl(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CAR_OSC_CTRL);
+ index = reg >> CAR_OSC_FREQ_SHIFT;
+ }
+
+ pr_debug("%s: use %d programming cycles\n", __func__, fuse_pgm_cycles[index]);
+ fuse_program_array(fuse_pgm_cycles[index]);
+
+ memset(&fuse_info, 0, sizeof(fuse_info));
+
+ if (tegra_fuse_regulator_en)
+ tegra_fuse_regulator_en(0);
+ else
+ regulator_disable(vdd_fuse);
+#endif
+
+ mutex_unlock(&fuse_lock);
+
+ /* disable software writes to the fuse registers */
+ tegra_fuse_writel(1, FUSE_WRITE_ACCESS);
+
+ /* make all the fuse registers invisible */
+ fuse_reg_hide();
+
+ /* apply the fuse values immediately instead of resetting the chip */
+ fuse_cmd_sense();
+
+ tegra_fuse_writel(START_DATA | SKIP_RAMREPAIR, FUSE_PRIV2INTFC);
+
+ /* check sense and shift done in addition to IDLE */
+ do {
+ mdelay(1);
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ reg &= (FUSE_SENSE_DONE_BIT | STATE_IDLE);
+ } while ((reg != (FUSE_SENSE_DONE_BIT | STATE_IDLE)) && (--delay > 0));
+
+ clk_disable(clk_fuse);
+
+ return ((delay > 0) ? 0 : -ETIMEDOUT);
+}
+
+static int fuse_name_to_param(const char *str)
+{
+ int i;
+
+ for (i = DEVKEY; i < ARRAY_SIZE(fuse_info_tbl); i++) {
+ if (!strcmp(str, fuse_info_tbl[i].sysfs_name))
+ return i;
+ }
+
+ return -ENODATA;
+}
+
+static int char_to_xdigit(char c)
+{
+ return (c >= '0' && c <= '9') ? c - '0' :
+ (c >= 'a' && c <= 'f') ? c - 'a' + 10 :
+ (c >= 'A' && c <= 'F') ? c - 'A' + 10 : -1;
+}
+
+#define CHK_ERR(x) \
+{ \
+ if (x) \
+ { \
+ pr_err("%s: sysfs_create_file fail(%d)!", __func__, x); \
+ return x; \
+ } \
+}
+
+static ssize_t fuse_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ enum fuse_io_param param = fuse_name_to_param(attr->attr.name);
+ int ret, i = 0;
+ int orig_count = count;
+ struct fuse_data data = {0};
+ u32 *raw_data = ((u32 *)&data) + fuse_info_tbl[param].data_offset;
+ u8 *raw_byte_data = (u8 *)raw_data;
+ struct wake_lock fuse_wk_lock;
+
+ if ((param == -1) || (param == -ENODATA)) {
+ pr_err("%s: invalid fuse\n", __func__);
+ return -EINVAL;
+ }
+
+ if (fuse_odm_prod_mode()) {
+ pr_err("%s: device locked. odm fuse already blown\n", __func__);
+ return -EPERM;
+ }
+
+ count--;
+ if (DIV_ROUND_UP(count, 2) > fuse_info_tbl[param].sz) {
+ pr_err("%s: fuse parameter too long, should be %d character(s)\n",
+ __func__, fuse_info_tbl[param].sz * 2);
+ return -EINVAL;
+ }
+
+ /* see if the string has 0x/x at the start */
+ if (*buf == 'x') {
+ count -= 1;
+ buf++;
+ } else if (*(buf + 1) == 'x') {
+ count -= 2;
+ buf += 2;
+ }
+
+ /* wakelock to avoid device powering down while programming */
+ wake_lock_init(&fuse_wk_lock, WAKE_LOCK_SUSPEND, "fuse_wk_lock");
+ wake_lock(&fuse_wk_lock);
+
+ /* we need to fit each character into a single nibble */
+ raw_byte_data += DIV_ROUND_UP(count, 2) - 1;
+
+ /* in case of odd number of writes, write the first one here */
+ if (count & BIT(0)) {
+ *raw_byte_data = char_to_xdigit(*buf);
+ buf++;
+ raw_byte_data--;
+ count--;
+ }
+
+ for (i = 1; i <= count; i++, buf++) {
+ if (i & BIT(0)) {
+ *raw_byte_data = char_to_xdigit(*buf);
+ } else {
+ *raw_byte_data <<= 4;
+ *raw_byte_data |= char_to_xdigit(*buf);
+ raw_byte_data--;
+ }
+ }
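+ /*
+ * Worked example (hypothetical input): writing "1a2b" packs the nibbles
+ * from the least significant byte upwards, so raw_data[0] ends up as
+ * 0x00001a2b on this little-endian core.
+ */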
+
+ ret = tegra_fuse_program(&data, BIT(param));
+ if (ret) {
+ pr_err("%s: fuse program fail(%d)\n", __func__, ret);
+ orig_count = ret;
+ goto done;
+ }
+
+ /* if odm prodn mode fuse is burnt, change file permissions to 0440 */
+ if (param == ODM_PROD_MODE) {
+ CHK_ERR(sysfs_chmod_file(kobj, &attr->attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &devkey_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &jtagdis_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &sec_boot_dev_cfg_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &sec_boot_dev_sel_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &sbk_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &sw_rsvd_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &ignore_dev_sel_straps_attr.attr, 0440));
+ CHK_ERR(sysfs_chmod_file(kobj, &odm_rsvd_attr.attr, 0440));
+ }
+
+done:
+ wake_unlock(&fuse_wk_lock);
+ wake_lock_destroy(&fuse_wk_lock);
+ return orig_count;
+}
+
+static ssize_t fuse_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ enum fuse_io_param param = fuse_name_to_param(attr->attr.name);
+ u32 data[8];
+ char str[8];
+ int ret, i;
+
+ if ((param == -1) || (param == -ENODATA)) {
+ pr_err("%s: invalid fuse\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((param == SBK) && fuse_odm_prod_mode()) {
+ pr_err("device locked. sbk read not allowed\n");
+ return 0;
+ }
+
+ memset(data, 0, sizeof(data));
+ ret = tegra_fuse_read(param, data, fuse_info_tbl[param].sz);
+ if (ret) {
+ pr_err("%s: read fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ strcpy(buf, "0x");
+ for (i = (fuse_info_tbl[param].sz/sizeof(u32)) - 1; i >= 0 ; i--) {
+ sprintf(str, "%08x", data[i]);
+ strcat(buf, str);
+ }
+
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static int __init tegra_fuse_program_init(void)
+{
+ if (!tegra_fuse_regulator_en) {
+ /* get vdd_fuse regulator */
+ vdd_fuse = regulator_get(NULL, "vdd_fuse");
+ if (IS_ERR_OR_NULL(vdd_fuse))
+ pr_err("%s: no vdd_fuse. fuse write disabled\n", __func__);
+ }
+
+ clk_fuse = clk_get_sys("fuse-tegra", "fuse_burn");
+ if (IS_ERR_OR_NULL(clk_fuse)) {
+ pr_err("%s: no clk_fuse. fuse read/write disabled\n", __func__);
+ if (!IS_ERR_OR_NULL(vdd_fuse)) {
+ regulator_put(vdd_fuse);
+ vdd_fuse = NULL;
+ }
+ return -ENODEV;
+ }
+
+ fuse_kobj = kobject_create_and_add("fuse", firmware_kobj);
+ if (!fuse_kobj) {
+ pr_err("%s: fuse_kobj create fail\n", __func__);
+ regulator_put(vdd_fuse);
+ clk_put(clk_fuse);
+ return -ENODEV;
+ }
+
+ /* change fuse file permissions, if ODM production fuse is not blown */
+ if (!fuse_odm_prod_mode()) {
+ devkey_attr.attr.mode = 0640;
+ jtagdis_attr.attr.mode = 0640;
+ odm_prod_mode_attr.attr.mode = 0640;
+ sec_boot_dev_cfg_attr.attr.mode = 0640;
+ sec_boot_dev_sel_attr.attr.mode = 0640;
+ sbk_attr.attr.mode = 0640;
+ sw_rsvd_attr.attr.mode = 0640;
+ ignore_dev_sel_straps_attr.attr.mode = 0640;
+ odm_rsvd_attr.attr.mode = 0640;
+ }
+
+ CHK_ERR(sysfs_create_file(fuse_kobj, &odm_prod_mode_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &devkey_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &jtagdis_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &sec_boot_dev_cfg_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &sec_boot_dev_sel_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &sbk_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &sw_rsvd_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &ignore_dev_sel_straps_attr.attr));
+ CHK_ERR(sysfs_create_file(fuse_kobj, &odm_rsvd_attr.attr));
+
+ return 0;
+}
+
+static void __exit tegra_fuse_program_exit(void)
+{
+
+ fuse_power_disable();
+ fuse_reg_hide();
+
+ if (!IS_ERR_OR_NULL(vdd_fuse))
+ regulator_put(vdd_fuse);
+
+ if (!IS_ERR_OR_NULL(clk_fuse))
+ clk_put(clk_fuse);
+
+ sysfs_remove_file(fuse_kobj, &odm_prod_mode_attr.attr);
+ sysfs_remove_file(fuse_kobj, &devkey_attr.attr);
+ sysfs_remove_file(fuse_kobj, &jtagdis_attr.attr);
+ sysfs_remove_file(fuse_kobj, &sec_boot_dev_cfg_attr.attr);
+ sysfs_remove_file(fuse_kobj, &sec_boot_dev_sel_attr.attr);
+ sysfs_remove_file(fuse_kobj, &sbk_attr.attr);
+ sysfs_remove_file(fuse_kobj, &sw_rsvd_attr.attr);
+ sysfs_remove_file(fuse_kobj, &ignore_dev_sel_straps_attr.attr);
+ sysfs_remove_file(fuse_kobj, &odm_rsvd_attr.attr);
+ kobject_del(fuse_kobj);
+}
+
+late_initcall(tegra_fuse_program_init);
+module_exit(tegra_fuse_program_exit);
diff --git a/arch/arm/mach-tegra/tegra_smmu.h b/arch/arm/mach-tegra/tegra_smmu.h
new file mode 100644
index 000000000000..b46ae11c6f2e
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_smmu.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-tegra/tegra_smmu.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifdef CONFIG_TEGRA_IOVMM_SMMU
+struct tegra_smmu_window {
+ unsigned long start;
+ unsigned long end;
+};
+
+extern struct tegra_smmu_window *tegra_smmu_window(int wnum);
+extern int tegra_smmu_window_count(void);
+#endif
diff --git a/arch/arm/mach-tegra/tegra_spdif_audio.c b/arch/arm/mach-tegra/tegra_spdif_audio.c
new file mode 100644
index 000000000000..3765633606cd
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_spdif_audio.c
@@ -0,0 +1,1187 @@
+/*
+ * arch/arm/mach-tegra/tegra_spdif_audio.c
+ *
+ * S/PDIF audio driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/sysfs.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/tegra_audio.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+#include <mach/spdif.h>
+#include <mach/audio.h>
+#include <mach/irqs.h>
+
+#include "clock.h"
+
+#define PCM_BUFFER_MAX_SIZE_ORDER (PAGE_SHIFT)
+
+#define SPDIF_MAX_NUM_BUFS 4
+/* Todo: Add IOCTL to configure the number of buffers. */
+#define SPDIF_DEFAULT_TX_NUM_BUFS 2
+#define SPDIF_DEFAULT_RX_NUM_BUFS 2
+/* per stream (input/output) */
+struct audio_stream {
+ int opened;
+ struct mutex lock;
+
+ bool active; /* is DMA in progress? */
+ int num_bufs;
+ void *buffer[SPDIF_MAX_NUM_BUFS];
+ dma_addr_t buf_phy[SPDIF_MAX_NUM_BUFS];
+ struct completion comp[SPDIF_MAX_NUM_BUFS];
+ struct tegra_dma_req dma_req[SPDIF_MAX_NUM_BUFS];
+ int last_queued;
+
+ int spdif_fifo_atn_level;
+
+ struct tegra_dma_channel *dma_chan;
+ bool stop;
+ struct completion stop_completion;
+ spinlock_t dma_req_lock;
+
+ struct work_struct allow_suspend_work;
+ struct wake_lock wake_lock;
+ char wake_lock_name[100];
+};
+
+struct audio_driver_state {
+ struct list_head next;
+
+ struct platform_device *pdev;
+ struct tegra_audio_platform_data *pdata;
+ phys_addr_t spdif_phys;
+ unsigned long spdif_base;
+
+ unsigned long dma_req_sel;
+ bool fifo_init;
+
+ int irq;
+
+ struct miscdevice misc_out;
+ struct miscdevice misc_out_ctl;
+ struct audio_stream out;
+};
+
+static inline bool pending_buffer_requests(struct audio_stream *stream)
+{
+ int i;
+ for (i = 0; i < stream->num_bufs; i++)
+ if (!completion_done(&stream->comp[i]))
+ return true;
+ return false;
+}
+
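+/* each DMA buffer is a single page: 1 << PAGE_SHIFT, typically 4 KiB */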
+static inline int buf_size(struct audio_stream *s __attribute__((unused)))
+{
+ return 1 << PCM_BUFFER_MAX_SIZE_ORDER;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_out);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_out_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_out(
+ struct audio_stream *aos)
+{
+ return container_of(aos, struct audio_driver_state, out);
+}
+
+static inline void prevent_suspend(struct audio_stream *as)
+{
+ pr_debug("%s\n", __func__);
+ cancel_work_sync(&as->allow_suspend_work);
+ wake_lock(&as->wake_lock);
+}
+
+static void allow_suspend_worker(struct work_struct *w)
+{
+ struct audio_stream *as = container_of(w,
+ struct audio_stream, allow_suspend_work);
+ pr_debug("%s\n", __func__);
+ wake_unlock(&as->wake_lock);
+}
+
+static inline void allow_suspend(struct audio_stream *as)
+{
+ schedule_work(&as->allow_suspend_work);
+}
+
+#define I2S_I2S_FIFO_TX_BUSY I2S_I2S_STATUS_FIFO1_BSY
+#define I2S_I2S_FIFO_TX_QS I2S_I2S_STATUS_QS_FIFO1
+#define I2S_I2S_FIFO_TX_ERR I2S_I2S_STATUS_FIFO1_ERR
+
+#define I2S_I2S_FIFO_RX_BUSY I2S_I2S_STATUS_FIFO2_BSY
+#define I2S_I2S_FIFO_RX_QS I2S_I2S_STATUS_QS_FIFO2
+#define I2S_I2S_FIFO_RX_ERR I2S_I2S_STATUS_FIFO2_ERR
+
+#define I2S_FIFO_ERR (I2S_I2S_STATUS_FIFO1_ERR | I2S_I2S_STATUS_FIFO2_ERR)
+
+
+static inline void spdif_writel(unsigned long base, u32 val, u32 reg)
+{
+ writel(val, base + reg);
+}
+
+static inline u32 spdif_readl(unsigned long base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void spdif_fifo_write(unsigned long base, u32 data)
+{
+ spdif_writel(base, data, SPDIF_DATA_OUT_0);
+}
+
+static int spdif_fifo_set_attention_level(unsigned long base,
+ unsigned level)
+{
+ u32 val;
+
+ if (level > SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS) {
+ pr_err("%s: invalid fifo level selector %d\n", __func__,
+ level);
+ return -EINVAL;
+ }
+
+ val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+
+ val &= ~SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_MASK;
+ val |= level << SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT;
+
+
+ spdif_writel(base, val, SPDIF_DATA_FIFO_CSR_0);
+ return 0;
+}
+
+static void spdif_fifo_enable(unsigned long base, int on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~(SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN | SPDIF_CTRL_0_TU_EN);
+ val |= on ? (SPDIF_CTRL_0_TX_EN) : 0;
+ val |= on ? (SPDIF_CTRL_0_TC_EN) : 0;
+
+ spdif_writel(base, val, SPDIF_CTRL_0);
+}
+#if 0
+static bool spdif_is_fifo_enabled(unsigned long base)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ return !!(val & SPDIF_CTRL_0_TX_EN);
+}
+#endif
+
+static void spdif_fifo_clear(unsigned long base)
+{
+ u32 val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+ val &= ~(SPDIF_DATA_FIFO_CSR_0_TX_CLR | SPDIF_DATA_FIFO_CSR_0_TU_CLR);
+ val |= SPDIF_DATA_FIFO_CSR_0_TX_CLR | SPDIF_DATA_FIFO_CSR_0_TU_CLR;
+ spdif_writel(base, val, SPDIF_DATA_FIFO_CSR_0);
+}
+
+
+static int spdif_set_bit_mode(unsigned long base, unsigned mode)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~SPDIF_CTRL_0_BIT_MODE_MASK;
+
+ if (mode > SPDIF_BIT_MODE_MODERAW) {
+ pr_err("%s: invalid bit_size selector %d\n", __func__,
+ mode);
+ return -EINVAL;
+ }
+
+ val |= mode << SPDIF_CTRL_0_BIT_MODE_SHIFT;
+
+ spdif_writel(base, val, SPDIF_CTRL_0);
+ return 0;
+}
+
+static int spdif_set_fifo_packed(unsigned long base, unsigned on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~SPDIF_CTRL_0_PACK;
+ val |= on ? SPDIF_CTRL_0_PACK : 0;
+ spdif_writel(base, val, SPDIF_CTRL_0);
+ return 0;
+}
+
+#if 0
+static void spdif_set_fifo_irq_on_err(unsigned long base, int on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ val &= ~SPDIF_CTRL_0_IE_TXE;
+ val |= on ? SPDIF_CTRL_0_IE_TXE : 0;
+ spdif_writel(base, val, SPDIF_CTRL_0);
+}
+#endif
+
+
+static void spdif_enable_fifos(unsigned long base, int on)
+{
+ u32 val = spdif_readl(base, SPDIF_CTRL_0);
+ if (on)
+ val |= SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN |
+ SPDIF_CTRL_0_IE_TXE;
+ else
+ val &= ~(SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN |
+ SPDIF_CTRL_0_IE_TXE);
+
+ spdif_writel(base, val, SPDIF_CTRL_0);
+}
+
+static inline u32 spdif_get_status(unsigned long base)
+{
+ return spdif_readl(base, SPDIF_STATUS_0);
+}
+
+static inline u32 spdif_get_control(unsigned long base)
+{
+ return spdif_readl(base, SPDIF_CTRL_0);
+}
+
+static inline void spdif_ack_status(unsigned long base)
+{
+ spdif_writel(base, spdif_readl(base, SPDIF_STATUS_0),
+ SPDIF_STATUS_0);
+}
+
+static inline u32 spdif_get_fifo_scr(unsigned long base)
+{
+ return spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+}
+
+static inline phys_addr_t spdif_get_fifo_phy_base(unsigned long phy_base)
+{
+ return phy_base + SPDIF_DATA_OUT_0;
+}
+
+static inline u32 spdif_get_fifo_full_empty_count(unsigned long base)
+{
+ u32 val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+ val = val >> SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT;
+ return val & SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_MASK;
+}
+
+
+static int spdif_set_sample_rate(struct audio_driver_state *state,
+ unsigned int sample_rate)
+{
+ unsigned int clock_freq = 0;
+ struct clk *spdif_clk;
+
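+ /* ch_sta[0]/[1] (CH_STA_TX_A/B) carry the IEC 60958 channel-status
+ * sample-rate fields, filled in per rate in the switch below; the
+ * remaining words (C-F) are simply written as zero. */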
+ unsigned int ch_sta[] = {
+ 0x0, /* 44.1, default values */
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ };
+
+ switch (sample_rate) {
+ case 32000:
+ clock_freq = 4096000; /* 4.0960 MHz */
+ ch_sta[0] = 0x3 << 24;
+ ch_sta[1] = 0xC << 4;
+ break;
+ case 44100:
+ clock_freq = 5644800; /* 5.6448 MHz */
+ ch_sta[0] = 0x0;
+ ch_sta[1] = 0xF << 4;
+ break;
+ case 48000:
+ clock_freq = 6144000; /* 6.1440MHz */
+ ch_sta[0] = 0x2 << 24;
+ ch_sta[1] = 0xD << 4;
+ break;
+ case 88200:
+ clock_freq = 11289600; /* 11.2896 MHz */
+ break;
+ case 96000:
+ clock_freq = 12288000; /* 12.288 MHz */
+ break;
+ case 176400:
+ clock_freq = 22579200; /* 22.5792 MHz */
+ break;
+ case 192000:
+ clock_freq = 24576000; /* 24.5760 MHz */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spdif_clk = clk_get(&state->pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(spdif_clk)) {
+ dev_err(&state->pdev->dev, "%s: could not get spdif clock\n",
+ __func__);
+ return -EIO;
+ }
+
+ clk_set_rate(spdif_clk, clock_freq);
+ if (clk_enable(spdif_clk)) {
+ dev_err(&state->pdev->dev,
+ "%s: failed to enable spdif_clk clock\n", __func__);
+ return -EIO;
+ }
+ pr_info("%s: spdif_clk rate %ld\n", __func__, clk_get_rate(spdif_clk));
+
+ spdif_writel(state->spdif_base, ch_sta[0], SPDIF_CH_STA_TX_A_0);
+ spdif_writel(state->spdif_base, ch_sta[1], SPDIF_CH_STA_TX_B_0);
+ spdif_writel(state->spdif_base, ch_sta[2], SPDIF_CH_STA_TX_C_0);
+ spdif_writel(state->spdif_base, ch_sta[3], SPDIF_CH_STA_TX_D_0);
+ spdif_writel(state->spdif_base, ch_sta[4], SPDIF_CH_STA_TX_E_0);
+ spdif_writel(state->spdif_base, ch_sta[5], SPDIF_CH_STA_TX_F_0);
+
+ return 0;
+}
+
+static int init_stream_buffer(struct audio_stream *, int);
+
+static int setup_dma(struct audio_driver_state *);
+static void tear_down_dma(struct audio_driver_state *);
+static void stop_dma_playback(struct audio_stream *);
+
+
+struct sound_ops {
+ int (*setup)(struct audio_driver_state *);
+ void (*tear_down)(struct audio_driver_state *);
+ void (*stop_playback)(struct audio_stream *);
+};
+
+static const struct sound_ops dma_sound_ops = {
+ .setup = setup_dma,
+ .tear_down = tear_down_dma,
+ .stop_playback = stop_dma_playback,
+};
+
+static const struct sound_ops *sound_ops = &dma_sound_ops;
+
+
+
+static bool stop_playback_if_necessary(struct audio_stream *aos)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+ pr_debug("%s\n", __func__);
+ if (!pending_buffer_requests(aos)) {
+ pr_debug("%s: no more data to play back\n", __func__);
+ sound_ops->stop_playback(aos);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+ allow_suspend(aos);
+ return true;
+ }
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+ return false;
+}
+
+/* playback */
+static bool wait_till_stopped(struct audio_stream *as)
+{
+ int rc;
+ pr_debug("%s: wait for completion\n", __func__);
+ rc = wait_for_completion_timeout(
+ &as->stop_completion, HZ);
+ if (!rc)
+ pr_err("%s: wait timed out", __func__);
+ if (rc < 0)
+ pr_err("%s: wait error %d\n", __func__, rc);
+ allow_suspend(as);
+ pr_debug("%s: done: %d\n", __func__, rc);
+ return true;
+}
+
+/* Ask for playback to stop. The _nosync means that
+ * as->lock has to be locked by the caller.
+ */
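+/* In this driver the callers hold the stream mutex: tegra_spdif_out_ioctl()
+ * and tegra_spdif_out_release() both take ads->out.lock before calling
+ * request_stop_nosync().
+ */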
+static void request_stop_nosync(struct audio_stream *as)
+{
+ int i;
+ pr_debug("%s\n", __func__);
+ if (!as->stop) {
+ as->stop = true;
+ if (pending_buffer_requests(as))
+ wait_till_stopped(as);
+ for (i = 0; i < as->num_bufs; i++) {
+ init_completion(&as->comp[i]);
+ complete(&as->comp[i]);
+ }
+ }
+ if (!tegra_dma_is_empty(as->dma_chan))
+ pr_err("%s: DMA not empty!\n", __func__);
+ /* Stop the DMA then dequeue anything that's in progress. */
+ tegra_dma_cancel(as->dma_chan);
+ as->active = false; /* applies to recording only */
+ pr_debug("%s: done\n", __func__);
+}
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos);
+
+static int setup_dma(struct audio_driver_state *ads)
+{
+ int rc, i;
+ pr_info("%s\n", __func__);
+
+ /* setup audio playback */
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ ads->out.buf_phy[i] = dma_map_single(&ads->pdev->dev,
+ ads->out.buffer[i],
+ buf_size(&ads->out),
+ DMA_TO_DEVICE);
+ BUG_ON(!ads->out.buf_phy[i]);
+ setup_dma_tx_request(&ads->out.dma_req[i], &ads->out);
+ ads->out.dma_req[i].source_addr = ads->out.buf_phy[i];
+ }
+ ads->out.dma_chan =
+ tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
+ "spdif_tx_req_%d", ads->dma_req_sel);
+ if (!ads->out.dma_chan) {
+ pr_err("%s: error alloc output DMA channel: %ld\n",
+ __func__, PTR_ERR(ads->out.dma_chan));
+ rc = -ENODEV;
+ goto fail_tx;
+ }
+ return 0;
+
+
+fail_tx:
+
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+ buf_size(&ads->out),
+ DMA_TO_DEVICE);
+ ads->out.buf_phy[i] = 0;
+ }
+ tegra_dma_free_channel(ads->out.dma_chan);
+ ads->out.dma_chan = 0;
+
+
+ return rc;
+}
+
+static void tear_down_dma(struct audio_driver_state *ads)
+{
+ int i;
+ pr_info("%s\n", __func__);
+
+
+ tegra_dma_free_channel(ads->out.dma_chan);
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+ buf_size(&ads->out),
+ DMA_TO_DEVICE);
+ ads->out.buf_phy[i] = 0;
+ }
+
+ ads->out.dma_chan = NULL;
+}
+
+static void dma_tx_complete_callback(struct tegra_dma_req *req)
+{
+ struct audio_stream *aos = req->dev;
+ unsigned req_num;
+
+ req_num = req - aos->dma_req;
+ pr_debug("%s: completed buffer %d size %d\n", __func__,
+ req_num, req->bytes_transferred);
+ BUG_ON(req_num >= aos->num_bufs);
+
+ complete(&aos->comp[req_num]);
+
+ if (!pending_buffer_requests(aos)) {
+ pr_debug("%s: Playback underflow", __func__);
+ complete(&aos->stop_completion);
+ }
+}
+
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos)
+{
+ struct audio_driver_state *ads = ads_from_out(aos);
+
+ memset(req, 0, sizeof(*req));
+
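+ /* Memory-to-FIFO request: the destination is the SPDIF data FIFO
+ * (32-bit accesses, wrapping on the FIFO address) while the source
+ * walks linearly through the audio buffer; the buffer address and
+ * size are filled in by the caller for each submission. */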
+ req->complete = dma_tx_complete_callback;
+ req->dev = aos;
+ req->to_memory = false;
+ req->dest_addr = spdif_get_fifo_phy_base(ads->spdif_phys);
+ req->dest_bus_width = 32;
+ req->dest_wrap = 4;
+ req->source_wrap = 0;
+ req->source_bus_width = 32;
+ req->req_sel = ads->dma_req_sel;
+}
+
+
+static int start_playback(struct audio_stream *aos,
+ struct tegra_dma_req *req)
+{
+ int rc;
+ unsigned long flags;
+ struct audio_driver_state *ads = ads_from_out(aos);
+
+ pr_debug("%s: (writing %d)\n",
+ __func__, req->size);
+
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+#if 0
+ spdif_fifo_clear(ads->spdif_base);
+#endif
+
+ spdif_fifo_set_attention_level(ads->spdif_base,
+ ads->out.spdif_fifo_atn_level);
+
+ if (ads->fifo_init) {
+ spdif_set_bit_mode(ads->spdif_base, SPDIF_BIT_MODE_MODE16BIT);
+ spdif_set_fifo_packed(ads->spdif_base, 1);
+ ads->fifo_init = false;
+ }
+
+ spdif_fifo_enable(ads->spdif_base, 1);
+
+ rc = tegra_dma_enqueue_req(aos->dma_chan, req);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+ if (rc)
+ pr_err("%s: could not enqueue TX DMA req\n", __func__);
+ return rc;
+}
+
+/* Called with aos->dma_req_lock taken. */
+static void stop_dma_playback(struct audio_stream *aos)
+{
+ int spin = 0;
+ struct audio_driver_state *ads = ads_from_out(aos);
+ pr_debug("%s\n", __func__);
+ spdif_fifo_enable(ads->spdif_base, 0);
+ while ((spdif_get_status(ads->spdif_base) & SPDIF_STATUS_0_TX_BSY) &&
+ spin < 100) {
+ udelay(10);
+ if (spin++ > 50)
+ pr_info("%s: spin %d\n", __func__, spin);
+ }
+ if (spin == 100)
+ pr_warn("%s: spinny\n", __func__);
+ ads->fifo_init = true;
+}
+
+
+
+static irqreturn_t spdif_interrupt(int irq, void *data)
+{
+ struct audio_driver_state *ads = data;
+ u32 status = spdif_get_status(ads->spdif_base);
+
+ pr_debug("%s: %08x\n", __func__, status);
+
+/* if (status & SPDIF_STATUS_0_TX_ERR) */
+ spdif_ack_status(ads->spdif_base);
+
+ pr_debug("%s: done %08x\n", __func__,
+ spdif_get_status(ads->spdif_base));
+ return IRQ_HANDLED;
+}
+
+static ssize_t tegra_spdif_write(struct file *file,
+ const char __user *buf, size_t size, loff_t *off)
+{
+ ssize_t rc = 0;
+ int out_buf;
+ struct tegra_dma_req *req;
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ mutex_lock(&ads->out.lock);
+
+ if (!IS_ALIGNED(size, 4) || size < 4 || size > buf_size(&ads->out)) {
+ pr_err("%s: invalid user size %d\n", __func__, size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("%s: write %d bytes\n", __func__, size);
+
+ if (ads->out.stop) {
+ pr_debug("%s: playback has been cancelled\n", __func__);
+ goto done;
+ }
+
+ /* Decide which buf is next. */
+ out_buf = (ads->out.last_queued + 1) % ads->out.num_bufs;
+ req = &ads->out.dma_req[out_buf];
+
+ /* Wait for the buffer to be emptied (complete). The maximum timeout
+ * value could be calculated dynamically based on buf_size(&ads->out).
+ * For a buffer size of 16k, at 44.1kHz/stereo/16-bit PCM, you would
+ * have ~93ms.
+ */
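+ /* Rough worked example (assuming a 16 KiB buffer, i.e.
+ * PCM_BUFFER_MAX_SIZE_ORDER == 14): 16384 / (44100 * 2 ch * 2 bytes)
+ * is about 93 ms, so the fixed HZ (1 s) timeout below leaves a wide
+ * margin for draining one buffer. */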
+ pr_debug("%s: waiting for buffer %d\n", __func__, out_buf);
+ rc = wait_for_completion_interruptible_timeout(
+ &ads->out.comp[out_buf], HZ);
+ if (!rc) {
+ pr_err("%s: timeout", __func__);
+ rc = -ETIMEDOUT;
+ goto done;
+ } else if (rc < 0) {
+ pr_err("%s: wait error %d", __func__, rc);
+ goto done;
+ }
+
+ /* Fill the buffer and enqueue it. */
+ pr_debug("%s: acquired buffer %d, copying data\n", __func__, out_buf);
+ rc = copy_from_user(ads->out.buffer[out_buf], buf, size);
+ if (rc) {
+ rc = -EFAULT;
+ goto done;
+ }
+
+ prevent_suspend(&ads->out);
+
+ req->size = size;
+ dma_sync_single_for_device(NULL,
+ req->source_addr, req->size, DMA_TO_DEVICE);
+ ads->out.last_queued = out_buf;
+ init_completion(&ads->out.stop_completion);
+
+ rc = start_playback(&ads->out, req);
+ if (!rc)
+ rc = size;
+ else
+ allow_suspend(&ads->out);
+
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static long tegra_spdif_out_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct audio_driver_state *ads = ads_from_misc_out_ctl(file);
+ struct audio_stream *aos = &ads->out;
+
+ mutex_lock(&aos->lock);
+
+ switch (cmd) {
+ case TEGRA_AUDIO_OUT_FLUSH:
+ if (pending_buffer_requests(aos)) {
+ pr_debug("%s: flushing\n", __func__);
+ request_stop_nosync(aos);
+ pr_debug("%s: flushed\n", __func__);
+ }
+ if (stop_playback_if_necessary(aos))
+ pr_debug("%s: done (stopped)\n", __func__);
+ aos->stop = false;
+ break;
+ case TEGRA_AUDIO_OUT_SET_NUM_BUFS: {
+ unsigned int num;
+ if (copy_from_user(&num, (const void __user *)arg,
+ sizeof(num))) {
+ rc = -EFAULT;
+ break;
+ }
+ if (!num || num > SPDIF_MAX_NUM_BUFS) {
+ pr_err("%s: invalid buffer count %d\n", __func__, num);
+ rc = -EINVAL;
+ break;
+ }
+ if (pending_buffer_requests(aos)) {
+ pr_err("%s: playback in progress\n", __func__);
+ rc = -EBUSY;
+ break;
+ }
+ rc = init_stream_buffer(aos, num);
+ if (rc < 0)
+ break;
+ aos->num_bufs = num;
+ sound_ops->setup(ads);
+ }
+ break;
+ case TEGRA_AUDIO_OUT_GET_NUM_BUFS:
+ if (copy_to_user((void __user *)arg,
+ &aos->num_bufs, sizeof(aos->num_bufs)))
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ mutex_unlock(&aos->lock);
+ return rc;
+}
+
+
+static int tegra_spdif_out_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ int i;
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&ads->out.lock);
+
+ if (ads->out.opened) {
+ rc = -EBUSY;
+ goto done;
+ }
+
+ ads->out.opened = 1;
+ ads->out.stop = false;
+
+ for (i = 0; i < SPDIF_MAX_NUM_BUFS; i++) {
+ init_completion(&ads->out.comp[i]);
+ /* TX buf rest state is unqueued, complete. */
+ complete(&ads->out.comp[i]);
+ }
+
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static int tegra_spdif_out_release(struct inode *inode, struct file *file)
+{
+ struct audio_driver_state *ads = ads_from_misc_out(file);
+
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&ads->out.lock);
+ ads->out.opened = 0;
+ request_stop_nosync(&ads->out);
+ if (stop_playback_if_necessary(&ads->out))
+ pr_debug("%s: done (stopped)\n", __func__);
+ allow_suspend(&ads->out);
+ mutex_unlock(&ads->out.lock);
+ pr_debug("%s: done\n", __func__);
+ return 0;
+}
+
+
+static const struct file_operations tegra_spdif_out_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_spdif_out_open,
+ .release = tegra_spdif_out_release,
+ .write = tegra_spdif_write,
+};
+
+static int tegra_spdif_ctl_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int tegra_spdif_ctl_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations tegra_spdif_out_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_spdif_ctl_open,
+ .release = tegra_spdif_ctl_release,
+ .unlocked_ioctl = tegra_spdif_out_ioctl,
+};
+
+static int init_stream_buffer(struct audio_stream *s, int num)
+{
+ int i, j;
+ pr_debug("%s (num %d)\n", __func__, num);
+
+ for (i = 0; i < num; i++) {
+ kfree(s->buffer[i]);
+ s->buffer[i] =
+ kmalloc(buf_size(s), GFP_KERNEL | GFP_DMA);
+ if (!s->buffer[i]) {
+ pr_err("%s: could not allocate buffer\n", __func__);
+ for (j = i - 1; j >= 0; j--) {
+ kfree(s->buffer[j]);
+ s->buffer[j] = 0;
+ }
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+
+static int setup_misc_device(struct miscdevice *misc,
+ const struct file_operations *fops,
+ const char *fmt, ...)
+{
+ int rc = 0;
+ va_list args;
+ const int sz = 64;
+
+ va_start(args, fmt);
+
+ memset(misc, 0, sizeof(*misc));
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = kmalloc(sz, GFP_KERNEL);
+ if (!misc->name) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ vsnprintf((char *)misc->name, sz, fmt, args);
+ misc->fops = fops;
+ if (misc_register(misc)) {
+ pr_err("%s: could not register %s\n", __func__, misc->name);
+ kfree(misc->name);
+ rc = -EIO;
+ goto done;
+ }
+
+done:
+ va_end(args);
+ return rc;
+}
+
+static ssize_t dma_toggle_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "dma\n");
+}
+
+static ssize_t dma_toggle_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ pr_err("%s: Not implemented.", __func__);
+ return 0;
+}
+
+static DEVICE_ATTR(dma_toggle, 0644, dma_toggle_show, dma_toggle_store);
+
+static ssize_t __attr_fifo_atn_read(char *buf, int atn_lvl)
+{
+ switch (atn_lvl) {
+ case SPDIF_FIFO_ATN_LVL_ONE_SLOT:
+ strncpy(buf, "1\n", 2);
+ return 2;
+ case SPDIF_FIFO_ATN_LVL_FOUR_SLOTS:
+ strncpy(buf, "4\n", 2);
+ return 2;
+ case SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS:
+ strncpy(buf, "8\n", 2);
+ return 2;
+ case SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS:
+ strncpy(buf, "12\n", 3);
+ return 3;
+ default:
+ BUG_ON(1);
+ return -EIO;
+ }
+}
+
+static ssize_t __attr_fifo_atn_write(struct audio_driver_state *ads,
+ struct audio_stream *as,
+ int *fifo_lvl,
+ const char *buf, size_t size)
+{
+ int lvl;
+
+ if (size > 3) {
+ pr_err("%s: buffer size %d too big\n", __func__, size);
+ return -EINVAL;
+ }
+
+ if (sscanf(buf, "%d", &lvl) != 1) {
+ pr_err("%s: invalid input string [%s]\n", __func__, buf);
+ return -EINVAL;
+ }
+
+ switch (lvl) {
+ case 1:
+ lvl = SPDIF_FIFO_ATN_LVL_ONE_SLOT;
+ break;
+ case 4:
+ lvl = SPDIF_FIFO_ATN_LVL_FOUR_SLOTS;
+ break;
+ case 8:
+ lvl = SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS;
+ break;
+ case 12:
+ lvl = SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS;
+ break;
+ default:
+ pr_err("%s: invalid attention level %d\n", __func__, lvl);
+ return -EINVAL;
+ }
+
+ *fifo_lvl = lvl;
+ pr_info("%s: fifo level %d\n", __func__, *fifo_lvl);
+
+ return size;
+}
+
+static ssize_t tx_fifo_atn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return __attr_fifo_atn_read(buf, ads->out.spdif_fifo_atn_level);
+}
+
+static ssize_t tx_fifo_atn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t rc;
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ mutex_lock(&ads->out.lock);
+ if (pending_buffer_requests(&ads->out)) {
+ pr_err("%s: playback in progress.\n", __func__);
+ rc = -EBUSY;
+ goto done;
+ }
+ rc = __attr_fifo_atn_write(ads, &ads->out,
+ &ads->out.spdif_fifo_atn_level,
+ buf, count);
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static DEVICE_ATTR(tx_fifo_atn, 0644, tx_fifo_atn_show, tx_fifo_atn_store);
+
+
+static int spdif_configure(struct platform_device *pdev)
+{
+ struct tegra_audio_platform_data *pdata = pdev->dev.platform_data;
+ struct audio_driver_state *state = pdata->driver_data;
+
+ if (!state)
+ return -ENOMEM;
+
+ /* disable interrupts from SPDIF */
+ spdif_writel(state->spdif_base, 0x0, SPDIF_CTRL_0);
+ spdif_fifo_clear(state->spdif_base);
+ spdif_enable_fifos(state->spdif_base, 0);
+
+ spdif_set_bit_mode(state->spdif_base, SPDIF_BIT_MODE_MODE16BIT);
+ spdif_set_fifo_packed(state->spdif_base, 1);
+
+ spdif_fifo_set_attention_level(state->spdif_base,
+ state->out.spdif_fifo_atn_level);
+
+ spdif_set_sample_rate(state, 44100);
+
+ state->fifo_init = true;
+ return 0;
+}
+
+static int tegra_spdif_probe(struct platform_device *pdev)
+{
+ int rc, i;
+ struct resource *res;
+ struct audio_driver_state *state;
+
+ pr_info("%s\n", __func__);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->pdev = pdev;
+ state->pdata = pdev->dev.platform_data;
+ BUG_ON(!state->pdata);
+ state->pdata->driver_data = state;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource!\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "memory region already claimed!\n");
+ return -ENOMEM;
+ }
+
+ state->spdif_phys = res->start;
+ state->spdif_base = (unsigned long)ioremap(res->start,
+ resource_size(res));
+ if (!state->spdif_base) {
+ dev_err(&pdev->dev, "cannot remap iomem!\n");
+ return -EIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no dma resource!\n");
+ return -ENODEV;
+ }
+ state->dma_req_sel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource!\n");
+ return -ENODEV;
+ }
+ state->irq = res->start;
+
+ rc = spdif_configure(pdev);
+ if (rc < 0)
+ return rc;
+
+ state->out.opened = 0;
+ state->out.active = false;
+ mutex_init(&state->out.lock);
+ init_completion(&state->out.stop_completion);
+ spin_lock_init(&state->out.dma_req_lock);
+ state->out.dma_chan = NULL;
+ state->out.num_bufs = SPDIF_DEFAULT_TX_NUM_BUFS;
+ for (i = 0; i < SPDIF_MAX_NUM_BUFS; i++) {
+ init_completion(&state->out.comp[i]);
+ /* TX buf rest state is unqueued, complete. */
+ complete(&state->out.comp[i]);
+ state->out.buffer[i] = 0;
+ state->out.buf_phy[i] = 0;
+ }
+ state->out.last_queued = 0;
+ rc = init_stream_buffer(&state->out, state->out.num_bufs);
+ if (rc < 0)
+ return rc;
+
+ INIT_WORK(&state->out.allow_suspend_work, allow_suspend_worker);
+ snprintf(state->out.wake_lock_name, sizeof(state->out.wake_lock_name),
+ "tegra-audio-spdif");
+ wake_lock_init(&state->out.wake_lock, WAKE_LOCK_SUSPEND,
+ state->out.wake_lock_name);
+
+ if (request_irq(state->irq, spdif_interrupt,
+ IRQF_DISABLED, state->pdev->name, state) < 0) {
+ dev_err(&pdev->dev,
+ "%s: could not register handler for irq %d\n",
+ __func__, state->irq);
+ return -EIO;
+ }
+
+ rc = setup_misc_device(&state->misc_out,
+ &tegra_spdif_out_fops,
+ "spdif_out");
+ if (rc < 0)
+ return rc;
+
+ rc = setup_misc_device(&state->misc_out_ctl,
+ &tegra_spdif_out_ctl_fops,
+ "spdif_out_ctl");
+ if (rc < 0)
+ return rc;
+
+ sound_ops->setup(state);
+
+ rc = device_create_file(&pdev->dev, &dev_attr_dma_toggle);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_dma_toggle.attr.name, rc);
+ return rc;
+ }
+
+ rc = device_create_file(&pdev->dev, &dev_attr_tx_fifo_atn);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+ __func__, dev_attr_tx_fifo_atn.attr.name, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_spdif_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ /* dev_info(&pdev->dev, "%s\n", __func__); */
+ return 0;
+}
+
+static int tegra_spdif_resume(struct platform_device *pdev)
+{
+ return spdif_configure(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver tegra_spdif_driver = {
+ .driver = {
+ .name = "spdif_out",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_spdif_probe,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = tegra_spdif_suspend,
+ .resume = tegra_spdif_resume,
+#endif
+};
+
+static int __init tegra_spdif_init(void)
+{
+ return platform_driver_register(&tegra_spdif_driver);
+}
+
+module_init(tegra_spdif_init);
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-tegra/tegra_usb_modem_power.c b/arch/arm/mach-tegra/tegra_usb_modem_power.c
new file mode 100644
index 000000000000..de377ac28964
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra_usb_modem_power.c
@@ -0,0 +1,290 @@
+/*
+ * arch/arm/mach-tegra/tegra_usb_modem_power.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/usb.h>
+#include <linux/err.h>
+#include <linux/wakelock.h>
+#include <mach/tegra_usb_modem_power.h>
+
+struct tegra_usb_modem {
+ unsigned int wake_gpio; /* remote wakeup gpio */
+ unsigned int wake_cnt; /* remote wakeup counter */
+ int irq; /* remote wakeup irq */
+ struct mutex lock;
+ struct wake_lock wake_lock; /* modem wake lock */
+ unsigned int vid; /* modem vendor id */
+ unsigned int pid; /* modem product id */
+ struct usb_device *udev; /* modem usb device */
+ struct usb_interface *intf; /* first modem usb interface */
+ struct workqueue_struct *wq; /* modem workqueue */
+ struct delayed_work recovery_work; /* modem recovery work */
+ const struct tegra_modem_operations *ops; /* modem operations */
+ unsigned int capability; /* modem capability */
+};
+
+static struct tegra_usb_modem tegra_mdm;
+
+/* supported modems */
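+/* driver_info carries TEGRA_MODEM_* capability flags (autosuspend,
+ * recovery); both Icera 450 entries below leave it at 0, i.e. no
+ * optional capabilities. */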
+static const struct usb_device_id modem_list[] = {
+ {USB_DEVICE(0x1983, 0x0310), /* Icera 450 rev1 */
+ .driver_info = 0,
+ },
+ {USB_DEVICE(0x1983, 0x0321), /* Icera 450 rev2 */
+ .driver_info = 0,
+ },
+ {}
+};
+
+static irqreturn_t tegra_usb_modem_wake_thread(int irq, void *data)
+{
+ struct tegra_usb_modem *modem = (struct tegra_usb_modem *)data;
+
+ wake_lock_timeout(&modem->wake_lock, HZ);
+ mutex_lock(&modem->lock);
+ if (modem->udev) {
+ usb_lock_device(modem->udev);
+ pr_info("modem wake (%u)\n", ++(modem->wake_cnt));
+ if (usb_autopm_get_interface(modem->intf) == 0)
+ usb_autopm_put_interface_async(modem->intf);
+ usb_unlock_device(modem->udev);
+ }
+ mutex_unlock(&modem->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_usb_modem_recovery(struct work_struct *ws)
+{
+ struct tegra_usb_modem *modem = container_of(ws, struct tegra_usb_modem,
+ recovery_work.work);
+
+ mutex_lock(&modem->lock);
+ if (!modem->udev) { /* assume modem crashed */
+ if (modem->ops && modem->ops->reset)
+ modem->ops->reset();
+ }
+ mutex_unlock(&modem->lock);
+}
+
+static void device_add_handler(struct usb_device *udev)
+{
+ const struct usb_device_descriptor *desc = &udev->descriptor;
+ struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
+ const struct usb_device_id *id = usb_match_id(intf, modem_list);
+
+ if (id) {
+ pr_info("Add device %d <%s %s>\n", udev->devnum,
+ udev->manufacturer, udev->product);
+
+ mutex_lock(&tegra_mdm.lock);
+ tegra_mdm.udev = udev;
+ tegra_mdm.intf = intf;
+ tegra_mdm.vid = desc->idVendor;
+ tegra_mdm.pid = desc->idProduct;
+ tegra_mdm.wake_cnt = 0;
+ tegra_mdm.capability = id->driver_info;
+ mutex_unlock(&tegra_mdm.lock);
+
+ pr_info("persist_enabled: %u\n", udev->persist_enabled);
+
+ if (tegra_mdm.capability & TEGRA_MODEM_AUTOSUSPEND) {
+ usb_enable_autosuspend(udev);
+ pr_info("enable autosuspend for %s %s\n",
+ udev->manufacturer, udev->product);
+ }
+ }
+}
+
+static void device_remove_handler(struct usb_device *udev)
+{
+ const struct usb_device_descriptor *desc = &udev->descriptor;
+
+ if (desc->idVendor == tegra_mdm.vid &&
+ desc->idProduct == tegra_mdm.pid) {
+ pr_info("Remove device %d <%s %s>\n", udev->devnum,
+ udev->manufacturer, udev->product);
+
+ mutex_lock(&tegra_mdm.lock);
+ tegra_mdm.udev = NULL;
+ tegra_mdm.intf = NULL;
+ tegra_mdm.vid = 0;
+ mutex_unlock(&tegra_mdm.lock);
+
+ if (tegra_mdm.capability & TEGRA_MODEM_RECOVERY)
+ queue_delayed_work(tegra_mdm.wq,
+ &tegra_mdm.recovery_work, HZ * 10);
+ }
+}
+
+static int usb_notify(struct notifier_block *self, unsigned long action,
+ void *blob)
+{
+ switch (action) {
+ case USB_DEVICE_ADD:
+ device_add_handler(blob);
+ break;
+ case USB_DEVICE_REMOVE:
+ device_remove_handler(blob);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block usb_nb = {
+ .notifier_call = usb_notify,
+};
+
+static int tegra_usb_modem_probe(struct platform_device *pdev)
+{
+ struct tegra_usb_modem_power_platform_data *pdata =
+ pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+
+ /* get modem operations from platform data */
+ tegra_mdm.ops = (const struct tegra_modem_operations *)pdata->ops;
+
+ if (tegra_mdm.ops) {
+ /* modem init */
+ if (tegra_mdm.ops->init) {
+ ret = tegra_mdm.ops->init();
+ if (ret)
+ return ret;
+ }
+
+ /* start modem */
+ if (tegra_mdm.ops->start)
+ tegra_mdm.ops->start();
+ }
+
+ mutex_init(&(tegra_mdm.lock));
+ wake_lock_init(&(tegra_mdm.wake_lock), WAKE_LOCK_SUSPEND,
+ "tegra_usb_mdm_lock");
+
+ /* create work queue */
+ tegra_mdm.wq = create_workqueue("tegra_usb_mdm_queue");
+ INIT_DELAYED_WORK(&(tegra_mdm.recovery_work), tegra_usb_modem_recovery);
+
+ /* create threaded irq for remote wakeup */
+ if (pdata->wake_gpio) {
+ /* get remote wakeup gpio from platform data */
+ tegra_mdm.wake_gpio = pdata->wake_gpio;
+
+ ret = gpio_request(tegra_mdm.wake_gpio, "usb_mdm_wake");
+ if (ret)
+ return ret;
+
+ tegra_gpio_enable(tegra_mdm.wake_gpio);
+
+ /* enable IRQ for remote wakeup */
+ tegra_mdm.irq = gpio_to_irq(tegra_mdm.wake_gpio);
+
+ ret =
+ request_threaded_irq(tegra_mdm.irq, NULL,
+ tegra_usb_modem_wake_thread,
+ pdata->flags, "tegra_usb_mdm_wake",
+ &tegra_mdm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: request_threaded_irq error\n",
+ __func__);
+ return ret;
+ }
+
+ ret = enable_irq_wake(tegra_mdm.irq);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: enable_irq_wake error\n",
+ __func__);
+ free_irq(tegra_mdm.irq, &tegra_mdm);
+ return ret;
+ }
+ }
+
+ usb_register_notify(&usb_nb);
+ dev_info(&pdev->dev, "Initialized tegra_usb_modem_power\n");
+
+ return 0;
+}
+
+static int __exit tegra_usb_modem_remove(struct platform_device *pdev)
+{
+ usb_unregister_notify(&usb_nb);
+ free_irq(tegra_mdm.irq, &tegra_mdm);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_usb_modem_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ /* send L3 hint to modem */
+ if (tegra_mdm.ops && tegra_mdm.ops->suspend)
+ tegra_mdm.ops->suspend();
+ return 0;
+}
+
+static int tegra_usb_modem_resume(struct platform_device *pdev)
+{
+ /* send L3->L0 hint to modem */
+ if (tegra_mdm.ops && tegra_mdm.ops->resume)
+ tegra_mdm.ops->resume();
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_usb_modem_power_driver = {
+ .driver = {
+ .name = "tegra_usb_modem_power",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_usb_modem_probe,
+ .remove = __exit_p(tegra_usb_modem_remove),
+#ifdef CONFIG_PM
+ .suspend = tegra_usb_modem_suspend,
+ .resume = tegra_usb_modem_resume,
+#endif
+};
+
+static int __init tegra_usb_modem_power_init(void)
+{
+ return platform_driver_register(&tegra_usb_modem_power_driver);
+}
+
+subsys_initcall(tegra_usb_modem_power_init);
+
+static void __exit tegra_usb_modem_power_exit(void)
+{
+ platform_driver_unregister(&tegra_usb_modem_power_driver);
+}
+
+module_exit(tegra_usb_modem_power_exit);
+
+MODULE_DESCRIPTION("Tegra usb modem power management driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-tegra/timer-t2.c b/arch/arm/mach-tegra/timer-t2.c
new file mode 100644
index 000000000000..dff9abb76272
--- /dev/null
+++ b/arch/arm/mach-tegra/timer-t2.c
@@ -0,0 +1,128 @@
+/*
+ * arch/arm/mach-tegra/timer-t2.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/mach/time.h>
+#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "board.h"
+#include "clock.h"
+#include "timer.h"
+
+/*
+ * Timers usage:
+ * TMR1 - Free.
+ * TMR2 - used by AVP.
+ * TMR3 - used as general CPU timer.
+ * TMR4 - used for LP2 wakeup.
+*/
+
+#define TIMER1_OFFSET (TEGRA_TMR1_BASE-TEGRA_TMR1_BASE)
+#define TIMER2_OFFSET (TEGRA_TMR2_BASE-TEGRA_TMR1_BASE)
+#define TIMER3_OFFSET (TEGRA_TMR3_BASE-TEGRA_TMR1_BASE)
+#define TIMER4_OFFSET (TEGRA_TMR4_BASE-TEGRA_TMR1_BASE)
+
+#define timer_writel(value, reg) \
+ __raw_writel(value, (u32)timer_reg_base + (reg))
+#define timer_readl(reg) \
+ __raw_readl((u32)timer_reg_base + (reg))
+
+
+static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
+
+#ifdef CONFIG_PM_SLEEP
+static irqreturn_t tegra_lp2wake_interrupt(int irq, void *dev_id)
+{
+ timer_writel(1<<30, TIMER4_OFFSET + TIMER_PCR);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction tegra_lp2wake_irq = {
+ .name = "timer_lp2wake",
+ .flags = IRQF_DISABLED,
+ .handler = tegra_lp2wake_interrupt,
+ .dev_id = NULL,
+ .irq = INT_TMR4,
+};
+
+void tegra2_lp2_set_trigger(unsigned long cycles)
+{
+ timer_writel(0, TIMER4_OFFSET + TIMER_PTV);
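+ /* PTV bit 31 arms the timer; the low bits (capped here at
+ * 0x1fffffff) hold the wakeup trigger count. */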
+ if (cycles) {
+ u32 reg = 0x80000000ul | min(0x1ffffffful, cycles);
+ timer_writel(reg, TIMER4_OFFSET + TIMER_PTV);
+ }
+}
+EXPORT_SYMBOL(tegra2_lp2_set_trigger);
+
+unsigned long tegra2_lp2_timer_remain(void)
+{
+ return timer_readl(TIMER4_OFFSET + TIMER_PCR) & 0x1ffffffful;
+}
+#endif
+
+void __init tegra2_init_timer(u32 *offset, int *irq)
+{
+ unsigned long rate = clk_measure_input_freq();
+ int ret;
+
+ switch (rate) {
+ case 12000000:
+ timer_writel(0x000b, TIMERUS_USEC_CFG);
+ break;
+ case 13000000:
+ timer_writel(0x000c, TIMERUS_USEC_CFG);
+ break;
+ case 19200000:
+ timer_writel(0x045f, TIMERUS_USEC_CFG);
+ break;
+ case 26000000:
+ timer_writel(0x0019, TIMERUS_USEC_CFG);
+ break;
+ default:
+ WARN(1, "Unknown clock rate");
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ ret = setup_irq(tegra_lp2wake_irq.irq, &tegra_lp2wake_irq);
+ if (ret) {
+ pr_err("Failed to register LP2 timer IRQ: %d\n", ret);
+ BUG();
+ }
+#endif
+
+ *offset = TIMER3_OFFSET;
+ *irq = INT_TMR3;
+}
diff --git a/arch/arm/mach-tegra/timer-t3.c b/arch/arm/mach-tegra/timer-t3.c
new file mode 100644
index 000000000000..cc8d540bcdca
--- /dev/null
+++ b/arch/arm/mach-tegra/timer-t3.c
@@ -0,0 +1,288 @@
+/*
+ * arch/arm/mach-tegra/timer-t3.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/syscore_ops.h>
+#include <linux/cpu.h>
+
+#include <asm/mach/time.h>
+#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
+
+#include <mach/hardware.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "board.h"
+#include "clock.h"
+#include "cpuidle.h"
+#include "timer.h"
+
+#define TEST_LP2_WAKE_TIMERS 0
+
+/*
+ * Timers usage:
+ * TMR1 - used as general CPU timer.
+ * TMR2 - used by AVP.
+ * TMR3 - used by CPU0 for LP2 wakeup.
+ * TMR4 - used by CPU1 for LP2 wakeup.
+ * TMR5 - used by CPU2 for LP2 wakeup.
+ * TMR6 - used by CPU3 for LP2 wakeup.
+ * TMR7 - Free.
+ * TMR8 - Free.
+ * TMR9 - Free.
+ * TMR10 - used as source for watchdog controller 0.
+*/
+
+#define TIMER1_OFFSET (TEGRA_TMR1_BASE-TEGRA_TMR1_BASE)
+#define TIMER2_OFFSET (TEGRA_TMR2_BASE-TEGRA_TMR1_BASE)
+#define TIMER3_OFFSET (TEGRA_TMR3_BASE-TEGRA_TMR1_BASE)
+#define TIMER4_OFFSET (TEGRA_TMR4_BASE-TEGRA_TMR1_BASE)
+#define TIMER5_OFFSET (TEGRA_TMR5_BASE-TEGRA_TMR1_BASE)
+#define TIMER6_OFFSET (TEGRA_TMR6_BASE-TEGRA_TMR1_BASE)
+
+static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
+
+#define timer_writel(value, reg) \
+ __raw_writel(value, (u32)timer_reg_base + (reg))
+#define timer_readl(reg) \
+ __raw_readl((u32)timer_reg_base + (reg))
+
+
+#ifdef CONFIG_PM_SLEEP
+static u32 lp2_wake_timers[] = {
+ TIMER3_OFFSET,
+#ifdef CONFIG_SMP
+ TIMER4_OFFSET,
+ TIMER5_OFFSET,
+ TIMER6_OFFSET,
+#endif
+};
+
+static irqreturn_t tegra_lp2wake_interrupt(int irq, void *dev_id)
+{
+ int cpu = (int)dev_id;
+ int base;
+
+ base = lp2_wake_timers[cpu];
+ timer_writel(1<<30, base + TIMER_PCR);
+ return IRQ_HANDLED;
+}
+
+#define LP2_TIMER_IRQ_ACTION(cpu, irqnum) { \
+ .name = "tmr_lp2wake_cpu" __stringify(cpu), \
+ .flags = IRQF_DISABLED, \
+ .handler = tegra_lp2wake_interrupt, \
+ .dev_id = (void*)cpu, \
+ .irq = irqnum }
+
+static struct irqaction tegra_lp2wake_irq[] = {
+ LP2_TIMER_IRQ_ACTION(0, INT_TMR3),
+#ifdef CONFIG_SMP
+ LP2_TIMER_IRQ_ACTION(1, INT_TMR4),
+ LP2_TIMER_IRQ_ACTION(2, INT_TMR5),
+ LP2_TIMER_IRQ_ACTION(3, INT_TMR6),
+#endif
+};
+
+#ifdef CONFIG_SMP
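+/*
+ * Read the current CPU number from the CP15 MPIDR register (c0, c0, 5);
+ * the .alt.smp.init entry lets the SMP-on-UP fixup patch this to a
+ * constant 0 on uniprocessor systems. Only the low four affinity bits
+ * are kept.
+ */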
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__("\n" \
+ "1: mrc p15, 0, %0, c0, c0, 5\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n"\
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
+ : "=r" (cpunum)); \
+ cpunum &= 0x0F; \
+ })
+#define cpu_number() hard_smp_processor_id()
+#else
+#define cpu_number() 0
+#endif
+
+/*
+ * To sanity test LP2 timer interrupts for CPU 0-3, enable this flag and check
+ * /proc/interrupts for timer interrupts. CPUs 0-3 should have one interrupt
+ * counted against them for tmr_lp2wake_cpu<n>, where <n> is the CPU number.
+ */
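+/* For example (assuming each wake timer fired exactly once):
+ *   $ grep tmr_lp2wake /proc/interrupts
+ * should list tmr_lp2wake_cpu0..cpu3 with a count of 1 each. */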
+#if TEST_LP2_WAKE_TIMERS
+static void test_lp2_wake_timer(unsigned int cpu)
+{
+ unsigned long cycles = 50000;
+ unsigned int base = lp2_wake_timers[cpu];
+ static bool tested[4] = {false, false, false, false};
+
+ /* Don't repeat the test process on hotplug restart. */
+ if (!tested[cpu]) {
+ timer_writel(0, base + TIMER_PTV);
+ if (cycles) {
+ u32 reg = 0x80000000ul | min(0x1ffffffful, cycles);
+ timer_writel(reg, base + TIMER_PTV);
+ tested[cpu] = true;
+ }
+ }
+}
+#else
+static inline void test_lp2_wake_timer(unsigned int cpu) {}
+#endif
+
+static void tegra3_register_wake_timer(unsigned int cpu)
+{
+ int ret;
+
+ ret = setup_irq(tegra_lp2wake_irq[cpu].irq, &tegra_lp2wake_irq[cpu]);
+ if (ret) {
+ pr_err("Failed to register LP2 timer IRQ for CPU %d: "
+ "irq=%d, ret=%d\n", cpu,
+ tegra_lp2wake_irq[cpu].irq, ret);
+ goto fail;
+ }
+
+#ifdef CONFIG_SMP
+ ret = irq_set_affinity(tegra_lp2wake_irq[cpu].irq, cpumask_of(cpu));
+ if (ret) {
+ pr_err("Failed to set affinity for LP2 timer IRQ to "
+ "CPU %d: irq=%d, ret=%d\n", cpu,
+ tegra_lp2wake_irq[cpu].irq, ret);
+ goto fail;
+ }
+#endif
+
+ test_lp2_wake_timer(cpu);
+ return;
+fail:
+ tegra_lp2_in_idle(false);
+}
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_HOTPLUG_CPU)
+static void tegra3_unregister_wake_timer(unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+ /* Reassign the affinity of the wake IRQ to CPU 0. */
+ (void)irq_set_affinity(tegra_lp2wake_irq[cpu].irq, cpumask_of(0));
+#endif
+
+ /* Dispose of this IRQ. */
+ remove_irq(tegra_lp2wake_irq[cpu].irq, &tegra_lp2wake_irq[cpu]);
+}
+#endif
+
+void tegra3_lp2_set_trigger(unsigned long cycles)
+{
+ int cpu = cpu_number();
+ int base;
+
+ base = lp2_wake_timers[cpu];
+ timer_writel(0, base + TIMER_PTV);
+ if (cycles) {
+ u32 reg = 0x80000000ul | min(0x1ffffffful, cycles);
+ timer_writel(reg, base + TIMER_PTV);
+ }
+}
+EXPORT_SYMBOL(tegra3_lp2_set_trigger);
+
+unsigned long tegra3_lp2_timer_remain(void)
+{
+ int cpu = cpu_number();
+
+ return timer_readl(lp2_wake_timers[cpu] + TIMER_PCR) & 0x1ffffffful;
+}
+#endif
+
+void __init tegra3_init_timer(u32 *offset, int *irq)
+{
+ unsigned long rate = clk_measure_input_freq();
+
+ switch (rate) {
+ case 12000000:
+ timer_writel(0x000b, TIMERUS_USEC_CFG);
+ break;
+ case 13000000:
+ timer_writel(0x000c, TIMERUS_USEC_CFG);
+ break;
+ case 19200000:
+ timer_writel(0x045f, TIMERUS_USEC_CFG);
+ break;
+ case 26000000:
+ timer_writel(0x0019, TIMERUS_USEC_CFG);
+ break;
+ case 16800000:
+ timer_writel(0x0453, TIMERUS_USEC_CFG);
+ break;
+ case 38400000:
+ timer_writel(0x04BF, TIMERUS_USEC_CFG);
+ break;
+ case 48000000:
+ timer_writel(0x002F, TIMERUS_USEC_CFG);
+ break;
+ default:
+ WARN(1, "Unknown clock rate");
+ }
+
+#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_SMP
+ /* For T30.A01 use INT_TMR_SHARED instead of INT_TMR6 for CPU3. */
+ if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
+ (tegra_get_revision() == TEGRA_REVISION_A01))
+ tegra_lp2wake_irq[3].irq = INT_TMR_SHARED;
+#endif
+
+ tegra3_register_wake_timer(0);
+#endif
+
+ *offset = TIMER1_OFFSET;
+ *irq = INT_TMR1;
+}
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_HOTPLUG_CPU)
+static int hotplug_notify(struct notifier_block *self,
+ unsigned long action, void *cpu)
+{
+ if (action == CPU_ONLINE)
+ tegra3_register_wake_timer((unsigned int)cpu);
+ else if (action == CPU_DOWN_PREPARE)
+ tegra3_unregister_wake_timer((unsigned int)cpu);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata hotplug_notifier_block = {
+ .notifier_call = hotplug_notify,
+};
+
+static int __init hotplug_cpu_register(void)
+{
+ return register_cpu_notifier(&hotplug_notifier_block);
+}
+early_initcall(hotplug_cpu_register);
+#endif
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 90350420c4e9..d869ba3a3e2c 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -2,10 +2,13 @@
* arch/arch/mach-tegra/timer.c
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Author:
* Colin Cross <ccross@google.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -27,39 +30,30 @@
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/syscore_ops.h>
#include <asm/mach/time.h>
#include <asm/localtimer.h>
+#include <asm/smp_twd.h>
#include <asm/sched_clock.h>
#include <mach/iomap.h>
#include <mach/irqs.h>
-#include <mach/suspend.h>
#include "board.h"
#include "clock.h"
-
-#define RTC_SECONDS 0x08
-#define RTC_SHADOW_SECONDS 0x0c
-#define RTC_MILLISECONDS 0x10
-
-#define TIMERUS_CNTR_1US 0x10
-#define TIMERUS_USEC_CFG 0x14
-#define TIMERUS_CNTR_FREEZE 0x4c
-
-#define TIMER1_BASE 0x0
-#define TIMER2_BASE 0x8
-#define TIMER3_BASE 0x50
-#define TIMER4_BASE 0x58
-
-#define TIMER_PTV 0x0
-#define TIMER_PCR 0x4
+#include "timer.h"
static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
static void __iomem *rtc_base = IO_ADDRESS(TEGRA_RTC_BASE);
static struct timespec persistent_ts;
static u64 persistent_ms, last_persistent_ms;
+static u32 usec_config;
+static u32 usec_offset;
+static bool usec_suspended;
+
+static u32 system_timer;
#define timer_writel(value, reg) \
__raw_writel(value, (u32)timer_reg_base + (reg))
@@ -72,7 +66,7 @@ static int tegra_timer_set_next_event(unsigned long cycles,
u32 reg;
reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ timer_writel(reg, system_timer + TIMER_PTV);
return 0;
}
@@ -82,12 +76,12 @@ static void tegra_timer_set_mode(enum clock_event_mode mode,
{
u32 reg;
- timer_writel(0, TIMER3_BASE + TIMER_PTV);
+ timer_writel(0, system_timer + TIMER_PTV);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
reg = 0xC0000000 | ((1000000/HZ)-1);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ timer_writel(reg, system_timer + TIMER_PTV);
break;
case CLOCK_EVT_MODE_ONESHOT:
break;
@@ -115,15 +109,23 @@ static DEFINE_CLOCK_DATA(cd);
#define SC_MULT 4194304000u
#define SC_SHIFT 22
+static u32 notrace tegra_read_usec(void)
+{
+ u32 cyc = usec_offset;
+ if (!usec_suspended)
+ cyc += timer_readl(TIMERUS_CNTR_1US);
+ return cyc;
+}
+
unsigned long long notrace sched_clock(void)
{
- u32 cyc = timer_readl(TIMERUS_CNTR_1US);
+ u32 cyc = tegra_read_usec();
return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
}
static void notrace tegra_update_sched_clock(void)
{
- u32 cyc = timer_readl(TIMERUS_CNTR_1US);
+ u32 cyc = tegra_read_usec();
update_sched_clock(&cd, cyc, (u32)~0);
}
@@ -133,7 +135,7 @@ static void notrace tegra_update_sched_clock(void)
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
-u64 tegra_rtc_read_ms(void)
+static u64 tegra_rtc_read_ms(void)
{
u32 ms = readl(rtc_base + RTC_MILLISECONDS);
u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
@@ -166,7 +168,7 @@ void read_persistent_clock(struct timespec *ts)
static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
+ timer_writel(1<<30, system_timer + TIMER_PCR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -176,13 +178,60 @@ static struct irqaction tegra_timer_irq = {
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
.handler = tegra_timer_interrupt,
.dev_id = &tegra_clockevent,
- .irq = INT_TMR3,
};
+static int tegra_timer_suspend(void)
+{
+ usec_config = timer_readl(TIMERUS_USEC_CFG);
+
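+ /* Fold the running microsecond counter into usec_offset so that
+ * tegra_read_usec() (and hence sched_clock) stays monotonic across
+ * suspend; tegra_timer_resume() subtracts the restarted counter to
+ * keep the sum continuous. */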
+ usec_offset += timer_readl(TIMERUS_CNTR_1US);
+ usec_suspended = true;
+
+ return 0;
+}
+
+static void tegra_timer_resume(void)
+{
+ timer_writel(usec_config, TIMERUS_USEC_CFG);
+
+ usec_offset -= timer_readl(TIMERUS_CNTR_1US);
+ usec_suspended = false;
+}
+
+static struct syscore_ops tegra_timer_syscore_ops = {
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+};
+
+#ifdef CONFIG_HAVE_ARM_TWD
+void tegra_twd_suspend(struct tegra_twd_context *context)
+{
+ context->twd_ctrl = readl(twd_base + TWD_TIMER_CONTROL);
+ context->twd_load = readl(twd_base + TWD_TIMER_LOAD);
+ if ((context->twd_load == 0) &&
+ (context->twd_ctrl & TWD_TIMER_CONTROL_PERIODIC) &&
+ (context->twd_ctrl & (TWD_TIMER_CONTROL_ENABLE |
+ TWD_TIMER_CONTROL_IT_ENABLE))) {
+ WARN("%s: TWD enabled but counter was 0\n", __func__);
+ context->twd_load = 1;
+ }
+ __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+}
+
+void tegra_twd_resume(struct tegra_twd_context *context)
+{
+ BUG_ON((context->twd_load == 0) &&
+ (context->twd_ctrl & TWD_TIMER_CONTROL_PERIODIC) &&
+ (context->twd_ctrl & (TWD_TIMER_CONTROL_ENABLE |
+ TWD_TIMER_CONTROL_IT_ENABLE)));
+ writel(context->twd_load, twd_base + TWD_TIMER_LOAD);
+ writel(context->twd_ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+#endif
+
static void __init tegra_init_timer(void)
{
struct clk *clk;
- unsigned long rate = clk_measure_input_freq();
int ret;
clk = clk_get_sys("timer", NULL);
@@ -201,22 +250,11 @@ static void __init tegra_init_timer(void)
twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif
- switch (rate) {
- case 12000000:
- timer_writel(0x000b, TIMERUS_USEC_CFG);
- break;
- case 13000000:
- timer_writel(0x000c, TIMERUS_USEC_CFG);
- break;
- case 19200000:
- timer_writel(0x045f, TIMERUS_USEC_CFG);
- break;
- case 26000000:
- timer_writel(0x0019, TIMERUS_USEC_CFG);
- break;
- default:
- WARN(1, "Unknown clock rate");
- }
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra2_init_timer(&system_timer, &tegra_timer_irq.irq);
+#else
+ tegra3_init_timer(&system_timer, &tegra_timer_irq.irq);
+#endif
init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32,
1000000, SC_MULT, SC_SHIFT);
@@ -241,22 +279,10 @@ static void __init tegra_init_timer(void)
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_register_device(&tegra_clockevent);
+
+ register_syscore_ops(&tegra_timer_syscore_ops);
}
struct sys_timer tegra_timer = {
.init = tegra_init_timer,
};
-
-#ifdef CONFIG_PM
-static u32 usec_config;
-
-void tegra_timer_suspend(void)
-{
- usec_config = timer_readl(TIMERUS_USEC_CFG);
-}
-
-void tegra_timer_resume(void)
-{
- timer_writel(usec_config, TIMERUS_USEC_CFG);
-}
-#endif
diff --git a/arch/arm/mach-tegra/timer.h b/arch/arm/mach-tegra/timer.h
new file mode 100644
index 000000000000..04d858fb77ea
--- /dev/null
+++ b/arch/arm/mach-tegra/timer.h
@@ -0,0 +1,51 @@
+/*
+ * arch/arm/mach-tegra/timer.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_TIMER_H_
+#define _MACH_TEGRA_TIMER_H_
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
+#define TIMER_PTV 0x0
+#define TIMER_PCR 0x4
+
+#define TIMERUS_CNTR_1US 0x10
+#define TIMERUS_USEC_CFG 0x14
+#define TIMERUS_CNTR_FREEZE 0x4c
+
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void __init tegra2_init_timer(u32 *offset, int *irq);
+#else
+void __init tegra3_init_timer(u32 *offset, int *irq);
+#endif
+
+struct tegra_twd_context {
+ u32 twd_ctrl;
+ u32 twd_load;
+};
+
+#ifdef CONFIG_HAVE_ARM_TWD
+void tegra_twd_suspend(struct tegra_twd_context *context);
+void tegra_twd_resume(struct tegra_twd_context *context);
+#else
+static inline void tegra_twd_suspend(struct tegra_twd_context *context) {}
+static inline void tegra_twd_resume(struct tegra_twd_context *context) {}
+#endif
+
+#endif /* _MACH_TEGRA_TIMER_H_ */
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c
index 88081bb3ec52..bc183959353b 100644
--- a/arch/arm/mach-tegra/usb_phy.c
+++ b/arch/arm/mach-tegra/usb_phy.c
@@ -2,6 +2,7 @@
* arch/arm/mach-tegra/usb_phy.c
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010 - 2011 NVIDIA Corporation
*
* Author:
* Erik Gilling <konkers@google.com>
@@ -20,6 +21,7 @@
#include <linux/resource.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -30,8 +32,26 @@
#include <asm/mach-types.h>
#include <mach/usb_phy.h>
#include <mach/iomap.h>
+#include <mach/pinmux.h>
+#include "fuse.h"
+
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define USB_USBCMD 0x140
+#define USB_USBCMD_RS (1 << 0)
+
+#define USB_USBSTS 0x144
+#define USB_USBSTS_PCI (1 << 2)
+#define USB_USBSTS_HCH (1 << 12)
+
+#define USB_TXFILLTUNING 0x164
+#define USB_FIFO_TXFILL_THRES(x) (((x) & 0x1f) << 16)
+#define USB_FIFO_TXFILL_MASK 0x1f0000
#define ULPI_VIEWPORT 0x170
+#define ULPI_WAKEUP (1 << 31)
+#define ULPI_RUN (1 << 30)
+#define ULPI_RD_WR (1 << 29)
#define USB_PORTSC1 0x184
#define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
@@ -42,6 +62,7 @@
#define USB_PORTSC1_WKCN (1 << 20)
#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
#define USB_PORTSC1_PP (1 << 12)
+#define USB_PORTSC1_LS(x) (((x) & 0x3) << 10)
#define USB_PORTSC1_SUSP (1 << 7)
#define USB_PORTSC1_PE (1 << 2)
#define USB_PORTSC1_CCS (1 << 0)
@@ -50,14 +71,22 @@
#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
#define USB_SUSP_CLR (1 << 5)
+#define USB_CLKEN (1 << 6)
#define USB_PHY_CLK_VALID (1 << 7)
-#define UTMIP_RESET (1 << 11)
-#define UHSIC_RESET (1 << 11)
-#define UTMIP_PHY_ENABLE (1 << 12)
+#define USB_PHY_CLK_VALID_INT_ENB (1 << 9)
+#define UTMIP_RESET (1 << 11)
+#define UHSIC_RESET (1 << 11)
+#define UTMIP_PHY_ENABLE (1 << 12)
+#define UHSIC_PHY_ENABLE (1 << 12)
#define ULPI_PHY_ENABLE (1 << 13)
#define USB_SUSP_SET (1 << 14)
#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+#define USB_PHY_VBUS_WAKEUP_ID 0x408
+#define VDAT_DET_INT_EN (1 << 16)
+#define VDAT_DET_CHG_DET (1 << 17)
+#define VDAT_DET_STS (1 << 18)
+
#define USB1_LEGACY_CTRL 0x410
#define USB1_NO_LEGACY_MODE (1 << 0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
@@ -67,17 +96,6 @@
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
-#define ULPI_TIMING_CTRL_0 0x424
-#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
-#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
-
-#define ULPI_TIMING_CTRL_1 0x428
-#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
-#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
-#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
-#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
-#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
-#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
#define UTMIP_PLL_CFG1 0x804
#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
@@ -90,8 +108,14 @@
#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
+#define UTMIP_XCVR_LSBIAS_SEL (1 << 21)
+#define UTMIP_XCVR_SETUP_MSB(x) (((x) & 0x7) << 22)
#define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25)
+#define UTMIP_XCVR_MAX_OFFSET 2
+#define UTMIP_XCVR_SETUP_MAX_VALUE 0x7f
+#define XCVR_SETUP_MSB_CALIB(x) ((x) >> 4)
+
#define UTMIP_BIAS_CFG0 0x80c
#define UTMIP_OTGPD (1 << 11)
#define UTMIP_BIASPD (1 << 10)
@@ -107,15 +131,6 @@
#define UTMIP_FS_PREABMLE_J (1 << 19)
#define UTMIP_HS_DISCON_DISABLE (1 << 8)
-#define UTMIP_MISC_CFG0 0x824
-#define UTMIP_DPDM_OBSERVE (1 << 26)
-#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
-#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
-
#define UTMIP_MISC_CFG1 0x828
#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
@@ -125,9 +140,164 @@
#define UTMIP_BAT_CHRG_CFG0 0x830
#define UTMIP_PD_CHRG (1 << 0)
+#define UTMIP_ON_SINK_EN (1 << 2)
+#define UTMIP_OP_SRC_EN (1 << 3)
-#define UTMIP_SPARE_CFG0 0x834
-#define FUSE_SETUP_SEL (1 << 3)
+#define UTMIP_XCVR_CFG1 0x838
+#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
+#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2)
+#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4)
+#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
+
+#define UTMIP_BIAS_CFG1 0x83c
+#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+
+#define UHSIC_PLL_CFG1 0x804
+#define UHSIC_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UHSIC_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 14)
+
+#define UHSIC_HSRX_CFG0 0x808
+#define UHSIC_ELASTIC_UNDERRUN_LIMIT(x) (((x) & 0x1f) << 2)
+#define UHSIC_ELASTIC_OVERRUN_LIMIT(x) (((x) & 0x1f) << 8)
+#define UHSIC_IDLE_WAIT(x) (((x) & 0x1f) << 13)
+
+#define UHSIC_HSRX_CFG1 0x80c
+#define UHSIC_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+
+#define UHSIC_MISC_CFG0 0x814
+#define UHSIC_SUSPEND_EXIT_ON_EDGE (1 << 7)
+#define UHSIC_DETECT_SHORT_CONNECT (1 << 8)
+#define UHSIC_FORCE_XCVR_MODE (1 << 15)
+
+#define UHSIC_MISC_CFG1 0X818
+#define UHSIC_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 2)
+
+#define UHSIC_PADS_CFG0 0x81c
+#define UHSIC_TX_RTUNEN 0xf000
+#define UHSIC_TX_RTUNE(x) (((x) & 0xf) << 12)
+
+#define UHSIC_PADS_CFG1 0x820
+#define UHSIC_PD_BG (1 << 2)
+#define UHSIC_PD_TX (1 << 3)
+#define UHSIC_PD_TRK (1 << 4)
+#define UHSIC_PD_RX (1 << 5)
+#define UHSIC_PD_ZI (1 << 6)
+#define UHSIC_RX_SEL (1 << 7)
+#define UHSIC_RPD_DATA (1 << 9)
+#define UHSIC_RPD_STROBE (1 << 10)
+#define UHSIC_RPU_DATA (1 << 11)
+#define UHSIC_RPU_STROBE (1 << 12)
+
+#define UHSIC_STAT_CFG0 0x828
+#define UHSIC_CONNECT_DETECT (1 << 0)
+
+
+#else
+
+#define USB_USBCMD 0x130
+#define USB_USBCMD_RS (1 << 0)
+
+#define USB_USBSTS 0x134
+#define USB_USBSTS_PCI (1 << 2)
+#define USB_USBSTS_SRI (1 << 7)
+#define USB_USBSTS_HCH (1 << 12)
+
+#define ULPI_VIEWPORT 0x160
+
+#define USB_PORTSC1 0x174
+#define USB_PORTSC1_WKOC (1 << 22)
+#define USB_PORTSC1_WKDS (1 << 21)
+#define USB_PORTSC1_WKCN (1 << 20)
+#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
+#define USB_PORTSC1_PP (1 << 12)
+#define USB_PORTSC1_SUSP (1 << 7)
+#define USB_PORTSC1_RESUME (1 << 6)
+#define USB_PORTSC1_PE (1 << 2)
+#define USB_PORTSC1_CCS (1 << 0)
+
+#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
+#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
+#define USB_SUSP_CLR (1 << 5)
+#define USB_PHY_CLK_VALID (1 << 7)
+#define USB_PHY_CLK_VALID_INT_ENB (1 << 9)
+
+
+#define UTMIP_RESET (1 << 11)
+#define UTMIP_PHY_ENABLE (1 << 12)
+#define ULPI_PHY_ENABLE (1 << 13)
+#define UHSIC_RESET (1 << 14)
+
+#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+#define UHSIC_PHY_ENABLE (1 << 19)
+#define ULPIS2S_SLV0_RESET (1 << 20)
+#define ULPIS2S_SLV1_RESET (1 << 21)
+#define ULPIS2S_LINE_RESET (1 << 22)
+#define ULPI_PADS_RESET (1 << 23)
+#define ULPI_PADS_CLKEN_RESET (1 << 24)
+
+#define USB_PHY_VBUS_WAKEUP_ID 0x408
+#define VDAT_DET_INT_EN (1 << 16)
+#define VDAT_DET_CHG_DET (1 << 17)
+#define VDAT_DET_STS (1 << 18)
+
+#define USB1_LEGACY_CTRL 0x410
+#define USB1_NO_LEGACY_MODE (1 << 0)
+#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
+#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
+#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
+ (1 << 1)
+#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
+#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
+
+#define UTMIP_PLL_CFG1 0x804
+#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
+
+#define UTMIP_XCVR_CFG0 0x808
+#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
+#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
+#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
+#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
+#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
+#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
+#define UTMIP_XCVR_LSBIAS_SEL (1 << 21)
+#define UTMIP_XCVR_SETUP_MSB(x) (((x) & 0x7) << 22)
+#define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25)
+
+#define UTMIP_XCVR_MAX_OFFSET 5
+#define UTMIP_XCVR_SETUP_MAX_VALUE 0x7f
+#define XCVR_SETUP_MSB_CALIB(x) ((x) >> 4)
+
+#define UTMIP_BIAS_CFG0 0x80c
+#define UTMIP_OTGPD (1 << 11)
+#define UTMIP_BIASPD (1 << 10)
+#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
+#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
+#define UTMIP_HSDISCON_LEVEL_MSB (1 << 24)
+
+#define UTMIP_HSRX_CFG0 0x810
+#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
+#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
+
+#define UTMIP_HSRX_CFG1 0x814
+#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+
+#define UTMIP_TX_CFG0 0x820
+#define UTMIP_FS_PREABMLE_J (1 << 19)
+#define UTMIP_HS_DISCON_DISABLE (1 << 8)
+
+#define UTMIP_MISC_CFG1 0x828
+#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
+#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+
+#define UTMIP_DEBOUNCE_CFG0 0x82c
+#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
+
+#define UTMIP_BAT_CHRG_CFG0 0x830
+#define UTMIP_PD_CHRG (1 << 0)
+#define UTMIP_ON_SINK_EN (1 << 2)
+#define UTMIP_OP_SRC_EN (1 << 3)
#define UTMIP_XCVR_CFG1 0x838
#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
@@ -137,6 +307,270 @@
#define UTMIP_BIAS_CFG1 0x83c
#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+#define UTMIP_BIAS_PDTRK_POWERDOWN (1 << 0)
+#define UTMIP_BIAS_PDTRK_POWERUP (1 << 1)
+
+#define HOSTPC1_DEVLC 0x1b4
+#define HOSTPC1_DEVLC_PHCD (1 << 22)
+#define HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
+#define HOSTPC1_DEVLC_PTS_MASK 7
+#define HOSTPC1_DEVLC_PTS_HSIC 4
+#define HOSTPC1_DEVLC_STS (1 << 28)
+#define HOSTPC1_DEVLC_PSPD(x) (((x) & 0x3) << 25)
+#define HOSTPC1_DEVLC_PSPD_MASK 3
+#define HOSTPC1_DEVLC_PSPD_HIGH_SPEED 2
+
+#define TEGRA_USB_USBMODE_REG_OFFSET 0x1f8
+#define TEGRA_USB_USBMODE_HOST (3 << 0)
+
+#define TEGRA_PMC_USB_AO 0xf0
+#define TEGRA_PMC_USB_AO_VBUS_WAKEUP_PD_P0 (1 << 2)
+#define TEGRA_PMC_USB_AO_ID_PD_P0 (1 << 3)
+#define TEGRA_PMC_USB_AO_PD_P2 (0xf << 8)
+
+#define ICUSB_CTRL 0x15c
+
+#define UHSIC_PLL_CFG1 0xc04
+#define UHSIC_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UHSIC_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 14)
+
+#define UHSIC_HSRX_CFG0 0xc08
+#define UHSIC_ELASTIC_UNDERRUN_LIMIT(x) (((x) & 0x1f) << 2)
+#define UHSIC_ELASTIC_OVERRUN_LIMIT(x) (((x) & 0x1f) << 8)
+#define UHSIC_IDLE_WAIT(x) (((x) & 0x1f) << 13)
+
+#define UHSIC_HSRX_CFG1 0xc0c
+#define UHSIC_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+
+#define UHSIC_MISC_CFG0 0xc14
+#define UHSIC_SUSPEND_EXIT_ON_EDGE (1 << 7)
+#define UHSIC_DETECT_SHORT_CONNECT (1 << 8)
+#define UHSIC_FORCE_XCVR_MODE (1 << 15)
+
+#define UHSIC_MISC_CFG1 0xc18
+#define UHSIC_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 2)
+
+#define UHSIC_PADS_CFG0 0xc1c
+#define UHSIC_TX_RTUNEN 0xf000
+#define UHSIC_TX_RTUNE(x) (((x) & 0xf) << 12)
+
+#define UHSIC_PADS_CFG1 0xc20
+#define UHSIC_PD_BG (1 << 2)
+#define UHSIC_PD_TX (1 << 3)
+#define UHSIC_PD_TRK (1 << 4)
+#define UHSIC_PD_RX (1 << 5)
+#define UHSIC_PD_ZI (1 << 6)
+#define UHSIC_RX_SEL (1 << 7)
+#define UHSIC_RPD_DATA (1 << 9)
+#define UHSIC_RPD_STROBE (1 << 10)
+#define UHSIC_RPU_DATA (1 << 11)
+#define UHSIC_RPU_STROBE (1 << 12)
+
+#define UHSIC_STAT_CFG0 0xc28
+#define UHSIC_CONNECT_DETECT (1 << 0)
+
+#define PMC_UTMIP_MASTER_CONFIG 0x310
+#define UTMIP_PWR(inst) (1 << (inst))
+
+#define PMC_USB_DEBOUNCE 0xec
+#define UTMIP_LINE_DEB_CNT(x) (((x) & 0xf) << 16)
+
+#define PMC_UTMIP_UHSIC_FAKE 0x218
+#define USBON_VAL(inst) (1 << ((4*(inst))+1))
+#define USBON_VAL_P2 (1 << 9)
+#define USBON_VAL_P1 (1 << 5)
+#define USBON_VAL_P0 (1 << 1)
+#define USBOP_VAL(inst) (1 << (4*(inst)))
+#define USBOP_VAL_P2 (1 << 8)
+#define USBOP_VAL_P1 (1 << 4)
+#define USBOP_VAL_P0 (1 << 0)
+
+#define PMC_SLEEPWALK_CFG 0x200
+#define UTMIP_LINEVAL_WALK_EN(inst) (1 << ((8*(inst))+7))
+#define UTMIP_LINEVAL_WALK_EN_P2 (1 << 23)
+#define UTMIP_LINEVAL_WALK_EN_P1 (1 << 15)
+#define UTMIP_LINEVAL_WALK_EN_P0 (1 << 7)
+#define UTMIP_WAKE_VAL(inst, x) (((x) & 0xf) << ((8*(inst))+4))
+#define UTMIP_WAKE_VAL_P2(x) (((x) & 0xf) << 20)
+#define UTMIP_WAKE_VAL_P1(x) (((x) & 0xf) << 12)
+#define UTMIP_WAKE_VAL_P0(x) (((x) & 0xf) << 4)
+#define WAKE_VAL_NONE 0xc
+#define WAKE_VAL_FSJ 0x2
+#define WAKE_VAL_FSK 0x1
+#define WAKE_VAL_SE0 0x0
+#define WAKE_VAL_ANY 0xf
+
+#define PMC_SLEEP_CFG 0x1fc
+#define UTMIP_TCTRL_USE_PMC(inst) (1 << ((8*(inst))+3))
+#define UTMIP_TCTRL_USE_PMC_P2 (1 << 19)
+#define UTMIP_TCTRL_USE_PMC_P1 (1 << 11)
+#define UTMIP_TCTRL_USE_PMC_P0 (1 << 3)
+#define UTMIP_RCTRL_USE_PMC(inst) (1 << ((8*(inst))+2))
+#define UTMIP_RCTRL_USE_PMC_P2 (1 << 18)
+#define UTMIP_RCTRL_USE_PMC_P1 (1 << 10)
+#define UTMIP_RCTRL_USE_PMC_P0 (1 << 2)
+#define UTMIP_FSLS_USE_PMC(inst) (1 << ((8*(inst))+1))
+#define UTMIP_FSLS_USE_PMC_P2 (1 << 17)
+#define UTMIP_FSLS_USE_PMC_P1 (1 << 9)
+#define UTMIP_FSLS_USE_PMC_P0 (1 << 1)
+#define UTMIP_MASTER_ENABLE(inst) (1 << (8*(inst)))
+#define UTMIP_MASTER_ENABLE_P2 (1 << 16)
+#define UTMIP_MASTER_ENABLE_P1 (1 << 8)
+#define UTMIP_MASTER_ENABLE_P0 (1 << 0)
+
+#define PMC_USB_AO 0xf0
+#define USBON_VAL_PD(inst) (1 << ((4*(inst))+1))
+#define USBON_VAL_PD_P2 (1 << 9)
+#define USBON_VAL_PD_P1 (1 << 5)
+#define USBON_VAL_PD_P0 (1 << 1)
+#define USBOP_VAL_PD(inst) (1 << (4*(inst)))
+#define USBOP_VAL_PD_P2 (1 << 8)
+#define USBOP_VAL_PD_P1 (1 << 4)
+#define USBOP_VAL_PD_P0 (1 << 0)
+
+#define PMC_TRIGGERS 0x1ec
+#define UTMIP_CLR_WALK_PTR(inst) (1 << (inst))
+#define UTMIP_CLR_WALK_PTR_P2 (1 << 2)
+#define UTMIP_CLR_WALK_PTR_P1 (1 << 1)
+#define UTMIP_CLR_WALK_PTR_P0 (1 << 0)
+#define UTMIP_CAP_CFG(inst) (1 << ((inst)+4))
+#define UTMIP_CAP_CFG_P2 (1 << 6)
+#define UTMIP_CAP_CFG_P1 (1 << 5)
+#define UTMIP_CAP_CFG_P0 (1 << 4)
+#define UTMIP_CLR_WAKE_ALARM(inst) (1 << ((inst)+12))
+#define UTMIP_CLR_WAKE_ALARM_P2 (1 << 14)
+
+#define PMC_PAD_CFG (0x1f4)
+
+#define PMC_UTMIP_BIAS_MASTER_CNTRL 0x30c
+#define BIAS_MASTER_PROG_VAL (1 << 1)
+
+#define PMC_SLEEPWALK_REG(inst) (0x204 + (4*(inst)))
+#define PMC_SLEEPWALK_P0 0x204
+#define PMC_SLEEPWALK_P1 0x208
+#define PMC_SLEEPWALK_P2 0x20c
+#define UTMIP_USBOP_RPD_A (1 << 0)
+#define UTMIP_USBON_RPD_A (1 << 1)
+#define UTMIP_AP_A (1 << 4)
+#define UTMIP_AN_A (1 << 5)
+#define UTMIP_HIGHZ_A (1 << 6)
+#define UTMIP_USBOP_RPD_B (1 << 8)
+#define UTMIP_USBON_RPD_B (1 << 9)
+#define UTMIP_AP_B (1 << 12)
+#define UTMIP_AN_B (1 << 13)
+#define UTMIP_HIGHZ_B (1 << 14)
+#define UTMIP_USBOP_RPD_C (1 << 16)
+#define UTMIP_USBON_RPD_C (1 << 17)
+#define UTMIP_AP_C (1 << 20)
+#define UTMIP_AN_C (1 << 21)
+#define UTMIP_HIGHZ_C (1 << 22)
+#define UTMIP_USBOP_RPD_D (1 << 24)
+#define UTMIP_USBON_RPD_D (1 << 25)
+#define UTMIP_AP_D (1 << 28)
+#define UTMIP_AN_D (1 << 29)
+#define UTMIP_HIGHZ_D (1 << 30)
+
+#define UTMIP_PMC_WAKEUP0 0x84c
+#define EVENT_INT_ENB (1 << 0)
+
+#define UTMIP_UHSIC_STATUS 0x214
+#define UTMIP_WALK_PTR_VAL(inst) (0x3 << ((inst)*2))
+#define UTMIP_USBOP_VAL(inst) (1 << ((2*(inst)) + 8))
+#define UTMIP_USBOP_VAL_P2 (1 << 12)
+#define UTMIP_USBOP_VAL_P1 (1 << 10)
+#define UTMIP_USBOP_VAL_P0 (1 << 8)
+#define UTMIP_USBON_VAL(inst) (1 << ((2*(inst)) + 9))
+#define UTMIP_USBON_VAL_P2 (1 << 13)
+#define UTMIP_USBON_VAL_P1 (1 << 11)
+#define UTMIP_USBON_VAL_P0 (1 << 9)
+#define UTMIP_WAKE_ALARM(inst) (1 << ((inst) + 16))
+#define UTMIP_WAKE_ALARM_P2 (1 << 18)
+#define UTMIP_WAKE_ALARM_P1 (1 << 17)
+#define UTMIP_WAKE_ALARM_P0 (1 << 16)
+#define UTMIP_WALK_PTR(inst) (1 << ((inst)*2))
+#define UTMIP_WALK_PTR_P2 (1 << 4)
+#define UTMIP_WALK_PTR_P1 (1 << 2)
+#define UTMIP_WALK_PTR_P0 (1 << 0)
+
+#define UTMIP_BIAS_STS0 0x840
+#define UTMIP_RCTRL_VAL(x) (((x) & 0xffff) << 0)
+#define UTMIP_TCTRL_VAL(x) (((x) & (0xffff << 16)) >> 16)
+
+#define PMC_UTMIP_TERM_PAD_CFG 0x1f8
+#define PMC_TCTRL_VAL(x) (((x) & 0x1f) << 5)
+#define PMC_RCTRL_VAL(x) (((x) & 0x1f) << 0)
+
+static u32 utmip_rctrl_val, utmip_tctrl_val;
+
+#endif
+
+/* Common registers */
+#define UTMIP_MISC_CFG0 0x824
+#define UTMIP_DPDM_OBSERVE (1 << 26)
+#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
+#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
+#define FORCE_PULLDN_DM (1 << 8)
+#define FORCE_PULLDN_DP (1 << 9)
+#define COMB_TERMS (1 << 0)
+#define ALWAYS_FREE_RUNNING_TERMS (1 << 1)
+
+#define ULPIS2S_CTRL 0x418
+#define ULPIS2S_ENA (1 << 0)
+#define ULPIS2S_SUPPORT_DISCONNECT (1 << 2)
+#define ULPIS2S_PLLU_MASTER_BLASTER60 (1 << 3)
+#define ULPIS2S_SPARE(x) (((x) & 0xF) << 8)
+#define ULPIS2S_FORCE_ULPI_CLK_OUT (1 << 12)
+#define ULPIS2S_DISCON_DONT_CHECK_SE0 (1 << 13)
+#define ULPIS2S_SUPPORT_HS_KEEP_ALIVE (1 << 14)
+#define ULPIS2S_DISABLE_STP_PU (1 << 15)
+#define ULPIS2S_SLV0_CLAMP_XMIT (1 << 16)
+
+
+#define ULPI_TIMING_CTRL_0 0x424
+#define ULPI_CLOCK_OUT_DELAY(x) ((x) & 0x1F)
+#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
+#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
+#define ULPI_SHADOW_CLK_LOOPBACK_EN (1 << 12)
+#define ULPI_SHADOW_CLK_SEL (1 << 13)
+#define ULPI_CORE_CLK_SEL (1 << 14)
+#define ULPI_SHADOW_CLK_DELAY(x) (((x) & 0x1F) << 16)
+#define ULPI_LBK_PAD_EN (1 << 26)
+#define ULPI_LBK_PAD_E_INPUT_OR (1 << 27)
+#define ULPI_CLK_OUT_ENA (1 << 28)
+#define ULPI_CLK_PADOUT_ENA (1 << 29)
+
+#define ULPI_TIMING_CTRL_1 0x428
+#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
+#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
+#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
+#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
+#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
+#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
+
+#define UTMIP_SPARE_CFG0 0x834
+#define FUSE_SETUP_SEL (1 << 3)
+#define FUSE_ATERM_SEL (1 << 4)
+
+#define FUSE_USB_CALIB_0 0x1F0
+#define FUSE_USB_CALIB_XCVR_SETUP(x) (((x) & 0x7F) << 0)
+
+#define UHSIC_PLL_CFG0 0x800
+
+#define UHSIC_TX_CFG0 0x810
+#define UHSIC_HS_POSTAMBLE_OUTPUT_ENABLE (1 << 6)
+
+#define UHSIC_CMD_CFG0 0x824
+#define UHSIC_PRETEND_CONNECT_DETECT (1 << 5)
+
+#define UHSIC_SPARE_CFG0 0x82c
+
+/* These values (in milliseconds) are taken from the battery charging spec */
+#define TDP_SRC_ON_MS 100
+#define TDPSRC_CON_MS 40
static DEFINE_SPINLOCK(utmip_pad_lock);
static int utmip_pad_count;
@@ -146,8 +580,9 @@ struct tegra_xtal_freq {
u8 enable_delay;
u8 stable_count;
u8 active_delay;
- u8 xtal_freq_count;
+ u16 xtal_freq_count;
u16 debounce;
+ u8 pdtrk_count;
};
static const struct tegra_xtal_freq tegra_freq_table[] = {
@@ -158,6 +593,7 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
.active_delay = 0x04,
.xtal_freq_count = 0x76,
.debounce = 0x7530,
+ .pdtrk_count = 5,
},
{
.freq = 13000000,
@@ -166,6 +602,7 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
.active_delay = 0x05,
.xtal_freq_count = 0x7F,
.debounce = 0x7EF4,
+ .pdtrk_count = 5,
},
{
.freq = 19200000,
@@ -174,6 +611,7 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
.active_delay = 0x06,
.xtal_freq_count = 0xBB,
.debounce = 0xBB80,
+ .pdtrk_count = 7,
},
{
.freq = 26000000,
@@ -182,6 +620,38 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
.active_delay = 0x09,
.xtal_freq_count = 0xFE,
.debounce = 0xFDE8,
+ .pdtrk_count = 9,
+ },
+};
+
+static const struct tegra_xtal_freq tegra_uhsic_freq_table[] = {
+ {
+ .freq = 12000000,
+ .enable_delay = 0x02,
+ .stable_count = 0x2F,
+ .active_delay = 0x0,
+ .xtal_freq_count = 0x1CA,
+ },
+ {
+ .freq = 13000000,
+ .enable_delay = 0x02,
+ .stable_count = 0x33,
+ .active_delay = 0x0,
+ .xtal_freq_count = 0x1F0,
+ },
+ {
+ .freq = 19200000,
+ .enable_delay = 0x03,
+ .stable_count = 0x4B,
+ .active_delay = 0x0,
+ .xtal_freq_count = 0x2DD,
+ },
+ {
+ .freq = 26000000,
+ .enable_delay = 0x04,
+ .stable_count = 0x66,
+ .active_delay = 0x0,
+ .xtal_freq_count = 0x3E0,
},
};
@@ -192,24 +662,29 @@ static struct tegra_utmip_config utmip_default[] = {
.elastic_limit = 16,
.term_range_adj = 6,
.xcvr_setup = 9,
- .xcvr_lsfslew = 1,
- .xcvr_lsrslew = 1,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
},
[2] = {
.hssync_start_delay = 9,
.idle_wait_delay = 17,
.elastic_limit = 16,
.term_range_adj = 6,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
.xcvr_setup = 9,
.xcvr_lsfslew = 2,
.xcvr_lsrslew = 2,
},
};
-static inline bool phy_is_ulpi(struct tegra_usb_phy *phy)
-{
- return (phy->instance == 1);
-}
+struct usb_phy_plat_data usb_phy_data[] = {
+ { 0, 0, -1, NULL},
+ { 0, 0, -1, NULL},
+ { 0, 0, -1, NULL},
+};
static int utmip_pad_open(struct tegra_usb_phy *phy)
{
@@ -239,7 +714,7 @@ static void utmip_pad_close(struct tegra_usb_phy *phy)
clk_put(phy->pad_clk);
}
-static void utmip_pad_power_on(struct tegra_usb_phy *phy)
+static int utmip_pad_power_on(struct tegra_usb_phy *phy)
{
unsigned long val, flags;
void __iomem *base = phy->pad_regs;
@@ -248,18 +723,23 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
spin_lock_irqsave(&utmip_pad_lock, flags);
- if (utmip_pad_count++ == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
- val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
- writel(val, base + UTMIP_BIAS_CFG0);
- }
+ utmip_pad_count++;
+ val = readl(base + UTMIP_BIAS_CFG0);
+ val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ val |= UTMIP_HSSQUELCH_LEVEL(0x2) | UTMIP_HSDISCON_LEVEL(0x1) |
+ UTMIP_HSDISCON_LEVEL_MSB;
+#endif
+ writel(val, base + UTMIP_BIAS_CFG0);
spin_unlock_irqrestore(&utmip_pad_lock, flags);
clk_disable(phy->pad_clk);
+
+ return 0;
}
-static int utmip_pad_power_off(struct tegra_usb_phy *phy)
+static int utmip_pad_power_off(struct tegra_usb_phy *phy, bool is_dpd)
{
unsigned long val, flags;
void __iomem *base = phy->pad_regs;
@@ -273,9 +753,13 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy)
spin_lock_irqsave(&utmip_pad_lock, flags);
- if (--utmip_pad_count == 0) {
+ if (--utmip_pad_count == 0 && is_dpd) {
val = readl(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ val &= ~(UTMIP_HSSQUELCH_LEVEL(~0) | UTMIP_HSDISCON_LEVEL(~0) |
+ UTMIP_HSDISCON_LEVEL_MSB);
+#endif
writel(val, base + UTMIP_BIAS_CFG0);
}
@@ -288,7 +772,7 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy)
static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
- unsigned long timeout = 2000;
+ unsigned long timeout = 2500;
do {
if ((readl(reg) & mask) == result)
return 0;
@@ -302,7 +786,7 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
{
unsigned long val;
void __iomem *base = phy->regs;
-
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (phy->instance == 0) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
@@ -320,6 +804,11 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
val |= USB_PORTSC1_PHCD;
writel(val, base + USB_PORTSC1);
}
+#else
+ val = readl(base + HOSTPC1_DEVLC);
+ val |= HOSTPC1_DEVLC_PHCD;
+ writel(val, base + HOSTPC1_DEVLC);
+#endif
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
@@ -342,35 +831,175 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
writel(val, base + USB_SUSP_CTRL);
}
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (phy->instance == 2) {
val = readl(base + USB_PORTSC1);
val &= ~USB_PORTSC1_PHCD;
writel(val, base + USB_PORTSC1);
}
+#endif
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
- USB_PHY_CLK_VALID))
+ USB_PHY_CLK_VALID) < 0)
pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
}
-static int utmi_phy_power_on(struct tegra_usb_phy *phy)
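+/*
+ * VBUS switching: on Tegra 2x boards VBUS is driven through a board-supplied
+ * GPIO (usb_phy_data[].vbus_gpio); on later SoCs it is enabled through the
+ * phy's VBUS regulator.
+ */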
+static void vbus_enable(struct tegra_usb_phy *phy)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ int gpio_status;
+ int gpio = usb_phy_data[phy->instance].vbus_gpio;
+
+ if (gpio == -1)
+ return;
+
+ gpio_status = gpio_request(gpio,"VBUS_USB");
+ if (gpio_status < 0) {
+ printk("VBUS_USB request GPIO FAILED\n");
+ WARN_ON(1);
+ return;
+ }
+ if (gpio < TEGRA_NR_GPIOS) tegra_gpio_enable(gpio);
+ gpio_status = gpio_direction_output(gpio, 1);
+ if (gpio_status < 0) {
+ printk("VBUS_USB request GPIO DIRECTION FAILED \n");
+ WARN_ON(1);
+ return;
+ }
+ gpio_set_value_cansleep(gpio, 1);
+#else
+ if (phy->reg_vbus)
+ regulator_enable(phy->reg_vbus);
+#endif
+}
+
+static void vbus_disable(struct tegra_usb_phy *phy)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ int gpio = usb_phy_data[phy->instance].vbus_gpio;
+
+ if (gpio == -1)
+ return;
+
+ gpio_set_value_cansleep(gpio, 0);
+ gpio_free(gpio);
+#else
+ if (phy->reg_vbus)
+ regulator_disable(phy->reg_vbus);
+#endif
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
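+/*
+ * Bias pad tracking: run the shared tracking circuit once after boot (from
+ * USB1 only), read the resulting RCTRL/TCTRL values from UTMIP_BIAS_STS0 and
+ * program them into the PMC termination pad config register for later use by
+ * the PMC sleep/wake logic.
+ */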
+static void utmip_phy_enable_trking_data(struct tegra_usb_phy *phy)
+{
+ void __iomem *base = phy->pad_regs;
+ void __iomem *pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+ static bool init_done = false;
+ u32 val;
+
+ /* Should be done only once after system boot */
+ if (init_done)
+ return;
+
+ clk_enable(phy->pad_clk);
+ /* Bias pad MASTER_ENABLE=1 */
+ val = readl(pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+ val |= BIAS_MASTER_PROG_VAL;
+ writel(val, pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+
+ /* Setting the tracking length time */
+ val = readl(base + UTMIP_BIAS_CFG1);
+ val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
+ val |= UTMIP_BIAS_PDTRK_COUNT(5);
+ writel(val, base + UTMIP_BIAS_CFG1);
+
+ /* Bias PDTRK is Shared and MUST be done from USB1 ONLY, PD_TRK=0 */
+ val = readl(base + UTMIP_BIAS_CFG1);
+ val &= ~ UTMIP_BIAS_PDTRK_POWERDOWN;
+ writel(val, base + UTMIP_BIAS_CFG1);
+
+ val = readl(base + UTMIP_BIAS_CFG1);
+ val |= UTMIP_BIAS_PDTRK_POWERUP;
+ writel(val, base + UTMIP_BIAS_CFG1);
+
+ /* Wait for 25usec */
+ udelay(25);
+
+ /* Bias pad MASTER_ENABLE=0 */
+ val = readl(pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+ val &= ~BIAS_MASTER_PROG_VAL;
+ writel(val, pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+
+ /* Wait for 1usec */
+ udelay(1);
+
+ /* Bias pad MASTER_ENABLE=1 */
+ val = readl(pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+ val |= BIAS_MASTER_PROG_VAL;
+ writel(val, pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+
+ /* Read RCTRL and TCTRL from UTMIP space */
+ val = readl(base + UTMIP_BIAS_STS0);
+ utmip_rctrl_val = ffz(UTMIP_RCTRL_VAL(val));
+ utmip_tctrl_val = ffz(UTMIP_TCTRL_VAL(val));
+
+ /* PD_TRK=1 */
+ val = readl(base + UTMIP_BIAS_CFG1);
+ val |= UTMIP_BIAS_PDTRK_POWERDOWN;
+ writel(val, base + UTMIP_BIAS_CFG1);
+
+ /* Program thermally encoded RCTRL_VAL, TCTRL_VAL into PMC space */
+ val = readl(pmc_base + PMC_UTMIP_TERM_PAD_CFG);
+ val = PMC_TCTRL_VAL(utmip_tctrl_val) | PMC_RCTRL_VAL(utmip_rctrl_val);
+ writel(val, pmc_base + PMC_UTMIP_TERM_PAD_CFG);
+ clk_disable(phy->pad_clk);
+ init_done = true;
+}
+#endif
+
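+/*
+ * Pick the HS transceiver setup value: either the factory-calibrated value
+ * from the USB_CALIB fuse, adjusted by a board-specific offset and clamped
+ * to the register maximum, or the fixed value from the platform config.
+ */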
+static unsigned int tegra_phy_xcvr_setup_value(struct tegra_utmip_config *cfg)
+{
+ unsigned long val;
+
+ if (cfg->xcvr_use_fuses) {
+ val = FUSE_USB_CALIB_XCVR_SETUP(
+ tegra_fuse_readl(FUSE_USB_CALIB_0));
+ if (cfg->xcvr_setup_offset <= UTMIP_XCVR_MAX_OFFSET)
+ val = val + cfg->xcvr_setup_offset;
+
+ if (val > UTMIP_XCVR_SETUP_MAX_VALUE) {
+ val = UTMIP_XCVR_SETUP_MAX_VALUE;
+ pr_info("%s: reset XCVR_SETUP to max value\n",
+ __func__);
+ }
+ } else {
+ val = cfg->xcvr_setup;
+ }
+
+ return val;
+}
+
+static int utmi_phy_power_on(struct tegra_usb_phy *phy, bool is_dpd)
{
unsigned long val;
void __iomem *base = phy->regs;
+ unsigned int xcvr_setup_value;
struct tegra_utmip_config *config = phy->config;
val = readl(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
writel(val, base + USB_SUSP_CTRL);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (phy->instance == 0) {
val = readl(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
writel(val, base + USB1_LEGACY_CTRL);
}
+#endif
val = readl(base + UTMIP_TX_CFG0);
- val &= ~UTMIP_FS_PREABMLE_J;
+ val |= UTMIP_FS_PREABMLE_J;
writel(val, base + UTMIP_TX_CFG0);
val = readl(base + UTMIP_HSRX_CFG0);
@@ -393,6 +1022,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
writel(val, base + UTMIP_MISC_CFG0);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
val = readl(base + UTMIP_MISC_CFG1);
val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0));
val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
@@ -404,6 +1034,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
writel(val, base + UTMIP_PLL_CFG1);
+#endif
if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
val = readl(base + USB_SUSP_CTRL);
@@ -413,14 +1044,20 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
utmip_pad_power_on(phy);
+ xcvr_setup_value = phy->xcvr_setup_value;
+
val = readl(base + UTMIP_XCVR_CFG0);
- val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
- UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_SETUP(~0) |
- UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0) |
- UTMIP_XCVR_HSSLEW_MSB(~0));
- val |= UTMIP_XCVR_SETUP(config->xcvr_setup);
+ val &= ~(UTMIP_XCVR_LSBIAS_SEL | UTMIP_FORCE_PD_POWERDOWN |
+ UTMIP_FORCE_PD2_POWERDOWN | UTMIP_FORCE_PDZI_POWERDOWN |
+ UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_LSFSLEW(~0) |
+ UTMIP_XCVR_LSRSLEW(~0) | UTMIP_XCVR_HSSLEW_MSB(~0));
+ val |= UTMIP_XCVR_SETUP(xcvr_setup_value);
+ val |= UTMIP_XCVR_SETUP_MSB(XCVR_SETUP_MSB_CALIB(xcvr_setup_value));
val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew);
val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ val |= UTMIP_XCVR_HSSLEW_MSB(0x8);
+#endif
writel(val, base + UTMIP_XCVR_CFG0);
val = readl(base + UTMIP_XCVR_CFG1);
@@ -430,28 +1067,41 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
writel(val, base + UTMIP_XCVR_CFG1);
val = readl(base + UTMIP_BAT_CHRG_CFG0);
- val &= ~UTMIP_PD_CHRG;
+ if (phy->mode == TEGRA_USB_PHY_MODE_HOST)
+ val |= UTMIP_PD_CHRG;
+ else
+ val &= ~UTMIP_PD_CHRG;
writel(val, base + UTMIP_BAT_CHRG_CFG0);
val = readl(base + UTMIP_BIAS_CFG1);
val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
- val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
+ val |= UTMIP_BIAS_PDTRK_COUNT(phy->freq->pdtrk_count);
writel(val, base + UTMIP_BIAS_CFG1);
- if (phy->instance == 0) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ val = readl(base + UTMIP_SPARE_CFG0);
+ val &= ~FUSE_SETUP_SEL;
+ writel(val, base + UTMIP_SPARE_CFG0);
+
+ if (phy->instance == 2) {
val = readl(base + UTMIP_SPARE_CFG0);
- if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE)
- val &= ~FUSE_SETUP_SEL;
- else
- val |= FUSE_SETUP_SEL;
+ val |= FUSE_SETUP_SEL;
writel(val, base + UTMIP_SPARE_CFG0);
- }
- if (phy->instance == 2) {
val = readl(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
writel(val, base + USB_SUSP_CTRL);
}
+#else
+ val = readl(base + UTMIP_SPARE_CFG0);
+ val &= ~FUSE_SETUP_SEL;
+ val |= FUSE_ATERM_SEL;
+ writel(val, base + UTMIP_SPARE_CFG0);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UTMIP_PHY_ENABLE;
+ writel(val, base + USB_SUSP_CTRL);
+#endif
val = readl(base + USB_SUSP_CTRL);
val &= ~UTMIP_RESET;
@@ -463,29 +1113,202 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
writel(val, base + USB1_LEGACY_CTRL);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
writel(val, base + USB_SUSP_CTRL);
+#endif
}
utmi_phy_clk_enable(phy);
-
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (phy->instance == 2) {
val = readl(base + USB_PORTSC1);
val &= ~USB_PORTSC1_PTS(~0);
writel(val, base + USB_PORTSC1);
}
+#else
+ if (phy->instance == 0)
+ utmip_phy_enable_trking_data(phy);
+
+ if(phy->instance == 2) {
+ writel(0, base + ICUSB_CTRL);
+ }
+
+ if (phy->mode == TEGRA_USB_PHY_MODE_HOST) {
+ val = readl(base + TEGRA_USB_USBMODE_REG_OFFSET);
+ writel((val | TEGRA_USB_USBMODE_HOST),
+ (base + TEGRA_USB_USBMODE_REG_OFFSET));
+ }
+ val = readl(base + HOSTPC1_DEVLC);
+ val &= ~HOSTPC1_DEVLC_PTS(~0);
+ val |= HOSTPC1_DEVLC_STS;
+ writel(val, base + HOSTPC1_DEVLC);
+#endif
return 0;
}
-static void utmi_phy_power_off(struct tegra_usb_phy *phy)
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static void utmip_setup_pmc_wake_detect(struct tegra_usb_phy *phy)
{
- unsigned long val;
+ unsigned long val, pmc_pad_cfg_val;
+ void __iomem *pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+ unsigned int inst = phy->instance;
void __iomem *base = phy->regs;
+ bool port_connected;
+ enum tegra_usb_phy_port_speed port_speed;
- utmi_phy_clk_disable(phy);
+ /* check for port connect status */
+ val = readl(base + USB_PORTSC1);
+ port_connected = val & USB_PORTSC1_CCS;
+
+ if (!port_connected)
+ return;
+
+ port_speed = (readl(base + HOSTPC1_DEVLC) >> 25) &
+ HOSTPC1_DEVLC_PSPD_MASK;
+ /* Set PMC MASTER bits to do the following:
+ * a. take over the UTMI drivers
+ * b. take over resume if a remote wakeup is detected,
+ * i.e. prepare the PMC to handle suspend, wake detect and drive resume
+ * until the USB controller is ready again.
+ */
+
+ /* disable master enable in PMC */
+ val = readl(pmc_base + PMC_SLEEP_CFG);
+ val &= ~UTMIP_MASTER_ENABLE(inst);
+ writel(val, pmc_base + PMC_SLEEP_CFG);
+
+ /* UTMIP_PWR_PX=1 for power savings mode */
+ val = readl(pmc_base + PMC_UTMIP_MASTER_CONFIG);
+ val |= UTMIP_PWR(inst);
+ writel(val, pmc_base + PMC_UTMIP_MASTER_CONFIG);
+
+ /* config debouncer */
+ val = readl(pmc_base + PMC_USB_DEBOUNCE);
+ val &= ~UTMIP_LINE_DEB_CNT(~0);
+ val |= UTMIP_LINE_DEB_CNT(1);
+ writel(val, pmc_base + PMC_USB_DEBOUNCE);
+
+ /* Make sure nothing is happening on the line with respect to PMC */
+ val = readl(pmc_base + PMC_UTMIP_UHSIC_FAKE);
+ val &= ~USBOP_VAL(inst);
+ val &= ~USBON_VAL(inst);
+ writel(val, pmc_base + PMC_UTMIP_UHSIC_FAKE);
+
+ /* Make sure wake value for line is none */
+ val = readl(pmc_base + PMC_SLEEPWALK_CFG);
+ val &= ~UTMIP_LINEVAL_WALK_EN(inst);
+ writel(val, pmc_base + PMC_SLEEPWALK_CFG);
+ val = readl(pmc_base + PMC_SLEEP_CFG);
+ val &= ~UTMIP_WAKE_VAL(inst, ~0);
+ val |= UTMIP_WAKE_VAL(inst, WAKE_VAL_NONE);
+ writel(val, pmc_base + PMC_SLEEP_CFG);
+
+ /* turn off pad detectors */
+ val = readl(pmc_base + PMC_USB_AO);
+ val |= (USBOP_VAL_PD(inst) | USBON_VAL_PD(inst));
+ writel(val, pmc_base + PMC_USB_AO);
+
+ /* Remove fake values and make synchronizers work a bit */
+ val = readl(pmc_base + PMC_UTMIP_UHSIC_FAKE);
+ val &= ~USBOP_VAL(inst);
+ val &= ~USBON_VAL(inst);
+ writel(val, pmc_base + PMC_UTMIP_UHSIC_FAKE);
+
+ /* Enable which type of event can trigger a walk,
+ * in this case usb_line_wake */
+ val = readl(pmc_base + PMC_SLEEPWALK_CFG);
+ val |= UTMIP_LINEVAL_WALK_EN(inst);
+ writel(val, pmc_base + PMC_SLEEPWALK_CFG);
+
+ /* Clear the walk pointers and wake alarm */
+ val = readl(pmc_base + PMC_TRIGGERS);
+ val |= UTMIP_CLR_WAKE_ALARM(inst) | UTMIP_CLR_WALK_PTR(inst);
+ writel(val, pmc_base + PMC_TRIGGERS);
+
+
+ /* Capture FS/LS pad configurations */
+ pmc_pad_cfg_val = readl(pmc_base + PMC_PAD_CFG);
+ val = readl(pmc_base + PMC_TRIGGERS);
+ val |= UTMIP_CAP_CFG(inst);
+ writel(val, pmc_base + PMC_TRIGGERS);
+ udelay(1);
+ pmc_pad_cfg_val = readl(pmc_base + PMC_PAD_CFG);
+
+ /* BIAS MASTER_ENABLE=0 */
+ val = readl(pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+ val &= ~BIAS_MASTER_PROG_VAL;
+ writel(val, pmc_base + PMC_UTMIP_BIAS_MASTER_CNTRL);
+
+ /* Program the walk sequence: maintain a J, followed by a driven K,
+ * to signal a resume once a wake event is detected */
+ val = readl(pmc_base + PMC_SLEEPWALK_REG(inst));
+ val &= ~UTMIP_AP_A;
+ val |= UTMIP_USBOP_RPD_A | UTMIP_USBON_RPD_A | UTMIP_AN_A | UTMIP_HIGHZ_A |
+ UTMIP_USBOP_RPD_B | UTMIP_USBON_RPD_B | UTMIP_AP_B |
+ UTMIP_USBOP_RPD_C | UTMIP_USBON_RPD_C | UTMIP_AP_C |
+ UTMIP_USBOP_RPD_D | UTMIP_USBON_RPD_D | UTMIP_AP_D;
+ writel(val, pmc_base + PMC_SLEEPWALK_REG(inst));
+
+ if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) {
+ val = readl(pmc_base + PMC_SLEEPWALK_REG(inst));
+ val &= ~(UTMIP_AN_B | UTMIP_HIGHZ_B | UTMIP_AN_C |
+ UTMIP_HIGHZ_C | UTMIP_AN_D | UTMIP_HIGHZ_D);
+ writel(val, pmc_base + PMC_SLEEPWALK_REG(inst));
+ } else {
+ val = readl(pmc_base + PMC_SLEEPWALK_REG(inst));
+ val &= ~(UTMIP_AP_B | UTMIP_HIGHZ_B | UTMIP_AP_C |
+ UTMIP_HIGHZ_C | UTMIP_AP_D | UTMIP_HIGHZ_D);
+ writel(val, pmc_base + PMC_SLEEPWALK_REG(inst));
+ }
+
+ /* turn on pad detectors */
+ val = readl(pmc_base + PMC_USB_AO);
+ val &= ~(USBOP_VAL_PD(inst) | USBON_VAL_PD(inst));
+ writel(val, pmc_base + PMC_USB_AO);
+
+ /* Add small delay before usb detectors provide stable line values */
+ udelay(1);
+ /* Program thermally encoded RCTRL_VAL, TCTRL_VAL into PMC space */
+ val = readl(pmc_base + PMC_UTMIP_TERM_PAD_CFG);
+ val = PMC_TCTRL_VAL(utmip_tctrl_val) | PMC_RCTRL_VAL(utmip_rctrl_val);
+ writel(val, pmc_base + PMC_UTMIP_TERM_PAD_CFG);
+
+ phy->remote_wakeup = false;
+
+ /* Turn over pad configuration to PMC for line wake events */
+ val = readl(pmc_base + PMC_SLEEP_CFG);
+ val &= ~UTMIP_WAKE_VAL(inst, ~0);
+ val |= UTMIP_WAKE_VAL(inst, WAKE_VAL_ANY);
+ val |= UTMIP_RCTRL_USE_PMC(inst) | UTMIP_TCTRL_USE_PMC(inst);
+ val |= UTMIP_MASTER_ENABLE(inst) | UTMIP_FSLS_USE_PMC(inst);
+ writel(val, pmc_base + PMC_SLEEP_CFG);
+
+ val = readl(base + UTMIP_PMC_WAKEUP0);
+ val |= EVENT_INT_ENB;
+ writel(val, base + UTMIP_PMC_WAKEUP0);
+}
+#endif
+
+static int utmi_phy_power_off(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (phy->mode == TEGRA_USB_PHY_MODE_HOST)
+ utmip_setup_pmc_wake_detect(phy);
+#endif
if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
@@ -493,50 +1316,187 @@ static void utmi_phy_power_off(struct tegra_usb_phy *phy)
writel(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
- val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
-
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
- val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
-
- val = readl(base + UTMIP_XCVR_CFG0);
- val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
- UTMIP_FORCE_PDZI_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG0);
+ if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val |= UTMIP_PD_CHRG;
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ }
+ if (phy->instance != 2) {
+ val = readl(base + UTMIP_XCVR_CFG0);
+ val |= (UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
+ UTMIP_FORCE_PDZI_POWERDOWN);
+ writel(val, base + UTMIP_XCVR_CFG0);
+ }
val = readl(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
writel(val, base + UTMIP_XCVR_CFG1);
- utmip_pad_power_off(phy);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ val = readl(base + UTMIP_BIAS_CFG1);
+ val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
+ writel(val, base + UTMIP_BIAS_CFG1);
+#endif
+
+ if (phy->hotplug) {
+ val = readl(base + USB_PORTSC1);
+ val |= USB_PORTSC1_WKCN;
+ writel(val, base + USB_PORTSC1);
+ }
+ if (phy->instance != 0) {
+ val = readl(base + UTMIP_BIAS_CFG0);
+ val |= UTMIP_OTGPD;
+ writel(val, base + UTMIP_BIAS_CFG0);
+ }
+
+ utmi_phy_clk_disable(phy);
+
+ if (phy->hotplug) {
+ val = readl(base + USB_SUSP_CTRL);
+ val |= USB_PHY_CLK_VALID_INT_ENB;
+ writel(val, base + USB_SUSP_CTRL);
+ } else {
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UTMIP_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+ }
+ utmip_pad_power_off(phy, true);
+ return 0;
}
-static void utmi_phy_preresume(struct tegra_usb_phy *phy)
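+/*
+ * Return control of the UTMI pads from the PMC to the USB controller:
+ * clear the wake value, ack any pending wake alarm and walk pointer,
+ * and drop the PMC master enables.
+ */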
+static void utmip_phy_disable_pmc_bus_ctrl(struct tegra_usb_phy *phy)
{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
unsigned long val;
+ void __iomem *pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+ unsigned int inst = phy->instance;
void __iomem *base = phy->regs;
+ val = readl(pmc_base + PMC_SLEEP_CFG);
+ val &= ~UTMIP_WAKE_VAL(inst, 0x0);
+ val |= UTMIP_WAKE_VAL(inst, WAKE_VAL_NONE);
+ writel(val, pmc_base + PMC_SLEEP_CFG);
+
+ val = readl(pmc_base + PMC_TRIGGERS);
+ val |= UTMIP_CLR_WAKE_ALARM(inst) | UTMIP_CLR_WALK_PTR(inst);
+ writel(val, pmc_base + PMC_TRIGGERS);
+
+ val = readl(base + UTMIP_PMC_WAKEUP0);
+ val &= ~EVENT_INT_ENB;
+ writel(val, base + UTMIP_PMC_WAKEUP0);
+
+ /* Disable PMC master mode by clearing MASTER_EN */
+ val = readl(pmc_base + PMC_SLEEP_CFG);
+ val &= ~(UTMIP_RCTRL_USE_PMC(inst) | UTMIP_TCTRL_USE_PMC(inst) |
+ UTMIP_FSLS_USE_PMC(inst) | UTMIP_MASTER_ENABLE(inst));
+ writel(val, pmc_base + PMC_SLEEP_CFG);
+
+ val = readl(pmc_base + PMC_TRIGGERS);
+ val &= ~UTMIP_CAP_CFG(inst);
+ writel(val, pmc_base + PMC_TRIGGERS);
+
+ /* turn off pad detectors */
+ val = readl(pmc_base + PMC_USB_AO);
+ val |= (USBOP_VAL_PD(inst) | USBON_VAL_PD(inst));
+ writel(val, pmc_base + PMC_USB_AO);
+
+ phy->remote_wakeup = false;
+#endif
+}
+
+static int utmi_phy_preresume(struct tegra_usb_phy *phy, bool is_dpd)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long val;
+ void __iomem *base = phy->regs;
val = readl(base + UTMIP_TX_CFG0);
val |= UTMIP_HS_DISCON_DISABLE;
writel(val, base + UTMIP_TX_CFG0);
+#else
+ utmip_phy_disable_pmc_bus_ctrl(phy);
+#endif
+
+ return 0;
}
-static void utmi_phy_postresume(struct tegra_usb_phy *phy)
+static int utmi_phy_postresume(struct tegra_usb_phy *phy, bool is_dpd)
{
unsigned long val;
void __iomem *base = phy->regs;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* check if OBS bus is already enabled */
+ val = readl(base + UTMIP_MISC_CFG0);
+ if (val & UTMIP_DPDM_OBSERVE) {
+ /* Change the UTMIP OBS bus to drive SE0 */
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
+ val |= UTMIP_DPDM_OBSERVE_SEL_FS_SE0;
+ writel(val, base + UTMIP_MISC_CFG0);
+
+ /* Wait for 3us(2 LS bit times) */
+ udelay (3);
+
+ /* Release UTMIP OBS bus */
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_DPDM_OBSERVE;
+ writel(val, base + UTMIP_MISC_CFG0);
+
+ /* Release DP/DM pulldown for Host mode */
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~(FORCE_PULLDN_DM | FORCE_PULLDN_DP |
+ COMB_TERMS | ALWAYS_FREE_RUNNING_TERMS);
+ writel(val, base + UTMIP_MISC_CFG0);
+ }
+#else
val = readl(base + UTMIP_TX_CFG0);
val &= ~UTMIP_HS_DISCON_DISABLE;
writel(val, base + UTMIP_TX_CFG0);
+#endif
+ return 0;
+}
+
+static int uhsic_phy_postsuspend(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ struct tegra_uhsic_config *uhsic_config = phy->config;
+
+ if (uhsic_config->postsuspend)
+ uhsic_config->postsuspend();
+
+ return 0;
+}
+
+static int uhsic_phy_preresume(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ struct tegra_uhsic_config *uhsic_config = phy->config;
+
+ if (uhsic_config->preresume)
+ uhsic_config->preresume();
+
+ return 0;
+}
+
+static int uhsic_phy_postresume(struct tegra_usb_phy *phy, bool is_dpd)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + USB_TXFILLTUNING);
+ if ((val & USB_FIFO_TXFILL_MASK) != USB_FIFO_TXFILL_THRES(0x10)) {
+ val = USB_FIFO_TXFILL_THRES(0x10);
+ writel(val, base + USB_TXFILLTUNING);
+ }
+#endif
+
+ return 0;
}
static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed)
{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
unsigned long val;
void __iomem *base = phy->regs;
@@ -553,10 +1513,56 @@ static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
val |= UTMIP_DPDM_OBSERVE;
writel(val, base + UTMIP_MISC_CFG0);
udelay(10);
+#else
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ void __iomem *pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+ int inst = phy->instance;
+
+ val = readl(pmc_base + UTMIP_UHSIC_STATUS);
+ /* check whether we woke up due to a remote resume */
+ if (UTMIP_WALK_PTR_VAL(inst) & val) {
+ phy->remote_wakeup = true;
+ } else {
+ if (!((UTMIP_USBON_VAL(phy->instance) |
+ UTMIP_USBOP_VAL(phy->instance)) & val)) {
+ utmip_phy_disable_pmc_bus_ctrl(phy);
+ }
+ }
+
+ /* The 2LS WAR is not required for LS and FS devices; it applies only to HS */
+ if ((port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) ||
+ (port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)) {
+ /* do not enable the OBS bus */
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
+ writel(val, base + UTMIP_MISC_CFG0);
+ return;
+ }
+ /* Force DP/DM pulldown active for Host mode */
+ val = readl(base + UTMIP_MISC_CFG0);
+ val |= FORCE_PULLDN_DM | FORCE_PULLDN_DP |
+ COMB_TERMS | ALWAYS_FREE_RUNNING_TERMS;
+ writel(val, base + UTMIP_MISC_CFG0);
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
+ if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+ val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
+ else
+ val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
+ writel(val, base + UTMIP_MISC_CFG0);
+ udelay(1);
+
+ val = readl(base + UTMIP_MISC_CFG0);
+ val |= UTMIP_DPDM_OBSERVE;
+ writel(val, base + UTMIP_MISC_CFG0);
+ udelay(10);
+#endif
}
static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
unsigned long val;
void __iomem *base = phy->regs;
@@ -564,25 +1570,169 @@ static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
val &= ~UTMIP_DPDM_OBSERVE;
writel(val, base + UTMIP_MISC_CFG0);
udelay(10);
+#else
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ int wait_time_us = 3000; /* FPR should be set by this time */
+
+ /* check whether we woke up due to a remote resume */
+ if (phy->remote_wakeup) {
+ /* wait until FPR bit is set automatically on remote resume */
+ do {
+ val = readl(base + USB_PORTSC1);
+ udelay(1);
+ if (wait_time_us == 0) {
+ utmip_phy_disable_pmc_bus_ctrl(phy);
+ tegra_usb_phy_postresume(phy, false);
+ return;
+ }
+ wait_time_us--;
+ } while (!(val & USB_PORTSC1_RESUME));
+ /* disable PMC master control */
+ utmip_phy_disable_pmc_bus_ctrl(phy);
+ /* wait for 25 ms to port resume complete */
+ msleep(25);
+
+ /* Clear PCI and SRI bits to avoid an interrupt upon resume */
+ val = readl(base + USB_USBSTS);
+ writel(val, base + USB_USBSTS);
+ /* wait to avoid SOF if there is any */
+ if (utmi_wait_register(base + USB_USBSTS,
+ USB_USBSTS_SRI, USB_USBSTS_SRI) < 0) {
+ pr_err("%s: timeout waiting for SOF\n", __func__);
+ }
+ tegra_usb_phy_postresume(phy, false);
+ }
+#endif
+}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static void ulpi_set_tristate(bool enable)
+{
+ int tristate = (enable) ? TEGRA_TRI_TRISTATE : TEGRA_TRI_NORMAL;
+
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAA, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAB, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UDA, tristate);
+}
+#endif
+
+static void ulpi_phy_reset(void __iomem *base)
+{
+ unsigned long val;
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UHSIC_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UTMIP_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+#endif
+}
+
+static void ulpi_set_host(void __iomem *base)
+{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long val;
+
+ val = readl(base + TEGRA_USB_USBMODE_REG_OFFSET);
+ val |= TEGRA_USB_USBMODE_HOST;
+ writel(val, base + TEGRA_USB_USBMODE_REG_OFFSET);
+
+ val = readl(base + HOSTPC1_DEVLC);
+ val |= HOSTPC1_DEVLC_PTS(2);
+ writel(val, base + HOSTPC1_DEVLC);
+#endif
+}
+
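+/*
+ * Program the ULPI output trimmers: write the DATA/STPDIRNXT/DIR select
+ * values first, let them settle, then set the LOAD bits to latch them.
+ */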
+static void ulpi_set_trimmer(void __iomem *base, u8 data, u8 sdn, u8 dir)
+{
+ unsigned long val;
+
+ val = ULPI_DATA_TRIMMER_SEL(data);
+ val |= ULPI_STPDIRNXT_TRIMMER_SEL(sdn);
+ val |= ULPI_DIR_TRIMMER_SEL(dir);
+ writel(val, base + ULPI_TIMING_CTRL_1);
+ udelay(10);
+
+ val |= ULPI_DATA_TRIMMER_LOAD;
+ val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
+ val |= ULPI_DIR_TRIMMER_LOAD;
+ writel(val, base + ULPI_TIMING_CTRL_1);
+}
+
+static inline void ulpi_pinmux_bypass(struct tegra_usb_phy *phy, bool enable)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + ULPI_TIMING_CTRL_0);
+
+ if (enable)
+ val |= ULPI_OUTPUT_PINMUX_BYP;
+ else
+ val &= ~ULPI_OUTPUT_PINMUX_BYP;
+
+ writel(val, base + ULPI_TIMING_CTRL_0);
}
-static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
+static void ulpi_phy_restore_start(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ /* Tristate ULPI interface before USB controller resume */
+ ulpi_set_tristate(true);
+
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val &= ~ULPI_OUTPUT_PINMUX_BYP;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+#endif
+}
+
+static void ulpi_phy_restore_end(struct tegra_usb_phy *phy)
+{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_OUTPUT_PINMUX_BYP;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+
+ ulpi_set_tristate(false);
+#endif
+}
+
+static int ulpi_phy_power_on(struct tegra_usb_phy *phy, bool is_dpd)
{
int ret;
unsigned long val;
void __iomem *base = phy->regs;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
struct tegra_ulpi_config *config = phy->config;
+#endif
- gpio_direction_output(config->reset_gpio, 0);
- msleep(5);
- gpio_direction_output(config->reset_gpio, 1);
+ if (phy->clk)
+ clk_enable(phy->clk);
- clk_enable(phy->clk);
msleep(1);
- val = readl(base + USB_SUSP_CTRL);
- val |= UHSIC_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ if (!phy->initialized) {
+ phy->initialized = 1;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ gpio_direction_output(config->reset_gpio, 0);
+ msleep(5);
+ gpio_direction_output(config->reset_gpio, 1);
+#endif
+ }
+
+ ulpi_phy_reset(base);
+ ulpi_set_host(base);
val = readl(base + ULPI_TIMING_CTRL_0);
val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
@@ -592,20 +1742,30 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
val |= ULPI_PHY_ENABLE;
writel(val, base + USB_SUSP_CTRL);
- val = 0;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ val = readl(base + USB_SUSP_CTRL);
+ val |= USB_SUSP_CLR;
+ writel(val, base + USB_SUSP_CTRL);
- val |= ULPI_DATA_TRIMMER_SEL(4);
- val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
- val |= ULPI_DIR_TRIMMER_SEL(4);
- writel(val, base + ULPI_TIMING_CTRL_1);
- udelay(10);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID) < 0)
+ pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
- val |= ULPI_DATA_TRIMMER_LOAD;
- val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
- val |= ULPI_DIR_TRIMMER_LOAD;
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_CLKEN, USB_CLKEN) < 0)
+ pr_err("%s: timeout waiting for AHB clock\n", __func__);
+#else
+ udelay(100);
+#endif
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~USB_SUSP_CLR;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = 0;
writel(val, base + ULPI_TIMING_CTRL_1);
+ ulpi_set_trimmer(base, 4, 4, 4);
+
/* Fix VbusInvalid due to floating VBUS */
ret = otg_io_write(phy->ulpi, 0x40, 0x08);
if (ret) {
@@ -623,45 +1783,476 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN;
writel(val, base + USB_PORTSC1);
+ return 0;
+}
+
+static int ulpi_phy_power_off(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ int ret;
+
+ /* Disable VbusValid, SessEnd comparators */
+ ret = otg_io_write(phy->ulpi, 0x00, 0x0D);
+ if (ret)
+ pr_err("%s: ulpi write 0x0D failed\n", __func__);
+
+ ret = otg_io_write(phy->ulpi, 0x00, 0x10);
+ if (ret)
+ pr_err("%s: ulpi write 0x10 failed\n", __func__);
+
+ /* Disable IdFloat comparator */
+ ret = otg_io_write(phy->ulpi, 0x00, 0x19);
+ if (ret)
+ pr_err("%s: ulpi write 0x19 failed\n", __func__);
+
+ ret = otg_io_write(phy->ulpi, 0x00, 0x1D);
+ if (ret)
+ pr_err("%s: ulpi write 0x1D failed\n", __func__);
+
+ /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB
+ * Controller to immediately bring the ULPI PHY out of low power
+ */
+ val = readl(base + USB_PORTSC1);
+ val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN);
+ writel(val, base + USB_PORTSC1);
+
+ /* Put the PHY in the low power mode */
+ val = readl(base + USB_PORTSC1);
+ val |= USB_PORTSC1_PHCD;
+ writel(val, base + USB_PORTSC1);
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+ pr_err("%s: timeout waiting for phy to stop\n", __func__);
+#else
+ val = readl(base + HOSTPC1_DEVLC);
+ val &= ~(HOSTPC1_DEVLC_PHCD);
+ writel(val, base + HOSTPC1_DEVLC);
+#endif
+
+ if(phy->clk)
+ clk_disable(phy->clk);
+
+ return 0;
+}
+
+static inline void null_phy_set_tristate(bool enable)
+{
+ int tristate = (enable) ? TEGRA_TRI_TRISTATE : TEGRA_TRI_NORMAL;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UDA, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAA, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAB, tristate);
+#else
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA0, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA1, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA2, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA3, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA4, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA5, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA6, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DATA7, tristate);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_NXT, tristate);
+
+ if (enable)
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DIR, tristate);
+#endif
+}
+
+static void null_phy_restore_start(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed)
+{
+ struct tegra_ulpi_config *config = phy->config;
+
+ if (config->phy_restore_start)
+ config->phy_restore_start();
+}
+
+static void null_phy_restore_end(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_ulpi_config *config = phy->config;
+
+ /* disable ULPI pinmux bypass */
+ ulpi_pinmux_bypass(phy, false);
+
+ /* driving linestate using GPIO */
+ gpio_set_value(config->ulpi_d0_gpio, 0);
+ gpio_set_value(config->ulpi_d1_gpio, 0);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* driving DIR high */
+ gpio_set_value(config->ulpi_dir_gpio, 1);
+#endif
+
+ /* remove ULPI tristate */
+ null_phy_set_tristate(false);
+
+ if (config->phy_restore_end)
+ config->phy_restore_end();
+
+ if (gpio_is_valid(config->phy_restore_gpio)) {
+ int phy_restore_gpio = config->phy_restore_gpio;
+ int retry = 20000;
+
+ while (retry) {
+ /* poll phy_restore_gpio high */
+ if (gpio_get_value(phy_restore_gpio))
+ break;
+ retry--;
+ }
+
+ if (retry == 0)
+ pr_info("phy_restore_gpio timeout\n");
+ }
+
+ /* enable ULPI CLK output pad */
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_CLK_PADOUT_ENA;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ udelay(5); /* wait for CLK stabilize */
+
+ /* enable ULPI pinmux bypass */
+ ulpi_pinmux_bypass(phy, true);
+#else
+ /* enable ULPI pinmux bypass */
+ ulpi_pinmux_bypass(phy, true);
+ udelay(5);
+
+ /* remove DIR tristate */
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_ULPI_DIR, TEGRA_TRI_NORMAL);
+#endif
+}
+
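+/*
+ * Bring up the link in "null PHY" mode: reset the link, enable the pinmux
+ * bypass and the PHY, program the shadow-clock and trimmer timing, start the
+ * internal 60 MHz ULPIS2S clock and release the slave/pad resets.
+ */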
+static int null_phy_power_on(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ const struct tegra_ulpi_trimmer default_trimmer = {0, 0, 4, 4};
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_ulpi_config *config = phy->config;
+ static bool cold_boot = true;
+
+ if (!config->trimmer)
+ config->trimmer = &default_trimmer;
+
+ ulpi_phy_reset(base);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* remove ULPI PADS CLKEN reset */
val = readl(base + USB_SUSP_CTRL);
- val |= USB_SUSP_CLR;
+ val &= ~ULPI_PADS_CLKEN_RESET;
writel(val, base + USB_SUSP_CTRL);
- udelay(100);
+ udelay(10);
+#endif
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+
+ if (config->pre_phy_on && config->pre_phy_on())
+ return -EAGAIN;
val = readl(base + USB_SUSP_CTRL);
- val &= ~USB_SUSP_CLR;
+ val |= ULPI_PHY_ENABLE;
+ writel(val, base + USB_SUSP_CTRL);
+ udelay(10);
+
+ /* set timing parameters */
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_SHADOW_CLK_LOOPBACK_EN;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ val &= ~ULPI_SHADOW_CLK_SEL;
+ val &= ~ULPI_LBK_PAD_EN;
+#else
+ val |= ULPI_SHADOW_CLK_SEL;
+ val |= ULPI_LBK_PAD_EN;
+#endif
+ val |= ULPI_SHADOW_CLK_DELAY(config->trimmer->shadow_clk_delay);
+ val |= ULPI_CLOCK_OUT_DELAY(config->trimmer->clock_out_delay);
+ val |= ULPI_LBK_PAD_E_INPUT_OR;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+
+ writel(0, base + ULPI_TIMING_CTRL_1);
+ udelay(10);
+
+ /* start internal 60MHz clock */
+ val = readl(base + ULPIS2S_CTRL);
+ val |= ULPIS2S_ENA;
+ val |= ULPIS2S_SUPPORT_DISCONNECT;
+ val |= ULPIS2S_SPARE((phy->mode == TEGRA_USB_PHY_MODE_HOST) ? 3 : 1);
+ val |= ULPIS2S_PLLU_MASTER_BLASTER60;
+ writel(val, base + ULPIS2S_CTRL);
+
+ /* select ULPI_CORE_CLK_SEL to SHADOW_CLK */
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_CORE_CLK_SEL;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+ udelay(10);
+
+ /* enable ULPI null phy clock - can't set the trimmers before this */
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_CLK_OUT_ENA;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+ udelay(10);
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID)) {
+ pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* set ULPI trimmers */
+ ulpi_set_trimmer(base, config->trimmer->data_trimmer,
+ config->trimmer->stpdirnxt_trimmer, 1);
+
+ ulpi_set_host(base);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* remove slave0 reset */
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~ULPIS2S_SLV0_RESET;
writel(val, base + USB_SUSP_CTRL);
+ /* remove slave1 and line reset */
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~ULPIS2S_SLV1_RESET;
+ val &= ~ULPIS2S_LINE_RESET;
+
+ /* remove ULPI PADS reset */
+ val &= ~ULPI_PADS_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+#endif
+ if (cold_boot) {
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_CLK_PADOUT_ENA;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+ cold_boot = false;
+ }
+
+ udelay(10);
+
+ if (config->post_phy_on && config->post_phy_on())
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int null_phy_power_off(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ struct tegra_ulpi_config *config = phy->config;
+
+ if (config->pre_phy_off && config->pre_phy_off())
+ return -EAGAIN;
+
+ null_phy_set_tristate(true);
+
+ if (config->post_phy_off && config->post_phy_off())
+ return -EAGAIN;
+
return 0;
}
-static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
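+/*
+ * Quiesce the null-PHY link around a USBCMD controller reset: clamp the
+ * ULPIS2S slave0 transmitter and hold it in reset beforehand, then restore
+ * host mode and release the clamp afterwards.
+ */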
+static int null_phy_pre_usbcmd_reset(struct tegra_usb_phy *phy, bool is_dpd)
{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
unsigned long val;
void __iomem *base = phy->regs;
- struct tegra_ulpi_config *config = phy->config;
- /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB
- * Controller to immediately bring the ULPI PHY out of low power
- */
+ val = readl(base + ULPIS2S_CTRL);
+ val |= ULPIS2S_SLV0_CLAMP_XMIT;
+ writel(val, base + ULPIS2S_CTRL);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= ULPIS2S_SLV0_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+ udelay(10);
+#endif
+ return 0;
+}
+
+static int null_phy_post_usbcmd_reset(struct tegra_usb_phy *phy, bool is_dpd)
+{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ ulpi_set_host(base);
+
+ /* remove slave0 reset */
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~ULPIS2S_SLV0_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = readl(base + ULPIS2S_CTRL);
+ val &= ~ULPIS2S_SLV0_CLAMP_XMIT;
+ writel(val, base + ULPIS2S_CTRL);
+ udelay(10);
+#endif
+ return 0;
+}
+
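+/*
+ * Power on the HSIC (UHSIC) PHY: optionally raise the board enable GPIO,
+ * power up the HSIC pads, program the elastic buffer limits and PLL
+ * counters, release the reset and wait for the PHY clock to become valid.
+ */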
+static int uhsic_phy_power_on(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_uhsic_config *uhsic_config = phy->config;
+
+ if (uhsic_config->enable_gpio != -1) {
+ gpio_set_value_cansleep(uhsic_config->enable_gpio, 1);
+ /* keep hsic reset asserted for 1 ms */
+ udelay(1000);
+ }
+
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~(UHSIC_PD_BG | UHSIC_PD_TX | UHSIC_PD_TRK | UHSIC_PD_RX |
+ UHSIC_PD_ZI | UHSIC_RPD_DATA | UHSIC_RPD_STROBE);
+ val |= UHSIC_RX_SEL;
+ writel(val, base + UHSIC_PADS_CFG1);
+ udelay(2);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UHSIC_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+ udelay(30);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UHSIC_PHY_ENABLE;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = readl(base + UHSIC_HSRX_CFG0);
+ val |= UHSIC_IDLE_WAIT(uhsic_config->idle_wait_delay);
+ val |= UHSIC_ELASTIC_UNDERRUN_LIMIT(uhsic_config->elastic_underrun_limit);
+ val |= UHSIC_ELASTIC_OVERRUN_LIMIT(uhsic_config->elastic_overrun_limit);
+ writel(val, base + UHSIC_HSRX_CFG0);
+
+ val = readl(base + UHSIC_HSRX_CFG1);
+ val |= UHSIC_HS_SYNC_START_DLY(uhsic_config->sync_start_delay);
+ writel(val, base + UHSIC_HSRX_CFG1);
+
+ val = readl(base + UHSIC_MISC_CFG0);
+ val |= UHSIC_SUSPEND_EXIT_ON_EDGE;
+ writel(val, base + UHSIC_MISC_CFG0);
+
+ val = readl(base + UHSIC_MISC_CFG1);
+ val |= UHSIC_PLLU_STABLE_COUNT(phy->freq->stable_count);
+ writel(val, base + UHSIC_MISC_CFG1);
+
+ val = readl(base + UHSIC_PLL_CFG1);
+ val |= UHSIC_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
+ val |= UHSIC_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count);
+ writel(val, base + UHSIC_PLL_CFG1);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~(UHSIC_RESET);
+ writel(val, base + USB_SUSP_CTRL);
+ udelay(2);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ val = readl(base + USB_PORTSC1);
+ val &= ~USB_PORTSC1_PTS(~0);
+ writel(val, base + USB_PORTSC1);
+
+ val = readl(base + USB_TXFILLTUNING);
+ if ((val & USB_FIFO_TXFILL_MASK) != USB_FIFO_TXFILL_THRES(0x10)) {
+ val = USB_FIFO_TXFILL_THRES(0x10);
+ writel(val, base + USB_TXFILLTUNING);
+ }
+#endif
+
val = readl(base + USB_PORTSC1);
val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN);
writel(val, base + USB_PORTSC1);
- gpio_direction_output(config->reset_gpio, 0);
- clk_disable(phy->clk);
+ val = readl(base + UHSIC_PADS_CFG0);
+ val &= ~(UHSIC_TX_RTUNEN);
+ /* set Rtune impedance to 40 ohm */
+ val |= UHSIC_TX_RTUNE(0);
+ writel(val, base + UHSIC_PADS_CFG0);
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID)) {
+ pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int uhsic_phy_power_off(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_uhsic_config *uhsic_config = phy->config;
+
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~UHSIC_RPU_STROBE;
+ val |= UHSIC_RPD_STROBE;
+ writel(val, base + UHSIC_PADS_CFG1);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UHSIC_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+ udelay(30);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~UHSIC_PHY_ENABLE;
+ writel(val, base + USB_SUSP_CTRL);
+
+ if (uhsic_config->enable_gpio != -1) {
+ gpio_set_value_cansleep(uhsic_config->enable_gpio, 0);
+ /* keep hsic reset de-asserted for 1 ms */
+ udelay(1000);
+ }
+ if (uhsic_config->post_phy_off && uhsic_config->post_phy_off())
+ return -EAGAIN;
+
+ return 0;
+}
+
+#ifdef CONFIG_USB_TEGRA_OTG
+extern void tegra_otg_check_vbus_detection(void);
+#endif
+
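+/*
+ * Threaded handler for the shared VBUS detect IRQ: make sure the PHY
+ * supply is on (and give it time to settle) before letting the OTG
+ * driver sample the VBUS state.
+ */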
+static irqreturn_t usb_phy_vbus_irq_thr(int irq, void *pdata)
+{
+ struct tegra_usb_phy *phy = pdata;
+
+ if (phy->reg_vdd && !phy->regulator_on) {
+ regulator_enable(phy->reg_vdd);
+ phy->regulator_on = 1;
+ /*
+ * Give the regulator time to ramp up before the OTG
+ * driver samples the VBUS state below.
+ */
+ mdelay(15);
+ }
+
+#ifdef CONFIG_USB_TEGRA_OTG
+ tegra_otg_check_vbus_detection();
+#endif
+
+ return IRQ_HANDLED;
}
struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
- void *config, enum tegra_usb_phy_mode phy_mode)
+ void *config, enum tegra_usb_phy_mode phy_mode,
+ enum tegra_usb_phy_type usb_phy_type)
{
struct tegra_usb_phy *phy;
struct tegra_ulpi_config *ulpi_config;
unsigned long parent_rate;
int i;
int err;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra_uhsic_config *uhsic_config;
+ int reset_gpio, enable_gpio;
+#endif
- phy = kmalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
+ phy = kzalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
if (!phy)
return ERR_PTR(-ENOMEM);
@@ -669,9 +2260,17 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
phy->regs = regs;
phy->config = config;
phy->mode = phy_mode;
+ phy->usb_phy_type = usb_phy_type;
+ phy->initialized = 0;
+ phy->regulator_on = 0;
+ phy->power_on = 0;
+ phy->remote_wakeup = false;
+ phy->hotplug = 0;
+ phy->xcvr_setup_value = 0;
if (!phy->config) {
- if (phy_is_ulpi(phy)) {
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_LINK_ULPI ||
+ phy->usb_phy_type == TEGRA_USB_PHY_TYPE_NULL_ULPI) {
pr_err("%s: ulpi phy configuration missing", __func__);
err = -EINVAL;
goto err0;
@@ -689,10 +2288,19 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
clk_enable(phy->pll_u);
parent_rate = clk_get_rate(clk_get_parent(phy->pll_u));
- for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
- if (tegra_freq_table[i].freq == parent_rate) {
- phy->freq = &tegra_freq_table[i];
- break;
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC) {
+ for (i = 0; i < ARRAY_SIZE(tegra_uhsic_freq_table); i++) {
+ if (tegra_uhsic_freq_table[i].freq == parent_rate) {
+ phy->freq = &tegra_uhsic_freq_table[i];
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
+ if (tegra_freq_table[i].freq == parent_rate) {
+ phy->freq = &tegra_freq_table[i];
+ break;
+ }
}
}
if (!phy->freq) {
@@ -701,25 +2309,104 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
goto err1;
}
- if (phy_is_ulpi(phy)) {
- ulpi_config = config;
- phy->clk = clk_get_sys(NULL, ulpi_config->clk);
- if (IS_ERR(phy->clk)) {
- pr_err("%s: can't get ulpi clock\n", __func__);
- err = -ENXIO;
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) {
+ err = utmip_pad_open(phy);
+ phy->xcvr_setup_value = tegra_phy_xcvr_setup_value(phy->config);
+ if (err < 0)
goto err1;
+ } else if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_LINK_ULPI) {
+ ulpi_config = config;
+
+ if (ulpi_config->clk) {
+ phy->clk = clk_get_sys(NULL, ulpi_config->clk);
+ if (IS_ERR(phy->clk)) {
+ pr_err("%s: can't get ulpi clock\n", __func__);
+ err = -ENXIO;
+ goto err1;
+ }
+ } else {
+ /* Some USB ULPI chips are not driven by Tegra clocks or PLL */
+ phy->clk = NULL;
}
tegra_gpio_enable(ulpi_config->reset_gpio);
gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
gpio_direction_output(ulpi_config->reset_gpio, 0);
phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
phy->ulpi->io_priv = regs + ULPI_VIEWPORT;
- } else {
- err = utmip_pad_open(phy);
- if (err < 0)
+ }
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ else if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC) {
+ uhsic_config = config;
+ enable_gpio = gpio_request(uhsic_config->enable_gpio,
+ "uhsic_enable");
+ reset_gpio = gpio_request(uhsic_config->reset_gpio,
+ "uhsic_reset");
+ /* hsic enable signal deasserted, hsic reset asserted */
+ if (!enable_gpio)
+ gpio_direction_output(uhsic_config->enable_gpio,
+ 0 /* deasserted */);
+ if (!reset_gpio)
+ gpio_direction_output(uhsic_config->reset_gpio,
+ 0 /* asserted */);
+ if (!enable_gpio)
+ tegra_gpio_enable(uhsic_config->enable_gpio);
+ if (!reset_gpio)
+ tegra_gpio_enable(uhsic_config->reset_gpio);
+ /* keep hsic reset asserted for 1 ms */
+ udelay(1000);
+ /* enable (power on) hsic */
+ if (!enable_gpio)
+ gpio_set_value_cansleep(uhsic_config->enable_gpio, 1);
+ udelay(1000);
+ /* deassert reset */
+ if (!reset_gpio)
+ gpio_set_value_cansleep(uhsic_config->reset_gpio, 1);
+ }
+#endif
+
+ phy->reg_vdd = regulator_get(NULL, "avdd_usb");
+ if (IS_ERR_OR_NULL(phy->reg_vdd)) {
+ pr_err("couldn't get regulator avdd_usb: %ld \n",
+ PTR_ERR(phy->reg_vdd));
+ phy->reg_vdd = NULL;
+ }
+
+ if (instance == 0 && usb_phy_data[0].vbus_irq) {
+ err = request_threaded_irq(usb_phy_data[0].vbus_irq, NULL, usb_phy_vbus_irq_thr, IRQF_SHARED,
+ "usb_phy_vbus", phy);
+ if (err) {
+ pr_err("Failed to register IRQ\n");
goto err1;
+ }
}
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Power-up the VBUS detector for UTMIP PHY */
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) {
+ writel(readl((IO_ADDRESS(TEGRA_PMC_BASE) + TEGRA_PMC_USB_AO)) &
+ ~(TEGRA_PMC_USB_AO_VBUS_WAKEUP_PD_P0 | TEGRA_PMC_USB_AO_ID_PD_P0),
+ (IO_ADDRESS(TEGRA_PMC_BASE) + TEGRA_PMC_USB_AO));
+
+ if (usb_phy_data[phy->instance].vbus_reg_supply) {
+ phy->reg_vbus = regulator_get(NULL, usb_phy_data[phy->instance].vbus_reg_supply);
+ if (WARN_ON(IS_ERR_OR_NULL(phy->reg_vbus))) {
+ pr_err("couldn't get regulator vdd_vbus_usb: %ld, instance : %d\n",
+ PTR_ERR(phy->reg_vbus), phy->instance);
+ err = PTR_ERR(phy->reg_vbus);
+ goto err1;
+ }
+ }
+ }
+ if (instance == 2) {
+ writel(readl((IO_ADDRESS(TEGRA_PMC_BASE) + TEGRA_PMC_USB_AO)) &
+ (TEGRA_PMC_USB_AO_PD_P2),
+ (IO_ADDRESS(TEGRA_PMC_BASE) + TEGRA_PMC_USB_AO));
+ }
+#endif
+ if (((instance == 2) || (instance == 0)) &&
+ (phy->mode == TEGRA_USB_PHY_MODE_HOST)) {
+ vbus_enable(phy);
+ }
return phy;
err1:
@@ -730,66 +2417,458 @@ err0:
return ERR_PTR(err);
}
-int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
+int tegra_usb_phy_power_on(struct tegra_usb_phy *phy, bool is_dpd)
{
- if (phy_is_ulpi(phy))
- return ulpi_phy_power_on(phy);
- else
- return utmi_phy_power_on(phy);
+ int ret = 0;
+
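+ /*
+ * Per-PHY-type handlers, indexed by phy->usb_phy_type
+ * (UTMIP, link ULPI, NULL ULPI, HSIC); the dispatch tables in the
+ * suspend/resume helpers below use the same ordering.
+ */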
+ const tegra_phy_fp power_on[] = {
+ utmi_phy_power_on,
+ ulpi_phy_power_on,
+ null_phy_power_on,
+ uhsic_phy_power_on,
+ };
+
+ if (phy->power_on)
+ return ret;
+
+ if (phy->reg_vdd && !phy->regulator_on) {
+ regulator_enable(phy->reg_vdd);
+ phy->regulator_on = 1;
+ }
+
+ if (power_on[phy->usb_phy_type])
+ ret = power_on[phy->usb_phy_type](phy, is_dpd);
+
+ phy->power_on = true;
+ return ret;
}
-void tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
+void tegra_usb_phy_power_off(struct tegra_usb_phy *phy, bool is_dpd)
{
- if (phy_is_ulpi(phy))
- ulpi_phy_power_off(phy);
- else
- utmi_phy_power_off(phy);
+ const tegra_phy_fp power_off[] = {
+ utmi_phy_power_off,
+ ulpi_phy_power_off,
+ null_phy_power_off,
+ uhsic_phy_power_off,
+ };
+
+ if (!phy->power_on)
+ return;
+
+ if (power_off[phy->usb_phy_type])
+ power_off[phy->usb_phy_type](phy, is_dpd);
+
+ if (phy->reg_vdd && phy->regulator_on && is_dpd) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (tegra_get_revision() >= TEGRA_REVISION_A03)
+#endif
+ regulator_disable(phy->reg_vdd);
+ phy->regulator_on = 0;
+ }
+ phy->power_on = false;
+}
+
+void tegra_usb_phy_preresume(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ const tegra_phy_fp preresume[] = {
+ utmi_phy_preresume,
+ NULL,
+ NULL,
+ uhsic_phy_preresume,
+ };
+
+ if (preresume[phy->usb_phy_type])
+ preresume[phy->usb_phy_type](phy, is_dpd);
+}
+
+void tegra_usb_phy_postsuspend(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ const tegra_phy_fp postsuspend[] = {
+ NULL,
+ NULL,
+ NULL,
+ uhsic_phy_postsuspend,
+ };
+
+ if (postsuspend[phy->usb_phy_type])
+ postsuspend[phy->usb_phy_type](phy, is_dpd);
}
-void tegra_usb_phy_preresume(struct tegra_usb_phy *phy)
+void tegra_usb_phy_postresume(struct tegra_usb_phy *phy, bool is_dpd)
{
- if (!phy_is_ulpi(phy))
- utmi_phy_preresume(phy);
+ const tegra_phy_fp postresume[] = {
+ utmi_phy_postresume,
+ NULL,
+ NULL,
+ uhsic_phy_postresume,
+ };
+
+ if (postresume[phy->usb_phy_type])
+ postresume[phy->usb_phy_type](phy, is_dpd);
}
-void tegra_usb_phy_postresume(struct tegra_usb_phy *phy)
+void tegra_ehci_pre_reset(struct tegra_usb_phy *phy, bool is_dpd)
{
- if (!phy_is_ulpi(phy))
- utmi_phy_postresume(phy);
+ const tegra_phy_fp pre_reset[] = {
+ NULL,
+ NULL,
+ null_phy_pre_usbcmd_reset,
+ NULL,
+ };
+
+ if (pre_reset[phy->usb_phy_type])
+ pre_reset[phy->usb_phy_type](phy, is_dpd);
+}
+
+void tegra_ehci_post_reset(struct tegra_usb_phy *phy, bool is_dpd)
+{
+ const tegra_phy_fp post_reset[] = {
+ NULL,
+ NULL,
+ null_phy_post_usbcmd_reset,
+ NULL,
+ };
+
+ if (post_reset[phy->usb_phy_type])
+ post_reset[phy->usb_phy_type](phy, is_dpd);
}
void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed)
{
- if (!phy_is_ulpi(phy))
- utmi_phy_restore_start(phy, port_speed);
+ const tegra_phy_restore_start_fp phy_restore_start[] = {
+ utmi_phy_restore_start,
+ ulpi_phy_restore_start,
+ null_phy_restore_start,
+ NULL,
+ };
+
+ if (phy_restore_start[phy->usb_phy_type])
+ phy_restore_start[phy->usb_phy_type](phy, port_speed);
}
void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy)
{
- if (!phy_is_ulpi(phy))
- utmi_phy_restore_end(phy);
+ const tegra_phy_restore_end_fp phy_restore_end[] = {
+ utmi_phy_restore_end,
+ ulpi_phy_restore_end,
+ null_phy_restore_end,
+ NULL,
+ };
+
+ if (phy_restore_end[phy->usb_phy_type])
+ phy_restore_end[phy->usb_phy_type](phy);
}
void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy)
{
- if (!phy_is_ulpi(phy))
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP)
utmi_phy_clk_disable(phy);
}
void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy)
{
- if (!phy_is_ulpi(phy))
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP)
utmi_phy_clk_enable(phy);
}
void tegra_usb_phy_close(struct tegra_usb_phy *phy)
{
- if (phy_is_ulpi(phy))
- clk_put(phy->clk);
- else
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) {
utmip_pad_close(phy);
+ utmip_phy_disable_pmc_bus_ctrl(phy);
+ } else if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_LINK_ULPI && phy->clk)
+ clk_put(phy->clk);
+ if (phy->mode == TEGRA_USB_PHY_MODE_HOST) {
+ vbus_disable(phy);
+ }
clk_disable(phy->pll_u);
clk_put(phy->pll_u);
+ if (phy->reg_vbus)
+ regulator_put(phy->reg_vbus);
+ if (phy->reg_vdd)
+ regulator_put(phy->reg_vdd);
+ if (phy->instance == 0 && usb_phy_data[0].vbus_irq)
+ free_irq(usb_phy_data[0].vbus_irq, phy);
kfree(phy);
}
+
+int tegra_usb_phy_bus_connect(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_uhsic_config *uhsic_config = phy->config;
+
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC) {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Change the USB controller PHY type to HSIC */
+ val = readl(base + HOSTPC1_DEVLC);
+ val &= ~HOSTPC1_DEVLC_PTS(HOSTPC1_DEVLC_PTS_MASK);
+ val |= HOSTPC1_DEVLC_PTS(HOSTPC1_DEVLC_PTS_HSIC);
+ val &= ~HOSTPC1_DEVLC_PSPD(HOSTPC1_DEVLC_PSPD_MASK);
+ val |= HOSTPC1_DEVLC_PSPD(HOSTPC1_DEVLC_PSPD_HIGH_SPEED);
+ writel(val, base + HOSTPC1_DEVLC);
+#endif
+ val = readl(base + UHSIC_MISC_CFG0);
+ val |= UHSIC_DETECT_SHORT_CONNECT;
+ writel(val, base + UHSIC_MISC_CFG0);
+ udelay(1);
+
+ val = readl(base + UHSIC_MISC_CFG0);
+ val |= UHSIC_FORCE_XCVR_MODE;
+ writel(val, base + UHSIC_MISC_CFG0);
+
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~UHSIC_RPD_STROBE;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ val |= UHSIC_RPU_STROBE;
+#endif
+ writel(val, base + UHSIC_PADS_CFG1);
+
+ if (uhsic_config->usb_phy_ready &&
+ uhsic_config->usb_phy_ready())
+ return -EAGAIN;
+
+ if (utmi_wait_register(base + UHSIC_STAT_CFG0, UHSIC_CONNECT_DETECT, UHSIC_CONNECT_DETECT) < 0) {
+ pr_err("%s: timeout waiting for hsic connect detect\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (utmi_wait_register(base + USB_PORTSC1, USB_PORTSC1_LS(2), USB_PORTSC1_LS(2)) < 0) {
+ pr_err("%s: timeout waiting for dplus state\n", __func__);
+ return -ETIMEDOUT;
+ }
+#endif
+ }
+
+ return 0;
+}
+
+int tegra_usb_phy_bus_reset(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ val = readl(base + USB_PORTSC1);
+ val |= USB_PORTSC1_PTC(5);
+ writel(val, base + USB_PORTSC1);
+ udelay(2);
+
+ val = readl(base + USB_PORTSC1);
+ val &= ~USB_PORTSC1_PTC(~0);
+ writel(val, base + USB_PORTSC1);
+ udelay(2);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (utmi_wait_register(base + USB_PORTSC1, USB_PORTSC1_LS(0), 0) < 0) {
+ pr_err("%s: timeout waiting for SE0\n", __func__);
+ return -ETIMEDOUT;
+ }
+#endif
+ if (utmi_wait_register(base + USB_PORTSC1, USB_PORTSC1_CCS, USB_PORTSC1_CCS) < 0) {
+ pr_err("%s: timeout waiting for connection status\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (utmi_wait_register(base + USB_PORTSC1, USB_PORTSC1_PSPD(2), USB_PORTSC1_PSPD(2)) < 0) {
+ pr_err("%s: timeout waiting hsic high speed configuration\n", __func__);
+ return -ETIMEDOUT;
+ }
+#endif
+ val = readl(base + USB_USBCMD);
+ val &= ~USB_USBCMD_RS;
+ writel(val, base + USB_USBCMD);
+
+ if (utmi_wait_register(base + USB_USBSTS, USB_USBSTS_HCH, USB_USBSTS_HCH) < 0) {
+ pr_err("%s: timeout waiting for stopping the controller\n", __func__);
+ return -ETIMEDOUT;
+ }
+
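+ /*
+ * With the controller halted, toggle the STROBE pull-down/pull-up
+ * to drive the HSIC bus reset signalling, then restart the
+ * controller.
+ */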
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~UHSIC_RPU_STROBE;
+ val |= UHSIC_RPD_STROBE;
+ writel(val, base + UHSIC_PADS_CFG1);
+
+ mdelay(50);
+
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~UHSIC_RPD_STROBE;
+ val |= UHSIC_RPU_STROBE;
+ writel(val, base + UHSIC_PADS_CFG1);
+
+ val = readl(base + USB_USBCMD);
+ val |= USB_USBCMD_RS;
+ writel(val, base + USB_USBCMD);
+
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~UHSIC_RPU_STROBE;
+ writel(val, base + UHSIC_PADS_CFG1);
+
+ if (utmi_wait_register(base + USB_USBCMD, USB_USBCMD_RS, USB_USBCMD_RS) < 0) {
+ pr_err("%s: timeout waiting for starting the controller\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+int tegra_usb_phy_bus_idle(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_uhsic_config *uhsic_config = phy->config;
+
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC) {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Change the USB controller PHY type to HSIC */
+ val = readl(base + HOSTPC1_DEVLC);
+ val &= ~HOSTPC1_DEVLC_PTS(HOSTPC1_DEVLC_PTS_MASK);
+ val |= HOSTPC1_DEVLC_PTS(HOSTPC1_DEVLC_PTS_HSIC);
+ val &= ~HOSTPC1_DEVLC_PSPD(HOSTPC1_DEVLC_PSPD_MASK);
+ val |= HOSTPC1_DEVLC_PSPD(HOSTPC1_DEVLC_PSPD_HIGH_SPEED);
+ writel(val, base + HOSTPC1_DEVLC);
+#endif
+ val = readl(base + UHSIC_MISC_CFG0);
+ val |= UHSIC_DETECT_SHORT_CONNECT;
+ writel(val, base + UHSIC_MISC_CFG0);
+ udelay(1);
+
+ val = readl(base + UHSIC_MISC_CFG0);
+ val |= UHSIC_FORCE_XCVR_MODE;
+ writel(val, base + UHSIC_MISC_CFG0);
+
+ val = readl(base + UHSIC_PADS_CFG1);
+ val &= ~UHSIC_RPD_STROBE;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ val |= UHSIC_RPU_STROBE;
+#endif
+ writel(val, base + UHSIC_PADS_CFG1);
+
+ if (uhsic_config->usb_phy_ready &&
+ uhsic_config->usb_phy_ready())
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+bool tegra_usb_phy_is_device_connected(struct tegra_usb_phy *phy)
+{
+ void __iomem *base = phy->regs;
+
+ if (phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC) {
+ if ((readl(base + UHSIC_STAT_CFG0) & UHSIC_CONNECT_DETECT) != UHSIC_CONNECT_DETECT) {
+ pr_err("%s: hsic no device connection\n", __func__);
+ return false;
+ }
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (utmi_wait_register(base + USB_PORTSC1, USB_PORTSC1_LS(2), USB_PORTSC1_LS(2)) < 0) {
+ pr_err("%s: timeout waiting for dplus state\n", __func__);
+ return false;
+ }
+#endif
+ }
+ return true;
+}
+
+bool tegra_usb_phy_charger_detect(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ bool status;
+
+ if (phy->usb_phy_type != TEGRA_USB_PHY_TYPE_UTMIP) {
+ /* Charger detection is only supported on UTMIP;
+ * report no charger for other PHY types. */
+ return false;
+ }
+
+ /* Enable charger detection logic */
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val |= UTMIP_OP_SRC_EN | UTMIP_ON_SINK_EN;
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
+
+ /* Source should be on for 100 ms as per USB charging spec */
+ msleep(TDP_SRC_ON_MS);
+
+ val = readl(base + USB_PHY_VBUS_WAKEUP_ID);
+ /* Turn off the VDAT detect interrupt and ack the
+ * detection status change */
+ val &= ~VDAT_DET_INT_EN;
+ val |= VDAT_DET_CHG_DET;
+ writel(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ val = readl(base + USB_PHY_VBUS_WAKEUP_ID);
+ if (val & VDAT_DET_STS)
+ status = true;
+ else
+ status = false;
+
+ /* Disable charger detection logic */
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val &= ~(UTMIP_OP_SRC_EN | UTMIP_ON_SINK_EN);
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
+
+ /* Delay of 40 ms before we pull up D+, as per the battery charging spec */
+ msleep(TDPSRC_CON_MS);
+
+ return status;
+}
+
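+/*
+ * Record the board-provided VBUS IRQ/GPIO/regulator data in the
+ * per-instance table consumed by tegra_usb_phy_open().
+ */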
+int __init tegra_usb_phy_init(struct usb_phy_plat_data *pdata, int size)
+{
+ if (pdata) {
+ int i;
+
+ for (i = 0; i < size; i++, pdata++) {
+ usb_phy_data[pdata->instance].instance = pdata->instance;
+ usb_phy_data[pdata->instance].vbus_irq = pdata->vbus_irq;
+ usb_phy_data[pdata->instance].vbus_gpio = pdata->vbus_gpio;
+ usb_phy_data[pdata->instance].vbus_reg_supply = pdata->vbus_reg_supply;
+ }
+ }
+
+ return 0;
+}
+
+/* Check whether the PMC reported a remote-wake event for this port; if so,
+ * clear the wake alarm and walk pointer and disable further wake events
+ * (used after resume from LP0). */
+bool tegra_usb_phy_is_remotewake_detected(struct tegra_usb_phy *phy)
+{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ void __iomem *pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+ void __iomem *base = phy->regs;
+ unsigned int inst = phy->instance;
+ u32 val;
+
+ val = readl(base + UTMIP_PMC_WAKEUP0);
+ if (val & EVENT_INT_ENB) {
+ val = readl(pmc_base + UTMIP_UHSIC_STATUS);
+ if (UTMIP_WAKE_ALARM(inst) & val) {
+ val = readl(pmc_base + PMC_SLEEP_CFG);
+ val &= ~UTMIP_WAKE_VAL(inst, 0x0);
+ val |= UTMIP_WAKE_VAL(inst, WAKE_VAL_NONE);
+ writel(val, pmc_base + PMC_SLEEP_CFG);
+
+ val = readl(pmc_base + PMC_TRIGGERS);
+ val |= UTMIP_CLR_WAKE_ALARM(inst) |
+ UTMIP_CLR_WALK_PTR(inst);
+ writel(val, pmc_base + PMC_TRIGGERS);
+
+ val = readl(base + UTMIP_PMC_WAKEUP0);
+ val &= ~EVENT_INT_ENB;
+ writel(val, base + UTMIP_PMC_WAKEUP0);
+ phy->remote_wakeup = true;
+ return true;
+ }
+ }
+#endif
+ return false;
+}
+
diff --git a/arch/arm/mach-tegra/wakeups-t2.c b/arch/arm/mach-tegra/wakeups-t2.c
new file mode 100644
index 000000000000..7c5d12ac60d4
--- /dev/null
+++ b/arch/arm/mach-tegra/wakeups-t2.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+
+#include "gpio-names.h"
+
+static int tegra_wake_event_irq[] = {
+ [0] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PO5),
+ [1] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV3),
+ [2] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PL1),
+ [3] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PB6),
+ [4] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PN7),
+ [5] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PA0),
+ [6] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU5),
+ [7] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+ [8] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PC7),
+ [9] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS2),
+ [10] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PAA1),
+ [11] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PW3),
+ [12] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PW2),
+ [13] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PY6),
+ [14] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV6),
+ [15] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PJ7),
+ [16] = INT_RTC,
+ [17] = INT_KBC,
+ [18] = INT_EXTERNAL_PMU,
+ [19] = -EINVAL, /* TEGRA_USB1_VBUS, */
+ [20] = -EINVAL, /* TEGRA_USB3_VBUS, */
+ [21] = -EINVAL, /* TEGRA_USB1_ID, */
+ [22] = -EINVAL, /* TEGRA_USB3_ID, */
+ [23] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PI5),
+ [24] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV2),
+ [25] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS4),
+ [26] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS5),
+ [27] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS0),
+ [28] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PQ6),
+ [29] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PQ7),
+ [30] = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PN2),
+};
+
+int tegra_irq_to_wake(int irq)
+{
+ int i;
+ int wake_irq;
+ int search_gpio;
+ static int last_wake = -1;
+
+ /* Two-level wake IRQ search for GPIO based wakeups:
+ * 1. Check for a direct GPIO IRQ match in the tegra_wake_event_irq
+ * table (e.g. wake7 backed by GPIO PU6, irq == 358).
+ * 2. Check for the GPIO bank IRQ, assuming a direct search in a
+ * previous call already recorded the matching wake index
+ * (e.g. the bank IRQ for GPIO6, irq == 119).
+ */
+ for (i = 0; i < ARRAY_SIZE(tegra_wake_event_irq); i++) {
+ /* return if step 1 matches */
+ if (tegra_wake_event_irq[i] == irq) {
+ pr_info("Wake%d for irq=%d\n", i, irq);
+ last_wake = i;
+ return i;
+ }
+
+ /* Step 2 below uses last_wake saved by step 1 in a previous call */
+ search_gpio = irq_to_gpio(tegra_wake_event_irq[i]);
+ if (search_gpio < 0)
+ continue;
+ wake_irq = tegra_gpio_get_bank_int_nr(search_gpio);
+ if (wake_irq < 0)
+ continue;
+ if ((last_wake == i) &&
+ (wake_irq == irq)) {
+ pr_info("gpio bank wake found: wake%d for irq=%d\n",
+ i, irq);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int tegra_wake_to_irq(int wake)
+{
+ if (wake < 0)
+ return -EINVAL;
+
+ if (wake >= ARRAY_SIZE(tegra_wake_event_irq))
+ return -EINVAL;
+
+ return tegra_wake_event_irq[wake];
+}
diff --git a/arch/arm/mach-tegra/wakeups-t2.h b/arch/arm/mach-tegra/wakeups-t2.h
new file mode 100644
index 000000000000..eb193c0aaf9e
--- /dev/null
+++ b/arch/arm/mach-tegra/wakeups-t2.h
@@ -0,0 +1,65 @@
+/*
+ * arch/arm/mach-tegra/wakeups-t2.h
+ *
+ * Declarations of Tegra 2 LP0 wakeup sources
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_WAKEUPS_T2_H
+#define __MACH_TEGRA_WAKEUPS_T2_H
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+#error "Tegra 2 wakeup sources valid only for CONFIG_ARCH_TEGRA_2x_SOC"
+#endif
+
+int tegra_irq_to_wake(int irq);
+int tegra_wake_to_irq(int wake);
+
+#define TEGRA_WAKE_GPIO_PO5 (1 << 0)
+#define TEGRA_WAKE_GPIO_PV3 (1 << 1)
+#define TEGRA_WAKE_GPIO_PL1 (1 << 2)
+#define TEGRA_WAKE_GPIO_PB6 (1 << 3)
+#define TEGRA_WAKE_GPIO_PN7 (1 << 4)
+#define TEGRA_WAKE_GPIO_PA0 (1 << 5)
+#define TEGRA_WAKE_GPIO_PU5 (1 << 6)
+#define TEGRA_WAKE_GPIO_PU6 (1 << 7)
+#define TEGRA_WAKE_GPIO_PC7 (1 << 8)
+#define TEGRA_WAKE_GPIO_PS2 (1 << 9)
+#define TEGRA_WAKE_GPIO_PAA1 (1 << 10)
+#define TEGRA_WAKE_GPIO_PW3 (1 << 11)
+#define TEGRA_WAKE_GPIO_PW2 (1 << 12)
+#define TEGRA_WAKE_GPIO_PY6 (1 << 13)
+#define TEGRA_WAKE_GPIO_PV6 (1 << 14)
+#define TEGRA_WAKE_GPIO_PJ7 (1 << 15)
+#define TEGRA_WAKE_RTC_ALARM (1 << 16)
+#define TEGRA_WAKE_KBC_EVENT (1 << 17)
+#define TEGRA_WAKE_PWR_INT (1 << 18)
+#define TEGRA_WAKE_USB1_VBUS (1 << 19)
+#define TEGRA_WAKE_USB3_VBUS (1 << 20)
+#define TEGRA_WAKE_USB1_ID (1 << 21)
+#define TEGRA_WAKE_USB3_ID (1 << 22)
+#define TEGRA_WAKE_GPIO_PI5 (1 << 23)
+#define TEGRA_WAKE_GPIO_PV2 (1 << 24)
+#define TEGRA_WAKE_GPIO_PS4 (1 << 25)
+#define TEGRA_WAKE_GPIO_PS5 (1 << 26)
+#define TEGRA_WAKE_GPIO_PS0 (1 << 27)
+#define TEGRA_WAKE_GPIO_PQ6 (1 << 28)
+#define TEGRA_WAKE_GPIO_PQ7 (1 << 29)
+#define TEGRA_WAKE_GPIO_PN2 (1 << 30)
+
+#endif
diff --git a/arch/arm/mach-tegra/wakeups-t3.c b/arch/arm/mach-tegra/wakeups-t3.c
new file mode 100644
index 000000000000..823736204362
--- /dev/null
+++ b/arch/arm/mach-tegra/wakeups-t3.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+
+#include "gpio-names.h"
+
+static int tegra_wake_event_irq[] = {
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PO5), /* wake0 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV1), /* wake1 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PL1), /* wake2 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PB6), /* wake3 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PN7), /* wake4 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PBB6), /* wake5 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU5), /* wake6 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6), /* wake7 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PC7), /* wake8 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS2), /* wake9 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PAA1), /* wake10 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PW3), /* wake11 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PW2), /* wake12 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PY6), /* wake13 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PDD3), /* wake14 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PJ2), /* wake15 */
+ INT_RTC, /* wake16 */
+ INT_KBC, /* wake17 */
+ INT_EXTERNAL_PMU, /* wake18 */
+ -EINVAL, /* TEGRA_USB1_VBUS, */ /* wake19 */
+ -EINVAL, /* TEGRA_USB2_VBUS, */ /* wake20 */
+ -EINVAL, /* TEGRA_USB1_ID, */ /* wake21 */
+ -EINVAL, /* TEGRA_USB2_ID, */ /* wake22 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PI5), /* wake23 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV0), /* wake24 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS4), /* wake25 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS5), /* wake26 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS0), /* wake27 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS6), /* wake28 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS7), /* wake29 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PN2), /* wake30 */
+ -EINVAL, /* not used */ /* wake31 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PO4), /* wake32 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PJ0), /* wake33 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PK2), /* wake34 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PI6), /* wake35 */
+ TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PBB1), /* wake36 */
+ -EINVAL, /* TEGRA_USB3_VBUS, */ /* wake37 */
+ -EINVAL, /* TEGRA_USB3_ID, */ /* wake38 */
+ INT_USB, /* TEGRA_USB1_UTMIP, */ /* wake39 */
+ INT_USB2, /* TEGRA_USB2_UTMIP, */ /* wake40 */
+ INT_USB3, /* TEGRA_USB3_UTMIP, */ /* wake41 */
+};
+
+int tegra_irq_to_wake(int irq)
+{
+ int i;
+ int wake_irq;
+ int search_gpio;
+ static int last_wake = -1;
+
+ /* Two-level wake IRQ search for GPIO based wakeups:
+ * 1. Check for a direct GPIO IRQ match in the tegra_wake_event_irq
+ * table (e.g. wake7 backed by GPIO PU6, irq == 390).
+ * 2. Check for the GPIO bank IRQ, assuming a direct search in a
+ * previous call already recorded the matching wake index
+ * (e.g. the bank IRQ for GPIO6, irq == 119).
+ */
+ for (i = 0; i < ARRAY_SIZE(tegra_wake_event_irq); i++) {
+ /* return if step 1 matches */
+ if (tegra_wake_event_irq[i] == irq) {
+ pr_info("Wake%d for irq=%d\n", i, irq);
+ last_wake = i;
+ return i;
+ }
+
+ /* Step 2 below uses last_wake saved by step 1 in a previous call */
+ search_gpio = irq_to_gpio(tegra_wake_event_irq[i]);
+ if (search_gpio < 0)
+ continue;
+ wake_irq = tegra_gpio_get_bank_int_nr(search_gpio);
+ if (wake_irq < 0)
+ continue;
+ if ((last_wake == i) &&
+ (wake_irq == irq)) {
+ pr_info("gpio bank wake found: wake%d for irq=%d\n",
+ i, irq);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int tegra_wake_to_irq(int wake)
+{
+ if (wake < 0)
+ return -EINVAL;
+
+ if (wake >= ARRAY_SIZE(tegra_wake_event_irq))
+ return -EINVAL;
+
+ return tegra_wake_event_irq[wake];
+}
diff --git a/arch/arm/mach-tegra/wakeups-t3.h b/arch/arm/mach-tegra/wakeups-t3.h
new file mode 100644
index 000000000000..f811d8939387
--- /dev/null
+++ b/arch/arm/mach-tegra/wakeups-t3.h
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/mach-tegra/wakeups-t3.h
+ *
+ * Declarations of Tegra 3 LP0 wakeup sources
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_WAKEUPS_T3_H
+#define __MACH_TEGRA_WAKEUPS_T3_H
+
+#ifndef CONFIG_ARCH_TEGRA_3x_SOC
+#error "Tegra 3 wakeup sources valid only for CONFIG_ARCH_TEGRA_3x_SOC"
+#endif
+
+#define TEGRA_WAKE_GPIO_PO5 (1ull << 0)
+#define TEGRA_WAKE_GPIO_PV1 (1ull << 1)
+#define TEGRA_WAKE_GPIO_PL1 (1ull << 2)
+#define TEGRA_WAKE_GPIO_PB6 (1ull << 3)
+#define TEGRA_WAKE_GPIO_PN7 (1ull << 4)
+#define TEGRA_WAKE_GPIO_PBB6 (1ull << 5)
+#define TEGRA_WAKE_GPIO_PU5 (1ull << 6)
+#define TEGRA_WAKE_GPIO_PU6 (1ull << 7)
+#define TEGRA_WAKE_GPIO_PC7 (1ull << 8)
+#define TEGRA_WAKE_GPIO_PS2 (1ull << 9)
+#define TEGRA_WAKE_GPIO_PAA1 (1ull << 10)
+#define TEGRA_WAKE_GPIO_PW3 (1ull << 11)
+#define TEGRA_WAKE_GPIO_PW2 (1ull << 12)
+#define TEGRA_WAKE_GPIO_PY6 (1ull << 13)
+#define TEGRA_WAKE_GPIO_PDD3 (1ull << 14)
+#define TEGRA_WAKE_GPIO_PJ2 (1ull << 15)
+#define TEGRA_WAKE_RTC_ALARM (1ull << 16)
+#define TEGRA_WAKE_KBC_EVENT (1ull << 17)
+#define TEGRA_WAKE_PWR_INT (1ull << 18)
+#define TEGRA_WAKE_USB1_VBUS (1ull << 19)
+#define TEGRA_WAKE_USB2_VBUS (1ull << 20)
+#define TEGRA_WAKE_USB1_ID (1ull << 21)
+#define TEGRA_WAKE_USB2_ID (1ull << 22)
+#define TEGRA_WAKE_GPIO_PI5 (1ull << 23)
+#define TEGRA_WAKE_GPIO_PV0 (1ull << 24)
+#define TEGRA_WAKE_GPIO_PS4 (1ull << 25)
+#define TEGRA_WAKE_GPIO_PS5 (1ull << 26)
+#define TEGRA_WAKE_GPIO_PS0 (1ull << 27)
+#define TEGRA_WAKE_GPIO_PS6 (1ull << 28)
+#define TEGRA_WAKE_GPIO_PS7 (1ull << 29)
+#define TEGRA_WAKE_GPIO_PN2 (1ull << 30)
+/* bit 31 is unused */
+
+#define TEGRA_WAKE_GPIO_PO4 (1ull << 32)
+#define TEGRA_WAKE_GPIO_PJ0 (1ull << 33)
+#define TEGRA_WAKE_GPIO_PK2 (1ull << 34)
+#define TEGRA_WAKE_GPIO_PI6 (1ull << 35)
+#define TEGRA_WAKE_GPIO_PBB1 (1ull << 36)
+#define TEGRA_WAKE_USB3_ID (1ull << 37)
+#define TEGRA_WAKE_USB3_VBUS (1ull << 38)
+
+#endif
diff --git a/arch/arm/mach-tegra/wdt-recovery.c b/arch/arm/mach-tegra/wdt-recovery.c
new file mode 100644
index 000000000000..537b1c0db853
--- /dev/null
+++ b/arch/arm/mach-tegra/wdt-recovery.c
@@ -0,0 +1,131 @@
+/*
+ * arch/arm/mach-tegra/wdt-recovery.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/resource.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/time.h>
+#include <asm/localtimer.h>
+
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/clk.h>
+#include <mach/io.h>
+
+static int wdt_heartbeat = 30;
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
+#define TIMER_PTV 0
+ #define TIMER_EN (1 << 31)
+ #define TIMER_PERIODIC (1 << 30)
+#define TIMER_PCR 0x4
+ #define TIMER_PCR_INTR (1 << 30)
+#define WDT_CFG (0)
+ #define WDT_CFG_TMR_SRC (7 << 0) /* for TMR7. */
+ #define WDT_CFG_PERIOD (1 << 4)
+ #define WDT_CFG_INT_EN (1 << 12)
+ #define WDT_CFG_SYS_RST_EN (1 << 14)
+ #define WDT_CFG_PMC2CAR_RST_EN (1 << 15)
+#define WDT_CMD (8)
+ #define WDT_CMD_START_COUNTER (1 << 0)
+ #define WDT_CMD_DISABLE_COUNTER (1 << 1)
+#define WDT_UNLOCK (0xC)
+ #define WDT_UNLOCK_PATTERN (0xC45A << 0)
+
+static void __iomem *wdt_timer = IO_ADDRESS(TEGRA_TMR7_BASE);
+static void __iomem *wdt_source = IO_ADDRESS(TEGRA_WDT0_BASE);
+
+static void tegra_wdt_reset_enable(void)
+{
+ u32 val;
+
+ writel(TIMER_PCR_INTR, wdt_timer + TIMER_PCR);
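+ /*
+ * Timer period in microseconds; heartbeat/4 because the watchdog
+ * only asserts the PMC reset after the fourth expiration of its
+ * source timer.
+ */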
+ val = (wdt_heartbeat * 1000000ul) / 4;
+ val |= (TIMER_EN | TIMER_PERIODIC);
+ writel(val, wdt_timer + TIMER_PTV);
+
+ val = WDT_CFG_TMR_SRC | WDT_CFG_PERIOD | /*WDT_CFG_INT_EN |*/
+ /*WDT_CFG_SYS_RST_EN |*/ WDT_CFG_PMC2CAR_RST_EN;
+ writel(val, wdt_source + WDT_CFG);
+ writel(WDT_CMD_START_COUNTER, wdt_source + WDT_CMD);
+ pr_info("%s: WDT Recovery Enabled\n", __func__);
+}
+
+static int tegra_wdt_reset_disable(void)
+{
+ writel(TIMER_PCR_INTR, wdt_timer + TIMER_PCR);
+ writel(WDT_UNLOCK_PATTERN, wdt_source + WDT_UNLOCK);
+ writel(WDT_CMD_DISABLE_COUNTER, wdt_source + WDT_CMD);
+
+ writel(0, wdt_timer + TIMER_PTV);
+ pr_info("%s: WDT Recovery Disabled\n", __func__);
+
+ return 0;
+}
+#elif defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
+static void tegra_wdt_reset_enable(void)
+{
+}
+static int tegra_wdt_reset_disable(void)
+{
+ return 0;
+}
+#endif
+
+static int tegra_pm_notify(struct notifier_block *nb,
+ unsigned long event, void *nouse)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ tegra_wdt_reset_enable();
+ break;
+ case PM_POST_SUSPEND:
+ tegra_wdt_reset_disable();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_wdt_notify = {
+ .notifier_call = tegra_pm_notify,
+};
+
+static struct syscore_ops tegra_wdt_syscore_ops = {
+ .suspend = tegra_wdt_reset_disable,
+ .resume = tegra_wdt_reset_enable,
+};
+
+void __init tegra_wdt_recovery_init(void)
+{
+#ifdef CONFIG_PM
+ /* Register PM notifier. */
+ register_pm_notifier(&tegra_wdt_notify);
+#endif
+ register_syscore_ops(&tegra_wdt_syscore_ops);
+}
diff --git a/arch/arm/mach-tegra/wdt-recovery.h b/arch/arm/mach-tegra/wdt-recovery.h
new file mode 100644
index 000000000000..e26c1d038574
--- /dev/null
+++ b/arch/arm/mach-tegra/wdt-recovery.h
@@ -0,0 +1,17 @@
+/*
+ * arch/arm/mach-tegra/wdt-recovery.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+void __init tegra_wdt_recovery_init(void);
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index bca7e61928c7..47e2e3ba1902 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
- mmap.o pgd.o mmu.o vmregion.o
+ mmap.o pgd.o mmu.o vmregion.o pageattr.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9ecfdb511951..0dddb54ea986 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,6 +29,16 @@ static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
+static u32 l2x0_cache_id;
+static unsigned int l2x0_sets;
+static unsigned int l2x0_ways;
+
+static inline bool is_pl310_rev(int rev)
+{
+ return (l2x0_cache_id &
+ (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
+ (L2X0_CACHE_ID_PART_L310 | rev);
+}
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
@@ -120,6 +130,23 @@ static void l2x0_cache_sync(void)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
+#ifdef CONFIG_PL310_ERRATA_727915
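+/*
+ * PL310 erratum 727915 workaround: walk every set/way and issue the
+ * per-index maintenance operation instead of the background
+ * clean/invalidate-by-way, dropping the lock between ways to bound
+ * interrupt latency.
+ */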
+static void l2x0_for_each_set_way(void __iomem *reg)
+{
+ int set;
+ int way;
+ unsigned long flags;
+
+ for (way = 0; way < l2x0_ways; way++) {
+ spin_lock_irqsave(&l2x0_lock, flags);
+ for (set = 0; set < l2x0_sets; set++)
+ writel_relaxed((way << 28) | (set << 5), reg);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+}
+#endif
+
static void __l2x0_flush_all(void)
{
debug_writel(0x03);
@@ -133,6 +160,13 @@ static void l2x0_flush_all(void)
{
unsigned long flags;
+#ifdef CONFIG_PL310_ERRATA_727915
+ if (is_pl310_rev(REV_PL310_R2P0)) {
+ l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
+ return;
+ }
+#endif
+
/* clean all ways */
spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
@@ -143,11 +177,20 @@ static void l2x0_clean_all(void)
{
unsigned long flags;
+#ifdef CONFIG_PL310_ERRATA_727915
+ if (is_pl310_rev(REV_PL310_R2P0)) {
+ l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
+ return;
+ }
+#endif
+
/* clean all ways */
spin_lock_irqsave(&l2x0_lock, flags);
+ debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
+ debug_writel(0x00);
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -266,6 +309,16 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
+/* enables l2x0 after l2x0_disable, does not invalidate */
+void l2x0_enable(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(1, l2x0_base + L2X0_CTRL);
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
static void l2x0_disable(void)
{
unsigned long flags;
@@ -296,50 +349,49 @@ static void __init l2x0_unlock(__u32 cache_id)
}
}
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
- __u32 cache_id;
__u32 way_size = 0;
- int ways;
const char *type;
l2x0_base = base;
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+ l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
- ways = 16;
+ l2x0_ways = 16;
else
- ways = 8;
+ l2x0_ways = 8;
type = "L310";
break;
case L2X0_CACHE_ID_PART_L210:
- ways = (aux >> 13) & 0xf;
+ l2x0_ways = (aux >> 13) & 0xf;
type = "L210";
break;
default:
/* Assume unknown chips have 8 ways */
- ways = 8;
+ l2x0_ways = 8;
type = "L2x0 series";
break;
}
- l2x0_way_mask = (1 << ways) - 1;
+ l2x0_way_mask = (1 << l2x0_ways) - 1;
/*
* L2 cache Size = Way size * Number of ways
*/
way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
- way_size = 1 << (way_size + 3);
- l2x0_size = ways * way_size * SZ_1K;
+ way_size = SZ_1K << (way_size + 3);
+ l2x0_size = l2x0_ways * way_size;
+ l2x0_sets = way_size / CACHE_LINE_SIZE;
/*
* Check if l2x0 controller is already enabled.
@@ -348,7 +400,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
/* Make sure that I&D is not locked down when starting */
- l2x0_unlock(cache_id);
+ l2x0_unlock(l2x0_cache_id);
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -368,7 +420,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.disable = l2x0_disable;
outer_cache.set_debug = l2x0_set_debug;
- printk(KERN_INFO "%s cache controller enabled\n", type);
- printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
- ways, cache_id, aux, l2x0_size);
+ pr_info_once("%s cache controller enabled\n", type);
+ pr_info_once("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+ l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a33a4d..2edb6f67f69d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -272,6 +272,11 @@ v6_dma_clean_range:
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+ sub r2, r1, r0
+ cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+ bhi v6_dma_flush_dcache_all
+#endif
#ifdef CONFIG_DMA_CACHE_RWFO
ldrb r2, [r0] @ read for ownership
strb r2, [r0] @ write for ownership
@@ -294,6 +299,18 @@ ENTRY(v6_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+ mov r0, #0
+#ifdef HARVARD_CACHE
+ mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
+#else
+ mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
+#endif
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mov pc, lr
+#endif
+
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 07c4bc8ea0a4..963325eb083e 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -33,27 +33,28 @@ ENTRY(v7_flush_icache_all)
ENDPROC(v7_flush_icache_all)
/*
- * v7_flush_dcache_all()
+ * v7_op_dcache_all op
*
- * Flush the whole D-cache.
+ * op=c14, Flush the whole D-cache.
+ * op=c10, Clean the whole D-cache.
*
* Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7_flush_dcache_all)
+.macro v7_op_dcache_all op @ op=c10 clean, op=c14 flush
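+@ Numbered local labels replace the old named ones so the macro can be
+@ expanded more than once without duplicate-symbol errors.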
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
- beq finished @ if loc is 0, then no need to clean
+ beq 1005f @ if loc is 0, then no need to clean
mov r10, #0 @ start clean at cache level 0
-loop1:
+1001:
add r2, r10, r10, lsr #1 @ work out 3x current cache level
mov r1, r0, lsr r2 @ extract cache type bits from clidr
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
- blt skip @ skip if no cache, or just i-cache
+ blt 1004f @ skip if no cache, or just i-cache
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sych the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
@@ -64,32 +65,40 @@ loop1:
clz r5, r4 @ find bit position of way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 @ extract max number of the index size
-loop2:
+1002:
mov r9, r4 @ create working copy of max way size
-loop3:
+1003:
ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
THUMB( lsl r6, r9, r5 )
THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
THUMB( lsl r6, r7, r2 )
THUMB( orr r11, r11, r6 ) @ factor index number into r11
- mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+ mcr p15, 0, r11, c7, \op, 2 @ op=c10/c14, clean/flush by set/way
subs r9, r9, #1 @ decrement the way
- bge loop3
+ bge 1003b
subs r7, r7, #1 @ decrement the index
- bge loop2
-skip:
+ bge 1002b
+1004:
add r10, r10, #2 @ increment cache number
cmp r3, r10
- bgt loop1
-finished:
+ bgt 1001b
+1005:
 mov r10, #0 @ switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
dsb
isb
mov pc, lr
+.endm
+
+ENTRY(v7_flush_dcache_all)
+ v7_op_dcache_all c14
ENDPROC(v7_flush_dcache_all)
+ENTRY(v7_clean_dcache_all)
+ v7_op_dcache_all c10
+ENDPROC(v7_clean_dcache_all)
+
/*
* v7_flush_cache_all()
*
@@ -114,6 +123,24 @@ ENTRY(v7_flush_kern_cache_all)
ENDPROC(v7_flush_kern_cache_all)
/*
+ * v7_clean_kern_cache_all()
+ */
+ENTRY(v7_clean_kern_cache_all)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ bl v7_clean_dcache_all
+ mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
+#else
+ mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
+#endif
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(v7_clean_kern_cache_all)
+
+/*
* v7_flush_cache_all()
*
* Flush all TLB entries in a particular address space
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c3ff82f92d9c..9cd5334019e4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -310,6 +310,13 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
struct page *page;
void *addr;
+ /* Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform--see CONFIG_HUGETLB_PAGE. */
+ gfp &= ~(__GFP_COMP);
+
*handle = ~0;
size = PAGE_ALIGN(size);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c8..4fa9c246ae93 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -554,6 +554,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
const struct mem_type *type)
{
pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long pages_2m = 0, pages_4k = 0;
+ unsigned long stash_phys = phys;
/*
* Try a section mapping - end, addr and phys must all be aligned
@@ -564,6 +566,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
if (((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
+ pages_2m = (end - addr) >> (PGDIR_SHIFT);
+
if (addr & SECTION_SIZE)
pmd++;
@@ -574,12 +578,18 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
flush_pmd_entry(p);
} else {
+ pages_4k = (end - addr) >> PAGE_SHIFT;
/*
* No need to loop; pte's aren't interested in the
* individual L1 entries.
*/
alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
}
+
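+ /* Only account direct-mapped lowmem in the DirectMap counters */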
+ if ((stash_phys >= PHYS_OFFSET) && (stash_phys < lowmem_limit)) {
+ update_page_count(PG_LEVEL_2M, pages_2m);
+ update_page_count(PG_LEVEL_4K, pages_4k);
+ }
}
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
@@ -757,7 +767,7 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t lowmem_limit;
void __init sanity_check_meminfo(void)
{
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 000000000000..098b957454f6
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,998 @@
+/*
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ * Thanks to Ben LaHaise for precious feedback.
+ */
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pfn.h>
+#include <linux/percpu.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+#ifdef CPA_DEBUG
+#define cpa_debug(x, ...) printk(x, __VA_ARGS__)
+#else
+#define cpa_debug(x, ...)
+#endif
+
+/*
+ * The current flushing context - we pass it instead of 5 arguments:
+ */
+struct cpa_data {
+ unsigned long *vaddr;
+ pgprot_t mask_set;
+ pgprot_t mask_clr;
+ int numpages;
+ int flags;
+ unsigned long pfn;
+ unsigned force_split:1;
+ int curpage;
+ struct page **pages;
+};
+
+/*
+ * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
+ * using cpa_lock, so that no other CPU with stale large TLB entries can
+ * change page attributes in parallel while another CPU is splitting a large
+ * page entry and changing its attributes.
+ */
+static DEFINE_MUTEX(cpa_lock);
+
+#define CPA_FLUSHTLB 1
+#define CPA_ARRAY 2
+#define CPA_PAGES_ARRAY 4
+
+#ifdef CONFIG_PROC_FS
+static unsigned long direct_pages_count[PG_LEVEL_NUM];
+
+void update_page_count(int level, unsigned long pages)
+{
+ unsigned long flags;
+
+ /* Protect against CPA */
+ spin_lock_irqsave(&pgd_lock, flags);
+ direct_pages_count[level] += pages;
+ spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+static void split_page_count(int level)
+{
+ direct_pages_count[level]--;
+ direct_pages_count[level - 1] += PTRS_PER_PTE;
+}
+
+void arch_report_meminfo(struct seq_file *m)
+{
+ seq_printf(m, "DirectMap4k: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_4K] << 2);
+ seq_printf(m, "DirectMap2M: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_2M] << 11);
+}
+#else
+static inline void split_page_count(int level) { }
+#endif
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+# define debug_pagealloc 1
+#else
+# define debug_pagealloc 0
+#endif
+
+static inline int
+within(unsigned long addr, unsigned long start, unsigned long end)
+{
+ return addr >= start && addr < end;
+}
+
+static void cpa_flush_range(unsigned long start, int numpages, int cache)
+{
+ unsigned int i, level;
+ unsigned long addr;
+
+ BUG_ON(irqs_disabled());
+ WARN_ON(PAGE_ALIGN(start) != start);
+
+ flush_tlb_kernel_range(start, start + (numpages << PAGE_SHIFT));
+
+ if (!cache)
+ return;
+
+ for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+ pte_t *pte = lookup_address(addr, &level);
+
+ /*
+ * Only flush present addresses:
+ */
+ if (pte && pte_present(*pte)) {
+ __cpuc_flush_dcache_area((void *) addr, PAGE_SIZE);
+ outer_flush_range(__pa((void *)addr),
+ __pa((void *)addr) + PAGE_SIZE);
+ }
+ }
+}
+
+static void cpa_flush_array(unsigned long *start, int numpages, int cache,
+ int in_flags, struct page **pages)
+{
+ unsigned int i, level;
+
+ BUG_ON(irqs_disabled());
+
+ for (i = 0; i < numpages; i++) {
+ unsigned long addr;
+ pte_t *pte;
+
+ if (in_flags & CPA_PAGES_ARRAY)
+ addr = (unsigned long)page_address(pages[i]);
+ else
+ addr = start[i];
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ if (cache) {
+
+ pte = lookup_address(addr, &level);
+
+ /*
+ * Only flush present addresses:
+ */
+ if (pte && pte_present(*pte)) {
+ __cpuc_flush_dcache_area((void *)addr,
+ PAGE_SIZE);
+ outer_flush_range(__pa((void *)addr),
+ __pa((void *)addr) + PAGE_SIZE);
+ }
+ }
+ }
+}
+
+/*
+ * Certain areas of memory require very specific protection flags,
+ * for example the kernel text. Callers don't always get this
+ * right so this function checks and fixes these known static
+ * required protection bits.
+ */
+static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+ unsigned long pfn)
+{
+ pgprot_t forbidden = __pgprot(0);
+
+ /*
+ * The kernel text needs to be executable for obvious reasons.
+ * This does not cover __inittext since that is discarded later on.
+ */
+ if (within(address, (unsigned long)_text, (unsigned long)_etext))
+ pgprot_val(forbidden) |= L_PTE_XN;
+
+ /*
+ * The .rodata section needs to be read-only. Using the pfn
+ * catches all aliases.
+ */
+ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+ prot |= L_PTE_RDONLY;
+
+ /*
+ * Mask off the forbidden bits and set the bits that are needed
+ */
+ prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+
+
+ return prot;
+}
+
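+/*
+ * Translate Linux PTE protection bits into the equivalent ARM section
+ * (PMD) descriptor bits; the extended AP/APX/TEX bits are carried
+ * across from ext_prot.
+ */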
+static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
+ unsigned long ext_prot)
+{
+ pgprot_t ref_prot;
+
+ ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+
+ if (pte & L_PTE_MT_BUFFERABLE)
+ ref_prot |= PMD_SECT_BUFFERABLE;
+
+ if (pte & L_PTE_MT_WRITETHROUGH)
+ ref_prot |= PMD_SECT_CACHEABLE;
+
+ if (pte & L_PTE_SHARED)
+ ref_prot |= PMD_SECT_S;
+
+ if (pte & L_PTE_XN)
+ ref_prot |= PMD_SECT_XN;
+
+ if (pte & L_PTE_RDONLY)
+ ref_prot &= ~PMD_SECT_AP_WRITE;
+
+ ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX |
+ PTE_EXT_NG | (7 << 6))) << 6;
+
+ return ref_prot;
+}
+
+static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd,
+ unsigned long *ext_prot)
+{
+ pgprot_t ref_prot = 0;
+
+ ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY;
+
+ if (pmd & PMD_SECT_BUFFERABLE)
+ ref_prot |= L_PTE_MT_BUFFERABLE;
+
+ if (pmd & PMD_SECT_CACHEABLE)
+ ref_prot |= L_PTE_MT_WRITETHROUGH;
+
+ if (pmd & PMD_SECT_S)
+ ref_prot |= L_PTE_SHARED;
+
+ if (pmd & PMD_SECT_XN)
+ ref_prot |= L_PTE_XN;
+
+ if (pmd & PMD_SECT_AP_WRITE)
+ ref_prot &= ~L_PTE_RDONLY;
+
+ /* AP/APX/TEX bits */
+ *ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
+ PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6;
+
+ return ref_prot;
+}
+
+/*
+ * Lookup the page table entry for a virtual address. Return a pointer
+ * to the entry and the level of the mapping.
+ *
+ * Note: We return pud and pmd either when the entry is marked large
+ * or when the present bit is not set. Otherwise we would return a
+ * pointer to a nonexisting mapping.
+ */
+pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+ pgd_t *pgd = pgd_offset_k(address);
+ pte_t *pte;
+ pmd_t *pmd;
+
+ /* pmds are folded into pgds on ARM */
+ *level = PG_LEVEL_NONE;
+
+ if (pgd == NULL || pgd_none(*pgd))
+ return NULL;
+
+ pmd = pmd_offset(pgd, address);
+
+ if (pmd == NULL || pmd_none(*pmd) || !pmd_present(*pmd))
+ return NULL;
+
+ if (((pmd_val(*pmd) & (PMD_TYPE_SECT | PMD_SECT_SUPER))
+ == (PMD_TYPE_SECT | PMD_SECT_SUPER)) || !pmd_present(*pmd)) {
+
+ return NULL;
+ } else if (pmd_val(*pmd) & PMD_TYPE_SECT) {
+
+ *level = PG_LEVEL_2M;
+ return (pte_t *)pmd;
+ }
+
+ pte = pte_offset_kernel(pmd, address);
+
+ if ((pte == NULL) || pte_none(*pte))
+ return NULL;
+
+ *level = PG_LEVEL_4K;
+
+ return pte;
+}
+EXPORT_SYMBOL_GPL(lookup_address);
+
+/*
+ * Set the new pmd in all the pgds we know about:
+ */
+static void __set_pmd_pte(pmd_t *pmd, unsigned long address, pte_t *pte)
+{
+ struct page *page;
+
+ cpa_debug("__set_pmd_pte %x %x %x\n", pmd, pte, *pte);
+
+ /* change init_mm */
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ /* change entry in all the pgd's */
+ list_for_each_entry(page, &pgd_list, lru) {
+ cpa_debug("list %x %x %x\n", (unsigned long)page,
+ (unsigned long)pgd_index(address), address);
+ pmd = pmd_offset(((pgd_t *)page_address(page)) +
+ pgd_index(address), address);
+ pmd_populate_kernel(NULL, pmd, pte);
+ }
+
+}
+
+static int
+try_preserve_large_page(pte_t *kpte, unsigned long address,
+ struct cpa_data *cpa)
+{
+ unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+ pte_t old_pte, *tmp;
+ pgprot_t old_prot, new_prot, ext_prot, req_prot;
+ int i, do_split = 1;
+ unsigned int level;
+
+ if (cpa->force_split)
+ return 1;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ /*
+ * Check for races, another CPU might have split this page
+ * up already:
+ */
+ tmp = lookup_address(address, &level);
+ if (tmp != kpte)
+ goto out_unlock;
+
+ switch (level) {
+
+ case PG_LEVEL_2M:
+ psize = PMD_SIZE;
+ pmask = PMD_MASK;
+ break;
+
+ default:
+ do_split = -EINVAL;
+ goto out_unlock;
+ }
+
+ /*
+ * Calculate the number of pages, which fit into this large
+ * page starting at address:
+ */
+ nextpage_addr = (address + psize) & pmask;
+ numpages = (nextpage_addr - address) >> PAGE_SHIFT;
+ if (numpages < cpa->numpages)
+ cpa->numpages = numpages;
+
+ old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte),
+ &ext_prot);
+
+ pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
+ pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
+
+ /*
+ * The pmd entry holds the large page base pfn, so we need
+ * to add the offset of the virtual address:
+ */
+ pfn = pmd_pfn(*kpte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+ cpa->pfn = pfn;
+
+ new_prot = static_protections(req_prot, address, pfn);
+
+ /*
+ * We need to check the full range, i.e. whether
+ * static_protections() requires a different pgprot for any of
+ * the pages in the range we try to preserve:
+ */
+ addr = address & pmask;
+ pfn = pmd_pfn(*kpte);
+ for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
+ pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
+
+ if (pgprot_val(chk_prot) != pgprot_val(new_prot))
+ goto out_unlock;
+ }
+
+ /*
+ * If there are no changes, return. cpa->numpages has been
+ * updated above:
+ */
+ if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
+ do_split = 0;
+ goto out_unlock;
+ }
+
+ /*
+ * convert prot to pmd format
+ */
+ new_prot = pte_to_pmd_pgprot(new_prot, ext_prot);
+
+ /*
+ * We need to change the attributes. Check, whether we can
+ * change the large page in one go. We request a split, when
+ * the address is not aligned and the number of pages is
+ * smaller than the number of pages in the large page. Note
+ * that we limited the number of possible pages already to
+ * the number of pages in the large page.
+ */
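+ /*
+ * For example, a request starting at the 2MB section base and
+ * covering all 512 pages is rewritten in place below; an unaligned
+ * or shorter request leaves do_split at 1 and the caller splits
+ * the section.
+ */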
+ if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
+ /*
+ * The address is aligned and the number of pages
+ * covers the full page.
+ */
+ phys_addr_t phys = __pfn_to_phys(pmd_pfn(*kpte));
+ pmd_t *p = (pmd_t *)kpte;
+
+ *kpte++ = __pmd(phys | new_prot);
+ *kpte = __pmd((phys + SECTION_SIZE) | new_prot);
+ flush_pmd_entry(p);
+ cpa->flags |= CPA_FLUSHTLB;
+ do_split = 0;
+ cpa_debug("preserving page at phys %x pmd %x\n", phys, p);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&pgd_lock, flags);
+
+ return do_split;
+}
+
+static int split_large_page(pte_t *kpte, unsigned long address)
+{
+ unsigned long flags, pfn, pfninc = 1;
+ unsigned int i, level;
+ pte_t *pbase, *tmp;
+ pgprot_t ref_prot = 0, ext_prot = 0;
+ int ret = 0;
+
+ pbase = pte_alloc_one_kernel(&init_mm, address);
+ if (!pbase)
+ return -ENOMEM;
+
+ cpa_debug("split_large_page %x PMD %x new pte @ %x\n", address,
+ *kpte, pbase);
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ /*
+ * Check for races, another CPU might have split this page
+ * up for us already:
+ */
+ tmp = lookup_address(address, &level);
+ if (tmp != kpte)
+ goto out_unlock;
+
+ /*
+ * we only split 2MB entries for now
+ */
+ if (level != PG_LEVEL_2M) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);
+
+ /*
+ * Get the target pfn from the original entry:
+ */
+ pfn = pmd_pfn(*kpte);
+ for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
+ set_pte_ext(&pbase[i], pfn_pte(pfn, ref_prot), ext_prot);
+
+ if (address >= (unsigned long)__va(0) &&
+ address < (unsigned long)__va(lowmem_limit))
+ split_page_count(level);
+
+ /*
+ * Install the new, split up pagetable.
+ */
+ __set_pmd_pte((pmd_t *)kpte, address, pbase);
+
+ pbase = NULL;
+
+out_unlock:
+ /*
+ * If we dropped out via the lookup_address check under
+ * pgd_lock then stick the page back into the pool:
+ */
+ if (pbase)
+ pte_free_kernel(&init_mm, pbase);
+
+ spin_unlock_irqrestore(&pgd_lock, flags);
+
+ return ret;
+}
+
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+ int primary)
+{
+ /*
+ * Ignore all non-primary paths.
+ */
+ if (!primary)
+ return 0;
+
+ /*
+ * Ignore the NULL PTE for kernel identity mapping, as it is expected
+ * to have holes.
+ * Also set numpages to '1' indicating that we processed cpa req for
+ * one virtual address page and its pfn. TBD: numpages can be set based
+ * on the initial value and the level returned by lookup_address().
+ */
+ if (within(vaddr, PAGE_OFFSET,
+ PAGE_OFFSET + lowmem_limit)) {
+ cpa->numpages = 1;
+ cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+ return 0;
+ } else {
+ WARN(1, KERN_WARNING "CPA: called for zero pte. "
+ "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+ *cpa->vaddr);
+
+ return -EFAULT;
+ }
+}
+
+static int __change_page_attr(struct cpa_data *cpa, int primary)
+{
+ unsigned long address;
+ int do_split, err;
+ unsigned int level;
+ pte_t *kpte, old_pte;
+
+ if (cpa->flags & CPA_PAGES_ARRAY) {
+ struct page *page = cpa->pages[cpa->curpage];
+
+ if (unlikely(PageHighMem(page)))
+ return 0;
+
+ address = (unsigned long)page_address(page);
+
+ } else if (cpa->flags & CPA_ARRAY)
+ address = cpa->vaddr[cpa->curpage];
+ else
+ address = *cpa->vaddr;
+
+repeat:
+ kpte = lookup_address(address, &level);
+ if (!kpte)
+ return __cpa_process_fault(cpa, address, primary);
+
+ old_pte = *kpte;
+ if (!pte_val(old_pte))
+ return __cpa_process_fault(cpa, address, primary);
+
+ if (level == PG_LEVEL_4K) {
+ pte_t new_pte;
+ pgprot_t new_prot = pte_pgprot(old_pte);
+ unsigned long pfn = pte_pfn(old_pte);
+
+ pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
+ pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
+
+ new_prot = static_protections(new_prot, address, pfn);
+
+ /*
+ * We need to keep the pfn from the existing PTE,
+ * after all we're only going to change its attributes,
+ * not the memory it points to.
+ */
+ new_pte = pfn_pte(pfn, new_prot);
+ cpa->pfn = pfn;
+
+ /*
+ * Do we really change anything ?
+ */
+ if (pte_val(old_pte) != pte_val(new_pte)) {
+ set_pte_ext(kpte, new_pte, 0);
+ /*
+ * FIXME : is this needed on arm?
+ * set_pte_ext already does a flush
+ */
+ cpa->flags |= CPA_FLUSHTLB;
+ }
+ cpa->numpages = 1;
+ return 0;
+ }
+
+ /*
+ * Check, whether we can keep the large page intact
+ * and just change the pte:
+ */
+ do_split = try_preserve_large_page(kpte, address, cpa);
+
+ /*
+ * When the range fits into the existing large page,
+ * return. cpa->numpages and cpa->flags have been updated in
+ * try_preserve_large_page():
+ */
+ if (do_split <= 0)
+ return do_split;
+
+ /*
+ * We have to split the large page:
+ */
+ err = split_large_page(kpte, address);
+
+ if (!err) {
+ /*
+ * Do a global flush tlb after splitting the large page
+ * and before we do the actual change page attribute in the PTE.
+ *
+ * Without this, we violate the TLB application note, which says
+ * "The TLBs may contain both ordinary and large-page
+ * translations for a 4-KByte range of linear addresses. This
+ * may occur if software modifies the paging structures so that
+ * the page size used for the address range changes. If the two
+ * translations differ with respect to page frame or attributes
+ * (e.g., permissions), processor behavior is undefined and may
+ * be implementation-specific."
+ *
+ * We do this global tlb flush inside the cpa_lock, so that we
+ * don't allow any other cpu with stale tlb entries to change the
+ * page attributes in parallel for an address that falls into the
+ * just-split large page entry.
+ */
+ flush_tlb_all();
+ goto repeat;
+ }
+
+ return err;
+}
+
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
+
+static int cpa_process_alias(struct cpa_data *cpa)
+{
+ struct cpa_data alias_cpa;
+ unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+ unsigned long vaddr;
+ int ret;
+
+ if (cpa->pfn >= (lowmem_limit >> PAGE_SHIFT))
+ return 0;
+
+ /*
+ * No need to redo, when the primary call touched the direct
+ * mapping already:
+ */
+ if (cpa->flags & CPA_PAGES_ARRAY) {
+ struct page *page = cpa->pages[cpa->curpage];
+ if (unlikely(PageHighMem(page)))
+ return 0;
+ vaddr = (unsigned long)page_address(page);
+ } else if (cpa->flags & CPA_ARRAY)
+ vaddr = cpa->vaddr[cpa->curpage];
+ else
+ vaddr = *cpa->vaddr;
+
+ if (!(within(vaddr, PAGE_OFFSET,
+ PAGE_OFFSET + lowmem_limit))) {
+
+ alias_cpa = *cpa;
+ alias_cpa.vaddr = &laddr;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+
+ ret = __change_page_attr_set_clr(&alias_cpa, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
+{
+ int ret, numpages = cpa->numpages;
+
+ while (numpages) {
+ /*
+ * Store the remaining nr of pages for the large page
+ * preservation check.
+ */
+ cpa->numpages = numpages;
+ /* for array changes, we can't use large page */
+ if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
+ cpa->numpages = 1;
+
+ if (!debug_pagealloc)
+ mutex_lock(&cpa_lock);
+ ret = __change_page_attr(cpa, checkalias);
+ if (!debug_pagealloc)
+ mutex_unlock(&cpa_lock);
+ if (ret)
+ return ret;
+
+ if (checkalias) {
+ ret = cpa_process_alias(cpa);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Adjust the number of pages with the result of the
+ * CPA operation. Either a large page has been
+ * preserved or a single page update happened.
+ */
+ BUG_ON(cpa->numpages > numpages);
+ numpages -= cpa->numpages;
+ if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+ cpa->curpage++;
+ else
+ *cpa->vaddr += cpa->numpages * PAGE_SIZE;
+ }
+ return 0;
+}
+
+static inline int cache_attr(pgprot_t attr)
+{
+ /*
+ * We need to flush the cache for all memory type changes
+ * except when a page is being marked write-back cacheable.
+ */
+ return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK);
+}
+
+static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+ pgprot_t mask_set, pgprot_t mask_clr,
+ int force_split, int in_flag,
+ struct page **pages)
+{
+ struct cpa_data cpa;
+ int ret, cache, checkalias;
+ unsigned long baddr = 0;
+
+ if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
+ return 0;
+
+ /* Ensure we are PAGE_SIZE aligned */
+ if (in_flag & CPA_ARRAY) {
+ int i;
+ for (i = 0; i < numpages; i++) {
+ if (addr[i] & ~PAGE_MASK) {
+ addr[i] &= PAGE_MASK;
+ WARN_ON_ONCE(1);
+ }
+ }
+ } else if (!(in_flag & CPA_PAGES_ARRAY)) {
+ /*
+ * in_flag of CPA_PAGES_ARRAY implies it is aligned.
+ * No need to check in that case.
+ */
+ if (*addr & ~PAGE_MASK) {
+ *addr &= PAGE_MASK;
+ /*
+ * People should not be passing in unaligned addresses:
+ */
+ WARN_ON_ONCE(1);
+ }
+ /*
+ * Save address for cache flush. *addr is modified in the call
+ * to __change_page_attr_set_clr() below.
+ */
+ baddr = *addr;
+ }
+
+ /* Must avoid aliasing mappings in the highmem code */
+ kmap_flush_unused();
+
+ vm_unmap_aliases();
+
+ cpa.vaddr = addr;
+ cpa.pages = pages;
+ cpa.numpages = numpages;
+ cpa.mask_set = mask_set;
+ cpa.mask_clr = mask_clr;
+ cpa.flags = 0;
+ cpa.curpage = 0;
+ cpa.force_split = force_split;
+
+ if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
+ cpa.flags |= in_flag;
+
+ /* No alias checking for XN bit modifications */
+ checkalias = (pgprot_val(mask_set) |
+ pgprot_val(mask_clr)) != L_PTE_XN;
+
+ ret = __change_page_attr_set_clr(&cpa, checkalias);
+
+ /*
+ * Check whether we really changed something:
+ */
+ if (!(cpa.flags & CPA_FLUSHTLB))
+ goto out;
+
+ cache = cache_attr(mask_set);
+
+ if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+ cpa_flush_array(addr, numpages, cache,
+ cpa.flags, pages);
+ } else
+ cpa_flush_range(baddr, numpages, cache);
+
+out:
+ return ret;
+}
+
+static inline int change_page_attr_set(unsigned long *addr, int numpages,
+ pgprot_t mask, int array)
+{
+ return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
+ (array ? CPA_ARRAY : 0), NULL);
+}
+
+static inline int change_page_attr_clear(unsigned long *addr, int numpages,
+ pgprot_t mask, int array)
+{
+ return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
+ (array ? CPA_ARRAY : 0), NULL);
+}
+
+static inline int cpa_set_pages_array(struct page **pages, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
+ CPA_PAGES_ARRAY, pages);
+}
+
+static inline int cpa_clear_pages_array(struct page **pages, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
+ CPA_PAGES_ARRAY, pages);
+}
+
+int set_memory_uc(unsigned long addr, int numpages)
+{
+ return change_page_attr_set_clr(&addr, numpages,
+ __pgprot(L_PTE_MT_UNCACHED),
+ __pgprot(L_PTE_MT_MASK), 0, 0, NULL);
+}
+EXPORT_SYMBOL(set_memory_uc);
+
+int _set_memory_array(unsigned long *addr, int addrinarray,
+ unsigned long set, unsigned long clr)
+{
+ return change_page_attr_set_clr(addr, addrinarray, __pgprot(set),
+ __pgprot(clr), 0, CPA_ARRAY, NULL);
+}
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+ return _set_memory_array(addr, addrinarray,
+ L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
+}
+EXPORT_SYMBOL(set_memory_array_uc);
+
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+ return _set_memory_array(addr, addrinarray,
+ L_PTE_MT_BUFFERABLE, L_PTE_MT_MASK);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
+int set_memory_wc(unsigned long addr, int numpages)
+{
+ int ret;
+
+ ret = change_page_attr_set_clr(&addr, numpages,
+ __pgprot(L_PTE_MT_BUFFERABLE),
+ __pgprot(L_PTE_MT_MASK),
+ 0, 0, NULL);
+ return ret;
+}
+EXPORT_SYMBOL(set_memory_wc);
+
+int set_memory_wb(unsigned long addr, int numpages)
+{
+ return change_page_attr_set_clr(&addr, numpages,
+ __pgprot(L_PTE_MT_WRITEBACK),
+ __pgprot(L_PTE_MT_MASK),
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL(set_memory_wb);
+
+int set_memory_iwb(unsigned long addr, int numpages)
+{
+ return change_page_attr_set_clr(&addr, numpages,
+ __pgprot(L_PTE_MT_INNER_WB),
+ __pgprot(L_PTE_MT_MASK),
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL(set_memory_iwb);
+
+int set_memory_array_wb(unsigned long *addr, int addrinarray)
+{
+ return change_page_attr_set_clr(addr, addrinarray,
+ __pgprot(L_PTE_MT_WRITEBACK),
+ __pgprot(L_PTE_MT_MASK),
+ 0, CPA_ARRAY, NULL);
+
+}
+EXPORT_SYMBOL(set_memory_array_wb);
+
+int set_memory_array_iwb(unsigned long *addr, int addrinarray)
+{
+ return change_page_attr_set_clr(addr, addrinarray,
+ __pgprot(L_PTE_MT_INNER_WB),
+ __pgprot(L_PTE_MT_MASK),
+ 0, CPA_ARRAY, NULL);
+
+}
+EXPORT_SYMBOL(set_memory_array_iwb);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_page_attr_clear(&addr, numpages,
+ __pgprot(L_PTE_XN), 0);
+}
+EXPORT_SYMBOL(set_memory_x);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(&addr, numpages,
+ __pgprot(L_PTE_XN), 0);
+}
+EXPORT_SYMBOL(set_memory_nx);
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(&addr, numpages,
+ __pgprot(L_PTE_RDONLY), 0);
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_page_attr_clear(&addr, numpages,
+ __pgprot(L_PTE_RDONLY), 0);
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_np(unsigned long addr, int numpages)
+{
+ return change_page_attr_clear(&addr, numpages,
+ __pgprot(L_PTE_PRESENT), 0);
+}
+
+int set_memory_4k(unsigned long addr, int numpages)
+{
+ return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+ __pgprot(0), 1, 0, NULL);
+}
+
+static int _set_pages_array(struct page **pages, int addrinarray,
+ unsigned long set, unsigned long clr)
+{
+ return change_page_attr_set_clr(NULL, addrinarray,
+ __pgprot(set),
+ __pgprot(clr),
+ 0, CPA_PAGES_ARRAY, pages);
+}
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray,
+ L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
+}
+EXPORT_SYMBOL(set_pages_array_uc);
+
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray, L_PTE_MT_BUFFERABLE,
+ L_PTE_MT_MASK);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
+int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray,
+ L_PTE_MT_WRITEBACK, L_PTE_MT_MASK);
+}
+EXPORT_SYMBOL(set_pages_array_wb);
+
+int set_pages_array_iwb(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray,
+ L_PTE_MT_INNER_WB, L_PTE_MT_MASK);
+}
+EXPORT_SYMBOL(set_pages_array_iwb);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b2027c154b2a..3e9503bb7bf5 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -17,6 +17,23 @@
#include "mm.h"
+DEFINE_SPINLOCK(pgd_lock);
+LIST_HEAD(pgd_list);
+
+static inline void pgd_list_add(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+
+ list_add(&page->lru, &pgd_list);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+
+ list_del(&page->lru);
+}
+
/*
* need to get a 16k page for level 1
*/
@@ -26,6 +43,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pud_t *new_pud, *init_pud;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
+ unsigned long flags;
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
if (!new_pgd)
@@ -33,6 +51,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ spin_lock_irqsave(&pgd_lock, flags);
/*
* Copy over the kernel and IO PGD entries
*/
@@ -40,7 +59,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+#if !defined(CONFIG_CPU_CACHE_V7) || !defined(CONFIG_SMP)
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+#endif
+
+ pgd_list_add(new_pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
if (!vectors_high()) {
/*
@@ -74,6 +98,9 @@ no_pte:
no_pmd:
pud_free(mm, new_pud);
no_pud:
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(new_pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
@@ -85,10 +112,15 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
pud_t *pud;
pmd_t *pmd;
pgtable_t pte;
+ unsigned long flags;
if (!pgd_base)
return;
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd_base);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+
pgd = pgd_base + pgd_index(0);
if (pgd_none_or_clear_bad(pgd))
goto no_pgd;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 307a4def8d3a..87f8ee2ebf78 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -121,7 +121,7 @@
.long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB
.long 0x00 @ L_PTE_MT_MINICACHE (not present)
.long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
.long 0x00 @ unused
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9049c0764db2..e666e4fe029c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -176,7 +176,9 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3
+ tst r3, #0xf << 20 @ check the coherent walk bits
+ mcreq p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
@@ -212,38 +214,136 @@ ENDPROC(cpu_v7_set_pte_ext)
* NS1 = PRRR[19] = 1 - normal shareable property
* NOS = PRRR[24+n] = 1 - not outer shareable
*/
-.equ PRRR, 0xff0a81a8
-.equ NMRR, 0x40e040e0
+.equ PRRR, 0xff0a89a8
+.equ NMRR, 0xc0e044e0
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
-.equ cpu_v7_suspend_size, 4 * 9
+.equ cpu_v7_suspend_size, (4 * (10 + 4 + (16 * 2) + (16 * 2)))
+/* 10 CP15 registers
+ * 4 CP14 registers
+ * 16x2 CP14 breakpoint registers (maximum)
+ * 16x2 CP14 watchpoint registers (maximum)
+ */
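+/* i.e. cpu_v7_suspend_size = 4 * (10 + 4 + 32 + 32) = 312 bytes */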
+
+.macro save_brkpt cm
+ mrc p14, 0, r4, c0, \cm, 4
+ mrc p14, 0, r5, c0, \cm, 5
+ stmia r0!, {r4 - r5}
+.endm
+
+.macro restore_brkpt cm
+ ldmia r0!, {r4 - r5}
+ mcr p14, 0, r4, c0, \cm, 4
+ mcr p14, 0, r5, c0, \cm, 5
+.endm
+
+.macro save_wpt cm
+ mrc p14, 0, r4, c0, \cm, 6
+ mrc p14, 0, r5, c0, \cm, 7
+ stmia r0!, {r4 - r5}
+.endm
+
+.macro restore_wpt cm
+ ldmia r0!, {r4 - r5}
+ mcr p14, 0, r4, c0, \cm, 6
+ mcr p14, 0, r5, c0, \cm, 7
+.endm
+
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v7_do_suspend)
- stmfd sp!, {r4 - r11, lr}
+ stmfd sp!, {r0, r3 - r11, lr}
+ mrc p15, 0, r3, c15, c0, 1 @ diag
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 1 @ Context ID
mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID
- stmia r0!, {r4 - r6}
+ stmia r0!, {r3 - r6}
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 0 @ TTB 0
mrc p15, 0, r8, c2, c0, 1 @ TTB 1
mrc p15, 0, r9, c1, c0, 0 @ Control register
mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control
- stmia r0, {r6 - r11}
- ldmfd sp!, {r4 - r11, pc}
+ stmia r0!, {r6 - r11}
+
+ /* Save CP14 debug controller context */
+ mrc p14, 0, r4, c0, c1, 0 @ DSCR
+ mrc p14, 0, r5, c0, c6, 0 @ WFAR
+ mrc p14, 0, r6, c0, c7, 0 @ VCR
+ mrc p14, 0, r7, c7, c9, 6 @ CLAIM
+ stmia r0!, {r4-r7}
+
+ mrc p14, 0, r8, c0, c0, 0 @ read IDR
+ mov r3, r8, lsr #24
+ and r3, r3, #0xf @ r3 has the number of brkpt
+ rsb r3, r3, #0xf
+
+ /* r3 = (15 - #of brkpt) ;
+ switch offset = r3*12 - 4 = (r3*3 - 1)<<2
+ */
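+ /* e.g. r3 == 10 gives an offset of 116 bytes; as pc reads 8 bytes
+ ahead, the first ten save_brkpt expansions (120 bytes) are skipped
+ and execution resumes at save_brkpt c5, saving c5..c0 only.
+ */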
+ add r3, r3, r3, lsl #1
+ sub r3, r3, #1
+ add pc, pc, r3, lsl #2
+
+ save_brkpt c15
+ save_brkpt c14
+ save_brkpt c13
+ save_brkpt c12
+ save_brkpt c11
+ save_brkpt c10
+ save_brkpt c9
+ save_brkpt c8
+ save_brkpt c7
+ save_brkpt c6
+ save_brkpt c5
+ save_brkpt c4
+ save_brkpt c3
+ save_brkpt c2
+ save_brkpt c1
+ save_brkpt c0
+
+ mov r3, r8, lsr #28 @ r3 has the number of wpt
+ rsb r3, r3, #0xf
+
+ /* r3 = (15 - #of wpt) ;
+ switch offset = r3*12 - 4 = (r3*3 - 1)<<2
+ */
+ add r3, r3, r3, lsl #1
+ sub r3, r3, #1
+ add pc, pc, r3, lsl #2
+
+ save_wpt c15
+ save_wpt c14
+ save_wpt c13
+ save_wpt c12
+ save_wpt c11
+ save_wpt c10
+ save_wpt c9
+ save_wpt c8
+ save_wpt c7
+ save_wpt c6
+ save_wpt c5
+ save_wpt c4
+ save_wpt c3
+ save_wpt c2
+ save_wpt c1
+ save_wpt c0
+
+ ldmfd sp!, {r0, r3 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
ENTRY(cpu_v7_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- ldmia r0!, {r4 - r6}
+ ldmia r0!, {r3 - r6}
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ mcr p15, 0, r3, c15, c0, 1 @ diag
+#endif
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 1 @ Context ID
mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID
- ldmia r0, {r6 - r11}
+ ldmia r0!, {r6 - r11}
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
mcr p15, 0, r7, c2, c0, 0 @ TTB 0
mcr p15, 0, r8, c2, c0, 1 @ TTB 1
@@ -257,6 +357,74 @@ ENTRY(cpu_v7_do_resume)
mcr p15, 0, r4, c10, c2, 0 @ write PRRR
mcr p15, 0, r5, c10, c2, 1 @ write NMRR
isb
+
+ /* Restore CP14 debug controller context */
+
+ ldmia r0!, {r2 - r5}
+ mcr p14, 0, r3, c0, c6, 0 @ WFAR
+ mcr p14, 0, r4, c0, c7, 0 @ VCR
+ mcr p14, 0, r5, c7, c8, 6 @ CLAIM
+
+ mrc p14, 0, r8, c0, c0, 0 @ read IDR
+ mov r3, r8, lsr #24
+ and r3, r3, #0xf @ r3 has the number of brkpt
+ rsb r3, r3, #0xf
+
+ /* r3 = (15 - #of brkpt) ;
+ switch offset = r3*12 - 4 = (r3*3 - 1)<<2
+ */
+ add r3, r3, r3, lsl #1
+ sub r3, r3, #1
+ add pc, pc, r3, lsl #2
+
+ restore_brkpt c15
+ restore_brkpt c14
+ restore_brkpt c13
+ restore_brkpt c12
+ restore_brkpt c11
+ restore_brkpt c10
+ restore_brkpt c9
+ restore_brkpt c8
+ restore_brkpt c7
+ restore_brkpt c6
+ restore_brkpt c5
+ restore_brkpt c4
+ restore_brkpt c3
+ restore_brkpt c2
+ restore_brkpt c1
+ restore_brkpt c0
+
+ mov r3, r8, lsr #28 @ r3 has the number of wpt
+ rsb r3, r3, #0xf
+
+ /* r3 = (15 - #of wpt) ;
+ switch offset = r3*12 - 4 = (r3*3 - 1)<<2
+ */
+ add r3, r3, r3, lsl #1
+ sub r3, r3, #1
+ add pc, pc, r3, lsl #2
+
+start_restore_wpt:
+ restore_wpt c15
+ restore_wpt c14
+ restore_wpt c13
+ restore_wpt c12
+ restore_wpt c11
+ restore_wpt c10
+ restore_wpt c9
+ restore_wpt c8
+ restore_wpt c7
+ restore_wpt c6
+ restore_wpt c5
+ restore_wpt c4
+ restore_wpt c3
+ restore_wpt c2
+ restore_wpt c1
+ restore_wpt c0
+ isb
+
+ mcr p14, 0, r2, c0, c2, 2 @ DSCR
+ isb
dsb
mov r0, r9 @ control register
mov r2, r7, lsr #14 @ get TTB0 base
@@ -346,6 +514,17 @@ __v7_setup:
2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
bne 3f
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+ cmp r6, #0x10 @ power ctrl reg added r1p0
+ mrcge p15, 0, r10, c15, c0, 0 @ read power control register
+ orrge r10, r10, #1 @ enable dynamic clock gating
+ mcrge p15, 0, r10, c15, c0, 0 @ write power control register
+#ifdef CONFIG_ARM_ERRATA_720791
+ teq r5, #0x00100000 @ only present in r1p*
+ mrceq p15, 0, r10, c15, c0, 2 @ read "chicken power ctrl" reg
+ orreq r10, r10, #0x30 @ disable core clk gate on
+ mcreq p15, 0, r10, c15, c0, 2 @ instr-side waits
+#endif
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
@@ -365,6 +544,8 @@ __v7_setup:
teq r6, #0x20 @ present in r2p0
teqne r6, #0x21 @ present in r2p1
teqne r6, #0x22 @ present in r2p2
+ teqne r6, #0x27 @ present in r2p7
+ teqne r6, #0x29 @ present in r2p9
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
@@ -375,6 +556,13 @@ __v7_setup:
orrlt r10, r10, #1 << 11 @ set bit #11
mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
+#ifdef CONFIG_ARM_ERRATA_752520
+ cmp r6, #0x29 @ present prior to r2p9
+ mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrlt r10, r10, #1 << 20 @ set bit #20
+ mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#endif
3: mov r10, #0
#ifdef HARVARD_CACHE
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 755e1bf22681..1a2021cedc76 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -375,7 +375,7 @@ cpu_xsc3_mt_table:
.long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB (not present?)
.long 0x00 @ L_PTE_MT_MINICACHE (not present)
.long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?)
.long 0x00 @ unused
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index fbc06e55b87a..b0fe4b1e233d 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -484,7 +484,7 @@ cpu_xscale_mt_table:
.long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB
.long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE
.long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
.long 0x00 @ unused
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index ae6f99834cdd..138e24774f70 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -302,6 +302,10 @@ static int s3c_pm_enter(suspend_state_t state)
cpu_suspend(0, pm_cpu_sleep);
+ /* restore the cpu state using the kernel's cpu init code. */
+
+ cpu_init();
+
/* restore the system state */
s3c_pm_restore_core();
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 62cc8f981171..fe0b5e140472 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1113,3 +1113,5 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+tegra_enterprise MACH_TEGRA_ENTERPRISE TEGRA_ENTERPRISE 3512
+p852 MACH_P852 P852 3667
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903b83cf..c1a978402583 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -10,7 +10,7 @@
*
* Basic entry code, called from the kernel's undefined instruction trap.
* r0 = faulted instruction
- * r5 = faulted PC+4
+ * r2 = faulted PC+4
* r9 = successful return
* r10 = thread_info structure
* lr = failure return
@@ -26,6 +26,7 @@ ENTRY(do_vfp)
str r11, [r10, #TI_PREEMPT]
#endif
enable_irq
+ str r2, [sp, #S_PC] @ update regs->ARM_pc for Thumb 2 case
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
add r10, r10, #TI_VFPSTATE @ r10 = workspace
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 2d30c7f6edd3..404538ae591d 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -82,22 +82,19 @@ ENTRY(vfp_support_entry)
ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
cmp r4, r10 @ this thread owns the hw context?
-#ifndef CONFIG_SMP
- @ For UP, checking that this thread owns the hw context is
- @ sufficient to determine that the hardware state is valid.
beq vfp_hw_state_valid
- @ On UP, we lazily save the VFP context. As a different
- @ thread wants ownership of the VFP hardware, save the old
- @ state if there was a previous (valid) owner.
-
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@ rest of it
+#ifndef CONFIG_SMP
+ @ Save out the current registers to the old thread state
+ @ Not needed on SMP, where the state is always saved at
+ @ context switch rather than lazily here.
+
DBGSTR1 "save old state %p", r4
- cmp r4, #0 @ if the vfp_current_hw_state is NULL
- beq vfp_reload_hw @ then the hw state needs reloading
+ cmp r4, #0
+ beq no_old_VFP_process
VFPFSTMIA r4, r5 @ save the working registers
VFPFMRX r5, FPSCR @ current status
#ifndef CONFIG_CPU_FEROCEON
@@ -110,33 +107,11 @@ ENTRY(vfp_support_entry)
1:
#endif
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
-vfp_reload_hw:
-
-#else
- @ For SMP, if this thread does not own the hw context, then we
- @ need to reload it. No need to save the old state as on SMP,
- @ we always save the state when we switch away from a thread.
- bne vfp_reload_hw
-
- @ This thread has ownership of the current hardware context.
- @ However, it may have been migrated to another CPU, in which
- @ case the saved state is newer than the hardware context.
- @ Check this by looking at the CPU number which the state was
- @ last loaded onto.
- ldr ip, [r10, #VFP_CPU]
- teq ip, r11
- beq vfp_hw_state_valid
-
-vfp_reload_hw:
- @ We're loading this threads state into the VFP hardware. Update
- @ the CPU number which contains the most up to date VFP context.
- str r11, [r10, #VFP_CPU]
-
- VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
- @ exceptions, so we can get at the
- @ rest of it
+ @ and point r4 at the word at the
+ @ start of the register dump
#endif
+no_old_VFP_process:
DBGSTR1 "load state %p", r10
str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
@ Load the saved state back into the VFP
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 79bcb4316930..e381dc68505d 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -21,6 +21,7 @@
#include <asm/cputype.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>
+#include <asm/cpu_pm.h>
#include "vfpinstr.h"
#include "vfp.h"
@@ -35,51 +36,18 @@ void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
/*
- * Dual-use variable.
- * Used in startup: set to non-zero if VFP checks fail
- * After startup, holds VFP architecture
- */
-unsigned int VFP_arch;
-
-/*
* The pointer to the vfpstate structure of the thread which currently
* owns the context held in the VFP hardware, or NULL if the hardware
* context is invalid.
- *
- * For UP, this is sufficient to tell which thread owns the VFP context.
- * However, for SMP, we also need to check the CPU number stored in the
- * saved state too to catch migrations.
*/
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
- * Is 'thread's most up to date state stored in this CPUs hardware?
- * Must be called from non-preemptible context.
- */
-static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
-{
-#ifdef CONFIG_SMP
- if (thread->vfpstate.hard.cpu != cpu)
- return false;
-#endif
- return vfp_current_hw_state[cpu] == &thread->vfpstate;
-}
-
-/*
- * Force a reload of the VFP context from the thread structure. We do
- * this by ensuring that access to the VFP hardware is disabled, and
- * clear last_VFP_context. Must be called from non-preemptible context.
+ * Dual-use variable.
+ * Used in startup: set to non-zero if VFP checks fail
+ * After startup, holds VFP architecture
*/
-static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
-{
- if (vfp_state_in_hw(cpu, thread)) {
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- vfp_current_hw_state[cpu] = NULL;
- }
-#ifdef CONFIG_SMP
- thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
-}
+unsigned int VFP_arch;
/*
* Per-thread VFP initialization.
@@ -89,27 +57,21 @@ static void vfp_thread_flush(struct thread_info *thread)
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu;
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
/*
* Disable VFP to ensure we initialize it first. We must ensure
- * that the modification of vfp_current_hw_state[] and hardware
- * disable are done for the same CPU and without preemption.
- *
- * Do this first to ensure that preemption won't overwrite our
- * state saving should access to the VFP be enabled at this point.
+ * that the modification of vfp_current_hw_state[] and hardware disable
+ * are done for the same CPU and without preemption.
*/
cpu = get_cpu();
if (vfp_current_hw_state[cpu] == vfp)
vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
-
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-#ifdef CONFIG_SMP
- vfp->hard.cpu = NR_CPUS;
-#endif
}
static void vfp_thread_exit(struct thread_info *thread)
@@ -129,9 +91,6 @@ static void vfp_thread_copy(struct thread_info *thread)
vfp_sync_hwstate(parent);
thread->vfpstate = parent->vfpstate;
-#ifdef CONFIG_SMP
- thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
}
/*
@@ -177,8 +136,17 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
- if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+ vfp_current_hw_state[cpu]->hard.cpu = cpu;
+ }
+ /*
+ * Thread migration, just force the reloading of the
+ * state on the new CPU in case the VFP registers
+ * contain stale data.
+ */
+ if (thread->vfpstate.hard.cpu != cpu)
+ vfp_current_hw_state[cpu] = NULL;
#endif
/*
@@ -208,6 +176,35 @@ static struct notifier_block vfp_notifier_block = {
.notifier_call = vfp_notifier,
};
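+/*
+ * CPU PM notifier: save any VFP context this CPU owns before it enters
+ * a low-power state, and leave the VFP disabled afterwards so the
+ * context is lazily reloaded on next use.
+ */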
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ u32 fpexc = fmrx(FPEXC);
+ unsigned int cpu = smp_processor_id();
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ if (vfp_current_hw_state[cpu]) {
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+ /* force a reload when coming back from idle */
+ vfp_current_hw_state[cpu] = NULL;
+ fmxr(FPEXC, fpexc & ~FPEXC_EN);
+ }
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ /* make sure VFP is disabled when leaving idle */
+ fmxr(FPEXC, fpexc & ~FPEXC_EN);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+ .notifier_call = vfp_cpu_pm_notifier,
+};
+
/*
* Raise a SIGFPE for the current process.
* sicode describes the signal being raised.
@@ -444,6 +441,12 @@ static int vfp_pm_suspend(void)
struct thread_info *ti = current_thread_info();
u32 fpexc = fmrx(FPEXC);
+ /* If the VFP was lazily disabled, re-enable it so the state can be saved */
+ if (vfp_current_hw_state[ti->cpu] != &ti->vfpstate) {
+ fpexc |= FPEXC_EN;
+ fmxr(FPEXC, fpexc);
+ }
+
/* if vfp is on, then save state for resumption */
if (fpexc & FPEXC_EN) {
printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
@@ -451,6 +454,10 @@ static int vfp_pm_suspend(void)
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ } else if (vfp_current_hw_state[ti->cpu]) {
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+ fmxr(FPEXC, fpexc);
}
/* clear any information we had about last context state */
@@ -482,15 +489,15 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
-/*
- * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
- * with the hardware state.
- */
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- if (vfp_state_in_hw(cpu, thread)) {
+ /*
+ * If the thread we're interested in is the current owner of the
+ * hardware VFP state, then we need to save its state.
+ */
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
/*
@@ -504,13 +511,36 @@ void vfp_sync_hwstate(struct thread_info *thread)
put_cpu();
}
-/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- vfp_force_reload(cpu, thread);
+ /*
+ * If the thread we're interested in is the current owner of the
+ * hardware VFP state, then we need to invalidate it.
+ */
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
+ u32 fpexc = fmrx(FPEXC);
+ fmxr(FPEXC, fpexc & ~FPEXC_EN);
+
+ /*
+ * Set the context to NULL to force a reload the next time
+ * the thread uses the VFP.
+ */
+ vfp_current_hw_state[cpu] = NULL;
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * For SMP we still have to take care of the case where the thread
+ * migrates to another CPU and then back to the original CPU on which
+ * the last VFP user is still the same thread. Mark the thread VFP
+ * state as belonging to a non-existent CPU so that the saved one will
+ * be reloaded in the above case.
+ */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
put_cpu();
}
@@ -529,7 +559,8 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
void *hcpu)
{
if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
- vfp_force_reload((long)hcpu, current_thread_info());
+ unsigned int cpu = (long)hcpu;
+ vfp_current_hw_state[cpu] = NULL;
} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
@@ -578,6 +609,7 @@ static int __init vfp_init(void)
vfp_vector = vfp_support_entry;
thread_register_notifier(&vfp_notifier_block);
+ cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
vfp_pm_init();
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6ab16ac64d29..07f73cde90be 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -467,12 +467,6 @@ int __cpuinit start_secondary(void *cpuvoid)
S390_lowcore.restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
__ctl_set_bit(0, 28); /* Enable lowcore protection */
- /*
- * Wait until the cpu which brought this one up marked it
- * active before enabling interrupts.
- */
- while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
- cpu_relax();
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index f49253d75710..f1e4268ef3c6 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,13 +1,6 @@
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f693e44e1bf6..cbd26458911a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -57,31 +57,17 @@ asmlinkage extern void ret_from_fork(void);
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
void enter_idle(void)
{
percpu_write(is_idle, 1);
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+ idle_notifier_call_chain(IDLE_START);
}
static void __exit_idle(void)
{
if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+ idle_notifier_call_chain(IDLE_END);
}
/* Called from interrupts to signify idle end */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9f548cb4a958..39e11500b9b9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,19 +285,6 @@ notrace static void __cpuinit start_secondary(void *unused)
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
x86_platform.nmi_init();
- /*
- * Wait until the cpu which brought this one up marked it
- * online before enabling interrupts. If we don't do that then
- * we can end up waking up the softirq thread before this cpu
- * reached the active state, which makes the scheduler unhappy
- * and schedule the softirq thread on the wrong cpu. This is
- * only observable with forced threaded interrupts, but in
- * theory it could also happen w/o them. It's just way harder
- * to achieve.
- */
- while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
- cpu_relax();
-
/* enable local interrupts */
local_irq_enable();
diff --git a/block/genhd.c b/block/genhd.c
index d261b73b9744..d3834710b959 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1105,6 +1105,22 @@ static void disk_release(struct device *dev)
blk_put_queue(disk->queue);
kfree(disk);
}
+
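+/* Export the partition count as NPARTS= in the disk's uevent environment. */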
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int cnt = 0;
+
+ disk_part_iter_init(&piter, disk, 0);
+ while ((part = disk_part_iter_next(&piter)))
+ cnt++;
+ disk_part_iter_exit(&piter);
+ add_uevent_var(env, "NPARTS=%u", cnt);
+ return 0;
+}
+
struct class block_class = {
.name = "block",
};
@@ -1123,6 +1139,7 @@ static struct device_type disk_type = {
.groups = disk_attr_groups,
.release = disk_release,
.devnode = block_devnode,
+ .uevent = disk_uevent,
};
#ifdef CONFIG_PROC_FS
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 2222617b3bed..7569ba9b6210 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -944,6 +944,10 @@ static int do_test(int m)
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
+ case 46:
+ ret += tcrypt_test("ofb(aes)");
+ break;
+
case 100:
ret += tcrypt_test("hmac(md5)");
break;
@@ -984,6 +988,10 @@ static int do_test(int m)
ret += tcrypt_test("vmac(aes)");
break;
+ case 110:
+ ret += tcrypt_test("cmac(aes)");
+ break;
+
case 150:
ret += tcrypt_test("ansi_cprng");
break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index b6b93d416351..80ff90bb9252 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1705,6 +1705,16 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
+ },{
+ .alg = "cmac(aes)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = cmac_aes_tv_template,
+ .count = CMAC_AES_TEST_VECTORS
+ }
+ }
}, {
.alg = "crc32c",
.test = alg_test_crc32c,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 27adc92842ba..27de604ec4bc 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -656,6 +656,59 @@ static struct hash_testvec sha512_tv_template[] = {
},
};
+#define CMAC_AES_TEST_VECTORS 4
+
+static struct hash_testvec cmac_aes_tv_template[] = {
+ {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .ksize = 16,
+ .plaintext = "",
+ .psize = 0,
+ .digest = "\xbb\x1d\x69\x29\xe9\x59\x37\x28"
+ "\x7f\xa3\x7d\x12\x9b\x75\x67\x46",
+ },
+ {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .ksize = 16,
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+ .psize = 16,
+ .digest = "\x07\x0a\x16\xb4\x6b\x4d\x41\x44"
+ "\xf7\x9b\xdd\x9d\xd0\x4a\x28\x7c",
+ },
+ {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .ksize = 16,
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11",
+ .psize = 40,
+ .digest = "\xdf\xa6\x67\x47\xde\x9a\xe6\x30"
+ "\x30\xca\x32\x61\x14\x97\xc8\x27",
+ },
+ {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .ksize = 16,
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .psize = 64,
+ .digest = "\x51\xf0\xbe\xbf\x7e\x3b\x9d\x92"
+ "\xfc\x49\x74\x17\x79\x36\x3c\xfe",
+ },
+
+};
/*
* WHIRLPOOL test vectors from Whirlpool package
@@ -4414,8 +4467,6 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
}
};
-
-
static struct cipher_testvec aes_ctr_enc_tv_template[] = {
{ /* From NIST Special Publication 800-38A, Appendix F.5 */
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
@@ -4574,6 +4625,164 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
}
};
+static struct cipher_testvec aes_ofb_enc_tv_template[] = {
+ { /* From NIST Special Publication 800-38A, Appendix F.5 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ilen = 64,
+ .result = "\x3B\x3F\xD9\x2E\xB7\x2D\xAD\x20"
+ "\x33\x34\x49\xF8\xE8\x3C\xFB\x4A"
+ "\x77\x89\x50\x8D\x16\x91\x8F\x03"
+ "\xF5\x3C\x52\xDA\xC5\x4E\xD8\x25"
+ "\x97\x40\x05\x1E\x9C\x5F\xEC\xF6"
+ "\x43\x44\xF7\xA8\x22\x60\xED\xCC"
+ "\x30\x4C\x65\x28\xF6\x59\xC7\x78"
+ "\x66\xA5\x10\xD9\xC1\xD6\xAE\x5E",
+ .rlen = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ilen = 64,
+ .result = "\xCD\xC8\x0D\x6F\xDD\xF1\x8C\xAB"
+ "\x34\xC2\x59\x09\xC9\x9A\x41\x74"
+ "\xFC\xC2\x8B\x8D\x4C\x63\x83\x7C"
+ "\x09\xE8\x17\x00\xC1\x10\x04\x01"
+ "\x8D\x9A\x9A\xEA\xC0\xF6\x59\x6F"
+ "\x55\x9C\x6D\x4D\xAF\x59\xA5\xF2"
+ "\x6D\x9F\x20\x08\x57\xCA\x6C\x3E"
+ "\x9C\xAC\x52\x4B\xD9\xAC\xC9\x2A",
+ .rlen = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ilen = 64,
+ .result = "\xDC\x7E\x84\xBF\xDA\x79\x16\x4B"
+ "\x7E\xCD\x84\x86\x98\x5D\x38\x60"
+ "\x4F\xEB\xDC\x67\x40\xD2\x0B\x3A"
+ "\xC8\x8F\x6A\xD8\x2A\x4F\xB0\x8D"
+ "\x71\xAB\x47\xA0\x86\xE8\x6E\xED"
+ "\xF3\x9D\x1C\x5B\xBA\x97\xC4\x08"
+ "\x01\x26\x14\x1D\x67\xF3\x7B\xE8"
+ "\x53\x8F\x5A\x8B\xE7\x40\xE4\x84",
+ .rlen = 64,
+ }
+};
+
+static struct cipher_testvec aes_ofb_dec_tv_template[] = {
+ { /* From NIST Special Publication 800-38A, Appendix F.5 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\x3B\x3F\xD9\x2E\xB7\x2D\xAD\x20"
+ "\x33\x34\x49\xF8\xE8\x3C\xFB\x4A"
+ "\x77\x89\x50\x8D\x16\x91\x8F\x03"
+ "\xF5\x3C\x52\xDA\xC5\x4E\xD8\x25"
+ "\x97\x40\x05\x1E\x9C\x5F\xEC\xF6"
+ "\x43\x44\xF7\xA8\x22\x60\xED\xCC"
+ "\x30\x4C\x65\x28\xF6\x59\xC7\x78"
+ "\x66\xA5\x10\xD9\xC1\xD6\xAE\x5E",
+ .ilen = 64,
+ .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .rlen = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\xCD\xC8\x0D\x6F\xDD\xF1\x8C\xAB"
+ "\x34\xC2\x59\x09\xC9\x9A\x41\x74"
+ "\xFC\xC2\x8B\x8D\x4C\x63\x83\x7C"
+ "\x09\xE8\x17\x00\xC1\x10\x04\x01"
+ "\x8D\x9A\x9A\xEA\xC0\xF6\x59\x6F"
+ "\x55\x9C\x6D\x4D\xAF\x59\xA5\xF2"
+ "\x6D\x9F\x20\x08\x57\xCA\x6C\x3E"
+ "\x9C\xAC\x52\x4B\xD9\xAC\xC9\x2A",
+ .ilen = 64,
+ .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .rlen = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\xDC\x7E\x84\xBF\xDA\x79\x16\x4B"
+ "\x7E\xCD\x84\x86\x98\x5D\x38\x60"
+ "\x4F\xEB\xDC\x67\x40\xD2\x0B\x3A"
+ "\xC8\x8F\x6A\xD8\x2A\x4F\xB0\x8D"
+ "\x71\xAB\x47\xA0\x86\xE8\x6E\xED"
+ "\xF3\x9D\x1C\x5B\xBA\x97\xC4\x08"
+ "\x01\x26\x14\x1D\x67\xF3\x7B\xE8"
+ "\x53\x8F\x5A\x8B\xE7\x40\xE4\x84",
+ .ilen = 64,
+ .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .rlen = 64,
+ }
+};
+
static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
{ /* From RFC 3686 */
.key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 95b9e7eefadc..eee562d0dede 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -94,6 +94,8 @@ source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
+source "drivers/switch/Kconfig"
+
source "drivers/accessibility/Kconfig"
source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7fa433a7030c..4cbb7ce0a0f9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
+obj-$(CONFIG_SWITCH) += switch/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a85459126bc6..7b4b78a6e82e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
+#include <linux/timer.h>
#include "../base.h"
#include "power.h"
@@ -49,6 +50,12 @@ LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static void dpm_drv_timeout(unsigned long data);
+struct dpm_drv_wd_data {
+ struct device *dev;
+ struct task_struct *tsk;
+};
+
static int async_error;
/**
@@ -592,6 +599,30 @@ static bool is_async(struct device *dev)
}
/**
+ * dpm_drv_timeout - Driver suspend / resume watchdog handler
+ * @data: struct dpm_drv_wd_data for the device that timed out
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so BUG() out to get a
+ * crash dump.
+ *
+ */
+static void dpm_drv_timeout(unsigned long data)
+{
+ struct dpm_drv_wd_data *wd_data = (void *)data;
+ struct device *dev = wd_data->dev;
+ struct task_struct *tsk = wd_data->tsk;
+
+ printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
+ (dev->driver ? dev->driver->name : "no driver"));
+
+ printk(KERN_EMERG "dpm suspend stack:\n");
+ show_stack(tsk, NULL);
+
+ BUG();
+}
+
+/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
@@ -849,9 +880,19 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ struct timer_list timer;
+ struct dpm_drv_wd_data data;
dpm_wait_for_children(dev, async);
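+ /*
+ * Arm a 12 second watchdog around this device's suspend; if it
+ * expires, dpm_drv_timeout() logs the offending device and BUG()s
+ * so the hang is captured in a crash dump.
+ */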
+ data.dev = dev;
+ data.tsk = get_current();
+ init_timer_on_stack(&timer);
+ timer.expires = jiffies + HZ * 12;
+ timer.function = dpm_drv_timeout;
+ timer.data = (unsigned long)&data;
+ add_timer(&timer);
+
if (async_error)
return 0;
@@ -905,6 +946,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.is_suspended = !error;
device_unlock(dev);
+
+ del_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
+
complete_all(&dev->power.completion);
if (error) {
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6a7f7b06968f..3f141ea9283d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -28,13 +28,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
void update_pm_runtime_accounting(struct device *dev)
{
unsigned long now = jiffies;
- int delta;
+ unsigned long delta;
delta = now - dev->power.accounting_timestamp;
- if (delta < 0)
- delta = 0;
-
dev->power.accounting_timestamp = now;
if (dev->power.disable_depth > 0)
@@ -753,6 +750,8 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC));
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -782,6 +781,8 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -810,6 +811,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
@@ -999,6 +1002,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
*/
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
+ might_sleep();
spin_lock_irq(&dev->power.lock);
if (dev->power.disable_depth > 0) {
@@ -1184,6 +1188,8 @@ void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
int old_delay, old_use;
+ might_sleep();
+
spin_lock_irq(&dev->power.lock);
old_delay = dev->power.autosuspend_delay;
old_use = dev->power.use_autosuspend;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 20663f8dae45..843f722ff9cd 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -450,3 +450,39 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
+
+/**
+ * regmap_update_bits_lazy: Perform a read/modify/write cycle on the register
+ * map. Only write new contents if they differ from the previous ones.
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
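+ *
+ * Example (illustrative register, mask and value only):
+ *
+ *   regmap_update_bits_lazy(map, 0x04, 0x03, 0x01);
+ *
+ * performs the bus write only when bits [1:0] of register 0x04 do not
+ * already contain 0x01.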
+ */
+int regmap_update_bits_lazy(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret, new;
+ unsigned int tmp;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_read(map, reg, &tmp);
+ if (ret != 0)
+ goto out;
+
+ new = tmp & ~mask;
+ new |= val & mask;
+ if (new != tmp) {
+ ret = _regmap_write(map, reg, new);
+ }
+
+out:
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_lazy);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c27..adf975635488 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -93,6 +93,16 @@ config BT_HCIBCM203X
Say Y here to compile support for HCI BCM203x devices into the
kernel or say M to compile it as module (bcm203x).
+config BT_BLUESLEEP
+ tristate "Bluesleep driver support"
+ help
+ Bluetooth Bluesleep driver.
+ This driver provides a dynamic power-saving mechanism for
+ Bluetooth radio devices.
+
+ Say Y here to compile bluesleep support into the kernel
+ or say M to compile it as a module (bluesleep).
+
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f4460f4f4b78..4c2f9d0051be 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o
obj-$(CONFIG_BT_HCIUART) += hci_uart.o
+obj-$(CONFIG_BT_BLUESLEEP) += bluesleep.o
obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o
obj-$(CONFIG_BT_HCIBPA10X) += bpa10x.o
obj-$(CONFIG_BT_HCIBFUSB) += bfusb.o
diff --git a/drivers/bluetooth/bluesleep.c b/drivers/bluetooth/bluesleep.c
new file mode 100644
index 000000000000..0e2ec0befbe3
--- /dev/null
+++ b/drivers/bluetooth/bluesleep.c
@@ -0,0 +1,864 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * Copyright (C) 2006-2007 - Motorola
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Date Author Comment
+ * ----------- -------------- --------------------------------
+ * 2006-Apr-28 Motorola The kernel module for running the Bluetooth(R)
+ * Sleep-Mode Protocol from the Host side
+ * 2006-Sep-08 Motorola Added workqueue for handling sleep work.
+ * 2007-Jan-24 Motorola Added mbm_handle_ioi() call to ISR.
+ * 2009-Aug-10 Motorola Changed "add_timer" to "mod_timer" to solve
+ * race when flurry of queued work comes in.
+*/
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/irq.h>
+#include <linux/ioport.h>
+#include <linux/param.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/wakelock.h>
+#include <mach/gpio.h>
+#include <linux/serial_core.h>
+#include <linux/tegra_uart.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h> /* event notifications */
+#include "hci_uart.h"
+
+#define BT_SLEEP_DBG
+#ifndef BT_SLEEP_DBG
+#define BT_DBG(fmt, arg...)
+#endif
+/*
+ * Defines
+ */
+
+#define VERSION "1.1"
+#define PROC_DIR "bluetooth/sleep"
+
+#define POLARITY_LOW 0
+#define POLARITY_HIGH 1
+
+/* enable/disable wake-on-bluetooth */
+#define BT_ENABLE_IRQ_WAKE 1
+
+struct bluesleep_info {
+ unsigned host_wake;
+ unsigned ext_wake;
+ unsigned host_wake_irq;
+ struct uart_port *uport;
+ struct wake_lock wake_lock;
+ int irq_polarity;
+ int has_ext_wake;
+};
+
+/* work function */
+static void bluesleep_sleep_work(struct work_struct *work);
+
+/* work queue */
+DECLARE_DELAYED_WORK(sleep_workqueue, bluesleep_sleep_work);
+
+/* Macros for handling sleep work */
+#define bluesleep_rx_busy() schedule_delayed_work(&sleep_workqueue, 0)
+#define bluesleep_tx_busy() schedule_delayed_work(&sleep_workqueue, 0)
+#define bluesleep_rx_idle() schedule_delayed_work(&sleep_workqueue, 0)
+#define bluesleep_tx_idle() schedule_delayed_work(&sleep_workqueue, 0)
+
+/* 10 second timeout */
+#define TX_TIMER_INTERVAL 10
+
+/* state variable names and bit positions */
+#define BT_PROTO 0x01
+#define BT_TXDATA 0x02
+#define BT_ASLEEP 0x04
+#define BT_EXT_WAKE 0x08
+#define BT_SUSPEND 0x10
+
+/* global pointer to a single hci device. */
+static struct hci_dev *bluesleep_hdev;
+
+static struct bluesleep_info *bsi;
+
+/* module usage */
+static atomic_t open_count = ATOMIC_INIT(1);
+
+/*
+ * Local function prototypes
+ */
+static int bluesleep_hci_event(struct notifier_block *this,
+ unsigned long event, void *data);
+static int bluesleep_start(void);
+static void bluesleep_stop(void);
+
+/*
+ * Global variables
+ */
+/** Global state flags */
+static unsigned long flags;
+
+/** Tasklet to respond to change in hostwake line */
+static struct tasklet_struct hostwake_task;
+
+/** Transmission timer */
+static void bluesleep_tx_timer_expire(unsigned long data);
+static DEFINE_TIMER(tx_timer, bluesleep_tx_timer_expire, 0, 0);
+
+/** Lock for state transitions */
+static spinlock_t rw_lock;
+
+/** Notifier block for HCI events */
+struct notifier_block hci_event_nblock = {
+ .notifier_call = bluesleep_hci_event,
+};
+
+struct proc_dir_entry *bluetooth_dir, *sleep_dir;
+
+/*
+ * Local functions
+ */
+static void hsuart_power(int on)
+{
+ if (test_bit(BT_SUSPEND, &flags))
+ return;
+ if (on) {
+ tegra_uart_request_clock_on(bsi->uport);
+ tegra_uart_set_mctrl(bsi->uport, TIOCM_RTS);
+ } else {
+ tegra_uart_set_mctrl(bsi->uport, 0);
+ tegra_uart_request_clock_off(bsi->uport);
+ }
+}
+
+/**
+ * @return 1 if the Host can go to sleep, 0 otherwise.
+ */
+int bluesleep_can_sleep(void)
+{
+ /* check that HOST_WAKE is deasserted, BT_EXT_WAKE is clear and a uart is attached */
+ return ((gpio_get_value(bsi->host_wake) != bsi->irq_polarity) &&
+ (!test_bit(BT_EXT_WAKE, &flags)) &&
+ (bsi->uport != NULL));
+}
+
+void bluesleep_sleep_wakeup(void)
+{
+ if (test_bit(BT_ASLEEP, &flags)) {
+ BT_DBG("waking up...");
+ wake_lock(&bsi->wake_lock);
+ /* Start the timer */
+ mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ clear_bit(BT_ASLEEP, &flags);
+ /*Activating UART */
+ }
+}
+
+/**
+ * @brief Main sleep work handling function: updates the state flags and
+ * activates or deactivates the UART after checking the FIFO.
+ */
+static void bluesleep_sleep_work(struct work_struct *work)
+{
+ if (bluesleep_can_sleep()) {
+ /* already asleep, this is an error case */
+ if (test_bit(BT_ASLEEP, &flags)) {
+ BT_DBG("already asleep");
+ return;
+ }
+
+ if (tegra_uart_is_tx_empty(bsi->uport)) {
+ BT_DBG("going to sleep...");
+ set_bit(BT_ASLEEP, &flags);
+ /*Deactivating UART */
+ /* UART clk is not turned off immediately. Release
+ * wakelock after 500 ms.
+ */
+ wake_lock_timeout(&bsi->wake_lock, HZ / 2);
+ } else {
+ mod_timer(&tx_timer, jiffies + TX_TIMER_INTERVAL * HZ);
+ return;
+ }
+ } else if (!test_bit(BT_EXT_WAKE, &flags)
+ && !test_bit(BT_ASLEEP, &flags)) {
+ mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ } else {
+ bluesleep_sleep_wakeup();
+ }
+}
+
+/**
+ * A tasklet function that runs in tasklet context, reads the value
+ * of the HOST_WAKE GPIO pin and defers further work.
+ * @param data Not used.
+ */
+static void bluesleep_hostwake_task(unsigned long data)
+{
+ BT_DBG("hostwake line change");
+
+ spin_lock(&rw_lock);
+ if ((gpio_get_value(bsi->host_wake) == bsi->irq_polarity))
+ bluesleep_rx_busy();
+ else
+ bluesleep_rx_idle();
+ spin_unlock(&rw_lock);
+
+}
+
+/**
+ * Handles proper timer action when outgoing data is delivered to the
+ * HCI line discipline. Sets BT_TXDATA.
+ */
+static void bluesleep_outgoing_data(void)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&rw_lock, irq_flags);
+ /* log data passing by */
+ set_bit(BT_TXDATA, &flags);
+ /* if the tx side is sleeping... */
+ if (!test_bit(BT_EXT_WAKE, &flags)) {
+ BT_DBG("tx was sleeping");
+ bluesleep_sleep_wakeup();
+ }
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
+}
+
+/**
+ * Handles HCI device events.
+ * @param this Not used.
+ * @param event The event that occurred.
+ * @param data The HCI device associated with the event.
+ * @return <code>NOTIFY_DONE</code>.
+ */
+static int bluesleep_hci_event(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct hci_dev *hdev = (struct hci_dev *) data;
+ struct hci_uart *hu;
+ struct uart_state *state;
+
+ if (!hdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case HCI_DEV_REG:
+ if (!bluesleep_hdev) {
+ bluesleep_hdev = hdev;
+ hu = (struct hci_uart *) hdev->driver_data;
+ state = (struct uart_state *) hu->tty->driver_data;
+ bsi->uport = state->uart_port;
+ /* if bluetooth started, start bluesleep*/
+ bluesleep_start();
+ }
+ break;
+ case HCI_DEV_UNREG:
+ bluesleep_stop();
+ bluesleep_hdev = NULL;
+ bsi->uport = NULL;
+ /* if bluetooth stopped, stop bluesleep also */
+ break;
+ case HCI_DEV_WRITE:
+ bluesleep_outgoing_data();
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * Handles transmission timer expiration.
+ * @param data Not used.
+ */
+static void bluesleep_tx_timer_expire(unsigned long data)
+{
+ unsigned long irq_flags;
+
+ BT_DBG("Tx timer expired");
+
+ spin_lock_irqsave(&rw_lock, irq_flags);
+
+ /* were we silent during the last timeout? */
+ if (!test_bit(BT_TXDATA, &flags)) {
+ BT_DBG("Tx has been idle");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
+ bluesleep_tx_idle();
+ } else {
+ BT_DBG("Tx data during last period");
+ mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL*HZ));
+ }
+
+ /* clear the incoming data flag */
+ clear_bit(BT_TXDATA, &flags);
+
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
+}
+
+/**
+ * Schedules a tasklet to run when receiving an interrupt on the
+ * <code>HOST_WAKE</code> GPIO pin.
+ * @param irq Not used.
+ * @param dev_id Not used.
+ */
+static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
+{
+ /* schedule a tasklet to handle the change in the host wake line */
+ tasklet_schedule(&hostwake_task);
+ return IRQ_HANDLED;
+}
+
+/**
+ * Starts the Sleep-Mode Protocol on the Host.
+ * @return On success, 0. On error, -1, and <code>errno</code> is set
+ * appropriately.
+ */
+static int bluesleep_start(void)
+{
+ int retval;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&rw_lock, irq_flags);
+ if (test_bit(BT_PROTO, &flags)) {
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
+
+ if (!atomic_dec_and_test(&open_count)) {
+ atomic_inc(&open_count);
+ return -EBUSY;
+ }
+
+ /* start the timer */
+ mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+
+ /* assert BT_WAKE */
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+#if BT_ENABLE_IRQ_WAKE
+ retval = enable_irq_wake(bsi->host_wake_irq);
+ if (retval < 0) {
+ BT_ERR("Couldn't enable BT_HOST_WAKE as wakeup interrupt");
+ goto fail;
+ }
+#endif
+ set_bit(BT_PROTO, &flags);
+ wake_lock(&bsi->wake_lock);
+ return 0;
+fail:
+ del_timer(&tx_timer);
+ atomic_inc(&open_count);
+
+ return retval;
+}
+
+/**
+ * Stops the Sleep-Mode Protocol on the Host.
+ */
+static void bluesleep_stop(void)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&rw_lock, irq_flags);
+ if (!test_bit(BT_PROTO, &flags)) {
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
+ return;
+ }
+ /* assert BT_WAKE */
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ del_timer(&tx_timer);
+ clear_bit(BT_PROTO, &flags);
+
+ if (test_bit(BT_ASLEEP, &flags)) {
+ clear_bit(BT_ASLEEP, &flags);
+ hsuart_power(1);
+ }
+
+ atomic_inc(&open_count);
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
+
+#if BT_ENABLE_IRQ_WAKE
+ if (disable_irq_wake(bsi->host_wake_irq))
+ BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
+#endif
+ wake_lock_timeout(&bsi->wake_lock, HZ / 2);
+}
+/**
+ * Read the <code>BT_WAKE</code> GPIO pin value via the proc interface.
+ * When this function returns, <code>page</code> will contain a 1 if the
+ * pin is high, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluepower_read_proc_btwake(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ *eof = 1;
+ return sprintf(page, "btwake:%u\n", test_bit(BT_EXT_WAKE, &flags));
+}
+
+/**
+ * Write the <code>BT_WAKE</code> GPIO pin value via the proc interface.
+ * @param file Not used.
+ * @param buffer The buffer to read from.
+ * @param count The number of bytes to be written.
+ * @param data Not used.
+ * @return On success, the number of bytes written. On error, -1, and
+ * <code>errno</code> is set appropriately.
+ */
+static int bluepower_write_proc_btwake(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char *buf;
+
+ if (count < 1)
+ return -EINVAL;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, buffer, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+ if (buf[0] == '0') {
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
+ } else if (buf[0] == '1') {
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ } else {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ kfree(buf);
+ return count;
+}
+
+/**
+ * Read the <code>BT_HOST_WAKE</code> GPIO pin value via the proc interface.
+ * When this function returns, <code>page</code> will contain a 1 if the pin
+ * is high, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluepower_read_proc_hostwake(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ *eof = 1;
+ return sprintf(page, "hostwake: %u\n", gpio_get_value(bsi->host_wake));
+}
+
+
+/**
+ * Read the low-power status of the Host via the proc interface.
+ * When this function returns, <code>page</code> contains a 1 if the Host
+ * is asleep, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluesleep_read_proc_asleep(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ unsigned int asleep;
+
+ asleep = test_bit(BT_ASLEEP, &flags) ? 1 : 0;
+ *eof = 1;
+ return sprintf(page, "asleep: %u\n", asleep);
+}
+
+/**
+ * Read the low-power protocol being used by the Host via the proc interface.
+ * When this function returns, <code>page</code> will contain a 1 if the Host
+ * is using the Sleep Mode Protocol, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluesleep_read_proc_proto(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ unsigned int proto;
+
+ proto = test_bit(BT_PROTO, &flags) ? 1 : 0;
+ *eof = 1;
+ return sprintf(page, "proto: %u\n", proto);
+}
+
+/**
+ * Modify the low-power protocol used by the Host via the proc interface.
+ * @param file Not used.
+ * @param buffer The buffer to read from.
+ * @param count The number of bytes to be written.
+ * @param data Not used.
+ * @return On success, the number of bytes written. On error, -1, and
+ * <code>errno</code> is set appropriately.
+ */
+static int bluesleep_write_proc_proto(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char proto;
+
+ if (count < 1)
+ return -EINVAL;
+
+ if (copy_from_user(&proto, buffer, 1))
+ return -EFAULT;
+
+ if (proto == '0')
+ bluesleep_stop();
+ else
+ bluesleep_start();
+
+ /* claim that we wrote everything */
+ return count;
+}
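The proc entries backing these handlers are created later in bluesleep_init() under /proc/bluetooth/sleep/. A small userspace sketch (illustration only) that starts or stops the sleep-mode protocol through the "proto" entry:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write '1' to start the sleep-mode protocol, '0' to stop it. */
static int set_bluesleep_proto(int enable)
{
	const char *val = enable ? "1" : "0";
	int fd = open("/proc/bluetooth/sleep/proto", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/bluetooth/sleep/proto");
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	return set_bluesleep_proto(1) ? 1 : 0;
}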
+
+
+static int bluesleep_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+
+ bsi = kzalloc(sizeof(struct bluesleep_info), GFP_KERNEL);
+ if (!bsi)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_host_wake");
+ if (!res) {
+ BT_ERR("couldn't find host_wake gpio\n");
+ ret = -ENODEV;
+ goto free_bsi;
+ }
+ bsi->host_wake = res->start;
+
+ ret = gpio_request(bsi->host_wake, "bt_host_wake");
+ if (ret)
+ goto free_bsi;
+
+ /* configure host_wake as input */
+ ret = gpio_direction_input(bsi->host_wake);
+ if (ret < 0) {
+ pr_err("gpio-keys: failed to configure input"
+ " direction for GPIO %d, error %d\n",
+ bsi->host_wake, ret);
+ gpio_free(bsi->host_wake);
+ goto free_bsi;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_ext_wake");
+
+ if (!res)
+ bsi->has_ext_wake = 0;
+ else
+ bsi->has_ext_wake = 1;
+
+ if (bsi->has_ext_wake) {
+ bsi->ext_wake = res->start;
+ ret = gpio_request(bsi->ext_wake, "bt_ext_wake");
+ if (ret)
+ goto free_bt_host_wake;
+
+ /* configure ext_wake as output mode*/
+ ret = gpio_direction_output(bsi->ext_wake, 1);
+ if (ret < 0) {
+ pr_err("gpio-keys: failed to configure output"
+ " direction for GPIO %d, error %d\n",
+ bsi->ext_wake, ret);
+ gpio_free(bsi->ext_wake);
+ goto free_bt_host_wake;
+ }
+ } else
+ set_bit(BT_EXT_WAKE, &flags);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "host_wake");
+ if (!res) {
+ BT_ERR("couldn't find host_wake irq\n");
+ ret = -ENODEV;
+ goto free_bt_host_wake;
+ }
+ bsi->host_wake_irq = res->start;
+ if (bsi->host_wake_irq < 0) {
+ BT_ERR("couldn't find host_wake irq\n");
+ ret = -ENODEV;
+ goto free_bt_ext_wake;
+ }
+ if (res->flags & IORESOURCE_IRQ_LOWEDGE)
+ bsi->irq_polarity = POLARITY_LOW;/*low edge (falling edge)*/
+ else
+ bsi->irq_polarity = POLARITY_HIGH;/*anything else*/
+
+ wake_lock_init(&bsi->wake_lock, WAKE_LOCK_SUSPEND, "bluesleep");
+ clear_bit(BT_SUSPEND, &flags);
+
+ if (bsi->irq_polarity == POLARITY_LOW) {
+ ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ "bluetooth hostwake", NULL);
+ } else {
+ ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "bluetooth hostwake", NULL);
+ }
+ if (ret < 0) {
+ BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
+ goto free_bt_ext_wake;
+ }
+
+ return 0;
+
+free_bt_ext_wake:
+ gpio_free(bsi->ext_wake);
+free_bt_host_wake:
+ gpio_free(bsi->host_wake);
+free_bsi:
+ kfree(bsi);
+ return ret;
+}
+
+static int bluesleep_remove(struct platform_device *pdev)
+{
+ free_irq(bsi->host_wake_irq, NULL);
+ gpio_free(bsi->host_wake);
+ gpio_free(bsi->ext_wake);
+ wake_lock_destroy(&bsi->wake_lock);
+ kfree(bsi);
+ return 0;
+}
+
+
+static int bluesleep_resume(struct platform_device *pdev)
+{
+ if (test_bit(BT_SUSPEND, &flags)) {
+ BT_DBG("bluesleep resuming...\n");
+ if ((bsi->uport != NULL) &&
+ (gpio_get_value(bsi->host_wake) == bsi->irq_polarity)) {
+ BT_DBG("bluesleep resume form BT event...\n");
+ tegra_uart_request_clock_on(bsi->uport);
+ tegra_uart_set_mctrl(bsi->uport, TIOCM_RTS);
+ }
+ clear_bit(BT_SUSPEND, &flags);
+ }
+ return 0;
+}
+
+static int bluesleep_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ BT_DBG("bluesleep suspending...\n");
+ set_bit(BT_SUSPEND, &flags);
+ return 0;
+}
+
+static struct platform_driver bluesleep_driver = {
+ .probe = bluesleep_probe,
+ .remove = bluesleep_remove,
+ .suspend = bluesleep_suspend,
+ .resume = bluesleep_resume,
+ .driver = {
+ .name = "bluesleep",
+ .owner = THIS_MODULE,
+ },
+};
+/**
+ * Initializes the module.
+ * @return On success, 0. On error, -1, and <code>errno</code> is set
+ * appropriately.
+ */
+static int __init bluesleep_init(void)
+{
+ int retval;
+ struct proc_dir_entry *ent;
+
+ BT_INFO("BlueSleep Mode Driver Ver %s", VERSION);
+
+ retval = platform_driver_register(&bluesleep_driver);
+ if (retval)
+ return retval;
+
+ if (bsi == NULL)
+ return 0;
+
+ bluesleep_hdev = NULL;
+
+ bluetooth_dir = proc_mkdir("bluetooth", NULL);
+ if (bluetooth_dir == NULL) {
+ BT_ERR("Unable to create /proc/bluetooth directory");
+ return -ENOMEM;
+ }
+
+ sleep_dir = proc_mkdir("sleep", bluetooth_dir);
+ if (sleep_dir == NULL) {
+ BT_ERR("Unable to create /proc/%s directory", PROC_DIR);
+ return -ENOMEM;
+ }
+
+ /* Creating read/write "btwake" entry */
+ ent = create_proc_entry("btwake", 0, sleep_dir);
+ if (ent == NULL) {
+ BT_ERR("Unable to create /proc/%s/btwake entry", PROC_DIR);
+ retval = -ENOMEM;
+ goto fail;
+ }
+ ent->read_proc = bluepower_read_proc_btwake;
+ ent->write_proc = bluepower_write_proc_btwake;
+
+ /* read only proc entries */
+ if (create_proc_read_entry("hostwake", 0, sleep_dir,
+ bluepower_read_proc_hostwake, NULL) == NULL) {
+ BT_ERR("Unable to create /proc/%s/hostwake entry", PROC_DIR);
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ /* read/write proc entries */
+ ent = create_proc_entry("proto", 0, sleep_dir);
+ if (ent == NULL) {
+ BT_ERR("Unable to create /proc/%s/proto entry", PROC_DIR);
+ retval = -ENOMEM;
+ goto fail;
+ }
+ ent->read_proc = bluesleep_read_proc_proto;
+ ent->write_proc = bluesleep_write_proc_proto;
+
+ /* read only proc entries */
+ if (create_proc_read_entry("asleep", 0,
+ sleep_dir, bluesleep_read_proc_asleep, NULL) == NULL) {
+ BT_ERR("Unable to create /proc/%s/asleep entry", PROC_DIR);
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ flags = 0; /* clear all status bits */
+
+ /* Initialize spinlock. */
+ spin_lock_init(&rw_lock);
+
+ /* Initialize timer */
+ init_timer(&tx_timer);
+ tx_timer.function = bluesleep_tx_timer_expire;
+ tx_timer.data = 0;
+
+ /* initialize host wake tasklet */
+ tasklet_init(&hostwake_task, bluesleep_hostwake_task, 0);
+
+ /* assert bt wake */
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ hci_register_notifier(&hci_event_nblock);
+
+ return 0;
+
+fail:
+ remove_proc_entry("asleep", sleep_dir);
+ remove_proc_entry("proto", sleep_dir);
+ remove_proc_entry("hostwake", sleep_dir);
+ remove_proc_entry("btwake", sleep_dir);
+ remove_proc_entry("sleep", bluetooth_dir);
+ remove_proc_entry("bluetooth", 0);
+ return retval;
+}
+
+/**
+ * Cleans up the module.
+ */
+static void __exit bluesleep_exit(void)
+{
+ if (bsi == NULL)
+ return;
+
+ /* assert bt wake */
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ if (test_bit(BT_PROTO, &flags)) {
+ if (disable_irq_wake(bsi->host_wake_irq))
+ BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
+ free_irq(bsi->host_wake_irq, NULL);
+ del_timer(&tx_timer);
+ if (test_bit(BT_ASLEEP, &flags))
+ hsuart_power(1);
+ }
+
+ hci_unregister_notifier(&hci_event_nblock);
+ platform_driver_unregister(&bluesleep_driver);
+
+ remove_proc_entry("asleep", sleep_dir);
+ remove_proc_entry("proto", sleep_dir);
+ remove_proc_entry("hostwake", sleep_dir);
+ remove_proc_entry("btwake", sleep_dir);
+ remove_proc_entry("sleep", bluetooth_dir);
+ remove_proc_entry("bluetooth", 0);
+}
+
+module_init(bluesleep_init);
+module_exit(bluesleep_exit);
+
+MODULE_DESCRIPTION("Bluetooth Sleep Mode Driver ver %s " VERSION);
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL");
+#endif
+
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 423fd56bf612..b2aec0469961 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@ menu "Character devices"
source "drivers/tty/Kconfig"
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem, which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if you are
+ very security conscious or memory is tight.
+
config DEVKMEM
bool "/dev/kmem virtual device support"
default y
@@ -598,6 +611,10 @@ config DEVPORT
depends on ISA || PCI
default y
+config DCC_TTY
+ tristate "DCC tty driver"
+ depends on ARM
+
source "drivers/s390/char/Kconfig"
config RAMOOPS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 32762ba769c2..5e2fd7097027 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
+obj-$(CONFIG_DCC_TTY) += dcc_tty.o
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
obj-$(CONFIG_RAMOOPS) += ramoops.o
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644
index 000000000000..a787accdcb14
--- /dev/null
+++ b/drivers/char/dcc_tty.c
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static spinlock_t g_dcc_tty_lock = SPIN_LOCK_UNLOCKED;
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+ char ch;
+ int rch;
+ int written;
+
+ while (g_dcc_buffer_count) {
+ ch = g_dcc_buffer[g_dcc_buffer_head];
+ asm(
+ "mrc 14, 0, r15, c0, c1, 0\n"
+ "mcrcc 14, 0, %1, c0, c5, 0\n"
+ "movcc %0, #1\n"
+ "movcs %0, #0\n"
+ : "=r" (written)
+ : "r" (ch)
+ );
+ if (written) {
+ if (ch == '\n')
+ g_dcc_buffer[g_dcc_buffer_head] = '\r';
+ else {
+ g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+ g_dcc_buffer_count--;
+ if (g_dcc_tty)
+ tty_wakeup(g_dcc_tty);
+ }
+ g_dcc_write_delay_usecs = 1;
+ } else {
+ if (g_dcc_write_delay_usecs > 0x100)
+ break;
+ g_dcc_write_delay_usecs <<= 1;
+ udelay(g_dcc_write_delay_usecs);
+ }
+ }
+
+ if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "tst %0, #(1 << 30)\n"
+ "moveq %0, #-1\n"
+ "mrcne 14, 0, %0, c0, c5, 0\n"
+ : "=r" (rch)
+ );
+ if (rch >= 0) {
+ ch = rch;
+ tty_insert_flip_string(g_dcc_tty, &ch, 1);
+ tty_flip_buffer_push(g_dcc_tty);
+ }
+ }
+
+
+ if (g_dcc_buffer_count)
+ hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+ else
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+ g_dcc_tty = tty;
+ g_dcc_tty_open_count++;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+ printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+ return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+ printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+ if (g_dcc_tty == tty) {
+ if (--g_dcc_tty_open_count == 0)
+ g_dcc_tty = NULL;
+ }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+ const unsigned char *buf = buf_start;
+ unsigned long irq_flags;
+ int copy_len;
+ int space_left;
+ int tail;
+
+ if (count < 1)
+ return 0;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ do {
+ tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+ copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ if (copy_len > space_left)
+ copy_len = space_left;
+ if (copy_len > count)
+ copy_len = count;
+ memcpy(&g_dcc_buffer[tail], buf, copy_len);
+ g_dcc_buffer_count += copy_len;
+ buf += copy_len;
+ count -= copy_len;
+ if (copy_len < count && copy_len < space_left) {
+ space_left -= copy_len;
+ copy_len = count;
+ if (copy_len > space_left) {
+ copy_len = space_left;
+ }
+ memcpy(g_dcc_buffer, buf, copy_len);
+ buf += copy_len;
+ count -= copy_len;
+ g_dcc_buffer_count += copy_len;
+ }
+ dcc_poll_locked();
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ } while(count && space_left);
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return buf - buf_start;
+}
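dcc_write() above copies into a 16-byte circular buffer in at most two segments: from the tail to the end of the array, then wrapped to the start. A standalone sketch of just that index math (the drain via dcc_poll_locked() and the locking are omitted):

#include <stdio.h>
#include <string.h>

#define RB_SIZE 16	/* same size as g_dcc_buffer */

static char rb[RB_SIZE];
static int rb_head, rb_count;

/* Fill from the tail to the end of the array, then wrap to the start
 * while there is still data and room. */
static int rb_write(const char *buf, int count)
{
	const char *start = buf;
	int tail, copy, space;

	while (count && rb_count < RB_SIZE) {
		tail = (rb_head + rb_count) % RB_SIZE;
		copy = RB_SIZE - tail;		/* room before the wrap point */
		space = RB_SIZE - rb_count;	/* total free space */
		if (copy > space)
			copy = space;
		if (copy > count)
			copy = count;
		memcpy(&rb[tail], buf, copy);
		rb_count += copy;
		buf += copy;
		count -= copy;
	}
	return buf - start;
}

int main(void)
{
	/* Only 16 of 20 bytes fit until the buffer is drained. */
	printf("wrote %d of 20 bytes\n", rb_write("abcdefghijklmnopqrst", 20));
	return 0;
}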
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ int ret;
+ /* printk("dcc_tty_write %p, %d\n", buf, count); */
+ ret = dcc_write(buf, count);
+ if (ret != count)
+ printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+ return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+ int space_left;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ int ret;
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "mov %0, %0, LSR #30\n"
+ "and %0, %0, #1\n"
+ : "=r" (ret)
+ );
+ return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+ dcc_write(b, count);
+#else
+ /* blocking printk */
+ while (count > 0) {
+ int written;
+ written = dcc_write(b, count);
+ if (written) {
+ b += written;
+ count -= written;
+ }
+ }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+ *index = 0;
+ return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+ if (co->index != 0)
+ return -ENODEV;
+ return 0;
+}
+
+
+static struct console dcc_console =
+{
+ .name = "ttyDCC",
+ .write = dcc_console_write,
+ .device = dcc_console_device,
+ .setup = dcc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+ .open = dcc_tty_open,
+ .close = dcc_tty_close,
+ .write = dcc_tty_write,
+ .write_room = dcc_tty_write_room,
+ .chars_in_buffer = dcc_tty_chars_in_buffer,
+ .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+ int ret;
+
+ hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ g_dcc_timer.function = dcc_tty_timer_func;
+
+ g_dcc_tty_driver = alloc_tty_driver(1);
+ if (!g_dcc_tty_driver) {
+ printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ g_dcc_tty_driver->owner = THIS_MODULE;
+ g_dcc_tty_driver->driver_name = "dcc";
+ g_dcc_tty_driver->name = "ttyDCC";
+ g_dcc_tty_driver->major = 0; /* auto assign */
+ g_dcc_tty_driver->minor_start = 0;
+ g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ g_dcc_tty_driver->init_termios = tty_std_termios;
+ g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+ ret = tty_register_driver(g_dcc_tty_driver);
+ if (ret) {
+ printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+ goto err_tty_register_driver_failed;
+ }
+ tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+ register_console(&dcc_console);
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(g_dcc_tty_driver);
+ g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+ return ret;
+}
+
+static void __exit dcc_tty_exit(void)
+{
+ int ret;
+
+ tty_unregister_device(g_dcc_tty_driver, 0);
+ ret = tty_unregister_driver(g_dcc_tty_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+ } else {
+ put_tty_driver(g_dcc_tty_driver);
+ }
+ g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
+
+
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8fc04b4f311f..9b1eb188acdc 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -56,6 +56,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
}
#endif
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
@@ -81,7 +82,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#endif
+#endif
+#ifdef CONFIG_DEVMEM
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
@@ -208,6 +211,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
*ppos += written;
return written;
}
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
int __weak phys_mem_access_prot_allowed(struct file *file,
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -329,6 +335,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
}
return 0;
}
+#endif /* CONFIG_DEVMEM */
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -693,6 +700,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
return file->f_pos = 0;
}
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
/*
* The memory devices use the full 32/64 bits of the offset, and so we cannot
* check against negative addresses: they are ok. The return value is weird,
@@ -726,10 +735,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode * inode, struct file * filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
+#endif
#define zero_lseek null_lseek
#define full_lseek null_lseek
@@ -739,6 +752,7 @@ static int open_port(struct inode * inode, struct file * filp)
#define open_kmem open_mem
#define open_oldmem open_mem
+#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
@@ -747,6 +761,7 @@ static const struct file_operations mem_fops = {
.open = open_mem,
.get_unmapped_area = get_unmapped_area_mem,
};
+#endif
#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
@@ -850,7 +865,9 @@ static const struct memdev {
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
+#ifdef CONFIG_DEVMEM
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e24a2a1b6666..57f96ebbce4b 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -99,6 +99,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+ bool "interactive"
+ select CPU_FREQ_GOV_INTERACTIVE
+ help
+ Use the CPUFreq governor 'interactive' as default. This allows
+ you to get a full dynamic cpu frequency capable system by simply
+ loading your cpufreq low-level hardware driver and using the
+ 'interactive' governor for latency-sensitive workloads.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -156,6 +166,23 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
+config CPU_FREQ_GOV_INTERACTIVE
+ tristate "'interactive' cpufreq policy governor"
+ help
+ 'interactive' - This driver adds a dynamic cpufreq policy governor
+ designed for latency-sensitive workloads.
+
+ This governor attempts to reduce the latency of clock
+ increases so that the system is more responsive to
+ interactive workloads.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_interactive.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index a48bc02cd765..d43b39150efa 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644
index 000000000000..a8d29afc709f
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -0,0 +1,772 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+
+#include <asm/cputime.h>
+
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct cpufreq_interactive_cpuinfo {
+ struct timer_list cpu_timer;
+ int timer_idlecancel;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ u64 timer_run_time;
+ int idling;
+ u64 freq_change_time;
+ u64 freq_change_time_in_idle;
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int target_freq;
+ int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* Workqueues handle frequency scaling */
+static struct task_struct *up_task;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_down_work;
+static cpumask_t up_cpumask;
+static spinlock_t up_cpumask_lock;
+static cpumask_t down_cpumask;
+static spinlock_t down_cpumask_lock;
+static struct mutex set_speed_lock;
+
+/* Go to max speed when CPU load at or above this value. */
+#define DEFAULT_GO_MAXSPEED_LOAD 95
+static unsigned long go_maxspeed_load;
+
+/* Base of exponential raise to max speed; if 0 - jump to maximum */
+static unsigned long boost_factor;
+
+/* Max frequency boost in Hz; if 0 - no max is enforced */
+static unsigned long max_boost;
+
+/*
+ * Targeted sustainable load relative to the current frequency.
+ * If 0, the target is set relative to the max speed.
+ */
+static unsigned long sustain_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME 20000
+static unsigned long min_sample_time;
+
+/*
+ * The sample rate of the timer used to increase frequency
+ */
+#define DEFAULT_TIMER_RATE 10000
+static unsigned long timer_rate;
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+ .name = "interactive",
+ .governor = cpufreq_governor_interactive,
+ .max_transition_latency = 10000000,
+ .owner = THIS_MODULE,
+};
+
+static unsigned int cpufreq_interactive_get_target(
+ int cpu_load, int load_since_change, struct cpufreq_policy *policy)
+{
+ unsigned int target_freq;
+
+ /*
+ * Choose greater of short-term load (since last idle timer
+ * started or timer function re-armed itself) or long-term load
+ * (since last frequency change).
+ */
+ if (load_since_change > cpu_load)
+ cpu_load = load_since_change;
+
+ if (cpu_load >= go_maxspeed_load) {
+ if (!boost_factor)
+ return policy->max;
+
+ target_freq = policy->cur * boost_factor;
+
+ if (max_boost && target_freq > policy->cur + max_boost)
+ target_freq = policy->cur + max_boost;
+ }
+ else {
+ if (!sustain_load)
+ return policy->max * cpu_load / 100;
+
+ target_freq = policy->cur * cpu_load / sustain_load;
+ }
+
+ target_freq = min(target_freq, policy->max);
+ return target_freq;
+}
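With the default tunables (go_maxspeed_load = 95, boost_factor = 0, max_boost = 0, sustain_load = 0) the mapping above reduces to: jump to policy->max at 95% load or higher, otherwise scale policy->max by the load. A standalone restatement with sample numbers, for illustration only:

#include <stdio.h>

/* Default-tunable form of cpufreq_interactive_get_target(). */
static unsigned int target_khz(unsigned int max, int load)
{
	if (load >= 95)
		return max;			/* boost_factor == 0: jump to max */
	return (unsigned int)((unsigned long long)max * load / 100);
						/* sustain_load == 0: scale vs max */
}

int main(void)
{
	/* A 1.0 GHz policy max at 60% load targets 600 MHz; 95%+ load
	 * jumps straight to 1.0 GHz. */
	printf("%u kHz\n", target_khz(1000000, 60));	/* 600000 */
	printf("%u kHz\n", target_khz(1000000, 97));	/* 1000000 */
	return 0;
}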
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+ unsigned int delta_idle;
+ unsigned int delta_time;
+ int cpu_load;
+ int load_since_change;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, data);
+ u64 now_idle;
+ unsigned int new_freq;
+ unsigned int index;
+ unsigned long flags;
+
+ smp_rmb();
+
+ if (!pcpu->governor_enabled)
+ goto exit;
+
+ /*
+ * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
+ * this lets idle exit know the current idle time sample has
+ * been processed, and idle exit can generate a new sample and
+ * re-arm the timer. This prevents a concurrent idle
+ * exit on that CPU from writing a new set of info at the same time
+ * the timer function runs (the timer function can't use that info
+ * until more time passes).
+ */
+ time_in_idle = pcpu->time_in_idle;
+ idle_exit_time = pcpu->idle_exit_time;
+ now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
+ smp_wmb();
+
+ /* If we raced with cancelling a timer, skip. */
+ if (!idle_exit_time)
+ goto exit;
+
+ delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
+ delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
+ idle_exit_time);
+
+ /*
+ * If timer ran less than 1ms after short-term sample started, retry.
+ */
+ if (delta_time < 1000)
+ goto rearm;
+
+ if (delta_idle > delta_time)
+ cpu_load = 0;
+ else
+ cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+
+ delta_idle = (unsigned int) cputime64_sub(now_idle,
+ pcpu->freq_change_time_in_idle);
+ delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
+ pcpu->freq_change_time);
+
+ if ((delta_time == 0) || (delta_idle > delta_time))
+ load_since_change = 0;
+ else
+ load_since_change =
+ 100 * (delta_time - delta_idle) / delta_time;
+
+ /*
+ * Combine short-term load (since last idle timer started or timer
+ * function re-armed itself) and long-term load (since last frequency
+ * change) to determine new target frequency
+ */
+ new_freq = cpufreq_interactive_get_target(cpu_load, load_since_change,
+ pcpu->policy);
+
+ if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+ new_freq, CPUFREQ_RELATION_H,
+ &index)) {
+ pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
+ (int) data);
+ goto rearm;
+ }
+
+ new_freq = pcpu->freq_table[index].frequency;
+
+ if (pcpu->target_freq == new_freq)
+ goto rearm_if_notmax;
+
+ /*
+ * Do not scale down unless we have been at this frequency for the
+ * minimum sample time.
+ */
+ if (new_freq < pcpu->target_freq) {
+ if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
+ < min_sample_time)
+ goto rearm;
+ }
+
+ if (new_freq < pcpu->target_freq) {
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&down_cpumask_lock, flags);
+ cpumask_set_cpu(data, &down_cpumask);
+ spin_unlock_irqrestore(&down_cpumask_lock, flags);
+ queue_work(down_wq, &freq_scale_down_work);
+ } else {
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&up_cpumask_lock, flags);
+ cpumask_set_cpu(data, &up_cpumask);
+ spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ wake_up_process(up_task);
+ }
+
+rearm_if_notmax:
+ /*
+ * Already set max speed and don't see a need to change that,
+ * wait until next idle to re-evaluate, don't need timer.
+ */
+ if (pcpu->target_freq == pcpu->policy->max)
+ goto exit;
+
+rearm:
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ /*
+ * If already at min: if that CPU is idle, don't set timer.
+ * Else cancel the timer if that CPU goes idle. We don't
+ * need to re-evaluate speed until the next idle exit.
+ */
+ if (pcpu->target_freq == pcpu->policy->min) {
+ smp_rmb();
+
+ if (pcpu->idling)
+ goto exit;
+
+ pcpu->timer_idlecancel = 1;
+ }
+
+ pcpu->time_in_idle = get_cpu_idle_time_us(
+ data, &pcpu->idle_exit_time);
+ mod_timer(&pcpu->cpu_timer,
+ jiffies + usecs_to_jiffies(timer_rate));
+ }
+
+exit:
+ return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+ int pending;
+
+ if (!pcpu->governor_enabled)
+ return;
+
+ pcpu->idling = 1;
+ smp_wmb();
+ pending = timer_pending(&pcpu->cpu_timer);
+
+ if (pcpu->target_freq != pcpu->policy->min) {
+#ifdef CONFIG_SMP
+ /*
+ * Entering idle while not at lowest speed. On some
+ * platforms this can hold the other CPU(s) at that speed
+ * even though the CPU is idle. Set a timer to re-evaluate
+ * speed so this idle CPU doesn't hold the other CPUs above
+ * min indefinitely. This should probably be a quirk of
+ * the CPUFreq driver.
+ */
+ if (!pending) {
+ pcpu->time_in_idle = get_cpu_idle_time_us(
+ smp_processor_id(), &pcpu->idle_exit_time);
+ pcpu->timer_idlecancel = 0;
+ mod_timer(&pcpu->cpu_timer,
+ jiffies + usecs_to_jiffies(timer_rate));
+ }
+#endif
+ } else {
+ /*
+ * If at min speed and entering idle after load has
+ * already been evaluated, and a timer has been set just in
+ * case the CPU suddenly goes busy, cancel that timer. The
+ * CPU didn't go busy; we'll recheck things upon idle exit.
+ */
+ if (pending && pcpu->timer_idlecancel) {
+ del_timer(&pcpu->cpu_timer);
+ /*
+ * Ensure last timer run time is after current idle
+ * sample start time, so next idle exit will always
+ * start a new idle sampling period.
+ */
+ pcpu->idle_exit_time = 0;
+ pcpu->timer_idlecancel = 0;
+ }
+ }
+
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+
+ pcpu->idling = 0;
+ smp_wmb();
+
+ /*
+ * Arm the timer for 1-2 ticks later if not already, and if the timer
+ * function has already processed the previous load sampling
+ * interval. (If the timer is not pending but has not processed
+ * the previous interval, it is probably racing with us on another
+ * CPU. Let it compute load based on the previous sample and then
+ * re-arm the timer for another interval when it's done, rather
+ * than updating the interval start time to be "now", which doesn't
+ * give the timer function enough time to make a decision on this
+ * run.)
+ */
+ if (timer_pending(&pcpu->cpu_timer) == 0 &&
+ pcpu->timer_run_time >= pcpu->idle_exit_time &&
+ pcpu->governor_enabled) {
+ pcpu->time_in_idle =
+ get_cpu_idle_time_us(smp_processor_id(),
+ &pcpu->idle_exit_time);
+ pcpu->timer_idlecancel = 0;
+ mod_timer(&pcpu->cpu_timer,
+ jiffies + usecs_to_jiffies(timer_rate));
+ }
+
+}
+
+static int cpufreq_interactive_up_task(void *data)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&up_cpumask_lock, flags);
+
+ if (cpumask_empty(&up_cpumask)) {
+ spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&up_cpumask_lock, flags);
+ }
+
+ set_current_state(TASK_RUNNING);
+ tmp_mask = up_cpumask;
+ cpumask_clear(&up_cpumask);
+ spin_unlock_irqrestore(&up_cpumask_lock, flags);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ unsigned int j;
+ unsigned int max_freq = 0;
+
+ pcpu = &per_cpu(cpuinfo, cpu);
+ smp_rmb();
+
+ if (!pcpu->governor_enabled)
+ continue;
+
+ mutex_lock(&set_speed_lock);
+
+ for_each_cpu(j, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, j);
+
+ if (pjcpu->target_freq > max_freq)
+ max_freq = pjcpu->target_freq;
+ }
+
+ if (max_freq != pcpu->policy->cur)
+ __cpufreq_driver_target(pcpu->policy,
+ max_freq,
+ CPUFREQ_RELATION_H);
+ mutex_unlock(&set_speed_lock);
+
+ pcpu->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,
+ &pcpu->freq_change_time);
+ }
+ }
+
+ return 0;
+}
+
+static void cpufreq_interactive_freq_down(struct work_struct *work)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ spin_lock_irqsave(&down_cpumask_lock, flags);
+ tmp_mask = down_cpumask;
+ cpumask_clear(&down_cpumask);
+ spin_unlock_irqrestore(&down_cpumask_lock, flags);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ unsigned int j;
+ unsigned int max_freq = 0;
+
+ pcpu = &per_cpu(cpuinfo, cpu);
+ smp_rmb();
+
+ if (!pcpu->governor_enabled)
+ continue;
+
+ mutex_lock(&set_speed_lock);
+
+ for_each_cpu(j, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, j);
+
+ if (pjcpu->target_freq > max_freq)
+ max_freq = pjcpu->target_freq;
+ }
+
+ if (max_freq != pcpu->policy->cur)
+ __cpufreq_driver_target(pcpu->policy, max_freq,
+ CPUFREQ_RELATION_H);
+
+ mutex_unlock(&set_speed_lock);
+ pcpu->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,
+ &pcpu->freq_change_time);
+ }
+}
+
+static ssize_t show_go_maxspeed_load(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", go_maxspeed_load);
+}
+
+static ssize_t store_go_maxspeed_load(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ go_maxspeed_load = val;
+ return count;
+}
+
+static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0666,
+ show_go_maxspeed_load, store_go_maxspeed_load);
+
+static ssize_t show_boost_factor(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", boost_factor);
+}
+
+static ssize_t store_boost_factor(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ if (!strict_strtoul(buf, 0, &boost_factor))
+ return count;
+ return -EINVAL;
+}
+
+static struct global_attr boost_factor_attr = __ATTR(boost_factor, 0644,
+ show_boost_factor, store_boost_factor);
+
+static ssize_t show_max_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", max_boost);
+}
+
+static ssize_t store_max_boost(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ if (!strict_strtoul(buf, 0, &max_boost))
+ return count;
+ return -EINVAL;
+}
+
+static struct global_attr max_boost_attr = __ATTR(max_boost, 0666,
+ show_max_boost, store_max_boost);
+
+
+static ssize_t show_sustain_load(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", sustain_load);
+}
+
+static ssize_t store_sustain_load(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ if (!strict_strtoul(buf, 0, &sustain_load))
+ return count;
+ return -EINVAL;
+}
+
+static struct global_attr sustain_load_attr = __ATTR(sustain_load, 0644,
+ show_sustain_load, store_sustain_load);
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ min_sample_time = val;
+ return count;
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+ show_min_sample_time, store_min_sample_time);
+
+static ssize_t show_timer_rate(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", timer_rate);
+}
+
+static ssize_t store_timer_rate(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ timer_rate = val;
+ return count;
+}
+
+static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
+ show_timer_rate, store_timer_rate);
+
+static struct attribute *interactive_attributes[] = {
+ &go_maxspeed_load_attr.attr,
+ &boost_factor_attr.attr,
+ &max_boost_attr.attr,
+ &sustain_load_attr.attr,
+ &min_sample_time_attr.attr,
+ &timer_rate_attr.attr,
+ NULL,
+};
+
+static struct attribute_group interactive_attr_group = {
+ .attrs = interactive_attributes,
+ .name = "interactive",
+};
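cpufreq_global_kobject is normally exposed as /sys/devices/system/cpu/cpufreq, so the group above should appear as /sys/devices/system/cpu/cpufreq/interactive/<attr> (path assumed from that convention). A small userspace sketch for tuning one of the attributes:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value to one of the governor tunables, e.g. go_maxspeed_load
 * or min_sample_time. The "interactive" directory name comes from
 * interactive_attr_group.name above. */
static int set_tunable(const char *name, const char *value)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpufreq/interactive/%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, value, strlen(value)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	return set_tunable("go_maxspeed_load", "85");
}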
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ int rc;
+ unsigned int j;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_frequency_table *freq_table;
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!cpu_online(policy->cpu))
+ return -EINVAL;
+
+ freq_table =
+ cpufreq_frequency_get_table(policy->cpu);
+
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ pcpu->policy = policy;
+ pcpu->target_freq = policy->cur;
+ pcpu->freq_table = freq_table;
+ pcpu->freq_change_time_in_idle =
+ get_cpu_idle_time_us(j,
+ &pcpu->freq_change_time);
+ pcpu->time_in_idle = pcpu->freq_change_time_in_idle;
+ pcpu->idle_exit_time = pcpu->freq_change_time;
+ pcpu->timer_idlecancel = 1;
+ pcpu->governor_enabled = 1;
+ smp_wmb();
+
+ if (!timer_pending(&pcpu->cpu_timer))
+ mod_timer(&pcpu->cpu_timer, jiffies + 2);
+ }
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (atomic_inc_return(&active_count) > 1)
+ return 0;
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ if (rc)
+ return rc;
+
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ pcpu->governor_enabled = 0;
+ smp_wmb();
+ del_timer_sync(&pcpu->cpu_timer);
+
+ /*
+ * Reset idle exit time since we may cancel the timer
+ * before it can run after the last idle exit time,
+ * to avoid tripping the check in idle exit for a timer
+ * that is trying to run.
+ */
+ pcpu->idle_exit_time = 0;
+ }
+
+ flush_work(&freq_scale_down_work);
+ if (atomic_dec_return(&active_count) > 0)
+ return 0;
+
+ sysfs_remove_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->min, CPUFREQ_RELATION_L);
+ break;
+ }
+ return 0;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ cpufreq_interactive_idle_start();
+ break;
+ case IDLE_END:
+ cpufreq_interactive_idle_end();
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+ .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int __init cpufreq_interactive_init(void)
+{
+ unsigned int i;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
+ min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+ timer_rate = DEFAULT_TIMER_RATE;
+
+ /* Initialize per-cpu timers */
+ for_each_possible_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+ init_timer(&pcpu->cpu_timer);
+ pcpu->cpu_timer.function = cpufreq_interactive_timer;
+ pcpu->cpu_timer.data = i;
+ }
+
+ up_task = kthread_create(cpufreq_interactive_up_task, NULL,
+ "kinteractiveup");
+ if (IS_ERR(up_task))
+ return PTR_ERR(up_task);
+
+ sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
+ get_task_struct(up_task);
+
+ /* No rescuer thread: bind to the CPU queuing the work for a possibly
+ warm cache (probably doesn't matter much). */
+ down_wq = alloc_workqueue("knteractive_down", 0, 1);
+
+ if (!down_wq)
+ goto err_freeuptask;
+
+ INIT_WORK(&freq_scale_down_work,
+ cpufreq_interactive_freq_down);
+
+ spin_lock_init(&up_cpumask_lock);
+ spin_lock_init(&down_cpumask_lock);
+ mutex_init(&set_speed_lock);
+
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
+
+ return cpufreq_register_governor(&cpufreq_gov_interactive);
+
+err_freeuptask:
+ put_task_struct(up_task);
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_interactive);
+ kthread_stop(up_task);
+ put_task_struct(up_task);
+ destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+ "latency-sensitive workloads");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index faf7c5217848..c136b787c5af 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -59,7 +59,7 @@ static int cpufreq_stats_update(unsigned int cpu)
cur_time = get_jiffies_64();
spin_lock(&cpufreq_stats_lock);
stat = per_cpu(cpufreq_stats_table, cpu);
- if (stat->time_in_state)
+ if (stat->time_in_state && stat->last_index >= 0)
stat->time_in_state[stat->last_index] =
cputime64_add(stat->time_in_state[stat->last_index],
cputime_sub(cur_time, stat->last_time));
@@ -159,10 +159,10 @@ static struct attribute_group stats_attr_group = {
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
int index;
- for (index = 0; index < stat->max_state; index++)
- if (stat->freq_table[index] == freq)
- return index;
- return -1;
+ for (index = 0; index < stat->state_num; index++)
+ if (stat->freq_table[index] > freq)
+ break;
+ return index - 1; /* below lowest freq in table: return -1 */
}
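Editorial aside: with this change freq_table_get_index() no longer requires an exact match. It returns the index of the highest table entry at or below the requested frequency, or -1 when the frequency is below the lowest entry, relying on the table being sorted ascending (which the sorted insertion added below guarantees). A standalone sketch of the same lookup, using a hypothetical frequency table:

#include <stdio.h>

/* Mirrors the new lookup: index of the highest table entry <= freq,
 * or -1 when freq is below the lowest entry. */
static int get_index(const unsigned int *table, int n, unsigned int freq)
{
	int index;

	for (index = 0; index < n; index++)
		if (table[index] > freq)
			break;
	return index - 1;
}

int main(void)
{
	/* hypothetical frequency table, kHz */
	const unsigned int table[] = { 216000, 312000, 456000, 608000 };

	printf("%d %d %d\n",
	       get_index(table, 4, 456000),   /* 2: exact match */
	       get_index(table, 4, 500000),   /* 2: rounds down to 456000 */
	       get_index(table, 4, 100000));  /* -1: below lowest entry */
	return 0;
}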
/* should be called late in the CPU removal sequence so that the stats
@@ -193,7 +193,7 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- unsigned int i, j, count = 0, ret = 0;
+ unsigned int i, j, k, l, count = 0, ret = 0;
struct cpufreq_stats *stat;
struct cpufreq_policy *data;
unsigned int alloc_size;
@@ -245,8 +245,16 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
- if (freq_table_get_index(stat, freq) == -1)
- stat->freq_table[j++] = freq;
+
+ /* Insert in sorted stat->freq_table */
+ for (k = 0; k < j && stat->freq_table[k] < freq; k++)
+ ;
+ if (stat->freq_table[k] == freq)
+ continue;
+ for (l = j; l > k; l--)
+ stat->freq_table[l] = stat->freq_table[l - 1];
+ stat->freq_table[k] = freq;
+ j++;
}
stat->state_num = j;
spin_lock(&cpufreq_stats_lock);
@@ -298,10 +306,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
old_index = stat->last_index;
new_index = freq_table_get_index(stat, freq->new);
- /* We can't do stat->time_in_state[-1]= .. */
- if (old_index == -1 || new_index == -1)
- return 0;
-
cpufreq_stats_update(freq->cpu);
if (old_index == new_index)
@@ -310,13 +314,35 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
spin_lock(&cpufreq_stats_lock);
stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- stat->trans_table[old_index * stat->max_state + new_index]++;
+ if (old_index >= 0 && new_index >= 0)
+ stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
stat->total_trans++;
spin_unlock(&cpufreq_stats_lock);
return 0;
}
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table;
+ int ret = -ENODEV;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return -ENODEV;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (!table)
+ goto out;
+
+ ret = cpufreq_stats_create_table(policy, table);
+
+out:
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
@@ -335,6 +361,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ cpufreq_stats_create_table_cpu(cpu);
+ break;
}
return NOTIFY_OK;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c47f3d09c1ee..e2f7271915dc 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -172,7 +172,12 @@ static inline int performance_multiplier(void)
/* for higher loadavg, we are more reluctant */
- mult += 2 * get_loadavg();
+ /*
+ * this doesn't work as intended - it is almost always 0, but can
+ * sometimes, depending on workload, spike very high into the hundreds
+ * even when the average cpu load is under 10%.
+ */
+ /* mult += 2 * get_loadavg(); */
/* for IO wait tasks (per cpu!) we add 5x each */
mult += 10 * nr_iowait_cpu(smp_processor_id());
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e0b25de1e339..b6994751719d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -18,7 +18,7 @@ config CRYPTO_DEV_PADLOCK
(so called VIA PadLock ACE, Advanced Cryptography Engine)
that provides instructions for very fast cryptographic
operations with supported algorithms.
-
+
The instructions are used only when the CPU supports them.
Otherwise software encryption is used.
@@ -292,4 +292,21 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.
+config CRYPTO_DEV_TEGRA_AES
+ tristate "Support for TEGRA AES hw engine"
+ depends on ARCH_TEGRA_2x_SOC || ARCH_TEGRA_3x_SOC
+ select CRYPTO_AES
+ select TEGRA_ARB_SEMAPHORE
+ help
+ Tegra processors have an AES hardware accelerator module. Select this
+ if you want to use the Tegra module for AES algorithms.
+
+config CRYPTO_DEV_TEGRA_SE
+ tristate "Tegra SE driver for crypto algorithms"
+ depends on ARCH_TEGRA_3x_SOC
+ select CRYPTO_AES
+ help
+ This option enables support for the Tegra Security Engine (SE) for
+ crypto acceleration.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea50155319..e244cfcdd505 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,3 +1,5 @@
+GCOV_PROFILE := y
+
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -13,3 +15,5 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA_SE) += tegra-se.o
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
new file mode 100644
index 000000000000..31069558d9f1
--- /dev/null
+++ b/drivers/crypto/tegra-aes.c
@@ -0,0 +1,1451 @@
+/*
+ * drivers/crypto/tegra-aes.c
+ *
+ * aes driver for NVIDIA tegra aes hardware
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+
+#include <mach/arb_sema.h>
+#include <mach/clk.h>
+#include "../video/tegra/nvmap/nvmap.h"
+
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <crypto/internal/rng.h>
+
+#include "tegra-aes.h"
+
+#define FLAGS_MODE_MASK 0x00ff
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_GIV BIT(2)
+#define FLAGS_RNG BIT(3)
+#define FLAGS_OFB BIT(4)
+#define FLAGS_INIT BIT(5)
+#define FLAGS_BUSY 1
+
+/*
+ * Defines the maximum number of bytes the AES engine processes in one go,
+ * which takes about 1 msec. The AES engine spends about 176 cycles per
+ * 16-byte block, i.e. roughly 11 cycles/byte. The CPU may hold the BSE for
+ * up to 1 msec, during which about 216K AVP/BSE cycles are available, so
+ * AES can process roughly 216K/11 ~= 19KB in that window. Based on this,
+ * AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
+ */
+#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
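Editorial aside: as a quick sanity check of the sizing comment above, the arithmetic works out as follows (a standalone sketch; the 216K-cycle and 11-cycles/byte figures are taken from the comment, nothing else is assumed).

#include <stdio.h>

int main(void)
{
	const unsigned long bse_cycles_per_msec = 216000; /* ~216K AVP/BSE cycles per 1 msec slot */
	const unsigned long cycles_per_byte = 11;         /* ~176 cycles per 16-byte block */
	unsigned long max_bytes = bse_cycles_per_msec / cycles_per_byte; /* ~19636 bytes */

	printf("theoretical max per msec: %lu bytes, chosen DMA buffer: %u bytes\n",
	       max_bytes, 0x4000u /* AES_HW_DMA_BUFFER_SIZE_BYTES */);
	return 0;
}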
+
+/*
+ * The key table length is 64 bytes
+ * (up to 32 bytes of key + a 16-byte original initial vector
+ * + a 16-byte updated initial vector).
+ */
+#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
+
+#define AES_HW_IV_SIZE 16
+#define AES_HW_KEYSCHEDULE_LEN 256
+#define ARB_SEMA_TIMEOUT 500
+
+/*
+ * The memory in use is divided as follows:
+ * 1. Key - 32 bytes
+ * 2. Original IV - 16 bytes
+ * 3. Updated IV - 16 bytes
+ * 4. Key schedule - 256 bytes
+ *
+ * 1+2+3 constitute the hw key table.
+ */
+#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
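Editorial aside: a compile-time sketch of the per-slot layout described above. The struct and field names are illustrative; only the sizes come from the surrounding defines.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ivkey_layout {
	uint8_t key[32];           /* up to 32-byte key */
	uint8_t original_iv[16];   /* original IV */
	uint8_t updated_iv[16];    /* updated IV */
	uint8_t key_schedule[256]; /* expanded key schedule */
};

int main(void)
{
	/* key + both IVs form the 64-byte hw key table ... */
	assert(offsetof(struct ivkey_layout, key_schedule) == 64);
	/* ... and the whole region matches AES_IVKEY_SIZE (64 + 256 = 320) */
	assert(sizeof(struct ivkey_layout) == 320);
	return 0;
}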
+
+#define DEFAULT_RNG_BLK_SZ 16
+
+/* As of now only 5 commands are used for AES encryption/decryption */
+#define AES_HW_MAX_ICQ_LENGTH 4
+
+#define ICQBITSHIFT_BLKCNT 0
+
+/* memdma_vd command */
+#define MEMDMA_DIR_DTOVRAM 0
+#define MEMDMA_DIR_VTODRAM 1
+#define MEMDMABITSHIFT_DIR 25
+#define MEMDMABITSHIFT_NUM_WORDS 12
+
+/* Define AES Interactive command Queue commands Bit positions */
+enum {
+ ICQBITSHIFT_KEYTABLEADDR = 0,
+ ICQBITSHIFT_KEYTABLEID = 17,
+ ICQBITSHIFT_VRAMSEL = 23,
+ ICQBITSHIFT_TABLESEL = 24,
+ ICQBITSHIFT_OPCODE = 26,
+};
+
+/* Define Ucq opcodes required for AES operation */
+enum {
+ UCQOPCODE_BLKSTARTENGINE = 0x0E,
+ UCQOPCODE_DMASETUP = 0x10,
+ UCQOPCODE_DMACOMPLETE = 0x11,
+ UCQOPCODE_SETTABLE = 0x15,
+ UCQOPCODE_MEMDMAVD = 0x22,
+};
+
+/* Define Aes command values */
+enum {
+ UCQCMD_VRAM_SEL = 0x1,
+ UCQCMD_CRYPTO_TABLESEL = 0x3,
+ UCQCMD_KEYSCHEDTABLESEL = 0x4,
+ UCQCMD_KEYTABLESEL = 0x8,
+};
+
+#define UCQCMD_KEYTABLEADDRMASK 0x1FFFF
+
+#define AES_NR_KEYSLOTS 8
+#define SSK_SLOT_NUM 4
+
+struct tegra_aes_slot {
+ struct list_head node;
+ int slot_num;
+ bool available;
+};
+
+struct tegra_aes_reqctx {
+ unsigned long mode;
+};
+
+#define TEGRA_AES_QUEUE_LENGTH 500
+
+struct tegra_aes_engine {
+ struct tegra_aes_dev *dd;
+ struct tegra_aes_ctx *ctx;
+ struct clk *iclk;
+ struct clk *pclk;
+ struct ablkcipher_request *req;
+ struct scatterlist *in_sg;
+ struct completion op_complete;
+ struct scatterlist *out_sg;
+ void __iomem *io_base;
+ void __iomem *ivkey_base;
+ unsigned long phys_base;
+ unsigned long iram_phys;
+ void *iram_virt;
+ dma_addr_t ivkey_phys_base;
+ dma_addr_t dma_buf_in;
+ dma_addr_t dma_buf_out;
+ size_t total;
+ size_t in_offset;
+ size_t out_offset;
+ u32 engine_offset;
+ u32 *buf_in;
+ u32 *buf_out;
+ int res_id;
+ unsigned long busy;
+ u8 irq;
+ bool new_key;
+ u32 status;
+};
+
+struct tegra_aes_dev {
+ struct device *dev;
+ struct tegra_aes_slot *slots;
+ struct tegra_aes_engine bsev;
+ struct tegra_aes_engine bsea;
+ struct nvmap_client *client;
+ struct nvmap_handle_ref *h_ref;
+ struct crypto_queue queue;
+ spinlock_t lock;
+ u64 ctr;
+ unsigned long flags;
+ u8 dt[DEFAULT_RNG_BLK_SZ];
+};
+
+static struct tegra_aes_dev *aes_dev;
+
+struct tegra_aes_ctx {
+ struct tegra_aes_dev *dd;
+ struct tegra_aes_engine *eng;
+ struct tegra_aes_slot *slot;
+ int key[AES_MAX_KEY_SIZE];
+ int keylen;
+ bool use_ssk;
+ u8 dt[DEFAULT_RNG_BLK_SZ];
+};
+
+static struct tegra_aes_ctx rng_ctx;
+
+/* keep registered devices data here */
+static LIST_HEAD(slot_list);
+static DEFINE_SPINLOCK(list_lock);
+static DEFINE_MUTEX(aes_lock);
+
+/* Engine specific work queues */
+static void bsev_workqueue_handler(struct work_struct *work);
+static void bsea_workqueue_handler(struct work_struct *work);
+
+static DECLARE_WORK(bsev_work, bsev_workqueue_handler);
+static DECLARE_WORK(bsea_work, bsea_workqueue_handler);
+
+static struct workqueue_struct *bsev_wq;
+static struct workqueue_struct *bsea_wq;
+
+extern unsigned long long tegra_chip_uid(void);
+
+static inline u32 aes_readl(struct tegra_aes_engine *engine, u32 offset)
+{
+ return readl(engine->io_base + offset);
+}
+
+static inline void aes_writel(struct tegra_aes_engine *engine,
+ u32 val, u32 offset)
+{
+ writel(val, engine->io_base + offset);
+}
+
+static int alloc_iram(struct tegra_aes_dev *dd)
+{
+ size_t size;
+ int err;
+
+ dd->h_ref = NULL;
+
+ /* [key+iv+u-iv=64B] * 8 = 512Bytes */
+ size = AES_MAX_KEY_SIZE;
+ dd->client = nvmap_create_client(nvmap_dev, "aes_bsea");
+ if (IS_ERR(dd->client)) {
+ dev_err(dd->dev, "nvmap_create_client failed\n");
+ goto out;
+ }
+
+ dd->h_ref = nvmap_create_handle(dd->client, size);
+ if (IS_ERR(dd->h_ref)) {
+ dev_err(dd->dev, "nvmap_create_handle failed\n");
+ goto out;
+ }
+
+ /* Allocate memory in the iram */
+ err = nvmap_alloc_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref),
+ NVMAP_HEAP_CARVEOUT_IRAM, size, 0);
+ if (err) {
+ dev_err(dd->dev, "nvmap_alloc_handle_id failed\n");
+ nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref));
+ goto out;
+ }
+ dd->bsea.iram_phys = nvmap_handle_address(dd->client,
+ nvmap_ref_to_id(dd->h_ref));
+
+ dd->bsea.iram_virt = nvmap_mmap(dd->h_ref); /* get virtual address */
+ if (!dd->bsea.iram_virt) {
+ dev_err(dd->dev, "%s: no mem, BSEA IRAM alloc failure\n",
+ __func__);
+ goto out;
+ }
+
+ memset(dd->bsea.iram_virt, 0, size);
+ return 0;
+
+out:
+ if (dd->bsea.iram_virt)
+ nvmap_munmap(dd->h_ref, dd->bsea.iram_virt);
+
+ if (dd->client) {
+ nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref));
+ nvmap_client_put(dd->client);
+ }
+
+ return -ENOMEM;
+}
+
+static void free_iram(struct tegra_aes_dev *dd)
+{
+ if (dd->bsea.iram_virt)
+ nvmap_munmap(dd->h_ref, dd->bsea.iram_virt);
+
+ if (dd->client) {
+ nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref));
+ nvmap_client_put(dd->client);
+ }
+}
+
+static int aes_hw_init(struct tegra_aes_engine *engine)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ int ret = 0;
+
+ if (engine->pclk) {
+ ret = clk_enable(engine->pclk);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: pclock enable fail(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ if (engine->iclk) {
+ ret = clk_enable(engine->iclk);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: iclock enable fail(%d)\n",
+ __func__, ret);
+ if (engine->pclk)
+ clk_disable(engine->pclk);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void aes_hw_deinit(struct tegra_aes_engine *engine)
+{
+ if (engine->pclk)
+ clk_disable(engine->pclk);
+
+ if (engine->iclk)
+ clk_disable(engine->iclk);
+}
+
+#define MIN_RETRIES 3
+static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
+ u32 out_addr, int nblocks, int mode, bool upd_iv)
+{
+ u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
+ int qlen = 0, i, eng_busy, icq_empty, ret;
+ u32 value;
+ int retries = MIN_RETRIES;
+
+start:
+ do {
+ value = aes_readl(eng, INTR_STATUS);
+ eng_busy = value & BIT(0);
+ icq_empty = value & BIT(3);
+ } while (eng_busy || (!icq_empty));
+
+ aes_writel(eng, 0xFFFFFFFF, INTR_STATUS);
+
+ /* error, dma xfer complete */
+ aes_writel(eng, 0x33, INT_ENB);
+ enable_irq(eng->irq);
+
+ cmdq[qlen++] = UCQOPCODE_DMASETUP << ICQBITSHIFT_OPCODE;
+ cmdq[qlen++] = in_addr;
+ cmdq[qlen++] = UCQOPCODE_BLKSTARTENGINE << ICQBITSHIFT_OPCODE |
+ (nblocks-1) << ICQBITSHIFT_BLKCNT;
+ cmdq[qlen++] = UCQOPCODE_DMACOMPLETE << ICQBITSHIFT_OPCODE;
+
+ value = aes_readl(eng, CMDQUE_CONTROL);
+ /* access SDRAM through AHB */
+ value &= (~CMDQ_CTRL_SRC_STM_SEL_FIELD & ~CMDQ_CTRL_DST_STM_SEL_FIELD);
+ value |= (CMDQ_CTRL_SRC_STM_SEL_FIELD | CMDQ_CTRL_DST_STM_SEL_FIELD |
+ CMDQ_CTRL_ICMDQEN_FIELD | CMDQ_CTRL_ERROR_FLUSH_ENB);
+ aes_writel(eng, value, CMDQUE_CONTROL);
+
+ value = 0;
+ if (mode & FLAGS_CBC) {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 2 : 3)
+ << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 2 : 3)
+ << SECURE_VCTRAM_SEL_SHIFT) |
+ ((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else if (mode & FLAGS_OFB) {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ ((u32)0 << SECURE_IV_SELECT_SHIFT) |
+ (SECURE_XOR_POS_FIELD) |
+ (2 << SECURE_INPUT_SEL_SHIFT) |
+ (0 << SECURE_VCTRAM_SEL_SHIFT) |
+ (SECURE_CORE_SEL_FIELD) |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else if (mode & FLAGS_RNG){
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (0 << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ ((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT |
+ (1 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (0 << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT) |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ }
+ aes_writel(eng, value, SECURE_INPUT_SELECT);
+
+ aes_writel(eng, out_addr, SECURE_DEST_ADDR);
+ INIT_COMPLETION(eng->op_complete);
+
+ for (i = 0; i < qlen - 1; i++) {
+ do {
+ value = aes_readl(eng, INTR_STATUS);
+ eng_busy = value & BIT(0);
+ icq_empty = value & BIT(3);
+ } while (eng_busy || (!icq_empty));
+ aes_writel(eng, cmdq[i], ICMDQUE_WR);
+ }
+
+ ret = wait_for_completion_timeout(&eng->op_complete,
+ msecs_to_jiffies(150));
+ if (ret == 0) {
+ dev_err(aes_dev->dev, "engine%d timed out (0x%x)\n",
+ eng->res_id, aes_readl(eng, INTR_STATUS));
+ disable_irq(eng->irq);
+ return -ETIMEDOUT;
+ }
+
+ disable_irq(eng->irq);
+ aes_writel(eng, cmdq[qlen - 1], ICMDQUE_WR);
+
+ if ((eng->status != 0) && (retries-- > 0)) {
+ qlen = 0;
+ goto start;
+ }
+
+ return 0;
+}
+
+static void aes_release_key_slot(struct tegra_aes_ctx *ctx)
+{
+ spin_lock(&list_lock);
+ ctx->slot->available = true;
+ ctx->slot = NULL;
+ spin_unlock(&list_lock);
+}
+
+static struct tegra_aes_slot *aes_find_key_slot(struct tegra_aes_dev *dd)
+{
+ struct tegra_aes_slot *slot = NULL;
+ bool found = 0;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(slot, &slot_list, node) {
+ dev_dbg(dd->dev, "empty:%d, num:%d\n", slot->available,
+ slot->slot_num);
+ if (slot->available) {
+ slot->available = false;
+ found = 1;
+ break;
+ }
+ }
+
+ spin_unlock(&list_lock);
+ return found ? slot : NULL;
+}
+
+static int aes_set_key(struct tegra_aes_engine *eng, int slot_num)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ u32 value, cmdq[2];
+ int i, eng_busy, icq_empty, dma_busy;
+
+ if (!eng) {
+ dev_err(dd->dev, "%s: context invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ /* enable key schedule generation in hardware */
+ value = aes_readl(eng, SECURE_CONFIG_EXT);
+ value &= ~SECURE_KEY_SCH_DIS_FIELD;
+ aes_writel(eng, value, SECURE_CONFIG_EXT);
+
+ /* select the key slot */
+ value = aes_readl(eng, SECURE_CONFIG);
+ value &= ~SECURE_KEY_INDEX_FIELD;
+ value |= (slot_num << SECURE_KEY_INDEX_SHIFT);
+ aes_writel(eng, value, SECURE_CONFIG);
+
+ if (slot_num == SSK_SLOT_NUM)
+ goto out;
+
+ if (eng->res_id == TEGRA_ARB_BSEV) {
+ memset(dd->bsev.ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+ memcpy(dd->bsev.ivkey_base, eng->ctx->key, eng->ctx->keylen);
+
+ /* copy the key table from sdram to vram */
+ cmdq[0] = 0;
+ cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE |
+ (MEMDMA_DIR_DTOVRAM << MEMDMABITSHIFT_DIR) |
+ (AES_HW_KEY_TABLE_LENGTH_BYTES/sizeof(u32))
+ << MEMDMABITSHIFT_NUM_WORDS;
+ cmdq[1] = (u32)eng->ivkey_phys_base;
+ for (i = 0; i < ARRAY_SIZE(cmdq); i++)
+ aes_writel(eng, cmdq[i], ICMDQUE_WR);
+ do {
+ value = aes_readl(eng, INTR_STATUS);
+ eng_busy = value & BIT(0);
+ icq_empty = value & BIT(3);
+ dma_busy = value & BIT(23);
+ } while (eng_busy && !icq_empty && dma_busy);
+
+ /* settable command to get key into internal registers */
+ value = 0;
+ value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
+ UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
+ UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
+ (UCQCMD_KEYTABLESEL | slot_num)
+ << ICQBITSHIFT_KEYTABLEID;
+ aes_writel(eng, value, ICMDQUE_WR);
+ do {
+ value = aes_readl(eng, INTR_STATUS);
+ eng_busy = value & BIT(0);
+ icq_empty = value & BIT(3);
+ } while (eng_busy && !icq_empty);
+ } else {
+ memset(dd->bsea.iram_virt, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+ memcpy(dd->bsea.iram_virt, eng->ctx->key, eng->ctx->keylen);
+
+ /* set iram access cfg bit 0 if address >128K */
+ if (dd->bsea.iram_phys > 0x00020000)
+ aes_writel(eng, BIT(0), IRAM_ACCESS_CFG);
+ else
+ aes_writel(eng, 0, IRAM_ACCESS_CFG);
+
+ /* settable command to get key into internal registers */
+ value = 0;
+ value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
+ UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
+ (UCQCMD_KEYTABLESEL | slot_num)
+ << ICQBITSHIFT_KEYTABLEID |
+ dd->bsea.iram_phys >> 2;
+ aes_writel(eng, value, ICMDQUE_WR);
+ do {
+ value = aes_readl(eng, INTR_STATUS);
+ eng_busy = value & BIT(0);
+ icq_empty = value & BIT(3);
+ } while (eng_busy && !icq_empty);
+ }
+
+out:
+ return 0;
+}
+
+static int tegra_aes_handle_req(struct tegra_aes_engine *eng)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_ctx *ctx;
+ struct crypto_async_request *async_req, *backlog;
+ struct tegra_aes_reqctx *rctx;
+ struct ablkcipher_request *req;
+ unsigned long irq_flags;
+ int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
+ int nblocks, total, ret = 0, count = 0;
+ dma_addr_t addr_in, addr_out;
+ struct scatterlist *in_sg, *out_sg;
+
+ spin_lock_irqsave(&dd->lock, irq_flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &eng->busy);
+ spin_unlock_irqrestore(&dd->lock, irq_flags);
+
+ if (!async_req)
+ return -ENODATA;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+ dev_dbg(dd->dev, "%s: get new req (engine #%d)\n", __func__,
+ eng->res_id);
+
+ if (!req->src || !req->dst)
+ return -EINVAL;
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware (%d) not available\n",
+ eng->res_id);
+ return -EBUSY;
+ }
+
+ /* assign new request to device */
+ eng->req = req;
+ eng->total = req->nbytes;
+ eng->in_offset = 0;
+ eng->in_sg = req->src;
+ eng->out_offset = 0;
+ eng->out_sg = req->dst;
+
+ in_sg = eng->in_sg;
+ out_sg = eng->out_sg;
+ total = eng->total;
+
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+ eng->ctx = ctx;
+
+ if (eng->new_key) {
+ if (ctx->use_ssk)
+ aes_set_key(eng, SSK_SLOT_NUM);
+ else
+ aes_set_key(eng, ctx->slot->slot_num);
+
+ eng->new_key = false;
+ }
+
+ if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && req->info) {
+ /* Set the IV in the AES hw slot.
+ * The hw generates the updated IV only after the IV is set in the slot,
+ * so the key and IV are programmed in separate operations.
+ */
+ memcpy(eng->buf_in, (u8 *)req->info, AES_BLOCK_SIZE);
+
+ ret = aes_start_crypt(eng, (u32)eng->dma_buf_in,
+ (u32)eng->dma_buf_out, 1, FLAGS_CBC, false);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+ }
+
+ while (total) {
+ dev_dbg(dd->dev, "remain: %d\n", total);
+ ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ goto out;
+ }
+
+ ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
+ if (!ret) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, eng->in_sg,
+ 1, DMA_TO_DEVICE);
+ goto out;
+ }
+
+ addr_in = sg_dma_address(in_sg);
+ addr_out = sg_dma_address(out_sg);
+ count = min((int)sg_dma_len(in_sg), (int)dma_max);
+ WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
+ nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
+
+ ret = aes_start_crypt(eng, addr_in, addr_out, nblocks,
+ dd->flags, true);
+
+ dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+
+ dev_dbg(dd->dev, "out: copied %d\n", count);
+ total -= count;
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ WARN_ON(((total != 0) && (!in_sg || !out_sg)));
+ }
+
+out:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(eng->res_id);
+ eng->total = total;
+
+ if (eng->req->base.complete)
+ eng->req->base.complete(&eng->req->base, ret);
+
+ dev_dbg(dd->dev, "%s: exit\n", __func__);
+ return ret;
+}
+
+static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_slot *key_slot;
+
+ if (!ctx || !dd) {
+ pr_err("ctx=0x%x, dd=0x%x\n",
+ (unsigned int)ctx, (unsigned int)dd);
+ return -EINVAL;
+ }
+
+ if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
+ (keylen != AES_KEYSIZE_256)) {
+ dev_err(dd->dev, "unsupported key size\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dd->dev, "keylen: %d\n", keylen);
+
+ ctx->dd = dd;
+
+ if (key) {
+ if (!ctx->slot) {
+ key_slot = aes_find_key_slot(dd);
+ if (!key_slot) {
+ dev_err(dd->dev, "no empty slot\n");
+ return -ENOMEM;
+ }
+ ctx->slot = key_slot;
+ }
+
+ /* copy the key to the proper slot */
+ memset(ctx->key, 0, AES_MAX_KEY_SIZE);
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ ctx->use_ssk = false;
+ } else {
+ ctx->use_ssk = true;
+ ctx->keylen = AES_KEYSIZE_128;
+ }
+
+ dd->bsev.new_key = true;
+ dd->bsea.new_key = true;
+ dev_dbg(dd->dev, "done\n");
+ return 0;
+}
+
+static void bsev_workqueue_handler(struct work_struct *work)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_engine *engine = &dd->bsev;
+ int ret;
+
+ aes_hw_init(engine);
+
+ /* empty the crypto queue and then return */
+ do {
+ ret = tegra_aes_handle_req(engine);
+ } while (!ret);
+
+ aes_hw_deinit(engine);
+}
+
+static void bsea_workqueue_handler(struct work_struct *work)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_engine *engine = &dd->bsea;
+ int ret;
+
+ aes_hw_init(engine);
+
+ /* empty the crypto queue and then return */
+ do {
+ ret = tegra_aes_handle_req(engine);
+ } while (!ret);
+
+ aes_hw_deinit(engine);
+}
+
+#define INT_ERROR_MASK 0xFFF000
+static irqreturn_t aes_bsev_irq(int irq, void *dev_id)
+{
+ struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
+ u32 value = aes_readl(&dd->bsev, INTR_STATUS);
+
+ dev_dbg(dd->dev, "bsev irq_stat: 0x%x", value);
+ dd->bsev.status = 0;
+ if (value & INT_ERROR_MASK) {
+ aes_writel(&dd->bsev, INT_ERROR_MASK, INTR_STATUS);
+ dd->bsev.status = value & INT_ERROR_MASK;
+ }
+
+ value = aes_readl(&dd->bsev, INTR_STATUS);
+ if (!(value & ENGINE_BUSY_FIELD))
+ complete(&dd->bsev.op_complete);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t aes_bsea_irq(int irq, void *dev_id)
+{
+ struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
+ u32 value = aes_readl(&dd->bsea, INTR_STATUS);
+
+ dev_dbg(dd->dev, "bsea irq_stat: 0x%x", value);
+ dd->bsea.status = 0;
+ if (value & INT_ERROR_MASK) {
+ aes_writel(&dd->bsea, INT_ERROR_MASK, INTR_STATUS);
+ dd->bsea.status = value & INT_ERROR_MASK;
+ }
+
+ value = aes_readl(&dd->bsea, INTR_STATUS);
+ if (!(value & ENGINE_BUSY_FIELD))
+ complete(&dd->bsea.op_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct tegra_aes_dev *dd = aes_dev;
+ unsigned long flags;
+ int err = 0;
+ int bsev_busy;
+ int bsea_busy;
+
+ dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n", req->nbytes,
+ !!(mode & FLAGS_ENCRYPT),
+ !!(mode & FLAGS_CBC),
+ !!(mode & FLAGS_OFB));
+
+ rctx->mode = mode;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ err = ablkcipher_enqueue_request(&dd->queue, req);
+ bsev_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsev.busy);
+ bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!bsev_busy)
+ queue_work(bsev_wq, &bsev_work);
+ if (!bsea_busy)
+ queue_work(bsea_wq, &bsea_work);
+
+ return err;
+}
+
+static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, 0);
+}
+
+static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_CBC);
+}
+static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
+}
+
+static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_OFB);
+}
+
+static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_engine *eng = rng_ctx.eng;
+ unsigned long flags;
+ int ret, i;
+ u8 *dest = rdata, *dt = rng_ctx.dt;
+
+ /* take mutex to access the aes hw */
+ mutex_lock(&aes_lock);
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware (%d) not available\n",
+ eng->res_id);
+ mutex_unlock(&aes_lock);
+ return -EBUSY;
+ }
+
+ ret = aes_hw_init(eng);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+ dlen = ret;
+ goto fail;
+ }
+
+ memset(eng->buf_in, 0, AES_BLOCK_SIZE);
+ memcpy(eng->buf_in, dt, DEFAULT_RNG_BLK_SZ);
+
+ ret = aes_start_crypt(eng, (u32)eng->dma_buf_in, (u32)eng->dma_buf_out,
+ 1, FLAGS_ENCRYPT | FLAGS_RNG, true);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ dlen = ret;
+ goto out;
+ }
+ memcpy(dest, eng->buf_out, dlen);
+
+ /* update the DT */
+ for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
+ dt[i] += 1;
+ if (dt[i] != 0)
+ break;
+ }
+
+out:
+ aes_hw_deinit(eng);
+
+ spin_lock_irqsave(&dd->lock, flags);
+ clear_bit(FLAGS_BUSY, &eng->busy);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+fail:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(eng->res_id);
+ mutex_unlock(&aes_lock);
+ dev_dbg(dd->dev, "%s: done\n", __func__);
+ return dlen;
+}
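Editorial aside: the "update the DT" loop above treats the 16-byte DT as a big-endian counter, incrementing the last byte and rippling the carry towards dt[0], in the ANSI X9.31 style. A small standalone sketch of the same increment; the sample values are illustrative.

#include <stdio.h>
#include <string.h>

#define BLK 16

/* Same carry-propagating increment as the loop above: dt[15] is the
 * least significant byte. */
static void dt_increment(unsigned char *dt)
{
	int i;

	for (i = BLK - 1; i >= 0; i--) {
		dt[i] += 1;
		if (dt[i] != 0)
			break;
	}
}

int main(void)
{
	unsigned char dt[BLK];
	int i;

	memset(dt, 0, sizeof(dt));
	dt[14] = 0x12;
	dt[15] = 0xff;          /* ...12 ff */
	dt_increment(dt);       /* ...13 00 - carry ripples into dt[14] */

	for (i = 0; i < BLK; i++)
		printf("%02x", dt[i]);
	printf("\n");
	return 0;
}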
+
+static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
+ unsigned int slen)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_ctx *ctx = &rng_ctx;
+ struct tegra_aes_engine *eng = NULL;
+ struct tegra_aes_slot *key_slot;
+ int bsev_busy = false;
+ int bsea_busy = false;
+ unsigned long flags;
+ struct timespec ts;
+ u64 nsec, tmp[2];
+ int ret = 0;
+ u8 *dt;
+
+ if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&dd->lock, flags);
+ bsev_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsev.busy);
+ if (bsev_busy)
+ bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!bsev_busy) {
+ eng = &dd->bsev;
+ } else if (!bsea_busy) {
+ eng = &dd->bsea;
+ } else {
+ dev_err(dd->dev, "%s: hardware engine is busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ctx->eng = eng;
+ dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+
+ /* take mutex to access the aes hw */
+ mutex_lock(&aes_lock);
+
+ if (!ctx->slot) {
+ key_slot = aes_find_key_slot(dd);
+ if (!key_slot) {
+ dev_err(dd->dev, "no empty slot\n");
+ mutex_unlock(&aes_lock);
+ return -ENOMEM;
+ }
+ ctx->slot = key_slot;
+ }
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware (%d) not available\n",
+ eng->res_id);
+ mutex_unlock(&aes_lock);
+ return -EBUSY;
+ }
+
+ ret = aes_hw_init(eng);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+ goto fail;
+ }
+
+ memcpy(ctx->key, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
+
+ eng->ctx = ctx;
+ eng->ctx->keylen = AES_KEYSIZE_128;
+ aes_set_key(eng, ctx->slot->slot_num);
+
+ /* set seed to the aes hw slot */
+ memset(eng->buf_in, 0, AES_BLOCK_SIZE);
+ memcpy(eng->buf_in, seed, DEFAULT_RNG_BLK_SZ);
+ ret = aes_start_crypt(eng, (u32)eng->dma_buf_in,
+ (u32)eng->dma_buf_out, 1, FLAGS_CBC, false);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+
+ if (slen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+ dt = seed + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
+ } else {
+ getnstimeofday(&ts);
+ nsec = timespec_to_ns(&ts);
+ do_div(nsec, 1000);
+ nsec ^= dd->ctr << 56;
+ dd->ctr++;
+ tmp[0] = nsec;
+ tmp[1] = tegra_chip_uid();
+ dt = (u8 *)tmp;
+ }
+ memcpy(ctx->dt, dt, DEFAULT_RNG_BLK_SZ);
+
+out:
+ aes_hw_deinit(eng);
+
+fail:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(eng->res_id);
+ mutex_unlock(&aes_lock);
+
+ dev_dbg(dd->dev, "%s: done\n", __func__);
+ return ret;
+}
+
+static int tegra_aes_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);
+ return 0;
+}
+
+void tegra_aes_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
+
+ if (ctx && ctx->slot)
+ aes_release_key_slot(ctx);
+}
+
+static struct crypto_alg algs[] = {
+ {
+ .cra_name = "disabled_ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_exit = tegra_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_ecb_encrypt,
+ .decrypt = tegra_aes_ecb_decrypt,
+ },
+ }, {
+ .cra_name = "disabled_cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_exit = tegra_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_MIN_KEY_SIZE,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_cbc_encrypt,
+ .decrypt = tegra_aes_cbc_decrypt,
+ }
+ }, {
+ .cra_name = "disabled_ofb(aes)",
+ .cra_driver_name = "ofb-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_exit = tegra_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_MIN_KEY_SIZE,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_ofb_encrypt,
+ .decrypt = tegra_aes_ofb_decrypt,
+ }
+ }, {
+ .cra_name = "disabled_ansi_cprng",
+ .cra_driver_name = "rng-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_exit = tegra_aes_cra_exit,
+ .cra_u.rng = {
+ .rng_make_random = tegra_aes_get_random,
+ .rng_reset = tegra_aes_rng_reset,
+ .seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
+ }
+ }
+};
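Editorial aside, not part of the patch: a rough sketch of how an in-kernel caller of this era's ablkcipher API might exercise the CBC cipher registered above. The algorithm is deliberately registered under a "disabled_" name, and the key/IV/buffer handling and the completion handling below are simplified assumptions.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int tegra_aes_cbc_smoke_test(void)
{
	static u8 key[16], iv[16], buf[16];
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ablkcipher("disabled_cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* The driver completes asynchronously; a real caller would install a
	 * completion callback and wait instead of ignoring -EINPROGRESS. */
	err = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return err;
}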
+
+static int tegra_aes_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_aes_dev *dd;
+ struct resource *res[2];
+ int err = -ENOMEM, i = 0, j;
+
+ if (aes_dev)
+ return -EEXIST;
+
+ dd = kzalloc(sizeof(struct tegra_aes_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ return -ENOMEM;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ dd->slots = kzalloc(sizeof(struct tegra_aes_slot) * AES_NR_KEYSLOTS,
+ GFP_KERNEL);
+ if (dd->slots == NULL) {
+ dev_err(dev, "unable to alloc slot struct.\n");
+ goto out;
+ }
+
+ spin_lock_init(&dd->lock);
+ crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);
+
+ /* Get the module base address */
+ res[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res[0] || !res[1]) {
+ dev_err(dev, "invalid resource type: base\n");
+ err = -ENODEV;
+ goto out;
+ }
+ dd->bsev.phys_base = res[0]->start;
+ dd->bsev.io_base = ioremap(dd->bsev.phys_base, resource_size(res[0]));
+ dd->bsea.phys_base = res[1]->start;
+ dd->bsea.io_base = ioremap(dd->bsea.phys_base, resource_size(res[1]));
+
+ if (!dd->bsev.io_base || !dd->bsea.io_base) {
+ dev_err(dev, "can't ioremap phys_base\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = alloc_iram(dd);
+ if (err < 0) {
+ dev_err(dev, "Failed to allocate IRAM for BSEA\n");
+ goto out;
+ }
+
+ dd->bsev.res_id = TEGRA_ARB_BSEV;
+ dd->bsea.res_id = TEGRA_ARB_BSEA;
+
+ dd->bsev.pclk = clk_get(dev, "bsev");
+ if (IS_ERR(dd->bsev.pclk)) {
+ dev_err(dev, "v: pclock intialization failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dd->bsev.iclk = clk_get(dev, "vde");
+ if (IS_ERR(dd->bsev.iclk)) {
+ dev_err(dev, "v: iclock intialization failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dd->bsea.pclk = clk_get(dev, "bsea");
+ if (IS_ERR(dd->bsea.pclk)) {
+ dev_err(dev, "a: pclock intialization failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dd->bsea.iclk = clk_get(dev, "sclk");
+ if (IS_ERR(dd->bsea.iclk)) {
+ dev_err(dev, "a: iclock intialization failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = clk_set_rate(dd->bsev.iclk, ULONG_MAX);
+ if (err) {
+ dev_err(dd->dev, "bsev iclk set_rate fail(%d)\n", err);
+ goto out;
+ }
+
+ err = clk_set_rate(dd->bsea.iclk, ULONG_MAX);
+ if (err) {
+ dev_err(dd->dev, "bsea iclk set_rate fail(%d)\n", err);
+ goto out;
+ }
+
+ /*
+ * The following contiguous memory is allocated:
+ * - hardware key table
+ * - key schedule
+ */
+ dd->bsea.ivkey_base = NULL;
+ dd->bsev.ivkey_base = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+ &dd->bsev.ivkey_phys_base, GFP_KERNEL);
+ if (!dd->bsev.ivkey_base) {
+ dev_err(dev, "can not allocate iv/key buffer for BSEV\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ memset(dd->bsev.ivkey_base, 0, AES_MAX_KEY_SIZE);
+
+ dd->bsev.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ &dd->bsev.dma_buf_in, GFP_KERNEL);
+ dd->bsea.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ &dd->bsea.dma_buf_in, GFP_KERNEL);
+ if (!dd->bsev.buf_in || !dd->bsea.buf_in) {
+ dev_err(dev, "can not allocate dma-in buffer\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dd->bsev.buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ &dd->bsev.dma_buf_out, GFP_KERNEL);
+ dd->bsea.buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ &dd->bsea.dma_buf_out, GFP_KERNEL);
+ if (!dd->bsev.buf_out || !dd->bsea.buf_out) {
+ dev_err(dev, "can not allocate dma-out buffer\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ init_completion(&dd->bsev.op_complete);
+ init_completion(&dd->bsea.op_complete);
+
+ bsev_wq = alloc_workqueue("bsev_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
+ bsea_wq = alloc_workqueue("bsea_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!bsev_wq || !bsea_wq) {
+ dev_err(dev, "alloc_workqueue failed\n");
+ goto out;
+ }
+
+ /* get the irq */
+ dd->bsev.irq = INT_VDE_BSE_V;
+ err = request_irq(dd->bsev.irq, aes_bsev_irq, IRQF_TRIGGER_HIGH,
+ "tegra-aes", dd);
+ if (err) {
+ dev_err(dev, "request_irq failed fir BSEV Engine\n");
+ goto out;
+ }
+ disable_irq(dd->bsev.irq);
+
+ dd->bsea.irq = INT_VDE_BSE_A;
+ err = request_irq(dd->bsea.irq, aes_bsea_irq, IRQF_TRIGGER_HIGH,
+ "tegra-aes", dd);
+ if (err) {
+ dev_err(dev, "request_irq failed for BSEA Engine\n");
+ goto out;
+ }
+ disable_irq(dd->bsea.irq);
+
+ spin_lock_init(&list_lock);
+ spin_lock(&list_lock);
+ for (i = 0; i < AES_NR_KEYSLOTS; i++) {
+ if (i == SSK_SLOT_NUM)
+ continue;
+ dd->slots[i].available = true;
+ dd->slots[i].slot_num = i;
+ INIT_LIST_HEAD(&dd->slots[i].node);
+ list_add_tail(&dd->slots[i].node, &slot_list);
+ }
+ spin_unlock(&list_lock);
+
+ aes_dev = dd;
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++) {
+ INIT_LIST_HEAD(&algs[i].cra_list);
+ err = crypto_register_alg(&algs[i]);
+ if (err)
+ goto out;
+ }
+
+ dev_info(dev, "registered");
+ return 0;
+
+out:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&algs[j]);
+
+ free_iram(dd);
+
+ if (dd->bsev.ivkey_base) {
+ dma_free_coherent(dev, SZ_512, dd->bsev.ivkey_base,
+ dd->bsev.ivkey_phys_base);
+ }
+
+ if (dd->bsev.buf_in && dd->bsea.buf_in) {
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->bsev.buf_in, dd->bsev.dma_buf_in);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->bsea.buf_in, dd->bsea.dma_buf_in);
+ }
+
+ if (dd->bsev.buf_out && dd->bsea.buf_out) {
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->bsev.buf_out, dd->bsev.dma_buf_out);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->bsea.buf_out, dd->bsea.dma_buf_out);
+ }
+
+ if (dd->bsev.io_base && dd->bsea.io_base) {
+ iounmap(dd->bsev.io_base);
+ iounmap(dd->bsea.io_base);
+ }
+
+ if (dd->bsev.pclk)
+ clk_put(dd->bsev.pclk);
+
+ if (dd->bsev.iclk)
+ clk_put(dd->bsev.iclk);
+
+ if (dd->bsea.pclk)
+ clk_put(dd->bsea.pclk);
+
+ if (bsev_wq)
+ destroy_workqueue(bsev_wq);
+
+ if (bsea_wq)
+ destroy_workqueue(bsea_wq);
+
+ if (dd->bsev.irq)
+ free_irq(dd->bsev.irq, dd);
+
+ if (dd->bsea.irq)
+ free_irq(dd->bsea.irq, dd);
+
+ spin_lock(&list_lock);
+ list_del(&slot_list);
+ spin_unlock(&list_lock);
+
+ kfree(dd->slots);
+ kfree(dd);
+ aes_dev = NULL;
+
+ dev_err(dev, "%s: initialization failed.\n", __func__);
+ return err;
+}
+
+static int __devexit tegra_aes_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
+ int i;
+
+ if (!dd)
+ return -ENODEV;
+
+ cancel_work_sync(&bsev_work);
+ cancel_work_sync(&bsea_work);
+ destroy_workqueue(bsev_wq);
+ destroy_workqueue(bsea_wq);
+ free_irq(dd->bsev.irq, dd);
+ free_irq(dd->bsea.irq, dd);
+ spin_lock(&list_lock);
+ list_del(&slot_list);
+ spin_unlock(&list_lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_alg(&algs[i]);
+
+ free_iram(dd);
+ dma_free_coherent(dev, SZ_512, dd->bsev.ivkey_base,
+ dd->bsev.ivkey_phys_base);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsev.buf_in,
+ dd->bsev.dma_buf_in);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsea.buf_in,
+ dd->bsea.dma_buf_in);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsev.buf_out,
+ dd->bsev.dma_buf_out);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsea.buf_out,
+ dd->bsea.dma_buf_out);
+
+ iounmap(dd->bsev.io_base);
+ iounmap(dd->bsea.io_base);
+ clk_put(dd->bsev.iclk);
+ clk_put(dd->bsev.pclk);
+ clk_put(dd->bsea.pclk);
+ kfree(dd->slots);
+ kfree(dd);
+ aes_dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver tegra_aes_driver = {
+ .probe = tegra_aes_probe,
+ .remove = __devexit_p(tegra_aes_remove),
+ .driver = {
+ .name = "tegra-aes",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_aes_mod_init(void)
+{
+ mutex_init(&aes_lock);
+ INIT_LIST_HEAD(&slot_list);
+ return platform_driver_register(&tegra_aes_driver);
+}
+
+static void __exit tegra_aes_mod_exit(void)
+{
+ platform_driver_unregister(&tegra_aes_driver);
+}
+
+module_init(tegra_aes_mod_init);
+module_exit(tegra_aes_mod_exit);
+
+MODULE_DESCRIPTION("Tegra AES hw acceleration support.");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/crypto/tegra-aes.h b/drivers/crypto/tegra-aes.h
new file mode 100644
index 000000000000..45696467cdfc
--- /dev/null
+++ b/drivers/crypto/tegra-aes.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __CRYPTODEV_TEGRA_AES_H
+#define __CRYPTODEV_TEGRA_AES_H
+
+#define ICMDQUE_WR 0x1000
+#define CMDQUE_CONTROL 0x1008
+#define INTR_STATUS 0x1018
+#define INT_ENB 0x1040
+#define CONFIG 0x1044
+#define IRAM_ACCESS_CFG 0x10A0
+#define SECURE_DEST_ADDR 0x1100
+#define SECURE_INPUT_SELECT 0x1104
+#define SECURE_CONFIG 0x1108
+#define SECURE_CONFIG_EXT 0x110C
+#define SECURE_SECURITY 0x1110
+#define SECURE_HASH_RESULT0 0x1120
+#define SECURE_HASH_RESULT1 0x1124
+#define SECURE_HASH_RESULT2 0x1128
+#define SECURE_HASH_RESULT3 0x112C
+#define SECURE_SEC_SEL0 0x1140
+#define SECURE_SEC_SEL1 0x1144
+#define SECURE_SEC_SEL2 0x1148
+#define SECURE_SEC_SEL3 0x114C
+#define SECURE_SEC_SEL4 0x1150
+#define SECURE_SEC_SEL5 0x1154
+#define SECURE_SEC_SEL6 0x1158
+#define SECURE_SEC_SEL7 0x115C
+
+/* interrupt status reg masks and shifts */
+#define DMA_BUSY_FIELD BIT(9)
+#define ICQ_EMPTY_FIELD BIT(3)
+#define ENGINE_BUSY_FIELD BIT(0)
+
+/* secure select reg masks and shifts */
+#define SECURE_SEL0_KEYREAD_ENB0_FIELD BIT(0)
+
+/* secure config ext masks and shifts */
+#define SECURE_KEY_SCH_DIS_FIELD BIT(15)
+
+/* secure config masks and shifts */
+#define SECURE_KEY_INDEX_SHIFT 20
+#define SECURE_KEY_INDEX_FIELD (0x1F << SECURE_KEY_INDEX_SHIFT)
+#define SECURE_BLOCK_CNT_FIELD (0xFFFFF)
+
+/* stream interface select masks and shifts */
+#define CMDQ_CTRL_DST_STM_SEL_FIELD BIT(5)
+#define CMDQ_CTRL_SRC_STM_SEL_FIELD BIT(4)
+#define CMDQ_CTRL_ERROR_FLUSH_ENB BIT(2)
+#define CMDQ_CTRL_ICMDQEN_FIELD BIT(1)
+#define CMDQ_CTRL_UCMDQEN_FIELD BIT(0)
+
+/* config register masks and shifts */
+#define CONFIG_ENDIAN_ENB_FIELD BIT(10)
+#define CONFIG_MODE_SEL_FIELD BIT(0)
+
+/* extended config */
+#define SECURE_OFFSET_CNT_FIELD (0xFF << 24)
+#define SECURE_KEYSCHED_GEN_FIELD BIT(15)
+
+/* init vector select */
+#define SECURE_IV_SELECT_SHIFT 10
+#define SECURE_IV_SELECT_FIELD BIT(10)
+
+/* secure engine input */
+#define SECURE_INPUT_ALG_SEL_SHIFT 28
+#define SECURE_INPUT_ALG_SEL_FIELD (0xF << SECURE_INPUT_ALG_SEL_SHIFT)
+#define SECURE_INPUT_KEY_LEN_SHIFT 16
+#define SECURE_INPUT_KEY_LEN_FIELD (0xFFF << SECURE_INPUT_KEY_LEN_SHIFT)
+#define SECURE_RNG_ENB_SHIFT 11
+#define SECURE_RNG_ENB_FIELD BIT(11)
+#define SECURE_CORE_SEL_SHIFT 9
+#define SECURE_CORE_SEL_FIELD BIT(9)
+#define SECURE_VCTRAM_SEL_SHIFT 7
+#define SECURE_VCTRAM_SEL_FIELD (0x3 << SECURE_VCTRAM_SEL_SHIFT)
+#define SECURE_INPUT_SEL_SHIFT 5
+#define SECURE_INPUT_SEL_FIELD (0x3 << SECURE_INPUT_SEL_SHIFT)
+#define SECURE_XOR_POS_SHIFT 3
+#define SECURE_XOR_POS_FIELD (0x3 << SECURE_XOR_POS_SHIFT)
+#define SECURE_HASH_ENB_SHIFT 2
+#define SECURE_HASH_ENB_FIELD BIT(2)
+#define SECURE_ON_THE_FLY_FIELD BIT(0)
+
+#endif
diff --git a/drivers/crypto/tegra-se.c b/drivers/crypto/tegra-se.c
new file mode 100644
index 000000000000..a7659452f9f7
--- /dev/null
+++ b/drivers/crypto/tegra-se.c
@@ -0,0 +1,2442 @@
+/*
+ * Cryptographic API.
+ * drivers/crypto/tegra-se.c
+ *
+ * Support for Tegra Security Engine hardware crypto algorithms.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/pm_runtime.h>
+
+#include "tegra-se.h"
+
+#define DRIVER_NAME "tegra-se"
+
+/* Security Engine operation modes */
+enum tegra_se_aes_op_mode {
+ SE_AES_OP_MODE_CBC, /* Cipher Block Chaining (CBC) mode */
+ SE_AES_OP_MODE_ECB, /* Electronic Codebook (ECB) mode */
+ SE_AES_OP_MODE_CTR, /* Counter (CTR) mode */
+ SE_AES_OP_MODE_OFB, /* Output feedback (OFB) mode */
+ SE_AES_OP_MODE_RNG_X931, /* Random number generator (RNG) mode */
+ SE_AES_OP_MODE_CMAC, /* Cipher-based MAC (CMAC) mode */
+ SE_AES_OP_MODE_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */
+ SE_AES_OP_MODE_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */
+ SE_AES_OP_MODE_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */
+ SE_AES_OP_MODE_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */
+ SE_AES_OP_MODE_SHA512 /* Secure Hash Algorithm-512 (SHA512) mode */
+};
+
+/* Security Engine key table type */
+enum tegra_se_key_table_type {
+ SE_KEY_TABLE_TYPE_KEY, /* Key */
+ SE_KEY_TABLE_TYPE_ORGIV, /* Original IV */
+ SE_KEY_TABLE_TYPE_UPDTDIV /* Updated IV */
+};
+
+/* Security Engine request context */
+struct tegra_se_req_context {
+ enum tegra_se_aes_op_mode op_mode; /* Security Engine operation mode */
+ bool encrypt; /* Operation type */
+};
+
+struct tegra_se_dev {
+ struct device *dev;
+ void __iomem *io_reg; /* se device memory/io */
+ void __iomem *pmc_io_reg; /* pmc device memory/io */
+ int irq; /* irq allocated */
+ spinlock_t lock; /* spin lock */
+ struct clk *pclk; /* Security Engine clock */
+ struct crypto_queue queue; /* Security Engine crypto queue */
+ struct tegra_se_slot *slot_list; /* pointer to key slots */
+ u64 ctr;
+ u32 *src_ll_buf; /* pointer to source linked list buffer */
+ dma_addr_t src_ll_buf_adr; /* Source linked list buffer dma address */
+ u32 src_ll_size; /* Size of source linked list buffer */
+ u32 *dst_ll_buf; /* pointer to destination linked list buffer */
+ dma_addr_t dst_ll_buf_adr; /* Destination linked list dma address */
+ u32 dst_ll_size; /* Size of destination linked list buffer */
+ u32 *ctx_save_buf; /* LP context buffer pointer*/
+ dma_addr_t ctx_save_buf_adr; /* LP context buffer dma address*/
+ struct completion complete; /* Tells the task completion */
+ bool work_q_busy; /* Work queue busy status */
+};
+
+static struct tegra_se_dev *sg_tegra_se_dev;
+
+/* Security Engine AES context */
+struct tegra_se_aes_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_slot *slot; /* Security Engine key slot */
+ u32 keylen; /* key length in bits */
+ u32 op_mode; /* AES operation mode */
+};
+
+/* Security Engine random number generator context */
+struct tegra_se_rng_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_slot *slot; /* Security Engine key slot */
+ u32 *dt_buf; /* Destination buffer pointer */
+ dma_addr_t dt_buf_adr; /* Destination buffer dma address */
+ u32 *rng_buf; /* RNG buffer pointer */
+ dma_addr_t rng_buf_adr; /* RNG buffer dma address */
+ bool use_org_iv; /* Tells whether the original IV is to be used
+ or not. If false, the updated IV is used */
+};
+
+/* Security Engine SHA context */
+struct tegra_se_sha_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ u32 op_mode; /* SHA operation mode */
+};
+
+/* Security Engine AES CMAC context */
+struct tegra_se_aes_cmac_context {
+ struct tegra_se_dev *se_dev; /* Security Engine device */
+ struct tegra_se_slot *slot; /* Security Engine key slot */
+ u32 keylen; /* key length in bits */
+ u8 K1[TEGRA_SE_KEY_128_SIZE]; /* Key1 */
+ u8 K2[TEGRA_SE_KEY_128_SIZE]; /* Key2 */
+ dma_addr_t dma_addr; /* DMA address of local buffer */
+ u32 buflen; /* local buffer length */
+ u8 *buffer; /* local buffer pointer */
+};
+
+/* Security Engine key slot */
+struct tegra_se_slot {
+ struct list_head node;
+ u8 slot_num; /* Key slot number */
+ bool available; /* Tells whether key slot is free to use */
+};
+
+static struct tegra_se_slot ssk_slot = {
+ .slot_num = 15,
+ .available = false,
+};
+
+static struct tegra_se_slot srk_slot = {
+ .slot_num = 0,
+ .available = false,
+};
+
+/* Security Engine Linked List */
+struct tegra_se_ll {
+ dma_addr_t addr; /* DMA buffer address */
+ u32 data_len; /* Data length in DMA buffer */
+};
+
+static LIST_HEAD(key_slot);
+static DEFINE_SPINLOCK(key_slot_lock);
+static DEFINE_MUTEX(se_hw_lock);
+
+/* create a work for handling the async transfers */
+static void tegra_se_work_handler(struct work_struct *work);
+static DECLARE_WORK(se_work, tegra_se_work_handler);
+static struct workqueue_struct *se_work_q;
+
+#define PMC_SCRATCH43_REG_OFFSET 0x22c
+#define GET_MSB(x) ((x) >> (8*sizeof(x)-1))
+static void tegra_se_leftshift_onebit(u8 *in_buf, u32 size, u8 *org_msb)
+{
+ u8 carry;
+ u32 i;
+
+ *org_msb = GET_MSB(in_buf[0]);
+
+ /* left shift one bit */
+ in_buf[0] <<= 1;
+ for (carry = 0, i = 1; i < size; i++) {
+ carry = GET_MSB(in_buf[i]);
+ in_buf[i-1] |= carry;
+ in_buf[i] <<= 1;
+ }
+}
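Editorial aside: the K1/K2 fields in tegra_se_aes_cmac_context above suggest this left-shift helper is used for RFC 4493 (AES-CMAC) subkey derivation. A standalone sketch of that use under that assumption; leftshift_onebit() below mirrors the driver helper, since the original is static, and cmac_derive_k1() plus the sample value of L are illustrative.

#include <stdio.h>
#include <string.h>

#define BLK 16
#define GET_MSB(x) ((x) >> 7)

/* Big-endian one-bit left shift, same structure as tegra_se_leftshift_onebit */
static void leftshift_onebit(unsigned char *buf, unsigned int size,
			     unsigned char *org_msb)
{
	unsigned char carry;
	unsigned int i;

	*org_msb = GET_MSB(buf[0]);
	buf[0] <<= 1;
	for (carry = 0, i = 1; i < size; i++) {
		carry = GET_MSB(buf[i]);
		buf[i - 1] |= carry;
		buf[i] <<= 1;
	}
}

/* RFC 4493: K1 = (L << 1), XOR 0x87 into the last byte if the MSB of L was set */
static void cmac_derive_k1(const unsigned char *l, unsigned char *k1)
{
	unsigned char msb;

	memcpy(k1, l, BLK);
	leftshift_onebit(k1, BLK, &msb);
	if (msb)
		k1[BLK - 1] ^= 0x87;
}

int main(void)
{
	/* L would normally be AES-K(0^128); an arbitrary value is used here */
	unsigned char l[BLK] = { 0x80, 0x01 }, k1[BLK];
	int i;

	cmac_derive_k1(l, k1);
	for (i = 0; i < BLK; i++)
		printf("%02x", k1[i]);
	printf("\n");
	return 0;
}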
+
+extern unsigned long long tegra_chip_uid(void);
+
+static inline void se_writel(struct tegra_se_dev *se_dev,
+ unsigned int val, unsigned int reg_offset)
+{
+ writel(val, se_dev->io_reg + reg_offset);
+}
+
+static inline unsigned int se_readl(struct tegra_se_dev *se_dev,
+ unsigned int reg_offset)
+{
+ unsigned int val;
+
+ val = readl(se_dev->io_reg + reg_offset);
+
+ return val;
+}
+
+static void tegra_se_free_key_slot(struct tegra_se_slot *slot)
+{
+ if (slot) {
+ spin_lock(&key_slot_lock);
+ slot->available = true;
+ spin_unlock(&key_slot_lock);
+ }
+}
+
+static struct tegra_se_slot *tegra_se_alloc_key_slot(void)
+{
+ struct tegra_se_slot *slot = NULL;
+ bool found = false;
+
+ spin_lock(&key_slot_lock);
+ list_for_each_entry(slot, &key_slot, node) {
+ if (slot->available) {
+ slot->available = false;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&key_slot_lock);
+ return found ? slot : NULL;
+}
+
+static int tegra_init_key_slot(struct tegra_se_dev *se_dev)
+{
+ int i;
+
+ se_dev->slot_list = kzalloc(sizeof(struct tegra_se_slot) *
+ TEGRA_SE_KEYSLOT_COUNT, GFP_KERNEL);
+ if (se_dev->slot_list == NULL) {
+ dev_err(se_dev->dev, "slot list memory allocation failed\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&key_slot_lock);
+ spin_lock(&key_slot_lock);
+ for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
+ /*
+ * Slot 0 and 15 are reserved and will not be added to the
+ * free slots pool. Slot 0 is used for SRK generation and
+ * Slot 15 is used for SSK operation
+ */
+ if ((i == srk_slot.slot_num) || (i == ssk_slot.slot_num))
+ continue;
+ se_dev->slot_list[i].available = true;
+ se_dev->slot_list[i].slot_num = i;
+ INIT_LIST_HEAD(&se_dev->slot_list[i].node);
+ list_add_tail(&se_dev->slot_list[i].node, &key_slot);
+ }
+ spin_unlock(&key_slot_lock);
+
+ return 0;
+}
+
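+/*
+ * Clear the key-read control bit in the slot's KEY_TABLE_ACCESS
+ * register; tegra_se_key_read_disable_all() runs this for every slot
+ * at probe time so that loaded key material cannot be read back
+ * through the register interface.
+ */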
+static void tegra_se_key_read_disable(u8 slot_num)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ u32 val;
+
+ val = se_readl(se_dev,
+ (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4)));
+ val &= ~(1 << SE_KEY_READ_DISABLE_SHIFT);
+ se_writel(se_dev,
+ val, (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4)));
+}
+
+static void tegra_se_key_read_disable_all(void)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ u8 slot_num;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ for (slot_num = 0; slot_num < TEGRA_SE_KEYSLOT_COUNT; slot_num++)
+ tegra_se_key_read_disable(slot_num);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+}
+
+static void tegra_se_config_algo(struct tegra_se_dev *se_dev,
+ enum tegra_se_aes_op_mode mode, bool encrypt, u32 key_len)
+{
+ u32 val = 0;
+
+ switch (mode) {
+ case SE_AES_OP_MODE_CBC:
+ case SE_AES_OP_MODE_CMAC:
+ if (encrypt) {
+ val = SE_CONFIG_ENC_ALG(ALG_AES_ENC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY256);
+ else if (key_len == TEGRA_SE_KEY_192_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
+ else
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ val |= SE_CONFIG_DEC_ALG(ALG_NOP);
+ } else {
+ val = SE_CONFIG_DEC_ALG(ALG_AES_DEC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE)
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY256);
+ else if (key_len == TEGRA_SE_KEY_192_SIZE)
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY192);
+ else
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY128);
+ }
+ if (mode == SE_AES_OP_MODE_CMAC)
+ val |= SE_CONFIG_DST(DST_HASHREG);
+ else
+ val |= SE_CONFIG_DST(DST_MEMORY);
+ break;
+ case SE_AES_OP_MODE_RNG_X931:
+ val = SE_CONFIG_ENC_ALG(ALG_RNG) |
+ SE_CONFIG_ENC_MODE(MODE_KEY128) |
+ SE_CONFIG_DST(DST_MEMORY);
+ break;
+ case SE_AES_OP_MODE_ECB:
+ if (encrypt) {
+ val = SE_CONFIG_ENC_ALG(ALG_AES_ENC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY256);
+ else if (key_len == TEGRA_SE_KEY_192_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
+ else
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ } else {
+ val = SE_CONFIG_DEC_ALG(ALG_AES_DEC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE)
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY256);
+ else if (key_len == TEGRA_SE_KEY_192_SIZE)
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY192);
+ else
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY128);
+ }
+ val |= SE_CONFIG_DST(DST_MEMORY);
+ break;
+ case SE_AES_OP_MODE_CTR:
+ if (encrypt) {
+ val = SE_CONFIG_ENC_ALG(ALG_AES_ENC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY256);
+ else if (key_len == TEGRA_SE_KEY_192_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
+ else
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ } else {
+ val = SE_CONFIG_DEC_ALG(ALG_AES_DEC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE) {
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY256);
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY256);
+ } else if (key_len == TEGRA_SE_KEY_192_SIZE) {
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY192);
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
+ } else {
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY128);
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ }
+ }
+ val |= SE_CONFIG_DST(DST_MEMORY);
+ break;
+ case SE_AES_OP_MODE_OFB:
+ if (encrypt) {
+ val = SE_CONFIG_ENC_ALG(ALG_AES_ENC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY256);
+ else if (key_len == TEGRA_SE_KEY_192_SIZE)
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
+ else
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ } else {
+ val = SE_CONFIG_DEC_ALG(ALG_AES_DEC);
+ if (key_len == TEGRA_SE_KEY_256_SIZE) {
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY256);
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY256);
+ } else if (key_len == TEGRA_SE_KEY_192_SIZE) {
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY192);
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY192);
+ } else {
+ val |= SE_CONFIG_DEC_MODE(MODE_KEY128);
+ val |= SE_CONFIG_ENC_MODE(MODE_KEY128);
+ }
+ }
+ val |= SE_CONFIG_DST(DST_MEMORY);
+ break;
+ case SE_AES_OP_MODE_SHA1:
+ val = SE_CONFIG_ENC_ALG(ALG_SHA) |
+ SE_CONFIG_ENC_MODE(MODE_SHA1) |
+ SE_CONFIG_DST(DST_HASHREG);
+ break;
+ case SE_AES_OP_MODE_SHA224:
+ val = SE_CONFIG_ENC_ALG(ALG_SHA) |
+ SE_CONFIG_ENC_MODE(MODE_SHA224) |
+ SE_CONFIG_DST(DST_HASHREG);
+ break;
+ case SE_AES_OP_MODE_SHA256:
+ val = SE_CONFIG_ENC_ALG(ALG_SHA) |
+ SE_CONFIG_ENC_MODE(MODE_SHA256) |
+ SE_CONFIG_DST(DST_HASHREG);
+ break;
+ case SE_AES_OP_MODE_SHA384:
+ val = SE_CONFIG_ENC_ALG(ALG_SHA) |
+ SE_CONFIG_ENC_MODE(MODE_SHA384) |
+ SE_CONFIG_DST(DST_HASHREG);
+ break;
+ case SE_AES_OP_MODE_SHA512:
+ val = SE_CONFIG_ENC_ALG(ALG_SHA) |
+ SE_CONFIG_ENC_MODE(MODE_SHA512) |
+ SE_CONFIG_DST(DST_HASHREG);
+ break;
+ default:
+ dev_warn(se_dev->dev, "Invalid operation mode\n");
+ break;
+ }
+
+ se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);
+}
+
+static void tegra_se_write_seed(struct tegra_se_dev *se_dev, u32 *pdata)
+{
+ u32 i;
+
+ for (i = 0; i < SE_CRYPTO_CTR_REG_COUNT; i++)
+ se_writel(se_dev, pdata[i], SE_CRYPTO_CTR_REG_OFFSET + (i * 4));
+}
+
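+/*
+ * Load key or IV material into the hardware key table. Data is pushed
+ * through the KEYTABLE_DATA registers in 128-bit quads: the first pass
+ * writes the selected quad (keys, original IV or updated IV) and any
+ * remaining bytes (e.g. the upper half of a 256-bit key) go out with
+ * the QUAD_KEYS_256 selector. User keys are never written over the
+ * reserved SSK slot.
+ */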
+static void tegra_se_write_key_table(u8 *pdata, u32 data_len,
+ u8 slot_num, enum tegra_se_key_table_type type)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ u32 data_size = SE_KEYTABLE_REG_MAX_DATA;
+ u32 *pdata_buf = (u32 *)pdata;
+ u8 pkt = 0, quad = 0;
+ u32 val = 0, i;
+
+ if ((type == SE_KEY_TABLE_TYPE_KEY) && (slot_num == ssk_slot.slot_num))
+ return;
+
+ if (type == SE_KEY_TABLE_TYPE_ORGIV)
+ quad = QUAD_ORG_IV;
+ else if (type == SE_KEY_TABLE_TYPE_UPDTDIV)
+ quad = QUAD_UPDTD_IV;
+ else
+ quad = QUAD_KEYS_128;
+
+ /* write data to the key table */
+ do {
+ for (i = 0; i < data_size; i += 4, data_len -= 4)
+ se_writel(se_dev, *pdata_buf++,
+ SE_KEYTABLE_DATA0_REG_OFFSET + i);
+
+ pkt = SE_KEYTABLE_SLOT(slot_num) | SE_KEYTABLE_QUAD(quad);
+ val = SE_KEYTABLE_OP_TYPE(OP_WRITE) |
+ SE_KEYTABLE_TABLE_SEL(TABLE_KEYIV) |
+ SE_KEYTABLE_PKT(pkt);
+
+ se_writel(se_dev, val, SE_KEYTABLE_REG_OFFSET);
+
+ data_size = data_len;
+ quad = QUAD_KEYS_256;
+
+ } while (data_len);
+}
+
+static void tegra_se_config_crypto(struct tegra_se_dev *se_dev,
+ enum tegra_se_aes_op_mode mode, bool encrypt, u8 slot_num, bool org_iv)
+{
+ u32 val = 0;
+
+ switch (mode) {
+ case SE_AES_OP_MODE_CMAC:
+ case SE_AES_OP_MODE_CBC:
+ if (encrypt) {
+ val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) |
+ SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) |
+ SE_CRYPTO_XOR_POS(XOR_TOP) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
+ } else {
+ val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) |
+ SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVAHB) |
+ SE_CRYPTO_XOR_POS(XOR_BOTTOM) |
+ SE_CRYPTO_CORE_SEL(CORE_DECRYPT);
+ }
+ break;
+ case SE_AES_OP_MODE_RNG_X931:
+ val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) |
+ SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
+ break;
+ case SE_AES_OP_MODE_ECB:
+ if (encrypt) {
+ val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) |
+ SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
+ } else {
+ val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) |
+ SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_DECRYPT);
+ }
+ break;
+ case SE_AES_OP_MODE_CTR:
+ val = SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) |
+ SE_CRYPTO_VCTRAM_SEL(VCTRAM_AHB) |
+ SE_CRYPTO_XOR_POS(XOR_BOTTOM) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
+ break;
+ case SE_AES_OP_MODE_OFB:
+ val = SE_CRYPTO_INPUT_SEL(INPUT_AESOUT) |
+ SE_CRYPTO_VCTRAM_SEL(VCTRAM_AHB) |
+ SE_CRYPTO_XOR_POS(XOR_BOTTOM) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
+ break;
+ default:
+ dev_warn(se_dev->dev, "Invalid operation mode\n");
+ break;
+ }
+
+ if (mode == SE_AES_OP_MODE_CTR) {
+ val |= SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(slot_num) |
+ SE_CRYPTO_CTR_CNTN(1);
+ } else {
+ val |= SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(slot_num) |
+ (org_iv ? SE_CRYPTO_IV_SEL(IV_ORIGINAL) :
+ SE_CRYPTO_IV_SEL(IV_UPDATED));
+ }
+
+ /* enable hash for CMAC */
+ if (mode == SE_AES_OP_MODE_CMAC)
+ val |= SE_CRYPTO_HASH(HASH_ENABLE);
+
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+
+ if (mode == SE_AES_OP_MODE_CTR)
+ se_writel(se_dev, 1, SE_SPARE_0_REG_OFFSET);
+
+ if (mode == SE_AES_OP_MODE_OFB)
+ se_writel(se_dev, 1, SE_SPARE_0_REG_OFFSET);
+
+}
+
+static void tegra_se_config_sha(struct tegra_se_dev *se_dev, u32 count)
+{
+ int i;
+
+ se_writel(se_dev, (count * 8), SE_SHA_MSG_LENGTH_REG_OFFSET);
+ se_writel(se_dev, (count * 8), SE_SHA_MSG_LEFT_REG_OFFSET);
+ for (i = 1; i < 4; i++) {
+ se_writel(se_dev, 0, SE_SHA_MSG_LENGTH_REG_OFFSET + (4 * i));
+ se_writel(se_dev, 0, SE_SHA_MSG_LEFT_REG_OFFSET + (4 * i));
+ }
+ se_writel(se_dev, SHA_ENABLE, SE_SHA_CONFIG_REG_OFFSET);
+}
+
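+/*
+ * Issue a single SE operation: clear stale interrupt status, program
+ * the source/destination linked-list addresses and the block count,
+ * enable the error and op-done interrupts, start either a normal or a
+ * context-save operation, and sleep until the ISR signals completion
+ * (with a one second timeout).
+ */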
+static int tegra_se_start_operation(struct tegra_se_dev *se_dev, u32 nbytes,
+ bool context_save)
+{
+ u32 nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
+ int ret = 0;
+ u32 val = 0;
+
+ /* clear any pending interrupts */
+ val = se_readl(se_dev, SE_INT_STATUS_REG_OFFSET);
+ se_writel(se_dev, val, SE_INT_STATUS_REG_OFFSET);
+ se_writel(se_dev, se_dev->src_ll_buf_adr, SE_IN_LL_ADDR_REG_OFFSET);
+ se_writel(se_dev, se_dev->dst_ll_buf_adr, SE_OUT_LL_ADDR_REG_OFFSET);
+
+ if (nblocks)
+ se_writel(se_dev, nblocks-1, SE_BLOCK_COUNT_REG_OFFSET);
+
+ /* enable interrupts */
+ val = SE_INT_ERROR(INT_ENABLE) | SE_INT_OP_DONE(INT_ENABLE);
+ se_writel(se_dev, val, SE_INT_ENABLE_REG_OFFSET);
+
+ INIT_COMPLETION(se_dev->complete);
+
+ if (context_save)
+ se_writel(se_dev, SE_OPERATION(OP_CTX_SAVE),
+ SE_OPERATION_REG_OFFSET);
+ else
+ se_writel(se_dev, SE_OPERATION(OP_SRART),
+ SE_OPERATION_REG_OFFSET);
+
+ ret = wait_for_completion_timeout(&se_dev->complete,
+ msecs_to_jiffies(1000));
+ if (ret == 0) {
+ dev_err(se_dev->dev, "operation timed out no interrupt\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void tegra_se_read_hash_result(struct tegra_se_dev *se_dev,
+ u8 *pdata, u32 nbytes, bool swap32)
+{
+ u32 *result = (u32 *)pdata;
+ u32 i;
+
+ for (i = 0; i < nbytes/4; i++) {
+ result[i] = se_readl(se_dev, SE_HASH_RESULT_REG_OFFSET +
+ (i * sizeof(u32)));
+ if (swap32)
+ result[i] = be32_to_cpu(result[i]);
+ }
+}
+
+static int tegra_se_count_sgs(struct scatterlist *sl, u32 total_bytes)
+{
+ int i = 0;
+
+ if (!total_bytes)
+ return 0;
+
+ do {
+ total_bytes -= sl[i].length;
+ i++;
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static int tegra_se_alloc_ll_buf(struct tegra_se_dev *se_dev,
+ u32 num_src_sgs, u32 num_dst_sgs)
+{
+ if (se_dev->src_ll_buf || se_dev->dst_ll_buf) {
+ dev_err(se_dev->dev, "trying to allocate memory to allocated memory\n");
+ return -EBUSY;
+ }
+
+ if (num_src_sgs) {
+ se_dev->src_ll_size =
+ (sizeof(struct tegra_se_ll) * num_src_sgs) +
+ sizeof(u32);
+ se_dev->src_ll_buf = dma_alloc_coherent(se_dev->dev,
+ se_dev->src_ll_size,
+ &se_dev->src_ll_buf_adr, GFP_KERNEL);
+ if (!se_dev->src_ll_buf) {
+ dev_err(se_dev->dev, "can not allocate src lldma buffer\n");
+ return -ENOMEM;
+ }
+ }
+ if (num_dst_sgs) {
+ se_dev->dst_ll_size =
+ (sizeof(struct tegra_se_ll) * num_dst_sgs) +
+ sizeof(u32);
+ se_dev->dst_ll_buf = dma_alloc_coherent(se_dev->dev,
+ se_dev->dst_ll_size,
+ &se_dev->dst_ll_buf_adr, GFP_KERNEL);
+ if (!se_dev->dst_ll_buf) {
+ dev_err(se_dev->dev, "can not allocate dst ll dma buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_se_free_ll_buf(struct tegra_se_dev *se_dev)
+{
+ if (se_dev->src_ll_buf) {
+ dma_free_coherent(se_dev->dev, se_dev->src_ll_size,
+ se_dev->src_ll_buf, se_dev->src_ll_buf_adr);
+ se_dev->src_ll_buf = NULL;
+ }
+
+ if (se_dev->dst_ll_buf) {
+ dma_free_coherent(se_dev->dev, se_dev->dst_ll_size,
+ se_dev->dst_ll_buf, se_dev->dst_ll_buf_adr);
+ se_dev->dst_ll_buf = NULL;
+ }
+}
+
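+/*
+ * Map the request's source and destination scatterlists for DMA and
+ * mirror them into the SE linked-list buffers: the first word of each
+ * buffer holds the entry count minus one, followed by one
+ * {address, length} pair per scatterlist segment.
+ */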
+static int tegra_se_setup_ablk_req(struct tegra_se_dev *se_dev,
+ struct ablkcipher_request *req)
+{
+ struct scatterlist *src_sg, *dst_sg;
+ struct tegra_se_ll *src_ll, *dst_ll;
+ u32 total, num_src_sgs, num_dst_sgs;
+ int ret = 0;
+
+ num_src_sgs = tegra_se_count_sgs(req->src, req->nbytes);
+ num_dst_sgs = tegra_se_count_sgs(req->dst, req->nbytes);
+
+ if ((num_src_sgs > SE_MAX_SRC_SG_COUNT) ||
+ (num_dst_sgs > SE_MAX_DST_SG_COUNT)) {
+ dev_err(se_dev->dev, "num of SG buffers are more\n");
+ return -EINVAL;
+ }
+
+ *se_dev->src_ll_buf = num_src_sgs-1;
+ *se_dev->dst_ll_buf = num_dst_sgs-1;
+
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+
+ src_sg = req->src;
+ dst_sg = req->dst;
+ total = req->nbytes;
+
+ while (total) {
+ ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(se_dev->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+
+ ret = dma_map_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE);
+ if (!ret) {
+ dev_err(se_dev->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ WARN_ON(src_sg->length != dst_sg->length);
+ src_ll->addr = sg_dma_address(src_sg);
+ src_ll->data_len = src_sg->length;
+ dst_ll->addr = sg_dma_address(dst_sg);
+ dst_ll->data_len = dst_sg->length;
+
+ total -= src_sg->length;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ dst_ll++;
+ src_ll++;
+ WARN_ON(((total != 0) && (!src_sg || !dst_sg)));
+ }
+ return ret;
+}
+
+static void tegra_se_dequeue_complete_req(struct tegra_se_dev *se_dev,
+ struct ablkcipher_request *req)
+{
+ struct scatterlist *src_sg, *dst_sg;
+ u32 total;
+
+ if (req) {
+ src_sg = req->src;
+ dst_sg = req->dst;
+ total = req->nbytes;
+ while (total) {
+ dma_unmap_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ total -= src_sg->length;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+ }
+}
+
+static void tegra_se_process_new_req(struct crypto_async_request *async_req)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+ struct tegra_se_aes_context *aes_ctx =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ int ret = 0;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+
+ /* write IV */
+ if (req->info) {
+ if (req_ctx->op_mode == SE_AES_OP_MODE_CTR) {
+ tegra_se_write_seed(se_dev, (u32 *)req->info);
+ } else {
+ tegra_se_write_key_table(req->info,
+ TEGRA_SE_AES_IV_SIZE,
+ aes_ctx->slot->slot_num,
+ SE_KEY_TABLE_TYPE_ORGIV);
+ }
+ }
+ tegra_se_setup_ablk_req(se_dev, req);
+ tegra_se_config_algo(se_dev, req_ctx->op_mode, req_ctx->encrypt,
+ aes_ctx->keylen);
+ tegra_se_config_crypto(se_dev, req_ctx->op_mode, req_ctx->encrypt,
+ aes_ctx->slot->slot_num, req->info ? true : false);
+ ret = tegra_se_start_operation(se_dev, req->nbytes, false);
+ tegra_se_dequeue_complete_req(se_dev, req);
+
+ mutex_unlock(&se_hw_lock);
+ req->base.complete(&req->base, ret);
+}
+
+static irqreturn_t tegra_se_irq(int irq, void *dev)
+{
+ struct tegra_se_dev *se_dev = dev;
+ u32 val;
+
+ val = se_readl(se_dev, SE_INT_STATUS_REG_OFFSET);
+ se_writel(se_dev, val, SE_INT_STATUS_REG_OFFSET);
+
+ if (val & SE_INT_ERROR(INT_SET))
+ dev_err(se_dev->dev, "tegra_se_irq::error");
+
+ if (val & SE_INT_OP_DONE(INT_SET))
+ complete(&se_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
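+/*
+ * Work queue handler: drain the crypto request queue one request at a
+ * time while holding the hardware awake through runtime PM. The
+ * work_q_busy flag is cleared under the queue lock once the queue is
+ * empty, which tells tegra_se_aes_queue_req() when the work item must
+ * be queued again.
+ */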
+static void tegra_se_work_handler(struct work_struct *work)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ struct crypto_async_request *async_req = NULL;
+ struct crypto_async_request *backlog = NULL;
+
+ pm_runtime_get_sync(se_dev->dev);
+
+ do {
+ spin_lock_irq(&se_dev->lock);
+ backlog = crypto_get_backlog(&se_dev->queue);
+ async_req = crypto_dequeue_request(&se_dev->queue);
+ if (!async_req)
+ se_dev->work_q_busy = false;
+
+ spin_unlock_irq(&se_dev->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ tegra_se_process_new_req(async_req);
+ async_req = NULL;
+ }
+ } while (se_dev->work_q_busy);
+ pm_runtime_put(se_dev->dev);
+}
+
+static int tegra_se_aes_queue_req(struct ablkcipher_request *req)
+{
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ unsigned long flags;
+ bool idle = true;
+ int err = 0;
+
+ spin_lock_irqsave(&se_dev->lock, flags);
+ err = ablkcipher_enqueue_request(&se_dev->queue, req);
+ if (se_dev->work_q_busy)
+ idle = false;
+ spin_unlock_irqrestore(&se_dev->lock, flags);
+
+ if (idle) {
+ spin_lock_irq(&se_dev->lock);
+ se_dev->work_q_busy = true;
+ spin_unlock_irq(&se_dev->lock);
+ queue_work(se_work_q, &se_work);
+ }
+
+ return err;
+}
+
+static int tegra_se_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = true;
+ req_ctx->op_mode = SE_AES_OP_MODE_CBC;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = false;
+ req_ctx->op_mode = SE_AES_OP_MODE_CBC;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = true;
+ req_ctx->op_mode = SE_AES_OP_MODE_ECB;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = false;
+ req_ctx->op_mode = SE_AES_OP_MODE_ECB;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = true;
+ req_ctx->op_mode = SE_AES_OP_MODE_CTR;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = false;
+ req_ctx->op_mode = SE_AES_OP_MODE_CTR;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = true;
+ req_ctx->op_mode = SE_AES_OP_MODE_OFB;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+ struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->encrypt = false;
+ req_ctx->op_mode = SE_AES_OP_MODE_OFB;
+
+ return tegra_se_aes_queue_req(req);
+}
+
+static int tegra_se_aes_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_se_aes_context *ctx = crypto_ablkcipher_ctx(tfm);
+ struct tegra_se_dev *se_dev = ctx->se_dev;
+ struct tegra_se_slot *pslot;
+ u8 *pdata = (u8 *)key;
+
+ if (!ctx) {
+ dev_err(se_dev->dev, "invalid context");
+ return -EINVAL;
+ }
+
+ if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
+ (keylen != TEGRA_SE_KEY_192_SIZE) &&
+ (keylen != TEGRA_SE_KEY_256_SIZE)) {
+ dev_err(se_dev->dev, "invalid key size");
+ return -EINVAL;
+ }
+
+ if (key) {
+ if (!ctx->slot || (ctx->slot &&
+ ctx->slot->slot_num == ssk_slot.slot_num)) {
+ pslot = tegra_se_alloc_key_slot();
+ if (!pslot) {
+ dev_err(se_dev->dev, "no free key slot\n");
+ return -ENOMEM;
+ }
+ ctx->slot = pslot;
+ }
+ ctx->keylen = keylen;
+ } else {
+ tegra_se_free_key_slot(ctx->slot);
+ ctx->slot = &ssk_slot;
+ ctx->keylen = AES_KEYSIZE_128;
+ }
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ /* load the key */
+ tegra_se_write_key_table(pdata, keylen, ctx->slot->slot_num,
+ SE_KEY_TABLE_TYPE_KEY);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return 0;
+}
+
+static int tegra_se_aes_cra_init(struct crypto_tfm *tfm)
+{
+ struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->se_dev = sg_tegra_se_dev;
+ tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_se_req_context);
+
+ return 0;
+}
+
+static void tegra_se_aes_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm);
+
+ tegra_se_free_key_slot(ctx->slot);
+ ctx->slot = NULL;
+}
+
+static int tegra_se_rng_init(struct crypto_tfm *tfm)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+
+ rng_ctx->se_dev = se_dev;
+ rng_ctx->dt_buf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ &rng_ctx->dt_buf_adr, GFP_KERNEL);
+ if (!rng_ctx->dt_buf) {
+ dev_err(se_dev->dev, "can not allocate rng dma buffer");
+ return -ENOMEM;
+ }
+
+ rng_ctx->rng_buf = dma_alloc_coherent(rng_ctx->se_dev->dev,
+ TEGRA_SE_RNG_DT_SIZE, &rng_ctx->rng_buf_adr, GFP_KERNEL);
+ if (!rng_ctx->rng_buf) {
+ dev_err(se_dev->dev, "can not allocate rng dma buffer");
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->dt_buf, rng_ctx->dt_buf_adr);
+ return -ENOMEM;
+ }
+
+ rng_ctx->slot = tegra_se_alloc_key_slot();
+
+ if (!rng_ctx->slot) {
+ dev_err(rng_ctx->se_dev->dev, "no free slot\n");
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->dt_buf, rng_ctx->dt_buf_adr);
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->rng_buf, rng_ctx->rng_buf_adr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void tegra_se_rng_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm);
+
+ if (rng_ctx->dt_buf) {
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->dt_buf, rng_ctx->dt_buf_adr);
+ }
+
+ if (rng_ctx->rng_buf) {
+ dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE,
+ rng_ctx->rng_buf, rng_ctx->rng_buf_adr);
+ }
+
+ tegra_se_free_key_slot(rng_ctx->slot);
+ rng_ctx->slot = NULL;
+ rng_ctx->se_dev = NULL;
+}
+
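+/*
+ * ANSI X9.31 style random byte generation: each TEGRA_SE_RNG_DT_SIZE
+ * block is produced by encrypting the DT vector, which is then
+ * incremented as a big-endian counter for the next block. The original
+ * IV is only used for the first operation after a reseed.
+ */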
+static int tegra_se_rng_get_random(struct crypto_rng *tfm, u8 *rdata, u32 dlen)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm);
+ struct tegra_se_dev *se_dev = rng_ctx->se_dev;
+ struct tegra_se_ll *src_ll, *dst_ll;
+ unsigned char *dt_buf = (unsigned char *)rng_ctx->dt_buf;
+ u8 *rdata_addr;
+ int ret = 0, i, j, num_blocks;
+
+ if (dlen < TEGRA_SE_RNG_DT_SIZE)
+ return -EINVAL;
+ num_blocks = (dlen / TEGRA_SE_RNG_DT_SIZE);
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ src_ll->addr = rng_ctx->dt_buf_adr;
+ src_ll->data_len = TEGRA_SE_RNG_DT_SIZE;
+ dst_ll->addr = rng_ctx->rng_buf_adr;
+ dst_ll->data_len = TEGRA_SE_RNG_DT_SIZE;
+
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true,
+ TEGRA_SE_KEY_128_SIZE);
+ tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_RNG_X931, true,
+ rng_ctx->slot->slot_num, rng_ctx->use_org_iv);
+ for (j = 0; j < num_blocks; j++) {
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_RNG_DT_SIZE, false);
+
+ if (!ret) {
+ rdata_addr = (rdata + (j * TEGRA_SE_RNG_DT_SIZE));
+ memcpy(rdata_addr,
+ rng_ctx->rng_buf, TEGRA_SE_RNG_DT_SIZE);
+
+ /* update DT vector */
+ for (i = TEGRA_SE_RNG_DT_SIZE - 1; i >= 0; i--) {
+ dt_buf[i] += 1;
+ if (dt_buf[i] != 0)
+ break;
+ }
+ } else {
+ dlen = 0;
+ }
+ if (rng_ctx->use_org_iv) {
+ rng_ctx->use_org_iv = false;
+ tegra_se_config_crypto(se_dev,
+ SE_AES_OP_MODE_RNG_X931, true,
+ rng_ctx->slot->slot_num, rng_ctx->use_org_iv);
+ }
+ }
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return dlen;
+}
+
+static int tegra_se_rng_reset(struct crypto_rng *tfm, u8 *seed, u32 slen)
+{
+ struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm);
+ struct tegra_se_dev *se_dev = rng_ctx->se_dev;
+ u8 *iv = seed;
+ u8 *key = (u8 *)(seed + TEGRA_SE_RNG_IV_SIZE);
+ u8 *dt = key + TEGRA_SE_RNG_KEY_SIZE;
+ struct timespec ts;
+ u64 nsec, tmp[2];
+
+ BUG_ON(!seed);
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ tegra_se_write_key_table(key, TEGRA_SE_RNG_KEY_SIZE,
+ rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY);
+
+ tegra_se_write_key_table(iv, TEGRA_SE_RNG_IV_SIZE,
+ rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ if (slen < TEGRA_SE_RNG_SEED_SIZE) {
+ getnstimeofday(&ts);
+ nsec = timespec_to_ns(&ts);
+ do_div(nsec, 1000);
+ nsec ^= se_dev->ctr << 56;
+ se_dev->ctr++;
+ tmp[0] = nsec;
+ tmp[1] = tegra_chip_uid();
+ memcpy(rng_ctx->dt_buf, (u8 *)tmp, TEGRA_SE_RNG_DT_SIZE);
+ } else {
+ memcpy(rng_ctx->dt_buf, dt, TEGRA_SE_RNG_DT_SIZE);
+ }
+
+ rng_ctx->use_org_iv = true;
+
+ return 0;
+}
+
+int tegra_se_sha_init(struct ahash_request *req)
+{
+ return 0;
+}
+
+int tegra_se_sha_update(struct ahash_request *req)
+{
+ return 0;
+}
+
+int tegra_se_sha_finup(struct ahash_request *req)
+{
+ return 0;
+}
+
+int tegra_se_sha_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_se_sha_context *sha_ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ struct scatterlist *src_sg;
+ struct tegra_se_ll *src_ll;
+ u32 total, num_sgs;
+ int err = 0;
+
+ if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ sha_ctx->op_mode = SE_AES_OP_MODE_SHA1;
+
+ if (crypto_ahash_digestsize(tfm) == SHA224_DIGEST_SIZE)
+ sha_ctx->op_mode = SE_AES_OP_MODE_SHA224;
+
+ if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+ sha_ctx->op_mode = SE_AES_OP_MODE_SHA256;
+
+ if (crypto_ahash_digestsize(tfm) == SHA384_DIGEST_SIZE)
+ sha_ctx->op_mode = SE_AES_OP_MODE_SHA384;
+
+ if (crypto_ahash_digestsize(tfm) == SHA512_DIGEST_SIZE)
+ sha_ctx->op_mode = SE_AES_OP_MODE_SHA512;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
+ if ((num_sgs > SE_MAX_SRC_SG_COUNT)) {
+ dev_err(se_dev->dev, "num of SG buffers are more\n");
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+ return -EINVAL;
+ }
+ *se_dev->src_ll_buf = num_sgs-1;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ src_sg = req->src;
+ total = req->nbytes;
+
+ while (total) {
+ err = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ if (!err) {
+ dev_err(se_dev->dev, "dma_map_sg() error\n");
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+ return -EINVAL;
+ }
+ src_ll->addr = sg_dma_address(src_sg);
+ src_ll->data_len = src_sg->length;
+
+ total -= src_sg->length;
+ src_sg = sg_next(src_sg);
+ src_ll++;
+ }
+
+ tegra_se_config_algo(se_dev, sha_ctx->op_mode, false, 0);
+ tegra_se_config_sha(se_dev, req->nbytes);
+ err = tegra_se_start_operation(se_dev, 0, false);
+ if (!err) {
+ tegra_se_read_hash_result(se_dev, req->result,
+ crypto_ahash_digestsize(tfm), true);
+ if ((sha_ctx->op_mode == SE_AES_OP_MODE_SHA384) ||
+ (sha_ctx->op_mode == SE_AES_OP_MODE_SHA512)) {
+ u32 *result = (u32 *)req->result;
+ u32 temp, i;
+
+ for (i = 0; i < crypto_ahash_digestsize(tfm)/4;
+ i += 2) {
+ temp = result[i];
+ result[i] = result[i+1];
+ result[i+1] = temp;
+ }
+ }
+ }
+
+ src_sg = req->src;
+ total = req->nbytes;
+ while (total) {
+ dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ total -= src_sg->length;
+ src_sg = sg_next(src_sg);
+ }
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return err;
+}
+
+static int tegra_se_sha_digest(struct ahash_request *req)
+{
+ return tegra_se_sha_init(req) ?: tegra_se_sha_final(req);
+}
+
+int tegra_se_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct tegra_se_sha_context));
+ return 0;
+}
+
+void tegra_se_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ /* do nothing */
+}
+
+int tegra_se_aes_cmac_init(struct ahash_request *req)
+{
+ return 0;
+}
+
+int tegra_se_aes_cmac_update(struct ahash_request *req)
+{
+ return 0;
+}
+
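+/*
+ * CMAC finalization runs in two passes: every complete block except
+ * the last goes through AES-CBC with a zero IV and the hash kept in
+ * the hardware result registers, then the last block is copied out of
+ * the scatterlist, padded with 0x80 00.. when it is short, XORed with
+ * subkey K2 (padded) or K1 (full block) and processed with the updated
+ * IV to produce the digest.
+ */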
+int tegra_se_aes_cmac_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ struct scatterlist *src_sg;
+ struct tegra_se_ll *src_ll;
+ struct sg_mapping_iter miter;
+ u32 num_sgs, blocks_to_process, last_block_bytes = 0, bytes_to_copy = 0;
+ u8 piv[TEGRA_SE_AES_IV_SIZE];
+ int total, ret = 0, i = 0, mapped_sg_count = 0;
+ bool padding_needed = false;
+ unsigned long flags;
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ u8 *temp_buffer = NULL;
+ bool use_orig_iv = true;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ blocks_to_process = req->nbytes / TEGRA_SE_AES_BLOCK_SIZE;
+ /* num of bytes less than block size */
+ if ((req->nbytes % TEGRA_SE_AES_BLOCK_SIZE) || !blocks_to_process) {
+ padding_needed = true;
+ last_block_bytes = req->nbytes % TEGRA_SE_AES_BLOCK_SIZE;
+ } else {
+ /* decrement num of blocks */
+ blocks_to_process--;
+ if (blocks_to_process) {
+ /* there are more blocks to process; find the
+ number of bytes in the last block */
+ last_block_bytes = req->nbytes -
+ (blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE);
+ } else {
+ /* this is the last block and equal to block size */
+ last_block_bytes = req->nbytes;
+ }
+ }
+
+ /* first process all blocks except last block */
+ if (blocks_to_process) {
+ num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
+ if (num_sgs > SE_MAX_SRC_SG_COUNT) {
+ dev_err(se_dev->dev, "num of SG buffers are more\n");
+ goto out;
+ }
+ *se_dev->src_ll_buf = num_sgs - 1;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ src_sg = req->src;
+ total = blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE;
+ while (total > 0) {
+ ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ mapped_sg_count++;
+ if (!ret) {
+ dev_err(se_dev->dev, "dma_map_sg() error\n");
+ goto out;
+ }
+ src_ll->addr = sg_dma_address(src_sg);
+ if (total > src_sg->length)
+ src_ll->data_len = src_sg->length;
+ else
+ src_ll->data_len = total;
+
+ total -= src_sg->length;
+ if (total > 0) {
+ src_sg = sg_next(src_sg);
+ src_ll++;
+ }
+ WARN_ON(((total != 0) && (!src_sg)));
+ }
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CMAC, true,
+ cmac_ctx->keylen);
+ /* write zero IV */
+ memset(piv, 0, TEGRA_SE_AES_IV_SIZE);
+ tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE,
+ cmac_ctx->slot->slot_num,
+ SE_KEY_TABLE_TYPE_ORGIV);
+ tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true,
+ cmac_ctx->slot->slot_num, true);
+ tegra_se_start_operation(se_dev,
+ blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE, false);
+ src_sg = req->src;
+ while (mapped_sg_count--) {
+ dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE);
+ src_sg = sg_next(src_sg);
+ }
+ use_orig_iv = false;
+ }
+
+ /* get the last block bytes from the sg_dma buffer using miter */
+ src_sg = req->src;
+ num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
+ sg_flags |= SG_MITER_FROM_SG;
+ /* dma_alloc_coherent(GFP_KERNEL) may sleep, so allocate the
+ local buffer before disabling interrupts for the sg_miter
+ walk below */
+ cmac_ctx->buffer = dma_alloc_coherent(se_dev->dev,
+ TEGRA_SE_AES_BLOCK_SIZE,
+ &cmac_ctx->dma_addr, GFP_KERNEL);
+ if (!cmac_ctx->buffer)
+ goto out;
+
+ sg_miter_start(&miter, req->src, num_sgs, sg_flags);
+ local_irq_save(flags);
+ total = 0;
+ temp_buffer = cmac_ctx->buffer;
+ while (sg_miter_next(&miter) && total < req->nbytes) {
+ unsigned int len;
+ len = min(miter.length, req->nbytes - total);
+ if ((req->nbytes - (total + len)) <= last_block_bytes) {
+ bytes_to_copy =
+ last_block_bytes -
+ (req->nbytes - (total + len));
+ memcpy(temp_buffer, miter.addr + (len - bytes_to_copy),
+ bytes_to_copy);
+ last_block_bytes -= bytes_to_copy;
+ temp_buffer += bytes_to_copy;
+ }
+ total += len;
+ }
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+
+ /* process last block */
+ if (padding_needed) {
+ /* pad with 0x80, 0, 0 ... */
+ last_block_bytes = req->nbytes % TEGRA_SE_AES_BLOCK_SIZE;
+ cmac_ctx->buffer[last_block_bytes] = 0x80;
+ for (i = last_block_bytes+1; i < TEGRA_SE_AES_BLOCK_SIZE; i++)
+ cmac_ctx->buffer[i] = 0;
+ /* XOR with K2 */
+ for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++)
+ cmac_ctx->buffer[i] ^= cmac_ctx->K2[i];
+ } else {
+ /* XOR with K1 */
+ for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++)
+ cmac_ctx->buffer[i] ^= cmac_ctx->K1[i];
+ }
+ *se_dev->src_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ src_ll->addr = cmac_ctx->dma_addr;
+ src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
+
+ if (use_orig_iv) {
+ /* use zero IV when the number of bytes is
+ less than or equal to the block size */
+ memset(piv, 0, TEGRA_SE_AES_IV_SIZE);
+ tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE,
+ cmac_ctx->slot->slot_num,
+ SE_KEY_TABLE_TYPE_ORGIV);
+ }
+
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CMAC, true,
+ cmac_ctx->keylen);
+ tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true,
+ cmac_ctx->slot->slot_num, use_orig_iv);
+ tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false);
+ tegra_se_read_hash_result(se_dev, req->result,
+ TEGRA_SE_AES_CMAC_DIGEST_SIZE, false);
+
+out:
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ if (cmac_ctx->buffer)
+ dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
+ cmac_ctx->buffer, cmac_ctx->dma_addr);
+
+ return 0;
+}
+
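+/*
+ * Load the CMAC key into a key slot and derive the K1/K2 subkeys as in
+ * NIST SP 800-38B: encrypt an all-zero block, left shift the result by
+ * one bit for K1 (XORing in Rb = 0x87 when the shifted-out bit is set)
+ * and repeat the shift on K1 to obtain K2.
+ */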
+int tegra_se_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_se_aes_cmac_context *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se_dev *se_dev = sg_tegra_se_dev;
+ struct tegra_se_ll *src_ll, *dst_ll;
+ struct tegra_se_slot *pslot;
+ u8 piv[TEGRA_SE_AES_IV_SIZE];
+ u32 *pbuf;
+ dma_addr_t pbuf_adr;
+ int ret = 0;
+ u8 const rb = 0x87;
+ u8 msb;
+
+ if (!ctx) {
+ dev_err(se_dev->dev, "invalid context");
+ return -EINVAL;
+ }
+
+ if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
+ (keylen != TEGRA_SE_KEY_192_SIZE) &&
+ (keylen != TEGRA_SE_KEY_256_SIZE)) {
+ dev_err(se_dev->dev, "invalid key size");
+ return -EINVAL;
+ }
+
+ if (key) {
+ if (!ctx->slot || (ctx->slot &&
+ ctx->slot->slot_num == ssk_slot.slot_num)) {
+ pslot = tegra_se_alloc_key_slot();
+ if (!pslot) {
+ dev_err(se_dev->dev, "no free key slot\n");
+ return -ENOMEM;
+ }
+ ctx->slot = pslot;
+ }
+ ctx->keylen = keylen;
+ } else {
+ tegra_se_free_key_slot(ctx->slot);
+ ctx->slot = &ssk_slot;
+ ctx->keylen = AES_KEYSIZE_128;
+ }
+
+ pbuf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
+ &pbuf_adr, GFP_KERNEL);
+ if (!pbuf) {
+ dev_err(se_dev->dev, "can not allocate dma buffer");
+ return -ENOMEM;
+ }
+ memset(pbuf, 0, TEGRA_SE_AES_BLOCK_SIZE);
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+
+ src_ll->addr = pbuf_adr;
+ src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
+ dst_ll->addr = pbuf_adr;
+ dst_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
+
+ /* load the key */
+ tegra_se_write_key_table((u8 *)key, keylen,
+ ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY);
+
+ /* write zero IV */
+ memset(piv, 0, TEGRA_SE_AES_IV_SIZE);
+
+ /* load IV */
+ tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE,
+ ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV);
+
+ /* config crypto algo */
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CBC, true, keylen);
+
+ tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CBC, true,
+ ctx->slot->slot_num, true);
+
+ ret = tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false);
+ if (ret) {
+ dev_err(se_dev->dev, "tegra_se_aes_cmac_setkey:: start op failed\n");
+ goto out;
+ }
+
+ /* compute K1 subkey */
+ memcpy(ctx->K1, pbuf, TEGRA_SE_AES_BLOCK_SIZE);
+ tegra_se_leftshift_onebit(ctx->K1, TEGRA_SE_AES_BLOCK_SIZE, &msb);
+ if (msb)
+ ctx->K1[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;
+
+ /* compute K2 subkey */
+ memcpy(ctx->K2, ctx->K1, TEGRA_SE_AES_BLOCK_SIZE);
+ tegra_se_leftshift_onebit(ctx->K2, TEGRA_SE_AES_BLOCK_SIZE, &msb);
+
+ if (msb)
+ ctx->K2[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;
+
+out:
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ if (pbuf) {
+ dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
+ pbuf, pbuf_adr);
+ }
+
+ return ret;
+}
+
+int tegra_se_aes_cmac_digest(struct ahash_request *req)
+{
+ return tegra_se_aes_cmac_init(req) ?: tegra_se_aes_cmac_final(req);
+}
+
+int tegra_se_aes_cmac_finup(struct ahash_request *req)
+{
+ return 0;
+}
+
+int tegra_se_aes_cmac_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct tegra_se_aes_cmac_context));
+
+ return 0;
+}
+void tegra_se_aes_cmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_se_aes_cmac_context *ctx = crypto_tfm_ctx(tfm);
+
+ tegra_se_free_key_slot(ctx->slot);
+ ctx->slot = NULL;
+}
+
+static struct crypto_alg aes_algs[] = {
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_context),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cra_init,
+ .cra_exit = tegra_se_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
+ .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
+ .ivsize = TEGRA_SE_AES_IV_SIZE,
+ .setkey = tegra_se_aes_setkey,
+ .encrypt = tegra_se_aes_cbc_encrypt,
+ .decrypt = tegra_se_aes_cbc_decrypt,
+ }
+ }, {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_context),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cra_init,
+ .cra_exit = tegra_se_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
+ .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
+ .ivsize = TEGRA_SE_AES_IV_SIZE,
+ .setkey = tegra_se_aes_setkey,
+ .encrypt = tegra_se_aes_ecb_encrypt,
+ .decrypt = tegra_se_aes_ecb_decrypt,
+ }
+ }, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-tegra",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_context),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cra_init,
+ .cra_exit = tegra_se_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
+ .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
+ .ivsize = TEGRA_SE_AES_IV_SIZE,
+ .setkey = tegra_se_aes_setkey,
+ .encrypt = tegra_se_aes_ctr_encrypt,
+ .decrypt = tegra_se_aes_ctr_decrypt,
+ .geniv = "eseqiv",
+ }
+ }, {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-tegra",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_context),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cra_init,
+ .cra_exit = tegra_se_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
+ .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
+ .ivsize = TEGRA_SE_AES_IV_SIZE,
+ .setkey = tegra_se_aes_setkey,
+ .encrypt = tegra_se_aes_ofb_encrypt,
+ .decrypt = tegra_se_aes_ofb_decrypt,
+ .geniv = "eseqiv",
+ }
+ }, {
+ .cra_name = "ansi_cprng",
+ .cra_driver_name = "rng-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct tegra_se_rng_context),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_rng_init,
+ .cra_exit = tegra_se_rng_exit,
+ .cra_u = {
+ .rng = {
+ .rng_make_random = tegra_se_rng_get_random,
+ .rng_reset = tegra_se_rng_reset,
+ .seedsize = TEGRA_SE_RNG_SEED_SIZE,
+ }
+ }
+ }
+};
+
+static struct ahash_alg hash_algs[] = {
+ {
+ .init = tegra_se_aes_cmac_init,
+ .update = tegra_se_aes_cmac_update,
+ .final = tegra_se_aes_cmac_final,
+ .finup = tegra_se_aes_cmac_finup,
+ .digest = tegra_se_aes_cmac_digest,
+ .setkey = tegra_se_aes_cmac_setkey,
+ .halg.digestsize = TEGRA_SE_AES_CMAC_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "tegra-se-cmac(aes)",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_aes_cmac_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_aes_cmac_cra_init,
+ .cra_exit = tegra_se_aes_cmac_cra_exit,
+ }
+ }, {
+ .init = tegra_se_sha_init,
+ .update = tegra_se_sha_update,
+ .final = tegra_se_sha_final,
+ .finup = tegra_se_sha_finup,
+ .digest = tegra_se_sha_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "tegra-se-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_sha_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_sha_cra_init,
+ .cra_exit = tegra_se_sha_cra_exit,
+ }
+ }, {
+ .init = tegra_se_sha_init,
+ .update = tegra_se_sha_update,
+ .final = tegra_se_sha_final,
+ .finup = tegra_se_sha_finup,
+ .digest = tegra_se_sha_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "tegra-se-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_sha_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_sha_cra_init,
+ .cra_exit = tegra_se_sha_cra_exit,
+ }
+ }, {
+ .init = tegra_se_sha_init,
+ .update = tegra_se_sha_update,
+ .final = tegra_se_sha_final,
+ .finup = tegra_se_sha_finup,
+ .digest = tegra_se_sha_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "tegra-se-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_sha_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_sha_cra_init,
+ .cra_exit = tegra_se_sha_cra_exit,
+ }
+ }, {
+ .init = tegra_se_sha_init,
+ .update = tegra_se_sha_update,
+ .final = tegra_se_sha_final,
+ .finup = tegra_se_sha_finup,
+ .digest = tegra_se_sha_digest,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "tegra-se-sha384",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_sha_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_sha_cra_init,
+ .cra_exit = tegra_se_sha_cra_exit,
+ }
+ }, {
+ .init = tegra_se_sha_init,
+ .update = tegra_se_sha_update,
+ .final = tegra_se_sha_final,
+ .finup = tegra_se_sha_finup,
+ .digest = tegra_se_sha_digest,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "tegra-se-sha512",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_se_sha_context),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_se_sha_cra_init,
+ .cra_exit = tegra_se_sha_cra_exit,
+ }
+ }
+};
+
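+/*
+ * Probe: map the SE and PMC register apertures, hook up the interrupt,
+ * clock and key-slot pool, allocate the DMA linked-list buffers and
+ * register the AES/RNG and CMAC/SHA algorithms with the crypto API.
+ * The PMC mapping and the context-save buffer are only used by the LP0
+ * suspend path further below.
+ */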
+static int tegra_se_probe(struct platform_device *pdev)
+{
+ struct tegra_se_dev *se_dev = NULL;
+ struct resource *res = NULL;
+ int err = 0, i = 0, j = 0, k = 0;
+
+ se_dev = kzalloc(sizeof(struct tegra_se_dev), GFP_KERNEL);
+ if (!se_dev) {
+ dev_err(&pdev->dev, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&se_dev->lock);
+ crypto_init_queue(&se_dev->queue, TEGRA_SE_CRYPTO_QUEUE_LENGTH);
+ platform_set_drvdata(pdev, se_dev);
+ se_dev->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENXIO;
+ dev_err(se_dev->dev, "platform_get_resource failed\n");
+ goto fail;
+ }
+
+ se_dev->io_reg = ioremap(res->start, resource_size(res));
+ if (!se_dev->io_reg) {
+ err = -ENOMEM;
+ dev_err(se_dev->dev, "ioremap failed\n");
+ goto fail;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ err = -ENXIO;
+ dev_err(se_dev->dev, "platform_get_resource failed\n");
+ goto err_pmc;
+ }
+
+ se_dev->pmc_io_reg = ioremap(res->start, resource_size(res));
+ if (!se_dev->pmc_io_reg) {
+ err = -ENOMEM;
+ dev_err(se_dev->dev, "pmc ioremap failed\n");
+ goto err_pmc;
+ }
+
+ se_dev->irq = platform_get_irq(pdev, 0);
+ if (!se_dev->irq) {
+ err = -ENODEV;
+ dev_err(se_dev->dev, "platform_get_irq failed\n");
+ goto err_irq;
+ }
+
+ err = request_irq(se_dev->irq, tegra_se_irq, IRQF_DISABLED,
+ DRIVER_NAME, se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "request_irq failed - irq[%d] err[%d]\n",
+ se_dev->irq, err);
+ goto err_irq;
+ }
+
+ /* Initialize the clock */
+ se_dev->pclk = clk_get(se_dev->dev, "se");
+ if (IS_ERR(se_dev->pclk)) {
+ dev_err(se_dev->dev, "clock intialization failed (%d)\n",
+ (int)se_dev->pclk);
+ err = -ENODEV;
+ goto clean;
+ }
+
+ err = clk_set_rate(se_dev->pclk, ULONG_MAX);
+ if (err) {
+ dev_err(se_dev->dev, "clock set_rate failed.\n");
+ goto clean;
+ }
+
+ err = tegra_init_key_slot(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "init_key_slot failed\n");
+ goto clean;
+ }
+
+ init_completion(&se_dev->complete);
+ se_work_q = alloc_workqueue("se_work_q", WQ_HIGHPRI | WQ_UNBOUND, 16);
+ if (!se_work_q) {
+ err = -ENOMEM;
+ dev_err(se_dev->dev, "alloc_workqueue failed\n");
+ goto clean;
+ }
+
+ sg_tegra_se_dev = se_dev;
+ pm_runtime_enable(se_dev->dev);
+ tegra_se_key_read_disable_all();
+
+ err = tegra_se_alloc_ll_buf(se_dev, SE_MAX_SRC_SG_COUNT,
+ SE_MAX_DST_SG_COUNT);
+ if (err) {
+ dev_err(se_dev->dev, "can not allocate ll dma buffer\n");
+ goto clean;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ INIT_LIST_HEAD(&aes_algs[i].cra_list);
+ err = crypto_register_alg(&aes_algs[i]);
+ if (err) {
+ dev_err(se_dev->dev,
+ "crypto_register_alg failed index[%d]\n", i);
+ goto clean;
+ }
+ }
+
+ for (j = 0; j < ARRAY_SIZE(hash_algs); j++) {
+ err = crypto_register_ahash(&hash_algs[j]);
+ if (err) {
+ dev_err(se_dev->dev,
+ "crypto_register_ahash failed index[%d]\n", j);
+ goto clean;
+ }
+ }
+
+#if defined(CONFIG_PM)
+ se_dev->ctx_save_buf = dma_alloc_coherent(se_dev->dev,
+ SE_CONTEXT_BUFER_SIZE, &se_dev->ctx_save_buf_adr, GFP_KERNEL);
+ if (!se_dev->ctx_save_buf) {
+ err = -ENOMEM;
+ dev_err(se_dev->dev, "context save buffer allocation failed\n");
+ goto clean;
+ }
+#endif
+
+ dev_info(se_dev->dev, "%s: complete", __func__);
+ return 0;
+
+clean:
+ pm_runtime_disable(se_dev->dev);
+ for (k = 0; k < i; k++)
+ crypto_unregister_alg(&aes_algs[k]);
+
+ for (k = 0; k < j; k++)
+ crypto_unregister_ahash(&hash_algs[k]);
+
+ tegra_se_free_ll_buf(se_dev);
+
+ if (se_work_q)
+ destroy_workqueue(se_work_q);
+
+ if (se_dev->pclk)
+ clk_put(se_dev->pclk);
+
+ free_irq(se_dev->irq, se_dev);
+
+err_irq:
+ iounmap(se_dev->pmc_io_reg);
+err_pmc:
+ iounmap(se_dev->io_reg);
+
+fail:
+ platform_set_drvdata(pdev, NULL);
+ kfree(se_dev);
+ sg_tegra_se_dev = NULL;
+
+ return err;
+}
+
+static int __devexit tegra_se_remove(struct platform_device *pdev)
+{
+ struct tegra_se_dev *se_dev = platform_get_drvdata(pdev);
+ int i;
+
+ if (!se_dev)
+ return -ENODEV;
+
+ pm_runtime_disable(se_dev->dev);
+
+ cancel_work_sync(&se_work);
+ if (se_work_q)
+ destroy_workqueue(se_work_q);
+ free_irq(se_dev->irq, se_dev);
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+ crypto_unregister_alg(&aes_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
+ crypto_unregister_ahash(&hash_algs[i]);
+ if (se_dev->pclk)
+ clk_put(se_dev->pclk);
+ tegra_se_free_ll_buf(se_dev);
+ if (se_dev->ctx_save_buf) {
+ dma_free_coherent(se_dev->dev, SE_CONTEXT_BUFER_SIZE,
+ se_dev->ctx_save_buf, se_dev->ctx_save_buf_adr);
+ se_dev->ctx_save_buf = NULL;
+ }
+ iounmap(se_dev->io_reg);
+ iounmap(se_dev->pmc_io_reg);
+ kfree(se_dev);
+ sg_tegra_se_dev = NULL;
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int tegra_se_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int tegra_se_generate_rng_key(struct tegra_se_dev *se_dev)
+{
+ int ret = 0;
+ u32 val = 0;
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+
+ /* Configure algorithm */
+ val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
+ SE_CONFIG_DST(DST_KEYTAB);
+ se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);
+
+ /* Configure destination key index number */
+ val = SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(srk_slot.slot_num) |
+ SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(KEYS_0_3);
+ se_writel(se_dev, val, SE_CRYPTO_KEYTABLE_DST_REG_OFFSET);
+
+ /* Configure crypto */
+ val = SE_CRYPTO_INPUT_SEL(INPUT_LFSR) | SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
+ SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(ssk_slot.slot_num) |
+ SE_CRYPTO_IV_SEL(IV_ORIGINAL);
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false);
+
+ return ret;
+}
+
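+/*
+ * Generate the SRK used by the LP0 context-save sequence: the SRK key
+ * slot is first filled with RNG output keyed by the SSK, then the RNG
+ * is run again with DST_SRK so the hardware latches the result as the
+ * SRK itself.
+ */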
+static int tegra_se_generate_srk(struct tegra_se_dev *se_dev)
+{
+ int ret = 0;
+ u32 val = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ ret = tegra_se_generate_rng_key(se_dev);
+ if (ret) {
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+ return ret;
+ }
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+
+ val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
+ SE_CONFIG_DEC_ALG(ALG_NOP) | SE_CONFIG_DST(DST_SRK);
+
+ se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);
+
+ val = SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
+ SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) |
+ SE_CRYPTO_IV_SEL(IV_UPDATED);
+
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+static int tegra_se_lp_generate_random_data(struct tegra_se_dev *se_dev)
+{
+ struct tegra_se_ll *src_ll, *dst_ll;
+ int ret = 0;
+ u32 val;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ src_ll->addr = se_dev->ctx_save_buf_adr;
+ src_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE;
+ dst_ll->addr = se_dev->ctx_save_buf_adr;
+ dst_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE;
+
+ tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true,
+ TEGRA_SE_KEY_128_SIZE);
+
+ /* Configure crypto */
+ val = SE_CRYPTO_INPUT_SEL(INPUT_LFSR) | SE_CRYPTO_XOR_POS(XOR_BYPASS) |
+ SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
+ SE_CRYPTO_HASH(HASH_DISABLE) |
+ SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) |
+ SE_CRYPTO_IV_SEL(IV_ORIGINAL);
+
+ se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev,
+ SE_CONTEXT_SAVE_RANDOM_DATA_SIZE, false);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+
+}
+
+static int tegra_se_lp_encrypt_context_data(struct tegra_se_dev *se_dev,
+ u32 context_offset, u32 data_size)
+{
+ struct tegra_se_ll *src_ll, *dst_ll;
+ int ret = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ src_ll->addr = se_dev->ctx_save_buf_adr + context_offset;
+ src_ll->data_len = data_size;
+ dst_ll->addr = se_dev->ctx_save_buf_adr + context_offset;
+ dst_ll->data_len = data_size;
+
+ se_writel(se_dev, SE_CONTEXT_SAVE_SRC(MEM),
+ SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev, data_size, true);
+
+ pm_runtime_put(se_dev->dev);
+
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+static int tegra_se_lp_sticky_bits_context_save(struct tegra_se_dev *se_dev)
+{
+ struct tegra_se_ll *dst_ll;
+ int ret = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->src_ll_buf = 0;
+ *se_dev->dst_ll_buf = 0;
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ dst_ll->addr = (se_dev->ctx_save_buf_adr +
+ SE_CONTEXT_SAVE_STICKY_BITS_OFFSET);
+ dst_ll->data_len = SE_CONTEXT_SAVE_STICKY_BITS_SIZE;
+
+ se_writel(se_dev, SE_CONTEXT_SAVE_SRC(STICKY_BITS),
+ SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+
+ ret = tegra_se_start_operation(se_dev,
+ SE_CONTEXT_SAVE_STICKY_BITS_SIZE, true);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+static int tegra_se_lp_keytable_context_save(struct tegra_se_dev *se_dev)
+{
+ struct tegra_se_ll *dst_ll;
+ int ret = 0, i, j;
+ u32 val = 0;
+
+ /* take access to the hw */
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->dst_ll_buf = 0;
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ dst_ll->addr = (se_dev->ctx_save_buf_adr + SE_CONTEXT_SAVE_KEYS_OFFSET);
+ dst_ll->data_len = TEGRA_SE_KEY_128_SIZE;
+
+ for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
+ for (j = 0; j < 2; j++) {
+ val = SE_CONTEXT_SAVE_SRC(KEYTABLE) |
+ SE_CONTEXT_SAVE_KEY_INDEX(i) |
+ SE_CONTEXT_SAVE_WORD_QUAD(j);
+ se_writel(se_dev,
+ val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_KEY_128_SIZE, true);
+ if (ret)
+ break;
+ dst_ll->addr += TEGRA_SE_KEY_128_SIZE;
+ }
+ }
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+static int tegra_se_lp_iv_context_save(struct tegra_se_dev *se_dev,
+ bool org_iv, u32 context_offset)
+{
+ struct tegra_se_ll *dst_ll;
+ int ret = 0, i;
+ u32 val = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ *se_dev->dst_ll_buf = 0;
+ dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
+ dst_ll->addr = (se_dev->ctx_save_buf_adr + context_offset);
+ dst_ll->data_len = TEGRA_SE_AES_IV_SIZE;
+
+ for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
+ val = SE_CONTEXT_SAVE_SRC(KEYTABLE) |
+ SE_CONTEXT_SAVE_KEY_INDEX(i) |
+ (org_iv ? SE_CONTEXT_SAVE_WORD_QUAD(ORIG_IV) :
+ SE_CONTEXT_SAVE_WORD_QUAD(UPD_IV));
+ se_writel(se_dev, val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev,
+ TEGRA_SE_AES_IV_SIZE, true);
+ if (ret)
+ break;
+ dst_ll->addr += TEGRA_SE_AES_IV_SIZE;
+ }
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
+static int tegra_se_save_SRK(struct tegra_se_dev *se_dev)
+{
+ int ret = 0;
+
+ mutex_lock(&se_hw_lock);
+ pm_runtime_get_sync(se_dev->dev);
+
+ se_writel(se_dev, SE_CONTEXT_SAVE_SRC(SRK),
+ SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
+ ret = tegra_se_start_operation(se_dev, 0, true);
+
+ pm_runtime_put(se_dev->dev);
+ mutex_unlock(&se_hw_lock);
+
+ return ret;
+}
+
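+/*
+ * LP0 suspend: generate a fresh SRK, then save encrypted random data,
+ * the sticky bits, every key-table key, both IV sets and a known
+ * plaintext pattern into the context buffer. The buffer's DMA address
+ * is written to PMC SCRATCH43 so it survives LP0 (presumably for the
+ * warm-boot code to restore the SE state), and the SRK is finally
+ * stored in secure scratch.
+ */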
+static int tegra_se_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_se_dev *se_dev = platform_get_drvdata(pdev);
+ int err = 0, i;
+ unsigned char *dt_buf = NULL;
+ u8 pdata[SE_CONTEXT_KNOWN_PATTERN_SIZE] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f};
+
+ if (!se_dev)
+ return -ENODEV;
+
+ /* Generate SRK */
+ err = tegra_se_generate_srk(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP SRK genration failed\n");
+ goto out;
+ }
+
+ /* Generate random data*/
+ err = tegra_se_lp_generate_random_data(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP random pattern generation failed\n");
+ goto out;
+ }
+
+ /* Encrypt random data */
+ err = tegra_se_lp_encrypt_context_data(se_dev,
+ SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET,
+ SE_CONTEXT_SAVE_RANDOM_DATA_SIZE);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP random pattern encryption failed\n");
+ goto out;
+ }
+
+ /* Sticky bits context save */
+ err = tegra_se_lp_sticky_bits_context_save(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP sticky bits context save failure\n");
+ goto out;
+ }
+
+ /* Key table context save */
+ err = tegra_se_lp_keytable_context_save(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP key table save failure\n");
+ goto out;
+ }
+
+ /* Original iv context save */
+ err = tegra_se_lp_iv_context_save(se_dev,
+ true, SE_CONTEXT_ORIGINAL_IV_OFFSET);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP original iv save failure\n");
+ goto out;
+ }
+
+ /* Updated iv context save */
+ err = tegra_se_lp_iv_context_save(se_dev,
+ false, SE_CONTEXT_UPDATED_IV_OFFSET);
+ if (err) {
+ dev_err(se_dev->dev, "\n LP updated iv save failure\n");
+ goto out;
+ }
+
+ /* Encrypt known pattern */
+ dt_buf = (unsigned char *)se_dev->ctx_save_buf;
+ dt_buf += SE_CONTEXT_KNOWN_PATTERN_OFFSET;
+ for (i = 0; i < SE_CONTEXT_KNOWN_PATTERN_SIZE; i++)
+ dt_buf[i] = pdata[i];
+ err = tegra_se_lp_encrypt_context_data(se_dev,
+ SE_CONTEXT_KNOWN_PATTERN_OFFSET, SE_CONTEXT_KNOWN_PATTERN_SIZE);
+ if (err) {
+ dev_err(se_dev->dev, "LP known pattern save failure\n");
+ goto out;
+ }
+
+ /* Write lp context buffer address into PMC scratch register */
+ writel(se_dev->ctx_save_buf_adr,
+ se_dev->pmc_io_reg + PMC_SCRATCH43_REG_OFFSET);
+
+ /* Saves SRK in secure scratch */
+ err = tegra_se_save_SRK(se_dev);
+ if (err) {
+ dev_err(se_dev->dev, "LP SRK save failure\n");
+ goto out;
+ }
+
+out:
+ return err;
+}
+#endif
+
+#if defined(CONFIG_PM_RUNTIME)
+static int tegra_se_runtime_suspend(struct device *dev)
+{
+ /*
+ * do a dummy read, to avoid scenarios where you have unposted writes
+ * still on the bus, before disabling clocks
+ */
+ se_readl(sg_tegra_se_dev, SE_CONFIG_REG_OFFSET);
+
+ clk_disable(sg_tegra_se_dev->pclk);
+ return 0;
+}
+
+static int tegra_se_runtime_resume(struct device *dev)
+{
+ clk_enable(sg_tegra_se_dev->pclk);
+ return 0;
+}
+
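+/*
+ * Note: this dev_pm_ops structure is only compiled and hooked up when
+ * CONFIG_PM_RUNTIME is enabled (see the #if blocks around it and around
+ * the .pm assignment in tegra_se_driver below); the system sleep
+ * .suspend/.resume entries additionally require CONFIG_PM.
+ */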
+static const struct dev_pm_ops tegra_se_dev_pm_ops = {
+ .runtime_suspend = tegra_se_runtime_suspend,
+ .runtime_resume = tegra_se_runtime_resume,
+#if defined(CONFIG_PM)
+ .suspend = tegra_se_suspend,
+ .resume = tegra_se_resume,
+#endif
+};
+#endif
+
+static struct platform_driver tegra_se_driver = {
+ .probe = tegra_se_probe,
+ .remove = __devexit_p(tegra_se_remove),
+ .driver = {
+ .name = "tegra-se",
+ .owner = THIS_MODULE,
+#if defined(CONFIG_PM_RUNTIME)
+ .pm = &tegra_se_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init tegra_se_module_init(void)
+{
+ return platform_driver_register(&tegra_se_driver);
+}
+
+static void __exit tegra_se_module_exit(void)
+{
+ platform_driver_unregister(&tegra_se_driver);
+}
+
+module_init(tegra_se_module_init);
+module_exit(tegra_se_module_exit);
+
+MODULE_DESCRIPTION("Tegra Crypto algorithm support");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("tegra-se");
+
diff --git a/drivers/crypto/tegra-se.h b/drivers/crypto/tegra-se.h
new file mode 100644
index 000000000000..8c54df8991e6
--- /dev/null
+++ b/drivers/crypto/tegra-se.h
@@ -0,0 +1,235 @@
+/*
+ * Driver for Tegra Security Engine
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _CRYPTO_TEGRA_SE_H
+#define _CRYPTO_TEGRA_SE_H
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+
+#define PFX "tegra-se: "
+
+#define TEGRA_SE_CRA_PRIORITY 300
+#define TEGRA_SE_COMPOSITE_PRIORITY 400
+#define TEGRA_SE_CRYPTO_QUEUE_LENGTH 50
+#define SE_MAX_SRC_SG_COUNT 50
+#define SE_MAX_DST_SG_COUNT 50
+
+#define TEGRA_SE_KEYSLOT_COUNT 16
+
+/* SE register definitions */
+#define SE_CONFIG_REG_OFFSET 0x014
+#define SE_CONFIG_ENC_ALG_SHIFT 12
+#define SE_CONFIG_DEC_ALG_SHIFT 8
+#define ALG_AES_ENC 1
+#define ALG_RNG 2
+#define ALG_SHA 3
+#define ALG_NOP 0
+#define ALG_AES_DEC 1
+#define SE_CONFIG_ENC_ALG(x) (x << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_DEC_ALG(x) (x << SE_CONFIG_DEC_ALG_SHIFT)
+#define SE_CONFIG_DST_SHIFT 2
+#define DST_MEMORY 0
+#define DST_HASHREG 1
+#define DST_KEYTAB 2
+#define DST_SRK 3
+#define SE_CONFIG_DST(x) (x << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_ENC_MODE_SHIFT 24
+#define SE_CONFIG_DEC_MODE_SHIFT 16
+#define MODE_KEY128 0
+#define MODE_KEY192 1
+#define MODE_KEY256 2
+#define MODE_SHA1 0
+#define MODE_SHA224 4
+#define MODE_SHA256 5
+#define MODE_SHA384 6
+#define MODE_SHA512 7
+#define SE_CONFIG_ENC_MODE(x) (x << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE(x) (x << SE_CONFIG_DEC_MODE_SHIFT)
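+
+/*
+ * Illustrative example only: an AES-128 encrypt-to-memory configuration
+ * word would be composed from the fields above as
+ *
+ *   SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
+ *   SE_CONFIG_DEC_ALG(ALG_NOP) | SE_CONFIG_DST(DST_MEMORY)
+ *
+ * and written to SE_CONFIG_REG_OFFSET.
+ */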
+
+#define SE_KEYTABLE_REG_OFFSET 0x31c
+#define SE_KEYTABLE_SLOT_SHIFT 4
+#define SE_KEYTABLE_SLOT(x) (x << SE_KEYTABLE_SLOT_SHIFT)
+#define SE_KEYTABLE_QUAD_SHIFT 2
+#define QUAD_KEYS_128 0
+#define QUAD_KEYS_192 1
+#define QUAD_KEYS_256 1
+#define QUAD_ORG_IV 2
+#define QUAD_UPDTD_IV 3
+#define SE_KEYTABLE_QUAD(x) (x << SE_KEYTABLE_QUAD_SHIFT)
+#define SE_KEYTABLE_OP_TYPE_SHIFT 9
+#define OP_READ 0
+#define OP_WRITE 1
+#define SE_KEYTABLE_OP_TYPE(x) (x << SE_KEYTABLE_OP_TYPE_SHIFT)
+#define SE_KEYTABLE_TABLE_SEL_SHIFT 8
+#define TABLE_KEYIV 0
+#define TABLE_SCHEDULE 1
+#define SE_KEYTABLE_TABLE_SEL(x) (x << SE_KEYTABLE_TABLE_SEL_SHIFT)
+#define SE_KEYTABLE_PKT_SHIFT 0
+#define SE_KEYTABLE_PKT(x) (x << SE_KEYTABLE_PKT_SHIFT)
+
+#define SE_CRYPTO_REG_OFFSET 0x304
+#define SE_CRYPTO_HASH_SHIFT 0
+#define HASH_DISABLE 0
+#define HASH_ENABLE 1
+#define SE_CRYPTO_HASH(x) (x << SE_CRYPTO_HASH_SHIFT)
+#define SE_CRYPTO_XOR_POS_SHIFT 1
+#define XOR_BYPASS 0
+#define XOR_TOP 2
+#define XOR_BOTTOM 3
+#define SE_CRYPTO_XOR_POS(x) (x << SE_CRYPTO_XOR_POS_SHIFT)
+#define SE_CRYPTO_INPUT_SEL_SHIFT 3
+#define INPUT_AHB 0
+#define INPUT_LFSR 1
+#define INPUT_AESOUT 2
+#define INPUT_LNR_CTR 3
+#define SE_CRYPTO_INPUT_SEL(x) (x << SE_CRYPTO_INPUT_SEL_SHIFT)
+#define SE_CRYPTO_VCTRAM_SEL_SHIFT 5
+#define VCTRAM_AHB 0
+#define VCTRAM_AESOUT 2
+#define VCTRAM_PREVAHB 3
+#define SE_CRYPTO_VCTRAM_SEL(x) (x << SE_CRYPTO_VCTRAM_SEL_SHIFT)
+#define SE_CRYPTO_IV_SEL_SHIFT 7
+#define IV_ORIGINAL 0
+#define IV_UPDATED 1
+#define SE_CRYPTO_IV_SEL(x) (x << SE_CRYPTO_IV_SEL_SHIFT)
+#define SE_CRYPTO_CORE_SEL_SHIFT 8
+#define CORE_DECRYPT 0
+#define CORE_ENCRYPT 1
+#define SE_CRYPTO_CORE_SEL(x) (x << SE_CRYPTO_CORE_SEL_SHIFT)
+#define SE_CRYPTO_CTR_VAL_SHIFT 11
+#define SE_CRYPTO_CTR_VAL(x) (x << SE_CRYPTO_CTR_VAL_SHIFT)
+#define SE_CRYPTO_KEY_INDEX_SHIFT 24
+#define SE_CRYPTO_KEY_INDEX(x) (x << SE_CRYPTO_KEY_INDEX_SHIFT)
+#define SE_CRYPTO_CTR_CNTN_SHIFT 11
+#define SE_CRYPTO_CTR_CNTN(x) (x << SE_CRYPTO_CTR_CNTN_SHIFT)
+
+#define SE_CRYPTO_CTR_REG_COUNT 4
+#define SE_CRYPTO_CTR_REG_OFFSET 0x308
+
+#define SE_OPERATION_REG_OFFSET 0x008
+#define SE_OPERATION_SHIFT 0
+#define OP_ABORT 0
+#define OP_SRART 1
+#define OP_RESTART 2
+#define OP_CTX_SAVE 3
+#define SE_OPERATION(x) (x << SE_OPERATION_SHIFT)
+
+#define SE_CONTEXT_SAVE_CONFIG_REG_OFFSET 0x070
+#define SE_CONTEXT_SAVE_WORD_QUAD_SHIFT 0
+#define KEYS_0_3 0
+#define KEYS_4_7 1
+#define ORIG_IV 2
+#define UPD_IV 3
+#define SE_CONTEXT_SAVE_WORD_QUAD(x) (x << SE_CONTEXT_SAVE_WORD_QUAD_SHIFT)
+
+#define SE_CONTEXT_SAVE_KEY_INDEX_SHIFT 8
+#define SE_CONTEXT_SAVE_KEY_INDEX(x) (x << SE_CONTEXT_SAVE_KEY_INDEX_SHIFT)
+
+
+#define SE_CONTEXT_SAVE_SRC_SHIFT 30
+#define STICKY_BITS 0
+#define KEYTABLE 1
+#define MEM 2
+#define SRK 3
+#define SE_CONTEXT_SAVE_SRC(x) (x << SE_CONTEXT_SAVE_SRC_SHIFT)
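+
+/*
+ * Example (as used by the LP context-save helpers in tegra-se.c): saving
+ * key words 0-3 of key slot i is requested with
+ *
+ *   SE_CONTEXT_SAVE_SRC(KEYTABLE) | SE_CONTEXT_SAVE_KEY_INDEX(i) |
+ *   SE_CONTEXT_SAVE_WORD_QUAD(KEYS_0_3)
+ *
+ * written to SE_CONTEXT_SAVE_CONFIG_REG_OFFSET before
+ * tegra_se_start_operation() is invoked.
+ */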
+
+#define SE_INT_ENABLE_REG_OFFSET 0x00c
+#define SE_INT_STATUS_REG_OFFSET 0x010
+#define INT_DISABLE 0
+#define INT_ENABLE 1
+#define INT_UNSET 0
+#define INT_SET 1
+#define SE_INT_OP_DONE_SHIFT 4
+#define SE_INT_OP_DONE(x) (x << SE_INT_OP_DONE_SHIFT)
+#define SE_INT_ERROR_SHIFT 16
+#define SE_INT_ERROR(x) (x << SE_INT_ERROR_SHIFT)
+
+#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0X330
+#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0
+#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \
+ (x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT)
+
+#define SE_KEY_INDEX_SHIFT 8
+#define SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(x) (x << SE_KEY_INDEX_SHIFT)
+
+#define SE_IN_LL_ADDR_REG_OFFSET 0x018
+#define SE_OUT_LL_ADDR_REG_OFFSET 0x024
+
+#define SE_KEYTABLE_DATA0_REG_OFFSET 0x320
+#define SE_KEYTABLE_REG_MAX_DATA 16
+
+#define SE_BLOCK_COUNT_REG_OFFSET 0x318
+
+#define SE_SPARE_0_REG_OFFSET 0x80c
+
+#define SE_SHA_CONFIG_REG_OFFSET 0x200
+#define SHA_DISABLE 0
+#define SHA_ENABLE 1
+
+#define SE_SHA_MSG_LENGTH_REG_OFFSET 0x204
+#define SE_SHA_MSG_LEFT_REG_OFFSET 0x214
+
+
+#define SE_HASH_RESULT_REG_COUNT 16
+#define SE_HASH_RESULT_REG_OFFSET 0x030
+
+
+#define TEGRA_SE_KEY_256_SIZE 32
+#define TEGRA_SE_KEY_192_SIZE 24
+#define TEGRA_SE_KEY_128_SIZE 16
+#define TEGRA_SE_AES_BLOCK_SIZE 16
+#define TEGRA_SE_AES_MIN_KEY_SIZE 16
+#define TEGRA_SE_AES_MAX_KEY_SIZE 32
+#define TEGRA_SE_AES_IV_SIZE 16
+#define TEGRA_SE_RNG_IV_SIZE 16
+#define TEGRA_SE_RNG_DT_SIZE 16
+#define TEGRA_SE_RNG_KEY_SIZE 16
+#define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \
+ TEGRA_SE_RNG_KEY_SIZE + \
+ TEGRA_SE_RNG_DT_SIZE)
+#define TEGRA_SE_AES_CMAC_DIGEST_SIZE 16
+
+#define SE_KEY_TABLE_ACCESS_REG_OFFSET 0x284
+#define SE_KEY_READ_DISABLE_SHIFT 0
+
+#define SE_CONTEXT_BUFER_SIZE 1072
+#define SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET 0
+#define SE_CONTEXT_SAVE_RANDOM_DATA_SIZE 16
+#define SE_CONTEXT_SAVE_STICKY_BITS_OFFSET \
+ (SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET + SE_CONTEXT_SAVE_RANDOM_DATA_SIZE)
+#define SE_CONTEXT_SAVE_STICKY_BITS_SIZE 16
+#define SE_CONTEXT_SAVE_KEYS_OFFSET (SE_CONTEXT_SAVE_STICKY_BITS_OFFSET + \
+ SE_CONTEXT_SAVE_STICKY_BITS_SIZE)
+#define SE_CONTEXT_SAVE_KEY_LENGTH 512
+#define SE_CONTEXT_ORIGINAL_IV_OFFSET (SE_CONTEXT_SAVE_KEYS_OFFSET + \
+ SE_CONTEXT_SAVE_KEY_LENGTH)
+#define SE_CONTEXT_ORIGINAL_IV_LENGTH 256
+
+#define SE_CONTEXT_UPDATED_IV_OFFSET (SE_CONTEXT_ORIGINAL_IV_OFFSET + \
+ SE_CONTEXT_ORIGINAL_IV_LENGTH)
+
+#define SE_CONTEXT_UPDATED_IV_LENGTH 256
+#define SE_CONTEXT_KNOWN_PATTERN_OFFSET (SE_CONTEXT_UPDATED_IV_OFFSET + \
+ SE_CONTEXT_UPDATED_IV_LENGTH)
+#define SE_CONTEXT_KNOWN_PATTERN_SIZE 16
+
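+/*
+ * Resulting layout of the 1072-byte LP context buffer (offsets in bytes):
+ *
+ *      0 -   15  random data
+ *     16 -   31  sticky bits
+ *     32 -  543  key table (16 slots x 32 bytes)
+ *    544 -  799  original IVs (16 slots x 16 bytes)
+ *    800 - 1055  updated IVs (16 slots x 16 bytes)
+ *   1056 - 1071  known pattern
+ */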
+
+#endif /* _CRYPTO_TEGRA_SE_H */
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 747eb40e8afe..c1fba8d97f47 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -6,6 +6,8 @@
* Author:
* Erik Gilling <konkers@google.com>
*
+ * Copyright (c) 2011 NVIDIA Corporation.
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -20,24 +22,24 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/of.h>
+#include <linux/syscore_ops.h>
#include <asm/mach/irq.h>
#include <mach/iomap.h>
-#include <mach/suspend.h>
+#include <mach/pinmux.h>
+
+#include "../../../arch/arm/mach-tegra/pm-irq.h"
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
#define GPIO_BIT(x) ((x) & 0x7)
-#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
- GPIO_BANK(x) * 0x80 + \
- GPIO_PORT(x) * 4)
-
#define GPIO_CNF(x) (GPIO_REG(x) + 0x00)
#define GPIO_OE(x) (GPIO_REG(x) + 0x10)
#define GPIO_OUT(x) (GPIO_REG(x) + 0X20)
@@ -47,12 +49,29 @@
#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60)
#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70)
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
+ GPIO_BANK(x) * 0x80 + \
+ GPIO_PORT(x) * 4)
+
#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x800)
#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x810)
#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0X820)
#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0x840)
#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0x850)
#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0x860)
+#else
+#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
+ GPIO_BANK(x) * 0x100 + \
+ GPIO_PORT(x) * 4)
+
+#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x80)
+#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x90)
+#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0XA0)
+#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0xC0)
+#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0xD0)
+#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0xE0)
+#endif
#define GPIO_INT_LVL_MASK 0x010101
#define GPIO_INT_LVL_EDGE_RISING 0x000101
@@ -65,7 +84,7 @@ struct tegra_gpio_bank {
int bank;
int irq;
spinlock_t lvl_lock[4];
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
u32 cnf[4];
u32 out[4];
u32 oe[4];
@@ -74,7 +93,6 @@ struct tegra_gpio_bank {
#endif
};
-
static struct tegra_gpio_bank tegra_gpio_banks[] = {
{.bank = 0, .irq = INT_GPIO1},
{.bank = 1, .irq = INT_GPIO2},
@@ -83,6 +101,9 @@ static struct tegra_gpio_bank tegra_gpio_banks[] = {
{.bank = 4, .irq = INT_GPIO5},
{.bank = 5, .irq = INT_GPIO6},
{.bank = 6, .irq = INT_GPIO7},
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ {.bank = 7, .irq = INT_GPIO8},
+#endif
};
static int tegra_gpio_compose(int bank, int port, int bit)
@@ -90,6 +111,12 @@ static int tegra_gpio_compose(int bank, int port, int bit)
return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
}
+void tegra_gpio_set_tristate(int gpio_nr, enum tegra_tristate ts)
+{
+ int pin_group = tegra_pinmux_get_pingroup(gpio_nr);
+ tegra_pinmux_set_tristate(pin_group, ts);
+}
+
static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
{
u32 val;
@@ -100,15 +127,53 @@ static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
__raw_writel(val, reg);
}
+int tegra_gpio_get_bank_int_nr(int gpio)
+{
+ int bank;
+ int irq;
+ if (gpio >= TEGRA_NR_GPIOS) {
+ pr_warn("%s : Invalid gpio ID - %d\n", __func__, gpio);
+ return -EINVAL;
+ }
+ bank = gpio >> 5;
+ irq = tegra_gpio_banks[bank].irq;
+ return irq;
+}
+
void tegra_gpio_enable(int gpio)
{
+ if (gpio >= TEGRA_NR_GPIOS) {
+ pr_warn("%s : Invalid gpio ID - %d\n", __func__, gpio);
+ return;
+ }
tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
}
+EXPORT_SYMBOL_GPL(tegra_gpio_enable);
void tegra_gpio_disable(int gpio)
{
+ if (gpio >= TEGRA_NR_GPIOS) {
+ pr_warn("%s : Invalid gpio ID - %d\n", __func__, gpio);
+ return;
+ }
tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
}
+EXPORT_SYMBOL_GPL(tegra_gpio_disable);
+
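+/*
+ * Configure a pin's direction (and, for outputs, its initial level) through
+ * the per-bit mask registers, then set its CNF bit so the pin operates as a
+ * GPIO.  Like the helpers above, it warns and bails out on an out-of-range
+ * GPIO number.
+ */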
+void tegra_gpio_init_configure(unsigned gpio, bool is_input, int value)
+{
+ if (gpio >= TEGRA_NR_GPIOS) {
+ pr_warn("%s : Invalid gpio ID - %d\n", __func__, gpio);
+ return;
+ }
+ if (is_input) {
+ tegra_gpio_mask_write(GPIO_MSK_OE(gpio), gpio, 0);
+ } else {
+ tegra_gpio_mask_write(GPIO_MSK_OUT(gpio), gpio, value);
+ tegra_gpio_mask_write(GPIO_MSK_OE(gpio), gpio, 1);
+ }
+ tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
+}
static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
@@ -117,6 +182,9 @@ static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
{
+ if ((__raw_readl(GPIO_OE(offset)) >> GPIO_BIT(offset)) & 0x1)
+ return (__raw_readl(GPIO_OUT(offset)) >>
+ GPIO_BIT(offset)) & 0x1;
return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
}
@@ -151,6 +219,15 @@ static void tegra_gpio_irq_ack(struct irq_data *d)
int gpio = d->irq - INT_GPIO_BASE;
__raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
+
+#ifdef CONFIG_TEGRA_FPGA_PLATFORM
+ /* FPGA platforms have a serializer between the GPIO
+ block and interrupt controller. Allow time for
+ clearing of the GPIO interrupt to propagate to the
+ interrupt controller before re-enabling the IRQ
+ to prevent double interrupts. */
+ udelay(15);
+#endif
}
static void tegra_gpio_irq_mask(struct irq_data *d)
@@ -215,6 +292,8 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
__irq_set_handler_locked(d->irq, handle_edge_irq);
+ tegra_pm_irq_set_wake_type(d->irq, type);
+
return 0;
}
@@ -223,7 +302,6 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
struct tegra_gpio_bank *bank;
int port;
int pin;
- int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
@@ -234,31 +312,17 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
int gpio = tegra_gpio_compose(bank->bank, port, 0);
unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) &
__raw_readl(GPIO_INT_ENB(gpio));
- u32 lvl = __raw_readl(GPIO_INT_LVL(gpio));
-
- for_each_set_bit(pin, &sta, 8) {
- __raw_writel(1 << pin, GPIO_INT_CLR(gpio));
-
- /* if gpio is edge triggered, clear condition
- * before executing the hander so that we don't
- * miss edges
- */
- if (lvl & (0x100 << pin)) {
- unmasked = 1;
- chained_irq_exit(chip, desc);
- }
+ for_each_set_bit(pin, &sta, 8)
generic_handle_irq(gpio_to_irq(gpio + pin));
- }
}
- if (!unmasked)
- chained_irq_exit(chip, desc);
+ chained_irq_exit(chip, desc);
}
-#ifdef CONFIG_PM
-void tegra_gpio_resume(void)
+#ifdef CONFIG_PM_SLEEP
+static void tegra_gpio_resume(void)
{
unsigned long flags;
int b;
@@ -282,7 +346,7 @@ void tegra_gpio_resume(void)
local_irq_restore(flags);
}
-void tegra_gpio_suspend(void)
+static int tegra_gpio_suspend(void)
{
unsigned long flags;
int b;
@@ -302,24 +366,53 @@ void tegra_gpio_suspend(void)
}
}
local_irq_restore(flags);
+
+ return 0;
}
-static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
+static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- return irq_set_irq_wake(bank->irq, enable);
+ int ret = 0;
+
+ ret = tegra_pm_irq_set_wake(d->irq, enable);
+
+ if (ret)
+ return ret;
+
+ ret = irq_set_irq_wake(bank->irq, enable);
+
+ if (ret)
+ tegra_pm_irq_set_wake(d->irq, !enable);
+
+ return ret;
}
+#else
+#define tegra_gpio_irq_set_wake NULL
+#define tegra_gpio_suspend NULL
+#define tegra_gpio_resume NULL
#endif
+static struct syscore_ops tegra_gpio_syscore_ops = {
+ .suspend = tegra_gpio_suspend,
+ .resume = tegra_gpio_resume,
+};
+
+int tegra_gpio_resume_init(void)
+{
+ register_syscore_ops(&tegra_gpio_syscore_ops);
+
+ return 0;
+}
+
static struct irq_chip tegra_gpio_irq_chip = {
.name = "GPIO",
.irq_ack = tegra_gpio_irq_ack,
.irq_mask = tegra_gpio_irq_mask,
.irq_unmask = tegra_gpio_irq_unmask,
.irq_set_type = tegra_gpio_irq_set_type,
-#ifdef CONFIG_PM
- .irq_set_wake = tegra_gpio_wake_enable,
-#endif
+ .irq_set_wake = tegra_gpio_irq_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
@@ -334,10 +427,11 @@ static int __init tegra_gpio_init(void)
int i;
int j;
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
__raw_writel(0x00, GPIO_INT_ENB(gpio));
+ __raw_writel(0x00, GPIO_INT_STA(gpio));
}
}
@@ -365,11 +459,12 @@ static int __init tegra_gpio_init(void)
for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
bank = &tegra_gpio_banks[i];
- irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
- irq_set_handler_data(bank->irq, bank);
-
for (j = 0; j < 4; j++)
spin_lock_init(&bank->lvl_lock[j]);
+
+ irq_set_handler_data(bank->irq, bank);
+ irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
+
}
return 0;
@@ -401,7 +496,8 @@ static int dbg_gpio_show(struct seq_file *s, void *unused)
int i;
int j;
- for (i = 0; i < 7; i++) {
+ seq_printf(s, "Bank:Port CNF OE OUT IN INT_STA INT_ENB INT_LVL\n");
+ for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
seq_printf(s,
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd0..ca2d3b34dbf5 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/ stub/ ion/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644
index 000000000000..5b48b4e85e73
--- /dev/null
+++ b/drivers/gpu/ion/Kconfig
@@ -0,0 +1,12 @@
+menuconfig ION
+ tristate "Ion Memory Manager"
+ select GENERIC_ALLOCATOR
+ help
+ Choose this option to enable the ION Memory Manager.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+ Choose this option if you wish to use ion on an NVIDIA Tegra.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644
index 000000000000..73fe3fa10706
--- /dev/null
+++ b/drivers/gpu/ion/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 000000000000..b32b1427e3e3
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1132 @@
+/*
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/ion.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "ion_priv.h"
+#define DEBUG
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+ struct ion_buffer *buffer)
+{
+ struct rb_node **p = &dev->buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_buffer *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_buffer, node);
+
+ if (buffer < entry) {
+ p = &(*p)->rb_left;
+ } else if (buffer > entry) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
+ }
+ }
+
+ rb_link_node(&buffer->node, parent, p);
+ rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->heap = heap;
+ kref_init(&buffer->ref);
+
+ ret = heap->ops->allocate(heap, buffer, len, align, flags);
+ if (ret) {
+ kfree(buffer);
+ return ERR_PTR(ret);
+ }
+ buffer->dev = dev;
+ buffer->size = len;
+ mutex_init(&buffer->lock);
+ ion_buffer_add(dev, buffer);
+ return buffer;
+}
+
+static void ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_device *dev = buffer->dev;
+
+ buffer->heap->ops->free(buffer);
+ mutex_lock(&dev->lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->lock);
+ kfree(buffer);
+}
+
+void ion_buffer_get(struct ion_buffer *buffer)
+{
+ kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+ return kref_put(&buffer->ref, ion_buffer_destroy);
+}
+
+struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle;
+
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&handle->ref);
+ rb_init_node(&handle->node);
+ handle->client = client;
+ ion_buffer_get(buffer);
+ handle->buffer = buffer;
+
+ return handle;
+}
+
+static void ion_handle_destroy(struct kref *kref)
+{
+ struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+ /* XXX Can a handle be destroyed while its map count is non-zero?:
+ if (handle->map_cnt) unmap
+ */
+ ion_buffer_put(handle->buffer);
+ mutex_lock(&handle->client->lock);
+ if (!RB_EMPTY_NODE(&handle->node))
+ rb_erase(&handle->node, &handle->client->handles);
+ mutex_unlock(&handle->client->lock);
+ kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
+void ion_handle_get(struct ion_handle *handle)
+{
+ kref_get(&handle->ref);
+}
+
+int ion_handle_put(struct ion_handle *handle)
+{
+ return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ if (handle->buffer == buffer)
+ return handle;
+ }
+ return NULL;
+}
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+{
+ struct rb_node *n = client->handles.rb_node;
+
+ while (n) {
+ struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
+ node);
+ if (handle < handle_node)
+ n = n->rb_left;
+ else if (handle > handle_node)
+ n = n->rb_right;
+ else
+ return true;
+ }
+ return false;
+}
+
+void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ struct rb_node **p = &client->handles.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_handle *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_handle, node);
+
+ if (handle < entry)
+ p = &(*p)->rb_left;
+ else if (handle > entry)
+ p = &(*p)->rb_right;
+ else
+ WARN(1, "%s: buffer already found.", __func__);
+ }
+
+ rb_link_node(&handle->node, parent, p);
+ rb_insert_color(&handle->node, &client->handles);
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int flags)
+{
+ struct rb_node *n;
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+ struct ion_buffer *buffer = NULL;
+
+ /*
+ * Traverse the list of heaps available in this system in priority
+ * order.  If the heap type is supported by the client and matches the
+ * request of the caller, allocate from it.  Repeat until an allocation
+ * has succeeded or all heaps have been tried.
+ */
+ mutex_lock(&dev->lock);
+ for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+ struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+ /* if the client doesn't support this heap type */
+ if (!((1 << heap->type) & client->heap_mask))
+ continue;
+ /* if the caller didn't specify this heap type */
+ if (!((1 << heap->id) & flags))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, align, flags);
+ if (!IS_ERR_OR_NULL(buffer))
+ break;
+ }
+ mutex_unlock(&dev->lock);
+
+ if (IS_ERR_OR_NULL(buffer))
+ return ERR_PTR(PTR_ERR(buffer));
+
+ handle = ion_handle_create(client, buffer);
+
+ if (IS_ERR_OR_NULL(handle))
+ goto end;
+
+ /*
+ * ion_buffer_create will create a buffer with a ref_cnt of 1,
+ * and ion_handle_create will take a second reference, drop one here
+ */
+ ion_buffer_put(buffer);
+
+ mutex_lock(&client->lock);
+ ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ return handle;
+
+end:
+ ion_buffer_put(buffer);
+ return handle;
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ return;
+ }
+ ion_handle_put(handle);
+}
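+
+/*
+ * Illustrative in-kernel usage of the API above (sketch only; error
+ * handling omitted, and "idev", "heap_type_mask", "heap_id_mask", "len"
+ * and "align" are placeholders supplied by the caller):
+ *
+ *   struct ion_client *client;
+ *   struct ion_handle *handle;
+ *   void *cpu_addr;
+ *
+ *   client = ion_client_create(idev, heap_type_mask, "my-driver");
+ *   handle = ion_alloc(client, len, align, heap_id_mask);
+ *   cpu_addr = ion_map_kernel(client, handle);
+ *   ...
+ *   ion_unmap_kernel(client, handle);
+ *   ion_free(client, handle);
+ *   ion_client_destroy(client);
+ */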
+
+static bool _ion_map(int *buffer_cnt, int *handle_cnt)
+{
+ bool map;
+
+ BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
+
+ if (*buffer_cnt)
+ map = false;
+ else
+ map = true;
+ if (*handle_cnt == 0)
+ (*buffer_cnt)++;
+ (*handle_cnt)++;
+ return map;
+}
+
+static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
+{
+ BUG_ON(*handle_cnt == 0);
+ (*handle_cnt)--;
+ if (*handle_cnt != 0)
+ return false;
+ BUG_ON(*buffer_cnt == 0);
+ (*buffer_cnt)--;
+ if (*buffer_cnt == 0)
+ return true;
+ return false;
+}
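+
+/*
+ * The _ion_map()/_ion_unmap() helpers above keep two counters in sync:
+ * handle_cnt counts how many times this client's handle has mapped the
+ * buffer, while buffer_cnt counts how many handles (possibly from several
+ * clients) currently have it mapped.  The boolean result tells the caller
+ * whether the heap-level mapping must actually be created (first mapping
+ * overall) or torn down (last mapping released).
+ */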
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ void *vaddr;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+
+ if (!handle->buffer->heap->ops->map_kernel) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
+ buffer->vaddr = vaddr;
+ } else {
+ vaddr = buffer->vaddr;
+ }
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return vaddr;
+}
+
+struct scatterlist *ion_map_dma(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct scatterlist *sglist;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+
+ if (!handle->buffer->heap->ops->map_dma) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+ if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+ sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(sglist))
+ _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
+ buffer->sglist = sglist;
+ } else {
+ sglist = buffer->sglist;
+ }
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return sglist;
+}
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+ buffer->sglist = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+
+
+struct ion_buffer *ion_share(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* do not take an extra reference here, the burden is on the caller
+ * to make sure the buffer doesn't go away while it's passing it
+ * to another client -- ion_free should not be called on this handle
+ * until the buffer has been imported into the other client
+ */
+ return handle->buffer;
+}
+
+struct ion_handle *ion_import(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle = NULL;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR_OR_NULL(handle)) {
+ ion_handle_get(handle);
+ goto end;
+ }
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR_OR_NULL(handle))
+ goto end;
+ ion_handle_add(client, handle);
+end:
+ mutex_unlock(&client->lock);
+ return handle;
+}
+
+static const struct file_operations ion_share_fops;
+
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
+{
+ struct file *file = fget(fd);
+ struct ion_handle *handle;
+
+ if (!file) {
+ pr_err("%s: imported fd not found in file table.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ if (file->f_op != &ion_share_fops) {
+ pr_err("%s: imported file is not a shared ion file.\n",
+ __func__);
+ handle = ERR_PTR(-EINVAL);
+ goto end;
+ }
+ handle = ion_import(client, file->private_data);
+end:
+ fput(file);
+ return handle;
+}
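+
+/*
+ * Cross-client sharing works in two steps: the exporting client turns a
+ * handle into a file descriptor via ION_IOC_SHARE (see ion_ioctl_share()
+ * below) and passes that fd to another process, which hands it to
+ * ion_import_fd().  The fd is resolved back to the underlying ion_buffer
+ * and a handle is created (or an existing one reused) in the importing
+ * client.
+ */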
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+ size_t sizes[ION_NUM_HEAPS] = {0};
+ const char *names[ION_NUM_HEAPS] = {0};
+ int i;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ enum ion_heap_type type = handle->buffer->heap->type;
+
+ if (!names[type])
+ names[type] = handle->buffer->heap->name;
+ sizes[type] += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+
+ seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ for (i = 0; i < ION_NUM_HEAPS; i++) {
+ if (!names[i])
+ continue;
+ seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i],
+ atomic_read(&client->ref.refcount));
+ }
+ return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+ .open = ion_debug_client_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct ion_client *ion_client_lookup(struct ion_device *dev,
+ struct task_struct *task)
+{
+ struct rb_node *n = dev->user_clients.rb_node;
+ struct ion_client *client;
+
+ mutex_lock(&dev->lock);
+ while (n) {
+ client = rb_entry(n, struct ion_client, node);
+ if (task == client->task) {
+ ion_client_get(client);
+ mutex_unlock(&dev->lock);
+ return client;
+ } else if (task < client->task) {
+ n = n->rb_left;
+ } else if (task > client->task) {
+ n = n->rb_right;
+ }
+ }
+ mutex_unlock(&dev->lock);
+ return NULL;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+ unsigned int heap_mask,
+ const char *name)
+{
+ struct ion_client *client;
+ struct task_struct *task;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ion_client *entry;
+ char debug_name[64];
+ pid_t pid;
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ pid = task_pid_nr(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->group_leader->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+
+ /* if this isn't a kernel thread, see if a client already
+ exists */
+ if (task) {
+ client = ion_client_lookup(dev, task);
+ if (!IS_ERR_OR_NULL(client)) {
+ put_task_struct(current->group_leader);
+ return client;
+ }
+ }
+
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ if (!client) {
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ client->dev = dev;
+ client->handles = RB_ROOT;
+ mutex_init(&client->lock);
+ client->name = name;
+ client->heap_mask = heap_mask;
+ client->task = task;
+ client->pid = pid;
+ kref_init(&client->ref);
+
+ mutex_lock(&dev->lock);
+ if (task) {
+ p = &dev->user_clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (task < entry->task)
+ p = &(*p)->rb_left;
+ else if (task > entry->task)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->user_clients);
+ } else {
+ p = &dev->kernel_clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->kernel_clients);
+ }
+
+ snprintf(debug_name, 64, "%u", client->pid);
+ client->debug_root = debugfs_create_file(debug_name, 0664,
+ dev->debug_root, client,
+ &debug_client_fops);
+ mutex_unlock(&dev->lock);
+
+ return client;
+}
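+
+/*
+ * For userspace callers ion_client_create() is effectively per-process: if
+ * the calling task's group leader already owns a client, that client is
+ * reference-counted and returned instead of allocating a new one.  Kernel
+ * threads get anonymous clients tracked in dev->kernel_clients.
+ */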
+
+static void _ion_client_destroy(struct kref *kref)
+{
+ struct ion_client *client = container_of(kref, struct ion_client, ref);
+ struct ion_device *dev = client->dev;
+ struct rb_node *n;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ while ((n = rb_first(&client->handles))) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ ion_handle_destroy(&handle->ref);
+ }
+ mutex_lock(&dev->lock);
+ if (client->task) {
+ rb_erase(&client->node, &dev->user_clients);
+ put_task_struct(client->task);
+ } else {
+ rb_erase(&client->node, &dev->kernel_clients);
+ }
+ debugfs_remove_recursive(client->debug_root);
+ mutex_unlock(&dev->lock);
+
+ kfree(client);
+}
+
+void ion_client_get(struct ion_client *client)
+{
+ kref_get(&client->ref);
+}
+
+int ion_client_put(struct ion_client *client)
+{
+ return kref_put(&client->ref, _ion_client_destroy);
+}
+
+void ion_client_destroy(struct ion_client *client)
+{
+ ion_client_put(client);
+}
+
+static int ion_share_release(struct inode *inode, struct file *file)
+{
+ struct ion_buffer *buffer = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ /* drop the reference to the buffer -- this prevents the
+ buffer from going away because the client holding it exited
+ while it was being passed */
+ ion_buffer_put(buffer);
+ return 0;
+}
+
+static void ion_vma_open(struct vm_area_struct *vma)
+{
+
+ struct ion_buffer *buffer = vma->vm_file->private_data;
+ struct ion_handle *handle = vma->vm_private_data;
+ struct ion_client *client;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ /* check that the client still exists and take a reference so
+ it can't go away until this vma is closed */
+ client = ion_client_lookup(buffer->dev, current->group_leader);
+ if (IS_ERR_OR_NULL(client)) {
+ vma->vm_private_data = NULL;
+ return;
+ }
+ ion_buffer_get(buffer);
+ ion_handle_get(handle);
+ pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+ __func__, __LINE__,
+ atomic_read(&client->ref.refcount),
+ atomic_read(&handle->ref.refcount),
+ atomic_read(&buffer->ref.refcount));
+}
+
+static void ion_vma_close(struct vm_area_struct *vma)
+{
+ struct ion_handle *handle = vma->vm_private_data;
+ struct ion_buffer *buffer = vma->vm_file->private_data;
+ struct ion_client *client;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ /* this indicates the client is gone, nothing to do here */
+ if (!handle)
+ return;
+ client = handle->client;
+ pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+ __func__, __LINE__,
+ atomic_read(&client->ref.refcount),
+ atomic_read(&handle->ref.refcount),
+ atomic_read(&buffer->ref.refcount));
+ ion_handle_put(handle);
+ ion_client_put(client);
+ ion_buffer_put(buffer);
+ pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+ __func__, __LINE__,
+ atomic_read(&client->ref.refcount),
+ atomic_read(&handle->ref.refcount),
+ atomic_read(&buffer->ref.refcount));
+}
+
+static struct vm_operations_struct ion_vm_ops = {
+ .open = ion_vma_open,
+ .close = ion_vma_close,
+};
+
+static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = file->private_data;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct ion_client *client;
+ struct ion_handle *handle;
+ int ret;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ /* make sure the client still exists, it's possible for the client to
+ have gone away but the map/share fd still to be around, take
+ a reference to it so it can't go away while this mapping exists */
+ client = ion_client_lookup(buffer->dev, current->group_leader);
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: trying to mmap an ion handle in a process with no "
+ "ion client\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
+ buffer->size)) {
+ pr_err("%s: trying to map larger area than handle has available"
+ "\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* find the handle and take a reference to it */
+ handle = ion_import(client, buffer);
+ if (IS_ERR_OR_NULL(handle)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ion_buffer_get(buffer);
+
+ if (!handle->buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+ if (ret) {
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+ goto err1;
+ }
+
+ vma->vm_ops = &ion_vm_ops;
+ /* move the handle into the vm_private_data so we can access it from
+ vma_open/close */
+ vma->vm_private_data = handle;
+ pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+ __func__, __LINE__,
+ atomic_read(&client->ref.refcount),
+ atomic_read(&handle->ref.refcount),
+ atomic_read(&buffer->ref.refcount));
+ return 0;
+
+err1:
+ /* drop the reference to the handle */
+ ion_handle_put(handle);
+err:
+ /* drop the reference to the client */
+ ion_client_put(client);
+ return ret;
+}
+
+static const struct file_operations ion_share_fops = {
+ .owner = THIS_MODULE,
+ .release = ion_share_release,
+ .mmap = ion_share_mmap,
+};
+
+static int ion_ioctl_share(struct file *parent, struct ion_client *client,
+ struct ion_handle *handle)
+{
+ int fd = get_unused_fd();
+ struct file *file;
+
+ if (fd < 0)
+ return -ENFILE;
+
+ file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
+ handle->buffer, O_RDWR);
+ if (IS_ERR_OR_NULL(file))
+ goto err;
+ ion_buffer_get(handle->buffer);
+ fd_install(fd, file);
+
+ return fd;
+
+err:
+ put_unused_fd(fd);
+ return -ENFILE;
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_allocation_data data;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ data.handle = ion_alloc(client, data.len, data.align,
+ data.flags);
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle_data data;
+ bool valid;
+
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_handle_data)))
+ return -EFAULT;
+ mutex_lock(&client->lock);
+ valid = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+ if (!valid)
+ return -EINVAL;
+ ion_free(client, data.handle);
+ break;
+ }
+ case ION_IOC_MAP:
+ case ION_IOC_SHARE:
+ {
+ struct ion_fd_data data;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, data.handle)) {
+ pr_err("%s: invalid handle passed to share ioctl.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+ data.fd = ion_ioctl_share(filp, client, data.handle);
+ mutex_unlock(&client->lock);
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_fd_data data;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_fd_data)))
+ return -EFAULT;
+
+ data.handle = ion_import_fd(client, data.fd);
+ if (IS_ERR(data.handle))
+ data.handle = NULL;
+ if (copy_to_user((void __user *)arg, &data,
+ sizeof(struct ion_fd_data)))
+ return -EFAULT;
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ struct ion_device *dev = client->dev;
+ struct ion_custom_data data;
+
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_custom_data)))
+ return -EFAULT;
+ return dev->custom_ioctl(client, data.cmd, data.arg);
+ }
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
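+
+/*
+ * Illustrative userspace sequence against this ioctl interface (sketch
+ * only; error handling omitted, "len", "align" and "heap_id_mask" are
+ * placeholders, and the device node name depends on how udev names the
+ * "ion" misc device):
+ *
+ *   int fd = open("/dev/ion", O_RDWR);
+ *   struct ion_allocation_data alloc = {
+ *           .len = len, .align = align, .flags = heap_id_mask,
+ *   };
+ *   ioctl(fd, ION_IOC_ALLOC, &alloc);
+ *   struct ion_fd_data share = { .handle = alloc.handle };
+ *   ioctl(fd, ION_IOC_SHARE, &share);
+ *   void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                  share.fd, 0);
+ */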
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+ struct ion_client *client = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ ion_client_put(client);
+ return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+ struct ion_client *client;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ client = ion_client_create(dev, -1, "user");
+ if (IS_ERR_OR_NULL(client))
+ return PTR_ERR(client);
+ file->private_data = client;
+
+ return 0;
+}
+
+static const struct file_operations ion_fops = {
+ .owner = THIS_MODULE,
+ .open = ion_open,
+ .release = ion_release,
+ .unlocked_ioctl = ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+ enum ion_heap_type type)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle,
+ node);
+ if (handle->buffer->heap->type == type)
+ size += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+
+ seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
+ for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ char task_comm[TASK_COMM_LEN];
+ size_t size = ion_debug_heap_total(client, heap->type);
+ if (!size)
+ continue;
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid,
+ size);
+ }
+
+ for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ size_t size = ion_debug_heap_total(client, heap->type);
+ if (!size)
+ continue;
+ seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
+ size);
+ }
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+ struct rb_node **p = &dev->heaps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_heap *entry;
+
+ heap->dev = dev;
+ mutex_lock(&dev->lock);
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_heap, node);
+
+ if (heap->id < entry->id) {
+ p = &(*p)->rb_left;
+ } else if (heap->id > entry->id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: can not insert multiple heaps with "
+ "id %d\n", __func__, heap->id);
+ goto end;
+ }
+ }
+
+ rb_link_node(&heap->node, parent, p);
+ rb_insert_color(&heap->node, &dev->heaps);
+ debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+ &debug_heap_fops);
+end:
+ mutex_unlock(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg))
+{
+ struct ion_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ if (!idev)
+ return ERR_PTR(-ENOMEM);
+
+ idev->dev.minor = MISC_DYNAMIC_MINOR;
+ idev->dev.name = "ion";
+ idev->dev.fops = &ion_fops;
+ idev->dev.parent = NULL;
+ ret = misc_register(&idev->dev);
+ if (ret) {
+ pr_err("ion: failed to register misc device.\n");
+ return ERR_PTR(ret);
+ }
+
+ idev->debug_root = debugfs_create_dir("ion", NULL);
+ if (IS_ERR_OR_NULL(idev->debug_root))
+ pr_err("ion: failed to create debug files.\n");
+
+ idev->custom_ioctl = custom_ioctl;
+ idev->buffers = RB_ROOT;
+ mutex_init(&idev->lock);
+ idev->heaps = RB_ROOT;
+ idev->user_clients = RB_ROOT;
+ idev->kernel_clients = RB_ROOT;
+ return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+ misc_deregister(&dev->dev);
+ /* XXX need to free the heaps and clients ? */
+ kfree(dev);
+}
+
+struct ion_client *ion_client_get_file(int fd)
+{
+ struct ion_client *client = ERR_PTR(-EFAULT);
+ struct file *f = fget(fd);
+ if (!f)
+ return ERR_PTR(-EINVAL);
+
+ if (f->f_op == &ion_fops) {
+ client = f->private_data;
+ ion_client_get(client);
+ }
+
+ fput(f);
+ return client;
+}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
new file mode 100644
index 000000000000..606adae13f48
--- /dev/null
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -0,0 +1,162 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+
+struct ion_carveout_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+ unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ *addr = buffer->priv_phys;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ buffer->priv_phys = ion_carveout_allocate(heap, size, align);
+ return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+
+ ion_carveout_free(heap, buffer->priv_phys, buffer->size);
+ buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
+}
+
+struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return __arch_ioremap(buffer->priv_phys, buffer->size,
+ MT_MEMORY_NONCACHED);
+}
+
+void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ __arch_iounmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ return;
+}
+
+int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ return remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+ buffer->size,
+ pgprot_noncached(vma->vm_page_prot));
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+ .allocate = ion_carveout_heap_allocate,
+ .free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_user = ion_carveout_heap_map_user,
+ .map_kernel = ion_carveout_heap_map_kernel,
+ .unmap_kernel = ion_carveout_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_carveout_heap *carveout_heap;
+
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ if (!carveout_heap)
+ return ERR_PTR(-ENOMEM);
+
+ carveout_heap->pool = gen_pool_create(12, -1);
+ if (!carveout_heap->pool) {
+ kfree(carveout_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ carveout_heap->base = heap_data->base;
+ gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+ -1);
+ carveout_heap->heap.ops = &carveout_heap_ops;
+ carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+
+ return &carveout_heap->heap;
+}
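+
+/*
+ * The carveout pool is created with a minimum allocation order of 12, so
+ * gen_pool_alloc() hands out space in 4 KiB granules; note that the align
+ * argument is currently ignored by ion_carveout_allocate() above.
+ */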
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ gen_pool_destroy(carveout_heap->pool);
+ kfree(carveout_heap);
+ carveout_heap = NULL;
+}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
new file mode 100644
index 000000000000..8ce3c1907bad
--- /dev/null
+++ b/drivers/gpu/ion/ion_heap.c
@@ -0,0 +1,72 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include "ion_priv.h"
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch (heap_data->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ heap = ion_system_contig_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ heap = ion_system_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ heap = ion_carveout_heap_create(heap_data);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ ion_system_contig_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ ion_system_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ ion_carveout_heap_destroy(heap);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap->type);
+ }
+}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
new file mode 100644
index 000000000000..8c75ff5e0292
--- /dev/null
+++ b/drivers/gpu/ion/ion_priv.h
@@ -0,0 +1,275 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/ion.h>
+#include <linux/miscdevice.h>
+
+struct ion_mapping;
+
+struct ion_dma_mapping {
+ struct kref ref;
+ struct scatterlist *sglist;
+};
+
+struct ion_kernel_mapping {
+ struct kref ref;
+ void *vaddr;
+};
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @lock: lock protecting the buffers & heaps trees
+ * @heaps: list of all the heaps in the system
+ * @custom_ioctl: arch specific ioctl function if applicable
+ * @user_clients: list of all the clients created from userspace
+ * @kernel_clients: list of all the clients created from the kernel
+ * @debug_root: debugfs root directory for this device
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex lock;
+ struct rb_root heaps;
+ long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root user_clients;
+ struct rb_root kernel_clients;
+ struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @ref: for reference counting the client
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @lock: lock protecting the tree of handles
+ * @heap_mask: mask of all supported heaps
+ * @name: used for debugging
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct kref ref;
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct mutex lock;
+ unsigned int heap_mask;
+ const char *name;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @dmap_cnt: count of times this client has mapped for dma
+ * @usermap_cnt: count of times this client has mapped for userspace
+ *
+ * Modifications to node, the *map_cnt fields, or the underlying mappings
+ * should be protected by the lock in the client. Other fields are never
+ * changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ unsigned int dmap_cnt;
+ unsigned int usermap_cnt;
+};
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);
+
+void ion_buffer_get(struct ion_buffer *buffer);
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+struct ion_client *ion_client_get_file(int fd);
+
+void ion_client_get(struct ion_client *client);
+
+int ion_client_put(struct ion_client *client);
+
+void ion_handle_get(struct ion_handle *handle);
+
+int ion_handle_put(struct ion_handle *handle);
+
+struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer);
+
+void ion_handle_add(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffer's cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sglist: the scatterlist for the buffer if dmap_cnt is not zero
+*/
+struct ion_buffer {
+ struct kref ref;
+ struct rb_node node;
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ size_t size;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ int dmap_cnt;
+ struct scatterlist *sglist;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @phys: get physical address of a buffer (only defined on
+ * physically contiguous heaps)
+ * @map_dma: map the memory for dma to a scatterlist
+ * @unmap_dma: unmap the memory for dma
+ * @map_kernel: map memory into the kernel
+ * @unmap_kernel: unmap memory from the kernel
+ * @map_user: map memory to userspace
+ */
+struct ion_heap_ops {
+ int (*allocate) (struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free) (struct ion_buffer *buffer);
+ int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct scatterlist *(*map_dma) (struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+};
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+ struct rb_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ int id;
+ const char *name;
+};
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR-encoded error on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg));
+
+/**
+ * ion_device_destroy - free a device and its resources
+ * @dev: the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev: the device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * Functions for creating and destroying the built-in ion heaps.
+ * Architectures can add their own custom architecture-specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * Kernel API to allocate from and free to the carveout heap -- used when
+ * the carveout heap backs an architecture-specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate that an allocation failed.
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+#endif /* _ION_PRIV_H */
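As the comment above notes, ion_carveout_allocate()/ion_carveout_free() are meant to back architecture-specific custom heaps. A rough sketch of such a heap's allocate/free hooks, using only the fields and helpers declared in this header (the function names are invented and error handling is kept minimal):

static int example_custom_heap_allocate(struct ion_heap *heap,
					struct ion_buffer *buffer,
					unsigned long len,
					unsigned long align,
					unsigned long flags)
{
	/* carve a physically contiguous chunk out of the reserved region */
	buffer->priv_phys = ion_carveout_allocate(heap, len, align);
	if (buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;
	return 0;
}

static void example_custom_heap_free(struct ion_buffer *buffer)
{
	/* return the chunk to the carveout pool */
	ion_carveout_free(buffer->heap, buffer->priv_phys, buffer->size);
}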
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
new file mode 100644
index 000000000000..c046cf1a3219
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -0,0 +1,198 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ buffer->priv_virt = vmalloc_user(size);
+ if (!buffer->priv_virt)
+ return -ENOMEM;
+ return 0;
+}
+
+void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ vfree(buffer->priv_virt);
+}
+
+struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sglist;
+ struct page *page;
+ int i;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ void *vaddr = buffer->priv_virt;
+
+ sglist = vmalloc(npages * sizeof(struct scatterlist));
+ if (!sglist)
+ return ERR_PTR(-ENOMEM);
+ memset(sglist, 0, npages * sizeof(struct scatterlist));
+ sg_init_table(sglist, npages);
+ for (i = 0; i < npages; i++) {
+ page = vmalloc_to_page(vaddr);
+ if (!page)
+ goto end;
+ sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
+ vaddr += PAGE_SIZE;
+ }
+ /* XXX do cache maintenance for dma? */
+ return sglist;
+end:
+ vfree(sglist);
+ return NULL;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ /* XXX undo cache maintenance for dma? */
+ if (buffer->sglist)
+ vfree(buffer->sglist);
+}
+
+void *ion_system_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+}
+
+static struct ion_heap_ops vmalloc_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_system_heap_map_kernel,
+ .unmap_kernel = ion_system_heap_unmap_kernel,
+ .map_user = ion_system_heap_map_user,
+};
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &vmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM;
+ return heap;
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ buffer->priv_virt = kzalloc(len, GFP_KERNEL);
+ if (!buffer->priv_virt)
+ return -ENOMEM;
+ return 0;
+}
+
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+ kfree(buffer->priv_virt);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ *addr = virt_to_phys(buffer->priv_virt);
+ *len = buffer->size;
+ return 0;
+}
+
+struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sglist;
+
+ sglist = vmalloc(sizeof(struct scatterlist));
+ if (!sglist)
+ return ERR_PTR(-ENOMEM);
+ sg_init_table(sglist, 1);
+ sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
+ return sglist;
+}
+
+int ion_system_contig_heap_map_user(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+ return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+ .allocate = ion_system_contig_heap_allocate,
+ .free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_system_heap_map_kernel,
+ .unmap_kernel = ion_system_heap_unmap_kernel,
+ .map_user = ion_system_contig_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &kmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
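Unlike the carveout heap, neither system heap needs a reserved region: ion_system_heap_create() and ion_system_contig_heap_create() ignore their descriptor (the parameter is literally named "unused"), and ion_heap_create() only copies the name and id. A platform description for them can therefore be as small as the following sketch (ids and names invented for illustration):

static struct ion_platform_heap example_system_heaps[] = {
	{ .type = ION_HEAP_TYPE_SYSTEM,        .id = 0, .name = "system" },
	{ .type = ION_HEAP_TYPE_SYSTEM_CONTIG, .id = 1, .name = "system-contig" },
};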
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
new file mode 100644
index 000000000000..692458e07b5e
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_mapper.c
@@ -0,0 +1,114 @@
+/*
+ * drivers/gpu/ion/ion_system_mapper.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+/*
+ * This mapper is valid for any heap that allocates memory that already has
+ * a kernel mapping; this includes vmalloc'd memory, kmalloc'd memory,
+ * pages obtained via io_remap, etc.
+ */
+static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct ion_mapping **mapping)
+{
+ if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+ pr_err("%s: attempting to map an unsupported heap\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ /* XXX REVISIT ME!!! */
+ *((unsigned long *)mapping) = (unsigned long)buffer->priv;
+ return buffer->priv;
+}
+
+static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct ion_mapping *mapping)
+{
+ if (!((1 << buffer->heap->type) & mapper->heap_mask))
+ pr_err("%s: attempting to unmap an unsupported heap\n",
+ __func__);
+}
+
+static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct ion_mapping *mapping)
+{
+ if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+ pr_err("%s: attempting to unmap an unsupported heap\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ return buffer->priv;
+}
+
+static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma,
+ struct ion_mapping *mapping)
+{
+ int ret;
+
+ switch (buffer->heap->type) {
+ case ION_HEAP_KMALLOC:
+ {
+ unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
+ ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ break;
+ }
+ case ION_HEAP_VMALLOC:
+ ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
+ break;
+ default:
+ pr_err("%s: attempting to map unsupported heap to userspace\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct ion_mapper_ops ops = {
+ .map = ion_kernel_mapper_map,
+ .map_kernel = ion_kernel_mapper_map_kernel,
+ .map_user = ion_kernel_mapper_map_user,
+ .unmap = ion_kernel_mapper_unmap,
+};
+
+struct ion_mapper *ion_system_mapper_create(void)
+{
+ struct ion_mapper *mapper;
+ mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
+ if (!mapper)
+ return ERR_PTR(-ENOMEM);
+ mapper->type = ION_SYSTEM_MAPPER;
+ mapper->ops = &ops;
+ mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
+ return mapper;
+}
+
+void ion_system_mapper_destroy(struct ion_mapper *mapper)
+{
+ kfree(mapper);
+}
+
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
new file mode 100644
index 000000000000..11cd003fb08f
--- /dev/null
+++ b/drivers/gpu/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
new file mode 100644
index 000000000000..ab5f3f923f8b
--- /dev/null
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -0,0 +1,599 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/tegra_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/io.h>
+#include "../ion_priv.h"
+
+#if !defined(CONFIG_TEGRA_NVMAP)
+#include "mach/nvmap.h"
+struct nvmap_device *nvmap_dev;
+#endif
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_pin(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_ion_pin_data data;
+ int ret = 0;
+ struct ion_handle *on_stack[16];
+ struct ion_handle **refs = on_stack;
+ int i;
+ bool valid_handle;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ if (data.count) {
+ size_t bytes = data.count * sizeof(struct ion_handle *);
+
+ if (data.count > ARRAY_SIZE(on_stack))
+ refs = kmalloc(data.count * sizeof(*refs), GFP_KERNEL);
+ else
+ refs = on_stack;
+ if (!refs)
+ return -ENOMEM;
+ if (copy_from_user(refs, (void *)data.handles, bytes)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ } else
+ return -EINVAL;
+
+ mutex_lock(&client->lock);
+ for (i = 0; i < data.count; i++) {
+ valid_handle = ion_handle_validate(client, refs[i]);
+ if (!valid_handle) {
+ pr_err("invalid handle passed to tegra_ion_pin, h=0x%x",
+ (u32)refs[i]);
+ mutex_unlock(&client->lock);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ mutex_unlock(&client->lock);
+
+ if (cmd == TEGRA_ION_PIN) {
+ ion_phys_addr_t addr;
+ size_t len;
+
+ for (i = 0; i < data.count; i++) {
+ ret = ion_phys(client, refs[i], &addr, &len);
+ if (ret)
+ goto err;
+ ion_handle_get(refs[i]);
+ ret = put_user(addr, &data.addr[i]);
+ if (ret)
+ goto err;
+ }
+ } else if (cmd == TEGRA_ION_UNPIN) {
+ /* FIXME: unpin it. */
+ for (i = 0; i < data.count; i++)
+ ion_handle_put(refs[i]);
+ }
+
+err:
+ if (ret) {
+ pr_err("\n*****%s: error, ret=0x%x", __func__, ret);
+ /* FIXME: undo pinning. */
+ }
+ if (refs != on_stack)
+ kfree(refs);
+ return ret;
+}
+
+static int tegra_ion_alloc_from_id(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_ion_id_data data;
+ struct ion_buffer *buffer;
+ struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;
+
+ pr_debug("%s", __func__);
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ buffer = (struct ion_buffer *)data.id;
+ data.handle = ion_import(client, buffer);
+ data.size = buffer->size;
+ if (put_user(data.handle, &user_data->handle))
+ return -EFAULT;
+ if (put_user(data.size, &user_data->size))
+ return -EFAULT;
+ return 0;
+}
+
+static int tegra_ion_get_id(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ bool valid_handle;
+ struct tegra_ion_id_data data;
+ struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ pr_err("%s: invalid handle passed, h=0x%x\n",
+ __func__, (u32)data.handle);
+ WARN("%s: invalid handle passed\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: h=0x%x, b=0x%x, bref=%d", __func__,
+ (u32)data.handle, (u32)data.handle->buffer,
+ atomic_read(&data.handle->buffer->ref.refcount));
+ if (put_user((unsigned long)ion_handle_buffer(data.handle),
+ &user_data->id))
+ return -EFAULT;
+ return 0;
+}
+
+static int tegra_ion_cache_maint(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ wmb();
+ return 0;
+}
+
+static int tegra_ion_rw(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ bool valid_handle;
+ struct tegra_ion_rw_data data;
+ char *kern_addr, *src;
+ int ret = 0;
+ size_t copied = 0;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ if (!data.handle || !data.addr || !data.count || !data.elem_size)
+ return -EINVAL;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ WARN("%s: invalid handle passed to read/write.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (data.elem_size == data.mem_stride &&
+ data.elem_size == data.user_stride) {
+ data.elem_size *= data.count;
+ data.mem_stride = data.elem_size;
+ data.user_stride = data.elem_size;
+ data.count = 1;
+ }
+
+ kern_addr = ion_map_kernel(client, data.handle);
+
+ while (data.count--) {
+ if (data.offset + data.elem_size > data.handle->buffer->size) {
+ pr_err("%s: read/write outside of handle\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ src = kern_addr + data.offset;
+ if (cmd == TEGRA_ION_READ)
+ ret = copy_to_user((void *)data.addr,
+ src, data.elem_size);
+ else
+ ret = copy_from_user(src,
+ (void *)data.addr, data.elem_size);
+
+ if (ret)
+ break;
+
+ copied += data.elem_size;
+ data.addr += data.user_stride;
+ data.offset += data.mem_stride;
+ }
+
+ ion_unmap_kernel(client, data.handle);
+ return ret;
+}
+
+static int tegra_ion_get_param(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ bool valid_handle;
+ struct tegra_ion_get_params_data data;
+ struct tegra_ion_get_params_data *user_data =
+ (struct tegra_ion_get_params_data *)arg;
+ struct ion_buffer *buffer;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ WARN("%s: invalid handle passed to get id.\n", __func__);
+ return -EINVAL;
+ }
+
+ buffer = ion_handle_buffer(data.handle);
+ data.align = 4096;
+ data.heap = 1;
+ ion_phys(client, data.handle, &data.addr, &data.size);
+
+ if (copy_to_user(user_data, &data, sizeof(data)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long tegra_ion_ioctl(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+
+ switch (cmd) {
+ case TEGRA_ION_ALLOC_FROM_ID:
+ ret = tegra_ion_alloc_from_id(client, cmd, arg);
+ break;
+ case TEGRA_ION_GET_ID:
+ ret = tegra_ion_get_id(client, cmd, arg);
+ break;
+ case TEGRA_ION_PIN:
+ case TEGRA_ION_UNPIN:
+ ret = tegra_ion_pin(client, cmd, arg);
+ break;
+ case TEGRA_ION_CACHE_MAINT:
+ ret = tegra_ion_cache_maint(client, cmd, arg);
+ break;
+ case TEGRA_ION_READ:
+ case TEGRA_ION_WRITE:
+ ret = tegra_ion_rw(client, cmd, arg);
+ break;
+ case TEGRA_ION_GET_PARAM:
+ ret = tegra_ion_get_param(client, cmd, arg);
+ break;
+ default:
+ pr_err("%s: Unknown custom ioctl\n", __func__);
+ return -ENOTTY;
+ }
+ return ret;
+}
+
+int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+ idev = ion_device_create(tegra_ion_ioctl);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+#if !defined(CONFIG_TEGRA_NVMAP)
+ nvmap_dev = (struct nvmap_device *)idev;
+#endif
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+ return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+ platform_driver_unregister(&ion_driver);
+}
+
+fs_initcall(ion_init);
+module_exit(ion_exit);
+
+#if !defined(CONFIG_TEGRA_NVMAP)
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name)
+{
+ return ion_client_create(dev, 0xFFFFFFFF, name);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags)
+{
+ return ion_alloc(client, size, align, 1);
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ ion_free(client, r);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *r)
+{
+ return ion_map_kernel(r->client, r);
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr)
+{
+ ion_unmap_kernel(r->client, r);
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+ return ion_client_get_file(fd);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+ ion_client_get(client);
+ return client;
+}
+
+void nvmap_client_put(struct nvmap_client *c)
+{
+ ion_client_put(c);
+}
+
+phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r)
+{
+ ion_phys_addr_t addr;
+ size_t len;
+
+ ion_handle_get(r);
+ ion_phys(c, r, &addr, &len);
+ wmb();
+ return addr;
+}
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+ struct ion_buffer *buffer = (struct ion_buffer *)id;
+ struct ion_handle *handle;
+ ion_phys_addr_t addr;
+ size_t len;
+
+ handle = ion_import(c, buffer);
+ ion_phys(c, handle, &addr, &len);
+ ion_handle_put(handle);
+ return addr;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ /* FIXME: this should be implemented for iommu heap. */
+ ion_handle_put(r);
+}
+
+static int nvmap_reloc_pin_array(struct ion_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct ion_handle *gather)
+{
+ struct ion_handle *last_patch = NULL;
+ void *patch_addr;
+ ion_phys_addr_t pin_addr;
+ size_t len;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct ion_handle *patch;
+ struct ion_handle *pin;
+ ion_phys_addr_t reloc_addr;
+
+ /* all of the handles are validated and referenced prior to
+ * calling this function, so the casts are safe here */
+ pin = (struct ion_handle *)arr[i].pin_mem;
+
+ if (arr[i].patch_mem == (unsigned long)last_patch) {
+ patch = last_patch;
+ } else if (arr[i].patch_mem == (unsigned long)gather) {
+ patch = gather;
+ } else {
+ if (last_patch)
+ ion_handle_put(last_patch);
+
+ ion_handle_get((struct ion_handle *)arr[i].patch_mem);
+ patch = (struct ion_handle *)arr[i].patch_mem;
+ if (!patch)
+ return -EPERM;
+ last_patch = patch;
+ }
+
+ patch_addr = ion_map_kernel(client, patch);
+ patch_addr = patch_addr + arr[i].patch_offset;
+
+ ion_phys(client, pin, &pin_addr, &len);
+ reloc_addr = pin_addr + arr[i].pin_offset;
+ __raw_writel(reloc_addr, patch_addr);
+ ion_unmap_kernel(client, patch);
+ }
+
+ if (last_patch)
+ ion_handle_put(last_patch);
+
+ wmb();
+ return 0;
+}
+
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique)
+{
+ int i;
+ int count = 0;
+
+ /* FIXME: take care of duplicate ones & validation. */
+ for (i = 0; i < nr; i++) {
+ unique[i] = (struct nvmap_handle *)arr[i].pin_mem;
+ nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]);
+ count++;
+ }
+ nvmap_reloc_pin_array((struct ion_client *)client,
+ arr, nr, (struct ion_handle *)gather);
+ return nr;
+}
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++)
+ nvmap_unpin(client, h[i]);
+}
+
+int nvmap_patch_word(struct nvmap_client *client,
+ struct nvmap_handle *patch,
+ u32 patch_offset, u32 patch_value)
+{
+ void *vaddr;
+ u32 *patch_addr;
+
+ vaddr = ion_map_kernel(client, patch);
+ patch_addr = vaddr + patch_offset;
+ __raw_writel(patch_value, patch_addr);
+ wmb();
+ ion_unmap_kernel(client, patch);
+ return 0;
+}
+
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct ion_handle *handle;
+
+ handle = (struct ion_handle *)*((unsigned int*)id);
+ pr_debug("%s: id=0x%x, h=0x%x,c=0x%x",
+ __func__, (u32)id, (u32)handle, (u32)client);
+ nvmap_handle_get(handle);
+ return handle;
+}
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+ struct ion_client *ion_client = client;
+
+ handle = (struct ion_handle *)*((unsigned int*)id);
+ pr_debug("%s: id=0x%x, h=0x%x,c=0x%x",
+ __func__, (u32)id, (u32)handle, (u32)client);
+ buffer = handle->buffer;
+
+ handle = ion_handle_create(client, buffer);
+
+ mutex_lock(&ion_client->lock);
+ ion_handle_add(ion_client, handle);
+ mutex_unlock(&ion_client->lock);
+
+ pr_debug("%s: dup id=0x%x, h=0x%x", __func__,
+ (u32)id, (u32)handle);
+ return handle;
+}
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+ ion_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+ size_t size, size_t align, unsigned int flags, unsigned int iova_start)
+{
+ /* FIXME: */
+ return NULL;
+}
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ /* FIXME: */
+}
+
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+ ion_handle_get(h);
+ return h;
+}
+
+void nvmap_handle_put(struct nvmap_handle *h)
+{
+ ion_handle_put(h);
+}
+
+#endif
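A rough usage sketch of the nvmap compatibility wrappers above (built only when CONFIG_TEGRA_NVMAP is not set), chaining the calls in the order a legacy caller would use them; the function name is invented and error handling is omitted:

static void example_nvmap_compat_usage(struct nvmap_device *dev)
{
	struct nvmap_client *client = nvmap_create_client(dev, "example");
	struct nvmap_handle_ref *ref = nvmap_alloc(client, SZ_4K, SZ_4K, 0);
	phys_addr_t pa = nvmap_pin(client, ref);	/* physical address */
	void *va = nvmap_mmap(ref);			/* kernel mapping */

	pr_debug("buffer at %pa mapped at %p\n", &pa, va);

	nvmap_munmap(ref, va);
	nvmap_unpin(client, ref);
	nvmap_free(client, ref);
	nvmap_client_put(client);
}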
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index bae48745bb42..9a243ca96e6d 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -450,6 +450,11 @@ void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) {
seq_printf(f, "Logical(");
hid_resolv_usage(field->logical, f); seq_printf(f, ")\n");
}
+ if (field->application) {
+ tab(n, f);
+ seq_printf(f, "Application(");
+ hid_resolv_usage(field->application, f); seq_printf(f, ")\n");
+ }
tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage);
for (j = 0; j < field->maxusage; j++) {
tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n");
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6559e2e3364e..1483c8296d57 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -971,6 +971,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
* UGCI) cram a lot of unrelated inputs into the
* same interface. */
hidinput->report = report;
+ if (hid->driver->input_register &&
+ hid->driver->input_register(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
hidinput = NULL;
@@ -978,6 +981,10 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
}
+ if (hidinput && hid->driver->input_register &&
+ hid->driver->input_register(hid, hidinput))
+ goto out_cleanup;
+
if (hidinput && input_register_device(hidinput->input))
goto out_cleanup;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index f0fbd7bd239e..08f5dc773975 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -387,8 +387,10 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 1;
}
-static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+static int magicmouse_setup_input(struct hid_device *hdev, struct hid_input *hi)
{
+ struct input_dev *input = hi->input;
+
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
@@ -462,6 +464,8 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
+
+ return 0;
}
static int magicmouse_input_mapping(struct hid_device *hdev,
@@ -514,12 +518,6 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- /* We do this after hid-input is done parsing reports so that
- * hid-input uses the most natural button and axis IDs.
- */
- if (msc->input)
- magicmouse_setup_input(msc->input, hdev);
-
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID);
@@ -584,6 +582,7 @@ static struct hid_driver magicmouse_driver = {
.remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
+ .input_register = magicmouse_setup_input,
};
static int __init magicmouse_init(void)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 9fc15e1f6270..b03a0b0e9b63 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -213,6 +213,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_class *cls = td->mtclass;
__s32 quirks = cls->quirks;
+ /* Only map fields from TouchScreen or TouchPad collections.
+ * We need to ignore fields that belong to other collections
+ * such as Mouse that might have the same GenericDesktop usages. */
+ if (field->application == HID_DG_TOUCHSCREEN)
+ set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+ else if (field->application == HID_DG_TOUCHPAD)
+ set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ else
+ return 0;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0b62c3c6b7ce..1f46b8d3a791 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -179,6 +179,16 @@ config SENSORS_ADT7411
This driver can also be built as a module. If so, the module
will be called adt7411.
+config SENSORS_ADT7461
+ tristate "Analog Devices ADT7461"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Analog Devices
+ ADT7461 temperature monitoring chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called adt7461.
+
config SENSORS_ADT7462
tristate "Analog Devices ADT7462"
depends on I2C && EXPERIMENTAL
@@ -1058,6 +1068,14 @@ config SENSORS_AMC6821
This driver can also be build as a module. If so, the module
will be called amc6821.
+config SENSORS_TEGRA_TSENSOR
+ tristate "Nvidia Tegra Integrated temperature sensor"
+ depends on ARCH_TEGRA_3x_SOC
+ default n
+ help
+ If you say yes here you get support for the integrated
+ temperature sensor in the NVIDIA Tegra chipset.
+
config SENSORS_THMC50
tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
depends on I2C
@@ -1327,6 +1345,21 @@ config SENSORS_MC13783_ADC
help
Support for the A/D converter on MC13783 PMIC.
+config SENSORS_INA219
+ tristate "Texas Instruments INA219 POWER MONITOR SENSOR DRIVER"
+ depends on I2C
+ default n
+ help
+ Support for the TI INA219 power monitor sensor.
+
+config SENSORS_INA230
+ tristate "Texas Instruments INA230 POWER MONITOR SENSOR DRIVER"
+ depends on I2C
+ help
+ Support for the TI INA230 power monitor sensor.
+ (also works for TI INA226)
+
+
if ACPI
comment "ACPI drivers"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 3c9ccefea791..d9797487180d 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADS1015) += ads1015.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
+obj-$(CONFIG_SENSORS_ADT7461) += adt7461.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
@@ -123,6 +124,9 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l785ts.o
obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+obj-$(CONFIG_SENSORS_INA219) += ina219.o
+obj-$(CONFIG_SENSORS_INA230) += ina230.o
+obj-$(CONFIG_SENSORS_TEGRA_TSENSOR) += tegra-tsensor.o
obj-$(CONFIG_PMBUS) += pmbus/
diff --git a/drivers/hwmon/adt7461.c b/drivers/hwmon/adt7461.c
new file mode 100644
index 000000000000..bf71f993cd2c
--- /dev/null
+++ b/drivers/hwmon/adt7461.c
@@ -0,0 +1,809 @@
+/*
+ * adt7461.c - Linux kernel modules for hardware
+ * monitoring
+ * Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/adt7461.h>
+
+#define DRIVER_NAME "adt7461"
+
+/*
+ * The ADT7461 registers
+ */
+
+#define ADT7461_REG_R_MAN_ID 0xFE
+#define ADT7461_REG_R_CHIP_ID 0xFF
+#define ADT7461_REG_R_CONFIG1 0x03
+#define ADT7461_REG_W_CONFIG1 0x09
+#define ADT7461_REG_R_CONVRATE 0x04
+#define ADT7461_REG_W_CONVRATE 0x0A
+#define ADT7461_REG_R_STATUS 0x02
+#define ADT7461_REG_R_LOCAL_TEMP 0x00
+#define ADT7461_REG_R_LOCAL_HIGH 0x05
+#define ADT7461_REG_W_LOCAL_HIGH 0x0B
+#define ADT7461_REG_R_LOCAL_LOW 0x06
+#define ADT7461_REG_W_LOCAL_LOW 0x0C
+#define ADT7461_REG_R_LOCAL_CRIT 0x20
+#define ADT7461_REG_W_LOCAL_CRIT 0x20
+#define ADT7461_REG_R_REMOTE_TEMPH 0x01
+#define ADT7461_REG_R_REMOTE_TEMPL 0x10
+#define ADT7461_REG_R_REMOTE_OFFSH 0x11
+#define ADT7461_REG_W_REMOTE_OFFSH 0x11
+#define ADT7461_REG_R_REMOTE_OFFSL 0x12
+#define ADT7461_REG_W_REMOTE_OFFSL 0x12
+#define ADT7461_REG_R_REMOTE_HIGHH 0x07
+#define ADT7461_REG_W_REMOTE_HIGHH 0x0D
+#define ADT7461_REG_R_REMOTE_HIGHL 0x13
+#define ADT7461_REG_W_REMOTE_HIGHL 0x13
+#define ADT7461_REG_R_REMOTE_LOWH 0x08
+#define ADT7461_REG_W_REMOTE_LOWH 0x0E
+#define ADT7461_REG_R_REMOTE_LOWL 0x14
+#define ADT7461_REG_W_REMOTE_LOWL 0x14
+#define ADT7461_REG_R_REMOTE_CRIT 0x19
+#define ADT7461_REG_W_REMOTE_CRIT 0x19
+#define ADT7461_REG_R_TCRIT_HYST 0x21
+#define ADT7461_REG_W_TCRIT_HYST 0x21
+
+/* Configuration Register Bits */
+#define EXTENDED_RANGE_BIT BIT(2)
+#define THERM2_BIT BIT(5)
+#define STANDBY_BIT BIT(6)
+#define ALERT_BIT BIT(7)
+
+/* Max Temperature Measurements */
+#define EXTENDED_RANGE_OFFSET 64U
+#define STANDARD_RANGE_MAX 127U
+#define EXTENDED_RANGE_MAX (150U + EXTENDED_RANGE_OFFSET)
+
+/*
+ * Device flags
+ */
+#define ADT7461_FLAG_ADT7461_EXT 0x01 /* ADT7461 extended mode */
+#define ADT7461_FLAG_THERM2 0x02 /* Pin 6 as Therm2 */
+
+/*
+ * Client data
+ */
+
+struct adt7461_data {
+ struct work_struct work;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ struct regulator *regulator;
+ char valid; /* zero until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+ int flags;
+
+ u8 config; /* configuration register value */
+ u8 alert_alarms; /* Which alarm bits trigger ALERT# */
+
+ /* registers values */
+ s8 temp8[4]; /* 0: local low limit
+ 1: local high limit
+ 2: local critical limit
+ 3: remote critical limit */
+ s16 temp11[5]; /* 0: remote input
+ 1: remote low limit
+ 2: remote high limit
+ 3: remote offset
+ 4: local input */
+ u8 temp_hyst;
+ u8 alarms; /* bitvector */
+ void (*alarm_fn)(bool raised);
+};
+
+/*
+ * Conversions
+ */
+
+static inline int temp_from_s8(s8 val)
+{
+ return val * 1000;
+}
+
+static u8 hyst_to_reg(long val)
+{
+ if (val <= 0)
+ return 0;
+ if (val >= 30500)
+ return 31;
+ return (val + 500) / 1000;
+}
+
+/*
+ * On the ADT7461, attempts to write values outside the range
+ * 0 < temp < 127 are clamped to the boundary value.
+ *
+ * ADT7461 in "extended mode" operation uses unsigned integers offset by
+ * 64 (e.g., 0 -> -64 degC). The range is restricted to -64..191 degC.
+ */
+static inline int temp_from_u8(struct adt7461_data *data, u8 val)
+{
+ if (data->flags & ADT7461_FLAG_ADT7461_EXT)
+ return (val - 64) * 1000;
+ else
+ return temp_from_s8(val);
+}
+
+static inline int temp_from_u16(struct adt7461_data *data, u16 val)
+{
+ if (data->flags & ADT7461_FLAG_ADT7461_EXT)
+ return (val - 0x4000) / 64 * 250;
+ else
+ return val / 32 * 125;
+}
+
+static u8 temp_to_u8(struct adt7461_data *data, long val)
+{
+ if (data->flags & ADT7461_FLAG_ADT7461_EXT) {
+ if (val <= -64000)
+ return 0;
+ if (val >= 191000)
+ return 0xFF;
+ return (val + 500 + 64000) / 1000;
+ } else {
+ if (val <= 0)
+ return 0;
+ if (val >= 127000)
+ return 127;
+ return (val + 500) / 1000;
+ }
+}
+
+static u16 temp_to_u16(struct adt7461_data *data, long val)
+{
+ if (data->flags & ADT7461_FLAG_ADT7461_EXT) {
+ if (val <= -64000)
+ return 0;
+ if (val >= 191750)
+ return 0xFFC0;
+ return (val + 64000 + 125) / 250 * 64;
+ } else {
+ if (val <= 0)
+ return 0;
+ if (val >= 127750)
+ return 0x7FC0;
+ return (val + 125) / 250 * 64;
+ }
+}
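+
+/*
+ * Worked example of the conversions above in extended-range mode,
+ * for 25.000 degC (val = 25000):
+ *   temp_to_u8:    (25000 + 500 + 64000) / 1000      = 89 (0x59)
+ *   temp_from_u8:  (0x59 - 64) * 1000                = 25000
+ *   temp_to_u16:   (25000 + 64000 + 125) / 250 * 64  = 0x5900
+ *   temp_from_u16: (0x5900 - 0x4000) / 64 * 250      = 25000
+ */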
+
+static int adt7461_read_reg(struct i2c_client* client, u8 reg, u8 *value)
+{
+ int err;
+
+ err = i2c_smbus_read_byte_data(client, reg);
+ if (err < 0) {
+ pr_err("adt7461_read_reg:Register %#02x read failed (%d)\n",
+ reg, err);
+ return err;
+ }
+ *value = err;
+
+ return 0;
+}
+
+static int adt7461_read16(struct i2c_client *client, u8 regh, u8 regl,
+ u16 *value)
+{
+ int err;
+ u8 oldh, newh, l;
+
+ /*
+ * There is a trick here. We have to read two registers to have the
+ * sensor temperature, but we have to beware a conversion could occur
+ * in between the readings. The datasheet says we should either use
+ * the one-shot conversion register, which we don't want to do
+ * (disables hardware monitoring) or monitor the busy bit, which is
+ * impossible (we can't read the values and monitor that bit at the
+ * exact same time). So the solution used here is to read the high
+ * byte once, then the low byte, then the high byte again. If the new
+ * high byte matches the old one, then we have a valid reading. Else
+ * we have to read the low byte again, and now we believe we have a
+ * correct reading.
+ */
+ if ((err = adt7461_read_reg(client, regh, &oldh))
+ || (err = adt7461_read_reg(client, regl, &l))
+ || (err = adt7461_read_reg(client, regh, &newh)))
+ return err;
+ if (oldh != newh) {
+ err = adt7461_read_reg(client, regl, &l);
+ if (err)
+ return err;
+ }
+ *value = (newh << 8) | l;
+
+ return 0;
+}
+
+static struct adt7461_data *adt7461_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7461_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
+ || !data->valid) {
+ u8 h, l;
+
+ adt7461_read_reg(client, ADT7461_REG_R_LOCAL_LOW, &data->temp8[0]);
+ adt7461_read_reg(client, ADT7461_REG_R_LOCAL_HIGH, &data->temp8[1]);
+ adt7461_read_reg(client, ADT7461_REG_R_LOCAL_CRIT, &data->temp8[2]);
+ adt7461_read_reg(client, ADT7461_REG_R_REMOTE_CRIT, &data->temp8[3]);
+ adt7461_read_reg(client, ADT7461_REG_R_TCRIT_HYST, &data->temp_hyst);
+
+ if (adt7461_read_reg(client, ADT7461_REG_R_LOCAL_TEMP, &h) == 0)
+ data->temp11[4] = h << 8;
+
+ adt7461_read16(client, ADT7461_REG_R_REMOTE_TEMPH,
+ ADT7461_REG_R_REMOTE_TEMPL, &data->temp11[0]);
+
+ if (adt7461_read_reg(client, ADT7461_REG_R_REMOTE_LOWH, &h) == 0) {
+ data->temp11[1] = h << 8;
+ if (adt7461_read_reg(client, ADT7461_REG_R_REMOTE_LOWL, &l) == 0)
+ data->temp11[1] |= l;
+ }
+ if (adt7461_read_reg(client, ADT7461_REG_R_REMOTE_HIGHH, &h) == 0) {
+ data->temp11[2] = h << 8;
+ if (adt7461_read_reg(client, ADT7461_REG_R_REMOTE_HIGHL, &l) == 0)
+ data->temp11[2] |= l;
+ }
+
+ if (adt7461_read_reg(client, ADT7461_REG_R_REMOTE_OFFSH,
+ &h) == 0
+ && adt7461_read_reg(client, ADT7461_REG_R_REMOTE_OFFSL,
+ &l) == 0)
+ data->temp11[3] = (h << 8) | l;
+ adt7461_read_reg(client, ADT7461_REG_R_STATUS, &data->alarms);
+
+ /* Re-enable ALERT# output if relevant alarms are all clear */
+ if (!(data->flags & ADT7461_FLAG_THERM2)
+ && (data->alarms & data->alert_alarms) == 0) {
+ u8 config;
+
+ adt7461_read_reg(client, ADT7461_REG_R_CONFIG1, &config);
+ if (config & 0x80) {
+ pr_err("adt7461_update_device:Re-enabling ALERT#\n");
+ i2c_smbus_write_byte_data(client,
+ ADT7461_REG_W_CONFIG1,
+ config & ~ALERT_BIT);
+ }
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+/*
+ * Sysfs stuff
+ */
+
+static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct adt7461_data *data = adt7461_update_device(dev);
+ int temp;
+
+ temp = temp_from_u8(data, data->temp8[attr->index]);
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ static const u8 reg[4] = {
+ ADT7461_REG_W_LOCAL_LOW,
+ ADT7461_REG_W_LOCAL_HIGH,
+ ADT7461_REG_W_LOCAL_CRIT,
+ ADT7461_REG_W_REMOTE_CRIT,
+ };
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7461_data *data = i2c_get_clientdata(client);
+ long val = simple_strtol(buf, NULL, 10);
+ int nr = attr->index;
+
+ mutex_lock(&data->update_lock);
+ data->temp8[nr] = temp_to_u8(data, val);
+ i2c_smbus_write_byte_data(client, reg[nr], data->temp8[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct adt7461_data *data = adt7461_update_device(dev);
+ int temp;
+
+ temp = temp_from_u16(data, data->temp11[attr->index]);
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ static const u8 reg[6] = {
+ ADT7461_REG_W_REMOTE_LOWH,
+ ADT7461_REG_W_REMOTE_LOWL,
+ ADT7461_REG_W_REMOTE_HIGHH,
+ ADT7461_REG_W_REMOTE_HIGHL,
+ ADT7461_REG_W_REMOTE_OFFSH,
+ ADT7461_REG_W_REMOTE_OFFSL,
+ };
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7461_data *data = i2c_get_clientdata(client);
+ long val = simple_strtol(buf, NULL, 10);
+ int nr = attr->index;
+
+ mutex_lock(&data->update_lock);
+ data->temp11[nr] = temp_to_u16(data, val);
+
+ i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
+ data->temp11[nr] >> 8);
+ i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
+ data->temp11[nr] & 0xff);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temphyst(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct adt7461_data *data = adt7461_update_device(dev);
+ int temp;
+
+ temp = temp_from_u8(data, data->temp8[attr->index]);
+
+ return sprintf(buf, "%d\n", temp - temp_from_s8(data->temp_hyst));
+}
+
+static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7461_data *data = i2c_get_clientdata(client);
+ long val = simple_strtol(buf, NULL, 10);
+ int temp;
+
+ mutex_lock(&data->update_lock);
+ temp = temp_from_u8(data, data->temp8[2]);
+ data->temp_hyst = hyst_to_reg(temp - val);
+ i2c_smbus_write_byte_data(client, ADT7461_REG_W_TCRIT_HYST,
+ data->temp_hyst);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
+ char *buf)
+{
+ struct adt7461_data *data = adt7461_update_device(dev);
+ return sprintf(buf, "%d\n", data->alarms);
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct adt7461_data *data = adt7461_update_device(dev);
+ int bitnr = attr->index;
+
+ return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 0);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 2);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 2);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 3);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst,
+ set_temphyst, 2);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 3);
+
+/* Individual alarm files */
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
+/* Raw alarm file for compatibility */
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+static struct attribute *adt7461_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &dev_attr_alarms.attr,
+ NULL
+};
+
+static const struct attribute_group adt7461_group = {
+ .attrs = adt7461_attributes,
+};
+
+static void adt7461_work_func(struct work_struct *work)
+{
+ struct adt7461_data *data =
+ container_of(work, struct adt7461_data, work);
+ int irq = data->client->irq;
+
+ if (data->alarm_fn) {
+ /* Therm2 line is active low */
+ data->alarm_fn(!gpio_get_value(irq_to_gpio(irq)));
+ }
+}
+
+static irqreturn_t adt7461_irq(int irq, void *dev_id)
+{
+ struct adt7461_data *data = dev_id;
+ schedule_work(&data->work);
+
+ return IRQ_HANDLED;
+}
+
+static void adt7461_regulator_enable(struct i2c_client *client)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+
+ data->regulator = regulator_get(NULL, "vdd_vcore_temp");
+ if (IS_ERR_OR_NULL(data->regulator)) {
+ pr_err("adt7461_regulator_enable:Couldn't get regulator vdd_vcore_temp\n");
+ data->regulator = NULL;
+ } else {
+ regulator_enable(data->regulator);
+ /* Give the regulator time to turn on before
+ * initializing the adt7461 chip */
+ mdelay(5);
+ }
+}
+
+static void adt7461_regulator_disable(struct i2c_client *client)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+ struct regulator *adt7461_reg = data->regulator;
+ int ret;
+
+ if (adt7461_reg) {
+ ret = regulator_is_enabled(adt7461_reg);
+ if (ret > 0)
+ regulator_disable(adt7461_reg);
+ regulator_put(adt7461_reg);
+ }
+ data->regulator = NULL;
+}
+
+static void adt7461_enable(struct i2c_client *client)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+
+ i2c_smbus_write_byte_data(client, ADT7461_REG_W_CONFIG1,
+ data->config & ~STANDBY_BIT);
+}
+
+static void adt7461_disable(struct i2c_client *client)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+
+ i2c_smbus_write_byte_data(client, ADT7461_REG_W_CONFIG1,
+ data->config | STANDBY_BIT);
+}
+
+static int adt7461_init_client(struct i2c_client *client)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+ struct adt7461_platform_data *pdata = client->dev.platform_data;
+ u8 config = 0;
+ u8 value;
+ int err;
+
+ if (!pdata || !pdata->supported_hwrev)
+ return -ENODEV;
+
+ if (pdata->therm2)
+ data->flags |= ADT7461_FLAG_THERM2;
+
+ if (pdata->ext_range)
+ data->flags |= ADT7461_FLAG_ADT7461_EXT;
+
+ adt7461_regulator_enable(client);
+
+ /* Start the conversions. */
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_CONVRATE,
+ pdata->conv_rate);
+ if (err < 0)
+ goto error;
+
+ /* External temperature h/w shutdown limit */
+ value = temp_to_u8(data, pdata->shutdown_ext_limit * 1000);
+ err = i2c_smbus_write_byte_data(client,
+ ADT7461_REG_W_REMOTE_CRIT, value);
+ if (err < 0)
+ goto error;
+
+ /* Local temperature h/w shutdown limit */
+ value = temp_to_u8(data, pdata->shutdown_local_limit * 1000);
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_LOCAL_CRIT,
+ value);
+ if (err < 0)
+ goto error;
+
+ /* External Temperature Throttling limit */
+ value = temp_to_u8(data, pdata->throttling_ext_limit * 1000);
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_REMOTE_HIGHH,
+ value);
+ if (err < 0)
+ goto error;
+
+ /* Local Temperature Throttling limit */
+ value = (data->flags & ADT7461_FLAG_ADT7461_EXT) ?
+ EXTENDED_RANGE_MAX : STANDARD_RANGE_MAX;
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_LOCAL_HIGH,
+ value);
+ if (err < 0)
+ goto error;
+
+ /* Remote channel offset */
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_REMOTE_OFFSH,
+ pdata->offset);
+ if (err < 0)
+ goto error;
+
+ /* THERM hysteresis */
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_TCRIT_HYST,
+ pdata->hysteresis);
+ if (err < 0)
+ goto error;
+
+ if (data->flags & ADT7461_FLAG_THERM2) {
+ data->alarm_fn = pdata->alarm_fn;
+ config = (THERM2_BIT | STANDBY_BIT);
+ } else {
+ config = (~ALERT_BIT & ~THERM2_BIT & STANDBY_BIT);
+ }
+
+ err = i2c_smbus_write_byte_data(client, ADT7461_REG_W_CONFIG1, config);
+ if (err < 0)
+ goto error;
+
+ data->config = config;
+ return 0;
+
+error:
+ pr_err("adt7461_init_client:Initialization failed!\n");
+ return err;
+}
+
+static int adt7461_init_irq(struct adt7461_data *data)
+{
+ INIT_WORK(&data->work, adt7461_work_func);
+
+ return request_irq(data->client->irq, adt7461_irq, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, DRIVER_NAME, data);
+}
+
+static int adt7461_probe(struct i2c_client *new_client,
+ const struct i2c_device_id *id)
+{
+ struct adt7461_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct adt7461_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = new_client;
+ i2c_set_clientdata(new_client, data);
+ mutex_init(&data->update_lock);
+
+ data->alert_alarms = 0x7c;
+
+ /* Initialize the ADT7461 chip */
+ err = adt7461_init_client(new_client);
+ if (err < 0)
+ goto exit_free;
+
+ if (data->flags & ADT7461_FLAG_THERM2) {
+ err = adt7461_init_irq(data);
+ if (err < 0)
+ goto exit_free;
+ }
+
+ /* Register sysfs hooks */
+ if ((err = sysfs_create_group(&new_client->dev.kobj, &adt7461_group)))
+ goto exit_free;
+ if ((err = device_create_file(&new_client->dev,
+ &sensor_dev_attr_temp2_offset.dev_attr)))
+ goto exit_remove_files;
+
+ data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove_files;
+ }
+
+ adt7461_enable(new_client);
+ return 0;
+
+exit_remove_files:
+ sysfs_remove_group(&new_client->dev.kobj, &adt7461_group);
+exit_free:
+ kfree(data);
+ return err;
+}
+
+static int adt7461_remove(struct i2c_client *client)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+
+ if (data->flags & ADT7461_FLAG_THERM2) {
+ free_irq(client->irq, data);
+ cancel_work_sync(&data->work);
+ }
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &adt7461_group);
+ device_remove_file(&client->dev,
+ &sensor_dev_attr_temp2_offset.dev_attr);
+ adt7461_regulator_disable(client);
+
+ kfree(data);
+ return 0;
+}
+
+static void adt7461_alert(struct i2c_client *client, unsigned int flag)
+{
+ struct adt7461_data *data = i2c_get_clientdata(client);
+ u8 config, alarms;
+
+ adt7461_read_reg(client, ADT7461_REG_R_STATUS, &alarms);
+ if ((alarms & 0x7f) == 0) {
+ pr_err("adt7461_alert:Everything OK\n");
+ } else {
+ if (alarms & 0x61)
+ pr_err("adt7461_alert:temp%d out of range, please check!\n", 1);
+ if (alarms & 0x1a)
+ pr_err("adt7461_alert:temp%d out of range, please check!\n", 2);
+ if (alarms & 0x04)
+ pr_err("adt7461_alert:temp%d diode open, please check!\n", 2);
+
+ /* Disable ALERT# output, because these chips don't implement
+ SMBus alert correctly; they should only hold the alert line
+ low briefly. */
+ if (!(data->flags & ADT7461_FLAG_THERM2)
+ && (alarms & data->alert_alarms)) {
+ pr_err("adt7461_alert:Disabling ALERT#\n");
+ adt7461_read_reg(client, ADT7461_REG_R_CONFIG1, &config);
+ i2c_smbus_write_byte_data(client, ADT7461_REG_W_CONFIG1,
+ config | ALERT_BIT);
+ }
+ }
+}
+
+#ifdef CONFIG_PM
+static int adt7461_suspend(struct i2c_client *client, pm_message_t state)
+{
+ disable_irq(client->irq);
+ adt7461_disable(client);
+
+ return 0;
+}
+
+static int adt7461_resume(struct i2c_client *client)
+{
+ adt7461_enable(client);
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+/*
+ * Driver data
+ */
+static const struct i2c_device_id adt7461_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, adt7461_id);
+
+static struct i2c_driver adt7461_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = adt7461_probe,
+ .remove = adt7461_remove,
+ .alert = adt7461_alert,
+ .id_table = adt7461_id,
+#ifdef CONFIG_PM
+ .suspend = adt7461_suspend,
+ .resume = adt7461_resume,
+#endif
+};
+
+static int __init sensors_adt7461_init(void)
+{
+ return i2c_add_driver(&adt7461_driver);
+}
+
+static void __exit sensors_adt7461_exit(void)
+{
+ i2c_del_driver(&adt7461_driver);
+}
+
+MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("ADT7461 driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_adt7461_init);
+module_exit(sensors_adt7461_exit);
diff --git a/drivers/hwmon/ina219.c b/drivers/hwmon/ina219.c
new file mode 100644
index 000000000000..cc5b85fdcf84
--- /dev/null
+++ b/drivers/hwmon/ina219.c
@@ -0,0 +1,414 @@
+/*
+ * ina219.c - driver for TI INA219 current / power monitor sensor
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * The INA219 is a sensor chip made by Texas Instruments. It measures
+ * power, voltage and current on a power rail.
+ * Complete datasheet can be obtained from website:
+ * http://focus.ti.com/lit/ds/symlink/ina219.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/ina219.h>
+#include <linux/init.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+
+#define DRIVER_NAME "ina219"
+
+/* INA219 register offsets */
+#define INA219_CONFIG 0
+#define INA219_SHUNT 1
+#define INA219_VOLTAGE 2
+#define INA219_POWER 3
+#define INA219_CURRENT 4
+#define INA219_CAL 5
+
+/*
+ INA219 Sensor defines
+ Config info for ina219s
+ D15 D14 D13  D12:D11 D10:D7        D6:D3         D2:D0
+ RST -   BRNG PG1:PG0 BADC4..BADC1  SADC4..SADC1  MODE3..MODE1
+ reset=0               (D15)
+ bus_range=0           (D13)
+ pga_gain=0            (D12:D11)
+ bus_adc_setting=0x3   (D10:D7) 12-bit w/o oversampling (532uS)
+ shunt_adc_setting=0xb (D6:D3)  8x oversampling (4.26ms)
+ mode=0x7              (D2:D0)  continuous shunt & bus
+*/
+#define INA219_CONFIG_DATA 0x1df
+#define INA219_RESET 0x8000
+
+struct power_mon_data {
+ s32 voltage;
+ s32 currentInMillis;
+ s32 power;
+};
+
+struct ina219_data {
+ struct device *hwmon_dev;
+ struct i2c_client *client;
+ struct ina219_platform_data *pInfo;
+ struct power_mon_data pm_data;
+ struct mutex mutex;
+};
+
+/* Set non-zero to enable debug prints */
+#define INA219_DEBUG_PRINTS 0
+
+#if INA219_DEBUG_PRINTS
+#define DEBUG_INA219(x) printk x
+#else
+#define DEBUG_INA219(x)
+#endif
+
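+/*
+ * Note: SMBus word accesses put the low byte on the wire first, while the
+ * INA219 registers expect the high byte first, so reorder_bytes() swaps the
+ * two halves of each 16-bit value. For example reorder_bytes(0x01df) is
+ * 0xdf01, so i2c_smbus_write_word_data() sends 0x01 then 0xdf, as the chip
+ * expects. (cpu_to_be16() would be the idiomatic equivalent; the open-coded
+ * swap is kept here as written.)
+ */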
+static s16 reorder_bytes(s16 a)
+{
+ s16 ret = ((a >> 8) & 0xff) | ((a & 0xff) << 8);
+ return ret;
+}
+
+/* set ina219 to power down mode */
+static s32 power_down_INA219(struct i2c_client *client)
+{
+ s32 retval;
+ retval = i2c_smbus_write_word_data(client, INA219_CONFIG, 0);
+ if (retval < 0)
+ dev_err(&client->dev, "power down failure sts: 0x%x\n", retval);
+ return retval;
+}
+
+static s32 show_rail_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina219_data *data = i2c_get_clientdata(client);
+ return sprintf(buf, "%s\n", data->pInfo->rail_name);
+}
+
+static s32 show_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina219_data *data = i2c_get_clientdata(client);
+ s32 retval;
+ s32 voltage_mV;
+
+ /* fill config data */
+ retval = i2c_smbus_write_word_data(client, INA219_CONFIG,
+ reorder_bytes(INA219_CONFIG_DATA));
+ if (retval < 0) {
+ dev_err(dev, "config data write failed sts: 0x%x\n", retval);
+ goto error;
+ }
+
+ /* fill calibration data */
+ retval = i2c_smbus_write_word_data(client, INA219_CAL,
+ reorder_bytes(data->pInfo->calibration_data));
+ if (retval < 0) {
+ dev_err(dev, "calib data write failed sts: 0x%x\n", retval);
+ goto error;
+ }
+
+ /* getting voltage readings in milli volts*/
+ voltage_mV =
+ reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_VOLTAGE));
+ DEBUG_INA219(("Ina219 voltage reg Value: 0x%x\n", voltage_mV));
+ if (voltage_mV < 0)
+ goto error;
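+ /*
+ * Per the INA219 datasheet the bus-voltage reading occupies bits
+ * [15:3] with a 4 mV LSB, i.e. (reg >> 3) * 4; the reg >> 1 shortcut
+ * below matches that to within a few mV, since only the CNVR/OVF
+ * status bits land in the low bits of the result.
+ */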
+ voltage_mV = voltage_mV >> 1;
+ DEBUG_INA219(("Ina219 voltage in mv: %d\n", voltage_mV));
+
+ /* set ina219 to power down mode */
+ retval = power_down_INA219(client);
+ if (retval < 0)
+ goto error;
+
+ DEBUG_INA219(("%s volt = %d\n", __func__, voltage_mV));
+ return sprintf(buf, "%d mV\n", voltage_mV);
+error:
+ dev_err(dev, "%s: failed\n", __func__);
+ return retval;
+}
+
+
+static s32 show_power(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina219_data *data = i2c_get_clientdata(client);
+ s32 retval;
+ s32 power_mW;
+ s32 voltage_mV;
+ s32 overflow, conversion;
+
+ /* fill config data */
+ retval = i2c_smbus_write_word_data(client, INA219_CONFIG,
+ reorder_bytes(INA219_CONFIG_DATA));
+ if (retval < 0) {
+ dev_err(dev, "config data write failed sts: 0x%x\n", retval);
+ goto error;
+ }
+
+ /* fill calib data */
+ retval = i2c_smbus_write_word_data(client, INA219_CAL,
+ reorder_bytes(data->pInfo->calibration_data));
+ if (retval < 0) {
+ dev_err(dev, "calibration data write failed sts: 0x%x\n",
+ retval);
+ goto error;
+ }
+
+ /* check if the readings are valid */
+ do {
+ /* read power register to clear conversion bit */
+ retval = reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_POWER));
+ if (retval < 0) {
+ dev_err(dev, "CNVR bit clearing failure sts: 0x%x\n",
+ retval);
+ goto error;
+ }
+
+ voltage_mV =
+ reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_VOLTAGE));
+ DEBUG_INA219(("Ina219 voltage reg Value: 0x%x\n", voltage_mV));
+ overflow = voltage_mV & 1;
+ if (overflow) {
+ dev_err(dev, "overflow error\n");
+ return 0;
+ }
+ conversion = (voltage_mV >> 1) & 1;
+ DEBUG_INA219(("\n ina219 CNVR value:%d", conversion));
+ } while (!conversion);
+
+ /* getting power readings in milli watts*/
+ power_mW = reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_POWER));
+ DEBUG_INA219(("Ina219 power Reg: 0x%x\n", power_mW));
+ power_mW *= data->pInfo->power_lsb;
+ DEBUG_INA219(("Ina219 power Val: %d\n", power_mW));
+ if (power_mW < 0)
+ goto error;
+
+ /* set ina219 to power down mode */
+ retval = power_down_INA219(client);
+ if (retval < 0)
+ goto error;
+
+ DEBUG_INA219(("%s pow = %d\n", __func__, power_mW));
+ return sprintf(buf, "%d mW\n", power_mW);
+error:
+ dev_err(dev, "%s: failed\n", __func__);
+ return retval;
+}
+
+static s32 show_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina219_data *data = i2c_get_clientdata(client);
+ s32 retval;
+ s32 current_mA;
+ s32 voltage_mV;
+ s32 overflow, conversion;
+
+ /* fill config data */
+ retval = i2c_smbus_write_word_data(client, INA219_CONFIG,
+ reorder_bytes(INA219_CONFIG_DATA));
+ if (retval < 0) {
+ dev_err(dev, "config data write failed sts: 0x%x\n", retval);
+ goto error;
+ }
+
+ /* fill calib data */
+ retval = i2c_smbus_write_word_data(client, INA219_CAL,
+ reorder_bytes(data->pInfo->calibration_data));
+ if (retval < 0) {
+ dev_err(dev, "calibration data write failed sts: 0x%x\n",
+ retval);
+ goto error;
+ }
+
+ /* check if the readings are valid */
+ do {
+ /* read power register to clear conversion bit */
+ retval = reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_POWER));
+ if (retval < 0) {
+ dev_err(dev, "CNVR bit clearing failure sts: 0x%x\n",
+ retval);
+ goto error;
+ }
+
+ voltage_mV =
+ reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_VOLTAGE));
+ DEBUG_INA219(("Ina219 voltage reg Value: 0x%x\n", voltage_mV));
+ overflow = voltage_mV & 1;
+ if (overflow) {
+ dev_err(dev, "overflow error\n");
+ return 0;
+ }
+ conversion = (voltage_mV >> 1) & 1;
+ DEBUG_INA219(("\n ina219 CNVR value:%d", conversion));
+ } while (!conversion);
+
+ /* getting current readings in milli amps*/
+ current_mA = reorder_bytes(i2c_smbus_read_word_data(client,
+ INA219_CURRENT));
+ DEBUG_INA219(("Ina219 current Reg: 0x%x\n", current_mA));
+ if (current_mA < 0)
+ goto error;
+ current_mA =
+ (current_mA * data->pInfo->power_lsb) / data->pInfo->divisor;
+ DEBUG_INA219(("Ina219 current Value: %d\n", current_mA));
+
+ /* set ina219 to power down mode */
+ retval = power_down_INA219(client);
+ if (retval < 0)
+ goto error;
+
+
+ DEBUG_INA219(("%s current = %d\n", __func__, current_mA));
+ return sprintf(buf, "%d mA\n", current_mA);
+error:
+ dev_err(dev, "%s: failed\n", __func__);
+ return retval;
+}
+
+static struct sensor_device_attribute ina219[] = {
+ SENSOR_ATTR(rail_name, S_IRUGO, show_rail_name, NULL, 0),
+ SENSOR_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 0),
+ SENSOR_ATTR(curr1_input, S_IRUGO, show_current, NULL, 0),
+ SENSOR_ATTR(power1_input, S_IRUGO, show_power, NULL, 0),
+};
+
+static int __devinit ina219_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ina219_data *data;
+ int err;
+ u8 i;
+ data = kzalloc(sizeof(struct ina219_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ data->pInfo = client->dev.platform_data;
+ mutex_init(&data->mutex);
+ /* reset ina219 */
+ err = i2c_smbus_write_word_data(client, INA219_CONFIG,
+ reorder_bytes(INA219_RESET));
+ if (err < 0) {
+ dev_err(&client->dev, "ina219 reset failure status: 0x%x\n",
+ err);
+ goto exit_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ina219); i++) {
+ err = device_create_file(&client->dev, &ina219[i].dev_attr);
+ if (err) {
+ dev_err(&client->dev, "device_create_file failed.\n");
+ goto exit_free;
+ }
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ /* set ina219 to power down mode */
+ err = power_down_INA219(client);
+ if (err < 0)
+ goto exit_remove;
+
+ return 0;
+
+exit_remove:
+ for (i = 0; i < ARRAY_SIZE(ina219); i++)
+ device_remove_file(&client->dev, &ina219[i].dev_attr);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit ina219_remove(struct i2c_client *client)
+{
+ u8 i;
+ struct ina219_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ for (i = 0; i < ARRAY_SIZE(ina219); i++)
+ device_remove_file(&client->dev, &ina219[i].dev_attr);
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id ina219_id[] = {
+ {DRIVER_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ina219_id);
+
+static struct i2c_driver ina219_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = ina219_probe,
+ .remove = __devexit_p(ina219_remove),
+ .id_table = ina219_id,
+};
+
+static int __init ina219_init(void)
+{
+ return i2c_add_driver(&ina219_driver);
+}
+
+static void __exit ina219_exit(void)
+{
+ i2c_del_driver(&ina219_driver);
+}
+
+module_init(ina219_init);
+module_exit(ina219_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ina230.c b/drivers/hwmon/ina230.c
new file mode 100644
index 000000000000..3591a9463108
--- /dev/null
+++ b/drivers/hwmon/ina230.c
@@ -0,0 +1,561 @@
+/*
+ * ina230.c - driver for TI INA230 current / power monitor sensor
+ * (also compatible with TI INA226)
+ *
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ *
+ * The INA230(/INA226) is a sensor chip made by Texas Instruments. It measures
+ * power, voltage and current on a power rail and provides an alert on
+ * over voltage/power
+ * Complete datasheet can be obtained from ti.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/platform_data/ina230.h>
+#include <linux/init.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/cpu.h>
+
+
+#define DRIVER_NAME "ina230"
+#define MEASURE_BUS_VOLT 0
+
+/* ina230 (/ ina226)register offsets */
+#define INA230_CONFIG 0
+#define INA230_SHUNT 1
+#define INA230_VOLTAGE 2
+#define INA230_POWER 3
+#define INA230_CURRENT 4
+#define INA230_CAL 5
+#define INA230_MASK 6
+#define INA230_ALERT 7
+
+/*
+Config register for ina230 (/ ina226):
+D15|D14 D13 D12|D11 D10 D09|D08 D07 D06|D05 D04 D03|D02 D01 D00
+rst|- - - |AVG |Vbus_CT |Vsh_CT |MODE
+*/
+#define INA230_RESET (1 << 15)
+#define INA230_AVG (0 << 9) /* 0 Averages */
+#define INA230_VBUS_CT (0 << 6) /* Vbus 140us conversion time */
+#define INA230_VSH_CT (0 << 3) /* Vshunt 140us conversion time */
+
+#if MEASURE_BUS_VOLT
+#define INA230_CONT_MODE 5 /* Continuous Shunt measurement */
+#define INA230_TRIG_MODE 1 /* Triggered Shunt measurement */
+#else
+#define INA230_CONT_MODE 7 /* Continuous Bus and shunt measure */
+#define INA230_TRIG_MODE 3 /* Triggered Bus and shunt measure */
+#endif
+
+#define INA230_POWER_DOWN 0
+#define INA230_CONT_CONFIG (INA230_AVG | INA230_VBUS_CT | \
+ INA230_VSH_CT | INA230_CONT_MODE)
+#define INA230_TRIG_CONFIG (INA230_AVG | INA230_VBUS_CT | \
+ INA230_VSH_CT | INA230_TRIG_MODE)
+
+/*
+Mask register for ina230 (/ INA226):
+D15|D14|D13|D12|D11|D10|D09 D08 D07 D06 D05|D04|D03|D02|D01|D00
+SOL|SUL|BOL|BUL|POL|CVR|-   -   -   -   -  |AFF|CVF|OVF|APO|LEN
+*/
+#define INA230_MASK_SOL (1 << 15)
+#define INA230_MASK_SUL (1 << 14)
+
+
+struct ina230_data {
+ struct device *hwmon_dev;
+ struct i2c_client *client;
+ struct ina230_platform_data *pdata;
+ struct mutex mutex;
+ bool running;
+ struct notifier_block nb;
+};
+
+
+/* bus voltage resolution: 1.25mv */
+#define busv_register_to_mv(x) (((x) * 5) >> 2)
+
+/* shunt voltage resolution: 2.5uv */
+#define shuntv_register_to_uv(x) (((x) * 5) >> 1)
+#define uv_to_alert_register(x) (((x) << 1) / 5)
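+/*
+ * Worked example (illustrative values only): a bus-voltage register reading
+ * of 4000 gives 4000 * 5 / 4 = 5000 mV, a shunt register reading of 400
+ * gives 400 * 5 / 2 = 1000 uV, and uv_to_alert_register(1000) maps back to
+ * 1000 * 2 / 5 = 400.
+ */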
+
+
+
+static s32 ensure_enabled_start(struct i2c_client *client)
+{
+ struct ina230_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ if (data->running)
+ return 0;
+
+ retval = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_TRIG_CONFIG));
+ if (retval < 0)
+ dev_err(&client->dev, "config data write failed sts: 0x%x\n",
+ retval);
+
+ return retval;
+}
+
+
+static void ensure_enabled_end(struct i2c_client *client)
+{
+ struct ina230_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ if (data->running)
+ return;
+
+ retval = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_POWER_DOWN));
+ if (retval < 0)
+ dev_err(&client->dev, "power down failure sts: 0x%x\n",
+ retval);
+}
+
+
+static s32 __locked_power_down_ina230(struct i2c_client *client)
+{
+ s32 retval;
+ struct ina230_data *data = i2c_get_clientdata(client);
+
+ if (!data->running)
+ return 0;
+
+ retval = i2c_smbus_write_word_data(client, INA230_MASK, 0);
+ if (retval < 0)
+ dev_err(&client->dev, "mask write failure sts: 0x%x\n",
+ retval);
+
+ retval = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_POWER_DOWN));
+ if (retval < 0)
+ dev_err(&client->dev, "power down failure sts: 0x%x\n",
+ retval);
+
+ data->running = false;
+
+ return retval;
+}
+
+
+static s32 power_down_ina230(struct i2c_client *client)
+{
+ s32 retval;
+ struct ina230_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->mutex);
+ retval = __locked_power_down_ina230(client);
+ mutex_unlock(&data->mutex);
+
+ return retval;
+}
+
+
+static s32 __locked_start_current_mon(struct i2c_client *client)
+{
+ s32 retval;
+ s16 shunt_limit;
+ s16 alert_mask;
+ struct ina230_data *data = i2c_get_clientdata(client);
+
+ if (!data->pdata->current_threshold) {
+ dev_err(&client->dev, "no current threshold specified\n");
+ return -EINVAL;
+ }
+
+ retval = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_CONT_CONFIG));
+ if (retval < 0) {
+ dev_err(&client->dev, "config data write failed sts: 0x%x\n",
+ retval);
+ return retval;
+ }
+
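+ /*
+ * The platform data is assumed to give resistor in milliohm and
+ * current_threshold in mA, so their product is the shunt-voltage
+ * threshold in uV, which uv_to_alert_register() converts to the raw
+ * 2.5 uV/LSB alert value. show_current() relies on the same units
+ * when it divides uV by the resistor to get mA.
+ */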
+ shunt_limit = uv_to_alert_register(data->pdata->resistor *
+ data->pdata->current_threshold);
+
+ retval = i2c_smbus_write_word_data(client, INA230_ALERT,
+ cpu_to_be16(shunt_limit));
+ if (retval < 0) {
+ dev_err(&client->dev, "alert data write failed sts: 0x%x\n",
+ retval);
+ return retval;
+ }
+
+ alert_mask = shunt_limit >= 0 ? INA230_MASK_SOL : INA230_MASK_SUL;
+ retval = i2c_smbus_write_word_data(client, INA230_MASK,
+ cpu_to_be16(alert_mask));
+ if (retval < 0) {
+ dev_err(&client->dev, "mask data write failed sts: 0x%x\n",
+ retval);
+ return retval;
+ }
+
+ data->running = true;
+
+ return 0;
+}
+
+
+static void __locked_evaluate_state(struct i2c_client *client)
+{
+ struct ina230_data *data = i2c_get_clientdata(client);
+ int cpus = num_online_cpus();
+
+ if (data->running) {
+ if (cpus < data->pdata->min_cores_online ||
+ !data->pdata->current_threshold)
+ __locked_power_down_ina230(client);
+ } else {
+ if (cpus >= data->pdata->min_cores_online &&
+ data->pdata->current_threshold)
+ __locked_start_current_mon(client);
+ }
+}
+
+
+static void evaluate_state(struct i2c_client *client)
+{
+ struct ina230_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->mutex);
+ __locked_evaluate_state(client);
+ mutex_unlock(&data->mutex);
+}
+
+
+static s32 show_rail_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina230_data *data = i2c_get_clientdata(client);
+ return sprintf(buf, "%s\n", data->pdata->rail_name);
+}
+
+
+static s32 show_current_threshold(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina230_data *data = i2c_get_clientdata(client);
+ return sprintf(buf, "%d mA\n", data->pdata->current_threshold);
+}
+
+
+static s32 set_current_threshold(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina230_data *data = i2c_get_clientdata(client);
+ s32 retval;
+
+ mutex_lock(&data->mutex);
+
+ if (strict_strtol(buf, 10, (long *)&(data->pdata->current_threshold))) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (data->pdata->current_threshold) {
+ if (data->running) {
+ /* force restart */
+ retval = __locked_start_current_mon(client);
+ } else {
+ __locked_evaluate_state(client);
+ retval = 0;
+ }
+ } else {
+ retval = __locked_power_down_ina230(client);
+ }
+
+out:
+ mutex_unlock(&data->mutex);
+ if (retval >= 0)
+ return count;
+ return retval;
+}
+
+
+
+
+#if MEASURE_BUS_VOLT
+static s32 show_bus_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina230_data *data = i2c_get_clientdata(client);
+ s32 voltage_mV;
+ int retval;
+
+ mutex_lock(&data->mutex);
+ retval = ensure_enabled_start(client);
+ if (retval < 0) {
+ mutex_unlock(&data->mutex);
+ return retval;
+ }
+
+ /* getting voltage readings in milli volts*/
+ voltage_mV =
+ (s16)be16_to_cpu(i2c_smbus_read_word_data(client,
+ INA230_VOLTAGE));
+
+ ensure_enabled_end(client);
+ mutex_unlock(&data->mutex);
+
+ if (voltage_mV < 0) {
+ dev_err(dev, "%s: failed\n", __func__);
+ return -1;
+ }
+
+ voltage_mV = busv_register_to_mv(voltage_mV);
+
+ return sprintf(buf, "%d mV\n", voltage_mV);
+}
+#endif
+
+
+
+
+static s32 show_shunt_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina230_data *data = i2c_get_clientdata(client);
+ s32 voltage_uV;
+ int retval;
+
+ mutex_lock(&data->mutex);
+ retval = ensure_enabled_start(client);
+ if (retval < 0) {
+ mutex_unlock(&data->mutex);
+ return retval;
+ }
+
+ voltage_uV =
+ (s16)be16_to_cpu(i2c_smbus_read_word_data(client,
+ INA230_SHUNT));
+
+ ensure_enabled_end(client);
+ mutex_unlock(&data->mutex);
+
+ voltage_uV = shuntv_register_to_uv(voltage_uV);
+
+ return sprintf(buf, "%d uV\n", voltage_uV);
+}
+
+
+static s32 show_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina230_data *data = i2c_get_clientdata(client);
+ s32 voltage_uV;
+ s32 current_mA;
+ int retval;
+
+ mutex_lock(&data->mutex);
+ retval = ensure_enabled_start(client);
+ if (retval < 0) {
+ mutex_unlock(&data->mutex);
+ return retval;
+ }
+
+ voltage_uV =
+ (s16)be16_to_cpu(i2c_smbus_read_word_data(client,
+ INA230_SHUNT));
+
+ ensure_enabled_end(client);
+ mutex_unlock(&data->mutex);
+
+ voltage_uV = shuntv_register_to_uv(voltage_uV);
+ current_mA = voltage_uV / data->pdata->resistor;
+
+ return sprintf(buf, "%d mA\n", current_mA);
+}
+
+
+static int ina230_hotplug_notify(struct notifier_block *nb, unsigned long event,
+ void *hcpu)
+{
+ struct ina230_data *data = container_of(nb, struct ina230_data,
+ nb);
+ struct i2c_client *client = data->client;
+
+ if (event == CPU_ONLINE || event == CPU_DEAD)
+ evaluate_state(client);
+
+ return 0;
+}
+
+
+
+static struct sensor_device_attribute ina230[] = {
+ SENSOR_ATTR(rail_name, S_IRUGO, show_rail_name, NULL, 0),
+ SENSOR_ATTR(current_threshold, S_IWUSR | S_IRUGO,
+ show_current_threshold, set_current_threshold, 0),
+ SENSOR_ATTR(shuntvolt1_input, S_IRUGO, show_shunt_voltage, NULL, 0),
+ SENSOR_ATTR(current1_input, S_IRUGO, show_current, NULL, 0),
+#if MEASURE_BUS_VOLT
+ SENSOR_ATTR(busvolt1_input, S_IRUGO, show_bus_voltage, NULL, 0),
+#endif
+};
+
+
+static int __devinit ina230_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ina230_data *data;
+ int err;
+ u8 i;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct ina230_data),
+ GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ data->pdata = client->dev.platform_data;
+ data->running = false;
+ data->nb.notifier_call = ina230_hotplug_notify;
+ data->client = client;
+ mutex_init(&data->mutex);
+
+ err = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_RESET));
+ if (err < 0) {
+ dev_err(&client->dev, "ina230 reset failure status: 0x%x\n",
+ err);
+ goto exit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ina230); i++) {
+ err = device_create_file(&client->dev, &ina230[i].dev_attr);
+ if (err) {
+ dev_err(&client->dev, "device_create_file failed.\n");
+ goto exit_remove;
+ }
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ register_hotcpu_notifier(&(data->nb));
+
+ evaluate_state(client);
+
+ return 0;
+
+exit_remove:
+ for (i = 0; i < ARRAY_SIZE(ina230); i++)
+ device_remove_file(&client->dev, &ina230[i].dev_attr);
+exit:
+ return err;
+}
+
+
+static int __devexit ina230_remove(struct i2c_client *client)
+{
+ u8 i;
+ struct ina230_data *data = i2c_get_clientdata(client);
+ unregister_hotcpu_notifier(&(data->nb));
+ power_down_ina230(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ for (i = 0; i < ARRAY_SIZE(ina230); i++)
+ device_remove_file(&client->dev, &ina230[i].dev_attr);
+ return 0;
+}
+
+
+static int ina230_suspend(struct i2c_client *client, pm_message_t state)
+{
+ return power_down_ina230(client);
+}
+
+
+static int ina230_resume(struct i2c_client *client)
+{
+ evaluate_state(client);
+ return 0;
+}
+
+
+static const struct i2c_device_id ina230_id[] = {
+ {DRIVER_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ina230_id);
+
+
+static struct i2c_driver ina230_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = ina230_probe,
+ .remove = __devexit_p(ina230_remove),
+ .suspend = ina230_suspend,
+ .resume = ina230_resume,
+ .id_table = ina230_id,
+};
+
+
+static int __init ina230_init(void)
+{
+ return i2c_add_driver(&ina230_driver);
+}
+
+
+static void __exit ina230_exit(void)
+{
+ i2c_del_driver(&ina230_driver);
+}
+
+
+module_init(ina230_init);
+module_exit(ina230_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tegra-tsensor.c b/drivers/hwmon/tegra-tsensor.c
new file mode 100644
index 000000000000..e4792cba4937
--- /dev/null
+++ b/drivers/hwmon/tegra-tsensor.c
@@ -0,0 +1,1991 @@
+/*
+ * NVIDIA Tegra SOC - temperature sensor driver
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <mach/iomap.h>
+#include <mach/clk.h>
+#include <mach/delay.h>
+#include <mach/tsensor.h>
+#include <mach/tegra_fuse.h>
+
+/* macro to enable tsensor hw reset */
+/* FIXME: till tsensor temperature is reliable this should be 0 */
+#define ENABLE_TSENSOR_HW_RESET 0
+
+/* tsensor instance used for temperature calculation */
+#define TSENSOR_FUSE_REV1 8
+#define TSENSOR_FUSE_REV2 21
+
+/* version where tsensor temperature reading is accurate */
+#define STABLE_TSENSOR_FUSE_REV TSENSOR_FUSE_REV2
+
+/* We have multiple tsensor instances with following registers */
+#define SENSOR_CFG0 0x40
+#define SENSOR_CFG1 0x48
+#define SENSOR_CFG2 0x4c
+#define SENSOR_STATUS0 0x58
+#define SENSOR_TS_STATUS1 0x5c
+#define SENSOR_TS_STATUS2 0x60
+
+/* interrupt mask in tsensor status register */
+#define TSENSOR_SENSOR_X_STATUS0_0_INTR_MASK (1 << 8)
+
+#define SENSOR_CFG0_M_MASK 0xffff
+#define SENSOR_CFG0_M_SHIFT 8
+#define SENSOR_CFG0_N_MASK 0xff
+#define SENSOR_CFG0_N_SHIFT 24
+#define SENSOR_CFG0_RST_INTR_SHIFT 6
+#define SENSOR_CFG0_HW_DIV2_INTR_SHIFT 5
+#define SENSOR_CFG0_OVERFLOW_INTR 4
+#define SENSOR_CFG0_DVFS_INTR_SHIFT 3
+#define SENSOR_CFG0_RST_ENABLE_SHIFT 2
+#define SENSOR_CFG0_HW_DIV2_ENABLE_SHIFT 1
+#define SENSOR_CFG0_STOP_SHIFT 0
+
+#define SENSOR_CFG_X_TH_X_MASK 0xffff
+#define SENSOR_CFG1_TH2_SHIFT 16
+#define SENSOR_CFG1_TH1_SHIFT 0
+#define SENSOR_CFG2_TH3_SHIFT 0
+#define SENSOR_CFG2_TH0_SHIFT 16
+
+#define SENSOR_STATUS_AVG_VALID_SHIFT 10
+#define SENSOR_STATUS_CURR_VALID_SHIFT 9
+
+#define STATE_MASK 0x7
+#define STATUS0_STATE_SHIFT 0
+#define STATUS0_PREV_STATE_SHIFT 4
+
+#define LOCAL_STR_SIZE1 60
+#define MAX_STR_LINE 100
+#define MAX_TSENSOR_LOOP1 (1000 * 2)
+
+#define TSENSOR_COUNTER_TOLERANCE 100
+
+#define SENSOR_CTRL_RST_SHIFT 1
+#define RST_SRC_MASK 0x7
+#define RST_SRC_SENSOR 2
+#define TEGRA_REV_REG_OFFSET 0x804
+#define CCLK_G_BURST_POLICY_REG_REL_OFFSET 0x368
+#define TSENSOR_SLOWDOWN_BIT 23
+
+/* macros used for temperature calculations */
+#define get_temperature_int(X) ((X) / 100)
+#define get_temperature_fraction(X) (((int)(abs(X))) % 100)
+#define get_temperature_round(X) DIV_ROUND_CLOSEST(X, 100)
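+/*
+ * Example (hypothetical value): X = 3450, i.e. 34.50 degC scaled by 100,
+ * gives integer part 34, fraction 50 and rounds to 35.
+ */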
+
+#define MILLICELSIUS_TO_CELSIUS(i) ((i) / 1000)
+#define CELSIUS_TO_MILLICELSIUS(i) ((i) * 1000)
+
+#define get_ts_state(data) tsensor_get_reg_field(data,\
+ ((data->tsensor_index << 16) | SENSOR_STATUS0), \
+ STATUS0_STATE_SHIFT, STATE_MASK)
+
+/* tsensor states */
+enum ts_state {
+ TS_INVALID = 0,
+ TS_LEVEL0,
+ TS_LEVEL1,
+ TS_LEVEL2,
+ TS_LEVEL3,
+ TS_OVERFLOW,
+ TS_MAX_STATE = TS_OVERFLOW
+};
+
+enum {
+ /* temperature is sensed from 2 points on tegra */
+ TSENSOR_COUNT = 2,
+ TSENSOR_INSTANCE1 = 0,
+ TSENSOR_INSTANCE2 = 1,
+ /* divide by 2 temperature threshold */
+ DIV2_CELSIUS_TEMP_THRESHOLD_DEFAULT = 70,
+ /* reset chip temperature threshold */
+ RESET_CELSIUS_TEMP_THRESHOLD_DEFAULT = 75,
+ /* tsensor frequency in Hz for clk src CLK_M and divisor=24 */
+ DEFAULT_TSENSOR_CLK_HZ = 500000,
+ DEFAULT_TSENSOR_N = 255,
+ DEFAULT_TSENSOR_M = 12500,
+ /* tsensor instance offset */
+ TSENSOR_INSTANCE_OFFSET = 0x40,
+ MIN_THRESHOLD = 0x0,
+ MAX_THRESHOLD = 0xffff,
+ DEFAULT_THRESHOLD_TH0 = MAX_THRESHOLD,
+ DEFAULT_THRESHOLD_TH1 = MAX_THRESHOLD,
+ DEFAULT_THRESHOLD_TH2 = MAX_THRESHOLD,
+ DEFAULT_THRESHOLD_TH3 = MAX_THRESHOLD,
+};
+
+/* constants used to implement sysfs interface */
+enum tsensor_params {
+ TSENSOR_PARAM_TH1 = 0,
+ TSENSOR_PARAM_TH2,
+ TSENSOR_PARAM_TH3,
+ TSENSOR_TEMPERATURE,
+ TSENSOR_STATE,
+ TSENSOR_LIMITS,
+};
+
+enum tsensor_thresholds {
+ TSENSOR_TH0 = 0,
+ TSENSOR_TH1,
+ TSENSOR_TH2,
+ TSENSOR_TH3
+};
+
+/*
+ * For each registered chip, we need to keep some data in memory.
+ * The structure is dynamically allocated.
+ */
+struct tegra_tsensor_data {
+ struct delayed_work work;
+ struct workqueue_struct *workqueue;
+ struct mutex mutex;
+ struct device *hwmon_dev;
+ spinlock_t tsensor_lock;
+ struct clk *dev_clk;
+ /* tsensor register space */
+ void __iomem *base;
+ unsigned long phys;
+ unsigned long phys_end;
+ /* pmc register space */
+ void __iomem *pmc_rst_base;
+ unsigned long pmc_phys;
+ unsigned long pmc_phys_end;
+ /* clk register space */
+ void __iomem *clk_rst_base;
+ int irq;
+ unsigned int int_status[TSENSOR_COUNT];
+
+ /* save configuration before suspend and restore after resume */
+ unsigned int config0[TSENSOR_COUNT];
+ unsigned int config1[TSENSOR_COUNT];
+ unsigned int config2[TSENSOR_COUNT];
+ /* temperature readings from tsensor_index tsensor - 0/1 */
+ unsigned int tsensor_index;
+ int A_e_minus6;
+ int B_e_minus2;
+ unsigned int fuse_T1;
+ unsigned int fuse_F1;
+ unsigned int fuse_T2;
+ unsigned int fuse_F2;
+ /* Quadratic fit coefficients: m=-0.003512 n=1.528943 p=-11.1 */
+ int m_e_minus6;
+ int n_e_minus6;
+ int p_e_minus2;
+
+ long current_hi_limit;
+ long current_lo_limit;
+
+ bool is_edp_supported;
+
+ void (*alert_func)(void *);
+ void *alert_data;
+};
+
+enum {
+ TSENSOR_COEFF_SET1 = 0,
+ TSENSOR_COEFF_SET2,
+ TSENSOR_COEFF_END
+};
+
+struct tegra_tsensor_coeff {
+ int e_minus6_m;
+ int e_minus6_n;
+ int e_minus2_p;
+};
+
+static struct tegra_tsensor_coeff coeff_table[] = {
+ /* Quadratic fit coefficients: m=-0.002775 n=1.338811 p=-7.30 */
+ [TSENSOR_COEFF_SET1] = {
+ -2775,
+ 1338811,
+ -730
+ },
+ /* Quadratic fit coefficients: m=-0.003512 n=1.528943 p=-11.1 */
+ [TSENSOR_COEFF_SET2] = {
+ -3512,
+ 1528943,
+ -1110
+ }
+ /* FIXME: add tsensor coefficients after chip characterization */
+};
+
+/* pTemperature returned in 100 * Celsius */
+static int tsensor_count_2_temp(struct tegra_tsensor_data *data,
+ unsigned int count, int *p_temperature);
+static unsigned int tsensor_get_threshold_counter(
+ struct tegra_tsensor_data *data, int temp);
+
+/* tsensor register access functions */
+
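+/*
+ * The 'reg' argument to these helpers packs the tsensor instance index in
+ * bits [31:16] and the register offset in bits [15:0]; for example
+ * ((1 << 16) | SENSOR_CFG0) accesses CFG0 of instance 1 at
+ * base + 1 * TSENSOR_INSTANCE_OFFSET + 0x40.
+ */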
+static void tsensor_writel(struct tegra_tsensor_data *data, u32 val,
+ unsigned long reg)
+{
+ unsigned int reg_offset = reg & 0xffff;
+ unsigned char inst = (reg >> 16) & 0xffff;
+ writel(val, data->base + (inst * TSENSOR_INSTANCE_OFFSET) +
+ reg_offset);
+ return;
+}
+
+static unsigned int tsensor_readl(struct tegra_tsensor_data *data,
+ unsigned long reg)
+{
+ unsigned int reg_offset = reg & 0xffff;
+ unsigned char inst = (reg >> 16) & 0xffff;
+ return readl(data->base +
+ (inst * TSENSOR_INSTANCE_OFFSET) + reg_offset);
+}
+
+static unsigned int tsensor_get_reg_field(
+ struct tegra_tsensor_data *data, unsigned int reg,
+ unsigned int shift, unsigned int mask)
+{
+ unsigned int reg_val;
+ reg_val = tsensor_readl(data, reg);
+ return (reg_val & (mask << shift)) >> shift;
+}
+
+static int tsensor_set_reg_field(
+ struct tegra_tsensor_data *data, unsigned int value,
+ unsigned int reg, unsigned int shift, unsigned int mask)
+{
+ unsigned int reg_val;
+ unsigned int rd_val;
+ reg_val = tsensor_readl(data, reg);
+ reg_val &= ~(mask << shift);
+ reg_val |= ((value & mask) << shift);
+ tsensor_writel(data, reg_val, reg);
+ rd_val = tsensor_readl(data, reg);
+ if (rd_val == reg_val)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/* enable argument is true to enable reset, false disables pmc reset */
+static void pmc_rst_enable(struct tegra_tsensor_data *data, bool enable)
+{
+ unsigned int val;
+ /* mapped first pmc reg is SENSOR_CTRL */
+ val = readl(data->pmc_rst_base);
+ if (enable)
+ val |= (1 << SENSOR_CTRL_RST_SHIFT);
+ else
+ val &= ~(1 << SENSOR_CTRL_RST_SHIFT);
+ writel(val, data->pmc_rst_base);
+}
+
+/* true returned when pmc reset source is tsensor */
+static bool pmc_check_rst_sensor(struct tegra_tsensor_data *data)
+{
+ unsigned int val;
+ unsigned char src;
+ val = readl(data->pmc_rst_base + 4);
+ src = (unsigned char)(val & RST_SRC_MASK);
+ if (src == RST_SRC_SENSOR)
+ return true;
+ else
+ return false;
+}
+
+/* function to get chip revision */
+static void get_chip_rev(unsigned short *p_id, unsigned short *p_major,
+ unsigned short *p_minor)
+{
+ unsigned int reg;
+
+ reg = readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE) +
+ TEGRA_REV_REG_OFFSET);
+ *p_id = (reg >> 8) & 0xff;
+ *p_major = (reg >> 4) & 0xf;
+ *p_minor = (reg >> 16) & 0xf;
+ pr_info("Tegra chip revision for tsensor detected as: "
+ "Chip Id=%x, Major=%d, Minor=%d\n", (int)*p_id,
+ (int)*p_major, (int)*p_minor);
+}
+
+/*
+ * function to get chip revision specific tsensor coefficients
+ * obtained after chip characterization
+ */
+static void get_chip_tsensor_coeff(struct tegra_tsensor_data *data)
+{
+ unsigned short chip_id, major_rev, minor_rev;
+ unsigned short coeff_index;
+
+ get_chip_rev(&chip_id, &major_rev, &minor_rev);
+ switch (minor_rev) {
+ default:
+ pr_info("Warning: tsensor coefficient for chip pending\n");
+ /* fall through - use the latest known coefficient set */
+ case 1:
+ coeff_index = TSENSOR_COEFF_SET1;
+ break;
+ }
+ if (data->tsensor_index == TSENSOR_INSTANCE1)
+ coeff_index = TSENSOR_COEFF_SET2;
+ data->m_e_minus6 = coeff_table[coeff_index].e_minus6_m;
+ data->n_e_minus6 = coeff_table[coeff_index].e_minus6_n;
+ data->p_e_minus2 = coeff_table[coeff_index].e_minus2_p;
+}
+
+/* tsensor counter read function */
+static int tsensor_read_counter(
+ struct tegra_tsensor_data *data,
+ unsigned int *p_counter)
+{
+ unsigned int status_reg;
+ unsigned int config0;
+ int iter_count = 0;
+ const int max_loop = 50;
+
+ do {
+ config0 = tsensor_readl(data, ((data->tsensor_index << 16) |
+ SENSOR_CFG0));
+ if (config0 & (1 << SENSOR_CFG0_STOP_SHIFT)) {
+ dev_dbg(data->hwmon_dev, "Error: tsensor "
+ "counter read with STOP bit not supported\n");
+ *p_counter = 0;
+ return 0;
+ }
+
+ status_reg = tsensor_readl(data,
+ (data->tsensor_index << 16) | SENSOR_STATUS0);
+ if (status_reg & (1 <<
+ SENSOR_STATUS_CURR_VALID_SHIFT)) {
+ *p_counter = tsensor_readl(data, (data->tsensor_index
+ << 16) | SENSOR_TS_STATUS1);
+ break;
+ }
+ if (!(iter_count % 10))
+ dev_dbg(data->hwmon_dev, "retry %d\n", iter_count);
+
+ msleep(21);
+ iter_count++;
+ } while (iter_count < max_loop);
+
+ if (iter_count == max_loop)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* tsensor threshold print function */
+static void dump_threshold(struct tegra_tsensor_data *data)
+{
+ unsigned int TH_2_1, TH_0_3;
+ unsigned int curr_avg;
+ int err;
+
+ TH_2_1 = tsensor_readl(data, (data->tsensor_index << 16) | SENSOR_CFG1);
+ TH_0_3 = tsensor_readl(data, (data->tsensor_index << 16) | SENSOR_CFG2);
+ dev_dbg(data->hwmon_dev, "Tsensor: TH_2_1=0x%x, "
+ "TH_0_3=0x%x\n", TH_2_1, TH_0_3);
+ err = tsensor_read_counter(data, &curr_avg);
+ if (err < 0)
+ pr_err("Error: tsensor counter read, "
+ "err=%d\n", err);
+ else
+ dev_dbg(data->hwmon_dev, "Tsensor: "
+ "curr_avg=0x%x\n", curr_avg);
+}
+
+static int tsensor_get_temperature(
+ struct tegra_tsensor_data *data,
+ int *pTemp, unsigned int *pCounter)
+{
+ int err = 0;
+ unsigned int curr_avg;
+
+ err = tsensor_read_counter(data, &curr_avg);
+ if (err < 0)
+ goto error;
+
+ *pCounter = ((curr_avg & 0xFFFF0000) >> 16);
+ err = tsensor_count_2_temp(data, *pCounter, pTemp);
+
+error:
+ return err;
+}
+
+static ssize_t tsensor_show_state(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ int state;
+ struct tegra_tsensor_data *data = dev_get_drvdata(dev);
+
+ state = get_ts_state(data);
+
+ return snprintf(buf, 50, "%d\n", state);
+}
+
+static ssize_t tsensor_show_limits(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct tegra_tsensor_data *data = dev_get_drvdata(dev);
+ return snprintf(buf, 50, "%ld %ld\n",
+ data->current_lo_limit, data->current_hi_limit);
+}
+
+/* tsensor temperature show function */
+static ssize_t tsensor_show_counters(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ unsigned int curr_avg;
+ char err_str[] = "error-sysfs-counter-read\n";
+ struct tegra_tsensor_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int err;
+ int temp;
+
+ if (attr->index == TSENSOR_TEMPERATURE) {
+ /* use current counter value to calculate temperature */
+ err = tsensor_read_counter(data, &curr_avg);
+ if (err < 0)
+ goto error;
+ err = tsensor_count_2_temp(data,
+ ((curr_avg & 0xFFFF0000) >> 16), &temp);
+ if (err < 0)
+ goto error;
+
+ dev_vdbg(data->hwmon_dev, "%s has curr_avg=0x%x, "
+ "temp0=%d\n", __func__, curr_avg, temp);
+
+ snprintf(buf, PAGE_SIZE, "%d.%02dC\n",
+ get_temperature_int(temp),
+ get_temperature_fraction(temp));
+ }
+ return strlen(buf);
+error:
+ return snprintf(buf, sizeof(err_str), "%s", err_str);
+}
+
+/* utility function to check hw clock divide by 2 condition */
+static bool cclkg_check_hwdiv2_sensor(struct tegra_tsensor_data *data)
+{
+ unsigned int val;
+ val = readl(IO_ADDRESS(TEGRA_CLK_RESET_BASE +
+ CCLK_G_BURST_POLICY_REG_REL_OFFSET));
+ if ((1 << TSENSOR_SLOWDOWN_BIT) & val) {
+ dev_err(data->hwmon_dev, "Warning: ***** tsensor "
+ "slowdown bit detected\n");
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/*
+ * function with table to return register, field shift and mask
+ * values for supported parameters
+ */
+static int get_param_values(
+ struct tegra_tsensor_data *data, unsigned int indx,
+ unsigned int *p_reg, unsigned int *p_sft, unsigned int *p_msk,
+ char *info, size_t info_len)
+{
+ switch (indx) {
+ case TSENSOR_PARAM_TH1:
+ *p_reg = ((data->tsensor_index << 16) | SENSOR_CFG1);
+ *p_sft = SENSOR_CFG1_TH1_SHIFT;
+ *p_msk = SENSOR_CFG_X_TH_X_MASK;
+ snprintf(info, info_len, "TH1[%d]: ",
+ data->tsensor_index);
+ break;
+ case TSENSOR_PARAM_TH2:
+ *p_reg = ((data->tsensor_index << 16) | SENSOR_CFG1);
+ *p_sft = SENSOR_CFG1_TH2_SHIFT;
+ *p_msk = SENSOR_CFG_X_TH_X_MASK;
+ snprintf(info, info_len, "TH2[%d]: ",
+ data->tsensor_index);
+ break;
+ case TSENSOR_PARAM_TH3:
+ *p_reg = ((data->tsensor_index << 16) | SENSOR_CFG2);
+ *p_sft = SENSOR_CFG2_TH3_SHIFT;
+ *p_msk = SENSOR_CFG_X_TH_X_MASK;
+ snprintf(info, info_len, "TH3[%d]: ",
+ data->tsensor_index);
+ break;
+ default:
+ return -ENOENT;
+ }
+ return 0;
+}
+
+/* tsensor driver sysfs show function */
+static ssize_t show_tsensor_param(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ unsigned int val;
+ struct tegra_tsensor_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ unsigned int reg;
+ unsigned int sft;
+ unsigned int msk;
+ int err;
+ int temp;
+ char info[LOCAL_STR_SIZE1];
+
+ err = get_param_values(data, attr->index, &reg, &sft, &msk,
+ info, sizeof(info));
+ if (err < 0)
+ goto labelErr;
+ val = tsensor_get_reg_field(data, reg, sft, msk);
+ if (val == MAX_THRESHOLD)
+ snprintf(buf, PAGE_SIZE, "%s un-initialized threshold\n", info);
+ else {
+ err = tsensor_count_2_temp(data, val, &temp);
+ if (err != 0)
+ goto labelErr;
+ snprintf(buf, PAGE_SIZE, "%s threshold: %d.%d Celsius\n", info,
+ get_temperature_int(temp),
+ get_temperature_fraction(temp));
+ }
+ return strlen(buf);
+
+labelErr:
+ snprintf(buf, PAGE_SIZE, "ERROR:");
+ return strlen(buf);
+}
+
+/* tsensor driver sysfs store function */
+static ssize_t set_tsensor_param(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int num;
+ struct tegra_tsensor_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ unsigned int reg;
+ unsigned int sft;
+ unsigned int msk;
+ int err;
+ unsigned int counter;
+ unsigned int val;
+ char info[LOCAL_STR_SIZE1];
+
+ if (kstrtoint(buf, 0, &num)) {
+ dev_err(dev, "file: %s, line=%d return %s()\n",
+ __FILE__, __LINE__, __func__);
+ return -EINVAL;
+ }
+
+ counter = tsensor_get_threshold_counter(data, num);
+
+ err = get_param_values(data, attr->index, &reg, &sft, &msk,
+ info, sizeof(info));
+ if (err < 0)
+ goto labelErr;
+
+ err = tsensor_set_reg_field(data, counter, reg, sft, msk);
+ if (err < 0)
+ goto labelErr;
+
+ /* TH2 clk divide check */
+ if (attr->index == TSENSOR_PARAM_TH2) {
+ msleep(21);
+ (void)cclkg_check_hwdiv2_sensor(data);
+ }
+ val = tsensor_get_reg_field(data, reg, sft, msk);
+ dev_dbg(dev, "%s 0x%x\n", info, val);
+ return count;
+labelErr:
+ dev_err(dev, "file: %s, line=%d, %s(), error=0x%x\n", __FILE__,
+ __LINE__, __func__, err);
+ return 0;
+}
+
+static struct sensor_device_attribute tsensor_nodes[] = {
+ SENSOR_ATTR(tsensor_TH1, S_IRUGO | S_IWUSR,
+ show_tsensor_param, set_tsensor_param, TSENSOR_PARAM_TH1),
+ SENSOR_ATTR(tsensor_TH2, S_IRUGO | S_IWUSR,
+ show_tsensor_param, set_tsensor_param, TSENSOR_PARAM_TH2),
+ SENSOR_ATTR(tsensor_TH3, S_IRUGO | S_IWUSR,
+ show_tsensor_param, set_tsensor_param, TSENSOR_PARAM_TH3),
+ SENSOR_ATTR(tsensor_temperature, S_IRUGO | S_IWUSR,
+ tsensor_show_counters, NULL, TSENSOR_TEMPERATURE),
+ SENSOR_ATTR(tsensor_state, S_IRUGO | S_IWUSR,
+ tsensor_show_state, NULL, TSENSOR_STATE),
+ SENSOR_ATTR(tsensor_limits, S_IRUGO | S_IWUSR,
+ tsensor_show_limits, NULL, TSENSOR_LIMITS),
+};
+
+int tsensor_thermal_get_temp(struct tegra_tsensor_data *data,
+ long *milli_temp)
+{
+ int counter, temp, err;
+ int temp_state, ts_state;
+
+ err = tsensor_get_temperature(data,
+ &temp,
+ &counter);
+ if (err)
+ return err;
+
+ temp *= 10;
+
+ mutex_lock(&data->mutex);
+
+ /* This section of logic is done in order to make sure that
+ * the temperature read corresponds to the current hw state.
+ * If it is not, return the nearest temperature
+ */
+ if ((data->current_lo_limit != 0) ||
+ (data->current_hi_limit)) {
+
+ if (temp <= data->current_lo_limit)
+ temp_state = TS_LEVEL0;
+ else if (temp < data->current_hi_limit)
+ temp_state = TS_LEVEL1;
+ else
+ temp_state = TS_LEVEL2;
+
+ ts_state = get_ts_state(data);
+
+ if (ts_state != temp_state) {
+
+ switch (ts_state) {
+ case TS_LEVEL0:
+ temp = data->current_lo_limit - 1;
+ break;
+ case TS_LEVEL1:
+ if (temp_state == TS_LEVEL0)
+ temp = data->current_lo_limit + 1;
+ else
+ temp = data->current_hi_limit - 1;
+ break;
+ case TS_LEVEL2:
+ temp = data->current_hi_limit + 1;
+ break;
+ }
+
+ }
+
+ }
+
+ mutex_unlock(&data->mutex);
+
+ *milli_temp = temp;
+
+ return 0;
+}
+
+/* tsensor driver interrupt handler */
+static irqreturn_t tegra_tsensor_isr(int irq, void *arg_data)
+{
+ struct tegra_tsensor_data *data =
+ (struct tegra_tsensor_data *)arg_data;
+ unsigned long flags;
+ unsigned int val;
+ int new_state;
+
+ spin_lock_irqsave(&data->tsensor_lock, flags);
+
+ val = tsensor_readl(data, (data->tsensor_index << 16) | SENSOR_STATUS0);
+ if (val & TSENSOR_SENSOR_X_STATUS0_0_INTR_MASK) {
+ new_state = get_ts_state(data);
+
+ /* counter overflow check */
+ if (new_state == TS_OVERFLOW)
+ dev_err(data->hwmon_dev, "Warning: "
+ "***** OVERFLOW tsensor\n");
+
+ /* We only care if we go above hi or below low thresholds */
+ if (data->is_edp_supported && new_state != TS_LEVEL1)
+ queue_delayed_work(data->workqueue, &data->work, 0);
+ }
+
+ tsensor_writel(data, val, (data->tsensor_index << 16) | SENSOR_STATUS0);
+
+ spin_unlock_irqrestore(&data->tsensor_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * function to read fuse registers and give - T1, T2, F1 and F2
+ */
+static int read_tsensor_fuse_regs(struct tegra_tsensor_data *data)
+{
+ unsigned int reg1;
+ unsigned int T1 = 0, T2 = 0;
+ unsigned int spare_bits;
+ int err;
+
+ /* read tsensor calibration register */
+ /*
+ * High (~90 DegC) Temperature Calibration value (upper 16 bits of
+ * FUSE_TSENSOR_CALIB_0) - F2
+ * Low (~25 deg C) Temperature Calibration value (lower 16 bits of
+ * FUSE_TSENSOR_CALIB_0) - F1
+ */
+ err = tegra_fuse_get_tsensor_calibration_data(&reg1);
+ if (err)
+ goto errLabel;
+ data->fuse_F1 = reg1 & 0xFFFF;
+ data->fuse_F2 = (reg1 >> 16) & 0xFFFF;
+
+ err = tegra_fuse_get_tsensor_spare_bits(&spare_bits);
+ if (err) {
+ pr_err("tsensor spare bit fuse read error=%d\n", err);
+ goto errLabel;
+ }
+
+ /*
+ * FUSE_TJ_ADT_LOWT = T1, FUSE_TJ_ADJ = T2
+ */
+
+ /*
+ * Low temp is:
+ * FUSE_TJ_ADT_LOWT = bits [20:14] OR'ed with bits [27:21]
+ */
+ T1 = ((spare_bits >> 14) & 0x7F) |
+ ((spare_bits >> 21) & 0x7F);
+ dev_vdbg(data->hwmon_dev, "Tsensor low temp (T1) fuse :\n");
+
+ /*
+ * High temp is:
+ * FUSE_TJ_ADJ = bits [6:0] OR'ed with bits [13:7]
+ */
+ dev_vdbg(data->hwmon_dev, "Tsensor high temp (T2) fuse :\n");
+ T2 = (spare_bits & 0x7F) | ((spare_bits >> 7) & 0x7F);
+ pr_info("Tsensor fuse calibration F1=%d, F2=%d, T1=%d, T2=%d\n"
+ , data->fuse_F1, data->fuse_F2, T1, T2);
+ data->fuse_T1 = T1;
+ data->fuse_T2 = T2;
+ return 0;
+errLabel:
+ return err;
+}
+
+/* function to calculate interim temperature */
+static int calc_interim_temp(struct tegra_tsensor_data *data,
+ unsigned int counter, int *p_interim_temp)
+{
+ int val1;
+ /*
+ * T-int = A * Counter + B
+ * (Counter is the sensor frequency output)
+ */
+ if ((data->fuse_F2 - data->fuse_F1) <= (data->fuse_T2 -
+ data->fuse_T1)) {
+ dev_err(data->hwmon_dev, "Error: F2=%d, F1=%d "
+ "difference unexpectedly low. "
+ "Aborting temperature processing\n", data->fuse_F2,
+ data->fuse_F1);
+ return -EINVAL;
+ } else {
+ /* expression modified after assuming s_A is 10^6 times,
+ * s_B is 10^2 times and want end result to be 10^2 times
+ * actual value
+ */
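+ /* Worked example with hypothetical values: A_e_minus6 = 10000
+ * (A = 0.01 degC/count), B_e_minus2 = -2550 (B = -25.50 degC)
+ * and counter = 6000 give val1 = 10000 * 6000 / 10000 = 6000,
+ * so *p_interim_temp = 6000 - 2550 = 3450, i.e. 34.50 degC
+ * scaled by 100.
+ */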
+ val1 = DIV_ROUND_CLOSEST((data->A_e_minus6 * counter) , 10000);
+ dev_vdbg(data->hwmon_dev, "A*counter / 100 = %d\n",
+ val1);
+ *p_interim_temp = (val1 + data->B_e_minus2);
+ }
+ dev_dbg(data->hwmon_dev, "tsensor: counter=0x%x, interim "
+ "temp*100=%d\n",
+ counter, *p_interim_temp);
+ return 0;
+}
+
+/*
+ * function to calculate final temperature, given
+ * interim temperature
+ */
+static void calc_final_temp(struct tegra_tsensor_data *data,
+ int interim_temp, int *p_final_temp)
+{
+ int temp1, temp2, temp;
+ /*
+ * T-final = m * T-int ^2 + n * T-int + p
+ * m = -0.002775
+ * n = 1.338811
+ * p = -7.3
+ */
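+ /* Rough sanity check with hypothetical input: T-int = 34.50 degC and
+ * the SET1 coefficients give roughly -3.30 + 46.19 - 7.30 = 35.58 degC,
+ * so the fixed-point result below should land near 3558
+ * (temperature * 100), give or take rounding.
+ */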
+
+ dev_vdbg(data->hwmon_dev, "interim_temp=%d\n", interim_temp);
+ temp1 = (DIV_ROUND_CLOSEST((interim_temp * interim_temp) , 100));
+ dev_vdbg(data->hwmon_dev, "temp1=%d\n", temp1);
+ temp1 *= (DIV_ROUND_CLOSEST(data->m_e_minus6 , 10));
+ dev_vdbg(data->hwmon_dev, "m*T-int^2=%d\n", temp1);
+ temp1 = (DIV_ROUND_CLOSEST(temp1, 10000));
+ /* we want to keep 3 decimal point digits */
+ dev_vdbg(data->hwmon_dev, "m*T-int^2 / 10000=%d\n", temp1);
+ dev_dbg(data->hwmon_dev, "temp1*100=%d\n", temp1);
+
+ temp2 = (DIV_ROUND_CLOSEST(interim_temp * (
+ DIV_ROUND_CLOSEST(data->n_e_minus6, 100)
+ ), 1000)); /* 1000 times actual */
+ dev_vdbg(data->hwmon_dev, "n*T-int =%d\n", temp2);
+
+ temp = temp1 + temp2;
+ dev_vdbg(data->hwmon_dev, "m*T-int^2 + n*T-int =%d\n", temp);
+ temp += (data->p_e_minus2 * 10);
+ temp = DIV_ROUND_CLOSEST(temp, 10);
+ /* final temperature(temp) is 100 times actual value
+ * to preserve 2 decimal digits and enable fixed point
+ * computation
+ */
+ dev_vdbg(data->hwmon_dev, "m*T-int^2 + n*T-int + p =%d\n",
+ temp);
+ dev_dbg(data->hwmon_dev, "Final temp=%d.%d\n",
+ get_temperature_int(temp), get_temperature_fraction(temp));
+ *p_final_temp = (int)(temp);
+}
+
+/*
+ * Function to compute constants A and B needed for temperature
+ * calculation
+ * A = (T2-T1) / (F2-F1)
+ * B = T1 - A * F1
+ */
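+/*
+ * Worked example with hypothetical fuse values: T1 = 25, T2 = 90,
+ * F1 = 2200, F2 = 3000 give A = 65 / 800 = 0.08125, stored as
+ * A_e_minus6 = 81250, and B = 25 - 0.08125 * 2200 = -153.75, stored as
+ * B_e_minus2 = -15375.
+ */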
+static int tsensor_get_const_AB(struct tegra_tsensor_data *data)
+{
+ int err;
+
+ /*
+ * 1. Find fusing registers for 25C (T1, F1) and 90C (T2, F2);
+ */
+ err = read_tsensor_fuse_regs(data);
+ if (err) {
+ dev_err(data->hwmon_dev, "Fuse register read required "
+ "for internal tsensor returns err=%d\n", err);
+ return err;
+ }
+
+ if (data->fuse_F2 != data->fuse_F1) {
+ if ((data->fuse_F2 - data->fuse_F1) <= (data->fuse_T2 -
+ data->fuse_T1)) {
+ dev_err(data->hwmon_dev, "Error: F2=%d, "
+ "F1=%d, difference"
+ " unexpectedly low. Aborting temperature"
+ "computation\n", data->fuse_F2, data->fuse_F1);
+ return -EINVAL;
+ } else {
+ data->A_e_minus6 = ((data->fuse_T2 - data->fuse_T1) *
+ 1000000);
+ data->A_e_minus6 /= (data->fuse_F2 - data->fuse_F1);
+ data->B_e_minus2 = (data->fuse_T1 * 100) - (
+ DIV_ROUND_CLOSEST((data->A_e_minus6 *
+ data->fuse_F1), 10000));
+ /* B is 100 times now */
+ }
+ }
+ dev_dbg(data->hwmon_dev, "A_e_minus6 = %d\n", data->A_e_minus6);
+ dev_dbg(data->hwmon_dev, "B_e_minus2 = %d\n", data->B_e_minus2);
+ return 0;
+}
+
+/*
+ * function calculates expected temperature corresponding to
+ * given tsensor counter value
+ * Value returned is 100 times calculated temperature since the
+ * calculations are using fixed point arithmetic instead of floating point
+ */
+static int tsensor_count_2_temp(struct tegra_tsensor_data *data,
+ unsigned int count, int *p_temperature)
+{
+ int interim_temp;
+ int err;
+
+ /*
+ *
+ * 2. Calculate interim temperature:
+ */
+ err = calc_interim_temp(data, count, &interim_temp);
+ if (err < 0) {
+ dev_err(data->hwmon_dev, "tsensor: cannot read temperature\n");
+ *p_temperature = -1;
+ return err;
+ }
+
+ /*
+ *
+ * 3. Calculate final temperature:
+ */
+ calc_final_temp(data, interim_temp, p_temperature);
+ return 0;
+}
+
+/*
+ * utility function implements ceil to power of 10 -
+ * e.g. given 987 it returns 1000
+ */
+static int my_ceil_pow10(int num)
+{
+ int tmp;
+ int val = 1;
+ tmp = (num < 0) ? -num : num;
+ if (tmp == 0)
+ return 0;
+ while (tmp > 1) {
+ val *= 10;
+ tmp /= 10;
+ }
+ return val;
+}
+
+/*
+ * function to solve quadratic roots of equation
+ * used to get counter corresponding to given temperature
+ */
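+/*
+ * Substituting T-int = A * counter + B into
+ * T = m * T-int^2 + n * T-int + p and solving for counter gives
+ * counter = (-(2mB + n) +/- sqrt((2mB + n)^2 - 4m(mB^2 + nB + p - T)))
+ *           / (2mA)
+ * which is what the fixed-point arithmetic below evaluates: expr1 is
+ * (2mB + n), expr3/expr4/expr5 build the discriminant and expr9 is the
+ * 2mA denominator.
+ */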
+static void get_quadratic_roots(struct tegra_tsensor_data *data,
+ int temp, unsigned int *p_counter1,
+ unsigned int *p_counter2)
+{
+ /* expr1 = 2 * m * B + n */
+ int expr1_e_minus6;
+ /* expr2 = expr1^2 */
+ int expr2_e_minus6;
+ /* expr3 = m * B^2 + n * B + p */
+ int expr3_e_minus4_1;
+ int expr3_e_minus4_2;
+ int expr3_e_minus4;
+ int expr4_e_minus6;
+ int expr4_e_minus2_1;
+ int expr4_e_minus6_2;
+ int expr4_e_minus6_3;
+ int expr5_e_minus6, expr5_e_minus6_1, expr6, expr7;
+ int expr8_e_minus6, expr9_e_minus6;
+ int multiplier;
+ const int multiplier2 = 1000000;
+ int expr10_e_minus6, expr11_e_minus6;
+ int expr12, expr13;
+
+ dev_vdbg(data->hwmon_dev, "A_e_minus6=%d, B_e_minus2=%d, "
+ "m_e_minus6=%d, n_e_minus6=%d, p_e_minus2=%d, "
+ "temp=%d\n", data->A_e_minus6, data->B_e_minus2,
+ data->m_e_minus6,
+ data->n_e_minus6, data->p_e_minus2, (int)temp);
+ expr1_e_minus6 = (DIV_ROUND_CLOSEST((2 * data->m_e_minus6 *
+ data->B_e_minus2), 100) + data->n_e_minus6);
+ dev_vdbg(data->hwmon_dev, "2_m_B_plus_n_e_minus6=%d\n",
+ expr1_e_minus6);
+ expr2_e_minus6 = (DIV_ROUND_CLOSEST(expr1_e_minus6, 1000)) *
+ (DIV_ROUND_CLOSEST(expr1_e_minus6, 1000));
+ dev_vdbg(data->hwmon_dev, "expr1^2=%d\n", expr2_e_minus6);
+ expr3_e_minus4_1 = (DIV_ROUND_CLOSEST((
+ (DIV_ROUND_CLOSEST((data->m_e_minus6 * data->B_e_minus2),
+ 1000)) * (DIV_ROUND_CLOSEST(data->B_e_minus2, 10))), 100));
+ dev_vdbg(data->hwmon_dev, "expr3_e_minus4_1=%d\n",
+ expr3_e_minus4_1);
+ expr3_e_minus4_2 = DIV_ROUND_CLOSEST(
+ (DIV_ROUND_CLOSEST(data->n_e_minus6, 100) * data->B_e_minus2),
+ 100);
+ dev_vdbg(data->hwmon_dev, "expr3_e_minus4_2=%d\n",
+ expr3_e_minus4_2);
+ expr3_e_minus4 = expr3_e_minus4_1 + expr3_e_minus4_2;
+ dev_vdbg(data->hwmon_dev, "expr3=%d\n", expr3_e_minus4);
+ expr4_e_minus2_1 = DIV_ROUND_CLOSEST((expr3_e_minus4 +
+ (data->p_e_minus2 * 100)), 100);
+ dev_vdbg(data->hwmon_dev, "expr4_e_minus2_1=%d\n",
+ expr4_e_minus2_1);
+ expr4_e_minus6_2 = (4 * data->m_e_minus6);
+ dev_vdbg(data->hwmon_dev, "expr4_e_minus6_2=%d\n",
+ expr4_e_minus6_2);
+ expr4_e_minus6 = DIV_ROUND_CLOSEST((expr4_e_minus2_1 *
+ expr4_e_minus6_2), 100);
+ dev_vdbg(data->hwmon_dev, "expr4_minus6=%d\n", expr4_e_minus6);
+ expr5_e_minus6_1 = expr2_e_minus6 - expr4_e_minus6;
+ dev_vdbg(data->hwmon_dev, "expr5_e_minus6_1=%d\n",
+ expr5_e_minus6_1);
+ expr4_e_minus6_3 = (expr4_e_minus6_2 * temp);
+ dev_vdbg(data->hwmon_dev, "expr4_e_minus6_3=%d\n",
+ expr4_e_minus6_3);
+ expr5_e_minus6 = (expr5_e_minus6_1 + expr4_e_minus6_3);
+ dev_vdbg(data->hwmon_dev, "expr5_e_minus6=%d\n",
+ expr5_e_minus6);
+ multiplier = my_ceil_pow10(expr5_e_minus6);
+ dev_vdbg(data->hwmon_dev, "multiplier=%d\n", multiplier);
+ expr6 = int_sqrt(expr5_e_minus6);
+ dev_vdbg(data->hwmon_dev, "sqrt top=%d\n", expr6);
+ expr7 = int_sqrt(multiplier);
+ dev_vdbg(data->hwmon_dev, "sqrt bot=%d\n", expr7);
+ if (expr7 == 0) {
+ pr_err("Error: %s line=%d, expr7=%d\n",
+ __func__, __LINE__, expr7);
+ return;
+ } else {
+ expr8_e_minus6 = (expr6 * multiplier2) / expr7;
+ }
+ dev_vdbg(data->hwmon_dev, "sqrt final=%d\n", expr8_e_minus6);
+ dev_vdbg(data->hwmon_dev, "2_m_B_plus_n_e_minus6=%d\n",
+ expr1_e_minus6);
+ expr9_e_minus6 = DIV_ROUND_CLOSEST((2 * data->m_e_minus6 *
+ data->A_e_minus6), 1000000);
+ dev_vdbg(data->hwmon_dev, "denominator=%d\n", expr9_e_minus6);
+ if (expr9_e_minus6 == 0) {
+ pr_err("Error: %s line=%d, expr9_e_minus6=%d\n",
+ __func__, __LINE__, expr9_e_minus6);
+ return;
+ }
+ expr10_e_minus6 = -expr1_e_minus6 - expr8_e_minus6;
+ dev_vdbg(data->hwmon_dev, "expr10_e_minus6=%d\n",
+ expr10_e_minus6);
+ expr11_e_minus6 = -expr1_e_minus6 + expr8_e_minus6;
+ dev_vdbg(data->hwmon_dev, "expr11_e_minus6=%d\n",
+ expr11_e_minus6);
+ expr12 = (expr10_e_minus6 / expr9_e_minus6);
+ dev_vdbg(data->hwmon_dev, "counter1=%d\n", expr12);
+ expr13 = (expr11_e_minus6 / expr9_e_minus6);
+ dev_vdbg(data->hwmon_dev, "counter2=%d\n", expr13);
+ *p_counter1 = expr12;
+ *p_counter2 = expr13;
+}
+
+/*
+ * function returns tsensor expected counter corresponding to input
+ * temperature in degree Celsius.
+ * e.g. for temperature of 35C, temp=35
+ */
+static void tsensor_temp_2_count(struct tegra_tsensor_data *data,
+ int temp,
+ unsigned int *p_counter1,
+ unsigned int *p_counter2)
+{
+ if (temp > 0) {
+ dev_dbg(data->hwmon_dev, "Trying to calculate counter"
+ " for requested temperature"
+ " threshold=%d\n", temp);
+ /*
+ * calculate the constants needed to get roots of
+ * following quadratic eqn:
+ * m * A^2 * Counter^2 +
+ * A * (2 * m * B + n) * Counter +
+ * (m * B^2 + n * B + p - Temperature) = 0
+ */
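+ /*
+ * get_quadratic_roots() evaluates the two closed-form roots of
+ * that equation in fixed point:
+ * Counter = (-(2*m*B + n) +/- sqrt((2*m*B + n)^2 -
+ *            4*m*(m*B^2 + n*B + p - Temperature))) / (2*m*A)
+ */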
+ get_quadratic_roots(data, temp, p_counter1, p_counter2);
+ /*
+ * sanity data: a measured counter of 11418 corresponded to 35 deg;
+ * the roots returned were counter1=23137, counter2=11411 at 35 deg
+ * and counter1=22731, counter2=11817 at 50 deg, hence counter2 is
+ * assumed to be the correct root
+ */
+ } else {
+ *p_counter1 = DEFAULT_THRESHOLD_TH3;
+ *p_counter2 = DEFAULT_THRESHOLD_TH3;
+ }
+}
+
+/*
+ * function to compare computed and expected counter values
+ * against the hard-coded tolerance TSENSOR_COUNTER_TOLERANCE
+ */
+static bool cmp_counter(
+ struct tegra_tsensor_data *data,
+ unsigned int actual, unsigned int exp)
+{
+ unsigned int smaller;
+ unsigned int larger;
+ smaller = (actual > exp) ? exp : actual;
+ larger = (smaller == actual) ? exp : actual;
+ if ((larger - smaller) > TSENSOR_COUNTER_TOLERANCE) {
+ dev_dbg(data->hwmon_dev, "actual=%d, exp=%d, larger=%d, "
+ "smaller=%d, tolerance=%d\n", actual, exp, larger, smaller,
+ TSENSOR_COUNTER_TOLERANCE);
+ return false;
+ }
+ return true;
+}
+
+/* function to print chart of temperature to counter values */
+static void print_temperature_2_counter_table(
+ struct tegra_tsensor_data *data)
+{
+ int i;
+ /* static list of temperature tested */
+ int temp_list[] = {
+ 30,
+ 35,
+ 40,
+ 45,
+ 50,
+ 55,
+ 60,
+ 61,
+ 62,
+ 63,
+ 64,
+ 65,
+ 70,
+ 75,
+ 80,
+ 85,
+ 90,
+ 95,
+ 100,
+ 105,
+ 110,
+ 115,
+ 120
+ };
+ unsigned int counter1, counter2;
+ dev_dbg(data->hwmon_dev, "Temperature and counter1 and "
+ "counter2 chart **********\n");
+ for (i = 0; i < ARRAY_SIZE(temp_list); i++) {
+ tsensor_temp_2_count(data, temp_list[i],
+ &counter1, &counter2);
+ dev_dbg(data->hwmon_dev, "temperature[%d]=%d, "
+ "counter1=0x%x, counter2=0x%x\n",
+ i, temp_list[i], counter1, counter2);
+ }
+ dev_dbg(data->hwmon_dev, "\n\n");
+}
+
+static void dump_a_tsensor_reg(struct tegra_tsensor_data *data,
+ unsigned int addr)
+{
+ dev_dbg(data->hwmon_dev, "tsensor[%d][0x%x]: 0x%x\n", (addr >> 16),
+ addr & 0xFFFF, tsensor_readl(data, addr));
+}
+
+static void dump_tsensor_regs(struct tegra_tsensor_data *data)
+{
+ int i;
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ dump_a_tsensor_reg(data, ((i << 16) | SENSOR_CFG0));
+ dump_a_tsensor_reg(data, ((i << 16) | SENSOR_CFG1));
+ dump_a_tsensor_reg(data, ((i << 16) | SENSOR_CFG2));
+ dump_a_tsensor_reg(data, ((i << 16) | SENSOR_STATUS0));
+ dump_a_tsensor_reg(data, ((i << 16) | SENSOR_TS_STATUS1));
+ dump_a_tsensor_reg(data, ((i << 16) | SENSOR_TS_STATUS2));
+ dump_a_tsensor_reg(data, ((i << 16) | 0x0));
+ dump_a_tsensor_reg(data, ((i << 16) | 0x44));
+ dump_a_tsensor_reg(data, ((i << 16) | 0x50));
+ dump_a_tsensor_reg(data, ((i << 16) | 0x54));
+ dump_a_tsensor_reg(data, ((i << 16) | 0x64));
+ dump_a_tsensor_reg(data, ((i << 16) | 0x68));
+ }
+}
+
+/*
+ * function to test if conversion of counter to temperature
+ * and vice-versa is working
+ */
+static int test_temperature_algo(struct tegra_tsensor_data *data)
+{
+ unsigned int actual_counter;
+ unsigned int curr_avg;
+ unsigned int counter1, counter2;
+ int T1;
+ int err = 0;
+ bool result1, result2;
+ bool result = false;
+
+ /* read actual counter */
+ err = tsensor_read_counter(data, &curr_avg);
+ if (err < 0) {
+ pr_err("Error: tsensor0 counter read, err=%d\n", err);
+ goto endLabel;
+ }
+ actual_counter = ((curr_avg & 0xFFFF0000) >> 16);
+ dev_dbg(data->hwmon_dev, "counter read=0x%x\n", actual_counter);
+
+ /* calculate temperature */
+ err = tsensor_count_2_temp(data, actual_counter, &T1);
+ dev_dbg(data->hwmon_dev, "%s actual counter=0x%x, calculated "
+ "temperature=%d.%d\n", __func__,
+ actual_counter, get_temperature_int(T1),
+ get_temperature_fraction(T1));
+ if (err < 0) {
+ pr_err("Error: calculate temperature step\n");
+ goto endLabel;
+ }
+
+ /* calculate counter corresponding to read temperature */
+ tsensor_temp_2_count(data, get_temperature_round(T1),
+ &counter1, &counter2);
+ dev_dbg(data->hwmon_dev, "given temperature=%d, counter1=0x%x,"
+ " counter2=0x%x\n",
+ get_temperature_round(T1), counter1, counter2);
+
+ err = tsensor_count_2_temp(data, actual_counter, &T1);
+ dev_dbg(data->hwmon_dev, "%s 2nd time actual counter=0x%x, "
+ "calculated temperature=%d.%d\n", __func__,
+ actual_counter, get_temperature_int(T1),
+ get_temperature_fraction(T1));
+ if (err < 0) {
+ pr_err("Error: calculate temperature step\n");
+ goto endLabel;
+ }
+
+ /* compare counter calculated with actual original counter */
+ result1 = cmp_counter(data, actual_counter, counter1);
+ result2 = cmp_counter(data, actual_counter, counter2);
+ if (result1) {
+ dev_dbg(data->hwmon_dev, "counter1 matches: actual=%d,"
+ " calc=%d\n", actual_counter, counter1);
+ result = true;
+ }
+ if (result2) {
+ dev_dbg(data->hwmon_dev, "counter2 matches: actual=%d,"
+ " calc=%d\n", actual_counter, counter2);
+ result = true;
+ }
+ if (!result) {
+ pr_info("NO Match: actual=%d,"
+ " calc counter2=%d, counter1=%d\n", actual_counter,
+ counter2, counter1);
+ err = -EIO;
+ }
+
+endLabel:
+ return err;
+}
+
+/* tsensor threshold temperature to threshold counter conversion function */
+static unsigned int tsensor_get_threshold_counter(
+ struct tegra_tsensor_data *data,
+ int temp_threshold)
+{
+ unsigned int counter1, counter2;
+ unsigned int counter;
+
+ if (temp_threshold < 0)
+ return MAX_THRESHOLD;
+
+ tsensor_temp_2_count(data, temp_threshold, &counter1, &counter2);
+
+ counter = counter2;
+
+ return counter;
+}
+
+/* tsensor temperature threshold setup function */
+static void tsensor_threshold_setup(struct tegra_tsensor_data *data,
+ unsigned char index)
+{
+ unsigned long config0;
+ unsigned char i = index;
+ unsigned int th2_count = DEFAULT_THRESHOLD_TH2;
+ unsigned int th3_count = DEFAULT_THRESHOLD_TH3;
+ unsigned int th1_count = DEFAULT_THRESHOLD_TH1;
+ int th0_diff = 0;
+
+ dev_dbg(data->hwmon_dev, "started tsensor_threshold_setup %d\n",
+ index);
+ config0 = tsensor_readl(data, ((i << 16) | SENSOR_CFG0));
+
+ dev_dbg(data->hwmon_dev, "before threshold program TH dump:\n");
+ dump_threshold(data);
+ dev_dbg(data->hwmon_dev, "th3=0x%x, th2=0x%x, th1=0x%x, th0=0x%x\n",
+ th3_count, th2_count, th1_count, th0_diff);
+ config0 = (((th2_count & SENSOR_CFG_X_TH_X_MASK)
+ << SENSOR_CFG1_TH2_SHIFT) |
+ ((th1_count & SENSOR_CFG_X_TH_X_MASK) <<
+ SENSOR_CFG1_TH1_SHIFT));
+ tsensor_writel(data, config0, ((i << 16) | SENSOR_CFG1));
+ config0 = (((th0_diff & SENSOR_CFG_X_TH_X_MASK)
+ << SENSOR_CFG2_TH0_SHIFT) |
+ ((th3_count & SENSOR_CFG_X_TH_X_MASK) <<
+ SENSOR_CFG2_TH3_SHIFT));
+ tsensor_writel(data, config0, ((i << 16) | SENSOR_CFG2));
+ dev_dbg(data->hwmon_dev, "after threshold program TH dump:\n");
+ dump_threshold(data);
+}
+
+/* tsensor config programming function */
+static int tsensor_config_setup(struct tegra_tsensor_data *data)
+{
+ unsigned int config0;
+ unsigned int i;
+ unsigned int status_reg;
+ unsigned int no_resp_count;
+ int err = 0;
+
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ /*
+ * Pre-read setup:
+ * Set M and N values
+ * Enable HW features HW_FREQ_DIV_EN, THERMAL_RST_EN
+ */
+ config0 = tsensor_readl(data, ((i << 16) | SENSOR_CFG0));
+ config0 &= ~((SENSOR_CFG0_M_MASK << SENSOR_CFG0_M_SHIFT) |
+ (SENSOR_CFG0_N_MASK << SENSOR_CFG0_N_SHIFT) |
+ (1 << SENSOR_CFG0_OVERFLOW_INTR) |
+ (1 << SENSOR_CFG0_RST_INTR_SHIFT) |
+ (1 << SENSOR_CFG0_DVFS_INTR_SHIFT) |
+ (1 << SENSOR_CFG0_HW_DIV2_INTR_SHIFT) |
+ (1 << SENSOR_CFG0_RST_ENABLE_SHIFT) |
+ (1 << SENSOR_CFG0_HW_DIV2_ENABLE_SHIFT)
+ );
+ /* Set STOP bit */
+ /* Set M and N values */
+ /* Enable HW features HW_FREQ_DIV_EN, THERMAL_RST_EN */
+ config0 |= (((DEFAULT_TSENSOR_M & SENSOR_CFG0_M_MASK) <<
+ SENSOR_CFG0_M_SHIFT) |
+ ((DEFAULT_TSENSOR_N & SENSOR_CFG0_N_MASK) <<
+ SENSOR_CFG0_N_SHIFT) |
+ (1 << SENSOR_CFG0_OVERFLOW_INTR) |
+ (1 << SENSOR_CFG0_DVFS_INTR_SHIFT) |
+ (1 << SENSOR_CFG0_HW_DIV2_INTR_SHIFT) |
+#if ENABLE_TSENSOR_HW_RESET
+ (1 << SENSOR_CFG0_RST_ENABLE_SHIFT) |
+#endif
+ (1 << SENSOR_CFG0_STOP_SHIFT));
+
+ tsensor_writel(data, config0, ((i << 16) | SENSOR_CFG0));
+ tsensor_threshold_setup(data, i);
+ }
+
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ config0 = tsensor_readl(data, ((i << 16) | SENSOR_CFG0));
+ /*
+ * Interrupts are not enabled here since no software handling
+ * is needed in the rev1 driver; just clear the sensor STOP bit
+ * so the counters start running.
+ */
+ config0 &= ~(1 << SENSOR_CFG0_STOP_SHIFT);
+ tsensor_writel(data, config0, ((i << 16) | SENSOR_CFG0));
+ }
+
+ /* Check if counters are getting updated */
+ no_resp_count = 0;
+
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ /* if STOP bit is set skip this check */
+ config0 = tsensor_readl(data, ((i << 16) | SENSOR_CFG0));
+ if (!(config0 & (1 << SENSOR_CFG0_STOP_SHIFT))) {
+ unsigned int loop_count = 0;
+ do {
+ status_reg = tsensor_readl(data,
+ (i << 16) | SENSOR_STATUS0);
+ if ((status_reg & (1 <<
+ SENSOR_STATUS_AVG_VALID_SHIFT)) &&
+ (status_reg & (1 <<
+ SENSOR_STATUS_CURR_VALID_SHIFT))) {
+ msleep(21);
+ loop_count++;
+ if (!(loop_count % 200))
+ dev_err(data->hwmon_dev,
+ "Warning: tsensor%d counter "
+ "not valid yet\n", i);
+ if (loop_count > MAX_TSENSOR_LOOP1) {
+ no_resp_count++;
+ break;
+ }
+ }
+ } while (!(status_reg &
+ (1 << SENSOR_STATUS_AVG_VALID_SHIFT)) ||
+ (!(status_reg &
+ (1 << SENSOR_STATUS_CURR_VALID_SHIFT))));
+ if (no_resp_count == TSENSOR_COUNT) {
+ err = -ENODEV;
+ goto skip_all;
+ }
+ }
+ }
+ /* initialize tsensor chip coefficients */
+ get_chip_tsensor_coeff(data);
+skip_all:
+ return err;
+}
+
+/* function to enable tsensor clock */
+static int tsensor_clk_enable(
+ struct tegra_tsensor_data *data,
+ bool enable)
+{
+ int err = 0;
+ unsigned long rate;
+ struct clk *clk_m;
+
+ if (enable) {
+ clk_enable(data->dev_clk);
+ rate = clk_get_rate(data->dev_clk);
+ clk_m = clk_get_sys(NULL, "clk_m");
+ if (clk_get_parent(data->dev_clk) != clk_m) {
+ err = clk_set_parent(data->dev_clk, clk_m);
+ if (err < 0)
+ goto fail;
+ }
+ rate = DEFAULT_TSENSOR_CLK_HZ;
+ if (rate != clk_get_rate(clk_m)) {
+ err = clk_set_rate(data->dev_clk, rate);
+ if (err < 0)
+ goto fail;
+ }
+ } else {
+ clk_disable(data->dev_clk);
+ clk_put(data->dev_clk);
+ }
+fail:
+ return err;
+}
+
+/*
+ * function to set counter threshold corresponding to
+ * given temperature
+ */
+static void tsensor_set_limits(
+ struct tegra_tsensor_data *data,
+ int temp,
+ int threshold_index)
+{
+ unsigned int th_count;
+ unsigned int config;
+ unsigned short sft, offset;
+ unsigned int th1_count;
+
+ th_count = tsensor_get_threshold_counter(data, temp);
+ dev_dbg(data->hwmon_dev, "%s : input temp=%d, counter=0x%x\n", __func__,
+ temp, th_count);
+ switch (threshold_index) {
+ case TSENSOR_TH0:
+ sft = 16;
+ offset = SENSOR_CFG2;
+ /* TH1 is assumed to be programmed before TH0; otherwise TH0
+ * ends up equal to TH1 and the hysteresis is lost. The caller
+ * is expected to pass (TH1 - hysteresis) as the temp argument
+ * for this case */
+ th1_count = tsensor_readl(data,
+ ((data->tsensor_index << 16) |
+ SENSOR_CFG1));
+ th_count = (th1_count > th_count) ?
+ (th1_count - th_count) :
+ th1_count;
+ break;
+ case TSENSOR_TH1:
+ default:
+ sft = 0;
+ offset = SENSOR_CFG1;
+ break;
+ case TSENSOR_TH2:
+ sft = 16;
+ offset = SENSOR_CFG1;
+ break;
+ case TSENSOR_TH3:
+ sft = 0;
+ offset = SENSOR_CFG2;
+ break;
+ }
+ config = tsensor_readl(data, ((data->tsensor_index << 16) | offset));
+ dev_dbg(data->hwmon_dev, "%s: old config=0x%x, sft=%d, offset=0x%x\n",
+ __func__, config, sft, offset);
+ config &= ~(SENSOR_CFG_X_TH_X_MASK << sft);
+ config |= ((th_count & SENSOR_CFG_X_TH_X_MASK) << sft);
+ dev_dbg(data->hwmon_dev, "new config=0x%x\n", config);
+ tsensor_writel(data, config, ((data->tsensor_index << 16) | offset));
+}
+
+int tsensor_thermal_set_limits(struct tegra_tsensor_data *data,
+ long lo_limit_milli,
+ long hi_limit_milli)
+{
+ long lo_limit = MILLICELSIUS_TO_CELSIUS(lo_limit_milli);
+ long hi_limit = MILLICELSIUS_TO_CELSIUS(hi_limit_milli);
+ int i, j, hi_limit_first;
+
+ if (lo_limit_milli == hi_limit_milli)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ if (data->current_lo_limit == lo_limit_milli &&
+ data->current_hi_limit == hi_limit_milli) {
+ goto done;
+ }
+
+ /* If going up, change the hi limit first; if going down,
+ * change the lo limit first */
+ hi_limit_first = hi_limit_milli > data->current_hi_limit;
+
+ for (i = 0; i < 2; i++) {
+ j = (i + hi_limit_first) % 2;
+
+ switch (j) {
+ case 0:
+ tsensor_set_limits(data, hi_limit, TSENSOR_TH2);
+ data->current_hi_limit = hi_limit_milli;
+ break;
+ case 1:
+ tsensor_set_limits(data, lo_limit, TSENSOR_TH1);
+ data->current_lo_limit = lo_limit_milli;
+ break;
+ }
+ }
+
+
+done:
+ mutex_unlock(&data->mutex);
+ return 0;
+}
+
+int tsensor_thermal_set_alert(struct tegra_tsensor_data *data,
+ void (*alert_func)(void *),
+ void *alert_data)
+{
+ mutex_lock(&data->mutex);
+
+ data->alert_data = alert_data;
+ data->alert_func = alert_func;
+
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+int tsensor_thermal_set_shutdown_temp(struct tegra_tsensor_data *data,
+ long shutdown_temp_milli)
+{
+ long shutdown_temp = MILLICELSIUS_TO_CELSIUS(shutdown_temp_milli);
+ tsensor_set_limits(data, shutdown_temp, TSENSOR_TH3);
+
+ return 0;
+}
+
+static int tsensor_within_limits(struct tegra_tsensor_data *data)
+{
+ int ts_state = get_ts_state(data);
+
+ return (ts_state == TS_LEVEL1) ||
+ (ts_state == TS_LEVEL0 && data->current_lo_limit == 0);
+}
+
+static void tsensor_work_func(struct work_struct *work)
+{
+ struct tegra_tsensor_data *data = container_of(work,
+ struct tegra_tsensor_data, work);
+
+ if (!data->alert_func)
+ return;
+
+ if (!tsensor_within_limits(data)) {
+ data->alert_func(data->alert_data);
+
+ if (!tsensor_within_limits(data))
+ queue_delayed_work(data->workqueue, &data->work,
+ HZ * DEFAULT_TSENSOR_M /
+ DEFAULT_TSENSOR_CLK_HZ);
+ }
+}
+
+/*
+ * This function enables the tsensor using the default configuration.
+ * 1. Configuration APIs would still be needed to calibrate the
+ *    tsensor counters to the right temperature.
+ * 2. Hardware-triggered divide-cpu-clock-by-2 and PMU reset are
+ *    enabled; no software actions are enabled at this point.
+ */
+static int tegra_tsensor_setup(struct platform_device *pdev)
+{
+ struct tegra_tsensor_data *data = platform_get_drvdata(pdev);
+ struct resource *r;
+ int err = 0;
+ struct tegra_tsensor_platform_data *tsensor_data;
+ unsigned int reg;
+
+ data->dev_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(data->dev_clk)) {
+ dev_err(&pdev->dev, "Couldn't get the clock\n");
+ err = PTR_ERR(data->dev_clk);
+ goto fail;
+ }
+
+ /* Enable tsensor clock */
+ err = tsensor_clk_enable(data, true);
+ if (err < 0)
+ goto err_irq;
+
+ /* Reset tsensor */
+ dev_dbg(&pdev->dev, "before tsensor reset %s\n", __func__);
+ tegra_periph_reset_assert(data->dev_clk);
+ udelay(100);
+ tegra_periph_reset_deassert(data->dev_clk);
+ udelay(100);
+
+ dev_dbg(&pdev->dev, "before tsensor chk pmc reset %s\n",
+ __func__);
+ /* Check for previous resets in pmc */
+ if (pmc_check_rst_sensor(data)) {
+ dev_err(data->hwmon_dev, "Warning: last PMC reset "
+ "source was tsensor\n");
+ }
+
+ dev_dbg(&pdev->dev, "before tsensor pmc reset enable %s\n",
+ __func__);
+ /* Enable the sensor reset in PMC */
+ pmc_rst_enable(data, true);
+
+ dev_dbg(&pdev->dev, "before tsensor get platform data %s\n",
+ __func__);
+ dev_dbg(&pdev->dev, "tsensor platform_data=0x%x\n",
+ (unsigned int)pdev->dev.platform_data);
+ tsensor_data = pdev->dev.platform_data;
+
+ /* register interrupt */
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENXIO;
+ goto err_irq;
+ }
+ data->irq = r->start;
+ err = request_irq(data->irq, tegra_tsensor_isr,
+ IRQF_DISABLED, pdev->name, data);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to register IRQ\n");
+ goto err_irq;
+ }
+
+ dev_dbg(&pdev->dev, "tsensor platform_data=0x%x\n",
+ (unsigned int)pdev->dev.platform_data);
+
+ dev_dbg(&pdev->dev, "before tsensor_config_setup\n");
+ err = tsensor_config_setup(data);
+ if (err) {
+ dev_err(&pdev->dev, "[%s,line=%d]: tsensor counters dead!\n",
+ __func__, __LINE__);
+ goto err_setup;
+ }
+ dev_dbg(&pdev->dev, "before tsensor_get_const_AB\n");
+ /* calculate constants needed for temperature conversion */
+ err = tsensor_get_const_AB(data);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to extract temperature "
+ "constants\n");
+ goto err_setup;
+ }
+
+ /* test that counter-to-temperature and temperature-to-counter
+ * conversions are consistent */
+ err = test_temperature_algo(data);
+ if (err) {
+ dev_err(&pdev->dev, "Error: temperature conversion "
+ "self-test failed\n");
+ goto err_setup;
+ }
+
+ print_temperature_2_counter_table(data);
+
+ /* EDP and throttling support using tsensor enabled
+ * based on fuse revision */
+ err = tegra_fuse_get_revision(&reg);
+ if (err)
+ goto err_setup;
+
+ data->is_edp_supported = (reg >= STABLE_TSENSOR_FUSE_REV);
+
+ if (data->is_edp_supported) {
+ data->workqueue = create_singlethread_workqueue("tsensor");
+ INIT_DELAYED_WORK(&data->work, tsensor_work_func);
+ }
+
+ return 0;
+err_setup:
+ free_irq(data->irq, data);
+err_irq:
+ tsensor_clk_enable(data, false);
+fail:
+ dev_err(&pdev->dev, "%s error=%d returned\n", __func__, err);
+ return err;
+}
+
+static int __devinit tegra_tsensor_probe(struct platform_device *pdev)
+{
+ struct tegra_tsensor_data *data;
+ struct resource *r;
+ int err;
+ unsigned int reg;
+ u8 i;
+ struct tegra_tsensor_platform_data *tsensor_data;
+
+ data = kzalloc(sizeof(struct tegra_tsensor_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "[%s,line=%d]: Failed to allocate "
+ "memory\n", __func__, __LINE__);
+ err = -ENOMEM;
+ goto exit;
+ }
+ mutex_init(&data->mutex);
+ platform_set_drvdata(pdev, data);
+
+ /* Register sysfs hooks */
+ for (i = 0; i < ARRAY_SIZE(tsensor_nodes); i++) {
+ err = device_create_file(&pdev->dev,
+ &tsensor_nodes[i].dev_attr);
+ if (err) {
+ dev_err(&pdev->dev, "device_create_file failed.\n");
+ goto err0;
+ }
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto err1;
+ }
+
+ dev_set_drvdata(data->hwmon_dev, data);
+
+ spin_lock_init(&data->tsensor_lock);
+
+ /* map tsensor register space */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "[%s,line=%d]: Failed to get io "
+ "resource\n", __func__, __LINE__);
+ err = -ENODEV;
+ goto err2;
+ }
+
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "[%s,line=%d]: Error mem busy\n",
+ __func__, __LINE__);
+ err = -EBUSY;
+ goto err2;
+ }
+
+ data->phys = r->start;
+ data->phys_end = r->end;
+ data->base = ioremap(r->start, r->end - r->start + 1);
+ if (!data->base) {
+ dev_err(&pdev->dev, "[%s, line=%d]: can't ioremap "
+ "tsensor iomem\n", __FILE__, __LINE__);
+ err = -ENOMEM;
+ goto err3;
+ }
+
+ /* map pmc rst_status register */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "[%s,line=%d]: Failed to get io "
+ "resource\n", __func__, __LINE__);
+ err = -ENODEV;
+ goto err4;
+ }
+
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "[%s, line=%d]: Error mem busy\n",
+ __func__, __LINE__);
+ err = -EBUSY;
+ goto err4;
+ }
+
+ data->pmc_phys = r->start;
+ data->pmc_phys_end = r->end;
+ data->pmc_rst_base = ioremap(r->start, r->end - r->start + 1);
+ if (!data->pmc_rst_base) {
+ dev_err(&pdev->dev, "[%s, line=%d]: can't ioremap "
+ "pmc iomem\n", __FILE__, __LINE__);
+ err = -ENOMEM;
+ goto err5;
+ }
+
+ /*
+ * Fuse revisions below TSENSOR_FUSE_REV1 bypass tsensor driver
+ * init. The active tsensor instance is chosen from the fuse
+ * revision, checking the higher revision first:
+ * TSENSOR_INSTANCE1 is used from TSENSOR_FUSE_REV2 onwards,
+ * TSENSOR_INSTANCE2 from TSENSOR_FUSE_REV1 up to
+ * TSENSOR_FUSE_REV2.
+ */
+ err = tegra_fuse_get_revision(&reg);
+ if (err)
+ goto err6;
+ if (reg >= TSENSOR_FUSE_REV2)
+ data->tsensor_index = TSENSOR_INSTANCE1;
+ else if (reg >= TSENSOR_FUSE_REV1)
+ data->tsensor_index = TSENSOR_INSTANCE2;
+ pr_info("tsensor active instance=%d\n", data->tsensor_index);
+
+ /* tegra tsensor - setup and init */
+ err = tegra_tsensor_setup(pdev);
+ if (err)
+ goto err6;
+
+ dump_tsensor_regs(data);
+ dev_dbg(&pdev->dev, "end tegra_tsensor_probe\n");
+
+ tsensor_data = pdev->dev.platform_data;
+ if (tsensor_data->probe_callback)
+ tsensor_data->probe_callback(data);
+
+ return 0;
+err6:
+ iounmap(data->pmc_rst_base);
+err5:
+ release_mem_region(data->pmc_phys, (data->pmc_phys_end -
+ data->pmc_phys) + 1);
+err4:
+ iounmap(data->base);
+err3:
+ release_mem_region(data->phys, (data->phys_end -
+ data->phys) + 1);
+err2:
+ hwmon_device_unregister(data->hwmon_dev);
+err1:
+ for (i = 0; i < ARRAY_SIZE(tsensor_nodes); i++)
+ device_remove_file(&pdev->dev, &tsensor_nodes[i].dev_attr);
+err0:
+ kfree(data);
+exit:
+ dev_err(&pdev->dev, "%s error=%d returned\n", __func__, err);
+ return err;
+}
+
+static int __devexit tegra_tsensor_remove(struct platform_device *pdev)
+{
+ struct tegra_tsensor_data *data = platform_get_drvdata(pdev);
+ u8 i;
+
+ hwmon_device_unregister(data->hwmon_dev);
+ for (i = 0; i < ARRAY_SIZE(tsensor_nodes); i++)
+ device_remove_file(&pdev->dev, &tsensor_nodes[i].dev_attr);
+
+ if (data->is_edp_supported) {
+ cancel_delayed_work_sync(&data->work);
+ destroy_workqueue(data->workqueue);
+ data->workqueue = NULL;
+ }
+
+ free_irq(data->irq, data);
+
+ iounmap(data->pmc_rst_base);
+ release_mem_region(data->pmc_phys, (data->pmc_phys_end -
+ data->pmc_phys) + 1);
+ iounmap(data->base);
+ release_mem_region(data->phys, (data->phys_end -
+ data->phys) + 1);
+
+ kfree(data);
+
+ return 0;
+}
+
+static void save_tsensor_regs(struct tegra_tsensor_data *data)
+{
+ int i;
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ data->config0[i] = tsensor_readl(data,
+ ((i << 16) | SENSOR_CFG0));
+ data->config1[i] = tsensor_readl(data,
+ ((i << 16) | SENSOR_CFG1));
+ data->config2[i] = tsensor_readl(data,
+ ((i << 16) | SENSOR_CFG2));
+ }
+}
+
+static void restore_tsensor_regs(struct tegra_tsensor_data *data)
+{
+ int i;
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ tsensor_writel(data, data->config0[i],
+ ((i << 16) | SENSOR_CFG0));
+ tsensor_writel(data, data->config1[i],
+ ((i << 16) | SENSOR_CFG1));
+ tsensor_writel(data, data->config2[i],
+ ((i << 16) | SENSOR_CFG2));
+ }
+}
+
+#ifdef CONFIG_PM
+static int tsensor_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct tegra_tsensor_data *data = platform_get_drvdata(pdev);
+ unsigned int config0;
+ int i;
+ /* set STOP bit, else OVERFLOW interrupt seen in LP1 */
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ config0 = tsensor_readl(data, ((i << 16) | SENSOR_CFG0));
+ config0 |= (1 << SENSOR_CFG0_STOP_SHIFT);
+ tsensor_writel(data, config0, ((i << 16) | SENSOR_CFG0));
+ }
+ /* save current settings before suspend, when STOP bit is set */
+ save_tsensor_regs(data);
+ tsensor_clk_enable(data, false);
+
+ return 0;
+}
+
+static int tsensor_resume(struct platform_device *pdev)
+{
+ struct tegra_tsensor_data *data = platform_get_drvdata(pdev);
+ unsigned int config0;
+ int i;
+ tsensor_clk_enable(data, true);
+ /* restore the settings saved before suspend; the STOP bit is
+ * still set in the saved values */
+ restore_tsensor_regs(data);
+ /* clear STOP bit, after restoring regs */
+ for (i = 0; i < TSENSOR_COUNT; i++) {
+ config0 = tsensor_readl(data, ((i << 16) | SENSOR_CFG0));
+ config0 &= ~(1 << SENSOR_CFG0_STOP_SHIFT);
+ tsensor_writel(data, config0, ((i << 16) | SENSOR_CFG0));
+ }
+
+ if (data->is_edp_supported)
+ schedule_delayed_work(&data->work, 0);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_tsensor_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tegra-tsensor",
+ },
+ .probe = tegra_tsensor_probe,
+ .remove = __devexit_p(tegra_tsensor_remove),
+#ifdef CONFIG_PM
+ .suspend = tsensor_suspend,
+ .resume = tsensor_resume,
+#endif
+};
+
+static int __init tegra_tsensor_init(void)
+{
+ return platform_driver_register(&tegra_tsensor_driver);
+}
+module_init(tegra_tsensor_init);
+
+static void __exit tegra_tsensor_exit(void)
+{
+ platform_driver_unregister(&tegra_tsensor_driver);
+}
+module_exit(tegra_tsensor_exit);
+
+MODULE_AUTHOR("nvidia");
+MODULE_DESCRIPTION("Nvidia Tegra Temperature Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5f13c62e64b4..c4b583dc2567 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -60,6 +60,16 @@ config I2C_MUX
source drivers/i2c/muxes/Kconfig
+config I2C_SLAVE
+ bool "I2C slave driver support"
+ default n
+ help
+ Say Y here if you want I2C slave functionality in the driver.
+ The external system acts as the master and the system on which
+ this driver is running acts as an I2C slave.
+ This driver supports reading and writing data from master
+ devices over I2C.
+
config I2C_HELPER_AUTO
bool "Autoselect pertinent helper modules"
default y
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index beee6b2d361d..b4fc25188117 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -1,12 +1,14 @@
#
# Makefile for the i2c core.
#
+GCOV_PROFILE := y
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2c-core.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
+obj-$(CONFIG_I2C_SLAVE) += i2c-slave.o
obj-y += algos/ busses/ muxes/
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 646068e5100b..4c922f39bc47 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -641,6 +641,14 @@ config I2C_TEGRA
If you say yes to this option, support will be included for the
I2C controller embedded in NVIDIA Tegra SOCs
+config I2C_SLAVE_TEGRA
+ tristate "NVIDIA Tegra internal I2C slave controller"
+ depends on ARCH_TEGRA && I2C_SLAVE
+ default n
+ help
+ If you say yes to this option, support will be included for the
+ I2C slave controller embedded in NVIDIA Tegra SOCs
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e6cf294d3729..ce35a87a7012 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the i2c bus drivers.
#
+GCOV_PROFILE := y
# ACPI drivers
obj-$(CONFIG_I2C_SCMI) += i2c-scmi.o
@@ -62,6 +63,7 @@ obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
+obj-$(CONFIG_I2C_SLAVE_TEGRA) += i2c-slave-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
diff --git a/drivers/i2c/busses/i2c-slave-tegra.c b/drivers/i2c/busses/i2c-slave-tegra.c
new file mode 100644
index 000000000000..f2468d85c1f8
--- /dev/null
+++ b/drivers/i2c/busses/i2c-slave-tegra.c
@@ -0,0 +1,1114 @@
+/*
+ * drivers/i2c/busses/i2c-slave-tegra.c
+ * I2C slave driver for NVIDIA's Tegra SoC.
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/i2c-tegra.h>
+#include <linux/i2c-slave.h>
+#include <asm/unaligned.h>
+#include <mach/clk.h>
+#include <mach/pinmux.h>
+#include <linux/pm_runtime.h>
+#define BYTES_PER_FIFO_WORD 4
+#define to_jiffies(msecs) msecs_to_jiffies(msecs)
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
+#define I2C_CNFG_DEBOUNCE_CNT_MASK (0x7)
+
+#define I2C_STATUS 0x01C
+
+#define I2C_SLV_CNFG 0x020
+#define I2C_SLV_CNFG_NEWSL (1<<2)
+#define I2C_SLV_CNFG_ENABLE_SL (1<<3)
+#define I2C_SLV_CNFG_PKT_MODE_EN (1<<4)
+#define I2C_SLV_CNFG_FIFO_XFER_EN (1<<20)
+#define I2C_SLV_CNFG_ACK_LAST_BYTE (1<<6)
+#define I2C_SLV_CNFG_ACK_LAST_BYTE_VALID (1<<7)
+
+#define I2C_SLV_ADDR1 0x02c
+#define I2C_SLV_ADDR1_ADDR_SHIFT 0x0
+
+#define I2C_SLV_ADDR2 0x030
+#define I2C_SLV_ADDR2_ADDR0_HI_SHIFT 0x1
+#define I2C_SLV_ADDR2_ADDR0_MASK 0x7
+#define I2C_SLV_ADDR2_ADDR0_TEN_BIT_ADDR_MODE 0x1
+
+#define I2C_SLV_INT_MASK 0x040
+
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_SLV_TX_FLUSH (1<<9)
+#define I2C_FIFO_CONTROL_SLV_RX_FLUSH (1<<8)
+#define I2C_FIFO_CONTROL_SLV_TX_TRIG_SHIFT 13
+#define I2C_FIFO_CONTROL_SLV_TX_TRIG_MASK (0x7 << 13)
+#define I2C_FIFO_CONTROL_SLV_RX_TRIG_SHIFT 10
+#define I2C_FIFO_CONTROL_SLV_RX_TRIG_MASK (1 << 10)
+
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_SLV_TX_MASK (0xF << 20)
+#define I2C_FIFO_STATUS_SLV_TX_SHIFT 20
+#define I2C_FIFO_STATUS_SLV_RX_MASK (0x0F << 16)
+#define I2C_FIFO_STATUS_SLV_RX_SHIFT 16
+
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+
+#define I2C_INT_SLV_PKT_XFER_ERR (1 << 25)
+#define I2C_INT_SLV_TX_BUFFER_REQ (1 << 24)
+#define I2C_INT_SLV_RX_BUFFER_FILLED (1 << 23)
+#define I2C_INT_SLV_PACKET_XFER_COMPLETE (1 << 22)
+#define I2C_INT_SLV_TFIFO_OVF_REQ (1 << 21)
+#define I2C_INT_SLV_RFIFO_UNF_REQ (1 << 20)
+#define I2C_INT_SLV_TFIFO_DATA_REQ (1 << 17)
+#define I2C_INT_SLV_RFIFO_DATA_REQ (1 << 16)
+
+#define I2C_SLV_TX_FIFO 0x078
+#define I2C_SLV_RX_FIFO 0x07c
+
+#define I2C_SLV_PACKET_STATUS 0x80
+#define I2C_SLV_PACKET_STATUS_BYTENUM_SHIFT 4
+#define I2C_SLV_PACKET_STATUS_BYTENUM_MASK 0xFFF0
+
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+#define I2C_ERR_UNKNOWN_INTERRUPT 0x04
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+#define I2C_FIFO_DEPTH 8
+/* Transfer state of the i2c slave */
+#define TRANSFER_STATE_NONE 0
+#define TRANSFER_STATE_READ 1
+#define TRANSFER_STATE_WRITE 2
+
+#define I2C_SLV_TRANS_PREMATURE_END I2C_INT_SLV_PKT_XFER_ERR
+
+#define I2C_SLV_TRANS_ALL_XFER_END I2C_INT_SLV_PACKET_XFER_COMPLETE
+
+#define I2C_SLV_TRANS_END \
+ (I2C_INT_SLV_PKT_XFER_ERR | I2C_INT_SLV_PACKET_XFER_COMPLETE)
+
+#define I2C_INT_STATUS_RX_BUFFER_FILLED I2C_INT_SLV_RX_BUFFER_FILLED
+
+#define I2C_INT_STATUS_RX_DATA_AVAILABLE \
+ (I2C_INT_SLV_RX_BUFFER_FILLED | I2C_INT_SLV_RFIFO_DATA_REQ)
+
+#define I2C_INT_STATUS_TX_BUFFER_REQUEST \
+ (I2C_INT_SLV_TX_BUFFER_REQ | I2C_INT_SLV_TFIFO_DATA_REQ)
+
+#define I2C_SLV_ERRORS_INT_MASK (I2C_INT_SLV_TFIFO_OVF_REQ | \
+ I2C_INT_SLV_RFIFO_UNF_REQ | I2C_INT_SLV_PKT_XFER_ERR)
+
+#define I2C_SLV_DEFAULT_INT_MASK (I2C_INT_SLV_TFIFO_OVF_REQ | \
+ I2C_INT_SLV_RFIFO_UNF_REQ | I2C_INT_SLV_PKT_XFER_ERR | \
+ I2C_INT_SLV_RX_BUFFER_FILLED | I2C_INT_SLV_TX_BUFFER_REQ)
+
+struct tegra_i2c_slave_dev;
+
+struct tegra_i2c_slave_bus {
+ struct tegra_i2c_slave_dev *dev;
+ const struct tegra_pingroup_config *pinmux;
+ int mux_len;
+ unsigned long bus_clk_rate;
+ struct i2c_slave_adapter slv_adap;
+};
+
+struct tegra_i2c_slave_dev {
+ struct device *dev;
+ struct clk *clk;
+ struct resource *iomem;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ spinlock_t lock;
+ struct completion rx_msg_complete;
+ struct completion tx_msg_complete;
+ bool is_rx_waiting;
+ bool is_tx_waiting;
+ u8 *rx_msg_buff;
+ int rx_msg_buf_size;
+ int rx_msg_head;
+ int rx_msg_tail;
+ u8 *tx_msg_buff;
+ int tx_msg_buf_size;
+ int tx_msg_head;
+ int tx_msg_tail;
+ bool is_slave_started;
+ int slave_add;
+ bool is_ten_bit_addr;
+ u32 dummy_word;
+ unsigned long rx_pack_hdr1;
+ unsigned long rx_pack_hdr2;
+ unsigned long rx_pack_hdr3;
+ int curr_transfer;
+ unsigned long int_mask;
+ int nack_packet_count;
+ bool is_first_byte_read_wait;
+ int curr_packet_bytes_read;
+ unsigned int cont_status;
+ bool is_dummy_char_cycle;
+ unsigned long curr_packet_tx_tail;
+ const struct tegra_pingroup_config *pin_mux;
+ int bus_clk;
+ struct tegra_i2c_slave_bus bus;
+};
+
+#define get_space_count(rInd, wInd, maxsize) \
+ (((wInd > rInd) ? (maxsize - wInd + rInd) : (rInd - wInd)) - 1)
+
+#define get_data_count(rInd, wInd, maxsize) \
+ ((wInd >= rInd) ? (wInd - rInd) : (maxsize - rInd + wInd - 1))
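+
+/*
+ * The macros above treat rInd/wInd as the read and write indices of a
+ * circular buffer of size maxsize, with one slot kept unused so that a
+ * full buffer can be told apart from an empty one. For example, with
+ * maxsize = 8, rInd = 2 and wInd = 6, get_data_count() is 4 and
+ * get_space_count() is 3.
+ */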
+
+static void set_tx_trigger_level(struct tegra_i2c_slave_dev *i2c_dev, int trig)
+{
+ unsigned long fifo_control = readl(i2c_dev->base + I2C_FIFO_CONTROL);
+ if (trig) {
+ fifo_control &= ~I2C_FIFO_CONTROL_SLV_TX_TRIG_MASK;
+ fifo_control |= (trig-1) << I2C_FIFO_CONTROL_SLV_TX_TRIG_SHIFT;
+ writel(fifo_control, i2c_dev->base + I2C_FIFO_CONTROL);
+ }
+}
+
+static void set_rx_trigger_level(struct tegra_i2c_slave_dev *i2c_dev, int trig)
+{
+ unsigned long fifo_control = readl(i2c_dev->base + I2C_FIFO_CONTROL);
+ if (trig) {
+ fifo_control &= ~I2C_FIFO_CONTROL_SLV_RX_TRIG_MASK;
+ fifo_control |= (trig-1) << I2C_FIFO_CONTROL_SLV_RX_TRIG_SHIFT;
+ writel(fifo_control, i2c_dev->base + I2C_FIFO_CONTROL);
+ }
+}
+
+static void reset_slave_tx_fifo(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ unsigned long fifo_control = readl(i2c_dev->base + I2C_FIFO_CONTROL);
+ unsigned long timeout_count = 1000;
+
+ writel(fifo_control | I2C_FIFO_CONTROL_SLV_TX_FLUSH,
+ i2c_dev->base + I2C_FIFO_CONTROL);
+ while (timeout_count--) {
+ fifo_control = readl(i2c_dev->base + I2C_FIFO_CONTROL);
+ if (!(fifo_control & I2C_FIFO_CONTROL_SLV_TX_FLUSH))
+ break;
+ udelay(1);
+ }
+ if (!timeout_count) {
+ dev_err(i2c_dev->dev, "Not able to flush tx fifo\n");
+ BUG();
+ }
+}
+
+static void do_tx_fifo_empty(struct tegra_i2c_slave_dev *i2c_dev,
+ unsigned long *empty_count)
+{
+ unsigned long fifo_status = readl(i2c_dev->base + I2C_FIFO_STATUS);
+ unsigned long tx_fifo_empty_count;
+
+ tx_fifo_empty_count = (fifo_status & I2C_FIFO_STATUS_SLV_TX_MASK) >>
+ I2C_FIFO_STATUS_SLV_TX_SHIFT;
+ if (tx_fifo_empty_count < I2C_FIFO_DEPTH)
+ reset_slave_tx_fifo(i2c_dev);
+ if (empty_count)
+ *empty_count = tx_fifo_empty_count;
+}
+
+static void get_packet_headers(struct tegra_i2c_slave_dev *i2c_dev, u32 msg_len,
+ u32 flags, unsigned long *packet_header1,
+ unsigned long *packet_header2, unsigned long *packet_header3)
+{
+ unsigned long packet_header;
+ *packet_header1 = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ PACKET_HEADER0_PROTOCOL_I2C |
+ (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+ (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+ *packet_header2 = msg_len-1;
+ if (i2c_dev->is_ten_bit_addr)
+ packet_header = i2c_dev->slave_add | I2C_HEADER_10BIT_ADDR;
+ else
+ packet_header = i2c_dev->slave_add <<
+ I2C_HEADER_SLAVE_ADDR_SHIFT;
+
+ if (flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+
+ *packet_header3 = packet_header;
+}
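+
+/*
+ * Summary of the three header words built above (as used by this
+ * driver): word 1 carries the protocol, controller id and packet id
+ * fields, word 2 is the payload length in bytes minus one, and word 3
+ * holds the slave address (plus the 10-bit flag when applicable) and
+ * the READ bit for master-read transfers.
+ */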
+
+static void configure_i2c_slave_packet_mode(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ unsigned long i2c_config;
+ i2c_config = I2C_CNFG_PACKET_MODE_EN | I2C_CNFG_NEW_MASTER_FSM;
+ i2c_config |= (2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
+ writel(i2c_config, i2c_dev->base + I2C_CNFG);
+}
+
+static void configure_i2c_slave_address(struct tegra_i2c_slave_dev *i2c_dev)
+{
+
+ unsigned long slave_add_reg;
+ unsigned long i2c_slv_config;
+ unsigned long slave_add;
+
+ if (i2c_dev->is_ten_bit_addr) {
+ slave_add = i2c_dev->slave_add & 0xFF;
+ slave_add_reg = readl(i2c_dev->base + I2C_SLV_ADDR1);
+ slave_add_reg &= ~(0xFF);
+ slave_add_reg |= slave_add << I2C_SLV_ADDR1_ADDR_SHIFT;
+ writel(slave_add_reg, i2c_dev->base + I2C_SLV_ADDR1);
+
+ slave_add = (i2c_dev->slave_add >> 8) & 0x3;
+ slave_add_reg = readl(i2c_dev->base + I2C_SLV_ADDR2);
+ slave_add_reg &= ~I2C_SLV_ADDR2_ADDR0_MASK;
+ slave_add_reg |= slave_add |
+ I2C_SLV_ADDR2_ADDR0_TEN_BIT_ADDR_MODE;
+ writel(slave_add_reg, i2c_dev->base + I2C_SLV_ADDR2);
+ } else {
+ slave_add = (i2c_dev->slave_add & 0x3FF);
+ slave_add_reg = readl(i2c_dev->base + I2C_SLV_ADDR1);
+ slave_add_reg &= ~(0x3FF);
+ slave_add_reg |= slave_add << I2C_SLV_ADDR1_ADDR_SHIFT;
+ writel(slave_add_reg, i2c_dev->base + I2C_SLV_ADDR1);
+
+ slave_add_reg = readl(i2c_dev->base + I2C_SLV_ADDR2);
+ slave_add_reg &= ~I2C_SLV_ADDR2_ADDR0_MASK;
+ writel(slave_add_reg, i2c_dev->base + I2C_SLV_ADDR2);
+ }
+
+ i2c_slv_config = I2C_SLV_CNFG_NEWSL;
+ if (i2c_dev->slave_add) {
+ i2c_slv_config = I2C_SLV_CNFG_ENABLE_SL |
+ I2C_SLV_CNFG_PKT_MODE_EN |
+ I2C_SLV_CNFG_FIFO_XFER_EN;
+ }
+ writel(i2c_slv_config, i2c_dev->base + I2C_SLV_CNFG);
+}
+
+static void copy_rx_data(struct tegra_i2c_slave_dev *i2c_dev, u8 rcv_char)
+{
+ if (get_space_count(i2c_dev->rx_msg_tail, i2c_dev->rx_msg_head,
+ i2c_dev->rx_msg_buf_size)){
+ i2c_dev->rx_msg_buff[i2c_dev->rx_msg_head++] = rcv_char;
+ if (i2c_dev->rx_msg_head == i2c_dev->rx_msg_buf_size)
+ i2c_dev->rx_msg_head = 0;
+ } else {
+ dev_warn(i2c_dev->dev, "The slave rx buffer is full, ignoring "
+ "new receive data\n");
+ }
+}
+
+static void handle_packet_first_byte_read(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ unsigned long fifo_status;
+ int filled_slots;
+ unsigned long i2c_sl_config;
+ unsigned long recv_data;
+
+ fifo_status = readl(i2c_dev->base + I2C_FIFO_STATUS);
+ filled_slots = (fifo_status & I2C_FIFO_STATUS_SLV_RX_MASK) >>
+ I2C_FIFO_STATUS_SLV_RX_SHIFT;
+
+ writel(I2C_INT_STATUS_RX_DATA_AVAILABLE,
+ i2c_dev->base + I2C_INT_STATUS);
+ if (unlikely(filled_slots != 1)) {
+ dev_err(i2c_dev->dev, "Unexpected number of filled slots %d\n",
+ filled_slots);
+ BUG();
+ }
+ recv_data = readl(i2c_dev->base + I2C_SLV_RX_FIFO);
+ copy_rx_data(i2c_dev, (u8)recv_data);
+
+ i2c_dev->is_first_byte_read_wait = false;
+ i2c_dev->curr_transfer = TRANSFER_STATE_READ;
+ i2c_dev->curr_packet_bytes_read = 0;
+
+ /* Write packet Header */
+ writel(i2c_dev->rx_pack_hdr1, i2c_dev->base + I2C_SLV_TX_FIFO);
+ writel(i2c_dev->rx_pack_hdr2, i2c_dev->base + I2C_SLV_TX_FIFO);
+ writel(i2c_dev->rx_pack_hdr3, i2c_dev->base + I2C_SLV_TX_FIFO);
+
+ set_rx_trigger_level(i2c_dev, 4);
+ i2c_dev->int_mask |= I2C_INT_SLV_RFIFO_DATA_REQ;
+ writel(i2c_dev->int_mask, i2c_dev->base + I2C_INT_MASK);
+
+ /* Ack the master */
+ i2c_sl_config = readl(i2c_dev->base + I2C_SLV_CNFG);
+ i2c_sl_config |= I2C_SLV_CNFG_ACK_LAST_BYTE |
+ I2C_SLV_CNFG_ACK_LAST_BYTE_VALID;
+ writel(i2c_sl_config, i2c_dev->base + I2C_SLV_CNFG);
+}
+
+static void handle_packet_byte_read(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ unsigned long fifo_status;
+ int i, j;
+ int filled_slots;
+ unsigned long recv_data;
+ int curr_xfer_size;
+
+ fifo_status = readl(i2c_dev->base + I2C_FIFO_STATUS);
+ filled_slots = (fifo_status & I2C_FIFO_STATUS_SLV_RX_MASK) >>
+ I2C_FIFO_STATUS_SLV_RX_SHIFT;
+
+ curr_xfer_size = BYTES_PER_FIFO_WORD * filled_slots;
+ if (i2c_dev->cont_status & I2C_SLV_TRANS_PREMATURE_END) {
+ curr_xfer_size = readl(i2c_dev->base + I2C_SLV_PACKET_STATUS);
+ curr_xfer_size =
+ (curr_xfer_size & I2C_SLV_PACKET_STATUS_BYTENUM_MASK) >>
+ I2C_SLV_PACKET_STATUS_BYTENUM_SHIFT;
+
+ BUG_ON(filled_slots != ((curr_xfer_size -
+ i2c_dev->curr_packet_bytes_read + 3) >> 2));
+ curr_xfer_size -= i2c_dev->curr_packet_bytes_read;
+ }
+
+ i2c_dev->curr_packet_bytes_read += curr_xfer_size;
+ for (i = 0; i < filled_slots; ++i) {
+ recv_data = readl(i2c_dev->base + I2C_SLV_RX_FIFO);
+ for (j = 0; j < BYTES_PER_FIFO_WORD; ++j) {
+ copy_rx_data(i2c_dev, (u8)(recv_data >> j*8));
+ curr_xfer_size--;
+ if (!curr_xfer_size)
+ break;
+ }
+ }
+ if (i2c_dev->cont_status & I2C_SLV_TRANS_PREMATURE_END) {
+ writel(I2C_SLV_TRANS_END | I2C_INT_STATUS_RX_BUFFER_FILLED,
+ i2c_dev->base + I2C_INT_STATUS);
+
+ i2c_dev->is_first_byte_read_wait = true;
+ i2c_dev->curr_transfer = TRANSFER_STATE_NONE;
+ i2c_dev->curr_packet_bytes_read = 0;
+ set_rx_trigger_level(i2c_dev, 1);
+ writel(0, i2c_dev->base + I2C_SLV_INT_MASK);
+ i2c_dev->int_mask = I2C_SLV_DEFAULT_INT_MASK;
+ writel(i2c_dev->int_mask, i2c_dev->base + I2C_INT_MASK);
+ }
+}
+
+static void handle_rx_interrupt(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ if (i2c_dev->is_first_byte_read_wait)
+ handle_packet_first_byte_read(i2c_dev);
+ else
+ handle_packet_byte_read(i2c_dev);
+
+ if (i2c_dev->is_rx_waiting) {
+ complete(&i2c_dev->rx_msg_complete);
+ i2c_dev->is_rx_waiting = false;
+ }
+}
+
+static void handle_tx_transaction_end(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ unsigned long curr_packet_size;
+
+ i2c_dev->curr_transfer = TRANSFER_STATE_NONE;
+ curr_packet_size = readl(i2c_dev->base + I2C_SLV_PACKET_STATUS);
+ curr_packet_size =
+ (curr_packet_size & I2C_SLV_PACKET_STATUS_BYTENUM_MASK) >>
+ I2C_SLV_PACKET_STATUS_BYTENUM_SHIFT;
+
+ /* Get transfer count from request size.*/
+ if ((curr_packet_size == 0) &&
+ (i2c_dev->cont_status & I2C_SLV_TRANS_ALL_XFER_END) &&
+ (!(i2c_dev->cont_status & I2C_SLV_TRANS_PREMATURE_END))) {
+ if (!i2c_dev->is_dummy_char_cycle)
+ i2c_dev->tx_msg_tail = i2c_dev->curr_packet_tx_tail;
+ } else {
+ if (!i2c_dev->is_dummy_char_cycle) {
+ i2c_dev->tx_msg_tail += curr_packet_size;
+ if (i2c_dev->tx_msg_tail >= i2c_dev->tx_msg_buf_size)
+ i2c_dev->tx_msg_tail -=
+ i2c_dev->tx_msg_buf_size;
+ }
+ }
+ writel(I2C_SLV_TRANS_END, i2c_dev->base + I2C_INT_STATUS);
+
+ i2c_dev->curr_transfer = TRANSFER_STATE_NONE;
+ set_tx_trigger_level(i2c_dev, 1);
+ writel(0, i2c_dev->base + I2C_SLV_INT_MASK);
+ i2c_dev->int_mask = I2C_SLV_DEFAULT_INT_MASK;
+ writel(i2c_dev->int_mask, i2c_dev->base + I2C_INT_MASK);
+ if (i2c_dev->is_tx_waiting) {
+ complete(&i2c_dev->tx_msg_complete);
+ i2c_dev->is_tx_waiting = false;
+ }
+}
+
+static void handle_tx_trigger_int(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ unsigned long fifo_status;
+ int empty_slots;
+ int i, j;
+ int data_available;
+ unsigned long header1, header2, header3;
+ unsigned long tx_data;
+ int word_to_write;
+ int bytes_remain;
+ int bytes_in_curr_word;
+ int tx_tail;
+ int packet_len;
+
+ fifo_status = readl(i2c_dev->base + I2C_FIFO_STATUS);
+ empty_slots = (fifo_status & I2C_FIFO_STATUS_SLV_TX_MASK) >>
+ I2C_FIFO_STATUS_SLV_TX_SHIFT;
+ BUG_ON(empty_slots <= 3);
+ if (i2c_dev->curr_transfer == TRANSFER_STATE_NONE) {
+ empty_slots -= 3;
+
+ /* Clear the tfifo request. */
+ writel(I2C_INT_STATUS_TX_BUFFER_REQUEST,
+ i2c_dev->base + I2C_INT_STATUS);
+
+ /* Get the number of bytes that can be sent in the current packet */
+ data_available = get_data_count(i2c_dev->tx_msg_tail,
+ i2c_dev->tx_msg_head, i2c_dev->tx_msg_buf_size);
+ if (data_available)
+ packet_len = min(empty_slots*BYTES_PER_FIFO_WORD,
+ data_available);
+ else
+ packet_len = empty_slots*BYTES_PER_FIFO_WORD;
+
+ get_packet_headers(i2c_dev, packet_len, I2C_M_RD,
+ &header1, &header2, &header3);
+
+ /* Write packet Header */
+ writel(header1, i2c_dev->base + I2C_SLV_TX_FIFO);
+ writel(header2, i2c_dev->base + I2C_SLV_TX_FIFO);
+ writel(header3, i2c_dev->base + I2C_SLV_TX_FIFO);
+
+ fifo_status = readl(i2c_dev->base + I2C_FIFO_STATUS);
+ if (data_available) {
+ word_to_write = (packet_len + 3) >> 2;
+ bytes_remain = packet_len;
+ tx_tail = i2c_dev->tx_msg_tail;
+ for (i = 0; i < word_to_write; i++) {
+ bytes_in_curr_word =
+ min(bytes_remain, BYTES_PER_FIFO_WORD);
+ tx_data = 0;
+ for (j = 0; j < bytes_in_curr_word; ++j) {
+ tx_data |= (i2c_dev->tx_msg_buff[
+ tx_tail++]<<(j*8));
+ if (tx_tail >= i2c_dev->tx_msg_buf_size)
+ tx_tail = 0;
+ }
+ writel(tx_data, i2c_dev->base +
+ I2C_SLV_TX_FIFO);
+ bytes_remain -= bytes_in_curr_word;
+ }
+ i2c_dev->curr_packet_tx_tail = tx_tail;
+ i2c_dev->is_dummy_char_cycle = false;
+ } else {
+ i2c_dev->curr_packet_tx_tail = i2c_dev->tx_msg_tail;
+ for (i = 0; i < empty_slots; i++)
+ writel(i2c_dev->dummy_word,
+ i2c_dev->base + I2C_SLV_TX_FIFO);
+ i2c_dev->is_dummy_char_cycle = true;
+ }
+
+ i2c_dev->curr_transfer = TRANSFER_STATE_WRITE;
+ i2c_dev->int_mask &= ~I2C_INT_SLV_TFIFO_DATA_REQ;
+ i2c_dev->int_mask |= I2C_SLV_TRANS_END;
+ writel(i2c_dev->int_mask, i2c_dev->base + I2C_INT_MASK);
+ } else {
+ dev_err(i2c_dev->dev, "%s: illegal transfer state at "
+ "this point\n", __func__);
+ BUG();
+ }
+}
+
+static void handle_tx_interrupt(struct tegra_i2c_slave_dev *i2c_dev)
+{
+ if (i2c_dev->cont_status & I2C_SLV_TRANS_END)
+ handle_tx_transaction_end(i2c_dev);
+ else
+ handle_tx_trigger_int(i2c_dev);
+}
+
+static irqreturn_t tegra_i2c_slave_isr(int irq, void *dev_id)
+{
+ struct tegra_i2c_slave_dev *i2c_dev = dev_id;
+ unsigned long flags;
+
+ /* Read the Interrupt status register & PKT_STATUS */
+ i2c_dev->cont_status = readl(i2c_dev->base + I2C_INT_STATUS);
+
+ dev_dbg(i2c_dev->dev, "ISR ContStatus 0x%08x\n", i2c_dev->cont_status);
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ if ((i2c_dev->cont_status & I2C_INT_STATUS_RX_DATA_AVAILABLE) ||
+ (i2c_dev->curr_transfer == TRANSFER_STATE_READ)) {
+ handle_rx_interrupt(i2c_dev);
+ goto Done;
+ }
+
+ if ((i2c_dev->cont_status & I2C_INT_STATUS_TX_BUFFER_REQUEST) ||
+ (i2c_dev->curr_transfer == TRANSFER_STATE_WRITE)) {
+ handle_tx_interrupt(i2c_dev);
+ goto Done;
+ }
+
+ dev_err(i2c_dev->dev, "Tegra I2C slave got an unexpected "
+ "interrupt, IntStatus 0x%08x\n", i2c_dev->cont_status);
+ BUG();
+
+Done:
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int tegra_i2c_slave_start(struct i2c_slave_adapter *slv_adap, int addr,
+ int is_ten_bit_addr, unsigned char dummy_char)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return -EBUSY;
+ }
+
+ i2c_dev->rx_msg_buff = (u8 *)(i2c_dev+1);
+ i2c_dev->rx_msg_head = 0;
+ i2c_dev->rx_msg_tail = 0;
+ i2c_dev->is_rx_waiting = false;
+
+ i2c_dev->tx_msg_head = 0;
+ i2c_dev->tx_msg_tail = 0;
+ i2c_dev->is_tx_waiting = true;
+
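+ /* replicate the dummy character into all four bytes of a FIFO
+ * word, e.g. 0x5A becomes 0x5A5A5A5A */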
+ i2c_dev->dummy_word = (dummy_char << 8) | dummy_char;
+ i2c_dev->dummy_word |= i2c_dev->dummy_word << 16;
+
+ i2c_dev->slave_add = addr;
+ i2c_dev->is_ten_bit_addr = is_ten_bit_addr;
+
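+ /*
+ * Pre-build a receive-side packet header for a maximum-size
+ * (4096 byte) packet; handle_packet_first_byte_read() writes
+ * this header into the TX FIFO when the first byte of a master
+ * write arrives.
+ */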
+ get_packet_headers(i2c_dev, 4096, 0, &i2c_dev->rx_pack_hdr1,
+ &i2c_dev->rx_pack_hdr2, &i2c_dev->rx_pack_hdr3);
+
+ pm_runtime_get_sync(i2c_dev->dev);
+ configure_i2c_slave_packet_mode(i2c_dev);
+ configure_i2c_slave_address(i2c_dev);
+ do_tx_fifo_empty(i2c_dev, NULL);
+ set_rx_trigger_level(i2c_dev, 1);
+ writel(0, i2c_dev->base + I2C_SLV_INT_MASK);
+
+ if (i2c_bus->pinmux)
+ tegra_pinmux_config_tristate_table(i2c_bus->pinmux,
+ i2c_bus->mux_len, TEGRA_TRI_NORMAL);
+
+ i2c_dev->curr_transfer = 0;
+ i2c_dev->is_slave_started = true;
+ i2c_dev->int_mask = I2C_SLV_DEFAULT_INT_MASK;
+ i2c_dev->is_first_byte_read_wait = true;
+ writel(i2c_dev->int_mask, i2c_dev->base + I2C_INT_MASK);
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return 0;
+}
+
+static void tegra_i2c_slave_stop(struct i2c_slave_adapter *slv_adap,
+ int is_buffer_clear)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (!i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return;
+ }
+
+ i2c_dev->slave_add = 0;
+ i2c_dev->is_ten_bit_addr = false;
+ configure_i2c_slave_address(i2c_dev);
+ writel(0, i2c_dev->base + I2C_SLV_INT_MASK);
+ writel(0, i2c_dev->base + I2C_INT_MASK);
+ i2c_dev->curr_transfer = 0;
+ i2c_dev->is_slave_started = false;
+ pm_runtime_put_sync(i2c_dev->dev);
+ if (is_buffer_clear) {
+ i2c_dev->rx_msg_head = 0;
+ i2c_dev->rx_msg_tail = 0;
+ i2c_dev->is_rx_waiting = false;
+ i2c_dev->tx_msg_head = 0;
+ i2c_dev->tx_msg_tail = 0;
+ i2c_dev->is_tx_waiting = false;
+ }
+ if (i2c_bus->pinmux)
+ tegra_pinmux_config_tristate_table(i2c_bus->pinmux,
+ i2c_bus->mux_len, TEGRA_TRI_TRISTATE);
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+}
+
+static int tegra_i2c_slave_send(struct i2c_slave_adapter *slv_adap,
+ const char *buf, int count)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+ unsigned long space_available;
+ int i;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (!i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return -EPERM;
+ }
+
+ space_available = get_space_count(i2c_dev->tx_msg_tail,
+ i2c_dev->tx_msg_head, i2c_dev->tx_msg_buf_size);
+ if (space_available < count) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return 0;
+ }
+
+ for (i = 0; i < count; ++i) {
+ i2c_dev->tx_msg_buff[i2c_dev->tx_msg_head++] = *buf++;
+ if (i2c_dev->tx_msg_head >= i2c_dev->tx_msg_buf_size)
+ i2c_dev->tx_msg_head = 0;
+ }
+ i2c_dev->is_tx_waiting = false;
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return count;
+}
+
+static int tegra_i2c_slave_get_tx_status(struct i2c_slave_adapter *slv_adap,
+ int timeout_ms)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+ unsigned long data_available;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (!i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return -EPERM;
+ }
+
+ data_available = get_data_count(i2c_dev->tx_msg_tail,
+ i2c_dev->tx_msg_head, i2c_dev->tx_msg_buf_size);
+ if (!data_available) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return 0;
+ }
+
+ INIT_COMPLETION(i2c_dev->tx_msg_complete);
+ if (timeout_ms)
+ i2c_dev->is_tx_waiting = true;
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ if (timeout_ms) {
+ wait_for_completion_timeout(&i2c_dev->tx_msg_complete,
+ to_jiffies(timeout_ms));
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ i2c_dev->is_tx_waiting = false;
+ data_available = get_data_count(i2c_dev->tx_msg_tail,
+ i2c_dev->tx_msg_head,
+ i2c_dev->tx_msg_buf_size);
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ if (data_available)
+ return -ETIMEDOUT;
+ }
+ return data_available;
+}
+
+/*
+ * timeout_ms = 0, min_count = 0: read whatever is buffered, without waiting.
+ * timeout_ms = 0, min_count != 0: block until at least min_count bytes are read.
+ * timeout_ms != 0: wait up to the timeout for data.
+ * timeout_ms = INF: wait until all requested bytes are read.
+ */
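+/*
+ * For example, tegra_i2c_slave_recv(adap, buf, 16, 0, 0) copies at most
+ * 16 already-buffered bytes into buf and returns immediately (0 when the
+ * rx ring is empty).
+ */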
+
+static int tegra_i2c_slave_recv(struct i2c_slave_adapter *slv_adap, char *buf,
+ int count, int min_count, int timeout_ms)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+ int data_available;
+ int bytes_copy;
+ int i;
+ int read_count = 0;
+ bool is_inf_wait = false;
+ int run_count = 0;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (!i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return -EPERM;
+ }
+
+ do {
+ data_available = get_data_count(i2c_dev->rx_msg_tail,
+ i2c_dev->rx_msg_head, i2c_dev->rx_msg_buf_size);
+
+ bytes_copy = min(data_available, count);
+
+ if (!data_available) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return 0;
+ }
+ for (i = 0; i < bytes_copy; ++i) {
+ *buf++ = i2c_dev->rx_msg_buff[i2c_dev->rx_msg_tail++];
+ if (i2c_dev->rx_msg_tail >= i2c_dev->rx_msg_buf_size)
+ i2c_dev->rx_msg_tail = 0;
+ read_count++;
+ }
+ if (!timeout_ms) {
+ if ((!min_count) || (read_count >= min_count))
+ break;
+ is_inf_wait = true;
+ } else {
+ if ((read_count == count) || run_count)
+ break;
+ }
+ i2c_dev->is_rx_waiting = true;
+ INIT_COMPLETION(i2c_dev->rx_msg_complete);
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ if (is_inf_wait)
+ wait_for_completion(&i2c_dev->rx_msg_complete);
+ else
+ wait_for_completion_timeout(&i2c_dev->rx_msg_complete,
+ to_jiffies(timeout_ms));
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ } while (1);
+ i2c_dev->is_rx_waiting = false;
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return read_count;
+}
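
As a reading aid only (not part of the patch), the calling modes documented above can be summarized as follows; "slv_adap" is assumed to be the adapter of a started slave bus and the byte counts are arbitrary:

static void example_recv_modes(struct i2c_slave_adapter *slv_adap, char *buf)
{
	/* Non-blocking: return whatever is already in the rx ring. */
	tegra_i2c_slave_recv(slv_adap, buf, 16, 0, 0);

	/* No timeout: block until at least 4 bytes have been read. */
	tegra_i2c_slave_recv(slv_adap, buf, 16, 4, 0);

	/* Wait up to 100 ms for data, then return what was received. */
	tegra_i2c_slave_recv(slv_adap, buf, 16, 0, 100);
}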
+
+static int tegra_i2c_slave_flush_buffer(struct i2c_slave_adapter *slv_adap,
+ int is_flush_tx_buffer, int is_flush_rx_buffer)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (!i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return -EPERM;
+ }
+ if (is_flush_tx_buffer) {
+ i2c_dev->tx_msg_head = 0;
+ i2c_dev->tx_msg_tail = 0;
+ }
+ if (is_flush_rx_buffer) {
+ i2c_dev->rx_msg_head = 0;
+ i2c_dev->rx_msg_tail = 0;
+ }
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return 0;
+}
+
+static int tegra_i2c_slave_get_nack_cycle(struct i2c_slave_adapter *slv_adap,
+ int is_cout_reset)
+{
+ struct tegra_i2c_slave_bus *i2c_bus = i2c_get_slave_adapdata(slv_adap);
+ struct tegra_i2c_slave_dev *i2c_dev = i2c_bus->dev;
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (!i2c_dev->is_slave_started) {
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ dev_dbg(i2c_dev->dev, "The slave bus is not started\n");
+ return -EPERM;
+ }
+
+ retval = i2c_dev->nack_packet_count;
+ if (is_cout_reset)
+ i2c_dev->nack_packet_count = 0;
+
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ return retval;
+}
+
+static const struct i2c_slave_algorithm tegra_i2c_slave_algo = {
+ .slave_start = tegra_i2c_slave_start,
+ .slave_stop = tegra_i2c_slave_stop,
+ .slave_send = tegra_i2c_slave_send,
+ .slave_get_tx_status = tegra_i2c_slave_get_tx_status,
+ .slave_recv = tegra_i2c_slave_recv,
+ .slave_flush_buffer = tegra_i2c_slave_flush_buffer,
+ .slave_get_nack_cycle = tegra_i2c_slave_get_nack_cycle,
+};
+
+static int tegra_i2c_slave_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_slave_dev *i2c_dev;
+ struct tegra_i2c_slave_bus *i2c_bus = NULL;
+ struct tegra_i2c_slave_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct resource *iomem;
+ struct clk *clk;
+ void *base;
+ int irq;
+ int ret = 0;
+ int rx_buffer_size;
+ int tx_buffer_size;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (pdata->adapter_nr < 0) {
+ dev_err(&pdev->dev, "invalid platform data?\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ base = ioremap(iomem->start, resource_size(iomem));
+ if (!base) {
+ dev_err(&pdev->dev, "Can't ioremap I2C region\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ ret = -ENODEV;
+ goto err_iounmap;
+ }
+ irq = res->start;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (!clk) {
+ ret = -ENODEV;
+ goto err_release_region;
+ }
+
+ rx_buffer_size = pdata->max_rx_buffer_size ?: 4096;
+ tx_buffer_size = pdata->max_tx_buffer_size ?: 4096;
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_slave_dev) +
+ rx_buffer_size + tx_buffer_size, GFP_KERNEL);
+ if (!i2c_dev) {
+ ret = -ENOMEM;
+ goto err_clk_put;
+ }
+
+ i2c_dev->base = base;
+ i2c_dev->clk = clk;
+ i2c_dev->iomem = iomem;
+ i2c_dev->irq = irq;
+ i2c_dev->cont_id = pdev->id;
+ i2c_dev->dev = &pdev->dev;
+ i2c_dev->bus_clk = pdata->bus_clk_rate?: 100000;
+ i2c_dev->rx_msg_buff = (u8 *)(i2c_dev+1);
+ i2c_dev->rx_msg_buf_size = rx_buffer_size;
+ i2c_dev->rx_msg_head = 0;
+ i2c_dev->rx_msg_tail = 0;
+ i2c_dev->is_rx_waiting = 0;
+ i2c_dev->tx_msg_buff = i2c_dev->rx_msg_buff + rx_buffer_size;
+ i2c_dev->tx_msg_buf_size = tx_buffer_size;
+ i2c_dev->tx_msg_head = 0;
+ i2c_dev->tx_msg_tail = 0;
+ i2c_dev->is_tx_waiting = 0;
+
+ i2c_dev->is_slave_started = false;
+ spin_lock_init(&i2c_dev->lock);
+
+ init_completion(&i2c_dev->rx_msg_complete);
+ init_completion(&i2c_dev->tx_msg_complete);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = request_irq(i2c_dev->irq, tegra_i2c_slave_isr, IRQF_DISABLED,
+ pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ goto err_free;
+ }
+
+ i2c_bus = &i2c_dev->bus;
+ i2c_bus->dev = i2c_dev;
+ i2c_bus->pinmux = pdata->pinmux;
+ i2c_bus->mux_len = pdata->bus_mux_len;
+ i2c_bus->bus_clk_rate = pdata->bus_clk_rate ?: 100000;
+
+ i2c_bus->slv_adap.slv_algo = &tegra_i2c_slave_algo;
+ i2c_bus->slv_adap.owner = THIS_MODULE;
+ i2c_bus->slv_adap.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_bus->slv_adap.name, "Tegra I2C SLAVE adapter",
+ sizeof(i2c_bus->slv_adap.name));
+ i2c_bus->slv_adap.parent_dev = &pdev->dev;
+ i2c_bus->slv_adap.dev = NULL;
+ i2c_bus->slv_adap.nr = pdata->adapter_nr;
+ ret = i2c_add_slave_adapter(&i2c_bus->slv_adap, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_free_irq;
+ }
+ i2c_set_slave_adapdata(&i2c_bus->slv_adap, i2c_bus);
+ dev_dbg(&pdev->dev, "%s() suucess\n", __func__);
+ pm_runtime_enable(i2c_dev->dev);
+ return 0;
+
+err_free_irq:
+ free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+ kfree(i2c_dev);
+err_clk_put:
+ clk_put(clk);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_iounmap:
+ iounmap(base);
+ dev_dbg(&pdev->dev, "%s() failed %d\n", __func__, ret);
+ return ret;
+}
+
+static int tegra_i2c_slave_remove(struct platform_device *pdev)
+{
+ struct tegra_i2c_slave_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_del_slave_adapter(&i2c_dev->bus.slv_adap);
+ pm_runtime_disable(i2c_dev->dev);
+ free_irq(i2c_dev->irq, i2c_dev);
+ clk_put(i2c_dev->clk);
+ release_mem_region(i2c_dev->iomem->start,
+ resource_size(i2c_dev->iomem));
+ iounmap(i2c_dev->base);
+ kfree(i2c_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_i2c_slave_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int tegra_i2c_slave_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+#if defined(CONFIG_PM_RUNTIME)
+static int tegra_i2c_slave_runtime_idle(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_i2c_slave_dev *i2c_dev = platform_get_drvdata(pdev);
+ clk_disable(i2c_dev->clk);
+ return 0;
+}
+static int tegra_i2c_slave_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_i2c_slave_dev *i2c_dev = platform_get_drvdata(pdev);
+ clk_enable(i2c_dev->clk);
+ return 0;
+}
+static const struct dev_pm_ops tegra_i2c_slave_dev_pm_ops = {
+ .runtime_idle = tegra_i2c_slave_runtime_idle,
+ .runtime_resume = tegra_i2c_slave_runtime_resume,
+};
+#endif
+
+static struct platform_driver tegra_i2c_slave_driver = {
+ .probe = tegra_i2c_slave_probe,
+ .remove = tegra_i2c_slave_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_i2c_slave_suspend,
+ .resume = tegra_i2c_slave_resume,
+#endif
+ .driver = {
+ .name = "tegra-i2c-slave",
+ .owner = THIS_MODULE,
+#if defined(CONFIG_PM_RUNTIME)
+ .pm = &tegra_i2c_slave_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init tegra_i2c_slave_init_driver(void)
+{
+ return platform_driver_register(&tegra_i2c_slave_driver);
+}
+
+static void __exit tegra_i2c_slave_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_i2c_slave_driver);
+}
+subsys_initcall(tegra_i2c_slave_init_driver);
+module_exit(tegra_i2c_slave_exit_driver);
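
For context, a minimal, hypothetical board-file fragment showing how this controller might be instantiated. The field names follow the probe code above; the header location, exact field types and the values shown are assumptions:

#include <linux/platform_device.h>
#include <linux/i2c-tegra.h>	/* assumed home of tegra_i2c_slave_platform_data */

static struct tegra_i2c_slave_platform_data board_i2c_slave_pdata = {
	.adapter_nr = 4,		/* bus number passed to i2c_add_slave_adapter() */
	.bus_clk_rate = 100000,		/* 0 also falls back to 100 kHz in probe */
	.max_rx_buffer_size = 4096,	/* 0 also falls back to 4096 in probe */
	.max_tx_buffer_size = 4096,
	/* .pinmux/.bus_mux_len describe the pingroups tristated while stopped */
};

static struct platform_device board_i2c_slave_device = {
	.name = "tegra-i2c-slave",
	.id = 2,			/* becomes i2c_dev->cont_id */
	.dev = {
		.platform_data = &board_i2c_slave_pdata,
	},
};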
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 3c94c4a81a55..d28cd4e9d3d0 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -4,6 +4,8 @@
* Copyright (C) 2010 Google, Inc.
* Author: Colin Cross <ccross@android.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -15,6 +17,9 @@
*
*/
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -27,19 +32,23 @@
#include <linux/slab.h>
#include <linux/i2c-tegra.h>
#include <linux/of_i2c.h>
+#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <mach/clk.h>
+#include <mach/pinmux.h>
-#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
-#define BYTES_PER_FIFO_WORD 4
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define TEGRA_I2C_RETRIES 3
+#define BYTES_PER_FIFO_WORD 4
#define I2C_CNFG 0x000
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_CNFG_PACKET_MODE_EN (1<<10)
#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
#define I2C_STATUS 0x01C
+#define I2C_STATUS_BUSY (1<<8)
#define I2C_SL_CNFG 0x020
#define I2C_SL_CNFG_NACK (1<<1)
#define I2C_SL_CNFG_NEWSL (1<<2)
@@ -83,6 +92,7 @@
#define I2C_ERR_NO_ACK 0x01
#define I2C_ERR_ARBITRATION_LOST 0x02
#define I2C_ERR_UNKNOWN_INTERRUPT 0x04
+#define I2C_ERR_UNEXPECTED_STATUS 0x08
#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
#define PACKET_HEADER0_PACKET_ID_SHIFT 16
@@ -99,6 +109,23 @@
#define I2C_HEADER_MASTER_ADDR_SHIFT 12
#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+#define SL_ADDR1(addr) (addr & 0xff)
+#define SL_ADDR2(addr) ((addr >> 8) & 0xff)
+
+
+
+struct tegra_i2c_dev;
+
+struct tegra_i2c_bus {
+ struct tegra_i2c_dev *dev;
+ const struct tegra_pingroup_config *mux;
+ int mux_len;
+ unsigned long bus_clk_rate;
+ struct i2c_adapter adapter;
+ int scl_gpio;
+ int sda_gpio;
+};
+
/**
* struct tegra_i2c_dev - per device i2c context
* @dev: device reference for power management
@@ -120,22 +147,37 @@
*/
struct tegra_i2c_dev {
struct device *dev;
- struct i2c_adapter adapter;
struct clk *clk;
- struct clk *i2c_clk;
struct resource *iomem;
+ struct rt_mutex dev_lock;
+ spinlock_t clk_lock;
void __iomem *base;
int cont_id;
int irq;
bool irq_disabled;
+ bool controller_enabled;
int is_dvc;
+ bool is_slave;
struct completion msg_complete;
int msg_err;
u8 *msg_buf;
+ u32 packet_header;
+ u32 payload_size;
+ u32 io_header;
size_t msg_buf_remaining;
int msg_read;
- unsigned long bus_clk_rate;
+ struct i2c_msg *msgs;
+ int msg_add;
+ int msgs_num;
bool is_suspended;
+ int bus_count;
+ const struct tegra_pingroup_config *last_mux;
+ int last_mux_len;
+ unsigned long last_bus_clk_rate;
+ u16 slave_addr;
+ bool is_clkon_always;
+ int (*arb_recovery)(int scl_gpio, int sda_gpio);
+ struct tegra_i2c_bus busses[1];
};
static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
@@ -335,12 +377,31 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
}
+static void tegra_i2c_slave_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val = I2C_SL_CNFG_NEWSL | I2C_SL_CNFG_NACK;
+
+ i2c_writel(i2c_dev, val, I2C_SL_CNFG);
+
+ if (i2c_dev->slave_addr) {
+ u16 addr = i2c_dev->slave_addr;
+
+ i2c_writel(i2c_dev, SL_ADDR1(addr), I2C_SL_ADDR1);
+ i2c_writel(i2c_dev, SL_ADDR2(addr), I2C_SL_ADDR2);
+ }
+}
+
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val;
int err = 0;
- clk_enable(i2c_dev->clk);
+ if (!i2c_dev->is_clkon_always)
+ clk_enable(i2c_dev->clk);
+
+ /* The interrupt is generated before the stop signal is sent, so
+ * wait a while so that the stop signal can be sent properly */
+ udelay(100);
tegra_periph_reset_assert(i2c_dev->clk);
udelay(2);
@@ -353,7 +414,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
i2c_writel(i2c_dev, val, I2C_CNFG);
i2c_writel(i2c_dev, 0, I2C_INT_MASK);
- clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
+ clk_set_rate(i2c_dev->clk, i2c_dev->last_bus_clk_rate * 8);
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
@@ -368,10 +429,14 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ if (i2c_dev->is_slave)
+ tegra_i2c_slave_init(i2c_dev);
+
if (tegra_i2c_flush_fifos(i2c_dev))
err = -ETIMEDOUT;
- clk_disable(i2c_dev->clk);
+ if (!i2c_dev->is_clkon_always)
+ clk_disable(i2c_dev->clk);
if (i2c_dev->irq_disabled) {
i2c_dev->irq_disabled = 0;
@@ -384,16 +449,22 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
{
u32 status;
- const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | I2C_INT_TX_FIFO_OVERFLOW;
struct tegra_i2c_dev *i2c_dev = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c_dev->clk_lock, flags);
+ if (!i2c_dev->controller_enabled) {
+ dev_warn(i2c_dev->dev, "Controller not enabled\n");
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
+ return IRQ_NONE;
+ }
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
if (status == 0) {
- dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
- i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
- i2c_readl(i2c_dev, I2C_STATUS),
- i2c_readl(i2c_dev, I2C_CNFG));
+ dev_warn(i2c_dev->dev, "unknown interrupt Add 0x%02x\n",
+ i2c_dev->msg_add);
i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
if (!i2c_dev->irq_disabled) {
@@ -401,16 +472,49 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
i2c_dev->irq_disabled = 1;
}
- complete(&i2c_dev->msg_complete);
goto err;
}
if (unlikely(status & status_err)) {
- if (status & I2C_INT_NO_ACK)
+ dev_warn(i2c_dev->dev, "I2c error status 0x%08x\n", status);
+ if (status & I2C_INT_NO_ACK) {
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
- if (status & I2C_INT_ARBITRATION_LOST)
+ dev_warn(i2c_dev->dev, "no acknowledge from address"
+ " 0x%x\n", i2c_dev->msg_add);
+ dev_warn(i2c_dev->dev, "Packet status 0x%08x\n",
+ i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS));
+ }
+
+ if (status & I2C_INT_ARBITRATION_LOST) {
i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
- complete(&i2c_dev->msg_complete);
+ dev_warn(i2c_dev->dev, "arbitration lost during "
+ " communicate to add 0x%x\n", i2c_dev->msg_add);
+ dev_warn(i2c_dev->dev, "Packet status 0x%08x\n",
+ i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS));
+ }
+
+ if (status & I2C_INT_TX_FIFO_OVERFLOW) {
+ i2c_dev->msg_err |= I2C_INT_TX_FIFO_OVERFLOW;
+ dev_warn(i2c_dev->dev, "Tx fifo overflow during "
+ " communicate to add 0x%x\n", i2c_dev->msg_add);
+ dev_warn(i2c_dev->dev, "Packet status 0x%08x\n",
+ i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS));
+ }
+ goto err;
+ }
+
+ if (unlikely((i2c_readl(i2c_dev, I2C_STATUS) & I2C_STATUS_BUSY)
+ && (status == I2C_INT_TX_FIFO_DATA_REQ)
+ && i2c_dev->msg_read
+ && i2c_dev->msg_buf_remaining)) {
+ dev_warn(i2c_dev->dev, "unexpected status\n");
+ i2c_dev->msg_err |= I2C_ERR_UNEXPECTED_STATUS;
+
+ if (!i2c_dev->irq_disabled) {
+ disable_irq_nosync(i2c_dev->irq);
+ i2c_dev->irq_disabled = 1;
+ }
+
goto err;
}
@@ -428,35 +532,66 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
}
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
if (status & I2C_INT_PACKET_XFER_COMPLETE) {
BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
}
- i2c_writel(i2c_dev, status, I2C_INT_STATUS);
- if (i2c_dev->is_dvc)
- dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
return IRQ_HANDLED;
+
err:
+ dev_dbg(i2c_dev->dev, "reg: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i2c_readl(i2c_dev, I2C_CNFG), i2c_readl(i2c_dev, I2C_STATUS),
+ i2c_readl(i2c_dev, I2C_INT_STATUS),
+ i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS));
+
+ dev_dbg(i2c_dev->dev, "packet: 0x%08x %u 0x%08x\n",
+ i2c_dev->packet_header, i2c_dev->payload_size,
+ i2c_dev->io_header);
+
+ if (i2c_dev->msgs) {
+ struct i2c_msg *msgs = i2c_dev->msgs;
+ int i;
+
+ for (i = 0; i < i2c_dev->msgs_num; i++)
+ dev_dbg(i2c_dev->dev,
+ "msgs[%d] %c, addr=0x%04x, len=%d\n",
+ i, (msgs[i].flags & I2C_M_RD) ? 'R' : 'W',
+ msgs[i].addr, msgs[i].len);
+ }
+
/* An error occurred, mask all interrupts */
tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
- I2C_INT_RX_FIFO_DATA_REQ);
+ I2C_INT_RX_FIFO_DATA_REQ | I2C_INT_TX_FIFO_OVERFLOW);
+
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+
if (i2c_dev->is_dvc)
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
+ complete(&i2c_dev->msg_complete);
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
return IRQ_HANDLED;
}
-static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+static int tegra_i2c_xfer_msg(struct tegra_i2c_bus *i2c_bus,
struct i2c_msg *msg, int stop)
{
- u32 packet_header;
+ struct tegra_i2c_dev *i2c_dev = i2c_bus->dev;
u32 int_mask;
int ret;
+ unsigned long flags;
+ int arb_stat;
tegra_i2c_flush_fifos(i2c_dev);
- i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
if (msg->len == 0)
return -EINVAL;
@@ -466,32 +601,33 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
i2c_dev->msg_err = I2C_ERR_NONE;
i2c_dev->msg_read = (msg->flags & I2C_M_RD);
INIT_COMPLETION(i2c_dev->msg_complete);
+ i2c_dev->msg_add = msg->addr;
- packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ i2c_dev->packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
PACKET_HEADER0_PROTOCOL_I2C |
(i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
(1 << PACKET_HEADER0_PACKET_ID_SHIFT);
- i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+ i2c_writel(i2c_dev, i2c_dev->packet_header, I2C_TX_FIFO);
- packet_header = msg->len - 1;
- i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+ i2c_dev->payload_size = msg->len - 1;
+ i2c_writel(i2c_dev, i2c_dev->payload_size, I2C_TX_FIFO);
- packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
- packet_header |= I2C_HEADER_IE_ENABLE;
+ i2c_dev->io_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ i2c_dev->io_header |= I2C_HEADER_IE_ENABLE;
if (!stop)
- packet_header |= I2C_HEADER_REPEAT_START;
+ i2c_dev->io_header |= I2C_HEADER_REPEAT_START;
if (msg->flags & I2C_M_TEN)
- packet_header |= I2C_HEADER_10BIT_ADDR;
+ i2c_dev->io_header |= I2C_HEADER_10BIT_ADDR;
if (msg->flags & I2C_M_IGNORE_NAK)
- packet_header |= I2C_HEADER_CONT_ON_NAK;
+ i2c_dev->io_header |= I2C_HEADER_CONT_ON_NAK;
if (msg->flags & I2C_M_RD)
- packet_header |= I2C_HEADER_READ;
- i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+ i2c_dev->io_header |= I2C_HEADER_READ;
+ i2c_writel(i2c_dev, i2c_dev->io_header, I2C_TX_FIFO);
if (!(msg->flags & I2C_M_RD))
tegra_i2c_fill_tx_fifo(i2c_dev);
- int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | I2C_INT_TX_FIFO_OVERFLOW;
if (msg->flags & I2C_M_RD)
int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
else if (i2c_dev->msg_buf_remaining)
@@ -500,13 +636,19 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
i2c_readl(i2c_dev, I2C_INT_MASK));
- ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ ret = wait_for_completion_timeout(&i2c_dev->msg_complete,
+ TEGRA_I2C_TIMEOUT);
tegra_i2c_mask_irq(i2c_dev, int_mask);
if (WARN_ON(ret == 0)) {
- dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+ dev_err(i2c_dev->dev,
+ "i2c transfer timed out, addr 0x%04x, data 0x%02x\n",
+ msg->addr, msg->buf[0]);
+ spin_lock_irqsave(&i2c_dev->clk_lock, flags);
+ i2c_dev->controller_enabled = false;
tegra_i2c_init(i2c_dev);
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
return -ETIMEDOUT;
}
@@ -516,34 +658,89 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;
+ /* Arbitration Lost occurs, Start recovery */
+ if (i2c_dev->msg_err == I2C_ERR_ARBITRATION_LOST) {
+ if (i2c_dev->arb_recovery) {
+ arb_stat = i2c_dev->arb_recovery(i2c_bus->scl_gpio, i2c_bus->sda_gpio);
+ if (!arb_stat)
+ return -EAGAIN;
+ }
+ }
+
+ spin_lock_irqsave(&i2c_dev->clk_lock, flags);
+ i2c_dev->controller_enabled = false;
tegra_i2c_init(i2c_dev);
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
+
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
return 0;
return -EREMOTEIO;
}
+ if (i2c_dev->msg_err & I2C_ERR_UNEXPECTED_STATUS)
+ return -EAGAIN;
+
return -EIO;
}
static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
int num)
{
- struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ struct tegra_i2c_bus *i2c_bus = i2c_get_adapdata(adap);
+ struct tegra_i2c_dev *i2c_dev = i2c_bus->dev;
int i;
int ret = 0;
+ unsigned long flags;
if (i2c_dev->is_suspended)
return -EBUSY;
- clk_enable(i2c_dev->clk);
+ rt_mutex_lock(&i2c_dev->dev_lock);
+
+ if (i2c_dev->last_mux != i2c_bus->mux) {
+ tegra_pinmux_set_safe_pinmux_table(i2c_dev->last_mux,
+ i2c_dev->last_mux_len);
+ tegra_pinmux_config_pinmux_table(i2c_bus->mux,
+ i2c_bus->mux_len);
+ i2c_dev->last_mux = i2c_bus->mux;
+ i2c_dev->last_mux_len = i2c_bus->mux_len;
+ }
+
+ if (i2c_dev->last_bus_clk_rate != i2c_bus->bus_clk_rate) {
+ clk_set_rate(i2c_dev->clk, i2c_bus->bus_clk_rate * 8);
+ i2c_dev->last_bus_clk_rate = i2c_bus->bus_clk_rate;
+ }
+
+ i2c_dev->msgs = msgs;
+ i2c_dev->msgs_num = num;
+
+ if (!i2c_dev->is_clkon_always)
+ clk_enable(i2c_dev->clk);
+
+ spin_lock_irqsave(&i2c_dev->clk_lock, flags);
+ i2c_dev->controller_enabled = true;
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
+
for (i = 0; i < num; i++) {
int stop = (i == (num - 1)) ? 1 : 0;
- ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+ ret = tegra_i2c_xfer_msg(i2c_bus, &msgs[i], stop);
if (ret)
break;
}
- clk_disable(i2c_dev->clk);
+
+ spin_lock_irqsave(&i2c_dev->clk_lock, flags);
+ i2c_dev->controller_enabled = false;
+ spin_unlock_irqrestore(&i2c_dev->clk_lock, flags);
+
+ if (!i2c_dev->is_clkon_always)
+ clk_disable(i2c_dev->clk);
+
+ rt_mutex_unlock(&i2c_dev->dev_lock);
+
+ i2c_dev->msgs = NULL;
+ i2c_dev->msgs_num = 0;
+
return ret ?: i;
}
@@ -560,16 +757,30 @@ static const struct i2c_algorithm tegra_i2c_algo = {
static int tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
- struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
+ struct tegra_i2c_platform_data *plat = pdev->dev.platform_data;
struct resource *res;
struct resource *iomem;
struct clk *clk;
- struct clk *i2c_clk;
const unsigned int *prop;
void *base;
int irq;
+ int nbus;
+ int i = 0;
int ret = 0;
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (plat->bus_count <= 0 || plat->adapter_nr < 0) {
+ dev_err(&pdev->dev, "invalid platform data?\n");
+ return -ENODEV;
+ }
+
+ WARN_ON(plat->bus_count > TEGRA_I2C_MAX_BUS);
+ nbus = min(TEGRA_I2C_MAX_BUS, plat->bus_count);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no mem resource\n");
@@ -602,45 +813,52 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- i2c_clk = clk_get(&pdev->dev, "i2c");
- if (IS_ERR(i2c_clk)) {
- dev_err(&pdev->dev, "missing bus clock");
- ret = PTR_ERR(i2c_clk);
- goto err_clk_put;
- }
-
- i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL);
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev) +
+ (nbus-1) * sizeof(struct tegra_i2c_bus), GFP_KERNEL);
if (!i2c_dev) {
ret = -ENOMEM;
- goto err_i2c_clk_put;
+ goto err_clk_put;
}
i2c_dev->base = base;
i2c_dev->clk = clk;
- i2c_dev->i2c_clk = i2c_clk;
i2c_dev->iomem = iomem;
- i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
+ i2c_dev->is_clkon_always = plat->is_clkon_always;
- i2c_dev->bus_clk_rate = 100000; /* default clock rate */
- if (pdata) {
- i2c_dev->bus_clk_rate = pdata->bus_clk_rate;
+ i2c_dev->last_bus_clk_rate = 100000; /* default clock rate */
+ if (plat) {
+ i2c_dev->last_bus_clk_rate = plat->bus_clk_rate[0];
} else if (i2c_dev->dev->of_node) { /* if there is a device tree node ... */
+ /* TODO: DAN: this doesn't work for DT */
prop = of_get_property(i2c_dev->dev->of_node,
"clock-frequency", NULL);
if (prop)
- i2c_dev->bus_clk_rate = be32_to_cpup(prop);
+ i2c_dev->last_bus_clk_rate = be32_to_cpup(prop);
}
- if (pdev->id == 3)
- i2c_dev->is_dvc = 1;
+ i2c_dev->msgs = NULL;
+ i2c_dev->msgs_num = 0;
+ i2c_dev->controller_enabled = false;
+ rt_mutex_init(&i2c_dev->dev_lock);
+ spin_lock_init(&i2c_dev->clk_lock);
+
+ i2c_dev->slave_addr = plat->slave_addr;
+ i2c_dev->is_dvc = plat->is_dvc;
+ i2c_dev->arb_recovery = plat->arb_recovery;
init_completion(&i2c_dev->msg_complete);
+ if (irq == INT_I2C || irq == INT_I2C2 || irq == INT_I2C3)
+ i2c_dev->is_slave = true;
+
platform_set_drvdata(pdev, i2c_dev);
+ if (i2c_dev->is_clkon_always)
+ clk_enable(i2c_dev->clk);
+
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
@@ -653,33 +871,54 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto err_free;
}
- clk_enable(i2c_dev->i2c_clk);
- i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
- i2c_dev->adapter.owner = THIS_MODULE;
- i2c_dev->adapter.class = I2C_CLASS_HWMON;
- strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
- sizeof(i2c_dev->adapter.name));
- i2c_dev->adapter.algo = &tegra_i2c_algo;
- i2c_dev->adapter.dev.parent = &pdev->dev;
- i2c_dev->adapter.nr = pdev->id;
- i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
+ for (i = 0; i < nbus; i++) {
+ struct tegra_i2c_bus *i2c_bus = &i2c_dev->busses[i];
- ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add I2C adapter\n");
- goto err_free_irq;
- }
+ i2c_bus->dev = i2c_dev;
+ i2c_bus->mux = plat->bus_mux[i];
+ i2c_bus->mux_len = plat->bus_mux_len[i];
+ i2c_bus->bus_clk_rate = plat->bus_clk_rate[i] ?: 100000;
+
+ i2c_bus->scl_gpio = plat->scl_gpio[i];
+ i2c_bus->sda_gpio = plat->sda_gpio[i];
+
+ i2c_bus->adapter.dev.of_node = pdev->dev.of_node;
+ i2c_bus->adapter.algo = &tegra_i2c_algo;
+ i2c_set_adapdata(&i2c_bus->adapter, i2c_bus);
+ i2c_bus->adapter.owner = THIS_MODULE;
+ i2c_bus->adapter.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_bus->adapter.name, "Tegra I2C adapter",
+ sizeof(i2c_bus->adapter.name));
+ i2c_bus->adapter.dev.parent = &pdev->dev;
+ i2c_bus->adapter.nr = plat->adapter_nr + i;
+
+ if (plat->retries)
+ i2c_bus->adapter.retries = plat->retries;
+ else
+ i2c_bus->adapter.retries = TEGRA_I2C_RETRIES;
+
+ if (plat->timeout)
+ i2c_bus->adapter.timeout = plat->timeout;
+
+ ret = i2c_add_numbered_adapter(&i2c_bus->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_del_bus;
+ }
+ of_i2c_register_devices(&i2c_bus->adapter);
- of_i2c_register_devices(&i2c_dev->adapter);
+ i2c_dev->bus_count++;
+ }
return 0;
-err_free_irq:
+
+err_del_bus:
+ while (i2c_dev->bus_count--)
+ i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter);
free_irq(i2c_dev->irq, i2c_dev);
err_free:
kfree(i2c_dev);
-err_i2c_clk_put:
- clk_put(i2c_clk);
err_clk_put:
clk_put(clk);
err_release_region:
@@ -692,9 +931,13 @@ err_iounmap:
static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- i2c_del_adapter(&i2c_dev->adapter);
+ while (i2c_dev->bus_count--)
+ i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter);
+
+ if (i2c_dev->is_clkon_always)
+ clk_disable(i2c_dev->clk);
+
free_irq(i2c_dev->irq, i2c_dev);
- clk_put(i2c_dev->i2c_clk);
clk_put(i2c_dev->clk);
release_mem_region(i2c_dev->iomem->start,
resource_size(i2c_dev->iomem));
@@ -704,37 +947,54 @@ static int tegra_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+static int tegra_i2c_suspend_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- i2c_lock_adapter(&i2c_dev->adapter);
+ rt_mutex_lock(&i2c_dev->dev_lock);
+
i2c_dev->is_suspended = true;
- i2c_unlock_adapter(&i2c_dev->adapter);
+ if (i2c_dev->is_clkon_always)
+ clk_disable(i2c_dev->clk);
+
+ rt_mutex_unlock(&i2c_dev->dev_lock);
return 0;
}
-static int tegra_i2c_resume(struct platform_device *pdev)
+static int tegra_i2c_resume_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
int ret;
- i2c_lock_adapter(&i2c_dev->adapter);
+ rt_mutex_lock(&i2c_dev->dev_lock);
+
+ if (i2c_dev->is_clkon_always)
+ clk_enable(i2c_dev->clk);
ret = tegra_i2c_init(i2c_dev);
if (ret) {
- i2c_unlock_adapter(&i2c_dev->adapter);
+ rt_mutex_unlock(&i2c_dev->dev_lock);
return ret;
}
i2c_dev->is_suspended = false;
- i2c_unlock_adapter(&i2c_dev->adapter);
+ rt_mutex_unlock(&i2c_dev->dev_lock);
return 0;
}
+
+static const struct dev_pm_ops tegra_i2c_dev_pm_ops = {
+ .suspend_noirq = tegra_i2c_suspend_noirq,
+ .resume_noirq = tegra_i2c_resume_noirq,
+};
+#define TEGRA_I2C_DEV_PM_OPS (&tegra_i2c_dev_pm_ops)
+#else
+#define TEGRA_I2C_DEV_PM_OPS NULL
#endif
#if defined(CONFIG_OF)
@@ -751,14 +1011,11 @@ MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = tegra_i2c_remove,
-#ifdef CONFIG_PM
- .suspend = tegra_i2c_suspend,
- .resume = tegra_i2c_resume,
-#endif
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
.of_match_table = tegra_i2c_of_match,
+ .pm = TEGRA_I2C_DEV_PM_OPS,
},
};
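
For context, a hypothetical board-file fragment for the reworked multi-bus platform data consumed by tegra_i2c_probe() above; the header location and field types are assumptions, only the field names come from the code:

#include <linux/platform_device.h>
#include <linux/i2c-tegra.h>	/* assumed home of tegra_i2c_platform_data */

static struct tegra_i2c_platform_data board_i2c1_pdata = {
	.adapter_nr = 0,	/* number of the first adapter registered */
	.bus_count = 1,		/* up to TEGRA_I2C_MAX_BUS logical busses */
	.retries = 3,		/* 0 falls back to TEGRA_I2C_RETRIES */
	/* .bus_clk_rate[], .bus_mux[]/.bus_mux_len[] and .scl_gpio[]/.sda_gpio[]
	 * carry the per-bus clock, pinmux and recovery GPIO settings, and
	 * .arb_recovery points at the arbitration-lost recovery handler. */
};

static struct platform_device board_i2c1_device = {
	.name = "tegra-i2c",
	.id = 0,
	.dev = {
		.platform_data = &board_i2c1_pdata,
	},
};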
diff --git a/drivers/i2c/i2c-slave.c b/drivers/i2c/i2c-slave.c
new file mode 100755
index 000000000000..280a860cd2e8
--- /dev/null
+++ b/drivers/i2c/i2c-slave.c
@@ -0,0 +1,281 @@
+/*
+ * i2c-slave.c - a device driver for the iic-slave bus interface.
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/i2c-slave.h>
+struct i2c_slave_priv {
+ struct i2c_adapter master_adap;
+ struct i2c_slave_adapter *slave_adap;
+ struct i2c_algorithm master_algo;
+};
+
+/**
+ * i2c_slave_send - Send data to the master. The data is transmitted by the
+ * slave when the master issues a read cycle.
+ * This function copies the client data into the slave tx buffer and returns
+ * to the client; it is not a blocking call. The data is sent to the master
+ * later, once the master starts the read transfer.
+ * If there is not enough space for the whole client buffer, an error is
+ * returned; partial data is never written.
+ * @client: Handle to i2c-slave client.
+ * @buf: Data that will be written to the master
+ * @count: How many bytes to write.
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+int i2c_slave_send(struct i2c_client *client, const char *buf, int count)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+
+ if (priv->slave_adap->slv_algo->slave_send)
+ return priv->slave_adap->slv_algo->slave_send(priv->slave_adap,
+ buf, count);
+ return -ENODEV;
+}
+EXPORT_SYMBOL(i2c_slave_send);
+
+/**
+ * i2c_slave_get_tx_status - Get the amount of data pending in the tx buffer.
+ * If there is still data in the tx buffer, wait up to the given timeout for
+ * the transfer to complete.
+ * @client: Handle to i2c-slave client.
+ * @timeout_ms: Time to wait for transfer to complete.
+ *
+ * Returns negative errno, or else the number of bytes remaining in tx buffer.
+ */
+int i2c_slave_get_tx_status(struct i2c_client *client, int timeout_ms)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+
+ if (priv->slave_adap->slv_algo->slave_get_tx_status)
+ return priv->slave_adap->slv_algo->slave_get_tx_status(
+ priv->slave_adap, timeout_ms);
+ return -ENODEV;
+}
+EXPORT_SYMBOL(i2c_slave_get_tx_status);
+
+/**
+ * i2c_slave_recv - Receive data from the master. Data received from the
+ * master is stored in the slave rx buffer. When this API is called, the data
+ * is copied from the slave rx buffer into the client buffer. If the requested
+ * amount (count) of data is not available, it waits for either min_count
+ * bytes to be received or for the timeout, whichever happens first.
+ *
+ * If timeout_ms = 0, wait until min_count bytes have been read.
+ * If timeout_ms is non-zero, wait for data until the timeout expires.
+ * @client: Handle to i2c-slave client.
+ * @buf: Data that will be read from the master
+ * @count: How many bytes to read.
+ * @min_count: Block till read min_count of data.
+ * @timeout_ms: Time to wait for read to be complete.
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+int i2c_slave_recv(struct i2c_client *client, char *buf, int count,
+ int min_count, int timeout_ms)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+
+ if (priv->slave_adap->slv_algo->slave_recv)
+ return priv->slave_adap->slv_algo->slave_recv(priv->slave_adap,
+ buf, count, min_count, timeout_ms);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(i2c_slave_recv);
+
+/**
+ * i2c_slave_start - Start the i2c slave so it can receive/transmit data.
+ * After this, the i2c controller starts responding to the master.
+ * The dummy char is sent to the master whenever there is no data in the
+ * slave tx buffer.
+ * @client: Handle to i2c-slave client.
+ * @dummy_char: Data sent to the master when there is no data pending in the
+ * slave tx buffer.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+int i2c_slave_start(struct i2c_client *client, unsigned char dummy_char)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+ int slave_add;
+ int is_10bit_addr;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+ slave_add = client->addr;
+ is_10bit_addr = (client->flags & I2C_CLIENT_TEN) ? 1 : 0;
+ if (priv->slave_adap->slv_algo->slave_start)
+ return priv->slave_adap->slv_algo->slave_start(priv->slave_adap,
+ slave_add, is_10bit_addr, dummy_char);
+ return -ENODEV;
+}
+EXPORT_SYMBOL(i2c_slave_start);
+
+/**
+ * i2c_slave_stop - Stop the slave from receiving/transmitting data.
+ * After this, the i2c controller stops responding to the master.
+ * @client: Handle to i2c-slave client.
+ * @is_buffer_clear: Reset the tx and rx slave buffer or not.
+ */
+void i2c_slave_stop(struct i2c_client *client, int is_buffer_clear)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+
+ if (priv->slave_adap->slv_algo->slave_stop)
+ return priv->slave_adap->slv_algo->slave_stop(priv->slave_adap,
+ is_buffer_clear);
+}
+EXPORT_SYMBOL(i2c_slave_stop);
+
+/**
+ * i2c_slave_flush_buffer - Flush the receive and transmit buffer.
+ * @client: Handle to i2c-slave client.
+ * @is_flush_tx_buffer: Reset the tx slave buffer or not.
+ * @is_flush_rx_buffer: Reset the rx slave buffer or not.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+int i2c_slave_flush_buffer(struct i2c_client *client,
+ int is_flush_tx_buffer, int is_flush_rx_buffer)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+
+ if (priv->slave_adap->slv_algo->slave_flush_buffer)
+ return priv->slave_adap->slv_algo->slave_flush_buffer(
+ priv->slave_adap, is_flush_tx_buffer,
+ is_flush_rx_buffer);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(i2c_slave_flush_buffer);
+
+/**
+ * i2c_slave_get_nack_cycle - Get the number of master read cycles on which
+ * the dummy char was sent, i.e. how many cycles the slave answered with the
+ * dummy/NACK packet.
+ *
+ * @client: Handle to i2c-slave client.
+ * @is_cout_reset: Reset the nack count or not.
+ *
+ * Returns negative errno, or else the current NACK cycle count.
+ */
+int i2c_slave_get_nack_cycle(struct i2c_client *client,
+ int is_cout_reset)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_slave_priv *priv = adap->algo_data;
+
+ if (!(adap->algo->functionality(adap) & I2C_FUNC_I2C_SLAVE_SUPPORT))
+ BUG();
+
+ if (priv->slave_adap->slv_algo->slave_get_nack_cycle)
+ return priv->slave_adap->slv_algo->slave_get_nack_cycle(
+ priv->slave_adap, is_cout_reset);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(i2c_slave_get_nack_cycle);
+
+static u32 i2c_slave_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C_SLAVE_SUPPORT;
+}
+
+int i2c_add_slave_adapter(struct i2c_slave_adapter *slv_adap, bool force_nr)
+{
+ struct i2c_slave_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(struct i2c_slave_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Set up private adapter data */
+ priv->slave_adap = slv_adap;
+ slv_adap->parent_data = priv;
+
+ priv->master_algo.functionality = i2c_slave_func;
+
+ /* Now fill out new adapter structure */
+ snprintf(priv->master_adap.name, sizeof(priv->master_adap.name),
+ "i2c-%d-slave", slv_adap->nr);
+ priv->master_adap.owner = THIS_MODULE;
+ priv->master_adap.class = slv_adap->class;
+ priv->master_adap.algo = &priv->master_algo;
+ priv->master_adap.algo_data = priv;
+ priv->master_adap.dev.parent = slv_adap->parent_dev;
+
+ if (force_nr) {
+ priv->master_adap.nr = slv_adap->nr;
+ ret = i2c_add_numbered_adapter(&priv->master_adap);
+ } else {
+ ret = i2c_add_adapter(&priv->master_adap);
+ }
+ if (ret < 0) {
+ dev_err(slv_adap->parent_dev,
+ "failed to add slave-adapter (error=%d)\n", ret);
+ kfree(priv);
+ return ret;
+ }
+ slv_adap->dev = &priv->master_adap.dev;
+ dev_info(slv_adap->parent_dev, "Added slave i2c bus %d\n",
+ i2c_adapter_id(&priv->master_adap));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_add_slave_adapter);
+
+int i2c_del_slave_adapter(struct i2c_slave_adapter *slv_adap)
+{
+ struct i2c_slave_priv *priv = slv_adap->parent_data;
+ int ret;
+
+ ret = i2c_del_adapter(&priv->master_adap);
+ if (ret < 0)
+ return ret;
+ kfree(priv);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_del_slave_adapter);
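
To tie the API above together, a minimal, hypothetical client sketch; it assumes "client" is an i2c_client registered on an adapter created through i2c_add_slave_adapter() (otherwise the functionality check triggers BUG()), and the message contents and timeouts are arbitrary:

#include <linux/i2c.h>
#include <linux/i2c-slave.h>

static int example_slave_ping(struct i2c_client *client)
{
	char rx[16];
	int ret;

	/* Respond with 0xFF whenever the tx buffer is empty. */
	ret = i2c_slave_start(client, 0xFF);
	if (ret < 0)
		return ret;

	/* Queue four bytes; they go out on the next master read cycle. */
	ret = i2c_slave_send(client, "ping", 4);
	if (ret < 0)
		goto stop;

	/* Give the master up to 100 ms to drain the tx buffer. */
	ret = i2c_slave_get_tx_status(client, 100);
	if (ret < 0)
		goto stop;

	/* Read up to 16 bytes, waiting up to 500 ms for data to arrive. */
	ret = i2c_slave_recv(client, rx, sizeof(rx), 0, 500);

stop:
	i2c_slave_stop(client, 1);	/* stop and clear both ring buffers */
	return ret;
}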
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
index 6f8953664636..dd14ae38d3ee 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/pca954x.c
@@ -41,6 +41,9 @@
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/i2c/pca954x.h>
@@ -62,6 +65,8 @@ struct pca954x {
struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
u8 last_chan; /* last register value */
+ struct regulator *vcc_reg;
+ struct regulator *i2c_reg;
};
struct chip_desc {
@@ -123,6 +128,26 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
struct i2c_client *client, u8 val)
{
int ret = -ENODEV;
+ struct pca954x *data = i2c_get_clientdata(client);
+
+ /* Increase ref count for pca954x vcc */
+ if (data->vcc_reg) {
+ ret = regulator_enable(data->vcc_reg);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable vcc\n",
+ __func__);
+ goto vcc_regulator_failed;
+ }
+ }
+ /* Increase ref count for pca954x vcc_i2c */
+ if (data->i2c_reg) {
+ ret = regulator_enable(data->i2c_reg);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable vcc_i2c\n",
+ __func__);
+ goto i2c_regulator_failed;
+ }
+ }
if (adap->algo->master_xfer) {
struct i2c_msg msg;
@@ -142,6 +167,15 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
val, I2C_SMBUS_BYTE, &data);
}
+ /* Decrease ref count for pca954x vcc_i2c */
+ if (data->i2c_reg)
+ regulator_disable(data->i2c_reg);
+
+i2c_regulator_failed:
+ /* Decrease ref count for pca954x vcc */
+ if (data->vcc_reg)
+ regulator_disable(data->vcc_reg);
+vcc_regulator_failed:
return ret;
}
@@ -201,15 +235,71 @@ static int pca954x_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
+ /* Get regulator pointer for pca954x vcc */
+ data->vcc_reg = regulator_get(&client->dev, "vcc");
+ if (PTR_ERR(data->vcc_reg) == -ENODEV)
+ data->vcc_reg = NULL;
+ else if (IS_ERR(data->vcc_reg)) {
+ dev_err(&client->dev, "%s: failed to get vcc\n",
+ __func__);
+ ret = PTR_ERR(data->vcc_reg);
+ goto exit_free;
+ }
+ /* Get regulator pointer for pca954x vcc_i2c */
+ data->i2c_reg = regulator_get(&client->dev, "vcc_i2c");
+ if (PTR_ERR(data->i2c_reg) == -ENODEV)
+ data->i2c_reg = NULL;
+ else if (IS_ERR(data->i2c_reg)) {
+ dev_err(&client->dev, "%s: failed to get vcc_i2c\n",
+ __func__);
+ ret = PTR_ERR(data->i2c_reg);
+ regulator_put(data->vcc_reg);
+ goto exit_free;
+ }
+
+ /* Increase ref count for pca954x vcc */
+ if (data->vcc_reg) {
+ pr_info("%s: enable vcc\n", __func__);
+ ret = regulator_enable(data->vcc_reg);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable vcc\n",
+ __func__);
+ goto exit_regulator_put;
+ }
+ }
+ /* Increase ref count for pca954x vcc_i2c */
+ if (data->i2c_reg) {
+ pr_info("%s: enable vcc_i2c\n", __func__);
+ ret = regulator_enable(data->i2c_reg);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable vcc_i2c\n",
+ __func__);
+ goto exit_vcc_regulator_disable;
+ }
+ }
+
+ /*
+ * Power-On Reset takes time.
+ * I2C is ready after Power-On Reset.
+ */
+ msleep(1);
+
/* Write the mux register at addr to verify
* that the mux is in fact present. This also
* initializes the mux to disconnected state.
*/
if (i2c_smbus_write_byte(client, 0) < 0) {
dev_warn(&client->dev, "probe failed\n");
- goto exit_free;
+ goto exit_regulator_disable;
}
+ /* Decrease ref count for pca954x vcc */
+ if (data->vcc_reg)
+ regulator_disable(data->vcc_reg);
+ /* Decrease ref count for pca954x vcc_i2c */
+ if (data->i2c_reg)
+ regulator_disable(data->i2c_reg);
+
data->type = id->driver_data;
data->last_chan = 0; /* force the first selection */
@@ -250,6 +340,15 @@ static int pca954x_probe(struct i2c_client *client,
virt_reg_failed:
for (num--; num >= 0; num--)
i2c_del_mux_adapter(data->virt_adaps[num]);
+exit_regulator_disable:
+ if (data->i2c_reg)
+ regulator_disable(data->i2c_reg);
+exit_vcc_regulator_disable:
+ if (data->vcc_reg)
+ regulator_disable(data->vcc_reg);
+exit_regulator_put:
+ regulator_put(data->i2c_reg);
+ regulator_put(data->vcc_reg);
exit_free:
kfree(data);
err:
@@ -270,6 +369,9 @@ static int pca954x_remove(struct i2c_client *client)
data->virt_adaps[i] = NULL;
}
+ regulator_put(data->i2c_reg);
+ regulator_put(data->vcc_reg);
+
kfree(data);
return 0;
}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 23e82e46656d..c0e639c1b179 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -161,6 +161,15 @@ config INPUT_APMPOWER
To compile this driver as a module, choose M here: the
module will be called apm-power.
+config INPUT_KEYRESET
+ tristate "Reset key"
+ depends on INPUT
+ ---help---
+ Say Y here if you want to reboot when certain keys are pressed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keyreset.
+
comment "Input Device Drivers"
source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 0c789490e0b3..5d4593d3101d 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4cf25347b015..5c5f9db28075 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/major.h>
#include <linux/device.h>
+#include <linux/wakelock.h>
#include "input-compat.h"
struct evdev {
@@ -43,6 +44,8 @@ struct evdev_client {
unsigned int tail;
unsigned int packet_head; /* [future] position of the first element of next packet */
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+ struct wake_lock wake_lock;
+ char name[28];
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
@@ -59,6 +62,7 @@ static void evdev_pass_event(struct evdev_client *client,
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
+ wake_lock_timeout(&client->wake_lock, 5 * HZ);
client->buffer[client->head++] = *event;
client->head &= client->bufsize - 1;
@@ -94,8 +98,11 @@ static void evdev_event(struct input_handle *handle,
struct evdev *evdev = handle->private;
struct evdev_client *client;
struct input_event event;
+ struct timespec ts;
- do_gettimeofday(&event.time);
+ ktime_get_ts(&ts);
+ event.time.tv_sec = ts.tv_sec;
+ event.time.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
event.type = type;
event.code = code;
event.value = value;
@@ -255,6 +262,7 @@ static int evdev_release(struct inode *inode, struct file *file)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
+ wake_lock_destroy(&client->wake_lock);
kfree(client);
evdev_close_device(evdev);
@@ -306,6 +314,9 @@ static int evdev_open(struct inode *inode, struct file *file)
client->bufsize = bufsize;
spin_lock_init(&client->buffer_lock);
+ snprintf(client->name, sizeof(client->name), "%s-%d",
+ dev_name(&evdev->dev), task_tgid_vnr(current));
+ wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
client->evdev = evdev;
evdev_attach_client(evdev, client);
@@ -320,6 +331,7 @@ static int evdev_open(struct inode *inode, struct file *file)
err_free_client:
evdev_detach_client(evdev, client);
+ wake_lock_destroy(&client->wake_lock);
kfree(client);
err_put_evdev:
put_device(&evdev->dev);
@@ -369,10 +381,12 @@ static int evdev_fetch_next_event(struct evdev_client *client,
spin_lock_irq(&client->buffer_lock);
- have_event = client->head != client->tail;
+ have_event = client->packet_head != client->tail;
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
+ if (client->head == client->tail)
+ wake_unlock(&client->wake_lock);
}
spin_unlock_irq(&client->buffer_lock);
@@ -391,14 +405,12 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
if (count < input_event_size())
return -EINVAL;
- if (client->packet_head == client->tail && evdev->exist &&
- (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
-
- retval = wait_event_interruptible(evdev->wait,
- client->packet_head != client->tail || !evdev->exist);
- if (retval)
- return retval;
+ if (!(file->f_flags & O_NONBLOCK)) {
+ retval = wait_event_interruptible(evdev->wait,
+ client->packet_head != client->tail || !evdev->exist);
+ if (retval)
+ return retval;
+ }
if (!evdev->exist)
return -ENODEV;
@@ -412,6 +424,8 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
retval += input_event_size();
}
+ if (retval == 0 && file->f_flags & O_NONBLOCK)
+ retval = -EAGAIN;
return retval;
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b4dee9d5a055..9433f4e5516c 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -203,6 +203,23 @@ config KEYBOARD_GPIO_POLLED
To compile this driver as a module, choose M here: the
module will be called gpio_keys_polled.
+config KEYBOARD_INTERRUPT
+ tristate "Interrupt Buttons"
+ default n
+ help
+ This driver implements support for buttons connected
+ directly to interrupt pins of a peripheral, where the
+ peripheral does not support any other functionality
+ (such as gpio) on those pins.
+
+ Say Y here if your device has buttons connected
+ directly to such interrupt pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which interrupts are used.
+
+ To compile this driver as a module, choose M here: the
+ module will be called interrupt_keys.
+
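The help text above notes that board code must provide a platform device with the interrupt configuration; a minimal, hypothetical board-file fragment could look as follows (the IRQ number, field types and struct layout are assumptions inferred from the driver code later in this patch):

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt_keys.h>

static struct interrupt_keys_button board_int_keys[] = {
	{
		.code = KEY_POWER,
		.irq = 100,			/* board-specific interrupt line */
		.type = EV_KEY,
		.wakeup = 1,
		.debounce_interval = 10,	/* ms; 0 sends press+release at once */
		.desc = "power-key",
	},
};

static struct interrupt_keys_platform_data board_int_keys_pdata = {
	.int_buttons = board_int_keys,
	.nbuttons = ARRAY_SIZE(board_int_keys),
};

static struct platform_device board_int_keys_device = {
	.name = "interrupt-keys",
	.id = -1,
	.dev = {
		.platform_data = &board_int_keys_pdata,
	},
};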
config KEYBOARD_TCA6416
tristate "TCA6416/TCA6408A Keypad Support"
depends on I2C
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index ddde0fd476f7..eaa5eda68a43 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the input core drivers.
#
+GCOV_PROFILE_tegra-kbc.o := y
# Each configuration option enables a list of files.
@@ -15,6 +16,7 @@ obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
+obj-$(CONFIG_KEYBOARD_INTERRUPT) += interrupt_keys.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 67df91af8424..603dadefecae 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -4,6 +4,8 @@
* Copyright 2005 Phil Blundell
* Copyright 2010, 2011 David Jander <david@protonic.nl>
*
+ * Copyright 2010-2011 NVIDIA Corporation
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -714,15 +716,29 @@ static int gpio_keys_suspend(struct device *dev)
static int gpio_keys_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+ int wakeup_key = KEY_RESERVED;
int i;
+ if (pdata && pdata->wakeup_key)
+ wakeup_key = pdata->wakeup_key();
+
for (i = 0; i < ddata->n_buttons; i++) {
struct gpio_keys_button *button = ddata->data[i].button;
if (button->wakeup && device_may_wakeup(dev)) {
int irq = gpio_to_irq(button->gpio);
disable_irq_wake(irq);
+
+ if (wakeup_key == button->code) {
+ unsigned int type = button->type ?: EV_KEY;
+
+ input_event(ddata->input, type, button->code, 1);
+ input_event(ddata->input, type, button->code, 0);
+ input_sync(ddata->input);
+ }
}
gpio_keys_report_event(&ddata->data[i]);
diff --git a/drivers/input/keyboard/interrupt_keys.c b/drivers/input/keyboard/interrupt_keys.c
new file mode 100644
index 000000000000..0117e87788b9
--- /dev/null
+++ b/drivers/input/keyboard/interrupt_keys.c
@@ -0,0 +1,350 @@
+/*
+ * drivers/input/keyboard/interrupt_keys.c
+ * Key driver for keys directly connected to interrupt lines.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt_keys.h>
+#include <linux/spinlock.h>
+
+enum {
+ KEY_RELEASED = 0,
+ KEY_PRESSED,
+};
+
+struct interrupt_button_data {
+ struct interrupt_keys_button *button;
+ struct input_dev *input;
+ struct timer_list timer;
+ int timer_debounce; /* in msecs */
+ bool disabled;
+ int key_state;
+ spinlock_t lock;
+};
+
+struct interrupt_keys_drvdata {
+ struct input_dev *input;
+ struct mutex disable_lock;
+ unsigned int n_int_buttons;
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+ struct interrupt_button_data data[0];
+};
+
+static void interrupt_keys_timer(unsigned long _data)
+{
+ struct interrupt_button_data *bdata =
+ (struct interrupt_button_data *)_data;
+ struct interrupt_keys_button *button = bdata->button;
+ struct input_dev *input = bdata->input;
+ unsigned int type = button->type ?: EV_KEY;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&bdata->lock, iflags);
+ if (bdata->key_state == KEY_PRESSED) {
+ input_event(input, type, button->code, 0);
+ input_sync(input);
+ bdata->key_state = KEY_RELEASED;
+ } else
+ dev_info(&input->dev, "Key already released, not sending "
+ "any event\n");
+ spin_unlock_irqrestore(&bdata->lock, iflags);
+ return;
+}
+
+static irqreturn_t interrupt_keys_isr(int irq, void *dev_id)
+{
+ struct interrupt_button_data *bdata = dev_id;
+ struct interrupt_keys_button *button = bdata->button;
+ struct input_dev *input = bdata->input;
+ unsigned int type = button->type ?: EV_KEY;
+ unsigned long iflags;
+
+ BUG_ON(irq != button->irq);
+
+ spin_lock_irqsave(&bdata->lock, iflags);
+ if (bdata->key_state == KEY_RELEASED) {
+ input_event(input, type, button->code, 1);
+ input_sync(input);
+ if (!bdata->timer_debounce) {
+ input_event(input, type, button->code, 0);
+ input_sync(input);
+ spin_unlock_irqrestore(&bdata->lock, iflags);
+ return IRQ_HANDLED;
+ }
+ bdata->key_state = KEY_PRESSED;
+ }
+
+ if ((bdata->key_state == KEY_PRESSED) && (bdata->timer_debounce)) {
+ spin_unlock_irqrestore(&bdata->lock, iflags);
+ mod_timer(&bdata->timer,
+ jiffies + msecs_to_jiffies(bdata->timer_debounce));
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&bdata->lock, iflags);
+
+ /* Should not reach this point */
+ WARN_ON(1);
+ return IRQ_HANDLED;
+}
+
+static int __devinit interrupt_keys_setup_key(struct platform_device *pdev,
+ struct interrupt_button_data *bdata,
+ struct interrupt_keys_button *button)
+{
+ char *desc = button->desc ? button->desc : "int_keys";
+ struct device *dev = &pdev->dev;
+ unsigned long irqflags;
+ int irq, error;
+
+ setup_timer(&bdata->timer, interrupt_keys_timer, (unsigned long)bdata);
+ spin_lock_init(&bdata->lock);
+
+ irq = button->irq;
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev, "Invalid irq number %d\n", button->irq);
+ goto fail;
+ }
+
+ irqflags = 0;
+ /*
+ * If platform has specified that the button can be disabled,
+ * we don't want it to share the interrupt line.
+ */
+ if (!button->can_disable)
+ irqflags |= IRQF_SHARED;
+
+ error = request_threaded_irq(irq, NULL, interrupt_keys_isr,
+ irqflags, desc, bdata);
+ if (error) {
+ dev_err(dev, "Unable to register irq %d; error %d\n",
+ irq, error);
+ goto fail;
+ }
+ return 0;
+
+fail:
+ return error;
+}
+
+static int interrupt_keys_open(struct input_dev *input)
+{
+ struct interrupt_keys_drvdata *ddata = input_get_drvdata(input);
+
+ return ddata->enable ? ddata->enable(input->dev.parent) : 0;
+}
+
+static void interrupt_keys_close(struct input_dev *input)
+{
+ struct interrupt_keys_drvdata *ddata = input_get_drvdata(input);
+
+ if (ddata->disable)
+ ddata->disable(input->dev.parent);
+}
+
+static int __devinit interrupt_keys_probe(struct platform_device *pdev)
+{
+ struct interrupt_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct interrupt_keys_drvdata *ddata;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ int i, error;
+ int wakeup = 0;
+
+ ddata = kzalloc(sizeof(struct interrupt_keys_drvdata) +
+ pdata->nbuttons * sizeof(struct interrupt_button_data),
+ GFP_KERNEL);
+ input = input_allocate_device();
+ if (!ddata || !input) {
+ dev_err(dev, "failed to allocate state\n");
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ ddata->input = input;
+ ddata->n_int_buttons = pdata->nbuttons;
+ ddata->enable = pdata->enable;
+ ddata->disable = pdata->disable;
+ mutex_init(&ddata->disable_lock);
+
+ platform_set_drvdata(pdev, ddata);
+ input_set_drvdata(input, ddata);
+
+ input->name = pdev->name;
+ input->phys = "int-keys/input0";
+ input->dev.parent = &pdev->dev;
+ input->open = interrupt_keys_open;
+ input->close = interrupt_keys_close;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ struct interrupt_keys_button *button = &pdata->int_buttons[i];
+ struct interrupt_button_data *bdata = &ddata->data[i];
+ unsigned int type = button->type ?: EV_KEY;
+
+ bdata->input = input;
+ bdata->button = button;
+ bdata->timer_debounce = button->debounce_interval;
+
+ error = interrupt_keys_setup_key(pdev, bdata, button);
+ if (error)
+ goto fail2;
+
+ if (button->wakeup)
+ wakeup = 1;
+
+ input_set_capability(input, type, button->code);
+ }
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device, error: %d\n",
+ error);
+ goto fail2;
+ }
+
+ device_init_wakeup(&pdev->dev, wakeup);
+
+ return 0;
+
+fail2:
+ while (--i >= 0) {
+ free_irq(pdata->int_buttons[i].irq, &ddata->data[i]);
+ if (ddata->data[i].timer_debounce)
+ del_timer_sync(&ddata->data[i].timer);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+fail1:
+ input_free_device(input);
+ kfree(ddata);
+
+ return error;
+}
+
+static int __devexit interrupt_keys_remove(struct platform_device *pdev)
+{
+ struct interrupt_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct interrupt_keys_drvdata *ddata = platform_get_drvdata(pdev);
+ struct input_dev *input = ddata->input;
+ int i;
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ free_irq(pdata->int_buttons[i].irq, &ddata->data[i]);
+ if (ddata->data[i].timer_debounce)
+ del_timer_sync(&ddata->data[i].timer);
+ }
+
+ input_unregister_device(input);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int interrupt_keys_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct interrupt_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct interrupt_keys_button *button;
+ int i;
+
+ if (device_may_wakeup(&pdev->dev)) {
+ for (i = 0; i < pdata->nbuttons; i++) {
+ button = &pdata->int_buttons[i];
+ if (button->wakeup)
+ enable_irq_wake(button->irq);
+ }
+ }
+ return 0;
+}
+
+static int interrupt_keys_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct interrupt_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct interrupt_keys_button *button;
+ int i;
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ button = &pdata->int_buttons[i];
+ if (button->wakeup && device_may_wakeup(&pdev->dev)) {
+ int irq = button->irq;
+ disable_irq_wake(irq);
+ }
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops interrupt_keys_pm_ops = {
+ .suspend = interrupt_keys_suspend,
+ .resume = interrupt_keys_resume,
+};
+#endif
+
+static struct platform_driver interrupt_keys_device_driver = {
+ .probe = interrupt_keys_probe,
+ .remove = __devexit_p(interrupt_keys_remove),
+ .driver = {
+ .name = "interrupt-keys",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &interrupt_keys_pm_ops,
+#endif
+ }
+};
+
+static int __init interrupt_keys_init(void)
+{
+ return platform_driver_register(&interrupt_keys_device_driver);
+}
+
+static void __exit interrupt_keys_exit(void)
+{
+ platform_driver_unregister(&interrupt_keys_device_driver);
+}
+
+module_init(interrupt_keys_init);
+module_exit(interrupt_keys_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Keyboard driver for CPU interrupts");
+MODULE_ALIAS("platform:interrupt-keys");
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index a5a77915c650..24f81d9d26d2 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -55,6 +55,7 @@
#define KBC_ROW_CFG0_0 0x8
#define KBC_COL_CFG0_0 0x18
+#define KBC_TO_CNT_0 0x24
#define KBC_INIT_DLY_0 0x28
#define KBC_RPT_DLY_0 0x2c
#define KBC_KP_ENT0_0 0x30
@@ -62,11 +63,15 @@
#define KBC_ROW0_MASK_0 0x38
#define KBC_ROW_SHIFT 3
+#define DEFAULT_SCAN_COUNT 2
+#define DEFAULT_INIT_DLY 5
struct tegra_kbc {
void __iomem *mmio;
struct input_dev *idev;
unsigned int irq;
+ unsigned int wake_enable_rows;
+ unsigned int wake_enable_cols;
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
@@ -78,6 +83,9 @@ struct tegra_kbc {
unsigned int num_pressed_keys;
struct timer_list timer;
struct clk *clk;
+ int is_open;
+ unsigned long scan_timeout_count;
+ unsigned long one_scan_time;
};
static const u32 tegra_kbc_default_keymap[] = {
@@ -417,11 +425,21 @@ static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
int i;
unsigned int rst_val;
- /* Either mask all keys or none. */
- rst_val = (filter && !pdata->wakeup) ? ~0 : 0;
+ BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
+ rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
for (i = 0; i < KBC_MAX_ROW; i++)
writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
+
+ if (filter) {
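+ /*
+ * All rows were masked above; clear the mask bit for each configured
+ * wake key so that only those row/column crossings can wake the system.
+ */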
+ for (i = 0; i < pdata->wake_cnt; i++) {
+ u32 val, addr;
+ addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
+ val = readl(kbc->mmio + addr);
+ val &= ~(1 << pdata->wake_cfg[i].col);
+ writel(val, kbc->mmio + addr);
+ }
+ }
}
static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
@@ -442,11 +460,14 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
row_cfg &= ~r_mask;
col_cfg &= ~c_mask;
- if (pdata->pin_cfg[i].is_row)
- row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
- else
- col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
-
+ if (pdata->pin_cfg[i].en) {
+ if (pdata->pin_cfg[i].is_row)
+ row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1)
+ << r_shft;
+ else
+ col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1)
+ << c_shft;
+ }
writel(row_cfg, kbc->mmio + r_offs);
writel(col_cfg, kbc->mmio + c_offs);
}
@@ -460,6 +481,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
u32 val = 0;
clk_enable(kbc->clk);
+ kbc->is_open = 1;
/* Reset the KBC controller to clear all previous status.*/
tegra_periph_reset_assert(kbc->clk);
@@ -480,6 +502,9 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
val |= KBC_CONTROL_KBC_EN; /* enable */
writel(val, kbc->mmio + KBC_CONTROL_0);
+ writel(DEFAULT_INIT_DLY, kbc->mmio + KBC_INIT_DLY_0);
+ writel(kbc->scan_timeout_count, kbc->mmio + KBC_TO_CNT_0);
+
/*
* Compute the delay(ns) from interrupt mode to continuous polling
* mode so the timer routine is scheduled appropriately.
@@ -526,6 +551,7 @@ static void tegra_kbc_stop(struct tegra_kbc *kbc)
del_timer_sync(&kbc->timer);
clk_disable(kbc->clk);
+ kbc->is_open = 0;
}
static int tegra_kbc_open(struct input_dev *dev)
@@ -583,9 +609,13 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
struct resource *res;
int irq;
int err;
+ int i;
int num_rows = 0;
unsigned int debounce_cnt;
unsigned int scan_time_rows;
+ unsigned long scan_tc;
+
+ dev_dbg(&pdev->dev, "KBC: tegra_kbc_probe\n");
if (!pdata)
return -EINVAL;
@@ -639,6 +669,14 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
goto err_iounmap;
}
+ kbc->is_open = 0;
+ kbc->wake_enable_rows = 0;
+ kbc->wake_enable_cols = 0;
+ for (i = 0; i < pdata->wake_cnt; i++) {
+ kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
+ kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
+ }
+
/*
* The time delay between two consecutive reads of the FIFO is
* the sum of the repeat time and the time taken for scanning
@@ -650,6 +688,17 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
+ if (pdata->scan_count)
+ scan_tc = DEFAULT_INIT_DLY + (scan_time_rows +
+ pdata->repeat_cnt) * pdata->scan_count;
+ else
+ scan_tc = DEFAULT_INIT_DLY + (scan_time_rows +
+ pdata->repeat_cnt) * DEFAULT_SCAN_COUNT;
+
+ kbc->one_scan_time = scan_time_rows + pdata->repeat_cnt;
+ /* Bit 19:0 is for scan timeout count */
+ kbc->scan_timeout_count = scan_tc & 0xFFFFF;
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
@@ -658,7 +707,10 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, kbc);
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ if (!pdata->disable_ev_rep)
+ input_dev->evbit[0] |= BIT_MASK(EV_REP);
+
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = kbc->keycode;
@@ -733,8 +785,31 @@ static int tegra_kbc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+ int timeout;
+ unsigned long int_st;
+
+ dev_dbg(&pdev->dev, "KBC: tegra_kbc_suspend\n");
+
+ if (!kbc->is_open)
+ return 0;
if (device_may_wakeup(&pdev->dev)) {
+ timeout = DIV_ROUND_UP((kbc->scan_timeout_count +
+ kbc->one_scan_time), 32);
+ timeout = DIV_ROUND_UP(timeout, 10);
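+ /*
+ * scan_timeout_count and one_scan_time are in KBC clock ticks; the
+ * divide by 32 converts ticks (32 per millisecond) to ms, and the
+ * divide by 10 gives the number of 10 ms polls of KBC_INT_0 below.
+ */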
+ while (timeout--) {
+ int_st = readl(kbc->mmio + KBC_INT_0);
+ if (int_st & 0x8) {
+ msleep(10);
+ continue;
+ }
+ break;
+ }
+ int_st = readl(kbc->mmio + KBC_INT_0);
+ if (int_st & 0x8)
+ dev_err(&pdev->dev, "KBC: Controller is not in "
+ "wakeupmode\n");
+
tegra_kbc_setup_wakekeys(kbc, true);
enable_irq_wake(kbc->irq);
/* Forcefully clear the interrupt status */
@@ -756,6 +831,9 @@ static int tegra_kbc_resume(struct device *dev)
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
int err = 0;
+ if (!kbc->is_open)
+ return tegra_kbc_start(kbc);
+
if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(kbc->irq);
tegra_kbc_setup_wakekeys(kbc, false);
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 000000000000..36208fe0baae
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,239 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+
+struct keyreset_state {
+ struct input_handler input_handler;
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+ spinlock_t lock;
+ int key_down_target;
+ int key_down;
+ int key_up;
+ int restart_disabled;
+ int (*reset_fn)(void);
+};
+
+int restart_requested;
+static void deferred_restart(struct work_struct *dummy)
+{
+ restart_requested = 2;
+ sys_sync();
+ restart_requested = 3;
+ kernel_restart(NULL);
+}
+static DECLARE_WORK(restart_work, deferred_restart);
+
+static void keyreset_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ unsigned long flags;
+ struct keyreset_state *state = handle->private;
+
+ if (type != EV_KEY)
+ return;
+
+ if (code >= KEY_MAX)
+ return;
+
+ if (!test_bit(code, state->keybit))
+ return;
+
+ spin_lock_irqsave(&state->lock, flags);
+ if (!test_bit(code, state->key) == !value)
+ goto done;
+ __change_bit(code, state->key);
+ if (test_bit(code, state->upbit)) {
+ if (value) {
+ state->restart_disabled = 1;
+ state->key_up++;
+ } else
+ state->key_up--;
+ } else {
+ if (value)
+ state->key_down++;
+ else
+ state->key_down--;
+ }
+ if (state->key_down == 0 && state->key_up == 0)
+ state->restart_disabled = 0;
+
+ pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value,
+ state->key_down, state->key_up, state->restart_disabled);
+
+ if (value && !state->restart_disabled &&
+ state->key_down == state->key_down_target) {
+ state->restart_disabled = 1;
+ if (restart_requested)
+ panic("keyboard reset failed, %d", restart_requested);
+ if (state->reset_fn) {
+ restart_requested = state->reset_fn();
+ } else {
+ pr_info("keyboard reset\n");
+ schedule_work(&restart_work);
+ restart_requested = 1;
+ }
+ }
+done:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keyreset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int i;
+ int ret;
+ struct input_handle *handle;
+ struct keyreset_state *state =
+ container_of(handler, struct keyreset_state, input_handler);
+
+ for (i = 0; i < KEY_MAX; i++) {
+ if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+ break;
+ }
+ if (i == KEY_MAX)
+ return -ENODEV;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "keyreset";
+ handle->private = state;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_input_register_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_input_open_device;
+
+ pr_info("using input dev %s for key reset\n", dev->name);
+
+ return 0;
+
+err_input_open_device:
+ input_unregister_handle(handle);
+err_input_register_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void keyreset_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id keyreset_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(input, keyreset_ids);
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+ int ret;
+ int key, *keyp;
+ struct keyreset_state *state;
+ struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ spin_lock_init(&state->lock);
+ keyp = pdata->keys_down;
+ while ((key = *keyp++)) {
+ if (key >= KEY_MAX)
+ continue;
+ state->key_down_target++;
+ __set_bit(key, state->keybit);
+ }
+ if (pdata->keys_up) {
+ keyp = pdata->keys_up;
+ while ((key = *keyp++)) {
+ if (key >= KEY_MAX)
+ continue;
+ __set_bit(key, state->keybit);
+ __set_bit(key, state->upbit);
+ }
+ }
+
+ if (pdata->reset_fn)
+ state->reset_fn = pdata->reset_fn;
+
+ state->input_handler.event = keyreset_event;
+ state->input_handler.connect = keyreset_connect;
+ state->input_handler.disconnect = keyreset_disconnect;
+ state->input_handler.name = KEYRESET_NAME;
+ state->input_handler.id_table = keyreset_ids;
+ ret = input_register_handler(&state->input_handler);
+ if (ret) {
+ kfree(state);
+ return ret;
+ }
+ platform_set_drvdata(pdev, state);
+ return 0;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+ struct keyreset_state *state = platform_get_drvdata(pdev);
+ input_unregister_handler(&state->input_handler);
+ kfree(state);
+ return 0;
+}
+
+
+struct platform_driver keyreset_driver = {
+ .driver.name = KEYRESET_NAME,
+ .probe = keyreset_probe,
+ .remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+ return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+ return platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
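keyreset matches any input device that reports one of the configured keys:
keys_down is a zero-terminated list that must all be held at once to trigger
the reset, keys_up (optional) blocks the reset while held, and reset_fn
(optional) replaces the default sys_sync() + kernel_restart() path.  A board
sketch, with key codes chosen only as examples:

#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

/* All of these must be held simultaneously to trigger the reset. */
static int board_reset_keys_down[] = {
	KEY_VOLUMEDOWN,
	KEY_POWER,
	0,	/* zero-terminated, as keyreset_probe() expects */
};

static struct keyreset_platform_data board_reset_pdata = {
	.keys_down = board_reset_keys_down,
	/* .keys_up and .reset_fn are optional and left unset here */
};

static struct platform_device board_reset_device = {
	.name = KEYRESET_NAME,	/* driver-name macro from <linux/keyreset.h> */
	.dev = {
		.platform_data = &board_reset_pdata,
	},
};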
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c9104bb4db06..cd861d97ffe0 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -214,6 +214,17 @@ config INPUT_ATI_REMOTE2
To compile this driver as a module, choose M here: the module will be
called ati_remote2.
+config INPUT_KEYCHORD
+ tristate "Key chord input driver support"
+ help
+ Say Y here if you want to enable the key chord driver
+ accessible at /dev/keychord. This driver can be used
+ for receiving notifications when client specified key
+ combinations are pressed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keychord.
+
config INPUT_KEYSPAN_REMOTE
tristate "Keyspan DMR USB remote control (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -343,6 +354,11 @@ config INPUT_SGI_BTNS
To compile this driver as a module, choose M here: the
module will be called sgi_btns.
+config INPUT_GPIO
+ tristate "GPIO driver support"
+ help
+ Say Y here if you want to support gpio based keys, wheels etc...
+
config HP_SDC_RTC
tristate "HP SDC Real Time Clock"
depends on (GSC || HP300) && SERIO
@@ -527,4 +543,20 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
+config INPUT_ALPS_GPIO_SCROLLWHEEL
+ tristate "Alps GPIO Scrollwheel"
+ depends on GENERIC_GPIO
+ help
+ This driver implements support for Alps SRBE
+ ScrollWheel connected to GPIO pins of various
+ CPUs (and some other chips).
+
+ Say Y here if your device has ScrollWheel connected
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
+
+ To compile this driver as a module, choose M here: the
+ module will be called alps_gpio_scrollwheel.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 299ad5edba84..02118f351ad3 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_INPUT_AD714X_SPI) += ad714x-spi.o
obj-$(CONFIG_INPUT_ADXL34X) += adxl34x.o
obj-$(CONFIG_INPUT_ADXL34X_I2C) += adxl34x-i2c.o
obj-$(CONFIG_INPUT_ADXL34X_SPI) += adxl34x-spi.o
+obj-$(CONFIG_INPUT_ALPS_GPIO_SCROLLWHEEL) += alps_gpio_scrollwheel.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
@@ -22,8 +23,10 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
+obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
diff --git a/drivers/input/misc/alps_gpio_scrollwheel.c b/drivers/input/misc/alps_gpio_scrollwheel.c
new file mode 100644
index 000000000000..4a789267c475
--- /dev/null
+++ b/drivers/input/misc/alps_gpio_scrollwheel.c
@@ -0,0 +1,428 @@
+/*
+ * kernel/drivers/input/misc/alps_gpio_scrollwheel.c
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * Driver for ScrollWheel on GPIO lines capable of generating interrupts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/gpio_scrollwheel.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct scrollwheel_button_data {
+ struct gpio_scrollwheel_button *button;
+ struct input_dev *input;
+ struct timer_list timer;
+ struct work_struct work;
+ int timer_debounce; /* in msecs */
+ int rotgpio;
+ bool disabled;
+};
+
+struct gpio_scrollwheel_drvdata {
+ struct input_dev *input;
+ struct mutex disable_lock;
+ unsigned int n_buttons;
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+ struct scrollwheel_button_data data[0];
+};
+
+static void scrollwheel_report_key(struct scrollwheel_button_data *bdata)
+{
+ struct gpio_scrollwheel_button *button = bdata->button;
+ struct input_dev *input = bdata->input;
+ int state = (gpio_get_value(button->gpio) ? 1 : 0) ^
+ button->active_low;
+ int state2 = 0;
+
+ switch (button->pinaction) {
+ case GPIO_SCROLLWHEEL_PIN_PRESS:
+ input_report_key(input, KEY_ENTER, 1);
+ input_report_key(input, KEY_ENTER, 0);
+ input_sync(input);
+ break;
+
+ case GPIO_SCROLLWHEEL_PIN_ROT1:
+ case GPIO_SCROLLWHEEL_PIN_ROT2:
+ state2 = (gpio_get_value(bdata->rotgpio) ? 1 : 0) ^
+ button->active_low;
+ if (state != state2) {
+ input_report_key(input, KEY_DOWN, 1);
+ input_report_key(input, KEY_DOWN, 0);
+ } else {
+ input_report_key(input, KEY_UP, 1);
+ input_report_key(input, KEY_UP, 0);
+ }
+ input_sync(input);
+ break;
+
+ default:
+ pr_err("%s:Line=%d, Invalid Pinaction\n", __func__, __LINE__);
+ }
+}
+
+static void scrollwheel_work_func(struct work_struct *work)
+{
+ struct scrollwheel_button_data *bdata =
+ container_of(work, struct scrollwheel_button_data, work);
+
+ scrollwheel_report_key(bdata);
+}
+
+static void scrollwheel_timer(unsigned long _data)
+{
+ struct scrollwheel_button_data *data =
+ (struct scrollwheel_button_data *)_data;
+
+ schedule_work(&data->work);
+}
+
+static irqreturn_t scrollwheel_isr(int irq, void *dev_id)
+{
+ struct scrollwheel_button_data *bdata = dev_id;
+ struct gpio_scrollwheel_button *button = bdata->button;
+
+ BUG_ON(irq != gpio_to_irq(button->gpio));
+
+ if (bdata->timer_debounce)
+ mod_timer(&bdata->timer,
+ jiffies + msecs_to_jiffies(bdata->timer_debounce));
+ else
+ schedule_work(&bdata->work);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit gpio_scrollwheel_setup_key(struct platform_device *pdev,
+ struct scrollwheel_button_data *bdata,
+ struct gpio_scrollwheel_button *button)
+{
+ char *desc = button->desc ? button->desc : "gpio_scrollwheel";
+ struct device *dev = &pdev->dev;
+ unsigned long irqflags;
+ int irq, error;
+
+ setup_timer(&bdata->timer, scrollwheel_timer, (unsigned long)bdata);
+ INIT_WORK(&bdata->work, scrollwheel_work_func);
+
+ error = gpio_request(button->gpio, desc);
+ if (error < 0) {
+ dev_err(dev, "failed to request GPIO %d, error %d\n",
+ button->gpio, error);
+ return error;
+ }
+
+ error = gpio_direction_input(button->gpio);
+ if (error < 0) {
+ dev_err(dev, "failed to configure"
+ " direction for GPIO %d, error %d\n",
+ button->gpio, error);
+ goto fail;
+ }
+
+ if (button->debounce_interval) {
+ error = gpio_set_debounce(button->gpio,
+ button->debounce_interval * 1000);
+ /* use timer if gpiolib doesn't provide debounce */
+ if (error < 0)
+ bdata->timer_debounce = button->debounce_interval;
+ }
+
+ irq = gpio_to_irq(button->gpio);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev, "Unable to get irq no for GPIO %d, error %d\n",
+ button->gpio, error);
+ goto fail;
+ }
+
+ irqflags = IRQF_TRIGGER_FALLING;
+
+ error = request_irq(irq, scrollwheel_isr, irqflags, desc, bdata);
+ if (error) {
+ dev_err(dev, "Unable to claim irq %d; error %d\n",
+ irq, error);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return error;
+}
+
+static int gpio_scrollwheel_open(struct input_dev *input)
+{
+ struct gpio_scrollwheel_drvdata *ddata = input_get_drvdata(input);
+
+ return ddata->enable ? ddata->enable(input->dev.parent) : 0;
+}
+
+static void gpio_scrollwheel_close(struct input_dev *input)
+{
+ struct gpio_scrollwheel_drvdata *ddata = input_get_drvdata(input);
+
+ if (ddata->disable)
+ ddata->disable(input->dev.parent);
+}
+
+static int __devinit gpio_scrollwheel_probe(struct platform_device *pdev)
+{
+ struct gpio_scrollwheel_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_scrollwheel_drvdata *ddata;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ int i, error;
+
+ ddata = kzalloc(sizeof(struct gpio_scrollwheel_drvdata) +
+ pdata->nbuttons * sizeof(struct scrollwheel_button_data),
+ GFP_KERNEL);
+ if (ddata == NULL) {
+ dev_err(dev, "failed to allocate memory\n");
+ error = -ENOMEM;
+ return error;
+ }
+
+ input = input_allocate_device();
+ if (input == NULL) {
+ dev_err(dev, "failed to allocate input device\n");
+ error = -ENOMEM;
+ kfree(ddata);
+ return error;
+ }
+
+ ddata->input = input;
+ ddata->n_buttons = pdata->nbuttons;
+ ddata->enable = pdata->enable;
+ ddata->disable = pdata->disable;
+ mutex_init(&ddata->disable_lock);
+
+ platform_set_drvdata(pdev, ddata);
+ input_set_drvdata(input, ddata);
+
+ input->name = pdev->name;
+ input->phys = "gpio-scrollwheel/input0";
+ input->dev.parent = &pdev->dev;
+ input->open = gpio_scrollwheel_open;
+ input->close = gpio_scrollwheel_close;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ struct gpio_scrollwheel_button *button = &pdata->buttons[i];
+ struct scrollwheel_button_data *bdata = &ddata->data[i];
+
+ bdata->input = input;
+ bdata->button = button;
+
+ if (button->pinaction == GPIO_SCROLLWHEEL_PIN_PRESS ||
+ button->pinaction == GPIO_SCROLLWHEEL_PIN_ROT1) {
+ error = gpio_scrollwheel_setup_key(pdev, bdata, button);
+ if (error)
+ goto fail;
+ } else {
+ if (button->pinaction == GPIO_SCROLLWHEEL_PIN_ONOFF) {
+ gpio_request(button->gpio, button->desc);
+ gpio_direction_output(button->gpio, 0);
+ }
+
+ if (button->pinaction == GPIO_SCROLLWHEEL_PIN_ROT2) {
+ gpio_request(button->gpio, button->desc);
+ gpio_direction_input(button->gpio);
+ /* Save rot2 gpio number in rot1 context */
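+ /* (assumes the ROT1 button sits at index 2 of pdata->buttons) */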
+ ddata->data[2].rotgpio = button->gpio;
+ }
+ }
+ }
+
+ /* set input capability */
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(KEY_ENTER, input->keybit);
+ __set_bit(KEY_UP, input->keybit);
+ __set_bit(KEY_DOWN, input->keybit);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device, error: %d\n",
+ error);
+ goto fail;
+ }
+
+ input_sync(input);
+
+ return 0;
+
+fail:
+ while (--i >= 0) {
+ if (pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_PRESS ||
+ pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_ROT1) {
+ free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]);
+ if (ddata->data[i].timer_debounce)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
+ }
+ gpio_free(pdata->buttons[i].gpio);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ input_free_device(input);
+ kfree(ddata);
+ return error;
+}
+
+static int __devexit gpio_scrollwheel_remove(struct platform_device *pdev)
+{
+ struct gpio_scrollwheel_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_scrollwheel_drvdata *ddata = platform_get_drvdata(pdev);
+ struct input_dev *input = ddata->input;
+ int i;
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ if (pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_PRESS ||
+ pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_ROT1) {
+ int irq = gpio_to_irq(pdata->buttons[i].gpio);
+ free_irq(irq, &ddata->data[i]);
+ if (ddata->data[i].timer_debounce)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
+ }
+ gpio_free(pdata->buttons[i].gpio);
+ }
+
+ input_unregister_device(input);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int gpio_scrollwheel_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_scrollwheel_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_scrollwheel_drvdata *ddata = platform_get_drvdata(pdev);
+ int i, irq;
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ if (pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_PRESS ||
+ pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_ROT1) {
+ irq = gpio_to_irq(pdata->buttons[i].gpio);
+ disable_irq(irq);
+ if (ddata->data[i].timer_debounce)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
+ } else {
+ if (pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_ONOFF)
+ gpio_direction_output(pdata->buttons[i].gpio, 1);
+ else {
+ irq = gpio_to_irq(pdata->buttons[i].gpio);
+ disable_irq(irq);
+ }
+ }
+ }
+ return 0;
+}
+
+static int gpio_scrollwheel_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_scrollwheel_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_scrollwheel_drvdata *ddata = platform_get_drvdata(pdev);
+ int i, irq;
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ if (pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_PRESS ||
+ pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_ROT1) {
+ irq = gpio_to_irq(pdata->buttons[i].gpio);
+ enable_irq(irq);
+ if (ddata->data[i].timer_debounce)
+ setup_timer(&ddata->data[i].timer,
+ scrollwheel_timer,
+ (unsigned long)&ddata->data[i]);
+
+ INIT_WORK(&ddata->data[i].work, scrollwheel_work_func);
+ } else {
+ if (pdata->buttons[i].pinaction == GPIO_SCROLLWHEEL_PIN_ONOFF)
+ gpio_direction_output(pdata->buttons[i].gpio, 0);
+ else {
+ irq = gpio_to_irq(pdata->buttons[i].gpio);
+ enable_irq(irq);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops gpio_scrollwheel_pm_ops = {
+ .suspend = gpio_scrollwheel_suspend,
+ .resume = gpio_scrollwheel_resume,
+};
+#endif
+
+static struct platform_driver gpio_scrollwheel_device_driver = {
+ .probe = gpio_scrollwheel_probe,
+ .remove = __devexit_p(gpio_scrollwheel_remove),
+ .driver = {
+ .name = "alps-gpio-scrollwheel",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &gpio_scrollwheel_pm_ops,
+#endif
+ }
+};
+
+static int __init gpio_scrollwheel_init(void)
+{
+ return platform_driver_register(&gpio_scrollwheel_device_driver);
+}
+
+static void __exit gpio_scrollwheel_exit(void)
+{
+ platform_driver_unregister(&gpio_scrollwheel_device_driver);
+}
+
+module_init(gpio_scrollwheel_init);
+module_exit(gpio_scrollwheel_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Alps SRBE ScrollWheel driver");
+
+MODULE_ALIAS("platform:alps-gpio-scrollwheel");
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 000000000000..0acf4a576f53
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+ struct gpio_event_input_devs *input_devs;
+ struct gpio_event_axis_info *info;
+ uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+ [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+ [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+ [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+ [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+ [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+ [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+ [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+ [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+ return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+ [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */
+ [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */
+ [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */
+ [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */
+ [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */
+ [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */
+ [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */
+ [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */
+ [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */
+ [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+ struct gpio_event_axis_info *info, uint16_t in)
+{
+ return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+ struct gpio_event_axis_info *ai = as->info;
+ int i;
+ int change;
+ uint16_t state = 0;
+ uint16_t pos;
+ uint16_t old_pos = as->pos;
+ for (i = ai->count - 1; i >= 0; i--)
+ state = (state << 1) | gpio_get_value(ai->gpio[i]);
+ pos = ai->map(ai, state);
+ if (ai->flags & GPIOEAF_PRINT_RAW)
+ pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+ ai->type, ai->code, state, old_pos, pos);
+ if (report && pos != old_pos) {
+ if (ai->type == EV_REL) {
+ change = (ai->decoded_size + pos - old_pos) %
+ ai->decoded_size;
+ if (change > ai->decoded_size / 2)
+ change -= ai->decoded_size;
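+ /*
+ * change is now the shortest signed rotation, in the range
+ * (-decoded_size/2, decoded_size/2]; an exact half-cycle step
+ * is direction-ambiguous and is dropped below.
+ */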
+ if (change == ai->decoded_size / 2) {
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d unknown direction, "
+ "pos %d -> %d\n", ai->type,
+ ai->code, old_pos, pos);
+ change = 0; /* no closest direction */
+ }
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d change %d\n",
+ ai->type, ai->code, change);
+ input_report_rel(as->input_devs->dev[ai->dev],
+ ai->code, change);
+ } else {
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d now %d\n",
+ ai->type, ai->code, pos);
+ input_event(as->input_devs->dev[ai->dev],
+ ai->type, ai->code, pos);
+ }
+ input_sync(as->input_devs->dev[ai->dev]);
+ }
+ as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_axis_state *as = dev_id;
+ gpio_event_update_axis(as, 1);
+ return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int ret;
+ int i;
+ int irq;
+ struct gpio_event_axis_info *ai;
+ struct gpio_axis_state *as;
+
+ ai = container_of(info, struct gpio_event_axis_info, info);
+ if (func == GPIO_EVENT_FUNC_SUSPEND) {
+ for (i = 0; i < ai->count; i++)
+ disable_irq(gpio_to_irq(ai->gpio[i]));
+ return 0;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME) {
+ for (i = 0; i < ai->count; i++)
+ enable_irq(gpio_to_irq(ai->gpio[i]));
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ *data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (as == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_axis_state_failed;
+ }
+ as->input_devs = input_devs;
+ as->info = ai;
+ if (ai->dev >= input_devs->count) {
+ pr_err("gpio_event_axis: bad device index %d >= %d "
+ "for %d:%d\n", ai->dev, input_devs->count,
+ ai->type, ai->code);
+ ret = -EINVAL;
+ goto err_bad_device_index;
+ }
+
+ input_set_capability(input_devs->dev[ai->dev],
+ ai->type, ai->code);
+ if (ai->type == EV_ABS) {
+ input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+ 0, ai->decoded_size - 1, 0, 0);
+ }
+ for (i = 0; i < ai->count; i++) {
+ ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+ if (ret < 0)
+ goto err_request_gpio_failed;
+ ret = gpio_direction_input(ai->gpio[i]);
+ if (ret < 0)
+ goto err_gpio_direction_input_failed;
+ ret = irq = gpio_to_irq(ai->gpio[i]);
+ if (ret < 0)
+ goto err_get_irq_num_failed;
+ ret = request_irq(irq, gpio_axis_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "gpio_event_axis", as);
+ if (ret < 0)
+ goto err_request_irq_failed;
+ }
+ gpio_event_update_axis(as, 0);
+ return 0;
+ }
+
+ ret = 0;
+ as = *data;
+ for (i = ai->count - 1; i >= 0; i--) {
+ free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+ gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+ ;
+ }
+err_bad_device_index:
+ kfree(as);
+ *data = NULL;
+err_alloc_axis_state_failed:
+ return ret;
+}
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 000000000000..a98be67d1ab0
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,260 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+ struct gpio_event_input_devs *input_devs;
+ const struct gpio_event_platform_data *info;
+ struct early_suspend early_suspend;
+ void *state[0];
+};
+
+static int gpio_input_event(
+ struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+ int i;
+ int devnr;
+ int ret = 0;
+ int tmp_ret;
+ struct gpio_event_info **ii;
+ struct gpio_event *ip = input_get_drvdata(dev);
+
+ for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+ if (ip->input_devs->dev[devnr] == dev)
+ break;
+ if (devnr == ip->input_devs->count) {
+ pr_err("gpio_input_event: unknown device %p\n", dev);
+ return -EIO;
+ }
+
+ for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+ if ((*ii)->event) {
+ tmp_ret = (*ii)->event(ip->input_devs, *ii,
+ &ip->state[i],
+ devnr, type, code, value);
+ if (tmp_ret)
+ ret = tmp_ret;
+ }
+ }
+ return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+ int i;
+ int ret;
+ struct gpio_event_info **ii;
+
+ if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+ ii = ip->info->info;
+ for (i = 0; i < ip->info->info_count; i++, ii++) {
+ if ((*ii)->func == NULL) {
+ ret = -ENODEV;
+ pr_err("gpio_event_probe: Incomplete pdata, "
+ "no function\n");
+ goto err_no_func;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+ continue;
+ ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+ func);
+ if (ret) {
+ pr_err("gpio_event_probe: function failed\n");
+ goto err_func_failed;
+ }
+ }
+ return 0;
+ }
+
+ ret = 0;
+ i = ip->info->info_count;
+ ii = ip->info->info + i;
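+ /*
+ * Walk the info blocks in reverse for SUSPEND/UNINIT.  The same loop
+ * doubles as the unwind path when the forward INIT/RESUME loop above
+ * fails: clearing bit 0 of func maps INIT to UNINIT and RESUME to
+ * SUSPEND (per the GPIO_EVENT_FUNC_* encoding in gpio_event.h).
+ */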
+ while (i > 0) {
+ i--;
+ ii--;
+ if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+ continue;
+ (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+ ;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void gpio_event_suspend(struct early_suspend *h)
+{
+ struct gpio_event *ip;
+ ip = container_of(h, struct gpio_event, early_suspend);
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+ ip->info->power(ip->info, 0);
+}
+
+void gpio_event_resume(struct early_suspend *h)
+{
+ struct gpio_event *ip;
+ ip = container_of(h, struct gpio_event, early_suspend);
+ ip->info->power(ip->info, 1);
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+#endif
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+ int err;
+ struct gpio_event *ip;
+ struct gpio_event_platform_data *event_info;
+ int dev_count = 1;
+ int i;
+ int registered = 0;
+
+ event_info = pdev->dev.platform_data;
+ if (event_info == NULL) {
+ pr_err("gpio_event_probe: No pdata\n");
+ return -ENODEV;
+ }
+ if ((!event_info->name && !event_info->names[0]) ||
+ !event_info->info || !event_info->info_count) {
+ pr_err("gpio_event_probe: Incomplete pdata\n");
+ return -ENODEV;
+ }
+ if (!event_info->name)
+ while (event_info->names[dev_count])
+ dev_count++;
+ ip = kzalloc(sizeof(*ip) +
+ sizeof(ip->state[0]) * event_info->info_count +
+ sizeof(*ip->input_devs) +
+ sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+ if (ip == NULL) {
+ err = -ENOMEM;
+ pr_err("gpio_event_probe: Failed to allocate private data\n");
+ goto err_kp_alloc_failed;
+ }
+ ip->input_devs = (void*)&ip->state[event_info->info_count];
+ platform_set_drvdata(pdev, ip);
+
+ for (i = 0; i < dev_count; i++) {
+ struct input_dev *input_dev = input_allocate_device();
+ if (input_dev == NULL) {
+ err = -ENOMEM;
+ pr_err("gpio_event_probe: "
+ "Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ input_set_drvdata(input_dev, ip);
+ input_dev->name = event_info->name ?
+ event_info->name : event_info->names[i];
+ input_dev->event = gpio_input_event;
+ ip->input_devs->dev[i] = input_dev;
+ }
+ ip->input_devs->count = dev_count;
+ ip->info = event_info;
+ if (event_info->power) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ip->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ip->early_suspend.suspend = gpio_event_suspend;
+ ip->early_suspend.resume = gpio_event_resume;
+ register_early_suspend(&ip->early_suspend);
+#endif
+ ip->info->power(ip->info, 1);
+ }
+
+ err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+ if (err)
+ goto err_call_all_func_failed;
+
+ for (i = 0; i < dev_count; i++) {
+ err = input_register_device(ip->input_devs->dev[i]);
+ if (err) {
+ pr_err("gpio_event_probe: Unable to register %s "
+ "input device\n", ip->input_devs->dev[i]->name);
+ goto err_input_register_device_failed;
+ }
+ registered++;
+ }
+
+ return 0;
+
+err_input_register_device_failed:
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+ if (event_info->power) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ip->early_suspend);
+#endif
+ ip->info->power(ip->info, 0);
+ }
+ for (i = 0; i < registered; i++)
+ input_unregister_device(ip->input_devs->dev[i]);
+ for (i = dev_count - 1; i >= registered; i--) {
+ input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+ ;
+ }
+ kfree(ip);
+err_kp_alloc_failed:
+ return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+ struct gpio_event *ip = platform_get_drvdata(pdev);
+ int i;
+
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+ if (ip->info->power) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ip->early_suspend);
+#endif
+ ip->info->power(ip->info, 0);
+ }
+ for (i = 0; i < ip->input_devs->count; i++)
+ input_unregister_device(ip->input_devs->dev[i]);
+ kfree(ip);
+ return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+ .probe = gpio_event_probe,
+ .remove = gpio_event_remove,
+ .driver = {
+ .name = GPIO_EVENT_DEV_NAME,
+ },
+};
+
+static int __devinit gpio_event_init(void)
+{
+ return platform_driver_register(&gpio_event_driver);
+}
+
+static void __exit gpio_event_exit(void)
+{
+ platform_driver_unregister(&gpio_event_driver);
+}
+
+module_init(gpio_event_init);
+module_exit(gpio_event_exit);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
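gpio_event only multiplexes the per-function info blocks; a board registers a
GPIO_EVENT_DEV_NAME platform device whose platform data lists them.  The
sketch below wires in the gray-code axis decoder from gpio_axis.c above.
Field names are taken from how the driver dereferences them; the GPIO numbers
are placeholders, the field types are assumed, and <linux/gpio_event.h> is
assumed to declare gpio_event_axis_func and gpio_axis_4bit_gray_map:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static uint32_t board_wheel_gpios[] = { 10, 11, 12, 13 };	/* placeholders */

static struct gpio_event_axis_info board_wheel_info = {
	.info.func = gpio_event_axis_func,
	.count = ARRAY_SIZE(board_wheel_gpios),
	.dev = 0,			/* only one input device below */
	.type = EV_REL,
	.code = REL_WHEEL,
	.decoded_size = 16,		/* 4-bit gray code -> 16 positions */
	.map = gpio_axis_4bit_gray_map,
	.gpio = board_wheel_gpios,
	.flags = GPIOEAF_PRINT_EVENT,	/* optional debug output */
};

static struct gpio_event_info *board_gpio_event_info[] = {
	&board_wheel_info.info,
};

static struct gpio_event_platform_data board_gpio_event_pdata = {
	.name = "board-gpio-event",
	.info = board_gpio_event_info,
	.info_count = ARRAY_SIZE(board_gpio_event_info),
};

static struct platform_device board_gpio_event_device = {
	.name = GPIO_EVENT_DEV_NAME,
	.id = 0,
	.dev = { .platform_data = &board_gpio_event_pdata },
};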
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 000000000000..6a0c31510968
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,376 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+enum {
+ DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */
+ DEBOUNCE_PRESSED = BIT(1),
+ DEBOUNCE_NOTPRESSED = BIT(2),
+ DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */
+ DEBOUNCE_POLL = BIT(4), /* Stable polling state */
+
+ DEBOUNCE_UNKNOWN =
+ DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
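+
+/*
+ * Debounce flow: a key starts in DEBOUNCE_UNKNOWN and is polled by the
+ * hrtimer until it reads the same level on two consecutive runs (PRESSED or
+ * NOTPRESSED); the new state is then reported and the key parks in WAIT_IRQ
+ * (interrupt mode) or POLL (polling mode).  An interrupt that fires while a
+ * key is still debouncing marks it UNSTABLE and masks its IRQ until the
+ * timer re-enables it.
+ */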
+
+struct gpio_key_state {
+ struct gpio_input_state *ds;
+ uint8_t debounce;
+};
+
+struct gpio_input_state {
+ struct gpio_event_input_devs *input_devs;
+ const struct gpio_event_input_info *info;
+ struct hrtimer timer;
+ int use_irq;
+ int debounce_count;
+ spinlock_t irq_lock;
+ struct wake_lock wake_lock;
+ struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+ int i;
+ int pressed;
+ struct gpio_input_state *ds =
+ container_of(timer, struct gpio_input_state, timer);
+ unsigned gpio_flags = ds->info->flags;
+ unsigned npolarity;
+ int nkeys = ds->info->keymap_size;
+ const struct gpio_event_direct_entry *key_entry;
+ struct gpio_key_state *key_state;
+ unsigned long irqflags;
+ uint8_t debounce;
+ bool sync_needed;
+
+#if 0
+ key_entry = kp->keys_info->keymap;
+ key_state = kp->key_state;
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+ pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+ gpio_read_detect_status(key_entry->gpio));
+#endif
+ key_entry = ds->info->keymap;
+ key_state = ds->key_state;
+ sync_needed = false;
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+ debounce = key_state->debounce;
+ if (debounce & DEBOUNCE_WAIT_IRQ)
+ continue;
+ if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+ debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+ enable_irq(gpio_to_irq(key_entry->gpio));
+ if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) continue debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ }
+ npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+ pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+ if (debounce & DEBOUNCE_POLL) {
+ if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+ ds->debounce_count++;
+ key_state->debounce = DEBOUNCE_UNKNOWN;
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-"
+ "%x, %d (%d) start debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ }
+ continue;
+ }
+ if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) debounce pressed 1\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ key_state->debounce = DEBOUNCE_PRESSED;
+ continue;
+ }
+ if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) debounce pressed 0\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ key_state->debounce = DEBOUNCE_NOTPRESSED;
+ continue;
+ }
+ /* key is stable */
+ ds->debounce_count--;
+ if (ds->use_irq)
+ key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+ else
+ key_state->debounce |= DEBOUNCE_POLL;
+ if (gpio_flags & GPIOEDF_PRINT_KEYS)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+ "changed to %d\n", ds->info->type,
+ key_entry->code, i, key_entry->gpio, pressed);
+ input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+ key_entry->code, pressed);
+ sync_needed = true;
+ }
+ if (sync_needed) {
+ for (i = 0; i < ds->input_devs->count; i++)
+ input_sync(ds->input_devs->dev[i]);
+ }
+
+#if 0
+ key_entry = kp->keys_info->keymap;
+ key_state = kp->key_state;
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+ pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+ gpio_read_detect_status(key_entry->gpio));
+ }
+#endif
+
+ if (ds->debounce_count)
+ hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+ else if (!ds->use_irq)
+ hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+ else
+ wake_unlock(&ds->wake_lock);
+
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_key_state *ks = dev_id;
+ struct gpio_input_state *ds = ks->ds;
+ int keymap_index = ks - ds->key_state;
+ const struct gpio_event_direct_entry *key_entry;
+ unsigned long irqflags;
+ int pressed;
+
+ if (!ds->use_irq)
+ return IRQ_HANDLED;
+
+ key_entry = &ds->info->keymap[keymap_index];
+
+ if (ds->info->debounce_time.tv64) {
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+ ks->debounce = DEBOUNCE_UNKNOWN;
+ if (ds->debounce_count++ == 0) {
+ wake_lock(&ds->wake_lock);
+ hrtimer_start(
+ &ds->timer, ds->info->debounce_time,
+ HRTIMER_MODE_REL);
+ }
+ if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_event_input_irq_handler: "
+ "key %x-%x, %d (%d) start debounce\n",
+ ds->info->type, key_entry->code,
+ keymap_index, key_entry->gpio);
+ } else {
+ disable_irq_nosync(irq);
+ ks->debounce = DEBOUNCE_UNSTABLE;
+ }
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ } else {
+ pressed = gpio_get_value(key_entry->gpio) ^
+ !(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+ if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+ pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+ "(%d) changed to %d\n",
+ ds->info->type, key_entry->code, keymap_index,
+ key_entry->gpio, pressed);
+ input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+ key_entry->code, pressed);
+ input_sync(ds->input_devs->dev[key_entry->dev]);
+ }
+ return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+ int i;
+ int err;
+ unsigned int irq;
+ unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+ for (i = 0; i < ds->info->keymap_size; i++) {
+ err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+ if (err < 0)
+ goto err_gpio_get_irq_num_failed;
+ err = request_irq(irq, gpio_event_input_irq_handler,
+ req_flags, "gpio_keys", &ds->key_state[i]);
+ if (err) {
+ pr_err("gpio_event_input_request_irqs: request_irq "
+ "failed for input %d, irq %d\n",
+ ds->info->keymap[i].gpio, irq);
+ goto err_request_irq_failed;
+ }
+ if (ds->info->info.no_suspend) {
+ err = enable_irq_wake(irq);
+ if (err) {
+ pr_err("gpio_event_input_request_irqs: "
+ "enable_irq_wake failed for input %d, "
+ "irq %d\n",
+ ds->info->keymap[i].gpio, irq);
+ goto err_enable_irq_wake_failed;
+ }
+ }
+ }
+ return 0;
+
+ for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+ irq = gpio_to_irq(ds->info->keymap[i].gpio);
+ if (ds->info->info.no_suspend)
+ disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+ free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+ ;
+ }
+ return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int ret;
+ int i;
+ unsigned long irqflags;
+ struct gpio_event_input_info *di;
+ struct gpio_input_state *ds = *data;
+
+ di = container_of(info, struct gpio_event_input_info, info);
+
+ if (func == GPIO_EVENT_FUNC_SUSPEND) {
+ if (ds->use_irq)
+ for (i = 0; i < di->keymap_size; i++)
+ disable_irq(gpio_to_irq(di->keymap[i].gpio));
+ hrtimer_cancel(&ds->timer);
+ return 0;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME) {
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ if (ds->use_irq)
+ for (i = 0; i < di->keymap_size; i++)
+ enable_irq(gpio_to_irq(di->keymap[i].gpio));
+ hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ if (ktime_to_ns(di->poll_time) <= 0)
+ di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+ *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+ di->keymap_size, GFP_KERNEL);
+ if (ds == NULL) {
+ ret = -ENOMEM;
+ pr_err("gpio_event_input_func: "
+ "Failed to allocate private data\n");
+ goto err_ds_alloc_failed;
+ }
+ ds->debounce_count = di->keymap_size;
+ ds->input_devs = input_devs;
+ ds->info = di;
+ wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input");
+ spin_lock_init(&ds->irq_lock);
+
+ for (i = 0; i < di->keymap_size; i++) {
+ int dev = di->keymap[i].dev;
+ if (dev >= input_devs->count) {
+ pr_err("gpio_event_input_func: bad device "
+ "index %d >= %d for key code %d\n",
+ dev, input_devs->count,
+ di->keymap[i].code);
+ ret = -EINVAL;
+ goto err_bad_keymap;
+ }
+ input_set_capability(input_devs->dev[dev], di->type,
+ di->keymap[i].code);
+ ds->key_state[i].ds = ds;
+ ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+ }
+
+ for (i = 0; i < di->keymap_size; i++) {
+ ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+ if (ret) {
+ pr_err("gpio_event_input_func: gpio_request "
+ "failed for %d\n", di->keymap[i].gpio);
+ goto err_gpio_request_failed;
+ }
+ ret = gpio_direction_input(di->keymap[i].gpio);
+ if (ret) {
+ pr_err("gpio_event_input_func: "
+ "gpio_direction_input failed for %d\n",
+ di->keymap[i].gpio);
+ goto err_gpio_configure_failed;
+ }
+ }
+
+ ret = gpio_event_input_request_irqs(ds);
+
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ ds->use_irq = ret == 0;
+
+ pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+ "mode\n", input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "",
+ ret == 0 ? "interrupt" : "polling");
+
+ hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ds->timer.function = gpio_event_input_timer_func;
+ hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ return 0;
+ }
+
+ ret = 0;
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ hrtimer_cancel(&ds->timer);
+ if (ds->use_irq) {
+ for (i = di->keymap_size - 1; i >= 0; i--) {
+ int irq = gpio_to_irq(di->keymap[i].gpio);
+ if (ds->info->info.no_suspend)
+ disable_irq_wake(irq);
+ free_irq(irq, &ds->key_state[i]);
+ }
+ }
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+ for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+ gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+ ;
+ }
+err_bad_keymap:
+ wake_lock_destroy(&ds->wake_lock);
+ kfree(ds);
+err_ds_alloc_failed:
+ return ret;
+}
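For direct (one GPIO per key) inputs, the same gpio_event platform data
carries a gpio_event_input_info block handled by gpio_event_input_func:
debounce_time selects the hrtimer debounce path, poll_time is used when no
IRQ can be obtained, and info.no_suspend keeps the IRQs armed as wake
sources.  A sketch, with GPIO numbers and key codes as placeholders and field
types as assumed from their use above:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

static struct gpio_event_direct_entry board_key_map[] = {
	{ .gpio = 20, .code = KEY_VOLUMEUP,   .dev = 0 },	/* placeholders */
	{ .gpio = 21, .code = KEY_VOLUMEDOWN, .dev = 0 },
};

static struct gpio_event_input_info board_keys_info = {
	.info.func = gpio_event_input_func,
	.info.no_suspend = 1,		/* keep IRQs enabled as wake sources */
	.type = EV_KEY,
	.flags = GPIOEDF_ACTIVE_HIGH,	/* drop for active-low buttons */
	.debounce_time.tv64 = 5 * NSEC_PER_MSEC,
	.keymap = board_key_map,
	.keymap_size = ARRAY_SIZE(board_key_map),
};

/*
 * &board_keys_info.info would then be added to the gpio_event_info array
 * registered in the previous sketch.
 */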
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 000000000000..eaa9e89d473a
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+ struct gpio_event_input_devs *input_devs;
+ struct gpio_event_matrix_info *keypad_info;
+ struct hrtimer timer;
+ struct wake_lock wake_lock;
+ int current_output;
+ unsigned int use_irq:1;
+ unsigned int key_state_changed:1;
+ unsigned int last_key_state_changed:1;
+ unsigned int some_keys_pressed:2;
+ unsigned int disabled_irq:1;
+ unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ int key_index = out * mi->ninputs + in;
+ unsigned short keyentry = mi->keymap[key_index];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+ if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+ if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+ pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+ "cleared\n", keycode, out, in,
+ mi->output_gpios[out], mi->input_gpios[in]);
+ __clear_bit(key_index, kp->keys_pressed);
+ } else {
+ if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+ pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+ "not cleared\n", keycode, out, in,
+ mi->output_gpios[out], mi->input_gpios[in]);
+ }
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+ int rv = 0;
+ int key_index;
+
+ key_index = out * kp->keypad_info->ninputs + in;
+ while (out < kp->keypad_info->noutputs) {
+ if (test_bit(key_index, kp->keys_pressed)) {
+ rv = 1;
+ clear_phantom_key(kp, out, in);
+ }
+ key_index += kp->keypad_info->ninputs;
+ out++;
+ }
+ return rv;
+}
+
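+/*
+ * With three or more keys down, a keypad matrix without per-key diodes can
+ * report phantom ("ghost") keys at the intersections of the pressed rows and
+ * columns. Clear any pressed key that cannot be distinguished from a ghost.
+ */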
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+ int out, in, inp;
+ int key_index;
+
+ if (kp->some_keys_pressed < 3)
+ return;
+
+ for (out = 0; out < kp->keypad_info->noutputs; out++) {
+ inp = -1;
+ key_index = out * kp->keypad_info->ninputs;
+ for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+ if (test_bit(key_index, kp->keys_pressed)) {
+ if (inp == -1) {
+ inp = in;
+ continue;
+ }
+ if (inp >= 0) {
+ if (!restore_keys_for_input(kp, out + 1,
+ inp))
+ break;
+ clear_phantom_key(kp, out, inp);
+ inp = -2;
+ }
+ restore_keys_for_input(kp, out, in);
+ }
+ }
+ }
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ int pressed = test_bit(key_index, kp->keys_pressed);
+ unsigned short keyentry = mi->keymap[key_index];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+ if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+ if (keycode == KEY_RESERVED) {
+ if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+ pr_info("gpiomatrix: unmapped key, %d-%d "
+ "(%d-%d) changed to %d\n",
+ out, in, mi->output_gpios[out],
+ mi->input_gpios[in], pressed);
+ } else {
+ if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+ pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+ "changed to %d\n", keycode,
+ out, in, mi->output_gpios[out],
+ mi->input_gpios[in], pressed);
+ input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+ }
+ }
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+ int i;
+
+ for (i = 0; i < kp->input_devs->count; i++)
+ input_sync(kp->input_devs->dev[i]);
+}
+
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+ int out, in;
+ int key_index;
+ int gpio;
+ struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ unsigned gpio_keypad_flags = mi->flags;
+ unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
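+ /* Each timer expiry handles one output line: read the inputs for the
+ * line driven last time, release it, then drive the next line and wait
+ * settle_time before sampling. out == noutputs marks the start of a
+ * fresh scan pass. */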
+ out = kp->current_output;
+ if (out == mi->noutputs) {
+ out = 0;
+ kp->last_key_state_changed = kp->key_state_changed;
+ kp->key_state_changed = 0;
+ kp->some_keys_pressed = 0;
+ } else {
+ key_index = out * mi->ninputs;
+ for (in = 0; in < mi->ninputs; in++, key_index++) {
+ gpio = mi->input_gpios[in];
+ if (gpio_get_value(gpio) ^ !polarity) {
+ if (kp->some_keys_pressed < 3)
+ kp->some_keys_pressed++;
+ kp->key_state_changed |= !__test_and_set_bit(
+ key_index, kp->keys_pressed);
+ } else
+ kp->key_state_changed |= __test_and_clear_bit(
+ key_index, kp->keys_pressed);
+ }
+ gpio = mi->output_gpios[out];
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(gpio, !polarity);
+ else
+ gpio_direction_input(gpio);
+ out++;
+ }
+ kp->current_output = out;
+ if (out < mi->noutputs) {
+ gpio = mi->output_gpios[out];
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(gpio, polarity);
+ else
+ gpio_direction_output(gpio, polarity);
+ hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+ if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+ if (kp->key_state_changed) {
+ hrtimer_start(&kp->timer, mi->debounce_delay,
+ HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+ kp->key_state_changed = kp->last_key_state_changed;
+ }
+ if (kp->key_state_changed) {
+ if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+ remove_phantom_keys(kp);
+ key_index = 0;
+ for (out = 0; out < mi->noutputs; out++)
+ for (in = 0; in < mi->ninputs; in++, key_index++)
+ report_key(kp, key_index, out, in);
+ report_sync(kp);
+ }
+ if (!kp->use_irq || kp->some_keys_pressed) {
+ hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+
+ /* No keys are pressed, reenable interrupt */
+ for (out = 0; out < mi->noutputs; out++) {
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(mi->output_gpios[out], polarity);
+ else
+ gpio_direction_output(mi->output_gpios[out], polarity);
+ }
+ for (in = 0; in < mi->ninputs; in++)
+ enable_irq(gpio_to_irq(mi->input_gpios[in]));
+ wake_unlock(&kp->wake_lock);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+ int i;
+ struct gpio_kp *kp = dev_id;
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ unsigned gpio_keypad_flags = mi->flags;
+
+ if (!kp->use_irq) {
+ /* ignore interrupt while registering the handler */
+ kp->disabled_irq = 1;
+ disable_irq_nosync(irq_in);
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < mi->ninputs; i++)
+ disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+ for (i = 0; i < mi->noutputs; i++) {
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(mi->output_gpios[i],
+ !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+ else
+ gpio_direction_input(mi->output_gpios[i]);
+ }
+ wake_lock(&kp->wake_lock);
+ hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+ int i;
+ int err;
+ unsigned int irq;
+ unsigned long request_flags;
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+ switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+ default:
+ request_flags = IRQF_TRIGGER_FALLING;
+ break;
+ case GPIOKPF_ACTIVE_HIGH:
+ request_flags = IRQF_TRIGGER_RISING;
+ break;
+ case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+ request_flags = IRQF_TRIGGER_LOW;
+ break;
+ case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+ request_flags = IRQF_TRIGGER_HIGH;
+ break;
+ }
+
+ for (i = 0; i < mi->ninputs; i++) {
+ err = irq = gpio_to_irq(mi->input_gpios[i]);
+ if (err < 0)
+ goto err_gpio_get_irq_num_failed;
+ err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+ "gpio_kp", kp);
+ if (err) {
+ pr_err("gpiomatrix: request_irq failed for input %d, "
+ "irq %d\n", mi->input_gpios[i], irq);
+ goto err_request_irq_failed;
+ }
+ err = enable_irq_wake(irq);
+ if (err) {
+ pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+ "irq %d\n", mi->input_gpios[i], irq);
+ }
+ disable_irq(irq);
+ if (kp->disabled_irq) {
+ kp->disabled_irq = 0;
+ enable_irq(irq);
+ }
+ }
+ return 0;
+
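+ /* Not reached on success; the error paths above jump into this loop so
+ * that only the IRQs already requested are freed. */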
+ for (i = mi->ninputs - 1; i >= 0; i--) {
+ free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+ ;
+ }
+ return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int i;
+ int err;
+ int key_count;
+ struct gpio_kp *kp;
+ struct gpio_event_matrix_info *mi;
+
+ mi = container_of(info, struct gpio_event_matrix_info, info);
+ if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+ /* TODO: disable scanning */
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ if (mi->keymap == NULL ||
+ mi->input_gpios == NULL ||
+ mi->output_gpios == NULL) {
+ err = -ENODEV;
+ pr_err("gpiomatrix: Incomplete pdata\n");
+ goto err_invalid_platform_data;
+ }
+ key_count = mi->ninputs * mi->noutputs;
+
+ *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+ BITS_TO_LONGS(key_count), GFP_KERNEL);
+ if (kp == NULL) {
+ err = -ENOMEM;
+ pr_err("gpiomatrix: Failed to allocate private data\n");
+ goto err_kp_alloc_failed;
+ }
+ kp->input_devs = input_devs;
+ kp->keypad_info = mi;
+ for (i = 0; i < key_count; i++) {
+ unsigned short keyentry = mi->keymap[i];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+ if (dev >= input_devs->count) {
+ pr_err("gpiomatrix: bad device index %d >= "
+ "%d for key code %d\n",
+ dev, input_devs->count, keycode);
+ err = -EINVAL;
+ goto err_bad_keymap;
+ }
+ if (keycode && keycode <= KEY_MAX)
+ input_set_capability(input_devs->dev[dev],
+ EV_KEY, keycode);
+ }
+
+ for (i = 0; i < mi->noutputs; i++) {
+ err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+ if (err) {
+ pr_err("gpiomatrix: gpio_request failed for "
+ "output %d\n", mi->output_gpios[i]);
+ goto err_request_output_gpio_failed;
+ }
+ if (gpio_cansleep(mi->output_gpios[i])) {
+ pr_err("gpiomatrix: unsupported output gpio %d,"
+ " can sleep\n", mi->output_gpios[i]);
+ err = -EINVAL;
+ goto err_output_gpio_configure_failed;
+ }
+ if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+ err = gpio_direction_output(mi->output_gpios[i],
+ !(mi->flags & GPIOKPF_ACTIVE_HIGH));
+ else
+ err = gpio_direction_input(mi->output_gpios[i]);
+ if (err) {
+ pr_err("gpiomatrix: gpio_configure failed for "
+ "output %d\n", mi->output_gpios[i]);
+ goto err_output_gpio_configure_failed;
+ }
+ }
+ for (i = 0; i < mi->ninputs; i++) {
+ err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+ if (err) {
+ pr_err("gpiomatrix: gpio_request failed for "
+ "input %d\n", mi->input_gpios[i]);
+ goto err_request_input_gpio_failed;
+ }
+ err = gpio_direction_input(mi->input_gpios[i]);
+ if (err) {
+ pr_err("gpiomatrix: gpio_direction_input failed"
+ " for input %d\n", mi->input_gpios[i]);
+ goto err_gpio_direction_input_failed;
+ }
+ }
+ kp->current_output = mi->noutputs;
+ kp->key_state_changed = 1;
+
+ hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kp->timer.function = gpio_keypad_timer_func;
+ wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+ err = gpio_keypad_request_irqs(kp);
+ kp->use_irq = err == 0;
+
+ pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+ "%s%s in %s mode\n", input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "",
+ kp->use_irq ? "interrupt" : "polling");
+
+ if (kp->use_irq)
+ wake_lock(&kp->wake_lock);
+ hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+ }
+
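+ /* Teardown for GPIO_EVENT_FUNC_UNINIT; the error labels below let the
+ * init error paths above reuse this cleanup sequence. */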
+ err = 0;
+ kp = *data;
+
+ if (kp->use_irq)
+ for (i = mi->ninputs - 1; i >= 0; i--)
+ free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+ hrtimer_cancel(&kp->timer);
+ wake_lock_destroy(&kp->wake_lock);
+ for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+ gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+ ;
+ }
+ for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+ gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+ ;
+ }
+err_bad_keymap:
+ kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+ return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 000000000000..2aac2fad0a17
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
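+/* Forward an input event to the matching GPIO outputs: any keymap entry whose
+ * device index and code match is driven to the event value, with the level
+ * inverted for active-low entries. */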
+int gpio_event_output_event(
+ struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+ void **data, unsigned int dev, unsigned int type,
+ unsigned int code, int value)
+{
+ int i;
+ struct gpio_event_output_info *oi;
+ oi = container_of(info, struct gpio_event_output_info, info);
+ if (type != oi->type)
+ return 0;
+ if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+ value = !value;
+ for (i = 0; i < oi->keymap_size; i++)
+ if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+ gpio_set_value(oi->keymap[i].gpio, value);
+ return 0;
+}
+
+int gpio_event_output_func(
+ struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+ void **data, int func)
+{
+ int ret;
+ int i;
+ struct gpio_event_output_info *oi;
+ oi = container_of(info, struct gpio_event_output_info, info);
+
+ if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+ return 0;
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+ for (i = 0; i < oi->keymap_size; i++) {
+ int dev = oi->keymap[i].dev;
+ if (dev >= input_devs->count) {
+ pr_err("gpio_event_output_func: bad device "
+ "index %d >= %d for key code %d\n",
+ dev, input_devs->count,
+ oi->keymap[i].code);
+ ret = -EINVAL;
+ goto err_bad_keymap;
+ }
+ input_set_capability(input_devs->dev[dev], oi->type,
+ oi->keymap[i].code);
+ }
+
+ for (i = 0; i < oi->keymap_size; i++) {
+ ret = gpio_request(oi->keymap[i].gpio,
+ "gpio_event_output");
+ if (ret) {
+ pr_err("gpio_event_output_func: gpio_request "
+ "failed for %d\n", oi->keymap[i].gpio);
+ goto err_gpio_request_failed;
+ }
+ ret = gpio_direction_output(oi->keymap[i].gpio,
+ output_level);
+ if (ret) {
+ pr_err("gpio_event_output_func: "
+ "gpio_direction_output failed for %d\n",
+ oi->keymap[i].gpio);
+ goto err_gpio_direction_output_failed;
+ }
+ }
+ return 0;
+ }
+
+ ret = 0;
+ for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+ gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+ ;
+ }
+err_bad_keymap:
+ return ret;
+}
+
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 000000000000..3ffab6da411b
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,387 @@
+/*
+ * drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME "keychord"
+#define BUFFER_SIZE 16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
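+/*
+ * Keychords are written from userspace as variable-length records packed back
+ * to back; step over the header and keycode array to reach the next record.
+ */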
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+ ((char *)kc + sizeof(struct input_keychord) + \
+ kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+ struct input_handler input_handler;
+ int registered;
+
+ /* list of keychords to monitor */
+ struct input_keychord *keychords;
+ int keychord_count;
+
+ /* bitmask of keys contained in our keychords */
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ /* current state of the keys */
+ unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+ /* number of keys that are currently pressed */
+ int key_down;
+
+ /* second input_device_id is needed for null termination */
+ struct input_device_id device_ids[2];
+
+ spinlock_t lock;
+ wait_queue_head_t waitq;
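+ /* circular buffer of triggered keychord ids, drained by read() */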
+ unsigned char head;
+ unsigned char tail;
+ __u16 buff[BUFFER_SIZE];
+};
+
+static int check_keychord(struct keychord_device *kdev,
+ struct input_keychord *keychord)
+{
+ int i;
+
+ if (keychord->count != kdev->key_down)
+ return 0;
+
+ for (i = 0; i < keychord->count; i++) {
+ if (!test_bit(keychord->keycodes[i], kdev->keystate))
+ return 0;
+ }
+
+ /* we have a match */
+ return 1;
+}
+
+static void keychord_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ struct keychord_device *kdev = handle->private;
+ struct input_keychord *keychord;
+ unsigned long flags;
+ int i, got_chord = 0;
+
+ if (type != EV_KEY || code >= KEY_MAX)
+ return;
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* do nothing if key state did not change */
+ if (!test_bit(code, kdev->keystate) == !value)
+ goto done;
+ __change_bit(code, kdev->keystate);
+ if (value)
+ kdev->key_down++;
+ else
+ kdev->key_down--;
+
+ /* don't notify on key up */
+ if (!value)
+ goto done;
+ /* ignore this event if it is not one of the keys we are monitoring */
+ if (!test_bit(code, kdev->keybit))
+ goto done;
+
+ keychord = kdev->keychords;
+ if (!keychord)
+ goto done;
+
+ /* check to see if the keyboard state matches any keychords */
+ for (i = 0; i < kdev->keychord_count; i++) {
+ if (check_keychord(kdev, keychord)) {
+ kdev->buff[kdev->head] = keychord->id;
+ kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+ got_chord = 1;
+ break;
+ }
+ /* skip to next keychord */
+ keychord = NEXT_KEYCHORD(keychord);
+ }
+
+done:
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ if (got_chord)
+ wake_up_interruptible(&kdev->waitq);
+}
+
+static int keychord_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int i, ret;
+ struct input_handle *handle;
+ struct keychord_device *kdev =
+ container_of(handler, struct keychord_device, input_handler);
+
+ /*
+ * ignore this input device if it does not contain any keycodes
+ * that we are monitoring
+ */
+ for (i = 0; i < KEY_MAX; i++) {
+ if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+ break;
+ }
+ if (i == KEY_MAX)
+ return -ENODEV;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = KEYCHORD_NAME;
+ handle->private = kdev;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_input_register_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_input_open_device;
+
+ pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+ return 0;
+
+err_input_open_device:
+ input_unregister_handle(handle);
+err_input_register_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct keychord_device *kdev = file->private_data;
+ __u16 id;
+ int retval;
+ unsigned long flags;
+
+ if (count < sizeof(id))
+ return -EINVAL;
+ count = sizeof(id);
+
+ if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ retval = wait_event_interruptible(kdev->waitq,
+ kdev->head != kdev->tail);
+ if (retval)
+ return retval;
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* pop a keychord ID off the queue */
+ id = kdev->buff[kdev->tail];
+ kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ if (copy_to_user(buffer, &id, count))
+ return -EFAULT;
+
+ return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct keychord_device *kdev = file->private_data;
+ struct input_keychord *keychords = 0;
+ struct input_keychord *keychord, *next, *end;
+ int ret, i, key;
+ unsigned long flags;
+
+ if (count < sizeof(struct input_keychord))
+ return -EINVAL;
+ keychords = kzalloc(count, GFP_KERNEL);
+ if (!keychords)
+ return -ENOMEM;
+
+ /* read list of keychords from userspace */
+ if (copy_from_user(keychords, buffer, count)) {
+ kfree(keychords);
+ return -EFAULT;
+ }
+
+ /* unregister handler before changing configuration */
+ if (kdev->registered) {
+ input_unregister_handler(&kdev->input_handler);
+ kdev->registered = 0;
+ }
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* clear any existing configuration */
+ kfree(kdev->keychords);
+ kdev->keychords = 0;
+ kdev->keychord_count = 0;
+ kdev->key_down = 0;
+ memset(kdev->keybit, 0, sizeof(kdev->keybit));
+ memset(kdev->keystate, 0, sizeof(kdev->keystate));
+ kdev->head = kdev->tail = 0;
+
+ keychord = keychords;
+ end = (struct input_keychord *)((char *)keychord + count);
+
+ while (keychord < end) {
+ next = NEXT_KEYCHORD(keychord);
+ if (keychord->count <= 0 || next > end) {
+ pr_err("keychord: invalid keycode count %d\n",
+ keychord->count);
+ goto err_unlock_return;
+ }
+ if (keychord->version != KEYCHORD_VERSION) {
+ pr_err("keychord: unsupported version %d\n",
+ keychord->version);
+ goto err_unlock_return;
+ }
+
+ /* keep track of the keys we are monitoring in keybit */
+ for (i = 0; i < keychord->count; i++) {
+ key = keychord->keycodes[i];
+ if (key < 0 || key >= KEY_CNT) {
+ pr_err("keychord: keycode %d out of range\n",
+ key);
+ goto err_unlock_return;
+ }
+ __set_bit(key, kdev->keybit);
+ }
+
+ kdev->keychord_count++;
+ keychord = next;
+ }
+
+ kdev->keychords = keychords;
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ ret = input_register_handler(&kdev->input_handler);
+ if (ret) {
+ kfree(keychords);
+ kdev->keychords = 0;
+ return ret;
+ }
+ kdev->registered = 1;
+
+ return count;
+
+err_unlock_return:
+ spin_unlock_irqrestore(&kdev->lock, flags);
+ kfree(keychords);
+ return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+ struct keychord_device *kdev = file->private_data;
+
+ poll_wait(file, &kdev->waitq, wait);
+
+ if (kdev->head != kdev->tail)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+ struct keychord_device *kdev;
+
+ kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+
+ spin_lock_init(&kdev->lock);
+ init_waitqueue_head(&kdev->waitq);
+
+ kdev->input_handler.event = keychord_event;
+ kdev->input_handler.connect = keychord_connect;
+ kdev->input_handler.disconnect = keychord_disconnect;
+ kdev->input_handler.name = KEYCHORD_NAME;
+ kdev->input_handler.id_table = kdev->device_ids;
+
+ kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+ __set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+ file->private_data = kdev;
+
+ return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+ struct keychord_device *kdev = file->private_data;
+
+ if (kdev->registered)
+ input_unregister_handler(&kdev->input_handler);
+ kfree(kdev);
+
+ return 0;
+}
+
+static const struct file_operations keychord_fops = {
+ .owner = THIS_MODULE,
+ .open = keychord_open,
+ .release = keychord_release,
+ .read = keychord_read,
+ .write = keychord_write,
+ .poll = keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+ .fops = &keychord_fops,
+ .name = KEYCHORD_NAME,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+ return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+ misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cabd9e54863f..b438838b46a7 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -374,6 +374,19 @@ config TOUCHSCREEN_MIGOR
To compile this driver as a module, choose M here: the
module will be called migor_ts.
+config TOUCHSCREEN_PANJIT_I2C
+ tristate "PANJIT I2C touchscreen driver"
+ depends on I2C
+ default n
+ help
+ Say Y here to enable PANJIT I2C capacitive touchscreen support,
+ covering devices such as the MGG1010AI06 and EGG1010AI06.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will
+ be called panjit_i2c.
+
config TOUCHSCREEN_TNETV107X
tristate "TI TNETV107X touchscreen support"
depends on ARCH_DAVINCI_TNETV107X
@@ -383,6 +396,12 @@ config TOUCHSCREEN_TNETV107X
To compile this driver as a module, choose M here: the
module will be called tnetv107x-ts.
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI
+ tristate "Synaptics i2c touchscreen"
+ depends on I2C
+ help
+ This enables support for Synaptics RMI-over-I2C touchscreens.
+
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 282d6f76ae26..023d75578e23 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -37,12 +37,14 @@ obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o
obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PANJIT_I2C) += panjit_i2c.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI) += synaptics_i2c_rmi.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index f5d66859f232..ca8605b70a30 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2,6 +2,8 @@
* Atmel maXTouch Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Copyright (C) 2011 Atmel Corporation
+ * Copyright (C) 2011 NVIDIA Corporation
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -17,9 +19,17 @@
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/i2c/atmel_mxt_ts.h>
-#include <linux/input/mt.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+
+/* Family ID */
+#define MXT224_ID 0x80
+#define MXT768E_ID 0xA1
+#define MXT1386_ID 0xA0
/* Version */
#define MXT_VER_20 20
@@ -172,14 +182,31 @@
#define MXT_VOLTAGE_DEFAULT 2700000
#define MXT_VOLTAGE_STEP 10000
+/* Defines for MXT_TOUCH_CTRL */
+#define MXT_TOUCH_DISABLE 0
+#define MXT_TOUCH_ENABLE 0x83
+
/* Define for MXT_GEN_COMMAND_T6 */
#define MXT_BOOT_VALUE 0xa5
#define MXT_BACKUP_VALUE 0x55
-#define MXT_BACKUP_TIME 25 /* msec */
-#define MXT_RESET_TIME 65 /* msec */
+#define MXT_BACKUP_TIME 200 /* msec */
+#define MXT224_RESET_TIME 65 /* msec */
+#define MXT768E_RESET_TIME 250 /* msec */
+#define MXT1386_RESET_TIME 200 /* msec */
+#define MXT_RESET_TIME 200 /* msec */
+#define MXT_RESET_NOCHGREAD 400 /* msec */
+
+#define MXT_WAKEUP_TIME 25 /* msec */
#define MXT_FWRESET_TIME 175 /* msec */
+/* Defines for MXT_SLOWSCAN_EXTENSIONS */
+#define SLOSCAN_DISABLE 0 /* Disable slow scan */
+#define SLOSCAN_ENABLE 1 /* Enable slow scan */
+#define SLOSCAN_SET_ACTVACQINT 2 /* Set ACTV scan rate */
+#define SLOSCAN_SET_IDLEACQINT 3 /* Set IDLE scan rate */
+#define SLOSCAN_SET_ACTV2IDLETO 4 /* Set the ACTIVE to IDLE TimeOut */
+
/* Command to unlock bootloader */
#define MXT_UNLOCK_CMD_MSB 0xaa
#define MXT_UNLOCK_CMD_LSB 0xdc
@@ -210,8 +237,15 @@
/* Touchscreen absolute values */
#define MXT_MAX_AREA 0xff
+/* Fixed Report ID values */
+#define MXT_RPTID_NOMSG 0xFF /* No messages available to read */
+
#define MXT_MAX_FINGER 10
+#define RESUME_READS 100
+
+#define MXT_DEFAULT_PRESSURE 100
+
struct mxt_info {
u8 family_id;
u8 variant_id;
@@ -225,11 +259,12 @@ struct mxt_info {
struct mxt_object {
u8 type;
u16 start_address;
- u8 size;
- u8 instances;
+ u16 size;
+ u16 instances;
u8 num_report_ids;
/* to map object and message */
+ u8 min_reportid;
u8 max_reportid;
};
@@ -258,8 +293,34 @@ struct mxt_data {
unsigned int irq;
unsigned int max_x;
unsigned int max_y;
+ u8(*read_chg) (void);
+ u16 msg_address;
+ u16 last_address;
+ u8 actv_cycle_time;
+ u8 idle_cycle_time;
+ u8 actv2idle_timeout;
+ u8 is_stopped;
+ struct mutex access_mutex;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif
+ unsigned int driver_paused;
+ struct bin_attribute mem_access_attr;
+ int debug_enabled;
+ int slowscan_enabled;
+ u8 slowscan_actv_cycle_time;
+ u8 slowscan_idle_cycle_time;
+ u8 slowscan_actv2idle_timeout;
+ u8 slowscan_shad_actv_cycle_time;
+ u8 slowscan_shad_idle_cycle_time;
+ u8 slowscan_shad_actv2idle_timeout;
};
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static void mxt_early_suspend(struct early_suspend *es);
+static void mxt_early_resume(struct early_suspend *es);
+#endif
+
static bool mxt_object_readable(unsigned int type)
{
switch (type) {
@@ -400,30 +461,46 @@ static int mxt_fw_write(struct i2c_client *client,
static int __mxt_read_reg(struct i2c_client *client,
u16 reg, u16 len, void *val)
{
- struct i2c_msg xfer[2];
u8 buf[2];
+ int retval = 0;
+ struct mxt_data *data = i2c_get_clientdata(client);
buf[0] = reg & 0xff;
buf[1] = (reg >> 8) & 0xff;
- /* Write register */
- xfer[0].addr = client->addr;
- xfer[0].flags = 0;
- xfer[0].len = 2;
- xfer[0].buf = buf;
+ mutex_lock(&data->access_mutex);
- /* Read data */
- xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = len;
- xfer[1].buf = val;
+ if ((data->last_address != reg) || (reg != data->msg_address)) {
+ if (i2c_master_send(client, (u8 *)buf, 2) != 2) {
+ dev_dbg(&client->dev, "i2c retry\n");
+ msleep(MXT_WAKEUP_TIME);
- if (i2c_transfer(client->adapter, xfer, 2) != 2) {
- dev_err(&client->dev, "%s: i2c transfer failed\n", __func__);
- return -EIO;
+ if (i2c_master_send(client, (u8 *)buf, 2) != 2) {
+ dev_err(&client->dev, "%s: i2c send failed\n",
+ __func__);
+ retval = -EIO;
+ goto mxt_read_exit;
+ }
+ }
}
- return 0;
+ if (i2c_master_recv(client, (u8 *)val, len) != len) {
+ dev_dbg(&client->dev, "i2c retry\n");
+ msleep(MXT_WAKEUP_TIME);
+
+ if (i2c_master_recv(client, (u8 *)val, len) != len) {
+ dev_err(&client->dev, "%s: i2c recv failed\n",
+ __func__);
+ retval = -EIO;
+ goto mxt_read_exit;
+ }
+ }
+
+ data->last_address = reg;
+
+mxt_read_exit:
+ mutex_unlock(&data->access_mutex);
+ return retval;
}
static int mxt_read_reg(struct i2c_client *client, u16 reg, u8 *val)
@@ -434,17 +511,29 @@ static int mxt_read_reg(struct i2c_client *client, u16 reg, u8 *val)
static int mxt_write_reg(struct i2c_client *client, u16 reg, u8 val)
{
u8 buf[3];
+ int retval = 0;
+ struct mxt_data *data = i2c_get_clientdata(client);
buf[0] = reg & 0xff;
buf[1] = (reg >> 8) & 0xff;
buf[2] = val;
+ mutex_lock(&data->access_mutex);
if (i2c_master_send(client, buf, 3) != 3) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
+ dev_dbg(&client->dev, "i2c retry\n");
+ msleep(MXT_WAKEUP_TIME);
+
+ if (i2c_master_send(client, buf, 3) != 3) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ retval = -EIO;
+ goto mxt_write_exit;
+ }
}
+ data->last_address = reg + 1;
- return 0;
+mxt_write_exit:
+ mutex_unlock(&data->access_mutex);
+ return retval;
}
static int mxt_read_object_table(struct i2c_client *client,
@@ -466,7 +555,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
return object;
}
- dev_err(&data->client->dev, "Invalid object type\n");
+ dev_err(&data->client->dev, "Invalid object type T%d\n", type);
return NULL;
}
@@ -525,23 +614,21 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
if (!finger[id].status)
continue;
- input_mt_slot(input_dev, id);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- finger[id].status != MXT_RELEASE);
-
- if (finger[id].status != MXT_RELEASE) {
- finger_num++;
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
- finger[id].area);
- input_report_abs(input_dev, ABS_MT_POSITION_X,
- finger[id].x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y,
- finger[id].y);
- input_report_abs(input_dev, ABS_MT_PRESSURE,
- finger[id].pressure);
- } else {
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
+ finger[id].status != MXT_RELEASE ?
+ finger[id].area : 0);
+ input_report_abs(input_dev, ABS_MT_POSITION_X,
+ finger[id].x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y,
+ finger[id].y);
+ input_report_abs(input_dev, ABS_MT_PRESSURE,
+ finger[id].pressure);
+ input_mt_sync(input_dev);
+
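+ /* A key edge was detected: mask all input IRQs, de-assert the output
+ * lines and start the timer-driven scan, which re-enables the IRQs once
+ * every key has been released. */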
+ if (finger[id].status == MXT_RELEASE)
finger[id].status = 0;
- }
+ else
+ finger_num++;
}
input_report_key(input_dev, BTN_TOUCH, finger_num > 0);
@@ -549,8 +636,7 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
if (status != MXT_RELEASE) {
input_report_abs(input_dev, ABS_X, finger[single_id].x);
input_report_abs(input_dev, ABS_Y, finger[single_id].y);
- input_report_abs(input_dev,
- ABS_PRESSURE, finger[single_id].pressure);
+ input_report_abs(input_dev, ABS_PRESSURE, finger[single_id].pressure);
}
input_sync(input_dev);
@@ -573,6 +659,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
dev_dbg(dev, "[%d] released\n", id);
finger[id].status = MXT_RELEASE;
+ finger[id].pressure = 0;
mxt_input_report(data, id);
}
return;
@@ -592,6 +679,9 @@ static void mxt_input_touchevent(struct mxt_data *data,
area = message->message[4];
pressure = message->message[5];
+ if ((pressure <= 0) || (pressure > 255))
+ pressure = MXT_DEFAULT_PRESSURE;
+
dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
status & MXT_MOVE ? "moved" : "pressed",
x, y, area);
@@ -610,12 +700,10 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
{
struct mxt_data *data = dev_id;
struct mxt_message message;
- struct mxt_object *object;
+ struct mxt_object *touch_object;
struct device *dev = &data->client->dev;
- int id;
+ int touchid;
u8 reportid;
- u8 max_reportid;
- u8 min_reportid;
do {
if (mxt_read_message(data, &message)) {
@@ -625,129 +713,275 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
reportid = message.reportid;
- /* whether reportid is thing of MXT_TOUCH_MULTI_T9 */
- object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
- if (!object)
+ touch_object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
+ if (!touch_object)
goto end;
- max_reportid = object->max_reportid;
- min_reportid = max_reportid - object->num_report_ids + 1;
- id = reportid - min_reportid;
+ if (data->debug_enabled)
+ print_hex_dump(KERN_DEBUG, "MXT MSG:", DUMP_PREFIX_NONE,
+ 16, 1, &message, sizeof(struct mxt_message), false);
- if (reportid >= min_reportid && reportid <= max_reportid)
- mxt_input_touchevent(data, &message, id);
- else
+ if (reportid >= touch_object->min_reportid
+ && reportid <= touch_object->max_reportid) {
+ touchid = reportid - touch_object->min_reportid;
+ mxt_input_touchevent(data, &message, touchid);
+ } else if (reportid != MXT_RPTID_NOMSG)
mxt_dump_message(dev, &message);
- } while (reportid != 0xff);
+ } while (reportid != MXT_RPTID_NOMSG);
end:
return IRQ_HANDLED;
}
+static int mxt_make_highchg(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct mxt_message message;
+ int count = 30;
+ int error;
+
+ /* Read dummy message to make high CHG pin */
+ do {
+ error = mxt_read_message(data, &message);
+ if (error)
+ return error;
+ } while (message.reportid != MXT_RPTID_NOMSG && --count);
+
+ if (!count) {
+ dev_err(dev, "CHG pin isn't cleared\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static int mxt_check_reg_init(struct mxt_data *data)
{
+ struct i2c_client *client = data->client;
const struct mxt_platform_data *pdata = data->pdata;
struct mxt_object *object;
+ struct mxt_message message;
struct device *dev = &data->client->dev;
int index = 0;
+ int timeout_counter = 0;
int i, j, config_offset;
+ int error;
+ unsigned long current_crc;
+ u8 command_register;
if (!pdata->config) {
dev_dbg(dev, "No cfg data defined, skipping reg init\n");
return 0;
}
+ /* Try to read the config checksum of the existing cfg */
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
+ MXT_COMMAND_REPORTALL, 1);
+ msleep(30);
+
+ error = mxt_read_message(data, &message);
+ if (error)
+ return error;
+
+ object = mxt_get_object(data, MXT_GEN_COMMAND_T6);
+ if (!object)
+ return -EIO;
+
+ /* Check whether this message is from the command processor (which has
+ * only one reporting ID); if so, bytes 1-3 are the checksum. */
+ if (message.reportid == object->max_reportid) {
+ current_crc = message.message[1] | (message.message[2] << 8) |
+ (message.message[3] << 16);
+ } else {
+ dev_info(dev, "Couldn't retrieve the current cfg checksum, "
+ "forcing load\n");
+ current_crc = 0xFFFFFFFF;
+ }
+ dev_info(dev,
+ "Config CRC read from the mXT: %X\n",
+ (unsigned int) current_crc);
+
+ if (current_crc == pdata->config_crc) {
+ dev_info(dev,
+ "Matching CRCs, skipping CFG load.\n");
+ return 0;
+ } else {
+ dev_info(dev, "Doesn't match platform data config CRC (%X), "
+ "writing config from platform data...\n",
+ (unsigned int) pdata->config_crc);
+ }
+
for (i = 0; i < data->info.object_num; i++) {
object = data->object_table + i;
if (!mxt_object_writable(object->type))
continue;
-
+ dev_info(dev, "Writing object type %d, config offset %d\n",
+ object->type, index);
for (j = 0;
- j < (object->size + 1) * (object->instances + 1);
+ j < object->size * object->instances;
j++) {
config_offset = index + j;
if (config_offset > pdata->config_length) {
dev_err(dev, "Not enough config data!\n");
+ dev_err(dev, "config base is %d, offset is %d\n", index, config_offset);
return -EINVAL;
}
mxt_write_object(data, object->type, j,
pdata->config[config_offset]);
}
- index += (object->size + 1) * (object->instances + 1);
+ index += object->size * object->instances;
+ }
+ dev_info(dev, "Config written!\n");
+
+ error = mxt_make_highchg(data);
+ if (error)
+ return error;
+
+ /* Backup to memory */
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
+ MXT_COMMAND_BACKUPNV,
+ MXT_BACKUP_VALUE);
+ msleep(MXT_BACKUP_TIME);
+ do {
+ error = mxt_read_object(data, MXT_GEN_COMMAND_T6,
+ MXT_COMMAND_BACKUPNV,
+ &command_register);
+ if (error)
+ return error;
+ msleep(10);
+ } while ((command_register != 0) && (timeout_counter++ <= 100));
+ if (timeout_counter > 100) {
+ dev_err(&client->dev, "No response after backup!\n");
+ return -EIO;
+ }
+
+ /* Clear the interrupt line */
+ error = mxt_make_highchg(data);
+ if (error)
+ return error;
+
+ /* Soft reset */
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
+ MXT_COMMAND_RESET, 1);
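+ /* With no CHG line to poll, just wait the worst-case reset time;
+ * otherwise use the family-specific delay and then poll CHG. */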
+ if (data->pdata->read_chg == NULL) {
+ msleep(MXT_RESET_NOCHGREAD);
+ } else {
+ switch (data->info.family_id) {
+ case MXT224_ID:
+ msleep(MXT224_RESET_TIME);
+ break;
+ case MXT768E_ID:
+ msleep(MXT768E_RESET_TIME);
+ break;
+ case MXT1386_ID:
+ msleep(MXT1386_RESET_TIME);
+ break;
+ default:
+ msleep(MXT_RESET_TIME);
+ }
+ timeout_counter = 0;
+ while ((timeout_counter++ <= 100) && data->pdata->read_chg())
+ msleep(10);
+ if (timeout_counter > 100) {
+ dev_err(&client->dev, "No response after reset!\n");
+ return -EIO;
+ }
}
return 0;
}
-static int mxt_make_highchg(struct mxt_data *data)
+
+static void mxt_handle_pdata(struct mxt_data *data)
+{
+ const struct mxt_platform_data *pdata = data->pdata;
+
+ if (pdata->read_chg != NULL)
+ data->read_chg = pdata->read_chg;
+}
+
+static int mxt_set_power_cfg(struct mxt_data *data, u8 sleep)
{
struct device *dev = &data->client->dev;
- struct mxt_message message;
- int count = 10;
int error;
+ u8 actv_cycle_time = 0;
+ u8 idle_cycle_time = 0;
+ u8 actv2idle_timeout = data->actv2idle_timeout;
- /* Read dummy message to make high CHG pin */
- do {
- error = mxt_read_message(data, &message);
- if (error)
- return error;
- } while (message.reportid != 0xff && --count);
-
- if (!count) {
- dev_err(dev, "CHG pin isn't cleared\n");
- return -EBUSY;
+ if (!sleep) {
+ actv_cycle_time = data->actv_cycle_time;
+ idle_cycle_time = data->idle_cycle_time;
}
+ error = mxt_write_object(data, MXT_GEN_POWER_T7, MXT_POWER_ACTVACQINT,
+ actv_cycle_time);
+ if (error)
+ goto i2c_error;
+
+ error = mxt_write_object(data, MXT_GEN_POWER_T7, MXT_POWER_IDLEACQINT,
+ idle_cycle_time);
+ if (error)
+ goto i2c_error;
+
+ error = mxt_write_object(data, MXT_GEN_POWER_T7, MXT_POWER_ACTV2IDLETO,
+ actv2idle_timeout);
+ if (error)
+ goto i2c_error;
+
+ dev_dbg(dev, "%s: Set ACTV %d, IDLE %d\n", __func__,
+ actv_cycle_time, idle_cycle_time);
+
return 0;
+
+i2c_error:
+ dev_err(dev, "Failed to set power cfg\n");
+ return error;
}
-static void mxt_handle_pdata(struct mxt_data *data)
+static int mxt_init_power_cfg(struct mxt_data *data)
{
const struct mxt_platform_data *pdata = data->pdata;
- u8 voltage;
-
- /* Set touchscreen lines */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_XSIZE,
- pdata->x_line);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_YSIZE,
- pdata->y_line);
-
- /* Set touchscreen orient */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_ORIENT,
- pdata->orient);
-
- /* Set touchscreen burst length */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_BLEN, pdata->blen);
-
- /* Set touchscreen threshold */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_TCHTHR, pdata->threshold);
-
- /* Set touchscreen resolution */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
-
- /* Set touchscreen voltage */
- if (pdata->voltage) {
- if (pdata->voltage < MXT_VOLTAGE_DEFAULT) {
- voltage = (MXT_VOLTAGE_DEFAULT - pdata->voltage) /
- MXT_VOLTAGE_STEP;
- voltage = 0xff - voltage + 1;
- } else
- voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
- MXT_VOLTAGE_STEP;
-
- mxt_write_object(data, MXT_SPT_CTECONFIG_T28,
- MXT_CTE_VOLTAGE, voltage);
+ struct device *dev = &data->client->dev;
+ int error;
+
+ data->slowscan_actv_cycle_time = 120; /* 120mS */
+ data->slowscan_idle_cycle_time = 10; /* 10mS */
+ data->slowscan_actv2idle_timeout = 100; /* 10 seconds */
+ if (pdata->actv_cycle_time > 0 && pdata->idle_cycle_time > 0) {
+ data->actv_cycle_time = pdata->actv_cycle_time;
+ data->idle_cycle_time = pdata->idle_cycle_time;
+ } else {
+ error = mxt_read_object(data, MXT_GEN_POWER_T7,
+ MXT_POWER_ACTVACQINT,
+ &data->actv_cycle_time);
+
+ if (error)
+ return error;
+
+ error = mxt_read_object(data, MXT_GEN_POWER_T7,
+ MXT_POWER_IDLEACQINT,
+ &data->idle_cycle_time);
+
+ if (error)
+ return error;
}
+
+ error = mxt_read_object(data, MXT_GEN_POWER_T7,
+ MXT_POWER_ACTV2IDLETO,
+ &data->actv2idle_timeout);
+
+ if (error)
+ return error;
+
+ /* On init, power up */
+ error = mxt_set_power_cfg(data, 0);
+ if (error)
+ return error;
+
+ dev_info(dev, "Initialised power cfg: ACTV %d, IDLE %d\n",
+ data->actv_cycle_time, data->idle_cycle_time);
+
+ return 0;
}
static int mxt_get_info(struct mxt_data *data)
@@ -757,6 +991,9 @@ static int mxt_get_info(struct mxt_data *data)
int error;
u8 val;
+ /* force send of address pointer on first read during probe */
+ data->last_address = -1;
+
error = mxt_read_reg(client, MXT_FAMILY_ID, &val);
if (error)
return error;
@@ -787,6 +1024,7 @@ static int mxt_get_info(struct mxt_data *data)
static int mxt_get_object_table(struct mxt_data *data)
{
+ struct device *dev = &data->client->dev;
int error;
int i;
u16 reg;
@@ -803,15 +1041,27 @@ static int mxt_get_object_table(struct mxt_data *data)
object->type = buf[0];
object->start_address = (buf[2] << 8) | buf[1];
- object->size = buf[3];
- object->instances = buf[4];
+ object->size = buf[3] + 1;
+ object->instances = buf[4] + 1;
object->num_report_ids = buf[5];
if (object->num_report_ids) {
- reportid += object->num_report_ids *
- (object->instances + 1);
+ reportid += object->num_report_ids * object->instances;
object->max_reportid = reportid;
+ object->min_reportid = object->max_reportid -
+ object->instances * object->num_report_ids + 1;
}
+
+ /* Store the message window address so we don't have to
+ * search the object table every time we read a message */
+ if (object->type == MXT_GEN_MESSAGE_T5)
+ data->msg_address = object->start_address;
+
+ dev_dbg(dev, "T%d, start:%d size:%d instances:%d "
+ "min_reportid:%d max_reportid:%d\n",
+ object->type, object->start_address, object->size,
+ object->instances,
+ object->min_reportid, object->max_reportid);
}
return 0;
@@ -822,7 +1072,6 @@ static int mxt_initialize(struct mxt_data *data)
struct i2c_client *client = data->client;
struct mxt_info *info = &data->info;
int error;
- u8 val;
error = mxt_get_info(data);
if (error)
@@ -838,37 +1087,25 @@ static int mxt_initialize(struct mxt_data *data)
/* Get object table information */
error = mxt_get_object_table(data);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Failed to read object table\n");
return error;
+ }
- /* Check register init values */
+ /* Load initial touch chip configuration */
error = mxt_check_reg_init(data);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize configuration\n");
return error;
+ }
mxt_handle_pdata(data);
- /* Backup to memory */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_BACKUPNV,
- MXT_BACKUP_VALUE);
- msleep(MXT_BACKUP_TIME);
-
- /* Soft reset */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_RESET, 1);
- msleep(MXT_RESET_TIME);
-
- /* Update matrix size at info struct */
- error = mxt_read_reg(client, MXT_MATRIX_X_SIZE, &val);
- if (error)
- return error;
- info->matrix_xsize = val;
-
- error = mxt_read_reg(client, MXT_MATRIX_Y_SIZE, &val);
- if (error)
+ error = mxt_init_power_cfg(data);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize power cfg\n");
return error;
- info->matrix_ysize = val;
+ }
dev_info(&client->dev,
"Family ID: %d Variant ID: %d Version: %d Build: %d\n",
@@ -910,26 +1147,35 @@ static ssize_t mxt_object_show(struct device *dev,
for (i = 0; i < data->info.object_num; i++) {
object = data->object_table + i;
- count += sprintf(buf + count,
- "Object Table Element %d(Type %d)\n",
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "Object[%d] (Type %d)\n",
i + 1, object->type);
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
if (!mxt_object_readable(object->type)) {
- count += sprintf(buf + count, "\n");
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\n");
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
continue;
}
- for (j = 0; j < object->size + 1; j++) {
+ for (j = 0; j < object->size; j++) {
error = mxt_read_object(data,
object->type, j, &val);
if (error)
return error;
- count += sprintf(buf + count,
- " Byte %d: 0x%x (%d)\n", j, val, val);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\t[%2d]: %02x (%d)\n", j, val, val);
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
}
- count += sprintf(buf + count, "\n");
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
}
return count;
@@ -1040,12 +1286,240 @@ static ssize_t mxt_update_fw_store(struct device *dev,
return count;
}
+static ssize_t mxt_pause_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int count = 0;
+
+ count += sprintf(buf + count, "%d", data->driver_paused);
+ count += sprintf(buf + count, "\n");
+
+ return count;
+}
+
+static ssize_t mxt_pause_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int i;
+
+ if (sscanf(buf, "%u", &i) == 1 && i < 2) {
+ data->driver_paused = i;
+
+ dev_dbg(dev, "%s\n", i ? "paused" : "unpaused");
+ } else {
+ dev_dbg(dev, "pause_driver write error\n");
+ }
+ return count;
+}
+
+static ssize_t mxt_debug_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int count = 0;
+
+ count += sprintf(buf + count, "%d", data->debug_enabled);
+ count += sprintf(buf + count, "\n");
+
+ return count;
+}
+
+static ssize_t mxt_debug_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int i;
+
+ if (sscanf(buf, "%u", &i) == 1 && i < 2) {
+ data->debug_enabled = i;
+
+ dev_dbg(dev, "%s\n", i ? "debug enabled" : "debug disabled");
+ } else {
+ dev_dbg(dev, "debug_enabled write error\n");
+ }
+ return count;
+}
+
+static ssize_t mxt_slowscan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int count = 0;
+ int error;
+ u8 actv_cycle_time;
+ u8 idle_cycle_time;
+ u8 actv2idle_timeout;
+ dev_info(dev, "Calling mxt_slowscan_show()\n");
+
+ error = mxt_read_object(data, MXT_GEN_POWER_T7,
+ MXT_POWER_ACTVACQINT,
+ &actv_cycle_time);
+
+ if (error)
+ return error;
+
+ error = mxt_read_object(data, MXT_GEN_POWER_T7,
+ MXT_POWER_IDLEACQINT,
+ &idle_cycle_time);
+
+ if (error)
+ return error;
+
+ error = mxt_read_object(data, MXT_GEN_POWER_T7,
+ MXT_POWER_ACTV2IDLETO,
+ &actv2idle_timeout);
+
+ if (error)
+ return error;
+
+ count += sprintf(buf + count, "SLOW SCAN (enable/disable) = %s.\n",
+ data->slowscan_enabled ? "enabled" : "disabled");
+ count += sprintf(buf + count, "SLOW SCAN (actv_cycle_time) = %umS.\n",
+ data->slowscan_actv_cycle_time);
+ count += sprintf(buf + count, "SLOW SCAN (idle_cycle_time) = %umS.\n",
+ data->slowscan_idle_cycle_time);
+ count += sprintf(buf + count, "SLOW SCAN (actv2idle_timeout) = %u.%0uS.\n",
+ data->slowscan_actv2idle_timeout / 10,
+ data->slowscan_actv2idle_timeout % 10);
+ count += sprintf(buf + count, "CURRENT (actv_cycle_time) = %umS.\n",
+ actv_cycle_time);
+ count += sprintf(buf + count, "CURRENT (idle_cycle_time) = %umS.\n",
+ idle_cycle_time);
+ count += sprintf(buf + count, "CURRENT (actv2idle_timeout) = %u.%0uS.\n",
+ actv2idle_timeout / 10, actv2idle_timeout % 10);
+
+ return count;
+}
+
+static ssize_t mxt_slowscan_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int fn;
+ int val;
+ int ret;
+
+ dev_info(dev, "Calling mxt_slowscan_store()\n");
+ ret = sscanf(buf, "%u %u", &fn, &val);
+ if ((ret == 1) || (ret == 2)) {
+ switch (fn) {
+ case SLOSCAN_DISABLE:
+ if (data->slowscan_enabled) {
+ data->actv_cycle_time = data->slowscan_shad_actv_cycle_time;
+ data->idle_cycle_time = data->slowscan_shad_idle_cycle_time;
+ data->actv2idle_timeout = data->slowscan_shad_actv2idle_timeout;
+ data->slowscan_enabled = 0;
+ mxt_set_power_cfg(data, 0);
+ }
+ break;
+
+ case SLOSCAN_ENABLE:
+ if (!data->slowscan_enabled) {
+ data->slowscan_shad_actv_cycle_time = data->actv_cycle_time;
+ data->slowscan_shad_idle_cycle_time = data->idle_cycle_time;
+ data->slowscan_shad_actv2idle_timeout = data->actv2idle_timeout;
+ data->actv_cycle_time = data->slowscan_actv_cycle_time;
+ data->idle_cycle_time = data->slowscan_idle_cycle_time;
+ data->actv2idle_timeout = data->slowscan_actv2idle_timeout;
+ data->slowscan_enabled = 1;
+ mxt_set_power_cfg(data, 0);
+ }
+ break;
+
+ case SLOSCAN_SET_ACTVACQINT:
+ data->slowscan_actv_cycle_time = val;
+ break;
+
+ case SLOSCAN_SET_IDLEACQINT:
+ data->slowscan_idle_cycle_time = val;
+ break;
+
+ case SLOSCAN_SET_ACTV2IDLETO:
+ data->slowscan_actv2idle_timeout = val;
+ break;
+ }
+ }
+ return count;
+}
+
+static ssize_t mxt_mem_access_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
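+ /* Clamp accesses to a 32 KiB offset window and 256 bytes per transfer. */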
+ if (off >= 32768)
+ return -EIO;
+
+ if (off + count > 32768)
+ count = 32768 - off;
+
+ if (count > 256)
+ count = 256;
+
+ if (count > 0)
+ ret = __mxt_read_reg(data->client, off, count, buf);
+
+ return ret == 0 ? count : ret;
+}
+
+int mxt_write_block(struct i2c_client *client, u16 addr, u16 length, u8 *value)
+{
+ int i;
+ struct {
+ __le16 le_addr;
+ u8 data[256];
+ } i2c_block_transfer;
+
+ if (length > 256)
+ return -EINVAL;
+
+ i2c_get_clientdata(client);
+
+ for (i = 0; i < length; i++)
+ i2c_block_transfer.data[i] = *value++;
+
+ i2c_block_transfer.le_addr = cpu_to_le16(addr);
+
+ i = i2c_master_send(client, (u8 *) &i2c_block_transfer, length + 2);
+
+ if (i == (length + 2))
+ return 0;
+ else
+ return -EIO;
+}
+
+static ssize_t mxt_mem_access_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (off >= 32768)
+ return -EIO;
+
+ if (off + count > 32768)
+ count = 32768 - off;
+
+ if (count > 256)
+ count = 256;
+
+ if (count > 0)
+ ret = mxt_write_block(data->client, off, count, buf);
+
+ return ret == 0 ? count : 0;
+}
+
static DEVICE_ATTR(object, 0444, mxt_object_show, NULL);
static DEVICE_ATTR(update_fw, 0664, NULL, mxt_update_fw_store);
+static DEVICE_ATTR(pause_driver, 0664, mxt_pause_show, mxt_pause_store);
+static DEVICE_ATTR(debug_enable, 0664, mxt_debug_enable_show, mxt_debug_enable_store);
+static DEVICE_ATTR(slowscan_enable, 0664, mxt_slowscan_show, mxt_slowscan_store);
static struct attribute *mxt_attrs[] = {
&dev_attr_object.attr,
&dev_attr_update_fw.attr,
+ &dev_attr_pause_driver.attr,
+ &dev_attr_debug_enable.attr,
+ &dev_attr_slowscan_enable.attr,
NULL
};
@@ -1055,16 +1529,38 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
+ int error;
+ struct device *dev = &data->client->dev;
+
+ dev_info(dev, "mxt_start: is_stopped = %d\n", data->is_stopped);
+ if (data->is_stopped == 0)
+ return;
+
/* Touch enable */
- mxt_write_object(data,
- MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
+ error = mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, MXT_TOUCH_ENABLE);
+
+ if (!error)
+ dev_info(dev, "MXT started\n");
+
+ data->is_stopped = 0;
}
static void mxt_stop(struct mxt_data *data)
{
+ int error;
+ struct device *dev = &data->client->dev;
+
+ dev_info(dev, "mxt_stop: is_stopped = %d\n", data->is_stopped);
+ if (data->is_stopped)
+ return;
+
/* Touch disable */
- mxt_write_object(data,
- MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
+ error = mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, MXT_TOUCH_DISABLE);
+
+ if (!error)
+ dev_info(dev, "MXT suspended\n");
+
+ data->is_stopped = 1;
}
static int mxt_input_open(struct input_dev *dev)
@@ -1102,7 +1598,7 @@ static int __devinit mxt_probe(struct i2c_client *client,
goto err_free_mem;
}
- input_dev->name = "Atmel maXTouch Touchscreen";
+ input_dev->name = "atmel-maxtouch";
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->open = mxt_input_open;
@@ -1112,6 +1608,7 @@ static int __devinit mxt_probe(struct i2c_client *client,
data->input_dev = input_dev;
data->pdata = pdata;
data->irq = client->irq;
+ data->is_stopped = 0;
mxt_calc_resolution(data);
@@ -1128,7 +1625,6 @@ static int __devinit mxt_probe(struct i2c_client *client,
0, 255, 0, 0);
/* For multi touch */
- input_mt_init_slots(input_dev, MXT_MAX_FINGER);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
0, MXT_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
@@ -1141,6 +1637,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
input_set_drvdata(input_dev, data);
i2c_set_clientdata(client, data);
+ mutex_init(&data->access_mutex);
+
error = mxt_initialize(data);
if (error)
goto err_free_object;
@@ -1152,17 +1650,42 @@ static int __devinit mxt_probe(struct i2c_client *client,
goto err_free_object;
}
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ data->early_suspend.suspend = mxt_early_suspend;
+ data->early_suspend.resume = mxt_early_resume;
+ register_early_suspend(&data->early_suspend);
+#endif
+
error = mxt_make_highchg(data);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Failed to make high CHG\n");
goto err_free_irq;
+ }
error = input_register_device(input_dev);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Failed to register input device\n");
goto err_free_irq;
+ }
error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Failed to create sysfs group\n");
+ goto err_unregister_device;
+ }
+
+ sysfs_bin_attr_init(&data->mem_access_attr);
+ data->mem_access_attr.attr.name = "mem_access";
+ data->mem_access_attr.attr.mode = S_IRUGO | S_IWUGO;
+ data->mem_access_attr.read = mxt_mem_access_read;
+ data->mem_access_attr.write = mxt_mem_access_write;
+ data->mem_access_attr.size = 65535;
+
+ if (sysfs_create_bin_file(&client->dev.kobj, &data->mem_access_attr) < 0) {
+ dev_err(&client->dev, "Failed to create %s\n", data->mem_access_attr.attr.name);
goto err_unregister_device;
+ }
return 0;
@@ -1183,6 +1706,7 @@ static int __devexit mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
+ sysfs_remove_bin_file(&client->dev.kobj, &data->mem_access_attr);
sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
free_irq(data->irq, data);
input_unregister_device(data->input_dev);
@@ -1216,8 +1740,7 @@ static int mxt_resume(struct device *dev)
struct input_dev *input_dev = data->input_dev;
/* Soft reset */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_RESET, 1);
+ mxt_write_object(data, MXT_GEN_COMMAND_T6, MXT_COMMAND_RESET, 1);
msleep(MXT_RESET_TIME);
@@ -1231,11 +1754,39 @@ static int mxt_resume(struct device *dev)
return 0;
}
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static void mxt_early_suspend(struct early_suspend *es)
+{
+ struct mxt_data *mxt;
+ struct device *dev;
+ mxt = container_of(es, struct mxt_data, early_suspend);
+ dev = &mxt->client->dev;
+ dev_info(dev, "MXT Early Suspend entered\n");
+
+ if (mxt_suspend(&mxt->client->dev) != 0)
+ dev_err(&mxt->client->dev, "%s: failed\n", __func__);
+ dev_info(dev, "MXT Early Suspended\n");
+}
+
+static void mxt_early_resume(struct early_suspend *es)
+{
+ struct mxt_data *mxt;
+ struct device *dev;
+ mxt = container_of(es, struct mxt_data, early_suspend);
+ dev = &mxt->client->dev;
+ dev_info(dev, "MXT Early Resume entered\n");
+
+ if (mxt_resume(&mxt->client->dev) != 0)
+ dev_err(&mxt->client->dev, "%s: failed\n", __func__);
+ dev_info(dev, "MXT Early Resumed\n");
+}
+#else
static const struct dev_pm_ops mxt_pm_ops = {
.suspend = mxt_suspend,
.resume = mxt_resume,
};
#endif
+#endif
static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
@@ -1249,7 +1800,7 @@ static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
.pm = &mxt_pm_ops,
#endif
},
diff --git a/drivers/input/touchscreen/panjit_i2c.c b/drivers/input/touchscreen/panjit_i2c.c
new file mode 100644
index 000000000000..4fccebc52ea8
--- /dev/null
+++ b/drivers/input/touchscreen/panjit_i2c.c
@@ -0,0 +1,361 @@
+/*
+ * drivers/input/touchscreen/panjit_i2c.c
+ *
+ * Touchscreen class input driver for Panjit touch panel using I2C bus
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define CSR 0x00
+ #define CSR_SCAN_EN (1 << 3)
+ #define CSR_SLEEP_EN (1 << 7)
+#define C_FLAG 0x01
+#define X1_H 0x03
+
+#define DRIVER_NAME "panjit_touch"
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void pj_early_suspend(struct early_suspend *h);
+static void pj_late_resume(struct early_suspend *h);
+#endif
+
+struct pj_data {
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+ int gpio_reset;
+ struct early_suspend early_suspend;
+};
+
+struct pj_event {
+ __be16 coord[2][2];
+ __u8 fingers;
+ __u8 gesture;
+};
+
+union pj_buff {
+ struct pj_event data;
+ unsigned char buff[sizeof(struct pj_event)];
+};
+
+static void pj_reset(struct pj_data *touch)
+{
+ if (touch->gpio_reset < 0)
+ return;
+
+ gpio_set_value(touch->gpio_reset, 1);
+ msleep(50);
+ gpio_set_value(touch->gpio_reset, 0);
+ msleep(50);
+}
+
+static irqreturn_t pj_irq(int irq, void *dev_id)
+{
+ struct pj_data *touch = dev_id;
+ struct i2c_client *client = touch->client;
+ union pj_buff event;
+ int ret, i;
+
+ ret = i2c_smbus_read_i2c_block_data(client, X1_H,
+ sizeof(event.buff), event.buff);
+ if (WARN_ON(ret < 0)) {
+ dev_err(&client->dev, "error %d reading event data\n", ret);
+ return IRQ_NONE;
+ }
+ ret = i2c_smbus_write_byte_data(client, C_FLAG, 0);
+ if (WARN_ON(ret < 0)) {
+ dev_err(&client->dev, "error %d clearing interrupt\n", ret);
+ return IRQ_NONE;
+ }
+
+ input_report_key(touch->input_dev, BTN_TOUCH,
+ (event.data.fingers == 1 || event.data.fingers == 2));
+ input_report_key(touch->input_dev, BTN_2, (event.data.fingers == 2));
+
+ if (!event.data.fingers || (event.data.fingers > 2))
+ goto out;
+
+ for (i = 0; i < event.data.fingers; i++) {
+ input_report_abs(touch->input_dev, ABS_MT_POSITION_X,
+ __be16_to_cpu(event.data.coord[i][0]));
+ input_report_abs(touch->input_dev, ABS_MT_POSITION_Y,
+ __be16_to_cpu(event.data.coord[i][1]));
+ input_report_abs(touch->input_dev, ABS_MT_TRACKING_ID, i + 1);
+ input_mt_sync(touch->input_dev);
+ }
+
+out:
+ input_sync(touch->input_dev);
+ return IRQ_HANDLED;
+}
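
For reference, the packet that pj_irq() reads starting at X1_H has exactly the struct pj_event layout above: two big-endian (X, Y) pairs followed by a finger count and a gesture byte. A minimal user-space sketch of that decoding, with invented sample bytes and a local be16() standing in for the kernel's __be16_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* Convert one big-endian 16-bit field, as __be16_to_cpu() does in pj_irq(). */
static uint16_t be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        /* Arbitrary example packet: finger 0 at (0x0123, 0x0246),
         * finger 1 at (0x0311, 0x0180), two fingers, no gesture. */
        const uint8_t raw[10] = {
                0x01, 0x23, 0x02, 0x46,
                0x03, 0x11, 0x01, 0x80,
                0x02, 0x00,
        };
        int i, fingers = raw[8];

        for (i = 0; i < fingers && i < 2; i++)
                printf("finger %d: x=%u y=%u\n", i + 1,
                       be16(&raw[i * 4]), be16(&raw[i * 4 + 2]));
        return 0;
}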
+
+static int pj_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct panjit_i2c_ts_platform_data *pdata = client->dev.platform_data;
+ struct pj_data *touch = NULL;
+ struct input_dev *input_dev = NULL;
+ int ret = 0;
+
+ touch = kzalloc(sizeof(struct pj_data), GFP_KERNEL);
+ if (!touch) {
+ dev_err(&client->dev, "%s: no memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ touch->gpio_reset = -EINVAL;
+
+ if (pdata) {
+ ret = gpio_request(pdata->gpio_reset, "panjit_reset");
+ if (!ret) {
+ ret = gpio_direction_output(pdata->gpio_reset, 1);
+ if (ret < 0)
+ gpio_free(pdata->gpio_reset);
+ }
+
+ if (!ret)
+ touch->gpio_reset = pdata->gpio_reset;
+ else
+ dev_warn(&client->dev, "unable to configure GPIO\n");
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "%s: no memory\n", __func__);
+ kfree(touch);
+ return -ENOMEM;
+ }
+
+ touch->client = client;
+ i2c_set_clientdata(client, touch);
+
+ pj_reset(touch);
+
+ /* clear interrupt */
+ ret = i2c_smbus_write_byte_data(touch->client, C_FLAG, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: clear interrupt failed\n",
+ __func__);
+ goto fail_i2c_or_register;
+ }
+
+ /* enable scanning */
+ ret = i2c_smbus_write_byte_data(touch->client, CSR, CSR_SCAN_EN);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: enable interrupt failed\n",
+ __func__);
+ goto fail_i2c_or_register;
+ }
+
+ touch->input_dev = input_dev;
+ touch->input_dev->name = DRIVER_NAME;
+
+ set_bit(EV_SYN, touch->input_dev->evbit);
+ set_bit(EV_KEY, touch->input_dev->evbit);
+ set_bit(EV_ABS, touch->input_dev->evbit);
+ set_bit(BTN_TOUCH, touch->input_dev->keybit);
+ set_bit(BTN_2, touch->input_dev->keybit);
+
+ /* expose multi-touch capabilities */
+ set_bit(ABS_MT_POSITION_X, touch->input_dev->absbit);
+ set_bit(ABS_MT_POSITION_Y, touch->input_dev->absbit);
+ set_bit(ABS_X, touch->input_dev->absbit);
+ set_bit(ABS_Y, touch->input_dev->absbit);
+
+ /* all coordinates are reported in 0..4095 */
+ input_set_abs_params(touch->input_dev, ABS_X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_Y, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT0X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT0Y, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT1X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT1Y, 0, 4095, 0, 0);
+
+ input_set_abs_params(touch->input_dev, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_MT_TRACKING_ID, 0, 2, 1, 0);
+
+ ret = input_register_device(touch->input_dev);
+ if (ret) {
+ dev_err(&client->dev, "%s: input_register_device failed\n",
+ __func__);
+ goto fail_i2c_or_register;
+ }
+
+ /* get the irq */
+ ret = request_threaded_irq(touch->client->irq, NULL, pj_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ DRIVER_NAME, touch);
+ if (ret) {
+ dev_err(&client->dev, "%s: request_irq(%d) failed\n",
+ __func__, touch->client->irq);
+ goto fail_irq;
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ touch->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ touch->early_suspend.suspend = pj_early_suspend;
+ touch->early_suspend.resume = pj_late_resume;
+ register_early_suspend(&touch->early_suspend);
+#endif
+ dev_info(&client->dev, "%s: initialized\n", __func__);
+ return 0;
+
+fail_irq:
+ input_unregister_device(touch->input_dev);
+
+fail_i2c_or_register:
+ if (touch->gpio_reset >= 0)
+ gpio_free(touch->gpio_reset);
+
+ input_free_device(input_dev);
+ kfree(touch);
+ return ret;
+}
+
+static int pj_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct pj_data *touch = i2c_get_clientdata(client);
+ int ret;
+
+ if (WARN_ON(!touch))
+ return -EINVAL;
+
+ disable_irq(client->irq);
+
+ /* disable scanning and enable deep sleep */
+ ret = i2c_smbus_write_byte_data(client, CSR, CSR_SLEEP_EN);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: sleep enable fail\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pj_resume(struct i2c_client *client)
+{
+ struct pj_data *touch = i2c_get_clientdata(client);
+ int ret = 0;
+
+ if (WARN_ON(!touch))
+ return -EINVAL;
+
+ pj_reset(touch);
+
+ /* enable scanning and disable deep sleep */
+ ret = i2c_smbus_write_byte_data(client, C_FLAG, 0);
+ if (ret >= 0)
+ ret = i2c_smbus_write_byte_data(client, CSR, CSR_SCAN_EN);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: scan enable fail\n", __func__);
+ return ret;
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void pj_early_suspend(struct early_suspend *es)
+{
+ struct pj_data *touch;
+ touch = container_of(es, struct pj_data, early_suspend);
+
+ if (pj_suspend(touch->client, PMSG_SUSPEND) != 0)
+ dev_err(&touch->client->dev, "%s: failed\n", __func__);
+}
+
+static void pj_late_resume(struct early_suspend *es)
+{
+ struct pj_data *touch;
+ touch = container_of(es, struct pj_data, early_suspend);
+
+ if (pj_resume(touch->client) != 0)
+ dev_err(&touch->client->dev, "%s: failed\n", __func__);
+}
+#endif
+
+static int pj_remove(struct i2c_client *client)
+{
+ struct pj_data *touch = i2c_get_clientdata(client);
+
+ if (!touch)
+ return -EINVAL;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&touch->early_suspend);
+#endif
+ free_irq(touch->client->irq, touch);
+ if (touch->gpio_reset >= 0)
+ gpio_free(touch->gpio_reset);
+ input_unregister_device(touch->input_dev);
+ input_free_device(touch->input_dev);
+ kfree(touch);
+ return 0;
+}
+
+static const struct i2c_device_id panjit_ts_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver panjit_driver = {
+ .probe = pj_probe,
+ .remove = pj_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = pj_suspend,
+ .resume = pj_resume,
+#endif
+ .id_table = panjit_ts_id,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init panjit_init(void)
+{
+ int e;
+
+ e = i2c_add_driver(&panjit_driver);
+ if (e != 0) {
+ pr_err("%s: failed to register with I2C bus with "
+ "error: 0x%x\n", __func__, e);
+ }
+ return e;
+}
+
+static void __exit panjit_exit(void)
+{
+ i2c_del_driver(&panjit_driver);
+}
+
+module_init(panjit_init);
+module_exit(panjit_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Panjit I2C touch driver");
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 000000000000..6f9b83af0359
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,699 @@
+/* drivers/input/keyboard/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+ uint16_t addr;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int use_irq;
+ bool has_relative_report;
+ struct hrtimer timer;
+ struct work_struct work;
+ uint16_t max[2];
+ int snap_state[2][2];
+ int snap_down_on[2];
+ int snap_down_off[2];
+ int snap_up_on[2];
+ int snap_up_off[2];
+ int snap_down[2];
+ int snap_up[2];
+ uint32_t flags;
+ int reported_finger_count;
+ int last_pos[2][2];
+ int8_t sensitivity_adjust;
+ int (*power)(int on);
+ struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_page_select_failed;
+ }
+ ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+ ts->sensitivity_adjust);
+ if (ret < 0)
+ pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+ return ret;
+}
+
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+ int i;
+ int ret;
+ int bad_data = 0;
+ struct i2c_msg msg[2];
+ uint8_t start_reg;
+ uint8_t buf[15];
+ struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+ int buf_len = ts->has_relative_report ? 15 : 13;
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &start_reg;
+ start_reg = 0x00;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = buf_len;
+ msg[1].buf = buf;
+
+ /* printk("synaptics_ts_work_func\n"); */
+ for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+ bad_data = 1;
+ } else {
+ /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+ /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+ /* buf[0], buf[1], buf[2], buf[3], */
+ /* buf[4], buf[5], buf[6], buf[7], */
+ /* buf[8], buf[9], buf[10], buf[11], */
+ /* buf[12], buf[13], buf[14], ret); */
+ if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+ printk(KERN_WARNING "synaptics_ts_work_func:"
+ " bad read %x %x %x %x %x %x %x %x %x"
+ " %x %x %x %x %x %x, ret %d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], ret);
+ if (bad_data)
+ synaptics_init_panel(ts);
+ bad_data = 1;
+ continue;
+ }
+ bad_data = 0;
+ if ((buf[buf_len - 1] & 1) == 0) {
+ /* printk("read %d coordinates\n", i); */
+ break;
+ } else {
+ int pos[2][2];
+ int rmpos_x, rmpos_y;
+ int f, a;
+ int base;
+ /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+ /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+ int z = buf[1];
+ int w = buf[0] >> 4;
+ int finger = buf[0] & 7;
+
+ /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+ /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+ /* int z2 = buf[1+6]; */
+ /* int w2 = buf[0+6] >> 4; */
+ /* int finger2 = buf[0+6] & 7; */
+
+ /* int dx = (int8_t)buf[12]; */
+ /* int dy = (int8_t)buf[13]; */
+ int finger2_pressed;
+
+ /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+ /* x, y, z, w, finger, */
+ /* x2, y2, z2, w2, finger2, */
+ /* dx, dy); */
+
+ base = 2;
+ for (f = 0; f < 2; f++) {
+ uint32_t flip_flag = SYNAPTICS_FLIP_X;
+ for (a = 0; a < 2; a++) {
+ int p = buf[base + 1];
+ p |= (uint16_t)(buf[base] & 0x1f) << 8;
+ if (ts->flags & flip_flag)
+ p = ts->max[a] - p;
+ if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
+ if (ts->snap_state[f][a]) {
+ if (p <= ts->snap_down_off[a])
+ p = ts->snap_down[a];
+ else if (p >= ts->snap_up_off[a])
+ p = ts->snap_up[a];
+ else
+ ts->snap_state[f][a] = 0;
+ } else {
+ if (p <= ts->snap_down_on[a]) {
+ p = ts->snap_down[a];
+ ts->snap_state[f][a] = 1;
+ } else if (p >= ts->snap_up_on[a]) {
+ p = ts->snap_up[a];
+ ts->snap_state[f][a] = 1;
+ }
+ }
+ }
+ pos[f][a] = p;
+ base += 2;
+ flip_flag <<= 1;
+ }
+ base += 2;
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(pos[f][0], pos[f][1]);
+ }
+
+ if (!finger) {
+ z = 0;
+ if (ts->reported_finger_count > 0) {
+ pos[0][0] = ts->last_pos[0][0];
+ pos[0][1] = ts->last_pos[0][1];
+ } else
+ continue; /* skip touch noise */
+ }
+ finger = (finger == 7) ? 1 : finger; /* correct wrong finger count */
+ finger2_pressed = finger > 1;
+
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+ input_mt_sync(ts->input_dev);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+ input_mt_sync(ts->input_dev);
+ ts->last_pos[1][0] = pos[1][0];
+ ts->last_pos[1][1] = pos[1][1];
+ } else if (ts->reported_finger_count > 1) {
+ /* check which point was removed */
+ if ((ABS_DIFF(pos[0][0], ts->last_pos[0][0]) +
+ ABS_DIFF(pos[0][1], ts->last_pos[0][1])) <
+ (ABS_DIFF(pos[0][0], ts->last_pos[1][0]) +
+ ABS_DIFF(pos[0][1], ts->last_pos[1][1]))) {
+ rmpos_x = ts->last_pos[1][0];
+ rmpos_y = ts->last_pos[1][1];
+ } else {
+ rmpos_x = ts->last_pos[0][0];
+ rmpos_y = ts->last_pos[0][1];
+ }
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, rmpos_x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, rmpos_y);
+ input_mt_sync(ts->input_dev);
+ }
+
+ if (z) {
+ input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+ ts->last_pos[0][0] = pos[0][0];
+ ts->last_pos[0][1] = pos[0][1];
+ }
+ input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+ input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+ input_report_key(ts->input_dev, BTN_TOUCH, finger);
+ ts->reported_finger_count = finger;
+ input_sync(ts->input_dev);
+ }
+ }
+ }
+ if (ts->use_irq)
+ enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+ struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+ /* printk("synaptics_ts_timer_func\n"); */
+
+ queue_work(synaptics_wq, &ts->work);
+
+ hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+ struct synaptics_ts_data *ts = dev_id;
+
+ /* printk("synaptics_ts_irq_handler\n"); */
+ disable_irq_nosync(ts->client->irq);
+ queue_work(synaptics_wq, &ts->work);
+ return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+ struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct synaptics_ts_data *ts;
+ uint8_t buf0[4];
+ uint8_t buf1[8];
+ struct i2c_msg msg[2];
+ int ret = 0;
+ uint16_t max_x, max_y;
+ int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+ struct synaptics_i2c_rmi_platform_data *pdata;
+ unsigned long irqflags;
+ int inactive_area_left;
+ int inactive_area_right;
+ int inactive_area_top;
+ int inactive_area_bottom;
+ int snap_left_on;
+ int snap_left_off;
+ int snap_right_on;
+ int snap_right_off;
+ int snap_top_on;
+ int snap_top_off;
+ int snap_bottom_on;
+ int snap_bottom_off;
+ uint32_t panel_version;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_check_functionality_failed;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_data_failed;
+ }
+ INIT_WORK(&ts->work, synaptics_ts_work_func);
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ pdata = client->dev.platform_data;
+ if (pdata)
+ ts->power = pdata->power;
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+ goto err_power_failed;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ /* fail? */
+ }
+ {
+ int retry = 10;
+ while (retry-- > 0) {
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+ }
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+ panel_version = ret << 8;
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+ panel_version |= ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+ if (pdata) {
+ while (pdata->version > panel_version)
+ pdata++;
+ ts->flags = pdata->flags;
+ ts->sensitivity_adjust = pdata->sensitivity_adjust;
+ irqflags = pdata->irqflags;
+ inactive_area_left = pdata->inactive_left;
+ inactive_area_right = pdata->inactive_right;
+ inactive_area_top = pdata->inactive_top;
+ inactive_area_bottom = pdata->inactive_bottom;
+ snap_left_on = pdata->snap_left_on;
+ snap_left_off = pdata->snap_left_off;
+ snap_right_on = pdata->snap_right_on;
+ snap_right_off = pdata->snap_right_off;
+ snap_top_on = pdata->snap_top_on;
+ snap_top_off = pdata->snap_top_off;
+ snap_bottom_on = pdata->snap_bottom_on;
+ snap_bottom_off = pdata->snap_bottom_off;
+ fuzz_x = pdata->fuzz_x;
+ fuzz_y = pdata->fuzz_y;
+ fuzz_p = pdata->fuzz_p;
+ fuzz_w = pdata->fuzz_w;
+ } else {
+ irqflags = 0;
+ inactive_area_left = 0;
+ inactive_area_right = 0;
+ inactive_area_top = 0;
+ inactive_area_bottom = 0;
+ snap_left_on = 0;
+ snap_left_off = 0;
+ snap_right_on = 0;
+ snap_right_off = 0;
+ snap_top_on = 0;
+ snap_top_off = 0;
+ snap_bottom_on = 0;
+ snap_bottom_off = 0;
+ fuzz_x = 0;
+ fuzz_y = 0;
+ fuzz_p = 0;
+ fuzz_w = 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ goto err_detect_failed;
+ }
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf0;
+ buf0[0] = 0xe0;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 8;
+ msg[1].buf = buf1;
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_transfer failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+ buf1[0], buf1[1], buf1[2], buf1[3],
+ buf1[4], buf1[5], buf1[6], buf1[7]);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_detect_failed;
+ }
+ ret = i2c_smbus_read_word_data(ts->client, 0x02);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->has_relative_report = !(ret & 0x100);
+ printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+ ret = i2c_smbus_read_word_data(ts->client, 0x04);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ ret = i2c_smbus_read_word_data(ts->client, 0x06);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(max_x, max_y);
+
+ ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_init_panel failed\n");
+ goto err_detect_failed;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ ts->input_dev->name = "synaptics-rmi-touchscreen";
+ set_bit(EV_SYN, ts->input_dev->evbit);
+ set_bit(EV_KEY, ts->input_dev->evbit);
+ set_bit(BTN_TOUCH, ts->input_dev->keybit);
+ set_bit(EV_ABS, ts->input_dev->evbit);
+ inactive_area_left = inactive_area_left * max_x / 0x10000;
+ inactive_area_right = inactive_area_right * max_x / 0x10000;
+ inactive_area_top = inactive_area_top * max_y / 0x10000;
+ inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+ snap_left_on = snap_left_on * max_x / 0x10000;
+ snap_left_off = snap_left_off * max_x / 0x10000;
+ snap_right_on = snap_right_on * max_x / 0x10000;
+ snap_right_off = snap_right_off * max_x / 0x10000;
+ snap_top_on = snap_top_on * max_y / 0x10000;
+ snap_top_off = snap_top_off * max_y / 0x10000;
+ snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+ snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+ fuzz_x = fuzz_x * max_x / 0x10000;
+ fuzz_y = fuzz_y * max_y / 0x10000;
+ ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+ ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+ ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+ ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+ ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+ ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+ ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+ ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+ ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+ ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+ ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+ ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+ printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+ printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+ inactive_area_left, inactive_area_right,
+ inactive_area_top, inactive_area_bottom);
+ printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+ snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+ snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+ input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+ /* ts->input_dev->name = ts->keypad_info->name; */
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+ goto err_input_register_device_failed;
+ }
+ if (client->irq) {
+ ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+ if (ret == 0) {
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+ if (ret)
+ free_irq(client->irq, ts);
+ }
+ if (ret == 0)
+ ts->use_irq = 1;
+ else
+ dev_err(&client->dev, "request_irq failed\n");
+ }
+ if (!ts->use_irq) {
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ts->timer.function = synaptics_ts_timer_func;
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = synaptics_ts_early_suspend;
+ ts->early_suspend.resume = synaptics_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
+ printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+ return 0;
+
+err_input_register_device_failed:
+ input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+ kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+ return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+ unregister_early_suspend(&ts->early_suspend);
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+ return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->use_irq)
+ disable_irq(client->irq);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = cancel_work_sync(&ts->work);
+ if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+ enable_irq(client->irq);
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+ ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+ if (ts->power) {
+ ret = ts->power(0);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power off failed\n");
+ }
+ return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+ }
+
+ synaptics_init_panel(ts);
+
+ if (ts->use_irq)
+ enable_irq(client->irq);
+
+ if (!ts->use_irq)
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ else
+ i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+ { SYNAPTICS_I2C_RMI_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+ .probe = synaptics_ts_probe,
+ .remove = synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = synaptics_ts_suspend,
+ .resume = synaptics_ts_resume,
+#endif
+ .id_table = synaptics_ts_id,
+ .driver = {
+ .name = SYNAPTICS_I2C_RMI_NAME,
+ },
+};
+
+static int __init synaptics_ts_init(void)
+{
+ synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+ if (!synaptics_wq)
+ return -ENOMEM;
+ return i2c_add_driver(&synaptics_ts_driver);
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+ i2c_del_driver(&synaptics_ts_driver);
+ if (synaptics_wq)
+ destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b591e726a6fa..7f9bed7697ac 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -450,6 +450,12 @@ config LEDS_TRIGGER_DEFAULT_ON
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
+config LEDS_TRIGGER_SLEEP
+ tristate "LED Sleep Mode Trigger"
+ depends on LEDS_TRIGGERS && HAS_EARLYSUSPEND
+ help
+ This turns LEDs on when the screen is off but the CPU is still running.
+
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index bbfd2e367dc0..cb77b9bb2f98 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_SLEEP) += ledtrig-sleep.o
diff --git a/drivers/leds/ledtrig-sleep.c b/drivers/leds/ledtrig-sleep.c
new file mode 100644
index 000000000000..f16404212152
--- /dev/null
+++ b/drivers/leds/ledtrig-sleep.c
@@ -0,0 +1,80 @@
+/* drivers/leds/ledtrig-sleep.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/leds.h>
+#include <linux/suspend.h>
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored);
+
+DEFINE_LED_TRIGGER(ledtrig_sleep)
+static struct notifier_block ledtrig_sleep_pm_notifier = {
+ .notifier_call = ledtrig_sleep_pm_callback,
+ .priority = 0,
+};
+
+static void ledtrig_sleep_early_suspend(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+}
+
+static void ledtrig_sleep_early_resume(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+}
+
+static struct early_suspend ledtrig_sleep_early_suspend_handler = {
+ .suspend = ledtrig_sleep_early_suspend,
+ .resume = ledtrig_sleep_early_resume,
+};
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+ return NOTIFY_OK;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __init ledtrig_sleep_init(void)
+{
+ led_trigger_register_simple("sleep", &ledtrig_sleep);
+ register_pm_notifier(&ledtrig_sleep_pm_notifier);
+ register_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ return 0;
+}
+
+static void __exit ledtrig_sleep_exit(void)
+{
+ unregister_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ unregister_pm_notifier(&ledtrig_sleep_pm_notifier);
+ led_trigger_unregister_simple(ledtrig_sleep);
+}
+
+module_init(ledtrig_sleep_init);
+module_exit(ledtrig_sleep_exit);
+
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f574dc012cad..1b4b4214e3b0 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -581,6 +581,7 @@ config VIDEO_VIVI
source "drivers/media/video/davinci/Kconfig"
source "drivers/media/video/omap/Kconfig"
+source "drivers/media/video/tegra/Kconfig"
source "drivers/media/video/bt8xx/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 272390072aef..225f8823de2b 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -189,6 +189,7 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-y += davinci/
obj-$(CONFIG_ARCH_OMAP) += omap/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/tegra/Kconfig b/drivers/media/video/tegra/Kconfig
new file mode 100644
index 000000000000..b0f9fa3e6850
--- /dev/null
+++ b/drivers/media/video/tegra/Kconfig
@@ -0,0 +1,81 @@
+source "drivers/media/video/tegra/avp/Kconfig"
+source "drivers/media/video/tegra/mediaserver/Kconfig"
+source "drivers/media/video/tegra/nvavp/Kconfig"
+
+config TEGRA_CAMERA
+ bool "Enable support for tegra camera/isp hardware"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Enables support for the Tegra camera interface.
+
+ If unsure, say Y.
+
+config VIDEO_OV5650
+ tristate "OV5650 camera sensor support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the Omnivision OV5650 5MP camera sensor
+ for use with the tegra isp.
+
+config VIDEO_OV14810
+ tristate "OV14810 camera sensor support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the Omnivision OV14810 14MP camera sensor
+ for use with the tegra isp.
+
+
+config VIDEO_OV9726
+ tristate "OV9726 camera sensor support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the Omnivision OV9726 camera sensor
+ for use with the tegra isp.
+
+config VIDEO_OV2710
+ tristate "OV2710 camera sensor support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the Omnivision OV2710 camera sensor
+ for use with the tegra isp.
+
+config VIDEO_AR0832
+ tristate "AR0832 camera sensor support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the AR0832 camera sensor
+ for use with the tegra isp.
+
+config VIDEO_SOC380
+ tristate "SOC380 camera sensor support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the Semco soc380 camera sensor
+ for use with the tegra isp.
+
+config TORCH_SSL3250A
+ tristate "SSL3250A flash/torch support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the SSL3250A flash/torch camera device
+
+config TORCH_TPS61050
+ tristate "TPS61050 flash/torch support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the TPS61050 flash/torch camera device
+
+config VIDEO_SH532U
+ tristate "SH532U focuser support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the SEMCO SH532U focuser
+ for use with the tegra isp.
+
+config VIDEO_AD5820
+ tristate "AD5820 focuser support"
+ depends on I2C && ARCH_TEGRA
+ ---help---
+ This is a driver for the AD5820 focuser
+ for use with the tegra isp.
diff --git a/drivers/media/video/tegra/Makefile b/drivers/media/video/tegra/Makefile
new file mode 100644
index 000000000000..a3990435570d
--- /dev/null
+++ b/drivers/media/video/tegra/Makefile
@@ -0,0 +1,19 @@
+GCOV_PROFILE := y
+#
+# Makefile for the video capture/playback device drivers.
+#
+obj-y += avp/
+obj-$(CONFIG_TEGRA_MEDIASERVER) += mediaserver/
+obj-$(CONFIG_TEGRA_NVAVP) += nvavp/
+obj-$(CONFIG_TEGRA_CAMERA) += tegra_camera.o
+obj-$(CONFIG_VIDEO_AR0832) += ar0832_main.o
+obj-$(CONFIG_VIDEO_OV5650) += ov5650.o
+obj-$(CONFIG_VIDEO_OV14810) += ov14810.o
+obj-$(CONFIG_VIDEO_OV9726) += ov9726.o
+obj-$(CONFIG_VIDEO_OV2710) += ov2710.o
+obj-$(CONFIG_VIDEO_SOC380) += soc380.o
+obj-$(CONFIG_TORCH_SSL3250A) += ssl3250a.o
+obj-$(CONFIG_TORCH_TPS61050) += tps61050.o
+obj-$(CONFIG_VIDEO_SH532U) += sh532u.o
+obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
+
diff --git a/drivers/media/video/tegra/ad5820.c b/drivers/media/video/tegra/ad5820.c
new file mode 100644
index 000000000000..19d35bca5b0b
--- /dev/null
+++ b/drivers/media/video/tegra/ad5820.c
@@ -0,0 +1,231 @@
+/*
+ * AD5820 focuser driver.
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * Contributors:
+ * Sachin Nikam <snikam@nvidia.com>
+ *
+ * Based on ov5650.c.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/ad5820.h>
+
+/* Focuser single step & full scale transition time truth table
+ * in the format of:
+ * index mode single step transition full scale transition
+ * 0 0 0 0
+ * 1 1 50uS 51.2mS
+ * 2 1 100uS 102.3mS
+ * 3 1 200uS 204.6mS
+ * 4 1 400uS 409.2mS
+ * 5 1 800uS 818.4mS
+ * 6 1 1600uS 1636.8mS
+ * 7 1 3200uS 3273.6mS
+ * 8 0 0 0
+ * 9 2 50uS 1.1mS
+ * A 2 100uS 2.2mS
+ * B 2 200uS 4.4mS
+ * C 2 400uS 8.8mS
+ * D 2 800uS 17.6mS
+ * E 2 1600uS 35.2mS
+ * F 2 3200uS 70.4mS
+ */
+
+/* pick up the mode index setting and its settle time from the above table */
+#define AD5820_TRANSITION_MODE 0x0B
+#define SETTLETIME_MS 5
+
+#define POS_LOW (0)
+#define POS_HIGH (1023)
+#define FOCAL_LENGTH (4.507f)
+#define FNUMBER (2.8f)
+#define FPOS_COUNT 1024
+
+struct ad5820_info {
+ struct i2c_client *i2c_client;
+ struct regulator *regulator;
+ struct ad5820_config config;
+};
+
+static int ad5820_write(struct i2c_client *client, u32 value)
+{
+ int count;
+ struct i2c_msg msg[1];
+ unsigned char data[2];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) ((value >> 4) & 0x3F);
+ data[1] = (u8) ((value & 0xF) << 4) | AD5820_TRANSITION_MODE;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = ARRAY_SIZE(data);
+ msg[0].buf = data;
+
+ count = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (count == ARRAY_SIZE(msg))
+ return 0;
+
+ return -EIO;
+}
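
As a stand-alone illustration of the packing done by ad5820_write() above: the 10-bit DAC position goes out with bits [9:4] in the first byte and bits [3:0] in the high nibble of the second byte, whose low nibble carries AD5820_TRANSITION_MODE (0x0B, i.e. mode 2 with a 200 uS single-step / 4.4 mS full-scale transition per the truth table above). A minimal sketch, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define AD5820_TRANSITION_MODE 0x0B /* same value as the driver above */

/* Pack a 0..1023 focuser position exactly as ad5820_write() does. */
static void ad5820_pack(uint32_t position, uint8_t data[2])
{
        data[0] = (uint8_t)((position >> 4) & 0x3F);
        data[1] = (uint8_t)(((position & 0xF) << 4) | AD5820_TRANSITION_MODE);
}

int main(void)
{
        uint8_t data[2];

        ad5820_pack(512, data); /* mid-travel position */
        printf("i2c bytes: 0x%02X 0x%02X\n", data[0], data[1]); /* 0x20 0x0B */
        return 0;
}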
+
+static int ad5820_set_position(struct ad5820_info *info, u32 position)
+{
+ if (position < info->config.pos_low ||
+ position > info->config.pos_high)
+ return -EINVAL;
+
+ return ad5820_write(info->i2c_client, position);
+}
+
+static long ad5820_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ad5820_info *info = file->private_data;
+
+ switch (cmd) {
+ case AD5820_IOCTL_GET_CONFIG:
+ {
+ if (copy_to_user((void __user *) arg,
+ &info->config,
+ sizeof(info->config))) {
+ pr_err("%s: 0x%x\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ break;
+ }
+ case AD5820_IOCTL_SET_POSITION:
+ return ad5820_set_position(info, (u32) arg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct ad5820_info *info;
+
+static int ad5820_open(struct inode *inode, struct file *file)
+{
+ file->private_data = info;
+ if (info->regulator)
+ regulator_enable(info->regulator);
+ return 0;
+}
+
+static int ad5820_release(struct inode *inode, struct file *file)
+{
+ if (info->regulator)
+ regulator_disable(info->regulator);
+ file->private_data = NULL;
+ return 0;
+}
+
+
+static const struct file_operations ad5820_fileops = {
+ .owner = THIS_MODULE,
+ .open = ad5820_open,
+ .unlocked_ioctl = ad5820_ioctl,
+ .release = ad5820_release,
+};
+
+static struct miscdevice ad5820_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ad5820",
+ .fops = &ad5820_fileops,
+};
+
+static int ad5820_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+
+ pr_info("ad5820: probing sensor.\n");
+
+ info = kzalloc(sizeof(struct ad5820_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("ad5820: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ err = misc_register(&ad5820_device);
+ if (err) {
+ pr_err("ad5820: Unable to register misc device!\n");
+ kfree(info);
+ return err;
+ }
+
+ info->regulator = regulator_get(&client->dev, "vdd_vcore_af");
+ if (IS_ERR_OR_NULL(info->regulator)) {
+ dev_err(&client->dev, "unable to get regulator %s\n",
+ dev_name(&client->dev));
+ info->regulator = NULL;
+ } else {
+ regulator_enable(info->regulator);
+ }
+
+ info->i2c_client = client;
+ info->config.settle_time = SETTLETIME_MS;
+ info->config.focal_length = FOCAL_LENGTH;
+ info->config.fnumber = FNUMBER;
+ info->config.pos_low = POS_LOW;
+ info->config.pos_high = POS_HIGH;
+ i2c_set_clientdata(client, info);
+ return 0;
+}
+
+static int ad5820_remove(struct i2c_client *client)
+{
+ struct ad5820_info *info;
+ info = i2c_get_clientdata(client);
+ misc_deregister(&ad5820_device);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id ad5820_id[] = {
+ { "ad5820", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, ad5820_id);
+
+static struct i2c_driver ad5820_i2c_driver = {
+ .driver = {
+ .name = "ad5820",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5820_probe,
+ .remove = ad5820_remove,
+ .id_table = ad5820_id,
+};
+
+static int __init ad5820_init(void)
+{
+ pr_info("ad5820 sensor driver loading\n");
+ return i2c_add_driver(&ad5820_i2c_driver);
+}
+
+static void __exit ad5820_exit(void)
+{
+ i2c_del_driver(&ad5820_i2c_driver);
+}
+
+module_init(ad5820_init);
+module_exit(ad5820_exit);
+
diff --git a/drivers/media/video/tegra/ar0832_main.c b/drivers/media/video/tegra/ar0832_main.c
new file mode 100644
index 000000000000..129825cd5f83
--- /dev/null
+++ b/drivers/media/video/tegra/ar0832_main.c
@@ -0,0 +1,2549 @@
+/*
+* ar0832_main.c - Aptina AR0832 8M Bayer type sensor driver
+*
+* Copyright (c) 2011, NVIDIA, All Rights Reserved.
+*
+* This file is licensed under the terms of the GNU General Public License
+* version 2. This program is licensed "as is" without any warranty of any
+* kind, whether express or implied.
+*/
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <mach/hardware.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <media/ar0832_main.h>
+
+#define POS_LOW 0
+#define POS_HIGH 1000
+#define SETTLETIME_MS 100
+
+struct ar0832_sensor_info {
+ int mode;
+ struct ar0832_stereo_region region;
+};
+
+struct ar0832_focuser_info {
+ struct ar0832_focuser_config config;
+ int focuser_init_flag;
+ u16 last_position;
+};
+
+struct ar0832_power_rail {
+ struct regulator *sen_1v8_reg;
+ struct regulator *sen_2v8_reg;
+};
+
+struct ar0832_dev {
+ struct ar0832_sensor_info *sensor_info;
+ struct ar0832_focuser_info *focuser_info;
+ struct ar0832_platform_data *pdata;
+ struct i2c_client *i2c_client;
+ struct mutex ar0832_camera_lock;
+ struct miscdevice misc_dev;
+ struct ar0832_power_rail power_rail;
+ int brd_power_cnt;
+ atomic_t in_use;
+ char dname[20];
+ int is_stereo;
+ u16 sensor_id_data;
+ struct dentry *debugdir;
+};
+
+#define UpperByte16to8(x) ((u8)((x & 0xFF00) >> 8))
+#define LowerByte16to8(x) ((u8)(x & 0x00FF))
+
+#define ar0832_TABLE_WAIT_MS 0
+#define ar0832_TABLE_END 1
+#define ar0832_MAX_RETRIES 3
+
+/* AR0832 Register */
+#define AR0832_SENSORID_REG 0x0002
+#define AR0832_RESET_REG 0x301A
+#define AR0832_ID_REG 0x31FC
+#define AR0832_GLOBAL_GAIN_REG 0x305E
+#define AR0832_TEST_PATTERN_REG 0x0600
+#define AR0832_GROUP_HOLD_REG 0x0104
+#define AR0832_TEST_RED_REG 0x0602
+#define AR0832_TEST_GREENR_REG 0x0604
+#define AR0832_TEST_BLUE_REG 0x0606
+#define AR0832_TEST_GREENB_REG 0x0608
+
+
+
+/* AR0832_RESET_REG */
+#define AR0832_RESET_REG_GROUPED_PARAMETER_HOLD (1 << 15)
+#define AR0832_RESET_REG_GAIN_INSERT (1 << 14)
+#define AR0832_RESET_REG_SMIA_SERIALIZER_DIS (1 << 12)
+#define AR0832_RESET_REG_RESTART_BAD (1 << 10)
+#define AR0832_RESET_REG_MASK_BAD (1 << 9)
+#define AR0832_RESET_REG_GPI_EN (1 << 8)
+#define AR0832_RESET_REG_PARALLEL_EN (1 << 7)
+#define AR0832_RESET_REG_DRIVE_PINS (1 << 6)
+#define AR0832_RESET_REG_STDBY_EOF (1 << 4)
+#define AR0832_RESET_REG_LOCK_REG (1 << 3)
+#define AR0832_RESET_REG_STREAM (1 << 2)
+#define AR0832_RESET_REG_RESTART (1 << 1)
+#define AR0832_RESET_REG_RESET (1 << 0)
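
The mode tables below repeatedly poke AR0832_RESET_REG (0x301A); as a quick sanity check of the bit names above, here is a throwaway sketch that decodes 0x065C, the final RESET_REGISTER value those tables write (the defines are local copies of the kernel ones above):

#include <stdint.h>
#include <stdio.h>

/* Local copies of the AR0832_RESET_REG_* bits defined above. */
#define RESET_REG_RESTART_BAD (1 << 10)
#define RESET_REG_MASK_BAD (1 << 9)
#define RESET_REG_DRIVE_PINS (1 << 6)
#define RESET_REG_STDBY_EOF (1 << 4)
#define RESET_REG_LOCK_REG (1 << 3)
#define RESET_REG_STREAM (1 << 2)

int main(void)
{
        uint16_t val = 0x065C;

        printf("stream=%d lock_reg=%d stdby_eof=%d drive_pins=%d mask_bad=%d restart_bad=%d\n",
               !!(val & RESET_REG_STREAM), !!(val & RESET_REG_LOCK_REG),
               !!(val & RESET_REG_STDBY_EOF), !!(val & RESET_REG_DRIVE_PINS),
               !!(val & RESET_REG_MASK_BAD), !!(val & RESET_REG_RESTART_BAD));
        return 0;
}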
+
+static struct ar0832_reg mode_start[] = {
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_3264X2448_8140[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* MT9E013 Recommended Settings */
+ {0x31B0, 0x0083}, /* FRAME_PREAMBLE */
+ {0x31B2, 0x004D}, /* LINE_PREAMBLE */
+ {0x31B4, 0x0E77}, /* MIPI_TIMING_0 */
+ {0x31B6, 0x0D20}, /* MIPI_TIMING_1 */
+ {0x31B8, 0x020E}, /* MIPI_TIMING_2 */
+ {0x31BA, 0x0710}, /* MIPI_TIMING_3 */
+ {0x31BC, 0x2A0D}, /* MIPI_TIMING_4 */
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+ {0x0112, 0x0A0A}, /* CCP_DATA_FORMAT */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x0344, 0x0004}, /* X_ADDR_START */
+ {0x0348, 0x0CCB}, /* X_ADDR_END */
+ {0x0346, 0x0004}, /* Y_ADDR_START */
+ {0x034A, 0x099B}, /* Y_ADDR_END */
+ {0x034C, 0x0CC8}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0998}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x0342, 0x133C}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x0A27}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x0A27}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x09DC}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_3264X2448_8141[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* AR0832 Recommended Settings */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x0344, 0x0004}, /* X_ADDR_START */
+ {0x0348, 0x0CCB}, /* X_ADDR_END */
+ {0x0346, 0x0004}, /* Y_ADDR_START */
+ {0x034A, 0x099B}, /* Y_ADDR_END */
+ {0x034C, 0x0CC8}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0998}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x0342, 0x133C}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x0A27}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x0A27}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x09DC}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_2880X1620_8140[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* MT9E013 Recommended Settings */
+ {0x31B0, 0x0083}, /* FRAME_PREAMBLE */
+ {0x31B2, 0x004D}, /* LINE_PREAMBLE */
+ {0x31B4, 0x0E77}, /* MIPI_TIMING_0 */
+ {0x31B6, 0x0D20}, /* MIPI_TIMING_1 */
+ {0x31B8, 0x020E}, /* MIPI_TIMING_2 */
+ {0x31BA, 0x0710}, /* MIPI_TIMING_3 */
+ {0x31BC, 0x2A0D}, /* MIPI_TIMING_4 */
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+ {0x0112, 0x0A0A}, /* CCP_DATA_FORMAT */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x0344, 0x00C8}, /* X_ADDR_START */
+ {0x0348, 0x0C07}, /* X_ADDR_END */
+ {0x0346, 0x01A6}, /* Y_ADDR_START */
+ {0x034A, 0x07F9}, /* Y_ADDR_END */
+ {0x034C, 0x0B40}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0654}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+
+ {0x0342, 0x11B8}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x06E3}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x06E3}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x0BD8}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_2880X1620_8141[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* AR0832 Recommended Settings */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x0344, 0x00C8}, /* X_ADDR_START */
+ {0x0348, 0x0C07}, /* X_ADDR_END */
+ {0x0346, 0x01A6}, /* Y_ADDR_START */
+ {0x034A, 0x07F9}, /* Y_ADDR_END */
+ {0x034C, 0x0B40}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0654}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+
+ {0x0342, 0x11B8}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x06E3}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x06E3}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x0BD8}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_1920X1080_8140[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* MT9E013 Recommended Settings */
+ {0x31B0, 0x0083}, /* FRAME_PREAMBLE */
+ {0x31B2, 0x004D}, /* LINE_PREAMBLE */
+ {0x31B4, 0x0E77}, /* MIPI_TIMING_0 */
+ {0x31B6, 0x0D20}, /* MIPI_TIMING_1 */
+ {0x31B8, 0x020E}, /* MIPI_TIMING_2 */
+ {0x31BA, 0x0710}, /* MIPI_TIMING_3 */
+ {0x31BC, 0x2A0D}, /* MIPI_TIMING_4 */
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+ {0x0112, 0x0A0A}, /* CCP_DATA_FORMAT */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x0344, 0x028C}, /* X_ADDR_START */
+ {0x0348, 0x0A0B}, /* X_ADDR_END */
+ {0x0346, 0x006E}, /* Y_ADDR_START */
+ {0x034A, 0x04A5}, /* Y_ADDR_END */
+ {0x034C, 0x0780}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0438}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+
+ {0x0342, 0x103B}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x05C4}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x05C4}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x0702}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_1920X1080_8141[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* AR0832 Recommended Settings */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+ {0x0344, 0x028C}, /* X_ADDR_START */
+ {0x0348, 0x0A0B}, /* X_ADDR_END */
+ {0x0346, 0x006E}, /* Y_ADDR_START */
+ {0x034A, 0x04A5}, /* Y_ADDR_END */
+ {0x034C, 0x0780}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0438}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+
+ {0x0342, 0x103B}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x05C4}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x05C4}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x0702}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_1632X1224_8140[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+
+ /* SC-CHANGE: to-do 8 bit write */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* MT9E013 Recommended Settings */
+ {0x31B0, 0x0083}, /* FRAME_PREAMBLE */
+ {0x31B2, 0x004D}, /* LINE_PREAMBLE */
+ {0x31B4, 0x0E77}, /* MIPI_TIMING_0 */
+ {0x31B6, 0x0D20}, /* MIPI_TIMING_1 */
+ {0x31B8, 0x020E}, /* MIPI_TIMING_2 */
+ {0x31BA, 0x0710}, /* MIPI_TIMING_3 */
+ {0x31BC, 0x2A0D}, /* MIPI_TIMING_4 */
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+ {0x0112, 0x0A0A}, /* CCP_DATA_FORMAT */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001}, /* waitmsec 1 */
+
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+
+ {0x0344, 0x0008}, /* X_ADDR_START */
+ {0x0348, 0x0CC9}, /* X_ADDR_END */
+ {0x0346, 0x0008}, /* Y_ADDR_START */
+ {0x034A, 0x0999}, /* Y_ADDR_END */
+ {0x034C, 0x0660}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x04C8}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC4C3}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+ {0x0400, 0x0002}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x0342, 0x101A}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x0610}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x0557}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x0988}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0130}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+
+ /* todo 8-bit write */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_1632X1224_8141[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+
+ /* SC-CHANGE: to-do 8 bit write */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* AR0832 Recommended Settings */
+ {0x31B0, 0x0083}, /* FRAME_PREAMBLE */
+ {0x31B2, 0x004D}, /* LINE_PREAMBLE */
+ {0x31B4, 0x0E77}, /* MIPI_TIMING_0 */
+ {0x31B6, 0x0D20}, /* MIPI_TIMING_1 */
+ {0x31B8, 0x020E}, /* MIPI_TIMING_2 */
+ {0x31BA, 0x0710}, /* MIPI_TIMING_3 */
+ {0x31BC, 0x2A0D}, /* MIPI_TIMING_4 */
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+ {0x0112, 0x0A0A}, /* CCP_DATA_FORMAT */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+ {0x3174, 0x8000},
+
+ /* mode end */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+
+ {0x0306, 0x0040}, /* PLL_MULTIPLIER */
+
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001}, /* waitmsec 1 */
+
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+
+ {0x0344, 0x0008}, /* X_ADDR_START */
+ {0x0348, 0x0CC9}, /* X_ADDR_END */
+ {0x0346, 0x0008}, /* Y_ADDR_START */
+ {0x034A, 0x0999}, /* Y_ADDR_END */
+ {0x034C, 0x0660}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x04C8}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC4C3}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+ {0x0400, 0x0002}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x0342, 0x101A}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x0610}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x0557}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x0988}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0130}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+
+ /* todo 8-bit write */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_800X600_8140[] = {
+ /* mode start */
+ {0x301A, 0x0058},
+ {0x301A, 0x0050},
+ {0x0104, 0x0100},
+ {0x3064, 0x7800},
+ {0x31AE, 0x0202},
+ {0x31B8, 0x0E3F},
+ {0x31BE, 0xC003},
+ {0x3070, 0x0000},
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+
+ /* MT9E013 Recommended Settings */
+ {0x3044, 0x0590},
+ {0x306E, 0xFC80},
+ {0x30B2, 0xC000},
+ {0x30D6, 0x0800},
+ {0x316C, 0xB42F},
+ {0x316E, 0x869A},
+ {0x3170, 0x210E},
+ {0x317A, 0x010E},
+ {0x31E0, 0x1FB9},
+ {0x31E6, 0x07FC},
+ {0x37C0, 0x0000},
+ {0x37C2, 0x0000},
+ {0x37C4, 0x0000},
+ {0x37C6, 0x0000},
+ {0x3E00, 0x0011},
+ {0x3E02, 0x8801},
+ {0x3E04, 0x2801},
+ {0x3E06, 0x8449},
+ {0x3E08, 0x6841},
+ {0x3E0A, 0x400C},
+ {0x3E0C, 0x1001},
+ {0x3E0E, 0x2603},
+ {0x3E10, 0x4B41},
+ {0x3E12, 0x4B24},
+ {0x3E14, 0xA3CF},
+ {0x3E16, 0x8802},
+ {0x3E18, 0x8401},
+ {0x3E1A, 0x8601},
+ {0x3E1C, 0x8401},
+ {0x3E1E, 0x840A},
+ {0x3E20, 0xFF00},
+ {0x3E22, 0x8401},
+ {0x3E24, 0x00FF},
+ {0x3E26, 0x0088},
+ {0x3E28, 0x2E8A},
+ {0x3E30, 0x0000},
+ {0x3E32, 0x8801},
+ {0x3E34, 0x4029},
+ {0x3E36, 0x00FF},
+ {0x3E38, 0x8469},
+ {0x3E3A, 0x00FF},
+ {0x3E3C, 0x2801},
+ {0x3E3E, 0x3E2A},
+ {0x3E40, 0x1C01},
+ {0x3E42, 0xFF84},
+ {0x3E44, 0x8401},
+ {0x3E46, 0x0C01},
+ {0x3E48, 0x8401},
+ {0x3E4A, 0x00FF},
+ {0x3E4C, 0x8402},
+ {0x3E4E, 0x8984},
+ {0x3E50, 0x6628},
+ {0x3E52, 0x8340},
+ {0x3E54, 0x00FF},
+ {0x3E56, 0x4A42},
+ {0x3E58, 0x2703},
+ {0x3E5A, 0x6752},
+ {0x3E5C, 0x3F2A},
+ {0x3E5E, 0x846A},
+ {0x3E60, 0x4C01},
+ {0x3E62, 0x8401},
+ {0x3E66, 0x3901},
+ {0x3E90, 0x2C01},
+ {0x3E98, 0x2B02},
+ {0x3E92, 0x2A04},
+ {0x3E94, 0x2509},
+ {0x3E96, 0x0000},
+ {0x3E9A, 0x2905},
+ {0x3E9C, 0x00FF},
+ {0x3ECC, 0x00EB},
+ {0x3ED0, 0x1E24},
+ {0x3ED4, 0xAFC4},
+ {0x3ED6, 0x909B},
+ {0x3EE0, 0x2424},
+ {0x3EE2, 0x9797},
+ {0x3EE4, 0xC100},
+ {0x3EE6, 0x0540},
+
+ /* mode end */
+ {0x3174, 0x8000},
+
+ /* [RAW10] */
+ {0x0112, 0x0A0A},
+
+ /* PLL Configuration Ext=24MHz */
+ {0x0300, 0x0004},
+ {0x0302, 0x0001},
+ {0x0304, 0x0002},
+ {0x0306, 0x0042},
+ {0x0308, 0x000A},
+ {0x030A, 0x0001},
+ {ar0832_TABLE_WAIT_MS, 0x0001},
+
+ /* Output size */
+ {0x0344, 0x04D8},
+ {0x0348, 0x07F7},
+ {0x0346, 0x03A4},
+ {0x034A, 0x05FB},
+ {0x034C, 0x0320},
+ {0x034E, 0x0258},
+ {0x3040, 0xC041},
+
+ {0x306E, 0xFC80},
+ {0x3178, 0x0000},
+ {0x3ED0, 0x1E24},
+
+ /* Scale Configuration */
+ {0x0400, 0x0000},
+ {0x0404, 0x0010},
+
+ /* Timing Configuration */
+ {0x0342, 0x08A8},
+ {0x0340, 0x02E7},
+ {0x0202, 0x02E7},
+ {0x3014, 0x03F6},
+ {0x3010, 0x0078},
+
+ {0x301A, 0x8250},
+ {0x301A, 0x8650},
+ {0x301A, 0x8658},
+ /* STATE= Minimum Gain, 1500 */
+ {0x305E, 0x13AF},
+
+ {0x0104, 0x0000},
+ {0x301A, 0x065C},
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_800X600_8141[] = {
+ /* mode start */
+ {0x301A, 0x0058}, /* RESET_REGISTER */
+ {0x301A, 0x0050}, /* RESET_REGISTER */
+
+ /* SC-CHANGE: to-do 8 bit write */
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+
+ {0x3064, 0x7800}, /* RESERVED_MFR_3064 */
+ {0x31AE, 0x0202}, /* SERIAL_FORMAT */
+ /* AR0832 Recommended Settings */
+ {0x31B0, 0x0083}, /* FRAME_PREAMBLE */
+ {0x31B2, 0x004D}, /* LINE_PREAMBLE */
+ {0x31B4, 0x0E88}, /* MIPI_TIMING_0 */
+ {0x31B6, 0x0D24}, /* MIPI_TIMING_1 */
+ {0x31B8, 0x020E}, /* MIPI_TIMING_2 */
+ {0x31BA, 0x0710}, /* MIPI_TIMING_3 */
+ {0x31BC, 0x2A0D}, /* MIPI_TIMING_4 */
+ {ar0832_TABLE_WAIT_MS, 0x0005},
+ {0x0112, 0x0A0A}, /* CCP_DATA_FORMAT */
+ {0x3044, 0x0590}, /* RESERVED_MFR_3044 */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x30B2, 0xC000}, /* RESERVED_MFR_30B2 */
+ {0x30D6, 0x0800}, /* RESERVED_MFR_30D6 */
+ {0x316C, 0xB42F}, /* RESERVED_MFR_316C */
+ {0x316E, 0x869A}, /* RESERVED_MFR_316E */
+ {0x3170, 0x210E}, /* RESERVED_MFR_3170 */
+ {0x317A, 0x010E}, /* RESERVED_MFR_317A */
+ {0x31E0, 0x1FB9}, /* RESERVED_MFR_31E0 */
+ {0x31E6, 0x07FC}, /* RESERVED_MFR_31E6 */
+ {0x37C0, 0x0000}, /* P_GR_Q5 */
+ {0x37C2, 0x0000}, /* P_RD_Q5 */
+ {0x37C4, 0x0000}, /* P_BL_Q5 */
+ {0x37C6, 0x0000}, /* P_GB_Q5 */
+ {0x3E00, 0x0011}, /* RESERVED_MFR_3E00 */
+ {0x3E02, 0x8801}, /* RESERVED_MFR_3E02 */
+ {0x3E04, 0x2801}, /* RESERVED_MFR_3E04 */
+ {0x3E06, 0x8449}, /* RESERVED_MFR_3E06 */
+ {0x3E08, 0x6841}, /* RESERVED_MFR_3E08 */
+ {0x3E0A, 0x400C}, /* RESERVED_MFR_3E0A */
+ {0x3E0C, 0x1001}, /* RESERVED_MFR_3E0C */
+ {0x3E0E, 0x2603}, /* RESERVED_MFR_3E0E */
+ {0x3E10, 0x4B41}, /* RESERVED_MFR_3E10 */
+ {0x3E12, 0x4B24}, /* RESERVED_MFR_3E12 */
+ {0x3E14, 0xA3CF}, /* RESERVED_MFR_3E14 */
+ {0x3E16, 0x8802}, /* RESERVED_MFR_3E16 */
+ {0x3E18, 0x84FF}, /* RESERVED_MFR_3E18 */
+ {0x3E1A, 0x8601}, /* RESERVED_MFR_3E1A */
+ {0x3E1C, 0x8401}, /* RESERVED_MFR_3E1C */
+ {0x3E1E, 0x840A}, /* RESERVED_MFR_3E1E */
+ {0x3E20, 0xFF00}, /* RESERVED_MFR_3E20 */
+ {0x3E22, 0x8401}, /* RESERVED_MFR_3E22 */
+ {0x3E24, 0x00FF}, /* RESERVED_MFR_3E24 */
+ {0x3E26, 0x0088}, /* RESERVED_MFR_3E26 */
+ {0x3E28, 0x2E8A}, /* RESERVED_MFR_3E28 */
+ {0x3E30, 0x0000}, /* RESERVED_MFR_3E30 */
+ {0x3E32, 0x8801}, /* RESERVED_MFR_3E32 */
+ {0x3E34, 0x4029}, /* RESERVED_MFR_3E34 */
+ {0x3E36, 0x00FF}, /* RESERVED_MFR_3E36 */
+ {0x3E38, 0x8469}, /* RESERVED_MFR_3E38 */
+ {0x3E3A, 0x00FF}, /* RESERVED_MFR_3E3A */
+ {0x3E3C, 0x2801}, /* RESERVED_MFR_3E3C */
+ {0x3E3E, 0x3E2A}, /* RESERVED_MFR_3E3E */
+ {0x3E40, 0x1C01}, /* RESERVED_MFR_3E40 */
+ {0x3E42, 0xFF84}, /* RESERVED_MFR_3E42 */
+ {0x3E44, 0x8401}, /* RESERVED_MFR_3E44 */
+ {0x3E46, 0x0C01}, /* RESERVED_MFR_3E46 */
+ {0x3E48, 0x8401}, /* RESERVED_MFR_3E48 */
+ {0x3E4A, 0x00FF}, /* RESERVED_MFR_3E4A */
+ {0x3E4C, 0x8402}, /* RESERVED_MFR_3E4C */
+ {0x3E4E, 0x8984}, /* RESERVED_MFR_3E4E */
+ {0x3E50, 0x6628}, /* RESERVED_MFR_3E50 */
+ {0x3E52, 0x8340}, /* RESERVED_MFR_3E52 */
+ {0x3E54, 0x00FF}, /* RESERVED_MFR_3E54 */
+ {0x3E56, 0x4A42}, /* RESERVED_MFR_3E56 */
+ {0x3E58, 0x2703}, /* RESERVED_MFR_3E58 */
+ {0x3E5A, 0x6752}, /* RESERVED_MFR_3E5A */
+ {0x3E5C, 0x3F2A}, /* RESERVED_MFR_3E5C */
+ {0x3E5E, 0x846A}, /* RESERVED_MFR_3E5E */
+ {0x3E60, 0x4C01}, /* RESERVED_MFR_3E60 */
+ {0x3E62, 0x8401}, /* RESERVED_MFR_3E62 */
+ {0x3E66, 0x3901}, /* RESERVED_MFR_3E66 */
+ {0x3E90, 0x2C01}, /* RESERVED_MFR_3E90 */
+ {0x3E98, 0x2B02}, /* RESERVED_MFR_3E98 */
+ {0x3E92, 0x2A04}, /* RESERVED_MFR_3E92 */
+ {0x3E94, 0x2509}, /* RESERVED_MFR_3E94 */
+ {0x3E96, 0x0000}, /* RESERVED_MFR_3E96 */
+ {0x3E9A, 0x2905}, /* RESERVED_MFR_3E9A */
+ {0x3E9C, 0x00FF}, /* RESERVED_MFR_3E9C */
+ {0x3ECC, 0x00EB}, /* RESERVED_MFR_3ECC */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+ {0x3ED4, 0xAFC4}, /* RESERVED_MFR_3ED4 */
+ {0x3ED6, 0x909B}, /* RESERVED_MFR_3ED6 */
+ {0x3EE0, 0x2424}, /* RESERVED_MFR_3EE0 */
+ {0x3EE2, 0x9797}, /* RESERVED_MFR_3EE2 */
+ {0x3EE4, 0xC100}, /* RESERVED_MFR_3EE4 */
+ {0x3EE6, 0x0540}, /* RESERVED_MFR_3EE6 */
+
+ /* mode end */
+ {0x3174, 0x8000}, /* RESERVED_MFR_3174 */
+ {0x0300, 0x0004}, /* VT_PIX_CLK_DIV */
+ {0x0302, 0x0001}, /* VT_SYS_CLK_DIV */
+ {0x0304, 0x0002}, /* PRE_PLL_CLK_DIV */
+
+ {0x0306, 0x0042}, /* PLL_MULTIPLIER */
+
+ {0x0308, 0x000A}, /* OP_PIX_CLK_DIV */
+ {0x030A, 0x0001}, /* OP_SYS_CLK_DIV */
+ {ar0832_TABLE_WAIT_MS, 0x0001}, /* waitmsec 1 */
+
+ {0x3064, 0x7400}, /* RESERVED_MFR_3064 */
+
+ {0x0104, 0x0100}, /* GROUPED_PARAMETER_HOLD */
+
+ {0x0344, 0x04D8}, /* X_ADDR_START */
+ {0x0348, 0x07F7}, /* X_ADDR_END */
+ {0x0346, 0x03A4}, /* Y_ADDR_START */
+ {0x034A, 0x05FB}, /* Y_ADDR_END */
+ {0x034C, 0x0320}, /* X_OUTPUT_SIZE */
+ {0x034E, 0x0260}, /* Y_OUTPUT_SIZE */
+ {0x3040, 0xC041}, /* READ_MODE */
+ {0x306E, 0xFC80}, /* DATAPATH_SELECT */
+ {0x3178, 0x0000}, /* RESERVED_MFR_3178 */
+ {0x3ED0, 0x1E24}, /* RESERVED_MFR_3ED0 */
+ {0x0400, 0x0000}, /* SCALING_MODE */
+ {0x0404, 0x0010}, /* SCALE_M */
+ {0x0342, 0x08A8}, /* LINE_LENGTH_PCK */
+ {0x0340, 0x02E7}, /* FRAME_LENGTH_LINES */
+ {0x0202, 0x02E7}, /* COARSE_INTEGRATION_TIME */
+ {0x3014, 0x03F6}, /* FINE_INTEGRATION_TIME */
+ {0x3010, 0x0078}, /* FINE_CORRECTION */
+ {0x301A, 0x8250}, /* RESET_REGISTER */
+ {0x301A, 0x8650}, /* RESET_REGISTER */
+ {0x301A, 0x8658}, /* RESET_REGISTER */
+
+ /* gain */
+ {0x305e, 0x10AA}, /* gain */
+
+ /* todo 8-bit write */
+ {0x0104, 0x0000}, /* GROUPED_PARAMETER_HOLD */
+ {0x301A, 0x065C}, /* RESET_REGISTER */
+ {ar0832_TABLE_END, 0x0000}
+};
+
+static struct ar0832_reg mode_end[] = {
+ {ar0832_TABLE_END, 0x0000}
+};
+
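+/*
+ * Supported resolutions; the values index the per-variant register
+ * tables (mode_table_8140/mode_table_8141) below.
+ */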
+enum {
+ ar0832_MODE_3264X2448,
+ ar0832_MODE_2880X1620,
+ ar0832_MODE_1920X1080,
+ ar0832_MODE_1632X1224,
+ ar0832_MODE_800X600
+};
+
+static struct ar0832_reg *mode_table_8140[] = {
+ [ar0832_MODE_3264X2448] = mode_3264X2448_8140,
+ [ar0832_MODE_2880X1620] = mode_2880X1620_8140,
+ [ar0832_MODE_1920X1080] = mode_1920X1080_8140,
+ [ar0832_MODE_1632X1224] = mode_1632X1224_8140,
+ [ar0832_MODE_800X600] = mode_800X600_8140,
+};
+
+static struct ar0832_reg *mode_table_8141[] = {
+ [ar0832_MODE_3264X2448] = mode_3264X2448_8141,
+ [ar0832_MODE_2880X1620] = mode_2880X1620_8141,
+ [ar0832_MODE_1920X1080] = mode_1920X1080_8141,
+ [ar0832_MODE_1632X1224] = mode_1632X1224_8141,
+ [ar0832_MODE_800X600] = mode_800X600_8141,
+};
+
+static inline void ar0832_msleep(u32 t)
+{
+ /*
+  * Why usleep_range() instead of msleep()?
+  * See Documentation/timers/timers-howto.txt.
+  */
+ usleep_range(t*1000, t*1000 + 500);
+}
+
+/* 0x0340 (FRAME_LENGTH_LINES) is the 16-bit register for the frame length */
+static inline void ar0832_get_frame_length_regs(struct ar0832_reg *regs,
+ u32 frame_length)
+{
+ regs->addr = 0x0340;
+ regs->val = (frame_length) & 0xFFFF;
+}
+
+static inline void ar0832_get_coarse_time_regs(struct ar0832_reg *regs,
+ u32 coarse_time)
+{
+ regs->addr = 0x0202;
+ regs->val = (coarse_time) & 0xFFFF;
+}
+
+static inline void ar0832_get_focuser_vcm_control_regs(struct ar0832_reg *regs,
+ u16 value)
+{
+ regs->addr = 0x30F0;
+ regs->val = (value) & 0xFFFF;
+}
+
+static inline void ar0832_get_focuser_vcm_step_time_regs
+ (struct ar0832_reg *regs, u16 value)
+{
+ regs->addr = 0x30F4;
+ regs->val = (value) & 0xFFFF;
+}
+
+static inline void ar0832_get_focuser_data_regs(struct ar0832_reg *regs,
+ u16 value)
+{
+ regs->addr = 0x30F2;
+ regs->val = (value) & 0xFFFF;
+}
+
+static inline void ar0832_get_gain_regs(struct ar0832_reg *regs, u16 gain)
+{
+ /* global_gain register*/
+ regs->addr = AR0832_GLOBAL_GAIN_REG;
+ regs->val = gain;
+}
+
+static int ar0832_write_reg8(struct i2c_client *client, u16 addr, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[3];
+ int retry = 0;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+ data[2] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = data;
+
+ dev_dbg(&client->dev, "0x%x = 0x%x\n", addr, val);
+
+ do {
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err > 0)
+ return 0;
+ retry++;
+ dev_err(&client->dev,
+ "%s: i2c transfer failed, retrying %x %x\n",
+ __func__, addr, val);
+ ar0832_msleep(3);
+ } while (retry < ar0832_MAX_RETRIES);
+
+ return err;
+}
+
+static int ar0832_write_reg16(struct i2c_client *client, u16 addr, u16 val)
+{
+ int count;
+ struct i2c_msg msg;
+ unsigned char data[4];
+ int retry = 0;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+ data[2] = (u8) (val >> 8);
+ data[3] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 4;
+ msg.buf = data;
+
+ dev_dbg(&client->dev, "0x%x = 0x%x\n", addr, val);
+
+ do {
+ count = i2c_transfer(client->adapter, &msg, 1);
+ if (count == 1)
+ return 0;
+ retry++;
+ dev_err(&client->dev,
+ "%s: i2c transfer failed, retrying %x %x\n",
+ __func__, addr, val);
+ ar0832_msleep(3);
+ } while (retry <= ar0832_MAX_RETRIES);
+
+ return -EIO;
+}
+
+static int ar0832_read_reg16(struct i2c_client *client, u16 addr, u16 *val)
+{
+ struct i2c_msg msg[2];
+ u8 data[4];
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = data;
+ data[0] = (addr >> 8);
+ data[1] = (addr & 0xff);
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = data + 2;
+
+ if (i2c_transfer(client->adapter, msg, 2) == 2) {
+ *val = ((data[2] << 8) | data[3]);
+ dev_dbg(&client->dev, "0x%x = 0x%x\n", addr, *val);
+ return 0;
+ } else {
+ *val = 0;
+ dev_err(&client->dev,
+ "%s: i2c read failed.\n", __func__);
+ return -EIO;
+ }
+}
+
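+/*
+ * Register 0x0104 (GROUPED_PARAMETER_HOLD) is an 8-bit register, so only
+ * the high byte of the 16-bit table value is written to it; every other
+ * register is written as a full 16-bit value.
+ */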
+static int ar0832_write_reg_helper(struct ar0832_dev *dev,
+ u16 addr,
+ u16 val)
+{
+ int ret;
+
+ if (addr == 0x104)
+ ret = ar0832_write_reg8(dev->i2c_client, addr,
+ (val >> 8 & 0xff));
+ else
+ ret = ar0832_write_reg16(dev->i2c_client, addr, val);
+
+ return ret;
+}
+
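+/*
+ * Walk a register table until the ar0832_TABLE_END marker, treating
+ * ar0832_TABLE_WAIT_MS entries as delays.  Entries found in the optional
+ * override list replace the table defaults for matching addresses.
+ */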
+static int ar0832_write_table(struct ar0832_dev *dev,
+ const struct ar0832_reg table[],
+ const struct ar0832_reg override_list[],
+ int num_override_regs)
+{
+ int err;
+ const struct ar0832_reg *next;
+ u16 val;
+ int i;
+
+ for (next = table; next->addr != ar0832_TABLE_END; next++) {
+ if (next->addr == ar0832_TABLE_WAIT_MS) {
+ ar0832_msleep(next->val);
+ continue;
+ }
+
+ val = next->val;
+ /*
+  * When an override list is passed in, replace the register value
+  * to write if the register is in the list.
+  */
+ if (override_list) {
+ for (i = 0; i < num_override_regs; i++) {
+ if (next->addr == override_list[i].addr) {
+ val = override_list[i].val;
+ break;
+ }
+ }
+ }
+
+ err = ar0832_write_reg_helper(dev, next->addr, val);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
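+/*
+ * Frame length, coarse integration time and gain updates are bracketed by
+ * GROUPED_PARAMETER_HOLD writes so the sensor applies the new values
+ * together.
+ */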
+static int ar0832_set_frame_length(struct ar0832_dev *dev,
+ u32 frame_length)
+{
+ struct ar0832_reg reg_list;
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int ret;
+
+ dev_dbg(&i2c_client->dev, "[%s] (0x%08x)\n", __func__, frame_length);
+
+ ar0832_get_frame_length_regs(&reg_list, frame_length);
+ ret = ar0832_write_reg8(i2c_client, AR0832_GROUP_HOLD_REG, 0x1);
+ if (ret)
+ return ret;
+
+ ret = ar0832_write_reg16(i2c_client, reg_list.addr,
+ reg_list.val);
+ if (ret)
+ return ret;
+
+ ret = ar0832_write_reg8(i2c_client, AR0832_GROUP_HOLD_REG, 0x0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ar0832_set_coarse_time(struct ar0832_dev *dev,
+ u32 coarse_time)
+{
+ int ret;
+ struct ar0832_reg reg_list;
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ dev_dbg(&i2c_client->dev, "[%s] (0x%08x)\n", __func__, coarse_time);
+ ar0832_get_coarse_time_regs(&reg_list, coarse_time);
+
+ ret = ar0832_write_reg8(i2c_client, AR0832_GROUP_HOLD_REG, 0x1);
+ if (ret)
+ return ret;
+
+ ret = ar0832_write_reg16(i2c_client, reg_list.addr,
+ reg_list.val);
+ if (ret)
+ return ret;
+
+ ret = ar0832_write_reg8(i2c_client, AR0832_GROUP_HOLD_REG, 0x0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ar0832_set_gain(struct ar0832_dev *dev, u16 gain)
+{
+ int ret = 0;
+ struct ar0832_reg reg_list_gain;
+
+ ret = ar0832_write_reg8(dev->i2c_client, AR0832_GROUP_HOLD_REG, 0x1);
+ /* Gain Registers Start */
+ ar0832_get_gain_regs(&reg_list_gain, gain);
+ ret |= ar0832_write_reg16(dev->i2c_client,
+ reg_list_gain.addr,
+ reg_list_gain.val);
+ if (ret)
+ return ret;
+
+ /* Gain register End */
+ ret |= ar0832_write_reg8(dev->i2c_client, AR0832_GROUP_HOLD_REG, 0x0);
+
+ return ret;
+}
+
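+/*
+ * Program a full sensor mode: write the common mode_start sequence, then
+ * the per-resolution table for the detected sensor variant (8140 or 8141)
+ * with the requested frame length, coarse integration time and gain
+ * overriding the table defaults, and finish with mode_end.
+ */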
+static int ar0832_set_mode(struct ar0832_dev *dev,
+ struct ar0832_mode *mode)
+{
+ int sensor_mode;
+ int err;
+ struct i2c_client *i2c_client = dev->i2c_client;
+ struct ar0832_reg reg_ovr[3];
+ struct ar0832_reg *mode_seq;
+
+ dev_dbg(&i2c_client->dev, "%s: ++\n", __func__);
+
+ if (mode->xres == 3264 && mode->yres == 2448)
+ sensor_mode = ar0832_MODE_3264X2448;
+ else if (mode->xres == 2880 && mode->yres == 1620)
+ sensor_mode = ar0832_MODE_2880X1620;
+ else if (mode->xres == 1920 && mode->yres == 1080)
+ sensor_mode = ar0832_MODE_1920X1080;
+ else if (mode->xres == 1632 && mode->yres == 1224)
+ sensor_mode = ar0832_MODE_1632X1224;
+ else if (mode->xres == 800 && mode->yres == 600)
+ sensor_mode = ar0832_MODE_800X600;
+ else {
+ dev_err(&i2c_client->dev,
+ "%s: invalid resolution supplied to set mode %d %d\n",
+ __func__ , mode->xres, mode->yres);
+ return -EINVAL;
+ }
+
+ if (dev->sensor_id_data == AR0832_SENSOR_ID_8141)
+ mode_seq = mode_table_8141[sensor_mode];
+ else
+ mode_seq = mode_table_8140[sensor_mode];
+ /*
+  * Build the list of override registers for the requested frame
+  * length, coarse integration time and gain.
+  */
+ err = ar0832_write_table(dev, mode_start, NULL, 0);
+ if (err)
+ return err;
+
+ /* When we change the resolution */
+ ar0832_get_frame_length_regs(&reg_ovr[0], mode->frame_length);
+ ar0832_get_coarse_time_regs(&reg_ovr[1], mode->coarse_time);
+ ar0832_get_gain_regs(&reg_ovr[2], mode->gain);
+ err = ar0832_write_table(dev, mode_seq, reg_ovr, ARRAY_SIZE(reg_ovr));
+ if (err)
+ return err;
+
+ err = ar0832_write_table(dev, mode_end, NULL, 0);
+ if (err)
+ return err;
+
+ dev->sensor_info->mode = sensor_mode;
+ dev_dbg(&i2c_client->dev, "%s: --\n", __func__);
+
+ return 0;
+}
+
+static int ar0832_get_status(struct ar0832_dev *dev, u8 *status)
+{
+ int err = 0;
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ *status = 0;
+ /* FixMe */
+ /*
+ err = ar0832_read_reg(dev->i2c_client, 0x001, status);
+ */
+ dev_dbg(&i2c_client->dev, "%s: %u %d\n", __func__, *status, err);
+ return err;
+}
+
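+/*
+ * Move the sensor away from its default I2C address (0x36): unlock the
+ * reset register, program the new address (shifted left by one) into
+ * AR0832_ID_REG, then re-lock the reset register.
+ */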
+static int ar0832_set_alternate_addr(struct i2c_client *client)
+{
+ int ret = 0;
+ u8 new_addr = client->addr;
+ u16 val;
+
+ /* Default slave address of ar0832 is 0x36 */
+ client->addr = 0x36;
+ ret = ar0832_read_reg16(client, AR0832_RESET_REG, &val);
+ val &= ~AR0832_RESET_REG_LOCK_REG;
+ ret |= ar0832_write_reg16(client, AR0832_RESET_REG, val);
+ ret |= ar0832_write_reg16(client, AR0832_ID_REG, new_addr << 1);
+
+ if (!ret) {
+ client->addr = new_addr;
+ dev_dbg(&client->dev,
+ "new slave address is set to 0x%x\n", new_addr);
+ }
+
+ ret |= ar0832_read_reg16(client, AR0832_RESET_REG, &val);
+ val |= AR0832_RESET_REG_LOCK_REG;
+ ret |= ar0832_write_reg16(client, AR0832_RESET_REG, val);
+
+ return ret;
+}
+
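+/*
+ * Power-on is reference counted through brd_power_cnt: the regulators and
+ * the board power_on() callback are only touched by the first user, and
+ * the alternate I2C slave address is programmed once power is up.
+ */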
+static int ar0832_power_on(struct ar0832_dev *dev)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int ret = 0;
+
+ dev_dbg(&i2c_client->dev, "%s: ++ %d %d\n",
+ __func__, dev->is_stereo,
+ dev->brd_power_cnt);
+
+ /* Board specific power-on sequence */
+ mutex_lock(&dev->ar0832_camera_lock);
+ if (dev->brd_power_cnt == 0) {
+ /* Plug 1.8V and 2.8V power to sensor */
+ if (dev->power_rail.sen_1v8_reg) {
+ ret = regulator_enable(dev->power_rail.sen_1v8_reg);
+ if (ret) {
+ dev_err(&i2c_client->dev,
+ "%s: failed to enable vdd\n",
+ __func__);
+ goto fail_regulator_1v8_reg;
+ }
+ }
+
+ if (dev->power_rail.sen_2v8_reg) {
+ ret = regulator_enable(dev->power_rail.sen_2v8_reg);
+ if (ret) {
+ dev_err(&i2c_client->dev,
+ "%s: failed to enable vaa\n",
+ __func__);
+ goto fail_regulator_2v8_reg;
+ }
+ }
+ dev->pdata->power_on(dev->is_stereo);
+ }
+ dev->brd_power_cnt++;
+ mutex_unlock(&dev->ar0832_camera_lock);
+
+ /* Change slave address */
+ if (i2c_client->addr)
+ ret = ar0832_set_alternate_addr(i2c_client);
+
+ return 0;
+
+fail_regulator_2v8_reg:
+ regulator_put(dev->power_rail.sen_2v8_reg);
+ dev->power_rail.sen_2v8_reg = NULL;
+ regulator_disable(dev->power_rail.sen_1v8_reg);
+fail_regulator_1v8_reg:
+ regulator_put(dev->power_rail.sen_1v8_reg);
+ dev->power_rail.sen_1v8_reg = NULL;
+ return ret;
+}
+
+static void ar0832_power_off(struct ar0832_dev *dev)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ dev_dbg(&i2c_client->dev, "%s: ++ %d\n", __func__, dev->brd_power_cnt);
+
+ /* Board specific power-down sequence */
+ mutex_lock(&dev->ar0832_camera_lock);
+
+ if (dev->brd_power_cnt <= 0)
+ goto ar0832_pwdn_exit;
+
+ if (dev->brd_power_cnt-- == 1) {
+ /* Unplug 1.8V and 2.8V power from sensor */
+ if (dev->power_rail.sen_2v8_reg)
+ regulator_disable(dev->power_rail.sen_2v8_reg);
+ if (dev->power_rail.sen_1v8_reg)
+ regulator_disable(dev->power_rail.sen_1v8_reg);
+ dev->pdata->power_off(dev->is_stereo);
+ }
+
+ar0832_pwdn_exit:
+ mutex_unlock(&dev->ar0832_camera_lock);
+}
+
+static int ar0832_focuser_set_config(struct ar0832_dev *dev)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ struct ar0832_reg reg_vcm_ctrl, reg_vcm_step_time;
+ int ret = 0;
+ u8 vcm_slew = 1;
+
+ /*
+  * Bit 15 (0x80 in the high byte) enables the VCM driver.
+  * Bit 3 (0x08) keeps the VCM (AF) position while the sensor is in
+  * soft standby during mode transitions.
+  */
+ u16 vcm_control_data = (0x80 << 8 | (0x08 | (vcm_slew & 0x07)));
+ u16 vcm_step_time = 1024;
+
+ ar0832_get_focuser_vcm_control_regs(&reg_vcm_ctrl, vcm_control_data);
+ ret = ar0832_write_reg16(dev->i2c_client, reg_vcm_ctrl.addr,
+ reg_vcm_ctrl.val);
+
+ dev_dbg(&i2c_client->dev, "%s Reg 0x%X Value 0x%X\n", __func__,
+ reg_vcm_ctrl.addr, reg_vcm_ctrl.val);
+
+ if (ret) {
+ dev_dbg(&i2c_client->dev, "%s Error writing to register 0x%X\n",
+ __func__, reg_vcm_ctrl.addr);
+ return ret;
+ }
+
+ ar0832_get_focuser_vcm_step_time_regs(&reg_vcm_step_time,
+ vcm_step_time);
+ ret = ar0832_write_reg16(dev->i2c_client, reg_vcm_step_time.addr,
+ reg_vcm_step_time.val);
+
+ dev_dbg(&i2c_client->dev, "%s Reg step_time 0x%X Value 0x%X\n",
+ __func__, reg_vcm_step_time.addr,
+ reg_vcm_step_time.val);
+
+ return ret;
+}
+
+static int ar0832_focuser_set_position(struct ar0832_dev *dev,
+ u32 position)
+{
+ int ret = 0;
+ struct ar0832_reg reg_data;
+
+ ar0832_get_focuser_data_regs(&reg_data, position);
+ ret = ar0832_write_reg16(dev->i2c_client, reg_data.addr,
+ reg_data.val);
+ dev->focuser_info->last_position = position;
+
+ return ret;
+}
+
+
+/*
+ * This function is not currently called because ar0832_focuser_set_config()
+ * uses a hard-coded step time.  It is kept so that the actual step time can
+ * be computed from the clock settings if that is ever needed: the formula
+ * for the clock-based step time comes from Aptina and is not part of any
+ * external documentation, so this code needs to be preserved.
+ */
+static u16 ar0832_get_focuser_vcm_step_time(struct ar0832_dev *dev)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int ret;
+ u16 pll_multiplier = 0;
+ u16 pre_pll_clk_div = 0;
+ u16 vt_sys_clk_div = 0;
+ u16 vt_pix_clk_div = 0;
+ u16 vt_pix_clk_freq_mhz = 0;
+
+ ret = ar0832_read_reg16(dev->i2c_client, 0x306, &pll_multiplier);
+ if (ret) {
+ dev_err(&i2c_client->dev, "%s pll_multiplier read failed\n",
+ __func__);
+ }
+
+ ret = ar0832_read_reg16(dev->i2c_client, 0x304, &pre_pll_clk_div);
+ if (ret) {
+ dev_err(&i2c_client->dev, "%s pre_pll_clk_div read failed\n",
+ __func__);
+ }
+
+ ret = ar0832_read_reg16(dev->i2c_client, 0x302, &vt_sys_clk_div);
+ if (ret) {
+ dev_err(&i2c_client->dev, "%s vt_sys_clk_div read failed\n",
+ __func__);
+ }
+
+ ret = ar0832_read_reg16(dev->i2c_client, 0x300, &vt_pix_clk_div);
+ if (ret) {
+ dev_err(&i2c_client->dev, "%s vt_pix_clk_div read failed\n",
+ __func__);
+ }
+
+ vt_pix_clk_freq_mhz =
+ (24 * pll_multiplier) / (pre_pll_clk_div * vt_sys_clk_div *
+ vt_pix_clk_div);
+
+ dev_dbg(&i2c_client->dev, "%s pll_multiplier 0x%X pre_pll_clk_div 0x%X "
+ "vt_sys_clk_div 0x%X vt_pix_clk_div 0x%X vt_pix_clk_freq_mhz 0x%X\n",
+ __func__, pll_multiplier,
+ pre_pll_clk_div, vt_sys_clk_div,
+ vt_pix_clk_div, vt_pix_clk_freq_mhz);
+
+ return vt_pix_clk_freq_mhz;
+}
+
+static inline
+int ar0832_get_sensorid(struct ar0832_dev *dev, u16 *sensor_id)
+{
+ int ret;
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ ret = ar0832_power_on(dev);
+ if (ret)
+ return ret;
+
+ ret = ar0832_read_reg16(i2c_client, AR0832_SENSORID_REG, sensor_id);
+ dev_dbg(&i2c_client->dev,
+ "%s: sensor_id - %04x\n", __func__, *sensor_id);
+
+ ar0832_power_off(dev);
+
+ return ret;
+}
+
+static long ar0832_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ struct ar0832_dev *dev = file->private_data;
+ struct i2c_client *i2c_client = dev->i2c_client;
+ struct ar0832_mode mode;
+ u16 pos;
+
+ switch (cmd) {
+ case AR0832_IOCTL_SET_POWER_ON:
+ dev_dbg(&i2c_client->dev, "AR0832_IOCTL_SET_POWER_ON\n");
+ if (copy_from_user(&mode,
+ (const void __user *)arg,
+ sizeof(struct ar0832_mode))) {
+ dev_err(&i2c_client->dev,
+ "%s: AR0832_IOCTL_SET_POWER_ON failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ dev->is_stereo = mode.stereo;
+ return ar0832_power_on(dev);
+ case AR0832_IOCTL_SET_MODE:
+ {
+ dev_dbg(&i2c_client->dev, "AR0832_IOCTL_SET_MODE\n");
+ if (copy_from_user(&mode,
+ (const void __user *)arg,
+ sizeof(struct ar0832_mode))) {
+ dev_err(&i2c_client->dev,
+ "%s: AR0832_IOCTL_SET_MODE failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ mutex_lock(&dev->ar0832_camera_lock);
+ err = ar0832_set_mode(dev, &mode);
+
+ /*
+ * We need to re-initialize the Focuser registers during mode
+ * switch due to the known issue of focuser retracting
+ */
+ ar0832_focuser_set_config(dev);
+ dev->focuser_info->focuser_init_flag = true;
+
+ /*
+ * If the last focuser position is not at infinity when we
+ * did the mode switch, we need to go there. Before that,
+ * we need to come back to 0.
+ */
+ if (dev->focuser_info->last_position > 0) {
+ pos = dev->focuser_info->last_position;
+ dev_dbg(&i2c_client->dev, "%s: AR0832_IOCTL_SET_MODE: "
+ "move to 0, then restore the backed-up focuser position %d\n",
+ __func__, pos);
+ ar0832_focuser_set_position(dev, 0);
+ ar0832_msleep(10);
+
+ ar0832_focuser_set_position(dev, pos);
+ ar0832_msleep(10);
+ }
+ mutex_unlock(&dev->ar0832_camera_lock);
+ return err;
+ }
+ case AR0832_IOCTL_SET_FRAME_LENGTH:
+ mutex_lock(&dev->ar0832_camera_lock);
+ err = ar0832_set_frame_length(dev, (u32)arg);
+ mutex_unlock(&dev->ar0832_camera_lock);
+ return err;
+ case AR0832_IOCTL_SET_COARSE_TIME:
+ mutex_lock(&dev->ar0832_camera_lock);
+ err = ar0832_set_coarse_time(dev, (u32)arg);
+ mutex_unlock(&dev->ar0832_camera_lock);
+ return err;
+ case AR0832_IOCTL_SET_GAIN:
+ mutex_lock(&dev->ar0832_camera_lock);
+ err = ar0832_set_gain(dev, (u16)arg);
+ mutex_unlock(&dev->ar0832_camera_lock);
+ return err;
+ case AR0832_IOCTL_GET_STATUS:
+ {
+ u8 status;
+ dev_dbg(&i2c_client->dev, "AR0832_IOCTL_GET_STATUS\n");
+ err = ar0832_get_status(dev, &status);
+ if (err)
+ return err;
+ if (copy_to_user((void __user *)arg, &status, sizeof(status))) {
+ dev_err(&i2c_client->dev,
+ "%s: AR0832_IOCTL_GET_STATUS failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case AR0832_IOCTL_SET_SENSOR_REGION:
+ {
+ struct ar0832_stereo_region region;
+ dev_dbg(&i2c_client->dev, "AR0832_IOCTL_SET_SENSOR_REGION\n");
+ /* Right now, it doesn't do anything */
+
+ return 0;
+ }
+
+ case AR0832_FOCUSER_IOCTL_GET_CONFIG:
+ dev_dbg(&i2c_client->dev,
+ "%s AR0832_FOCUSER_IOCTL_GET_CONFIG\n", __func__);
+ if (copy_to_user((void __user *) arg,
+ &dev->focuser_info->config,
+ sizeof(dev->focuser_info->config))) {
+ dev_err(&i2c_client->dev,
+ "%s: AR0832_FOCUSER_IOCTL_GET_CONFIG failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ return 0;
+
+ case AR0832_FOCUSER_IOCTL_SET_POSITION:
+ dev_dbg(&i2c_client->dev,
+ "%s AR0832_FOCUSER_IOCTL_SET_POSITION\n", __func__);
+ mutex_lock(&dev->ar0832_camera_lock);
+ if (dev->focuser_info->focuser_init_flag == false) {
+ ar0832_focuser_set_config(dev);
+ dev->focuser_info->focuser_init_flag = true;
+ }
+ err = ar0832_focuser_set_position(dev, (u32)arg);
+ mutex_unlock(&dev->ar0832_camera_lock);
+ return err;
+
+ case AR0832_IOCTL_GET_SENSOR_ID:
+ dev_dbg(&i2c_client->dev,
+ "%s AR0832_IOCTL_GET_SENSOR_ID\n", __func__);
+
+ if (!dev->sensor_id_data) {
+ err = ar0832_get_sensorid(dev, &dev->sensor_id_data);
+ if (err) {
+ dev_err(&i2c_client->dev,
+ "%s: failed to get sensor id\n",
+ __func__);
+ return -EFAULT;
+ }
+ }
+
+ if (copy_to_user((void __user *) arg,
+ &dev->sensor_id_data,
+ sizeof(dev->sensor_id_data))) {
+ dev_err(&i2c_client->dev,
+ "%s: AR0832_IOCTL_GET_SENSOR_ID failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ return 0;
+
+ default:
+ dev_err(&i2c_client->dev, "%s: unknown ioctl 0x%x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+ return 0;
+}
+
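+/* Only one user may hold the device open at a time (tracked by in_use). */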
+static int ar0832_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ar0832_dev *dev = dev_get_drvdata(miscdev->parent);
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ dev_dbg(&i2c_client->dev, "%s: ++\n", __func__);
+ if (atomic_xchg(&dev->in_use, 1))
+ return -EBUSY;
+
+ dev->focuser_info->focuser_init_flag = false;
+ file->private_data = dev;
+
+ return 0;
+}
+
+static int ar0832_release(struct inode *inode, struct file *file)
+{
+ struct ar0832_dev *dev = file->private_data;
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ dev_dbg(&i2c_client->dev, "%s: ++\n", __func__);
+
+ ar0832_power_off(dev);
+
+ file->private_data = NULL;
+
+ dev->focuser_info->focuser_init_flag = false;
+
+ WARN_ON(!atomic_xchg(&dev->in_use, 0));
+ return 0;
+}
+
+static const struct file_operations ar0832_fileops = {
+ .owner = THIS_MODULE,
+ .open = ar0832_open,
+ .unlocked_ioctl = ar0832_ioctl,
+ .release = ar0832_release,
+};
+
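+/* debugfs "test_pattern" read: report the current TEST_PATTERN_REG value. */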
+static int ar0832_debugfs_show(struct seq_file *s, void *unused)
+{
+ struct ar0832_dev *dev = s->private;
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int ret;
+ u16 test_pattern_reg;
+
+ dev_dbg(&dev->i2c_client->dev, "%s: ++\n", __func__);
+ if (!dev->brd_power_cnt) {
+ dev_info(&i2c_client->dev,
+ "%s: camera is off\n", __func__);
+ return 0;
+ }
+
+ mutex_lock(&dev->ar0832_camera_lock);
+ ret = ar0832_read_reg16(i2c_client,
+ AR0832_TEST_PATTERN_REG, &test_pattern_reg);
+ mutex_unlock(&dev->ar0832_camera_lock);
+
+ if (ret) {
+ dev_err(&i2c_client->dev,
+ "%s: test pattern read failed\n", __func__);
+ return -EFAULT;
+ }
+
+ seq_printf(s, "%d\n", test_pattern_reg);
+ return 0;
+}
+
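+/*
+ * debugfs "test_pattern" control: writing 1 selects the color bar pattern,
+ * 2/3/4 select a solid red/green/blue test frame, and any other value
+ * disables the test pattern.
+ */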
+static ssize_t ar0832_debugfs_write(
+ struct file *file,
+ char const __user *buf,
+ size_t count,
+ loff_t *offset)
+{
+ struct ar0832_dev *dev = ((struct seq_file *)file->private_data)->private;
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int ret = 0;
+ char buffer[10];
+ u16 input, val, red = 0, green = 0, blue = 0;
+
+ dev_dbg(&i2c_client->dev, "%s: ++\n", __func__);
+ if (!dev->brd_power_cnt) {
+ dev_info(&i2c_client->dev,
+ "%s: camera is off\n", __func__);
+ return count;
+ }
+
+ if (copy_from_user(buffer, buf, min(count, sizeof(buffer) - 1)))
+ goto debugfs_write_fail;
+ buffer[min(count, sizeof(buffer) - 1)] = '\0';
+
+ input = (u16)simple_strtoul(buffer, NULL, 10);
+
+ mutex_lock(&dev->ar0832_camera_lock);
+ ret = ar0832_write_reg8(i2c_client, AR0832_GROUP_HOLD_REG, 0x1);
+
+ switch (input) {
+ case 1: /* color bar */
+ val = 2;
+ break;
+ case 2: /* Red */
+ val = 1;
+ red = 0x300; /* 10 bit value */
+ green = 0;
+ blue = 0;
+ break;
+ case 3: /* Green */
+ val = 1;
+ red = 0;
+ green = 0x300; /* 10 bit value */
+ blue = 0;
+ break;
+ case 4: /* Blue */
+ val = 1;
+ red = 0;
+ green = 0;
+ blue = 0x300; /* 10 bit value */
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ if (input == 2 || input == 3 || input == 4) {
+ ret |= ar0832_write_reg_helper(dev,
+ AR0832_TEST_RED_REG, red);
+ ret |= ar0832_write_reg_helper(dev,
+ AR0832_TEST_GREENR_REG, green);
+ ret |= ar0832_write_reg_helper(dev,
+ AR0832_TEST_GREENR_REG, green);
+ ret |= ar0832_write_reg_helper(dev,
+ AR0832_TEST_BLUE_REG, blue);
+ }
+
+ ret |= ar0832_write_reg_helper(dev, AR0832_TEST_PATTERN_REG, val);
+ ret |= ar0832_write_reg8(i2c_client, AR0832_GROUP_HOLD_REG, 0x0);
+ mutex_unlock(&dev->ar0832_camera_lock);
+
+ if (ret)
+ goto debugfs_write_fail;
+
+ return count;
+
+debugfs_write_fail:
+ dev_err(&i2c_client->dev,
+ "%s: test pattern write failed\n", __func__);
+ return -EFAULT;
+}
+
+static int ar0832_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct ar0832_dev *dev = inode->i_private;
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ dev_dbg(&i2c_client->dev, "%s: ++\n", __func__);
+
+ return single_open(file, ar0832_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations ar0832_debugfs_fops = {
+ .open = ar0832_debugfs_open,
+ .read = seq_read,
+ .write = ar0832_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __devexit ar0832_remove_debugfs(struct ar0832_dev *dev)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ dev_dbg(&i2c_client->dev, "%s: ++\n", __func__);
+
+ if (dev->debugdir)
+ debugfs_remove_recursive(dev->debugdir);
+ dev->debugdir = NULL;
+}
+
+static void ar0832_create_debugfs(struct ar0832_dev *dev)
+{
+ struct dentry *ret;
+ struct i2c_client *i2c_client = dev->i2c_client;
+
+ dev_dbg(&i2c_client->dev, "%s\n", __func__);
+
+ dev->debugdir = debugfs_create_dir(dev->dname, NULL);
+ if (!dev->debugdir)
+ goto remove_debugfs;
+
+ ret = debugfs_create_file("test_pattern",
+ S_IWUGO | S_IRUGO,
+ dev->debugdir, dev,
+ &ar0832_debugfs_fops);
+ if (!ret)
+ goto remove_debugfs;
+
+ return;
+remove_debugfs:
+ dev_err(&i2c_client->dev, "couldn't create debugfs\n");
+ ar0832_remove_debugfs(dev);
+}
+
+static int ar0832_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ struct ar0832_dev *dev = NULL;
+ int ret;
+
+ dev_info(&client->dev, "ar0832: probing sensor (id: %s)\n",
+ id->name);
+
+ dev = kzalloc(sizeof(struct ar0832_dev), GFP_KERNEL);
+ if (!dev)
+ goto probe_fail_release;
+
+ dev->sensor_info = kzalloc(sizeof(struct ar0832_sensor_info),
+ GFP_KERNEL);
+ if (!dev->sensor_info)
+ goto probe_fail_release;
+
+ dev->focuser_info = kzalloc(sizeof(struct ar0832_focuser_info),
+ GFP_KERNEL);
+ if (!dev->focuser_info)
+ goto probe_fail_release;
+
+ /* sensor */
+ dev->pdata = client->dev.platform_data;
+ dev->i2c_client = client;
+
+ /* focuser */
+ dev->focuser_info->config.settle_time = SETTLETIME_MS;
+ dev->focuser_info->config.pos_low = POS_LOW;
+ dev->focuser_info->config.pos_high = POS_HIGH;
+
+ snprintf(dev->dname, sizeof(dev->dname), "%s-%s",
+ id->name, dev->pdata->id);
+ dev->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ dev->misc_dev.name = dev->dname;
+ dev->misc_dev.fops = &ar0832_fileops;
+ dev->misc_dev.mode = S_IRWXUGO;
+ dev->misc_dev.parent = &client->dev;
+ err = misc_register(&dev->misc_dev);
+ if (err) {
+ dev_err(&client->dev, "Unable to register misc device!\n");
+ ret = err;
+ goto probe_fail_free;
+ }
+
+ i2c_set_clientdata(client, dev);
+ mutex_init(&dev->ar0832_camera_lock);
+
+ dev->power_rail.sen_1v8_reg = regulator_get(&client->dev, "vdd");
+ if (IS_ERR_OR_NULL(dev->power_rail.sen_1v8_reg)) {
+ dev_err(&client->dev, "%s: failed to get vdd\n",
+ __func__);
+ ret = PTR_ERR(dev->power_rail.sen_1v8_reg);
+ goto probe_fail_free;
+ }
+
+ dev->power_rail.sen_2v8_reg = regulator_get(&client->dev, "vaa");
+ if (IS_ERR_OR_NULL(dev->power_rail.sen_2v8_reg)) {
+ dev_err(&client->dev, "%s: failed to get vaa\n",
+ __func__);
+ ret = PTR_ERR(dev->power_rail.sen_2v8_reg);
+ regulator_put(dev->power_rail.sen_1v8_reg);
+ dev->power_rail.sen_1v8_reg = NULL;
+ goto probe_fail_free;
+ }
+ /* create debugfs interface */
+ ar0832_create_debugfs(dev);
+
+ return 0;
+
+probe_fail_release:
+ dev_err(&client->dev, "%s: unable to allocate memory!\n", __func__);
+ ret = -ENOMEM;
+probe_fail_free:
+ if (dev) {
+ kfree(dev->focuser_info);
+ kfree(dev->sensor_info);
+ }
+ kfree(dev);
+ return ret;
+}
+
+static int ar0832_remove(struct i2c_client *client)
+{
+ struct ar0832_dev *dev = i2c_get_clientdata(client);
+
+ if (dev->power_rail.sen_1v8_reg)
+ regulator_put(dev->power_rail.sen_1v8_reg);
+ if (dev->power_rail.sen_2v8_reg)
+ regulator_put(dev->power_rail.sen_2v8_reg);
+
+ misc_deregister(&dev->misc_dev);
+ kfree(dev->sensor_info);
+ kfree(dev->focuser_info);
+
+ ar0832_remove_debugfs(dev);
+
+ kfree(dev);
+ return 0;
+}
+
+static const struct i2c_device_id ar0832_id[] = {
+ { "ar0832", 0 },
+ { }
+};
+
+static struct i2c_driver ar0832_i2c_driver = {
+ .probe = ar0832_probe,
+ .remove = ar0832_remove,
+ .id_table = ar0832_id,
+ .driver = {
+ .name = "ar0832",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar0832_init(void)
+{
+ pr_info("%s: ++\n", __func__);
+ return i2c_add_driver(&ar0832_i2c_driver);
+}
+
+static void __exit ar0832_exit(void)
+{
+ i2c_del_driver(&ar0832_i2c_driver);
+}
+
+module_init(ar0832_init);
+module_exit(ar0832_exit);
diff --git a/drivers/media/video/tegra/avp/Kconfig b/drivers/media/video/tegra/avp/Kconfig
new file mode 100644
index 000000000000..fdd208510fcb
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Kconfig
@@ -0,0 +1,25 @@
+config TEGRA_RPC
+ bool "Enable support for Tegra RPC"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Enables support for the RPC mechanism necessary for the Tegra
+ multimedia framework. It is used both to communicate locally on the
+ CPU between multiple multimedia components and to communicate with
+ the AVP for offloading media decode.
+
+ Exports the local tegra RPC interface on device node
+ /dev/tegra_rpc. Also provides tegra fd based semaphores needed by
+ the tegra multimedia framework.
+
+ If unsure, say Y
+
+config TEGRA_AVP
+ bool "Enable support for the AVP multimedia offload engine"
+ depends on ARCH_TEGRA && TEGRA_RPC
+ default y
+ help
+ Enables support for the multimedia offload engine used by the Tegra
+ multimedia framework.
+
+ If unsure, say Y
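The help text above refers to the /dev/tegra_rpc device node. A minimal user-space sketch of opening it is shown below; the actual request/ioctl layout lives in linux/tegra_rpc.h (included by avp.c further down), so only the open/close is shown and nothing here is part of the patch.

/* Illustrative only: touch the RPC device node exported when
 * CONFIG_TEGRA_RPC=y. Real clients use the ioctls declared in
 * <linux/tegra_rpc.h>; those are deliberately not reproduced here. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/tegra_rpc", O_RDWR);

	if (fd < 0) {
		perror("open /dev/tegra_rpc");
		return 1;
	}
	/* ioctls from <linux/tegra_rpc.h> would go here */
	close(fd);
	return 0;
}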
diff --git a/drivers/media/video/tegra/avp/Makefile b/drivers/media/video/tegra/avp/Makefile
new file mode 100644
index 000000000000..148265648a40
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+obj-$(CONFIG_TEGRA_RPC) += tegra_rpc.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_local.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_sema.o
+obj-$(CONFIG_TEGRA_AVP) += avp.o
+obj-$(CONFIG_TEGRA_AVP) += avp_svc.o
+obj-$(CONFIG_TEGRA_AVP) += headavp.o
diff --git a/drivers/media/video/tegra/avp/avp.c b/drivers/media/video/tegra/avp/avp.c
new file mode 100644
index 000000000000..964b7c7cf320
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.c
@@ -0,0 +1,1949 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/nvmap.h>
+#include <mach/legacy_irq.h>
+#include <mach/hardware.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "headavp.h"
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+#include "nvavp.h"
+
+enum {
+ AVP_DBG_TRACE_XPC = 1U << 0,
+ AVP_DBG_TRACE_XPC_IRQ = 1U << 1,
+ AVP_DBG_TRACE_XPC_MSG = 1U << 2,
+ AVP_DBG_TRACE_XPC_CONN = 1U << 3,
+ AVP_DBG_TRACE_TRPC_MSG = 1U << 4,
+ AVP_DBG_TRACE_TRPC_CONN = 1U << 5,
+ AVP_DBG_TRACE_LIB = 1U << 6,
+};
+
+static u32 avp_debug_mask =
+ AVP_DBG_TRACE_XPC |
+ /* AVP_DBG_TRACE_XPC_IRQ | */
+ /* AVP_DBG_TRACE_XPC_MSG | */
+ /* AVP_DBG_TRACE_TRPC_MSG | */
+ AVP_DBG_TRACE_XPC_CONN |
+ AVP_DBG_TRACE_TRPC_CONN |
+ AVP_DBG_TRACE_LIB;
+
+module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)
+
+#define TEGRA_AVP_NAME "tegra-avp"
+
+#define TEGRA_AVP_RESET_VECTOR_ADDR \
+ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+
+#define TEGRA_AVP_RESUME_ADDR IO_ADDRESS(TEGRA_IRAM_BASE + \
+ TEGRA_RESET_HANDLER_SIZE)
+
+#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define FLOW_MODE_STOP (0x2 << 29)
+#define FLOW_MODE_NONE 0x0
+
+#define MBOX_FROM_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
+#define MBOX_TO_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+
+/* Layout of the mailbox registers:
+ * bit 31 - pending message interrupt enable (mailbox full, i.e. valid=1)
+ * bit 30 - message cleared interrupt enable (mailbox empty, i.e. valid=0)
+ * bit 29 - message valid. peer clears this bit after reading msg
+ * bits 27:0 - message data
+ */
+#define MBOX_MSG_PENDING_INT_EN (1 << 31)
+#define MBOX_MSG_READ_INT_EN (1 << 30)
+#define MBOX_MSG_VALID (1 << 29)
+
+#define AVP_MSG_MAX_CMD_LEN 16
+#define AVP_MSG_AREA_SIZE (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)
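The register layout in the comment above is encoded and decoded further down: tegra_avp_probe() builds avp->msg by shifting the message-area bus address right by 4 and OR-ing in the valid and pending-interrupt bits, and avp_mbox_pending_isr() shifts the received word left by 4 to recover the address. A condensed sketch of that, using only the macros defined above (hypothetical helpers, not part of the patch):

/* Sketch of the mailbox word encoding used by this driver; assumes the
 * message area is 16-byte aligned so the >> 4 loses no address bits. */
static inline u32 avp_encode_mbox_word(dma_addr_t msg_area_addr)
{
	return (msg_area_addr >> 4) | MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN;
}

static inline u32 avp_decode_mbox_word(u32 mbox_msg)
{
	/* shifting left by 4 drops the flag bits and restores the address,
	 * exactly as avp_mbox_pending_isr() does below */
	return mbox_msg << 4;
}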
+
+struct tegra_avp_info {
+ struct clk *cop_clk;
+
+ int mbox_from_avp_pend_irq;
+
+ dma_addr_t msg_area_addr;
+ u32 msg;
+ void *msg_to_avp;
+ void *msg_from_avp;
+ struct mutex to_avp_lock;
+ struct mutex from_avp_lock;
+
+ struct work_struct recv_work;
+ struct workqueue_struct *recv_wq;
+
+ struct trpc_node *rpc_node;
+ struct miscdevice misc_dev;
+ int refcount;
+ struct mutex open_lock;
+
+ spinlock_t state_lock;
+ bool initialized;
+ bool shutdown;
+ bool suspending;
+ bool defer_remote;
+
+ struct mutex libs_lock;
+ struct list_head libs;
+ struct nvmap_client *nvmap_libs;
+
+ /* client for driver allocations, persistent */
+ struct nvmap_client *nvmap_drv;
+ struct nvmap_handle_ref *kernel_handle;
+ void *kernel_data;
+ phys_addr_t kernel_phys;
+
+ struct nvmap_handle_ref *iram_backup_handle;
+ void *iram_backup_data;
+ phys_addr_t iram_backup_phys;
+ unsigned long resume_addr;
+ unsigned long reset_addr;
+
+ struct trpc_endpoint *avp_ep;
+ struct rb_root endpoints;
+
+ struct avp_svc_info *avp_svc;
+};
+
+struct remote_info {
+ u32 loc_id;
+ u32 rem_id;
+ struct kref ref;
+
+ struct trpc_endpoint *trpc_ep;
+ struct rb_node rb_node;
+};
+
+struct lib_item {
+ struct list_head list;
+ u32 handle;
+ char name[TEGRA_AVP_LIB_MAX_NAME];
+};
+
+static struct tegra_avp_info *tegra_avp;
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
+static void avp_trpc_close(struct trpc_endpoint *ep);
+static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep);
+static void libs_cleanup(struct tegra_avp_info *avp);
+
+static struct trpc_ep_ops remote_ep_ops = {
+ .send = avp_trpc_send,
+ .close = avp_trpc_close,
+ .show = avp_trpc_show,
+};
+
+static struct remote_info *rinfo_alloc(struct tegra_avp_info *avp)
+{
+ struct remote_info *rinfo;
+
+ rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
+ if (!rinfo)
+ return NULL;
+ kref_init(&rinfo->ref);
+ return rinfo;
+}
+
+static void _rinfo_release(struct kref *ref)
+{
+ struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
+ kfree(rinfo);
+}
+
+static inline void rinfo_get(struct remote_info *rinfo)
+{
+ kref_get(&rinfo->ref);
+}
+
+static inline void rinfo_put(struct remote_info *rinfo)
+{
+ kref_put(&rinfo->ref, _rinfo_release);
+}
+
+static int remote_insert(struct tegra_avp_info *avp, struct remote_info *rinfo)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct remote_info *tmp;
+
+ p = &avp->endpoints.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct remote_info, rb_node);
+
+ if (rinfo->loc_id < tmp->loc_id)
+ p = &(*p)->rb_left;
+ else if (rinfo->loc_id > tmp->loc_id)
+ p = &(*p)->rb_right;
+ else {
+ pr_info("%s: avp endpoint id=%x (%s) already exists\n",
+ __func__, rinfo->loc_id,
+ trpc_name(rinfo->trpc_ep));
+ return -EEXIST;
+ }
+ }
+ rb_link_node(&rinfo->rb_node, parent, p);
+ rb_insert_color(&rinfo->rb_node, &avp->endpoints);
+ rinfo_get(rinfo);
+ return 0;
+}
+
+static struct remote_info *remote_find(struct tegra_avp_info *avp, u32 local_id)
+{
+ struct rb_node *n = avp->endpoints.rb_node;
+ struct remote_info *rinfo;
+
+ while (n) {
+ rinfo = rb_entry(n, struct remote_info, rb_node);
+
+ if (local_id < rinfo->loc_id)
+ n = n->rb_left;
+ else if (local_id > rinfo->loc_id)
+ n = n->rb_right;
+ else
+ return rinfo;
+ }
+ return NULL;
+}
+
+static void remote_remove(struct tegra_avp_info *avp, struct remote_info *rinfo)
+{
+ rb_erase(&rinfo->rb_node, &avp->endpoints);
+ rinfo_put(rinfo);
+}
+
+/* test whether or not the trpc endpoint provided is a valid AVP node
+ * endpoint */
+static struct remote_info *validate_trpc_ep(struct tegra_avp_info *avp,
+ struct trpc_endpoint *ep)
+{
+ struct remote_info *tmp = trpc_priv(ep);
+ struct remote_info *rinfo;
+
+ if (!tmp)
+ return NULL;
+ rinfo = remote_find(avp, tmp->loc_id);
+ if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
+ return rinfo;
+ return NULL;
+}
+
+static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ seq_printf(s, " <unknown>\n");
+ goto out;
+ }
+ seq_printf(s, " loc_id:0x%x\n rem_id:0x%x\n",
+ rinfo->loc_id, rinfo->rem_id);
+out:
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+static inline void mbox_writel(u32 val, void __iomem *mbox)
+{
+ writel(val, mbox);
+}
+
+static inline u32 mbox_readl(void __iomem *mbox)
+{
+ return readl(mbox);
+}
+
+static inline void msg_ack_remote(struct tegra_avp_info *avp, u32 cmd, u32 arg)
+{
+ struct msg_ack *ack = avp->msg_from_avp;
+
+ /* must make sure the arg is there first */
+ ack->arg = arg;
+ wmb();
+ ack->cmd = cmd;
+ wmb();
+}
+
+static inline u32 msg_recv_get_cmd(struct tegra_avp_info *avp)
+{
+ volatile u32 *cmd = avp->msg_from_avp;
+ rmb();
+ return *cmd;
+}
+
+static inline int __msg_write(struct tegra_avp_info *avp, void *hdr,
+ size_t hdr_len, void *buf, size_t len)
+{
+ memcpy(avp->msg_to_avp, hdr, hdr_len);
+ if (buf && len)
+ memcpy(avp->msg_to_avp + hdr_len, buf, len);
+ mbox_writel(avp->msg, MBOX_TO_AVP);
+ return 0;
+}
+
+static inline int msg_write(struct tegra_avp_info *avp, void *hdr,
+ size_t hdr_len, void *buf, size_t len)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + HZ;
+
+ /* the other side acks the message by clearing the first word;
+ * wait for it to do so */
+ rmb();
+ while (*rem_ack != 0 && time_before(jiffies, endtime)) {
+ usleep_range(100, 2000);
+ rmb();
+ }
+ if (*rem_ack != 0)
+ return -ETIMEDOUT;
+ __msg_write(avp, hdr, hdr_len, buf, len);
+ return 0;
+}
+
+static inline int msg_check_ack(struct tegra_avp_info *avp, u32 cmd, u32 *arg)
+{
+ struct msg_ack ack;
+
+ rmb();
+ memcpy(&ack, avp->msg_to_avp, sizeof(ack));
+ if (ack.cmd != cmd)
+ return -ENOENT;
+ if (arg)
+ *arg = ack.arg;
+ return 0;
+}
+
+/* XXX: add timeout */
+static int msg_wait_ack_locked(struct tegra_avp_info *avp, u32 cmd, u32 *arg)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + msecs_to_jiffies(400);
+ int ret;
+
+ do {
+ ret = msg_check_ack(avp, cmd, arg);
+ usleep_range(1000, 5000);
+ } while (ret && time_before(jiffies, endtime));
+
+ /* if we timed out, try one more time */
+ if (ret)
+ ret = msg_check_ack(avp, cmd, arg);
+
+ /* clear out the ack */
+ *rem_ack = 0;
+ wmb();
+ return ret;
+}
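msg_write() and msg_wait_ack_locked() together form a send-then-wait-for-ack handshake over the shared message area, and every caller holds to_avp_lock across the pair (see _send_disconnect() and avp_node_try_connect() below). A condensed sketch of that calling pattern (hypothetical wrapper, not part of the patch):

/* Hypothetical helper showing the locking/ack pattern used by
 * _send_disconnect() and avp_node_try_connect() below. */
static int avp_send_and_wait_ack(struct tegra_avp_info *avp,
				 void *hdr, size_t hdr_len,
				 u32 ack_cmd, u32 *ack_arg)
{
	int ret;

	mutex_lock(&avp->to_avp_lock);
	ret = msg_write(avp, hdr, hdr_len, NULL, 0);
	if (!ret)
		ret = msg_wait_ack_locked(avp, ack_cmd, ack_arg);
	mutex_unlock(&avp->to_avp_lock);
	return ret;
}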
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ struct msg_port_data msg;
+ int ret;
+ unsigned long flags;
+
+ DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%d\n",
+ __func__, ep, trpc_priv(ep), buf, len);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
+ ret = -EBUSY;
+ goto err_state_locked;
+ } else if (avp->shutdown) {
+ ret = -ENODEV;
+ goto err_state_locked;
+ }
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ ret = -ENOTTY;
+ goto err_state_locked;
+ }
+ rinfo_get(rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ msg.cmd = CMD_MESSAGE;
+ msg.port_id = rinfo->rem_id;
+ msg.msg_len = len;
+
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), buf, len);
+ mutex_unlock(&avp->to_avp_lock);
+
+ DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
+ __func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
+ rinfo_put(rinfo);
+ return ret;
+
+err_state_locked:
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return ret;
+}
+
+static int _send_disconnect(struct tegra_avp_info *avp, u32 port_id)
+{
+ struct msg_disconnect msg;
+ int ret;
+
+ msg.cmd = CMD_DISCONNECT;
+ msg.port_id = port_id;
+
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ pr_err("%s: remote has not acked last message (%x)\n", __func__,
+ port_id);
+ goto err_msg_write;
+ }
+
+ ret = msg_wait_ack_locked(avp, CMD_ACK, NULL);
+ if (ret) {
+ pr_err("%s: remote end won't respond for %x\n", __func__,
+ port_id);
+ goto err_wait_ack;
+ }
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for %x\n",
+ __func__, port_id);
+
+err_wait_ack:
+err_msg_write:
+ mutex_unlock(&avp->to_avp_lock);
+ return ret;
+}
+
+/* Note: Assumes that the rinfo was previously successfully added to the
+ * endpoints rb_tree. The initial refcnt of 1 is inherited by the port when the
+ * trpc endpoint is created with the trpc_xxx functions. Thus, on close,
+ * we must drop that reference here.
+ * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
+ *
+ * The try_connect function does not use this on error because it needs to
+ * split the close of trpc_ep port and the put.
+ */
+static inline void remote_close(struct remote_info *rinfo)
+{
+ trpc_close(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+}
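The ownership rules in the comment above reduce to: remote_insert() takes the rb-tree's own reference, while the initial reference from rinfo_alloc() belongs to the trpc endpoint and is dropped by remote_close(). Code that looks an endpoint up therefore takes a temporary reference first, as in this condensed sketch of the pattern used by avp_trpc_close() and process_disconnect_locked() below (hypothetical function, not part of the patch):

/* Sketch of the lookup-and-teardown pattern under state_lock; mirrors
 * avp_trpc_close()/process_disconnect_locked(), not new functionality. */
static void example_teardown(struct tegra_avp_info *avp, u32 loc_id)
{
	struct remote_info *rinfo;
	unsigned long flags;

	spin_lock_irqsave(&avp->state_lock, flags);
	rinfo = remote_find(avp, loc_id);
	if (rinfo) {
		rinfo_get(rinfo);		/* temporary ref for this path */
		remote_remove(avp, rinfo);	/* drops the rb-tree's ref */
	}
	spin_unlock_irqrestore(&avp->state_lock, flags);

	if (rinfo) {
		remote_close(rinfo);		/* drops the endpoint's ref */
		rinfo_put(rinfo);		/* drops the temporary ref */
	}
}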
+
+static void avp_trpc_close(struct trpc_endpoint *ep)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->shutdown) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return;
+ }
+
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
+ __func__, trpc_name(ep), ep);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return;
+ }
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
+ trpc_name(ep), rinfo->rem_id);
+
+ ret = _send_disconnect(avp, rinfo->rem_id);
+ if (ret)
+ pr_err("%s: error while closing remote port '%s' (%x)\n",
+ __func__, trpc_name(ep), rinfo->rem_id);
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+}
+
+/* takes and holds avp->from_avp_lock */
+static void recv_msg_lock(struct tegra_avp_info *avp)
+{
+ unsigned long flags;
+
+ mutex_lock(&avp->from_avp_lock);
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+/* MUST be called with avp->from_avp_lock held */
+static void recv_msg_unlock(struct tegra_avp_info *avp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = false;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ mutex_unlock(&avp->from_avp_lock);
+}
+
+static int avp_node_try_connect(struct trpc_node *node,
+ struct trpc_node *src_node,
+ struct trpc_endpoint *from)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ const char *port_name = trpc_name(from);
+ struct remote_info *rinfo;
+ struct msg_connect msg;
+ int ret;
+ unsigned long flags;
+ int len;
+ const int max_retry_cnt = 6;
+ int cnt = 0;
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
+ port_name);
+
+ if (node != avp->rpc_node || node->priv != avp)
+ return -ENODEV;
+
+ len = strlen(port_name);
+ if (len > XPC_PORT_NAME_LEN) {
+ pr_err("%s: port name (%s) too long\n", __func__, port_name);
+ return -EINVAL;
+ }
+
+ ret = 0;
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->suspending) {
+ ret = -EBUSY;
+ } else if (likely(src_node != avp->rpc_node)) {
+ /* only check for initialized when the source is not ourselves
+ * since we'll end up calling into here during initialization */
+ if (!avp->initialized)
+ ret = -ENODEV;
+ } else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
+ /* we only allow connections to ourselves for the cpu-to-avp
+ port */
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ return ret;
+
+ rinfo = rinfo_alloc(avp);
+ if (!rinfo) {
+ pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+ ret = -ENOMEM;
+ goto err_alloc_rinfo;
+ }
+ rinfo->loc_id = (u32)rinfo;
+
+ msg.cmd = CMD_CONNECT;
+ msg.port_id = rinfo->loc_id;
+ memcpy(msg.name, port_name, len);
+ memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);
+
+ /* when trying to connect to remote, we need to block remote
+ * messages until we get our ack and can insert it into our lists.
+ * Otherwise, we can get a message from the other side for a port
+ * that we haven't finished setting up.
+ *
+ * 'defer_remote' will force the irq handler to not process messages
+ * at irq context but to schedule work to do so. The work function will
+ * take the from_avp_lock and everything should stay consistent.
+ */
+ recv_msg_lock(avp);
+ for (cnt = 0; cnt < max_retry_cnt; cnt++) {
+ /* Retry the connect to the AVP up to max_retry_cnt times.
+ * This section is protected by the mutex, and the CPU
+ * needs to re-send the CMD_CONNECT command if the AVP
+ * did not receive the command.
+ */
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ pr_err("%s: remote has not acked last message (%s)\n",
+ __func__, port_name);
+ mutex_unlock(&avp->to_avp_lock);
+ goto err_msg_write;
+ }
+ ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
+ mutex_unlock(&avp->to_avp_lock);
+ if (!ret && rinfo->rem_id)
+ break;
+
+ /* Don't sleep after the last retry */
+ if ((cnt + 1) < max_retry_cnt)
+ usleep_range(100, 2000);
+ }
+
+ if (ret) {
+ pr_err("%s: remote end won't respond for '%s'\n", __func__,
+ port_name);
+ goto err_wait_ack;
+ }
+ if (!rinfo->rem_id) {
+ pr_err("%s: can't connect to '%s'\n", __func__, port_name);
+ ret = -ECONNREFUSED;
+ goto err_nack;
+ }
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
+ __func__, port_name, rinfo->loc_id, rinfo->rem_id);
+
+ rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
+ rinfo);
+ if (!rinfo->trpc_ep) {
+ pr_err("%s: cannot create peer for %s\n", __func__, port_name);
+ ret = -EINVAL;
+ goto err_create_peer;
+ }
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ ret = remote_insert(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ goto err_ep_insert;
+
+ recv_msg_unlock(avp);
+ return 0;
+
+err_ep_insert:
+ trpc_close(rinfo->trpc_ep);
+err_create_peer:
+ _send_disconnect(avp, rinfo->rem_id);
+err_nack:
+err_wait_ack:
+err_msg_write:
+ recv_msg_unlock(avp);
+ rinfo_put(rinfo);
+err_alloc_rinfo:
+ return ret;
+}
+
+static void process_disconnect_locked(struct tegra_avp_info *avp,
+ struct msg_data *raw_msg)
+{
+ struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
+ unsigned long flags;
+ struct remote_info *rinfo;
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
+ disconn_msg->port_id);
+
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
+ sizeof(struct msg_disconnect));
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = remote_find(avp, disconn_msg->port_id);
+ if (!rinfo) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ pr_warning("%s: got disconnect for unknown port %x\n",
+ __func__, disconn_msg->port_id);
+ goto ack;
+ }
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+ack:
+ msg_ack_remote(avp, CMD_ACK, 0);
+}
+
+static void process_connect_locked(struct tegra_avp_info *avp,
+ struct msg_data *raw_msg)
+{
+ struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
+ struct trpc_endpoint *trpc_ep;
+ struct remote_info *rinfo;
+ char name[XPC_PORT_NAME_LEN + 1];
+ int ret;
+ u32 local_port_id = 0;
+ unsigned long flags;
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
+ conn_msg->port_id);
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ conn_msg, sizeof(struct msg_connect));
+
+ rinfo = rinfo_alloc(avp);
+ if (!rinfo) {
+ pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+ ret = -ENOMEM;
+ goto ack;
+ }
+ rinfo->loc_id = (u32)rinfo;
+ rinfo->rem_id = conn_msg->port_id;
+
+ memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
+ name[XPC_PORT_NAME_LEN] = '\0';
+ trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
+ rinfo, 0);
+ if (IS_ERR(trpc_ep)) {
+ pr_err("%s: remote requested unknown port '%s' (%d)\n",
+ __func__, name, (int)PTR_ERR(trpc_ep));
+ goto nack;
+ }
+ rinfo->trpc_ep = trpc_ep;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ ret = remote_insert(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ goto err_ep_insert;
+
+ local_port_id = rinfo->loc_id;
+ goto ack;
+
+err_ep_insert:
+ trpc_close(trpc_ep);
+nack:
+ rinfo_put(rinfo);
+ local_port_id = 0;
+ack:
+ msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
+}
+
+static int process_message(struct tegra_avp_info *avp, struct msg_data *raw_msg,
+ gfp_t gfp_flags)
+{
+ struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
+ struct remote_info *rinfo;
+ unsigned long flags;
+ int len;
+ int ret;
+
+ len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);
+
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
+ pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
+ port_msg->cmd, port_msg->port_id, port_msg->msg_len);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
+ sizeof(struct msg_port_data) + len);
+ }
+
+ if (len != port_msg->msg_len)
+ pr_err("%s: message sent is too long (%d bytes)\n", __func__,
+ port_msg->msg_len);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = remote_find(avp, port_msg->port_id);
+ if (rinfo) {
+ rinfo_get(rinfo);
+ trpc_get(rinfo->trpc_ep);
+ } else {
+ pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ ret = -ENOENT;
+ goto ack;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
+ len, gfp_flags);
+ if (ret == -ENOMEM) {
+ trpc_put(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+ goto no_ack;
+ } else if (ret) {
+ pr_err("%s: cannot queue message for port %s/%x (%d)\n",
+ __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
+ ret);
+ } else {
+ DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
+ }
+
+ trpc_put(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+ack:
+ msg_ack_remote(avp, CMD_ACK, 0);
+no_ack:
+ return ret;
+}
+
+static void process_avp_message(struct work_struct *work)
+{
+ struct tegra_avp_info *avp = container_of(work, struct tegra_avp_info,
+ recv_work);
+ struct msg_data *msg = avp->msg_from_avp;
+
+ mutex_lock(&avp->from_avp_lock);
+ rmb();
+ switch (msg->cmd) {
+ case CMD_CONNECT:
+ process_connect_locked(avp, msg);
+ break;
+ case CMD_DISCONNECT:
+ process_disconnect_locked(avp, msg);
+ break;
+ case CMD_MESSAGE:
+ process_message(avp, msg, GFP_KERNEL);
+ break;
+ default:
+ pr_err("%s: unknown cmd (%x) received\n", __func__, msg->cmd);
+ break;
+ }
+ mutex_unlock(&avp->from_avp_lock);
+}
+
+static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
+{
+ struct tegra_avp_info *avp = data;
+ struct msg_data *msg = avp->msg_from_avp;
+ u32 mbox_msg;
+ unsigned long flags;
+ int ret;
+
+ mbox_msg = mbox_readl(MBOX_FROM_AVP);
+ mbox_writel(0, MBOX_FROM_AVP);
+
+ DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);
+
+ /* XXX: re-use previous message? */
+ if (!(mbox_msg & MBOX_MSG_VALID)) {
+ WARN_ON(1);
+ goto done;
+ }
+
+ mbox_msg <<= 4;
+ if (mbox_msg == 0x2f00bad0UL) {
+ pr_info("%s: petting watchdog\n", __func__);
+ goto done;
+ }
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->shutdown) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ goto done;
+ } else if (avp->defer_remote) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ goto defer;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ rmb();
+ if (msg->cmd == CMD_MESSAGE) {
+ ret = process_message(avp, msg, GFP_ATOMIC);
+ if (ret != -ENOMEM)
+ goto done;
+ pr_info("%s: deferring message (%d)\n", __func__, ret);
+ }
+defer:
+ queue_work(avp->recv_wq, &avp->recv_work);
+done:
+ return IRQ_HANDLED;
+}
+
+static int avp_reset(struct tegra_avp_info *avp, unsigned long reset_addr)
+{
+ unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
+ dma_addr_t stub_data_phys;
+ unsigned long timeout;
+ int ret = 0;
+
+ writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+
+ _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
+ _tegra_avp_boot_stub_data.jump_addr = reset_addr;
+ wmb();
+ stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
+ sizeof(_tegra_avp_boot_stub_data),
+ DMA_TO_DEVICE);
+
+ writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);
+
+ pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__, readl(TEGRA_AVP_RESET_VECTOR_ADDR));
+ pr_info("%s: Resetting AVP: reset_addr=%lx\n", __func__, reset_addr);
+
+ tegra_periph_reset_assert(avp->cop_clk);
+ udelay(10);
+ tegra_periph_reset_deassert(avp->cop_clk);
+
+ writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
+
+ /* the AVP firmware will reprogram its reset vector as the kernel
+ * starts, so a dead kernel can be detected by polling this value */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (time_before(jiffies, timeout)) {
+ pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__, readl(TEGRA_AVP_RESET_VECTOR_ADDR));
+ if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
+ break;
+ cpu_relax();
+ }
+ if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys) {
+ pr_err("%s: Timed out waiting for AVP kernel to start\n", __func__);
+ ret = -EINVAL;
+ }
+ pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__, readl(TEGRA_AVP_RESET_VECTOR_ADDR));
+ WARN_ON(ret);
+ dma_unmap_single(NULL, stub_data_phys,
+ sizeof(_tegra_avp_boot_stub_data),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static void avp_halt(struct tegra_avp_info *avp)
+{
+ /* ensure the AVP is halted */
+ writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+ tegra_periph_reset_assert(avp->cop_clk);
+
+ /* set up the initial memory areas and mailbox contents */
+ *((u32 *)avp->msg_from_avp) = 0;
+ *((u32 *)avp->msg_to_avp) = 0xfeedf00d;
+ mbox_writel(0, MBOX_FROM_AVP);
+ mbox_writel(0, MBOX_TO_AVP);
+}
+
+/* Note: CPU_PORT server and AVP_PORT client are registered with the avp
+ * node, but are actually meant to be processed on our side (either
+ * by the svc thread for processing remote calls or by the client
+ * of the char dev for receiving replies for managing remote
+ * libraries/modules). */
+
+static int avp_init(struct tegra_avp_info *avp)
+{
+ const struct firmware *avp_fw;
+ int ret;
+ struct trpc_endpoint *ep;
+ char fw_file[30];
+
+ avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
+ if (IS_ERR_OR_NULL(avp->nvmap_libs)) {
+ pr_err("%s: cannot create libs nvmap client\n", __func__);
+ ret = PTR_ERR(avp->nvmap_libs);
+ goto err_nvmap_create_libs_client;
+ }
+
+ /* put the address of the shared mem area into the mailbox for AVP
+ * to read out when its kernel boots. */
+ mbox_writel(avp->msg, MBOX_TO_AVP);
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
+ /* paddr is any address returned from nvmap_pin */
+ /* vaddr is AVP_KERNEL_VIRT_BASE */
+ pr_info("%s: Using AVP MMU to relocate AVP kernel\n", __func__);
+ sprintf(fw_file, "nvrm_avp.bin");
+ avp->reset_addr = AVP_KERNEL_VIRT_BASE;
+#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
+ /* paddr is any address behind SMMU */
+ /* vaddr is TEGRA_SMMU_BASE */
+ pr_info("%s: Using SMMU at %lx to load AVP kernel\n",
+ __func__, (unsigned long)avp->kernel_phys);
+ BUG_ON(avp->kernel_phys != 0xeff00000
+ && avp->kernel_phys != 0x0ff00000);
+ sprintf(fw_file, "nvrm_avp_%08lx.bin", (unsigned long)avp->kernel_phys);
+ avp->reset_addr = avp->kernel_phys;
+#else /* nvmem= carveout */
+ /* paddr is found in nvmem= carveout */
+ /* vaddr is same as paddr */
+ /* Find nvmem carveout */
+ if (!pfn_valid(__phys_to_pfn(0x8e000000))) {
+ avp->kernel_phys = 0x8e000000;
+ }
+ else if (!pfn_valid(__phys_to_pfn(0x9e000000))) {
+ avp->kernel_phys = 0x9e000000;
+ }
+ else if (!pfn_valid(__phys_to_pfn(0xbe000000))) {
+ avp->kernel_phys = 0xbe000000;
+ }
+ else {
+ pr_err("Cannot find nvmem= carveout to load AVP kernel\n");
+ pr_err("Check kernel command line "
+ "to see if nvmem= is defined\n");
+ BUG();
+ }
+ pr_info("%s: Using nvmem= carveout at %lx to load AVP kernel\n",
+ __func__, (unsigned long)avp->kernel_phys);
+ sprintf(fw_file, "nvrm_avp_%08lx.bin", (unsigned long)avp->kernel_phys);
+ avp->reset_addr = avp->kernel_phys;
+ avp->kernel_data = ioremap(avp->kernel_phys, SZ_1M);
+#endif
+
+ ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
+ if (ret) {
+ pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
+ goto err_req_fw;
+ }
+ pr_info("%s: Reading firmware from '%s' (%d bytes)\n", __func__,
+ fw_file, avp_fw->size);
+
+ pr_info("%s: Loading AVP kernel at vaddr=%p paddr=%lx\n",
+ __func__, avp->kernel_data, (unsigned long)avp->kernel_phys);
+ memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
+ memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);
+
+ wmb();
+ release_firmware(avp_fw);
+
+ tegra_init_legacy_irq_cop();
+
+ ret = avp_reset(avp, avp->reset_addr);
+ if (ret) {
+ pr_err("%s: cannot reset the AVP.. aborting..\n", __func__);
+ goto err_reset;
+ }
+
+ enable_irq(avp->mbox_from_avp_pend_irq);
+ /* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
+ * ready for remote commands. Then, connect to the
+ * remote RPC_AVP_PORT to be able to send library load/unload and
+ * suspend commands to it */
+ ret = avp_svc_start(avp->avp_svc);
+ if (ret)
+ goto err_avp_svc_start;
+
+ ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
+ NULL, -1);
+ if (IS_ERR(ep)) {
+ pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
+ ret = PTR_ERR(ep);
+ goto err_rpc_avp_port;
+ }
+ avp->avp_ep = ep;
+
+ avp->initialized = true;
+ smp_wmb();
+ pr_info("%s: avp init done\n", __func__);
+ return 0;
+
+err_rpc_avp_port:
+ avp_svc_stop(avp->avp_svc);
+err_avp_svc_start:
+ disable_irq(avp->mbox_from_avp_pend_irq);
+err_reset:
+ avp_halt(avp);
+err_req_fw:
+ nvmap_client_put(avp->nvmap_libs);
+err_nvmap_create_libs_client:
+ avp->nvmap_libs = NULL;
+ return ret;
+}
+
+static void avp_uninit(struct tegra_avp_info *avp)
+{
+ unsigned long flags;
+ struct rb_node *n;
+ struct remote_info *rinfo;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->initialized = false;
+ avp->shutdown = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ disable_irq(avp->mbox_from_avp_pend_irq);
+ cancel_work_sync(&avp->recv_work);
+
+ avp_halt(avp);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ while ((n = rb_first(&avp->endpoints)) != NULL) {
+ rinfo = rb_entry(n, struct remote_info, rb_node);
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ avp_svc_stop(avp->avp_svc);
+
+ if (avp->avp_ep) {
+ trpc_close(avp->avp_ep);
+ avp->avp_ep = NULL;
+ }
+
+ libs_cleanup(avp);
+
+ avp->shutdown = false;
+ smp_wmb();
+ pr_info("%s: avp teardown done\n", __func__);
+}
+
+/* returns the remote lib handle in lib->handle */
+static int _load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib,
+ bool from_user)
+{
+ struct svc_lib_attach svc;
+ struct svc_lib_attach_resp resp;
+ const struct firmware *fw;
+ void *args;
+ struct nvmap_handle_ref *lib_handle;
+ void *lib_data;
+ phys_addr_t lib_phys;
+ int ret;
+
+ DBG(AVP_DBG_TRACE_LIB, "avp_lib: loading library '%s'\n", lib->name);
+
+ args = kmalloc(lib->args_len, GFP_KERNEL);
+ if (!args) {
+ pr_err("avp_lib: can't alloc mem for args (%d)\n",
+ lib->args_len);
+ return -ENOMEM;
+ }
+
+ if (!from_user)
+ memcpy(args, lib->args, lib->args_len);
+ else if (copy_from_user(args, lib->args, lib->args_len)) {
+ pr_err("avp_lib: can't copy lib args\n");
+ ret = -EFAULT;
+ goto err_cp_args;
+ }
+
+ ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
+ if (ret) {
+ pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
+ goto err_req_fw;
+ }
+
+ lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
+ NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR_OR_NULL(lib_handle)) {
+ pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
+ ret = PTR_ERR(lib_handle);
+ goto err_nvmap_alloc;
+ }
+
+ lib_data = nvmap_mmap(lib_handle);
+ if (!lib_data) {
+ pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
+ if (IS_ERR_OR_NULL((void *)lib_phys)) {
+ pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
+ ret = PTR_ERR((void *)lib_phys);
+ goto err_nvmap_pin;
+ }
+
+ memcpy(lib_data, fw->data, fw->size);
+
+ svc.svc_id = SVC_LIBRARY_ATTACH;
+ svc.address = lib_phys;
+ svc.args_len = lib->args_len;
+ svc.lib_size = fw->size;
+ svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
+ AVP_LIB_REASON_ATTACH;
+ memcpy(svc.args, args, lib->args_len);
+ wmb();
+
+ /* send message, wait for reply */
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret)
+ goto err_send_msg;
+
+ ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+ sizeof(resp), -1);
+ if (ret != sizeof(resp)) {
+ pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
+ goto err_recv_msg;
+ } else if (resp.err) {
+ pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
+ resp.err, lib->name);
+ ret = -EPROTO;
+ goto err_recv_msg;
+ }
+ lib->handle = resp.lib_id;
+ ret = 0;
+ DBG(AVP_DBG_TRACE_LIB,
+ "avp_lib: Successfully loaded library %s (lib_id=%x)\n",
+ lib->name, resp.lib_id);
+
+ /* We free the memory here because by this point the AVP has already
+ * requested memory for the library for all the sections since it does
+ * its own relocation and memory management. So, our allocations were
+ * temporary to hand the library code over to the AVP.
+ */
+
+err_recv_msg:
+err_send_msg:
+ nvmap_unpin(avp->nvmap_libs, lib_handle);
+err_nvmap_pin:
+ nvmap_munmap(lib_handle, lib_data);
+err_nvmap_mmap:
+ nvmap_free(avp->nvmap_libs, lib_handle);
+err_nvmap_alloc:
+ release_firmware(fw);
+err_req_fw:
+err_cp_args:
+ kfree(args);
+ return ret;
+}
+
+static int send_unload_lib_msg(struct tegra_avp_info *avp, u32 handle,
+ const char *name)
+{
+ struct svc_lib_detach svc;
+ struct svc_lib_detach_resp resp;
+ int ret;
+
+ svc.svc_id = SVC_LIBRARY_DETACH;
+ svc.reason = AVP_LIB_REASON_DETACH;
+ svc.lib_id = handle;
+
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("avp_lib: can't send unload message to avp for '%s'\n",
+ name);
+ goto err;
+ }
+
+ /* Give it a few extra moments to unload. */
+ msleep(20);
+
+ ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+ sizeof(resp), -1);
+ if (ret != sizeof(resp)) {
+ pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
+ name, ret);
+ } else if (resp.err) {
+ pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
+ resp.err, name);
+ ret = -EPROTO;
+ } else {
+ pr_info("avp_lib: Successfully unloaded '%s'\n",
+ name);
+ ret = 0;
+ }
+
+err:
+ return ret;
+}
+
+static struct lib_item *_find_lib_locked(struct tegra_avp_info *avp, u32 handle)
+{
+ struct lib_item *item;
+
+ list_for_each_entry(item, &avp->libs, list) {
+ if (item->handle == handle)
+ return item;
+ }
+ return NULL;
+}
+
+static int _insert_lib_locked(struct tegra_avp_info *avp, u32 handle,
+ char *name)
+{
+ struct lib_item *item;
+
+ item = kzalloc(sizeof(struct lib_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->handle = handle;
+ strlcpy(item->name, name, TEGRA_AVP_LIB_MAX_NAME);
+ list_add_tail(&item->list, &avp->libs);
+ return 0;
+}
+
+static void _delete_lib_locked(struct tegra_avp_info *avp,
+ struct lib_item *item)
+{
+ list_del(&item->list);
+ kfree(item);
+}
+
+static int handle_load_lib_ioctl(struct tegra_avp_info *avp, unsigned long arg)
+{
+ struct tegra_avp_lib lib;
+ int ret;
+
+ pr_debug("%s: ioctl\n", __func__);
+ if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
+ return -EFAULT;
+ lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
+
+ if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
+ pr_err("%s: library args too long (%d)\n", __func__,
+ lib.args_len);
+ return -E2BIG;
+ }
+
+ mutex_lock(&avp->libs_lock);
+ ret = _load_lib(avp, &lib, true);
+ if (ret)
+ goto err_load_lib;
+
+ if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
+ /* TODO: probably need to free the library from remote
+ * we just loaded */
+ ret = -EFAULT;
+ goto err_copy_to_user;
+ }
+ ret = _insert_lib_locked(avp, lib.handle, lib.name);
+ if (ret) {
+ pr_err("%s: can't insert lib (%d)\n", __func__, ret);
+ goto err_insert_lib;
+ }
+
+ mutex_unlock(&avp->libs_lock);
+ return 0;
+
+err_insert_lib:
+err_copy_to_user:
+ send_unload_lib_msg(avp, lib.handle, lib.name);
+err_load_lib:
+ mutex_unlock(&avp->libs_lock);
+ return ret;
+}
+
+static void libs_cleanup(struct tegra_avp_info *avp)
+{
+ struct lib_item *lib;
+ struct lib_item *lib_tmp;
+
+ mutex_lock(&avp->libs_lock);
+ list_for_each_entry_safe(lib, lib_tmp, &avp->libs, list) {
+ _delete_lib_locked(avp, lib);
+ }
+
+ nvmap_client_put(avp->nvmap_libs);
+ avp->nvmap_libs = NULL;
+ mutex_unlock(&avp->libs_lock);
+}
+
+static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ int ret;
+
+ if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case TEGRA_AVP_IOCTL_LOAD_LIB:
+ ret = handle_load_lib_ioctl(avp, arg);
+ break;
+ case TEGRA_AVP_IOCTL_UNLOAD_LIB:
+ ret = tegra_avp_unload_lib(avp, arg);
+ break;
+ default:
+ pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+int tegra_avp_open(struct tegra_avp_info **avp)
+{
+ struct tegra_avp_info *new_avp = tegra_avp;
+ int ret = 0;
+
+ pr_debug("%s: open\n", __func__);
+ mutex_lock(&new_avp->open_lock);
+
+ if (!new_avp->refcount)
+ ret = avp_init(new_avp);
+
+ if (ret < 0) {
+ mutex_unlock(&new_avp->open_lock);
+ new_avp = NULL;
+ goto out;
+ }
+
+ new_avp->refcount++;
+
+ mutex_unlock(&new_avp->open_lock);
+out:
+ *avp = new_avp;
+ return ret;
+}
+
+static int tegra_avp_open_fops(struct inode *inode, struct file *file)
+{
+ struct tegra_avp_info *avp;
+
+ nonseekable_open(inode, file);
+ return tegra_avp_open(&avp);
+}
+
+int tegra_avp_release(struct tegra_avp_info *avp)
+{
+ int ret = 0;
+
+ pr_debug("%s: close\n", __func__);
+ mutex_lock(&avp->open_lock);
+ if (!avp->refcount) {
+ pr_err("%s: releasing while in invalid state\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (avp->refcount > 0)
+ avp->refcount--;
+ if (!avp->refcount)
+ avp_uninit(avp);
+
+out:
+ mutex_unlock(&avp->open_lock);
+ return ret;
+}
+
+static int tegra_avp_release_fops(struct inode *inode, struct file *file)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ return tegra_avp_release(avp);
+}
+
+static int avp_enter_lp0(struct tegra_avp_info *avp)
+{
+ volatile u32 *avp_suspend_done = avp->iram_backup_data
+ + TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE;
+ struct svc_enter_lp0 svc;
+ unsigned long endtime;
+ int ret;
+
+ svc.svc_id = SVC_ENTER_LP0;
+ svc.src_addr = (u32)TEGRA_IRAM_BASE + TEGRA_RESET_HANDLER_SIZE;
+ svc.buf_addr = (u32)avp->iram_backup_phys;
+ svc.buf_size = TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE;
+
+ *avp_suspend_done = 0;
+ wmb();
+
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: cannot send AVP suspend message\n", __func__);
+ return ret;
+ }
+
+ endtime = jiffies + msecs_to_jiffies(1000);
+ rmb();
+ while ((*avp_suspend_done == 0) && time_before(jiffies, endtime)) {
+ udelay(10);
+ rmb();
+ }
+
+ rmb();
+ if (*avp_suspend_done == 0) {
+ pr_err("%s: AVP failed to suspend\n", __func__);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ unsigned long flags;
+ int ret;
+
+ pr_info("%s()+\n", __func__);
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (!avp->initialized) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return 0;
+ }
+ avp->suspending = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ ret = avp_enter_lp0(avp);
+ if (ret)
+ goto err;
+
+ avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
+ if (!avp->resume_addr) {
+ pr_err("%s: AVP failed to set it's resume address\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ disable_irq(avp->mbox_from_avp_pend_irq);
+
+ pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
+ avp->resume_addr &= 0xfffffffeUL;
+ pr_info("%s()-\n", __func__);
+
+ return 0;
+
+err:
+ /* TODO: we need to kill the AVP so that when we come back
+ * it could be reinitialized.. We'd probably need to kill
+ * the users of it so they don't have the wrong state.
+ */
+ return ret;
+}
+
+static int tegra_avp_resume(struct platform_device *pdev)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+ int ret = 0;
+
+ pr_info("%s()+\n", __func__);
+ smp_rmb();
+ if (!avp->initialized)
+ goto out;
+
+ BUG_ON(!avp->resume_addr);
+
+ avp_reset(avp, avp->resume_addr);
+ avp->resume_addr = 0;
+ avp->suspending = false;
+ smp_wmb();
+ enable_irq(avp->mbox_from_avp_pend_irq);
+
+ pr_info("%s()-\n", __func__);
+
+out:
+ return ret;
+}
+
+static const struct file_operations tegra_avp_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_avp_open_fops,
+ .release = tegra_avp_release_fops,
+ .unlocked_ioctl = tegra_avp_ioctl,
+};
+
+static struct trpc_node avp_trpc_node = {
+ .name = "avp-remote",
+ .type = TRPC_NODE_REMOTE,
+ .try_connect = avp_node_try_connect,
+};
+
+static int tegra_avp_probe(struct platform_device *pdev)
+{
+ void *msg_area;
+ struct tegra_avp_info *avp;
+ int ret = 0;
+ int irq;
+ unsigned int heap_mask;
+
+ irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
+ if (irq < 0) {
+ pr_err("%s: invalid platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ avp = kzalloc(sizeof(struct tegra_avp_info), GFP_KERNEL);
+ if (!avp) {
+ pr_err("%s: cannot allocate tegra_avp_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
+ if (IS_ERR_OR_NULL(avp->nvmap_drv)) {
+ pr_err("%s: cannot create drv nvmap client\n", __func__);
+ ret = PTR_ERR(avp->nvmap_drv);
+ goto err_nvmap_create_drv_client;
+ }
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
+ heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
+#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
+ heap_mask = NVMAP_HEAP_IOVMM;
+#else /* nvmem= carveout */
+ heap_mask = 0;
+#endif
+
+ if (heap_mask == NVMAP_HEAP_IOVMM) {
+ int i;
+ /* Tegra3 A01 has a different SMMU address range at 0xe0000000- */
+ u32 iovmm_addr[] = {0x0ff00000, 0xeff00000};
+
+ for (i = 0; i < ARRAY_SIZE(iovmm_addr); i++) {
+ avp->kernel_handle = nvmap_alloc_iovm(avp->nvmap_drv,
+ SZ_1M, L1_CACHE_BYTES,
+ NVMAP_HANDLE_WRITE_COMBINE,
+ iovmm_addr[i]);
+ if (!IS_ERR_OR_NULL(avp->kernel_handle))
+ break;
+ }
+ if (IS_ERR_OR_NULL(avp->kernel_handle)) {
+ pr_err("%s: cannot create handle\n", __func__);
+ ret = PTR_ERR(avp->kernel_handle);
+ goto err_nvmap_alloc;
+ }
+
+ avp->kernel_data = nvmap_mmap(avp->kernel_handle);
+ if (!avp->kernel_data) {
+ pr_err("%s: cannot map kernel handle\n", __func__);
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ avp->kernel_phys =
+ nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
+ if (IS_ERR_OR_NULL((void *)avp->kernel_phys)) {
+ pr_err("%s: cannot pin kernel handle\n", __func__);
+ ret = PTR_ERR((void *)avp->kernel_phys);
+ goto err_nvmap_pin;
+ }
+
+ pr_info("%s: allocated IOVM at %lx for AVP kernel\n",
+ __func__, (unsigned long)avp->kernel_phys);
+ }
+
+ if (heap_mask == NVMAP_HEAP_CARVEOUT_GENERIC) {
+ avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
+ NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR_OR_NULL(avp->kernel_handle)) {
+ pr_err("%s: cannot create handle\n", __func__);
+ ret = PTR_ERR(avp->kernel_handle);
+ goto err_nvmap_alloc;
+ }
+
+ avp->kernel_data = nvmap_mmap(avp->kernel_handle);
+ if (!avp->kernel_data) {
+ pr_err("%s: cannot map kernel handle\n", __func__);
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ avp->kernel_phys = nvmap_pin(avp->nvmap_drv,
+ avp->kernel_handle);
+ if (IS_ERR_OR_NULL((void *)avp->kernel_phys)) {
+ pr_err("%s: cannot pin kernel handle\n", __func__);
+ ret = PTR_ERR((void *)avp->kernel_phys);
+ goto err_nvmap_pin;
+ }
+
+ pr_info("%s: allocated carveout memory at %lx for AVP kernel\n",
+ __func__, (unsigned long)avp->kernel_phys);
+ }
+
+ /* allocate an extra 4 bytes at the end which AVP uses to signal to
+ * us that it is done suspending.
+ */
+ avp->iram_backup_handle =
+ nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
+ L1_CACHE_BYTES, NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR_OR_NULL(avp->iram_backup_handle)) {
+ pr_err("%s: cannot create handle for iram backup\n", __func__);
+ ret = PTR_ERR(avp->iram_backup_handle);
+ goto err_iram_nvmap_alloc;
+ }
+ avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
+ if (!avp->iram_backup_data) {
+ pr_err("%s: cannot map iram backup handle\n", __func__);
+ ret = -ENOMEM;
+ goto err_iram_nvmap_mmap;
+ }
+ avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
+ avp->iram_backup_handle);
+ if (IS_ERR_OR_NULL((void *)avp->iram_backup_phys)) {
+ pr_err("%s: cannot pin iram backup handle\n", __func__);
+ ret = PTR_ERR((void *)avp->iram_backup_phys);
+ goto err_iram_nvmap_pin;
+ }
+
+ avp->mbox_from_avp_pend_irq = irq;
+ avp->endpoints = RB_ROOT;
+ spin_lock_init(&avp->state_lock);
+ mutex_init(&avp->open_lock);
+ mutex_init(&avp->to_avp_lock);
+ mutex_init(&avp->from_avp_lock);
+ INIT_WORK(&avp->recv_work, process_avp_message);
+
+ mutex_init(&avp->libs_lock);
+ INIT_LIST_HEAD(&avp->libs);
+
+ avp->recv_wq = alloc_workqueue("avp-msg-recv",
+ WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
+ if (!avp->recv_wq) {
+ pr_err("%s: can't create recve workqueue\n", __func__);
+ ret = -ENOMEM;
+ goto err_create_wq;
+ }
+
+ avp->cop_clk = clk_get(&pdev->dev, "cop");
+ if (IS_ERR_OR_NULL(avp->cop_clk)) {
+ pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
+ ret = -ENOENT;
+ goto err_get_cop_clk;
+ }
+
+ msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
+ &avp->msg_area_addr, GFP_KERNEL);
+ if (!msg_area) {
+ pr_err("%s: cannot allocate msg_area\n", __func__);
+ ret = -ENOMEM;
+ goto err_alloc_msg_area;
+ }
+ memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
+ avp->msg = ((avp->msg_area_addr >> 4) |
+ MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
+ avp->msg_to_avp = msg_area;
+ avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;
+
+ avp_halt(avp);
+
+ avp_trpc_node.priv = avp;
+ ret = trpc_node_register(&avp_trpc_node);
+ if (ret) {
+ pr_err("%s: Can't register avp rpc node\n", __func__);
+ goto err_node_reg;
+ }
+ avp->rpc_node = &avp_trpc_node;
+
+ avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
+ if (IS_ERR_OR_NULL(avp->avp_svc)) {
+ pr_err("%s: Cannot initialize avp_svc\n", __func__);
+ ret = PTR_ERR(avp->avp_svc);
+ goto err_avp_svc_init;
+ }
+
+ avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ avp->misc_dev.name = "tegra_avp";
+ avp->misc_dev.fops = &tegra_avp_fops;
+
+ ret = misc_register(&avp->misc_dev);
+ if (ret) {
+ pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
+ goto err_misc_reg;
+ }
+
+ ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
+ if (ret) {
+ pr_err("%s: cannot register irq handler\n", __func__);
+ goto err_req_irq_pend;
+ }
+ disable_irq(avp->mbox_from_avp_pend_irq);
+
+ tegra_avp = avp;
+
+ pr_info("%s: message area %lx/%lx\n", __func__,
+ (unsigned long)avp->msg_area_addr,
+ (unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);
+
+ return 0;
+
+err_req_irq_pend:
+ misc_deregister(&avp->misc_dev);
+err_misc_reg:
+ avp_svc_destroy(avp->avp_svc);
+err_avp_svc_init:
+ trpc_node_unregister(avp->rpc_node);
+err_node_reg:
+ dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
+ avp->msg_area_addr);
+err_alloc_msg_area:
+ clk_put(avp->cop_clk);
+err_get_cop_clk:
+ destroy_workqueue(avp->recv_wq);
+err_create_wq:
+ nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_pin:
+ nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+err_iram_nvmap_mmap:
+ nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_alloc:
+ nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_pin:
+ nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+err_nvmap_mmap:
+ nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_alloc:
+ nvmap_client_put(avp->nvmap_drv);
+err_nvmap_create_drv_client:
+ kfree(avp);
+ tegra_avp = NULL;
+ return ret;
+}
+
+static int tegra_avp_remove(struct platform_device *pdev)
+{
+ struct tegra_avp_info *avp = tegra_avp;
+
+ if (!avp)
+ return 0;
+
+ mutex_lock(&avp->open_lock);
+ /* ensure that no one can open while we tear down */
+ if (avp->refcount) {
+ mutex_unlock(&avp->open_lock);
+ return -EBUSY;
+ }
+ mutex_unlock(&avp->open_lock);
+
+ misc_deregister(&avp->misc_dev);
+
+ avp_halt(avp);
+
+ avp_svc_destroy(avp->avp_svc);
+ trpc_node_unregister(avp->rpc_node);
+ dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
+ avp->msg_area_addr);
+ clk_put(avp->cop_clk);
+ destroy_workqueue(avp->recv_wq);
+ nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+ nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+ nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+ nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+ nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+ nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+ nvmap_client_put(avp->nvmap_drv);
+ kfree(avp);
+ tegra_avp = NULL;
+ return 0;
+}
+
+int tegra_avp_load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib)
+{
+ int ret;
+
+ if (!avp)
+ return -ENODEV;
+
+ if (!lib)
+ return -EFAULT;
+
+ lib->name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
+
+ if (lib->args_len > TEGRA_AVP_LIB_MAX_ARGS) {
+ pr_err("%s: library args too long (%d)\n", __func__,
+ lib->args_len);
+ return -E2BIG;
+ }
+
+ mutex_lock(&avp->libs_lock);
+ ret = _load_lib(avp, lib, false);
+ if (ret)
+ goto err_load_lib;
+
+ ret = _insert_lib_locked(avp, lib->handle, lib->name);
+ if (ret) {
+ pr_err("%s: can't insert lib (%d)\n", __func__, ret);
+ goto err_insert_lib;
+ }
+
+ mutex_unlock(&avp->libs_lock);
+ return 0;
+
+err_insert_lib:
+ ret = send_unload_lib_msg(avp, lib->handle, lib->name);
+ if (!ret)
+ DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", lib->name);
+ else
+ pr_err("avp_lib: can't unload lib '%s' (%d)\n", lib->name, ret);
+ lib->handle = 0;
+err_load_lib:
+ mutex_unlock(&avp->libs_lock);
+ return ret;
+}
+
+int tegra_avp_unload_lib(struct tegra_avp_info *avp, unsigned long handle)
+{
+ struct lib_item *item;
+ int ret;
+
+ if (!avp)
+ return -ENODEV;
+
+ mutex_lock(&avp->libs_lock);
+ item = _find_lib_locked(avp, handle);
+ if (!item) {
+ pr_err("avp_lib: avp lib with handle 0x%x not found\n",
+ (u32)handle);
+ ret = -ENOENT;
+ goto err_find;
+ }
+ ret = send_unload_lib_msg(avp, item->handle, item->name);
+ if (!ret)
+ DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
+ else
+ pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
+ item->handle, ret);
+ _delete_lib_locked(avp, item);
+
+err_find:
+ mutex_unlock(&avp->libs_lock);
+ return ret;
+}
+
+static struct platform_driver tegra_avp_driver = {
+ .probe = tegra_avp_probe,
+ .remove = tegra_avp_remove,
+ .suspend = tegra_avp_suspend,
+ .resume = tegra_avp_resume,
+ .driver = {
+ .name = TEGRA_AVP_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_avp_init(void)
+{
+ return platform_driver_register(&tegra_avp_driver);
+}
+
+static void __exit tegra_avp_exit(void)
+{
+ platform_driver_unregister(&tegra_avp_driver);
+}
+
+module_init(tegra_avp_init);
+module_exit(tegra_avp_exit);
diff --git a/drivers/media/video/tegra/avp/avp.h b/drivers/media/video/tegra/avp/avp.h
new file mode 100644
index 000000000000..4f2287743a06
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_H
+#define __MEDIA_VIDEO_TEGRA_AVP_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "trpc.h"
+
+struct avp_svc_info;
+
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node);
+void avp_svc_destroy(struct avp_svc_info *avp_svc);
+int avp_svc_start(struct avp_svc_info *svc);
+void avp_svc_stop(struct avp_svc_info *svc);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_msg.h b/drivers/media/video/tegra/avp/avp_msg.h
new file mode 100644
index 000000000000..615d890d5444
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_msg.h
@@ -0,0 +1,358 @@
+/* drivers/media/video/tegra/avp/avp_msg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+#define __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+
+#include <linux/tegra_avp.h>
+#include <linux/types.h>
+
+/* Note: the port name string is not NUL terminated, so make sure to
+ * allocate appropriate space locally when operating on the string */
+#define XPC_PORT_NAME_LEN 16
+
+#define SVC_ARGS_MAX_LEN 220
+#define SVC_MAX_STRING_LEN 200
+
+#define AVP_ERR_ENOTSUP 0x2
+#define AVP_ERR_EINVAL 0x4
+#define AVP_ERR_ENOMEM 0x6
+#define AVP_ERR_EACCES 0x00030010
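Because the port name described at the top of this header is not NUL terminated, code that needs a C string copies it into a local buffer one byte larger and terminates it explicitly; process_connect_locked() in avp.c does exactly this. A minimal sketch (hypothetical helper, not part of the patch):

/* Sketch: convert the fixed-width, non-NUL-terminated port name into a
 * C string; same approach as process_connect_locked() in avp.c.
 * Assumes <linux/string.h> for memcpy(). */
static inline void xpc_port_name_to_cstr(const char *port_name,
					 char buf[XPC_PORT_NAME_LEN + 1])
{
	memcpy(buf, port_name, XPC_PORT_NAME_LEN);
	buf[XPC_PORT_NAME_LEN] = '\0';
}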
+
+enum {
+ SVC_NVMAP_CREATE = 0,
+ SVC_NVMAP_CREATE_RESPONSE = 1,
+ SVC_NVMAP_FREE = 3,
+ SVC_NVMAP_ALLOC = 4,
+ SVC_NVMAP_ALLOC_RESPONSE = 5,
+ SVC_NVMAP_PIN = 6,
+ SVC_NVMAP_PIN_RESPONSE = 7,
+ SVC_NVMAP_UNPIN = 8,
+ SVC_NVMAP_UNPIN_RESPONSE = 9,
+ SVC_NVMAP_GET_ADDRESS = 10,
+ SVC_NVMAP_GET_ADDRESS_RESPONSE = 11,
+ SVC_NVMAP_FROM_ID = 12,
+ SVC_NVMAP_FROM_ID_RESPONSE = 13,
+ SVC_MODULE_CLOCK = 14,
+ SVC_MODULE_CLOCK_RESPONSE = 15,
+ SVC_MODULE_RESET = 16,
+ SVC_MODULE_RESET_RESPONSE = 17,
+ SVC_POWER_REGISTER = 18,
+ SVC_POWER_UNREGISTER = 19,
+ SVC_POWER_STARVATION = 20,
+ SVC_POWER_BUSY_HINT = 21,
+ SVC_POWER_BUSY_HINT_MULTI = 22,
+ SVC_DFS_GETSTATE = 23,
+ SVC_DFS_GETSTATE_RESPONSE = 24,
+ SVC_POWER_RESPONSE = 25,
+ SVC_POWER_MAXFREQ = 26,
+ SVC_ENTER_LP0 = 27,
+ SVC_ENTER_LP0_RESPONSE = 28,
+ SVC_PRINTF = 29,
+ SVC_LIBRARY_ATTACH = 30,
+ SVC_LIBRARY_ATTACH_RESPONSE = 31,
+ SVC_LIBRARY_DETACH = 32,
+ SVC_LIBRARY_DETACH_RESPONSE = 33,
+ SVC_AVP_WDT_RESET = 34,
+ SVC_DFS_GET_CLK_UTIL = 35,
+ SVC_DFS_GET_CLK_UTIL_RESPONSE = 36,
+ SVC_MODULE_CLOCK_SET = 37,
+ SVC_MODULE_CLOCK_SET_RESPONSE = 38,
+ SVC_MODULE_CLOCK_GET = 39,
+ SVC_MODULE_CLOCK_GET_RESPONSE = 40,
+};
+
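+/* Every service message starts with a u32 svc_id holding one of the
+ * SVC_* values above; request ids are paired with their *_RESPONSE
+ * counterparts, and the rest of the message is the id-specific payload
+ * described by the structs below. */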
+struct svc_msg {
+ u32 svc_id;
+ u8 data[0];
+};
+
+struct svc_common_resp {
+ u32 svc_id;
+ u32 err;
+};
+
+struct svc_printf {
+ u32 svc_id;
+ const char str[SVC_MAX_STRING_LEN];
+};
+
+struct svc_enter_lp0 {
+ u32 svc_id;
+ u32 src_addr;
+ u32 buf_addr;
+ u32 buf_size;
+};
+
+/* nvmap messages */
+struct svc_nvmap_create {
+ u32 svc_id;
+ u32 size;
+};
+
+struct svc_nvmap_create_resp {
+ u32 svc_id;
+ u32 handle_id;
+ u32 err;
+};
+
+enum {
+ AVP_NVMAP_HEAP_EXTERNAL = 1,
+ AVP_NVMAP_HEAP_GART = 2,
+ AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT = 3,
+ AVP_NVMAP_HEAP_IRAM = 4,
+};
+
+struct svc_nvmap_alloc {
+ u32 svc_id;
+ u32 handle_id;
+ u32 heaps[4];
+ u32 num_heaps;
+ u32 align;
+ u32 mapping_type;
+};
+
+struct svc_nvmap_free {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_pin {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_pin_resp {
+ u32 svc_id;
+ u32 addr;
+};
+
+struct svc_nvmap_unpin {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_from_id {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_get_addr {
+ u32 svc_id;
+ u32 handle_id;
+ u32 offs;
+};
+
+struct svc_nvmap_get_addr_resp {
+ u32 svc_id;
+ u32 addr;
+};
+
+/* library management messages */
+enum {
+ AVP_LIB_REASON_ATTACH = 0,
+ AVP_LIB_REASON_DETACH = 1,
+ AVP_LIB_REASON_ATTACH_GREEDY = 2,
+};
+
+struct svc_lib_attach {
+ u32 svc_id;
+ u32 address;
+ u32 args_len;
+ u32 lib_size;
+ u8 args[SVC_ARGS_MAX_LEN];
+ u32 reason;
+};
+
+struct svc_lib_attach_resp {
+ u32 svc_id;
+ u32 err;
+ u32 lib_id;
+};
+
+struct svc_lib_detach {
+ u32 svc_id;
+ u32 reason;
+ u32 lib_id;
+};
+
+struct svc_lib_detach_resp {
+ u32 svc_id;
+ u32 err;
+};
+
+/* hw module management from the AVP side */
+enum {
+ AVP_MODULE_ID_AVP = 2,
+ AVP_MODULE_ID_VCP = 3,
+ AVP_MODULE_ID_BSEA = 27,
+ AVP_MODULE_ID_VDE = 28,
+ AVP_MODULE_ID_MPE = 29,
+};
+
+struct svc_module_ctrl {
+ u32 svc_id;
+ u32 module_id;
+ u32 client_id;
+ u8 enable;
+};
+
+struct svc_clock_ctrl {
+ u32 svc_id;
+ u32 module_id;
+ u32 clk_freq;
+};
+
+struct svc_clock_ctrl_response {
+ u32 svc_id;
+ u32 err;
+ u32 act_freq;
+};
+
+/* power messages */
+struct svc_pwr_register {
+ u32 svc_id;
+ u32 client_id;
+ u32 unused;
+};
+
+struct svc_pwr_register_resp {
+ u32 svc_id;
+ u32 err;
+ u32 client_id;
+};
+
+struct svc_pwr_starve_hint {
+ u32 svc_id;
+ u32 dfs_clk_id;
+ u32 client_id;
+ u8 starving;
+};
+
+struct svc_pwr_busy_hint {
+ u32 svc_id;
+ u32 dfs_clk_id;
+ u32 client_id;
+	u32 boost_ms; /* boost duration, in ms */
+	u32 boost_freq; /* in kHz */
+};
+
+struct svc_pwr_max_freq {
+ u32 svc_id;
+ u32 module_id;
+};
+
+struct svc_pwr_max_freq_resp {
+ u32 svc_id;
+ u32 freq;
+};
+
+/* dfs related messages */
+enum {
+ AVP_DFS_STATE_INVALID = 0,
+ AVP_DFS_STATE_DISABLED = 1,
+ AVP_DFS_STATE_STOPPED = 2,
+ AVP_DFS_STATE_CLOSED_LOOP = 3,
+ AVP_DFS_STATE_PROFILED_LOOP = 4,
+};
+
+struct svc_dfs_get_state_resp {
+ u32 svc_id;
+ u32 state;
+};
+
+enum {
+ AVP_DFS_CLK_CPU = 1,
+ AVP_DFS_CLK_AVP = 2,
+ AVP_DFS_CLK_SYSTEM = 3,
+ AVP_DFS_CLK_AHB = 4,
+ AVP_DFS_CLK_APB = 5,
+ AVP_DFS_CLK_VDE = 6,
+ /* external memory controller */
+ AVP_DFS_CLK_EMC = 7,
+};
+
+struct avp_clk_usage {
+ u32 min;
+ u32 max;
+ u32 curr_min;
+ u32 curr_max;
+ u32 curr;
+	u32 avg; /* average activity; its exact meaning is unspecified */
+};
+
+struct svc_dfs_get_clk_util {
+ u32 svc_id;
+ u32 dfs_clk_id;
+};
+
+/* all units are in kHz */
+struct svc_dfs_get_clk_util_resp {
+ u32 svc_id;
+ u32 err;
+ struct avp_clk_usage usage;
+};
+
+/************************/
+
+enum {
+ CMD_ACK = 0,
+ CMD_CONNECT = 2,
+ CMD_DISCONNECT = 3,
+ CMD_MESSAGE = 4,
+ CMD_RESPONSE = 5,
+};
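+
+/* Transport-level commands exchanged over the CPU<->AVP link. The
+ * SVC_* messages above are presumably carried as the payload of
+ * CMD_MESSAGE frames (struct msg_port_data). */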
+
+struct msg_data {
+ u32 cmd;
+ u8 data[0];
+};
+
+struct msg_ack {
+ u32 cmd;
+ u32 arg;
+};
+
+struct msg_connect {
+ u32 cmd;
+ u32 port_id;
+ /* not NUL terminated, just 0 padded */
+ char name[XPC_PORT_NAME_LEN];
+};
+
+struct msg_connect_reply {
+ u32 cmd;
+ u32 port_id;
+};
+
+struct msg_disconnect {
+ u32 cmd;
+ u32 port_id;
+};
+
+struct msg_disconnect_reply {
+ u32 cmd;
+ u32 ack;
+};
+
+struct msg_port_data {
+ u32 cmd;
+ u32 port_id;
+ u32 msg_len;
+ u8 data[0];
+};
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c
new file mode 100644
index 000000000000..c4346bfacdb0
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_svc.c
@@ -0,0 +1,874 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/tegra_avp.h>
+#include <linux/types.h>
+
+#include <mach/clk.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+enum {
+ AVP_DBG_TRACE_SVC = 1U << 0,
+};
+
+static u32 debug_mask;
+module_param_named(debug_mask, debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (unlikely(debug_mask & (flag))) pr_info(args); } while (0)
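+
+/* The trace bits above can usually be toggled at runtime through the
+ * debug_mask module parameter (e.g. under
+ * /sys/module/<module>/parameters/debug_mask; the exact path depends
+ * on how this driver is built). */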
+
+enum {
+ CLK_REQUEST_VCP = 0,
+ CLK_REQUEST_BSEA = 1,
+ CLK_REQUEST_VDE = 2,
+ CLK_REQUEST_AVP = 3,
+ NUM_CLK_REQUESTS,
+};
+
+struct avp_module {
+ const char *name;
+ u32 clk_req;
+};
+
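+/* avp_modules[] is indexed directly by AVP_MODULE_ID_*; ids that have
+ * no initializer keep a NULL .name, which find_avp_module() below
+ * treats as "unknown module". */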
+static struct avp_module avp_modules[] = {
+ [AVP_MODULE_ID_AVP] = {
+ .name = "cop",
+ .clk_req = CLK_REQUEST_AVP,
+ },
+ [AVP_MODULE_ID_VCP] = {
+ .name = "vcp",
+ .clk_req = CLK_REQUEST_VCP,
+ },
+ [AVP_MODULE_ID_BSEA] = {
+ .name = "bsea",
+ .clk_req = CLK_REQUEST_BSEA,
+ },
+ [AVP_MODULE_ID_VDE] = {
+ .name = "vde",
+ .clk_req = CLK_REQUEST_VDE,
+ },
+};
+#define NUM_AVP_MODULES ARRAY_SIZE(avp_modules)
+
+struct avp_clk {
+ struct clk *clk;
+ int refcnt;
+ struct avp_module *mod;
+};
+
+struct avp_svc_info {
+ struct avp_clk clks[NUM_CLK_REQUESTS];
+ /* used for dvfs */
+ struct clk *sclk;
+ struct clk *emcclk;
+
+ struct mutex clk_lock;
+
+ struct trpc_endpoint *cpu_ep;
+ struct task_struct *svc_thread;
+
+ /* client for remote allocations, for easy tear down */
+ struct nvmap_client *nvmap_remote;
+ struct trpc_node *rpc_node;
+ unsigned long max_avp_rate;
+ unsigned long emc_rate;
+};
+
+static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
+ struct svc_nvmap_create_resp resp;
+ struct nvmap_handle_ref *handle;
+ u32 handle_id = 0;
+ u32 err = 0;
+
+ handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
+ if (unlikely(IS_ERR(handle))) {
+ pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
+ msg->size);
+ err = AVP_ERR_ENOMEM;
+ } else
+ handle_id = (u32)nvmap_ref_to_id(handle);
+
+ resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
+ resp.err = err;
+ resp.handle_id = handle_id;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+ /* TODO: do we need to put the handle if send_msg failed? */
+}
+
+static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle *handle;
+ u32 err = 0;
+ u32 heap_mask = 0;
+ int i;
+ size_t align;
+
+ handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
+ err = AVP_ERR_EACCES;
+ goto out;
+ }
+
+ if (msg->num_heaps > 4) {
+ pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
+ msg->num_heaps);
+		/* TODO: should we error out instead? */
+ msg->num_heaps = 0;
+ }
+ if (msg->num_heaps == 0)
+ heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
+
+ for (i = 0; i < msg->num_heaps; i++) {
+ switch (msg->heaps[i]) {
+ case AVP_NVMAP_HEAP_EXTERNAL:
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ break;
+ case AVP_NVMAP_HEAP_GART:
+ heap_mask |= NVMAP_HEAP_IOVMM;
+ break;
+ case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+ break;
+ case AVP_NVMAP_HEAP_IRAM:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
+ break;
+ default:
+ break;
+ }
+ }
+
+ align = max_t(size_t, L1_CACHE_BYTES, msg->align);
+ err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
+ heap_mask, align, 0);
+ nvmap_handle_put(handle);
+ if (err) {
+ pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
+ msg->handle_id, err);
+ err = AVP_ERR_ENOMEM;
+ }
+
+out:
+ resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
+
+ nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+}
+
+static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
+ struct svc_nvmap_pin_resp resp;
+ struct nvmap_handle_ref *handle;
+ phys_addr_t addr = ~0UL;
+ unsigned long id = msg->handle_id;
+ int err;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't dup handle %lx\n", id);
+ goto out;
+ }
+ err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
+ if (err) {
+ pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
+ goto out;
+ }
+ addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
+
+out:
+ resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
+ resp.addr = addr;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
+ struct svc_common_resp resp;
+ unsigned long id = msg->handle_id;
+
+ nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
+ nvmap_free_handle_id(avp_svc->nvmap_remote, id);
+
+ resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle_ref *handle;
+ int err = 0;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
+ msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
+ msg->handle_id, (int)PTR_ERR(handle));
+ err = AVP_ERR_ENOMEM;
+ }
+
+ resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
+ struct svc_nvmap_get_addr_resp resp;
+
+ resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
+ resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
+ resp.addr += msg->offs;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
+ struct svc_pwr_register_resp resp;
+
+ resp.svc_id = SVC_POWER_RESPONSE;
+ resp.err = 0;
+ resp.client_id = msg->client_id;
+
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
+{
+ if (id < NUM_AVP_MODULES && avp_modules[id].name)
+ return &avp_modules[id];
+ return NULL;
+}
+
+static void do_svc_module_reset(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ if (msg->module_id == AVP_MODULE_ID_AVP)
+ pr_err("avp_svc: AVP suicidal?!?!\n");
+ else
+ pr_err("avp_svc: Unknown module reset requested: %d\n",
+ msg->module_id);
+ /* other side doesn't handle errors for reset */
+ resp.err = 0;
+ goto send_response;
+ }
+
+ aclk = &avp_svc->clks[mod->clk_req];
+ tegra_periph_reset_assert(aclk->clk);
+ udelay(10);
+ tegra_periph_reset_deassert(aclk->clk);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_RESET_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_module_clock(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ pr_err("avp_svc: unknown module clock requested: %d\n",
+ msg->module_id);
+ resp.err = AVP_ERR_EINVAL;
+ goto send_response;
+ }
+
+ mutex_lock(&avp_svc->clk_lock);
+ aclk = &avp_svc->clks[mod->clk_req];
+ if (msg->enable) {
+ if (aclk->refcnt++ == 0) {
+ clk_enable(avp_svc->emcclk);
+ clk_enable(avp_svc->sclk);
+ clk_enable(aclk->clk);
+ }
+ } else {
+ if (unlikely(aclk->refcnt == 0)) {
+ pr_err("avp_svc: unbalanced clock disable for '%s'\n",
+ aclk->mod->name);
+ } else if (--aclk->refcnt == 0) {
+ clk_disable(aclk->clk);
+ clk_disable(avp_svc->sclk);
+ clk_disable(avp_svc->emcclk);
+ }
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_null_response(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len, u32 resp_svc_id)
+{
+ struct svc_common_resp resp;
+ resp.svc_id = resp_svc_id;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
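+/* The DFS and power query handlers below are effectively stubs on the
+ * kernel side: DFS is always reported as stopped, clock utilization as
+ * zero, and no maximum-frequency cap is returned. */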
+static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_state_resp resp;
+ resp.svc_id = SVC_DFS_GETSTATE_RESPONSE;
+ resp.state = AVP_DFS_STATE_STOPPED;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_clk_util_resp resp;
+
+ resp.svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE;
+ resp.err = 0;
+ memset(&resp.usage, 0, sizeof(struct avp_clk_usage));
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_max_freq_resp resp;
+
+ resp.svc_id = SVC_POWER_MAXFREQ;
+ resp.freq = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_printf *msg = (struct svc_printf *)_msg;
+ char tmp_str[SVC_MAX_STRING_LEN];
+
+	/* ensure the local copy is NUL terminated */
+ strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
+ pr_info("[AVP]: %s", tmp_str);
+}
+
+static void do_svc_module_clock_set(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_clock_ctrl *msg = (struct svc_clock_ctrl *)_msg;
+ struct svc_clock_ctrl_response resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+ int ret = 0;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ pr_err("avp_svc: unknown module clock requested: %d\n",
+ msg->module_id);
+ resp.err = AVP_ERR_EINVAL;
+ goto send_response;
+ }
+
+ mutex_lock(&avp_svc->clk_lock);
+ if (msg->module_id == AVP_MODULE_ID_AVP) {
+		/* if the maximum AVP clock rate is requested, run the
+		 * EMC at its maximum */
+ if (msg->clk_freq >= avp_svc->max_avp_rate) {
+ clk_set_rate(avp_svc->emcclk, ULONG_MAX);
+		} else {
+			/* otherwise set the EMC frequency from platform data;
+			 * if no platform data was supplied, set it to maximum */
+ if (avp_svc->emc_rate)
+ clk_set_rate(avp_svc->emcclk, avp_svc->emc_rate);
+ else
+ clk_set_rate(avp_svc->emcclk, ULONG_MAX);
+ }
+ ret = clk_set_rate(avp_svc->sclk, msg->clk_freq);
+ } else {
+ aclk = &avp_svc->clks[mod->clk_req];
+ ret = clk_set_rate(aclk->clk, msg->clk_freq);
+ }
+ if (ret) {
+ pr_err("avp_svc: Failed to set module (id = %d) frequency to %d Hz\n",
+ msg->module_id, msg->clk_freq);
+ resp.err = AVP_ERR_EINVAL;
+ resp.act_freq = 0;
+ mutex_unlock(&avp_svc->clk_lock);
+ goto send_response;
+ }
+
+ if (msg->module_id == AVP_MODULE_ID_AVP)
+ resp.act_freq = clk_get_rate(avp_svc->sclk);
+ else
+ resp.act_freq = clk_get_rate(aclk->clk);
+
+ mutex_unlock(&avp_svc->clk_lock);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_CLOCK_SET_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_unsupported_msg(struct avp_svc_info *avp_svc,
+ u32 resp_svc_id)
+{
+ struct svc_common_resp resp;
+
+ resp.err = AVP_ERR_ENOTSUP;
+ resp.svc_id = resp_svc_id;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_module_clock_get(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_clock_ctrl *msg = (struct svc_clock_ctrl *)_msg;
+ struct svc_clock_ctrl_response resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ pr_err("avp_svc: unknown module get clock requested: %d\n",
+ msg->module_id);
+ resp.err = AVP_ERR_EINVAL;
+ goto send_response;
+ }
+
+ mutex_lock(&avp_svc->clk_lock);
+ aclk = &avp_svc->clks[mod->clk_req];
+ resp.act_freq = clk_get_rate(aclk->clk);
+ mutex_unlock(&avp_svc->clk_lock);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_CLOCK_GET_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static int dispatch_svc_message(struct avp_svc_info *avp_svc,
+ struct svc_msg *msg,
+ size_t len)
+{
+ int ret = 0;
+
+ switch (msg->svc_id) {
+ case SVC_NVMAP_CREATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
+ do_svc_nvmap_create(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_ALLOC:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
+ do_svc_nvmap_alloc(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FREE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
+ do_svc_nvmap_free(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_PIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
+ do_svc_nvmap_pin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_UNPIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
+ do_svc_nvmap_unpin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FROM_ID:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
+ do_svc_nvmap_from_id(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_GET_ADDRESS:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
+ do_svc_nvmap_get_addr(avp_svc, msg, len);
+ break;
+ case SVC_POWER_REGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
+ do_svc_pwr_register(avp_svc, msg, len);
+ break;
+ case SVC_POWER_UNREGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT_MULTI:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
+ __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT:
+ case SVC_POWER_STARVATION:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
+ __func__);
+ do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
+ break;
+ case SVC_POWER_MAXFREQ:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
+ __func__);
+ do_svc_pwr_max_freq(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GETSTATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
+ do_svc_dfs_get_state(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_RESET:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
+ do_svc_module_reset(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_CLOCK:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
+ do_svc_module_clock(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GET_CLK_UTIL:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
+ do_svc_dfs_get_clk_util(avp_svc, msg, len);
+ break;
+ case SVC_PRINTF:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
+ do_svc_printf(avp_svc, msg, len);
+ break;
+ case SVC_AVP_WDT_RESET:
+ pr_err("avp_svc: AVP has been reset by watchdog\n");
+ break;
+ case SVC_MODULE_CLOCK_SET:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock_set\n", __func__);
+ do_svc_module_clock_set(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_CLOCK_GET:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock_get\n", __func__);
+ do_svc_module_clock_get(avp_svc, msg, len);
+ break;
+ default:
+ pr_warning("avp_svc: Unsupported SVC call 0x%x\n", msg->svc_id);
+ do_svc_unsupported_msg(avp_svc, msg->svc_id);
+ ret = -ENOMSG;
+ break;
+ }
+
+ return ret;
+}
+
+static int avp_svc_thread(void *data)
+{
+ struct avp_svc_info *avp_svc = data;
+ u8 buf[TEGRA_RPC_MAX_MSG_LEN];
+ struct svc_msg *msg = (struct svc_msg *)buf;
+ int ret;
+ long timeout;
+
+ BUG_ON(!avp_svc->cpu_ep);
+
+ ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
+ if (ret) {
+ pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
+ goto err;
+ }
+
+ pr_info("%s: got remote peer\n", __func__);
+
+ while (!kthread_should_stop()) {
+ DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
+ ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
+ TEGRA_RPC_MAX_MSG_LEN, -1);
+ DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);
+
+ if (ret == -ECONNRESET || ret == -ENOTCONN) {
+ wait_queue_head_t wq;
+ init_waitqueue_head(&wq);
+
+ pr_info("%s: AVP seems to be down; "
+ "wait for kthread_stop\n", __func__);
+ timeout = msecs_to_jiffies(100);
+ timeout = wait_event_interruptible_timeout(wq,
+ kthread_should_stop(), timeout);
+ if (timeout == 0)
+ pr_err("%s: timed out while waiting for "
+ "kthread_stop\n", __func__);
+ continue;
+ } else if (ret <= 0) {
+ pr_err("%s: couldn't receive msg (ret=%d)\n",
+ __func__, ret);
+ continue;
+ }
+ dispatch_svc_message(avp_svc, msg, ret);
+ }
+
+err:
+ trpc_put(avp_svc->cpu_ep);
+ pr_info("%s: exiting\n", __func__);
+ return ret;
+}
+
+int avp_svc_start(struct avp_svc_info *avp_svc)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
+ if (IS_ERR(avp_svc->nvmap_remote)) {
+ pr_err("%s: cannot create remote nvmap client\n", __func__);
+ ret = PTR_ERR(avp_svc->nvmap_remote);
+ goto err_nvmap_create_remote_client;
+ }
+
+ ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
+ if (IS_ERR(ep)) {
+ pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
+ ret = PTR_ERR(ep);
+ goto err_cpu_port_create;
+ }
+
+ /* TODO: protect this */
+ avp_svc->cpu_ep = ep;
+
+ /* the service thread should get an extra reference for the port */
+ trpc_get(avp_svc->cpu_ep);
+ avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
+ "avp_svc_thread");
+ if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
+ avp_svc->svc_thread = NULL;
+ pr_err("%s: can't create svc thread\n", __func__);
+ ret = -ENOMEM;
+ goto err_kthread;
+ }
+ return 0;
+
+err_kthread:
+ trpc_close(avp_svc->cpu_ep);
+ trpc_put(avp_svc->cpu_ep);
+ avp_svc->cpu_ep = NULL;
+err_cpu_port_create:
+ nvmap_client_put(avp_svc->nvmap_remote);
+err_nvmap_create_remote_client:
+ avp_svc->nvmap_remote = NULL;
+ return ret;
+}
+
+void avp_svc_stop(struct avp_svc_info *avp_svc)
+{
+ int ret;
+ int i;
+
+ trpc_close(avp_svc->cpu_ep);
+ ret = kthread_stop(avp_svc->svc_thread);
+ if (ret == -EINTR) {
+		/* the thread never started, drop its extra reference */
+ trpc_put(avp_svc->cpu_ep);
+ }
+ avp_svc->cpu_ep = NULL;
+
+ nvmap_client_put(avp_svc->nvmap_remote);
+ avp_svc->nvmap_remote = NULL;
+
+ mutex_lock(&avp_svc->clk_lock);
+ for (i = 0; i < NUM_CLK_REQUESTS; i++) {
+ struct avp_clk *aclk = &avp_svc->clks[i];
+ BUG_ON(aclk->refcnt < 0);
+ if (aclk->refcnt > 0) {
+ pr_info("%s: remote left clock '%s' on\n", __func__,
+ aclk->mod->name);
+ clk_disable(aclk->clk);
+			/* sclk/emcclk were enabled once for each module clock */
+ clk_disable(avp_svc->sclk);
+ clk_disable(avp_svc->emcclk);
+ }
+ aclk->refcnt = 0;
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+}
+
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node)
+{
+ struct tegra_avp_platform_data *pdata;
+ struct avp_svc_info *avp_svc;
+ int ret;
+ int i;
+ int cnt = 0;
+
+ BUG_ON(!rpc_node);
+
+ avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
+ if (!avp_svc) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);
+
+ pdata = pdev->dev.platform_data;
+
+ for (i = 0; i < NUM_AVP_MODULES; i++) {
+ struct avp_module *mod = &avp_modules[i];
+ struct clk *clk;
+ if (!mod->name)
+ continue;
+ BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
+ cnt++ >= NUM_CLK_REQUESTS);
+
+ clk = clk_get(&pdev->dev, mod->name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("avp_svc: Couldn't get required clocks\n");
+ goto err_get_clks;
+ }
+ avp_svc->clks[mod->clk_req].clk = clk;
+ avp_svc->clks[mod->clk_req].mod = mod;
+ avp_svc->clks[mod->clk_req].refcnt = 0;
+ }
+
+ avp_svc->sclk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(avp_svc->sclk)) {
+ pr_err("avp_svc: Couldn't get sclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+ avp_svc->max_avp_rate = clk_round_rate(avp_svc->sclk, ULONG_MAX);
+ clk_set_rate(avp_svc->sclk, 0);
+
+ avp_svc->emcclk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(avp_svc->emcclk)) {
+ pr_err("avp_svc: Couldn't get emcclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+
+ /*
+	 * The EMC is a shared clock; set it to the rate requested in the
+	 * platform data, or to ULONG_MAX if no platform data was provided.
+ */
+ avp_svc->emc_rate = 0;
+ if (pdata) {
+ clk_set_rate(avp_svc->emcclk, pdata->emc_clk_rate);
+ avp_svc->emc_rate = pdata->emc_clk_rate;
+	} else {
+		clk_set_rate(avp_svc->emcclk, ULONG_MAX);
+	}
+
+ avp_svc->rpc_node = rpc_node;
+
+ mutex_init(&avp_svc->clk_lock);
+
+ return avp_svc;
+
+err_get_clks:
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ if (avp_svc->clks[i].clk)
+ clk_put(avp_svc->clks[i].clk);
+ if (!IS_ERR_OR_NULL(avp_svc->sclk))
+ clk_put(avp_svc->sclk);
+ if (!IS_ERR_OR_NULL(avp_svc->emcclk))
+ clk_put(avp_svc->emcclk);
+ kfree(avp_svc);
+err_alloc:
+ return ERR_PTR(ret);
+}
+
+void avp_svc_destroy(struct avp_svc_info *avp_svc)
+{
+ int i;
+
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ clk_put(avp_svc->clks[i].clk);
+ clk_put(avp_svc->sclk);
+ clk_put(avp_svc->emcclk);
+
+ kfree(avp_svc);
+}
diff --git a/drivers/media/video/tegra/avp/headavp.S b/drivers/media/video/tegra/avp/headavp.S
new file mode 100644
index 000000000000..c1f8e9fea1cb
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.S
@@ -0,0 +1,68 @@
+/*
+ * drivers/media/video/tegra/avp/headavp.S
+ *
+ * AVP kernel launcher stub; programs the AVP MMU and jumps to the
+ * kernel code. Must use ONLY ARMv4 instructions, and must be compiled
+ * in ARM mode.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "headavp.h"
+
+#define PTE0_COMPARE 0
+/* the default translation will translate any VA within
+ * 0x0010:0000..0x001f:ffff to the (megabyte-aligned) value written to
+ * _tegra_avp_boot_stub_data.map_phys_addr
+ */
+#define PTE0_DEFAULT (AVP_KERNEL_VIRT_BASE | 0x3ff0)
+
+#define PTE0_TRANSLATE 4
+
+#define TRANSLATE_DATA (1 << 11)
+#define TRANSLATE_CODE (1 << 10)
+#define TRANSLATE_WR (1 << 9)
+#define TRANSLATE_RD (1 << 8)
+#define TRANSLATE_HIT (1 << 7)
+#define TRANSLATE_EN (1 << 2)
+
+#define TRANSLATE_OPT (TRANSLATE_DATA | TRANSLATE_CODE | TRANSLATE_WR | \
+ TRANSLATE_RD | TRANSLATE_HIT)
+
+ENTRY(_tegra_avp_boot_stub)
+ adr r4, _tegra_avp_boot_stub_data
+ ldmia r4, {r0-r3}
+#ifdef CONFIG_TEGRA_AVP_KERNEL_ON_MMU
+ str r2, [r0, #PTE0_COMPARE]
+ bic r3, r3, #0xff0
+ bic r3, r3, #0x00f
+ orr r3, r3, #TRANSLATE_OPT
+ orr r3, r3, #TRANSLATE_EN
+ str r3, [r0, #PTE0_TRANSLATE]
+#endif
+ bx r1
+ b .
+ENDPROC(_tegra_avp_boot_stub)
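+
+/* The 0xdeadbeef / 0xdeadd00d words below are placeholders for the AVP
+ * entry point (jump_addr) and the physical base of the mapped kernel
+ * (map_phys_addr); they are presumably patched by the loader, via
+ * struct tegra_avp_boot_stub_data in headavp.h, before the AVP is
+ * released from reset. */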
+ .type _tegra_avp_boot_stub_data, %object
+ENTRY(_tegra_avp_boot_stub_data)
+ .long AVP_MMU_TLB_BASE
+ .long 0xdeadbeef
+ .long PTE0_DEFAULT
+ .long 0xdeadd00d
+ .size _tegra_avp_boot_stub_data, . - _tegra_avp_boot_stub_data
diff --git a/drivers/media/video/tegra/avp/headavp.h b/drivers/media/video/tegra/avp/headavp.h
new file mode 100644
index 000000000000..2bcc3297bfa4
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.h
@@ -0,0 +1,41 @@
+/*
+ * drivers/media/video/tegra/avp/headavp.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_HEADAVP_H
+#define _MACH_TEGRA_HEADAVP_H
+
+#define AVP_MMU_TLB_BASE 0xF000F000
+#define AVP_KERNEL_VIRT_BASE 0x00100000
+
+#ifndef __ASSEMBLY__
+
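+/* Parameter block shared with the boot stub in headavp.S: mmu_tlb_base
+ * is the base of the AVP MMU TLB registers, jump_addr is the address
+ * the stub branches to, and map_virt_addr/map_phys_addr are the
+ * virtual/physical pair programmed into the translation entry when
+ * CONFIG_TEGRA_AVP_KERNEL_ON_MMU is set (the virtual side defaults to
+ * a window at AVP_KERNEL_VIRT_BASE). */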
+struct tegra_avp_boot_stub_data {
+ unsigned long mmu_tlb_base;
+ unsigned long jump_addr;
+ unsigned long map_virt_addr;
+ unsigned long map_phys_addr;
+};
+
+extern void _tegra_avp_boot_stub(void);
+extern struct tegra_avp_boot_stub_data _tegra_avp_boot_stub_data;
+
+#endif
+
+#endif
diff --git a/drivers/media/video/tegra/avp/nvavp.h b/drivers/media/video/tegra/avp/nvavp.h
new file mode 100644
index 000000000000..dbc62b485882
--- /dev/null
+++ b/drivers/media/video/tegra/avp/nvavp.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 Nvidia Corp
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_NVAVP_H
+#define __MEDIA_VIDEO_TEGRA_NVAVP_H
+
+#include <linux/tegra_avp.h>
+
+struct tegra_avp_info;
+
+int tegra_avp_open(struct tegra_avp_info **avp);
+int tegra_avp_release(struct tegra_avp_info *avp);
+int tegra_avp_load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib);
+int tegra_avp_unload_lib(struct tegra_avp_info *avp, unsigned long handle);
+
+
+#include <linux/tegra_sema.h>
+
+struct tegra_sema_info;
+
+int tegra_sema_open(struct tegra_sema_info **sema);
+int tegra_sema_release(struct tegra_sema_info *sema);
+int tegra_sema_wait(struct tegra_sema_info *sema, long *timeout);
+int tegra_sema_signal(struct tegra_sema_info *sema);
+
+
+#include <linux/tegra_rpc.h>
+
+struct tegra_rpc_info;
+
+int tegra_rpc_open(struct tegra_rpc_info **rpc);
+int tegra_rpc_release(struct tegra_rpc_info *rpc);
+int tegra_rpc_port_create(struct tegra_rpc_info *rpc, char *name,
+ struct tegra_sema_info *sema);
+int tegra_rpc_get_name(struct tegra_rpc_info *rpc, char *name);
+int tegra_rpc_port_connect(struct tegra_rpc_info *rpc, long timeout);
+int tegra_rpc_port_listen(struct tegra_rpc_info *rpc, long timeout);
+int tegra_rpc_write(struct tegra_rpc_info *rpc, u8 *buf, size_t size);
+int tegra_rpc_read(struct tegra_rpc_info *rpc, u8 *buf, size_t max);
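+
+/* A rough usage sketch for the RPC interface above (illustrative only;
+ * the port name, buffer and timeout below are made up):
+ *
+ *	struct tegra_rpc_info *rpc;
+ *	struct tegra_sema_info *sema;
+ *	u8 buf[16];
+ *
+ *	tegra_sema_open(&sema);
+ *	tegra_rpc_open(&rpc);
+ *	tegra_rpc_port_create(rpc, "MY_PORT", sema);
+ *	tegra_rpc_port_connect(rpc, 1000);
+ *	tegra_rpc_write(rpc, buf, sizeof(buf));
+ *	...
+ *	tegra_rpc_release(rpc);
+ *	tegra_sema_release(sema);
+ */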
+
+
+#endif
diff --git a/drivers/media/video/tegra/avp/tegra_rpc.c b/drivers/media/video/tegra/avp/tegra_rpc.c
new file mode 100644
index 000000000000..a0fd1dc999f4
--- /dev/null
+++ b/drivers/media/video/tegra/avp/tegra_rpc.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+
+struct trpc_port;
+struct trpc_endpoint {
+ struct list_head msg_list;
+ wait_queue_head_t msg_waitq;
+
+ struct trpc_endpoint *out;
+ struct trpc_port *port;
+
+ struct trpc_node *owner;
+
+ struct completion *connect_done;
+ bool ready;
+ struct trpc_ep_ops *ops;
+ void *priv;
+};
+
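+/* A port pairs two endpoints: peers[0] is claimed by trpc_create() and
+ * peers[1] by trpc_create_peer(). The port itself is refcounted and is
+ * freed once both sides have dropped their references via
+ * trpc_close()/trpc_put(). */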
+struct trpc_port {
+ char name[TEGRA_RPC_MAX_NAME_LEN];
+
+	/* protects the peers and the closed state */
+ spinlock_t lock;
+ struct trpc_endpoint peers[2];
+ bool closed;
+
+ /* private */
+ struct kref ref;
+ struct rb_node rb_node;
+};
+
+enum {
+ TRPC_TRACE_MSG = 1U << 0,
+ TRPC_TRACE_CONN = 1U << 1,
+ TRPC_TRACE_PORT = 1U << 2,
+};
+
+static u32 trpc_debug_mask;
+module_param_named(debug_mask, trpc_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (trpc_debug_mask & (flag)) pr_info(args); } while (0)
+
+struct tegra_rpc_info {
+ struct kmem_cache *msg_cache;
+
+ spinlock_t ports_lock;
+ struct rb_root ports;
+
+ struct list_head node_list;
+ struct mutex node_lock;
+};
+
+struct trpc_msg {
+ struct list_head list;
+
+ size_t len;
+ u8 payload[TEGRA_RPC_MAX_MSG_LEN];
+};
+
+static struct tegra_rpc_info *tegra_rpc;
+static struct dentry *trpc_debug_root;
+
+static struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep);
+
+/* a few accessors for the outside world to keep the trpc_endpoint struct
+ * definition private to this module */
+void *trpc_priv(struct trpc_endpoint *ep)
+{
+ return ep->priv;
+}
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep)
+{
+ return ep->out;
+}
+
+const char *trpc_name(struct trpc_endpoint *ep)
+{
+ return ep->port->name;
+}
+
+static inline bool is_connected(struct trpc_port *port)
+{
+ return port->peers[0].ready && port->peers[1].ready;
+}
+
+static inline bool is_closed(struct trpc_port *port)
+{
+ return port->closed;
+}
+
+static void rpc_port_free(struct tegra_rpc_info *info, struct trpc_port *port)
+{
+ struct trpc_msg *msg;
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ struct list_head *list = &port->peers[i].msg_list;
+ while (!list_empty(list)) {
+ msg = list_first_entry(list, struct trpc_msg, list);
+ list_del(&msg->list);
+ kmem_cache_free(info->msg_cache, msg);
+ }
+ }
+ kfree(port);
+}
+
+static void _rpc_port_release(struct kref *kref)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = container_of(kref, struct trpc_port, ref);
+ unsigned long flags;
+
+ DBG(TRPC_TRACE_PORT, "%s: releasing port '%s' (%p)\n", __func__,
+ port->name, port);
+ spin_lock_irqsave(&info->ports_lock, flags);
+ rb_erase(&port->rb_node, &info->ports);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ rpc_port_free(info, port);
+}
+
+/* note that the refcount is actually on the port and not on the endpoint */
+void trpc_put(struct trpc_endpoint *ep)
+{
+ kref_put(&ep->port->ref, _rpc_port_release);
+}
+
+void trpc_get(struct trpc_endpoint *ep)
+{
+ kref_get(&ep->port->ref);
+}
+
+/* Searches the rb_tree for a port with the provided name. If one is not found,
+ * the new port is inserted. Otherwise, the existing port is returned.
+ * Must be called with the ports_lock held */
+static struct trpc_port *rpc_port_find_insert(struct tegra_rpc_info *info,
+ struct trpc_port *port)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct trpc_port *tmp;
+ int ret = 0;
+
+ p = &info->ports.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct trpc_port, rb_node);
+
+ ret = strncmp(port->name, tmp->name, TEGRA_RPC_MAX_NAME_LEN);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else
+ return tmp;
+ }
+ rb_link_node(&port->rb_node, parent, p);
+ rb_insert_color(&port->rb_node, &info->ports);
+ DBG(TRPC_TRACE_PORT, "%s: inserted port '%s' (%p)\n", __func__,
+ port->name, port);
+ return port;
+}
+
+static int nodes_try_connect(struct tegra_rpc_info *info,
+ struct trpc_node *src,
+ struct trpc_endpoint *from)
+{
+ struct trpc_node *node;
+ int ret;
+
+ mutex_lock(&info->node_lock);
+ list_for_each_entry(node, &info->node_list, list) {
+ if (!node->try_connect)
+ continue;
+ ret = node->try_connect(node, src, from);
+ if (!ret) {
+ mutex_unlock(&info->node_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&info->node_lock);
+ return -ECONNREFUSED;
+}
+
+static struct trpc_port *rpc_port_alloc(const char *name)
+{
+ struct trpc_port *port;
+ int i;
+
+ port = kzalloc(sizeof(struct trpc_port), GFP_KERNEL);
+ if (!port) {
+ pr_err("%s: can't alloc rpc_port\n", __func__);
+ return NULL;
+ }
+ BUILD_BUG_ON(2 != ARRAY_SIZE(port->peers));
+
+ spin_lock_init(&port->lock);
+ kref_init(&port->ref);
+ strlcpy(port->name, name, TEGRA_RPC_MAX_NAME_LEN);
+ for (i = 0; i < 2; i++) {
+ struct trpc_endpoint *ep = port->peers + i;
+ INIT_LIST_HEAD(&ep->msg_list);
+ init_waitqueue_head(&ep->msg_waitq);
+ ep->port = port;
+ }
+ port->peers[0].out = &port->peers[1];
+ port->peers[1].out = &port->peers[0];
+
+ return port;
+}
+
+/* must be holding the ports lock */
+static inline void handle_port_connected(struct trpc_port *port)
+{
+ int i;
+
+ DBG(TRPC_TRACE_CONN, "tegra_rpc: port '%s' connected\n", port->name);
+
+ for (i = 0; i < 2; i++)
+ if (port->peers[i].connect_done)
+ complete(port->peers[i].connect_done);
+}
+
+static inline void _ready_ep(struct trpc_endpoint *ep,
+ struct trpc_node *owner,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ ep->ready = true;
+ ep->owner = owner;
+ ep->ops = ops;
+ ep->priv = priv;
+}
+
+/* this keeps a reference on the port */
+static struct trpc_endpoint *_create_peer(struct tegra_rpc_info *info,
+ struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(port->closed);
+ if (peer->ready || !ep->ready) {
+ peer = NULL;
+ goto out;
+ }
+ _ready_ep(peer, owner, ops, priv);
+ if (WARN_ON(!is_connected(port)))
+ pr_warning("%s: created peer but no connection established?!\n",
+ __func__);
+ else
+ handle_port_connected(port);
+ trpc_get(peer);
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return peer;
+}
+
+/* Exported code. This is our interface to the outside world */
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *ep;
+ struct trpc_port *new_port;
+ struct trpc_port *port;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+	/* we always allocate a new port even if one might already exist. This
+ * is slightly inefficient, but it allows us to do the allocation
+ * without holding our ports_lock spinlock. */
+ new_port = rpc_port_alloc(name);
+ if (!new_port) {
+ pr_err("%s: can't allocate memory for '%s'\n", __func__, name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ port = rpc_port_find_insert(info, new_port);
+ if (port != new_port) {
+ rpc_port_free(info, new_port);
+ /* There was already a port by that name in the rb_tree,
+ * so just try to create its peer[1], i.e. peer for peer[0]
+ */
+ ep = _create_peer(info, owner, &port->peers[0], ops, priv);
+ if (!ep) {
+ pr_err("%s: port '%s' is not in a connectable state\n",
+ __func__, port->name);
+ ep = ERR_PTR(-EINVAL);
+ }
+ goto out;
+ }
+	/* no need to grab the individual port lock here: we must still be
+	 * holding the ports_lock we took to add the new element, so no one
+	 * else could have obtained a reference to this port and its state
+	 * cannot have been touched */
+ ep = &port->peers[0];
+ _ready_ep(ep, owner, ops, priv);
+out:
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return ep;
+}
+
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ peer = _create_peer(info, owner, ep, ops, priv);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return peer;
+}
+
+/* timeout == -1, wait forever
+ * timeout == 0, return immediately
+ * timeout > 0, retry for up to that many milliseconds
+ */
+int trpc_connect(struct trpc_endpoint *from, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = from->port;
+ struct trpc_node *src = from->owner;
+ int ret;
+ bool no_retry = !timeout;
+ unsigned long endtime = jiffies + msecs_to_jiffies(timeout);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* XXX: add state for connections and ports to prevent invalid
+ * states like multiple connections, etc. ? */
+ if (unlikely(is_closed(port))) {
+ ret = -ECONNRESET;
+ pr_err("%s: can't connect to %s, closed\n", __func__,
+ port->name);
+ goto out;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ do {
+ ret = nodes_try_connect(info, src, from);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ } else if (no_retry) {
+ goto out;
+ } else if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ usleep_range(5000, 20000);
+ } while (timeout < 0 || time_before(jiffies, endtime));
+
+ return -ETIMEDOUT;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+/* convenience function for doing this common pattern in a single call */
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src,
+ char *name,
+ struct trpc_ep_ops *ops,
+ void *priv,
+ long timeout)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ ep = trpc_create(src, name, ops, priv);
+ if (IS_ERR(ep))
+ return ep;
+
+ ret = trpc_connect(ep, timeout);
+ if (ret) {
+ trpc_close(ep);
+ return ERR_PTR(ret);
+ }
+
+ return ep;
+}
+
+void trpc_close(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ bool need_close_op = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(!ep->ready);
+ ep->ready = false;
+ port->closed = true;
+ if (peer->ready) {
+ need_close_op = true;
+ /* the peer may be waiting for a message */
+ wake_up_all(&peer->msg_waitq);
+ if (peer->connect_done)
+ complete(peer->connect_done);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (need_close_op && peer->ops && peer->ops->close)
+ peer->ops->close(peer);
+ trpc_put(ep);
+}
+
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout)
+{
+ struct trpc_port *port = ep->port;
+ DECLARE_COMPLETION_ONSTACK(event);
+ int ret;
+ unsigned long flags;
+
+ if (timeout < 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else if (timeout > 0)
+ timeout = msecs_to_jiffies(timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (ep->connect_done) {
+ ret = -EBUSY;
+ goto done;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto done;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto done;
+ } else if (!timeout) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ ep->connect_done = &event;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = wait_for_completion_interruptible_timeout(&event, timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ ep->connect_done = NULL;
+
+ if (is_connected(port)) {
+ ret = 0;
+ } else {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ }
+
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+static inline int _ep_id(struct trpc_endpoint *ep)
+{
+ return ep - ep->port->peers;
+}
+
+static int queue_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+ struct trpc_msg *msg;
+ unsigned long flags;
+ int ret;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+	/* should not be queueing onto an endpoint that has its own send op */
+ BUG_ON(peer->ops && peer->ops->send);
+
+ DBG(TRPC_TRACE_MSG, "%s: queueing message for %s.%d\n", __func__,
+ port->name, _ep_id(peer));
+
+ msg = kmem_cache_alloc(info->msg_cache, gfp_flags);
+ if (!msg) {
+ pr_err("%s: can't alloc memory for msg\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(msg->payload, buf, len);
+ msg->len = len;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_closed(port)) {
+ pr_err("%s: cannot send message for closed port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ECONNRESET;
+ goto err;
+ } else if (!is_connected(port)) {
+ pr_err("%s: cannot send message for unconnected port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ENOTCONN;
+ goto err;
+ }
+
+ list_add_tail(&msg->list, &peer->msg_list);
+ if (peer->ops && peer->ops->notify_recv)
+ peer->ops->notify_recv(peer);
+ wake_up_all(&peer->msg_waitq);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&port->lock, flags);
+ kmem_cache_free(info->msg_cache, msg);
+ return ret;
+}
+
+/* Returns -ENOMEM if allocating memory for the message failed. */
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+
+ DBG(TRPC_TRACE_MSG, "%s: sending message from %s.%d to %s.%d\n",
+ __func__, port->name, _ep_id(from), port->name, _ep_id(peer));
+
+ if (peer->ops && peer->ops->send) {
+ might_sleep();
+ return peer->ops->send(peer, buf, len);
+ } else {
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+ return queue_msg(src, from, buf, len, gfp_flags);
+ }
+}
+
+static inline struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep)
+{
+ struct trpc_msg *msg = NULL;
+
+ if (!list_empty(&ep->msg_list)) {
+ msg = list_first_entry(&ep->msg_list, struct trpc_msg, list);
+ list_del_init(&msg->list);
+ }
+
+ return msg;
+}
+
+static bool __should_wake(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = !list_empty(&ep->msg_list) || is_closed(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t buf_len, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = ep->port;
+ struct trpc_msg *msg;
+ size_t len;
+ long ret;
+ unsigned long flags;
+
+ BUG_ON(buf_len > TEGRA_RPC_MAX_MSG_LEN);
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* we allow closed ports to finish receiving already-queued messages */
+ msg = dequeue_msg_locked(ep);
+ if (msg) {
+ goto got_msg;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto out;
+ } else if (!is_connected(port)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ if (timeout == 0) {
+ ret = 0;
+ goto out;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ DBG(TRPC_TRACE_MSG, "%s: waiting for message for %s.%d\n", __func__,
+ port->name, _ep_id(ep));
+
+ ret = wait_event_interruptible_timeout(ep->msg_waitq, __should_wake(ep),
+ timeout);
+
+ DBG(TRPC_TRACE_MSG, "%s: woke up for %s\n", __func__, port->name);
+ spin_lock_irqsave(&port->lock, flags);
+ msg = dequeue_msg_locked(ep);
+ if (!msg) {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ pr_err("%s: error (%d) while receiving msg for '%s'\n",
+ __func__, (int)ret, port->name);
+ goto out;
+ }
+
+got_msg:
+ spin_unlock_irqrestore(&port->lock, flags);
+ len = min(buf_len, msg->len);
+ memcpy(buf, msg->payload, len);
+ kmem_cache_free(info->msg_cache, msg);
+ return len;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_node_register(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ if (!info)
+ return -ENOMEM;
+
+ pr_info("%s: Adding '%s' to node list\n", __func__, node->name);
+
+ mutex_lock(&info->node_lock);
+ if (node->type == TRPC_NODE_LOCAL)
+ list_add(&node->list, &info->node_list);
+ else
+ list_add_tail(&node->list, &info->node_list);
+ mutex_unlock(&info->node_lock);
+ return 0;
+}
+
+void trpc_node_unregister(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ mutex_lock(&info->node_lock);
+ list_del(&node->list);
+ mutex_unlock(&info->node_lock);
+}
+
+static int trpc_debug_ports_show(struct seq_file *s, void *data)
+{
+ struct tegra_rpc_info *info = s->private;
+ struct rb_node *n;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ for (n = rb_first(&info->ports); n; n = rb_next(n)) {
+ struct trpc_port *port = rb_entry(n, struct trpc_port, rb_node);
+ seq_printf(s, "port: %s\n closed:%s\n", port->name,
+ port->closed ? "yes" : "no");
+
+ spin_lock(&port->lock);
+ for (i = 0; i < ARRAY_SIZE(port->peers); i++) {
+ struct trpc_endpoint *ep = &port->peers[i];
+ seq_printf(s, " peer%d: %s\n ready:%s\n", i,
+ ep->owner ? ep->owner->name : "<none>",
+ ep->ready ? "yes" : "no");
+ if (ep->ops && ep->ops->show)
+ ep->ops->show(s, ep);
+ }
+ spin_unlock(&port->lock);
+ }
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+
+ return 0;
+}
+
+static int trpc_debug_ports_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, trpc_debug_ports_show, inode->i_private);
+}
+
+static const struct file_operations trpc_debug_ports_fops = {
+ .open = trpc_debug_ports_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void trpc_debug_init(struct tegra_rpc_info *info)
+{
+ trpc_debug_root = debugfs_create_dir("tegra_rpc", NULL);
+ if (IS_ERR_OR_NULL(trpc_debug_root)) {
+ pr_err("%s: couldn't create debug files\n", __func__);
+ return;
+ }
+
+ debugfs_create_file("ports", 0664, trpc_debug_root, info,
+ &trpc_debug_ports_fops);
+}
+
+static int __init tegra_rpc_init(void)
+{
+ struct tegra_rpc_info *rpc_info;
+ int ret;
+
+ rpc_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
+ if (!rpc_info) {
+ pr_err("%s: error allocating rpc_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ rpc_info->ports = RB_ROOT;
+ spin_lock_init(&rpc_info->ports_lock);
+ INIT_LIST_HEAD(&rpc_info->node_list);
+ mutex_init(&rpc_info->node_lock);
+
+ rpc_info->msg_cache = KMEM_CACHE(trpc_msg, 0);
+ if (!rpc_info->msg_cache) {
+ pr_err("%s: unable to create message cache\n", __func__);
+ ret = -ENOMEM;
+ goto err_kmem_cache;
+ }
+
+ trpc_debug_init(rpc_info);
+ tegra_rpc = rpc_info;
+
+ return 0;
+
+err_kmem_cache:
+ kfree(rpc_info);
+ return ret;
+}
+
+subsys_initcall(tegra_rpc_init);
diff --git a/drivers/media/video/tegra/avp/trpc.h b/drivers/media/video/tegra/avp/trpc.h
new file mode 100644
index 000000000000..e7b0d2d55788
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_H
+#define __ARM_MACH_TEGRA_RPC_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/tegra_rpc.h>
+
+struct trpc_endpoint;
+struct trpc_ep_ops {
+ /* send is allowed to sleep */
+ int (*send)(struct trpc_endpoint *ep, void *buf, size_t len);
+ /* notify_recv is NOT allowed to sleep */
+ void (*notify_recv)(struct trpc_endpoint *ep);
+ /* close is allowed to sleep */
+ void (*close)(struct trpc_endpoint *ep);
+ /* not allowed to sleep, not allowed to call back into trpc */
+ void (*show)(struct seq_file *s, struct trpc_endpoint *ep);
+};
+
+enum {
+ TRPC_NODE_LOCAL,
+ TRPC_NODE_REMOTE,
+};
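+
+/* trpc_node_register() adds TRPC_NODE_LOCAL nodes at the head of the
+ * node list and everything else at the tail, so local nodes get the
+ * first chance to satisfy a connection attempt. */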
+
+struct trpc_node {
+ struct list_head list;
+ const char *name;
+ int type;
+ void *priv;
+
+ int (*try_connect)(struct trpc_node *node,
+ struct trpc_node *src,
+ struct trpc_endpoint *from);
+};
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep);
+void *trpc_priv(struct trpc_endpoint *ep);
+const char *trpc_name(struct trpc_endpoint *ep);
+
+void trpc_put(struct trpc_endpoint *ep);
+void trpc_get(struct trpc_endpoint *ep);
+
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *ep, void *buf,
+ size_t len, gfp_t gfp_flags);
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t len, long timeout);
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv);
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src, char *name,
+ struct trpc_ep_ops *ops, void *priv,
+ long timeout);
+int trpc_connect(struct trpc_endpoint *from, long timeout);
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv);
+void trpc_close(struct trpc_endpoint *ep);
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout);
+
+int trpc_node_register(struct trpc_node *node);
+void trpc_node_unregister(struct trpc_node *node);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/trpc_local.c b/drivers/media/video/tegra/avp/trpc_local.c
new file mode 100644
index 000000000000..77692e094385
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_local.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+#include "trpc_sema.h"
+#include "nvavp.h"
+
+struct tegra_rpc_info {
+ struct trpc_endpoint *rpc_ep;
+ struct tegra_sema_info *sema;
+};
+
+/* port names reserved for system functions, i.e. communicating with the
+ * AVP */
+static const char reserved_ports[][TEGRA_RPC_MAX_NAME_LEN] = {
+ "RPC_AVP_PORT",
+ "RPC_CPU_PORT",
+};
+static int num_reserved_ports = ARRAY_SIZE(reserved_ports);
+
+static void rpc_notify_recv(struct trpc_endpoint *ep);
+
+/* TODO: do we need to do anything when port is closed from the other side? */
+static struct trpc_ep_ops ep_ops = {
+ .notify_recv = rpc_notify_recv,
+};
+
+static struct trpc_node rpc_node = {
+ .name = "local",
+ .type = TRPC_NODE_LOCAL,
+};
+
+static void rpc_notify_recv(struct trpc_endpoint *ep)
+{
+ struct tegra_rpc_info *info = trpc_priv(ep);
+
+ if (WARN_ON(!info))
+ return;
+ if (info->sema)
+ tegra_sema_signal(info->sema);
+}
+
+int tegra_rpc_open(struct tegra_rpc_info **info)
+{
+ struct tegra_rpc_info *new_info;
+
+ new_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
+ if (!new_info)
+ return -ENOMEM;
+
+ *info = new_info;
+ return 0;
+}
+
+static int local_rpc_open(struct inode *inode, struct file *file)
+{
+ struct tegra_rpc_info *info;
+ int ret = 0;
+
+ ret = tegra_rpc_open(&info);
+ if (ret < 0)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ file->private_data = info;
+ return 0;
+}
+
+int tegra_rpc_release(struct tegra_rpc_info *info)
+{
+ if (info->rpc_ep)
+ trpc_close(info->rpc_ep);
+ if (info->sema)
+ trpc_sema_put(info->sema);
+ kfree(info);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_rpc_release);
+
+static int local_rpc_release(struct inode *inode, struct file *file)
+{
+ struct tegra_rpc_info *info = file->private_data;
+
+ tegra_rpc_release(info);
+ file->private_data = NULL;
+ return 0;
+}
+
+static char uniq_name[] = "aaaaaaaa+";
+static const int uniq_len = sizeof(uniq_name) - 1;
+static DEFINE_MUTEX(uniq_lock);
+
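+/* Generate a unique default port name: uniq_name is treated as a
+ * little-endian base-25 counter ('a'..'y'); hitting 'z' wraps the digit
+ * back to 'a' and carries into the next character. Used when userspace
+ * creates a port with an empty name. */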
+static void _gen_port_name(char *new_name)
+{
+ int i;
+
+ mutex_lock(&uniq_lock);
+ for (i = 0; i < uniq_len - 1; i++) {
+ ++uniq_name[i];
+ if (uniq_name[i] != 'z')
+ break;
+ uniq_name[i] = 'a';
+ }
+ strlcpy(new_name, uniq_name, TEGRA_RPC_MAX_NAME_LEN);
+ mutex_unlock(&uniq_lock);
+}
+
+static int _validate_port_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < num_reserved_ports; i++)
+ if (!strncmp(name, reserved_ports[i], TEGRA_RPC_MAX_NAME_LEN))
+ return -EINVAL;
+ return 0;
+}
+
+int tegra_rpc_port_create(struct tegra_rpc_info *info, char *name,
+ struct tegra_sema_info *sema)
+{
+ struct trpc_endpoint *ep;
+ int ret = 0;
+
+ if (info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ name[TEGRA_RPC_MAX_NAME_LEN - 1] = '\0';
+ if (name[0]) {
+ ret = _validate_port_name(name);
+ if (ret)
+ goto err;
+ } else {
+ _gen_port_name(name);
+ }
+ ep = trpc_create(&rpc_node, name, &ep_ops, info);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ goto err;
+ }
+ info->rpc_ep = ep;
+ info->sema = sema;
+ return 0;
+
+err:
+ return ret;
+}
+
+int tegra_rpc_get_name(struct tegra_rpc_info *info, char *name)
+{
+ if (!info->rpc_ep)
+ return -EINVAL;
+
+ strcpy(name, trpc_name(info->rpc_ep));
+ return 0;
+}
+
+int tegra_rpc_port_connect(struct tegra_rpc_info *info, long timeout)
+{
+ if (!info->rpc_ep)
+ return -EINVAL;
+
+ return trpc_connect(info->rpc_ep, timeout);
+}
+
+int tegra_rpc_port_listen(struct tegra_rpc_info *info, long timeout)
+{
+ if (!info->rpc_ep)
+ return -EINVAL;
+
+ return trpc_wait_peer(info->rpc_ep, timeout);
+}
+
+static long local_rpc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_rpc_info *info = file->private_data;
+ struct tegra_rpc_port_desc desc;
+ struct tegra_sema_info *sema = NULL;
+ int ret = 0;
+
+ if (_IOC_TYPE(cmd) != TEGRA_RPC_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_RPC_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_RPC_IOCTL_MAX_NR) {
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ switch (cmd) {
+ case TEGRA_RPC_IOCTL_PORT_CREATE:
+ if (_IOC_SIZE(cmd) != sizeof(struct tegra_rpc_port_desc))
+ return -EINVAL;
+ if (copy_from_user(&desc, (void __user *)arg, sizeof(desc)))
+ return -EFAULT;
+ if (desc.notify_fd != -1) {
+ sema = trpc_sema_get_from_fd(desc.notify_fd);
+ if (IS_ERR(sema)) {
+ ret = PTR_ERR(sema);
+ goto err;
+ }
+ }
+
+ ret = tegra_rpc_port_create(info, desc.name, sema);
+ if (ret < 0)
+ goto err;
+
+ break;
+ case TEGRA_RPC_IOCTL_PORT_GET_NAME:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_to_user((void __user *)arg,
+ trpc_name(info->rpc_ep),
+ TEGRA_RPC_MAX_NAME_LEN)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_CONNECT:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_connect(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: can't connect to '%s' (%d)\n", __func__,
+ trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_LISTEN:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_wait_peer(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: error waiting for peer for '%s' (%d)\n",
+ __func__, trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("%s: unknown cmd %d\n", __func__, _IOC_NR(cmd));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("tegra_rpc: pid=%d ioctl=%x/%lx (%x) ret=%d\n",
+ current->pid, cmd, arg, _IOC_NR(cmd), ret);
+ return (long)ret;
+}
+
+int tegra_rpc_write(struct tegra_rpc_info *info, u8 *buf, size_t size)
+{
+ int ret;
+
+ if (!info->rpc_ep)
+ return -EINVAL;
+
+ if (TEGRA_RPC_MAX_MSG_LEN < size)
+ return -EINVAL;
+
+ ret = trpc_send_msg(&rpc_node, info->rpc_ep, buf, size,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ return size;
+}
+
+static ssize_t local_rpc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct tegra_rpc_info *info = file->private_data;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+ else if (count > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ ret = trpc_send_msg(&rpc_node, info->rpc_ep, data, count,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ return count;
+}
+
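+/* In-kernel read interface: the message is received into 'buf' with a
+ * length of TEGRA_RPC_MAX_MSG_LEN before the result is checked against
+ * 'max', so callers must provide a buffer of at least
+ * TEGRA_RPC_MAX_MSG_LEN bytes. */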
+int tegra_rpc_read(struct tegra_rpc_info *info, u8 *buf, size_t max)
+{
+ int ret;
+
+ if (max > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ ret = trpc_recv_msg(&rpc_node, info->rpc_ep, buf,
+ TEGRA_RPC_MAX_MSG_LEN, 0);
+ if (ret == 0)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else if (ret > max)
+ return -ENOSPC;
+
+ return ret;
+}
+
+static ssize_t local_rpc_read(struct file *file, char __user *buf, size_t max,
+ loff_t *ppos)
+{
+ struct tegra_rpc_info *info = file->private_data;
+ int ret;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+
+ if (max > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ ret = trpc_recv_msg(&rpc_node, info->rpc_ep, data,
+ TEGRA_RPC_MAX_MSG_LEN, 0);
+ if (ret == 0)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else if (ret > max)
+ return -ENOSPC;
+ else if (copy_to_user(buf, data, ret))
+ return -EFAULT;
+
+ return ret;
+}
+
+static const struct file_operations local_rpc_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = local_rpc_open,
+ .release = local_rpc_release,
+ .unlocked_ioctl = local_rpc_ioctl,
+ .write = local_rpc_write,
+ .read = local_rpc_read,
+};
+
+static struct miscdevice local_rpc_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_rpc",
+ .fops = &local_rpc_misc_fops,
+};
+
+int __init rpc_local_init(void)
+{
+ int ret;
+
+ ret = trpc_sema_init();
+ if (ret) {
+ pr_err("%s: error in trpc_sema_init\n", __func__);
+ goto err_sema_init;
+ }
+
+ ret = misc_register(&local_rpc_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ goto err_misc;
+ }
+
+ ret = trpc_node_register(&rpc_node);
+ if (ret) {
+ pr_err("%s: can't register rpc node\n", __func__);
+ goto err_node_reg;
+ }
+ return 0;
+
+err_node_reg:
+ misc_deregister(&local_rpc_misc_device);
+err_misc:
+err_sema_init:
+ return ret;
+}
+
+module_init(rpc_local_init);
diff --git a/drivers/media/video/tegra/avp/trpc_sema.c b/drivers/media/video/tegra/avp/trpc_sema.c
new file mode 100644
index 000000000000..cd717a1a0ca3
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_sema.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc_sema.h"
+
+struct tegra_sema_info {
+ struct file *file;
+ wait_queue_head_t wq;
+ spinlock_t lock;
+ int count;
+};
+
+static int rpc_sema_minor = -1;
+
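+/* A file belongs to this driver if its char device matches the misc
+ * minor registered in trpc_sema_init(); this lets userspace safely pass
+ * a /dev/tegra_sema fd to other drivers such as tegra_rpc. */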
+static inline bool is_trpc_sema_file(struct file *file)
+{
+ dev_t rdev = file->f_dentry->d_inode->i_rdev;
+
+ if (MAJOR(rdev) == MISC_MAJOR && MINOR(rdev) == rpc_sema_minor)
+ return true;
+ return false;
+}
+
+struct tegra_sema_info *trpc_sema_get_from_fd(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+ pr_err("%s: fd %d is invalid\n", __func__, fd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!is_trpc_sema_file(file)) {
+ pr_err("%s: fd (%d) is not a trpc_sema file\n", __func__, fd);
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file->private_data;
+}
+
+void trpc_sema_put(struct tegra_sema_info *info)
+{
+ if (info->file)
+ fput(info->file);
+}
+
+int tegra_sema_signal(struct tegra_sema_info *info)
+{
+ unsigned long flags;
+
+ if (!info)
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->count++;
+ wake_up_interruptible_all(&info->wq);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
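+/* Wait for the semaphore to be signalled. *timeout is in milliseconds;
+ * a negative value means wait forever. If the wait is interrupted,
+ * -EINTR is returned and the remaining time is written back to *timeout
+ * so the caller can restart the wait. */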
+int tegra_sema_wait(struct tegra_sema_info *info, long *timeout)
+{
+ unsigned long flags;
+ int ret = 0;
+ unsigned long endtime;
+ long timeleft = *timeout;
+
+ *timeout = 0;
+ if (timeleft < 0)
+ timeleft = MAX_SCHEDULE_TIMEOUT;
+
+ timeleft = msecs_to_jiffies(timeleft);
+ endtime = jiffies + timeleft;
+
+again:
+ if (timeleft)
+ ret = wait_event_interruptible_timeout(info->wq,
+ info->count > 0,
+ timeleft);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->count > 0) {
+ info->count--;
+ ret = 0;
+ } else if (ret == 0 || timeleft == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret < 0) {
+ ret = -EINTR;
+ if (timeleft != MAX_SCHEDULE_TIMEOUT &&
+ time_before(jiffies, endtime))
+ *timeout = jiffies_to_msecs(endtime - jiffies);
+ else
+ *timeout = 0;
+ } else {
+ /* we woke up but someone else got the semaphore and we have
+ * time left, try again */
+ timeleft = ret;
+ spin_unlock_irqrestore(&info->lock, flags);
+ goto again;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ return ret;
+}
+
+int tegra_sema_open(struct tegra_sema_info **sema)
+{
+ struct tegra_sema_info *info;
+
+ info = kzalloc(sizeof(struct tegra_sema_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ init_waitqueue_head(&info->wq);
+ spin_lock_init(&info->lock);
+ *sema = info;
+ return 0;
+}
+
+static int trpc_sema_open(struct inode *inode, struct file *file)
+{
+ struct tegra_sema_info *info;
+ int ret;
+
+ ret = tegra_sema_open(&info);
+ if (ret < 0)
+ return ret;
+
+ info->file = file;
+ nonseekable_open(inode, file);
+ file->private_data = info;
+ return 0;
+}
+
+int tegra_sema_release(struct tegra_sema_info *sema)
+{
+ kfree(sema);
+ return 0;
+}
+
+static int trpc_sema_release(struct inode *inode, struct file *file)
+{
+ struct tegra_sema_info *info = file->private_data;
+
+ file->private_data = NULL;
+ tegra_sema_release(info);
+ return 0;
+}
+
+static long trpc_sema_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_sema_info *info = file->private_data;
+ int ret;
+ long timeout;
+
+ if (_IOC_TYPE(cmd) != TEGRA_SEMA_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_SEMA_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_SEMA_IOCTL_MAX_NR)
+ return -ENOTTY;
+ else if (!info)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TEGRA_SEMA_IOCTL_WAIT:
+ if (copy_from_user(&timeout, (void __user *)arg, sizeof(long)))
+ return -EFAULT;
+ ret = tegra_sema_wait(info, &timeout);
+ if (ret != -EINTR)
+ break;
+ if (copy_to_user((void __user *)arg, &timeout, sizeof(long)))
+ ret = -EFAULT;
+ break;
+ case TEGRA_SEMA_IOCTL_SIGNAL:
+ ret = tegra_sema_signal(info);
+ break;
+ default:
+ pr_err("%s: Unknown tegra_sema ioctl 0x%x\n", __func__,
+ _IOC_NR(cmd));
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations trpc_sema_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = trpc_sema_open,
+ .release = trpc_sema_release,
+ .unlocked_ioctl = trpc_sema_ioctl,
+};
+
+static struct miscdevice trpc_sema_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_sema",
+ .fops = &trpc_sema_misc_fops,
+};
+
+int __init trpc_sema_init(void)
+{
+ int ret;
+
+ if (rpc_sema_minor >= 0) {
+ pr_err("%s: trpc_sema already registered\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = misc_register(&trpc_sema_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ return ret;
+ }
+
+ rpc_sema_minor = trpc_sema_misc_device.minor;
+ pr_info("%s: registered misc dev %d:%d\n", __func__, MISC_MAJOR,
+ rpc_sema_minor);
+
+ return 0;
+}
diff --git a/drivers/media/video/tegra/avp/trpc_sema.h b/drivers/media/video/tegra/avp/trpc_sema.h
new file mode 100644
index 000000000000..2a7c42245b7f
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_SEMA_H
+#define __ARM_MACH_TEGRA_RPC_SEMA_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+
+struct tegra_sema_info;
+
+struct tegra_sema_info *trpc_sema_get_from_fd(int fd);
+void trpc_sema_put(struct tegra_sema_info *sema);
+int __init trpc_sema_init(void);
+
+#endif
diff --git a/drivers/media/video/tegra/mediaserver/Kconfig b/drivers/media/video/tegra/mediaserver/Kconfig
new file mode 100644
index 000000000000..9e60a5b49cd3
--- /dev/null
+++ b/drivers/media/video/tegra/mediaserver/Kconfig
@@ -0,0 +1,10 @@
+config TEGRA_MEDIASERVER
+ bool "Tegra Media Server support"
+ depends on ARCH_TEGRA && TEGRA_RPC
+ default y
+ help
+ Enables support for multiple OpenMAX clients. Exports the
+ interface on the device node /dev/tegra_mediaserver.
+
+ If unsure, say Y
+
diff --git a/drivers/media/video/tegra/mediaserver/Makefile b/drivers/media/video/tegra/mediaserver/Makefile
new file mode 100644
index 000000000000..ed24e91932bc
--- /dev/null
+++ b/drivers/media/video/tegra/mediaserver/Makefile
@@ -0,0 +1,3 @@
+GCOV_PROFILE := y
+obj-$(CONFIG_TEGRA_MEDIASERVER) += tegra_mediaserver.o
+
diff --git a/drivers/media/video/tegra/mediaserver/tegra_mediaserver.c b/drivers/media/video/tegra/mediaserver/tegra_mediaserver.c
new file mode 100644
index 000000000000..a26b9e990ca1
--- /dev/null
+++ b/drivers/media/video/tegra/mediaserver/tegra_mediaserver.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (C) 2011 NVIDIA Corp.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+
+#include <linux/tegra_mediaserver.h>
+#include "../avp/nvavp.h"
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#define CHECK_STATUS(e, tag) \
+ do { if ((e) < 0) goto tag; } while (0)
+
+#define CHECK_NULL(ptr, tag) \
+ do { if (!(ptr)) goto tag; } while (0)
+
+#define CHECK_CONDITION(c, tag) \
+ do { if (c) goto tag; } while (0)
+
+struct tegra_mediasrv_block {
+ struct list_head entry;
+ struct tegra_mediaserver_block_info block;
+};
+
+struct tegra_mediasrv_iram {
+ struct list_head entry;
+ struct tegra_mediaserver_iram_info iram;
+};
+
+struct tegra_mediasrv_node {
+ struct tegra_mediasrv_info *mediasrv;
+ struct list_head blocks;
+ int nr_iram_shared;
+};
+
+struct tegra_mediasrv_manager {
+ struct tegra_avp_lib lib;
+ struct tegra_rpc_info *rpc;
+ struct tegra_sema_info *sema;
+};
+
+struct tegra_mediasrv_info {
+ int minor;
+ struct mutex lock;
+ struct nvmap_client *nvmap;
+ struct tegra_avp_info *avp;
+ struct tegra_mediasrv_manager manager;
+ int nr_nodes;
+ int nr_blocks;
+ struct tegra_mediaserver_iram_info iram; /* only one supported */
+ int nr_iram_shared;
+};
+
+static struct tegra_mediasrv_info *mediasrv_info;
+
+
+/*
+ * File entry points
+ */
+static int mediasrv_open(struct inode *inode, struct file *file)
+{
+ struct tegra_mediasrv_info *mediasrv = mediasrv_info;
+ struct tegra_mediasrv_node *node = NULL;
+ struct tegra_mediasrv_manager *manager = &mediasrv->manager;
+ struct tegra_avp_lib *lib = &manager->lib;
+ int e;
+
+ node = kzalloc(sizeof(struct tegra_mediasrv_node), GFP_KERNEL);
+ CHECK_NULL(node, node_alloc_fail);
+ INIT_LIST_HEAD(&node->blocks);
+ node->mediasrv = mediasrv;
+
+ mutex_lock(&mediasrv->lock);
+ nonseekable_open(inode, file);
+
+ if (!mediasrv->nr_nodes) {
+ e = tegra_sema_open(&manager->sema);
+ CHECK_STATUS(e, fail);
+
+ e = tegra_rpc_open(&manager->rpc);
+ CHECK_STATUS(e, fail);
+
+ e = tegra_rpc_port_create(manager->rpc, "NVMM_MANAGER_SRV",
+ manager->sema);
+ CHECK_STATUS(e, fail);
+
+ e = tegra_avp_open(&mediasrv->avp);
+ CHECK_STATUS(e, fail);
+
+ memcpy(lib->name, "nvmm_manager.axf\0",
+ strlen("nvmm_manager.axf") + 1);
+ lib->args = &mediasrv;
+ lib->args_len = sizeof(unsigned long);
+ e = tegra_avp_load_lib(mediasrv->avp, lib);
+ CHECK_STATUS(e, fail);
+
+ e = tegra_rpc_port_connect(manager->rpc, 50000);
+ CHECK_STATUS(e, fail);
+ }
+
+ mediasrv->nr_nodes++;
+ try_module_get(THIS_MODULE);
+
+ mutex_unlock(&mediasrv->lock);
+
+ file->private_data = node;
+
+ return 0;
+
+fail:
+ if (lib->handle) {
+ tegra_avp_unload_lib(mediasrv->avp, lib->handle);
+ lib->handle = 0;
+ }
+
+ if (mediasrv->avp) {
+ tegra_avp_release(mediasrv->avp);
+ mediasrv->avp = NULL;
+ }
+
+ if (manager->rpc) {
+ tegra_rpc_release(manager->rpc);
+ manager->rpc = NULL;
+ }
+ if (manager->sema) {
+ tegra_sema_release(manager->sema);
+ manager->sema = NULL;
+ }
+
+ kfree(node);
+
+ mutex_unlock(&mediasrv->lock);
+ return e;
+
+node_alloc_fail:
+ e = -ENOMEM;
+ return e;
+}
+
+static int mediasrv_release(struct inode *inode, struct file *file)
+{
+ struct tegra_mediasrv_info *mediasrv = mediasrv_info;
+ struct tegra_mediasrv_node *node = file->private_data;
+ struct tegra_mediasrv_block *block;
+ struct list_head *entry;
+ struct list_head *temp;
+ u32 message[2];
+ int e;
+
+ mutex_lock(&mediasrv->lock);
+
+ list_for_each_safe(entry, temp, &node->blocks) {
+ block = list_entry(entry, struct tegra_mediasrv_block, entry);
+
+ pr_info("Improperly closed block found!");
+ pr_info(" NVMM Block Handle: 0x%08x\n",
+ block->block.nvmm_block_handle);
+ pr_info(" AVP Block Handle: 0x%08x\n",
+ block->block.avp_block_handle);
+
+ message[0] = 1; /* NvmmManagerMsgType_AbnormalTerm */
+ message[1] = block->block.avp_block_handle;
+
+ e = tegra_rpc_write(mediasrv->manager.rpc, (u8 *)message,
+ sizeof(u32) * 2);
+ pr_info("Abnormal termination message result: %d\n", e);
+
+ if (block->block.avp_block_library_handle) {
+ e = tegra_avp_unload_lib(mediasrv->avp,
+ block->block.avp_block_library_handle);
+ pr_info("Unload block (0x%08x) result: %d\n",
+ block->block.avp_block_library_handle, e);
+ }
+
+ if (block->block.service_library_handle) {
+ e = tegra_avp_unload_lib(mediasrv->avp,
+ block->block.service_library_handle);
+ pr_info("Unload service (0x%08x) result: %d\n",
+ block->block.service_library_handle, e);
+ }
+
+ mediasrv->nr_blocks--;
+ list_del(entry);
+ kfree(block);
+ }
+
+ mediasrv->nr_iram_shared -= node->nr_iram_shared;
+ if (mediasrv->iram.rm_handle && !mediasrv->nr_iram_shared) {
+ pr_info("Improperly freed shared iram found!");
+ nvmap_unpin_ids(mediasrv->nvmap, 1, &mediasrv->iram.rm_handle);
+ nvmap_free_handle_id(mediasrv->nvmap, mediasrv->iram.rm_handle);
+ mediasrv->iram.rm_handle = 0;
+ mediasrv->iram.physical_address = 0;
+ }
+
+ kfree(node);
+ mediasrv->nr_nodes--;
+ if (!mediasrv->nr_nodes) {
+ struct tegra_mediasrv_manager *manager = &mediasrv->manager;
+
+ tegra_avp_unload_lib(mediasrv->avp, manager->lib.handle);
+ manager->lib.handle = 0;
+
+ tegra_avp_release(mediasrv->avp);
+ mediasrv->avp = NULL;
+
+ tegra_rpc_release(manager->rpc);
+ manager->rpc = NULL;
+
+ tegra_sema_release(manager->sema);
+ manager->sema = NULL;
+ }
+
+ mutex_unlock(&mediasrv->lock);
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+static int mediasrv_alloc(struct tegra_mediasrv_node *node,
+ union tegra_mediaserver_alloc_info *in,
+ union tegra_mediaserver_alloc_info *out)
+{
+ struct tegra_mediasrv_info *mediasrv = node->mediasrv;
+ int e;
+
+ switch (in->in.tegra_mediaserver_resource_type) {
+ case TEGRA_MEDIASERVER_RESOURCE_BLOCK:
+ {
+ struct tegra_mediasrv_block *block;
+
+ block = kzalloc(sizeof(struct tegra_mediasrv_block),
+ GFP_KERNEL);
+ CHECK_NULL(block, block_alloc_fail);
+
+ block->block = in->in.u.block;
+ list_add(&block->entry, &node->blocks);
+ goto block_done;
+
+block_alloc_fail:
+ e = -ENOMEM;
+ goto fail;
+
+block_done:
+ mediasrv->nr_blocks++;
+ out->out.u.block.count = mediasrv->nr_blocks;
+ }
+ break;
+
+ case TEGRA_MEDIASERVER_RESOURCE_IRAM:
+ {
+ if (in->in.u.iram.tegra_mediaserver_iram_type ==
+ TEGRA_MEDIASERVER_IRAM_SHARED) {
+ if (!mediasrv->nr_iram_shared) {
+ size_t align, size;
+ struct nvmap_handle_ref *r = NULL;
+ unsigned long id;
+ int physical_address;
+
+ size = PAGE_ALIGN(in->in.u.iram.size);
+ r = nvmap_create_handle(mediasrv->nvmap, size);
+ CHECK_CONDITION(IS_ERR_OR_NULL(r),
+ iram_shared_handle_fail);
+
+ id = nvmap_ref_to_id(r);
+
+ align = max_t(size_t, in->in.u.iram.alignment,
+ PAGE_SIZE);
+ e = nvmap_alloc_handle_id(mediasrv->nvmap, id,
+ NVMAP_HEAP_CARVEOUT_IRAM, align,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ CHECK_STATUS(e, iram_shared_alloc_fail);
+
+ physical_address =
+ nvmap_pin_ids(mediasrv->nvmap, 1, &id);
+ CHECK_CONDITION((physical_address < 0),
+ iram_shared_pin_fail);
+
+ mediasrv->iram.rm_handle = id;
+ mediasrv->iram.physical_address =
+ physical_address;
+ goto iram_shared_done;
+
+iram_shared_pin_fail:
+ e = physical_address;
+iram_shared_alloc_fail:
+ nvmap_free_handle_id(mediasrv->nvmap, id);
+ goto fail;
+iram_shared_handle_fail:
+ e = -ENOMEM;
+ goto fail;
+ }
+
+iram_shared_done:
+ out->out.u.iram.rm_handle = mediasrv->iram.rm_handle;
+ out->out.u.iram.physical_address =
+ mediasrv->iram.physical_address;
+ mediasrv->nr_iram_shared++;
+ node->nr_iram_shared++;
+ } else if (in->in.u.iram.tegra_mediaserver_iram_type ==
+ TEGRA_MEDIASERVER_IRAM_SCRATCH) {
+ e = -EINVAL;
+ goto fail;
+ }
+ }
+ break;
+
+ default:
+ {
+ e = -EINVAL;
+ goto fail;
+ }
+ break;
+ }
+
+ return 0;
+
+fail:
+ return e;
+}
+
+static void mediasrv_free(struct tegra_mediasrv_node *node,
+ union tegra_mediaserver_free_info *in)
+{
+ struct tegra_mediasrv_info *mediasrv = node->mediasrv;
+
+ switch (in->in.tegra_mediaserver_resource_type) {
+ case TEGRA_MEDIASERVER_RESOURCE_BLOCK:
+ {
+ struct tegra_mediasrv_block *block = NULL;
+ struct tegra_mediasrv_block *temp;
+ struct list_head *entry;
+
+ list_for_each(entry, &node->blocks) {
+ temp = list_entry(entry, struct tegra_mediasrv_block,
+ entry);
+ if (temp->block.nvmm_block_handle !=
+ in->in.u.nvmm_block_handle)
+ continue;
+
+ block = temp;
+ break;
+ }
+
+ CHECK_NULL(block, done);
+ list_del(&block->entry);
+ kfree(block);
+ }
+ break;
+
+ case TEGRA_MEDIASERVER_RESOURCE_IRAM:
+ {
+ if (in->in.u.iram_rm_handle == mediasrv->iram.rm_handle &&
+ node->nr_iram_shared) {
+ node->nr_iram_shared--;
+ mediasrv->nr_iram_shared--;
+
+ if (!mediasrv->nr_iram_shared) {
+ nvmap_unpin_ids(mediasrv->nvmap, 1,
+ &mediasrv->iram.rm_handle);
+ nvmap_free_handle_id(mediasrv->nvmap,
+ mediasrv->iram.rm_handle);
+ mediasrv->iram.rm_handle = 0;
+ mediasrv->iram.physical_address = 0;
+ }
+ }
+
+ else
+ goto done;
+ }
+ break;
+ }
+
+done:
+ return;
+}
+
+static int mediasrv_update_block_info(
+ struct tegra_mediasrv_node *node,
+ union tegra_mediaserver_update_block_info *in
+)
+{
+ struct tegra_mediasrv_block *entry = NULL;
+ struct tegra_mediasrv_block *block = NULL;
+ int e;
+
+ list_for_each_entry(entry, &node->blocks, entry) {
+ if (entry->block.nvmm_block_handle != in->in.nvmm_block_handle)
+ continue;
+
+ block = entry;
+ break;
+ }
+
+ CHECK_NULL(block, fail);
+
+ block->block = in->in;
+ return 0;
+
+fail:
+ e = -EINVAL;
+ return e;
+}
+
+static long mediasrv_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_mediasrv_info *mediasrv = mediasrv_info;
+ struct tegra_mediasrv_node *node = file->private_data;
+ int e = -ENODEV;
+
+ mutex_lock(&mediasrv->lock);
+
+ switch (cmd) {
+ case TEGRA_MEDIASERVER_IOCTL_ALLOC:
+ {
+ union tegra_mediaserver_alloc_info in, out;
+ e = copy_from_user(&in, (void __user *)arg, sizeof(in));
+ CHECK_CONDITION(e, copy_fail);
+ e = mediasrv_alloc(node, &in, &out);
+ CHECK_STATUS(e, fail);
+ e = copy_to_user((void __user *)arg, &out, sizeof(out));
+ CHECK_CONDITION(e, copy_fail);
+ }
+ break;
+
+ case TEGRA_MEDIASERVER_IOCTL_FREE:
+ {
+ union tegra_mediaserver_free_info in;
+ e = copy_from_user(&in, (void __user *)arg, sizeof(in));
+ CHECK_CONDITION(e, copy_fail);
+ mediasrv_free(node, &in);
+ }
+ break;
+
+ case TEGRA_MEDIASERVER_IOCTL_UPDATE_BLOCK_INFO:
+ {
+ union tegra_mediaserver_update_block_info in;
+ e = copy_from_user(&in, (void __user *)arg, sizeof(in));
+ CHECK_CONDITION(e, copy_fail);
+ e = mediasrv_update_block_info(node, &in);
+ CHECK_CONDITION(e, fail);
+ }
+ break;
+
+ default:
+ {
+ e = -ENODEV;
+ goto fail;
+ }
+ break;
+ }
+
+ mutex_unlock(&mediasrv->lock);
+ return 0;
+
+copy_fail:
+ e = -EFAULT;
+fail:
+ mutex_unlock(&mediasrv->lock);
+ return e;
+}
+
+/*
+ * Kernel structures and entry points
+ */
+static const struct file_operations mediaserver_fops = {
+ .owner = THIS_MODULE,
+ .open = mediasrv_open,
+ .release = mediasrv_release,
+ .unlocked_ioctl = mediasrv_unlocked_ioctl,
+};
+
+static struct miscdevice mediaserver_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_mediaserver",
+ .fops = &mediaserver_fops,
+};
+
+static int __init tegra_mediaserver_init(void)
+{
+ struct tegra_mediasrv_info *mediasrv;
+ int e = 0;
+
+ CHECK_NULL(!mediasrv_info, busy);
+
+ mediasrv = kzalloc(sizeof(struct tegra_mediasrv_info), GFP_KERNEL);
+ CHECK_NULL(mediasrv, alloc_fail);
+
+ mediasrv->nvmap = nvmap_create_client(nvmap_dev, "tegra_mediaserver");
+ CHECK_NULL(mediasrv->nvmap, nvmap_create_fail);
+
+ e = misc_register(&mediaserver_misc_device);
+ CHECK_STATUS(e, register_fail);
+
+ mediasrv->nr_nodes = 0;
+ mutex_init(&mediasrv->lock);
+
+ mediasrv_info = mediasrv;
+ goto done;
+
+nvmap_create_fail:
+ e = -ENOMEM;
+ kfree(mediasrv);
+ goto done;
+
+register_fail:
+ nvmap_client_put(mediasrv->nvmap);
+ kfree(mediasrv);
+ goto done;
+
+alloc_fail:
+ e = -ENOMEM;
+ goto done;
+
+busy:
+ e = -EBUSY;
+ goto done;
+
+done:
+ return e;
+}
+
+void __exit tegra_mediaserver_cleanup(void)
+{
+ struct tegra_mediasrv_info *mediasrv = mediasrv_info;
+ int e;
+
+ e = misc_deregister(&mediaserver_misc_device);
+ CHECK_STATUS(e, fail);
+
+ nvmap_client_put(mediasrv->nvmap);
+ kfree(mediasrv);
+ mediasrv_info = NULL;
+
+fail:
+ return;
+}
+
+module_init(tegra_mediaserver_init);
+module_exit(tegra_mediaserver_cleanup);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/media/video/tegra/nvavp/Kconfig b/drivers/media/video/tegra/nvavp/Kconfig
new file mode 100644
index 000000000000..2d3af3f79fb3
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/Kconfig
@@ -0,0 +1,10 @@
+config TEGRA_NVAVP
+ bool "Enable support for Tegra NVAVP driver"
+ depends on ARCH_TEGRA && TEGRA_GRHOST
+ default n
+ help
+ Enables support for the push-buffer based driver for the Tegra
+ multimedia framework. Exports the Tegra nvavp interface on the
+ device node /dev/tegra_avpchannel.
+
+ If unsure, say N
diff --git a/drivers/media/video/tegra/nvavp/Makefile b/drivers/media/video/tegra/nvavp/Makefile
new file mode 100644
index 000000000000..82b4238fd085
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/Makefile
@@ -0,0 +1,3 @@
+GCOV_PROFILE := y
+obj-$(CONFIG_TEGRA_NVAVP) += nvavp_dev.o
+obj-$(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) += ../avp/headavp.o
diff --git a/drivers/media/video/tegra/nvavp/nvavp_dev.c b/drivers/media/video/tegra/nvavp/nvavp_dev.c
new file mode 100644
index 000000000000..c57055d6df4c
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/nvavp_dev.c
@@ -0,0 +1,1405 @@
+/*
+ * drivers/media/video/tegra/nvavp/nvavp_dev.c
+ *
+ * Copyright (C) 2011 NVIDIA Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/nvhost.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tegra_nvavp.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/hardware.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/legacy_irq.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+#include "../../../../video/tegra/host/host1x/host1x_syncpt.h"
+#include "../../../../video/tegra/host/dev.h"
+#include "../../../../video/tegra/host/nvhost_acm.h"
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
+#include "../avp/headavp.h"
+#endif
+#include "nvavp_os.h"
+
+#define TEGRA_NVAVP_NAME "nvavp"
+
+#define NVAVP_PUSHBUFFER_SIZE 4096
+
+#define NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE (sizeof(u32) * 3)
+
+#define TEGRA_NVAVP_RESET_VECTOR_ADDR \
+ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+
+#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define FLOW_MODE_STOP (0x2 << 29)
+#define FLOW_MODE_NONE 0x0
+
+#define NVAVP_OS_INBOX IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
+#define NVAVP_OS_OUTBOX IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+
+#define NVAVP_INBOX_VALID (1 << 29)
+
+/* AVP behavior params */
+#define NVAVP_OS_IDLE_TIMEOUT 100 /* milliseconds */
+
+struct nvavp_info {
+ u32 clk_enabled;
+ struct clk *bsev_clk;
+ struct clk *vde_clk;
+ struct clk *cop_clk;
+
+ /* used for dvfs */
+ struct clk *sclk;
+ struct clk *emc_clk;
+ unsigned long sclk_rate;
+ unsigned long emc_clk_rate;
+
+ int mbox_from_avp_pend_irq;
+
+ struct mutex open_lock;
+ int refcount;
+
+ struct work_struct clock_disable_work;
+
+ /* os information */
+ struct nvavp_os_info os_info;
+
+ /* ucode information */
+ struct nvavp_ucode_info ucode_info;
+
+ /* client for driver allocations, persistent */
+ struct nvmap_client *nvmap;
+
+ struct mutex pushbuffer_lock;
+ struct nvmap_handle_ref *pushbuf_handle;
+ unsigned long pushbuf_phys;
+ u8 *pushbuf_data;
+ u32 pushbuf_index;
+ u32 pushbuf_fence;
+
+ struct nv_e276_control *os_control;
+
+ struct nvhost_syncpt *nvhost_syncpt;
+ u32 syncpt_id;
+ u32 syncpt_value;
+
+ struct nvhost_device *nvhost_dev;
+ struct miscdevice misc_dev;
+};
+
+struct nvavp_clientctx {
+ struct nvmap_client *nvmap;
+ struct nvavp_pushbuffer_submit_hdr submit_hdr;
+ struct nvavp_reloc relocs[NVAVP_MAX_RELOCATION_COUNT];
+ struct nvmap_handle_ref *gather_mem;
+ int num_relocs;
+ struct nvavp_info *nvavp;
+};
+
+static struct clk *nvavp_clk_get(struct nvavp_info *nvavp, int id)
+{
+ if (!nvavp)
+ return NULL;
+
+ if (id == NVAVP_MODULE_ID_AVP)
+ return nvavp->sclk;
+ if (id == NVAVP_MODULE_ID_VDE)
+ return nvavp->vde_clk;
+ if (id == NVAVP_MODULE_ID_EMC)
+ return nvavp->emc_clk;
+
+ return NULL;
+}
+
+static void nvavp_clk_ctrl(struct nvavp_info *nvavp, u32 clk_en)
+{
+ if (clk_en && !nvavp->clk_enabled) {
+ clk_enable(nvavp->bsev_clk);
+ clk_enable(nvavp->vde_clk);
+ clk_set_rate(nvavp->emc_clk, nvavp->emc_clk_rate);
+ clk_set_rate(nvavp->sclk, nvavp->sclk_rate);
+ nvavp->clk_enabled = 1;
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting sclk to %lu\n",
+ __func__, nvavp->sclk_rate);
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting emc_clk to %lu\n",
+ __func__, nvavp->emc_clk_rate);
+ } else if (!clk_en && nvavp->clk_enabled) {
+ clk_disable(nvavp->bsev_clk);
+ clk_disable(nvavp->vde_clk);
+ clk_set_rate(nvavp->emc_clk, 0);
+ clk_set_rate(nvavp->sclk, 0);
+ nvavp->clk_enabled = 0;
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: resetting emc_clk "
+ "and sclk\n", __func__);
+ }
+}
+
+static u32 nvavp_check_idle(struct nvavp_info *nvavp)
+{
+ struct nv_e276_control *control = nvavp->os_control;
+ return (control->put == control->get) ? 1 : 0;
+}
+
+static void clock_disable_handler(struct work_struct *work)
+{
+ struct nvavp_info *nvavp;
+
+ nvavp = container_of(work, struct nvavp_info,
+ clock_disable_work);
+
+ mutex_lock(&nvavp->pushbuffer_lock);
+ nvavp_clk_ctrl(nvavp, !nvavp_check_idle(nvavp));
+ mutex_unlock(&nvavp->pushbuffer_lock);
+}
+
+static int nvavp_service(struct nvavp_info *nvavp)
+{
+ struct nvavp_os_info *os = &nvavp->os_info;
+ u8 *debug_print;
+ u32 inbox;
+
+ inbox = readl(NVAVP_OS_INBOX);
+ if (!(inbox & NVAVP_INBOX_VALID))
+ inbox = 0x00000000;
+
+ writel(0x00000000, NVAVP_OS_INBOX);
+
+ if (inbox & NVE276_OS_INTERRUPT_VIDEO_IDLE)
+ schedule_work(&nvavp->clock_disable_work);
+
+ if (inbox & NVE276_OS_INTERRUPT_DEBUG_STRING) {
+ /* Should only occur with debug AVP OS builds */
+ debug_print = os->data;
+ debug_print += os->debug_offset;
+ dev_info(&nvavp->nvhost_dev->dev, "%s\n", debug_print);
+ }
+ if (inbox & (NVE276_OS_INTERRUPT_SEMAPHORE_AWAKEN |
+ NVE276_OS_INTERRUPT_EXECUTE_AWAKEN)) {
+ dev_info(&nvavp->nvhost_dev->dev,
+ "AVP awaken event (0x%x)\n", inbox);
+ }
+ if (inbox & NVE276_OS_INTERRUPT_AVP_FATAL_ERROR) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "fatal AVP error (0x%08X)\n", inbox);
+ }
+ if (inbox & NVE276_OS_INTERRUPT_AVP_BREAKPOINT)
+ dev_err(&nvavp->nvhost_dev->dev, "AVP breakpoint hit\n");
+ if (inbox & NVE276_OS_INTERRUPT_TIMEOUT)
+ dev_err(&nvavp->nvhost_dev->dev, "AVP timeout\n");
+
+ return 0;
+}
+
+static irqreturn_t nvavp_mbox_pending_isr(int irq, void *data)
+{
+ struct nvavp_info *nvavp = data;
+
+ nvavp_service(nvavp);
+
+ return IRQ_HANDLED;
+}
+
+static void nvavp_halt_avp(struct nvavp_info *nvavp)
+{
+ /* ensure the AVP is halted */
+ writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+ tegra_periph_reset_assert(nvavp->cop_clk);
+
+ writel(0, NVAVP_OS_OUTBOX);
+ writel(0, NVAVP_OS_INBOX);
+}
+
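+/* Restart the AVP (COP) at reset_addr: halt the flow controller, program
+ * the exception/reset vector, bring up sclk/emc, then pulse the COP reset
+ * and release the halt so the AVP starts executing the loaded OS image. */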
+static int nvavp_reset_avp(struct nvavp_info *nvavp, unsigned long reset_addr)
+{
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
+ unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
+ dma_addr_t stub_data_phys;
+
+ _tegra_avp_boot_stub_data.map_phys_addr = nvavp->os_info.phys;
+ _tegra_avp_boot_stub_data.jump_addr = reset_addr;
+ wmb();
+ stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
+ sizeof(_tegra_avp_boot_stub_data),
+ DMA_TO_DEVICE);
+ rmb();
+ reset_addr = (unsigned long)stub_data_phys;
+#endif
+ writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+
+ writel(reset_addr, TEGRA_NVAVP_RESET_VECTOR_ADDR);
+
+ clk_enable(nvavp->sclk);
+ clk_enable(nvavp->emc_clk);
+
+ /* If sclk_rate and emc_clk_rate are not set by user space, the max
+ * clock in the dvfs table will be used to get the best performance.
+ */
+ nvavp->sclk_rate = ULONG_MAX;
+ nvavp->emc_clk_rate = ULONG_MAX;
+
+ tegra_periph_reset_assert(nvavp->cop_clk);
+ udelay(2);
+ tegra_periph_reset_deassert(nvavp->cop_clk);
+
+ writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
+ dma_unmap_single(NULL, stub_data_phys,
+ sizeof(_tegra_avp_boot_stub_data),
+ DMA_TO_DEVICE);
+#endif
+ return 0;
+}
+
+static void nvavp_halt_vde(struct nvavp_info *nvavp)
+{
+ if (nvavp->clk_enabled) {
+ tegra_periph_reset_assert(nvavp->bsev_clk);
+ clk_disable(nvavp->bsev_clk);
+ tegra_periph_reset_assert(nvavp->vde_clk);
+ clk_disable(nvavp->vde_clk);
+ nvavp->clk_enabled = 0;
+ }
+}
+
+static int nvavp_reset_vde(struct nvavp_info *nvavp)
+{
+ clk_enable(nvavp->bsev_clk);
+ tegra_periph_reset_assert(nvavp->bsev_clk);
+ udelay(2);
+ tegra_periph_reset_deassert(nvavp->bsev_clk);
+
+ clk_enable(nvavp->vde_clk);
+ tegra_periph_reset_assert(nvavp->vde_clk);
+ udelay(2);
+ tegra_periph_reset_deassert(nvavp->vde_clk);
+
+ /*
+ * VDE clock is set to max freq by default.
+ * VDE clock can be set to different freq if needed
+ * through ioctl.
+ */
+ clk_set_rate(nvavp->vde_clk, ULONG_MAX);
+
+ nvavp->clk_enabled = 1;
+ return 0;
+}
+
+static int nvavp_pushbuffer_alloc(struct nvavp_info *nvavp)
+{
+ int ret = 0;
+
+ nvavp->pushbuf_handle = nvmap_alloc(nvavp->nvmap, NVAVP_PUSHBUFFER_SIZE,
+ SZ_1M, NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR(nvavp->pushbuf_handle)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot create pushbuffer handle\n");
+ ret = PTR_ERR(nvavp->pushbuf_handle);
+ goto err_pushbuf_alloc;
+ }
+ nvavp->pushbuf_data = (u8 *)nvmap_mmap(nvavp->pushbuf_handle);
+ if (!nvavp->pushbuf_data) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot map pushbuffer handle\n");
+ ret = -ENOMEM;
+ goto err_pushbuf_mmap;
+ }
+ nvavp->pushbuf_phys = nvmap_pin(nvavp->nvmap, nvavp->pushbuf_handle);
+ if (IS_ERR((void *)nvavp->pushbuf_phys)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot pin pushbuffer handle\n");
+ ret = PTR_ERR((void *)nvavp->pushbuf_phys);
+ goto err_pushbuf_pin;
+ }
+
+ memset(nvavp->pushbuf_data, 0, NVAVP_PUSHBUFFER_SIZE);
+
+ return 0;
+
+err_pushbuf_pin:
+ nvmap_munmap(nvavp->pushbuf_handle, nvavp->pushbuf_data);
+err_pushbuf_mmap:
+ nvmap_free(nvavp->nvmap, nvavp->pushbuf_handle);
+err_pushbuf_alloc:
+ return ret;
+}
+
+static void nvavp_pushbuffer_free(struct nvavp_info *nvavp)
+{
+ nvmap_unpin(nvavp->nvmap, nvavp->pushbuf_handle);
+ nvmap_munmap(nvavp->pushbuf_handle, nvavp->pushbuf_data);
+ nvmap_free(nvavp->nvmap, nvavp->pushbuf_handle);
+}
+
+static int nvavp_pushbuffer_init(struct nvavp_info *nvavp)
+{
+ void *ptr;
+ struct nvavp_os_info *os = &nvavp->os_info;
+ struct nv_e276_control *control;
+ u32 temp;
+ int ret;
+
+ ret = nvavp_pushbuffer_alloc(nvavp);
+ if (ret) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "unable to alloc pushbuffer\n");
+ return ret;
+ }
+
+ ptr = os->data;
+ ptr += os->control_offset;
+ nvavp->os_control = (struct nv_e276_control *)ptr;
+
+ control = nvavp->os_control;
+ memset(control, 0, sizeof(struct nv_e276_control));
+
+ /* init get and put pointers */
+ writel(0x0, &control->put);
+ writel(0x0, &control->get);
+
+ /* enable avp VDE clock control and disable iram clock gating */
+ writel(0x0, &control->idle_clk_enable);
+ writel(0x0, &control->iram_clk_gating);
+
+ /* enable avp idle timeout interrupt */
+ writel(0x1, &control->idle_notify_enable);
+ writel(NVAVP_OS_IDLE_TIMEOUT, &control->idle_notify_delay);
+
+ /* init dma start and end pointers */
+ writel(nvavp->pushbuf_phys, &control->dma_start);
+ writel((nvavp->pushbuf_phys + NVAVP_PUSHBUFFER_SIZE),
+ &control->dma_end);
+
+ writel(0x00, &nvavp->pushbuf_index);
+ temp = NVAVP_PUSHBUFFER_SIZE - NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE;
+ writel(temp, &nvavp->pushbuf_fence);
+
+ nvavp->syncpt_id = NVSYNCPT_AVP_0;
+ nvavp->syncpt_value = nvhost_syncpt_read(nvavp->nvhost_syncpt,
+ nvavp->syncpt_id);
+
+ return 0;
+}
+
+static void nvavp_pushbuffer_deinit(struct nvavp_info *nvavp)
+{
+ nvavp_pushbuffer_free(nvavp);
+}
+
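+/* Queue a command-buffer gather into the shared pushbuffer: optionally
+ * emit a SET_MICROCODE sequence, then a GATHER pointing at the caller's
+ * buffer and an optional syncpoint increment, advance the put pointer
+ * and ring the AVP outbox to kick off execution. */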
+static int nvavp_pushbuffer_update(struct nvavp_info *nvavp, u32 phys_addr,
+ u32 gather_count, struct nvavp_syncpt *syncpt,
+ u32 ext_ucode_flag)
+{
+ struct nv_e276_control *control = nvavp->os_control;
+ u32 gather_cmd, setucode_cmd, sync = 0;
+ u32 wordcount = 0;
+ u32 index, value = -1;
+
+ mutex_lock(&nvavp->pushbuffer_lock);
+
+ /* check for pushbuffer wrapping */
+ if (nvavp->pushbuf_index >= nvavp->pushbuf_fence)
+ nvavp->pushbuf_index = 0;
+
+ if (!ext_ucode_flag) {
+ setucode_cmd =
+ NVE26E_CH_OPCODE_INCR(NVE276_SET_MICROCODE_A, 3);
+
+ index = wordcount + nvavp->pushbuf_index;
+ writel(setucode_cmd, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+
+ index = wordcount + nvavp->pushbuf_index;
+ writel(0, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+
+ index = wordcount + nvavp->pushbuf_index;
+ writel(nvavp->ucode_info.phys, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+
+ index = wordcount + nvavp->pushbuf_index;
+ writel(nvavp->ucode_info.size, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+ }
+
+ gather_cmd = NVE26E_CH_OPCODE_GATHER(0, 0, 0, gather_count);
+
+ if (syncpt) {
+ value = ++nvavp->syncpt_value;
+ /* XXX: NvSchedValueWrappingComparison */
+ sync = NVE26E_CH_OPCODE_IMM(NVE26E_HOST1X_INCR_SYNCPT,
+ (NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE << 8) |
+ (nvavp->syncpt_id & 0xFF));
+ }
+
+ /* write commands out */
+ index = wordcount + nvavp->pushbuf_index;
+ writel(gather_cmd, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+
+ index = wordcount + nvavp->pushbuf_index;
+ writel(phys_addr, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+
+ if (syncpt) {
+ index = wordcount + nvavp->pushbuf_index;
+ writel(sync, (nvavp->pushbuf_data + index));
+ wordcount += sizeof(u32);
+ }
+
+ /* enable clocks to VDE/BSEV */
+ nvavp_clk_ctrl(nvavp, 1);
+
+ /* update put pointer */
+ nvavp->pushbuf_index = (nvavp->pushbuf_index + wordcount) &
+ (NVAVP_PUSHBUFFER_SIZE - 1);
+ writel(nvavp->pushbuf_index, &control->put);
+ wmb();
+
+ /* wake up avp */
+ writel(0xA0000001, NVAVP_OS_OUTBOX);
+
+ /* Fill out fence struct */
+ if (syncpt) {
+ syncpt->id = nvavp->syncpt_id;
+ syncpt->value = value;
+ }
+
+ mutex_unlock(&nvavp->pushbuffer_lock);
+
+ return 0;
+}
+
+static void nvavp_unload_ucode(struct nvavp_info *nvavp)
+{
+ nvmap_unpin(nvavp->nvmap, nvavp->ucode_info.handle);
+ nvmap_munmap(nvavp->ucode_info.handle, nvavp->ucode_info.data);
+ nvmap_free(nvavp->nvmap, nvavp->ucode_info.handle);
+ kfree(nvavp->ucode_info.ucode_bin);
+}
+
+static int nvavp_load_ucode(struct nvavp_info *nvavp)
+{
+ struct nvavp_ucode_info *ucode_info = &nvavp->ucode_info;
+ const struct firmware *nvavp_ucode_fw;
+ char fw_ucode_file[32];
+ void *ptr;
+ int ret = 0;
+
+ if (!ucode_info->ucode_bin) {
+ sprintf(fw_ucode_file, "nvavp_vid_ucode.bin");
+
+ ret = request_firmware(&nvavp_ucode_fw, fw_ucode_file,
+ nvavp->misc_dev.this_device);
+ if (ret) {
+ /* Try alternative version */
+ sprintf(fw_ucode_file, "nvavp_vid_ucode_alt.bin");
+
+ ret = request_firmware(&nvavp_ucode_fw,
+ fw_ucode_file,
+ nvavp->misc_dev.this_device);
+
+ if (ret) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot read ucode firmware '%s'\n",
+ fw_ucode_file);
+ goto err_req_ucode;
+ }
+ }
+
+ dev_info(&nvavp->nvhost_dev->dev,
+ "read ucode firmware from '%s' (%d bytes)\n",
+ fw_ucode_file, nvavp_ucode_fw->size);
+
+ ptr = (void *)nvavp_ucode_fw->data;
+
+ if (strncmp((const char *)ptr, "NVAVPAPP", 8)) {
+ dev_info(&nvavp->nvhost_dev->dev,
+ "ucode hdr string mismatch\n");
+ ret = -EINVAL;
+ goto err_req_ucode;
+ }
+ ptr += 8;
+ ucode_info->size = nvavp_ucode_fw->size - 8;
+
+ ucode_info->ucode_bin = kzalloc(ucode_info->size,
+ GFP_KERNEL);
+ if (!ucode_info->ucode_bin) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot allocate ucode bin\n");
+ ret = -ENOMEM;
+ goto err_ubin_alloc;
+ }
+
+ ucode_info->handle = nvmap_alloc(nvavp->nvmap,
+ nvavp->ucode_info.size,
+ SZ_1M, NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR(ucode_info->handle)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot create ucode handle\n");
+ ret = PTR_ERR(ucode_info->handle);
+ goto err_ucode_alloc;
+ }
+ ucode_info->data = (u8 *)nvmap_mmap(ucode_info->handle);
+ if (!ucode_info->data) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot map ucode handle\n");
+ ret = -ENOMEM;
+ goto err_ucode_mmap;
+ }
+ ucode_info->phys = nvmap_pin(nvavp->nvmap, ucode_info->handle);
+ if (IS_ERR((void *)ucode_info->phys)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot pin ucode handle\n");
+ ret = PTR_ERR((void *)ucode_info->phys);
+ goto err_ucode_pin;
+ }
+ memcpy(ucode_info->ucode_bin, ptr, ucode_info->size);
+ release_firmware(nvavp_ucode_fw);
+ }
+
+ memcpy(ucode_info->data, ucode_info->ucode_bin, ucode_info->size);
+ return 0;
+
+err_ucode_pin:
+ nvmap_munmap(ucode_info->handle, ucode_info->data);
+err_ucode_mmap:
+ nvmap_free(nvavp->nvmap, ucode_info->handle);
+err_ucode_alloc:
+ kfree(nvavp->ucode_info.ucode_bin);
+err_ubin_alloc:
+ release_firmware(nvavp_ucode_fw);
+err_req_ucode:
+ return ret;
+}
+
+static void nvavp_unload_os(struct nvavp_info *nvavp)
+{
+ nvmap_unpin(nvavp->nvmap, nvavp->os_info.handle);
+ nvmap_munmap(nvavp->os_info.handle, nvavp->os_info.data);
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
+ nvmap_free(nvavp->nvmap, nvavp->os_info.handle);
+#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU)
+ nvmap_free_iovm(nvavp->nvmap, nvavp->os_info.handle);
+#endif
+ kfree(nvavp->os_info.os_bin);
+}
+
+static int nvavp_load_os(struct nvavp_info *nvavp, char *fw_os_file)
+{
+ struct nvavp_os_info *os_info = &nvavp->os_info;
+ const struct firmware *nvavp_os_fw;
+ void *ptr;
+ u32 size;
+ int ret = 0;
+
+ if (!os_info->os_bin) {
+ ret = request_firmware(&nvavp_os_fw, fw_os_file,
+ nvavp->misc_dev.this_device);
+ if (ret) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot read os firmware '%s'\n", fw_os_file);
+ goto err_req_fw;
+ }
+
+ dev_info(&nvavp->nvhost_dev->dev,
+ "read firmware from '%s' (%d bytes)\n",
+ fw_os_file, nvavp_os_fw->size);
+
+ ptr = (void *)nvavp_os_fw->data;
+
+ if (strncmp((const char *)ptr, "NVAVP-OS", 8)) {
+ dev_info(&nvavp->nvhost_dev->dev,
+ "os hdr string mismatch\n");
+ ret = -EINVAL;
+ goto err_os_bin;
+ }
+
+ ptr += 8;
+ os_info->entry_offset = *((u32 *)ptr);
+ ptr += sizeof(u32);
+ os_info->control_offset = *((u32 *)ptr);
+ ptr += sizeof(u32);
+ os_info->debug_offset = *((u32 *)ptr);
+ ptr += sizeof(u32);
+
+ size = *((u32 *)ptr); ptr += sizeof(u32);
+
+ os_info->size = size;
+ os_info->os_bin = kzalloc(os_info->size,
+ GFP_KERNEL);
+ if (!os_info->os_bin) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot allocate os bin\n");
+ ret = -ENOMEM;
+ goto err_os_bin;
+ }
+
+ memcpy(os_info->os_bin, ptr, os_info->size);
+ memset(os_info->data + os_info->size, 0, SZ_1M - os_info->size);
+
+ dev_info(&nvavp->nvhost_dev->dev,
+ "entry=%08x control=%08x debug=%08x size=%d\n",
+ os_info->entry_offset, os_info->control_offset,
+ os_info->debug_offset, os_info->size);
+ release_firmware(nvavp_os_fw);
+ }
+
+ memcpy(os_info->data, os_info->os_bin, os_info->size);
+ os_info->reset_addr = os_info->phys + os_info->entry_offset;
+
+ dev_info(&nvavp->nvhost_dev->dev,
+ "AVP os at vaddr=%p paddr=%lx reset_addr=%p\n",
+ os_info->data, (unsigned long)(os_info->phys),
+ (void *)os_info->reset_addr);
+ return 0;
+
+err_os_bin:
+ release_firmware(nvavp_os_fw);
+err_req_fw:
+ return ret;
+}
+
+static int nvavp_init(struct nvavp_info *nvavp)
+{
+ char fw_os_file[32];
+ int ret = 0;
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
+ /* paddr is any address returned from nvmap_pin */
+ /* vaddr is AVP_KERNEL_VIRT_BASE */
+ dev_info(&nvavp->nvhost_dev->dev,
+ "using AVP MMU to relocate AVP os\n");
+ sprintf(fw_os_file, "nvavp_os.bin");
+ nvavp->os_info.reset_addr = AVP_KERNEL_VIRT_BASE;
+#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
+ /* paddr is any address behind SMMU */
+ /* vaddr is TEGRA_SMMU_BASE */
+ dev_info(&nvavp->nvhost_dev->dev,
+ "using SMMU at %lx to load AVP kernel\n",
+ (unsigned long)nvavp->os_info.phys);
+ BUG_ON(nvavp->os_info.phys != 0xeff00000
+ && nvavp->os_info.phys != 0x0ff00000);
+ sprintf(fw_os_file, "nvavp_os_%08lx.bin",
+ (unsigned long)nvavp->os_info.phys);
+ nvavp->os_info.reset_addr = nvavp->os_info.phys;
+#else /* nvmem= carveout */
+ /* paddr is found in nvmem= carveout */
+ /* vaddr is same as paddr */
+ /* Find nvmem carveout */
+ if (!pfn_valid(__phys_to_pfn(0x8e000000))) {
+ nvavp->os_info.phys = 0x8e000000;
+ } else if (!pfn_valid(__phys_to_pfn(0x9e000000))) {
+ nvavp->os_info.phys = 0x9e000000;
+ } else if (!pfn_valid(__phys_to_pfn(0xbe000000))) {
+ nvavp->os_info.phys = 0xbe000000;
+ } else {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot find nvmem= carveout to load AVP os\n");
+ dev_err(&nvavp->nvhost_dev->dev,
+ "check kernel command line "
+ "to see if nvmem= is defined\n");
+ BUG();
+ }
+ dev_info(&nvavp->nvhost_dev->dev,
+ "using nvmem= carveout at %lx to load AVP os\n",
+ nvavp->os_info.phys);
+ sprintf(fw_os_file, "nvavp_os_%08lx.bin", nvavp->os_info.phys);
+ nvavp->os_info.reset_addr = nvavp->os_info.phys;
+ nvavp->os_info.data = ioremap(nvavp->os_info.phys, SZ_1M);
+#endif
+
+ ret = nvavp_load_os(nvavp, fw_os_file);
+ if (ret) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "unable to load os firmware '%s'\n", fw_os_file);
+ goto err_exit;
+ }
+
+ ret = nvavp_pushbuffer_init(nvavp);
+ if (ret) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "unable to init pushbuffer\n");
+ goto err_exit;
+ }
+
+ ret = nvavp_load_ucode(nvavp);
+ if (ret) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "unable to load ucode\n");
+ goto err_exit;
+ }
+
+ tegra_init_legacy_irq_cop();
+
+ nvavp_reset_vde(nvavp);
+ nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
+ enable_irq(nvavp->mbox_from_avp_pend_irq);
+
+err_exit:
+ return ret;
+}
+
+static void nvavp_uninit(struct nvavp_info *nvavp)
+{
+ disable_irq(nvavp->mbox_from_avp_pend_irq);
+
+ cancel_work_sync(&nvavp->clock_disable_work);
+
+ nvavp_pushbuffer_deinit(nvavp);
+
+ nvavp_halt_vde(nvavp);
+ nvavp_halt_avp(nvavp);
+
+ clk_disable(nvavp->sclk);
+ clk_disable(nvavp->emc_clk);
+}
+
+static int nvavp_set_clock_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct clk *c;
+ struct nvavp_clock_args config;
+
+ if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
+ return -EFAULT;
+
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d, clk_rate=%lu\n",
+ __func__, config.id, config.rate);
+
+ if (config.id == NVAVP_MODULE_ID_AVP)
+ nvavp->sclk_rate = config.rate;
+ else if (config.id == NVAVP_MODULE_ID_EMC)
+ nvavp->emc_clk_rate = config.rate;
+
+ c = nvavp_clk_get(nvavp, config.id);
+ if (IS_ERR_OR_NULL(c))
+ return -EINVAL;
+
+ clk_enable(c);
+ clk_set_rate(c, config.rate);
+
+ config.rate = clk_get_rate(c);
+ clk_disable(c);
+ if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nvavp_get_clock_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct clk *c;
+ struct nvavp_clock_args config;
+
+ if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
+ return -EFAULT;
+
+ c = nvavp_clk_get(nvavp, config.id);
+ if (IS_ERR_OR_NULL(c))
+ return -EINVAL;
+
+ clk_enable(c);
+ config.rate = clk_get_rate(c);
+ clk_disable(c);
+
+ if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nvavp_get_syncpointid_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ u32 id = nvavp->syncpt_id;
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &id, sizeof(u32)))
+ return -EFAULT;
+ else
+ return 0;
+ }
+ return -EFAULT;
+}
+
+static int nvavp_set_nvmapfd_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_set_nvmap_fd_args buf;
+ struct nvmap_client *new_client;
+ int fd;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(&buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ fd = buf.fd;
+ new_client = nvmap_client_get_file(fd);
+ if (IS_ERR(new_client))
+ return PTR_ERR(new_client);
+
+ clientctx->nvmap = new_client;
+ return 0;
+}
+
+static int nvavp_pushbuffer_submit_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct nvavp_pushbuffer_submit_hdr hdr;
+ u32 *cmdbuf_data;
+ struct nvmap_handle *cmdbuf_handle = NULL;
+ struct nvmap_handle_ref *cmdbuf_dupe;
+ int ret = 0, i;
+ unsigned long phys_addr;
+ unsigned long virt_addr;
+ struct nvavp_pushbuffer_submit_hdr *user_hdr =
+ (struct nvavp_pushbuffer_submit_hdr *) arg;
+ struct nvavp_syncpt syncpt;
+
+ syncpt.id = NVSYNCPT_INVALID;
+ syncpt.value = 0;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(&hdr, (void __user *)arg,
+ sizeof(struct nvavp_pushbuffer_submit_hdr)))
+ return -EFAULT;
+ }
+
+ if (!hdr.cmdbuf.mem)
+ return 0;
+
+ if (hdr.num_relocs > NVAVP_MAX_RELOCATION_COUNT)
+ return -EINVAL;
+
+ if (copy_from_user(clientctx->relocs, (void __user *)hdr.relocs,
+ sizeof(struct nvavp_reloc) * hdr.num_relocs)) {
+ return -EFAULT;
+ }
+
+ cmdbuf_handle = nvmap_get_handle_id(clientctx->nvmap, hdr.cmdbuf.mem);
+ if (cmdbuf_handle == NULL) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid cmd buffer handle %08x\n", hdr.cmdbuf.mem);
+ return -EPERM;
+ }
+
+ /* duplicate the new pushbuffer's handle into the nvavp driver's
+ * nvmap context, to ensure that the handle won't be freed as
+ * long as it is in-use by the fb driver */
+ cmdbuf_dupe = nvmap_duplicate_handle_id(nvavp->nvmap, hdr.cmdbuf.mem);
+ nvmap_handle_put(cmdbuf_handle);
+
+ if (IS_ERR(cmdbuf_dupe)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "could not duplicate handle\n");
+ return PTR_ERR(cmdbuf_dupe);
+ }
+
+ phys_addr = nvmap_pin(nvavp->nvmap, cmdbuf_dupe);
+ if (IS_ERR((void *)phys_addr)) {
+ dev_err(&nvavp->nvhost_dev->dev, "could not pin handle\n");
+ nvmap_free(nvavp->nvmap, cmdbuf_dupe);
+ return PTR_ERR((void *)phys_addr);
+ }
+
+ virt_addr = (unsigned long)nvmap_mmap(cmdbuf_dupe);
+ if (!virt_addr) {
+ dev_err(&nvavp->nvhost_dev->dev, "cannot map cmdbuf handle\n");
+ ret = -ENOMEM;
+ goto err_cmdbuf_mmap;
+ }
+
+ cmdbuf_data = (u32 *)(virt_addr + hdr.cmdbuf.offset);
+
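+	/* Apply relocations: write each target handle's physical address
+	 * (plus its offset) into the mapped command buffer. */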
+ for (i = 0; i < hdr.num_relocs; i++) {
+ u32 *reloc_addr, target_phys_addr;
+
+ if (clientctx->relocs[i].cmdbuf_mem != hdr.cmdbuf.mem) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "reloc info does not match target bufferID\n");
+ ret = -EPERM;
+ goto err_reloc_info;
+ }
+
+ reloc_addr = cmdbuf_data +
+ (clientctx->relocs[i].cmdbuf_offset >> 2);
+
+ target_phys_addr = nvmap_handle_address(clientctx->nvmap,
+ clientctx->relocs[i].target);
+ target_phys_addr += clientctx->relocs[i].target_offset;
+ writel(target_phys_addr, reloc_addr);
+ }
+
+ if (hdr.syncpt) {
+ ret = nvavp_pushbuffer_update(nvavp,
+ (phys_addr + hdr.cmdbuf.offset),
+ hdr.cmdbuf.words, &syncpt,
+ (hdr.flags & NVAVP_UCODE_EXT));
+
+ if (copy_to_user((void __user *)user_hdr->syncpt, &syncpt,
+ sizeof(struct nvavp_syncpt))) {
+ ret = -EFAULT;
+ goto err_reloc_info;
+ }
+ } else {
+ ret = nvavp_pushbuffer_update(nvavp,
+ (phys_addr + hdr.cmdbuf.offset),
+ hdr.cmdbuf.words, NULL,
+ (hdr.flags & NVAVP_UCODE_EXT));
+ }
+
+err_reloc_info:
+ nvmap_munmap(cmdbuf_dupe, (void *)virt_addr);
+err_cmdbuf_mmap:
+ nvmap_unpin(nvavp->nvmap, cmdbuf_dupe);
+ nvmap_free(nvavp->nvmap, cmdbuf_dupe);
+ return ret;
+}
+
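+/* The first open initializes the AVP context (presumably loading its OS
+ * image and ucode); later opens only take a reference on the shared state. */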
+static int tegra_nvavp_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
+ int ret = 0;
+ struct nvavp_clientctx *clientctx;
+
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
+
+ nonseekable_open(inode, filp);
+
+ clientctx = kzalloc(sizeof(*clientctx), GFP_KERNEL);
+ if (!clientctx)
+ return -ENOMEM;
+
+ mutex_lock(&nvavp->open_lock);
+
+ if (!nvavp->refcount)
+ ret = nvavp_init(nvavp);
+
+ if (!ret)
+ nvavp->refcount++;
+
+ clientctx->nvmap = nvavp->nvmap;
+ clientctx->nvavp = nvavp;
+
+ filp->private_data = clientctx;
+
+ nvhost_module_busy(&nvavp->nvhost_dev->host->mod);
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
+}
+
+static int tegra_nvavp_release(struct inode *inode, struct file *filp)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ int ret = 0;
+
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
+
+ filp->private_data = NULL;
+ nvhost_module_idle(&nvavp->nvhost_dev->host->mod);
+
+ mutex_lock(&nvavp->open_lock);
+
+ if (!nvavp->refcount) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "releasing while in invalid state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nvavp->refcount > 0)
+ nvavp->refcount--;
+ if (!nvavp->refcount)
+ nvavp_uninit(nvavp);
+
+out:
+ mutex_unlock(&nvavp->open_lock);
+ kfree(clientctx);
+ return ret;
+}
+
+static long tegra_nvavp_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
+ return -EFAULT;
+
+ switch (cmd) {
+ case NVAVP_IOCTL_SET_NVMAP_FD:
+ ret = nvavp_set_nvmapfd_ioctl(filp, cmd, arg);
+ break;
+ case NVAVP_IOCTL_GET_SYNCPOINT_ID:
+ ret = nvavp_get_syncpointid_ioctl(filp, cmd, arg);
+ break;
+ case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT:
+ ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, arg);
+ break;
+ case NVAVP_IOCTL_SET_CLOCK:
+ ret = nvavp_set_clock_ioctl(filp, cmd, arg);
+ break;
+ case NVAVP_IOCTL_GET_CLOCK:
+ ret = nvavp_get_clock_ioctl(filp, cmd, arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations tegra_nvavp_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_nvavp_open,
+ .release = tegra_nvavp_release,
+ .unlocked_ioctl = tegra_nvavp_ioctl,
+};
+
+static int tegra_nvavp_probe(struct nvhost_device *ndev)
+{
+ struct nvavp_info *nvavp;
+ int irq;
+ unsigned int heap_mask;
+ u32 iovmm_addr;
+ int ret = 0;
+
+ irq = nvhost_get_irq_byname(ndev, "mbox_from_nvavp_pending");
+ if (irq < 0) {
+ dev_err(&ndev->dev, "invalid nvhost data\n");
+ return -EINVAL;
+ }
+
+ nvavp = kzalloc(sizeof(struct nvavp_info), GFP_KERNEL);
+ if (!nvavp) {
+ dev_err(&ndev->dev, "cannot allocate avp_info\n");
+ return -ENOMEM;
+ }
+
+ memset(nvavp, 0, sizeof(*nvavp));
+
+ nvavp->nvhost_syncpt = &ndev->host->syncpt;
+ if (!nvavp->nvhost_syncpt) {
+ dev_err(&ndev->dev, "cannot get syncpt handle\n");
+ ret = -ENOENT;
+ goto err_get_syncpt;
+ }
+
+ nvavp->nvmap = nvmap_create_client(nvmap_dev, "nvavp_drv");
+ if (IS_ERR_OR_NULL(nvavp->nvmap)) {
+ dev_err(&ndev->dev, "cannot create nvmap client\n");
+ ret = PTR_ERR(nvavp->nvmap);
+ goto err_nvmap_create_drv_client;
+ }
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
+ heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
+#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
+ heap_mask = NVMAP_HEAP_IOVMM;
+#else /* nvmem= carveout */
+ heap_mask = 0;
+#endif
+ switch (heap_mask) {
+ case NVMAP_HEAP_IOVMM:
+ iovmm_addr = 0x0ff00000;
+
+ /* Tegra3 A01 has different SMMU address */
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3
+ && tegra_get_revision() == TEGRA_REVISION_A01) {
+ iovmm_addr = 0xeff00000;
+ }
+
+ nvavp->os_info.handle = nvmap_alloc_iovm(nvavp->nvmap, SZ_1M,
+ L1_CACHE_BYTES,
+ NVMAP_HANDLE_UNCACHEABLE,
+ iovmm_addr);
+ if (IS_ERR_OR_NULL(nvavp->os_info.handle)) {
+ dev_err(&ndev->dev,
+ "cannot create os handle\n");
+ ret = PTR_ERR(nvavp->os_info.handle);
+ goto err_nvmap_alloc;
+ }
+
+ nvavp->os_info.data = nvmap_mmap(nvavp->os_info.handle);
+ if (!nvavp->os_info.data) {
+ dev_err(&ndev->dev,
+ "cannot map os handle\n");
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ nvavp->os_info.phys =
+ nvmap_pin(nvavp->nvmap, nvavp->os_info.handle);
+ if (IS_ERR_OR_NULL((void *)nvavp->os_info.phys)) {
+ dev_err(&ndev->dev,
+ "cannot pin os handle\n");
+ ret = PTR_ERR((void *)nvavp->os_info.phys);
+ goto err_nvmap_pin;
+ }
+
+ dev_info(&ndev->dev,
+ "allocated IOVM at %lx for AVP os\n",
+ (unsigned long)nvavp->os_info.phys);
+ break;
+ case NVMAP_HEAP_CARVEOUT_GENERIC:
+ nvavp->os_info.handle = nvmap_alloc(nvavp->nvmap, SZ_1M, SZ_1M,
+ NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR_OR_NULL(nvavp->os_info.handle)) {
+ dev_err(&ndev->dev, "cannot create AVP os handle\n");
+ ret = PTR_ERR(nvavp->os_info.handle);
+ goto err_nvmap_alloc;
+ }
+
+ nvavp->os_info.data = nvmap_mmap(nvavp->os_info.handle);
+ if (!nvavp->os_info.data) {
+ dev_err(&ndev->dev, "cannot map AVP os handle\n");
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ nvavp->os_info.phys = nvmap_pin(nvavp->nvmap,
+ nvavp->os_info.handle);
+ if (IS_ERR_OR_NULL((void *)nvavp->os_info.phys)) {
+ dev_err(&ndev->dev, "cannot pin AVP os handle\n");
+ ret = PTR_ERR((void *)nvavp->os_info.phys);
+ goto err_nvmap_pin;
+ }
+
+ dev_info(&ndev->dev,
+ "allocated carveout memory at %lx for AVP os\n",
+ (unsigned long)nvavp->os_info.phys);
+ break;
+ default:
+ dev_err(&ndev->dev, "invalid/non-supported heap for AVP os\n");
+ ret = -EINVAL;
+ goto err_get_syncpt;
+ }
+
+ nvavp->mbox_from_avp_pend_irq = irq;
+ mutex_init(&nvavp->open_lock);
+ mutex_init(&nvavp->pushbuffer_lock);
+
+ /* TODO DO NOT USE NVAVP DEVICE */
+ nvavp->cop_clk = clk_get(&ndev->dev, "cop");
+ if (IS_ERR(nvavp->cop_clk)) {
+ dev_err(&ndev->dev, "cannot get cop clock\n");
+ ret = -ENOENT;
+ goto err_get_cop_clk;
+ }
+
+ nvavp->vde_clk = clk_get(&ndev->dev, "vde");
+ if (IS_ERR(nvavp->vde_clk)) {
+ dev_err(&ndev->dev, "cannot get vde clock\n");
+ ret = -ENOENT;
+ goto err_get_vde_clk;
+ }
+
+ nvavp->bsev_clk = clk_get(&ndev->dev, "bsev");
+ if (IS_ERR(nvavp->bsev_clk)) {
+ dev_err(&ndev->dev, "cannot get bsev clock\n");
+ ret = -ENOENT;
+ goto err_get_bsev_clk;
+ }
+
+ nvavp->sclk = clk_get(&ndev->dev, "sclk");
+ if (IS_ERR(nvavp->sclk)) {
+ dev_err(&ndev->dev, "cannot get avp.sclk clock\n");
+ ret = -ENOENT;
+ goto err_get_sclk;
+ }
+
+ nvavp->emc_clk = clk_get(&ndev->dev, "emc");
+ if (IS_ERR(nvavp->emc_clk)) {
+ dev_err(&ndev->dev, "cannot get emc clock\n");
+ ret = -ENOENT;
+ goto err_get_emc_clk;
+ }
+
+ nvavp->clk_enabled = 0;
+ nvavp_halt_avp(nvavp);
+
+ INIT_WORK(&nvavp->clock_disable_work, clock_disable_handler);
+
+ nvavp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ nvavp->misc_dev.name = "tegra_avpchannel";
+ nvavp->misc_dev.fops = &tegra_nvavp_fops;
+ nvavp->misc_dev.mode = S_IRWXUGO;
+ nvavp->misc_dev.parent = &ndev->dev;
+
+ ret = misc_register(&nvavp->misc_dev);
+ if (ret) {
+ dev_err(&ndev->dev, "unable to register misc device!\n");
+ goto err_misc_reg;
+ }
+
+ ret = request_irq(irq, nvavp_mbox_pending_isr, 0,
+ TEGRA_NVAVP_NAME, nvavp);
+ if (ret) {
+ dev_err(&ndev->dev, "cannot register irq handler\n");
+ goto err_req_irq_pend;
+ }
+ disable_irq(nvavp->mbox_from_avp_pend_irq);
+
+ nvhost_set_drvdata(ndev, nvavp);
+ nvavp->nvhost_dev = ndev;
+
+ return 0;
+
+err_req_irq_pend:
+ misc_deregister(&nvavp->misc_dev);
+err_misc_reg:
+ clk_put(nvavp->emc_clk);
+err_get_emc_clk:
+ clk_put(nvavp->sclk);
+err_get_sclk:
+ clk_put(nvavp->bsev_clk);
+err_get_bsev_clk:
+ clk_put(nvavp->vde_clk);
+err_get_vde_clk:
+ clk_put(nvavp->cop_clk);
+err_get_cop_clk:
+ nvmap_unpin(nvavp->nvmap, nvavp->os_info.handle);
+err_nvmap_pin:
+ nvmap_munmap(nvavp->os_info.handle, nvavp->os_info.data);
+err_nvmap_mmap:
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
+ nvmap_free(nvavp->nvmap, nvavp->os_info.handle);
+#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU)
+ nvmap_free_iovm(nvavp->nvmap, nvavp->os_info.handle);
+#endif
+err_nvmap_alloc:
+ nvmap_client_put(nvavp->nvmap);
+err_nvmap_create_drv_client:
+err_get_syncpt:
+ kfree(nvavp);
+ return ret;
+}
+
+static int tegra_nvavp_remove(struct nvhost_device *ndev)
+{
+ struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
+
+ if (!nvavp)
+ return 0;
+
+ mutex_lock(&nvavp->open_lock);
+ if (nvavp->refcount) {
+ mutex_unlock(&nvavp->open_lock);
+ return -EBUSY;
+ }
+ mutex_unlock(&nvavp->open_lock);
+
+ nvavp_unload_ucode(nvavp);
+ nvavp_unload_os(nvavp);
+
+ misc_deregister(&nvavp->misc_dev);
+
+ clk_put(nvavp->bsev_clk);
+ clk_put(nvavp->vde_clk);
+ clk_put(nvavp->cop_clk);
+
+ clk_put(nvavp->emc_clk);
+ clk_put(nvavp->sclk);
+
+ nvmap_client_put(nvavp->nvmap);
+
+ kfree(nvavp);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_nvavp_suspend(struct nvhost_device *ndev, pm_message_t state)
+{
+ struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
+
+ if (nvavp->refcount)
+ nvhost_module_idle(&ndev->host->mod);
+ return 0;
+}
+
+static int tegra_nvavp_resume(struct nvhost_device *ndev)
+{
+ struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
+
+ if (nvavp->refcount)
+ nvhost_module_busy(&ndev->host->mod);
+ return 0;
+}
+#endif
+
+static struct nvhost_driver tegra_nvavp_driver = {
+ .driver = {
+ .name = TEGRA_NVAVP_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_nvavp_probe,
+ .remove = tegra_nvavp_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_nvavp_suspend,
+ .resume = tegra_nvavp_resume,
+#endif
+};
+
+static int __init tegra_nvavp_init(void)
+{
+ return nvhost_driver_register(&tegra_nvavp_driver);
+}
+
+static void __exit tegra_nvavp_exit(void)
+{
+ nvhost_driver_unregister(&tegra_nvavp_driver);
+}
+
+module_init(tegra_nvavp_init);
+module_exit(tegra_nvavp_exit);
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Channel based AVP driver for Tegra");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/media/video/tegra/nvavp/nvavp_os.h b/drivers/media/video/tegra/nvavp/nvavp_os.h
new file mode 100644
index 000000000000..4d7f6776f110
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/nvavp_os.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/media/video/tegra/nvavp/nvavp_os.h
+ *
+ * Copyright (C) 2011 NVIDIA Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_NVAVP_OS_H
+#define __MEDIA_VIDEO_TEGRA_NVAVP_OS_H
+
+#include <linux/types.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#define NVE2_AVP (0x0000E276)
+
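+/* Control area shared with the AVP: command-FIFO pointers (dma_start,
+ * dma_end, put, get), idle and clock-gating controls, and method-data
+ * windows. The layout is presumably dictated by the AVP-side firmware. */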
+struct nv_e276_control {
+ u32 reserved00[5];
+ u32 dma_start;
+ u32 reserved01[2];
+ u32 dma_end;
+ u32 reserved02[7];
+ u32 put;
+ u32 reserved03[15];
+ u32 get;
+ u32 reserved04[10];
+ u32 watchdog_timeout;
+ u32 idle_notify_enable;
+ u32 idle_notify_delay;
+ u32 idle_clk_enable;
+ u32 iram_clk_gating;
+ u32 idle;
+ u32 outbox_data;
+ u32 app_intr_enable;
+ u32 app_start_time;
+ u32 app_in_iram;
+ u32 iram_ucode_addr;
+ u32 iram_ucode_size;
+ u32 dbg_state[57];
+ u32 os_method_data[16];
+ u32 app_method_data[128];
+};
+
+#define NVE26E_HOST1X_INCR_SYNCPT (0x00000000)
+#define NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE (0x00000001)
+
+#define NVE26E_CH_OPCODE_INCR(Addr, Count) \
+ /* op, addr, count */ \
+ ((1UL << 28) | ((Addr) << 16) | (Count))
+
+#define NVE26E_CH_OPCODE_IMM(addr, value) \
+	/* op, addr, value */ \
+ ((4UL << 28) | ((addr) << 16) | (value))
+
+#define NVE26E_CH_OPCODE_GATHER(off, ins, type, cnt) \
+ /* op, offset, insert, type, count */ \
+ ((6UL << 28) | ((off) << 16) | ((ins) << 15) | ((type) << 14) | cnt)
+
+/* AVP OS methods */
+#define NVE276_NOP (0x00000080)
+#define NVE276_SET_APP_TIMEOUT (0x00000084)
+#define NVE276_SET_MICROCODE_A (0x00000085)
+#define NVE276_SET_MICROCODE_B (0x00000086)
+#define NVE276_SET_MICROCODE_C (0x00000087)
+
+/* Interrupt codes through inbox/outbox data codes (cpu->avp or avp->cpu) */
+#define NVE276_OS_INTERRUPT_NOP (0x00000000) /* wake up avp */
+#define NVE276_OS_INTERRUPT_TIMEOUT (0x00000001)
+#define NVE276_OS_INTERRUPT_SEMAPHORE_AWAKEN (0x00000002)
+#define NVE276_OS_INTERRUPT_EXECUTE_AWAKEN (0x00000004)
+#define NVE276_OS_INTERRUPT_DEBUG_STRING (0x00000008)
+#define NVE276_OS_INTERRUPT_DH_KEYEXCHANGE (0x00000010)
+#define NVE276_OS_INTERRUPT_APP_NOTIFY (0x00000020)
+#define NVE276_OS_INTERRUPT_VIDEO_IDLE (0x00000040)
+#define NVE276_OS_INTERRUPT_AUDIO_IDLE (0x00000080)
+#define NVE276_OS_INTERRUPT_AVP_BREAKPOINT (0x00800000)
+#define NVE276_OS_INTERRUPT_AVP_FATAL_ERROR (0x01000000)
+
+struct nvavp_os_info {
+ u32 entry_offset;
+ u32 control_offset;
+ u32 debug_offset;
+
+ struct nvmap_handle_ref *handle;
+ void *data;
+ u32 size;
+ phys_addr_t phys;
+ void *os_bin;
+ phys_addr_t reset_addr;
+};
+
+struct nvavp_ucode_info {
+ struct nvmap_handle_ref *handle;
+ void *data;
+ u32 size;
+ phys_addr_t phys;
+ void *ucode_bin;
+};
+
+#endif /* __MEDIA_VIDEO_TEGRA_NVAVP_OS_H */
diff --git a/drivers/media/video/tegra/ov14810.c b/drivers/media/video/tegra/ov14810.c
new file mode 100644
index 000000000000..6f56afee2d83
--- /dev/null
+++ b/drivers/media/video/tegra/ov14810.c
@@ -0,0 +1,1235 @@
+/*
+ * ov14810.c - ov14810 sensor driver
+ *
+ * Copyright (c) 2011, NVIDIA, All Rights Reserved.
+ *
+ * Contributors:
+ * Krupal Divvela <kdivvela@nvidia.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/ov14810.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+
+#define OV14810_I2C_WRITE8(ADDR, OFFSET, VAL) do { \
+		if (ov14810_write8(ADDR, OFFSET, VAL) != 0) \
+			return 1; \
+	} while (0)
+
+#define OV14810_FRAME_LENGTH_REG_ADDR0 0x380e
+#define OV14810_FRAME_LENGTH_REG_ADDR1 0x380f
+
+#define OV14810_COARSE_TIME_REG_ADDR0 0x3500
+#define OV14810_COARSE_TIME_REG_ADDR1 0x3501
+#define OV14810_COARSE_TIME_REG_ADDR2 0x3502
+
+#define OV14810_GAIN_REG_ADDR0 0x350b
+
+#define OV14810_GROUP_ACCESS_REG_ADDR 0x3212
+
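+/* Program image for the sensor's companion microcontroller; presumably
+ * uploaded to the device over I2C during initialization. */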
+static u8 uCProgram[] = {
+ 0x02,0x03,0x6E,0x02,0x19,0x74,0xBB,0x01,0x06,0x89,0x82,0x8A,0x83,0xE0,0x22,0x50
+,0x02,0xE7,0x22,0xBB,0xFE,0x02,0xE3,0x22,0x89,0x82,0x8A,0x83,0xE4,0x93,0x22,0xBB
+,0x01,0x0C,0xE5,0x82,0x29,0xF5,0x82,0xE5,0x83,0x3A,0xF5,0x83,0xE0,0x22,0x50,0x06
+,0xE9,0x25,0x82,0xF8,0xE6,0x22,0xBB,0xFE,0x06,0xE9,0x25,0x82,0xF8,0xE2,0x22,0xE5
+,0x82,0x29,0xF5,0x82,0xE5,0x83,0x3A,0xF5,0x83,0xE4,0x93,0x22,0xBB,0x01,0x06,0x89
+,0x82,0x8A,0x83,0xF0,0x22,0x50,0x02,0xF7,0x22,0xBB,0xFE,0x01,0xF3,0x22,0xEF,0x8D
+,0xF0,0xA4,0xA8,0xF0,0xCF,0x8C,0xF0,0xA4,0x28,0xCE,0x8D,0xF0,0xA4,0x2E,0xFE,0x22
+,0xBC,0x00,0x0B,0xBE,0x00,0x29,0xEF,0x8D,0xF0,0x84,0xFF,0xAD,0xF0,0x22,0xE4,0xCC
+,0xF8,0x75,0xF0,0x08,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xEC,0x33,0xFC,0xEE,0x9D,0xEC
+,0x98,0x40,0x05,0xFC,0xEE,0x9D,0xFE,0x0F,0xD5,0xF0,0xE9,0xE4,0xCE,0xFD,0x22,0xED
+,0xF8,0xF5,0xF0,0xEE,0x84,0x20,0xD2,0x1C,0xFE,0xAD,0xF0,0x75,0xF0,0x08,0xEF,0x2F
+,0xFF,0xED,0x33,0xFD,0x40,0x07,0x98,0x50,0x06,0xD5,0xF0,0xF2,0x22,0xC3,0x98,0xFD
+,0x0F,0xD5,0xF0,0xEA,0x22,0xC2,0xD5,0xEC,0x30,0xE7,0x09,0xB2,0xD5,0xE4,0xC3,0x9D
+,0xFD,0xE4,0x9C,0xFC,0xEE,0x30,0xE7,0x15,0xB2,0xD5,0xE4,0xC3,0x9F,0xFF,0xE4,0x9E
+,0xFE,0x12,0x00,0x70,0xC3,0xE4,0x9D,0xFD,0xE4,0x9C,0xFC,0x80,0x03,0x12,0x00,0x70
+,0x30,0xD5,0x07,0xC3,0xE4,0x9F,0xFF,0xE4,0x9E,0xFE,0x22,0xC5,0xF0,0xF8,0xA3,0xE0
+,0x28,0xF0,0xC5,0xF0,0xF8,0xE5,0x82,0x15,0x82,0x70,0x02,0x15,0x83,0xE0,0x38,0xF0
+,0x22,0xBB,0x01,0x0A,0x89,0x82,0x8A,0x83,0xE0,0xF5,0xF0,0xA3,0xE0,0x22,0x50,0x06
+,0x87,0xF0,0x09,0xE7,0x19,0x22,0xBB,0xFE,0x07,0xE3,0xF5,0xF0,0x09,0xE3,0x19,0x22
+,0x89,0x82,0x8A,0x83,0xE4,0x93,0xF5,0xF0,0x74,0x01,0x93,0x22,0xBB,0x01,0x10,0xE5
+,0x82,0x29,0xF5,0x82,0xE5,0x83,0x3A,0xF5,0x83,0xE0,0xF5,0xF0,0xA3,0xE0,0x22,0x50
+,0x09,0xE9,0x25,0x82,0xF8,0x86,0xF0,0x08,0xE6,0x22,0xBB,0xFE,0x0A,0xE9,0x25,0x82
+,0xF8,0xE2,0xF5,0xF0,0x08,0xE2,0x22,0xE5,0x83,0x2A,0xF5,0x83,0xE9,0x93,0xF5,0xF0
+,0xA3,0xE9,0x93,0x22,0xE8,0x8F,0xF0,0xA4,0xCC,0x8B,0xF0,0xA4,0x2C,0xFC,0xE9,0x8E
+,0xF0,0xA4,0x2C,0xFC,0x8A,0xF0,0xED,0xA4,0x2C,0xFC,0xEA,0x8E,0xF0,0xA4,0xCD,0xA8
+,0xF0,0x8B,0xF0,0xA4,0x2D,0xCC,0x38,0x25,0xF0,0xFD,0xE9,0x8F,0xF0,0xA4,0x2C,0xCD
+,0x35,0xF0,0xFC,0xEB,0x8E,0xF0,0xA4,0xFE,0xA9,0xF0,0xEB,0x8F,0xF0,0xA4,0xCF,0xC5
+,0xF0,0x2E,0xCD,0x39,0xFE,0xE4,0x3C,0xFC,0xEA,0xA4,0x2D,0xCE,0x35,0xF0,0xFD,0xE4
+,0x3C,0xFC,0x22,0x75,0xF0,0x08,0x75,0x82,0x00,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xCD
+,0x33,0xCD,0xCC,0x33,0xCC,0xC5,0x82,0x33,0xC5,0x82,0x9B,0xED,0x9A,0xEC,0x99,0xE5
+,0x82,0x98,0x40,0x0C,0xF5,0x82,0xEE,0x9B,0xFE,0xED,0x9A,0xFD,0xEC,0x99,0xFC,0x0F
+,0xD5,0xF0,0xD6,0xE4,0xCE,0xFB,0xE4,0xCD,0xFA,0xE4,0xCC,0xF9,0xA8,0x82,0x22,0xB8
+,0x00,0xC1,0xB9,0x00,0x59,0xBA,0x00,0x2D,0xEC,0x8B,0xF0,0x84,0xCF,0xCE,0xCD,0xFC
+,0xE5,0xF0,0xCB,0xF9,0x78,0x18,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,0xEC
+,0x33,0xFC,0xEB,0x33,0xFB,0x10,0xD7,0x03,0x99,0x40,0x04,0xEB,0x99,0xFB,0x0F,0xD8
+,0xE5,0xE4,0xF9,0xFA,0x22,0x78,0x18,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD
+,0xEC,0x33,0xFC,0xC9,0x33,0xC9,0x10,0xD7,0x05,0x9B,0xE9,0x9A,0x40,0x07,0xEC,0x9B
+,0xFC,0xE9,0x9A,0xF9,0x0F,0xD8,0xE0,0xE4,0xC9,0xFA,0xE4,0xCC,0xFB,0x22,0x75,0xF0
+,0x10,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,0xCC,0x33,0xCC,0xC8,0x33,0xC8
+,0x10,0xD7,0x07,0x9B,0xEC,0x9A,0xE8,0x99,0x40,0x0A,0xED,0x9B,0xFD,0xEC,0x9A,0xFC
+,0xE8,0x99,0xF8,0x0F,0xD5,0xF0,0xDA,0xE4,0xCD,0xFB,0xE4,0xCC,0xFA,0xE4,0xC8,0xF9
+,0x22,0xEB,0x9F,0xF5,0xF0,0xEA,0x9E,0x42,0xF0,0xE9,0x9D,0x42,0xF0,0xE8,0x9C,0x45
+,0xF0,0x22,0xE8,0x60,0x0F,0xEC,0xC3,0x13,0xFC,0xED,0x13,0xFD,0xEE,0x13,0xFE,0xEF
+,0x13,0xFF,0xD8,0xF1,0x22,0xE8,0x60,0x0F,0xEF,0xC3,0x33,0xFF,0xEE,0x33,0xFE,0xED
+,0x33,0xFD,0xEC,0x33,0xFC,0xD8,0xF1,0x22,0xE0,0xFC,0xA3,0xE0,0xFD,0xA3,0xE0,0xFE
+,0xA3,0xE0,0xFF,0x22,0xE0,0xF8,0xA3,0xE0,0xF9,0xA3,0xE0,0xFA,0xA3,0xE0,0xFB,0x22
+,0xEC,0xF0,0xA3,0xED,0xF0,0xA3,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0xE0,0xFB,0xA3,0xE0
+,0xFA,0xA3,0xE0,0xF9,0x22,0xF8,0xE0,0xFB,0xA3,0xA3,0xE0,0xF9,0x25,0xF0,0xF0,0xE5
+,0x82,0x15,0x82,0x70,0x02,0x15,0x83,0xE0,0xFA,0x38,0xF0,0x22,0xEB,0xF0,0xA3,0xEA
+,0xF0,0xA3,0xE9,0xF0,0x22,0xD0,0x83,0xD0,0x82,0xF8,0xE4,0x93,0x70,0x12,0x74,0x01
+,0x93,0x70,0x0D,0xA3,0xA3,0x93,0xF8,0x74,0x01,0x93,0xF5,0x82,0x88,0x83,0xE4,0x73
+,0x74,0x02,0x93,0x68,0x60,0xEF,0xA3,0xA3,0xA3,0x80,0xDF,0x8A,0x83,0x89,0x82,0xE4
+,0x73,0xEC,0x8E,0xF0,0xA4,0xCC,0xC5,0xF0,0xCC,0xCD,0xF8,0xEF,0xA4,0xCE,0xC5,0xF0
+,0x2D,0xFD,0xE4,0x3C,0xFC,0xE8,0xA4,0x2E,0xC8,0xC5,0xF0,0x3D,0xFD,0xE4,0x3C,0xFC
+,0xEF,0xA4,0xFF,0xE5,0xF0,0x28,0xFE,0xE4,0x3D,0xFD,0xE4,0x3C,0xFC,0x22,0x78,0x7F
+,0xE4,0xF6,0xD8,0xFD,0x75,0x81,0x7F,0x02,0x03,0xB5,0x02,0x17,0x7F,0xE4,0x93,0xA3
+,0xF8,0xE4,0x93,0xA3,0x40,0x03,0xF6,0x80,0x01,0xF2,0x08,0xDF,0xF4,0x80,0x29,0xE4
+,0x93,0xA3,0xF8,0x54,0x07,0x24,0x0C,0xC8,0xC3,0x33,0xC4,0x54,0x0F,0x44,0x20,0xC8
+,0x83,0x40,0x04,0xF4,0x56,0x80,0x01,0x46,0xF6,0xDF,0xE4,0x80,0x0B,0x01,0x02,0x04
+,0x08,0x10,0x20,0x40,0x80,0x90,0x03,0xFA,0xE4,0x7E,0x01,0x93,0x60,0xBC,0xA3,0xFF
+,0x54,0x3F,0x30,0xE5,0x09,0x54,0x1F,0xFE,0xE4,0x93,0xA3,0x60,0x01,0x0E,0xCF,0x54
+,0xC0,0x25,0xE0,0x60,0xA8,0x40,0xB8,0xE4,0x93,0xA3,0xFA,0xE4,0x93,0xA3,0xF8,0xE4
+,0x93,0xA3,0xC8,0xC5,0x82,0xC8,0xCA,0xC5,0x83,0xCA,0xF0,0xA3,0xC8,0xC5,0x82,0xC8
+,0xCA,0xC5,0x83,0xCA,0xDF,0xE9,0xDE,0xE7,0x80,0xBE,0x41,0x1B,0x5F,0x00,0x60,0x26
+,0x1B,0x23,0x07,0x83,0x07,0xE6,0x08,0x0D,0x1F,0x5D,0x1F,0x98,0x07,0x5F,0x04,0x73
+,0xFC,0x18,0x03,0xE8,0xFF,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x01,0x00,0x0B
+,0x00,0x01,0x00,0x04,0x00,0xA0,0x00,0x64,0xC1,0x45,0xC1,0x46,0xC1,0x48,0xC1,0x47
+,0x00,0xE4,0xFD,0xFC,0xC2,0x8D,0xC2,0xA9,0xD2,0x8C,0xED,0x6F,0x70,0x02,0xEC,0x6E
+,0x60,0x0C,0x30,0x8D,0xFD,0x0D,0xBD,0x00,0x01,0x0C,0xC2,0x8D,0x80,0xEC,0xC2,0x8C
+,0x22,0xC2,0x8C,0xC2,0xA9,0x53,0x89,0xF0,0x43,0x89,0x02,0x75,0x8C,0x9C,0x75,0x8A
+,0x9C,0xC2,0x8D,0x22,0xD0,0xE0,0xD0,0xE0,0xE4,0xC0,0xE0,0xC0,0xE0,0x32,0x22,0x02
+,0xFF,0xFC,0x22,0x42,0x30,0x00,0x47,0x30,0x03,0x47,0x30,0x0C,0x41,0x30,0x18,0x42
+,0x30,0x1B,0x41,0x30,0x1E,0x41,0x30,0x20,0x41,0x31,0x06,0x41,0x35,0x03,0x42,0x36
+,0x00,0x42,0x36,0x03,0x42,0x36,0x0A,0x41,0x36,0x0F,0x41,0x36,0x11,0x41,0x36,0x13
+,0x41,0x36,0x15,0x44,0x37,0x02,0x43,0x37,0x07,0x41,0x37,0x0E,0x42,0x37,0x10,0x42
+,0x37,0x14,0x43,0x37,0x17,0x44,0x37,0x1B,0x45,0x37,0x22,0x44,0x38,0x08,0x41,0x38
+,0x19,0x41,0x3B,0x09,0x41,0x3C,0x01,0x41,0x40,0x00,0x44,0x40,0x02,0x41,0x40,0x09
+,0x41,0x40,0x0C,0x41,0x40,0x4F,0x43,0x43,0x00,0x48,0x47,0x00,0x41,0x47,0x09,0x42
+,0x47,0x0B,0x42,0x48,0x00,0x41,0x48,0x03,0x41,0x48,0x06,0x41,0x48,0x37,0x41,0x48
+,0x42,0x41,0x48,0x4A,0x42,0x50,0x00,0x41,0x50,0x1F,0x41,0x50,0x25,0x42,0x50,0x3B
+,0x41,0x50,0x41,0x41,0x50,0x43,0x41,0x5B,0x01,0x41,0x5B,0x03,0x42,0x38,0x2C,0x41
+,0x01,0x00,0x41,0x32,0x12,0x41,0x30,0x13,0x41,0x36,0x02,0x41,0x36,0x05,0x42,0x36
+,0x0C,0x41,0x36,0x14,0x44,0x37,0x0A,0x41,0x37,0x0F,0x41,0x37,0x13,0x41,0x37,0x16
+,0x41,0x37,0x21,0x42,0x37,0x27,0x45,0x38,0x03,0x46,0x38,0x0C,0x42,0x38,0x17,0x46
+,0x38,0x1C,0x42,0x38,0x2C,0x41,0x40,0x01,0x42,0x40,0x50,0x41,0x40,0x53,0x41,0x50
+,0x02,0x41,0x50,0x3D,0x41,0x50,0x42,0x41,0x50,0x47,0x41,0x59,0x01,0xEF,0xFF,0xFF
+,0x41,0x32,0x12,0x41,0x32,0x12,0x84,0xEE,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x09
+,0x00,0xA7,0xA0,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x04,0xE0,0xF8
+,0xF0,0x01,0x05,0x03,0x2D,0x1F,0x20,0x80,0x2E,0x00,0x24,0x6C,0x84,0x13,0x20,0x3D
+,0x28,0xD1,0x73,0x01,0x00,0x04,0x40,0x16,0x5F,0x58,0x80,0x11,0x11,0xA0,0x46,0x40
+,0x2C,0x1A,0x30,0x2E,0x2E,0x70,0x00,0x40,0x00,0xF0,0x80,0x0A,0x80,0x29,0xC5,0x08
+,0x02,0x10,0x40,0x00,0xFF,0xFF,0x00,0xF0,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x04,0x0F,0x50,0x80,0x1B,0x01,0x00,0x5F,0x4E,0x00,0x10,0x01,0x10
+,0x0E,0x08,0x03,0x00,0x02,0x02,0x00,0x00,0x1F,0x53,0x11,0x42,0x13,0x0F,0x83,0x00
+,0x04,0x81,0x00,0x57,0xB6,0x08,0x66,0x02,0x07,0x02,0x88,0x01,0xE6,0x0F,0xA0,0x01
+,0xF4,0x22,0x02,0x24,0x4A,0x32,0xAC,0x07,0xA4,0x03,0xA0,0x10,0x10,0x00,0xC0,0x00
+,0xA1,0x00,0x00,0x21,0x00,0x00,0xFF,0xFF,0x10,0xA0,0x02,0x88,0x01,0xE6,0xFF,0xFF
+,0xFF,0xFF,0xFF,0x00,0x00,0x09,0x00,0xA7,0xA0,0x08,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x40,0x04,0xE0,0xF8,0xF0,0x01,0x05,0x03,0x2D,0x1F,0x20,0x80,0x2E,0x00
+,0x24,0x6C,0x84,0x13,0x20,0x3D,0x28,0xD1,0x73,0x01,0x00,0x04,0x40,0x16,0x5F,0x58
+,0x80,0x11,0x11,0xA0,0x46,0x40,0x2C,0x1A,0x30,0x2E,0x2E,0x70,0x00,0x40,0x00,0xF0
+,0x80,0x0A,0x80,0x29,0xC5,0x08,0x02,0x10,0x40,0x00,0xFF,0xFF,0x00,0xF0,0x04,0x01
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x0F,0x50,0x80,0x1B,0x01,0x00
+,0x5F,0x4E,0x00,0x10,0x01,0x10,0x0E,0x08,0x03,0x00,0x02,0x02,0x00,0x00,0x1F,0x42
+,0x11,0x42,0x13,0x30,0x80,0x00,0x84,0x01,0x61,0xF6,0x17,0x08,0x66,0x0C,0x0B,0x11
+,0x40,0x0C,0xF0,0x1F,0x00,0x0D,0x08,0x44,0x96,0x24,0x40,0x30,0x0C,0x0C,0xFC,0x00
+,0x08,0x04,0x04,0x02,0xC0,0x00,0xA1,0x00,0x00,0x21,0x00,0x00,0xFF,0xFF,0x10,0xA0
+,0x11,0x40,0x0C,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x09,0x00,0xA7,0xA0,0x08
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x04,0xE0,0xF8,0xF0,0x01,0x05,0x03
+,0x2D,0x1F,0x20,0x80,0x2E,0x00,0x24,0x6C,0x84,0x13,0x20,0x3D,0x28,0xD1,0x73,0x01
+,0x00,0x04,0x40,0x16,0x5F,0x58,0x80,0x11,0x11,0xA0,0x46,0x40,0x2C,0x1A,0x30,0x2E
+,0x2E,0x70,0x00,0x40,0x00,0xF0,0x80,0x0A,0x80,0x29,0xC5,0x08,0x02,0x10,0x40,0x00
+,0xFF,0xFF,0x00,0xF0,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04
+,0x0F,0x50,0x80,0x1B,0x01,0x00,0x5F,0x4E,0x00,0x10,0x01,0x10,0x0E,0x08,0x03,0x00
+,0x02,0x02,0x00,0x00,0x1F,0x42,0x11,0x42,0x13,0x30,0x80,0x00,0x84,0x01,0x61,0xF6
+,0x17,0x08,0x66,0x0C,0x0B,0x02,0x88,0x01,0xE6,0x0A,0x40,0x01,0xFC,0x44,0x96,0x24
+,0x40,0x35,0x85,0x01,0xF2,0x07,0x6C,0x04,0x04,0x02,0xC0,0x00,0xA1,0x00,0x00,0x31
+,0x00,0x00,0xFF,0xFF,0x10,0xA0,0x02,0x88,0x01,0xE6,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0x00,0x05,0x6D,0x07,0x18,0x06,0x13,0xFF,0x05,0x06,0xB9,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0x01,0x0D,0x4C,0x02,0x08,0xB9,0x03,0x0F,0xF5,0x05,0x0D,0xC7,0x0A
+,0x14,0x6D,0x0B,0x14,0xB6,0x0C,0x0F,0xD2,0x0D,0x14,0xED,0x0F,0x15,0x59,0x10,0x15
+,0x5D,0x14,0x15,0x70,0x18,0x11,0xAF,0x19,0x12,0x79,0x1F,0x15,0x61,0x20,0x17,0x29
+,0x22,0x14,0x06,0x23,0x14,0x2D,0x29,0x18,0xAF,0x2A,0x10,0x9F,0x39,0x17,0x9F,0x3B
+,0x0E,0xA6,0x3C,0x17,0xEB,0x4A,0x18,0x92,0x4B,0x14,0x88,0x50,0x17,0x42,0x57,0x15
+,0x61,0x59,0x11,0x3D,0x60,0x17,0x65,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x22,0xA2,0xC1,0xCC,0xC1,0xCB,0x25,0x11,0x26,0x88
+,0x60,0x00,0x64,0x45,0x30,0x03,0x41,0x01,0x00,0x60,0x00,0x64,0x41,0x01,0x00,0xEF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x0B,0x02,0xF7
+,0x00,0x00,0x00,0x00,0x00,0x04,0x14,0x28,0x36,0x64,0x04,0x00,0x09,0x00,0x9D,0xC3
+,0x0D,0x01,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0x90,0x1B,0x23,0x12,0x18,0x70,0x90,0x1B,0x68,0x12,0x03,0x0C,0x90,0x1B,0x68,0xF1
+,0xE3,0xFF,0xF4,0x60,0x34,0xEF,0x14,0xB5,0xC5,0x23,0x75,0xC5,0xFE,0xE4,0xFD,0x7F
+,0x8E,0x11,0x89,0x90,0x1B,0x68,0x12,0x02,0xEC,0x90,0x00,0x01,0x12,0x01,0x3C,0xAA
+,0xF0,0xF9,0x12,0x03,0x3B,0x7D,0x04,0x7F,0x8E,0x11,0x89,0x80,0x0F,0x90,0x1B,0x69
+,0xE4,0x75,0xF0,0x03,0x12,0x00,0xFB,0x80,0xC3,0x75,0xC1,0x06,0x75,0xC5,0xFF,0x22
+,0xFD,0x7F,0x20,0x11,0x89,0xE4,0xFD,0x7F,0x12,0x8F,0xFA,0x8D,0xFB,0xE4,0xF5,0xF8
+,0x90,0x1B,0x35,0xE0,0xFE,0xA3,0xE0,0xFF,0x43,0xF8,0x01,0xEF,0x4E,0x70,0x08,0xE5
+,0xF9,0x64,0x0F,0x60,0x13,0x80,0xF8,0xE5,0xF9,0x64,0x0F,0x60,0x0B,0xEF,0x1F,0xAC
+,0x06,0x70,0x01,0x1E,0x4C,0x70,0xF0,0x22,0x22,0xAF,0xC1,0xEF,0x14,0x60,0x15,0x24
+,0xFE,0x60,0x3E,0x24,0x03,0x70,0x63,0xE5,0xC2,0xB4,0x01,0x06,0x12,0x17,0x92,0x02
+,0x04,0x64,0x80,0x56,0xE5,0xC2,0x64,0x01,0x70,0x25,0x31,0x35,0xE4,0xFD,0x7F,0x20
+,0x11,0x89,0x7D,0x03,0x7F,0x12,0x11,0x89,0x7D,0x07,0x7F,0x14,0x11,0x89,0x7D,0x03
+,0x7F,0x13,0x11,0x89,0x75,0xC5,0xFF,0xE4,0xF5,0xC1,0xFD,0x7F,0x18,0x80,0x8A,0x80
+,0x29,0xE5,0xC2,0x70,0x12,0x31,0x35,0x7F,0x20,0xB1,0x22,0x90,0x1B,0x5F,0xEF,0xF0
+,0xE4,0x11,0x80,0x7D,0x03,0x80,0x0D,0xE5,0xC2,0xB4,0x01,0x0C,0x90,0x1B,0x5F,0xE0
+,0x11,0x80,0xE4,0xFD,0x7F,0x13,0xA1,0xAD,0x80,0x00,0x75,0xC1,0x01,0x22,0xE4,0xFD
+,0xFF,0x7E,0x01,0x80,0x0E,0x31,0x2E,0x7F,0x0A,0x7E,0x00,0x02,0x04,0x31,0xFD,0x7F
+,0x02,0x7E,0x35,0xAB,0x07,0xAA,0x06,0xEA,0xF5,0xFA,0xEB,0xF5,0xFB,0x8D,0xFC,0x75
+,0xF8,0x10,0x01,0x90,0xE4,0xFD,0x7F,0x12,0x7E,0x32,0x31,0x43,0xAD,0x43,0xAC,0x42
+,0x7F,0x00,0x7E,0x35,0x31,0x84,0xE5,0x45,0x31,0x3E,0xAD,0x49,0xAC,0x48,0x7F,0x0A
+,0x7E,0x35,0x31,0x84,0x7D,0x10,0x7F,0x12,0x7E,0x32,0x31,0x43,0x7D,0xA0,0x7F,0x12
+,0x7E,0x32,0x80,0xBF,0x8E,0x31,0x8F,0x32,0x8C,0x33,0x8D,0x34,0xE5,0x33,0xFD,0x31
+,0x43,0xE5,0x32,0x24,0x01,0xFF,0xE4,0x35,0x31,0xFE,0xAD,0x34,0x80,0xA5,0x8E,0x2F
+,0x8F,0x30,0x90,0x1B,0x10,0xE0,0xFF,0x90,0x1B,0x0F,0xE0,0xFD,0x12,0x18,0x35,0xAA
+,0x06,0xA9,0x07,0x7B,0xFF,0x90,0x1B,0x4E,0x12,0x03,0x0C,0x90,0x00,0x80,0x12,0x00
+,0x1F,0xFE,0x90,0x00,0x81,0x12,0x00,0x1F,0x7C,0x00,0x90,0x1B,0x5B,0xB1,0x18,0x90
+,0x00,0x82,0x12,0x00,0x1F,0xFE,0x90,0x00,0x83,0x12,0x00,0x1F,0x90,0x1B,0x5D,0xB1
+,0x18,0xB1,0x05,0x7C,0x41,0x7D,0x1A,0x12,0x03,0x41,0x7B,0x00,0x7A,0x71,0x79,0x02
+,0x78,0x00,0x12,0x01,0xFF,0x90,0x1B,0x51,0xEE,0xF0,0xA3,0xEF,0xF0,0xAE,0x2F,0xAF
+,0x30,0x7C,0x27,0x7D,0x10,0x12,0x03,0x41,0xC0,0x06,0xC0,0x07,0x90,0x1B,0x51,0xE0
+,0xFE,0xA3,0xE0,0xFB,0xAA,0x06,0xE4,0xF9,0xF8,0xD0,0x07,0xD0,0x06,0x12,0x01,0xFF
+,0x90,0x1B,0x53,0x12,0x02,0xE0,0xB1,0x05,0xE4,0xFC,0xFD,0x90,0x1B,0x53,0x12,0x02
+,0xD4,0x12,0x01,0x74,0xE4,0x7B,0x20,0xFA,0xF9,0xF8,0x12,0x01,0x74,0xE4,0x7B,0xE0
+,0x7A,0x2E,0xF9,0xF8,0x12,0x01,0xFF,0x90,0x1B,0x57,0x12,0x02,0xE0,0x90,0x1B,0x57
+,0x12,0x02,0xC8,0x78,0x08,0x12,0x02,0xA2,0x90,0x1B,0x11,0xEE,0xF0,0xA3,0xEF,0xF0
+,0x90,0x1B,0x57,0x12,0x02,0xC8,0xE4,0x90,0x1B,0x13,0xEF,0xF0,0x7F,0x18,0x7E,0x38
+,0xB1,0x3A,0xEF,0x30,0xE0,0x0D,0x51,0xD8,0x78,0x01,0x12,0x02,0xA2,0x90,0x1B,0x53
+,0x12,0x02,0xE0,0x51,0xD8,0xEF,0x54,0xF0,0xFF,0xE4,0xF5,0x44,0x8F,0x45,0x51,0xD8
+,0x78,0x08,0x12,0x02,0xA2,0xE4,0x8E,0x42,0x8F,0x43,0x51,0xD8,0x78,0x04,0x12,0x02
+,0xA2,0xF1,0x35,0x90,0x1B,0x5E,0xE0,0x24,0xF8,0xFF,0x90,0x1B,0x5D,0xE0,0x34,0xFF
+,0xFE,0xE4,0xFC,0xFD,0xD3,0x12,0x02,0x91,0x40,0x10,0x51,0xD8,0x78,0x04,0x12,0x02
+,0xA2,0xEF,0x24,0x08,0xFD,0xE4,0x3E,0xFC,0x80,0x08,0x90,0x1B,0x5D,0xE0,0xFC,0xA3
+,0xE0,0xFD,0x7F,0x0E,0x7E,0x38,0x21,0x84,0x90,0x1B,0x53,0x02,0x02,0xC8,0x90,0x1B
+,0x2F,0xE0,0xFE,0xA3,0xE0,0xFF,0x90,0x1B,0x49,0x12,0x02,0xEC,0xAC,0x02,0xAD,0x01
+,0x8E,0x33,0x8F,0x34,0x8C,0x35,0x8D,0x36,0x8F,0x82,0x8E,0x83,0xE4,0x93,0x91,0xFC
+,0x70,0x02,0x05,0x33,0x90,0x1B,0x4C,0xE0,0xFF,0xF4,0x70,0x02,0x81,0x19,0xEF,0x54
+,0xE0,0xFB,0x70,0x24,0xE0,0x54,0x1F,0xB1,0x0E,0x05,0x34,0xE5,0x34,0x70,0x02,0x05
+,0x33,0xF5,0x82,0x85,0x33,0x83,0xE4,0x93,0x90,0x1B,0x4C,0xF0,0x60,0xEB,0x05,0x34
+,0xE5,0x34,0x70,0x02,0x05,0x33,0x80,0xCC,0x90,0x1B,0x4C,0xE0,0x54,0x1F,0xA3,0xF0
+,0x90,0x1B,0x4C,0xEB,0xF0,0x64,0x20,0x60,0x0A,0xE0,0xFF,0x64,0x80,0x60,0x04,0xEF
+,0xB4,0xC0,0x15,0x91,0x2A,0xFF,0x90,0x1B,0x4E,0xE4,0xF0,0xA3,0xEF,0xF0,0x05,0x34
+,0xE5,0x34,0x70,0x02,0x05,0x33,0x80,0x19,0x91,0x2A,0xFF,0x74,0x01,0x93,0x90,0x1B
+,0x4E,0xCF,0xF0,0xA3,0xEF,0xF0,0x74,0x02,0x25,0x34,0xF5,0x34,0xE4,0x35,0x33,0xF5
+,0x33,0x90,0x1B,0x4C,0xE0,0xB4,0x60,0x08,0x91,0x33,0xFF,0x12,0x04,0x31,0x80,0x7F
+,0x90,0x1B,0x4C,0xE0,0xB4,0xE0,0x21,0xA3,0xE0,0xB4,0x02,0x16,0xAA,0x35,0xA9,0x36
+,0x7B,0xFF,0x12,0x01,0x11,0x85,0xF0,0x35,0xF5,0x36,0x91,0x33,0x8E,0x33,0xF5,0x34
+,0x80,0x5D,0x74,0x02,0xB1,0x0E,0x80,0x57,0x90,0x1B,0x4D,0xE0,0xD3,0x94,0x00,0x40
+,0x4E,0x90,0x1B,0x4C,0xE0,0xB4,0xC0,0x07,0x91,0x1A,0x12,0x18,0x7A,0x80,0x26,0x90
+,0x1B,0x4C,0xE0,0xB4,0x80,0x09,0x91,0x1A,0x7B,0x01,0x12,0x15,0x27,0x80,0x16,0x90
+,0x1B,0x4C,0xE0,0x90,0x1B,0x4E,0xB4,0x40,0x08,0xE0,0xFE,0x91,0x1D,0x31,0x43,0x80
+,0x04,0x91,0x1D,0x11,0x89,0x05,0x36,0xE5,0x36,0x70,0x02,0x05,0x35,0x90,0x1B,0x4E
+,0xE4,0x75,0xF0,0x01,0x12,0x00,0xFB,0x90,0x1B,0x4D,0xE0,0x14,0xF0,0x80,0xA9,0x91
+,0x2A,0x91,0xFC,0x70,0x02,0x05,0x33,0x61,0x04,0x22,0x90,0x1B,0x4E,0xA3,0xE0,0xFF
+,0x85,0x36,0x82,0x85,0x35,0x83,0xE4,0x93,0xFD,0x22,0x85,0x34,0x82,0x85,0x33,0x83
+,0xE4,0x93,0x22,0x90,0x1B,0x4E,0xE0,0xFE,0xA3,0xE0,0x22,0xE4,0x90,0x1B,0x4E,0xF0
+,0xA3,0xF0,0x30,0x47,0x05,0x90,0x1B,0x01,0x80,0x03,0x90,0x1B,0x45,0xE0,0xFA,0xA3
+,0xE0,0xFB,0x90,0x1B,0x44,0xE0,0x2B,0xFE,0x90,0x1B,0x43,0xE0,0x3A,0x90,0x1B,0x50
+,0xF0,0xA3,0xCE,0xF0,0xA3,0xEA,0xF0,0xA3,0xEB,0xF0,0x30,0x48,0x6B,0xD2,0x45,0x30
+,0x47,0x53,0x91,0xF3,0x90,0x1B,0x43,0xE0,0xFE,0xA3,0xE0,0xFF,0xD3,0x94,0x00,0xEE
+,0x94,0x00,0x40,0x0A,0xEF,0x24,0x06,0xFF,0xE4,0x3E,0xFE,0x12,0x04,0x31,0x91,0x33
+,0xFF,0x90,0x1B,0x52,0xD1,0x25,0x40,0x54,0xB1,0x2C,0xC3,0x90,0x1B,0x38,0xE0,0x9F
+,0x90,0x1B,0x37,0xE0,0x9E,0x50,0x45,0x90,0x1B,0x39,0xA3,0xE0,0xFF,0xBF,0xFF,0x02
+,0x80,0x22,0x90,0x1B,0x37,0xF1,0xBE,0x90,0x1B,0x4D,0xE0,0x9F,0xFF,0x90,0x1B,0x4C
+,0xE0,0x9E,0xFE,0x80,0x24,0x91,0xF3,0x91,0x33,0xFF,0xA3,0xD1,0x25,0x40,0x1D,0xB1
+,0x2C,0x4E,0x60,0x18,0xF1,0xB2,0x80,0x11,0x91,0xF3,0x90,0x1B,0x50,0xE0,0xFE,0xA3
+,0xE0,0xFF,0x7C,0x00,0x7D,0x05,0x12,0x00,0x5E,0x12,0x04,0x31,0x31,0x37,0x53,0xCB
+,0xFB,0x21,0x2E,0xF1,0xAA,0x30,0x48,0x03,0x43,0xCB,0x04,0x22,0x90,0x1B,0x4C,0xF0
+,0x05,0x34,0xE5,0x34,0x22,0x90,0x1B,0x5B,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0x25,0x36
+,0xF5,0x36,0xE4,0x35,0x35,0xF5,0x35,0x22,0x24,0x00,0xFF,0xEC,0x3E,0xF0,0xA3,0xEF
+,0xF0,0x22,0x8F,0xFA,0x75,0xF8,0x02,0x11,0x90,0xAF,0xFB,0x22,0xED,0x9F,0xFF,0xEC
+,0x9E,0xFE,0x90,0x1B,0x4C,0xF0,0xA3,0xEF,0xF0,0x22,0xAD,0x07,0xAC,0x06,0xEC,0xF5
+,0xFA,0xED,0xF5,0xFB,0x75,0xF8,0x12,0x11,0x90,0xAF,0xFB,0x22,0x12,0x04,0x51,0x90
+,0x1B,0x25,0xF1,0xC5,0x90,0x1F,0xF8,0xE4,0x93,0xF4,0x70,0x4D,0x7F,0x05,0x7E,0x3D
+,0xB1,0x3A,0xEF,0x60,0x44,0x75,0x2F,0x3D,0x75,0x30,0x0E,0x75,0x31,0x1F,0x75,0x32
+,0x83,0xE4,0x90,0x1B,0x49,0xF0,0xB1,0xB3,0x70,0x02,0x05,0x31,0x05,0x30,0xE5,0x30
+,0x70,0x02,0x05,0x2F,0xD1,0x11,0x94,0x04,0x40,0xEC,0x75,0x31,0x1F,0x75,0x32,0xB8
+,0xE4,0x90,0x1B,0x49,0xF0,0xB1,0xB3,0x70,0x02,0x05,0x31,0x05,0x30,0xE5,0x30,0x70
+,0x02,0x05,0x2F,0xD1,0x11,0x94,0x1E,0x40,0xEC,0x7D,0x04,0x7F,0x8E,0x11,0x89,0xE4
+,0xF5,0xC1,0x22,0xAF,0x30,0xAE,0x2F,0xB1,0x3A,0xEF,0xF4,0x85,0x32,0x82,0x85,0x31
+,0x83,0xF0,0x05,0x32,0xE5,0x32,0x22,0x7F,0x03,0x7E,0x30,0xB1,0x3A,0x8F,0x2F,0x7F
+,0x06,0x7E,0x30,0xB1,0x3A,0x8F,0x30,0xAF,0xC1,0xEF,0x24,0xFC,0x60,0x12,0x24,0x02
+,0x70,0x1C,0x53,0x2F,0xFC,0x43,0x2F,0x01,0x53,0x30,0xDF,0x43,0x30,0x20,0x80,0x09
+,0x53,0x2F,0xFC,0x43,0x2F,0x02,0x53,0x30,0xDF,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1
+,0x01,0xAD,0x2F,0x7F,0x03,0x7E,0x30,0x31,0x43,0xAD,0x30,0x7F,0x06,0x7E,0x30,0x21
+,0x43,0x90,0x1B,0x49,0xE0,0x04,0xF0,0xE0,0xC3,0x22,0x90,0x1B,0x41,0xE0,0xFE,0xA3
+,0xE0,0xFF,0x90,0x1B,0x3F,0xE0,0xFC,0xA3,0xE0,0xFD,0xD3,0x9F,0xEC,0x9E,0x22,0x30
+,0x47,0x22,0xD1,0x1A,0x40,0x11,0xED,0x9F,0xFF,0xEC,0x9E,0xFE,0x90,0x1B,0x02,0xE0
+,0x90,0x1B,0x01,0xF1,0x15,0x80,0x30,0xF1,0x0C,0x90,0x1B,0x02,0xE0,0x2F,0xFF,0x90
+,0x1B,0x01,0x80,0x23,0x30,0x48,0x27,0xD1,0x1A,0x40,0x11,0xED,0x9F,0xFF,0xEC,0x9E
+,0xFE,0x90,0x1B,0x46,0xE0,0x90,0x1B,0x45,0xF1,0x15,0x80,0x0B,0xF1,0x0C,0x90,0x1B
+,0x46,0xE0,0x2F,0xFF,0x90,0x1B,0x45,0xE0,0x3E,0x90,0x1B,0x4C,0x80,0x0B,0x90,0x1B
+,0x45,0xE0,0xFF,0xA3,0xE0,0x90,0x1B,0x4C,0xCF,0xF0,0xA3,0xEF,0xF0,0xF1,0xB2,0x31
+,0x9E,0x30,0x47,0x05,0x90,0x1B,0x03,0x80,0x03,0x90,0x1B,0x47,0xE0,0xFE,0xA3,0xE0
+,0xFF,0x12,0x15,0xB2,0x21,0x54,0x7D,0x01,0xAF,0xC1,0x12,0x18,0x35,0x12,0x19,0xB7
+,0x70,0x06,0xE9,0xF4,0x70,0x02,0xEA,0xF4,0x70,0x04,0x75,0xC1,0x01,0x22,0xD2,0x46
+,0x12,0x19,0x3B,0x90,0x1B,0x0F,0xE0,0x70,0x06,0x31,0x2E,0xF1,0x5C,0x51,0xDE,0x90
+,0x1B,0x0F,0x74,0x01,0xF0,0xA3,0xE5,0xC1,0xF0,0x30,0x47,0x05,0x90,0x1B,0x01,0x80
+,0x03,0x90,0x1B,0x45,0xE0,0xFF,0xA3,0xE0,0x90,0x1B,0x17,0xCF,0xF0,0xA3,0xEF,0xF0
+,0xD1,0x2F,0xE4,0x90,0x1B,0x19,0xF0,0x90,0x1B,0x14,0xE0,0xFF,0xA3,0xE0,0x90,0x1B
+,0x1A,0xCF,0xF0,0xA3,0xEF,0xF0,0x91,0x3B,0xE4,0xF5,0xC1,0x22,0x90,0x1B,0x3D,0xE0
+,0xFE,0xA3,0xE0,0xFF,0x22,0x2F,0xFF,0xE0,0x3E,0xFE,0x90,0x1B,0x3E,0xE0,0x2F,0xFF
+,0x90,0x1B,0x3D,0x22,0xE4,0x33,0x24,0x01,0xFF,0xE4,0x33,0xFE,0xE4,0x33,0xFD,0xE4
+,0x33,0xFC,0x12,0x01,0x74,0xA8,0x04,0xA9,0x05,0xAA,0x06,0xAB,0x07,0x22,0xAB,0x07
+,0xAA,0x06,0xB1,0x3A,0x90,0x1B,0x4C,0xEF,0xF0,0xEB,0x24,0x01,0xFF,0xE4,0x3A,0xFE
+,0xB1,0x3A,0xEF,0xFD,0x90,0x1B,0x4C,0xE0,0xFE,0xED,0xFF,0x22,0x7F,0x00,0x7E,0x35
+,0xF1,0x3E,0x90,0x1B,0x1D,0xEE,0xF0,0xA3,0xEF,0xF0,0x7F,0x02,0x7E,0x35,0xB1,0x3A
+,0x90,0x1B,0x1F,0xE4,0xF0,0xA3,0xEF,0xF0,0x7F,0x0A,0x7E,0x35,0xF1,0x3E,0x90,0x1B
+,0x21,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x90,0x1B,0x1D,0xE0,0xFC,0xA3,0xE0,0xFD,0x7F
+,0x00,0x7E,0x35,0x31,0x84,0x90,0x1B,0x1F,0xA3,0xE0,0x31,0x3E,0x90,0x1B,0x21,0xE0
+,0xFC,0xA3,0xE0,0xFD,0x7F,0x0A,0x7E,0x35,0x21,0x84,0x7D,0x01,0x7F,0x00,0x7E,0x01
+,0x21,0x43,0x90,0x1B,0x4C,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0x90,0x1B,0x3B,0xE0,0xFE
+,0xA3,0xE0,0xFF,0xC3,0x22,0xE0,0xFE,0xA3,0xE0,0xFF,0xA3,0xE0,0xFC,0xA3,0xE0,0xFD
+,0x41,0xF0,0xE5,0xC1,0xC3,0x94,0xC1,0x50,0x06,0xAD,0xC4,0xAF,0xC1,0xA1,0xAD,0x75
+,0xC1,0x01,0x22,0x12,0x02,0xEC,0x02,0x00,0x06,0x7F,0x0A,0x7E,0x30,0xE1,0x3E,0x7F
+,0x2A,0x7E,0x30,0xA1,0x3A,0x90,0x1B,0x4B,0xE5,0xC1,0xF0,0xE4,0xF5,0xC1,0xE0,0x12
+,0x03,0x15,0x10,0x21,0x00,0x10,0x2B,0x01,0x10,0x40,0x02,0x10,0x52,0x03,0x10,0x5C
+,0x04,0x10,0x63,0x05,0x10,0x6A,0x07,0x10,0x7B,0x08,0x10,0x7F,0x09,0x00,0x00,0x10
+,0x88,0x75,0xC2,0x4B,0xE4,0xF5,0xC3,0x75,0xC4,0x01,0x22,0x12,0x0F,0xE9,0x11,0x8C
+,0x12,0x0F,0xEF,0x8F,0xC2,0x90,0x1B,0x49,0x11,0x98,0xF5,0xC3,0xED,0xF5,0xC4,0x22
+,0x75,0xC2,0x08,0xE4,0xFF,0x12,0x0D,0x22,0x8F,0xC3,0x7F,0x01,0x12,0x0D,0x22,0x8F
+,0xC4,0x22,0xE4,0xF5,0xC2,0x75,0xC3,0x2C,0x75,0xC4,0x38,0x22,0x75,0xC2,0x07,0xE4
+,0xF5,0xC3,0x22,0x75,0xC2,0x5D,0x75,0xC3,0xC0,0x22,0x7F,0x02,0x71,0x44,0x11,0x8C
+,0x90,0x1B,0x49,0x11,0x98,0xF5,0xC2,0xED,0xF5,0xC3,0x22,0x75,0xC2,0x0A,0x22,0x31
+,0x36,0x8F,0xC2,0x31,0x29,0x8F,0xC3,0x22,0x75,0xC1,0x01,0x22,0x90,0x1B,0x49,0xEE
+,0xF0,0xA3,0xEF,0xF0,0x22,0x90,0x1B,0x11,0xE0,0xFC,0xA3,0xE0,0xFD,0xEC,0x22,0xE4
+,0x90,0x1B,0x49,0xF0,0xA3,0xF0,0xA3,0xF0,0xA3,0xF0,0xAF,0xC1,0xEF,0x12,0x03,0x15
+,0x10,0xC9,0x00,0x10,0xD2,0x01,0x10,0xD7,0x05,0x10,0xDC,0x10,0x10,0xED,0x11,0x10
+,0xF6,0x15,0x11,0x00,0x20,0x00,0x00,0x11,0x08,0x11,0x95,0x31,0x21,0x90,0x1B,0x13
+,0x80,0x14,0x90,0x1B,0x14,0x80,0x19,0x90,0x1B,0x16,0x80,0x1D,0x90,0x1B,0x17,0x11
+,0x98,0x31,0x21,0x90,0x1B,0x19,0xE0,0x90,0x1B,0x4C,0xF0,0x80,0x21,0x90,0x1B,0x1A
+,0x11,0x98,0x31,0x21,0x80,0x18,0x90,0x1B,0x1C,0xE0,0x90,0x1B,0x4A,0xF0,0x80,0x0E
+,0x90,0x1B,0x49,0x74,0x04,0xF0,0x80,0x06,0x90,0x1B,0x49,0x74,0x01,0xF0,0x90,0x1B
+,0x49,0xE0,0xF5,0xC1,0xA3,0xE0,0xF5,0xC2,0xA3,0xE0,0xF5,0xC3,0xA3,0xE0,0xF5,0xC4
+,0x22,0x90,0x1B,0x4A,0xF0,0xED,0xA3,0xF0,0x22,0x7F,0x01,0x8F,0xFA,0x75,0xF8,0x22
+,0x12,0x08,0x90,0xAF,0xFB,0x22,0xE4,0xFF,0x31,0x2B,0x7E,0x00,0x22,0xE4,0xF5,0x2F
+,0xAF,0xC1,0xEF,0x14,0x60,0x11,0x14,0x60,0x18,0x14,0x60,0x15,0x14,0x60,0x17,0x24
+,0x04,0x70,0x1F,0x31,0x8B,0x80,0x1E,0x90,0x1B,0x64,0x11,0x98,0xFF,0x8F,0xC3,0x80
+,0x0C,0x75,0x2F,0x04,0x80,0x0F,0x90,0x1B,0x66,0x11,0x98,0xF5,0xC3,0xED,0xF5,0xC4
+,0x80,0x03,0x75,0x2F,0x01,0x85,0x2F,0xC1,0x22,0xE0,0x54,0x1F,0xFC,0xA3,0xE0,0xFD
+,0x7F,0x50,0x51,0x62,0x7F,0xE8,0x7E,0x03,0x12,0x04,0x31,0x71,0x3C,0xC3,0x33,0xCE
+,0x33,0xCE,0xD8,0xF9,0xFF,0x7C,0x00,0x7D,0x08,0x12,0x00,0xC5,0x90,0x1B,0x64,0xEE
+,0xF0,0xA3,0xEF,0xF0,0x71,0xAA,0x90,0x1B,0x66,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x90
+,0x1B,0x2B,0xE0,0xFE,0xA3,0xE0,0x8E,0x2F,0xF5,0x30,0x8E,0x31,0xF5,0x32,0x90,0x1B
+,0x29,0x12,0x0F,0xC5,0xAF,0xC1,0xEF,0x24,0xFD,0x60,0x0B,0x04,0x70,0x10,0x74,0x36
+,0x51,0x58,0x74,0x38,0x80,0x0E,0x74,0x3A,0x51,0x58,0x74,0x3C,0x80,0x06,0x74,0x32
+,0x51,0x58,0x74,0x34,0x25,0x32,0xF5,0x32,0xE4,0x35,0x31,0xF5,0x31,0xE5,0xC1,0x60
+,0x3A,0x85,0x30,0x82,0x85,0x2F,0x83,0xE4,0x93,0xFE,0x05,0x30,0xE5,0x30,0x70,0x02
+,0x05,0x2F,0xF5,0x82,0x85,0x2F,0x83,0xE4,0x93,0x90,0x1B,0x31,0x51,0x70,0x85,0x32
+,0x82,0x85,0x31,0x83,0xE4,0x93,0xFE,0x05,0x32,0xE5,0x32,0x70,0x02,0x05,0x31,0xF5
+,0x82,0x85,0x31,0x83,0xE4,0x93,0x90,0x1B,0x33,0x51,0x70,0x90,0x1B,0x31,0x31,0x79
+,0x90,0x1B,0x33,0x31,0x79,0x90,0x1B,0x31,0xE0,0xF5,0x3E,0xA3,0xE0,0xF5,0x3F,0xA3
+,0xE0,0xF5,0x40,0xA3,0xE0,0xF5,0x41,0xE4,0xF5,0x3A,0xF5,0x3B,0x75,0xC1,0x01,0x75
+,0xC2,0x44,0x51,0x79,0xE4,0xF5,0xC1,0x22,0x25,0x30,0xF5,0x30,0xE4,0x35,0x2F,0xF5
+,0x2F,0x22,0x8F,0xFA,0xED,0xF5,0xFB,0xEC,0xF5,0xFC,0x75,0xF8,0x04,0x02,0x08,0x90
+,0xFD,0xED,0xFF,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0xE4,0xF5,0x33,0xAF,0xC1,0xEF,0xAD
+,0xC2,0xF5,0x3A,0xED,0xF5,0x3B,0xD3,0x94,0x00,0xE5,0x3A,0x94,0x02,0x40,0x09,0x75
+,0x33,0x01,0x75,0x3A,0x02,0x75,0x3B,0x00,0xC3,0xE5,0x3A,0x94,0x00,0x50,0x08,0x75
+,0x33,0x01,0xE4,0xF5,0x3A,0xF5,0x3B,0xAF,0x3B,0xAE,0x3A,0x71,0x6A,0x8E,0x3C,0x8F
+,0x3D,0x53,0xCB,0xF7,0xE5,0x3C,0x54,0x1F,0xFC,0xAD,0x3D,0x7F,0x50,0x51,0x62,0x75
+,0x34,0x00,0x75,0x35,0xFA,0x7F,0x05,0x7E,0x00,0x12,0x04,0x31,0x85,0x3C,0x36,0x85
+,0x3D,0x37,0x71,0x3C,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0x78,0x03,0xCE,0xA2,0xE7
+,0x13,0xCE,0x13,0xD8,0xF8,0xFF,0xC3,0xE5,0x37,0x9F,0xF5,0x37,0xE5,0x36,0x9E,0xF5
+,0x36,0xC3,0x64,0x80,0x94,0x80,0x50,0x0F,0xAE,0x36,0xAF,0x37,0x7C,0xFF,0x7D,0xFF
+,0x12,0x00,0x5E,0x8E,0x36,0x8F,0x37,0xE5,0x35,0x15,0x35,0x70,0x02,0x15,0x34,0xC3
+,0xE5,0x37,0x94,0x14,0xE5,0x36,0x64,0x80,0x94,0x80,0x40,0x06,0xE5,0x35,0x45,0x34
+,0x70,0xA3,0xE5,0x35,0x45,0x34,0x70,0x03,0x75,0x33,0x04,0x71,0x55,0xE4,0xF5,0xC1
+,0x85,0x33,0xC2,0xE5,0x34,0xF5,0xC3,0xE5,0x35,0xF5,0xC4,0x22,0x7F,0x5A,0x71,0x44
+,0xEF,0x78,0x03,0x22,0x8F,0xFA,0x75,0xF8,0x06,0x12,0x08,0x90,0xAF,0xFC,0xEF,0xFE
+,0xAD,0xFB,0xED,0xFF,0x22,0xE5,0x3C,0x54,0x1F,0xFE,0xE4,0x25,0x3D,0xFD,0xEE,0x34
+,0x20,0xFC,0x7F,0x50,0x51,0x62,0x43,0xCB,0x08,0x22,0x8E,0x38,0x8F,0x39,0x71,0xFB
+,0x78,0x02,0x71,0xF2,0x12,0x0F,0x35,0xAE,0x38,0xAF,0x39,0xE4,0xFC,0xFD,0x12,0x01
+,0x74,0x78,0x09,0x12,0x02,0xA2,0xAD,0x07,0xAC,0x06,0xE5,0x41,0xAE,0x40,0x78,0x02
+,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xC3,0x9D,0xFF,0xEE,0x9C,0xFE,0xEF,0x78,0x02
+,0xCE,0xA2,0xE7,0x13,0xCE,0x13,0xD8,0xF8,0xFF,0x22,0xD3,0xEF,0x95,0x41,0xE5,0x40
+,0x64,0x80,0xF8,0xEE,0x64,0x80,0x98,0x40,0x04,0xE4,0xFE,0xFF,0x22,0xC3,0xEF,0x95
+,0x3F,0xE5,0x3E,0x64,0x80,0xF8,0xEE,0x64,0x80,0x98,0x50,0x05,0x7E,0x02,0x7F,0x00
+,0x22,0xC3,0xE5,0x41,0x9F,0xFF,0xE5,0x40,0x9E,0x78,0x09,0x71,0xF2,0xC0,0x06,0xC0
+,0x07,0x71,0xFB,0xAB,0x07,0xFA,0x33,0x95,0xE0,0xF9,0xF8,0xD0,0x07,0xD0,0x06,0x02
+,0x01,0xFF,0xFE,0x33,0x95,0xE0,0xFD,0xFC,0x02,0x02,0xB5,0xC3,0xE5,0x41,0x95,0x3F
+,0xFF,0xE5,0x40,0x95,0x3E,0x22,0x91,0x55,0x4E,0x70,0x05,0x75,0xC1,0x01,0x80,0x05
+,0x90,0x1B,0x45,0x91,0x21,0x11,0x95,0xF5,0xC2,0xED,0xF5,0xC3,0xA3,0xE0,0xF5,0xC4
+,0x22,0xEE,0xF0,0xA3,0xEF,0xF0,0x12,0x0E,0x2F,0xE4,0xF5,0xC1,0x22,0x91,0x55,0xD3
+,0x94,0x80,0xEE,0x94,0x0C,0x50,0x09,0xC3,0xEF,0x94,0x32,0xEE,0x94,0x00,0x50,0x05
+,0x75,0xC1,0x01,0x80,0x05,0x90,0x1B,0x47,0x91,0x21,0x90,0x1B,0x14,0x11,0x98,0xF5
+,0xC2,0xED,0xF5,0xC3,0x22,0xAF,0xC1,0xEF,0xFF,0xAD,0xC2,0xED,0x90,0x1B,0x49,0xCF
+,0xF0,0xA3,0xEF,0xF0,0x90,0x1B,0x49,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0x91,0x55,0xC3
+,0x94,0xFF,0xEE,0x94,0x5F,0x50,0x0A,0xAD,0xC4,0x12,0x09,0x43,0xE4,0xF5,0xC1,0x80
+,0x03,0x75,0xC1,0x01,0xE4,0xF5,0xC2,0x22,0x90,0x1B,0x49,0xE5,0xC2,0xF0,0xE4,0xA3
+,0xF0,0x90,0x1B,0x49,0xE0,0xFF,0xC3,0x94,0xFF,0x50,0x0C,0x31,0x2B,0x90,0x1B,0x4A
+,0xEF,0xF0,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1,0x01,0xE4,0xF5,0xC2,0xF5,0xC3,0x90
+,0x1B,0x4A,0xE0,0xF5,0xC4,0x22,0xAF,0xC1,0xEF,0xFF,0xAD,0xC2,0xED,0x90,0x1B,0x49
+,0xCF,0xF0,0xA3,0xEF,0xF0,0xE4,0xA3,0x91,0x63,0xC3,0x94,0xFF,0xEE,0x94,0x5F,0x50
+,0x0D,0x12,0x0D,0x3A,0x90,0x1B,0x4B,0xEF,0xF0,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1
+,0x01,0xE4,0xF5,0xC2,0xF5,0xC3,0x90,0x1B,0x4B,0xE0,0xF5,0xC4,0x22,0xE4,0xFF,0xE5
+,0xC1,0xC3,0x94,0xC1,0x50,0x0A,0xAF,0xC1,0x12,0x0D,0x22,0xE4,0xF5,0xC1,0x80,0x03
+,0x75,0xC1,0x01,0x8F,0xC4,0xE4,0xF5,0xC3,0xF5,0xC2,0x22,0x8F,0x2F,0xAB,0x2F,0xAD
+,0xC3,0xAF,0xC2,0xB1,0x27,0x8F,0xC3,0x05,0xC2,0xAB,0x2F,0xAD,0xC4,0xAF,0xC2,0xB1
+,0x27,0x8F,0xC4,0xE4,0xF5,0xC1,0x22,0x8D,0x37,0xAE,0x03,0xEF,0x7C,0x00,0x7B,0x01
+,0x24,0x23,0xF9,0xEC,0x34,0x1B,0xFA,0x90,0x1B,0x50,0x12,0x03,0x0C,0xEE,0x60,0x11
+,0xEF,0xC3,0x94,0x26,0x50,0x0B,0x90,0x1B,0x50,0x12,0x02,0xEC,0xE5,0x37,0x12,0x00
+,0x4C,0x90,0x1B,0x50,0x12,0x0F,0xE3,0xFF,0x22,0xE4,0xFF,0x80,0xAE,0x7F,0x01,0x80
+,0xAA,0xAD,0x3B,0xAC,0x3A,0xE4,0xF5,0xC1,0x8C,0xC3,0xAF,0x05,0xEF,0xF5,0xC4,0x22
+,0xAF,0xC2,0x7E,0x00,0xEF,0x78,0x10,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xFD,0xAC
+,0x06,0xAF,0xC1,0x7E,0x00,0xEF,0x78,0x18,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xB1
+,0xAA,0xAF,0xC3,0xEF,0x4C,0xFE,0xED,0xFF,0xAD,0xC4,0xEF,0x4D,0xFF,0xE4,0xFC,0xFD
+,0x90,0x1B,0x60,0x12,0x02,0xE0,0xE4,0xF5,0xC1,0x22,0xFF,0xEE,0x4C,0xFC,0xEF,0x4D
+,0xFD,0x22,0x8E,0x2F,0x8F,0x30,0xC3,0xE5,0x30,0x94,0x64,0xE5,0x2F,0x94,0x00,0x50
+,0x0C,0xF1,0x20,0xC2,0x44,0xF1,0x17,0x7E,0x40,0x7D,0x06,0x80,0x6D,0xC3,0xE5,0x30
+,0x94,0xC8,0xE5,0x2F,0x94,0x00,0x50,0x0A,0xF1,0x20,0xF1,0x11,0x7E,0x80,0x7D,0x0C
+,0x80,0x58,0xC3,0xE5,0x30,0x94,0x90,0xE5,0x2F,0x94,0x01,0x50,0x0F,0xC2,0x40,0xD2
+,0x41,0xC2,0x42,0xC2,0x43,0xF1,0x11,0xFE,0x7D,0x19,0x80,0x3E,0xC3,0xE5,0x30,0x94
+,0x20,0xE5,0x2F,0x94,0x03,0x50,0x0D,0xD2,0x40,0xC2,0x41,0xC2,0x42,0xF1,0x0F,0xFE
+,0x7D,0x32,0x80,0x26,0xC3,0xE5,0x30,0x94,0x40,0xE5,0x2F,0x94,0x06,0x50,0x09,0xC2
+,0x40,0xF1,0x0B,0xFE,0x7D,0x64,0x80,0x12,0xC3,0xE5,0x30,0x94,0x80,0xE5,0x2F,0x94
+,0x0C,0x50,0x0F,0xD2,0x40,0xF1,0x0B,0xFE,0x7D,0xC8,0xFC,0x12,0x01,0xFF,0x8E,0x31
+,0x8F,0x32,0xC3,0xE4,0x95,0x32,0xF5,0x34,0x74,0x20,0x95,0x31,0xF5,0x33,0xA2,0x41
+,0xE4,0x33,0x24,0x01,0xFB,0xE4,0x33,0xFA,0xE4,0x33,0xF9,0xE4,0x33,0xF8,0xA2,0x40
+,0x12,0x0F,0x24,0xA2,0x42,0x12,0x0F,0x24,0xA2,0x43,0x12,0x0F,0x24,0xA2,0x44,0xE4
+,0x33,0x24,0x01,0xFF,0xE4,0x33,0xFE,0xE4,0x33,0xFD,0xE4,0x33,0xFC,0x12,0x01,0x74
+,0xC0,0x04,0x12,0x0F,0x37,0xE5,0x33,0xFF,0xC3,0x74,0x20,0x9F,0xFD,0xE4,0x94,0x00
+,0xFC,0x7E,0x06,0x7F,0x40,0x12,0x00,0x70,0xE4,0xFC,0xFD,0xD0,0x00,0x12,0x01,0x74
+,0x90,0x1B,0x14,0xEE,0xF0,0xA3,0xEF,0xF0,0xA2,0x41,0xE4,0xFE,0x33,0x54,0x01,0x78
+,0x07,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xFD,0xAC,0x06,0xA2,0x40,0xE4,0x33,0x54
+,0x01,0x4C,0xFC,0xA2,0x42,0xE4,0xFE,0x33,0x54,0x01,0x78,0x06,0xC3,0x33,0xCE,0x33
+,0xCE,0xD8,0xF9,0xB1,0xAA,0xA2,0x43,0xE4,0xFE,0x33,0x54,0x01,0x78,0x05,0xC3,0x33
+,0xCE,0x33,0xCE,0xD8,0xF9,0xB1,0xAA,0xA2,0x44,0xE4,0x33,0x54,0x01,0xC4,0xF8,0x54
+,0x0F,0xC8,0x68,0xFF,0xE4,0xC4,0x54,0xF0,0x48,0x4C,0xFC,0xEF,0x4D,0xFD,0xE5,0x33
+,0x54,0x0F,0xFF,0xEC,0xF5,0x48,0xEF,0x4D,0xF5,0x49,0x22,0xD2,0x41,0xD2,0x42,0xD2
+,0x43,0xD2,0x44,0xAE,0x2F,0xAF,0x30,0xAB,0x07,0xAA,0x06,0xE4,0xF9,0xF8,0xFF,0x22
+,0xC2,0x40,0xC2,0x41,0xC2,0x42,0xC2,0x43,0x22,0xE4,0xF5,0xC1,0xF1,0x31,0x8F,0xC2
+,0x22,0x7F,0x20,0x12,0x0D,0x22,0xEF,0x54,0x03,0x64,0x03,0x7F,0x00,0x60,0x02,0x7F
+,0x01,0x22,0xAF,0xC1,0xEF,0x14,0x60,0x0C,0x04,0x70,0x16,0x74,0xFF,0x90,0x1B,0x3B
+,0xF0,0xA3,0x80,0x08,0x90,0x1B,0x3B,0xE4,0xF0,0xA3,0x74,0x0A,0xF0,0xE4,0xF5,0xC1
+,0x22,0x75,0xC1,0x01,0x22,0xAF,0xC1,0xEF,0xFE,0xAD,0xC2,0xED,0xFF,0xE4,0xF5,0xC1
+,0x8F,0x82,0x8E,0x83,0x93,0xFC,0x74,0x01,0x93,0xF5,0xC3,0xEC,0xF5,0xC4,0x22,0xE4
+,0xF5,0x8F,0xF1,0x92,0x75,0xA8,0x81,0x53,0xC9,0xFE,0x75,0xC5,0xFF,0x12,0x04,0x6F
+,0x80,0xFB,0x75,0xC9,0xFF,0x75,0xCA,0xFF,0x75,0xC7,0xFF,0x75,0xC8,0xFF,0x22,0xE5
+,0xC1,0xB4,0x80,0x05,0x12,0x09,0x2E,0x80,0x3E,0xE4,0xFD,0xAF,0xC1,0x12,0x18,0x35
+,0x12,0x19,0xB7,0x70,0x06,0xE9,0xF4,0x70,0x02,0xEA,0xF4,0x70,0x04,0x75,0xC1,0x01
+,0x22,0x90,0x1B,0x0F,0xE0,0x60,0x03,0x12,0x0F,0x87,0xE4,0x90,0x1B,0x0F,0xF0,0xA3
+,0xE5,0xC1,0xF0,0xC2,0x48,0xC2,0x47,0x12,0x0A,0xDE,0x12,0x0F,0xAA,0x7F,0x03,0x7E
+,0x00,0x12,0x04,0x31,0x12,0x0E,0x2F,0xE4,0xF5,0xC1,0x22,0xAD,0xC2,0xAF,0xC3,0x12
+,0x18,0x35,0x12,0x19,0xB7,0x70,0x06,0xE9,0xF4,0x70,0x02,0xEA,0xF4,0x70,0x08,0x75
+,0xC1,0x01,0xF5,0xC2,0xF5,0xC3,0x22,0xE5,0xC1,0x90,0x1B,0x4A,0x70,0x05,0x75,0xF0
+,0x9D,0x80,0x04,0xE4,0x75,0xF0,0x9F,0x12,0x00,0xFB,0x90,0x1B,0x49,0xE4,0x75,0xF0
+,0x01,0x12,0x02,0xF5,0x12,0x00,0x06,0xF5,0xC2,0x90,0x1B,0x49,0x12,0x0F,0xE3,0xF5
+,0xC3,0xE4,0xF5,0xC1,0x22,0xAC,0x07,0x90,0x1B,0x2D,0x11,0x70,0x12,0x01,0x11,0xFF
+,0xAE,0xF0,0xF4,0x70,0x02,0xEE,0xF4,0x60,0x1D,0xED,0x70,0x04,0xEF,0x6C,0x60,0x16
+,0xBD,0x01,0x0A,0x12,0x01,0x11,0xE5,0xF0,0xB5,0x04,0x02,0x80,0x09,0x74,0x04,0x29
+,0xF9,0xE4,0x3A,0xFA,0x80,0xD6,0x90,0x00,0x02,0x12,0x01,0x3C,0xFF,0xAE,0xF0,0x22
+,0xE0,0xFE,0xA3,0xE0,0xAA,0x06,0xF9,0x7B,0xFF,0x22,0xEF,0x24,0x34,0x60,0x06,0x04
+,0x70,0x05,0x8D,0xCB,0x22,0x8D,0xCC,0x22,0x8F,0xFA,0x8D,0xFB,0x75,0xF8,0x20,0x02
+,0x08,0x90,0x90,0x1B,0x49,0xE5,0xC2,0xF0,0xE0,0xFF,0xC3,0x94,0xFF,0x50,0x09,0xAD
+,0xC4,0x11,0x88,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1,0x01,0xE4,0xF5,0xC2,0x22,0xE4
+,0xFD,0xAF,0xC1,0xEF,0x14,0x60,0x25,0x14,0x60,0x39,0x24,0xFC,0x60,0x51,0x14,0x60
+,0x56,0x14,0x60,0x5B,0x24,0x08,0x70,0x63,0xE5,0xC2,0xD3,0x94,0x01,0x40,0x04,0x7D
+,0x01,0x80,0x5A,0xAF,0xC2,0x90,0x1B,0x00,0xEF,0xF0,0x80,0x51,0xAF,0xC3,0xAC,0xC2
+,0xEC,0x2F,0xFF,0xE4,0x33,0x4F,0x70,0x04,0x7D,0x01,0x80,0x41,0x31,0x30,0x90,0x1B
+,0x01,0x80,0x29,0x31,0x30,0xD3,0x94,0x80,0xEE,0x94,0x0C,0x50,0x09,0xC3,0xEF,0x94
+,0x32,0xEE,0x94,0x00,0x50,0x04,0x7D,0x01,0x80,0x23,0x90,0x1B,0x03,0x80,0x0D,0x90
+,0x1B,0x09,0xE5,0xC2,0xF0,0x80,0x16,0x31,0x30,0x90,0x1B,0x37,0xEE,0x80,0x06,0xAF
+,0xC2,0x90,0x1B,0x39,0xE4,0xF0,0xA3,0xEF,0xF0,0x80,0x02,0x7D,0x01,0x8D,0xC1,0x22
+,0xAF,0xC2,0xEF,0xFE,0xAC,0xC3,0xEC,0xFB,0xEB,0xFF,0x22,0x90,0x1B,0x00,0xE0,0x60
+,0x04,0xD2,0x47,0x80,0x02,0xC2,0x47,0x30,0x47,0x15,0x12,0x0F,0xBB,0x90,0x1B,0x02
+,0xE0,0x9F,0x90,0x1B,0x01,0xE0,0x9E,0x40,0x03,0xD2,0x48,0x22,0xC2,0x48,0x22,0x12
+,0x0F,0xBB,0x90,0x1B,0x46,0xE0,0x9F,0x90,0x1B,0x45,0xE0,0x9E,0x40,0x03,0xD2,0x48
+,0x22,0xC2,0x48,0x22,0xC0,0xE0,0xC0,0xF0,0xC0,0x83,0xC0,0x82,0xC0,0xD0,0x75,0xD0
+,0x00,0xC0,0x00,0xC0,0x01,0xC0,0x02,0xC0,0x03,0xC0,0x04,0xC0,0x05,0xC0,0x06,0xC0
+,0x07,0xE5,0xC7,0x30,0xE0,0x06,0x12,0x08,0x30,0x53,0xC7,0x01,0xD0,0x07,0xD0,0x06
+,0xD0,0x05,0xD0,0x04,0xD0,0x03,0xD0,0x02,0xD0,0x01,0xD0,0x00,0xD0,0xD0,0xD0,0x82
+,0xD0,0x83,0xD0,0xF0,0xD0,0xE0,0x32,0xAA,0x06,0xA9,0x07,0x7B,0xFF,0x90,0x1B,0x49
+,0x12,0x03,0x0C,0x90,0x1B,0x49,0x12,0x02,0xEC,0x74,0xFF,0xF5,0x83,0xF5,0x82,0x6B
+,0x22,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xEF,0xFF,0xFF
+,0x21,0x20,0x24,0x90,0x21,0xB2,0x21,0x95,0x21,0x94,0x60,0x00,0x64,0x21,0x95,0x26
+,0x5E,0x25,0x66,0x2A,0x22,0x21,0x2E,0x26,0x30,0x22,0x38,0x24,0x40,0x23,0x9E,0x22
+,0x9A,0x84,0x0E,0x84,0xEE,0x84,0xEE,0x22,0x52,0x21,0x20,0x21,0x58,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x40,0x03,0x00,0x01
+,0x03,0x02,0x01,0x00,0xFF,0x60,0x03,0x18,0x00,0xE1,0x0F,0xF8,0xF4,0xF8,0x28,0x24
+,0x0C,0x26,0x00,0x27,0x0F,0x00,0x0E,0x02,0x01,0xD0,0x07,0x64,0x00,0x94,0x11,0xE8
+,0x03,0x64,0x00,0xF4,0x01,0x02,0x11,0x00,0xE8,0x03,0xFC,0x18,0x03,0xE9,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x60,0x03,0x07,0x06,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF
+};
+
+struct ov14810_reg {
+ u16 addr;
+ u16 val;
+};
+
+struct ov14810_sensor {
+ struct i2c_client *i2c_client;
+ struct ov14810_platform_data *pdata;
+};
+
+struct ov14810_info {
+ int mode;
+ struct ov14810_sensor sensor;
+ struct ov14810_sensor uC;
+ struct ov14810_sensor slaveDev;
+};
+
+static struct ov14810_info *info;
+
+#define OV14810_TABLE_WAIT_MS 0
+#define OV14810_TABLE_END 1
+
+static struct ov14810_reg mode_4416x3312[] = {
+ {0x0103, 0x01},
+
+ {0x3003, 0x09},
+ {0x3004, 0x00},
+ {0x3005, 0xa7},
+ {0x3006, 0x80},
+ {0x3007, 0x08},
+ {0x3013, 0x1f},
+
+ {0x3018, 0x04},
+ {0x301b, 0xe0},
+ {0x301c, 0xf8},
+ {0x3020, 0x01},
+ {0x3106, 0x05},
+ {0x3600, 0x2d},
+ {0x3601, 0x1f},
+ {0x360a, 0x2e},
+ {0x360f, 0x24},
+ {0x3611, 0x6c},
+ {0x3613, 0x84},
+ {0x3705, 0xd1},
+ {0x3707, 0x73},
+ {0x3708, 0x01},
+ {0x370e, 0x04},
+ {0x3710, 0x40},
+ {0x3711, 0x1c},
+ {0x3717, 0x80},
+ {0x3718, 0x11},
+ {0x3719, 0x11},
+ {0x371b, 0xa0},
+ {0x371e, 0x2c},
+ {0x3723, 0x30},
+ {0x3726, 0x70},
+ {0x3808, 0x00},
+ {0x380a, 0x00},
+ {0x3817, 0x24},
+ {0x3819, 0x80},
+ {0x3a00, 0x78},
+ {0x3a13, 0x46},
+ {0x3a18, 0x00},
+ {0x3a19, 0x7f},
+ {0x3a1a, 0x06},
+ {0x3a25, 0x83},
+ {0x3b09, 0x0a},
+ {0x4002, 0xc5},
+ {0x4004, 0x02},
+ {0x4005, 0x10},
+ {0x4009, 0x40},
+ {0x404f, 0xff},
+ {0x4709, 0x00},
+ {0x4801, 0x0f},
+ {0x4806, 0x80},
+ {0x4842, 0x01},
+ {0x5000, 0x00},
+ {0x5001, 0x00},
+ {0x5002, 0x00},
+ {0x503b, 0x01},
+ {0x503c, 0x10},
+ {0x5041, 0x0e},
+ {0x5780, 0xfc},
+ {0x5b01, 0x03},
+ {0x5b03, 0x00},
+
+ {0x3003, 0x0a},
+ {0x3005, 0xa7},
+
+ {0x3006, 0x80},
+ {0x3007, 0x08},
+ {0x3013, 0x1f},
+
+ {0x3602, 0x42},
+ {0x3604, 0x80},
+ {0x3605, 0x11},
+ {0x360c, 0x42},
+ {0x360d, 0x13},
+ {0x3614, 0x05},
+
+ {0x3702, 0x10},
+ {0x3704, 0x14},
+ {0x3707, 0x73},
+ {0x370a, 0x80},
+ {0x370b, 0x00},
+ {0x370c, 0x04},
+ {0x370d, 0x0d},
+ {0x370f, 0x61},
+ {0x3713, 0xfa},
+ {0x3714, 0x2f},
+ {0x3715, 0x2c},
+ {0x3716, 0x0b},
+ {0x371c, 0x28},
+ {0x371d, 0x20},
+ {0x3721, 0x08},
+ {0x3724, 0x18},
+ {0x3725, 0x17},
+ {0x3727, 0x65},
+ {0x3728, 0x0c},
+
+ {0x3803, 0x0b},
+ {0x3804, 0x11},
+ {0x3805, 0x40},
+ {0x3806, 0x0c},
+ {0x3807, 0xf9},
+ {0x380c, 0x09},
+ {0x380d, 0x5c},
+ {0x380e, 0x0d},
+ {0x380f, 0x08},
+ {0x3810, 0x44},
+ {0x3811, 0x96},
+ {0x3818, 0x40},
+ {0x381c, 0x30},
+ {0x381d, 0x10},
+ {0x381e, 0x0c},
+ {0x381f, 0xf8},
+ {0x3820, 0x00},
+ {0x3821, 0x0c},
+ {0x3503, 0x13},
+
+ {0x4050, 0xc0},
+ {0x4051, 0x00},
+ {0x4053, 0xa1},
+ {0x4837, 0x1b},
+ {0x503d, 0x00},
+ {0x5042, 0x21},
+ {0x5047, 0x00},
+
+ {0x3a08, 0x1f},
+ {0x3a09, 0x40},
+ {0x3a0a, 0x1a},
+ {0x3a0b, 0x00},
+ {0x3a0d, 0x08},
+ {0x3a0e, 0x06},
+
+ {0x503d, 0x00},
+
+ {0x0100, 0x01},
+ {OV14810_TABLE_END, 0x0000}
+};
+
+enum {
+ OV14810_MODE_4416x3312
+};
+
+static struct ov14810_reg *mode_table[] = {
+ [OV14810_MODE_4416x3312] = mode_4416x3312,
+};
+
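+/*
+ * The helpers below expand a single exposure parameter into the byte-wide
+ * register writes the sensor expects: the frame length is split high byte
+ * first across two registers, and the coarse integration time is spread
+ * across three registers with its low nibble shifted into the top of the
+ * last one.  For example, a coarse_time of 0x7ad is written out as
+ * 0x00, 0x7a, 0xd0.
+ */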
+static inline void ov14810_get_frame_length_regs(struct ov14810_reg *regs,
+ u32 frame_length)
+{
+ regs->addr = OV14810_FRAME_LENGTH_REG_ADDR0;
+ regs->val = (frame_length >> 8) & 0xff;
+ (regs + 1)->addr = OV14810_FRAME_LENGTH_REG_ADDR1;
+ (regs + 1)->val = (frame_length) & 0xff;
+}
+
+static inline void ov14810_get_coarse_time_regs(struct ov14810_reg *regs,
+ u32 coarse_time)
+{
+ regs->addr = OV14810_COARSE_TIME_REG_ADDR0;
+ regs->val = (coarse_time >> 12) & 0xff;
+ (regs + 1)->addr = OV14810_COARSE_TIME_REG_ADDR1;
+ (regs + 1)->val = (coarse_time >> 4) & 0xff;
+ (regs + 2)->addr = OV14810_COARSE_TIME_REG_ADDR2;
+ (regs + 2)->val = (coarse_time & 0xf) << 4;
+}
+
+static inline void ov14810_get_gain_reg(struct ov14810_reg *regs, u16 gain)
+{
+ regs->addr = OV14810_GAIN_REG_ADDR0;
+ regs->val = gain;
+}
+
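+/*
+ * ov14810_write16() sends a 16-bit register address (high byte first)
+ * followed by one data byte in a single I2C message; ov14810_write8()
+ * does the same for devices that take plain 8-bit register addresses.
+ */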
+static int ov14810_write16(struct i2c_client *client, u16 addr, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[3];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+ data[2] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = data;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err != 1) {
+ pr_err("ov14810: i2c transfer failed %x %x\n", addr, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ov14810_write8(struct i2c_client *client, u8 addr, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[2];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) (addr);
+ data[1] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err != 1) {
+		pr_err("ov14810: i2c transfer failed %x %x\n", addr, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ov14810_write_reg_helper(struct ov14810_info *info,
+ u16 addr, u8 val)
+{
+ return ov14810_write16(info->sensor.i2c_client, addr, val);
+}
+
+static int ov14810_write_table(struct ov14810_info *info,
+ const struct ov14810_reg table[],
+ const struct ov14810_reg override_list[],
+ int num_override_regs)
+{
+ int err;
+ const struct ov14810_reg *next;
+ int i;
+ u16 val;
+
+ for (next = table; next->addr != OV14810_TABLE_END; next++) {
+ val = next->val;
+
+ /* When an override list is passed in, replace the reg */
+ /* value to write if the reg is in the list */
+ if (override_list) {
+ for (i = 0; i < num_override_regs; i++) {
+ if (next->addr == override_list[i].addr) {
+ val = override_list[i].val;
+ break;
+ }
+ }
+ }
+		err = ov14810_write_reg_helper(info, next->addr, val);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
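+/*
+ * Program a capture mode.  The static mode table is written as-is except
+ * for the frame length, coarse integration time and gain entries, which
+ * are overridden with the values requested by user space (two frame
+ * length registers, three coarse time registers and one gain register,
+ * hence the six-entry override list).
+ */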
+static int ov14810_set_mode(struct ov14810_info *info, struct ov14810_mode *mode)
+{
+ int sensor_mode;
+ int err;
+ struct ov14810_reg reg_list[6];
+
+ pr_info("%s: xres %u yres %u framelength %u coarsetime %u gain %u\n",
+ __func__, mode->xres, mode->yres, mode->frame_length,
+ mode->coarse_time, mode->gain);
+ sensor_mode = OV14810_MODE_4416x3312;
+
+ /* get a list of override regs for the asking frame length, */
+ /* coarse integration time, and gain. */
+ ov14810_get_frame_length_regs(reg_list, mode->frame_length);
+ ov14810_get_coarse_time_regs(reg_list + 2, mode->coarse_time);
+ ov14810_get_gain_reg(reg_list + 5, mode->gain);
+
+ err = ov14810_write_table(info, mode_table[sensor_mode],
+ reg_list, 6);
+
+ if (err)
+ return err;
+
+ info->mode = sensor_mode;
+ return 0;
+}
+
+static int ov14810_set_frame_length(struct ov14810_info *info, u32 frame_length)
+{
+ struct ov14810_reg reg_list[2];
+ int i;
+ int ret;
+
+ ov14810_get_frame_length_regs(reg_list, frame_length);
+
+ for (i = 0; i < 2; i++) {
+ ret = ov14810_write_reg_helper(info, reg_list[i].addr,
+ reg_list[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
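+/*
+ * The coarse time bytes are written inside a register group hold so they
+ * take effect in the same frame: 0x01 appears to start the hold for
+ * group 1, 0x11 to end it and 0xa1 to launch it, mirroring the
+ * OV5650_GROUP_HOLD/LAUNCH sequence spelled out by the ov5650 driver in
+ * this series.
+ */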
+static int ov14810_set_coarse_time(struct ov14810_info *info, u32 coarse_time)
+{
+ int ret;
+
+ struct ov14810_reg reg_list[3];
+ int i;
+
+ ov14810_get_coarse_time_regs(reg_list, coarse_time);
+
+ ret = ov14810_write_reg_helper(info, OV14810_GROUP_ACCESS_REG_ADDR, 0x01);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = ov14810_write_reg_helper(info, reg_list[i].addr,
+ reg_list[i].val);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov14810_write_reg_helper(info, OV14810_GROUP_ACCESS_REG_ADDR, 0x11);
+ if (ret)
+ return ret;
+
+ ret = ov14810_write_reg_helper(info, OV14810_GROUP_ACCESS_REG_ADDR, 0xa1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ov14810_set_gain(struct ov14810_info *info, u16 gain)
+{
+ int ret;
+ struct ov14810_reg reg_list;
+
+ ov14810_get_gain_reg(&reg_list, gain);
+
+ ret = ov14810_write_reg_helper(info, reg_list.addr, reg_list.val);
+
+ return ret;
+}
+
+static int ov14810_set_power(int powerLevel)
+{
+	pr_info("%s: powerLevel=%d\n", __func__, powerLevel);
+
+ if (info->sensor.pdata) {
+ if (powerLevel && info->sensor.pdata->power_on) {
+ info->sensor.pdata->power_on();
+ msleep(1000);
+		} else if (info->sensor.pdata->power_off) {
+ info->sensor.pdata->power_off();
+ }
+ }
+
+ return 0;
+}
+
+static long ov14810_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ov14810_info *info = file->private_data;
+ int err;
+
+ switch (cmd) {
+ case OV14810_IOCTL_SET_MODE:
+ {
+ struct ov14810_mode mode;
+
+		err = copy_from_user(&mode, (const void __user *)arg,
+				sizeof(struct ov14810_mode));
+		if (err) {
+			pr_err("%s %d\n", __func__, __LINE__);
+			return -EFAULT;
+ }
+
+ return ov14810_set_mode(info, &mode);
+ }
+ case OV14810_IOCTL_SET_FRAME_LENGTH:
+ return ov14810_set_frame_length(info, (u32)arg);
+ case OV14810_IOCTL_SET_COARSE_TIME:
+ return ov14810_set_coarse_time(info, (u32)arg);
+ case OV14810_IOCTL_SET_GAIN:
+ return ov14810_set_gain(info, (u16)arg);
+ case OV14810_IOCTL_GET_STATUS:
+ {
+ u16 status = 0;
+		err = copy_to_user((void __user *)arg, &status, sizeof(status));
+		if (err) {
+			pr_err("%s %d\n", __func__, __LINE__);
+			return -EFAULT;
+ }
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ov14810_slavedev_open(void)
+{
+ pr_info("%s\n", __func__);
+
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0x19, 0x67);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0x18, 0x02);
+
+ return 0;
+}
+
+static int ov14810_slavedev_reset(void)
+{
+ pr_info("%s\n", __func__);
+
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0x18, 0x03);
+ msleep(1000);
+
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc1, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc2, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc3, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc4, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc5, 0x0);
+
+ msleep(1000);
+
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc1, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc2, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc3, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc4, 0x0);
+ OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc5, 0x17);
+
+ msleep(1000);
+
+ return 0;
+}
+
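+/*
+ * Upload the 8 KiB uCProgram image to the companion microcontroller.
+ * The address handed to ov14810_write16() is the byte-swapped loop
+ * index, so the two address bytes go out low byte first, which appears
+ * to be the ordering the microcontroller expects.
+ */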
+static int ov14810uC_open(void)
+{
+ int i;
+ int err;
+
+	pr_info("ov14810uC programming started\n");
+
+	for (i = 0; i < 8192; i++) {
+		err = ov14810_write16(info->uC.i2c_client,
+				(((i & 0xff) << 8) | ((i & 0xff00) >> 8)),
+				uCProgram[i]);
+		if (err)
+			return err;
+	}
+	pr_info("ov14810uC programming finished\n");
+
+ err = ov14810_slavedev_reset();
+
+ return err;
+}
+
+static int ov14810_open(struct inode *inode, struct file *file)
+{
+ int err;
+ pr_info("%s\n", __func__);
+ file->private_data = info;
+
+ err = ov14810_set_power(1);
+
+ if (err)
+ return err;
+
+ err = ov14810_slavedev_open();
+
+ if (err)
+ return err;
+
+ err = ov14810uC_open();
+
+ return err;
+}
+
+int ov14810_release(struct inode *inode, struct file *file)
+{
+ pr_info("%s\n", __func__);
+ ov14810_set_power(0);
+ file->private_data = NULL;
+ return 0;
+}
+
+static const struct file_operations ov14810_fileops = {
+ .owner = THIS_MODULE,
+ .open = ov14810_open,
+ .unlocked_ioctl = ov14810_ioctl,
+ .release = ov14810_release,
+};
+
+static struct miscdevice ov14810_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ov14810",
+ .fops = &ov14810_fileops,
+};
+
+static int ov14810_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ pr_info("%s: probing sensor.\n", __func__);
+
+ if (!info) {
+ info = kzalloc(sizeof(struct ov14810_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("ov14810: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+ }
+
+ err = misc_register(&ov14810_device);
+ if (err) {
+ pr_err("ov14810: Unable to register misc device!\n");
+ kfree(info);
+ return err;
+ }
+
+ info->sensor.pdata = client->dev.platform_data;
+ info->sensor.i2c_client = client;
+
+ return 0;
+}
+
+static int ov14810_remove(struct i2c_client *client)
+{
+ misc_deregister(&ov14810_device);
+ kfree(info);
+ return 0;
+}
+
+static int ov14810_uC_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (!info) {
+		info = kzalloc(sizeof(struct ov14810_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("ov14810uC: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+ }
+ info->uC.pdata = client->dev.platform_data;
+ info->uC.i2c_client = client;
+
+ return 0;
+}
+
+static int ov14810_uC_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int ov14810_slavedev_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ pr_info("%s: probing slave Dev of sensor.\n", __func__);
+
+ if (!info) {
+		info = kzalloc(sizeof(struct ov14810_info), GFP_KERNEL);
+ if (!info) {
+			pr_err("ov14810SlaveDev: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+ }
+
+ info->slaveDev.pdata = client->dev.platform_data;
+ info->slaveDev.i2c_client = client;
+
+ return 0;
+}
+
+static int ov14810_slavedev_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static const struct i2c_device_id ov14810_id[] = {
+ { "ov14810", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, ov14810_id);
+
+static struct i2c_driver ov14810_i2c_driver = {
+ .driver = {
+ .name = "ov14810",
+ .owner = THIS_MODULE,
+ },
+ .probe = ov14810_probe,
+ .remove = ov14810_remove,
+ .id_table = ov14810_id,
+};
+
+
+static const struct i2c_device_id ov14810_uC_id[] = {
+ { "ov14810uC", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, ov14810_uC_id);
+
+static struct i2c_driver ov14810_uC_i2c_driver = {
+ .driver = {
+ .name = "ov14810uC",
+ .owner = THIS_MODULE,
+ },
+ .probe = ov14810_uC_probe,
+ .remove = ov14810_uC_remove,
+ .id_table = ov14810_uC_id,
+};
+
+static const struct i2c_device_id ov14810_slavedev_id[] = {
+ { "ov14810SlaveDev", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, ov14810_slavedev_id);
+
+static struct i2c_driver ov14810_slavedev_i2c_driver = {
+ .driver = {
+ .name = "ov14810SlaveDev",
+ .owner = THIS_MODULE,
+ },
+ .probe = ov14810_slavedev_probe,
+ .remove = ov14810_slavedev_remove,
+ .id_table = ov14810_slavedev_id,
+};
+
+static int __init ov14810_init(void)
+{
+ int ret;
+ pr_info("ov14810 sensor driver loading\n");
+ ret = i2c_add_driver(&ov14810_i2c_driver);
+ if (ret)
+ return ret;
+
+ ret = i2c_add_driver(&ov14810_uC_i2c_driver);
+ if (ret)
+ return ret;
+
+ return i2c_add_driver(&ov14810_slavedev_i2c_driver);
+}
+
+static void __exit ov14810_exit(void)
+{
+ i2c_del_driver(&ov14810_slavedev_i2c_driver);
+ i2c_del_driver(&ov14810_uC_i2c_driver);
+ i2c_del_driver(&ov14810_i2c_driver);
+}
+
+module_init(ov14810_init);
+module_exit(ov14810_exit);
+
diff --git a/drivers/media/video/tegra/ov2710.c b/drivers/media/video/tegra/ov2710.c
new file mode 100644
index 000000000000..11405746056f
--- /dev/null
+++ b/drivers/media/video/tegra/ov2710.c
@@ -0,0 +1,682 @@
+/*
+ * ov2710.c - ov2710 sensor driver
+ *
+ * Copyright (c) 2011, NVIDIA, All Rights Reserved.
+ *
+ * Contributors:
+ * erik lilliebjerg <elilliebjerg@nvidia.com>
+ *
+ * Leverage OV5650.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/ov2710.h>
+
+struct ov2710_reg {
+ u16 addr;
+ u16 val;
+};
+
+struct ov2710_info {
+ int mode;
+ struct i2c_client *i2c_client;
+ struct ov2710_platform_data *pdata;
+};
+
+#define OV2710_TABLE_WAIT_MS 0
+#define OV2710_TABLE_END 1
+#define OV2710_MAX_RETRIES 3
+
+static struct ov2710_reg mode_1920x1080[] = {
+ {0x3103, 0x93},
+ {0x3008, 0x82},
+ {0x3017, 0x7f},
+ {0x3018, 0xfc},
+ {0x3706, 0x61},
+ {0x3712, 0x0c},
+ {0x3630, 0x6d},
+ {0x3801, 0xb4},
+
+ {0x3621, 0x04},
+ {0x3604, 0x60},
+ {0x3603, 0xa7},
+ {0x3631, 0x26},
+ {0x3600, 0x04},
+ {0x3620, 0x37},
+ {0x3623, 0x00},
+ {0x3702, 0x9e},
+ {0x3703, 0x5c},
+ {0x3704, 0x40},
+ {0x370d, 0x0f},
+ {0x3713, 0x9f},
+ {0x3714, 0x4c},
+ {0x3710, 0x9e},
+ {0x3801, 0xc4},
+ {0x3605, 0x05},
+ {0x3606, 0x3f},
+ {0x302d, 0x90},
+ {0x370b, 0x40},
+ {0x3716, 0x31},
+ {0x3707, 0x52},
+ {0x380d, 0x74},
+ {0x5181, 0x20},
+ {0x518f, 0x00},
+ {0x4301, 0xff},
+ {0x4303, 0x00},
+ {0x3a00, 0x78},
+ {0x300f, 0x88},
+ {0x3011, 0x28},
+ {0x3a1a, 0x06},
+ {0x3a18, 0x00},
+ {0x3a19, 0x7a},
+ {0x3a13, 0x54},
+ {0x382e, 0x0f},
+ {0x381a, 0x1a},
+ {0x401d, 0x02},
+
+ {0x381c, 0x00},
+ {0x381d, 0x02},
+ {0x381e, 0x04},
+ {0x381f, 0x38},
+ {0x3820, 0x00},
+ {0x3821, 0x98},
+ {0x3800, 0x01},
+ {0x3802, 0x00},
+ {0x3803, 0x0a},
+ {0x3804, 0x07},
+ {0x3805, 0x90},
+ {0x3806, 0x04},
+ {0x3807, 0x40},
+ {0x3808, 0x07},
+ {0x3809, 0x90},
+ {0x380a, 0x04},
+ {0x380b, 0x40},
+ {0x380e, 0x04},
+ {0x380f, 0x50},
+ {0x380c, 0x09},
+ {0x380d, 0x74},
+ {0x3810, 0x08},
+ {0x3811, 0x02},
+
+ {0x5688, 0x03},
+ {0x5684, 0x07},
+ {0x5685, 0xa0},
+ {0x5686, 0x04},
+ {0x5687, 0x43},
+ {0x3011, 0x0a},
+ {0x300f, 0x8a},
+ {0x3017, 0x00},
+ {0x3018, 0x00},
+ {0x4800, 0x24},
+ {0x300e, 0x04},
+ {0x4801, 0x0f},
+
+ {0x300f, 0xc3},
+ {0x3010, 0x00},
+ {0x3011, 0x0a},
+ {0x3012, 0x01},
+
+ {0x3a0f, 0x40},
+ {0x3a10, 0x38},
+ {0x3a1b, 0x48},
+ {0x3a1e, 0x30},
+ {0x3a11, 0x90},
+ {0x3a1f, 0x10},
+
+ {0x3a0e, 0x03},
+ {0x3a0d, 0x04},
+ {0x3a08, 0x14},
+ {0x3a09, 0xc0},
+ {0x3a0a, 0x11},
+ {0x3a0b, 0x40},
+
+ {0x300f, 0xc3},
+ {0x3010, 0x00},
+ {0x3011, 0x0e},
+ {0x3012, 0x02},
+ {0x380c, 0x09},
+ {0x380d, 0xec},
+ {0x3703, 0x61},
+ {0x3704, 0x44},
+ {0x3801, 0xd2},
+
+ {0x3503, 0x17},
+ {0x3500, 0x00},
+ {0x3501, 0x00},
+ {0x3502, 0x00},
+ {0x350a, 0x00},
+ {0x350b, 0x00},
+ {0x5001, 0x4e},
+ {0x5000, 0x5f},
+
+ {OV2710_TABLE_END, 0x0000}
+};
+
+static struct ov2710_reg mode_1280x720[] = {
+ {0x3103, 0x93},
+ {0x3008, 0x82},
+ {0x3017, 0x7f},
+ {0x3018, 0xfc},
+
+ {0x3706, 0x61},
+ {0x3712, 0x0c},
+ {0x3630, 0x6d},
+ {0x3801, 0xb4},
+ {0x3621, 0x04},
+ {0x3604, 0x60},
+ {0x3603, 0xa7},
+ {0x3631, 0x26},
+ {0x3600, 0x04},
+ {0x3620, 0x37},
+ {0x3623, 0x00},
+ {0x3702, 0x9e},
+ {0x3703, 0x5c},
+ {0x3704, 0x40},
+ {0x370d, 0x0f},
+ {0x3713, 0x9f},
+ {0x3714, 0x4c},
+ {0x3710, 0x9e},
+ {0x3801, 0xc4},
+ {0x3605, 0x05},
+ {0x3606, 0x3f},
+ {0x302d, 0x90},
+ {0x370b, 0x40},
+ {0x3716, 0x31},
+ {0x3707, 0x52},
+ {0x380d, 0x74},
+ {0x5181, 0x20},
+ {0x518f, 0x00},
+ {0x4301, 0xff},
+ {0x4303, 0x00},
+ {0x3a00, 0x78},
+ {0x300f, 0x88},
+ {0x3011, 0x28},
+ {0x3a1a, 0x06},
+ {0x3a18, 0x00},
+ {0x3a19, 0x7a},
+ {0x3a13, 0x54},
+ {0x382e, 0x0f},
+ {0x381a, 0x1a},
+ {0x401d, 0x02},
+
+ {0x381c, 0x10},
+ {0x381d, 0xb0},
+ {0x381e, 0x02},
+ {0x381f, 0xec},
+ {0x3800, 0x01},
+ {0x3820, 0x0a},
+ {0x3821, 0x2a},
+ {0x3804, 0x05},
+ {0x3805, 0x10},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3806, 0x02},
+ {0x3807, 0xe0},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x02},
+ {0x380b, 0xe0},
+ {0x380e, 0x02},
+ {0x380f, 0xf0},
+ {0x380c, 0x07},
+ {0x380d, 0x00},
+ {0x3810, 0x10},
+ {0x3811, 0x06},
+
+ {0x5688, 0x03},
+ {0x5684, 0x05},
+ {0x5685, 0x00},
+ {0x5686, 0x02},
+ {0x5687, 0xd0},
+
+ {0x3a08, 0x1b},
+ {0x3a09, 0xe6},
+ {0x3a0a, 0x17},
+ {0x3a0b, 0x40},
+ {0x3a0e, 0x01},
+ {0x3a0d, 0x02},
+ {0x3011, 0x0a},
+ {0x300f, 0x8a},
+ {0x3017, 0x00},
+ {0x3018, 0x00},
+ {0x4800, 0x24},
+ {0x300e, 0x04},
+ {0x4801, 0x0f},
+ {0x300f, 0xc3},
+ {0x3a0f, 0x40},
+ {0x3a10, 0x38},
+ {0x3a1b, 0x48},
+ {0x3a1e, 0x30},
+ {0x3a11, 0x90},
+ {0x3a1f, 0x10},
+
+ {0x3010, 0x10},
+ {0x3a0e, 0x02},
+ {0x3a0d, 0x03},
+ {0x3a08, 0x0d},
+ {0x3a09, 0xf3},
+ {0x3a0a, 0x0b},
+ {0x3a0b, 0xa0},
+
+ {0x300f, 0xc3},
+ {0x3011, 0x0e},
+ {0x3012, 0x02},
+ {0x380c, 0x07},
+ {0x380d, 0x6a},
+ {0x3703, 0x5c},
+ {0x3704, 0x40},
+ {0x3801, 0xbc},
+
+ {0x3503, 0x17},
+ {0x3500, 0x00},
+ {0x3501, 0x00},
+ {0x3502, 0x00},
+ {0x350a, 0x00},
+ {0x350b, 0x00},
+ {0x5001, 0x4e},
+ {0x5000, 0x5f},
+
+ {OV2710_TABLE_END, 0x0000}
+};
+
+enum {
+ OV2710_MODE_1920x1080,
+ OV2710_MODE_1280x720,
+};
+
+
+static struct ov2710_reg *mode_table[] = {
+ [OV2710_MODE_1920x1080] = mode_1920x1080,
+ [OV2710_MODE_1280x720] = mode_1280x720,
+};
+
+static inline void ov2710_get_frame_length_regs(struct ov2710_reg *regs,
+ u32 frame_length)
+{
+ regs->addr = 0x380e;
+ regs->val = (frame_length >> 8) & 0xff;
+ (regs + 1)->addr = 0x380f;
+ (regs + 1)->val = (frame_length) & 0xff;
+}
+
+static inline void ov2710_get_coarse_time_regs(struct ov2710_reg *regs,
+ u32 coarse_time)
+{
+ regs->addr = 0x3500;
+ regs->val = (coarse_time >> 12) & 0xff;
+ (regs + 1)->addr = 0x3501;
+ (regs + 1)->val = (coarse_time >> 4) & 0xff;
+ (regs + 2)->addr = 0x3502;
+ (regs + 2)->val = (coarse_time & 0xf) << 4;
+}
+
+static inline void ov2710_get_gain_reg(struct ov2710_reg *regs, u16 gain)
+{
+ regs->addr = 0x350b;
+ regs->val = gain;
+}
+
+static int ov2710_read_reg(struct i2c_client *client, u16 addr, u8 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[3];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+	data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = data + 2;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+
+	if (err != 2)
+		return -EINVAL;
+
+ *val = data[2];
+
+ return 0;
+}
+
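+/*
+ * Register writes are retried up to OV2710_MAX_RETRIES times, with a
+ * 3 ms pause between attempts, before the I2C error is propagated.
+ */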
+static int ov2710_write_reg(struct i2c_client *client, u16 addr, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[3];
+ int retry = 0;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+	data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+ data[2] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = data;
+
+ do {
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err == 1)
+ return 0;
+ retry++;
+ pr_err("ov2710: i2c transfer failed, retrying %x %x\n",
+ addr, val);
+
+ msleep(3);
+ } while (retry <= OV2710_MAX_RETRIES);
+
+ return err;
+}
+
+static int ov2710_write_table(struct i2c_client *client,
+ const struct ov2710_reg table[],
+ const struct ov2710_reg override_list[],
+ int num_override_regs)
+{
+ int err;
+ const struct ov2710_reg *next;
+ int i;
+ u16 val;
+
+ for (next = table; next->addr != OV2710_TABLE_END; next++) {
+ if (next->addr == OV2710_TABLE_WAIT_MS) {
+ msleep(next->val);
+ continue;
+ }
+
+
+ val = next->val;
+
+ /* When an override list is passed in, replace the reg */
+ /* value to write if the reg is in the list */
+ if (override_list) {
+ for (i = 0; i < num_override_regs; i++) {
+ if (next->addr == override_list[i].addr) {
+ val = override_list[i].val;
+ break;
+ }
+ }
+ }
+
+ err = ov2710_write_reg(client, next->addr, val);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int ov2710_set_mode(struct ov2710_info *info, struct ov2710_mode *mode)
+{
+ int sensor_mode;
+ int err;
+ struct ov2710_reg reg_list[6];
+
+ pr_info("%s: xres %u yres %u framelength %u coarsetime %u gain %u\n",
+ __func__, mode->xres, mode->yres, mode->frame_length,
+ mode->coarse_time, mode->gain);
+
+ if (mode->xres == 1920 && mode->yres == 1080)
+ sensor_mode = OV2710_MODE_1920x1080;
+ else if (mode->xres == 1280 && mode->yres == 720)
+ sensor_mode = OV2710_MODE_1280x720;
+ else {
+ pr_err("%s: invalid resolution supplied to set mode %d %d\n",
+ __func__, mode->xres, mode->yres);
+ return -EINVAL;
+ }
+
+ /* get a list of override regs for the asking frame length, */
+ /* coarse integration time, and gain. */
+ ov2710_get_frame_length_regs(reg_list, mode->frame_length);
+ ov2710_get_coarse_time_regs(reg_list + 2, mode->coarse_time);
+ ov2710_get_gain_reg(reg_list + 5, mode->gain);
+
+ err = ov2710_write_table(info->i2c_client, mode_table[sensor_mode],
+ reg_list, 6);
+ if (err)
+ return err;
+
+ info->mode = sensor_mode;
+ return 0;
+}
+
+static int ov2710_set_frame_length(struct ov2710_info *info, u32 frame_length)
+{
+ struct ov2710_reg reg_list[2];
+ int i = 0;
+ int ret;
+
+ ov2710_get_frame_length_regs(reg_list, frame_length);
+
+ for (i = 0; i < 2; i++) {
+ ret = ov2710_write_reg(info->i2c_client, reg_list[i].addr,
+ reg_list[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov2710_set_coarse_time(struct ov2710_info *info, u32 coarse_time)
+{
+ int ret;
+
+ struct ov2710_reg reg_list[3];
+ int i = 0;
+
+ ov2710_get_coarse_time_regs(reg_list, coarse_time);
+
+ ret = ov2710_write_reg(info->i2c_client, 0x3212, 0x01);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = ov2710_write_reg(info->i2c_client, reg_list[i].addr,
+ reg_list[i].val);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov2710_write_reg(info->i2c_client, 0x3212, 0x11);
+ if (ret)
+ return ret;
+
+ ret = ov2710_write_reg(info->i2c_client, 0x3212, 0xa1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ov2710_set_gain(struct ov2710_info *info, u16 gain)
+{
+ int ret;
+ struct ov2710_reg reg_list;
+
+ ov2710_get_gain_reg(&reg_list, gain);
+
+ ret = ov2710_write_reg(info->i2c_client, reg_list.addr, reg_list.val);
+
+ return ret;
+}
+
+static int ov2710_get_status(struct ov2710_info *info, u8 *status)
+{
+ int err;
+
+ *status = 0;
+ err = ov2710_read_reg(info->i2c_client, 0x002, status);
+ return err;
+}
+
+
+static long ov2710_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ struct ov2710_info *info = file->private_data;
+
+ switch (cmd) {
+ case OV2710_IOCTL_SET_MODE:
+ {
+ struct ov2710_mode mode;
+ if (copy_from_user(&mode,
+ (const void __user *)arg,
+ sizeof(struct ov2710_mode))) {
+ return -EFAULT;
+ }
+
+ return ov2710_set_mode(info, &mode);
+ }
+ case OV2710_IOCTL_SET_FRAME_LENGTH:
+ return ov2710_set_frame_length(info, (u32)arg);
+ case OV2710_IOCTL_SET_COARSE_TIME:
+ return ov2710_set_coarse_time(info, (u32)arg);
+ case OV2710_IOCTL_SET_GAIN:
+ return ov2710_set_gain(info, (u16)arg);
+ case OV2710_IOCTL_GET_STATUS:
+ {
+ u8 status;
+
+ err = ov2710_get_status(info, &status);
+ if (err)
+ return err;
+ if (copy_to_user((void __user *)arg, &status,
+ 2)) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct ov2710_info *info;
+
+static int ov2710_open(struct inode *inode, struct file *file)
+{
+ u8 status;
+
+ file->private_data = info;
+ if (info->pdata && info->pdata->power_on)
+ info->pdata->power_on();
+ ov2710_get_status(info, &status);
+ return 0;
+}
+
+int ov2710_release(struct inode *inode, struct file *file)
+{
+ if (info->pdata && info->pdata->power_off)
+ info->pdata->power_off();
+ file->private_data = NULL;
+ return 0;
+}
+
+
+static const struct file_operations ov2710_fileops = {
+ .owner = THIS_MODULE,
+ .open = ov2710_open,
+ .unlocked_ioctl = ov2710_ioctl,
+ .release = ov2710_release,
+};
+
+static struct miscdevice ov2710_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ov2710",
+ .fops = &ov2710_fileops,
+};
+
+static int ov2710_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+
+ pr_info("ov2710: probing sensor.\n");
+
+ info = kzalloc(sizeof(struct ov2710_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("ov2710: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ err = misc_register(&ov2710_device);
+ if (err) {
+ pr_err("ov2710: Unable to register misc device!\n");
+ kfree(info);
+ return err;
+ }
+
+ info->pdata = client->dev.platform_data;
+ info->i2c_client = client;
+
+ i2c_set_clientdata(client, info);
+ return 0;
+}
+
+static int ov2710_remove(struct i2c_client *client)
+{
+ struct ov2710_info *info;
+ info = i2c_get_clientdata(client);
+ misc_deregister(&ov2710_device);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id ov2710_id[] = {
+ { "ov2710", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, ov2710_id);
+
+static struct i2c_driver ov2710_i2c_driver = {
+ .driver = {
+ .name = "ov2710",
+ .owner = THIS_MODULE,
+ },
+ .probe = ov2710_probe,
+ .remove = ov2710_remove,
+ .id_table = ov2710_id,
+};
+
+static int __init ov2710_init(void)
+{
+ pr_info("ov2710 sensor driver loading\n");
+ return i2c_add_driver(&ov2710_i2c_driver);
+}
+
+static void __exit ov2710_exit(void)
+{
+ i2c_del_driver(&ov2710_i2c_driver);
+}
+
+module_init(ov2710_init);
+module_exit(ov2710_exit);
+
diff --git a/drivers/media/video/tegra/ov5650.c b/drivers/media/video/tegra/ov5650.c
new file mode 100644
index 000000000000..cc50e9141e66
--- /dev/null
+++ b/drivers/media/video/tegra/ov5650.c
@@ -0,0 +1,1482 @@
+/*
+ * ov5650.c - ov5650 sensor driver
+ *
+ * Copyright (C) 2011 Google Inc.
+ *
+ * Contributors:
+ * Rebecca Schultz Zavin <rebecca@android.com>
+ *
+ * Leverage OV9640.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/ov5650.h>
+#include <media/tegra_camera.h>
+
+#define SIZEOF_I2C_TRANSBUF 32
+
+struct ov5650_reg {
+ u16 addr;
+ u16 val;
+};
+
+struct ov5650_sensor {
+ struct i2c_client *i2c_client;
+ struct ov5650_platform_data *pdata;
+};
+
+struct ov5650_info {
+ int mode;
+ enum StereoCameraMode camera_mode;
+ struct ov5650_sensor left;
+ struct ov5650_sensor right;
+ u8 i2c_trans_buf[SIZEOF_I2C_TRANSBUF];
+};
+
+static struct ov5650_info *stereo_ov5650_info;
+
+#define OV5650_TABLE_WAIT_MS 0
+#define OV5650_TABLE_END 1
+#define OV5650_MAX_RETRIES 3
+
+static struct ov5650_reg tp_none_seq[] = {
+ {0x5046, 0x00},
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg tp_cbars_seq[] = {
+ {0x503D, 0xC0},
+ {0x503E, 0x00},
+ {0x5046, 0x01},
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg tp_checker_seq[] = {
+ {0x503D, 0xC0},
+ {0x503E, 0x0A},
+ {0x5046, 0x01},
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg *test_pattern_modes[] = {
+ tp_none_seq,
+ tp_cbars_seq,
+ tp_checker_seq,
+};
+
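+/*
+ * reset_seq issues what looks like a software reset (0x3008 = 0x82) and
+ * then drops the sensor into standby (0x3008 = 0x42), with 5 ms
+ * OV5650_TABLE_WAIT_MS delays in between, before a new mode table is
+ * programmed by ov5650_set_mode().
+ */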
+static struct ov5650_reg reset_seq[] = {
+ {0x3008, 0x82},
+ {OV5650_TABLE_WAIT_MS, 5},
+ {0x3008, 0x42},
+ {OV5650_TABLE_WAIT_MS, 5},
+ {OV5650_TABLE_END, 0x0000},
+};
+
+static struct ov5650_reg mode_start[] = {
+ {0x3103, 0x93},
+ {0x3017, 0xff},
+ {0x3018, 0xfc},
+
+ {0x3600, 0x50},
+ {0x3601, 0x0d},
+ {0x3604, 0x50},
+ {0x3605, 0x04},
+ {0x3606, 0x3f},
+ {0x3612, 0x1a},
+ {0x3630, 0x22},
+ {0x3631, 0x22},
+ {0x3702, 0x3a},
+ {0x3704, 0x18},
+ {0x3705, 0xda},
+ {0x3706, 0x41},
+ {0x370a, 0x80},
+ {0x370b, 0x40},
+ {0x370e, 0x00},
+ {0x3710, 0x28},
+ {0x3712, 0x13},
+ {0x3830, 0x50},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3a00, 0x38},
+
+
+ {0x3603, 0xa7},
+ {0x3615, 0x50},
+ {0x3620, 0x56},
+ {0x3810, 0x00},
+ {0x3836, 0x00},
+ {0x3a1a, 0x06},
+ {0x4000, 0x01},
+ {0x401c, 0x48},
+ {0x401d, 0x08},
+ {0x5000, 0x00},
+ {0x5001, 0x00},
+ {0x5002, 0x00},
+ {0x503d, 0x00},
+ {0x5046, 0x00},
+
+ {0x300f, 0x8f},
+
+ {0x3010, 0x10},
+ {0x3011, 0x14},
+ {0x3012, 0x02},
+ {0x3815, 0x82},
+ {0x3503, 0x33},
+ {0x3613, 0x44},
+ {OV5650_TABLE_END, 0x0},
+};
+
+static struct ov5650_reg mode_2592x1944[] = {
+ {0x3621, 0x2f},
+
+ {0x3632, 0x55},
+ {0x3703, 0xe6},
+ {0x370c, 0xa0},
+ {0x370d, 0x04},
+ {0x3713, 0x2f},
+ {0x3800, 0x02},
+ {0x3801, 0x58},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x0a},
+ {0x3805, 0x20},
+ {0x3806, 0x07},
+ {0x3807, 0xa0},
+ {0x3808, 0x0a},
+
+ {0x3809, 0x20},
+
+ {0x380a, 0x07},
+
+ {0x380b, 0xa0},
+
+ {0x380c, 0x0c},
+
+ {0x380d, 0xb4},
+
+ {0x380e, 0x07},
+
+ {0x380f, 0xb0},
+
+ {0x3818, 0xc0},
+ {0x381a, 0x3c},
+ {0x3a0d, 0x06},
+ {0x3c01, 0x00},
+ {0x3007, 0x3f},
+ {0x5059, 0x80},
+ {0x3003, 0x03},
+ {0x3500, 0x00},
+ {0x3501, 0x7a},
+
+ {0x3502, 0xd0},
+
+ {0x350a, 0x00},
+ {0x350b, 0x00},
+ {0x401d, 0x08},
+ {0x4801, 0x0f},
+ {0x300e, 0x0c},
+ {0x4803, 0x50},
+ {0x4800, 0x34},
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg mode_1296x972[] = {
+ {0x3621, 0xaf},
+
+ {0x3632, 0x5a},
+ {0x3703, 0xb0},
+ {0x370c, 0xc5},
+ {0x370d, 0x42},
+ {0x3713, 0x2f},
+ {0x3800, 0x03},
+ {0x3801, 0x3c},
+ {0x3802, 0x00},
+ {0x3803, 0x06},
+ {0x3804, 0x05},
+ {0x3805, 0x10},
+ {0x3806, 0x03},
+ {0x3807, 0xd0},
+ {0x3808, 0x05},
+
+ {0x3809, 0x10},
+
+ {0x380a, 0x03},
+
+ {0x380b, 0xd0},
+
+ {0x380c, 0x08},
+
+ {0x380d, 0xa8},
+
+ {0x380e, 0x05},
+
+ {0x380f, 0xa4},
+
+ {0x3818, 0xc1},
+ {0x381a, 0x00},
+ {0x3a0d, 0x08},
+ {0x3c01, 0x00},
+ {0x3007, 0x3b},
+ {0x5059, 0x80},
+ {0x3003, 0x03},
+ {0x3500, 0x00},
+
+ {0x3501, 0x5a},
+ {0x3502, 0x10},
+ {0x350a, 0x00},
+ {0x350b, 0x10},
+ {0x401d, 0x08},
+ {0x4801, 0x0f},
+ {0x300e, 0x0c},
+ {0x4803, 0x50},
+ {0x4800, 0x34},
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg mode_2080x1164[] = {
+ {0x3103, 0x93},
+ {0x3007, 0x3b},
+ {0x3017, 0xff},
+ {0x3018, 0xfc},
+
+ {0x3600, 0x54},
+ {0x3601, 0x05},
+ {0x3603, 0xa7},
+ {0x3604, 0x40},
+ {0x3605, 0x04},
+ {0x3606, 0x3f},
+ {0x3612, 0x1a},
+ {0x3613, 0x44},
+ {0x3615, 0x52},
+ {0x3620, 0x56},
+ {0x3623, 0x01},
+ {0x3630, 0x22},
+ {0x3631, 0x36},
+ {0x3632, 0x5f},
+ {0x3633, 0x24},
+
+ {0x3702, 0x3a},
+ {0x3704, 0x18},
+ {0x3706, 0x41},
+ {0x370b, 0x40},
+ {0x370e, 0x00},
+ {0x3710, 0x28},
+ {0x3711, 0x24},
+ {0x3712, 0x13},
+
+ {0x3810, 0x00},
+ {0x3815, 0x82},
+ {0x3830, 0x50},
+ {0x3836, 0x00},
+
+ {0x3a1a, 0x06},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3a00, 0x38},
+
+ {0x3a0d, 0x06},
+ {0x3c01, 0x34},
+
+ {0x401f, 0x03},
+ {0x4000, 0x05},
+ {0x401d, 0x08},
+ {0x4001, 0x02},
+
+ {0x5000, 0x00},
+ {0x5001, 0x00},
+ {0x5002, 0x00},
+ {0x503d, 0x00},
+ {0x5046, 0x00},
+
+ {0x300f, 0x8f},
+
+ {0x3010, 0x10},
+ {0x3011, 0x14},
+ {0x3012, 0x02},
+ {0x3503, 0x33},
+
+
+ {0x3621, 0x2f},
+
+ {0x3703, 0xe6},
+ {0x370c, 0x00},
+ {0x370d, 0x04},
+ {0x3713, 0x22},
+ {0x3714, 0x27},
+ {0x3705, 0xda},
+ {0x370a, 0x80},
+
+ {0x3800, 0x02},
+ {0x3801, 0x12},
+ {0x3802, 0x00},
+ {0x3803, 0x0a},
+ {0x3804, 0x08},
+ {0x3805, 0x20},
+ {0x3806, 0x04},
+ {0x3807, 0x92},
+ {0x3808, 0x08},
+
+ {0x3809, 0x20},
+
+ {0x380a, 0x04},
+
+ {0x380b, 0x92},
+
+ {0x380c, 0x0a},
+
+ {0x380d, 0x96},
+
+ {0x380e, 0x04},
+
+ {0x380f, 0x9e},
+
+ {0x3818, 0xc0},
+ {0x381a, 0x3c},
+ {0x381c, 0x31},
+ {0x381d, 0x8e},
+ {0x381e, 0x04},
+ {0x381f, 0x92},
+ {0x3820, 0x04},
+ {0x3821, 0x19},
+ {0x3824, 0x01},
+ {0x3827, 0x0a},
+ {0x401c, 0x46},
+
+ {0x3003, 0x03},
+ {0x3500, 0x00},
+ {0x3501, 0x49},
+ {0x3502, 0xa0},
+ {0x350a, 0x00},
+ {0x350b, 0x00},
+ {0x4801, 0x0f},
+ {0x300e, 0x0c},
+ {0x4803, 0x50},
+ {0x4800, 0x34},
+
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg mode_1920x1080[] = {
+ {0x3103, 0x93},
+ {0x3007, 0x3b},
+ {0x3017, 0xff},
+ {0x3018, 0xfc},
+
+ {0x3600, 0x54},
+ {0x3601, 0x05},
+ {0x3603, 0xa7},
+ {0x3604, 0x40},
+ {0x3605, 0x04},
+ {0x3606, 0x3f},
+ {0x3612, 0x1a},
+ {0x3613, 0x44},
+ {0x3615, 0x52},
+ {0x3620, 0x56},
+ {0x3623, 0x01},
+ {0x3630, 0x22},
+ {0x3631, 0x36},
+ {0x3632, 0x5f},
+ {0x3633, 0x24},
+
+ {0x3702, 0x3a},
+ {0x3704, 0x18},
+ {0x3706, 0x41},
+ {0x370b, 0x40},
+ {0x370e, 0x00},
+ {0x3710, 0x28},
+ {0x3711, 0x24},
+ {0x3712, 0x13},
+
+ {0x3810, 0x00},
+ {0x3815, 0x82},
+
+ {0x3830, 0x50},
+ {0x3836, 0x00},
+
+ {0x3a1a, 0x06},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3a00, 0x38},
+ {0x3a0d, 0x06},
+ {0x3c01, 0x34},
+
+ {0x401f, 0x03},
+ {0x4000, 0x05},
+ {0x401d, 0x08},
+ {0x4001, 0x02},
+
+ {0x5000, 0x00},
+ {0x5001, 0x00},
+ {0x5002, 0x00},
+ {0x503d, 0x00},
+ {0x5046, 0x00},
+
+ {0x300f, 0x8f},
+ {0x3010, 0x10},
+ {0x3011, 0x14},
+ {0x3012, 0x02},
+ {0x3503, 0x33},
+
+ {0x3621, 0x2f},
+ {0x3703, 0xe6},
+ {0x370c, 0x00},
+ {0x370d, 0x04},
+ {0x3713, 0x22},
+ {0x3714, 0x27},
+ {0x3705, 0xda},
+ {0x370a, 0x80},
+
+ {0x3800, 0x02},
+ {0x3801, 0x94},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x07},
+ {0x3805, 0x80},
+ {0x3806, 0x04},
+ {0x3807, 0x40},
+ {0x3808, 0x07},
+ {0x3809, 0x80},
+ {0x380a, 0x04},
+ {0x380b, 0x40},
+ {0x380c, 0x0a},
+ {0x380d, 0x84},
+ {0x380e, 0x04},
+ {0x380f, 0xa4},
+ {0x3818, 0xc0},
+ {0x381a, 0x3c},
+ {0x381c, 0x31},
+ {0x381d, 0xa4},
+ {0x381e, 0x04},
+ {0x381f, 0x60},
+ {0x3820, 0x03},
+ {0x3821, 0x1a},
+ {0x3824, 0x01},
+ {0x3827, 0x0a},
+ {0x401c, 0x46},
+
+ {0x3003, 0x03},
+ {0x3500, 0x00},
+ {0x3501, 0x49},
+ {0x3502, 0xa0},
+ {0x350a, 0x00},
+ {0x350b, 0x00},
+ {0x4801, 0x0f},
+ {0x300e, 0x0c},
+ {0x4803, 0x50},
+ {0x4800, 0x34},
+
+ {OV5650_TABLE_END, 0x0000}
+};
+
+
+static struct ov5650_reg mode_1264x704[] = {
+ {0x3600, 0x54},
+ {0x3601, 0x05},
+ {0x3604, 0x40},
+ {0x3705, 0xdb},
+ {0x370a, 0x81},
+ {0x3615, 0x52},
+ {0x3810, 0x40},
+ {0x3836, 0x41},
+ {0x4000, 0x05},
+ {0x401c, 0x42},
+ {0x401d, 0x08},
+ {0x5046, 0x09},
+ {0x3010, 0x00},
+ {0x3503, 0x00},
+ {0x3613, 0xc4},
+
+ {0x3621, 0xaf},
+
+ {0x3632, 0x55},
+ {0x3703, 0x9a},
+ {0x370c, 0x00},
+ {0x370d, 0x42},
+ {0x3713, 0x22},
+ {0x3800, 0x02},
+ {0x3801, 0x54},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x05},
+ {0x3805, 0x00},
+ {0x3806, 0x02},
+ {0x3807, 0xd0},
+ {0x3808, 0x05},
+
+ {0x3809, 0x00},
+
+ {0x380a, 0x02},
+
+ {0x380b, 0xd0},
+
+ {0x380c, 0x08},
+
+ {0x380d, 0x72},
+
+ {0x380e, 0x02},
+
+ {0x380f, 0xe4},
+
+ {0x3818, 0xc1},
+ {0x381a, 0x3c},
+ {0x3a0d, 0x06},
+ {0x3c01, 0x34},
+ {0x3007, 0x3b},
+ {0x5059, 0x80},
+ {0x3003, 0x03},
+ {0x3500, 0x04},
+ {0x3501, 0xa5},
+
+ {0x3502, 0x10},
+
+ {0x350a, 0x00},
+ {0x350b, 0x00},
+ {0x4801, 0x0f},
+ {0x300e, 0x0c},
+ {0x4803, 0x50},
+ {0x4800, 0x24},
+ {0x300f, 0x8b},
+
+ {0x3711, 0x24},
+ {0x3713, 0x92},
+ {0x3714, 0x17},
+ {0x381c, 0x10},
+ {0x381d, 0x82},
+ {0x381e, 0x05},
+ {0x381f, 0xc0},
+ {0x3821, 0x20},
+ {0x3824, 0x23},
+ {0x3825, 0x2c},
+ {0x3826, 0x00},
+ {0x3827, 0x0c},
+ {0x3623, 0x01},
+ {0x3633, 0x24},
+ {0x3632, 0x5f},
+ {0x401f, 0x03},
+
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg mode_320x240[] = {
+ {0x3103, 0x93},
+ {0x3b07, 0x0c},
+ {0x3017, 0xff},
+ {0x3018, 0xfc},
+ {0x3706, 0x41},
+ {0x3613, 0xc4},
+ {0x370d, 0x42},
+ {0x3703, 0x9a},
+ {0x3630, 0x22},
+ {0x3605, 0x04},
+ {0x3606, 0x3f},
+ {0x3712, 0x13},
+ {0x370e, 0x00},
+ {0x370b, 0x40},
+ {0x3600, 0x54},
+ {0x3601, 0x05},
+ {0x3713, 0x22},
+ {0x3714, 0x27},
+ {0x3631, 0x22},
+ {0x3612, 0x1a},
+ {0x3604, 0x40},
+ {0x3705, 0xdc},
+ {0x370a, 0x83},
+ {0x370c, 0xc8},
+ {0x3710, 0x28},
+ {0x3702, 0x3a},
+ {0x3704, 0x18},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3a00, 0x38},
+ {0x3800, 0x02},
+ {0x3801, 0x54},
+ {0x3803, 0x0c},
+ {0x380c, 0x0c},
+ {0x380d, 0xb4},
+ {0x380e, 0x07},
+ {0x380f, 0xb0},
+ {0x3830, 0x50},
+ {0x3a08, 0x12},
+ {0x3a09, 0x70},
+ {0x3a0a, 0x0f},
+ {0x3a0b, 0x60},
+ {0x3a0d, 0x06},
+ {0x3a0e, 0x06},
+ {0x3a13, 0x54},
+ {0x3815, 0x82},
+ {0x5059, 0x80},
+ {0x3615, 0x52},
+ {0x505a, 0x0a},
+ {0x505b, 0x2e},
+ {0x3713, 0x92},
+ {0x3714, 0x17},
+ {0x3803, 0x0a},
+ {0x3804, 0x05},
+ {0x3805, 0x00},
+ {0x3806, 0x01},
+ {0x3807, 0x00},
+ {0x3808, 0x01},
+ {0x3809, 0x40},
+ {0x380a, 0x01},
+ {0x380b, 0x00},
+ {0x380c, 0x0a},
+
+ {0x380d, 0x04},
+
+ {0x380e, 0x01},
+
+ {0x380f, 0x38},
+
+ {0x3815, 0x81},
+ {0x3824, 0x23},
+ {0x3825, 0x20},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x370d, 0xc2},
+ {0x3a08, 0x17},
+ {0x3a09, 0x64},
+ {0x3a0a, 0x13},
+ {0x3a0b, 0x80},
+ {0x3a00, 0x58},
+ {0x3a1a, 0x06},
+ {0x3503, 0x33},
+ {0x3623, 0x01},
+ {0x3633, 0x24},
+ {0x3c01, 0x34},
+ {0x3c04, 0x28},
+ {0x3c05, 0x98},
+ {0x3c07, 0x07},
+ {0x3c09, 0xc2},
+ {0x4000, 0x05},
+ {0x401d, 0x08},
+ {0x4001, 0x02},
+ {0x401c, 0x42},
+ {0x5046, 0x09},
+ {0x3810, 0x40},
+ {0x3836, 0x41},
+ {0x505f, 0x04},
+ {0x5000, 0x06},
+ {0x5001, 0x00},
+ {0x5002, 0x02},
+ {0x503d, 0x00},
+ {0x5901, 0x08},
+ {0x585a, 0x01},
+ {0x585b, 0x2c},
+ {0x585c, 0x01},
+ {0x585d, 0x93},
+ {0x585e, 0x01},
+ {0x585f, 0x90},
+ {0x5860, 0x01},
+ {0x5861, 0x0d},
+ {0x5180, 0xc0},
+ {0x5184, 0x00},
+ {0x470a, 0x00},
+ {0x470b, 0x00},
+ {0x470c, 0x00},
+ {0x300f, 0x8e},
+ {0x3603, 0xa7},
+ {0x3632, 0x55},
+ {0x3620, 0x56},
+ {0x3621, 0xaf},
+ {0x3818, 0xc3},
+ {0x3631, 0x36},
+ {0x3632, 0x5f},
+ {0x3711, 0x24},
+ {0x401f, 0x03},
+
+ {0x3011, 0x14},
+ {0x3007, 0x3B},
+ {0x300f, 0x8f},
+ {0x4801, 0x0f},
+ {0x3003, 0x03},
+ {0x300e, 0x0c},
+ {0x3010, 0x15},
+ {0x4803, 0x50},
+ {0x4800, 0x24},
+ {0x4837, 0x40},
+ {0x3815, 0x82},
+
+ {OV5650_TABLE_END, 0x0000}
+};
+
+static struct ov5650_reg mode_end[] = {
+ {0x3212, 0x00},
+ {0x3003, 0x01},
+ {0x3212, 0x10},
+ {0x3212, 0xa0},
+ {0x3008, 0x02},
+
+ {OV5650_TABLE_END, 0x0000}
+};
+
+enum {
+ OV5650_MODE_2592x1944,
+ OV5650_MODE_1296x972,
+ OV5650_MODE_2080x1164,
+ OV5650_MODE_1920x1080,
+ OV5650_MODE_1264x704,
+ OV5650_MODE_320x240,
+ OV5650_MODE_INVALID
+};
+
+static struct ov5650_reg *mode_table[] = {
+ [OV5650_MODE_2592x1944] = mode_2592x1944,
+ [OV5650_MODE_1296x972] = mode_1296x972,
+ [OV5650_MODE_2080x1164] = mode_2080x1164,
+ [OV5650_MODE_1920x1080] = mode_1920x1080,
+ [OV5650_MODE_1264x704] = mode_1264x704,
+ [OV5650_MODE_320x240] = mode_320x240
+};
+
+static inline void ov5650_get_frame_length_regs(struct ov5650_reg *regs,
+ u32 frame_length)
+{
+ regs->addr = 0x380e;
+ regs->val = (frame_length >> 8) & 0xff;
+ (regs + 1)->addr = 0x380f;
+ (regs + 1)->val = (frame_length) & 0xff;
+}
+
+static inline void ov5650_get_coarse_time_regs(struct ov5650_reg *regs,
+ u32 coarse_time)
+{
+ regs->addr = 0x3500;
+ regs->val = (coarse_time >> 12) & 0xff;
+ (regs + 1)->addr = 0x3501;
+ (regs + 1)->val = (coarse_time >> 4) & 0xff;
+ (regs + 2)->addr = 0x3502;
+ (regs + 2)->val = (coarse_time & 0xf) << 4;
+}
+
+static inline void ov5650_get_gain_reg(struct ov5650_reg *regs, u16 gain)
+{
+ regs->addr = 0x350b;
+ regs->val = gain;
+}
+
+static int ov5650_read_reg(struct i2c_client *client, u16 addr, u8 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[3];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = data + 2;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+
+	if (err != 2)
+ return -EINVAL;
+
+ *val = data[2];
+
+ return 0;
+}
+
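+/*
+ * The *_helper() wrappers below route each register access to the left
+ * sensor, the right sensor, or both, depending on the stereo camera mode
+ * selected through OV5650_IOCTL_SET_CAMERA_MODE.
+ */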
+static int ov5650_read_reg_helper(struct ov5650_info *info,
+ u16 addr, u8 *val)
+{
+ int ret;
+ switch (info->camera_mode) {
+ case Main:
+ case StereoCameraMode_Left:
+ ret = ov5650_read_reg(info->left.i2c_client, addr, val);
+ break;
+ case StereoCameraMode_Stereo:
+ ret = ov5650_read_reg(info->left.i2c_client, addr, val);
+ if (ret)
+ break;
+ ret = ov5650_read_reg(info->right.i2c_client, addr, val);
+ break;
+ case StereoCameraMode_Right:
+ ret = ov5650_read_reg(info->right.i2c_client, addr, val);
+ break;
+ default:
+		return -EINVAL;
+ }
+ return ret;
+}
+
+static int ov5650_write_reg(struct i2c_client *client, u16 addr, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[3];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+ data[2] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = data;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err == 1)
+ return 0;
+
+ pr_err("ov5650: i2c transfer failed, retrying %x %x\n", addr, val);
+
+ return err;
+}
+
+static int ov5650_write_reg_helper(struct ov5650_info *info,
+ u16 addr, u8 val)
+{
+ int ret;
+ switch (info->camera_mode) {
+ case Main:
+ case StereoCameraMode_Left:
+ ret = ov5650_write_reg(info->left.i2c_client, addr, val);
+ break;
+ case StereoCameraMode_Stereo:
+ ret = ov5650_write_reg(info->left.i2c_client, addr, val);
+ if (ret)
+ break;
+ ret = ov5650_write_reg(info->right.i2c_client, addr, val);
+ break;
+ case StereoCameraMode_Right:
+ ret = ov5650_write_reg(info->right.i2c_client, addr, val);
+ break;
+ default:
+		return -EINVAL;
+ }
+ return ret;
+}
+
+static int ov5650_write_bulk_reg(struct i2c_client *client, u8 *data, int len)
+{
+ int err;
+ struct i2c_msg msg;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err == 1)
+ return 0;
+
+ pr_err("ov5650: i2c bulk transfer failed at %x\n",
+ (int)data[0] << 8 | data[1]);
+
+ return err;
+}
+
+static int ov5650_write_bulk_reg_helper(struct ov5650_info *info, int len)
+{
+ int ret;
+ switch (info->camera_mode) {
+ case Main:
+ case StereoCameraMode_Left:
+ ret = ov5650_write_bulk_reg(info->left.i2c_client,
+ info->i2c_trans_buf, len);
+ break;
+ case StereoCameraMode_Stereo:
+ ret = ov5650_write_bulk_reg(info->left.i2c_client,
+ info->i2c_trans_buf, len);
+ if (ret)
+ break;
+ ret = ov5650_write_bulk_reg(info->right.i2c_client,
+ info->i2c_trans_buf, len);
+ break;
+ case StereoCameraMode_Right:
+ ret = ov5650_write_bulk_reg(info->right.i2c_client,
+ info->i2c_trans_buf, len);
+ break;
+ default:
+		return -EINVAL;
+ }
+ return ret;
+}
+
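+/*
+ * Unlike the other sensors in this series, ov5650_write_table() batches
+ * runs of consecutive register addresses into i2c_trans_buf and sends
+ * them as one burst write.  The buffer is flushed when the next entry is
+ * not contiguous, when a wait or end marker is reached, or when
+ * SIZEOF_I2C_TRANSBUF would be exceeded.
+ */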
+static int ov5650_write_table(struct ov5650_info *info,
+ const struct ov5650_reg table[],
+ const struct ov5650_reg override_list[],
+ int num_override_regs)
+{
+ int err;
+ const struct ov5650_reg *next, *n_next;
+ u8 *b_ptr = info->i2c_trans_buf;
+ unsigned int buf_filled = 0;
+ unsigned int i;
+ u16 val;
+
+ for (next = table; next->addr != OV5650_TABLE_END; next++) {
+ if (next->addr == OV5650_TABLE_WAIT_MS) {
+ msleep(next->val);
+ continue;
+ }
+
+ val = next->val;
+ /* When an override list is passed in, replace the reg */
+ /* value to write if the reg is in the list */
+ if (override_list) {
+ for (i = 0; i < num_override_regs; i++) {
+ if (next->addr == override_list[i].addr) {
+ val = override_list[i].val;
+ break;
+ }
+ }
+ }
+
+ if (!buf_filled) {
+ b_ptr = info->i2c_trans_buf;
+ *b_ptr++ = next->addr >> 8;
+ *b_ptr++ = next->addr & 0xff;
+ buf_filled = 2;
+ }
+ *b_ptr++ = val;
+ buf_filled++;
+
+ n_next = next + 1;
+ if (n_next->addr != OV5650_TABLE_END &&
+ n_next->addr != OV5650_TABLE_WAIT_MS &&
+ buf_filled < SIZEOF_I2C_TRANSBUF &&
+ n_next->addr == next->addr + 1) {
+ continue;
+ }
+
+ err = ov5650_write_bulk_reg_helper(info, buf_filled);
+ if (err)
+ return err;
+
+ buf_filled = 0;
+ }
+ return 0;
+}
+
+static int ov5650_set_mode(struct ov5650_info *info, struct ov5650_mode *mode)
+{
+ int sensor_mode;
+ int err;
+ struct ov5650_reg reg_list[6];
+
+ pr_info("%s: xres %u yres %u framelength %u coarsetime %u gain %u\n",
+ __func__, mode->xres, mode->yres, mode->frame_length,
+ mode->coarse_time, mode->gain);
+ if (mode->xres == 2592 && mode->yres == 1944)
+ sensor_mode = OV5650_MODE_2592x1944;
+ else if (mode->xres == 1296 && mode->yres == 972)
+ sensor_mode = OV5650_MODE_1296x972;
+ else if (mode->xres == 2080 && mode->yres == 1164)
+ sensor_mode = OV5650_MODE_2080x1164;
+ else if (mode->xres == 1920 && mode->yres == 1080)
+ sensor_mode = OV5650_MODE_1920x1080;
+ else if (mode->xres == 1264 && mode->yres == 704)
+ sensor_mode = OV5650_MODE_1264x704;
+ else if (mode->xres == 320 && mode->yres == 240)
+ sensor_mode = OV5650_MODE_320x240;
+ else {
+ pr_err("%s: invalid resolution supplied to set mode %d %d\n",
+ __func__, mode->xres, mode->yres);
+ return -EINVAL;
+ }
+
+ /* get a list of override regs for the asking frame length, */
+ /* coarse integration time, and gain. */
+ ov5650_get_frame_length_regs(reg_list, mode->frame_length);
+ ov5650_get_coarse_time_regs(reg_list + 2, mode->coarse_time);
+ ov5650_get_gain_reg(reg_list + 5, mode->gain);
+
+ err = ov5650_write_table(info, reset_seq, NULL, 0);
+ if (err)
+ return err;
+
+ err = ov5650_write_table(info, mode_start, NULL, 0);
+ if (err)
+ return err;
+
+ err = ov5650_write_table(info, mode_table[sensor_mode],
+ reg_list, 6);
+ if (err)
+ return err;
+
+ err = ov5650_write_table(info, mode_end, NULL, 0);
+ if (err)
+ return err;
+
+ info->mode = sensor_mode;
+ return 0;
+}
+
+static int ov5650_set_frame_length(struct ov5650_info *info, u32 frame_length)
+{
+ struct ov5650_reg reg_list[2];
+ int i = 0;
+ int ret;
+
+ ov5650_get_frame_length_regs(reg_list, frame_length);
+
+ for (i = 0; i < 2; i++) {
+ ret = ov5650_write_reg_helper(info, reg_list[i].addr,
+ reg_list[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5650_set_coarse_time(struct ov5650_info *info, u32 coarse_time)
+{
+ int ret;
+
+ struct ov5650_reg reg_list[3];
+ int i = 0;
+
+ ov5650_get_coarse_time_regs(reg_list, coarse_time);
+
+ ret = ov5650_write_reg_helper(info, 0x3212, 0x01);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = ov5650_write_reg_helper(info, reg_list[i].addr,
+ reg_list[i].val);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov5650_write_reg_helper(info, 0x3212, 0x11);
+ if (ret)
+ return ret;
+
+ ret = ov5650_write_reg_helper(info, 0x3212, 0xa1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ov5650_set_gain(struct ov5650_info *info, u16 gain)
+{
+ int ret;
+ struct ov5650_reg reg_list;
+
+ ov5650_get_gain_reg(&reg_list, gain);
+
+ ret = ov5650_write_reg_helper(info, reg_list.addr, reg_list.val);
+
+ return ret;
+}
+
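+/*
+ * Switch the smaller readout modes between binning and subsampling.  The
+ * current array control, analog control and timing registers are read
+ * back, the H/V binning and subsampling bits are rewritten inside SRM
+ * register group 3, the OV5650_TIMING_CONTROL_HS_HIGH/LOW pair is
+ * reprogrammed for the new readout, and the group is launched on exit so
+ * the change takes effect atomically.
+ */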
+static int ov5650_set_binning(struct ov5650_info *info, u8 enable)
+{
+ s32 ret;
+ u8 array_ctrl_reg, analog_ctrl_reg, timing_reg;
+ u32 val;
+
+ if (info->mode == OV5650_MODE_2592x1944
+ || info->mode == OV5650_MODE_2080x1164
+ || info->mode >= OV5650_MODE_INVALID) {
+ return -EINVAL;
+ }
+
+ ov5650_read_reg_helper(info, OV5650_ARRAY_CONTROL_01, &array_ctrl_reg);
+ ov5650_read_reg_helper(info, OV5650_ANALOG_CONTROL_D, &analog_ctrl_reg);
+ ov5650_read_reg_helper(info, OV5650_TIMING_TC_REG_18, &timing_reg);
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_SRM_GRUP_ACCESS,
+ OV5650_GROUP_ID(3));
+ if (ret < 0)
+ return -EIO;
+
+ if (!enable) {
+ ret = ov5650_write_reg_helper(info,
+ OV5650_ARRAY_CONTROL_01,
+ array_ctrl_reg |
+ (OV5650_H_BINNING_BIT | OV5650_H_SUBSAMPLING_BIT));
+
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_ANALOG_CONTROL_D,
+ analog_ctrl_reg & ~OV5650_V_BINNING_BIT);
+
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_TIMING_TC_REG_18,
+ timing_reg | OV5650_V_SUBSAMPLING_BIT);
+
+ if (ret < 0)
+ goto exit;
+
+ if (info->mode == OV5650_MODE_1296x972)
+ val = 0x1A2;
+ else
+ /* FIXME: this value is not verified yet. */
+ val = 0x1A8;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_TIMING_CONTROL_HS_HIGH,
+ (val >> 8));
+
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_TIMING_CONTROL_HS_LOW,
+ (val & 0xFF));
+ } else {
+ ret = ov5650_write_reg_helper(info,
+ OV5650_ARRAY_CONTROL_01,
+ (array_ctrl_reg | OV5650_H_BINNING_BIT)
+ & ~OV5650_H_SUBSAMPLING_BIT);
+
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_ANALOG_CONTROL_D,
+ analog_ctrl_reg | OV5650_V_BINNING_BIT);
+
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_TIMING_TC_REG_18,
+ timing_reg | OV5650_V_SUBSAMPLING_BIT);
+
+ if (ret < 0)
+ goto exit;
+
+ if (info->mode == OV5650_MODE_1296x972)
+ val = 0x33C;
+ else
+ val = 0x254;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_TIMING_CONTROL_HS_HIGH,
+ (val >> 8));
+
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5650_write_reg_helper(info,
+ OV5650_TIMING_CONTROL_HS_LOW,
+ (val & 0xFF));
+ }
+
+exit:
+ ret = ov5650_write_reg_helper(info,
+ OV5650_SRM_GRUP_ACCESS,
+ (OV5650_GROUP_HOLD_END_BIT | OV5650_GROUP_ID(3)));
+
+ ret |= ov5650_write_reg_helper(info,
+ OV5650_SRM_GRUP_ACCESS,
+ (OV5650_GROUP_HOLD_BIT | OV5650_GROUP_LAUNCH_BIT |
+ OV5650_GROUP_ID(3)));
+
+ return ret;
+}
+
+static int ov5650_test_pattern(struct ov5650_info *info,
+ enum ov5650_test_pattern pattern)
+{
+ if (pattern >= ARRAY_SIZE(test_pattern_modes))
+ return -EINVAL;
+
+ return ov5650_write_table(info,
+ test_pattern_modes[pattern],
+ NULL, 0);
+}
+
+static int set_power_helper(struct ov5650_platform_data *pdata,
+ int powerLevel)
+{
+ if (pdata) {
+ if (powerLevel && pdata->power_on)
+ pdata->power_on();
+ else if (pdata->power_off)
+ pdata->power_off();
+ }
+ return 0;
+}
+
+static int ov5650_set_power(int powerLevel)
+{
+ pr_info("%s: powerLevel=%d camera mode=%d\n", __func__, powerLevel,
+ stereo_ov5650_info->camera_mode);
+
+ if (StereoCameraMode_Left & stereo_ov5650_info->camera_mode)
+ set_power_helper(stereo_ov5650_info->left.pdata, powerLevel);
+
+ if (StereoCameraMode_Right & stereo_ov5650_info->camera_mode)
+ set_power_helper(stereo_ov5650_info->right.pdata, powerLevel);
+
+ return 0;
+}
+
+static long ov5650_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ struct ov5650_info *info = file->private_data;
+
+ switch (cmd) {
+ case OV5650_IOCTL_SET_CAMERA_MODE:
+ {
+ if (info->camera_mode != arg) {
+ err = ov5650_set_power(0);
+ if (err) {
+ pr_info("%s %d\n", __func__, __LINE__);
+ return err;
+ }
+ info->camera_mode = arg;
+ err = ov5650_set_power(1);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+ case OV5650_IOCTL_SYNC_SENSORS:
+ if (info->right.pdata->synchronize_sensors)
+ info->right.pdata->synchronize_sensors();
+ return 0;
+ case OV5650_IOCTL_SET_MODE:
+ {
+ struct ov5650_mode mode;
+ if (copy_from_user(&mode,
+ (const void __user *)arg,
+ sizeof(struct ov5650_mode))) {
+ pr_info("%s %d\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return ov5650_set_mode(info, &mode);
+ }
+ case OV5650_IOCTL_SET_FRAME_LENGTH:
+ return ov5650_set_frame_length(info, (u32)arg);
+ case OV5650_IOCTL_SET_COARSE_TIME:
+ return ov5650_set_coarse_time(info, (u32)arg);
+ case OV5650_IOCTL_SET_GAIN:
+ return ov5650_set_gain(info, (u16)arg);
+ case OV5650_IOCTL_SET_BINNING:
+ return ov5650_set_binning(info, (u8)arg);
+ case OV5650_IOCTL_GET_STATUS:
+ {
+ u16 status = 0;
+ if (copy_to_user((void __user *)arg, &status,
+ 2)) {
+ pr_info("%s %d\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case OV5650_IOCTL_TEST_PATTERN:
+ {
+ err = ov5650_test_pattern(info, (enum ov5650_test_pattern) arg);
+ if (err)
+ pr_err("%s %d %d\n", __func__, __LINE__, err);
+ return err;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ov5650_open(struct inode *inode, struct file *file)
+{
+ pr_info("%s\n", __func__);
+ file->private_data = stereo_ov5650_info;
+ ov5650_set_power(1);
+ return 0;
+}
+
+int ov5650_release(struct inode *inode, struct file *file)
+{
+ ov5650_set_power(0);
+ file->private_data = NULL;
+ return 0;
+}
+
+
+static const struct file_operations ov5650_fileops = {
+ .owner = THIS_MODULE,
+ .open = ov5650_open,
+ .unlocked_ioctl = ov5650_ioctl,
+ .release = ov5650_release,
+};
+
+static struct miscdevice ov5650_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ov5650",
+ .fops = &ov5650_fileops,
+};
+
+static int ov5650_probe_common(void)
+{
+ int err;
+
+ if (!stereo_ov5650_info) {
+ stereo_ov5650_info = kzalloc(sizeof(struct ov5650_info),
+ GFP_KERNEL);
+ if (!stereo_ov5650_info) {
+ pr_err("ov5650: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ err = misc_register(&ov5650_device);
+ if (err) {
+ pr_err("ov5650: Unable to register misc device!\n");
+ kfree(stereo_ov5650_info);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int ov5650_remove_common(struct i2c_client *client)
+{
+ if (stereo_ov5650_info->left.i2c_client ||
+ stereo_ov5650_info->right.i2c_client)
+ return 0;
+
+ misc_deregister(&ov5650_device);
+ kfree(stereo_ov5650_info);
+ stereo_ov5650_info = NULL;
+
+ return 0;
+}
+
+static int left_ov5650_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ pr_info("%s: probing sensor.\n", __func__);
+
+ err = ov5650_probe_common();
+ if (err)
+ return err;
+
+ stereo_ov5650_info->left.pdata = client->dev.platform_data;
+ stereo_ov5650_info->left.i2c_client = client;
+
+ return 0;
+}
+
+static int left_ov5650_remove(struct i2c_client *client)
+{
+ if (stereo_ov5650_info) {
+ stereo_ov5650_info->left.i2c_client = NULL;
+ ov5650_remove_common(client);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id left_ov5650_id[] = {
+ { "ov5650", 0 },
+ { "ov5650L", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, left_ov5650_id);
+
+static struct i2c_driver left_ov5650_i2c_driver = {
+ .driver = {
+ .name = "ov5650",
+ .owner = THIS_MODULE,
+ },
+ .probe = left_ov5650_probe,
+ .remove = left_ov5650_remove,
+ .id_table = left_ov5650_id,
+};
+
+static int right_ov5650_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ pr_info("%s: probing sensor.\n", __func__);
+
+ err = ov5650_probe_common();
+ if (err)
+ return err;
+
+ stereo_ov5650_info->right.pdata = client->dev.platform_data;
+ stereo_ov5650_info->right.i2c_client = client;
+
+ return 0;
+}
+
+static int right_ov5650_remove(struct i2c_client *client)
+{
+ if (stereo_ov5650_info) {
+ stereo_ov5650_info->right.i2c_client = NULL;
+ ov5650_remove_common(client);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id right_ov5650_id[] = {
+ { "ov5650R", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, right_ov5650_id);
+
+static struct i2c_driver right_ov5650_i2c_driver = {
+ .driver = {
+ .name = "ov5650R",
+ .owner = THIS_MODULE,
+ },
+ .probe = right_ov5650_probe,
+ .remove = right_ov5650_remove,
+ .id_table = right_ov5650_id,
+};
+
+static int __init ov5650_init(void)
+{
+ int ret;
+ pr_info("ov5650 sensor driver loading\n");
+ ret = i2c_add_driver(&left_ov5650_i2c_driver);
+ if (ret)
+ return ret;
+ return i2c_add_driver(&right_ov5650_i2c_driver);
+}
+
+static void __exit ov5650_exit(void)
+{
+ i2c_del_driver(&right_ov5650_i2c_driver);
+ i2c_del_driver(&left_ov5650_i2c_driver);
+}
+
+module_init(ov5650_init);
+module_exit(ov5650_exit);
+
diff --git a/drivers/media/video/tegra/ov9726.c b/drivers/media/video/tegra/ov9726.c
new file mode 100644
index 000000000000..655d07c736a8
--- /dev/null
+++ b/drivers/media/video/tegra/ov9726.c
@@ -0,0 +1,845 @@
+/*
+ * ov9726.c - ov9726 sensor driver
+ *
+ * Copyright (c) 2011, NVIDIA, All Rights Reserved.
+ *
+ * Contributors:
+ * Charlie Huang <chahuang@nvidia.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <mach/iomap.h>
+#include <linux/atomic.h>
+#include <mach/gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/ov9726.h>
+
+struct ov9726_power_rail {
+ struct regulator *sen_1v8_reg;
+ struct regulator *sen_2v8_reg;
+};
+
+struct ov9726_devinfo {
+ struct miscdevice miscdev_info;
+ struct i2c_client *i2c_client;
+ struct ov9726_platform_data *pdata;
+ struct ov9726_power_rail power_rail;
+ atomic_t in_use;
+ __u32 mode;
+};
+
+static struct ov9726_reg mode_1280x720[] = {
+ /*
+ (1) clock setting
+ clock formula: (Ref_clk / pre_pll_clk_div) * pll_multiplier /
+ vt_sys_clk_div / vt_pix_clk_div / divmip
+	input clk at 24 MHz
+ pre_pll_clk_div 0305[3:0] => 4
+ pll_multiplier 0307[6:0] => 100
+ vt_sys_clk_div 0303[3:0] => 1
+ vt_pix_clk_div 0301[3:0] => 10
+ divmip 3010[2:0] => 1
+
+ Overall timing:
+ line length: 1664 (reg 0x342/0x343)
+ frame length: 840 (reg 0x340/0x341)
+ coarse integration time: 835 lines (reg 0x202/0x203) => change to 836
+
+ visible pixels: (0,40) - (1280, 720+40) with size 1280x720
+ Output pixels (1280x720)
+
+	Frame rate if MCLK = 24 MHz:
+	24 MHz / 4 * 100 / 1 / 10 / 1 = 60 MHz
+	60 MHz / 1664 / 840 = 42.9 fps
+ */
+
+ {0x0103, 0x01},
+
+ {OV9726_TABLE_WAIT_MS, 10},
+
+ {0x3026, 0x00},
+ {0x3027, 0x00},
+ {0x3705, 0x45},
+ {0x3603, 0xaa},
+ {0x3632, 0x2f},
+ {0x3620, 0x66},
+ {0x3621, 0xc0},
+ {0x0202, 0x03},
+ {0x0203, 0x13},
+ {0x3833, 0x04},
+ {0x3835, 0x02},
+ {0x4702, 0x04},
+ {0x4704, 0x00},
+ {0x4706, 0x08},
+ {0x5052, 0x01},
+ {0x3819, 0x6c},
+ {0x3817, 0x94},
+ {0x404e, 0x7e},
+ {0x3601, 0x40},
+ {0x3610, 0xa0},
+ {0x0344, 0x00},
+ {0x0345, 0x00},
+ {0x0346, 0x00},
+ {0x0347, 0x28},
+
+ {0x034c, 0x05},
+ {0x034d, 0x00},
+ {0x034e, 0x02},
+ {0x034f, 0xd8},
+ {0x3002, 0x00},
+ {0x3004, 0x00},
+ {0x3005, 0x00},
+ {0x4800, 0x44},
+ {0x4801, 0x0f},
+ {0x4803, 0x05},
+ {0x4601, 0x16},
+ {0x3014, 0x05},
+ {0x0101, 0x01},
+ {0x3707, 0x14},
+ {0x3622, 0x9f},
+ {0x4002, 0x45},
+ {0x5001, 0x00},
+ {0x3406, 0x01},
+ {0x3503, 0x17},
+ {0x0205, 0x3f},
+ {0x0100, 0x01},
+ {0x0112, 0x0a},
+ {0x0113, 0x0a},
+ {0x3013, 0x20},
+ {0x4837, 0x2f},
+ {0x3615, 0xf0},
+ {0x0340, 0x03},
+ {0x0341, 0x48},
+ {0x0342, 0x06},
+ {0x0343, 0x80},
+ {0x3702, 0x1e},
+ {0x3703, 0x3c},
+ {0x3704, 0x0e},
+
+ {0x3104, 0x20},
+ {0x0305, 0x04},
+ {0x0307, 0x46},
+ {0x0303, 0x01},
+ {0x0301, 0x0a},
+ {0x3010, 0x01},
+ {0x460e, 0x00},
+
+ {0x5000, 0x00},
+ {0x5002, 0x00},
+ {0x3017, 0xd2},
+ {0x3018, 0x69},
+ {0x3019, 0x96},
+ {0x5047, 0x61},
+ {0x3604, 0x1c},
+ {0x3602, 0x10},
+ {0x3612, 0x21},
+ {0x3630, 0x0a},
+ {0x3631, 0x53},
+ {0x3633, 0x70},
+ {0x4005, 0x1a},
+ {0x4009, 0x10},
+
+ {OV9726_TABLE_END, 0x0000}
+};
+
+static struct ov9726_reg mode_1280x800[] = {
+ {0x0103, 0x01},
+
+ {OV9726_TABLE_WAIT_MS, 10},
+
+ {0x3026, 0x00},
+ {0x3027, 0x00},
+ {0x3705, 0x45},
+ {0x3603, 0xaa},
+ {0x3632, 0x2f},
+ {0x3620, 0x66},
+ {0x3621, 0xc0},
+ {0x0202, 0x03},
+ {0x0203, 0x13},
+ {0x3833, 0x04},
+ {0x3835, 0x02},
+ {0x4702, 0x04},
+ {0x4704, 0x00},
+ {0x4706, 0x08},
+ {0x5052, 0x01},
+ {0x3819, 0x6c},
+ {0x3817, 0x94},
+ {0x404e, 0x7e},
+ {0x3601, 0x40},
+ {0x3610, 0xa0},
+
+ {0x0344, 0x00},
+ {0x0345, 0x00},
+ {0x0346, 0x00},
+ {0x0347, 0x00},
+ {0x034c, 0x05},
+ {0x034d, 0x10},
+ {0x034e, 0x03},
+ {0x034f, 0x28},
+
+ {0x3002, 0x00},
+ {0x3004, 0x00},
+ {0x3005, 0x00},
+ {0x4800, 0x44},
+ {0x4801, 0x0f},
+ {0x4803, 0x05},
+ {0x4601, 0x16},
+ {0x3014, 0x05},
+ {0x0101, 0x01},
+ {0x3707, 0x14},
+ {0x3622, 0x9f},
+ {0x4002, 0x45},
+ {0x5001, 0x00},
+ {0x3406, 0x01},
+ {0x3503, 0x17},
+ {0x0205, 0x3f},
+ {0x0100, 0x01},
+ {0x0112, 0x0a},
+ {0x0113, 0x0a},
+ {0x3013, 0x20},
+ {0x4837, 0x2f},
+ {0x3615, 0xf0},
+ {0x0340, 0x03},
+ {0x0341, 0x48},
+ {0x0342, 0x06},
+ {0x0343, 0x80},
+ {0x3702, 0x1e},
+ {0x3703, 0x3c},
+ {0x3704, 0x0e},
+
+ {0x3104, 0x20},
+ {0x0305, 0x04},
+ {0x0307, 0x46},
+ {0x0303, 0x01},
+ {0x0301, 0x0a},
+ {0x3010, 0x01},
+ {0x460e, 0x00},
+
+ {0x5000, 0x00},
+ {0x5002, 0x00},
+ {0x3017, 0xd2},
+ {0x3018, 0x69},
+ {0x3019, 0x96},
+ {0x5047, 0x61},
+ {0x3604, 0x1c},
+ {0x3602, 0x10},
+ {0x3612, 0x21},
+ {0x3630, 0x0a},
+ {0x3631, 0x53},
+ {0x3633, 0x70},
+ {0x4005, 0x1a},
+ {0x4009, 0x10},
+
+ {OV9726_TABLE_END, 0x0000}
+};
+
+enum {
+ OV9726_MODE_1280x720,
+ OV9726_MODE_1280x800,
+};
+
+static struct ov9726_reg *mode_table[] = {
+ [OV9726_MODE_1280x720] = mode_1280x720,
+ [OV9726_MODE_1280x800] = mode_1280x800,
+};
+
+static inline void
+msleep_range(unsigned int delay_base)
+{
+ usleep_range(delay_base*1000, delay_base*1000 + 500);
+}
+
+static inline int
+ov9726_power_init(struct ov9726_devinfo *dev)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int err = 0;
+
+ dev->power_rail.sen_1v8_reg = regulator_get(&i2c_client->dev, "dovdd");
+ if (IS_ERR_OR_NULL(dev->power_rail.sen_1v8_reg)) {
+ dev_err(&i2c_client->dev, "%s: failed to get vdd\n",
+ __func__);
+ err = PTR_ERR(dev->power_rail.sen_1v8_reg);
+ goto ov9726_power_init_end;
+ }
+
+ dev->power_rail.sen_2v8_reg = regulator_get(&i2c_client->dev, "avdd");
+ if (IS_ERR_OR_NULL(dev->power_rail.sen_2v8_reg)) {
+ dev_err(&i2c_client->dev, "%s: failed to get vaa\n",
+ __func__);
+ err = PTR_ERR(dev->power_rail.sen_2v8_reg);
+
+ regulator_put(dev->power_rail.sen_1v8_reg);
+ dev->power_rail.sen_1v8_reg = NULL;
+ }
+
+ov9726_power_init_end:
+ return err;
+}
+
+static inline void
+ov9726_power_release(struct ov9726_devinfo *dev)
+{
+ regulator_put(dev->power_rail.sen_1v8_reg);
+ regulator_put(dev->power_rail.sen_2v8_reg);
+}
+
+static int
+ov9726_power(struct ov9726_devinfo *dev, bool pwr_on)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int rst_active_state = dev->pdata->rst_low_active ? 0 : 1;
+ int pwdn_active_state = dev->pdata->pwdn_low_active ? 0 : 1;
+ int ret = 0;
+
+ dev_info(&i2c_client->dev, "%s %s\n", __func__, pwr_on ? "on" : "off");
+
+ if (pwr_on) {
+ /* pull low the RST pin of ov9726 first */
+ gpio_set_value(dev->pdata->gpio_rst, rst_active_state);
+ msleep_range(1);
+ /* Plug 1.8V and 2.8V power to sensor */
+ ret = regulator_enable(dev->power_rail.sen_1v8_reg);
+ if (ret) {
+ dev_err(&i2c_client->dev, "%s: failed to enable vdd\n",
+ __func__);
+ goto fail_regulator_1v8_reg;
+ }
+
+ msleep_range(20);
+
+ ret = regulator_enable(dev->power_rail.sen_2v8_reg);
+ if (ret) {
+ dev_err(&i2c_client->dev, "%s: failed to enable vaa\n",
+ __func__);
+ goto fail_regulator_2v8_reg;
+ }
+ msleep_range(1);
+ /* turn on ov9726 */
+ gpio_set_value(dev->pdata->gpio_pwdn, !pwdn_active_state);
+
+ msleep_range(5);
+ /* release RST pin */
+ gpio_set_value(dev->pdata->gpio_rst, !rst_active_state);
+ msleep_range(20);
+
+ /* Board specific power-on sequence */
+ dev->pdata->power_on();
+ } else {
+ /* pull low the RST pin of ov9726 */
+ gpio_set_value(dev->pdata->gpio_rst, rst_active_state);
+ msleep_range(1);
+ /* turn off ov9726 */
+ gpio_set_value(dev->pdata->gpio_pwdn, pwdn_active_state);
+ msleep_range(1);
+
+ /* Unplug 1.8V and 2.8V power from sensor */
+ regulator_disable(dev->power_rail.sen_2v8_reg);
+ regulator_disable(dev->power_rail.sen_1v8_reg);
+
+ /* Board specific power-down sequence */
+ dev->pdata->power_off();
+ }
+
+ return 0;
+
+fail_regulator_2v8_reg:
+ regulator_put(dev->power_rail.sen_2v8_reg);
+ dev->power_rail.sen_2v8_reg = NULL;
+ regulator_disable(dev->power_rail.sen_1v8_reg);
+fail_regulator_1v8_reg:
+ regulator_put(dev->power_rail.sen_1v8_reg);
+ dev->power_rail.sen_1v8_reg = NULL;
+ return ret;
+}
+
+static inline void
+ov9726_get_frame_length_regs(struct ov9726_reg *regs, u32 frame_length)
+{
+ regs->addr = OV9726_REG_FRAME_LENGTH_HI;
+ regs->val = (frame_length >> 8) & 0xff;
+ regs++;
+ regs->addr = OV9726_REG_FRAME_LENGTH_LO;
+ regs->val = frame_length & 0xff;
+}
+
+static inline void
+ov9726_get_coarse_time_regs(struct ov9726_reg *regs, u32 coarse_time)
+{
+ regs->addr = OV9726_REG_COARSE_TIME_HI;
+ regs->val = (coarse_time >> 8) & 0xff;
+ regs++;
+ regs->addr = OV9726_REG_COARSE_TIME_LO;
+ regs->val = coarse_time & 0xff;
+}
+
+static inline void
+ov9726_get_gain_reg(struct ov9726_reg *regs, u16 gain)
+{
+ regs->addr = OV9726_REG_GAIN_HI;
+ regs->val = (gain >> 8) & 0xff;
+ regs++;
+ regs->addr = OV9726_REG_GAIN_LO;
+ regs->val = gain & 0xff;
+}
+
+static int
+ov9726_read_reg8(struct i2c_client *client, u16 addr, u8 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[3];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(addr >> 8);
+ data[1] = (u8)(addr & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = data + 2;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+
+ if (err != 2)
+ err = -EINVAL;
+ else {
+ *val = data[2];
+ err = 0;
+ }
+
+ return err;
+}
+
+static int
+ov9726_write_reg8(struct i2c_client *client, u16 addr, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[3];
+ int retry = 0;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8)(addr >> 8);
+ data[1] = (u8)(addr & 0xff);
+ data[2] = (u8)(val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = data;
+
+ do {
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err == 1)
+ break;
+
+ retry++;
+ dev_err(&client->dev,
+ "ov9726: i2c transfer failed, retrying %x %x\n",
+ addr, val);
+ msleep_range(3);
+ } while (retry <= OV9726_MAX_RETRIES);
+
+	return (err == 1) ? 0 : -EIO;
+}
+
+static int
+ov9726_write_reg16(struct i2c_client *client, u16 addr, u16 val)
+{
+ int count;
+ struct i2c_msg msg;
+ unsigned char data[4];
+ int retry = 0;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8)(addr >> 8);
+ data[1] = (u8)(addr & 0xff);
+ data[2] = (u8)(val >> 8);
+ data[3] = (u8)(val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 4;
+ msg.buf = data;
+
+ do {
+ count = i2c_transfer(client->adapter, &msg, 1);
+ if (count == 1)
+ return 0;
+
+ retry++;
+		dev_err(&client->dev,
+			"ov9726: i2c transfer failed, retrying %x %x (err %d)\n",
+			addr, val, count);
+ msleep_range(3);
+ } while (retry <= OV9726_MAX_RETRIES);
+
+ return -EIO;
+}
+
+static int
+ov9726_write_table(
+ struct i2c_client *client,
+ struct ov9726_reg table[],
+ struct ov9726_reg override_list[],
+ int num_override_regs)
+{
+ const struct ov9726_reg *next;
+ int err = 0;
+ int i;
+ u16 val;
+
+ dev_info(&client->dev, "ov9726_write_table\n");
+
+ for (next = table; next->addr != OV9726_TABLE_END; next++) {
+
+ if (next->addr == OV9726_TABLE_WAIT_MS) {
+ msleep_range(next->val);
+ continue;
+ }
+
+ val = next->val;
+
+		/*
+		 * When an override list is passed in, replace the register
+		 * value to write if the register is in the list.
+		 */
+ if (override_list) {
+ for (i = 0; i < num_override_regs; i++) {
+ if (next->addr == override_list[i].addr) {
+ val = override_list[i].val;
+ break;
+ }
+ }
+ }
+
+ err = ov9726_write_reg8(client, next->addr, val);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int
+ov9726_set_frame_length(struct i2c_client *i2c_client, u32 frame_length)
+{
+ int ret;
+
+ dev_info(&i2c_client->dev, "[%s] (0x%08x)\n", __func__, frame_length);
+ /* hold register value */
+ ret = ov9726_write_reg8(i2c_client, 0x104, 0x01);
+ if (ret)
+ return ret;
+
+ ret = ov9726_write_reg16(i2c_client,
+ OV9726_REG_FRAME_LENGTH_HI,
+ frame_length);
+
+ /* release hold, update register value */
+ ret |= ov9726_write_reg8(i2c_client, 0x104, 0x00);
+
+ return ret;
+}
+
+static int
+ov9726_set_coarse_time(struct i2c_client *i2c_client, u32 coarse_time)
+{
+ int ret;
+
+ dev_info(&i2c_client->dev, "[%s] (0x%08x)\n", __func__, coarse_time);
+ /* hold register value */
+ ret = ov9726_write_reg8(i2c_client, 0x104, 0x01);
+ if (ret)
+ return ret;
+
+ ret = ov9726_write_reg16(i2c_client,
+ OV9726_REG_COARSE_TIME_HI,
+ coarse_time);
+
+ /* release hold, update register value */
+ ret |= ov9726_write_reg8(i2c_client, 0x104, 0x00);
+
+ return ret;
+}
+
+static int ov9726_set_gain(struct i2c_client *i2c_client, u16 gain)
+{
+ int ret;
+
+ /* hold register value */
+ ret = ov9726_write_reg8(i2c_client, 0x104, 0x01);
+ if (ret)
+ return ret;
+
+ ret = ov9726_write_reg16(i2c_client, OV9726_REG_GAIN_HI, gain);
+
+ /* release hold, update register value */
+ ret |= ov9726_write_reg8(i2c_client, 0x104, 0x00);
+
+ return ret;
+}
+
+static int ov9726_get_status(struct i2c_client *i2c_client, u8 *status)
+{
+ int err;
+
+ err = ov9726_read_reg8(i2c_client, 0x003, status);
+ *status = 0;
+ return err;
+}
+
+static int
+ov9726_set_mode(
+ struct ov9726_devinfo *dev,
+ struct ov9726_mode *mode)
+{
+ struct i2c_client *i2c_client = dev->i2c_client;
+ struct ov9726_reg reg_override[6];
+ int err = 0;
+ int sensor_mode;
+
+ dev_info(&i2c_client->dev, "%s.\n", __func__);
+
+ if (mode->xres == 1280 && mode->yres == 800)
+ sensor_mode = OV9726_MODE_1280x800;
+ else if (mode->xres == 1280 && mode->yres == 720)
+ sensor_mode = OV9726_MODE_1280x720;
+ else {
+ dev_err(&i2c_client->dev,
+ "%s: invalid resolution supplied to set mode %d %d\n",
+ __func__, mode->xres, mode->yres);
+ return -EINVAL;
+ }
+
+ ov9726_get_frame_length_regs(reg_override, mode->frame_length);
+ ov9726_get_coarse_time_regs(reg_override + 2, mode->coarse_time);
+ ov9726_get_gain_reg(reg_override + 4, mode->gain);
+
+ if (dev->mode != mode->mode_id) {
+ dev_info(&i2c_client->dev,
+ "%s: xres %u yres %u framelen %u coarse %u gain %u\n",
+ __func__, mode->xres, mode->yres, mode->frame_length,
+ mode->coarse_time, mode->gain);
+
+ err = ov9726_write_table(i2c_client,
+ mode_table[sensor_mode], reg_override,
+ sizeof(reg_override) / sizeof(reg_override[0]));
+ if (err)
+ goto ov9726_set_mode_exit;
+
+ dev->mode = mode->mode_id;
+ }
+
+ov9726_set_mode_exit:
+ return err;
+}
+
+static long
+ov9726_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ov9726_devinfo *dev = file->private_data;
+ struct i2c_client *i2c_client = dev->i2c_client;
+ int err = 0;
+
+ switch (cmd) {
+ case OV9726_IOCTL_SET_MODE:
+ {
+ struct ov9726_mode mode;
+
+ if (copy_from_user(&mode,
+ (const void __user *)arg,
+ sizeof(struct ov9726_mode))) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ov9726_set_mode(dev, &mode);
+
+ break;
+ }
+
+ case OV9726_IOCTL_SET_FRAME_LENGTH:
+ err = ov9726_set_frame_length(i2c_client, (u32)arg);
+ break;
+
+ case OV9726_IOCTL_SET_COARSE_TIME:
+ err = ov9726_set_coarse_time(i2c_client, (u32)arg);
+ break;
+
+ case OV9726_IOCTL_SET_GAIN:
+ err = ov9726_set_gain(i2c_client, (u16)arg);
+ break;
+
+ case OV9726_IOCTL_GET_STATUS:
+ {
+ u8 status;
+
+ err = ov9726_get_status(i2c_client, &status);
+ if (!err) {
+ if (copy_to_user((void __user *)arg,
+ &status, sizeof(status)))
+ err = -EFAULT;
+ }
+ break;
+ }
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
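+
+/*
+ * Illustrative user-space call sequence (a sketch only: the /dev/ov9726 node
+ * name assumes the usual misc-device naming, frame_length and coarse_time
+ * are taken from the 1280x720 table comment, and the gain and mode_id values
+ * are arbitrary placeholders, not tuned settings):
+ *
+ *	int fd = open("/dev/ov9726", O_RDWR);
+ *	struct ov9726_mode mode = {
+ *		.xres         = 1280,
+ *		.yres         = 720,
+ *		.frame_length = 840,
+ *		.coarse_time  = 836,
+ *		.gain         = 16,
+ *		.mode_id      = 0,
+ *	};
+ *	ioctl(fd, OV9726_IOCTL_SET_MODE, &mode);
+ */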
+
+static int ov9726_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ov9726_devinfo *dev;
+
+ dev = container_of(miscdev, struct ov9726_devinfo, miscdev_info);
+ /* check if device is in use */
+ if (atomic_xchg(&dev->in_use, 1))
+ return -EBUSY;
+ dev->mode = (__u32)-1;
+ file->private_data = dev;
+
+ ov9726_power(dev, true);
+
+ return 0;
+}
+
+static int ov9726_release(struct inode *inode, struct file *file)
+{
+ struct ov9726_devinfo *dev;
+
+ dev = file->private_data;
+ file->private_data = NULL;
+
+ ov9726_power(dev, false);
+
+ /* warn if device already released */
+ WARN_ON(!atomic_xchg(&dev->in_use, 0));
+ return 0;
+}
+
+static const struct file_operations ov9726_fileops = {
+ .owner = THIS_MODULE,
+ .open = ov9726_open,
+ .unlocked_ioctl = ov9726_ioctl,
+ .release = ov9726_release,
+};
+
+static struct miscdevice ov9726_device = {
+ .name = "ov9726",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &ov9726_fileops,
+};
+
+static int
+ov9726_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct ov9726_devinfo *dev;
+ int err = 0;
+
+ dev_info(&client->dev, "ov9726: probing sensor.\n");
+
+ dev = kzalloc(sizeof(struct ov9726_devinfo), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "ov9726: Unable to allocate memory!\n");
+ err = -ENOMEM;
+ goto probe_end;
+ }
+
+ memcpy(&(dev->miscdev_info),
+ &ov9726_device,
+ sizeof(struct miscdevice));
+
+ err = misc_register(&(dev->miscdev_info));
+ if (err) {
+ dev_err(&client->dev, "ov9726: Unable to register misc device!\n");
+ goto probe_end;
+ }
+
+ dev->pdata = client->dev.platform_data;
+ dev->i2c_client = client;
+ atomic_set(&dev->in_use, 0);
+ i2c_set_clientdata(client, dev);
+
+	err = ov9726_power_init(dev);
+	if (err)
+		misc_deregister(&dev->miscdev_info);
+
+probe_end:
+	if (err) {
+		kfree(dev);
+		dev_err(&client->dev, "probe failed.\n");
+	}
+
+ return err;
+}
+
+static int ov9726_remove(struct i2c_client *client)
+{
+ struct ov9726_devinfo *dev;
+
+ dev = i2c_get_clientdata(client);
+ i2c_set_clientdata(client, NULL);
+	misc_deregister(&dev->miscdev_info);
+ ov9726_power_release(dev);
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov9726_id[] = {
+ {"ov9726", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, ov9726_id);
+
+static struct i2c_driver ov9726_i2c_driver = {
+ .driver = {
+ .name = "ov9726",
+ .owner = THIS_MODULE,
+ },
+ .probe = ov9726_probe,
+ .remove = ov9726_remove,
+ .id_table = ov9726_id,
+};
+
+static int __init ov9726_init(void)
+{
+ pr_info("ov9726 sensor driver loading\n");
+ return i2c_add_driver(&ov9726_i2c_driver);
+}
+
+static void __exit ov9726_exit(void)
+{
+ i2c_del_driver(&ov9726_i2c_driver);
+}
+
+module_init(ov9726_init);
+module_exit(ov9726_exit);
diff --git a/drivers/media/video/tegra/sh532u.c b/drivers/media/video/tegra/sh532u.c
new file mode 100644
index 000000000000..6723fae70ae4
--- /dev/null
+++ b/drivers/media/video/tegra/sh532u.c
@@ -0,0 +1,1688 @@
+/*
+ * SH532U focuser driver.
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+/* Implementation
+ * --------------
+ * The board level details about the device need to be provided in the board
+ * file with the sh532u_platform_data structure.
+ * Standard among NVC kernel drivers in this structure is:
+ * .cfg = Use the NVC_CFG_ defines that are in nvc.h.
+ * Descriptions of the configuration options are with the defines.
+ * This value is typically 0.
+ * .num = The number of the instance of the device. This should start at 1
+ *        and increment for each device on the board. This number will be
+ *        appended to the MISC driver name. Example: /dev/focuser.1
+ *        If not used or 0, then nothing is appended to the name.
+ * .sync = If there is a need to synchronize two devices, then this value is
+ * the number of the device instance (.num above) this device is to
+ * sync to. For example:
+ * Device 1 platform entries =
+ * .num = 1,
+ * .sync = 2,
+ *         Device 2 platform entries =
+ * .num = 2,
+ * .sync = 1,
+ *         The above example syncs devices 1 and 2.
+ * This is typically used for stereo applications.
+ * .dev_name = The MISC driver name the device registers as. If not used,
+ * then the part number of the device is used for the driver name.
+ * If using the NVC user driver then use the name found in this
+ * driver under _default_pdata.
+ *
+ * The following is specific to NVC kernel focus drivers:
+ * .nvc = Pointer to the nvc_focus_nvc structure. This structure needs to
+ * be defined and populated if overriding the driver defaults.
+ * .cap = Pointer to the nvc_focus_cap structure. This structure needs to
+ * be defined and populated if overriding the driver defaults.
+ *
+ * The following is specific to only this NVC kernel focus driver:
+ * .info = Pointer to the sh532u_pdata_info structure. This structure does
+ * not need to be defined and populated unless overriding ROM data.
+ * .i2c_addr_rom = The I2C address of the onboard ROM.
+ * .gpio_reset = The GPIO connected to the device's reset. If not used then
+ * leave blank.
+ * .gpio_en = Due to a Linux limitation, a GPIO is defined to "enable" the
+ *             device. This workaround is for when the device's power GPIOs
+ * are behind an I2C expander. The Linux limitation doesn't allow
+ * the I2C GPIO expander to be ready for use when this device is
+ * probed. When this problem is solved, this driver needs to
+ * remove the gpio_en WAR.
+ *
+ * Power Requirements
+ * The board power file must contain the following labels for the power
+ * regulator(s) of this device:
+ * "vdd" = the power regulator for the device's power.
+ * "vdd_i2c" = the power regulator for the I2C power.
+ *
+ * The above values should be all that is needed to use the device with this
+ * driver. Modifications of this driver should not be needed.
+ */
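+
+/*
+ * Example board file hookup (a minimal sketch only; the I2C bus layout, the
+ * 0x72 slave address and the assumption that this driver's i2c_device_id is
+ * "sh532u" are illustrative, not requirements):
+ *
+ *	static struct sh532u_platform_data board_sh532u_pdata = {
+ *		.cfg          = 0,
+ *		.num          = 1,
+ *		.sync         = 0,
+ *		.dev_name     = "focuser",
+ *		.i2c_addr_rom = 0x50,
+ *	};
+ *
+ *	static struct i2c_board_info board_cam_i2c_devices[] = {
+ *		{
+ *			I2C_BOARD_INFO("sh532u", 0x72),
+ *			.platform_data = &board_sh532u_pdata,
+ *		},
+ *	};
+ *
+ * The board power file must also provide the "vdd" and "vdd_i2c" regulators
+ * described above.
+ */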
+
+
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/gpio.h>
+#include <media/nvc.h>
+#include <media/sh532u.h>
+
+#define SH532U_ID 0xF0
+/* defaults if no ROM data */
+#define SH532U_HYPERFOCAL_RATIO 1836 /* 41.2f/224.4f Ratio source: SEMCO */
+/* _HYPERFOCAL_RATIO is multiplied and _HYPERFOCAL_DIV divides for float */
+#define SH532U_HYPERFOCAL_DIV 10000
+#define SH532U_FOCAL_LENGTH 0x408D70A4
+#define SH532U_FNUMBER 0x40333333
+#define SH532U_MAX_APERATURE 0x3FCA0EA1
+/* SH532U_CAPS_VER = 0: invalid value */
+/* SH532U_CAPS_VER = 1: added NVC_PARAM_STS */
+/* SH532U_CAPS_VER = 2: expanded nvc_focus_cap */
+#define SH532U_CAPS_VER 2
+#define SH532U_ACTUATOR_RANGE 1000
+#define SH532U_SETTLETIME 30
+#define SH532U_FOCUS_MACRO 950
+#define SH532U_FOCUS_HYPER 250
+#define SH532U_FOCUS_INFINITY 50
+#define SH532U_TIMEOUT_MS 200
+#define SH532U_POS_LOW_DEFAULT 0xA000
+#define SH532U_POS_HIGH_DEFAULT 0x6000
+
+
+struct sh532u_info {
+ atomic_t in_use;
+ struct i2c_client *i2c_client;
+ struct sh532u_platform_data *pdata;
+ struct miscdevice miscdev;
+ struct list_head list;
+ int pwr_api;
+ int pwr_dev;
+ struct nvc_regulator vreg_vdd;
+ struct nvc_regulator vreg_i2c;
+ u8 s_mode;
+ struct sh532u_info *s_info;
+ unsigned i2c_addr_rom;
+ struct nvc_focus_nvc nvc;
+ struct nvc_focus_cap cap;
+ enum nvc_focus_sts sts;
+ struct sh532u_pdata_info cfg;
+ bool gpio_flag_reset;
+ bool init_cal_flag;
+ s16 abs_base;
+ u32 abs_range;
+ u32 pos_rel;
+ s16 pos_abs;
+ long pos_time_wr;
+};
+
+static struct sh532u_pdata_info sh532u_default_info = {
+ .move_timeoutms = SH532U_TIMEOUT_MS,
+ .focus_hyper_ratio = SH532U_HYPERFOCAL_RATIO,
+ .focus_hyper_div = SH532U_HYPERFOCAL_DIV,
+};
+
+static struct nvc_focus_cap sh532u_default_cap = {
+ .version = SH532U_CAPS_VER,
+ .actuator_range = SH532U_ACTUATOR_RANGE,
+ .settle_time = SH532U_SETTLETIME,
+ .focus_macro = SH532U_FOCUS_MACRO,
+ .focus_hyper = SH532U_FOCUS_HYPER,
+ .focus_infinity = SH532U_FOCUS_INFINITY,
+};
+
+static struct nvc_focus_nvc sh532u_default_nvc = {
+ .focal_length = SH532U_FOCAL_LENGTH,
+ .fnumber = SH532U_FNUMBER,
+ .max_aperature = SH532U_MAX_APERATURE,
+};
+
+static struct sh532u_platform_data sh532u_default_pdata = {
+ .cfg = 0,
+ .num = 0,
+ .sync = 0,
+ .dev_name = "focuser",
+ .i2c_addr_rom = 0x50,
+};
+
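+/*
+ * Hardcoded HVCA init data used by sh532u_dev_init() for "EEPROM" offsets
+ * above 0xFF.  Each 32-bit entry packs, from the low byte up, the HVCA
+ * register address, the data type, data1 and data2 (see the decode in
+ * sh532u_dev_init()).  For example, the first entry 0x0018019c decodes to
+ * addr 0x9c, type 0x01, data1 0x18, data2 0x00.  The 0xffffffff entry
+ * (addr 0xFF) terminates the list.
+ */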
+static u32 sh532u_a2buf[] = {
+ 0x0018019c,
+ 0x0018019d,
+ 0x0000019e,
+ 0x007f0192,
+ 0x00000194,
+ 0x00f00184,
+ 0x00850187,
+ 0x0000018a,
+ 0x00fd7187,
+ 0x007f7183,
+ 0x0008025a,
+ 0x05042218,
+ 0x80010216,
+ 0x000601a0,
+ 0x00808183,
+ 0xffffffff
+};
+
+static LIST_HEAD(sh532u_info_list);
+static DEFINE_SPINLOCK(sh532u_spinlock);
+
+
+static int sh532u_i2c_rd8(struct sh532u_info *info, u8 addr, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+
+ buf[0] = reg;
+ if (addr) {
+ msg[0].addr = addr;
+ msg[1].addr = addr;
+ } else {
+ msg[0].addr = info->i2c_client->addr;
+ msg[1].addr = info->i2c_client->addr;
+ }
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(info->i2c_client->adapter, msg, 2) != 2)
+ return -EIO;
+
+ *val = buf[1];
+ return 0;
+}
+
+static int sh532u_i2c_wr8(struct sh532u_info *info, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = info->i2c_client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+ if (i2c_transfer(info->i2c_client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int sh532u_i2c_rd16(struct sh532u_info *info, u8 reg, u16 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[3];
+
+ buf[0] = reg;
+ msg[0].addr = info->i2c_client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].addr = info->i2c_client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = &buf[1];
+ if (i2c_transfer(info->i2c_client->adapter, msg, 2) != 2)
+ return -EIO;
+
+ *val = (((u16)buf[1] << 8) | (u16)buf[2]);
+ return 0;
+}
+
+
+static int sh532u_i2c_wr16(struct sh532u_info *info, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3];
+
+ buf[0] = reg;
+ buf[1] = (u8)(val >> 8);
+ buf[2] = (u8)(val & 0xff);
+ msg.addr = info->i2c_client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = &buf[0];
+ if (i2c_transfer(info->i2c_client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int sh532u_i2c_rd32(struct sh532u_info *info, u8 addr, u8 reg, u32 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[5];
+
+ buf[0] = reg;
+ if (addr) {
+ msg[0].addr = addr;
+ msg[1].addr = addr;
+ } else {
+ msg[0].addr = info->i2c_client->addr;
+ msg[1].addr = info->i2c_client->addr;
+ }
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 4;
+ msg[1].buf = &buf[1];
+ if (i2c_transfer(info->i2c_client->adapter, msg, 2) != 2)
+ return -EIO;
+
+ *val = (((u32)buf[4] << 24) | ((u32)buf[3] << 16) |
+ ((u32)buf[2] << 8) | ((u32)buf[1]));
+ return 0;
+}
+
+static void sh532u_gpio_en(struct sh532u_info *info, int val)
+{
+ if (info->pdata->gpio_en)
+ gpio_set_value_cansleep(info->pdata->gpio_en, val);
+}
+
+static void sh532u_gpio_reset(struct sh532u_info *info, int val)
+{
+ if (val) {
+ if (!info->gpio_flag_reset && info->pdata->gpio_reset) {
+ gpio_set_value_cansleep(info->pdata->gpio_reset, 0);
+ mdelay(1);
+ gpio_set_value_cansleep(info->pdata->gpio_reset, 1);
+ mdelay(10); /* delay for device startup */
+ info->gpio_flag_reset = 1;
+ }
+ } else {
+ info->gpio_flag_reset = 0;
+ }
+}
+
+static void sh532u_pm_regulator_put(struct nvc_regulator *sreg)
+{
+ regulator_put(sreg->vreg);
+ sreg->vreg = NULL;
+}
+
+static int sh532u_pm_regulator_get(struct sh532u_info *info,
+ struct nvc_regulator *sreg,
+ char vreg_name[])
+{
+ int err = 0;
+
+ sreg->vreg_flag = 0;
+ sreg->vreg = regulator_get(&info->i2c_client->dev, vreg_name);
+	if (IS_ERR_OR_NULL(sreg->vreg)) {
+		err = PTR_ERR(sreg->vreg);
+		dev_err(&info->i2c_client->dev,
+			"%s err for regulator: %s err: %d\n",
+			__func__, vreg_name, err);
+		sreg->vreg = NULL;
+ } else {
+ sreg->vreg_name = vreg_name;
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ return err;
+}
+
+static int sh532u_pm_regulator_en(struct sh532u_info *info,
+ struct nvc_regulator *sreg)
+{
+ int err = 0;
+
+ if (!sreg->vreg_flag && (sreg->vreg != NULL)) {
+ err = regulator_enable(sreg->vreg);
+ if (!err) {
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ sreg->vreg_flag = 1;
+ err = 1; /* flag regulator state change */
+ } else {
+ dev_err(&info->i2c_client->dev,
+ "%s err, regulator: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ }
+ return err;
+}
+
+static int sh532u_pm_regulator_dis(struct sh532u_info *info,
+ struct nvc_regulator *sreg)
+{
+ int err = 0;
+
+ if (sreg->vreg_flag && (sreg->vreg != NULL)) {
+ err = regulator_disable(sreg->vreg);
+ if (err)
+ dev_err(&info->i2c_client->dev,
+ "%s err, regulator: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ sreg->vreg_flag = 0;
+ return err;
+}
+
+static int sh532u_pm_wr(struct sh532u_info *info, int pwr)
+{
+ int err = 0;
+
+ if ((info->pdata->cfg & (NVC_CFG_OFF2STDBY | NVC_CFG_BOOT_INIT)) &&
+ (pwr == NVC_PWR_OFF ||
+ pwr == NVC_PWR_STDBY_OFF))
+ pwr = NVC_PWR_STDBY;
+
+ if (pwr == info->pwr_dev)
+ return 0;
+
+ switch (pwr) {
+ case NVC_PWR_OFF_FORCE:
+ case NVC_PWR_OFF:
+ sh532u_gpio_en(info, 0);
+ err = sh532u_pm_regulator_dis(info, &info->vreg_vdd);
+ err |= sh532u_pm_regulator_dis(info, &info->vreg_i2c);
+ sh532u_gpio_reset(info, 0);
+ break;
+
+ case NVC_PWR_STDBY_OFF:
+ case NVC_PWR_STDBY:
+ err = sh532u_pm_regulator_en(info, &info->vreg_vdd);
+ err |= sh532u_pm_regulator_en(info, &info->vreg_i2c);
+ sh532u_gpio_en(info, 1);
+ sh532u_gpio_reset(info, 1);
+ err |= sh532u_i2c_wr8(info, STBY_211, 0x80);
+ err |= sh532u_i2c_wr8(info, CLKSEL_211, 0x38);
+ err |= sh532u_i2c_wr8(info, CLKSEL_211, 0x39);
+ break;
+
+ case NVC_PWR_COMM:
+ case NVC_PWR_ON:
+ err = sh532u_pm_regulator_en(info, &info->vreg_vdd);
+ err |= sh532u_pm_regulator_en(info, &info->vreg_i2c);
+ sh532u_gpio_en(info, 1);
+ sh532u_gpio_reset(info, 1);
+ err |= sh532u_i2c_wr8(info, CLKSEL_211, 0x38);
+ err |= sh532u_i2c_wr8(info, CLKSEL_211, 0x34);
+ err |= sh532u_i2c_wr8(info, STBY_211, 0xF0);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0) {
+ dev_err(&info->i2c_client->dev, "%s pwr err: %d\n",
+ __func__, pwr);
+ pwr = NVC_PWR_ERR;
+ }
+ info->pwr_dev = pwr;
+ if (err > 0)
+ return 0;
+
+ return err;
+}
+
+static int sh532u_pm_wr_s(struct sh532u_info *info, int pwr)
+{
+ int err1 = 0;
+ int err2 = 0;
+
+ if ((info->s_mode == NVC_SYNC_OFF) ||
+ (info->s_mode == NVC_SYNC_MASTER) ||
+ (info->s_mode == NVC_SYNC_STEREO))
+ err1 = sh532u_pm_wr(info, pwr);
+ if ((info->s_mode == NVC_SYNC_SLAVE) ||
+ (info->s_mode == NVC_SYNC_STEREO))
+ err2 = sh532u_pm_wr(info->s_info, pwr);
+ return err1 | err2;
+}
+
+static int sh532u_pm_api_wr(struct sh532u_info *info, int pwr)
+{
+ int err = 0;
+
+ if (!pwr || (pwr > NVC_PWR_ON))
+ return 0;
+
+ if (pwr > info->pwr_dev)
+ err = sh532u_pm_wr_s(info, pwr);
+ if (!err)
+ info->pwr_api = pwr;
+ else
+ info->pwr_api = NVC_PWR_ERR;
+ if (info->pdata->cfg & NVC_CFG_NOERR)
+ return 0;
+
+ return err;
+}
+
+static int sh532u_pm_dev_wr(struct sh532u_info *info, int pwr)
+{
+ if (pwr < info->pwr_api)
+ pwr = info->pwr_api;
+ if (info->sts == NVC_FOCUS_STS_WAIT_FOR_MOVE_END)
+ pwr = NVC_PWR_ON;
+ return sh532u_pm_wr(info, pwr);
+}
+
+static void sh532u_pm_exit(struct sh532u_info *info)
+{
+ sh532u_pm_wr(info, NVC_PWR_OFF_FORCE);
+ sh532u_pm_regulator_put(&info->vreg_vdd);
+ sh532u_pm_regulator_put(&info->vreg_i2c);
+ if (info->s_info != NULL) {
+ sh532u_pm_wr(info->s_info, NVC_PWR_OFF_FORCE);
+ sh532u_pm_regulator_put(&info->s_info->vreg_vdd);
+ sh532u_pm_regulator_put(&info->s_info->vreg_i2c);
+ }
+}
+
+static void sh532u_pm_init(struct sh532u_info *info)
+{
+ sh532u_pm_regulator_get(info, &info->vreg_vdd, "vdd");
+ sh532u_pm_regulator_get(info, &info->vreg_i2c, "vdd_i2c");
+}
+
+static int sh532u_dev_id(struct sh532u_info *info)
+{
+ u8 val;
+ int err;
+
+ err = sh532u_i2c_rd8(info, 0, HVCA_DEVICE_ID, &val);
+ if (!err && (val == SH532U_ID))
+ return 0;
+
+ return -ENODEV;
+}
+
+static void sh532u_sts_rd(struct sh532u_info *info)
+{
+ u8 us_tmp;
+ u16 us_smv_fin;
+ int err;
+
+ if (info->sts == NVC_FOCUS_STS_INITIALIZING)
+ return;
+
+ info->sts = NVC_FOCUS_STS_NO_DEVICE; /* assume I2C err */
+ err = sh532u_i2c_rd8(info, 0, STMVEN_211, &us_tmp);
+ err |= sh532u_i2c_rd16(info, RZ_211H, &us_smv_fin);
+ if (err)
+ return;
+
+ /* StepMove Error Handling, Unexpected Position */
+ if ((us_smv_fin == 0x7FFF) || (us_smv_fin == 0x8001))
+ /* Stop StepMove Operation */
+ sh532u_i2c_wr8(info, STMVEN_211, us_tmp & 0xFE);
+ if (us_tmp & STMVEN_ON) {
+ err = sh532u_i2c_rd8(info, 0, MSSET_211, &us_tmp);
+ if (!err) {
+ if (us_tmp & CHTGST_ON)
+ info->sts = NVC_FOCUS_STS_WAIT_FOR_SETTLE;
+ else
+ info->sts = NVC_FOCUS_STS_LENS_SETTLED;
+ }
+ } else {
+ info->sts = NVC_FOCUS_STS_WAIT_FOR_MOVE_END;
+ }
+}
+
+static s16 sh532u_rel2abs(struct sh532u_info *info, u32 rel_position)
+{
+ s16 abs_pos;
+
+ if (rel_position > info->cap.actuator_range)
+ rel_position = info->cap.actuator_range;
+ rel_position = info->cap.actuator_range - rel_position;
+ if (rel_position) {
+ rel_position *= info->abs_range;
+ rel_position /= info->cap.actuator_range;
+ }
+ abs_pos = (s16)(info->abs_base + rel_position);
+ if (abs_pos < info->cfg.limit_low)
+ abs_pos = info->cfg.limit_low;
+ if (abs_pos > info->cfg.limit_high)
+ abs_pos = info->cfg.limit_high;
+ return abs_pos;
+}
+
+static u32 sh532u_abs2rel(struct sh532u_info *info, s16 abs_position)
+{
+ u32 rel_pos;
+
+ if (abs_position > info->cfg.limit_high)
+ abs_position = info->cfg.limit_high;
+ if (abs_position < info->abs_base)
+ abs_position = info->abs_base;
+ rel_pos = (u32)(abs_position - info->abs_base);
+ rel_pos *= info->cap.actuator_range;
+ rel_pos /= info->abs_range;
+ if (rel_pos > info->cap.actuator_range)
+ rel_pos = info->cap.actuator_range;
+ rel_pos = info->cap.actuator_range - rel_pos;
+ return rel_pos;
+}
+
+static int sh532u_abs_pos_rd(struct sh532u_info *info, s16 *position)
+{
+ int err;
+ u16 abs_pos = 0;
+
+ err = sh532u_i2c_rd16(info, RZ_211H, &abs_pos);
+ *position = (s16)abs_pos;
+ return err;
+}
+
+static int sh532u_rel_pos_rd(struct sh532u_info *info, u32 *position)
+{
+ s16 abs_pos;
+ long msec;
+ int pos;
+ int err;
+
+ err = sh532u_abs_pos_rd(info, &abs_pos);
+ if (err)
+ return -EINVAL;
+
+ if ((abs_pos >= (info->pos_abs - STMV_SIZE)) &&
+ (abs_pos <= (info->pos_abs + STMV_SIZE))) {
+ pos = (int)info->pos_rel;
+ } else {
+ msec = jiffies;
+ msec -= info->pos_time_wr;
+ msec = msec * 1000 / HZ;
+ sh532u_sts_rd(info);
+ if ((info->sts == NVC_FOCUS_STS_LENS_SETTLED) ||
+ (msec > info->cfg.move_timeoutms)) {
+ pos = (int)info->pos_rel;
+ } else {
+ pos = (int)sh532u_abs2rel(info, abs_pos);
+ if ((pos == (info->pos_rel - 1)) ||
+ (pos == (info->pos_rel + 1)))
+ pos = (int)info->pos_rel;
+ }
+ }
+ if (pos < 0)
+ pos = 0;
+ *position = (u32)pos;
+ return 0;
+}
+
+static int sh532u_calibration(struct sh532u_info *info, bool use_defaults)
+{
+ u8 reg;
+ s16 abs_top;
+ u32 rel_range;
+ u32 rel_lo;
+ u32 rel_hi;
+ u32 step;
+ u32 loop_limit;
+ u32 i;
+ int err;
+ int ret = 0;
+
+ if (info->init_cal_flag)
+ return 0;
+
+ /* set defaults */
+ memcpy(&info->cfg, &sh532u_default_info, sizeof(info->cfg));
+ memcpy(&info->nvc, &sh532u_default_nvc, sizeof(info->nvc));
+ memcpy(&info->cap, &sh532u_default_cap, sizeof(info->cap));
+ if (info->pdata->i2c_addr_rom)
+ info->i2c_addr_rom = info->pdata->i2c_addr_rom;
+ else
+ info->i2c_addr_rom = sh532u_default_pdata.i2c_addr_rom;
+ /* set overrides if any */
+ if (info->pdata->nvc) {
+ if (info->pdata->nvc->fnumber)
+ info->nvc.fnumber = info->pdata->nvc->fnumber;
+ if (info->pdata->nvc->focal_length)
+ info->nvc.focal_length =
+ info->pdata->nvc->focal_length;
+ if (info->pdata->nvc->max_aperature)
+ info->nvc.max_aperature =
+ info->pdata->nvc->max_aperature;
+ }
+ if (info->pdata->cap) {
+ if (info->pdata->cap->actuator_range)
+ info->cap.actuator_range =
+ info->pdata->cap->actuator_range;
+ if (info->pdata->cap->settle_time)
+ info->cap.settle_time = info->pdata->cap->settle_time;
+ if (info->pdata->cap->focus_macro)
+ info->cap.focus_macro = info->pdata->cap->focus_macro;
+ if (info->pdata->cap->focus_hyper)
+ info->cap.focus_hyper = info->pdata->cap->focus_hyper;
+ if (info->pdata->cap->focus_infinity)
+ info->cap.focus_infinity =
+ info->pdata->cap->focus_infinity;
+ }
+ /*
+ * Get Inf1, Mac1
+ * Inf1 and Mac1 are the mechanical limit position.
+ * Inf1: top limit.
+ * Mac1: bottom limit.
+ */
+ err = sh532u_i2c_rd8(info, info->i2c_addr_rom, addrMac1, &reg);
+ if (!err && (reg != 0) && (reg != 0xFF))
+ info->cfg.limit_low = (reg<<8) & 0xff00;
+ ret = err;
+ err = sh532u_i2c_rd8(info, info->i2c_addr_rom, addrInf1, &reg);
+ if (!err && (reg != 0) && (reg != 0xFF))
+ info->cfg.limit_high = (reg<<8) & 0xff00;
+ ret |= err;
+ /*
+ * Get Inf2, Mac2
+ * Inf2 and Mac2 are the calibration data for SEMCO AF lens.
+ * Inf2: Best focus (lens position) when object distance is 1.2M.
+ * Mac2: Best focus (lens position) when object distance is 10cm.
+ */
+ err = sh532u_i2c_rd8(info, info->i2c_addr_rom, addrMac2, &reg);
+ if (!err && (reg != 0) && (reg != 0xFF))
+ info->cfg.pos_low = (reg << 8) & 0xff00;
+ ret |= err;
+ err = sh532u_i2c_rd8(info, info->i2c_addr_rom, addrInf2, &reg);
+ if (!err && (reg != 0) && (reg != 0xFF))
+ info->cfg.pos_high = (reg << 8) & 0xff00;
+ ret |= err;
+ /* set overrides */
+ if (info->pdata->info) {
+ if (info->pdata->info->pos_low)
+ info->cfg.pos_low = info->pdata->info->pos_low;
+ if (info->pdata->info->pos_high)
+ info->cfg.pos_high = info->pdata->info->pos_high;
+ if (info->pdata->info->limit_low)
+ info->cfg.limit_low = info->pdata->info->limit_low;
+ if (info->pdata->info->limit_high)
+ info->cfg.limit_high = info->pdata->info->limit_high;
+ if (info->pdata->info->move_timeoutms)
+ info->cfg.move_timeoutms =
+ info->pdata->info->move_timeoutms;
+ if (info->pdata->info->focus_hyper_ratio)
+ info->cfg.focus_hyper_ratio =
+ info->pdata->info->focus_hyper_ratio;
+ if (info->pdata->info->focus_hyper_div)
+ info->cfg.focus_hyper_div =
+ info->pdata->info->focus_hyper_div;
+ }
+ /*
+	 * Many sh532u devices are known to ship with no EEPROM data.
+	 * Using default data is known to reduce sh532u performance since
+	 * the defaults may be nowhere close to the correct values that
+	 * should be used. However, we don't want to prevent the camera
+	 * from starting due to the lack of EEPROM data.
+	 * The following truth table shows the action to take at this point:
+	 * DFLT = the use_defaults flag (used after multiple attempts)
+	 * I2C  = the I2C transactions to get the data.
+	 * DATA = the needed data either from the EEPROM or board file.
+ * DFLT I2C DATA Action
+ * --------------------------
+ * 0 FAIL FAIL Exit with -EIO
+ * 0 FAIL PASS Continue to calculations
+ * 0 PASS FAIL Use defaults
+ * 0 PASS PASS Continue to calculations
+ * 1 FAIL FAIL Use defaults
+ * 1 FAIL PASS Continue to calculations
+ * 1 PASS FAIL Use defaults
+ * 1 PASS PASS Continue to calculations
+ */
+ /* err = DATA where FAIL = 1 */
+ if (!info->cfg.pos_low || !info->cfg.pos_high ||
+ !info->cfg.limit_low || !info->cfg.limit_high)
+ err = 1;
+ else
+ err = 0;
+ /* Exit with -EIO */
+ if (!use_defaults && ret && err) {
+ dev_err(&info->i2c_client->dev, "%s ERR\n", __func__);
+ return -EIO;
+ }
+
+ /* Use defaults */
+ if (err) {
+ info->cfg.pos_low = SH532U_POS_LOW_DEFAULT;
+ info->cfg.pos_high = SH532U_POS_HIGH_DEFAULT;
+ info->cfg.limit_low = SH532U_POS_LOW_DEFAULT;
+ info->cfg.limit_high = SH532U_POS_HIGH_DEFAULT;
+		dev_err(&info->i2c_client->dev, "%s ERR: EEPROM data is void! "
+ "Focuser will use defaults that will cause "
+ "reduced functionality!\n", __func__);
+ }
+ if (info->cfg.pos_low < info->cfg.limit_low)
+ info->cfg.pos_low = info->cfg.limit_low;
+ if (info->cfg.pos_high > info->cfg.limit_high)
+ info->cfg.pos_high = info->cfg.limit_high;
+ dev_dbg(&info->i2c_client->dev, "%s pos_low=%d\n", __func__,
+ (int)info->cfg.pos_low);
+ dev_dbg(&info->i2c_client->dev, "%s pos_high=%d\n", __func__,
+ (int)info->cfg.pos_high);
+ dev_dbg(&info->i2c_client->dev, "%s limit_low=%d\n", __func__,
+ (int)info->cfg.limit_low);
+ dev_dbg(&info->i2c_client->dev, "%s limit_high=%d\n", __func__,
+ (int)info->cfg.limit_high);
+ /*
+ * calculate relative and absolute positions
+ * Note that relative values, what upper SW uses, are the
+ * abstraction of HW (absolute) values.
+ * |<--limit_low limit_high-->|
+ * | |<-------------------_ACTUATOR_RANGE------------------->| |
+ * -focus_inf -focus_mac
+ * |<---RI--->| |<---RM--->|
+ * -abs_base -pos_low -pos_high -abs_top
+ *
+ * The pos_low and pos_high are fixed absolute positions and correspond
+ * to the relative focus_infinity and focus_macro, respectively. We'd
+ * like to have "wiggle" room (RI and RM) around these relative
+ * positions so the loop below finds the best fit for RI and RM without
+ * passing the absolute limits.
+ * We want our _ACTUATOR_RANGE to be infinity on the 0 end and macro
+ * on the max end. However, the focuser HW is opposite this.
+ * Therefore we use the rel(ative)_lo/hi variables in the calculation
+ * loop and assign them the focus_infinity and focus_macro values.
+ */
+ rel_lo = (info->cap.actuator_range - info->cap.focus_macro);
+ rel_hi = info->cap.focus_infinity;
+ info->abs_range = (u32)(info->cfg.pos_high - info->cfg.pos_low);
+ loop_limit = (rel_lo > rel_hi) ? rel_lo : rel_hi;
+ for (i = 0; i <= loop_limit; i++) {
+ rel_range = info->cap.actuator_range - (rel_lo + rel_hi);
+ step = info->abs_range / rel_range;
+ info->abs_base = info->cfg.pos_low - (step * rel_lo);
+ abs_top = info->cfg.pos_high + (step * rel_hi);
+ if (info->abs_base < info->cfg.limit_low) {
+ if (rel_lo > 0)
+ rel_lo--;
+ }
+ if (abs_top > info->cfg.limit_high) {
+ if (rel_hi > 0)
+ rel_hi--;
+ }
+ if (info->abs_base >= info->cfg.limit_low &&
+ abs_top <= info->cfg.limit_high)
+ break;
+ }
+ info->cap.focus_hyper = info->abs_range;
+ info->abs_range = (u32)(abs_top - info->abs_base);
+ /* calculate absolute hyperfocus position */
+ info->cap.focus_hyper *= info->cfg.focus_hyper_ratio;
+ info->cap.focus_hyper /= info->cfg.focus_hyper_div;
+ abs_top = (s16)(info->cfg.pos_high - info->cap.focus_hyper);
+ /* update actual relative positions */
+ info->cap.focus_hyper = sh532u_abs2rel(info, abs_top);
+ info->cap.focus_infinity = sh532u_abs2rel(info, info->cfg.pos_high);
+ info->cap.focus_macro = sh532u_abs2rel(info, info->cfg.pos_low);
+ dev_dbg(&info->i2c_client->dev, "%s focus_macro=%u\n", __func__,
+ info->cap.focus_macro);
+ dev_dbg(&info->i2c_client->dev, "%s focus_infinity=%u\n", __func__,
+ info->cap.focus_infinity);
+ dev_dbg(&info->i2c_client->dev, "%s focus_hyper=%u\n", __func__,
+ info->cap.focus_hyper);
+ info->init_cal_flag = 1;
+ dev_dbg(&info->i2c_client->dev, "%s complete\n", __func__);
+ return 0;
+}
+
+ /* Write 1 byte data to the HVCA Drive IC by data type */
+static int sh532u_hvca_wr1(struct sh532u_info *info,
+ u8 ep_type, u8 ep_data1, u8 ep_addr)
+{
+ u8 us_data;
+ int err = 0;
+
+ switch (ep_type & 0xF0) {
+ case DIRECT_MODE:
+ us_data = ep_data1;
+ break;
+
+ case INDIRECT_EEPROM:
+ err = sh532u_i2c_rd8(info,
+ info->i2c_addr_rom,
+ ep_data1,
+ &us_data);
+ break;
+
+ case INDIRECT_HVCA:
+ err = sh532u_i2c_rd8(info, 0, ep_data1, &us_data);
+ break;
+
+ case MASK_AND:
+ err = sh532u_i2c_rd8(info, 0, ep_addr, &us_data);
+ us_data &= ep_data1;
+ break;
+
+ case MASK_OR:
+ err = sh532u_i2c_rd8(info, 0, ep_addr, &us_data);
+ us_data |= ep_data1;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (!err)
+ err = sh532u_i2c_wr8(info, ep_addr, us_data);
+ return err;
+}
+
+ /* Write 2 byte data to the HVCA Drive IC by data type */
+static int sh532u_hvca_wr2(struct sh532u_info *info, u8 ep_type,
+ u8 ep_data1, u8 ep_data2, u8 ep_addr)
+{
+ u8 uc_data1;
+ u8 uc_data2;
+ u16 us_data;
+ int err = 0;
+
+ switch (ep_type & 0xF0) {
+ case DIRECT_MODE:
+ us_data = (((u16)ep_data1 << 8) & 0xFF00) |
+ ((u16)ep_data2 & 0x00FF);
+ break;
+
+ case INDIRECT_EEPROM:
+ err = sh532u_i2c_rd8(info,
+ info->i2c_addr_rom,
+ ep_data1,
+ &uc_data1);
+ err |= sh532u_i2c_rd8(info,
+ info->i2c_addr_rom,
+ ep_data2,
+ &uc_data2);
+ us_data = (((u16)uc_data1 << 8) & 0xFF00) |
+ ((u16)uc_data2 & 0x00FF);
+ break;
+
+ case INDIRECT_HVCA:
+ err = sh532u_i2c_rd8(info, 0, ep_data1, &uc_data1);
+ err |= sh532u_i2c_rd8(info, 0, ep_data2, &uc_data2);
+ us_data = (((u16)uc_data1 << 8) & 0xFF00) |
+ ((u16)uc_data2 & 0x00FF);
+ break;
+
+ case MASK_AND:
+ err = sh532u_i2c_rd16(info, ep_addr, &us_data);
+ us_data &= ((((u16)ep_data1 << 8) & 0xFF00) |
+ ((u16)ep_data2 & 0x00FF));
+ break;
+
+ case MASK_OR:
+ err = sh532u_i2c_rd16(info, ep_addr, &us_data);
+ us_data |= ((((u16)ep_data1 << 8) & 0xFF00) |
+ ((u16)ep_data2 & 0x00FF));
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (!err)
+ err = sh532u_i2c_wr16(info, ep_addr, us_data);
+ return err;
+}
+
+static int sh532u_dev_init(struct sh532u_info *info)
+{
+ int eeprom_reg;
+ unsigned eeprom_data = 0;
+ u8 ep_addr;
+ u8 ep_type;
+ u8 ep_data1;
+ u8 ep_data2;
+ int err;
+ int ret = 0;
+
+ err = sh532u_i2c_rd8(info, 0, SWTCH_211, &ep_data1);
+ ep_data2 = ep_data1;
+ err |= sh532u_i2c_rd8(info, 0, ANA1_211, &ep_data1);
+ ep_data2 |= ep_data1;
+ if (!err && ep_data2)
+ return 0; /* Already initialized */
+
+ info->sts = NVC_FOCUS_STS_INITIALIZING;
+ for (eeprom_reg = 0x30; eeprom_reg <= 0x013C; eeprom_reg += 4) {
+ if (eeprom_reg > 0xFF) {
+ /* use hardcoded data instead */
+ eeprom_data = sh532u_a2buf[(eeprom_reg & 0xFF) / 4];
+ } else {
+ err = (sh532u_i2c_rd32(info,
+ info->i2c_addr_rom,
+ eeprom_reg & 0xFF,
+ &eeprom_data));
+ if (err) {
+ ret |= err;
+ continue;
+ }
+ }
+
+ /* HVCA Address to write eeprom Data1,Data2 by the Data type */
+ ep_addr = (u8)(eeprom_data & 0x000000ff);
+ ep_type = (u8)((eeprom_data & 0x0000ff00) >> 8);
+ ep_data1 = (u8)((eeprom_data & 0x00ff0000) >> 16);
+ ep_data2 = (u8)((eeprom_data & 0xff000000) >> 24);
+ if (ep_addr == 0xFF)
+ break;
+
+ if (ep_addr == 0xDD) {
+ mdelay((unsigned int)((ep_data1 << 8) | ep_data2));
+ } else {
+ if ((ep_type & 0x0F) == DATA_1BYTE) {
+ err = sh532u_hvca_wr1(info,
+ ep_type,
+ ep_data1,
+ ep_addr);
+ } else {
+ err = sh532u_hvca_wr2(info,
+ ep_type,
+ ep_data1,
+ ep_data2,
+ ep_addr);
+ }
+ }
+ ret |= err;
+ }
+
+ err = ret;
+ if (err)
+ dev_err(&info->i2c_client->dev, "%s programming err=%d\n",
+ __func__, err);
+ err |= sh532u_calibration(info, false);
+ info->sts = NVC_FOCUS_STS_LENS_SETTLED;
+ return err;
+}
+
+static int sh532u_pos_abs_wr(struct sh532u_info *info, s16 tar_pos)
+{
+ s16 cur_pos;
+ s16 move_step;
+ u16 move_distance;
+ int err;
+
+ sh532u_pm_dev_wr(info, NVC_PWR_ON);
+ err = sh532u_dev_init(info);
+ if (err)
+ return err;
+
+ /* Read Current Position */
+ err = sh532u_abs_pos_rd(info, &cur_pos);
+ if (err)
+ return err;
+
+ dev_dbg(&info->i2c_client->dev, "%s cur_pos=%d tar_pos=%d\n",
+ __func__, (int)cur_pos, (int)tar_pos);
+ info->sts = NVC_FOCUS_STS_WAIT_FOR_MOVE_END;
+ /* Check move distance to Target Position */
+ move_distance = abs((int)cur_pos - (int)tar_pos);
+ /* if move distance is shorter than MS1Z12(=Step width) */
+ if (move_distance <= STMV_SIZE) {
+ err = sh532u_i2c_wr8(info, MSSET_211,
+ (INI_MSSET_211 | 0x01));
+ err |= sh532u_i2c_wr16(info, MS1Z22_211H, tar_pos);
+ } else {
+ if (cur_pos < tar_pos)
+ move_step = STMV_SIZE;
+ else
+ move_step = -STMV_SIZE;
+		/* Set StepMove Target Position */
+ err = sh532u_i2c_wr16(info, MS1Z12_211H, move_step);
+ err |= sh532u_i2c_wr16(info, STMVENDH_211, tar_pos);
+ /* Start StepMove */
+ err |= sh532u_i2c_wr8(info, STMVEN_211,
+ (STMCHTG_ON |
+ STMSV_ON |
+ STMLFF_OFF |
+ STMVEN_ON));
+ }
+ return err;
+}
+
+static int sh532u_move_wait(struct sh532u_info *info)
+{
+ u16 us_smv_fin;
+ u8 moveTime;
+ u8 ucParMod;
+ u8 tmp;
+ int err;
+
+ moveTime = 0;
+ do {
+ mdelay(1);
+ err = sh532u_i2c_rd8(info, 0, STMVEN_211, &ucParMod);
+ err |= sh532u_i2c_rd16(info, RZ_211H, &us_smv_fin);
+ if (err)
+ return err;
+
+ /* StepMove Error Handling, Unexpected Position */
+ if ((us_smv_fin == 0x7FFF) || (us_smv_fin == 0x8001)) {
+ /* Stop StepMove Operation */
+ err = sh532u_i2c_wr8(info, STMVEN_211,
+ ucParMod & 0xFE);
+ if (err)
+ return err;
+ }
+
+ moveTime++;
+ /* Wait StepMove operation end */
+ } while ((ucParMod & STMVEN_ON) && (moveTime < 50));
+
+ moveTime = 0;
+ if ((ucParMod & 0x08) == STMCHTG_ON) {
+ mdelay(5);
+ do {
+ mdelay(1);
+ moveTime++;
+ err = sh532u_i2c_rd8(info, 0, MSSET_211, &tmp);
+ if (err)
+ return err;
+
+ } while ((tmp & CHTGST_ON) && (moveTime < 15));
+ }
+ return err;
+}
+
+static int sh532u_move_pulse(struct sh532u_info *info, s16 position)
+{
+ int err;
+
+ err = sh532u_pos_abs_wr(info, position);
+ err |= sh532u_move_wait(info);
+ return err;
+}
+
+static int sh532u_hvca_pos_init(struct sh532u_info *info)
+{
+ s16 limit_bottom;
+ s16 limit_top;
+ int err;
+
+ limit_bottom = (((int)info->cfg.limit_low * 5) >> 3) & 0xFFC0;
+ if (limit_bottom < info->cfg.limit_low)
+ limit_bottom = info->cfg.limit_low;
+ limit_top = (((int)info->cfg.limit_high * 5) >> 3) & 0xFFC0;
+ if (limit_top > info->cfg.limit_high)
+ limit_top = info->cfg.limit_high;
+ err = sh532u_move_pulse(info, limit_bottom);
+ err |= sh532u_move_pulse(info, limit_top);
+ err |= sh532u_move_pulse(info, info->cfg.pos_high);
+ return err;
+}
+
+static int sh532u_pos_rel_wr(struct sh532u_info *info, u32 position)
+{
+ s16 abs_pos;
+
+ if (position > info->cap.actuator_range) {
+ dev_err(&info->i2c_client->dev, "%s invalid position %u\n",
+ __func__, position);
+ return -EINVAL;
+ }
+
+ abs_pos = sh532u_rel2abs(info, position);
+ info->pos_rel = position;
+ info->pos_abs = abs_pos;
+ info->pos_time_wr = jiffies;
+ return sh532u_pos_abs_wr(info, abs_pos);
+}
+
+
+static int sh532u_param_rd(struct sh532u_info *info, unsigned long arg)
+{
+ struct nvc_param params;
+ const void *data_ptr;
+ u32 data_size = 0;
+ u32 position;
+ int err;
+
+ if (copy_from_user(&params,
+ (const void __user *)arg,
+ sizeof(struct nvc_param))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (info->s_mode == NVC_SYNC_SLAVE)
+ info = info->s_info;
+ switch (params.param) {
+ case NVC_PARAM_LOCUS:
+ sh532u_pm_dev_wr(info, NVC_PWR_COMM);
+ err = sh532u_rel_pos_rd(info, &position);
+ if (err && !(info->pdata->cfg & NVC_CFG_NOERR))
+ return -EINVAL;
+
+ data_ptr = &position;
+ data_size = sizeof(position);
+ sh532u_pm_dev_wr(info, NVC_PWR_STDBY);
+ dev_dbg(&info->i2c_client->dev, "%s LOCUS: %d\n",
+ __func__, position);
+ break;
+
+ case NVC_PARAM_FOCAL_LEN:
+ data_ptr = &info->nvc.focal_length;
+ data_size = sizeof(info->nvc.focal_length);
+ dev_dbg(&info->i2c_client->dev, "%s FOCAL_LEN: %x\n",
+ __func__, info->nvc.focal_length);
+ break;
+
+ case NVC_PARAM_MAX_APERTURE:
+ data_ptr = &info->nvc.max_aperature;
+ data_size = sizeof(info->nvc.max_aperature);
+ dev_dbg(&info->i2c_client->dev, "%s MAX_APERTURE: %x\n",
+ __func__, info->nvc.max_aperature);
+ break;
+
+ case NVC_PARAM_FNUMBER:
+ data_ptr = &info->nvc.fnumber;
+ data_size = sizeof(info->nvc.fnumber);
+ dev_dbg(&info->i2c_client->dev, "%s FNUMBER: %x\n",
+ __func__, info->nvc.fnumber);
+ break;
+
+ case NVC_PARAM_CAPS:
+ sh532u_pm_dev_wr(info, NVC_PWR_COMM);
+ err = sh532u_calibration(info, true);
+ sh532u_pm_dev_wr(info, NVC_PWR_STDBY);
+ if (err)
+ return -EIO;
+
+ data_ptr = &info->cap;
+ /* there are different sizes depending on the version */
+ /* send back just what's requested or our max size */
+ if (params.sizeofvalue < sizeof(info->cap))
+ data_size = params.sizeofvalue;
+ else
+ data_size = sizeof(info->cap);
+ dev_dbg(&info->i2c_client->dev, "%s CAPS\n",
+ __func__);
+ break;
+
+ case NVC_PARAM_STS:
+ data_ptr = &info->sts;
+ data_size = sizeof(info->sts);
+ dev_dbg(&info->i2c_client->dev, "%s STS: %d\n",
+ __func__, info->sts);
+ break;
+
+ case NVC_PARAM_STEREO:
+ data_ptr = &info->s_mode;
+ data_size = sizeof(info->s_mode);
+ dev_dbg(&info->i2c_client->dev, "%s STEREO: %d\n",
+ __func__, info->s_mode);
+ break;
+
+ default:
+ dev_err(&info->i2c_client->dev,
+ "%s unsupported parameter: %d\n",
+ __func__, params.param);
+ return -EINVAL;
+ }
+
+ if (params.sizeofvalue < data_size) {
+ dev_err(&info->i2c_client->dev, "%s %d data size err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void __user *)params.p_value,
+ data_ptr,
+ data_size)) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_to_user err\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int sh532u_param_wr_s(struct sh532u_info *info,
+ struct nvc_param *params,
+ u32 u32_val)
+{
+ int err;
+
+ switch (params->param) {
+ case NVC_PARAM_LOCUS:
+ dev_dbg(&info->i2c_client->dev, "%s LOCUS: %u\n",
+ __func__, u32_val);
+ err = sh532u_pos_rel_wr(info, u32_val);
+ return err;
+
+ case NVC_PARAM_RESET:
+ err = sh532u_pm_wr(info, NVC_PWR_OFF);
+ err |= sh532u_pm_wr(info, NVC_PWR_ON);
+ err |= sh532u_pm_wr(info, info->pwr_api);
+ dev_dbg(&info->i2c_client->dev, "%s RESET: %d\n",
+ __func__, err);
+ return err;
+
+ case NVC_PARAM_SELF_TEST:
+ err = sh532u_hvca_pos_init(info);
+ dev_dbg(&info->i2c_client->dev, "%s SELF_TEST: %d\n",
+ __func__, err);
+ return err;
+
+ default:
+ dev_err(&info->i2c_client->dev,
+ "%s unsupported parameter: %d\n",
+ __func__, params->param);
+ return -EINVAL;
+ }
+}
+
+static int sh532u_param_wr(struct sh532u_info *info, unsigned long arg)
+{
+ struct nvc_param params;
+ u8 val;
+ u32 u32_val;
+ int err = 0;
+
+ if (copy_from_user(&params,
+ (const void __user *)arg,
+ sizeof(struct nvc_param))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&u32_val, (const void __user *)params.p_value,
+ sizeof(u32_val))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /* parameters independent of sync mode */
+ switch (params.param) {
+ case NVC_PARAM_STEREO:
+ dev_dbg(&info->i2c_client->dev, "%s STEREO: %u\n",
+ __func__, u32_val);
+ val = (u8)u32_val;
+ if (val == info->s_mode)
+ return 0;
+
+ switch (val) {
+ case NVC_SYNC_OFF:
+ info->s_mode = val;
+ if (info->s_info != NULL) {
+ info->s_info->s_mode = val;
+ sh532u_pm_wr(info->s_info, NVC_PWR_OFF);
+ }
+ break;
+
+ case NVC_SYNC_MASTER:
+ info->s_mode = val;
+ if (info->s_info != NULL)
+ info->s_info->s_mode = val;
+ break;
+
+ case NVC_SYNC_SLAVE:
+ if (info->s_info != NULL) {
+ /* default slave lens position */
+ err = sh532u_pos_rel_wr(info->s_info,
+ info->s_info->cap.focus_infinity);
+ if (!err) {
+ info->s_mode = val;
+ info->s_info->s_mode = val;
+ } else {
+ if (info->s_mode != NVC_SYNC_STEREO)
+ sh532u_pm_wr(info->s_info,
+ NVC_PWR_OFF);
+ err = -EIO;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ case NVC_SYNC_STEREO:
+ if (info->s_info != NULL) {
+ /* sync power */
+ info->s_info->pwr_api = info->pwr_api;
+ /* move slave lens to master position */
+ err = sh532u_pos_rel_wr(info->s_info,
+ info->pos_rel);
+ if (!err) {
+ info->s_mode = val;
+ info->s_info->s_mode = val;
+ } else {
+ if (info->s_mode != NVC_SYNC_SLAVE)
+ sh532u_pm_wr(info->s_info,
+ NVC_PWR_OFF);
+ err = -EIO;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (info->pdata->cfg & NVC_CFG_NOERR)
+ return 0;
+
+ return err;
+
+ default:
+ /* parameters dependent on sync mode */
+ switch (info->s_mode) {
+ case NVC_SYNC_OFF:
+ case NVC_SYNC_MASTER:
+ return sh532u_param_wr_s(info, &params, u32_val);
+
+ case NVC_SYNC_SLAVE:
+ return sh532u_param_wr_s(info->s_info,
+ &params,
+ u32_val);
+
+ case NVC_SYNC_STEREO:
+ err = sh532u_param_wr_s(info, &params, u32_val);
+ if (!(info->pdata->cfg & NVC_CFG_SYNC_I2C_MUX))
+ err |= sh532u_param_wr_s(info->s_info,
+ &params,
+ u32_val);
+ return err;
+
+ default:
+ dev_err(&info->i2c_client->dev, "%s %d internal err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
+}
+
+static long sh532u_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct sh532u_info *info = file->private_data;
+ int pwr;
+
+ switch (cmd) {
+ case NVC_IOCTL_PARAM_WR:
+ return sh532u_param_wr(info, arg);
+
+ case NVC_IOCTL_PARAM_RD:
+ return sh532u_param_rd(info, arg);
+
+ case NVC_IOCTL_PWR_WR:
+ /* This is a Guaranteed Level of Service (GLOS) call */
+ pwr = (int)arg * 2;
+ dev_dbg(&info->i2c_client->dev, "%s PWR: %d\n",
+ __func__, pwr);
+ return sh532u_pm_api_wr(info, pwr);
+
+ case NVC_IOCTL_PWR_RD:
+ if (info->s_mode == NVC_SYNC_SLAVE)
+ pwr = info->s_info->pwr_api / 2;
+ else
+ pwr = info->pwr_api / 2;
+ dev_dbg(&info->i2c_client->dev, "%s PWR_RD: %d\n",
+ __func__, pwr);
+ if (copy_to_user((void __user *)arg, (const void *)&pwr,
+ sizeof(pwr))) {
+ dev_err(&info->i2c_client->dev,
+ "%s copy_to_user err line %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+
+ default:
+ dev_err(&info->i2c_client->dev, "%s unsupported ioctl: %x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+}
+
+static int sh532u_sync_en(int dev1, int dev2)
+{
+ struct sh532u_info *sync1 = NULL;
+ struct sh532u_info *sync2 = NULL;
+ struct sh532u_info *pos = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &sh532u_info_list, list) {
+ if (pos->pdata->num == dev1) {
+ sync1 = pos;
+ break;
+ }
+ }
+ pos = NULL;
+ list_for_each_entry_rcu(pos, &sh532u_info_list, list) {
+ if (pos->pdata->num == dev2) {
+ sync2 = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (sync1 != NULL)
+ sync1->s_info = NULL;
+ if (sync2 != NULL)
+ sync2->s_info = NULL;
+ if (!dev1 && !dev2)
+ return 0; /* no err if default instance 0's used */
+
+ if (dev1 == dev2)
+ return -EINVAL; /* err if sync instance is itself */
+
+ if ((sync1 != NULL) && (sync2 != NULL)) {
+ sync1->s_info = sync2;
+ sync2->s_info = sync1;
+ }
+ return 0;
+}
+
+static int sh532u_sync_dis(struct sh532u_info *info)
+{
+ if (info->s_info != NULL) {
+ info->s_info->s_mode = 0;
+ info->s_info->s_info = NULL;
+ info->s_mode = 0;
+ info->s_info = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sh532u_open(struct inode *inode, struct file *file)
+{
+ struct sh532u_info *info = NULL;
+ struct sh532u_info *pos = NULL;
+ int err;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &sh532u_info_list, list) {
+ if (pos->miscdev.minor == iminor(inode)) {
+ info = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!info)
+ return -ENODEV;
+
+ err = sh532u_sync_en(info->pdata->num, info->pdata->sync);
+ if (err == -EINVAL)
+ dev_err(&info->i2c_client->dev,
+ "%s err: invalid num (%u) and sync (%u) instance\n",
+ __func__, info->pdata->num, info->pdata->sync);
+ if (atomic_xchg(&info->in_use, 1))
+ return -EBUSY;
+
+ if (info->s_info != NULL) {
+ if (atomic_xchg(&info->s_info->in_use, 1))
+ return -EBUSY;
+ }
+
+ file->private_data = info;
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ sh532u_pos_rel_wr(info, info->cap.focus_infinity);
+ return 0;
+}
+
+static int sh532u_release(struct inode *inode, struct file *file)
+{
+ struct sh532u_info *info = file->private_data;
+
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ sh532u_pm_wr_s(info, NVC_PWR_OFF);
+ file->private_data = NULL;
+ WARN_ON(!atomic_xchg(&info->in_use, 0));
+ if (info->s_info != NULL)
+ WARN_ON(!atomic_xchg(&info->s_info->in_use, 0));
+ sh532u_sync_dis(info);
+ return 0;
+}
+
+static const struct file_operations sh532u_fileops = {
+ .owner = THIS_MODULE,
+ .open = sh532u_open,
+ .unlocked_ioctl = sh532u_ioctl,
+ .release = sh532u_release,
+};
+
+static void sh532u_del(struct sh532u_info *info)
+{
+ sh532u_pm_exit(info);
+ sh532u_sync_dis(info);
+ spin_lock(&sh532u_spinlock);
+ list_del_rcu(&info->list);
+ spin_unlock(&sh532u_spinlock);
+ synchronize_rcu();
+}
+
+static int sh532u_remove(struct i2c_client *client)
+{
+ struct sh532u_info *info = i2c_get_clientdata(client);
+
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ misc_deregister(&info->miscdev);
+ sh532u_del(info);
+ return 0;
+}
+
+static int sh532u_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sh532u_info *info = NULL;
+ char dname[16];
+ int err;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&client->dev, "%s: kzalloc error\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->i2c_client = client;
+ if (client->dev.platform_data) {
+ info->pdata = client->dev.platform_data;
+ } else {
+ info->pdata = &sh532u_default_pdata;
+ dev_dbg(&client->dev,
+ "%s No platform data. Using defaults.\n",
+ __func__);
+ }
+ i2c_set_clientdata(client, info);
+ INIT_LIST_HEAD(&info->list);
+ spin_lock(&sh532u_spinlock);
+ list_add_rcu(&info->list, &sh532u_info_list);
+ spin_unlock(&sh532u_spinlock);
+ sh532u_pm_init(info);
+ sh532u_pm_dev_wr(info, NVC_PWR_COMM);
+ err = sh532u_dev_id(info);
+ if (err < 0) {
+ dev_err(&client->dev, "%s device not found\n", __func__);
+ sh532u_pm_wr(info, NVC_PWR_OFF);
+ if (info->pdata->cfg & NVC_CFG_NODEV) {
+ sh532u_del(info);
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(&client->dev, "%s device found\n", __func__);
+ sh532u_calibration(info, false);
+ if (info->pdata->cfg & NVC_CFG_BOOT_INIT) {
+ /* initial move causes full initialization */
+ sh532u_pos_rel_wr(info, info->cap.focus_infinity);
+ } else {
+ sh532u_pm_wr(info, NVC_PWR_OFF);
+ }
+ }
+
+	if (info->pdata->dev_name != NULL)
+		strlcpy(dname, info->pdata->dev_name, sizeof(dname));
+	else
+		strlcpy(dname, "sh532u", sizeof(dname));
+	if (info->pdata->num)
+		snprintf(dname + strlen(dname), sizeof(dname) - strlen(dname),
+			 ".%u", info->pdata->num);
+ info->miscdev.name = dname;
+ info->miscdev.fops = &sh532u_fileops;
+ info->miscdev.minor = MISC_DYNAMIC_MINOR;
+ if (misc_register(&info->miscdev)) {
+ dev_err(&client->dev, "%s unable to register misc device %s\n",
+ __func__, dname);
+ sh532u_del(info);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id sh532u_id[] = {
+ { "sh532u", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, sh532u_id);
+
+static struct i2c_driver sh532u_i2c_driver = {
+ .driver = {
+ .name = "sh532u",
+ .owner = THIS_MODULE,
+ },
+ .id_table = sh532u_id,
+ .probe = sh532u_probe,
+ .remove = sh532u_remove,
+};
+
+static int __init sh532u_init(void)
+{
+ return i2c_add_driver(&sh532u_i2c_driver);
+}
+
+static void __exit sh532u_exit(void)
+{
+ i2c_del_driver(&sh532u_i2c_driver);
+}
+
+module_init(sh532u_init);
+module_exit(sh532u_exit);
+
diff --git a/drivers/media/video/tegra/soc380.c b/drivers/media/video/tegra/soc380.c
new file mode 100644
index 000000000000..7f2c13614660
--- /dev/null
+++ b/drivers/media/video/tegra/soc380.c
@@ -0,0 +1,473 @@
+/*
+ * soc380.c - soc380 sensor driver
+ *
+ * Copyright (c) 2011, NVIDIA, All Rights Reserved.
+ *
+ * Contributors:
+ * Abhinav Sinha <absinha@nvidia.com>
+ *
+ * Leveraged from OV2710.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/**
+ * SetMode Sequence for 640x480. Phase 0. Sensor Dependent.
+ * This sequence should put the sensor in streaming mode for 640x480.
+ * It is usually provided by the FAE or the sensor vendor.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/soc380.h>
+
+struct soc380_reg {
+ u16 addr;
+ u16 val;
+};
+
+struct soc380_info {
+ int mode;
+ struct i2c_client *i2c_client;
+ struct soc380_platform_data *pdata;
+};
+
+#define SOC380_TABLE_WAIT_MS 0
+#define SOC380_TABLE_END 1
+#define SOC380_MAX_RETRIES 3
+
+static struct soc380_reg mode_640x480[] = {
+ {0x001A, 0x0011},
+
+ {SOC380_TABLE_WAIT_MS, 1},
+
+ {0x001A, 0x0010},
+
+ {SOC380_TABLE_WAIT_MS, 1},
+
+ {0x0018, 0x4028},
+ {0x001A, 0x0210},
+ {0x0010, 0x021c},
+ {0x0012, 0x0000},
+ {0x0014, 0x244B},
+
+ {SOC380_TABLE_WAIT_MS, 10},
+
+ {0x0014, 0x304B},
+
+ {SOC380_TABLE_WAIT_MS, 50},
+
+ {0x0014, 0xB04A},
+
+ {0x098C, 0x2703},
+ {0x0990, 0x0280},
+ {0x098C, 0x2705},
+ {0x0990, 0x01E0},
+ {0x098C, 0x2707},
+ {0x0990, 0x0280},
+ {0x098C, 0x2709},
+ {0x0990, 0x01E0},
+ {0x098C, 0x270D},
+ {0x0990, 0x0000},
+ {0x098C, 0x270F},
+ {0x0990, 0x0000},
+ {0x098C, 0x2711},
+ {0x0990, 0x01E7},
+ {0x098C, 0x2713},
+ {0x0990, 0x0287},
+ {0x098C, 0x2715},
+ {0x0990, 0x0001},
+ {0x098C, 0x2717},
+ {0x0990, 0x0026},
+ {0x098C, 0x2719},
+ {0x0990, 0x001A},
+ {0x098C, 0x271B},
+ {0x0990, 0x006B},
+ {0x098C, 0x271D},
+ {0x0990, 0x006B},
+ {0x098C, 0x271F},
+ {0x0990, 0x022A},
+ {0x098C, 0x2721},
+ {0x0990, 0x034A},
+ {0x098C, 0x2723},
+ {0x0990, 0x0000},
+ {0x098C, 0x2725},
+ {0x0990, 0x0000},
+ {0x098C, 0x2727},
+ {0x0990, 0x01E7},
+ {0x098C, 0x2729},
+ {0x0990, 0x0287},
+ {0x098C, 0x272B},
+ {0x0990, 0x0001},
+ {0x098C, 0x272D},
+ {0x0990, 0x0026},
+ {0x098C, 0x272F},
+ {0x0990, 0x001A},
+ {0x098C, 0x2731},
+ {0x0990, 0x006B},
+ {0x098C, 0x2733},
+ {0x0990, 0x006B},
+ {0x098C, 0x2735},
+ {0x0990, 0x022A},
+ {0x098C, 0x2737},
+ {0x0990, 0x034A},
+ {0x098C, 0x2739},
+ {0x0990, 0x0000},
+ {0x098C, 0x273B},
+ {0x0990, 0x027F},
+ {0x098C, 0x273D},
+ {0x0990, 0x0000},
+ {0x098C, 0x273F},
+ {0x0990, 0x01DF},
+ {0x098C, 0x2747},
+ {0x0990, 0x0000},
+ {0x098C, 0x2749},
+ {0x0990, 0x027F},
+ {0x098C, 0x274B},
+ {0x0990, 0x0000},
+ {0x098C, 0x274D},
+ {0x0990, 0x01DF},
+ {0x098C, 0x222D},
+ {0x0990, 0x008B},
+ {0x098C, 0xA408},
+ {0x0990, 0x0021},
+ {0x098C, 0xA409},
+ {0x0990, 0x0023},
+ {0x098C, 0xA40A},
+ {0x0990, 0x0028},
+ {0x098C, 0xA40B},
+ {0x0990, 0x002A},
+ {0x098C, 0x2411},
+ {0x0990, 0x008B},
+ {0x098C, 0x2413},
+ {0x0990, 0x00A6},
+ {0x098C, 0x2415},
+ {0x0990, 0x008B},
+ {0x098C, 0x2417},
+ {0x0990, 0x00A6},
+ {0x098C, 0xA404},
+ {0x0990, 0x0010},
+ {0x098C, 0xA40D},
+ {0x0990, 0x0002},
+ {0x098C, 0xA40E},
+ {0x0990, 0x0003},
+ {0x098C, 0xA410},
+ {0x0990, 0x000A},
+ {0x098C, 0xA215},
+ {0x0990, 0x0003},
+ {0x098C, 0xA20C},
+ {0x0990, 0x0003},
+
+ {0x098C, 0xA103},
+ {0x0990, 0x0006},
+ {SOC380_TABLE_WAIT_MS, 100},
+
+ {0x098C, 0xA103},
+ {0x0990, 0x0005},
+ {SOC380_TABLE_WAIT_MS, 50},
+
+ {SOC380_TABLE_END, 0x0000}
+};
+
+enum {
+	SOC380_MODE_640x480,
+};
+
+static struct soc380_reg *mode_table[] = {
+	[SOC380_MODE_640x480] = mode_640x480,
+};
+
+static int soc380_read_reg(struct i2c_client *client, u16 addr, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[4];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = data + 2;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+
+ if (err != 2)
+ return -EINVAL;
+
+ *val = data[2] << 8 | data[3];
+
+ return 0;
+}
+
+static int soc380_write_reg(struct i2c_client *client, u16 addr, u16 val)
+{
+ int err;
+ struct i2c_msg msg;
+ unsigned char data[4];
+ int retry = 0;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+ data[2] = (u8) (val >> 8);
+ data[3] = (u8) (val & 0xff);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 4;
+ msg.buf = data;
+
+ do {
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err == 1)
+ return 0;
+ retry++;
+ pr_err("soc380: i2c transfer failed, retrying %x %x\n",
+ addr, val);
+ msleep(3);
+ } while (retry <= SOC380_MAX_RETRIES);
+
+ return err;
+}
+
+static int soc380_write_table(struct i2c_client *client,
+ const struct soc380_reg table[],
+ const struct soc380_reg override_list[],
+ int num_override_regs)
+{
+ int err;
+ const struct soc380_reg *next;
+ int i;
+ u16 val;
+
+ for (next = table; next->addr != SOC380_TABLE_END; next++) {
+ if (next->addr == SOC380_TABLE_WAIT_MS) {
+ msleep(next->val);
+ continue;
+ }
+
+ val = next->val;
+
+ /* When an override list is passed in, replace the reg */
+ /* value to write if the reg is in the list */
+ if (override_list) {
+ for (i = 0; i < num_override_regs; i++) {
+ if (next->addr == override_list[i].addr) {
+ val = override_list[i].val;
+ break;
+ }
+ }
+ }
+
+ err = soc380_write_reg(client, next->addr, val);
+ if (err)
+ return err;
+ }
+ return 0;
+}
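+
+/*
+ * Example (hypothetical): the override list lets a caller reuse
+ * mode_640x480 while substituting individual register values; every table
+ * entry whose address appears in the list is written with the override
+ * value instead. The address/value below are placeholders only:
+ *
+ *	static const struct soc380_reg board_overrides[] = {
+ *		{0x0018, 0x402C},
+ *	};
+ *	err = soc380_write_table(info->i2c_client, mode_640x480,
+ *				 board_overrides,
+ *				 ARRAY_SIZE(board_overrides));
+ */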
+
+static int soc380_set_mode(struct soc380_info *info, struct soc380_mode *mode)
+{
+ int sensor_mode;
+ int err;
+
+ pr_info("%s: xres %u yres %u\n", __func__, mode->xres, mode->yres);
+ if (mode->xres == 640 && mode->yres == 480)
+		sensor_mode = SOC380_MODE_640x480;
+ else {
+ pr_err("%s: invalid resolution supplied to set mode %d %d\n",
+ __func__, mode->xres, mode->yres);
+ return -EINVAL;
+ }
+
+ err = soc380_write_table(info->i2c_client, mode_table[sensor_mode],
+ NULL, 0);
+ if (err)
+ return err;
+
+ info->mode = sensor_mode;
+ return 0;
+}
+
+static int soc380_get_status(struct soc380_info *info,
+ struct soc380_status *dev_status)
+{
+ int err;
+
+ err = soc380_write_reg(info->i2c_client, 0x98C, dev_status->data);
+ if (err)
+ return err;
+
+ err = soc380_read_reg(info->i2c_client, 0x0990,
+ (u16 *) &dev_status->status);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static long soc380_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ struct soc380_info *info = file->private_data;
+
+ switch (cmd) {
+ case SOC380_IOCTL_SET_MODE:
+ {
+ struct soc380_mode mode;
+ if (copy_from_user(&mode,
+ (const void __user *)arg,
+ sizeof(struct soc380_mode))) {
+ return -EFAULT;
+ }
+
+ return soc380_set_mode(info, &mode);
+ }
+ case SOC380_IOCTL_GET_STATUS:
+ {
+ struct soc380_status dev_status;
+ if (copy_from_user(&dev_status,
+ (const void __user *)arg,
+ sizeof(struct soc380_status))) {
+ return -EFAULT;
+ }
+
+ err = soc380_get_status(info, &dev_status);
+ if (err)
+ return err;
+ if (copy_to_user((void __user *)arg, &dev_status,
+ sizeof(struct soc380_status))) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct soc380_info *info;
+
+static int soc380_open(struct inode *inode, struct file *file)
+{
+ struct soc380_status dev_status;
+ int err;
+
+ file->private_data = info;
+ if (info->pdata && info->pdata->power_on)
+ info->pdata->power_on();
+
+ dev_status.data = 0;
+ dev_status.status = 0;
+ err = soc380_get_status(info, &dev_status);
+ return err;
+}
+
+static int soc380_release(struct inode *inode, struct file *file)
+{
+ if (info->pdata && info->pdata->power_off)
+ info->pdata->power_off();
+ file->private_data = NULL;
+ return 0;
+}
+
+static const struct file_operations soc380_fileops = {
+ .owner = THIS_MODULE,
+ .open = soc380_open,
+ .unlocked_ioctl = soc380_ioctl,
+ .release = soc380_release,
+};
+
+static struct miscdevice soc380_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "soc380",
+ .fops = &soc380_fileops,
+};
+
+static int soc380_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+
+ pr_info("soc380: probing sensor.\n");
+
+ info = kzalloc(sizeof(struct soc380_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("soc380: Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ err = misc_register(&soc380_device);
+ if (err) {
+ pr_err("soc380: Unable to register misc device!\n");
+ kfree(info);
+ return err;
+ }
+
+ info->pdata = client->dev.platform_data;
+ info->i2c_client = client;
+
+ i2c_set_clientdata(client, info);
+ return 0;
+}
+
+static int soc380_remove(struct i2c_client *client)
+{
+ struct soc380_info *info;
+ info = i2c_get_clientdata(client);
+ misc_deregister(&soc380_device);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id soc380_id[] = {
+ { "soc380", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, soc380_id);
+
+static struct i2c_driver soc380_i2c_driver = {
+ .driver = {
+ .name = "soc380",
+ .owner = THIS_MODULE,
+ },
+ .probe = soc380_probe,
+ .remove = soc380_remove,
+ .id_table = soc380_id,
+};
+
+static int __init soc380_init(void)
+{
+ pr_info("soc380 sensor driver loading\n");
+ return i2c_add_driver(&soc380_i2c_driver);
+}
+
+static void __exit soc380_exit(void)
+{
+ i2c_del_driver(&soc380_i2c_driver);
+}
+
+module_init(soc380_init);
+module_exit(soc380_exit);
diff --git a/drivers/media/video/tegra/ssl3250a.c b/drivers/media/video/tegra/ssl3250a.c
new file mode 100644
index 000000000000..30c70bf1cdc5
--- /dev/null
+++ b/drivers/media/video/tegra/ssl3250a.c
@@ -0,0 +1,986 @@
+/*
+ * ssl3250a.c - ssl3250a flash/torch kernel driver
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+/* Implementation
+ * --------------
+ * The board level details about the device need to be provided in the board
+ * file with the ssl3250a_platform_data structure.
+ * Standard among NVC kernel drivers in this structure is:
+ * .cfg = Use the NVC_CFG_ defines that are in nvc_torch.h.
+ * Descriptions of the configuration options are with the defines.
+ * This value is typically 0.
+ * .num = The number of the instance of the device. This should start at 1
+ *        and increment for each device on the board. This number will be
+ * appended to the MISC driver name, Example: /dev/ssl3250a.1
+ * .sync = If there is a need to synchronize two devices, then this value is
+ * the number of the device instance this device is allowed to sync to.
+ * This is typically used for stereo applications.
+ * .dev_name = The MISC driver name the device registers as. If not used,
+ * then the part number of the device is used for the driver name.
+ * If using the NVC user driver then use the name found in this
+ * driver under _default_pdata.
+ *
+ * The following is specific to NVC kernel flash/torch drivers:
+ * .pinstate = a pointer to the nvc_torch_pin_state structure. This
+ * structure gives the details of which VI GPIO to use to trigger
+ *             the flash. The mask selects the pin and the values field
+ *             gives the level. For example, if VI GPIO pin 6 is used, then
+ * .mask = 0x0040
+ * .values = 0x0040
+ * If VI GPIO pin 0 is used, then
+ * .mask = 0x0001
+ * .values = 0x0001
+ *             This is typically just one pin, but some legacy code
+ *             suggests that more than one pin can be used.
+ * When the flash level is set, then the driver will return the
+ * value in values. When the flash level is off, the driver will
+ * return 0 for the values to deassert the signal.
+ * If a VI GPIO is not used, then the mask and values must be set
+ * to 0. The flash may then be triggered via I2C instead.
+ * However, a VI GPIO is strongly encouraged since it allows
+ * tighter timing with the picture taken as well as reduced power
+ *             by asserting the trigger signal only when needed.
+ * .max_amp_torch = The maximum torch value allowed. The value is 0 to
+ *                  _MAX_TORCH_LEVEL. This allows the amount of current
+ *                  used to be limited. If left blank then _MAX_TORCH_LEVEL
+ *                  will be used.
+ * .max_amp_flash = The maximum flash value allowed. The value is 0 to
+ *                  _MAX_FLASH_LEVEL. This allows the amount of current
+ *                  used to be limited. If left blank then _MAX_FLASH_LEVEL
+ *                  will be used.
+ *
+ * The following is specific to only this NVC kernel flash/torch driver:
+ * .gpio_act = Is the GPIO needed to control the ACT signal. If tied high,
+ * then this can be left blank.
+ *
+ * Power Requirements
+ * The board power file must contain the following labels for the power
+ * regulator(s) of this device:
+ * "vdd_i2c" = the power regulator for the I2C power.
+ * Note that this device is typically connected directly to the battery rail
+ * and does not need a source power regulator (vdd).
+ *
+ * The above values should be all that is needed to use the device with this
+ * driver. Modifications of this driver should not be needed.
+ */
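+
+/*
+ * Example (illustrative only; names and values are hypothetical): a board
+ * file wiring up one ssl3250a instance, triggered by VI GPIO pin 6, could
+ * supply platform data along these lines:
+ *
+ *	static struct nvc_torch_pin_state board_ssl3250a_pinstate = {
+ *		.mask	= 0x0040,
+ *		.values	= 0x0040,
+ *	};
+ *
+ *	static struct ssl3250a_platform_data board_ssl3250a_pdata = {
+ *		.cfg		= 0,
+ *		.num		= 1,
+ *		.sync		= 0,
+ *		.dev_name	= "torch",
+ *		.pinstate	= &board_ssl3250a_pinstate,
+ *		.gpio_act	= TEGRA_GPIO_PT4,
+ *	};
+ *
+ * TEGRA_GPIO_PT4 stands in for whatever GPIO drives the ACT signal on the
+ * board; omit .gpio_act if ACT is tied high. The optional .max_amp_torch
+ * and .max_amp_flash limits may be added the same way.
+ */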
+
+
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <media/nvc.h>
+#include <media/ssl3250a.h>
+
+#define SSL3250A_REG_AMP 0x00
+#define SSL3250A_REG_TMR 0x01
+#define SSL3250A_REG_STRB 0x02
+#define SSL3250A_REG_STS 0x03
+#define ssl3250a_flash_cap_size (sizeof(ssl3250a_flash_cap.numberoflevels) \
+ + (sizeof(ssl3250a_flash_cap.levels[0]) \
+ * (SSL3250A_MAX_FLASH_LEVEL + 1)))
+#define ssl3250a_torch_cap_size (sizeof(ssl3250a_torch_cap.numberoflevels) \
+ + (sizeof(ssl3250a_torch_cap.guidenum[0]) \
+ * (SSL3250A_MAX_TORCH_LEVEL + 1)))
+
+
+static struct nvc_torch_flash_capabilities ssl3250a_flash_cap = {
+ SSL3250A_MAX_FLASH_LEVEL + 1,
+ {
+ { 0, 0xFFFFFFFF, 0 },
+ { 215, 820, 20 },
+ { 230, 820, 20 },
+ { 245, 820, 20 },
+ { 260, 820, 20 },
+ { 275, 820, 20 },
+ { 290, 820, 20 },
+ { 305, 820, 20 },
+ { 320, 820, 20 },
+ { 335, 820, 20 },
+ { 350, 820, 20 },
+ { 365, 820, 20 },
+ { 380, 820, 20 },
+ { 395, 820, 20 },
+ { 410, 820, 20 },
+ { 425, 820, 20 },
+ { 440, 820, 20 },
+ { 455, 820, 20 },
+ { 470, 820, 20 },
+ { 485, 820, 20 },
+ { 500, 820, 20 }
+ }
+};
+
+static struct nvc_torch_torch_capabilities ssl3250a_torch_cap = {
+ SSL3250A_MAX_TORCH_LEVEL + 1,
+ {
+ 0,
+ 50,
+ 65,
+ 80,
+ 95,
+ 110,
+ 125,
+ 140,
+ 155,
+ 170,
+ 185,
+ 200
+ }
+};
+
+struct ssl3250a_info {
+ atomic_t in_use;
+ struct i2c_client *i2c_client;
+ struct ssl3250a_platform_data *pdata;
+ struct miscdevice miscdev;
+ struct list_head list;
+ int pwr_api;
+ int pwr_dev;
+ struct nvc_regulator vreg_i2c;
+ u8 s_mode;
+ struct ssl3250a_info *s_info;
+};
+
+static struct nvc_torch_pin_state ssl3250a_default_pinstate = {
+ .mask = 0x0000,
+ .values = 0x0000,
+};
+
+static struct ssl3250a_platform_data ssl3250a_default_pdata = {
+ .cfg = 0,
+ .num = 0,
+ .sync = 0,
+ .dev_name = "torch",
+ .pinstate = &ssl3250a_default_pinstate,
+ .max_amp_torch = SSL3250A_MAX_TORCH_LEVEL,
+ .max_amp_flash = SSL3250A_MAX_FLASH_LEVEL,
+};
+
+static LIST_HEAD(ssl3250a_info_list);
+static DEFINE_SPINLOCK(ssl3250a_spinlock);
+
+
+static int ssl3250a_i2c_rd(struct ssl3250a_info *info, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+
+ buf[0] = reg;
+ msg[0].addr = info->i2c_client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].addr = info->i2c_client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(info->i2c_client->adapter, msg, 2) != 2)
+ return -EIO;
+
+ *val = buf[1];
+ return 0;
+}
+
+static int ssl3250a_i2c_wr(struct ssl3250a_info *info, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = info->i2c_client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+ if (i2c_transfer(info->i2c_client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static void ssl3250a_gpio_act(struct ssl3250a_info *info, int val)
+{
+ int prev_val;
+
+ if (info->pdata->gpio_act) {
+ prev_val = gpio_get_value(info->pdata->gpio_act);
+ if (val != prev_val) {
+ gpio_set_value(info->pdata->gpio_act, val);
+ if (val)
+ mdelay(1); /* delay for device startup */
+ }
+ }
+}
+
+static void ssl3250a_pm_regulator_put(struct nvc_regulator *sreg)
+{
+ regulator_put(sreg->vreg);
+ sreg->vreg = NULL;
+}
+
+static int ssl3250a_pm_regulator_get(struct ssl3250a_info *info,
+ struct nvc_regulator *sreg,
+ char vreg_name[])
+{
+ int err = 0;
+
+ sreg->vreg_flag = 0;
+ sreg->vreg = regulator_get(&info->i2c_client->dev, vreg_name);
+ if (WARN_ON(IS_ERR(sreg->vreg))) {
+		err = PTR_ERR(sreg->vreg);
+		dev_err(&info->i2c_client->dev,
+			"%s err for regulator: %s err: %d\n",
+			__func__, vreg_name, err);
+		sreg->vreg = NULL;
+ } else {
+ sreg->vreg_name = vreg_name;
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ return err;
+}
+
+static int ssl3250a_pm_regulator_en(struct ssl3250a_info *info,
+ struct nvc_regulator *sreg)
+{
+ int err = 0;
+
+ if (!sreg->vreg_flag && (sreg->vreg != NULL)) {
+ err = regulator_enable(sreg->vreg);
+ if (!err) {
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ sreg->vreg_flag = 1;
+ err = 1; /* flag regulator state change */
+ } else {
+ dev_err(&info->i2c_client->dev,
+ "%s err, regulator: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ }
+ return err;
+}
+
+static int ssl3250a_pm_regulator_dis(struct ssl3250a_info *info,
+ struct nvc_regulator *sreg)
+{
+ int err = 0;
+
+ if (sreg->vreg_flag && (sreg->vreg != NULL)) {
+ err = regulator_disable(sreg->vreg);
+ if (!err)
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ else
+ dev_err(&info->i2c_client->dev,
+ "%s err, regulator: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ sreg->vreg_flag = 0;
+ return err;
+}
+
+static int ssl3250a_pm_wr(struct ssl3250a_info *info, int pwr)
+{
+ int err = 0;
+
+ if (pwr == info->pwr_dev)
+ return 0;
+
+ switch (pwr) {
+ case NVC_PWR_OFF:
+ if ((info->pdata->cfg & NVC_CFG_OFF2STDBY) ||
+ (info->pdata->cfg & NVC_CFG_BOOT_INIT)) {
+ pwr = NVC_PWR_STDBY;
+ } else {
+ ssl3250a_gpio_act(info, 0);
+ err = ssl3250a_pm_regulator_dis(info, &info->vreg_i2c);
+ break;
+ }
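+		/* fall through */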
+ case NVC_PWR_STDBY_OFF:
+ if ((info->pdata->cfg & NVC_CFG_OFF2STDBY) ||
+ (info->pdata->cfg & NVC_CFG_BOOT_INIT)) {
+ pwr = NVC_PWR_STDBY;
+ } else {
+ ssl3250a_gpio_act(info, 0);
+ err = ssl3250a_pm_regulator_en(info, &info->vreg_i2c);
+ break;
+ }
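+		/* fall through */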
+ case NVC_PWR_STDBY:
+ err = ssl3250a_pm_regulator_en(info, &info->vreg_i2c);
+ ssl3250a_gpio_act(info, 1);
+ err |= ssl3250a_i2c_wr(info, SSL3250A_REG_AMP, 0x00);
+ break;
+
+ case NVC_PWR_COMM:
+ case NVC_PWR_ON:
+ err = ssl3250a_pm_regulator_en(info, &info->vreg_i2c);
+ ssl3250a_gpio_act(info, 1);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0) {
+ dev_err(&info->i2c_client->dev, "%s error\n", __func__);
+ pwr = NVC_PWR_ERR;
+ }
+ info->pwr_dev = pwr;
+ if (err > 0)
+ return 0;
+
+ return err;
+}
+
+static int ssl3250a_pm_wr_s(struct ssl3250a_info *info, int pwr)
+{
+ int err1 = 0;
+ int err2 = 0;
+
+ if ((info->s_mode == NVC_SYNC_OFF) ||
+ (info->s_mode == NVC_SYNC_MASTER) ||
+ (info->s_mode == NVC_SYNC_STEREO))
+ err1 = ssl3250a_pm_wr(info, pwr);
+ if ((info->s_mode == NVC_SYNC_SLAVE) ||
+ (info->s_mode == NVC_SYNC_STEREO))
+ err2 = ssl3250a_pm_wr(info->s_info, pwr);
+ return err1 | err2;
+}
+
+static int ssl3250a_pm_api_wr(struct ssl3250a_info *info, int pwr)
+{
+ int err = 0;
+
+ if (!pwr || (pwr > NVC_PWR_ON))
+ return 0;
+
+ if (pwr > info->pwr_dev)
+ err = ssl3250a_pm_wr_s(info, pwr);
+ if (!err)
+ info->pwr_api = pwr;
+ else
+ info->pwr_api = NVC_PWR_ERR;
+ if (info->pdata->cfg & NVC_CFG_NOERR)
+ return 0;
+
+ return err;
+}
+
+static int ssl3250a_pm_dev_wr(struct ssl3250a_info *info, int pwr)
+{
+ if (pwr < info->pwr_api)
+ pwr = info->pwr_api;
+ return ssl3250a_pm_wr(info, pwr);
+}
+
+static void ssl3250a_pm_exit(struct ssl3250a_info *info)
+{
+ ssl3250a_pm_wr_s(info, NVC_PWR_OFF);
+ ssl3250a_pm_regulator_put(&info->vreg_i2c);
+}
+
+static void ssl3250a_pm_init(struct ssl3250a_info *info)
+{
+ ssl3250a_pm_regulator_get(info, &info->vreg_i2c, "vdd_i2c");
+}
+
+static int ssl3250a_dev_id(struct ssl3250a_info *info)
+{
+ u8 addr;
+ u8 reg;
+ int err;
+
+ ssl3250a_pm_dev_wr(info, NVC_PWR_COMM);
+ /* There isn't a device ID so we just check that all the registers
+	 * equal their startup defaults, which in this case are 0.
+ */
+ for (addr = 0; addr < SSL3250A_REG_STS; addr++) {
+ err = ssl3250a_i2c_rd(info, addr, &reg);
+ if (err) {
+ break;
+ } else {
+ if (reg) {
+ err = -ENODEV;
+ break;
+ }
+ }
+ }
+ ssl3250a_pm_dev_wr(info, NVC_PWR_OFF);
+ return err;
+}
+
+static int ssl3250a_param_rd(struct ssl3250a_info *info, long arg)
+{
+ struct nvc_param params;
+ struct nvc_torch_pin_state pinstate;
+ const void *data_ptr;
+ u32 data_size = 0;
+ int err;
+ u8 reg;
+
+ if (copy_from_user(&params,
+ (const void __user *)arg,
+ sizeof(struct nvc_param))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+		return -EFAULT;
+ }
+
+ if (info->s_mode == NVC_SYNC_SLAVE)
+ info = info->s_info;
+ switch (params.param) {
+ case NVC_PARAM_FLASH_CAPS:
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_CAPS\n", __func__);
+ data_ptr = &ssl3250a_flash_cap;
+ data_size = ssl3250a_flash_cap_size;
+ break;
+
+ case NVC_PARAM_FLASH_LEVEL:
+ ssl3250a_pm_dev_wr(info, NVC_PWR_COMM);
+ err = ssl3250a_i2c_rd(info, SSL3250A_REG_AMP, &reg);
+ ssl3250a_pm_dev_wr(info, NVC_PWR_OFF);
+ if (err < 0)
+ return err;
+
+ reg >>= 3; /*7:3=flash amps*/
+ reg &= 0x1F; /*4:0=flash amps*/
+ if (reg < 12) /*flash starts at 12*/
+ reg = 0; /*<12=torch or off*/
+ else
+ reg -= 11; /*create flash index*/
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_LEVEL: %u\n",
+ __func__,
+ (unsigned)ssl3250a_flash_cap.levels[reg].guidenum);
+ data_ptr = &ssl3250a_flash_cap.levels[reg].guidenum;
+ data_size = sizeof(ssl3250a_flash_cap.levels[reg].guidenum);
+ break;
+
+ case NVC_PARAM_TORCH_CAPS:
+ dev_dbg(&info->i2c_client->dev, "%s TORCH_CAPS\n", __func__);
+ data_ptr = &ssl3250a_torch_cap;
+ data_size = ssl3250a_torch_cap_size;
+ break;
+
+ case NVC_PARAM_TORCH_LEVEL:
+ ssl3250a_pm_dev_wr(info, NVC_PWR_COMM);
+ err = ssl3250a_i2c_rd(info, SSL3250A_REG_AMP, &reg);
+ ssl3250a_pm_dev_wr(info, NVC_PWR_OFF);
+ if (err < 0)
+ return err;
+
+ reg >>= 3; /*7:3=torch amps*/
+ reg &= 0x1F; /*4:0=torch amps*/
+ if (reg > 11) /*flash starts at 12*/
+ reg = 0; /*>11=flash mode (torch off)*/
+ dev_dbg(&info->i2c_client->dev, "%s TORCH_LEVEL: %u\n",
+ __func__,
+ (unsigned)ssl3250a_torch_cap.guidenum[reg]);
+ data_ptr = &ssl3250a_torch_cap.guidenum[reg];
+ data_size = sizeof(ssl3250a_torch_cap.guidenum[reg]);
+ break;
+
+ case NVC_PARAM_FLASH_PIN_STATE:
+ pinstate.mask = info->pdata->pinstate->mask;
+ ssl3250a_pm_dev_wr(info, NVC_PWR_COMM);
+ err = ssl3250a_i2c_rd(info, SSL3250A_REG_AMP, &reg);
+ ssl3250a_pm_dev_wr(info, NVC_PWR_OFF);
+ if (err < 0)
+ return err;
+
+ reg >>= 3; /*7:3=flash amps*/
+ reg &= 0x1F; /*4:0=flash amps*/
+ if (reg < 12) /*flash starts at 12*/
+ pinstate.values = 0; /*deassert strobe*/
+ else
+ /*assert strobe*/
+ pinstate.values = info->pdata->pinstate->values;
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_PIN_STATE: %x&%x\n",
+ __func__, pinstate.mask, pinstate.values);
+ data_ptr = &pinstate;
+ data_size = sizeof(struct nvc_torch_pin_state);
+ break;
+
+ case NVC_PARAM_STEREO:
+ dev_dbg(&info->i2c_client->dev, "%s STEREO: %d\n",
+ __func__, info->s_mode);
+ data_ptr = &info->s_mode;
+ data_size = sizeof(info->s_mode);
+ break;
+
+ default:
+ dev_err(&info->i2c_client->dev,
+ "%s unsupported parameter: %d\n",
+ __func__, params.param);
+ return -EINVAL;
+ }
+
+ if (params.sizeofvalue < data_size) {
+ dev_err(&info->i2c_client->dev,
+ "%s data size mismatch %d != %d\n",
+ __func__, params.sizeofvalue, data_size);
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void __user *)params.p_value,
+ data_ptr,
+ data_size)) {
+ dev_err(&info->i2c_client->dev,
+ "%s copy_to_user err line %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int ssl3250a_param_wr_s(struct ssl3250a_info *info,
+ struct nvc_param *params,
+ u8 val)
+{
+ int err;
+
+ switch (params->param) {
+ case NVC_PARAM_FLASH_LEVEL:
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_LEVEL: %d\n",
+ __func__, val);
+ ssl3250a_pm_dev_wr(info, NVC_PWR_ON);
+ if (val > ssl3250a_default_pdata.max_amp_flash)
+ val = ssl3250a_default_pdata.max_amp_flash;
+ /*Amp limit values are in the board-sensors file.*/
+ if (info->pdata->max_amp_flash &&
+ (val > info->pdata->max_amp_flash))
+ val = info->pdata->max_amp_flash;
+ if (val) {
+ val += 11; /*flash starts at 12*/
+ val <<= 3; /*7:3=flash/torch amps*/
+ }
+ err = ssl3250a_i2c_wr(info, SSL3250A_REG_AMP, val);
+ if (!val) /*turn pwr off if no flash && no pwr_api*/
+ ssl3250a_pm_dev_wr(info, NVC_PWR_OFF);
+ return err;
+
+ case NVC_PARAM_TORCH_LEVEL:
+ dev_dbg(&info->i2c_client->dev, "%s TORCH_LEVEL: %d\n",
+ __func__, val);
+ ssl3250a_pm_dev_wr(info, NVC_PWR_ON);
+ if (val > ssl3250a_default_pdata.max_amp_torch)
+ val = ssl3250a_default_pdata.max_amp_torch;
+ /*Amp limit values are in the board-sensors file.*/
+ if (info->pdata->max_amp_torch &&
+ (val > info->pdata->max_amp_torch))
+ val = info->pdata->max_amp_torch;
+ if (val)
+ val <<= 3; /*7:3=flash/torch amps*/
+ err = ssl3250a_i2c_wr(info, SSL3250A_REG_AMP, val);
+ if (!val) /*turn pwr off if no torch && no pwr_api*/
+ ssl3250a_pm_dev_wr(info, NVC_PWR_OFF);
+ return err;
+
+ case NVC_PARAM_FLASH_PIN_STATE:
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_PIN_STATE: %d\n",
+ __func__, val);
+ if (val)
+ val = 0x01; /*0:0=soft trigger*/
+ return ssl3250a_i2c_wr(info, SSL3250A_REG_STRB, val);
+
+ default:
+ dev_err(&info->i2c_client->dev,
+ "%s unsupported parameter: %d\n",
+ __func__, params->param);
+ return -EINVAL;
+ }
+}
+
+static int ssl3250a_param_wr(struct ssl3250a_info *info, long arg)
+{
+ struct nvc_param params;
+ u8 val;
+ int err = 0;
+
+ if (copy_from_user(&params,
+ (const void __user *)arg,
+ sizeof(struct nvc_param))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+		return -EFAULT;
+ }
+
+ if (copy_from_user(&val, (const void __user *)params.p_value,
+ sizeof(val))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+		return -EFAULT;
+ }
+
+ /* parameters independent of sync mode */
+ switch (params.param) {
+ case NVC_PARAM_STEREO:
+ dev_dbg(&info->i2c_client->dev, "%s STEREO: %d\n",
+ __func__, (int)val);
+ if (val == info->s_mode)
+ return 0;
+
+ switch (val) {
+ case NVC_SYNC_OFF:
+ info->s_mode = val;
+ if (info->s_info != NULL) {
+ info->s_info->s_mode = val;
+ ssl3250a_pm_wr(info->s_info, NVC_PWR_OFF);
+ }
+ break;
+
+ case NVC_SYNC_MASTER:
+ info->s_mode = val;
+ if (info->s_info != NULL)
+ info->s_info->s_mode = val;
+ break;
+
+ case NVC_SYNC_SLAVE:
+ case NVC_SYNC_STEREO:
+ if (info->s_info != NULL) {
+ /* sync power */
+ info->s_info->pwr_api = info->pwr_api;
+ err = ssl3250a_pm_wr(info->s_info,
+ info->pwr_dev);
+ if (!err) {
+ info->s_mode = val;
+ info->s_info->s_mode = val;
+ } else {
+ ssl3250a_pm_wr(info->s_info,
+ NVC_PWR_OFF);
+ err = -EIO;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (info->pdata->cfg & NVC_CFG_NOERR)
+ return 0;
+
+ return err;
+
+ default:
+ /* parameters dependent on sync mode */
+ switch (info->s_mode) {
+ case NVC_SYNC_OFF:
+ case NVC_SYNC_MASTER:
+ return ssl3250a_param_wr_s(info, &params, val);
+
+ case NVC_SYNC_SLAVE:
+ return ssl3250a_param_wr_s(info->s_info,
+ &params,
+ val);
+
+ case NVC_SYNC_STEREO:
+ err = ssl3250a_param_wr_s(info, &params, val);
+ if (!(info->pdata->cfg & NVC_CFG_SYNC_I2C_MUX))
+ err |= ssl3250a_param_wr_s(info->s_info,
+ &params,
+ val);
+ return err;
+
+ default:
+ dev_err(&info->i2c_client->dev, "%s %d internal err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
+}
+
+static long ssl3250a_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct ssl3250a_info *info = file->private_data;
+ int pwr;
+
+ switch (cmd) {
+ case NVC_IOCTL_PARAM_WR:
+ return ssl3250a_param_wr(info, arg);
+
+ case NVC_IOCTL_PARAM_RD:
+ return ssl3250a_param_rd(info, arg);
+
+ case NVC_IOCTL_PWR_WR:
+ /* This is a Guaranteed Level of Service (GLOS) call */
+ pwr = (int)arg * 2;
+ dev_dbg(&info->i2c_client->dev, "%s PWR_WR: %d\n",
+ __func__, pwr);
+ return ssl3250a_pm_api_wr(info, pwr);
+
+ case NVC_IOCTL_PWR_RD:
+ if (info->s_mode == NVC_SYNC_SLAVE)
+ pwr = info->s_info->pwr_api / 2;
+ else
+ pwr = info->pwr_api / 2;
+ dev_dbg(&info->i2c_client->dev, "%s PWR_RD: %d\n",
+ __func__, pwr);
+ if (copy_to_user((void __user *)arg, (const void *)&pwr,
+ sizeof(pwr))) {
+ dev_err(&info->i2c_client->dev,
+ "%s copy_to_user err line %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+
+ default:
+ dev_err(&info->i2c_client->dev, "%s unsupported ioctl: %x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+}
+
+static int ssl3250a_sync_en(int dev1, int dev2)
+{
+ struct ssl3250a_info *sync1 = NULL;
+ struct ssl3250a_info *sync2 = NULL;
+ struct ssl3250a_info *pos = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &ssl3250a_info_list, list) {
+ if (pos->pdata->num == dev1) {
+ sync1 = pos;
+ break;
+ }
+ }
+ pos = NULL;
+ list_for_each_entry_rcu(pos, &ssl3250a_info_list, list) {
+ if (pos->pdata->num == dev2) {
+ sync2 = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (sync1 != NULL)
+ sync1->s_info = NULL;
+ if (sync2 != NULL)
+ sync2->s_info = NULL;
+ if (!dev1 && !dev2)
+ return 0; /* no err if default instance 0's used */
+
+ if (dev1 == dev2)
+ return -EINVAL; /* err if sync instance is itself */
+
+ if ((sync1 != NULL) && (sync2 != NULL)) {
+ sync1->s_info = sync2;
+ sync2->s_info = sync1;
+ }
+
+ return 0;
+}
+
+static int ssl3250a_sync_dis(struct ssl3250a_info *info)
+{
+ if (info->s_info != NULL) {
+ info->s_info->s_mode = 0;
+ info->s_info->s_info = NULL;
+ info->s_mode = 0;
+ info->s_info = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ssl3250a_open(struct inode *inode, struct file *file)
+{
+ struct ssl3250a_info *info = NULL;
+ struct ssl3250a_info *pos = NULL;
+ int err;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &ssl3250a_info_list, list) {
+ if (pos->miscdev.minor == iminor(inode)) {
+ info = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!info)
+ return -ENODEV;
+
+ err = ssl3250a_sync_en(info->pdata->num, info->pdata->sync);
+ if (err == -EINVAL)
+ dev_err(&info->i2c_client->dev,
+ "%s err: invalid num (%u) and sync (%u) instance\n",
+ __func__, info->pdata->num, info->pdata->sync);
+ if (atomic_xchg(&info->in_use, 1))
+ return -EBUSY;
+
+ if (info->s_info != NULL) {
+ if (atomic_xchg(&info->s_info->in_use, 1))
+ return -EBUSY;
+ }
+
+ file->private_data = info;
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int ssl3250a_release(struct inode *inode, struct file *file)
+{
+ struct ssl3250a_info *info = file->private_data;
+
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ ssl3250a_pm_wr_s(info, NVC_PWR_OFF);
+ file->private_data = NULL;
+ WARN_ON(!atomic_xchg(&info->in_use, 0));
+ if (info->s_info != NULL)
+ WARN_ON(!atomic_xchg(&info->s_info->in_use, 0));
+ ssl3250a_sync_dis(info);
+ return 0;
+}
+
+static const struct file_operations ssl3250a_fileops = {
+ .owner = THIS_MODULE,
+ .open = ssl3250a_open,
+ .unlocked_ioctl = ssl3250a_ioctl,
+ .release = ssl3250a_release,
+};
+
+static void ssl3250a_del(struct ssl3250a_info *info)
+{
+ ssl3250a_pm_exit(info);
+ ssl3250a_sync_dis(info);
+ spin_lock(&ssl3250a_spinlock);
+ list_del_rcu(&info->list);
+ spin_unlock(&ssl3250a_spinlock);
+ synchronize_rcu();
+}
+
+static int ssl3250a_remove(struct i2c_client *client)
+{
+ struct ssl3250a_info *info = i2c_get_clientdata(client);
+
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ misc_deregister(&info->miscdev);
+ ssl3250a_del(info);
+ return 0;
+}
+
+static int ssl3250a_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ssl3250a_info *info;
+ char dname[16];
+ int err;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&client->dev, "%s: kzalloc error\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->i2c_client = client;
+ if (client->dev.platform_data) {
+ info->pdata = client->dev.platform_data;
+ } else {
+ info->pdata = &ssl3250a_default_pdata;
+ dev_dbg(&client->dev,
+ "%s No platform data. Using defaults.\n",
+ __func__);
+ }
+ i2c_set_clientdata(client, info);
+ INIT_LIST_HEAD(&info->list);
+ spin_lock(&ssl3250a_spinlock);
+ list_add_rcu(&info->list, &ssl3250a_info_list);
+ spin_unlock(&ssl3250a_spinlock);
+ ssl3250a_pm_init(info);
+ err = ssl3250a_dev_id(info);
+ if (err < 0) {
+ dev_err(&client->dev, "%s device not found\n", __func__);
+ if (info->pdata->cfg & NVC_CFG_NODEV) {
+ ssl3250a_del(info);
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(&client->dev, "%s device found\n", __func__);
+ }
+
+	if (info->pdata->dev_name != NULL)
+		strlcpy(dname, info->pdata->dev_name, sizeof(dname));
+	else
+		strlcpy(dname, "ssl3250a", sizeof(dname));
+	if (info->pdata->num)
+		snprintf(dname + strlen(dname), sizeof(dname) - strlen(dname),
+			 ".%u", info->pdata->num);
+ info->miscdev.name = dname;
+ info->miscdev.fops = &ssl3250a_fileops;
+ info->miscdev.minor = MISC_DYNAMIC_MINOR;
+ if (misc_register(&info->miscdev)) {
+ dev_err(&client->dev, "%s unable to register misc device %s\n",
+ __func__, dname);
+ ssl3250a_del(info);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id ssl3250a_id[] = {
+ { "ssl3250a", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, ssl3250a_id);
+
+static struct i2c_driver ssl3250a_i2c_driver = {
+ .driver = {
+ .name = "ssl3250a",
+ .owner = THIS_MODULE,
+ },
+ .id_table = ssl3250a_id,
+ .probe = ssl3250a_probe,
+ .remove = ssl3250a_remove,
+};
+
+static int __init ssl3250a_init(void)
+{
+ return i2c_add_driver(&ssl3250a_i2c_driver);
+}
+
+static void __exit ssl3250a_exit(void)
+{
+ i2c_del_driver(&ssl3250a_i2c_driver);
+}
+
+module_init(ssl3250a_init);
+module_exit(ssl3250a_exit);
+
diff --git a/drivers/media/video/tegra/tegra_camera.c b/drivers/media/video/tegra/tegra_camera.c
new file mode 100644
index 000000000000..d9b5918cf08c
--- /dev/null
+++ b/drivers/media/video/tegra/tegra_camera.c
@@ -0,0 +1,553 @@
+/*
+ * drivers/media/video/tegra/tegra_camera.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <mach/iomap.h>
+#include <mach/clk.h>
+#include <mach/powergate.h>
+
+#include <media/tegra_camera.h>
+
+/* Eventually this should handle all clock and reset calls for the isp, vi,
+ * vi_sensor, and csi modules, replacing nvrm and nvos completely for camera
+ */
+#define TEGRA_CAMERA_NAME "tegra_camera"
+
+struct tegra_camera_dev {
+ struct device *dev;
+ struct miscdevice misc_dev;
+ struct clk *isp_clk;
+ struct clk *vi_clk;
+ struct clk *vi_sensor_clk;
+ struct clk *csus_clk;
+ struct clk *csi_clk;
+ struct regulator *reg;
+ struct tegra_camera_clk_info info;
+ struct mutex tegra_camera_lock;
+ int power_refcnt;
+};
+
+struct tegra_camera_block {
+ int (*enable) (struct tegra_camera_dev *dev);
+ int (*disable) (struct tegra_camera_dev *dev);
+ bool is_enabled;
+};
+
+static int tegra_camera_enable_isp(struct tegra_camera_dev *dev)
+{
+ return clk_enable(dev->isp_clk);
+}
+
+static int tegra_camera_disable_isp(struct tegra_camera_dev *dev)
+{
+ clk_disable(dev->isp_clk);
+ return 0;
+}
+
+static int tegra_camera_enable_vi(struct tegra_camera_dev *dev)
+{
+ int ret = 0;
+
+ ret |= clk_enable(dev->vi_clk);
+ ret |= clk_enable(dev->vi_sensor_clk);
+ ret |= clk_enable(dev->csus_clk);
+ return ret;
+}
+
+static int tegra_camera_disable_vi(struct tegra_camera_dev *dev)
+{
+ clk_disable(dev->vi_clk);
+ clk_disable(dev->vi_sensor_clk);
+ clk_disable(dev->csus_clk);
+ return 0;
+}
+
+static int tegra_camera_enable_csi(struct tegra_camera_dev *dev)
+{
+ return clk_enable(dev->csi_clk);
+}
+
+static int tegra_camera_disable_csi(struct tegra_camera_dev *dev)
+{
+ clk_disable(dev->csi_clk);
+ return 0;
+}
+
+struct tegra_camera_block tegra_camera_block[] = {
+ [TEGRA_CAMERA_MODULE_ISP] = {tegra_camera_enable_isp,
+ tegra_camera_disable_isp, false},
+ [TEGRA_CAMERA_MODULE_VI] = {tegra_camera_enable_vi,
+ tegra_camera_disable_vi, false},
+ [TEGRA_CAMERA_MODULE_CSI] = {tegra_camera_enable_csi,
+ tegra_camera_disable_csi, false},
+};
+
+#define TEGRA_CAMERA_VI_CLK_SEL_INTERNAL 0
+#define TEGRA_CAMERA_VI_CLK_SEL_EXTERNAL (1<<24)
+#define TEGRA_CAMERA_PD2VI_CLK_SEL_VI_SENSOR_CLK (1<<25)
+#define TEGRA_CAMERA_PD2VI_CLK_SEL_PD2VI_CLK 0
+
+static bool tegra_camera_enabled(struct tegra_camera_dev *dev)
+{
+ bool ret = false;
+
+ mutex_lock(&dev->tegra_camera_lock);
+ ret = tegra_camera_block[TEGRA_CAMERA_MODULE_ISP].is_enabled == true ||
+ tegra_camera_block[TEGRA_CAMERA_MODULE_VI].is_enabled == true ||
+ tegra_camera_block[TEGRA_CAMERA_MODULE_CSI].is_enabled == true;
+ mutex_unlock(&dev->tegra_camera_lock);
+ return ret;
+}
+
+static int tegra_camera_clk_set_rate(struct tegra_camera_dev *dev)
+{
+ u32 offset;
+ struct clk *clk;
+ struct tegra_camera_clk_info *info = &dev->info;
+
+ if (!info) {
+ dev_err(dev->dev,
+ "%s: no clock info %d\n",
+ __func__, info->id);
+ return -EINVAL;
+ }
+
+ if (info->id != TEGRA_CAMERA_MODULE_VI) {
+ dev_err(dev->dev,
+			"%s: set rate only applies to vi module %d\n",
+ __func__, info->id);
+ return -EINVAL;
+ }
+
+ switch (info->clk_id) {
+ case TEGRA_CAMERA_VI_CLK:
+ clk = dev->vi_clk;
+ offset = 0x148;
+ break;
+ case TEGRA_CAMERA_VI_SENSOR_CLK:
+ clk = dev->vi_sensor_clk;
+ offset = 0x1a8;
+ break;
+ default:
+ dev_err(dev->dev,
+ "%s: invalid clk id for set rate %d\n",
+ __func__, info->clk_id);
+ return -EINVAL;
+ }
+
+ clk_set_rate(clk, info->rate);
+
+ if (info->clk_id == TEGRA_CAMERA_VI_CLK) {
+ u32 val = 0x2;
+ void __iomem *car = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+ void __iomem *apb_misc = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+
+ if (info->flag == TEGRA_CAMERA_ENABLE_PD2VI_CLK) {
+ val |= TEGRA_CAMERA_PD2VI_CLK_SEL_VI_SENSOR_CLK;
+ }
+
+ writel(val, car + offset);
+
+ val = readl(apb_misc + 0x42c);
+ writel(val | 0x1, apb_misc + 0x42c);
+ }
+
+ info->rate = clk_get_rate(clk);
+	return 0;
+}
+
+static int tegra_camera_reset(struct tegra_camera_dev *dev, uint id)
+{
+ struct clk *clk;
+
+ switch (id) {
+ case TEGRA_CAMERA_MODULE_VI:
+ clk = dev->vi_clk;
+ break;
+ case TEGRA_CAMERA_MODULE_ISP:
+ clk = dev->isp_clk;
+ break;
+ case TEGRA_CAMERA_MODULE_CSI:
+ clk = dev->csi_clk;
+ break;
+ default:
+ return -EINVAL;
+ }
+ tegra_periph_reset_assert(clk);
+ udelay(10);
+ tegra_periph_reset_deassert(clk);
+
+ return 0;
+}
+
+static int tegra_camera_power_on(struct tegra_camera_dev *dev)
+{
+ int ret = 0;
+
+ if (dev->power_refcnt++ == 0) {
+ /* Enable external power */
+ if (dev->reg) {
+ ret = regulator_enable(dev->reg);
+ if (ret) {
+ dev_err(dev->dev,
+ "%s: enable csi regulator failed.\n",
+ __func__);
+ return ret;
+ }
+ }
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Unpowergate VE */
+ ret = tegra_unpowergate_partition(TEGRA_POWERGATE_VENC);
+ if (ret)
+ dev_err(dev->dev,
+ "%s: unpowergate failed.\n",
+ __func__);
+#endif
+ }
+
+ return ret;
+}
+
+static int tegra_camera_power_off(struct tegra_camera_dev *dev)
+{
+ int ret = 0;
+
+ if (--dev->power_refcnt == 0) {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Powergate VE */
+ ret = tegra_powergate_partition(TEGRA_POWERGATE_VENC);
+ if (ret)
+ dev_err(dev->dev,
+ "%s: powergate failed.\n",
+ __func__);
+#endif
+ /* Disable external power */
+ if (dev->reg) {
+ ret = regulator_disable(dev->reg);
+ if (ret) {
+ dev_err(dev->dev,
+ "%s: disable csi regulator failed.\n",
+ __func__);
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static long tegra_camera_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ uint id;
+ struct tegra_camera_dev *dev = file->private_data;
+
+ /* first element of arg must be u32 with id of module to talk to */
+ if (copy_from_user(&id, (const void __user *)arg, sizeof(uint))) {
+ dev_err(dev->dev,
+ "%s: Failed to copy arg from user", __func__);
+ return -EFAULT;
+ }
+
+ if (id >= ARRAY_SIZE(tegra_camera_block)) {
+ dev_err(dev->dev,
+			"%s: Invalid id to tegra_camera ioctl %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case TEGRA_CAMERA_IOCTL_ENABLE:
+ {
+ int ret = 0;
+
+ mutex_lock(&dev->tegra_camera_lock);
+ /* Unpowergate camera blocks (vi, csi and isp)
+ before enabling clocks */
+ ret = tegra_camera_power_on(dev);
+ if (ret) {
+ dev->power_refcnt = 0;
+ mutex_unlock(&dev->tegra_camera_lock);
+ return ret;
+ }
+
+ if (!tegra_camera_block[id].is_enabled) {
+ ret = tegra_camera_block[id].enable(dev);
+ tegra_camera_block[id].is_enabled = true;
+ }
+ mutex_unlock(&dev->tegra_camera_lock);
+ return ret;
+ }
+ case TEGRA_CAMERA_IOCTL_DISABLE:
+ {
+ int ret = 0;
+
+ mutex_lock(&dev->tegra_camera_lock);
+ if (tegra_camera_block[id].is_enabled) {
+ ret = tegra_camera_block[id].disable(dev);
+ tegra_camera_block[id].is_enabled = false;
+ }
+ /* Powergate camera blocks (vi, csi and isp)
+ after disabling all the clocks */
+ if (!ret) {
+ ret = tegra_camera_power_off(dev);
+ }
+ mutex_unlock(&dev->tegra_camera_lock);
+ return ret;
+ }
+ case TEGRA_CAMERA_IOCTL_CLK_SET_RATE:
+ {
+ int ret;
+
+ if (copy_from_user(&dev->info, (const void __user *)arg,
+ sizeof(struct tegra_camera_clk_info))) {
+ dev_err(dev->dev,
+ "%s: Failed to copy arg from user\n", __func__);
+ return -EFAULT;
+ }
+ ret = tegra_camera_clk_set_rate(dev);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &dev->info,
+ sizeof(struct tegra_camera_clk_info))) {
+ dev_err(dev->dev,
+ "%s: Failed to copy arg to user\n", __func__);
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case TEGRA_CAMERA_IOCTL_RESET:
+ return tegra_camera_reset(dev, id);
+ default:
+ dev_err(dev->dev,
+ "%s: Unknown tegra_camera ioctl.\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
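+
+/*
+ * Example (hypothetical user-space sketch): every ioctl argument starts
+ * with the u32 module id, so enabling the VI block and then programming
+ * its sensor clock could look roughly like this (error handling omitted;
+ * the 24 MHz rate is only an illustration):
+ *
+ *	uint id = TEGRA_CAMERA_MODULE_VI;
+ *	struct tegra_camera_clk_info clk_info = {
+ *		.id	= TEGRA_CAMERA_MODULE_VI,
+ *		.clk_id	= TEGRA_CAMERA_VI_SENSOR_CLK,
+ *		.rate	= 24000000,
+ *	};
+ *	int fd = open("/dev/tegra_camera", O_RDWR);
+ *
+ *	ioctl(fd, TEGRA_CAMERA_IOCTL_ENABLE, &id);
+ *	ioctl(fd, TEGRA_CAMERA_IOCTL_CLK_SET_RATE, &clk_info);
+ *
+ * The driver writes the rate actually achieved back into the same
+ * structure after TEGRA_CAMERA_IOCTL_CLK_SET_RATE.
+ */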
+
+static int tegra_camera_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct tegra_camera_dev *dev = container_of(miscdev,
+ struct tegra_camera_dev,
+ misc_dev);
+ dev_info(dev->dev, "%s\n", __func__);
+ file->private_data = dev;
+
+ return 0;
+}
+
+static int tegra_camera_release(struct inode *inode, struct file *file)
+{
+ int i, err;
+ struct tegra_camera_dev *dev = file->private_data;
+
+ dev_info(dev->dev, "%s\n", __func__);
+ for (i = 0; i < ARRAY_SIZE(tegra_camera_block); i++)
+ if (tegra_camera_block[i].is_enabled) {
+ tegra_camera_block[i].disable(dev);
+ tegra_camera_block[i].is_enabled = false;
+ }
+
+ /* If camera blocks are not powergated yet, do it now */
+ if (dev->power_refcnt > 0) {
+ mutex_lock(&dev->tegra_camera_lock);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ err = tegra_powergate_partition(TEGRA_POWERGATE_VENC);
+ if (err)
+ dev_err(dev->dev, "%s: powergate failed.\n", __func__);
+#endif
+ dev->power_refcnt = 0;
+ mutex_unlock(&dev->tegra_camera_lock);
+ }
+
+ return 0;
+}
+
+static const struct file_operations tegra_camera_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_camera_open,
+ .unlocked_ioctl = tegra_camera_ioctl,
+ .release = tegra_camera_release,
+};
+
+static int tegra_camera_clk_get(struct platform_device *pdev, const char *name,
+ struct clk **clk)
+{
+ *clk = clk_get(&pdev->dev, name);
+	if (IS_ERR_OR_NULL(*clk)) {
+		int err = *clk ? PTR_ERR(*clk) : -ENOENT;
+
+		dev_err(&pdev->dev, "%s: unable to get clock for %s\n",
+			__func__, name);
+		*clk = NULL;
+		return err;
+	}
+ return 0;
+}
+
+static int tegra_camera_probe(struct platform_device *pdev)
+{
+ int err;
+ struct tegra_camera_dev *dev;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct tegra_camera_dev),
+ GFP_KERNEL);
+ if (!dev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ goto alloc_err;
+ }
+
+ mutex_init(&dev->tegra_camera_lock);
+
+ /* Powergate VE when boot */
+ mutex_lock(&dev->tegra_camera_lock);
+ dev->power_refcnt = 0;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ err = tegra_powergate_partition(TEGRA_POWERGATE_VENC);
+ if (err)
+ dev_err(&pdev->dev, "%s: powergate failed.\n", __func__);
+#endif
+ mutex_unlock(&dev->tegra_camera_lock);
+
+ dev->dev = &pdev->dev;
+
+ /* Get regulator pointer */
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ dev->reg = regulator_get(&pdev->dev, "vcsi");
+#else
+ dev->reg = regulator_get(&pdev->dev, "avdd_dsi_csi");
+#endif
+ if (IS_ERR_OR_NULL(dev->reg)) {
+ dev_err(&pdev->dev, "%s: couldn't get regulator\n", __func__);
+		return dev->reg ? PTR_ERR(dev->reg) : -ENODEV;
+ }
+
+ dev->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ dev->misc_dev.name = TEGRA_CAMERA_NAME;
+ dev->misc_dev.fops = &tegra_camera_fops;
+ dev->misc_dev.parent = &pdev->dev;
+
+ err = misc_register(&dev->misc_dev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: Unable to register misc device!\n",
+ TEGRA_CAMERA_NAME);
+ goto misc_register_err;
+ }
+
+ err = tegra_camera_clk_get(pdev, "isp", &dev->isp_clk);
+ if (err)
+ goto misc_register_err;
+ err = tegra_camera_clk_get(pdev, "vi", &dev->vi_clk);
+ if (err)
+ goto vi_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "vi_sensor", &dev->vi_sensor_clk);
+ if (err)
+ goto vi_sensor_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "csus", &dev->csus_clk);
+ if (err)
+ goto csus_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "csi", &dev->csi_clk);
+ if (err)
+ goto csi_clk_get_err;
+
+ /* dev is set in order to restore in _remove */
+ platform_set_drvdata(pdev, dev);
+
+ return 0;
+
+csi_clk_get_err:
+ clk_put(dev->csus_clk);
+csus_clk_get_err:
+ clk_put(dev->vi_sensor_clk);
+vi_sensor_clk_get_err:
+ clk_put(dev->vi_clk);
+vi_clk_get_err:
+ clk_put(dev->isp_clk);
+misc_register_err:
+ regulator_put(dev->reg);
+alloc_err:
+ return err;
+}
+
+static int tegra_camera_remove(struct platform_device *pdev)
+{
+ struct tegra_camera_dev *dev = platform_get_drvdata(pdev);
+
+ clk_put(dev->isp_clk);
+ clk_put(dev->vi_clk);
+ clk_put(dev->vi_sensor_clk);
+ clk_put(dev->csus_clk);
+ clk_put(dev->csi_clk);
+
+ misc_deregister(&dev->misc_dev);
+ regulator_put(dev->reg);
+ mutex_destroy(&dev->tegra_camera_lock);
+
+ return 0;
+}
+
+static int tegra_camera_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_camera_dev *dev = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (tegra_camera_enabled(dev)) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+			"tegra_camera cannot suspend; "
+			"an application is still holding the camera\n");
+ }
+
+ return ret;
+}
+
+static int tegra_camera_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver tegra_camera_driver = {
+ .probe = tegra_camera_probe,
+ .remove = tegra_camera_remove,
+ .suspend = tegra_camera_suspend,
+ .resume = tegra_camera_resume,
+ .driver = { .name = TEGRA_CAMERA_NAME }
+};
+
+static int __init tegra_camera_init(void)
+{
+ return platform_driver_register(&tegra_camera_driver);
+}
+
+static void __exit tegra_camera_exit(void)
+{
+ platform_driver_unregister(&tegra_camera_driver);
+}
+
+module_init(tegra_camera_init);
+module_exit(tegra_camera_exit);
+
diff --git a/drivers/media/video/tegra/tps61050.c b/drivers/media/video/tegra/tps61050.c
new file mode 100644
index 000000000000..e30ebb435f2d
--- /dev/null
+++ b/drivers/media/video/tegra/tps61050.c
@@ -0,0 +1,989 @@
+/*
+ * tps61050.c - tps61050 flash/torch kernel driver
+ *
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+/* Implementation
+ * --------------
+ * The board level details about the device need to be provided in the board
+ * file with the tps61050_platform_data structure.
+ * Standard among NVC kernel drivers in this structure is:
+ * .cfg = Use the NVC_CFG_ defines that are in nvc_torch.h.
+ * Descriptions of the configuration options are with the defines.
+ * This value is typically 0.
+ * .num = The number of the instance of the device. This should start at 1 and
+ * increment for each device on the board. This number will be
+ * appended to the MISC driver name, Example: /dev/tps61050.1
+ * .sync = If there is a need to synchronize two devices, then this value is
+ * the number of the device instance this device is allowed to sync to.
+ * This is typically used for stereo applications.
+ * .dev_name = The MISC driver name the device registers as. If not used,
+ * then the part number of the device is used for the driver name.
+ * If using the NVC user driver then use the name found in this
+ * driver under _default_pdata.
+ *
+ * The following is specific to NVC kernel flash/torch drivers:
+ * .pinstate = a pointer to the nvc_torch_pin_state structure. This
+ * structure gives the details of which VI GPIO to use to trigger
+ * the flash. The mask tells which pin and the values is the
+ * level. For example, if VI GPIO pin 6 is used, then
+ * .mask = 0x0040
+ * .values = 0x0040
+ * If VI GPIO pin 0 is used, then
+ * .mask = 0x0001
+ * .values = 0x0001
+ * This is typically just one pin, but there is some legacy
+ * here that suggests more than one pin can be used.
+ * When the flash level is set, then the driver will return the
+ * value in values. When the flash level is off, the driver will
+ * return 0 for the values to deassert the signal.
+ * If a VI GPIO is not used, then the mask and values must be set
+ * to 0. The flash may then be triggered via I2C instead.
+ * However, a VI GPIO is strongly encouraged since it allows
+ * tighter timing with the picture taken as well as reduced power,
+ * since the trigger signal is asserted only when needed.
+ * .max_amp_torch = The maximum torch value allowed. The value is 0 to
+ * _MAX_TORCH_LEVEL. This is to allow a limit to the amount
+ * of amps used. If left blank then _MAX_TORCH_LEVEL will be
+ * used.
+ * .max_amp_flash = The maximum flash value allowed. The value is 0 to
+ * _MAX_FLASH_LEVEL. This is to allow a limit to the amount
+ * of amps used. If left blank then _MAX_FLASH_LEVEL will be
+ * used.
+ *
+ * The following is specific to only this NVC kernel flash/torch driver:
+ * N/A
+ *
+ * Power Requirements
+ * The board power file must contain the following labels for the power
+ * regulator(s) of this device:
+ * "vdd_i2c" = the power regulator for the I2C power.
+ * Note that this device is typically connected directly to the battery rail
+ * and does not need a source power regulator (vdd).
+ *
+ * The above values should be all that is needed to use the device with this
+ * driver. Modifications of this driver should not be needed.
+ */
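+
+/* Illustrative example only (not part of the driver interface): a board
+ * file might describe one instance of this device roughly as follows.
+ * All values, and the I2C bus/address in the regulator mapping, are
+ * hypothetical and must match the actual board design.
+ *
+ *	static struct nvc_torch_pin_state board_tps61050_pinstate = {
+ *		.mask	= 0x0040,	(VI GPIO pin 6)
+ *		.values	= 0x0040,
+ *	};
+ *
+ *	static struct tps61050_platform_data board_tps61050_pdata = {
+ *		.cfg		= 0,
+ *		.num		= 1,	(registers as /dev/torch.1)
+ *		.sync		= 0,
+ *		.dev_name	= "torch",
+ *		.pinstate	= &board_tps61050_pinstate,
+ *	};
+ *
+ * and the board power file would provide the I2C supply, for example:
+ *	REGULATOR_SUPPLY("vdd_i2c", "2-0033"),
+ */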
+
+
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <media/nvc.h>
+#include <media/tps61050.h>
+
+#define TPS61050_REG0 0x00
+#define TPS61050_REG1 0x01
+#define TPS61050_REG2 0x02
+#define TPS61050_REG3 0x03
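+
+/* Size of the capability data copied to user space in the _CAPS reads:
+ * the level-count field plus one entry per supported level (0..MAX).
+ */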
+#define tps61050_flash_cap_size (sizeof(tps61050_flash_cap.numberoflevels) \
+ + (sizeof(tps61050_flash_cap.levels[0]) \
+ * (TPS61050_MAX_FLASH_LEVEL + 1)))
+#define tps61050_torch_cap_size (sizeof(tps61050_torch_cap.numberoflevels) \
+ + (sizeof(tps61050_torch_cap.guidenum[0]) \
+ * (TPS61050_MAX_TORCH_LEVEL + 1)))
+
+
+static struct nvc_torch_flash_capabilities tps61050_flash_cap = {
+ TPS61050_MAX_FLASH_LEVEL + 1,
+ {
+ { 0, 0xFFFFFFFF, 0 },
+ { 150, 558, 2 },
+ { 200, 558, 2 },
+ { 300, 558, 2 },
+ { 400, 558, 2 },
+ { 500, 558, 2 },
+ { 700, 558, 2 },
+ { 900, 558, 2 },
+ { 900, 558, 2 }
+ }
+};
+
+static struct nvc_torch_torch_capabilities tps61050_torch_cap = {
+ TPS61050_MAX_TORCH_LEVEL + 1,
+ {
+ 0,
+ 50,
+ 75,
+ 100,
+ 150,
+ 200,
+ 491,
+ 491
+ }
+};
+
+struct tps61050_info {
+ atomic_t in_use;
+ struct i2c_client *i2c_client;
+ struct tps61050_platform_data *pdata;
+ struct miscdevice miscdev;
+ struct list_head list;
+ int pwr_api;
+ int pwr_dev;
+ struct nvc_regulator vreg_i2c;
+ u8 s_mode;
+ struct tps61050_info *s_info;
+};
+
+static struct nvc_torch_pin_state tps61050_default_pinstate = {
+ .mask = 0x0000,
+ .values = 0x0000,
+};
+
+static struct tps61050_platform_data tps61050_default_pdata = {
+ .cfg = 0,
+ .num = 0,
+ .sync = 0,
+ .dev_name = "torch",
+ .pinstate = &tps61050_default_pinstate,
+ .max_amp_torch = TPS61050_MAX_TORCH_LEVEL,
+ .max_amp_flash = TPS61050_MAX_FLASH_LEVEL,
+};
+
+static LIST_HEAD(tps61050_info_list);
+static DEFINE_SPINLOCK(tps61050_spinlock);
+
+
+static int tps61050_i2c_rd(struct tps61050_info *info, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+
+ buf[0] = reg;
+ msg[0].addr = info->i2c_client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].addr = info->i2c_client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(info->i2c_client->adapter, msg, 2) != 2)
+ return -EIO;
+
+ *val = buf[1];
+ return 0;
+}
+
+static int tps61050_i2c_wr(struct tps61050_info *info, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = info->i2c_client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+ if (i2c_transfer(info->i2c_client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static void tps61050_pm_regulator_put(struct nvc_regulator *sreg)
+{
+ regulator_put(sreg->vreg);
+ sreg->vreg = NULL;
+}
+
+static int tps61050_pm_regulator_get(struct tps61050_info *info,
+ struct nvc_regulator *sreg,
+ char vreg_name[])
+{
+ int err = 0;
+
+ sreg->vreg_flag = 0;
+ sreg->vreg = regulator_get(&info->i2c_client->dev, vreg_name);
+ if (IS_ERR_OR_NULL(sreg->vreg)) {
+ dev_err(&info->i2c_client->dev,
+ "%s err for regulator: %s err: %d\n",
+ __func__, vreg_name, (int)PTR_ERR(sreg->vreg));
+ err = PTR_ERR(sreg->vreg);
+ sreg->vreg = NULL;
+ } else {
+ sreg->vreg_name = vreg_name;
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ return err;
+}
+
+static int tps61050_pm_regulator_en(struct tps61050_info *info,
+ struct nvc_regulator *sreg)
+{
+ int err = 0;
+
+ if (!sreg->vreg_flag && (sreg->vreg != NULL)) {
+ err = regulator_enable(sreg->vreg);
+ if (!err) {
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ sreg->vreg_flag = 1;
+ err = 1; /* flag regulator state change */
+ mdelay(5); /* device powerup delay */
+ } else {
+ dev_err(&info->i2c_client->dev,
+ "%s err, regulator: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ }
+ return err;
+}
+
+static int tps61050_pm_regulator_dis(struct tps61050_info *info,
+ struct nvc_regulator *sreg)
+{
+ int err = 0;
+
+ if (sreg->vreg_flag && (sreg->vreg != NULL)) {
+ err = regulator_disable(sreg->vreg);
+ if (!err)
+ dev_dbg(&info->i2c_client->dev,
+ "%s vreg_name: %s\n",
+ __func__, sreg->vreg_name);
+ else
+ dev_err(&info->i2c_client->dev,
+ "%s err, regulator: %s\n",
+ __func__, sreg->vreg_name);
+ }
+ sreg->vreg_flag = 0;
+ return err;
+}
+
+static int tps61050_pm_wr(struct tps61050_info *info, int pwr)
+{
+ int err = 0;
+ u8 reg;
+
+ if (pwr == info->pwr_dev)
+ return 0;
+
+ switch (pwr) {
+ case NVC_PWR_OFF:
+ if ((info->pdata->cfg & NVC_CFG_OFF2STDBY) ||
+ (info->pdata->cfg & NVC_CFG_BOOT_INIT)) {
+ pwr = NVC_PWR_STDBY;
+ } else {
+ err = tps61050_pm_regulator_en(info, &info->vreg_i2c);
+ err |= tps61050_i2c_wr(info, TPS61050_REG0, 0x00);
+ err |= tps61050_i2c_wr(info, TPS61050_REG1, 0x00);
+ err |= tps61050_pm_regulator_dis(info, &info->vreg_i2c);
+ break;
+ }
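+ /* fall through: pwr was changed to NVC_PWR_STDBY above */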
+ case NVC_PWR_STDBY_OFF:
+ if ((info->pdata->cfg & NVC_CFG_OFF2STDBY) ||
+ (info->pdata->cfg & NVC_CFG_BOOT_INIT)) {
+ pwr = NVC_PWR_STDBY;
+ } else {
+ err = tps61050_pm_regulator_en(info, &info->vreg_i2c);
+ err |= tps61050_i2c_wr(info, TPS61050_REG0, 0x00);
+ err |= tps61050_i2c_wr(info, TPS61050_REG1, 0x00);
+ break;
+ }
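+ /* fall through: pwr was changed to NVC_PWR_STDBY above */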
+ case NVC_PWR_STDBY:
+ err = tps61050_pm_regulator_en(info, &info->vreg_i2c);
+ err |= tps61050_i2c_rd(info, TPS61050_REG0, &reg);
+ reg &= 0x3F; /* 7:6 = mode */
+ err |= tps61050_i2c_wr(info, TPS61050_REG0, reg);
+ break;
+
+ case NVC_PWR_COMM:
+ case NVC_PWR_ON:
+ err = tps61050_pm_regulator_en(info, &info->vreg_i2c);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0) {
+ dev_err(&info->i2c_client->dev, "%s error\n", __func__);
+ pwr = NVC_PWR_ERR;
+ }
+ info->pwr_dev = pwr;
+ if (err > 0)
+ return 0;
+
+ return err;
+}
+
+static int tps61050_pm_wr_s(struct tps61050_info *info, int pwr)
+{
+ int err1 = 0;
+ int err2 = 0;
+
+ if ((info->s_mode == NVC_SYNC_OFF) ||
+ (info->s_mode == NVC_SYNC_MASTER) ||
+ (info->s_mode == NVC_SYNC_STEREO))
+ err1 = tps61050_pm_wr(info, pwr);
+ if ((info->s_mode == NVC_SYNC_SLAVE) ||
+ (info->s_mode == NVC_SYNC_STEREO))
+ err2 = tps61050_pm_wr(info->s_info, pwr);
+ return err1 | err2;
+}
+
+static int tps61050_pm_api_wr(struct tps61050_info *info, int pwr)
+{
+ int err = 0;
+
+ if (!pwr || (pwr > NVC_PWR_ON))
+ return 0;
+
+ if (pwr > info->pwr_dev)
+ err = tps61050_pm_wr_s(info, pwr);
+ if (!err)
+ info->pwr_api = pwr;
+ else
+ info->pwr_api = NVC_PWR_ERR;
+ if (info->pdata->cfg & NVC_CFG_NOERR)
+ return 0;
+
+ return err;
+}
+
+static int tps61050_pm_dev_wr(struct tps61050_info *info, int pwr)
+{
+ if (pwr < info->pwr_api)
+ pwr = info->pwr_api;
+ return tps61050_pm_wr(info, pwr);
+}
+
+static void tps61050_pm_exit(struct tps61050_info *info)
+{
+ tps61050_pm_wr_s(info, NVC_PWR_OFF);
+ tps61050_pm_regulator_put(&info->vreg_i2c);
+}
+
+static void tps61050_pm_init(struct tps61050_info *info)
+{
+ tps61050_pm_regulator_get(info, &info->vreg_i2c, "vdd_i2c");
+}
+
+struct tps61050_reg_init {
+ u8 mask;
+ u8 val;
+};
+
+static struct tps61050_reg_init tps61050_reg_init_id[] = {
+ {0xC0, 0x00},
+ {0xC0, 0x00},
+ {0x87, 0x00},
+ {0xFF, 0xD1},
+};
+
+static int tps61050_dev_id(struct tps61050_info *info)
+{
+ u8 reg;
+ u8 i;
+ int err;
+
+ tps61050_pm_dev_wr(info, NVC_PWR_COMM);
+ /* There isn't a device ID so we just check that all the registers
+ * equal their startup defaults.
+ */
+ for (i = TPS61050_REG0; i <= TPS61050_REG3; i++) {
+ err = tps61050_i2c_rd(info, i, &reg);
+ if (err) {
+ break;
+ } else {
+ reg &= tps61050_reg_init_id[i].mask;
+ if (reg != tps61050_reg_init_id[i].val) {
+ err = -ENODEV;
+ break;
+ }
+ }
+ }
+ tps61050_pm_dev_wr(info, NVC_PWR_OFF);
+ return err;
+}
+
+static int tps61050_param_rd(struct tps61050_info *info, long arg)
+{
+ struct nvc_param params;
+ struct nvc_torch_pin_state pinstate;
+ const void *data_ptr;
+ u8 reg;
+ u32 data_size = 0;
+ int err;
+
+ if (copy_from_user(&params,
+ (const void __user *)arg,
+ sizeof(struct nvc_param))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (info->s_mode == NVC_SYNC_SLAVE)
+ info = info->s_info;
+ switch (params.param) {
+ case NVC_PARAM_FLASH_CAPS:
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_CAPS\n", __func__);
+ data_ptr = &tps61050_flash_cap;
+ data_size = tps61050_flash_cap_size;
+ break;
+
+ case NVC_PARAM_FLASH_LEVEL:
+ tps61050_pm_dev_wr(info, NVC_PWR_COMM);
+ err = tps61050_i2c_rd(info, TPS61050_REG1, &reg);
+ tps61050_pm_dev_wr(info, NVC_PWR_OFF);
+ if (err < 0)
+ return err;
+
+ if (reg & 0x80) { /* 7:7 flash on/off */
+ reg &= 0x07; /* 2:0 flash setting */
+ reg++; /* flash setting +1 if flash on */
+ } else {
+ reg = 0; /* flash is off */
+ }
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_LEVEL: %u\n",
+ __func__,
+ (unsigned)tps61050_flash_cap.levels[reg].guidenum);
+ data_ptr = &tps61050_flash_cap.levels[reg].guidenum;
+ data_size = sizeof(tps61050_flash_cap.levels[reg].guidenum);
+ break;
+
+ case NVC_PARAM_TORCH_CAPS:
+ dev_dbg(&info->i2c_client->dev, "%s TORCH_CAPS\n", __func__);
+ data_ptr = &tps61050_torch_cap;
+ data_size = tps61050_torch_cap_size;
+ break;
+
+ case NVC_PARAM_TORCH_LEVEL:
+ tps61050_pm_dev_wr(info, NVC_PWR_COMM);
+ err = tps61050_i2c_rd(info, TPS61050_REG0, &reg);
+ tps61050_pm_dev_wr(info, NVC_PWR_OFF);
+ if (err < 0)
+ return err;
+
+ reg &= 0x07;
+ dev_dbg(&info->i2c_client->dev, "%s TORCH_LEVEL: %u\n",
+ __func__,
+ (unsigned)tps61050_torch_cap.guidenum[reg]);
+ data_ptr = &tps61050_torch_cap.guidenum[reg];
+ data_size = sizeof(tps61050_torch_cap.guidenum[reg]);
+ break;
+
+ case NVC_PARAM_FLASH_PIN_STATE:
+ pinstate.mask = info->pdata->pinstate->mask;
+ tps61050_pm_dev_wr(info, NVC_PWR_COMM);
+ err = tps61050_i2c_rd(info, TPS61050_REG1, &reg);
+ tps61050_pm_dev_wr(info, NVC_PWR_OFF);
+ if (err < 0)
+ return err;
+
+ reg &= 0x80; /* 7:7=flash enable */
+ if (reg)
+ /* assert strobe */
+ pinstate.values = info->pdata->pinstate->values;
+ else
+ pinstate.values = 0; /* deassert strobe */
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_PIN_STATE: %x&%x\n",
+ __func__, pinstate.mask, pinstate.values);
+ data_ptr = &pinstate;
+ data_size = sizeof(struct nvc_torch_pin_state);
+ break;
+
+ case NVC_PARAM_STEREO:
+ dev_dbg(&info->i2c_client->dev, "%s STEREO: %d\n",
+ __func__, (int)info->s_mode);
+ data_ptr = &info->s_mode;
+ data_size = sizeof(info->s_mode);
+ break;
+
+ default:
+ dev_err(&info->i2c_client->dev,
+ "%s unsupported parameter: %d\n",
+ __func__, params.param);
+ return -EINVAL;
+ }
+
+ if (params.sizeofvalue < data_size) {
+ dev_err(&info->i2c_client->dev,
+ "%s data size mismatch %d != %d\n",
+ __func__, params.sizeofvalue, data_size);
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void __user *)params.p_value,
+ data_ptr,
+ data_size)) {
+ dev_err(&info->i2c_client->dev,
+ "%s copy_to_user err line %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int tps61050_param_wr_s(struct tps61050_info *info,
+ struct nvc_param *params,
+ u8 val)
+{
+ u8 reg;
+ int err = 0;
+
+ /*
+ * 7:6 flash/torch mode
+ * 0 0 = off (power save)
+ * 0 1 = torch only (torch power is 2:0 REG0 where 0 = off)
+ * 1 0 = flash and torch (flash power is 2:0 REG1 (0 is a power level))
+ * 1 1 = N/A
+ * Note that 7:6 of REG0 and REG1 are shadowed with each other.
+ * In the code below we want to turn on/off one
+ * without affecting the other.
+ */
+ switch (params->param) {
+ case NVC_PARAM_FLASH_LEVEL:
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_LEVEL: %d\n",
+ __func__, val);
+ tps61050_pm_dev_wr(info, NVC_PWR_ON);
+ if (val) {
+ val--;
+ if (val > tps61050_default_pdata.max_amp_flash)
+ val = tps61050_default_pdata.max_amp_flash;
+ /* Amp limit values are in the board-sensors file. */
+ if (info->pdata->max_amp_flash &&
+ (val > info->pdata->max_amp_flash))
+ val = info->pdata->max_amp_flash;
+ val |= 0x80; /* 7:7=flash mode */
+ } else {
+ err = tps61050_i2c_rd(info, TPS61050_REG0, &reg);
+ if (reg & 0x07) /* 2:0=torch setting */
+ val = 0x40; /* 6:6 enable just torch */
+ }
+ err |= tps61050_i2c_wr(info, TPS61050_REG1, val);
+ val &= 0xC0; /* 7:6=flash/torch mode */
+ if (!val) /* turn pwr off if no flash && no pwr_api */
+ tps61050_pm_dev_wr(info, NVC_PWR_OFF);
+ return err;
+
+ case NVC_PARAM_TORCH_LEVEL:
+ dev_dbg(&info->i2c_client->dev, "%s TORCH_LEVEL: %d\n",
+ __func__, val);
+ tps61050_pm_dev_wr(info, NVC_PWR_ON);
+ err = tps61050_i2c_rd(info, TPS61050_REG1, &reg);
+ reg &= 0x80; /* 7:7=flash */
+ if (val) {
+ if (val > tps61050_default_pdata.max_amp_torch)
+ val = tps61050_default_pdata.max_amp_torch;
+ /* Amp limit values are in the board-sensors file. */
+ if (info->pdata->max_amp_torch &&
+ (val > info->pdata->max_amp_torch))
+ val = info->pdata->max_amp_torch;
+ if (!reg) /* test if flash/torch off */
+ val |= (0x40); /* 6:6=torch only mode */
+ } else {
+ val |= reg;
+ }
+ err |= tps61050_i2c_wr(info, TPS61050_REG0, val);
+ val &= 0xC0; /* 7:6=mode */
+ if (!val) /* turn pwr off if no torch && no pwr_api */
+ tps61050_pm_dev_wr(info, NVC_PWR_OFF);
+ return err;
+
+ case NVC_PARAM_FLASH_PIN_STATE:
+ dev_dbg(&info->i2c_client->dev, "%s FLASH_PIN_STATE: %d\n",
+ __func__, val);
+ if (val)
+ val = 0x08; /* 3:3=soft trigger */
+ err = tps61050_i2c_rd(info, TPS61050_REG1, &reg);
+ val |= reg;
+ err |= tps61050_i2c_wr(info, TPS61050_REG1, val);
+ return err;
+
+ default:
+ dev_err(&info->i2c_client->dev,
+ "%s unsupported parameter: %d\n",
+ __func__, params->param);
+ return -EINVAL;
+ }
+}
+
+static int tps61050_param_wr(struct tps61050_info *info, long arg)
+{
+ struct nvc_param params;
+ u8 val;
+ int err = 0;
+
+ if (copy_from_user(&params, (const void __user *)arg,
+ sizeof(struct nvc_param))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&val, (const void __user *)params.p_value,
+ sizeof(val))) {
+ dev_err(&info->i2c_client->dev, "%s %d copy_from_user err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ /* parameters independent of sync mode */
+ switch (params.param) {
+ case NVC_PARAM_STEREO:
+ dev_dbg(&info->i2c_client->dev, "%s STEREO: %d\n",
+ __func__, (int)val);
+ if (val == info->s_mode)
+ return 0;
+
+ switch (val) {
+ case NVC_SYNC_OFF:
+ info->s_mode = val;
+ if (info->s_info != NULL) {
+ info->s_info->s_mode = val;
+ tps61050_pm_wr(info->s_info, NVC_PWR_OFF);
+ }
+ break;
+
+ case NVC_SYNC_MASTER:
+ info->s_mode = val;
+ if (info->s_info != NULL)
+ info->s_info->s_mode = val;
+ break;
+
+ case NVC_SYNC_SLAVE:
+ case NVC_SYNC_STEREO:
+ if (info->s_info != NULL) {
+ /* sync power */
+ info->s_info->pwr_api = info->pwr_api;
+ err = tps61050_pm_wr(info->s_info,
+ info->pwr_dev);
+ if (!err) {
+ info->s_mode = val;
+ info->s_info->s_mode = val;
+ } else {
+ tps61050_pm_wr(info->s_info,
+ NVC_PWR_OFF);
+ err = -EIO;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (info->pdata->cfg & NVC_CFG_NOERR)
+ return 0;
+
+ return err;
+
+ default:
+ /* parameters dependent on sync mode */
+ switch (info->s_mode) {
+ case NVC_SYNC_OFF:
+ case NVC_SYNC_MASTER:
+ return tps61050_param_wr_s(info, &params, val);
+
+ case NVC_SYNC_SLAVE:
+ return tps61050_param_wr_s(info->s_info,
+ &params,
+ val);
+
+ case NVC_SYNC_STEREO:
+ err = tps61050_param_wr_s(info, &params, val);
+ if (!(info->pdata->cfg & NVC_CFG_SYNC_I2C_MUX))
+ err |= tps61050_param_wr_s(info->s_info,
+ &params,
+ val);
+ return err;
+
+ default:
+ dev_err(&info->i2c_client->dev, "%s %d internal err\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
+}
+
+static long tps61050_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct tps61050_info *info = file->private_data;
+ int pwr;
+
+ switch (cmd) {
+ case NVC_IOCTL_PARAM_WR:
+ return tps61050_param_wr(info, arg);
+
+ case NVC_IOCTL_PARAM_RD:
+ return tps61050_param_rd(info, arg);
+
+ case NVC_IOCTL_PWR_WR:
+ /* This is a Guaranteed Level of Service (GLOS) call */
+ pwr = (int)arg * 2;
+ dev_dbg(&info->i2c_client->dev, "%s PWR_WR: %d\n",
+ __func__, pwr);
+ return tps61050_pm_api_wr(info, pwr);
+
+ case NVC_IOCTL_PWR_RD:
+ if (info->s_mode == NVC_SYNC_SLAVE)
+ pwr = info->s_info->pwr_api / 2;
+ else
+ pwr = info->pwr_api / 2;
+ dev_dbg(&info->i2c_client->dev, "%s PWR_RD: %d\n",
+ __func__, pwr);
+ if (copy_to_user((void __user *)arg, (const void *)&pwr,
+ sizeof(pwr))) {
+ dev_err(&info->i2c_client->dev,
+ "%s copy_to_user err line %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+
+ default:
+ dev_err(&info->i2c_client->dev, "%s unsupported ioctl: %x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+}
+
+static int tps61050_sync_en(int dev1, int dev2)
+{
+ struct tps61050_info *sync1 = NULL;
+ struct tps61050_info *sync2 = NULL;
+ struct tps61050_info *pos = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &tps61050_info_list, list) {
+ if (pos->pdata->num == dev1) {
+ sync1 = pos;
+ break;
+ }
+ }
+ pos = NULL;
+ list_for_each_entry_rcu(pos, &tps61050_info_list, list) {
+ if (pos->pdata->num == dev2) {
+ sync2 = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (sync1 != NULL)
+ sync1->s_info = NULL;
+ if (sync2 != NULL)
+ sync2->s_info = NULL;
+ if (!dev1 && !dev2)
+ return 0; /* no err if default instance 0's used */
+
+ if (dev1 == dev2)
+ return -EINVAL; /* err if sync instance is itself */
+
+ if ((sync1 != NULL) && (sync2 != NULL)) {
+ sync1->s_info = sync2;
+ sync2->s_info = sync1;
+ }
+ return 0;
+}
+
+static int tps61050_sync_dis(struct tps61050_info *info)
+{
+ if (info->s_info != NULL) {
+ info->s_info->s_mode = 0;
+ info->s_info->s_info = NULL;
+ info->s_mode = 0;
+ info->s_info = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tps61050_open(struct inode *inode, struct file *file)
+{
+ struct tps61050_info *info = NULL;
+ struct tps61050_info *pos = NULL;
+ int err;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &tps61050_info_list, list) {
+ if (pos->miscdev.minor == iminor(inode)) {
+ info = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!info)
+ return -ENODEV;
+
+ err = tps61050_sync_en(info->pdata->num, info->pdata->sync);
+ if (err == -EINVAL)
+ dev_err(&info->i2c_client->dev,
+ "%s err: invalid num (%u) and sync (%u) instance\n",
+ __func__, info->pdata->num, info->pdata->sync);
+ if (atomic_xchg(&info->in_use, 1))
+ return -EBUSY;
+
+ if (info->s_info != NULL) {
+ if (atomic_xchg(&info->s_info->in_use, 1))
+ return -EBUSY;
+ }
+
+ file->private_data = info;
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int tps61050_release(struct inode *inode, struct file *file)
+{
+ struct tps61050_info *info = file->private_data;
+
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ tps61050_pm_wr_s(info, NVC_PWR_OFF);
+ file->private_data = NULL;
+ WARN_ON(!atomic_xchg(&info->in_use, 0));
+ if (info->s_info != NULL)
+ WARN_ON(!atomic_xchg(&info->s_info->in_use, 0));
+ tps61050_sync_dis(info);
+ return 0;
+}
+
+static const struct file_operations tps61050_fileops = {
+ .owner = THIS_MODULE,
+ .open = tps61050_open,
+ .unlocked_ioctl = tps61050_ioctl,
+ .release = tps61050_release,
+};
+
+static void tps61050_del(struct tps61050_info *info)
+{
+ tps61050_pm_exit(info);
+ tps61050_sync_dis(info);
+ spin_lock(&tps61050_spinlock);
+ list_del_rcu(&info->list);
+ spin_unlock(&tps61050_spinlock);
+ synchronize_rcu();
+}
+
+static int tps61050_remove(struct i2c_client *client)
+{
+ struct tps61050_info *info = i2c_get_clientdata(client);
+
+ dev_dbg(&info->i2c_client->dev, "%s\n", __func__);
+ misc_deregister(&info->miscdev);
+ tps61050_del(info);
+ return 0;
+}
+
+static int tps61050_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps61050_info *info;
+ char dname[16];
+ int err;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&client->dev, "%s: kzalloc error\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->i2c_client = client;
+ if (client->dev.platform_data) {
+ info->pdata = client->dev.platform_data;
+ } else {
+ info->pdata = &tps61050_default_pdata;
+ dev_dbg(&client->dev,
+ "%s No platform data. Using defaults.\n",
+ __func__);
+ }
+ i2c_set_clientdata(client, info);
+ INIT_LIST_HEAD(&info->list);
+ spin_lock(&tps61050_spinlock);
+ list_add_rcu(&info->list, &tps61050_info_list);
+ spin_unlock(&tps61050_spinlock);
+ tps61050_pm_init(info);
+ err = tps61050_dev_id(info);
+ if (err < 0) {
+ dev_err(&client->dev, "%s device not found\n", __func__);
+ if (info->pdata->cfg & NVC_CFG_NODEV) {
+ tps61050_del(info);
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(&client->dev, "%s device found\n", __func__);
+ }
+
+ if (info->pdata->dev_name != 0)
+ strcpy(dname, info->pdata->dev_name);
+ else
+ strcpy(dname, "tps61050");
+ if (info->pdata->num) {
+ size_t len = strlen(dname);
+
+ snprintf(dname + len, sizeof(dname) - len,
+ ".%u", info->pdata->num);
+ }
+ info->miscdev.name = dname;
+ info->miscdev.fops = &tps61050_fileops;
+ info->miscdev.minor = MISC_DYNAMIC_MINOR;
+ if (misc_register(&info->miscdev)) {
+ dev_err(&client->dev, "%s unable to register misc device %s\n",
+ __func__, dname);
+ tps61050_del(info);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id tps61050_id[] = {
+ { "tps61050", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, tps61050_id);
+
+static struct i2c_driver tps61050_i2c_driver = {
+ .driver = {
+ .name = "tps61050",
+ .owner = THIS_MODULE,
+ },
+ .id_table = tps61050_id,
+ .probe = tps61050_probe,
+ .remove = tps61050_remove,
+};
+
+static int __init tps61050_init(void)
+{
+ return i2c_add_driver(&tps61050_i2c_driver);
+}
+
+static void __exit tps61050_exit(void)
+{
+ i2c_del_driver(&tps61050_i2c_driver);
+}
+
+module_init(tps61050_init);
+module_exit(tps61050_exit);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 21574bdf485f..55fd5157b438 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -386,6 +386,26 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MAX8907C
+ tristate "Maxim Semiconductor MAX8907C PMIC Support"
+ select MFD_CORE
+ depends on I2C
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX8907C.
+ This is a Power Management IC. This driver provides common support
+ for accessing the device, additional drivers must be enabled in order
+ to use the functionality of the device.
+
+config MFD_MAX77663
+ tristate "Maxim Semiconductor MAX77663 PMIC Support"
+ select MFD_CORE
+ depends on I2C
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX77663.
+ This is a Power Management IC. This driver provides common support
+ for accessing the device, additional drivers must be enabled in order
+ to use the functionality of the device.
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -770,6 +790,47 @@ config MFD_AAT2870_CORE
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_TPS6591X
+ bool "TPS6591x Power Management chips"
+ depends on I2C && GPIOLIB && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ If you say yes here you get support for the TPS6591X series of
+ Power Management chips.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
+config MFD_TPS80031
+ bool "TI TPS80031 Power Management chips"
+ depends on I2C && GPIOLIB && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ If you say yes here you get support for the TPS80031 Power
+ Management chips.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
+config GPADC_TPS80031
+ bool "Support for TI TPS80031 Gpadc driver"
+ depends on MFD_TPS80031
+ help
+ If you say yes here you get support for the TPS80031 GPADC
+ module.
+
+config MFD_RICOH583
+ bool "Ricoh RC5T583 Power Management system device"
+ depends on I2C && GPIOLIB && GENERIC_HARDIRQS
+ select MFD_CORE
+ default n
+ help
+ If you say yes here you get support for the RICOH583 Power
+ Management system device.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c58020303d18..f9f9d400b0ee 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -102,3 +102,10 @@ obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
+obj-$(CONFIG_MFD_TPS6591X) += tps6591x.o
+obj-$(CONFIG_MFD_TPS80031) += tps80031.o
+obj-$(CONFIG_GPADC_TPS80031) += tps8003x-gpadc.o
+obj-$(CONFIG_MFD_MAX8907C) += max8907c.o
+obj-$(CONFIG_MFD_MAX8907C) += max8907c-irq.o
+obj-$(CONFIG_MFD_MAX77663) += max77663-core.o
+obj-$(CONFIG_MFD_RICOH583) += ricoh583.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 345dc658ef06..e7a4e2ab6cdc 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -79,27 +79,22 @@ static struct mfd_cell aat2870_devs[] = {
{
.name = "aat2870-backlight",
.id = AAT2870_ID_BL,
- .pdata_size = sizeof(struct aat2870_bl_platform_data),
},
{
.name = "aat2870-regulator",
.id = AAT2870_ID_LDOA,
- .pdata_size = sizeof(struct regulator_init_data),
},
{
.name = "aat2870-regulator",
.id = AAT2870_ID_LDOB,
- .pdata_size = sizeof(struct regulator_init_data),
},
{
.name = "aat2870-regulator",
.id = AAT2870_ID_LDOC,
- .pdata_size = sizeof(struct regulator_init_data),
},
{
.name = "aat2870-regulator",
.id = AAT2870_ID_LDOD,
- .pdata_size = sizeof(struct regulator_init_data),
},
};
@@ -423,7 +418,7 @@ static int aat2870_i2c_probe(struct i2c_client *client,
if ((pdata->subdevs[i].id == aat2870_devs[j].id) &&
!strcmp(pdata->subdevs[i].name,
aat2870_devs[j].name)) {
- aat2870_devs[j].platform_data =
+ aat2870_devs[j].mfd_data =
pdata->subdevs[i].platform_data;
break;
}
diff --git a/drivers/mfd/max77663-core.c b/drivers/mfd/max77663-core.c
new file mode 100644
index 000000000000..38d1956ee0ec
--- /dev/null
+++ b/drivers/mfd/max77663-core.c
@@ -0,0 +1,1402 @@
+/*
+ * drivers/mfd/max77663-core.c
+ * Max77663 mfd driver (I2C bus access)
+ *
+ * Copyright 2011 Maxim Integrated Products, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/max77663-core.h>
+
+/* RTC i2c slave address */
+#define MAX77663_RTC_I2C_ADDR 0x48
+
+/* Registers */
+#define MAX77663_REG_IRQ_TOP 0x05
+#define MAX77663_REG_LBT_IRQ 0x06
+#define MAX77663_REG_SD_IRQ 0x07
+#define MAX77663_REG_LDOX_IRQ 0x08
+#define MAX77663_REG_LDO8_IRQ 0x09
+#define MAX77663_REG_GPIO_IRQ 0x0A
+#define MAX77663_REG_ONOFF_IRQ 0x0B
+#define MAX77663_REG_NVER 0x0C
+#define MAX77663_REG_IRQ_TOP_MASK 0x0D
+#define MAX77663_REG_LBT_IRQ_MASK 0x0E
+#define MAX77663_REG_SD_IRQ_MASK 0x0F
+#define MAX77663_REG_LDOX_IRQ_MASK 0x10
+#define MAX77663_REG_LDO8_IRQ_MASK 0x11
+#define MAX77663_REG_ONOFF_IRQ_MASK 0x12
+#define MAX77663_REG_GPIO_CTRL0 0x36
+#define MAX77663_REG_GPIO_CTRL1 0x37
+#define MAX77663_REG_GPIO_CTRL2 0x38
+#define MAX77663_REG_GPIO_CTRL3 0x39
+#define MAX77663_REG_GPIO_CTRL4 0x3A
+#define MAX77663_REG_GPIO_CTRL5 0x3B
+#define MAX77663_REG_GPIO_CTRL6 0x3C
+#define MAX77663_REG_GPIO_CTRL7 0x3D
+#define MAX77663_REG_GPIO_PU 0x3E
+#define MAX77663_REG_GPIO_PD 0x3F
+#define MAX77663_REG_GPIO_ALT 0x40
+#define MAX77663_REG_ONOFF_CFG1 0x41
+
+#define IRQ_TOP_GLBL_MASK (1 << 7)
+#define IRQ_TOP_GLBL_SHIFT 7
+#define IRQ_TOP_SD_MASK (1 << 6)
+#define IRQ_TOP_SD_SHIFT 6
+#define IRQ_TOP_LDO_MASK (1 << 5)
+#define IRQ_TOP_LDO_SHIFT 5
+#define IRQ_TOP_GPIO_MASK (1 << 4)
+#define IRQ_TOP_GPIO_SHIFT 4
+#define IRQ_TOP_RTC_MASK (1 << 3)
+#define IRQ_TOP_RTC_SHIFT 3
+#define IRQ_TOP_32K_MASK (1 << 2)
+#define IRQ_TOP_32K_SHIFT 2
+#define IRQ_TOP_ONOFF_MASK (1 << 1)
+#define IRQ_TOP_ONOFF_SHIFT 1
+#define IRQ_TOP_NVER_MASK (1 << 0)
+#define IRQ_TOP_NVER_SHIFT 0
+
+#define IRQ_GLBL_MASK (1 << 0)
+
+#define IRQ_LBT_BASE MAX77663_IRQ_LBT_LB
+#define IRQ_LBT_END MAX77663_IRQ_LBT_THERM_ALRM2
+
+#define IRQ_GPIO_BASE MAX77663_IRQ_GPIO0
+#define IRQ_GPIO_END MAX77663_IRQ_GPIO7
+
+#define IRQ_ONOFF_BASE MAX77663_IRQ_ONOFF_HRDPOWRN
+#define IRQ_ONOFF_END MAX77663_IRQ_ONOFF_ACOK_RISING
+
+#define GPIO_REG_ADDR(offset) (MAX77663_REG_GPIO_CTRL0 + offset)
+
+#define GPIO_CTRL_DBNC_MASK (3 << 6)
+#define GPIO_CTRL_DBNC_SHIFT 6
+#define GPIO_CTRL_REFE_IRQ_MASK (3 << 4)
+#define GPIO_CTRL_REFE_IRQ_SHIFT 4
+#define GPIO_CTRL_DOUT_MASK (1 << 3)
+#define GPIO_CTRL_DOUT_SHIFT 3
+#define GPIO_CTRL_DIN_MASK (1 << 2)
+#define GPIO_CTRL_DIN_SHIFT 2
+#define GPIO_CTRL_DIR_MASK (1 << 1)
+#define GPIO_CTRL_DIR_SHIFT 1
+#define GPIO_CTRL_OUT_DRV_MASK (1 << 0)
+#define GPIO_CTRL_OUT_DRV_SHIFT 0
+
+#define GPIO_REFE_IRQ_NONE 0
+#define GPIO_REFE_IRQ_EDGE_FALLING 1
+#define GPIO_REFE_IRQ_EDGE_RISING 2
+#define GPIO_REFE_IRQ_EDGE_BOTH 3
+
+#define GPIO_DBNC_NONE 0
+#define GPIO_DBNC_8MS 1
+#define GPIO_DBNC_16MS 2
+#define GPIO_DBNC_32MS 3
+
+#define ONOFF_SFT_RST_MASK (1 << 7)
+#define ONOFF_SLPEN_MASK (1 << 2)
+
+enum {
+ CACHE_IRQ_LBT,
+ CACHE_IRQ_SD,
+ CACHE_IRQ_LDO,
+ CACHE_IRQ_ONOFF,
+ CACHE_IRQ_NR,
+};
+
+struct max77663_irq_data {
+ int mask_reg;
+ u16 mask;
+ u8 top_mask;
+ u8 top_shift;
+ int cache_idx;
+ bool is_rtc;
+ bool is_unmask;
+ u8 trigger_type;
+};
+
+struct max77663_chip {
+ struct device *dev;
+ struct i2c_client *i2c_power;
+ struct i2c_client *i2c_rtc;
+
+ struct max77663_platform_data *pdata;
+ struct mutex io_lock;
+
+ struct irq_chip irq;
+ struct mutex irq_lock;
+ int irq_base;
+ int irq_top_count[8];
+ u8 cache_irq_top_mask;
+ u16 cache_irq_mask[CACHE_IRQ_NR];
+
+ struct gpio_chip gpio;
+ int gpio_base;
+ u8 cache_gpio_ctrl[MAX77663_GPIO_NR];
+ u8 cache_gpio_pu;
+ u8 cache_gpio_pd;
+ u8 cache_gpio_alt;
+};
+
+struct max77663_chip *max77663_chip;
+
+#define IRQ_DATA_LBT(_name, _shift) \
+ [MAX77663_IRQ_LBT_##_name] = { \
+ .mask_reg = MAX77663_REG_LBT_IRQ_MASK, \
+ .mask = (1 << _shift), \
+ .top_mask = IRQ_TOP_GLBL_MASK, \
+ .top_shift = IRQ_TOP_GLBL_SHIFT, \
+ .cache_idx = CACHE_IRQ_LBT, \
+ }
+
+#define IRQ_DATA_GPIO(_name) \
+ [MAX77663_IRQ_GPIO##_name] = { \
+ .mask = (1 << _name), \
+ .top_mask = IRQ_TOP_GPIO_MASK, \
+ .top_shift = IRQ_TOP_GPIO_SHIFT, \
+ .cache_idx = -1, \
+ }
+
+#define IRQ_DATA_ONOFF(_name, _shift) \
+ [MAX77663_IRQ_ONOFF_##_name] = { \
+ .mask_reg = MAX77663_REG_ONOFF_IRQ_MASK,\
+ .mask = (1 << _shift), \
+ .top_mask = IRQ_TOP_ONOFF_MASK, \
+ .top_shift = IRQ_TOP_ONOFF_SHIFT, \
+ .cache_idx = CACHE_IRQ_ONOFF, \
+ }
+
+static struct max77663_irq_data max77663_irqs[MAX77663_IRQ_NR] = {
+ IRQ_DATA_LBT(LB, 3),
+ IRQ_DATA_LBT(THERM_ALRM1, 2),
+ IRQ_DATA_LBT(THERM_ALRM2, 1),
+ IRQ_DATA_GPIO(0),
+ IRQ_DATA_GPIO(1),
+ IRQ_DATA_GPIO(2),
+ IRQ_DATA_GPIO(3),
+ IRQ_DATA_GPIO(4),
+ IRQ_DATA_GPIO(5),
+ IRQ_DATA_GPIO(6),
+ IRQ_DATA_GPIO(7),
+ IRQ_DATA_ONOFF(HRDPOWRN, 0),
+ IRQ_DATA_ONOFF(EN0_1SEC, 1),
+ IRQ_DATA_ONOFF(EN0_FALLING, 2),
+ IRQ_DATA_ONOFF(EN0_RISING, 3),
+ IRQ_DATA_ONOFF(LID_FALLING, 4),
+ IRQ_DATA_ONOFF(LID_RISING, 5),
+ IRQ_DATA_ONOFF(ACOK_FALLING, 6),
+ IRQ_DATA_ONOFF(ACOK_RISING, 7),
+ [MAX77663_IRQ_RTC] = {
+ .top_mask = IRQ_TOP_RTC_MASK,
+ .top_shift = IRQ_TOP_RTC_SHIFT,
+ .cache_idx = -1,
+ .is_rtc = 1,
+ },
+ [MAX77663_IRQ_SD_PF] = {
+ .mask_reg = MAX77663_REG_SD_IRQ_MASK,
+ .mask = 0xF8,
+ .top_mask = IRQ_TOP_SD_MASK,
+ .top_shift = IRQ_TOP_SD_SHIFT,
+ .cache_idx = CACHE_IRQ_SD,
+ },
+ [MAX77663_IRQ_LDO_PF] = {
+ .mask_reg = MAX77663_REG_LDOX_IRQ_MASK,
+ .mask = 0x1FF,
+ .top_mask = IRQ_TOP_LDO_MASK,
+ .top_shift = IRQ_TOP_LDO_SHIFT,
+ .cache_idx = CACHE_IRQ_LDO,
+ },
+ [MAX77663_IRQ_32K] = {
+ .top_mask = IRQ_TOP_32K_MASK,
+ .top_shift = IRQ_TOP_32K_SHIFT,
+ .cache_idx = -1,
+ },
+ [MAX77663_IRQ_NVER] = {
+ .top_mask = IRQ_TOP_NVER_MASK,
+ .top_shift = IRQ_TOP_NVER_SHIFT,
+ .cache_idx = -1,
+ },
+};
+
+static inline int max77663_i2c_write(struct i2c_client *client, u8 addr,
+ void *src, u32 bytes)
+{
+ u8 buf[bytes * 2]; /* large enough for both write formats below */
+ int ret;
+
+ dev_dbg(&client->dev, "i2c_write: addr=0x%02x, src=0x%02x, bytes=%u\n",
+ addr, *((u8 *)src), bytes);
+
+ if (client->addr == MAX77663_RTC_I2C_ADDR) {
+ /* RTC registers support sequential writing */
+ buf[0] = addr;
+ memcpy(&buf[1], src, bytes);
+ } else {
+ /* Power registers support register-data pair writing */
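+ /* e.g. a 2-byte write starting at addr A goes out on the
+ * wire as { A, data0, A+1, data1 }
+ */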
+ u8 *src8 = (u8 *)src;
+ int i;
+
+ for (i = 0; i < (bytes * 2); i++) {
+ if (i % 2)
+ buf[i] = *src8++;
+ else
+ buf[i] = addr++;
+ }
+ bytes = (bytes * 2) - 1;
+ }
+
+ ret = i2c_master_send(client, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int max77663_i2c_read(struct i2c_client *client, u8 addr,
+ void *dest, u32 bytes)
+{
+ int ret;
+
+ if (bytes > 1) {
+ ret = i2c_smbus_read_i2c_block_data(client, addr, bytes, dest);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = i2c_smbus_read_byte_data(client, addr);
+ if (ret < 0)
+ return ret;
+
+ *((u8 *)dest) = (u8)ret;
+ }
+
+ dev_dbg(&client->dev, "i2c_read: addr=0x%02x, dest=0x%02x, bytes=%u\n",
+ addr, *((u8 *)dest), bytes);
+ return 0;
+}
+
+int max77663_read(struct device *dev, u8 addr, void *values, u32 len,
+ bool is_rtc)
+{
+ struct max77663_chip *chip = dev_get_drvdata(dev);
+ struct i2c_client *client = NULL;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ if (!is_rtc)
+ client = chip->i2c_power;
+ else
+ client = chip->i2c_rtc;
+
+ ret = max77663_i2c_read(client, addr, values, len);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(max77663_read);
+
+int max77663_write(struct device *dev, u8 addr, void *values, u32 len,
+ bool is_rtc)
+{
+ struct max77663_chip *chip = dev_get_drvdata(dev);
+ struct i2c_client *client = NULL;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ if (!is_rtc)
+ client = chip->i2c_power;
+ else
+ client = chip->i2c_rtc;
+
+ ret = max77663_i2c_write(client, addr, values, len);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(max77663_write);
+
+int max77663_set_bits(struct device *dev, u8 addr, u8 mask, u8 value,
+ bool is_rtc)
+{
+ struct max77663_chip *chip = dev_get_drvdata(dev);
+ struct i2c_client *client = NULL;
+ u8 tmp;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ if (!is_rtc)
+ client = chip->i2c_power;
+ else
+ client = chip->i2c_rtc;
+
+ ret = max77663_i2c_read(client, addr, &tmp, 1);
+ if (ret == 0) {
+ value = (tmp & ~mask) | (value & mask);
+ ret = max77663_i2c_write(client, addr, &value, 1);
+ }
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(max77663_set_bits);
+
+int max77663_power_off(void)
+{
+ struct max77663_chip *chip = max77663_chip;
+
+ if (!chip)
+ return -EINVAL;
+
+ dev_info(chip->dev, "%s: Global shutdown\n", __func__);
+ return max77663_set_bits(chip->dev, MAX77663_REG_ONOFF_CFG1,
+ ONOFF_SFT_RST_MASK, ONOFF_SFT_RST_MASK, 0);
+}
+EXPORT_SYMBOL(max77663_power_off);
+
+static int max77663_sleep_enable(struct max77663_chip *chip)
+{
+ /* Enable sleep mode so that the PMIC can be placed into
+ * sleep mode by the AP pulling EN1 low */
+ return max77663_set_bits(chip->dev, MAX77663_REG_ONOFF_CFG1,
+ ONOFF_SLPEN_MASK, ONOFF_SLPEN_MASK, 0);
+}
+
+static inline int max77663_cache_write(struct device *dev, u8 addr, u8 mask,
+ u8 val, u8 *cache)
+{
+ u8 new_val;
+ int ret;
+
+ new_val = (*cache & ~mask) | (val & mask);
+ if (*cache != new_val) {
+ ret = max77663_write(dev, addr, &new_val, 1, 0);
+ if (ret < 0)
+ return ret;
+ *cache = new_val;
+ }
+ return 0;
+}
+
+static inline
+struct max77663_chip *max77663_chip_from_gpio(struct gpio_chip *gpio)
+{
+ return container_of(gpio, struct max77663_chip, gpio);
+}
+
+static int max77663_gpio_set_pull_up(struct max77663_chip *chip, int offset,
+ int pull_up)
+{
+ u8 val = 0;
+
+ if ((offset < MAX77663_GPIO0) || (MAX77663_GPIO7 < offset))
+ return -EINVAL;
+
+ if (pull_up == GPIO_PU_ENABLE)
+ val = (1 << offset);
+
+ return max77663_cache_write(chip->dev, MAX77663_REG_GPIO_PU,
+ (1 << offset), val, &chip->cache_gpio_pu);
+}
+
+static int max77663_gpio_set_pull_down(struct max77663_chip *chip, int offset,
+ int pull_down)
+{
+ u8 val = 0;
+
+ if ((offset < MAX77663_GPIO0) || (MAX77663_GPIO7 < offset))
+ return -EINVAL;
+
+ if (pull_down == GPIO_PD_ENABLE)
+ val = (1 << offset);
+
+ return max77663_cache_write(chip->dev, MAX77663_REG_GPIO_PD,
+ (1 << offset), val, &chip->cache_gpio_pd);
+}
+
+static inline
+int max77663_gpio_is_alternate(struct max77663_chip *chip, int offset)
+{
+ return (chip->cache_gpio_alt & (1 << offset)) ? 1 : 0;
+}
+
+int max77663_gpio_set_alternate(int gpio, int alternate)
+{
+ struct max77663_chip *chip = max77663_chip;
+ u8 val = 0;
+ int ret = 0;
+
+ if (!chip)
+ return -ENXIO;
+
+ gpio -= chip->gpio_base;
+ if ((gpio < MAX77663_GPIO0) || (MAX77663_GPIO7 < gpio))
+ return -EINVAL;
+
+ if (alternate == GPIO_ALT_ENABLE) {
+ val = (1 << gpio);
+ if (gpio == MAX77663_GPIO7) {
+ ret = max77663_gpio_set_pull_up(chip, gpio, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = max77663_gpio_set_pull_down(chip, gpio, 0);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return max77663_cache_write(chip->dev, MAX77663_REG_GPIO_ALT,
+ (1 << gpio), val, &chip->cache_gpio_alt);
+}
+EXPORT_SYMBOL(max77663_gpio_set_alternate);
+
+static int max77663_gpio_dir_input(struct gpio_chip *gpio, unsigned offset)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+
+ if (max77663_gpio_is_alternate(chip, offset)) {
+ dev_warn(chip->dev, "gpio_dir_input: "
+ "gpio%u is used as alternate mode\n", offset);
+ return 0;
+ }
+
+ return max77663_cache_write(chip->dev, GPIO_REG_ADDR(offset),
+ GPIO_CTRL_DIR_MASK, GPIO_CTRL_DIR_MASK,
+ &chip->cache_gpio_ctrl[offset]);
+}
+
+static int max77663_gpio_get(struct gpio_chip *gpio, unsigned offset)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+ u8 val;
+ int ret;
+
+ if (max77663_gpio_is_alternate(chip, offset)) {
+ dev_warn(chip->dev, "gpio_get: "
+ "gpio%u is used as alternate mode\n", offset);
+ return 0;
+ }
+
+ ret = max77663_read(chip->dev, GPIO_REG_ADDR(offset), &val, 1, 0);
+ if (ret < 0)
+ return ret;
+
+ chip->cache_gpio_ctrl[offset] = val;
+ return (val & GPIO_CTRL_DIN_MASK) >> GPIO_CTRL_DIN_SHIFT;
+}
+
+static int max77663_gpio_dir_output(struct gpio_chip *gpio, unsigned offset,
+ int value)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+ u8 mask = GPIO_CTRL_DIR_MASK | GPIO_CTRL_DOUT_MASK;
+ u8 val = (value ? 1 : 0) << GPIO_CTRL_DOUT_SHIFT;
+
+ if (max77663_gpio_is_alternate(chip, offset)) {
+ dev_warn(chip->dev, "gpio_dir_output: "
+ "gpio%u is used as alternate mode\n", offset);
+ return 0;
+ }
+
+ return max77663_cache_write(chip->dev, GPIO_REG_ADDR(offset), mask, val,
+ &chip->cache_gpio_ctrl[offset]);
+}
+
+static int max77663_gpio_set_debounce(struct gpio_chip *gpio, unsigned offset,
+ unsigned debounce)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+ u8 shift = GPIO_CTRL_DBNC_SHIFT;
+ u8 val = 0;
+
+ if (max77663_gpio_is_alternate(chip, offset)) {
+ dev_warn(chip->dev, "gpio_set_debounce: "
+ "gpio%u is used as alternate mode\n", offset);
+ return 0;
+ }
+
+ if (debounce == 0)
+ val = 0;
+ else if ((0 < debounce) && (debounce <= 8))
+ val = (GPIO_DBNC_8MS << shift);
+ else if ((8 < debounce) && (debounce <= 16))
+ val = (GPIO_DBNC_16MS << shift);
+ else if ((16 < debounce) && (debounce <= 32))
+ val = (GPIO_DBNC_32MS << shift);
+ else
+ return -EINVAL;
+
+ return max77663_cache_write(chip->dev, GPIO_REG_ADDR(offset),
+ GPIO_CTRL_DBNC_MASK, val,
+ &chip->cache_gpio_ctrl[offset]);
+}
+
+static void max77663_gpio_set(struct gpio_chip *gpio, unsigned offset,
+ int value)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+ u8 val = (value ? 1 : 0) << GPIO_CTRL_DOUT_SHIFT;
+
+ if (max77663_gpio_is_alternate(chip, offset)) {
+ dev_warn(chip->dev, "gpio_set: "
+ "gpio%u is used as alternate mode\n", offset);
+ return;
+ }
+
+ max77663_cache_write(chip->dev, GPIO_REG_ADDR(offset),
+ GPIO_CTRL_DOUT_MASK, val,
+ &chip->cache_gpio_ctrl[offset]);
+}
+
+static int max77663_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+
+ return chip->irq_base + IRQ_GPIO_BASE + offset;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void max77663_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gpio)
+{
+ struct max77663_chip *chip = max77663_chip_from_gpio(gpio);
+ int i;
+
+ for (i = 0; i < gpio->ngpio; i++) {
+ u8 ctrl_val;
+ const char *label;
+ int is_out;
+ int ret;
+
+ label = gpiochip_is_requested(gpio, i);
+ if (!label)
+ label = "Unrequested";
+
+ seq_printf(s, " gpio-%-3d (%-20.20s) ", i + chip->gpio_base,
+ label);
+
+ if (chip->cache_gpio_alt & (1 << i)) {
+ seq_printf(s, "alt\n");
+ continue;
+ }
+
+ ret = max77663_read(chip->dev, GPIO_REG_ADDR(i), &ctrl_val, 1,
+ 0);
+ if (ret < 0) {
+ seq_printf(s, "\n");
+ continue;
+ }
+
+ is_out = ctrl_val & GPIO_CTRL_DIR_MASK ? 0 : 1;
+ seq_printf(s, "%s %s", (is_out ? "out" : "in"), (is_out ?
+ (ctrl_val & GPIO_CTRL_DOUT_MASK ? "hi" : "lo")
+ : (ctrl_val & GPIO_CTRL_DIN_MASK ? "hi" : "lo")));
+
+ if (!is_out) {
+ int irq = gpio_to_irq(i + chip->gpio_base);
+ struct irq_desc *desc = irq_to_desc(irq);
+ u8 dbnc;
+
+ if (irq >= 0 && desc->action) {
+ u8 mask = GPIO_CTRL_REFE_IRQ_MASK;
+ u8 shift = GPIO_CTRL_REFE_IRQ_SHIFT;
+ char *trigger;
+
+ switch ((ctrl_val & mask) >> shift) {
+ case GPIO_REFE_IRQ_EDGE_FALLING:
+ trigger = "edge-falling";
+ break;
+ case GPIO_REFE_IRQ_EDGE_RISING:
+ trigger = "edge-rising";
+ break;
+ case GPIO_REFE_IRQ_EDGE_BOTH:
+ trigger = "edge-both";
+ break;
+ default:
+ trigger = "masked";
+ break;
+ }
+
+ seq_printf(s, " irq-%d %s", irq, trigger);
+ }
+
+ dbnc = (ctrl_val & GPIO_CTRL_DBNC_MASK)
+ >> GPIO_CTRL_DBNC_SHIFT;
+ seq_printf(s, " debounce-%s",
+ dbnc == GPIO_DBNC_8MS ? "8ms" :
+ dbnc == GPIO_DBNC_16MS ? "16ms" :
+ dbnc == GPIO_DBNC_32MS ? "32ms" : "none");
+ } else {
+ seq_printf(s, " %s",
+ (ctrl_val & GPIO_CTRL_OUT_DRV_MASK ?
+ "output-drive" : "open-drain"));
+ }
+
+ seq_printf(s, "\n");
+ }
+}
+#else
+#define max77663_gpio_dbg_show NULL
+#endif /* CONFIG_DEBUG_FS */
+
+static int max77663_gpio_set_config(struct max77663_chip *chip,
+ struct max77663_gpio_config *gpio_cfg)
+{
+ int gpio = gpio_cfg->gpio;
+ u8 val = 0, mask = 0;
+ int ret = 0;
+
+ if ((gpio < MAX77663_GPIO0) || (MAX77663_GPIO7 < gpio))
+ return -EINVAL;
+
+ if (gpio_cfg->pull_up != GPIO_PU_DEF) {
+ ret = max77663_gpio_set_pull_up(chip, gpio, gpio_cfg->pull_up);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_set_config: "
+ "Failed to set gpio%d pull-up\n", gpio);
+ return ret;
+ }
+ }
+
+ if (gpio_cfg->pull_down != GPIO_PD_DEF) {
+ ret = max77663_gpio_set_pull_down(chip, gpio,
+ gpio_cfg->pull_down);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_set_config: "
+ "Failed to set gpio%d pull-down\n", gpio);
+ return ret;
+ }
+ }
+
+ if (gpio_cfg->dir != GPIO_DIR_DEF) {
+ mask = GPIO_CTRL_DIR_MASK;
+ if (gpio_cfg->dir == GPIO_DIR_IN) {
+ val |= GPIO_CTRL_DIR_MASK;
+ } else {
+ if (gpio_cfg->dout != GPIO_DOUT_DEF) {
+ mask |= GPIO_CTRL_DOUT_MASK;
+ if (gpio_cfg->dout == GPIO_DOUT_HIGH)
+ val |= GPIO_CTRL_DOUT_MASK;
+ }
+
+ if (gpio_cfg->out_drv != GPIO_OUT_DRV_DEF) {
+ mask |= GPIO_CTRL_OUT_DRV_MASK;
+ if (gpio_cfg->out_drv == GPIO_OUT_DRV_PUSH_PULL)
+ val |= GPIO_CTRL_OUT_DRV_MASK;
+ }
+ }
+
+ ret = max77663_cache_write(chip->dev, GPIO_REG_ADDR(gpio), mask,
+ val, &chip->cache_gpio_ctrl[gpio]);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_set_config: "
+ "Failed to set gpio%d control\n", gpio);
+ return ret;
+ }
+ }
+
+ if (gpio_cfg->alternate != GPIO_ALT_DEF) {
+ ret = max77663_gpio_set_alternate(gpio + chip->gpio_base,
+ gpio_cfg->alternate);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_set_config: "
+ "Failed to set gpio%d alternate\n", gpio);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int max77663_gpio_init(struct max77663_chip *chip)
+{
+ int i;
+ int ret;
+
+ chip->gpio.label = chip->i2c_power->name;
+ chip->gpio.dev = chip->dev;
+ chip->gpio.owner = THIS_MODULE;
+ chip->gpio.direction_input = max77663_gpio_dir_input;
+ chip->gpio.get = max77663_gpio_get;
+ chip->gpio.direction_output = max77663_gpio_dir_output;
+ chip->gpio.set_debounce = max77663_gpio_set_debounce;
+ chip->gpio.set = max77663_gpio_set;
+ chip->gpio.to_irq = max77663_gpio_to_irq;
+ chip->gpio.dbg_show = max77663_gpio_dbg_show;
+ chip->gpio.ngpio = MAX77663_GPIO_NR;
+ chip->gpio.can_sleep = 1;
+ if (chip->gpio_base)
+ chip->gpio.base = chip->gpio_base;
+ else
+ chip->gpio.base = -1;
+
+ ret = max77663_read(chip->dev, MAX77663_REG_GPIO_CTRL0,
+ chip->cache_gpio_ctrl, MAX77663_GPIO_NR, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_init: Failed to get gpio control\n");
+ return ret;
+ }
+
+ ret = max77663_read(chip->dev, MAX77663_REG_GPIO_PU,
+ &chip->cache_gpio_pu, 1, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_init: Failed to get gpio pull-up\n");
+ return ret;
+ }
+
+ ret = max77663_read(chip->dev, MAX77663_REG_GPIO_PD,
+ &chip->cache_gpio_pd, 1, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_init: Failed to get gpio pull-down\n");
+ return ret;
+ }
+
+ ret = max77663_read(chip->dev, MAX77663_REG_GPIO_ALT,
+ &chip->cache_gpio_alt, 1, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_init: Failed to get gpio alternate\n");
+ return ret;
+ }
+
+ ret = gpiochip_add(&chip->gpio);
+ if (ret < 0) {
+ dev_err(chip->dev, "gpio_init: Failed to add gpiochip\n");
+ return ret;
+ }
+ chip->gpio_base = chip->gpio.base;
+
+ for (i = 0; i < chip->pdata->num_gpio_cfgs; i++) {
+ ret = max77663_gpio_set_config(chip,
+ &chip->pdata->gpio_cfgs[i]);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "gpio_init: Failed to set gpio config\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void max77663_gpio_exit(struct max77663_chip *chip)
+{
+ if (gpiochip_remove(&chip->gpio) < 0)
+ dev_err(chip->dev, "gpio_exit: Failed to remove gpiochip\n");
+}
+
+static void max77663_irq_mask(struct irq_data *data)
+{
+ struct max77663_chip *chip = irq_data_get_irq_chip_data(data);
+
+ max77663_irqs[data->irq - chip->irq_base].is_unmask = 0;
+}
+
+static void max77663_irq_unmask(struct irq_data *data)
+{
+ struct max77663_chip *chip = irq_data_get_irq_chip_data(data);
+
+ max77663_irqs[data->irq - chip->irq_base].is_unmask = 1;
+}
+
+static void max77663_irq_lock(struct irq_data *data)
+{
+ struct max77663_chip *chip = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static void max77663_irq_sync_unlock(struct irq_data *data)
+{
+ struct max77663_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77663_irq_data *irq_data =
+ &max77663_irqs[data->irq - chip->irq_base];
+ int idx = irq_data->cache_idx;
+ u8 irq_top_mask = chip->cache_irq_top_mask;
+ u16 irq_mask = (idx != -1) ? chip->cache_irq_mask[idx] : 0;
+ int update_irq_top = 0;
+ u32 len = 1;
+ int ret;
+
+ if (irq_data->is_unmask) {
+ if (chip->irq_top_count[irq_data->top_shift] == 0)
+ update_irq_top = 1;
+ chip->irq_top_count[irq_data->top_shift]++;
+
+ if (irq_data->top_mask != IRQ_TOP_GLBL_MASK)
+ irq_top_mask &= ~irq_data->top_mask;
+
+ if (idx != -1)
+ irq_mask &= ~irq_data->mask;
+ } else {
+ if (chip->irq_top_count[irq_data->top_shift] == 1)
+ update_irq_top = 1;
+
+ if (--chip->irq_top_count[irq_data->top_shift] < 0)
+ chip->irq_top_count[irq_data->top_shift] = 0;
+
+ if (irq_data->top_mask != IRQ_TOP_GLBL_MASK)
+ irq_top_mask |= irq_data->top_mask;
+
+ if (idx != -1)
+ irq_mask |= irq_data->mask;
+ }
+
+ if ((idx != -1) && (irq_mask != chip->cache_irq_mask[idx])) {
+ if (irq_data->top_mask == IRQ_TOP_LDO_MASK)
+ len = 2;
+
+ ret = max77663_write(chip->dev, irq_data->mask_reg,
+ &irq_mask, len, irq_data->is_rtc);
+ if (ret < 0)
+ goto out;
+
+ chip->cache_irq_mask[idx] = irq_mask;
+ } else if ((idx == -1) && (irq_data->top_mask == IRQ_TOP_GPIO_MASK)) {
+ unsigned offset = data->irq - chip->irq_base - IRQ_GPIO_BASE;
+ u8 shift = GPIO_CTRL_REFE_IRQ_SHIFT;
+
+ if (irq_data->is_unmask) {
+ if (irq_data->trigger_type)
+ irq_mask = irq_data->trigger_type;
+ else
+ irq_mask = GPIO_REFE_IRQ_EDGE_FALLING << shift;
+ }
+
+ ret = max77663_cache_write(chip->dev, GPIO_REG_ADDR(offset),
+ GPIO_CTRL_REFE_IRQ_MASK, irq_mask,
+ &chip->cache_gpio_ctrl[offset]);
+ if (ret < 0)
+ goto out;
+
+ if (irq_data->is_unmask)
+ irq_data->trigger_type = irq_mask;
+ }
+
+ if (update_irq_top && (irq_top_mask != chip->cache_irq_top_mask)) {
+ ret = max77663_cache_write(chip->dev, MAX77663_REG_IRQ_TOP_MASK,
+ irq_data->top_mask, irq_top_mask,
+ &chip->cache_irq_top_mask);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&chip->irq_lock);
+}
+
+static int max77663_irq_gpio_set_type(struct irq_data *data, unsigned int type)
+{
+ struct max77663_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77663_irq_data *irq_data =
+ &max77663_irqs[data->irq - chip->irq_base];
+ unsigned offset = data->irq - chip->irq_base - IRQ_GPIO_BASE;
+ u8 shift = GPIO_CTRL_REFE_IRQ_SHIFT;
+ u8 val;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_EDGE_FALLING:
+ val = (GPIO_REFE_IRQ_EDGE_FALLING << shift);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = (GPIO_REFE_IRQ_EDGE_RISING << shift);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val = (GPIO_REFE_IRQ_EDGE_BOTH << shift);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_data->trigger_type = val;
+ if (!(chip->cache_gpio_ctrl[offset] & GPIO_CTRL_REFE_IRQ_MASK))
+ return 0;
+
+ return max77663_cache_write(chip->dev, GPIO_REG_ADDR(offset),
+ GPIO_CTRL_REFE_IRQ_MASK, val,
+ &chip->cache_gpio_ctrl[offset]);
+}
+
+static inline int max77663_do_irq(struct max77663_chip *chip, u8 addr,
+ int irq_base, int irq_end)
+{
+ struct max77663_irq_data *irq_data = NULL;
+ int irqs_to_handle[irq_end - irq_base + 1];
+ int handled = 0;
+ u16 val;
+ u32 len = 1;
+ int i;
+ int ret;
+
+ ret = max77663_read(chip->dev, addr, &val, len, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = irq_base; i <= irq_end; i++) {
+ irq_data = &max77663_irqs[i];
+ if (val & irq_data->mask) {
+ irqs_to_handle[handled] = i + chip->irq_base;
+ handled++;
+ }
+ }
+
+ for (i = 0; i < handled; i++)
+ handle_nested_irq(irqs_to_handle[i]);
+
+ return 0;
+}
+
+static irqreturn_t max77663_irq(int irq, void *data)
+{
+ struct max77663_chip *chip = data;
+ u8 irq_top;
+ int ret;
+
+ ret = max77663_read(chip->dev, MAX77663_REG_IRQ_TOP, &irq_top, 1, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "irq: Failed to get irq top status\n");
+ return IRQ_NONE;
+ }
+
+ if (irq_top & IRQ_TOP_GLBL_MASK) {
+ ret = max77663_do_irq(chip, MAX77663_REG_LBT_IRQ, IRQ_LBT_BASE,
+ IRQ_LBT_END);
+ if (ret < 0)
+ return IRQ_NONE;
+ }
+
+ if (irq_top & IRQ_TOP_GPIO_MASK) {
+ ret = max77663_do_irq(chip, MAX77663_REG_GPIO_IRQ,
+ IRQ_GPIO_BASE, IRQ_GPIO_END);
+ if (ret < 0)
+ return IRQ_NONE;
+ }
+
+ if (irq_top & IRQ_TOP_ONOFF_MASK) {
+ ret = max77663_do_irq(chip, MAX77663_REG_ONOFF_IRQ,
+ IRQ_ONOFF_BASE, IRQ_ONOFF_END);
+ if (ret < 0)
+ return IRQ_NONE;
+ }
+
+ if (irq_top & IRQ_TOP_RTC_MASK)
+ handle_nested_irq(MAX77663_IRQ_RTC + chip->irq_base);
+
+ if (irq_top & IRQ_TOP_SD_MASK)
+ handle_nested_irq(MAX77663_IRQ_SD_PF + chip->irq_base);
+
+ if (irq_top & IRQ_TOP_LDO_MASK)
+ handle_nested_irq(MAX77663_IRQ_LDO_PF + chip->irq_base);
+
+ if (irq_top & IRQ_TOP_32K_MASK)
+ handle_nested_irq(MAX77663_IRQ_32K + chip->irq_base);
+
+ if (irq_top & IRQ_TOP_NVER_MASK)
+ handle_nested_irq(MAX77663_IRQ_NVER + chip->irq_base);
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip max77663_irq_gpio_chip = {
+ .name = "max77663-irq",
+ .irq_mask = max77663_irq_mask,
+ .irq_unmask = max77663_irq_unmask,
+ .irq_set_type = max77663_irq_gpio_set_type,
+ .irq_bus_lock = max77663_irq_lock,
+ .irq_bus_sync_unlock = max77663_irq_sync_unlock,
+};
+
+static struct irq_chip max77663_irq_chip = {
+ .name = "max77663-irq",
+ .irq_mask = max77663_irq_mask,
+ .irq_unmask = max77663_irq_unmask,
+ .irq_bus_lock = max77663_irq_lock,
+ .irq_bus_sync_unlock = max77663_irq_sync_unlock,
+};
+
+static int max77663_irq_init(struct max77663_chip *chip)
+{
+ u32 temp;
+ int i, ret = 0;
+
+ mutex_init(&chip->irq_lock);
+
+ /* Mask all interrupts */
+ chip->cache_irq_top_mask = 0xFF;
+ chip->cache_irq_mask[CACHE_IRQ_LBT] = 0x0F;
+ chip->cache_irq_mask[CACHE_IRQ_SD] = 0xFF;
+ chip->cache_irq_mask[CACHE_IRQ_LDO] = 0xFFFF;
+ chip->cache_irq_mask[CACHE_IRQ_ONOFF] = 0xFF;
+
+ max77663_write(chip->dev, MAX77663_REG_IRQ_TOP_MASK,
+ &chip->cache_irq_top_mask, 1, 0);
+ max77663_write(chip->dev, MAX77663_REG_LBT_IRQ_MASK,
+ &chip->cache_irq_mask[CACHE_IRQ_LBT], 1, 0);
+ max77663_write(chip->dev, MAX77663_REG_SD_IRQ_MASK,
+ &chip->cache_irq_mask[CACHE_IRQ_SD], 1, 0);
+ max77663_write(chip->dev, MAX77663_REG_LDOX_IRQ_MASK,
+ &chip->cache_irq_mask[CACHE_IRQ_LDO], 2, 0);
+ max77663_write(chip->dev, MAX77663_REG_ONOFF_IRQ_MASK,
+ &chip->cache_irq_mask[CACHE_IRQ_ONOFF], 1, 0);
+
+ /* Clear all interrupts */
+ max77663_read(chip->dev, MAX77663_REG_LBT_IRQ, &temp, 1, 0);
+ max77663_read(chip->dev, MAX77663_REG_SD_IRQ, &temp, 1, 0);
+ max77663_read(chip->dev, MAX77663_REG_LDOX_IRQ, &temp, 2, 0);
+ max77663_read(chip->dev, MAX77663_REG_GPIO_IRQ, &temp, 1, 0);
+ max77663_read(chip->dev, MAX77663_REG_ONOFF_IRQ, &temp, 1, 0);
+
+ for (i = chip->irq_base; i < (MAX77663_IRQ_NR + chip->irq_base); i++) {
+ if (i >= NR_IRQS) {
+ dev_err(chip->dev,
+ "irq_init: Can't set irq chip for irq %d\n", i);
+ continue;
+ }
+
+ irq_set_chip_data(i, chip);
+
+ if ((IRQ_GPIO_BASE <= i - chip->irq_base) &&
+ (i - chip->irq_base <= IRQ_GPIO_END))
+ irq_set_chip_and_handler(i, &max77663_irq_gpio_chip,
+ handle_edge_irq);
+ else
+ irq_set_chip_and_handler(i, &max77663_irq_chip,
+ handle_edge_irq);
+#ifdef CONFIG_ARM
+ set_irq_flags(i, IRQF_VALID);
+#else
+ irq_set_noprobe(i);
+#endif
+ irq_set_nested_thread(i, 1);
+ }
+
+ ret = request_threaded_irq(chip->i2c_power->irq, NULL, max77663_irq,
+ IRQF_ONESHOT, "max77663", chip);
+ if (ret) {
+ dev_err(chip->dev, "irq_init: Failed to request irq %d\n",
+ chip->i2c_power->irq);
+ return ret;
+ }
+
+ device_init_wakeup(chip->dev, 1);
+ enable_irq_wake(chip->i2c_power->irq);
+
+ chip->cache_irq_top_mask &= ~IRQ_TOP_GLBL_MASK;
+ max77663_write(chip->dev, MAX77663_REG_IRQ_TOP_MASK,
+ &chip->cache_irq_top_mask, 1, 0);
+
+ chip->cache_irq_mask[CACHE_IRQ_LBT] &= ~IRQ_GLBL_MASK;
+ max77663_write(chip->dev, MAX77663_REG_LBT_IRQ_MASK,
+ &chip->cache_irq_mask[CACHE_IRQ_LBT], 1, 0);
+
+ return 0;
+}
+
+static void max77663_irq_exit(struct max77663_chip *chip)
+{
+ if (chip->i2c_power->irq)
+ free_irq(chip->i2c_power->irq, chip);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *max77663_dentry_regs;
+
+static int max77663_debugfs_dump_regs(struct max77663_chip *chip, char *label,
+ u8 *addrs, int num_addrs, char *buf,
+ ssize_t *len, int is_rtc)
+{
+ ssize_t count = *len;
+ u8 val;
+ int ret = 0;
+ int i;
+
+ count += sprintf(buf + count, "%s\n", label);
+ if (count >= PAGE_SIZE - 1)
+ return -ERANGE;
+
+ for (i = 0; i < num_addrs; i++) {
+ count += sprintf(buf + count, "0x%02x: ", addrs[i]);
+ if (count >= PAGE_SIZE - 1)
+ return -ERANGE;
+
+ ret = max77663_read(chip->dev, addrs[i], &val, 1, is_rtc);
+ if (ret == 0)
+ count += sprintf(buf + count, "0x%02x\n", val);
+ else
+ count += sprintf(buf + count, "<read fail: %d>\n", ret);
+
+ if (count >= PAGE_SIZE - 1)
+ return -ERANGE;
+ }
+
+ *len = count;
+ return 0;
+}
+
+static int max77663_debugfs_regs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t max77663_debugfs_regs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct max77663_chip *chip = file->private_data;
+ char *buf;
+ ssize_t len = 0;
+ ssize_t ret;
+
+ /* Interrupt status registers are excluded so reading here doesn't clear them */
+ u8 global_regs[] = { 0x00, 0x01, 0x02, 0x05, 0x0D, 0x0E, 0x13 };
+ u8 sd_regs[] = {
+ 0x07, 0x0F, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22
+ };
+ u8 ldo_regs[] = {
+ 0x10, 0x11, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
+ 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34,
+ 0x35
+ };
+ u8 gpio_regs[] = {
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+ 0x40
+ };
+ u8 rtc_regs[] = {
+ 0x01, 0x02, 0x03, 0x04, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B
+ };
+ u8 osc_32k_regs[] = { 0x03 };
+ u8 bbc_regs[] = { 0x04 };
+ u8 onoff_regs[] = { 0x12, 0x15, 0x41, 0x42 };
+ u8 fps_regs[] = {
+ 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C,
+ 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56,
+ 0x57
+ };
+ u8 cid_regs[] = { 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D };
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += sprintf(buf + len, "MAX77663 Registers\n");
+ max77663_debugfs_dump_regs(chip, "[Global]", global_regs,
+ ARRAY_SIZE(global_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[Step-Down]", sd_regs,
+ ARRAY_SIZE(sd_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[LDO]", ldo_regs,
+ ARRAY_SIZE(ldo_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[GPIO]", gpio_regs,
+ ARRAY_SIZE(gpio_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[RTC]", rtc_regs,
+ ARRAY_SIZE(rtc_regs), buf, &len, 1);
+ max77663_debugfs_dump_regs(chip, "[32kHz Oscillator]", osc_32k_regs,
+ ARRAY_SIZE(osc_32k_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[Backup Battery Charger]", bbc_regs,
+ ARRAY_SIZE(bbc_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[On/OFF Controller]", onoff_regs,
+ ARRAY_SIZE(onoff_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[Flexible Power Sequencer]", fps_regs,
+ ARRAY_SIZE(fps_regs), buf, &len, 0);
+ max77663_debugfs_dump_regs(chip, "[Chip Identification]", cid_regs,
+ ARRAY_SIZE(cid_regs), buf, &len, 0);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations max77663_debugfs_regs_fops = {
+ .open = max77663_debugfs_regs_open,
+ .read = max77663_debugfs_regs_read,
+};
+
+static void max77663_debugfs_init(struct max77663_chip *chip)
+{
+ max77663_dentry_regs = debugfs_create_file(chip->i2c_power->name,
+ 0444, NULL, chip,
+ &max77663_debugfs_regs_fops);
+ if (!max77663_dentry_regs)
+ dev_warn(chip->dev,
+ "debugfs_init: Failed to create debugfs file\n");
+}
+
+static void max77663_debugfs_exit(struct max77663_chip *chip)
+{
+ debugfs_remove(max77663_dentry_regs);
+}
+#else
+static inline void max77663_debugfs_init(struct max77663_chip *chip)
+{
+}
+
+static inline void max77663_debugfs_exit(struct max77663_chip *chip)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
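+/*
+ * Probe: allocate the chip, create the dummy RTC client, bring up GPIOs,
+ * interrupts and debugfs, enable sleep mode, then register the MFD
+ * sub-devices from platform data.
+ */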
+static int max77663_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max77663_platform_data *pdata = client->dev.platform_data;
+ struct max77663_chip *chip;
+ int ret = 0;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "probe: Invalid platform_data\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ chip = kzalloc(sizeof(struct max77663_chip), GFP_KERNEL);
+ if (chip == NULL) {
+ dev_err(&client->dev, "probe: kzalloc() failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ max77663_chip = chip;
+
+ chip->i2c_power = client;
+ i2c_set_clientdata(client, chip);
+
+ chip->i2c_rtc = i2c_new_dummy(client->adapter, MAX77663_RTC_I2C_ADDR);
+ i2c_set_clientdata(chip->i2c_rtc, chip);
+
+ chip->dev = &client->dev;
+ chip->pdata = pdata;
+ chip->irq_base = pdata->irq_base;
+ chip->gpio_base = pdata->gpio_base;
+ mutex_init(&chip->io_lock);
+
+ max77663_gpio_init(chip);
+ max77663_irq_init(chip);
+ max77663_debugfs_init(chip);
+ max77663_sleep_enable(chip);
+
+ ret = mfd_add_devices(&client->dev, 0, pdata->sub_devices,
+ pdata->num_subdevs, NULL, 0);
+ if (ret != 0) {
+ dev_err(&client->dev, "probe: Failed to add subdev: %d\n", ret);
+ goto out_exit;
+ }
+
+ return 0;
+
+out_exit:
+ max77663_debugfs_exit(chip);
+ max77663_gpio_exit(chip);
+ max77663_irq_exit(chip);
+ mutex_destroy(&chip->io_lock);
+ max77663_chip = NULL;
+ kfree(chip);
+out:
+ return ret;
+}
+
+static int __devexit max77663_remove(struct i2c_client *client)
+{
+ struct max77663_chip *chip = i2c_get_clientdata(client);
+
+ mfd_remove_devices(chip->dev);
+ max77663_debugfs_exit(chip);
+ max77663_irq_exit(chip);
+ max77663_gpio_exit(chip);
+ mutex_destroy(&chip->io_lock);
+ max77663_chip = NULL;
+ kfree(chip);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int max77663_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max77663_chip *chip = i2c_get_clientdata(client);
+ int ret;
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ ret = max77663_sleep_enable(chip);
+ if (ret < 0)
+ dev_err(dev, "suspend: Failed to enable sleep\n");
+
+ return ret;
+}
+
+static int max77663_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max77663_chip *chip = i2c_get_clientdata(client);
+ int ret;
+
+ ret = max77663_sleep_enable(chip);
+ if (ret < 0) {
+ dev_err(dev, "resume: Failed to enable sleep\n");
+ return ret;
+ }
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+#else
+#define max77663_suspend NULL
+#define max77663_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id max77663_id[] = {
+ {"max77663", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, max77663_id);
+
+static const struct dev_pm_ops max77663_pm = {
+ .suspend = max77663_suspend,
+ .resume = max77663_resume,
+};
+
+static struct i2c_driver max77663_driver = {
+ .driver = {
+ .name = "max77663",
+ .owner = THIS_MODULE,
+ .pm = &max77663_pm,
+ },
+ .probe = max77663_probe,
+ .remove = __devexit_p(max77663_remove),
+ .id_table = max77663_id,
+};
+
+static int __init max77663_init(void)
+{
+ return i2c_add_driver(&max77663_driver);
+}
+arch_initcall(max77663_init);
+
+static void __exit max77663_exit(void)
+{
+ i2c_del_driver(&max77663_driver);
+}
+module_exit(max77663_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MAX77663 Multi Function Device Core Driver");
+MODULE_VERSION("1.0");
diff --git a/drivers/mfd/max8907c-irq.c b/drivers/mfd/max8907c-irq.c
new file mode 100644
index 000000000000..8d6e9600e7fe
--- /dev/null
+++ b/drivers/mfd/max8907c-irq.c
@@ -0,0 +1,425 @@
+/*
+ * IRQ driver for Maxim MAX8907C
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Based on drivers/mfd/max8925-core.c, Copyright (C) 2009-2010 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max8907c.h>
+
+struct max8907c_irq_data {
+ int reg;
+ int mask_reg;
+ int enable; /* enable bit mask; 0 when the IRQ is disabled */
+ int offs; /* bit mask of this source within its IRQ/mask registers */
+ bool is_rtc;
+ int wake;
+};
+
+static struct max8907c_irq_data max8907c_irqs[] = {
+ [MAX8907C_IRQ_VCHG_DC_OVP] = {
+ .reg = MAX8907C_REG_CHG_IRQ1,
+ .mask_reg = MAX8907C_REG_CHG_IRQ1_MASK,
+ .offs = 1 << 0,
+ },
+ [MAX8907C_IRQ_VCHG_DC_F] = {
+ .reg = MAX8907C_REG_CHG_IRQ1,
+ .mask_reg = MAX8907C_REG_CHG_IRQ1_MASK,
+ .offs = 1 << 1,
+ },
+ [MAX8907C_IRQ_VCHG_DC_R] = {
+ .reg = MAX8907C_REG_CHG_IRQ1,
+ .mask_reg = MAX8907C_REG_CHG_IRQ1_MASK,
+ .offs = 1 << 2,
+ },
+ [MAX8907C_IRQ_VCHG_THM_OK_R] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 0,
+ },
+ [MAX8907C_IRQ_VCHG_THM_OK_F] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 1,
+ },
+ [MAX8907C_IRQ_VCHG_MBATTLOW_F] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 2,
+ },
+ [MAX8907C_IRQ_VCHG_MBATTLOW_R] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 3,
+ },
+ [MAX8907C_IRQ_VCHG_RST] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 4,
+ },
+ [MAX8907C_IRQ_VCHG_DONE] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 5,
+ },
+ [MAX8907C_IRQ_VCHG_TOPOFF] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 6,
+ },
+ [MAX8907C_IRQ_VCHG_TMR_FAULT] = {
+ .reg = MAX8907C_REG_CHG_IRQ2,
+ .mask_reg = MAX8907C_REG_CHG_IRQ2_MASK,
+ .offs = 1 << 7,
+ },
+ [MAX8907C_IRQ_GPM_RSTIN] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 0,
+ },
+ [MAX8907C_IRQ_GPM_MPL] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 1,
+ },
+ [MAX8907C_IRQ_GPM_SW_3SEC] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 2,
+ },
+ [MAX8907C_IRQ_GPM_EXTON_F] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 3,
+ },
+ [MAX8907C_IRQ_GPM_EXTON_R] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 4,
+ },
+ [MAX8907C_IRQ_GPM_SW_1SEC] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 5,
+ },
+ [MAX8907C_IRQ_GPM_SW_F] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 6,
+ },
+ [MAX8907C_IRQ_GPM_SW_R] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ1,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ .offs = 1 << 7,
+ },
+ [MAX8907C_IRQ_GPM_SYSCKEN_F] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ2,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ2_MASK,
+ .offs = 1 << 0,
+ },
+ [MAX8907C_IRQ_GPM_SYSCKEN_R] = {
+ .reg = MAX8907C_REG_ON_OFF_IRQ2,
+ .mask_reg = MAX8907C_REG_ON_OFF_IRQ2_MASK,
+ .offs = 1 << 1,
+ },
+ [MAX8907C_IRQ_RTC_ALARM1] = {
+ .reg = MAX8907C_REG_RTC_IRQ,
+ .mask_reg = MAX8907C_REG_RTC_IRQ_MASK,
+ .offs = 1 << 2,
+ .is_rtc = true,
+ },
+ [MAX8907C_IRQ_RTC_ALARM0] = {
+ .reg = MAX8907C_REG_RTC_IRQ,
+ .mask_reg = MAX8907C_REG_RTC_IRQ_MASK,
+ .offs = 1 << 3,
+ .is_rtc = true,
+ },
+};
+
+static inline struct max8907c_irq_data *irq_to_max8907c(struct max8907c *chip,
+ int irq)
+{
+ return &max8907c_irqs[irq - chip->irq_base];
+}
+
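+/*
+ * Threaded handler: walk the IRQ table, re-reading a status register only
+ * when it differs from the one read for the previous entry, and dispatch
+ * nested handlers for the sources that are currently enabled.
+ */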
+static irqreturn_t max8907c_irq(int irq, void *data)
+{
+ struct max8907c *chip = data;
+ struct max8907c_irq_data *irq_data;
+ struct i2c_client *i2c;
+ int read_reg = -1, value = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8907c_irqs); i++) {
+ irq_data = &max8907c_irqs[i];
+
+ if (irq_data->is_rtc)
+ i2c = chip->i2c_rtc;
+ else
+ i2c = chip->i2c_power;
+
+ if (read_reg != irq_data->reg) {
+ read_reg = irq_data->reg;
+ value = max8907c_reg_read(i2c, irq_data->reg);
+ }
+
+ if (value & irq_data->enable)
+ handle_nested_irq(chip->irq_base + i);
+ }
+ return IRQ_HANDLED;
+}
+
+static void max8907c_irq_lock(struct irq_data *data)
+{
+ struct max8907c *chip = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&chip->irq_lock);
+}
+
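+/*
+ * On bus sync-unlock, rebuild the five mask registers from the per-IRQ
+ * enable bits and write back only the ones whose cached value changed.
+ */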
+static void max8907c_irq_sync_unlock(struct irq_data *data)
+{
+ struct max8907c *chip = irq_data_get_irq_chip_data(data);
+ struct max8907c_irq_data *irq_data;
+ unsigned char irq_chg[2], irq_on[2];
+ unsigned char irq_rtc;
+ int i;
+
+ irq_chg[0] = irq_chg[1] = irq_on[0] = irq_on[1] = irq_rtc = 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(max8907c_irqs); i++) {
+ irq_data = &max8907c_irqs[i];
+ /* 1 -- disable, 0 -- enable */
+ switch (irq_data->mask_reg) {
+ case MAX8907C_REG_CHG_IRQ1_MASK:
+ irq_chg[0] &= ~irq_data->enable;
+ break;
+ case MAX8907C_REG_CHG_IRQ2_MASK:
+ irq_chg[1] &= ~irq_data->enable;
+ break;
+ case MAX8907C_REG_ON_OFF_IRQ1_MASK:
+ irq_on[0] &= ~irq_data->enable;
+ break;
+ case MAX8907C_REG_ON_OFF_IRQ2_MASK:
+ irq_on[1] &= ~irq_data->enable;
+ break;
+ case MAX8907C_REG_RTC_IRQ_MASK:
+ irq_rtc &= ~irq_data->enable;
+ break;
+ default:
+ dev_err(chip->dev, "wrong IRQ\n");
+ break;
+ }
+ }
+ /* update mask into registers */
+ if (chip->cache_chg[0] != irq_chg[0]) {
+ chip->cache_chg[0] = irq_chg[0];
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ1_MASK,
+ irq_chg[0]);
+ }
+ if (chip->cache_chg[1] != irq_chg[1]) {
+ chip->cache_chg[1] = irq_chg[1];
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ2_MASK,
+ irq_chg[1]);
+ }
+ if (chip->cache_on[0] != irq_on[0]) {
+ chip->cache_on[0] = irq_on[0];
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ1_MASK,
+ irq_on[0]);
+ }
+ if (chip->cache_on[1] != irq_on[1]) {
+ chip->cache_on[1] = irq_on[1];
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ2_MASK,
+ irq_on[1]);
+ }
+ if (chip->cache_rtc != irq_rtc) {
+ chip->cache_rtc = irq_rtc;
+ max8907c_reg_write(chip->i2c_rtc, MAX8907C_REG_RTC_IRQ_MASK,
+ irq_rtc);
+ }
+
+ mutex_unlock(&chip->irq_lock);
+}
+
+static void max8907c_irq_enable(struct irq_data *data)
+{
+ struct max8907c *chip = irq_data_get_irq_chip_data(data);
+ max8907c_irqs[data->irq - chip->irq_base].enable
+ = max8907c_irqs[data->irq - chip->irq_base].offs;
+}
+
+static void max8907c_irq_disable(struct irq_data *data)
+{
+ struct max8907c *chip = irq_data_get_irq_chip_data(data);
+ max8907c_irqs[data->irq - chip->irq_base].enable = 0;
+}
+
+static int max8907c_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct max8907c *chip = irq_data_get_irq_chip_data(data);
+ if (on) {
+ max8907c_irqs[data->irq - chip->irq_base].wake
+ = max8907c_irqs[data->irq - chip->irq_base].enable;
+ } else {
+ max8907c_irqs[data->irq - chip->irq_base].wake = 0;
+ }
+ return 0;
+}
+
+static struct irq_chip max8907c_irq_chip = {
+ .name = "max8907c",
+ .irq_bus_lock = max8907c_irq_lock,
+ .irq_bus_sync_unlock = max8907c_irq_sync_unlock,
+ .irq_enable = max8907c_irq_enable,
+ .irq_disable = max8907c_irq_disable,
+ .irq_set_wake = max8907c_irq_set_wake,
+};
+
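+/*
+ * Clear and mask every interrupt source, register a nested irq_chip for
+ * each PMIC interrupt, then request the one-shot threaded core IRQ and
+ * mark the device wakeup-capable.
+ */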
+int max8907c_irq_init(struct max8907c *chip, int irq, int irq_base)
+{
+ unsigned long flags = IRQF_ONESHOT;
+ struct irq_desc *desc;
+ int i, ret;
+ int __irq;
+
+ if (!irq_base || !irq) {
+ dev_warn(chip->dev, "No interrupt support\n");
+ return -EINVAL;
+ }
+ /* clear all interrupts */
+ max8907c_reg_read(chip->i2c_power, MAX8907C_REG_CHG_IRQ1);
+ max8907c_reg_read(chip->i2c_power, MAX8907C_REG_CHG_IRQ2);
+ max8907c_reg_read(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ1);
+ max8907c_reg_read(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ2);
+ max8907c_reg_read(chip->i2c_rtc, MAX8907C_REG_RTC_IRQ);
+ /* mask all interrupts */
+ max8907c_reg_write(chip->i2c_rtc, MAX8907C_REG_ALARM0_CNTL, 0);
+ max8907c_reg_write(chip->i2c_rtc, MAX8907C_REG_ALARM1_CNTL, 0);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ1_MASK, 0xff);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ2_MASK, 0xff);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ1_MASK, 0xff);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ2_MASK, 0xff);
+ max8907c_reg_write(chip->i2c_rtc, MAX8907C_REG_RTC_IRQ_MASK, 0xff);
+
+ chip->cache_chg[0] = chip->cache_chg[1] =
+ chip->cache_on[0] = chip->cache_on[1] =
+ chip->cache_rtc = 0xFF;
+
+ mutex_init(&chip->irq_lock);
+ chip->core_irq = irq;
+ chip->irq_base = irq_base;
+ desc = irq_to_desc(chip->core_irq);
+
+ /* register with genirq */
+ for (i = 0; i < ARRAY_SIZE(max8907c_irqs); i++) {
+ __irq = i + chip->irq_base;
+ irq_set_chip_data(__irq, chip);
+ irq_set_chip_and_handler(__irq, &max8907c_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+ /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
+ * sets on behalf of every irq_chip.
+ */
+ set_irq_flags(__irq, IRQF_VALID);
+#else
+ irq_set_noprobe(__irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, max8907c_irq, flags,
+ "max8907c", chip);
+ if (ret) {
+ dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
+ chip->core_irq = 0;
+ }
+
+ device_init_wakeup(chip->dev, 1);
+
+ return ret;
+}
+
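+/*
+ * For suspend, program the mask registers from the per-IRQ wake flags so
+ * only wake-enabled sources stay unmasked; resume restores the cached
+ * runtime masks.
+ */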
+int max8907c_suspend(struct i2c_client *i2c, pm_message_t state)
+{
+ struct max8907c *chip = i2c_get_clientdata(i2c);
+
+ struct max8907c_irq_data *irq_data;
+ unsigned char irq_chg[2], irq_on[2];
+ unsigned char irq_rtc;
+ int i;
+
+ irq_chg[0] = irq_chg[1] = irq_on[0] = irq_on[1] = irq_rtc = 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(max8907c_irqs); i++) {
+ irq_data = &max8907c_irqs[i];
+ /* 1 -- disable, 0 -- enable */
+ switch (irq_data->mask_reg) {
+ case MAX8907C_REG_CHG_IRQ1_MASK:
+ irq_chg[0] &= ~irq_data->wake;
+ break;
+ case MAX8907C_REG_CHG_IRQ2_MASK:
+ irq_chg[1] &= ~irq_data->wake;
+ break;
+ case MAX8907C_REG_ON_OFF_IRQ1_MASK:
+ irq_on[0] &= ~irq_data->wake;
+ break;
+ case MAX8907C_REG_ON_OFF_IRQ2_MASK:
+ irq_on[1] &= ~irq_data->wake;
+ break;
+ case MAX8907C_REG_RTC_IRQ_MASK:
+ irq_rtc &= ~irq_data->wake;
+ break;
+ default:
+ dev_err(chip->dev, "wrong IRQ\n");
+ break;
+ }
+ }
+
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ1_MASK, irq_chg[0]);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ2_MASK, irq_chg[1]);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ1_MASK, irq_on[0]);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ2_MASK, irq_on[1]);
+ max8907c_reg_write(chip->i2c_rtc, MAX8907C_REG_RTC_IRQ_MASK, irq_rtc);
+
+ if (device_may_wakeup(chip->dev))
+ enable_irq_wake(chip->core_irq);
+ else
+ disable_irq(chip->core_irq);
+
+ return 0;
+}
+
+int max8907c_resume(struct i2c_client *i2c)
+{
+ struct max8907c *chip = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(chip->dev))
+ disable_irq_wake(chip->core_irq);
+ else
+ enable_irq(chip->core_irq);
+
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ1_MASK, chip->cache_chg[0]);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_CHG_IRQ2_MASK, chip->cache_chg[1]);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ1_MASK, chip->cache_on[0]);
+ max8907c_reg_write(chip->i2c_power, MAX8907C_REG_ON_OFF_IRQ2_MASK, chip->cache_on[1]);
+ max8907c_reg_write(chip->i2c_rtc, MAX8907C_REG_RTC_IRQ_MASK, chip->cache_rtc);
+
+ return 0;
+}
+
+void max8907c_irq_free(struct max8907c *chip)
+{
+ if (chip->core_irq)
+ free_irq(chip->core_irq, chip);
+}
+
diff --git a/drivers/mfd/max8907c.c b/drivers/mfd/max8907c.c
new file mode 100644
index 000000000000..f87524ee860b
--- /dev/null
+++ b/drivers/mfd/max8907c.c
@@ -0,0 +1,373 @@
+/*
+ * max8907c.c - mfd driver for MAX8907c
+ *
+ * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max8907c.h>
+
+static struct mfd_cell cells[] = {
+ {.name = "max8907-regulator",},
+ {.name = "max8907c-rtc",},
+};
+
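+/*
+ * Low-level register access helpers: a combined transfer that sends the
+ * register address and then reads 'count' bytes, and a write that prepends
+ * the register address to the payload.
+ */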
+static int max8907c_i2c_read(struct i2c_client *i2c, u8 reg, u8 count, u8 *dest)
+{
+ struct i2c_msg xfer[2];
+ int ret = 0;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = I2C_M_NOSTART;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = count;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int max8907c_i2c_write(struct i2c_client *i2c, u8 reg, u8 count, const u8 *src)
+{
+ u8 msg[0x100 + 1];
+ int ret = 0;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, count);
+
+ ret = i2c_master_send(i2c, msg, count + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != count + 1)
+ return -EIO;
+
+ return 0;
+}
+
+int max8907c_reg_read(struct i2c_client *i2c, u8 reg)
+{
+ int ret;
+ u8 val;
+
+ ret = max8907c_i2c_read(i2c, reg, 1, &val);
+
+ pr_debug("max8907c: reg read reg=%x, val=%x\n",
+ (unsigned int)reg, (unsigned int)val);
+
+ if (ret != 0)
+ pr_err("Failed to read max8907c I2C driver: %d\n", ret);
+ return val;
+}
+EXPORT_SYMBOL_GPL(max8907c_reg_read);
+
+int max8907c_reg_bulk_read(struct i2c_client *i2c, u8 reg, u8 count, u8 *val)
+{
+ int ret;
+
+ ret = max8907c_i2c_read(i2c, reg, count, val);
+
+ pr_debug("max8907c: reg read reg=%x, val=%x\n",
+ (unsigned int)reg, (unsigned int)*val);
+
+ if (ret != 0)
+ pr_err("Failed to read max8907c I2C driver: %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8907c_reg_bulk_read);
+
+int max8907c_reg_write(struct i2c_client *i2c, u8 reg, u8 val)
+{
+ struct max8907c *max8907c = i2c_get_clientdata(i2c);
+ int ret;
+
+ pr_debug("max8907c: reg write reg=%x, val=%x\n",
+ (unsigned int)reg, (unsigned int)val);
+
+ mutex_lock(&max8907c->io_lock);
+ ret = max8907c_i2c_write(i2c, reg, 1, &val);
+ mutex_unlock(&max8907c->io_lock);
+
+ if (ret != 0)
+ pr_err("Failed to write max8907c I2C driver: %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8907c_reg_write);
+
+int max8907c_reg_bulk_write(struct i2c_client *i2c, u8 reg, u8 count, u8 *val)
+{
+ struct max8907c *max8907c = i2c_get_clientdata(i2c);
+ int ret;
+
+ pr_debug("max8907c: reg write reg=%x, val=%x\n",
+ (unsigned int)reg, (unsigned int)*val);
+
+ mutex_lock(&max8907c->io_lock);
+ ret = max8907c_i2c_write(i2c, reg, count, val);
+ mutex_unlock(&max8907c->io_lock);
+
+ if (ret != 0)
+ pr_err("Failed to write max8907c I2C driver: %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8907c_reg_bulk_write);
+
+int max8907c_set_bits(struct i2c_client *i2c, u8 reg, u8 mask, u8 val)
+{
+ struct max8907c *max8907c = i2c_get_clientdata(i2c);
+ u8 tmp;
+ int ret;
+
+ pr_debug("max8907c: reg write reg=%02X, val=%02X, mask=%02X\n",
+ (unsigned int)reg, (unsigned int)val, (unsigned int)mask);
+
+ mutex_lock(&max8907c->io_lock);
+ ret = max8907c_i2c_read(i2c, reg, 1, &tmp);
+ if (ret == 0) {
+ val = (tmp & ~mask) | (val & mask);
+ ret = max8907c_i2c_write(i2c, reg, 1, &val);
+ }
+ mutex_unlock(&max8907c->io_lock);
+
+ if (ret != 0)
+ pr_err("Failed to write max8907c I2C driver: %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8907c_set_bits);
+
+static struct i2c_client *max8907c_client = NULL;
+int max8907c_power_off(void)
+{
+ if (!max8907c_client)
+ return -EINVAL;
+
+ return max8907c_set_bits(max8907c_client, MAX8907C_REG_RESET_CNFG,
+ MAX8907C_MASK_POWER_OFF, 0x40);
+}
+
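+/*
+ * Reprogram the power sequencer counters (SDSEQCNT1/2) and the SDCTL2
+ * sequence selection for deep-sleep entry or exit.
+ */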
+void max8907c_deep_sleep(int enter)
+{
+ if (!max8907c_client)
+ return;
+
+ if (enter) {
+ max8907c_reg_write(max8907c_client, MAX8907C_REG_SDSEQCNT1,
+ MAX8907C_POWER_UP_DELAY_CNT12);
+ max8907c_reg_write(max8907c_client, MAX8907C_REG_SDSEQCNT2,
+ MAX8907C_DELAY_CNT0);
+ max8907c_reg_write(max8907c_client, MAX8907C_REG_SDCTL2,
+ MAX8907C_SD_SEQ2);
+ } else {
+ max8907c_reg_write(max8907c_client, MAX8907C_REG_SDSEQCNT1,
+ MAX8907C_DELAY_CNT0);
+ max8907c_reg_write(max8907c_client, MAX8907C_REG_SDCTL2,
+ MAX8907C_SD_SEQ1);
+ max8907c_reg_write(max8907c_client, MAX8907C_REG_SDSEQCNT2,
+ MAX8907C_POWER_UP_DELAY_CNT1 | MAX8907C_POWER_DOWN_DELAY_CNT12);
+ }
+}
+
+static int max8907c_remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int max8907c_remove_subdevs(struct max8907c *max8907c)
+{
+ return device_for_each_child(max8907c->dev, NULL,
+ max8907c_remove_subdev);
+}
+
+static int max8907c_add_subdevs(struct max8907c *max8907c,
+ struct max8907c_platform_data *pdata)
+{
+ struct platform_device *pdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ pdev = platform_device_alloc(pdata->subdevs[i]->name,
+ pdata->subdevs[i]->id);
+
+ pdev->dev.parent = max8907c->dev;
+ pdev->dev.platform_data = pdata->subdevs[i]->dev.platform_data;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ max8907c_remove_subdevs(max8907c);
+ return ret;
+}
+
+int max8907c_pwr_en_config(void)
+{
+ int ret;
+ u8 data;
+
+ if (!max8907c_client)
+ return -EINVAL;
+
+ /*
+ * Enable/disable PWREN h/w control mechanism (PWREN signal must be
+ * inactive = high at this time)
+ */
+ ret = max8907c_set_bits(max8907c_client, MAX8907C_REG_RESET_CNFG,
+ MAX8907C_MASK_PWR_EN, MAX8907C_PWR_EN);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * When enabled, connect PWREN to SEQ2 by clearing the SEQ2 configuration
+ * settings on silicon revisions that require the s/w workaround. On other
+ * MAX8907B revisions, PWREN is always connected to SEQ2.
+ */
+ data = max8907c_reg_read(max8907c_client, MAX8907C_REG_II2RR);
+
+ if (data == MAX8907B_II2RR_PWREN_WAR) {
+ data = 0x00;
+ ret = max8907c_reg_write(max8907c_client, MAX8907C_REG_SEQ2CNFG, data);
+ }
+ return ret;
+}
+
+int max8907c_pwr_en_attach(void)
+{
+ int ret;
+
+ if (!max8907c_client)
+ return -EINVAL;
+
+ /* No sequencer delay for CPU rail when it is attached */
+ ret = max8907c_reg_write(max8907c_client, MAX8907C_REG_SDSEQCNT1,
+ MAX8907C_DELAY_CNT0);
+ if (ret != 0)
+ return ret;
+
+ return max8907c_set_bits(max8907c_client, MAX8907C_REG_SDCTL1,
+ MAX8907C_MASK_CTL_SEQ, MAX8907C_CTL_SEQ);
+}
+
+static int max8907c_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max8907c *max8907c;
+ struct max8907c_platform_data *pdata = i2c->dev.platform_data;
+ int ret;
+ int i;
+
+ max8907c = kzalloc(sizeof(struct max8907c), GFP_KERNEL);
+ if (max8907c == NULL)
+ return -ENOMEM;
+
+ max8907c->dev = &i2c->dev;
+ dev_set_drvdata(max8907c->dev, max8907c);
+
+ max8907c->i2c_power = i2c;
+ i2c_set_clientdata(i2c, max8907c);
+
+ max8907c->i2c_rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ i2c_set_clientdata(max8907c->i2c_rtc, max8907c);
+
+ mutex_init(&max8907c->io_lock);
+
+ for (i = 0; i < ARRAY_SIZE(cells); i++) {
+ cells[i].platform_data = max8907c;
+ cells[i].pdata_size = sizeof(*max8907c);
+ }
+ ret = mfd_add_devices(max8907c->dev, -1, cells, ARRAY_SIZE(cells),
+ NULL, 0);
+ if (ret != 0) {
+ i2c_unregister_device(max8907c->i2c_rtc);
+ kfree(max8907c);
+ pr_debug("max8907c: failed to add MFD devices %X\n", ret);
+ return ret;
+ }
+
+ max8907c_client = i2c;
+
+ max8907c_irq_init(max8907c, i2c->irq, pdata->irq_base);
+
+ ret = max8907c_add_subdevs(max8907c, pdata);
+
+ if (pdata->max8907c_setup)
+ return pdata->max8907c_setup();
+
+ return ret;
+}
+
+static int max8907c_i2c_remove(struct i2c_client *i2c)
+{
+ struct max8907c *max8907c = i2c_get_clientdata(i2c);
+
+ max8907c_remove_subdevs(max8907c);
+ i2c_unregister_device(max8907c->i2c_rtc);
+ mfd_remove_devices(max8907c->dev);
+ max8907c_irq_free(max8907c);
+ kfree(max8907c);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8907c_i2c_id[] = {
+ {"max8907c", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max8907c_i2c_id);
+
+static struct i2c_driver max8907c_i2c_driver = {
+ .driver = {
+ .name = "max8907c",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8907c_i2c_probe,
+ .remove = max8907c_i2c_remove,
+ .suspend = max8907c_suspend,
+ .resume = max8907c_resume,
+ .id_table = max8907c_i2c_id,
+};
+
+static int __init max8907c_i2c_init(void)
+{
+ int ret = -ENODEV;
+
+ ret = i2c_add_driver(&max8907c_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register I2C driver: %d\n", ret);
+
+ return ret;
+}
+
+subsys_initcall(max8907c_i2c_init);
+
+static void __exit max8907c_i2c_exit(void)
+{
+ i2c_del_driver(&max8907c_i2c_driver);
+}
+
+module_exit(max8907c_i2c_exit);
+
+MODULE_DESCRIPTION("MAX8907C multi-function core driver");
+MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@maxim-ic.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ricoh583.c b/drivers/mfd/ricoh583.c
new file mode 100644
index 000000000000..a29053ebaf86
--- /dev/null
+++ b/drivers/mfd/ricoh583.c
@@ -0,0 +1,1213 @@
+/*
+ * drivers/mfd/ricoh583.c
+ *
+ * Core driver implementation to access RICOH583 power management chip.
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ricoh583.h>
+
+#define RICOH_ONOFFSEL_REG 0x10
+#define RICOH_SWCTL_REG 0x5E
+
+/* Interrupt enable register */
+#define RICOH583_INT_EN_SYS1 0x19
+#define RICOH583_INT_EN_SYS2 0x1D
+#define RICOH583_INT_EN_DCDC 0x41
+#define RICOH583_INT_EN_RTC 0xED
+#define RICOH583_INT_EN_ADC1 0x90
+#define RICOH583_INT_EN_ADC2 0x91
+#define RICOH583_INT_EN_ADC3 0x92
+#define RICOH583_INT_EN_GPIO 0xA8
+
+/* Interrupt status registers (monitor regs, in Ricoh terminology) */
+#define RICOH583_INTC_INTPOL 0xAD
+#define RICOH583_INTC_INTEN 0xAE
+#define RICOH583_INTC_INTMON 0xAF
+
+#define RICOH583_INT_MON_GRP 0xAF
+#define RICOH583_INT_MON_SYS1 0x1B
+#define RICOH583_INT_MON_SYS2 0x1F
+#define RICOH583_INT_MON_DCDC 0x43
+#define RICOH583_INT_MON_RTC 0xEE
+
+/* interrupt clearing registers */
+#define RICOH583_INT_IR_SYS1 0x1A
+#define RICOH583_INT_IR_SYS2 0x1E
+#define RICOH583_INT_IR_DCDC 0x42
+#define RICOH583_INT_IR_RTC 0xEE
+#define RICOH583_INT_IR_ADCL 0x94
+#define RICOH583_INT_IR_ADCH 0x95
+#define RICOH583_INT_IR_ADCEND 0x96
+#define RICOH583_INT_IR_GPIOR 0xA9
+#define RICOH583_INT_IR_GPIOF 0xAA
+
+/* GPIO register base address */
+#define RICOH583_GPIO_IOSEL 0xA0
+#define RICOH583_GPIO_PDEN 0xA1
+#define RICOH583_GPIO_IOOUT 0xA2
+#define RICOH583_GPIO_PGSEL 0xA3
+#define RICOH583_GPIO_GPINV 0xA4
+#define RICOH583_GPIO_GPDEB 0xA5
+#define RICOH583_GPIO_GPEDGE1 0xA6
+#define RICOH583_GPIO_GPEDGE2 0xA7
+#define RICOH583_GPIO_EN_GPIR 0xA8
+#define RICOH583_GPIO_MON_IOIN 0xAB
+#define RICOH583_GPIO_GPOFUNC 0xAC
+#define RICOH583_INTC_INTEN 0xAE
+
+enum int_type {
+ SYS_INT = 0x1,
+ DCDC_INT = 0x2,
+ RTC_INT = 0x4,
+ ADC_INT = 0x8,
+ GPIO_INT = 0x10,
+};
+
+struct ricoh583_irq_data {
+ u8 int_type;
+ u8 master_bit;
+ u8 int_en_bit;
+ u8 mask_reg_index;
+ int grp_index;
+};
+
+struct deepsleep_control_data {
+ u8 reg_add;
+ u8 ds_pos_bit;
+};
+
+#define RICOH583_IRQ(_int_type, _master_bit, _grp_index, _int_bit, _mask_ind) \
+ { \
+ .int_type = _int_type, \
+ .master_bit = _master_bit, \
+ .grp_index = _grp_index, \
+ .int_en_bit = _int_bit, \
+ .mask_reg_index = _mask_ind, \
+ }
+
+static const struct ricoh583_irq_data ricoh583_irqs[] = {
+ [RICOH583_IRQ_ONKEY] = RICOH583_IRQ(SYS_INT, 0, 0, 0, 0),
+ [RICOH583_IRQ_ACOK] = RICOH583_IRQ(SYS_INT, 0, 1, 1, 0),
+ [RICOH583_IRQ_LIDOPEN] = RICOH583_IRQ(SYS_INT, 0, 2, 2, 0),
+ [RICOH583_IRQ_PREOT] = RICOH583_IRQ(SYS_INT, 0, 3, 3, 0),
+ [RICOH583_IRQ_CLKSTP] = RICOH583_IRQ(SYS_INT, 0, 4, 4, 0),
+ [RICOH583_IRQ_ONKEY_OFF] = RICOH583_IRQ(SYS_INT, 0, 5, 5, 0),
+ [RICOH583_IRQ_WD] = RICOH583_IRQ(SYS_INT, 0, 7, 7, 0),
+ [RICOH583_IRQ_EN_PWRREQ1] = RICOH583_IRQ(SYS_INT, 0, 8, 0, 1),
+ [RICOH583_IRQ_EN_PWRREQ2] = RICOH583_IRQ(SYS_INT, 0, 9, 1, 1),
+ [RICOH583_IRQ_PRE_VINDET] = RICOH583_IRQ(SYS_INT, 0, 10, 2, 1),
+
+ [RICOH583_IRQ_DC0LIM] = RICOH583_IRQ(DCDC_INT, 1, 0, 0, 2),
+ [RICOH583_IRQ_DC1LIM] = RICOH583_IRQ(DCDC_INT, 1, 1, 1, 2),
+ [RICOH583_IRQ_DC2LIM] = RICOH583_IRQ(DCDC_INT, 1, 2, 2, 2),
+ [RICOH583_IRQ_DC3LIM] = RICOH583_IRQ(DCDC_INT, 1, 3, 3, 2),
+
+ [RICOH583_IRQ_CTC] = RICOH583_IRQ(RTC_INT, 2, 0, 0, 3),
+ [RICOH583_IRQ_YALE] = RICOH583_IRQ(RTC_INT, 2, 5, 5, 3),
+ [RICOH583_IRQ_DALE] = RICOH583_IRQ(RTC_INT, 2, 6, 6, 3),
+ [RICOH583_IRQ_WALE] = RICOH583_IRQ(RTC_INT, 2, 7, 7, 3),
+
+ [RICOH583_IRQ_AIN1L] = RICOH583_IRQ(ADC_INT, 3, 0, 0, 4),
+ [RICOH583_IRQ_AIN2L] = RICOH583_IRQ(ADC_INT, 3, 1, 1, 4),
+ [RICOH583_IRQ_AIN3L] = RICOH583_IRQ(ADC_INT, 3, 2, 2, 4),
+ [RICOH583_IRQ_VBATL] = RICOH583_IRQ(ADC_INT, 3, 3, 3, 4),
+ [RICOH583_IRQ_VIN3L] = RICOH583_IRQ(ADC_INT, 3, 4, 4, 4),
+ [RICOH583_IRQ_VIN8L] = RICOH583_IRQ(ADC_INT, 3, 5, 5, 4),
+ [RICOH583_IRQ_AIN1H] = RICOH583_IRQ(ADC_INT, 3, 6, 0, 5),
+ [RICOH583_IRQ_AIN2H] = RICOH583_IRQ(ADC_INT, 3, 7, 1, 5),
+ [RICOH583_IRQ_AIN3H] = RICOH583_IRQ(ADC_INT, 3, 8, 2, 5),
+ [RICOH583_IRQ_VBATH] = RICOH583_IRQ(ADC_INT, 3, 9, 3, 5),
+ [RICOH583_IRQ_VIN3H] = RICOH583_IRQ(ADC_INT, 3, 10, 4, 5),
+ [RICOH583_IRQ_VIN8H] = RICOH583_IRQ(ADC_INT, 3, 11, 5, 5),
+ [RICOH583_IRQ_ADCEND] = RICOH583_IRQ(ADC_INT, 3, 12, 0, 6),
+
+ [RICOH583_IRQ_GPIO0] = RICOH583_IRQ(GPIO_INT, 4, 0, 0, 7),
+ [RICOH583_IRQ_GPIO1] = RICOH583_IRQ(GPIO_INT, 4, 1, 1, 7),
+ [RICOH583_IRQ_GPIO2] = RICOH583_IRQ(GPIO_INT, 4, 2, 2, 7),
+ [RICOH583_IRQ_GPIO3] = RICOH583_IRQ(GPIO_INT, 4, 3, 3, 7),
+ [RICOH583_IRQ_GPIO4] = RICOH583_IRQ(GPIO_INT, 4, 4, 4, 7),
+ [RICOH583_IRQ_GPIO5] = RICOH583_IRQ(GPIO_INT, 4, 5, 5, 7),
+ [RICOH583_IRQ_GPIO6] = RICOH583_IRQ(GPIO_INT, 4, 6, 6, 7),
+ [RICOH583_IRQ_GPIO7] = RICOH583_IRQ(GPIO_INT, 4, 7, 7, 7),
+ [RICOH583_NR_IRQS] = RICOH583_IRQ(GPIO_INT, 4, 8, 8, 7),
+};
+
+#define DEEPSLEEP_INIT(_id, _reg, _pos) \
+ [RICOH583_DS_##_id] = {.reg_add = _reg, .ds_pos_bit = _pos}
+
+static struct deepsleep_control_data deepsleep_data[] = {
+ DEEPSLEEP_INIT(DC1, 0x21, 4),
+ DEEPSLEEP_INIT(DC2, 0x22, 0),
+ DEEPSLEEP_INIT(DC3, 0x22, 4),
+ DEEPSLEEP_INIT(LDO0, 0x23, 0),
+ DEEPSLEEP_INIT(LDO1, 0x23, 4),
+ DEEPSLEEP_INIT(LDO2, 0x24, 0),
+ DEEPSLEEP_INIT(LDO3, 0x24, 4),
+ DEEPSLEEP_INIT(LDO4, 0x25, 0),
+ DEEPSLEEP_INIT(LDO5, 0x25, 4),
+ DEEPSLEEP_INIT(LDO6, 0x26, 0),
+ DEEPSLEEP_INIT(LDO7, 0x26, 4),
+ DEEPSLEEP_INIT(LDO8, 0x27, 0),
+ DEEPSLEEP_INIT(LDO9, 0x27, 4),
+ DEEPSLEEP_INIT(PSO0, 0x28, 0),
+ DEEPSLEEP_INIT(PSO1, 0x28, 4),
+ DEEPSLEEP_INIT(PSO2, 0x29, 0),
+ DEEPSLEEP_INIT(PSO3, 0x29, 4),
+ DEEPSLEEP_INIT(PSO4, 0x2A, 0),
+ DEEPSLEEP_INIT(PSO5, 0x2A, 4),
+ DEEPSLEEP_INIT(PSO6, 0x2B, 0),
+ DEEPSLEEP_INIT(PSO7, 0x2B, 4),
+};
+
+#define MAX_INTERRUPT_MASKS 8
+#define MAX_MAIN_INTERRUPT 5
+#define EXT_PWR_REQ \
+ (RICOH583_EXT_PWRREQ1_CONTROL | RICOH583_EXT_PWRREQ2_CONTROL)
+
+struct ricoh583 {
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex io_lock;
+ int gpio_base;
+ struct gpio_chip gpio;
+ int irq_base;
+ struct irq_chip irq_chip;
+ struct mutex irq_lock;
+ unsigned long group_irq_en[MAX_MAIN_INTERRUPT];
+
+ /* For main interrupt bits in INTC */
+ u8 intc_inten_cache;
+ u8 intc_inten_reg;
+
+ /* For group interrupt bits and address */
+ u8 irq_en_cache[MAX_INTERRUPT_MASKS];
+ u8 irq_en_reg[MAX_INTERRUPT_MASKS];
+ u8 irq_en_add[MAX_INTERRUPT_MASKS];
+
+ /* Interrupt monitor and clear register */
+ u8 irq_mon_add[MAX_INTERRUPT_MASKS + 1];
+ u8 irq_clr_add[MAX_INTERRUPT_MASKS + 1];
+ u8 main_int_type[MAX_INTERRUPT_MASKS + 1];
+
+ /* For gpio edge */
+ u8 gpedge_cache[2];
+ u8 gpedge_reg[2];
+ u8 gpedge_add[2];
+};
+
+static inline int __ricoh583_read(struct i2c_client *client,
+ u8 reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+ dev_dbg(&client->dev, "ricoh583: reg read reg=%x, val=%x\n",
+ reg, *val);
+ return 0;
+}
+
+static inline int __ricoh583_bulk_reads(struct i2c_client *client, u8 reg,
+ int len, uint8_t *val)
+{
+ int ret;
+ int i;
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading from 0x%02x\n", reg);
+ return ret;
+ }
+ for (i = 0; i < len; ++i) {
+ dev_dbg(&client->dev, "ricoh583: reg read reg=%x, val=%x\n",
+ reg + i, *(val + i));
+ }
+ return 0;
+}
+
+static inline int __ricoh583_write(struct i2c_client *client,
+ u8 reg, uint8_t val)
+{
+ int ret;
+
+ dev_dbg(&client->dev, "ricoh583: reg write reg=%x, val=%x\n",
+ reg, val);
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
+ val, reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __ricoh583_bulk_writes(struct i2c_client *client, u8 reg,
+ int len, uint8_t *val)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < len; ++i) {
+ dev_dbg(&client->dev, "ricoh583: reg write reg=%x, val=%x\n",
+ reg + i, *(val + i));
+ }
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ricoh583_write(struct device *dev, u8 reg, uint8_t val)
+{
+ struct ricoh583 *ricoh583 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&ricoh583->io_lock);
+ ret = __ricoh583_write(to_i2c_client(dev), reg, val);
+ mutex_unlock(&ricoh583->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ricoh583_write);
+
+int ricoh583_bulk_writes(struct device *dev, u8 reg, u8 len, uint8_t *val)
+{
+ struct ricoh583 *ricoh583 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&ricoh583->io_lock);
+ ret = __ricoh583_bulk_writes(to_i2c_client(dev), reg, len, val);
+ mutex_unlock(&ricoh583->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ricoh583_bulk_writes);
+
+int ricoh583_read(struct device *dev, u8 reg, uint8_t *val)
+{
+ return __ricoh583_read(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(ricoh583_read);
+
+int ricoh583_bulk_reads(struct device *dev, u8 reg, u8 len, uint8_t *val)
+{
+ return __ricoh583_bulk_reads(to_i2c_client(dev), reg, len, val);
+}
+EXPORT_SYMBOL_GPL(ricoh583_bulk_reads);
+
+int ricoh583_set_bits(struct device *dev, u8 reg, uint8_t bit_mask)
+{
+ struct ricoh583 *ricoh583 = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&ricoh583->io_lock);
+
+ ret = __ricoh583_read(to_i2c_client(dev), reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & bit_mask) != bit_mask) {
+ reg_val |= bit_mask;
+ ret = __ricoh583_write(to_i2c_client(dev), reg, reg_val);
+ }
+out:
+ mutex_unlock(&ricoh583->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ricoh583_set_bits);
+
+int ricoh583_clr_bits(struct device *dev, u8 reg, uint8_t bit_mask)
+{
+ struct ricoh583 *ricoh583 = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&ricoh583->io_lock);
+
+ ret = __ricoh583_read(to_i2c_client(dev), reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if (reg_val & bit_mask) {
+ reg_val &= ~bit_mask;
+ ret = __ricoh583_write(to_i2c_client(dev), reg, reg_val);
+ }
+out:
+ mutex_unlock(&ricoh583->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ricoh583_clr_bits);
+
+int ricoh583_update(struct device *dev, u8 reg, uint8_t val, uint8_t mask)
+{
+ struct ricoh583 *ricoh583 = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&ricoh583->io_lock);
+
+ ret = __ricoh583_read(ricoh583->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & mask) != val) {
+ reg_val = (reg_val & ~mask) | (val & mask);
+ ret = __ricoh583_write(ricoh583->client, reg, reg_val);
+ }
+out:
+ mutex_unlock(&ricoh583->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ricoh583_update);
+
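+/*
+ * Route a rail onto the PWRREQ1-controlled deep-sleep sequence: program the
+ * rail's sleep-sequence enable and slot bits and turn on the corresponding
+ * control bit in ONOFFSEL.
+ */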
+static int __ricoh583_set_ext_pwrreq1_control(struct device *dev,
+ enum ricoh583_deepsleep_control_id id,
+ enum ricoh583_ext_pwrreq_control ext_pwr, int slots)
+{
+ int ret;
+ uint8_t sleepseq_val;
+ u8 en_bit;
+ u8 slot_bit;
+
+ if (!(ext_pwr & RICOH583_EXT_PWRREQ1_CONTROL))
+ return 0;
+
+ if (id == RICOH583_DS_DC0) {
+ dev_err(dev, "PWRREQ1 is invalid control for rail %d\n", id);
+ return -EINVAL;
+ }
+
+ en_bit = deepsleep_data[id].ds_pos_bit;
+ slot_bit = en_bit + 1;
+ ret = ricoh583_read(dev, deepsleep_data[id].reg_add, &sleepseq_val);
+ if (ret < 0) {
+ dev_err(dev, "Error in reading reg 0x%x\n",
+ deepsleep_data[id].reg_add);
+ return ret;
+ }
+
+ sleepseq_val &= ~(0xF << en_bit);
+ sleepseq_val |= (1 << en_bit);
+ sleepseq_val |= ((slots & 0x7) << slot_bit);
+ ret = ricoh583_set_bits(dev, RICOH_ONOFFSEL_REG, (1 << 1));
+ if (ret < 0) {
+ dev_err(dev, "Error in updating the 0x%02x register\n",
+ RICOH_ONOFFSEL_REG);
+ return ret;
+ }
+
+ ret = ricoh583_write(dev, deepsleep_data[id].reg_add, sleepseq_val);
+ if (ret < 0) {
+ dev_err(dev, "Error in writing reg 0x%x\n",
+ deepsleep_data[id].reg_add);
+ return ret;
+ }
+
+ if (id == RICOH583_DS_LDO4) {
+ ret = ricoh583_write(dev, RICOH_SWCTL_REG, 0x1);
+ if (ret < 0)
+ dev_err(dev, "Error in writing reg 0x%x\n",
+ RICOH_SWCTL_REG);
+ }
+ return ret;
+}
+
+static int __ricoh583_set_ext_pwrreq2_control(struct device *dev,
+ enum ricoh583_deepsleep_control_id id,
+ enum ricoh583_ext_pwrreq_control ext_pwr)
+{
+ int ret;
+
+ if (!(ext_pwr & RICOH583_EXT_PWRREQ2_CONTROL))
+ return 0;
+
+ if (id != RICOH583_DS_DC0) {
+ dev_err(dev, "PWRREQ2 is invalid control for rail %d\n", id);
+ return -EINVAL;
+ }
+
+ ret = ricoh583_set_bits(dev, RICOH_ONOFFSEL_REG, (1 << 2));
+ if (ret < 0)
+ dev_err(dev, "Error in updating the ONOFFSEL 0x10 register\n");
+ return ret;
+}
+
+int ricoh583_ext_power_req_config(struct device *dev,
+ enum ricoh583_deepsleep_control_id id,
+ enum ricoh583_ext_pwrreq_control ext_pwr_req,
+ int deepsleep_slot_nr)
+{
+ if ((ext_pwr_req & EXT_PWR_REQ) == EXT_PWR_REQ)
+ return -EINVAL;
+
+ if (ext_pwr_req & RICOH583_EXT_PWRREQ1_CONTROL)
+ return __ricoh583_set_ext_pwrreq1_control(dev, id,
+ ext_pwr_req, deepsleep_slot_nr);
+
+ if (ext_pwr_req & RICOH583_EXT_PWRREQ2_CONTROL)
+ return __ricoh583_set_ext_pwrreq2_control(dev,
+ id, ext_pwr_req);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ricoh583_ext_power_req_config);
+
+static int __devinit ricoh583_ext_power_init(struct ricoh583 *ricoh583,
+ struct ricoh583_platform_data *pdata)
+{
+ int ret;
+ int i;
+ uint8_t on_off_val = 0;
+
+ /* Clear ONOFFSEL register */
+ mutex_lock(&ricoh583->io_lock);
+ if (pdata->enable_shutdown_pin)
+ on_off_val |= 0x1;
+
+ ret = __ricoh583_write(ricoh583->client, RICOH_ONOFFSEL_REG,
+ on_off_val);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg %d error: "
+ "%d\n", RICOH_ONOFFSEL_REG, ret);
+
+ ret = __ricoh583_write(ricoh583->client, RICOH_SWCTL_REG, 0x0);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg %d error: "
+ "%d\n", RICOH_SWCTL_REG, ret);
+
+ /* Clear sleepseq register */
+ for (i = 0x21; i < 0x2B; ++i) {
+ ret = __ricoh583_write(ricoh583->client, i, 0x0);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg 0x%02x "
+ "error: %d\n", i, ret);
+ }
+ mutex_unlock(&ricoh583->io_lock);
+ return 0;
+}
+
+static struct i2c_client *ricoh583_i2c_client;
+int ricoh583_power_off(void)
+{
+ if (!ricoh583_i2c_client)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ricoh583_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct ricoh583 *ricoh583 = container_of(gc, struct ricoh583, gpio);
+ uint8_t val;
+ int ret;
+
+ ret = __ricoh583_read(ricoh583->client, RICOH583_GPIO_MON_IOIN, &val);
+ if (ret < 0)
+ return ret;
+
+ return ((val & (0x1 << offset)) != 0);
+}
+
+static void ricoh583_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct ricoh583 *ricoh583 = container_of(chip, struct ricoh583, gpio);
+ if (value)
+ ricoh583_set_bits(ricoh583->dev, RICOH583_GPIO_IOOUT,
+ 1 << offset);
+ else
+ ricoh583_clr_bits(ricoh583->dev, RICOH583_GPIO_IOOUT,
+ 1 << offset);
+}
+
+static int ricoh583_gpio_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct ricoh583 *ricoh583 = container_of(chip, struct ricoh583, gpio);
+
+ return ricoh583_clr_bits(ricoh583->dev, RICOH583_GPIO_IOSEL,
+ 1 << offset);
+}
+
+static int ricoh583_gpio_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct ricoh583 *ricoh583 = container_of(chip, struct ricoh583, gpio);
+
+ ricoh583_gpio_set(chip, offset, value);
+ return ricoh583_set_bits(ricoh583->dev, RICOH583_GPIO_IOSEL,
+ 1 << offset);
+}
+
+static int ricoh583_gpio_to_irq(struct gpio_chip *chip, unsigned off)
+{
+ struct ricoh583 *ricoh583 = container_of(chip, struct ricoh583, gpio);
+
+ if ((off >= 0) && (off < 8))
+ return ricoh583->irq_base + RICOH583_IRQ_GPIO0 + off;
+
+ return -EIO;
+}
+
+static int ricoh583_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct ricoh583 *ricoh583 = container_of(chip, struct ricoh583, gpio);
+ int ret;
+
+ ret = ricoh583_clr_bits(ricoh583->dev, RICOH583_GPIO_PGSEL,
+ 1 << offset);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "%s(): The error in writing register "
+ "0x%02x\n", __func__, RICOH583_GPIO_PGSEL);
+ return ret;
+}
+
+static void ricoh583_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct ricoh583 *ricoh583 = container_of(chip, struct ricoh583, gpio);
+ int ret;
+
+ ret = ricoh583_set_bits(ricoh583->dev, RICOH583_GPIO_PGSEL,
+ 1 << offset);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "%s(): The error in writing register "
+ "0x%02x\n", __func__, RICOH583_GPIO_PGSEL);
+}
+
+static void __devinit ricoh583_gpio_init(struct ricoh583 *ricoh583,
+ struct ricoh583_platform_data *pdata)
+{
+ int ret;
+ int i;
+ struct ricoh583_gpio_init_data *ginit;
+
+ if (pdata->gpio_base <= 0)
+ return;
+
+ ret = ricoh583_write(ricoh583->dev, RICOH583_GPIO_PGSEL, 0xEF);
+ if (ret < 0) {
+ dev_err(ricoh583->dev, "%s(): The error in writing register "
+ "0x%02x\n", __func__, RICOH583_GPIO_PGSEL);
+ return;
+ }
+
+ for (i = 0; i < pdata->num_gpioinit_data; ++i) {
+ ginit = &pdata->gpio_init_data[i];
+ if (!ginit->init_apply)
+ continue;
+ if (ginit->pulldn_en)
+ ret = ricoh583_set_bits(ricoh583->dev,
+ RICOH583_GPIO_PDEN, 1 << i);
+ else
+ ret = ricoh583_clr_bits(ricoh583->dev,
+ RICOH583_GPIO_PDEN, 1 << i);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Gpio %d init "
+ "pden configuration failed: %d\n", i, ret);
+
+ if (ginit->output_mode_en) {
+ if (ginit->output_val)
+ ret = ricoh583_set_bits(ricoh583->dev,
+ RICOH583_GPIO_IOOUT, 1 << i);
+ else
+ ret = ricoh583_clr_bits(ricoh583->dev,
+ RICOH583_GPIO_IOOUT, 1 << i);
+ if (!ret)
+ ret = ricoh583_set_bits(ricoh583->dev,
+ RICOH583_GPIO_IOSEL, 1 << i);
+ } else
+ ret = ricoh583_clr_bits(ricoh583->dev,
+ RICOH583_GPIO_IOSEL, 1 << i);
+
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Gpio %d init "
+ "dir configuration failed: %d\n", i, ret);
+
+ ret = ricoh583_clr_bits(ricoh583->dev, RICOH583_GPIO_PGSEL,
+ 1 << i);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "%s(): The error in writing "
+ "register 0x%02x\n", __func__,
+ RICOH583_GPIO_PGSEL);
+ }
+
+ ricoh583->gpio.owner = THIS_MODULE;
+ ricoh583->gpio.label = ricoh583->client->name;
+ ricoh583->gpio.dev = ricoh583->dev;
+ ricoh583->gpio.base = pdata->gpio_base;
+ ricoh583->gpio.ngpio = RICOH583_NR_GPIO;
+ ricoh583->gpio.can_sleep = 1;
+
+ ricoh583->gpio.request = ricoh583_gpio_request;
+ ricoh583->gpio.free = ricoh583_gpio_free;
+ ricoh583->gpio.direction_input = ricoh583_gpio_input;
+ ricoh583->gpio.direction_output = ricoh583_gpio_output;
+ ricoh583->gpio.set = ricoh583_gpio_set;
+ ricoh583->gpio.get = ricoh583_gpio_get;
+ ricoh583->gpio.to_irq = ricoh583_gpio_to_irq;
+
+ ret = gpiochip_add(&ricoh583->gpio);
+ if (ret)
+ dev_warn(ricoh583->dev, "GPIO registration failed: %d\n", ret);
+}
+
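+/*
+ * The irq_chip callbacks only update cached enable/edge state under
+ * irq_lock; the hardware registers are written back in irq_bus_sync_unlock.
+ */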
+static void ricoh583_irq_lock(struct irq_data *irq_data)
+{
+ struct ricoh583 *ricoh583 = irq_data_get_irq_chip_data(irq_data);
+
+ mutex_lock(&ricoh583->irq_lock);
+}
+
+static void ricoh583_irq_unmask(struct irq_data *irq_data)
+{
+ struct ricoh583 *ricoh583 = irq_data_get_irq_chip_data(irq_data);
+ unsigned int __irq = irq_data->irq - ricoh583->irq_base;
+ const struct ricoh583_irq_data *data = &ricoh583_irqs[__irq];
+
+ ricoh583->group_irq_en[data->master_bit] |= (1 << data->grp_index);
+ if (ricoh583->group_irq_en[data->master_bit])
+ ricoh583->intc_inten_reg |= 1 << data->master_bit;
+
+ ricoh583->irq_en_reg[data->mask_reg_index] |= 1 << data->int_en_bit;
+}
+
+static void ricoh583_irq_mask(struct irq_data *irq_data)
+{
+ struct ricoh583 *ricoh583 = irq_data_get_irq_chip_data(irq_data);
+ unsigned int __irq = irq_data->irq - ricoh583->irq_base;
+ const struct ricoh583_irq_data *data = &ricoh583_irqs[__irq];
+
+ ricoh583->group_irq_en[data->master_bit] &= ~(1 << data->grp_index);
+ if (!ricoh583->group_irq_en[data->master_bit])
+ ricoh583->intc_inten_reg &= ~(1 << data->master_bit);
+
+ ricoh583->irq_en_reg[data->mask_reg_index] &= ~(1 << data->int_en_bit);
+}
+
+static void ricoh583_irq_sync_unlock(struct irq_data *irq_data)
+{
+ struct ricoh583 *ricoh583 = irq_data_get_irq_chip_data(irq_data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ricoh583->gpedge_reg); i++) {
+ if (ricoh583->gpedge_reg[i] != ricoh583->gpedge_cache[i]) {
+ if (!WARN_ON(__ricoh583_write(ricoh583->client,
+ ricoh583->gpedge_add[i],
+ ricoh583->gpedge_reg[i])))
+ ricoh583->gpedge_cache[i] =
+ ricoh583->gpedge_reg[i];
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ricoh583->irq_en_reg); i++) {
+ if (ricoh583->irq_en_reg[i] != ricoh583->irq_en_cache[i]) {
+ if (!WARN_ON(__ricoh583_write(ricoh583->client,
+ ricoh583->irq_en_add[i],
+ ricoh583->irq_en_reg[i])))
+ ricoh583->irq_en_cache[i] =
+ ricoh583->irq_en_reg[i];
+ }
+ }
+
+ if (ricoh583->intc_inten_reg != ricoh583->intc_inten_cache) {
+ if (!WARN_ON(__ricoh583_write(ricoh583->client,
+ RICOH583_INTC_INTEN, ricoh583->intc_inten_reg)))
+ ricoh583->intc_inten_cache = ricoh583->intc_inten_reg;
+ }
+
+ mutex_unlock(&ricoh583->irq_lock);
+}
+
+static int ricoh583_irq_set_type(struct irq_data *irq_data, unsigned int type)
+{
+ struct ricoh583 *ricoh583 = irq_data_get_irq_chip_data(irq_data);
+ unsigned int __irq = irq_data->irq - ricoh583->irq_base;
+ const struct ricoh583_irq_data *data = &ricoh583_irqs[__irq];
+ int val = 0;
+ int gpedge_index;
+ int gpedge_bit_pos;
+
+ if (data->int_type & GPIO_INT) {
+ gpedge_index = data->int_en_bit / 4;
+ gpedge_bit_pos = data->int_en_bit % 4;
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ val |= 0x2;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ val |= 0x1;
+
+ ricoh583->gpedge_reg[gpedge_index] &= ~(3 << gpedge_bit_pos);
+ ricoh583->gpedge_reg[gpedge_index] |= (val << gpedge_bit_pos);
+ ricoh583_irq_unmask(irq_data);
+ }
+ return 0;
+}
+
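+/*
+ * Threaded handler: read the main interrupt monitor, then for each active
+ * group read and clear its status register (remapping the RTC bits), merge
+ * the rising/falling GPIO groups, and dispatch nested handlers for the
+ * sources that are enabled.
+ */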
+static irqreturn_t ricoh583_irq(int irq, void *data)
+{
+ struct ricoh583 *ricoh583 = data;
+ u8 int_sts[9];
+ u8 master_int;
+ int i;
+ int ret;
+ u8 rtc_int_sts = 0;
+
+ /* Clear the status */
+ for (i = 0; i < 9; i++)
+ int_sts[i] = 0;
+
+ ret = __ricoh583_read(ricoh583->client, RICOH583_INTC_INTMON,
+ &master_int);
+ if (ret < 0) {
+ dev_err(ricoh583->dev, "Error in reading reg 0x%02x "
+ "error: %d\n", RICOH583_INTC_INTMON, ret);
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ if (!(master_int & ricoh583->main_int_type[i]))
+ continue;
+ ret = __ricoh583_read(ricoh583->client,
+ ricoh583->irq_mon_add[i], &int_sts[i]);
+ if (ret < 0) {
+ dev_err(ricoh583->dev, "Error in reading reg 0x%02x "
+ "error: %d\n", ricoh583->irq_mon_add[i], ret);
+ int_sts[i] = 0;
+ continue;
+ }
+
+ if (ricoh583->main_int_type[i] & RTC_INT) {
+ rtc_int_sts = 0;
+ if (int_sts[i] & 0x1)
+ rtc_int_sts |= BIT(6);
+ if (int_sts[i] & 0x2)
+ rtc_int_sts |= BIT(7);
+ if (int_sts[i] & 0x4)
+ rtc_int_sts |= BIT(0);
+ if (int_sts[i] & 0x8)
+ rtc_int_sts |= BIT(5);
+ }
+
+ ret = __ricoh583_write(ricoh583->client,
+ ricoh583->irq_clr_add[i], ~int_sts[i]);
+ if (ret < 0) {
+ dev_err(ricoh583->dev, "Error in reading reg 0x%02x "
+ "error: %d\n", ricoh583->irq_clr_add[i], ret);
+ }
+ if (ricoh583->main_int_type[i] & RTC_INT)
+ int_sts[i] = rtc_int_sts;
+ }
+
+ /* Merge GPIO interrupts for the rising and falling cases */
+ int_sts[7] |= int_sts[8];
+
+ /* Call interrupt handler if enabled */
+ for (i = 0; i < RICOH583_NR_IRQS; ++i) {
+ const struct ricoh583_irq_data *data = &ricoh583_irqs[i];
+ if ((int_sts[data->mask_reg_index] & (1 << data->int_en_bit)) &&
+ (ricoh583->group_irq_en[data->master_bit] &
+ (1 << data->grp_index)))
+ handle_nested_irq(ricoh583->irq_base + i);
+ }
+ return IRQ_HANDLED;
+}
+
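+/*
+ * Set up the register address tables, mask and clear every source, register
+ * a nested irq_chip for each interrupt and request the one-shot threaded
+ * handler; on success the device is marked wakeup-capable.
+ */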
+static int __devinit ricoh583_irq_init(struct ricoh583 *ricoh583, int irq,
+ int irq_base)
+{
+ int i, ret;
+
+ if (!irq_base) {
+ dev_warn(ricoh583->dev, "No interrupt support on IRQ base\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&ricoh583->irq_lock);
+
+ /* Initialize all locals to 0 */
+ for (i = 0; i < MAX_INTERRUPT_MASKS; i++) {
+ ricoh583->irq_en_cache[i] = 0;
+ ricoh583->irq_en_reg[i] = 0;
+ }
+ ricoh583->intc_inten_cache = 0;
+ ricoh583->intc_inten_reg = 0;
+ for (i = 0; i < 2; i++) {
+ ricoh583->gpedge_cache[i] = 0;
+ ricoh583->gpedge_reg[i] = 0;
+ }
+
+ /* Interrupt enable register */
+ ricoh583->gpedge_add[0] = RICOH583_GPIO_GPEDGE2;
+ ricoh583->gpedge_add[1] = RICOH583_GPIO_GPEDGE1;
+ ricoh583->irq_en_add[0] = RICOH583_INT_EN_SYS1;
+ ricoh583->irq_en_add[1] = RICOH583_INT_EN_SYS2;
+ ricoh583->irq_en_add[2] = RICOH583_INT_EN_DCDC;
+ ricoh583->irq_en_add[3] = RICOH583_INT_EN_RTC;
+ ricoh583->irq_en_add[4] = RICOH583_INT_EN_ADC1;
+ ricoh583->irq_en_add[5] = RICOH583_INT_EN_ADC2;
+ ricoh583->irq_en_add[6] = RICOH583_INT_EN_ADC3;
+ ricoh583->irq_en_add[7] = RICOH583_INT_EN_GPIO;
+
+ /* Interrupt status monitor register */
+ ricoh583->irq_mon_add[0] = RICOH583_INT_MON_SYS1;
+ ricoh583->irq_mon_add[1] = RICOH583_INT_MON_SYS2;
+ ricoh583->irq_mon_add[2] = RICOH583_INT_MON_DCDC;
+ ricoh583->irq_mon_add[3] = RICOH583_INT_MON_RTC;
+ ricoh583->irq_mon_add[4] = RICOH583_INT_IR_ADCL;
+ ricoh583->irq_mon_add[5] = RICOH583_INT_IR_ADCH;
+ ricoh583->irq_mon_add[6] = RICOH583_INT_IR_ADCEND;
+ ricoh583->irq_mon_add[7] = RICOH583_INT_IR_GPIOF;
+ ricoh583->irq_mon_add[8] = RICOH583_INT_IR_GPIOR;
+
+ /* Interrupt status clear register */
+ ricoh583->irq_clr_add[0] = RICOH583_INT_IR_SYS1;
+ ricoh583->irq_clr_add[1] = RICOH583_INT_IR_SYS2;
+ ricoh583->irq_clr_add[2] = RICOH583_INT_IR_DCDC;
+ ricoh583->irq_clr_add[3] = RICOH583_INT_IR_RTC;
+ ricoh583->irq_clr_add[4] = RICOH583_INT_IR_ADCL;
+ ricoh583->irq_clr_add[5] = RICOH583_INT_IR_ADCH;
+ ricoh583->irq_clr_add[6] = RICOH583_INT_IR_ADCEND;
+ ricoh583->irq_clr_add[7] = RICOH583_INT_IR_GPIOF;
+ ricoh583->irq_clr_add[8] = RICOH583_INT_IR_GPIOR;
+
+ ricoh583->main_int_type[0] = SYS_INT;
+ ricoh583->main_int_type[1] = SYS_INT;
+ ricoh583->main_int_type[2] = DCDC_INT;
+ ricoh583->main_int_type[3] = RTC_INT;
+ ricoh583->main_int_type[4] = ADC_INT;
+ ricoh583->main_int_type[5] = ADC_INT;
+ ricoh583->main_int_type[6] = ADC_INT;
+ ricoh583->main_int_type[7] = GPIO_INT;
+ ricoh583->main_int_type[8] = GPIO_INT;
+
+ /* Initialize all interrupt enable registers to 0 */
+ for (i = 0; i < MAX_INTERRUPT_MASKS; i++) {
+ ret = __ricoh583_write(ricoh583->client,
+ ricoh583->irq_en_add[i],
+ ricoh583->irq_en_reg[i]);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg 0x%02x "
+ "error: %d\n", ricoh583->irq_en_add[i], ret);
+ }
+
+ for (i = 0; i < 2; i++) {
+ ret = __ricoh583_write(ricoh583->client,
+ ricoh583->gpedge_add[i],
+ ricoh583->gpedge_reg[i]);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg 0x%02x "
+ "error: %d\n", ricoh583->gpedge_add[i], ret);
+ }
+
+ ret = __ricoh583_write(ricoh583->client, RICOH583_INTC_INTEN, 0x0);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg 0x%02x "
+ "error: %d\n", RICOH583_INTC_INTEN, ret);
+
+ /* Clear any interrupts that are already pending. */
+ for (i = 0; i < 9; i++) {
+ ret = __ricoh583_write(ricoh583->client,
+ ricoh583->irq_clr_add[i], 0);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in writing reg 0x%02x "
+ "error: %d\n", ricoh583->irq_clr_add[i], ret);
+ }
+
+ ricoh583->irq_base = irq_base;
+ ricoh583->irq_chip.name = "ricoh583";
+ ricoh583->irq_chip.irq_mask = ricoh583_irq_mask;
+ ricoh583->irq_chip.irq_unmask = ricoh583_irq_unmask;
+ ricoh583->irq_chip.irq_bus_lock = ricoh583_irq_lock;
+ ricoh583->irq_chip.irq_bus_sync_unlock = ricoh583_irq_sync_unlock;
+ ricoh583->irq_chip.irq_set_type = ricoh583_irq_set_type;
+
+ for (i = 0; i < RICOH583_NR_IRQS; i++) {
+ int __irq = i + ricoh583->irq_base;
+ irq_set_chip_data(__irq, ricoh583);
+ irq_set_chip_and_handler(__irq, &ricoh583->irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(__irq, IRQF_VALID);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, ricoh583_irq, IRQF_ONESHOT,
+ "ricoh583", ricoh583);
+ if (ret < 0)
+ dev_err(ricoh583->dev, "Error in registering interrupt "
+ "handler: %d\n", ret);
+ if (!ret) {
+ device_init_wakeup(ricoh583->dev, 1);
+ enable_irq_wake(irq);
+ }
+ return ret;
+}
+
+static int ricoh583_remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int ricoh583_remove_subdevs(struct ricoh583 *ricoh583)
+{
+ return device_for_each_child(ricoh583->dev, NULL,
+ ricoh583_remove_subdev);
+}
+
+static int __devinit ricoh583_add_subdevs(struct ricoh583 *ricoh583,
+ struct ricoh583_platform_data *pdata)
+{
+ struct ricoh583_subdev_info *subdev;
+ struct platform_device *pdev;
+ int i, ret = 0;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ subdev = &pdata->subdevs[i];
+
+ pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ pdev->dev.parent = ricoh583->dev;
+ pdev->dev.platform_data = subdev->platform_data;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto failed;
+ }
+ return 0;
+
+failed:
+ ricoh583_remove_subdevs(ricoh583);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+static void print_regs(const char *header, struct seq_file *s,
+ struct i2c_client *client, int start_offset,
+ int end_offset)
+{
+ uint8_t reg_val;
+ int i;
+ int ret;
+
+ seq_printf(s, "%s\n", header);
+ for (i = start_offset; i <= end_offset; ++i) {
+ ret = __ricoh583_read(client, i, &reg_val);
+ if (ret >= 0)
+ seq_printf(s, "Reg 0x%02x Value 0x%02x\n", i, reg_val);
+ }
+ seq_printf(s, "------------------\n");
+}
+
+static int dbg_tps_show(struct seq_file *s, void *unused)
+{
+ struct ricoh583 *tps = s->private;
+ struct i2c_client *client = tps->client;
+
+ seq_printf(s, "RICOH583 Registers\n");
+ seq_printf(s, "------------------\n");
+
+ print_regs("System Regs", s, client, 0x0, 0xF);
+ print_regs("Power Control Regs", s, client, 0x10, 0x2B);
+ print_regs("DCDC1 Regs", s, client, 0x30, 0x43);
+ print_regs("DCDC1 Regs", s, client, 0x60, 0x63);
+ print_regs("LDO Regs", s, client, 0x50, 0x5F);
+ print_regs("LDO Regs", s, client, 0x64, 0x6D);
+ print_regs("ADC Regs", s, client, 0x70, 0x72);
+ print_regs("ADC Regs", s, client, 0x74, 0x8B);
+ print_regs("ADC Regs", s, client, 0x90, 0x96);
+ print_regs("GPIO Regs", s, client, 0xA0, 0xAC);
+ print_regs("INTC Regs", s, client, 0xAD, 0xAF);
+ print_regs("RTC Regs", s, client, 0xE0, 0xEE);
+ print_regs("RTC Regs", s, client, 0xF0, 0xF4);
+ return 0;
+}
+
+static int dbg_tps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_tps_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_tps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static void __init ricoh583_debuginit(struct ricoh583 *tps)
+{
+ (void)debugfs_create_file("ricoh583", S_IRUGO, NULL,
+ tps, &debug_fops);
+}
+#else
+static void __init ricoh583_debuginit(struct ricoh583 *tpsi)
+{
+ return;
+}
+#endif
+
+static int ricoh583_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct ricoh583 *ricoh583;
+ struct ricoh583_platform_data *pdata = i2c->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&i2c->dev, "ricoh583 requires platform data\n");
+ return -EINVAL;
+ }
+
+ ricoh583 = kzalloc(sizeof(struct ricoh583), GFP_KERNEL);
+ if (ricoh583 == NULL)
+ return -ENOMEM;
+
+ ricoh583->client = i2c;
+ ricoh583->dev = &i2c->dev;
+ i2c_set_clientdata(i2c, ricoh583);
+
+ mutex_init(&ricoh583->io_lock);
+
+ ret = ricoh583_ext_power_init(ricoh583, pdata);
+ if (ret < 0)
+ goto err_irq_init;
+
+ if (i2c->irq) {
+ ret = ricoh583_irq_init(ricoh583, i2c->irq, pdata->irq_base);
+ if (ret) {
+ dev_err(&i2c->dev, "IRQ init failed: %d\n", ret);
+ goto err_irq_init;
+ }
+ }
+
+ ret = ricoh583_add_subdevs(ricoh583, pdata);
+ if (ret) {
+ dev_err(&i2c->dev, "add devices failed: %d\n", ret);
+ goto err_add_devs;
+ }
+
+ ricoh583_gpio_init(ricoh583, pdata);
+
+ ricoh583_debuginit(ricoh583);
+
+ ricoh583_i2c_client = i2c;
+ return 0;
+
+err_add_devs:
+ if (i2c->irq)
+ free_irq(i2c->irq, ricoh583);
+err_irq_init:
+ kfree(ricoh583);
+ return ret;
+}
+
+static int __devexit ricoh583_i2c_remove(struct i2c_client *i2c)
+{
+ struct ricoh583 *ricoh583 = i2c_get_clientdata(i2c);
+
+ if (i2c->irq)
+ free_irq(i2c->irq, ricoh583);
+
+ ricoh583_remove_subdevs(ricoh583);
+ kfree(ricoh583);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ricoh583_i2c_suspend(struct i2c_client *i2c, pm_message_t state)
+{
+ if (i2c->irq)
+ disable_irq(i2c->irq);
+ return 0;
+}
+
+static int ricoh583_i2c_resume(struct i2c_client *i2c)
+{
+ if (i2c->irq)
+ enable_irq(i2c->irq);
+ return 0;
+}
+
+#endif
+
+static const struct i2c_device_id ricoh583_i2c_id[] = {
+ {"ricoh583", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ricoh583_i2c_id);
+
+static struct i2c_driver ricoh583_i2c_driver = {
+ .driver = {
+ .name = "ricoh583",
+ .owner = THIS_MODULE,
+ },
+ .probe = ricoh583_i2c_probe,
+ .remove = __devexit_p(ricoh583_i2c_remove),
+#ifdef CONFIG_PM
+ .suspend = ricoh583_i2c_suspend,
+ .resume = ricoh583_i2c_resume,
+#endif
+ .id_table = ricoh583_i2c_id,
+};
+
+static int __init ricoh583_i2c_init(void)
+{
+ int ret;
+ ret = i2c_add_driver(&ricoh583_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register I2C driver: %d\n", ret);
+
+ return ret;
+}
+
+subsys_initcall(ricoh583_i2c_init);
+
+static void __exit ricoh583_i2c_exit(void)
+{
+ i2c_del_driver(&ricoh583_i2c_driver);
+}
+
+module_exit(ricoh583_i2c_exit);
+
+MODULE_DESCRIPTION("RICOH583 multi-function core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d96c240..6d842fa20693 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -27,6 +27,10 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6586x.h>
+#define TPS6586X_SUPPLYENE 0x14
+#define EXITSLREQ_BIT BIT(1) /* Exit sleep mode request */
+#define SLEEP_MODE_BIT BIT(3) /* Sleep mode */
+
/* GPIO control registers */
#define TPS6586X_GPIOSET1 0x5d
#define TPS6586X_GPIOSET2 0x5e
@@ -251,6 +255,28 @@ out:
}
EXPORT_SYMBOL_GPL(tps6586x_update);
+static struct i2c_client *tps6586x_i2c_client = NULL;
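+/* Power off the PMIC: clear the exit-sleep request and then set the
+ * sleep-mode bit in the SUPPLYENE register. */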
+int tps6586x_power_off(void)
+{
+ struct device *dev = NULL;
+ int ret = -EINVAL;
+
+ if (!tps6586x_i2c_client)
+ return ret;
+
+ dev = &tps6586x_i2c_client->dev;
+
+ ret = tps6586x_clr_bits(dev, TPS6586X_SUPPLYENE, EXITSLREQ_BIT);
+ if (ret)
+ return ret;
+
+ ret = tps6586x_set_bits(dev, TPS6586X_SUPPLYENE, SLEEP_MODE_BIT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
{
struct tps6586x *tps6586x = container_of(gc, struct tps6586x, gpio);
@@ -274,13 +300,24 @@ static void tps6586x_gpio_set(struct gpio_chip *chip, unsigned offset,
value << offset, 1 << offset);
}
+static int tps6586x_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ /* FIXME: add handling of GPIOs as dedicated inputs */
+ return -ENOSYS;
+}
+
static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
struct tps6586x *tps6586x = container_of(gc, struct tps6586x, gpio);
uint8_t val, mask;
+ int ret;
- tps6586x_gpio_set(gc, offset, value);
+ val = value << offset;
+ mask = 0x1 << offset;
+ ret = tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET2, val, mask);
+ if (ret)
+ return ret;
val = 0x1 << (offset * 2);
mask = 0x3 << (offset * 2);
@@ -300,7 +337,7 @@ static int tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
tps6586x->gpio.ngpio = 4;
tps6586x->gpio.can_sleep = 1;
- /* FIXME: add handling of GPIOs as dedicated inputs */
+ tps6586x->gpio.direction_input = tps6586x_gpio_input;
tps6586x->gpio.direction_output = tps6586x_gpio_output;
tps6586x->gpio.set = tps6586x_gpio_set;
tps6586x->gpio.get = tps6586x_gpio_get;
@@ -525,6 +562,8 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
goto err_add_devs;
}
+ tps6586x_i2c_client = client;
+
return 0;
err_add_devs:
diff --git a/drivers/mfd/tps6591x.c b/drivers/mfd/tps6591x.c
new file mode 100644
index 000000000000..8ce44244e9ab
--- /dev/null
+++ b/drivers/mfd/tps6591x.c
@@ -0,0 +1,919 @@
+/*
+ * driver/mfd/tps6591x.c
+ *
+ * Core driver for TI TPS6591x PMIC family
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6591x.h>
+
+/* device control registers */
+#define TPS6591X_DEVCTRL 0x3F
+#define DEVCTRL_PWR_OFF_SEQ (1 << 7)
+#define DEVCTRL_DEV_ON (1 << 2)
+#define DEVCTRL_DEV_SLP (1 << 1)
+#define TPS6591X_DEVCTRL2 0x40
+
+/* device sleep on registers */
+#define TPS6591X_SLEEP_KEEP_ON 0x42
+#define SLEEP_KEEP_ON_THERM (1 << 7)
+#define SLEEP_KEEP_ON_CLKOUT32K (1 << 6)
+#define SLEEP_KEEP_ON_VRTC (1 << 5)
+#define SLEEP_KEEP_ON_I2CHS (1 << 4)
+
+/* interrupt status registers */
+#define TPS6591X_INT_STS 0x50
+#define TPS6591X_INT_STS2 0x52
+#define TPS6591X_INT_STS3 0x54
+
+/* interrupt mask registers */
+#define TPS6591X_INT_MSK 0x51
+#define TPS6591X_INT_MSK2 0x53
+#define TPS6591X_INT_MSK3 0x55
+
+/* GPIO register base address */
+#define TPS6591X_GPIO_BASE_ADDR 0x60
+
+/* silicon version number */
+#define TPS6591X_VERNUM 0x80
+
+#define TPS6591X_GPIO_SLEEP 7
+#define TPS6591X_GPIO_PDEN 3
+#define TPS6591X_GPIO_DIR 2
+
+enum irq_type {
+ EVENT,
+ GPIO,
+};
+
+struct tps6591x_irq_data {
+ u8 mask_reg;
+ u8 mask_pos;
+ enum irq_type type;
+};
+
+#define TPS6591X_IRQ(_reg, _mask_pos, _type) \
+ { \
+ .mask_reg = (_reg), \
+ .mask_pos = (_mask_pos), \
+ .type = (_type), \
+ }
+
+static const struct tps6591x_irq_data tps6591x_irqs[] = {
+ [TPS6591X_INT_PWRHOLD_F] = TPS6591X_IRQ(0, 0, EVENT),
+ [TPS6591X_INT_VMBHI] = TPS6591X_IRQ(0, 1, EVENT),
+ [TPS6591X_INT_PWRON] = TPS6591X_IRQ(0, 2, EVENT),
+ [TPS6591X_INT_PWRON_LP] = TPS6591X_IRQ(0, 3, EVENT),
+ [TPS6591X_INT_PWRHOLD_R] = TPS6591X_IRQ(0, 4, EVENT),
+ [TPS6591X_INT_HOTDIE] = TPS6591X_IRQ(0, 5, EVENT),
+ [TPS6591X_INT_RTC_ALARM] = TPS6591X_IRQ(0, 6, EVENT),
+ [TPS6591X_INT_RTC_PERIOD] = TPS6591X_IRQ(0, 7, EVENT),
+ [TPS6591X_INT_GPIO0] = TPS6591X_IRQ(1, 0, GPIO),
+ [TPS6591X_INT_GPIO1] = TPS6591X_IRQ(1, 2, GPIO),
+ [TPS6591X_INT_GPIO2] = TPS6591X_IRQ(1, 4, GPIO),
+ [TPS6591X_INT_GPIO3] = TPS6591X_IRQ(1, 6, GPIO),
+ [TPS6591X_INT_GPIO4] = TPS6591X_IRQ(2, 0, GPIO),
+ [TPS6591X_INT_GPIO5] = TPS6591X_IRQ(2, 2, GPIO),
+ [TPS6591X_INT_WTCHDG] = TPS6591X_IRQ(2, 4, EVENT),
+ [TPS6591X_INT_VMBCH2_H] = TPS6591X_IRQ(2, 5, EVENT),
+ [TPS6591X_INT_VMBCH2_L] = TPS6591X_IRQ(2, 6, EVENT),
+ [TPS6591X_INT_PWRDN] = TPS6591X_IRQ(2, 7, EVENT),
+};
+
+struct tps6591x {
+ struct mutex lock;
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct gpio_chip gpio;
+ struct irq_chip irq_chip;
+ struct mutex irq_lock;
+ int irq_base;
+ u32 irq_en;
+ u8 mask_cache[3];
+ u8 mask_reg[3];
+};
+
+static inline int __tps6591x_read(struct i2c_client *client,
+ int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+
+ return 0;
+}
+
+static inline int __tps6591x_reads(struct i2c_client *client, int reg,
+ int len, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading from 0x%02x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __tps6591x_write(struct i2c_client *client,
+ int reg, uint8_t val)
+{
+ int ret;
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
+ val, reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __tps6591x_writes(struct i2c_client *client, int reg,
+ int len, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing to 0x%02x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+int tps6591x_write(struct device *dev, int reg, uint8_t val)
+{
+ struct tps6591x *tps6591x = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&tps6591x->lock);
+ ret = __tps6591x_write(to_i2c_client(dev), reg, val);
+ mutex_unlock(&tps6591x->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6591x_write);
+
+int tps6591x_writes(struct device *dev, int reg, int len, uint8_t *val)
+{
+ struct tps6591x *tps6591x = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&tps6591x->lock);
+ ret = __tps6591x_writes(to_i2c_client(dev), reg, len, val);
+ mutex_unlock(&tps6591x->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6591x_writes);
+
+int tps6591x_read(struct device *dev, int reg, uint8_t *val)
+{
+ return __tps6591x_read(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(tps6591x_read);
+
+int tps6591x_reads(struct device *dev, int reg, int len, uint8_t *val)
+{
+ return __tps6591x_reads(to_i2c_client(dev), reg, len, val);
+}
+EXPORT_SYMBOL_GPL(tps6591x_reads);
+
+int tps6591x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct tps6591x *tps6591x = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps6591x->lock);
+
+ ret = __tps6591x_read(to_i2c_client(dev), reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & bit_mask) != bit_mask) {
+ reg_val |= bit_mask;
+ ret = __tps6591x_write(to_i2c_client(dev), reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps6591x->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6591x_set_bits);
+
+int tps6591x_clr_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct tps6591x *tps6591x = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps6591x->lock);
+
+ ret = __tps6591x_read(to_i2c_client(dev), reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if (reg_val & bit_mask) {
+ reg_val &= ~bit_mask;
+ ret = __tps6591x_write(to_i2c_client(dev), reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps6591x->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6591x_clr_bits);
+
+int tps6591x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
+{
+ struct tps6591x *tps6591x = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps6591x->lock);
+
+ ret = __tps6591x_read(tps6591x->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & mask) != val) {
+ reg_val = (reg_val & ~mask) | (val & mask);
+ ret = __tps6591x_write(tps6591x->client, reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps6591x->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6591x_update);
+
+static struct i2c_client *tps6591x_i2c_client;
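+/* Power off the PMIC: select the power-off sequence in DEVCTRL and then
+ * clear DEV_ON to start it. */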
+int tps6591x_power_off(void)
+{
+ struct device *dev = NULL;
+ int ret;
+
+ if (!tps6591x_i2c_client)
+ return -EINVAL;
+
+ dev = &tps6591x_i2c_client->dev;
+
+ ret = tps6591x_set_bits(dev, TPS6591X_DEVCTRL, DEVCTRL_PWR_OFF_SEQ);
+ if (ret < 0)
+ return ret;
+
+ ret = tps6591x_clr_bits(dev, TPS6591X_DEVCTRL, DEVCTRL_DEV_ON);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
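+/*
+ * Per-GPIO control register layout, as used by the GPIO callbacks below:
+ * bit 0 holds the output value, bit 1 reflects the input level and bit 2
+ * selects the direction (1 = output).
+ */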
+static int tps6591x_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6591x *tps6591x = container_of(gc, struct tps6591x, gpio);
+ uint8_t val;
+ int ret;
+
+ ret = __tps6591x_read(tps6591x->client, TPS6591X_GPIO_BASE_ADDR +
+ offset, &val);
+ if (ret)
+ return ret;
+
+ if (val & 0x4)
+ return val & 0x1;
+ else
+ return (val & 0x2) ? 1 : 0;
+}
+
+static void tps6591x_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct tps6591x *tps6591x = container_of(chip, struct tps6591x, gpio);
+
+ tps6591x_update(tps6591x->dev, TPS6591X_GPIO_BASE_ADDR + offset,
+ value, 0x1);
+}
+
+static int tps6591x_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6591x *tps6591x = container_of(gc, struct tps6591x, gpio);
+ uint8_t reg_val;
+ int ret;
+
+ ret = __tps6591x_read(tps6591x->client, TPS6591X_GPIO_BASE_ADDR +
+ offset, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val &= ~0x4;
+ return __tps6591x_write(tps6591x->client, TPS6591X_GPIO_BASE_ADDR +
+ offset, reg_val);
+}
+
+static int tps6591x_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps6591x *tps6591x = container_of(gc, struct tps6591x, gpio);
+ uint8_t reg_val, val;
+ int ret;
+
+ ret = __tps6591x_read(tps6591x->client, TPS6591X_GPIO_BASE_ADDR +
+ offset, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val &= ~0x1;
+ val = (value & 0x1) | 0x4;
+ reg_val = reg_val | val;
+ return __tps6591x_write(tps6591x->client, TPS6591X_GPIO_BASE_ADDR +
+ offset, reg_val);
+}
+
+static int tps6591x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
+{
+ struct tps6591x *tps6591x;
+ tps6591x = container_of(gc, struct tps6591x, gpio);
+
+ if (off <= TPS6591X_INT_GPIO5 - TPS6591X_INT_GPIO0)
+ return tps6591x->irq_base + TPS6591X_INT_GPIO0 + off;
+
+ return -EIO;
+}
+
+static void tps6591x_gpio_init(struct tps6591x *tps6591x,
+ struct tps6591x_platform_data *pdata)
+{
+ int ret;
+ int gpio_base = pdata->gpio_base;
+ int i;
+ u8 gpio_reg;
+ struct tps6591x_gpio_init_data *ginit;
+
+ if (gpio_base <= 0)
+ return;
+
+ for (i = 0; i < pdata->num_gpioinit_data; ++i) {
+ ginit = &pdata->gpio_init_data[i];
+ if (!ginit->init_apply)
+ continue;
+ gpio_reg = (ginit->sleep_en << TPS6591X_GPIO_SLEEP) |
+ (ginit->pulldn_en << TPS6591X_GPIO_PDEN) |
+ (ginit->output_mode_en << TPS6591X_GPIO_DIR);
+
+ if (ginit->output_mode_en)
+ gpio_reg |= ginit->output_val;
+
+ ret = __tps6591x_write(tps6591x->client,
+ TPS6591X_GPIO_BASE_ADDR + i, gpio_reg);
+ if (ret < 0)
+ dev_err(&tps6591x->client->dev, "GPIO %d init "
+ "configuration failed: %d\n", i, ret);
+ }
+
+ tps6591x->gpio.owner = THIS_MODULE;
+ tps6591x->gpio.label = tps6591x->client->name;
+ tps6591x->gpio.dev = tps6591x->dev;
+ tps6591x->gpio.base = gpio_base;
+ tps6591x->gpio.ngpio = TPS6591X_GPIO_NR;
+ tps6591x->gpio.can_sleep = 1;
+
+ tps6591x->gpio.direction_input = tps6591x_gpio_input;
+ tps6591x->gpio.direction_output = tps6591x_gpio_output;
+ tps6591x->gpio.set = tps6591x_gpio_set;
+ tps6591x->gpio.get = tps6591x_gpio_get;
+ tps6591x->gpio.to_irq = tps6591x_gpio_to_irq;
+
+ ret = gpiochip_add(&tps6591x->gpio);
+ if (ret)
+ dev_warn(tps6591x->dev, "GPIO registration failed: %d\n", ret);
+}
+
+static int __remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int tps6591x_remove_subdevs(struct tps6591x *tps6591x)
+{
+ return device_for_each_child(tps6591x->dev, NULL, __remove_subdev);
+}
+
+static void tps6591x_irq_lock(struct irq_data *data)
+{
+ struct tps6591x *tps6591x = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps6591x->irq_lock);
+}
+
+static void tps6591x_irq_mask(struct irq_data *irq_data)
+{
+ struct tps6591x *tps6591x = irq_data_get_irq_chip_data(irq_data);
+ unsigned int __irq = irq_data->irq - tps6591x->irq_base;
+ const struct tps6591x_irq_data *data = &tps6591x_irqs[__irq];
+
+ if (data->type == EVENT)
+ tps6591x->mask_reg[data->mask_reg] |= (1 << data->mask_pos);
+ else
+ tps6591x->mask_reg[data->mask_reg] |= (3 << data->mask_pos);
+
+ tps6591x->irq_en &= ~(1 << __irq);
+}
+
+static void tps6591x_irq_unmask(struct irq_data *irq_data)
+{
+ struct tps6591x *tps6591x = irq_data_get_irq_chip_data(irq_data);
+
+ unsigned int __irq = irq_data->irq - tps6591x->irq_base;
+ const struct tps6591x_irq_data *data = &tps6591x_irqs[__irq];
+
+ if (data->type == EVENT) {
+ tps6591x->mask_reg[data->mask_reg] &= ~(1 << data->mask_pos);
+ tps6591x->irq_en |= (1 << __irq);
+ }
+}
+
+static void tps6591x_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps6591x *tps6591x = irq_data_get_irq_chip_data(data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6591x->mask_reg); i++) {
+ if (tps6591x->mask_reg[i] != tps6591x->mask_cache[i]) {
+ if (!WARN_ON(tps6591x_write(tps6591x->dev,
+ TPS6591X_INT_MSK + 2*i,
+ tps6591x->mask_reg[i])))
+ tps6591x->mask_cache[i] = tps6591x->mask_reg[i];
+ }
+ }
+
+ mutex_unlock(&tps6591x->irq_lock);
+}
+
+static int tps6591x_irq_set_type(struct irq_data *irq_data, unsigned int type)
+{
+ struct tps6591x *tps6591x = irq_data_get_irq_chip_data(irq_data);
+
+ unsigned int __irq = irq_data->irq - tps6591x->irq_base;
+ const struct tps6591x_irq_data *data = &tps6591x_irqs[__irq];
+
+ if (data->type == GPIO) {
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ tps6591x->mask_reg[data->mask_reg]
+ &= ~(1 << data->mask_pos);
+ else
+ tps6591x->mask_reg[data->mask_reg]
+ |= (1 << data->mask_pos);
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ tps6591x->mask_reg[data->mask_reg]
+ &= ~(2 << data->mask_pos);
+ else
+ tps6591x->mask_reg[data->mask_reg]
+ |= (2 << data->mask_pos);
+
+ tps6591x->irq_en |= (1 << __irq);
+ }
+
+ return 0;
+}
+
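+/*
+ * Threaded IRQ handler: read the three status registers, acknowledge only
+ * the sources that are currently unmasked, fold the results into a single
+ * bitmap and raise a nested IRQ for every enabled source (GPIO sources use
+ * two mask bits each, one per edge).
+ */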
+static irqreturn_t tps6591x_irq(int irq, void *data)
+{
+ struct tps6591x *tps6591x = data;
+ int ret = 0;
+ u8 tmp[3];
+ u8 int_ack;
+ u32 acks, mask = 0;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = tps6591x_read(tps6591x->dev, TPS6591X_INT_STS + 2*i,
+ &tmp[i]);
+ if (ret < 0) {
+ dev_err(tps6591x->dev,
+ "failed to read interrupt status\n");
+ return IRQ_NONE;
+ }
+ if (tmp[i]) {
+ /* Ack only those interrupts which are enabled */
+ int_ack = tmp[i] & (~(tps6591x->mask_cache[i]));
+ ret = tps6591x_write(tps6591x->dev,
+ TPS6591X_INT_STS + 2*i, int_ack);
+ if (ret < 0) {
+ dev_err(tps6591x->dev,
+ "failed to write interrupt status\n");
+ return IRQ_NONE;
+ }
+ }
+ }
+
+ acks = (tmp[2] << 16) | (tmp[1] << 8) | tmp[0];
+
+ for (i = 0; i < ARRAY_SIZE(tps6591x_irqs); i++) {
+ if (tps6591x_irqs[i].type == GPIO)
+ mask = (3 << (tps6591x_irqs[i].mask_pos
+ + tps6591x_irqs[i].mask_reg*8));
+ else if (tps6591x_irqs[i].type == EVENT)
+ mask = (1 << (tps6591x_irqs[i].mask_pos
+ + tps6591x_irqs[i].mask_reg*8));
+
+ if ((acks & mask) && (tps6591x->irq_en & (1 << i)))
+ handle_nested_irq(tps6591x->irq_base + i);
+ }
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps6591x_irq_init(struct tps6591x *tps6591x, int irq,
+ int irq_base)
+{
+ int i, ret;
+
+ if (!irq_base) {
+ dev_warn(tps6591x->dev, "No IRQ base specified, interrupt support disabled\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&tps6591x->irq_lock);
+
+ tps6591x->mask_reg[0] = 0xFF;
+ tps6591x->mask_reg[1] = 0xFF;
+ tps6591x->mask_reg[2] = 0xFF;
+ for (i = 0; i < 3; i++) {
+ tps6591x->mask_cache[i] = tps6591x->mask_reg[i];
+ tps6591x_write(tps6591x->dev, TPS6591X_INT_MSK + 2*i,
+ tps6591x->mask_cache[i]);
+ }
+
+ for (i = 0; i < 3; i++)
+ tps6591x_write(tps6591x->dev, TPS6591X_INT_STS + 2*i, 0xff);
+
+ tps6591x->irq_base = irq_base;
+
+ tps6591x->irq_chip.name = "tps6591x";
+ tps6591x->irq_chip.irq_mask = tps6591x_irq_mask;
+ tps6591x->irq_chip.irq_unmask = tps6591x_irq_unmask;
+ tps6591x->irq_chip.irq_bus_lock = tps6591x_irq_lock;
+ tps6591x->irq_chip.irq_bus_sync_unlock = tps6591x_irq_sync_unlock;
+ tps6591x->irq_chip.irq_set_type = tps6591x_irq_set_type;
+
+ for (i = 0; i < ARRAY_SIZE(tps6591x_irqs); i++) {
+ int __irq = i + tps6591x->irq_base;
+ irq_set_chip_data(__irq, tps6591x);
+ irq_set_chip_and_handler(__irq, &tps6591x->irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(__irq, IRQF_VALID);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps6591x_irq, IRQF_ONESHOT,
+ "tps6591x", tps6591x);
+ if (!ret) {
+ device_init_wakeup(tps6591x->dev, 1);
+ enable_irq_wake(irq);
+ }
+
+ return ret;
+}
+
+static int __devinit tps6591x_add_subdevs(struct tps6591x *tps6591x,
+ struct tps6591x_platform_data *pdata)
+{
+ struct tps6591x_subdev_info *subdev;
+ struct platform_device *pdev;
+ int i, ret = 0;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ subdev = &pdata->subdevs[i];
+
+ pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ pdev->dev.parent = tps6591x->dev;
+ pdev->dev.platform_data = subdev->platform_data;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto failed;
+ }
+ return 0;
+
+failed:
+ tps6591x_remove_subdevs(tps6591x);
+ return ret;
+}
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+static void print_regs(const char *header, struct seq_file *s,
+ struct i2c_client *client, int start_offset,
+ int end_offset)
+{
+ uint8_t reg_val;
+ int i;
+ int ret;
+
+ seq_printf(s, "%s\n", header);
+ for (i = start_offset; i <= end_offset; ++i) {
+ ret = __tps6591x_read(client, i, &reg_val);
+ if (ret >= 0)
+ seq_printf(s, "Reg 0x%02x Value 0x%02x\n", i, reg_val);
+ }
+ seq_printf(s, "------------------\n");
+}
+
+static int dbg_tps_show(struct seq_file *s, void *unused)
+{
+ struct tps6591x *tps = s->private;
+ struct i2c_client *client = tps->client;
+
+ seq_printf(s, "TPS6591x Registers\n");
+ seq_printf(s, "------------------\n");
+
+ print_regs("Timing Regs", s, client, 0x0, 0x6);
+ print_regs("Alarm Regs", s, client, 0x8, 0xD);
+ print_regs("RTC Regs", s, client, 0x10, 0x16);
+ print_regs("BCK Regs", s, client, 0x17, 0x1B);
+ print_regs("PUADEN Regs", s, client, 0x18, 0x18);
+ print_regs("REF Regs", s, client, 0x1D, 0x1D);
+ print_regs("VDD Regs", s, client, 0x1E, 0x29);
+ print_regs("LDO Regs", s, client, 0x30, 0x37);
+ print_regs("THERM Regs", s, client, 0x38, 0x38);
+ print_regs("BBCH Regs", s, client, 0x39, 0x39);
+ print_regs("DCDCCNTRL Regs", s, client, 0x3E, 0x3E);
+ print_regs("DEV_CNTRL Regs", s, client, 0x3F, 0x40);
+ print_regs("SLEEP Regs", s, client, 0x41, 0x44);
+ print_regs("EN1 Regs", s, client, 0x45, 0x48);
+ print_regs("INT Regs", s, client, 0x50, 0x55);
+ print_regs("GPIO Regs", s, client, 0x60, 0x68);
+ print_regs("WATCHDOG Regs", s, client, 0x69, 0x69);
+ print_regs("VMBCH Regs", s, client, 0x6A, 0x6B);
+ print_regs("LED_CTRL Regs", s, client, 0x6c, 0x6D);
+ print_regs("PWM_CTRL Regs", s, client, 0x6E, 0x6F);
+ print_regs("SPARE Regs", s, client, 0x70, 0x70);
+ print_regs("VERNUM Regs", s, client, 0x80, 0x80);
+ return 0;
+}
+
+static int dbg_tps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_tps_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_tps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __init tps6591x_debuginit(struct tps6591x *tps)
+{
+ (void)debugfs_create_file("tps6591x", S_IRUGO, NULL,
+ tps, &debug_fops);
+}
+#else
+static void __init tps6591x_debuginit(struct tps6591x *tpsi)
+{
+ return;
+}
+#endif
+
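+/*
+ * Enable the SLEEP device state when the platform asks for it, and program
+ * the SLEEP_KEEP_ON register from the slp_keepon flags (thermal monitor,
+ * 32 kHz clock output, VRTC and high-speed I2C).
+ */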
+static int __init tps6591x_sleepinit(struct tps6591x *tpsi,
+ struct tps6591x_platform_data *pdata)
+{
+ struct device *dev = NULL;
+ int ret = 0;
+
+ dev = tpsi->dev;
+
+ if (!pdata->dev_slp_en)
+ goto no_err_return;
+
+ /* pmu dev_slp_en is set. Make sure slp_keepon is available before
+ * allowing SLEEP device state */
+ if (!pdata->slp_keepon) {
+ dev_err(dev, "slp_keepon_data required for slp_en\n");
+ goto err_sleep_init;
+ }
+
+ /* enabling SLEEP device state */
+ ret = tps6591x_set_bits(dev, TPS6591X_DEVCTRL, DEVCTRL_DEV_SLP);
+ if (ret < 0) {
+ dev_err(dev, "set dev_slp failed: %d\n", ret);
+ goto err_sleep_init;
+ }
+
+ if (pdata->slp_keepon->therm_keepon) {
+ ret = tps6591x_set_bits(dev, TPS6591X_SLEEP_KEEP_ON,
+ SLEEP_KEEP_ON_THERM);
+ if (ret < 0) {
+ dev_err(dev, "set therm_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pdata->slp_keepon->clkout32k_keepon) {
+ ret = tps6591x_set_bits(dev, TPS6591X_SLEEP_KEEP_ON,
+ SLEEP_KEEP_ON_CLKOUT32K);
+ if (ret < 0) {
+ dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pdata->slp_keepon->vrtc_keepon) {
+ ret = tps6591x_set_bits(dev, TPS6591X_SLEEP_KEEP_ON,
+ SLEEP_KEEP_ON_VRTC);
+ if (ret < 0) {
+ dev_err(dev, "set vrtc_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pdata->slp_keepon->i2chs_keepon) {
+ ret = tps6591x_set_bits(dev, TPS6591X_SLEEP_KEEP_ON,
+ SLEEP_KEEP_ON_I2CHS);
+ if (ret < 0) {
+ dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+no_err_return:
+ return 0;
+
+disable_dev_slp:
+ tps6591x_clr_bits(dev, TPS6591X_DEVCTRL, DEVCTRL_DEV_SLP);
+
+err_sleep_init:
+ return ret;
+}
+
+static int __devinit tps6591x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps6591x_platform_data *pdata = client->dev.platform_data;
+ struct tps6591x *tps6591x;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&client->dev, "tps6591x requires platform data\n");
+ return -ENOTSUPP;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, TPS6591X_VERNUM);
+ if (ret < 0) {
+ dev_err(&client->dev, "Silicon version number read"
+ " failed: %d\n", ret);
+ return -EIO;
+ }
+
+ dev_info(&client->dev, "VERNUM is %02x\n", ret);
+
+ tps6591x = kzalloc(sizeof(struct tps6591x), GFP_KERNEL);
+ if (tps6591x == NULL)
+ return -ENOMEM;
+
+ tps6591x->client = client;
+ tps6591x->dev = &client->dev;
+ i2c_set_clientdata(client, tps6591x);
+
+ mutex_init(&tps6591x->lock);
+
+ if (client->irq) {
+ ret = tps6591x_irq_init(tps6591x, client->irq,
+ pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto err_irq_init;
+ }
+ }
+
+ ret = tps6591x_add_subdevs(tps6591x, pdata);
+ if (ret) {
+ dev_err(&client->dev, "add devices failed: %d\n", ret);
+ goto err_add_devs;
+ }
+
+ tps6591x_gpio_init(tps6591x, pdata);
+
+ tps6591x_debuginit(tps6591x);
+
+ tps6591x_sleepinit(tps6591x, pdata);
+
+ tps6591x_i2c_client = client;
+
+ return 0;
+
+err_add_devs:
+ if (client->irq)
+ free_irq(client->irq, tps6591x);
+err_irq_init:
+ kfree(tps6591x);
+ return ret;
+}
+
+static int __devexit tps6591x_i2c_remove(struct i2c_client *client)
+{
+ struct tps6591x *tps6591x = i2c_get_clientdata(client);
+
+ if (client->irq)
+ free_irq(client->irq, tps6591x);
+
+ if (gpiochip_remove(&tps6591x->gpio) < 0)
+ dev_err(&client->dev, "Error in removing the gpio driver\n");
+
+ kfree(tps6591x);
+ return 0;
+}
+#ifdef CONFIG_PM
+static int tps6591x_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ if (client->irq)
+ disable_irq(client->irq);
+ return 0;
+}
+
+static int tps6591x_i2c_resume(struct i2c_client *client)
+{
+ if (client->irq)
+ enable_irq(client->irq);
+ return 0;
+}
+#endif
+
+static const struct i2c_device_id tps6591x_id_table[] = {
+ { "tps6591x", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, tps6591x_id_table);
+
+static struct i2c_driver tps6591x_driver = {
+ .driver = {
+ .name = "tps6591x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6591x_i2c_probe,
+ .remove = __devexit_p(tps6591x_i2c_remove),
+#ifdef CONFIG_PM
+ .suspend = tps6591x_i2c_suspend,
+ .resume = tps6591x_i2c_resume,
+#endif
+ .id_table = tps6591x_id_table,
+};
+
+static int __init tps6591x_init(void)
+{
+ return i2c_add_driver(&tps6591x_driver);
+}
+subsys_initcall(tps6591x_init);
+
+static void __exit tps6591x_exit(void)
+{
+ i2c_del_driver(&tps6591x_driver);
+}
+module_exit(tps6591x_exit);
+
+MODULE_DESCRIPTION("TPS6591X core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
new file mode 100644
index 000000000000..e6bfd475f668
--- /dev/null
+++ b/drivers/mfd/tps80031.c
@@ -0,0 +1,1272 @@
+/*
+ * driver/mfd/tps80031.c
+ *
+ * Core driver for TI TPS80031
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps80031.h>
+
+/* interrupt related registers */
+#define TPS80031_INT_STS_A 0xD0
+#define TPS80031_INT_STS_B 0xD1
+#define TPS80031_INT_STS_C 0xD2
+#define TPS80031_INT_MSK_LINE_A 0xD3
+#define TPS80031_INT_MSK_LINE_B 0xD4
+#define TPS80031_INT_MSK_LINE_C 0xD5
+#define TPS80031_INT_MSK_STS_A 0xD6
+#define TPS80031_INT_MSK_STS_B 0xD7
+#define TPS80031_INT_MSK_STS_C 0xD8
+
+#define TPS80031_CONTROLLER_STAT1 0xE3
+#define CONTROLLER_STAT1_BAT_TEMP 0
+#define CONTROLLER_STAT1_BAT_REMOVED 1
+#define CONTROLLER_STAT1_VBUS_DET 2
+#define CONTROLLER_STAT1_VAC_DET 3
+#define CONTROLLER_STAT1_FAULT_WDG 4
+#define CONTROLLER_STAT1_LINCH_GATED 6
+
+#define TPS80031_CONTROLLER_INT_MASK 0xE0
+#define CONTROLLER_INT_MASK_MVAC_DET 0
+#define CONTROLLER_INT_MASK_MVBUS_DET 1
+#define CONTROLLER_INT_MASK_MBAT_TEMP 2
+#define CONTROLLER_INT_MASK_MFAULT_WDG 3
+#define CONTROLLER_INT_MASK_MBAT_REMOVED 4
+#define CONTROLLER_INT_MASK_MLINCH_GATED 5
+
+#define CHARGE_CONTROL_SUB_INT_MASK 0x3F
+
+/* Version number related register */
+#define TPS80031_JTAGVERNUM 0x87
+/* EEPROM version */
+#define TPS80031_EPROM_REV 0xDF
+
+/* External control register */
+#define REGEN1_BASE_ADD 0xAE
+#define REGEN2_BASE_ADD 0xB1
+#define SYSEN_BASE_ADD 0xB4
+
+/* device control registers */
+#define TPS80031_PHOENIX_DEV_ON 0x25
+#define DEVOFF 1
+
+#define CLK32KAO_BASE_ADD 0xBA
+#define CLK32KG_BASE_ADD 0xBD
+#define CLK32KAUDIO_BASE_ADD 0xC0
+
+#define EXT_CONTROL_CFG_TRANS 0
+#define EXT_CONTROL_CFG_STATE 1
+
+#define STATE_OFF 0x00
+#define STATE_ON 0x01
+#define STATE_MASK 0x03
+
+#define TRANS_SLEEP_OFF 0x00
+#define TRANS_SLEEP_ON 0x04
+#define TRANS_SLEEP_MASK 0x0C
+
+#define TPS_NUM_SLAVES 4
+#define EXT_PWR_REQ (PWR_REQ_INPUT_PREQ1 | PWR_REQ_INPUT_PREQ2 | \
+ PWR_REQ_INPUT_PREQ3)
+#define TPS80031_PREQ1_RES_ASS_A 0xD7
+#define TPS80031_PREQ2_RES_ASS_A 0xDA
+#define TPS80031_PREQ3_RES_ASS_A 0xDD
+#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
+
+
+static u8 pmc_ext_control_base[] = {
+ REGEN1_BASE_ADD,
+ REGEN2_BASE_ADD,
+ SYSEN_BASE_ADD,
+};
+
+static u8 pmc_clk32k_control_base[] = {
+ CLK32KAO_BASE_ADD,
+ CLK32KG_BASE_ADD,
+ CLK32KAUDIO_BASE_ADD,
+};
+struct tps80031_irq_data {
+ u8 mask_reg;
+ u8 mask_mask;
+ u8 is_sec_int;
+ u8 parent_int;
+ u8 mask_sec_int_reg;
+ u8 int_mask_bit;
+ u8 int_sec_sts_reg;
+ u8 int_sts_bit;
+};
+
+#define TPS80031_IRQ(_reg, _mask) \
+ { \
+ .mask_reg = (TPS80031_INT_MSK_LINE_##_reg) - \
+ TPS80031_INT_MSK_LINE_A, \
+ .mask_mask = (_mask), \
+ }
+
+#define TPS80031_IRQ_SEC(_reg, _mask, _pint, _sint_mask_bit, _sint_sts_bit) \
+ { \
+ .mask_reg = (TPS80031_INT_MSK_LINE_##_reg) - \
+ TPS80031_INT_MSK_LINE_A, \
+ .mask_mask = (_mask), \
+ .is_sec_int = true, \
+ .parent_int = TPS80031_INT_##_pint, \
+ .mask_sec_int_reg = TPS80031_CONTROLLER_INT_MASK, \
+ .int_mask_bit = CONTROLLER_INT_MASK_##_sint_mask_bit, \
+ .int_sec_sts_reg = TPS80031_CONTROLLER_STAT1, \
+ .int_sts_bit = CONTROLLER_STAT1_##_sint_sts_bit \
+ }
+
+static const struct tps80031_irq_data tps80031_irqs[] = {
+
+ [TPS80031_INT_PWRON] = TPS80031_IRQ(A, 0),
+ [TPS80031_INT_RPWRON] = TPS80031_IRQ(A, 1),
+ [TPS80031_INT_SYS_VLOW] = TPS80031_IRQ(A, 2),
+ [TPS80031_INT_RTC_ALARM] = TPS80031_IRQ(A, 3),
+ [TPS80031_INT_RTC_PERIOD] = TPS80031_IRQ(A, 4),
+ [TPS80031_INT_HOT_DIE] = TPS80031_IRQ(A, 5),
+ [TPS80031_INT_VXX_SHORT] = TPS80031_IRQ(A, 6),
+ [TPS80031_INT_SPDURATION] = TPS80031_IRQ(A, 7),
+ [TPS80031_INT_WATCHDOG] = TPS80031_IRQ(B, 0),
+ [TPS80031_INT_BAT] = TPS80031_IRQ(B, 1),
+ [TPS80031_INT_SIM] = TPS80031_IRQ(B, 2),
+ [TPS80031_INT_MMC] = TPS80031_IRQ(B, 3),
+ [TPS80031_INT_RES] = TPS80031_IRQ(B, 4),
+ [TPS80031_INT_GPADC_RT] = TPS80031_IRQ(B, 5),
+ [TPS80031_INT_GPADC_SW2_EOC] = TPS80031_IRQ(B, 6),
+ [TPS80031_INT_CC_AUTOCAL] = TPS80031_IRQ(B, 7),
+ [TPS80031_INT_ID_WKUP] = TPS80031_IRQ(C, 0),
+ [TPS80031_INT_VBUSS_WKUP] = TPS80031_IRQ(C, 1),
+ [TPS80031_INT_ID] = TPS80031_IRQ(C, 2),
+ [TPS80031_INT_VBUS] = TPS80031_IRQ(C, 3),
+ [TPS80031_INT_CHRG_CTRL] = TPS80031_IRQ(C, 4),
+ [TPS80031_INT_EXT_CHRG] = TPS80031_IRQ(C, 5),
+ [TPS80031_INT_INT_CHRG] = TPS80031_IRQ(C, 6),
+ [TPS80031_INT_RES2] = TPS80031_IRQ(C, 7),
+ [TPS80031_INT_BAT_TEMP_OVRANGE] = TPS80031_IRQ_SEC(C, 4, CHRG_CTRL,
+ MBAT_TEMP, BAT_TEMP),
+ [TPS80031_INT_BAT_REMOVED] = TPS80031_IRQ_SEC(C, 4, CHRG_CTRL,
+ MBAT_REMOVED, BAT_REMOVED),
+ [TPS80031_INT_VBUS_DET] = TPS80031_IRQ_SEC(C, 4, CHRG_CTRL,
+ MVBUS_DET, VBUS_DET),
+ [TPS80031_INT_VAC_DET] = TPS80031_IRQ_SEC(C, 4, CHRG_CTRL,
+ MVAC_DET, VAC_DET),
+ [TPS80031_INT_FAULT_WDG] = TPS80031_IRQ_SEC(C, 4, CHRG_CTRL,
+ MFAULT_WDG, FAULT_WDG),
+ [TPS80031_INT_LINCH_GATED] = TPS80031_IRQ_SEC(C, 4, CHRG_CTRL,
+ MLINCH_GATED, LINCH_GATED),
+};
+
+static const int controller_stat1_irq_nr[] = {
+ TPS80031_INT_BAT_TEMP_OVRANGE,
+ TPS80031_INT_BAT_REMOVED,
+ TPS80031_INT_VBUS_DET,
+ TPS80031_INT_VAC_DET,
+ TPS80031_INT_FAULT_WDG,
+ 0,
+ TPS80031_INT_LINCH_GATED,
+ 0
+};
+
+/* Structure for TPS80031 Slaves */
+struct tps80031_client {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 addr;
+};
+
+struct tps80031 {
+ struct device *dev;
+ unsigned long chip_info;
+ int es_version;
+
+ struct gpio_chip gpio;
+ struct irq_chip irq_chip;
+ struct mutex irq_lock;
+ int irq_base;
+ u32 irq_en;
+ u8 mask_cache[3];
+ u8 mask_reg[3];
+ u8 cont_int_mask_reg;
+ u8 cont_int_mask_cache;
+ u8 cont_int_en;
+ u8 prev_cont_stat1;
+ struct tps80031_client tps_clients[TPS_NUM_SLAVES];
+};
+
+static inline int __tps80031_read(struct i2c_client *client,
+ int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed reading from addr 0x%02x, reg 0x%02x\n",
+ client->addr, reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+
+ return 0;
+}
+
+static inline int __tps80031_reads(struct i2c_client *client, int reg,
+ int len, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed reading from addr 0x%02x, reg 0x%02x\n",
+ client->addr, reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __tps80031_write(struct i2c_client *client,
+ int reg, uint8_t val)
+{
+ int ret;
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed writing 0x%02x to 0x%02x\n", val, reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __tps80031_writes(struct i2c_client *client, int reg,
+ int len, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing to 0x%02x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+int tps80031_write(struct device *dev, int sid, int reg, uint8_t val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ int ret;
+
+ mutex_lock(&tps->lock);
+ ret = __tps80031_write(tps->client, reg, val);
+ mutex_unlock(&tps->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_write);
+
+int tps80031_writes(struct device *dev, int sid, int reg, int len, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ int ret;
+
+ mutex_lock(&tps->lock);
+ ret = __tps80031_writes(tps->client, reg, len, val);
+ mutex_unlock(&tps->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_writes);
+
+int tps80031_read(struct device *dev, int sid, int reg, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+
+ return __tps80031_read(tps->client, reg, val);
+}
+EXPORT_SYMBOL_GPL(tps80031_read);
+
+int tps80031_reads(struct device *dev, int sid, int reg, int len, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+
+ return __tps80031_reads(tps->client, reg, len, val);
+}
+EXPORT_SYMBOL_GPL(tps80031_reads);
+
+int tps80031_set_bits(struct device *dev, int sid, int reg, uint8_t bit_mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps->lock);
+
+ ret = __tps80031_read(tps->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & bit_mask) != bit_mask) {
+ reg_val |= bit_mask;
+ ret = __tps80031_write(tps->client, reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_set_bits);
+
+int tps80031_clr_bits(struct device *dev, int sid, int reg, uint8_t bit_mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps->lock);
+
+ ret = __tps80031_read(tps->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if (reg_val & bit_mask) {
+ reg_val &= ~bit_mask;
+ ret = __tps80031_write(tps->client, reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_clr_bits);
+
+int tps80031_update(struct device *dev, int sid, int reg, uint8_t val,
+ uint8_t mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps->lock);
+
+ ret = __tps80031_read(tps->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & mask) != val) {
+ reg_val = (reg_val & ~mask) | (val & mask);
+ ret = __tps80031_write(tps->client, reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_update);
+
+int tps80031_force_update(struct device *dev, int sid, int reg, uint8_t val,
+ uint8_t mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps->lock);
+
+ ret = __tps80031_read(tps->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ reg_val = (reg_val & ~mask) | (val & mask);
+ ret = __tps80031_write(tps->client, reg, reg_val);
+
+out:
+ mutex_unlock(&tps->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_force_update);
+
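+/*
+ * Route a resource to one of the PREQ1..PREQ3 external power request
+ * inputs: set its bit in the matching RES_ASS register, unmask the request
+ * line in PHOENIX_MSK_TRANSITION, and hand control of the rail over to the
+ * resource through its STATE/TRANS registers.
+ */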
+int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add)
+{
+ u8 res_ass_reg = 0;
+ int preq_mask_bit = 0;
+ int ret;
+
+ if (!(ext_ctrl_flag & EXT_PWR_REQ))
+ return 0;
+
+ if (ext_ctrl_flag & PWR_REQ_INPUT_PREQ1) {
+ res_ass_reg = TPS80031_PREQ1_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 5;
+ } else if (ext_ctrl_flag & PWR_REQ_INPUT_PREQ2) {
+ res_ass_reg = TPS80031_PREQ2_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 6;
+ } else if (ext_ctrl_flag & PWR_REQ_INPUT_PREQ3) {
+ res_ass_reg = TPS80031_PREQ3_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 7;
+ }
+
+ /* Configure REQ_ASS registers */
+ ret = tps80031_set_bits(dev, SLAVE_ID1, res_ass_reg,
+ BIT(preq_bit & 0x7));
+ if (ret < 0) {
+ dev_err(dev, "%s() Not able to set bit %d of "
+ "reg %d error %d\n",
+ __func__, preq_bit, res_ass_reg, ret);
+ return ret;
+ }
+
+ /* Unmask the PREQ */
+ ret = tps80031_clr_bits(dev, SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, BIT(preq_mask_bit));
+ if (ret < 0) {
+ dev_err(dev, "%s() Not able to clear bit %d of "
+ "reg %d error %d\n",
+ __func__, preq_mask_bit,
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+
+ /* Switch regulator control to resource now */
+ if (ext_ctrl_flag & (PWR_REQ_INPUT_PREQ2 | PWR_REQ_INPUT_PREQ3)) {
+ ret = tps80031_update(dev, SLAVE_ID1, state_reg_add, 0x0,
+ STATE_MASK);
+ if (ret < 0)
+ dev_err(dev, "%s() Error in writing the STATE "
+ "register %d error %d\n", __func__,
+ state_reg_add, ret);
+ } else {
+ ret = tps80031_update(dev, SLAVE_ID1, trans_reg_add,
+ TRANS_SLEEP_OFF, TRANS_SLEEP_MASK);
+ if (ret < 0)
+ dev_err(dev, "%s() Error in writing the TRANS "
+ "register %d error %d\n", __func__,
+ trans_reg_add, ret);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_ext_power_req_config);
+
+unsigned long tps80031_get_chip_info(struct device *dev)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ return tps80031->chip_info;
+}
+EXPORT_SYMBOL_GPL(tps80031_get_chip_info);
+
+int tps80031_get_pmu_version(struct device *dev)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ return tps80031->es_version;
+}
+EXPORT_SYMBOL_GPL(tps80031_get_pmu_version);
+
+static struct tps80031 *tps80031_dev;
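+/* Power off the PMIC by writing DEVOFF to the PHOENIX_DEV_ON register. */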
+int tps80031_power_off(void)
+{
+ struct tps80031_client *tps = &tps80031_dev->tps_clients[SLAVE_ID1];
+
+ if (!tps->client)
+ return -EINVAL;
+ dev_info(&tps->client->dev, "switching off PMU\n");
+ return __tps80031_write(tps->client, TPS80031_PHOENIX_DEV_ON, DEVOFF);
+}
+
+static void tps80031_init_ext_control(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ int ret;
+ int i;
+
+ /* Clear all external control resource assignments */
+ for (i = 0; i < 9; ++i) {
+ ret = tps80031_write(tps80031->dev, SLAVE_ID1,
+ TPS80031_PREQ1_RES_ASS_A + i, 0);
+ if (ret < 0)
+ dev_err(tps80031->dev, "%s() Error in clearing "
+ "register %02x\n", __func__,
+ TPS80031_PREQ1_RES_ASS_A + i);
+ }
+
+ /* Mask the PREQ */
+ ret = tps80031_set_bits(tps80031->dev, SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, 0x7 << 5);
+ if (ret < 0)
+ dev_err(tps80031->dev, "%s() Not able to mask register "
+ "0x%02x\n", __func__, TPS80031_PHOENIX_MSK_TRANSITION);
+}
+
+static int tps80031_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps80031 *tps80031 = container_of(gc, struct tps80031, gpio);
+ struct tps80031_client *tps = &tps80031->tps_clients[SLAVE_ID1];
+ uint8_t state;
+ uint8_t trans;
+ int ret;
+
+ ret = __tps80031_read(tps->client,
+ pmc_ext_control_base[offset] +
+ EXT_CONTROL_CFG_STATE, &state);
+ if (ret)
+ return ret;
+
+ if (state != 0) {
+ ret = __tps80031_read(tps->client,
+ pmc_ext_control_base[offset] +
+ EXT_CONTROL_CFG_TRANS, &trans);
+ if (ret)
+ return ret;
+ return trans & 0x1;
+ }
+ return 0;
+}
+
+static void tps80031_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps80031 *tps80031 = container_of(gc, struct tps80031, gpio);
+
+ tps80031_update(tps80031->dev, SLAVE_ID1,
+ pmc_ext_control_base[offset] + EXT_CONTROL_CFG_TRANS,
+ value, 0x1);
+}
+
+static int tps80031_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ return -EIO;
+}
+
+static int tps80031_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ tps80031_gpio_set(gc, offset, value);
+ return 0;
+}
+
+static int tps80031_gpio_enable(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps80031 *tps80031 = container_of(gc, struct tps80031, gpio);
+ int ret;
+
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ pmc_ext_control_base[offset] + EXT_CONTROL_CFG_STATE,
+ STATE_ON, STATE_MASK);
+ if (ret)
+ return ret;
+
+ return tps80031_write(tps80031->dev, SLAVE_ID1,
+ pmc_ext_control_base[offset] + EXT_CONTROL_CFG_TRANS, 0x0);
+}
+
+static void tps80031_gpio_disable(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps80031 *tps80031 = container_of(gc, struct tps80031, gpio);
+ tps80031_update(tps80031->dev, SLAVE_ID1,
+ pmc_ext_control_base[offset] + EXT_CONTROL_CFG_STATE,
+ STATE_OFF, STATE_MASK);
+}
+
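+/*
+ * The "GPIOs" exposed here are the REGEN1, REGEN2 and SYSEN external
+ * control resources; the callbacks above drive them through their
+ * CFG_STATE/CFG_TRANS registers, optionally tied to an external power
+ * request line or to a sleep transition.
+ */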
+static void tps80031_gpio_init(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ int ret;
+ int gpio_base = pdata->gpio_base;
+ struct tps80031_client *tps = &tps80031->tps_clients[SLAVE_ID1];
+ struct tps80031_gpio_init_data *gpio_init_data = pdata->gpio_init_data;
+ int data_size = pdata->gpio_init_data_size;
+ static int preq_bit_pos[TPS80031_GPIO_NR] = {16, 17, 18};
+ int base_add;
+ int i;
+
+ if (gpio_base <= 0)
+ return;
+
+ /* Configure the external request mode */
+ for (i = 0; i < data_size; ++i) {
+ struct tps80031_gpio_init_data *gpio_pd = &gpio_init_data[i];
+ base_add = pmc_ext_control_base[gpio_pd->gpio_nr];
+
+ if (gpio_pd->ext_ctrl_flag & EXT_PWR_REQ) {
+ ret = tps80031_ext_power_req_config(tps80031->dev,
+ gpio_pd->ext_ctrl_flag,
+ preq_bit_pos[gpio_pd->gpio_nr],
+ base_add + EXT_CONTROL_CFG_STATE,
+ base_add + EXT_CONTROL_CFG_TRANS);
+ if (ret < 0)
+ dev_warn(tps80031->dev, "Ext pwrreq GPIO "
+ "sleep control fails\n");
+ }
+
+ if (gpio_pd->ext_ctrl_flag & PWR_OFF_ON_SLEEP) {
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ base_add + EXT_CONTROL_CFG_TRANS, 0x0, 0xC);
+ if (ret < 0)
+ dev_warn(tps80031->dev, "GPIO OFF on sleep "
+ "control fails\n");
+ }
+
+ if (gpio_pd->ext_ctrl_flag & PWR_ON_ON_SLEEP) {
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ base_add + EXT_CONTROL_CFG_TRANS, 0x4, 0xC);
+ if (ret < 0)
+ dev_warn(tps80031->dev, "GPIO ON on sleep "
+ "control fails\n");
+ }
+ }
+
+ tps80031->gpio.owner = THIS_MODULE;
+ tps80031->gpio.label = tps->client->name;
+ tps80031->gpio.dev = tps80031->dev;
+ tps80031->gpio.base = gpio_base;
+ tps80031->gpio.ngpio = TPS80031_GPIO_NR;
+ tps80031->gpio.can_sleep = 1;
+
+ tps80031->gpio.request = tps80031_gpio_enable;
+ tps80031->gpio.free = tps80031_gpio_disable;
+ tps80031->gpio.direction_input = tps80031_gpio_input;
+ tps80031->gpio.direction_output = tps80031_gpio_output;
+ tps80031->gpio.set = tps80031_gpio_set;
+ tps80031->gpio.get = tps80031_gpio_get;
+
+ ret = gpiochip_add(&tps80031->gpio);
+ if (ret)
+ dev_warn(tps80031->dev, "GPIO registration failed: %d\n", ret);
+}
+
+static int __remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int tps80031_remove_subdevs(struct tps80031 *tps80031)
+{
+ return device_for_each_child(tps80031->dev, NULL, __remove_subdev);
+}
+
+static void tps80031_irq_lock(struct irq_data *data)
+{
+ struct tps80031 *tps80031 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps80031->irq_lock);
+}
+
+static void tps80031_irq_enable(struct irq_data *data)
+{
+ struct tps80031 *tps80031 = irq_data_get_irq_chip_data(data);
+ unsigned int __irq = data->irq - tps80031->irq_base;
+ const struct tps80031_irq_data *irq_data = &tps80031_irqs[__irq];
+
+ if (irq_data->is_sec_int) {
+ tps80031->cont_int_mask_reg &= ~(1 << irq_data->int_mask_bit);
+ tps80031->cont_int_en |= (1 << irq_data->int_mask_bit);
+ tps80031->mask_reg[irq_data->mask_reg] &= ~(1 << irq_data->mask_mask);
+ tps80031->irq_en |= (1 << irq_data->parent_int);
+ } else
+ tps80031->mask_reg[irq_data->mask_reg] &= ~(1 << irq_data->mask_mask);
+
+ tps80031->irq_en |= (1 << __irq);
+}
+
+static void tps80031_irq_disable(struct irq_data *data)
+{
+ struct tps80031 *tps80031 = irq_data_get_irq_chip_data(data);
+
+ unsigned int __irq = data->irq - tps80031->irq_base;
+ const struct tps80031_irq_data *irq_data = &tps80031_irqs[__irq];
+
+ if (irq_data->is_sec_int) {
+ tps80031->cont_int_mask_reg |= (1 << irq_data->int_mask_bit);
+ tps80031->cont_int_en &= ~(1 << irq_data->int_mask_bit);
+ if (!tps80031->cont_int_en) {
+ tps80031->mask_reg[irq_data->mask_reg] |=
+ (1 << irq_data->mask_mask);
+ tps80031->irq_en &= ~(1 << irq_data->parent_int);
+ }
+ tps80031->irq_en &= ~(1 << __irq);
+ } else
+ tps80031->mask_reg[irq_data->mask_reg] |= (1 << irq_data->mask_mask);
+
+ tps80031->irq_en &= ~(1 << __irq);
+}
+
+static void tps80031_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps80031 *tps80031 = irq_data_get_irq_chip_data(data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps80031->mask_reg); i++) {
+ if (tps80031->mask_reg[i] != tps80031->mask_cache[i]) {
+ if (!WARN_ON(tps80031_write(tps80031->dev, SLAVE_ID2,
+ TPS80031_INT_MSK_LINE_A + i,
+ tps80031->mask_reg[i])))
+ if (!WARN_ON(tps80031_write(tps80031->dev,
+ SLAVE_ID2,
+ TPS80031_INT_MSK_STS_A + i,
+ tps80031->mask_reg[i])))
+ tps80031->mask_cache[i] =
+ tps80031->mask_reg[i];
+ }
+ }
+
+ if (tps80031->cont_int_mask_reg != tps80031->cont_int_mask_cache) {
+ if (!WARN_ON(tps80031_write(tps80031->dev, SLAVE_ID2,
+ TPS80031_CONTROLLER_INT_MASK,
+ tps80031->cont_int_mask_reg)))
+ tps80031->cont_int_mask_cache =
+ tps80031->cont_int_mask_reg;
+ }
+
+ mutex_unlock(&tps80031->irq_lock);
+}
+
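+/*
+ * Secondary handler for the charge-control interrupt: compare
+ * CONTROLLER_STAT1 with the previous snapshot and raise a nested IRQ for
+ * every enabled sub-source whose status bit changed.
+ */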
+static irqreturn_t tps80031_charge_control_irq(int irq, void *data)
+{
+ struct tps80031 *tps80031 = data;
+ int ret = 0;
+ int i;
+ u8 cont_sts;
+ u8 org_sts;
+ if (irq != (tps80031->irq_base + TPS80031_INT_CHRG_CTRL)) {
+ dev_err(tps80031->dev, "%s(): unexpected interrupt %d\n",
+ __func__, irq);
+ return IRQ_NONE;
+ }
+
+ ret = tps80031_read(tps80031->dev, SLAVE_ID2,
+ TPS80031_CONTROLLER_STAT1, &org_sts);
+ if (ret < 0) {
+ dev_err(tps80031->dev, "%s(): failed to read controller state1 "
+ "status %d\n", __func__, ret);
+ return IRQ_NONE;
+ }
+
+ /* Find the status bits that changed since the last interrupt and
+ * keep only the charge-control sources of interest */
+ cont_sts = org_sts ^ tps80031->prev_cont_stat1;
+ tps80031->prev_cont_stat1 = org_sts;
+ /* Clear watchdog timer state */
+ tps80031->prev_cont_stat1 &= ~(1 << 4);
+ cont_sts &= 0x5F;
+
+ for (i = 0; i < 8; ++i) {
+ if (!controller_stat1_irq_nr[i])
+ continue;
+
+ if ((cont_sts & BIT(i)) &&
+ (tps80031->irq_en & BIT(controller_stat1_irq_nr[i])))
+ handle_nested_irq(tps80031->irq_base +
+ controller_stat1_irq_nr[i]);
+ cont_sts &= ~BIT(i);
+ }
+ return IRQ_HANDLED;
+}
+
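+/*
+ * Primary threaded IRQ handler: read and acknowledge the three line status
+ * registers in one block transfer, then dispatch a nested IRQ for each
+ * enabled source that is set.
+ */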
+static irqreturn_t tps80031_irq(int irq, void *data)
+{
+ struct tps80031 *tps80031 = data;
+ int ret = 0;
+ u32 acks;
+ int i;
+ uint8_t tmp[3];
+
+ ret = tps80031_reads(tps80031->dev, SLAVE_ID2,
+ TPS80031_INT_STS_A, 3, tmp);
+ if (ret < 0) {
+ dev_err(tps80031->dev, "failed to read interrupt status\n");
+ return IRQ_NONE;
+ }
+ acks = (tmp[2] << 16) | (tmp[1] << 8) | tmp[0];
+
+ if (acks) {
+ ret = tps80031_writes(tps80031->dev, SLAVE_ID2,
+ TPS80031_INT_STS_A, 3, tmp);
+ if (ret < 0) {
+ dev_err(tps80031->dev, "failed to write "
+ "interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ while (acks) {
+ i = __ffs(acks);
+ if (tps80031->irq_en & (1 << i))
+ handle_nested_irq(tps80031->irq_base + i);
+ acks &= ~(1 << i);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps80031_irq_init(struct tps80031 *tps80031, int irq,
+ int irq_base)
+{
+ int i, ret;
+
+ if (!irq_base) {
+ dev_warn(tps80031->dev, "No interrupt support on IRQ base\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&tps80031->irq_lock);
+
+ for (i = 0; i < 3; i++) {
+ tps80031->mask_reg[i] = 0xFF;
+ tps80031->mask_cache[i] = tps80031->mask_reg[i];
+ tps80031_write(tps80031->dev, SLAVE_ID2,
+ TPS80031_INT_MSK_LINE_A + i,
+ tps80031->mask_cache[i]);
+ tps80031_write(tps80031->dev, SLAVE_ID2,
+ TPS80031_INT_MSK_STS_A + i, 0xFF);
+ tps80031_write(tps80031->dev, SLAVE_ID2,
+ TPS80031_INT_STS_A + i, 0xFF);
+ }
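+ /*
+ * All interrupt sources start out masked (line and status masks set
+ * to 0xFF) with any latched status bits cleared; individual sources
+ * are unmasked later through the irq_chip enable/sync_unlock path.
+ */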
+
+ ret = tps80031_read(tps80031->dev, SLAVE_ID2,
+ TPS80031_CONTROLLER_INT_MASK,
+ &tps80031->cont_int_mask_reg);
+ if (ret < 0) {
+ dev_err(tps80031->dev, "Error in reading the controller_mask "
+ "register %d\n", ret);
+ return ret;
+ }
+
+ tps80031->cont_int_mask_reg |= CHARGE_CONTROL_SUB_INT_MASK;
+ tps80031->cont_int_mask_cache = tps80031->cont_int_mask_reg;
+ tps80031->cont_int_en = 0;
+ ret = tps80031_write(tps80031->dev, SLAVE_ID2,
+ TPS80031_CONTROLLER_INT_MASK,
+ tps80031->cont_int_mask_reg);
+ if (ret < 0) {
+ dev_err(tps80031->dev, "Error in writing the controller_mask "
+ "register %d\n", ret);
+ return ret;
+ }
+
+ ret = tps80031_read(tps80031->dev, SLAVE_ID2,
+ TPS80031_CONTROLLER_STAT1, &tps80031->prev_cont_stat1);
+ if (ret < 0) {
+ dev_err(tps80031->dev, "%s(): failed to read controller state1 "
+ "status %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* Clear the watchdog interrupt bit from the cached status */
+ tps80031->prev_cont_stat1 &= ~(1 << 4);
+
+ tps80031->irq_base = irq_base;
+
+ tps80031->irq_chip.name = "tps80031";
+ tps80031->irq_chip.irq_enable = tps80031_irq_enable;
+ tps80031->irq_chip.irq_disable = tps80031_irq_disable;
+ tps80031->irq_chip.irq_bus_lock = tps80031_irq_lock;
+ tps80031->irq_chip.irq_bus_sync_unlock = tps80031_irq_sync_unlock;
+
+ for (i = 0; i < TPS80031_INT_NR; i++) {
+ int __irq = i + tps80031->irq_base;
+ irq_set_chip_data(__irq, tps80031);
+ irq_set_chip_and_handler(__irq, &tps80031->irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(__irq, IRQF_VALID);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps80031_irq, IRQF_ONESHOT,
+ "tps80031", tps80031);
+ /* register the isr for the secondary interrupt */
+ if (!ret)
+ ret = request_threaded_irq(irq_base + TPS80031_INT_CHRG_CTRL,
+ NULL, tps80031_charge_control_irq,
+ IRQF_ONESHOT, "80031_chg_ctl", tps80031);
+ if (!ret) {
+ device_init_wakeup(tps80031->dev, 1);
+ enable_irq_wake(irq);
+ }
+
+ return ret;
+}
+
+static void tps80031_clk32k_enable(struct tps80031 *tps80031, int base_add)
+{
+ int ret;
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ base_add + EXT_CONTROL_CFG_STATE, STATE_ON, STATE_MASK);
+ if (!ret)
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ base_add + EXT_CONTROL_CFG_TRANS,
+ STATE_ON, STATE_MASK);
+ if (ret < 0)
+ dev_err(tps80031->dev, "Error in updating clock register\n");
+}
+
+static void tps80031_clk32k_init(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ int ret;
+ struct tps80031_clk32k_init_data *clk32_idata = pdata->clk32k_init_data;
+ int data_size = pdata->clk32k_init_data_size;
+ static int clk32k_preq_bit_pos[TPS80031_CLOCK32K_NR] = {-1, 20, 19};
+ int base_add;
+ int i;
+
+ if (!clk32_idata || !data_size)
+ return;
+
+ /* Configure the external request mode */
+ for (i = 0; i < data_size; ++i) {
+ struct tps80031_clk32k_init_data *clk32_pd = &clk32_idata[i];
+ base_add = pmc_clk32k_control_base[clk32_pd->clk32k_nr];
+ if (clk32_pd->enable)
+ tps80031_clk32k_enable(tps80031, base_add);
+
+ if ((clk32_pd->ext_ctrl_flag & EXT_PWR_REQ) &&
+ (clk32k_preq_bit_pos[clk32_pd->clk32k_nr] != -1)) {
+ ret = tps80031_ext_power_req_config(tps80031->dev,
+ clk32_pd->ext_ctrl_flag,
+ clk32k_preq_bit_pos[clk32_pd->clk32k_nr],
+ base_add + EXT_CONTROL_CFG_STATE,
+ base_add + EXT_CONTROL_CFG_TRANS);
+ if (ret < 0)
+ dev_warn(tps80031->dev, "Clk32 ext control "
+ "fails\n");
+ }
+
+ if (clk32_pd->ext_ctrl_flag & PWR_OFF_ON_SLEEP) {
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ base_add + EXT_CONTROL_CFG_TRANS, 0x0, 0xC);
+ if (ret < 0)
+ dev_warn(tps80031->dev, "clk OFF on sleep "
+ "control fails\n");
+ }
+
+ if (clk32_pd->ext_ctrl_flag & PWR_ON_ON_SLEEP) {
+ ret = tps80031_update(tps80031->dev, SLAVE_ID1,
+ base_add + EXT_CONTROL_CFG_TRANS, 0x4, 0xC);
+ if (ret < 0)
+ dev_warn(tps80031->dev, "clk ON sleep "
+ "control fails\n");
+ }
+ }
+}
+
+static int __devinit tps80031_add_subdevs(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct tps80031_subdev_info *subdev;
+ struct platform_device *pdev;
+ int i, ret = 0;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ subdev = &pdata->subdevs[i];
+
+ pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ pdev->dev.parent = tps80031->dev;
+ pdev->dev.platform_data = subdev->platform_data;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto failed;
+ }
+ return 0;
+
+failed:
+ tps80031_remove_subdevs(tps80031);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+static void print_regs(const char *header, struct seq_file *s,
+ int sid, int start_offset, int end_offset)
+{
+ struct tps80031 *tps80031 = s->private;
+ struct tps80031_client *tps = &tps80031->tps_clients[sid];
+ uint8_t reg_val;
+ int i;
+ int ret;
+
+ seq_printf(s, "%s\n", header);
+ for (i = start_offset; i <= end_offset; ++i) {
+ ret = __tps80031_read(tps->client, i, &reg_val);
+ if (ret >= 0)
+ seq_printf(s, "Addr = 0x%02x Reg 0x%02x Value 0x%02x\n",
+ tps->client->addr, i, reg_val);
+ }
+ seq_printf(s, "------------------\n");
+}
+
+static int dbg_tps_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "TPS80031 Registers\n");
+ seq_printf(s, "------------------\n");
+ print_regs("VIO Regs", s, SLAVE_ID1, 0x47, 0x49);
+ print_regs("VIO Regs", s, SLAVE_ID0, 0x49, 0x4A);
+ print_regs("SMPS1 Regs", s, SLAVE_ID1, 0x53, 0x54);
+ print_regs("SMPS1 Regs", s, SLAVE_ID0, 0x55, 0x56);
+ print_regs("SMPS1 Regs", s, SLAVE_ID1, 0x57, 0x57);
+ print_regs("SMPS2 Regs", s, SLAVE_ID1, 0x59, 0x5B);
+ print_regs("SMPS2 Regs", s, SLAVE_ID0, 0x5B, 0x5C);
+ print_regs("SMPS2 Regs", s, SLAVE_ID1, 0x5C, 0x5D);
+ print_regs("SMPS3 Regs", s, SLAVE_ID1, 0x65, 0x68);
+ print_regs("SMPS4 Regs", s, SLAVE_ID1, 0x41, 0x44);
+ print_regs("VANA Regs", s, SLAVE_ID1, 0x81, 0x83);
+ print_regs("VRTC Regs", s, SLAVE_ID1, 0xC3, 0xC4);
+ print_regs("LDO1 Regs", s, SLAVE_ID1, 0x9D, 0x9F);
+ print_regs("LDO2 Regs", s, SLAVE_ID1, 0x85, 0x87);
+ print_regs("LDO3 Regs", s, SLAVE_ID1, 0x8D, 0x8F);
+ print_regs("LDO4 Regs", s, SLAVE_ID1, 0x89, 0x8B);
+ print_regs("LDO5 Regs", s, SLAVE_ID1, 0x99, 0x9B);
+ print_regs("LDO6 Regs", s, SLAVE_ID1, 0x91, 0x93);
+ print_regs("LDO7 Regs", s, SLAVE_ID1, 0xA5, 0xA7);
+ print_regs("LDOUSB Regs", s, SLAVE_ID1, 0xA1, 0xA3);
+ print_regs("LDOLN Regs", s, SLAVE_ID1, 0x95, 0x97);
+ print_regs("REGEN1 Regs", s, SLAVE_ID1, 0xAE, 0xAF);
+ print_regs("REGEN2 Regs", s, SLAVE_ID1, 0xB1, 0xB2);
+ print_regs("SYSEN Regs", s, SLAVE_ID1, 0xB4, 0xB5);
+ print_regs("CLK32KAO Regs", s, SLAVE_ID1, 0xBA, 0xBB);
+ print_regs("CLK32KG Regs", s, SLAVE_ID1, 0xBD, 0xBE);
+ print_regs("CLK32KAUD Regs", s, SLAVE_ID1, 0xC0, 0xC1);
+ print_regs("INT Regs", s, SLAVE_ID2, 0xD0, 0xD8);
+ print_regs("PREQ Regs", s, SLAVE_ID1, 0xD7, 0xDF);
+ print_regs("MASK_PH Regs", s, SLAVE_ID1, 0x20, 0x21);
+ print_regs("PMC MISC Regs", s, SLAVE_ID1, 0xE0, 0xEF);
+ print_regs("CONT_STATE", s, SLAVE_ID2, 0xE0, 0xE4);
+ print_regs("VERNUM Regs", s, SLAVE_ID3, 0x87, 0x87);
+ print_regs("EEPROM Regs", s, SLAVE_ID3, 0xDF, 0xDF);
+ print_regs("CHARGE Regs", s, SLAVE_ID2, 0xDA, 0xF5);
+ return 0;
+}
+
+static int dbg_tps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_tps_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_tps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __init tps80031_debuginit(struct tps80031 *tps)
+{
+ (void)debugfs_create_file("tps80031", S_IRUGO, NULL,
+ tps, &debug_fops);
+}
+#else
+static void __init tps80031_debuginit(struct tps80031 *tpsi)
+{
+ return;
+}
+#endif
+
+static int __devexit tps80031_i2c_remove(struct i2c_client *client)
+{
+ struct tps80031 *tps80031 = i2c_get_clientdata(client);
+ int i;
+
+ if (client->irq)
+ free_irq(client->irq, tps80031);
+
+ if (tps80031->gpio.owner != NULL)
+ if (gpiochip_remove(&tps80031->gpio) < 0)
+ dev_err(&client->dev, "Error in removing the gpio driver\n");
+
+ for (i = 0; i < TPS_NUM_SLAVES; i++) {
+ struct tps80031_client *tps = &tps80031->tps_clients[i];
+ if (tps->client && tps->client != client)
+ i2c_unregister_device(tps->client);
+ tps80031->tps_clients[i].client = NULL;
+ mutex_destroy(&tps->lock);
+ }
+
+ kfree(tps80031);
+ return 0;
+}
+
+static int __devinit tps80031_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps80031_platform_data *pdata = client->dev.platform_data;
+ struct tps80031 *tps80031;
+ struct tps80031_client *tps;
+ int ret;
+ int jtag_ver;
+ int ep_ver;
+ int i;
+
+ if (!pdata) {
+ dev_err(&client->dev, "tps80031 requires platform data\n");
+ return -ENOTSUPP;
+ }
+
+ jtag_ver = i2c_smbus_read_byte_data(client, TPS80031_JTAGVERNUM);
+ if (jtag_ver < 0) {
+ dev_err(&client->dev, "Silicon version number read"
+ " failed: %d\n", jtag_ver);
+ return -EIO;
+ }
+
+ ep_ver = i2c_smbus_read_byte_data(client, TPS80031_EPROM_REV);
+ if (ep_ver < 0) {
+ dev_err(&client->dev, "Silicon eeprom version read"
+ " failed: %d\n", ep_ver);
+ return -EIO;
+ }
+
+ dev_info(&client->dev, "Jtag version 0x%02x and Eeprom version 0x%02x\n",
+ jtag_ver, ep_ver);
+
+ tps80031 = kzalloc(sizeof(struct tps80031), GFP_KERNEL);
+ if (tps80031 == NULL)
+ return -ENOMEM;
+
+ tps80031->es_version = jtag_ver;
+ tps80031->dev = &client->dev;
+ i2c_set_clientdata(client, tps80031);
+ tps80031->chip_info = id->driver_data;
+
+ /* Set up slaves */
+ tps80031->tps_clients[SLAVE_ID0].addr = I2C_ID0_ADDR;
+ tps80031->tps_clients[SLAVE_ID1].addr = I2C_ID1_ADDR;
+ tps80031->tps_clients[SLAVE_ID2].addr = I2C_ID2_ADDR;
+ tps80031->tps_clients[SLAVE_ID3].addr = I2C_ID3_ADDR;
+ for (i = 0; i < TPS_NUM_SLAVES; i++) {
+ tps = &tps80031->tps_clients[i];
+ if (tps->addr == client->addr)
+ tps->client = client;
+ else
+ tps->client = i2c_new_dummy(client->adapter,
+ tps->addr);
+ if (!tps->client) {
+ dev_err(&client->dev, "can't attach client %d\n", i);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ i2c_set_clientdata(tps->client, tps80031);
+ mutex_init(&tps->lock);
+ }
+
+ if (client->irq) {
+ ret = tps80031_irq_init(tps80031, client->irq,
+ pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+ tps80031_init_ext_control(tps80031, pdata);
+
+ ret = tps80031_add_subdevs(tps80031, pdata);
+ if (ret) {
+ dev_err(&client->dev, "add devices failed: %d\n", ret);
+ goto fail;
+ }
+
+ tps80031_gpio_init(tps80031, pdata);
+
+ tps80031_clk32k_init(tps80031, pdata);
+
+ tps80031_debuginit(tps80031);
+
+ tps80031_dev = tps80031;
+
+ return 0;
+
+fail:
+ tps80031_i2c_remove(client);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int tps80031_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ if (client->irq)
+ disable_irq(client->irq);
+ return 0;
+}
+
+static int tps80031_i2c_resume(struct i2c_client *client)
+{
+ if (client->irq)
+ enable_irq(client->irq);
+ return 0;
+}
+#endif
+
+
+static const struct i2c_device_id tps80031_id_table[] = {
+ { "tps80031", TPS80031 },
+ { "tps80032", TPS80032 },
+};
+MODULE_DEVICE_TABLE(i2c, tps80031_id_table);
+
+static struct i2c_driver tps80031_driver = {
+ .driver = {
+ .name = "tps80031",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_i2c_probe,
+ .remove = __devexit_p(tps80031_i2c_remove),
+#ifdef CONFIG_PM
+ .suspend = tps80031_i2c_suspend,
+ .resume = tps80031_i2c_resume,
+#endif
+ .id_table = tps80031_id_table,
+};
+
+static int __init tps80031_init(void)
+{
+ return i2c_add_driver(&tps80031_driver);
+}
+subsys_initcall(tps80031_init);
+
+static void __exit tps80031_exit(void)
+{
+ i2c_del_driver(&tps80031_driver);
+}
+module_exit(tps80031_exit);
+
+MODULE_DESCRIPTION("TPS80031 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps8003x-gpadc.c b/drivers/mfd/tps8003x-gpadc.c
new file mode 100644
index 000000000000..5db912cc01fd
--- /dev/null
+++ b/drivers/mfd/tps8003x-gpadc.c
@@ -0,0 +1,650 @@
+/*
+ * drivers/mfd/tps8003x-gpadc.c
+ *
+ * GPADC driver for TI's TPS80031
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+
+#define GPADC_CTRL 0x2e
+#define GPSELECT_ISB 0x35
+#define GPCH0_LSB 0x3b
+#define GPCH0_MSB 0x3c
+#define CTRL_P1 0x36
+#define TOGGLE1 0x90
+#define MISC1 0xe4
+
+#define CTRL_P1_SP1 BIT(3)
+#define TOGGLE1_GPADCR BIT(1)
+#define GPADC_BUSY (1 << 0)
+#define GPADC_EOC_SW (1 << 1)
+#define SCALE (1 << 15)
+
+#define TPS80031_GPADC_MAX_CHANNELS 17
+#define TPS80031_GPADC_IOC_MAGIC '`'
+#define TPS80031_GPADC_IOCX_ADC_RAW_READ _IO(TPS80031_GPADC_IOC_MAGIC, 0)
+
+struct tps80031_gpadc_user_parms {
+ int channel;
+ int status;
+ u16 result;
+};
+
+struct tps80031_calibration {
+ s32 gain_error;
+ s32 offset_error;
+};
+
+struct tps80031_ideal_code {
+ s16 code1;
+ s16 code2;
+};
+
+struct tps80031_scalar_channel {
+ uint8_t delta1_addr;
+ uint8_t delta1_mask;
+ uint8_t delta2_addr;
+ uint8_t delta2_mask;
+};
+
+static struct tps80031_calibration
+ tps80031_calib_tbl[TPS80031_GPADC_MAX_CHANNELS];
+static const uint32_t calibration_bit_map = 0x47FF;
+static const uint32_t scalar_bit_map = 0x4785;
+
+#define TPS80031_GPADC_TRIM1 0xCD
+#define TPS80031_GPADC_TRIM2 0xCE
+#define TPS80031_GPADC_TRIM3 0xCF
+#define TPS80031_GPADC_TRIM4 0xD0
+#define TPS80031_GPADC_TRIM5 0xD1
+#define TPS80031_GPADC_TRIM6 0xD2
+#define TPS80031_GPADC_TRIM7 0xD3
+#define TPS80031_GPADC_TRIM8 0xD4
+#define TPS80031_GPADC_TRIM9 0xD5
+#define TPS80031_GPADC_TRIM10 0xD6
+#define TPS80031_GPADC_TRIM11 0xD7
+#define TPS80031_GPADC_TRIM12 0xD8
+#define TPS80031_GPADC_TRIM13 0xD9
+#define TPS80031_GPADC_TRIM14 0xDA
+#define TPS80031_GPADC_TRIM15 0xDB
+#define TPS80031_GPADC_TRIM16 0xDC
+#define TPS80031_GPADC_TRIM19 0xFD
+
+static const struct tps80031_scalar_channel
+ tps80031_trim[TPS80031_GPADC_MAX_CHANNELS] = {
+ { TPS80031_GPADC_TRIM1, 0x7, TPS80031_GPADC_TRIM2, 0x07},
+ { 0x00, },
+ { TPS80031_GPADC_TRIM3, 0x1F, TPS80031_GPADC_TRIM4, 0x3F},
+ { 0x00, },
+ { 0x00, },
+ { 0x00, },
+ { 0x00, },
+ { TPS80031_GPADC_TRIM7, 0x1F, TPS80031_GPADC_TRIM8, 0x1F },
+ { TPS80031_GPADC_TRIM9, 0x0F, TPS80031_GPADC_TRIM10, 0x1F },
+ { TPS80031_GPADC_TRIM11, 0x0F, TPS80031_GPADC_TRIM12, 0x1F },
+ { TPS80031_GPADC_TRIM13, 0x0F, TPS80031_GPADC_TRIM14, 0x1F },
+ { 0x00, },
+ { 0x00, },
+ { 0x00, },
+ { TPS80031_GPADC_TRIM15, 0x0f, TPS80031_GPADC_TRIM16, 0x1F },
+ { 0x00, },
+ { 0x00, },
+};
+
+/*
+ * The actual scaler gain is multiplied by 8 for fixed-point operation,
+ * e.g. 1.875 * 8 = 15.
+ */
+static const uint16_t tps80031_gain[TPS80031_GPADC_MAX_CHANNELS] = {
+ 1142, /* CHANNEL 0 */
+ 8, /* CHANNEL 1 */
+ /* 1.875 */
+ 15, /* CHANNEL 2 */
+ 8, /* CHANNEL 3 */
+ 8, /* CHANNEL 4 */
+ 8, /* CHANNEL 5 */
+ 8, /* CHANNEL 6 */
+ /* 5 */
+ 40, /* CHANNEL 7 */
+ /* 6.25 */
+ 50, /* CHANNEL 8 */
+ /* 11.25 */
+ 90, /* CHANNEL 9 */
+ /* 6.875 */
+ 55, /* CHANNEL 10 */
+ /* 1.875 */
+ 15, /* CHANNEL 11 */
+ 8, /* CHANNEL 12 */
+ 8, /* CHANNEL 13 */
+ /* 6.875 */
+ 55, /* CHANNEL 14 */
+ 8, /* CHANNEL 15 */
+ 8, /* CHANNEL 16 */
+};
+
+/*
+ * Calibration is not needed for channels 11, 12, 13, 15 and 16;
+ * the calibration offset is the same for channels 1, 3, 4 and 5.
+ */
+static const struct tps80031_ideal_code
+ tps80031_ideal[TPS80031_GPADC_MAX_CHANNELS] = {
+ {463, 2982}, /* CHANNEL 0 */
+ {328, 3604}, /* CHANNEL 1 */
+ {221, 3274}, /* CHANNEL 2 */
+ {328, 3604}, /* CHANNEL 3 */
+ {328, 3604}, /* CHANNEL 4 */
+ {328, 3604}, /* CHANNEL 5 */
+ {328, 3604}, /* CHANNEL 6 */
+ {1966, 3013}, /* CHANNEL 7 */
+ {328, 2754}, /* CHANNEL 8 */
+ {728, 3275}, /* CHANNEL 9 */
+ {596, 3274}, /* CHANNEL 10 */
+ {0, 0}, /* CHANNEL 11 */
+ {0, 0}, /* CHANNEL 12 */
+ {0, 0}, /* CHANNEL 13 */
+ {193, 2859}, /* CHANNEL 14 */
+ {0, 0}, /* CHANNEL 15 */
+ {0, 0}, /* CHANNEL 16 */
+};
+
+struct tps80031_gpadc_data {
+ struct device *dev;
+ struct mutex lock;
+};
+
+static struct tps80031_gpadc_data *the_gpadc;
+
+static ssize_t show_gain(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int value;
+ int status;
+
+ value = tps80031_calib_tbl[attr->index].gain_error;
+ status = sprintf(buf, "%d\n", value);
+ return status;
+}
+
+static ssize_t set_gain(struct device *dev,
+ struct device_attribute *devattr, const char *buf, size_t count)
+{
+ long val;
+ int status = count;
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ if ((strict_strtol(buf, 10, &val) < 0) || (val < 15000)
+ || (val > 60000))
+ return -EINVAL;
+ tps80031_calib_tbl[attr->index].gain_error = val;
+ return status;
+}
+
+static ssize_t show_offset(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int value;
+ int status;
+
+ value = tps80031_calib_tbl[attr->index].offset_error;
+ status = sprintf(buf, "%d\n", value);
+ return status;
+}
+
+static ssize_t set_offset(struct device *dev,
+ struct device_attribute *devattr, const char *buf, size_t count)
+{
+ long val;
+ int status = count;
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ if ((strict_strtol(buf, 10, &val) < 0) || (val < 15000)
+ || (val > 60000))
+ return -EINVAL;
+ tps80031_calib_tbl[attr->index].offset_error = val;
+ return status;
+}
+
+static int tps80031_reg_read(struct tps80031_gpadc_data *gpadc, int sid,
+ int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = tps80031_read(gpadc->dev->parent, sid, reg, val);
+ if (ret < 0)
+ dev_err(gpadc->dev, "Failed read register 0x%02x\n", reg);
+ return ret;
+}
+
+static int tps80031_reg_write(struct tps80031_gpadc_data *gpadc, int sid,
+ int reg, uint8_t val)
+{
+ int ret;
+
+ ret = tps80031_write(gpadc->dev->parent, sid, reg, val);
+ if (ret < 0)
+ dev_err(gpadc->dev, "Failed write register 0x%02x\n", reg);
+ return ret;
+}
+
+static int tps80031_gpadc_channel_raw_read(struct tps80031_gpadc_data *gpadc)
+{
+ uint8_t msb, lsb;
+ int ret;
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2, GPCH0_LSB, &lsb);
+ if (ret < 0)
+ return ret;
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2, GPCH0_MSB, &msb);
+ if (ret < 0)
+ return ret;
+
+ return (int)((msb << 8) | lsb);
+}
+
+static int tps80031_gpadc_read_channels(struct tps80031_gpadc_data *gpadc,
+ uint32_t channel)
+{
+ uint8_t bits;
+ int gain_error;
+ int offset_error;
+ int raw_code;
+ int corrected_code;
+ int channel_value;
+ int raw_channel_value;
+
+ /* TPS80031 has 12bit ADC */
+ bits = 12;
+ raw_code = tps80031_gpadc_channel_raw_read(gpadc);
+ if (raw_code < 0)
+ return raw_code;
+ /*
+ * The offset and gain of channels 0, 2, 7, 8, 9, 10 and 14 cannot
+ * be fully compensated in software.
+ */
+ if (channel == 7)
+ return raw_code;
+ /*
+ * Multiply by 1000 to convert the unit to millivolts, shift right
+ * by 'bits' (4096 steps for the 12-bit ADC) for the full-scale
+ * code, and shift right by 3 to undo the 8x fixed-point scaler
+ * gain.
+ */
+ raw_channel_value =
+ (raw_code * tps80031_gain[channel] * 1000) >> (bits + 3);
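+ /*
+ * Illustrative example (not from the datasheet): for channel 2
+ * (gain 15, i.e. 1.875 * 8) a raw code of 2048 gives
+ * (2048 * 15 * 1000) >> 15 ~= 937, i.e. roughly 937 mV before
+ * calibration correction.
+ */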
+
+ gain_error = tps80031_calib_tbl[channel].gain_error;
+ offset_error = tps80031_calib_tbl[channel].offset_error;
+ corrected_code = (raw_code * SCALE - offset_error) / gain_error;
+ channel_value =
+ (corrected_code * tps80031_gain[channel] * 1000) >> (bits + 3);
+ return channel_value;
+}
+
+static int tps80031_gpadc_wait_conversion_ready(
+ struct tps80031_gpadc_data *gpadc,
+ unsigned int timeout_ms)
+{
+ int ret;
+ unsigned long timeout;
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ uint8_t reg;
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2, CTRL_P1, &reg);
+ if (ret < 0)
+ return ret;
+ if (!(reg & GPADC_BUSY) &&
+ (reg & GPADC_EOC_SW))
+ return 0;
+ } while (!time_after(jiffies, timeout));
+ return -EAGAIN;
+}
+
+static inline int tps80031_gpadc_config
+ (struct tps80031_gpadc_data *gpadc, int channel_no)
+{
+ int ret = 0;
+
+ ret = tps80031_reg_write(gpadc, SLAVE_ID2, TOGGLE1, TOGGLE1_GPADCR);
+ if (ret < 0)
+ return ret;
+
+ ret = tps80031_reg_write(gpadc, SLAVE_ID2, GPSELECT_ISB, channel_no);
+ if (ret < 0)
+ return ret;
+
+ ret = tps80031_reg_write(gpadc, SLAVE_ID2, GPADC_CTRL, 0xef);
+ if (ret < 0)
+ return ret;
+
+ ret = tps80031_reg_write(gpadc, SLAVE_ID1, MISC1, 0x02);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+int tps80031_gpadc_conversion(int channel_no)
+{
+ int ret = 0;
+ int read_value;
+
+ mutex_lock(&the_gpadc->lock);
+
+ ret = tps80031_gpadc_config(the_gpadc, channel_no);
+ if (ret < 0)
+ goto err;
+
+ /* start ADC conversion */
+ ret = tps80031_reg_write(the_gpadc, SLAVE_ID2, CTRL_P1, CTRL_P1_SP1);
+ if (ret < 0)
+ goto err;
+
+ /* Wait until conversion is ready (ctrl register returns EOC) */
+ ret = tps80031_gpadc_wait_conversion_ready(the_gpadc, 5);
+ if (ret) {
+ dev_dbg(the_gpadc->dev, "conversion timeout!\n");
+ goto err;
+ }
+
+ read_value = tps80031_gpadc_read_channels(the_gpadc, channel_no);
+ mutex_unlock(&the_gpadc->lock);
+ return read_value;
+err:
+ mutex_unlock(&the_gpadc->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_gpadc_conversion);
+
+static SENSOR_DEVICE_ATTR(in0_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 0);
+static SENSOR_DEVICE_ATTR(in0_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 0);
+static SENSOR_DEVICE_ATTR(in1_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 1);
+static SENSOR_DEVICE_ATTR(in1_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 1);
+static SENSOR_DEVICE_ATTR(in2_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 2);
+static SENSOR_DEVICE_ATTR(in2_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 2);
+static SENSOR_DEVICE_ATTR(in3_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 3);
+static SENSOR_DEVICE_ATTR(in3_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 3);
+static SENSOR_DEVICE_ATTR(in4_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 4);
+static SENSOR_DEVICE_ATTR(in4_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 4);
+static SENSOR_DEVICE_ATTR(in5_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 5);
+static SENSOR_DEVICE_ATTR(in5_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 5);
+static SENSOR_DEVICE_ATTR(in6_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 6);
+static SENSOR_DEVICE_ATTR(in6_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 6);
+static SENSOR_DEVICE_ATTR(in7_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 7);
+static SENSOR_DEVICE_ATTR(in7_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 7);
+static SENSOR_DEVICE_ATTR(in8_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 8);
+static SENSOR_DEVICE_ATTR(in8_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 8);
+static SENSOR_DEVICE_ATTR(in9_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 9);
+static SENSOR_DEVICE_ATTR(in9_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 9);
+static SENSOR_DEVICE_ATTR(in10_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 10);
+static SENSOR_DEVICE_ATTR(in10_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 10);
+static SENSOR_DEVICE_ATTR(in11_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 11);
+static SENSOR_DEVICE_ATTR(in11_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 11);
+static SENSOR_DEVICE_ATTR(in12_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 12);
+static SENSOR_DEVICE_ATTR(in12_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 12);
+static SENSOR_DEVICE_ATTR(in13_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 13);
+static SENSOR_DEVICE_ATTR(in13_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 13);
+static SENSOR_DEVICE_ATTR(in14_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 14);
+static SENSOR_DEVICE_ATTR(in14_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 14);
+static SENSOR_DEVICE_ATTR(in15_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 15);
+static SENSOR_DEVICE_ATTR(in15_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 15);
+static SENSOR_DEVICE_ATTR(in16_gain, S_IRUGO|S_IWUSR, show_gain, set_gain, 16);
+static SENSOR_DEVICE_ATTR(in16_offset, S_IRUGO|S_IWUSR,
+ show_offset, set_offset, 16);
+
+#define IN_ATTRS(X)\
+ &sensor_dev_attr_in##X##_gain.dev_attr.attr, \
+ &sensor_dev_attr_in##X##_offset.dev_attr.attr \
+
+static struct attribute *tps80031_gpadc_attributes[] = {
+ IN_ATTRS(0),
+ IN_ATTRS(1),
+ IN_ATTRS(2),
+ IN_ATTRS(3),
+ IN_ATTRS(4),
+ IN_ATTRS(5),
+ IN_ATTRS(6),
+ IN_ATTRS(7),
+ IN_ATTRS(8),
+ IN_ATTRS(9),
+ IN_ATTRS(10),
+ IN_ATTRS(11),
+ IN_ATTRS(12),
+ IN_ATTRS(13),
+ IN_ATTRS(14),
+ IN_ATTRS(15),
+ IN_ATTRS(16),
+ NULL
+};
+
+static const struct attribute_group tps80031_gpadc_group = {
+ .attrs = tps80031_gpadc_attributes,
+};
+
+static long tps80031_gpadc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tps80031_gpadc_user_parms par;
+ int val, ret, channel_no;
+
+ ret = copy_from_user(&par, (void __user *) arg, sizeof(par));
+ if (ret) {
+ dev_dbg(the_gpadc->dev, "copy_from_user: %d\n", ret);
+ return -EACCES;
+ }
+ switch (cmd) {
+ case TPS80031_GPADC_IOCX_ADC_RAW_READ:
+ channel_no = par.channel;
+ val = tps80031_gpadc_conversion(channel_no);
+ if (likely(val > 0)) {
+ par.status = 0;
+ par.result = val;
+ } else if (val == 0) {
+ par.status = -ENODATA;
+ } else {
+ par.status = val;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = copy_to_user((void __user *) arg, &par, sizeof(par));
+ if (ret) {
+ dev_dbg(the_gpadc->dev, "copy_to_user: %d\n", ret);
+ return -EACCES;
+ }
+ return 0;
+}
+
+static const struct file_operations tps80031_gpadc_fileops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tps80031_gpadc_ioctl,
+};
+
+static struct miscdevice tps80031_gpadc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tps80031-gpadc",
+ .fops = &tps80031_gpadc_fileops
+};
+
+static int __devinit tps80031_gpadc_probe(struct platform_device *pdev)
+{
+ struct tps80031_gpadc_data *gpadc;
+
+ s16 delta_error1 = 0, delta_error2 = 0;
+ s16 ideal_code1, ideal_code2;
+ s16 scalar_delta1 = 0, scalar_delta2 = 0;
+ s32 gain_error_1;
+ s32 offset_error;
+ uint8_t l_delta1, l_delta2, h_delta2;
+ uint8_t l_scalar1, l_scalar2;
+ uint8_t sign;
+ uint8_t index;
+ int ret;
+
+ gpadc = devm_kzalloc(&pdev->dev, sizeof *gpadc, GFP_KERNEL);
+ if (!gpadc)
+ return -ENOMEM;
+
+ gpadc->dev = &pdev->dev;
+ ret = misc_register(&tps80031_gpadc_device);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not register misc_device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gpadc);
+ mutex_init(&gpadc->lock);
+
+ for (index = 0; index < TPS80031_GPADC_MAX_CHANNELS; index++) {
+ if (~calibration_bit_map & (1 << index))
+ continue;
+
+ if (~scalar_bit_map & (1 << index)) {
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2,
+ tps80031_trim[index].delta1_addr, &l_scalar1);
+ if (ret < 0)
+ goto err;
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2,
+ tps80031_trim[index].delta2_addr, &l_scalar2);
+ if (ret < 0)
+ goto err;
+
+ l_scalar1 &= tps80031_trim[index].delta1_mask;
+ sign = l_scalar1 & 1;
+ scalar_delta1 = l_scalar1 >> 1;
+ if (sign)
+ scalar_delta1 = 0 - scalar_delta1;
+ l_scalar2 &= tps80031_trim[index].delta2_mask;
+ sign = l_scalar2 & 1;
+ scalar_delta2 = l_scalar2 >> 1;
+ if (sign)
+ scalar_delta2 = 0 - scalar_delta2;
+ } else {
+ scalar_delta1 = 0;
+ scalar_delta2 = 0;
+ }
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2, TPS80031_GPADC_TRIM5,
+ &l_delta1);
+ if (ret < 0)
+ goto err;
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2, TPS80031_GPADC_TRIM6,
+ &l_delta2);
+ if (ret < 0)
+ goto err;
+ ret = tps80031_reg_read(gpadc, SLAVE_ID2, TPS80031_GPADC_TRIM19,
+ &h_delta2);
+ if (ret < 0)
+ goto err;
+
+ sign = l_delta1 & 1;
+
+ delta_error1 = l_delta1 >> 1;
+ if (sign)
+ delta_error1 = (0 - delta_error1);
+ sign = l_delta2 & 1;
+
+ delta_error2 = (l_delta2 >> 1) | (h_delta2 << 7);
+ if (sign)
+ delta_error2 = (0 - delta_error2);
+ ideal_code1 = tps80031_ideal[index].code1 * 4;
+ ideal_code2 = tps80031_ideal[index].code2 * 4;
+
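+ /*
+ * Two-point calibration: the measured deltas at the two ideal
+ * codes give the slope (gain error, scaled by SCALE) and the
+ * intercept (offset error), so that read_channels() can later
+ * apply corrected = (raw * SCALE - offset) / gain.
+ */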
+ gain_error_1 = ((delta_error2 + scalar_delta2) -
+ (delta_error1 - scalar_delta1)) *
+ SCALE / (ideal_code2 - ideal_code1);
+ offset_error = (delta_error1 + scalar_delta1) *
+ SCALE - gain_error_1 * ideal_code1;
+
+ tps80031_calib_tbl[index].gain_error = gain_error_1 + SCALE;
+ tps80031_calib_tbl[index].offset_error = offset_error;
+ }
+
+ the_gpadc = gpadc;
+ ret = sysfs_create_group(&pdev->dev.kobj, &tps80031_gpadc_group);
+ if (ret) {
+ dev_err(&pdev->dev, "could not create sysfs files\n");
+ goto err;
+ }
+ return 0;
+err:
+ misc_deregister(&tps80031_gpadc_device);
+ return ret;
+}
+
+static int __devexit tps80031_gpadc_remove(struct platform_device *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &tps80031_gpadc_group);
+ misc_deregister(&tps80031_gpadc_device);
+ return 0;
+}
+
+static struct platform_driver tps80031_gpadc_driver = {
+ .probe = tps80031_gpadc_probe,
+ .remove = __devexit_p(tps80031_gpadc_remove),
+ .driver = {
+ .name = "tps80031-gpadc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tps80031_gpadc_init(void)
+{
+ return platform_driver_register(&tps80031_gpadc_driver);
+}
+
+module_init(tps80031_gpadc_init);
+
+static void __exit tps80031_gpadc_exit(void)
+{
+ platform_driver_unregister(&tps80031_gpadc_driver);
+}
+
+module_exit(tps80031_gpadc_exit);
+MODULE_ALIAS("platform:tps80031-gpadc");
+MODULE_DESCRIPTION("tps80031 ADC driver");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index b8eef462737a..a455771c1be0 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -58,6 +58,14 @@
#define DRIVER_NAME "twl"
+#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
+ defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) || \
+ defined(CONFIG_TWL6030_BCI_BATTERY) || \
+ defined(CONFIG_TWL6030_BCI_BATTERY_MODULE)
+#define twl_has_bci() true
+#else
+#define twl_has_bci() false
+#endif
#if defined(CONFIG_KEYBOARD_TWL4030) || defined(CONFIG_KEYBOARD_TWL4030_MODULE)
#define twl_has_keypad() true
#else
@@ -77,7 +85,8 @@
#define twl_has_regulator() false
#endif
-#if defined(CONFIG_TWL4030_MADC) || defined(CONFIG_TWL4030_MADC_MODULE)
+#if defined(CONFIG_TWL4030_MADC) || defined(CONFIG_TWL4030_MADC_MODULE) ||\
+ defined(CONFIG_TWL6030_GPADC) || defined(CONFIG_TWL6030_GPADC_MODULE)
#define twl_has_madc() true
#else
#define twl_has_madc() false
@@ -125,7 +134,7 @@
/* Triton Core internal information (BEGIN) */
/* Last - for index max*/
-#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
+#define TWL4030_MODULE_LAST TWL6025_MODULE_CHARGER
#define TWL_NUM_SLAVES 4
@@ -208,6 +217,11 @@
#define TWL6030_BASEADD_RSV 0x0000
#define TWL6030_BASEADD_ZERO 0x0000
+/* twl6030 SMPS EPROM values */
+#define TWL6030_SMPS_OFFSET 0xB0
+#define TWL6030_SMPS_MULT 0xB3
+
+
/* Few power values */
#define R_CFG_BOOT 0x05
@@ -218,6 +232,9 @@
#define HIGH_PERF_SQ (1 << 3)
#define CK32K_LOWPWR_EN (1 << 7)
+/* MPU80031 specific clock32 generation register */
+#define REG_CLK32KG_CFG_TRANS 0x8D
+#define REG_CLK32KG_CFG_STATE 0x8E
/* chip-specific feature flags, for i2c_device_id.driver_data */
#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */
@@ -240,6 +257,33 @@ unsigned int twl_rev(void)
}
EXPORT_SYMBOL(twl_rev);
+static unsigned int twl_feat;
+unsigned int twl_features(void)
+{
+ return twl_feat;
+}
+EXPORT_SYMBOL(twl_features);
+
+u8 twl_get_smps_offset(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_OFFSET);
+ return value;
+}
+EXPORT_SYMBOL(twl_get_smps_offset);
+
+u8 twl_get_smps_mult(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_MULT);
+ return value;
+}
+EXPORT_SYMBOL(twl_get_smps_mult);
+
/* Structure for each TWL4030/TWL6030 Slave */
struct twl_client {
struct i2c_client *client;
@@ -490,6 +534,19 @@ int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
}
EXPORT_SYMBOL(twl_i2c_read_u8);
+
+void twl_reg_dump(int module, int start, int end)
+{
+ int i;
+ u8 val;
+
+ for (i = start; i < end; i++) {
+ twl_i2c_read_u8(module, &val, i);
+ printk(KERN_ERR "reg 0x%2x val 0x%2x\n", i, val);
+ }
+}
+EXPORT_SYMBOL(twl_reg_dump);
+
/*----------------------------------------------------------------------*/
/**
@@ -660,8 +717,16 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
if (IS_ERR(child))
return PTR_ERR(child);
}
+ if (twl_has_bci() && pdata->bci &&
+ (features & TWL6030_CLASS)) {
+ child = add_child(1, "twl6030_bci",
+ pdata->bci, sizeof(*pdata->bci),
+ false,
+ pdata->irq_base + CHARGER_INTR_OFFSET,
+ pdata->irq_base + CHARGERFAULT_INTR_OFFSET);
+ }
- if (twl_has_madc() && pdata->madc) {
+ if (twl_has_madc() && pdata->madc && twl_class_is_4030()) {
child = add_child(2, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
true, pdata->irq_base + MADC_INTR_OFFSET, 0);
@@ -669,6 +734,15 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
+ if (twl_has_madc() && pdata->madc && twl_class_is_6030()) {
+ child = add_child(1, "twl6030_gpadc",
+ pdata->madc, sizeof(*pdata->madc),
+ true, pdata->irq_base + MADC_INTR_OFFSET,
+ pdata->irq_base + GPADCSW_INTR_OFFSET);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
if (twl_has_rtc()) {
/*
* REVISIT platform_data here currently might expose the
@@ -1084,13 +1158,29 @@ static inline int __init unprotect_pm_master(void)
}
static void clocks_init(struct device *dev,
- struct twl4030_clock_init_data *clock)
+ struct twl4030_clock_init_data *clock,
+ unsigned long features)
{
int e = 0;
struct clk *osc;
u32 rate;
u8 ctrl = HFCLK_FREQ_26_MHZ;
+ if (features & MPU80031_SUBCLASS) {
+ if (clock && clock->clk32_active_state_on) {
+ e = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0x1,
+ REG_CLK32KG_CFG_TRANS);
+ if (!e)
+ e = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0x1,
+ REG_CLK32KG_CFG_STATE);
+ if (e) {
+ dev_err(dev, "Error in initialization"
+ " of 32K output\n");
+ return;
+ }
+ }
+ }
+
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
if (cpu_is_omap2430())
osc = clk_get(dev, "osc_ck");
@@ -1228,7 +1318,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
/* setup clock framework */
- clocks_init(&client->dev, pdata->clock);
+ clocks_init(&client->dev, pdata->clock, id->driver_data);
/* read TWL IDCODE Register */
if (twl_id == TWL4030_CLASS_ID) {
@@ -1269,7 +1359,16 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
}
+ twl_feat = id->driver_data;
+
status = add_children(pdata, id->driver_data);
+ if (status < 0)
+ goto fail;
+
+ /* Board Specific Init Callback */
+ if (pdata->init)
+ status = pdata->init();
+
fail:
if (status < 0)
twl_remove(client);
@@ -1287,6 +1386,7 @@ static const struct i2c_device_id twl_ids[] = {
and vibrator. Charger in USB module*/
{ "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
{ "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
+ { "mpu80031", TWL6030_CLASS | TWL6025_SUBCLASS | MPU80031_SUBCLASS},
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(i2c, twl_ids);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8563e9ab148b..1be15a47c873 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -61,6 +61,10 @@ config AD525X_DPOT_SPI
To compile this driver as a module, choose M here: the
module will be called ad525x_dpot-spi.
+config ANDROID_PMEM
+ bool "Android pmem allocator"
+ default y
+
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -392,6 +396,22 @@ config HMC6352
This driver provides support for the Honeywell HMC6352 compass,
providing configuration and heading data via sysfs.
+config SENSORS_AK8975
+ tristate "AK8975 compass support"
+ default n
+ depends on I2C
+ help
+ If you say yes here you get support for Asahi Kasei's
+ orientation sensor AK8975.
+
+config SENSORS_NCT1008
+ tristate "ON Semiconductor Temperature Sensor"
+ default n
+ depends on I2C
+ help
+ Say yes here if you wish to include the ON Semiconductor
+ NCT1008 Temperature sensor.
+
config EP93XX_PWM
tristate "EP93xx PWM support"
depends on ARCH_EP93XX
@@ -435,6 +455,10 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
+config UID_STAT
+ bool "UID based statistics tracking exported to /proc/uid_stat"
+ default n
+
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on X86
@@ -500,6 +524,48 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config WL127X_RFKILL
+ tristate "Bluetooth power control driver for TI wl127x"
+ depends on RFKILL
+ default n
+ ---help---
+ Creates an rfkill entry in sysfs for power control of Bluetooth
+ TI wl127x chips.
+
+config APANIC
+ bool "Android kernel panic diagnostics driver"
+ default n
+ ---help---
+ Driver which handles kernel panics and attempts to write
+ critical debugging data to flash.
+
+config APANIC_PLABEL
+ string "Android panic dump flash partition label"
+ depends on APANIC
+ default "kpanic"
+ ---help---
+ If your platform uses a different flash partition label for storing
+ crashdumps, enter it here.
+
+config BCM4329_RFKILL
+ bool "Enable BCM4329 RFKILL driver"
+ default n
+ ---help---
+ Adds BCM4329 RFKILL driver for Broadcom BCM4329 chipset
+
+config TEGRA_CRYPTO_DEV
+ bool "Device node to access tegra aes hardware"
+ ---help---
+ Dev node /dev/tegra-crypto in order to get access to tegra aes
+ hardware from user space
+
+config MAX1749_VIBRATOR
+ bool "MAX1749 vibrator device driver"
+ depends on ANDROID_TIMED_OUTPUT
+ default n
+ ---help---
+ Adds a timed output vibrator device node for MAX1749 vibrator motor
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -507,5 +573,7 @@ source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
+source "drivers/misc/inv_mpu/Kconfig"
+source "drivers/misc/tegra-baseband/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8f3efb68a141..6c38d32eff22 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for misc devices that really don't fit anywhere else.
#
+GCOV_PROFILE_tegra-cryptodev.o := y
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
@@ -13,12 +14,13 @@ obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_BMP085) += bmp085.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm.o
-obj-$(CONFIG_TIFM_CORE) += tifm_core.o
-obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
+obj-$(CONFIG_TIFM_CORE) += tifm_core.o
+obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
+obj-$(CONFIG_ANDROID_PMEM) += pmem.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
@@ -33,6 +35,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
+obj-$(CONFIG_UID_STAT) += uid_stat.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-$(CONFIG_HMC6352) += hmc6352.o
@@ -47,3 +50,12 @@ obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
+obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
+obj-$(CONFIG_APANIC) += apanic.o
+obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
+obj-$(CONFIG_SENSORS_NCT1008) += nct1008.o
+obj-$(CONFIG_BCM4329_RFKILL) += bcm4329_rfkill.o
+obj-$(CONFIG_MPU_SENSORS_MPU3050) += inv_mpu/
+obj-$(CONFIG_TEGRA_CRYPTO_DEV) += tegra-cryptodev.o
+obj-$(CONFIG_TEGRA_BB_SUPPORT) += tegra-baseband/
+obj-$(CONFIG_MAX1749_VIBRATOR) += max1749.o
diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c
new file mode 100644
index 000000000000..aef7985d4ce4
--- /dev/null
+++ b/drivers/misc/akm8975.c
@@ -0,0 +1,732 @@
+/* drivers/misc/akm8975.c - akm8975 compass driver
+ *
+ * Copyright (C) 2007-2008 HTC Corporation.
+ * Author: Hou-Kun Chen <houkun.chen@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Revised by AKM 2009/04/02
+ * Revised by Motorola 2010/05/27
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <linux/akm8975.h>
+#include <linux/earlysuspend.h>
+
+#define AK8975DRV_CALL_DBG 0
+#if AK8975DRV_CALL_DBG
+#define FUNCDBG(msg) pr_err("%s:%s\n", __func__, msg);
+#else
+#define FUNCDBG(msg)
+#endif
+
+#define AK8975DRV_DATA_DBG 0
+#define MAX_FAILURE_COUNT 10
+
+struct akm8975_data {
+ struct i2c_client *this_client;
+ struct akm8975_platform_data *pdata;
+ struct input_dev *input_dev;
+ struct work_struct work;
+ struct mutex flags_lock;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+};
+
+/*
+ * Because misc devices cannot carry a pointer from driver registration to
+ * open, we keep this global. This limits the driver to a single instance.
+ */
+struct akm8975_data *akmd_data;
+
+static DECLARE_WAIT_QUEUE_HEAD(open_wq);
+
+static atomic_t open_flag;
+
+static short m_flag;
+static short a_flag;
+static short t_flag;
+static short mv_flag;
+
+static short akmd_delay;
+
+static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client,
+ AK8975_REG_CNTL));
+}
+static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ strict_strtoul(buf, 10, &val);
+ if (val > 0xff)
+ return -EINVAL;
+ i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val);
+ return count;
+}
+static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store);
+
+static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = akm->this_client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) {
+ pr_err("akm8975_i2c_rxdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
+static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) {
+ pr_err("akm8975_i2c_txdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
+static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf)
+{
+ struct akm8975_data *data = i2c_get_clientdata(akm->this_client);
+
+ FUNCDBG("called");
+
+#if AK8975DRV_DATA_DBG
+ pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = %d\n",
+ rbuf[0], rbuf[1], rbuf[2]);
+ pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]);
+ pr_info("Acceleration: x = %d LSB, y = %d LSB, z = %d LSB\n",
+ rbuf[6], rbuf[7], rbuf[8]);
+ pr_info("Magnetic: x = %d LSB, y = %d LSB, z = %d LSB\n\n",
+ rbuf[9], rbuf[10], rbuf[11]);
+#endif
+ mutex_lock(&akm->flags_lock);
+ /* Report magnetic sensor information */
+ if (m_flag) {
+ input_report_abs(data->input_dev, ABS_RX, rbuf[0]);
+ input_report_abs(data->input_dev, ABS_RY, rbuf[1]);
+ input_report_abs(data->input_dev, ABS_RZ, rbuf[2]);
+ input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]);
+ }
+
+ /* Report acceleration sensor information */
+ if (a_flag) {
+ input_report_abs(data->input_dev, ABS_X, rbuf[6]);
+ input_report_abs(data->input_dev, ABS_Y, rbuf[7]);
+ input_report_abs(data->input_dev, ABS_Z, rbuf[8]);
+ input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]);
+ }
+
+ /* Report temperature information */
+ if (t_flag)
+ input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]);
+
+ if (mv_flag) {
+ input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]);
+ input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]);
+ input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]);
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ input_sync(data->input_dev);
+}
+
+static void akm8975_ecs_close_done(struct akm8975_data *akm)
+{
+ FUNCDBG("called");
+ mutex_lock(&akm->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&akm->flags_lock);
+}
+
+static int akm_aot_open(struct inode *inode, struct file *file)
+{
+ int ret = -1;
+
+ FUNCDBG("called");
+ if (atomic_cmpxchg(&open_flag, 0, 1) == 0) {
+ wake_up(&open_wq);
+ ret = 0;
+ }
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ file->private_data = akmd_data;
+
+ return ret;
+}
+
+static int akm_aot_release(struct inode *inode, struct file *file)
+{
+ FUNCDBG("called");
+ atomic_set(&open_flag, 0);
+ wake_up(&open_wq);
+ return 0;
+}
+
+static long akm_aot_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+ short flag;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ case ECS_IOCTL_APP_SET_AFLAG:
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ if (flag < 0 || flag > 1)
+ return -EINVAL;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ mutex_lock(&akm->flags_lock);
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ m_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MFLAG:
+ flag = m_flag;
+ break;
+ case ECS_IOCTL_APP_SET_AFLAG:
+ a_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_AFLAG:
+ flag = a_flag;
+ break;
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ mv_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ flag = mv_flag;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ akmd_delay = flag;
+ break;
+ case ECS_IOCTL_APP_GET_DELAY:
+ flag = akmd_delay;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_GET_MFLAG:
+ case ECS_IOCTL_APP_GET_AFLAG:
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ case ECS_IOCTL_APP_GET_DELAY:
+ if (copy_to_user(argp, &flag, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int akmd_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+
+ FUNCDBG("called");
+ err = nonseekable_open(inode, file);
+ if (err)
+ return err;
+
+ file->private_data = akmd_data;
+ return 0;
+}
+
+static int akmd_release(struct inode *inode, struct file *file)
+{
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+ akm8975_ecs_close_done(akm);
+ return 0;
+}
+
+static long akmd_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+
+ char rwbuf[16];
+ int ret = -1;
+ int status;
+ short value[12];
+ short delay;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ case ECS_IOCTL_WRITE:
+ if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+
+ case ECS_IOCTL_SET_YPR:
+ if (copy_from_user(&value, argp, sizeof(value)))
+ return -EFAULT;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (rwbuf[0] < 1 || rwbuf[0] > sizeof(rwbuf) - 1)
+ return -EINVAL;
+
+ ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case ECS_IOCTL_WRITE:
+ if (rwbuf[0] < 2 || rwbuf[0] > sizeof(rwbuf) - 1)
+ return -EINVAL;
+
+ ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+ case ECS_IOCTL_SET_YPR:
+ akm8975_ecs_report_value(akm, value);
+ break;
+
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) != 0));
+ status = atomic_read(&open_flag);
+ break;
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) == 0));
+ status = atomic_read(&open_flag);
+ break;
+
+ case ECS_IOCTL_GET_DELAY:
+ delay = akmd_delay;
+ break;
+
+ default:
+ FUNCDBG("Unknown cmd\n");
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (copy_to_user(argp, &rwbuf, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ if (copy_to_user(argp, &status, sizeof(status)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_DELAY:
+ if (copy_to_user(argp, &delay, sizeof(delay)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* needed to clear the int. pin */
+static void akm_work_func(struct work_struct *work)
+{
+ struct akm8975_data *akm =
+ container_of(work, struct akm8975_data, work);
+
+ FUNCDBG("called");
+ enable_irq(akm->this_client->irq);
+}
+
+static irqreturn_t akm8975_interrupt(int irq, void *dev_id)
+{
+ struct akm8975_data *akm = dev_id;
+ FUNCDBG("called");
+
+ disable_irq_nosync(akm->this_client->irq);
+ schedule_work(&akm->work);
+ return IRQ_HANDLED;
+}
+
+static int akm8975_power_off(struct akm8975_data *akm)
+{
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_off)
+ akm->pdata->power_off();
+
+ return 0;
+}
+
+static int akm8975_power_on(struct akm8975_data *akm)
+{
+ int err;
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_on) {
+ err = akm->pdata->power_on();
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_off(akm);
+}
+
+static int akm8975_resume(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_on(akm);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void akm8975_early_suspend(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_suspend(akm->this_client, PMSG_SUSPEND);
+}
+
+static void akm8975_early_resume(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_resume(akm->this_client);
+}
+#endif
+
+
+static int akm8975_init_client(struct i2c_client *client)
+{
+ struct akm8975_data *data;
+ int ret;
+
+ data = i2c_get_clientdata(client);
+
+ ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING,
+ "akm8975", data);
+
+ if (ret < 0) {
+ pr_err("akm8975_init_client: request irq failed\n");
+ goto err;
+ }
+
+ init_waitqueue_head(&open_wq);
+
+ mutex_lock(&data->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&data->flags_lock);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct file_operations akmd_fops = {
+ .owner = THIS_MODULE,
+ .open = akmd_open,
+ .release = akmd_release,
+ .unlocked_ioctl = akmd_ioctl,
+};
+
+static const struct file_operations akm_aot_fops = {
+ .owner = THIS_MODULE,
+ .open = akm_aot_open,
+ .release = akm_aot_release,
+ .unlocked_ioctl = akm_aot_ioctl,
+};
+
+static struct miscdevice akm_aot_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "akm8975_aot",
+ .fops = &akm_aot_fops,
+};
+
+static struct miscdevice akmd_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "akm8975_dev",
+ .fops = &akmd_fops,
+};
+
+int akm8975_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct akm8975_data *akm;
+ int err;
+ FUNCDBG("called");
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_platform_data_null;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_check_functionality_failed;
+ }
+
+ akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL);
+ if (!akm) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ err = -ENOMEM;
+ goto exit_alloc_data_failed;
+ }
+
+ akm->pdata = client->dev.platform_data;
+
+ mutex_init(&akm->flags_lock);
+ INIT_WORK(&akm->work, akm_work_func);
+ i2c_set_clientdata(client, akm);
+
+ err = akm8975_power_on(akm);
+ if (err < 0)
+ goto exit_power_on_failed;
+
+ akm8975_init_client(client);
+ akm->this_client = client;
+ akmd_data = akm;
+
+ akm->input_dev = input_allocate_device();
+ if (!akm->input_dev) {
+ err = -ENOMEM;
+ dev_err(&akm->this_client->dev,
+ "input device allocate failed\n");
+ goto exit_input_dev_alloc_failed;
+ }
+
+ set_bit(EV_ABS, akm->input_dev->evbit);
+
+ /* yaw */
+ input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0);
+ /* pitch */
+ input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0);
+ /* roll */
+ input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0);
+ /* x-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0);
+ /* y-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0);
+ /* z-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0);
+ /* temperature */
+ input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0);
+ /* status of magnetic sensor */
+ input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0);
+ /* status of acceleration sensor */
+ input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0);
+ /* x-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0);
+ /* y-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0);
+ /* z-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0);
+
+ akm->input_dev->name = "compass";
+
+ err = input_register_device(akm->input_dev);
+ if (err) {
+ pr_err("akm8975_probe: Unable to register input device: %s\n",
+ akm->input_dev->name);
+ goto exit_input_register_device_failed;
+ }
+
+ err = misc_register(&akmd_device);
+ if (err) {
+ pr_err("akm8975_probe: akmd_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = misc_register(&akm_aot_device);
+ if (err) {
+ pr_err("akm8975_probe: akm_aot_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = device_create_file(&client->dev, &dev_attr_akm_ms1);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ akm->early_suspend.suspend = akm8975_early_suspend;
+ akm->early_suspend.resume = akm8975_early_resume;
+ register_early_suspend(&akm->early_suspend);
+#endif
+ return 0;
+
+exit_misc_device_register_failed:
+exit_input_register_device_failed:
+ input_free_device(akm->input_dev);
+exit_input_dev_alloc_failed:
+ akm8975_power_off(akm);
+exit_power_on_failed:
+ kfree(akm);
+exit_alloc_data_failed:
+exit_check_functionality_failed:
+exit_platform_data_null:
+ return err;
+}
+
+static int __devexit akm8975_remove(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+ FUNCDBG("called");
+ free_irq(client->irq, akm);
+ input_unregister_device(akm->input_dev);
+ misc_deregister(&akmd_device);
+ misc_deregister(&akm_aot_device);
+ akm8975_power_off(akm);
+ kfree(akm);
+ return 0;
+}
+
+static const struct i2c_device_id akm8975_id[] = {
+ { "akm8975", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, akm8975_id);
+
+static struct i2c_driver akm8975_driver = {
+ .probe = akm8975_probe,
+ .remove = akm8975_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .resume = akm8975_resume,
+ .suspend = akm8975_suspend,
+#endif
+ .id_table = akm8975_id,
+ .driver = {
+ .name = "akm8975",
+ },
+};
+
+static int __init akm8975_init(void)
+{
+ pr_info("AK8975 compass driver: init\n");
+ FUNCDBG("AK8975 compass driver: init\n");
+ return i2c_add_driver(&akm8975_driver);
+}
+
+static void __exit akm8975_exit(void)
+{
+ FUNCDBG("AK8975 compass driver: exit\n");
+ i2c_del_driver(&akm8975_driver);
+}
+
+module_init(akm8975_init);
+module_exit(akm8975_exit);
+
+MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>");
+MODULE_DESCRIPTION("AK8975 compass driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/apanic.c b/drivers/misc/apanic.c
new file mode 100644
index 000000000000..ca875f89da7a
--- /dev/null
+++ b/drivers/misc/apanic.c
@@ -0,0 +1,606 @@
+/* drivers/misc/apanic.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+
+extern void ram_console_enable_console(int);
+
+struct panic_header {
+ u32 magic;
+#define PANIC_MAGIC 0xdeadf00d
+
+ u32 version;
+#define PHDR_VERSION 0x01
+
+ u32 console_offset;
+ u32 console_length;
+
+ u32 threads_offset;
+ u32 threads_length;
+};
+
+struct apanic_data {
+ struct mtd_info *mtd;
+ struct panic_header curr;
+ void *bounce;
+ struct proc_dir_entry *apanic_console;
+ struct proc_dir_entry *apanic_threads;
+};
+
+static struct apanic_data drv_ctx;
+static struct work_struct proc_removal_work;
+static DEFINE_MUTEX(drv_mutex);
+
+static unsigned int *apanic_bbt;
+static unsigned int apanic_erase_blocks;
+static unsigned int apanic_good_blocks;
+
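+/*
+ * Bad block table for the panic partition: one bit per erase block,
+ * packed into 32-bit words.
+ */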
+static void set_bb(unsigned int block, unsigned int *bbt)
+{
+ unsigned int flag = 1;
+
+ BUG_ON(block >= apanic_erase_blocks);
+
+ flag = flag << (block%32);
+ apanic_bbt[block/32] |= flag;
+ apanic_good_blocks--;
+}
+
+static unsigned int get_bb(unsigned int block, unsigned int *bbt)
+{
+ unsigned int flag;
+
+ BUG_ON(block >= apanic_erase_blocks);
+
+ flag = 1 << (block%32);
+ return apanic_bbt[block/32] & flag;
+}
+
+static void alloc_bbt(struct mtd_info *mtd, unsigned int *bbt)
+{
+ int bbt_size;
+ apanic_erase_blocks = (mtd->size)>>(mtd->erasesize_shift);
+ bbt_size = (apanic_erase_blocks+32)/32;
+
+ apanic_bbt = kmalloc(bbt_size*4, GFP_KERNEL);
+ memset(apanic_bbt, 0, bbt_size*4);
+ apanic_good_blocks = apanic_erase_blocks;
+}
+static void scan_bbt(struct mtd_info *mtd, unsigned int *bbt)
+{
+ int i;
+
+ for (i = 0; i < apanic_erase_blocks; i++) {
+ if (mtd->block_isbad(mtd, i*mtd->erasesize))
+ set_bb(i, apanic_bbt);
+ }
+}
+
+#define APANIC_INVALID_OFFSET 0xFFFFFFFF
+
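+/*
+ * Translate a logical offset into the panic partition to a physical
+ * offset, skipping erase blocks marked bad in the bad block table.
+ * Returns APANIC_INVALID_OFFSET if there are not enough good blocks.
+ */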
+static unsigned int phy_offset(struct mtd_info *mtd, unsigned int offset)
+{
+ unsigned int logic_block = offset>>(mtd->erasesize_shift);
+ unsigned int phy_block;
+ unsigned good_block = 0;
+
+ for (phy_block = 0; phy_block < apanic_erase_blocks; phy_block++) {
+ if (!get_bb(phy_block, apanic_bbt))
+ good_block++;
+ if (good_block == (logic_block + 1))
+ break;
+ }
+
+ if (good_block != (logic_block + 1))
+ return APANIC_INVALID_OFFSET;
+
+ return offset + ((phy_block-logic_block)<<mtd->erasesize_shift);
+}
+
+static void apanic_erase_callback(struct erase_info *done)
+{
+ wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv;
+ wake_up(wait_q);
+}
+
+static int apanic_proc_read(char *buffer, char **start, off_t offset,
+ int count, int *peof, void *dat)
+{
+ struct apanic_data *ctx = &drv_ctx;
+ size_t file_length;
+ off_t file_offset;
+ unsigned int page_no;
+ off_t page_offset;
+ int rc;
+ size_t len;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&drv_mutex);
+
+ switch ((int) dat) {
+ case 1: /* apanic_console */
+ file_length = ctx->curr.console_length;
+ file_offset = ctx->curr.console_offset;
+ break;
+ case 2: /* apanic_threads */
+ file_length = ctx->curr.threads_length;
+ file_offset = ctx->curr.threads_offset;
+ break;
+ default:
+ pr_err("Bad dat (%d)\n", (int) dat);
+ mutex_unlock(&drv_mutex);
+ return -EINVAL;
+ }
+
+ if ((offset + count) > file_length) {
+ mutex_unlock(&drv_mutex);
+ return 0;
+ }
+
+ /* We only support reading a maximum of a flash page */
+ if (count > ctx->mtd->writesize)
+ count = ctx->mtd->writesize;
+
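+ /* Translate the proc-file offset into a flash page number and an
+ * offset within that page; the page is read via the bounce buffer. */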
+ page_no = (file_offset + offset) / ctx->mtd->writesize;
+ page_offset = (file_offset + offset) % ctx->mtd->writesize;
+
+
+ if (phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize))
+ == APANIC_INVALID_OFFSET) {
+ pr_err("apanic: reading an invalid address\n");
+ mutex_unlock(&drv_mutex);
+ return -EINVAL;
+ }
+ rc = ctx->mtd->read(ctx->mtd,
+ phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)),
+ ctx->mtd->writesize,
+ &len, ctx->bounce);
+
+ if (page_offset)
+ count -= page_offset;
+ memcpy(buffer, ctx->bounce + page_offset, count);
+
+ *start = (char *)count;
+
+ if ((offset + count) == file_length)
+ *peof = 1;
+
+ mutex_unlock(&drv_mutex);
+ return count;
+}
+
+static void mtd_panic_erase(void)
+{
+ struct apanic_data *ctx = &drv_ctx;
+ struct erase_info erase;
+ DECLARE_WAITQUEUE(wait, current);
+ wait_queue_head_t wait_q;
+ int rc, i;
+
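+ /*
+ * Erase every erase block of the panic partition, skipping blocks
+ * already marked bad and marking a block bad if its erase fails
+ * with -EIO.
+ */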
+ init_waitqueue_head(&wait_q);
+ erase.mtd = ctx->mtd;
+ erase.callback = apanic_erase_callback;
+ erase.len = ctx->mtd->erasesize;
+ erase.priv = (u_long)&wait_q;
+ for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) {
+ erase.addr = i;
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&wait_q, &wait);
+
+ if (get_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt)) {
+ printk(KERN_WARNING
+ "apanic: Skipping erase of bad "
+ "block @%llx\n", erase.addr);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&wait_q, &wait);
+ continue;
+ }
+
+ rc = ctx->mtd->erase(ctx->mtd, &erase);
+ if (rc) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&wait_q, &wait);
+ printk(KERN_ERR
+ "apanic: Erase of 0x%llx, 0x%llx failed\n",
+ (unsigned long long) erase.addr,
+ (unsigned long long) erase.len);
+ if (rc == -EIO) {
+ if (ctx->mtd->block_markbad(ctx->mtd,
+ erase.addr)) {
+ printk(KERN_ERR
+ "apanic: Err marking blk bad\n");
+ goto out;
+ }
+ printk(KERN_INFO
+ "apanic: Marked a bad block"
+ " @%llx\n", erase.addr);
+ set_bb(erase.addr>>ctx->mtd->erasesize_shift,
+ apanic_bbt);
+ continue;
+ }
+ goto out;
+ }
+ schedule();
+ remove_wait_queue(&wait_q, &wait);
+ }
+ printk(KERN_DEBUG "apanic: %s partition erased\n",
+ CONFIG_APANIC_PLABEL);
+out:
+ return;
+}
+
+static void apanic_remove_proc_work(struct work_struct *work)
+{
+ struct apanic_data *ctx = &drv_ctx;
+
+ mutex_lock(&drv_mutex);
+ mtd_panic_erase();
+ memset(&ctx->curr, 0, sizeof(struct panic_header));
+ if (ctx->apanic_console) {
+ remove_proc_entry("apanic_console", NULL);
+ ctx->apanic_console = NULL;
+ }
+ if (ctx->apanic_threads) {
+ remove_proc_entry("apanic_threads", NULL);
+ ctx->apanic_threads = NULL;
+ }
+ mutex_unlock(&drv_mutex);
+}
+
+static int apanic_proc_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ schedule_work(&proc_removal_work);
+ return count;
+}
+
+static void mtd_panic_notify_add(struct mtd_info *mtd)
+{
+ struct apanic_data *ctx = &drv_ctx;
+ struct panic_header *hdr = ctx->bounce;
+ size_t len;
+ int rc;
+ int proc_entry_created = 0;
+
+ if (strcmp(mtd->name, CONFIG_APANIC_PLABEL))
+ return;
+
+ ctx->mtd = mtd;
+
+ alloc_bbt(mtd, apanic_bbt);
+ scan_bbt(mtd, apanic_bbt);
+
+ if (apanic_good_blocks == 0) {
+ printk(KERN_ERR "apanic: no any good blocks?!\n");
+ goto out_err;
+ }
+
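+ /* Read the first good page; it holds the panic header if a dump
+ * was written during a previous boot. */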
+ rc = mtd->read(mtd, phy_offset(mtd, 0), mtd->writesize,
+ &len, ctx->bounce);
+ if (rc && rc == -EBADMSG) {
+ printk(KERN_WARNING
+ "apanic: Bad ECC on block 0 (ignored)\n");
+ } else if (rc && rc != -EUCLEAN) {
+ printk(KERN_ERR "apanic: Error reading block 0 (%d)\n", rc);
+ goto out_err;
+ }
+
+ if (len != mtd->writesize) {
+ printk(KERN_ERR "apanic: Bad read size (%d)\n", rc);
+ goto out_err;
+ }
+
+ printk(KERN_INFO "apanic: Bound to mtd partition '%s'\n", mtd->name);
+
+ if (hdr->magic != PANIC_MAGIC) {
+ printk(KERN_INFO "apanic: No panic data available\n");
+ mtd_panic_erase();
+ return;
+ }
+
+ if (hdr->version != PHDR_VERSION) {
+ printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n",
+ hdr->version, PHDR_VERSION);
+ mtd_panic_erase();
+ return;
+ }
+
+ memcpy(&ctx->curr, hdr, sizeof(struct panic_header));
+
+ printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u)\n",
+ hdr->console_offset, hdr->console_length,
+ hdr->threads_offset, hdr->threads_length);
+
+ if (hdr->console_length) {
+ ctx->apanic_console = create_proc_entry("apanic_console",
+ S_IFREG | S_IRUGO, NULL);
+ if (!ctx->apanic_console)
+ printk(KERN_ERR "%s: failed creating procfile\n",
+ __func__);
+ else {
+ ctx->apanic_console->read_proc = apanic_proc_read;
+ ctx->apanic_console->write_proc = apanic_proc_write;
+ ctx->apanic_console->size = hdr->console_length;
+ ctx->apanic_console->data = (void *) 1;
+ proc_entry_created = 1;
+ }
+ }
+
+ if (hdr->threads_length) {
+ ctx->apanic_threads = create_proc_entry("apanic_threads",
+ S_IFREG | S_IRUGO, NULL);
+ if (!ctx->apanic_threads)
+ printk(KERN_ERR "%s: failed creating procfile\n",
+ __func__);
+ else {
+ ctx->apanic_threads->read_proc = apanic_proc_read;
+ ctx->apanic_threads->write_proc = apanic_proc_write;
+ ctx->apanic_threads->size = hdr->threads_length;
+ ctx->apanic_threads->data = (void *) 2;
+ proc_entry_created = 1;
+ }
+ }
+
+ if (!proc_entry_created)
+ mtd_panic_erase();
+
+ return;
+out_err:
+ ctx->mtd = NULL;
+}
+
+static void mtd_panic_notify_remove(struct mtd_info *mtd)
+{
+ struct apanic_data *ctx = &drv_ctx;
+ if (mtd == ctx->mtd) {
+ ctx->mtd = NULL;
+ printk(KERN_INFO "apanic: Unbound from %s\n", mtd->name);
+ }
+}
+
+static struct mtd_notifier mtd_panic_notifier = {
+ .add = mtd_panic_notify_add,
+ .remove = mtd_panic_notify_remove,
+};
+
+static int in_panic = 0;
+
+static int apanic_writeflashpage(struct mtd_info *mtd, loff_t to,
+ const u_char *buf)
+{
+ int rc;
+ size_t wlen;
+ int panic = in_interrupt() | in_atomic();
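+ /* In interrupt or atomic context the regular MTD write path may
+ * sleep, so the panic_write hook must be used there instead. */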
+
+ if (panic && !mtd->panic_write) {
+ printk(KERN_EMERG "%s: No panic_write available\n", __func__);
+ return 0;
+ } else if (!panic && !mtd->write) {
+ printk(KERN_EMERG "%s: No write available\n", __func__);
+ return 0;
+ }
+
+ to = phy_offset(mtd, to);
+ if (to == APANIC_INVALID_OFFSET) {
+ printk(KERN_EMERG "apanic: write to invalid address\n");
+ return 0;
+ }
+
+ if (panic)
+ rc = mtd->panic_write(mtd, to, mtd->writesize, &wlen, buf);
+ else
+ rc = mtd->write(mtd, to, mtd->writesize, &wlen, buf);
+
+ if (rc) {
+ printk(KERN_EMERG
+ "%s: Error writing data to flash (%d)\n",
+ __func__, rc);
+ return rc;
+ }
+
+ return wlen;
+}
+
+extern int log_buf_copy(char *dest, int idx, int len);
+extern void log_buf_clear(void);
+
+/*
+ * Writes the contents of the console to the specified offset in flash.
+ * Returns number of bytes written
+ */
+static int apanic_write_console(struct mtd_info *mtd, unsigned int off)
+{
+ struct apanic_data *ctx = &drv_ctx;
+ int saved_oip;
+ int idx = 0;
+ int rc, rc2;
+ unsigned int last_chunk = 0;
+
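+ /*
+ * Copy the kernel log buffer to flash one write-size page at a
+ * time, zero-padding the final partial page before writing it.
+ */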
+ while (!last_chunk) {
+ saved_oip = oops_in_progress;
+ oops_in_progress = 1;
+ rc = log_buf_copy(ctx->bounce, idx, mtd->writesize);
+ if (rc < 0)
+ break;
+
+ if (rc != mtd->writesize)
+ last_chunk = rc;
+
+ oops_in_progress = saved_oip;
+ if (rc <= 0)
+ break;
+ if (rc != mtd->writesize)
+ memset(ctx->bounce + rc, 0, mtd->writesize - rc);
+
+ rc2 = apanic_writeflashpage(mtd, off, ctx->bounce);
+ if (rc2 <= 0) {
+ printk(KERN_EMERG
+ "apanic: Flash write failed (%d)\n", rc2);
+ return idx;
+ }
+ if (!last_chunk)
+ idx += rc2;
+ else
+ idx += last_chunk;
+ off += rc2;
+ }
+ return idx;
+}
+
+static int apanic(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct apanic_data *ctx = &drv_ctx;
+ struct panic_header *hdr = (struct panic_header *) ctx->bounce;
+ int console_offset = 0;
+ int console_len = 0;
+ int threads_offset = 0;
+ int threads_len = 0;
+ int rc;
+
+ if (in_panic)
+ return NOTIFY_DONE;
+ in_panic = 1;
+#ifdef CONFIG_PREEMPT
+ /* Ensure that cond_resched() won't try to preempt anybody */
+ add_preempt_count(PREEMPT_ACTIVE);
+#endif
+ touch_softlockup_watchdog();
+
+ if (!ctx->mtd)
+ goto out;
+
+ if (ctx->curr.magic) {
+ printk(KERN_EMERG "Crash partition in use!\n");
+ goto out;
+ }
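+ /*
+ * Flash layout: page 0 holds the panic header, the console log
+ * starts at the second page, and the thread dump follows at the
+ * next page-aligned offset.
+ */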
+ console_offset = ctx->mtd->writesize;
+
+ /*
+ * Write out the console
+ */
+ console_len = apanic_write_console(ctx->mtd, console_offset);
+ if (console_len < 0) {
+ printk(KERN_EMERG "Error writing console to panic log! (%d)\n",
+ console_len);
+ console_len = 0;
+ }
+
+ /*
+ * Write out all threads
+ */
+ threads_offset = ALIGN(console_offset + console_len,
+ ctx->mtd->writesize);
+ if (!threads_offset)
+ threads_offset = ctx->mtd->writesize;
+
+ ram_console_enable_console(0);
+
+ log_buf_clear();
+ show_state_filter(0);
+ threads_len = apanic_write_console(ctx->mtd, threads_offset);
+ if (threads_len < 0) {
+ printk(KERN_EMERG "Error writing threads to panic log! (%d)\n",
+ threads_len);
+ threads_len = 0;
+ }
+
+ /*
+ * Finally write the panic header
+ */
+ memset(ctx->bounce, 0, PAGE_SIZE);
+ hdr->magic = PANIC_MAGIC;
+ hdr->version = PHDR_VERSION;
+
+ hdr->console_offset = console_offset;
+ hdr->console_length = console_len;
+
+ hdr->threads_offset = threads_offset;
+ hdr->threads_length = threads_len;
+
+ rc = apanic_writeflashpage(ctx->mtd, 0, ctx->bounce);
+ if (rc <= 0) {
+ printk(KERN_EMERG "apanic: Header write failed (%d)\n",
+ rc);
+ goto out;
+ }
+
+ printk(KERN_EMERG "apanic: Panic dump sucessfully written to flash\n");
+
+ out:
+#ifdef CONFIG_PREEMPT
+ sub_preempt_count(PREEMPT_ACTIVE);
+#endif
+ in_panic = 0;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_blk = {
+ .notifier_call = apanic,
+};
+
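+/*
+ * debugfs test hooks: reading the "apanic" file runs the panic handler
+ * directly, while writing to it forces a real panic via BUG().
+ */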
+static int panic_dbg_get(void *data, u64 *val)
+{
+ apanic(NULL, 0, NULL);
+ return 0;
+}
+
+static int panic_dbg_set(void *data, u64 val)
+{
+ BUG();
+ return -1;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n");
+
+int __init apanic_init(void)
+{
+ register_mtd_user(&mtd_panic_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+ debugfs_create_file("apanic", 0644, NULL, NULL, &panic_dbg_fops);
+ memset(&drv_ctx, 0, sizeof(drv_ctx));
+ drv_ctx.bounce = (void *) __get_free_page(GFP_KERNEL);
+ INIT_WORK(&proc_removal_work, apanic_remove_proc_work);
+ printk(KERN_INFO "Android kernel panic handler initialized (bind=%s)\n",
+ CONFIG_APANIC_PLABEL);
+ return 0;
+}
+
+module_init(apanic_init);
diff --git a/drivers/misc/bcm4329_rfkill.c b/drivers/misc/bcm4329_rfkill.c
new file mode 100644
index 000000000000..a077326f2553
--- /dev/null
+++ b/drivers/misc/bcm4329_rfkill.c
@@ -0,0 +1,207 @@
+/*
+ * drivers/misc/bcm4329_rfkill.c
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+struct bcm4329_rfkill_data {
+ int gpio_reset;
+ int gpio_shutdown;
+ int delay;
+ struct clk *bt_32k_clk;
+};
+
+static struct bcm4329_rfkill_data *bcm4329_rfkill;
+
+static int bcm4329_bt_rfkill_set_power(void *data, bool blocked)
+{
+ if (blocked) {
+ if (bcm4329_rfkill->gpio_shutdown)
+ gpio_direction_output(bcm4329_rfkill->gpio_shutdown, 0);
+ if (bcm4329_rfkill->gpio_reset)
+ gpio_direction_output(bcm4329_rfkill->gpio_reset, 0);
+ if (bcm4329_rfkill->bt_32k_clk)
+ clk_disable(bcm4329_rfkill->bt_32k_clk);
+ } else {
+ if (bcm4329_rfkill->bt_32k_clk)
+ clk_enable(bcm4329_rfkill->bt_32k_clk);
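+ /* Pulse the shutdown and reset lines low and then high, with
+ * 100 ms settling delays, to restart the chip. */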
+ if (bcm4329_rfkill->gpio_shutdown)
+ {
+ gpio_direction_output(bcm4329_rfkill->gpio_shutdown, 0);
+ msleep(100);
+ gpio_direction_output(bcm4329_rfkill->gpio_shutdown, 1);
+ msleep(100);
+ }
+ if (bcm4329_rfkill->gpio_reset)
+ {
+ gpio_direction_output(bcm4329_rfkill->gpio_reset, 0);
+ msleep(100);
+ gpio_direction_output(bcm4329_rfkill->gpio_reset, 1);
+ msleep(100);
+ }
+ }
+
+ return 0;
+}
+
+static const struct rfkill_ops bcm4329_bt_rfkill_ops = {
+ .set_block = bcm4329_bt_rfkill_set_power,
+};
+
+static int bcm4329_rfkill_probe(struct platform_device *pdev)
+{
+ struct rfkill *bt_rfkill;
+ struct resource *res;
+ int ret;
+ bool enable = false; /* off */
+ bool default_sw_block_state;
+
+ bcm4329_rfkill = kzalloc(sizeof(*bcm4329_rfkill), GFP_KERNEL);
+ if (!bcm4329_rfkill)
+ return -ENOMEM;
+
+ bcm4329_rfkill->bt_32k_clk = clk_get(&pdev->dev, "bcm4329_32k_clk");
+ if (IS_ERR(bcm4329_rfkill->bt_32k_clk)) {
+ pr_warn("%s: can't find bcm4329_32k_clk.\
+ assuming 32k clock to chip\n", __func__);
+ bcm4329_rfkill->bt_32k_clk = NULL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "bcm4329_nreset_gpio");
+ if (res) {
+ bcm4329_rfkill->gpio_reset = res->start;
+ tegra_gpio_enable(bcm4329_rfkill->gpio_reset);
+ ret = gpio_request(bcm4329_rfkill->gpio_reset,
+ "bcm4329_nreset_gpio");
+ } else {
+ pr_warn("%s : can't find reset gpio.\n", __func__);
+ bcm4329_rfkill->gpio_reset = 0;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "bcm4329_nshutdown_gpio");
+ if (res) {
+ bcm4329_rfkill->gpio_shutdown = res->start;
+ tegra_gpio_enable(bcm4329_rfkill->gpio_shutdown);
+ ret = gpio_request(bcm4329_rfkill->gpio_shutdown,
+ "bcm4329_nshutdown_gpio");
+ } else {
+ pr_warn("%s : can't find shutdown gpio.\n", __func__);
+ bcm4329_rfkill->gpio_shutdown = 0;
+ }
+
+ /* make sure at least one of the GPIOs is defined */
+ if (!bcm4329_rfkill->gpio_reset && !bcm4329_rfkill->gpio_shutdown)
+ goto free_bcm_res;
+
+ if (bcm4329_rfkill->bt_32k_clk && enable)
+ clk_enable(bcm4329_rfkill->bt_32k_clk);
+ if (bcm4329_rfkill->gpio_shutdown)
+ gpio_direction_output(bcm4329_rfkill->gpio_shutdown, enable);
+ if (bcm4329_rfkill->gpio_reset)
+ gpio_direction_output(bcm4329_rfkill->gpio_reset, enable);
+
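+ /* The chip is left powered down above (enable == false), so the
+ * rfkill device is registered with a matching soft-blocked state. */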
+ bt_rfkill = rfkill_alloc("bcm4329 Bluetooth", &pdev->dev,
+ RFKILL_TYPE_BLUETOOTH, &bcm4329_bt_rfkill_ops,
+ NULL);
+
+ if (unlikely(!bt_rfkill))
+ goto free_bcm_res;
+
+ default_sw_block_state = !enable;
+ rfkill_set_states(bt_rfkill, default_sw_block_state, false);
+
+ ret = rfkill_register(bt_rfkill);
+
+ if (unlikely(ret)) {
+ rfkill_destroy(bt_rfkill);
+ goto free_bcm_res;
+ }
+
+ platform_set_drvdata(pdev, bt_rfkill);
+
+ return 0;
+
+free_bcm_res:
+ if (bcm4329_rfkill->gpio_shutdown)
+ gpio_free(bcm4329_rfkill->gpio_shutdown);
+ if (bcm4329_rfkill->gpio_reset)
+ gpio_free(bcm4329_rfkill->gpio_reset);
+ if (bcm4329_rfkill->bt_32k_clk && enable)
+ clk_disable(bcm4329_rfkill->bt_32k_clk);
+ if (bcm4329_rfkill->bt_32k_clk)
+ clk_put(bcm4329_rfkill->bt_32k_clk);
+ kfree(bcm4329_rfkill);
+ return -ENODEV;
+}
+
+static int bcm4329_rfkill_remove(struct platform_device *pdev)
+{
+ struct rfkill *bt_rfkill = platform_get_drvdata(pdev);
+
+ if (bcm4329_rfkill->bt_32k_clk)
+ clk_put(bcm4329_rfkill->bt_32k_clk);
+ rfkill_unregister(bt_rfkill);
+ rfkill_destroy(bt_rfkill);
+ if (bcm4329_rfkill->gpio_shutdown)
+ gpio_free(bcm4329_rfkill->gpio_shutdown);
+ if (bcm4329_rfkill->gpio_reset)
+ gpio_free(bcm4329_rfkill->gpio_reset);
+ kfree(bcm4329_rfkill);
+
+ return 0;
+}
+
+static struct platform_driver bcm4329_rfkill_driver = {
+ .probe = bcm4329_rfkill_probe,
+ .remove = bcm4329_rfkill_remove,
+ .driver = {
+ .name = "bcm4329_rfkill",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bcm4329_rfkill_init(void)
+{
+ return platform_driver_register(&bcm4329_rfkill_driver);
+}
+
+static void __exit bcm4329_rfkill_exit(void)
+{
+ platform_driver_unregister(&bcm4329_rfkill_driver);
+}
+
+module_init(bcm4329_rfkill_init);
+module_exit(bcm4329_rfkill_exit);
+
+MODULE_DESCRIPTION("BCM4329 rfkill");
+MODULE_AUTHOR("NVIDIA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/inv_mpu/Kconfig b/drivers/misc/inv_mpu/Kconfig
new file mode 100644
index 000000000000..53c7c200d453
--- /dev/null
+++ b/drivers/misc/inv_mpu/Kconfig
@@ -0,0 +1,77 @@
+config MPU_SENSORS_TIMERIRQ
+ tristate "MPU Timer IRQ"
+ help
+ If you say yes here you get access to the timerirq device handle which
+ can be used to select on. This can be used instead of IRQs, sleeping,
+ or timer threads. Reading from this device returns the same type of
+ information as reading from the MPU and slave IRQs.
+
+menuconfig INV_SENSORS
+ tristate "Motion Processing Unit"
+ depends on I2C
+ default y
+
+if INV_SENSORS
+
+choice
+ prompt "MPU Master"
+ depends on I2C && INV_SENSORS
+ default MPU_SENSORS_MPU3050
+
+config MPU_SENSORS_MPU3050
+ bool "MPU3050"
+ depends on I2C
+ select MPU_SENSORS_MPU3050_GYRO
+ help
+ If you say yes here you get support for the MPU3050 Gyroscope driver.
+ This driver can also be built as a module. If so, the module
+ will be called mpu3050.
+
+config MPU_SENSORS_MPU6050A2
+ bool "MPU6050A2"
+ depends on I2C
+ select MPU_SENSORS_MPU6050_GYRO
+ help
+ If you say yes here you get support for the MPU6050A2 Gyroscope driver.
+ This driver can also be built as a module. If so, the module
+ will be called mpu6050a2.
+
+config MPU_SENSORS_MPU6050B1
+ bool "MPU6050B1"
+ select MPU_SENSORS_MPU6050_GYRO
+ depends on I2C
+ help
+ If you say yes here you get support for the MPU6050 Gyroscope driver.
+ This driver can also be built as a module. If so, the module
+ will be called mpu6050b1.
+
+endchoice
+
+choice
+ prompt "Gyroscope Type"
+ depends on I2C && INV_SENSORS
+ default MPU_SENSORS_MPU3050_GYRO
+
+config MPU_SENSORS_MPU3050_GYRO
+ bool "MPU3050 built in gyroscope"
+ depends on MPU_SENSORS_MPU3050
+
+config MPU_SENSORS_MPU6050_GYRO
+ bool "MPU6050 built in gyroscope"
+ depends on MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+
+endchoice
+
+source "drivers/misc/inv_mpu/accel/Kconfig"
+source "drivers/misc/inv_mpu/compass/Kconfig"
+source "drivers/misc/inv_mpu/pressure/Kconfig"
+
+config MPU_USERSPACE_DEBUG
+ bool "MPU Userspace debugging ioctls"
+ help
+ Allows the ioctls MPU_SET_MPU_PLATFORM_DATA and
+ MPU_SET_EXT_SLAVE_PLATFORM_DATA, which allow userspace applications
+ to override the slave address and orientation. This is dangerous
+ and could cause the devices not to work.
+
+endif #INV_SENSORS
diff --git a/drivers/misc/inv_mpu/Makefile b/drivers/misc/inv_mpu/Makefile
new file mode 100644
index 000000000000..248648f6567c
--- /dev/null
+++ b/drivers/misc/inv_mpu/Makefile
@@ -0,0 +1,45 @@
+
+# Kernel makefile for motion sensors
+#
+#
+
+# MPU
+ifdef CONFIG_MPU_SENSORS_MPU3050
+INV_MODULE_NAME := mpu3050
+endif
+
+ifdef CONFIG_MPU_SENSORS_MPU6050A2
+INV_MODULE_NAME := mpu6050
+endif
+
+ifdef CONFIG_MPU_SENSORS_MPU6050B1
+INV_MODULE_NAME := mpu6050
+endif
+
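+# All core objects are linked into a single module whose name depends on
+# the selected MPU part (mpu3050 or mpu6050).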
+obj-$(CONFIG_INV_SENSORS) += $(INV_MODULE_NAME).o
+
+$(INV_MODULE_NAME)-objs += mpuirq.o
+$(INV_MODULE_NAME)-objs += slaveirq.o
+$(INV_MODULE_NAME)-objs += mpu-dev.o
+$(INV_MODULE_NAME)-objs += mlsl-kernel.o
+$(INV_MODULE_NAME)-objs += mldl_cfg.o
+$(INV_MODULE_NAME)-objs += mldl_print_cfg.o
+
+ifdef CONFIG_MPU_SENSORS_MPU6050A2
+$(INV_MODULE_NAME)-objs += accel/mpu6050.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_MPU6050B1
+$(INV_MODULE_NAME)-objs += accel/mpu6050.o
+endif
+
+EXTRA_CFLAGS += -Idrivers/misc/inv_mpu
+EXTRA_CFLAGS += -D__C99_DESIGNATED_INITIALIZER
+EXTRA_CFLAGS += -DINV_CACHE_DMP=1
+
+obj-$(CONFIG_MPU_SENSORS_TIMERIRQ)+= timerirq.o
+
+obj-y += accel/
+obj-y += compass/
+obj-y += pressure/
+
diff --git a/drivers/misc/inv_mpu/accel/Kconfig b/drivers/misc/inv_mpu/accel/Kconfig
new file mode 100644
index 000000000000..4e280bd876bc
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/Kconfig
@@ -0,0 +1,133 @@
+menuconfig INV_SENSORS_ACCELEROMETERS
+ bool "Accelerometer Slave Sensors"
+ default y
+ ---help---
+ Say Y here to get to see options for device drivers for various
+ accelerometers for integration with the MPU3050 or MPU6050 driver.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if INV_SENSORS_ACCELEROMETERS
+
+config MPU_SENSORS_ADXL34X
+ tristate "ADI adxl34x"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the ADI adxl345 or adxl346 accelerometers.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_BMA222
+ tristate "Bosch BMA222"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Bosch BMA222 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_BMA150
+ tristate "Bosch BMA150"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Bosch BMA150 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_BMA250
+ tristate "Bosch BMA250"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Bosch BMA250 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_KXSD9
+ tristate "Kionix KXSD9"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Kionix KXSD9 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_KXTF9
+ tristate "Kionix KXTF9"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Kionix KXTF9 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_LIS331DLH
+ tristate "ST lis331dlh"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the ST lis331dlh accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_LIS3DH
+ tristate "ST lis3dh"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the ST lis3dh accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_LSM303DLX_A
+ tristate "ST lsm303dlx"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the ST lsm303dlh and lsm303dlm accelerometers.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_MMA8450
+ tristate "Freescale mma8450"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Freescale mma8450 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_MMA845X
+ tristate "Freescale mma8451/8452/8453"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the Freescale mma8451/8452/8453 accelerometer.
+ This support is for integration with the MPU3050 gyroscope device
+ driver. Only one accelerometer can be registered at a time.
+ Specifying more than one accelerometer in the board file will result
+ in runtime errors.
+
+config MPU_SENSORS_MPU6050_ACCEL
+ tristate "MPU6050 built in accelerometer"
+ depends on MPU_SENSORS_MPU6050B1 || MPU_SENSORS_MPU6050A2
+ help
+ This enables support for the MPU6050 built in accelerometer.
+ This is the built-in support for integration with the MPU6050 gyroscope
+ device driver. This is the only accelerometer supported with the
+ MPU6050. Specifying another accelerometer in the board file will
+ result in runtime errors.
+
+endif
diff --git a/drivers/misc/inv_mpu/accel/Makefile b/drivers/misc/inv_mpu/accel/Makefile
new file mode 100644
index 000000000000..1f0f5bec6774
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/Makefile
@@ -0,0 +1,38 @@
+#
+# Accel Slaves to MPUxxxx
+#
+obj-$(CONFIG_MPU_SENSORS_ADXL34X) += inv_mpu_adxl34x.o
+inv_mpu_adxl34x-objs += adxl34x.o
+
+obj-$(CONFIG_MPU_SENSORS_BMA150) += inv_mpu_bma150.o
+inv_mpu_bma150-objs += bma150.o
+
+obj-$(CONFIG_MPU_SENSORS_KXTF9) += inv_mpu_kxtf9.o
+inv_mpu_kxtf9-objs += kxtf9.o
+
+obj-$(CONFIG_MPU_SENSORS_BMA222) += inv_mpu_bma222.o
+inv_mpu_bma222-objs += bma222.o
+
+obj-$(CONFIG_MPU_SENSORS_BMA250) += inv_mpu_bma250.o
+inv_mpu_bma250-objs += bma250.o
+
+obj-$(CONFIG_MPU_SENSORS_KXSD9) += inv_mpu_kxsd9.o
+inv_mpu_kxsd9-objs += kxsd9.o
+
+obj-$(CONFIG_MPU_SENSORS_LIS331DLH) += inv_mpu_lis331.o
+inv_mpu_lis331-objs += lis331.o
+
+obj-$(CONFIG_MPU_SENSORS_LIS3DH) += inv_mpu_lis3dh.o
+inv_mpu_lis3dh-objs += lis3dh.o
+
+obj-$(CONFIG_MPU_SENSORS_LSM303DLX_A) += inv_mpu_lsm303dlx_a.o
+inv_mpu_lsm303dlx_a-objs += lsm303dlx_a.o
+
+obj-$(CONFIG_MPU_SENSORS_MMA8450) += inv_mpu_mma8450.o
+inv_mpu_mma8450-objs += mma8450.o
+
+obj-$(CONFIG_MPU_SENSORS_MMA845X) += inv_mpu_mma845x.o
+inv_mpu_mma845x-objs += mma845x.o
+
+EXTRA_CFLAGS += -Idrivers/misc/inv_mpu
+EXTRA_CFLAGS += -D__C99_DESIGNATED_INITIALIZER
diff --git a/drivers/misc/inv_mpu/accel/adxl34x.c b/drivers/misc/inv_mpu/accel/adxl34x.c
new file mode 100644
index 000000000000..f2bff8a75925
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/adxl34x.c
@@ -0,0 +1,728 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file adxl34x.c
+ * @brief Accelerometer setup and handling methods for AD adxl345 and
+ * adxl346.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* -------------------------------------------------------------------------- */
+
+/* registers */
+#define ADXL34X_ODR_REG (0x2C)
+#define ADXL34X_PWR_REG (0x2D)
+#define ADXL34X_DATAFORMAT_REG (0x31)
+
+/* masks */
+#define ADXL34X_ODR_MASK (0x0F)
+#define ADXL34X_PWR_SLEEP_MASK (0x04)
+#define ADXL34X_PWR_MEAS_MASK (0x08)
+#define ADXL34X_DATAFORMAT_JUSTIFY_MASK (0x04)
+#define ADXL34X_DATAFORMAT_FSR_MASK (0x03)
+
+/* -------------------------------------------------------------------------- */
+
+struct adxl34x_config {
+ unsigned int odr; /** < output data rate in mHz */
+ unsigned int fsr; /** < full scale range mg */
+ unsigned int fsr_reg_mask; /** < register setting for fsr */
+};
+
+struct adxl34x_private_data {
+ struct adxl34x_config suspend; /** < suspend configuration */
+ struct adxl34x_config resume; /** < resume configuration */
+};
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct adxl34x_config *config,
+ int apply,
+ long odr)
+{
+ int result = INV_SUCCESS;
+ unsigned char new_odr_mask;
+
+ /* ADXL346 (Rev. A) pages 13, 24 */
+ if (odr >= 3200000) {
+ new_odr_mask = 0x0F;
+ config->odr = 3200000;
+ } else if (odr >= 1600000) {
+ new_odr_mask = 0x0E;
+ config->odr = 1600000;
+ } else if (odr >= 800000) {
+ new_odr_mask = 0x0D;
+ config->odr = 800000;
+ } else if (odr >= 400000) {
+ new_odr_mask = 0x0C;
+ config->odr = 400000;
+ } else if (odr >= 200000) {
+ new_odr_mask = 0x0B;
+ config->odr = 200000;
+ } else if (odr >= 100000) {
+ new_odr_mask = 0x0A;
+ config->odr = 100000;
+ } else if (odr >= 50000) {
+ new_odr_mask = 0x09;
+ config->odr = 50000;
+ } else if (odr >= 25000) {
+ new_odr_mask = 0x08;
+ config->odr = 25000;
+ } else if (odr >= 12500) {
+ new_odr_mask = 0x07;
+ config->odr = 12500;
+ } else if (odr >= 6250) {
+ new_odr_mask = 0x06;
+ config->odr = 6250;
+ } else if (odr >= 3130) {
+ new_odr_mask = 0x05;
+ config->odr = 3130;
+ } else if (odr >= 1560) {
+ new_odr_mask = 0x04;
+ config->odr = 1560;
+ } else if (odr >= 780) {
+ new_odr_mask = 0x03;
+ config->odr = 780;
+ } else if (odr >= 390) {
+ new_odr_mask = 0x02;
+ config->odr = 390;
+ } else if (odr >= 200) {
+ new_odr_mask = 0x01;
+ config->odr = 200;
+ } else {
+ new_odr_mask = 0x00;
+ config->odr = 100;
+ }
+
+ if (apply) {
+ unsigned char reg_odr;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ ADXL34X_ODR_REG, 1, &reg_odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ reg_odr &= ~ADXL34X_ODR_MASK;
+ reg_odr |= new_odr_mask;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_ODR_REG, reg_odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("ODR: %d mHz\n", config->odr);
+ }
+ return result;
+}
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range in milli gees (mg).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct adxl34x_config *config,
+ int apply,
+ long fsr)
+{
+ int result = INV_SUCCESS;
+
+ if (fsr <= 2000) {
+ config->fsr_reg_mask = 0x00;
+ config->fsr = 2000;
+ } else if (fsr <= 4000) {
+ config->fsr_reg_mask = 0x01;
+ config->fsr = 4000;
+ } else if (fsr <= 8000) {
+ config->fsr_reg_mask = 0x02;
+ config->fsr = 8000;
+ } else { /* 8001 and above */
+ config->fsr_reg_mask = 0x03;
+ config->fsr = 16000;
+ }
+
+ if (apply) {
+ unsigned char reg_df;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ ADXL34X_DATAFORMAT_REG, 1, &reg_df);
+ reg_df &= ~ADXL34X_DATAFORMAT_FSR_MASK;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_DATAFORMAT_REG,
+ reg_df | config->fsr_reg_mask);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("FSR: %d mg\n", config->fsr);
+ }
+ return result;
+}
+
+/**
+ * @brief facility to retrieve the device configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to store the returned configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct adxl34x_private_data *private_data =
+ (struct adxl34x_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief device configuration facility.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to the configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct adxl34x_private_data *private_data =
+ (struct adxl34x_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return adxl34x_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return adxl34x_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return adxl34x_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return adxl34x_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+
+ /*
+ struct adxl34x_config *suspend_config =
+ &((struct adxl34x_private_data *)pdata->private_data)->suspend;
+
+ result = adxl34x_set_odr(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+}
+ result = adxl34x_set_fsr(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+}
+ */
+
+ /*
+ Page 25
+ When clearing the sleep bit, it is recommended that the part
+ be placed into standby mode and then set back to measurement mode
+ with a subsequent write.
+ This is done to ensure that the device is properly biased if sleep
+ mode is manually disabled; otherwise, the first few samples of data
+ after the sleep bit is cleared may have additional noise,
+ especially if the device was asleep when the bit was cleared. */
+
+ /* go into stand-by mode (suspends measurements) */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_PWR_REG, ADXL34X_PWR_MEAS_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* and then in sleep */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_PWR_REG,
+ ADXL34X_PWR_MEAS_MASK | ADXL34X_PWR_SLEEP_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ struct adxl34x_config *resume_config =
+ &((struct adxl34x_private_data *)pdata->private_data)->resume;
+ unsigned char reg;
+
+ /*
+ Page 25
+ When clearing the sleep bit, it is recommended that the part
+ be placed into standby mode and then set back to measurement mode
+ with a subsequent write.
+ This is done to ensure that the device is properly biased if sleep
+ mode is manually disabled; otherwise, the first few samples of data
+ after the sleep bit is cleared may have additional noise,
+ especially if the device was asleep when the bit was cleared. */
+
+ /* remove sleep, but leave in stand-by */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_PWR_REG, ADXL34X_PWR_MEAS_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = adxl34x_set_odr(mlsl_handle, pdata, resume_config,
+ true, resume_config->odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /*
+ * -> FSR
+ * -> justify bit for big-endianness
+ * -> resolution of 10 bits
+ */
+ reg = ADXL34X_DATAFORMAT_JUSTIFY_MASK;
+ reg |= resume_config->fsr_reg_mask;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_DATAFORMAT_REG, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* go in measurement mode */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ADXL34X_PWR_REG, 0x00);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* DATA_FORMAT: left-justified data with the configured full scale range */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ 0x31, reg);
+
+ return result;
+}
+
+/**
+ * @brief one-time device driver initialization function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is loaded in the kernel.
+ * If the driver is built-in in the kernel, this function will be
+ * called at boot time.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ long range;
+
+ struct adxl34x_private_data *private_data;
+ private_data = (struct adxl34x_private_data *)
+ kzalloc(sizeof(struct adxl34x_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ result = adxl34x_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = adxl34x_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ result = adxl34x_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = adxl34x_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = adxl34x_suspend(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief one-time device driver exit function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is removed from the kernel.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int adxl34x_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+ return result;
+}
+
+static struct ext_slave_descr adxl34x_descr = {
+ .init = adxl34x_init,
+ .exit = adxl34x_exit,
+ .suspend = adxl34x_suspend,
+ .resume = adxl34x_resume,
+ .read = adxl34x_read,
+ .config = adxl34x_config,
+ .get_config = adxl34x_get_config,
+ .name = "adxl34x", /* 5 or 6 */
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_ADXL34X,
+ .read_reg = 0x32,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *adxl34x_get_slave_descr(void)
+{
+ return &adxl34x_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct adxl34x_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int adxl34x_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct adxl34x_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ adxl34x_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int adxl34x_mod_remove(struct i2c_client *client)
+{
+ struct adxl34x_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ adxl34x_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id adxl34x_mod_id[] = {
+ { "adxl34x", ACCEL_ID_ADXL34X },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, adxl34x_mod_id);
+
+static struct i2c_driver adxl34x_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = adxl34x_mod_probe,
+ .remove = adxl34x_mod_remove,
+ .id_table = adxl34x_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adxl34x_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init adxl34x_mod_init(void)
+{
+ int res = i2c_add_driver(&adxl34x_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "adxl34x_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit adxl34x_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&adxl34x_mod_driver);
+}
+
+module_init(adxl34x_mod_init);
+module_exit(adxl34x_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate ADXL34X sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("adxl34x_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/bma150.c b/drivers/misc/inv_mpu/accel/bma150.c
new file mode 100644
index 000000000000..745d90a6744f
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/bma150.c
@@ -0,0 +1,777 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file bma150.c
+ * @brief Accelerometer setup and handling methods for Bosch BMA150.
+ */
+
+/* -------------------------------------------------------------------------- */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+
+/* -------------------------------------------------------------------------- */
+/* registers */
+#define BMA150_CTRL_REG (0x14)
+#define BMA150_INT_REG (0x15)
+#define BMA150_PWR_REG (0x0A)
+
+/* masks */
+#define BMA150_CTRL_MASK (0x18)
+#define BMA150_CTRL_MASK_ODR (0xF8)
+#define BMA150_CTRL_MASK_FSR (0xE7)
+#define BMA150_INT_MASK_WUP (0xF8)
+#define BMA150_INT_MASK_IRQ (0xDF)
+#define BMA150_PWR_MASK_SLEEP (0x01)
+#define BMA150_PWR_MASK_SOFT_RESET (0x02)
+
+/* -------------------------------------------------------------------------- */
+struct bma150_config {
+ unsigned int odr; /** < output data rate in mHz */
+ unsigned int fsr; /** < full scale range in milli-g */
+ unsigned int irq_type; /** < type of IRQ, see bma150_set_irq */
+ unsigned char ctrl_reg; /** < control register value */
+ unsigned char int_reg; /** < interrupt control register value */
+};
+
+struct bma150_private_data {
+ struct bma150_config suspend; /** < suspend configuration */
+ struct bma150_config resume; /** < resume configuration */
+};
+
+/**
+ * @brief Simply disables the IRQ since it is not usable on BMA150 devices.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * configuration to apply to, suspend or resume
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param irq_type
+ * the type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ * The only supported IRQ type is MPU_SLAVE_IRQ_TYPE_NONE which
+ * corresponds to disabling the IRQ completely.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma150_config *config,
+ int apply,
+ long irq_type)
+{
+ int result = INV_SUCCESS;
+
+ if (irq_type != MPU_SLAVE_IRQ_TYPE_NONE)
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+
+ config->irq_type = MPU_SLAVE_IRQ_TYPE_NONE;
+ config->int_reg = 0x00;
+
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_CTRL_REG, config->ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_INT_REG, config->int_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma150_config *config,
+ int apply,
+ long odr)
+{
+ unsigned char odr_bits = 0;
+ unsigned char wup_bits = 0;
+ int result = INV_SUCCESS;
+
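+ /* Pick the closest supported bandwidth setting: 25, 50, 100 or
+ 190 Hz (rates are tracked in mHz). */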
+ if (odr > 100000) {
+ config->odr = 190000;
+ odr_bits = 0x03;
+ } else if (odr > 50000) {
+ config->odr = 100000;
+ odr_bits = 0x02;
+ } else if (odr > 25000) {
+ config->odr = 50000;
+ odr_bits = 0x01;
+ } else if (odr > 0) {
+ config->odr = 25000;
+ odr_bits = 0x00;
+ } else {
+ config->odr = 0;
+ wup_bits = 0x00;
+ }
+
+ config->int_reg &= BMA150_INT_MASK_WUP;
+ config->ctrl_reg &= BMA150_CTRL_MASK_ODR;
+ config->ctrl_reg |= odr_bits;
+
+ MPL_LOGV("ODR: %d\n", config->odr);
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_CTRL_REG, config->ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_INT_REG, config->int_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return result;
+}
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma150_config *config,
+ int apply,
+ long fsr)
+{
+ unsigned char fsr_bits;
+ int result = INV_SUCCESS;
+
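+ /* Select the smallest full scale range that covers the request:
+ 2048, 4096 or 8192 mg. */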
+ if (fsr <= 2048) {
+ fsr_bits = 0x00;
+ config->fsr = 2048;
+ } else if (fsr <= 4096) {
+ fsr_bits = 0x08;
+ config->fsr = 4096;
+ } else {
+ fsr_bits = 0x10;
+ config->fsr = 8192;
+ }
+
+ config->ctrl_reg &= BMA150_CTRL_MASK_FSR;
+ config->ctrl_reg |= fsr_bits;
+
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_CTRL_REG, config->ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_INT_REG, config->int_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @brief one-time device driver initialization function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is loaded in the kernel.
+ * If the driver is built-in in the kernel, this function will be
+ * called at boot time.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char reg;
+ long range;
+
+ struct bma150_private_data *private_data;
+ private_data = kzalloc(sizeof(struct bma150_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
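+ /* Soft reset the device, then use the power-on register values as the
+ baseline for both the suspend and resume configurations. */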
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_PWR_REG, BMA150_PWR_MASK_SOFT_RESET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1);
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ BMA150_CTRL_REG, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ private_data->resume.ctrl_reg = reg;
+ private_data->suspend.ctrl_reg = reg;
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ BMA150_INT_REG, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ private_data->resume.int_reg = reg;
+ private_data->suspend.int_reg = reg;
+
+ result = bma150_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma150_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ result = bma150_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma150_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = bma150_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma150_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_PWR_REG, BMA150_PWR_MASK_SLEEP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief one-time device driver exit function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is removed from the kernel.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief device configuration facility.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to the configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct bma150_private_data *private_data =
+ (struct bma150_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return bma150_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return bma150_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return bma150_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return bma150_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return bma150_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return bma150_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief facility to retrieve the device configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to store the returned configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct bma150_private_data *private_data =
+ (struct bma150_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.irq_type;
+ break;
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char ctrl_reg;
+ unsigned char int_reg;
+
+ struct bma150_private_data *private_data =
+ (struct bma150_private_data *)(pdata->private_data);
+
+ ctrl_reg = private_data->suspend.ctrl_reg;
+ int_reg = private_data->suspend.int_reg;
+
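+ /* Reset the part, restore the saved suspend-time control and interrupt
+ settings, then drop it into its low power sleep mode. */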
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_PWR_REG, BMA150_PWR_MASK_SOFT_RESET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_CTRL_REG, ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_INT_REG, int_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_PWR_REG, BMA150_PWR_MASK_SLEEP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char ctrl_reg;
+ unsigned char int_reg;
+
+ struct bma150_private_data *private_data =
+ (struct bma150_private_data *)(pdata->private_data);
+
+ ctrl_reg = private_data->resume.ctrl_reg;
+ int_reg = private_data->resume.int_reg;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_PWR_REG, BMA150_PWR_MASK_SOFT_RESET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_CTRL_REG, ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_INT_REG, int_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA150_PWR_REG, 0x00);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma150_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ return inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+}
+
+static struct ext_slave_descr bma150_descr = {
+ .init = bma150_init,
+ .exit = bma150_exit,
+ .suspend = bma150_suspend,
+ .resume = bma150_resume,
+ .read = bma150_read,
+ .config = bma150_config,
+ .get_config = bma150_get_config,
+ .name = "bma150",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_BMA150,
+ .read_reg = 0x02,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *bma150_get_slave_descr(void)
+{
+ return &bma150_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Platform data for the MPU */
+struct bma150_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int bma150_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct bma150_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ bma150_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int bma150_mod_remove(struct i2c_client *client)
+{
+ struct bma150_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ bma150_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id bma150_mod_id[] = {
+ { "bma150", ACCEL_ID_BMA150 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bma150_mod_id);
+
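+/*
+ * Illustrative sketch only: the driver matches by I2C device name, so a
+ * board file would typically declare the device along these lines. The bus
+ * number, address and platform data symbol below are assumptions for the
+ * example; the platform data must point to a struct ext_slave_platform_data,
+ * as checked in bma150_mod_probe().
+ *
+ * static struct i2c_board_info __initdata bma150_i2c_info = {
+ * I2C_BOARD_INFO("bma150", 0x38),
+ * .platform_data = &mpu_bma150_slave_pdata,
+ * };
+ * i2c_register_board_info(2, &bma150_i2c_info, 1);
+ */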
+static struct i2c_driver bma150_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = bma150_mod_probe,
+ .remove = bma150_mod_remove,
+ .id_table = bma150_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bma150_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init bma150_mod_init(void)
+{
+ int res = i2c_add_driver(&bma150_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "bma150_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit bma150_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&bma150_mod_driver);
+}
+
+module_init(bma150_mod_init);
+module_exit(bma150_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate BMA150 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("bma150_mod");
+
+/**
+ * @}
+ */
+
diff --git a/drivers/misc/inv_mpu/accel/bma222.c b/drivers/misc/inv_mpu/accel/bma222.c
new file mode 100644
index 000000000000..e9fc99b1a62d
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/bma222.c
@@ -0,0 +1,654 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file bma222.c
+ * @brief Accelerometer setup and handling methods for Bosch BMA222.
+ */
+
+/* ------------------ */
+/* - Include Files. - */
+/* ------------------ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+
+/* -------------------------------------------------------------------------- */
+
+#define BMA222_STATUS_REG (0x0A)
+#define BMA222_FSR_REG (0x0F)
+#define BMA222_ODR_REG (0x10)
+#define BMA222_PWR_REG (0x11)
+#define BMA222_SOFTRESET_REG (0x14)
+
+#define BMA222_STATUS_RDY_MASK (0x80)
+#define BMA222_FSR_MASK (0x0F)
+#define BMA222_ODR_MASK (0x1F)
+#define BMA222_PWR_SLEEP_MASK (0x80)
+#define BMA222_PWR_AWAKE_MASK (0x00)
+#define BMA222_SOFTRESET_MASK (0xB6)
+
+/* -------------------------------------------------------------------------- */
+
+struct bma222_config {
+ unsigned int odr; /**< output data rate in mHz */
+ unsigned int fsr; /**< full scale range mg */
+};
+
+struct bma222_private_data {
+ struct bma222_config suspend; /**< suspend configuration */
+ struct bma222_config resume; /**< resume configuration */
+};
+
+
+/* -------------------------------------------------------------------------- */
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma222_config *config,
+ int apply,
+ long odr)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg_odr;
+
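+ /* Pick the closest supported output data rate between 8 Hz and 1 kHz
+ (rates are tracked in mHz). */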
+ if (odr >= 1000000) {
+ reg_odr = 0x0F;
+ config->odr = 1000000;
+ } else if (odr >= 500000) {
+ reg_odr = 0x0E;
+ config->odr = 500000;
+ } else if (odr >= 250000) {
+ reg_odr = 0x0D;
+ config->odr = 250000;
+ } else if (odr >= 125000) {
+ reg_odr = 0x0C;
+ config->odr = 125000;
+ } else if (odr >= 62500) {
+ reg_odr = 0x0B;
+ config->odr = 62500;
+ } else if (odr >= 32000) {
+ reg_odr = 0x0A;
+ config->odr = 32000;
+ } else if (odr >= 16000) {
+ reg_odr = 0x09;
+ config->odr = 16000;
+ } else {
+ reg_odr = 0x08;
+ config->odr = 8000;
+ }
+
+ if (apply) {
+ MPL_LOGV("ODR: %d\n", config->odr);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA222_ODR_REG, reg_odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma222_config *config,
+ int apply,
+ long fsr)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg_fsr_mask;
+
+ if (fsr <= 2000) {
+ reg_fsr_mask = 0x03;
+ config->fsr = 2000;
+ } else if (fsr <= 4000) {
+ reg_fsr_mask = 0x05;
+ config->fsr = 4000;
+ } else if (fsr <= 8000) {
+ reg_fsr_mask = 0x08;
+ config->fsr = 8000;
+ } else { /* 8001 -> oo */
+ reg_fsr_mask = 0x0C;
+ config->fsr = 16000;
+ }
+
+ if (apply) {
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA222_FSR_REG, reg_fsr_mask);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @brief one-time device driver initialization function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is loaded in the kernel.
+ * If the driver is built-in in the kernel, this function will be
+ * called at boot time.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+
+ struct bma222_private_data *private_data;
+ private_data = kzalloc(sizeof(struct bma222_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA222_SOFTRESET_REG, BMA222_SOFTRESET_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1);
+
+ result = bma222_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma222_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = bma222_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, 2000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma222_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, 2000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA222_PWR_REG, BMA222_PWR_SLEEP_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief one-time device driver exit function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is removed from the kernel.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+
+/**
+ * @brief facility to retrieve the device configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to store the returned configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct bma222_private_data *private_data =
+ (struct bma222_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief device configuration facility.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to the configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct bma222_private_data *private_data =
+ (struct bma222_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return bma222_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return bma222_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return bma222_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return bma222_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct bma222_config *suspend_config =
+ &((struct bma222_private_data *)pdata->private_data)->suspend;
+
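+ /* Apply the saved suspend-time ODR/FSR settings, then put the part to
+ sleep. */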
+ result = bma222_set_odr(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma222_set_fsr(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA222_PWR_REG, BMA222_PWR_SLEEP_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ msleep(3); /* 3 ms powerup time maximum */
+ return result;
+}
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct bma222_config *resume_config =
+ &((struct bma222_private_data *)pdata->private_data)->resume;
+
+ /* Soft reset */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA222_SOFTRESET_REG, BMA222_SOFTRESET_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(10);
+
+ result = bma222_set_odr(mlsl_handle, pdata, resume_config,
+ true, resume_config->odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma222_set_fsr(mlsl_handle, pdata, resume_config,
+ true, resume_config->fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma222_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ BMA222_STATUS_REG, 1, data);
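+ /* Only read out a sample when the status register flags new data. */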
+ if (data[0] & BMA222_STATUS_RDY_MASK) {
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+ return result;
+ } else
+ return INV_ERROR_ACCEL_DATA_NOT_READY;
+}
+
+static struct ext_slave_descr bma222_descr = {
+ .init = bma222_init,
+ .exit = bma222_exit,
+ .suspend = bma222_suspend,
+ .resume = bma222_resume,
+ .read = bma222_read,
+ .config = bma222_config,
+ .get_config = bma222_get_config,
+ .name = "bma222",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_BMA222,
+ .read_reg = 0x02,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *bma222_get_slave_descr(void)
+{
+ return &bma222_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct bma222_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int bma222_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct bma222_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ bma222_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int bma222_mod_remove(struct i2c_client *client)
+{
+ struct bma222_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ bma222_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id bma222_mod_id[] = {
+ { "bma222", ACCEL_ID_BMA222 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bma222_mod_id);
+
+static struct i2c_driver bma222_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = bma222_mod_probe,
+ .remove = bma222_mod_remove,
+ .id_table = bma222_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bma222_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init bma222_mod_init(void)
+{
+ int res = i2c_add_driver(&bma222_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "bma222_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit bma222_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&bma222_mod_driver);
+}
+
+module_init(bma222_mod_init);
+module_exit(bma222_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate BMA222 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("bma222_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/bma250.c b/drivers/misc/inv_mpu/accel/bma250.c
new file mode 100644
index 000000000000..6a245f4566aa
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/bma250.c
@@ -0,0 +1,787 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file bma250.c
+ * @brief Accelerometer setup and handling methods for Bosch BMA250.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+
+/* -------------------------------------------------------------------------- */
+
+/* registers */
+#define BMA250_STATUS_REG (0x0A)
+#define BMA250_FSR_REG (0x0F)
+#define BMA250_ODR_REG (0x10)
+#define BMA250_PWR_REG (0x11)
+#define BMA250_SOFTRESET_REG (0x14)
+#define BMA250_INT_TYPE_REG (0x17)
+#define BMA250_INT_DST_REG (0x1A)
+#define BMA250_INT_SRC_REG (0x1E)
+
+/* masks */
+#define BMA250_STATUS_RDY_MASK (0x80)
+#define BMA250_FSR_MASK (0x0F)
+#define BMA250_ODR_MASK (0x1F)
+#define BMA250_PWR_SLEEP_MASK (0x80)
+#define BMA250_PWR_AWAKE_MASK (0x00)
+#define BMA250_SOFTRESET_MASK (0xB6)
+#define BMA250_INT_TYPE_MASK (0x10)
+#define BMA250_INT_DST_1_MASK (0x01)
+#define BMA250_INT_DST_2_MASK (0x80)
+#define BMA250_INT_SRC_MASK (0x00)
+
+/* -------------------------------------------------------------------------- */
+
+struct bma250_config {
+ unsigned int odr; /**< output data rate in mHz */
+ unsigned int fsr; /**< full scale range mg */
+ unsigned char irq_type;
+};
+
+struct bma250_private_data {
+ struct bma250_config suspend; /**< suspend configuration */
+ struct bma250_config resume; /**< resume configuration */
+};
+
+/* -------------------------------------------------------------------------- */
+/**
+ * @brief Sets the IRQ to fire when one of the IRQ events occur.
+ * Threshold and duration will not be used unless the type is MOT or
+ * NMOT.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * configuration to apply to, suspend or resume
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param irq_type
+ * the type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma250_config *config,
+ int apply, long irq_type)
+{
+ int result = INV_SUCCESS;
+ unsigned char irqtype_reg;
+ unsigned char irqdst_reg;
+ unsigned char irqsrc_reg;
+
+ switch (irq_type) {
+ case MPU_SLAVE_IRQ_TYPE_DATA_READY:
+ /* data ready int. */
+ irqtype_reg = BMA250_INT_TYPE_MASK;
+ /* routed to interrupt pin 1 */
+ irqdst_reg = BMA250_INT_DST_1_MASK;
+ /* from filtered data */
+ irqsrc_reg = BMA250_INT_SRC_MASK;
+ break;
+ /* unfinished
+ case MPU_SLAVE_IRQ_TYPE_MOTION:
+ reg1 = 0x00;
+ reg2 = config->mot_int1_cfg;
+ reg3 = ;
+ break;
+ */
+ case MPU_SLAVE_IRQ_TYPE_NONE:
+ irqtype_reg = 0x00;
+ irqdst_reg = 0x00;
+ irqsrc_reg = 0x00;
+ break;
+ default:
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ config->irq_type = (unsigned char)irq_type;
+
+ if (apply) {
+ /* select the type of interrupt to use */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_INT_TYPE_REG, irqtype_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* select to which interrupt pin to route it to */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_INT_DST_REG, irqdst_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* select whether the interrupt works off filtered or
+ unfiltered data */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_INT_SRC_REG, irqsrc_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma250_config *config,
+ int apply,
+ long odr)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg_odr;
+
+ /* Table uses bandwidth which is half the sample rate */
+ odr = odr >> 1;
+ if (odr >= 1000000) {
+ reg_odr = 0x0F;
+ config->odr = 2000000;
+ } else if (odr >= 500000) {
+ reg_odr = 0x0E;
+ config->odr = 1000000;
+ } else if (odr >= 250000) {
+ reg_odr = 0x0D;
+ config->odr = 500000;
+ } else if (odr >= 125000) {
+ reg_odr = 0x0C;
+ config->odr = 250000;
+ } else if (odr >= 62500) {
+ reg_odr = 0x0B;
+ config->odr = 125000;
+ } else if (odr >= 31250) {
+ reg_odr = 0x0A;
+ config->odr = 62500;
+ } else if (odr >= 15630) {
+ reg_odr = 0x09;
+ config->odr = 31250;
+ } else {
+ reg_odr = 0x08;
+ config->odr = 15630;
+ }
+
+ if (apply) {
+ MPL_LOGV("ODR: %d\n", config->odr);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_ODR_REG, reg_odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return result;
+}
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct bma250_config *config,
+ int apply,
+ long fsr)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg_fsr_mask;
+
+ if (fsr <= 2000) {
+ reg_fsr_mask = 0x03;
+ config->fsr = 2000;
+ } else if (fsr <= 4000) {
+ reg_fsr_mask = 0x05;
+ config->fsr = 4000;
+ } else if (fsr <= 8000) {
+ reg_fsr_mask = 0x08;
+ config->fsr = 8000;
+ } else { /* 8001 -> oo */
+ reg_fsr_mask = 0x0C;
+ config->fsr = 16000;
+ }
+
+ if (apply) {
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_FSR_REG, reg_fsr_mask);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @brief one-time device driver initialization function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is loaded in the kernel.
+ * If the driver is built-in in the kernel, this function will be
+ * called at boot time.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ long range;
+
+ struct bma250_private_data *private_data;
+ private_data = kzalloc(sizeof(struct bma250_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_SOFTRESET_REG, BMA250_SOFTRESET_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1);
+
+ result = bma250_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ result = bma250_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = bma250_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_PWR_REG, BMA250_PWR_SLEEP_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief one-time device driver exit function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is removed from the kernel.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief device configuration facility.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to the configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct bma250_private_data *private_data =
+ (struct bma250_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return bma250_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return bma250_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return bma250_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return bma250_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return bma250_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return bma250_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief facility to retrieve the device configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to store the returned configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct bma250_private_data *private_data =
+ (struct bma250_private_data *)(pdata->private_data);
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.irq_type;
+ break;
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct bma250_config *suspend_config =
+ &((struct bma250_private_data *)pdata->private_data)->suspend;
+
+ result = bma250_set_odr(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_fsr(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_irq(mlsl_handle, pdata, suspend_config,
+ true, suspend_config->irq_type);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_PWR_REG, BMA250_PWR_SLEEP_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ msleep(3); /* 3 ms powerup time maximum */
+ return result;
+}
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct bma250_config *resume_config =
+ &((struct bma250_private_data *)pdata->private_data)->resume;
+
+ result = bma250_set_odr(mlsl_handle, pdata, resume_config,
+ true, resume_config->odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_fsr(mlsl_handle, pdata, resume_config,
+ true, resume_config->fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = bma250_set_irq(mlsl_handle, pdata, resume_config,
+ true, resume_config->irq_type);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ BMA250_PWR_REG, BMA250_PWR_AWAKE_MASK);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int bma250_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ BMA250_STATUS_REG, 1, data);
+ if (1) { /* KLP - workaround for small data ready window */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+ return result;
+ } else
+ return INV_ERROR_ACCEL_DATA_NOT_READY;
+}
+
+static struct ext_slave_descr bma250_descr = {
+ .init = bma250_init,
+ .exit = bma250_exit,
+ .suspend = bma250_suspend,
+ .resume = bma250_resume,
+ .read = bma250_read,
+ .config = bma250_config,
+ .get_config = bma250_get_config,
+ .name = "bma250",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_BMA250,
+ .read_reg = 0x02,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *bma250_get_slave_descr(void)
+{
+ return &bma250_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Platform data for the MPU */
+struct bma250_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int bma250_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct bma250_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ bma250_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int bma250_mod_remove(struct i2c_client *client)
+{
+ struct bma250_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ bma250_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id bma250_mod_id[] = {
+ { "bma250", ACCEL_ID_BMA250 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bma250_mod_id);
+
+static struct i2c_driver bma250_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = bma250_mod_probe,
+ .remove = bma250_mod_remove,
+ .id_table = bma250_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bma250_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init bma250_mod_init(void)
+{
+ int res = i2c_add_driver(&bma250_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "bma250_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit bma250_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&bma250_mod_driver);
+}
+
+module_init(bma250_mod_init);
+module_exit(bma250_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate BMA250 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("bma250_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/cma3000.c b/drivers/misc/inv_mpu/accel/cma3000.c
new file mode 100644
index 000000000000..496d1f29bdc0
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/cma3000.c
@@ -0,0 +1,222 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Accelerometer setup and handling methods for VTI CMA3000.
+ *
+ * @{
+ * @file cma3000.c
+ * @brief Accelerometer setup and handling methods for VTI CMA3000
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* -------------------------------------------------------------------------- */
+
+static int cma3000_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ /* RAM reset */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address, 0x1d, 0xcd);
+ return result;
+}
+
+static int cma3000_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+
+ return INV_SUCCESS;
+}
+
+static int cma3000_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->reg, slave->len, data);
+ return result;
+}
+
+static struct ext_slave_descr cma3000_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = cma3000_suspend,
+ .resume = cma3000_resume,
+ .read = cma3000_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "cma3000",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ID_INVALID,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+
+};
+
+static
+struct ext_slave_descr *cma3000_get_slave_descr(void)
+{
+ return &cma3000_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct cma3000_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int cma3000_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct cma3000_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ cma3000_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int cma3000_mod_remove(struct i2c_client *client)
+{
+ struct cma3000_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ cma3000_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id cma3000_mod_id[] = {
+ { "cma3000", ACCEL_ID_CMA3000 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cma3000_mod_id);
+
+static struct i2c_driver cma3000_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = cma3000_mod_probe,
+ .remove = cma3000_mod_remove,
+ .id_table = cma3000_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cma3000_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init cma3000_mod_init(void)
+{
+ int res = i2c_add_driver(&cma3000_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "cma3000_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit cma3000_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&cma3000_mod_driver);
+}
+
+module_init(cma3000_mod_init);
+module_exit(cma3000_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate CMA3000 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("cma3000_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/kxsd9.c b/drivers/misc/inv_mpu/accel/kxsd9.c
new file mode 100644
index 000000000000..5cb4eaf6b4ab
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/kxsd9.c
@@ -0,0 +1,264 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Accelerometer setup and handling methods for Kionix KXSD9.
+ *
+ * @{
+ * @file kxsd9.c
+ * @brief Accelerometer setup and handling methods for Kionix KXSD9.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* -------------------------------------------------------------------------- */
+
+static int kxsd9_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ /* CTRL_REGB: low-power standby mode */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address, 0x0d, 0x0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+/* full scale setting - register and mask */
+#define ACCEL_KIONIX_CTRL_REG (0x0C)
+#define ACCEL_KIONIX_CTRL_MASK (0x3)
+
+static int kxsd9_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg;
+
+ /* Full Scale */
+ reg = 0x0;
+ reg &= ~ACCEL_KIONIX_CTRL_MASK;
+ reg |= 0x00;
+ if (slave->range.mantissa == 4) { /* 4g scale = 4.9951 */
+ reg |= 0x2;
+ slave->range.fraction = 9951;
+ } else if (slave->range.mantissa == 7) { /* 6g scale = 7.5018 */
+ reg |= 0x1;
+ slave->range.fraction = 5018;
+ } else if (slave->range.mantissa == 9) { /* 8g scale = 9.9902 */
+ reg |= 0x0;
+ slave->range.fraction = 9902;
+ } else {
+ slave->range.mantissa = 2; /* 2g scale = 2.5006 */
+ slave->range.fraction = 5006;
+ reg |= 0x3;
+ }
+ reg |= 0xC0; /* 100Hz LPF */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_KIONIX_CTRL_REG, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* normal operation */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address, 0x0d, 0x40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_SUCCESS;
+}
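+
+/*
+ * Illustrative note (not part of the original driver): slave->range is a
+ * fixed-point {mantissa, fraction} pair with the fraction in 1/10000 g, so
+ * the kxsd9_descr default range {2, 5006} below encodes 2.5006 g, and the
+ * 4 g branch above rewrites the fraction to 9951, i.e. 4.9951 g full scale.
+ */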
+
+static int kxsd9_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+ return result;
+}
+
+static struct ext_slave_descr kxsd9_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = kxsd9_suspend,
+ .resume = kxsd9_resume,
+ .read = kxsd9_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "kxsd9",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_KXSD9,
+ .read_reg = 0x00,
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {2, 5006},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *kxsd9_get_slave_descr(void)
+{
+ return &kxsd9_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct kxsd9_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int kxsd9_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct kxsd9_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ kxsd9_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int kxsd9_mod_remove(struct i2c_client *client)
+{
+ struct kxsd9_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ kxsd9_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id kxsd9_mod_id[] = {
+ { "kxsd9", ACCEL_ID_KXSD9 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, kxsd9_mod_id);
+
+static struct i2c_driver kxsd9_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = kxsd9_mod_probe,
+ .remove = kxsd9_mod_remove,
+ .id_table = kxsd9_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "kxsd9_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init kxsd9_mod_init(void)
+{
+ int res = i2c_add_driver(&kxsd9_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "kxsd9_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit kxsd9_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&kxsd9_mod_driver);
+}
+
+module_init(kxsd9_mod_init);
+module_exit(kxsd9_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate KXSD9 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("kxsd9_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/kxtf9.c b/drivers/misc/inv_mpu/accel/kxtf9.c
new file mode 100644
index 000000000000..80776f249c63
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/kxtf9.c
@@ -0,0 +1,841 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Accelerometer setup and handling methods for Kionix KXTF9.
+ *
+ * @{
+ * @file kxtf9.c
+ * @brief Accelerometer setup and handling methods for Kionix KXTF9.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#undef MPL_LOG_NDEBUG
+#define MPL_LOG_NDEBUG 1
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+#define KXTF9_XOUT_HPF_L (0x00) /* 0000 0000 */
+#define KXTF9_XOUT_HPF_H (0x01) /* 0000 0001 */
+#define KXTF9_YOUT_HPF_L (0x02) /* 0000 0010 */
+#define KXTF9_YOUT_HPF_H (0x03) /* 0000 0011 */
+#define KXTF9_ZOUT_HPF_L (0x04) /* 0000 0100 */
+#define KXTF9_ZOUT_HPF_H (0x05) /* 0000 0101 */
+#define KXTF9_XOUT_L (0x06) /* 0000 0110 */
+#define KXTF9_XOUT_H (0x07) /* 0000 0111 */
+#define KXTF9_YOUT_L (0x08) /* 0000 1000 */
+#define KXTF9_YOUT_H (0x09) /* 0000 1001 */
+#define KXTF9_ZOUT_L (0x0A) /* 0000 1010 */
+#define KXTF9_ZOUT_H (0x0B) /* 0000 1011 */
+#define KXTF9_ST_RESP (0x0C) /* 0000 1100 */
+#define KXTF9_WHO_AM_I (0x0F) /* 0000 1111 */
+#define KXTF9_TILT_POS_CUR (0x10) /* 0001 0000 */
+#define KXTF9_TILT_POS_PRE (0x11) /* 0001 0001 */
+#define KXTF9_INT_SRC_REG1 (0x15) /* 0001 0101 */
+#define KXTF9_INT_SRC_REG2 (0x16) /* 0001 0110 */
+#define KXTF9_STATUS_REG (0x18) /* 0001 1000 */
+#define KXTF9_INT_REL (0x1A) /* 0001 1010 */
+#define KXTF9_CTRL_REG1 (0x1B) /* 0001 1011 */
+#define KXTF9_CTRL_REG2 (0x1C) /* 0001 1100 */
+#define KXTF9_CTRL_REG3 (0x1D) /* 0001 1101 */
+#define KXTF9_INT_CTRL_REG1 (0x1E) /* 0001 1110 */
+#define KXTF9_INT_CTRL_REG2 (0x1F) /* 0001 1111 */
+#define KXTF9_INT_CTRL_REG3 (0x20) /* 0010 0000 */
+#define KXTF9_DATA_CTRL_REG (0x21) /* 0010 0001 */
+#define KXTF9_TILT_TIMER (0x28) /* 0010 1000 */
+#define KXTF9_WUF_TIMER (0x29) /* 0010 1001 */
+#define KXTF9_TDT_TIMER (0x2B) /* 0010 1011 */
+#define KXTF9_TDT_H_THRESH (0x2C) /* 0010 1100 */
+#define KXTF9_TDT_L_THRESH (0x2D) /* 0010 1101 */
+#define KXTF9_TDT_TAP_TIMER (0x2E) /* 0010 1110 */
+#define KXTF9_TDT_TOTAL_TIMER (0x2F) /* 0010 1111 */
+#define KXTF9_TDT_LATENCY_TIMER (0x30) /* 0011 0000 */
+#define KXTF9_TDT_WINDOW_TIMER (0x31) /* 0011 0001 */
+#define KXTF9_WUF_THRESH (0x5A) /* 0101 1010 */
+#define KXTF9_TILT_ANGLE (0x5C) /* 0101 1100 */
+#define KXTF9_HYST_SET (0x5F) /* 0101 1111 */
+
+#define KXTF9_MAX_DUR (0xFF)
+#define KXTF9_MAX_THS (0xFF)
+#define KXTF9_THS_COUNTS_P_G (32)
+
+/* -------------------------------------------------------------------------- */
+
+struct kxtf9_config {
+ unsigned long odr; /* Output data rate mHz */
+ unsigned int fsr; /* full scale range mg */
+ unsigned int ths; /* Motion no-motion threshold mg */
+ unsigned int dur; /* Motion no-motion duration ms */
+ unsigned int irq_type;
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char reg_odr;
+ unsigned char reg_int_cfg1;
+ unsigned char reg_int_cfg2;
+ unsigned char ctrl_reg1;
+};
+
+struct kxtf9_private_data {
+ struct kxtf9_config suspend;
+ struct kxtf9_config resume;
+};
+
+static int kxtf9_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config, int apply, long ths)
+{
+ int result = INV_SUCCESS;
+ if ((ths * KXTF9_THS_COUNTS_P_G / 1000) > KXTF9_MAX_THS)
+ ths = (long)(KXTF9_MAX_THS * 1000) / KXTF9_THS_COUNTS_P_G;
+
+ if (ths < 0)
+ ths = 0;
+
+ config->ths = ths;
+ config->reg_ths = (unsigned char)
+ ((long)(ths * KXTF9_THS_COUNTS_P_G) / 1000);
+ MPL_LOGV("THS: %d, 0x%02x\n", config->ths, (int)config->reg_ths);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_WUF_THRESH,
+ config->reg_ths);
+ return result;
+}
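+
+/*
+ * Worked example (illustrative, not from the original source): with
+ * KXTF9_THS_COUNTS_P_G = 32 counts per g, a request of ths = 80 mg maps to
+ * reg_ths = 80 * 32 / 1000 = 2 (0x02); requests of 8000 mg or more are first
+ * clamped so the computed register value never exceeds KXTF9_MAX_THS.
+ */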
+
+static int kxtf9_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config, int apply, long dur)
+{
+ int result = INV_SUCCESS;
+ long reg_dur = (dur * config->odr) / 1000000L;
+ config->dur = dur;
+
+ if (reg_dur > KXTF9_MAX_DUR)
+ reg_dur = KXTF9_MAX_DUR;
+
+ config->reg_dur = (unsigned char)reg_dur;
+ MPL_LOGV("DUR: %d, 0x%02x\n", config->dur, (int)config->reg_dur);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_WUF_TIMER,
+ (unsigned char)reg_dur);
+ return result;
+}
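+
+/*
+ * Worked example (illustrative only): dur is in ms and config->odr in mHz,
+ * so at the 50 Hz suspend rate (odr = 50000) a 1000 ms duration becomes
+ * reg_dur = 1000 * 50000 / 1000000 = 50 (0x32), and anything that computes
+ * to more than 255 samples is clamped to KXTF9_MAX_DUR.
+ */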
+
+/**
+ * Sets the IRQ to fire when one of the IRQ events occurs. Threshold and
+ * duration will not be used unless the type is MOT or NMOT.
+ *
+ * @param config configuration to apply to, suspend or resume
+ * @param irq_type The type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ */
+static int kxtf9_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config, int apply, long irq_type)
+{
+ int result = INV_SUCCESS;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+
+ config->irq_type = (unsigned char)irq_type;
+ config->ctrl_reg1 &= ~0x22;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ config->ctrl_reg1 |= 0x20;
+ config->reg_int_cfg1 = 0x38;
+ config->reg_int_cfg2 = 0x00;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ config->ctrl_reg1 |= 0x02;
+ if ((unsigned long)config ==
+ (unsigned long)&private_data->suspend)
+ config->reg_int_cfg1 = 0x34;
+ else
+ config->reg_int_cfg1 = 0x24;
+ config->reg_int_cfg2 = 0xE0;
+ } else {
+ config->reg_int_cfg1 = 0x00;
+ config->reg_int_cfg2 = 0x00;
+ }
+
+ if (apply) {
+ /* Must clear bit 7 before writing new configuration */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG1,
+ config->reg_int_cfg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG2,
+ config->reg_int_cfg2);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ config->ctrl_reg1);
+ }
+ MPL_LOGV("CTRL_REG1: %lx, INT_CFG1: %lx, INT_CFG2: %lx\n",
+ (unsigned long)config->ctrl_reg1,
+ (unsigned long)config->reg_int_cfg1,
+ (unsigned long)config->reg_int_cfg2);
+
+ return result;
+}
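+
+/*
+ * Illustrative example (not part of the original driver): requesting
+ * MPU_SLAVE_IRQ_TYPE_DATA_READY sets bit 5 (0x20) of ctrl_reg1 and programs
+ * INT_CTRL_REG1/INT_CTRL_REG2 with 0x38/0x00, while MPU_SLAVE_IRQ_TYPE_MOTION
+ * sets bit 1 (0x02) and writes 0xE0 to INT_CTRL_REG2; with apply set, the
+ * registers are rewritten immediately after CTRL_REG1 is parked at 0x40.
+ */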
+
+/**
+ * Set the Output data rate for the particular configuration
+ *
+ * @param config Config to modify with new ODR
+ * @param odr Output data rate in units of 1/1000Hz
+ */
+static int kxtf9_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config, int apply, long odr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ /* The data sheet also lists a 12.5 Hz rate, but it appears to produce
+ * only a single correct data value, so it is omitted from this table */
+ if (odr > 400000L) {
+ config->odr = 800000L;
+ bits = 0x06;
+ } else if (odr > 200000L) {
+ config->odr = 400000L;
+ bits = 0x05;
+ } else if (odr > 100000L) {
+ config->odr = 200000L;
+ bits = 0x04;
+ } else if (odr > 50000) {
+ config->odr = 100000L;
+ bits = 0x03;
+ } else if (odr > 25000) {
+ config->odr = 50000;
+ bits = 0x02;
+ } else if (odr != 0) {
+ config->odr = 25000;
+ bits = 0x01;
+ } else {
+ config->odr = 0;
+ bits = 0;
+ }
+
+ if (odr != 0)
+ config->ctrl_reg1 |= 0x80;
+ else
+ config->ctrl_reg1 &= ~0x80;
+
+ config->reg_odr = bits;
+ kxtf9_set_dur(mlsl_handle, pdata, config, apply, config->dur);
+ MPL_LOGV("ODR: %ld, 0x%02x\n", config->odr, (int)config->ctrl_reg1);
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ config->reg_odr);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ config->ctrl_reg1);
+ }
+ return result;
+}
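+
+/*
+ * Worked example (illustrative only): odr is in milli-Hz, so a call such as
+ *
+ *	kxtf9_set_odr(mlsl_handle, pdata, &private_data->resume, true, 200000L);
+ *
+ * selects the 200 Hz entry (DATA_CTRL_REG = 0x04) and sets the operating bit
+ * (0x80) in ctrl_reg1, while an odr of 0 clears 0x80 and leaves the part
+ * idle.
+ */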
+
+/**
+ * Set the full scale range of the accels
+ *
+ * @param config pointer to configuration
+ * @param fsr requested full scale range
+ */
+static int kxtf9_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config, int apply, long fsr)
+{
+ int result = INV_SUCCESS;
+
+ config->ctrl_reg1 = (config->ctrl_reg1 & 0xE7);
+ if (fsr <= 2000) {
+ config->fsr = 2000;
+ config->ctrl_reg1 |= 0x00;
+ } else if (fsr <= 4000) {
+ config->fsr = 4000;
+ config->ctrl_reg1 |= 0x08;
+ } else {
+ config->fsr = 8000;
+ config->ctrl_reg1 |= 0x10;
+ }
+
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ if (apply) {
+ /* Must clear bit 7 before writing new configuration */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ config->ctrl_reg1);
+ }
+ return result;
+}
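+
+/*
+ * Worked example (illustrative only): fsr is in mg, so 2000 leaves bits
+ * [4:3] of ctrl_reg1 at 00 (2 g), 4000 sets 0x08 (4 g) and anything larger
+ * sets 0x10 (8 g); config->fsr records the range actually programmed, so a
+ * request of 3000 mg ends up as 4000.
+ */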
+
+static int kxtf9_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char data;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+
+ /* Wake up */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* INT_CTRL_REG1: */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG1,
+ private_data->suspend.reg_int_cfg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* WUF_THRESH: */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_WUF_THRESH,
+ private_data->suspend.reg_ths);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* DATA_CTRL_REG */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ private_data->suspend.reg_odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* WUF_TIMER */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_WUF_TIMER,
+ private_data->suspend.reg_dur);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Normal operation */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ KXTF9_INT_REL, 1, &data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/* full scale setting - register and mask */
+#define ACCEL_KIONIX_CTRL_REG (0x1b)
+#define ACCEL_KIONIX_CTRL_MASK (0x18)
+
+static int kxtf9_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char data;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+
+ /* Wake up */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* INT_CTRL_REG1: */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG1,
+ private_data->resume.reg_int_cfg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* WUF_THRESH: */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_WUF_THRESH,
+ private_data->resume.reg_ths);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* DATA_CTRL_REG */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ private_data->resume.reg_odr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* WUF_TIMER */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_WUF_TIMER,
+ private_data->resume.reg_dur);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Normal operation */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ KXTF9_INT_REL, 1, &data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_SUCCESS;
+}
+
+static int kxtf9_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+
+ struct kxtf9_private_data *private_data;
+ int result = INV_SUCCESS;
+
+ private_data = (struct kxtf9_private_data *)
+ kzalloc(sizeof(struct kxtf9_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ /* RAM reset */
+ /* Fastest Reset */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Fastest Reset */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG, 0x36);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Reset */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG3, 0xcd);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(2);
+
+ pdata->private_data = private_data;
+
+ private_data->resume.ctrl_reg1 = 0xC0;
+ private_data->suspend.ctrl_reg1 = 0x40;
+
+ result = kxtf9_set_dur(mlsl_handle, pdata, &private_data->suspend,
+ false, 1000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = kxtf9_set_dur(mlsl_handle, pdata, &private_data->resume,
+ false, 2540);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = kxtf9_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 50000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = kxtf9_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000L);
+
+ result = kxtf9_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, 2000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = kxtf9_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, 2000);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = kxtf9_set_ths(mlsl_handle, pdata, &private_data->suspend,
+ false, 80);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = kxtf9_set_ths(mlsl_handle, pdata, &private_data->resume,
+ false, 40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = kxtf9_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = kxtf9_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int kxtf9_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int kxtf9_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct kxtf9_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return kxtf9_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return kxtf9_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return kxtf9_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return kxtf9_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return kxtf9_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return kxtf9_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return kxtf9_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return kxtf9_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return kxtf9_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return kxtf9_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static int kxtf9_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct kxtf9_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.irq_type;
+ break;
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static int kxtf9_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ unsigned char reg;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ KXTF9_INT_SRC_REG2, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (!(reg & 0x10))
+ return INV_ERROR_ACCEL_DATA_NOT_READY;
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static struct ext_slave_descr kxtf9_descr = {
+ .init = kxtf9_init,
+ .exit = kxtf9_exit,
+ .suspend = kxtf9_suspend,
+ .resume = kxtf9_resume,
+ .read = kxtf9_read,
+ .config = kxtf9_config,
+ .get_config = kxtf9_get_config,
+ .name = "kxtf9",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_KXTF9,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *kxtf9_get_slave_descr(void)
+{
+ return &kxtf9_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct kxtf9_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int kxtf9_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct kxtf9_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ kxtf9_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int kxtf9_mod_remove(struct i2c_client *client)
+{
+ struct kxtf9_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ kxtf9_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id kxtf9_mod_id[] = {
+ { "kxtf9", ACCEL_ID_KXTF9 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, kxtf9_mod_id);
+
+static struct i2c_driver kxtf9_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = kxtf9_mod_probe,
+ .remove = kxtf9_mod_remove,
+ .id_table = kxtf9_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "kxtf9_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init kxtf9_mod_init(void)
+{
+ int res = i2c_add_driver(&kxtf9_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "kxtf9_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit kxtf9_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&kxtf9_mod_driver);
+}
+
+module_init(kxtf9_mod_init);
+module_exit(kxtf9_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate KXTF9 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("kxtf9_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/lis331.c b/drivers/misc/inv_mpu/accel/lis331.c
new file mode 100644
index 000000000000..bcbec252af97
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/lis331.c
@@ -0,0 +1,745 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file lis331.c
+ * @brief Accelerometer setup and handling methods for ST LIS331DLH.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#undef MPL_LOG_NDEBUG
+#define MPL_LOG_NDEBUG 1
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* LIS331DLH register addresses and masks */
+#define LIS331DLH_CTRL_REG1 (0x20)
+#define LIS331DLH_CTRL_REG2 (0x21)
+#define LIS331DLH_CTRL_REG3 (0x22)
+#define LIS331DLH_CTRL_REG4 (0x23)
+#define LIS331DLH_CTRL_REG5 (0x24)
+#define LIS331DLH_HP_FILTER_RESET (0x25)
+#define LIS331DLH_REFERENCE (0x26)
+#define LIS331DLH_STATUS_REG (0x27)
+#define LIS331DLH_OUT_X_L (0x28)
+#define LIS331DLH_OUT_X_H (0x29)
+#define LIS331DLH_OUT_Y_L (0x2a)
+#define LIS331DLH_OUT_Y_H (0x2b)
+#define LIS331DLH_OUT_Z_L (0x2c)
+#define LIS331DLH_OUT_Z_H (0x2d)
+
+#define LIS331DLH_INT1_CFG (0x30)
+#define LIS331DLH_INT1_SRC (0x31)
+#define LIS331DLH_INT1_THS (0x32)
+#define LIS331DLH_INT1_DURATION (0x33)
+
+#define LIS331DLH_INT2_CFG (0x34)
+#define LIS331DLH_INT2_SRC (0x35)
+#define LIS331DLH_INT2_THS (0x36)
+#define LIS331DLH_INT2_DURATION (0x37)
+
+/* CTRL_REG1 */
+#define LIS331DLH_CTRL_MASK (0x30)
+#define LIS331DLH_SLEEP_MASK (0x20)
+#define LIS331DLH_PWR_MODE_NORMAL (0x20)
+
+#define LIS331DLH_MAX_DUR (0x7F)
+
+
+/* -------------------------------------------------------------------------- */
+
+struct lis331dlh_config {
+ unsigned int odr;
+ unsigned int fsr; /* full scale range mg */
+ unsigned int ths; /* Motion no-motion threshold mg */
+ unsigned int dur; /* Motion no-motion duration ms */
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char ctrl_reg1;
+ unsigned char irq_type;
+ unsigned char mot_int1_cfg;
+};
+
+struct lis331dlh_private_data {
+ struct lis331dlh_config suspend;
+ struct lis331dlh_config resume;
+};
+
+/* -------------------------------------------------------------------------- */
+static int lis331dlh_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis331dlh_config *config,
+ int apply, long ths)
+{
+ int result = INV_SUCCESS;
+ if ((unsigned int)ths >= config->fsr)
+ ths = (long)config->fsr - 1;
+
+ if (ths < 0)
+ ths = 0;
+
+ config->ths = ths;
+ config->reg_ths = (unsigned char)(long)((ths * 128L) / (config->fsr));
+ MPL_LOGV("THS: %d, 0x%02x\n", config->ths, (int)config->reg_ths);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_THS,
+ config->reg_ths);
+ return result;
+}
+
+static int lis331dlh_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis331dlh_config *config,
+ int apply, long dur)
+{
+ int result = INV_SUCCESS;
+ long reg_dur = (dur * config->odr) / 1000000L;
+ config->dur = dur;
+
+ if (reg_dur > LIS331DLH_MAX_DUR)
+ reg_dur = LIS331DLH_MAX_DUR;
+
+ config->reg_dur = (unsigned char)reg_dur;
+ MPL_LOGV("DUR: %d, 0x%02x\n", config->dur, (int)config->reg_dur);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_DURATION,
+ (unsigned char)reg_dur);
+ return result;
+}
+
+/**
+ * Sets the IRQ to fire when one of the IRQ events occurs. Threshold and
+ * duration will not be used unless the type is MOT or NMOT.
+ *
+ * @param config configuration to apply to, suspend or resume
+ * @param irq_type The type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ */
+static int lis331dlh_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis331dlh_config *config,
+ int apply, long irq_type)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+
+ config->irq_type = (unsigned char)irq_type;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x02;
+ reg2 = 0x00;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x00;
+ reg2 = config->mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG3, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_CFG, reg2);
+ }
+
+ return result;
+}
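+
+/*
+ * Illustrative example (not part of the original driver): for
+ * MPU_SLAVE_IRQ_TYPE_DATA_READY this writes 0x02 to CTRL_REG3 and clears
+ * INT1_CFG, whereas MPU_SLAVE_IRQ_TYPE_MOTION clears CTRL_REG3 and writes the
+ * per-config mot_int1_cfg value (0x95 for resume, 0x2a for suspend, as set in
+ * lis331dlh_init()) to INT1_CFG.
+ */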
+
+/**
+ * Set the Output data rate for the particular configuration
+ *
+ * @param config Config to modify with new ODR
+ * @param odr Output data rate in units of 1/1000Hz
+ */
+static int lis331dlh_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis331dlh_config *config,
+ int apply, long odr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ /* normal power modes */
+ if (odr > 400000) {
+ config->odr = 1000000;
+ bits = LIS331DLH_PWR_MODE_NORMAL | 0x18;
+ } else if (odr > 100000) {
+ config->odr = 400000;
+ bits = LIS331DLH_PWR_MODE_NORMAL | 0x10;
+ } else if (odr > 50000) {
+ config->odr = 100000;
+ bits = LIS331DLH_PWR_MODE_NORMAL | 0x08;
+ } else if (odr > 10000) {
+ config->odr = 50000;
+ bits = LIS331DLH_PWR_MODE_NORMAL | 0x00;
+ /* low power modes */
+ } else if (odr > 5000) {
+ config->odr = 10000;
+ bits = 0xC0;
+ } else if (odr > 2000) {
+ config->odr = 5000;
+ bits = 0xA0;
+ } else if (odr > 1000) {
+ config->odr = 2000;
+ bits = 0x80;
+ } else if (odr > 500) {
+ config->odr = 1000;
+ bits = 0x60;
+ } else if (odr > 0) {
+ config->odr = 500;
+ bits = 0x40;
+ } else {
+ config->odr = 0;
+ bits = 0;
+ }
+
+ config->ctrl_reg1 = bits | (config->ctrl_reg1 & 0x7);
+ lis331dlh_set_dur(mlsl_handle, pdata, config, apply, config->dur);
+ MPL_LOGV("ODR: %d, 0x%02x\n", config->odr, (int)config->ctrl_reg1);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG1,
+ config->ctrl_reg1);
+ return result;
+}
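+
+/*
+ * Worked example (illustrative only): odr is in milli-Hz, so a request of
+ * 200000 rounds up to the 400 Hz normal-power entry
+ * (LIS331DLH_PWR_MODE_NORMAL | 0x10), while requests of 10 Hz and below fall
+ * through to the low-power encodings 0x40..0xC0; the low three axis-enable
+ * bits of ctrl_reg1 are preserved either way.
+ */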
+
+/**
+ * Set the full scale range of the accels
+ *
+ * @param config pointer to configuration
+ * @param fsr requested full scale range
+ */
+static int lis331dlh_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis331dlh_config *config,
+ int apply, long fsr)
+{
+ unsigned char reg1 = 0x40;
+ int result = INV_SUCCESS;
+
+ if (fsr <= 2048) {
+ config->fsr = 2048;
+ } else if (fsr <= 4096) {
+ reg1 |= 0x10;
+ config->fsr = 4096;
+ } else {
+ reg1 |= 0x30;
+ config->fsr = 8192;
+ }
+
+ lis331dlh_set_ths(mlsl_handle, pdata, config, apply, config->ths);
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG4, reg1);
+
+ return result;
+}
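+
+/*
+ * Illustrative note (not part of the original driver): CTRL_REG4 is built
+ * from a 0x40 base with the full-scale bits [5:4] on top, matching the
+ * encoding used by lis331dlh_suspend()/lis331dlh_resume(): 0x10 for the
+ * 4096 mg range and 0x30 for the 8192 mg range, i.e. register values of
+ * 0x50 and 0x70 respectively.
+ */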
+
+static int lis331dlh_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+ struct lis331dlh_private_data *private_data =
+ (struct lis331dlh_private_data *)(pdata->private_data);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG2, 0x0f);
+ reg1 = 0x40;
+ if (private_data->suspend.fsr == 8192)
+ reg1 |= 0x30;
+ else if (private_data->suspend.fsr == 4096)
+ reg1 |= 0x10;
+ /* else bits [4..5] are already zero */
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG4, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_THS,
+ private_data->suspend.reg_ths);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_DURATION,
+ private_data->suspend.reg_dur);
+
+ if (private_data->suspend.irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x02;
+ reg2 = 0x00;
+ } else if (private_data->suspend.irq_type ==
+ MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x00;
+ reg2 = private_data->suspend.mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG3, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_CFG, reg2);
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LIS331DLH_HP_FILTER_RESET, 1, &reg1);
+ return result;
+}
+
+static int lis331dlh_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+ struct lis331dlh_private_data *private_data =
+ (struct lis331dlh_private_data *)(pdata->private_data);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(6);
+
+ /* Full Scale */
+ reg1 = 0x40;
+ if (private_data->resume.fsr == 8192)
+ reg1 |= 0x30;
+ else if (private_data->resume.fsr == 4096)
+ reg1 |= 0x10;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG4, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Configure high pass filter */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG2, 0x0F);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (private_data->resume.irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x02;
+ reg2 = 0x00;
+ } else if (private_data->resume.irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x00;
+ reg2 = private_data->resume.mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_CTRL_REG3, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_THS,
+ private_data->resume.reg_ths);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_DURATION,
+ private_data->resume.reg_dur);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS331DLH_INT1_CFG, reg2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LIS331DLH_HP_FILTER_RESET, 1, &reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int lis331dlh_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LIS331DLH_STATUS_REG, 1, data);
+ if (data[0] & 0x0F) {
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len,
+ data);
+ return result;
+ } else
+ return INV_ERROR_ACCEL_DATA_NOT_READY;
+}
+
+static int lis331dlh_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ struct lis331dlh_private_data *private_data;
+ long range;
+ private_data = (struct lis331dlh_private_data *)
+ kzalloc(sizeof(struct lis331dlh_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ private_data->resume.ctrl_reg1 = 0x37;
+ private_data->suspend.ctrl_reg1 = 0x47;
+ private_data->resume.mot_int1_cfg = 0x95;
+ private_data->suspend.mot_int1_cfg = 0x2a;
+
+ lis331dlh_set_odr(mlsl_handle, pdata, &private_data->suspend, false, 0);
+ lis331dlh_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ lis331dlh_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ lis331dlh_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+
+ lis331dlh_set_ths(mlsl_handle, pdata, &private_data->suspend,
+ false, 80);
+ lis331dlh_set_ths(mlsl_handle, pdata, &private_data->resume, false, 40);
+
+ lis331dlh_set_dur(mlsl_handle, pdata, &private_data->suspend,
+ false, 1000);
+ lis331dlh_set_dur(mlsl_handle, pdata, &private_data->resume,
+ false, 2540);
+
+ lis331dlh_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ lis331dlh_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ return INV_SUCCESS;
+}
+
+static int lis331dlh_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int lis331dlh_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct lis331dlh_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return lis331dlh_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return lis331dlh_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return lis331dlh_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return lis331dlh_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return lis331dlh_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return lis331dlh_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return lis331dlh_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return lis331dlh_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return lis331dlh_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return lis331dlh_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static int lis331dlh_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct lis331dlh_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.irq_type;
+ break;
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_descr lis331dlh_descr = {
+ .init = lis331dlh_init,
+ .exit = lis331dlh_exit,
+ .suspend = lis331dlh_suspend,
+ .resume = lis331dlh_resume,
+ .read = lis331dlh_read,
+ .config = lis331dlh_config,
+ .get_config = lis331dlh_get_config,
+ .name = "lis331dlh",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_LIS331,
+ .read_reg = (0x28 | 0x80), /* 0x80 for burst reads */
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {2, 480},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *lis331_get_slave_descr(void)
+{
+ return &lis331dlh_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct lis331_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int lis331_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct lis331_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ lis331_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int lis331_mod_remove(struct i2c_client *client)
+{
+ struct lis331_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ lis331_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id lis331_mod_id[] = {
+ { "lis331", ACCEL_ID_LIS331 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lis331_mod_id);
+
+static struct i2c_driver lis331_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = lis331_mod_probe,
+ .remove = lis331_mod_remove,
+ .id_table = lis331_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lis331_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init lis331_mod_init(void)
+{
+ int res = i2c_add_driver(&lis331_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "lis331_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit lis331_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&lis331_mod_driver);
+}
+
+module_init(lis331_mod_init);
+module_exit(lis331_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate LIS331 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("lis331_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/lis3dh.c b/drivers/misc/inv_mpu/accel/lis3dh.c
new file mode 100644
index 000000000000..27206e4b847c
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/lis3dh.c
@@ -0,0 +1,728 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file lis3dh.c
+ * @brief Accelerometer setup and handling methods for ST LIS3DH.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#undef MPL_LOG_NDEBUG
+#define MPL_LOG_NDEBUG 0
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* LIS3DH register addresses */
+#define LIS3DH_CTRL_REG1 (0x20)
+#define LIS3DH_CTRL_REG2 (0x21)
+#define LIS3DH_CTRL_REG3 (0x22)
+#define LIS3DH_CTRL_REG4 (0x23)
+#define LIS3DH_CTRL_REG5 (0x24)
+#define LIS3DH_CTRL_REG6 (0x25)
+#define LIS3DH_REFERENCE (0x26)
+#define LIS3DH_STATUS_REG (0x27)
+#define LIS3DH_OUT_X_L (0x28)
+#define LIS3DH_OUT_X_H (0x29)
+#define LIS3DH_OUT_Y_L (0x2a)
+#define LIS3DH_OUT_Y_H (0x2b)
+#define LIS3DH_OUT_Z_L (0x2c)
+#define LIS3DH_OUT_Z_H (0x2d)
+
+#define LIS3DH_INT1_CFG (0x30)
+#define LIS3DH_INT1_SRC (0x31)
+#define LIS3DH_INT1_THS (0x32)
+#define LIS3DH_INT1_DURATION (0x33)
+
+#define LIS3DH_MAX_DUR (0x7F)
+
+/* -------------------------------------------------------------------------- */
+
+struct lis3dh_config {
+ unsigned long odr;
+ unsigned int fsr; /* full scale range mg */
+	unsigned int ths;	/* Motion/no-motion threshold mg */
+	unsigned int dur;	/* Motion/no-motion duration ms */
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char ctrl_reg1;
+ unsigned char irq_type;
+ unsigned char mot_int1_cfg;
+};
+
+struct lis3dh_private_data {
+ struct lis3dh_config suspend;
+ struct lis3dh_config resume;
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int lis3dh_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis3dh_config *config, int apply, long ths)
+{
+ int result = INV_SUCCESS;
+ if ((unsigned int)ths > 1000 * config->fsr)
+ ths = (long)1000 * config->fsr;
+
+ if (ths < 0)
+ ths = 0;
+
+ config->ths = ths;
+ config->reg_ths = (unsigned char)(long)((ths * 128L) / (config->fsr));
+ MPL_LOGV("THS: %d, 0x%02x\n", config->ths, (int)config->reg_ths);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_THS,
+ config->reg_ths);
+ return result;
+}
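+
+/*
+ * Worked example (illustrative): with config->fsr = 2048 mg, the 80 mg
+ * suspend threshold requested by lis3dh_init() gives
+ * reg_ths = (80 * 128) / 2048 = 5, i.e. 0x05 is what would be written to
+ * LIS3DH_INT1_THS once apply is set.
+ */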
+
+static int lis3dh_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis3dh_config *config, int apply, long dur)
+{
+ int result = INV_SUCCESS;
+ long reg_dur = (dur * config->odr) / 1000000L;
+ config->dur = dur;
+
+ if (reg_dur > LIS3DH_MAX_DUR)
+ reg_dur = LIS3DH_MAX_DUR;
+
+ config->reg_dur = (unsigned char)reg_dur;
+ MPL_LOGV("DUR: %d, 0x%02x\n", config->dur, (int)config->reg_dur);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_DURATION,
+ (unsigned char)reg_dur);
+ return result;
+}
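+
+/*
+ * Worked example (illustrative): at config->odr = 200000 (200 Hz) the
+ * 2540 ms resume duration requested by lis3dh_init() gives
+ * reg_dur = (2540 * 200000) / 1000000 = 508, which is clamped to
+ * LIS3DH_MAX_DUR (0x7F) before being written to LIS3DH_INT1_DURATION.
+ */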
+
+/**
+ * Sets the IRQ to fire when one of the IRQ events occurs. The threshold and
+ * duration are not used unless the type is MOT or NMOT.
+ *
+ * @param config configuration to apply to, suspend or resume
+ * @param irq_type The type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ */
+static int lis3dh_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis3dh_config *config,
+ int apply, long irq_type)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+
+ config->irq_type = (unsigned char)irq_type;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x10;
+ reg2 = 0x00;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x40;
+ reg2 = config->mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG3, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_CFG, reg2);
+ }
+
+ return result;
+}
+
+/**
+ * Set the Output data rate for the particular configuration
+ *
+ * @param config Config to modify with new ODR
+ * @param odr Output data rate in units of 1/1000Hz
+ */
+static int lis3dh_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis3dh_config *config, int apply, long odr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ if (odr > 400000L) {
+ config->odr = 1250000L;
+ bits = 0x90;
+ } else if (odr > 200000L) {
+ config->odr = 400000L;
+ bits = 0x70;
+ } else if (odr > 100000L) {
+ config->odr = 200000L;
+ bits = 0x60;
+ } else if (odr > 50000) {
+ config->odr = 100000L;
+ bits = 0x50;
+ } else if (odr > 25000) {
+ config->odr = 50000;
+ bits = 0x40;
+ } else if (odr > 10000) {
+ config->odr = 25000;
+ bits = 0x30;
+ } else if (odr > 1000) {
+ config->odr = 10000;
+ bits = 0x20;
+ } else if (odr > 500) {
+ config->odr = 1000;
+ bits = 0x10;
+ } else {
+ config->odr = 0;
+ bits = 0;
+ }
+
+ config->ctrl_reg1 = bits | (config->ctrl_reg1 & 0xf);
+ lis3dh_set_dur(mlsl_handle, pdata, config, apply, config->dur);
+ MPL_LOGV("ODR: %ld, 0x%02x\n", config->odr, (int)config->ctrl_reg1);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG1,
+ config->ctrl_reg1);
+ return result;
+}
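+
+/*
+ * Worked example (illustrative): a 200000 (200 Hz) request falls into the
+ * "> 100000" bucket, so config->odr becomes 200000 and the rate bits 0x60
+ * are merged with the low nibble of ctrl_reg1; starting from the 0x67 value
+ * that lis3dh_init() assigns to the resume config this leaves ctrl_reg1 at
+ * 0x60 | 0x07 = 0x67.
+ */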
+
+/**
+ * Set the full scale range of the accels
+ *
+ * @param config pointer to configuration
+ * @param fsr requested full scale range
+ */
+static int lis3dh_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lis3dh_config *config, int apply, long fsr)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1 = 0x48;
+
+ if (fsr <= 2048) {
+ config->fsr = 2048;
+ } else if (fsr <= 4096) {
+ reg1 |= 0x10;
+ config->fsr = 4096;
+ } else if (fsr <= 8192) {
+ reg1 |= 0x20;
+ config->fsr = 8192;
+ } else {
+ reg1 |= 0x30;
+		config->fsr = 16384;
+ }
+
+ lis3dh_set_ths(mlsl_handle, pdata, config, apply, config->ths);
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG4, reg1);
+
+ return result;
+}
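+
+/*
+ * Worked example (illustrative, assuming range_fixedpoint_to_long_mg()
+ * converts the descriptor range {2, 480} to 2480 mg): lis3dh_init() then
+ * lands in the "<= 4096" branch, so config->fsr becomes 4096 and
+ * 0x48 | 0x10 = 0x58 would be written to LIS3DH_CTRL_REG4 once apply is set.
+ */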
+
+static int lis3dh_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+ struct lis3dh_private_data *private_data = pdata->private_data;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+
+ reg1 = 0x48;
+ if (private_data->suspend.fsr == 16384)
+ reg1 |= 0x30;
+ else if (private_data->suspend.fsr == 8192)
+ reg1 |= 0x20;
+ else if (private_data->suspend.fsr == 4096)
+ reg1 |= 0x10;
+ else if (private_data->suspend.fsr == 2048)
+ reg1 |= 0x00;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG4, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_THS,
+ private_data->suspend.reg_ths);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_DURATION,
+ private_data->suspend.reg_dur);
+
+ if (private_data->suspend.irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x10;
+ reg2 = 0x00;
+ } else if (private_data->suspend.irq_type ==
+ MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x40;
+ reg2 = private_data->suspend.mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG3, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_CFG, reg2);
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG6, 1, &reg1);
+
+ return result;
+}
+
+static int lis3dh_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char reg1;
+ unsigned char reg2;
+ struct lis3dh_private_data *private_data = pdata->private_data;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(6);
+
+ /* Full Scale */
+ reg1 = 0x48;
+ if (private_data->suspend.fsr == 16384)
+ reg1 |= 0x30;
+ else if (private_data->suspend.fsr == 8192)
+ reg1 |= 0x20;
+ else if (private_data->suspend.fsr == 4096)
+ reg1 |= 0x10;
+ else if (private_data->suspend.fsr == 2048)
+ reg1 |= 0x00;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG4, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (private_data->resume.irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x10;
+ reg2 = 0x00;
+ } else if (private_data->resume.irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x40;
+ reg2 = private_data->resume.mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG3, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_THS,
+ private_data->resume.reg_ths);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_DURATION,
+ private_data->resume.reg_dur);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LIS3DH_INT1_CFG, reg2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LIS3DH_CTRL_REG6, 1, &reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int lis3dh_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LIS3DH_STATUS_REG, 1, data);
+ if (data[0] & 0x0F) {
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len,
+ data);
+ return result;
+ } else
+ return INV_ERROR_ACCEL_DATA_NOT_READY;
+}
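+
+/*
+ * Note: the 0x0F mask checks the per-axis/combined "new data available" bits
+ * in the low nibble of LIS3DH_STATUS_REG, so the burst read of the output
+ * registers only happens once at least one axis has fresh data; otherwise
+ * the caller gets INV_ERROR_ACCEL_DATA_NOT_READY and can simply poll again.
+ */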
+
+static int lis3dh_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ long range;
+ struct lis3dh_private_data *private_data;
+ private_data = (struct lis3dh_private_data *)
+ kzalloc(sizeof(struct lis3dh_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ private_data->resume.ctrl_reg1 = 0x67;
+ private_data->suspend.ctrl_reg1 = 0x18;
+ private_data->resume.mot_int1_cfg = 0x95;
+ private_data->suspend.mot_int1_cfg = 0x2a;
+
+ lis3dh_set_odr(mlsl_handle, pdata, &private_data->suspend, false, 0);
+ lis3dh_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000L);
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ lis3dh_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ lis3dh_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+
+ lis3dh_set_ths(mlsl_handle, pdata, &private_data->suspend,
+ false, 80);
+ lis3dh_set_ths(mlsl_handle, pdata, &private_data->resume,
+ false, 40);
+
+ lis3dh_set_dur(mlsl_handle, pdata, &private_data->suspend,
+ false, 1000);
+ lis3dh_set_dur(mlsl_handle, pdata, &private_data->resume,
+ false, 2540);
+
+ lis3dh_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ lis3dh_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+
+	result = inv_serial_single_write(mlsl_handle, pdata->address,
+					 LIS3DH_CTRL_REG1, 0x07);
+	if (result) {
+		LOG_RESULT_LOCATION(result);
+		return result;
+	}
+	msleep(6);
+
+	return INV_SUCCESS;
+}
+
+static int lis3dh_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int lis3dh_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct lis3dh_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return lis3dh_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return lis3dh_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return lis3dh_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return lis3dh_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return lis3dh_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return lis3dh_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return lis3dh_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return lis3dh_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return lis3dh_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply, *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return lis3dh_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply, *((long *)data->data));
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+ return INV_SUCCESS;
+}
+
+static int lis3dh_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct lis3dh_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long)private_data->resume.irq_type;
+ break;
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_descr lis3dh_descr = {
+ .init = lis3dh_init,
+ .exit = lis3dh_exit,
+ .suspend = lis3dh_suspend,
+ .resume = lis3dh_resume,
+ .read = lis3dh_read,
+ .config = lis3dh_config,
+ .get_config = lis3dh_get_config,
+ .name = "lis3dh",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_LIS3DH,
+ .read_reg = 0x28 | 0x80, /* 0x80 for burst reads */
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {2, 480},
+ .trigger = NULL,
+};
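+
+/*
+ * Descriptor notes (partly an assumption about how the MPU core uses these
+ * fields): read_reg points at OUT_X_L with the auto-increment bit 0x80 set,
+ * so one 6-byte burst covers the three 16-bit axes, and the {2, 480} range
+ * is presumably the 2.48 g default that range_fixedpoint_to_long_mg()
+ * converts back to 2480 mg in lis3dh_init().
+ */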
+
+static
+struct ext_slave_descr *lis3dh_get_slave_descr(void)
+{
+ return &lis3dh_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct lis3dh_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int lis3dh_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct lis3dh_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ lis3dh_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int lis3dh_mod_remove(struct i2c_client *client)
+{
+ struct lis3dh_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ lis3dh_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id lis3dh_mod_id[] = {
+ { "lis3dh", ACCEL_ID_LIS3DH },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lis3dh_mod_id);
+
+static struct i2c_driver lis3dh_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = lis3dh_mod_probe,
+ .remove = lis3dh_mod_remove,
+ .id_table = lis3dh_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lis3dh_mod",
+ },
+ .address_list = normal_i2c,
+};
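+
+/*
+ * Hookup sketch (assumption, not part of this file): a board file would
+ * normally declare the device so that lis3dh_mod_probe() finds the MPU slave
+ * platform data on the client, e.g.
+ *
+ *	static struct i2c_board_info __initdata lis3dh_i2c_info = {
+ *		I2C_BOARD_INFO("lis3dh", 0x18),
+ *		.platform_data = &mpu_accel_pdata,
+ *	};
+ *
+ * where 0x18 and mpu_accel_pdata (a struct ext_slave_platform_data) are
+ * placeholder values for illustration only.
+ */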
+
+static int __init lis3dh_mod_init(void)
+{
+ int res = i2c_add_driver(&lis3dh_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "lis3dh_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit lis3dh_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&lis3dh_mod_driver);
+}
+
+module_init(lis3dh_mod_init);
+module_exit(lis3dh_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate LIS3DH sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("lis3dh_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/lsm303dlx_a.c b/drivers/misc/inv_mpu/accel/lsm303dlx_a.c
new file mode 100644
index 000000000000..576282a0fb16
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/lsm303dlx_a.c
@@ -0,0 +1,881 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file lsm303dlx_a.c
+ * @brief Accelerometer setup and handling methods for ST LSM303DLH
+ * or LSM303DLM accel.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* -------------------------------------------------------------------------- */
+
+/* full scale setting - register & mask */
+#define LSM303DLx_CTRL_REG1 (0x20)
+#define LSM303DLx_CTRL_REG2 (0x21)
+#define LSM303DLx_CTRL_REG3 (0x22)
+#define LSM303DLx_CTRL_REG4 (0x23)
+#define LSM303DLx_CTRL_REG5 (0x24)
+#define LSM303DLx_HP_FILTER_RESET (0x25)
+#define LSM303DLx_REFERENCE (0x26)
+#define LSM303DLx_STATUS_REG (0x27)
+#define LSM303DLx_OUT_X_L (0x28)
+#define LSM303DLx_OUT_X_H (0x29)
+#define LSM303DLx_OUT_Y_L (0x2a)
+#define LSM303DLx_OUT_Y_H (0x2b)
+#define LSM303DLx_OUT_Z_L		(0x2c)
+#define LSM303DLx_OUT_Z_H (0x2d)
+
+#define LSM303DLx_INT1_CFG (0x30)
+#define LSM303DLx_INT1_SRC (0x31)
+#define LSM303DLx_INT1_THS (0x32)
+#define LSM303DLx_INT1_DURATION (0x33)
+
+#define LSM303DLx_INT2_CFG (0x34)
+#define LSM303DLx_INT2_SRC (0x35)
+#define LSM303DLx_INT2_THS (0x36)
+#define LSM303DLx_INT2_DURATION (0x37)
+
+#define LSM303DLx_CTRL_MASK (0x30)
+#define LSM303DLx_SLEEP_MASK (0x20)
+#define LSM303DLx_PWR_MODE_NORMAL (0x20)
+
+#define LSM303DLx_MAX_DUR (0x7F)
+
+/* -------------------------------------------------------------------------- */
+
+struct lsm303dlx_a_config {
+ unsigned int odr;
+	unsigned int fsr;	/**< full scale range mg */
+	unsigned int ths;	/**< Motion/no-motion threshold mg */
+	unsigned int dur;	/**< Motion/no-motion duration ms */
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char ctrl_reg1;
+ unsigned char irq_type;
+ unsigned char mot_int1_cfg;
+};
+
+struct lsm303dlx_a_private_data {
+ struct lsm303dlx_a_config suspend;
+ struct lsm303dlx_a_config resume;
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int lsm303dlx_a_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lsm303dlx_a_config *config,
+ int apply,
+ long ths)
+{
+ int result = INV_SUCCESS;
+ if ((unsigned int) ths >= config->fsr)
+ ths = (long) config->fsr - 1;
+
+ if (ths < 0)
+ ths = 0;
+
+ config->ths = ths;
+ config->reg_ths = (unsigned char)(long)((ths * 128L) / (config->fsr));
+ MPL_LOGV("THS: %d, 0x%02x\n", config->ths, (int)config->reg_ths);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_THS,
+ config->reg_ths);
+ return result;
+}
+
+static int lsm303dlx_a_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lsm303dlx_a_config *config,
+ int apply,
+ long dur)
+{
+ int result = INV_SUCCESS;
+ long reg_dur = (dur * config->odr) / 1000000L;
+ config->dur = dur;
+
+ if (reg_dur > LSM303DLx_MAX_DUR)
+ reg_dur = LSM303DLx_MAX_DUR;
+
+ config->reg_dur = (unsigned char) reg_dur;
+ MPL_LOGV("DUR: %d, 0x%02x\n", config->dur, (int)config->reg_dur);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_DURATION,
+ (unsigned char)reg_dur);
+ return result;
+}
+
+/**
+ * Sets the IRQ to fire when one of the IRQ events occurs. The threshold and
+ * duration are not used unless the type is MOT or NMOT.
+ *
+ * @param config configuration to apply to, suspend or resume
+ * @param irq_type The type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ */
+static int lsm303dlx_a_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lsm303dlx_a_config *config,
+ int apply,
+ long irq_type)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+
+ config->irq_type = (unsigned char)irq_type;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x02;
+ reg2 = 0x00;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x00;
+ reg2 = config->mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG3, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_CFG, reg2);
+ }
+
+ return result;
+}
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lsm303dlx_a_config *config,
+ int apply,
+ long odr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ /* normal power modes */
+ if (odr > 400000) {
+ config->odr = 1000000;
+ bits = LSM303DLx_PWR_MODE_NORMAL | 0x18;
+ } else if (odr > 100000) {
+ config->odr = 400000;
+ bits = LSM303DLx_PWR_MODE_NORMAL | 0x10;
+ } else if (odr > 50000) {
+ config->odr = 100000;
+ bits = LSM303DLx_PWR_MODE_NORMAL | 0x08;
+ } else if (odr > 10000) {
+ config->odr = 50000;
+ bits = LSM303DLx_PWR_MODE_NORMAL | 0x00;
+ /* low power modes */
+ } else if (odr > 5000) {
+ config->odr = 10000;
+ bits = 0xC0;
+ } else if (odr > 2000) {
+ config->odr = 5000;
+ bits = 0xA0;
+ } else if (odr > 1000) {
+ config->odr = 2000;
+ bits = 0x80;
+ } else if (odr > 500) {
+ config->odr = 1000;
+ bits = 0x60;
+ } else if (odr > 0) {
+ config->odr = 500;
+ bits = 0x40;
+ } else {
+ config->odr = 0;
+ bits = 0;
+ }
+
+ config->ctrl_reg1 = bits | (config->ctrl_reg1 & 0x7);
+ lsm303dlx_a_set_dur(mlsl_handle, pdata, config, apply, config->dur);
+ MPL_LOGV("ODR: %d, 0x%02x\n", config->odr, (int)config->ctrl_reg1);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG1,
+ config->ctrl_reg1);
+ return result;
+}
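+
+/*
+ * Worked example (illustrative): the 200000 (200 Hz) request made by
+ * lsm303dlx_a_init() is rounded up to the 400 Hz normal-power bucket, so
+ * bits = LSM303DLx_PWR_MODE_NORMAL | 0x10 = 0x30; merged with the low three
+ * bits of the 0x37 resume default this leaves ctrl_reg1 at 0x37. Non-zero
+ * requests of 10000 (10 Hz) or less pick the low-power encodings instead.
+ */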
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct lsm303dlx_a_config *config,
+ int apply,
+ long fsr)
+{
+ unsigned char reg1 = 0x40;
+ int result = INV_SUCCESS;
+
+ if (fsr <= 2048) {
+ config->fsr = 2048;
+	} else if (fsr <= 4096) {
+		/* keep the FS bit encoding consistent with
+		 * lsm303dlx_a_suspend()/_resume(): 0x10 = 4g, 0x30 = 8g */
+		reg1 |= 0x10;
+		config->fsr = 4096;
+	} else {
+		reg1 |= 0x30;
+		config->fsr = 8192;
+ }
+
+ lsm303dlx_a_set_ths(mlsl_handle, pdata,
+ config, apply, config->ths);
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ if (apply)
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG4, reg1);
+
+ return result;
+}
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+ struct lsm303dlx_a_private_data *private_data =
+ (struct lsm303dlx_a_private_data *)(pdata->private_data);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG2, 0x0f);
+ reg1 = 0x40;
+ if (private_data->suspend.fsr == 8192)
+ reg1 |= 0x30;
+ else if (private_data->suspend.fsr == 4096)
+ reg1 |= 0x10;
+ /* else bits [4..5] are already zero */
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG4, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_THS,
+ private_data->suspend.reg_ths);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_DURATION,
+ private_data->suspend.reg_dur);
+
+ if (private_data->suspend.irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x02;
+ reg2 = 0x00;
+ } else if (private_data->suspend.irq_type ==
+ MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x00;
+ reg2 = private_data->suspend.mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG3, reg1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_CFG, reg2);
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LSM303DLx_HP_FILTER_RESET, 1, &reg1);
+ return result;
+}
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+ struct lsm303dlx_a_private_data *private_data =
+ (struct lsm303dlx_a_private_data *)(pdata->private_data);
+
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(6);
+
+ /* Full Scale */
+ reg1 = 0x40;
+ if (private_data->resume.fsr == 8192)
+ reg1 |= 0x30;
+ else if (private_data->resume.fsr == 4096)
+ reg1 |= 0x10;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG4, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Configure high pass filter */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG2, 0x0F);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (private_data->resume.irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x02;
+ reg2 = 0x00;
+ } else if (private_data->resume.irq_type ==
+ MPU_SLAVE_IRQ_TYPE_MOTION) {
+ reg1 = 0x00;
+ reg2 = private_data->resume.mot_int1_cfg;
+ } else {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_CTRL_REG3, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_THS,
+ private_data->resume.reg_ths);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_DURATION,
+ private_data->resume.reg_dur);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM303DLx_INT1_CFG, reg2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LSM303DLx_HP_FILTER_RESET, 1, &reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ LSM303DLx_STATUS_REG, 1, data);
+ if (data[0] & 0x0F) {
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, slave->read_len, data);
+ return result;
+ } else
+ return INV_ERROR_ACCEL_DATA_NOT_READY;
+}
+
+/**
+ * @brief one-time device driver initialization function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is loaded in the kernel.
+ * If the driver is built-in in the kernel, this function will be
+ * called at boot time.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ long range;
+ struct lsm303dlx_a_private_data *private_data;
+ private_data = (struct lsm303dlx_a_private_data *)
+ kzalloc(sizeof(struct lsm303dlx_a_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ private_data->resume.ctrl_reg1 = 0x37;
+ private_data->suspend.ctrl_reg1 = 0x47;
+ private_data->resume.mot_int1_cfg = 0x95;
+ private_data->suspend.mot_int1_cfg = 0x2a;
+
+ lsm303dlx_a_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ lsm303dlx_a_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ lsm303dlx_a_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ lsm303dlx_a_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+
+ lsm303dlx_a_set_ths(mlsl_handle, pdata, &private_data->suspend,
+ false, 80);
+ lsm303dlx_a_set_ths(mlsl_handle, pdata, &private_data->resume,
+ false, 40);
+
+ lsm303dlx_a_set_dur(mlsl_handle, pdata, &private_data->suspend,
+ false, 1000);
+ lsm303dlx_a_set_dur(mlsl_handle, pdata, &private_data->resume,
+ false, 2540);
+
+ lsm303dlx_a_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ lsm303dlx_a_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief one-time device driver exit function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is removed from the kernel.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief device configuration facility.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to the configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct lsm303dlx_a_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return lsm303dlx_a_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return lsm303dlx_a_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return lsm303dlx_a_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return lsm303dlx_a_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return lsm303dlx_a_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return lsm303dlx_a_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return lsm303dlx_a_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return lsm303dlx_a_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return lsm303dlx_a_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return lsm303dlx_a_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief facility to retrieve the device configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to store the returned configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int lsm303dlx_a_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct lsm303dlx_a_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.irq_type;
+ break;
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_descr lsm303dlx_a_descr = {
+ .init = lsm303dlx_a_init,
+ .exit = lsm303dlx_a_exit,
+ .suspend = lsm303dlx_a_suspend,
+ .resume = lsm303dlx_a_resume,
+ .read = lsm303dlx_a_read,
+ .config = lsm303dlx_a_config,
+ .get_config = lsm303dlx_a_get_config,
+ .name = "lsm303dlx_a",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_LSM303DLX,
+ .read_reg = (0x28 | 0x80), /* 0x80 for burst reads */
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {2, 480},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *lsm303dlx_a_get_slave_descr(void)
+{
+ return &lsm303dlx_a_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct lsm303dlx_a_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int lsm303dlx_a_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct lsm303dlx_a_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ lsm303dlx_a_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int lsm303dlx_a_mod_remove(struct i2c_client *client)
+{
+ struct lsm303dlx_a_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ lsm303dlx_a_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id lsm303dlx_a_mod_id[] = {
+ { "lsm303dlx", ACCEL_ID_LSM303DLX },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lsm303dlx_a_mod_id);
+
+static struct i2c_driver lsm303dlx_a_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = lsm303dlx_a_mod_probe,
+ .remove = lsm303dlx_a_mod_remove,
+ .id_table = lsm303dlx_a_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lsm303dlx_a_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init lsm303dlx_a_mod_init(void)
+{
+ int res = i2c_add_driver(&lsm303dlx_a_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "lsm303dlx_a_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit lsm303dlx_a_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&lsm303dlx_a_mod_driver);
+}
+
+module_init(lsm303dlx_a_mod_init);
+module_exit(lsm303dlx_a_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate LSM303DLX_A sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("lsm303dlx_a_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/mma8450.c b/drivers/misc/inv_mpu/accel/mma8450.c
new file mode 100644
index 000000000000..f698ee98bf50
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/mma8450.c
@@ -0,0 +1,804 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file mma8450.c
+ * @brief Accelerometer setup and handling methods for Freescale MMA8450.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+/* full scale setting - register & mask */
+#define ACCEL_MMA8450_XYZ_DATA_CFG (0x16)
+
+#define ACCEL_MMA8450_CTRL_REG1 (0x38)
+#define ACCEL_MMA8450_CTRL_REG2 (0x39)
+#define ACCEL_MMA8450_CTRL_REG4 (0x3B)
+#define ACCEL_MMA8450_CTRL_REG5 (0x3C)
+
+#define ACCEL_MMA8450_CTRL_REG (0x38)
+#define ACCEL_MMA8450_CTRL_MASK (0x03)
+
+#define ACCEL_MMA8450_SLEEP_MASK (0x03)
+
+/* -------------------------------------------------------------------------- */
+
+struct mma8450_config {
+ unsigned int odr;
+	unsigned int fsr;	/**< full scale range mg */
+	unsigned int ths;	/**< Motion/no-motion threshold mg */
+	unsigned int dur;	/**< Motion/no-motion duration ms */
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char ctrl_reg1;
+ unsigned char irq_type;
+ unsigned char mot_int1_cfg;
+};
+
+struct mma8450_private_data {
+ struct mma8450_config suspend;
+ struct mma8450_config resume;
+};
+
+
+/* -------------------------------------------------------------------------- */
+
+static int mma8450_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma8450_config *config,
+ int apply,
+ long ths)
+{
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+static int mma8450_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma8450_config *config,
+ int apply,
+ long dur)
+{
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+/**
+ * @brief Sets the IRQ to fire when one of the IRQ events occur.
+ * Threshold and duration will not be used unless the type is MOT or
+ * NMOT.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * configuration to apply to, suspend or resume
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param irq_type
+ * the type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma8450_config *config,
+ int apply,
+ long irq_type)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+ unsigned char reg3;
+
+ config->irq_type = (unsigned char)irq_type;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x01;
+ reg2 = 0x01;
+ reg3 = 0x07;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_NONE) {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ reg3 = 0x00;
+ } else {
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ }
+
+ if (apply) {
+ /* XYZ_DATA_CFG: event flag enabled on Z axis */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_XYZ_DATA_CFG, reg3);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG4, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG5, reg2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return result;
+}
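+
+/*
+ * Note: only the data-ready and "none" cases are wired up here, so a motion
+ * IRQ request is reported as INV_ERROR_FEATURE_NOT_IMPLEMENTED. The register
+ * roles are an assumption inferred from the write sequence above: reg1
+ * enables the interrupt source (CTRL_REG4), reg2 routes it (CTRL_REG5) and
+ * reg3 sets the per-axis event flags (XYZ_DATA_CFG).
+ */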
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma8450_config *config,
+ int apply,
+ long odr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ if (odr > 200000) {
+ config->odr = 400000;
+ bits = 0x00;
+ } else if (odr > 100000) {
+ config->odr = 200000;
+ bits = 0x04;
+ } else if (odr > 50000) {
+ config->odr = 100000;
+ bits = 0x08;
+ } else if (odr > 25000) {
+ config->odr = 50000;
+ bits = 0x0C;
+ } else if (odr > 12500) {
+ config->odr = 25000;
+ bits = 0x40; /* Sleep -> Auto wake mode */
+ } else if (odr > 1563) {
+ config->odr = 12500;
+ bits = 0x10;
+ } else if (odr > 0) {
+ config->odr = 1563;
+ bits = 0x14;
+ } else {
+ config->ctrl_reg1 = 0; /* Set FS1.FS2 to Standby */
+ config->odr = 0;
+ bits = 0;
+ }
+
+ config->ctrl_reg1 = bits | (config->ctrl_reg1 & 0x3);
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1, config->ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("ODR: %d mHz, 0x%02x\n",
+ config->odr, (int)config->ctrl_reg1);
+ }
+ return result;
+}
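+
+/*
+ * Worked example (illustrative): the 200000 (200 Hz) resume rate requested
+ * by mma8450_init() keeps config->odr at 200000 and sets the rate bits to
+ * 0x04; after mma8450_set_fsr() later ORs in the 2g code 0x01, ctrl_reg1
+ * ends up as 0x05. A request of 0 clears ctrl_reg1, i.e. standby.
+ */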
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma8450_config *config,
+ int apply,
+ long fsr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ if (fsr <= 2000) {
+ bits = 0x01;
+ config->fsr = 2000;
+ } else if (fsr <= 4000) {
+ bits = 0x02;
+ config->fsr = 4000;
+ } else {
+ bits = 0x03;
+ config->fsr = 8000;
+ }
+
+ config->ctrl_reg1 = bits | (config->ctrl_reg1 & 0xFC);
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1, config->ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("FSR: %d mg\n", config->fsr);
+ }
+ return result;
+}
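+
+/*
+ * Worked example (illustrative): mma8450_init() requests 2000 mg for both
+ * configs, which selects the 0x01 full-scale code in the low two bits of
+ * CTRL_REG1; 4000 mg maps to 0x02 and anything larger to 0x03 (8000 mg).
+ */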
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct mma8450_private_data *private_data = pdata->private_data;
+
+ if (private_data->suspend.fsr == 4000)
+ slave->range.mantissa = 4;
+ else if (private_data->suspend.fsr == 8000)
+ slave->range.mantissa = 8;
+ else
+ slave->range.mantissa = 2;
+ slave->range.fraction = 0;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (private_data->suspend.ctrl_reg1) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ result = mma8450_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ true, private_data->suspend.irq_type);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
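+
+/*
+ * Note: unlike the ST parts handled above, suspend/resume for the MMA8450
+ * also rewrite slave->range so the MPU core sees the mantissa (2, 4 or 8 g)
+ * matching the full scale of that state, and the device is always dropped to
+ * standby (CTRL_REG1 = 0) before the stored control value is re-applied.
+ */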
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ struct mma8450_private_data *private_data = pdata->private_data;
+
+ /* Full Scale */
+ if (private_data->resume.fsr == 4000)
+ slave->range.mantissa = 4;
+ else if (private_data->resume.fsr == 8000)
+ slave->range.mantissa = 8;
+ else
+ slave->range.mantissa = 2;
+ slave->range.fraction = 0;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (private_data->resume.ctrl_reg1) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA8450_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ result = mma8450_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ true, private_data->resume.irq_type);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata, unsigned char *data)
+{
+ int result;
+ unsigned char local_data[4]; /* Status register + 3 bytes data */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ 0x00, sizeof(local_data), local_data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ memcpy(data, &local_data[1], (slave->read_len) - 1);
+
+	MPL_LOGV("Raw data: %02x %02x %02x %02x\n",
+ local_data[0], local_data[1],
+ local_data[2], local_data[3]);
+
+ return result;
+}
+
+/**
+ * @brief one-time device driver initialization function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is loaded in the kernel.
+ * If the driver is built-in in the kernel, this function will be
+ * called at boot time.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ struct mma8450_private_data *private_data;
+ private_data = (struct mma8450_private_data *)
+ kzalloc(sizeof(struct mma8450_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ mma8450_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ mma8450_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+ mma8450_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, 2000);
+ mma8450_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, 2000);
+ mma8450_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false,
+ MPU_SLAVE_IRQ_TYPE_NONE);
+ mma8450_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false,
+ MPU_SLAVE_IRQ_TYPE_NONE);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief one-time device driver exit function.
+ * If the driver is built as a kernel module, this function will be
+ * called when the module is removed from the kernel.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+/**
+ * @brief device configuration facility.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to the configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct mma8450_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return mma8450_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return mma8450_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return mma8450_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return mma8450_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return mma8450_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return mma8450_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return mma8450_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return mma8450_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return mma8450_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return mma8450_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
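The switch above is driven entirely through struct ext_slave_config, so a caller picks a setting by filling in key, apply and data and dispatching through the descriptor. A minimal usage sketch, assuming a caller that already holds the slave descriptor and platform data (the helper name and calling context are illustrative, not part of the patch):

/* Sketch only: helper name and calling context are hypothetical. */
static int example_set_resume_odr(void *mlsl_handle,
				  struct ext_slave_descr *slave,
				  struct ext_slave_platform_data *pdata)
{
	long odr_mhz = 200000;			/* 200 Hz, expressed in milli-Hz */
	struct ext_slave_config cfg = {
		.key   = MPU_SLAVE_CONFIG_ODR_RESUME,
		.len   = sizeof(odr_mhz),
		.apply = true,			/* program the register immediately */
		.data  = &odr_mhz,
	};

	/* mma8450_config() handles the key and calls mma8450_set_odr(). */
	return slave->config(mlsl_handle, slave, pdata, &cfg);
}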
+
+/**
+ * @brief facility to retrieve the device configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a pointer to store the returned configuration data structure.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma8450_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct mma8450_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.irq_type;
+ break;
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_descr mma8450_descr = {
+ .init = mma8450_init,
+ .exit = mma8450_exit,
+ .suspend = mma8450_suspend,
+ .resume = mma8450_resume,
+ .read = mma8450_read,
+ .config = mma8450_config,
+ .get_config = mma8450_get_config,
+ .name = "mma8450",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_MMA8450,
+ .read_reg = 0x00,
+ .read_len = 4,
+ .endian = EXT_SLAVE_FS8_BIG_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *mma8450_get_slave_descr(void)
+{
+ return &mma8450_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct mma8450_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int mma8450_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct mma8450_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ mma8450_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
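The probe above requires client->dev.platform_data to point at a struct ext_slave_platform_data, so the device has to be declared accordingly in the board file, typically via i2c_register_board_info() at board init. A hedged sketch; the 0x1c address and the empty platform data are placeholders, since the full layout of ext_slave_platform_data is not shown in this patch:

/* Hypothetical board-file snippet; address and pdata contents are
 * placeholders, not taken from this patch. */
static struct ext_slave_platform_data example_mma8450_pdata;

static struct i2c_board_info example_mma8450_board_info __initdata = {
	I2C_BOARD_INFO("mma8450", 0x1c),	/* name matches mma8450_mod_id[] */
	.platform_data = &example_mma8450_pdata,
};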
+
+static int mma8450_mod_remove(struct i2c_client *client)
+{
+ struct mma8450_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ mma8450_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id mma8450_mod_id[] = {
+ { "mma8450", ACCEL_ID_MMA8450 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mma8450_mod_id);
+
+static struct i2c_driver mma8450_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = mma8450_mod_probe,
+ .remove = mma8450_mod_remove,
+ .id_table = mma8450_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mma8450_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init mma8450_mod_init(void)
+{
+ int res = i2c_add_driver(&mma8450_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "mma8450_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit mma8450_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&mma8450_mod_driver);
+}
+
+module_init(mma8450_mod_init);
+module_exit(mma8450_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate MMA8450 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("mma8450_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/mma845x.c b/drivers/misc/inv_mpu/accel/mma845x.c
new file mode 100644
index 000000000000..5f62b22388b1
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/mma845x.c
@@ -0,0 +1,713 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup ACCELDL
+ * @brief Provides the interface to setup and handle an accelerometer.
+ *
+ * @{
+ * @file mma845x.c
+ * @brief Accelerometer setup and handling methods for Freescale MMA845X
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+#define ACCEL_MMA845X_XYZ_DATA_CFG (0x0E)
+#define ACCEL_MMA845X_CTRL_REG1 (0x2A)
+#define ACCEL_MMA845X_CTRL_REG4 (0x2D)
+#define ACCEL_MMA845X_CTRL_REG5 (0x2E)
+
+#define ACCEL_MMA845X_SLEEP_MASK (0x01)
+
+/* full scale setting - register & mask */
+#define ACCEL_MMA845X_CFG_REG (0x0E)
+#define ACCEL_MMA845X_CTRL_MASK (0x03)
+
+/* -------------------------------------------------------------------------- */
+
+struct mma845x_config {
+ unsigned int odr;
+	unsigned int fsr;	/**< full scale range in mg */
+	unsigned int ths;	/**< motion/no-motion threshold in mg */
+	unsigned int dur;	/**< motion/no-motion duration in ms */
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char ctrl_reg1;
+ unsigned char irq_type;
+ unsigned char mot_int1_cfg;
+};
+
+struct mma845x_private_data {
+ struct mma845x_config suspend;
+ struct mma845x_config resume;
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int mma845x_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma845x_config *config,
+ int apply,
+ long ths)
+{
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+static int mma845x_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma845x_config *config,
+ int apply,
+ long dur)
+{
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+/**
+ * @brief Sets the IRQ to fire when one of the IRQ events occur.
+ * Threshold and duration will not be used unless the type is MOT or
+ * NMOT.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * configuration to apply to, suspend or resume
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param irq_type
+ * the type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma845x_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma845x_config *config,
+ int apply,
+ long irq_type)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg1;
+ unsigned char reg2;
+
+ config->irq_type = (unsigned char)irq_type;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ reg1 = 0x01;
+ reg2 = 0x01;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_NONE) {
+ reg1 = 0x00;
+ reg2 = 0x00;
+ } else {
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ }
+
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA845X_CTRL_REG4, reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA845X_CTRL_REG5, reg2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return result;
+}
+
+/**
+ * @brief Set the output data rate for the particular configuration.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * Config to modify with new ODR.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param odr
+ * Output data rate in units of 1/1000Hz (mHz).
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma845x_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma845x_config *config,
+ int apply,
+ long odr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ if (odr > 400000) {
+ config->odr = 800000;
+ bits = 0x01;
+ } else if (odr > 200000) {
+ config->odr = 400000;
+ bits = 0x09;
+ } else if (odr > 100000) {
+ config->odr = 200000;
+ bits = 0x11;
+ } else if (odr > 50000) {
+ config->odr = 100000;
+ bits = 0x19;
+ } else if (odr > 12500) {
+ config->odr = 50000;
+ bits = 0x21;
+ } else if (odr > 6250) {
+ config->odr = 12500;
+ bits = 0x29;
+ } else if (odr > 1560) {
+ config->odr = 6250;
+ bits = 0x31;
+ } else if (odr > 0) {
+ config->odr = 1560;
+ bits = 0x39;
+ } else {
+ config->ctrl_reg1 = 0; /* Set FS1.FS2 to Standby */
+ config->odr = 0;
+ bits = 0;
+ }
+
+ config->ctrl_reg1 = bits;
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA845X_CTRL_REG1,
+ config->ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("ODR: %d mHz, 0x%02x\n", config->odr,
+ (int)config->ctrl_reg1);
+ }
+ return result;
+}
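The if/else cascade above rounds a requested rate up to the next output data rate the part supports, so for example a 150000 mHz (150 Hz) request programs 200000 mHz with ctrl_reg1 = 0x11. The same rule, written out as a standalone helper for illustration only:

/* Mirrors the thresholds used in mma845x_set_odr(); illustrative only. */
static long example_mma845x_rounded_odr(long requested_mhz)
{
	static const long rates_mhz[] = {
		1560, 6250, 12500, 50000, 100000, 200000, 400000, 800000
	};
	int i;

	if (requested_mhz <= 0)
		return 0;			/* standby */
	for (i = 0; i < ARRAY_SIZE(rates_mhz); i++)
		if (requested_mhz <= rates_mhz[i])
			return rates_mhz[i];
	return 800000;				/* clamp at the maximum rate */
}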
+
+/**
+ * @brief Set the full scale range of the accels
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param config
+ * pointer to configuration.
+ * @param apply
+ * whether to apply immediately or save the settings to be applied
+ * at the next resume.
+ * @param fsr
+ * requested full scale range.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma845x_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct mma845x_config *config,
+ int apply,
+ long fsr)
+{
+ unsigned char bits;
+ int result = INV_SUCCESS;
+
+ if (fsr <= 2000) {
+ bits = 0x00;
+ config->fsr = 2000;
+ } else if (fsr <= 4000) {
+ bits = 0x01;
+ config->fsr = 4000;
+ } else {
+ bits = 0x02;
+ config->fsr = 8000;
+ }
+
+ if (apply) {
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA845X_XYZ_DATA_CFG,
+ bits);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("FSR: %d mg\n", config->fsr);
+ }
+ return result;
+}
+
+/**
+ * @brief suspends the device to put it in its lowest power mode.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma845x_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct mma845x_private_data *private_data = pdata->private_data;
+
+ /* Full Scale */
+ if (private_data->suspend.fsr == 4000)
+ slave->range.mantissa = 4;
+ else if (private_data->suspend.fsr == 8000)
+ slave->range.mantissa = 8;
+ else
+ slave->range.mantissa = 2;
+
+ slave->range.fraction = 0;
+
+ result = mma845x_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ true, private_data->suspend.fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA845X_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief resume the device in the proper power state given the configuration
+ * chosen.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma845x_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ struct mma845x_private_data *private_data = pdata->private_data;
+
+ /* Full Scale */
+ if (private_data->resume.fsr == 4000)
+ slave->range.mantissa = 4;
+ else if (private_data->resume.fsr == 8000)
+ slave->range.mantissa = 8;
+ else
+ slave->range.mantissa = 2;
+
+ slave->range.fraction = 0;
+
+ result = mma845x_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ true, private_data->resume.fsr);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ ACCEL_MMA845X_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+/**
+ * @brief read the sensor data from the device.
+ *
+ * @param mlsl_handle
+ * the handle to the serial channel the device is connected to.
+ * @param slave
+ * a pointer to the slave descriptor data structure.
+ * @param pdata
+ * a pointer to the slave platform data.
+ * @param data
+ * a buffer to store the data read.
+ *
+ * @return INV_SUCCESS if successful or a non-zero error code.
+ */
+static int mma845x_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata, unsigned char *data)
+{
+ int result;
+ unsigned char local_data[7]; /* Status register + 6 bytes data */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ slave->read_reg, sizeof(local_data),
+ local_data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ memcpy(data, &local_data[1], slave->read_len);
+ return result;
+}
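The six bytes copied out above are raw register contents; the descriptor further down declares EXT_SLAVE_FS16_BIG_ENDIAN, i.e. MSB first for each axis. A sketch of how a consumer might assemble them (helper name is illustrative):

/* Sketch: combine the 6 data bytes returned by mma845x_read() into three
 * signed 16-bit axis values, big-endian per the descriptor's endian field. */
static void example_mma845x_unpack(const unsigned char *data, s16 xyz[3])
{
	int i;

	for (i = 0; i < 3; i++)
		xyz[i] = (s16)((data[2 * i] << 8) | data[2 * i + 1]);
}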
+
+static int mma845x_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ long range;
+ struct mma845x_private_data *private_data;
+ private_data = (struct mma845x_private_data *)
+ kzalloc(sizeof(struct mma845x_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+
+ mma845x_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ false, 0);
+ mma845x_set_odr(mlsl_handle, pdata, &private_data->resume,
+ false, 200000);
+
+ range = range_fixedpoint_to_long_mg(slave->range);
+ mma845x_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ false, range);
+ mma845x_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ false, range);
+
+ mma845x_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ mma845x_set_irq(mlsl_handle, pdata, &private_data->resume,
+ false, MPU_SLAVE_IRQ_TYPE_NONE);
+ return INV_SUCCESS;
+}
+
+static int mma845x_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int mma845x_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct mma845x_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return mma845x_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ return mma845x_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return mma845x_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return mma845x_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return mma845x_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return mma845x_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return mma845x_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return mma845x_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return mma845x_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return mma845x_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static int mma845x_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct mma845x_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.irq_type;
+ break;
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_descr mma845x_descr = {
+ .init = mma845x_init,
+ .exit = mma845x_exit,
+ .suspend = mma845x_suspend,
+ .resume = mma845x_resume,
+ .read = mma845x_read,
+ .config = mma845x_config,
+ .get_config = mma845x_get_config,
+ .name = "mma845x",
+ .type = EXT_SLAVE_TYPE_ACCEL,
+ .id = ACCEL_ID_MMA845X,
+ .read_reg = 0x00,
+ .read_len = 6,
+ .endian = EXT_SLAVE_FS16_BIG_ENDIAN,
+ .range = {2, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *mma845x_get_slave_descr(void)
+{
+ return &mma845x_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct mma845x_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int mma845x_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct mma845x_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ mma845x_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int mma845x_mod_remove(struct i2c_client *client)
+{
+ struct mma845x_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ mma845x_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id mma845x_mod_id[] = {
+ { "mma845x", ACCEL_ID_MMA845X },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mma845x_mod_id);
+
+static struct i2c_driver mma845x_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = mma845x_mod_probe,
+ .remove = mma845x_mod_remove,
+ .id_table = mma845x_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mma845x_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init mma845x_mod_init(void)
+{
+ int res = i2c_add_driver(&mma845x_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "mma845x_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit mma845x_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&mma845x_mod_driver);
+}
+
+module_init(mma845x_mod_init);
+module_exit(mma845x_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate MMA845X sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("mma845x_mod");
+
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/accel/mpu6050.h b/drivers/misc/inv_mpu/accel/mpu6050.h
new file mode 100644
index 000000000000..c347bcb4d773
--- /dev/null
+++ b/drivers/misc/inv_mpu/accel/mpu6050.h
@@ -0,0 +1,28 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+
+#ifndef __MPU6050_H__
+#define __MPU6050_H__
+
+#include <linux/mpu.h>
+
+struct ext_slave_descr *mpu6050_get_slave_descr(void);
+
+#endif
diff --git a/drivers/misc/inv_mpu/compass/Kconfig b/drivers/misc/inv_mpu/compass/Kconfig
new file mode 100644
index 000000000000..6c4c13a6e19a
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/Kconfig
@@ -0,0 +1,121 @@
+menuconfig INV_SENSORS_COMPASS
+ bool "Compass Slave Sensors"
+ default y
+ ---help---
+ Say Y here to get to see options for device drivers for various
+ compasses. This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if INV_SENSORS_COMPASS
+
+config MPU_SENSORS_AK8975
+ tristate "AKM ak8975"
+ help
+	  This enables support for the AKM ak8975 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_AK8972
+ tristate "AKM ak8972"
+ help
+	  This enables support for the AKM ak8972 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_MMC314X
+ tristate "MEMSIC mmc314x"
+ help
+	  This enables support for the MEMSIC mmc314x compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_AMI30X
+ tristate "Aichi Steel ami30X"
+ help
+	  This enables support for the Aichi Steel ami304/ami305 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_AMI306
+ tristate "Aichi Steel ami306"
+ help
+	  This enables support for the Aichi Steel ami306 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_HMC5883
+ tristate "Honeywell hmc5883"
+ help
+	  This enables support for the Honeywell hmc5883 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_LSM303DLX_M
+ tristate "ST lsm303dlx"
+ help
+	  This enables support for the ST lsm303dlh and lsm303dlm compasses.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_MMC314XMS
+ tristate "MEMSIC mmc314xMS"
+ help
+	  This enables support for the MEMSIC mmc314xMS compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_YAS529
+ tristate "Yamaha yas529"
+ depends on INPUT_YAS_MAGNETOMETER
+ help
+	  This enables support for the Yamaha yas529 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_YAS530
+ tristate "Yamaha yas530"
+ help
+	  This enables support for the Yamaha yas530 compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_HSCDTD002B
+ tristate "Alps hscdtd002b"
+ help
+	  This enables support for the Alps hscdtd002b compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+config MPU_SENSORS_HSCDTD004A
+ tristate "Alps hscdtd004a"
+ help
+	  This enables support for the Alps hscdtd004a compass.
+	  This support is for integration with the MPU3050 or MPU6050 gyroscope
+	  device driver. Only one compass can be registered at a time.
+	  Specifying more than one compass in the board file will result
+	  in runtime errors.
+
+endif
diff --git a/drivers/misc/inv_mpu/compass/Makefile b/drivers/misc/inv_mpu/compass/Makefile
new file mode 100644
index 000000000000..aa8aa6a2657b
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/Makefile
@@ -0,0 +1,38 @@
+#
+# Compass Slaves MPUxxxx
+#
+obj-$(CONFIG_MPU_SENSORS_AMI30X) += inv_mpu_ami30x.o
+inv_mpu_ami30x-objs += ami30x.o
+
+obj-$(CONFIG_MPU_SENSORS_AMI306) += inv_mpu_ami306.o
+inv_mpu_ami306-objs += ami306.o
+
+obj-$(CONFIG_MPU_SENSORS_HMC5883) += inv_mpu_hmc5883.o
+inv_mpu_hmc5883-objs += hmc5883.o
+
+obj-$(CONFIG_MPU_SENSORS_LSM303DLX_M) += inv_mpu_lsm303dlx_m.o
+inv_mpu_lsm303dlx_m-objs += lsm303dlx_m.o
+
+obj-$(CONFIG_MPU_SENSORS_MMC314X) += inv_mpu_mmc314x.o
+inv_mpu_mmc314x-objs += mmc314x.o
+
+obj-$(CONFIG_MPU_SENSORS_YAS529) += inv_mpu_yas529.o
+inv_mpu_yas529-objs += yas529-kernel.o
+
+obj-$(CONFIG_MPU_SENSORS_YAS530) += inv_mpu_yas530.o
+inv_mpu_yas530-objs += yas530.o
+
+obj-$(CONFIG_MPU_SENSORS_HSCDTD002B) += inv_mpu_hscdtd002b.o
+inv_mpu_hscdtd002b-objs += hscdtd002b.o
+
+obj-$(CONFIG_MPU_SENSORS_HSCDTD004A) += inv_mpu_hscdtd004a.o
+inv_mpu_hscdtd004a-objs += hscdtd004a.o
+
+obj-$(CONFIG_MPU_SENSORS_AK8975) += inv_mpu_ak8975.o
+inv_mpu_ak8975-objs += ak8975.o
+
+obj-$(CONFIG_MPU_SENSORS_AK8972) += inv_mpu_ak8972.o
+inv_mpu_ak8972-objs += ak8972.o
+
+EXTRA_CFLAGS += -Idrivers/misc/inv_mpu
+EXTRA_CFLAGS += -D__C99_DESIGNATED_INITIALIZER
diff --git a/drivers/misc/inv_mpu/compass/ak8972.c b/drivers/misc/inv_mpu/compass/ak8972.c
new file mode 100644
index 000000000000..7eb15b44039d
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/ak8972.c
@@ -0,0 +1,499 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file ak8972.c
+ * @brief Magnetometer setup and handling methods for the AKM AK8972 compass device.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define AK8972_REG_ST1 (0x02)
+#define AK8972_REG_HXL (0x03)
+#define AK8972_REG_ST2 (0x09)
+
+#define AK8972_REG_CNTL (0x0A)
+#define AK8972_REG_ASAX (0x10)
+#define AK8972_REG_ASAY (0x11)
+#define AK8972_REG_ASAZ (0x12)
+
+#define AK8972_CNTL_MODE_POWER_DOWN (0x00)
+#define AK8972_CNTL_MODE_SINGLE_MEASUREMENT (0x01)
+#define AK8972_CNTL_MODE_FUSE_ROM_ACCESS (0x0f)
+
+/* -------------------------------------------------------------------------- */
+struct ak8972_config {
+ char asa[COMPASS_NUM_AXES]; /* axis sensitivity adjustment */
+};
+
+struct ak8972_private_data {
+ struct ak8972_config init;
+};
+
+/* -------------------------------------------------------------------------- */
+static int ak8972_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char serial_data[COMPASS_NUM_AXES];
+
+ struct ak8972_private_data *private_data;
+ private_data = (struct ak8972_private_data *)
+ kzalloc(sizeof(struct ak8972_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8972_REG_CNTL,
+ AK8972_CNTL_MODE_POWER_DOWN);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Wait at least 100us */
+ udelay(100);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8972_REG_CNTL,
+ AK8972_CNTL_MODE_FUSE_ROM_ACCESS);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Wait at least 200us */
+ udelay(200);
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AK8972_REG_ASAX,
+ COMPASS_NUM_AXES, serial_data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ pdata->private_data = private_data;
+
+ private_data->init.asa[0] = serial_data[0];
+ private_data->init.asa[1] = serial_data[1];
+ private_data->init.asa[2] = serial_data[2];
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8972_REG_CNTL,
+ AK8972_CNTL_MODE_POWER_DOWN);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ udelay(100);
+ return INV_SUCCESS;
+}
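ak8972_init() only caches the fuse-ROM sensitivity adjustment values (ASA); nothing in this patch applies them. For reference, the usual AKM adjustment is Hadj = H * ((ASA - 128) / 256 + 1); treat the helper below as an assumption based on the AKM datasheet rather than as part of the driver:

/* Assumed AKM sensitivity adjustment; not part of this patch. */
static s16 example_ak8972_adjust(s16 raw, unsigned char asa)
{
	/* Hadj = H * ((ASA - 128) / 256 + 1) == H * (ASA + 128) / 256 */
	return (s16)(((int)raw * ((int)asa + 128)) / 256);
}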
+
+static int ak8972_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int ak8972_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8972_REG_CNTL,
+ AK8972_CNTL_MODE_POWER_DOWN);
+ msleep(1); /* wait at least 100us */
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int ak8972_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8972_REG_CNTL,
+ AK8972_CNTL_MODE_SINGLE_MEASUREMENT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int ak8972_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata, unsigned char *data)
+{
+ unsigned char regs[8];
+ unsigned char *stat = &regs[0];
+ unsigned char *stat2 = &regs[7];
+ int result = INV_SUCCESS;
+ int status = INV_SUCCESS;
+
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, AK8972_REG_ST1,
+ 8, regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Always return the data and the status registers */
+ memcpy(data, &regs[1], 6);
+ data[6] = regs[0];
+ data[7] = regs[7];
+
+ /*
+ * ST : data ready -
+ * Measurement has been completed and data is ready to be read.
+ */
+ if (*stat & 0x01)
+ status = INV_SUCCESS;
+
+ /*
+ * ST2 : data error -
+ * occurs when data read is started outside of a readable period;
+ * data read would not be correct.
+ * Valid in continuous measurement mode only.
+	 * In single measurement mode this error should not occur, but we
+	 * still account for it and return an error, since the data would be
+ * corrupted.
+ * DERR bit is self-clearing when ST2 register is read.
+ */
+ if (*stat2 & 0x04)
+ status = INV_ERROR_COMPASS_DATA_ERROR;
+ /*
+ * ST2 : overflow -
+	 * the sum of the absolute values of all axes |X|+|Y|+|Z| exceeded
+	 * 2400 uT. This is likely to happen in the presence of an external
+	 * magnetic disturbance; it indicates the sensor data is incorrect
+	 * and should be ignored.
+ * An error is returned.
+ * HOFL bit clears when a new measurement starts.
+ */
+ if (*stat2 & 0x08)
+ status = INV_ERROR_COMPASS_DATA_OVERFLOW;
+ /*
+ * ST : overrun -
+ * the previous sample was not fetched and lost.
+ * Valid in continuous measurement mode only.
+	 * In single measurement mode this error should not occur and we
+ * don't consider this condition an error.
+ * DOR bit is self-clearing when ST2 or any meas. data register is
+ * read.
+ */
+ if (*stat & 0x02) {
+ /* status = INV_ERROR_COMPASS_DATA_UNDERFLOW; */
+ status = INV_SUCCESS;
+ }
+
+ /*
+	 * Trigger the next measurement if:
+	 * - stat is non-zero, or
+	 * - stat is zero and stat2 is non-zero.
+ * Won't trigger if data is not ready and there was no error.
+ */
+ if (*stat != 0x00 || *stat2 != 0x00) {
+ result = inv_serial_single_write(
+ mlsl_handle, pdata->address,
+ AK8972_REG_CNTL, AK8972_CNTL_MODE_SINGLE_MEASUREMENT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return status;
+}
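Because ak8972_read() always fills the buffer, the caller receives the measurement in data[0..5] plus ST1 in data[6] and ST2 in data[7], and can re-check validity on its own. A minimal sketch using the same bit positions as the comments above (helper name is illustrative):

/* Sketch: data layout follows ak8972_read(), ST1 in data[6], ST2 in data[7]. */
static bool example_ak8972_sample_valid(const unsigned char *data)
{
	bool ready    = data[6] & 0x01;			/* DRDY */
	bool bad_data = data[7] & (0x04 | 0x08);	/* DERR or HOFL */

	return ready && !bad_data;
}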
+
+static int ak8972_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ int result;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_WRITE_REGISTERS:
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ data->len,
+ (unsigned char *)data->data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ break;
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static int ak8972_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct ak8972_private_data *private_data = pdata->private_data;
+ int result;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_READ_REGISTERS:
+ {
+ unsigned char *serial_data =
+ (unsigned char *)data->data;
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ serial_data[0], data->len - 1,
+ &serial_data[1]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ break;
+ }
+ case MPU_SLAVE_READ_SCALE:
+ {
+ unsigned char *serial_data =
+ (unsigned char *)data->data;
+ serial_data[0] = private_data->init.asa[0];
+ serial_data[1] = private_data->init.asa[1];
+ serial_data[2] = private_data->init.asa[2];
+ result = INV_SUCCESS;
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ break;
+ }
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) = 0;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) = 8000;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_read_trigger ak8972_read_trigger = {
+ /*.reg = */ 0x0A,
+ /*.value = */ 0x01
+};
+
+static struct ext_slave_descr ak8972_descr = {
+ .init = ak8972_init,
+ .exit = ak8972_exit,
+ .suspend = ak8972_suspend,
+ .resume = ak8972_resume,
+ .read = ak8972_read,
+ .config = ak8972_config,
+ .get_config = ak8972_get_config,
+ .name = "ak8972",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_AK8972,
+ .read_reg = 0x01,
+ .read_len = 9,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {39321, 6000},
+ .trigger = &ak8972_read_trigger,
+};
+
+static
+struct ext_slave_descr *ak8972_get_slave_descr(void)
+{
+ return &ak8972_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct ak8972_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int ak8972_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct ak8972_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ ak8972_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int ak8972_mod_remove(struct i2c_client *client)
+{
+ struct ak8972_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ ak8972_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id ak8972_mod_id[] = {
+ { "ak8972", COMPASS_ID_AK8972 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ak8972_mod_id);
+
+static struct i2c_driver ak8972_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = ak8972_mod_probe,
+ .remove = ak8972_mod_remove,
+ .id_table = ak8972_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ak8972_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init ak8972_mod_init(void)
+{
+ int res = i2c_add_driver(&ak8972_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "ak8972_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit ak8972_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&ak8972_mod_driver);
+}
+
+module_init(ak8972_mod_init);
+module_exit(ak8972_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate AK8972 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ak8972_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/ak8975.c b/drivers/misc/inv_mpu/compass/ak8975.c
new file mode 100644
index 000000000000..3642e29e89a7
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/ak8975.c
@@ -0,0 +1,500 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file ak8975.c
+ * @brief Magnetometer setup and handling methods for the AKM AK8975,
+ * AKM AK8975B, and AKM AK8975C compass devices.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define AK8975_REG_ST1 (0x02)
+#define AK8975_REG_HXL (0x03)
+#define AK8975_REG_ST2 (0x09)
+
+#define AK8975_REG_CNTL (0x0A)
+#define AK8975_REG_ASAX (0x10)
+#define AK8975_REG_ASAY (0x11)
+#define AK8975_REG_ASAZ (0x12)
+
+#define AK8975_CNTL_MODE_POWER_DOWN (0x00)
+#define AK8975_CNTL_MODE_SINGLE_MEASUREMENT (0x01)
+#define AK8975_CNTL_MODE_FUSE_ROM_ACCESS (0x0f)
+
+/* -------------------------------------------------------------------------- */
+struct ak8975_config {
+ char asa[COMPASS_NUM_AXES]; /* axis sensitivity adjustment */
+};
+
+struct ak8975_private_data {
+ struct ak8975_config init;
+};
+
+/* -------------------------------------------------------------------------- */
+static int ak8975_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char serial_data[COMPASS_NUM_AXES];
+
+ struct ak8975_private_data *private_data;
+ private_data = (struct ak8975_private_data *)
+ kzalloc(sizeof(struct ak8975_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_POWER_DOWN);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Wait at least 100us */
+ udelay(100);
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_FUSE_ROM_ACCESS);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Wait at least 200us */
+ udelay(200);
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AK8975_REG_ASAX,
+ COMPASS_NUM_AXES, serial_data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ pdata->private_data = private_data;
+
+ private_data->init.asa[0] = serial_data[0];
+ private_data->init.asa[1] = serial_data[1];
+ private_data->init.asa[2] = serial_data[2];
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_POWER_DOWN);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ udelay(100);
+ return INV_SUCCESS;
+}
+
+static int ak8975_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int ak8975_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_POWER_DOWN);
+ msleep(1); /* wait at least 100us */
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int ak8975_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_SINGLE_MEASUREMENT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+static int ak8975_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata, unsigned char *data)
+{
+ unsigned char regs[8];
+ unsigned char *stat = &regs[0];
+ unsigned char *stat2 = &regs[7];
+ int result = INV_SUCCESS;
+ int status = INV_SUCCESS;
+
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, AK8975_REG_ST1,
+ 8, regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Always return the data and the status registers */
+ memcpy(data, &regs[1], 6);
+ data[6] = regs[0];
+ data[7] = regs[7];
+
+ /*
+ * ST : data ready -
+ * Measurement has been completed and data is ready to be read.
+ */
+ if (*stat & 0x01)
+ status = INV_SUCCESS;
+
+ /*
+ * ST2 : data error -
+ * occurs when data read is started outside of a readable period;
+ * data read would not be correct.
+ * Valid in continuous measurement mode only.
+	 * In single measurement mode this error should not occur, but we
+	 * still account for it and return an error, since the data would be
+ * corrupted.
+ * DERR bit is self-clearing when ST2 register is read.
+ */
+ if (*stat2 & 0x04)
+ status = INV_ERROR_COMPASS_DATA_ERROR;
+ /*
+ * ST2 : overflow -
+	 * the sum of the absolute values of all axes |X|+|Y|+|Z| exceeded
+	 * 2400 uT. This is likely to happen in the presence of an external
+	 * magnetic disturbance; it indicates the sensor data is incorrect
+	 * and should be ignored.
+ * An error is returned.
+ * HOFL bit clears when a new measurement starts.
+ */
+ if (*stat2 & 0x08)
+ status = INV_ERROR_COMPASS_DATA_OVERFLOW;
+ /*
+ * ST : overrun -
+ * the previous sample was not fetched and lost.
+ * Valid in continuous measurement mode only.
+	 * In single measurement mode this error should not occur and we
+ * don't consider this condition an error.
+ * DOR bit is self-clearing when ST2 or any meas. data register is
+ * read.
+ */
+ if (*stat & 0x02) {
+ /* status = INV_ERROR_COMPASS_DATA_UNDERFLOW; */
+ status = INV_SUCCESS;
+ }
+
+ /*
+	 * Trigger the next measurement if:
+	 * - stat is non-zero, or
+	 * - stat is zero and stat2 is non-zero.
+ * Won't trigger if data is not ready and there was no error.
+ */
+ if (*stat != 0x00 || *stat2 != 0x00) {
+ result = inv_serial_single_write(
+ mlsl_handle, pdata->address,
+ AK8975_REG_CNTL, AK8975_CNTL_MODE_SINGLE_MEASUREMENT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return status;
+}
+
+static int ak8975_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ int result;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_WRITE_REGISTERS:
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ data->len,
+ (unsigned char *)data->data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ break;
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
+
+static int ak8975_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct ak8975_private_data *private_data = pdata->private_data;
+ int result;
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_READ_REGISTERS:
+ {
+ unsigned char *serial_data =
+ (unsigned char *)data->data;
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ serial_data[0], data->len - 1,
+ &serial_data[1]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ break;
+ }
+ case MPU_SLAVE_READ_SCALE:
+ {
+ unsigned char *serial_data =
+ (unsigned char *)data->data;
+ serial_data[0] = private_data->init.asa[0];
+ serial_data[1] = private_data->init.asa[1];
+ serial_data[2] = private_data->init.asa[2];
+ result = INV_SUCCESS;
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ break;
+ }
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) = 0;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) = 8000;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return INV_SUCCESS;
+}
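MPU_SLAVE_READ_SCALE above is the path by which the ASA values cached in ak8975_init() reach higher layers. A hedged sketch of fetching them through the descriptor's get_config callback (helper name and calling context are assumptions, not part of the patch):

/* Sketch only: reads back the three ASA bytes cached by ak8975_init(). */
static int example_ak8975_read_asa(void *mlsl_handle,
				   struct ext_slave_descr *slave,
				   struct ext_slave_platform_data *pdata,
				   unsigned char asa[3])
{
	struct ext_slave_config cfg = {
		.key  = MPU_SLAVE_READ_SCALE,
		.len  = 3,
		.data = asa,
	};

	return slave->get_config(mlsl_handle, slave, pdata, &cfg);
}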
+
+static struct ext_slave_read_trigger ak8975_read_trigger = {
+ /*.reg = */ 0x0A,
+ /*.value = */ 0x01
+};
+
+static struct ext_slave_descr ak8975_descr = {
+ .init = ak8975_init,
+ .exit = ak8975_exit,
+ .suspend = ak8975_suspend,
+ .resume = ak8975_resume,
+ .read = ak8975_read,
+ .config = ak8975_config,
+ .get_config = ak8975_get_config,
+ .name = "ak8975",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_AK8975,
+ .read_reg = 0x01,
+ .read_len = 10,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {9830, 4000},
+ .trigger = &ak8975_read_trigger,
+};
+
+static
+struct ext_slave_descr *ak8975_get_slave_descr(void)
+{
+ return &ak8975_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct ak8975_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int ak8975_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct ak8975_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ ak8975_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int ak8975_mod_remove(struct i2c_client *client)
+{
+ struct ak8975_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ ak8975_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id ak8975_mod_id[] = {
+ { "ak8975", COMPASS_ID_AK8975 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ak8975_mod_id);
+
+static struct i2c_driver ak8975_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = ak8975_mod_probe,
+ .remove = ak8975_mod_remove,
+ .id_table = ak8975_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ak8975_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init ak8975_mod_init(void)
+{
+ int res = i2c_add_driver(&ak8975_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "ak8975_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit ak8975_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&ak8975_mod_driver);
+}
+
+module_init(ak8975_mod_init);
+module_exit(ak8975_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate AK8975 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ak8975_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/ami306.c b/drivers/misc/inv_mpu/compass/ami306.c
new file mode 100644
index 000000000000..f645457d1612
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/ami306.c
@@ -0,0 +1,1020 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file ami306.c
+ * @brief Magnetometer setup and handling methods for Aichi AMI306
+ * compass.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include "ami_hw.h"
+#include "ami_sensor_def.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define AMI306_REG_DATAX (0x10)
+#define AMI306_REG_STAT1 (0x18)
+#define AMI306_REG_CNTL1 (0x1B)
+#define AMI306_REG_CNTL2 (0x1C)
+#define AMI306_REG_CNTL3 (0x1D)
+#define AMI306_REG_CNTL4_1 (0x5C)
+#define AMI306_REG_CNTL4_2 (0x5D)
+
+#define AMI306_BIT_CNTL1_PC1 (0x80)
+#define AMI306_BIT_CNTL1_ODR1 (0x10)
+#define AMI306_BIT_CNTL1_FS1 (0x02)
+
+#define AMI306_BIT_CNTL2_IEN (0x10)
+#define AMI306_BIT_CNTL2_DREN (0x08)
+#define AMI306_BIT_CNTL2_DRP (0x04)
+#define AMI306_BIT_CNTL3_F0RCE (0x40)
+
+#define AMI_FINE_MAX (96)
+#define AMI_STANDARD_OFFSET (0x800)
+#define AMI_GAIN_COR_DEFAULT (1000)
+
+/* -------------------------------------------------------------------------- */
+struct ami306_private_data {
+ int isstandby;
+ unsigned char fine[3];
+ struct ami_sensor_parametor param;
+ struct ami_win_parameter win;
+};
+
+/* -------------------------------------------------------------------------- */
+static inline unsigned short little_u8_to_u16(unsigned char *p_u8)
+{
+ return p_u8[0] | (p_u8[1] << 8);
+}
+
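+/* Read-modify-write helper: OR the given bits into an 8-bit register. */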
+static int ami306_set_bits8(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ unsigned char reg, unsigned char bits)
+{
+ int result;
+ unsigned char buf;
+
+ result = inv_serial_read(mlsl_handle, pdata->address, reg, 1, &buf);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ buf |= bits;
+ result = inv_serial_single_write(mlsl_handle, pdata->address, reg, buf);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
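+/*
+ * Poll the STA1 register for data ready (DRDY), waiting usecs between
+ * polls for at most 'times' attempts.  A data overrun (DOR) is reported
+ * as INV_ERROR_COMPASS_DATA_OVERFLOW.
+ */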
+static int ami306_wait_data_ready(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ unsigned long usecs, unsigned long times)
+{
+ int result = 0;
+ unsigned char buf;
+
+ for (; 0 < times; --times) {
+ udelay(usecs);
+		result = inv_serial_read(mlsl_handle, pdata->address,
+					AMI_REG_STA1, 1, &buf);
+		if (result) {
+			LOG_RESULT_LOCATION(result);
+			return result;
+		}
+		if (buf & AMI_STA1_DRDY_BIT)
+ return 0;
+ else if (buf & AMI_STA1_DOR_BIT)
+ return INV_ERROR_COMPASS_DATA_OVERFLOW;
+ }
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+}
+
+static int ami306_read_raw_data(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ short dat[3])
+{
+ int result;
+ unsigned char buf[6];
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_DATAX, sizeof(buf), buf);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dat[0] = little_u8_to_u16(&buf[0]);
+ dat[1] = little_u8_to_u16(&buf[2]);
+ dat[2] = little_u8_to_u16(&buf[4]);
+ return result;
+}
+
+#define AMI_WAIT_DATAREADY_RETRY 3 /* retry times */
+#define AMI_DRDYWAIT 800 /* u(micro) sec */
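+/*
+ * Trigger one forced measurement via CTRL3:FORCE, wait for data ready
+ * and read back the raw X/Y/Z samples.
+ */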
+static int ami306_force_measurement(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ short ver[3])
+{
+ int result;
+ int status;
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL3, AMI_CTRL3_FORCE_BIT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = ami306_wait_data_ready(mlsl_handle, pdata,
+ AMI_DRDYWAIT, AMI_WAIT_DATAREADY_RETRY);
+ if (result && result != INV_ERROR_COMPASS_DATA_OVERFLOW) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* READ DATA X,Y,Z */
+ status = ami306_read_raw_data(mlsl_handle, pdata, ver);
+ if (status) {
+ LOG_RESULT_LOCATION(status);
+ return status;
+ }
+
+ return result;
+}
+
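+/*
+ * Take one forced measurement and shift the signed raw values into the
+ * positive range by adding the standard offset.
+ */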
+static int ami306_mea(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata, short val[3])
+{
+	int result = ami306_force_measurement(mlsl_handle, pdata, val);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ val[0] += AMI_STANDARD_OFFSET;
+ val[1] += AMI_STANDARD_OFFSET;
+ val[2] += AMI_STANDARD_OFFSET;
+ return result;
+}
+
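+/*
+ * Write the 7-bit fine offset values to the OFFX/OFFY/OFFZ register
+ * pairs (low byte holds the offset, high byte is written as zero).
+ */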
+static int ami306_write_offset(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *fine)
+{
+ int result = 0;
+ unsigned char dat[3];
+ dat[0] = AMI_REG_OFFX;
+ dat[1] = 0x7f & fine[0];
+ dat[2] = 0;
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ sizeof(dat), dat);
+ dat[0] = AMI_REG_OFFY;
+ dat[1] = 0x7f & fine[1];
+ dat[2] = 0;
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ sizeof(dat), dat);
+ dat[0] = AMI_REG_OFFZ;
+ dat[1] = 0x7f & fine[2];
+ dat[2] = 0;
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ sizeof(dat), dat);
+ return result;
+}
+
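+/*
+ * Power the sensor up in forced measurement mode, enable DRDY, program
+ * the high-speed CTRL4 value and restore the calibrated fine offsets.
+ */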
+static int ami306_start_sensor(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = 0;
+ unsigned char buf[3];
+ struct ami306_private_data *private_data = pdata->private_data;
+
+	/* Step 1: power up (PC1) and select forced measurement mode (FS1) */
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL1,
+ AMI_CTRL1_PC1 | AMI_CTRL1_FS1_FORCE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+	/* Step 2: enable the DRDY output */
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL2, AMI_CTRL2_DREN);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+	/* Step 3: set CTRL4 for high-speed measurement */
+ buf[0] = AMI_REG_CTRL4;
+ buf[1] = AMI_CTRL4_HS & 0xFF;
+ buf[2] = (AMI_CTRL4_HS >> 8) & 0xFF;
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ sizeof(buf), buf);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+	/* Step 4: restore the calibrated fine offsets */
+ result = ami306_write_offset(mlsl_handle, pdata, private_data->fine);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+/**
+ * Read the factory calibration parameters from the sensor.
+ *
+ * Fills in the per-axis gains, the axis-interference matrix and the
+ * default offset and gain-correction values in the slave private data.
+ *
+ * @param mlsl_handle handle for the serial layer used for register access.
+ * @param slave descriptor of this compass slave.
+ * @param pdata platform data holding the device address and private data.
+ *
+ * @return INV_SUCCESS or non-zero error code.
+ */
+static int ami306_read_param(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = 0;
+ unsigned char regs[12];
+ struct ami306_private_data *private_data = pdata->private_data;
+ struct ami_sensor_parametor *param = &private_data->param;
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_SENX, sizeof(regs), regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Little endian 16 bit registers */
+ param->m_gain.x = little_u8_to_u16(&regs[0]);
+ param->m_gain.y = little_u8_to_u16(&regs[2]);
+ param->m_gain.z = little_u8_to_u16(&regs[4]);
+
+ param->m_interference.xy = regs[7];
+ param->m_interference.xz = regs[6];
+ param->m_interference.yx = regs[9];
+ param->m_interference.yz = regs[8];
+ param->m_interference.zx = regs[11];
+ param->m_interference.zy = regs[10];
+
+ param->m_offset.x = AMI_STANDARD_OFFSET;
+ param->m_offset.y = AMI_STANDARD_OFFSET;
+ param->m_offset.z = AMI_STANDARD_OFFSET;
+
+ param->m_gain_cor.x = AMI_GAIN_COR_DEFAULT;
+ param->m_gain_cor.y = AMI_GAIN_COR_DEFAULT;
+ param->m_gain_cor.z = AMI_GAIN_COR_DEFAULT;
+
+ return result;
+}
+
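+/*
+ * Sweep the fine offset setting over its full range (0..AMI_FINE_MAX-1)
+ * and, for each axis, keep the value whose forced measurement is closest
+ * to zero.  The chosen offsets are written back and the sensor is then
+ * soft-reset.
+ */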
+static int ami306_initial_b0_adjust(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char fine[3] = { 0 };
+ short data[3];
+ int diff[3] = { 0x7fff, 0x7fff, 0x7fff };
+ int fn = 0;
+ int ax = 0;
+ unsigned char buf[3];
+ struct ami306_private_data *private_data = pdata->private_data;
+
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL2, AMI_CTRL2_DREN);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ buf[0] = AMI_REG_CTRL4;
+ buf[1] = AMI_CTRL4_HS & 0xFF;
+ buf[2] = (AMI_CTRL4_HS >> 8) & 0xFF;
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ sizeof(buf), buf);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ for (fn = 0; fn < AMI_FINE_MAX; ++fn) { /* fine 0 -> 95 */
+ fine[0] = fine[1] = fine[2] = fn;
+ result = ami306_write_offset(mlsl_handle, pdata, fine);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+		result = ami306_force_measurement(mlsl_handle, pdata, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("[%d] x:%-5d y:%-5d z:%-5d\n",
+ fn, data[0], data[1], data[2]);
+
+ for (ax = 0; ax < 3; ax++) {
+			/* search for the point closest to zero */
+ if (diff[ax] > abs(data[ax])) {
+ private_data->fine[ax] = fn;
+ diff[ax] = abs(data[ax]);
+ }
+ }
+ }
+ MPL_LOGV("fine x:%-5d y:%-5d z:%-5d\n",
+ private_data->fine[0], private_data->fine[1],
+ private_data->fine[2]);
+
+ result = ami306_write_offset(mlsl_handle, pdata, private_data->fine);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Software Reset */
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL3, AMI_CTRL3_SRST_BIT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return result;
+}
+
+#define SEH_RANGE_MIN 100
+#define SEH_RANGE_MAX 3950
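+/*
+ * Iteratively adjust the per-axis fine offsets until every measurement
+ * falls inside the [SEH_RANGE_MIN, SEH_RANGE_MAX] window, then store the
+ * result in the private data and soft-reset the sensor.
+ */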
+static int ami306_search_offset(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ int axis;
+ unsigned char regs[6];
+ unsigned char run_flg[3] = { 1, 1, 1 };
+ unsigned char fine[3];
+ unsigned char win_range_fine[3];
+ unsigned short fine_output[3];
+ short val[3];
+ unsigned short cnt[3] = { 0 };
+ struct ami306_private_data *private_data = pdata->private_data;
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_FINEOUTPUT_X, sizeof(regs), regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ fine_output[0] = little_u8_to_u16(&regs[0]);
+ fine_output[1] = little_u8_to_u16(&regs[2]);
+ fine_output[2] = little_u8_to_u16(&regs[4]);
+
+ for (axis = 0; axis < 3; ++axis) {
+ if (fine_output[axis] == 0) {
+			MPL_LOGV("error: fine_output is 0 (line %d) axis:%d\n",
+				 __LINE__, axis);
+ return -1;
+ }
+		/* number of fine steps per search window */
+ win_range_fine[axis] = (SEH_RANGE_MAX - SEH_RANGE_MIN)
+ / fine_output[axis];
+ }
+
+ /* get current fine */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFX, 2, &regs[0]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFY, 2, &regs[2]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFZ, 2, &regs[4]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ fine[0] = (unsigned char)(regs[0] & 0x7f);
+ fine[1] = (unsigned char)(regs[2] & 0x7f);
+ fine[2] = (unsigned char)(regs[4] & 0x7f);
+
+ while (run_flg[0] == 1 || run_flg[1] == 1 || run_flg[2] == 1) {
+
+ result = ami306_mea(mlsl_handle, pdata, val);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ MPL_LOGV("val x:%-5d y:%-5d z:%-5d\n", val[0], val[1], val[2]);
+ MPL_LOGV("now fine x:%-5d y:%-5d z:%-5d\n",
+ fine[0], fine[1], fine[2]);
+
+ for (axis = 0; axis < 3; ++axis) {
+ if (axis == 0) { /* X-axis is reversed */
+ val[axis] = 0x0FFF & ~val[axis];
+ }
+ if (val[axis] < SEH_RANGE_MIN) {
+				/* Below the lower limit. */
+ fine[axis] -= win_range_fine[axis];
+ MPL_LOGV("min : fine=%d diff=%d\n",
+ fine[axis], win_range_fine[axis]);
+ }
+ if (val[axis] > SEH_RANGE_MAX) {
+				/* Above the upper limit. */
+ fine[axis] += win_range_fine[axis];
+ MPL_LOGV("max : fine=%d diff=%d\n",
+ fine[axis], win_range_fine[axis]);
+ }
+ if (SEH_RANGE_MIN <= val[axis] &&
+ val[axis] <= SEH_RANGE_MAX) {
+ /* In the current window. */
+ int diff_fine =
+ (val[axis] - AMI_STANDARD_OFFSET) /
+ fine_output[axis];
+ fine[axis] += diff_fine;
+ run_flg[axis] = 0;
+ MPL_LOGV("mid : fine=%d diff=%d\n",
+ fine[axis], diff_fine);
+ }
+
+			if (fine[axis] >= AMI_FINE_MAX) {
+ MPL_LOGE("fine err :%d\n", cnt[axis]);
+ goto out;
+ }
+ if (cnt[axis] > 3) {
+ MPL_LOGE("cnt err :%d\n", cnt[axis]);
+ goto out;
+ }
+ cnt[axis]++;
+ }
+ MPL_LOGV("new fine x:%-5d y:%-5d z:%-5d\n",
+ fine[0], fine[1], fine[2]);
+
+ /* set current fine */
+ result = ami306_write_offset(mlsl_handle, pdata, fine);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ memcpy(private_data->fine, fine, sizeof(fine));
+out:
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL3, AMI_CTRL3_SRST_BIT);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ udelay(250 + 50);
+ return 0;
+}
+
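+/*
+ * Snapshot the window parameters: the factory (OTP) zero-gauss fine
+ * values, the current fine offsets and the fine output sensitivity.
+ */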
+static int ami306_read_win(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = 0;
+ unsigned char regs[6];
+ struct ami306_private_data *private_data = pdata->private_data;
+ struct ami_win_parameter *win = &private_data->win;
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFOTPX, sizeof(regs), regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ win->m_0Gauss_fine.x = (unsigned char)(regs[0] & 0x7f);
+ win->m_0Gauss_fine.y = (unsigned char)(regs[2] & 0x7f);
+ win->m_0Gauss_fine.z = (unsigned char)(regs[4] & 0x7f);
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFX, 2, &regs[0]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFY, 2, &regs[2]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_REG_OFFZ, 2, &regs[4]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ win->m_fine.x = (unsigned char)(regs[0] & 0x7f);
+ win->m_fine.y = (unsigned char)(regs[2] & 0x7f);
+ win->m_fine.z = (unsigned char)(regs[4] & 0x7f);
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI_FINEOUTPUT_X, sizeof(regs), regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ win->m_fine_output.x = little_u8_to_u16(&regs[0]);
+ win->m_fine_output.y = little_u8_to_u16(&regs[2]);
+ win->m_fine_output.z = little_u8_to_u16(&regs[4]);
+
+ return result;
+}
+
+static int ami306_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char reg;
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ AMI306_REG_CNTL1, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ reg &= ~(AMI306_BIT_CNTL1_PC1 | AMI306_BIT_CNTL1_FS1);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI306_REG_CNTL1, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int ami306_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ unsigned char regs[] = {
+ AMI306_REG_CNTL4_1,
+ 0x7E,
+ 0xA0
+ };
+	/* Step1. Set CNTL1 reg to power mode active (Write CNTL1:PC1=1) */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI306_REG_CNTL1,
+ AMI306_BIT_CNTL1_PC1 |
+ AMI306_BIT_CNTL1_FS1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Step2. Set CNTL2 reg to DRDY active high and enabled
+ (Write CNTL2:DREN=1) */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI306_REG_CNTL2,
+ AMI306_BIT_CNTL2_DREN |
+ AMI306_BIT_CNTL2_DRP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+	/* Step3. Set CNTL4 reg for measurement speed: Write CNTL4, 0xA07E */
+ result = inv_serial_write(mlsl_handle, pdata->address,
+ ARRAY_SIZE(regs), regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Step4. skipped */
+
+ /* Step5. Set CNTL3 reg to forced measurement period
+ (Write CNTL3:FORCE=1) */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI306_REG_CNTL3,
+ AMI306_BIT_CNTL3_F0RCE);
+
+ return result;
+}
+
+static int ami306_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+ int ii;
+ short val[COMPASS_NUM_AXES];
+
+ result = ami306_mea(mlsl_handle, pdata, val);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ for (ii = 0; ii < COMPASS_NUM_AXES; ii++) {
+ val[ii] -= AMI_STANDARD_OFFSET;
+ data[2 * ii] = val[ii] & 0xFF;
+ data[(2 * ii) + 1] = (val[ii] >> 8) & 0xFF;
+ }
+ return result;
+}
+
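+/*
+ * One-time initialization: allocate the private data, power the part up
+ * in forced mode, read the factory calibration parameters, perform the
+ * initial fine offset adjustment, start the sensor, snapshot the window
+ * parameters and finally put the part back into stand-by.
+ */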
+static int ami306_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ struct ami306_private_data *private_data;
+ private_data = (struct ami306_private_data *)
+ kzalloc(sizeof(struct ami306_private_data), GFP_KERNEL);
+
+ if (!private_data)
+ return INV_ERROR_MEMORY_EXAUSTED;
+
+ pdata->private_data = private_data;
+ result = ami306_set_bits8(mlsl_handle, pdata,
+ AMI_REG_CTRL1,
+ AMI_CTRL1_PC1 | AMI_CTRL1_FS1_FORCE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Read Parameters */
+ result = ami306_read_param(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+	/* Initial fine offset adjustment */
+ result = ami306_initial_b0_adjust(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = ami306_start_sensor(mlsl_handle, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = ami306_read_win(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI306_REG_CNTL1, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_SUCCESS;
+}
+
+static int ami306_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ kfree(pdata->private_data);
+ return INV_SUCCESS;
+}
+
+static int ami306_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ if (!data->data) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ switch (data->key) {
+ case MPU_SLAVE_PARAM:
+ case MPU_SLAVE_WINDOW:
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+
+ return INV_SUCCESS;
+}
+
+static int ami306_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ int result;
+ struct ami306_private_data *private_data = pdata->private_data;
+ if (!data->data) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ switch (data->key) {
+ case MPU_SLAVE_PARAM:
+ if (sizeof(struct ami_sensor_parametor) > data->len) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ if (data->apply) {
+ result = ami306_read_param(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ memcpy(data->data, &private_data->param,
+ sizeof(struct ami_sensor_parametor));
+ break;
+ case MPU_SLAVE_WINDOW:
+ if (sizeof(struct ami_win_parameter) > data->len) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ if (data->apply) {
+ result = ami306_read_win(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ memcpy(data->data, &private_data->win,
+ sizeof(struct ami_win_parameter));
+ break;
+ case MPU_SLAVE_SEARCHOFFSET:
+ if (sizeof(struct ami_win_parameter) > data->len) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ if (data->apply) {
+ result = ami306_search_offset(mlsl_handle,
+ slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Start sensor */
+ result = ami306_start_sensor(mlsl_handle, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = ami306_read_win(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ memcpy(data->data, &private_data->win,
+ sizeof(struct ami_win_parameter));
+ break;
+ case MPU_SLAVE_READWINPARAMS:
+ if (sizeof(struct ami_win_parameter) > data->len) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ if (data->apply) {
+ result = ami306_initial_b0_adjust(mlsl_handle,
+ slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Start sensor */
+ result = ami306_start_sensor(mlsl_handle, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = ami306_read_win(mlsl_handle, slave, pdata);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ memcpy(data->data, &private_data->win,
+ sizeof(struct ami_win_parameter));
+ break;
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) = 0;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) = 50000;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ case MPU_SLAVE_READ_SCALE:
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_read_trigger ami306_read_trigger = {
+ /*.reg = */ AMI_REG_CTRL3,
+ /*.value = */ AMI_CTRL3_FORCE_BIT
+};
+
+static struct ext_slave_descr ami306_descr = {
+ .init = ami306_init,
+ .exit = ami306_exit,
+ .suspend = ami306_suspend,
+ .resume = ami306_resume,
+ .read = ami306_read,
+ .config = ami306_config,
+ .get_config = ami306_get_config,
+ .name = "ami306",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_AMI306,
+ .read_reg = 0x0E,
+ .read_len = 13,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {5461, 3333},
+ .trigger = &ami306_read_trigger,
+};
+
+static
+struct ext_slave_descr *ami306_get_slave_descr(void)
+{
+ return &ami306_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct ami306_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int ami306_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct ami306_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ ami306_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int ami306_mod_remove(struct i2c_client *client)
+{
+ struct ami306_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ ami306_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id ami306_mod_id[] = {
+ { "ami306", COMPASS_ID_AMI306 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ami306_mod_id);
+
+static struct i2c_driver ami306_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = ami306_mod_probe,
+ .remove = ami306_mod_remove,
+ .id_table = ami306_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ami306_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init ami306_mod_init(void)
+{
+ int res = i2c_add_driver(&ami306_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "ami306_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit ami306_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&ami306_mod_driver);
+}
+
+module_init(ami306_mod_init);
+module_exit(ami306_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate AMI306 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ami306_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/ami30x.c b/drivers/misc/inv_mpu/compass/ami30x.c
new file mode 100644
index 000000000000..0c4937c44263
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/ami30x.c
@@ -0,0 +1,308 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file ami30x.c
+ * @brief Magnetometer setup and handling methods for Aichi AMI304
+ * and AMI305 compass devices.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define AMI30X_REG_DATAX (0x10)
+#define AMI30X_REG_STAT1 (0x18)
+#define AMI30X_REG_CNTL1 (0x1B)
+#define AMI30X_REG_CNTL2 (0x1C)
+#define AMI30X_REG_CNTL3 (0x1D)
+
+#define AMI30X_BIT_CNTL1_PC1 (0x80)
+#define AMI30X_BIT_CNTL1_ODR1 (0x10)
+#define AMI30X_BIT_CNTL1_FS1 (0x02)
+
+#define AMI30X_BIT_CNTL2_IEN (0x10)
+#define AMI30X_BIT_CNTL2_DREN (0x08)
+#define AMI30X_BIT_CNTL2_DRP (0x04)
+#define AMI30X_BIT_CNTL3_F0RCE (0x40)
+
+/* -------------------------------------------------------------------------- */
+static int ami30x_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char reg;
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, AMI30X_REG_CNTL1,
+ 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ reg &= ~(AMI30X_BIT_CNTL1_PC1 | AMI30X_BIT_CNTL1_FS1);
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI30X_REG_CNTL1, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int ami30x_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+	/* Set CNTL1 reg to power mode active */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI30X_REG_CNTL1,
+ AMI30X_BIT_CNTL1_PC1 |
+ AMI30X_BIT_CNTL1_FS1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Set CNTL2 reg to DRDY active high and enabled */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI30X_REG_CNTL2,
+ AMI30X_BIT_CNTL2_DREN |
+ AMI30X_BIT_CNTL2_DRP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Set CNTL3 reg to forced measurement period */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI30X_REG_CNTL3, AMI30X_BIT_CNTL3_F0RCE);
+
+ return result;
+}
+
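+/*
+ * Read the status register; if data is ready, fetch the six data bytes
+ * and trigger the next forced measurement.
+ */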
+static int ami30x_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ unsigned char stat;
+ int result = INV_SUCCESS;
+
+ /* Read status reg and check if data ready (DRDY) */
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, AMI30X_REG_STAT1,
+ 1, &stat);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (stat & 0x40) {
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ AMI30X_REG_DATAX, 6, (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* start another measurement */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ AMI30X_REG_CNTL3,
+ AMI30X_BIT_CNTL3_F0RCE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_SUCCESS;
+ }
+
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+}
+
+
+/* For AMI305, the range field needs to be modified to {9830.4f} */
+static struct ext_slave_descr ami30x_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = ami30x_suspend,
+ .resume = ami30x_resume,
+ .read = ami30x_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "ami30x",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_AMI30X,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {5461, 3333},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *ami30x_get_slave_descr(void)
+{
+ return &ami30x_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct ami30x_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int ami30x_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct ami30x_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ ami30x_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int ami30x_mod_remove(struct i2c_client *client)
+{
+ struct ami30x_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ ami30x_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id ami30x_mod_id[] = {
+ { "ami30x", COMPASS_ID_AMI30X },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ami30x_mod_id);
+
+static struct i2c_driver ami30x_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = ami30x_mod_probe,
+ .remove = ami30x_mod_remove,
+ .id_table = ami30x_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ami30x_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init ami30x_mod_init(void)
+{
+ int res = i2c_add_driver(&ami30x_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "ami30x_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit ami30x_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&ami30x_mod_driver);
+}
+
+module_init(ami30x_mod_init);
+module_exit(ami30x_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate AMI30X sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ami30x_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/ami_hw.h b/drivers/misc/inv_mpu/compass/ami_hw.h
new file mode 100644
index 000000000000..32a04e91cdc1
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/ami_hw.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010 Information System Products Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AMI_HW_H
+#define AMI_HW_H
+
+#define AMI_I2C_BUS_NUM 2
+
+#ifdef AMI304_MODEL
+#define AMI_I2C_ADDRESS 0x0F
+#else
+#define AMI_I2C_ADDRESS 0x0E
+#endif
+
+#define AMI_GPIO_INT 152
+#define AMI_GPIO_DRDY 153
+
+/* AMI-Sensor Internal Register Address
+ *(Please refer to AMI-Sensor Specifications)
+ */
+#define AMI_MOREINFO_CMDCODE 0x0d
+#define AMI_WHOIAM_CMDCODE 0x0f
+#define AMI_REG_DATAX 0x10
+#define AMI_REG_DATAY 0x12
+#define AMI_REG_DATAZ 0x14
+#define AMI_REG_STA1 0x18
+#define AMI_REG_CTRL1 0x1b
+#define AMI_REG_CTRL2 0x1c
+#define AMI_REG_CTRL3 0x1d
+#define AMI_REG_B0X 0x20
+#define AMI_REG_B0Y 0x22
+#define AMI_REG_B0Z 0x24
+#define AMI_REG_CTRL5 0x40
+#define AMI_REG_CTRL4 0x5c
+#define AMI_REG_TEMP 0x60
+#define AMI_REG_DELAYX 0x68
+#define AMI_REG_DELAYY 0x6e
+#define AMI_REG_DELAYZ 0x74
+#define AMI_REG_OFFX 0x6c
+#define AMI_REG_OFFY 0x72
+#define AMI_REG_OFFZ 0x78
+#define AMI_FINEOUTPUT_X 0x90
+#define AMI_FINEOUTPUT_Y 0x92
+#define AMI_FINEOUTPUT_Z 0x94
+#define AMI_REG_SENX 0x96
+#define AMI_REG_SENY 0x98
+#define AMI_REG_SENZ 0x9a
+#define AMI_REG_GAINX 0x9c
+#define AMI_REG_GAINY 0x9e
+#define AMI_REG_GAINZ 0xa0
+#define AMI_GETVERSION_CMDCODE 0xe8
+#define AMI_SERIALNUMBER_CMDCODE 0xea
+#define AMI_REG_B0OTPX 0xa2
+#define AMI_REG_B0OTPY 0xb8
+#define AMI_REG_B0OTPZ 0xce
+#define AMI_REG_OFFOTPX 0xf8
+#define AMI_REG_OFFOTPY 0xfa
+#define AMI_REG_OFFOTPZ 0xfc
+
+/* AMI-Sensor Control Bit (Please refer to AMI-Sensor Specifications) */
+#define AMI_CTRL1_PC1 0x80
+#define AMI_CTRL1_FS1_FORCE 0x02
+#define AMI_CTRL1_ODR1 0x10
+#define AMI_CTRL2_DREN 0x08
+#define AMI_CTRL2_DRP 0x04
+#define AMI_CTRL3_FORCE_BIT 0x40
+#define AMI_CTRL3_B0_LO_BIT 0x10
+#define AMI_CTRL3_SRST_BIT 0x80
+#define AMI_CTRL4_HS 0xa07e
+#define AMI_CTRL4_AB 0x0001
+#define AMI_STA1_DRDY_BIT 0x40
+#define AMI_STA1_DOR_BIT 0x20
+
+#endif
diff --git a/drivers/misc/inv_mpu/compass/ami_sensor_def.h b/drivers/misc/inv_mpu/compass/ami_sensor_def.h
new file mode 100644
index 000000000000..64032e2bf1fb
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/ami_sensor_def.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2010 Information System Products Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Definitions for ami306 compass chip.
+ */
+#ifndef AMI_SENSOR_DEF_H
+#define AMI_SENSOR_DEF_H
+
+/*********************************************************************
+ Constant
+ *********************************************************************/
+#define AMI_OK 0x00 /**< Normal */
+#define AMI_PARAM_ERR 0x01 /**< Parameter Error */
+#define AMI_SEQ_ERR		0x02	/**< Sequence Error */
+#define AMI_SYSTEM_ERR 0x10 /**< System Error */
+#define AMI_BLOCK_ERR 0x20 /**< Block Error */
+#define AMI_ERROR 0x99 /**< other Error */
+
+/*********************************************************************
+ Struct definition
+ *********************************************************************/
+/** axis sensitivity(gain) calibration parameter information */
+struct ami_vector3d {
+ signed short x; /**< X-axis */
+ signed short y; /**< Y-axis */
+ signed short z; /**< Z-axis */
+};
+
+/** axis interference information */
+struct ami_interference {
+ /**< Y-axis magnetic field for X-axis correction value */
+ signed short xy;
+ /**< Z-axis magnetic field for X-axis correction value */
+ signed short xz;
+ /**< X-axis magnetic field for Y-axis correction value */
+ signed short yx;
+ /**< Z-axis magnetic field for Y-axis correction value */
+ signed short yz;
+ /**< X-axis magnetic field for Z-axis correction value */
+ signed short zx;
+ /**< Y-axis magnetic field for Z-axis correction value */
+ signed short zy;
+};
+
+/** sensor calibration Parameter information */
+struct ami_sensor_parametor {
+ /**< geomagnetic field sensor gain */
+ struct ami_vector3d m_gain;
+ /**< geomagnetic field sensor gain correction parameter */
+ struct ami_vector3d m_gain_cor;
+ /**< geomagnetic field sensor offset */
+ struct ami_vector3d m_offset;
+ /**< geomagnetic field sensor axis interference parameter */
+ struct ami_interference m_interference;
+#ifdef AMI_6AXIS
+ /**< acceleration sensor gain */
+ struct ami_vector3d a_gain;
+ /**< acceleration sensor offset */
+ struct ami_vector3d a_offset;
+ /**< acceleration sensor deviation */
+ signed short a_deviation;
+#endif
+};
+
+/** G2-Sensor measurement value (voltage ADC value) */
+struct ami_sensor_rawvalue {
+ /**< geomagnetic field sensor measurement X-axis value
+ (mounted position/direction reference) */
+ unsigned short mx;
+ /**< geomagnetic field sensor measurement Y-axis value
+ (mounted position/direction reference) */
+ unsigned short my;
+ /**< geomagnetic field sensor measurement Z-axis value
+ (mounted position/direction reference) */
+ unsigned short mz;
+#ifdef AMI_6AXIS
+ /**< acceleration sensor measurement X-axis value
+ (mounted position/direction reference) */
+ unsigned short ax;
+ /**< acceleration sensor measurement Y-axis value
+ (mounted position/direction reference) */
+ unsigned short ay;
+ /**< acceleration sensor measurement Z-axis value
+ (mounted position/direction reference) */
+ unsigned short az;
+#endif
+ /**< temperature sensor measurement value */
+ unsigned short temperature;
+};
+
+/** Window function Parameter information */
+struct ami_win_parameter {
+ /**< current fine value */
+ struct ami_vector3d m_fine;
+ /**< change per 1coarse */
+ struct ami_vector3d m_fine_output;
+ /**< fine value at zero gauss */
+ struct ami_vector3d m_0Gauss_fine;
+#ifdef AMI304
+ /**< current b0 value */
+ struct ami_vector3d m_b0;
+ /**< current coarse value */
+ struct ami_vector3d m_coar;
+ /**< change per 1fine */
+ struct ami_vector3d m_coar_output;
+ /**< coarse value at zero gauss */
+ struct ami_vector3d m_0Gauss_coar;
+ /**< delay value */
+ struct ami_vector3d m_delay;
+#endif
+};
+
+/** AMI chip information: 1) model 2) serial number 3) version 4) more info in the chip */
+struct ami_chipinfo {
+ unsigned short info; /* INFO 0x0d/0x0e reg. */
+ unsigned short ver; /* VER 0xe8/0xe9 reg. */
+ unsigned short sn; /* SN 0xea/0xeb reg. */
+ unsigned char wia; /* WIA 0x0f reg. */
+};
+
+/** AMI Driver Information */
+struct ami_driverinfo {
+ unsigned char remarks[40]; /* Some Information */
+ unsigned char datetime[30]; /* compiled date&time */
+ unsigned char ver_major; /* major version */
+	unsigned char ver_middle;	/* middle version */
+	unsigned char ver_minor;	/* minor version */
+};
+
+#endif
diff --git a/drivers/misc/inv_mpu/compass/hmc5883.c b/drivers/misc/inv_mpu/compass/hmc5883.c
new file mode 100644
index 000000000000..fdf2ac00565a
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/hmc5883.c
@@ -0,0 +1,391 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file hmc5883.c
+ * @brief Magnetometer setup and handling methods for Honeywell
+ * HMC5883 compass.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+enum HMC_REG {
+ HMC_REG_CONF_A = 0x0,
+ HMC_REG_CONF_B = 0x1,
+ HMC_REG_MODE = 0x2,
+ HMC_REG_X_M = 0x3,
+ HMC_REG_X_L = 0x4,
+ HMC_REG_Z_M = 0x5,
+ HMC_REG_Z_L = 0x6,
+ HMC_REG_Y_M = 0x7,
+ HMC_REG_Y_L = 0x8,
+ HMC_REG_STATUS = 0x9,
+ HMC_REG_ID_A = 0xA,
+ HMC_REG_ID_B = 0xB,
+ HMC_REG_ID_C = 0xC
+};
+
+enum HMC_CONF_A {
+ HMC_CONF_A_DRATE_MASK = 0x1C,
+ HMC_CONF_A_DRATE_0_75 = 0x00,
+ HMC_CONF_A_DRATE_1_5 = 0x04,
+ HMC_CONF_A_DRATE_3 = 0x08,
+ HMC_CONF_A_DRATE_7_5 = 0x0C,
+ HMC_CONF_A_DRATE_15 = 0x10,
+ HMC_CONF_A_DRATE_30 = 0x14,
+ HMC_CONF_A_DRATE_75 = 0x18,
+ HMC_CONF_A_MEAS_MASK = 0x3,
+ HMC_CONF_A_MEAS_NORM = 0x0,
+ HMC_CONF_A_MEAS_POS = 0x1,
+ HMC_CONF_A_MEAS_NEG = 0x2
+};
+
+enum HMC_CONF_B {
+ HMC_CONF_B_GAIN_MASK = 0xE0,
+ HMC_CONF_B_GAIN_0_9 = 0x00,
+ HMC_CONF_B_GAIN_1_2 = 0x20,
+ HMC_CONF_B_GAIN_1_9 = 0x40,
+ HMC_CONF_B_GAIN_2_5 = 0x60,
+ HMC_CONF_B_GAIN_4_0 = 0x80,
+ HMC_CONF_B_GAIN_4_6 = 0xA0,
+ HMC_CONF_B_GAIN_5_5 = 0xC0,
+ HMC_CONF_B_GAIN_7_9 = 0xE0
+};
+
+enum HMC_MODE {
+ HMC_MODE_MASK = 0x3,
+ HMC_MODE_CONT = 0x0,
+ HMC_MODE_SINGLE = 0x1,
+ HMC_MODE_IDLE = 0x2,
+ HMC_MODE_SLEEP = 0x3
+};
+
+/* -------------------------------------------------------------------------- */
+static int hmc5883_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ HMC_REG_MODE, HMC_MODE_SLEEP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(3);
+
+ return result;
+}
+
+static int hmc5883_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ /* Use single measurement mode. Start at sleep state. */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ HMC_REG_MODE, HMC_MODE_SLEEP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Config normal measurement */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ HMC_REG_CONF_A, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Adjust gain to 307 LSB/Gauss */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ HMC_REG_CONF_B, HMC_CONF_B_GAIN_5_5);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int hmc5883_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ unsigned char stat;
+ int result = INV_SUCCESS;
+ unsigned char tmp;
+ short axisFixed;
+
+ /* Read status reg. to check if data is ready */
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, HMC_REG_STATUS, 1,
+ &stat);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (stat & 0x01) {
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ HMC_REG_X_M, 6, (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* switch YZ axis to proper position */
+ tmp = data[2];
+ data[2] = data[4];
+ data[4] = tmp;
+ tmp = data[3];
+ data[3] = data[5];
+ data[5] = tmp;
+
+ /*drop data if overflows */
+ if ((data[0] == 0xf0) || (data[2] == 0xf0)
+ || (data[4] == 0xf0)) {
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle,
+ pdata->address,
+ HMC_REG_MODE,
+ HMC_MODE_SINGLE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return INV_ERROR_COMPASS_DATA_OVERFLOW;
+ }
+ /* convert to fixed point and apply sensitivity correction for
+ Z-axis */
+ axisFixed =
+ (short)((unsigned short)data[5] +
+ (unsigned short)data[4] * 256);
+ /* scale up by 1.125 (36/32) */
+ axisFixed = (short)(axisFixed * 36);
+ data[4] = axisFixed >> 8;
+ data[5] = axisFixed & 0xFF;
+
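+		/* the X and Y axes use the nominal scale factor of 32 */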
+ axisFixed =
+ (short)((unsigned short)data[3] +
+ (unsigned short)data[2] * 256);
+ axisFixed = (short)(axisFixed * 32);
+ data[2] = axisFixed >> 8;
+ data[3] = axisFixed & 0xFF;
+
+ axisFixed =
+ (short)((unsigned short)data[1] +
+ (unsigned short)data[0] * 256);
+ axisFixed = (short)(axisFixed * 32);
+ data[0] = axisFixed >> 8;
+ data[1] = axisFixed & 0xFF;
+
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ HMC_REG_MODE, HMC_MODE_SINGLE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_SUCCESS;
+ } else {
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ HMC_REG_MODE, HMC_MODE_SINGLE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+ }
+}
+
+static struct ext_slave_descr hmc5883_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = hmc5883_suspend,
+ .resume = hmc5883_resume,
+ .read = hmc5883_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "hmc5883",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_HMC5883,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {10673, 6156},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *hmc5883_get_slave_descr(void)
+{
+ return &hmc5883_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct hmc5883_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int hmc5883_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct hmc5883_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ hmc5883_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int hmc5883_mod_remove(struct i2c_client *client)
+{
+ struct hmc5883_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ hmc5883_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id hmc5883_mod_id[] = {
+ { "hmc5883", COMPASS_ID_HMC5883 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, hmc5883_mod_id);
+
+static struct i2c_driver hmc5883_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = hmc5883_mod_probe,
+ .remove = hmc5883_mod_remove,
+ .id_table = hmc5883_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hmc5883_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init hmc5883_mod_init(void)
+{
+ int res = i2c_add_driver(&hmc5883_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "hmc5883_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit hmc5883_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&hmc5883_mod_driver);
+}
+
+module_init(hmc5883_mod_init);
+module_exit(hmc5883_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate HMC5883 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("hmc5883_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/hscdtd002b.c b/drivers/misc/inv_mpu/compass/hscdtd002b.c
new file mode 100644
index 000000000000..4f6013cbe3dc
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/hscdtd002b.c
@@ -0,0 +1,294 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file hscdtd002b.c
+ * @brief Magnetometer setup and handling methods for Alps HSCDTD002B
+ * compass.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define COMPASS_HSCDTD002B_STAT (0x18)
+#define COMPASS_HSCDTD002B_CTRL1 (0x1B)
+#define COMPASS_HSCDTD002B_CTRL2 (0x1C)
+#define COMPASS_HSCDTD002B_CTRL3 (0x1D)
+#define COMPASS_HSCDTD002B_DATAX (0x10)
+
+/* -------------------------------------------------------------------------- */
+static int hscdtd002b_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ /* Power mode: stand-by */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_CTRL1, 0x00);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1); /* turn-off time */
+
+ return result;
+}
+
+static int hscdtd002b_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ /* Soft reset */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_CTRL3, 0x80);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Force state; Power mode: active */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_CTRL1, 0x82);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Data ready enable */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_CTRL2, 0x08);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1); /* turn-on time */
+
+ return result;
+}
+
+static int hscdtd002b_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ unsigned char stat;
+ int result = INV_SUCCESS;
+ int status = INV_SUCCESS;
+
+ /* Read status reg. to check if data is ready */
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_STAT, 1, &stat);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
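+	/* 0x40: data ready - fetch the 6-byte X/Y/Z block;
+	   0x20: the previous sample overflowed. */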
+ if (stat & 0x40) {
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_DATAX, 6,
+ (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ status = INV_SUCCESS;
+ } else if (stat & 0x20) {
+ status = INV_ERROR_COMPASS_DATA_OVERFLOW;
+ } else {
+ status = INV_ERROR_COMPASS_DATA_NOT_READY;
+ }
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD002B_CTRL3, 0x40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return status;
+}
+
+static struct ext_slave_descr hscdtd002b_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = hscdtd002b_suspend,
+ .resume = hscdtd002b_resume,
+ .read = hscdtd002b_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "hscdtd002b",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_HSCDTD002B,
+ .read_reg = 0x10,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {9830, 4000},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *hscdtd002b_get_slave_descr(void)
+{
+ return &hscdtd002b_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct hscdtd002b_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int hscdtd002b_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct hscdtd002b_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ hscdtd002b_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int hscdtd002b_mod_remove(struct i2c_client *client)
+{
+ struct hscdtd002b_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ hscdtd002b_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id hscdtd002b_mod_id[] = {
+ { "hscdtd002b", COMPASS_ID_HSCDTD002B },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, hscdtd002b_mod_id);
+
+static struct i2c_driver hscdtd002b_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = hscdtd002b_mod_probe,
+ .remove = hscdtd002b_mod_remove,
+ .id_table = hscdtd002b_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hscdtd002b_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init hscdtd002b_mod_init(void)
+{
+ int res = i2c_add_driver(&hscdtd002b_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "hscdtd002b_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit hscdtd002b_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&hscdtd002b_mod_driver);
+}
+
+module_init(hscdtd002b_mod_init);
+module_exit(hscdtd002b_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate HSCDTD002B sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("hscdtd002b_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/hscdtd004a.c b/drivers/misc/inv_mpu/compass/hscdtd004a.c
new file mode 100644
index 000000000000..f0915599bd2f
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/hscdtd004a.c
@@ -0,0 +1,318 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file hscdtd004a.c
+ * @brief Magnetometer setup and handling methods for Alps HSCDTD004A
+ * compass.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define COMPASS_HSCDTD004A_STAT (0x18)
+#define COMPASS_HSCDTD004A_CTRL1 (0x1B)
+#define COMPASS_HSCDTD004A_CTRL2 (0x1C)
+#define COMPASS_HSCDTD004A_CTRL3 (0x1D)
+#define COMPASS_HSCDTD004A_DATAX (0x10)
+
+/* -------------------------------------------------------------------------- */
+
+static int hscdtd004a_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ /* Power mode: stand-by */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_CTRL1, 0x00);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1); /* turn-off time */
+
+ return result;
+}
+
+static int hscdtd004a_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char data1, data2[2];
+
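+	/* Probe the identification registers; reject the device if the
+	   expected ID bytes (0x49, 0x45, 0x54) are not present. */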
+ result = inv_serial_read(mlsl_handle, pdata->address, 0xf, 1, &data1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(mlsl_handle, pdata->address, 0xd, 2, data2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (data1 != 0x49 || data2[0] != 0x45 || data2[1] != 0x54) {
+ LOG_RESULT_LOCATION(INV_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED);
+ return INV_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED;
+ }
+ return result;
+}
+
+static int hscdtd004a_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ /* Soft reset */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_CTRL3, 0x80);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Normal state; Power mode: active */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_CTRL1, 0x82);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Data ready enable */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_CTRL2, 0x7C);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(1); /* turn-on time */
+ return result;
+}
+
+static int hscdtd004a_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ unsigned char stat;
+ int result = INV_SUCCESS;
+ int status = INV_SUCCESS;
+
+ /* Read status reg. to check if data is ready */
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_STAT, 1, &stat);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (stat & 0x48) {
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_DATAX, 6,
+ (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ status = INV_SUCCESS;
+ } else if (stat & 0x68) {
+ status = INV_ERROR_COMPASS_DATA_OVERFLOW;
+ } else {
+ status = INV_ERROR_COMPASS_DATA_NOT_READY;
+ }
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ COMPASS_HSCDTD004A_CTRL3, 0x40);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return status;
+
+}
+
+static struct ext_slave_descr hscdtd004a_descr = {
+ .init = hscdtd004a_init,
+ .exit = NULL,
+ .suspend = hscdtd004a_suspend,
+ .resume = hscdtd004a_resume,
+ .read = hscdtd004a_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "hscdtd004a",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_HSCDTD004A,
+ .read_reg = 0x10,
+ .read_len = 6,
+ .endian = EXT_SLAVE_LITTLE_ENDIAN,
+ .range = {9830, 4000},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *hscdtd004a_get_slave_descr(void)
+{
+ return &hscdtd004a_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct hscdtd004a_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int hscdtd004a_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct hscdtd004a_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ hscdtd004a_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int hscdtd004a_mod_remove(struct i2c_client *client)
+{
+ struct hscdtd004a_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ hscdtd004a_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id hscdtd004a_mod_id[] = {
+ { "hscdtd004a", COMPASS_ID_HSCDTD004A },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, hscdtd004a_mod_id);
+
+static struct i2c_driver hscdtd004a_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = hscdtd004a_mod_probe,
+ .remove = hscdtd004a_mod_remove,
+ .id_table = hscdtd004a_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hscdtd004a_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init hscdtd004a_mod_init(void)
+{
+ int res = i2c_add_driver(&hscdtd004a_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "hscdtd004a_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit hscdtd004a_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&hscdtd004a_mod_driver);
+}
+
+module_init(hscdtd004a_mod_init);
+module_exit(hscdtd004a_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate HSCDTD004A sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("hscdtd004a_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/lsm303dlx_m.c b/drivers/misc/inv_mpu/compass/lsm303dlx_m.c
new file mode 100644
index 000000000000..32f8cdddb00b
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/lsm303dlx_m.c
@@ -0,0 +1,395 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file lsm303dlx_m.c
+ * @brief Magnetometer setup and handling methods for ST LSM303
+ * compass.
+ * This magnetometer device is part of a combo chip with the
+ *          ST LIS331DLH accelerometer and the logic is entirely based
+ * on the Honeywell HMC5883 magnetometer.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+enum LSM_REG {
+ LSM_REG_CONF_A = 0x0,
+ LSM_REG_CONF_B = 0x1,
+ LSM_REG_MODE = 0x2,
+ LSM_REG_X_M = 0x3,
+ LSM_REG_X_L = 0x4,
+ LSM_REG_Z_M = 0x5,
+ LSM_REG_Z_L = 0x6,
+ LSM_REG_Y_M = 0x7,
+ LSM_REG_Y_L = 0x8,
+ LSM_REG_STATUS = 0x9,
+ LSM_REG_ID_A = 0xA,
+ LSM_REG_ID_B = 0xB,
+ LSM_REG_ID_C = 0xC
+};
+
+enum LSM_CONF_A {
+ LSM_CONF_A_DRATE_MASK = 0x1C,
+ LSM_CONF_A_DRATE_0_75 = 0x00,
+ LSM_CONF_A_DRATE_1_5 = 0x04,
+ LSM_CONF_A_DRATE_3 = 0x08,
+ LSM_CONF_A_DRATE_7_5 = 0x0C,
+ LSM_CONF_A_DRATE_15 = 0x10,
+ LSM_CONF_A_DRATE_30 = 0x14,
+ LSM_CONF_A_DRATE_75 = 0x18,
+ LSM_CONF_A_MEAS_MASK = 0x3,
+ LSM_CONF_A_MEAS_NORM = 0x0,
+ LSM_CONF_A_MEAS_POS = 0x1,
+ LSM_CONF_A_MEAS_NEG = 0x2
+};
+
+enum LSM_CONF_B {
+ LSM_CONF_B_GAIN_MASK = 0xE0,
+ LSM_CONF_B_GAIN_0_9 = 0x00,
+ LSM_CONF_B_GAIN_1_2 = 0x20,
+ LSM_CONF_B_GAIN_1_9 = 0x40,
+ LSM_CONF_B_GAIN_2_5 = 0x60,
+ LSM_CONF_B_GAIN_4_0 = 0x80,
+ LSM_CONF_B_GAIN_4_6 = 0xA0,
+ LSM_CONF_B_GAIN_5_5 = 0xC0,
+ LSM_CONF_B_GAIN_7_9 = 0xE0
+};
+
+enum LSM_MODE {
+ LSM_MODE_MASK = 0x3,
+ LSM_MODE_CONT = 0x0,
+ LSM_MODE_SINGLE = 0x1,
+ LSM_MODE_IDLE = 0x2,
+ LSM_MODE_SLEEP = 0x3
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int lsm303dlx_m_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM_REG_MODE, LSM_MODE_SLEEP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(3);
+
+ return result;
+}
+
+static int lsm303dlx_m_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ /* Use single measurement mode. Start at sleep state. */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM_REG_MODE, LSM_MODE_SLEEP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Config normal measurement */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM_REG_CONF_A, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Adjust gain to 320 LSB/Gauss */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM_REG_CONF_B, LSM_CONF_B_GAIN_5_5);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int lsm303dlx_m_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ unsigned char stat;
+ int result = INV_SUCCESS;
+ short axis_fixed;
+
+ /* Read status reg. to check if data is ready */
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, LSM_REG_STATUS, 1,
+ &stat);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (stat & 0x01) {
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ LSM_REG_X_M, 6, (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+		/* drop the sample if any axis overflowed */
+ if ((data[0] == 0xf0) || (data[2] == 0xf0)
+ || (data[4] == 0xf0)) {
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle,
+ pdata->address,
+ LSM_REG_MODE,
+ LSM_MODE_SINGLE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ return INV_ERROR_COMPASS_DATA_OVERFLOW;
+ }
+ /* convert to fixed point and apply sensitivity correction for
+ Z-axis */
+ axis_fixed =
+ (short)((unsigned short)data[5] +
+ (unsigned short)data[4] * 256);
+		/* scale up by 1.125 (36/32), approximating 1.122 (320/285) */
+ if (slave->id == COMPASS_ID_LSM303DLM) {
+ /* NOTE/IMPORTANT:
+ lsm303dlm compass axis definition doesn't
+ respect the right hand rule. We invert
+ the sign of the Z axis to fix that. */
+ axis_fixed = (short)(-1 * axis_fixed * 36);
+ } else {
+ axis_fixed = (short)(axis_fixed * 36);
+ }
+ data[4] = axis_fixed >> 8;
+ data[5] = axis_fixed & 0xFF;
+
+ axis_fixed =
+ (short)((unsigned short)data[3] +
+ (unsigned short)data[2] * 256);
+ axis_fixed = (short)(axis_fixed * 32);
+ data[2] = axis_fixed >> 8;
+ data[3] = axis_fixed & 0xFF;
+
+ axis_fixed =
+ (short)((unsigned short)data[1] +
+ (unsigned short)data[0] * 256);
+ axis_fixed = (short)(axis_fixed * 32);
+ data[0] = axis_fixed >> 8;
+ data[1] = axis_fixed & 0xFF;
+
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM_REG_MODE, LSM_MODE_SINGLE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_SUCCESS;
+ } else {
+ /* trigger next measurement read */
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ LSM_REG_MODE, LSM_MODE_SINGLE);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+ }
+}
+
+static struct ext_slave_descr lsm303dlx_m_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = lsm303dlx_m_suspend,
+ .resume = lsm303dlx_m_resume,
+ .read = lsm303dlx_m_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "lsm303dlx_m",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = ID_INVALID,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {10240, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *lsm303dlx_m_get_slave_descr(void)
+{
+ return &lsm303dlx_m_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct lsm303dlx_m_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static const struct i2c_device_id lsm303dlx_m_mod_id[] = {
+ { "lsm303dlh", COMPASS_ID_LSM303DLH },
+ { "lsm303dlm", COMPASS_ID_LSM303DLM },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lsm303dlx_m_mod_id);
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int lsm303dlx_m_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct lsm303dlx_m_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+ lsm303dlx_m_descr.id = devid->driver_data;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ lsm303dlx_m_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int lsm303dlx_m_mod_remove(struct i2c_client *client)
+{
+ struct lsm303dlx_m_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ lsm303dlx_m_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static struct i2c_driver lsm303dlx_m_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = lsm303dlx_m_mod_probe,
+ .remove = lsm303dlx_m_mod_remove,
+ .id_table = lsm303dlx_m_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lsm303dlx_m_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init lsm303dlx_m_mod_init(void)
+{
+ int res = i2c_add_driver(&lsm303dlx_m_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "lsm303dlx_m_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit lsm303dlx_m_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&lsm303dlx_m_mod_driver);
+}
+
+module_init(lsm303dlx_m_mod_init);
+module_exit(lsm303dlx_m_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate lsm303dlx_m sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("lsm303dlx_m_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/mmc314x.c b/drivers/misc/inv_mpu/compass/mmc314x.c
new file mode 100644
index 000000000000..786fadcc3e48
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/mmc314x.c
@@ -0,0 +1,313 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file mmc314x.c
+ * @brief Magnetometer setup and handling methods for the
+ * MEMSIC MMC314x compass.
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+
+static int reset_int = 1000;
+static int read_count = 1;
+static char reset_mode; /* in Z-init section */
+
+/* -------------------------------------------------------------------------- */
+#define MMC314X_REG_ST (0x00)
+#define MMC314X_REG_X_MSB (0x01)
+
+#define MMC314X_CNTL_MODE_WAKE_UP (0x01)
+#define MMC314X_CNTL_MODE_SET (0x02)
+#define MMC314X_CNTL_MODE_RESET (0x04)
+
+/* -------------------------------------------------------------------------- */
+
+static int mmc314x_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ return result;
+}
+
+static int mmc314x_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+
+ int result;
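+
+	/* Pulse the coil with RESET then SET (with settling delays) and
+	   restart the counter used for periodic re-set in mmc314x_read(). */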
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ MMC314X_REG_ST, MMC314X_CNTL_MODE_RESET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(10);
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ MMC314X_REG_ST, MMC314X_CNTL_MODE_SET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(10);
+ read_count = 1;
+ return INV_SUCCESS;
+}
+
+static int mmc314x_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result, ii;
+ short tmp[3];
+ unsigned char tmpdata[6];
+
+ if (read_count > 1000)
+ read_count = 1;
+
+ result =
+ inv_serial_read(mlsl_handle, pdata->address, MMC314X_REG_X_MSB,
+ 6, (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ for (ii = 0; ii < 6; ii++)
+ tmpdata[ii] = data[ii];
+
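+	/* For each axis: combine MSB/LSB, subtract the 4096 mid-scale so
+	   the value is signed, then scale by 16 into fixed point. */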
+ for (ii = 0; ii < 3; ii++) {
+ tmp[ii] = (short)((tmpdata[2 * ii] << 8) + tmpdata[2 * ii + 1]);
+ tmp[ii] = tmp[ii] - 4096;
+ tmp[ii] = tmp[ii] * 16;
+ }
+
+ for (ii = 0; ii < 3; ii++) {
+ data[2 * ii] = (unsigned char)(tmp[ii] >> 8);
+ data[2 * ii + 1] = (unsigned char)(tmp[ii]);
+ }
+
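+	/* Every reset_int samples, issue a RESET or SET pulse instead of
+	   returning data; otherwise send the wake-up command that starts
+	   the next measurement. */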
+ if (read_count % reset_int == 0) {
+ if (reset_mode) {
+ result =
+ inv_serial_single_write(mlsl_handle,
+ pdata->address,
+ MMC314X_REG_ST,
+ MMC314X_CNTL_MODE_RESET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ reset_mode = 0;
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+ } else {
+ result =
+ inv_serial_single_write(mlsl_handle,
+ pdata->address,
+ MMC314X_REG_ST,
+ MMC314X_CNTL_MODE_SET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ reset_mode = 1;
+ read_count++;
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+ }
+ }
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ MMC314X_REG_ST, MMC314X_CNTL_MODE_WAKE_UP);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ read_count++;
+
+ return INV_SUCCESS;
+}
+
+static struct ext_slave_descr mmc314x_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = mmc314x_suspend,
+ .resume = mmc314x_resume,
+ .read = mmc314x_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "mmc314x",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_MMC314X,
+ .read_reg = 0x01,
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {400, 0},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *mmc314x_get_slave_descr(void)
+{
+ return &mmc314x_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct mmc314x_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int mmc314x_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct mmc314x_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ mmc314x_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int mmc314x_mod_remove(struct i2c_client *client)
+{
+ struct mmc314x_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ mmc314x_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id mmc314x_mod_id[] = {
+ { "mmc314x", COMPASS_ID_MMC314X },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mmc314x_mod_id);
+
+static struct i2c_driver mmc314x_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = mmc314x_mod_probe,
+ .remove = mmc314x_mod_remove,
+ .id_table = mmc314x_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mmc314x_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init mmc314x_mod_init(void)
+{
+ int res = i2c_add_driver(&mmc314x_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "mmc314x_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit mmc314x_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&mmc314x_mod_driver);
+}
+
+module_init(mmc314x_mod_init);
+module_exit(mmc314x_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate MMC314X sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("mmc314x_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/yas529-kernel.c b/drivers/misc/inv_mpu/compass/yas529-kernel.c
new file mode 100644
index 000000000000..f53223fba641
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/yas529-kernel.c
@@ -0,0 +1,611 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
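+/**
+ *  @addtogroup COMPASSDL
+ *
+ *  @{
+ *      @file   yas529-kernel.c
+ *      @brief  Magnetometer setup and handling methods for Yamaha YAS529
+ *              compass.
+ */
+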
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <log.h>
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/*----- YAMAHA YAS529 Registers ------*/
+enum YAS_REG {
+	YAS_REG_CMDR = 0x00,		/* 000 << 5 */
+	YAS_REG_XOFFSETR = 0x20,	/* 001 << 5 */
+	YAS_REG_Y1OFFSETR = 0x40,	/* 010 << 5 */
+	YAS_REG_Y2OFFSETR = 0x60,	/* 011 << 5 */
+	YAS_REG_ICOILR = 0x80,		/* 100 << 5 */
+	YAS_REG_CAL = 0xA0,		/* 101 << 5 */
+	YAS_REG_CONFR = 0xC0,		/* 110 << 5 */
+	YAS_REG_DOUTR = 0xE0		/* 111 << 5 */
+};
+
+/* -------------------------------------------------------------------------- */
+
+static long a1;
+static long a2;
+static long a3;
+static long a4;
+static long a5;
+static long a6;
+static long a7;
+static long a8;
+static long a9;
+
+/* -------------------------------------------------------------------------- */
+static int yas529_sensor_i2c_write(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned int len, unsigned char *data)
+{
+ struct i2c_msg msgs[1];
+ int res;
+
+ if (NULL == data || NULL == i2c_adap)
+ return -EINVAL;
+
+ msgs[0].addr = address;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = (unsigned char *)data;
+ msgs[0].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 1);
+ if (res < 1)
+ return res;
+ else
+ return 0;
+}
+
+static int yas529_sensor_i2c_read(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned char reg,
+ unsigned int len, unsigned char *data)
+{
+ struct i2c_msg msgs[2];
+ int res;
+
+ if (NULL == data || NULL == i2c_adap)
+ return -EINVAL;
+
+ msgs[0].addr = address;
+ msgs[0].flags = I2C_M_RD;
+ msgs[0].buf = data;
+ msgs[0].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 1);
+ if (res < 1)
+ return res;
+ else
+ return 0;
+}
+
+static int yas529_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ return result;
+}
+
+static int yas529_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ unsigned char dummyData[1] = { 0 };
+ unsigned char dummyRegister = 0;
+ unsigned char rawData[6];
+ unsigned char calData[9];
+
+ short xoffset, y1offset, y2offset;
+ short d2, d3, d4, d5, d6, d7, d8, d9;
+
+ /* YAS529 Application Manual MS-3C - Section 4.4.5 */
+ /* =============================================== */
+ /* Step 1 - register initialization */
+ /* zero initialization coil register - "100 00 000" */
+ dummyData[0] = YAS_REG_ICOILR | 0x00;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* zero config register - "110 00 000" */
+ dummyData[0] = YAS_REG_CONFR | 0x00;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Step 2 - initialization coil operation */
+ dummyData[0] = YAS_REG_ICOILR | 0x11;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x01;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x12;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x02;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x13;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x03;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x14;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x04;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x15;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x05;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x16;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x06;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x17;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x07;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x10;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_ICOILR | 0x00;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Step 3 - rough offset measurement */
+ /* Config register - Measurements results - "110 00 000" */
+ dummyData[0] = YAS_REG_CONFR | 0x00;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Measurements command register - Rough offset measurement -
+ "000 00001" */
+ dummyData[0] = YAS_REG_CMDR | 0x01;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(2); /* wait at least 1.5ms */
+
+ /* Measurement data read */
+ result =
+ yas529_sensor_i2c_read(mlsl_handle, pdata->address,
+ dummyRegister, 6, rawData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
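+	/* Each rough offset is 11 bits (3 high bits in the even byte, 8 low
+	   bits in the odd byte); back it off by 5 counts and clamp at zero. */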
+ xoffset =
+ (short)((unsigned short)rawData[5] +
+ ((unsigned short)rawData[4] & 0x7) * 256) - 5;
+ if (xoffset < 0)
+ xoffset = 0;
+ y1offset =
+ (short)((unsigned short)rawData[3] +
+ ((unsigned short)rawData[2] & 0x7) * 256) - 5;
+ if (y1offset < 0)
+ y1offset = 0;
+ y2offset =
+ (short)((unsigned short)rawData[1] +
+ ((unsigned short)rawData[0] & 0x7) * 256) - 5;
+ if (y2offset < 0)
+ y2offset = 0;
+
+ /* Step 4 - rough offset setting */
+ /* Set rough offset register values */
+ dummyData[0] = YAS_REG_XOFFSETR | xoffset;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_Y1OFFSETR | y1offset;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ dummyData[0] = YAS_REG_Y2OFFSETR | y2offset;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* CAL matrix read (first read is invalid) */
+ /* Config register - CAL register read - "110 01 000" */
+ dummyData[0] = YAS_REG_CONFR | 0x08;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* CAL data read */
+ result =
+ yas529_sensor_i2c_read(mlsl_handle, pdata->address,
+ dummyRegister, 9, calData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Config register - CAL register read - "110 01 000" */
+ dummyData[0] = YAS_REG_CONFR | 0x08;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* CAL data read */
+ result =
+ yas529_sensor_i2c_read(mlsl_handle, pdata->address,
+ dummyRegister, 9, calData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Calculate coefficients of the sensitivity correction matrix */
+ a1 = 100;
+ d2 = (calData[0] & 0xFC) >> 2; /* [71..66] 6bit */
+ a2 = (short)(d2 - 32);
+ /* [65..62] 4bit */
+ d3 = ((calData[0] & 0x03) << 2) | ((calData[1] & 0xC0) >> 6);
+ a3 = (short)(d3 - 8);
+ d4 = (calData[1] & 0x3F); /* [61..56] 6bit */
+ a4 = (short)(d4 - 32);
+ d5 = (calData[2] & 0xFC) >> 2; /* [55..50] 6bit */
+ a5 = (short)(d5 - 32) + 70;
+ /* [49..44] 6bit */
+ d6 = ((calData[2] & 0x03) << 4) | ((calData[3] & 0xF0) >> 4);
+ a6 = (short)(d6 - 32);
+ /* [43..38] 6bit */
+ d7 = ((calData[3] & 0x0F) << 2) | ((calData[4] & 0xC0) >> 6);
+ a7 = (short)(d7 - 32);
+ d8 = (calData[4] & 0x3F); /* [37..32] 6bit */
+ a8 = (short)(d8 - 32);
+ d9 = (calData[5] & 0xFE) >> 1; /* [31..25] 7bit */
+ a9 = (short)(d9 - 64) + 130;
+
+ return result;
+}
+
+static int yas529_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ unsigned char stat;
+ unsigned char rawData[6];
+ unsigned char dummyData[1] = { 0 };
+ unsigned char dummyRegister = 0;
+ int result = INV_SUCCESS;
+ short SX, SY1, SY2, SY, SZ;
+ short row1fixed, row2fixed, row3fixed;
+
+ /* Config register - Measurements results - "110 00 000" */
+ dummyData[0] = YAS_REG_CONFR | 0x00;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Measurements command register - Normal magnetic field measurement -
+ "000 00000" */
+ dummyData[0] = YAS_REG_CMDR | 0x00;
+ result =
+ yas529_sensor_i2c_write(mlsl_handle, pdata->address, 1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(10);
+ /* Measurement data read */
+ result =
+ yas529_sensor_i2c_read(mlsl_handle, pdata->address,
+ dummyRegister, 6, (unsigned char *)&rawData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
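+	/* Bit 7 of the first byte is the busy flag; the sample is only
+	   processed when it is clear. */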
+ stat = rawData[0] & 0x80;
+ if (stat == 0x00) {
+ /* Extract raw data */
+ SX = (short)((unsigned short)rawData[5] +
+ ((unsigned short)rawData[4] & 0x7) * 256);
+ SY1 =
+ (short)((unsigned short)rawData[3] +
+ ((unsigned short)rawData[2] & 0x7) * 256);
+ SY2 =
+ (short)((unsigned short)rawData[1] +
+ ((unsigned short)rawData[0] & 0x7) * 256);
+ if ((SX <= 1) || (SY1 <= 1) || (SY2 <= 1))
+ return INV_ERROR_COMPASS_DATA_UNDERFLOW;
+ if ((SX >= 1024) || (SY1 >= 1024) || (SY2 >= 1024))
+ return INV_ERROR_COMPASS_DATA_OVERFLOW;
+ /* Convert to XYZ axis */
+ SX = -1 * SX;
+ SY = SY2 - SY1;
+ SZ = SY1 + SY2;
+
+ /* Apply sensitivity correction matrix */
+ row1fixed = (short)((a1 * SX + a2 * SY + a3 * SZ) >> 7) * 41;
+ row2fixed = (short)((a4 * SX + a5 * SY + a6 * SZ) >> 7) * 41;
+ row3fixed = (short)((a7 * SX + a8 * SY + a9 * SZ) >> 7) * 41;
+
+ data[0] = row1fixed >> 8;
+ data[1] = row1fixed & 0xFF;
+ data[2] = row2fixed >> 8;
+ data[3] = row2fixed & 0xFF;
+ data[4] = row3fixed >> 8;
+ data[5] = row3fixed & 0xFF;
+
+ return INV_SUCCESS;
+ } else {
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+ }
+}
+
+static struct ext_slave_descr yas529_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = yas529_suspend,
+ .resume = yas529_resume,
+ .read = yas529_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "yas529",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_YAS529,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {19660, 8000},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *yas529_get_slave_descr(void)
+{
+ return &yas529_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct yas529_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int yas529_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct yas529_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ yas529_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int yas529_mod_remove(struct i2c_client *client)
+{
+ struct yas529_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ yas529_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id yas529_mod_id[] = {
+ { "yas529", COMPASS_ID_YAS529 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, yas529_mod_id);
+
+static struct i2c_driver yas529_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = yas529_mod_probe,
+ .remove = yas529_mod_remove,
+ .id_table = yas529_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "yas529_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init yas529_mod_init(void)
+{
+ int res = i2c_add_driver(&yas529_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "yas529_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit yas529_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&yas529_mod_driver);
+}
+
+module_init(yas529_mod_init);
+module_exit(yas529_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate YAS529 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("yas529_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/compass/yas530.c b/drivers/misc/inv_mpu/compass/yas530.c
new file mode 100644
index 000000000000..fdca05ba8e5c
--- /dev/null
+++ b/drivers/misc/inv_mpu/compass/yas530.c
@@ -0,0 +1,580 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup COMPASSDL
+ *
+ * @{
+ * @file yas530.c
+ * @brief Magnetometer setup and handling methods for Yamaha YAS530
+ * compass when used in a user-space solution (no kernel driver).
+ */
+
+/* -------------------------------------------------------------------------- */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include "log.h"
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+/* -------------------------------------------------------------------------- */
+#define YAS530_REGADDR_DEVICE_ID (0x80)
+#define YAS530_REGADDR_ACTUATE_INIT_COIL (0x81)
+#define YAS530_REGADDR_MEASURE_COMMAND (0x82)
+#define YAS530_REGADDR_CONFIG (0x83)
+#define YAS530_REGADDR_MEASURE_INTERVAL (0x84)
+#define YAS530_REGADDR_OFFSET_X (0x85)
+#define YAS530_REGADDR_OFFSET_Y1 (0x86)
+#define YAS530_REGADDR_OFFSET_Y2 (0x87)
+#define YAS530_REGADDR_TEST1 (0x88)
+#define YAS530_REGADDR_TEST2 (0x89)
+#define YAS530_REGADDR_CAL (0x90)
+#define YAS530_REGADDR_MEASURE_DATA (0xb0)
+
+/* -------------------------------------------------------------------------- */
+static int Cx, Cy1, Cy2;
+static int /*a1, */ a2, a3, a4, a5, a6, a7, a8, a9;
+static int k;
+
+static unsigned char dx, dy1, dy2;
+static unsigned char d2, d3, d4, d5, d6, d7, d8, d9, d0;
+static unsigned char dck;
+
+/* -------------------------------------------------------------------------- */
+
+static int set_hardware_offset(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ char offset_x, char offset_y1, char offset_y2)
+{
+ char data;
+ int result = INV_SUCCESS;
+
+ data = offset_x & 0x3f;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_OFFSET_X, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ data = offset_y1 & 0x3f;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_OFFSET_Y1, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ data = offset_y2 & 0x3f;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_OFFSET_Y2, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int set_measure_command(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ int ldtc, int fors, int dlymes)
+{
+ int result = INV_SUCCESS;
+
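+	/* Always start a single normal measurement (0x01); the ldtc, fors
+	   and dlymes arguments are currently unused. */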
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_MEASURE_COMMAND, 0x01);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int measure_normal(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ int *busy, unsigned short *t,
+ unsigned short *x, unsigned short *y1,
+ unsigned short *y2)
+{
+ unsigned char data[8];
+ unsigned short b, to, xo, y1o, y2o;
+ int result;
+ ktime_t sleeptime;
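+
+	/* Start a measurement, sleep ~2 ms for the conversion, then unpack
+	   the busy flag, temperature and raw X/Y1/Y2 from the 8-byte block. */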
+ result = set_measure_command(mlsl_handle, slave, pdata, 0, 0, 0);
+ sleeptime = ktime_set(0, 2 * NSEC_PER_MSEC);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&sleeptime, HRTIMER_MODE_REL);
+
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ YAS530_REGADDR_MEASURE_DATA, 8, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ b = (data[0] >> 7) & 0x01;
+ to = ((data[0] << 2) & 0x1fc) | ((data[1] >> 6) & 0x03);
+ xo = ((data[2] << 5) & 0xfe0) | ((data[3] >> 3) & 0x1f);
+ y1o = ((data[4] << 5) & 0xfe0) | ((data[5] >> 3) & 0x1f);
+ y2o = ((data[6] << 5) & 0xfe0) | ((data[7] >> 3) & 0x1f);
+
+ *busy = b;
+ *t = to;
+ *x = xo;
+ *y1 = y1o;
+ *y2 = y2o;
+
+ return result;
+}
+
+static int check_offset(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ char offset_x, char offset_y1, char offset_y2,
+ int *flag_x, int *flag_y1, int *flag_y2)
+{
+ int result;
+ int busy;
+ short t, x, y1, y2;
+
+ result = set_hardware_offset(mlsl_handle, slave, pdata,
+ offset_x, offset_y1, offset_y2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = measure_normal(mlsl_handle, slave, pdata,
+ &busy, &t, &x, &y1, &y2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ *flag_x = 0;
+ *flag_y1 = 0;
+ *flag_y2 = 0;
+
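+	/* Flag each channel as above (+1) or below (-1) the 2048 mid-scale;
+	   the caller steps the hardware offsets in that direction. */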
+ if (x > 2048)
+ *flag_x = 1;
+ if (y1 > 2048)
+ *flag_y1 = 1;
+ if (y2 > 2048)
+ *flag_y2 = 1;
+ if (x < 2048)
+ *flag_x = -1;
+ if (y1 < 2048)
+ *flag_y1 = -1;
+ if (y2 < 2048)
+ *flag_y2 = -1;
+
+ return result;
+}
+
+static int measure_and_set_offset(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ char *offset)
+{
+ int i;
+ int result = INV_SUCCESS;
+ char offset_x = 0, offset_y1 = 0, offset_y2 = 0;
+ int flag_x = 0, flag_y1 = 0, flag_y2 = 0;
+ static const int correct[5] = { 16, 8, 4, 2, 1 };
+
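+	/* Successive approximation: each pass halves the step (16 down to 1)
+	   and moves each axis offset toward the point where the raw reading
+	   crosses mid-scale, as reported by check_offset(). */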
+ for (i = 0; i < 5; i++) {
+ result = check_offset(mlsl_handle, slave, pdata,
+ offset_x, offset_y1, offset_y2,
+ &flag_x, &flag_y1, &flag_y2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (flag_x)
+ offset_x += flag_x * correct[i];
+ if (flag_y1)
+ offset_y1 += flag_y1 * correct[i];
+ if (flag_y2)
+ offset_y2 += flag_y2 * correct[i];
+ }
+
+ result = set_hardware_offset(mlsl_handle, slave, pdata,
+ offset_x, offset_y1, offset_y2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ offset[0] = offset_x;
+ offset[1] = offset_y1;
+ offset[2] = offset_y2;
+
+ return result;
+}
+
+static void coordinate_conversion(short x, short y1, short y2, short t,
+ int32_t *xo, int32_t *yo, int32_t *zo)
+{
+ int32_t sx, sy1, sy2, sy, sz;
+ int32_t hx, hy, hz;
+
+ sx = x - (Cx * t) / 100;
+ sy1 = y1 - (Cy1 * t) / 100;
+ sy2 = y2 - (Cy2 * t) / 100;
+
+ sy = sy1 - sy2;
+ sz = -sy1 - sy2;
+
+ hx = k * ((100 * sx + a2 * sy + a3 * sz) / 10);
+ hy = k * ((a4 * sx + a5 * sy + a6 * sz) / 10);
+ hz = k * ((a7 * sx + a8 * sy + a9 * sz) / 10);
+
+ *xo = hx;
+ *yo = hy;
+ *zo = hz;
+}
+
+static int yas530_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ return result;
+}
+
+static int yas530_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+
+ unsigned char dummyData = 0x00;
+ char offset[3] = { 0, 0, 0 };
+ unsigned char data[16];
+ unsigned char read_reg[1];
+
+ /* =============================================== */
+
+ /* Step 1 - Test register initialization */
+ dummyData = 0x00;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_TEST1, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result =
+ inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_TEST2, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Device ID read */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ YAS530_REGADDR_DEVICE_ID, 1, read_reg);
+
+	/*Step 2 : Read the CAL register */
+ /* CAL data read */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ YAS530_REGADDR_CAL, 16, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* CAL data Second Read */
+ result = inv_serial_read(mlsl_handle, pdata->address,
+ YAS530_REGADDR_CAL, 16, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /*Cal data */
+ dx = data[0];
+ dy1 = data[1];
+ dy2 = data[2];
+ d2 = (data[3] >> 2) & 0x03f;
+ d3 = ((data[3] << 2) & 0x0c) | ((data[4] >> 6) & 0x03);
+ d4 = data[4] & 0x3f;
+ d5 = (data[5] >> 2) & 0x3f;
+ d6 = ((data[5] << 4) & 0x30) | ((data[6] >> 4) & 0x0f);
+ d7 = ((data[6] << 3) & 0x78) | ((data[7] >> 5) & 0x07);
+ d8 = ((data[7] << 1) & 0x3e) | ((data[8] >> 7) & 0x01);
+ d9 = ((data[8] << 1) & 0xfe) | ((data[9] >> 7) & 0x01);
+ d0 = (data[9] >> 2) & 0x1f;
+ dck = ((data[9] << 1) & 0x06) | ((data[10] >> 7) & 0x01);
+
+ /*Correction Data */
+ Cx = (int)dx * 6 - 768;
+ Cy1 = (int)dy1 * 6 - 768;
+ Cy2 = (int)dy2 * 6 - 768;
+ a2 = (int)d2 - 32;
+ a3 = (int)d3 - 8;
+ a4 = (int)d4 - 32;
+ a5 = (int)d5 + 38;
+ a6 = (int)d6 - 32;
+ a7 = (int)d7 - 64;
+ a8 = (int)d8 - 32;
+ a9 = (int)d9;
+ k = (int)d0 + 10;
+
+ /*Obtain the [49:47] bits */
+ dck &= 0x07;
+
+	/* Step 3: Storing the CONFIG with the CLK value */
+ dummyData = 0x00 | (dck << 2);
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_CONFIG, dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+	/* Step 4: Set Acquisition Interval Register */
+ dummyData = 0x00;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_MEASURE_INTERVAL,
+ dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+	/* Step 5: Reset Coil */
+ dummyData = 0x00;
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ YAS530_REGADDR_ACTUATE_INIT_COIL,
+ dummyData);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Offset Measurement and Set */
+ result = measure_and_set_offset(mlsl_handle, slave, pdata, offset);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+static int yas530_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result = INV_SUCCESS;
+
+ int busy;
+ short t, x, y1, y2;
+ int32_t xyz[3];
+ short rawfixed[3];
+
+ result = measure_normal(mlsl_handle, slave, pdata,
+ &busy, &t, &x, &y1, &y2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ coordinate_conversion(x, y1, y2, t, &xyz[0], &xyz[1], &xyz[2]);
+
+ rawfixed[0] = (short)(xyz[0] / 100);
+ rawfixed[1] = (short)(xyz[1] / 100);
+ rawfixed[2] = (short)(xyz[2] / 100);
+
+ data[0] = rawfixed[0] >> 8;
+ data[1] = rawfixed[0] & 0xFF;
+ data[2] = rawfixed[1] >> 8;
+ data[3] = rawfixed[1] & 0xFF;
+ data[4] = rawfixed[2] >> 8;
+ data[5] = rawfixed[2] & 0xFF;
+
+ if (busy)
+ return INV_ERROR_COMPASS_DATA_NOT_READY;
+ return result;
+}
+
+static struct ext_slave_descr yas530_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = yas530_suspend,
+ .resume = yas530_resume,
+ .read = yas530_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "yas530",
+ .type = EXT_SLAVE_TYPE_COMPASS,
+ .id = COMPASS_ID_YAS530,
+ .read_reg = 0x06,
+ .read_len = 6,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {3276, 8001},
+ .trigger = NULL,
+};
+
+static
+struct ext_slave_descr *yas530_get_slave_descr(void)
+{
+ return &yas530_descr;
+}
+
+/* -------------------------------------------------------------------------- */
+struct yas530_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int yas530_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct yas530_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ yas530_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int yas530_mod_remove(struct i2c_client *client)
+{
+ struct yas530_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ yas530_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id yas530_mod_id[] = {
+ { "yas530", COMPASS_ID_YAS530 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, yas530_mod_id);
+
+static struct i2c_driver yas530_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = yas530_mod_probe,
+ .remove = yas530_mod_remove,
+ .id_table = yas530_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "yas530_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init yas530_mod_init(void)
+{
+ int res = i2c_add_driver(&yas530_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "yas530_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit yas530_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&yas530_mod_driver);
+}
+
+module_init(yas530_mod_init);
+module_exit(yas530_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate YAS530 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("yas530_mod");
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/log.h b/drivers/misc/inv_mpu/log.h
new file mode 100644
index 000000000000..5630602e3efa
--- /dev/null
+++ b/drivers/misc/inv_mpu/log.h
@@ -0,0 +1,287 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/*
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (C) 2005 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * C/C++ logging functions. See the logging documentation for API details.
+ *
+ * We'd like these to be available from C code (in case we import some from
+ * somewhere), so this has a C interface.
+ *
+ * The output will be correct when the log file is shared between multiple
+ * threads and/or multiple processes so long as the operating system
+ * supports O_APPEND. These calls have mutex-protected data structures
+ * and so are NOT reentrant. Do not use MPL_LOG in a signal handler.
+ */
+#ifndef _LIBS_CUTILS_MPL_LOG_H
+#define _LIBS_CUTILS_MPL_LOG_H
+
+#include "mltypes.h"
+#include <stdarg.h>
+
+
+#include <linux/kernel.h>
+
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Normally we strip MPL_LOGV (VERBOSE messages) from release builds.
+ * You can modify this (for example with "#define MPL_LOG_NDEBUG 0"
+ * at the top of your source file) to change that behavior.
+ */
+#ifndef MPL_LOG_NDEBUG
+#ifdef NDEBUG
+#define MPL_LOG_NDEBUG 1
+#else
+#define MPL_LOG_NDEBUG 0
+#endif
+#endif
+
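+/*
+ * These map MPL log priorities onto kernel printk level strings.
+ * MPL_LOG_PRI() below pastes "MPL_" onto the LOG_* name passed to
+ * MPL_LOG(), so LOG_WARN resolves to MPL_LOG_WARN (KERN_WARNING),
+ * and so on.
+ */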
+#define MPL_LOG_UNKNOWN MPL_LOG_VERBOSE
+#define MPL_LOG_DEFAULT KERN_DEFAULT
+#define MPL_LOG_VERBOSE KERN_CONT
+#define MPL_LOG_DEBUG KERN_NOTICE
+#define MPL_LOG_INFO KERN_INFO
+#define MPL_LOG_WARN KERN_WARNING
+#define MPL_LOG_ERROR KERN_ERR
+#define MPL_LOG_SILENT MPL_LOG_VERBOSE
+
+
+
+/*
+ * This is the local tag used for the following simplified
+ * logging macros. You can change this preprocessor definition
+ * before using the other macros to change the tag.
+ */
+#ifndef MPL_LOG_TAG
+#define MPL_LOG_TAG
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Simplified macro to send a verbose log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGV
+#if MPL_LOG_NDEBUG
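+/* Compiled out, but the if (0) keeps the format string and arguments
+ * type-checked. */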
+#define MPL_LOGV(fmt, ...) \
+ do { \
+ if (0) \
+ MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__);\
+ } while (0)
+#else
+#define MPL_LOGV(fmt, ...) MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+#endif
+
+#ifndef CONDITION
+#define CONDITION(cond) ((cond) != 0)
+#endif
+
+#ifndef MPL_LOGV_IF
+#if MPL_LOG_NDEBUG
+#define MPL_LOGV_IF(cond, fmt, ...) \
+	do { \
+		if (0) \
+			MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__);\
+	} while (0)
+#else
+#define MPL_LOGV_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+#endif
+
+/*
+ * Simplified macro to send a debug log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGD
+#define MPL_LOGD(fmt, ...) MPL_LOG(LOG_DEBUG, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+
+#ifndef MPL_LOGD_IF
+#define MPL_LOGD_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_DEBUG, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/*
+ * Simplified macro to send an info log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGI
+#define MPL_LOGI(fmt, ...) pr_info(MPL_LOG_TAG fmt, ##__VA_ARGS__)
+#endif
+
+#ifndef MPL_LOGI_IF
+#define MPL_LOGI_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_INFO, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/*
+ * Simplified macro to send a warning log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGW
+#define MPL_LOGW(fmt, ...) printk(KERN_WARNING MPL_LOG_TAG fmt, ##__VA_ARGS__)
+#endif
+
+#ifndef MPL_LOGW_IF
+#define MPL_LOGW_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_WARN, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/*
+ * Simplified macro to send an error log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGE
+#define MPL_LOGE(fmt, ...) printk(KERN_ERR MPL_LOG_TAG fmt, ##__VA_ARGS__)
+#endif
+
+#ifndef MPL_LOGE_IF
+#define MPL_LOGE_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_ERROR, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Log a fatal error. If the given condition fails, this stops program
+ * execution like a normal assertion, but also generates the given message.
+ * It is NOT stripped from release builds. Note that the condition test
+ * is -inverted- from the normal assert() semantics.
+ */
+#define MPL_LOG_ALWAYS_FATAL_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? ((void)android_printAssert(#cond, MPL_LOG_TAG, \
+ fmt, ##__VA_ARGS__)) \
+ : (void)0)
+
+#define MPL_LOG_ALWAYS_FATAL(fmt, ...) \
+ (((void)android_printAssert(NULL, MPL_LOG_TAG, fmt, ##__VA_ARGS__)))
+
+/*
+ * Versions of MPL_LOG_ALWAYS_FATAL_IF and MPL_LOG_ALWAYS_FATAL that
+ * are stripped out of release builds.
+ */
+#if MPL_LOG_NDEBUG
+#define MPL_LOG_FATAL_IF(cond, fmt, ...) \
+ do { \
+ if (0) \
+ MPL_LOG_ALWAYS_FATAL_IF(cond, fmt, ##__VA_ARGS__); \
+ } while (0)
+#define MPL_LOG_FATAL(fmt, ...) \
+ do { \
+ if (0) \
+			MPL_LOG_ALWAYS_FATAL(fmt, ##__VA_ARGS__); \
+ } while (0)
+#else
+#define MPL_LOG_FATAL_IF(cond, fmt, ...) \
+ MPL_LOG_ALWAYS_FATAL_IF(cond, fmt, ##__VA_ARGS__)
+#define MPL_LOG_FATAL(fmt, ...) \
+ MPL_LOG_ALWAYS_FATAL(fmt, ##__VA_ARGS__)
+#endif
+
+/*
+ * Assertion that generates a log message when the assertion fails.
+ * Stripped out of release builds. Uses the current MPL_LOG_TAG.
+ */
+#define MPL_LOG_ASSERT(cond, fmt, ...) \
+ MPL_LOG_FATAL_IF(!(cond), fmt, ##__VA_ARGS__)
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Basic log message macro.
+ *
+ * Example:
+ * MPL_LOG(MPL_LOG_WARN, NULL, "Failed with error %d", errno);
+ *
+ * The second argument may be NULL or "" to indicate the "global" tag.
+ */
+#ifndef MPL_LOG
+#define MPL_LOG(priority, tag, fmt, ...) \
+ MPL_LOG_PRI(priority, tag, fmt, ##__VA_ARGS__)
+#endif
+
+/*
+ * Log macro that allows you to specify a number for the priority.
+ */
+#ifndef MPL_LOG_PRI
+#define MPL_LOG_PRI(priority, tag, fmt, ...) \
+ pr_debug(MPL_##priority tag fmt, ##__VA_ARGS__)
+#endif
+
+/*
+ * Log macro that allows you to pass in a varargs ("args" is a va_list).
+ */
+#ifndef MPL_LOG_PRI_VA
+/* not allowed in the Kernel because there is no dev_dbg that takes a va_list */
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * ===========================================================================
+ *
+ * The stuff in the rest of this file should not be used directly.
+ */
+
+int _MLPrintLog(int priority, const char *tag, const char *fmt, ...);
+int _MLPrintVaLog(int priority, const char *tag, const char *fmt, va_list args);
+/* Final implementation of actual writing to a character device */
+int _MLWriteLog(const char *buf, int buflen);
+
+static inline void __print_result_location(int result,
+ const char *file,
+ const char *func, int line)
+{
+ MPL_LOGE("%s|%s|%d returning %d\n", file, func, line, result);
+}
+
+#define LOG_RESULT_LOCATION(condition) \
+ do { \
+ __print_result_location((int)(condition), __FILE__, \
+ __func__, __LINE__); \
+ } while (0)
+
+
+#endif /* _LIBS_CUTILS_MPL_LOG_H */
diff --git a/drivers/misc/inv_mpu/mldl_cfg.c b/drivers/misc/inv_mpu/mldl_cfg.c
new file mode 100644
index 000000000000..ccacc8ec0b56
--- /dev/null
+++ b/drivers/misc/inv_mpu/mldl_cfg.c
@@ -0,0 +1,1765 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup MLDL
+ *
+ * @{
+ * @file mldl_cfg.c
+ * @brief The Motion Library Driver Layer.
+ */
+
+/* -------------------------------------------------------------------------- */
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <stddef.h>
+
+#include "mldl_cfg.h"
+#include <linux/mpu.h>
+#include "mpu3050.h"
+
+#include "mlsl.h"
+#include "mldl_print_cfg.h"
+#include "log.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "mldl_cfg:"
+
+/* -------------------------------------------------------------------------- */
+
+#define SLEEP 1
+#define WAKE_UP 0
+#define RESET 1
+#define STANDBY 1
+
+/* -------------------------------------------------------------------------- */
+
+/**
+ * @brief Stop the DMP running
+ *
+ * @return INV_SUCCESS or non-zero error code
+ */
+static int dmp_stop(struct mldl_cfg *mldl_cfg, void *gyro_handle)
+{
+ unsigned char user_ctrl_reg;
+ int result;
+
+ if (mldl_cfg->inv_mpu_state->status & MPU_DMP_IS_SUSPENDED)
+ return INV_SUCCESS;
+
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, 1, &user_ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ user_ctrl_reg = (user_ctrl_reg & (~BIT_FIFO_EN)) | BIT_FIFO_RST;
+ user_ctrl_reg = (user_ctrl_reg & (~BIT_DMP_EN)) | BIT_DMP_RST;
+
+ result = inv_serial_single_write(gyro_handle,
+ mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, user_ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ mldl_cfg->inv_mpu_state->status |= MPU_DMP_IS_SUSPENDED;
+
+ return result;
+}
+
+/**
+ * @brief Starts the DMP running
+ *
+ * @return INV_SUCCESS or non-zero error code
+ */
+static int dmp_start(struct mldl_cfg *mldl_cfg, void *mlsl_handle)
+{
+ unsigned char user_ctrl_reg;
+ int result;
+
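+	/*
+	 * Nothing to do if the DMP is already in the state implied by
+	 * dmp_enable: running when enabled, suspended when disabled.
+	 */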
+ if ((!(mldl_cfg->inv_mpu_state->status & MPU_DMP_IS_SUSPENDED) &&
+ mldl_cfg->mpu_gyro_cfg->dmp_enable)
+ ||
+ ((mldl_cfg->inv_mpu_state->status & MPU_DMP_IS_SUSPENDED) &&
+ !mldl_cfg->mpu_gyro_cfg->dmp_enable))
+ return INV_SUCCESS;
+
+ result = inv_serial_read(mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, 1, &user_ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL,
+ ((user_ctrl_reg & (~BIT_FIFO_EN))
+ | BIT_FIFO_RST));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, user_ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_read(mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, 1, &user_ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ user_ctrl_reg |= BIT_DMP_EN;
+
+ if (mldl_cfg->mpu_gyro_cfg->fifo_enable)
+ user_ctrl_reg |= BIT_FIFO_EN;
+ else
+ user_ctrl_reg &= ~BIT_FIFO_EN;
+
+ user_ctrl_reg |= BIT_DMP_RST;
+
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, user_ctrl_reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ mldl_cfg->inv_mpu_state->status &= ~MPU_DMP_IS_SUSPENDED;
+
+ return result;
+}
+
+
+
+static int mpu3050_set_i2c_bypass(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle, unsigned char enable)
+{
+ unsigned char b;
+ int result;
+ unsigned char status = mldl_cfg->inv_mpu_state->status;
+
+ if ((status & MPU_GYRO_IS_BYPASSED && enable) ||
+ (!(status & MPU_GYRO_IS_BYPASSED) && !enable))
+ return INV_SUCCESS;
+
+ /*---- get current 'USER_CTRL' into b ----*/
+ result = inv_serial_read(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, 1, &b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ b &= ~BIT_AUX_IF_EN;
+
+ if (!enable) {
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL,
+ (b | BIT_AUX_IF_EN));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ } else {
+		/* Coming out of I2C is tricky due to several errata. Do not
+ * modify this algorithm
+ */
+ /*
+ * 1) wait for the right time and send the command to change
+ * the aux i2c slave address to an invalid address that will
+ * get nack'ed
+ *
+ * 0x00 is broadcast. 0x7F is unlikely to be used by any aux.
+ */
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_AUX_SLV_ADDR, 0x7F);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /*
+ * 2) wait enough time for a nack to occur, then go into
+ * bypass mode:
+ */
+ msleep(2);
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, (b));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /*
+ * 3) wait for up to one MPU cycle then restore the slave
+ * address
+ */
+ msleep(inv_mpu_get_sampling_period_us(mldl_cfg->mpu_gyro_cfg)
+ / 1000);
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_AUX_SLV_ADDR,
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_ACCEL]
+ ->address);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /*
+		 * 4) reset the aux i2c interface
+ */
+
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL,
+ (b | BIT_AUX_IF_RST));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(2);
+ }
+ if (enable)
+ mldl_cfg->inv_mpu_state->status |= MPU_GYRO_IS_BYPASSED;
+ else
+ mldl_cfg->inv_mpu_state->status &= ~MPU_GYRO_IS_BYPASSED;
+
+ return result;
+}
+
+
+/**
+ * @brief enables/disables the I2C bypass to an external device
+ * connected to MPU's secondary I2C bus.
+ * @param enable
+ * Non-zero to enable pass through.
+ * @return INV_SUCCESS if successful, a non-zero error code otherwise.
+ */
+static int mpu_set_i2c_bypass(struct mldl_cfg *mldl_cfg, void *mlsl_handle,
+ unsigned char enable)
+{
+ return mpu3050_set_i2c_bypass(mldl_cfg, mlsl_handle, enable);
+}
+
+
+#define NUM_OF_PROD_REVS (ARRAY_SIZE(prod_rev_map))
+
+/* NOTE: when not indicated, the product revision
+   is considered an 'npp' (non-production part) */
+
+struct prod_rev_map_t {
+ unsigned char silicon_rev;
+ unsigned short gyro_trim;
+};
+
+#define OLDEST_PROD_REV_SUPPORTED 11
+static struct prod_rev_map_t prod_rev_map[] = {
+ {0, 0},
+ {MPU_SILICON_REV_A4, 131}, /* 1 A? OBSOLETED */
+ {MPU_SILICON_REV_A4, 131}, /* 2 | */
+ {MPU_SILICON_REV_A4, 131}, /* 3 | */
+ {MPU_SILICON_REV_A4, 131}, /* 4 | */
+ {MPU_SILICON_REV_A4, 131}, /* 5 | */
+ {MPU_SILICON_REV_A4, 131}, /* 6 | */
+ {MPU_SILICON_REV_A4, 131}, /* 7 | */
+ {MPU_SILICON_REV_A4, 131}, /* 8 | */
+ {MPU_SILICON_REV_A4, 131}, /* 9 | */
+ {MPU_SILICON_REV_A4, 131}, /* 10 V */
+ {MPU_SILICON_REV_B1, 131}, /* 11 B1 */
+ {MPU_SILICON_REV_B1, 131}, /* 12 | */
+ {MPU_SILICON_REV_B1, 131}, /* 13 | */
+ {MPU_SILICON_REV_B1, 131}, /* 14 V */
+ {MPU_SILICON_REV_B4, 131}, /* 15 B4 */
+ {MPU_SILICON_REV_B4, 131}, /* 16 | */
+ {MPU_SILICON_REV_B4, 131}, /* 17 | */
+ {MPU_SILICON_REV_B4, 131}, /* 18 | */
+ {MPU_SILICON_REV_B4, 115}, /* 19 | */
+ {MPU_SILICON_REV_B4, 115}, /* 20 V */
+ {MPU_SILICON_REV_B6, 131}, /* 21 B6 (B6/A9) */
+ {MPU_SILICON_REV_B4, 115}, /* 22 B4 (B7/A10) */
+ {MPU_SILICON_REV_B6, 0}, /* 23 B6 */
+ {MPU_SILICON_REV_B6, 0}, /* 24 | */
+ {MPU_SILICON_REV_B6, 0}, /* 25 | */
+ {MPU_SILICON_REV_B6, 131}, /* 26 V (B6/A11) */
+};
+
+/**
+ * @internal
+ * @brief Get the silicon revision ID from OTP for MPU3050.
+ * The silicon revision number is in read from OTP bank 0,
+ * ADDR6[7:2]. The corresponding ID is retrieved by lookup
+ * in a map.
+ *
+ * @param mldl_cfg
+ * a pointer to the mldl config data structure.
+ * @param mlsl_handle
+ * an file handle to the serial communication device the
+ * device is connected to.
+ *
+ * @return 0 on success, a non-zero error code otherwise.
+ */
+static int inv_get_silicon_rev_mpu3050(
+ struct mldl_cfg *mldl_cfg, void *mlsl_handle)
+{
+ int result;
+ unsigned char index = 0x00;
+ unsigned char bank =
+ (BIT_PRFTCH_EN | BIT_CFG_USER_BANK | MPU_MEM_OTP_BANK_0);
+ unsigned short mem_addr = ((bank << 8) | 0x06);
+ struct mpu_chip_info *mpu_chip_info = mldl_cfg->mpu_chip_info;
+
+ result = inv_serial_read(mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PRODUCT_ID, 1,
+ &mpu_chip_info->product_id);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_serial_read_mem(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ mem_addr, 1, &index);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ index >>= 2;
+
+ /* clean the prefetch and cfg user bank bits */
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_BANK_SEL, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (index < OLDEST_PROD_REV_SUPPORTED || index >= NUM_OF_PROD_REVS) {
+ mpu_chip_info->silicon_revision = 0;
+ mpu_chip_info->gyro_sens_trim = 0;
+ MPL_LOGE("Unsupported Product Revision Detected : %d\n", index);
+ return INV_ERROR_INVALID_MODULE;
+ }
+
+ mpu_chip_info->product_revision = index;
+ mpu_chip_info->silicon_revision = prod_rev_map[index].silicon_rev;
+ mpu_chip_info->gyro_sens_trim = prod_rev_map[index].gyro_trim;
+ if (mpu_chip_info->gyro_sens_trim == 0) {
+ MPL_LOGE("gyro sensitivity trim is 0"
+ " - unsupported non production part.\n");
+ return INV_ERROR_INVALID_MODULE;
+ }
+
+ return result;
+}
+#define inv_get_silicon_rev inv_get_silicon_rev_mpu3050
+
+
+/**
+ * @brief  Enable / Disable the use of the MPU's secondary I2C interface level
+ * shifters.
+ * When enabled the secondary I2C interface to which the external
+ * device is connected runs at VDD voltage (main supply).
+ * When disabled the 2nd interface runs at VDDIO voltage.
+ * See the device specification for more details.
+ *
+ * @note using this API may produce unpredictable results, depending on how
+ * the MPU and slave device are setup on the target platform.
+ * Use of this API should entirely be restricted to system
+ * integrators. Once the correct value is found, there should be no
+ * need to change the level shifter at runtime.
+ *
+ * @pre Must be called after inv_serial_start().
+ * @note Typically called before inv_dmp_open().
+ *
+ * @param[in] enable:
+ * 0 to run at VDDIO (default),
+ * 1 to run at VDD.
+ *
+ * @return INV_SUCCESS if successful, a non-zero error code otherwise.
+ */
+static int inv_mpu_set_level_shifter_bit(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle, unsigned char enable)
+{
+ int result;
+ unsigned char regval;
+
+ unsigned char reg;
+ unsigned char mask;
+
+ if (0 == mldl_cfg->mpu_chip_info->silicon_revision)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ /*-- on parts before B6 the VDDIO bit is bit 7 of ACCEL_BURST_ADDR --
+ NOTE: this is incompatible with ST accelerometers where the VDDIO
+ bit MUST be set to enable ST's internal logic to autoincrement
+ the register address on burst reads --*/
+ if ((mldl_cfg->mpu_chip_info->silicon_revision & 0xf)
+ < MPU_SILICON_REV_B6) {
+ reg = MPUREG_ACCEL_BURST_ADDR;
+ mask = 0x80;
+ } else {
+ /*-- on B6 parts the VDDIO bit was moved to FIFO_EN2 =>
+ the mask is always 0x04 --*/
+ reg = MPUREG_FIFO_EN2;
+ mask = 0x04;
+ }
+
+ result = inv_serial_read(mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ reg, 1, &regval);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (enable)
+ regval |= mask;
+ else
+ regval &= ~mask;
+
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr, reg, regval);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+	return result;
+}
+
+
+/**
+ * @internal
+ * @brief This function controls the power management on the MPU device.
+ * The entire chip can be put to low power sleep mode, or individual
+ * gyros can be turned on/off.
+ *
+ *		Putting the device into sleep mode as the needs of the
+ *		associated applications change is a recommended method for
+ *		reducing power consumption. It is a safe operation in that
+ *		sleeping/waking the gyros while running will not result in
+ *		any interruption of data.
+ *
+ *		Although it is entirely allowed to put the device into full
+ *		sleep while running the DMP, it is not recommended because it
+ *		will disrupt the ongoing calculations carried on inside the
+ *		DMP and consequently the sensor fusion algorithm. Furthermore,
+ *		while in sleep mode, read and write operations from the app
+ *		processor on both registers and memory are disabled and can
+ *		only be regained by restoring the MPU to normal power mode.
+ *		Disabling any of the gyro axes will reduce the associated
+ *		power consumption from the PLL but will not stop the DMP
+ *		from running.
+ *
+ * @param reset
+ * Non-zero to reset the device. Note that this setting
+ * is volatile and the corresponding register bit will
+ * clear itself right after being applied.
+ * @param sleep
+ * Non-zero to put device into full sleep.
+ * @param disable_gx
+ * Non-zero to disable gyro X.
+ * @param disable_gy
+ * Non-zero to disable gyro Y.
+ * @param disable_gz
+ * Non-zero to disable gyro Z.
+ *
+ * @return INV_SUCCESS if successful; a non-zero error code otherwise.
+ */
+static int mpu3050_pwr_mgmt(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ unsigned char reset,
+ unsigned char sleep,
+ unsigned char disable_gx,
+ unsigned char disable_gy,
+ unsigned char disable_gz)
+{
+ unsigned char b;
+ int result;
+
+ result =
+ inv_serial_read(mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, 1, &b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* If we are awake, we need to put it in bypass before resetting */
+ if ((!(b & BIT_SLEEP)) && reset)
+ result = mpu_set_i2c_bypass(mldl_cfg, mlsl_handle, 1);
+
+ /* Reset if requested */
+ if (reset) {
+ MPL_LOGV("Reset MPU3050\n");
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, b | BIT_H_RESET);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ msleep(5);
+ /* Some chips are awake after reset and some are asleep,
+ * check the status */
+ result = inv_serial_read(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, 1, &b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ /* Update the suspended state just in case we return early */
+ if (b & BIT_SLEEP) {
+ mldl_cfg->inv_mpu_state->status |= MPU_GYRO_IS_SUSPENDED;
+ mldl_cfg->inv_mpu_state->status |= MPU_DEVICE_IS_SUSPENDED;
+ } else {
+ mldl_cfg->inv_mpu_state->status &= ~MPU_GYRO_IS_SUSPENDED;
+ mldl_cfg->inv_mpu_state->status &= ~MPU_DEVICE_IS_SUSPENDED;
+ }
+
+	/* nothing left to do if the power status already matches the request */
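+	/* The ((x != 0) * BIT_*) products below map each boolean argument
+	 * onto its status bit so the whole field can be compared at once. */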
+ if ((b & (BIT_SLEEP | BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG)) ==
+ (((sleep != 0) * BIT_SLEEP) |
+ ((disable_gx != 0) * BIT_STBY_XG) |
+ ((disable_gy != 0) * BIT_STBY_YG) |
+ ((disable_gz != 0) * BIT_STBY_ZG))) {
+ return INV_SUCCESS;
+ }
+
+ /*
+ * This specific transition between states needs to be reinterpreted:
+ * (1,1,1,1) -> (0,1,1,1) has to become
+ * (1,1,1,1) -> (1,0,0,0) -> (0,1,1,1)
+ * where
+ * (1,1,1,1) is (sleep=1,disable_gx=1,disable_gy=1,disable_gz=1)
+ */
+ if ((b & (BIT_SLEEP | BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG)) ==
+ (BIT_SLEEP | BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG)
+ && ((!sleep) && disable_gx && disable_gy && disable_gz)) {
+ result = mpu3050_pwr_mgmt(mldl_cfg, mlsl_handle, 0, 1, 0, 0, 0);
+ if (result)
+ return result;
+ b |= BIT_SLEEP;
+ b &= ~(BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG);
+ }
+
+ if ((b & BIT_SLEEP) != ((sleep != 0) * BIT_SLEEP)) {
+ if (sleep) {
+ result = mpu_set_i2c_bypass(mldl_cfg, mlsl_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ b |= BIT_SLEEP;
+ result =
+ inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ mldl_cfg->inv_mpu_state->status |=
+ MPU_GYRO_IS_SUSPENDED;
+ mldl_cfg->inv_mpu_state->status |=
+ MPU_DEVICE_IS_SUSPENDED;
+ } else {
+ b &= ~BIT_SLEEP;
+ result =
+ inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ mldl_cfg->inv_mpu_state->status &=
+ ~MPU_GYRO_IS_SUSPENDED;
+ mldl_cfg->inv_mpu_state->status &=
+ ~MPU_DEVICE_IS_SUSPENDED;
+ msleep(5);
+ }
+ }
+ /*---
+ WORKAROUND FOR PUTTING GYRO AXIS in STAND-BY MODE
+ 1) put one axis at a time in stand-by
+ ---*/
+ if ((b & BIT_STBY_XG) != ((disable_gx != 0) * BIT_STBY_XG)) {
+ b ^= BIT_STBY_XG;
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ if ((b & BIT_STBY_YG) != ((disable_gy != 0) * BIT_STBY_YG)) {
+ b ^= BIT_STBY_YG;
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ if ((b & BIT_STBY_ZG) != ((disable_gz != 0) * BIT_STBY_ZG)) {
+ b ^= BIT_STBY_ZG;
+ result = inv_serial_single_write(
+ mlsl_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, b);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ return INV_SUCCESS;
+}
+
+
+/**
+ * @brief sets the clock source for the gyros.
+ * @param mldl_cfg
+ * a pointer to the struct mldl_cfg data structure.
+ * @param gyro_handle
+ *		a handle to the serial device the gyro is assigned to.
+ *  @return INV_SUCCESS if successful, a non-zero error code otherwise.
+ */
+static int mpu_set_clock_source(void *gyro_handle, struct mldl_cfg *mldl_cfg)
+{
+ int result;
+ unsigned char cur_clk_src;
+ unsigned char reg;
+
+ /* clock source selection */
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ cur_clk_src = reg & BITS_CLKSEL;
+ reg &= ~BITS_CLKSEL;
+
+
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_PWR_MGM, mldl_cfg->mpu_gyro_cfg->clk_src | reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* TODO : workarounds to be determined and implemented */
+
+ return result;
+}
+
+/**
+ * Configures the MPU I2C Master
+ *
+ * @mldl_cfg Handle to the configuration data
+ * @gyro_handle	handle to the gyro communication interface
+ * @slave	Can be NULL if turning off the slave
+ * @slave_pdata	Can be NULL if turning off the slave
+ * @slave_id	enum ext_slave_type to determine which index to use
+ *
+ *
+ * This function configures the slaves by:
+ * 1) Setting up the read
+ * a) Read Register
+ * b) Read Length
+ * 2) Set up the data trigger (MPU6050 only)
+ * a) Set trigger write register
+ * b) Set Trigger write value
+ * 3) Set up the divider (MPU6050 only)
+ * 4) Set the slave bypass mode depending on slave
+ *
+ * returns INV_SUCCESS or non-zero error code
+ */
+static int mpu_set_slave_mpu3050(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *slave_pdata,
+ int slave_id)
+{
+ int result;
+ unsigned char reg;
+ unsigned char slave_reg;
+ unsigned char slave_len;
+ unsigned char slave_endian;
+ unsigned char slave_address;
+
+ if (slave_id != EXT_SLAVE_TYPE_ACCEL)
+ return 0;
+
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, true);
+
+ if (NULL == slave || NULL == slave_pdata) {
+ slave_reg = 0;
+ slave_len = 0;
+ slave_endian = 0;
+ slave_address = 0;
+ mldl_cfg->inv_mpu_state->i2c_slaves_enabled = 0;
+ } else {
+ slave_reg = slave->read_reg;
+ slave_len = slave->read_len;
+ slave_endian = slave->endian;
+ slave_address = slave_pdata->address;
+ mldl_cfg->inv_mpu_state->i2c_slaves_enabled = 1;
+ }
+
+ /* Address */
+ result = inv_serial_single_write(gyro_handle,
+ mldl_cfg->mpu_chip_info->addr,
+ MPUREG_AUX_SLV_ADDR, slave_address);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ /* Register */
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_ACCEL_BURST_ADDR, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ reg = ((reg & 0x80) | slave_reg);
+ result = inv_serial_single_write(gyro_handle,
+ mldl_cfg->mpu_chip_info->addr,
+ MPUREG_ACCEL_BURST_ADDR, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Length */
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ reg = (reg & ~BIT_AUX_RD_LENG);
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_USER_CTRL, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ return result;
+}
+
+
+static int mpu_set_slave(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *slave_pdata,
+ int slave_id)
+{
+ return mpu_set_slave_mpu3050(mldl_cfg, gyro_handle, slave,
+ slave_pdata, slave_id);
+}
+/**
+ * Check to see if the gyro was reset by testing a couple of registers known
+ * to change on reset.
+ *
+ * @mldl_cfg mldl configuration structure
+ * @gyro_handle handle used to communicate with the gyro
+ *
+ * @return INV_SUCCESS or non-zero error code
+ */
+static int mpu_was_reset(struct mldl_cfg *mldl_cfg, void *gyro_handle)
+{
+ int result = INV_SUCCESS;
+ unsigned char reg;
+
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_DMP_CFG_2, 1, &reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (mldl_cfg->mpu_gyro_cfg->dmp_cfg2 != reg)
+ return true;
+
+ if (0 != mldl_cfg->mpu_gyro_cfg->dmp_cfg1)
+ return false;
+
+	/* Inconclusive; assume it was reset */
+ return true;
+}
+
+
+int inv_mpu_set_firmware(struct mldl_cfg *mldl_cfg, void *mlsl_handle,
+ const unsigned char *data, int size)
+{
+ int bank, offset, write_size;
+ int result;
+ unsigned char read[MPU_MEM_BANK_SIZE];
+
+ if (mldl_cfg->inv_mpu_state->status & MPU_DEVICE_IS_SUSPENDED) {
+#if INV_CACHE_DMP == 1
+ memcpy(mldl_cfg->mpu_ram->ram, data, size);
+ return INV_SUCCESS;
+#else
+ LOG_RESULT_LOCATION(INV_ERROR_MEMORY_SET);
+ return INV_ERROR_MEMORY_SET;
+#endif
+ }
+
+ if (!(mldl_cfg->inv_mpu_state->status & MPU_DMP_IS_SUSPENDED)) {
+ LOG_RESULT_LOCATION(INV_ERROR_MEMORY_SET);
+ return INV_ERROR_MEMORY_SET;
+ }
+ /* Write and verify memory */
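+	/* The image is written one MPU_MEM_BANK_SIZE chunk at a time and
+	 * read back so each bank is verified before moving on. */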
+ for (bank = 0; size > 0; bank++,
+ size -= write_size,
+ data += write_size) {
+ if (size > MPU_MEM_BANK_SIZE)
+ write_size = MPU_MEM_BANK_SIZE;
+ else
+ write_size = size;
+
+ result = inv_serial_write_mem(mlsl_handle,
+ mldl_cfg->mpu_chip_info->addr,
+ ((bank << 8) | 0x00),
+ write_size,
+ data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ MPL_LOGE("Write mem error in bank %d\n", bank);
+ return result;
+ }
+ result = inv_serial_read_mem(mlsl_handle,
+ mldl_cfg->mpu_chip_info->addr,
+ ((bank << 8) | 0x00),
+ write_size,
+ read);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ MPL_LOGE("Read mem error in bank %d\n", bank);
+ return result;
+ }
+
+#define ML_SKIP_CHECK 20
+ for (offset = 0; offset < write_size; offset++) {
+ /* skip the register memory locations */
+ if (bank == 0 && offset < ML_SKIP_CHECK)
+ continue;
+ if (data[offset] != read[offset]) {
+ result = INV_ERROR_SERIAL_WRITE;
+ break;
+ }
+ }
+ if (result != INV_SUCCESS) {
+ LOG_RESULT_LOCATION(result);
+ MPL_LOGE("Read data mismatch at bank %d, offset %d\n",
+ bank, offset);
+ return result;
+ }
+ }
+ return INV_SUCCESS;
+}
+
+static int gyro_resume(struct mldl_cfg *mldl_cfg, void *gyro_handle,
+ unsigned long sensors)
+{
+ int result;
+ int ii;
+ unsigned char reg;
+ unsigned char regs[7];
+
+ /* Wake up the part */
+ result = mpu3050_pwr_mgmt(mldl_cfg, gyro_handle, false, false,
+ !(sensors & INV_X_GYRO),
+ !(sensors & INV_Y_GYRO),
+ !(sensors & INV_Z_GYRO));
+
+ if (!(mldl_cfg->inv_mpu_state->status & MPU_GYRO_NEEDS_CONFIG) &&
+ !mpu_was_reset(mldl_cfg, gyro_handle)) {
+ return INV_SUCCESS;
+ }
+
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_INT_CFG,
+ (mldl_cfg->mpu_gyro_cfg->int_config |
+ mldl_cfg->pdata->int_config));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_SMPLRT_DIV, mldl_cfg->mpu_gyro_cfg->divider);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = mpu_set_clock_source(gyro_handle, mldl_cfg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ reg = DLPF_FS_SYNC_VALUE(mldl_cfg->mpu_gyro_cfg->ext_sync,
+ mldl_cfg->mpu_gyro_cfg->full_scale,
+ mldl_cfg->mpu_gyro_cfg->lpf);
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_DLPF_FS_SYNC, reg);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_DMP_CFG_1, mldl_cfg->mpu_gyro_cfg->dmp_cfg1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_DMP_CFG_2, mldl_cfg->mpu_gyro_cfg->dmp_cfg2);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Write and verify memory */
+#if INV_CACHE_DMP != 0
+ inv_mpu_set_firmware(mldl_cfg, gyro_handle,
+ mldl_cfg->mpu_ram->ram, mldl_cfg->mpu_ram->length);
+#endif
+
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_XG_OFFS_TC, mldl_cfg->mpu_offsets->tc[0]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_YG_OFFS_TC, mldl_cfg->mpu_offsets->tc[1]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_single_write(
+ gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_ZG_OFFS_TC, mldl_cfg->mpu_offsets->tc[2]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ regs[0] = MPUREG_X_OFFS_USRH;
+ for (ii = 0; ii < ARRAY_SIZE(mldl_cfg->mpu_offsets->gyro); ii++) {
+ regs[1 + ii * 2] =
+ (unsigned char)(mldl_cfg->mpu_offsets->gyro[ii] >> 8)
+ & 0xff;
+ regs[1 + ii * 2 + 1] =
+ (unsigned char)(mldl_cfg->mpu_offsets->gyro[ii] & 0xff);
+ }
+ result = inv_serial_write(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ 7, regs);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Configure slaves */
+ result = inv_mpu_set_level_shifter_bit(mldl_cfg, gyro_handle,
+ mldl_cfg->pdata->level_shifter);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ mldl_cfg->inv_mpu_state->status &= ~MPU_GYRO_NEEDS_CONFIG;
+
+ return result;
+}
+
+int gyro_config(void *mlsl_handle,
+ struct mldl_cfg *mldl_cfg,
+ struct ext_slave_config *data)
+{
+ struct mpu_gyro_cfg *mpu_gyro_cfg = mldl_cfg->mpu_gyro_cfg;
+ struct mpu_chip_info *mpu_chip_info = mldl_cfg->mpu_chip_info;
+ struct mpu_offsets *mpu_offsets = mldl_cfg->mpu_offsets;
+ int ii;
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_INT_CONFIG:
+ mpu_gyro_cfg->int_config = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_EXT_SYNC:
+ mpu_gyro_cfg->ext_sync = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_FULL_SCALE:
+ mpu_gyro_cfg->full_scale = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_LPF:
+ mpu_gyro_cfg->lpf = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_CLK_SRC:
+ mpu_gyro_cfg->clk_src = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_DIVIDER:
+ mpu_gyro_cfg->divider = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_DMP_ENABLE:
+ mpu_gyro_cfg->dmp_enable = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_FIFO_ENABLE:
+ mpu_gyro_cfg->fifo_enable = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_DMP_CFG1:
+ mpu_gyro_cfg->dmp_cfg1 = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_DMP_CFG2:
+ mpu_gyro_cfg->dmp_cfg2 = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_TC:
+ for (ii = 0; ii < GYRO_NUM_AXES; ii++)
+ mpu_offsets->tc[ii] = ((__u8 *)data->data)[ii];
+ break;
+ case MPU_SLAVE_GYRO:
+ for (ii = 0; ii < GYRO_NUM_AXES; ii++)
+ mpu_offsets->gyro[ii] = ((__u16 *)data->data)[ii];
+ break;
+ case MPU_SLAVE_ADDR:
+ mpu_chip_info->addr = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_PRODUCT_REVISION:
+ mpu_chip_info->product_revision = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_SILICON_REVISION:
+ mpu_chip_info->silicon_revision = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_PRODUCT_ID:
+ mpu_chip_info->product_id = *((__u8 *)data->data);
+ break;
+ case MPU_SLAVE_GYRO_SENS_TRIM:
+ mpu_chip_info->gyro_sens_trim = *((__u16 *)data->data);
+ break;
+ case MPU_SLAVE_ACCEL_SENS_TRIM:
+ mpu_chip_info->accel_sens_trim = *((__u16 *)data->data);
+ break;
+ case MPU_SLAVE_RAM:
+ if (data->len != mldl_cfg->mpu_ram->length)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ memcpy(mldl_cfg->mpu_ram->ram, data->data, data->len);
+ break;
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+ mldl_cfg->inv_mpu_state->status |= MPU_GYRO_NEEDS_CONFIG;
+ return INV_SUCCESS;
+}
+
+int gyro_get_config(void *mlsl_handle,
+ struct mldl_cfg *mldl_cfg,
+ struct ext_slave_config *data)
+{
+ struct mpu_gyro_cfg *mpu_gyro_cfg = mldl_cfg->mpu_gyro_cfg;
+ struct mpu_chip_info *mpu_chip_info = mldl_cfg->mpu_chip_info;
+ struct mpu_offsets *mpu_offsets = mldl_cfg->mpu_offsets;
+ int ii;
+
+ if (!data->data)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_INT_CONFIG:
+ *((__u8 *)data->data) = mpu_gyro_cfg->int_config;
+ break;
+ case MPU_SLAVE_EXT_SYNC:
+ *((__u8 *)data->data) = mpu_gyro_cfg->ext_sync;
+ break;
+ case MPU_SLAVE_FULL_SCALE:
+ *((__u8 *)data->data) = mpu_gyro_cfg->full_scale;
+ break;
+ case MPU_SLAVE_LPF:
+ *((__u8 *)data->data) = mpu_gyro_cfg->lpf;
+ break;
+ case MPU_SLAVE_CLK_SRC:
+ *((__u8 *)data->data) = mpu_gyro_cfg->clk_src;
+ break;
+ case MPU_SLAVE_DIVIDER:
+ *((__u8 *)data->data) = mpu_gyro_cfg->divider;
+ break;
+ case MPU_SLAVE_DMP_ENABLE:
+ *((__u8 *)data->data) = mpu_gyro_cfg->dmp_enable;
+ break;
+ case MPU_SLAVE_FIFO_ENABLE:
+ *((__u8 *)data->data) = mpu_gyro_cfg->fifo_enable;
+ break;
+ case MPU_SLAVE_DMP_CFG1:
+ *((__u8 *)data->data) = mpu_gyro_cfg->dmp_cfg1;
+ break;
+ case MPU_SLAVE_DMP_CFG2:
+ *((__u8 *)data->data) = mpu_gyro_cfg->dmp_cfg2;
+ break;
+ case MPU_SLAVE_TC:
+ for (ii = 0; ii < GYRO_NUM_AXES; ii++)
+ ((__u8 *)data->data)[ii] = mpu_offsets->tc[ii];
+ break;
+ case MPU_SLAVE_GYRO:
+ for (ii = 0; ii < GYRO_NUM_AXES; ii++)
+ ((__u16 *)data->data)[ii] = mpu_offsets->gyro[ii];
+ break;
+ case MPU_SLAVE_ADDR:
+ *((__u8 *)data->data) = mpu_chip_info->addr;
+ break;
+ case MPU_SLAVE_PRODUCT_REVISION:
+ *((__u8 *)data->data) = mpu_chip_info->product_revision;
+ break;
+ case MPU_SLAVE_SILICON_REVISION:
+ *((__u8 *)data->data) = mpu_chip_info->silicon_revision;
+ break;
+ case MPU_SLAVE_PRODUCT_ID:
+ *((__u8 *)data->data) = mpu_chip_info->product_id;
+ break;
+ case MPU_SLAVE_GYRO_SENS_TRIM:
+ *((__u16 *)data->data) = mpu_chip_info->gyro_sens_trim;
+ break;
+ case MPU_SLAVE_ACCEL_SENS_TRIM:
+ *((__u16 *)data->data) = mpu_chip_info->accel_sens_trim;
+ break;
+ case MPU_SLAVE_RAM:
+ if (data->len != mldl_cfg->mpu_ram->length)
+ return INV_ERROR_INVALID_PARAMETER;
+
+ memcpy(data->data, mldl_cfg->mpu_ram->ram, data->len);
+ break;
+ default:
+ LOG_RESULT_LOCATION(INV_ERROR_FEATURE_NOT_IMPLEMENTED);
+ return INV_ERROR_FEATURE_NOT_IMPLEMENTED;
+	}
+
+ return INV_SUCCESS;
+}
+
+
+/*******************************************************************************
+ *******************************************************************************
+ * Exported functions
+ *******************************************************************************
+ ******************************************************************************/
+
+/**
+ * Initializes the pdata structure to defaults.
+ *
+ * Opens the device to read silicon revision, product id and whoami.
+ *
+ * @mldl_cfg
+ * The internal device configuration data structure.
+ * @mlsl_handle
+ * The serial communication handle.
+ *
+ * @return INV_SUCCESS if silicon revision, product id and whoami are supported
+ * by this software.
+ */
+int inv_mpu_open(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle, void *pressure_handle)
+{
+ int result;
+ void *slave_handle[EXT_SLAVE_NUM_TYPES];
+ int ii;
+
+ /* Default is Logic HIGH, pushpull, latch disabled, anyread to clear */
+ ii = 0;
+ mldl_cfg->inv_mpu_cfg->ignore_system_suspend = false;
+ mldl_cfg->mpu_gyro_cfg->int_config = BIT_DMP_INT_EN;
+ mldl_cfg->mpu_gyro_cfg->clk_src = MPU_CLK_SEL_PLLGYROZ;
+ mldl_cfg->mpu_gyro_cfg->lpf = MPU_FILTER_42HZ;
+ mldl_cfg->mpu_gyro_cfg->full_scale = MPU_FS_2000DPS;
+ mldl_cfg->mpu_gyro_cfg->divider = 4;
+ mldl_cfg->mpu_gyro_cfg->dmp_enable = 1;
+ mldl_cfg->mpu_gyro_cfg->fifo_enable = 1;
+ mldl_cfg->mpu_gyro_cfg->ext_sync = 0;
+ mldl_cfg->mpu_gyro_cfg->dmp_cfg1 = 0;
+ mldl_cfg->mpu_gyro_cfg->dmp_cfg2 = 0;
+ mldl_cfg->inv_mpu_state->status =
+ MPU_DMP_IS_SUSPENDED |
+ MPU_GYRO_IS_SUSPENDED |
+ MPU_ACCEL_IS_SUSPENDED |
+ MPU_COMPASS_IS_SUSPENDED |
+ MPU_PRESSURE_IS_SUSPENDED |
+ MPU_DEVICE_IS_SUSPENDED;
+ mldl_cfg->inv_mpu_state->i2c_slaves_enabled = 0;
+
+ slave_handle[EXT_SLAVE_TYPE_GYROSCOPE] = gyro_handle;
+ slave_handle[EXT_SLAVE_TYPE_ACCEL] = accel_handle;
+ slave_handle[EXT_SLAVE_TYPE_COMPASS] = compass_handle;
+ slave_handle[EXT_SLAVE_TYPE_PRESSURE] = pressure_handle;
+
+ if (mldl_cfg->mpu_chip_info->addr == 0) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Reset,
+ * Take the DMP out of sleep, and
+	 * read the product_id, silicon rev and whoami
+ */
+ mldl_cfg->inv_mpu_state->status |= MPU_GYRO_IS_BYPASSED;
+ result = mpu3050_pwr_mgmt(mldl_cfg, gyro_handle, RESET, 0, 0, 0, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ result = inv_get_silicon_rev(mldl_cfg, gyro_handle);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Get the factory temperature compensation offsets */
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_XG_OFFS_TC, 1,
+ &mldl_cfg->mpu_offsets->tc[0]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_YG_OFFS_TC, 1,
+ &mldl_cfg->mpu_offsets->tc[1]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_serial_read(gyro_handle, mldl_cfg->mpu_chip_info->addr,
+ MPUREG_ZG_OFFS_TC, 1,
+ &mldl_cfg->mpu_offsets->tc[2]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+	/* Go into bypass mode before sleeping and calling the slaves' init */
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, true);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = inv_mpu_set_level_shifter_bit(mldl_cfg, gyro_handle,
+ mldl_cfg->pdata->level_shifter);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+
+#if INV_CACHE_DMP != 0
+ result = mpu3050_pwr_mgmt(mldl_cfg, gyro_handle, 0, SLEEP, 0, 0, 0);
+#endif
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+
+ return result;
+
+}
+
+/**
+ * Close the mpu interface
+ *
+ * @mldl_cfg pointer to the configuration structure
+ * @mlsl_handle pointer to the serial layer handle
+ *
+ * @return INV_SUCCESS or non-zero error code
+ */
+int inv_mpu_close(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle)
+{
+ return 0;
+}
+
+/**
+ * @brief resume the MPU device and all the other sensor
+ * devices from their low power state.
+ *
+ * @mldl_cfg
+ * pointer to the configuration structure
+ * @gyro_handle
+ * the main file handle to the MPU device.
+ * @accel_handle
+ *		a handle to the accelerometer device, if it sits
+ *		on a separate bus. Can match gyro_handle if
+ *		the accelerometer device operates on the MPU's
+ *		primary bus.
+ * @compass_handle
+ *		a handle to the compass device, if it sits
+ *		on a separate bus. Can match gyro_handle if
+ *		the compass device operates on the MPU's
+ *		primary bus.
+ * @pressure_handle
+ *		a handle to the pressure sensor device, if it sits
+ *		on a separate bus. Can match gyro_handle if
+ *		the pressure sensor device operates on the MPU's
+ *		primary bus.
+ * @sensors
+ *		bitmask of INV_* sensor bits (INV_X/Y/Z_GYRO,
+ *		INV_THREE_AXIS_ACCEL, INV_THREE_AXIS_COMPASS,
+ *		INV_THREE_AXIS_PRESSURE, INV_DMP_PROCESSOR)
+ *		selecting which sensors, and whether the DMP,
+ *		should be resumed.
+ * @return INV_SUCCESS or a non-zero error code.
+ */
+int inv_mpu_resume(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ unsigned long sensors)
+{
+ int result = INV_SUCCESS;
+ int ii;
+ bool resume_slave[EXT_SLAVE_NUM_TYPES];
+ bool resume_dmp = sensors & INV_DMP_PROCESSOR;
+ void *slave_handle[EXT_SLAVE_NUM_TYPES];
+ resume_slave[EXT_SLAVE_TYPE_GYROSCOPE] =
+ (sensors & (INV_X_GYRO | INV_Y_GYRO | INV_Z_GYRO));
+ resume_slave[EXT_SLAVE_TYPE_ACCEL] =
+ sensors & INV_THREE_AXIS_ACCEL;
+ resume_slave[EXT_SLAVE_TYPE_COMPASS] =
+ sensors & INV_THREE_AXIS_COMPASS;
+ resume_slave[EXT_SLAVE_TYPE_PRESSURE] =
+ sensors & INV_THREE_AXIS_PRESSURE;
+
+ slave_handle[EXT_SLAVE_TYPE_GYROSCOPE] = gyro_handle;
+ slave_handle[EXT_SLAVE_TYPE_ACCEL] = accel_handle;
+ slave_handle[EXT_SLAVE_TYPE_COMPASS] = compass_handle;
+ slave_handle[EXT_SLAVE_TYPE_PRESSURE] = pressure_handle;
+
+
+ mldl_print_cfg(mldl_cfg);
+
+ /* Skip the Gyro since slave[EXT_SLAVE_TYPE_GYROSCOPE] is NULL */
+ for (ii = EXT_SLAVE_TYPE_ACCEL; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (resume_slave[ii] &&
+ ((!mldl_cfg->slave[ii]) ||
+ (!mldl_cfg->slave[ii]->resume))) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ if ((resume_slave[EXT_SLAVE_TYPE_GYROSCOPE] || resume_dmp)
+ && ((mldl_cfg->inv_mpu_state->status & MPU_GYRO_IS_SUSPENDED) ||
+ (mldl_cfg->inv_mpu_state->status & MPU_GYRO_NEEDS_CONFIG))) {
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = dmp_stop(mldl_cfg, gyro_handle);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = gyro_resume(mldl_cfg, gyro_handle, sensors);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
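+	/*
+	 * Each slave type appears to own one suspend flag in
+	 * inv_mpu_state->status at bit position (1 << type), which is what
+	 * the (1 << ii) tests below rely on.
+	 */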
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!mldl_cfg->slave[ii] ||
+ !mldl_cfg->pdata_slave[ii] ||
+ !resume_slave[ii] ||
+ !(mldl_cfg->inv_mpu_state->status & (1 << ii)))
+ continue;
+
+ if (EXT_SLAVE_BUS_SECONDARY ==
+ mldl_cfg->pdata_slave[ii]->bus) {
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle,
+ true);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ result = mldl_cfg->slave[ii]->resume(slave_handle[ii],
+ mldl_cfg->slave[ii],
+ mldl_cfg->pdata_slave[ii]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ mldl_cfg->inv_mpu_state->status &= ~(1 << ii);
+ }
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (resume_dmp &&
+ !(mldl_cfg->inv_mpu_state->status & (1 << ii)) &&
+ mldl_cfg->pdata_slave[ii] &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata_slave[ii]->bus) {
+ result = mpu_set_slave(mldl_cfg,
+ gyro_handle,
+ mldl_cfg->slave[ii],
+ mldl_cfg->pdata_slave[ii],
+ mldl_cfg->slave[ii]->type);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ }
+
+	/* Turn on the master i2c interface if necessary */
+ if (resume_dmp) {
+ result = mpu_set_i2c_bypass(
+ mldl_cfg, gyro_handle,
+ !(mldl_cfg->inv_mpu_state->i2c_slaves_enabled));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* Now start */
+ result = dmp_start(mldl_cfg, gyro_handle);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ mldl_cfg->inv_mpu_cfg->requested_sensors = sensors;
+
+ return result;
+}
+
+/**
+ * @brief suspend the MPU device and all the other sensor
+ * devices into their low power state.
+ * @mldl_cfg
+ * a pointer to the struct mldl_cfg internal data
+ * structure.
+ * @gyro_handle
+ * the main file handle to the MPU device.
+ * @accel_handle
+ *		a handle to the accelerometer device, if it sits
+ *		on a separate bus. Can match gyro_handle if
+ *		the accelerometer device operates on the MPU's
+ *		primary bus.
+ * @compass_handle
+ *		a handle to the compass device, if it sits
+ *		on a separate bus. Can match gyro_handle if
+ *		the compass device operates on the MPU's
+ *		primary bus.
+ * @pressure_handle
+ *		a handle to the pressure sensor device, if it sits
+ *		on a separate bus. Can match gyro_handle if
+ *		the pressure sensor device operates on the MPU's
+ *		primary bus.
+ * @sensors
+ *		bitmask of INV_* sensor bits (INV_X/Y/Z_GYRO,
+ *		INV_THREE_AXIS_ACCEL, INV_THREE_AXIS_COMPASS,
+ *		INV_THREE_AXIS_PRESSURE, INV_DMP_PROCESSOR)
+ *		selecting which sensors, and whether the DMP,
+ *		should be suspended.
+ * @return INV_SUCCESS or a non-zero error code.
+ */
+int inv_mpu_suspend(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ unsigned long sensors)
+{
+ int result = INV_SUCCESS;
+ int ii;
+ struct ext_slave_descr **slave = mldl_cfg->slave;
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ bool suspend_dmp = ((sensors & INV_DMP_PROCESSOR) == INV_DMP_PROCESSOR);
+ bool suspend_slave[EXT_SLAVE_NUM_TYPES];
+ void *slave_handle[EXT_SLAVE_NUM_TYPES];
+
+ suspend_slave[EXT_SLAVE_TYPE_GYROSCOPE] =
+ ((sensors & (INV_X_GYRO | INV_Y_GYRO | INV_Z_GYRO))
+ == (INV_X_GYRO | INV_Y_GYRO | INV_Z_GYRO));
+ suspend_slave[EXT_SLAVE_TYPE_ACCEL] =
+ ((sensors & INV_THREE_AXIS_ACCEL) == INV_THREE_AXIS_ACCEL);
+ suspend_slave[EXT_SLAVE_TYPE_COMPASS] =
+ ((sensors & INV_THREE_AXIS_COMPASS) == INV_THREE_AXIS_COMPASS);
+ suspend_slave[EXT_SLAVE_TYPE_PRESSURE] =
+ ((sensors & INV_THREE_AXIS_PRESSURE) ==
+ INV_THREE_AXIS_PRESSURE);
+
+ slave_handle[EXT_SLAVE_TYPE_GYROSCOPE] = gyro_handle;
+ slave_handle[EXT_SLAVE_TYPE_ACCEL] = accel_handle;
+ slave_handle[EXT_SLAVE_TYPE_COMPASS] = compass_handle;
+ slave_handle[EXT_SLAVE_TYPE_PRESSURE] = pressure_handle;
+
+ if (suspend_dmp) {
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ result = dmp_stop(mldl_cfg, gyro_handle);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ /* Gyro */
+ if (suspend_slave[EXT_SLAVE_TYPE_GYROSCOPE] &&
+ !(mldl_cfg->inv_mpu_state->status & MPU_GYRO_IS_SUSPENDED)) {
+ result = mpu3050_pwr_mgmt(
+ mldl_cfg, gyro_handle, 0,
+ suspend_dmp && suspend_slave[EXT_SLAVE_TYPE_GYROSCOPE],
+ (unsigned char)(sensors & INV_X_GYRO),
+ (unsigned char)(sensors & INV_Y_GYRO),
+ (unsigned char)(sensors & INV_Z_GYRO));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ bool is_suspended = mldl_cfg->inv_mpu_state->status & (1 << ii);
+ if (!slave[ii] || !pdata_slave[ii] ||
+ is_suspended || !suspend_slave[ii])
+ continue;
+
+ if (EXT_SLAVE_BUS_SECONDARY == pdata_slave[ii]->bus) {
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ result = slave[ii]->suspend(slave_handle[ii],
+ slave[ii],
+ pdata_slave[ii]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ if (EXT_SLAVE_BUS_SECONDARY == pdata_slave[ii]->bus) {
+ result = mpu_set_slave(mldl_cfg, gyro_handle,
+ NULL, NULL,
+ slave[ii]->type);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ mldl_cfg->inv_mpu_state->status |= (1 << ii);
+ }
+
+ /* Re-enable the i2c master if slaves are configured and the DMP stays running */
+ if (!suspend_dmp) {
+ result = mpu_set_i2c_bypass(
+ mldl_cfg, gyro_handle,
+ !(mldl_cfg->inv_mpu_state->i2c_slaves_enabled));
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ mldl_cfg->inv_mpu_cfg->requested_sensors = (~sensors) & INV_ALL_SENSORS;
+
+ return result;
+}
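+
+/*
+ * Usage sketch (illustrative, not part of this file): a caller that
+ * wants every sensor and the DMP in low power, as the /dev/mpu release
+ * path does, can pass the INV_ALL_SENSORS mask. The handles are
+ * placeholders for whatever bus adapters the platform provides.
+ *
+ * result = inv_mpu_suspend(mldl_cfg, gyro_adapter, accel_adapter,
+ * compass_adapter, pressure_adapter,
+ * INV_ALL_SENSORS);
+ */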
+
+int inv_mpu_slave_read(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *slave_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ int bypass_result;
+ int remain_bypassed = true;
+
+ if (NULL == slave || NULL == slave->read) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_CONFIGURATION);
+ return INV_ERROR_INVALID_CONFIGURATION;
+ }
+
+ if ((EXT_SLAVE_BUS_SECONDARY == pdata->bus)
+ && (!(mldl_cfg->inv_mpu_state->status & MPU_GYRO_IS_BYPASSED))) {
+ remain_bypassed = false;
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ result = slave->read(slave_handle, slave, pdata, data);
+
+ if (!remain_bypassed) {
+ bypass_result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 0);
+ if (bypass_result) {
+ LOG_RESULT_LOCATION(bypass_result);
+ return bypass_result;
+ }
+ }
+ return result;
+}
+
+int inv_mpu_slave_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *slave_handle,
+ struct ext_slave_config *data,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ int remain_bypassed = true;
+
+ if (NULL == slave || NULL == slave->config) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_CONFIGURATION);
+ return INV_ERROR_INVALID_CONFIGURATION;
+ }
+
+ if (data->apply && (EXT_SLAVE_BUS_SECONDARY == pdata->bus)
+ && (!(mldl_cfg->inv_mpu_state->status & MPU_GYRO_IS_BYPASSED))) {
+ remain_bypassed = false;
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ result = slave->config(slave_handle, slave, pdata, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (!remain_bypassed) {
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+int inv_mpu_get_slave_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *slave_handle,
+ struct ext_slave_config *data,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ int remain_bypassed = true;
+
+ if (NULL == slave || NULL == slave->get_config) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_CONFIGURATION);
+ return INV_ERROR_INVALID_CONFIGURATION;
+ }
+
+ if (data->apply && (EXT_SLAVE_BUS_SECONDARY == pdata->bus)
+ && (!(mldl_cfg->inv_mpu_state->status & MPU_GYRO_IS_BYPASSED))) {
+ remain_bypassed = false;
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 1);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+
+ result = slave->get_config(slave_handle, slave, pdata, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ if (!remain_bypassed) {
+ result = mpu_set_i2c_bypass(mldl_cfg, gyro_handle, 0);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ }
+ return result;
+}
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/mldl_cfg.h b/drivers/misc/inv_mpu/mldl_cfg.h
new file mode 100644
index 000000000000..1c23878940b7
--- /dev/null
+++ b/drivers/misc/inv_mpu/mldl_cfg.h
@@ -0,0 +1,380 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup MLDL
+ *
+ * @{
+ * @file mldl_cfg.h
+ * @brief The Motion Library Driver Layer Configuration header file.
+ */
+
+#ifndef __MLDL_CFG_H__
+#define __MLDL_CFG_H__
+
+#include "mltypes.h"
+#include "mlsl.h"
+#include <linux/mpu.h>
+#include "mpu3050.h"
+
+#include "log.h"
+
+/*************************************************************************
+ * Sensors Bit definitions
+ *************************************************************************/
+
+#define INV_X_GYRO (0x0001)
+#define INV_Y_GYRO (0x0002)
+#define INV_Z_GYRO (0x0004)
+#define INV_DMP_PROCESSOR (0x0008)
+
+#define INV_X_ACCEL (0x0010)
+#define INV_Y_ACCEL (0x0020)
+#define INV_Z_ACCEL (0x0040)
+
+#define INV_X_COMPASS (0x0080)
+#define INV_Y_COMPASS (0x0100)
+#define INV_Z_COMPASS (0x0200)
+
+#define INV_X_PRESSURE (0x0400)
+#define INV_Y_PRESSURE (0x0800)
+#define INV_Z_PRESSURE (0x1000)
+
+#define INV_TEMPERATURE (0x2000)
+#define INV_TIME (0x4000)
+
+#define INV_THREE_AXIS_GYRO (0x000F)
+#define INV_THREE_AXIS_ACCEL (0x0070)
+#define INV_THREE_AXIS_COMPASS (0x0380)
+#define INV_THREE_AXIS_PRESSURE (0x1C00)
+
+#define INV_FIVE_AXIS (0x007B)
+#define INV_SIX_AXIS_GYRO_ACCEL (0x007F)
+#define INV_SIX_AXIS_ACCEL_COMPASS (0x03F0)
+#define INV_NINE_AXIS (0x03FF)
+#define INV_ALL_SENSORS (0x7FFF)
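+
+/*
+ * Illustrative note: the aggregate masks above are plain ORs of the
+ * per-axis bits, e.g. INV_THREE_AXIS_ACCEL == INV_X_ACCEL | INV_Y_ACCEL
+ * | INV_Z_ACCEL, and INV_THREE_AXIS_GYRO additionally includes
+ * INV_DMP_PROCESSOR. A hypothetical six-axis gyro+accel request could
+ * therefore be composed as:
+ *
+ * unsigned long sensors = INV_THREE_AXIS_GYRO | INV_THREE_AXIS_ACCEL;
+ */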
+
+#define MPL_PROD_KEY(ver, rev) (ver * 100 + rev)
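+/* Example (illustrative): MPL_PROD_KEY(8, 3) evaluates to 8 * 100 + 3 == 803. */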
+
+/* -------------------------------------------------------------------------- */
+struct mpu_ram {
+ __u16 length;
+ __u8 *ram;
+};
+
+struct mpu_gyro_cfg {
+ __u8 int_config;
+ __u8 ext_sync;
+ __u8 full_scale;
+ __u8 lpf;
+ __u8 clk_src;
+ __u8 divider;
+ __u8 dmp_enable;
+ __u8 fifo_enable;
+ __u8 dmp_cfg1;
+ __u8 dmp_cfg2;
+};
+
+/* Offset registers that can be calibrated */
+struct mpu_offsets {
+ __u8 tc[GYRO_NUM_AXES];
+ __u16 gyro[GYRO_NUM_AXES];
+};
+
+/* Chip related information that can be read and verified */
+struct mpu_chip_info {
+ __u8 addr;
+ __u8 product_revision;
+ __u8 silicon_revision;
+ __u8 product_id;
+ __u16 gyro_sens_trim;
+ /* Only used for MPU6050 */
+ __u16 accel_sens_trim;
+};
+
+
+struct inv_mpu_cfg {
+ __u32 requested_sensors;
+ __u8 ignore_system_suspend;
+};
+
+/* Driver related state information */
+struct inv_mpu_state {
+#define MPU_GYRO_IS_SUSPENDED (0x01 << EXT_SLAVE_TYPE_GYROSCOPE)
+#define MPU_ACCEL_IS_SUSPENDED (0x01 << EXT_SLAVE_TYPE_ACCEL)
+#define MPU_COMPASS_IS_SUSPENDED (0x01 << EXT_SLAVE_TYPE_COMPASS)
+#define MPU_PRESSURE_IS_SUSPENDED (0x01 << EXT_SLAVE_TYPE_PRESSURE)
+#define MPU_GYRO_IS_BYPASSED (0x10)
+#define MPU_DMP_IS_SUSPENDED (0x20)
+#define MPU_GYRO_NEEDS_CONFIG (0x40)
+#define MPU_DEVICE_IS_SUSPENDED (0x80)
+ __u8 status;
+ /* 0-1 for 3050, bitfield of BIT_SLVx_DLY_EN, x = [0..4] */
+ __u8 i2c_slaves_enabled;
+};
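+
+/*
+ * Usage sketch (illustrative; "state" stands for a pointer to a
+ * struct inv_mpu_state): the low bits of status carry one
+ * "is suspended" flag per slave type, indexed as (1 << EXT_SLAVE_TYPE_x),
+ * and the aliases above name them for direct tests:
+ *
+ * bool gyro_down = state->status & MPU_GYRO_IS_SUSPENDED;
+ * bool accel_down = state->status & MPU_ACCEL_IS_SUSPENDED;
+ */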
+
+/* Driver-layer configuration for the MPU and its slave sensors */
+struct mldl_cfg {
+ struct mpu_ram *mpu_ram;
+ struct mpu_gyro_cfg *mpu_gyro_cfg;
+ struct mpu_offsets *mpu_offsets;
+ struct mpu_chip_info *mpu_chip_info;
+
+ /* MPU Related stored status and info */
+ struct inv_mpu_cfg *inv_mpu_cfg;
+ struct inv_mpu_state *inv_mpu_state;
+
+ /* Slave related information */
+ struct ext_slave_descr *slave[EXT_SLAVE_NUM_TYPES];
+ /* Platform Data */
+ struct mpu_platform_data *pdata;
+ struct ext_slave_platform_data *pdata_slave[EXT_SLAVE_NUM_TYPES];
+};
+
+/* -------------------------------------------------------------------------- */
+
+int inv_mpu_open(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle);
+int inv_mpu_close(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle);
+int inv_mpu_resume(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ unsigned long sensors);
+int inv_mpu_suspend(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ unsigned long sensors);
+int inv_mpu_set_firmware(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ const unsigned char *data,
+ int size);
+
+/* -------------------------------------------------------------------------- */
+/* Slave Read functions */
+int inv_mpu_slave_read(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *slave_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data);
+static inline int inv_mpu_read_accel(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle, unsigned char *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_slave_read(
+ mldl_cfg, gyro_handle, accel_handle,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_ACCEL],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_ACCEL],
+ data);
+}
+
+static inline int inv_mpu_read_compass(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *compass_handle,
+ unsigned char *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_slave_read(
+ mldl_cfg, gyro_handle, compass_handle,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_COMPASS],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_COMPASS],
+ data);
+}
+
+static inline int inv_mpu_read_pressure(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *pressure_handle,
+ unsigned char *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_slave_read(
+ mldl_cfg, gyro_handle, pressure_handle,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_PRESSURE],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
+ data);
+}
+
+int gyro_config(void *mlsl_handle,
+ struct mldl_cfg *mldl_cfg,
+ struct ext_slave_config *data);
+
+/* Slave Config functions */
+int inv_mpu_slave_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *slave_handle,
+ struct ext_slave_config *data,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata);
+static inline int inv_mpu_config_accel(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ struct ext_slave_config *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_slave_config(
+ mldl_cfg, gyro_handle, accel_handle, data,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_ACCEL],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_ACCEL]);
+}
+
+static inline int inv_mpu_config_compass(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *compass_handle,
+ struct ext_slave_config *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_slave_config(
+ mldl_cfg, gyro_handle, compass_handle, data,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_COMPASS],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_COMPASS]);
+}
+
+static inline int inv_mpu_config_pressure(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *pressure_handle,
+ struct ext_slave_config *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_slave_config(
+ mldl_cfg, gyro_handle, pressure_handle, data,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_PRESSURE],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_PRESSURE]);
+}
+
+int gyro_get_config(void *mlsl_handle,
+ struct mldl_cfg *mldl_cfg,
+ struct ext_slave_config *data);
+
+/* Slave get config functions */
+int inv_mpu_get_slave_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *slave_handle,
+ struct ext_slave_config *data,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata);
+
+static inline int inv_mpu_get_accel_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ struct ext_slave_config *data)
+{
+ if (!mldl_cfg) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_get_slave_config(
+ mldl_cfg, gyro_handle, accel_handle, data,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_ACCEL],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_ACCEL]);
+}
+
+static inline int inv_mpu_get_compass_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *compass_handle,
+ struct ext_slave_config *data)
+{
+ if (!mldl_cfg || !(mldl_cfg->pdata)) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_get_slave_config(
+ mldl_cfg, gyro_handle, compass_handle, data,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_COMPASS],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_COMPASS]);
+}
+
+static inline int inv_mpu_get_pressure_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *pressure_handle,
+ struct ext_slave_config *data)
+{
+ if (!mldl_cfg || !(mldl_cfg->pdata)) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ return inv_mpu_get_slave_config(
+ mldl_cfg, gyro_handle, pressure_handle, data,
+ mldl_cfg->slave[EXT_SLAVE_TYPE_PRESSURE],
+ mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_PRESSURE]);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline
+long inv_mpu_get_sampling_rate_hz(struct mpu_gyro_cfg *gyro_cfg)
+{
+ if (((gyro_cfg->lpf) == 0) || ((gyro_cfg->lpf) == 7))
+ return 8000L / (gyro_cfg->divider + 1);
+ else
+ return 1000L / (gyro_cfg->divider + 1);
+}
+
+static inline
+long inv_mpu_get_sampling_period_us(struct mpu_gyro_cfg *gyro_cfg)
+{
+ if (((gyro_cfg->lpf) == 0) || ((gyro_cfg->lpf) == 7))
+ return (long) (1000000L * (gyro_cfg->divider + 1)) / 8000L;
+ else
+ return (long) (1000000L * (gyro_cfg->divider + 1)) / 1000L;
+}
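+
+/*
+ * Worked example (illustrative): with lpf == 1 the 1 kHz base rate
+ * applies, so divider == 7 gives 1000 / (7 + 1) = 125 Hz and
+ * 1000000 * 8 / 1000 = 8000 us per sample; with lpf == 0 or 7 the
+ * 8 kHz base applies and the same divider gives 1000 Hz / 1000 us.
+ */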
+
+#endif /* __MLDL_CFG_H__ */
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/mldl_print_cfg.c b/drivers/misc/inv_mpu/mldl_print_cfg.c
new file mode 100644
index 000000000000..e2b8d30cebaa
--- /dev/null
+++ b/drivers/misc/inv_mpu/mldl_print_cfg.c
@@ -0,0 +1,137 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup MLDL
+ *
+ * @{
+ * @file mldl_print_cfg.c
+ * @brief The Motion Library Driver Layer.
+ */
+
+#include <stddef.h>
+#include "mldl_cfg.h"
+#include "mlsl.h"
+#include "linux/mpu.h"
+
+#include "log.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "mldl_print_cfg:"
+
+#undef MPL_LOG_NDEBUG
+#define MPL_LOG_NDEBUG 1
+
+void mldl_print_cfg(struct mldl_cfg *mldl_cfg)
+{
+ struct mpu_gyro_cfg *mpu_gyro_cfg = mldl_cfg->mpu_gyro_cfg;
+ struct mpu_offsets *mpu_offsets = mldl_cfg->mpu_offsets;
+ struct mpu_chip_info *mpu_chip_info = mldl_cfg->mpu_chip_info;
+ struct inv_mpu_cfg *inv_mpu_cfg = mldl_cfg->inv_mpu_cfg;
+ struct inv_mpu_state *inv_mpu_state = mldl_cfg->inv_mpu_state;
+ struct ext_slave_descr **slave = mldl_cfg->slave;
+ struct mpu_platform_data *pdata = mldl_cfg->pdata;
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ int ii;
+
+ /* mpu_gyro_cfg */
+ MPL_LOGV("int_config = %02x\n", mpu_gyro_cfg->int_config);
+ MPL_LOGV("ext_sync = %02x\n", mpu_gyro_cfg->ext_sync);
+ MPL_LOGV("full_scale = %02x\n", mpu_gyro_cfg->full_scale);
+ MPL_LOGV("lpf = %02x\n", mpu_gyro_cfg->lpf);
+ MPL_LOGV("clk_src = %02x\n", mpu_gyro_cfg->clk_src);
+ MPL_LOGV("divider = %02x\n", mpu_gyro_cfg->divider);
+ MPL_LOGV("dmp_enable = %02x\n", mpu_gyro_cfg->dmp_enable);
+ MPL_LOGV("fifo_enable = %02x\n", mpu_gyro_cfg->fifo_enable);
+ MPL_LOGV("dmp_cfg1 = %02x\n", mpu_gyro_cfg->dmp_cfg1);
+ MPL_LOGV("dmp_cfg2 = %02x\n", mpu_gyro_cfg->dmp_cfg2);
+ /* mpu_offsets */
+ MPL_LOGV("tc[0] = %02x\n", mpu_offsets->tc[0]);
+ MPL_LOGV("tc[1] = %02x\n", mpu_offsets->tc[1]);
+ MPL_LOGV("tc[2] = %02x\n", mpu_offsets->tc[2]);
+ MPL_LOGV("gyro[0] = %04x\n", mpu_offsets->gyro[0]);
+ MPL_LOGV("gyro[1] = %04x\n", mpu_offsets->gyro[1]);
+ MPL_LOGV("gyro[2] = %04x\n", mpu_offsets->gyro[2]);
+
+ /* mpu_chip_info */
+ MPL_LOGV("addr = %02x\n", mldl_cfg->mpu_chip_info->addr);
+
+ MPL_LOGV("silicon_revision = %02x\n", mpu_chip_info->silicon_revision);
+ MPL_LOGV("product_revision = %02x\n", mpu_chip_info->product_revision);
+ MPL_LOGV("product_id = %02x\n", mpu_chip_info->product_id);
+ MPL_LOGV("gyro_sens_trim = %02x\n", mpu_chip_info->gyro_sens_trim);
+
+ MPL_LOGV("requested_sensors = %04x\n", inv_mpu_cfg->requested_sensors);
+ MPL_LOGV("ignore_system_suspend= %04x\n",
+ inv_mpu_cfg->ignore_system_suspend);
+ MPL_LOGV("status = %04x\n", inv_mpu_state->status);
+ MPL_LOGV("i2c_slaves_enabled= %04x\n",
+ inv_mpu_state->i2c_slaves_enabled);
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!slave[ii])
+ continue;
+ MPL_LOGV("SLAVE %d:\n", ii);
+ MPL_LOGV(" suspend = %02x\n", (int)slave[ii]->suspend);
+ MPL_LOGV(" resume = %02x\n", (int)slave[ii]->resume);
+ MPL_LOGV(" read = %02x\n", (int)slave[ii]->read);
+ MPL_LOGV(" type = %02x\n", slave[ii]->type);
+ MPL_LOGV(" reg = %02x\n", slave[ii]->read_reg);
+ MPL_LOGV(" len = %02x\n", slave[ii]->read_len);
+ MPL_LOGV(" endian = %02x\n", slave[ii]->endian);
+ MPL_LOGV(" range.mantissa= %02x\n",
+ slave[ii]->range.mantissa);
+ MPL_LOGV(" range.fraction= %02x\n",
+ slave[ii]->range.fraction);
+ }
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ continue;
+ MPL_LOGV("PDATA_SLAVE[%d]\n", ii);
+ MPL_LOGV(" irq = %02x\n", pdata_slave[ii]->irq);
+ MPL_LOGV(" adapt_num = %02x\n", pdata_slave[ii]->adapt_num);
+ MPL_LOGV(" bus = %02x\n", pdata_slave[ii]->bus);
+ MPL_LOGV(" address = %02x\n", pdata_slave[ii]->address);
+ MPL_LOGV(" orientation=\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n",
+ pdata_slave[ii]->orientation[0],
+ pdata_slave[ii]->orientation[1],
+ pdata_slave[ii]->orientation[2],
+ pdata_slave[ii]->orientation[3],
+ pdata_slave[ii]->orientation[4],
+ pdata_slave[ii]->orientation[5],
+ pdata_slave[ii]->orientation[6],
+ pdata_slave[ii]->orientation[7],
+ pdata_slave[ii]->orientation[8]);
+ }
+
+ MPL_LOGV("pdata->int_config = %02x\n", pdata->int_config);
+ MPL_LOGV("pdata->level_shifter = %02x\n", pdata->level_shifter);
+ MPL_LOGV("pdata->orientation =\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n",
+ pdata->orientation[0], pdata->orientation[1],
+ pdata->orientation[2], pdata->orientation[3],
+ pdata->orientation[4], pdata->orientation[5],
+ pdata->orientation[6], pdata->orientation[7],
+ pdata->orientation[8]);
+}
diff --git a/drivers/misc/inv_mpu/mldl_print_cfg.h b/drivers/misc/inv_mpu/mldl_print_cfg.h
new file mode 100644
index 000000000000..2e1911440cca
--- /dev/null
+++ b/drivers/misc/inv_mpu/mldl_print_cfg.h
@@ -0,0 +1,38 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup MLDL
+ *
+ * @{
+ * @file mldl_print_cfg.h
+ * @brief Debug helper that dumps the Motion Library Driver
+ * Layer configuration to the verbose log.
+ */
+#ifndef __MLDL_PRINT_CFG__
+#define __MLDL_PRINT_CFG__
+
+#include "mldl_cfg.h"
+
+
+void mldl_print_cfg(struct mldl_cfg *mldl_cfg);
+
+#endif /* __MLDL_PRINT_CFG__ */
diff --git a/drivers/misc/inv_mpu/mlsl-kernel.c b/drivers/misc/inv_mpu/mlsl-kernel.c
new file mode 100644
index 000000000000..19adf5182c00
--- /dev/null
+++ b/drivers/misc/inv_mpu/mlsl-kernel.c
@@ -0,0 +1,420 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#include "mlsl.h"
+#include <linux/i2c.h>
+#include "log.h"
+#include "mpu3050.h"
+
+static int inv_i2c_write(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned int len, unsigned char const *data)
+{
+ struct i2c_msg msgs[1];
+ int res;
+
+ if (!data || !i2c_adap) {
+ LOG_RESULT_LOCATION(-EINVAL);
+ return -EINVAL;
+ }
+
+ msgs[0].addr = address;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = (unsigned char *)data;
+ msgs[0].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 1);
+ if (res < 1) {
+ if (res == 0)
+ res = -EIO;
+ LOG_RESULT_LOCATION(res);
+ return res;
+ } else
+ return 0;
+}
+
+static int inv_i2c_write_register(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned char reg, unsigned char value)
+{
+ unsigned char data[2];
+
+ data[0] = reg;
+ data[1] = value;
+ return inv_i2c_write(i2c_adap, address, 2, data);
+}
+
+static int inv_i2c_read(struct i2c_adapter *i2c_adap,
+ unsigned char address, unsigned char reg,
+ unsigned int len, unsigned char *data)
+{
+ struct i2c_msg msgs[2];
+ int res;
+
+ if (!data || !i2c_adap) {
+ LOG_RESULT_LOCATION(-EINVAL);
+ return -EINVAL;
+ }
+
+ msgs[0].addr = address;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = &reg;
+ msgs[0].len = 1;
+
+ msgs[1].addr = address;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = data;
+ msgs[1].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 2);
+ if (res < 2) {
+ if (res >= 0)
+ res = -EIO;
+ LOG_RESULT_LOCATION(res);
+ return res;
+ } else
+ return 0;
+}
+
+static int mpu_memory_read(struct i2c_adapter *i2c_adap,
+ unsigned char mpu_addr,
+ unsigned short mem_addr,
+ unsigned int len, unsigned char *data)
+{
+ unsigned char bank[2];
+ unsigned char addr[2];
+ unsigned char buf;
+
+ struct i2c_msg msgs[4];
+ int res;
+
+ if (!data || !i2c_adap) {
+ LOG_RESULT_LOCATION(-EINVAL);
+ return -EINVAL;
+ }
+
+ bank[0] = MPUREG_BANK_SEL;
+ bank[1] = mem_addr >> 8;
+
+ addr[0] = MPUREG_MEM_START_ADDR;
+ addr[1] = mem_addr & 0xFF;
+
+ buf = MPUREG_MEM_R_W;
+
+ /* write message */
+ msgs[0].addr = mpu_addr;
+ msgs[0].flags = 0;
+ msgs[0].buf = bank;
+ msgs[0].len = sizeof(bank);
+
+ msgs[1].addr = mpu_addr;
+ msgs[1].flags = 0;
+ msgs[1].buf = addr;
+ msgs[1].len = sizeof(addr);
+
+ msgs[2].addr = mpu_addr;
+ msgs[2].flags = 0;
+ msgs[2].buf = &buf;
+ msgs[2].len = 1;
+
+ msgs[3].addr = mpu_addr;
+ msgs[3].flags = I2C_M_RD;
+ msgs[3].buf = data;
+ msgs[3].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 4);
+ if (res != 4) {
+ if (res >= 0)
+ res = -EIO;
+ LOG_RESULT_LOCATION(res);
+ return res;
+ } else
+ return 0;
+}
+
+static int mpu_memory_write(struct i2c_adapter *i2c_adap,
+ unsigned char mpu_addr,
+ unsigned short mem_addr,
+ unsigned int len, unsigned char const *data)
+{
+ unsigned char bank[2];
+ unsigned char addr[2];
+ unsigned char buf[513];
+
+ struct i2c_msg msgs[3];
+ int res;
+
+ if (!data || !i2c_adap) {
+ LOG_RESULT_LOCATION(-EINVAL);
+ return -EINVAL;
+ }
+ if (len >= (sizeof(buf) - 1)) {
+ LOG_RESULT_LOCATION(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ bank[0] = MPUREG_BANK_SEL;
+ bank[1] = mem_addr >> 8;
+
+ addr[0] = MPUREG_MEM_START_ADDR;
+ addr[1] = mem_addr & 0xFF;
+
+ buf[0] = MPUREG_MEM_R_W;
+ memcpy(buf + 1, data, len);
+
+ /* write message */
+ msgs[0].addr = mpu_addr;
+ msgs[0].flags = 0;
+ msgs[0].buf = bank;
+ msgs[0].len = sizeof(bank);
+
+ msgs[1].addr = mpu_addr;
+ msgs[1].flags = 0;
+ msgs[1].buf = addr;
+ msgs[1].len = sizeof(addr);
+
+ msgs[2].addr = mpu_addr;
+ msgs[2].flags = 0;
+ msgs[2].buf = (unsigned char *)buf;
+ msgs[2].len = len + 1;
+
+ res = i2c_transfer(i2c_adap, msgs, 3);
+ if (res != 3) {
+ if (res >= 0)
+ res = -EIO;
+ LOG_RESULT_LOCATION(res);
+ return res;
+ } else
+ return 0;
+}
+
+int inv_serial_single_write(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned char register_addr,
+ unsigned char data)
+{
+ return inv_i2c_write_register((struct i2c_adapter *)sl_handle,
+ slave_addr, register_addr, data);
+}
+EXPORT_SYMBOL(inv_serial_single_write);
+
+int inv_serial_write(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short length,
+ unsigned char const *data)
+{
+ int result;
+ const unsigned short data_length = length - 1;
+ const unsigned char start_reg_addr = data[0];
+ unsigned char i2c_write[SERIAL_MAX_TRANSFER_SIZE + 1];
+ unsigned short bytes_written = 0;
+
+ while (bytes_written < data_length) {
+ unsigned short this_len = min(SERIAL_MAX_TRANSFER_SIZE,
+ data_length - bytes_written);
+ if (bytes_written == 0) {
+ result = inv_i2c_write((struct i2c_adapter *)
+ sl_handle, slave_addr,
+ 1 + this_len, data);
+ } else {
+ /* manually increment register addr between chunks */
+ i2c_write[0] = start_reg_addr + bytes_written;
+ memcpy(&i2c_write[1], &data[1 + bytes_written],
+ this_len);
+ result = inv_i2c_write((struct i2c_adapter *)
+ sl_handle, slave_addr,
+ 1 + this_len, i2c_write);
+ }
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ bytes_written += this_len;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(inv_serial_write);
+
+int inv_serial_read(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned char register_addr,
+ unsigned short length,
+ unsigned char *data)
+{
+ int result;
+ unsigned short bytes_read = 0;
+
+ if ((slave_addr & 0x7E) == DEFAULT_MPU_SLAVEADDR
+ && (register_addr == MPUREG_FIFO_R_W ||
+ register_addr == MPUREG_MEM_R_W)) {
+ LOG_RESULT_LOCATION(INV_ERROR_INVALID_PARAMETER);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+
+ while (bytes_read < length) {
+ unsigned short this_len =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytes_read);
+ result = inv_i2c_read((struct i2c_adapter *)sl_handle,
+ slave_addr, register_addr + bytes_read,
+ this_len, &data[bytes_read]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ bytes_read += this_len;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(inv_serial_read);
+
+int inv_serial_write_mem(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short mem_addr,
+ unsigned short length,
+ unsigned char const *data)
+{
+ int result;
+ unsigned short bytes_written = 0;
+
+ if ((mem_addr & 0xFF) + length > MPU_MEM_BANK_SIZE) {
+ pr_err("memory read length (%d B) extends beyond its"
+ " limits (%d) if started at location %d\n", length,
+ MPU_MEM_BANK_SIZE, mem_addr & 0xFF);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ while (bytes_written < length) {
+ unsigned short this_len =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytes_written);
+ result = mpu_memory_write((struct i2c_adapter *)sl_handle,
+ slave_addr, mem_addr + bytes_written,
+ this_len, &data[bytes_written]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ bytes_written += this_len;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(inv_serial_write_mem);
+
+int inv_serial_read_mem(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short mem_addr,
+ unsigned short length,
+ unsigned char *data)
+{
+ int result;
+ unsigned short bytes_read = 0;
+
+ if ((mem_addr & 0xFF) + length > MPU_MEM_BANK_SIZE) {
+ printk
+ ("memory read length (%d B) extends beyond its limits (%d) "
+ "if started at location %d\n", length,
+ MPU_MEM_BANK_SIZE, mem_addr & 0xFF);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ while (bytes_read < length) {
+ unsigned short this_len =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytes_read);
+ result =
+ mpu_memory_read((struct i2c_adapter *)sl_handle,
+ slave_addr, mem_addr + bytes_read,
+ this_len, &data[bytes_read]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ bytes_read += this_len;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(inv_serial_read_mem);
+
+int inv_serial_write_fifo(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short length,
+ unsigned char const *data)
+{
+ int result;
+ unsigned char i2c_write[SERIAL_MAX_TRANSFER_SIZE + 1];
+ unsigned short bytes_written = 0;
+
+ if (length > FIFO_HW_SIZE) {
+ printk(KERN_ERR
+ "maximum fifo write length is %d\n", FIFO_HW_SIZE);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ while (bytes_written < length) {
+ unsigned short this_len =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytes_written);
+ i2c_write[0] = MPUREG_FIFO_R_W;
+ memcpy(&i2c_write[1], &data[bytes_written], this_len);
+ result = inv_i2c_write((struct i2c_adapter *)sl_handle,
+ slave_addr, this_len + 1, i2c_write);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ bytes_written += this_len;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(inv_serial_write_fifo);
+
+int inv_serial_read_fifo(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short length,
+ unsigned char *data)
+{
+ int result;
+ unsigned short bytes_read = 0;
+
+ if (length > FIFO_HW_SIZE) {
+ printk(KERN_ERR
+ "maximum fifo read length is %d\n", FIFO_HW_SIZE);
+ return INV_ERROR_INVALID_PARAMETER;
+ }
+ while (bytes_read < length) {
+ unsigned short this_len =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytes_read);
+ result = inv_i2c_read((struct i2c_adapter *)sl_handle,
+ slave_addr, MPUREG_FIFO_R_W, this_len,
+ &data[bytes_read]);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ bytes_read += this_len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(inv_serial_read_fifo);
+
+/**
+ * @}
+ */
diff --git a/drivers/misc/inv_mpu/mlsl.h b/drivers/misc/inv_mpu/mlsl.h
new file mode 100644
index 000000000000..204baedc1e20
--- /dev/null
+++ b/drivers/misc/inv_mpu/mlsl.h
@@ -0,0 +1,186 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MLSL_H__
+#define __MLSL_H__
+
+/**
+ * @defgroup MLSL
+ * @brief Motion Library - Serial Layer.
+ * The Motion Library System Layer provides the Motion Library
+ * with the communication interface to the hardware.
+ *
+ * The communication interface is assumed to support serial
+ * transfers in bursts of variable length up to
+ * SERIAL_MAX_TRANSFER_SIZE.
+ * The default value for SERIAL_MAX_TRANSFER_SIZE is 128 bytes.
+ * Transfers longer than SERIAL_MAX_TRANSFER_SIZE will be
+ * subdivided into smaller transfers of length <=
+ * SERIAL_MAX_TRANSFER_SIZE.
+ * The SERIAL_MAX_TRANSFER_SIZE definition can be modified to
+ * overcome any host processor transfer size limitation, down to
+ * 1 B, the minimum.
+ * A higher value for SERIAL_MAX_TRANSFER_SIZE favors
+ * performance and efficiency while requiring more resources
+ * (mostly buffering). A smaller value increases overhead and
+ * decreases efficiency but allows operation with more resource
+ * constrained processors and master serial controllers.
+ * The SERIAL_MAX_TRANSFER_SIZE definition can be found in the
+ * mlsl.h header file.
+ *
+ * @{
+ * @file mlsl.h
+ * @brief The Motion Library System Layer.
+ *
+ */
+
+#include "mltypes.h"
+#include <linux/mpu.h>
+
+
+/*
+ * NOTE: to properly support Yamaha compass reads,
+ * the max transfer size should be at least 9 B.
+ * Length in bytes, typically a power of 2 >= 2.
+ */
+#define SERIAL_MAX_TRANSFER_SIZE 128
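+
+/*
+ * Illustrative note: the inv_serial_* helpers declared below split any
+ * longer request into chunks of at most SERIAL_MAX_TRANSFER_SIZE bytes.
+ * A hypothetical 300-byte register burst read, for example, is issued
+ * as three transfers of 128, 128 and 44 bytes, with the register
+ * address advanced by the bytes already read between chunks.
+ */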
+
+
+/**
+ * inv_serial_single_write() - used to write a single byte of data.
+ * @sl_handle pointer to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @register_addr Register address to write.
+ * @data Single byte of data to write.
+ *
+ * It is called by the MPL to write a single byte of data to the MPU.
+ *
+ * returns INV_SUCCESS if successful, a non-zero error code otherwise.
+ */
+int inv_serial_single_write(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned char register_addr,
+ unsigned char data);
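+
+/*
+ * Illustrative call (the slave address and register value here are
+ * hypothetical, not taken from this driver):
+ *
+ * result = inv_serial_single_write(i2c_adapter, 0x68,
+ * MPUREG_BANK_SEL, 0x00);
+ */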
+
+/**
+ * inv_serial_write() - used to write multiple bytes of data to registers.
+ * @sl_handle a file handle to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @register_addr Register address to write.
+ * @length Length of burst of data.
+ * @data Pointer to block of data.
+ *
+ * returns INV_SUCCESS if successful, a non-zero error code otherwise.
+ */
+int inv_serial_write(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short length,
+ unsigned char const *data);
+
+/**
+ * inv_serial_read() - used to read multiple bytes of data from registers.
+ * @sl_handle a file handle to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @register_addr Register address to read.
+ * @length Length of burst of data.
+ * @data Pointer to block of data.
+ *
+ * returns INV_SUCCESS == 0 if successful; a non-zero error code otherwise.
+ */
+int inv_serial_read(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned char register_addr,
+ unsigned short length,
+ unsigned char *data);
+
+/**
+ * inv_serial_read_mem() - used to read multiple bytes of data from the memory.
+ * The transfer is carried out over the serial interface (I2C or SPI).
+ *
+ * @sl_handle a file handle to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @mem_addr The location in the memory to read from.
+ * @length Length of burst data.
+ * @data Pointer to block of data.
+ *
+ * returns INV_SUCCESS == 0 if successful; a non-zero error code otherwise.
+ */
+int inv_serial_read_mem(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short mem_addr,
+ unsigned short length,
+ unsigned char *data);
+
+/**
+ * inv_serial_write_mem() - used to write multiple bytes of data to the memory.
+ * @sl_handle a file handle to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @mem_addr The location in the memory to write to.
+ * @length Length of burst data.
+ * @data Pointer to block of data.
+ *
+ * returns INV_SUCCESS == 0 if successful; a non-zero error code otherwise.
+ */
+int inv_serial_write_mem(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short mem_addr,
+ unsigned short length,
+ unsigned char const *data);
+
+/**
+ * inv_serial_read_fifo() - used to read multiple bytes of data from the fifo.
+ * @sl_handle a file handle to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @length Length of burst of data.
+ * @data Pointer to block of data.
+ *
+ * returns INV_SUCCESS == 0 if successful; a non-zero error code otherwise.
+ */
+int inv_serial_read_fifo(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short length,
+ unsigned char *data);
+
+/**
+ * inv_serial_write_fifo() - used to write multiple bytes of data to the fifo.
+ * @sl_handle a file handle to the serial device used for the communication.
+ * @slave_addr I2C slave address of device.
+ * @length Length of burst of data.
+ * @data Pointer to block of data.
+ *
+ * returns INV_SUCCESS == 0 if successful; a non-zero error code otherwise.
+ */
+int inv_serial_write_fifo(
+ void *sl_handle,
+ unsigned char slave_addr,
+ unsigned short length,
+ unsigned char const *data);
+
+/**
+ * @}
+ */
+#endif /* __MLSL_H__ */
diff --git a/drivers/misc/inv_mpu/mltypes.h b/drivers/misc/inv_mpu/mltypes.h
new file mode 100644
index 000000000000..a249f93be3ee
--- /dev/null
+++ b/drivers/misc/inv_mpu/mltypes.h
@@ -0,0 +1,234 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup MLERROR
+ * @brief Definition of the error codes used within the MPL and
+ * returned to the user.
+ * Every function tries to return a meaningful error code based
+ * on the error condition that occurred. The error code is numeric.
+ *
+ * The available error codes and their associated values are:
+ * - (0) INV_SUCCESS
+ * - (32) INV_ERROR
+ * - (22 / EINVAL) INV_ERROR_INVALID_PARAMETER
+ * - (1 / EPERM) INV_ERROR_FEATURE_NOT_ENABLED
+ * - (36) INV_ERROR_FEATURE_NOT_IMPLEMENTED
+ * - (38) INV_ERROR_DMP_NOT_STARTED
+ * - (39) INV_ERROR_DMP_STARTED
+ * - (40) INV_ERROR_NOT_OPENED
+ * - (41) INV_ERROR_OPENED
+ * - (19 / ENODEV) INV_ERROR_INVALID_MODULE
+ * - (12 / ENOMEM) INV_ERROR_MEMORY_EXAUSTED
+ * - (44) INV_ERROR_DIVIDE_BY_ZERO
+ * - (45) INV_ERROR_ASSERTION_FAILURE
+ * - (46) INV_ERROR_FILE_OPEN
+ * - (47) INV_ERROR_FILE_READ
+ * - (48) INV_ERROR_FILE_WRITE
+ * - (49) INV_ERROR_INVALID_CONFIGURATION
+ * - (52) INV_ERROR_SERIAL_CLOSED
+ * - (53) INV_ERROR_SERIAL_OPEN_ERROR
+ * - (54) INV_ERROR_SERIAL_READ
+ * - (55) INV_ERROR_SERIAL_WRITE
+ * - (56) INV_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED
+ * - (57) INV_ERROR_SM_TRANSITION
+ * - (58) INV_ERROR_SM_IMPROPER_STATE
+ * - (62) INV_ERROR_FIFO_OVERFLOW
+ * - (63) INV_ERROR_FIFO_FOOTER
+ * - (64) INV_ERROR_FIFO_READ_COUNT
+ * - (65) INV_ERROR_FIFO_READ_DATA
+ * - (72) INV_ERROR_MEMORY_SET
+ * - (82) INV_ERROR_LOG_MEMORY_ERROR
+ * - (83) INV_ERROR_LOG_OUTPUT_ERROR
+ * - (92) INV_ERROR_OS_BAD_PTR
+ * - (93) INV_ERROR_OS_BAD_HANDLE
+ * - (94) INV_ERROR_OS_CREATE_FAILED
+ * - (95) INV_ERROR_OS_LOCK_FAILED
+ * - (102) INV_ERROR_COMPASS_DATA_OVERFLOW
+ * - (103) INV_ERROR_COMPASS_DATA_UNDERFLOW
+ * - (104) INV_ERROR_COMPASS_DATA_NOT_READY
+ * - (105) INV_ERROR_COMPASS_DATA_ERROR
+ * - (107) INV_ERROR_CALIBRATION_LOAD
+ * - (108) INV_ERROR_CALIBRATION_STORE
+ * - (109) INV_ERROR_CALIBRATION_LEN
+ * - (110) INV_ERROR_CALIBRATION_CHECKSUM
+ * - (111) INV_ERROR_ACCEL_DATA_OVERFLOW
+ * - (112) INV_ERROR_ACCEL_DATA_UNDERFLOW
+ * - (113) INV_ERROR_ACCEL_DATA_NOT_READY
+ * - (114) INV_ERROR_ACCEL_DATA_ERROR
+ *
+ * The available warning codes and their associated values are:
+ * - (115) INV_WARNING_MOTION_RACE
+ * - (116) INV_WARNING_QUAT_TRASHED
+ *
+ * @{
+ * @file mltypes.h
+ * @}
+ */
+
+#ifndef MLTYPES_H
+#define MLTYPES_H
+
+#include <linux/types.h>
+#include <asm-generic/errno-base.h>
+
+
+
+
+/*---------------------------
+ * ML Defines
+ *--------------------------*/
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* - ML Errors. - */
+#define ERROR_NAME(x) (#x)
+#define ERROR_CHECK_FIRST(first, x) \
+ { if (INV_SUCCESS == first) first = x; }
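+
+/*
+ * Usage sketch (illustrative, with hypothetical step functions):
+ * ERROR_CHECK_FIRST runs each wrapped call only while no earlier call
+ * has failed, latching the first non-INV_SUCCESS code:
+ *
+ * int result = INV_SUCCESS;
+ * ERROR_CHECK_FIRST(result, do_first_step());
+ * ERROR_CHECK_FIRST(result, do_second_step());
+ * return result;
+ */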
+
+#define INV_SUCCESS (0)
+/* Generic Error code. Proprietary Error Codes only */
+#define INV_ERROR_BASE (0x20)
+#define INV_ERROR (INV_ERROR_BASE)
+
+/* Compatibility and other generic error codes */
+#define INV_ERROR_INVALID_PARAMETER (EINVAL)
+#define INV_ERROR_FEATURE_NOT_ENABLED (EPERM)
+#define INV_ERROR_FEATURE_NOT_IMPLEMENTED (INV_ERROR_BASE + 4)
+#define INV_ERROR_DMP_NOT_STARTED (INV_ERROR_BASE + 6)
+#define INV_ERROR_DMP_STARTED (INV_ERROR_BASE + 7)
+#define INV_ERROR_NOT_OPENED (INV_ERROR_BASE + 8)
+#define INV_ERROR_OPENED (INV_ERROR_BASE + 9)
+#define INV_ERROR_INVALID_MODULE (ENODEV)
+#define INV_ERROR_MEMORY_EXAUSTED (ENOMEM)
+#define INV_ERROR_DIVIDE_BY_ZERO (INV_ERROR_BASE + 12)
+#define INV_ERROR_ASSERTION_FAILURE (INV_ERROR_BASE + 13)
+#define INV_ERROR_FILE_OPEN (INV_ERROR_BASE + 14)
+#define INV_ERROR_FILE_READ (INV_ERROR_BASE + 15)
+#define INV_ERROR_FILE_WRITE (INV_ERROR_BASE + 16)
+#define INV_ERROR_INVALID_CONFIGURATION (INV_ERROR_BASE + 17)
+
+/* Serial Communication */
+#define INV_ERROR_SERIAL_CLOSED (INV_ERROR_BASE + 20)
+#define INV_ERROR_SERIAL_OPEN_ERROR (INV_ERROR_BASE + 21)
+#define INV_ERROR_SERIAL_READ (INV_ERROR_BASE + 22)
+#define INV_ERROR_SERIAL_WRITE (INV_ERROR_BASE + 23)
+#define INV_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED (INV_ERROR_BASE + 24)
+
+/* SM = State Machine */
+#define INV_ERROR_SM_TRANSITION (INV_ERROR_BASE + 25)
+#define INV_ERROR_SM_IMPROPER_STATE (INV_ERROR_BASE + 26)
+
+/* Fifo */
+#define INV_ERROR_FIFO_OVERFLOW (INV_ERROR_BASE + 30)
+#define INV_ERROR_FIFO_FOOTER (INV_ERROR_BASE + 31)
+#define INV_ERROR_FIFO_READ_COUNT (INV_ERROR_BASE + 32)
+#define INV_ERROR_FIFO_READ_DATA (INV_ERROR_BASE + 33)
+
+/* Memory & Registers, Set & Get */
+#define INV_ERROR_MEMORY_SET (INV_ERROR_BASE + 40)
+
+#define INV_ERROR_LOG_MEMORY_ERROR (INV_ERROR_BASE + 50)
+#define INV_ERROR_LOG_OUTPUT_ERROR (INV_ERROR_BASE + 51)
+
+/* OS interface errors */
+#define INV_ERROR_OS_BAD_PTR (INV_ERROR_BASE + 60)
+#define INV_ERROR_OS_BAD_HANDLE (INV_ERROR_BASE + 61)
+#define INV_ERROR_OS_CREATE_FAILED (INV_ERROR_BASE + 62)
+#define INV_ERROR_OS_LOCK_FAILED (INV_ERROR_BASE + 63)
+
+/* Compass errors */
+#define INV_ERROR_COMPASS_DATA_OVERFLOW (INV_ERROR_BASE + 70)
+#define INV_ERROR_COMPASS_DATA_UNDERFLOW (INV_ERROR_BASE + 71)
+#define INV_ERROR_COMPASS_DATA_NOT_READY (INV_ERROR_BASE + 72)
+#define INV_ERROR_COMPASS_DATA_ERROR (INV_ERROR_BASE + 73)
+
+/* Load/Store calibration */
+#define INV_ERROR_CALIBRATION_LOAD (INV_ERROR_BASE + 75)
+#define INV_ERROR_CALIBRATION_STORE (INV_ERROR_BASE + 76)
+#define INV_ERROR_CALIBRATION_LEN (INV_ERROR_BASE + 77)
+#define INV_ERROR_CALIBRATION_CHECKSUM (INV_ERROR_BASE + 78)
+
+/* Accel errors */
+#define INV_ERROR_ACCEL_DATA_OVERFLOW (INV_ERROR_BASE + 79)
+#define INV_ERROR_ACCEL_DATA_UNDERFLOW (INV_ERROR_BASE + 80)
+#define INV_ERROR_ACCEL_DATA_NOT_READY (INV_ERROR_BASE + 81)
+#define INV_ERROR_ACCEL_DATA_ERROR (INV_ERROR_BASE + 82)
+
+/* No Motion Warning States */
+#define INV_WARNING_MOTION_RACE (INV_ERROR_BASE + 83)
+#define INV_WARNING_QUAT_TRASHED (INV_ERROR_BASE + 84)
+#define INV_WARNING_GYRO_MAG (INV_ERROR_BASE + 85)
+
+#ifdef INV_USE_LEGACY_NAMES
+#define ML_SUCCESS INV_SUCCESS
+#define ML_ERROR INV_ERROR
+#define ML_ERROR_INVALID_PARAMETER INV_ERROR_INVALID_PARAMETER
+#define ML_ERROR_FEATURE_NOT_ENABLED INV_ERROR_FEATURE_NOT_ENABLED
+#define ML_ERROR_FEATURE_NOT_IMPLEMENTED INV_ERROR_FEATURE_NOT_IMPLEMENTED
+#define ML_ERROR_DMP_NOT_STARTED INV_ERROR_DMP_NOT_STARTED
+#define ML_ERROR_DMP_STARTED INV_ERROR_DMP_STARTED
+#define ML_ERROR_NOT_OPENED INV_ERROR_NOT_OPENED
+#define ML_ERROR_OPENED INV_ERROR_OPENED
+#define ML_ERROR_INVALID_MODULE INV_ERROR_INVALID_MODULE
+#define ML_ERROR_MEMORY_EXAUSTED INV_ERROR_MEMORY_EXAUSTED
+#define ML_ERROR_DIVIDE_BY_ZERO INV_ERROR_DIVIDE_BY_ZERO
+#define ML_ERROR_ASSERTION_FAILURE INV_ERROR_ASSERTION_FAILURE
+#define ML_ERROR_FILE_OPEN INV_ERROR_FILE_OPEN
+#define ML_ERROR_FILE_READ INV_ERROR_FILE_READ
+#define ML_ERROR_FILE_WRITE INV_ERROR_FILE_WRITE
+#define ML_ERROR_INVALID_CONFIGURATION INV_ERROR_INVALID_CONFIGURATION
+#define ML_ERROR_SERIAL_CLOSED INV_ERROR_SERIAL_CLOSED
+#define ML_ERROR_SERIAL_OPEN_ERROR INV_ERROR_SERIAL_OPEN_ERROR
+#define ML_ERROR_SERIAL_READ INV_ERROR_SERIAL_READ
+#define ML_ERROR_SERIAL_WRITE INV_ERROR_SERIAL_WRITE
+#define ML_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED \
+ INV_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED
+#define ML_ERROR_SM_TRANSITION INV_ERROR_SM_TRANSITION
+#define ML_ERROR_SM_IMPROPER_STATE INV_ERROR_SM_IMPROPER_STATE
+#define ML_ERROR_FIFO_OVERFLOW INV_ERROR_FIFO_OVERFLOW
+#define ML_ERROR_FIFO_FOOTER INV_ERROR_FIFO_FOOTER
+#define ML_ERROR_FIFO_READ_COUNT INV_ERROR_FIFO_READ_COUNT
+#define ML_ERROR_FIFO_READ_DATA INV_ERROR_FIFO_READ_DATA
+#define ML_ERROR_MEMORY_SET INV_ERROR_MEMORY_SET
+#define ML_ERROR_LOG_MEMORY_ERROR INV_ERROR_LOG_MEMORY_ERROR
+#define ML_ERROR_LOG_OUTPUT_ERROR INV_ERROR_LOG_OUTPUT_ERROR
+#define ML_ERROR_OS_BAD_PTR INV_ERROR_OS_BAD_PTR
+#define ML_ERROR_OS_BAD_HANDLE INV_ERROR_OS_BAD_HANDLE
+#define ML_ERROR_OS_CREATE_FAILED INV_ERROR_OS_CREATE_FAILED
+#define ML_ERROR_OS_LOCK_FAILED INV_ERROR_OS_LOCK_FAILED
+#define ML_ERROR_COMPASS_DATA_OVERFLOW INV_ERROR_COMPASS_DATA_OVERFLOW
+#define ML_ERROR_COMPASS_DATA_UNDERFLOW INV_ERROR_COMPASS_DATA_UNDERFLOW
+#define ML_ERROR_COMPASS_DATA_NOT_READY INV_ERROR_COMPASS_DATA_NOT_READY
+#define ML_ERROR_COMPASS_DATA_ERROR INV_ERROR_COMPASS_DATA_ERROR
+#define ML_ERROR_CALIBRATION_LOAD INV_ERROR_CALIBRATION_LOAD
+#define ML_ERROR_CALIBRATION_STORE INV_ERROR_CALIBRATION_STORE
+#define ML_ERROR_CALIBRATION_LEN INV_ERROR_CALIBRATION_LEN
+#define ML_ERROR_CALIBRATION_CHECKSUM INV_ERROR_CALIBRATION_CHECKSUM
+#define ML_ERROR_ACCEL_DATA_OVERFLOW INV_ERROR_ACCEL_DATA_OVERFLOW
+#define ML_ERROR_ACCEL_DATA_UNDERFLOW INV_ERROR_ACCEL_DATA_UNDERFLOW
+#define ML_ERROR_ACCEL_DATA_NOT_READY INV_ERROR_ACCEL_DATA_NOT_READY
+#define ML_ERROR_ACCEL_DATA_ERROR INV_ERROR_ACCEL_DATA_ERROR
+#endif
+
+/* For Linux coding compliance */
+
+#endif /* MLTYPES_H */
diff --git a/drivers/misc/inv_mpu/mpu-dev.c b/drivers/misc/inv_mpu/mpu-dev.c
new file mode 100644
index 000000000000..ddaf0864a440
--- /dev/null
+++ b/drivers/misc/inv_mpu/mpu-dev.c
@@ -0,0 +1,1244 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "mpuirq.h"
+#include "slaveirq.h"
+#include "mlsl.h"
+#include "mldl_cfg.h"
+#include <linux/mpu.h>
+
+
+/* Private driver data for the MPU device */
+struct mpu_private_data {
+ struct miscdevice dev;
+ struct i2c_client *client;
+
+ /* mldl_cfg data */
+ struct mldl_cfg mldl_cfg;
+ struct mpu_ram mpu_ram;
+ struct mpu_gyro_cfg mpu_gyro_cfg;
+ struct mpu_offsets mpu_offsets;
+ struct mpu_chip_info mpu_chip_info;
+ struct inv_mpu_cfg inv_mpu_cfg;
+ struct inv_mpu_state inv_mpu_state;
+
+ struct mutex mutex;
+ wait_queue_head_t mpu_event_wait;
+ struct completion completion;
+ struct timer_list timeout;
+ struct notifier_block nb;
+ struct mpuirq_data mpu_pm_event;
+ int response_timeout; /* In seconds */
+ unsigned long event;
+ int pid;
+ struct module *slave_modules[EXT_SLAVE_NUM_TYPES];
+};
+
+struct mpu_private_data *mpu_private_data;
+
+static void mpu_pm_timeout(u_long data)
+{
+ struct mpu_private_data *mpu = (struct mpu_private_data *)data;
+ struct i2c_client *client = mpu->client;
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+ complete(&mpu->completion);
+}
+
+static int mpu_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct mpu_private_data *mpu =
+ container_of(nb, struct mpu_private_data, nb);
+ struct i2c_client *client = mpu->client;
+ struct timeval event_time;
+ dev_dbg(&client->adapter->dev, "%s: %ld\n", __func__, event);
+
+ /* Prevent the file handle from being closed before we initialize
+ the completion event */
+ mutex_lock(&mpu->mutex);
+ if (!(mpu->pid) ||
+ (event != PM_SUSPEND_PREPARE && event != PM_POST_SUSPEND)) {
+ mutex_unlock(&mpu->mutex);
+ return NOTIFY_OK;
+ }
+
+ if (event == PM_SUSPEND_PREPARE)
+ mpu->event = MPU_PM_EVENT_SUSPEND_PREPARE;
+ if (event == PM_POST_SUSPEND)
+ mpu->event = MPU_PM_EVENT_POST_SUSPEND;
+
+ do_gettimeofday(&event_time);
+ mpu->mpu_pm_event.interruptcount++;
+ mpu->mpu_pm_event.irqtime =
+ (((long long)event_time.tv_sec) << 32) + event_time.tv_usec;
+ mpu->mpu_pm_event.data_type = MPUIRQ_DATA_TYPE_PM_EVENT;
+ mpu->mpu_pm_event.data = mpu->event;
+
+ if (mpu->response_timeout > 0) {
+ mpu->timeout.expires = jiffies + mpu->response_timeout * HZ;
+ add_timer(&mpu->timeout);
+ }
+ INIT_COMPLETION(mpu->completion);
+ mutex_unlock(&mpu->mutex);
+
+ wake_up_interruptible(&mpu->mpu_event_wait);
+ wait_for_completion(&mpu->completion);
+ del_timer_sync(&mpu->timeout);
+ dev_dbg(&client->adapter->dev, "%s: %ld DONE\n", __func__, event);
+ return NOTIFY_OK;
+}
+
+static int mpu_dev_open(struct inode *inode, struct file *file)
+{
+ struct mpu_private_data *mpu =
+ container_of(file->private_data, struct mpu_private_data, dev);
+ struct i2c_client *client = mpu->client;
+ int result;
+ int ii;
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+ dev_dbg(&client->adapter->dev, "current->pid %d\n", current->pid);
+
+ result = mutex_lock_interruptible(&mpu->mutex);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "%s: mutex_lock_interruptible returned %d\n",
+ __func__, result);
+ return result;
+ }
+ if (mpu->pid) {
+ mutex_unlock(&mpu->mutex);
+ return -EBUSY;
+ }
+ mpu->pid = current->pid;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++)
+ __module_get(mpu->slave_modules[ii]);
+
+ mutex_unlock(&mpu->mutex);
+ return 0;
+}
+
+/* close function - called when the "file" /dev/mpu is closed in userspace */
+static int mpu_release(struct inode *inode, struct file *file)
+{
+ struct mpu_private_data *mpu =
+ container_of(file->private_data, struct mpu_private_data, dev);
+ struct i2c_client *client = mpu->client;
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ int result = 0;
+ int ii;
+ struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ slave_adapter[ii] = NULL;
+ else
+ slave_adapter[ii] =
+ i2c_get_adapter(pdata_slave[ii]->adapt_num);
+ }
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;
+
+ mutex_lock(&mpu->mutex);
+ mldl_cfg->inv_mpu_cfg->requested_sensors = 0;
+ result = inv_mpu_suspend(mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ INV_ALL_SENSORS);
+ mpu->pid = 0;
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++)
+ module_put(mpu->slave_modules[ii]);
+
+ mutex_unlock(&mpu->mutex);
+ complete(&mpu->completion);
+ dev_dbg(&client->adapter->dev, "mpu_release\n");
+
+ return result;
+}
+
+/* read function - called when /dev/mpu is read; returns the pending PM event */
+static ssize_t mpu_read(struct file *file,
+ char __user *buf, size_t count, loff_t *offset)
+{
+ struct mpu_private_data *mpu =
+ container_of(file->private_data, struct mpu_private_data, dev);
+ struct i2c_client *client = mpu->client;
+ size_t len = sizeof(mpu->mpu_pm_event) + sizeof(unsigned long);
+ int err;
+
+ if (!mpu->event && (!(file->f_flags & O_NONBLOCK)))
+ wait_event_interruptible(mpu->mpu_event_wait, mpu->event);
+
+ if (!mpu->event || !buf
+ || count < sizeof(mpu->mpu_pm_event))
+ return 0;
+
+ err = copy_to_user(buf, &mpu->mpu_pm_event, sizeof(mpu->mpu_pm_event));
+ if (err) {
+ dev_err(&client->adapter->dev,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ mpu->event = 0;
+ return len;
+}
+
+static unsigned int mpu_poll(struct file *file, struct poll_table_struct *poll)
+{
+ struct mpu_private_data *mpu =
+ container_of(file->private_data, struct mpu_private_data, dev);
+ int mask = 0;
+
+ poll_wait(file, &mpu->mpu_event_wait, poll);
+ if (mpu->event)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+static int mpu_dev_ioctl_get_ext_slave_platform_data(
+ struct i2c_client *client,
+ struct ext_slave_platform_data __user *arg)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *)i2c_get_clientdata(client);
+ struct ext_slave_platform_data *pdata_slave;
+ struct ext_slave_platform_data local_pdata_slave;
+
+ if (copy_from_user(&local_pdata_slave, arg, sizeof(local_pdata_slave)))
+ return -EFAULT;
+
+ if (local_pdata_slave.type >= EXT_SLAVE_NUM_TYPES)
+ return -EINVAL;
+
+ pdata_slave = mpu->mldl_cfg.pdata_slave[local_pdata_slave.type];
+ /* All but private data and irq_data */
+ if (!pdata_slave)
+ return -ENODEV;
+ if (copy_to_user(arg, pdata_slave, sizeof(*pdata_slave)))
+ return -EFAULT;
+ return 0;
+}
+
+static int mpu_dev_ioctl_get_mpu_platform_data(
+ struct i2c_client *client,
+ struct mpu_platform_data __user *arg)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *)i2c_get_clientdata(client);
+ struct mpu_platform_data *pdata = mpu->mldl_cfg.pdata;
+
+ if (copy_to_user(arg, pdata, sizeof(*pdata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int mpu_dev_ioctl_get_ext_slave_descr(
+ struct i2c_client *client,
+ struct ext_slave_descr __user *arg)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *)i2c_get_clientdata(client);
+ struct ext_slave_descr *slave;
+ struct ext_slave_descr local_slave;
+
+ if (copy_from_user(&local_slave, arg, sizeof(local_slave)))
+ return -EFAULT;
+
+ if (local_slave.type >= EXT_SLAVE_NUM_TYPES)
+ return -EINVAL;
+
+ slave = mpu->mldl_cfg.slave[local_slave.type];
+ /* All but private data and irq_data */
+ if (!slave)
+ return -ENODEV;
+ if (copy_to_user(arg, slave, sizeof(*slave)))
+ return -EFAULT;
+ return 0;
+}
+
+
+/**
+ * inv_mpu_config() - Pass a requested configuration to the gyroscope
+ *
+ * @mldl_cfg: the mldl configuration structure
+ * @gyro_adapter: the adapter used to communicate with the gyroscope
+ * @usr_config: the configuration to pass to the gyroscope
+ *
+ * returns 0 or non-zero error code
+ */
+static int inv_mpu_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_adapter,
+ struct ext_slave_config __user *usr_config)
+{
+ int retval = 0;
+ struct ext_slave_config config;
+
+ retval = copy_from_user(&config, usr_config, sizeof(config));
+ if (retval)
+ return -EFAULT;
+
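+ /*
+ * config.data is a userspace pointer to a variable-length payload;
+ * bounce it through a kernel buffer before handing it down.
+ */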
+ if (config.len && config.data) {
+ void *data;
+ data = kmalloc(config.len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ retval = copy_from_user(data,
+ (void __user *)config.data, config.len);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ config.data = data;
+ }
+ retval = gyro_config(gyro_adapter, mldl_cfg, &config);
+ kfree(config.data);
+ return retval;
+}
+
+static int inv_mpu_get_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_adapter,
+ struct ext_slave_config __user *usr_config)
+{
+ int retval = 0;
+ struct ext_slave_config config;
+ void *user_data;
+
+ retval = copy_from_user(&config, usr_config, sizeof(config));
+ if (retval)
+ return -EFAULT;
+
+ user_data = config.data;
+ if (config.len && config.data) {
+ void *data;
+ data = kmalloc(config.len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ retval = copy_from_user(data,
+ (void __user *)config.data, config.len);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ config.data = data;
+ }
+ retval = gyro_get_config(gyro_adapter, mldl_cfg, &config);
+ if (!retval)
+ retval = copy_to_user((unsigned char __user *)user_data,
+ config.data, config.len);
+ kfree(config.data);
+ return retval;
+}
+
+static int slave_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_adapter,
+ void *slave_adapter,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config __user *usr_config)
+{
+ int retval = 0;
+ struct ext_slave_config config;
+ if ((!slave) || (!slave->config))
+ return -ENODEV;
+
+ retval = copy_from_user(&config, usr_config, sizeof(config));
+ if (retval)
+ return -EFAULT;
+
+ if (config.len && config.data) {
+ void *data;
+ data = kmalloc(config.len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ retval = copy_from_user(data,
+ (void __user *)config.data, config.len);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ config.data = data;
+ }
+ retval = inv_mpu_slave_config(mldl_cfg, gyro_adapter, slave_adapter,
+ &config, slave, pdata);
+ kfree(config.data);
+ return retval;
+}
+
+static int slave_get_config(struct mldl_cfg *mldl_cfg,
+ void *gyro_adapter,
+ void *slave_adapter,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config __user *usr_config)
+{
+ int retval = 0;
+ struct ext_slave_config config;
+ void *user_data;
+ if (!(slave) || !(slave->get_config))
+ return -ENODEV;
+
+ retval = copy_from_user(&config, usr_config, sizeof(config));
+ if (retval)
+ return -EFAULT;
+
+ user_data = config.data;
+ if (config.len && config.data) {
+ void *data;
+ data = kmalloc(config.len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ retval = copy_from_user(data,
+ (void __user *)config.data, config.len);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ config.data = data;
+ }
+ retval = inv_mpu_get_slave_config(mldl_cfg, gyro_adapter,
+ slave_adapter, &config, slave, pdata);
+ if (retval) {
+ kfree(config.data);
+ return retval;
+ }
+ retval = copy_to_user((unsigned char __user *)user_data,
+ config.data, config.len);
+ kfree(config.data);
+ return retval;
+}
+
+static int inv_slave_read(struct mldl_cfg *mldl_cfg,
+ void *gyro_adapter,
+ void *slave_adapter,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ void __user *usr_data)
+{
+ int retval;
+ unsigned char *data;
+
+ if (!slave || !slave->read)
+ return -ENODEV;
+
+ data = kzalloc(slave->read_len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ retval = inv_mpu_slave_read(mldl_cfg, gyro_adapter, slave_adapter,
+ slave, pdata, data);
+
+ if ((!retval) &&
+ (copy_to_user((unsigned char __user *)usr_data,
+ data, slave->read_len)))
+ retval = -EFAULT;
+
+ kfree(data);
+ return retval;
+}
+
+static int mpu_handle_mlsl(void *sl_handle,
+ unsigned char addr,
+ unsigned int cmd,
+ struct mpu_read_write __user *usr_msg)
+{
+ int retval = 0;
+ struct mpu_read_write msg;
+ unsigned char *user_data;
+ retval = copy_from_user(&msg, usr_msg, sizeof(msg));
+ if (retval)
+ return -EFAULT;
+
+ user_data = msg.data;
+ if (msg.length && msg.data) {
+ unsigned char *data;
+ data = kmalloc(msg.length, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ retval = copy_from_user(data,
+ (void __user *)msg.data, msg.length);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ msg.data = data;
+ } else {
+ return -EPERM;
+ }
+
+ switch (cmd) {
+ case MPU_READ:
+ retval = inv_serial_read(sl_handle, addr,
+ msg.address, msg.length, msg.data);
+ break;
+ case MPU_WRITE:
+ retval = inv_serial_write(sl_handle, addr,
+ msg.length, msg.data);
+ break;
+ case MPU_READ_MEM:
+ retval = inv_serial_read_mem(sl_handle, addr,
+ msg.address, msg.length, msg.data);
+ break;
+ case MPU_WRITE_MEM:
+ retval = inv_serial_write_mem(sl_handle, addr,
+ msg.address, msg.length,
+ msg.data);
+ break;
+ case MPU_READ_FIFO:
+ retval = inv_serial_read_fifo(sl_handle, addr,
+ msg.length, msg.data);
+ break;
+ case MPU_WRITE_FIFO:
+ retval = inv_serial_write_fifo(sl_handle, addr,
+ msg.length, msg.data);
+ break;
+
+ }
+ if (retval) {
+ dev_err(&((struct i2c_adapter *)sl_handle)->dev,
+ "%s: i2c %d error %d\n",
+ __func__, cmd, retval);
+ kfree(msg.data);
+ return retval;
+ }
+ retval = copy_to_user((unsigned char __user *)user_data,
+ msg.data, msg.length);
+ kfree(msg.data);
+ return retval;
+}
+
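+/*
+ * Userspace drives the device through ioctls on the /dev/mpu misc node.
+ * A minimal sketch (error handling omitted; sensor-mask constants come
+ * from <linux/mpu.h>):
+ *
+ * fd = open("/dev/mpu", O_RDWR);
+ * ioctl(fd, MPU_SET_REQUESTED_SENSORS, INV_ALL_SENSORS);
+ * ioctl(fd, MPU_RESUME, INV_ALL_SENSORS);
+ * ...
+ * ioctl(fd, MPU_SUSPEND, INV_ALL_SENSORS);
+ */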
+/* ioctl - I/O control */
+static long mpu_dev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mpu_private_data *mpu =
+ container_of(file->private_data, struct mpu_private_data, dev);
+ struct i2c_client *client = mpu->client;
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ int retval = 0;
+ struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
+ struct ext_slave_descr **slave = mldl_cfg->slave;
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ int ii;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ slave_adapter[ii] = NULL;
+ else
+ slave_adapter[ii] =
+ i2c_get_adapter(pdata_slave[ii]->adapt_num);
+ }
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;
+
+ retval = mutex_lock_interruptible(&mpu->mutex);
+ if (retval) {
+ dev_err(&client->adapter->dev,
+ "%s: mutex_lock_interruptible returned %d\n",
+ __func__, retval);
+ return retval;
+ }
+
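+ /* Every command below runs with mpu->mutex held, serializing the
+ * ioctl path against open/release, slave registration and PM events.
+ */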
+ switch (cmd) {
+ case MPU_GET_EXT_SLAVE_PLATFORM_DATA:
+ retval = mpu_dev_ioctl_get_ext_slave_platform_data(
+ client,
+ (struct ext_slave_platform_data __user *)arg);
+ break;
+ case MPU_GET_MPU_PLATFORM_DATA:
+ retval = mpu_dev_ioctl_get_mpu_platform_data(
+ client,
+ (struct mpu_platform_data __user *)arg);
+ break;
+ case MPU_GET_EXT_SLAVE_DESCR:
+ retval = mpu_dev_ioctl_get_ext_slave_descr(
+ client,
+ (struct ext_slave_descr __user *)arg);
+ break;
+ case MPU_READ:
+ case MPU_WRITE:
+ case MPU_READ_MEM:
+ case MPU_WRITE_MEM:
+ case MPU_READ_FIFO:
+ case MPU_WRITE_FIFO:
+ retval = mpu_handle_mlsl(
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ mldl_cfg->mpu_chip_info->addr, cmd,
+ (struct mpu_read_write __user *)arg);
+ break;
+ case MPU_CONFIG_GYRO:
+ retval = inv_mpu_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_CONFIG_ACCEL:
+ retval = slave_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave[EXT_SLAVE_TYPE_ACCEL],
+ pdata_slave[EXT_SLAVE_TYPE_ACCEL],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_CONFIG_COMPASS:
+ retval = slave_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave[EXT_SLAVE_TYPE_COMPASS],
+ pdata_slave[EXT_SLAVE_TYPE_COMPASS],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_CONFIG_PRESSURE:
+ retval = slave_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ slave[EXT_SLAVE_TYPE_PRESSURE],
+ pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_GET_CONFIG_GYRO:
+ retval = inv_mpu_get_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_GET_CONFIG_ACCEL:
+ retval = slave_get_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave[EXT_SLAVE_TYPE_ACCEL],
+ pdata_slave[EXT_SLAVE_TYPE_ACCEL],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_GET_CONFIG_COMPASS:
+ retval = slave_get_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave[EXT_SLAVE_TYPE_COMPASS],
+ pdata_slave[EXT_SLAVE_TYPE_COMPASS],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_GET_CONFIG_PRESSURE:
+ retval = slave_get_config(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ slave[EXT_SLAVE_TYPE_PRESSURE],
+ pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
+ (struct ext_slave_config __user *)arg);
+ break;
+ case MPU_SUSPEND:
+ retval = inv_mpu_suspend(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ arg);
+ break;
+ case MPU_RESUME:
+ retval = inv_mpu_resume(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ arg);
+ break;
+ case MPU_PM_EVENT_HANDLED:
+ dev_dbg(&client->adapter->dev, "%s: %d\n", __func__, cmd);
+ complete(&mpu->completion);
+ break;
+ case MPU_READ_ACCEL:
+ retval = inv_slave_read(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave[EXT_SLAVE_TYPE_ACCEL],
+ pdata_slave[EXT_SLAVE_TYPE_ACCEL],
+ (unsigned char __user *)arg);
+ break;
+ case MPU_READ_COMPASS:
+ retval = inv_slave_read(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave[EXT_SLAVE_TYPE_COMPASS],
+ pdata_slave[EXT_SLAVE_TYPE_COMPASS],
+ (unsigned char __user *)arg);
+ break;
+ case MPU_READ_PRESSURE:
+ retval = inv_slave_read(
+ mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ slave[EXT_SLAVE_TYPE_PRESSURE],
+ pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
+ (unsigned char __user *)arg);
+ break;
+ case MPU_GET_REQUESTED_SENSORS:
+ if (copy_to_user(
+ (__u32 __user *)arg,
+ &mldl_cfg->inv_mpu_cfg->requested_sensors,
+ sizeof(mldl_cfg->inv_mpu_cfg->requested_sensors)))
+ retval = -EFAULT;
+ break;
+ case MPU_SET_REQUESTED_SENSORS:
+ mldl_cfg->inv_mpu_cfg->requested_sensors = arg;
+ break;
+ case MPU_GET_IGNORE_SYSTEM_SUSPEND:
+ if (copy_to_user(
+ (unsigned char __user *)arg,
+ &mldl_cfg->inv_mpu_cfg->ignore_system_suspend,
+ sizeof(mldl_cfg->inv_mpu_cfg->ignore_system_suspend)))
+ retval = -EFAULT;
+ break;
+ case MPU_SET_IGNORE_SYSTEM_SUSPEND:
+ mldl_cfg->inv_mpu_cfg->ignore_system_suspend = arg;
+ break;
+ case MPU_GET_MLDL_STATUS:
+ if (copy_to_user(
+ (unsigned char __user *)arg,
+ &mldl_cfg->inv_mpu_state->status,
+ sizeof(mldl_cfg->inv_mpu_state->status)))
+ retval = -EFAULT;
+ break;
+ case MPU_GET_I2C_SLAVES_ENABLED:
+ if (copy_to_user(
+ (unsigned char __user *)arg,
+ &mldl_cfg->inv_mpu_state->i2c_slaves_enabled,
+ sizeof(mldl_cfg->inv_mpu_state->i2c_slaves_enabled)))
+ retval = -EFAULT;
+ break;
+ default:
+ dev_err(&client->adapter->dev,
+ "%s: Unknown cmd %x, arg %lu\n",
+ __func__, cmd, arg);
+ retval = -EINVAL;
+ }
+
+ mutex_unlock(&mpu->mutex);
+ dev_dbg(&client->adapter->dev, "%s: %08x, %08lx, %d\n",
+ __func__, cmd, arg, retval);
+
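+ /* Lower layers report positive error codes (and copy_to_user returns
+ * a positive byte count on failure); fold them into negative errno
+ * style for userspace.
+ */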
+ if (retval > 0)
+ retval = -retval;
+
+ return retval;
+}
+
+void mpu_shutdown(struct i2c_client *client)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *)i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ int ii;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ slave_adapter[ii] = NULL;
+ else
+ slave_adapter[ii] =
+ i2c_get_adapter(pdata_slave[ii]->adapt_num);
+ }
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;
+
+ mutex_lock(&mpu->mutex);
+ (void)inv_mpu_suspend(mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ INV_ALL_SENSORS);
+ mutex_unlock(&mpu->mutex);
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+}
+
+int mpu_dev_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *)i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ int ii;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ slave_adapter[ii] = NULL;
+ else
+ slave_adapter[ii] =
+ i2c_get_adapter(pdata_slave[ii]->adapt_num);
+ }
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;
+
+ mutex_lock(&mpu->mutex);
+ if (!mldl_cfg->inv_mpu_cfg->ignore_system_suspend) {
+ dev_dbg(&client->adapter->dev,
+ "%s: suspending on event %d\n", __func__, mesg.event);
+ (void)inv_mpu_suspend(mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ INV_ALL_SENSORS);
+ } else {
+ dev_dbg(&client->adapter->dev,
+ "%s: Already suspended %d\n", __func__, mesg.event);
+ }
+ mutex_unlock(&mpu->mutex);
+ return 0;
+}
+
+int mpu_dev_resume(struct i2c_client *client)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *)i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ int ii;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ slave_adapter[ii] = NULL;
+ else
+ slave_adapter[ii] =
+ i2c_get_adapter(pdata_slave[ii]->adapt_num);
+ }
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;
+
+ mutex_lock(&mpu->mutex);
+ if (mpu->pid && !mldl_cfg->inv_mpu_cfg->ignore_system_suspend) {
+ (void)inv_mpu_resume(mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
+ mldl_cfg->inv_mpu_cfg->requested_sensors);
+ dev_dbg(&client->adapter->dev,
+ "%s for pid %d\n", __func__, mpu->pid);
+ }
+ mutex_unlock(&mpu->mutex);
+ return 0;
+}
+
+/* define which file operations are supported */
+static const struct file_operations mpu_fops = {
+ .owner = THIS_MODULE,
+ .read = mpu_read,
+ .poll = mpu_poll,
+ .unlocked_ioctl = mpu_dev_ioctl,
+ .open = mpu_dev_open,
+ .release = mpu_release,
+};
+
+int inv_mpu_register_slave(struct module *slave_module,
+ struct i2c_client *slave_client,
+ struct ext_slave_platform_data *slave_pdata,
+ struct ext_slave_descr *(*get_slave_descr)(void))
+{
+ struct mpu_private_data *mpu = mpu_private_data;
+ struct mldl_cfg *mldl_cfg;
+ struct ext_slave_descr *slave_descr;
+ struct ext_slave_platform_data **pdata_slave;
+ char *irq_name = NULL;
+ int result = 0;
+
+ if (!slave_client || !slave_pdata || !get_slave_descr)
+ return -EINVAL;
+
+ if (!mpu) {
+ dev_err(&slave_client->adapter->dev,
+ "%s: Null mpu_private_data\n", __func__);
+ return -EINVAL;
+ }
+ mldl_cfg = &mpu->mldl_cfg;
+ pdata_slave = mldl_cfg->pdata_slave;
+ slave_descr = get_slave_descr();
+
+ if (!slave_descr) {
+ dev_err(&slave_client->adapter->dev,
+ "%s: Null ext_slave_descr\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mpu->mutex);
+ if (mpu->pid) {
+ mutex_unlock(&mpu->mutex);
+ return -EBUSY;
+ }
+
+ if (pdata_slave[slave_descr->type]) {
+ result = -EBUSY;
+ goto out_unlock_mutex;
+ }
+
+ slave_pdata->address = slave_client->addr;
+ slave_pdata->irq = slave_client->irq;
+ slave_pdata->adapt_num = i2c_adapter_id(slave_client->adapter);
+
+ dev_info(&slave_client->adapter->dev,
+ "%s: +%s Type %d: Addr: %2x IRQ: %2d, Adapt: %2d\n",
+ __func__,
+ slave_descr->name,
+ slave_descr->type,
+ slave_pdata->address,
+ slave_pdata->irq,
+ slave_pdata->adapt_num);
+
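+ /* Pick the misc-device name that slaveirq_init() below will register
+ * for this slave's interrupt, based on the slave type.
+ */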
+ switch (slave_descr->type) {
+ case EXT_SLAVE_TYPE_ACCEL:
+ irq_name = "accelirq";
+ break;
+ case EXT_SLAVE_TYPE_COMPASS:
+ irq_name = "compassirq";
+ break;
+ case EXT_SLAVE_TYPE_PRESSURE:
+ irq_name = "pressureirq";
+ break;
+ default:
+ irq_name = "none";
+ }
+ if (slave_descr->init) {
+ result = slave_descr->init(slave_client->adapter,
+ slave_descr,
+ slave_pdata);
+ if (result) {
+ dev_err(&slave_client->adapter->dev,
+ "%s init failed %d\n",
+ slave_descr->name, result);
+ goto out_unlock_mutex;
+ }
+ }
+
+ pdata_slave[slave_descr->type] = slave_pdata;
+ mpu->slave_modules[slave_descr->type] = slave_module;
+ mldl_cfg->slave[slave_descr->type] = slave_descr;
+
+out_unlock_mutex:
+ mutex_unlock(&mpu->mutex);
+
+ if (!result && irq_name && (slave_pdata->irq > 0)) {
+ int warn_result;
+ dev_info(&slave_client->adapter->dev,
+ "Installing %s irq using %d\n",
+ irq_name,
+ slave_pdata->irq);
+ warn_result = slaveirq_init(slave_client->adapter,
+ slave_pdata, irq_name);
+ if (warn_result)
+ dev_WARN(&slave_client->adapter->dev,
+ "%s irq assigned error: %d\n",
+ slave_descr->name, warn_result);
+ } else {
+ dev_WARN(&slave_client->adapter->dev,
+ "%s irq not assigned: %d %d %d\n",
+ slave_descr->name,
+ result, (int)irq_name, slave_pdata->irq);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(inv_mpu_register_slave);
+
+void inv_mpu_unregister_slave(struct i2c_client *slave_client,
+ struct ext_slave_platform_data *slave_pdata,
+ struct ext_slave_descr *(*get_slave_descr)(void))
+{
+ struct mpu_private_data *mpu = mpu_private_data;
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct ext_slave_descr *slave_descr;
+ int result;
+
+ if (!slave_client || !slave_pdata || !get_slave_descr)
+ return;
+
+ dev_info(&slave_client->adapter->dev, "%s\n", __func__);
+
+ if (slave_pdata->irq)
+ slaveirq_exit(slave_pdata);
+
+ slave_descr = get_slave_descr();
+ if (!slave_descr)
+ return;
+
+ mutex_lock(&mpu->mutex);
+
+ if (slave_descr->exit) {
+ result = slave_descr->exit(slave_client->adapter,
+ slave_descr,
+ slave_pdata);
+ if (result)
+ dev_err(&slave_client->adapter->dev,
+ "Accel exit failed %d\n", result);
+ }
+ mldl_cfg->slave[slave_descr->type] = NULL;
+ mldl_cfg->pdata_slave[slave_descr->type] = NULL;
+ mpu->slave_modules[slave_descr->type] = NULL;
+
+ mutex_unlock(&mpu->mutex);
+
+}
+EXPORT_SYMBOL(inv_mpu_unregister_slave);
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static const struct i2c_device_id mpu_id[] = {
+ {"mpu3050", 0},
+ {"mpu6050", 0},
+ {"mpu6050_no_accel", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mpu_id);
+
+int mpu_probe(struct i2c_client *client, const struct i2c_device_id *devid)
+{
+ struct mpu_platform_data *pdata;
+ struct mpu_private_data *mpu;
+ struct mldl_cfg *mldl_cfg;
+ int res = 0;
+ int ii = 0;
+
+ dev_info(&client->adapter->dev, "%s: %d\n", __func__, ii++);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ res = -ENODEV;
+ goto out_check_functionality_failed;
+ }
+
+ mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL);
+ if (!mpu) {
+ res = -ENOMEM;
+ goto out_alloc_data_failed;
+ }
+ mldl_cfg = &mpu->mldl_cfg;
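+ /* Point the shared mldl_cfg at the storage embedded in the private data */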
+ mldl_cfg->mpu_ram = &mpu->mpu_ram;
+ mldl_cfg->mpu_gyro_cfg = &mpu->mpu_gyro_cfg;
+ mldl_cfg->mpu_offsets = &mpu->mpu_offsets;
+ mldl_cfg->mpu_chip_info = &mpu->mpu_chip_info;
+ mldl_cfg->inv_mpu_cfg = &mpu->inv_mpu_cfg;
+ mldl_cfg->inv_mpu_state = &mpu->inv_mpu_state;
+
+ mldl_cfg->mpu_ram->length = MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE;
+ mldl_cfg->mpu_ram->ram = kzalloc(mldl_cfg->mpu_ram->length, GFP_KERNEL);
+ if (!mldl_cfg->mpu_ram->ram) {
+ res = -ENOMEM;
+ goto out_alloc_ram_failed;
+ }
+ mpu_private_data = mpu;
+ i2c_set_clientdata(client, mpu);
+ mpu->client = client;
+
+ init_waitqueue_head(&mpu->mpu_event_wait);
+ mutex_init(&mpu->mutex);
+ init_completion(&mpu->completion);
+
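+ /* Timeout machinery for PM event handling; see mpu_pm_timeout() */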
+ mpu->response_timeout = 60; /* Seconds */
+ mpu->timeout.function = mpu_pm_timeout;
+ mpu->timeout.data = (u_long) mpu;
+ init_timer(&mpu->timeout);
+
+ mpu->nb.notifier_call = mpu_pm_notifier_callback;
+ mpu->nb.priority = 0;
+ res = register_pm_notifier(&mpu->nb);
+ if (res) {
+ dev_err(&client->adapter->dev,
+ "Unable to register pm_notifier %d\n", res);
+ goto out_register_pm_notifier_failed;
+ }
+
+ pdata = (struct mpu_platform_data *)client->dev.platform_data;
+ if (!pdata) {
+ dev_WARN(&client->adapter->dev,
+ "Missing platform data for mpu\n");
+ }
+ mldl_cfg->pdata = pdata;
+
+ mldl_cfg->mpu_chip_info->addr = client->addr;
+ res = inv_mpu_open(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);
+
+ if (res) {
+ dev_err(&client->adapter->dev,
+ "Unable to open %s %d\n", MPU_NAME, res);
+ res = -ENODEV;
+ goto out_whoami_failed;
+ }
+
+ mpu->dev.minor = MISC_DYNAMIC_MINOR;
+ mpu->dev.name = "mpu";
+ mpu->dev.fops = &mpu_fops;
+ res = misc_register(&mpu->dev);
+ if (res < 0) {
+ dev_err(&client->adapter->dev,
+ "ERROR: misc_register returned %d\n", res);
+ goto out_misc_register_failed;
+ }
+
+ if (client->irq) {
+ dev_info(&client->adapter->dev,
+ "Installing irq using %d\n", client->irq);
+ res = mpuirq_init(client, mldl_cfg);
+ if (res)
+ goto out_mpuirq_failed;
+ } else {
+ dev_WARN(&client->adapter->dev,
+ "Missing %s IRQ\n", MPU_NAME);
+ }
+ return res;
+
+out_mpuirq_failed:
+ misc_deregister(&mpu->dev);
+out_misc_register_failed:
+ inv_mpu_close(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);
+out_whoami_failed:
+ unregister_pm_notifier(&mpu->nb);
+out_register_pm_notifier_failed:
+ kfree(mldl_cfg->mpu_ram->ram);
+ mpu_private_data = NULL;
+out_alloc_ram_failed:
+ kfree(mpu);
+out_alloc_data_failed:
+out_check_functionality_failed:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, res);
+ return res;
+
+}
+
+static int mpu_remove(struct i2c_client *client)
+{
+ struct mpu_private_data *mpu = i2c_get_clientdata(client);
+ struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
+ int ii;
+
+ for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
+ if (!pdata_slave[ii])
+ slave_adapter[ii] = NULL;
+ else
+ slave_adapter[ii] =
+ i2c_get_adapter(pdata_slave[ii]->adapt_num);
+ }
+
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_close(mldl_cfg,
+ slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
+ slave_adapter[EXT_SLAVE_TYPE_ACCEL],
+ slave_adapter[EXT_SLAVE_TYPE_COMPASS],
+ slave_adapter[EXT_SLAVE_TYPE_PRESSURE]);
+
+
+ if (client->irq)
+ mpuirq_exit();
+
+ misc_deregister(&mpu->dev);
+
+ unregister_pm_notifier(&mpu->nb);
+
+ kfree(mpu->mldl_cfg.mpu_ram->ram);
+ kfree(mpu);
+
+ return 0;
+}
+
+static struct i2c_driver mpu_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = mpu_probe,
+ .remove = mpu_remove,
+ .id_table = mpu_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = MPU_NAME,
+ },
+ .address_list = normal_i2c,
+ .shutdown = mpu_shutdown, /* optional */
+ .suspend = mpu_dev_suspend, /* optional */
+ .resume = mpu_dev_resume, /* optional */
+
+};
+
+static int __init mpu_init(void)
+{
+ int res = i2c_add_driver(&mpu_driver);
+ pr_info("%s: Probe name %s\n", __func__, MPU_NAME);
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit mpu_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&mpu_driver);
+}
+
+module_init(mpu_init);
+module_exit(mpu_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("User space character device interface for MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(MPU_NAME);
diff --git a/drivers/misc/inv_mpu/mpu-dev.h b/drivers/misc/inv_mpu/mpu-dev.h
new file mode 100644
index 000000000000..b6a4fcfac586
--- /dev/null
+++ b/drivers/misc/inv_mpu/mpu-dev.h
@@ -0,0 +1,36 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+
+#ifndef __MPU_DEV_H__
+#define __MPU_DEV_H__
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mpu.h>
+
+int inv_mpu_register_slave(struct module *slave_module,
+ struct i2c_client *client,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_descr *(*slave_descr)(void));
+
+void inv_mpu_unregister_slave(struct i2c_client *client,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_descr *(*slave_descr)(void));
+#endif
diff --git a/drivers/misc/inv_mpu/mpu3050.h b/drivers/misc/inv_mpu/mpu3050.h
new file mode 100644
index 000000000000..02af16ed1216
--- /dev/null
+++ b/drivers/misc/inv_mpu/mpu3050.h
@@ -0,0 +1,251 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MPU_H_
+#error Do not include this file directly. Include mpu.h instead.
+#endif
+
+#ifndef __MPU3050_H_
+#define __MPU3050_H_
+
+#include <linux/types.h>
+
+
+#define MPU_NAME "mpu3050"
+#define DEFAULT_MPU_SLAVEADDR 0x68
+
+/*==== MPU REGISTER SET ====*/
+enum mpu_register {
+ MPUREG_WHO_AM_I = 0, /* 00 0x00 */
+ MPUREG_PRODUCT_ID, /* 01 0x01 */
+ MPUREG_02_RSVD, /* 02 0x02 */
+ MPUREG_03_RSVD, /* 03 0x03 */
+ MPUREG_04_RSVD, /* 04 0x04 */
+ MPUREG_XG_OFFS_TC, /* 05 0x05 */
+ MPUREG_06_RSVD, /* 06 0x06 */
+ MPUREG_07_RSVD, /* 07 0x07 */
+ MPUREG_YG_OFFS_TC, /* 08 0x08 */
+ MPUREG_09_RSVD, /* 09 0x09 */
+ MPUREG_0A_RSVD, /* 10 0x0a */
+ MPUREG_ZG_OFFS_TC, /* 11 0x0b */
+ MPUREG_X_OFFS_USRH, /* 12 0x0c */
+ MPUREG_X_OFFS_USRL, /* 13 0x0d */
+ MPUREG_Y_OFFS_USRH, /* 14 0x0e */
+ MPUREG_Y_OFFS_USRL, /* 15 0x0f */
+ MPUREG_Z_OFFS_USRH, /* 16 0x10 */
+ MPUREG_Z_OFFS_USRL, /* 17 0x11 */
+ MPUREG_FIFO_EN1, /* 18 0x12 */
+ MPUREG_FIFO_EN2, /* 19 0x13 */
+ MPUREG_AUX_SLV_ADDR, /* 20 0x14 */
+ MPUREG_SMPLRT_DIV, /* 21 0x15 */
+ MPUREG_DLPF_FS_SYNC, /* 22 0x16 */
+ MPUREG_INT_CFG, /* 23 0x17 */
+ MPUREG_ACCEL_BURST_ADDR,/* 24 0x18 */
+ MPUREG_19_RSVD, /* 25 0x19 */
+ MPUREG_INT_STATUS, /* 26 0x1a */
+ MPUREG_TEMP_OUT_H, /* 27 0x1b */
+ MPUREG_TEMP_OUT_L, /* 28 0x1c */
+ MPUREG_GYRO_XOUT_H, /* 29 0x1d */
+ MPUREG_GYRO_XOUT_L, /* 30 0x1e */
+ MPUREG_GYRO_YOUT_H, /* 31 0x1f */
+ MPUREG_GYRO_YOUT_L, /* 32 0x20 */
+ MPUREG_GYRO_ZOUT_H, /* 33 0x21 */
+ MPUREG_GYRO_ZOUT_L, /* 34 0x22 */
+ MPUREG_23_RSVD, /* 35 0x23 */
+ MPUREG_24_RSVD, /* 36 0x24 */
+ MPUREG_25_RSVD, /* 37 0x25 */
+ MPUREG_26_RSVD, /* 38 0x26 */
+ MPUREG_27_RSVD, /* 39 0x27 */
+ MPUREG_28_RSVD, /* 40 0x28 */
+ MPUREG_29_RSVD, /* 41 0x29 */
+ MPUREG_2A_RSVD, /* 42 0x2a */
+ MPUREG_2B_RSVD, /* 43 0x2b */
+ MPUREG_2C_RSVD, /* 44 0x2c */
+ MPUREG_2D_RSVD, /* 45 0x2d */
+ MPUREG_2E_RSVD, /* 46 0x2e */
+ MPUREG_2F_RSVD, /* 47 0x2f */
+ MPUREG_30_RSVD, /* 48 0x30 */
+ MPUREG_31_RSVD, /* 49 0x31 */
+ MPUREG_32_RSVD, /* 50 0x32 */
+ MPUREG_33_RSVD, /* 51 0x33 */
+ MPUREG_34_RSVD, /* 52 0x34 */
+ MPUREG_DMP_CFG_1, /* 53 0x35 */
+ MPUREG_DMP_CFG_2, /* 54 0x36 */
+ MPUREG_BANK_SEL, /* 55 0x37 */
+ MPUREG_MEM_START_ADDR, /* 56 0x38 */
+ MPUREG_MEM_R_W, /* 57 0x39 */
+ MPUREG_FIFO_COUNTH, /* 58 0x3a */
+ MPUREG_FIFO_COUNTL, /* 59 0x3b */
+ MPUREG_FIFO_R_W, /* 60 0x3c */
+ MPUREG_USER_CTRL, /* 61 0x3d */
+ MPUREG_PWR_MGM, /* 62 0x3e */
+ MPUREG_3F_RSVD, /* 63 0x3f */
+ NUM_OF_MPU_REGISTERS /* 64 0x40 */
+};
+
+/*==== BITS FOR MPU ====*/
+
+/*---- MPU 'FIFO_EN1' register (12) ----*/
+#define BIT_TEMP_OUT 0x80
+#define BIT_GYRO_XOUT 0x40
+#define BIT_GYRO_YOUT 0x20
+#define BIT_GYRO_ZOUT 0x10
+#define BIT_ACCEL_XOUT 0x08
+#define BIT_ACCEL_YOUT 0x04
+#define BIT_ACCEL_ZOUT 0x02
+#define BIT_AUX_1OUT 0x01
+/*---- MPU 'FIFO_EN2' register (13) ----*/
+#define BIT_AUX_2OUT 0x02
+#define BIT_AUX_3OUT 0x01
+/*---- MPU 'DLPF_FS_SYNC' register (16) ----*/
+#define BITS_EXT_SYNC_NONE 0x00
+#define BITS_EXT_SYNC_TEMP 0x20
+#define BITS_EXT_SYNC_GYROX 0x40
+#define BITS_EXT_SYNC_GYROY 0x60
+#define BITS_EXT_SYNC_GYROZ 0x80
+#define BITS_EXT_SYNC_ACCELX 0xA0
+#define BITS_EXT_SYNC_ACCELY 0xC0
+#define BITS_EXT_SYNC_ACCELZ 0xE0
+#define BITS_EXT_SYNC_MASK 0xE0
+#define BITS_FS_250DPS 0x00
+#define BITS_FS_500DPS 0x08
+#define BITS_FS_1000DPS 0x10
+#define BITS_FS_2000DPS 0x18
+#define BITS_FS_MASK 0x18
+#define BITS_DLPF_CFG_256HZ_NOLPF2 0x00
+#define BITS_DLPF_CFG_188HZ 0x01
+#define BITS_DLPF_CFG_98HZ 0x02
+#define BITS_DLPF_CFG_42HZ 0x03
+#define BITS_DLPF_CFG_20HZ 0x04
+#define BITS_DLPF_CFG_10HZ 0x05
+#define BITS_DLPF_CFG_5HZ 0x06
+#define BITS_DLPF_CFG_2100HZ_NOLPF 0x07
+#define BITS_DLPF_CFG_MASK 0x07
+/*---- MPU 'INT_CFG' register (17) ----*/
+#define BIT_ACTL 0x80
+#define BIT_ACTL_LOW 0x80
+#define BIT_ACTL_HIGH 0x00
+#define BIT_OPEN 0x40
+#define BIT_OPEN_DRAIN 0x40
+#define BIT_PUSH_PULL 0x00
+#define BIT_LATCH_INT_EN 0x20
+#define BIT_INT_PULSE_WIDTH_50US 0x00
+#define BIT_INT_ANYRD_2CLEAR 0x10
+#define BIT_INT_STAT_READ_2CLEAR 0x00
+#define BIT_MPU_RDY_EN 0x04
+#define BIT_DMP_INT_EN 0x02
+#define BIT_RAW_RDY_EN 0x01
+/*---- MPU 'INT_STATUS' register (1A) ----*/
+#define BIT_INT_STATUS_FIFO_OVERLOW 0x80
+#define BIT_MPU_RDY 0x04
+#define BIT_DMP_INT 0x02
+#define BIT_RAW_RDY 0x01
+/*---- MPU 'BANK_SEL' register (37) ----*/
+#define BIT_PRFTCH_EN 0x20
+#define BIT_CFG_USER_BANK 0x10
+#define BITS_MEM_SEL 0x0f
+/*---- MPU 'USER_CTRL' register (3D) ----*/
+#define BIT_DMP_EN 0x80
+#define BIT_FIFO_EN 0x40
+#define BIT_AUX_IF_EN 0x20
+#define BIT_AUX_RD_LENG 0x10
+#define BIT_AUX_IF_RST 0x08
+#define BIT_DMP_RST 0x04
+#define BIT_FIFO_RST 0x02
+#define BIT_GYRO_RST 0x01
+/*---- MPU 'PWR_MGM' register (3E) ----*/
+#define BIT_H_RESET 0x80
+#define BIT_SLEEP 0x40
+#define BIT_STBY_XG 0x20
+#define BIT_STBY_YG 0x10
+#define BIT_STBY_ZG 0x08
+#define BITS_CLKSEL 0x07
+
+/*---- MPU Silicon Revision ----*/
+#define MPU_SILICON_REV_A4 1 /* MPU A4 Device */
+#define MPU_SILICON_REV_B1 2 /* MPU B1 Device */
+#define MPU_SILICON_REV_B4 3 /* MPU B4 Device */
+#define MPU_SILICON_REV_B6 4 /* MPU B6 Device */
+
+/*---- MPU Memory ----*/
+#define MPU_MEM_BANK_SIZE (256)
+#define FIFO_HW_SIZE (512)
+
+enum MPU_MEMORY_BANKS {
+ MPU_MEM_RAM_BANK_0 = 0,
+ MPU_MEM_RAM_BANK_1,
+ MPU_MEM_RAM_BANK_2,
+ MPU_MEM_RAM_BANK_3,
+ MPU_MEM_NUM_RAM_BANKS,
+ MPU_MEM_OTP_BANK_0 = MPU_MEM_NUM_RAM_BANKS,
+ /* This one is always last */
+ MPU_MEM_NUM_BANKS
+};
+
+/*---- structure containing control variables used by MLDL ----*/
+/*---- MPU clock source settings ----*/
+/*---- MPU filter selections ----*/
+enum mpu_filter {
+ MPU_FILTER_256HZ_NOLPF2 = 0,
+ MPU_FILTER_188HZ,
+ MPU_FILTER_98HZ,
+ MPU_FILTER_42HZ,
+ MPU_FILTER_20HZ,
+ MPU_FILTER_10HZ,
+ MPU_FILTER_5HZ,
+ MPU_FILTER_2100HZ_NOLPF,
+ NUM_MPU_FILTER
+};
+
+enum mpu_fullscale {
+ MPU_FS_250DPS = 0,
+ MPU_FS_500DPS,
+ MPU_FS_1000DPS,
+ MPU_FS_2000DPS,
+ NUM_MPU_FS
+};
+
+enum mpu_clock_sel {
+ MPU_CLK_SEL_INTERNAL = 0,
+ MPU_CLK_SEL_PLLGYROX,
+ MPU_CLK_SEL_PLLGYROY,
+ MPU_CLK_SEL_PLLGYROZ,
+ MPU_CLK_SEL_PLLEXT32K,
+ MPU_CLK_SEL_PLLEXT19M,
+ MPU_CLK_SEL_RESERVED,
+ MPU_CLK_SEL_STOP,
+ NUM_CLK_SEL
+};
+
+enum mpu_ext_sync {
+ MPU_EXT_SYNC_NONE = 0,
+ MPU_EXT_SYNC_TEMP,
+ MPU_EXT_SYNC_GYROX,
+ MPU_EXT_SYNC_GYROY,
+ MPU_EXT_SYNC_GYROZ,
+ MPU_EXT_SYNC_ACCELX,
+ MPU_EXT_SYNC_ACCELY,
+ MPU_EXT_SYNC_ACCELZ,
+ NUM_MPU_EXT_SYNC
+};
+
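+/*
+ * Layout of the DLPF_FS_SYNC register: EXT_SYNC select in bits [7:5],
+ * gyro full-scale range in bits [4:3], digital low-pass filter in [2:0]
+ * (matching BITS_EXT_SYNC_MASK, BITS_FS_MASK and BITS_DLPF_CFG_MASK above).
+ */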
+#define DLPF_FS_SYNC_VALUE(ext_sync, full_scale, lpf) \
+ ((ext_sync << 5) | (full_scale << 3) | lpf)
+
+#endif /* __MPU3050_H_ */
diff --git a/drivers/misc/inv_mpu/mpuirq.c b/drivers/misc/inv_mpu/mpuirq.c
new file mode 100644
index 000000000000..dfd87cdabc6e
--- /dev/null
+++ b/drivers/misc/inv_mpu/mpuirq.c
@@ -0,0 +1,257 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/irq.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <linux/mpu.h>
+#include "mpuirq.h"
+#include "mldl_cfg.h"
+
+#define MPUIRQ_NAME "mpuirq"
+
+/* wait queue on which readers of the mpuirq device block until an MPU interrupt arrives */
+
+DECLARE_WAIT_QUEUE_HEAD(mpuirq_wait);
+
+struct mpuirq_dev_data {
+ struct i2c_client *mpu_client;
+ struct miscdevice *dev;
+ int irq;
+ int pid;
+ int accel_divider;
+ int data_ready;
+ int timeout;
+};
+
+static struct mpuirq_dev_data mpuirq_dev_data;
+static struct mpuirq_data mpuirq_data;
+static char *interface = MPUIRQ_NAME;
+
+static int mpuirq_open(struct inode *inode, struct file *file)
+{
+ dev_dbg(mpuirq_dev_data.dev->this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+ mpuirq_dev_data.pid = current->pid;
+ file->private_data = &mpuirq_dev_data;
+ return 0;
+}
+
+/* close function - called when the "file" /dev/mpuirq is closed in userspace */
+static int mpuirq_release(struct inode *inode, struct file *file)
+{
+ dev_dbg(mpuirq_dev_data.dev->this_device, "mpuirq_release\n");
+ return 0;
+}
+
+/* read function - called when /dev/mpuirq is read */
+static ssize_t mpuirq_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct mpuirq_dev_data *p_mpuirq_dev_data = file->private_data;
+
+ if (!mpuirq_dev_data.data_ready &&
+ mpuirq_dev_data.timeout && (!(file->f_flags & O_NONBLOCK))) {
+ wait_event_interruptible_timeout(mpuirq_wait,
+ mpuirq_dev_data.data_ready,
+ mpuirq_dev_data.timeout);
+ }
+
+ if (mpuirq_dev_data.data_ready && NULL != buf
+ && count >= sizeof(mpuirq_data)) {
+ err = copy_to_user(buf, &mpuirq_data, sizeof(mpuirq_data));
+ mpuirq_data.data_type = 0;
+ } else {
+ return 0;
+ }
+ if (err != 0) {
+ dev_err(p_mpuirq_dev_data->dev->this_device,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ mpuirq_dev_data.data_ready = 0;
+ len = sizeof(mpuirq_data);
+ return len;
+}
+
+unsigned int mpuirq_poll(struct file *file, struct poll_table_struct *poll)
+{
+ int mask = 0;
+
+ poll_wait(file, &mpuirq_wait, poll);
+ if (mpuirq_dev_data.data_ready)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+/* ioctl - I/O control */
+static long mpuirq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int data;
+
+ switch (cmd) {
+ case MPUIRQ_SET_TIMEOUT:
+ mpuirq_dev_data.timeout = arg;
+ break;
+
+ case MPUIRQ_GET_INTERRUPT_CNT:
+ data = mpuirq_data.interruptcount - 1;
+ if (mpuirq_data.interruptcount > 1)
+ mpuirq_data.interruptcount = 1;
+
+ if (copy_to_user((int *)arg, &data, sizeof(int)))
+ return -EFAULT;
+ break;
+ case MPUIRQ_GET_IRQ_TIME:
+ if (copy_to_user((int *)arg, &mpuirq_data.irqtime,
+ sizeof(mpuirq_data.irqtime)))
+ return -EFAULT;
+ mpuirq_data.irqtime = 0;
+ break;
+ case MPUIRQ_SET_FREQUENCY_DIVIDER:
+ mpuirq_dev_data.accel_divider = arg;
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static irqreturn_t mpuirq_handler(int irq, void *dev_id)
+{
+ static int mycount;
+ struct timeval irqtime;
+ mycount++;
+
+ mpuirq_data.interruptcount++;
+
+ /* wake up (unblock) for reading data from userspace */
+ /* and ignore first interrupt generated in module init */
+ mpuirq_dev_data.data_ready = 1;
+
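+ /* Timestamp the interrupt: seconds in the upper 32 bits, microseconds
+ * in the lower bits, as returned by MPUIRQ_GET_IRQ_TIME.
+ */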
+ do_gettimeofday(&irqtime);
+ mpuirq_data.irqtime = (((long long)irqtime.tv_sec) << 32);
+ mpuirq_data.irqtime += irqtime.tv_usec;
+ mpuirq_data.data_type = MPUIRQ_DATA_TYPE_MPU_IRQ;
+ mpuirq_data.data = 0;
+
+ wake_up_interruptible(&mpuirq_wait);
+
+ return IRQ_HANDLED;
+
+}
+
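+/*
+ * Typical userspace usage (sketch): poll() the mpuirq misc node until it
+ * becomes readable, then read() one struct mpuirq_data record containing
+ * the interrupt count and the packed timestamp.
+ */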
+/* define which file operations are supported */
+const struct file_operations mpuirq_fops = {
+ .owner = THIS_MODULE,
+ .read = mpuirq_read,
+ .poll = mpuirq_poll,
+
+ .unlocked_ioctl = mpuirq_ioctl,
+ .open = mpuirq_open,
+ .release = mpuirq_release,
+};
+
+static struct miscdevice mpuirq_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = MPUIRQ_NAME,
+ .fops = &mpuirq_fops,
+};
+
+int mpuirq_init(struct i2c_client *mpu_client, struct mldl_cfg *mldl_cfg)
+{
+
+ int res;
+
+ mpuirq_dev_data.mpu_client = mpu_client;
+
+ dev_info(&mpu_client->adapter->dev,
+ "Module Param interface = %s\n", interface);
+
+ mpuirq_dev_data.irq = mpu_client->irq;
+ mpuirq_dev_data.pid = 0;
+ mpuirq_dev_data.accel_divider = -1;
+ mpuirq_dev_data.data_ready = 0;
+ mpuirq_dev_data.timeout = 0;
+ mpuirq_dev_data.dev = &mpuirq_device;
+
+ if (mpuirq_dev_data.irq) {
+ unsigned long flags;
+ if (BIT_ACTL_LOW == ((mldl_cfg->pdata->int_config) & BIT_ACTL))
+ flags = IRQF_TRIGGER_FALLING;
+ else
+ flags = IRQF_TRIGGER_RISING;
+
+ flags |= IRQF_SHARED;
+ res =
+ request_irq(mpuirq_dev_data.irq, mpuirq_handler, flags,
+ interface, &mpuirq_dev_data.irq);
+ if (res) {
+ dev_err(&mpu_client->adapter->dev,
+ "myirqtest: cannot register IRQ %d\n",
+ mpuirq_dev_data.irq);
+ } else {
+ res = misc_register(&mpuirq_device);
+ if (res < 0) {
+ dev_err(&mpu_client->adapter->dev,
+ "misc_register returned %d\n", res);
+ free_irq(mpuirq_dev_data.irq,
+ &mpuirq_dev_data.irq);
+ }
+ }
+
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+void mpuirq_exit(void)
+{
+ if (mpuirq_dev_data.irq > 0)
+ free_irq(mpuirq_dev_data.irq, &mpuirq_dev_data.irq);
+
+ dev_info(mpuirq_device.this_device, "Unregistering %s\n", MPUIRQ_NAME);
+ misc_deregister(&mpuirq_device);
+
+ return;
+}
+
+module_param(interface, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(interface, "The Interface name");
diff --git a/drivers/misc/inv_mpu/mpuirq.h b/drivers/misc/inv_mpu/mpuirq.h
new file mode 100644
index 000000000000..33480711f79d
--- /dev/null
+++ b/drivers/misc/inv_mpu/mpuirq.h
@@ -0,0 +1,36 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MPUIRQ__
+#define __MPUIRQ__
+
+#include <linux/i2c-dev.h>
+#include <linux/time.h>
+#include <linux/ioctl.h>
+#include "mldl_cfg.h"
+
+#define MPUIRQ_SET_TIMEOUT _IOW(MPU_IOCTL, 0x40, unsigned long)
+#define MPUIRQ_GET_INTERRUPT_CNT _IOR(MPU_IOCTL, 0x41, unsigned long)
+#define MPUIRQ_GET_IRQ_TIME _IOR(MPU_IOCTL, 0x42, struct timeval)
+#define MPUIRQ_SET_FREQUENCY_DIVIDER _IOW(MPU_IOCTL, 0x43, unsigned long)
+
+void mpuirq_exit(void);
+int mpuirq_init(struct i2c_client *mpu_client, struct mldl_cfg *mldl_cfg);
+
+#endif
diff --git a/drivers/misc/inv_mpu/pressure/Kconfig b/drivers/misc/inv_mpu/pressure/Kconfig
new file mode 100644
index 000000000000..f1c021e8f126
--- /dev/null
+++ b/drivers/misc/inv_mpu/pressure/Kconfig
@@ -0,0 +1,20 @@
+menuconfig INV_SENSORS_PRESSURE
+ bool "Pressure Sensor Slaves"
+ depends on INV_SENSORS
+ default y
+ help
+ Select y to see a list of supported pressure sensors that can be
+ integrated with the MPUxxxx set of motion processors.
+
+if INV_SENSORS_PRESSURE
+
+config MPU_SENSORS_BMA085
+ tristate "Bosch BMA085"
+ help
+ This enables support for the Bosch BMA085 pressure sensor for
+ integration with the MPU3050 or MPU6050 gyroscope device driver.
+ Only one pressure sensor can be registered at a time. Specifying
+ more than one pressure sensor in the board file will result in
+ runtime errors.
+
+endif
diff --git a/drivers/misc/inv_mpu/pressure/Makefile b/drivers/misc/inv_mpu/pressure/Makefile
new file mode 100644
index 000000000000..595923d809dc
--- /dev/null
+++ b/drivers/misc/inv_mpu/pressure/Makefile
@@ -0,0 +1,8 @@
+#
+# Pressure Slaves to MPUxxxx
+#
+obj-$(CONFIG_MPU_SENSORS_BMA085) += inv_mpu_bma085.o
+inv_mpu_bma085-objs += bma085.o
+
+EXTRA_CFLAGS += -Idrivers/misc/inv_mpu
+EXTRA_CFLAGS += -D__C99_DESIGNATED_INITIALIZER
diff --git a/drivers/misc/inv_mpu/pressure/bma085.c b/drivers/misc/inv_mpu/pressure/bma085.c
new file mode 100644
index 000000000000..696d2b6e183c
--- /dev/null
+++ b/drivers/misc/inv_mpu/pressure/bma085.c
@@ -0,0 +1,367 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup ACCELDL (Motion Library - Pressure Driver Layer)
+ * @brief Provides the interface to set up and handle a pressure sensor
+ * connected to the secondary I2C interface of the gyroscope.
+ *
+ * @{
+ * @file bma085.c
+ * @brief Pressure setup and handling methods.
+ */
+
+/* ------------------ */
+/* - Include Files. - */
+/* ------------------ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "mpu-dev.h"
+
+#include <linux/mpu.h>
+#include "mlsl.h"
+#include "log.h"
+
+/*
+ * this structure holds all device specific calibration parameters
+ */
+struct bmp085_calibration_param_t {
+ short ac1;
+ short ac2;
+ short ac3;
+ unsigned short ac4;
+ unsigned short ac5;
+ unsigned short ac6;
+ short b1;
+ short b2;
+ short mb;
+ short mc;
+ short md;
+ long param_b5;
+};
+
+struct bmp085_calibration_param_t cal_param;
+
+#define PRESSURE_BMA085_PARAM_MG 3038 /* calibration parameter */
+#define PRESSURE_BMA085_PARAM_MH -7357 /* calibration parameter */
+#define PRESSURE_BMA085_PARAM_MI 3791 /* calibration parameter */
+
+/*********************************************
+ * Pressure Initialization Functions
+ *********************************************/
+
+static int bma085_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = INV_SUCCESS;
+ return result;
+}
+
+#define PRESSURE_BMA085_PROM_START_ADDR (0xAA)
+#define PRESSURE_BMA085_PROM_DATA_LEN (22)
+#define PRESSURE_BMP085_CTRL_MEAS_REG (0xF4)
+/* temperature measurement */
+#define PRESSURE_BMP085_T_MEAS (0x2E)
+/* pressure measurement; oversampling_setting */
+#define PRESSURE_BMP085_P_MEAS_OSS_0 (0x34)
+#define PRESSURE_BMP085_P_MEAS_OSS_1 (0x74)
+#define PRESSURE_BMP085_P_MEAS_OSS_2 (0xB4)
+#define PRESSURE_BMP085_P_MEAS_OSS_3 (0xF4)
+#define PRESSURE_BMP085_ADC_OUT_MSB_REG (0xF6)
+#define PRESSURE_BMP085_ADC_OUT_LSB_REG (0xF7)
+
+static int bma085_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char data[PRESSURE_BMA085_PROM_DATA_LEN];
+
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ PRESSURE_BMA085_PROM_START_ADDR,
+ PRESSURE_BMA085_PROM_DATA_LEN, data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+
+ /* parameters AC1-AC6 */
+ cal_param.ac1 = (data[0] << 8) | data[1];
+ cal_param.ac2 = (data[2] << 8) | data[3];
+ cal_param.ac3 = (data[4] << 8) | data[5];
+ cal_param.ac4 = (data[6] << 8) | data[7];
+ cal_param.ac5 = (data[8] << 8) | data[9];
+ cal_param.ac6 = (data[10] << 8) | data[11];
+
+ /* parameters B1,B2 */
+ cal_param.b1 = (data[12] << 8) | data[13];
+ cal_param.b2 = (data[14] << 8) | data[15];
+
+ /* parameters MB,MC,MD */
+ cal_param.mb = (data[16] << 8) | data[17];
+ cal_param.mc = (data[18] << 8) | data[19];
+ cal_param.md = (data[20] << 8) | data[21];
+
+ return result;
+}
+
+static int bma085_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ long pressure, x1, x2, x3, b3, b6;
+ unsigned long b4, b7;
+ unsigned long up;
+ unsigned short ut;
+ short oversampling_setting = 0;
+ short temperature;
+ long divisor;
+
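+ /*
+ * The conversion below follows the standard BMP085 compensation
+ * sequence: read the uncompensated temperature (UT) and pressure (UP),
+ * then combine them with the AC1..MD calibration words read from the
+ * device EEPROM in bma085_resume().
+ */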
+ /* get temperature */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ PRESSURE_BMP085_CTRL_MEAS_REG,
+ PRESSURE_BMP085_T_MEAS);
+ msleep(5);
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ PRESSURE_BMP085_ADC_OUT_MSB_REG, 2,
+ (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ ut = (data[0] << 8) | data[1];
+
+ x1 = (((long) ut - (long)cal_param.ac6) * (long)cal_param.ac5) >> 15;
+ divisor = x1 + cal_param.md;
+ if (!divisor)
+ return INV_ERROR_DIVIDE_BY_ZERO;
+
+ x2 = ((long)cal_param.mc << 11) / (x1 + cal_param.md);
+ cal_param.param_b5 = x1 + x2;
+ /* temperature in 0.1 degree C */
+ temperature = (short)((cal_param.param_b5 + 8) >> 4);
+
+ /* get pressure */
+ result = inv_serial_single_write(mlsl_handle, pdata->address,
+ PRESSURE_BMP085_CTRL_MEAS_REG,
+ PRESSURE_BMP085_P_MEAS_OSS_0);
+ msleep(5);
+ result =
+ inv_serial_read(mlsl_handle, pdata->address,
+ PRESSURE_BMP085_ADC_OUT_MSB_REG, 2,
+ (unsigned char *)data);
+ if (result) {
+ LOG_RESULT_LOCATION(result);
+ return result;
+ }
+ up = (((unsigned long) data[0] << 8) | ((unsigned long) data[1]));
+
+ b6 = cal_param.param_b5 - 4000;
+ /* calculate B3 */
+ x1 = (b6*b6) >> 12;
+ x1 *= cal_param.b2;
+ x1 >>= 11;
+
+ x2 = (cal_param.ac2*b6);
+ x2 >>= 11;
+
+ x3 = x1 + x2;
+
+ b3 = (((((long)cal_param.ac1) * 4 + x3)
+ << oversampling_setting) + 2) >> 2;
+
+ /* calculate B4 */
+ x1 = (cal_param.ac3 * b6) >> 13;
+ x2 = (cal_param.b1 * ((b6*b6) >> 12)) >> 16;
+ x3 = ((x1 + x2) + 2) >> 2;
+ b4 = (cal_param.ac4 * (unsigned long) (x3 + 32768)) >> 15;
+ if (!b4)
+ return INV_ERROR;
+
+ b7 = ((unsigned long)(up - b3) * (50000>>oversampling_setting));
+ if (b7 < 0x80000000)
+ pressure = (b7 << 1) / b4;
+ else
+ pressure = (b7 / b4) << 1;
+
+ x1 = pressure >> 8;
+ x1 *= x1;
+ x1 = (x1 * PRESSURE_BMA085_PARAM_MG) >> 16;
+ x2 = (pressure * PRESSURE_BMA085_PARAM_MH) >> 16;
+ /* pressure in Pa */
+ pressure += (x1 + x2 + PRESSURE_BMA085_PARAM_MI) >> 4;
+
+ data[0] = (unsigned char)(pressure >> 16);
+ data[1] = (unsigned char)(pressure >> 8);
+ data[2] = (unsigned char)(pressure & 0xFF);
+
+ return result;
+}
+
+static struct ext_slave_descr bma085_descr = {
+ .init = NULL,
+ .exit = NULL,
+ .suspend = bma085_suspend,
+ .resume = bma085_resume,
+ .read = bma085_read,
+ .config = NULL,
+ .get_config = NULL,
+ .name = "bma085",
+ .type = EXT_SLAVE_TYPE_PRESSURE,
+ .id = PRESSURE_ID_BMA085,
+ .read_reg = 0xF6,
+ .read_len = 3,
+ .endian = EXT_SLAVE_BIG_ENDIAN,
+ .range = {0, 0},
+};
+
+static
+struct ext_slave_descr *bma085_get_slave_descr(void)
+{
+ return &bma085_descr;
+}
+
+/* Platform data for the MPU */
+struct bma085_mod_private_data {
+ struct i2c_client *client;
+ struct ext_slave_platform_data *pdata;
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+static int bma085_mod_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ext_slave_platform_data *pdata;
+ struct bma085_mod_private_data *private_data;
+ int result = 0;
+
+ dev_info(&client->adapter->dev, "%s: %s\n", __func__, devid->name);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENODEV;
+ goto out_no_free;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->adapter->dev,
+ "Missing platform data for slave %s\n", devid->name);
+ result = -EFAULT;
+ goto out_no_free;
+ }
+
+ private_data = kzalloc(sizeof(*private_data), GFP_KERNEL);
+ if (!private_data) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+
+ i2c_set_clientdata(client, private_data);
+ private_data->client = client;
+ private_data->pdata = pdata;
+
+ result = inv_mpu_register_slave(THIS_MODULE, client, pdata,
+ bma085_get_slave_descr);
+ if (result) {
+ dev_err(&client->adapter->dev,
+ "Slave registration failed: %s, %d\n",
+ devid->name, result);
+ goto out_free_memory;
+ }
+
+ return result;
+
+out_free_memory:
+ kfree(private_data);
+out_no_free:
+ dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result);
+ return result;
+
+}
+
+static int bma085_mod_remove(struct i2c_client *client)
+{
+ struct bma085_mod_private_data *private_data =
+ i2c_get_clientdata(client);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ inv_mpu_unregister_slave(client, private_data->pdata,
+ bma085_get_slave_descr);
+
+ kfree(private_data);
+ return 0;
+}
+
+static const struct i2c_device_id bma085_mod_id[] = {
+ { "bma085", PRESSURE_ID_BMA085 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bma085_mod_id);
+
+static struct i2c_driver bma085_mod_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = bma085_mod_probe,
+ .remove = bma085_mod_remove,
+ .id_table = bma085_mod_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bma085_mod",
+ },
+ .address_list = normal_i2c,
+};
+
+static int __init bma085_mod_init(void)
+{
+ int res = i2c_add_driver(&bma085_mod_driver);
+ pr_info("%s: Probe name %s\n", __func__, "bma085_mod");
+ if (res)
+ pr_err("%s failed\n", __func__);
+ return res;
+}
+
+static void __exit bma085_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+ i2c_del_driver(&bma085_mod_driver);
+}
+
+module_init(bma085_mod_init);
+module_exit(bma085_mod_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Driver to integrate BMA085 sensor with the MPU");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("bma085_mod");
+/**
+ * @}
+**/
diff --git a/drivers/misc/inv_mpu/slaveirq.c b/drivers/misc/inv_mpu/slaveirq.c
new file mode 100644
index 000000000000..95e690ee60cb
--- /dev/null
+++ b/drivers/misc/inv_mpu/slaveirq.c
@@ -0,0 +1,266 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/irq.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#include <linux/mpu.h>
+#include "slaveirq.h"
+#include "mldl_cfg.h"
+
+/* per-slave interrupt handling: exposes a misc device that userspace can poll/read for slave sensor interrupt notifications */
+
+struct slaveirq_dev_data {
+ struct miscdevice dev;
+ struct i2c_client *slave_client;
+ struct mpuirq_data data;
+ wait_queue_head_t slaveirq_wait;
+ int irq;
+ int pid;
+ int data_ready;
+ int timeout;
+};
+
+/* The following depends on patch fa1f68db6ca7ebb6fc4487ac215bffba06c01c28
+ * drivers: misc: pass miscdevice pointer via file private data
+ */
+static int slaveirq_open(struct inode *inode, struct file *file)
+{
+ /* The miscdevice is available in file->private_data, which is
+ * exactly what we want, so we leave it there */
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ dev_dbg(data->dev.this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+ data->pid = current->pid;
+ return 0;
+}
+
+static int slaveirq_release(struct inode *inode, struct file *file)
+{
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+ dev_dbg(data->dev.this_device, "slaveirq_release\n");
+ return 0;
+}
+
+/* read function - called when the slave irq device node is read */
+static ssize_t slaveirq_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ if (!data->data_ready && data->timeout &&
+ !(file->f_flags & O_NONBLOCK)) {
+ wait_event_interruptible_timeout(data->slaveirq_wait,
+ data->data_ready,
+ data->timeout);
+ }
+
+ if (data->data_ready && NULL != buf && count >= sizeof(data->data)) {
+ err = copy_to_user(buf, &data->data, sizeof(data->data));
+ data->data.data_type = 0;
+ } else {
+ return 0;
+ }
+ if (err != 0) {
+ dev_err(data->dev.this_device,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ data->data_ready = 0;
+ len = sizeof(data->data);
+ return len;
+}
+
+static unsigned int slaveirq_poll(struct file *file,
+ struct poll_table_struct *poll)
+{
+ int mask = 0;
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ poll_wait(file, &data->slaveirq_wait, poll);
+ if (data->data_ready)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+/* ioctl - I/O control */
+static long slaveirq_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int tmp;
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ switch (cmd) {
+ case SLAVEIRQ_SET_TIMEOUT:
+ data->timeout = arg;
+ break;
+
+ case SLAVEIRQ_GET_INTERRUPT_CNT:
+ tmp = data->data.interruptcount - 1;
+ if (data->data.interruptcount > 1)
+ data->data.interruptcount = 1;
+
+ if (copy_to_user((int *)arg, &tmp, sizeof(int)))
+ return -EFAULT;
+ break;
+ case SLAVEIRQ_GET_IRQ_TIME:
+ if (copy_to_user((int *)arg, &data->data.irqtime,
+ sizeof(data->data.irqtime)))
+ return -EFAULT;
+ data->data.irqtime = 0;
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static irqreturn_t slaveirq_handler(int irq, void *dev_id)
+{
+ struct slaveirq_dev_data *data = (struct slaveirq_dev_data *)dev_id;
+ static int mycount;
+ struct timeval irqtime;
+ mycount++;
+
+ data->data.interruptcount++;
+
+ /* wake up (unblock) for reading data from userspace */
+ data->data_ready = 1;
+
+ do_gettimeofday(&irqtime);
+ data->data.irqtime = (((long long)irqtime.tv_sec) << 32);
+ data->data.irqtime += irqtime.tv_usec;
+ data->data.data_type |= 1;
+
+ wake_up_interruptible(&data->slaveirq_wait);
+
+ return IRQ_HANDLED;
+
+}
+
+/* define which file operations are supported */
+static const struct file_operations slaveirq_fops = {
+ .owner = THIS_MODULE,
+ .read = slaveirq_read,
+ .poll = slaveirq_poll,
+
+#if HAVE_COMPAT_IOCTL
+ .compat_ioctl = slaveirq_ioctl,
+#endif
+#if HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = slaveirq_ioctl,
+#endif
+ .open = slaveirq_open,
+ .release = slaveirq_release,
+};
+
+int slaveirq_init(struct i2c_adapter *slave_adapter,
+ struct ext_slave_platform_data *pdata, char *name)
+{
+
+ int res;
+ struct slaveirq_dev_data *data;
+
+ if (!pdata->irq)
+ return -EINVAL;
+
+ pdata->irq_data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = (struct slaveirq_dev_data *)pdata->irq_data;
+ if (!data)
+ return -ENOMEM;
+
+ data->dev.minor = MISC_DYNAMIC_MINOR;
+ data->dev.name = name;
+ data->dev.fops = &slaveirq_fops;
+ data->irq = pdata->irq;
+ data->pid = 0;
+ data->data_ready = 0;
+ data->timeout = 0;
+
+ init_waitqueue_head(&data->slaveirq_wait);
+
+ res = request_irq(data->irq, slaveirq_handler,
+ IRQF_TRIGGER_RISING | IRQF_SHARED,
+ data->dev.name, data);
+
+ if (res) {
+ dev_err(&slave_adapter->dev,
+ "myirqtest: cannot register IRQ %d\n", data->irq);
+ goto out_request_irq;
+ }
+
+ res = misc_register(&data->dev);
+ if (res < 0) {
+ dev_err(&slave_adapter->dev,
+ "misc_register returned %d\n", res);
+ goto out_misc_register;
+ }
+
+ return res;
+
+out_misc_register:
+ free_irq(data->irq, data);
+out_request_irq:
+ kfree(pdata->irq_data);
+ pdata->irq_data = NULL;
+
+ return res;
+}
+
+void slaveirq_exit(struct ext_slave_platform_data *pdata)
+{
+ struct slaveirq_dev_data *data = pdata->irq_data;
+
+ if (!pdata->irq_data || data->irq <= 0)
+ return;
+
+ dev_info(data->dev.this_device, "Unregistering %s\n", data->dev.name);
+
+ free_irq(data->irq, data);
+ misc_deregister(&data->dev);
+ kfree(pdata->irq_data);
+ pdata->irq_data = NULL;
+}
diff --git a/drivers/misc/inv_mpu/slaveirq.h b/drivers/misc/inv_mpu/slaveirq.h
new file mode 100644
index 000000000000..6926634ff94c
--- /dev/null
+++ b/drivers/misc/inv_mpu/slaveirq.h
@@ -0,0 +1,36 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __SLAVEIRQ__
+#define __SLAVEIRQ__
+
+#include <linux/i2c-dev.h>
+
+#include <linux/mpu.h>
+#include "mpuirq.h"
+
+#define SLAVEIRQ_SET_TIMEOUT _IOW(MPU_IOCTL, 0x50, unsigned long)
+#define SLAVEIRQ_GET_INTERRUPT_CNT _IOR(MPU_IOCTL, 0x51, unsigned long)
+#define SLAVEIRQ_GET_IRQ_TIME _IOR(MPU_IOCTL, 0x52, unsigned long)
+
+void slaveirq_exit(struct ext_slave_platform_data *pdata);
+int slaveirq_init(struct i2c_adapter *slave_adapter,
+ struct ext_slave_platform_data *pdata, char *name);
+
+#endif
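
slaveirq.c and slaveirq.h together expose each slave interrupt as a misc character device: read() returns one struct mpuirq_data record, poll() reports POLLIN when data is pending, and the ioctls above tune the behaviour. The sketch below is purely illustrative: the node name "/dev/accelirq" and the 50-jiffy timeout are assumptions (the real name is whatever string the MPU driver passes to slaveirq_init()), and it assumes these ioctl definitions are mirrored in an exported userspace header.

    /* Illustrative only: block on a slave IRQ device and read one record. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/mpu.h>          /* struct mpuirq_data */
    #include "slaveirq.h"           /* SLAVEIRQ_* ioctls (assumed exported) */

    int main(void)
    {
        struct mpuirq_data rec;
        struct pollfd pfd;
        int fd = open("/dev/accelirq", O_RDONLY);  /* name is board specific */

        if (fd < 0)
            return 1;
        /* value is handed straight to wait_event_interruptible_timeout() */
        ioctl(fd, SLAVEIRQ_SET_TIMEOUT, 50);
        pfd.fd = fd;
        pfd.events = POLLIN;
        if (poll(&pfd, 1, 1000) > 0 &&
            read(fd, &rec, sizeof(rec)) == sizeof(rec))
            printf("irq #%d at %lld\n", rec.interruptcount,
                   (long long)rec.irqtime);
        close(fd);
        return 0;
    }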
diff --git a/drivers/misc/inv_mpu/timerirq.c b/drivers/misc/inv_mpu/timerirq.c
new file mode 100644
index 000000000000..601858f9c4d5
--- /dev/null
+++ b/drivers/misc/inv_mpu/timerirq.c
@@ -0,0 +1,296 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include <linux/mpu.h>
+#include "mltypes.h"
+#include "timerirq.h"
+
+/* per-open state for the timer-based IRQ emulation exposed to userspace */
+struct timerirq_data {
+ int pid;
+ int data_ready;
+ int run;
+ int timeout;
+ unsigned long period;
+ struct mpuirq_data data;
+ struct completion timer_done;
+ wait_queue_head_t timerirq_wait;
+ struct timer_list timer;
+ struct miscdevice *dev;
+};
+
+static struct miscdevice *timerirq_dev_data;
+
+static void timerirq_handler(unsigned long arg)
+{
+ struct timerirq_data *data = (struct timerirq_data *)arg;
+ struct timeval irqtime;
+
+ data->data.interruptcount++;
+
+ data->data_ready = 1;
+
+ do_gettimeofday(&irqtime);
+ data->data.irqtime = (((long long)irqtime.tv_sec) << 32);
+ data->data.irqtime += irqtime.tv_usec;
+ data->data.data_type |= 1;
+
+ dev_dbg(data->dev->this_device,
+ "%s, %lld, %ld\n", __func__, data->data.irqtime,
+ (unsigned long)data);
+
+ wake_up_interruptible(&data->timerirq_wait);
+
+ if (data->run)
+ mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(data->period));
+ else
+ complete(&data->timer_done);
+}
+
+static int start_timerirq(struct timerirq_data *data)
+{
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+
+ /* Timer already running... success */
+ if (data->run)
+ return 0;
+
+ /* Don't allow a period of 0 since this would fire constantly */
+ if (!data->period)
+ return -EINVAL;
+
+ data->run = true;
+ data->data_ready = false;
+
+ init_completion(&data->timer_done);
+ setup_timer(&data->timer, timerirq_handler, (unsigned long)data);
+
+ return mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(data->period));
+}
+
+static int stop_timerirq(struct timerirq_data *data)
+{
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %lx\n", __func__, (unsigned long)data);
+
+ if (data->run) {
+ data->run = false;
+ mod_timer(&data->timer, jiffies + 1);
+ wait_for_completion(&data->timer_done);
+ }
+ return 0;
+}
+
+/* The following depends on patch fa1f68db6ca7ebb6fc4487ac215bffba06c01c28
+ * drivers: misc: pass miscdevice pointer via file private data
+ */
+static int timerirq_open(struct inode *inode, struct file *file)
+{
+ /* Device node is available in the file->private_data, this is
+ * exactly what we want so we leave it there */
+ struct miscdevice *dev_data = file->private_data;
+ struct timerirq_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev_data;
+ file->private_data = data;
+ data->pid = current->pid;
+ init_waitqueue_head(&data->timerirq_wait);
+
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+ return 0;
+}
+
+static int timerirq_release(struct inode *inode, struct file *file)
+{
+ struct timerirq_data *data = file->private_data;
+ dev_dbg(data->dev->this_device, "timerirq_release\n");
+ if (data->run)
+ stop_timerirq(data);
+ kfree(data);
+ return 0;
+}
+
+/* read function, called when /dev/timerirq is read */
+static ssize_t timerirq_read(struct file *file,
+ char *buf, size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct timerirq_data *data = file->private_data;
+
+ if (!data->data_ready && data->timeout &&
+ !(file->f_flags & O_NONBLOCK)) {
+ wait_event_interruptible_timeout(data->timerirq_wait,
+ data->data_ready,
+ data->timeout);
+ }
+
+ if (data->data_ready && NULL != buf && count >= sizeof(data->data)) {
+ err = copy_to_user(buf, &data->data, sizeof(data->data));
+ data->data.data_type = 0;
+ } else {
+ return 0;
+ }
+ if (err != 0) {
+ dev_err(data->dev->this_device,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ data->data_ready = 0;
+ len = sizeof(data->data);
+ return len;
+}
+
+static unsigned int timerirq_poll(struct file *file,
+ struct poll_table_struct *poll)
+{
+ int mask = 0;
+ struct timerirq_data *data = file->private_data;
+
+ poll_wait(file, &data->timerirq_wait, poll);
+ if (data->data_ready)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+/* ioctl - I/O control */
+static long timerirq_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int tmp;
+ struct timerirq_data *data = file->private_data;
+
+ if (!data)
+ return -EFAULT;
+
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %d, %d, %ld\n",
+ __func__, current->pid, cmd, arg);
+
+ switch (cmd) {
+ case TIMERIRQ_SET_TIMEOUT:
+ data->timeout = arg;
+ break;
+ case TIMERIRQ_GET_INTERRUPT_CNT:
+ tmp = data->data.interruptcount - 1;
+ if (data->data.interruptcount > 1)
+ data->data.interruptcount = 1;
+
+ if (copy_to_user((int *)arg, &tmp, sizeof(int)))
+ return -EFAULT;
+ break;
+ case TIMERIRQ_START:
+ data->period = arg;
+ retval = start_timerirq(data);
+ break;
+ case TIMERIRQ_STOP:
+ retval = stop_timerirq(data);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+/* define which file operations are supported */
+static const struct file_operations timerirq_fops = {
+ .owner = THIS_MODULE,
+ .read = timerirq_read,
+ .poll = timerirq_poll,
+
+#if HAVE_COMPAT_IOCTL
+ .compat_ioctl = timerirq_ioctl,
+#endif
+#if HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = timerirq_ioctl,
+#endif
+ .open = timerirq_open,
+ .release = timerirq_release,
+};
+
+static int __init timerirq_init(void)
+{
+
+ int res;
+ static struct miscdevice *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ timerirq_dev_data = data;
+ data->minor = MISC_DYNAMIC_MINOR;
+ data->name = "timerirq";
+ data->fops = &timerirq_fops;
+
+ res = misc_register(data);
+ if (res < 0) {
+ pr_err("%s: misc_register returned %d\n", __func__, res);
+ kfree(data);
+ timerirq_dev_data = NULL;
+ return res;
+ }
+
+ return res;
+}
+
+module_init(timerirq_init);
+
+static void __exit timerirq_exit(void)
+{
+ struct miscdevice *data = timerirq_dev_data;
+
+ dev_info(data->this_device, "Unregistering %s\n", data->name);
+
+ misc_deregister(data);
+ kfree(data);
+
+ timerirq_dev_data = NULL;
+}
+
+module_exit(timerirq_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Timer IRQ device driver.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("timerirq");
diff --git a/drivers/misc/inv_mpu/timerirq.h b/drivers/misc/inv_mpu/timerirq.h
new file mode 100644
index 000000000000..f69f07a45a3b
--- /dev/null
+++ b/drivers/misc/inv_mpu/timerirq.h
@@ -0,0 +1,30 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __TIMERIRQ__
+#define __TIMERIRQ__
+
+#include <linux/mpu.h>
+
+#define TIMERIRQ_SET_TIMEOUT _IOW(MPU_IOCTL, 0x60, unsigned long)
+#define TIMERIRQ_GET_INTERRUPT_CNT _IOW(MPU_IOCTL, 0x61, unsigned long)
+#define TIMERIRQ_START _IOW(MPU_IOCTL, 0x62, unsigned long)
+#define TIMERIRQ_STOP _IO(MPU_IOCTL, 0x63)
+
+#endif
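
timerirq registers a single misc device named "timerirq" (so typically /dev/timerirq), and the ioctls above start and stop a kernel timer whose expirations are reported exactly like slave IRQs. The sketch below is a hedged usage example: it assumes the ioctl codes are visible to userspace and that the node sits at the usual path. Note that TIMERIRQ_START takes a period in milliseconds (it is fed to msecs_to_jiffies()), while TIMERIRQ_SET_TIMEOUT is passed straight to the wait-queue timeout.

    /* Illustrative only: 100 ms periodic wakeups from /dev/timerirq. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mpu.h>          /* struct mpuirq_data */
    #include "timerirq.h"           /* TIMERIRQ_* ioctls (assumed exported) */

    int main(void)
    {
        struct mpuirq_data rec;
        int i, fd = open("/dev/timerirq", O_RDONLY);

        if (fd < 0)
            return 1;
        ioctl(fd, TIMERIRQ_SET_TIMEOUT, 200);   /* read() may block this long */
        ioctl(fd, TIMERIRQ_START, 100);         /* period in ms */
        for (i = 0; i < 10; i++) {
            if (read(fd, &rec, sizeof(rec)) == sizeof(rec))
                printf("tick %d at %lld\n", rec.interruptcount,
                       (long long)rec.irqtime);
        }
        ioctl(fd, TIMERIRQ_STOP);
        close(fd);
        return 0;
    }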
diff --git a/drivers/misc/max1749.c b/drivers/misc/max1749.c
new file mode 100644
index 000000000000..e98964289002
--- /dev/null
+++ b/drivers/misc/max1749.c
@@ -0,0 +1,118 @@
+/*
+ * drivers/misc/max1749.c
+ *
+ * Driver for MAX1749, vibrator motor driver.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/regulator/consumer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+
+#include "../staging/android/timed_output.h"
+
+static struct regulator *regulator;
+static int timeout;
+
+static void vibrator_start(void)
+{
+ regulator_enable(regulator);
+}
+
+static void vibrator_stop(void)
+{
+ int ret;
+
+ ret = regulator_is_enabled(regulator);
+ if (ret > 0)
+ regulator_disable(regulator);
+}
+
+/*
+ * Timeout value can be changed from sysfs entry
+ * created by timed_output_dev.
+ * echo 100 > /sys/class/timed_output/vibrator/enable
+ */
+static void vibrator_enable(struct timed_output_dev *dev, int value)
+{
+ timeout = value;
+ if (!regulator)
+ return;
+
+ if (value) {
+ vibrator_start();
+ msleep(value);
+ vibrator_stop();
+ } else {
+ vibrator_stop();
+ }
+}
+
+/*
+ * Timeout value can be read from sysfs entry
+ * created by timed_output_dev.
+ * cat /sys/class/timed_output/vibrator/enable
+ */
+static int vibrator_get_time(struct timed_output_dev *dev)
+{
+ return timeout;
+}
+
+static struct timed_output_dev vibrator_dev = {
+ .name = "vibrator",
+ .get_time = vibrator_get_time,
+ .enable = vibrator_enable,
+};
+
+static int __init vibrator_init(void)
+{
+ int status;
+
+ regulator = regulator_get(NULL, "vdd_vbrtr");
+ if (IS_ERR_OR_NULL(regulator)) {
+ pr_err("vibrator_init:Couldn't get regulator vdd_vbrtr\n");
+ regulator = NULL;
+ return PTR_ERR(regulator);
+ }
+
+ status = timed_output_dev_register(&vibrator_dev);
+
+ if (status) {
+ regulator_put(regulator);
+ regulator = NULL;
+ }
+ return status;
+}
+
+static void __exit vibrator_exit(void)
+{
+ if (regulator) {
+ timed_output_dev_unregister(&vibrator_dev);
+ regulator_put(regulator);
+ regulator = NULL;
+ }
+}
+
+MODULE_DESCRIPTION("timed output vibrator device");
+MODULE_AUTHOR("GPL");
+
+module_init(vibrator_init);
+module_exit(vibrator_exit);
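
The vibrator is driven entirely through the timed_output sysfs node documented in the comments above; since vibrator_enable() msleep()s for the requested duration, the write does not return until the pulse has finished. A small userspace sketch (the path comes from the driver comments, everything else is illustrative):

    /* Illustrative only: pulse the vibrator for 100 ms via sysfs. */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/class/timed_output/vibrator/enable";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return 1;
        }
        fprintf(f, "100\n");   /* duration in ms; the write blocks while vibrating */
        fclose(f);
        return 0;
    }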
diff --git a/drivers/misc/mpu3050/Kconfig b/drivers/misc/mpu3050/Kconfig
new file mode 100644
index 000000000000..de240fa0ad83
--- /dev/null
+++ b/drivers/misc/mpu3050/Kconfig
@@ -0,0 +1,65 @@
+
+menu "Motion Sensors Support"
+
+choice
+ tristate "Motion Processing Unit"
+ depends on I2C
+ optional
+
+config MPU_SENSORS_MPU3050
+ tristate "MPU3050"
+ help
+ If you say yes here you get support for the MPU3050 Gyroscope driver.
+ This driver can also be built as a module. If so, the module
+ will be called mpu3050.
+
+config MPU_SENSORS_MPU6000
+ tristate "MPU6000"
+ help
+ If you say yes here you get support for the MPU6000 Gyroscope driver.
+ This driver can also be built as a module. If so, the module
+ will be called mpu6000.
+
+endchoice
+
+choice
+ prompt "Accelerometer Type"
+ depends on MPU_SENSORS_MPU3050
+ optional
+
+config MPU_SENSORS_KXTF9
+ bool "Kionix KXTF9"
+ help
+ This enables support for the Kionix KXTF9 accelerometer.
+
+endchoice
+
+choice
+ prompt "Compass Type"
+ depends on MPU_SENSORS_MPU6000 || MPU_SENSORS_MPU3050
+ optional
+
+config MPU_SENSORS_AK8975
+ bool "AKM ak8975"
+ help
+ This enables support for the AKM ak8975 compass
+
+endchoice
+
+config MPU_SENSORS_TIMERIRQ
+ tristate "Timer IRQ"
+ help
+ If you say yes here you get access to the timerirq device handle which
+ can be used with select(). This can be used instead of IRQs, sleeping,
+ or timer threads. Reading from this device returns the same type of
+ information as reading from the MPU and slave IRQs.
+
+config MPU_SENSORS_DEBUG
+ bool "MPU debug"
+ depends on MPU_SENSORS_MPU3050 || MPU_SENSORS_MPU6000 || MPU_SENSORS_TIMERIRQ
+ help
+ If you say yes here you get extra debug messages from the MPU3050
+ and other slave sensors.
+
+endmenu
+
diff --git a/drivers/misc/mpu3050/Makefile b/drivers/misc/mpu3050/Makefile
new file mode 100644
index 000000000000..89ac46fdac5b
--- /dev/null
+++ b/drivers/misc/mpu3050/Makefile
@@ -0,0 +1,132 @@
+
+# Kernel makefile for motion sensors
+#
+#
+
+# MPU
+obj-$(CONFIG_MPU_SENSORS_MPU3050) += mpu3050.o
+mpu3050-objs += mpuirq.o \
+ slaveirq.o \
+ mpu-dev.o \
+ mpu-i2c.o \
+ mlsl-kernel.o \
+ mlos-kernel.o \
+ $(MLLITE_DIR)mldl_cfg.o
+
+#
+# Accel options
+#
+ifdef CONFIG_MPU_SENSORS_ADXL346
+mpu3050-objs += $(MLLITE_DIR)accel/adxl346.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_BMA150
+mpu3050-objs += $(MLLITE_DIR)accel/bma150.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_BMA222
+mpu3050-objs += $(MLLITE_DIR)accel/bma222.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_KXSD9
+mpu3050-objs += $(MLLITE_DIR)accel/kxsd9.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_KXTF9
+mpu3050-objs += $(MLLITE_DIR)accel/kxtf9.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_LIS331DLH
+mpu3050-objs += $(MLLITE_DIR)accel/lis331.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_LIS3DH
+mpu3050-objs += $(MLLITE_DIR)accel/lis3dh.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_LSM303DLHA
+mpu3050-objs += $(MLLITE_DIR)accel/lsm303a.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_MMA8450
+mpu3050-objs += $(MLLITE_DIR)accel/mma8450.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_MMA845X
+mpu3050-objs += $(MLLITE_DIR)accel/mma845x.o
+endif
+
+#
+# Compass options
+#
+ifdef CONFIG_MPU_SENSORS_AK8975
+mpu3050-objs += $(MLLITE_DIR)compass/ak8975.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_AMI30X
+mpu3050-objs += $(MLLITE_DIR)compass/ami30x.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_AMI306
+mpu3050-objs += $(MLLITE_DIR)compass/ami306.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_HMC5883
+mpu3050-objs += $(MLLITE_DIR)compass/hmc5883.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_LSM303DLHM
+mpu3050-objs += $(MLLITE_DIR)compass/lsm303m.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_MMC314X
+mpu3050-objs += $(MLLITE_DIR)compass/mmc314x.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_YAS529
+mpu3050-objs += $(MLLITE_DIR)compass/yas529-kernel.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_YAS530
+mpu3050-objs += $(MLLITE_DIR)compass/yas530.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_HSCDTD002B
+mpu3050-objs += $(MLLITE_DIR)compass/hscdtd002b.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_HSCDTD004A
+mpu3050-objs += $(MLLITE_DIR)compass/hscdtd004a.o
+endif
+#
+# Pressure options
+#
+ifdef CONFIG_MPU_SENSORS_BMA085
+mpu3050-objs += $(MLLITE_DIR)pressure/bma085.o
+endif
+
+EXTRA_CFLAGS += -I$(M)/$(MLLITE_DIR) \
+ -I$(M)/../../include \
+ -Idrivers/misc/mpu3050 \
+ -Iinclude/linux
+
+obj-$(CONFIG_MPU_SENSORS_MPU6000)+= mpu6000.o
+mpu6000-objs += mpuirq.o \
+ slaveirq.o \
+ mpu-dev.o \
+ mpu-i2c.o \
+ mlsl-kernel.o \
+ mlos-kernel.o \
+ $(MLLITE_DIR)mldl_cfg.o \
+ $(MLLITE_DIR)accel/mantis.o
+
+ifdef CONFIG_MPU_SENSORS_AK8975
+mpu6000-objs += $(MLLITE_DIR)compass/ak8975.o
+endif
+
+ifdef CONFIG_MPU_SENSORS_MPU6000
+EXTRA_CFLAGS += -DM_HW
+endif
+
+obj-$(CONFIG_MPU_SENSORS_TIMERIRQ)+= timerirq.o
+
diff --git a/drivers/misc/mpu3050/accel/kxtf9.c b/drivers/misc/mpu3050/accel/kxtf9.c
new file mode 100644
index 000000000000..938cd572a8fd
--- /dev/null
+++ b/drivers/misc/mpu3050/accel/kxtf9.c
@@ -0,0 +1,669 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup ACCELDL (Motion Library - Accelerometer Driver Layer)
+ * @brief Provides the interface to set up and handle an accelerometer
+ * connected to the secondary I2C interface of the gyroscope.
+ *
+ * @{
+ * @file kxtf9.c
+ * @brief Accelerometer setup and handling methods.
+*/
+
+/* ------------------ */
+/* - Include Files. - */
+/* ------------------ */
+
+#undef MPL_LOG_NDEBUG
+#define MPL_LOG_NDEBUG 1
+
+#ifdef __KERNEL__
+#include <linux/module.h>
+#endif
+
+#include "mpu.h"
+#include "mlsl.h"
+#include "mlos.h"
+
+#include <log.h>
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-acc"
+
+#define KXTF9_XOUT_HPF_L (0x00) /* 0000 0000 */
+#define KXTF9_XOUT_HPF_H (0x01) /* 0000 0001 */
+#define KXTF9_YOUT_HPF_L (0x02) /* 0000 0010 */
+#define KXTF9_YOUT_HPF_H (0x03) /* 0000 0011 */
+#define KXTF9_ZOUT_HPF_L (0x04) /* 0001 0100 */
+#define KXTF9_ZOUT_HPF_H (0x05) /* 0001 0101 */
+#define KXTF9_XOUT_L (0x06) /* 0000 0110 */
+#define KXTF9_XOUT_H (0x07) /* 0000 0111 */
+#define KXTF9_YOUT_L (0x08) /* 0000 1000 */
+#define KXTF9_YOUT_H (0x09) /* 0000 1001 */
+#define KXTF9_ZOUT_L (0x0A) /* 0001 1010 */
+#define KXTF9_ZOUT_H (0x0B) /* 0001 1011 */
+#define KXTF9_ST_RESP (0x0C) /* 0000 1100 */
+#define KXTF9_WHO_AM_I (0x0F) /* 0000 1111 */
+#define KXTF9_TILT_POS_CUR (0x10) /* 0001 0000 */
+#define KXTF9_TILT_POS_PRE (0x11) /* 0001 0001 */
+#define KXTF9_INT_SRC_REG1 (0x15) /* 0001 0101 */
+#define KXTF9_INT_SRC_REG2 (0x16) /* 0001 0110 */
+#define KXTF9_STATUS_REG (0x18) /* 0001 1000 */
+#define KXTF9_INT_REL (0x1A) /* 0001 1010 */
+#define KXTF9_CTRL_REG1 (0x1B) /* 0001 1011 */
+#define KXTF9_CTRL_REG2 (0x1C) /* 0001 1100 */
+#define KXTF9_CTRL_REG3 (0x1D) /* 0001 1101 */
+#define KXTF9_INT_CTRL_REG1 (0x1E) /* 0001 1110 */
+#define KXTF9_INT_CTRL_REG2 (0x1F) /* 0001 1111 */
+#define KXTF9_INT_CTRL_REG3 (0x20) /* 0010 0000 */
+#define KXTF9_DATA_CTRL_REG (0x21) /* 0010 0001 */
+#define KXTF9_TILT_TIMER (0x28) /* 0010 1000 */
+#define KXTF9_WUF_TIMER (0x29) /* 0010 1001 */
+#define KXTF9_TDT_TIMER (0x2B) /* 0010 1011 */
+#define KXTF9_TDT_H_THRESH (0x2C) /* 0010 1100 */
+#define KXTF9_TDT_L_THRESH (0x2D) /* 0010 1101 */
+#define KXTF9_TDT_TAP_TIMER (0x2E) /* 0010 1110 */
+#define KXTF9_TDT_TOTAL_TIMER (0x2F) /* 0010 1111 */
+#define KXTF9_TDT_LATENCY_TIMER (0x30) /* 0011 0000 */
+#define KXTF9_TDT_WINDOW_TIMER (0x31) /* 0011 0001 */
+#define KXTF9_WUF_THRESH (0x5A) /* 0101 1010 */
+#define KXTF9_TILT_ANGLE (0x5C) /* 0101 1100 */
+#define KXTF9_HYST_SET (0x5F) /* 0101 1111 */
+
+#define KXTF9_MAX_DUR (0xFF)
+#define KXTF9_MAX_THS (0xFF)
+#define KXTF9_THS_COUNTS_P_G (32)
+
+/* --------------------- */
+/* - Variables. - */
+/* --------------------- */
+
+struct kxtf9_config {
+ unsigned int odr; /* Output data rate mHz */
+ unsigned int fsr; /* full scale range mg */
+ unsigned int ths; /* Motion no-motion threshold mg */
+ unsigned int dur; /* Motion no-motion duration ms */
+ unsigned int irq_type;
+ unsigned char reg_ths;
+ unsigned char reg_dur;
+ unsigned char reg_odr;
+ unsigned char reg_int_cfg1;
+ unsigned char reg_int_cfg2;
+ unsigned char ctrl_reg1;
+};
+
+struct kxtf9_private_data {
+ struct kxtf9_config suspend;
+ struct kxtf9_config resume;
+};
+
+/*****************************************
+ Accelerometer Initialization Functions
+*****************************************/
+
+static int kxtf9_set_ths(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config,
+ int apply,
+ long ths)
+{
+ int result = ML_SUCCESS;
+ if ((ths * KXTF9_THS_COUNTS_P_G / 1000) > KXTF9_MAX_THS)
+ ths = (KXTF9_MAX_THS * 1000) / KXTF9_THS_COUNTS_P_G;
+
+ if (ths < 0)
+ ths = 0;
+
+ config->ths = ths;
+ config->reg_ths = (unsigned char)
+ ((long)(ths * KXTF9_THS_COUNTS_P_G) / 1000);
+ MPL_LOGV("THS: %d, 0x%02x\n", config->ths, (int)config->reg_ths);
+ if (apply)
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_WUF_THRESH,
+ config->reg_ths);
+ return result;
+}
+
+static int kxtf9_set_dur(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config,
+ int apply,
+ long dur)
+{
+ int result = ML_SUCCESS;
+ long reg_dur = (dur * config->odr) / 1000000;
+ config->dur = dur;
+
+ if (reg_dur > KXTF9_MAX_DUR)
+ reg_dur = KXTF9_MAX_DUR;
+
+ config->reg_dur = (unsigned char) reg_dur;
+ MPL_LOGV("DUR: %d, 0x%02x\n", config->dur, (int)config->reg_dur);
+ if (apply)
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_WUF_TIMER,
+ (unsigned char)reg_dur);
+ return result;
+}
+
+/**
+ * Sets the IRQ to fire when one of the IRQ events occurs. Threshold and
+ * duration will not be used unless the type is MOT or NMOT.
+ *
+ * @param config configuration to apply to, suspend or resume
+ * @param irq_type The type of IRQ. Valid values are
+ * - MPU_SLAVE_IRQ_TYPE_NONE
+ * - MPU_SLAVE_IRQ_TYPE_MOTION
+ * - MPU_SLAVE_IRQ_TYPE_DATA_READY
+ */
+static int kxtf9_set_irq(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config,
+ int apply,
+ long irq_type)
+{
+ int result = ML_SUCCESS;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+
+ config->irq_type = (unsigned char)irq_type;
+ config->ctrl_reg1 &= ~0x22;
+ if (irq_type == MPU_SLAVE_IRQ_TYPE_DATA_READY) {
+ config->ctrl_reg1 |= 0x20;
+ config->reg_int_cfg1 = 0x38;
+ config->reg_int_cfg2 = 0x00;
+ } else if (irq_type == MPU_SLAVE_IRQ_TYPE_MOTION) {
+ config->ctrl_reg1 |= 0x02;
+ if ((unsigned long) config ==
+ (unsigned long) &private_data->suspend)
+ config->reg_int_cfg1 = 0x34;
+ else
+ config->reg_int_cfg1 = 0x24;
+ config->reg_int_cfg2 = 0xE0;
+ } else {
+ config->reg_int_cfg1 = 0x00;
+ config->reg_int_cfg2 = 0x00;
+ }
+
+ if (apply) {
+ /* Must clear bit 7 before writing new configuration */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG1,
+ config->reg_int_cfg1);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG2,
+ config->reg_int_cfg2);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ config->ctrl_reg1);
+ }
+ MPL_LOGV("CTRL_REG1: %lx, INT_CFG1: %lx, INT_CFG2: %lx\n",
+ (unsigned long)config->ctrl_reg1,
+ (unsigned long)config->reg_int_cfg1,
+ (unsigned long)config->reg_int_cfg2);
+
+ return result;
+}
+
+/**
+ * Set the Output data rate for the particular configuration
+ *
+ * @param config Config to modify with new ODR
+ * @param odr Output data rate in units of 1/1000Hz
+ */
+static int kxtf9_set_odr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config,
+ int apply,
+ long odr)
+{
+ unsigned char bits;
+ int result = ML_SUCCESS;
+
+ /* The data sheet lists a 12.5 Hz setting, but it appears to produce only
+ * a single correct data value, so it is omitted from this table */
+ if (odr > 400000) {
+ config->odr = 800000;
+ bits = 0x06;
+ } else if (odr > 200000) {
+ config->odr = 400000;
+ bits = 0x05;
+ } else if (odr > 100000) {
+ config->odr = 200000;
+ bits = 0x04;
+ } else if (odr > 50000) {
+ config->odr = 100000;
+ bits = 0x03;
+ } else if (odr > 25000) {
+ config->odr = 50000;
+ bits = 0x02;
+ } else if (odr != 0) {
+ config->odr = 25000;
+ bits = 0x01;
+ } else {
+ config->odr = 0;
+ bits = 0;
+ }
+
+ config->reg_odr = bits;
+ kxtf9_set_dur(mlsl_handle, pdata,
+ config, apply, config->dur);
+ MPL_LOGV("ODR: %d, 0x%02x\n", config->odr, (int)config->ctrl_reg1);
+ if (apply) {
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ config->reg_odr);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ 0x40);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ config->ctrl_reg1);
+ }
+ return result;
+}
+
+/**
+ * Set the full scale range of the accels
+ *
+ * @param config pointer to configuration
+ * @param fsr requested full scale range
+ */
+static int kxtf9_set_fsr(void *mlsl_handle,
+ struct ext_slave_platform_data *pdata,
+ struct kxtf9_config *config,
+ int apply,
+ long fsr)
+{
+ int result = ML_SUCCESS;
+
+ config->ctrl_reg1 = (config->ctrl_reg1 & 0xE7);
+ if (fsr <= 2000) {
+ config->fsr = 2000;
+ config->ctrl_reg1 |= 0x00;
+ } else if (fsr <= 4000) {
+ config->fsr = 4000;
+ config->ctrl_reg1 |= 0x08;
+ } else {
+ config->fsr = 8000;
+ config->ctrl_reg1 |= 0x10;
+ }
+
+ MPL_LOGV("FSR: %d\n", config->fsr);
+ if (apply) {
+ /* Must clear bit 7 before writing new configuration */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, config->ctrl_reg1);
+ }
+ return result;
+}
+
+static int kxtf9_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result;
+ unsigned char data;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+
+ /* Wake up */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ ERROR_CHECK(result);
+ /* INT_CTRL_REG1: */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG1,
+ private_data->suspend.reg_int_cfg1);
+ ERROR_CHECK(result);
+ /* WUF_THRESH: */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_WUF_THRESH,
+ private_data->suspend.reg_ths);
+ ERROR_CHECK(result);
+ /* DATA_CTRL_REG */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ private_data->suspend.reg_odr);
+ ERROR_CHECK(result);
+ /* WUF_TIMER */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_WUF_TIMER, private_data->suspend.reg_dur);
+ ERROR_CHECK(result);
+
+ /* Normal operation */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ private_data->suspend.ctrl_reg1);
+ ERROR_CHECK(result);
+ result = MLSLSerialRead(mlsl_handle, pdata->address,
+ KXTF9_INT_REL, 1, &data);
+ ERROR_CHECK(result);
+
+ return result;
+}
+
+/* full scale setting - register and mask */
+#define ACCEL_KIONIX_CTRL_REG (0x1b)
+#define ACCEL_KIONIX_CTRL_MASK (0x18)
+
+static int kxtf9_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = ML_SUCCESS;
+ unsigned char data;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+
+ /* Wake up */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1, 0x40);
+ ERROR_CHECK(result);
+ /* INT_CTRL_REG1: */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_INT_CTRL_REG1,
+ private_data->resume.reg_int_cfg1);
+ ERROR_CHECK(result);
+ /* WUF_THRESH: */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_WUF_THRESH, private_data->resume.reg_ths);
+ ERROR_CHECK(result);
+ /* DATA_CTRL_REG */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ private_data->resume.reg_odr);
+ ERROR_CHECK(result);
+ /* WUF_TIMER */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_WUF_TIMER, private_data->resume.reg_dur);
+ ERROR_CHECK(result);
+
+ /* Normal operation */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ private_data->resume.ctrl_reg1);
+ ERROR_CHECK(result);
+ result = MLSLSerialRead(mlsl_handle, pdata->address,
+ KXTF9_INT_REL, 1, &data);
+ ERROR_CHECK(result);
+
+ return ML_SUCCESS;
+}
+
+static int kxtf9_init(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+
+ struct kxtf9_private_data *private_data;
+ int result = ML_SUCCESS;
+
+ private_data = (struct kxtf9_private_data *)
+ MLOSMalloc(sizeof(struct kxtf9_private_data));
+
+ if (!private_data)
+ return ML_ERROR_MEMORY_EXAUSTED;
+
+ /* RAM reset */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG1,
+ 0x40); /* Fastest Reset */
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_DATA_CTRL_REG,
+ 0x36); /* Fastest Reset */
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ KXTF9_CTRL_REG3, 0xcd); /* Reset */
+ ERROR_CHECK(result);
+ MLOSSleep(2);
+
+ pdata->private_data = private_data;
+
+ private_data->resume.ctrl_reg1 = 0xC0;
+ private_data->suspend.ctrl_reg1 = 0x40;
+
+ result = kxtf9_set_dur(mlsl_handle, pdata, &private_data->suspend,
+ FALSE, 1000);
+ ERROR_CHECK(result);
+ result = kxtf9_set_dur(mlsl_handle, pdata, &private_data->resume,
+ FALSE, 2540);
+ ERROR_CHECK(result);
+
+ result = kxtf9_set_odr(mlsl_handle, pdata, &private_data->suspend,
+ FALSE, 50000);
+ ERROR_CHECK(result);
+ result = kxtf9_set_odr(mlsl_handle, pdata, &private_data->resume,
+ FALSE, 200000);
+
+ result = kxtf9_set_fsr(mlsl_handle, pdata, &private_data->suspend,
+ FALSE, 2000);
+ ERROR_CHECK(result);
+ result = kxtf9_set_fsr(mlsl_handle, pdata, &private_data->resume,
+ FALSE, 2000);
+ ERROR_CHECK(result);
+
+ result = kxtf9_set_ths(mlsl_handle, pdata, &private_data->suspend,
+ FALSE, 80);
+ ERROR_CHECK(result);
+ result = kxtf9_set_ths(mlsl_handle, pdata, &private_data->resume,
+ FALSE, 40);
+ ERROR_CHECK(result);
+
+ result = kxtf9_set_irq(mlsl_handle, pdata, &private_data->suspend,
+ FALSE,
+ MPU_SLAVE_IRQ_TYPE_NONE);
+ ERROR_CHECK(result);
+ result = kxtf9_set_irq(mlsl_handle, pdata, &private_data->resume,
+ FALSE,
+ MPU_SLAVE_IRQ_TYPE_NONE);
+ ERROR_CHECK(result);
+ return result;
+}
+
+static int kxtf9_exit(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ if (pdata->private_data)
+ return MLOSFree(pdata->private_data);
+ else
+ return ML_SUCCESS;
+}
+
+static int kxtf9_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ int retval;
+ long odr;
+ struct kxtf9_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ return kxtf9_set_odr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ odr = *((long *)data->data);
+ if (odr != 0)
+ private_data->resume.ctrl_reg1 |= 0x80;
+
+ retval = kxtf9_set_odr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ odr);
+ return retval;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ return kxtf9_set_fsr(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ return kxtf9_set_fsr(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ return kxtf9_set_ths(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ return kxtf9_set_ths(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ return kxtf9_set_dur(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ return kxtf9_set_dur(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ return kxtf9_set_irq(mlsl_handle, pdata,
+ &private_data->suspend,
+ data->apply,
+ *((long *)data->data));
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ return kxtf9_set_irq(mlsl_handle, pdata,
+ &private_data->resume,
+ data->apply,
+ *((long *)data->data));
+ default:
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return ML_SUCCESS;
+}
+
+static int kxtf9_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ struct kxtf9_private_data *private_data = pdata->private_data;
+ if (!data->data)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.odr;
+ break;
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.odr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.fsr;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.ths;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.ths;
+ break;
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.dur;
+ break;
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.dur;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->suspend.irq_type;
+ break;
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ (*(unsigned long *)data->data) =
+ (unsigned long) private_data->resume.irq_type;
+ break;
+ default:
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return ML_SUCCESS;
+}
+
+static int kxtf9_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ unsigned char *data)
+{
+ int result;
+ unsigned char reg;
+ result = MLSLSerialRead(mlsl_handle, pdata->address,
+ KXTF9_INT_SRC_REG2, 1, &reg);
+ ERROR_CHECK(result);
+
+ if (!(reg & 0x10))
+ return ML_ERROR_ACCEL_DATA_NOT_READY;
+
+ result = MLSLSerialRead(mlsl_handle, pdata->address,
+ slave->reg, slave->len, data);
+ ERROR_CHECK(result);
+ return result;
+}
+
+static struct ext_slave_descr kxtf9_descr = {
+ /*.init = */ kxtf9_init,
+ /*.exit = */ kxtf9_exit,
+ /*.suspend = */ kxtf9_suspend,
+ /*.resume = */ kxtf9_resume,
+ /*.read = */ kxtf9_read,
+ /*.config = */ kxtf9_config,
+ /*.get_config = */ kxtf9_get_config,
+ /*.name = */ "kxtf9",
+ /*.type = */ EXT_SLAVE_TYPE_ACCELEROMETER,
+ /*.id = */ ACCEL_ID_KXTF9,
+ /*.reg = */ 0x06,
+ /*.len = */ 6,
+ /*.endian = */ EXT_SLAVE_LITTLE_ENDIAN,
+ /*.range = */ {2, 0},
+};
+
+struct ext_slave_descr *kxtf9_get_slave_descr(void)
+{
+ return &kxtf9_descr;
+}
+EXPORT_SYMBOL(kxtf9_get_slave_descr);
+
+/**
+ * @}
+**/
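
The register conversions in kxtf9_set_ths() and kxtf9_set_dur() are terse, so a worked example may help: thresholds are 32 counts per g (KXTF9_THS_COUNTS_P_G) and the wake-up timer counts samples at the current ODR. The standalone sketch below reproduces that arithmetic for the suspend defaults programmed in kxtf9_init(); it is illustrative only and never touches the hardware.

    /* Illustrative only: mirror of the kxtf9 threshold/duration math. */
    #include <stdio.h>

    #define KXTF9_THS_COUNTS_P_G 32

    int main(void)
    {
        long ths = 80;       /* suspend wake-up threshold, mg (kxtf9_init) */
        long dur = 1000;     /* suspend wake-up duration, ms */
        long odr = 50000;    /* suspend ODR, mHz (50 Hz) */

        /* KXTF9_WUF_THRESH: mg -> counts at 32 counts/g */
        unsigned char reg_ths = (unsigned char)((ths * KXTF9_THS_COUNTS_P_G) / 1000);
        /* KXTF9_WUF_TIMER: duration in ms -> number of samples at the ODR */
        unsigned char reg_dur = (unsigned char)((dur * odr) / 1000000);

        /* prints "reg_ths=0x02 reg_dur=50" */
        printf("reg_ths=0x%02x reg_dur=%u\n", reg_ths, reg_dur);
        return 0;
    }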
diff --git a/drivers/misc/mpu3050/compass/ak8975.c b/drivers/misc/mpu3050/compass/ak8975.c
new file mode 100644
index 000000000000..b8aed30ba39b
--- /dev/null
+++ b/drivers/misc/mpu3050/compass/ak8975.c
@@ -0,0 +1,258 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup COMPASSDL (Motion Library - Compass Driver Layer)
+ * @brief Provides the interface to set up and handle a compass
+ * connected to the secondary I2C interface of the gyroscope.
+ *
+ * @{
+ * @file AK8975.c
+ * @brief Magnetometer setup and handling methods for AKM 8975 compass.
+ */
+
+/* ------------------ */
+/* - Include Files. - */
+/* ------------------ */
+
+#include <string.h>
+
+#ifdef __KERNEL__
+#include <linux/module.h>
+#endif
+
+#include "mpu.h"
+#include "mlsl.h"
+#include "mlos.h"
+
+#include <log.h>
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "MPL-compass"
+
+
+#define AK8975_REG_ST1 (0x02)
+#define AK8975_REG_HXL (0x03)
+#define AK8975_REG_ST2 (0x09)
+
+#define AK8975_REG_CNTL (0x0A)
+
+#define AK8975_CNTL_MODE_POWER_DOWN (0x00)
+#define AK8975_CNTL_MODE_SINGLE_MEASUREMENT (0x01)
+
+int ak8975_suspend(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = ML_SUCCESS;
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_POWER_DOWN);
+ MLOSSleep(1); /* wait at least 100us */
+ ERROR_CHECK(result);
+ return result;
+}
+
+int ak8975_resume(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata)
+{
+ int result = ML_SUCCESS;
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_SINGLE_MEASUREMENT);
+ ERROR_CHECK(result);
+ return result;
+}
+
+int ak8975_read(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata, unsigned char *data)
+{
+ unsigned char regs[8];
+ unsigned char *stat = &regs[0];
+ unsigned char *stat2 = &regs[7];
+ int result = ML_SUCCESS;
+ int status = ML_SUCCESS;
+
+ result =
+ MLSLSerialRead(mlsl_handle, pdata->address, AK8975_REG_ST1,
+ 8, regs);
+ ERROR_CHECK(result);
+
+ /*
+ * ST : data ready -
+ * Measurement has been completed and data is ready to be read.
+ */
+ if (*stat & 0x01) {
+ memcpy(data, &regs[1], 6);
+ status = ML_SUCCESS;
+ }
+
+ /*
+ * ST2 : data error -
+ * occurs when data read is started outside of a readable period;
+ * data read would not be correct.
+ * Valid in continuous measurement mode only.
+ * In single measurement mode this error should not occur, but we
+ * still account for it and return an error, since the data would be
+ * corrupted.
+ * DERR bit is self-clearing when ST2 register is read.
+ */
+ if (*stat2 & 0x04)
+ status = ML_ERROR_COMPASS_DATA_ERROR;
+ /*
+ * ST2 : overflow -
+ * the sum of the absolute values of all axes |X|+|Y|+|Z| exceeded 2400uT.
+ * This is likely to happen in the presence of an external magnetic
+ * disturbance; it indicates that the sensor data is incorrect and should
+ * be ignored.
+ * An error is returned.
+ * HOFL bit clears when a new measurement starts.
+ */
+ if (*stat2 & 0x08)
+ status = ML_ERROR_COMPASS_DATA_OVERFLOW;
+ /*
+ * ST : overrun -
+ * the previous sample was not fetched and lost.
+ * Valid in continuous measurement mode only.
+ * In single measurement mode this error should not occur and we
+ * don't consider this condition an error.
+ * DOR bit is self-clearing when ST2 or any meas. data register is
+ * read.
+ */
+ if (*stat & 0x02) {
+ /* status = ML_ERROR_COMPASS_DATA_UNDERFLOW; */
+ status = ML_SUCCESS;
+ }
+
+ /*
+ * trigger next measurement if:
+ * - stat is non zero;
+ * - if stat is zero and stat2 is non zero.
+ * Won't trigger if data is not ready and there was no error.
+ */
+ if (*stat != 0x00 || *stat2 != 0x00) {
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->address,
+ AK8975_REG_CNTL,
+ AK8975_CNTL_MODE_SINGLE_MEASUREMENT);
+ ERROR_CHECK(result);
+ }
+
+ return status;
+}
+
+static int ak8975_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ int result;
+ if (!data->data)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_WRITE_REGISTERS:
+ result = MLSLSerialWrite(mlsl_handle, pdata->address,
+ data->len,
+ (unsigned char *)data->data);
+ ERROR_CHECK(result);
+ break;
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return ML_SUCCESS;
+}
+
+static int ak8975_get_config(void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *data)
+{
+ int result;
+ if (!data->data)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ switch (data->key) {
+ case MPU_SLAVE_READ_REGISTERS:
+ {
+ unsigned char *serial_data = (unsigned char *)data->data;
+ result = MLSLSerialRead(mlsl_handle, pdata->address,
+ serial_data[0],
+ data->len - 1,
+ &serial_data[1]);
+ ERROR_CHECK(result);
+ break;
+ }
+ case MPU_SLAVE_CONFIG_ODR_SUSPEND:
+ case MPU_SLAVE_CONFIG_ODR_RESUME:
+ case MPU_SLAVE_CONFIG_FSR_SUSPEND:
+ case MPU_SLAVE_CONFIG_FSR_RESUME:
+ case MPU_SLAVE_CONFIG_MOT_THS:
+ case MPU_SLAVE_CONFIG_NMOT_THS:
+ case MPU_SLAVE_CONFIG_MOT_DUR:
+ case MPU_SLAVE_CONFIG_NMOT_DUR:
+ case MPU_SLAVE_CONFIG_IRQ_SUSPEND:
+ case MPU_SLAVE_CONFIG_IRQ_RESUME:
+ default:
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+ };
+
+ return ML_SUCCESS;
+}
+
+struct ext_slave_descr ak8975_descr = {
+ /*.init = */ NULL,
+ /*.exit = */ NULL,
+ /*.suspend = */ ak8975_suspend,
+ /*.resume = */ ak8975_resume,
+ /*.read = */ ak8975_read,
+ /*.config = */ ak8975_config,
+ /*.get_config = */ ak8975_get_config,
+ /*.name = */ "ak8975",
+ /*.type = */ EXT_SLAVE_TYPE_COMPASS,
+ /*.id = */ COMPASS_ID_AKM,
+ /*.reg = */ 0x01,
+ /*.len = */ 9,
+ /*.endian = */ EXT_SLAVE_LITTLE_ENDIAN,
+ /*.range = */ {9830, 4000}
+};
+
+struct ext_slave_descr *ak8975_get_slave_descr(void)
+{
+ return &ak8975_descr;
+}
+EXPORT_SYMBOL(ak8975_get_slave_descr);
+
+/**
+ * @}
+ */
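
ak8975_read() fetches ST1, the six measurement bytes and ST2 in one burst starting at AK8975_REG_ST1, and copies the six data bytes out when ST1 reports data ready. Since the slave descriptor marks the data as little endian, decoding in a consumer looks roughly like the sketch below; it is illustrative only and the sample bytes are made up.

    /* Illustrative only: decode the 6 bytes copied out by ak8975_read(). */
    #include <stdio.h>

    static short ak8975_axis(const unsigned char *d)
    {
        return (short)(d[0] | (d[1] << 8));   /* HxL then HxH, little endian */
    }

    int main(void)
    {
        unsigned char data[6] = { 0x34, 0x00, 0xcd, 0xff, 0x10, 0x00 };

        /* prints "x=52 y=-51 z=16" */
        printf("x=%d y=%d z=%d\n",
               ak8975_axis(&data[0]), ak8975_axis(&data[2]), ak8975_axis(&data[4]));
        return 0;
    }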
diff --git a/drivers/misc/mpu3050/log.h b/drivers/misc/mpu3050/log.h
new file mode 100644
index 000000000000..f2f9ea7ece8e
--- /dev/null
+++ b/drivers/misc/mpu3050/log.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2010 InvenSense Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * C/C++ logging functions. See the logging documentation for API details.
+ *
+ * We'd like these to be available from C code (in case we import some from
+ * somewhere), so this has a C interface.
+ *
+ * The output will be correct when the log file is shared between multiple
+ * threads and/or multiple processes so long as the operating system
+ * supports O_APPEND. These calls have mutex-protected data structures
+ * and so are NOT reentrant. Do not use MPL_LOG in a signal handler.
+ */
+#ifndef _LIBS_CUTILS_MPL_LOG_H
+#define _LIBS_CUTILS_MPL_LOG_H
+
+#include <stdarg.h>
+
+#ifdef ANDROID
+#include <utils/Log.h> /* For the LOG macro */
+#endif
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Normally we strip MPL_LOGV (VERBOSE messages) from release builds.
+ * You can modify this (for example with "#define MPL_LOG_NDEBUG 0"
+ * at the top of your source file) to change that behavior.
+ */
+#ifndef MPL_LOG_NDEBUG
+#ifdef NDEBUG
+#define MPL_LOG_NDEBUG 1
+#else
+#define MPL_LOG_NDEBUG 0
+#endif
+#endif
+
+#ifdef __KERNEL__
+#define MPL_LOG_UNKNOWN MPL_LOG_VERBOSE
+#define MPL_LOG_DEFAULT KERN_DEFAULT
+#define MPL_LOG_VERBOSE KERN_CONT
+#define MPL_LOG_DEBUG KERN_NOTICE
+#define MPL_LOG_INFO KERN_INFO
+#define MPL_LOG_WARN KERN_WARNING
+#define MPL_LOG_ERROR KERN_ERR
+#define MPL_LOG_SILENT MPL_LOG_VERBOSE
+
+#else
+ /* Based on the log priorities in android
+ /system/core/include/android/log.h */
+#define MPL_LOG_UNKNOWN (0)
+#define MPL_LOG_DEFAULT (1)
+#define MPL_LOG_VERBOSE (2)
+#define MPL_LOG_DEBUG (3)
+#define MPL_LOG_INFO (4)
+#define MPL_LOG_WARN (5)
+#define MPL_LOG_ERROR (6)
+#define MPL_LOG_SILENT (8)
+#endif
+
+
+/*
+ * This is the local tag used for the following simplified
+ * logging macros. You can change this preprocessor definition
+ * before using the other macros to change the tag.
+ */
+#ifndef MPL_LOG_TAG
+#ifdef __KERNEL__
+#define MPL_LOG_TAG
+#else
+#define MPL_LOG_TAG NULL
+#endif
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Simplified macro to send a verbose log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGV
+#if MPL_LOG_NDEBUG
+#define MPL_LOGV(fmt, ...) \
+ do { \
+ if (0) \
+ MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__);\
+ } while (0)
+#else
+#define MPL_LOGV(fmt, ...) MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+#endif
+
+#ifndef CONDITION
+#define CONDITION(cond) ((cond) != 0)
+#endif
+
+#ifndef MPL_LOGV_IF
+#if MPL_LOG_NDEBUG
+#define MPL_LOGV_IF(cond, fmt, ...) \
+ do { if (0) MPL_LOG(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define MPL_LOGV_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_VERBOSE, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+#endif
+
+/*
+ * Simplified macro to send a debug log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGD
+#define MPL_LOGD(fmt, ...) MPL_LOG(LOG_DEBUG, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+
+#ifndef MPL_LOGD_IF
+#define MPL_LOGD_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_DEBUG, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/*
+ * Simplified macro to send an info log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGI
+#define MPL_LOGI(fmt, ...) MPL_LOG(LOG_INFO, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+
+#ifndef MPL_LOGI_IF
+#define MPL_LOGI_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_INFO, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/*
+ * Simplified macro to send a warning log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGW
+#ifdef __KERNEL__
+#define MPL_LOGW(fmt, ...) printk(KERN_WARNING MPL_LOG_TAG fmt, ##__VA_ARGS__)
+#else
+#define MPL_LOGW(fmt, ...) MPL_LOG(LOG_WARN, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+#endif
+
+#ifndef MPL_LOGW_IF
+#define MPL_LOGW_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_WARN, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/*
+ * Simplified macro to send an error log message using the current MPL_LOG_TAG.
+ */
+#ifndef MPL_LOGE
+#ifdef __KERNEL__
+#define MPL_LOGE(fmt, ...) printk(KERN_ERR MPL_LOG_TAG fmt, ##__VA_ARGS__)
+#else
+#define MPL_LOGE(fmt, ...) MPL_LOG(LOG_ERROR, MPL_LOG_TAG, fmt, ##__VA_ARGS__)
+#endif
+#endif
+
+#ifndef MPL_LOGE_IF
+#define MPL_LOGE_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? MPL_LOG(LOG_ERROR, MPL_LOG_TAG, fmt, ##__VA_ARGS__) \
+ : (void)0)
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Log a fatal error. If the given condition fails, this stops program
+ * execution like a normal assertion, but also generating the given message.
+ * It is NOT stripped from release builds. Note that the condition test
+ * is -inverted- from the normal assert() semantics.
+ */
+#define MPL_LOG_ALWAYS_FATAL_IF(cond, fmt, ...) \
+ ((CONDITION(cond)) \
+ ? ((void)android_printAssert(#cond, MPL_LOG_TAG, \
+ fmt, ##__VA_ARGS__)) \
+ : (void)0)
+
+#define MPL_LOG_ALWAYS_FATAL(fmt, ...) \
+ (((void)android_printAssert(NULL, MPL_LOG_TAG, fmt, ##__VA_ARGS__)))
+
+/*
+ * Versions of MPL_LOG_ALWAYS_FATAL_IF and MPL_LOG_ALWAYS_FATAL that
+ * are stripped out of release builds.
+ */
+#if MPL_LOG_NDEBUG
+#define MPL_LOG_FATAL_IF(cond, fmt, ...) \
+ do { \
+ if (0) \
+ MPL_LOG_ALWAYS_FATAL_IF(cond, fmt, ##__VA_ARGS__); \
+ } while (0)
+#define MPL_LOG_FATAL(fmt, ...) \
+ do { \
+ if (0) \
+ MPL_LOG_ALWAYS_FATAL(fmt, ##__VA_ARGS__); \
+ } while (0)
+#else
+#define MPL_LOG_FATAL_IF(cond, fmt, ...) \
+ MPL_LOG_ALWAYS_FATAL_IF(cond, fmt, ##__VA_ARGS__)
+#define MPL_LOG_FATAL(fmt, ...) \
+ MPL_LOG_ALWAYS_FATAL(fmt, ##__VA_ARGS__)
+#endif
+
+/*
+ * Assertion that generates a log message when the assertion fails.
+ * Stripped out of release builds. Uses the current MPL_LOG_TAG.
+ */
+#define MPL_LOG_ASSERT(cond, fmt, ...) \
+ MPL_LOG_FATAL_IF(!(cond), fmt, ##__VA_ARGS__)
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Basic log message macro.
+ *
+ * Example:
+ * MPL_LOG(MPL_LOG_WARN, NULL, "Failed with error %d", errno);
+ *
+ * The second argument may be NULL or "" to indicate the "global" tag.
+ */
+#ifndef MPL_LOG
+#define MPL_LOG(priority, tag, fmt, ...) \
+ MPL_LOG_PRI(priority, tag, fmt, ##__VA_ARGS__)
+#endif
+
+/*
+ * Log macro that allows you to specify a number for the priority.
+ */
+#ifndef MPL_LOG_PRI
+#ifdef ANDROID
+#define MPL_LOG_PRI(priority, tag, fmt, ...) \
+ LOG(priority, tag, fmt, ##__VA_ARGS__)
+#elif defined __KERNEL__
+#define MPL_LOG_PRI(priority, tag, fmt, ...) \
+ pr_debug(MPL_##priority tag fmt, ##__VA_ARGS__)
+#else
+#define MPL_LOG_PRI(priority, tag, fmt, ...) \
+ _MLPrintLog(MPL_##priority, tag, fmt, ##__VA_ARGS__)
+#endif
+#endif
+
+/*
+ * Log macro that allows you to pass in a varargs ("args" is a va_list).
+ */
+#ifndef MPL_LOG_PRI_VA
+#ifdef ANDROID
+#define MPL_LOG_PRI_VA(priority, tag, fmt, args) \
+ android_vprintLog(priority, NULL, tag, fmt, args)
+#elif defined __KERNEL__
+/* not allowed in the Kernel because there is no dev_dbg that takes a va_list */
+#else
+#define MPL_LOG_PRI_VA(priority, tag, fmt, args) \
+ _MLPrintVaLog(priority, NULL, tag, fmt, args)
+#endif
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * ===========================================================================
+ *
+ * The stuff in the rest of this file should not be used directly.
+ */
+
+#ifndef ANDROID
+ int _MLPrintLog(int priority, const char *tag, const char *fmt,
+ ...);
+ int _MLPrintVaLog(int priority, const char *tag, const char *fmt,
+ va_list args);
+/* Final implementation of actual writing to a character device */
+ int _MLWriteLog(const char *buf, int buflen);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _LIBS_CUTILS_MPL_LOG_H */
diff --git a/drivers/misc/mpu3050/mldl_cfg.c b/drivers/misc/mpu3050/mldl_cfg.c
new file mode 100644
index 000000000000..9cc4cf690386
--- /dev/null
+++ b/drivers/misc/mpu3050/mldl_cfg.c
@@ -0,0 +1,1739 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup MLDL
+ *
+ * @{
+ * @file mldl_cfg.c
+ * @brief The Motion Library Driver Layer.
+ */
+
+/* ------------------ */
+/* - Include Files. - */
+/* ------------------ */
+
+#include <stddef.h>
+
+#include "mldl_cfg.h"
+#include "mpu.h"
+
+#include "mlsl.h"
+#include "mlos.h"
+
+#include "log.h"
+#undef MPL_LOG_TAG
+#define MPL_LOG_TAG "mldl_cfg:"
+
+/* --------------------- */
+/* - Variables. - */
+/* --------------------- */
+#ifdef M_HW
+#define SLEEP 0
+#define WAKE_UP 7
+#define RESET 1
+#define STANDBY 1
+#else
+/* literal significance of all the parameters used in MLDLPowerMgmtMPU */
+#define SLEEP 1
+#define WAKE_UP 0
+#define RESET 1
+#define STANDBY 1
+#endif
+
+/*---------------------*/
+/*- Prototypes. -*/
+/*---------------------*/
+
+/*----------------------*/
+/*- Static Functions. -*/
+/*----------------------*/
+
+static int dmp_stop(struct mldl_cfg *mldl_cfg, void *gyro_handle)
+{
+ unsigned char userCtrlReg;
+ int result;
+
+ if (!mldl_cfg->dmp_is_running)
+ return ML_SUCCESS;
+
+ result = MLSLSerialRead(gyro_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL, 1, &userCtrlReg);
+ ERROR_CHECK(result);
+ userCtrlReg = (userCtrlReg & (~BIT_FIFO_EN)) | BIT_FIFO_RST;
+ userCtrlReg = (userCtrlReg & (~BIT_DMP_EN)) | BIT_DMP_RST;
+
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL, userCtrlReg);
+ ERROR_CHECK(result);
+ mldl_cfg->dmp_is_running = 0;
+
+ return result;
+
+}
+/**
+ * @brief Starts the DMP running
+ *
+ * @return ML_SUCCESS or non-zero error code
+ */
+static int dmp_start(struct mldl_cfg *pdata, void *mlsl_handle)
+{
+ unsigned char userCtrlReg;
+ int result;
+
+ if (pdata->dmp_is_running == pdata->dmp_enable)
+ return ML_SUCCESS;
+
+ result = MLSLSerialRead(mlsl_handle, pdata->addr,
+ MPUREG_USER_CTRL, 1, &userCtrlReg);
+ ERROR_CHECK(result);
+
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_USER_CTRL,
+ ((userCtrlReg & (~BIT_FIFO_EN))
+ | BIT_FIFO_RST));
+ ERROR_CHECK(result);
+
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_USER_CTRL, userCtrlReg);
+ ERROR_CHECK(result);
+
+ result = MLSLSerialRead(mlsl_handle, pdata->addr,
+ MPUREG_USER_CTRL, 1, &userCtrlReg);
+ ERROR_CHECK(result);
+
+ if (pdata->dmp_enable)
+ userCtrlReg |= BIT_DMP_EN;
+ else
+ userCtrlReg &= ~BIT_DMP_EN;
+
+ if (pdata->fifo_enable)
+ userCtrlReg |= BIT_FIFO_EN;
+ else
+ userCtrlReg &= ~BIT_FIFO_EN;
+
+ userCtrlReg |= BIT_DMP_RST;
+
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_USER_CTRL, userCtrlReg);
+ ERROR_CHECK(result);
+ pdata->dmp_is_running = pdata->dmp_enable;
+
+ return result;
+}
+
+/**
+ * @brief Enables/disables the I2C bypass to an external device
+ * connected to the MPU's secondary I2C bus.
+ * @param enable
+ * Non-zero to enable pass through.
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+static int MLDLSetI2CBypass(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ unsigned char enable)
+{
+ unsigned char b;
+ int result;
+
+ if ((mldl_cfg->gyro_is_bypassed && enable) ||
+ (!mldl_cfg->gyro_is_bypassed && !enable))
+ return ML_SUCCESS;
+
+ /*---- get current 'USER_CTRL' into b ----*/
+ result = MLSLSerialRead(mlsl_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL, 1, &b);
+ ERROR_CHECK(result);
+
+ b &= ~BIT_AUX_IF_EN;
+
+ if (!enable) {
+ result = MLSLSerialWriteSingle(mlsl_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL,
+ (b | BIT_AUX_IF_EN));
+ ERROR_CHECK(result);
+ } else {
+ /* Coming out of I2C is tricky due to several errata. Do not
+ * modify this algorithm
+ */
+ /*
+ * 1) wait for the right time and send the command to change
+ * the aux i2c slave address to an invalid address that will
+ * get nack'ed
+ *
+ * 0x00 is broadcast. 0x7F is unlikely to be used by any aux.
+ */
+ result = MLSLSerialWriteSingle(mlsl_handle, mldl_cfg->addr,
+ MPUREG_AUX_SLV_ADDR, 0x7F);
+ ERROR_CHECK(result);
+ /*
+ * 2) wait enough time for a nack to occur, then go into
+ * bypass mode:
+ */
+ MLOSSleep(2);
+ result = MLSLSerialWriteSingle(mlsl_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL, (b));
+ ERROR_CHECK(result);
+ /*
+ * 3) wait for up to one MPU cycle then restore the slave
+ * address
+ */
+ MLOSSleep(SAMPLING_PERIOD_US(mldl_cfg) / 1000);
+ result = MLSLSerialWriteSingle(mlsl_handle, mldl_cfg->addr,
+ MPUREG_AUX_SLV_ADDR,
+ mldl_cfg->pdata->
+ accel.address);
+ ERROR_CHECK(result);
+
+ /*
+ * 4) reset the i2c master (aux) interface
+ */
+#ifdef M_HW
+ result = MLSLSerialWriteSingle(mlsl_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL,
+ (b | BIT_I2C_MST_RST));
+
+#else
+ result = MLSLSerialWriteSingle(mlsl_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL,
+ (b | BIT_AUX_IF_RST));
+#endif
+ ERROR_CHECK(result);
+ MLOSSleep(2);
+ }
+ mldl_cfg->gyro_is_bypassed = enable;
+
+ return result;
+}
+
+struct tsProdRevMap {
+ unsigned char siliconRev;
+ unsigned short sensTrim;
+};
+
+#define NUM_OF_PROD_REVS (DIM(prodRevsMap))
+
+/* NOTE: 'npp' denotes a non-production part */
+#ifdef M_HW
+#define OLDEST_PROD_REV_SUPPORTED 1
+static struct tsProdRevMap prodRevsMap[] = {
+ {0, 0},
+ {MPU_SILICON_REV_A1, 131}, /* 1 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 2 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 3 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 4 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 5 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 6 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 7 A1 (npp) */
+ {MPU_SILICON_REV_A1, 131}, /* 8 A1 (npp) */
+};
+
+#else /* !M_HW */
+#define OLDEST_PROD_REV_SUPPORTED 11
+
+static struct tsProdRevMap prodRevsMap[] = {
+ {0, 0},
+ {MPU_SILICON_REV_A4, 131}, /* 1 A? OBSOLETED */
+ {MPU_SILICON_REV_A4, 131}, /* 2 | */
+ {MPU_SILICON_REV_A4, 131}, /* 3 V */
+ {MPU_SILICON_REV_A4, 131}, /* 4 */
+ {MPU_SILICON_REV_A4, 131}, /* 5 */
+ {MPU_SILICON_REV_A4, 131}, /* 6 */
+ {MPU_SILICON_REV_A4, 131}, /* 7 */
+ {MPU_SILICON_REV_A4, 131}, /* 8 */
+ {MPU_SILICON_REV_A4, 131}, /* 9 */
+ {MPU_SILICON_REV_A4, 131}, /* 10 */
+ {MPU_SILICON_REV_B1, 131}, /* 11 B1 */
+ {MPU_SILICON_REV_B1, 131}, /* 12 | */
+ {MPU_SILICON_REV_B1, 131}, /* 13 V */
+ {MPU_SILICON_REV_B1, 131}, /* 14 B4 */
+ {MPU_SILICON_REV_B4, 131}, /* 15 | */
+ {MPU_SILICON_REV_B4, 131}, /* 16 V */
+ {MPU_SILICON_REV_B4, 131}, /* 17 */
+ {MPU_SILICON_REV_B4, 131}, /* 18 */
+ {MPU_SILICON_REV_B4, 115}, /* 19 */
+ {MPU_SILICON_REV_B4, 115}, /* 20 */
+ {MPU_SILICON_REV_B6, 131}, /* 21 B6 (B6/A9) */
+ {MPU_SILICON_REV_B4, 115}, /* 22 B4 (B7/A10) */
+ {MPU_SILICON_REV_B6, 0}, /* 23 B6 (npp) */
+ {MPU_SILICON_REV_B6, 0}, /* 24 | (npp) */
+ {MPU_SILICON_REV_B6, 0}, /* 25 V (npp) */
+ {MPU_SILICON_REV_B6, 131}, /* 26 (B6/A11) */
+};
+#endif /* !M_HW */
+
+/**
+ * @internal
+ * @brief Get the silicon revision ID from OTP.
+ * The silicon revision number is read from OTP bank 0,
+ * ADDR6[7:2]. The corresponding ID is retrieved by lookup
+ * in a map.
+ * @return The silicon revision ID (0 on error).
+ */
+static int MLDLGetSiliconRev(struct mldl_cfg *pdata,
+ void *mlsl_handle)
+{
+ int result;
+ unsigned char index = 0x00;
+ unsigned char bank =
+ (BIT_PRFTCH_EN | BIT_CFG_USER_BANK | MPU_MEM_OTP_BANK_0);
+ unsigned short memAddr = ((bank << 8) | 0x06);
+
+ result = MLSLSerialReadMem(mlsl_handle, pdata->addr,
+ memAddr, 1, &index);
+ ERROR_CHECK(result);
+ if (result)
+ return result;
+ index >>= 2;
+
+ /* clean the prefetch and cfg user bank bits */
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_BANK_SEL, 0);
+ ERROR_CHECK(result);
+ if (result)
+ return result;
+
+ if (index < OLDEST_PROD_REV_SUPPORTED || NUM_OF_PROD_REVS <= index) {
+ pdata->silicon_revision = 0;
+ pdata->trim = 0;
+ MPL_LOGE("Unsupported Product Revision Detected : %d\n", index);
+ return ML_ERROR_INVALID_MODULE;
+ }
+
+ pdata->silicon_revision = prodRevsMap[index].siliconRev;
+ pdata->trim = prodRevsMap[index].sensTrim;
+
+ if (pdata->trim == 0) {
+ MPL_LOGE("sensitivity trim is 0"
+ " - unsupported non production part.\n");
+ return ML_ERROR_INVALID_MODULE;
+ }
+
+ return result;
+}
+
+/**
+ * @brief Enable / disable the use of the MPU's secondary I2C interface level
+ * shifters.
+ * When enabled the secondary I2C interface to which the external
+ * device is connected runs at VDD voltage (main supply).
+ * When disabled the 2nd interface runs at VDDIO voltage.
+ * See the device specification for more details.
+ *
+ * @note using this API may produce unpredictable results, depending on how
+ * the MPU and the slave device are set up on the target platform.
+ * Use of this API should be entirely restricted to system
+ * integrators. Once the correct value is found, there should be no
+ * need to change the level shifter at runtime.
+ *
+ * @pre Must be called after MLSerialOpen().
+ * @note Typically called before MLDmpOpen().
+ *
+ * @param[in] enable:
+ * 0 to run at VDDIO (default),
+ * 1 to run at VDD.
+ *
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+static int MLDLSetLevelShifterBit(struct mldl_cfg *pdata,
+ void *mlsl_handle,
+ unsigned char enable)
+{
+#ifndef M_HW
+ int result;
+ unsigned char reg;
+ unsigned char mask;
+ unsigned char regval;
+
+ if (0 == pdata->silicon_revision)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /*-- on parts before B6 the VDDIO bit is bit 7 of ACCEL_BURST_ADDR --
+ NOTE: this is incompatible with ST accelerometers where the VDDIO
+ bit MUST be set to enable ST's internal logic to autoincrement
+ the register address on burst reads --*/
+ if ((pdata->silicon_revision & 0xf) < MPU_SILICON_REV_B6) {
+ reg = MPUREG_ACCEL_BURST_ADDR;
+ mask = 0x80;
+ } else {
+ /*-- on B6 parts the VDDIO bit was moved to FIFO_EN2 =>
+ the mask is always 0x04 --*/
+ reg = MPUREG_FIFO_EN2;
+ mask = 0x04;
+ }
+
+ result = MLSLSerialRead(mlsl_handle, pdata->addr, reg, 1, &regval);
+ if (result)
+ return result;
+
+ if (enable)
+ regval |= mask;
+ else
+ regval &= ~mask;
+
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->addr, reg, regval);
+
+ return result;
+#else
+ return ML_SUCCESS;
+#endif
+}
+
+
+#ifdef M_HW
+/**
+ * @internal
+ * @param reset 1 to reset hardware
+ */
+static tMLError mpu60xx_pwr_mgmt(struct mldl_cfg *pdata,
+ void *mlsl_handle,
+ unsigned char reset,
+ unsigned char powerselection)
+{
+ unsigned char b;
+ tMLError result;
+
+ if (powerselection < 0 || powerselection > 7)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ result =
+ MLSLSerialRead(mlsl_handle, pdata->addr, MPUREG_PWR_MGMT_1, 1,
+ &b);
+ ERROR_CHECK(result);
+
+ b &= ~(BITS_PWRSEL);
+
+ if (reset) {
+ /* Current silicon has an erratum where the reset will get
+ * nacked. Ignore the error code for now. */
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b | BIT_H_RESET);
+#define M_HW_RESET_ERRATTA
+#ifndef M_HW_RESET_ERRATTA
+ ERROR_CHECK(result);
+#else
+ MLOSSleep(50);
+#endif
+ }
+
+ b |= (powerselection << 4);
+
+ if (b & BITS_PWRSEL)
+ pdata->gyro_is_suspended = FALSE;
+ else
+ pdata->gyro_is_suspended = TRUE;
+
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b);
+ ERROR_CHECK(result);
+
+ return ML_SUCCESS;
+}
+
+/**
+ * @internal
+ */
+static tMLError MLDLStandByGyros(struct mldl_cfg *pdata,
+ void *mlsl_handle,
+ unsigned char disable_gx,
+ unsigned char disable_gy,
+ unsigned char disable_gz)
+{
+ unsigned char b;
+ tMLError result;
+
+ result =
+ MLSLSerialRead(mlsl_handle, pdata->addr, MPUREG_PWR_MGMT_2, 1,
+ &b);
+ ERROR_CHECK(result);
+
+ b &= ~(BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG);
+ b |= (disable_gx << 2 | disable_gy << 1 | disable_gz);
+
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGMT_2, b);
+ ERROR_CHECK(result);
+
+ return ML_SUCCESS;
+}
+
+/**
+ * @internal
+ */
+static tMLError MLDLStandByAccels(struct mldl_cfg *pdata,
+ void *mlsl_handle,
+ unsigned char disable_ax,
+ unsigned char disable_ay,
+ unsigned char disable_az)
+{
+ unsigned char b;
+ tMLError result;
+
+ result =
+ MLSLSerialRead(mlsl_handle, pdata->addr, MPUREG_PWR_MGMT_2, 1,
+ &b);
+ ERROR_CHECK(result);
+
+ b &= ~(BIT_STBY_XA | BIT_STBY_YA | BIT_STBY_ZA);
+ b |= (disable_ax << 2 | disable_ay << 1 | disable_az);
+
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGMT_2, b);
+ ERROR_CHECK(result);
+
+ return ML_SUCCESS;
+}
+
+#else /* ! M_HW */
+
+/**
+ * @internal
+ * @brief This function controls the power management on the MPU device.
+ * The entire chip can be put to low power sleep mode, or individual
+ * gyros can be turned on/off.
+ *
+ * Putting the device into sleep mode depending upon the changing needs
+ * of the associated applications is a recommended method for reducing
+ * power consumption. It is a safe operation in that sleep/wake-up of
+ * gyros while running will not result in any interruption of data.
+ *
+ * Although it is entirely allowed to put the device into full sleep
+ * while running the DMP, it is not recommended because it will disrupt
+ * the ongoing calculations carried on inside the DMP and consequently
+ * the sensor fusion algorithm. Furthermore, while in sleep mode,
+ * read & write operations from the app processor on both registers and
+ * memory are disabled and can only be regained by restoring the MPU to
+ * normal power mode.
+ * Disabling any of the gyro axes will reduce the associated power
+ * consumption from the PLL but will not stop the DMP from running.
+ *
+ * @param reset
+ * Non-zero to reset the device. Note that this setting
+ * is volatile and the corresponding register bit will
+ * clear itself right after being applied.
+ * @param sleep
+ * Non-zero to put device into full sleep.
+ * @param disable_gx
+ * Non-zero to disable gyro X.
+ * @param disable_gy
+ * Non-zero to disable gyro Y.
+ * @param disable_gz
+ * Non-zero to disable gyro Z.
+ *
+ * @return ML_SUCCESS if successful; a non-zero error code otherwise.
+ */
+static int MLDLPowerMgmtMPU(struct mldl_cfg *pdata,
+ void *mlsl_handle,
+ unsigned char reset,
+ unsigned char sleep,
+ unsigned char disable_gx,
+ unsigned char disable_gy,
+ unsigned char disable_gz)
+{
+ unsigned char b;
+ int result;
+
+ result =
+ MLSLSerialRead(mlsl_handle, pdata->addr, MPUREG_PWR_MGM, 1,
+ &b);
+ ERROR_CHECK(result);
+
+ /* If we are awake, we need to put it in bypass before resetting */
+ if ((!(b & BIT_SLEEP)) && reset)
+ result = MLDLSetI2CBypass(pdata, mlsl_handle, 1);
+
+ /* If we are awake, we need to stop the DMP before going to sleep */
+ if ((!(b & BIT_SLEEP)) && sleep)
+ dmp_stop(pdata, mlsl_handle);
+
+ /* Reset if requested */
+ if (reset) {
+ MPL_LOGV("Reset MPU3050\n");
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b | BIT_H_RESET);
+ ERROR_CHECK(result);
+ MLOSSleep(5);
+ pdata->gyro_needs_reset = FALSE;
+ /* Some chips are awake after reset and some are asleep,
+ * check the status */
+ result = MLSLSerialRead(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, 1, &b);
+ ERROR_CHECK(result);
+ }
+
+ /* Update the suspended state just in case we return early */
+ if (b & BIT_SLEEP)
+ pdata->gyro_is_suspended = TRUE;
+ else
+ pdata->gyro_is_suspended = FALSE;
+
+ /* if the power status matches the request, there is nothing left to do */
+ if ((b & (BIT_SLEEP | BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG)) ==
+ (((sleep != 0) * BIT_SLEEP) |
+ ((disable_gx != 0) * BIT_STBY_XG) |
+ ((disable_gy != 0) * BIT_STBY_YG) |
+ ((disable_gz != 0) * BIT_STBY_ZG))) {
+ return ML_SUCCESS;
+ }
+
+ /*
+ * This specific transition between states needs to be reinterpreted:
+ * (1,1,1,1) -> (0,1,1,1) has to become
+ * (1,1,1,1) -> (1,0,0,0) -> (0,1,1,1)
+ * where
+ * (1,1,1,1) is (sleep=1,disable_gx=1,disable_gy=1,disable_gz=1)
+ */
+ if ((b & (BIT_SLEEP | BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG)) ==
+ (BIT_SLEEP | BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG)
+ && ((!sleep) && disable_gx && disable_gy && disable_gz)) {
+ result = MLDLPowerMgmtMPU(pdata, mlsl_handle, 0, 1, 0, 0, 0);
+ if (result)
+ return result;
+ b |= BIT_SLEEP;
+ b &= ~(BIT_STBY_XG | BIT_STBY_YG | BIT_STBY_ZG);
+ }
+
+ if ((b & BIT_SLEEP) != ((sleep != 0) * BIT_SLEEP)) {
+ if (sleep) {
+ result = MLDLSetI2CBypass(pdata, mlsl_handle, 1);
+ ERROR_CHECK(result);
+ b |= BIT_SLEEP;
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b);
+ ERROR_CHECK(result);
+ pdata->gyro_is_suspended = TRUE;
+ } else {
+ b &= ~BIT_SLEEP;
+ result =
+ MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b);
+ ERROR_CHECK(result);
+ pdata->gyro_is_suspended = FALSE;
+ MLOSSleep(5);
+ }
+ }
+ /*---
+ WORKAROUND FOR PUTTING GYRO AXIS in STAND-BY MODE
+ 1) put one axis at a time in stand-by
+ ---*/
+ if ((b & BIT_STBY_XG) != ((disable_gx != 0) * BIT_STBY_XG)) {
+ b ^= BIT_STBY_XG;
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b);
+ ERROR_CHECK(result);
+ }
+ if ((b & BIT_STBY_YG) != ((disable_gy != 0) * BIT_STBY_YG)) {
+ b ^= BIT_STBY_YG;
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b);
+ ERROR_CHECK(result);
+ }
+ if ((b & BIT_STBY_ZG) != ((disable_gz != 0) * BIT_STBY_ZG)) {
+ b ^= BIT_STBY_ZG;
+ result = MLSLSerialWriteSingle(mlsl_handle, pdata->addr,
+ MPUREG_PWR_MGM, b);
+ ERROR_CHECK(result);
+ }
+
+ return ML_SUCCESS;
+}
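+
+/*
+ * Illustrative call sketch (not part of the original driver): with the
+ * parameters documented above, putting only gyro Z into standby while
+ * keeping the device awake and not resetting it would look like
+ *
+ *	result = MLDLPowerMgmtMPU(mldl_cfg, gyro_handle, 0, 0, 0, 0, 1);
+ *
+ * and full sleep with all gyro axes left enabled like
+ *
+ *	result = MLDLPowerMgmtMPU(mldl_cfg, gyro_handle, 0, SLEEP, 0, 0, 0);
+ */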
+#endif /* M_HW */
+
+
+void mpu_print_cfg(struct mldl_cfg *mldl_cfg)
+{
+ struct mpu3050_platform_data *pdata = mldl_cfg->pdata;
+ struct ext_slave_platform_data *accel = &mldl_cfg->pdata->accel;
+ struct ext_slave_platform_data *compass =
+ &mldl_cfg->pdata->compass;
+ struct ext_slave_platform_data *pressure =
+ &mldl_cfg->pdata->pressure;
+
+ MPL_LOGD("mldl_cfg.addr = %02x\n", mldl_cfg->addr);
+ MPL_LOGD("mldl_cfg.int_config = %02x\n",
+ mldl_cfg->int_config);
+ MPL_LOGD("mldl_cfg.ext_sync = %02x\n", mldl_cfg->ext_sync);
+ MPL_LOGD("mldl_cfg.full_scale = %02x\n",
+ mldl_cfg->full_scale);
+ MPL_LOGD("mldl_cfg.lpf = %02x\n", mldl_cfg->lpf);
+ MPL_LOGD("mldl_cfg.clk_src = %02x\n", mldl_cfg->clk_src);
+ MPL_LOGD("mldl_cfg.divider = %02x\n", mldl_cfg->divider);
+ MPL_LOGD("mldl_cfg.dmp_enable = %02x\n",
+ mldl_cfg->dmp_enable);
+ MPL_LOGD("mldl_cfg.fifo_enable = %02x\n",
+ mldl_cfg->fifo_enable);
+ MPL_LOGD("mldl_cfg.dmp_cfg1 = %02x\n", mldl_cfg->dmp_cfg1);
+ MPL_LOGD("mldl_cfg.dmp_cfg2 = %02x\n", mldl_cfg->dmp_cfg2);
+ MPL_LOGD("mldl_cfg.offset_tc[0] = %02x\n",
+ mldl_cfg->offset_tc[0]);
+ MPL_LOGD("mldl_cfg.offset_tc[1] = %02x\n",
+ mldl_cfg->offset_tc[1]);
+ MPL_LOGD("mldl_cfg.offset_tc[2] = %02x\n",
+ mldl_cfg->offset_tc[2]);
+ MPL_LOGD("mldl_cfg.silicon_revision = %02x\n",
+ mldl_cfg->silicon_revision);
+ MPL_LOGD("mldl_cfg.product_id = %02x\n",
+ mldl_cfg->product_id);
+ MPL_LOGD("mldl_cfg.trim = %02x\n", mldl_cfg->trim);
+ MPL_LOGD("mldl_cfg.requested_sensors= %04lx\n",
+ mldl_cfg->requested_sensors);
+
+ if (mldl_cfg->accel) {
+ MPL_LOGD("slave_accel->suspend = %02x\n",
+ (int) mldl_cfg->accel->suspend);
+ MPL_LOGD("slave_accel->resume = %02x\n",
+ (int) mldl_cfg->accel->resume);
+ MPL_LOGD("slave_accel->read = %02x\n",
+ (int) mldl_cfg->accel->read);
+ MPL_LOGD("slave_accel->type = %02x\n",
+ mldl_cfg->accel->type);
+ MPL_LOGD("slave_accel->reg = %02x\n",
+ mldl_cfg->accel->reg);
+ MPL_LOGD("slave_accel->len = %02x\n",
+ mldl_cfg->accel->len);
+ MPL_LOGD("slave_accel->endian = %02x\n",
+ mldl_cfg->accel->endian);
+ MPL_LOGD("slave_accel->range.mantissa= %02lx\n",
+ mldl_cfg->accel->range.mantissa);
+ MPL_LOGD("slave_accel->range.fraction= %02lx\n",
+ mldl_cfg->accel->range.fraction);
+ } else {
+ MPL_LOGD("slave_accel = NULL\n");
+ }
+
+ if (mldl_cfg->compass) {
+ MPL_LOGD("slave_compass->suspend = %02x\n",
+ (int) mldl_cfg->compass->suspend);
+ MPL_LOGD("slave_compass->resume = %02x\n",
+ (int) mldl_cfg->compass->resume);
+ MPL_LOGD("slave_compass->read = %02x\n",
+ (int) mldl_cfg->compass->read);
+ MPL_LOGD("slave_compass->type = %02x\n",
+ mldl_cfg->compass->type);
+ MPL_LOGD("slave_compass->reg = %02x\n",
+ mldl_cfg->compass->reg);
+ MPL_LOGD("slave_compass->len = %02x\n",
+ mldl_cfg->compass->len);
+ MPL_LOGD("slave_compass->endian = %02x\n",
+ mldl_cfg->compass->endian);
+ MPL_LOGD("slave_compass->range.mantissa= %02lx\n",
+ mldl_cfg->compass->range.mantissa);
+ MPL_LOGD("slave_compass->range.fraction= %02lx\n",
+ mldl_cfg->compass->range.fraction);
+
+ } else {
+ MPL_LOGD("slave_compass = NULL\n");
+ }
+
+ if (mldl_cfg->pressure) {
+ MPL_LOGD("slave_pressure->suspend = %02x\n",
+ (int) mldl_cfg->pressure->suspend);
+ MPL_LOGD("slave_pressure->resume = %02x\n",
+ (int) mldl_cfg->pressure->resume);
+ MPL_LOGD("slave_pressure->read = %02x\n",
+ (int) mldl_cfg->pressure->read);
+ MPL_LOGD("slave_pressure->type = %02x\n",
+ mldl_cfg->pressure->type);
+ MPL_LOGD("slave_pressure->reg = %02x\n",
+ mldl_cfg->pressure->reg);
+ MPL_LOGD("slave_pressure->len = %02x\n",
+ mldl_cfg->pressure->len);
+ MPL_LOGD("slave_pressure->endian = %02x\n",
+ mldl_cfg->pressure->endian);
+ MPL_LOGD("slave_pressure->range.mantissa= %02lx\n",
+ mldl_cfg->pressure->range.mantissa);
+ MPL_LOGD("slave_pressure->range.fraction= %02lx\n",
+ mldl_cfg->pressure->range.fraction);
+
+ } else {
+ MPL_LOGD("slave_pressure = NULL\n");
+ }
+ MPL_LOGD("accel->get_slave_descr = %x\n",
+ (unsigned int) accel->get_slave_descr);
+ MPL_LOGD("accel->irq = %02x\n", accel->irq);
+ MPL_LOGD("accel->adapt_num = %02x\n", accel->adapt_num);
+ MPL_LOGD("accel->bus = %02x\n", accel->bus);
+ MPL_LOGD("accel->address = %02x\n", accel->address);
+ MPL_LOGD("accel->orientation =\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n",
+ accel->orientation[0], accel->orientation[1],
+ accel->orientation[2], accel->orientation[3],
+ accel->orientation[4], accel->orientation[5],
+ accel->orientation[6], accel->orientation[7],
+ accel->orientation[8]);
+ MPL_LOGD("compass->get_slave_descr = %x\n",
+ (unsigned int) compass->get_slave_descr);
+ MPL_LOGD("compass->irq = %02x\n", compass->irq);
+ MPL_LOGD("compass->adapt_num = %02x\n", compass->adapt_num);
+ MPL_LOGD("compass->bus = %02x\n", compass->bus);
+ MPL_LOGD("compass->address = %02x\n", compass->address);
+ MPL_LOGD("compass->orientation =\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n",
+ compass->orientation[0], compass->orientation[1],
+ compass->orientation[2], compass->orientation[3],
+ compass->orientation[4], compass->orientation[5],
+ compass->orientation[6], compass->orientation[7],
+ compass->orientation[8]);
+ MPL_LOGD("pressure->get_slave_descr = %x\n",
+ (unsigned int) pressure->get_slave_descr);
+ MPL_LOGD("pressure->irq = %02x\n", pressure->irq);
+ MPL_LOGD("pressure->adapt_num = %02x\n", pressure->adapt_num);
+ MPL_LOGD("pressure->bus = %02x\n", pressure->bus);
+ MPL_LOGD("pressure->address = %02x\n", pressure->address);
+ MPL_LOGD("pressure->orientation =\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n",
+ pressure->orientation[0], pressure->orientation[1],
+ pressure->orientation[2], pressure->orientation[3],
+ pressure->orientation[4], pressure->orientation[5],
+ pressure->orientation[6], pressure->orientation[7],
+ pressure->orientation[8]);
+
+ MPL_LOGD("pdata->int_config = %02x\n", pdata->int_config);
+ MPL_LOGD("pdata->level_shifter = %02x\n",
+ pdata->level_shifter);
+ MPL_LOGD("pdata->orientation =\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n"
+ " %2d %2d %2d\n",
+ pdata->orientation[0], pdata->orientation[1],
+ pdata->orientation[2], pdata->orientation[3],
+ pdata->orientation[4], pdata->orientation[5],
+ pdata->orientation[6], pdata->orientation[7],
+ pdata->orientation[8]);
+
+ MPL_LOGD("Struct sizes: mldl_cfg: %d, "
+ "ext_slave_descr:%d, "
+ "mpu3050_platform_data:%d: RamOffset: %d\n",
+ sizeof(struct mldl_cfg), sizeof(struct ext_slave_descr),
+ sizeof(struct mpu3050_platform_data),
+ offsetof(struct mldl_cfg, ram));
+}
+
+int mpu_set_slave(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *slave_pdata)
+{
+ int result;
+ unsigned char reg;
+ unsigned char slave_reg;
+ unsigned char slave_len;
+ unsigned char slave_endian;
+ unsigned char slave_address;
+
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, TRUE);
+
+ if (NULL == slave || NULL == slave_pdata) {
+ slave_reg = 0;
+ slave_len = 0;
+ slave_endian = 0;
+ slave_address = 0;
+ } else {
+ slave_reg = slave->reg;
+ slave_len = slave->len;
+ slave_endian = slave->endian;
+ slave_address = slave_pdata->address;
+ }
+
+ /* Address */
+ result = MLSLSerialWriteSingle(gyro_handle,
+ mldl_cfg->addr,
+ MPUREG_AUX_SLV_ADDR,
+ slave_address);
+ ERROR_CHECK(result);
+ /* Register */
+ result = MLSLSerialRead(gyro_handle, mldl_cfg->addr,
+ MPUREG_ACCEL_BURST_ADDR, 1,
+ &reg);
+ ERROR_CHECK(result);
+ reg = ((reg & 0x80) | slave_reg);
+ result = MLSLSerialWriteSingle(gyro_handle,
+ mldl_cfg->addr,
+ MPUREG_ACCEL_BURST_ADDR,
+ reg);
+ ERROR_CHECK(result);
+
+#ifdef M_HW
+ /* Length, byte swapping, grouping & enable */
+ if (slave_len > BITS_SLV_LENG) {
+ MPL_LOGW("Limiting slave burst read length to "
+ "the allowed maximum (15B, req. %d)\n",
+ slave_len);
+ slave_len = BITS_SLV_LENG;
+ }
+ reg = slave_len;
+ if (slave_endian == EXT_SLAVE_LITTLE_ENDIAN)
+ reg |= BIT_SLV_BYTE_SW;
+ reg |= BIT_SLV_GRP;
+ reg |= BIT_SLV_ENABLE;
+
+ result = MLSLSerialWriteSingle(gyro_handle,
+ mldl_cfg->addr,
+ MPUREG_I2C_SLV0_CTRL,
+ reg);
+#else
+ /* Length */
+ result = MLSLSerialRead(gyro_handle, mldl_cfg->addr,
+ MPUREG_USER_CTRL, 1, &reg);
+ ERROR_CHECK(result);
+ reg = (reg & ~BIT_AUX_RD_LENG);
+ result = MLSLSerialWriteSingle(gyro_handle,
+ mldl_cfg->addr,
+ MPUREG_USER_CTRL, reg);
+ ERROR_CHECK(result);
+#endif
+
+ if (slave_address) {
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, FALSE);
+ ERROR_CHECK(result);
+ }
+ return result;
+}
+
+/**
+ * Check to see if the gyro was reset by testing a couple of registers known
+ * to change on reset.
+ *
+ * @param mldl_cfg mldl configuration structure
+ * @param gyro_handle handle used to communicate with the gyro
+ *
+ * @return ML_SUCCESS or non-zero error code
+ */
+static int mpu_was_reset(struct mldl_cfg *mldl_cfg, void *gyro_handle)
+{
+ int result = ML_SUCCESS;
+ unsigned char reg;
+
+ result = MLSLSerialRead(gyro_handle, mldl_cfg->addr,
+ MPUREG_DMP_CFG_2, 1, &reg);
+ ERROR_CHECK(result);
+
+ if (mldl_cfg->dmp_cfg2 != reg)
+ return TRUE;
+
+ if (0 != mldl_cfg->dmp_cfg1)
+ return FALSE;
+
+ result = MLSLSerialRead(gyro_handle, mldl_cfg->addr,
+ MPUREG_SMPLRT_DIV, 1, &reg);
+ ERROR_CHECK(result);
+ if (reg != mldl_cfg->divider)
+ return TRUE;
+
+ if (0 != mldl_cfg->divider)
+ return FALSE;
+
+ /* Inconclusive; assume it was reset */
+ return TRUE;
+}
+
+static int gyro_resume(struct mldl_cfg *mldl_cfg, void *gyro_handle)
+{
+ int result;
+ int ii;
+ int jj;
+ unsigned char reg;
+ unsigned char regs[7];
+
+ /* Wake up the part */
+#ifdef M_HW
+ result = mpu60xx_pwr_mgmt(mldl_cfg, gyro_handle, RESET,
+ WAKE_UP);
+ ERROR_CHECK(result);
+
+ /* Configure the MPU */
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, 1);
+ ERROR_CHECK(result);
+ /* setting int_config with the proper flag BIT_BYPASS_EN
+ should be done by the setup functions */
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_INT_PIN_CFG,
+ (mldl_cfg->pdata->int_config |
+ BIT_BYPASS_EN));
+ ERROR_CHECK(result);
+ /* temporary: masking out higher bits to avoid switching
+ intelligence */
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_INT_ENABLE,
+ (mldl_cfg->int_config));
+ ERROR_CHECK(result);
+#else
+ result = MLDLPowerMgmtMPU(mldl_cfg, gyro_handle, 0, 0,
+ mldl_cfg->gyro_power & BIT_STBY_XG,
+ mldl_cfg->gyro_power & BIT_STBY_YG,
+ mldl_cfg->gyro_power & BIT_STBY_ZG);
+
+ if (!mldl_cfg->gyro_needs_reset &&
+ !mpu_was_reset(mldl_cfg, gyro_handle)) {
+ return ML_SUCCESS;
+ }
+
+ result = MLDLPowerMgmtMPU(mldl_cfg, gyro_handle, 1, 0,
+ mldl_cfg->gyro_power & BIT_STBY_XG,
+ mldl_cfg->gyro_power & BIT_STBY_YG,
+ mldl_cfg->gyro_power & BIT_STBY_ZG);
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_INT_CFG,
+ (mldl_cfg->int_config |
+ mldl_cfg->pdata->int_config));
+ ERROR_CHECK(result);
+#endif
+
+ result = MLSLSerialRead(gyro_handle, mldl_cfg->addr,
+ MPUREG_PWR_MGM, 1, &reg);
+ ERROR_CHECK(result);
+ reg &= ~BITS_CLKSEL;
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_PWR_MGM,
+ mldl_cfg->clk_src | reg);
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_SMPLRT_DIV,
+ mldl_cfg->divider);
+ ERROR_CHECK(result);
+
+#ifdef M_HW
+ reg = DLPF_FS_SYNC_VALUE(0, mldl_cfg->full_scale, 0);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_GYRO_CONFIG, reg);
+ reg = DLPF_FS_SYNC_VALUE(mldl_cfg->ext_sync, 0, mldl_cfg->lpf);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_CONFIG, reg);
+#else
+ reg = DLPF_FS_SYNC_VALUE(mldl_cfg->ext_sync,
+ mldl_cfg->full_scale, mldl_cfg->lpf);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_DLPF_FS_SYNC, reg);
+#endif
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_DMP_CFG_1,
+ mldl_cfg->dmp_cfg1);
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_DMP_CFG_2,
+ mldl_cfg->dmp_cfg2);
+ ERROR_CHECK(result);
+
+ /* Write and verify memory */
+ for (ii = 0; ii < MPU_MEM_NUM_RAM_BANKS; ii++) {
+ unsigned char read[MPU_MEM_BANK_SIZE];
+
+ result = MLSLSerialWriteMem(gyro_handle,
+ mldl_cfg->addr,
+ ((ii << 8) | 0x00),
+ MPU_MEM_BANK_SIZE,
+ mldl_cfg->ram[ii]);
+ ERROR_CHECK(result);
+ result = MLSLSerialReadMem(gyro_handle, mldl_cfg->addr,
+ ((ii << 8) | 0x00),
+ MPU_MEM_BANK_SIZE, read);
+ ERROR_CHECK(result);
+
+#ifdef M_HW
+#define ML_SKIP_CHECK 38
+#else
+#define ML_SKIP_CHECK 20
+#endif
+ for (jj = 0; jj < MPU_MEM_BANK_SIZE; jj++) {
+ /* skip the register memory locations */
+ if (ii == 0 && jj < ML_SKIP_CHECK)
+ continue;
+ if (mldl_cfg->ram[ii][jj] != read[jj]) {
+ result = ML_ERROR_SERIAL_WRITE;
+ break;
+ }
+ }
+ ERROR_CHECK(result);
+ }
+
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_XG_OFFS_TC,
+ mldl_cfg->offset_tc[0]);
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_YG_OFFS_TC,
+ mldl_cfg->offset_tc[1]);
+ ERROR_CHECK(result);
+ result = MLSLSerialWriteSingle(gyro_handle, mldl_cfg->addr,
+ MPUREG_ZG_OFFS_TC,
+ mldl_cfg->offset_tc[2]);
+ ERROR_CHECK(result);
+
+ regs[0] = MPUREG_X_OFFS_USRH;
+ for (ii = 0; ii < DIM(mldl_cfg->offset); ii++) {
+ regs[1 + ii * 2] =
+ (unsigned char)(mldl_cfg->offset[ii] >> 8)
+ & 0xff;
+ regs[1 + ii * 2 + 1] =
+ (unsigned char)(mldl_cfg->offset[ii] & 0xff);
+ }
+ result = MLSLSerialWrite(gyro_handle, mldl_cfg->addr, 7, regs);
+ ERROR_CHECK(result);
+
+ /* Configure slaves */
+ result = MLDLSetLevelShifterBit(mldl_cfg, gyro_handle,
+ mldl_cfg->pdata->level_shifter);
+ ERROR_CHECK(result);
+ return result;
+}
+/*******************************************************************************
+ *******************************************************************************
+ * Exported functions
+ *******************************************************************************
+ ******************************************************************************/
+
+/**
+ * Initializes the pdata structure to defaults.
+ *
+ * Opens the device to read silicon revision, product id and whoami.
+ *
+ * @param mldl_cfg
+ * The internal device configuration data structure.
+ * @param mlsl_handle
+ * The serial communication handle.
+ *
+ * @return ML_SUCCESS if the silicon revision, product id and whoami are supported
+ * by this software.
+ */
+int mpu3050_open(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle)
+{
+ int result;
+ /* Default is logic HIGH, push-pull, latch disabled, any read to clear */
+ mldl_cfg->ignore_system_suspend = FALSE;
+ mldl_cfg->int_config = BIT_INT_ANYRD_2CLEAR | BIT_DMP_INT_EN;
+ mldl_cfg->clk_src = MPU_CLK_SEL_PLLGYROZ;
+ mldl_cfg->lpf = MPU_FILTER_42HZ;
+ mldl_cfg->full_scale = MPU_FS_2000DPS;
+ mldl_cfg->divider = 4;
+ mldl_cfg->dmp_enable = 1;
+ mldl_cfg->fifo_enable = 1;
+ mldl_cfg->ext_sync = 0;
+ mldl_cfg->dmp_cfg1 = 0;
+ mldl_cfg->dmp_cfg2 = 0;
+ mldl_cfg->gyro_power = 0;
+ mldl_cfg->gyro_is_bypassed = TRUE;
+ mldl_cfg->dmp_is_running = FALSE;
+ mldl_cfg->gyro_is_suspended = TRUE;
+ mldl_cfg->accel_is_suspended = TRUE;
+ mldl_cfg->compass_is_suspended = TRUE;
+ mldl_cfg->pressure_is_suspended = TRUE;
+ mldl_cfg->gyro_needs_reset = FALSE;
+ if (mldl_cfg->addr == 0) {
+#ifdef __KERNEL__
+ return ML_ERROR_INVALID_PARAMETER;
+#else
+ mldl_cfg->addr = 0x68;
+#endif
+ }
+
+ /*
+ * Reset,
+ * Take the DMP out of sleep, and
+ * read the product_id, silicon rev and whoami
+ */
+#ifdef M_HW
+ result = mpu60xx_pwr_mgmt(mldl_cfg, mlsl_handle,
+ RESET, WAKE_UP);
+#else
+ result = MLDLPowerMgmtMPU(mldl_cfg, mlsl_handle, RESET, 0, 0, 0, 0);
+#endif
+ ERROR_CHECK(result);
+
+ result = MLDLGetSiliconRev(mldl_cfg, mlsl_handle);
+ ERROR_CHECK(result);
+#ifndef M_HW
+ result = MLSLSerialRead(mlsl_handle, mldl_cfg->addr,
+ MPUREG_PRODUCT_ID, 1,
+ &mldl_cfg->product_id);
+ ERROR_CHECK(result);
+#endif
+
+ /* Get the factory temperature compensation offsets */
+ result = MLSLSerialRead(mlsl_handle, mldl_cfg->addr,
+ MPUREG_XG_OFFS_TC, 1,
+ &mldl_cfg->offset_tc[0]);
+ ERROR_CHECK(result);
+ result = MLSLSerialRead(mlsl_handle, mldl_cfg->addr,
+ MPUREG_YG_OFFS_TC, 1,
+ &mldl_cfg->offset_tc[1]);
+ ERROR_CHECK(result);
+ result = MLSLSerialRead(mlsl_handle, mldl_cfg->addr,
+ MPUREG_ZG_OFFS_TC, 1,
+ &mldl_cfg->offset_tc[2]);
+ ERROR_CHECK(result);
+
+ /* Configure the MPU */
+#ifdef M_HW
+ result = mpu60xx_pwr_mgmt(mldl_cfg, mlsl_handle,
+ FALSE, SLEEP);
+#else
+ result =
+ MLDLPowerMgmtMPU(mldl_cfg, mlsl_handle, 0, SLEEP, 0, 0, 0);
+#endif
+ ERROR_CHECK(result);
+
+ if (mldl_cfg->accel && mldl_cfg->accel->init) {
+ result = mldl_cfg->accel->init(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel);
+ ERROR_CHECK(result);
+ }
+
+ if (mldl_cfg->compass && mldl_cfg->compass->init) {
+ result = mldl_cfg->compass->init(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass);
+ if (ML_SUCCESS != result) {
+ MPL_LOGE("mldl_cfg->compass->init returned %d\n",
+ result);
+ goto out_accel;
+ }
+ }
+ if (mldl_cfg->pressure && mldl_cfg->pressure->init) {
+ result = mldl_cfg->pressure->init(pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure);
+ if (ML_SUCCESS != result) {
+ MPL_LOGE("mldl_cfg->pressure->init returned %d\n",
+ result);
+ goto out_compass;
+ }
+ }
+
+ mldl_cfg->requested_sensors = ML_THREE_AXIS_GYRO;
+ if (mldl_cfg->accel && mldl_cfg->accel->resume)
+ mldl_cfg->requested_sensors |= ML_THREE_AXIS_ACCEL;
+
+ if (mldl_cfg->compass && mldl_cfg->compass->resume)
+ mldl_cfg->requested_sensors |= ML_THREE_AXIS_COMPASS;
+
+ if (mldl_cfg->pressure && mldl_cfg->pressure->resume)
+ mldl_cfg->requested_sensors |= ML_THREE_AXIS_PRESSURE;
+
+ return result;
+
+out_compass:
+ if (mldl_cfg->compass->exit)
+ mldl_cfg->compass->exit(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass);
+out_accel:
+ if (mldl_cfg->accel->exit)
+ mldl_cfg->accel->exit(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel);
+ return result;
+
+}
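+
+/*
+ * Illustrative sketch (not part of the original driver) of the minimal
+ * setup a kernel caller must do before mpu3050_open(): the I2C address
+ * (typically 0x68) and the platform data must already be filled in,
+ * since a zero addr is rejected with ML_ERROR_INVALID_PARAMETER in
+ * kernel builds. "client" and the extra adapter handles are
+ * hypothetical locals of an i2c_client based caller.
+ *
+ *	mldl_cfg->addr = client->addr;
+ *	mldl_cfg->pdata = client->dev.platform_data;
+ *	result = mpu3050_open(mldl_cfg, client->adapter, accel_adapter,
+ *			      compass_adapter, pressure_adapter);
+ */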
+
+/**
+ * Close the mpu3050 interface
+ *
+ * @param mldl_cfg pointer to the configuration structure
+ * @param mlsl_handle pointer to the serial layer handle
+ *
+ * @return ML_SUCCESS or non-zero error code
+ */
+int mpu3050_close(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle)
+{
+ int result = ML_SUCCESS;
+ int ret_result = ML_SUCCESS;
+
+ if (mldl_cfg->accel && mldl_cfg->accel->exit) {
+ result = mldl_cfg->accel->exit(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel);
+ if (ML_SUCCESS != result)
+ MPL_LOGE("Accel exit failed %d\n", result);
+ ret_result = result;
+ }
+ if (ML_SUCCESS == ret_result)
+ ret_result = result;
+
+ if (mldl_cfg->compass && mldl_cfg->compass->exit) {
+ result = mldl_cfg->compass->exit(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass);
+ if (ML_SUCCESS != result)
+ MPL_LOGE("Compass exit failed %d\n", result);
+ }
+ if (ML_SUCCESS == ret_result)
+ ret_result = result;
+
+ if (mldl_cfg->pressure && mldl_cfg->pressure->exit) {
+ result = mldl_cfg->pressure->exit(pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure);
+ if (ML_SUCCESS != result)
+ MPL_LOGE("Pressure exit failed %d\n", result);
+ }
+ if (ML_SUCCESS == ret_result)
+ ret_result = result;
+
+ return ret_result;
+}
+
+/**
+ * @brief resume the MPU3050 device and all the other sensor
+ * devices from their low power state.
+ *
+ * @param mldl_cfg
+ * pointer to the configuration structure
+ * @param gyro_handle
+ * the main file handle to the MPU3050 device.
+ * @param accel_handle
+ * a handle to the accelerometer device, if it sits
+ * on a separate bus. Can match gyro_handle if
+ * the accelerometer device operates on the
+ * MPU's primary bus.
+ * @param compass_handle
+ * a handle to the compass device, if it sits
+ * on a separate bus. Can match gyro_handle if
+ * the compass device operates on the
+ * MPU's primary bus.
+ * @param pressure_handle
+ * a handle to the pressure sensor device, if it sits
+ * on a separate bus. Can match gyro_handle if
+ * the pressure sensor device operates on the
+ * MPU's primary bus.
+ * @param resume_gyro
+ * whether resuming the gyroscope device is
+ * actually needed (if the device supports low power
+ * mode of some sort).
+ * @param resume_accel
+ * whether resuming the accelerometer device is
+ * actually needed (if the device supports low power
+ * mode of some sort).
+ * @param resume_compass
+ * whether resuming the compass device is
+ * actually needed (if the device supports low power
+ * mode of some sort).
+ * @param resume_pressure
+ * whether resuming the pressure sensor device is
+ * actually needed (if the device supports low power
+ * mode of some sort).
+ * @return ML_SUCCESS or a non-zero error code.
+ */
+int mpu3050_resume(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ bool resume_gyro,
+ bool resume_accel,
+ bool resume_compass,
+ bool resume_pressure)
+{
+ int result = ML_SUCCESS;
+
+#ifdef CONFIG_MPU_SENSORS_DEBUG
+ mpu_print_cfg(mldl_cfg);
+#endif
+
+ if (resume_accel &&
+ ((!mldl_cfg->accel) || (!mldl_cfg->accel->resume)))
+ return ML_ERROR_INVALID_PARAMETER;
+ if (resume_compass &&
+ ((!mldl_cfg->compass) || (!mldl_cfg->compass->resume)))
+ return ML_ERROR_INVALID_PARAMETER;
+ if (resume_pressure &&
+ ((!mldl_cfg->pressure) || (!mldl_cfg->pressure->resume)))
+ return ML_ERROR_INVALID_PARAMETER;
+
+ if (resume_gyro && mldl_cfg->gyro_is_suspended) {
+ result = gyro_resume(mldl_cfg, gyro_handle);
+ ERROR_CHECK(result);
+ }
+
+ if (resume_accel && mldl_cfg->accel_is_suspended) {
+ if (!mldl_cfg->gyro_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->accel.bus) {
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, TRUE);
+ ERROR_CHECK(result);
+ }
+ result = mldl_cfg->accel->resume(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel);
+ ERROR_CHECK(result);
+ mldl_cfg->accel_is_suspended = FALSE;
+ }
+
+ if (!mldl_cfg->gyro_is_suspended && !mldl_cfg->accel_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->accel.bus) {
+ result = mpu_set_slave(mldl_cfg,
+ gyro_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel);
+ ERROR_CHECK(result);
+ }
+
+ if (resume_compass && mldl_cfg->compass_is_suspended) {
+ if (!mldl_cfg->gyro_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->compass.bus) {
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, TRUE);
+ ERROR_CHECK(result);
+ }
+ result = mldl_cfg->compass->resume(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->
+ compass);
+ ERROR_CHECK(result);
+ mldl_cfg->compass_is_suspended = FALSE;
+ }
+
+ if (!mldl_cfg->gyro_is_suspended && !mldl_cfg->compass_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->compass.bus) {
+ result = mpu_set_slave(mldl_cfg,
+ gyro_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass);
+ ERROR_CHECK(result);
+ }
+
+ if (resume_pressure && mldl_cfg->pressure_is_suspended) {
+ if (!mldl_cfg->gyro_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->pressure.bus) {
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, TRUE);
+ ERROR_CHECK(result);
+ }
+ result = mldl_cfg->pressure->resume(pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->
+ pressure);
+ ERROR_CHECK(result);
+ mldl_cfg->pressure_is_suspended = FALSE;
+ }
+
+ if (!mldl_cfg->gyro_is_suspended && !mldl_cfg->pressure_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->pressure.bus) {
+ result = mpu_set_slave(mldl_cfg,
+ gyro_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure);
+ ERROR_CHECK(result);
+ }
+
+ /* Now start */
+ if (resume_gyro) {
+ result = dmp_start(mldl_cfg, gyro_handle);
+ ERROR_CHECK(result);
+ }
+
+ return result;
+}
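+
+/*
+ * Illustrative call sketch (not part of the original driver): waking only
+ * the gyro and the accelerometer, as a bus driver might do from its
+ * resume handler. The handle variables are hypothetical locals.
+ *
+ *	result = mpu3050_resume(mldl_cfg, gyro_handle, accel_handle,
+ *				compass_handle, pressure_handle,
+ *				true, true, false, false);
+ */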
+
+/**
+ * @brief suspend the MPU3050 device and all the other sensor
+ * devices into their low power state.
+ * @param gyro_handle
+ * the main file handle to the MPU3050 device.
+ * @param accel_handle
+ * a handle to the accelerometer device, if it sits
+ * on a separate bus. Can match gyro_handle if
+ * the accelerometer device operates on the
+ * MPU's primary bus.
+ * @param compass_handle
+ * a handle to the compass device, if it sits
+ * on a separate bus. Can match gyro_handle if
+ * the compass device operates on the
+ * MPU's primary bus.
+ * @param pressure_handle
+ * a handle to the pressure sensor device, if it sits
+ * on a separate bus. Can match gyro_handle if
+ * the pressure sensor device operates on the
+ * MPU's primary bus.
+ * @param suspend_gyro
+ * whether suspending the gyroscope device is
+ * actually needed (if the device supports a low power
+ * mode of some sort).
+ * @param suspend_accel
+ * whether suspending the accelerometer device is
+ * actually needed (if the device supports a low power
+ * mode of some sort).
+ * @param suspend_compass
+ * whether suspending the compass device is
+ * actually needed (if the device supports a low power
+ * mode of some sort).
+ * @param suspend_pressure
+ * whether suspending the pressure sensor device is
+ * actually needed (if the device supports a low power
+ * mode of some sort).
+ * @return ML_SUCCESS or a non-zero error code.
+ */
+int mpu3050_suspend(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ bool suspend_gyro,
+ bool suspend_accel,
+ bool suspend_compass,
+ bool suspend_pressure)
+{
+ int result = ML_SUCCESS;
+
+ if (suspend_gyro && !mldl_cfg->gyro_is_suspended) {
+#ifdef M_HW
+ return ML_SUCCESS;
+ /* This puts the bus into bypass mode */
+ result = MLDLSetI2CBypass(mldl_cfg, gyro_handle, 1);
+ ERROR_CHECK(result);
+ result = mpu60xx_pwr_mgmt(mldl_cfg, gyro_handle, 0, SLEEP);
+#else
+ result = MLDLPowerMgmtMPU(mldl_cfg, gyro_handle,
+ 0, SLEEP, 0, 0, 0);
+#endif
+ ERROR_CHECK(result);
+ }
+
+ if (!mldl_cfg->accel_is_suspended && suspend_accel &&
+ mldl_cfg->accel && mldl_cfg->accel->suspend) {
+ if (!mldl_cfg->gyro_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->accel.bus) {
+ result = mpu_set_slave(mldl_cfg, gyro_handle,
+ NULL, NULL);
+ ERROR_CHECK(result);
+ }
+ result = mldl_cfg->accel->suspend(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel);
+ ERROR_CHECK(result);
+ mldl_cfg->accel_is_suspended = TRUE;
+ }
+
+ if (!mldl_cfg->compass_is_suspended && suspend_compass &&
+ mldl_cfg->compass && mldl_cfg->compass->suspend) {
+ if (!mldl_cfg->gyro_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->compass.bus) {
+ result = mpu_set_slave(mldl_cfg, gyro_handle,
+ NULL, NULL);
+ ERROR_CHECK(result);
+ }
+ result = mldl_cfg->compass->suspend(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->
+ pdata->compass);
+ ERROR_CHECK(result);
+ mldl_cfg->compass_is_suspended = TRUE;
+ }
+
+ if (!mldl_cfg->pressure_is_suspended && suspend_pressure &&
+ mldl_cfg->pressure && mldl_cfg->pressure->suspend) {
+ if (!mldl_cfg->gyro_is_suspended &&
+ EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->pressure.bus) {
+ result = mpu_set_slave(mldl_cfg, gyro_handle,
+ NULL, NULL);
+ ERROR_CHECK(result);
+ }
+ result = mldl_cfg->pressure->suspend(pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->
+ pdata->pressure);
+ ERROR_CHECK(result);
+ mldl_cfg->pressure_is_suspended = TRUE;
+ }
+ return result;
+}
+
+
+/**
+ * @brief read raw sensor data from the accelerometer device
+ * in use.
+ * @param mldl_cfg
+ * A pointer to the struct mldl_cfg data structure.
+ * @param accel_handle
+ * The handle to the device the accelerometer is connected to.
+ * @param data
+ * a buffer to store the raw sensor data.
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+int mpu3050_read_accel(struct mldl_cfg *mldl_cfg,
+ void *accel_handle, unsigned char *data)
+{
+ if (NULL != mldl_cfg->accel && NULL != mldl_cfg->accel->read)
+ if ((EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->accel.bus)
+ && (!mldl_cfg->gyro_is_bypassed))
+ return ML_ERROR_FEATURE_NOT_ENABLED;
+ else
+ return mldl_cfg->accel->read(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
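+
+/*
+ * Illustrative read sketch (not part of the original driver): the caller
+ * provides a buffer at least mldl_cfg->accel->len bytes long and must be
+ * prepared for ML_ERROR_FEATURE_NOT_ENABLED when the accelerometer sits
+ * on the secondary bus and the gyro is not in bypass mode. "buf" and its
+ * size are hypothetical.
+ *
+ *	unsigned char buf[16];
+ *	result = mpu3050_read_accel(mldl_cfg, accel_handle, buf);
+ *	if (result == ML_ERROR_FEATURE_NOT_ENABLED)
+ *		MPL_LOGD("accel is behind the gyro; direct read unavailable\n");
+ */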
+
+/**
+ * @brief read raw sensor data from the compass device
+ * in use.
+ * @param mldl_cfg
+ * A pointer to the struct mldl_cfg data structure.
+ * @param compass_handle
+ * The handle to the device the compass is connected to.
+ * @param data
+ * a buffer to store the raw sensor data.
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+int mpu3050_read_compass(struct mldl_cfg *mldl_cfg,
+ void *compass_handle, unsigned char *data)
+{
+ if (NULL != mldl_cfg->compass && NULL != mldl_cfg->compass->read)
+ if ((EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->compass.bus)
+ && (!mldl_cfg->gyro_is_bypassed))
+ return ML_ERROR_FEATURE_NOT_ENABLED;
+ else
+ return mldl_cfg->compass->read(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+/**
+ * @brief read raw sensor data from the pressure device
+ * in use.
+ * @param mldl_cfg
+ * A pointer to the struct mldl_cfg data structure.
+ * @param pressure_handle
+ * The handle to the device the pressure sensor is connected to.
+ * @param data
+ * a buffer to store the raw sensor data.
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+int mpu3050_read_pressure(struct mldl_cfg *mldl_cfg,
+ void *pressure_handle, unsigned char *data)
+{
+ if (NULL != mldl_cfg->pressure && NULL != mldl_cfg->pressure->read)
+ if ((EXT_SLAVE_BUS_SECONDARY == mldl_cfg->pdata->pressure.bus)
+ && (!mldl_cfg->gyro_is_bypassed))
+ return ML_ERROR_FEATURE_NOT_ENABLED;
+ else
+ return mldl_cfg->pressure->read(
+ pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+int mpu3050_config_accel(struct mldl_cfg *mldl_cfg,
+ void *accel_handle,
+ struct ext_slave_config *data)
+{
+ if (NULL != mldl_cfg->accel && NULL != mldl_cfg->accel->config)
+ return mldl_cfg->accel->config(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+
+}
+
+int mpu3050_config_compass(struct mldl_cfg *mldl_cfg,
+ void *compass_handle,
+ struct ext_slave_config *data)
+{
+ if (NULL != mldl_cfg->compass && NULL != mldl_cfg->compass->config)
+ return mldl_cfg->compass->config(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+
+}
+
+int mpu3050_config_pressure(struct mldl_cfg *mldl_cfg,
+ void *pressure_handle,
+ struct ext_slave_config *data)
+{
+ if (NULL != mldl_cfg->pressure && NULL != mldl_cfg->pressure->config)
+ return mldl_cfg->pressure->config(pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+int mpu3050_get_config_accel(struct mldl_cfg *mldl_cfg,
+ void *accel_handle,
+ struct ext_slave_config *data)
+{
+ if (NULL != mldl_cfg->accel && NULL != mldl_cfg->accel->get_config)
+ return mldl_cfg->accel->get_config(accel_handle,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+
+}
+
+int mpu3050_get_config_compass(struct mldl_cfg *mldl_cfg,
+ void *compass_handle,
+ struct ext_slave_config *data)
+{
+ if (NULL != mldl_cfg->compass && NULL != mldl_cfg->compass->get_config)
+ return mldl_cfg->compass->get_config(compass_handle,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+
+}
+
+int mpu3050_get_config_pressure(struct mldl_cfg *mldl_cfg,
+ void *pressure_handle,
+ struct ext_slave_config *data)
+{
+ if (NULL != mldl_cfg->pressure &&
+ NULL != mldl_cfg->pressure->get_config)
+ return mldl_cfg->pressure->get_config(pressure_handle,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure,
+ data);
+ else
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+
+/**
+ *@}
+ */
diff --git a/drivers/misc/mpu3050/mldl_cfg.h b/drivers/misc/mpu3050/mldl_cfg.h
new file mode 100644
index 000000000000..ad6a211c5d86
--- /dev/null
+++ b/drivers/misc/mpu3050/mldl_cfg.h
@@ -0,0 +1,199 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @addtogroup MLDL
+ *
+ * @{
+ * @file mldl_cfg.h
+ * @brief The Motion Library Driver Layer Configuration header file.
+ */
+
+#ifndef __MLDL_CFG_H__
+#define __MLDL_CFG_H__
+
+/* ------------------ */
+/* - Include Files. - */
+/* ------------------ */
+
+#include "mlsl.h"
+#include "mpu.h"
+
+/* --------------------- */
+/* - Defines. - */
+/* --------------------- */
+
+ /*************************************************************************/
+ /* Sensors */
+ /*************************************************************************/
+
+#define ML_X_GYRO (0x0001)
+#define ML_Y_GYRO (0x0002)
+#define ML_Z_GYRO (0x0004)
+#define ML_DMP_PROCESSOR (0x0008)
+
+#define ML_X_ACCEL (0x0010)
+#define ML_Y_ACCEL (0x0020)
+#define ML_Z_ACCEL (0x0040)
+
+#define ML_X_COMPASS (0x0080)
+#define ML_Y_COMPASS (0x0100)
+#define ML_Z_COMPASS (0x0200)
+
+#define ML_X_PRESSURE (0x0400)
+#define ML_Y_PRESSURE (0x0800)
+#define ML_Z_PRESSURE (0x1000)
+
+#define ML_TEMPERATURE (0x2000)
+#define ML_TIME (0x4000)
+
+#define ML_THREE_AXIS_GYRO (0x000F)
+#define ML_THREE_AXIS_ACCEL (0x0070)
+#define ML_THREE_AXIS_COMPASS (0x0380)
+#define ML_THREE_AXIS_PRESSURE (0x1C00)
+
+#define ML_FIVE_AXIS (0x007B)
+#define ML_SIX_AXIS_GYRO_ACCEL (0x007F)
+#define ML_SIX_AXIS_ACCEL_COMPASS (0x03F0)
+#define ML_NINE_AXIS (0x03FF)
+#define ML_ALL_SENSORS (0x7FFF)
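+
+/*
+ * Example (illustrative only): a caller requesting the gyro, the DMP and a
+ * three-axis accelerometer would set
+ *
+ *	mldl_cfg->requested_sensors = ML_THREE_AXIS_GYRO | ML_THREE_AXIS_ACCEL;
+ *
+ * which equals ML_SIX_AXIS_GYRO_ACCEL (0x007F); ML_THREE_AXIS_GYRO already
+ * includes ML_DMP_PROCESSOR.
+ */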
+
+#define SAMPLING_RATE_HZ(mldl_cfg) \
+ ((((((mldl_cfg)->lpf) == 0) || (((mldl_cfg)->lpf) == 7)) \
+ ? (8000) \
+ : (1000)) \
+ / ((mldl_cfg)->divider + 1))
+
+#define SAMPLING_PERIOD_US(mldl_cfg) \
+ ((1000000L * ((mldl_cfg)->divider + 1)) / \
+ (((((mldl_cfg)->lpf) == 0) || (((mldl_cfg)->lpf) == 7)) \
+ ? (8000) \
+ : (1000)))
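+
+/*
+ * Worked example (illustrative only): with the defaults programmed by
+ * mpu3050_open() -- lpf = MPU_FILTER_42HZ (neither 0 nor 7) and
+ * divider = 4 -- the gyro base rate is 1000 Hz, so
+ *
+ *	SAMPLING_RATE_HZ(mldl_cfg)   = 1000 / (4 + 1)           = 200 Hz
+ *	SAMPLING_PERIOD_US(mldl_cfg) = 1000000 * (4 + 1) / 1000 = 5000 us
+ *
+ * With lpf equal to 0 or 7 the base rate is 8000 Hz instead.
+ */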
+/* --------------------- */
+/* - Variables. - */
+/* --------------------- */
+
+/* Driver-layer configuration and state for the MPU */
+struct mldl_cfg {
+ /* MPU related configuration */
+ unsigned long requested_sensors;
+ unsigned char ignore_system_suspend;
+ unsigned char addr;
+ unsigned char int_config;
+ unsigned char ext_sync;
+ unsigned char full_scale;
+ unsigned char lpf;
+ unsigned char clk_src;
+ unsigned char divider;
+ unsigned char dmp_enable;
+ unsigned char fifo_enable;
+ unsigned char dmp_cfg1;
+ unsigned char dmp_cfg2;
+ unsigned char gyro_power;
+ unsigned char offset_tc[MPU_NUM_AXES];
+ unsigned short offset[MPU_NUM_AXES];
+ unsigned char ram[MPU_MEM_NUM_RAM_BANKS][MPU_MEM_BANK_SIZE];
+
+ /* MPU Related stored status and info */
+ unsigned char silicon_revision;
+ unsigned char product_id;
+ unsigned short trim;
+
+ /* Driver/Kernel related state information */
+ int gyro_is_bypassed;
+ int dmp_is_running;
+ int gyro_is_suspended;
+ int accel_is_suspended;
+ int compass_is_suspended;
+ int pressure_is_suspended;
+ int gyro_needs_reset;
+
+ /* Slave related information */
+ struct ext_slave_descr *accel;
+ struct ext_slave_descr *compass;
+ struct ext_slave_descr *pressure;
+
+ /* Platform Data */
+ struct mpu3050_platform_data *pdata;
+};
+
+
+int mpu3050_open(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle);
+int mpu3050_close(struct mldl_cfg *mldl_cfg,
+ void *mlsl_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle);
+int mpu3050_resume(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ bool resume_gyro,
+ bool resume_accel,
+ bool resume_compass,
+ bool resume_pressure);
+int mpu3050_suspend(struct mldl_cfg *mldl_cfg,
+ void *gyro_handle,
+ void *accel_handle,
+ void *compass_handle,
+ void *pressure_handle,
+ bool suspend_gyro,
+ bool suspend_accel,
+ bool suspend_compass,
+ bool suspend_pressure);
+int mpu3050_read_accel(struct mldl_cfg *mldl_cfg,
+ void *accel_handle,
+ unsigned char *data);
+int mpu3050_read_compass(struct mldl_cfg *mldl_cfg,
+ void *compass_handle,
+ unsigned char *data);
+int mpu3050_read_pressure(struct mldl_cfg *mldl_cfg, void *pressure_handle,
+ unsigned char *data);
+
+int mpu3050_config_accel(struct mldl_cfg *mldl_cfg,
+ void *accel_handle,
+ struct ext_slave_config *data);
+int mpu3050_config_compass(struct mldl_cfg *mldl_cfg,
+ void *compass_handle,
+ struct ext_slave_config *data);
+int mpu3050_config_pressure(struct mldl_cfg *mldl_cfg,
+ void *pressure_handle,
+ struct ext_slave_config *data);
+
+int mpu3050_get_config_accel(struct mldl_cfg *mldl_cfg,
+ void *accel_handle,
+ struct ext_slave_config *data);
+int mpu3050_get_config_compass(struct mldl_cfg *mldl_cfg,
+ void *compass_handle,
+ struct ext_slave_config *data);
+int mpu3050_get_config_pressure(struct mldl_cfg *mldl_cfg,
+ void *pressure_handle,
+ struct ext_slave_config *data);
+
+
+#endif /* __MLDL_CFG_H__ */
+
+/**
+ *@}
+ */
diff --git a/drivers/misc/mpu3050/mlos-kernel.c b/drivers/misc/mpu3050/mlos-kernel.c
new file mode 100644
index 000000000000..ced9ba4f6f3c
--- /dev/null
+++ b/drivers/misc/mpu3050/mlos-kernel.c
@@ -0,0 +1,89 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+/**
+ * @defgroup MLOS
+ * @brief OS abstraction layer of the Motion Library
+ * (kernel-side implementation).
+ *
+ * @{
+ * @file mlos-kernel.c
+ * @brief Kernel implementations of the MLOS memory, mutex,
+ * file and timing wrappers declared in mlos.h.
+ */
+
+#include "mlos.h"
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+void *MLOSMalloc(unsigned int numBytes)
+{
+ return kmalloc(numBytes, GFP_KERNEL);
+}
+
+tMLError MLOSFree(void *ptr)
+{
+ kfree(ptr);
+ return ML_SUCCESS;
+}
+
+tMLError MLOSCreateMutex(HANDLE *mutex)
+{
+ /* @todo implement if needed */
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+tMLError MLOSLockMutex(HANDLE mutex)
+{
+ /* @todo implement if needed */
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+tMLError MLOSUnlockMutex(HANDLE mutex)
+{
+ /* @todo implement if needed */
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+tMLError MLOSDestroyMutex(HANDLE handle)
+{
+ /* @todo implement if needed */
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
+
+FILE *MLOSFOpen(char *filename)
+{
+ /* @todo implement if needed */
+ return NULL;
+}
+
+void MLOSFClose(FILE *fp)
+{
+ /* @todo implement if needed */
+}
+
+void MLOSSleep(int mSecs)
+{
+ msleep(mSecs);
+}
+
+unsigned long MLOSGetTickCount(void)
+{
+ /* @todo implement if needed */
+ return ML_ERROR_FEATURE_NOT_IMPLEMENTED;
+}
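
The mutex, file and tick-count helpers above are deliberate stubs that return ML_ERROR_FEATURE_NOT_IMPLEMENTED or NULL. Purely as an illustration (not part of the patch), MLOSGetTickCount could be backed by the kernel's jiffies counter if it were ever needed:

    /* Hypothetical implementation, shown for illustration only:
     * report elapsed time in milliseconds using jiffies
     * (requires <linux/jiffies.h>).
     */
    unsigned long MLOSGetTickCount(void)
    {
            return jiffies_to_msecs(jiffies);
    }
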
diff --git a/drivers/misc/mpu3050/mlos.h b/drivers/misc/mpu3050/mlos.h
new file mode 100644
index 000000000000..4ebb86c9fa5c
--- /dev/null
+++ b/drivers/misc/mpu3050/mlos.h
@@ -0,0 +1,73 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef _MLOS_H
+#define _MLOS_H
+
+#ifndef __KERNEL__
+#include <stdio.h>
+#endif
+
+#include "mltypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /* ------------ */
+ /* - Defines. - */
+ /* ------------ */
+
+ /* - MLOSCreateFile defines. - */
+
+#define MLOS_GENERIC_READ ((unsigned int)0x80000000)
+#define MLOS_GENERIC_WRITE ((unsigned int)0x40000000)
+#define MLOS_FILE_SHARE_READ ((unsigned int)0x00000001)
+#define MLOS_FILE_SHARE_WRITE ((unsigned int)0x00000002)
+#define MLOS_OPEN_EXISTING ((unsigned int)0x00000003)
+
+ /* ---------- */
+ /* - Enums. - */
+ /* ---------- */
+
+ /* --------------- */
+ /* - Structures. - */
+ /* --------------- */
+
+ /* --------------------- */
+ /* - Function p-types. - */
+ /* --------------------- */
+
+ void *MLOSMalloc(unsigned int numBytes);
+ tMLError MLOSFree(void *ptr);
+ tMLError MLOSCreateMutex(HANDLE *mutex);
+ tMLError MLOSLockMutex(HANDLE mutex);
+ tMLError MLOSUnlockMutex(HANDLE mutex);
+ FILE *MLOSFOpen(char *filename);
+ void MLOSFClose(FILE *fp);
+
+ tMLError MLOSDestroyMutex(HANDLE handle);
+
+ void MLOSSleep(int mSecs);
+ unsigned long MLOSGetTickCount(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MLOS_H */
diff --git a/drivers/misc/mpu3050/mlsl-kernel.c b/drivers/misc/mpu3050/mlsl-kernel.c
new file mode 100644
index 000000000000..cb1605131cbf
--- /dev/null
+++ b/drivers/misc/mpu3050/mlsl-kernel.c
@@ -0,0 +1,331 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#include "mlsl.h"
+#include "mpu-i2c.h"
+
+/* ------------ */
+/* - Defines. - */
+/* ------------ */
+
+/* ---------------------- */
+/* - Types definitions. - */
+/* ---------------------- */
+
+/* --------------------- */
+/* - Function p-types. - */
+/* --------------------- */
+
+/**
+ * @brief used to open the I2C or SPI serial port.
+ * This port is used to send and receive data to the MPU device.
+ * @param port
+ * The name of the port associated with the device in use.
+ * @param sl_handle
+ * Pointer in which to store the serial layer handle.
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+tMLError MLSLSerialOpen(char const *port, void **sl_handle)
+{
+ return ML_SUCCESS;
+}
+
+/**
+ * @brief used to reset any buffering the driver may be doing
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+tMLError MLSLSerialReset(void *sl_handle)
+{
+ return ML_SUCCESS;
+}
+
+/**
+ * @brief used to close the I2C or SPI serial port.
+ * This port is used to send and receive data to the MPU device.
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+tMLError MLSLSerialClose(void *sl_handle)
+{
+ return ML_SUCCESS;
+}
+
+/**
+ * @brief used to write a single byte of data.
+ * This should be sent by I2C or SPI.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param registerAddr Register address to write.
+ * @param data Single byte of data to write.
+ *
+ * @return ML_SUCCESS if the command is successful, an error code otherwise.
+ */
+tMLError MLSLSerialWriteSingle(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned char registerAddr,
+ unsigned char data)
+{
+ return sensor_i2c_write_register((struct i2c_adapter *) sl_handle,
+ slaveAddr, registerAddr, data);
+}
+
+
+/**
+ * @brief used to write multiple bytes of data to registers.
+ * This should be sent by I2C.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param length Length of burst of data, including the starting
+ * register address stored in data[0].
+ * @param data Pointer to block of data; data[0] holds the register
+ * address at which the burst starts.
+ *
+ * @return ML_SUCCESS if successful, a non-zero error code otherwise.
+ */
+tMLError MLSLSerialWrite(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short length,
+ unsigned char const *data)
+{
+ tMLError result;
+ const unsigned short dataLength = length - 1;
+ const unsigned char startRegAddr = data[0];
+ unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1];
+ unsigned short bytesWritten = 0;
+
+ while (bytesWritten < dataLength) {
+ unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE,
+ dataLength - bytesWritten);
+ if (bytesWritten == 0) {
+ result = sensor_i2c_write((struct i2c_adapter *)
+ sl_handle, slaveAddr,
+ 1 + thisLen, data);
+ } else {
+ /* manually increment register addr between chunks */
+ i2cWrite[0] = startRegAddr + bytesWritten;
+ memcpy(&i2cWrite[1], &data[1 + bytesWritten],
+ thisLen);
+ result = sensor_i2c_write((struct i2c_adapter *)
+ sl_handle, slaveAddr,
+ 1 + thisLen, i2cWrite);
+ }
+ if (ML_SUCCESS != result)
+ return result;
+ bytesWritten += thisLen;
+ }
+ return ML_SUCCESS;
+}
+
+
+/**
+ * @brief used to read multiple bytes of data from registers.
+ * This should be sent by I2C.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param registerAddr Register address to read.
+ * @param length Length of burst of data.
+ * @param data Pointer to block of data.
+ *
+ * @return Zero if successful; an error code otherwise
+ */
+tMLError MLSLSerialRead(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned char registerAddr,
+ unsigned short length, unsigned char *data)
+{
+ tMLError result;
+ unsigned short bytesRead = 0;
+
+ if (registerAddr == MPUREG_FIFO_R_W
+ || registerAddr == MPUREG_MEM_R_W) {
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ while (bytesRead < length) {
+ unsigned short thisLen =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead);
+ result =
+ sensor_i2c_read((struct i2c_adapter *) sl_handle,
+ slaveAddr, registerAddr + bytesRead,
+ thisLen, &data[bytesRead]);
+ if (ML_SUCCESS != result)
+ return result;
+ bytesRead += thisLen;
+ }
+ return ML_SUCCESS;
+}
+
+
+/**
+ * @brief used to write multiple bytes of data to the memory.
+ * This should be sent by I2C.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param memAddr The location in the memory to write to.
+ * @param length Length of burst data.
+ * @param data Pointer to block of data.
+ *
+ * @return Zero if successful; an error code otherwise
+ */
+tMLError MLSLSerialWriteMem(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short memAddr,
+ unsigned short length,
+ unsigned char const *data)
+{
+ tMLError result;
+ unsigned short bytesWritten = 0;
+
+ if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) {
+ pr_err("memory read length (%d B) extends beyond its"
+ " limits (%d) if started at location %d\n", length,
+ MPU_MEM_BANK_SIZE, memAddr & 0xFF);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ while (bytesWritten < length) {
+ unsigned short thisLen =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten);
+ result =
+ mpu_memory_write((struct i2c_adapter *) sl_handle,
+ slaveAddr, memAddr + bytesWritten,
+ thisLen, &data[bytesWritten]);
+ if (ML_SUCCESS != result)
+ return result;
+ bytesWritten += thisLen;
+ }
+ return ML_SUCCESS;
+}
+
+
+/**
+ * @brief used to read multiple bytes of data from the memory.
+ * This should be sent by I2C.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param memAddr The location in the memory to read from.
+ * @param length Length of burst data.
+ * @param data Pointer to block of data.
+ *
+ * @return Zero if successful; an error code otherwise
+ */
+tMLError MLSLSerialReadMem(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short memAddr,
+ unsigned short length, unsigned char *data)
+{
+ tMLError result;
+ unsigned short bytesRead = 0;
+
+ if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) {
+ pr_err("memory read length (%d B) extends beyond its"
+ " limits (%d) if started at location %d\n", length,
+ MPU_MEM_BANK_SIZE, memAddr & 0xFF);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ while (bytesRead < length) {
+ unsigned short thisLen =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead);
+ result =
+ mpu_memory_read((struct i2c_adapter *) sl_handle,
+ slaveAddr, memAddr + bytesRead,
+ thisLen, &data[bytesRead]);
+ if (ML_SUCCESS != result)
+ return result;
+ bytesRead += thisLen;
+ }
+ return ML_SUCCESS;
+}
+
+
+/**
+ * @brief used to write multiple bytes of data to the fifo.
+ * This should be sent by I2C.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param length Length of burst of data.
+ * @param data Pointer to block of data.
+ *
+ * @return Zero if successful; an error code otherwise
+ */
+tMLError MLSLSerialWriteFifo(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short length,
+ unsigned char const *data)
+{
+ tMLError result;
+ unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1];
+ unsigned short bytesWritten = 0;
+
+ if (length > FIFO_HW_SIZE) {
+ printk(KERN_ERR
+ "maximum fifo write length is %d\n", FIFO_HW_SIZE);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ while (bytesWritten < length) {
+ unsigned short thisLen =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten);
+ i2cWrite[0] = MPUREG_FIFO_R_W;
+ memcpy(&i2cWrite[1], &data[bytesWritten], thisLen);
+ result = sensor_i2c_write((struct i2c_adapter *) sl_handle,
+ slaveAddr, thisLen + 1,
+ i2cWrite);
+ if (ML_SUCCESS != result)
+ return result;
+ bytesWritten += thisLen;
+ }
+ return ML_SUCCESS;
+}
+
+
+/**
+ * @brief used to read multiple bytes of data from the fifo.
+ * This should be sent by I2C.
+ *
+ * @param slaveAddr I2C slave address of device.
+ * @param length Length of burst of data.
+ * @param data Pointer to block of data.
+ *
+ * @return Zero if successful; an error code otherwise
+ */
+tMLError MLSLSerialReadFifo(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short length, unsigned char *data)
+{
+ tMLError result;
+ unsigned short bytesRead = 0;
+
+ if (length > FIFO_HW_SIZE) {
+ printk(KERN_ERR
+ "maximum fifo read length is %d\n", FIFO_HW_SIZE);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ while (bytesRead < length) {
+ unsigned short thisLen =
+ min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead);
+ result =
+ sensor_i2c_read((struct i2c_adapter *) sl_handle,
+ slaveAddr, MPUREG_FIFO_R_W, thisLen,
+ &data[bytesRead]);
+ if (ML_SUCCESS != result)
+ return result;
+ bytesRead += thisLen;
+ }
+
+ return ML_SUCCESS;
+}
+
+/**
+ * @}
+ */
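
Purely as an illustration (not part of the patch), a caller reads a register block through this serial layer as shown below; MPUREG_WHO_AM_I is assumed to be defined in the MPU register map header, and the serial handle is the i2c_adapter of the bus the MPU sits on.

    /* Illustrative only: read and print the WHO_AM_I register. */
    static int example_whoami(struct i2c_adapter *adapter,
                              unsigned char mpu_addr)
    {
            unsigned char id;
            tMLError res;

            res = MLSLSerialRead(adapter, mpu_addr, MPUREG_WHO_AM_I, 1, &id);
            if (res != ML_SUCCESS)
                    return res;

            pr_info("MPU WHO_AM_I: 0x%02x\n", id);
            return ML_SUCCESS;
    }
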
diff --git a/drivers/misc/mpu3050/mlsl.h b/drivers/misc/mpu3050/mlsl.h
new file mode 100644
index 000000000000..76f69c77ba98
--- /dev/null
+++ b/drivers/misc/mpu3050/mlsl.h
@@ -0,0 +1,103 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MLSL_H__
+#define __MLSL_H__
+
+#include "mltypes.h"
+#include "mpu.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------ */
+/* - Defines. - */
+/* ------------ */
+
+/*
+ * NOTE : to properly support Yamaha compass reads,
+ * the max transfer size should be at least 9 B.
+ * Length in bytes, typically a power of 2 >= 2
+ */
+#define SERIAL_MAX_TRANSFER_SIZE 128
+
+/* ---------------------- */
+/* - Types definitions. - */
+/* ---------------------- */
+
+/* --------------------- */
+/* - Function p-types. - */
+/* --------------------- */
+
+ tMLError MLSLSerialOpen(char const *port,
+ void **sl_handle);
+ tMLError MLSLSerialReset(void *sl_handle);
+ tMLError MLSLSerialClose(void *sl_handle);
+
+ tMLError MLSLSerialWriteSingle(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned char registerAddr,
+ unsigned char data);
+
+ tMLError MLSLSerialRead(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned char registerAddr,
+ unsigned short length,
+ unsigned char *data);
+
+ tMLError MLSLSerialWrite(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short length,
+ unsigned char const *data);
+
+ tMLError MLSLSerialReadMem(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short memAddr,
+ unsigned short length,
+ unsigned char *data);
+
+ tMLError MLSLSerialWriteMem(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short memAddr,
+ unsigned short length,
+ unsigned char const *data);
+
+ tMLError MLSLSerialReadFifo(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short length,
+ unsigned char *data);
+
+ tMLError MLSLSerialWriteFifo(void *sl_handle,
+ unsigned char slaveAddr,
+ unsigned short length,
+ unsigned char const *data);
+
+ tMLError MLSLWriteCal(unsigned char *cal, unsigned int len);
+ tMLError MLSLReadCal(unsigned char *cal, unsigned int len);
+ tMLError MLSLGetCalLength(unsigned int *len);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**
+ * @}
+ */
+#endif /* __MLSL_H__ */
diff --git a/drivers/misc/mpu3050/mltypes.h b/drivers/misc/mpu3050/mltypes.h
new file mode 100644
index 000000000000..d0b27fa89e78
--- /dev/null
+++ b/drivers/misc/mpu3050/mltypes.h
@@ -0,0 +1,227 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup MLERROR
+ * @brief Motion Library - Error definitions.
+ * Definition of the error codes used within the MPL and returned
+ * to the user.
+ * Every function tries to return a meaningful error code based
+ * on the error condition that occurred. The error code is numeric.
+ *
+ * The available error codes and their associated values are:
+ * - (0) ML_SUCCESS
+ * - (1) ML_ERROR
+ * - (2) ML_ERROR_INVALID_PARAMETER
+ * - (3) ML_ERROR_FEATURE_NOT_ENABLED
+ * - (4) ML_ERROR_FEATURE_NOT_IMPLEMENTED
+ * - (6) ML_ERROR_DMP_NOT_STARTED
+ * - (7) ML_ERROR_DMP_STARTED
+ * - (8) ML_ERROR_NOT_OPENED
+ * - (9) ML_ERROR_OPENED
+ * - (10) ML_ERROR_INVALID_MODULE
+ * - (11) ML_ERROR_MEMORY_EXAUSTED
+ * - (12) ML_ERROR_DIVIDE_BY_ZERO
+ * - (13) ML_ERROR_ASSERTION_FAILURE
+ * - (14) ML_ERROR_FILE_OPEN
+ * - (15) ML_ERROR_FILE_READ
+ * - (16) ML_ERROR_FILE_WRITE
+ * - (17) ML_ERROR_INVALID_CONFIGURATION
+ * - (20) ML_ERROR_SERIAL_CLOSED
+ * - (21) ML_ERROR_SERIAL_OPEN_ERROR
+ * - (22) ML_ERROR_SERIAL_READ
+ * - (23) ML_ERROR_SERIAL_WRITE
+ * - (24) ML_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED
+ * - (25) ML_ERROR_SM_TRANSITION
+ * - (26) ML_ERROR_SM_IMPROPER_STATE
+ * - (30) ML_ERROR_FIFO_OVERFLOW
+ * - (31) ML_ERROR_FIFO_FOOTER
+ * - (32) ML_ERROR_FIFO_READ_COUNT
+ * - (33) ML_ERROR_FIFO_READ_DATA
+ * - (40) ML_ERROR_MEMORY_SET
+ * - (50) ML_ERROR_LOG_MEMORY_ERROR
+ * - (51) ML_ERROR_LOG_OUTPUT_ERROR
+ * - (60) ML_ERROR_OS_BAD_PTR
+ * - (61) ML_ERROR_OS_BAD_HANDLE
+ * - (62) ML_ERROR_OS_CREATE_FAILED
+ * - (63) ML_ERROR_OS_LOCK_FAILED
+ * - (70) ML_ERROR_COMPASS_DATA_OVERFLOW
+ * - (71) ML_ERROR_COMPASS_DATA_UNDERFLOW
+ * - (72) ML_ERROR_COMPASS_DATA_NOT_READY
+ * - (73) ML_ERROR_COMPASS_DATA_ERROR
+ * - (75) ML_ERROR_CALIBRATION_LOAD
+ * - (76) ML_ERROR_CALIBRATION_STORE
+ * - (77) ML_ERROR_CALIBRATION_LEN
+ * - (78) ML_ERROR_CALIBRATION_CHECKSUM
+ * - (79) ML_ERROR_ACCEL_DATA_OVERFLOW
+ * - (80) ML_ERROR_ACCEL_DATA_UNDERFLOW
+ * - (81) ML_ERROR_ACCEL_DATA_NOT_READY
+ * - (82) ML_ERROR_ACCEL_DATA_ERROR
+ *
+ * @{
+ * @file mltypes.h
+ * @}
+ */
+
+#ifndef MLTYPES_H
+#define MLTYPES_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include "stdint_invensense.h"
+#endif
+#include "log.h"
+
+/*---------------------------
+ ML Types
+---------------------------*/
+
+/**
+ * @struct tMLError mltypes.h "mltypes"
+ * @brief The MPL Error Code return type.
+ *
+ * @code
+ * typedef unsigned char tMLError;
+ * @endcode
+ */
+typedef unsigned char tMLError;
+
+#if defined(LINUX) || defined(__KERNEL__)
+typedef unsigned int HANDLE;
+#endif
+
+#ifdef __KERNEL__
+typedef HANDLE FILE;
+#endif
+
+#ifndef __cplusplus
+#ifndef __KERNEL__
+typedef int_fast8_t bool;
+#endif
+#endif
+
+/*---------------------------
+ ML Defines
+---------------------------*/
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* Dimension of an array */
+#ifndef DIM
+#define DIM(array) (sizeof(array)/sizeof((array)[0]))
+#endif
+
+/* - ML Errors. - */
+#define ERROR_NAME(x) (#x)
+#define ERROR_CHECK(x) \
+ do { \
+ if (ML_SUCCESS != (x)) { \
+ MPL_LOGE("%s|%s|%d returning %d\n", \
+ __FILE__, __func__, __LINE__, (x)); \
+ return (x); \
+ } \
+ } while (0)
+
+#define ERROR_CHECK_FIRST(first, x) \
+ { if (ML_SUCCESS == first) first = x; }
+
+#define ML_SUCCESS (0)
+/* Generic Error code. Proprietary Error Codes only */
+#define ML_ERROR (1)
+
+/* Compatibility and other generic error codes */
+#define ML_ERROR_INVALID_PARAMETER (2)
+#define ML_ERROR_FEATURE_NOT_ENABLED (3)
+#define ML_ERROR_FEATURE_NOT_IMPLEMENTED (4)
+#define ML_ERROR_DMP_NOT_STARTED (6)
+#define ML_ERROR_DMP_STARTED (7)
+#define ML_ERROR_NOT_OPENED (8)
+#define ML_ERROR_OPENED (9)
+#define ML_ERROR_INVALID_MODULE (10)
+#define ML_ERROR_MEMORY_EXAUSTED (11)
+#define ML_ERROR_DIVIDE_BY_ZERO (12)
+#define ML_ERROR_ASSERTION_FAILURE (13)
+#define ML_ERROR_FILE_OPEN (14)
+#define ML_ERROR_FILE_READ (15)
+#define ML_ERROR_FILE_WRITE (16)
+#define ML_ERROR_INVALID_CONFIGURATION (17)
+
+/* Serial Communication */
+#define ML_ERROR_SERIAL_CLOSED (20)
+#define ML_ERROR_SERIAL_OPEN_ERROR (21)
+#define ML_ERROR_SERIAL_READ (22)
+#define ML_ERROR_SERIAL_WRITE (23)
+#define ML_ERROR_SERIAL_DEVICE_NOT_RECOGNIZED (24)
+
+/* SM = State Machine */
+#define ML_ERROR_SM_TRANSITION (25)
+#define ML_ERROR_SM_IMPROPER_STATE (26)
+
+/* Fifo */
+#define ML_ERROR_FIFO_OVERFLOW (30)
+#define ML_ERROR_FIFO_FOOTER (31)
+#define ML_ERROR_FIFO_READ_COUNT (32)
+#define ML_ERROR_FIFO_READ_DATA (33)
+
+/* Memory & Registers, Set & Get */
+#define ML_ERROR_MEMORY_SET (40)
+
+#define ML_ERROR_LOG_MEMORY_ERROR (50)
+#define ML_ERROR_LOG_OUTPUT_ERROR (51)
+
+/* OS interface errors */
+#define ML_ERROR_OS_BAD_PTR (60)
+#define ML_ERROR_OS_BAD_HANDLE (61)
+#define ML_ERROR_OS_CREATE_FAILED (62)
+#define ML_ERROR_OS_LOCK_FAILED (63)
+
+/* Compass errors */
+#define ML_ERROR_COMPASS_DATA_OVERFLOW (70)
+#define ML_ERROR_COMPASS_DATA_UNDERFLOW (71)
+#define ML_ERROR_COMPASS_DATA_NOT_READY (72)
+#define ML_ERROR_COMPASS_DATA_ERROR (73)
+
+/* Load/Store calibration */
+#define ML_ERROR_CALIBRATION_LOAD (75)
+#define ML_ERROR_CALIBRATION_STORE (76)
+#define ML_ERROR_CALIBRATION_LEN (77)
+#define ML_ERROR_CALIBRATION_CHECKSUM (78)
+
+/* Accel errors */
+#define ML_ERROR_ACCEL_DATA_OVERFLOW (79)
+#define ML_ERROR_ACCEL_DATA_UNDERFLOW (80)
+#define ML_ERROR_ACCEL_DATA_NOT_READY (81)
+#define ML_ERROR_ACCEL_DATA_ERROR (82)
+
+/* For Linux coding compliance */
+#ifndef __KERNEL__
+#define EXPORT_SYMBOL(x)
+#endif
+
+/*---------------------------
+ p-Types
+---------------------------*/
+
+#endif /* MLTYPES_H */
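
Purely as an illustration (not part of the patch), the ERROR_CHECK macro defined above is typically used to bail out of an MPL call chain on the first failure; the register addresses below are hypothetical placeholders:

    /* Illustrative only: propagate the first serial-layer error. */
    static tMLError example_configure(void *sl_handle, unsigned char mpu_addr)
    {
            tMLError result;

            result = MLSLSerialWriteSingle(sl_handle, mpu_addr,
                                           0x3e /* hypothetical reg */, 0x00);
            ERROR_CHECK(result);

            result = MLSLSerialWriteSingle(sl_handle, mpu_addr,
                                           0x16 /* hypothetical reg */, 0x18);
            ERROR_CHECK(result);

            return ML_SUCCESS;
    }
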
diff --git a/drivers/misc/mpu3050/mpu-dev.c b/drivers/misc/mpu3050/mpu-dev.c
new file mode 100644
index 000000000000..4dba44d45483
--- /dev/null
+++ b/drivers/misc/mpu3050/mpu-dev.c
@@ -0,0 +1,1310 @@
+/*
+ mpu-dev.c - mpu3050 char device interface
+
+ Copyright (C) 1995-97 Simon G. Vogl
+ Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
+ Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
+
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/poll.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "mpuirq.h"
+#include "slaveirq.h"
+#include "mlsl.h"
+#include "mpu-i2c.h"
+#include "mldl_cfg.h"
+#include "mpu.h"
+
+#define MPU3050_EARLY_SUSPEND_IN_DRIVER 0
+
+/* Platform data for the MPU */
+struct mpu_private_data {
+ struct mldl_cfg mldl_cfg;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ struct mutex mutex;
+ wait_queue_head_t mpu_event_wait;
+ struct completion completion;
+ struct timer_list timeout;
+ struct notifier_block nb;
+ struct mpuirq_data mpu_pm_event;
+ int response_timeout; /* In seconds */
+ unsigned long event;
+ int pid;
+};
+
+static struct i2c_client *this_client;
+
+
+static void
+mpu_pm_timeout(u_long data)
+{
+ struct mpu_private_data *mpu = (struct mpu_private_data *) data;
+ dev_dbg(&this_client->adapter->dev, "%s\n", __func__);
+ complete(&mpu->completion);
+}
+
+static int mpu_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long event,
+ void *unused)
+{
+ struct mpu_private_data *mpu =
+ container_of(nb, struct mpu_private_data, nb);
+ struct timeval event_time;
+ dev_dbg(&this_client->adapter->dev, "%s: %ld\n", __func__, event);
+
+ /* Prevent the file handle from being closed before we initialize
+ the completion event */
+ mutex_lock(&mpu->mutex);
+ if (!(mpu->pid) ||
+ (event != PM_SUSPEND_PREPARE && event != PM_POST_SUSPEND)) {
+ mutex_unlock(&mpu->mutex);
+ return NOTIFY_OK;
+ }
+
+ do_gettimeofday(&event_time);
+ mpu->mpu_pm_event.interruptcount++;
+ mpu->mpu_pm_event.irqtime =
+ (((long long) event_time.tv_sec) << 32) +
+ event_time.tv_usec;
+ mpu->mpu_pm_event.data_type = MPUIRQ_DATA_TYPE_PM_EVENT;
+ mpu->mpu_pm_event.data_size = sizeof(unsigned long);
+ mpu->mpu_pm_event.data = &mpu->event;
+
+ if (event == PM_SUSPEND_PREPARE)
+ mpu->event = MPU_PM_EVENT_SUSPEND_PREPARE;
+ if (event == PM_POST_SUSPEND)
+ mpu->event = MPU_PM_EVENT_POST_SUSPEND;
+
+ if (mpu->response_timeout > 0) {
+ mpu->timeout.expires = jiffies + mpu->response_timeout * HZ;
+ add_timer(&mpu->timeout);
+ }
+ INIT_COMPLETION(mpu->completion);
+ mutex_unlock(&mpu->mutex);
+
+ wake_up_interruptible(&mpu->mpu_event_wait);
+ wait_for_completion(&mpu->completion);
+ del_timer_sync(&mpu->timeout);
+ dev_dbg(&this_client->adapter->dev, "%s: %ld DONE\n", __func__, event);
+ return NOTIFY_OK;
+}
+
+static int mpu_open(struct inode *inode, struct file *file)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(this_client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ int result;
+ dev_dbg(&this_client->adapter->dev, "mpu_open\n");
+ dev_dbg(&this_client->adapter->dev, "current->pid %d\n",
+ current->pid);
+ mpu->pid = current->pid;
+ file->private_data = this_client;
+ /* we could do some checking on the flags supplied by "open" */
+ /* i.e. O_NONBLOCK */
+ /* -> set some flag to disable interruptible_sleep_on in mpu_read */
+
+ /* Reset the sensors to the default */
+ result = mutex_lock_interruptible(&mpu->mutex);
+ if (result) {
+ dev_err(&this_client->adapter->dev,
+ "%s: mutex_lock_interruptible returned %d\n",
+ __func__, result);
+ return result;
+ }
+ mldl_cfg->requested_sensors = ML_THREE_AXIS_GYRO;
+ if (mldl_cfg->accel && mldl_cfg->accel->resume)
+ mldl_cfg->requested_sensors |= ML_THREE_AXIS_ACCEL;
+
+ if (mldl_cfg->compass && mldl_cfg->compass->resume)
+ mldl_cfg->requested_sensors |= ML_THREE_AXIS_COMPASS;
+
+ if (mldl_cfg->pressure && mldl_cfg->pressure->resume)
+ mldl_cfg->requested_sensors |= ML_THREE_AXIS_PRESSURE;
+ mutex_unlock(&mpu->mutex);
+ return 0;
+}
+
+/* close function - called when the "file" /dev/mpu is closed in userspace */
+static int mpu_release(struct inode *inode, struct file *file)
+{
+ struct i2c_client *client =
+ (struct i2c_client *) file->private_data;
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+ int result = 0;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter = i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter = i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ mutex_lock(&mpu->mutex);
+ result = mpu3050_suspend(mldl_cfg, client->adapter,
+ accel_adapter, compass_adapter,
+ pressure_adapter,
+ TRUE, TRUE, TRUE, TRUE);
+ mpu->pid = 0;
+ mutex_unlock(&mpu->mutex);
+ complete(&mpu->completion);
+ dev_dbg(&this_client->adapter->dev, "mpu_release\n");
+ return result;
+}
+
+/* read function - called when /dev/mpu is read; returns queued PM event data */
+static ssize_t mpu_read(struct file *file,
+ char __user *buf, size_t count, loff_t *offset)
+{
+ struct mpuirq_data local_mpu_pm_event;
+ struct i2c_client *client =
+ (struct i2c_client *) file->private_data;
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ size_t len = sizeof(mpu->mpu_pm_event) + sizeof(unsigned long);
+ int err;
+
+ if (!mpu->event && (!(file->f_flags & O_NONBLOCK)))
+ wait_event_interruptible(mpu->mpu_event_wait, mpu->event);
+
+ if (!mpu->event || NULL == buf
+ || count < sizeof(mpu->mpu_pm_event) + sizeof(unsigned long))
+ return 0;
+
+ err = copy_from_user(&local_mpu_pm_event, buf,
+ sizeof(mpu->mpu_pm_event));
+ if (err != 0) {
+ dev_err(&this_client->adapter->dev,
+ "Copy from user returned %d\n", err);
+ return -EFAULT;
+ }
+
+ mpu->mpu_pm_event.data = local_mpu_pm_event.data;
+ err = copy_to_user((unsigned long __user *)local_mpu_pm_event.data,
+ &mpu->event,
+ sizeof(mpu->event));
+ if (err != 0) {
+ dev_err(&this_client->adapter->dev,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ err = copy_to_user(buf, &mpu->mpu_pm_event, sizeof(mpu->mpu_pm_event));
+ if (err != 0) {
+ dev_err(&this_client->adapter->dev,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ mpu->event = 0;
+ return len;
+}
+
+static unsigned int mpu_poll(struct file *file, struct poll_table_struct *poll)
+{
+ struct i2c_client *client =
+ (struct i2c_client *) file->private_data;
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ int mask = 0;
+
+ poll_wait(file, &mpu->mpu_event_wait, poll);
+ if (mpu->event)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+static int
+mpu_ioctl_set_mpu_pdata(struct i2c_client *client, unsigned long arg)
+{
+ int ii;
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mpu3050_platform_data *pdata = mpu->mldl_cfg.pdata;
+ struct mpu3050_platform_data local_pdata;
+
+ if (copy_from_user(&local_pdata, (unsigned char __user *) arg,
+ sizeof(local_pdata)))
+ return -EFAULT;
+
+ pdata->int_config = local_pdata.int_config;
+ for (ii = 0; ii < DIM(pdata->orientation); ii++)
+ pdata->orientation[ii] = local_pdata.orientation[ii];
+ pdata->level_shifter = local_pdata.level_shifter;
+
+ pdata->accel.address = local_pdata.accel.address;
+ for (ii = 0; ii < DIM(pdata->accel.orientation); ii++)
+ pdata->accel.orientation[ii] =
+ local_pdata.accel.orientation[ii];
+
+ pdata->compass.address = local_pdata.compass.address;
+ for (ii = 0; ii < DIM(pdata->compass.orientation); ii++)
+ pdata->compass.orientation[ii] =
+ local_pdata.compass.orientation[ii];
+
+ pdata->pressure.address = local_pdata.pressure.address;
+ for (ii = 0; ii < DIM(pdata->pressure.orientation); ii++)
+ pdata->pressure.orientation[ii] =
+ local_pdata.pressure.orientation[ii];
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ return ML_SUCCESS;
+}
+
+static int
+mpu_ioctl_set_mpu_config(struct i2c_client *client, unsigned long arg)
+{
+ int ii;
+ int result = ML_SUCCESS;
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct mldl_cfg *temp_mldl_cfg;
+
+ dev_dbg(&this_client->adapter->dev, "%s\n", __func__);
+
+ temp_mldl_cfg = kzalloc(sizeof(struct mldl_cfg), GFP_KERNEL);
+ if (NULL == temp_mldl_cfg)
+ return -ENOMEM;
+
+ /*
+ * User space is not allowed to modify the accel, compass, pressure
+ * or pdata structs, nor the silicon_revision, product_id or trim fields
+ */
+ if (copy_from_user(temp_mldl_cfg, (struct mldl_cfg __user *) arg,
+ offsetof(struct mldl_cfg, silicon_revision))) {
+ result = -EFAULT;
+ goto out;
+ }
+
+ if (mldl_cfg->gyro_is_suspended) {
+ if (mldl_cfg->addr != temp_mldl_cfg->addr)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->int_config != temp_mldl_cfg->int_config)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->ext_sync != temp_mldl_cfg->ext_sync)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->full_scale != temp_mldl_cfg->full_scale)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->lpf != temp_mldl_cfg->lpf)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->clk_src != temp_mldl_cfg->clk_src)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->divider != temp_mldl_cfg->divider)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->dmp_enable != temp_mldl_cfg->dmp_enable)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->fifo_enable != temp_mldl_cfg->fifo_enable)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->dmp_cfg1 != temp_mldl_cfg->dmp_cfg1)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->dmp_cfg2 != temp_mldl_cfg->dmp_cfg2)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (mldl_cfg->gyro_power != temp_mldl_cfg->gyro_power)
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ for (ii = 0; ii < MPU_NUM_AXES; ii++)
+ if (mldl_cfg->offset_tc[ii] !=
+ temp_mldl_cfg->offset_tc[ii])
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ for (ii = 0; ii < MPU_NUM_AXES; ii++)
+ if (mldl_cfg->offset[ii] != temp_mldl_cfg->offset[ii])
+ mldl_cfg->gyro_needs_reset = TRUE;
+
+ if (memcmp(mldl_cfg->ram, temp_mldl_cfg->ram,
+ MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE *
+ sizeof(unsigned char)))
+ mldl_cfg->gyro_needs_reset = TRUE;
+ }
+
+ memcpy(mldl_cfg, temp_mldl_cfg,
+ offsetof(struct mldl_cfg, silicon_revision));
+
+out:
+ kfree(temp_mldl_cfg);
+ return result;
+}
+
+static int
+mpu_ioctl_get_mpu_config(struct i2c_client *client, unsigned long arg)
+{
+ /* Have to be careful as there are 4 pointers (accel, compass,
+ * pressure and pdata) in the mldl_cfg structure */
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct mldl_cfg *local_mldl_cfg;
+ int retval = 0;
+
+ local_mldl_cfg = kzalloc(sizeof(struct mldl_cfg), GFP_KERNEL);
+ if (NULL == local_mldl_cfg)
+ return -ENOMEM;
+
+ retval =
+ copy_from_user(local_mldl_cfg, (struct mldl_cfg __user *) arg,
+ sizeof(struct mldl_cfg));
+ if (retval) {
+ dev_err(&this_client->adapter->dev,
+ "%s|%s:%d: EFAULT on arg\n",
+ __FILE__, __func__, __LINE__);
+ retval = -EFAULT;
+ goto out;
+ }
+
+ /* Fill in the accel, compass, pressure and pdata pointers */
+ if (mldl_cfg->accel) {
+ retval = copy_to_user((void __user *)local_mldl_cfg->accel,
+ mldl_cfg->accel,
+ sizeof(*mldl_cfg->accel));
+ if (retval) {
+ dev_err(&this_client->adapter->dev,
+ "%s|%s:%d: EFAULT on accel\n",
+ __FILE__, __func__, __LINE__);
+ retval = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (mldl_cfg->compass) {
+ retval = copy_to_user((void __user *)local_mldl_cfg->compass,
+ mldl_cfg->compass,
+ sizeof(*mldl_cfg->compass));
+ if (retval) {
+ dev_err(&this_client->adapter->dev,
+ "%s|%s:%d: EFAULT on compass\n",
+ __FILE__, __func__, __LINE__);
+ retval = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (mldl_cfg->pressure) {
+ retval = copy_to_user((void __user *)local_mldl_cfg->pressure,
+ mldl_cfg->pressure,
+ sizeof(*mldl_cfg->pressure));
+ if (retval) {
+ dev_err(&this_client->adapter->dev,
+ "%s|%s:%d: EFAULT on pressure\n",
+ __FILE__, __func__, __LINE__);
+ retval = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (mldl_cfg->pdata) {
+ retval = copy_to_user((void __user *)local_mldl_cfg->pdata,
+ mldl_cfg->pdata,
+ sizeof(*mldl_cfg->pdata));
+ if (retval) {
+ dev_err(&this_client->adapter->dev,
+ "%s|%s:%d: EFAULT on pdata\n",
+ __FILE__, __func__, __LINE__);
+ retval = -EFAULT;
+ goto out;
+ }
+ }
+
+ /* Do not modify the accel, compass, pressure and pdata pointers */
+ retval = copy_to_user((struct mldl_cfg __user *) arg,
+ mldl_cfg, offsetof(struct mldl_cfg, accel));
+
+ if (retval)
+ retval = -EFAULT;
+out:
+ kfree(local_mldl_cfg);
+ return retval;
+}
+
+/**
+ * Pass a requested slave configuration to the slave sensor
+ *
+ * @param adapter the adapter to use to communicate with the slave
+ * @param mldl_cfg the mldl configuration structure
+ * @param slave pointer to the slave descriptor
+ * @param usr_config The configuration to pass to the slave sensor
+ *
+ * @return 0 or non-zero error code
+ */
+static int slave_config(void *adapter,
+ struct mldl_cfg *mldl_cfg,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config __user *usr_config)
+{
+ int retval = ML_SUCCESS;
+ struct ext_slave_config config;
+ if ((!slave) || (!slave->config))
+ return retval;
+
+ retval = copy_from_user(&config, usr_config, sizeof(config));
+ if (retval)
+ return -EFAULT;
+
+ if (config.len && config.data) {
+ int *data;
+ data = kzalloc(config.len, GFP_KERNEL);
+ if (!data)
+ return ML_ERROR_MEMORY_EXAUSTED;
+
+ retval = copy_from_user(data,
+ (void __user *)config.data,
+ config.len);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ config.data = data;
+ }
+ retval = slave->config(adapter, slave, pdata, &config);
+ kfree(config.data);
+ return retval;
+}
+
+/**
+ * Get a requested slave configuration from the slave sensor
+ *
+ * @param adapter the adapter to use to communicate with the slave
+ * @param mldl_cfg the mldl configuration structure
+ * @param slave pointer to the slave descriptor
+ * @param usr_config The configuration for the slave to fill out
+ *
+ * @return 0 or non-zero error code
+ */
+static int slave_get_config(void *adapter,
+ struct mldl_cfg *mldl_cfg,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config __user *usr_config)
+{
+ int retval = ML_SUCCESS;
+ struct ext_slave_config config;
+ void *user_data;
+ if (!(slave) || !(slave->get_config))
+ return ML_SUCCESS;
+
+ retval = copy_from_user(&config, usr_config, sizeof(config));
+ if (retval)
+ return -EFAULT;
+
+ user_data = config.data;
+ if (config.len && config.data) {
+ int *data;
+ data = kzalloc(config.len, GFP_KERNEL);
+ if (!data)
+ return ML_ERROR_MEMORY_EXAUSTED;
+
+ retval = copy_from_user(data,
+ (void __user *)config.data,
+ config.len);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ config.data = data;
+ }
+ retval = slave->get_config(adapter, slave, pdata, &config);
+ if (retval) {
+ kfree(config.data);
+ return retval;
+ }
+ retval = copy_to_user((unsigned char __user *) user_data,
+ config.data,
+ config.len);
+ kfree(config.data);
+ return retval;
+}
+
+static int mpu_handle_mlsl(void *sl_handle,
+ unsigned char addr,
+ unsigned int cmd,
+ struct mpu_read_write __user *usr_msg)
+{
+ int retval = ML_SUCCESS;
+ struct mpu_read_write msg;
+ unsigned char *user_data;
+ retval = copy_from_user(&msg, usr_msg, sizeof(msg));
+ if (retval)
+ return -EFAULT;
+
+ user_data = msg.data;
+ if (msg.length && msg.data) {
+ unsigned char *data;
+ data = kzalloc(msg.length, GFP_KERNEL);
+ if (!data)
+ return ML_ERROR_MEMORY_EXAUSTED;
+
+ retval = copy_from_user(data,
+ (void __user *)msg.data,
+ msg.length);
+ if (retval) {
+ retval = -EFAULT;
+ kfree(data);
+ return retval;
+ }
+ msg.data = data;
+ } else {
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ switch (cmd) {
+ case MPU_READ:
+ retval = MLSLSerialRead(sl_handle, addr,
+ msg.address, msg.length, msg.data);
+ break;
+ case MPU_WRITE:
+ retval = MLSLSerialWrite(sl_handle, addr,
+ msg.length, msg.data);
+ break;
+ case MPU_READ_MEM:
+ retval = MLSLSerialReadMem(sl_handle, addr,
+ msg.address, msg.length, msg.data);
+ break;
+ case MPU_WRITE_MEM:
+ retval = MLSLSerialWriteMem(sl_handle, addr,
+ msg.address, msg.length, msg.data);
+ break;
+ case MPU_READ_FIFO:
+ retval = MLSLSerialReadFifo(sl_handle, addr,
+ msg.length, msg.data);
+ break;
+ case MPU_WRITE_FIFO:
+ retval = MLSLSerialWriteFifo(sl_handle, addr,
+ msg.length, msg.data);
+ break;
+
+ }
+ retval = copy_to_user((unsigned char __user *) user_data,
+ msg.data,
+ msg.length);
+ kfree(msg.data);
+ return retval;
+}
+
+/* ioctl - I/O control */
+static long mpu_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client =
+ (struct i2c_client *) file->private_data;
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ int retval = 0;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ retval = mutex_lock_interruptible(&mpu->mutex);
+ if (retval) {
+ dev_err(&this_client->adapter->dev,
+ "%s: mutex_lock_interruptible returned %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ switch (cmd) {
+ case MPU_SET_MPU_CONFIG:
+ retval = mpu_ioctl_set_mpu_config(client, arg);
+ break;
+ case MPU_SET_PLATFORM_DATA:
+ retval = mpu_ioctl_set_mpu_pdata(client, arg);
+ break;
+ case MPU_GET_MPU_CONFIG:
+ retval = mpu_ioctl_get_mpu_config(client, arg);
+ break;
+ case MPU_READ:
+ case MPU_WRITE:
+ case MPU_READ_MEM:
+ case MPU_WRITE_MEM:
+ case MPU_READ_FIFO:
+ case MPU_WRITE_FIFO:
+ retval = mpu_handle_mlsl(client->adapter, mldl_cfg->addr, cmd,
+ (struct mpu_read_write __user *) arg);
+ break;
+ case MPU_CONFIG_ACCEL:
+ retval = slave_config(accel_adapter, mldl_cfg,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel,
+ (struct ext_slave_config __user *) arg);
+ break;
+ case MPU_CONFIG_COMPASS:
+ retval = slave_config(compass_adapter, mldl_cfg,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass,
+ (struct ext_slave_config __user *) arg);
+ break;
+ case MPU_CONFIG_PRESSURE:
+ retval = slave_config(pressure_adapter, mldl_cfg,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure,
+ (struct ext_slave_config __user *) arg);
+ break;
+ case MPU_GET_CONFIG_ACCEL:
+ retval = slave_get_config(accel_adapter, mldl_cfg,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel,
+ (struct ext_slave_config __user *) arg);
+ break;
+ case MPU_GET_CONFIG_COMPASS:
+ retval = slave_get_config(compass_adapter, mldl_cfg,
+ mldl_cfg->compass,
+ &mldl_cfg->pdata->compass,
+ (struct ext_slave_config __user *) arg);
+ break;
+ case MPU_GET_CONFIG_PRESSURE:
+ retval = slave_get_config(pressure_adapter, mldl_cfg,
+ mldl_cfg->pressure,
+ &mldl_cfg->pdata->pressure,
+ (struct ext_slave_config __user *) arg);
+ break;
+ case MPU_SUSPEND:
+ {
+ unsigned long sensors;
+ sensors = ~(mldl_cfg->requested_sensors);
+ retval = mpu3050_suspend(mldl_cfg,
+ client->adapter,
+ accel_adapter,
+ compass_adapter,
+ pressure_adapter,
+ ((sensors & ML_THREE_AXIS_GYRO)
+ == ML_THREE_AXIS_GYRO),
+ ((sensors & ML_THREE_AXIS_ACCEL)
+ == ML_THREE_AXIS_ACCEL),
+ ((sensors & ML_THREE_AXIS_COMPASS)
+ == ML_THREE_AXIS_COMPASS),
+ ((sensors & ML_THREE_AXIS_PRESSURE)
+ == ML_THREE_AXIS_PRESSURE));
+ }
+ break;
+ case MPU_RESUME:
+ {
+ unsigned long sensors;
+ sensors = mldl_cfg->requested_sensors;
+ retval = mpu3050_resume(mldl_cfg,
+ client->adapter,
+ accel_adapter,
+ compass_adapter,
+ pressure_adapter,
+ sensors & ML_THREE_AXIS_GYRO,
+ sensors & ML_THREE_AXIS_ACCEL,
+ sensors & ML_THREE_AXIS_COMPASS,
+ sensors & ML_THREE_AXIS_PRESSURE);
+ }
+ break;
+ case MPU_PM_EVENT_HANDLED:
+ dev_dbg(&this_client->adapter->dev,
+ "%s: %d\n", __func__, cmd);
+ complete(&mpu->completion);
+ break;
+ case MPU_READ_ACCEL:
+ {
+ unsigned char data[6];
+ retval = mpu3050_read_accel(mldl_cfg, client->adapter,
+ data);
+ if ((ML_SUCCESS == retval) &&
+ (copy_to_user((unsigned char __user *) arg,
+ data, sizeof(data))))
+ retval = -EFAULT;
+ }
+ break;
+ case MPU_READ_COMPASS:
+ {
+ unsigned char data[6];
+ struct i2c_adapter *compass_adapt =
+ i2c_get_adapter(mldl_cfg->pdata->compass.
+ adapt_num);
+ retval = mpu3050_read_compass(mldl_cfg, compass_adapt,
+ data);
+ if ((ML_SUCCESS == retval) &&
+ (copy_to_user((unsigned char __user *) arg,
+ data, sizeof(data))))
+ retval = -EFAULT;
+ }
+ break;
+ case MPU_READ_PRESSURE:
+ {
+ unsigned char data[3];
+ struct i2c_adapter *pressure_adapt =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.
+ adapt_num);
+ retval =
+ mpu3050_read_pressure(mldl_cfg, pressure_adapt,
+ data);
+ if ((ML_SUCCESS == retval) &&
+ (copy_to_user((unsigned char __user *) arg,
+ data, sizeof(data))))
+ retval = -EFAULT;
+ }
+ break;
+ default:
+ dev_err(&this_client->adapter->dev,
+ "%s: Unknown cmd %x, arg %lu\n", __func__, cmd,
+ arg);
+ retval = -EINVAL;
+ }
+
+ mutex_unlock(&mpu->mutex);
+ return retval;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void mpu3050_early_suspend(struct early_suspend *h)
+{
+ struct mpu_private_data *mpu = container_of(h,
+ struct
+ mpu_private_data,
+ early_suspend);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ dev_dbg(&this_client->adapter->dev, "%s: %d, %d\n", __func__,
+ h->level, mpu->mldl_cfg.gyro_is_suspended);
+ if (MPU3050_EARLY_SUSPEND_IN_DRIVER) {
+ mutex_lock(&mpu->mutex);
+ (void) mpu3050_suspend(mldl_cfg, this_client->adapter,
+ accel_adapter, compass_adapter,
+ pressure_adapter, TRUE, TRUE, TRUE, TRUE);
+ mutex_unlock(&mpu->mutex);
+ }
+}
+
+void mpu3050_early_resume(struct early_suspend *h)
+{
+ struct mpu_private_data *mpu = container_of(h,
+ struct
+ mpu_private_data,
+ early_suspend);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ if (MPU3050_EARLY_SUSPEND_IN_DRIVER) {
+ if (mpu->pid) {
+ unsigned long sensors = mldl_cfg->requested_sensors;
+ mutex_lock(&mpu->mutex);
+ (void) mpu3050_resume(mldl_cfg,
+ this_client->adapter,
+ accel_adapter,
+ compass_adapter,
+ pressure_adapter,
+ sensors & ML_THREE_AXIS_GYRO,
+ sensors & ML_THREE_AXIS_ACCEL,
+ sensors & ML_THREE_AXIS_COMPASS,
+ sensors & ML_THREE_AXIS_PRESSURE);
+ mutex_unlock(&mpu->mutex);
+ dev_dbg(&this_client->adapter->dev,
+ "%s for pid %d\n", __func__, mpu->pid);
+ }
+ }
+ dev_dbg(&this_client->adapter->dev, "%s: %d\n", __func__, h->level);
+}
+#endif
+
+void mpu_shutdown(struct i2c_client *client)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ mutex_lock(&mpu->mutex);
+ (void) mpu3050_suspend(mldl_cfg, this_client->adapter,
+ accel_adapter, compass_adapter, pressure_adapter,
+ TRUE, TRUE, TRUE, TRUE);
+ mutex_unlock(&mpu->mutex);
+ dev_dbg(&this_client->adapter->dev, "%s\n", __func__);
+}
+
+int mpu_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ mutex_lock(&mpu->mutex);
+ if (!mldl_cfg->ignore_system_suspend) {
+ dev_dbg(&this_client->adapter->dev,
+ "%s: suspending on event %d\n", __func__,
+ mesg.event);
+ (void) mpu3050_suspend(mldl_cfg, this_client->adapter,
+ accel_adapter, compass_adapter,
+ pressure_adapter,
+ TRUE, TRUE, TRUE, TRUE);
+ } else {
+ dev_dbg(&this_client->adapter->dev,
+ "%s: Already suspended %d\n", __func__,
+ mesg.event);
+ }
+ mutex_unlock(&mpu->mutex);
+ return 0;
+}
+
+int mpu_resume(struct i2c_client *client)
+{
+ struct mpu_private_data *mpu =
+ (struct mpu_private_data *) i2c_get_clientdata(client);
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ mutex_lock(&mpu->mutex);
+ if (mpu->pid && !mldl_cfg->ignore_system_suspend) {
+ unsigned long sensors = mldl_cfg->requested_sensors;
+ (void) mpu3050_resume(mldl_cfg, this_client->adapter,
+ accel_adapter,
+ compass_adapter,
+ pressure_adapter,
+ sensors & ML_THREE_AXIS_GYRO,
+ sensors & ML_THREE_AXIS_ACCEL,
+ sensors & ML_THREE_AXIS_COMPASS,
+ sensors & ML_THREE_AXIS_PRESSURE);
+ dev_dbg(&this_client->adapter->dev,
+ "%s for pid %d\n", __func__, mpu->pid);
+ }
+ mutex_unlock(&mpu->mutex);
+ return 0;
+}
+
+/* define which file operations are supported */
+static const struct file_operations mpu_fops = {
+ .owner = THIS_MODULE,
+ .read = mpu_read,
+ .poll = mpu_poll,
+
+#if HAVE_COMPAT_IOCTL
+ .compat_ioctl = mpu_ioctl,
+#endif
+#if HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mpu_ioctl,
+#endif
+ .open = mpu_open,
+ .release = mpu_release,
+};
+
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)
+I2C_CLIENT_INSMOD;
+#endif
+
+static struct miscdevice i2c_mpu_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mpu", /* Same for both 3050 and 6000 */
+ .fops = &mpu_fops,
+};
+
+
+int mpu3050_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct mpu3050_platform_data *pdata;
+ struct mpu_private_data *mpu;
+ struct mldl_cfg *mldl_cfg;
+ int res = 0;
+ struct i2c_adapter *accel_adapter = NULL;
+ struct i2c_adapter *compass_adapter = NULL;
+ struct i2c_adapter *pressure_adapter = NULL;
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ res = -ENODEV;
+ goto out_check_functionality_failed;
+ }
+
+ mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL);
+ if (!mpu) {
+ res = -ENOMEM;
+ goto out_alloc_data_failed;
+ }
+
+ i2c_set_clientdata(client, mpu);
+ this_client = client;
+ mldl_cfg = &mpu->mldl_cfg;
+
+ init_waitqueue_head(&mpu->mpu_event_wait);
+
+ mutex_init(&mpu->mutex);
+ init_completion(&mpu->completion);
+
+ mpu->response_timeout = 60; /* Seconds */
+ mpu->timeout.function = mpu_pm_timeout;
+ mpu->timeout.data = (u_long) mpu;
+ init_timer(&mpu->timeout);
+
+ /* FIXME:
+ * Do not register the pm_notifier: it causes issues in the
+ * resume sequence because user space does not respond to
+ * power notifications for approximately 60 seconds.
+ * Refer to NV bug 858630 for more details.
+ */
+#if 0
+ mpu->nb.notifier_call = mpu_pm_notifier_callback;
+ mpu->nb.priority = 0;
+ register_pm_notifier(&mpu->nb);
+#endif
+
+ pdata = (struct mpu3050_platform_data *) client->dev.platform_data;
+ if (!pdata) {
+ dev_warn(&this_client->adapter->dev,
+ "Missing platform data for mpu3050\n");
+ } else {
+ mldl_cfg->pdata = pdata;
+
+#if defined(CONFIG_MPU_SENSORS_MPU3050_MODULE) || \
+ defined(CONFIG_MPU_SENSORS_MPU6000_MODULE)
+ pdata->accel.get_slave_descr = get_accel_slave_descr;
+ pdata->compass.get_slave_descr = get_compass_slave_descr;
+ pdata->pressure.get_slave_descr = get_pressure_slave_descr;
+#endif
+
+ if (pdata->accel.get_slave_descr) {
+ mldl_cfg->accel =
+ pdata->accel.get_slave_descr();
+ dev_info(&this_client->adapter->dev,
+ "%s: +%s\n", MPU_NAME,
+ mldl_cfg->accel->name);
+ accel_adapter =
+ i2c_get_adapter(pdata->accel.adapt_num);
+ if (pdata->accel.irq > 0) {
+ dev_info(&this_client->adapter->dev,
+ "Installing Accel irq using %d\n",
+ pdata->accel.irq);
+ res = slaveirq_init(accel_adapter,
+ &pdata->accel,
+ "accelirq");
+ if (res)
+ goto out_accelirq_failed;
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "WARNING: Accel irq not assigned\n");
+ }
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "%s: No Accel Present\n", MPU_NAME);
+ }
+
+ if (pdata->compass.get_slave_descr) {
+ mldl_cfg->compass =
+ pdata->compass.get_slave_descr();
+ dev_info(&this_client->adapter->dev,
+ "%s: +%s\n", MPU_NAME,
+ mldl_cfg->compass->name);
+ compass_adapter =
+ i2c_get_adapter(pdata->compass.adapt_num);
+ if (pdata->compass.irq > 0) {
+ dev_info(&this_client->adapter->dev,
+ "Installing Compass irq using %d\n",
+ pdata->compass.irq);
+ res = slaveirq_init(compass_adapter,
+ &pdata->compass,
+ "compassirq");
+ if (res)
+ goto out_compassirq_failed;
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "WARNING: Compass irq not assigned\n");
+ }
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "%s: No Compass Present\n", MPU_NAME);
+ }
+
+ if (pdata->pressure.get_slave_descr) {
+ mldl_cfg->pressure =
+ pdata->pressure.get_slave_descr();
+ dev_info(&this_client->adapter->dev,
+ "%s: +%s\n", MPU_NAME,
+ mldl_cfg->pressure->name);
+ pressure_adapter =
+ i2c_get_adapter(pdata->pressure.adapt_num);
+
+ if (pdata->pressure.irq > 0) {
+ dev_info(&this_client->adapter->dev,
+ "Installing Pressure irq using %d\n",
+ pdata->pressure.irq);
+ res = slaveirq_init(pressure_adapter,
+ &pdata->pressure,
+ "pressureirq");
+ if (res)
+ goto out_pressureirq_failed;
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "WARNING: Pressure irq not assigned\n");
+ }
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "%s: No Pressure Present\n", MPU_NAME);
+ }
+ }
+
+ mldl_cfg->addr = client->addr;
+ res = mpu3050_open(&mpu->mldl_cfg, client->adapter,
+ accel_adapter, compass_adapter, pressure_adapter);
+
+ if (res) {
+ dev_err(&this_client->adapter->dev,
+ "Unable to open %s %d\n", MPU_NAME, res);
+ res = -ENODEV;
+ goto out_whoami_failed;
+ }
+
+ res = misc_register(&i2c_mpu_device);
+ if (res < 0) {
+ dev_err(&this_client->adapter->dev,
+ "ERROR: misc_register returned %d\n", res);
+ goto out_misc_register_failed;
+ }
+
+ if (this_client->irq > 0) {
+ dev_info(&this_client->adapter->dev,
+ "Installing irq using %d\n", this_client->irq);
+ res = mpuirq_init(this_client);
+ if (res)
+ goto out_mpuirq_failed;
+ } else {
+ dev_warn(&this_client->adapter->dev,
+ "Missing %s IRQ\n", MPU_NAME);
+ }
+
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ mpu->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ mpu->early_suspend.suspend = mpu3050_early_suspend;
+ mpu->early_suspend.resume = mpu3050_early_resume;
+ register_early_suspend(&mpu->early_suspend);
+#endif
+ return res;
+
+out_mpuirq_failed:
+ misc_deregister(&i2c_mpu_device);
+out_misc_register_failed:
+ mpu3050_close(&mpu->mldl_cfg, client->adapter,
+ accel_adapter, compass_adapter, pressure_adapter);
+out_whoami_failed:
+ if (pdata &&
+ pdata->pressure.get_slave_descr &&
+ pdata->pressure.irq)
+ slaveirq_exit(&pdata->pressure);
+out_pressureirq_failed:
+ if (pdata &&
+ pdata->compass.get_slave_descr &&
+ pdata->compass.irq)
+ slaveirq_exit(&pdata->compass);
+out_compassirq_failed:
+ if (pdata &&
+ pdata->accel.get_slave_descr &&
+ pdata->accel.irq)
+ slaveirq_exit(&pdata->accel);
+out_accelirq_failed:
+ kfree(mpu);
+out_alloc_data_failed:
+out_check_functionality_failed:
+ dev_err(&this_client->adapter->dev, "%s failed %d\n", __func__,
+ res);
+ return res;
+
+}
+
+static int mpu3050_remove(struct i2c_client *client)
+{
+ struct mpu_private_data *mpu = i2c_get_clientdata(client);
+ struct i2c_adapter *accel_adapter;
+ struct i2c_adapter *compass_adapter;
+ struct i2c_adapter *pressure_adapter;
+ struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
+ struct mpu3050_platform_data *pdata = mldl_cfg->pdata;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ compass_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->compass.adapt_num);
+ pressure_adapter =
+ i2c_get_adapter(mldl_cfg->pdata->pressure.adapt_num);
+
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&mpu->early_suspend);
+#endif
+ mpu3050_close(mldl_cfg, client->adapter,
+ accel_adapter, compass_adapter, pressure_adapter);
+
+ if (client->irq)
+ mpuirq_exit();
+
+ if (pdata &&
+ pdata->pressure.get_slave_descr &&
+ pdata->pressure.irq)
+ slaveirq_exit(&pdata->pressure);
+
+ if (pdata &&
+ pdata->compass.get_slave_descr &&
+ pdata->compass.irq)
+ slaveirq_exit(&pdata->compass);
+
+ if (pdata &&
+ pdata->accel.get_slave_descr &&
+ pdata->accel.irq)
+ slaveirq_exit(&pdata->accel);
+
+ misc_deregister(&i2c_mpu_device);
+ kfree(mpu);
+
+ return 0;
+}
+
+static const struct i2c_device_id mpu3050_id[] = {
+ {MPU_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mpu3050_id);
+
+static struct i2c_driver mpu3050_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = mpu3050_probe,
+ .remove = mpu3050_remove,
+ .id_table = mpu3050_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = MPU_NAME,
+ },
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)
+ .address_data = &addr_data,
+#else
+ .address_list = normal_i2c,
+#endif
+
+ .shutdown = mpu_shutdown, /* optional */
+ .suspend = mpu_suspend, /* optional */
+ .resume = mpu_resume, /* optional */
+
+};
+
+static int __init mpu_init(void)
+{
+ int res = i2c_add_driver(&mpu3050_driver);
+ pr_debug("%s\n", __func__);
+ if (res)
+ pr_err("%s failed\n",
+ __func__);
+ return res;
+}
+
+static void __exit mpu_exit(void)
+{
+ pr_debug("%s\n", __func__);
+ i2c_del_driver(&mpu3050_driver);
+}
+
+module_init(mpu_init);
+module_exit(mpu_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("User space character device interface for MPU3050");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(MPU_NAME);
diff --git a/drivers/misc/mpu3050/mpu-i2c.c b/drivers/misc/mpu3050/mpu-i2c.c
new file mode 100644
index 000000000000..b1298d313abf
--- /dev/null
+++ b/drivers/misc/mpu3050/mpu-i2c.c
@@ -0,0 +1,196 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup
+ * @brief
+ *
+ * @{
+ * @file mpu-i2c.c
+ * @brief
+ *
+ */
+
+#include <linux/i2c.h>
+#include "mpu.h"
+
+int sensor_i2c_write(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned int len, unsigned char const *data)
+{
+ struct i2c_msg msgs[1];
+ int res;
+
+ if (NULL == data || NULL == i2c_adap)
+ return -EINVAL;
+
+ msgs[0].addr = address;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = (unsigned char *) data;
+ msgs[0].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 1);
+ if (res < 1)
+ return res;
+ else
+ return 0;
+}
+
+int sensor_i2c_write_register(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned char reg, unsigned char value)
+{
+ unsigned char data[2];
+
+ data[0] = reg;
+ data[1] = value;
+ return sensor_i2c_write(i2c_adap, address, 2, data);
+}
+
+int sensor_i2c_read(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned char reg,
+ unsigned int len, unsigned char *data)
+{
+ struct i2c_msg msgs[2];
+ int res;
+
+ if (NULL == data || NULL == i2c_adap)
+ return -EINVAL;
+
+ msgs[0].addr = address;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = &reg;
+ msgs[0].len = 1;
+
+ msgs[1].addr = address;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = data;
+ msgs[1].len = len;
+
+ res = i2c_transfer(i2c_adap, msgs, 2);
+ if (res < 2)
+ return res;
+ else
+ return 0;
+}
+
+int mpu_memory_read(struct i2c_adapter *i2c_adap,
+ unsigned char mpu_addr,
+ unsigned short mem_addr,
+ unsigned int len, unsigned char *data)
+{
+ unsigned char bank[2];
+ unsigned char addr[2];
+ unsigned char buf;
+
+ struct i2c_msg msgs[4];
+ int ret;
+
+ if (NULL == data || NULL == i2c_adap)
+ return -EINVAL;
+
+ bank[0] = MPUREG_BANK_SEL;
+ bank[1] = mem_addr >> 8;
+
+ addr[0] = MPUREG_MEM_START_ADDR;
+ addr[1] = mem_addr & 0xFF;
+
+ buf = MPUREG_MEM_R_W;
+
+ /* Write Message */
+ msgs[0].addr = mpu_addr;
+ msgs[0].flags = 0;
+ msgs[0].buf = bank;
+ msgs[0].len = sizeof(bank);
+
+ msgs[1].addr = mpu_addr;
+ msgs[1].flags = 0;
+ msgs[1].buf = addr;
+ msgs[1].len = sizeof(addr);
+
+ msgs[2].addr = mpu_addr;
+ msgs[2].flags = 0;
+ msgs[2].buf = &buf;
+ msgs[2].len = 1;
+
+ msgs[3].addr = mpu_addr;
+ msgs[3].flags = I2C_M_RD;
+ msgs[3].buf = data;
+ msgs[3].len = len;
+
+ ret = i2c_transfer(i2c_adap, msgs, 4);
+ if (ret != 4)
+ return ret;
+ else
+ return 0;
+}
+
+int mpu_memory_write(struct i2c_adapter *i2c_adap,
+ unsigned char mpu_addr,
+ unsigned short mem_addr,
+ unsigned int len, unsigned char const *data)
+{
+ unsigned char bank[2];
+ unsigned char addr[2];
+ unsigned char buf[513];
+
+ struct i2c_msg msgs[3];
+ int ret;
+
+ if (NULL == data || NULL == i2c_adap)
+ return -EINVAL;
+ if (len >= (sizeof(buf) - 1))
+ return -ENOMEM;
+
+ bank[0] = MPUREG_BANK_SEL;
+ bank[1] = mem_addr >> 8;
+
+ addr[0] = MPUREG_MEM_START_ADDR;
+ addr[1] = mem_addr & 0xFF;
+
+ buf[0] = MPUREG_MEM_R_W;
+ memcpy(buf + 1, data, len);
+
+ /* Write Message */
+ msgs[0].addr = mpu_addr;
+ msgs[0].flags = 0;
+ msgs[0].buf = bank;
+ msgs[0].len = sizeof(bank);
+
+ msgs[1].addr = mpu_addr;
+ msgs[1].flags = 0;
+ msgs[1].buf = addr;
+ msgs[1].len = sizeof(addr);
+
+ msgs[2].addr = mpu_addr;
+ msgs[2].flags = 0;
+ msgs[2].buf = (unsigned char *) buf;
+ msgs[2].len = len + 1;
+
+ ret = i2c_transfer(i2c_adap, msgs, 3);
+ if (ret != 3)
+ return ret;
+ else
+ return 0;
+}
+
+/**
+ * @}
+ */
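The helpers above are thin wrappers around i2c_transfer(): a register read is issued as a one-byte register-address write followed by an I2C read in the same transfer, and the MPU memory accessors prepend the bank-select and start-address writes before touching MPUREG_MEM_R_W. A minimal sketch of how a caller might use them follows; the bus number (0) and 7-bit slave address (0x68) are placeholders, not values taken from this patch.

	#include <linux/kernel.h>
	#include <linux/i2c.h>
	#include "mpu-i2c.h"

	/* Hypothetical example: read one byte from register 0x00 of a sensor
	 * at 7-bit address 0x68 on I2C bus 0. */
	static int example_read_id(void)
	{
		struct i2c_adapter *adap = i2c_get_adapter(0);	/* bus number is an assumption */
		unsigned char id;
		int err;

		if (!adap)
			return -ENODEV;

		err = sensor_i2c_read(adap, 0x68, 0x00, 1, &id);
		if (!err)
			pr_info("sensor id 0x%02x\n", id);

		i2c_put_adapter(adap);
		return err;
	}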
diff --git a/drivers/misc/mpu3050/mpu-i2c.h b/drivers/misc/mpu3050/mpu-i2c.h
new file mode 100644
index 000000000000..0bbc8c64594e
--- /dev/null
+++ b/drivers/misc/mpu3050/mpu-i2c.h
@@ -0,0 +1,58 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+/**
+ * @defgroup
+ * @brief
+ *
+ * @{
+ * @file mpu-i2c.h
+ * @brief
+ *
+ *
+ */
+
+#ifndef __MPU_I2C_H__
+#define __MPU_I2C_H__
+
+#include <linux/i2c.h>
+
+int sensor_i2c_write(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned int len, unsigned char const *data);
+
+int sensor_i2c_write_register(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned char reg, unsigned char value);
+
+int sensor_i2c_read(struct i2c_adapter *i2c_adap,
+ unsigned char address,
+ unsigned char reg,
+ unsigned int len, unsigned char *data);
+
+int mpu_memory_read(struct i2c_adapter *i2c_adap,
+ unsigned char mpu_addr,
+ unsigned short mem_addr,
+ unsigned int len, unsigned char *data);
+
+int mpu_memory_write(struct i2c_adapter *i2c_adap,
+ unsigned char mpu_addr,
+ unsigned short mem_addr,
+ unsigned int len, unsigned char const *data);
+
+#endif /* __MPU_I2C_H__ */
diff --git a/drivers/misc/mpu3050/mpuirq.c b/drivers/misc/mpu3050/mpuirq.c
new file mode 100644
index 000000000000..ce1ad409cbf4
--- /dev/null
+++ b/drivers/misc/mpu3050/mpuirq.c
@@ -0,0 +1,319 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/irq.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "mpu.h"
+#include "mpuirq.h"
+#include "mldl_cfg.h"
+#include "mpu-i2c.h"
+
+#define MPUIRQ_NAME "mpuirq"
+
+/* function which gets accel data and sends it to MPU */
+
+DECLARE_WAIT_QUEUE_HEAD(mpuirq_wait);
+
+struct mpuirq_dev_data {
+ struct work_struct work;
+ struct i2c_client *mpu_client;
+ struct miscdevice *dev;
+ int irq;
+ int pid;
+ int accel_divider;
+ int data_ready;
+ int timeout;
+};
+
+static struct mpuirq_dev_data mpuirq_dev_data;
+static struct mpuirq_data mpuirq_data;
+static char *interface = MPUIRQ_NAME;
+
+static void mpu_accel_data_work_fcn(struct work_struct *work);
+
+static int mpuirq_open(struct inode *inode, struct file *file)
+{
+ dev_dbg(mpuirq_dev_data.dev->this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+ mpuirq_dev_data.pid = current->pid;
+ file->private_data = &mpuirq_dev_data;
+ return 0;
+}
+
+/* close function - called when the "file" /dev/mpuirq is closed in userspace */
+static int mpuirq_release(struct inode *inode, struct file *file)
+{
+ dev_dbg(mpuirq_dev_data.dev->this_device, "mpuirq_release\n");
+ return 0;
+}
+
+/* read function, called when /dev/mpuirq is read */
+static ssize_t mpuirq_read(struct file *file,
+ char *buf, size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct mpuirq_dev_data *p_mpuirq_dev_data = file->private_data;
+
+ if (!mpuirq_dev_data.data_ready &&
+ mpuirq_dev_data.timeout &&
+ (!(file->f_flags & O_NONBLOCK))) {
+ wait_event_interruptible_timeout(mpuirq_wait,
+ mpuirq_dev_data.
+ data_ready,
+ mpuirq_dev_data.timeout);
+ }
+
+ if (mpuirq_dev_data.data_ready && NULL != buf
+ && count >= sizeof(mpuirq_data)) {
+ err = copy_to_user(buf, &mpuirq_data, sizeof(mpuirq_data));
+ mpuirq_data.data_type = 0;
+ } else {
+ return 0;
+ }
+ if (err != 0) {
+ dev_err(p_mpuirq_dev_data->dev->this_device,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ mpuirq_dev_data.data_ready = 0;
+ len = sizeof(mpuirq_data);
+ return len;
+}
+
+unsigned int mpuirq_poll(struct file *file, struct poll_table_struct *poll)
+{
+ int mask = 0;
+
+ poll_wait(file, &mpuirq_wait, poll);
+ if (mpuirq_dev_data.data_ready)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+/* ioctl - I/O control */
+static long mpuirq_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int data;
+
+ switch (cmd) {
+ case MPUIRQ_SET_TIMEOUT:
+ mpuirq_dev_data.timeout = arg;
+ break;
+
+ case MPUIRQ_GET_INTERRUPT_CNT:
+ data = mpuirq_data.interruptcount - 1;
+ if (mpuirq_data.interruptcount > 1)
+ mpuirq_data.interruptcount = 1;
+
+ if (copy_to_user((int *) arg, &data, sizeof(int)))
+ return -EFAULT;
+ break;
+ case MPUIRQ_GET_IRQ_TIME:
+ if (copy_to_user((int *) arg, &mpuirq_data.irqtime,
+ sizeof(mpuirq_data.irqtime)))
+ return -EFAULT;
+ mpuirq_data.irqtime = 0;
+ break;
+ case MPUIRQ_SET_FREQUENCY_DIVIDER:
+ mpuirq_dev_data.accel_divider = arg;
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static void mpu_accel_data_work_fcn(struct work_struct *work)
+{
+ struct mpuirq_dev_data *mpuirq_dev_data =
+ (struct mpuirq_dev_data *) work;
+ struct mldl_cfg *mldl_cfg =
+ (struct mldl_cfg *)
+ i2c_get_clientdata(mpuirq_dev_data->mpu_client);
+ struct i2c_adapter *accel_adapter;
+ unsigned char wbuff[16];
+ unsigned char rbuff[16];
+ int ii;
+
+ accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
+ mldl_cfg->accel->read(accel_adapter,
+ mldl_cfg->accel,
+ &mldl_cfg->pdata->accel, rbuff);
+
+
+ /* @todo add other data formats here as well */
+ if (EXT_SLAVE_BIG_ENDIAN == mldl_cfg->accel->endian) {
+ for (ii = 0; ii < 3; ii++) {
+ wbuff[2 * ii + 1] = rbuff[2 * ii + 1];
+ wbuff[2 * ii + 2] = rbuff[2 * ii + 0];
+ }
+ } else {
+ memcpy(wbuff + 1, rbuff, mldl_cfg->accel->len);
+ }
+
+ wbuff[7] = 0;
+ wbuff[8] = 1; /*set semaphore */
+
+ mpu_memory_write(mpuirq_dev_data->mpu_client->adapter,
+ mldl_cfg->addr, 0x0108, 8, wbuff);
+}
+
+static irqreturn_t mpuirq_handler(int irq, void *dev_id)
+{
+ static int mycount;
+ struct timeval irqtime;
+ mycount++;
+
+ mpuirq_data.interruptcount++;
+
+ /* wake up (unblock) for reading data from userspace */
+ /* and ignore first interrupt generated in module init */
+ mpuirq_dev_data.data_ready = 1;
+
+ do_gettimeofday(&irqtime);
+ mpuirq_data.irqtime = (((long long) irqtime.tv_sec) << 32);
+ mpuirq_data.irqtime += irqtime.tv_usec;
+
+ if ((mpuirq_dev_data.accel_divider >= 0) &&
+ (0 == (mycount % (mpuirq_dev_data.accel_divider + 1)))) {
+ schedule_work((struct work_struct
+ *) (&mpuirq_dev_data));
+ }
+
+ wake_up_interruptible(&mpuirq_wait);
+
+ return IRQ_HANDLED;
+
+}
+
+/* define which file operations are supported */
+const struct file_operations mpuirq_fops = {
+ .owner = THIS_MODULE,
+ .read = mpuirq_read,
+ .poll = mpuirq_poll,
+
+#if HAVE_COMPAT_IOCTL
+ .compat_ioctl = mpuirq_ioctl,
+#endif
+#if HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mpuirq_ioctl,
+#endif
+ .open = mpuirq_open,
+ .release = mpuirq_release,
+};
+
+static struct miscdevice mpuirq_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = MPUIRQ_NAME,
+ .fops = &mpuirq_fops,
+};
+
+int mpuirq_init(struct i2c_client *mpu_client)
+{
+
+ int res;
+ struct mldl_cfg *mldl_cfg =
+ (struct mldl_cfg *) i2c_get_clientdata(mpu_client);
+
+ /* work_struct initialization */
+ INIT_WORK((struct work_struct *) &mpuirq_dev_data,
+ mpu_accel_data_work_fcn);
+ mpuirq_dev_data.mpu_client = mpu_client;
+
+ dev_info(&mpu_client->adapter->dev,
+ "Module Param interface = %s\n", interface);
+
+ mpuirq_dev_data.irq = mpu_client->irq;
+ mpuirq_dev_data.pid = 0;
+ mpuirq_dev_data.accel_divider = -1;
+ mpuirq_dev_data.data_ready = 0;
+ mpuirq_dev_data.timeout = 0;
+ mpuirq_dev_data.dev = &mpuirq_device;
+
+ if (mpuirq_dev_data.irq) {
+ unsigned long flags;
+ if (BIT_ACTL_LOW ==
+ ((mldl_cfg->pdata->int_config) & BIT_ACTL))
+ flags = IRQF_TRIGGER_FALLING;
+ else
+ flags = IRQF_TRIGGER_RISING;
+
+ res =
+ request_irq(mpuirq_dev_data.irq, mpuirq_handler, flags,
+ interface, &mpuirq_dev_data.irq);
+ if (res) {
+ dev_err(&mpu_client->adapter->dev,
+ "myirqtest: cannot register IRQ %d\n",
+ mpuirq_dev_data.irq);
+ } else {
+ res = misc_register(&mpuirq_device);
+ if (res < 0) {
+ dev_err(&mpu_client->adapter->dev,
+ "misc_register returned %d\n",
+ res);
+ free_irq(mpuirq_dev_data.irq,
+ &mpuirq_dev_data.irq);
+ }
+ }
+
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+void mpuirq_exit(void)
+{
+ /* Free the IRQ first before flushing the work */
+ if (mpuirq_dev_data.irq > 0)
+ free_irq(mpuirq_dev_data.irq, &mpuirq_dev_data.irq);
+
+ flush_scheduled_work();
+
+ dev_info(mpuirq_device.this_device, "Unregistering %s\n",
+ MPUIRQ_NAME);
+ misc_deregister(&mpuirq_device);
+
+ return;
+}
+
+module_param(interface, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(interface, "The Interface name");
diff --git a/drivers/misc/mpu3050/mpuirq.h b/drivers/misc/mpu3050/mpuirq.h
new file mode 100644
index 000000000000..a71c79c75e8c
--- /dev/null
+++ b/drivers/misc/mpu3050/mpuirq.h
@@ -0,0 +1,42 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MPUIRQ__
+#define __MPUIRQ__
+
+#ifdef __KERNEL__
+#include <linux/i2c-dev.h>
+#include <linux/time.h>
+#else
+#include <sys/time.h>
+#endif
+
+#define MPUIRQ_SET_TIMEOUT _IOW(MPU_IOCTL, 0x40, unsigned long)
+#define MPUIRQ_GET_INTERRUPT_CNT _IOR(MPU_IOCTL, 0x41, unsigned long)
+#define MPUIRQ_GET_IRQ_TIME _IOR(MPU_IOCTL, 0x42, struct timeval)
+#define MPUIRQ_SET_FREQUENCY_DIVIDER _IOW(MPU_IOCTL, 0x43, unsigned long)
+
+#ifdef __KERNEL__
+
+void mpuirq_exit(void);
+int mpuirq_init(struct i2c_client *mpu_client);
+
+#endif
+
+#endif
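The ioctls above, together with read()/poll(), form the whole userspace interface of the mpuirq misc device. A hedged sketch of a consumer is shown below; it assumes the node appears as /dev/mpuirq, that struct mpuirq_data and MPU_IOCTL come from mpu.h (not part of this hunk), and that the timeout value is handed straight to the kernel wait, so it is expressed in jiffies rather than milliseconds.

	/* Hypothetical userspace consumer of /dev/mpuirq. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "mpu.h"	/* struct mpuirq_data, MPU_IOCTL (assumed) */
	#include "mpuirq.h"

	int main(void)
	{
		struct mpuirq_data data;
		int fd = open("/dev/mpuirq", O_RDONLY);

		if (fd < 0)
			return 1;

		/* let read() block for up to 100 jiffies per event */
		ioctl(fd, MPUIRQ_SET_TIMEOUT, 100);

		if (read(fd, &data, sizeof(data)) == sizeof(data))
			printf("interrupt #%d at %lld\n", data.interruptcount,
			       (long long)data.irqtime);

		close(fd);
		return 0;
	}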
diff --git a/drivers/misc/mpu3050/slaveirq.c b/drivers/misc/mpu3050/slaveirq.c
new file mode 100644
index 000000000000..a3c7bfec4b4b
--- /dev/null
+++ b/drivers/misc/mpu3050/slaveirq.c
@@ -0,0 +1,273 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/irq.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#include "mpu.h"
+#include "slaveirq.h"
+#include "mldl_cfg.h"
+#include "mpu-i2c.h"
+
+/* forwards slave sensor interrupts, with timestamps, to userspace */
+
+struct slaveirq_dev_data {
+ struct miscdevice dev;
+ struct i2c_client *slave_client;
+ struct mpuirq_data data;
+ wait_queue_head_t slaveirq_wait;
+ int irq;
+ int pid;
+ int data_ready;
+ int timeout;
+};
+
+/* The following depends on patch fa1f68db6ca7ebb6fc4487ac215bffba06c01c28
+ * drivers: misc: pass miscdevice pointer via file private data
+ */
+static int slaveirq_open(struct inode *inode, struct file *file)
+{
+	/* The misc device node is available in file->private_data, which is
+	 * exactly what we want, so we leave it there */
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ dev_dbg(data->dev.this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+ data->pid = current->pid;
+ return 0;
+}
+
+static int slaveirq_release(struct inode *inode, struct file *file)
+{
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+ dev_dbg(data->dev.this_device, "slaveirq_release\n");
+ return 0;
+}
+
+/* read function, called when /dev/slaveirq is read */
+static ssize_t slaveirq_read(struct file *file,
+ char *buf, size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ if (!data->data_ready &&
+ data->timeout &&
+ !(file->f_flags & O_NONBLOCK)) {
+ wait_event_interruptible_timeout(data->slaveirq_wait,
+ data->data_ready,
+ data->timeout);
+ }
+
+ if (data->data_ready && NULL != buf
+ && count >= sizeof(data->data)) {
+ err = copy_to_user(buf, &data->data, sizeof(data->data));
+ data->data.data_type = 0;
+ } else {
+ return 0;
+ }
+ if (err != 0) {
+ dev_err(data->dev.this_device,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ data->data_ready = 0;
+ len = sizeof(data->data);
+ return len;
+}
+
+static unsigned int slaveirq_poll(struct file *file,
+ struct poll_table_struct *poll)
+{
+ int mask = 0;
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ poll_wait(file, &data->slaveirq_wait, poll);
+ if (data->data_ready)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+/* ioctl - I/O control */
+static long slaveirq_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int tmp;
+ struct slaveirq_dev_data *data =
+ container_of(file->private_data, struct slaveirq_dev_data, dev);
+
+ switch (cmd) {
+ case SLAVEIRQ_SET_TIMEOUT:
+ data->timeout = arg;
+ break;
+
+ case SLAVEIRQ_GET_INTERRUPT_CNT:
+ tmp = data->data.interruptcount - 1;
+ if (data->data.interruptcount > 1)
+ data->data.interruptcount = 1;
+
+ if (copy_to_user((int *) arg, &tmp, sizeof(int)))
+ return -EFAULT;
+ break;
+ case SLAVEIRQ_GET_IRQ_TIME:
+ if (copy_to_user((int *) arg, &data->data.irqtime,
+ sizeof(data->data.irqtime)))
+ return -EFAULT;
+ data->data.irqtime = 0;
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static irqreturn_t slaveirq_handler(int irq, void *dev_id)
+{
+ struct slaveirq_dev_data *data = (struct slaveirq_dev_data *)dev_id;
+ static int mycount;
+ struct timeval irqtime;
+ mycount++;
+
+ data->data.interruptcount++;
+
+ /* wake up (unblock) for reading data from userspace */
+ data->data_ready = 1;
+
+ do_gettimeofday(&irqtime);
+ data->data.irqtime = (((long long) irqtime.tv_sec) << 32);
+ data->data.irqtime += irqtime.tv_usec;
+ data->data.data_type |= 1;
+
+ wake_up_interruptible(&data->slaveirq_wait);
+
+ return IRQ_HANDLED;
+
+}
+
+/* define which file operations are supported */
+static const struct file_operations slaveirq_fops = {
+ .owner = THIS_MODULE,
+ .read = slaveirq_read,
+ .poll = slaveirq_poll,
+
+#if HAVE_COMPAT_IOCTL
+ .compat_ioctl = slaveirq_ioctl,
+#endif
+#if HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = slaveirq_ioctl,
+#endif
+ .open = slaveirq_open,
+ .release = slaveirq_release,
+};
+
+int slaveirq_init(struct i2c_adapter *slave_adapter,
+ struct ext_slave_platform_data *pdata,
+ char *name)
+{
+
+ int res;
+ struct slaveirq_dev_data *data;
+
+ if (!pdata->irq)
+ return -EINVAL;
+
+ pdata->irq_data = kzalloc(sizeof(*data),
+ GFP_KERNEL);
+ data = (struct slaveirq_dev_data *) pdata->irq_data;
+ if (!data)
+ return -ENOMEM;
+
+ data->dev.minor = MISC_DYNAMIC_MINOR;
+ data->dev.name = name;
+ data->dev.fops = &slaveirq_fops;
+ data->irq = pdata->irq;
+ data->pid = 0;
+ data->data_ready = 0;
+ data->timeout = 0;
+
+ init_waitqueue_head(&data->slaveirq_wait);
+
+ res = request_irq(data->irq, slaveirq_handler, IRQF_TRIGGER_RISING,
+ data->dev.name, data);
+
+ if (res) {
+ dev_err(&slave_adapter->dev,
+ "myirqtest: cannot register IRQ %d\n",
+ data->irq);
+ goto out_request_irq;
+ }
+
+ res = misc_register(&data->dev);
+ if (res < 0) {
+ dev_err(&slave_adapter->dev,
+ "misc_register returned %d\n",
+ res);
+ goto out_misc_register;
+ }
+
+ return res;
+
+out_misc_register:
+ free_irq(data->irq, data);
+out_request_irq:
+ kfree(pdata->irq_data);
+ pdata->irq_data = NULL;
+
+ return res;
+}
+
+void slaveirq_exit(struct ext_slave_platform_data *pdata)
+{
+ struct slaveirq_dev_data *data = pdata->irq_data;
+
+ if (!pdata->irq_data || data->irq <= 0)
+ return;
+
+ dev_info(data->dev.this_device, "Unregistering %s\n",
+ data->dev.name);
+
+ free_irq(data->irq, data);
+ misc_deregister(&data->dev);
+ kfree(pdata->irq_data);
+ pdata->irq_data = NULL;
+}
diff --git a/drivers/misc/mpu3050/slaveirq.h b/drivers/misc/mpu3050/slaveirq.h
new file mode 100644
index 000000000000..b4e1115f1b0a
--- /dev/null
+++ b/drivers/misc/mpu3050/slaveirq.h
@@ -0,0 +1,43 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __SLAVEIRQ__
+#define __SLAVEIRQ__
+
+#ifdef __KERNEL__
+#include <linux/i2c-dev.h>
+#endif
+
+#include "mpu.h"
+#include "mpuirq.h"
+
+#define SLAVEIRQ_SET_TIMEOUT _IOW(MPU_IOCTL, 0x50, unsigned long)
+#define SLAVEIRQ_GET_INTERRUPT_CNT _IOR(MPU_IOCTL, 0x51, unsigned long)
+#define SLAVEIRQ_GET_IRQ_TIME _IOR(MPU_IOCTL, 0x52, unsigned long)
+
+#ifdef __KERNEL__
+
+void slaveirq_exit(struct ext_slave_platform_data *pdata);
+int slaveirq_init(struct i2c_adapter *slave_adapter,
+ struct ext_slave_platform_data *pdata,
+ char *name);
+
+#endif
+
+#endif
diff --git a/drivers/misc/mpu3050/timerirq.c b/drivers/misc/mpu3050/timerirq.c
new file mode 100644
index 000000000000..41c3ac981016
--- /dev/null
+++ b/drivers/misc/mpu3050/timerirq.c
@@ -0,0 +1,299 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/signal.h>
+#include <linux/miscdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/poll.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include "mpu.h"
+#include "mltypes.h"
+#include "timerirq.h"
+
+/* software timer that generates periodic, interrupt-style events for userspace */
+struct timerirq_data {
+ int pid;
+ int data_ready;
+ int run;
+ int timeout;
+ unsigned long period;
+ struct mpuirq_data data;
+ struct completion timer_done;
+ wait_queue_head_t timerirq_wait;
+ struct timer_list timer;
+ struct miscdevice *dev;
+};
+
+static struct miscdevice *timerirq_dev_data;
+
+static void timerirq_handler(unsigned long arg)
+{
+ struct timerirq_data *data = (struct timerirq_data *)arg;
+ struct timeval irqtime;
+
+ data->data.interruptcount++;
+
+ data->data_ready = 1;
+
+ do_gettimeofday(&irqtime);
+ data->data.irqtime = (((long long) irqtime.tv_sec) << 32);
+ data->data.irqtime += irqtime.tv_usec;
+ data->data.data_type |= 1;
+
+ dev_dbg(data->dev->this_device,
+ "%s, %lld, %ld\n", __func__, data->data.irqtime,
+ (unsigned long)data);
+
+ wake_up_interruptible(&data->timerirq_wait);
+
+ if (data->run)
+ mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(data->period));
+ else
+ complete(&data->timer_done);
+}
+
+static int start_timerirq(struct timerirq_data *data)
+{
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+
+ /* Timer already running... success */
+ if (data->run)
+ return 0;
+
+ /* Don't allow a period of 0 since this would fire constantly */
+ if (!data->period)
+ return -EINVAL;
+
+ data->run = TRUE;
+ data->data_ready = FALSE;
+
+ init_completion(&data->timer_done);
+ setup_timer(&data->timer, timerirq_handler, (unsigned long)data);
+
+ return mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(data->period));
+}
+
+static int stop_timerirq(struct timerirq_data *data)
+{
+ dev_dbg(data->dev->this_device,
+		"%s data %lx\n", __func__, (unsigned long)data);
+
+ if (data->run) {
+ data->run = FALSE;
+ mod_timer(&data->timer, jiffies + 1);
+ wait_for_completion(&data->timer_done);
+ }
+ return 0;
+}
+
+/* The following depends on patch fa1f68db6ca7ebb6fc4487ac215bffba06c01c28
+ * drivers: misc: pass miscdevice pointer via file private data
+ */
+static int timerirq_open(struct inode *inode, struct file *file)
+{
+	/* The misc device node is available in file->private_data, which is
+	 * exactly what we want, so we leave it there */
+ struct miscdevice *dev_data = file->private_data;
+ struct timerirq_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev_data;
+ file->private_data = data;
+ data->pid = current->pid;
+ init_waitqueue_head(&data->timerirq_wait);
+
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %d\n", __func__, current->pid);
+ return 0;
+}
+
+static int timerirq_release(struct inode *inode, struct file *file)
+{
+ struct timerirq_data *data = file->private_data;
+ dev_dbg(data->dev->this_device, "timerirq_release\n");
+ if (data->run)
+ stop_timerirq(data);
+ kfree(data);
+ return 0;
+}
+
+/* read function, called when /dev/timerirq is read */
+static ssize_t timerirq_read(struct file *file,
+ char *buf, size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct timerirq_data *data = file->private_data;
+
+ if (!data->data_ready &&
+ data->timeout &&
+ !(file->f_flags & O_NONBLOCK)) {
+ wait_event_interruptible_timeout(data->timerirq_wait,
+ data->data_ready,
+ data->timeout);
+ }
+
+ if (data->data_ready && NULL != buf
+ && count >= sizeof(data->data)) {
+ err = copy_to_user(buf, &data->data, sizeof(data->data));
+ data->data.data_type = 0;
+ } else {
+ return 0;
+ }
+ if (err != 0) {
+ dev_err(data->dev->this_device,
+ "Copy to user returned %d\n", err);
+ return -EFAULT;
+ }
+ data->data_ready = 0;
+ len = sizeof(data->data);
+ return len;
+}
+
+static unsigned int timerirq_poll(struct file *file,
+ struct poll_table_struct *poll)
+{
+ int mask = 0;
+ struct timerirq_data *data = file->private_data;
+
+ poll_wait(file, &data->timerirq_wait, poll);
+ if (data->data_ready)
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+/* ioctl - I/O control */
+static long timerirq_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int tmp;
+ struct timerirq_data *data = file->private_data;
+
+ dev_dbg(data->dev->this_device,
+ "%s current->pid %d, %d, %ld\n",
+ __func__, current->pid, cmd, arg);
+
+ if (!data)
+ return -EFAULT;
+
+ switch (cmd) {
+ case TIMERIRQ_SET_TIMEOUT:
+ data->timeout = arg;
+ break;
+ case TIMERIRQ_GET_INTERRUPT_CNT:
+ tmp = data->data.interruptcount - 1;
+ if (data->data.interruptcount > 1)
+ data->data.interruptcount = 1;
+
+ if (copy_to_user((int *) arg, &tmp, sizeof(int)))
+ return -EFAULT;
+ break;
+ case TIMERIRQ_START:
+ data->period = arg;
+ retval = start_timerirq(data);
+ break;
+ case TIMERIRQ_STOP:
+ retval = stop_timerirq(data);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+/* define which file operations are supported */
+static const struct file_operations timerirq_fops = {
+ .owner = THIS_MODULE,
+ .read = timerirq_read,
+ .poll = timerirq_poll,
+
+#if HAVE_COMPAT_IOCTL
+ .compat_ioctl = timerirq_ioctl,
+#endif
+#if HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = timerirq_ioctl,
+#endif
+ .open = timerirq_open,
+ .release = timerirq_release,
+};
+
+static int __init timerirq_init(void)
+{
+
+ int res;
+ static struct miscdevice *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ timerirq_dev_data = data;
+ data->minor = MISC_DYNAMIC_MINOR;
+ data->name = "timerirq";
+ data->fops = &timerirq_fops;
+
+ res = misc_register(data);
+ if (res < 0) {
+ dev_err(data->this_device,
+ "misc_register returned %d\n",
+ res);
+ return res;
+ }
+
+ return res;
+}
+module_init(timerirq_init);
+
+static void __exit timerirq_exit(void)
+{
+ struct miscdevice *data = timerirq_dev_data;
+
+ dev_info(data->this_device, "Unregistering %s\n",
+ data->name);
+
+ misc_deregister(data);
+ kfree(data);
+
+ timerirq_dev_data = NULL;
+}
+module_exit(timerirq_exit);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Timer IRQ device driver.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("timerirq");
diff --git a/drivers/misc/mpu3050/timerirq.h b/drivers/misc/mpu3050/timerirq.h
new file mode 100644
index 000000000000..ec2c1e29f080
--- /dev/null
+++ b/drivers/misc/mpu3050/timerirq.h
@@ -0,0 +1,30 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __TIMERIRQ__
+#define __TIMERIRQ__
+
+#include "mpu.h"
+
+#define TIMERIRQ_SET_TIMEOUT _IOW(MPU_IOCTL, 0x60, unsigned long)
+#define TIMERIRQ_GET_INTERRUPT_CNT _IOW(MPU_IOCTL, 0x61, unsigned long)
+#define TIMERIRQ_START _IOW(MPU_IOCTL, 0x62, unsigned long)
+#define TIMERIRQ_STOP _IO(MPU_IOCTL, 0x63)
+
+#endif
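timerirq is a purely software event source: TIMERIRQ_START arms a kernel timer with the given period (in milliseconds, via msecs_to_jiffies) and every expiry produces the same struct mpuirq_data record that the hardware IRQ devices deliver. A hedged userspace sketch, assuming the misc node shows up as /dev/timerirq and that struct mpuirq_data is declared in mpu.h:

	/* Hypothetical userspace test for the software timer device. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "mpu.h"	/* struct mpuirq_data (assumed) */
	#include "timerirq.h"

	int main(void)
	{
		struct mpuirq_data ev;
		int i, fd = open("/dev/timerirq", O_RDONLY);

		if (fd < 0)
			return 1;

		ioctl(fd, TIMERIRQ_SET_TIMEOUT, 100);	/* read() wait, in jiffies */
		ioctl(fd, TIMERIRQ_START, 20);		/* period in milliseconds */

		for (i = 0; i < 5; i++)
			if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
				printf("tick %d\n", ev.interruptcount);

		ioctl(fd, TIMERIRQ_STOP);
		close(fd);
		return 0;
	}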
diff --git a/drivers/misc/nct1008.c b/drivers/misc/nct1008.c
new file mode 100644
index 000000000000..fab2df033dff
--- /dev/null
+++ b/drivers/misc/nct1008.c
@@ -0,0 +1,1027 @@
+/*
+ * drivers/misc/nct1008.c
+ *
+ * Driver for NCT1008, temperature monitoring device from ON Semiconductor
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/nct1008.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#define DRIVER_NAME "nct1008"
+
+/* Register Addresses */
+#define LOCAL_TEMP_RD 0x00
+#define EXT_TEMP_RD_HI 0x01
+#define EXT_TEMP_RD_LO 0x10
+#define STATUS_RD 0x02
+#define CONFIG_RD 0x03
+
+#define LOCAL_TEMP_HI_LIMIT_RD 0x05
+#define LOCAL_TEMP_LO_LIMIT_RD 0x06
+
+#define EXT_TEMP_HI_LIMIT_HI_BYTE_RD 0x07
+
+#define CONFIG_WR 0x09
+#define CONV_RATE_WR 0x0A
+#define LOCAL_TEMP_HI_LIMIT_WR 0x0B
+#define LOCAL_TEMP_LO_LIMIT_WR 0x0C
+#define EXT_TEMP_HI_LIMIT_HI_BYTE_WR 0x0D
+#define EXT_TEMP_LO_LIMIT_HI_BYTE_WR 0x0E
+#define OFFSET_WR 0x11
+#define OFFSET_QUARTER_WR 0x12
+#define EXT_THERM_LIMIT_WR 0x19
+#define LOCAL_THERM_LIMIT_WR 0x20
+#define THERM_HYSTERESIS_WR 0x21
+
+/* Configuration Register Bits */
+#define EXTENDED_RANGE_BIT BIT(2)
+#define THERM2_BIT BIT(5)
+#define STANDBY_BIT BIT(6)
+#define ALERT_BIT BIT(7)
+
+/* Max Temperature Measurements */
+#define EXTENDED_RANGE_OFFSET 64U
+#define STANDARD_RANGE_MAX 127U
+#define EXTENDED_RANGE_MAX (150U + EXTENDED_RANGE_OFFSET)
+
+#define NCT1008_MIN_TEMP -64
+#define NCT1008_MAX_TEMP 191
+
+#define MAX_STR_PRINT 50
+
+#define MIN_SLEEP_MSEC 20
+#define CELSIUS_TO_MILLICELSIUS(x) ((x)*1000)
+#define MILLICELSIUS_TO_CELSIUS(x) ((x)/1000)
+
+static inline s8 value_to_temperature(bool extended, u8 value)
+{
+ return extended ? (s8)(value - EXTENDED_RANGE_OFFSET) : (s8)value;
+}
+
+static inline u8 temperature_to_value(bool extended, s8 temp)
+{
+ return extended ? (u8)(temp + EXTENDED_RANGE_OFFSET) : (u8)temp;
+}
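In extended-range mode the chip reports temperatures with a fixed +64 offset so that the -64..191 degC span fits in an unsigned byte; the two helpers above only add or remove that offset. A small worked example (illustration only, not part of the driver):

	/* Extended range: register value 0x40 (64) decodes to 0 degC, and
	 * 25 degC encodes back to 64 + 25 = 89 (0x59). */
	static void __maybe_unused nct1008_offset_example(void)
	{
		s8 t = value_to_temperature(true, 0x40);	/* t == 0  */
		u8 v = temperature_to_value(true, 25);		/* v == 89 */

		(void)t;
		(void)v;
	}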
+
+static int nct1008_get_temp(struct device *dev, long *pTemp)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ s8 temp_local;
+ u8 temp_ext_lo;
+ s8 temp_ext_hi;
+ long temp_ext_milli;
+ long temp_local_milli;
+	int value;	/* int so i2c_smbus_read_byte_data() errors are caught below */
+
+ /* Read Local Temp */
+ value = i2c_smbus_read_byte_data(client, LOCAL_TEMP_RD);
+ if (value < 0)
+ goto error;
+ temp_local = value_to_temperature(pdata->ext_range, value);
+ temp_local_milli = CELSIUS_TO_MILLICELSIUS(temp_local);
+
+ /* Read External Temp */
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_LO);
+ if (value < 0)
+ goto error;
+ temp_ext_lo = (value >> 6);
+
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_HI);
+ if (value < 0)
+ goto error;
+ temp_ext_hi = value_to_temperature(pdata->ext_range, value);
+
+ temp_ext_milli = CELSIUS_TO_MILLICELSIUS(temp_ext_hi) +
+ temp_ext_lo * 250;
+
+ /* Return max between Local and External Temp */
+ *pTemp = max(temp_local_milli, temp_ext_milli);
+
+ dev_dbg(dev, "\n %s: ret temp=%ldC ", __func__, *pTemp);
+ return 0;
+error:
+ dev_err(&client->dev, "\n error in file=: %s %s() line=%d: "
+ "error=%d ", __FILE__, __func__, __LINE__, value);
+ return value;
+}
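The external channel has 0.25 degC resolution: the top two bits of EXT_TEMP_RD_LO carry the fraction, which is why the sum above multiplies them by 250 when building a millidegree value. A quick check of that arithmetic (illustration only):

	/* HI byte = 25 degC (standard range), LO byte = 0xC0 (fraction bits = 3)
	 * combine to 25000 + 3 * 250 = 25750 millidegrees Celsius. */
	long milli = CELSIUS_TO_MILLICELSIUS(25) + (0xC0 >> 6) * 250;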
+
+static ssize_t nct1008_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ s8 temp1 = 0;
+ s8 temp = 0;
+ u8 temp2 = 0;
+ int value = 0;
+
+ if (!dev || !buf || !attr)
+ return -EINVAL;
+
+ value = i2c_smbus_read_byte_data(client, LOCAL_TEMP_RD);
+ if (value < 0)
+ goto error;
+ temp1 = value_to_temperature(pdata->ext_range, value);
+
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_LO);
+ if (value < 0)
+ goto error;
+ temp2 = (value >> 6);
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_HI);
+ if (value < 0)
+ goto error;
+ temp = value_to_temperature(pdata->ext_range, value);
+
+ return snprintf(buf, MAX_STR_PRINT, "%d %d.%d\n",
+ temp1, temp, temp2 * 25);
+
+error:
+ return snprintf(buf, MAX_STR_PRINT,
+ "Error read local/ext temperature\n");
+}
+
+static ssize_t nct1008_show_temp_overheat(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ int value;
+ s8 temp, temp2;
+
+ /* Local temperature h/w shutdown limit */
+ value = i2c_smbus_read_byte_data(client, LOCAL_THERM_LIMIT_WR);
+ if (value < 0)
+ goto error;
+ temp = value_to_temperature(pdata->ext_range, value);
+
+ /* External temperature h/w shutdown limit */
+ value = i2c_smbus_read_byte_data(client, EXT_THERM_LIMIT_WR);
+ if (value < 0)
+ goto error;
+ temp2 = value_to_temperature(pdata->ext_range, value);
+
+ return snprintf(buf, MAX_STR_PRINT, "%d %d\n", temp, temp2);
+error:
+ dev_err(dev, "%s: failed to read temperature-overheat "
+ "\n", __func__);
+ return snprintf(buf, MAX_STR_PRINT, " Rd overheat Error\n");
+}
+
+static ssize_t nct1008_set_temp_overheat(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ long int num;
+ int err;
+ u8 temp;
+ long currTemp;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ char bufTemp[MAX_STR_PRINT];
+ char bufOverheat[MAX_STR_PRINT];
+ unsigned int ret;
+
+ if (strict_strtol(buf, 0, &num)) {
+ dev_err(dev, "\n file: %s, line=%d return %s() ", __FILE__,
+ __LINE__, __func__);
+ return -EINVAL;
+ }
+ if (((int)num < NCT1008_MIN_TEMP) || ((int)num >= NCT1008_MAX_TEMP)) {
+ dev_err(dev, "\n file: %s, line=%d return %s() ", __FILE__,
+ __LINE__, __func__);
+ return -EINVAL;
+ }
+ /* check for system power down */
+ err = nct1008_get_temp(dev, &currTemp);
+ if (err)
+ goto error;
+
+ currTemp = MILLICELSIUS_TO_CELSIUS(currTemp);
+
+ if (currTemp >= (int)num) {
+ ret = nct1008_show_temp(dev, attr, bufTemp);
+ ret = nct1008_show_temp_overheat(dev, attr, bufOverheat);
+ dev_err(dev, "\nCurrent temp: %s ", bufTemp);
+ dev_err(dev, "\nOld overheat limit: %s ", bufOverheat);
+ dev_err(dev, "\nReset from overheat: curr temp=%ld, "
+ "new overheat temp=%d\n\n", currTemp, (int)num);
+ }
+
+ /* External temperature h/w shutdown limit */
+ temp = temperature_to_value(pdata->ext_range, (s8)num);
+ err = i2c_smbus_write_byte_data(client, EXT_THERM_LIMIT_WR, temp);
+ if (err < 0)
+ goto error;
+
+ /* Local temperature h/w shutdown limit */
+ temp = temperature_to_value(pdata->ext_range, (s8)num);
+ err = i2c_smbus_write_byte_data(client, LOCAL_THERM_LIMIT_WR, temp);
+ if (err < 0)
+ goto error;
+ return count;
+error:
+ dev_err(dev, " %s: failed to set temperature-overheat\n", __func__);
+ return err;
+}
+
+static ssize_t nct1008_show_temp_alert(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ int value;
+ s8 temp, temp2;
+ /* External Temperature Throttling limit */
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_HI_LIMIT_HI_BYTE_RD);
+ if (value < 0)
+ goto error;
+ temp2 = value_to_temperature(pdata->ext_range, value);
+
+ /* Local Temperature Throttling limit */
+ value = i2c_smbus_read_byte_data(client, LOCAL_TEMP_HI_LIMIT_RD);
+ if (value < 0)
+ goto error;
+ temp = value_to_temperature(pdata->ext_range, value);
+
+ return snprintf(buf, MAX_STR_PRINT, "%d %d\n", temp, temp2);
+error:
+	dev_err(dev, "%s: failed to read temperature-alert limits\n",
+		__func__);
+	return snprintf(buf, MAX_STR_PRINT, " Rd alert Error\n");
+}
+
+static ssize_t nct1008_set_temp_alert(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ long int num;
+ int value;
+ int err;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+
+ if (strict_strtol(buf, 0, &num)) {
+ dev_err(dev, "\n file: %s, line=%d return %s() ", __FILE__,
+ __LINE__, __func__);
+ return -EINVAL;
+ }
+ if (((int)num < NCT1008_MIN_TEMP) || ((int)num >= NCT1008_MAX_TEMP)) {
+ dev_err(dev, "\n file: %s, line=%d return %s() ", __FILE__,
+ __LINE__, __func__);
+ return -EINVAL;
+ }
+
+ /* External Temperature Throttling limit */
+ value = temperature_to_value(pdata->ext_range, (s8)num);
+ err = i2c_smbus_write_byte_data(client, EXT_TEMP_HI_LIMIT_HI_BYTE_WR,
+ value);
+ if (err < 0)
+ goto error;
+
+ /* Local Temperature Throttling limit */
+ err = i2c_smbus_write_byte_data(client, LOCAL_TEMP_HI_LIMIT_WR,
+ value);
+ if (err < 0)
+ goto error;
+
+ return count;
+error:
+ dev_err(dev, "%s: failed to set temperature-alert "
+ "\n", __func__);
+ return err;
+}
+
+static ssize_t nct1008_show_ext_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ s8 temp_value;
+ int data = 0;
+ int data_lo;
+
+ if (!dev || !buf || !attr)
+ return -EINVAL;
+
+ /* When reading the full external temperature value, read the
+ * LSB first. This causes the MSB to be locked (that is, the
+ * ADC does not write to it) until it is read */
+ data_lo = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_LO);
+ if (data_lo < 0) {
+ dev_err(&client->dev, "%s: failed to read "
+ "ext_temperature, i2c error=%d\n", __func__, data_lo);
+ goto error;
+ }
+
+ data = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_HI);
+ if (data < 0) {
+ dev_err(&client->dev, "%s: failed to read "
+ "ext_temperature, i2c error=%d\n", __func__, data);
+ goto error;
+ }
+
+ temp_value = value_to_temperature(pdata->ext_range, data);
+
+ return snprintf(buf, MAX_STR_PRINT, "%d.%d\n", temp_value,
+ (25 * (data_lo >> 6)));
+error:
+ return snprintf(buf, MAX_STR_PRINT, "Error read ext temperature\n");
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, nct1008_show_temp, NULL);
+static DEVICE_ATTR(temperature_overheat, (S_IRUGO | (S_IWUSR | S_IWGRP)),
+ nct1008_show_temp_overheat, nct1008_set_temp_overheat);
+static DEVICE_ATTR(temperature_alert, (S_IRUGO | (S_IWUSR | S_IWGRP)),
+ nct1008_show_temp_alert, nct1008_set_temp_alert);
+static DEVICE_ATTR(ext_temperature, S_IRUGO, nct1008_show_ext_temp, NULL);
+
+static struct attribute *nct1008_attributes[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_temperature_overheat.attr,
+ &dev_attr_temperature_alert.attr,
+ &dev_attr_ext_temperature.attr,
+ NULL
+};
+
+static const struct attribute_group nct1008_attr_group = {
+ .attrs = nct1008_attributes,
+};
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+static void print_reg(const char *reg_name, struct seq_file *s,
+ int offset)
+{
+ struct nct1008_data *nct_data = s->private;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(nct_data->client,
+ offset);
+ if (ret >= 0)
+ seq_printf(s, "Reg %s Addr = 0x%02x Reg 0x%02x "
+ "Value 0x%02x\n", reg_name,
+ nct_data->client->addr,
+ offset, ret);
+ else
+ seq_printf(s, "%s: line=%d, i2c read error=%d\n",
+ __func__, __LINE__, ret);
+}
+
+static int dbg_nct1008_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "nct1008 Registers\n");
+ seq_printf(s, "------------------\n");
+ print_reg("Local Temp Value ", s, 0x00);
+ print_reg("Ext Temp Value Hi ", s, 0x01);
+ print_reg("Status ", s, 0x02);
+ print_reg("Configuration ", s, 0x03);
+ print_reg("Conversion Rate ", s, 0x04);
+ print_reg("Local Temp Hi Limit ", s, 0x05);
+ print_reg("Local Temp Lo Limit ", s, 0x06);
+ print_reg("Ext Temp Hi Limit Hi", s, 0x07);
+ print_reg("Ext Temp Hi Limit Lo", s, 0x13);
+ print_reg("Ext Temp Lo Limit Hi", s, 0x08);
+ print_reg("Ext Temp Lo Limit Lo", s, 0x14);
+ print_reg("Ext Temp Value Lo ", s, 0x10);
+ print_reg("Ext Temp Offset Hi ", s, 0x11);
+ print_reg("Ext Temp Offset Lo ", s, 0x12);
+ print_reg("Ext THERM Limit ", s, 0x19);
+ print_reg("Local THERM Limit ", s, 0x20);
+ print_reg("THERM Hysteresis ", s, 0x21);
+ print_reg("Consecutive ALERT ", s, 0x22);
+ return 0;
+}
+
+static int dbg_nct1008_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_nct1008_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_nct1008_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init nct1008_debuginit(struct nct1008_data *nct)
+{
+ int err = 0;
+ struct dentry *d;
+ d = debugfs_create_file("nct1008", S_IRUGO, NULL,
+ (void *)nct, &debug_fops);
+ if ((!d) || IS_ERR(d)) {
+ dev_err(&nct->client->dev, "Error: %s debugfs_create_file"
+ " returned an error\n", __func__);
+ err = -ENOENT;
+ goto end;
+ }
+ if (d == ERR_PTR(-ENODEV)) {
+ dev_err(&nct->client->dev, "Error: %s debugfs not supported "
+ "error=-ENODEV\n", __func__);
+ err = -ENODEV;
+ } else {
+ nct->dent = d;
+ }
+end:
+ return err;
+}
+#else
+static int __init nct1008_debuginit(struct nct1008_data *nct)
+{
+ return 0;
+}
+#endif
+
+static int nct1008_enable(struct i2c_client *client)
+{
+ struct nct1008_data *data = i2c_get_clientdata(client);
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, CONFIG_WR,
+ data->config & ~STANDBY_BIT);
+ if (err < 0)
+ dev_err(&client->dev, "%s, line=%d, i2c write error=%d\n",
+ __func__, __LINE__, err);
+ return err;
+}
+
+static int nct1008_disable(struct i2c_client *client)
+{
+ struct nct1008_data *data = i2c_get_clientdata(client);
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, CONFIG_WR,
+ data->config | STANDBY_BIT);
+ if (err < 0)
+ dev_err(&client->dev, "%s, line=%d, i2c write error=%d\n",
+ __func__, __LINE__, err);
+ return err;
+}
+
+static int nct1008_disable_alert(struct nct1008_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret = 0;
+ int val;
+
+ /*
+ * Disable ALERT# output, because these chips don't implement
+ * SMBus alert correctly; they should only hold the alert line
+ * low briefly.
+ */
+ val = i2c_smbus_read_byte_data(data->client, CONFIG_RD);
+ if (val < 0) {
+ dev_err(&client->dev, "%s, line=%d, disable alert failed ... "
+ "i2c read error=%d\n", __func__, __LINE__, val);
+ return val;
+ }
+ data->config = val | ALERT_BIT;
+ ret = i2c_smbus_write_byte_data(client, CONFIG_WR, data->config);
+ if (ret)
+ dev_err(&client->dev, "%s: fail to disable alert, i2c "
+ "write error=%d#\n", __func__, ret);
+
+ return ret;
+}
+
+static int nct1008_enable_alert(struct nct1008_data *data)
+{
+ int val;
+ int ret;
+
+ val = i2c_smbus_read_byte_data(data->client, CONFIG_RD);
+ if (val < 0) {
+ dev_err(&data->client->dev, "%s, line=%d, enable alert "
+ "failed ... i2c read error=%d\n", __func__,
+ __LINE__, val);
+ return val;
+ }
+ val &= ~(ALERT_BIT | THERM2_BIT);
+ ret = i2c_smbus_write_byte_data(data->client, CONFIG_WR, val);
+ if (ret) {
+ dev_err(&data->client->dev, "%s: fail to enable alert, i2c "
+ "write error=%d\n", __func__, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void nct1008_work_func(struct work_struct *work)
+{
+ struct nct1008_data *data = container_of(work, struct nct1008_data,
+ work);
+ int err = 0;
+ int intr_status = i2c_smbus_read_byte_data(data->client, STATUS_RD);
+
+ if (intr_status < 0) {
+ dev_err(&data->client->dev, "%s, line=%d, i2c read error=%d\n",
+ __func__, __LINE__, intr_status);
+ return;
+ }
+
+ intr_status &= (BIT(3) | BIT(4));
+ if (!intr_status)
+ return;
+
+ if (data->alert_func) {
+ err = nct1008_disable_alert(data);
+ if (err) {
+ dev_err(&data->client->dev,
+ "%s: disable alert fail(error=%d)\n",
+ __func__, err);
+ return;
+ }
+
+ data->alert_func(data->alert_data);
+
+ nct1008_enable_alert(data);
+ }
+
+
+ if (err)
+ dev_err(&data->client->dev, "%s: fail(error=%d)\n", __func__,
+ err);
+ else
+ pr_debug("%s: done\n", __func__);
+}
+
+static irqreturn_t nct1008_irq(int irq, void *dev_id)
+{
+ struct nct1008_data *data = dev_id;
+
+ schedule_work(&data->work);
+ return IRQ_HANDLED;
+}
+
+static void nct1008_power_control(struct nct1008_data *data, bool is_enable)
+{
+ int ret;
+ if (!data->nct_reg) {
+ data->nct_reg = regulator_get(&data->client->dev, "vdd");
+ if (IS_ERR_OR_NULL(data->nct_reg)) {
+			dev_warn(&data->client->dev, "Error [%ld] in "
+				"getting the regulator handle for vdd "
+				"of %s\n", PTR_ERR(data->nct_reg),
+				dev_name(&data->client->dev));
+ data->nct_reg = NULL;
+ return;
+ }
+ }
+ if (is_enable)
+ ret = regulator_enable(data->nct_reg);
+ else
+ ret = regulator_disable(data->nct_reg);
+
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error in %s rail vdd_nct1008, "
+ "error %d\n", (is_enable) ? "enabling" : "disabling",
+ ret);
+ else
+ dev_info(&data->client->dev, "success in %s rail vdd_nct1008\n",
+ (is_enable) ? "enabling" : "disabling");
+}
+
+static int __devinit nct1008_configure_sensor(struct nct1008_data* data)
+{
+ struct i2c_client *client = data->client;
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+	int value;	/* int so the i2c read error checks below work */
+ s8 temp;
+ u8 temp2;
+ int err;
+
+ if (!pdata || !pdata->supported_hwrev)
+ return -ENODEV;
+
+ /* Place in Standby */
+ data->config = STANDBY_BIT;
+ err = i2c_smbus_write_byte_data(client, CONFIG_WR, data->config);
+ if (err)
+ goto error;
+
+ /* External temperature h/w shutdown limit */
+ value = temperature_to_value(pdata->ext_range, NCT1008_MAX_TEMP);
+ err = i2c_smbus_write_byte_data(client, EXT_THERM_LIMIT_WR, value);
+ if (err)
+ goto error;
+
+ /* Local temperature h/w shutdown limit */
+ value = temperature_to_value(pdata->ext_range, NCT1008_MAX_TEMP);
+ err = i2c_smbus_write_byte_data(client, LOCAL_THERM_LIMIT_WR, value);
+ if (err)
+ goto error;
+
+ /* set extended range mode if needed */
+ if (pdata->ext_range)
+ data->config |= EXTENDED_RANGE_BIT;
+ data->config &= ~(THERM2_BIT | ALERT_BIT);
+
+ err = i2c_smbus_write_byte_data(client, CONFIG_WR, data->config);
+ if (err)
+ goto error;
+
+ /* Temperature conversion rate */
+ err = i2c_smbus_write_byte_data(client, CONV_RATE_WR, pdata->conv_rate);
+ if (err)
+ goto error;
+
+ /* Setup local hi and lo limits */
+ err = i2c_smbus_write_byte_data(client,
+ LOCAL_TEMP_HI_LIMIT_WR, NCT1008_MAX_TEMP);
+ if (err)
+ goto error;
+
+ err = i2c_smbus_write_byte_data(client,
+ LOCAL_TEMP_LO_LIMIT_WR, 0);
+ if (err)
+ goto error;
+
+ /* Setup external hi and lo limits */
+ err = i2c_smbus_write_byte_data(client,
+ EXT_TEMP_LO_LIMIT_HI_BYTE_WR, 0);
+ if (err)
+ goto error;
+ err = i2c_smbus_write_byte_data(client, EXT_TEMP_HI_LIMIT_HI_BYTE_WR,
+ NCT1008_MAX_TEMP);
+ if (err)
+ goto error;
+
+ /* read initial temperature */
+ value = i2c_smbus_read_byte_data(client, LOCAL_TEMP_RD);
+ if (value < 0) {
+ err = value;
+ goto error;
+ }
+ temp = value_to_temperature(pdata->ext_range, value);
+ dev_dbg(&client->dev, "\n initial local temp = %d ", temp);
+
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_LO);
+ if (value < 0) {
+ err = value;
+ goto error;
+ }
+ temp2 = (value >> 6);
+ value = i2c_smbus_read_byte_data(client, EXT_TEMP_RD_HI);
+ if (value < 0) {
+ err = value;
+ goto error;
+ }
+ temp = value_to_temperature(pdata->ext_range, value);
+
+ if (temp2 > 0)
+ dev_dbg(&client->dev, "\n initial ext temp = %d.%d deg",
+ temp, temp2 * 25);
+ else
+ dev_dbg(&client->dev, "\n initial ext temp = %d.0 deg", temp);
+
+ /* Remote channel offset */
+ err = i2c_smbus_write_byte_data(client, OFFSET_WR, pdata->offset / 4);
+ if (err < 0)
+ goto error;
+
+ /* Remote channel offset fraction (quarters) */
+ err = i2c_smbus_write_byte_data(client, OFFSET_QUARTER_WR,
+ (pdata->offset % 4) << 6);
+ if (err < 0)
+ goto error;
+
+ /* register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &nct1008_attr_group);
+ if (err < 0) {
+ dev_err(&client->dev, "\n sysfs create err=%d ", err);
+ goto error;
+ }
+
+ return 0;
+error:
+ dev_err(&client->dev, "\n exit %s, err=%d ", __func__, err);
+ return err;
+}
+
+static int __devinit nct1008_configure_irq(struct nct1008_data *data)
+{
+ INIT_WORK(&data->work, nct1008_work_func);
+
+ if (data->client->irq < 0)
+ return 0;
+ else
+ return request_irq(data->client->irq, nct1008_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ DRIVER_NAME, data);
+}
+
+static unsigned int get_ext_mode_delay_ms(unsigned int conv_rate)
+{
+ switch (conv_rate) {
+ case 0:
+ return 16000;
+ case 1:
+ return 8000;
+ case 2:
+ return 4000;
+ case 3:
+ return 2000;
+ case 4:
+ return 1000;
+ case 5:
+ return 500;
+ case 6:
+ return 250;
+ case 7:
+ return 125;
+ case 9:
+ return 32;
+ case 10:
+ return 16;
+ case 8:
+ default:
+ return 63;
+ }
+}
+
+int nct1008_thermal_get_temp(struct nct1008_data *data, long *temp)
+{
+ return nct1008_get_temp(&data->client->dev, temp);
+}
+
+int nct1008_thermal_get_temp_low(struct nct1008_data *data, long *temp)
+{
+ *temp = 0;
+ return 0;
+}
+
+int nct1008_thermal_set_limits(struct nct1008_data *data,
+ long lo_limit_milli,
+ long hi_limit_milli)
+{
+ int err;
+ u8 value;
+ bool extended_range = data->plat_data.ext_range;
+ long lo_limit = MILLICELSIUS_TO_CELSIUS(lo_limit_milli);
+ long hi_limit = MILLICELSIUS_TO_CELSIUS(hi_limit_milli);
+
+ if (lo_limit >= hi_limit)
+ return -EINVAL;
+
+ if (data->current_lo_limit == lo_limit &&
+ data->current_hi_limit == hi_limit)
+ return 0;
+
+ if (data->current_lo_limit != lo_limit) {
+ value = temperature_to_value(extended_range, lo_limit);
+ pr_debug("%s: %d\n", __func__, value);
+ err = i2c_smbus_write_byte_data(data->client,
+ EXT_TEMP_LO_LIMIT_HI_BYTE_WR, value);
+ if (err)
+ return err;
+
+ data->current_lo_limit = lo_limit;
+ }
+
+ if (data->current_hi_limit != hi_limit) {
+ value = temperature_to_value(extended_range, hi_limit);
+ pr_debug("%s: %d\n", __func__, value);
+ err = i2c_smbus_write_byte_data(data->client,
+ EXT_TEMP_HI_LIMIT_HI_BYTE_WR, value);
+ if (err)
+ return err;
+
+ data->current_hi_limit = hi_limit;
+ }
+
+ return 0;
+}
+
+int nct1008_thermal_set_alert(struct nct1008_data *data,
+ void (*alert_func)(void *),
+ void *alert_data)
+{
+ data->alert_data = alert_data;
+ data->alert_func = alert_func;
+
+ return 0;
+}
+
+int nct1008_thermal_set_shutdown_temp(struct nct1008_data *data,
+ long shutdown_temp_milli)
+{
+ struct i2c_client *client = data->client;
+ struct nct1008_platform_data *pdata = client->dev.platform_data;
+ int err;
+ u8 value;
+ long shutdown_temp;
+
+ shutdown_temp = MILLICELSIUS_TO_CELSIUS(shutdown_temp_milli);
+
+ /* External temperature h/w shutdown limit */
+ value = temperature_to_value(pdata->ext_range, shutdown_temp);
+ err = i2c_smbus_write_byte_data(client, EXT_THERM_LIMIT_WR, value);
+ if (err)
+ return err;
+
+ /* Local temperature h/w shutdown limit */
+ value = temperature_to_value(pdata->ext_range, shutdown_temp);
+ err = i2c_smbus_write_byte_data(client, LOCAL_THERM_LIMIT_WR, value);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Manufacturer (OnSemi) recommended sequence for
+ * Extended Range mode is as follows:
+ * 1. Place in Standby
+ * 2. Scale the THERM and ALERT limits
+ * appropriately (for Extended Range mode).
+ * 3. Enable Extended Range mode.
+ * ALERT mask/THERM2 mode may be done here
+ * as these are not critical
+ * 4. Set Conversion Rate as required
+ * 5. Take device out of Standby
+ */
+
+/*
+ * function nct1008_probe takes care of initial configuration
+ */
+static int __devinit nct1008_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nct1008_data *data;
+ int err;
+ unsigned int delay;
+
+ data = kzalloc(sizeof(struct nct1008_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ memcpy(&data->plat_data, client->dev.platform_data,
+ sizeof(struct nct1008_platform_data));
+ i2c_set_clientdata(client, data);
+
+ nct1008_power_control(data, true);
+ /* extended range recommended steps 1 through 4 are taken care
+ * of in the nct1008_configure_sensor() function */
+ err = nct1008_configure_sensor(data); /* sensor is in standby */
+ if (err < 0) {
+ dev_err(&client->dev, "\n error file: %s : %s(), line=%d ",
+ __FILE__, __func__, __LINE__);
+ goto error;
+ }
+
+ err = nct1008_configure_irq(data);
+ if (err < 0) {
+ dev_err(&client->dev, "\n error file: %s : %s(), line=%d ",
+ __FILE__, __func__, __LINE__);
+ goto error;
+ }
+ dev_info(&client->dev, "%s: initialized\n", __func__);
+
+ /* extended range recommended step 5 is in nct1008_enable function */
+ err = nct1008_enable(client); /* sensor is running */
+ if (err < 0) {
+ dev_err(&client->dev, "Error: %s, line=%d, error=%d\n",
+ __func__, __LINE__, err);
+ goto error;
+ }
+
+ err = nct1008_debuginit(data);
+ if (err < 0)
+ err = 0; /* without debugfs we may continue */
+
+ /* after switching to extended mode, the correct temperature is
+ * reported from the next measurement cycle onwards */
+ if (data->plat_data.ext_range) {
+ delay = get_ext_mode_delay_ms(
+ data->plat_data.conv_rate);
+ msleep(delay); /* 63msec for default conv rate 0x8 */
+ }
+
+ /* notify callback that probe is done */
+ if (data->plat_data.probe_callback)
+ data->plat_data.probe_callback(data);
+
+ return 0;
+
+error:
+ dev_err(&client->dev, "\n exit %s, err=%d ", __func__, err);
+ nct1008_power_control(data, false);
+ if (data->nct_reg)
+ regulator_put(data->nct_reg);
+ kfree(data);
+ return err;
+}
+
+static int __devexit nct1008_remove(struct i2c_client *client)
+{
+ struct nct1008_data *data = i2c_get_clientdata(client);
+
+ if (data->dent)
+ debugfs_remove(data->dent);
+
+ free_irq(data->client->irq, data);
+ cancel_work_sync(&data->work);
+ sysfs_remove_group(&client->dev.kobj, &nct1008_attr_group);
+ nct1008_power_control(data, false);
+ if (data->nct_reg)
+ regulator_put(data->nct_reg);
+
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int nct1008_suspend(struct i2c_client *client, pm_message_t state)
+{
+ int err;
+
+ disable_irq(client->irq);
+ err = nct1008_disable(client);
+ return err;
+}
+
+static int nct1008_resume(struct i2c_client *client)
+{
+ struct nct1008_data *data = i2c_get_clientdata(client);
+ int err;
+
+ err = nct1008_enable(client);
+ if (err < 0) {
+ dev_err(&client->dev, "Error: %s, error=%d\n",
+ __func__, err);
+ return err;
+ }
+ enable_irq(client->irq);
+ schedule_work(&data->work);
+
+ return 0;
+}
+#endif
+
+static const struct i2c_device_id nct1008_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nct1008_id);
+
+static struct i2c_driver nct1008_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = nct1008_probe,
+ .remove = __devexit_p(nct1008_remove),
+ .id_table = nct1008_id,
+#ifdef CONFIG_PM
+ .suspend = nct1008_suspend,
+ .resume = nct1008_resume,
+#endif
+};
+
+static int __init nct1008_init(void)
+{
+ return i2c_add_driver(&nct1008_driver);
+}
+
+static void __exit nct1008_exit(void)
+{
+ i2c_del_driver(&nct1008_driver);
+}
+
+MODULE_DESCRIPTION("Temperature sensor driver for OnSemi NCT1008");
+MODULE_LICENSE("GPL");
+
+module_init(nct1008_init);
+module_exit(nct1008_exit);
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
new file mode 100644
index 000000000000..abb73c143164
--- /dev/null
+++ b/drivers/misc/pmem.c
@@ -0,0 +1,1345 @@
+/* drivers/android/pmem.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/android_pmem.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define PMEM_MAX_DEVICES 10
+#define PMEM_MAX_ORDER 128
+#define PMEM_MIN_ALLOC PAGE_SIZE
+
+#define PMEM_DEBUG 1
+
+/* indicates that a reference to this file has been taken via get_pmem_file,
+ * the file should not be released until put_pmem_file is called */
+#define PMEM_FLAGS_BUSY 0x1
+/* indicates that this is a suballocation of a larger master range */
+#define PMEM_FLAGS_CONNECTED 0x1 << 1
+/* indicates this is a master and not a sub allocation and that it is mmaped */
+#define PMEM_FLAGS_MASTERMAP 0x1 << 2
+/* submap and unsubmap flags indicate:
+ * 00: subregion has never been mmaped
+ * 10: subregion has been mmaped, reference to the mm was taken
+ * 11: subregion has been released, reference to the mm still held
+ * 01: subregion has been released, reference to the mm has been released
+ */
+#define PMEM_FLAGS_SUBMAP 0x1 << 3
+#define PMEM_FLAGS_UNSUBMAP 0x1 << 4
+
+
+struct pmem_data {
+ /* in alloc mode: an index into the bitmap
+ * in no_alloc mode: the size of the allocation */
+ int index;
+ /* see flags above for descriptions */
+ unsigned int flags;
+ /* protects this data field; if the mm mmap sem will be held at the
+ * same time as this sem, the mm sem must be taken first (as this is
+ * the order for the vma_open and vma_close ops) */
+ struct rw_semaphore sem;
+ /* info about the mmaping process */
+ struct vm_area_struct *vma;
+ /* task struct of the mapping process */
+ struct task_struct *task;
+ /* process id of the mapping process */
+ pid_t pid;
+ /* file descriptor of the master */
+ int master_fd;
+ /* file struct of the master */
+ struct file *master_file;
+ /* a list of currently available regions if this is a suballocation */
+ struct list_head region_list;
+ /* a linked list of data so we can access them for debugging */
+ struct list_head list;
+#if PMEM_DEBUG
+ int ref;
+#endif
+};
+
+struct pmem_bits {
+ unsigned allocated:1; /* 1 if allocated, 0 if free */
+ unsigned order:7; /* size of the region in pmem space */
+};
+
+struct pmem_region_node {
+ struct pmem_region region;
+ struct list_head list;
+};
+
+#define PMEM_DEBUG_MSGS 0
+#if PMEM_DEBUG_MSGS
+#define DLOG(fmt,args...) \
+ do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+ ##args); } \
+ while (0)
+#else
+#define DLOG(x...) do {} while (0)
+#endif
+
+struct pmem_info {
+ struct miscdevice dev;
+ /* physical start address of the remapped pmem space */
+ unsigned long base;
+ /* virtual start address of the remapped pmem space */
+ unsigned char __iomem *vbase;
+ /* total size of the pmem space */
+ unsigned long size;
+ /* number of entries in the pmem space */
+ unsigned long num_entries;
+ /* pfn of the garbage page in memory */
+ unsigned long garbage_pfn;
+ /* index of the garbage page in the pmem space */
+ int garbage_index;
+ /* the bitmap for the region indicating which entries are allocated
+ * and which are free */
+ struct pmem_bits *bitmap;
+ /* indicates the region should not be managed with an allocator */
+ unsigned no_allocator;
+ /* indicates maps of this region should be cached, if a mix of
+ * cached and uncached is desired, set this and open the device with
+ * O_SYNC to get an uncached region */
+ unsigned cached;
+ unsigned buffered;
+ /* in no_allocator mode the first mapper gets the whole space and sets
+ * this flag */
+ unsigned allocated;
+ /* for debugging, creates a list of pmem file structs, the
+ * data_list_lock should be taken before pmem_data->sem if both are
+ * needed */
+ struct mutex data_list_lock;
+ struct list_head data_list;
+ /* pmem_sem protects the bitmap array
+ * a write lock should be held when modifying entries in bitmap
+ * a read lock should be held when reading data from bits or
+ * dereferencing a pointer into bitmap
+ *
+ * pmem_data->sem protects the pmem data of a particular file
+ * Many of the functions that require the pmem_data->sem have a non-
+ * locking version for when the caller is already holding that sem.
+ *
+ * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
+ * down(pmem_data->sem) => down(bitmap_sem)
+ */
+ struct rw_semaphore bitmap_sem;
+
+ long (*ioctl)(struct file *, unsigned int, unsigned long);
+ int (*release)(struct inode *, struct file *);
+};
+
+static struct pmem_info pmem[PMEM_MAX_DEVICES];
+static int id_count;
+
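+/*
+ * The bitmap implements a simple buddy allocator over PMEM_MIN_ALLOC-sized
+ * slots: each entry records whether the region starting at that slot is
+ * allocated and its order (log2 of its length in slots). A slot's buddy is
+ * found by flipping the order-th bit of its index, and the next region
+ * starts 2^order slots later.
+ */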
+#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
+#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
+#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
+#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
+#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
+#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
+ PMEM_LEN(id, index))
+#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
+ PMEM_LEN(id, index))
+#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
+#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
+ (!(data->flags & PMEM_FLAGS_UNSUBMAP)))
+
+static int pmem_release(struct inode *, struct file *);
+static int pmem_mmap(struct file *, struct vm_area_struct *);
+static int pmem_open(struct inode *, struct file *);
+static long pmem_ioctl(struct file *, unsigned int, unsigned long);
+
+struct file_operations pmem_fops = {
+ .release = pmem_release,
+ .mmap = pmem_mmap,
+ .open = pmem_open,
+ .unlocked_ioctl = pmem_ioctl,
+};
+
+static int get_id(struct file *file)
+{
+ return MINOR(file->f_dentry->d_inode->i_rdev);
+}
+
+int is_pmem_file(struct file *file)
+{
+ int id;
+
+ if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
+ return 0;
+ id = get_id(file);
+ if (unlikely(id >= PMEM_MAX_DEVICES))
+ return 0;
+ if (unlikely(file->f_dentry->d_inode->i_rdev !=
+ MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
+ return 0;
+ return 1;
+}
+
+static int has_allocation(struct file *file)
+{
+ struct pmem_data *data;
+ /* check is_pmem_file first if not accessed via pmem_file_ops */
+
+ if (unlikely(!file->private_data))
+ return 0;
+ data = (struct pmem_data *)file->private_data;
+ if (unlikely(data->index < 0))
+ return 0;
+ return 1;
+}
+
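+/*
+ * A file "owns" the master mapping if it is the master itself (MASTERMAP)
+ * or if the fd it connected with still refers to the recorded master file.
+ */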
+static int is_master_owner(struct file *file)
+{
+ struct file *master_file;
+ struct pmem_data *data;
+ int put_needed, ret = 0;
+
+ if (!is_pmem_file(file) || !has_allocation(file))
+ return 0;
+ data = (struct pmem_data *)file->private_data;
+ if (PMEM_FLAGS_MASTERMAP & data->flags)
+ return 1;
+ master_file = fget_light(data->master_fd, &put_needed);
+ if (master_file && data->master_file == master_file)
+ ret = 1;
+ fput_light(master_file, put_needed);
+ return ret;
+}
+
+static int pmem_free(int id, int index)
+{
+ /* caller should hold the write lock on pmem_sem! */
+ int buddy, curr = index;
+ DLOG("index %d\n", index);
+
+ if (pmem[id].no_allocator) {
+ pmem[id].allocated = 0;
+ return 0;
+ }
+ /* clean up the bitmap, merging any buddies */
+ pmem[id].bitmap[curr].allocated = 0;
+ /* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
+ * if the buddy is also free merge them
+ * repeat until the buddy is not free or end of the bitmap is reached
+ */
+ do {
+ buddy = PMEM_BUDDY_INDEX(id, curr);
+ if (PMEM_IS_FREE(id, buddy) &&
+ PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
+ PMEM_ORDER(id, buddy)++;
+ PMEM_ORDER(id, curr)++;
+ curr = min(buddy, curr);
+ } else {
+ break;
+ }
+ } while (curr < pmem[id].num_entries);
+
+ return 0;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data);
+
+static int pmem_release(struct inode *inode, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_region_node *region_node;
+ struct list_head *elt, *elt2;
+ int id = get_id(file), ret = 0;
+
+
+ mutex_lock(&pmem[id].data_list_lock);
+ /* if this file is a master, revoke all the memory in the connected
+ * files */
+ if (PMEM_FLAGS_MASTERMAP & data->flags) {
+ struct pmem_data *sub_data;
+ list_for_each(elt, &pmem[id].data_list) {
+ sub_data = list_entry(elt, struct pmem_data, list);
+ down_read(&sub_data->sem);
+ if (PMEM_IS_SUBMAP(sub_data) &&
+ file == sub_data->master_file) {
+ up_read(&sub_data->sem);
+ pmem_revoke(file, sub_data);
+ } else
+ up_read(&sub_data->sem);
+ }
+ }
+ list_del(&data->list);
+ mutex_unlock(&pmem[id].data_list_lock);
+
+
+ down_write(&data->sem);
+
+ /* if it's not a connected file and it has an allocation, free it */
+ if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
+ down_write(&pmem[id].bitmap_sem);
+ ret = pmem_free(id, data->index);
+ up_write(&pmem[id].bitmap_sem);
+ }
+
+ /* if this file is a submap (mapped, connected file), downref the
+ * task struct */
+ if (PMEM_FLAGS_SUBMAP & data->flags)
+ if (data->task) {
+ put_task_struct(data->task);
+ data->task = NULL;
+ }
+
+ file->private_data = NULL;
+
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node, list);
+ list_del(elt);
+ kfree(region_node);
+ }
+ BUG_ON(!list_empty(&data->region_list));
+
+ up_write(&data->sem);
+ kfree(data);
+ if (pmem[id].release)
+ ret = pmem[id].release(inode, file);
+
+ return ret;
+}
+
+static int pmem_open(struct inode *inode, struct file *file)
+{
+ struct pmem_data *data;
+ int id = get_id(file);
+ int ret = 0;
+
+ DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
+ /* set up file->private_data to indicate it is unmapped */
+ /* you can only open a pmem device one time */
+ if (file->private_data != NULL)
+ return -1;
+ data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
+ if (!data) {
+ printk("pmem: unable to allocate memory for pmem metadata.");
+ return -1;
+ }
+ data->flags = 0;
+ data->index = -1;
+ data->task = NULL;
+ data->vma = NULL;
+ data->pid = 0;
+ data->master_file = NULL;
+#if PMEM_DEBUG
+ data->ref = 0;
+#endif
+ INIT_LIST_HEAD(&data->region_list);
+ init_rwsem(&data->sem);
+
+ file->private_data = data;
+ INIT_LIST_HEAD(&data->list);
+
+ mutex_lock(&pmem[id].data_list_lock);
+ list_add(&data->list, &pmem[id].data_list);
+ mutex_unlock(&pmem[id].data_list_lock);
+ return ret;
+}
+
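+/*
+ * Convert an allocation length in bytes to a buddy order: the exponent of
+ * the smallest power-of-two number of PMEM_MIN_ALLOC slots that covers len
+ * (i.e. ceil(log2(len / PMEM_MIN_ALLOC))).
+ */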
+static unsigned long pmem_order(unsigned long len)
+{
+ int i;
+
+ len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
+ len--;
+ for (i = 0; i < sizeof(len)*8; i++)
+ if (len >> i == 0)
+ break;
+ return i;
+}
+
+static int pmem_allocate(int id, unsigned long len)
+{
+ /* caller should hold the write lock on pmem_sem! */
+ /* return the corresponding pdata[] entry */
+ int curr = 0;
+ int end = pmem[id].num_entries;
+ int best_fit = -1;
+ unsigned long order = pmem_order(len);
+
+ if (pmem[id].no_allocator) {
+ DLOG("no allocator");
+ if ((len > pmem[id].size) || pmem[id].allocated)
+ return -1;
+ pmem[id].allocated = 1;
+ return len;
+ }
+
+ if (order > PMEM_MAX_ORDER)
+ return -1;
+ DLOG("order %lx\n", order);
+
+ /* look through the bitmap:
+ * if you find a free slot of the correct order use it
+ * otherwise, use the best fit (smallest with size > order) slot
+ */
+ while (curr < end) {
+ if (PMEM_IS_FREE(id, curr)) {
+ if (PMEM_ORDER(id, curr) == (unsigned char)order) {
+ /* set the not free bit and clear others */
+ best_fit = curr;
+ break;
+ }
+ if (PMEM_ORDER(id, curr) > (unsigned char)order &&
+ (best_fit < 0 ||
+ PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
+ best_fit = curr;
+ }
+ curr = PMEM_NEXT_INDEX(id, curr);
+ }
+
+ /* if best_fit < 0, there are no suitable slots,
+ * return an error
+ */
+ if (best_fit < 0) {
+ printk("pmem: no space left to allocate!\n");
+ return -1;
+ }
+
+ /* now partition the best fit:
+ * split the slot into 2 buddies of order - 1
+ * repeat until the slot is of the correct order
+ */
+ while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
+ int buddy;
+ PMEM_ORDER(id, best_fit) -= 1;
+ buddy = PMEM_BUDDY_INDEX(id, best_fit);
+ PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
+ }
+ pmem[id].bitmap[best_fit].allocated = 1;
+ return best_fit;
+}
+
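+/*
+ * Select page protections for a mapping: uncached when the device is
+ * uncached or the file was opened with O_SYNC, write-buffered when the
+ * platform supports it and the device requests it, otherwise unchanged.
+ */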
+static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
+{
+ int id = get_id(file);
+#ifdef pgprot_noncached
+ if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
+ return pgprot_noncached(vma_prot);
+#endif
+#ifdef pgprot_ext_buffered
+ else if (pmem[id].buffered)
+ return pgprot_ext_buffered(vma_prot);
+#endif
+ return vma_prot;
+}
+
+static unsigned long pmem_start_addr(int id, struct pmem_data *data)
+{
+ if (pmem[id].no_allocator)
+ return PMEM_START_ADDR(id, 0);
+ else
+ return PMEM_START_ADDR(id, data->index);
+
+}
+
+static void *pmem_start_vaddr(int id, struct pmem_data *data)
+{
+ return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
+}
+
+static unsigned long pmem_len(int id, struct pmem_data *data)
+{
+ if (pmem[id].no_allocator)
+ return data->index;
+ else
+ return PMEM_LEN(id, data->index);
+}
+
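+/*
+ * Point every page of the given range at the single "garbage" page. This
+ * keeps the userspace mapping valid after a revoke or fork while cutting
+ * off access to the real pmem data.
+ */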
+static int pmem_map_garbage(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ int i, garbage_pages = len >> PAGE_SHIFT;
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
+ for (i = 0; i < garbage_pages; i++) {
+ if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
+ pmem[id].garbage_pfn))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ int garbage_pages;
+ DLOG("unmap offset %lx len %lx\n", offset, len);
+
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+
+ garbage_pages = len >> PAGE_SHIFT;
+ zap_page_range(vma, vma->vm_start + offset, len, NULL);
+ pmem_map_garbage(id, vma, data, offset, len);
+ return 0;
+}
+
+static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ DLOG("map offset %lx len %lx\n", offset, len);
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
+
+ if (io_remap_pfn_range(vma, vma->vm_start + offset,
+ (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
+ len, vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ /* hold the mm sem for the vma you are modifying when you call this */
+ BUG_ON(!vma);
+ zap_page_range(vma, vma->vm_start + offset, len, NULL);
+ return pmem_map_pfn_range(id, vma, data, offset, len);
+}
+
+static void pmem_vma_open(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct pmem_data *data = file->private_data;
+ int id = get_id(file);
+ /* this should never be called as we don't support copying pmem
+ * ranges via fork */
+ BUG_ON(!has_allocation(file));
+ down_write(&data->sem);
+ /* remap the garbage pages, forkers don't get access to the data */
+ pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
+ up_write(&data->sem);
+}
+
+static void pmem_vma_close(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct pmem_data *data = file->private_data;
+
+ DLOG("current %u ppid %u file %p count %d\n", current->pid,
+ current->parent->pid, file, file_count(file));
+ if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
+ printk(KERN_WARNING "pmem: something is very wrong, you are "
+ "closing a vm backing an allocation that doesn't "
+ "exist!\n");
+ return;
+ }
+ down_write(&data->sem);
+ if (data->vma == vma) {
+ data->vma = NULL;
+ if ((data->flags & PMEM_FLAGS_CONNECTED) &&
+ (data->flags & PMEM_FLAGS_SUBMAP))
+ data->flags |= PMEM_FLAGS_UNSUBMAP;
+ }
+ /* the kernel is going to free this vma now anyway */
+ up_write(&data->sem);
+}
+
+static struct vm_operations_struct vm_ops = {
+ .open = pmem_vma_open,
+ .close = pmem_vma_close,
+};
+
+static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct pmem_data *data;
+ int index;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ int ret = 0, id = get_id(file);
+
+ if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
+#if PMEM_DEBUG
+ printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
+ " and a multiple of pages_size.\n");
+#endif
+ return -EINVAL;
+ }
+
+ data = (struct pmem_data *)file->private_data;
+ down_write(&data->sem);
+ /* check this file isn't already mmaped, for submaps check this file
+ * has never been mmaped */
+ if ((data->flags & PMEM_FLAGS_SUBMAP) ||
+ (data->flags & PMEM_FLAGS_UNSUBMAP)) {
+#if PMEM_DEBUG
+ printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
+ "this file is already mmaped. %x\n", data->flags);
+#endif
+ ret = -EINVAL;
+ goto error;
+ }
+ /* if file->private_data is unallocated, allocate it */
+ if (data && data->index == -1) {
+ down_write(&pmem[id].bitmap_sem);
+ index = pmem_allocate(id, vma->vm_end - vma->vm_start);
+ up_write(&pmem[id].bitmap_sem);
+ data->index = index;
+ }
+ /* either no space was available or an error occurred */
+ if (!has_allocation(file)) {
+ ret = -EINVAL;
+ printk("pmem: could not find allocation for map.\n");
+ goto error;
+ }
+
+ if (pmem_len(id, data) < vma_size) {
+#if PMEM_DEBUG
+ printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
+ "size of backing region [%lu].\n", vma_size,
+ pmem_len(id, data));
+#endif
+ ret = -EINVAL;
+ goto error;
+ }
+
+ vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
+ vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
+
+ if (data->flags & PMEM_FLAGS_CONNECTED) {
+ struct pmem_region_node *region_node;
+ struct list_head *elt;
+ if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
+ printk("pmem: mmap failed in kernel!\n");
+ ret = -EAGAIN;
+ goto error;
+ }
+ list_for_each(elt, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ DLOG("remapping file: %p %lx %lx\n", file,
+ region_node->region.offset,
+ region_node->region.len);
+ if (pmem_remap_pfn_range(id, vma, data,
+ region_node->region.offset,
+ region_node->region.len)) {
+ ret = -EAGAIN;
+ goto error;
+ }
+ }
+ data->flags |= PMEM_FLAGS_SUBMAP;
+ get_task_struct(current->group_leader);
+ data->task = current->group_leader;
+ data->vma = vma;
+#if PMEM_DEBUG
+ data->pid = current->pid;
+#endif
+ DLOG("submmapped file %p vma %p pid %u\n", file, vma,
+ current->pid);
+ } else {
+ if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
+ printk(KERN_INFO "pmem: mmap failed in kernel!\n");
+ ret = -EAGAIN;
+ goto error;
+ }
+ data->flags |= PMEM_FLAGS_MASTERMAP;
+ data->pid = current->pid;
+ }
+ vma->vm_ops = &vm_ops;
+error:
+ up_write(&data->sem);
+ return ret;
+}
+
+/* the following are the api for accessing pmem regions by other drivers
+ * from inside the kernel */
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *len)
+{
+ struct pmem_data *data;
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: requested pmem data from invalid"
+ "file.\n");
+#endif
+ return -1;
+ }
+ data = (struct pmem_data *)file->private_data;
+ down_read(&data->sem);
+ if (data->vma) {
+ *start = data->vma->vm_start;
+ *len = data->vma->vm_end - data->vma->vm_start;
+ } else {
+ *start = 0;
+ *len = 0;
+ }
+ up_read(&data->sem);
+ return 0;
+}
+
+int get_pmem_addr(struct file *file, unsigned long *start,
+ unsigned long *vstart, unsigned long *len)
+{
+ struct pmem_data *data;
+ int id;
+
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+ return -1;
+ }
+
+ data = (struct pmem_data *)file->private_data;
+ if (data->index == -1) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: requested pmem data from file with no "
+ "allocation.\n");
+#endif
+ return -1;
+ }
+ id = get_id(file);
+
+ down_read(&data->sem);
+ *start = pmem_start_addr(id, data);
+ *len = pmem_len(id, data);
+ *vstart = (unsigned long)pmem_start_vaddr(id, data);
+ up_read(&data->sem);
+#if PMEM_DEBUG
+ down_write(&data->sem);
+ data->ref++;
+ up_write(&data->sem);
+#endif
+ return 0;
+}
+
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+ unsigned long *len, struct file **filp)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+ printk(KERN_INFO "pmem: requested data from file descriptor "
+ "that doesn't exist.");
+ return -1;
+ }
+
+ if (get_pmem_addr(file, start, vstart, len))
+ goto end;
+
+ if (filp)
+ *filp = file;
+ return 0;
+end:
+ fput(file);
+ return -1;
+}
+
+void put_pmem_file(struct file *file)
+{
+ struct pmem_data *data;
+ int id;
+
+ if (!is_pmem_file(file))
+ return;
+ id = get_id(file);
+ data = (struct pmem_data *)file->private_data;
+#if PMEM_DEBUG
+ down_write(&data->sem);
+ if (data->ref == 0) {
+ printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
+ pmem[id].dev.name, data->pid);
+ BUG();
+ }
+ data->ref--;
+ up_write(&data->sem);
+#endif
+ fput(file);
+}
+
+void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
+{
+ struct pmem_data *data;
+ int id;
+ void *vaddr;
+ struct pmem_region_node *region_node;
+ struct list_head *elt;
+ void *flush_start, *flush_end;
+
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+ return;
+ }
+
+ id = get_id(file);
+ data = (struct pmem_data *)file->private_data;
+ if (!pmem[id].cached || file->f_flags & O_SYNC)
+ return;
+
+ down_read(&data->sem);
+ vaddr = pmem_start_vaddr(id, data);
+ /* if this isn't a submmapped file, flush the whole thing */
+ if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
+ dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
+ goto end;
+ }
+ /* otherwise, flush the region of the file we are drawing */
+ list_for_each(elt, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node, list);
+ if ((offset >= region_node->region.offset) &&
+ ((offset + len) <= (region_node->region.offset +
+ region_node->region.len))) {
+ flush_start = vaddr + region_node->region.offset;
+ flush_end = flush_start + region_node->region.len;
+ dmac_flush_range(flush_start, flush_end);
+ break;
+ }
+ }
+end:
+ up_read(&data->sem);
+}
+
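+/*
+ * Attach this file to a master pmem fd: the client inherits the master's
+ * allocation index so that later PMEM_MAP suballocations map into the same
+ * physical region.
+ */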
+static int pmem_connect(unsigned long connect, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *src_data;
+ struct file *src_file;
+ int ret = 0, put_needed;
+
+ down_write(&data->sem);
+ /* retrieve the src file and check it is a pmem file with an alloc */
+ src_file = fget_light(connect, &put_needed);
+ DLOG("connect %p to %p\n", file, src_file);
+ if (!src_file) {
+ printk("pmem: src file not found!\n");
+ ret = -EINVAL;
+ goto err_no_file;
+ }
+ if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
+ printk(KERN_INFO "pmem: src file is not a pmem file or has no "
+ "alloc!\n");
+ ret = -EINVAL;
+ goto err_bad_file;
+ }
+ src_data = (struct pmem_data *)src_file->private_data;
+
+ if (has_allocation(file) && (data->index != src_data->index)) {
+ printk("pmem: file is already mapped but doesn't match this"
+ " src_file!\n");
+ ret = -EINVAL;
+ goto err_bad_file;
+ }
+ data->index = src_data->index;
+ data->flags |= PMEM_FLAGS_CONNECTED;
+ data->master_fd = connect;
+ data->master_file = src_file;
+
+err_bad_file:
+ fput_light(src_file, put_needed);
+err_no_file:
+ up_write(&data->sem);
+ return ret;
+}
+
+static void pmem_unlock_data_and_mm(struct pmem_data *data,
+ struct mm_struct *mm)
+{
+ up_write(&data->sem);
+ if (mm != NULL) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
+ struct mm_struct **locked_mm)
+{
+ int ret = 0;
+ struct mm_struct *mm = NULL;
+ *locked_mm = NULL;
+lock_mm:
+ down_read(&data->sem);
+ if (PMEM_IS_SUBMAP(data)) {
+ mm = get_task_mm(data->task);
+ if (!mm) {
+#if PMEM_DEBUG
+ printk("pmem: can't remap task is gone!\n");
+#endif
+ up_read(&data->sem);
+ return -1;
+ }
+ }
+ up_read(&data->sem);
+
+ if (mm)
+ down_write(&mm->mmap_sem);
+
+ down_write(&data->sem);
+ /* check that the file didn't get mmaped before we could take the
+ * data sem, this should be safe b/c you can only submap each file
+ * once */
+ if (PMEM_IS_SUBMAP(data) && !mm) {
+ /* pmem_unlock_data_and_mm() already drops data->sem */
+ pmem_unlock_data_and_mm(data, mm);
+ goto lock_mm;
+ }
+ /* now check that vma.mm is still there, it could have been
+ * deleted by vma_close before we could get the data->sem */
+ if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
+ /* might as well release this */
+ if (data->flags & PMEM_FLAGS_SUBMAP) {
+ put_task_struct(data->task);
+ data->task = NULL;
+ /* lower the submap flag to show the mm is gone */
+ data->flags &= ~(PMEM_FLAGS_SUBMAP);
+ }
+ pmem_unlock_data_and_mm(data, mm);
+ return -1;
+ }
+ *locked_mm = mm;
+ return ret;
+}
+
+int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation)
+{
+ int ret;
+ struct pmem_region_node *region_node;
+ struct mm_struct *mm = NULL;
+ struct list_head *elt, *elt2;
+ int id = get_id(file);
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+
+ /* pmem region must be aligned on a page boundary */
+ if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
+ !PMEM_IS_PAGE_ALIGNED(region->len))) {
+#if PMEM_DEBUG
+ printk("pmem: request for unaligned pmem suballocation "
+ "%lx %lx\n", region->offset, region->len);
+#endif
+ return -EINVAL;
+ }
+
+ /* if userspace requests a region of len 0, there's nothing to do */
+ if (region->len == 0)
+ return 0;
+
+ /* lock the mm and data */
+ ret = pmem_lock_data_and_mm(file, data, &mm);
+ if (ret)
+ return 0;
+
+ /* only the owner of the master file can remap the client fds
+ * that are backed by it */
+ if (!is_master_owner(file)) {
+#if PMEM_DEBUG
+ printk("pmem: remap requested from non-master process\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* check that the requested range is within the src allocation */
+ if (unlikely((region->offset > pmem_len(id, data)) ||
+ (region->len > pmem_len(id, data)) ||
+ (region->offset + region->len > pmem_len(id, data)))) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (operation == PMEM_MAP) {
+ region_node = kmalloc(sizeof(struct pmem_region_node),
+ GFP_KERNEL);
+ if (!region_node) {
+ ret = -ENOMEM;
+#if PMEM_DEBUG
+ printk(KERN_INFO "No space to allocate metadata!");
+#endif
+ goto err;
+ }
+ region_node->region = *region;
+ list_add(&region_node->list, &data->region_list);
+ } else if (operation == PMEM_UNMAP) {
+ int found = 0;
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ if (region->len == 0 ||
+ (region_node->region.offset == region->offset &&
+ region_node->region.len == region->len)) {
+ list_del(elt);
+ kfree(region_node);
+ found = 1;
+ }
+ }
+ if (!found) {
+#if PMEM_DEBUG
+ printk("pmem: Unmap region does not map any mapped "
+ "region!");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (data->vma && PMEM_IS_SUBMAP(data)) {
+ if (operation == PMEM_MAP)
+ ret = pmem_remap_pfn_range(id, data->vma, data,
+ region->offset, region->len);
+ else if (operation == PMEM_UNMAP)
+ ret = pmem_unmap_pfn_range(id, data->vma, data,
+ region->offset, region->len);
+ }
+
+err:
+ pmem_unlock_data_and_mm(data, mm);
+ return ret;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data)
+{
+ struct pmem_region_node *region_node;
+ struct list_head *elt, *elt2;
+ struct mm_struct *mm = NULL;
+ int id = get_id(file);
+ int ret = 0;
+
+ data->master_file = NULL;
+ ret = pmem_lock_data_and_mm(file, data, &mm);
+ /* if lock_data_and_mm fails, either the task that mapped the fd or
+ * the vma that mapped it has already gone away; nothing more
+ * needs to be done */
+ if (ret)
+ return;
+ /* unmap everything */
+ /* delete the regions and region list; nothing is mapped any more */
+ if (data->vma)
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ pmem_unmap_pfn_range(id, data->vma, data,
+ region_node->region.offset,
+ region_node->region.len);
+ list_del(elt);
+ kfree(region_node);
+ }
+ /* delete the master file */
+ pmem_unlock_data_and_mm(data, mm);
+}
+
+static void pmem_get_size(struct pmem_region *region, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ int id = get_id(file);
+
+ if (!has_allocation(file)) {
+ region->offset = 0;
+ region->len = 0;
+ return;
+ } else {
+ region->offset = pmem_start_addr(id, data);
+ region->len = pmem_len(id, data);
+ }
+ DLOG("offset %lx len %lx\n", region->offset, region->len);
+}
+
+
+static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pmem_data *data;
+ int id = get_id(file);
+
+ switch (cmd) {
+ case PMEM_GET_PHYS:
+ {
+ struct pmem_region region;
+ DLOG("get_phys\n");
+ if (!has_allocation(file)) {
+ region.offset = 0;
+ region.len = 0;
+ } else {
+ data = (struct pmem_data *)file->private_data;
+ region.offset = pmem_start_addr(id, data);
+ region.len = pmem_len(id, data);
+ }
+ printk(KERN_INFO "pmem: request for physical address of pmem region "
+ "from process %d.\n", current->pid);
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_MAP:
+ {
+ struct pmem_region region;
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ data = (struct pmem_data *)file->private_data;
+ return pmem_remap(&region, file, PMEM_MAP);
+ }
+ break;
+ case PMEM_UNMAP:
+ {
+ struct pmem_region region;
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ data = (struct pmem_data *)file->private_data;
+ return pmem_remap(&region, file, PMEM_UNMAP);
+ break;
+ }
+ case PMEM_GET_SIZE:
+ {
+ struct pmem_region region;
+ DLOG("get_size\n");
+ pmem_get_size(&region, file);
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_GET_TOTAL_SIZE:
+ {
+ struct pmem_region region;
+ DLOG("get total size\n");
+ region.offset = 0;
+ region.len = pmem[id].size;
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_ALLOCATE:
+ {
+ if (has_allocation(file))
+ return -EINVAL;
+ data = (struct pmem_data *)file->private_data;
+ /* pmem_allocate requires the bitmap write lock */
+ down_write(&pmem[id].bitmap_sem);
+ data->index = pmem_allocate(id, arg);
+ up_write(&pmem[id].bitmap_sem);
+ break;
+ }
+ case PMEM_CONNECT:
+ DLOG("connect\n");
+ return pmem_connect(arg, file);
+ break;
+ case PMEM_CACHE_FLUSH:
+ {
+ struct pmem_region region;
+ DLOG("flush\n");
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ flush_pmem_file(file, region.offset, region.len);
+ break;
+ }
+ default:
+ if (pmem[id].ioctl)
+ return pmem[id].ioctl(file, cmd, arg);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if PMEM_DEBUG
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct list_head *elt, *elt2;
+ struct pmem_data *data;
+ struct pmem_region_node *region_node;
+ int id = (int)file->private_data;
+ const int debug_bufmax = 4096;
+ static char buffer[4096];
+ int n = 0;
+
+ DLOG("debug open\n");
+ n = scnprintf(buffer, debug_bufmax,
+ "pid #: mapped regions (offset, len) (offset,len)...\n");
+
+ mutex_lock(&pmem[id].data_list_lock);
+ list_for_each(elt, &pmem[id].data_list) {
+ data = list_entry(elt, struct pmem_data, list);
+ down_read(&data->sem);
+ n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
+ data->pid);
+ list_for_each(elt2, &data->region_list) {
+ region_node = list_entry(elt2, struct pmem_region_node,
+ list);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "(%lx,%lx) ",
+ region_node->region.offset,
+ region_node->region.len);
+ }
+ n += scnprintf(buffer + n, debug_bufmax - n, "\n");
+ up_read(&data->sem);
+ }
+ mutex_unlock(&pmem[id].data_list_lock);
+
+ /* n is already the string length; do not step past the buffer */
+ buffer[n] = 0;
+ return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+
+static struct file_operations debug_fops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+#endif
+
+#if 0
+static struct miscdevice pmem_dev = {
+ .name = "pmem",
+ .fops = &pmem_fops,
+};
+#endif
+
+int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *))
+{
+ int err = 0;
+ int i, index = 0;
+ int id = id_count;
+ id_count++;
+
+ pmem[id].no_allocator = pdata->no_allocator;
+ pmem[id].cached = pdata->cached;
+ pmem[id].buffered = pdata->buffered;
+ pmem[id].base = pdata->start;
+ pmem[id].size = pdata->size;
+ pmem[id].ioctl = ioctl;
+ pmem[id].release = release;
+ init_rwsem(&pmem[id].bitmap_sem);
+ mutex_init(&pmem[id].data_list_lock);
+ INIT_LIST_HEAD(&pmem[id].data_list);
+ pmem[id].dev.name = pdata->name;
+ pmem[id].dev.minor = id;
+ pmem[id].dev.fops = &pmem_fops;
+ printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
+
+ err = misc_register(&pmem[id].dev);
+ if (err) {
+ printk(KERN_ALERT "Unable to register pmem driver!\n");
+ goto err_cant_register_device;
+ }
+ pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
+
+ pmem[id].bitmap = kmalloc(pmem[id].num_entries *
+ sizeof(struct pmem_bits), GFP_KERNEL);
+ if (!pmem[id].bitmap)
+ goto err_no_mem_for_metadata;
+
+ memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
+ pmem[id].num_entries);
+
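+ /* Carve the region into power-of-two chunks: create one free block of
+ * order i for each bit set in num_entries, largest first. */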
+ for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
+ if ((pmem[id].num_entries) & 1<<i) {
+ PMEM_ORDER(id, index) = i;
+ index = PMEM_NEXT_INDEX(id, index);
+ }
+ }
+
+ if (pmem[id].cached)
+ pmem[id].vbase = ioremap_cached(pmem[id].base,
+ pmem[id].size);
+#ifdef ioremap_ext_buffered
+ else if (pmem[id].buffered)
+ pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+ pmem[id].size);
+#endif
+ else
+ pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+
+ if (pmem[id].vbase == 0)
+ goto error_cant_remap;
+
+ pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
+ if (pmem[id].no_allocator)
+ pmem[id].allocated = 0;
+
+#if PMEM_DEBUG
+ debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
+ &debug_fops);
+#endif
+ return 0;
+error_cant_remap:
+ kfree(pmem[id].bitmap);
+err_no_mem_for_metadata:
+ misc_deregister(&pmem[id].dev);
+err_cant_register_device:
+ return -1;
+}
+
+static int pmem_probe(struct platform_device *pdev)
+{
+ struct android_pmem_platform_data *pdata;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ printk(KERN_ALERT "Unable to probe pmem!\n");
+ return -1;
+ }
+ pdata = pdev->dev.platform_data;
+ return pmem_setup(pdata, NULL, NULL);
+}
+
+
+static int pmem_remove(struct platform_device *pdev)
+{
+ int id = pdev->id;
+ __free_page(pfn_to_page(pmem[id].garbage_pfn));
+ misc_deregister(&pmem[id].dev);
+ return 0;
+}
+
+static struct platform_driver pmem_driver = {
+ .probe = pmem_probe,
+ .remove = pmem_remove,
+ .driver = { .name = "android_pmem" }
+};
+
+
+static int __init pmem_init(void)
+{
+ return platform_driver_register(&pmem_driver);
+}
+
+static void __exit pmem_exit(void)
+{
+ platform_driver_unregister(&pmem_driver);
+}
+
+module_init(pmem_init);
+module_exit(pmem_exit);
+
diff --git a/drivers/misc/tegra-baseband/Kconfig b/drivers/misc/tegra-baseband/Kconfig
new file mode 100644
index 000000000000..1f116918296f
--- /dev/null
+++ b/drivers/misc/tegra-baseband/Kconfig
@@ -0,0 +1,32 @@
+menuconfig TEGRA_BB_SUPPORT
+ bool "Tegra baseband support"
+ depends on ARCH_TEGRA
+ ---help---
+ Say Y here to see the options for Tegra baseband support.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if TEGRA_BB_SUPPORT
+
+config TEGRA_BB_POWER
+ bool "Enable tegra baseband power driver"
+ ---help---
+ Adds a power management driver for managing different baseband
+ modems used with the Tegra processor.
+
+ This driver should work with at least the following devices:
+
+ * STE M7400
+ * ...
+
+ Disabled by default. Choose Y here if you want to build the driver.
+
+config TEGRA_BB_M7400
+ bool "Enable driver for M7400 modem"
+ ---help---
+ Enables driver for M7400 modem.
+
+ Disabled by default. Choose Y here if you want to build the driver.
+
+endif # TEGRA_BB_SUPPORT
diff --git a/drivers/misc/tegra-baseband/Makefile b/drivers/misc/tegra-baseband/Makefile
new file mode 100644
index 000000000000..a95d84dbf117
--- /dev/null
+++ b/drivers/misc/tegra-baseband/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for tegra baseband support.
+#
+
+obj-$(CONFIG_TEGRA_BB_POWER) += bb-power.o
+obj-$(CONFIG_TEGRA_BB_M7400) += bb-m7400.o
diff --git a/drivers/misc/tegra-baseband/bb-m7400.c b/drivers/misc/tegra-baseband/bb-m7400.c
new file mode 100644
index 000000000000..4c87245eede4
--- /dev/null
+++ b/drivers/misc/tegra-baseband/bb-m7400.c
@@ -0,0 +1,270 @@
+/*
+ * drivers/misc/tegra-baseband/bb-m7400.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/wakelock.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/tegra-bb-power.h>
+#include <mach/usb_phy.h>
+#include "bb-power.h"
+
+static struct tegra_bb_gpio_data m7400_gpios[] = {
+ { { GPIO_INVALID, GPIOF_OUT_INIT_LOW, "MDM_PWR_ON" }, true },
+ { { GPIO_INVALID, GPIOF_IN, "MDM_PWRSTATUS" }, true },
+ { { GPIO_INVALID, GPIOF_OUT_INIT_HIGH, "MDM_SERVICE" }, true },
+ { { GPIO_INVALID, GPIOF_OUT_INIT_LOW, "MDM_USB_AWR" }, false },
+ { { GPIO_INVALID, GPIOF_IN, "MDM_USB_CWR" }, false },
+ { { GPIO_INVALID, GPIOF_IN, "MDM_RESOUT2" }, true },
+ { { GPIO_INVALID, 0, NULL }, false }, /* End of table */
+};
+static bool ehci_registered;
+static int gpio_awr;
+static int gpio_cwr;
+
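+/* Poll a gpio in 1 ms steps until it reads 'value'; returns -1 on timeout. */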
+static int gpio_wait_timeout(int gpio, int value, int timeout_msec)
+{
+ int count;
+ for (count = 0; count < timeout_msec; ++count) {
+ if (gpio_get_value(gpio) == value)
+ return 0;
+ mdelay(1);
+ }
+ return -1;
+}
+
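+/*
+ * AP/CP handshake lines: the AP drives USB_AWR to announce it is awake or
+ * going down, and reads USB_CWR for the modem's ready/ack indication.
+ */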
+static int m7400_enum_handshake(void)
+{
+ int retval = 0;
+
+ /* Wait for CP to indicate ready - by driving USB_CWR high. */
+ if (gpio_wait_timeout(gpio_cwr, 1, 10) != 0) {
+ pr_info("%s: Error: timeout waiting for modem resume.\n",
+ __func__);
+ retval = -1;
+ }
+
+ /* Signal AP ready - Drive USB_AWR high. */
+ gpio_set_value(gpio_awr, 1);
+
+ return retval;
+}
+
+static int m7400_apup_handshake(bool checkresponse)
+{
+ int retval = 0;
+
+ /* Signal AP ready - Drive USB_AWR high. */
+ gpio_set_value(gpio_awr, 1);
+
+ if (checkresponse) {
+ /* Wait for CP ack - by driving USB_CWR high. */
+ if (gpio_wait_timeout(gpio_cwr, 1, 10) != 0) {
+ pr_info("%s: Error: timeout waiting for modem ack.\n",
+ __func__);
+ retval = -1;
+ }
+ }
+ return retval;
+}
+
+static void m7400_apdown_handshake(void)
+{
+ /* Signal AP going down to modem - Drive USB_AWR low. */
+ /* No need to wait for a CP response */
+ gpio_set_value(gpio_awr, 0);
+}
+
+static int m7400_l2_suspend(void)
+{
+ return 0;
+}
+
+static int m7400_l2_resume(void)
+{
+ return 0;
+}
+
+static void m7400_l3_suspend(void)
+{
+ m7400_apdown_handshake();
+}
+
+static void m7400_l3_resume(void)
+{
+ m7400_apup_handshake(true);
+}
+
+static irqreturn_t m7400_wake_irq(int irq, void *dev_id)
+{
+ pr_info("%s called.\n", __func__);
+
+ /* Resume usb host activity. */
+ /* TBD */
+
+ return IRQ_HANDLED;
+}
+
+static int m7400_power(int code)
+{
+ switch (code) {
+ case PWRSTATE_L2L3:
+ m7400_l3_suspend();
+ break;
+ case PWRSTATE_L3L0:
+ m7400_l3_resume();
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void m7400_ehci_customize(struct platform_device *pdev)
+{
+ struct tegra_ehci_platform_data *ehci_pdata;
+ struct tegra_uhsic_config *hsic_config;
+
+ ehci_pdata = (struct tegra_ehci_platform_data *)
+ pdev->dev.platform_data;
+ hsic_config = (struct tegra_uhsic_config *)
+ ehci_pdata->phy_config;
+
+ /* Register PHY callbacks */
+ hsic_config->postsuspend = m7400_l2_suspend;
+ hsic_config->preresume = m7400_l2_resume;
+
+ /* Override required settings */
+ ehci_pdata->power_down_on_bus_suspend = 0;
+}
+
+static int m7400_attrib_write(struct device *dev, int value)
+{
+ struct tegra_bb_pdata *pdata;
+ static struct platform_device *ehci_device;
+ static bool first_enum = true;
+
+ if (value > 1 || (!ehci_registered && !value)) {
+ /* Supported values are 0/1. */
+ return -1;
+ }
+
+ pdata = (struct tegra_bb_pdata *) dev->platform_data;
+ if (value) {
+
+ /* Check readiness for enumeration */
+ if (first_enum)
+ first_enum = false;
+ else
+ m7400_enum_handshake();
+
+ /* Register ehci controller */
+ ehci_device = pdata->ehci_register();
+ if (ehci_device == NULL) {
+ pr_info("%s - Error: ehci register failed.\n",
+ __func__);
+ return -1;
+ }
+
+ /* Customize PHY setup/callbacks */
+ m7400_ehci_customize(ehci_device);
+
+ ehci_registered = true;
+ } else {
+ /* Unregister ehci controller */
+ if (ehci_device != NULL)
+ pdata->ehci_unregister(ehci_device);
+
+ /* Signal AP going down */
+ m7400_apdown_handshake();
+ ehci_registered = false;
+ }
+
+ return 0;
+}
+
+static struct tegra_bb_gpio_irqdata m7400_gpioirqs[] = {
+ { GPIO_INVALID, "tegra_bb_wake", m7400_wake_irq,
+ IRQF_TRIGGER_FALLING, NULL },
+ { GPIO_INVALID, NULL, NULL, 0, NULL }, /* End of table */
+};
+
+static struct tegra_bb_power_gdata m7400_gdata = {
+ .gpio = m7400_gpios,
+ .gpioirq = m7400_gpioirqs,
+};
+
+static void *m7400_init(void *pdata)
+{
+ struct tegra_bb_pdata *platdata = (struct tegra_bb_pdata *) pdata;
+ union tegra_bb_gpio_id *id = platdata->id;
+
+ /* Fill the gpio ids allocated by hardware */
+ m7400_gpios[0].data.gpio = id->m7400.pwr_on;
+ m7400_gpios[1].data.gpio = id->m7400.pwr_status;
+ m7400_gpios[2].data.gpio = id->m7400.service;
+ m7400_gpios[3].data.gpio = id->m7400.usb_awr;
+ m7400_gpios[4].data.gpio = id->m7400.usb_cwr;
+ m7400_gpios[5].data.gpio = id->m7400.resout2;
+ m7400_gpioirqs[0].id = id->m7400.usb_cwr;
+
+ if (!platdata->ehci_register || !platdata->ehci_unregister) {
+ pr_info("%s - Error: ehci reg/unreg functions missing.\n"
+ , __func__);
+ return 0;
+ }
+
+ gpio_awr = m7400_gpios[3].data.gpio;
+ gpio_cwr = m7400_gpios[4].data.gpio;
+ if (gpio_awr == GPIO_INVALID || gpio_cwr == GPIO_INVALID) {
+ pr_info("%s - Error: Invalid gpio data.\n", __func__);
+ return 0;
+ }
+
+ ehci_registered = false;
+ return (void *) &m7400_gdata;
+}
+
+static void *m7400_deinit(void)
+{
+ return (void *) &m7400_gdata;
+}
+
+static struct tegra_bb_callback m7400_callbacks = {
+ .init = m7400_init,
+ .deinit = m7400_deinit,
+ .attrib = m7400_attrib_write,
+#ifdef CONFIG_PM
+ .power = m7400_power,
+#endif
+};
+
+void *m7400_get_cblist(void)
+{
+ return (void *) &m7400_callbacks;
+}
diff --git a/drivers/misc/tegra-baseband/bb-power.c b/drivers/misc/tegra-baseband/bb-power.c
new file mode 100644
index 000000000000..225d7667f86f
--- /dev/null
+++ b/drivers/misc/tegra-baseband/bb-power.c
@@ -0,0 +1,273 @@
+/*
+ * drivers/misc/tegra-baseband/bb-power.c
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <mach/usb_phy.h>
+#include <mach/tegra-bb-power.h>
+#include "bb-power.h"
+
+static struct tegra_bb_callback *callback;
+static int attr_load_val;
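+/* Modem callback-list getters, indexed by pdata->bb_id; only the M7400
+ * entry is currently populated. */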
+static bb_get_cblist get_cblist[] = {
+ NULL,
+ NULL,
+ NULL,
+ M7400_CB,
+};
+
+static int tegra_bb_power_gpio_init(struct tegra_bb_power_gdata *gdata)
+{
+ int ret;
+ int irq;
+ unsigned gpio_id;
+ const char *gpio_label;
+ unsigned long gpio_flags;
+ struct tegra_bb_gpio_data *gpiolist;
+ struct tegra_bb_gpio_irqdata *gpioirq;
+
+ gpiolist = gdata->gpio;
+ for (; gpiolist->data.gpio != GPIO_INVALID; ++gpiolist) {
+ gpio_id = (gpiolist->data.gpio);
+ gpio_label = (gpiolist->data.label);
+ gpio_flags = (gpiolist->data.flags);
+
+ /* Request the gpio */
+ ret = gpio_request(gpio_id, gpio_label);
+ if (ret) {
+ pr_err("%s: Error: gpio_request for gpio %d failed.\n",
+ __func__, gpio_id);
+ return ret;
+ }
+
+ /* Set gpio direction, as requested */
+ if (gpio_flags == GPIOF_IN)
+ gpio_direction_input(gpio_id);
+ else
+ gpio_direction_output(gpio_id, (!gpio_flags ? 0 : 1));
+
+ /* Enable the gpio */
+ tegra_gpio_enable(gpio_id);
+
+ /* Create a sysfs node, if requested */
+ if (gpiolist->doexport)
+ gpio_export(gpio_id, false);
+ }
+
+ gpioirq = gdata->gpioirq;
+ for (; gpioirq->id != GPIO_INVALID; ++gpioirq) {
+
+ /* Create interrupt handler, if requested */
+ if (gpioirq->handler != NULL) {
+ irq = gpio_to_irq(gpioirq->id);
+ ret = request_threaded_irq(irq, NULL, gpioirq->handler,
+ gpioirq->flags, gpioirq->name, gpioirq->cookie);
+ if (ret < 0) {
+ pr_err("%s: Error: threaded_irq req fail.\n"
+ , __func__);
+ return ret;
+ }
+ ret = enable_irq_wake(irq);
+ if (ret) {
+ pr_err("%s: Error: enable_irq_wake failed.\n",
+ __func__);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int tegra_bb_power_gpio_deinit(struct tegra_bb_power_gdata *gdata)
+{
+ struct tegra_bb_gpio_data *gpiolist;
+ struct tegra_bb_gpio_irqdata *gpioirq;
+
+ gpiolist = gdata->gpio;
+ for (; gpiolist->data.gpio != GPIO_INVALID; ++gpiolist) {
+
+ /* Free the gpio */
+ gpio_free(gpiolist->data.gpio);
+ }
+
+ gpioirq = gdata->gpioirq;
+ for (; gpioirq->id != GPIO_INVALID; ++gpioirq) {
+
+ /* Free the irq */
+ free_irq(gpio_to_irq(gpioirq->id), gpioirq->cookie);
+ }
+ return 0;
+}
+
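+/*
+ * "load" sysfs attribute: a written value is handed to the modem-specific
+ * attrib callback (for M7400, 1 registers the EHCI controller, 0 removes
+ * it) and, if accepted, echoed back on subsequent reads.
+ */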
+static ssize_t tegra_bb_attr_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (callback && callback->attrib) {
+ if (!callback->attrib(dev, val))
+ attr_load_val = val;
+ }
+ return count;
+}
+
+static ssize_t tegra_bb_attr_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", attr_load_val);
+}
+
+static DEVICE_ATTR(load, S_IRUSR | S_IWUSR | S_IRGRP,
+ tegra_bb_attr_read, tegra_bb_attr_write);
+
+static int tegra_bb_power_probe(struct platform_device *device)
+{
+ struct device *dev = &device->dev;
+ struct tegra_bb_pdata *pdata;
+ struct tegra_bb_power_gdata *gdata;
+ int err;
+ unsigned int bb_id;
+
+ pdata = (struct tegra_bb_pdata *) dev->platform_data;
+ if (!pdata) {
+ pr_err("%s - Error: platform data is empty.\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Obtain BB specific callback list */
+ bb_id = pdata->bb_id;
+ if (bb_id < ARRAY_SIZE(get_cblist) && get_cblist[bb_id] != NULL) {
+ callback = (struct tegra_bb_callback *) get_cblist[bb_id]();
+ if (callback && callback->init) {
+ gdata = (struct tegra_bb_power_gdata *)
+ callback->init((void *)pdata);
+
+ if (!gdata) {
+ pr_err("%s - Error: Gpio data is empty.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ /* Initialize gpio as required */
+ tegra_bb_power_gpio_init(gdata);
+ } else {
+ pr_err("%s - Error: init callback is empty.\n",
+ __func__);
+ return -ENODEV;
+ }
+ } else {
+ pr_err("%s - Error: callback data is empty.\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Create the control sysfs node */
+ err = device_create_file(dev, &dev_attr_load);
+ if (err < 0) {
+ pr_err("%s - Error: device_create_file failed.\n", __func__);
+ return -ENODEV;
+ }
+ attr_load_val = 0;
+
+ return 0;
+}
+
+static int tegra_bb_power_remove(struct platform_device *device)
+{
+ struct device *dev = &device->dev;
+ struct tegra_bb_power_gdata *gdata;
+
+ /* BB specific callback */
+ if (callback && callback->deinit) {
+ gdata = (struct tegra_bb_power_gdata *)
+ callback->deinit();
+
+ /* Deinitialize gpios */
+ if (gdata)
+ tegra_bb_power_gpio_deinit(gdata);
+ else {
+ pr_err("%s - Error: Gpio data is empty.\n", __func__);
+ return -ENODEV;
+ }
+ }
+
+ /* Remove the control sysfs node */
+ device_remove_file(dev, &dev_attr_load);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_bb_power_suspend(struct platform_device *device,
+ pm_message_t state)
+{
+ /* BB specific callback */
+ if (callback && callback->power)
+ callback->power(PWRSTATE_L2L3);
+ return 0;
+}
+
+static int tegra_bb_power_resume(struct platform_device *device)
+{
+ /* BB specific callback */
+ if (callback && callback->power)
+ callback->power(PWRSTATE_L3L0);
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_bb_power_driver = {
+ .probe = tegra_bb_power_probe,
+ .remove = tegra_bb_power_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_bb_power_suspend,
+ .resume = tegra_bb_power_resume,
+#endif
+ .driver = {
+ .name = "tegra_baseband_power",
+ },
+};
+
+static int __init tegra_baseband_power_init(void)
+{
+ pr_debug("%s\n", __func__);
+ return platform_driver_register(&tegra_bb_power_driver);
+}
+
+static void __exit tegra_baseband_power_exit(void)
+{
+ pr_debug("%s\n", __func__);
+ platform_driver_unregister(&tegra_bb_power_driver);
+}
+
+module_init(tegra_baseband_power_init)
+module_exit(tegra_baseband_power_exit)
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Tegra modem power management driver");
+MODULE_LICENSE("GPL");
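
A minimal userspace sketch of driving the "load" attribute created by tegra_bb_power_probe(); the value is forwarded to the modem-specific attrib() callback. The sysfs path is an assumption derived from the platform driver name and may carry a device-id suffix on a real board.

/* Hypothetical userspace helper: writes "1" to the load attribute. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/devices/platform/tegra_baseband_power/load", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
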
diff --git a/drivers/misc/tegra-baseband/bb-power.h b/drivers/misc/tegra-baseband/bb-power.h
new file mode 100644
index 000000000000..4f85cca712e6
--- /dev/null
+++ b/drivers/misc/tegra-baseband/bb-power.h
@@ -0,0 +1,60 @@
+/*
+ * drivers/misc/tegra-baseband/bb-power.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+enum tegra_bb_pwrstate {
+ PWRSTATE_L2L3,
+ PWRSTATE_L3L0,
+ PWRSTATE_INVALID,
+};
+
+struct tegra_bb_gpio_data {
+ struct gpio data;
+ bool doexport;
+};
+
+struct tegra_bb_gpio_irqdata {
+ int id;
+ const char *name;
+ irq_handler_t handler;
+ int flags;
+ void *cookie;
+};
+
+struct tegra_bb_power_gdata {
+ struct tegra_bb_gpio_data *gpio;
+ struct tegra_bb_gpio_irqdata *gpioirq;
+};
+
+typedef void* (*bb_get_cblist)(void);
+typedef void* (*bb_init_cb)(void *pdata);
+typedef void* (*bb_deinit_cb)(void);
+typedef int (*bb_power_cb)(int code);
+typedef int (*bb_attrib_cb)(struct device *dev, int value);
+
+struct tegra_bb_callback {
+ bb_init_cb init;
+ bb_deinit_cb deinit;
+ bb_power_cb power;
+ bb_attrib_cb attrib;
+ bool valid;
+};
+
+#ifdef CONFIG_TEGRA_BB_M7400
+extern void *m7400_get_cblist(void);
+#define M7400_CB m7400_get_cblist
+#else
+#define M7400_CB NULL
+#endif
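
A minimal sketch of how a modem-specific file plugs into this callback list; the m9999_* names are purely illustrative (the only real instance referenced here is the M7400 one selected by CONFIG_TEGRA_BB_M7400).

/* Illustrative only: a hypothetical bb-m9999.c exposing the callback set
 * that tegra_bb_power_probe() consumes via get_cblist[]. A real init()
 * would return a populated struct tegra_bb_power_gdata. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include "bb-power.h"

static void *m9999_init(void *pdata)
{
	return NULL;	/* real code: build and return the gpio/irq tables */
}

static void *m9999_deinit(void)
{
	return NULL;	/* real code: return the tables so they can be freed */
}

static int m9999_power(int code)
{
	return 0;	/* handle PWRSTATE_L2L3 / PWRSTATE_L3L0 transitions */
}

static int m9999_attrib(struct device *dev, int value)
{
	return 0;	/* react to writes to the "load" sysfs attribute */
}

static struct tegra_bb_callback m9999_callback = {
	.init = m9999_init,
	.deinit = m9999_deinit,
	.power = m9999_power,
	.attrib = m9999_attrib,
	.valid = true,
};

void *m9999_get_cblist(void)
{
	return &m9999_callback;
}
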
diff --git a/drivers/misc/tegra-cryptodev.c b/drivers/misc/tegra-cryptodev.c
new file mode 100644
index 000000000000..d5ed6a22ddac
--- /dev/null
+++ b/drivers/misc/tegra-cryptodev.c
@@ -0,0 +1,349 @@
+/*
+ * drivers/misc/tegra-cryptodev.c
+ *
+ * crypto dev node for NVIDIA tegra aes hardware
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/uaccess.h>
+#include <crypto/rng.h>
+
+#include "tegra-cryptodev.h"
+
+#define NBUFS 2
+
+struct tegra_crypto_ctx {
+ struct crypto_ablkcipher *ecb_tfm;
+ struct crypto_ablkcipher *cbc_tfm;
+ struct crypto_rng *rng;
+ u8 seed[TEGRA_CRYPTO_RNG_SEED_SIZE];
+ int use_ssk;
+};
+
+struct tegra_crypto_completion {
+ struct completion restart;
+ int req_err;
+};
+
+static int alloc_bufs(unsigned long *buf[NBUFS])
+{
+ int i;
+
+ for (i = 0; i < NBUFS; i++) {
+ buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf[i])
+ goto err_free_buf;
+ }
+
+ return 0;
+
+err_free_buf:
+ while (i-- > 0)
+ free_page((unsigned long)buf[i]);
+
+ return -ENOMEM;
+}
+
+static void free_bufs(unsigned long *buf[NBUFS])
+{
+ int i;
+
+ for (i = 0; i < NBUFS; i++)
+ free_page((unsigned long)buf[i]);
+}
+
+static int tegra_crypto_dev_open(struct inode *inode, struct file *filp)
+{
+ struct tegra_crypto_ctx *ctx;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(struct tegra_crypto_ctx), GFP_KERNEL);
+ if (!ctx) {
+ pr_err("no memory for context\n");
+ return -ENOMEM;
+ }
+
+ ctx->ecb_tfm = crypto_alloc_ablkcipher("ecb-aes-tegra",
+ CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
+ if (IS_ERR(ctx->ecb_tfm)) {
+ pr_err("Failed to load transform for ecb-aes-tegra: %ld\n",
+ PTR_ERR(ctx->ecb_tfm));
+ ret = PTR_ERR(ctx->ecb_tfm);
+ goto fail_ecb;
+ }
+
+ ctx->cbc_tfm = crypto_alloc_ablkcipher("cbc-aes-tegra",
+ CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
+ if (IS_ERR(ctx->cbc_tfm)) {
+ pr_err("Failed to load transform for cbc-aes-tegra: %ld\n",
+ PTR_ERR(ctx->cbc_tfm));
+ ret = PTR_ERR(ctx->cbc_tfm);
+ goto fail_cbc;
+ }
+
+ ctx->rng = crypto_alloc_rng("rng-aes-tegra", CRYPTO_ALG_TYPE_RNG, 0);
+ if (IS_ERR(ctx->rng)) {
+ pr_err("Failed to load transform for tegra rng: %ld\n",
+ PTR_ERR(ctx->rng));
+ ret = PTR_ERR(ctx->rng);
+ goto fail_rng;
+ }
+
+ filp->private_data = ctx;
+ return ret;
+
+fail_rng:
+ crypto_free_ablkcipher(ctx->cbc_tfm);
+
+fail_cbc:
+ crypto_free_ablkcipher(ctx->ecb_tfm);
+
+fail_ecb:
+ kfree(ctx);
+ return ret;
+}
+
+static int tegra_crypto_dev_release(struct inode *inode, struct file *filp)
+{
+ struct tegra_crypto_ctx *ctx = filp->private_data;
+
+ crypto_free_ablkcipher(ctx->ecb_tfm);
+ crypto_free_ablkcipher(ctx->cbc_tfm);
+ crypto_free_rng(ctx->rng);
+ kfree(ctx);
+ filp->private_data = NULL;
+ return 0;
+}
+
+static void tegra_crypt_complete(struct crypto_async_request *req, int err)
+{
+ struct tegra_crypto_completion *done = req->data;
+
+ if (err != -EINPROGRESS) {
+ done->req_err = err;
+ complete(&done->restart);
+ }
+}
+
+static int process_crypt_req(struct tegra_crypto_ctx *ctx, struct tegra_crypt_req *crypt_req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct ablkcipher_request *req = NULL;
+ struct scatterlist in_sg;
+ struct scatterlist out_sg;
+ unsigned long *xbuf[NBUFS];
+ int ret = 0, size = 0;
+ unsigned long total = 0;
+ struct tegra_crypto_completion tcrypt_complete;
+ const u8 *key = NULL;
+
+ if (crypt_req->op & TEGRA_CRYPTO_ECB) {
+ req = ablkcipher_request_alloc(ctx->ecb_tfm, GFP_KERNEL);
+ tfm = ctx->ecb_tfm;
+ } else {
+ req = ablkcipher_request_alloc(ctx->cbc_tfm, GFP_KERNEL);
+ tfm = ctx->cbc_tfm;
+ }
+ if (!req) {
+ pr_err("%s: Failed to allocate request\n", __func__);
+ return -ENOMEM;
+ }
+
+ if ((crypt_req->keylen < 0) || (crypt_req->keylen > AES_MAX_KEY_SIZE)) {
+ ret = -EINVAL;
+ goto process_req_out;
+ }
+
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+
+ if (!ctx->use_ssk)
+ key = crypt_req->key;
+
+ ret = crypto_ablkcipher_setkey(tfm, key, crypt_req->keylen);
+ if (ret < 0) {
+ pr_err("setkey failed");
+ goto process_req_out;
+ }
+
+ ret = alloc_bufs(xbuf);
+ if (ret < 0) {
+ pr_err("alloc_bufs failed");
+ goto process_req_out;
+ }
+
+ init_completion(&tcrypt_complete.restart);
+
+ ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tegra_crypt_complete, &tcrypt_complete);
+
+ total = crypt_req->plaintext_sz;
+ while (total > 0) {
+ size = min(total, PAGE_SIZE);
+ if (copy_from_user((void *)xbuf[0],
+ (void __user *)crypt_req->plaintext, size)) {
+ pr_debug("%s: copy_from_user failed\n", __func__);
+ ret = -EFAULT;
+ goto process_req_buf_out;
+ }
+ sg_init_one(&in_sg, xbuf[0], size);
+ sg_init_one(&out_sg, xbuf[1], size);
+
+ ablkcipher_request_set_crypt(req, &in_sg,
+ &out_sg, size, crypt_req->iv);
+
+ INIT_COMPLETION(tcrypt_complete.restart);
+ tcrypt_complete.req_err = 0;
+ ret = crypt_req->encrypt ?
+ crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+
+ if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+ /* crypto driver is asynchronous */
+ ret = wait_for_completion_interruptible(&tcrypt_complete.restart);
+
+ if (ret < 0)
+ goto process_req_buf_out;
+
+ if (tcrypt_complete.req_err < 0) {
+ ret = tcrypt_complete.req_err;
+ goto process_req_buf_out;
+ }
+ } else if (ret < 0) {
+ pr_debug("%scrypt failed (%d)\n",
+ crypt_req->encrypt ? "en" : "de", ret);
+ goto process_req_buf_out;
+ }
+
+ if (copy_to_user((void __user *)crypt_req->result,
+ (const void *)xbuf[1], size)) {
+ ret = -EFAULT;
+ goto process_req_buf_out;
+ }
+
+ total -= size;
+ crypt_req->result += size;
+ crypt_req->plaintext += size;
+ }
+
+process_req_buf_out:
+ free_bufs(xbuf);
+process_req_out:
+ ablkcipher_request_free(req);
+
+ return ret;
+}
+
+static long tegra_crypto_dev_ioctl(struct file *filp,
+ unsigned int ioctl_num, unsigned long arg)
+{
+ struct tegra_crypto_ctx *ctx = filp->private_data;
+ struct tegra_crypt_req crypt_req;
+ struct tegra_rng_req rng_req;
+ char *rng;
+ int ret = 0;
+
+ switch (ioctl_num) {
+ case TEGRA_CRYPTO_IOCTL_NEED_SSK:
+ ctx->use_ssk = (int)arg;
+ break;
+
+ case TEGRA_CRYPTO_IOCTL_PROCESS_REQ:
+ if (copy_from_user(&crypt_req, (void __user *)arg, sizeof(crypt_req))) {
+ pr_err("%s: copy_from_user failed\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = process_crypt_req(ctx, &crypt_req);
+ break;
+
+ case TEGRA_CRYPTO_IOCTL_SET_SEED:
+ if (copy_from_user(&rng_req, (void __user *)arg, sizeof(rng_req)))
+ return -EFAULT;
+
+ memcpy(ctx->seed, rng_req.seed, TEGRA_CRYPTO_RNG_SEED_SIZE);
+
+ ret = crypto_rng_reset(ctx->rng, ctx->seed,
+ crypto_rng_seedsize(ctx->rng));
+ break;
+
+ case TEGRA_CRYPTO_IOCTL_GET_RANDOM:
+ if (copy_from_user(&rng_req, (void __user *)arg, sizeof(rng_req)))
+ return -EFAULT;
+
+ rng = kzalloc(rng_req.nbytes, GFP_KERNEL);
+ if (!rng) {
+ pr_err("mem alloc for rng fail");
+ ret = -ENODATA;
+ goto rng_out;
+ }
+
+ ret = crypto_rng_get_bytes(ctx->rng, rng,
+ rng_req.nbytes);
+
+ if (ret != rng_req.nbytes) {
+ pr_err("rng failed");
+ ret = -ENODATA;
+ goto rng_out;
+ }
+
+ ret = copy_to_user((void __user *)rng_req.rdata,
+ (const void *)rng, rng_req.nbytes);
+ ret = ret ? -EFAULT : 0;
+rng_out:
+ if (rng)
+ kfree(rng);
+ break;
+
+ default:
+ pr_debug("invalid ioctl code(%d)", ioctl_num);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+struct file_operations tegra_crypto_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_crypto_dev_open,
+ .release = tegra_crypto_dev_release,
+ .unlocked_ioctl = tegra_crypto_dev_ioctl,
+};
+
+struct miscdevice tegra_crypto_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra-crypto",
+ .fops = &tegra_crypto_fops,
+};
+
+static int __init tegra_crypto_dev_init(void)
+{
+ return misc_register(&tegra_crypto_device);
+}
+
+late_initcall(tegra_crypto_dev_init);
+
+MODULE_DESCRIPTION("Tegra AES hw device node.");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/tegra-cryptodev.h b/drivers/misc/tegra-cryptodev.h
new file mode 100644
index 000000000000..ed62a52eca03
--- /dev/null
+++ b/drivers/misc/tegra-cryptodev.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __TEGRA_CRYPTODEV_H
+#define __TEGRA_CRYPTODEV_H
+
+#include <crypto/aes.h>
+
+#include <asm-generic/ioctl.h>
+
+/* ioctl arg = 1 if you want to use ssk. arg = 0 to use normal key */
+#define TEGRA_CRYPTO_IOCTL_NEED_SSK _IOWR(0x98, 100, int)
+#define TEGRA_CRYPTO_IOCTL_PROCESS_REQ _IOWR(0x98, 101, int*)
+#define TEGRA_CRYPTO_IOCTL_SET_SEED _IOWR(0x98, 102, int*)
+#define TEGRA_CRYPTO_IOCTL_GET_RANDOM _IOWR(0x98, 103, int*)
+
+#define TEGRA_CRYPTO_MAX_KEY_SIZE AES_MAX_KEY_SIZE
+#define TEGRA_CRYPTO_IV_SIZE AES_BLOCK_SIZE
+#define DEFAULT_RNG_BLK_SZ 16
+
+/* the seed consists of 16 bytes of key + 16 bytes of init vector */
+#define TEGRA_CRYPTO_RNG_SEED_SIZE AES_KEYSIZE_128 + DEFAULT_RNG_BLK_SZ
+#define TEGRA_CRYPTO_RNG_SIZE SZ_16
+
+/* encrypt/decrypt operations */
+#define TEGRA_CRYPTO_ECB BIT(0)
+#define TEGRA_CRYPTO_CBC BIT(1)
+#define TEGRA_CRYPTO_RNG BIT(2)
+
+/* a pointer to this struct needs to be passed to:
+ * TEGRA_CRYPTO_IOCTL_PROCESS_REQ
+ */
+struct tegra_crypt_req {
+ int op; /* e.g. TEGRA_CRYPTO_ECB */
+ bool encrypt;
+ char key[TEGRA_CRYPTO_MAX_KEY_SIZE];
+ int keylen;
+ char iv[TEGRA_CRYPTO_IV_SIZE];
+ int ivlen;
+ u8 *plaintext;
+ int plaintext_sz;
+ u8 *result;
+};
+
+/* pointer to this struct should be passed to:
+ * TEGRA_CRYPTO_IOCTL_SET_SEED
+ * TEGRA_CRYPTO_IOCTL_GET_RANDOM
+ */
+struct tegra_rng_req {
+ u8 seed[TEGRA_CRYPTO_RNG_SEED_SIZE];
+ u8 *rdata; /* random generated data */
+ int nbytes; /* random data length */
+};
+
+#endif
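
A minimal userspace sketch of the ioctl interface defined above, performing one AES-128 ECB encryption. The /dev/tegra-crypto path follows from the misc device name; building against this kernel header (for its u8/bool types) is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "tegra-cryptodev.h"	/* assumes the kernel types it uses are available */

int main(void)
{
	unsigned char key[16] = { 0 };		/* AES-128 key, all zeroes */
	unsigned char in[16] = "0123456789abcde";
	unsigned char out[16];
	struct tegra_crypt_req req;
	int fd, err;

	fd = open("/dev/tegra-crypto", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.op = TEGRA_CRYPTO_ECB;
	req.encrypt = 1;
	memcpy(req.key, key, sizeof(key));
	req.keylen = sizeof(key);
	req.plaintext = in;
	req.plaintext_sz = sizeof(in);
	req.result = out;

	err = ioctl(fd, TEGRA_CRYPTO_IOCTL_PROCESS_REQ, &req);
	if (err)
		perror("TEGRA_CRYPTO_IOCTL_PROCESS_REQ");

	close(fd);
	return err ? 1 : 0;
}
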
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 000000000000..2141124a6c12
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,156 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid) {
+ unsigned long flags;
+ struct uid_stat *entry;
+
+ spin_lock_irqsave(&uid_lock, flags);
+ list_for_each_entry(entry, &uid_list, link) {
+ if (entry->uid == uid) {
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return NULL;
+}
+
+static int tcp_snd_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int bytes;
+ char *p = page;
+ struct uid_stat *uid_entry = (struct uid_stat *) data;
+ if (!data)
+ return 0;
+
+ bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
+ p += sprintf(p, "%u\n", bytes);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int tcp_rcv_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int bytes;
+ char *p = page;
+ struct uid_stat *uid_entry = (struct uid_stat *) data;
+ if (!data)
+ return 0;
+
+ bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
+ p += sprintf(p, "%u\n", bytes);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid) {
+ unsigned long flags;
+ char uid_s[32];
+ struct uid_stat *new_uid;
+ struct proc_dir_entry *entry;
+
+ /* Create the uid stat struct and append it to the list. */
+ if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL)
+ return NULL;
+
+ new_uid->uid = uid;
+ /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+ atomic_set(&new_uid->tcp_rcv, INT_MIN);
+ atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+ spin_lock_irqsave(&uid_lock, flags);
+ list_add_tail(&new_uid->link, &uid_list);
+ spin_unlock_irqrestore(&uid_lock, flags);
+
+ sprintf(uid_s, "%d", uid);
+ entry = proc_mkdir(uid_s, parent);
+
+ /* Keep reference to uid_stat so we know what uid to read stats from. */
+ create_proc_read_entry("tcp_snd", S_IRUGO, entry , tcp_snd_read_proc,
+ (void *) new_uid);
+
+ create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc,
+ (void *) new_uid);
+
+ return new_uid;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ if ((entry = find_uid_stat(uid)) == NULL &&
+ ((entry = create_stat(uid)) == NULL)) {
+ return -1;
+ }
+ atomic_add(size, &entry->tcp_snd);
+ return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ if ((entry = find_uid_stat(uid)) == NULL &&
+ ((entry = create_stat(uid)) == NULL)) {
+ return -1;
+ }
+ atomic_add(size, &entry->tcp_rcv);
+ return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+ parent = proc_mkdir("uid_stat", NULL);
+ if (!parent) {
+ pr_err("uid_stat: failed to create proc entry\n");
+ return -1;
+ }
+ return 0;
+}
+
+__initcall(uid_stat_init);
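
A sketch of how a caller accounts traffic against these counters, in the style of the TCP send/receive hooks that use this API; illustrative only, not part of the patch.

#include <linux/cred.h>
#include <linux/uid_stat.h>

/* Credit transmitted/received bytes to the current task's uid. */
static void account_tcp_tx(int bytes)
{
	uid_stat_tcp_snd(current_uid(), bytes);
}

static void account_tcp_rx(int bytes)
{
	uid_stat_tcp_rcv(current_uid(), bytes);
}
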
diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c
new file mode 100644
index 000000000000..f5b95152948b
--- /dev/null
+++ b/drivers/misc/wl127x-rfkill.c
@@ -0,0 +1,121 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/wl127x-rfkill.h>
+
+static int wl127x_rfkill_set_power(void *data, enum rfkill_state state)
+{
+ int nshutdown_gpio = (int) data;
+
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ gpio_set_value(nshutdown_gpio, 1);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ gpio_set_value(nshutdown_gpio, 0);
+ break;
+ default:
+ printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state);
+ }
+ return 0;
+}
+
+static int wl127x_rfkill_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+ enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED; /* off */
+
+ rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio");
+ if (unlikely(rc))
+ return rc;
+
+ rc = gpio_direction_output(pdata->nshutdown_gpio, 0);
+ if (unlikely(rc))
+ return rc;
+
+ rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state);
+ wl127x_rfkill_set_power((void *) pdata->nshutdown_gpio, default_state);
+
+ pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+ if (unlikely(!pdata->rfkill))
+ return -ENOMEM;
+
+ pdata->rfkill->name = "wl127x";
+ pdata->rfkill->state = default_state;
+ /* userspace cannot take exclusive control */
+ pdata->rfkill->user_claim_unsupported = 1;
+ pdata->rfkill->user_claim = 0;
+ pdata->rfkill->data = (void *) pdata->nshutdown_gpio;
+ pdata->rfkill->toggle_radio = wl127x_rfkill_set_power;
+
+ rc = rfkill_register(pdata->rfkill);
+
+ if (unlikely(rc)) {
+ rfkill_free(pdata->rfkill);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int wl127x_rfkill_remove(struct platform_device *pdev)
+{
+ struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+
+ rfkill_unregister(pdata->rfkill);
+ rfkill_free(pdata->rfkill);
+ gpio_free(pdata->nshutdown_gpio);
+
+ return 0;
+}
+
+static struct platform_driver wl127x_rfkill_platform_driver = {
+ .probe = wl127x_rfkill_probe,
+ .remove = wl127x_rfkill_remove,
+ .driver = {
+ .name = "wl127x-rfkill",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wl127x_rfkill_init(void)
+{
+ return platform_driver_register(&wl127x_rfkill_platform_driver);
+}
+
+static void __exit wl127x_rfkill_exit(void)
+{
+ platform_driver_unregister(&wl127x_rfkill_platform_driver);
+}
+
+module_init(wl127x_rfkill_init);
+module_exit(wl127x_rfkill_exit);
+
+MODULE_ALIAS("platform:wl127x");
+MODULE_DESCRIPTION("wl127x-rfkill");
+MODULE_AUTHOR("Motorola");
+MODULE_LICENSE("GPL");
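
A board-file sketch showing how this driver is typically instantiated; the GPIO number is illustrative and the platform-data layout is taken from the fields the driver uses above.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/wl127x-rfkill.h>

/* Illustrative GPIO number; the real nSHUTDOWN line is board specific. */
static struct wl127x_rfkill_platform_data board_wl127x_pdata = {
	.nshutdown_gpio = 26,
};

static struct platform_device board_wl127x_device = {
	.name = "wl127x-rfkill",
	.id = -1,
	.dev = {
		.platform_data = &board_wl127x_pdata,
	},
};

static int __init board_wl127x_init(void)
{
	return platform_device_register(&board_wl127x_device);
}
device_initcall(board_wl127x_init);
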
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf924..ebb4afe6c702 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
If unsure, say Y here.
+config MMC_BLOCK_DEFERRED_RESUME
+ bool "Defer MMC layer resume until I/O is requested"
+ depends on MMC_BLOCK
+ default n
+ help
+ Say Y here to defer MMC layer resume until I/O is
+ requested. This reduces overall resume latency and
+ saves power when there's an SD card inserted but not in use.
+
config SDIO_UART
tristate "SDIO UART/GPS class support"
help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4c1a648d00fc..6db913d6342f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,8 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define MMC_CMD_RETRIES 10
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -136,11 +138,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
@@ -575,18 +573,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
-
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
+ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
return ERR_ABORT;
default:
@@ -943,6 +945,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
if (!mmc_card_blockaddr(card))
brq->cmd.arg <<= 9;
brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->cmd.retries = MMC_CMD_RETRIES;
brq->data.blksz = 512;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1178,12 +1181,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 0;
}
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host)) {
+ mmc_resume_bus(card->host);
+ mmc_blk_set_blksize(md, card);
+ }
+#endif
+
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
mmc_claim_host(card->host);
@@ -1288,6 +1301,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -1527,6 +1541,9 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
@@ -1552,6 +1569,9 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
@@ -1575,7 +1595,9 @@ static int mmc_blk_resume(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
+#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
mmc_blk_set_blksize(md, card);
+#endif
/*
* Resume involves the card going into idle state,
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517f..85c2e1acd156 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,20 @@ config MMC_CLKGATE
support handling this in order for it to be of any use.
If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+ boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ If you say Y here, support will be added for embedded SDIO
+ devices which do not contain the necessary enumeration
+ support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+ bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ If you say Y here, the MMC layer will be extra paranoid
+ about re-trying SD init requests. This can be a useful
+ work-around for buggy controllers and hardware. Enable
+ if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d637982b0352..344d2414f05c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -24,6 +24,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+#include <linux/wakelock.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -107,6 +108,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries--;
cmd->error = 0;
+ if (mrq->data) {
+ mrq->data->error = 0;
+ if (mrq->stop)
+ mrq->stop->error = 0;
+ }
host->ops->request(host, mrq);
} else {
led_trigger_event(host->led, LED_OFF);
@@ -1215,6 +1221,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ printk(KERN_INFO "%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ mmc_power_up(host);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ }
+
+ if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+ printk(KERN_INFO "%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_resume_bus);
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -1280,6 +1316,7 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
spin_unlock_irqrestore(&host->lock, flags);
#endif
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -1762,6 +1799,7 @@ void mmc_rescan(struct work_struct *work)
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
+ bool extend_wakelock = false;
if (host->rescan_disable)
return;
@@ -1776,6 +1814,12 @@ void mmc_rescan(struct work_struct *work)
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
+ /*
+ * If the card was removed, the bus will be marked as dead -
+ * extend the wakelock so userspace can respond.
+ */
+ if (host->bus_dead)
+ extend_wakelock = true;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
@@ -1800,16 +1844,24 @@ void mmc_rescan(struct work_struct *work)
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+ extend_wakelock = true;
break;
+ }
if (freqs[i] <= host->f_min)
break;
}
mmc_release_host(host);
out:
- if (host->caps & MMC_CAP_NEEDS_POLL)
+ if (extend_wakelock)
+ wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+ else
+ wake_unlock(&host->detect_wake_lock);
+ if (host->caps & MMC_CAP_NEEDS_POLL) {
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, HZ);
+ }
}
void mmc_start_host(struct mmc_host *host)
@@ -1829,7 +1881,8 @@ void mmc_stop_host(struct mmc_host *host)
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
@@ -1954,9 +2007,13 @@ int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
- cancel_delayed_work(&host->detect);
+ if (cancel_delayed_work(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
mmc_bus_get(host);
@@ -1997,6 +2054,12 @@ int mmc_resume_host(struct mmc_host *host)
int err = 0;
mmc_bus_get(host);
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_put(host);
+ return 0;
+ }
+
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
mmc_power_up(host);
@@ -2047,9 +2110,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_needs_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
if (!host->bus_ops || host->bus_ops->suspend)
break;
@@ -2070,6 +2138,10 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
mmc_detect_change(host, 0);
@@ -2080,6 +2152,22 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.cccr = cccr;
+ host->embedded_sdio_data.funcs = funcs;
+ host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
static int __init mmc_init(void)
{
int ret;
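
A board-file sketch of how mmc_set_embedded_sdio_data() is meant to be used for a non-enumerable SDIO WLAN part; the vendor/device/class numbers are illustrative, and the header location of struct sdio_embedded_func's declaration is assumed from the companion SDIO patches.

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>

/* Illustrative CIS/CCCR contents for a hypothetical embedded WLAN function. */
static struct sdio_cis board_wifi_cis = {
	.vendor		= 0x02d0,
	.device		= 0x4329,
	.blksize	= 512,
	.max_dtr	= 24000000,
};

static struct sdio_cccr board_wifi_cccr = {
	.multi_block	= 1,
	.wide_bus	= 1,
	.high_speed	= 1,
};

static struct sdio_embedded_func board_wifi_func = {
	.f_class	= SDIO_CLASS_WLAN,
	.f_maxblksize	= 512,
};

void board_wifi_register_embedded_data(struct mmc_host *host)
{
	mmc_set_embedded_sdio_data(host, &board_wifi_cis, &board_wifi_cccr,
				   &board_wifi_func, 1);
}
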
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 793d0a0dad8d..e09f0a7eb652 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -284,6 +284,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
+ wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
+ kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
@@ -336,7 +338,8 @@ int mmc_add_host(struct mmc_host *host)
#endif
mmc_start_host(host);
- register_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ register_pm_notifier(&host->pm_notify);
return 0;
}
@@ -353,7 +356,9 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
- unregister_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ unregister_pm_notifier(&host->pm_notify);
+
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
@@ -380,6 +385,7 @@ void mmc_free_host(struct mmc_host *host)
spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
spin_unlock(&mmc_host_lock);
+ wake_lock_destroy(&host->detect_wake_lock);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4c281a4bf058..92bd3737c4b0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -348,11 +348,11 @@ static int mmc_read_switch(struct mmc_card *card)
}
card->sw_caps.sd3_curr_limit = status[7];
- } else {
- if (status[13] & 0x02)
- card->sw_caps.hs_max_dtr = 50000000;
}
+ if (status[13] & 0x02)
+ card->sw_caps.hs_max_dtr = 50000000;
+
out:
kfree(status);
@@ -799,6 +799,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
if (!reinit) {
/*
@@ -825,7 +828,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/*
* Fetch switch information from card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ for (retries = 1; retries <= 3; retries++) {
+ err = mmc_read_switch(card);
+ if (!err) {
+ if (retries > 1) {
+ printk(KERN_WARNING
+ "%s: recovered\n",
+ mmc_hostname(host));
+ }
+ break;
+ } else {
+ printk(KERN_WARNING
+ "%s: read switch failed (attempt %d)\n",
+ mmc_hostname(host), retries);
+ }
+ }
+#else
err = mmc_read_switch(card);
+#endif
+
if (err)
return err;
}
@@ -1024,18 +1046,36 @@ static void mmc_sd_remove(struct mmc_host *host)
*/
static void mmc_sd_detect(struct mmc_host *host)
{
- int err;
+ int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries = 5;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
-
+
mmc_claim_host(host);
/*
* Just check if our card has been removed.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ while (retries) {
+ err = mmc_send_status(host->card, NULL);
+ if (err) {
+ retries--;
+ udelay(5);
+ continue;
+ }
+ break;
+ }
+ if (!retries) {
+ printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+ __func__, mmc_hostname(host), err);
+ }
+#else
err = mmc_send_status(host->card, NULL);
-
+#endif
mmc_release_host(host);
if (err) {
@@ -1074,12 +1114,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
static int mmc_sd_resume(struct mmc_host *host)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, host->card);
+
+ if (err) {
+ printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+ mmc_hostname(host), err, retries);
+ mdelay(5);
+ retries--;
+ continue;
+ }
+ break;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
mmc_release_host(host);
return err;
@@ -1131,6 +1190,9 @@ int mmc_attach_sd(struct mmc_host *host)
{
int err;
u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1195,9 +1257,27 @@ int mmc_attach_sd(struct mmc_host *host)
/*
* Detect and init the card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, NULL);
+ if (err) {
+ retries--;
+ continue;
+ }
+ break;
+ }
+
+ if (!retries) {
+ printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+ mmc_hostname(host), err);
+ goto err;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, NULL);
if (err)
goto err;
+#endif
mmc_release_host(host);
err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ac492ac974e1..a2c1c4d83718 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -27,6 +27,10 @@
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
@@ -449,19 +453,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto finish;
}
- /*
- * Read the common registers.
- */
- err = sdio_read_cccr(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cccr)
+ memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+ else {
+#endif
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+ else {
+#endif
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -827,14 +847,36 @@ int mmc_attach_sdio(struct mmc_host *host)
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs)
+ card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
- err = sdio_init_func(host->card, i + 1);
- if (err)
- goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
/*
* Enable Runtime PM for this func (if supported)
*/
@@ -882,3 +924,77 @@ err:
return err;
}
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+ printk(KERN_DEBUG "%s():\n", __func__);
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto err;
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto err;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto err;
+
+ mmc_release_host(host);
+ return 0;
+err:
+ printk(KERN_ERR "%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
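
A sketch of the intended client of sdio_reset_comm(): an SDIO function driver re-establishing communication after resetting its chip. The extern declaration is used here only because the header that carries the prototype is not part of this hunk.

#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>

extern int sdio_reset_comm(struct mmc_card *card);

/* Illustrative recovery path for a hypothetical SDIO WLAN function driver. */
static int wifi_recover_bus(struct sdio_func *func)
{
	/* Re-runs CMD5/CMD3/CMD7 and restores high-speed, 4-bit operation. */
	return sdio_reset_comm(func->card);
}
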
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index e4e6822d09e3..ca58c307a129 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -23,6 +23,10 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
@@ -256,7 +260,14 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ /*
+ * If this device is embedded then we never allocated
+ * cis tables for this func
+ */
+ if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+ sdio_free_func_cis(func);
if (func->info)
kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 0f687cdeb064..549a34144646 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
EXPORT_SYMBOL_GPL(sdio_readb);
/**
+ * sdio_readb_ext - read a single byte from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ * @in: value to add to argument
+ *
+ * Reads a single byte from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xff
+ * is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+ int *err_ret, unsigned in)
+{
+ int ret;
+ unsigned char val;
+
+ BUG_ON(!func);
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFF;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
+/**
* sdio_writeb - write a single byte to a SDIO function
* @func: SDIO function to access
* @b: byte to write
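
A sketch of a caller of sdio_readb_ext(); the register address and data byte are illustrative, and the extern declaration stands in for the header that exports the prototype.

#include <linux/mmc/sdio_func.h>

extern unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
				    int *err_ret, unsigned in);

/* Illustrative read that also supplies a data byte in the CMD52 argument,
 * as some chipsets expect for read-after-write style accesses. */
static u8 chip_readb_raw(struct sdio_func *func, unsigned int addr, u8 data)
{
	int err;
	u8 val;

	val = sdio_readb_ext(func, addr, &err, data);
	return err ? 0xff : val;
}
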
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b4b83f302e32..f5ea51bd0ed3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for MMC/SD host controller drivers
#
+GCOV_PROFILE_sdhci-tegra.o := y
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 6414efeddca0..1179f1be4318 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -196,16 +196,42 @@ EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
+ int ret;
+
+ ret = sdhci_suspend_host(host, state);
+ if (ret) {
+ dev_err(&dev->dev, "suspend failed, error = %d\n", ret);
+ return ret;
+ }
- return sdhci_suspend_host(host, state);
+ if (host->ops && host->ops->suspend)
+ ret = host->ops->suspend(host, state);
+ if (ret) {
+ dev_err(&dev->dev, "suspend hook failed, error = %d\n", ret);
+ sdhci_resume_host(host);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
int sdhci_pltfm_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
+ int ret = 0;
+
+ if (host->ops && host->ops->resume)
+ ret = host->ops->resume(host);
+ if (ret) {
+ dev_err(&dev->dev, "resume hook failed, error = %d\n", ret);
+ return ret;
+ }
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret)
+ dev_err(&dev->dev, "resume failed, error = %d\n", ret);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
#endif /* CONFIG_PM */
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f40840..b92c7f29a4e7 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -17,7 +17,7 @@
struct sdhci_pltfm_data {
struct sdhci_ops *ops;
- unsigned int quirks;
+ u64 quirks;
};
struct sdhci_pltfm_host {
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 18b0bd31de78..1cca7cfe9bc0 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -18,14 +18,83 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/slab.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
#include <mach/gpio.h>
#include <mach/sdhci.h>
+#include <mach/io_dpd.h>
#include "sdhci-pltfm.h"
+#define SDHCI_VENDOR_CLOCK_CNTRL 0x100
+#define SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK 0x1
+#define SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE 0x8
+#define SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT 8
+#define SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT 16
+
+#define SDHCI_VENDOR_MISC_CNTRL 0x120
+#define SDHCI_VENDOR_MISC_CNTRL_SDMMC_SPARE0_ENABLE_SD_3_0 0x20
+
+#define SDMMC_AUTO_CAL_CONFIG 0x1E4
+#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
+#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
+#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET 0x70
+#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET 0x62
+
+#define SDHOST_1V8_OCR_MASK 0x8
+#define SDHOST_HIGH_VOLT_MIN 2700000
+#define SDHOST_HIGH_VOLT_MAX 3600000
+#define SDHOST_LOW_VOLT_MIN 1800000
+#define SDHOST_LOW_VOLT_MAX 1800000
+
+#define TEGRA_SDHOST_MIN_FREQ 50000000
+#define TEGRA2_SDHOST_STD_FREQ 50000000
+#define TEGRA3_SDHOST_STD_FREQ 104000000
+
+static unsigned int tegra_sdhost_min_freq;
+static unsigned int tegra_sdhost_std_freq;
+static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock);
+static void tegra3_sdhci_post_reset_init(struct sdhci_host *sdhci);
+
+static unsigned int tegra3_sdhost_max_clk[4] = {
+ 208000000, 104000000, 208000000, 104000000 };
+
+struct tegra_sdhci_hw_ops {
+ /* Set the internal clk and card clk. */
+ void (*set_card_clock)(struct sdhci_host *sdhci, unsigned int clock);
+ /* Post reset vendor registers configuration */
+ void (*sdhost_init)(struct sdhci_host *sdhci);
+};
+
+static struct tegra_sdhci_hw_ops tegra_2x_sdhci_ops = {
+};
+
+static struct tegra_sdhci_hw_ops tegra_3x_sdhci_ops = {
+ .set_card_clock = tegra_3x_sdhci_set_card_clock,
+ .sdhost_init = tegra3_sdhci_post_reset_init,
+};
+
+struct tegra_sdhci_host {
+ bool clk_enabled;
+ struct regulator *vdd_io_reg;
+ struct regulator *vdd_slot_reg;
+ /* Pointer to the chip specific HW ops */
+ struct tegra_sdhci_hw_ops *hw_ops;
+ /* Host controller instance */
+ unsigned int instance;
+ /* vddio_min */
+ unsigned int vddio_min_uv;
+ /* vddio_max */
+ unsigned int vddio_max_uv;
+ /* max clk supported by the platform */
+ unsigned int max_clk_limit;
+ struct tegra_io_dpd *dpd;
+};
+
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
{
u32 val;
@@ -41,11 +110,12 @@ static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (unlikely(reg == SDHCI_HOST_VERSION)) {
/* Erratum: Version register is invalid in HW. */
return SDHCI_SPEC_200;
}
-
+#endif
return readw(host->ioaddr + reg);
}
@@ -60,6 +130,7 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
writel(val, host->ioaddr + reg);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (unlikely(reg == SDHCI_INT_ENABLE)) {
/* Erratum: Must enable block gap interrupt detection */
u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
@@ -69,6 +140,7 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
gap_ctrl &= ~0x8;
writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
}
+#endif
}
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
@@ -84,6 +156,114 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
return gpio_get_value(plat->wp_gpio);
}
+static void tegra3_sdhci_post_reset_init(struct sdhci_host *sdhci)
+{
+ u16 misc_ctrl;
+ u32 vendor_ctrl;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+ struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+ struct tegra_sdhci_platform_data *plat;
+
+ plat = pdev->dev.platform_data;
+ /* Set the base clock frequency */
+ vendor_ctrl = sdhci_readl(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
+ vendor_ctrl &= ~(0xFF << SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT);
+ vendor_ctrl |= (tegra3_sdhost_max_clk[tegra_host->instance] / 1000000) <<
+ SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT;
+ /* Set tap delay */
+ if (plat->tap_delay) {
+ vendor_ctrl &= ~(0xFF <<
+ SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT);
+ vendor_ctrl |= (plat->tap_delay <<
+ SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT);
+ }
+ sdhci_writel(sdhci, vendor_ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
+
+ /* Enable SDHOST v3.0 support */
+ misc_ctrl = sdhci_readw(sdhci, SDHCI_VENDOR_MISC_CNTRL);
+ misc_ctrl |= SDHCI_VENDOR_MISC_CNTRL_SDMMC_SPARE0_ENABLE_SD_3_0;
+ sdhci_writew(sdhci, misc_ctrl, SDHCI_VENDOR_MISC_CNTRL);
+}
+
+static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ u16 clk, ctrl_2;
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ break;
+ }
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+ if (uhs == MMC_TIMING_UHS_DDR50) {
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
+ clk |= 1 << SDHCI_DIVIDER_SHIFT;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ }
+ return 0;
+}
+
+static void tegra_sdhci_reset_exit(struct sdhci_host *sdhci, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+
+ if (mask & SDHCI_RESET_ALL) {
+ if (tegra_host->hw_ops->sdhost_init)
+ tegra_host->hw_ops->sdhost_init(sdhci);
+ }
+}
+
+static void sdhci_status_notify_cb(int card_present, void *dev_id)
+{
+ struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
+ struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+ struct tegra_sdhci_platform_data *plat;
+ unsigned int status, oldstat;
+
+ pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
+ card_present);
+
+ plat = pdev->dev.platform_data;
+ if (!plat->mmc_data.status) {
+ mmc_detect_change(sdhci->mmc, 0);
+ return;
+ }
+
+ status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
+
+ oldstat = plat->mmc_data.card_present;
+ plat->mmc_data.card_present = status;
+ if (status ^ oldstat) {
+ pr_debug("%s: Slot status change detected (%d -> %d)\n",
+ mmc_hostname(sdhci->mmc), oldstat, status);
+ if (status && !plat->mmc_data.built_in)
+ mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
+ else
+ mmc_detect_change(sdhci->mmc, 0);
+ }
+}
+
static irqreturn_t carddetect_irq(int irq, void *data)
{
struct sdhci_host *sdhost = (struct sdhci_host *)data;
@@ -115,16 +295,309 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
return 0;
}
+static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+ unsigned int clk_rate;
+
+ if (sdhci->mmc->card &&
+ mmc_card_ddr_mode(sdhci->mmc->card)) {
+ /*
+ * In ddr mode, tegra sdmmc controller clock frequency
+ * should be double the card clock frequency.
+ */
+ clk_rate = clock * 2;
+ } else {
+ if (clock <= tegra_sdhost_min_freq)
+ clk_rate = tegra_sdhost_min_freq;
+ else if (clock <= tegra_sdhost_std_freq)
+ clk_rate = tegra_sdhost_std_freq;
+ else
+ clk_rate = clock;
+ }
+
+ if (tegra_host->max_clk_limit &&
+ (clk_rate > tegra_host->max_clk_limit))
+ clk_rate = tegra_host->max_clk_limit;
+
+ clk_set_rate(pltfm_host->clk, clk_rate);
+ sdhci->max_clk = clk_get_rate(pltfm_host->clk);
+}
+
+static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock)
+{
+ int div;
+ u16 clk;
+ unsigned long timeout;
+ u8 ctrl;
+
+ if (clock && clock == sdhci->clock)
+ return;
+
+ sdhci_writew(sdhci, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ goto out;
+ if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
+ div = 1;
+ goto set_clk;
+ }
+
+ if (sdhci->version >= SDHCI_SPEC_300) {
+ /* Version 3.00 divisors must be a multiple of 2. */
+ if (sdhci->max_clk <= clock) {
+ div = 1;
+ } else {
+ for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
+ if ((sdhci->max_clk / div) <= clock)
+ break;
+ }
+ }
+ } else {
+ /* Version 2.00 divisors must be a power of 2. */
+ for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
+ if ((sdhci->max_clk / div) <= clock)
+ break;
+ }
+ }
+ div >>= 1;
+
+ /*
+ * Tegra3 sdmmc controller internal clock will not stabilize when
+ * a clock divider value greater than 4 is used. The WAR is as follows.
+ * - Enable PADPIPE_CLK_OVERRIDE in the vendor clk cntrl register.
+ * - Enable internal clock.
+ * - Wait for 5 usec and do a dummy write.
+ * - Poll for clk stable and disable PADPIPE_CLK_OVERRIDE.
+ */
+set_clk:
+ /* Enable PADPIPE clk override */
+ ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
+ ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE;
+ sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
+
+ clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
+ clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
+ << SDHCI_DIVIDER_HI_SHIFT;
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait for 5 usec */
+ udelay(5);
+
+ /* Do a dummy write */
+ ctrl = sdhci_readb(sdhci, SDHCI_CAPABILITIES);
+ ctrl |= 1;
+ sdhci_writeb(sdhci, ctrl, SDHCI_CAPABILITIES);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ dev_err(mmc_dev(sdhci->mmc), "Internal clock never stabilised\n");
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ /* Disable PADPIPE clk override */
+ ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
+ ctrl &= ~SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE;
+ sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
+out:
+ sdhci->clock = clock;
+}
+
+static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+ u8 ctrl;
+
+ pr_debug("%s %s %u enabled=%u\n", __func__,
+ mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
+
+ if (clock) {
+ /* bring out sd instance from io dpd mode */
+ tegra_io_dpd_disable(tegra_host->dpd);
+
+ if (!tegra_host->clk_enabled) {
+ clk_enable(pltfm_host->clk);
+ ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
+ ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK;
+ sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
+ tegra_host->clk_enabled = true;
+ }
+ tegra_sdhci_set_clk_rate(sdhci, clock);
+ if (tegra_host->hw_ops->set_card_clock)
+ tegra_host->hw_ops->set_card_clock(sdhci, clock);
+ } else if (!clock && tegra_host->clk_enabled) {
+ if (tegra_host->hw_ops->set_card_clock)
+ tegra_host->hw_ops->set_card_clock(sdhci, clock);
+ ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL);
+ ctrl &= ~SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK;
+ sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL);
+ clk_disable(pltfm_host->clk);
+ tegra_host->clk_enabled = false;
+ /* io dpd enable call for sd instance */
+ tegra_io_dpd_enable(tegra_host->dpd);
+ }
+}
+
+static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
+ unsigned int signal_voltage)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+ unsigned int min_uV = SDHOST_HIGH_VOLT_MIN;
+ unsigned int max_uV = SDHOST_HIGH_VOLT_MAX;
+ unsigned int rc;
+ u16 clk, ctrl;
+ unsigned int val;
+
+ /* Switch OFF the card clock to prevent glitches on the clock line */
+ clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
+
+ ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ctrl |= SDHCI_CTRL_VDD_180;
+ min_uV = SDHOST_LOW_VOLT_MIN;
+ max_uV = SDHOST_LOW_VOLT_MAX;
+ } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ if (ctrl & SDHCI_CTRL_VDD_180)
+ ctrl &= ~SDHCI_CTRL_VDD_180;
+ }
+ sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
+
+ /* Switch the I/O rail voltage */
+ if (tegra_host->vdd_io_reg) {
+ rc = regulator_set_voltage(tegra_host->vdd_io_reg,
+ min_uV, max_uV);
+ if (rc) {
+ dev_err(mmc_dev(sdhci->mmc), "switching to 1.8V"
+ "failed . Switching back to 3.3V\n");
+ regulator_set_voltage(tegra_host->vdd_io_reg,
+ SDHOST_HIGH_VOLT_MIN,
+ SDHOST_HIGH_VOLT_MAX);
+ return rc;
+ }
+ }
+
+ /* Wait for 10 msec for the voltage to be switched */
+ mdelay(10);
+
+ /* Enable the card clock */
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait for 1 msec after enabling clock */
+ mdelay(1);
+
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ /* Do Auto Calibration for 1.8V signal voltage */
+ val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
+ val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
+ /* Program Auto cal PD offset(bits 8:14) */
+ val &= ~(0x7F <<
+ SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
+ val |= (SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET <<
+ SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
+ /* Program Auto cal PU offset(bits 0:6) */
+ val &= ~0x7F;
+ val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET;
+ sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
+ }
+
+ return 0;
+}
+
+static int tegra_sdhci_suspend(struct sdhci_host *sdhci, pm_message_t state)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+
+ tegra_sdhci_set_clock(sdhci, 0);
+
+ /* Disable the power rails if any */
+ if (tegra_host->vdd_slot_reg)
+ regulator_disable(tegra_host->vdd_slot_reg);
+ if (tegra_host->vdd_io_reg)
+ regulator_disable(tegra_host->vdd_io_reg);
+ return 0;
+}
+
+static int tegra_sdhci_resume(struct sdhci_host *sdhci)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
+ unsigned long timeout;
+
+ /* Enable the power rails if any */
+ if (tegra_host->vdd_io_reg)
+ regulator_enable(tegra_host->vdd_io_reg);
+ if (tegra_host->vdd_slot_reg)
+ regulator_enable(tegra_host->vdd_slot_reg);
+
+ /* Set the minimum identification clock frequency of 400 kHz */
+ tegra_sdhci_set_clock(sdhci, 400000);
+
+ /* Reset the controller and power it on if the MMC_PM_KEEP_POWER flag is set */
+ if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+ sdhci_writeb(sdhci, SDHCI_RESET_ALL, SDHCI_SOFTWARE_RESET);
+
+ /* Wait max 100 ms */
+ timeout = 100;
+
+ /* hw clears the bit when it's done */
+ while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL) {
+ if (timeout == 0) {
+ printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+ mmc_hostname(sdhci->mmc), (int)SDHCI_RESET_ALL);
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ sdhci->pwr = 0;
+ }
+
+ return 0;
+}
+
static struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.platform_8bit_width = tegra_sdhci_8bit,
+ .set_clock = tegra_sdhci_set_clock,
+ .suspend = tegra_sdhci_suspend,
+ .resume = tegra_sdhci_resume,
+ .platform_reset_exit = tegra_sdhci_reset_exit,
+ .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
};
static struct sdhci_pltfm_data sdhci_tegra_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING |
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ SDHCI_QUIRK_NONSTANDARD_CLOCK |
+#endif
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
@@ -136,6 +609,7 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct tegra_sdhci_platform_data *plat;
struct sdhci_host *host;
+ struct tegra_sdhci_host *tegra_host;
struct clk *clk;
int rc;
@@ -153,6 +627,22 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
goto err_no_plat;
}
+ tegra_host = kzalloc(sizeof(struct tegra_sdhci_host), GFP_KERNEL);
+ if (tegra_host == NULL) {
+ dev_err(mmc_dev(host->mmc), "failed to allocate tegra host\n");
+ rc = -ENOMEM;
+ goto err_no_mem;
+ }
+
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (plat->mmc_data.embedded_sdio)
+ mmc_set_embedded_sdio_data(host->mmc,
+ &plat->mmc_data.embedded_sdio->cis,
+ &plat->mmc_data.embedded_sdio->cccr,
+ plat->mmc_data.embedded_sdio->funcs,
+ plat->mmc_data.embedded_sdio->num_funcs);
+#endif
+
if (gpio_is_valid(plat->power_gpio)) {
rc = gpio_request(plat->power_gpio, "sdhci_power");
if (rc) {
@@ -182,7 +672,18 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
dev_err(mmc_dev(host->mmc), "request irq error\n");
goto err_cd_irq_req;
}
+ rc = enable_irq_wake(gpio_to_irq(plat->cd_gpio));
+ if (rc < 0)
+ dev_err(mmc_dev(host->mmc),
+ "SD card wake-up event registration"
+ "failed with eroor: %d\n", rc);
+
+ } else if (plat->mmc_data.register_status_notify) {
+ plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
+ }
+ if (plat->mmc_data.status) {
+ plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
}
if (gpio_is_valid(plat->wp_gpio)) {
@@ -196,19 +697,89 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
gpio_direction_input(plat->wp_gpio);
}
+
+ if (!plat->mmc_data.built_in) {
+ if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
+ tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
+ tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
+ } else {
+ /*
+ * Set the min and max voltages to the
+ * default range of 2.7V - 3.6V
+ */
+ tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
+ tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
+ }
+ tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc), "vddio_sdmmc");
+ if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
+ dev_err(mmc_dev(host->mmc), "%s regulator not found: %ld\n",
+ "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
+ tegra_host->vdd_io_reg = NULL;
+ } else {
+ rc = regulator_set_voltage(tegra_host->vdd_io_reg,
+ tegra_host->vddio_min_uv,
+ tegra_host->vddio_max_uv);
+ if (rc) {
+ dev_err(mmc_dev(host->mmc), "%s regulator_set_voltage failed: %d",
+ "vddio_sdmmc", rc);
+ } else {
+ regulator_enable(tegra_host->vdd_io_reg);
+ }
+ }
+
+ tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc), "vddio_sd_slot");
+ if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
+ dev_err(mmc_dev(host->mmc), "%s regulator not found: %ld\n",
+ "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
+ tegra_host->vdd_slot_reg = NULL;
+ } else {
+ regulator_enable(tegra_host->vdd_slot_reg);
+ }
+ }
+
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
rc = PTR_ERR(clk);
goto err_clk_get;
}
- clk_enable(clk);
+ rc = clk_enable(clk);
+ if (rc != 0)
+ goto err_clk_put;
pltfm_host->clk = clk;
+ pltfm_host->priv = tegra_host;
+ tegra_host->clk_enabled = true;
+ tegra_host->max_clk_limit = plat->max_clk_limit;
+ tegra_host->instance = pdev->id;
+ tegra_host->dpd = tegra_io_dpd_get(mmc_dev(host->mmc));
host->mmc->pm_caps = plat->pm_flags;
+ host->mmc->caps |= MMC_CAP_ERASE;
+ host->mmc->caps |= MMC_CAP_DISABLE;
+ /* enable 1.8V DDR capability */
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
if (plat->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ host->mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
+ if (plat->mmc_data.built_in) {
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
+ }
+ /* Do not turn OFF embedded sdio cards as they support Wake on Wireless */
+ if (plat->mmc_data.embedded_sdio)
+ host->mmc->pm_flags |= MMC_PM_KEEP_POWER;
+
+ tegra_sdhost_min_freq = TEGRA_SDHOST_MIN_FREQ;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra_host->hw_ops = &tegra_2x_sdhci_ops;
+ tegra_sdhost_std_freq = TEGRA2_SDHOST_STD_FREQ;
+#else
+ tegra_host->hw_ops = &tegra_3x_sdhci_ops;
+ tegra_sdhost_std_freq = TEGRA3_SDHOST_STD_FREQ;
+#endif
rc = sdhci_add_host(host);
if (rc)
@@ -218,6 +789,7 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
err_add_host:
clk_disable(pltfm_host->clk);
+err_clk_put:
clk_put(pltfm_host->clk);
err_clk_get:
if (gpio_is_valid(plat->wp_gpio)) {
@@ -238,6 +810,8 @@ err_cd_req:
gpio_free(plat->power_gpio);
}
err_power_req:
+err_no_mem:
+ kfree(tegra_host);
err_no_plat:
sdhci_pltfm_free(pdev);
return rc;
@@ -247,6 +821,7 @@ static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct tegra_sdhci_host *tegra_host = pltfm_host->priv;
struct tegra_sdhci_platform_data *plat;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
@@ -254,6 +829,18 @@ static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
plat = pdev->dev.platform_data;
+ disable_irq_wake(gpio_to_irq(plat->cd_gpio));
+
+ if (tegra_host->vdd_slot_reg) {
+ regulator_disable(tegra_host->vdd_slot_reg);
+ regulator_put(tegra_host->vdd_slot_reg);
+ }
+
+ if (tegra_host->vdd_io_reg) {
+ regulator_disable(tegra_host->vdd_io_reg);
+ regulator_put(tegra_host->vdd_io_reg);
+ }
+
if (gpio_is_valid(plat->wp_gpio)) {
tegra_gpio_disable(plat->wp_gpio);
gpio_free(plat->wp_gpio);
@@ -270,10 +857,12 @@ static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
gpio_free(plat->power_gpio);
}
- clk_disable(pltfm_host->clk);
+ if (tegra_host->clk_enabled)
+ clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
sdhci_pltfm_free(pdev);
+ kfree(tegra_host);
return 0;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0e02cc1df12e..24713706295c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -25,6 +25,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
@@ -1046,14 +1047,11 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
u16 clk = 0;
unsigned long timeout;
- if (clock == host->clock)
+ if (clock && clock == host->clock)
return;
- if (host->ops->set_clock) {
- host->ops->set_clock(host, clock);
- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
- return;
- }
+ if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
+ return;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
@@ -1277,6 +1275,20 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host = mmc_priv(mmc);
+ /*
+ * Controller registers should not be updated without the
+ * controller clock enabled. Set the minimum controller
+ * clock if there is no clock.
+ */
+ if (host->ops->set_clock) {
+ if (!host->clock && !ios->clock) {
+ host->ops->set_clock(host, host->mmc->f_min);
+ host->clock = host->mmc->f_min;
+ } else if (ios->clock && (ios->clock != host->clock)) {
+ host->ops->set_clock(host, ios->clock);
+ }
+ }
+
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1291,13 +1303,13 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_reinit(host);
}
- sdhci_set_clock(host, ios->clock);
-
if (ios->power_mode == MMC_POWER_OFF)
sdhci_set_power(host, -1);
else
sdhci_set_power(host, ios->vdd);
+ sdhci_set_clock(host, ios->clock);
+
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1376,9 +1388,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
@@ -1407,9 +1419,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* Re-enable SD Clock */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -1424,6 +1436,12 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
+ /*
+ * Controller clock should only be disabled after all the register
+ * writes are done.
+ */
+ if (!ios->clock && host->ops->set_clock)
+ host->ops->set_clock(host, ios->clock);
}
static int check_ro(struct sdhci_host *host)
@@ -1510,6 +1528,12 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (host->version < SDHCI_SPEC_300)
return 0;
+ if (host->quirks & SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING) {
+ if (host->ops->switch_signal_voltage)
+ return host->ops->switch_signal_voltage(
+ host, ios->signal_voltage);
+ }
+
/*
* We first check whether the request is to set signalling voltage
* to 3.3V. If so, we change the voltage to 3.3V and return quickly.
@@ -1552,7 +1576,6 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
/* Wait for 5ms */
usleep_range(5000, 5500);
-
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (ctrl & SDHCI_CTRL_VDD_180) {
/* Provide SDCLK again and wait for 1ms*/
@@ -1801,10 +1824,42 @@ static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
spin_unlock_irqrestore(&host->lock, flags);
}
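+/*
+ * MMC core enable/disable hooks: gate the controller clock between requests
+ * for non-SDIO cards; SDIO cards (and the no-card case) are left untouched
+ * so that SDIO card interrupts keep working.
+ */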
+int sdhci_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!mmc->card || mmc->card->type == MMC_TYPE_SDIO)
+ return 0;
+
+ if (mmc->ios.clock) {
+ if (host->ops->set_clock)
+ host->ops->set_clock(host, mmc->ios.clock);
+ sdhci_set_clock(host, mmc->ios.clock);
+ }
+
+ return 0;
+}
+
+int sdhci_disable(struct mmc_host *mmc, int lazy)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!mmc->card || mmc->card->type == MMC_TYPE_SDIO)
+ return 0;
+
+ sdhci_set_clock(host, 0);
+ if (host->ops->set_clock)
+ host->ops->set_clock(host, 0);
+
+ return 0;
+}
+
static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
.set_ios = sdhci_set_ios,
.get_ro = sdhci_get_ro,
+ .enable = sdhci_enable,
+ .disable = sdhci_disable,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
@@ -1884,6 +1939,8 @@ static void sdhci_tasklet_finish(unsigned long param)
/* This is to force an update */
clock = host->clock;
host->clock = 0;
+ if (host->ops->set_clock)
+ host->ops->set_clock(host, clock);
sdhci_set_clock(host, clock);
}
@@ -2239,7 +2296,8 @@ out:
int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
- int ret;
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
sdhci_disable_card_detection(host);
@@ -2251,15 +2309,21 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
host->tuning_count * HZ);
}
- ret = mmc_suspend_host(host->mmc);
- if (ret)
- return ret;
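+ /*
+ * SDIO cards are kept powered across suspend (e.g. for wake-on-wireless),
+ * so only non-SDIO cards are suspended here.
+ */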
+ if (mmc->card && (mmc->card->type != MMC_TYPE_SDIO))
+ ret = mmc_suspend_host(host->mmc);
- free_irq(host->irq, host);
+ if (host->flags & MMC_PM_KEEP_POWER)
+ host->card_int_set = sdhci_readl(host, SDHCI_INT_ENABLE) &
+ SDHCI_INT_CARD_INT;
+
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
if (host->vmmc)
ret = regulator_disable(host->vmmc);
+ if (host->irq)
+ disable_irq(host->irq);
+
return ret;
}
@@ -2267,7 +2331,8 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
- int ret;
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
if (host->vmmc) {
int ret = regulator_enable(host->vmmc);
@@ -2281,15 +2346,24 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
- if (ret)
- return ret;
+ if (host->irq)
+ enable_irq(host->irq);
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
- ret = mmc_resume_host(host->mmc);
+ if (mmc->card) {
+ if (mmc->card->type != MMC_TYPE_SDIO) {
+ ret = mmc_resume_host(host->mmc);
+ } else {
+ /* Enable card interrupt as it is overwritten in sdhci_init */
+ if ((mmc->caps & MMC_CAP_SDIO_IRQ) &&
+ (mmc->pm_flags & MMC_PM_KEEP_POWER))
+ if (host->card_int_set)
+ mmc->ops->enable_sdio_irq(mmc, true);
+ }
+ }
+
sdhci_enable_card_detection(host);
/* Set the re-tuning expiration flag */
@@ -2509,7 +2583,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_discard_to = (1 << 27) / host->timeout_clk;
- mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 745c42fa41ed..1c12419f9d64 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -273,7 +273,10 @@ struct sdhci_ops {
void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
-
+ int (*suspend)(struct sdhci_host *host, pm_message_t state);
+ int (*resume)(struct sdhci_host *host);
+ int (*switch_signal_voltage)(struct sdhci_host *host,
+ unsigned int signal_voltage);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce77fbd..943d90f08c08 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -78,6 +78,12 @@ config MTD_DATAFLASH_OTP
other key product data. The second half is programmed with a
unique-to-each-chip bit pattern at the factory.
+config MTD_NAND_TEGRA
+ tristate "Support for NAND Controller on NVIDIA Tegra"
+ depends on ARCH_TEGRA
+ help
+ Enables NAND flash support for NVIDIA's Tegra family of chips.
+
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1d38fc..67345a00a5ab 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,6 +1,7 @@
#
# linux/drivers/mtd/devices/Makefile
#
+GCOV_PROFILE := y
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
obj-$(CONFIG_MTD_DOC2001) += doc2001.o
@@ -17,3 +18,4 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
diff --git a/drivers/mtd/devices/tegra_nand.c b/drivers/mtd/devices/tegra_nand.c
new file mode 100644
index 000000000000..54f4c1f67436
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.c
@@ -0,0 +1,1782 @@
+/*
+ * drivers/mtd/devices/tegra_nand.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * Copyright (C) 2010-2011 Nvidia Graphics Pvt. Ltd.
+ * http://www.nvidia.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Derived from: drivers/mtd/nand/nand_base.c
+ * drivers/mtd/nand/pxa3xx.c
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+#include <mach/nand.h>
+
+#include "tegra_nand.h"
+
+#define DRIVER_NAME "tegra_nand"
+#define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver"
+
+#define MAX_DMA_SZ SZ_64K
+#define ECC_BUF_SZ SZ_1K
+
+/* FIXME: is this right?!
+ * NvRM code says it should be 128 bytes, but that seems awfully small
+ */
+
+/*#define TEGRA_NAND_DEBUG
+#define TEGRA_NAND_DEBUG_PEDANTIC*/
+
+#ifdef TEGRA_NAND_DEBUG
+#define TEGRA_DBG(fmt, args...) \
+ do { pr_info(fmt, ##args); } while (0)
+#else
+#define TEGRA_DBG(fmt, args...)
+#endif
+
+/* TODO: will vary with devices, move into appropriate device specific header */
+#define SCAN_TIMING_VAL 0x3f0bd214
+#define SCAN_TIMING2_VAL 0xb
+
+#define TIMEOUT (2 * HZ)
+/* TODO: pull in the register defs (fields, masks, etc) from Nvidia files
+ * so we don't have to redefine them */
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
+struct tegra_nand_chip {
+ spinlock_t lock;
+ uint32_t chipsize;
+ int num_chips;
+ int curr_chip;
+
+ /* addr >> chip_shift == chip number */
+ uint32_t chip_shift;
+ /* (addr >> page_shift) & page_mask == page number within chip */
+ uint32_t page_shift;
+ uint32_t page_mask;
+ /* column within page */
+ uint32_t column_mask;
+ /* addr >> block_shift == block number (across the whole mtd dev, not
+ * just a single chip). */
+ uint32_t block_shift;
+
+ void *priv;
+};
+
+struct tegra_nand_info {
+ struct tegra_nand_chip chip;
+ struct mtd_info mtd;
+ struct tegra_nand_platform *plat;
+ struct device *dev;
+ struct mtd_partition *parts;
+
+ /* synchronizes access to accessing the actual NAND controller */
+ struct mutex lock;
+ /* partial_unaligned_rw_buffer is a temporary buffer used when
+ reading unaligned data from nand pages or when the data to be
+ read is less than the nand page size.
+ */
+ uint8_t *partial_unaligned_rw_buffer;
+
+ void *oob_dma_buf;
+ dma_addr_t oob_dma_addr;
+ /* ecc error vector info (offset into page and data mask to apply) */
+ void *ecc_buf;
+ dma_addr_t ecc_addr;
+ /* ecc error status (page number, err_cnt) */
+ uint32_t *ecc_errs;
+ uint32_t num_ecc_errs;
+ uint32_t max_ecc_errs;
+ spinlock_t ecc_lock;
+
+ uint32_t command_reg;
+ uint32_t config_reg;
+ uint32_t dmactrl_reg;
+
+ struct completion cmd_complete;
+ struct completion dma_complete;
+
+ /* bad block bitmap: 1 == good, 0 == bad/unknown */
+ unsigned long *bb_bitmap;
+
+ struct clk *clk;
+ uint32_t is_data_bus_width_16;
+ uint32_t device_id;
+ uint32_t vendor_id;
+ uint32_t num_bad_blocks;
+};
+#define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd)
+
+/* 64 byte oob block info for large page (== 2KB) device
+ *
+ * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
+ * Skipped bytes(4)
+ * Main area Ecc(36)
+ * Tag data(20)
+ * Tag data Ecc(4)
+ *
+ * Yaffs2 will use 16 tag bytes.
+ */
+
+static struct nand_ecclayout tegra_nand_oob_64 = {
+ .eccbytes = 36,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobavail = 20,
+ .oobfree = {
+ {.offset = 40,
+ .length = 20,
+ },
+ },
+};
+
+static struct nand_ecclayout tegra_nand_oob_128 = {
+ .eccbytes = 72,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ /* The eccpos array can hold only 64 entries, so the remaining
+ * bytes are commented out here. The driver uses hardware ECC, so
+ * this is not a problem.
+ */
+ /*67, 68, 69, 70, 71, 72, 73, 74, 75, */
+ },
+ .oobavail = 48,
+ .oobfree = {
+ {.offset = 76,
+ .length = 48,
+ },
+ },
+};
+
+static struct nand_flash_dev *find_nand_flash_device(int dev_id)
+{
+ struct nand_flash_dev *dev = &nand_flash_ids[0];
+
+ while (dev->name && dev->id != dev_id)
+ dev++;
+ return dev->name ? dev : NULL;
+}
+
+static struct nand_manufacturers *find_nand_flash_vendor(int vendor_id)
+{
+ struct nand_manufacturers *vendor = &nand_manuf_ids[0];
+
+ while (vendor->id && vendor->id != vendor_id)
+ vendor++;
+ return vendor->id ? vendor : NULL;
+}
+
+#define REG_NAME(name) { name, #name }
+static struct {
+ uint32_t addr;
+ char *name;
+} reg_names[] = {
+ REG_NAME(COMMAND_REG),
+ REG_NAME(STATUS_REG),
+ REG_NAME(ISR_REG),
+ REG_NAME(IER_REG),
+ REG_NAME(CONFIG_REG),
+ REG_NAME(TIMING_REG),
+ REG_NAME(RESP_REG),
+ REG_NAME(TIMING2_REG),
+ REG_NAME(CMD_REG1),
+ REG_NAME(CMD_REG2),
+ REG_NAME(ADDR_REG1),
+ REG_NAME(ADDR_REG2),
+ REG_NAME(DMA_MST_CTRL_REG),
+ REG_NAME(DMA_CFG_A_REG),
+ REG_NAME(DMA_CFG_B_REG),
+ REG_NAME(FIFO_CTRL_REG),
+ REG_NAME(DATA_BLOCK_PTR_REG),
+ REG_NAME(TAG_PTR_REG),
+ REG_NAME(ECC_PTR_REG),
+ REG_NAME(DEC_STATUS_REG),
+ REG_NAME(HWSTATUS_CMD_REG),
+ REG_NAME(HWSTATUS_MASK_REG),
+ {0, NULL},
+};
+
+#undef REG_NAME
+
+static int dump_nand_regs(void)
+{
+ int i = 0;
+
+ TEGRA_DBG("%s: dumping registers\n", __func__);
+ while (reg_names[i].name != NULL) {
+ TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name,
+ readl(reg_names[i].addr));
+ i++;
+ }
+ TEGRA_DBG("%s: end of reg dump\n", __func__);
+ return 1;
+}
+
+static inline void enable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ (void)info;
+ writel(readl(IER_REG) | mask, IER_REG);
+}
+
+static inline void disable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ (void)info;
+ writel(readl(IER_REG) & ~mask, IER_REG);
+}
+
+static inline void
+split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr,
+ uint32_t *page, uint32_t *column)
+{
+ *chipnr = (int)(offset >> info->chip.chip_shift);
+ *page = (offset >> info->chip.page_shift) & info->chip.page_mask;
+ *column = offset & info->chip.column_mask;
+}
+
+static irqreturn_t tegra_nand_irq(int irq, void *dev_id)
+{
+ struct tegra_nand_info *info = dev_id;
+ uint32_t isr;
+ uint32_t ier;
+ uint32_t dma_ctrl;
+ uint32_t tmp;
+
+ isr = readl(ISR_REG);
+ ier = readl(IER_REG);
+ dma_ctrl = readl(DMA_MST_CTRL_REG);
+#ifdef DEBUG_DUMP_IRQ
+ pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n",
+ isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28)));
+#endif
+ if (isr & ISR_CMD_DONE) {
+ if (likely(!(readl(COMMAND_REG) & COMMAND_GO)))
+ complete(&info->cmd_complete);
+ else
+ pr_err("tegra_nand_irq: Spurious cmd done irq!\n");
+ }
+
+ if (isr & ISR_ECC_ERR) {
+ /* always want to read the decode status so xfers don't stall. */
+ tmp = readl(DEC_STATUS_REG);
+
+ /* was ECC check actually enabled */
+ if ((ier & IER_ECC_ERR)) {
+ unsigned long flags;
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ info->ecc_errs[info->num_ecc_errs++] = tmp;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+ }
+ }
+
+ if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) &&
+ (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) {
+ complete(&info->dma_complete);
+ writel(dma_ctrl, DMA_MST_CTRL_REG);
+ }
+
+ if ((isr & ISR_UND) && (ier & IER_UND))
+ pr_err("%s: fifo underrun.\n", __func__);
+
+ if ((isr & ISR_OVR) && (ier & IER_OVR))
+ pr_err("%s: fifo overrun.\n", __func__);
+
+ /* clear ALL interrupts?! */
+ writel(isr & 0xfffc, ISR_REG);
+
+ return IRQ_HANDLED;
+}
+
+static inline int tegra_nand_is_cmd_done(struct tegra_nand_info *info)
+{
+ return (readl(COMMAND_REG) & COMMAND_GO) ? 0 : 1;
+}
+
+static int tegra_nand_wait_cmd_done(struct tegra_nand_info *info)
+{
+ uint32_t timeout = TIMEOUT; /* TODO: make this realistic */
+ int ret;
+
+ ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(!ret && dump_nand_regs());
+#endif
+
+ return ret ? 0 : ret;
+}
+
+static inline void select_chip(struct tegra_nand_info *info, int chipnr)
+{
+ BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips);
+ info->chip.curr_chip = chipnr;
+}
+
+static void cfg_hwstatus_mon(struct tegra_nand_info *info)
+{
+ uint32_t val;
+
+ val = (HWSTATUS_RDSTATUS_MASK(1) |
+ HWSTATUS_RDSTATUS_EXP_VAL(0) |
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) |
+ HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY));
+ writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG);
+ writel(val, HWSTATUS_MASK_REG);
+}
+
+/* Tells the NAND controller to initiate the command. */
+static int tegra_nand_go(struct tegra_nand_info *info)
+{
+ BUG_ON(!tegra_nand_is_cmd_done(info));
+
+ INIT_COMPLETION(info->cmd_complete);
+ writel(info->command_reg | COMMAND_GO, COMMAND_REG);
+
+ if (unlikely(tegra_nand_wait_cmd_done(info))) {
+ /* TODO: abort command if needed? */
+ pr_err("%s: Timeout while waiting for command\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* TODO: maybe wait for dma here? */
+ return 0;
+}
+
+static void tegra_nand_prep_readid(struct tegra_nand_info *info)
+{
+ info->command_reg =
+ (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) |
+ (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_READID, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(0, CONFIG_REG);
+}
+
+static int
+tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id)
+{
+ int err;
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(info->chip.curr_chip == -1);
+#endif
+
+ tegra_nand_prep_readid(info);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *chip_id = readl(RESP_REG);
+ return 0;
+}
+
+/* assumes right locks are held */
+static int nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status)
+{
+ int err;
+
+ info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_RBSY_CHK |
+ (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_STATUS, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *status = readl(RESP_REG) & 0xff;
+ return 0;
+}
+
+/* must be called with lock held */
+static int check_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block))
+ return 0;
+
+ offs &= ~(mtd->erasesize - 1);
+
+ if (info->is_data_bus_width_16)
+ writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG);
+ else
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+
+ /* check first two pages of the block */
+ if (info->is_data_bus_width_16)
+ column = column >> 1;
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
+ COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX |
+ COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID |
+ COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ /* ... poison me ... */
+ writel(0xaa55aa55, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0) {
+ pr_info("baaaaaad\n");
+ goto out;
+ }
+
+ if ((readl(RESP_REG) & 0xffff) != 0xffff) {
+ ret = 1;
+ goto out;
+ }
+
+ /* Note: The assumption here is that we cannot cross a chip
+ * boundary since we are only looking at the first 2 pages in
+ * a block, i.e. erasesize > writesize ALWAYS */
+ page++;
+ }
+
+out:
+ /* update the bitmap if the block is good */
+ if (ret == 0)
+ set_bit(block, info->bb_bitmap);
+ return ret;
+}
+
+static int tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int ret;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ mutex_lock(&info->lock);
+ ret = check_block_isbad(mtd, offs);
+ mutex_unlock(&info->lock);
+
+#if 0
+ if (ret > 0)
+ pr_info("block @ 0x%llx is bad.\n", offs);
+ else if (ret < 0)
+ pr_err("error checking block @ 0x%llx for badness.\n", offs);
+#endif
+
+ return ret;
+}
+
+static int tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ pr_info("tegra_nand: setting block %d bad\n", block);
+
+ mutex_lock(&info->lock);
+ offs &= ~(mtd->erasesize - 1);
+
+ /* mark the block bad in our bitmap */
+ clear_bit(block, info->bb_bitmap);
+ mtd->ecc_stats.badblocks++;
+
+ if (info->is_data_bus_width_16)
+ writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG);
+ else
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+ if (info->is_data_bus_width_16)
+ column = column >> 1;
+ /* write to first two pages in the block */
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
+ COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX |
+ COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID |
+ COMMAND_RBSY_CHK | COMMAND_AFT_DAT | COMMAND_SEC_CMD;
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ writel(0x0, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0)
+ goto out;
+
+ /* TODO: check if the program op worked? */
+ page++;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t num_blocks;
+ uint32_t offs;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint32_t status = 0;
+
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr,
+ instr->len);
+
+ if ((instr->addr + instr->len) > mtd->size) {
+ pr_err("tegra_nand_erase: Can't erase past end of device\n");
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n",
+ instr->addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->len & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: len=%lld not block-aligned\n",
+ instr->len);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ instr->fail_addr = 0xffffffff;
+
+ mutex_lock(&info->lock);
+
+ instr->state = MTD_ERASING;
+
+ offs = instr->addr;
+ num_blocks = instr->len >> info->chip.block_shift;
+
+ select_chip(info, -1);
+
+ while (num_blocks--) {
+ split_addr(info, offs, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08x, page=0x%08x\n", offs,
+ page);
+
+ if (check_block_isbad(mtd, offs)) {
+ pr_info("%s: skipping bad block @ 0x%08x\n", __func__,
+ offs);
+ goto next_block;
+ }
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
+ COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(2) |
+ COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
+ writel(NAND_CMD_ERASE1, CMD_REG1);
+ writel(NAND_CMD_ERASE2, CMD_REG2);
+
+ writel(page & 0xffffff, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ if (tegra_nand_go(info) != 0) {
+ instr->fail_addr = offs;
+ goto out_err;
+ }
+
+ /* TODO: do we want a timeout here? */
+ if ((nand_cmd_get_status(info, &status) != 0) ||
+ (status & NAND_STATUS_FAIL) ||
+ ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) {
+ instr->fail_addr = offs;
+ pr_info("%s: erase failed @ 0x%08x (stat=0x%08x)\n",
+ __func__, offs, status);
+ goto out_err;
+ }
+next_block:
+ offs += mtd->erasesize;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mutex_unlock(&info->lock);
+ mtd_erase_callback(instr);
+ return 0;
+
+out_err:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&info->lock);
+ return -EIO;
+}
+
+static inline void dump_mtd_oob_ops(struct mtd_oob_ops *ops)
+{
+ pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x "
+ "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__,
+ (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" :
+ (ops->mode ==
+ MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")), ops->len,
+ ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf);
+}
+
+static int
+tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len);
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = mtd->read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
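+/*
+ * An erased page reads back as all 0xFF and carries no valid ECC data, so
+ * any ECC "errors" flagged while the data and OOB buffers are entirely 0xFF
+ * are discarded instead of being reported.
+ */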
+static void
+correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf,
+ u8 *oobbuf, unsigned int a_len,
+ unsigned int b_len)
+{
+ int i;
+ int all_ff = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ if (info->num_ecc_errs) {
+ if (datbuf) {
+ for (i = 0; i < a_len; i++)
+ if (datbuf[i] != 0xFF)
+ all_ff = 0;
+ }
+ if (oobbuf) {
+ for (i = 0; i < b_len; i++)
+ if (oobbuf[i] != 0xFF)
+ all_ff = 0;
+ }
+ if (all_ff)
+ info->num_ecc_errs = 0;
+ }
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+static void update_ecc_counts(struct tegra_nand_info *info, int check_oob)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ for (i = 0; i < info->num_ecc_errs; ++i) {
+ /* correctable */
+ info->mtd.ecc_stats.corrected +=
+ DEC_STATUS_ERR_CNT(info->ecc_errs[i]);
+
+ /* uncorrectable */
+ if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A)
+ info->mtd.ecc_stats.failed++;
+ if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B))
+ info->mtd.ecc_stats.failed++;
+ }
+ info->num_ecc_errs = 0;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+static inline void clear_regs(struct tegra_nand_info *info)
+{
+ info->command_reg = 0;
+ info->config_reg = 0;
+ info->dmactrl_reg = 0;
+}
+
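+/*
+ * Build the command, config and DMA control register values for a single
+ * page transfer (data and/or tag/OOB area, read or write, with optional
+ * hardware ECC) and program the address and DMA pointer registers.
+ */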
+static void
+prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc,
+ uint32_t page, uint32_t column, dma_addr_t data_dma,
+ uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len)
+{
+ uint32_t tag_sz = oob_len;
+
+ uint32_t page_size_sel = (info->mtd.writesize >> 11) + 2;
+#if 0
+ pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x "
+ "data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__,
+ rx, do_ecc, page, column, data_dma, data_len, oob_dma, oob_len);
+#endif
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK |
+ COMMAND_TRANS_SIZE(8);
+
+ info->config_reg = (CONFIG_PIPELINE_EN | CONFIG_EDO_MODE |
+ CONFIG_COM_BSY);
+ if (info->is_data_bus_width_16)
+ info->config_reg |= CONFIG_BUS_WIDTH;
+ info->dmactrl_reg = (DMA_CTRL_DMA_GO |
+ DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE |
+ DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4));
+
+ if (rx) {
+ if (do_ecc)
+ info->config_reg |= CONFIG_HW_ERR_CORRECTION;
+ info->command_reg |= COMMAND_RX;
+ info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+ } else {
+ info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT);
+ info->dmactrl_reg |= DMA_CTRL_DIR; /* DMA_RD == TX */
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+ }
+
+ if (data_len) {
+ if (do_ecc)
+ info->config_reg |= CONFIG_HW_ECC | CONFIG_ECC_SEL;
+ info->config_reg |=
+ CONFIG_PAGE_SIZE_SEL(page_size_sel) | CONFIG_TVALUE(0) |
+ CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
+ info->command_reg |= COMMAND_A_VALID;
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
+ writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG);
+ writel(data_dma, DATA_BLOCK_PTR_REG);
+ } else {
+ column = info->mtd.writesize;
+ if (do_ecc)
+ column += info->mtd.ecclayout->oobfree[0].offset;
+ writel(0, DMA_CFG_A_REG);
+ writel(0, DATA_BLOCK_PTR_REG);
+ }
+
+ if (oob_len) {
+ if (do_ecc) {
+ oob_len = info->mtd.oobavail;
+ tag_sz = info->mtd.oobavail;
+ tag_sz += 4; /* size of tag ecc */
+ if (rx)
+ oob_len += 4; /* size of tag ecc */
+ info->config_reg |= CONFIG_ECC_EN_TAG;
+ }
+ if (data_len && rx)
+ oob_len += 4; /* num of skipped bytes */
+
+ info->command_reg |= COMMAND_B_VALID;
+ info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1);
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_B;
+ writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG);
+ writel(oob_dma, TAG_PTR_REG);
+ } else {
+ writel(0, DMA_CFG_B_REG);
+ writel(0, TAG_PTR_REG);
+ }
+ /* For 16-bit bus width we need to divide the column number by 2 */
+ if (info->is_data_bus_width_16)
+ column = column >> 1;
+ writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+}
+
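+/*
+ * Map a buffer for DMA. Handles both lowmem and vmalloc addresses; a
+ * vmalloc'd buffer must not cross a page boundary.
+ */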
+static dma_addr_t
+tegra_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+static ssize_t show_vendor_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ return sprintf(buf, "0x%x\n", info->vendor_id);
+}
+
+static DEVICE_ATTR(vendor_id, S_IRUSR, show_vendor_id, NULL);
+
+static ssize_t show_device_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ return sprintf(buf, "0x%x\n", info->device_id);
+}
+
+static DEVICE_ATTR(device_id, S_IRUSR, show_device_id, NULL);
+
+static ssize_t show_flash_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ struct mtd_info *mtd = &info->mtd;
+ return sprintf(buf, "%llu bytes\n", mtd->size);
+}
+
+static DEVICE_ATTR(flash_size, S_IRUSR, show_flash_size, NULL);
+
+static ssize_t show_num_bad_blocks(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", info->num_bad_blocks);
+}
+
+static DEVICE_ATTR(num_bad_blocks, S_IRUSR, show_num_bad_blocks, NULL);
+
+static ssize_t show_bb_bitmap(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ struct mtd_info *mtd = &info->mtd;
+ int num_blocks = mtd->size >> info->chip.block_shift, i, ret = 0, size =
+ 0;
+
+ for (i = 0; i < num_blocks / (8 * sizeof(unsigned long)); i++) {
+ size = sprintf(buf, "0x%lx\n", info->bb_bitmap[i]);
+ ret += size;
+ buf += size;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(bb_bitmap, S_IRUSR, show_bb_bitmap, NULL);
+
+/*
+ * Independent of the OOB mode, we read the main data and the OOB data from
+ * the oobfree areas specified by nand_ecclayout.
+ * This function also checks the partial_unaligned_rw_buffer buffer pool:
+ * if the address is already present and is not 'unused', it uses the data
+ * in the buffer; otherwise it goes through DMA.
+ */
+static int
+do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct mtd_ecc_stats old_ecc_stats;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int unaligned = from & info->chip.column_mask;
+ uint32_t len = datbuf ? ((ops->len) + unaligned) : 0;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(ops);
+#endif
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ from = from - unaligned;
+
+ /* Don't care about the MTD_OOB_ mode value; always use oobavail and ecc. */
+ oobsz = mtd->oobavail;
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't read OOB from multiple pages (%d > %d)\n",
+ __func__, ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf && !len) {
+ page_count = 1;
+ } else {
+ page_count =
+ (uint32_t) ((len + mtd->writesize - 1) / mtd->writesize);
+ }
+
+ mutex_lock(&info->lock);
+
+ memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats));
+
+ if (do_ecc) {
+ enable_ints(info, IER_ECC_ERR);
+ writel(info->ecc_addr, ECC_PTR_REG);
+ } else
+ disable_ints(info, IER_ECC_ERR);
+
+ split_addr(info, from, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ /* reset it to point back to beginning of page */
+ from -= column;
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize - column, len);
+ int b_len = min(oobsz, ooblen);
+ int temp_len = 0;
+ char *temp_buf = NULL;
+ /* Take care when the read is smaller than the page size,
+ * otherwise there will be a kernel panic due to a DMA timeout */
+ if (((a_len < mtd->writesize) && len) || unaligned) {
+ temp_len = a_len;
+ a_len = mtd->writesize;
+ temp_buf = datbuf;
+ datbuf = info->partial_unaligned_rw_buffer;
+ }
+#if 0
+ pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr,
+ page, column);
+#endif
+
+ clear_regs(info);
+ if (datbuf)
+ datbuf_dma_addr =
+ tegra_nand_dma_map(info->dev, datbuf, a_len,
+ DMA_FROM_DEVICE);
+
+ prep_transfer_dma(info, 1, do_ecc, page, column,
+ datbuf_dma_addr, a_len, info->oob_dma_addr,
+ b_len);
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ err = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ /*pr_info("tegra_read_oob: DMA complete\n"); */
+
+ /* if we are here, transfer is done */
+ if (datbuf)
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len,
+ DMA_FROM_DEVICE);
+
+ if (oobbuf) {
+ uint32_t ofs = datbuf && oobbuf ? 4 : 0; /* skipped bytes */
+ memcpy(oobbuf, info->oob_dma_buf + ofs, b_len);
+ }
+
+ correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len,
+ b_len);
+ /* Take care when read is of less than page size */
+ if (temp_len) {
+ memcpy(temp_buf, datbuf + unaligned,
+ temp_len - unaligned);
+ a_len = temp_len;
+ datbuf = temp_buf;
+ }
+ if (datbuf) {
+ len -= a_len;
+ datbuf += a_len - unaligned;
+ ops->retlen += a_len - unaligned;
+ }
+
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ unaligned = 0;
+ update_ecc_counts(info, oobbuf != NULL);
+
+ if (!page_count)
+ break;
+
+ from += mtd->writesize;
+ column = 0;
+
+ split_addr(info, from, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ disable_ints(info, IER_ECC_ERR);
+
+ if (mtd->ecc_stats.failed != old_ecc_stats.failed)
+ err = -EBADMSG;
+ else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected)
+ err = -EUCLEAN;
+ else
+ err = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ disable_ints(info, IER_ECC_ERR);
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+/* just does some parameter checking and calls do_read_oob */
+static int
+tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) {
+ pr_err("%s: Can't read past end of device.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Reading 0 bytes from OOB is meaningless\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->mode != MTD_OOB_AUTO)) {
+ if (ops->oobbuf && ops->datbuf) {
+ pr_err("%s: can't read OOB + Data in non-AUTO mode.\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((ops->mode == MTD_OOB_RAW) && !ops->datbuf) {
+ pr_err("%s: Raw mode only supports reading data area.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return do_read_oob(mtd, from, ops);
+}
+
+static int
+tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len);
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = len;
+ ops.datbuf = (uint8_t *) buf;
+ ops.oobbuf = NULL;
+ ret = mtd->write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static int
+do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t len = datbuf ? ops->len : 0;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(ops);
+#endif
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ if (!ops->len)
+ return 0;
+
+ oobsz = mtd->oobavail;
+
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't write OOB to multiple pages (%d > %d)\n",
+ __func__, ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf && !len) {
+ page_count = 1;
+ } else
+ page_count =
+ max((uint32_t) (ops->len / mtd->writesize), (uint32_t) 1);
+
+ mutex_lock(&info->lock);
+
+ split_addr(info, to, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize, len);
+ int b_len = min(oobsz, ooblen);
+ int temp_len = 0;
+ char *temp_buf = NULL;
+ /* Take care when the write is smaller than the page size,
+ * otherwise there will be a kernel panic due to a dma timeout */
+ if ((a_len < mtd->writesize) && len) {
+ temp_len = a_len;
+ a_len = mtd->writesize;
+ temp_buf = datbuf;
+ datbuf = info->partial_unaligned_rw_buffer;
+ memset(datbuf, 0xff, a_len);
+ memcpy(datbuf, temp_buf, temp_len);
+ }
+
+ if (datbuf)
+ datbuf_dma_addr =
+ tegra_nand_dma_map(info->dev, datbuf, a_len,
+ DMA_TO_DEVICE);
+ if (oobbuf)
+ memcpy(info->oob_dma_buf, oobbuf, b_len);
+
+ clear_regs(info);
+ prep_transfer_dma(info, 0, do_ecc, page, column,
+ datbuf_dma_addr, a_len, info->oob_dma_addr,
+ b_len);
+
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ goto out_err;
+ }
+ if (temp_len) {
+ a_len = temp_len;
+ datbuf = temp_buf;
+ }
+
+ if (datbuf) {
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len,
+ DMA_TO_DEVICE);
+ len -= a_len;
+ datbuf += a_len;
+ ops->retlen += a_len;
+ }
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ if (!page_count)
+ break;
+
+ to += mtd->writesize;
+ column = 0;
+
+ split_addr(info, to, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+static int
+tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+
+ if (unlikely(to & info->chip.column_mask)) {
+ pr_err("%s: Unaligned write (to 0x%llx) not supported\n",
+ __func__, to);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__);
+ return -EINVAL;
+ }
+
+ return do_write_oob(mtd, to, ops);
+}
+
+static int tegra_nand_suspend(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+static void tegra_nand_resume(struct mtd_info *mtd)
+{
+}
+
+static int scan_bad_blocks(struct tegra_nand_info *info)
+{
+ struct mtd_info *mtd = &info->mtd;
+ int num_blocks = mtd->size >> info->chip.block_shift;
+ uint32_t block;
+ int is_bad = 0;
+ info->num_bad_blocks = 0;
+
+ for (block = 0; block < num_blocks; ++block) {
+ /* make sure the bit is cleared, meaning it's bad/unknown before
+ * we check. */
+ clear_bit(block, info->bb_bitmap);
+ is_bad = mtd->block_isbad(mtd, block << info->chip.block_shift);
+
+ if (is_bad == 0)
+ set_bit(block, info->bb_bitmap);
+ else if (is_bad > 0) {
+ info->num_bad_blocks++;
+ pr_debug("block 0x%08x is bad.\n", block);
+ } else {
+ pr_err("Fatal error (%d) while scanning for "
+ "bad blocks\n", is_bad);
+ return is_bad;
+ }
+ }
+ return 0;
+}
+
+static void
+set_chip_timing(struct tegra_nand_info *info, uint32_t vendor_id,
+ uint32_t dev_id, uint32_t fourth_id_field)
+{
+ struct tegra_nand_chip_parms *chip_parms = NULL;
+ uint32_t tmp;
+ int i = 0;
+ unsigned long nand_clk_freq_khz = clk_get_rate(info->clk) / 1000;
+ for (i = 0; i < info->plat->nr_chip_parms; i++)
+ if (info->plat->chip_parms[i].vendor_id == vendor_id &&
+ info->plat->chip_parms[i].device_id == dev_id &&
+ info->plat->chip_parms[i].read_id_fourth_byte ==
+ fourth_id_field)
+ chip_parms = &info->plat->chip_parms[i];
+
+ if (!chip_parms) {
+ pr_warn("WARNING:tegra_nand: timing for vendor-id: "
+ "%x device-id: %x fourth-id-field: %x not found. Using Bootloader timing",
+ vendor_id, dev_id, fourth_id_field);
+ return;
+ }
+ /* TODO: Handle the change of frequency if DVFS is enabled */
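+ /*
+ * Convert a chip timing value (in nanoseconds, from the board's
+ * chip_parms) into NAND controller clock cycles, rounding up; the
+ * timing register fields encode cycles minus one.
+ */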
+#define CNT(t) (((((t) * nand_clk_freq_khz) + 1000000 - 1) / 1000000) - 1)
+ tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) |
+ TIMING_TWB(CNT(chip_parms->timing.twb)) |
+ TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) |
+ TIMING_TWHR(CNT(chip_parms->timing.twhr)) |
+ TIMING_TCS(CNT(chip_parms->timing.tcs)) |
+ TIMING_TWH(CNT(chip_parms->timing.twh)) |
+ TIMING_TWP(CNT(chip_parms->timing.twp)) |
+ TIMING_TRH(CNT(chip_parms->timing.trh)) |
+ TIMING_TRP(CNT(chip_parms->timing.trp)));
+ writel(tmp, TIMING_REG);
+ writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG);
+#undef CNT
+}
+
+/* Scans for nand flash devices, identifies them, and fills in the
+ * device info. */
+static int tegra_nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct nand_flash_dev *dev_info;
+ struct nand_manufacturers *vendor_info;
+ uint32_t tmp;
+ uint32_t dev_id;
+ uint32_t vendor_id;
+ uint32_t dev_parms;
+ uint32_t mlc_parms;
+ int cnt;
+ int err = 0;
+
+ writel(SCAN_TIMING_VAL, TIMING_REG);
+ writel(SCAN_TIMING2_VAL, TIMING2_REG);
+ writel(0, CONFIG_REG);
+
+ select_chip(info, 0);
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+
+ vendor_id = tmp & 0xff;
+ dev_id = (tmp >> 8) & 0xff;
+ mlc_parms = (tmp >> 16) & 0xff;
+ dev_parms = (tmp >> 24) & 0xff;
+
+ dev_info = find_nand_flash_device(dev_id);
+ if (dev_info == NULL) {
+ pr_err("%s: unknown flash device id (0x%02x) found.\n",
+ __func__, dev_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ vendor_info = find_nand_flash_vendor(vendor_id);
+ if (vendor_info == NULL) {
+ pr_err("%s: unknown flash vendor id (0x%02x) found.\n",
+ __func__, vendor_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ /* loop through and see if we can find more devices */
+ for (cnt = 1; cnt < info->plat->max_chips; ++cnt) {
+ select_chip(info, cnt);
+ /* TODO: figure out what to do about errors here */
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+ if ((dev_id != ((tmp >> 8) & 0xff)) ||
+ (vendor_id != (tmp & 0xff)))
+ break;
+ }
+
+ pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n",
+ DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name,
+ dev_info->name);
+ info->vendor_id = vendor_id;
+ info->device_id = dev_id;
+ info->chip.num_chips = cnt;
+ info->chip.chipsize = dev_info->chipsize << 20;
+ mtd->size = info->chip.num_chips * info->chip.chipsize;
+
+ /* format of 4th id byte returned by READ ID
+ * bit 7 = rsvd
+ * bit 6 = bus width. 1 == 16bit, 0 == 8bit
+ * bits 5:4 = data block size. 64kb * (2^val)
+ * bit 3 = rsvd
+ * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes
+ * bits 1:0 = page size. 1kb * (2^val)
+ */
+
+ /* page_size */
+ tmp = dev_parms & 0x3;
+ mtd->writesize = 1024 << tmp;
+ info->chip.column_mask = mtd->writesize - 1;
+
+ if (mtd->writesize > 4096) {
+ pr_err("%s: Large page devices with pagesize > 4kb are NOT "
+ "supported\n", __func__);
+ goto out_error;
+ } else if (mtd->writesize < 2048) {
+ pr_err("%s: Small page devices are NOT supported\n", __func__);
+ goto out_error;
+ }
+
+ /* spare area, must be at least 64 bytes */
+ tmp = (dev_parms >> 2) & 0x1;
+ tmp = (8 << tmp) * (mtd->writesize / 512);
+ if (tmp < 64) {
+ pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp);
+ goto out_error;
+ }
+ mtd->oobsize = tmp;
+
+ /* data block size (erase size) (w/o spare) */
+ tmp = (dev_parms >> 4) & 0x3;
+ mtd->erasesize = (64 * 1024) << tmp;
+ info->chip.block_shift = ffs(mtd->erasesize) - 1;
+ /* bus width of the nand chip 8/16 */
+ tmp = (dev_parms >> 6) & 0x1;
+ info->is_data_bus_width_16 = tmp;
+ /* used to select the appropriate chip/page in case multiple devices
+ * are connected */
+ info->chip.chip_shift = ffs(info->chip.chipsize) - 1;
+ info->chip.page_shift = ffs(mtd->writesize) - 1;
+ info->chip.page_mask =
+ (info->chip.chipsize >> info->chip.page_shift) - 1;
+
+ /* now fill in the rest of the mtd fields */
+ if (mtd->oobsize == 64)
+ mtd->ecclayout = &tegra_nand_oob_64;
+ else
+ mtd->ecclayout = &tegra_nand_oob_128;
+
+ mtd->oobavail = mtd->ecclayout->oobavail;
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+
+ mtd->erase = tegra_nand_erase;
+ mtd->lock = NULL;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = tegra_nand_read;
+ mtd->write = tegra_nand_write;
+ mtd->read_oob = tegra_nand_read_oob;
+ mtd->write_oob = tegra_nand_write_oob;
+
+ mtd->resume = tegra_nand_resume;
+ mtd->suspend = tegra_nand_suspend;
+ mtd->block_isbad = tegra_nand_block_isbad;
+ mtd->block_markbad = tegra_nand_block_markbad;
+
+ set_chip_timing(info, vendor_id, dev_id, dev_parms);
+
+ return 0;
+
+out_error:
+ pr_err("%s: NAND device scan aborted due to error(s).\n", __func__);
+ return err;
+}
+
+static int __devinit tegra_nand_probe(struct platform_device *pdev)
+{
+ struct tegra_nand_platform *plat = pdev->dev.platform_data;
+ struct tegra_nand_info *info = NULL;
+ struct tegra_nand_chip *chip = NULL;
+ struct mtd_info *mtd = NULL;
+ int err = 0;
+ uint64_t num_erase_blocks;
+
+ pr_debug("%s: probing (%p)\n", __func__, pdev);
+
+ if (!plat) {
+ pr_err("%s: no platform device info\n", __func__);
+ return -EINVAL;
+ } else if (!plat->chip_parms) {
+ pr_err("%s: no platform nand parms\n", __func__);
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: no memory for flash info\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+ info->plat = plat;
+
+ platform_set_drvdata(pdev, info);
+
+ init_completion(&info->cmd_complete);
+ init_completion(&info->dma_complete);
+
+ mutex_init(&info->lock);
+ spin_lock_init(&info->ecc_lock);
+
+ chip = &info->chip;
+ chip->priv = &info->mtd;
+ chip->curr_chip = -1;
+
+ mtd = &info->mtd;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->priv = &info->chip;
+ mtd->owner = THIS_MODULE;
+
+ /* HACK: allocate a dma buffer to hold 1 page oob data */
+ info->oob_dma_buf = dma_alloc_coherent(NULL, 128,
+ &info->oob_dma_addr, GFP_KERNEL);
+ if (!info->oob_dma_buf) {
+ err = -ENOMEM;
+ goto out_free_info;
+ }
+
+ /* this will store the ecc error vector info */
+ info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr,
+ GFP_KERNEL);
+ if (!info->ecc_buf) {
+ err = -ENOMEM;
+ goto out_free_dma_buf;
+ }
+
+ /* grab the irq */
+ if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) {
+ pr_err("NAND IRQ resource not defined\n");
+ err = -EINVAL;
+ goto out_free_ecc_buf;
+ }
+
+ err = request_irq(pdev->resource[0].start, tegra_nand_irq,
+ IRQF_SHARED, DRIVER_NAME, info);
+ if (err) {
+ pr_err("Unable to request IRQ %d (%d)\n",
+ pdev->resource[0].start, err);
+ goto out_free_ecc_buf;
+ }
+
+ /* TODO: configure pinmux here?? */
+ info->clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(info->clk)) {
+ err = PTR_ERR(info->clk);
+ goto out_free_ecc_buf;
+ }
+ err = clk_enable(info->clk);
+ if (err != 0)
+ goto out_free_ecc_buf;
+
+ if (plat->wp_gpio) {
+ gpio_request(plat->wp_gpio, "nand_wp");
+ tegra_gpio_enable(plat->wp_gpio);
+ gpio_direction_output(plat->wp_gpio, 1);
+ }
+
+ cfg_hwstatus_mon(info);
+
+ /* clear all pending interrupts */
+ writel(readl(ISR_REG), ISR_REG);
+
+ /* clear dma interrupt */
+ writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);
+
+ /* enable interrupts */
+ disable_ints(info, 0xffffffff);
+ enable_ints(info,
+ IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
+ IER_ECC_ERR | IER_GIE);
+
+ if (tegra_nand_scan(mtd, plat->max_chips)) {
+ err = -ENXIO;
+ goto out_dis_irq;
+ }
+ pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n",
+ DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start);
+
+ /* allocate memory to hold the ecc error info */
+ info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize;
+ info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!info->ecc_errs) {
+ err = -ENOMEM;
+ goto out_dis_irq;
+ }
+
+ /* alloc the bad block bitmap */
+ num_erase_blocks = mtd->size;
+ do_div(num_erase_blocks, mtd->erasesize);
+ info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!info->bb_bitmap) {
+ err = -ENOMEM;
+ goto out_free_ecc;
+ }
+
+ err = scan_bad_blocks(info);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+#if 0
+ dump_nand_regs();
+#endif
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0);
+ if (err > 0) {
+ err = mtd_device_register(mtd, info->parts, err);
+ } else if (err <= 0 && plat->parts) {
+ err = mtd_device_register(mtd, plat->parts, plat->nr_parts);
+ } else
+#endif
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+ dev_set_drvdata(&pdev->dev, info);
+
+ info->partial_unaligned_rw_buffer = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!info->partial_unaligned_rw_buffer) {
+ err = -ENOMEM;
+ goto out_free_bbbmap;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_device_id);
+ if (err != 0)
+ goto out_free_rw_buffer;
+
+ err = device_create_file(&pdev->dev, &dev_attr_vendor_id);
+ if (err != 0)
+ goto err_nand_sysfs_vendorid_failed;
+
+ err = device_create_file(&pdev->dev, &dev_attr_flash_size);
+ if (err != 0)
+ goto err_nand_sysfs_flash_size_failed;
+
+ err = device_create_file(&pdev->dev, &dev_attr_num_bad_blocks);
+ if (err != 0)
+ goto err_nand_sysfs_num_bad_blocks_failed;
+
+ err = device_create_file(&pdev->dev, &dev_attr_bb_bitmap);
+ if (err != 0)
+ goto err_nand_sysfs_bb_bitmap_failed;
+
+ pr_debug("%s: probe done.\n", __func__);
+ return 0;
+
+err_nand_sysfs_bb_bitmap_failed:
+ device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks);
+
+err_nand_sysfs_num_bad_blocks_failed:
+ device_remove_file(&pdev->dev, &dev_attr_flash_size);
+
+err_nand_sysfs_flash_size_failed:
+ device_remove_file(&pdev->dev, &dev_attr_vendor_id);
+
+err_nand_sysfs_vendorid_failed:
+ device_remove_file(&pdev->dev, &dev_attr_device_id);
+
+out_free_rw_buffer:
+ kfree(info->partial_unaligned_rw_buffer);
+
+out_free_bbbmap:
+ kfree(info->bb_bitmap);
+
+out_free_ecc:
+ kfree(info->ecc_errs);
+
+out_dis_irq:
+ disable_ints(info, 0xffffffff);
+ free_irq(pdev->resource[0].start, info);
+
+out_free_ecc_buf:
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
+
+out_free_dma_buf:
+ dma_free_coherent(NULL, 128, info->oob_dma_buf, info->oob_dma_addr);
+
+out_free_info:
+ platform_set_drvdata(pdev, NULL);
+ kfree(info);
+
+ return err;
+}
+
+static int __devexit tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (info) {
+ free_irq(pdev->resource[0].start, info);
+ kfree(info->bb_bitmap);
+ kfree(info->ecc_errs);
+ kfree(info->partial_unaligned_rw_buffer);
+
+ device_remove_file(&pdev->dev, &dev_attr_device_id);
+ device_remove_file(&pdev->dev, &dev_attr_vendor_id);
+ device_remove_file(&pdev->dev, &dev_attr_flash_size);
+ device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks);
+ device_remove_file(&pdev->dev, &dev_attr_bb_bitmap);
+
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf,
+ info->ecc_addr);
+ /* size must match the 128-byte allocation made in probe */
+ dma_free_coherent(NULL, 128, info->oob_dma_buf,
+ info->oob_dma_addr);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tegra_nand_driver = {
+ .probe = tegra_nand_probe,
+ .remove = __devexit_p(tegra_nand_remove),
+ .suspend = NULL,
+ .resume = NULL,
+ .driver = {
+ .name = "tegra_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_nand_init(void)
+{
+ return platform_driver_register(&tegra_nand_driver);
+}
+
+static void __exit tegra_nand_exit(void)
+{
+ platform_driver_unregister(&tegra_nand_driver);
+}
+
+module_init(tegra_nand_init);
+module_exit(tegra_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/mtd/devices/tegra_nand.h b/drivers/mtd/devices/tegra_nand.h
new file mode 100644
index 000000000000..339d6cc7330c
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.h
@@ -0,0 +1,148 @@
+/*
+ * drivers/mtd/devices/tegra_nand.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTD_DEV_TEGRA_NAND_H
+#define __MTD_DEV_TEGRA_NAND_H
+
+#include <mach/io.h>
+
+#define __BITMASK0(len) ((1 << (len)) - 1)
+#define __BITMASK(start, len) (__BITMASK0(len) << (start))
+#define REG_BIT(bit) (1 << (bit))
+#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
+#define REG_FIELD_MASK(start, len) (~(__BITMASK((start), (len))))
+#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
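+
+/* Illustrative use of the helpers above: REG_FIELD(0x3, 4, 2) == 0x30 and
+ * REG_GET_FIELD(0x70, 4, 2) == 0x3 (example values only). */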
+
+/* tegra nand registers... */
+#define TEGRA_NAND_PHYS 0x70008000
+#define TEGRA_NAND_BASE IO_TO_VIRT(TEGRA_NAND_PHYS)
+#define COMMAND_REG (TEGRA_NAND_BASE + 0x00)
+#define STATUS_REG (TEGRA_NAND_BASE + 0x04)
+#define ISR_REG (TEGRA_NAND_BASE + 0x08)
+#define IER_REG (TEGRA_NAND_BASE + 0x0c)
+#define CONFIG_REG (TEGRA_NAND_BASE + 0x10)
+#define TIMING_REG (TEGRA_NAND_BASE + 0x14)
+#define RESP_REG (TEGRA_NAND_BASE + 0x18)
+#define TIMING2_REG (TEGRA_NAND_BASE + 0x1c)
+#define CMD_REG1 (TEGRA_NAND_BASE + 0x20)
+#define CMD_REG2 (TEGRA_NAND_BASE + 0x24)
+#define ADDR_REG1 (TEGRA_NAND_BASE + 0x28)
+#define ADDR_REG2 (TEGRA_NAND_BASE + 0x2c)
+#define DMA_MST_CTRL_REG (TEGRA_NAND_BASE + 0x30)
+#define DMA_CFG_A_REG (TEGRA_NAND_BASE + 0x34)
+#define DMA_CFG_B_REG (TEGRA_NAND_BASE + 0x38)
+#define FIFO_CTRL_REG (TEGRA_NAND_BASE + 0x3c)
+#define DATA_BLOCK_PTR_REG (TEGRA_NAND_BASE + 0x40)
+#define TAG_PTR_REG (TEGRA_NAND_BASE + 0x44)
+#define ECC_PTR_REG (TEGRA_NAND_BASE + 0x48)
+#define DEC_STATUS_REG (TEGRA_NAND_BASE + 0x4c)
+#define HWSTATUS_CMD_REG (TEGRA_NAND_BASE + 0x50)
+#define HWSTATUS_MASK_REG (TEGRA_NAND_BASE + 0x54)
+#define LL_CONFIG_REG (TEGRA_NAND_BASE + 0x58)
+#define LL_PTR_REG (TEGRA_NAND_BASE + 0x5c)
+#define LL_STATUS_REG (TEGRA_NAND_BASE + 0x60)
+
+/* nand_command bits */
+#define COMMAND_GO REG_BIT(31)
+#define COMMAND_CLE REG_BIT(30)
+#define COMMAND_ALE REG_BIT(29)
+#define COMMAND_PIO REG_BIT(28)
+#define COMMAND_TX REG_BIT(27)
+#define COMMAND_RX REG_BIT(26)
+#define COMMAND_SEC_CMD REG_BIT(25)
+#define COMMAND_AFT_DAT REG_BIT(24)
+#define COMMAND_TRANS_SIZE(val) REG_FIELD((val), 20, 4)
+#define COMMAND_A_VALID REG_BIT(19)
+#define COMMAND_B_VALID REG_BIT(18)
+#define COMMAND_RD_STATUS_CHK REG_BIT(17)
+#define COMMAND_RBSY_CHK REG_BIT(16)
+#define COMMAND_CE(val) REG_BIT(8 + ((val) & 0x7))
+#define COMMAND_CLE_BYTE_SIZE(val) REG_FIELD((val), 4, 2)
+#define COMMAND_ALE_BYTE_SIZE(val) REG_FIELD((val), 0, 4)
+
+/* nand isr bits */
+#define ISR_UND REG_BIT(7)
+#define ISR_OVR REG_BIT(6)
+#define ISR_CMD_DONE REG_BIT(5)
+#define ISR_ECC_ERR REG_BIT(4)
+
+/* nand ier bits */
+#define IER_ERR_TRIG_VAL(val) REG_FIELD((val), 16, 4)
+#define IER_UND REG_BIT(7)
+#define IER_OVR REG_BIT(6)
+#define IER_CMD_DONE REG_BIT(5)
+#define IER_ECC_ERR REG_BIT(4)
+#define IER_GIE REG_BIT(0)
+
+/* nand config bits */
+#define CONFIG_HW_ECC REG_BIT(31)
+#define CONFIG_ECC_SEL REG_BIT(30)
+#define CONFIG_HW_ERR_CORRECTION REG_BIT(29)
+#define CONFIG_PIPELINE_EN REG_BIT(28)
+#define CONFIG_ECC_EN_TAG REG_BIT(27)
+#define CONFIG_TVALUE(val) REG_FIELD((val), 24, 2)
+#define CONFIG_SKIP_SPARE REG_BIT(23)
+#define CONFIG_COM_BSY REG_BIT(22)
+#define CONFIG_BUS_WIDTH REG_BIT(21)
+#define CONFIG_EDO_MODE REG_BIT(19)
+#define CONFIG_PAGE_SIZE_SEL(val) REG_FIELD((val), 16, 3)
+#define CONFIG_SKIP_SPARE_SEL(val) REG_FIELD((val), 14, 2)
+#define CONFIG_TAG_BYTE_SIZE(val) REG_FIELD((val), 0, 8)
+
+/* nand timing bits */
+#define TIMING_TRP_RESP(val) REG_FIELD((val), 28, 4)
+#define TIMING_TWB(val) REG_FIELD((val), 24, 4)
+#define TIMING_TCR_TAR_TRR(val) REG_FIELD((val), 20, 4)
+#define TIMING_TWHR(val) REG_FIELD((val), 16, 4)
+#define TIMING_TCS(val) REG_FIELD((val), 14, 2)
+#define TIMING_TWH(val) REG_FIELD((val), 12, 2)
+#define TIMING_TWP(val) REG_FIELD((val), 8, 4)
+#define TIMING_TRH(val) REG_FIELD((val), 4, 2)
+#define TIMING_TRP(val) REG_FIELD((val), 0, 4)
+
+/* nand timing2 bits */
+#define TIMING2_TADL(val) REG_FIELD((val), 0, 4)
+
+/* nand dma_mst_ctrl bits */
+#define DMA_CTRL_DMA_GO REG_BIT(31)
+#define DMA_CTRL_DIR REG_BIT(30)
+#define DMA_CTRL_DMA_PERF_EN REG_BIT(29)
+#define DMA_CTRL_IE_DMA_DONE REG_BIT(28)
+#define DMA_CTRL_REUSE_BUFFER REG_BIT(27)
+#define DMA_CTRL_BURST_SIZE(val) REG_FIELD((val), 24, 3)
+#define DMA_CTRL_IS_DMA_DONE REG_BIT(20)
+#define DMA_CTRL_DMA_EN_A REG_BIT(2)
+#define DMA_CTRL_DMA_EN_B REG_BIT(1)
+
+/* nand dma_cfg_a/cfg_b bits */
+#define DMA_CFG_BLOCK_SIZE(val) REG_FIELD((val), 0, 16)
+
+/* nand dec_status bits */
+#define DEC_STATUS_ERR_PAGE_NUM(val) REG_GET_FIELD((val), 24, 8)
+#define DEC_STATUS_ERR_CNT(val) REG_GET_FIELD((val), 16, 8)
+#define DEC_STATUS_ECC_FAIL_A REG_BIT(1)
+#define DEC_STATUS_ECC_FAIL_B REG_BIT(0)
+
+/* nand hwstatus_mask bits */
+#define HWSTATUS_RDSTATUS_MASK(val) REG_FIELD((val), 24, 8)
+#define HWSTATUS_RDSTATUS_EXP_VAL(val) REG_FIELD((val), 16, 8)
+#define HWSTATUS_RBSY_MASK(val) REG_FIELD((val), 8, 8)
+#define HWSTATUS_RBSY_EXP_VAL(val) REG_FIELD((val), 0, 8)
+
+#endif
+
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index c0c328c5b133..299e67c039ff 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -561,4 +561,11 @@ config MTD_LATCH_ADDR
If compiled as a module, it will be called latch-addr-flash.
+config MTD_NOR_TEGRA
+ bool "NOR Flash mapping driver for NVIDIA Tegra based boards"
+ depends on MTD_COMPLEX_MAPPINGS && ARCH_TEGRA
+ help
+ This enables access routines for the flash chips on the NVIDIA Tegra
+ based boards.
+
endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index cb48b11affff..bb5eef14a367 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
+obj-$(CONFIG_MTD_NOR_TEGRA) += tegra_nor.o
diff --git a/drivers/mtd/maps/tegra_nor.c b/drivers/mtd/maps/tegra_nor.c
new file mode 100644
index 000000000000..34238156ded1
--- /dev/null
+++ b/drivers/mtd/maps/tegra_nor.c
@@ -0,0 +1,480 @@
+/*
+ * drivers/mtd/maps/tegra_nor.c
+ *
+ * MTD mapping driver for the internal SNOR controller in Tegra SoCs
+ *
+ * Copyright (C) 2009 - 2011 NVIDIA Corporation
+ *
+ * Author:
+ * Raghavendra VK <rvk@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_data/tegra_nor.h>
+#include <asm/cacheflush.h>
+
+#define __BITMASK0(len) (BIT(len) - 1)
+#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
+#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
+
+/* tegra gmi registers... */
+#define TEGRA_SNOR_CONFIG_REG 0x00
+#define TEGRA_SNOR_NOR_ADDR_PTR_REG 0x08
+#define TEGRA_SNOR_AHB_ADDR_PTR_REG 0x0C
+#define TEGRA_SNOR_TIMING0_REG 0x10
+#define TEGRA_SNOR_TIMING1_REG 0x14
+#define TEGRA_SNOR_DMA_CFG_REG 0x20
+
+/* config register */
+#define TEGRA_SNOR_CONFIG_GO BIT(31)
+#define TEGRA_SNOR_CONFIG_WORDWIDE BIT(30)
+#define TEGRA_SNOR_CONFIG_DEVICE_TYPE BIT(29)
+#define TEGRA_SNOR_CONFIG_MUX_MODE BIT(28)
+#define TEGRA_SNOR_CONFIG_BURST_LEN(val) REG_FIELD((val), 26, 2)
+#define TEGRA_SNOR_CONFIG_RDY_ACTIVE BIT(24)
+#define TEGRA_SNOR_CONFIG_RDY_POLARITY BIT(23)
+#define TEGRA_SNOR_CONFIG_ADV_POLARITY BIT(22)
+#define TEGRA_SNOR_CONFIG_OE_WE_POLARITY BIT(21)
+#define TEGRA_SNOR_CONFIG_CS_POLARITY BIT(20)
+#define TEGRA_SNOR_CONFIG_NOR_DPD BIT(19)
+#define TEGRA_SNOR_CONFIG_WP BIT(15)
+#define TEGRA_SNOR_CONFIG_PAGE_SZ(val) REG_FIELD((val), 8, 2)
+#define TEGRA_SNOR_CONFIG_MST_ENB BIT(7)
+#define TEGRA_SNOR_CONFIG_SNOR_CS(val) REG_FIELD((val), 4, 2)
+#define TEGRA_SNOR_CONFIG_CE_LAST BIT(3)
+#define TEGRA_SNOR_CONFIG_CE_FIRST BIT(2)
+#define TEGRA_SNOR_CONFIG_DEVICE_MODE(val) REG_FIELD((val), 0, 2)
+
+/* dma config register */
+#define TEGRA_SNOR_DMA_CFG_GO BIT(31)
+#define TEGRA_SNOR_DMA_CFG_BSY BIT(30)
+#define TEGRA_SNOR_DMA_CFG_DIR BIT(29)
+#define TEGRA_SNOR_DMA_CFG_INT_ENB BIT(28)
+#define TEGRA_SNOR_DMA_CFG_INT_STA BIT(27)
+#define TEGRA_SNOR_DMA_CFG_BRST_SZ(val) REG_FIELD((val), 24, 3)
+#define TEGRA_SNOR_DMA_CFG_WRD_CNT(val) REG_FIELD((val), 2, 14)
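+/* Note: WRD_CNT is programmed with (number of 32-bit words - 1), so the 14-bit
+ * field covers the full 2^14-word (64KB) DMA limit defined below. */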
+
+/* timing 0 register */
+#define TEGRA_SNOR_TIMING0_PG_RDY(val) REG_FIELD((val), 28, 4)
+#define TEGRA_SNOR_TIMING0_PG_SEQ(val) REG_FIELD((val), 20, 4)
+#define TEGRA_SNOR_TIMING0_MUX(val) REG_FIELD((val), 12, 4)
+#define TEGRA_SNOR_TIMING0_HOLD(val) REG_FIELD((val), 8, 4)
+#define TEGRA_SNOR_TIMING0_ADV(val) REG_FIELD((val), 4, 4)
+#define TEGRA_SNOR_TIMING0_CE(val) REG_FIELD((val), 0, 4)
+
+/* timing 1 register */
+#define TEGRA_SNOR_TIMING1_WE(val) REG_FIELD((val), 16, 8)
+#define TEGRA_SNOR_TIMING1_OE(val) REG_FIELD((val), 8, 8)
+#define TEGRA_SNOR_TIMING1_WAIT(val) REG_FIELD((val), 0, 8)
+
+/* The SNOR DMA engine supports up to 2^14 AHB (32-bit) words per transfer,
+ * i.e. a maximum of 2^16 bytes of data in one go.
+ */
+#define TEGRA_SNOR_DMA_LIMIT 0x10000
+#define TEGRA_SNOR_DMA_LIMIT_WORDS (TEGRA_SNOR_DMA_LIMIT >> 2)
+
+/* Even if BW is 1 MB/s, maximum time to
+ * transfer SNOR_DMA_LIMIT bytes is 66 ms
+ */
+#define TEGRA_SNOR_DMA_TIMEOUT_MS 67
+
+struct tegra_nor_info {
+ struct tegra_nor_platform_data *plat;
+ struct device *dev;
+ struct clk *clk;
+ struct mtd_partition *parts;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct completion dma_complete;
+ void __iomem *base;
+ u32 init_config;
+ u32 timing0_default, timing1_default;
+ u32 timing0_read, timing1_read;
+};
+
+static inline unsigned long snor_tegra_readl(struct tegra_nor_info *tnor,
+ unsigned long reg)
+{
+ return readl(tnor->base + reg);
+}
+
+static inline void snor_tegra_writel(struct tegra_nor_info *tnor,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tnor->base + reg);
+}
+
+#define DRV_NAME "tegra-nor"
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char * const part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+static int wait_for_dma_completion(struct tegra_nor_info *info)
+{
+ unsigned long dma_timeout;
+ int ret;
+
+ dma_timeout = msecs_to_jiffies(TEGRA_SNOR_DMA_TIMEOUT_MS);
+ ret = wait_for_completion_timeout(&info->dma_complete, dma_timeout);
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+static void tegra_flash_dma(struct map_info *map,
+ void *to, unsigned long from, ssize_t len)
+{
+ u32 snor_config, dma_config = 0;
+ int dma_transfer_count = 0, word32_count = 0;
+ u32 nor_address, ahb_address, current_transfer;
+ struct tegra_nor_info *c =
+ container_of(map, struct tegra_nor_info, map);
+ unsigned int bytes_remaining = len;
+
+ snor_config = c->init_config;
+ snor_tegra_writel(c, c->timing0_read, TEGRA_SNOR_TIMING0_REG);
+ snor_tegra_writel(c, c->timing1_read, TEGRA_SNOR_TIMING1_REG);
+
+ if (len > 32) {
+
+ if (to >= high_memory)
+ goto out_copy;
+
+ ahb_address = dma_map_single(c->dev, to, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(c->dev, ahb_address)) {
+ dev_err(c->dev,
+ "Couldn't DMA map a %zd byte buffer\n", len);
+ goto out_copy;
+ }
+ word32_count = len >> 2;
+ bytes_remaining = len & 0x00000003;
+ /*
+ * The parameters can be setup in any order since we write to
+ * controller register only after all parameters are set.
+ */
+ /* SNOR CONFIGURATION SETUP */
+ snor_config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(1);
+ /* 8 word page */
+ snor_config |= TEGRA_SNOR_CONFIG_PAGE_SZ(2);
+ snor_config |= TEGRA_SNOR_CONFIG_MST_ENB;
+ /* SNOR DMA CONFIGURATION SETUP */
+ /* NOR -> AHB */
+ dma_config &= ~TEGRA_SNOR_DMA_CFG_DIR;
+ /* One word burst */
+ dma_config |= TEGRA_SNOR_DMA_CFG_BRST_SZ(4);
+
+ for (nor_address = (unsigned int)(map->phys + from);
+ word32_count > 0;
+ word32_count -= current_transfer,
+ dma_transfer_count += current_transfer,
+ nor_address += (current_transfer * 4),
+ ahb_address += (current_transfer * 4)) {
+
+ current_transfer =
+ (word32_count > TEGRA_SNOR_DMA_LIMIT_WORDS)
+ ? (TEGRA_SNOR_DMA_LIMIT_WORDS) : word32_count;
+ /* Start NOR operation */
+ snor_config |= TEGRA_SNOR_CONFIG_GO;
+ dma_config |= TEGRA_SNOR_DMA_CFG_GO;
+ /* Enable interrupt before every transaction since the
+ * interrupt handler disables it */
+ dma_config |= TEGRA_SNOR_DMA_CFG_INT_ENB;
+ /* Number of AHB (32-bit) words to be transferred, minus 1 */
+ dma_config |=
+ TEGRA_SNOR_DMA_CFG_WRD_CNT(current_transfer - 1);
+ snor_tegra_writel(c, ahb_address,
+ TEGRA_SNOR_AHB_ADDR_PTR_REG);
+ snor_tegra_writel(c, nor_address,
+ TEGRA_SNOR_NOR_ADDR_PTR_REG);
+ snor_tegra_writel(c, snor_config,
+ TEGRA_SNOR_CONFIG_REG);
+ snor_tegra_writel(c, dma_config,
+ TEGRA_SNOR_DMA_CFG_REG);
+ if (wait_for_dma_completion(c)) {
+ dev_err(c->dev, "timeout waiting for DMA\n");
+ /* Transfer the remaining words by memcpy */
+ bytes_remaining += (word32_count << 2);
+ break;
+ }
+ }
+ dma_unmap_single(c->dev, ahb_address, len, DMA_FROM_DEVICE);
+ }
+ /* Put the controller back into slave mode. */
+ snor_config = snor_tegra_readl(c, TEGRA_SNOR_CONFIG_REG);
+ snor_config &= ~TEGRA_SNOR_CONFIG_MST_ENB;
+ snor_config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(0);
+ snor_tegra_writel(c, snor_config, TEGRA_SNOR_CONFIG_REG);
+out_copy:
+ memcpy_fromio(((char *)to + (dma_transfer_count << 2)),
+ ((char *)(map->virt + from) + (dma_transfer_count << 2)),
+ bytes_remaining);
+
+ snor_tegra_writel(c, c->timing0_default, TEGRA_SNOR_TIMING0_REG);
+ snor_tegra_writel(c, c->timing1_default, TEGRA_SNOR_TIMING1_REG);
+}
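+
+/* tegra_flash_dma() is installed as map->copy_from in tegra_nor_probe(), so
+ * MTD reads larger than 32 bytes go through the SNOR DMA engine while smaller
+ * reads (or any residue after a DMA error) fall back to memcpy_fromio(). */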
+
+static irqreturn_t tegra_nor_isr(int flag, void *dev_id)
+{
+ struct tegra_nor_info *info = (struct tegra_nor_info *)dev_id;
+ u32 dma_config = snor_tegra_readl(info, TEGRA_SNOR_DMA_CFG_REG);
+ if (dma_config & TEGRA_SNOR_DMA_CFG_INT_STA) {
+ /* Disable interrupts. WAR for BUG:821560 */
+ dma_config &= ~TEGRA_SNOR_DMA_CFG_INT_ENB;
+ snor_tegra_writel(info, dma_config, TEGRA_SNOR_DMA_CFG_REG);
+ complete(&info->dma_complete);
+ } else {
+ pr_err("%s: Spurious interrupt\n", __func__);
+ }
+ return IRQ_HANDLED;
+}
+
+static int tegra_snor_controller_init(struct tegra_nor_info *info)
+{
+ struct tegra_nor_chip_parms *chip_parm = &info->plat->chip_parms;
+ u32 width = info->plat->flash.width;
+ u32 config = 0;
+
+ config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(0);
+ config |= TEGRA_SNOR_CONFIG_SNOR_CS(0);
+ config &= ~TEGRA_SNOR_CONFIG_DEVICE_TYPE; /* Select NOR */
+ config |= TEGRA_SNOR_CONFIG_WP; /* Enable writes */
+ switch (width) {
+ case 2:
+ config &= ~TEGRA_SNOR_CONFIG_WORDWIDE; /* 16 bit */
+ break;
+ case 4:
+ config |= TEGRA_SNOR_CONFIG_WORDWIDE; /* 32 bit */
+ break;
+ default:
+ return -EINVAL;
+ }
+ config |= TEGRA_SNOR_CONFIG_BURST_LEN(0);
+ config &= ~TEGRA_SNOR_CONFIG_MUX_MODE;
+ snor_tegra_writel(info, config, TEGRA_SNOR_CONFIG_REG);
+ info->init_config = config;
+
+ info->timing0_default = chip_parm->timing_default.timing0;
+ info->timing0_read = chip_parm->timing_read.timing0;
+ info->timing1_default = chip_parm->timing_default.timing1;
+ info->timing1_read = chip_parm->timing_read.timing1;
+
+ snor_tegra_writel(info, info->timing1_default, TEGRA_SNOR_TIMING1_REG);
+ snor_tegra_writel(info, info->timing0_default, TEGRA_SNOR_TIMING0_REG);
+ return 0;
+}
+
+static int tegra_nor_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct tegra_nor_platform_data *plat = pdev->dev.platform_data;
+ struct tegra_nor_info *info = NULL;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ if (!plat) {
+ pr_err("%s: no platform device info\n", __func__);
+ err = -EINVAL;
+ goto fail;
+ }
+
+ info = devm_kzalloc(dev, sizeof(struct tegra_nor_info),
+ GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Get NOR flash aperture & map the same */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no mem resource?\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(dev, "NOR region already claimed\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ info->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!info->base) {
+ dev_err(dev, "Can't ioremap NOR region\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Get NOR flash aperture & map the same */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "no mem resource?\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "NOR region already claimed\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ info->map.virt = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!info->map.virt) {
+ dev_err(dev, "Can't ioremap NOR region\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ info->plat = plat;
+ info->dev = dev;
+ info->map.bankwidth = plat->flash.width;
+ info->map.name = dev_name(dev);
+ info->map.phys = res->start;
+ info->map.size = resource_size(res);
+
+ info->clk = clk_get(dev, NULL);
+ if (IS_ERR(info->clk)) {
+ err = PTR_ERR(info->clk);
+ goto fail;
+ }
+
+ err = clk_enable(info->clk);
+ if (err != 0)
+ goto out_clk_put;
+
+ simple_map_init(&info->map);
+ info->map.copy_from = tegra_flash_dma;
+
+ /* Initialize the SNOR controller before the map probe */
+ err = tegra_snor_controller_init(info);
+ if (err) {
+ dev_err(dev, "Error initializing controller\n");
+ goto out_clk_disable;
+ }
+
+ init_completion(&info->dma_complete);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq resource?\n");
+ err = -ENODEV;
+ goto out_clk_disable;
+ }
+
+ /* Register SNOR DMA completion interrupt */
+ err = devm_request_irq(dev, irq, tegra_nor_isr, IRQF_DISABLED,
+ dev_name(dev), info);
+ if (err) {
+ dev_err(dev, "Failed to request irq %i\n", irq);
+ goto out_clk_disable;
+ }
+
+ info->mtd = do_map_probe(plat->flash.map_name, &info->map);
+ if (!info->mtd) {
+ err = -EIO;
+ goto out_clk_disable;
+ }
+ info->mtd->owner = THIS_MODULE;
+ info->parts = NULL;
+
+ platform_set_drvdata(pdev, info);
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ err = add_mtd_partitions(info->mtd, info->parts, err);
+ else if (err <= 0 && plat->flash.parts)
+ err =
+ add_mtd_partitions(info->mtd, plat->flash.parts,
+ plat->flash.nr_parts);
+ else
+#endif
+ add_mtd_device(info->mtd);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable(info->clk);
+out_clk_put:
+ clk_put(info->clk);
+fail:
+ pr_err("Tegra NOR probe failed\n");
+ return err;
+}
+
+static int tegra_nor_remove(struct platform_device *pdev)
+{
+ struct tegra_nor_info *info = platform_get_drvdata(pdev);
+
+ if (info->parts) {
+ del_mtd_partitions(info->mtd);
+ kfree(info->parts);
+ } else
+ del_mtd_device(info->mtd);
+ map_destroy(info->mtd);
+ clk_disable(info->clk);
+ clk_put(info->clk);
+
+ return 0;
+}
+
+static struct platform_driver __refdata tegra_nor_driver = {
+ .probe = tegra_nor_probe,
+ .remove = __devexit_p(tegra_nor_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_nor_init(void)
+{
+ return platform_driver_register(&tegra_nor_driver);
+}
+
+static void __exit tegra_nor_exit(void)
+{
+ platform_driver_unregister(&tegra_nor_driver);
+}
+
+module_init(tegra_nor_init);
+module_exit(tegra_nor_exit);
+
+MODULE_AUTHOR("Raghavendra VK <rvk@nvidia.com>");
+MODULE_DESCRIPTION("NOR Flash mapping driver for NVIDIA Tegra based boards");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4c3425235adc..43173a335e49 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+ tristate "Include chip ids for known NAND devices."
+ depends on MTD
+ help
+ Useful for NAND drivers that do not use the NAND subsystem but
+ would still like to take advantage of the known chip information.
+
config MTD_NAND_ECC
tristate
@@ -121,6 +128,23 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+config MTD_NAND_OMAP_PREFETCH
+ bool "GPMC prefetch support for NAND Flash device"
+ depends on MTD_NAND_OMAP2
+ default y
+ help
+ The NAND device can be accessed for read/write using the GPMC PREFETCH
+ engine to improve performance.
+
+config MTD_NAND_OMAP_PREFETCH_DMA
+ depends on MTD_NAND_OMAP_PREFETCH
+ bool "DMA mode"
+ default n
+ help
+ The GPMC PREFETCH engine can be configured either in MPU interrupt mode
+ or in DMA interrupt mode.
+ Say Y here for DMA mode; otherwise MPU interrupt mode will be used.
+
config MTD_NAND_IDS
tristate
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 86f05f45780a..15d71658b4f1 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3222,6 +3222,44 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
}
EXPORT_SYMBOL(nand_scan_ident);
+static void nand_panic_wait(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int i;
+
+ if (chip->state != FL_READY)
+ for (i = 0; i < 40; i++) {
+ if (chip->dev_ready(mtd))
+ break;
+ mdelay(10);
+ }
+ chip->state = FL_READY;
+}
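+
+/* Called on the panic write path, where interrupts may no longer be serviced;
+ * the loop above therefore busy-polls dev_ready() for up to ~400 ms
+ * (40 iterations x 10 ms mdelay) before forcing the state to FL_READY. */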
+
+static int nand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ /* Do not allow writes past the end of the device */
+ if ((to + len) > mtd->size)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ nand_panic_wait(mtd);
+
+ chip->ops.len = len;
+ chip->ops.datbuf = (uint8_t *)buf;
+ chip->ops.oobbuf = NULL;
+
+ ret = nand_do_write_ops(mtd, to, &chip->ops);
+
+ *retlen = chip->ops.retlen;
+ return ret;
+}
+
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
@@ -3465,6 +3503,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->panic_write = panic_nand_write;
mtd->read_oob = nand_read_oob;
mtd->write_oob = nand_write_oob;
+ mtd->panic_write = nand_panic_write;
mtd->sync = nand_sync;
mtd->lock = NULL;
mtd->unlock = NULL;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a44874e24f2a..41b279abbd5c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3325,6 +3325,23 @@ config PPPOL2TP
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
+config PPPOLAC
+ tristate "PPP on L2TP Access Concentrator"
+ depends on PPP && INET
+ help
+ L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+ networks. This driver handles L2TP data packets between a UDP socket
+ and a PPP channel, but only permits one session per socket. Thus it is
+ fairly simple and suited for clients.
+
+config PPPOPNS
+ tristate "PPP on PPTP Network Server"
+ depends on PPP && INET
+ help
+ PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+ networks. This driver handles PPTP data packets between a RAW socket
+ and a PPP channel. It is fairly simple and easy to use.
+
config SLIP
tristate "SLIP (serial line) support"
---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e1eca2ab505e..e4a90ac66d09 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -167,6 +167,8 @@ obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -283,6 +285,7 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
+obj-$(CONFIG_USB_NET_RAW_IP) += usb/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index abf4d7a9dcce..3013a74bbfdd 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -47,3 +47,12 @@ config CAIF_HSI
The caif low level driver for CAIF over HSI.
Be aware that if you enable this then you also need to
enable a low-level HSI driver.
+
+config TEGRA_SPI_CAIF
+ tristate "TEGRA specific CAIF SPI transport driver for slave interface"
+ depends on CAIF_SPI_SLAVE && TEGRA_SPI_SLAVE
+ default n
+ ---help---
+ The CAIF link layer SPI protocol driver for the Tegra slave SPI interface.
+ This driver implements a platform driver to accommodate a
+ Tegra slave SPI device.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560f..f30752565b33 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -13,3 +13,7 @@ obj-$(CONFIG_CAIF_SHM) += caif_shm.o
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+
+# Tegra specific SPI slave physical interfaces module
+tegra_cfspi_slave-objs := tegra_caif_sspi.o
+obj-$(CONFIG_TEGRA_SPI_CAIF) += tegra_cfspi_slave.o
diff --git a/drivers/net/caif/tegra_caif_sspi.c b/drivers/net/caif/tegra_caif_sspi.c
new file mode 100644
index 000000000000..c2c15c18f2e4
--- /dev/null
+++ b/drivers/net/caif/tegra_caif_sspi.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/tegra_caif.h>
+#include <mach/spi.h>
+#include <net/caif/caif_spi.h>
+
+MODULE_LICENSE("GPL");
+
+#define SPI_CAIF_PAD_TRANSACTION_SIZE(x) \
+ (((x) > 4) ? ((((x) + 15) / 16) * 16) : (x))
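+
+/* Transfers longer than 4 bytes are padded up to the next multiple of 16
+ * bytes, e.g. 5 -> 16 and 20 -> 32; shorter transfers are left as-is. */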
+
+struct sspi_struct {
+ struct cfspi_dev sdev;
+ struct cfspi_xfer *xfer;
+};
+
+static struct sspi_struct slave;
+static struct platform_device slave_device;
+static struct spi_device *tegra_caif_spi_slave_device;
+int tegra_caif_sspi_gpio_spi_int;
+int tegra_caif_sspi_gpio_spi_ss;
+int tegra_caif_sspi_gpio_reset;
+int tegra_caif_sspi_gpio_power;
+int tegra_caif_sspi_gpio_awr;
+int tegra_caif_sspi_gpio_cwr;
+
+
+static int __devinit tegra_caif_spi_slave_probe(struct spi_device *spi);
+
+static int tegra_caif_spi_slave_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_caif_spi_slave_suspend(struct spi_device *spi
+ , pm_message_t mesg)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM
+static int tegra_caif_spi_slave_resume(struct spi_device *spi)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct spi_driver tegra_caif_spi_slave_driver = {
+ .driver = {
+ .name = "baseband_spi_slave0.0",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_caif_spi_slave_probe,
+ .remove = __devexit_p(tegra_caif_spi_slave_remove),
+#ifdef CONFIG_PM
+ .suspend = tegra_caif_spi_slave_suspend,
+ .resume = tegra_caif_spi_slave_resume,
+#endif /* CONFIG_PM */
+};
+
+void tegra_caif_modem_power(int on)
+{
+ static int u3xx_on;
+ int err;
+ int cnt = 0;
+ int val = 0;
+
+ if (u3xx_on == on)
+ return;
+ u3xx_on = on;
+
+ if (u3xx_on) {
+ /* turn on u3xx modem */
+ err = gpio_request(tegra_caif_sspi_gpio_reset
+ , "caif_sspi_reset");
+ if (err < 0)
+ goto err1;
+
+ err = gpio_request(tegra_caif_sspi_gpio_power
+ , "caif_sspi_power");
+ if (err < 0)
+ goto err2;
+
+ err = gpio_request(tegra_caif_sspi_gpio_awr
+ , "caif_sspi_awr");
+ if (err < 0)
+ goto err3;
+
+ err = gpio_request(tegra_caif_sspi_gpio_cwr
+ , "caif_sspi_cwr");
+ if (err < 0)
+ goto err4;
+
+ err = gpio_direction_output(tegra_caif_sspi_gpio_reset
+ , 0 /* asserted */);
+ if (err < 0)
+ goto err5;
+
+ err = gpio_direction_output(tegra_caif_sspi_gpio_power
+ , 0 /* off */);
+ if (err < 0)
+ goto err6;
+
+ err = gpio_direction_output(tegra_caif_sspi_gpio_awr
+ , 0);
+ if (err < 0)
+ goto err7;
+
+ err = gpio_direction_input(tegra_caif_sspi_gpio_cwr);
+ if (err < 0)
+ goto err8;
+
+ gpio_set_value(tegra_caif_sspi_gpio_power, 0);
+ gpio_set_value(tegra_caif_sspi_gpio_reset, 0);
+
+ msleep(800);
+
+ /* pulse modem power on for 300 ms */
+ gpio_set_value(tegra_caif_sspi_gpio_reset
+ , 1 /* deasserted */);
+ msleep(300);
+ gpio_set_value(tegra_caif_sspi_gpio_power, 1);
+ msleep(300);
+ gpio_set_value(tegra_caif_sspi_gpio_power, 0);
+ msleep(100);
+
+ /* set awr high */
+ gpio_set_value(tegra_caif_sspi_gpio_awr, 1);
+ val = gpio_get_value(tegra_caif_sspi_gpio_cwr);
+ while (!val) {
+ /* wait for cwr to go high */
+ val = gpio_get_value(tegra_caif_sspi_gpio_cwr);
+ pr_info(".");
+ msleep(100);
+ cnt++;
+ if (cnt > 200) {
+ pr_err("\nWaiting for CWR timed out - ERROR\n");
+ break;
+ }
+ }
+ }
+ return;
+err8:
+err7:
+err6:
+err5:
+ gpio_free(tegra_caif_sspi_gpio_cwr);
+err4:
+ gpio_free(tegra_caif_sspi_gpio_awr);
+err3:
+ gpio_free(tegra_caif_sspi_gpio_power);
+err2:
+ gpio_free(tegra_caif_sspi_gpio_reset);
+err1:
+ return;
+}
+
+static irqreturn_t sspi_irq(int irq, void *arg)
+{
+ /* You only need to trigger on an edge to the active state of the
+ * SS signal. Once an edge is detected, the ss_cb() function should
+ * be called with the parameter assert set to true. It is OK
+ * (and even advised) to call the ss_cb() function in IRQ context
+ * in order not to add any delay.
+ */
+ int val;
+ struct cfspi_dev *sdev = (struct cfspi_dev *)arg;
+ val = gpio_get_value(tegra_caif_sspi_gpio_spi_ss);
+ if (val)
+ return IRQ_HANDLED;
+ sdev->ifc->ss_cb(true, sdev->ifc);
+ return IRQ_HANDLED;
+}
+
+static int sspi_callback(void *arg)
+{
+ /* for each spi_sync() call
+ * - sspi_callback() called before spi transfer
+ * - sspi_complete() called after spi transfer
+ */
+
+ /* set master interrupt gpio pin active (tells master to
+ * start spi clock)
+ */
+ udelay(MIN_TRANSITION_TIME_USEC);
+ gpio_set_value(tegra_caif_sspi_gpio_spi_int, 1);
+ return 0;
+}
+
+static void sspi_complete(void *context)
+{
+ /* Normally the DMA or the SPI framework will call you back
+ * in something similar to this. The only thing you need to
+ * do is to call the xfer_done_cb() function, providing the pointer
+ * to the CAIF SPI interface. It is OK to call this function
+ * from IRQ context.
+ */
+
+ struct cfspi_dev *sdev = (struct cfspi_dev *)context;
+ sdev->ifc->xfer_done_cb(sdev->ifc);
+}
+
+static void swap_byte(unsigned char *buf, unsigned int bufsiz)
+{
+ unsigned int i;
+ unsigned char tmp;
+ for (i = 0; i < bufsiz; i += 2) {
+ tmp = buf[i];
+ buf[i] = buf[i+1];
+ buf[i+1] = tmp;
+ }
+}
+
+static int sspi_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev)
+{
+ /* Store transfer info. For a normal implementation you should
+ * set up your DMA here and make sure that you are ready to
+ * receive the data from the master SPI.
+ */
+
+ struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
+ struct spi_message m;
+ struct spi_transfer t;
+ int err;
+
+ sspi->xfer = xfer;
+
+ if (!tegra_caif_spi_slave_device)
+ return -ENODEV;
+
+ err = spi_tegra_register_callback(tegra_caif_spi_slave_device,
+ sspi_callback, sspi);
+ if (err < 0) {
+ pr_err("\nspi_tegra_register_callback() failed\n");
+ return -ENODEV;
+ }
+ memset(&t, 0, sizeof(t));
+ t.tx_buf = xfer->va_tx;
+ swap_byte(xfer->va_tx, xfer->tx_dma_len);
+ t.rx_buf = xfer->va_rx;
+ t.len = max(xfer->tx_dma_len, xfer->rx_dma_len);
+ t.len = SPI_CAIF_PAD_TRANSACTION_SIZE(t.len);
+ t.bits_per_word = 16;
+ /* SPI controller clock should be 4 times the spi_clk */
+ t.speed_hz = (SPI_MASTER_CLK_MHZ * 4 * 1000000);
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ dmb();
+ err = spi_sync(tegra_caif_spi_slave_device, &m);
+ dmb();
+ swap_byte(xfer->va_tx, xfer->tx_dma_len);
+ swap_byte(xfer->va_rx, xfer->rx_dma_len);
+ sspi_complete(&sspi->sdev);
+ if (err < 0) {
+ pr_err("spi_init_xfer - spi_sync() err %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+void sspi_sig_xfer(bool xfer, struct cfspi_dev *dev)
+{
+ /* If xfer is true then you should assert the SPI_INT to indicate to
+ * the master that you are ready to receive the data from the master
+ * SPI. If xfer is false then you should de-assert SPI_INT to indicate
+ * that the transfer is done.
+ */
+ if (xfer)
+ gpio_set_value(tegra_caif_sspi_gpio_spi_int, 1);
+ else
+ gpio_set_value(tegra_caif_sspi_gpio_spi_int, 0);
+}
+
+static void sspi_release(struct device *dev)
+{
+ /*
+ * Here you should release your SPI device resources.
+ */
+}
+
+static int __init sspi_init(void)
+{
+ /* Here you should initialize your SPI device by providing the
+ * necessary functions, clock speed, name and private data. Once
+ * done, you can register your device with the
+ * platform_device_register() function. This function will return
+ * with the CAIF SPI interface initialized. This is probably also
+ * the place where you should set up your GPIOs, interrupts and SPI
+ * resources.
+ */
+
+ int res = 0;
+
+ /* Register Tegra SPI protocol driver. */
+ res = spi_register_driver(&tegra_caif_spi_slave_driver);
+ if (res < 0)
+ return res;
+
+ /* Initialize slave device. */
+ slave.sdev.init_xfer = sspi_init_xfer;
+ slave.sdev.sig_xfer = sspi_sig_xfer;
+ slave.sdev.clk_mhz = SPI_MASTER_CLK_MHZ;
+ slave.sdev.priv = &slave;
+ slave.sdev.name = "spi_sspi";
+ slave_device.dev.release = sspi_release;
+
+ /* Initialize platform device. */
+ slave_device.name = "cfspi_sspi";
+ slave_device.dev.platform_data = &slave.sdev;
+
+ /* Register platform device. */
+ res = platform_device_register(&slave_device);
+ if (res)
+ return -ENODEV;
+
+ return res;
+}
+
+static void __exit sspi_exit(void)
+{
+ /* Delete platform device. */
+ platform_device_del(&slave_device);
+
+ /* Free Tegra SPI protocol driver. */
+ spi_unregister_driver(&tegra_caif_spi_slave_driver);
+
+ /* Free Tegra GPIO interrupts. */
+ disable_irq(gpio_to_irq(tegra_caif_sspi_gpio_spi_ss));
+ free_irq(gpio_to_irq(tegra_caif_sspi_gpio_spi_ss), &slave.sdev);
+
+ /* Free Tegra GPIOs. */
+ gpio_free(tegra_caif_sspi_gpio_spi_ss);
+ gpio_free(tegra_caif_sspi_gpio_spi_int);
+}
+
+static int __devinit tegra_caif_spi_slave_probe(struct spi_device *spi)
+{
+ struct tegra_caif_platform_data *pdata;
+ int res;
+
+ if (!spi)
+ return -ENODEV;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ tegra_caif_sspi_gpio_spi_int = pdata->spi_int;
+ tegra_caif_sspi_gpio_spi_ss = pdata->spi_ss;
+ tegra_caif_sspi_gpio_reset = pdata->reset;
+ tegra_caif_sspi_gpio_power = pdata->power;
+ tegra_caif_sspi_gpio_awr = pdata->awr;
+ tegra_caif_sspi_gpio_cwr = pdata->cwr;
+
+ tegra_caif_spi_slave_device = spi;
+
+ /* Initialize Tegra GPIOs. */
+ res = gpio_request(tegra_caif_sspi_gpio_spi_int, "caif_sspi_spi_int");
+ if (res < 0)
+ goto err1;
+
+ res = gpio_request(tegra_caif_sspi_gpio_spi_ss, "caif_sspi_ss");
+ if (res < 0)
+ goto err2;
+
+ res = gpio_direction_output(tegra_caif_sspi_gpio_spi_int, 0);
+ if (res < 0)
+ goto err3;
+
+ res = gpio_direction_input(tegra_caif_sspi_gpio_spi_ss);
+ if (res < 0)
+ goto err4;
+
+ tegra_caif_modem_power(1);
+ msleep(2000);
+
+ /* Initialize Tegra GPIO interrupts. */
+ res = request_irq(gpio_to_irq(tegra_caif_sspi_gpio_spi_ss),
+ sspi_irq, IRQF_TRIGGER_FALLING, "caif_sspi_ss_irq",
+ &slave.sdev);
+ if (res < 0)
+ goto err5;
+
+ return 0;
+err5:
+ /* request_irq() failed, so there is no IRQ to free here */
+err4:
+err3:
+ gpio_free(tegra_caif_sspi_gpio_spi_ss);
+err2:
+ gpio_free(tegra_caif_sspi_gpio_spi_int);
+err1:
+ return res;
+}
+
+module_init(sspi_init);
+module_exit(sspi_exit);
diff --git a/drivers/net/pppolac.c b/drivers/net/pppolac.c
new file mode 100644
index 000000000000..c94b8507d92b
--- /dev/null
+++ b/drivers/net/pppolac.c
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT 0x80
+#define L2TP_LENGTH_BIT 0x40
+#define L2TP_SEQUENCE_BIT 0x08
+#define L2TP_OFFSET_BIT 0x02
+#define L2TP_VERSION 0x02
+#define L2TP_VERSION_MASK 0x0F
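+
+/* L2TP data-message header as parsed below (see RFC 2661): flags and version
+ * (2 bytes), optional length (2), tunnel ID (2), session ID (2), optional
+ * Ns/Nr sequence numbers (4), optional offset size (2) plus offset padding. */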
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
+union unaligned {
+ __u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+ return (union unaligned *)ptr;
+}
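+
+/* The tunnel and session IDs are adjacent 16-bit fields on the wire; the
+ * packed union above lets both be compared with a single 32-bit load without
+ * risking an unaligned-access fault on strict-alignment CPUs. */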
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
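+
+/* Per-packet reordering state (sequence number and arrival time in jiffies)
+ * is kept in the skb control buffer, so no extra allocation is needed. */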
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+ struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ __u8 bits;
+ __u8 *ptr;
+
+ /* Drop the packet if L2TP header is missing. */
+ if (skb->len < sizeof(struct udphdr) + 6)
+ goto drop;
+
+ /* Put it back if it is a control packet. */
+ if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+ return opt->backlog_rcv(sk_udp, skb);
+
+ /* Skip UDP header. */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ /* Check the version. */
+ if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+ goto drop;
+ bits = skb->data[0];
+ ptr = &skb->data[2];
+
+ /* Check the length if it is present. */
+ if (bits & L2TP_LENGTH_BIT) {
+ if ((ptr[0] << 8 | ptr[1]) != skb->len)
+ goto drop;
+ ptr += 2;
+ }
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+ (bits & L2TP_LENGTH_BIT ? 2 : 0) +
+ (bits & L2TP_OFFSET_BIT ? 2 : 0)))
+ goto drop;
+
+ /* Skip the offset padding if it is present. */
+ if (bits & L2TP_OFFSET_BIT &&
+ !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+ goto drop;
+
+ /* Check the tunnel and the session. */
+ if (unaligned(ptr)->u32 != opt->local)
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (bits & L2TP_SEQUENCE_BIT) {
+ meta->sequence = ptr[4] << 8 | ptr[5];
+ if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+ if (bits & L2TP_SEQUENCE_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s16 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+ /* Remove packets from receive queue as long as
+ * 1. the receive buffer is full,
+ * 2. they are queued longer than one second, or
+ * 3. there are no missing packets before them. */
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = (__u16)(meta->sequence + 1);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+ sock_hold(sk_udp);
+ sk_receive_skb(sk_udp, skb, 0);
+ return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_udp = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_udp = (struct sock *)chan->private;
+ struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+
+ /* Install L2TP header. */
+ if (atomic_read(&opt->sequencing)) {
+ skb_push(skb, 10);
+ skb->data[0] = L2TP_SEQUENCE_BIT;
+ skb->data[6] = opt->xmit_sequence >> 8;
+ skb->data[7] = opt->xmit_sequence;
+ skb->data[8] = 0;
+ skb->data[9] = 0;
+ opt->xmit_sequence++;
+ } else {
+ skb_push(skb, 6);
+ skb->data[0] = 0;
+ }
+ skb->data[1] = L2TP_VERSION;
+ unaligned(&skb->data[2])->u32 = opt->remote;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_udp);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+ .start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+ struct socket *sock_udp = NULL;
+ struct sock *sk_udp;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppolac) ||
+ !addr->local.tunnel || !addr->local.session ||
+ !addr->remote.tunnel || !addr->remote.session) {
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_udp = sockfd_lookup(addr->udp_socket, &error);
+ if (!sock_udp)
+ goto out;
+ sk_udp = sock_udp->sk;
+ lock_sock(sk_udp);
+
+ /* Remove this check when IPv6 supports UDP encapsulation. */
+ error = -EAFNOSUPPORT;
+ if (sk_udp->sk_family != AF_INET)
+ goto out;
+ error = -EPROTONOSUPPORT;
+ if (sk_udp->sk_protocol != IPPROTO_UDP)
+ goto out;
+ error = -EDESTADDRREQ;
+ if (sk_udp->sk_state != TCP_ESTABLISHED)
+ goto out;
+ error = -EBUSY;
+ if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+ goto out;
+ if (!sk_udp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_udp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ po->chan.hdrlen = 12;
+ po->chan.private = sk_udp;
+ po->chan.ops = &pppolac_channel_ops;
+ po->chan.mtu = PPP_MTU - 80;
+ po->proto.lac.local = unaligned(&addr->local)->u32;
+ po->proto.lac.remote = unaligned(&addr->remote)->u32;
+ atomic_set(&po->proto.lac.sequencing, 1);
+ po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+ sk_udp->sk_backlog_rcv = pppolac_recv_core;
+ sk_udp->sk_user_data = sk;
+out:
+ if (sock_udp) {
+ release_sock(sk_udp);
+ if (error)
+ sockfd_put(sock_udp);
+ }
+ release_sock(sk);
+ return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_udp);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ udp_sk(sk_udp)->encap_type = 0;
+ udp_sk(sk_udp)->encap_rcv = NULL;
+ sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+ sk_udp->sk_user_data = NULL;
+ release_sock(sk_udp);
+ sockfd_put(sk_udp->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+ .name = "PPPOLAC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppolac_release,
+ .bind = sock_no_bind,
+ .connect = pppolac_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppolac_proto_ops;
+ sk->sk_protocol = PX_PROTO_OLAC;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+ .create = pppolac_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+ int error;
+
+ error = proto_register(&pppolac_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+ if (error)
+ proto_unregister(&pppolac_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OLAC);
+ proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/pppopns.c b/drivers/net/pppopns.c
new file mode 100644
index 000000000000..fb8198447938
--- /dev/null
+++ b/drivers/net/pppopns.c
@@ -0,0 +1,428 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address as
+ * the control socket. Outgoing packets are always sent with sequence numbers
+ * but without acknowledgements. Incoming packets with sequence numbers are
+ * reordered within a sliding window of one second. Currently reordering only
+ * happens when a packet is received; this is done for simplicity, since no
+ * additional locks or threads are required. This driver should work on both
+ * IPv4 and IPv6. */
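The reordering described above relies on comparing 32-bit sequence numbers by signed difference, so the comparison stays correct across wraparound (the same (__s32)(a - b) < 0 idiom appears in pppopns_recv_core below). The following standalone sketch, illustrative only and not part of the patch, shows that comparison in plain user-space C:

/* Illustrative sketch (not part of the patch): the driver's signed
 * sequence comparison, which tolerates 32-bit wraparound. */
#include <stdint.h>
#include <stdio.h>

/* Returns nonzero if sequence a comes before sequence b, mod 2^32. */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", seq_before(5, 10));           /* 1: 5 is older        */
	printf("%d\n", seq_before(0xFFFFFFF0u, 4));  /* 1: older across wrap */
	printf("%d\n", seq_before(10, 5));           /* 0: newer             */
	return 0;
}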
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE 8
+
+#define PPTP_GRE_BITS htons(0x2001)
+#define PPTP_GRE_BITS_MASK htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT htons(0x1000)
+#define PPTP_GRE_ACK_BIT htons(0x0080)
+#define PPTP_GRE_TYPE htons(0x880B)
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
+struct header {
+ __u16 bits;
+ __u16 type;
+ __u16 length;
+ __u16 call;
+ __u32 sequence;
+} __attribute__((packed));
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
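For reference, the 12-byte enhanced-GRE data header that pppopns_xmit constructs further down (flag bits 0x2001 plus the sequence-present bit, protocol type 0x880B, payload length, peer call ID, and transmit sequence number) can be laid out in ordinary user-space C. The sketch below mirrors the constants above; the payload length, call ID and sequence values it fills in are illustrative only, and it is not part of the patch.

/* Illustrative sketch (not part of the patch): lay out the 12-byte PPTP
 * enhanced-GRE data header the same way pppopns_xmit does below. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pptp_gre_hdr {
	uint16_t bits;      /* K bit + version 1 (0x2001) | S bit (0x1000) */
	uint16_t type;      /* protocol type 0x880B (PPP)                  */
	uint16_t length;    /* payload length in bytes                     */
	uint16_t call;      /* peer's call ID                              */
	uint32_t sequence;  /* transmit sequence number                    */
} __attribute__((packed));

int main(void)
{
	struct pptp_gre_hdr hdr;
	unsigned char buf[sizeof(hdr)];
	size_t i;

	hdr.bits = htons(0x2001 | 0x1000);  /* sequenced data packet */
	hdr.type = htons(0x880B);
	hdr.length = htons(100);            /* example payload size  */
	hdr.call = htons(7);                /* example call ID       */
	hdr.sequence = htonl(1);

	memcpy(buf, &hdr, sizeof(hdr));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}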
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+ struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ struct header *hdr;
+
+ /* Skip transport header */
+ skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+ /* Drop the packet if GRE header is missing. */
+ if (skb->len < GRE_HEADER_SIZE)
+ goto drop;
+ hdr = (struct header *)skb->data;
+
+ /* Check the header. */
+ if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+ (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+ goto drop;
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, GRE_HEADER_SIZE +
+ (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+ (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+ goto drop;
+
+ /* Check the length. */
+ if (skb->len != ntohs(hdr->length))
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+ meta->sequence = ntohl(hdr->sequence);
+ if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s32 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+ /* Remove packets from receive queue as long as
+ * 1. the receive buffer is full,
+ * 2. they are queued longer than one second, or
+ * 3. there are no missing packets before them. */
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = meta->sequence + 1;
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+ sock_hold(sk_raw);
+ sk_receive_skb(sk_raw, skb, 0);
+ }
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_raw = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_raw = (struct sock *)chan->private;
+ struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+ struct header *hdr;
+ __u16 length;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+ length = skb->len;
+
+ /* Install PPTP GRE header. */
+ hdr = (struct header *)skb_push(skb, 12);
+ hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+ hdr->type = PPTP_GRE_TYPE;
+ hdr->length = htons(length);
+ hdr->call = opt->remote;
+ hdr->sequence = htonl(opt->xmit_sequence);
+ opt->xmit_sequence++;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_raw);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+ .start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+ struct sockaddr_storage ss;
+ struct socket *sock_tcp = NULL;
+ struct socket *sock_raw = NULL;
+ struct sock *sk_tcp;
+ struct sock *sk_raw;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppopns))
+ return -EINVAL;
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+ if (!sock_tcp)
+ goto out;
+ sk_tcp = sock_tcp->sk;
+ error = -EPROTONOSUPPORT;
+ if (sk_tcp->sk_protocol != IPPROTO_TCP)
+ goto out;
+ addrlen = sizeof(struct sockaddr_storage);
+ error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+ if (error)
+ goto out;
+ if (!sk_tcp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_tcp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+ if (error)
+ goto out;
+ sk_raw = sock_raw->sk;
+ sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+ error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+ if (error)
+ goto out;
+
+ po->chan.hdrlen = 14;
+ po->chan.private = sk_raw;
+ po->chan.ops = &pppopns_channel_ops;
+ po->chan.mtu = PPP_MTU - 80;
+ po->proto.pns.local = addr->local;
+ po->proto.pns.remote = addr->remote;
+ po->proto.pns.data_ready = sk_raw->sk_data_ready;
+ po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ lock_sock(sk_raw);
+ sk_raw->sk_data_ready = pppopns_recv;
+ sk_raw->sk_backlog_rcv = pppopns_recv_core;
+ sk_raw->sk_user_data = sk;
+ release_sock(sk_raw);
+out:
+ if (sock_tcp)
+ sockfd_put(sock_tcp);
+ if (error && sock_raw)
+ sock_release(sock_raw);
+ release_sock(sk);
+ return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_raw);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+ sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+ sk_raw->sk_user_data = NULL;
+ release_sock(sk_raw);
+ sock_release(sk_raw->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+ .name = "PPPOPNS",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppopns_release,
+ .bind = sock_no_bind,
+ .connect = pppopns_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppopns_proto_ops;
+ sk->sk_protocol = PX_PROTO_OPNS;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+ .create = pppopns_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+ int error;
+
+ error = proto_register(&pppopns_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+ if (error)
+ proto_unregister(&pppopns_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OPNS);
+ proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 6d657cabb951..35576b9e1744 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -5633,6 +5633,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
+ /* FIX ME: Allow all phy on Cardhu dock Ethernet */
+ rx_mode |= AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 71f3d1a35b74..a024708b5704 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1243,6 +1243,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int vnet_hdr_sz;
int ret;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+ if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+#endif
+
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 84d4608153c9..7f0818038aa3 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -458,4 +458,12 @@ config USB_VL600
http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
+config USB_NET_RAW_IP
+ tristate "RAW-IP Driver for XMM6260 modems"
+ help
+ Choose this option if you have an XMM6260 modem device with RAW-IP support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called raw_ip_net.
+
endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c203fa21f6b1..ea9665cb8492 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -29,4 +29,5 @@ obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
+obj-$(CONFIG_USB_NET_RAW_IP) += raw_ip_net.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c924ea2bce07..4fba62acf757 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -570,6 +570,26 @@ static const struct usb_device_id products [] = {
.driver_info = (unsigned long)&wwan_info,
},
+/* PH450 */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ USB_DEVICE(0x1983,0x0310),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ USB_DEVICE(0x1983,0x0321),
+ .driver_info = (unsigned long)&wwan_info,
+},
+
+/* Tango module */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ USB_DEVICE(0x0489,0xE03A),
+ .driver_info = (unsigned long)&wwan_info,
+},
/*
* WHITELIST!!!
*
diff --git a/drivers/net/usb/raw_ip_net.c b/drivers/net/usb/raw_ip_net.c
new file mode 100644
index 000000000000..a3e352495a62
--- /dev/null
+++ b/drivers/net/usb/raw_ip_net.c
@@ -0,0 +1,735 @@
+/*
+ * raw_ip_net.c
+ *
+ * USB network driver for RAW-IP modems.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/usb.h>
+
+#define BASEBAND_USB_NET_DEV_NAME "rmnet%d"
+
+/* ethernet packet ethertype for IP packets */
+#define NET_IP_ETHERTYPE 0x08, 0x00
+
+#define TX_TIMEOUT 10
+
+#ifndef USB_NET_BUFSIZ
+#define USB_NET_BUFSIZ 8192
+#endif /* USB_NET_BUFSIZ */
+
+/* maximum interface number supported */
+#define MAX_INTFS 3
+
+MODULE_LICENSE("GPL");
+
+int g_i;
+
+int max_intfs = MAX_INTFS;
+unsigned long usb_net_raw_ip_vid = 0x1519;
+unsigned long usb_net_raw_ip_pid = 0x0020;
+unsigned long usb_net_raw_ip_intf[MAX_INTFS] = { 0x03, 0x05, 0x07 };
+unsigned long usb_net_raw_ip_rx_debug;
+unsigned long usb_net_raw_ip_tx_debug;
+
+module_param(max_intfs, int, 0644);
+MODULE_PARM_DESC(max_intfs, "usb net (raw-ip) - max. interfaces supported");
+module_param(usb_net_raw_ip_vid, ulong, 0644);
+MODULE_PARM_DESC(usb_net_raw_ip_vid, "usb net (raw-ip) - USB VID");
+module_param(usb_net_raw_ip_pid, ulong, 0644);
+MODULE_PARM_DESC(usb_net_raw_ip_pid, "usb net (raw-ip) - USB PID");
+module_param(usb_net_raw_ip_rx_debug, ulong, 0644);
+MODULE_PARM_DESC(usb_net_raw_ip_rx_debug, "usb net (raw-ip) - rx debug");
+module_param(usb_net_raw_ip_tx_debug, ulong, 0644);
+MODULE_PARM_DESC(usb_net_raw_ip_tx_debug, "usb net (raw-ip) - tx debug");
+
+struct baseband_usb {
+ int baseband_index;
+ struct {
+ struct usb_driver *driver;
+ struct usb_device *device;
+ struct usb_interface *interface;
+ struct {
+ struct {
+ unsigned int in;
+ unsigned int out;
+ } isoch, bulk, interrupt;
+ } pipe;
+ /* currently active rx urb */
+ struct urb *rx_urb;
+ /* currently active tx urb */
+ struct urb *tx_urb;
+ } usb;
+};
+
+static struct baseband_usb *baseband_usb_net[MAX_INTFS] = { 0, 0, 0};
+
+static struct net_device *usb_net_raw_ip_dev[MAX_INTFS] = { 0, 0, 0};
+
+static unsigned int g_usb_interface_index[MAX_INTFS];
+static struct usb_interface *g_usb_interface[MAX_INTFS];
+
+static int usb_net_raw_ip_rx_urb_submit(struct baseband_usb *usb);
+static void usb_net_raw_ip_rx_urb_comp(struct urb *urb);
+static void usb_net_raw_ip_tx_urb_comp(struct urb *urb);
+
+static int baseband_usb_driver_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int i = g_i;
+
+ pr_debug("%s(%d) { intf %p id %p\n", __func__, __LINE__, intf, id);
+
+ pr_debug("i %d\n", i);
+
+ pr_debug("intf->cur_altsetting->desc.bInterfaceNumber %02x\n",
+ intf->cur_altsetting->desc.bInterfaceNumber);
+ pr_debug("intf->cur_altsetting->desc.bAlternateSetting %02x\n",
+ intf->cur_altsetting->desc.bAlternateSetting);
+ pr_debug("intf->cur_altsetting->desc.bNumEndpoints %02x\n",
+ intf->cur_altsetting->desc.bNumEndpoints);
+ pr_debug("intf->cur_altsetting->desc.bInterfaceClass %02x\n",
+ intf->cur_altsetting->desc.bInterfaceClass);
+ pr_debug("intf->cur_altsetting->desc.bInterfaceSubClass %02x\n",
+ intf->cur_altsetting->desc.bInterfaceSubClass);
+ pr_debug("intf->cur_altsetting->desc.bInterfaceProtocol %02x\n",
+ intf->cur_altsetting->desc.bInterfaceProtocol);
+ pr_debug("intf->cur_altsetting->desc.iInterface %02x\n",
+ intf->cur_altsetting->desc.iInterface);
+
+ if (g_usb_interface_index[i] !=
+ intf->cur_altsetting->desc.bInterfaceNumber) {
+ pr_debug("%s(%d) } -ENODEV\n", __func__, __LINE__);
+ return -ENODEV;
+ } else {
+ g_usb_interface[i] = intf;
+ }
+
+ pr_debug("%s(%d) }\n", __func__, __LINE__);
+ return 0;
+}
+
+static void baseband_usb_driver_disconnect(struct usb_interface *intf)
+{
+ pr_debug("%s intf %p\n", __func__, intf);
+}
+
+#ifdef CONFIG_PM
+static int baseband_usb_driver_suspend(struct usb_interface *intf,
+ pm_message_t message)
+{
+ int i;
+
+ pr_debug("%s intf %p\n", __func__, intf);
+
+ for (i = 0; i < max_intfs; i++) {
+ pr_debug("[%d]\n", i);
+ if (!baseband_usb_net[i])
+ continue;
+ if (baseband_usb_net[i]->usb.interface != intf) {
+ pr_debug("%p != %p\n",
+ baseband_usb_net[i]->usb.interface, intf);
+ continue;
+ }
+ if (!baseband_usb_net[i]->usb.rx_urb) {
+ pr_debug("rx_usb already killed\n");
+ continue;
+ }
+ /* kill usb rx */
+ usb_kill_urb(baseband_usb_net[i]->usb.rx_urb);
+ baseband_usb_net[i]->usb.rx_urb = (struct urb *) 0;
+ }
+
+ return 0;
+}
+
+static int baseband_usb_driver_resume(struct usb_interface *intf)
+{
+ int i, err;
+
+ pr_debug("%s intf %p\n", __func__, intf);
+
+ for (i = 0; i < max_intfs; i++) {
+ pr_debug("[%d]\n", i);
+ if (!baseband_usb_net[i])
+ continue;
+ if (baseband_usb_net[i]->usb.interface != intf) {
+ pr_debug("%p != %p\n",
+ baseband_usb_net[i]->usb.interface, intf);
+ continue;
+ }
+ if (baseband_usb_net[i]->usb.rx_urb) {
+ pr_debug("rx_usb already exists\n");
+ continue;
+ }
+ /* start usb rx */
+ err = usb_net_raw_ip_rx_urb_submit(baseband_usb_net[i]);
+ if (err < 0) {
+ pr_err("submit rx failed - err %d\n", err);
+ continue;
+ }
+ }
+
+ return 0;
+}
+static int baseband_usb_driver_reset_resume(struct usb_interface *intf)
+{
+ pr_debug("%s intf %p\n", __func__, intf);
+ return baseband_usb_driver_resume(intf);
+}
+#endif /* CONFIG_PM */
+
+static struct usb_device_id baseband_usb_driver_id_table[MAX_INTFS][2];
+
+static char baseband_usb_driver_name[MAX_INTFS][32];
+
+static struct usb_driver baseband_usb_driver[MAX_INTFS] = {
+ {
+ .name = baseband_usb_driver_name[0],
+ .probe = baseband_usb_driver_probe,
+ .disconnect = baseband_usb_driver_disconnect,
+ .id_table = baseband_usb_driver_id_table[0],
+#ifdef CONFIG_PM
+ .suspend = baseband_usb_driver_suspend,
+ .resume = baseband_usb_driver_resume,
+ .reset_resume = baseband_usb_driver_reset_resume,
+ .supports_autosuspend = 1,
+#endif
+ },
+ {
+ .name = baseband_usb_driver_name[1],
+ .probe = baseband_usb_driver_probe,
+ .disconnect = baseband_usb_driver_disconnect,
+ .id_table = baseband_usb_driver_id_table[1],
+#ifdef CONFIG_PM
+ .suspend = baseband_usb_driver_suspend,
+ .resume = baseband_usb_driver_resume,
+ .reset_resume = baseband_usb_driver_reset_resume,
+ .supports_autosuspend = 1,
+#endif
+ },
+ {
+ .name = baseband_usb_driver_name[2],
+ .probe = baseband_usb_driver_probe,
+ .disconnect = baseband_usb_driver_disconnect,
+ .id_table = baseband_usb_driver_id_table[2],
+#ifdef CONFIG_PM
+ .suspend = baseband_usb_driver_suspend,
+ .resume = baseband_usb_driver_resume,
+ .reset_resume = baseband_usb_driver_reset_resume,
+ .supports_autosuspend = 1,
+#endif
+ },
+};
+
+static void find_usb_pipe(struct baseband_usb *usb)
+{
+ struct usb_device *usbdev = usb->usb.device;
+ struct usb_interface *intf = usb->usb.interface;
+ unsigned char numendpoint = intf->cur_altsetting->desc.bNumEndpoints;
+ struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
+ unsigned char n;
+
+ for (n = 0; n < numendpoint; n++) {
+ if (usb_endpoint_is_isoc_in(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] isochronous in\n", n);
+ usb->usb.pipe.isoch.in = usb_rcvisocpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_isoc_out(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] isochronous out\n", n);
+ usb->usb.pipe.isoch.out = usb_sndisocpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_bulk_in(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] bulk in\n", n);
+ usb->usb.pipe.bulk.in = usb_rcvbulkpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_bulk_out(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] bulk out\n", n);
+ usb->usb.pipe.bulk.out = usb_sndbulkpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_int_in(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] interrupt in\n", n);
+ usb->usb.pipe.interrupt.in = usb_rcvintpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_int_out(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] interrupt out\n", n);
+ usb->usb.pipe.interrupt.out = usb_sndintpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else {
+ pr_debug("endpoint[%d] skipped\n", n);
+ }
+ }
+}
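find_usb_pipe() above classifies endpoints with the usb_endpoint_is_*() helpers; those helpers test two descriptor fields defined by the USB specification: bit 7 of bEndpointAddress gives the direction, and the low two bits of bmAttributes give the transfer type. The standalone sketch below demonstrates that decoding; the example endpoint values are illustrative, and it is not part of the patch.

/* Illustrative sketch (not part of the patch): decode direction and
 * transfer type from raw USB endpoint descriptor fields, the same
 * properties the usb_endpoint_is_*() helpers test above. */
#include <stdint.h>
#include <stdio.h>

static const char *ep_direction(uint8_t bEndpointAddress)
{
	return (bEndpointAddress & 0x80) ? "in" : "out";
}

static const char *ep_type(uint8_t bmAttributes)
{
	switch (bmAttributes & 0x03) {
	case 0:  return "control";
	case 1:  return "isochronous";
	case 2:  return "bulk";
	default: return "interrupt";
	}
}

int main(void)
{
	/* Example: endpoint address 0x81 with bmAttributes 0x02 (bulk in). */
	printf("endpoint 0x81: %s %s\n", ep_type(0x02), ep_direction(0x81));
	return 0;
}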
+
+void baseband_usb_close(struct baseband_usb *usb);
+
+struct baseband_usb *baseband_usb_open(int index,
+ unsigned int vid,
+ unsigned int pid,
+ unsigned int intf)
+{
+ struct baseband_usb *usb;
+ int err;
+
+ pr_debug("baseband_usb_open {\n");
+
+ /* allocate baseband usb structure */
+ usb = kzalloc(sizeof(struct baseband_usb),
+ GFP_KERNEL);
+ if (!usb)
+ return (struct baseband_usb *) 0;
+
+ /* open usb driver */
+ sprintf(baseband_usb_driver_name[index],
+ "baseband_usb_%x_%x_%x",
+ vid, pid, intf);
+ baseband_usb_driver_id_table[index][0].match_flags =
+ USB_DEVICE_ID_MATCH_DEVICE;
+ baseband_usb_driver_id_table[index][0].idVendor = vid;
+ baseband_usb_driver_id_table[index][0].idProduct = pid;
+ g_usb_interface_index[index] = intf;
+ g_usb_interface[index] = (struct usb_interface *) 0;
+ err = usb_register(&baseband_usb_driver[index]);
+ if (err < 0) {
+ pr_err("cannot open usb driver - err %d\n", err);
+ goto error_exit;
+ }
+ usb->baseband_index = index;
+ usb->usb.driver = &baseband_usb_driver[index];
+ if (!g_usb_interface[index]) {
+ pr_err("cannot open usb driver - !g_usb_interface[%d]\n",
+ index);
+ goto error_exit;
+ }
+ usb->usb.device = interface_to_usbdev(g_usb_interface[index]);
+ usb->usb.interface = g_usb_interface[index];
+ find_usb_pipe(usb);
+ usb->usb.rx_urb = (struct urb *) 0;
+ usb->usb.tx_urb = (struct urb *) 0;
+ g_usb_interface_index[index] = ~0U;
+ g_usb_interface[index] = (struct usb_interface *) 0;
+ pr_debug("usb->usb.driver->name %s\n", usb->usb.driver->name);
+ pr_debug("usb->usb.device %p\n", usb->usb.device);
+ pr_debug("usb->usb.interface %p\n", usb->usb.interface);
+ pr_debug("usb->usb.pipe.isoch.in %x\n", usb->usb.pipe.isoch.in);
+ pr_debug("usb->usb.pipe.isoch.out %x\n", usb->usb.pipe.isoch.out);
+ pr_debug("usb->usb.pipe.bulk.in %x\n", usb->usb.pipe.bulk.in);
+ pr_debug("usb->usb.pipe.bulk.out %x\n", usb->usb.pipe.bulk.out);
+ pr_debug("usb->usb.pipe.interrupt.in %x\n", usb->usb.pipe.interrupt.in);
+ pr_debug("usb->usb.pipe.interrupt.out %x\n",
+ usb->usb.pipe.interrupt.out);
+
+ pr_debug("baseband_usb_open }\n");
+ return usb;
+
+error_exit:
+ return (struct baseband_usb *) 0;
+}
+
+void baseband_usb_close(struct baseband_usb *usb)
+{
+ pr_debug("baseband_usb_close {\n");
+
+ /* check input */
+ if (!usb)
+ return;
+
+ /* close usb driver */
+ if (usb->usb.driver) {
+ pr_debug("close usb driver {\n");
+ usb_deregister(usb->usb.driver);
+ usb->usb.driver = (struct usb_driver *) 0;
+ pr_debug("close usb driver }\n");
+ }
+
+ /* free baseband usb structure */
+ kfree(usb);
+
+ pr_debug("baseband_usb_close }\n");
+}
+
+static int baseband_usb_netdev_init(struct net_device *dev)
+{
+ pr_debug("baseband_usb_netdev_init\n");
+ return 0;
+}
+
+static void baseband_usb_netdev_uninit(struct net_device *dev)
+{
+ pr_debug("baseband_usb_netdev_uninit\n");
+}
+
+static int baseband_usb_netdev_open(struct net_device *dev)
+{
+ pr_debug("baseband_usb_netdev_open\n");
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int baseband_usb_netdev_stop(struct net_device *dev)
+{
+ pr_debug("baseband_usb_netdev_stop\n");
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static netdev_tx_t baseband_usb_netdev_start_xmit(
+ struct sk_buff *skb, struct net_device *dev)
+{
+ int i = 0;
+ struct baseband_usb *usb = baseband_usb_net[i];
+ struct urb *urb;
+ unsigned char *buf;
+ int err;
+
+ pr_debug("baseband_usb_netdev_start_xmit\n");
+
+ /* check input */
+ if (!skb) {
+ pr_err("no skb\n");
+ return -EINVAL;
+ }
+
+ /* allocate urb */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ pr_err("usb_alloc_urb() failed\n");
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ buf = kzalloc(skb->len - 14, GFP_ATOMIC);
+ if (!buf) {
+ pr_err("usb buffer kzalloc() failed\n");
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ err = skb_copy_bits(skb, 14, buf, skb->len - 14);
+ if (err < 0) {
+ pr_err("skb_copy_bits() failed - %d\n", err);
+ kfree(buf);
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ return err;
+ }
+ usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.out,
+ buf, skb->len - 14,
+ usb_net_raw_ip_tx_urb_comp,
+ usb);
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ /* autoresume before tx */
+ err = usb_autopm_get_interface_async(usb->usb.interface);
+ if (err < 0) {
+ pr_err("%s: usb_autopm_get_interface(%p) failed %d\n",
+ __func__, usb->usb.interface, err);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* submit tx urb */
+ usb_mark_last_busy(usb->usb.device);
+ usb->usb.tx_urb = urb;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ pr_err("usb_submit_urb() failed - err %d\n", err);
+ usb_autopm_put_interface_async(usb->usb.interface);
+ usb->usb.tx_urb = (struct urb *) 0;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* free skb */
+ consume_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_ops usb_net_raw_ip_ops = {
+ .ndo_init = baseband_usb_netdev_init,
+ .ndo_uninit = baseband_usb_netdev_uninit,
+ .ndo_open = baseband_usb_netdev_open,
+ .ndo_stop = baseband_usb_netdev_stop,
+ .ndo_start_xmit = baseband_usb_netdev_start_xmit,
+};
+
+static int usb_net_raw_ip_rx_urb_submit(struct baseband_usb *usb)
+{
+ struct urb *urb;
+ void *buf;
+ int err;
+
+ pr_debug("usb_net_raw_ip_rx_urb_submit { usb %p\n", usb);
+
+ /* check input */
+ if (usb->usb.rx_urb) {
+ pr_err("previous urb still active\n");
+ return -1;
+ }
+
+ /* allocate rx urb */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ pr_err("usb_alloc_urb() failed\n");
+ return -ENOMEM;
+ }
+ buf = kzalloc(USB_NET_BUFSIZ, GFP_ATOMIC);
+ if (!buf) {
+ pr_err("usb buffer kzalloc() failed\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+ usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.in,
+ buf, USB_NET_BUFSIZ,
+ usb_net_raw_ip_rx_urb_comp,
+ usb);
+ urb->transfer_flags = 0;
+
+ /* submit rx urb */
+ usb_mark_last_busy(usb->usb.device);
+ usb->usb.rx_urb = urb;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ pr_err("usb_submit_urb() failed - err %d\n", err);
+ usb->usb.rx_urb = (struct urb *) 0;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ return err;
+ }
+
+ pr_debug("usb_net_raw_ip_rx_urb_submit }\n");
+ return err;
+}
+
+static void usb_net_raw_ip_rx_urb_comp(struct urb *urb)
+{
+ struct baseband_usb *usb = (struct baseband_usb *) urb->context;
+ int i = usb->baseband_index;
+ struct sk_buff *skb;
+ unsigned char *dst;
+ unsigned char ethernet_header[14] = {
+ /* Destination MAC */
+ 0x00, 0x00,
+ 0x00, 0x00,
+ 0x00, 0x00,
+ /* Source MAC */
+ 0x00, 0x00,
+ 0x00, 0x00,
+ 0x00, 0x00,
+ /* EtherType */
+ NET_IP_ETHERTYPE,
+ };
+
+ pr_debug("usb_net_raw_ip_rx_urb_comp { urb %p\n", urb);
+
+ /* check input */
+ if (!urb) {
+ pr_err("no urb\n");
+ return;
+ }
+ if (urb->status == -ENOENT) {
+ pr_info("rx urb killed\n");
+ return;
+ }
+ if (urb->status) {
+ pr_info("rx urb status %d\n", urb->status);
+ }
+
+ /* put rx urb data in rx buffer */
+ if (urb->actual_length) {
+ pr_debug("usb_net_raw_ip_rx_urb_comp - "
+ "urb->actual_length %d\n", urb->actual_length);
+ /* allocate skb with space for
+ * - dummy ethernet header
+ * - rx IP packet from modem
+ */
+ skb = netdev_alloc_skb(usb_net_raw_ip_dev[i],
+ NET_IP_ALIGN + 14 + urb->actual_length);
+ if (skb) {
+ /* generate a dummy ethernet header
+ * since modem sends IP packets without
+ * any ethernet headers
+ */
+ memcpy(ethernet_header + 0,
+ usb_net_raw_ip_dev[i]->dev_addr, 6);
+ memcpy(ethernet_header + 6,
+ "0x01\0x02\0x03\0x04\0x05\0x06", 6);
+ /* fill skb with
+ * - dummy ethernet header
+ * - rx IP packet from modem
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+ dst = skb_put(skb, 14);
+ memcpy(dst, ethernet_header, 14);
+ dst = skb_put(skb, urb->actual_length);
+ memcpy(dst, urb->transfer_buffer, urb->actual_length);
+ skb->protocol = eth_type_trans(skb,
+ usb_net_raw_ip_dev[i]);
+ /* pass skb to network stack */
+ if (netif_rx(skb) < 0) {
+ pr_err("usb_net_raw_ip_rx_urb_comp_work - "
+ "netif_rx(%p) failed\n", skb);
+ kfree_skb(skb);
+ }
+ } else {
+ pr_err("usb_net_raw_ip_rx_urb_comp_work - "
+ "netdev_alloc_skb() failed\n");
+ }
+ }
+
+ /* free rx urb */
+ if (urb->transfer_buffer) {
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = (void *) 0;
+ }
+ usb_free_urb(urb);
+ usb->usb.rx_urb = (struct urb *) 0;
+
+ /* submit next rx urb */
+ usb_net_raw_ip_rx_urb_submit(usb);
+
+ pr_debug("usb_net_raw_ip_rx_urb_comp }\n");
+}
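The receive completion above turns each raw IP datagram from the modem into an ordinary Ethernet frame by prepending a dummy 14-byte header (destination MAC of the network device, a fixed source MAC, EtherType 0x0800 for IPv4) before handing it to eth_type_trans(). The standalone sketch below shows that framing step in plain C; the helper name and sample values are illustrative, and it is not part of the patch.

/* Illustrative sketch (not part of the patch): prepend a dummy Ethernet
 * header to a raw IPv4 payload, as the rx completion above does. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

static size_t wrap_raw_ip(const uint8_t *ip, size_t ip_len,
			  const uint8_t dev_mac[6],
			  uint8_t *frame, size_t frame_size)
{
	static const uint8_t dummy_src[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };

	if (frame_size < ETH_HLEN + ip_len)
		return 0;
	memcpy(frame, dev_mac, 6);          /* destination: our netdev */
	memcpy(frame + 6, dummy_src, 6);    /* fixed dummy source MAC  */
	frame[12] = 0x08;                   /* EtherType 0x0800 (IPv4) */
	frame[13] = 0x00;
	memcpy(frame + ETH_HLEN, ip, ip_len);
	return ETH_HLEN + ip_len;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t ip[20] = { 0x45 };    /* truncated IPv4 header   */
	uint8_t frame[64];
	size_t n = wrap_raw_ip(ip, sizeof(ip), mac, frame, sizeof(frame));

	printf("frame length: %zu bytes\n", n);
	return 0;
}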
+
+static void usb_net_raw_ip_tx_urb_comp(struct urb *urb)
+{
+ struct baseband_usb *usb = (struct baseband_usb *) urb->context;
+
+ pr_debug("usb_net_raw_ip_tx_urb_comp {\n");
+
+ /* free tx urb */
+ if (urb->transfer_buffer) {
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = (void *) 0;
+ }
+ usb_free_urb(urb);
+ usb->usb.tx_urb = (struct urb *) 0;
+
+ /* autosuspend after tx completed */
+ usb_autopm_put_interface_async(usb->usb.interface);
+
+ pr_debug("usb_net_raw_ip_tx_urb_comp }\n");
+}
+
+static int usb_net_raw_ip_init(void)
+{
+ int i;
+ int err;
+
+ pr_debug("usb_net_raw_ip_init {\n");
+
+ /* create multiple raw-ip network devices */
+ for (i = 0; i < max_intfs; i++) {
+ /* open baseband usb */
+ g_i = i;
+ baseband_usb_net[i] = baseband_usb_open(i, usb_net_raw_ip_vid,
+ usb_net_raw_ip_pid, usb_net_raw_ip_intf[i]);
+ if (!baseband_usb_net[i]) {
+ pr_err("cannot open baseband usb net\n");
+ err = -1;
+ goto error_exit;
+ }
+ /* register network device */
+ usb_net_raw_ip_dev[i] = alloc_netdev(0,
+ BASEBAND_USB_NET_DEV_NAME,
+ ether_setup);
+ if (!usb_net_raw_ip_dev[i]) {
+ pr_err("alloc_netdev() failed\n");
+ err = -ENOMEM;
+ goto error_exit;
+ }
+ usb_net_raw_ip_dev[i]->netdev_ops = &usb_net_raw_ip_ops;
+ usb_net_raw_ip_dev[i]->watchdog_timeo = TX_TIMEOUT;
+ random_ether_addr(usb_net_raw_ip_dev[i]->dev_addr);
+ err = register_netdev(usb_net_raw_ip_dev[i]);
+ if (err < 0) {
+ pr_err("cannot register network device - %d\n", err);
+ goto error_exit;
+ }
+ pr_debug("registered baseband usb network device"
+ " - dev %p name %s\n", usb_net_raw_ip_dev[i],
+ BASEBAND_USB_NET_DEV_NAME);
+ /* start usb rx */
+ err = usb_net_raw_ip_rx_urb_submit(baseband_usb_net[i]);
+ if (err < 0) {
+ pr_err("submit rx failed - err %d\n", err);
+ goto error_exit;
+ }
+ }
+
+ pr_debug("usb_net_raw_ip_init }\n");
+ return 0;
+
+error_exit:
+ /* destroy multiple raw-ip network devices */
+ for (i = 0; i < max_intfs; i++) {
+ /* unregister network device */
+ if (usb_net_raw_ip_dev[i]) {
+ unregister_netdev(usb_net_raw_ip_dev[i]);
+ free_netdev(usb_net_raw_ip_dev[i]);
+ usb_net_raw_ip_dev[i] = (struct net_device *) 0;
+ }
+ /* close baseband usb */
+ if (baseband_usb_net[i]) {
+ baseband_usb_close(baseband_usb_net[i]);
+ baseband_usb_net[i] = (struct baseband_usb *) 0;
+ }
+ }
+
+ return err;
+}
+
+static void usb_net_raw_ip_exit(void)
+{
+ int i;
+
+ pr_debug("usb_net_raw_ip_exit {\n");
+
+ /* destroy multiple raw-ip network devices */
+ for (i = 0; i < max_intfs; i++) {
+ /* unregister network device */
+ if (usb_net_raw_ip_dev[i]) {
+ unregister_netdev(usb_net_raw_ip_dev[i]);
+ free_netdev(usb_net_raw_ip_dev[i]);
+ usb_net_raw_ip_dev[i] = (struct net_device *) 0;
+ }
+ /* close baseband usb */
+ if (baseband_usb_net[i]) {
+ baseband_usb_close(baseband_usb_net[i]);
+ baseband_usb_net[i] = (struct baseband_usb *) 0;
+ }
+ }
+
+ pr_debug("usb_net_raw_ip_exit }\n");
+}
+
+module_init(usb_net_raw_ip_init)
+module_exit(usb_net_raw_ip_exit)
+
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f74f3ce71526..7b4687974987 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1284,6 +1284,11 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9E08),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* SMSC89530 USB Ethernet Device on Automotive VCM */
+ USB_DEVICE(0x0424, 0x9E08),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f354bd4e121e..f1d88c571bc4 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -268,9 +268,16 @@ config MWL8K
To compile this driver as a module, choose M here: the module
will be called mwl8k. If unsure, say N.
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+ Enables Power/Reset/Carddetect function abstraction
+
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
+source "drivers/net/wireless/bcm4329/Kconfig"
+source "drivers/net/wireless/bcmdhd/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7bba6a82b875..8ceae0a8ba0f 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -58,3 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
obj-$(CONFIG_IWM) += iwmc3200wifi/
obj-$(CONFIG_MWIFIEX) += mwifiex/
+
+obj-$(CONFIG_BCM4329) += bcm4329/
+obj-$(CONFIG_BCMDHD) += bcmdhd/
diff --git a/drivers/net/wireless/bcm4329/Kconfig b/drivers/net/wireless/bcm4329/Kconfig
new file mode 100644
index 000000000000..79d701b698e1
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/Kconfig
@@ -0,0 +1,75 @@
+config BCM4329
+ tristate "Broadcom 4329 wireless cards support"
+ depends on MMC
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ ---help---
+ This module adds support for wireless adapters based on
+ the Broadcom 4329 chipset.
+
+ This driver uses the kernel's wireless extensions subsystem.
+
+ If you choose to build a module, it'll be called dhd. Say M if
+ unsure.
+
+config BCM4329_FIRST_SCAN
+ depends on BCM4329
+ bool "first scan support"
+ default n
+ ---help---
+ Initiate broadcast scan (active scan) just after
+ initializing with network interface.
+
+config BCM4329_FW_PATH
+ depends on BCM4329
+ string "Firmware path"
+ default "/system/etc/firmware/fw_bcm4329.bin"
+ ---help---
+ Path to the firmware file.
+
+config BCM4329_NVRAM_PATH
+ depends on BCM4329
+ string "NVRAM path"
+ default "/proc/calibration"
+ ---help---
+ Path to the calibration file.
+
+config BCM4329_WIFI_CONTROL_FUNC
+ bool "Use bcm4329_wlan device"
+ depends on BCM4329
+ default n
+ ---help---
+ Use this option to get various parameters from architecture specific
+ bcm4329_wlan platform device. Say n if unsure.
+
+if BCM4329_WIFI_CONTROL_FUNC
+
+config BCM4329_DHD_USE_STATIC_BUF
+ bool "Use static buffer"
+ depends on BCM4329
+ default n
+ ---help---
+ Use static buffer from kernel heap allocated during bcm4329_wlan
+ platform device creation.
+
+config BCM4329_HW_OOB
+ bool "Use out of band interrupt"
+ depends on BCM4329
+ default n
+ ---help---
+ Use out of band interrupt for wake on wireless.
+
+config BCM4329_OOB_INTR_ONLY
+ bool "Use out of band interrupt only"
+ depends on BCM4329
+ default n
+ ---help---
+ Use out of band interrupt for all interrupts (including SDIO interrupts).
+
+config BCM4329_GET_CUSTOM_MAC_ENABLE
+ bool "Use custom mac address"
+ depends on BCM4329
+ default n
+ ---help---
+ Use mac address provided by bcm4329_wlan platform device.
+endif
diff --git a/drivers/net/wireless/bcm4329/Makefile b/drivers/net/wireless/bcm4329/Makefile
new file mode 100644
index 000000000000..7714efce4af5
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/Makefile
@@ -0,0 +1,52 @@
+# bcm4329
+DHDCFLAGS = -DLINUX -DBCMDRIVER -DBCMDONGLEHOST -DDHDTHREAD -DBCMWPA2 \
+ -DUNRELEASEDCHIP -Dlinux -DDHD_SDALIGN=64 -DMAX_HDR_READ=64 \
+ -DDHD_FIRSTREAD=64 -DDHD_GPL -DDHD_SCHED -DBDC -DTOE -DDHD_BCMEVENTS \
+ -DSHOW_EVENTS -DBCMSDIO -DDHD_GPL -DBCMLXSDMMC -DBCMPLATFORM_BUS \
+ -Wall -Wstrict-prototypes -Werror -DCUSTOMER_HW2 -DMMC_SDIO_ABORT \
+ -DDHD_DEBUG_TRAP -DSOFTAP -DEMBEDDED_PLATFORM -DARP_OFFLOAD_SUPPORT \
+ -DPKT_FILTER_SUPPORT -DSET_RANDOM_MAC_SOFTAP -DCSCAN \
+ -DKEEP_ALIVE -DPNO_SUPPORT \
+ -Idrivers/net/wireless/bcm4329 -Idrivers/net/wireless/bcm4329/include
+
+ifeq ($(CONFIG_BCM4329_WIFI_CONTROL_FUNC),y)
+DHDCFLAGS += -DCONFIG_WIFI_CONTROL_FUNC
+endif
+
+ifeq ($(CONFIG_BCM4329_FIRST_SCAN),y)
+DHDCFLAGS += -DCONFIG_FIRST_SCAN
+endif
+
+ifeq ($(CONFIG_BCM4329_DHD_USE_STATIC_BUF),y)
+DHDCFLAGS += -DDHD_USE_STATIC_BUF
+endif
+ifeq ($(CONFIG_BCM4329_OOB_INTR_ONLY),y)
+DHDCFLAGS += -DOOB_INTR_ONLY
+endif
+ifeq ($(CONFIG_BCM4329_GET_CUSTOM_MAC_ENABLE),y)
+DHDCFLAGS += -DGET_CUSTOM_MAC_ENABLE
+endif
+ifeq ($(CONFIG_BCM4329_HW_OOB),y)
+DHDCFLAGS += -DHW_OOB
+else
+DHDCFLAGS += -DSDIO_ISR_THREAD
+endif
+
+ifeq ($(TARGET_USE_NEW_TOOLCHAIN),1)
+ # gcc-4.6.1 warns a lot more than previous compilers.
+ # The following is the minimal set of warnings that need to not error out
+ # the build for it to succeed. -Wno-error would also work, but this
+ # explicit list allows them to be fixed in smaller chunks.
+ DHDCFLAGS += -Wno-error=unused-but-set-variable
+ DHDCFLAGS += -Wno-error=array-bounds
+endif
+
+DHDOFILES = dhd_linux.o linux_osl.o bcmutils.o dhd_common.o dhd_custom_gpio.o \
+ wl_iw.o siutils.o sbutils.o aiutils.o hndpmu.o bcmwifi.o dhd_sdio.o \
+ dhd_linux_sched.o dhd_cdc.o bcmsdh_sdmmc.o bcmsdh.o bcmsdh_linux.o \
+ bcmsdh_sdmmc_linux.o
+
+obj-$(CONFIG_BCM4329) += bcm4329.o
+bcm4329-objs += $(DHDOFILES)
+EXTRA_CFLAGS = $(DHDCFLAGS)
+EXTRA_LDFLAGS += --strip-debug
diff --git a/drivers/net/wireless/bcm4329/aiutils.c b/drivers/net/wireless/bcm4329/aiutils.c
new file mode 100644
index 000000000000..df48ac0d83d4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/aiutils.c
@@ -0,0 +1,686 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c,v 1.6.4.7.4.6 2010/04/21 20:43:47 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+STATIC uint32
+get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st,
+ uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh);
+
+
+/* EROM parsing */
+
+static uint32
+get_erom_ent(si_t *sih, uint32 *eromptr, uint32 mask, uint32 match)
+{
+ uint32 ent;
+ uint inv = 0, nom = 0;
+
+ while (TRUE) {
+ ent = R_REG(si_osh(sih), (uint32 *)(uintptr)(*eromptr));
+ *eromptr += sizeof(uint32);
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+ nom++;
+ }
+
+ SI_MSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+ if (inv + nom)
+ SI_MSG((" after %d invalid and %d non-matching entries\n", inv, nom));
+ return ent;
+}
+
+STATIC uint32
+get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st,
+ uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh)
+{
+ uint32 asd, sz, szd;
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+ /* This is not what we want, "push" it back */
+ *eromptr -= sizeof(uint32);
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ SI_MSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+ sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+ return asd;
+}
+
+/* parse the enumeration rom to identify all cores */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = (chipcregs_t *)regs;
+ uint32 erombase, eromptr, eromlim;
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ eromptr = (uintptr)REG_MAP(erombase, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Set wrappers address */
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* Now point the window at the erom */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ eromptr = (uint32)(uintptr)regs;
+ break;
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ eromptr = erombase;
+ break;
+
+ case PCMCIA_BUS:
+ default:
+ SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+ ASSERT(0);
+ return;
+ }
+ eromlim = eromptr + ER_REMAPCONTROL;
+
+ SI_MSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%08x, eromlim = 0x%08x\n",
+ regs, erombase, eromptr, eromlim));
+ while (eromptr < eromlim) {
+ uint32 cia, cib, base, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+ uint i, j, idx;
+ bool br;
+
+ br = FALSE;
+
+ /* Grok a component */
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ SI_MSG(("Found END of erom after %d cores\n", sii->numcores));
+ return;
+ }
+ base = eromptr - sizeof(uint32);
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ SI_ERROR(("CIA not followed by CIB\n"));
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+ SI_MSG(("Found component 0x%04x/0x%4x rev %d at erom addr 0x%08x, with nmw = %d, "
+ "nsw = %d, nmp = %d & nsp = %d\n",
+ mfg, cid, crev, base, nmw, nsw, nmp, nsp));
+
+ if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+ continue;
+ if ((nmw + nsw == 0)) {
+ /* A component which is not a core */
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0) {
+ sii->common_info->oob_router = addrl;
+ }
+ }
+ continue;
+ }
+
+ idx = sii->numcores;
+/* sii->eromptr[idx] = base; */
+ sii->common_info->cia[idx] = cia;
+ sii->common_info->cib[idx] = cib;
+ sii->common_info->coreid[idx] = cid;
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+ goto error;
+ }
+ SI_MSG((" Master port %d, mp: %d id: %d\n", i,
+ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+ }
+
+ /* First Slave Address Descriptor should be port 0:
+ * the main register space for the core
+ */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ /* Try again to see if it is a bridge */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd != 0)
+ br = TRUE;
+ else
+ if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("First Slave ASD for core 0x%04x malformed "
+ "(0x%08x)\n", cid, asd));
+ goto error;
+ }
+ }
+ sii->common_info->coresba[idx] = addrl;
+ sii->common_info->coresba_size[idx] = sizel;
+ /* Get any more ASDs in port 0 */
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE))
+ sii->common_info->coresba2[idx] = addrl;
+ sii->common_info->coresba2_size[idx] = sizel;
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ } while (asd != 0);
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ goto error;
+ }
+ }
+
+ /* Now get master wrappers */
+ for (i = 0; i < nmw; i++) {
+ asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for MW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if (i == 0)
+ sii->common_info->wrapba[idx] = addrl;
+ }
+
+ /* And finally slave wrappers */
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp == 1) ? 0 : 1;
+ asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for SW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if ((nmw == 0) && (i == 0))
+ sii->common_info->wrapba[idx] = addrl;
+ }
+
+ /* Don't record bridges */
+ if (br)
+ continue;
+
+ /* Done with core */
+ sii->numcores++;
+ }
+
+ SI_ERROR(("Reached end of erom without finding END"));
+
+error:
+ sii->numcores = 0;
+ return;
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 addr = sii->common_info->coresba[coreidx];
+ uint32 wrap = sii->common_info->wrapba[coreidx];
+ void *regs;
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!sii->common_info->regs[coreidx]) {
+ sii->common_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+ }
+ sii->curmap = regs = sii->common_info->regs[coreidx];
+ if (!sii->common_info->wrappers[coreidx]) {
+ sii->common_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->common_info->wrappers[coreidx]));
+ }
+ sii->curwrap = sii->common_info->wrappers[coreidx];
+ break;
+
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ sii->curmap = regs = (void *)((uintptr)addr);
+ sii->curwrap = (void *)((uintptr)wrap);
+ break;
+
+ case PCMCIA_BUS:
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ sii->curmap = regs;
+ sii->curidx = coreidx;
+
+ return regs;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(si_t *sih)
+{
+ return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->common_info->coresba[cidx];
+ else if (asidx == 1)
+ return sii->common_info->coresba2[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->common_info->coresba_size[cidx];
+ else if (asidx == 1)
+ return sii->common_info->coresba2_size[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
+}
+
+uint
+ai_flag(si_t *sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+
+ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+void
+ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val)
+{
+ si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai = sii->curwrap;
+ W_REG(sii->osh, (uint32 *)((uint8 *)ai+offset), val);
+ return;
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 cia;
+
+ sii = SI_INFO(sih);
+ cia = sii->common_info->cia[sii->curidx];
+ return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 cib;
+
+ sii = SI_INFO(sih);
+ cib = sii->common_info->cib[sii->curidx];
+ return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+
+ return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+ ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!sii->common_info->wrappers[coreidx]) {
+ sii->common_info->regs[coreidx] =
+ REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ /* readback */
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
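+/*
+ * Illustrative usage sketch (not part of the original patch; the core index
+ * and register offset below are hypothetical): to set bit 0 of a 32-bit
+ * register at offset 0x408 of core 2 while preserving the remaining bits,
+ * a caller could do
+ *
+ *	uint32 v = ai_corereg(sih, 2, 0x408, 0x1, 0x1);
+ *
+ * With mask == 0 and val == 0 the write is skipped and the call degenerates
+ * to a plain read-back of the register.
+ */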
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /* if core is already in reset, just return */
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+ return;
+
+ W_REG(sii->osh, &ai->ioctrl, bits);
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ OSL_DELAY(10);
+
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ ai_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ W_REG(sii->osh, &ai->resetctrl, 0);
+ OSL_DELAY(1);
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ OSL_DELAY(1);
+}
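+/*
+ * Illustrative note (not part of the original patch): ai_core_reset(sih, 0, 0)
+ * performs a plain reset of the current core: the core is first put into
+ * reset via ai_core_disable(), then brought back up with SICF_FGC and
+ * SICF_CLOCK_EN asserted, and finally left running with only SICF_CLOCK_EN
+ * set in ioctrl.
+ */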
+
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+
+ return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+ W_REG(sii->osh, &ai->iostatus, w);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmpcispi.c b/drivers/net/wireless/bcm4329/bcmpcispi.c
new file mode 100644
index 000000000000..1a8b6717f924
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmpcispi.c
@@ -0,0 +1,630 @@
+/*
+ * Broadcom SPI over PCI-SPI Host Controller, low-level hardware driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.c,v 1.22.2.4.4.5.6.1 2010/08/13 00:26:05 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <sdio.h> /* SDIO Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <pcicfg.h>
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+#include <bcmpcispi.h> /* BRCM PCI-SPI Host Controller Register definitions */
+
+
+/* ndis_osl.h needs to do a runtime check of the osh to map
+ * R_REG/W_REG to bus specific access similar to linux_osl.h.
+ * Until then...
+ */
+/* linux */
+
+#define SPIPCI_RREG R_REG
+#define SPIPCI_WREG W_REG
+
+
+#define SPIPCI_ANDREG(osh, r, v) SPIPCI_WREG(osh, (r), (SPIPCI_RREG(osh, r) & (v)))
+#define SPIPCI_ORREG(osh, r, v) SPIPCI_WREG(osh, (r), (SPIPCI_RREG(osh, r) | (v)))
+
+
+int bcmpcispi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */
+
+typedef struct spih_info_ {
+ uint bar0; /* BAR0 of PCI Card */
+ uint bar1; /* BAR1 of PCI Card */
+ osl_t *osh; /* osh handle */
+ spih_pciregs_t *pciregs; /* PCI Core Registers */
+ spih_regs_t *regs; /* SPI Controller Registers */
+ uint8 rev; /* PCI Card Revision ID */
+} spih_info_t;
+
+
+/* Attach to PCI-SPI Host Controller Hardware */
+bool
+spi_hw_attach(sdioh_info_t *sd)
+{
+ osl_t *osh;
+ spih_info_t *si;
+
+ sd_trace(("%s: enter\n", __FUNCTION__));
+
+ osh = sd->osh;
+
+ if ((si = (spih_info_t *)MALLOC(osh, sizeof(spih_info_t))) == NULL) {
+ sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ return FALSE;
+ }
+
+ bzero(si, sizeof(spih_info_t));
+
+ sd->controller = si;
+
+ si->osh = sd->osh;
+ si->rev = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_REV, 4) & 0xFF;
+
+ if (si->rev < 3) {
+		sd_err(("Host controller rev %d not supported, please upgrade to rev >= 3\n", si->rev));
+ MFREE(osh, si, sizeof(spih_info_t));
+ return (FALSE);
+ }
+
+ sd_err(("Attaching to Generic PCI SPI Host Controller Rev %d\n", si->rev));
+
+ /* FPGA Revision < 3 not supported by driver anymore. */
+ ASSERT(si->rev >= 3);
+
+ si->bar0 = sd->bar0;
+
+ /* Rev < 10 PciSpiHost has 2 BARs:
+ * BAR0 = PCI Core Registers
+ * BAR1 = PciSpiHost Registers (all other cores on backplane)
+ *
+ * Rev 10 and up use a different PCI core which only has a single
+ * BAR0 which contains the PciSpiHost Registers.
+ */
+ if (si->rev < 10) {
+ si->pciregs = (spih_pciregs_t *)spi_reg_map(osh,
+ (uintptr)si->bar0,
+ sizeof(spih_pciregs_t));
+ sd_err(("Mapped PCI Core regs to BAR0 at %p\n", si->pciregs));
+
+ si->bar1 = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR1, 4);
+ si->regs = (spih_regs_t *)spi_reg_map(osh,
+ (uintptr)si->bar1,
+ sizeof(spih_regs_t));
+ sd_err(("Mapped SPI Controller regs to BAR1 at %p\n", si->regs));
+ } else {
+ si->regs = (spih_regs_t *)spi_reg_map(osh,
+ (uintptr)si->bar0,
+ sizeof(spih_regs_t));
+ sd_err(("Mapped SPI Controller regs to BAR0 at %p\n", si->regs));
+ si->pciregs = NULL;
+ }
+ /* Enable SPI Controller, 16.67MHz SPI Clock */
+ SPIPCI_WREG(osh, &si->regs->spih_ctrl, 0x000000d1);
+
+ /* Set extended feature register to defaults */
+ SPIPCI_WREG(osh, &si->regs->spih_ext, 0x00000000);
+
+ /* Set GPIO CS# High (de-asserted) */
+ SPIPCI_WREG(osh, &si->regs->spih_gpio_data, SPIH_CS);
+
+ /* set GPIO[0] to output for CS# */
+ /* set GPIO[1] to output for power control */
+ /* set GPIO[2] to input for card detect */
+ SPIPCI_WREG(osh, &si->regs->spih_gpio_ctrl, (SPIH_CS | SPIH_SLOT_POWER));
+
+ /* Clear out the Read FIFO in case there is any stuff left in there from a previous run. */
+ while ((SPIPCI_RREG(osh, &si->regs->spih_stat) & SPIH_RFEMPTY) == 0) {
+ SPIPCI_RREG(osh, &si->regs->spih_data);
+ }
+
+	/* Wait for power to the SDIO card to stabilize (100 msec was insufficient) */
+ OSL_DELAY(250000);
+
+ /* Check card detect on FPGA Revision >= 4 */
+ if (si->rev >= 4) {
+ if (SPIPCI_RREG(osh, &si->regs->spih_gpio_data) & SPIH_CARD_DETECT) {
+ sd_err(("%s: no card detected in SD slot\n", __FUNCTION__));
+ spi_reg_unmap(osh, (uintptr)si->regs, sizeof(spih_regs_t));
+ if (si->pciregs) {
+ spi_reg_unmap(osh, (uintptr)si->pciregs, sizeof(spih_pciregs_t));
+ }
+ MFREE(osh, si, sizeof(spih_info_t));
+ return FALSE;
+ }
+ }
+
+ /* Interrupts are level sensitive */
+ SPIPCI_WREG(osh, &si->regs->spih_int_edge, 0x80000000);
+
+ /* Interrupts are active low. */
+ SPIPCI_WREG(osh, &si->regs->spih_int_pol, 0x40000004);
+
+ /* Enable interrupts through PCI Core. */
+ if (si->pciregs) {
+ SPIPCI_WREG(osh, &si->pciregs->ICR, PCI_INT_PROP_EN);
+ }
+
+ sd_trace(("%s: exit\n", __FUNCTION__));
+ return TRUE;
+}
+
+/* Detach and return PCI-SPI Hardware to unconfigured state */
+bool
+spi_hw_detach(sdioh_info_t *sd)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+ spih_pciregs_t *pciregs = si->pciregs;
+
+ sd_trace(("%s: enter\n", __FUNCTION__));
+
+ SPIPCI_WREG(osh, &regs->spih_ctrl, 0x00000010);
+ SPIPCI_WREG(osh, &regs->spih_gpio_ctrl, 0x00000000); /* Disable GPIO for CS# */
+ SPIPCI_WREG(osh, &regs->spih_int_mask, 0x00000000); /* Clear Intmask */
+ SPIPCI_WREG(osh, &regs->spih_hex_disp, 0x0000DEAF);
+ SPIPCI_WREG(osh, &regs->spih_int_edge, 0x00000000);
+ SPIPCI_WREG(osh, &regs->spih_int_pol, 0x00000000);
+ SPIPCI_WREG(osh, &regs->spih_hex_disp, 0x0000DEAD);
+
+ /* Disable interrupts through PCI Core. */
+ if (si->pciregs) {
+ SPIPCI_WREG(osh, &pciregs->ICR, 0x00000000);
+ spi_reg_unmap(osh, (uintptr)pciregs, sizeof(spih_pciregs_t));
+ }
+ spi_reg_unmap(osh, (uintptr)regs, sizeof(spih_regs_t));
+
+ MFREE(osh, si, sizeof(spih_info_t));
+
+ sd->controller = NULL;
+
+ sd_trace(("%s: exit\n", __FUNCTION__));
+ return TRUE;
+}
+
+/* Switch between internal (PCI) and external clock oscillator */
+static bool
+sdspi_switch_clock(sdioh_info_t *sd, bool ext_clk)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+
+ /* Switch to desired clock, and reset the PLL. */
+ SPIPCI_WREG(osh, &regs->spih_pll_ctrl, ext_clk ? SPIH_EXT_CLK : 0);
+
+ SPINWAIT(((SPIPCI_RREG(osh, &regs->spih_pll_status) & SPIH_PLL_LOCKED)
+ != SPIH_PLL_LOCKED), 1000);
+ if ((SPIPCI_RREG(osh, &regs->spih_pll_status) & SPIH_PLL_LOCKED) != SPIH_PLL_LOCKED) {
+ sd_err(("%s: timeout waiting for PLL to lock\n", __FUNCTION__));
+ return (FALSE);
+ }
+ return (TRUE);
+
+}
+
+/* Configure PCI-SPI Host Controller's SPI Clock rate as a divisor into the
+ * base clock rate. The base clock is either the PCI Clock (33MHz) or the
+ * external clock oscillator at U17 on the PciSpiHost.
+ */
+bool
+spi_start_clock(sdioh_info_t *sd, uint16 div)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+ uint32 t, espr, disp;
+ uint32 disp_xtal_freq;
+ bool ext_clock = FALSE;
+ char disp_string[5];
+
+ if (div > 2048) {
+ sd_err(("%s: divisor %d too large; using max of 2048\n", __FUNCTION__, div));
+ div = 2048;
+ } else if (div & (div - 1)) { /* Not a power of 2? */
+ /* Round up to a power of 2 */
+ while ((div + 1) & div)
+ div |= div >> 1;
+ div++;
+ }
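+	/*
+	 * Worked example (illustrative): div == 6 (binary 110) is not a power
+	 * of two, so the loop above smears the top bit downwards (6 -> 7) and
+	 * the final increment yields 8, the next power of two.
+	 */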
+
+ /* For FPGA Rev >= 5, the use of an external clock oscillator is supported.
+ * If the oscillator is populated, use it to provide the SPI base clock,
+ * otherwise, default to the PCI clock as the SPI base clock.
+ */
+ if (si->rev >= 5) {
+ uint32 clk_tick;
+ /* Enable the External Clock Oscillator as PLL clock source. */
+ if (!sdspi_switch_clock(sd, TRUE)) {
+ sd_err(("%s: error switching to external clock\n", __FUNCTION__));
+ }
+
+ /* Check to make sure the external clock is running. If not, then it
+ * is not populated on the card, so we will default to the PCI clock.
+ */
+ clk_tick = SPIPCI_RREG(osh, &regs->spih_clk_count);
+ if (clk_tick == SPIPCI_RREG(osh, &regs->spih_clk_count)) {
+
+ /* Switch back to the PCI clock as the clock source. */
+ if (!sdspi_switch_clock(sd, FALSE)) {
+				sd_err(("%s: error switching back to PCI clock\n", __FUNCTION__));
+ }
+ } else {
+ ext_clock = TRUE;
+ }
+ }
+
+ /* Hack to allow hot-swapping oscillators:
+ * 1. Force PCI clock as clock source, using sd_divisor of 0.
+ * 2. Swap oscillator
+	 * 3. Set the desired sd_divisor (will switch to the external oscillator as clock source).
+ */
+ if (div == 0) {
+ ext_clock = FALSE;
+ div = 2;
+
+ /* Select PCI clock as the clock source. */
+ if (!sdspi_switch_clock(sd, FALSE)) {
+			sd_err(("%s: error switching to PCI clock\n", __FUNCTION__));
+ }
+
+ sd_err(("%s: Ok to hot-swap oscillators.\n", __FUNCTION__));
+ }
+
+ /* If using the external oscillator, read the clock frequency from the controller
+ * The value read is in units of 10000Hz, and it's not a nice round number because
+ * it is calculated by the FPGA. So to make up for that, we round it off.
+ */
+ if (ext_clock == TRUE) {
+ uint32 xtal_freq;
+
+ OSL_DELAY(1000);
+ xtal_freq = SPIPCI_RREG(osh, &regs->spih_xtal_freq) * 10000;
+
+ sd_info(("%s: Oscillator is %dHz\n", __FUNCTION__, xtal_freq));
+
+
+ disp_xtal_freq = xtal_freq / 10000;
+
+ /* Round it off to a nice number. */
+ if ((disp_xtal_freq % 100) > 50) {
+ disp_xtal_freq += 100;
+ }
+
+ disp_xtal_freq = (disp_xtal_freq / 100) * 100;
+ } else {
+ sd_err(("%s: no external oscillator installed, using PCI clock.\n", __FUNCTION__));
+ disp_xtal_freq = 3333;
+ }
+
+ /* Convert the SPI Clock frequency to BCD format. */
+ sprintf(disp_string, "%04d", disp_xtal_freq / div);
+
+ disp = (disp_string[0] - '0') << 12;
+ disp |= (disp_string[1] - '0') << 8;
+ disp |= (disp_string[2] - '0') << 4;
+ disp |= (disp_string[3] - '0');
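+	/*
+	 * Worked example (illustrative): with the PCI clock as base
+	 * (disp_xtal_freq == 3333, i.e. 33.33MHz in 10kHz units) and div == 2,
+	 * disp_string is "1666" and disp becomes 0x1666, so the hex display
+	 * shows the resulting 16.66MHz SPI clock.
+	 */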
+
+ /* Select the correct ESPR register value based on the divisor. */
+ switch (div) {
+ case 1: espr = 0x0; break;
+ case 2: espr = 0x1; break;
+ case 4: espr = 0x2; break;
+ case 8: espr = 0x5; break;
+ case 16: espr = 0x3; break;
+ case 32: espr = 0x4; break;
+ case 64: espr = 0x6; break;
+ case 128: espr = 0x7; break;
+ case 256: espr = 0x8; break;
+ case 512: espr = 0x9; break;
+ case 1024: espr = 0xa; break;
+ case 2048: espr = 0xb; break;
+ default: espr = 0x0; ASSERT(0); break;
+ }
+
+ t = SPIPCI_RREG(osh, &regs->spih_ctrl);
+ t &= ~3;
+ t |= espr & 3;
+ SPIPCI_WREG(osh, &regs->spih_ctrl, t);
+
+ t = SPIPCI_RREG(osh, &regs->spih_ext);
+ t &= ~3;
+ t |= (espr >> 2) & 3;
+ SPIPCI_WREG(osh, &regs->spih_ext, t);
+
+ SPIPCI_WREG(osh, &regs->spih_hex_disp, disp);
+
+ /* For Rev 8, writing to the PLL_CTRL register resets
+ * the PLL, and it can re-acquire in 200uS. For
+ * Rev 7 and older, we use a software delay to allow
+ * the PLL to re-acquire, which takes more than 2mS.
+ */
+ if (si->rev < 8) {
+ /* Wait for clock to settle. */
+ OSL_DELAY(5000);
+ }
+
+ sd_info(("%s: SPI_CTRL=0x%08x SPI_EXT=0x%08x\n",
+ __FUNCTION__,
+ SPIPCI_RREG(osh, &regs->spih_ctrl),
+ SPIPCI_RREG(osh, &regs->spih_ext)));
+
+ return TRUE;
+}
+
+/* Configure PCI-SPI Host Controller High-Speed Clocking mode setting */
+bool
+spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+
+ if (si->rev >= 10) {
+ if (hsmode) {
+ SPIPCI_ORREG(osh, &regs->spih_ext, 0x10);
+ } else {
+ SPIPCI_ANDREG(osh, &regs->spih_ext, ~0x10);
+ }
+ }
+
+ return TRUE;
+}
+
+/* Disable device interrupt */
+void
+spi_devintr_off(sdioh_info_t *sd)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ sd->intmask &= ~SPIH_DEV_INTR;
+ SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask); /* Clear Intmask */
+ }
+}
+
+/* Enable device interrupt */
+void
+spi_devintr_on(sdioh_info_t *sd)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+
+ ASSERT(sd->lockcount == 0);
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ if (SPIPCI_RREG(osh, &regs->spih_ctrl) & 0x02) {
+ /* Ack in case one was pending but is no longer... */
+ SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_DEV_INTR);
+ }
+ sd->intmask |= SPIH_DEV_INTR;
+ /* Set device intr in Intmask */
+ SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
+ }
+}
+
+/* Check to see if an interrupt belongs to the PCI-SPI Host or a SPI Device */
+bool
+spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+ bool ours = FALSE;
+
+ uint32 raw_int, cur_int;
+ ASSERT(sd);
+
+ if (is_dev_intr)
+ *is_dev_intr = FALSE;
+ raw_int = SPIPCI_RREG(osh, &regs->spih_int_status);
+ cur_int = raw_int & sd->intmask;
+ if (cur_int & SPIH_DEV_INTR) {
+ if (sd->client_intr_enabled && sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ if (is_dev_intr)
+ *is_dev_intr = TRUE;
+ } else {
+ sd_trace(("%s: Not ready for intr: enabled %d, handler 0x%p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+ SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_DEV_INTR);
+ SPIPCI_RREG(osh, &regs->spih_int_status);
+ ours = TRUE;
+ } else if (cur_int & SPIH_CTLR_INTR) {
+ /* Interrupt is from SPI FIFO... just clear and ack it... */
+ sd_trace(("%s: SPI CTLR interrupt: raw_int 0x%08x cur_int 0x%08x\n",
+ __FUNCTION__, raw_int, cur_int));
+
+ /* Clear the interrupt in the SPI_STAT register */
+ SPIPCI_WREG(osh, &regs->spih_stat, 0x00000080);
+
+ /* Ack the interrupt in the interrupt controller */
+ SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_CTLR_INTR);
+ SPIPCI_RREG(osh, &regs->spih_int_status);
+
+ ours = TRUE;
+ } else if (cur_int & SPIH_WFIFO_INTR) {
+ sd_trace(("%s: SPI WR FIFO Empty interrupt: raw_int 0x%08x cur_int 0x%08x\n",
+ __FUNCTION__, raw_int, cur_int));
+
+ /* Disable the FIFO Empty Interrupt */
+ sd->intmask &= ~SPIH_WFIFO_INTR;
+ SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
+
+ sd->local_intrcount++;
+ sd->got_hcint = TRUE;
+ ours = TRUE;
+ } else {
+ /* Not an error: can share interrupts... */
+ sd_trace(("%s: Not my interrupt: raw_int 0x%08x cur_int 0x%08x\n",
+ __FUNCTION__, raw_int, cur_int));
+ ours = FALSE;
+ }
+
+ return ours;
+}
+
+static void
+hexdump(char *pfx, unsigned char *msg, int msglen)
+{
+ int i, col;
+ char buf[80];
+
+ ASSERT(strlen(pfx) + 49 <= sizeof(buf));
+
+ col = 0;
+
+ for (i = 0; i < msglen; i++, col++) {
+ if (col % 16 == 0)
+ strcpy(buf, pfx);
+ sprintf(buf + strlen(buf), "%02x", msg[i]);
+ if ((col + 1) % 16 == 0)
+ printf("%s\n", buf);
+ else
+ sprintf(buf + strlen(buf), " ");
+ }
+
+ if (col % 16 != 0)
+ printf("%s\n", buf);
+}
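+/*
+ * Illustrative note (not part of the original patch): hexdump() prints the
+ * buffer 16 bytes per line, each line prefixed with pfx, e.g.
+ *
+ *	hexdump(" OUT: ", msg, 4);
+ *
+ * prints a single line of the form " OUT: de ad be ef" for a 4-byte buffer.
+ */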
+
+/* Send/Receive an SPI Packet */
+void
+spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+ uint32 count;
+ uint32 spi_data_out;
+ uint32 spi_data_in;
+ bool yield;
+
+ sd_trace(("%s: enter\n", __FUNCTION__));
+
+ if (bcmpcispi_dump) {
+ printf("SENDRECV(len=%d)\n", msglen);
+ hexdump(" OUT: ", msg_out, msglen);
+ }
+
+#ifdef BCMSDYIELD
+ /* Only yield the CPU and wait for interrupt on Rev 8 and newer FPGA images. */
+ yield = ((msglen > 500) && (si->rev >= 8));
+#else
+ yield = FALSE;
+#endif /* BCMSDYIELD */
+
+ ASSERT(msglen % 4 == 0);
+
+
+ SPIPCI_ANDREG(osh, &regs->spih_gpio_data, ~SPIH_CS); /* Set GPIO CS# Low (asserted) */
+
+ for (count = 0; count < (uint32)msglen/4; count++) {
+ spi_data_out = ((uint32)((uint32 *)msg_out)[count]);
+ SPIPCI_WREG(osh, &regs->spih_data, spi_data_out);
+ }
+
+#ifdef BCMSDYIELD
+ if (yield) {
+ /* Ack the interrupt in the interrupt controller */
+ SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_WFIFO_INTR);
+ SPIPCI_RREG(osh, &regs->spih_int_status);
+
+ /* Enable the FIFO Empty Interrupt */
+ sd->intmask |= SPIH_WFIFO_INTR;
+ sd->got_hcint = FALSE;
+ SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
+
+ }
+#endif /* BCMSDYIELD */
+
+ /* Wait for write fifo to empty... */
+ SPIPCI_ANDREG(osh, &regs->spih_gpio_data, ~0x00000020); /* Set GPIO 5 Low */
+
+ if (yield) {
+ ASSERT((SPIPCI_RREG(sd->osh, &regs->spih_stat) & SPIH_WFEMPTY) == 0);
+ }
+
+ spi_waitbits(sd, yield);
+ SPIPCI_ORREG(osh, &regs->spih_gpio_data, 0x00000020); /* Set GPIO 5 High (de-asserted) */
+
+ for (count = 0; count < (uint32)msglen/4; count++) {
+ spi_data_in = SPIPCI_RREG(osh, &regs->spih_data);
+ ((uint32 *)msg_in)[count] = spi_data_in;
+ }
+
+ /* Set GPIO CS# High (de-asserted) */
+ SPIPCI_ORREG(osh, &regs->spih_gpio_data, SPIH_CS);
+
+ if (bcmpcispi_dump) {
+ hexdump(" IN : ", msg_in, msglen);
+ }
+}
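+/*
+ * Illustrative usage sketch (not part of the original patch): the transfer is
+ * full duplex, so a caller provides two word-aligned buffers of equal length,
+ * e.g.
+ *
+ *	uint8 out[8], in[8];
+ *	spi_sendrecv(sd, out, in, sizeof(out));
+ *
+ * msglen must be a multiple of 4, since the FIFOs are serviced one 32-bit
+ * word at a time (see the ASSERT above).
+ */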
+
+void
+spi_spinbits(sdioh_info_t *sd)
+{
+ spih_info_t *si = (spih_info_t *)sd->controller;
+ osl_t *osh = si->osh;
+ spih_regs_t *regs = si->regs;
+ uint spin_count; /* Spin loop bound check */
+
+ spin_count = 0;
+ while ((SPIPCI_RREG(sd->osh, &regs->spih_stat) & SPIH_WFEMPTY) == 0) {
+ if (spin_count > SPI_SPIN_BOUND) {
+			sd_err(("%s: SPIH_WFEMPTY spin count exceeded bound (%u iterations)\n",
+ __FUNCTION__, spin_count));
+ ASSERT(FALSE);
+ }
+ spin_count++;
+ }
+
+ /* Wait for SPI Transfer state machine to return to IDLE state.
+ * The state bits are only implemented in Rev >= 5 FPGA. These
+ * bits are hardwired to 00 for Rev < 5, so this check doesn't cause
+ * any problems.
+ */
+ spin_count = 0;
+ while ((SPIPCI_RREG(osh, &regs->spih_stat) & SPIH_STATE_MASK) != 0) {
+ if (spin_count > SPI_SPIN_BOUND) {
+			sd_err(("%s: SPIH_STATE_MASK spin count exceeded bound (%u iterations)\n",
+ __FUNCTION__, spin_count));
+ ASSERT(FALSE);
+ }
+ spin_count++;
+ }
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdh.c b/drivers/net/wireless/bcm4329/bcmsdh.c
new file mode 100644
index 000000000000..4bf5889e5a68
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh.c
@@ -0,0 +1,652 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c,v 1.35.2.1.4.8.6.13 2010/04/06 03:26:57 Exp $
+ */
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h> /* common SDIO/controller interface */
+#include <sbsdio.h> /* BRCM sdio device core */
+
+#include <sdio.h> /* sdio spec */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+
+struct bcmsdh_info
+{
+ bool init_success; /* underlying driver successfully attached */
+ void *sdioh; /* handler for sdioh */
+ uint32 vendevid; /* Target Vendor and Device ID on SD bus */
+ osl_t *osh;
+ bool regfail; /* Save status of last reg_read/reg_write call */
+ uint32 sbwad; /* Save backplane window address */
+};
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+ sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+{
+ bcmsdh_info_t *bcmsdh;
+
+ if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+ BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+
+ /* save the handler locally */
+ l_bcmsdh = bcmsdh;
+
+ if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
+ bcmsdh_detach(osh, bcmsdh);
+ return NULL;
+ }
+
+ bcmsdh->osh = osh;
+ bcmsdh->init_success = TRUE;
+
+ *regsva = (uint32 *)SI_ENUM_BASE;
+
+ /* Report the BAR, to fix if needed */
+ bcmsdh->sbwad = SI_ENUM_BASE;
+ return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bcmsdh != NULL) {
+ if (bcmsdh->sdioh) {
+ sdioh_detach(osh, bcmsdh->sdioh);
+ bcmsdh->sdioh = NULL;
+ }
+ MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+ }
+
+ l_bcmsdh = NULL;
+ return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ bool on;
+
+ ASSERT(bcmsdh);
+ status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+ if (SDIOH_API_SUCCESS(status))
+ return FALSE;
+ else
+ return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ ASSERT(sdh);
+ return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ ASSERT(sdh);
+
+	/* not supported yet */
+ return BCME_UNSUPPORTED;
+}
+
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+ uint8 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+ addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ uint8 *tmp_buf, *tmp_ptr;
+ uint8 *ptr;
+ bool ascii = func & ~0xf;
+ func &= 0x7;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+ ASSERT(cis);
+ ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+ status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+ if (ascii) {
+ /* Move binary bits to tmp and format them into the provided buffer. */
+ if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+ BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ bcopy(cis, tmp_buf, length);
+ for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+ ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff);
+ if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+ ptr += sprintf((char *)ptr, "\n");
+ }
+ MFREE(bcmsdh->osh, tmp_buf, length);
+ }
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
+static int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address)
+{
+ int err = 0;
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+
+ return err;
+}
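+/*
+ * Illustrative note (not part of the original patch): the 32-bit backplane
+ * address is programmed into the function-1 window one byte at a time:
+ * bits 15:8, 23:16 and 31:24 of the address go to SBADDRLOW, SBADDRMID and
+ * SBADDRHIGH respectively (each masked by the corresponding SBSDIO mask).
+ * The first config write that fails short-circuits the remaining writes and
+ * its error code is returned to the caller.
+ */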
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 word = 0;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if (bar0 != bcmsdh->sbwad) {
+ if (bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))
+ return 0xFFFFFFFF;
+
+ bcmsdh->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+ /* if ok, return appropriately masked word */
+ if (SDIOH_API_SUCCESS(status)) {
+ switch (size) {
+ case sizeof(uint8):
+ return (word & 0xff);
+ case sizeof(uint16):
+ return (word & 0xffff);
+ case sizeof(uint32):
+ return word;
+ default:
+ bcmsdh->regfail = TRUE;
+
+ }
+ }
+
+ /* otherwise, bad sdio access or invalid size */
+ BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+ return 0xFFFFFFFF;
+}
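+/*
+ * Illustrative usage sketch (not part of the original patch): a 4-byte read
+ * of a backplane register at the enumeration base, e.g.
+ *
+ *	uint32 val = bcmsdh_reg_read(sdh, SI_ENUM_BASE, 4);
+ *
+ * re-programs the backplane window first if SI_ENUM_BASE falls outside the
+ * cached sbwad, then issues the word request with the 4-byte access flag.
+ * Any failure returns 0xFFFFFFFF and leaves regfail set.
+ */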
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ __FUNCTION__, addr, size*8, data));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if (bar0 != bcmsdh->sbwad) {
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+ return err;
+
+ bcmsdh->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+ addr, &data, size);
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ if (SDIOH_API_SUCCESS(status))
+ return 0;
+
+ BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+ __FUNCTION__, data, addr, size));
+ return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+ return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if (bar0 != bcmsdh->sbwad) {
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+ return err;
+
+ bcmsdh->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if (bar0 != bcmsdh->sbwad) {
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+ return err;
+
+ bcmsdh->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+ ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+ (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_stop(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+ return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+ ASSERT(sdh);
+ return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+ return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+ return;
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdh_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_linux.c
new file mode 100644
index 000000000000..6d6097b78f7d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh_linux.c
@@ -0,0 +1,735 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c,v 1.42.10.10.2.14.4.2 2010/09/15 00:30:11 Exp $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+
+#if defined(OOB_INTR_ONLY)
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(OOB_INTR_ONLY) */
+#if defined(CONFIG_MACH_SANDGATE2G) || defined(CONFIG_MACH_LOGICPD_PXA270)
+#if !defined(BCMPLATFORM_BUS)
+#define BCMPLATFORM_BUS
+#endif /* !defined(BCMPLATFORM_BUS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#include <linux/platform_device.h>
+#endif /* KERNEL_VERSION(2, 6, 19) */
+#endif /* CONFIG_MACH_SANDGATE2G || CONFIG_MACH_LOGICPD_PXA270 */
+
+/**
+ * SDIO Host Controller info
+ */
+typedef struct bcmsdh_hc bcmsdh_hc_t;
+
+struct bcmsdh_hc {
+ bcmsdh_hc_t *next;
+#ifdef BCMPLATFORM_BUS
+ struct device *dev; /* platform device handle */
+#else
+ struct pci_dev *dev; /* pci device handle */
+#endif /* BCMPLATFORM_BUS */
+ osl_t *osh;
+ void *regs; /* SDIO Host Controller address */
+ bcmsdh_info_t *sdh; /* SDIO Host Controller handle */
+ void *ch;
+ unsigned int oob_irq;
+	unsigned long oob_flags; /* OOB host IRQ specification (edge trigger type, etc.) */
+ bool oob_irq_registered;
+#if defined(OOB_INTR_ONLY)
+ spinlock_t irq_lock;
+#endif
+};
+static bcmsdh_hc_t *sdhcinfo = NULL;
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL};
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+ /* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+ /* Check for Arasan host controller */
+ if (vendor == VENDOR_SI_IMAGE) {
+ return (TRUE);
+ }
+ /* Check for BRCM 27XX Standard host controller */
+ if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for BRCM Standard host controller */
+ if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for TI PCIxx21 Standard host controller */
+ if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ /* Ricoh R5C822 Standard SDIO Host */
+ if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+ return (TRUE);
+ }
+ /* JMicron Standard SDIO Host */
+ if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+ return (TRUE);
+ }
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+ /* This is the PciSpiHost. */
+ if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ printf("Found PCI SPI Host Controller\n");
+ return (TRUE);
+ }
+
+#endif /* BCMSDIOH_SPI */
+
+ return (FALSE);
+}
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+/* forward declarations */
+int bcmsdh_probe(struct device *dev);
+int bcmsdh_remove(struct device *dev);
+
+EXPORT_SYMBOL(bcmsdh_probe);
+EXPORT_SYMBOL(bcmsdh_remove);
+
+#else
+/* forward declarations */
+static int __devinit bcmsdh_probe(struct device *dev);
+static int __devexit bcmsdh_remove(struct device *dev);
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static struct device_driver bcmsdh_driver = {
+ .name = "pxa2xx-mci",
+ .bus = &platform_bus_type,
+ .probe = bcmsdh_probe,
+ .remove = bcmsdh_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ };
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_probe(struct device *dev)
+{
+ osl_t *osh = NULL;
+ bcmsdh_hc_t *sdhc = NULL;
+ ulong regs = 0;
+ bcmsdh_info_t *sdh = NULL;
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+ struct platform_device *pdev;
+ struct resource *r;
+#endif /* BCMLXSDMMC */
+ int irq = 0;
+ uint32 vendevid;
+ unsigned long irq_flags = 0;
+
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+ pdev = to_platform_device(dev);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ return -ENXIO;
+#endif /* BCMLXSDMMC */
+
+#if defined(OOB_INTR_ONLY)
+#ifdef HW_OOB
+ irq_flags = \
+ IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+ irq_flags = IRQF_TRIGGER_FALLING;
+#endif /* HW_OOB */
+ irq = dhd_customer_oob_irq_map(&irq_flags);
+ if (irq < 0) {
+ SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
+ return 1;
+ }
+#endif /* defined(OOB_INTR_ONLY) */
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+ SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+ __FUNCTION__,
+ MALLOCED(osh)));
+ goto err;
+ }
+ bzero(sdhc, sizeof(bcmsdh_hc_t));
+ sdhc->osh = osh;
+
+ sdhc->dev = (void *)dev;
+
+#ifdef BCMLXSDMMC
+ if (!(sdh = bcmsdh_attach(osh, (void *)0,
+ (void **)&regs, irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+#else
+ if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
+ (void **)&regs, irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+#endif /* BCMLXSDMMC */
+ sdhc->sdh = sdh;
+ sdhc->oob_irq = irq;
+ sdhc->oob_flags = irq_flags;
+ sdhc->oob_irq_registered = FALSE; /* to make sure.. */
+#if defined(OOB_INTR_ONLY)
+ spin_lock_init(&sdhc->irq_lock);
+#endif
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+ /* Read the vendor/device ID from the CIS */
+ vendevid = bcmsdh_query_device(sdh);
+
+ /* try to attach to the target device */
+ if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
+ (vendevid & 0xFFFF), 0, 0, 0, 0,
+ (void *)regs, NULL, sdh))) {
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ return 0;
+
+ /* error handling */
+err:
+ if (sdhc) {
+ if (sdhc->sdh)
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ }
+ if (osh)
+ osl_detach(osh);
+ return -ENODEV;
+}
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_remove(struct device *dev)
+{
+ bcmsdh_hc_t *sdhc, *prev;
+ osl_t *osh;
+
+ sdhc = sdhcinfo;
+ drvinfo.detach(sdhc->ch);
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ /* find the SDIO Host Controller state for this pdev and take it out from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == (void *)dev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+				sdhcinfo = sdhc->next;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc) {
+ SDLX_MSG(("%s: failed\n", __FUNCTION__));
+ return 0;
+ }
+
+
+ /* release SDIO Host Controller info */
+ osh = sdhc->osh;
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ osl_detach(osh);
+
+#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
+ dev_set_drvdata(dev, NULL);
+#endif /* !defined(BCMLXSDMMC) */
+
+ return 0;
+}
+
+#else /* BCMPLATFORM_BUS */
+
+#if !defined(BCMLXSDMMC)
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+ { vendor: PCI_ANY_ID,
+ device: PCI_ANY_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+ node: {},
+ name: "bcmsdh",
+ id_table: bcmsdh_pci_devid,
+ probe: bcmsdh_pci_probe,
+ remove: bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+ suspend: NULL,
+ resume: NULL,
+};
+
+
+/* Force detection to a particular PCI slot only. Allows for having multiple
+ * WL devices at once in a PC; only one instance of dhd will be usable at a
+ * time. The upper word is the bus number, the lower word is the slot number.
+ * The default value of 0xFFFFffff turns this off.
+ */
+extern uint sd_pci_slot;
+module_param(sd_pci_slot, uint, 0);
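+/*
+ * Illustrative note (not part of the original patch): to bind only to the
+ * controller in PCI bus 1, slot 2, the module would be loaded with
+ * sd_pci_slot=0x00010002 (upper word bus number, lower word slot number).
+ */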
+
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller. If so, attach to it and attach to the target device.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ osl_t *osh = NULL;
+ bcmsdh_hc_t *sdhc = NULL;
+ ulong regs;
+ bcmsdh_info_t *sdh = NULL;
+ int rc;
+
+ if (sd_pci_slot != 0xFFFFffff) {
+ if (pdev->bus->number != (sd_pci_slot>>16) ||
+ PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
+ "Found compatible SDIOHC" :
+ "Probing unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ pdev->vendor, pdev->device));
+ return -ENODEV;
+ }
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+ __FUNCTION__,
+			bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
+			"Using compatible SDIOHC" :
+			"WARNING, forced use of unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ pdev->vendor, pdev->device));
+ }
+
+ if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+ (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+ uint32 config_reg;
+
+ SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+ /*
+ * Set MMC_SD_DIS bit in FlashMedia Controller.
+		 * Disabling the SD/MMC Controller in the FlashMedia Controller
+ * allows the Standard SD Host Controller to take over control
+ * of the SD Slot.
+ */
+ config_reg |= 0x02;
+ OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+ osl_detach(osh);
+ }
+ /* match this pci device with what we support */
+ /* we can't solely rely on this to believe it is our SDIO Host Controller! */
+ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+ return -ENODEV;
+ }
+
+ /* this is a pci device we might support */
+ SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+ __FUNCTION__,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq));
+
+ /* use bcmsdh_query_device() to get the vendor ID of the target device so
+ * it will eventually appear in the Broadcom string on the console
+ */
+
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+ SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+ __FUNCTION__,
+ MALLOCED(osh)));
+ goto err;
+ }
+ bzero(sdhc, sizeof(bcmsdh_hc_t));
+ sdhc->osh = osh;
+
+ sdhc->dev = pdev;
+
+ /* map to address where host can access */
+ pci_set_master(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
+ (void **)&regs, pdev->irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ sdhc->sdh = sdh;
+
+ /* try to attach to the target device */
+ if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
+ bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
+ (void *)regs, NULL, sdh))) {
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+
+ return 0;
+
+ /* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+ if (osh)
+ osl_detach(osh);
+ return -ENODEV;
+}
+
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+ bcmsdh_hc_t *sdhc, *prev;
+ osl_t *osh;
+
+ /* find the SDIO Host Controller state for this pdev and take it out from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == pdev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+				sdhcinfo = sdhc->next;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc)
+ return;
+
+ drvinfo.detach(sdhc->ch);
+
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+ /* release SDIO Host Controller info */
+ osh = sdhc->osh;
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ osl_detach(osh);
+}
+#endif /* BCMLXSDMMC */
+#endif /* BCMPLATFORM_BUS */
+
+extern int sdio_function_init(void);
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+ int error = 0;
+
+ drvinfo = *driver;
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+ SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+ error = sdio_function_init();
+#else
+ SDLX_MSG(("Intel PXA270 SDIO Driver\n"));
+ error = driver_register(&bcmsdh_driver);
+#endif /* defined(BCMLXSDMMC) */
+ return error;
+#endif /* defined(BCMPLATFORM_BUS) */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (!(error = pci_module_init(&bcmsdh_pci_driver)))
+ return 0;
+#else
+ if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
+ return 0;
+#endif
+
+ SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#endif /* BCMPLATFORM_BUS */
+
+ return error;
+}
+
+extern void sdio_function_cleanup(void);
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (bcmsdh_pci_driver.node.next)
+#endif
+
+#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+ driver_unregister(&bcmsdh_driver);
+#endif
+#if defined(BCMLXSDMMC)
+ sdio_function_cleanup();
+#endif /* BCMLXSDMMC */
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+ pci_unregister_driver(&bcmsdh_pci_driver);
+#endif /* BCMPLATFORM_BUS */
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bool enable)
+{
+	static bool curstate = TRUE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
+ if (curstate != enable) {
+ if (enable)
+ enable_irq(sdhcinfo->oob_irq);
+ else
+ disable_irq_nosync(sdhcinfo->oob_irq);
+ curstate = enable;
+ }
+ spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+ dhd_pub_t *dhdp;
+
+ dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
+
+ bcmsdh_oob_intr_set(0);
+
+ if (dhdp == NULL) {
+ SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
+ return IRQ_HANDLED;
+ }
+
+ dhdsdio_isr((void *)dhdp->bus);
+
+ return IRQ_HANDLED;
+}
+
+int bcmsdh_register_oob_intr(void * dhdp)
+{
+ int error = 0;
+
+ SDLX_MSG(("%s Enter\n", __FUNCTION__));
+
+/* Example of HW_OOB for HW2: please refer to your host specification */
+/* sdhcinfo->oob_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
+
+ dev_set_drvdata(sdhcinfo->dev, dhdp);
+
+ if (!sdhcinfo->oob_irq_registered) {
+ SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__, \
+ (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
+ /* Refer to customer Host IRQ docs about proper irqflags definition */
+ error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
+ "bcmsdh_sdmmc", NULL);
+ if (error)
+ return -ENODEV;
+
+ enable_irq_wake(sdhcinfo->oob_irq);
+ sdhcinfo->oob_irq_registered = TRUE;
+ }
+
+ return 0;
+}
+
+void bcmsdh_set_irq(int flag)
+{
+ if (sdhcinfo->oob_irq_registered) {
+ SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+ if (flag) {
+ enable_irq(sdhcinfo->oob_irq);
+ enable_irq_wake(sdhcinfo->oob_irq);
+ } else {
+ disable_irq_wake(sdhcinfo->oob_irq);
+ disable_irq(sdhcinfo->oob_irq);
+ }
+ }
+}
+
+void bcmsdh_unregister_oob_intr(void)
+{
+ SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+
+ if (sdhcinfo->oob_irq_registered) {
+ disable_irq_wake(sdhcinfo->oob_irq);
+ disable_irq(sdhcinfo->oob_irq); /* just in case.. */
+ free_irq(sdhcinfo->oob_irq, NULL);
+ sdhcinfo->oob_irq_registered = FALSE;
+ }
+}
+#endif /* defined(OOB_INTR_ONLY) */
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel; /* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor; /* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok; /* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c
new file mode 100644
index 000000000000..031367b8f18f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c
@@ -0,0 +1,1304 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c,v 1.1.2.5.6.30.4.1 2010/09/02 23:12:21 Exp $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 512; /* Default blocksize */
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK 0x03
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+ int err_ret;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Enable Function 1 */
+ sdio_claim_host(gInstance->func[1]);
+ err_ret = sdio_enable_func(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+ }
+
+ return FALSE;
+}
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+ int err_ret;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (gInstance == NULL) {
+ sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+ return NULL;
+ }
+
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
+ if (sdioh_sdmmc_osinit(sd) != 0) {
+ sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+
+ gInstance->sd = sd;
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[1]);
+
+ sd->client_block_size[1] = 64;
+ err_ret = sdio_set_block_size(gInstance->func[1], 64);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(gInstance->func[1]);
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
+ sd_f2_blocksize));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (sd) {
+
+ /* Disable Function 2 */
+ sdio_claim_host(gInstance->func[2]);
+ sdio_disable_func(gInstance->func[2]);
+ sdio_release_host(gInstance->func[2]);
+
+ /* Disable Function 1 */
+ sdio_claim_host(gInstance->func[1]);
+ sdio_disable_func(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+
+ /* deregister irq */
+ sdioh_sdmmc_osfree(sd);
+
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(void)
+{
+ uint8 reg;
+ int err;
+
+ if (gInstance->func[0]) {
+ sdio_claim_host(gInstance->func[0]);
+
+ reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(gInstance->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Enable F1 and F2 interrupts, set master enable */
+ reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);
+
+ sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(gInstance->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(void)
+{
+ uint8 reg;
+ int err;
+
+ if (gInstance->func[0]) {
+ sdio_claim_host(gInstance->func[0]);
+ reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(gInstance->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+ sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+
+ sdio_release_host(gInstance->func[0]);
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ if (fn == NULL) {
+ sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+
+ /* register and unmask irq */
+ if (gInstance->func[2]) {
+ sdio_claim_host(gInstance->func[2]);
+ sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ if (gInstance->func[1]) {
+ sdio_claim_host(gInstance->func[1]);
+ sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_release_host(gInstance->func[1]);
+ }
+#elif defined(HW_OOB)
+ sdioh_enable_func_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+ if (gInstance->func[1]) {
+ /* register and unmask irq */
+ sdio_claim_host(gInstance->func[1]);
+ sdio_release_irq(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ }
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+ sdio_release_irq(gInstance->func[2]);
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+ sdioh_disable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 },
+ {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode work with DMA */
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ si->client_block_size[func] = blksize;
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_RXCHAIN):
+ int_val = FALSE;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ si->use_client_ints = (bool)int_val;
+ if (si->use_client_ints)
+ si->intmask |= CLIENT_INTR;
+ else
+ si->intmask &= ~CLIENT_INTR;
+
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+ else if (sd_ptr->offset & 2)
+ int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+ else
+ int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = 0;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+ SDIOH_API_RC status;
+ uint8 data;
+
+ if (enable)
+ data = 3; /* enable hw oob interrupt */
+ else
+ data = 4; /* disable hw oob interrupt */
+ data |= 4; /* Active HIGH */
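+ /* Net value written to vendor register 0xf2 below: 0x7 when enabling
+ * (enable bits plus active-high polarity), 0x4 when disabling (polarity
+ * bit only). The exact bit layout is an assumption inferred from the
+ * values used here.
+ */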
+
+ status = sdioh_request_byte(sd, SDIOH_WRITE, 0, 0xf2, &data);
+ return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int err_ret = 0; /* not all F0 write paths below set this before it is checked */
+
+ sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ if(rw) { /* CMD52 Write */
+ if (func == 0) {
+ /* Can only directly write to some F0 registers. Handle F2 enable
+ * as a special case.
+ */
+ if (regaddr == SDIOD_CCCR_IOEN) {
+ if (gInstance->func[2]) {
+ sdio_claim_host(gInstance->func[2]);
+ if (*byte & SDIO_FUNC_ENABLE_2) {
+ /* Enable Function 2 */
+ err_ret = sdio_enable_func(gInstance->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+ err_ret));
+ }
+ } else {
+ /* Disable Function 2 */
+ err_ret = sdio_disable_func(gInstance->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+ err_ret));
+ }
+ }
+ sdio_release_host(gInstance->func[2]);
+ }
+ }
+#if defined(MMC_SDIO_ABORT)
+ /* to allow abort command through F1 */
+ else if (regaddr == SDIOD_CCCR_IOABORT) {
+ sdio_claim_host(gInstance->func[func]);
+ /*
+ * This sdio_writeb() call can be replaced with another API,
+ * depending on future MMC driver changes.
+ * As of this time it is a temporary workaround.
+ */
+ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+#endif /* MMC_SDIO_ABORT */
+ else if (regaddr < 0xF0) {
+ sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+ } else {
+ /* Claim host controller, perform F0 write, and release */
+ sdio_claim_host(gInstance->func[func]);
+ sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+ } else {
+ /* Claim host controller, perform Fn write, and release */
+ sdio_claim_host(gInstance->func[func]);
+ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+ } else { /* CMD52 Read */
+ /* Claim host controller, perform Fn read, and release */
+ sdio_claim_host(gInstance->func[func]);
+
+ if (func == 0) {
+ *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+ } else {
+ *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+ }
+
+ sdio_release_host(gInstance->func[func]);
+ }
+
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int err_ret = SDIOH_API_RC_FAIL;
+
+ if (func == 0) {
+ sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[func]);
+
+ if(rw) { /* CMD52 Write */
+ if (nbytes == 4) {
+ sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+ } else if (nbytes == 2) {
+ sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ } else { /* CMD52 Read */
+ if (nbytes == 4) {
+ *word = sdio_readl(gInstance->func[func], addr, &err_ret);
+ } else if (nbytes == 2) {
+ *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ }
+
+ /* Release host controller */
+ sdio_release_host(gInstance->func[func]);
+
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+ rw ? "Write" : "Read", err_ret));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, void *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ uint32 SGCount = 0;
+ int err_ret = 0;
+
+ void *pnext;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(pkt);
+ DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[func]);
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ uint pkt_len = PKTLEN(sd->osh, pnext);
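+ /* Round the transfer length up to the next 4-byte boundary, matching
+ * DMA_ALIGN_MASK, before handing it to the SDIO core.
+ */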
+ pkt_len += 3;
+ pkt_len &= 0xFFFFFFFC;
+
+#ifdef CONFIG_MMC_MSM7X00A
+ if ((pkt_len % 64) == 32) {
+ sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+ pkt_len += 32;
+ }
+#endif /* CONFIG_MMC_MSM7X00A */
+ /* Make sure the packet is aligned properly. If it isn't, then this
+ * is the fault of sdioh_request_buffer() which is supposed to give
+ * us something we can work with.
+ */
+ ASSERT(((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) == 0);
+
+ if (write) {
+ /* TX: the fifo and incrementing-address cases take the same path */
+ err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ pkt_len);
+ } else if (fifo) {
+ err_ret = sdio_readsb(gInstance->func[func],
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ addr,
+ pkt_len);
+ } else {
+ err_ret = sdio_memcpy_fromio(gInstance->func[func],
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ addr,
+ pkt_len);
+ }
+
+ if (err_ret) {
+ sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len, err_ret));
+ } else {
+ sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len));
+ }
+
+ if (!fifo) {
+ addr += pkt_len;
+ }
+ SGCount ++;
+
+ }
+
+ /* Release host controller */
+ sdio_release_host(gInstance->func[func]);
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ SDIOH_API_RC Status;
+ void *mypkt = NULL;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Case 1: we don't have a packet. */
+ if (pkt == NULL) {
+ sd_data(("%s: Creating new %s Packet, len=%d\n",
+ __FUNCTION__, write ? "TX" : "RX", buflen_u));
+#ifdef DHD_USE_STATIC_BUF
+ if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#else
+ if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+ sd_err(("%s: PKTGET failed: len %d\n",
+ __FUNCTION__, buflen_u));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write) {
+ bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
+ }
+
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write) {
+ bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
+ }
+#ifdef DHD_USE_STATIC_BUF
+ PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+ PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+ } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
+ /* Case 2: We have a packet, but it is unaligned. */
+
+ /* In this case, we cannot have a chain. */
+ ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+ sd_data(("%s: Creating aligned %s Packet, len=%d\n",
+ __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
+#ifdef DHD_USE_STATIC_BUF
+ if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#else
+ if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+ sd_err(("%s: PKTGET failed: len %d\n",
+ __FUNCTION__, PKTLEN(sd->osh, pkt)));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write) {
+ bcopy(PKTDATA(sd->osh, pkt),
+ PKTDATA(sd->osh, mypkt),
+ PKTLEN(sd->osh, pkt));
+ }
+
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write) {
+ bcopy(PKTDATA(sd->osh, mypkt),
+ PKTDATA(sd->osh, pkt),
+ PKTLEN(sd->osh, mypkt));
+ }
+#ifdef DHD_USE_STATIC_BUF
+ PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+ PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+ } else { /* case 3: We have a packet and it is aligned. */
+ sd_data(("%s: Aligned %s Packet, direct DMA\n",
+ __FUNCTION__, write ? "Tx" : "Rx"));
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+ }
+
+ return (Status);
+}
+
+/* This function performs an "abort" for both the host and the device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+ char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+ /* Abort the given function by writing its number to the CCCR IOABORT register (CMD52 via F0) */
+ sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp = 0;
+
+ sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+ *data = temp;
+ *data &= 0xff;
+ sd_data(("%s: byte read data=0x%02x\n",
+ __FUNCTION__, *data));
+ } else {
+ sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+ if (regsize == 2)
+ *data &= 0xffff;
+
+ sd_data(("%s: word read data=0x%08x\n",
+ __FUNCTION__, *data));
+ }
+
+ return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+ sd = gInstance->sd;
+
+ ASSERT(sd != NULL);
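+ /* The MMC core invokes this handler with the host claimed. It is released
+ * here, presumably so the client's interrupt handler can issue its own SDIO
+ * requests, and re-claimed below before returning to the core.
+ */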
+ sdio_release_host(gInstance->func[0]);
+
+ if (sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else {
+ sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+
+ sdio_claim_host(gInstance->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+
+ sd = gInstance->sd;
+
+ ASSERT(sd != NULL);
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp;
+
+ temp = data & 0xff;
+ sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+ sd_data(("%s: byte write data=0x%02x\n",
+ __FUNCTION__, data));
+ } else {
+ if (regsize == 2)
+ data &= 0xffff;
+
+ sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+ sd_data(("%s: word write data=0x%08x\n",
+ __FUNCTION__, data));
+ }
+
+ return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *si, int stage)
+{
+ int ret;
+ sdioh_info_t *sd = gInstance->sd;
+
+ /* This has to be done in stages: the interrupt cannot be enabled until
+ firmware download is complete, otherwise polled SDIO accesses
+ would get in the way.
+ */
+ if (gInstance->func[0]) {
+ if (stage == 0) {
+ /* Since power to the chip was cut, the device has to be
+ re-enumerated. Set the block size and enable function 1
+ in preparation for downloading the firmware.
+ */
+ /* sdio_reset_comm() has been fixed in the latest kernel/msm.git for Linux
+ 2.6.27. The implementation prior to that is buggy and needs Broadcom's
+ patch for it.
+ */
+ if ((ret = sdio_reset_comm(gInstance->func[0]->card)))
+ sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+ else {
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[1]);
+
+ sd->client_block_size[1] = 64;
+ if (sdio_set_block_size(gInstance->func[1], 64)) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(gInstance->func[1]);
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ if (sdio_set_block_size(gInstance->func[2],
+ sd_f2_blocksize)) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+ "blocksize to %d\n", sd_f2_blocksize));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+ }
+ } else {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(gInstance->func[0]);
+ sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_enable_func_intr();
+#endif
+ bcmsdh_oob_intr_set(TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+
+ return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *si)
+{
+ /* The MSM7201A Android SDIO stack has a bug with interrupts:
+ internally the SDIO stack polls, which causes problems
+ when the device is turned off. So unregister the interrupt
+ with the SDIO stack to stop the polling.
+ */
+ if (gInstance->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(gInstance->func[0]);
+ sdio_release_irq(gInstance->func[1]);
+ sdio_release_irq(gInstance->func[2]);
+ sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_disable_func_intr();
+#endif
+ bcmsdh_oob_intr_set(FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+ return (0);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c
new file mode 100644
index 000000000000..5a1a46c93571
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,269 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c,v 1.1.2.5.6.17 2010/08/13 00:36:19 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+
+#include <bcmsdh_sdmmc.h>
+
+#include <dhd_dbg.h>
+
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ static struct sdio_func sdio_func_0;
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_trace(("sdio_device: 0x%04x\n", func->device));
+ sd_trace(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 1) {
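+ /* The Linux MMC core never hands out a struct sdio_func for function 0,
+ * so a minimal one is fabricated here; the rest of this layer uses
+ * gInstance->func[0] for F0 (CCCR) register accesses.
+ */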
+ sdio_func_0.num = 0;
+ sdio_func_0.card = func->card;
+ gInstance->func[0] = &sdio_func_0;
+ if(func->device == 0x4) { /* 4318 */
+ gInstance->func[2] = NULL;
+ sd_trace(("NIC found, calling bcmsdh_probe...\n"));
+ ret = bcmsdh_probe(&func->dev);
+ }
+ }
+
+ gInstance->func[func->num] = func;
+
+ if (func->num == 2) {
+ sd_trace(("F2 found, calling bcmsdh_probe...\n"));
+ ret = bcmsdh_probe(&func->dev);
+ }
+
+ return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 2) {
+ sd_trace(("F2 found, calling bcmsdh_remove...\n"));
+ bcmsdh_remove(&func->dev);
+ }
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+ .probe = bcmsdh_sdmmc_probe,
+ .remove = bcmsdh_sdmmc_remove,
+ .name = "bcmsdh_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+ };
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+};
+
+
+int
+sdioh_sdmmc_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ return BCME_OK;
+}
+
+void
+sdioh_sdmmc_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+#if !defined(OOB_INTR_ONLY)
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable) {
+ sdioh_sdmmc_devintr_on(sd);
+ } else {
+ sdioh_sdmmc_devintr_off(sd);
+ }
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+ int error = 0;
+ sdio_function_init();
+ return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+ sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int sdio_function_init(void)
+{
+ int error = 0;
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+
+ gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
+ if (!gInstance)
+ return -ENOMEM;
+
+ error = sdio_register_driver(&bcmsdh_sdmmc_driver);
+
+ return error;
+}
+
+/*
+ * module cleanup
+*/
+extern int bcmsdh_remove(struct device *dev);
+void sdio_function_cleanup(void)
+{
+ sd_trace(("%s Enter\n", __FUNCTION__));
+
+ sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+
+ kfree(gInstance); /* kfree() is a no-op on NULL, so no check is needed */
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdspi.c b/drivers/net/wireless/bcm4329/bcmsdspi.c
new file mode 100644
index 000000000000..636539be5ea5
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdspi.c
@@ -0,0 +1,1596 @@
+/*
+ * Broadcom BCMSDH to SPI Protocol Conversion Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.c,v 1.14.4.2.4.4.6.5 2010/03/10 03:09:48 Exp $
+ */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <siutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <pcicfg.h>
+
+
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+#include <proto/sdspi.h>
+
+#define SD_PAGE 4096
+
+/* Globals */
+
+uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_hiok = FALSE; /* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 512; /* Default blocksize */
+
+uint sd_divisor = 2; /* Default 33MHz/2 = 16MHz for dongle */
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+
+uint sd_toctl = 7;
+
+/* Prototypes */
+static bool sdspi_start_power(sdioh_info_t *sd);
+static int sdspi_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
+static int sdspi_card_enablefuncs(sdioh_info_t *sd);
+static void sdspi_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
+static int sdspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg,
+ uint32 *data, uint32 datalen);
+static int sdspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 *data);
+static int sdspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 data);
+static int sdspi_driver_init(sdioh_info_t *sd);
+static bool sdspi_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
+static int sdspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data);
+static int sdspi_abort(sdioh_info_t *sd, uint func);
+
+static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
+
+static uint8 sdspi_crc7(unsigned char* p, uint32 len);
+static uint16 sdspi_crc16(unsigned char* p, uint32 len);
+static int sdspi_crc_onoff(sdioh_info_t *sd, bool use_crc);
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
+
+ if (spi_osinit(sd) != 0) {
+ sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
+ sd->bar0 = (uintptr)bar0;
+ sd->irq = irq;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ sd->intr_handler_valid = FALSE;
+
+ /* Set defaults */
+ sd->sd_blockmode = FALSE;
+ sd->use_client_ints = TRUE;
+ sd->sd_use_dma = FALSE; /* DMA Not supported */
+
+ /* Haven't figured out how to make bytemode work with dma */
+ if (!sd->sd_blockmode)
+ sd->sd_use_dma = 0;
+
+ if (!spi_hw_attach(sd)) {
+ sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
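+ /* Driver init is attempted twice: if the first call fails, it is retried
+ * once before tearing down and bailing out.
+ */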
+ if (sdspi_driver_init(sd) != SUCCESS) {
+ if (sdspi_driver_init(sd) != SUCCESS) {
+ sd_err(("%s:sdspi_driver_init() failed()\n", __FUNCTION__));
+ spi_hw_detach(sd);
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+ }
+
+ if (spi_register_irq(sd, irq) != SUCCESS) {
+ sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+ spi_hw_detach(sd);
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (sd) {
+ if (sd->card_init_done)
+ sdspi_reset(sd, 1, 1);
+
+ sd_info(("%s: detaching from hardware\n", __FUNCTION__));
+ spi_free_irq(sd->irq, sd);
+ spi_hw_detach(sd);
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+ *onoff = sd->client_intr_enabled;
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return 0;
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_CRC
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_crc", IOV_CRC, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0},
+ {NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode work with DMA */
+ if (!si->sd_blockmode)
+ si->sd_use_dma = 0;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ spi_lock(si);
+ bcmerror = set_client_block_size(si, func, blksize);
+ spi_unlock(si);
+ break;
+ }
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ if (!spi_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("set clock failed!\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CRC):
+ int_val = (uint32)sd_crc;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CRC):
+ /* Apply new setting, but don't change sd_crc until
+ * after the CRC-mode is selected in the device. This
+ * is required because the software must generate a
+ * correct CRC for the CMD59 in order to be able to
+ * turn OFF the CRC.
+ */
+ sdspi_crc_onoff(si, int_val ? 1 : 0);
+ sd_crc = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+
+ if (!sdspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+ sd_err(("Failed changing highspeed mode to %d.\n", sd_hiok));
+ bcmerror = BCME_ERROR;
+ return ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)si->local_intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ sd_err(("IOV_HOSTREG unsupported\n"));
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ spi_lock(sd);
+ *cis = 0;
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdspi_card_regread(sd, 0, offset, 1, &foo) != SUCCESS) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ spi_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+ spi_lock(sd);
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
+
+ sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x\n", __FUNCTION__, rw, func, regaddr));
+
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+ SDIOH_CMD_52, cmd_arg, NULL, 0)) != SUCCESS) {
+ spi_unlock(sd);
+ return status;
+ }
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+ if (rsp5 != 0x00) {
+ sd_err(("%s: rsp5 flags is 0x%x func=%d\n",
+ __FUNCTION__, rsp5, func));
+ /* ASSERT(0); */
+ spi_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (rw == SDIOH_READ)
+ *byte = sd->card_rsp_data >> 24;
+
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int status;
+
+ spi_lock(sd);
+
+ if (rw == SDIOH_READ)
+ status = sdspi_card_regread(sd, func, addr, nbytes, word);
+ else
+ status = sdspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+ spi_unlock(sd);
+ return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ int len;
+ int buflen = (int)buflen_u;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+ spi_lock(sd);
+
+ ASSERT(reg_width == 4);
+ ASSERT(buflen_u < (1 << 30));
+ ASSERT(sd->client_block_size[func]);
+
+ sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+ __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+ buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+ /* Break buffer down into blocksize chunks:
+ * Bytemode: 1 block at a time.
+ * Blockmode: multiples of the block size at a time, up to SD_PAGE.
+ */
+ while (buflen > 0) {
+ if (sd->sd_blockmode) {
+ /* Max xfer is Page size */
+ len = MIN(SD_PAGE, buflen);
+
+ /* Round down to a block boundary */
+ if (buflen > sd->client_block_size[func])
+ len = (len/sd->client_block_size[func]) *
+ sd->client_block_size[func];
+ } else {
+ /* Byte mode: One block at a time */
+ len = MIN(sd->client_block_size[func], buflen);
+ }
+
+ if (sdspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+ spi_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ buffer += len;
+ buflen -= len;
+ if (!fifo)
+ addr += len;
+ }
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+static int
+sdspi_abort(sdioh_info_t *sd, uint func)
+{
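+ /* Hand-built CMD52 frame: 0x74 = start/transmission bits + command index 52;
+ * the next four bytes form the CMD52 argument (write, function 0, register 6/IOABORT).
+ */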
+ uint8 spi_databuf[] = { 0x74, 0x80, 0x00, 0x0C, 0xFF, 0x95, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ uint8 spi_rspbuf[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ int err = 0;
+
+ sd_err(("Sending SPI Abort to F%d\n", func));
+ spi_databuf[4] = func & 0x7;
+ /* write to function 0, addr 6 (IOABORT) func # in 3 LSBs. */
+ spi_sendrecv(sd, spi_databuf, spi_rspbuf, sizeof(spi_databuf));
+
+ return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint fnum)
+{
+ int ret;
+
+ spi_lock(sd);
+ ret = sdspi_abort(sd, fnum);
+ spi_unlock(sd);
+
+ return ret;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+
+/*
+ * Private/Static work routines
+ */
+static bool
+sdspi_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
+{
+ if (!sd)
+ return TRUE;
+
+ spi_lock(sd);
+ /* Reset client card */
+ if (client_reset && (sd->adapter_slot != -1)) {
+ if (sdspi_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
+ sd_err(("%s: Cannot write to card reg 0x%x\n",
+ __FUNCTION__, SDIOD_CCCR_IOABORT));
+ else
+ sd->card_rca = 0;
+ }
+
+ /* The host reset is a NOP in the sd-spi case. */
+ if (host_reset) {
+ sd->sd_mode = SDIOH_MODE_SPI;
+ }
+ spi_unlock(sd);
+ return TRUE;
+}
+
+static int
+sdspi_host_init(sdioh_info_t *sd)
+{
+ sdspi_reset(sd, 1, 0);
+
+ /* Default power-on mode is SD1; this driver uses SPI */
+ sd->sd_mode = SDIOH_MODE_SPI;
+ sd->polled_mode = TRUE;
+ sd->host_init_done = TRUE;
+ sd->card_init_done = FALSE;
+ sd->adapter_slot = 1;
+
+ return (SUCCESS);
+}
+
+#define CMD0_RETRIES 3
+#define CMD5_RETRIES 10
+
+static int
+get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
+{
+ uint32 rsp5;
+ int retries, status;
+
+ /* First issue a CMD0 to get the card into SPI mode. */
+ for (retries = 0; retries <= CMD0_RETRIES; retries++) {
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+ SDIOH_CMD_0, *cmd_arg, NULL, 0)) != SUCCESS) {
+ sd_err(("%s: No response to CMD0\n", __FUNCTION__));
+ continue;
+ }
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, SPI_RSP_ILL_CMD)) {
+ printf("%s: Card already initialized (continuing)\n", __FUNCTION__);
+ break;
+ }
+
+ if (GFIELD(rsp5, SPI_RSP_IDLE)) {
+ printf("%s: Card in SPI mode\n", __FUNCTION__);
+ break;
+ }
+ }
+
+ if (retries > CMD0_RETRIES) {
+ sd_err(("%s: Too many retries for CMD0\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ /* Get the Card's Operation Condition. */
+ /* Occasionally the board takes a while to become ready. */
+ for (retries = 0; retries <= CMD5_RETRIES; retries++) {
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+ SDIOH_CMD_5, *cmd_arg, NULL, 0)) != SUCCESS) {
+ sd_err(("%s: No response to CMD5\n", __FUNCTION__));
+ continue;
+ }
+
+ printf("CMD5 response data was: 0x%08x\n", sd->card_rsp_data);
+
+ if (GFIELD(sd->card_rsp_data, RSP4_CARD_READY)) {
+ printf("%s: Card ready\n", __FUNCTION__);
+ break;
+ }
+ }
+
+ if (retries > CMD5_RETRIES) {
+ sd_err(("%s: Too many retries for CMD5\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ *cmd_rsp = sd->card_rsp_data;
+
+ sdspi_crc_onoff(sd, sd_crc ? 1 : 0);
+
+ return (SUCCESS);
+}
+
+static int
+sdspi_crc_onoff(sdioh_info_t *sd, bool use_crc)
+{
+ uint32 args;
+ int status;
+
+ args = use_crc ? 1 : 0;
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+ SDIOH_CMD_59, args, NULL, 0)) != SUCCESS) {
+ sd_err(("%s: No response to CMD59\n", __FUNCTION__));
+ }
+
+ sd_info(("CMD59 response data was: 0x%08x\n", sd->card_rsp_data));
+
+ sd_err(("SD-SPI CRC turned %s\n", use_crc ? "ON" : "OFF"));
+ return (SUCCESS);
+}
+
+static int
+sdspi_client_init(sdioh_info_t *sd)
+{
+ uint8 fn_ints;
+
+ sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+ /* Start at ~400KHz clock rate for initialization */
+ if (!spi_start_clock(sd, 128)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+
+ if (!sdspi_start_power(sd)) {
+ sd_err(("sdspi_start_power failed\n"));
+ return ERROR;
+ }
+
+ if (sd->num_funcs == 0) {
+ sd_err(("%s: No IO funcs!\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ sdspi_card_enablefuncs(sd);
+
+ set_client_block_size(sd, 1, BLOCK_SIZE_4318);
+ fn_ints = INTR_CTL_FUNC1_EN;
+
+ if (sd->num_funcs >= 2) {
+ set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
+ fn_ints |= INTR_CTL_FUNC2_EN;
+ }
+
+ /* Enable/Disable Client interrupts */
+ /* Turn on here but disable at host controller */
+ if (sdspi_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
+ (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
+ sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ /* Switch to High-speed clocking mode if both host and device support it */
+ sdspi_set_highspeed_mode(sd, (bool)sd_hiok);
+
+ /* After configuring for High-Speed mode, set the desired clock rate. */
+ if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+
+ sd->card_init_done = TRUE;
+
+ return SUCCESS;
+}
+
+static int
+sdspi_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
+{
+ uint32 regdata;
+ int status;
+ bool hsmode;
+
+ if (HSMode == TRUE) {
+
+ sd_err(("Attempting to enable High-Speed mode.\n"));
+
+ if ((status = sdspi_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != SUCCESS) {
+ return status;
+ }
+ if (regdata & SDIO_SPEED_SHS) {
+ sd_err(("Device supports High-Speed mode.\n"));
+
+ regdata |= SDIO_SPEED_EHS;
+
+ sd_err(("Writing %08x to Card at %08x\n",
+ regdata, SDIOD_CCCR_SPEED_CONTROL));
+ if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ return status;
+ }
+
+ hsmode = 1;
+
+ sd_err(("High-speed clocking mode enabled.\n"));
+ }
+ else {
+ sd_err(("Device does not support High-Speed Mode.\n"));
+ hsmode = 0;
+ }
+ } else {
+ if ((status = sdspi_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != SUCCESS) {
+ return status;
+ }
+
+ /* Clear only the EHS bit; preserve the rest of the register. */
+ regdata &= ~SDIO_SPEED_EHS;
+
+ sd_err(("Writing %08x to Card at %08x\n",
+ regdata, SDIOD_CCCR_SPEED_CONTROL));
+ if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ return status;
+ }
+
+ sd_err(("Low-speed clocking mode enabled.\n"));
+ hsmode = 0;
+ }
+
+ spi_controller_highspeed_mode(sd, hsmode);
+
+ return TRUE;
+}
+
+bool
+sdspi_start_power(sdioh_info_t *sd)
+{
+ uint32 cmd_arg;
+ uint32 cmd_rsp;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+
+ cmd_arg = 0;
+ if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+ sd_err(("%s: Failed to get OCR; bailing\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ sd_err(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+ sd_err(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+ sd_err(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+ sd_err(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+ /* Verify that the card supports I/O mode */
+ if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+ sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+ /* Examine voltage: Arasan only supports 3.3 volts,
+ * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
+ */
+
+ if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+ sd_err(("This client does not support 3.3 volts!\n"));
+ return FALSE;
+ }
+
+
+ return TRUE;
+}
+
+static int
+sdspi_driver_init(sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if ((sdspi_host_init(sd)) != SUCCESS) {
+ return ERROR;
+ }
+
+ if (sdspi_client_init(sd) != SUCCESS) {
+ return ERROR;
+ }
+
+ return SUCCESS;
+}
+
+static int
+sdspi_card_enablefuncs(sdioh_info_t *sd)
+{
+ int status;
+ uint32 regdata;
+ uint32 regaddr, fbraddr;
+ uint8 func;
+ uint8 *ptr;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ /* Get the Card's common CIS address */
+ ptr = (uint8 *) &sd->com_cis_ptr;
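+ /* Assemble the 24-bit CIS pointer one byte at a time, low byte first
+ * (assumes a little-endian host).
+ */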
+ for (regaddr = SDIOD_CCCR_CISPTR_0; regaddr <= SDIOD_CCCR_CISPTR_2; regaddr++) {
+ if ((status = sdspi_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ return status;
+
+ *ptr++ = (uint8) regdata;
+ }
+
+ /* Only the lower 17-bits are valid */
+ sd->com_cis_ptr &= 0x0001FFFF;
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ ptr = (uint8 *) &sd->func_cis_ptr[func];
+ for (regaddr = SDIOD_FBR_CISPTR_0; regaddr <= SDIOD_FBR_CISPTR_2; regaddr++) {
+ if ((status = sdspi_card_regread (sd, 0, regaddr + fbraddr, 1, &regdata))
+ != SUCCESS)
+ return status;
+
+ *ptr++ = (uint8) regdata;
+ }
+
+ /* Only the lower 17-bits are valid */
+ sd->func_cis_ptr[func] &= 0x0001FFFF;
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ sd_info(("%s: write ESCI bit\n", __FUNCTION__));
+ /* Enable continuous SPI interrupt (ESCI bit) */
+ sdspi_card_regwrite(sd, 0, SDIOD_CCCR_BICTRL, 1, 0x60);
+
+ sd_info(("%s: enable f1\n", __FUNCTION__));
+ /* Enable function 1 on the card */
+ regdata = SDIO_FUNC_ENABLE_1;
+ if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
+ return status;
+
+ sd_info(("%s: done\n", __FUNCTION__));
+ return SUCCESS;
+}
+
+/* Read client card reg */
+static int
+sdspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
+
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_52, cmd_arg, NULL, 0))
+ != SUCCESS)
+ return status;
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+ if (rsp5 != 0x00)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, rsp5, func));
+
+ *data = sd->card_rsp_data >> 24;
+ } else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdspi_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_53, cmd_arg, NULL, 0))
+ != SUCCESS)
+ return status;
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+ if (rsp5 != 0x00)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, rsp5, func));
+
+ *data = sd->card_rsp_data;
+ if (regsize == 2) {
+ *data &= 0xffff;
+ }
+
+ sd_info(("%s: CMD53 func %d, addr 0x%x, size %d, data 0x%08x\n",
+ __FUNCTION__, func, regaddr, regsize, *data));
+
+
+ }
+
+ return SUCCESS;
+}
+
+/* write a client register */
+static int
+sdspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+ int status;
+ uint32 cmd_arg, rsp5, flags;
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_52, cmd_arg, NULL, 0))
+ != SUCCESS)
+ return status;
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+ flags = GFIELD(rsp5, RSP5_FLAGS);
+ if (flags && (flags != 0x10))
+ sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, flags));
+ }
+ else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = regsize;
+ sd->cmd53_wr_data = data;
+
+ sd_info(("%s: CMD53 func %d, addr 0x%x, size %d, data 0x%08x\n",
+ __FUNCTION__, func, regaddr, regsize, data));
+
+ /* sdspi_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_53, cmd_arg, NULL, 0))
+ != SUCCESS)
+ return status;
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+ if (rsp5 != 0x00)
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x00\n",
+ __FUNCTION__, rsp5));
+
+ }
+ return SUCCESS;
+}
+
+void
+sdspi_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
+{
+ *rsp_buffer = sd->card_response;
+}
+
+int max_errors = 0;
+
+#define SPI_MAX_PKT_LEN 768
+uint8 spi_databuf[SPI_MAX_PKT_LEN];
+uint8 spi_rspbuf[SPI_MAX_PKT_LEN];
+
+/* datalen is used for CMD53 length only (0 for sd->data_xfer_count) */
+static int
+sdspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg,
+ uint32 *data, uint32 datalen)
+{
+ uint32 cmd_reg;
+ uint32 cmd_arg = arg;
+ uint8 cmd_crc = 0x95; /* correct CRC for CMD0 and don't care for others. */
+ uint16 dat_crc;
+ uint8 cmd52data = 0;
+ uint32 i, j;
+ uint32 spi_datalen = 0;
+ uint32 spi_pre_cmd_pad = 0;
+ uint32 spi_max_response_pad = 128;
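+ /* spi_pre_cmd_pad: idle (SDSPI_IDLE_PAD) bytes clocked out before the command frame.
+ * spi_max_response_pad: how far into the response buffer to scan for the start bit.
+ */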
+
+ cmd_reg = 0;
+ cmd_reg = SFIELD(cmd_reg, SPI_DIR, 1);
+ cmd_reg = SFIELD(cmd_reg, SPI_CMD_INDEX, cmd);
+
+ if (GFIELD(cmd_arg, CMD52_RW_FLAG) == 1) { /* Same for CMD52 and CMD53 */
+ cmd_reg = SFIELD(cmd_reg, SPI_RW, 1);
+ }
+
+ switch (cmd) {
+ case SDIOH_CMD_59: /* CRC_ON_OFF (SPI Mode Only) - Response R1 */
+ cmd52data = arg & 0x1;
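+ /* Fall through: CMD59 shares the framing and response handling of CMD0/CMD5. */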
+ case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
+ case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
+ sd_trace(("%s: CMD%d\n", __FUNCTION__, cmd));
+ spi_datalen = 44;
+ spi_pre_cmd_pad = 12;
+ spi_max_response_pad = 28;
+ break;
+
+ case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
+ case SDIOH_CMD_7: /* Select card - Response R1 */
+ case SDIOH_CMD_15: /* Set card to inactive state - Response None */
+ sd_err(("%s: CMD%d is invalid for SPI Mode.\n", __FUNCTION__, cmd));
+ return ERROR;
+ break;
+
+ case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
+ cmd52data = GFIELD(cmd_arg, CMD52_DATA);
+ cmd_arg = arg;
+ cmd_reg = SFIELD(cmd_reg, SPI_FUNC, GFIELD(cmd_arg, CMD52_FUNCTION));
+ cmd_reg = SFIELD(cmd_reg, SPI_ADDR, GFIELD(cmd_arg, CMD52_REG_ADDR));
+ /* Display trace for byte write */
+ if (GFIELD(cmd_arg, CMD52_RW_FLAG) == 1) {
+ sd_trace(("%s: CMD52: Wr F:%d @0x%04x=%02x\n",
+ __FUNCTION__,
+ GFIELD(cmd_arg, CMD52_FUNCTION),
+ GFIELD(cmd_arg, CMD52_REG_ADDR),
+ cmd52data));
+ }
+
+ spi_datalen = 32;
+ spi_max_response_pad = 28;
+
+ break;
+ case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
+ cmd_arg = arg;
+ cmd_reg = SFIELD(cmd_reg, SPI_FUNC, GFIELD(cmd_arg, CMD53_FUNCTION));
+ cmd_reg = SFIELD(cmd_reg, SPI_ADDR, GFIELD(cmd_arg, CMD53_REG_ADDR));
+ cmd_reg = SFIELD(cmd_reg, SPI_BLKMODE, 0);
+ cmd_reg = SFIELD(cmd_reg, SPI_OPCODE, GFIELD(cmd_arg, CMD53_OP_CODE));
+ cmd_reg = SFIELD(cmd_reg, SPI_STUFF0, (sd->data_xfer_count>>8));
+ cmd52data = (uint8)sd->data_xfer_count;
+
+ /* Set upper bit in byte count if necessary, but don't set it for 512 bytes. */
+ if ((sd->data_xfer_count > 255) && (sd->data_xfer_count < 512)) {
+ cmd_reg |= 1;
+ }
+
+ if (GFIELD(cmd_reg, SPI_RW) == 1) { /* Write */
+ spi_max_response_pad = 32;
+ spi_datalen = (sd->data_xfer_count + spi_max_response_pad) & 0xFFFC;
+ } else { /* Read */
+
+ spi_max_response_pad = 32;
+ spi_datalen = (sd->data_xfer_count + spi_max_response_pad) & 0xFFFC;
+ }
+ sd_trace(("%s: CMD53: %s F:%d @0x%04x len=0x%02x\n",
+ __FUNCTION__,
+ (GFIELD(cmd_reg, SPI_RW) == 1 ? "Wr" : "Rd"),
+ GFIELD(cmd_arg, CMD53_FUNCTION),
+ GFIELD(cmd_arg, CMD53_REG_ADDR),
+ cmd52data));
+ break;
+
+ default:
+ sd_err(("%s: Unknown command %d\n", __FUNCTION__, cmd));
+ return ERROR;
+ }
+
+ /* Set up and issue the SDIO command */
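+ /* The command frame is six bytes: cmd_reg serialized MSB-first,
+ * the cmd52data byte (CMD52 data or CMD53 length), then a CRC7.
+ */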
+ memset(spi_databuf, SDSPI_IDLE_PAD, spi_datalen);
+ spi_databuf[spi_pre_cmd_pad + 0] = (cmd_reg & 0xFF000000) >> 24;
+ spi_databuf[spi_pre_cmd_pad + 1] = (cmd_reg & 0x00FF0000) >> 16;
+ spi_databuf[spi_pre_cmd_pad + 2] = (cmd_reg & 0x0000FF00) >> 8;
+ spi_databuf[spi_pre_cmd_pad + 3] = (cmd_reg & 0x000000FF);
+ spi_databuf[spi_pre_cmd_pad + 4] = cmd52data;
+
+ /* Generate a CRC7 for the command if CRC is enabled; otherwise use the
+ * default CRC7 of 0x95, which is correct for CMD0.
+ */
+ if (sd_crc) {
+ cmd_crc = sdspi_crc7(&spi_databuf[spi_pre_cmd_pad], 5);
+ }
+ spi_databuf[spi_pre_cmd_pad + 5] = cmd_crc;
+#define SPI_STOP_TRAN 0xFD
+
+ /* for CMD53 Write, put the data into the output buffer */
+ if ((cmd == SDIOH_CMD_53) && (GFIELD(cmd_arg, CMD53_RW_FLAG) == 1)) {
+ if (datalen != 0) {
+ spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD;
+ spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK;
+
+ for (i = 0; i < sd->data_xfer_count; i++) {
+ spi_databuf[i + 11 + spi_pre_cmd_pad] = ((uint8 *)data)[i];
+ }
+ if (sd_crc) {
+ dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], i);
+ } else {
+ dat_crc = 0xAAAA;
+ }
+ spi_databuf[i + 11 + spi_pre_cmd_pad] = (dat_crc >> 8) & 0xFF;
+ spi_databuf[i + 12 + spi_pre_cmd_pad] = dat_crc & 0xFF;
+ } else if (sd->data_xfer_count == 2) {
+ spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD;
+ spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK;
+ spi_databuf[spi_pre_cmd_pad + 11] = sd->cmd53_wr_data & 0xFF;
+ spi_databuf[spi_pre_cmd_pad + 12] = (sd->cmd53_wr_data & 0x0000FF00) >> 8;
+ if (sd_crc) {
+ dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], 2);
+ } else {
+ dat_crc = 0x22AA;
+ }
+ spi_databuf[spi_pre_cmd_pad + 13] = (dat_crc >> 8) & 0xFF;
+ spi_databuf[spi_pre_cmd_pad + 14] = (dat_crc & 0xFF);
+ } else if (sd->data_xfer_count == 4) {
+ spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD;
+ spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK;
+ spi_databuf[spi_pre_cmd_pad + 11] = sd->cmd53_wr_data & 0xFF;
+ spi_databuf[spi_pre_cmd_pad + 12] = (sd->cmd53_wr_data & 0x0000FF00) >> 8;
+ spi_databuf[spi_pre_cmd_pad + 13] = (sd->cmd53_wr_data & 0x00FF0000) >> 16;
+ spi_databuf[spi_pre_cmd_pad + 14] = (sd->cmd53_wr_data & 0xFF000000) >> 24;
+ if (sd_crc) {
+ dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], 4);
+ } else {
+ dat_crc = 0x44AA;
+ }
+ spi_databuf[spi_pre_cmd_pad + 15] = (dat_crc >> 8) & 0xFF;
+ spi_databuf[spi_pre_cmd_pad + 16] = (dat_crc & 0xFF);
+ } else {
+ printf("CMD53 Write: size %d unsupported\n", sd->data_xfer_count);
+ }
+ }
+
+ spi_sendrecv(sd, spi_databuf, spi_rspbuf, spi_datalen);
+
+ for (i = spi_pre_cmd_pad + SDSPI_COMMAND_LEN; i < spi_max_response_pad; i++) {
+ if ((spi_rspbuf[i] & SDSPI_START_BIT_MASK) == 0) {
+ break;
+ }
+ }
+
+ if (i == spi_max_response_pad) {
+ sd_err(("%s: Did not get a response for CMD%d\n", __FUNCTION__, cmd));
+ return ERROR;
+ }
+
+ /* Extract the response. */
+ sd->card_response = spi_rspbuf[i];
+
+ /* for CMD53 Read, find the start of the response data... */
+ if ((cmd == SDIOH_CMD_53) && (GFIELD(cmd_arg, CMD52_RW_FLAG) == 0)) {
+ for (; i < spi_max_response_pad; i++) {
+ if (spi_rspbuf[i] == SDSPI_START_BLOCK) {
+ break;
+ }
+ }
+
+ if (i == spi_max_response_pad) {
+ printf("Did not get a start of data phase for CMD%d\n", cmd);
+ max_errors++;
+ sdspi_abort(sd, GFIELD(cmd_arg, CMD53_FUNCTION));
+ }
+ sd->card_rsp_data = spi_rspbuf[i+1];
+ sd->card_rsp_data |= spi_rspbuf[i+2] << 8;
+ sd->card_rsp_data |= spi_rspbuf[i+3] << 16;
+ sd->card_rsp_data |= spi_rspbuf[i+4] << 24;
+
+ if (datalen != 0) {
+ i++;
+ for (j = 0; j < sd->data_xfer_count; j++) {
+ ((uint8 *)data)[j] = spi_rspbuf[i+j];
+ }
+ if (sd_crc) {
+ uint16 recv_crc;
+
+ recv_crc = spi_rspbuf[i+j] << 8 | spi_rspbuf[i+j+1];
+ dat_crc = sdspi_crc16((uint8 *)data, datalen);
+ if (dat_crc != recv_crc) {
+ sd_err(("%s: Incorrect data CRC: expected 0x%04x, "
+ "received 0x%04x\n",
+ __FUNCTION__, dat_crc, recv_crc));
+ }
+ }
+ }
+ return SUCCESS;
+ }
+
+ sd->card_rsp_data = spi_rspbuf[i+4];
+ sd->card_rsp_data |= spi_rspbuf[i+3] << 8;
+ sd->card_rsp_data |= spi_rspbuf[i+2] << 16;
+ sd->card_rsp_data |= spi_rspbuf[i+1] << 24;
+
+ /* Display trace for byte read */
+ if ((cmd == SDIOH_CMD_52) && (GFIELD(cmd_arg, CMD52_RW_FLAG) == 0)) {
+ sd_trace(("%s: CMD52: Rd F:%d @0x%04x=%02x\n",
+ __FUNCTION__,
+ GFIELD(cmd_arg, CMD53_FUNCTION),
+ GFIELD(cmd_arg, CMD53_REG_ADDR),
+ sd->card_rsp_data >> 24));
+ }
+
+ return SUCCESS;
+}
+
+/*
+ * On entry: if single-block or non-block, buffer size <= block size.
+ * If multi-block, buffer size is unlimited.
+ * Question is how to handle the left-overs in either single- or multi-block.
+ * I think the caller should break the buffer up so this routine will always
+ * use block size == buffer size to handle the end piece of the buffer
+ */
+
+static int
+sdspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+ int num_blocks, blocksize;
+ bool local_blockmode, local_dma;
+ bool read = rw == SDIOH_READ ? 1 : 0;
+
+ ASSERT(nbytes);
+
+ cmd_arg = 0;
+ sd_data(("%s: %s 53 func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, read ? "Rd" : "Wr", func, fifo ? "FIXED" : "INCR",
+ addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+ if (read) sd->r_cnt++; else sd->t_cnt++;
+
+ local_blockmode = sd->sd_blockmode;
+ local_dma = sd->sd_use_dma;
+
+ /* Don't bother with block mode on small xfers */
+ if (nbytes < sd->client_block_size[func]) {
+ sd_info(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
+ nbytes, sd->client_block_size[func]));
+ local_blockmode = FALSE;
+ local_dma = FALSE;
+ }
+
+ if (local_blockmode) {
+ blocksize = MIN(sd->client_block_size[func], nbytes);
+ num_blocks = nbytes/blocksize;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+ } else {
+ num_blocks = 1;
+ blocksize = nbytes;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ }
+
+ if (fifo)
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+ if (read)
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = nbytes;
+ if ((func == 2) && (fifo == 1)) {
+ sd_data(("%s: %s 53 func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, read ? "Rd" : "Wr", func, fifo ? "FIXED" : "INCR",
+ addr, nbytes, sd->r_cnt, sd->t_cnt));
+ }
+
+ /* sdspi_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdspi_cmd_issue(sd, local_dma,
+ SDIOH_CMD_53, cmd_arg,
+ data, nbytes)) != SUCCESS) {
+ sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+ return status;
+ }
+
+ sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+ if (rsp5 != 0x00) {
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x00\n",
+ __FUNCTION__, rsp5));
+ return ERROR;
+ }
+
+ return SUCCESS;
+}
+
+static int
+set_client_block_size(sdioh_info_t *sd, int func, int block_size)
+{
+ int base;
+ int err = 0;
+
+ sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
+ sd->client_block_size[func] = block_size;
+
+ /* Set the block size in the SDIO Card register */
+ base = func * SDIOD_FBR_SIZE;
+ err = sdspi_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
+ if (!err) {
+ err = sdspi_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_1, 1,
+ (block_size >> 8) & 0xff);
+ }
+
+ /*
+ * Do not set the block size in the SDIO Host register; that
+ * is func dependent and will get done on an individual
+ * transaction basis.
+ */
+
+ return (err ? BCME_SDIO_ERROR : 0);
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+ si->card_init_done = FALSE;
+ return sdspi_client_init(si);
+}
+
+#define CRC7_POLYNOM 0x09
+#define CRC7_CRCHIGHBIT 0x40
+
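+/* Bit-serial, MSB-first CRC-7 (SD command CRC, polynomial x^7 + x^3 + 1). */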
+static uint8 sdspi_crc7(unsigned char* p, uint32 len)
+{
+ uint8 c, j, bit, crc = 0;
+ uint32 i;
+
+ for (i = 0; i < len; i++) {
+ c = *p++;
+ for (j = 0x80; j; j >>= 1) {
+ bit = crc & CRC7_CRCHIGHBIT;
+ crc <<= 1;
+ if (c & j) bit ^= CRC7_CRCHIGHBIT;
+ if (bit) crc ^= CRC7_POLYNOM;
+ }
+ }
+
+ /* Convert the CRC7 to an 8-bit SD CRC */
+ crc = (crc << 1) | 1;
+
+ return (crc);
+}
+
+#define CRC16_POLYNOM 0x1021
+#define CRC16_CRCHIGHBIT 0x8000
+
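+/* Bit-serial, MSB-first CRC-16-CCITT (SD data CRC, polynomial x^16 + x^12 + x^5 + 1). */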
+static uint16 sdspi_crc16(unsigned char* p, uint32 len)
+{
+ uint32 i;
+ uint16 j, c, bit;
+ uint16 crc = 0;
+
+ for (i = 0; i < len; i++) {
+ c = *p++;
+ for (j = 0x80; j; j >>= 1) {
+ bit = crc & CRC16_CRCHIGHBIT;
+ crc <<= 1;
+ if (c & j) bit ^= CRC16_CRCHIGHBIT;
+ if (bit) crc ^= CRC16_POLYNOM;
+ }
+ }
+
+ return (crc);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdspi_linux.c b/drivers/net/wireless/bcm4329/bcmsdspi_linux.c
new file mode 100644
index 000000000000..e2e0ca6abe46
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdspi_linux.c
@@ -0,0 +1,252 @@
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi_linux.c,v 1.7.2.1.4.3 2008/06/30 21:09:36 Exp $
+ */
+
+#include <typedefs.h>
+#include <pcicfg.h>
+#include <bcmutils.h>
+
+#include <sdio.h> /* SDIO Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq(), free_irq() */
+
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+ wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* Interrupt handler */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+ sdioh_info_t *sd;
+ struct sdos_info *sdos;
+ bool ours;
+
+ sd = (sdioh_info_t *)dev_id;
+ sd->local_intrcount++;
+
+ if (!sd->card_init_done) {
+ sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+ return IRQ_RETVAL(FALSE);
+ } else {
+ ours = spi_check_client_intr(sd, NULL);
+
+ /* For local interrupts, wake the waiting process */
+ if (ours && sd->got_hcint) {
+ sdos = (struct sdos_info *)sd->sdos_info;
+ wake_up_interruptible(&sdos->intr_wait_queue);
+ }
+
+ return IRQ_RETVAL(ours);
+ }
+}
+
+/* Register with Linux for interrupts */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+ sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+ if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+ sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+ return ERROR;
+ }
+ return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+ free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+ REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+spi_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ init_waitqueue_head(&sdos->intr_wait_queue);
+ return BCME_OK;
+}
+
+void
+spi_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ if (!(sd->host_init_done && sd->card_init_done)) {
+ sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable && !sd->lockcount)
+ spi_devintr_on(sd);
+ else
+ spi_devintr_off(sd);
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+spi_lock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (sd->lockcount) {
+ sd_err(("%s: Already locked!\n", __FUNCTION__));
+ ASSERT(sd->lockcount == 0);
+ }
+ spi_devintr_off(sd);
+ sd->lockcount++;
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Enable client interrupt */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+ ASSERT(sd->lockcount > 0);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+ spi_devintr_on(sd);
+ }
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+
+#ifndef BCMSDYIELD
+ ASSERT(!yield);
+#endif
+ sd_trace(("%s: yield %d canblock %d\n",
+ __FUNCTION__, yield, BLOCKABLE()));
+
+ /* Clear the "interrupt happened" flag and last intrstatus */
+ sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+ if (yield && BLOCKABLE()) {
+ /* Wait for the indication, the interrupt will be masked when the ISR fires. */
+ wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+ } else
+#endif /* BCMSDYIELD */
+ {
+ spi_spinbits(sd);
+ }
+
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdstd.c b/drivers/net/wireless/bcm4329/bcmsdstd.c
new file mode 100644
index 000000000000..0ca1f8ff8a24
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdstd.c
@@ -0,0 +1,3127 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.c,v 1.64.4.1.4.4.2.18 2010/08/17 17:00:48 Exp $
+ */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <siutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+#include <pcicfg.h>
+
+
+#define SD_PAGE_BITS 12
+#define SD_PAGE (1 << SD_PAGE_BITS)
+
+#include <bcmsdstd.h>
+
+/* Globals */
+uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 64; /* Default blocksize */
+
+#ifdef BCMSDYIELD
+bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */
+uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */
+bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */
+#endif
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+uint8 sd_dma_mode = DMA_MODE_SDMA; /* Default to SDMA for now */
+
+uint sd_toctl = 7;
+
+static bool trap_errs = FALSE;
+
+static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
+
+/* Prototypes */
+static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
+static bool sdstd_start_power(sdioh_info_t *sd);
+static bool sdstd_bus_width(sdioh_info_t *sd, int width);
+static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
+static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
+static int sdstd_card_enablefuncs(sdioh_info_t *sd);
+static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
+static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
+static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 *data);
+static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 data);
+static int sdstd_driver_init(sdioh_info_t *sd);
+static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
+static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data);
+static int sdstd_abort(sdioh_info_t *sd, uint func);
+static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
+static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
+static void sd_map_dma(sdioh_info_t * sd);
+static void sd_unmap_dma(sdioh_info_t * sd);
+static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
+static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
+static void sd_create_adma_descriptor(sdioh_info_t *sd,
+ uint32 index, uint32 addr_phys,
+ uint16 length, uint16 flags);
+static void sd_dump_adma_dscr(sdioh_info_t *sd);
+static void sdstd_dumpregs(sdioh_info_t *sd);
+
+
+/*
+ * Private register access routines.
+ */
+
+/* 16 bit PCI regs */
+
+extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
+uint16
+sdstd_rreg16(sdioh_info_t *sd, uint reg)
+{
+
+ volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+ sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
+ return data;
+}
+
+extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
+void
+sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
+{
+ *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+ sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+
+static void
+sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
+{
+ volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+ sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
+ data |= val;
+ *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+
+}
+static void
+sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
+{
+
+ volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+ sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
+ data &= ~mask;
+ data |= (val & mask);
+ *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+}
+
+
+/* 32 bit PCI regs */
+static uint32
+sdstd_rreg(sdioh_info_t *sd, uint reg)
+{
+ volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+ sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
+ return data;
+}
+static inline void
+sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
+{
+ *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
+ sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
+
+}
+
+/* 8 bit PCI regs */
+static inline void
+sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
+{
+ *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
+ sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+static uint8
+sdstd_rreg8(sdioh_info_t *sd, uint reg)
+{
+ volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
+ sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
+ return data;
+}
+
+/*
+ * Private work routines
+ */
+
+sdioh_info_t *glob_sd;
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ glob_sd = sd;
+ sd->osh = osh;
+ if (sdstd_osinit(sd) != 0) {
+ sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+ sd->mem_space = (volatile char *)sdstd_reg_map(osh, (uintptr)bar0, SDIOH_REG_WINSZ);
+ sd_init_dma(sd);
+ sd->irq = irq;
+ if (sd->mem_space == NULL) {
+ sd_err(("%s:ioremap() failed\n", __FUNCTION__));
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+ sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ sd->intr_handler_valid = FALSE;
+
+ /* Set defaults */
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->sd_dma_mode = sd_dma_mode;
+
+ if (!sd->sd_blockmode)
+ sd->sd_dma_mode = DMA_MODE_NONE;
+
+ if (sdstd_driver_init(sd) != SUCCESS) {
+ /* If the host CPU was reset without resetting the SD bus or the
+ SD device, the device will still have its RCA, but the driver no
+ longer knows what it is (since the driver has been restarted).
+ Go through initialization once more to clear the RCA and reassign it.
+ */
+ sd_info(("driver_init failed - Reset RCA and try again\n"));
+ if (sdstd_driver_init(sd) != SUCCESS) {
+ sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
+ if (sd->mem_space) {
+ sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+ }
+
+ OSL_DMADDRWIDTH(osh, 32);
+
+ /* Always map DMA buffers, so we can switch between DMA modes. */
+ sd_map_dma(sd);
+
+ if (sdstd_register_irq(sd, irq) != SUCCESS) {
+ sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+ sdstd_free_irq(sd->irq, sd);
+ if (sd->mem_space) {
+ sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd) {
+ sd_unmap_dma(sd);
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+ sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
+ sdstd_free_irq(sd->irq, sd);
+ if (sd->card_init_done)
+ sdstd_reset(sd, 1, 1);
+ if (sd->mem_space) {
+ sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ uint16 intrstatus;
+ intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
+ return !!(intrstatus & CLIENT_INTR);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_YIELDCPU,
+ IOV_MINYIELD,
+ IOV_FORCERB,
+ IOV_CLOCK
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_UINT32, 0 },
+#ifdef BCMSDYIELD
+ {"sd_yieldcpu", IOV_YIELDCPU, 0, IOVT_BOOL, 0 },
+ {"sd_minyield", IOV_MINYIELD, 0, IOVT_UINT32, 0 },
+ {"sd_forcerb", IOV_FORCERB, 0, IOVT_BOOL, 0 },
+#endif
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0},
+ {NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode with DMA */
+ if (!si->sd_blockmode)
+ si->sd_dma_mode = DMA_MODE_NONE;
+ break;
+
+#ifdef BCMSDYIELD
+ case IOV_GVAL(IOV_YIELDCPU):
+ int_val = sd_yieldcpu;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_YIELDCPU):
+ sd_yieldcpu = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_MINYIELD):
+ int_val = sd_minyield;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MINYIELD):
+ sd_minyield = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_FORCERB):
+ int_val = sd_forcerb;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCERB):
+ sd_forcerb = (bool)int_val;
+ break;
+#endif /* BCMSDYIELD */
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ sdstd_lock(si);
+ bcmerror = set_client_block_size(si, func, blksize);
+ sdstd_unlock(si);
+ break;
+ }
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_dma_mode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_dma_mode = (char)int_val;
+ sdstd_set_dma_mode(si, si->sd_dma_mode);
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ si->use_client_ints = (bool)int_val;
+ if (si->use_client_ints)
+ si->intmask |= CLIENT_INTR;
+ else
+ si->intmask &= ~CLIENT_INTR;
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("set clock failed!\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ if (sd_power == 1) {
+ if (sdstd_driver_init(si) != SUCCESS) {
+ sd_err(("set SD Slot power failed!\n"));
+ bcmerror = BCME_ERROR;
+ } else {
+ sd_err(("SD Slot Powered ON.\n"));
+ }
+ } else {
+ uint8 pwr = 0;
+
+ pwr = SFIELD(pwr, PWR_BUS_EN, 0);
+ sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
+ sd_err(("SD Slot Powered OFF.\n"));
+ }
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ if (sd_clock == 1) {
+ sd_info(("SD Clock turned ON.\n"));
+ if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("sdstd_start_clock failed\n"));
+ bcmerror = BCME_ERROR;
+ }
+ } else {
+ /* turn off HC clock */
+ sdstd_wreg16(si, SD_ClockCntrl,
+ sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
+
+ sd_info(("SD Clock turned OFF.\n"));
+ }
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+
+ if (!sdstd_bus_width(si, sd_sdmode)) {
+ sd_err(("sdstd_bus_width failed\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+ bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)si->local_intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ int_val = sdstd_rreg8(si, sd_ptr->offset);
+ else if (sd_ptr->offset & 2)
+ int_val = sdstd_rreg16(si, sd_ptr->offset);
+ else
+ int_val = sdstd_rreg(si, sd_ptr->offset);
+
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
+ else if (sd_ptr->offset & 2)
+ sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
+ else
+ sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdstd_lock(sd);
+ *cis = 0;
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+ sdstd_lock(sd);
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
+
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
+ sdstd_unlock(sd);
+ return status;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ }
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (rw == SDIOH_READ)
+ *byte = GFIELD(rsp5, RSP5_DATA);
+
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
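+/* Register access of 'nbytes' width routed through the card register
+ * read/write helpers; the byte-swap path ('swap') is currently hard-wired off.
+ */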
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int status;
+ bool swap = FALSE;
+
+ sdstd_lock(sd);
+
+ if (rw == SDIOH_READ) {
+ status = sdstd_card_regread(sd, func, addr, nbytes, word);
+ if (swap)
+ *word = BCMSWAP32(*word);
+ } else {
+ if (swap)
+ *word = BCMSWAP32(*word);
+ status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
+ }
+
+ sdstd_unlock(sd);
+ return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
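+/* Transfer an arbitrary-length buffer: split it into block-mode chunks of up
+ * to SD_PAGE bytes (byte mode: one block at a time) and pad function-1 writes
+ * whose length is 3 mod 4 up to a multiple of 4 using a temporary buffer.
+ */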
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ int len;
+ int buflen = (int)buflen_u;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ uint8 *localbuf = NULL, *tmpbuf = NULL;
+ uint tmplen = 0;
+ bool local_blockmode = sd->sd_blockmode;
+
+ sdstd_lock(sd);
+
+ ASSERT(reg_width == 4);
+ ASSERT(buflen_u < (1 << 30));
+ ASSERT(sd->client_block_size[func]);
+
+ sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+ __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+ buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+ /* Break buffer down into blocksize chunks:
+ * Bytemode: 1 block at a time.
+ * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
+ * Both: leftovers are handled last (will be sent via bytemode).
+ */
+ while (buflen > 0) {
+ if (local_blockmode) {
+ /* Max xfer is Page size */
+ len = MIN(SD_PAGE, buflen);
+
+ /* Round down to a block boundary */
+ if (buflen > sd->client_block_size[func])
+ len = (len/sd->client_block_size[func]) *
+ sd->client_block_size[func];
+ if ((func == SDIO_FUNC_1) && ((len % 4) == 3) && (rw == SDIOH_WRITE)) {
+ tmplen = len;
+ sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
+ len++;
+ tmpbuf = buffer;
+ if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
+ sd_err(("out of memory, malloced %d bytes\n",
+ MALLOCED(sd->osh)));
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ bcopy(buffer, localbuf, len);
+ buffer = localbuf;
+ }
+ } else {
+ /* Byte mode: One block at a time */
+ len = MIN(sd->client_block_size[func], buflen);
+ }
+
+ if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (local_blockmode) {
+ if ((func == SDIO_FUNC_1) && ((tmplen % 4) == 3) && (rw == SDIOH_WRITE)) {
+ if (localbuf)
+ MFREE(sd->osh, localbuf, len);
+ len--;
+ buffer = tmpbuf;
+ sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
+ }
+ }
+
+ buffer += len;
+ buflen -= len;
+ if (!fifo)
+ addr += len;
+ }
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
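+/* Abort an I/O function: write the function number to the CCCR IOAbort
+ * register using an ABORT-type CMD52, then (in polled mode) wait for
+ * completion, check the R5 response, and reset the CMD/DAT lines.
+ */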
+static
+int sdstd_abort(sdioh_info_t *sd, uint func)
+{
+ int err = 0;
+ int retries;
+
+ uint16 cmd_reg;
+ uint32 cmd_arg;
+ uint32 rsp5;
+ uint8 rflags;
+
+ uint16 int_reg = 0;
+ uint16 plain_intstatus;
+
+ /* Argument is write to F0 (CCCR) IOAbort with function number */
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
+
+ /* Command is CMD52 write */
+ cmd_reg = 0;
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
+
+ if (sd->sd_mode == SDIOH_MODE_SPI) {
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ }
+
+ /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
+ if (!--retries) {
+ sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
+ if (trap_errs)
+ ASSERT(0);
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+ }
+
+ /* Clear errors from any previous commands */
+ if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
+ sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
+ sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
+ }
+ plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
+ if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
+ sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
+ if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
+ sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
+ }
+ if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
+ sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
+ err = BCME_NODEVICE;
+ goto done;
+ }
+ }
+
+ /* Issue the command */
+ sdstd_wreg(sd, SD_Arg0, cmd_arg);
+ sdstd_wreg16(sd, SD_Command, cmd_reg);
+
+ /* In interrupt mode return, expect later CMD_COMPLETE interrupt */
+ if (!sd->polled_mode)
+ return err;
+
+ /* Otherwise, wait for the command to complete */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries &&
+ (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+ (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+ /* If command completion fails, do a cmd reset and note the error */
+ if (!retries) {
+ sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+
+ sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
+ SW_RESET_CMD)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+
+ err = BCME_SDIO_ERROR;
+ }
+
+ /* Clear Command Complete interrupt */
+ int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* Check for Errors */
+ if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
+ sd_err(("%s: ErrorintrStatus: 0x%x, "
+ "(intrstatus = 0x%x, present state 0x%x) clearing\n",
+ __FUNCTION__, plain_intstatus,
+ sdstd_rreg16(sd, SD_IntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+
+ sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
+
+ sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
+ SW_RESET_DAT)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+
+ /* ABORT is dataless, only cmd errs count */
+ if (plain_intstatus & ERRINT_CMD_ERRS)
+ err = BCME_SDIO_ERROR;
+ }
+
+ /* If command failed don't bother looking at response */
+ if (err)
+ goto done;
+
+ /* Otherwise, check the response */
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ rflags = GFIELD(rsp5, RSP5_FLAGS);
+
+ if (rflags & SD_RSP_R5_ERRBITS) {
+ sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
+
+ /* The CRC error flag applies to the previous command */
+ if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+ }
+
+ if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
+ ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
+ sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+
+ if (GFIELD(rsp5, RSP5_STUFF)) {
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+
+done:
+ if (err == BCME_NODEVICE)
+ return err;
+
+ sdstd_wreg8(sd, SD_SoftwareReset,
+ SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
+
+ retries = RETRIES_LARGE;
+ do {
+ rflags = sdstd_rreg8(sd, SD_SoftwareReset);
+ if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
+ break;
+ } while (--retries);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
+ __FUNCTION__, rflags));
+ err = BCME_SDIO_ERROR;
+ }
+
+ return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint fnum)
+{
+ int ret;
+
+ sdstd_lock(sd);
+ ret = sdstd_abort(sd, fnum);
+ sdstd_unlock(sd);
+
+ return ret;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
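+/* Check and clear the error interrupt status; on CMD or DAT errors issue the
+ * corresponding line reset, and for CMD52/CMD53 request an abort of the
+ * affected function.
+ */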
+static int
+sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
+{
+ uint16 regval;
+ uint retries;
+ uint function = 0;
+
+ /* If no errors, we're done */
+ if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
+ return SUCCESS;
+
+ sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
+ __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
+ sdstd_rreg(sdioh_info, SD_PresentState)));
+ sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
+
+ /* On command error, issue CMD reset */
+ if (regval & ERRINT_CMD_ERRS) {
+ sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ for (retries = RETRIES_LARGE; retries; retries--)
+ if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
+ break;
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+ }
+
+ /* On data error, issue DAT reset */
+ if (regval & ERRINT_DATA_ERRS) {
+ sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
+ for (retries = RETRIES_LARGE; retries; retries--)
+ if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
+ break;
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
+ }
+ }
+
+ /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
+ if (cmd == SDIOH_CMD_53)
+ function = GFIELD(arg, CMD53_FUNCTION);
+ else if (cmd == SDIOH_CMD_52)
+ function = GFIELD(arg, CMD52_FUNCTION);
+ if (function) {
+ sd_trace(("%s: requesting abort for function %d after cmd %d\n",
+ __FUNCTION__, function, cmd));
+ sdstd_abort(sdioh_info, function);
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+
+ return ERROR;
+}
+
+
+
+/*
+ * Private/Static work routines
+ */
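+
+/* Reset the client card via the CCCR IOAbort reset bit and/or the host
+ * controller via SW_RESET_ALL; a host reset drops the bus back to 1-bit mode
+ * and re-applies the DMA mode selection.
+ */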
+static bool
+sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
+{
+ int retries = RETRIES_LARGE;
+ uchar regval;
+
+ if (!sd)
+ return TRUE;
+
+ sdstd_lock(sd);
+ /* Reset client card */
+ if (client_reset && (sd->adapter_slot != -1)) {
+ if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
+ sd_err(("%s: Cannot write to card reg 0x%x\n",
+ __FUNCTION__, SDIOD_CCCR_IOABORT));
+ else
+ sd->card_rca = 0;
+ }
+
+ /* Reset host controller */
+ if (host_reset) {
+ regval = SFIELD(0, SW_RESET_ALL, 1);
+ sdstd_wreg8(sd, SD_SoftwareReset, regval);
+ do {
+ sd_trace(("%s: waiting for reset\n", __FUNCTION__));
+ } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
+ sdstd_unlock(sd);
+ return (FALSE);
+ }
+
+ /* A reset should reset bus back to 1 bit mode */
+ sd->sd_mode = SDIOH_MODE_SD1;
+ sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+ }
+ sdstd_unlock(sd);
+ return TRUE;
+}
+
+/* Disable device interrupt */
+void
+sdstd_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ sd->intmask &= ~CLIENT_INTR;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+ }
+}
+
+/* Enable device interrupt */
+void
+sdstd_devintr_on(sdioh_info_t *sd)
+{
+ ASSERT(sd->lockcount == 0);
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
+ sdstd_wreg16(sd, SD_IntrStatusEnable, status);
+
+ sd->intmask |= CLIENT_INTR;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+ }
+}
+
+#ifdef BCMSDYIELD
+/* Enable/disable other interrupts */
+void
+sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ if (err) {
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
+ }
+
+ sd->intmask |= norm;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+
+void
+sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ if (err) {
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+ }
+
+ sd->intmask &= ~norm;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+#endif /* BCMSDYIELD */
+
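+/* Probe the host controller: identify the vendor, count the slots advertised
+ * in SD_SlotInfo, map each slot's BAR to find an occupied one, then cache the
+ * capability registers, apply the DMA mode and reset the host.
+ */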
+static int
+sdstd_host_init(sdioh_info_t *sd)
+{
+ int num_slots, full_slot;
+ uint8 reg8;
+
+ uint32 card_ins;
+ int slot, first_bar = 0;
+ bool detect_slots = FALSE;
+ uint bar;
+
+ /* Check for Arasan ID */
+ if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
+ sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
+ detect_slots = TRUE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
+ sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_BCM27XX;
+ detect_slots = FALSE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
+ sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
+ detect_slots = TRUE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
+ sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
+ __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
+ detect_slots = TRUE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
+ sd_info(("%s: JMicron Standard SDIO Host Controller\n",
+ __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_JMICRON;
+ detect_slots = TRUE;
+ } else {
+ return ERROR;
+ }
+
+ /*
+ * Determine num of slots
+ * Search each slot
+ */
+
+ first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
+ num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
+ num_slots &= 7;
+ num_slots++; /* map bits to num slots according to spec */
+
+ if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
+ ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
+ sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
+ /* Set BAR0 Window to SDIOSTH core */
+ OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
+
+ /* Set defaults particular to this controller. */
+ detect_slots = TRUE;
+ num_slots = 1;
+ first_bar = 0;
+
+ /* Controller supports ADMA2, so turn it on here. */
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
+ }
+
+ /* Map in each slot on the board and query it to see if a
+ * card is inserted. Use the first populated slot found.
+ */
+ if (sd->mem_space) {
+ sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+
+ full_slot = -1;
+
+ for (slot = 0; slot < num_slots; slot++) {
+ bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
+ sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
+ (uintptr)bar, SDIOH_REG_WINSZ);
+
+ sd->adapter_slot = -1;
+
+ if (detect_slots) {
+ card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
+ } else {
+ card_ins = TRUE;
+ }
+
+ if (card_ins) {
+ sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
+ if (full_slot < 0)
+ full_slot = slot;
+ } else {
+ sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
+ }
+
+ if (sd->mem_space) {
+ sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+ }
+
+ if (full_slot < 0) {
+ sd_err(("No slots on SDIO controller are populated\n"));
+ return -1;
+ }
+
+ bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
+ sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
+
+ sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
+ full_slot,
+ (full_slot + first_bar),
+ OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
+ sd->mem_space));
+
+
+ sd->adapter_slot = full_slot;
+
+ sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
+ switch (sd->version) {
+ case 0:
+ sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
+ sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+ break;
+ case 1:
+ case 2:
+ sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
+ sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+ break;
+ default:
+ sd_err(("%s: Host Controller version 0x%02x not supported.\n",
+ __FUNCTION__, sd->version));
+ break;
+ }
+
+ sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */
+ sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
+
+ sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+
+
+ sdstd_reset(sd, 1, 0);
+
+ /* Read SD4/SD1 mode */
+ if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
+ if (reg8 & SD4_MODE) {
+ sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
+ __FUNCTION__, reg8));
+ }
+ }
+
+ /* Default power on mode is SD1 */
+ sd->sd_mode = SDIOH_MODE_SD1;
+ sd->polled_mode = TRUE;
+ sd->host_init_done = TRUE;
+ sd->card_init_done = FALSE;
+ sd->adapter_slot = full_slot;
+
+ return (SUCCESS);
+}
+#define CMD5_RETRIES 200
+static int
+get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
+{
+ int retries, status;
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+ retries = CMD5_RETRIES;
+ do {
+ *cmd_rsp = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD5 failed\n", __FUNCTION__));
+ return status;
+ }
+ sdstd_cmd_getrsp(sd, cmd_rsp, 1);
+ if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
+ sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
+ } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
+ if (!retries)
+ return ERROR;
+
+ return (SUCCESS);
+}
+
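+/* Bring up the card: clear and enable status bits, start a slow init clock
+ * and bus power, enumerate via CMD0/CMD3/CMD7 as appropriate for the bus
+ * mode, enable the I/O functions, set bus width and block sizes, enable
+ * client interrupts in the CCCR, then switch to high-speed mode and the
+ * target clock divisor.
+ */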
+static int
+sdstd_client_init(sdioh_info_t *sd)
+{
+ uint32 cmd_arg, cmd_rsp;
+ int status;
+ uint8 fn_ints;
+
+
+ sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+ /* Clear any pending ints */
+ sdstd_wreg16(sd, SD_IntrStatus, 0x1ff);
+ sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
+
+ /* Enable both Normal and Error Status. This does not enable
+ * interrupts, it only enables the status bits to
+ * become 'live'
+ */
+ sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
+
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */
+
+ /* Start at ~400KHz clock rate for initialization */
+ if (!sdstd_start_clock(sd, 128)) {
+ sd_err(("sdstd_start_clock failed\n"));
+ return ERROR;
+ }
+ if (!sdstd_start_power(sd)) {
+ sd_err(("sdstd_start_power failed\n"));
+ return ERROR;
+ }
+
+ if (sd->num_funcs == 0) {
+ sd_err(("%s: No IO funcs!\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ /* In SPI mode, issue CMD0 first */
+ if (sd->sd_mode == SDIOH_MODE_SPI) {
+ cmd_arg = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
+ != SUCCESS) {
+ sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
+ return status;
+ }
+ }
+
+ if (sd->sd_mode != SDIOH_MODE_SPI) {
+ uint16 rsp6_status;
+
+ /* Card is operational. Ask it to send an RCA */
+ cmd_arg = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
+ return status;
+ }
+
+ /* Verify the card status returned with the cmd response */
+ sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+ rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
+ if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
+ GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
+ GFIELD(rsp6_status, RSP6STAT_ERROR)) {
+ sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
+ __FUNCTION__, rsp6_status));
+ return ERROR;
+ }
+
+ /* Save the Card's RCA */
+ sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
+ sd_info(("RCA is 0x%x\n", sd->card_rca));
+
+ if (rsp6_status)
+ sd_err(("raw status is 0x%x\n", rsp6_status));
+
+ /* Select the card */
+ cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
+ return status;
+ }
+ sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+ if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
+ sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
+ __FUNCTION__, cmd_rsp));
+ return ERROR;
+ }
+ }
+
+ sdstd_card_enablefuncs(sd);
+
+ if (!sdstd_bus_width(sd, sd_sdmode)) {
+ sd_err(("sdstd_bus_width failed\n"));
+ return ERROR;
+ }
+
+ set_client_block_size(sd, 1, BLOCK_SIZE_4318);
+ fn_ints = INTR_CTL_FUNC1_EN;
+
+ if (sd->num_funcs >= 2) {
+ set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
+ fn_ints |= INTR_CTL_FUNC2_EN;
+ }
+
+ /* Enable/Disable Client interrupts */
+ /* Turn on here but disable at host controller? */
+ if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
+ (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
+ sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ /* Switch to High-speed clocking mode if both host and device support it */
+ sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
+
+ /* After configuring for High-Speed mode, set the desired clock rate. */
+ if (!sdstd_start_clock(sd, (uint16)sd_divisor)) {
+ sd_err(("sdstd_start_clock failed\n"));
+ return ERROR;
+ }
+
+ sd->card_init_done = TRUE;
+
+ return SUCCESS;
+}
+
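+/* Enable or disable high-speed clocking: check the host CAP_HIGHSPEED bit and
+ * the card's SHS capability in the CCCR speed control register, set or clear
+ * EHS on the card, and mirror the result in the host HI_SPEED_EN bit.
+ */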
+static int
+sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
+{
+ uint32 regdata;
+ int status;
+ uint8 reg8;
+
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+
+
+ if (HSMode == TRUE) {
+ if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
+ sd_err(("Host Controller does not support hi-speed mode.\n"));
+ return BCME_ERROR;
+ }
+
+ sd_info(("Attempting to enable High-Speed mode.\n"));
+
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != SUCCESS) {
+ return BCME_SDIO_ERROR;
+ }
+ if (regdata & SDIO_SPEED_SHS) {
+ sd_info(("Device supports High-Speed mode.\n"));
+
+ regdata |= SDIO_SPEED_EHS;
+
+ sd_info(("Writing %08x to Card at %08x\n",
+ regdata, SDIOD_CCCR_SPEED_CONTROL));
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ return BCME_SDIO_ERROR;
+ }
+
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != BCME_OK) {
+ return BCME_SDIO_ERROR;
+ }
+
+ sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
+
+ reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
+
+ sd_err(("High-speed clocking mode enabled.\n"));
+ }
+ else {
+ sd_err(("Device does not support High-Speed Mode.\n"));
+ reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
+ }
+ } else {
+ /* Force off device bit */
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != BCME_OK) {
+ return status;
+ }
+ if (regdata & SDIO_SPEED_EHS) {
+ regdata &= ~SDIO_SPEED_EHS;
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ return status;
+ }
+ }
+
+ sd_err(("High-speed clocking mode disabled.\n"));
+ reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
+ }
+
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+ return BCME_OK;
+}
+
+/* Select DMA Mode:
+ * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
+ * Otherwise, pick the selected mode if supported.
+ * If not supported, use PIO mode.
+ */
+static int
+sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
+{
+ uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
+ int8 prev_dma_mode = sd->sd_dma_mode;
+
+ switch (prev_dma_mode) {
+ case DMA_MODE_AUTO:
+ sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
+ __FUNCTION__));
+ if (GFIELD(sd->caps, CAP_ADMA2)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
+ dma_sel_bits = SDIOH_ADMA2_MODE;
+ } else if (GFIELD(sd->caps, CAP_ADMA1)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA1;
+ dma_sel_bits = SDIOH_ADMA1_MODE;
+ } else if (GFIELD(sd->caps, CAP_DMA)) {
+ sd->sd_dma_mode = DMA_MODE_SDMA;
+ } else {
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_NONE:
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ break;
+ case DMA_MODE_SDMA:
+ if (GFIELD(sd->caps, CAP_DMA)) {
+ sd->sd_dma_mode = DMA_MODE_SDMA;
+ } else {
+ sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_ADMA1:
+ if (GFIELD(sd->caps, CAP_ADMA1)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA1;
+ dma_sel_bits = SDIOH_ADMA1_MODE;
+ } else {
+ sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_ADMA2:
+ if (GFIELD(sd->caps, CAP_ADMA2)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
+ dma_sel_bits = SDIOH_ADMA2_MODE;
+ } else {
+ sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_ADMA2_64:
+ sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ break;
+ default:
+ sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
+ prev_dma_mode));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ break;
+ }
+
+ /* clear SysAddr, only used for SDMA */
+ sdstd_wreg(sd, SD_SysAddr, 0);
+
+ sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
+
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+ reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+ sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
+
+ return BCME_OK;
+}
+
+
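+/* Program the clock divisor, enable the internal clock and wait for it to
+ * stabilize, then gate the SD clock on and derive a data timeout value from
+ * the divisor.
+ */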
+bool
+sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
+{
+ uint rc, count;
+ uint16 divisor;
+
+ /* turn off HC clock */
+ sdstd_wreg16(sd, SD_ClockCntrl,
+ sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */
+
+ /* Set divisor */
+
+ divisor = (new_sd_divisor >> 1) << 8;
+
+ sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
+ sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
+ sd_info(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
+ new_sd_divisor, divisor));
+
+ sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_TO_CLKFREQ)));
+
+ if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
+ ((50 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
+ ((48 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
+ ((33 % new_sd_divisor) ? "KHz" : "MHz")));
+
+ } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
+ } else {
+ sd_err(("Need to determine divisor for %d MHz clocks\n",
+ GFIELD(sd->caps, CAP_TO_CLKFREQ)));
+ sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
+ return (FALSE);
+ }
+
+ sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */
+
+ /* Wait for clock to stabilize */
+ rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
+ count = 0;
+ while (!rc) {
+ OSL_DELAY(1);
+ sd_info(("Waiting for clock to become stable 0x%x\n", rc));
+ rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
+ count++;
+ if (count > 10000) {
+ sd_err(("%s:Clocks failed to stabilize after %u attempts",
+ __FUNCTION__, count));
+ return (FALSE);
+ }
+ }
+ /* Turn on clock */
+ sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
+
+ /* Set timeout control (adjust default value based on divisor).
+ * Disabling timeout interrupts during setting is advised by host spec.
+ */
+ {
+ uint16 regdata;
+ uint toval;
+
+ toval = sd_toctl;
+ divisor = new_sd_divisor;
+
+ while (toval && !(divisor & 1)) {
+ toval -= 1;
+ divisor >>= 1;
+ }
+
+ regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
+ sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
+ }
+
+ OSL_DELAY(2);
+
+ sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
+
+ return TRUE;
+}
+
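+/* Apply the highest supported bus voltage, wait for power to settle, then use
+ * CMD5 to read the OCR, the number of I/O functions and the card-ready status.
+ */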
+bool
+sdstd_start_power(sdioh_info_t *sd)
+{
+ char *s;
+ uint32 cmd_arg;
+ uint32 cmd_rsp;
+ uint8 pwr = 0;
+ int volts;
+
+ volts = 0;
+ s = NULL;
+ if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
+ volts = 5;
+ s = "1.8";
+ }
+ if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
+ volts = 6;
+ s = "3.0";
+ }
+ if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
+ volts = 7;
+ s = "3.3";
+ }
+
+ pwr = SFIELD(pwr, PWR_VOLTS, volts);
+ pwr = SFIELD(pwr, PWR_BUS_EN, 1);
+ sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
+ sd_info(("Setting Bus Power to %s Volts\n", s));
+
+ /* Wait for power to stabilize, Dongle takes longer than NIC. */
+ OSL_DELAY(250000);
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+ cmd_arg = 0;
+ cmd_rsp = 0;
+ if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+ sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
+ sdstd_reset(sd, 0, 1);
+ return FALSE;
+ }
+
+ sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+ sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+ sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+ sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+ /* Verify that the card supports I/O mode */
+ if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+ sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+ return FALSE;
+ }
+ sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+ /* Examine voltage: Arasan only supports 3.3 volts,
+ * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
+ */
+
+ if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+ sd_err(("This client does not support 3.3 volts!\n"));
+ return FALSE;
+ }
+ sd_info(("Leaving bus power at 3.3 Volts\n"));
+
+ cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
+ cmd_rsp = 0;
+ get_ocr(sd, &cmd_arg, &cmd_rsp);
+ sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+ return TRUE;
+}
+
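+/* Set the bus width on both sides: program the CCCR bus interface control
+ * register on the card and the SD4_MODE bit in the host control register.
+ */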
+bool
+sdstd_bus_width(sdioh_info_t *sd, int new_mode)
+{
+ uint32 regdata;
+ int status;
+ uint8 reg8;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd->sd_mode == new_mode) {
+ sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
+ /* Could exit, but continue just in case... */
+ }
+
+ /* Set client side via reg 0x7 in CCCR */
+ if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
+ return FALSE;
+ regdata &= ~BUS_SD_DATA_WIDTH_MASK;
+ if (new_mode == SDIOH_MODE_SD4) {
+ sd_info(("Changing to SD4 Mode\n"));
+ regdata |= SD4_MODE;
+ } else if (new_mode == SDIOH_MODE_SD1) {
+ sd_info(("Changing to SD1 Mode\n"));
+ } else {
+ sd_err(("SPI Mode not supported by Standard Host Controller\n"));
+ }
+
+ if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
+ return FALSE;
+
+ /* Set host side via Host reg */
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
+ if (new_mode == SDIOH_MODE_SD4)
+ reg8 |= SD4_MODE;
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+ sd->sd_mode = new_mode;
+
+ return TRUE;
+}
+
+static int
+sdstd_driver_init(sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((sdstd_host_init(sd)) != SUCCESS) {
+ return ERROR;
+ }
+
+ if (sdstd_client_init(sd) != SUCCESS) {
+ return ERROR;
+ }
+
+ return SUCCESS;
+}
+
+static int
+sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
+
+static int
+sdstd_card_enablefuncs(sdioh_info_t *sd)
+{
+ int status;
+ uint32 regdata;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ /* Enable function 1 on the card */
+ regdata = SDIO_FUNC_ENABLE_1;
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
+ return status;
+
+ return SUCCESS;
+}
+
+/* Read client card reg */
+static int
+sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
+
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ }
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+ *data = GFIELD(rsp5, RSP5_DATA);
+ } else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (sd->polled_mode) {
+ volatile uint16 int_reg;
+ int retries = RETRIES_LARGE;
+
+ /* Wait for Read Buffer to become ready */
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
+
+ if (!retries) {
+ sd_err(("%s: Timeout on Buf_Read_Ready: "
+ "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+
+ /* Have Buffer Ready, so clear it and read the data */
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
+ if (regsize == 2)
+ *data = sdstd_rreg16(sd, SD_BufferDataPort0);
+ else
+ *data = sdstd_rreg(sd, SD_BufferDataPort0);
+
+ /* Check Status.
+ * After the data is read, the Transfer Complete bit should be on
+ */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ if (!retries) {
+ sd_err(("%s: Timeout on xfer complete: "
+ "intr 0x%04x err 0x%04x state 0x%08x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ return (ERROR);
+ }
+
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
+ }
+ }
+ if (sd->polled_mode) {
+ if (regsize == 2)
+ *data &= 0xffff;
+ }
+ return SUCCESS;
+}
+
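+/* Interrupt dispatch: for a card interrupt, temporarily mask it and call the
+ * registered client handler; for host-controller interrupts, disable the
+ * signal enables, set got_hcint and record the status in last_intrstatus.
+ */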
+bool
+check_client_intr(sdioh_info_t *sd)
+{
+ uint16 raw_int, cur_int, old_int;
+
+ raw_int = sdstd_rreg16(sd, SD_IntrStatus);
+ cur_int = raw_int & sd->intmask;
+
+ if (!cur_int) {
+ /* Not an error -- might share interrupts... */
+ return FALSE;
+ }
+
+ if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
+ old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
+
+ if (sd->client_intr_enabled && sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else {
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+ sdstd_wreg16(sd, SD_IntrStatusEnable, old_int);
+ } else {
+ /* Local interrupt: disable, set flag, and save intrstatus */
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+ sd->local_intrcount++;
+ sd->got_hcint = TRUE;
+ sd->last_intrstatus = cur_int;
+ }
+
+ return TRUE;
+}
+
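+/* Poll the normal and error interrupt status registers until one of the
+ * requested bits is set (or retries expire) and latch the masked result into
+ * last_intrstatus.
+ */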
+void
+sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ uint16 int_reg, err_reg;
+ int retries = RETRIES_LARGE;
+
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+ } while (--retries && !(int_reg & norm) && !(err_reg & err));
+
+ norm |= sd->intmask;
+ if (err_reg & err)
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sd->last_intrstatus = int_reg & norm;
+}
+
+/* write a client register */
+static int
+sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+ int status;
+ uint32 cmd_arg, rsp5, flags;
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ flags = GFIELD(rsp5, RSP5_FLAGS);
+ if (flags && (flags != 0x10))
+ sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, flags));
+ }
+ else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (sd->polled_mode) {
+ uint16 int_reg;
+ int retries = RETRIES_LARGE;
+
+ /* Wait for Write Buffer to become ready */
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
+
+ if (!retries) {
+ sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
+ "errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+ /* Clear Write Buf Ready bit */
+ int_reg = 0;
+ int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* At this point we have Buffer Ready, so write the data */
+ if (regsize == 2)
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
+ else
+ sdstd_wreg(sd, SD_BufferDataPort0, data);
+
+ /* Wait for Transfer Complete */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ if (retries == 0) {
+ sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
+ "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
+ int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sd->r_cnt, sd->t_cnt));
+ }
+ /* Clear the status bits */
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
+ }
+ }
+ return SUCCESS;
+}
+
+void
+sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
+{
+ int rsp_count;
+ int respaddr = SD_Response0;
+
+ if (count > 4)
+ count = 4;
+
+ for (rsp_count = 0; rsp_count < count; rsp_count++) {
+ *rsp_buffer++ = sdstd_rreg(sd, respaddr);
+ respaddr += 4;
+ }
+}
+
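+/* Build the command register for the given command index (response type, CRC
+ * and index checking, data present), set up the transfer mode and DMA engine
+ * for CMD53, issue the command, and in polled mode wait for CMD_COMPLETE and
+ * check for errors.
+ */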
+static int
+sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
+{
+ uint16 cmd_reg;
+ int retries;
+ uint32 cmd_arg;
+ uint16 xfer_reg = 0;
+
+
+ if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
+ ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
+ sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
+ return ERROR;
+ }
+
+ retries = RETRIES_SMALL;
+ while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
+ __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
+ }
+ if (!retries) {
+ sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+
+ cmd_reg = 0;
+ switch (cmd) {
+ case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_7: /* Select card - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_15: /* Set card to inactive state - Response None */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
+
+ sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
+ __FUNCTION__,
+ GFIELD(arg, CMD52_FUNCTION),
+ GFIELD(arg, CMD52_REG_ADDR),
+ GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
+ GFIELD(arg, CMD52_DATA)));
+
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
+
+ sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
+ __FUNCTION__,
+ GFIELD(arg, CMD53_FUNCTION),
+ GFIELD(arg, CMD53_REG_ADDR),
+ GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
+ GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
+ GFIELD(arg, CMD53_BYTE_BLK_CNT),
+ GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
+
+ cmd_arg = arg;
+ xfer_reg = 0;
+
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+
+ use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
+
+ if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
+ uint16 blocksize;
+ uint16 blockcount;
+ int func;
+
+ ASSERT(sdioh_info->sd_blockmode);
+
+ func = GFIELD(cmd_arg, CMD53_FUNCTION);
+ blocksize = MIN((int)sdioh_info->data_xfer_count,
+ sdioh_info->client_block_size[func]);
+ blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+
+ /* data_xfer_cnt is already setup so that for multiblock mode,
+ * it is the entire buffer length. For non-block or single block,
+ * it is < 64 bytes
+ */
+ if (use_dma) {
+ switch (sdioh_info->sd_dma_mode) {
+ case DMA_MODE_SDMA:
+ sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
+ __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
+ (uint32)sdioh_info->dma_phys));
+ sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+ break;
+ case DMA_MODE_ADMA1:
+ case DMA_MODE_ADMA2:
+ sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
+ sd_create_adma_descriptor(sdioh_info, 0,
+ sdioh_info->dma_phys, blockcount*blocksize,
+ ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
+ ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
+ /* Dump descriptor if DMA debugging is enabled. */
+ if (sd_msglevel & SDH_DMA_VAL) {
+ sd_dump_adma_dscr(sdioh_info);
+ }
+
+ sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
+ sdioh_info->adma2_dscr_phys);
+ break;
+ default:
+ sd_err(("%s: unsupported DMA mode %d.\n",
+ __FUNCTION__, sdioh_info->sd_dma_mode));
+ break;
+ }
+ }
+
+ sd_trace(("%s: Setting block count %d, block size %d bytes\n",
+ __FUNCTION__, blockcount, blocksize));
+ sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
+ sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
+
+ xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
+
+ if (sdioh_info->client_block_size[func] != blocksize)
+ set_client_block_size(sdioh_info, 1, blocksize);
+
+ if (blockcount > 1) {
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ } else {
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ }
+
+ if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ else
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+ PRES_DAT_INHIBIT) && --retries)
+ sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+ __FUNCTION__, cmd));
+ if (!retries) {
+ sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+
+ } else { /* Non block mode */
+ uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+ /* The byte/block count field only has 9 bits,
+ * so, to do a 512-byte bytemode transfer, this
+ * field will contain 0, but we need to tell the
+ * controller we're transferring 512 bytes.
+ */
+ if (bytes == 0) bytes = 512;
+
+ if (use_dma)
+ sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+
+ /* PCI: Transfer Mode register 0x0c */
+ xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ else
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+ /* See table 2-8 Host Controller spec ver 1.00 */
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Don't care */
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+
+ sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
+
+ sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
+
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+ PRES_DAT_INHIBIT) && --retries)
+ sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+ __FUNCTION__, cmd));
+ if (!retries) {
+ sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+ }
+ break;
+
+ default:
+ sd_err(("%s: Unknown command\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ }
+
+ /* Setup and issue the SDIO command */
+ sdstd_wreg(sdioh_info, SD_Arg0, arg);
+ sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
+
+ /* If we are in polled mode, wait for the command to complete.
+ * In interrupt mode, return immediately. The calling function will
+ * know that the command has completed when the CMDATDONE interrupt
+ * is asserted
+ */
+ if (sdioh_info->polled_mode) {
+ uint16 int_reg = 0;
+ int retries = RETRIES_LARGE;
+
+ do {
+ int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
+ } while (--retries &&
+ (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+ (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+ if (!retries) {
+ sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
+ "error stat 0x%x state 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
+ sdstd_rreg(sdioh_info, SD_PresentState)));
+
+ /* Attempt to reset CMD line when we get a CMD timeout */
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
+ SW_RESET_CMD)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+ return (ERROR);
+ }
+
+ /* Clear Command Complete interrupt */
+ int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+ sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
+
+ /* Check for Errors */
+ if (sdstd_check_errs(sdioh_info, cmd, arg)) {
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+ }
+ return SUCCESS;
+}
+
+
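+/* CMD53 data transfer: choose block or byte mode based on the length, issue
+ * the command, then either let DMA move the data or PIO it through the buffer
+ * data port, and finally wait for transfer complete and clear the status.
+ */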
+static int
+sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+ uint16 int_reg, int_bit;
+ uint flags;
+ int num_blocks, blocksize;
+ bool local_blockmode, local_dma;
+ bool read = rw == SDIOH_READ ? 1 : 0;
+ bool yield = FALSE;
+
+ ASSERT(nbytes);
+
+ cmd_arg = 0;
+
+ sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+ if (read) sd->r_cnt++; else sd->t_cnt++;
+
+ local_blockmode = sd->sd_blockmode;
+ local_dma = USE_DMA(sd);
+
+ /* Don't bother with block mode on small xfers */
+ if (nbytes < sd->client_block_size[func]) {
+ sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
+ nbytes, sd->client_block_size[func]));
+ local_blockmode = FALSE;
+ local_dma = FALSE;
+ }
+
+ if (local_blockmode) {
+ blocksize = MIN(sd->client_block_size[func], nbytes);
+ num_blocks = nbytes/blocksize;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+ } else {
+ num_blocks = 1;
+ blocksize = nbytes;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ }
+
+ if (local_dma && !read) {
+ bcopy(data, sd->dma_buf, nbytes);
+ sd_sync_dma(sd, read, nbytes);
+ }
+
+ if (fifo)
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+ if (read)
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = nbytes;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
+ sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+ return status;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
+ sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
+ "numblocks %d, blocksize %d\n",
+ __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
+
+ if (flags & 1)
+ sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
+ "bytes %d dma %d\n",
+ __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
+ GFIELD(cmd_arg, CMD53_BLK_MODE)));
+ if (flags & 0x8)
+ sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
+
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
+ __FUNCTION__, flags));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+#ifdef BCMSDYIELD
+ yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
+#endif
+
+ if (!local_dma) {
+ int bytes, i;
+ uint32 tmp;
+ for (i = 0; i < num_blocks; i++) {
+ int words;
+
+ /* Decide which status bit we're waiting for */
+ if (read)
+ int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
+ else
+ int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
+
+ /* If not on, wait for it (or for xfer error) */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit))
+ int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield);
+
+ /* Confirm we got the bit w/o error */
+ if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
+ sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
+ "errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, read ? "Read" : "Write", int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_dumpregs(sd);
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+
+ /* Clear Buf Ready bit */
+ sdstd_wreg16(sd, SD_IntrStatus, int_bit);
+
+ /* At this point we have Buffer Ready, write the data 4 bytes at a time */
+ for (words = blocksize/4; words; words--) {
+ if (read)
+ *data = sdstd_rreg(sd, SD_BufferDataPort0);
+ else
+ sdstd_wreg(sd, SD_BufferDataPort0, *data);
+ data++;
+ }
+
+ bytes = blocksize % 4;
+
+ /* If no leftover bytes, go to next block */
+ if (!bytes)
+ continue;
+
+ switch (bytes) {
+ case 1:
+ /* R/W 8 bits */
+ if (read)
+ *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
+ else
+ sdstd_wreg8(sd, SD_BufferDataPort0,
+ (uint8)(*(data++) & 0xff));
+ break;
+ case 2:
+ /* R/W 16 bits */
+ if (read)
+ *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+ else
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
+ break;
+ case 3:
+ /* R/W 24 bits:
+ * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
+ */
+ if (read) {
+ tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+ tmp |= ((uint32)(sdstd_rreg8(sd,
+ SD_BufferDataPort1)) << 16);
+ *(data++) = tmp;
+ } else {
+ tmp = *(data++);
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
+ sdstd_wreg8(sd, SD_BufferDataPort1,
+ (uint8)((tmp >> 16) & 0xff));
+ }
+ break;
+ default:
+ sd_err(("%s: Unexpected bytes leftover %d\n",
+ __FUNCTION__, bytes));
+ ASSERT(0);
+ break;
+ }
+ }
+ } /* End PIO processing */
+
+ /* Wait for Transfer Complete or Transfer Error */
+ int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
+
+ /* If not on, wait for it (or for xfer error) */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit))
+ int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield);
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ /* May have gotten a software timeout if not blocking? */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit)) {
+ sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
+ "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
+ __FUNCTION__, read ? "R" : "W", local_dma,
+ sdstd_rreg(sd, SD_PresentState), int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
+ sd->r_cnt, sd->t_cnt));
+ sdstd_dumpregs(sd);
+ return ERROR;
+ }
+
+ /* Clear the status bits */
+ int_reg = int_bit;
+ if (local_dma) {
+ /* DMA Complete */
+ /* Reads in particular don't have DMA_COMPLETE set */
+ int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
+ }
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* Fetch data */
+ if (local_dma && read) {
+ sd_sync_dma(sd, read, nbytes);
+ bcopy(sd->dma_buf, data, nbytes);
+ }
+ return SUCCESS;
+}
+
+static int
+set_client_block_size(sdioh_info_t *sd, int func, int block_size)
+{
+ int base;
+ int err = 0;
+
+
+ sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
+ sd->client_block_size[func] = block_size;
+
+ /* Set the block size in the SDIO Card register */
+ base = func * SDIOD_FBR_SIZE;
+ err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
+ if (!err) {
+ err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1,
+ (block_size >> 8) & 0xff);
+ }
+
+	/* Do not set the block size in the SDIO Host register; that is
+	 * function dependent and will be done on a per-transaction
+	 * basis.
+	 */
+
+ return (err ? BCME_SDIO_ERROR : 0);
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+ uint8 hreg;
+
+ /* Reset the attached device (use slower clock for safety) */
+ sdstd_start_clock(si, 128);
+ sdstd_reset(si, 0, 1);
+
+ /* Reset portions of the host state accordingly */
+ hreg = sdstd_rreg8(si, SD_HostCntrl);
+ hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
+ hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
+ si->sd_mode = SDIOH_MODE_SD1;
+
+ /* Reinitialize the card */
+ si->card_init_done = FALSE;
+ return sdstd_client_init(si);
+}
+
+
+static void
+sd_map_dma(sdioh_info_t * sd)
+{
+
+ void *va;
+
+ if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE,
+ &sd->dma_start_phys, 0x12, 12)) == NULL) {
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ sd->dma_start_buf = 0;
+ sd->dma_buf = (void *)0;
+ sd->dma_phys = 0;
+ sd->alloced_dma_size = SD_PAGE;
+ sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
+ } else {
+ sd->dma_start_buf = va;
+ sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+ sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
+ sd->alloced_dma_size = SD_PAGE;
+ sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n",
+ __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys));
+ sd_fill_dma_data_buf(sd, 0xA5);
+ }
+
+ if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE,
+ &sd->adma2_dscr_start_phys, 0x12, 12)) == NULL) {
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ sd->adma2_dscr_start_buf = 0;
+ sd->adma2_dscr_buf = (void *)0;
+ sd->adma2_dscr_phys = 0;
+ sd->alloced_adma2_dscr_size = 0;
+ sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
+ "Disabling DMA support.\n", __FUNCTION__));
+ } else {
+ sd->adma2_dscr_start_buf = va;
+ sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+ sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
+ sd->alloced_adma2_dscr_size = SD_PAGE;
+ }
+
+ sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n",
+ __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
+ sd->adma2_dscr_phys));
+ sd_clear_adma_dscr_buf(sd);
+}
+
+static void
+sd_unmap_dma(sdioh_info_t * sd)
+{
+ if (sd->dma_start_buf) {
+ DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
+ sd->dma_start_phys, 0x12);
+ }
+
+ if (sd->adma2_dscr_start_buf) {
+ DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
+ sd->adma2_dscr_start_phys, 0x12);
+ }
+}
+
+static void sd_clear_adma_dscr_buf(sdioh_info_t *sd)
+{
+ bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
+ sd_dump_adma_dscr(sd);
+}
+
+static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
+{
+ memset((char *)sd->dma_buf, data, SD_PAGE);
+}
+
+
+static void sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
+ uint32 addr_phys, uint16 length, uint16 flags)
+{
+ adma2_dscr_32b_t *adma2_dscr_table;
+ adma1_dscr_t *adma1_dscr_table;
+
+ adma2_dscr_table = sd->adma2_dscr_buf;
+ adma1_dscr_table = sd->adma2_dscr_buf;
+
+ switch (sd->sd_dma_mode) {
+ case DMA_MODE_ADMA2:
+ sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
+ __FUNCTION__, index));
+
+ adma2_dscr_table[index].phys_addr = addr_phys;
+ adma2_dscr_table[index].len_attr = length << 16;
+ adma2_dscr_table[index].len_attr |= flags;
+ break;
+ case DMA_MODE_ADMA1:
+ /* ADMA1 requires two descriptors, one for len
+ * and the other for data transfer
+ */
+ index <<= 1;
+
+ sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
+ __FUNCTION__, index));
+
+ adma1_dscr_table[index].phys_addr_attr = length << 12;
+ adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
+ ADMA2_ATTRIBUTE_VALID);
+ adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000;
+ adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f);
+ break;
+ default:
+ sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
+ __FUNCTION__, sd->sd_dma_mode));
+ break;
+ }
+}
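+
+/*
+ * Illustrative sketch only (not part of the driver): describing a single
+ * transfer of 'nbytes' from the physical address 'buf_phys' with one ADMA2
+ * descriptor, marked as the last entry of the table.  'buf_phys'/'nbytes'
+ * are hypothetical names; the flag values are the ADMA2_ATTRIBUTE_* bits
+ * decoded by sd_dump_adma_dscr() below.
+ *
+ *	sd_create_adma_descriptor(sd, 0, buf_phys, (uint16)nbytes,
+ *	                          ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
+ *	                          ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
+ */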
+
+
+static void sd_dump_adma_dscr(sdioh_info_t *sd)
+{
+ adma2_dscr_32b_t *adma2_dscr_table;
+ adma1_dscr_t *adma1_dscr_table;
+ uint32 i = 0;
+ uint16 flags;
+ char flags_str[32];
+
+ ASSERT(sd->adma2_dscr_buf != NULL);
+
+ adma2_dscr_table = sd->adma2_dscr_buf;
+ adma1_dscr_table = sd->adma2_dscr_buf;
+
+ switch (sd->sd_dma_mode) {
+ case DMA_MODE_ADMA2:
+ sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
+ SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
+ sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)"
+ " |\n"));
+ while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
+ flags = adma2_dscr_table->len_attr & 0xFFFF;
+ sprintf(flags_str, "%s%s%s%s",
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ",
+ (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
+ (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
+ (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+ sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
+ i, adma2_dscr_table, adma2_dscr_table->phys_addr,
+ adma2_dscr_table->len_attr >> 16, flags, flags_str));
+ i++;
+
+ /* Follow LINK descriptors or skip to next. */
+ if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) {
+ adma2_dscr_table = phys_to_virt(
+ adma2_dscr_table->phys_addr);
+ } else {
+ adma2_dscr_table++;
+ }
+
+ }
+ break;
+ case DMA_MODE_ADMA1:
+ sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
+ SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
+ sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n"));
+
+ for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
+ flags = adma1_dscr_table->phys_addr_attr & 0x3F;
+ sprintf(flags_str, "%s%s%s%s",
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ",
+ (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
+ (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
+ (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+ sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
+ i, adma1_dscr_table,
+ adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
+ flags, flags_str));
+
+ /* Follow LINK descriptors or skip to next. */
+ if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) {
+ adma1_dscr_table = phys_to_virt(
+ adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
+ } else {
+ adma1_dscr_table++;
+ }
+ }
+ break;
+ default:
+ sd_err(("Unknown DMA Descriptor Table Format.\n"));
+ break;
+ }
+}
+
+static void sdstd_dumpregs(sdioh_info_t *sd)
+{
+ sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
+ sdstd_rreg16(sd, SD_IntrStatus),
+ sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
+ sdstd_rreg16(sd, SD_IntrStatusEnable),
+ sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
+ sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
+ sdstd_rreg16(sd, SD_IntrSignalEnable),
+ sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdstd_linux.c b/drivers/net/wireless/bcm4329/bcmsdstd_linux.c
new file mode 100644
index 000000000000..a8b98e2054a0
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdstd_linux.c
@@ -0,0 +1,251 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver - linux portion
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd_linux.c,v 1.11.18.2.16.1 2010/08/17 17:03:13 Exp $
+ */
+
+#include <typedefs.h>
+#include <pcicfg.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <bcmsdstd.h>
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+ wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* Interrupt handler */
+static irqreturn_t
+sdstd_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+ sdioh_info_t *sd;
+ struct sdos_info *sdos;
+ bool ours;
+
+ sd = (sdioh_info_t *)dev_id;
+
+ if (!sd->card_init_done) {
+ sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+ return IRQ_RETVAL(FALSE);
+ } else {
+ ours = check_client_intr(sd);
+
+ /* For local interrupts, wake the waiting process */
+ if (ours && sd->got_hcint) {
+ sd_trace(("INTR->WAKE\n"));
+ sdos = (struct sdos_info *)sd->sdos_info;
+ wake_up_interruptible(&sdos->intr_wait_queue);
+ }
+ return IRQ_RETVAL(ours);
+ }
+}
+
+/* Register with Linux for interrupts */
+int
+sdstd_register_irq(sdioh_info_t *sd, uint irq)
+{
+ sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+ if (request_irq(irq, sdstd_isr, IRQF_SHARED, "bcmsdstd", sd) < 0) {
+ sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+ return ERROR;
+ }
+ return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+sdstd_free_irq(uint irq, sdioh_info_t *sd)
+{
+ free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+
+uint32 *
+sdstd_reg_map(osl_t *osh, int32 addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+sdstd_reg_unmap(osl_t *osh, int32 addr, int size)
+{
+ REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+sdstd_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ init_waitqueue_head(&sdos->intr_wait_queue);
+ return BCME_OK;
+}
+
+void
+sdstd_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ if (!(sd->host_init_done && sd->card_init_done)) {
+ sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable && !sd->lockcount)
+ sdstd_devintr_on(sd);
+ else
+ sdstd_devintr_off(sd);
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+sdstd_lock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (sd->lockcount) {
+ sd_err(("%s: Already locked! called from %p\n",
+ __FUNCTION__,
+ __builtin_return_address(0)));
+ ASSERT(sd->lockcount == 0);
+ }
+ sdstd_devintr_off(sd);
+ sd->lockcount++;
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Release the lock taken by sdstd_lock(); re-enable device interrupts if a client interrupt is enabled */
+void
+sdstd_unlock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+ ASSERT(sd->lockcount > 0);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+ sdstd_devintr_on(sd);
+ }
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
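+
+/*
+ * Illustrative usage sketch only (not part of the driver): request paths are
+ * expected to bracket host-controller access with the lock/unlock pair above
+ * so device interrupts stay masked while a request is in flight.  'sd' is
+ * assumed to be an initialized sdioh_info_t.
+ *
+ *	sdstd_lock(sd);			// masks device interrupts, takes the count
+ *	// ... issue CMD52/CMD53 work against the controller ...
+ *	sdstd_unlock(sd);		// re-enables interrupts if a client enabled them
+ */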
+
+uint16
+sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+
+#ifndef BCMSDYIELD
+ ASSERT(!yield);
+#endif
+ sd_trace(("%s: int 0x%02x err 0x%02x yield %d canblock %d\n",
+ __FUNCTION__, norm, err, yield, BLOCKABLE()));
+
+ /* Clear the "interrupt happened" flag and last intrstatus */
+ sd->got_hcint = FALSE;
+ sd->last_intrstatus = 0;
+
+#ifdef BCMSDYIELD
+ if (yield && BLOCKABLE()) {
+ /* Enable interrupts, wait for the indication, then disable */
+ sdstd_intrs_on(sd, norm, err);
+ wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+ sdstd_intrs_off(sd, norm, err);
+ } else
+#endif /* BCMSDYIELD */
+ {
+ sdstd_spinbits(sd, norm, err);
+ }
+
+ sd_trace(("%s: last_intrstatus 0x%04x\n", __FUNCTION__, sd->last_intrstatus));
+
+ return sd->last_intrstatus;
+}
diff --git a/drivers/net/wireless/bcm4329/bcmutils.c b/drivers/net/wireless/bcm4329/bcmutils.c
new file mode 100644
index 000000000000..43c04ee92f38
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmutils.c
@@ -0,0 +1,1838 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c,v 1.210.4.5.2.4.6.19 2010/04/26 06:05:25 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#include <bcmutils.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <siutils.h>
+#else
+#include <stdio.h>
+#include <string.h>
+/* This case for external supplicant use */
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#endif /* BCMDRIVER */
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+
+#ifdef BCMDRIVER
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ if (len < 0)
+ len = 4096; /* "infinite" */
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(PKTDATA(osh, p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(buf, PKTDATA(osh, p) + offset, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint
+pkttotlen(osl_t *osh, void *p)
+{
+ uint total;
+
+ total = 0;
+ for (; p; p = PKTNEXT(osh, p))
+ total += PKTLEN(osh, p);
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint
+pktsegcnt(osl_t *osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p))
+ cnt++;
+
+ return cnt;
+}
+
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void *
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void *
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void *
+pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ q->head = PKTLINK(p);
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = q->head;
+ }
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+}
+
+bool
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max = (uint16)max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
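+
+/*
+ * Illustrative sketch only (hypothetical names): a queue with 4 precedences
+ * holding at most 128 packets, fed and drained strictly by priority.  'pkt'
+ * is assumed to be a packet obtained from the usual OSL packet API.
+ *
+ *	struct pktq txq;
+ *	int prec_out;
+ *	void *p;
+ *
+ *	pktq_init(&txq, 4, 128);
+ *	pktq_penq(&txq, 2, pkt);		// enqueue at precedence 2
+ *	p = pktq_deq(&txq, &prec_out);		// dequeues the highest non-empty precedence
+ */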
+
+void *
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+{
+ int prec;
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir);
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void *
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+#endif /* BCMDRIVER */
+
+
+
+const unsigned char bcm_ctype[] = {
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */
+ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+ _BCM_C, /* 8-15 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */
+ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */
+ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */
+ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */
+ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */
+ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+ _BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */
+ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+ _BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
+ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(char *cp, char **endp, uint base)
+{
+ ulong result, last_result = 0, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus)
+ return (ulong)-1;
+ last_result = result;
+ cp++;
+ }
+
+ if (minus)
+ result = (ulong)(-(long)result);
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return (result);
+}
+
+int
+bcm_atoi(char *s)
+{
+ return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char*
+bcmstrstr(char *haystack, char *needle)
+{
+ int len, nlen;
+ int i;
+
+ if ((haystack == NULL) || (needle == NULL))
+ return (haystack);
+
+ nlen = strlen(needle);
+ len = strlen(haystack) - nlen + 1;
+
+ for (i = 0; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return (&haystack[i]);
+ return (NULL);
+}
+
+char*
+bcmstrcat(char *dest, const char *src)
+{
+ char *p;
+
+ p = dest + strlen(dest);
+
+ while ((*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+char*
+bcmstrncat(char *dest, const char *src, uint size)
+{
+ char *endp;
+ char *p;
+
+ p = dest + strlen(dest);
+ endp = p + size;
+
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+* but allows bcmstrtok() to be used on different strings or by different callers
+* at the same time. Each call modifies '*string' by substituting a NULL character for the
+* first delimiter that is encountered, and updates 'string' to point to the char
+* after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+ unsigned char *str;
+ unsigned long map[8];
+ int count;
+ char *nextoken;
+
+ if (tokdelim != NULL) {
+ /* Prime the token delimiter */
+ *tokdelim = '\0';
+ }
+
+ /* Clear control map */
+ for (count = 0; count < 8; count++) {
+ map[count] = 0;
+ }
+
+ /* Set bits in delimiter table */
+ do {
+ map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+ }
+ while (*delimiters++);
+
+ str = (unsigned char*)*string;
+
+ /* Find beginning of token (skip over leading delimiters). Note that
+ * there is no token iff this loop sets str to point to the terminal
+ * null (*str == '\0')
+ */
+ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+ str++;
+ }
+
+ nextoken = (char*)str;
+
+ /* Find the end of the token. If it is not the end of the string,
+ * put a null there.
+ */
+ for (; *str; str++) {
+ if (map[*str >> 5] & (1 << (*str & 31))) {
+ if (tokdelim != NULL) {
+ *tokdelim = *str;
+ }
+
+ *str++ = '\0';
+ break;
+ }
+ }
+
+ *string = (char*)str;
+
+ /* Determine if a token has been found. */
+ if (nextoken == (char *) str) {
+ return NULL;
+ }
+ else {
+ return nextoken;
+ }
+}
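+
+/*
+ * Illustrative sketch only (hypothetical option string): splitting a
+ * comma/equals separated string with bcmstrtok().  Note that the source
+ * string is modified in place.
+ *
+ *	char opts[] = "ssid=test,chan=6";
+ *	char *cursor = opts, *tok, delim;
+ *
+ *	while ((tok = bcmstrtok(&cursor, "=,", &delim)) != NULL)
+ *		printf("token '%s' (ended by '%c')\n", tok, delim);
+ */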
+
+
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose:     Compare two strings case insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns:     0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*              s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+ char dc, sc;
+
+ while (*s2 && *s1) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ }
+
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose:     Compare two strings case insensitively, up to a max of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns:     0 if the two strings compare equal over the first 'cnt'
+*              characters, -1 if s1 < s2 and 1 if s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+ char dc, sc;
+
+ while (*s2 && *s1 && cnt) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ cnt--;
+ }
+
+ if (!cnt) return 0;
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(char *p, struct ether_addr *ea)
+{
+ int i = 0;
+
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &p, 16);
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
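+
+/*
+ * Illustrative sketch only (arbitrary example address): round-tripping a MAC
+ * address through bcm_ether_atoe() and bcm_ether_ntoa() (defined below).  The
+ * 18-byte buffer matches the "xx:xx:xx:xx:xx:xx" plus NUL that ntoa emits.
+ *
+ *	struct ether_addr ea;
+ *	char buf[18];
+ *
+ *	if (bcm_ether_atoe("00:90:4c:c5:12:38", &ea))
+ *		printf("parsed %s\n", bcm_ether_ntoa(&ea, buf));
+ */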
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+ ulong copyct = 1;
+ ushort i;
+
+ if (abuflen == 0)
+ return 0;
+
+ /* wbuflen is in bytes */
+ wbuflen /= sizeof(ushort);
+
+ for (i = 0; i < wbuflen; ++i) {
+ if (--abuflen == 0)
+ break;
+ *abuf++ = (char) *wbuf++;
+ ++copyct;
+ }
+ *abuf = '\0';
+
+ return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+ static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x";
+ snprintf(buf, 18, template,
+ ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff,
+ ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff);
+ return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+ snprintf(buf, 16, "%d.%d.%d.%d",
+ ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+ return (buf);
+}
+
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+ uint i;
+
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
+}
+
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+ void *p;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ for (p = p0; p; p = PKTNEXT(osh, p))
+ prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif
+
+/* Takes an Ethernet frame and sets out-of-band PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint
+pktsetprio(void *pkt, bool update_vtag)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata;
+ int priority = 0;
+ int rc = 0;
+
+ pktdata = (uint8 *) PKTDATA(NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+ eh = (struct ether_header *) pktdata;
+
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
+ uint16 vlan_tag;
+ int vlan_prio, dscp_prio = 0;
+
+ evh = (struct ethervlan_header *)eh;
+
+ vlan_tag = ntoh16(evh->vlan_tag);
+ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+ if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+ uint8 tos_tc = IP_TOS(ip_body);
+ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ /* DSCP priority gets precedence over 802.1P (vlan tag) */
+ if (dscp_prio != 0) {
+ priority = dscp_prio;
+ rc |= PKTPRIO_VDSCP;
+ } else {
+ priority = vlan_prio;
+ rc |= PKTPRIO_VLAN;
+ }
+ /*
+ * If the DSCP priority is not the same as the VLAN priority,
+ * then overwrite the priority field in the vlan tag, with the
+ * DSCP priority value. This is required for Linux APs because
+	 * the VLAN driver on Linux overwrites the skb->priority field
+ * with the priority value in the vlan tag
+ */
+ if (update_vtag && (priority != vlan_prio)) {
+ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+ evh->vlan_tag = hton16(vlan_tag);
+ rc |= PKTPRIO_UPD;
+ }
+ } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ether_header);
+ uint8 tos_tc = IP_TOS(ip_body);
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ rc |= PKTPRIO_DSCP;
+ }
+
+ ASSERT(priority >= 0 && priority <= MAXPRIO);
+ PKTSETPRIO(pkt, priority);
+ return (rc | priority);
+}
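+
+/*
+ * Illustrative sketch only: classifying a transmit packet before queueing it,
+ * as a caller of pktsetprio() might.  'pkt' is assumed to hold a complete
+ * Ethernet frame.  The low bits of the return value (0..MAXPRIO) are the
+ * priority already stored via PKTSETPRIO(); the PKTPRIO_* bits say where it
+ * came from.
+ *
+ *	uint rc = pktsetprio(pkt, TRUE);	// TRUE: also rewrite the VLAN PCP if needed
+ *	uint prio = rc & 7;			// chosen priority, 0..MAXPRIO
+ *	if (rc & PKTPRIO_VDSCP)
+ *		;				// priority came from the IP DSCP field
+ */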
+
+static char bcm_undeferrstr[BCME_STRLEN];
+
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings */
+const char *
+bcmerrorstr(int bcmerror)
+{
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+ if (bcmerror > 0 || bcmerror < BCME_LAST) {
+ snprintf(bcm_undeferrstr, BCME_STRLEN, "Undefined error %d", bcmerror);
+ return bcm_undeferrstr;
+ }
+
+ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+ return bcmerrorstrtable[-bcmerror];
+}
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
+
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
+
+ ASSERT(table != NULL);
+
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
+ }
+ /* ran to end of table */
+
+ return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < (int)sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ } else if (len) {
+ /* Set is an action w/o parameters */
+ bcmerror = BCME_BUFTOOLONG;
+ }
+ break;
+
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+#endif /* BCMDRIVER */
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+STATIC const uint8 crc8_table[256] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+)
+{
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
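+
+/*
+ * Illustrative sketch only: generating and then checking a CRC-8 the way the
+ * header comment above describes.  'body' is assumed to hold 'len' payload
+ * bytes with one spare byte reserved at the end for the CRC.
+ *
+ *	uint8 crc = hndcrc8(body, len, CRC8_INIT_VALUE);
+ *	body[len] = ~crc;			// caller complements and appends the CRC
+ *
+ *	if (hndcrc8(body, len + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE)
+ *		;				// CRC checks out
+ */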
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++);
+ return crc;
+}
+
+STATIC const uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+uint32
+hndcrc32(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint32 crc /* either CRC32_INIT_VALUE or previous return value */
+)
+{
+ uint8 *pend;
+#ifdef __mips__
+ uint8 tmp[4];
+ ulong *tptr = (ulong *)tmp;
+
+ /* in case the beginning of the buffer isn't aligned */
+ pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc);
+ nbytes -= (pend - pdata);
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+
+ /* handle bulk of data as 32-bit words */
+ pend = pdata + (nbytes & 0xfffffffc);
+ while (pdata < pend) {
+ *tptr = *(ulong *)pdata;
+ pdata += sizeof(ulong *);
+ CRC_INNER_LOOP(32, crc, tmp[0]);
+ CRC_INNER_LOOP(32, crc, tmp[1]);
+ CRC_INNER_LOOP(32, crc, tmp[2]);
+ CRC_INNER_LOOP(32, crc, tmp[3]);
+ }
+
+ /* 1-3 bytes at end of buffer */
+ pend = pdata + (nbytes & 0x03);
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+#else
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+#endif /* __mips__ */
+
+ return crc;
+}
+
+#ifdef notdef
+#define CLEN 1499 /* CRC Length */
+#define CBUFSIZ (CLEN+4)
+#define CNBUFS 5 /* # of bufs */
+
+void testcrc32(void)
+{
+ uint j, k, l;
+ uint8 *buf;
+ uint len[CNBUFS];
+ uint32 crcr;
+ uint32 crc32tv[CNBUFS] =
+ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+ /* step through all possible alignments */
+ for (l = 0; l <= 4; l++) {
+ for (j = 0; j < CNBUFS; j++) {
+ len[j] = CLEN;
+ for (k = 0; k < len[j]; k++)
+ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+ }
+
+ for (j = 0; j < CNBUFS; j++) {
+ crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+ ASSERT(crcr == crc32tv[j]);
+ }
+ }
+
+ MFREE(buf, CBUFSIZ*CNBUFS);
+ return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ /* advance to next elt */
+ len = elt->len;
+ elt = (bcm_tlv_t*)(elt->data + len);
+ *buflen -= (2 + len);
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ return elt;
+}
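+
+/*
+ * Illustrative sketch only (hypothetical 'ies'/'ies_len' buffer of tagged
+ * elements): walking every TLV in a buffer with bcm_next_tlv().
+ *
+ *	bcm_tlv_t *elt = (bcm_tlv_t *)ies;
+ *	int remaining = ies_len;
+ *
+ *	if (!bcm_valid_tlv(elt, remaining))
+ *		elt = NULL;
+ *	while (elt != NULL) {
+ *		printf("id %u len %u\n", elt->id, elt->len);
+ *		elt = bcm_next_tlv(elt, &remaining);
+ *	}
+ */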
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+ totlen -= (len + 2);
+ }
+
+ return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ uint id = elt->id;
+ int len = elt->len;
+
+ /* Punt if we start seeing IDs > than target key */
+ if (id > key)
+ return (NULL);
+
+ /* validate remaining totlen */
+ if ((id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+ totlen -= (len + 2);
+ }
+ return NULL;
+}
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+ defined(DHD_DEBUG)
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+ int i;
+ char* p = buf;
+ char hexstr[16];
+ int slen = 0;
+ uint32 bit;
+ const char* name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+ len -= 1;
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags) {
+ /* print any unnamed bits */
+ sprintf(hexstr, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ slen += strlen(name);
+ if (len < slen)
+ break;
+ if (p != buf) p += sprintf(p, " "); /* btwn flag space */
+ strcat(p, name);
+ p += strlen(name);
+ flags &= ~bit;
+ len -= slen;
+ slen = 1; /* account for btwn flag space */
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len == 0)
+ p--; /* overwrite last char */
+ p += sprintf(p, ">");
+ }
+
+ return (int)(p - buf);
+}
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const uint8 *src = (const uint8*)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += sprintf(p, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+ char line[128], *p;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ p += sprintf(p, " %04d: ", i); /* line prefix */
+ }
+ p += sprintf(p, "%02x ", buf[i]);
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
+}
+#endif
+
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+ return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+ uint len, max_len;
+ char c;
+
+ len = strlen(buf);
+
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+ char *buf, uint32 bufsize)
+{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
+
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
+ break;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32)len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
+ }
+ return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
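+
+/*
+ * Usage sketch (illustrative only, not driver code): bcm_mkiovar() packs a
+ * NUL-terminated iovar name followed by its value into one buffer, which is
+ * then handed to the dongle as a WLC_SET_VAR/WLC_GET_VAR ioctl payload; the
+ * iovar name and value below are placeholders.
+ *
+ *	char iovbuf[32];
+ *	uint32 val = 1;
+ *	uint len = bcm_mkiovar("some_iovar", (char *)&val, sizeof(val),
+ *	                       iovbuf, sizeof(iovbuf));
+ *	if (len)
+ *		... send iovbuf (len bytes) with a WLC_SET_VAR ioctl ...
+ */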
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
+
+ /* scale the qdBm index up to the range of the table 0-40
+ * where an offset of 40 qdBm equals a factor of 10 mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
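+
+/*
+ * Worked example (editor's note): bcm_qdbm_to_mw(64), i.e. 16 dBm, starts at
+ * idx = 64 - 153 = -89; three factor-of-10 shifts give idx = 31, factor = 1000,
+ * so the result is (39811 + 500) / 1000 = 40 mW, matching 10^(16/10) ~= 39.8 mW.
+ */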
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+ uint8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+ nqdBm_to_mW_map[qdbm])/2;
+ if (mw_uint < boundary) break;
+ }
+
+ qdbm += (uint8)offset;
+
+ return (qdbm);
+}
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ /* clear the lowest set bit on each pass (Kernighan's method) */
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+
+ /* A non-ANSI-C99-compliant vsnprintf returns -1,
+ * an ANSI-compliant one returns r >= b->size on truncation,
+ * and bcmstdlib returns 0; handle all three cases.
+ */
+ if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
+ b->size = 0;
+ } else {
+ b->size -= r;
+ b->buf += r;
+ }
+
+ va_end(ap);
+
+ return r;
+}
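+
+/*
+ * Usage sketch (illustrative only, not driver code): callers wrap a fixed-size
+ * buffer with bcm_binit() and then append formatted text with bcm_bprintf();
+ * dhd_dump() in dhd_common.c (later in this change) follows this pattern.
+ *
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, buf, buflen);
+ *	bcm_bprintf(&b, "reqid %d\n", reqid);
+ */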
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+ int i;
+
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break;
+ amount = 1;
+ }
+}
+
+int
+bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes)
+{
+ int i;
+
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
+
+void
+bcm_print_bytes(char *name, const uchar *data, int len)
+{
+ int i;
+ int per_line = 0;
+
+ printf("%s: %d \n", name ? name : "", len);
+ for (i = 0; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
+ }
+ }
+ printf("\n");
+}
+
+/*
+ * buffer length needed for wlc_format_ssid
+ * 32 SSID chars, up to 4 output chars per SSID char (e.g. "\xFF"), plus a NUL terminator.
+ */
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
+
+ if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+ ASSERT(p < endp);
+
+ return (int)(p - buf);
+}
+#endif
+
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcm4329/bcmwifi.c b/drivers/net/wireless/bcm4329/bcmwifi.c
new file mode 100644
index 000000000000..803acf842a29
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmwifi.c
@@ -0,0 +1,199 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi.c,v 1.18.24.2.4.1 2009/09/25 00:32:01 Exp $
+ */
+
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#endif
+#include <bcmwifi.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>
+#endif
+
+
+
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band, *bw, *sb;
+ uint channel;
+
+ band = "";
+ bw = "";
+ sb = "";
+ channel = CHSPEC_CHANNEL(chspec);
+
+ if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+ if (CHSPEC_IS40(chspec)) {
+ if (CHSPEC_SB_UPPER(chspec)) {
+ sb = "u";
+ channel += CH_10MHZ_APART;
+ } else {
+ sb = "l";
+ channel -= CH_10MHZ_APART;
+ }
+ } else if (CHSPEC_IS10(chspec)) {
+ bw = "n";
+ }
+
+
+ snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+ return (buf);
+}
+
+
+chanspec_t
+wf_chspec_aton(char *a)
+{
+ char *endp = NULL;
+ uint channel, band, bw, ctl_sb;
+ char c;
+
+ channel = strtoul(a, &endp, 10);
+
+
+ if (endp == a)
+ return 0;
+
+ if (channel > MAXCHANNEL)
+ return 0;
+
+ band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+ a = endp;
+
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+
+
+ if (c == 'a' || c == 'b') {
+ band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+ a++;
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+ }
+
+
+ if (c == 'n') {
+ bw = WL_CHANSPEC_BW_10;
+ } else if (c == 'l') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+
+ if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+ channel += CH_10MHZ_APART;
+ else
+ return 0;
+ } else if (c == 'u') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+
+ if (channel > CH_20MHZ_APART)
+ channel -= CH_10MHZ_APART;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+
+done:
+ return (channel | band | bw | ctl_sb);
+}
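+
+/*
+ * Examples (editor's note): "6" parses to a 20 MHz 2.4 GHz chanspec for
+ * channel 6, while "36l" parses to a 40 MHz 5 GHz chanspec whose control
+ * channel 36 is the lower sideband of the pair centered on channel 38
+ * (the stored channel number is bumped by CH_10MHZ_APART).
+ */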
+
+
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base;
+ int offset;
+
+
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500)
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ else if (freq >= 5000 && freq <= 6000)
+ start_factor = WF_CHAN_FACTOR_5_G;
+ }
+
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+ return 14;
+
+ base = start_factor / 2;
+
+
+ if ((freq < base) || (freq > base + 1000))
+ return -1;
+
+ offset = freq - base;
+ ch = offset / 5;
+
+
+ if (offset != (ch * 5))
+ return -1;
+
+
+ if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+ return -1;
+
+ return ch;
+}
+
+
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+ int freq;
+
+ if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+ (ch > 200))
+ freq = -1;
+ else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+ freq = 2484;
+ else
+ freq = ch * 5 + start_factor / 2;
+
+ return freq;
+}
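+
+/*
+ * Worked example (editor's note): wf_mhz2channel(2437, 0) selects the 2.4 GHz
+ * start factor, so base = 2407 MHz and (2437 - 2407) / 5 = channel 6; in the
+ * other direction, wf_channel2mhz(36, WF_CHAN_FACTOR_5_G) gives
+ * 36 * 5 + 5000 = 5180 MHz.
+ */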
diff --git a/drivers/net/wireless/bcm4329/dhd.h b/drivers/net/wireless/bcm4329/dhd.h
new file mode 100644
index 000000000000..9b75ae68098a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd.h
@@ -0,0 +1,472 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h,v 1.32.4.7.2.4.14.49.4.9 2011/01/14 22:40:45 Exp $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#if defined(LINUX)
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+/* The kernel threading is sdio-specific */
+#else /* LINUX */
+#define ENOMEM 1
+#define EFAULT 2
+#define EINVAL 3
+#define EIO 4
+#define ETIMEDOUT 5
+#define ERESTARTSYS 6
+#endif /* LINUX */
+
+#include <wlioctl.h>
+
+#ifdef DHD_DEBUG
+#ifndef DHD_DEBUG_TRAP
+#define DHD_DEBUG_TRAP
+#endif
+#endif
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+ DHD_BUS_DOWN, /* Not ready for frame transfers */
+ DHD_BUS_LOAD, /* Download access only (CPU reset) */
+ DHD_BUS_DATA /* Ready for frame transfers */
+};
+
+enum dhd_bus_wake_state {
+ WAKE_LOCK_OFF,
+ WAKE_LOCK_PRIV,
+ WAKE_LOCK_DPC,
+ WAKE_LOCK_IOCTL,
+ WAKE_LOCK_DOWNLOAD,
+ WAKE_LOCK_TMOUT,
+ WAKE_LOCK_WATCHDOG,
+ WAKE_LOCK_LINK_DOWN_TMOUT,
+ WAKE_LOCK_PNO_FIND_TMOUT,
+ WAKE_LOCK_SOFTAP_SET,
+ WAKE_LOCK_SOFTAP_STOP,
+ WAKE_LOCK_SOFTAP_START,
+ WAKE_LOCK_SOFTAP_THREAD,
+ WAKE_LOCK_MAX
+};
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF,
+ DHD_PREALLOC_DATABUF,
+ DHD_PREALLOC_OSL_BUF
+};
+#ifdef DHD_USE_STATIC_BUF
+extern void * dhd_os_prealloc(int section, unsigned long size);
+#endif
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+ /* Linkage pointers */
+ osl_t *osh; /* OSL handle */
+ struct dhd_bus *bus; /* Bus module handle */
+ struct dhd_prot *prot; /* Protocol module handle */
+ struct dhd_info *info; /* Info module handle */
+
+ /* Internal dhd items */
+ bool up; /* Driver up/down (to OS) */
+ bool txoff; /* Transmit flow-controlled */
+ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */
+ enum dhd_bus_state busstate;
+ uint hdrlen; /* Total DHD header length (proto + bus) */
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ uint rxsz; /* Rx buffer size bus module should use */
+ uint8 wme_dp; /* wme discard priority */
+
+ /* Dongle media info */
+ bool iswl; /* Dongle-resident driver is wl */
+ ulong drv_version; /* Version of dongle-resident driver */
+ struct ether_addr mac; /* MAC address obtained from dongle */
+ dngl_stats_t dstats; /* Stats for dongle-based data */
+
+ /* Additional stats for the bus level */
+ ulong tx_packets; /* Data packets sent to dongle */
+ ulong tx_multicast; /* Multicast data packets sent to dongle */
+ ulong tx_errors; /* Errors in sending data to dongle */
+ ulong tx_ctlpkts; /* Control packets sent to dongle */
+ ulong tx_ctlerrs; /* Errors sending control frames to dongle */
+ ulong rx_packets; /* Packets sent up the network interface */
+ ulong rx_multicast; /* Multicast packets sent up the network interface */
+ ulong rx_errors; /* Errors processing rx data packets */
+ ulong rx_ctlpkts; /* Control frames processed from dongle */
+ ulong rx_ctlerrs; /* Errors in processing rx control frames */
+ ulong rx_dropped; /* Packets dropped locally (no memory) */
+ ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */
+ ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */
+
+ ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
+ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
+ ulong fc_packets; /* Number of flow control pkts recvd */
+
+ /* Last error return */
+ int bcmerror;
+ uint tickcnt;
+
+ /* Last error from dongle */
+ int dongle_error;
+
+ /* Suspend disable flag and "in suspend" flag */
+ int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+ int in_suspend; /* flag set to 1 when early suspend called */
+ int hang_was_sent; /* flag that message was sent at least once */
+#ifdef PNO_SUPPORT
+ int pno_enable; /* pno status : "1" is pno enable */
+#endif /* PNO_SUPPORT */
+ int dtim_skip; /* dtim skip; default 0 means wake each dtim */
+
+ /* Pkt filter definition */
+ char * pktfilter[100];
+ int pktfilter_count;
+
+ wl_country_t dhd_cspec; /* Current Locale info */
+ char eventmask[WL_EVENTING_MASK_LEN];
+
+#ifdef CONFIG_HAS_WAKELOCK
+ struct wake_lock wow_wakelock;
+#endif
+} dhd_pub_t;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+ #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define _DHD_PM_RESUME_WAIT(a, b) do { \
+ int retry = 0; \
+ smp_mb(); \
+ while (dhd_mmc_suspend && retry++ != b) { \
+ wait_event_interruptible_timeout(a, FALSE, 3 * HZ); \
+ } \
+ } while (0)
+ #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0)
+ #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9999; \
+ while ((exp) && (countdown >= 10000)) { \
+ wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+ countdown -= 10000; \
+ } \
+ } while (0)
+
+#else
+
+ #define DHD_PM_RESUME_WAIT_INIT(a)
+ #define DHD_PM_RESUME_WAIT(a)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a)
+ #define DHD_PM_RESUME_RETURN_ERROR(a)
+ #define DHD_PM_RESUME_RETURN
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a)
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) { \
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+ } while (0)
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
+
+inline static void NETIF_ADDR_LOCK(struct net_device *dev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+ netif_addr_lock_bh(dev);
+#endif
+}
+
+inline static void NETIF_ADDR_UNLOCK(struct net_device *dev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+ netif_addr_unlock_bh(dev);
+#endif
+}
+
+/* Wakelock Functions */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub);
+
+extern void dhd_os_start_lock(dhd_pub_t *pub);
+extern void dhd_os_start_unlock(dhd_pub_t *pub);
+extern unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+typedef struct dhd_if_event {
+ uint8 ifidx;
+ uint8 action;
+ uint8 flags;
+ uint8 bssidx;
+} dhd_if_event_t;
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* To allow osl_attach/detach calls from os-independent modules */
+osl_t *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(osl_t *osh);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* Query ioctl */
+extern int dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void * dhd_os_open_image(char * filename);
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+#if defined(OOB_INTR_ONLY)
+extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
+#endif /* defined(OOB_INTR_ONLY) */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+
+typedef struct {
+ uint32 limit; /* Expiration time (usec) */
+ uint32 increment; /* Current expiration increment (usec) */
+ uint32 elapsed; /* Current elapsed time (usec) */
+ uint32 tick; /* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
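+
+/*
+ * Usage sketch (illustrative only, not driver code): the timeout helpers are
+ * typically used as a polling guard; "condition" is a placeholder.
+ *
+ *	dhd_timeout_t tmo;
+ *	dhd_timeout_start(&tmo, 5000);
+ *	while (!condition && !dhd_timeout_expired(&tmo))
+ *		;
+ */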
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern uint8 *dhd_bssidx2bssid(dhd_pub_t *dhd, int idx);
+extern int wl_host_event(struct dhd_info *dhd, int *idx, void *pktdata,
+ wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern void dhd_common_init(void);
+
+extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+ char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
+extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int dhd_bus_start(dhd_pub_t *dhdp);
+
+extern void print_buf(void *pbuf, int len, int bytes_per_line);
+
+
+typedef enum cust_gpio_modes {
+ WLAN_RESET_ON,
+ WLAN_RESET_OFF,
+ WLAN_POWER_ON,
+ WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+extern int net_os_send_hang_message(struct net_device *dev);
+
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+#endif /* defined(DHD_DEBUG) */
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#define DHD_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+
+/* Default KEEP_ALIVE period is 55 sec, to keep the AP from sending keep-alive probe frames */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN 2048
+extern char fw_path[MOD_PARAM_PATHLEN];
+extern char nv_path[MOD_PARAM_PATHLEN];
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS 16
+#define DHD_DEL_IF -0xe
+#define DHD_BAD_IF -0xf
+
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+/* dhd_common ARP offload wrappers */
+extern void dhd_arp_cleanup(dhd_pub_t *dhd);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, u32 ipaddr);
+
+#define DHD_UNICAST_FILTER_NUM 0
+#define DHD_BROADCAST_FILTER_NUM 1
+#define DHD_MULTICAST4_FILTER_NUM 2
+#define DHD_MULTICAST6_FILTER_NUM 3
+extern int net_os_set_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_bus.h b/drivers/net/wireless/bcm4329/dhd_bus.h
new file mode 100644
index 000000000000..97af41b313d0
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_bus.h
@@ -0,0 +1,93 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h,v 1.4.6.3.2.3.6.7 2010/08/13 01:35:24 Exp $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Send a data frame to the dongle. Callee disposes of txp. */
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* DHD_DEBUG */
+
+/* Deferred processing for the bus, return TRUE requests reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_cdc.c b/drivers/net/wireless/bcm4329/dhd_cdc.c
new file mode 100644
index 000000000000..4bec0b606dc9
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_cdc.c
@@ -0,0 +1,535 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c,v 1.22.4.2.4.7.2.41 2010/06/23 19:58:18 Exp $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload).
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
+ * defined in dhd_sdio.c (amount of header that might be added)
+ * plus any space that might be needed for alignment padding.
+ */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+ * round off at the end of buffer
+ */
+
+typedef struct dhd_prot {
+ uint16 reqid;
+ uint8 pending;
+ uint32 lastcmd;
+ uint8 bus_header[BUS_HEADER_LEN];
+ cdc_ioctl_t msg;
+ unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+ int ret;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_wake_lock(dhd);
+
+ /* NOTE : cdc->msg.len holds the desired length of the buffer to be
+ * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+ * is actually sent to the dongle
+ */
+ if (len > CDC_MAX_MSG_SIZE)
+ len = CDC_MAX_MSG_SIZE;
+
+ /* Send request */
+ ret = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+ dhd_os_wake_unlock(dhd);
+ return ret;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+ int ret;
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ do {
+ ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, len+sizeof(cdc_ioctl_t));
+ if (ret < 0)
+ break;
+ } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+ return ret;
+}
+
+int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ void *info;
+ int ret = 0, retries = 0;
+ uint32 id, flags = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+ /* Answer "bcmerror" and "bcmerrorstr" queries from the local cache */
+ if (cmd == WLC_GET_VAR && buf)
+ {
+ if (!strcmp((char *)buf, "bcmerrorstr"))
+ {
+ strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+ goto done;
+ }
+ else if (!strcmp((char *)buf, "bcmerror"))
+ {
+ *(int *)buf = dhd->dongle_error;
+ goto done;
+ }
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ if (!dhd->hang_was_sent)
+ DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if ((id < prot->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check info buffer */
+ info = (void*)&msg[1];
+
+ /* Copy info buffer */
+ if (buf)
+ {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, info, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ int ret = 0;
+ uint32 flags, id;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT) | CDCF_IOC_SET;
+ CDC_SET_IF_IDX(msg, ifidx);
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0)
+ goto done;
+
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
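+
+/*
+ * Note on the request/response flow above: each CDC request carries an
+ * incrementing reqid in its flags word; dhdcdc_msg() pushes the cdc_ioctl_t
+ * over the bus control channel and dhdcdc_cmplt() polls dhd_bus_rxctl() until
+ * a response with the matching id arrives. A dongle-side failure is signalled
+ * via CDCF_IOC_ERROR, with the error code returned in msg->status and cached
+ * in dhd->dongle_error.
+ */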
+
+extern int dhd_bus_interface(struct dhd_bus *bus, uint arg, void* arg2);
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = -1;
+
+ if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return ret;
+ }
+ dhd_os_proto_block(dhd);
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+ if (len > WLC_IOCTL_MAXLEN)
+ goto done;
+
+ if (prot->pending == TRUE) {
+ DHD_TRACE(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd));
+ if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+ DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+ }
+ goto done;
+ }
+
+ prot->pending = TRUE;
+ prot->lastcmd = ioc->cmd;
+ if (ioc->set)
+ ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len);
+ else {
+ ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len);
+ if (ret > 0)
+ ioc->used = ret - sizeof(cdc_ioctl_t);
+ }
+
+ /* Too many programs assume ioctl() returns 0 on success */
+ if (ret >= 0)
+ ret = 0;
+ else {
+ cdc_ioctl_t *msg = &prot->msg;
+ ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+ }
+
+ /* Intercept the wme_dp ioctl here */
+ if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+ prot->pending = FALSE;
+
+done:
+ dhd_os_proto_unblock(dhd);
+
+ return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+}
+
+
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif /* BDC */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ /* Push the BDC header used to convey priority on buses that don't carry it natively */
+
+
+ PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN);
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(pktbuf))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+ h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->rssi = 0;
+#endif /* BDC */
+ BDC_SET_IF_IDX(h, ifidx);
+}
+
+
+bool
+dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, uint8 *fcbits)
+{
+#ifdef BDC
+ struct bdc_header *h;
+
+ if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n",
+ __FUNCTION__, PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ *fcbits = h->priority >> BDC_PRIORITY_FC_SHIFT;
+ if ((h->flags2 & BDC_FLAG2_FC_FLAG) == BDC_FLAG2_FC_FLAG)
+ return TRUE;
+#endif
+ return FALSE;
+}
+
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ /* Pop the BDC header used to convey priority on buses that don't carry it natively */
+
+ if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+ __FUNCTION__, *ifidx));
+ return BCME_ERROR;
+ }
+
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+ DHD_ERROR(("%s: non-BDC packet received, flags 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ return BCME_ERROR;
+ }
+
+ if (h->flags & BDC_FLAG_SUM_GOOD) {
+ DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ PKTSETSUMGOOD(pktbuf, TRUE);
+ }
+
+ PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+
+ PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+ return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *cdc;
+
+#ifndef DHD_USE_STATIC_BUF
+ if (!(cdc = (dhd_prot_t *)MALLOC(dhd->osh, sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+#else
+ if (!(cdc = (dhd_prot_t *)dhd_os_prealloc(DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+#endif /* DHD_USE_STATIC_BUF */
+ memset(cdc, 0, sizeof(dhd_prot_t));
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+ DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+ goto fail;
+ }
+
+ dhd->prot = cdc;
+#ifdef BDC
+ dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+ dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+ return 0;
+
+fail:
+#ifndef DHD_USE_STATIC_BUF
+ if (cdc != NULL)
+ MFREE(dhd->osh, cdc, sizeof(dhd_prot_t));
+#endif
+ return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifndef DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif
+ dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ /* No stats from dongle added yet, copy bus stats */
+ dhd->dstats.tx_packets = dhd->tx_packets;
+ dhd->dstats.tx_errors = dhd->tx_errors;
+ dhd->dstats.rx_packets = dhd->rx_packets;
+ dhd->dstats.rx_errors = dhd->rx_errors;
+ dhd->dstats.rx_dropped = dhd->rx_dropped;
+ dhd->dstats.multicast = dhd->rx_multicast;
+ return;
+}
+
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ char buf[128];
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_proto_block(dhd);
+
+ /* Get the device MAC address */
+ strcpy(buf, "cur_etheraddr");
+ ret = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf));
+ if (ret < 0) {
+ dhd_os_proto_unblock(dhd);
+ return ret;
+ }
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+ dhd_os_proto_unblock(dhd);
+
+#ifdef EMBEDDED_PLATFORM
+ ret = dhd_preinit_ioctls(dhd);
+#endif /* EMBEDDED_PLATFORM */
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+
+ return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+ /* Nothing to do for CDC */
+}
diff --git a/drivers/net/wireless/bcm4329/dhd_common.c b/drivers/net/wireless/bcm4329/dhd_common.c
new file mode 100644
index 000000000000..f7cd372d68c8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_common.c
@@ -0,0 +1,2432 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.25 2011-02-11 21:16:02 Exp $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#include <wlioctl.h>
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+int wifi_get_mac_addr(unsigned char *buf);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+int dhd_msg_level;
+
+#include <wl_iw.h>
+
+char fw_path[MOD_PARAM_PATHLEN];
+char nv_path[MOD_PARAM_PATHLEN];
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_wl_ioctl(dhd_pub_t *dhd, uint cmd, char *buf, uint buflen);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+
+#if defined(SOFTAP)
+extern bool ap_fw_loaded;
+#endif
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on);
+#endif /* KEEP_ALIVE */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#ifdef DHD_DEBUG
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
+ __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+/* IOVar table */
+enum {
+ IOV_VERSION = 1,
+ IOV_MSGLEVEL,
+ IOV_BCMERRORSTR,
+ IOV_BCMERROR,
+ IOV_WDTICK,
+ IOV_DUMP,
+#ifdef DHD_DEBUG
+ IOV_CONS,
+ IOV_DCONSOLE_POLL,
+#endif
+ IOV_CLEARCOUNTS,
+ IOV_LOGDUMP,
+ IOV_LOGCAL,
+ IOV_LOGSTAMP,
+ IOV_GPIOOB,
+ IOV_IOCTLTIMEOUT,
+ IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+ {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+ {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+ {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN },
+ {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 },
+ {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 },
+ {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 },
+ {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 },
+#endif
+ {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 },
+ {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 },
+ {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+void
+dhd_common_init(void)
+{
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver. Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behaviour since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent initializations.
+ */
+ dhd_msg_level = DHD_ERROR_VAL;
+#ifdef CONFIG_BCM4329_FW_PATH
+ strncpy(fw_path, CONFIG_BCM4329_FW_PATH, MOD_PARAM_PATHLEN-1);
+#else
+ fw_path[0] = '\0';
+#endif
+#ifdef CONFIG_BCM4329_NVRAM_PATH
+ strncpy(nv_path, CONFIG_BCM4329_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+#else
+ nv_path[0] = '\0';
+#endif
+}
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+
+ bcm_binit(strbuf, buf, buflen);
+
+ /* Base DHD info */
+ bcm_bprintf(strbuf, "%s\n", dhd_version);
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+ dhdp->up, dhdp->txoff, dhdp->busstate);
+ bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+ dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+ bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+ dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+ bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
+
+ bcm_bprintf(strbuf, "dongle stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
+ dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+ dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+ bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
+ dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+ dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+ bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
+
+ bcm_bprintf(strbuf, "bus stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+ dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+ bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+ dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+ bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
+ dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+ bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld rx_flushed %ld\n",
+ dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped, dhdp->rx_flushed);
+ bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld fc_packets %ld\n",
+ dhdp->rx_readahead_cnt, dhdp->tx_realloc, dhdp->fc_packets);
+ bcm_bprintf(strbuf, "wd_dpc_sched %ld\n", dhdp->wd_dpc_sched);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any prot info */
+ dhd_prot_dump(dhdp, strbuf);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any bus info */
+ dhd_bus_dump(dhdp, strbuf);
+
+ return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_VERSION):
+ /* Need to have checked buffer length */
+ strncpy((char*)arg, dhd_version, len);
+ break;
+
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)dhd_msg_level;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ dhd_msg_level = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BCMERRORSTR):
+ strncpy((char *)arg, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+ ((char *)arg)[BCME_STRLEN - 1] = 0x00;
+ break;
+
+ case IOV_GVAL(IOV_BCMERROR):
+ int_val = (int32)dhd_pub->bcmerror;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_WDTICK):
+ int_val = (int32)dhd_watchdog_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WDTICK):
+ if (!dhd_pub->up) {
+ bcmerror = BCME_NOTUP;
+ break;
+ }
+ dhd_os_wd_timer(dhd_pub, (uint)int_val);
+ break;
+
+ case IOV_GVAL(IOV_DUMP):
+ bcmerror = dhd_dump(dhd_pub, arg, len);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_DCONSOLE_POLL):
+ int_val = (int32)dhd_console_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DCONSOLE_POLL):
+ dhd_console_ms = (uint)int_val;
+ break;
+
+ case IOV_SVAL(IOV_CONS):
+ if (len > 0)
+ bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+ break;
+#endif
+
+ case IOV_SVAL(IOV_CLEARCOUNTS):
+ dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+ dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+ dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+ dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+ dhd_pub->rx_dropped = 0;
+ dhd_pub->rx_readahead_cnt = 0;
+ dhd_pub->tx_realloc = 0;
+ dhd_pub->wd_dpc_sched = 0;
+ memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+ dhd_bus_clearcounts(dhd_pub);
+ break;
+
+
+ case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+ int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+ if (int_val <= 0)
+ bcmerror = BCME_BADARG;
+ else
+ dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+ break;
+ }
+
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+ /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+ * because an encryption/rsn mismatch results in both events, and
+ * the important information is in the WLC_E_PRUNE.
+ */
+ if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+ dhd_conn_event == WLC_E_PRUNE)) {
+ dhd_conn_event = event;
+ dhd_conn_status = status;
+ dhd_conn_reason = reason;
+ }
+}
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+ void *p;
+ int eprec = -1; /* precedence to evict from */
+ bool discard_oldest;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+ pktq_penq(q, prec, pkt);
+ return TRUE;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec))
+ eprec = prec;
+ else if (pktq_full(q)) {
+ p = pktq_peek_tail(q, &eprec);
+ ASSERT(p);
+ if (eprec > prec)
+ return FALSE;
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ ASSERT(!pktq_pempty(q, eprec));
+ discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+ if (eprec == prec && !discard_oldest)
+ return FALSE; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+ if (p == NULL) {
+ DHD_ERROR(("%s: pktq_penq() failed, oldest %d.",
+ __FUNCTION__, discard_oldest));
+ ASSERT(p);
+ }
+
+ PKTFREE(dhdp->osh, p, TRUE);
+ }
+
+ /* Enqueue */
+ p = pktq_penq(q, prec, pkt);
+ if (p == NULL) {
+ DHD_ERROR(("%s: pktq_penq() failed.", __FUNCTION__));
+ ASSERT(p);
+ }
+
+ return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+ int val_size;
+ const bcm_iovar_t *vi = NULL;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
+{
+ int bcmerror = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!buf) return BCME_BADARG;
+
+ switch (ioc->cmd) {
+ case DHD_GET_MAGIC:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_MAGIC;
+ break;
+
+ case DHD_GET_VERSION:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_VERSION;
+ break;
+
+ case DHD_GET_VAR:
+ case DHD_SET_VAR: {
+ char *arg;
+ uint arglen;
+
+ /* scan past the name to any arguments */
+ for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--);
+
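+ /* If no NUL was found within buflen, the buffer cannot hold a
+ * complete "name\0arguments" request.
+ */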
+ if (*arg) {
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+
+ /* account for the NUL terminator */
+ arg++, arglen--;
+
+ /* call with the appropriate arguments */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+ buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+ if (bcmerror != BCME_UNSUPPORTED)
+ break;
+
+ /* not in generic table, try protocol module */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ if (bcmerror != BCME_UNSUPPORTED)
+ break;
+
+ /* if still not found, try bus module */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ arg, arglen, buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+ uint i, status, reason;
+ bool group = FALSE, flush_txq = FALSE, link = FALSE;
+ char *auth_str, *event_name;
+ uchar *buf;
+ char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+ static struct {uint event; char *event_name;} event_names[] = {
+ {WLC_E_SET_SSID, "SET_SSID"},
+ {WLC_E_JOIN, "JOIN"},
+ {WLC_E_START, "START"},
+ {WLC_E_AUTH, "AUTH"},
+ {WLC_E_AUTH_IND, "AUTH_IND"},
+ {WLC_E_DEAUTH, "DEAUTH"},
+ {WLC_E_DEAUTH_IND, "DEAUTH_IND"},
+ {WLC_E_ASSOC, "ASSOC"},
+ {WLC_E_ASSOC_IND, "ASSOC_IND"},
+ {WLC_E_REASSOC, "REASSOC"},
+ {WLC_E_REASSOC_IND, "REASSOC_IND"},
+ {WLC_E_DISASSOC, "DISASSOC"},
+ {WLC_E_DISASSOC_IND, "DISASSOC_IND"},
+ {WLC_E_QUIET_START, "START_QUIET"},
+ {WLC_E_QUIET_END, "END_QUIET"},
+ {WLC_E_BEACON_RX, "BEACON_RX"},
+ {WLC_E_LINK, "LINK"},
+ {WLC_E_MIC_ERROR, "MIC_ERROR"},
+ {WLC_E_NDIS_LINK, "NDIS_LINK"},
+ {WLC_E_ROAM, "ROAM"},
+ {WLC_E_TXFAIL, "TXFAIL"},
+ {WLC_E_PMKID_CACHE, "PMKID_CACHE"},
+ {WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF"},
+ {WLC_E_PRUNE, "PRUNE"},
+ {WLC_E_AUTOAUTH, "AUTOAUTH"},
+ {WLC_E_EAPOL_MSG, "EAPOL_MSG"},
+ {WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE"},
+ {WLC_E_ADDTS_IND, "ADDTS_IND"},
+ {WLC_E_DELTS_IND, "DELTS_IND"},
+ {WLC_E_BCNSENT_IND, "BCNSENT_IND"},
+ {WLC_E_BCNRX_MSG, "BCNRX_MSG"},
+ {WLC_E_BCNLOST_MSG, "BCNLOST_MSG"},
+ {WLC_E_ROAM_PREP, "ROAM_PREP"},
+ {WLC_E_PFN_NET_FOUND, "PNO_NET_FOUND"},
+ {WLC_E_PFN_NET_LOST, "PNO_NET_LOST"},
+ {WLC_E_RESET_COMPLETE, "RESET_COMPLETE"},
+ {WLC_E_JOIN_START, "JOIN_START"},
+ {WLC_E_ROAM_START, "ROAM_START"},
+ {WLC_E_ASSOC_START, "ASSOC_START"},
+ {WLC_E_IBSS_ASSOC, "IBSS_ASSOC"},
+ {WLC_E_RADIO, "RADIO"},
+ {WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG"},
+ {WLC_E_PROBREQ_MSG, "PROBREQ_MSG"},
+ {WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"},
+ {WLC_E_PSK_SUP, "PSK_SUP"},
+ {WLC_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"},
+ {WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"},
+ {WLC_E_ICV_ERROR, "ICV_ERROR"},
+ {WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"},
+ {WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"},
+ {WLC_E_TRACE, "TRACE"},
+ {WLC_E_ACTION_FRAME, "ACTION FRAME"},
+ {WLC_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"},
+ {WLC_E_IF, "IF"},
+ {WLC_E_RSSI, "RSSI"},
+ {WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
+ };
+ uint event_type, flags, auth_type, datalen;
+ event_type = ntoh32(event->event_type);
+ flags = ntoh16(event->flags);
+ status = ntoh32(event->status);
+ reason = ntoh32(event->reason);
+ auth_type = ntoh32(event->auth_type);
+ datalen = ntoh32(event->datalen);
+ /* debug dump of event messages */
+ sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ (uchar)event->addr.octet[0]&0xff,
+ (uchar)event->addr.octet[1]&0xff,
+ (uchar)event->addr.octet[2]&0xff,
+ (uchar)event->addr.octet[3]&0xff,
+ (uchar)event->addr.octet[4]&0xff,
+ (uchar)event->addr.octet[5]&0xff);
+
+ event_name = "UNKNOWN";
+ for (i = 0; i < ARRAYSIZE(event_names); i++) {
+ if (event_names[i].event == event_type)
+ event_name = event_names[i].event_name;
+ }
+
+ DHD_EVENT(("EVENT: %s, event ID = %d\n", event_name, event_type));
+
+ if (flags & WLC_EVENT_MSG_LINK)
+ link = TRUE;
+ if (flags & WLC_EVENT_MSG_GROUP)
+ group = TRUE;
+ if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+ flush_txq = TRUE;
+
+ switch (event_type) {
+ case WLC_E_START:
+ case WLC_E_DEAUTH:
+ case WLC_E_DISASSOC:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_ASSOC:
+ case WLC_E_REASSOC:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+ event_name, eabuf, (int)reason));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+ event_name, eabuf, (int)status));
+ }
+ break;
+
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+ break;
+
+ case WLC_E_AUTH:
+ case WLC_E_AUTH_IND:
+ if (auth_type == DOT11_OPEN_SYSTEM)
+ auth_str = "Open System";
+ else if (auth_type == DOT11_SHARED_KEY)
+ auth_str = "Shared Key";
+ else {
+ sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+ auth_str = err_msg;
+ }
+ if (event_type == WLC_E_AUTH_IND) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+ event_name, eabuf, auth_str, (int)reason));
+ }
+
+ break;
+
+ case WLC_E_JOIN:
+ case WLC_E_ROAM:
+ case WLC_E_SET_SSID:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+ } else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status));
+ }
+ break;
+
+ case WLC_E_BEACON_RX:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+ }
+ break;
+
+ case WLC_E_LINK:
+ DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+ break;
+
+ case WLC_E_MIC_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+ event_name, eabuf, group, flush_txq));
+ break;
+
+ case WLC_E_ICV_ERROR:
+ case WLC_E_UNICAST_DECODE_ERROR:
+ case WLC_E_MULTICAST_DECODE_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+ event_name, eabuf));
+ break;
+
+ case WLC_E_TXFAIL:
+ DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_SCAN_COMPLETE:
+ case WLC_E_PMKID_CACHE:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PFN_NET_FOUND:
+ case WLC_E_PFN_NET_LOST:
+ case WLC_E_PFN_SCAN_COMPLETE:
+ DHD_EVENT(("PNOEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PSK_SUP:
+ case WLC_E_PRUNE:
+ DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+ event_name, (int)status, (int)reason));
+ break;
+
+ case WLC_E_TRACE:
+ {
+ static uint32 seqnum_prev = 0;
+ msgtrace_hdr_t hdr;
+ uint32 nblost;
+ char *s, *p;
+
+ buf = (uchar *) event_data;
+ memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+ if (hdr.version != MSGTRACE_VERSION) {
+ printf("\nMACEVENT: %s [unsupported version --> "
+ "dhd version:%d dongle version:%d]\n",
+ event_name, MSGTRACE_VERSION, hdr.version);
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+ }
+
+ /* There are 2 bytes available at the end of data */
+ buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+ if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+ printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+ "discarded_bytes %d discarded_printf %d]\n",
+ ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+ }
+
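+ /* Each trace record carries a sequence number; a gap larger than one
+ * relative to the previous record means trace events were lost.
+ */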
+ nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+ if (nblost > 0) {
+ printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
+ ntoh32(hdr.seqnum), nblost);
+ }
+ seqnum_prev = ntoh32(hdr.seqnum);
+
+ /* Display the trace buffer. Advance from newline to newline to avoid
+ * issuing one big printf at once (an issue with the Linux printk).
+ */
+ p = (char *)&buf[MSGTRACE_HDRLEN];
+ while ((s = strstr(p, "\n")) != NULL) {
+ *s = '\0';
+ printf("%s\n", p);
+ p = s + 1;
+ }
+ printf("%s\n", p);
+
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ }
+ break;
+
+
+ case WLC_E_RSSI:
+ DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+ break;
+
+ default:
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type));
+ break;
+ }
+
+ /* show any appended data */
+ if (datalen) {
+ buf = (uchar *) event_data;
+ DHD_EVENT((" data (%d) : ", datalen));
+ for (i = 0; i < datalen; i++)
+ DHD_EVENT((" 0x%02x ", *buf++));
+ DHD_EVENT(("\n"));
+ }
+}
+#endif /* SHOW_EVENTS */
+
+int
+wl_host_event(struct dhd_info *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data_ptr)
+{
+ /* check whether packet is a BRCM event pkt */
+ bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+ char *event_data;
+ uint32 type, status;
+ uint16 flags;
+ int evlen;
+
+ if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+ DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+ return (BCME_ERROR);
+ }
+
+ /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+ if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+ DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+ return (BCME_ERROR);
+ }
+
+ *data_ptr = &pvt_data[1];
+ event_data = *data_ptr;
+
+ /* memcpy since BRCM event pkt may be unaligned. */
+ memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+ type = ntoh32_ua((void *)&event->event_type);
+ flags = ntoh16_ua((void *)&event->flags);
+ status = ntoh32_ua((void *)&event->status);
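+ /* Length handed to the supplicant: event payload plus the bcm_event_t header */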
+ evlen = ntoh32_ua((void *)&event->datalen) + sizeof(bcm_event_t);
+
+ switch (type) {
+ case WLC_E_IF:
+ {
+ dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+ DHD_TRACE(("%s: if event\n", __FUNCTION__));
+
+ if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS)
+ {
+ if (ifevent->action == WLC_E_IF_ADD)
+ dhd_add_if(dhd, ifevent->ifidx,
+ NULL, event->ifname,
+ pvt_data->eth.ether_dhost,
+ ifevent->flags, ifevent->bssidx);
+ else
+ dhd_del_if(dhd, ifevent->ifidx);
+ } else {
+ DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+ __FUNCTION__, ifevent->ifidx, event->ifname));
+ }
+ }
+ /* send up the if event: btamp user needs it */
+ *ifidx = dhd_ifname2idx(dhd, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd, (char *)pvt_data, evlen, *ifidx);
+ break;
+
+
+#ifdef P2P
+ case WLC_E_NDIS_LINK:
+ break;
+#endif
+ /* fall through */
+ /* These are what external supplicant/authenticator wants */
+ case WLC_E_LINK:
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+ case WLC_E_DISASSOC_IND:
+ case WLC_E_MIC_ERROR:
+ default:
+ /* Fall through: this should get _everything_ */
+
+ *ifidx = dhd_ifname2idx(dhd, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd, (char *)pvt_data, evlen, *ifidx);
+ DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
+
+ /* put it back to WLC_E_NDIS_LINK */
+ if (type == WLC_E_NDIS_LINK) {
+ uint32 temp;
+
+ temp = ntoh32_ua((void *)&event->event_type);
+ DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+
+ temp = ntoh32(WLC_E_NDIS_LINK);
+ memcpy((void *)(&pvt_data->event.event_type), &temp,
+ sizeof(pvt_data->event.event_type));
+ }
+ break;
+ }
+
+#ifdef SHOW_EVENTS
+ wl_show_host_event(event, event_data);
+#endif /* SHOW_EVENTS */
+
+ return (BCME_OK);
+}
+
+
+void
+wl_event_to_host_order(wl_event_msg_t *evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void print_buf(void *pbuf, int len, int bytes_per_line)
+{
+ int i, j = 0;
+ unsigned char *buf = pbuf;
+
+ if (bytes_per_line == 0) {
+ bytes_per_line = len;
+ }
+
+ for (i = 0; i < len; i++) {
+ printf("%2.2x", *buf++);
+ j++;
+ if (j == bytes_per_line) {
+ printf("\n");
+ j = 0;
+ } else {
+ printf(":");
+ }
+ }
+ printf("\n");
+}
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+#ifdef PKT_FILTER_SUPPORT
+/* Convert user's input in hex pattern to byte-size mask */
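+/* Example (hypothetical input): "0x00ff" writes dst[0]=0x00, dst[1]=0xff and
+ * returns 2; returns -1 if the string lacks a 0x prefix or has odd length.
+ */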
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 &&
+ strncmp(src, "0X", 2) != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ strncpy(num, src, 2);
+ num[2] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += 2;
+ }
+ return i;
+}
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+ char *argv[8];
+ int i = 0;
+ const char *str;
+ int buf_len;
+ int str_len;
+ char *arg_save = 0, *arg_org = 0;
+ int rc;
+ char buf[128];
+ wl_pkt_filter_enable_t enable_parm;
+ wl_pkt_filter_enable_t * pkt_filterp;
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ arg_org = arg_save;
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (NULL == argv[i]) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_enable";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+ /* Parse enable/disable value. */
+ enable_parm.enable = htod32(enable);
+
+ buf_len += sizeof(enable_parm);
+ memcpy((char *)pkt_filterp,
+ &enable_parm,
+ sizeof(enable_parm));
+
+ /* Enable/disable the specified filter. */
+ rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+ /* Control the master mode */
+ bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+ rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
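+/* Install one packet filter. The arg string is space separated:
+ * <id> <polarity> <type> <offset> <mask hex> <pattern hex>,
+ * e.g. (hypothetical values) "100 0 0 0 0xff 0x12".
+ */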
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc;
+ uint32 mask_size;
+ uint32 pattern_size;
+ char *argv[8], * buf = 0;
+ int i = 0;
+ char *arg_save = 0, *arg_org = 0;
+#define BUF_SIZE 2048
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ arg_org = arg_save;
+
+ if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ if (strlen(arg) > BUF_SIZE) {
+ DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+ goto fail;
+ }
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+ while (argv[i++])
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (NULL == argv[i]) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[ str_len ] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+ if (NULL == argv[++i]) {
+ DHD_ERROR(("Polarity not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+ if (NULL == argv[++i]) {
+ DHD_ERROR(("Filter type not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+ if (NULL == argv[++i]) {
+ DHD_ERROR(("Offset not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+ if (NULL == argv[++i]) {
+ DHD_ERROR(("Bitmask not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter mask. */
+ mask_size =
+ htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+ if (NULL == argv[++i]) {
+ DHD_ERROR(("Pattern not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter pattern. */
+ pattern_size =
+ htod32(wl_pattern_atoh(argv[i],
+ (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ DHD_ERROR(("Mask and pattern not the same size\n"));
+ goto fail;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
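+ /* Resulting iovar buffer layout: "pkt_filter_add\0" + fixed filter
+ * header + fixed pattern header + mask bytes + pattern bytes.
+ */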
+
+ /* Filter attributes are set in a local variable (pkt_filter), and
+ ** then memcpy'ed into the buffer (pkt_filterp) since there is no
+ ** guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp,
+ &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+ rc = rc >= 0 ? 0 : rc;
+
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+ if (buf)
+ MFREE(dhd->osh, buf, BUF_SIZE);
+}
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ __FUNCTION__, arp_mode, retcode));
+ else
+ DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+ __FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+ __FUNCTION__, arp_enable, retcode));
+ else
+ DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+}
+#endif
+
+
+void dhd_arp_cleanup(dhd_pub_t *dhd)
+{
+#ifdef ARP_OFFLOAD_SUPPORT
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ dhd_os_proto_block(dhd);
+
+ iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+ iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+ dhd_os_proto_unblock(dhd);
+
+#endif /* ARP_OFFLOAD_SUPPORT */
+}
+
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, u32 ipaddr)
+{
+#ifdef ARP_OFFLOAD_SUPPORT
+ int iov_len = 0;
+ char iovbuf[32];
+ int retcode;
+
+ dhd_os_proto_block(dhd);
+
+ iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len);
+
+ dhd_os_proto_unblock(dhd);
+
+ if (retcode)
+ DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+ __FUNCTION__, retcode));
+ else
+ DHD_TRACE(("%s: ARP ipaddr entry added\n",
+ __FUNCTION__));
+#endif /* ARP_OFFLOAD_SUPPORT */
+}
+
+
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
+{
+#ifdef ARP_OFFLOAD_SUPPORT
+ int retcode;
+ int iov_len = 0;
+
+ if (!buf)
+ return -1;
+
+ dhd_os_proto_block(dhd);
+
+ iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+ retcode = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, buflen);
+
+ dhd_os_proto_unblock(dhd);
+
+ if (retcode) {
+ DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+ __FUNCTION__, retcode));
+
+ return -1;
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+ return 0;
+}
+
+
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+ uint up = 0;
+ char buf[128], *ptr;
+ uint power_mode = PM_FAST;
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = 0;
+ uint bcn_timeout = 4;
+ int scan_assoc_time = 40;
+ int scan_unassoc_time = 40;
+ uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+ int ret = 0;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ dhd_os_proto_block(dhd);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ /*
+ ** Read the MAC address from an external, customer-specific location.
+ ** NOTE that a default MAC address has to be present in OTP or the NVRAM
+ ** file to bring up the firmware, but a unique per-board MAC address may
+ ** be provided by customer code.
+ */
+ ret = dhd_custom_get_mac_address(ea_addr.octet);
+ if (!ret) {
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+ if (strstr(fw_path, "apsta") != NULL) {
+ uint rand_mac;
+
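+ /* Build a locally administered MAC for the SoftAP interface:
+ * fixed 02:1A:11 prefix plus randomized lower bytes.
+ */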
+ srandom32((uint)jiffies);
+ rand_mac = random32();
+ iovbuf[0] = 0x02; /* locally administered bit */
+ iovbuf[1] = 0x1A;
+ iovbuf[2] = 0x11;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned char)(rand_mac >> 8);
+ iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+ printk("Broadcom Dongle Host Driver mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ iovbuf[0], iovbuf[1], iovbuf[2], iovbuf[3], iovbuf[4], iovbuf[5]);
+
+ bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+ }
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ bcm_mkiovar("country", (char *)&dhd->dhd_cspec, \
+ sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) {
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ }
+ }
+
+ /* Set Listen Interval */
+ bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0)
+ DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ bcm_mkiovar("ver", 0, 0, buf, sizeof(buf));
+ dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf));
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ DHD_ERROR(("Firmware version = %s\n", buf));
+
+ /* Set PowerSave mode */
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode));
+
+ /* Match Host and Dongle rx alignment */
+ bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+ /* disable glom option per default */
+ bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+ /* Setup timeout if Beacons are lost and roam is off to report link down */
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+ /* Enable/Disable built-in roaming to allow the external supplicant to take care of roaming */
+ bcm_mkiovar("roam_off", (char *)&dhd_roam, 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim));
+ }
+#endif
+
+ if (dhd_roam == 0)
+ {
+ /* set internal roaming parameters */
+ int roam_scan_period = 30; /* in sec */
+ int roam_fullscan_period = 120; /* in sec */
+ int roam_trigger = -85;
+ int roam_delta = 15;
+ int band;
+ int band_temp_set = WLC_BAND_2G;
+
+ if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, \
+ (char *)&roam_scan_period, sizeof(roam_scan_period)) < 0)
+ DHD_ERROR(("%s: roam scan setup failed\n", __FUNCTION__));
+
+ bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, \
+ 4, iovbuf, sizeof(iovbuf));
+ if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, \
+ iovbuf, sizeof(iovbuf)) < 0)
+ DHD_ERROR(("%s: roam fullscan setup failed\n", __FUNCTION__));
+
+ if (dhdcdc_query_ioctl(dhd, 0, WLC_GET_BAND, \
+ (char *)&band, sizeof(band)) < 0)
+ DHD_ERROR(("%s: roam delta setting failed\n", __FUNCTION__));
+ else {
+ if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_ALL))
+ {
+ /* temporarily set band to insert new roam values */
+ if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_BAND, \
+ (char *)&band_temp_set, sizeof(band_temp_set)) < 0)
+ DHD_ERROR(("%s: local band seting failed\n", __FUNCTION__));
+ }
+ if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_DELTA, \
+ (char *)&roam_delta, sizeof(roam_delta)) < 0)
+ DHD_ERROR(("%s: roam delta setting failed\n", __FUNCTION__));
+
+ if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_TRIGGER, \
+ (char *)&roam_trigger, sizeof(roam_trigger)) < 0)
+ DHD_ERROR(("%s: roam trigger setting failed\n", __FUNCTION__));
+
+ /* Restore original band settings */
+ if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_BAND, \
+ (char *)&band, sizeof(band)) < 0)
+ DHD_ERROR(("%s: Original band restore failed\n", __FUNCTION__));
+ }
+ }
+
+ /* Force STA UP */
+ if (dhd_radio_up)
+ dhdcdc_set_ioctl(dhd, 0, WLC_UP, (char *)&up, sizeof(up));
+
+ /* Setup event_msgs */
+ bcm_mkiovar("event_msgs", dhd->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+ sizeof(scan_assoc_time));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+ sizeof(scan_unassoc_time));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* Set and enable ARP offload feature */
+ if (dhd_arp_enable)
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ dhd_arp_offload_enable(dhd, dhd_arp_enable);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+ {
+ int i;
+ /* Set up pkt filter */
+ if (dhd_pkt_filter_enable) {
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ dhd_pkt_filter_init, dhd_master_mode);
+ }
+ }
+ }
+#endif /* PKT_FILTER_SUPPORT */
+
+#if defined(KEEP_ALIVE)
+ {
+ /* Set Keep Alive : be sure to use FW with -keepalive */
+ int res;
+
+ if (ap_fw_loaded == FALSE) {
+ if ((res = dhd_keep_alive_onoff(dhd, 1)) < 0)
+ DHD_ERROR(("%s set keeplive failed %d\n", \
+ __FUNCTION__, res));
+ }
+ }
+#endif
+
+ dhd_os_proto_unblock(dhd);
+
+ return 0;
+}
+
+#ifdef SIMPLE_ISCAN
+
+uint iscan_thread_id;
+iscan_buf_t * iscan_chain = 0;
+
+iscan_buf_t *
+dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
+{
+ iscan_buf_t *iscanbuf_alloc = 0;
+ iscan_buf_t *iscanbuf_head;
+
+ dhd_iscan_lock();
+
+ iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
+ if (iscanbuf_alloc == NULL)
+ goto fail;
+
+ iscanbuf_alloc->next = NULL;
+ iscanbuf_head = *iscanbuf;
+
+ DHD_ISCAN(("%s: addr of allocated node = 0x%X"
+ "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
+ __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
+
+ if (iscanbuf_head == NULL) {
+ *iscanbuf = iscanbuf_alloc;
+ DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
+ goto fail;
+ }
+
+ while (iscanbuf_head->next)
+ iscanbuf_head = iscanbuf_head->next;
+
+ iscanbuf_head->next = iscanbuf_alloc;
+
+fail:
+ dhd_iscan_unlock();
+ return iscanbuf_alloc;
+}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+ iscan_buf_t *iscanbuf_free = 0;
+ iscan_buf_t *iscanbuf_prv = 0;
+ iscan_buf_t *iscanbuf_cur = iscan_chain;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+
+ dhd_iscan_lock();
+ /* If iscan_delete is null then delete the entire
+ * chain or else delete specific one provided
+ */
+ if (!iscan_delete) {
+ while (iscanbuf_cur) {
+ iscanbuf_free = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ iscanbuf_free->next = 0;
+ MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+ }
+ iscan_chain = 0;
+ } else {
+ while (iscanbuf_cur) {
+ if (iscanbuf_cur == iscan_delete)
+ break;
+ iscanbuf_prv = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ }
+ if (iscanbuf_prv)
+ iscanbuf_prv->next = iscan_delete->next;
+
+ iscan_delete->next = 0;
+ MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+
+ if (!iscanbuf_prv)
+ iscan_chain = 0;
+ }
+ dhd_iscan_unlock();
+}
+
+iscan_buf_t *
+dhd_iscan_result_buf(void)
+{
+ return iscan_chain;
+}
+
+
+
+/*
+* Print the scan cache; entries belonging to the partial list iscan_skip
+* are printed with a different (lower-case) tag.
+*/
+int
+dhd_iscan_print_cache(iscan_buf_t *iscan_skip)
+{
+ int i = 0, l = 0;
+ iscan_buf_t *iscan_cur;
+ wl_iscan_results_t *list;
+ wl_scan_results_t *results;
+ wl_bss_info_t UNALIGNED *bi;
+
+ dhd_iscan_lock();
+
+ iscan_cur = dhd_iscan_result_buf();
+
+ while (iscan_cur) {
+ list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
+ if (!list)
+ break;
+
+ results = (wl_scan_results_t *)&list->results;
+ if (!results)
+ break;
+
+ if (results->version != WL_BSS_INFO_VERSION) {
+ DHD_ISCAN(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, results->version));
+ goto done;
+ }
+
+ bi = results->bss_info;
+ for (i = 0; i < results->count; i++) {
+ if (!bi)
+ break;
+
+ DHD_ISCAN(("%s[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n",
+ iscan_cur != iscan_skip?"BSS":"bss", l, i,
+ bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2],
+ bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5]));
+
+ bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+ }
+ iscan_cur = iscan_cur->next;
+ l++;
+ }
+
+done:
+ dhd_iscan_unlock();
+ return 0;
+}
+
+/*
+* Delete a disappeared AP from the scan cache, but skip the partial list in iscan_skip
+*/
+int
+dhd_iscan_delete_bss(void *dhdp, void *addr, iscan_buf_t *iscan_skip)
+{
+ int i = 0, j = 0, l = 0;
+ iscan_buf_t *iscan_cur;
+ wl_iscan_results_t *list;
+ wl_scan_results_t *results;
+ wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next;
+
+ uchar *s_addr = addr;
+
+ dhd_iscan_lock();
+ DHD_ISCAN(("%s: BSS to remove %X:%X:%X:%X:%X:%X\n",
+ __FUNCTION__, s_addr[0], s_addr[1], s_addr[2],
+ s_addr[3], s_addr[4], s_addr[5]));
+
+ iscan_cur = dhd_iscan_result_buf();
+
+ while (iscan_cur) {
+ if (iscan_cur != iscan_skip) {
+ list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
+ if (!list)
+ break;
+
+ results = (wl_scan_results_t *)&list->results;
+ if (!results)
+ break;
+
+ if (results->version != WL_BSS_INFO_VERSION) {
+ DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, results->version));
+ goto done;
+ }
+
+ bi = results->bss_info;
+ for (i = 0; i < results->count; i++) {
+ if (!bi)
+ break;
+
+ if (!memcmp(bi->BSSID.octet, addr, ETHER_ADDR_LEN)) {
+ DHD_ISCAN(("%s: Del BSS[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n",
+ __FUNCTION__, l, i, bi->BSSID.octet[0],
+ bi->BSSID.octet[1], bi->BSSID.octet[2],
+ bi->BSSID.octet[3], bi->BSSID.octet[4],
+ bi->BSSID.octet[5]));
+
+ bi_new = bi;
+ bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+/*
+ if(bi && bi_new) {
+ bcopy(bi, bi_new, results->buflen -
+ dtoh32(bi_new->length));
+ results->buflen -= dtoh32(bi_new->length);
+ }
+*/
+ results->buflen -= dtoh32(bi_new->length);
+ results->count--;
+
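+ /* Compact the list: copy each remaining BSS record down over the
+ * deleted entry so the results stay contiguous.
+ */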
+ for (j = i; j < results->count; j++) {
+ if (bi && bi_new) {
+ DHD_ISCAN(("%s: Moved up BSS[%2.2d:%2.2d]"
+ "%X:%X:%X:%X:%X:%X\n",
+ __FUNCTION__, l, j, bi->BSSID.octet[0],
+ bi->BSSID.octet[1], bi->BSSID.octet[2],
+ bi->BSSID.octet[3], bi->BSSID.octet[4],
+ bi->BSSID.octet[5]));
+
+ bi_next = (wl_bss_info_t *)((uintptr)bi +
+ dtoh32(bi->length));
+ bcopy(bi, bi_new, dtoh32(bi->length));
+ bi_new = (wl_bss_info_t *)((uintptr)bi_new +
+ dtoh32(bi_new->length));
+ bi = bi_next;
+ }
+ }
+
+ if (results->count == 0) {
+ /* Prune now empty partial scan list */
+ dhd_iscan_free_buf(dhdp, iscan_cur);
+ goto done;
+ }
+ break;
+ }
+ bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+ }
+ }
+ iscan_cur = iscan_cur->next;
+ l++;
+ }
+
+done:
+ dhd_iscan_unlock();
+ return 0;
+}
+
+int
+dhd_iscan_remove_duplicates(void * dhdp, iscan_buf_t *iscan_cur)
+{
+ int i = 0;
+ wl_iscan_results_t *list;
+ wl_scan_results_t *results;
+ wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next;
+
+ dhd_iscan_lock();
+
+ DHD_ISCAN(("%s: Scan cache before delete\n",
+ __FUNCTION__));
+ dhd_iscan_print_cache(iscan_cur);
+
+ if (!iscan_cur)
+ goto done;
+
+ list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
+ if (!list)
+ goto done;
+
+ results = (wl_scan_results_t *)&list->results;
+ if (!results)
+ goto done;
+
+ if (results->version != WL_BSS_INFO_VERSION) {
+ DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, results->version));
+ goto done;
+ }
+
+ bi = results->bss_info;
+ for (i = 0; i < results->count; i++) {
+ if (!bi)
+ break;
+
+ DHD_ISCAN(("%s: Find dups for BSS[%2.2d] %X:%X:%X:%X:%X:%X\n",
+ __FUNCTION__, i, bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2],
+ bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5]));
+
+ dhd_iscan_delete_bss(dhdp, bi->BSSID.octet, iscan_cur);
+
+ bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+ }
+
+done:
+ DHD_ISCAN(("%s: Scan cache after delete\n", __FUNCTION__));
+ dhd_iscan_print_cache(iscan_cur);
+ dhd_iscan_unlock();
+ return 0;
+}
+
+void
+dhd_iscan_ind_scan_confirm(void *dhdp, bool status)
+{
+
+ dhd_ind_scan_confirm(dhdp, status);
+}
+
+int
+dhd_iscan_request(void * dhdp, uint16 action)
+{
+ int rc;
+ wl_iscan_params_t params;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ char buf[WLC_IOCTL_SMLEN];
+
+
+ memset(&params, 0, sizeof(wl_iscan_params_t));
+ memcpy(&params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+
+ params.params.bss_type = DOT11_BSSTYPE_ANY;
+ params.params.scan_type = DOT11_SCANTYPE_ACTIVE;
+
+ params.params.nprobes = htod32(-1);
+ params.params.active_time = htod32(-1);
+ params.params.passive_time = htod32(-1);
+ params.params.home_time = htod32(-1);
+ params.params.channel_num = htod32(0);
+
+ params.version = htod32(ISCAN_REQ_VERSION);
+ params.action = htod16(action);
+ params.scan_duration = htod16(0);
+
+ bcm_mkiovar("iscan", (char *)&params, sizeof(wl_iscan_params_t), buf, WLC_IOCTL_SMLEN);
+ rc = dhd_wl_ioctl(dhdp, WLC_SET_VAR, buf, WLC_IOCTL_SMLEN);
+
+ return rc;
+}
+
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+ wl_iscan_results_t *list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ iscan_buf_t *iscan_cur;
+ int status = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ int rc;
+
+
+ iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+ if (!iscan_cur) {
+ DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+ dhd_iscan_free_buf(dhdp, 0);
+ dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+ goto fail;
+ }
+
+ dhd_iscan_lock();
+
+ memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+ iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+ rc = dhd_wl_ioctl(dhdp, WLC_GET_VAR, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ *scan_count = results->count = dtoh32(results->count);
+ status = dtoh32(list_buf->status);
+
+ dhd_iscan_unlock();
+
+ if (!(*scan_count))
+ dhd_iscan_free_buf(dhdp, iscan_cur);
+ else
+ dhd_iscan_remove_duplicates(dhdp, iscan_cur);
+
+
+fail:
+ return status;
+}
+
+#endif
+
+/* Function to estimate possible DTIM_SKIP value */
+int dhd_get_dtim_skip(dhd_pub_t *dhd)
+{
+ int bcn_li_dtim;
+ char buf[128];
+ int ret;
+ int dtim_assoc = 0;
+
+ if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
+ bcn_li_dtim = 3;
+ else
+ bcn_li_dtim = dhd->dtim_skip;
+
+ /* Read DTIM value if associated */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("dtim_assoc", 0, 0, buf, sizeof(buf));
+ if ((ret = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf))) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ bcn_li_dtim = 1;
+ goto exit;
+ }
+ else
+ dtim_assoc = dtoh32(*(int *)buf);
+
+ DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n", \
+ __FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));
+
+ /* if not associated, just exit */
+ if (dtim_assoc == 0) {
+ goto exit;
+ }
+
+ /* check if sta listen interval fits into AP dtim */
+ if (dtim_assoc > LISTEN_INTERVAL) {
+ /* AP DTIM too big for our Listen Interval : no dtim skipping */
+ bcn_li_dtim = 1;
+ DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", \
+ __FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
+ goto exit;
+ }
+
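+ /* Scale the skip count so that (bcn_li_dtim * AP DTIM period) still
+ * fits within the STA listen interval.
+ */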
+ if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
+ /* Round up dtim_skip to fit into STAs Listen Interval */
+ bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
+ DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+ }
+
+exit:
+ return bcn_li_dtim;
+}
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd)
+{
+ char iovbuf[128];
+ int pfn_enabled = 0;
+ int iov_len = 0;
+ int ret;
+
+ /* Disable pfn */
+ iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) >= 0) {
+ /* clear pfn */
+ iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+ if (iov_len) {
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ }
+ }
+ else {
+ ret = -1;
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+ }
+ }
+ else
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+ char iovbuf[128];
+ uint8 bssid[6];
+ int ret = -1;
+
+ if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return ret;
+ }
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+
+ /* Check if disassoc to enable pno */
+ if ((pfn_enabled) && \
+ ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_GET_BSSID, \
+ (char *)&bssid, ETHER_ADDR_LEN)) == BCME_NOTASSOCIATED)) {
+ DHD_TRACE(("%s pno enable called in disassoc mode\n", __FUNCTION__));
+ }
+ else if (pfn_enabled) {
+ DHD_ERROR(("%s pno enable called in assoc mode ret=%d\n", \
+ __FUNCTION__, ret));
+ return ret;
+ }
+
+ /* Enable/disable PNO */
+ if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+ if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) {
+ DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ else {
+ dhd->pno_enable = pfn_enabled;
+ DHD_TRACE(("%s set pno as %d\n", __FUNCTION__, dhd->pno_enable));
+ }
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+/* Function to execute combined scan */
+int
+dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr, \
+ int pno_repeat, int pno_freq_expo_max)
+{
+ int err = -1;
+ char iovbuf[128];
+ int k, i;
+ wl_pfn_param_t pfn_param;
+ wl_pfn_t pfn_element;
+
+ DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
+
+ if ((!dhd) && (!ssids_local)) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ err = -1;
+ }
+
+ /* Check for broadcast ssid */
+ for (k = 0; k < nssid; k++) {
+ if (!ssids_local[k].SSID_len) {
+ DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
+ return err;
+ }
+ }
+/* #define PNO_DUMP 1 */
+#ifdef PNO_DUMP
+ {
+ int j;
+ for (j = 0; j < nssid; j++) {
+ DHD_ERROR(("%d: scan for %s size =%d\n", j,
+ ssids_local[j].SSID, ssids_local[j].SSID_len));
+ }
+ }
+#endif /* PNO_DUMP */
+
+ /* clean up everything */
+ if ((err = dhd_pno_clean(dhd)) < 0) {
+ DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
+ return err;
+ }
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ memset(&pfn_element, 0, sizeof(pfn_element));
+
+ /* set pfn parameters */
+ pfn_param.version = htod32(PFN_VERSION);
+ pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+ /* check and set extra pno params */
+ if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
+ pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat_scan = htod32(pno_repeat);
+ pfn_param.max_freq_adjust = htod32(pno_freq_expo_max);
+ }
+
+ /* set up pno scan fr */
+ if (scan_fr != 0)
+ pfn_param.scan_freq = htod32(scan_fr);
+
+ if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
+ DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+ return err;
+ }
+ if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
+ DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ return err;
+ }
+
+ bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+ /* set all pfn ssid */
+ for (i = 0; i < nssid; i++) {
+
+ pfn_element.bss_type = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+ pfn_element.auth = (DOT11_OPEN_SYSTEM);
+ pfn_element.infra = htod32(1);
+
+ memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
+ pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
+
+ if ((err =
+ bcm_mkiovar("pfn_add", (char *)&pfn_element,
+ sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+ if ((err =
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) {
+ DHD_ERROR(("%s failed for i=%d error=%d\n",
+ __FUNCTION__, i, err));
+ return err;
+ }
+ else
+ DHD_ERROR(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n", \
+ __FUNCTION__, pfn_param.scan_freq, \
+ pfn_param.repeat_scan, pfn_param.max_freq_adjust));
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
+ }
+
+ /* Enable PNO */
+ /* dhd_pno_enable(dhd, 1); */
+ return err;
+}
+
+int dhd_pno_get_status(dhd_pub_t *dhd)
+{
+ int ret = -1;
+
+ if (!dhd)
+ return ret;
+ else
+ return (dhd->pno_enable);
+}
+
+#endif /* PNO_SUPPORT */
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on)
+{
+ char buf[256];
+ char *buf_ptr = buf;
+ wl_keep_alive_pkt_t keep_alive_pkt;
+ char * str;
+ int str_len, buf_len;
+ int res = 0;
+ int keep_alive_period = KEEP_ALIVE_PERIOD; /* in ms */
+
+ DHD_TRACE(("%s: ka:%d\n", __FUNCTION__, ka_on));
+
+ if (ka_on) { /* on suspend */
+ keep_alive_pkt.period_msec = keep_alive_period;
+
+ } else {
+ /* on resume, turn off keep_alive packets */
+ keep_alive_pkt.period_msec = 0;
+ }
+
+ /* IOC var name */
+ str = "keep_alive";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ /* set ptr to IOCTL payload after the var name */
+ buf_ptr += buf_len; /* includes the terminating '\0' */
+
+ /* copy Keep-alive attributes from local var keep_alive_pkt */
+ str = NULL_PKT_STR;
+ keep_alive_pkt.len_bytes = strlen(str);
+
+ memcpy(buf_ptr, &keep_alive_pkt, WL_KEEP_ALIVE_FIXED_LEN);
+ buf_ptr += WL_KEEP_ALIVE_FIXED_LEN;
+
+ /* copy packet data */
+ memcpy(buf_ptr, str, keep_alive_pkt.len_bytes);
+ buf_len += (WL_KEEP_ALIVE_FIXED_LEN + keep_alive_pkt.len_bytes);
+
+ res = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+ return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+
+#if defined(CSCAN)
+
+/* Android ComboSCAN support */
+/*
+ * Data parsing from the ComboScan TLV list
+*/
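+/* Each TLV element is encoded as <token byte><value>; the value is 1, 2 or
+ * 4 bytes wide (input_size), with multi-byte values converted to dongle
+ * byte order.
+ */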
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+ int input_size, int *bytes_left)
+{
+ char* str = *list_str;
+ uint16 short_temp;
+ uint32 int_temp;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ /* Clean all dest bytes */
+ memset(dst, 0, dst_size);
+ while (*bytes_left > 0) {
+
+ if (str[0] != token) {
+ DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+ __FUNCTION__, token, str[0], *bytes_left));
+ return -1;
+ }
+
+ *bytes_left -= 1;
+ str += 1;
+
+ if (input_size == 1) {
+ memcpy(dst, str, input_size);
+ }
+ else if (input_size == 2) {
+ memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
+ input_size);
+ }
+ else if (input_size == 4) {
+ memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
+ input_size);
+ }
+
+ *bytes_left -= input_size;
+ str += input_size;
+ *list_str = str;
+ return 1;
+ }
+ return 1;
+}
+
+/*
+ * channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+ int channel_num, int *bytes_left)
+{
+ char* str = *list_str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+ *list_str = str;
+ DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+ /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* All channels */
+ channel_list[idx] = 0x0;
+ }
+ else {
+ channel_list[idx] = (uint16)str[0];
+ DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
+ }
+ *bytes_left -= 1;
+ str += 1;
+
+ if (idx++ > 255) {
+ DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/*
+ * SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+ char* str = *list_str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+ *list_str = str;
+ DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+
+ /* Get proper CSCAN_TLV_TYPE_SSID_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* Broadcast SSID */
+ ssid[idx].SSID_len = 0;
+ memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+ *bytes_left -= 1;
+ str += 1;
+
+ DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+ }
+ else if (str[0] <= DOT11_MAX_SSID_LEN) {
+ /* Get proper SSID size */
+ ssid[idx].SSID_len = str[0];
+ *bytes_left -= 1;
+ str += 1;
+
+ /* Get SSID */
+ if (ssid[idx].SSID_len > *bytes_left) {
+ DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+ __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+ return -1;
+ }
+
+ memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+ *bytes_left -= ssid[idx].SSID_len;
+ str += ssid[idx].SSID_len;
+
+ DHD_TRACE(("%s :size=%d left=%d\n",
+ (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+ }
+ else {
+ DHD_ERROR(("### SSID size more that %d\n", str[0]));
+ return -1;
+ }
+
+ if (idx++ > max) {
+ DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx. max specifies the size of the ssid array. Parses SSIDs and
+ * returns the updated idx; if the returned idx >= max, not all SSIDs fit and
+ * the excess were not copied. Returns -1 on an empty string, or on an SSID
+ * that is too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+ char* str, *ptr;
+
+ if ((list_str == NULL) || (*list_str == NULL))
+ return -1;
+
+ for (str = *list_str; str != NULL; str = ptr) {
+
+ /* check for next TAG */
+ if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+ *list_str = str + strlen(GET_CHANNEL);
+ return idx;
+ }
+
+ if ((ptr = strchr(str, ',')) != NULL) {
+ *ptr++ = '\0';
+ }
+
+ if (strlen(str) > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+ return -1;
+ }
+
+ if (strlen(str) == 0)
+ ssid[idx].SSID_len = 0;
+
+ if (idx < max) {
+ strcpy((char*)ssid[idx].SSID, str);
+ ssid[idx].SSID_len = strlen(str);
+ }
+ idx++;
+ }
+ return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+ int num;
+ int val;
+ char* str;
+ char* endptr = NULL;
+
+ if ((list_str == NULL)||(*list_str == NULL))
+ return -1;
+
+ str = *list_str;
+ num = 0;
+ while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+ val = (int)strtoul(str, &endptr, 0);
+ if (endptr == str) {
+ printf("could not parse channel number starting at"
+ " substring \"%s\" in list:\n%s\n",
+ str, *list_str);
+ return -1;
+ }
+ str = endptr + strspn(endptr, " ,");
+
+ if (num == channel_num) {
+ DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+ channel_num, *list_str));
+ return -1;
+ }
+
+ channel_list[num++] = (uint16)val;
+ }
+ *list_str = str;
+ return num;
+}
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/dhd_custom_gpio.c b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c
new file mode 100644
index 000000000000..4d32863e2982
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c
@@ -0,0 +1,272 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2010, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c,v 1.1.4.8.4.4 2011/01/20 20:23:09 Exp $
+*/
+
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern void bcm_wlan_power_off(int);
+extern void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+int wifi_set_carddetect(int on);
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#endif
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC) */
+
+#ifdef CUSTOMER_HW3
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1; /* GG 19 */
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+{
+ int host_oob_irq = 0;
+
+#ifdef CUSTOMER_HW2
+ host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
+
+#else /* for NOT CUSTOMER_HW2 */
+#if defined(CUSTOM_OOB_GPIO_NUM)
+ if (dhd_oob_gpio_num < 0) {
+ dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+ }
+#endif
+
+ if (dhd_oob_gpio_num < 0) {
+ WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+ __FUNCTION__));
+ return (dhd_oob_gpio_num);
+ }
+
+ WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+ __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW
+ host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
+#elif defined CUSTOMER_HW3
+ gpio_request(dhd_oob_gpio_num, "oob irq");
+ host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+ gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW */
+#endif /* CUSTOMER_HW2 */
+
+ return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Customer function to control hw specific wlan gpios */
+void
+dhd_customer_gpio_wlan_ctrl(int onoff)
+{
+ switch (onoff) {
+ case WLAN_RESET_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(0, 0);
+#endif
+ WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+ break;
+
+ case WLAN_RESET_ON:
+ WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(1, 0);
+#endif
+ WL_ERROR(("=========== WLAN going back to live ========\n"));
+ break;
+
+ case WLAN_POWER_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(1);
+#endif /* CUSTOMER_HW */
+ break;
+
+ case WLAN_POWER_ON:
+ WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(1);
+		/* Give the customer power supply time to stabilize */
+ OSL_DELAY(50);
+#endif /* CUSTOMER_HW */
+ break;
+ }
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+ int ret = 0;
+
+ WL_TRACE(("%s Enter\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+
+ /* Customer access to MAC address stored outside of DHD driver */
+#ifdef CUSTOMER_HW2
+ ret = wifi_get_mac_addr(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+ /* EXAMPLE code */
+ {
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+ }
+#endif /* EXAMPLE_GET_MAC */
+
+ return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* The table should be filled out based on the custom platform's regulatory requirements */
+#ifdef EXAMPLE_TABLE
+ {"", "XY", 4}, /* universal */
+ {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+ {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+ {"EU", "EU", 5}, /* European union countries */
+ {"AT", "EU", 5},
+ {"BE", "EU", 5},
+ {"BG", "EU", 5},
+ {"CY", "EU", 5},
+ {"CZ", "EU", 5},
+ {"DK", "EU", 5},
+ {"EE", "EU", 5},
+ {"FI", "EU", 5},
+ {"FR", "EU", 5},
+ {"DE", "EU", 5},
+ {"GR", "EU", 5},
+ {"HU", "EU", 5},
+ {"IE", "EU", 5},
+ {"IT", "EU", 5},
+ {"LV", "EU", 5},
+ {"LI", "EU", 5},
+ {"LT", "EU", 5},
+ {"LU", "EU", 5},
+ {"MT", "EU", 5},
+ {"NL", "EU", 5},
+ {"PL", "EU", 5},
+ {"PT", "EU", 5},
+ {"RO", "EU", 5},
+ {"SK", "EU", 5},
+ {"SI", "EU", 5},
+ {"ES", "EU", 5},
+ {"SE", "EU", 5},
+ {"GB", "EU", 5}, /* input ISO "GB" to : EU regrev 05 */
+ {"IL", "IL", 0},
+ {"CH", "CH", 0},
+ {"TR", "TR", 0},
+ {"NO", "NO", 0},
+ {"KR", "XY", 3},
+ {"AU", "XY", 3},
+ {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+ {"TW", "XY", 3},
+ {"AR", "XY", 3},
+ {"MX", "XY", 3}
+#endif /* EXAMPLE_TABLE */
+};
+
+
+/* Customized Locale converter
+* input : ISO 3166-1 country abbreviation
+* output: customized cspec
+*/
+void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+{
+#ifdef CUSTOMER_HW2
+ struct cntry_locales_custom *cloc_ptr;
+
+ if (!cspec)
+ return;
+
+ cloc_ptr = wifi_get_country_code(country_iso_code);
+ if (cloc_ptr) {
+ strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = cloc_ptr->custom_locale_rev;
+ }
+ return;
+#else
+ int size, i;
+
+ size = ARRAYSIZE(translate_custom_table);
+
+ if (cspec == 0)
+ return;
+
+ if (size == 0)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+ memcpy(cspec->ccode, translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[i].custom_locale_rev;
+ return;
+ }
+ }
+ memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[0].custom_locale_rev;
+ return;
+#endif
+}
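+
+/* Illustrative usage sketch (editorial note, not part of the Broadcom sources):
+ * with EXAMPLE_TABLE enabled, the fallback path above maps an ISO code through
+ * translate_custom_table, e.g.
+ *
+ *	wl_country_t cspec;
+ *	get_customized_country_code("GB", &cspec);	// cspec.ccode = "EU", cspec.rev = 5
+ *	get_customized_country_code("ZZ", &cspec);	// unknown: falls back to entry 0 ("XY", rev 4)
+ *
+ * On CUSTOMER_HW2 builds the platform's wifi_get_country_code() hook is used
+ * instead of this table.
+ */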
diff --git a/drivers/net/wireless/bcm4329/dhd_dbg.h b/drivers/net/wireless/bcm4329/dhd_dbg.h
new file mode 100644
index 000000000000..b48c1d70f144
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_dbg.h
@@ -0,0 +1,100 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h,v 1.5.6.2.4.2.14.10 2010/05/21 21:49:38 Exp $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#ifdef DHD_DEBUG
+
+#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+ printf args;} while (0)
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+
+#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
+
+#else /* DHD_DEBUG */
+
+#define DHD_ERROR(args) do {if (net_ratelimit()) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+
+#define DHD_ERROR_ON() 0
+#define DHD_TRACE_ON() 0
+#define DHD_INFO_ON() 0
+#define DHD_DATA_ON() 0
+#define DHD_CTL_ON() 0
+#define DHD_TIMER_ON() 0
+#define DHD_HDRS_ON() 0
+#define DHD_BYTES_ON() 0
+#define DHD_INTR_ON() 0
+#define DHD_GLOM_ON() 0
+#define DHD_EVENT_ON() 0
+#define DHD_BTA_ON() 0
+#define DHD_ISCAN_ON() 0
+#endif /* DHD_DEBUG */
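+
+/* Usage note (editorial, inferred from the call sites in this driver): the
+ * argument to each macro is a complete printf argument list, so calls use
+ * double parentheses, e.g.
+ *
+ *	DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+ *
+ * which lets a disabled level (e.g. DHD_TRACE without DHD_DEBUG) expand to
+ * nothing at compile time.
+ */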
+
+#define DHD_LOG(args)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_linux.c b/drivers/net/wireless/bcm4329/dhd_linux.c
new file mode 100644
index 000000000000..26bbb74ba7b7
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_linux.c
@@ -0,0 +1,3450 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104.4.40 2011/02/03 19:55:18 Exp $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/inetdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <wl_iw.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef CUSTOMER_HW2
+#include <linux/platform_device.h>
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#include <linux/wlan_plat.h>
+static struct wifi_platform_data *wifi_control_data = NULL;
+#endif
+struct semaphore wifi_control_sem;
+
+static struct resource *wifi_irqres = NULL;
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr)
+{
+ if (wifi_irqres) {
+ *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
+ return (int)wifi_irqres->start;
+ }
+#ifdef CUSTOM_OOB_GPIO_NUM
+ return CUSTOM_OOB_GPIO_NUM;
+#else
+ return -1;
+#endif
+}
+
+int wifi_set_carddetect(int on)
+{
+ printk("%s = %d\n", __FUNCTION__, on);
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ if (wifi_control_data && wifi_control_data->set_carddetect) {
+ wifi_control_data->set_carddetect(on);
+ }
+#endif
+ return 0;
+}
+
+int wifi_set_power(int on, unsigned long msec)
+{
+ printk("%s = %d\n", __FUNCTION__, on);
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ if (wifi_control_data && wifi_control_data->set_power) {
+ wifi_control_data->set_power(on);
+ }
+#endif
+ if (msec)
+ mdelay(msec);
+ return 0;
+}
+
+int wifi_set_reset(int on, unsigned long msec)
+{
+ DHD_TRACE(("%s = %d\n", __FUNCTION__, on));
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ if (wifi_control_data && wifi_control_data->set_reset) {
+ wifi_control_data->set_reset(on);
+ }
+#endif
+ if (msec)
+ mdelay(msec);
+ return 0;
+}
+
+int wifi_get_mac_addr(unsigned char *buf)
+{
+ DHD_TRACE(("%s\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ if (wifi_control_data && wifi_control_data->get_mac_addr) {
+ return wifi_control_data->get_mac_addr(buf);
+ }
+#endif
+ return -EOPNOTSUPP;
+}
+
+void *wifi_get_country_code(char *ccode)
+{
+ DHD_TRACE(("%s\n", __FUNCTION__));
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ if (!ccode)
+ return NULL;
+ if (wifi_control_data && wifi_control_data->get_country_code) {
+ return wifi_control_data->get_country_code(ccode);
+ }
+#endif
+ return NULL;
+}
+
+static int wifi_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ struct wifi_platform_data *wifi_ctrl =
+ (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ wifi_control_data = wifi_ctrl;
+#endif
+
+ DHD_TRACE(("## %s\n", __FUNCTION__));
+ wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
+
+ wifi_set_power(1, 0); /* Power On */
+ wifi_set_carddetect(1); /* CardDetect (0->1) */
+
+ up(&wifi_control_sem);
+ return 0;
+}
+
+static int wifi_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ struct wifi_platform_data *wifi_ctrl =
+ (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ wifi_control_data = wifi_ctrl;
+#endif
+ DHD_TRACE(("## %s\n", __FUNCTION__));
+ wifi_set_power(0, 0); /* Power Off */
+ wifi_set_carddetect(0); /* CardDetect (1->0) */
+
+ up(&wifi_control_sem);
+ return 0;
+}
+
+static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+ return 0;
+}
+static int wifi_resume(struct platform_device *pdev)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+ return 0;
+}
+
+static struct platform_driver wifi_device = {
+ .probe = wifi_probe,
+ .remove = wifi_remove,
+ .suspend = wifi_suspend,
+ .resume = wifi_resume,
+ .driver = {
+ .name = "bcm4329_wlan",
+ }
+};
+
+int wifi_add_dev(void)
+{
+ DHD_TRACE(("## Calling platform_driver_register\n"));
+ return platform_driver_register(&wifi_device);
+}
+
+void wifi_del_dev(void)
+{
+	DHD_TRACE(("## Calling platform_driver_unregister\n"));
+ platform_driver_unregister(&wifi_device);
+}
+#endif /* defined(CUSTOMER_HW2) */
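+
+/* Flow sketch (editorial note): on CUSTOMER_HW2 builds the board code is
+ * expected to register a "bcm4329_wlan" platform device; wifi_add_dev()
+ * registers the matching driver above, wifi_probe() then powers the chip via
+ * wifi_set_power(), asserts card detect via wifi_set_carddetect() and releases
+ * wifi_control_sem.
+ */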
+
+static int dhd_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr);
+
+static struct notifier_block dhd_notifier = {
+ .notifier_call = dhd_device_event
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+ return "";
+}
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(CONFIG_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ struct net_device_stats stats;
+ int idx; /* iface idx in dongle */
+ int state; /* interface state */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+} dhd_if_t;
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(CONFIG_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd_pub_t pub;
+
+ /* OS/stack specifics */
+ dhd_if_t *iflist[DHD_MAX_IFS];
+
+ struct mutex proto_sem;
+ wait_queue_head_t ioctl_resp_wait;
+ struct timer_list timer;
+ bool wd_timer_valid;
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+
+ /* Thread based operation */
+ bool threads_only;
+ struct mutex sdsem;
+ long watchdog_pid;
+ struct semaphore watchdog_sem;
+ struct completion watchdog_exited;
+ long dpc_pid;
+ struct semaphore dpc_sem;
+ struct completion dpc_exited;
+
+ /* Wakelocks */
+#ifdef CONFIG_HAS_WAKELOCK
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+#endif
+ spinlock_t wl_lock;
+ int wl_count;
+ int wl_packet;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ struct mutex wl_start_lock; /* mutex when START called to prevent any other Linux calls */
+#endif
+ /* Thread to issue ioctl for multicast */
+ long sysioc_pid;
+ struct semaphore sysioc_sem;
+ struct completion sysioc_exited;
+ bool set_multicast;
+ bool set_macaddress;
+ struct ether_addr macvalue;
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+} dhd_info_t;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+struct semaphore dhd_registration_sem;
+#define DHD_REGISTRATION_TIMEOUT 24000 /* msec : time allowed to finish dhd registration */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
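+
+/* Example invocation (illustrative only; module and file names are assumptions):
+ *   insmod bcm4329.ko firmware_path=/path/to/fw_bcm4329.bin nvram_path=/path/to/nvram.txt
+ */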
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+/* Spawn a thread for system ioctls (set mac, set mcast) */
+uint dhd_sysioc = TRUE;
+module_param(dhd_sysioc, uint, 0);
+
+/* Watchdog interval */
+uint dhd_watchdog_ms = 10;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#ifdef DHD_DEBUG
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0);
+#endif /* DHD_DEBUG */
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+uint dhd_arp_mode = 0xb;
+module_param(dhd_arp_mode, uint, 0);
+
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 1);
+
+/* Watchdog thread priority, -1 to use kernel timer */
+int dhd_watchdog_prio = 97;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int dhd_dpc_prio = 98;
+module_param(dhd_dpc_prio, int, 0);
+
+/* Dongle memory size override */
+extern int dhd_dongle_memsize;
+module_param(dhd_dongle_memsize, int, 0);
+
+/* Control fw roaming */
+#ifdef CUSTOMER_HW2
+uint dhd_roam = 0;
+#else
+uint dhd_roam = 1;
+#endif
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ];
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif /* LINUX_VERSION_CODE */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE "drivers/net/wireless/bcm4329"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#else
+#define DHD_COMPILED
+#endif
+
+static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+#ifdef DHD_DEBUG
+"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
+#endif
+;
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ dhd_mmc_suspend = TRUE;
+ ret = NOTIFY_OK;
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ dhd_mmc_suspend = FALSE;
+ ret = NOTIFY_OK;
+ break;
+ }
+ smp_mb();
+ return ret;
+}
+
+static struct notifier_block dhd_sleep_pm_notifier = {
+ .notifier_call = dhd_sleep_pm_callback,
+ .priority = 0
+};
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
+ /* 1 - Enable packet filter, only allow unicast packet to send up */
+ /* 0 - Disable packet filter */
+ if (dhd_pkt_filter_enable) {
+ int i;
+
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ value, dhd_master_mode);
+ }
+ }
+#endif
+}
+
+
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+ int power_mode = PM_MAX;
+ /* wl_pkt_filter_enable_t enable_parm; */
+ char iovbuf[32];
+ int bcn_li_dtim = 3;
+#ifdef CUSTOMER_HW2
+ uint roamvar = 1;
+#endif /* CUSTOMER_HW2 */
+
+ DHD_TRACE(("%s: enter, value = %d in_suspend = %d\n",
+ __FUNCTION__, value, dhd->in_suspend));
+
+ if (dhd && dhd->up) {
+ if (value && dhd->in_suspend) {
+
+ /* Kernel suspended */
+ DHD_TRACE(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM,
+ (char *)&power_mode, sizeof(power_mode));
+
+ /* Enable packet filter, only allow unicast packet to send up */
+ dhd_set_packet_filter(1, dhd);
+
+			/* If DTIM skip is left at its default setup, force the dongle
+			 * to wake every third DTIM for better power saving.
+			 * Note that a side effect is the chance of missing BC/MC packets.
+			 */
+ bcn_li_dtim = dhd_get_dtim_skip(dhd);
+ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+ 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#ifdef CUSTOMER_HW2
+			/* Disable built-in roaming during suspend */
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#endif /* CUSTOMER_HW2 */
+
+ } else {
+
+ /* Kernel resumed */
+ DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+ power_mode = PM_FAST;
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode));
+
+ /* disable pkt filter */
+ dhd_set_packet_filter(0, dhd);
+
+ /* restore pre-suspend setting for dtim_skip */
+ bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
+ 4, iovbuf, sizeof(iovbuf));
+
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#ifdef CUSTOMER_HW2
+ roamvar = dhd_roam;
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+ dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#endif /* CUSTOMER_HW2 */
+ }
+ }
+
+ return 0;
+}
+
+static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
+{
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ dhd_os_wake_lock(dhdp);
+ dhd_os_proto_block(dhdp);
+ /* Set flag when early suspend was called */
+ dhdp->in_suspend = val;
+ if (!dhdp->suspend_disable_flag)
+ dhd_set_suspend(val, dhdp);
+ dhd_os_proto_unblock(dhdp);
+ dhd_os_wake_unlock(dhdp);
+}
+
+static void dhd_early_suspend(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 1);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 0);
+}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+/*
+ * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay. Usage:
+ *
+ * dhd_timeout_start(&tmo, usec);
+ * while (!dhd_timeout_expired(&tmo))
+ * if (poll_something())
+ * break;
+ * if (dhd_timeout_expired(&tmo))
+ * fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+ tmo->limit = usec;
+ tmo->increment = 0;
+ tmo->elapsed = 0;
+ tmo->tick = 1000000 / HZ;
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Do nothing on the first call */
+ if (tmo->increment == 0) {
+ tmo->increment = 1;
+ return 0;
+ }
+
+ if (tmo->elapsed >= tmo->limit)
+ return 1;
+
+ /* Add the delay that's about to take place */
+ tmo->elapsed += tmo->increment;
+
+ if (tmo->increment < tmo->tick) {
+ OSL_DELAY(tmo->increment);
+ tmo->increment *= 2;
+ if (tmo->increment > tmo->tick)
+ tmo->increment = tmo->tick;
+ } else {
+ wait_queue_head_t delay_wait;
+ DECLARE_WAITQUEUE(wait, current);
+ int pending;
+ init_waitqueue_head(&delay_wait);
+ add_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ pending = signal_pending(current);
+ remove_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_RUNNING);
+ if (pending)
+ return 1; /* Interrupted */
+ }
+
+ return 0;
+}
+
+static int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+ int i = 0;
+
+ ASSERT(dhd);
+ while (i < DHD_MAX_IFS) {
+ if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+ return i;
+ i++;
+ }
+
+ return DHD_BAD_IF;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+ int i = DHD_MAX_IFS;
+
+ ASSERT(dhd);
+
+ if (name == NULL || *name == '\0')
+ return 0;
+
+ while (--i > 0)
+ if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+ break;
+
+ DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+ return i; /* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ ASSERT(dhd);
+
+ if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+ return "<if_bad>";
+ }
+
+ if (dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+ return "<if_null>";
+ }
+
+ if (dhd->iflist[ifidx]->net)
+ return dhd->iflist[ifidx]->net->name;
+
+ return "<if_none>";
+}
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+ struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *mclist;
+#endif
+ uint32 allmulti, cnt;
+
+ wl_ioctl_t ioc;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+
+ NETIF_ADDR_LOCK(dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ cnt = netdev_mc_count(dev);
+#else
+ cnt = dev->mc_count;
+#endif
+ NETIF_ADDR_UNLOCK(dev);
+
+ /* Determine initial value of allmulti flag */
+ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+ /* Send down the multicast list first. */
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+ if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ return;
+ }
+
+ strcpy(bufp, "mcast_list");
+ bufp += strlen("mcast_list") + 1;
+
+ cnt = htol32(cnt);
+ memcpy(bufp, &cnt, sizeof(cnt));
+ bufp += sizeof(cnt);
+
+ NETIF_ADDR_LOCK(dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
+ }
+#else
+ for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
+#endif
+ NETIF_ADDR_UNLOCK(dev);
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ allmulti = cnt ? TRUE : allmulti;
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Now send the allmulti setting. This is based on the setting in the
+ * net_device flags, but might be modified above to be turned on if we
+ * were trying to set some addresses and dongle rejected it...
+ */
+
+ buflen = sizeof("allmulti") + sizeof(allmulti);
+ if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+ return;
+ }
+ allmulti = htol32(allmulti);
+
+ if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+ DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+ dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+ MFREE(dhd->pub.osh, buf, buflen);
+ return;
+ }
+
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set allmulti %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+ allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+ allmulti = htol32(allmulti);
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_PROMISC;
+ ioc.buf = &allmulti;
+ ioc.len = sizeof(allmulti);
+ ioc.set = TRUE;
+
+ ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set promisc %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+}
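+
+/* Layout sketch (editorial note) of the "mcast_list" buffer built above:
+ *
+ *	| "mcast_list\0" | uint32 cnt (LE) | cnt * ETHER_ADDR_LEN MAC addresses |
+ *
+ * The same name-string-plus-payload convention is what bcm_mkiovar() produces
+ * for the "allmulti" setting in this function.
+ */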
+
+static int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+{
+ char buf[32];
+ wl_ioctl_t ioc;
+ int ret;
+
+ DHD_TRACE(("%s enter\n", __FUNCTION__));
+ if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+ DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+ return -1;
+ }
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = 32;
+ ioc.set = TRUE;
+
+ ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+ } else {
+ memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+ }
+
+ return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+/* semaphore that the soft AP CODE waits on */
+extern struct semaphore ap_eth_sema;
+#endif
+
+static void
+dhd_op_if(dhd_if_t *ifp)
+{
+ dhd_info_t *dhd;
+ int ret = 0, err = 0;
+#ifdef SOFTAP
+ unsigned long flags;
+#endif
+
+ ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */
+
+ dhd = ifp->info;
+
+ DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+
+ switch (ifp->state) {
+ case WLC_E_IF_ADD:
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the WLC_E_IF_DEL event.
+ */
+ if (ifp->net != NULL) {
+ DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
+ __FUNCTION__, ifp->net->name));
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ }
+ /* Allocate etherdev, including space for private structure */
+ if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ ret = -ENOMEM;
+ }
+ if (ret == 0) {
+ strcpy(ifp->net->name, ifp->name);
+ memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
+ if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
+ DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
+ __FUNCTION__, err));
+ ret = -EOPNOTSUPP;
+ } else {
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ /* save ptr to wl0.1 netdev for use in wl_iw.c */
+ ap_net_dev = ifp->net;
+ /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
+ up(&ap_eth_sema);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif
+ DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
+ current->pid, ifp->net->name));
+ ifp->state = 0;
+ }
+ }
+ break;
+ case WLC_E_IF_DEL:
+ if (ifp->net != NULL) {
+ DHD_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n", __FUNCTION__));
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */
+ }
+ break;
+ default:
+ DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
+ ASSERT(!ifp->state);
+ break;
+ }
+
+ if (ret < 0) {
+ if (ifp->net) {
+ free_netdev(ifp->net);
+ }
+ dhd->iflist[ifp->idx] = NULL;
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ if (ifp->net == ap_net_dev)
+ ap_net_dev = NULL; /* NULL SOFTAP global as well */
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+ }
+}
+
+static int
+_dhd_sysioc_thread(void *data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+ int i;
+#ifdef SOFTAP
+ bool in_ap = FALSE;
+ unsigned long flags;
+#endif
+
+ DAEMONIZE("dhd_sysioc");
+
+ while (down_interruptible(&dhd->sysioc_sem) == 0) {
+ dhd_os_start_lock(&dhd->pub);
+ dhd_os_wake_lock(&dhd->pub);
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ DHD_TRACE(("%s: interface %d\n",__FUNCTION__, i));
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ in_ap = (ap_net_dev != NULL);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+ if (dhd->iflist[i]->state)
+ dhd_op_if(dhd->iflist[i]);
+#ifdef SOFTAP
+ if (dhd->iflist[i] == NULL) {
+					DHD_TRACE(("%s: interface %d has just been removed!\n\n", __FUNCTION__, i));
+ continue;
+ }
+
+ if (in_ap && dhd->set_macaddress) {
+ DHD_TRACE(("attempt to set MAC for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name));
+ dhd->set_macaddress = FALSE;
+ continue;
+ }
+
+ if (in_ap && dhd->set_multicast) {
+ DHD_TRACE(("attempt to set MULTICAST list for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name));
+ dhd->set_multicast = FALSE;
+ continue;
+ }
+#endif /* SOFTAP */
+ if (dhd->set_multicast) {
+ dhd->set_multicast = FALSE;
+ _dhd_set_multicast_list(dhd, i);
+ }
+ if (dhd->set_macaddress) {
+ dhd->set_macaddress = FALSE;
+ _dhd_set_mac_address(dhd, i, &dhd->macvalue);
+ }
+ }
+ }
+ dhd_os_wake_unlock(&dhd->pub);
+ dhd_os_start_unlock(&dhd->pub);
+ }
+ DHD_TRACE(("%s: stopped\n",__FUNCTION__));
+ complete_and_exit(&dhd->sysioc_exited, 0);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+ int ret = 0;
+
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+
+ DHD_TRACE(("%s: Enter\n",__FUNCTION__));
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return -1;
+
+ ASSERT(dhd->sysioc_pid >= 0);
+ memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+ dhd->set_macaddress = TRUE;
+ up(&dhd->sysioc_sem);
+
+ return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ifidx;
+
+ DHD_TRACE(("%s: Enter\n",__FUNCTION__));
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return;
+
+ ASSERT(dhd->sysioc_pid >= 0);
+ dhd->set_multicast = TRUE;
+ up(&dhd->sysioc_sem);
+}
+
+int
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+
+ /* Reject if down */
+ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+ return -ENODEV;
+ }
+
+ /* Update multicast statistic */
+ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_ADDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ struct ether_header *eh = (struct ether_header *)pktdata;
+
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ dhdp->tx_multicast++;
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+ atomic_inc(&dhd->pend_8021x_cnt);
+ }
+
+ /* Look into the packet and update the packet priority */
+ if ((PKTPRIO(pktbuf) == 0))
+ pktsetprio(pktbuf, FALSE);
+
+ /* If the protocol uses a data header, apply it */
+ dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+ /* Use bus module to send data frame */
+#ifdef BCMDBUS
+ ret = dbus_send_pkt(dhdp->dbus, pktbuf, NULL /* pktinfo */);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMDBUS */
+
+ return ret;
+}
+
+static int
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+ int ret;
+ void *pktbuf;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ int ifidx;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_wake_lock(&dhd->pub);
+
+ /* Reject if down */
+ if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
+ DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n",
+ __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+ netif_stop_queue(net);
+ /* Send Event when bus down detected during data session */
+ if (dhd->pub.busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
+ net_os_send_hang_message(net);
+ }
+ dhd_os_wake_unlock(&dhd->pub);
+ return -ENODEV;
+ }
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+ netif_stop_queue(net);
+ dhd_os_wake_unlock(&dhd->pub);
+ return -ENODEV;
+ }
+
+ /* Make sure there's enough room for any header */
+ if (skb_headroom(skb) < dhd->pub.hdrlen) {
+ struct sk_buff *skb2;
+
+ DHD_INFO(("%s: insufficient headroom\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dhd->pub.tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen);
+ dev_kfree_skb(skb);
+ if ((skb = skb2) == NULL) {
+ DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ /* Convert to packet */
+ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+ DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+done:
+ if (ret)
+ dhd->pub.dstats.tx_dropped++;
+ else
+ dhd->pub.tx_packets++;
+
+ dhd_os_wake_unlock(&dhd->pub);
+
+ /* Return ok: we always eat the packet */
+ return 0;
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+ struct net_device *net;
+ dhd_info_t *dhd = dhdp->info;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhdp->txoff = state;
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ net = dhd->iflist[ifidx]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+}
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ uchar *eth;
+ uint len;
+ void * data, *pnext, *save_pktbuf;
+ int i;
+ dhd_if_t *ifp;
+ wl_event_msg_t event;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ save_pktbuf = pktbuf;
+
+ for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+
+ pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		/* Get the protocol, maintaining the skb around eth_type_trans().
+		 * The main reason for this hack is a limitation of Linux 2.4,
+		 * where 'eth_type_trans' performs its internal skb_pull using
+		 * 'net->hard_header_len' instead of ETH_HLEN. To avoid copying
+		 * packets coming from the network stack when the BDC and hardware
+		 * headers are added, 'net->hard_header_len' is set at network
+		 * interface registration to ETH_HLEN plus the extra space those
+		 * headers require, not just ETH_HLEN.
+		 */
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ dhd->pub.rx_multicast++;
+ }
+
+ skb->data = eth;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Process special event packets and then discard them */
+ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM)
+ dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ skb->mac_header,
+#else
+ skb->mac.raw,
+#endif
+ &event,
+ &data);
+
+ ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+ if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
+ ifp = dhd->iflist[ifidx];
+
+ if (ifp->net)
+ ifp->net->last_rx = jiffies;
+
+ dhdp->dstats.rx_bytes += skb->len;
+ dhdp->rx_packets++; /* Local count */
+
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
+#else
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+ }
+ }
+ dhd_os_wake_lock_timeout_enable(dhdp);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+ /* Linux version has nothing to do */
+ return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+ uint ifidx;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
+
+ dhd_prot_hdrpull(dhdp, &ifidx, txp);
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
+
+ if (type == ETHER_TYPE_802_1X)
+ atomic_dec(&dhd->pend_8021x_cnt);
+
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_if_t *ifp;
+ int ifidx;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF)
+ return NULL;
+
+ ifp = dhd->iflist[ifidx];
+ ASSERT(dhd && ifp);
+
+ if (dhd->pub.up) {
+ /* Use the protocol to get dongle stats */
+ dhd_prot_dstats(&dhd->pub);
+ }
+
+ /* Copy dongle stats to net device stats */
+ ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
+ ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
+ ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
+ ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
+ ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
+ ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
+ ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
+ ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
+ ifp->stats.multicast = dhd->pub.dstats.multicast;
+
+ return &ifp->stats;
+}
+
+static int
+dhd_watchdog_thread(void *data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+#ifdef DHD_SCHED
+ if (dhd_watchdog_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+ dhd_watchdog_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+#endif /* DHD_SCHED */
+
+ DAEMONIZE("dhd_watchdog");
+
+ /* Run until signal received */
+ while (1) {
+ if (down_interruptible (&dhd->watchdog_sem) == 0) {
+ dhd_os_sdlock(&dhd->pub);
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ }
+ dhd_os_sdunlock(&dhd->pub);
+ dhd_os_wake_unlock(&dhd->pub);
+ } else {
+ break;
+ }
+ }
+
+ complete_and_exit(&dhd->watchdog_exited, 0);
+}
+
+static void
+dhd_watchdog(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ dhd_os_wake_lock(&dhd->pub);
+ if (dhd->pub.dongle_reset) {
+ dhd_os_wake_unlock(&dhd->pub);
+ return;
+ }
+
+ if (dhd->watchdog_pid >= 0) {
+ up(&dhd->watchdog_sem);
+ return;
+ }
+
+ dhd_os_sdlock(&dhd->pub);
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd_os_sdunlock(&dhd->pub);
+ dhd_os_wake_unlock(&dhd->pub);
+}
+
+static int
+dhd_dpc_thread(void *data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+#ifdef DHD_SCHED
+ if (dhd_dpc_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+#endif /* DHD_SCHED */
+
+ DAEMONIZE("dhd_dpc");
+
+ /* Run until signal received */
+ while (1) {
+ if (down_interruptible(&dhd->dpc_sem) == 0) {
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ if (dhd_bus_dpc(dhd->pub.bus)) {
+ up(&dhd->dpc_sem);
+ }
+ else {
+ dhd_os_wake_unlock(&dhd->pub);
+ }
+ } else {
+ if (dhd->pub.up)
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ dhd_os_wake_unlock(&dhd->pub);
+ }
+ }
+ else
+ break;
+ }
+
+ complete_and_exit(&dhd->dpc_exited, 0);
+}
+
+static void
+dhd_dpc(ulong data)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)data;
+
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ if (dhd_bus_dpc(dhd->pub.bus))
+ tasklet_schedule(&dhd->tasklet);
+ } else {
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ }
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ dhd_os_wake_lock(dhdp);
+ if (dhd->dpc_pid >= 0) {
+ up(&dhd->dpc_sem);
+ return;
+ }
+
+ tasklet_schedule(&dhd->tasklet);
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = FALSE;
+
+ strcpy(buf, "toe_ol");
+ if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ /* Check for older dongle image that doesn't support toe_ol */
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: toe not supported by device\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return -EOPNOTSUPP;
+ }
+
+ DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ memcpy(toe_ol, buf, sizeof(uint32));
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int toe, ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = TRUE;
+
+ /* Set toe_ol as requested */
+
+ strcpy(buf, "toe_ol");
+ memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+ if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+ dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ /* Enable toe globally only if any components are enabled. */
+
+ toe = (toe_ol != 0);
+
+ strcpy(buf, "toe");
+ memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+ if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void dhd_ethtool_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+ sprintf(info->driver, "wl");
+ sprintf(info->version, "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+ .get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ uint32 cmd;
+#ifdef TOE
+ struct ethtool_value edata;
+ uint32 toe_cmpnt, csum_dir;
+ int ret;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+		/* Copy out any requested driver name */
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ strncpy(drvname, info.driver, sizeof(info.driver));
+ drvname[sizeof(info.driver)-1] = '\0';
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if dhd requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ sprintf(info.driver, "dhd");
+ strcpy(info.version, EPI_VERSION_STR);
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!dhd->pub.up) {
+ DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (dhd->pub.iswl)
+ sprintf(info.driver, "wl");
+ else
+ sprintf(info.driver, "xx");
+
+ sprintf(info.version, "%lu", dhd->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+ (int)sizeof(drvname), drvname, info.driver));
+ break;
+
+#ifdef TOE
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ break;
+#endif /* TOE */
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_ioctl_t ioc;
+ int bcmerror = 0;
+ int buflen = 0;
+ void *buf = NULL;
+ uint driver = 0;
+ int ifidx;
+ bool is_set_key_cmd;
+ int ret;
+
+ dhd_os_wake_lock(&dhd->pub);
+
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhd->pub.hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ dhd_os_wake_lock_timeout_enable(&dhd->pub);
+ dhd_os_wake_unlock(&dhd->pub);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+ if (ifidx == DHD_BAD_IF) {
+ dhd_os_wake_unlock(&dhd->pub);
+ return -1;
+ }
+
+#if defined(CONFIG_WIRELESS_EXT)
+ /* linux wireless extensions */
+ if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+ /* may recurse, do NOT lock */
+ ret = wl_iw_ioctl(net, ifr, cmd);
+ dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+ if (cmd == SIOCETHTOOL) {
+ ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+ dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+ }
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+ if (cmd != SIOCDEVPRIVATE) {
+ dhd_os_wake_unlock(&dhd->pub);
+ return -EOPNOTSUPP;
+ }
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+
+ /* Copy out any buffer passed */
+ if (ioc.buf) {
+ buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+ /* optimization for direct ioctl calls from kernel */
+ /*
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ buf = ioc.buf;
+ } else {
+ */
+ {
+ if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
+ bcmerror = -BCME_NOMEM;
+ goto done;
+ }
+ if (copy_from_user(buf, ioc.buf, buflen)) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+ }
+ }
+
+	/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ bcmerror = -BCME_EPERM;
+ goto done;
+ }
+
+ /* check for local dhd ioctl and handle it */
+ if (driver == DHD_IOCTL_MAGIC) {
+ bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+ if (bcmerror)
+ dhd->pub.bcmerror = bcmerror;
+ goto done;
+ }
+
+ /* send to dongle (must be up, and wl) */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("%s DONGLE_DOWN\n", __FUNCTION__));
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+
+ if (!dhd->pub.iswl) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+
+ /* Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+ * prevent M4 encryption.
+ */
+ is_set_key_cmd = ((ioc.cmd == WLC_SET_KEY) ||
+ ((ioc.cmd == WLC_SET_VAR) &&
+ !(strncmp("wsec_key", ioc.buf, 9))) ||
+ ((ioc.cmd == WLC_SET_VAR) &&
+ !(strncmp("bsscfg:wsec_key", ioc.buf, 15))));
+ if (is_set_key_cmd) {
+ dhd_wait_pend8021x(net);
+ }
+
+ bcmerror = dhd_prot_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+
+done:
+ if ((bcmerror == -ETIMEDOUT) || ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+ (!dhd->pub.dongle_reset))) {
+ DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
+ net_os_send_hang_message(net);
+ }
+
+ if (!bcmerror && buf && ioc.buf) {
+ if (copy_to_user(ioc.buf, buf, buflen))
+ bcmerror = -EFAULT;
+ }
+
+ if (buf)
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ dhd_os_wake_unlock(&dhd->pub);
+
+ return OSL_ERROR(bcmerror);
+}
+
+static int
+dhd_stop(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+ DHD_TRACE(("%s: Enter %s\n", __FUNCTION__, net->name));
+#if !defined(IGNORE_ETH0_DOWN)
+ if (dhd->pub.up == 0) {
+ return 0;
+ }
+
+ /* Set state and stop OS transmissions */
+ dhd->pub.up = 0;
+ netif_stop_queue(net);
+#else
+ DHD_ERROR(("BYPASS %s:due to BRCM compilation : under investigation ...\n", __FUNCTION__));
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+ dhd->pub.hang_was_sent = 0;
+ OLD_MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static int
+dhd_open(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+#ifdef TOE
+ uint32 toe_ol;
+#endif
+ int ifidx;
+
+ /* Force start if ifconfig_up gets called before START command */
+ wl_control_wl_start(net);
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ if (ifidx == DHD_BAD_IF)
+ return -1;
+
+ if ((dhd->iflist[ifidx]) && (dhd->iflist[ifidx]->state == WLC_E_IF_DEL)) {
+ DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+ return -1;
+ }
+
+ if (ifidx == 0) { /* do it only for primary eth0 */
+
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+
+ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+ /* Get current TOE mode from dongle */
+ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif
+ }
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ dhd->pub.up = 1;
+
+ OLD_MOD_INC_USE_COUNT;
+ return 0;
+}
+
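+/* Thin wrappers so the bus code can create and destroy the OS abstraction
+ * layer (OSL) handle without knowing the Linux-specific osl_attach() details.
+ */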
+osl_t *
+dhd_osl_attach(void *pdev, uint bustype)
+{
+ return osl_attach(pdev, bustype, TRUE);
+}
+
+void
+dhd_osl_detach(osl_t *osh)
+{
+ if (MALLOCED(osh)) {
+ DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ }
+ osl_detach(osh);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && 1
+ up(&dhd_registration_sem);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+}
+
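+/* Register interface 'ifidx' in the iflist. When no net_device handle is
+ * supplied, creation of the device is deferred to the sysioc thread
+ * (state WLC_E_IF_ADD); otherwise the supplied handle is linked directly.
+ */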
+int
+dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+ uint8 *mac_addr, uint32 flags, uint8 bssidx)
+{
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+
+ ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+
+ ifp = dhd->iflist[ifidx];
+ if (!ifp && !(ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t)))) {
+ DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ memset(ifp, 0, sizeof(dhd_if_t));
+ ifp->info = dhd;
+ dhd->iflist[ifidx] = ifp;
+ strncpy(ifp->name, name, IFNAMSIZ);
+ ifp->name[IFNAMSIZ] = '\0';
+ if (mac_addr != NULL)
+ memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ if (handle == NULL) {
+ ifp->state = WLC_E_IF_ADD;
+ ifp->idx = ifidx;
+ ASSERT(dhd->sysioc_pid >= 0);
+ up(&dhd->sysioc_sem);
+ } else
+ ifp->net = (struct net_device *)handle;
+
+ return 0;
+}
+
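+/* Mark interface 'ifidx' for deletion; the actual teardown is performed by
+ * the sysioc thread (state WLC_E_IF_DEL).
+ */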
+void
+dhd_del_if(dhd_info_t *dhd, int ifidx)
+{
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+
+ ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
+ ifp = dhd->iflist[ifidx];
+ if (!ifp) {
+ DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
+ return;
+ }
+
+ ifp->state = WLC_E_IF_DEL;
+ ifp->idx = ifidx;
+ ASSERT(dhd->sysioc_pid >= 0);
+ up(&dhd->sysioc_sem);
+}
+
+
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+ dhd_info_t *dhd = NULL;
+ struct net_device *net;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ /* Update firmware and nvram paths if they were provided as module parameters */
+ if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
+ strcpy(fw_path, firmware_path);
+ if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
+ strcpy(nv_path, nvram_path);
+
+ /* Allocate etherdev, including space for private structure */
+ if (!(net = alloc_etherdev(sizeof(dhd)))) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Allocate primary dhd_info */
+ if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
+ DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+ goto fail;
+ }
+
+ memset(dhd, 0, sizeof(dhd_info_t));
+
+ /*
+ * Save the dhd_info into the priv
+ */
+ memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+ dhd->pub.osh = osh;
+
+ /* Set network interface name if it was provided as module parameter */
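+ /* If the supplied name has no trailing digit, append "%d" so the kernel
+ * assigns a unit number when the device is registered.
+ */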
+ if (iface_name[0]) {
+ int len;
+ char ch;
+ strncpy(net->name, iface_name, IFNAMSIZ);
+ net->name[IFNAMSIZ - 1] = 0;
+ len = strlen(net->name);
+ ch = net->name[len - 1];
+ if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+ strcat(net->name, "%d");
+ }
+
+ if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+ goto fail;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+
+ mutex_init(&dhd->proto_sem);
+ /* Initialize other structure content */
+ init_waitqueue_head(&dhd->ioctl_resp_wait);
+ init_waitqueue_head(&dhd->ctrl_wait);
+
+ /* Initialize the spinlocks */
+ spin_lock_init(&dhd->sdlock);
+ spin_lock_init(&dhd->txqlock);
+ spin_lock_init(&dhd->dhd_lock);
+
+ /* Initialize Wakelock stuff */
+ spin_lock_init(&dhd->wl_lock);
+ dhd->wl_count = 0;
+ dhd->wl_packet = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+ wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_init(&dhd->wl_start_lock);
+#endif
+ /* Link to info module */
+ dhd->pub.info = dhd;
+
+ /* Link to bus module */
+ dhd->pub.bus = bus;
+ dhd->pub.hdrlen = bus_hdrlen;
+
+ /* Attach and link in the protocol */
+ if (dhd_prot_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_prot_attach failed\n"));
+ goto fail;
+ }
+#if defined(CONFIG_WIRELESS_EXT)
+ /* Attach and link in the iw */
+ if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+ DHD_ERROR(("wl_iw_attach failed\n"));
+ goto fail;
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ /* Set up the watchdog timer */
+ init_timer(&dhd->timer);
+ dhd->timer.data = (ulong)dhd;
+ dhd->timer.function = dhd_watchdog;
+
+ /* Initialize thread based operation and lock */
+ mutex_init(&dhd->sdsem);
+ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
+ dhd->threads_only = TRUE;
+ }
+ else {
+ dhd->threads_only = FALSE;
+ }
+
+ if (dhd_dpc_prio >= 0) {
+ /* Initialize watchdog thread */
+ sema_init(&dhd->watchdog_sem, 0);
+ init_completion(&dhd->watchdog_exited);
+ dhd->watchdog_pid = kernel_thread(dhd_watchdog_thread, dhd, 0);
+ } else {
+ dhd->watchdog_pid = -1;
+ }
+
+ /* Set up the bottom half handler */
+ if (dhd_dpc_prio >= 0) {
+ /* Initialize DPC thread */
+ sema_init(&dhd->dpc_sem, 0);
+ init_completion(&dhd->dpc_exited);
+ dhd->dpc_pid = kernel_thread(dhd_dpc_thread, dhd, 0);
+ } else {
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->dpc_pid = -1;
+ }
+
+ if (dhd_sysioc) {
+ sema_init(&dhd->sysioc_sem, 0);
+ init_completion(&dhd->sysioc_exited);
+ dhd->sysioc_pid = kernel_thread(_dhd_sysioc_thread, dhd, 0);
+ } else {
+ dhd->sysioc_pid = -1;
+ }
+
+ /*
+ * Save the dhd_info into the priv
+ */
+ memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ register_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&dhd->pub.wow_wakelock, WAKE_LOCK_SUSPEND, "wow_wake_lock");
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+ dhd->early_suspend.suspend = dhd_early_suspend;
+ dhd->early_suspend.resume = dhd_late_resume;
+ register_early_suspend(&dhd->early_suspend);
+#endif
+
+ register_inetaddr_notifier(&dhd_notifier);
+
+ return &dhd->pub;
+
+fail:
+ if (net)
+ free_netdev(net);
+ if (dhd)
+ dhd_detach(&dhd->pub);
+
+ return NULL;
+}
+
+
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+ int ret = -1;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+#ifdef EMBEDDED_PLATFORM
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+#endif /* EMBEDDED_PLATFORM */
+
+ ASSERT(dhd);
+
+ DHD_TRACE(("%s: \n", __FUNCTION__));
+
+ dhd_os_sdlock(dhdp);
+
+ /* try to download image and nvram to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DOWN) {
+ if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ fw_path, nv_path))) {
+ DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
+ __FUNCTION__, fw_path, nv_path));
+ dhd_os_sdunlock(dhdp);
+ return -1;
+ }
+ }
+
+ /* Start the watchdog timer */
+ dhd->pub.tickcnt = 0;
+ dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+ /* Bring up the bus */
+ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+ DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+ dhd_os_sdunlock(dhdp);
+ return ret;
+ }
+#if defined(OOB_INTR_ONLY)
+ /* Host registration for OOB interrupt */
+ if (bcmsdh_register_oob_intr(dhdp)) {
+ dhd->wd_timer_valid = FALSE;
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s Host failed to resgister for OOB\n", __FUNCTION__));
+ dhd_os_sdunlock(dhdp);
+ return -ENODEV;
+ }
+
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* If bus is not ready, can't come up */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ dhd->wd_timer_valid = FALSE;
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+ dhd_os_sdunlock(dhdp);
+ return -ENODEV;
+ }
+
+ dhd_os_sdunlock(dhdp);
+
+#ifdef EMBEDDED_PLATFORM
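+ /* Fetch the dongle's current event mask, then OR in the events the host
+ * driver needs delivered (association, link, scan-complete, etc.).
+ */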
+ bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ dhdcdc_query_ioctl(dhdp, 0, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
+ bcopy(iovbuf, dhdp->eventmask, WL_EVENTING_MASK_LEN);
+
+ setbit(dhdp->eventmask, WLC_E_SET_SSID);
+ setbit(dhdp->eventmask, WLC_E_PRUNE);
+ setbit(dhdp->eventmask, WLC_E_AUTH);
+ setbit(dhdp->eventmask, WLC_E_REASSOC);
+ setbit(dhdp->eventmask, WLC_E_REASSOC_IND);
+ setbit(dhdp->eventmask, WLC_E_DEAUTH_IND);
+ setbit(dhdp->eventmask, WLC_E_DISASSOC_IND);
+ setbit(dhdp->eventmask, WLC_E_DISASSOC);
+ setbit(dhdp->eventmask, WLC_E_JOIN);
+ setbit(dhdp->eventmask, WLC_E_ASSOC_IND);
+ setbit(dhdp->eventmask, WLC_E_PSK_SUP);
+ setbit(dhdp->eventmask, WLC_E_LINK);
+ setbit(dhdp->eventmask, WLC_E_NDIS_LINK);
+ setbit(dhdp->eventmask, WLC_E_MIC_ERROR);
+ setbit(dhdp->eventmask, WLC_E_PMKID_CACHE);
+ setbit(dhdp->eventmask, WLC_E_TXFAIL);
+ setbit(dhdp->eventmask, WLC_E_JOIN_START);
+ setbit(dhdp->eventmask, WLC_E_SCAN_COMPLETE);
+ setbit(dhdp->eventmask, WLC_E_RELOAD);
+#ifdef PNO_SUPPORT
+ setbit(dhdp->eventmask, WLC_E_PFN_NET_FOUND);
+#endif /* PNO_SUPPORT */
+
+/* enable dongle roaming event */
+ setbit(dhdp->eventmask, WLC_E_ROAM);
+
+ dhdp->pktfilter_count = 4;
+ /* Setup filter to allow only unicast */
+ dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00";
+ dhdp->pktfilter[1] = NULL;
+ dhdp->pktfilter[2] = NULL;
+ dhdp->pktfilter[3] = NULL;
+#endif /* EMBEDDED_PLATFORM */
+
+ /* Bus is ready, do any protocol initialization */
+ if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+ return ret;
+
+ return 0;
+}
+
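+/* Build an iovar request ("name" + '\0' + payload) with bcm_mkiovar() and
+ * issue it via WLC_SET_VAR/WLC_GET_VAR; on a get, the response is copied
+ * back into cmd_buf.
+ */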
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+ char buf[strlen(name) + 1 + cmd_len];
+ int len = sizeof(buf);
+ wl_ioctl_t ioc;
+ int ret;
+
+ len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = set;
+
+ ret = dhd_prot_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (!set && ret >= 0)
+ memcpy(cmd_buf, buf, cmd_len);
+
+ return ret;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+ .ndo_open = dhd_open,
+ .ndo_stop = dhd_stop,
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+};
+
+static struct net_device_ops dhd_ops_virt = {
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+};
+#endif
+
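+/* inetaddr notifier callback: clean up ARP (offload) state whenever an IPv4
+ * address on one of this driver's interfaces goes up or down.
+ */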
+static int dhd_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ dhd_info_t *dhd;
+ dhd_pub_t *dhd_pub;
+
+ if (!ifa)
+ return NOTIFY_DONE;
+
+ dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+ dhd_pub = &dhd->pub;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+ if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
+#else
+ if (ifa->ifa_dev->dev->open == &dhd_open) {
+#endif
+ switch (event) {
+ case NETDEV_UP:
+ DHD_TRACE(("%s: [%s] Up IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+ dhd_arp_cleanup(dhd_pub);
+ break;
+
+ case NETDEV_DOWN:
+ DHD_TRACE(("%s: [%s] Down IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+ dhd_arp_cleanup(dhd_pub);
+ break;
+
+ default:
+ DHD_TRACE(("%s: [%s] Event: %lu\n",
+ __FUNCTION__, ifa->ifa_label, event));
+ break;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+int
+dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct net_device *net;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ net = dhd->iflist[ifidx]->net;
+
+ ASSERT(net);
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->get_stats = dhd_get_stats;
+ net->do_ioctl = dhd_ioctl_entry;
+ net->hard_start_xmit = dhd_start_xmit;
+ net->set_mac_address = dhd_set_mac_address;
+ net->set_multicast_list = dhd_set_multicast_list;
+ net->open = net->stop = NULL;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &dhd_ops_virt;
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+ net->open = dhd_open;
+ net->stop = dhd_stop;
+#else
+ net->netdev_ops = &dhd_ops_pri;
+#endif
+
+ /*
+ * We have to use the primary MAC for virtual interfaces
+ */
+ if (ifidx != 0) {
+ /* for virtual interfaces use the primary MAC */
+ memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ }
+
+ if (ifidx == 1) {
+ DHD_TRACE(("%s ACCESS POINT MAC: \n", __FUNCTION__));
+ /* ACCESSPOINT INTERFACE CASE */
+ temp_addr[0] |= 0x02; /* set the locally administered address bit */
+ }
+ net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+ net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+ net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen;
+
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+ if (register_netdev(net) != 0) {
+ DHD_ERROR(("%s: couldn't register the net device\n", __FUNCTION__));
+ goto fail;
+ }
+
+ printf("%s: Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", net->name,
+ dhd->pub.mac.octet[0], dhd->pub.mac.octet[1], dhd->pub.mac.octet[2],
+ dhd->pub.mac.octet[3], dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]);
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if defined(CONFIG_FIRST_SCAN)
+#ifdef SOFTAP
+ if (ifidx == 0)
+ /* Don't call for SOFTAP Interface in SOFTAP MODE */
+ wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#else
+ wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif /* SOFTAP */
+#endif /* CONFIG_FIRST_SCAN */
+#endif /* CONFIG_WIRELESS_EXT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ up(&dhd_registration_sem);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+ return 0;
+
+fail:
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+ return BCME_ERROR;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd) {
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+ /* Stop the bus module */
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_unregister_oob_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* Clear the watchdog timer */
+ dhd->wd_timer_valid = FALSE;
+ del_timer_sync(&dhd->timer);
+ }
+ }
+}
+
+void
+dhd_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd) {
+ dhd_if_t *ifp;
+ int i;
+
+ unregister_inetaddr_notifier(&dhd_notifier);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ if (dhd->early_suspend.suspend)
+ unregister_early_suspend(&dhd->early_suspend);
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+#if defined(CONFIG_WIRELESS_EXT)
+ /* Attach and link in the iw */
+ wl_iw_detach();
+#endif
+ if (dhd->sysioc_pid >= 0) {
+ KILL_PROC(dhd->sysioc_pid, SIGTERM);
+ wait_for_completion(&dhd->sysioc_exited);
+ }
+
+ for (i = 1; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i]) {
+ dhd->iflist[i]->state = WLC_E_IF_DEL;
+ dhd->iflist[i]->idx = i;
+ dhd_op_if(dhd->iflist[i]);
+ }
+
+ ifp = dhd->iflist[0];
+ ASSERT(ifp);
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+ if (ifp->net->open) {
+#else
+ if (ifp->net->netdev_ops == &dhd_ops_pri) {
+#endif
+ dhd_stop(ifp->net);
+ unregister_netdev(ifp->net);
+ }
+
+ if (dhd->watchdog_pid >= 0)
+ {
+ KILL_PROC(dhd->watchdog_pid, SIGTERM);
+ wait_for_completion(&dhd->watchdog_exited);
+ }
+
+ if (dhd->dpc_pid >= 0)
+ {
+ KILL_PROC(dhd->dpc_pid, SIGTERM);
+ wait_for_completion(&dhd->dpc_exited);
+ }
+ else
+ tasklet_kill(&dhd->tasklet);
+
+ dhd_bus_detach(dhdp);
+
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&dhdp->wow_wakelock);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ unregister_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+ free_netdev(ifp->net);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&dhd->wl_wifi);
+ wake_lock_destroy(&dhd->wl_rxwake);
+#endif
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+ }
+ }
+}
+
+static void __exit
+dhd_module_cleanup(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_bus_unregister();
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+ wifi_del_dev();
+#endif
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+}
+
+static int __init
+dhd_module_init(void)
+{
+ int error;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Sanity check on the module parameters */
+ do {
+ /* Both watchdog and DPC as tasklets are ok */
+ if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
+ break;
+
+ /* If both watchdog and DPC are threads, TX must be deferred */
+ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
+ break;
+
+ DHD_ERROR(("Invalid module parameters.\n"));
+ return -EINVAL;
+ } while (0);
+
+ /* Call customer gpio to turn on power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+ sema_init(&wifi_control_sem, 0);
+
+ error = wifi_add_dev();
+ if (error) {
+ DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
+ goto fail_0;
+ }
+
+ /* Wait for the callback after platform_driver_register completes, or exit with an error */
+ if (down_timeout(&wifi_control_sem, msecs_to_jiffies(5000)) != 0) {
+ error = -EINVAL;
+ DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
+ goto fail_1;
+ }
+#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ sema_init(&dhd_registration_sem, 0);
+#endif
+
+ error = dhd_bus_register();
+
+ if (!error)
+ printf("\n%s\n", dhd_version);
+ else {
+ DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+ goto fail_1;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ /*
+ * Wait until the MMC sdio_register_driver callback has run and attached
+ * the driver. This synchronizes the return from insmod with the kernel
+ * MMC SDIO device callback registration.
+ */
+ if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
+ error = -EINVAL;
+ DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
+ goto fail_2;
+ }
+#endif
+ return error;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+fail_2:
+ dhd_bus_unregister();
+#endif
+fail_1:
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+ wifi_del_dev();
+fail_0:
+#endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+
+ return error;
+}
+
+module_init(dhd_module_init);
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS-specific functions required to implement the DHD driver in an OS-independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_lock(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_unlock(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+ return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+ dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
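+/* Sleep (interruptibly) until *condition becomes true, a signal is pending,
+ * or the ioctl response timeout expires; returns the remaining jiffies.
+ */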
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ DECLARE_WAITQUEUE(wait, current);
+ int timeout = dhd_ioctl_timeout_msec;
+
+ /* Convert timeout in milliseconds to jiffies */
+ /* timeout = timeout * HZ / 1000; */
+ timeout = msecs_to_jiffies(timeout);
+
+ /* Wait until control frame is available */
+ add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ smp_mb();
+ while (!(*condition) && (!signal_pending(current) && timeout)) {
+ timeout = schedule_timeout(timeout);
+ smp_mb();
+ }
+
+ if (signal_pending(current))
+ *pending = TRUE;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+
+ return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (waitqueue_active(&dhd->ioctl_resp_wait)) {
+ wake_up_interruptible(&dhd->ioctl_resp_wait);
+ }
+
+ return 0;
+}
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+ int del_timer_flag = FALSE;
+
+ flags = dhd_os_spin_lock(pub);
+
+ /* don't start the wd until fw is loaded */
+ if (pub->busstate != DHD_BUS_DOWN) {
+ if (wdtick) {
+ dhd_watchdog_ms = (uint)wdtick;
+ dhd->wd_timer_valid = TRUE;
+ /* Re-arm the timer with the last watchdog period */
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ } else if (dhd->wd_timer_valid == TRUE) {
+ /* Totally stop the timer */
+ dhd->wd_timer_valid = FALSE;
+ del_timer_flag = TRUE;
+ }
+ }
+ dhd_os_spin_unlock(pub, flags);
+ if (del_timer_flag) {
+ del_timer_sync(&dhd->timer);
+ }
+}
+
+void *
+dhd_os_open_image(char *filename)
+{
+ struct file *fp;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+ /*
+ * 2.6.11 (FC4) supports filp_open() but later revs don't?
+ * Alternative:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ * ???
+ */
+ if (IS_ERR(fp))
+ fp = NULL;
+
+ return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+
+ if (!image)
+ return 0;
+
+ rdlen = kernel_read(fp, fp->f_pos, buf, len);
+ if (rdlen > 0)
+ fp->f_pos += rdlen;
+
+ return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+ if (image)
+ filp_close((struct file *)image, NULL);
+}
+
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd->threads_only)
+ mutex_lock(&dhd->sdsem);
+ else
+ spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd->threads_only)
+ mutex_unlock(&dhd->sdsem);
+ else
+ spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->txqlock);
+}
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+ dhd_os_sdlock(pub);
+}
+
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+ dhd_os_sdunlock(pub);
+}
+
+#ifdef DHD_USE_STATIC_BUF
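+/* Hand out preallocated memory via the platform's wifi_control_data hook;
+ * when that support is not compiled in, fall back to a normal MALLOC.
+ */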
+void * dhd_os_prealloc(int section, unsigned long size)
+{
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+ void *alloc_ptr = NULL;
+ if (wifi_control_data && wifi_control_data->mem_prealloc)
+ {
+ alloc_ptr = wifi_control_data->mem_prealloc(section, size);
+ if (alloc_ptr)
+ {
+ DHD_INFO(("success alloc section %d\n", section));
+ bzero(alloc_ptr, size);
+ return alloc_ptr;
+ }
+ }
+
+ DHD_ERROR(("can't alloc section %d\n", section));
+ return 0;
+#else
+ return MALLOC(0, size);
+#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+}
+#endif /* DHD_USE_STATIC_BUF */
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+ int res = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+ if (res == 0)
+ return &dhd->iw.wstats;
+ else
+ return NULL;
+}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data)
+{
+ int bcmerror = 0;
+
+ ASSERT(dhd != NULL);
+
+ bcmerror = wl_host_event(dhd, ifidx, pktdata, event, data);
+ if (bcmerror != BCME_OK)
+ return (bcmerror);
+
+#if defined(CONFIG_WIRELESS_EXT)
+ ASSERT(dhd->iflist[*ifidx] != NULL);
+
+ if (ntoh32(event->event_type) == WLC_E_IF) {
+ DHD_INFO(("<0> interface:%d OP:%d don't pass to wext,"
+ "net_device might not be created yet\n",
+ *ifidx, ntoh32(event->event_type)));
+ return bcmerror;
+ }
+
+ ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+ if (dhd->iflist[*ifidx]->net)
+ wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+ default:
+ break;
+ }
+}
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ dhd_os_sdunlock(dhd);
+ wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
+ dhd_os_sdlock(dhd);
+#endif
+ return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ if (waitqueue_active(&dhdinfo->ctrl_wait))
+ wake_up_interruptible(&dhdinfo->ctrl_wait);
+#endif
+ return;
+}
+
+int
+dhd_dev_reset(struct net_device *dev, uint8 flag)
+{
+ int ret;
+
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ ret = dhd_bus_devreset(&dhd->pub, flag);
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ DHD_ERROR(("%s: WLAN %s DONE\n", __FUNCTION__, flag ? "OFF" : "ON"));
+
+ return ret;
+}
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd) {
+ ret = dhd->pub.suspend_disable_flag;
+ dhd->pub.suspend_disable_flag = val;
+ }
+ return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val)
+{
+ int ret = 0;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd) {
+ dhd_os_proto_block(&dhd->pub);
+ ret = dhd_set_suspend(val, &dhd->pub);
+ dhd_os_proto_unblock(&dhd->pub);
+ }
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+ return ret;
+}
+
+int net_os_set_dtim_skip(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd)
+ dhd->pub.dtim_skip = val;
+
+ return 0;
+}
+
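+/* Install or remove one of the predefined broadcast/multicast packet filters.
+ * The last two tokens of each filter string are the byte mask and the pattern
+ * matched at the start of the frame (e.g. 01:00:5E for IPv4 multicast MACs).
+ */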
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ char *filterp = NULL;
+ int ret = 0;
+
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
+ return ret;
+ if (num >= dhd->pub.pktfilter_count)
+ return -EINVAL;
+ if (add_remove) {
+ switch (num) {
+ case DHD_BROADCAST_FILTER_NUM:
+ filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ break;
+ case DHD_MULTICAST4_FILTER_NUM:
+ filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+ break;
+ case DHD_MULTICAST6_FILTER_NUM:
+ filterp = "103 0 0 0 0xFFFF 0x3333";
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ dhd->pub.pktfilter[num] = filterp;
+ return ret;
+}
+
+int net_os_set_packet_filter(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ /* Packet filtering is set only if we are still in early-suspend and
+ * need either to turn it ON or to turn it OFF.
+ * We can always turn it OFF in early-suspend, but we turn it
+ * back ON only if suspend_disable_flag was not set.
+ */
+ if (dhd && dhd->pub.up) {
+ dhd_os_proto_block(&dhd->pub);
+ if (dhd->pub.in_suspend) {
+ if (!val || (val && !dhd->pub.suspend_disable_flag))
+ dhd_set_packet_filter(val, &dhd->pub);
+ }
+ dhd_os_proto_unblock(&dhd->pub);
+ }
+ return ret;
+}
+
+
+void
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ dhd_preinit_ioctls(&dhd->pub);
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_clean */
+int
+dhd_dev_pno_reset(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_clean(&dhd->pub));
+}
+
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_enable(&dhd->pub, pfn_enabled));
+}
+
+
+/* Linux wrapper to call common dhd_pno_set */
+int
+dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
+}
+
+/* Linux wrapper to get pno status */
+int
+dhd_dev_get_pno_status(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_get_status(&dhd->pub));
+}
+
+#endif /* PNO_SUPPORT */
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd) {
+ if (!dhd->pub.hang_was_sent) {
+ dhd->pub.hang_was_sent = 1;
+ ret = wl_iw_send_priv_event(dev, "HANG");
+ }
+ }
+ return ret;
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd && dhd->pub.up)
+ memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+}
+
+char *dhd_bus_country_get(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd && (dhd->pub.dhd_cspec.ccode[0] != 0))
+ return dhd->pub.dhd_cspec.ccode;
+ return NULL;
+}
+
+void dhd_os_start_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ mutex_lock(&dhd->wl_start_lock);
+#endif
+}
+
+void dhd_os_start_unlock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ mutex_unlock(&dhd->wl_start_lock);
+#endif
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+ return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX 10
+
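+/* Poll up to MAX_WAIT_FOR_8021X_TX times (about 10 ms apart) until all
+ * pending 802.1X (EAPOL) frames have been transmitted; returns the count
+ * still pending.
+ */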
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int timeout = 10 * HZ / 1000;
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = dhd_get_pend_8021x_cnt(dhd);
+
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = dhd_get_pend_8021x_cnt(dhd);
+ }
+ return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+ int ret = 0;
+ struct file *fp;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* open file to write */
+ fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+ if (!fp) {
+ printf("%s: open file error\n", __FUNCTION__);
+ ret = -1;
+ goto exit;
+ }
+
+ /* Write buf to file */
+ fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+ /* free buf before return */
+ MFREE(dhd->osh, buf, size);
+ /* close file before return */
+ if (fp)
+ filp_close(fp, current->files);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
+ return ret;
+}
+#endif /* DHD_DEBUG */
+
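+/* Wake-lock helpers: wl_count reference-counts the "wlan_wake" suspend lock,
+ * while wl_packet flags received traffic so that a short (1 s) "wlan_rx_wake"
+ * timeout lock is taken before the main lock is released.
+ */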
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wl_lock, flags);
+ ret = dhd->wl_packet;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd->wl_packet)
+ wake_lock_timeout(&dhd->wl_rxwake, HZ);
+#endif
+ dhd->wl_packet = 0;
+ spin_unlock_irqrestore(&dhd->wl_lock, flags);
+ }
+ /* printk("%s: %d\n", __FUNCTION__, ret); */
+ return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wl_lock, flags);
+ dhd->wl_packet = 1;
+ spin_unlock_irqrestore(&dhd->wl_lock, flags);
+ }
+ /* printk("%s\n",__func__); */
+ return 0;
+}
+
+int net_os_wake_lock_timeout_enable(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout_enable(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wl_lock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (!dhd->wl_count)
+ wake_lock(&dhd->wl_wifi);
+#endif
+ dhd->wl_count++;
+ ret = dhd->wl_count;
+ spin_unlock_irqrestore(&dhd->wl_lock, flags);
+ }
+ /* printk("%s: %d\n", __FUNCTION__, ret); */
+ return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ dhd_os_wake_lock_timeout(pub);
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wl_lock, flags);
+ if (dhd->wl_count) {
+ dhd->wl_count--;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (!dhd->wl_count)
+ wake_unlock(&dhd->wl_wifi);
+#endif
+ ret = dhd->wl_count;
+ }
+ spin_unlock_irqrestore(&dhd->wl_lock, flags);
+ }
+ /* printk("%s: %d\n", __FUNCTION__, ret); */
+ return ret;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+}
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags = 0;
+
+ if (dhd)
+ spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+ return flags;
+}
+
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
diff --git a/drivers/net/wireless/bcm4329/dhd_linux_sched.c b/drivers/net/wireless/bcm4329/dhd_linux_sched.c
new file mode 100644
index 000000000000..480b416657ee
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_linux_sched.c
@@ -0,0 +1,38 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c,v 1.1.34.1.6.1 2009/01/16 01:17:40 Exp $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+ int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+ return rc;
+}
diff --git a/drivers/net/wireless/bcm4329/dhd_proto.h b/drivers/net/wireless/bcm4329/dhd_proto.h
new file mode 100644
index 000000000000..7ef6929a5bf7
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_proto.h
@@ -0,0 +1,102 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h,v 1.2.82.1.4.1.16.7 2010/05/10 12:54:59 Exp $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT 3000 /* in milliseconds */
+#endif
+
+#ifndef IOCTL_CHIP_ACTIVE_TIMEOUT
+#define IOCTL_CHIP_ACTIVE_TIMEOUT 10 /* in milliseconds */
+#endif
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, uint8 *fcbits);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#elif defined(RNDIS)
+#define DHD_PROTOCOL "rndis"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_sdio.c b/drivers/net/wireless/bcm4329/dhd_sdio.c
new file mode 100644
index 000000000000..5b08f7926d7c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_sdio.c
@@ -0,0 +1,5840 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c,v 1.157.2.27.2.33.2.129.4.1 2010/09/02 23:13:16 Exp $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <sbhnddma.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#ifdef DHD_DEBUG
+#include <hndrte_cons.h>
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG_TRAP
+#include <hndrte_armtrap.h>
+#endif /* DHD_DEBUG_TRAP */
+
+#define QLEN 2048 /* bulk rx and tx queue lengths */
+#define FCHI (QLEN - 256)
+#define FCLOW (FCHI - 256)
+#define PRIOMASK 7
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+
+#if defined(CONFIG_MACH_SANDGATE2G)
+#define DHD_RXBOUND 250 /* Default for max rx frames in one scheduling */
+#else
+#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
+#endif /* defined(CONFIG_MACH_SANDGATE2G) */
+
+#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
+
+#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD 32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#ifdef SDTEST
+#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ 32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ 2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY 3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY < 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applies unconditionally for the SDIO and SDSPI buses; for the
+ * gSPI bus it is conditional on a bufpool being present.
+ */
+#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+
+extern void bcmsdh_set_irq(int flag);
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+typedef struct dhd_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ hndrte_log_t log; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ uint8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+ dhd_pub_t *dhd;
+
+ bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+
+ sdpcmd_regs_t *regs; /* Registers for SDIO core */
+ uint sdpcmrev; /* SDIO core revision */
+ uint armrev; /* CPU core revision */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 hostintmask; /* Copy of Host Interrupt Mask */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+ const char *nvram_params; /* user specified nvram params. */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Queue length used for flow-control */
+ uint8 flowcontrol; /* per prio flow control bitmask */
+ uint8 tx_seq; /* Transmit sequence number (next) */
+ uint8 tx_max; /* Maximum transmit sequence allowed */
+
+ uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+ uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ uint16 nextlen; /* Next Read Len from last header */
+ uint8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ void *glomd; /* Packet containing glomming descriptor */
+ void *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ uint8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ uint8 *rxctl; /* Aligned pointer into rxbuf */
+ uint8 *databuf; /* Buffer for receiving big glom packet */
+ uint8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ uint8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+#ifdef DHD_DEBUG
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+ uint regfails; /* Count of R_REG/W_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ int32 idletime; /* Control for activity timeout */
+ int32 idlecount; /* Activity timeout counter */
+ int32 idleclock; /* How to set bus driver when idle */
+ int32 sd_divisor; /* Speed control to bus driver */
+ int32 sd_mode; /* Mode control to bus driver */
+ int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
+ bool use_rxchain; /* If dhd should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+ bool rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
+ bool alp_only; /* Don't use HT clock (ALP only) */
+ /* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+ bool usebufpool;
+
+#ifdef SDTEST
+ /* external loopback */
+ bool ext_loop;
+ uint8 loopid;
+
+ /* pktgen configuration */
+ uint pktgen_freq; /* Ticks between bursts */
+ uint pktgen_count; /* Packets to send each burst */
+ uint pktgen_print; /* Bursts between count displays */
+ uint pktgen_total; /* Stop after this many */
+ uint pktgen_minlen; /* Minimum packet data len */
+ uint pktgen_maxlen; /* Maximum packet data len */
+ uint pktgen_mode; /* Configured mode: tx, rx, or echo */
+ uint pktgen_stop; /* Number of tx failures causing stop */
+
+ /* active pktgen fields */
+ uint pktgen_tick; /* Tick counter for bursts */
+ uint pktgen_ptick; /* Burst counter for printing */
+ uint pktgen_sent; /* Number of test packets generated */
+ uint pktgen_rcvd; /* Number of test packets received */
+ uint pktgen_fail; /* Number of failed send attempts */
+ uint16 pktgen_len; /* Length of next packet to send */
+#endif /* SDTEST */
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+
+ uint8 *ctrl_frame_buf;
+ uint32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Not used yet */
+#define CLK_AVAIL 3
+
+#define DHD_NOPMU(dhd) (FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+int dhd_dongle_memsize;
+
+static bool dhd_doflow;
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static const uint watermark = 8;
+static const uint firstread = DHD_FIRSTREAD;
+
+#define HDATLEN (firstread - (SDPCM_HDRLEN))
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#define ALIGNMENT 4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
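+/* Align the packet data pointer to 'align' bytes (pulling up to align-1
+ * bytes from the front) and set the packet length to 'len'.
+ */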
+#define PKTALIGN(osh, p, len, align) \
+ do { \
+ uint datalign; \
+ datalign = (uintptr)PKTDATA((osh), (p)); \
+ datalign = ROUNDUP(datalign, (align)) - datalign; \
+ ASSERT(datalign < (align)); \
+ ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
+ if (datalign) \
+ PKTPULL((osh), (p), datalign); \
+ PKTSETLEN((osh), (p), (len)); \
+ } while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+
+/* To check if there's window offered */
+#define DATAOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhdsdio_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ regvar = R_REG(bus->dhd->osh, regaddr); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) { \
+ DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ regvar = 0; \
+ } \
+ } \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ W_REG(bus->dhd->osh, regaddr, regval); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) \
+ DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ } \
+} while (0)
+
+
+#define DHD_BUS SDIO_BUS
+
+#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start);
+#endif
+
+#ifdef DHD_DEBUG_TRAP
+static int dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size);
+#endif /* DHD_DEBUG_TRAP */
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+ void * regsva, uint16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, int reset_flag);
+
+static uint process_nvram_vars(char *varbuf, uint len);
+
+static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+
+static bool dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(struct dhd_bus *bus);
+
+static int dhdsdio_download_code_file(struct dhd_bus *bus, char *image_path);
+static int dhdsdio_download_nvram(struct dhd_bus *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(struct dhd_bus *bus);
+#endif
+
+
+static void
+dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+ int32 min_size = DONGLE_MIN_MEMSIZE;
+ /* Restrict the memsize to user specified limit */
+ DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+ dhd_dongle_memsize, min_size));
+ if ((dhd_dongle_memsize > min_size) &&
+ (dhd_dongle_memsize < (int32)bus->orig_ramsize))
+ bus->ramsize = dhd_dongle_memsize;
+}
+
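+/* Program the function-1 backplane window registers (SBADDRLOW/MID/HIGH) so that */
+/* subsequent SDIO accesses map to backplane address bits 31:8 of 'address'. */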
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+ int err = 0;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+ return err;
+}
+
+
+/* Turn backplane clock on or off */
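+/* Requests HT (or ALP-only, while downloading firmware) clock via the function-1 CHIPCLKCSR */
+/* register, then either polls until it is available or, if pendok, enables the */
+/* clock-available interrupt and leaves the transition pending (CLK_PENDING). */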
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+ int err;
+ uint8 clkctl, clkreq, devctl;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(OOB_INTR_ONLY)
+ pendok = FALSE;
+#endif
+ clkctl = 0;
+ sdh = bus->sdh;
+
+
+ if (on) {
+ /* Request HT Avail */
+ clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+ if ((bus->sih->chip == BCM4329_CHIP_ID) && (bus->sih->chiprev == 0))
+ clkreq |= SBSDIO_FORCE_ALP;
+
+
+
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ if (pendok &&
+ ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) {
+ uint32 dummy, retries;
+ R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+ }
+
+ /* Check current status */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ /* Go to pending and await interrupt if appropriate */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+ /* Allow only clock-available interrupt */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ DHD_INFO(("CLKCTL: set PENDING\n"));
+ bus->clkstate = CLK_PENDING;
+ return BCME_OK;
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ /* Otherwise, wait here (polling) for HT Avail */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+ !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+ }
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+ __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+ return BCME_ERROR;
+ }
+
+
+ /* Mark clock available */
+ bus->clkstate = CLK_AVAIL;
+ DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+ if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+ if (!SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+ }
+#endif /* !defined(BCMLXSDMMC) */
+ } else {
+ if (SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+ }
+ }
+#endif /* defined (DHD_DEBUG) */
+
+ bus->activity = TRUE;
+ } else {
+ clkreq = 0;
+
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ bus->clkstate = CLK_SDONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ DHD_INFO(("CLKCTL: turned OFF\n"));
+ if (err) {
+ DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ return BCME_OK;
+}
+
+/* Change idle/active SD state */
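+/* With idleclock == DHD_IDLE_STOP the SD clock is stopped outright (optionally dropping to */
+/* SD1 mode first); any other non-ACTIVE idleclock value is applied as an idle sd_divisor. */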
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+ int err;
+ int32 iovalue;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (on) {
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ /* Turn on clock and restore mode */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ iovalue = bus->sd_mode;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Restore clock speed */
+ iovalue = bus->sd_divisor;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_SDONLY;
+ } else {
+ /* Stop or slow the SD clock itself */
+ if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+ DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+ __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+ return BCME_ERROR;
+ }
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ if (sd1idle) {
+ /* Change to SD1 mode and turn off clock */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+
+ iovalue = 0;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Set divisor to idle value */
+ iovalue = bus->idleclock;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_NONE;
+ }
+
+ return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
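+/* Walks the clock state machine between CLK_NONE, CLK_SDONLY and CLK_AVAIL, arming the */
+/* watchdog timer on the way up and disarming it when dropping to CLK_NONE. */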
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+ int ret = BCME_OK;
+#ifdef DHD_DEBUG
+ uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ return ret;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ dhdsdio_sdclk(bus, TRUE);
+ /* Now request HT Avail on the backplane */
+ ret = dhdsdio_htclk(bus, TRUE, pendok);
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ ret = dhdsdio_sdclk(bus, TRUE);
+ else if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ else
+ DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+ bus->clkstate, target));
+ if (ret == BCME_OK)
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ /* Now remove the SD clock */
+ ret = dhdsdio_sdclk(bus, FALSE);
+ dhd_os_wd_timer(bus->dhd, 0);
+ break;
+ }
+#ifdef DHD_DEBUG
+ DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+ return ret;
+}
+
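+/* Put the bus to sleep (disable SDIO interrupts, tell the dongle to use OOB wakeup, drop to */
+/* the SD-only clock and isolate the pads) or wake it back up and re-enable interrupts. */
+/* Returns BCME_BUSY if tx/rx work is still pending. */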
+int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return BCME_OK;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+ return BCME_BUSY;
+
+
+ /* Disable SDIO interrupts (no longer interested) */
+ bcmsdh_intr_disable(bus->sdh);
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ if (bus->sih->chip != BCM4329_CHIP_ID && bus->sih->chip != BCM4319_CHIP_ID) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ }
+
+ /* Change state */
+ bus->sleeping = TRUE;
+
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ 0, NULL);
+
+ /* Force pad isolation off if possible (in case power never toggled) */
+ if ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev >= 10))
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Change state */
+ bus->sleeping = FALSE;
+
+ /* Enable interrupts again */
+ if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ }
+
+ return BCME_OK;
+}
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+ bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (enable == TRUE) {
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ } else {
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ }
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+#define BUS_WAKE(bus) \
+ do { \
+ if ((bus)->sleeping) \
+ dhdsdio_bussleep((bus), FALSE); \
+ } while (0)
+
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
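+/* Frame layout: 4-byte hardware tag (len, ~len) followed by an 8-byte software header */
+/* carrying the channel, tx sequence number and data offset; the length may be padded up */
+/* to the SDIO block size to avoid a tail command. */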
+static int
+dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+{
+ int ret;
+ osl_t *osh;
+ uint8 *frame;
+ uint16 len, pad = 0;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh;
+ void *new;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+
+ if (bus->dhd->dongle_reset) {
+ ret = BCME_NOTREADY;
+ goto done;
+ }
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ /* Add alignment padding, allocate new packet if needed */
+ if ((pad = ((uintptr)frame % DHD_SDALIGN))) {
+ if (PKTHEADROOM(osh, pkt) < pad) {
+ DHD_INFO(("%s: insufficient headroom %d for %d pad\n",
+ __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad));
+ bus->dhd->tx_realloc++;
+ new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+ if (!new) {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ /* make sure the newly allocated copy is freed on the way out */
+ free_pkt = TRUE;
+ pkt = new;
+ frame = (uint8*)PKTDATA(osh, pkt);
+ ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+ pad = 0;
+ } else {
+ PKTPUSH(osh, pkt, pad);
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ ASSERT((pad + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
+ bzero(frame, pad + SDPCM_HDRLEN);
+ }
+ }
+ ASSERT(pad < DHD_SDALIGN);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (uint16)PKTLEN(osh, pkt);
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~len);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+#ifdef DHD_DEBUG
+ tx_packets[PKTPRIO(pkt)]++;
+ if (DHD_BYTES_ON() &&
+ (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+#ifdef NOTUSED
+ if (pad <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Some controllers have trouble with odd bytes -- round to even */
+ if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+ if (PKTTAILROOM(osh, pkt))
+#endif
+ len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+ else
+ DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+ }
+
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, pkt, NULL, NULL);
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+
+done:
+ /* restore pkt buffer pointer before calling tx complete routine */
+ PKTPULL(osh, pkt, SDPCM_HDRLEN + pad);
+ dhd_os_sdunlock(bus->dhd);
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ dhd_os_sdlock(bus->dhd);
+
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+
+ return ret;
+}
+
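+/* Queue a data packet for transmission: if sending must be deferred (flow control, no */
+/* credit, pending DPC, or clock not up) the packet is enqueued by precedence and the DPC */
+/* is scheduled; otherwise it is sent immediately on the data channel. */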
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
+ }
+#endif /* SDTEST */
+
+ /* Add space for the header */
+ PKTPUSH(osh, pkt, SDPCM_HDRLEN);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+ pktq_len(&bus->txq)));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+ PKTFREE(osh, pkt, TRUE);
+ DHD_ERROR(("%s: out of bus->txq !!!\n", __FUNCTION__));
+ ret = BCME_NORESOURCE;
+ } else {
+ ret = BCME_OK;
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
+ dhd_txflowcontrol(bus->dhd, 0, ON);
+
+#ifdef DHD_DEBUG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+ /* Make sure the backplane HT clock is on; a pending clock request is acceptable here */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+#ifndef SDTEST
+ DHD_TRACE(("%s: calling txpkt\n", __FUNCTION__));
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ }
+
+
+ return ret;
+}
+
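+/* Drain up to 'maxframes' packets from the tx queue (skipping flow-controlled precedences) */
+/* while the dongle is still offering credit; returns the number of frames sent. */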
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+ void *pkt;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ uint8 tx_prec_map;
+
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ tx_prec_map = ~bus->flowcontrol;
+
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+ dhd_os_sdlock_txq(bus->dhd);
+ if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+ dhd_os_sdunlock_txq(bus->dhd);
+ break;
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+ datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
+
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
+ R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
+ }
+
+ /* Deflow-control stack if needed */
+ if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+ dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+ dhd_txflowcontrol(dhd, 0, OFF);
+
+ return cnt;
+}
+
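+/* Send a control (ioctl) message on the SDPCM control channel. If no transmit credit is */
+/* available the frame is handed to the DPC via ctrl_frame_buf/ctrl_frame_stat and this */
+/* function waits for it to be sent. */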
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ uint8 *frame;
+ uint16 len;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint8 doff = 0;
+ int ret = -1;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Back up the pointer to make room for the bus header */
+ frame = msg - SDPCM_HDRLEN;
+ len = (msglen += SDPCM_HDRLEN);
+
+ /* Add alignment padding (optional for ctl frames) */
+ if (dhd_alignctl) {
+ if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ bzero(frame, doff + SDPCM_HDRLEN);
+ }
+ ASSERT(doff < DHD_SDALIGN);
+ }
+ doff += SDPCM_HDRLEN;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (len & (ALIGNMENT - 1)))
+ len = ROUNDUP(len, ALIGNMENT);
+
+ ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(uint16*)frame = htol16((uint16)msglen);
+ *(((uint16*)frame) + 1) = htol16(~msglen);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+ if (!DATAOK(bus)) {
+ DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq));
+ bus->ctrl_frame_stat = TRUE;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+ dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+
+ if (bus->ctrl_frame_stat == FALSE) {
+ DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+ ret = 0;
+ } else {
+ if (!bus->dhd->hang_was_sent)
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__));
+ ret = -1;
+ }
+ }
+
+ if (ret == -1) {
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ do {
+ bus->ctrl_frame_stat = FALSE;
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retries++ < TXRETRIES);
+ }
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ if (ret)
+ bus->dhd->tx_ctlerrs++;
+ else
+ bus->dhd->tx_ctlpkts++;
+
+ return ret ? -EIO : 0;
+}
+
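+/* Wait for a control-channel response from the dongle and copy it into 'msg'; returns the */
+/* received length, -ETIMEDOUT on timeout, or -ERESTARTSYS if the wait was interrupted. */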
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+ dhd_os_sdlock(bus->dhd);
+ rxlen = bus->rxlen;
+ bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+ bus->rxlen = 0;
+ dhd_os_sdunlock(bus->dhd);
+
+ if (rxlen) {
+ DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+ __FUNCTION__, rxlen, msglen));
+ } else if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef DHD_DEBUG_TRAP
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG_TRAP */
+ } else if (pending == TRUE) {
+ DHD_CTL(("%s: cancelled\n", __FUNCTION__));
+ return -ERESTARTSYS;
+ } else {
+ DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG_TRAP
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG_TRAP */
+ }
+
+ if (rxlen)
+ bus->dhd->rx_ctlpkts++;
+ else
+ bus->dhd->rx_ctlerrs++;
+
+ return rxlen ? (int)rxlen : -ETIMEDOUT;
+}
+
+/* IOVar table */
+enum {
+ IOV_INTR = 1,
+ IOV_POLLRATE,
+ IOV_SDREG,
+ IOV_SBREG,
+ IOV_SDCIS,
+ IOV_MEMBYTES,
+ IOV_MEMSIZE,
+#ifdef DHD_DEBUG_TRAP
+ IOV_CHECKDIED,
+#endif
+ IOV_DOWNLOAD,
+ IOV_FORCEEVEN,
+ IOV_SDIOD_DRIVE,
+ IOV_READAHEAD,
+ IOV_SDRXCHAIN,
+ IOV_ALIGNCTL,
+ IOV_SDALIGN,
+ IOV_DEVRESET,
+ IOV_CPU,
+#ifdef SDTEST
+ IOV_PKTGEN,
+ IOV_EXTLOOP,
+#endif /* SDTEST */
+ IOV_SPROM,
+ IOV_TXBOUND,
+ IOV_RXBOUND,
+ IOV_TXMINMAX,
+ IOV_IDLETIME,
+ IOV_IDLECLOCK,
+ IOV_SD1IDLE,
+ IOV_SLEEP,
+ IOV_VARS
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+ {"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
+ {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 },
+ {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 },
+ {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 },
+ {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 },
+ {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 },
+ {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
+ {"download", IOV_DOWNLOAD, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+ {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 },
+ {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 },
+ {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 },
+ {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 },
+ {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 },
+ {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 },
+ {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
+ {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
+ {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 },
+ {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 },
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG_TRAP
+ {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 },
+#endif /* DHD_DEBUG_TRAP */
+#ifdef SDTEST
+ {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 },
+ {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+
+ {NULL, 0, 0, 0, 0 }
+};
+
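+/* Print num/div as a fixed-point ratio with two decimal places using integer math only */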
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+ uint q1, q2;
+
+ if (!div) {
+ bcm_bprintf(strbuf, "%s N/A", desc);
+ } else {
+ q1 = num / div;
+ q2 = (100 * (num - (q1 * div))) / div;
+ bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+ }
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+ bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+ bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+ bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
+ bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+ bus->rxlen, bus->rx_seq);
+ bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+ bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+ bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+ bus->pollrate, bus->pollcnt, bus->regfails);
+
+ bcm_bprintf(strbuf, "\nAdditional counters:\n");
+ bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
+ bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+ bus->rxc_errors);
+ bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+ bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+ bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+ bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+ bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+ bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+ bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
+ (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+ bus->f2txdata, bus->f1regdata);
+ {
+ dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+ bus->dhd->rx_packets);
+ dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+ (bus->f2txdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+ bcm_bprintf(strbuf, "\n\n");
+ }
+
+#ifdef SDTEST
+ if (bus->pktgen_count) {
+ bcm_bprintf(strbuf, "pktgen config and count:\n");
+ bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n",
+ bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+ bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+ bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+ bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+ }
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+ bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+ bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+ bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+ bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+ bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+ bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+ bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+ bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+ bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+ bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+
+ pktgen.version = DHD_PKTGEN_VERSION;
+ pktgen.freq = bus->pktgen_freq;
+ pktgen.count = bus->pktgen_count;
+ pktgen.print = bus->pktgen_print;
+ pktgen.total = bus->pktgen_total;
+ pktgen.minlen = bus->pktgen_minlen;
+ pktgen.maxlen = bus->pktgen_maxlen;
+ pktgen.numsent = bus->pktgen_sent;
+ pktgen.numrcvd = bus->pktgen_rcvd;
+ pktgen.numfail = bus->pktgen_fail;
+ pktgen.mode = bus->pktgen_mode;
+ pktgen.stop = bus->pktgen_stop;
+
+ bcopy(&pktgen, arg, sizeof(pktgen));
+
+ return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+ uint oldcnt, oldmode;
+
+ bcopy(arg, &pktgen, sizeof(pktgen));
+ if (pktgen.version != DHD_PKTGEN_VERSION)
+ return BCME_BADARG;
+
+ oldcnt = bus->pktgen_count;
+ oldmode = bus->pktgen_mode;
+
+ bus->pktgen_freq = pktgen.freq;
+ bus->pktgen_count = pktgen.count;
+ bus->pktgen_print = pktgen.print;
+ bus->pktgen_total = pktgen.total;
+ bus->pktgen_minlen = pktgen.minlen;
+ bus->pktgen_maxlen = pktgen.maxlen;
+ bus->pktgen_mode = pktgen.mode;
+ bus->pktgen_stop = pktgen.stop;
+
+ bus->pktgen_tick = bus->pktgen_ptick = 0;
+ bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+ bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+ /* Clear counts for a new pktgen (mode change, or was stopped) */
+ if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode))
+ bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
+
+ return 0;
+}
+#endif /* SDTEST */
+
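+/* Read or write dongle memory through the function-1 backplane window, re-programming the */
+/* window whenever the transfer crosses a window boundary. */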
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint32 sdaddr;
+ uint dsize;
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
+
+ /* Set the backplane window to include the start address */
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+ __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+ (address & SBSDIO_SBWINDOW_MASK)));
+ if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+ DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ break;
+ }
+ sdaddr = 0;
+ dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+ DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+ bcmsdh_cur_sbwad(bus->sdh)));
+ }
+
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG_TRAP
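+/* The dongle leaves a pointer to its sdpcm_shared structure in the last word of RAM; */
+/* fetch it, sanity-check it, read the structure and convert its fields to host byte order. */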
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+ uint32 addr;
+ int rv;
+
+ /* Read last word in memory to determine address of sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, bus->ramsize - 4, (uint8 *)&addr, 4)) < 0)
+ return rv;
+
+ addr = ltoh32(addr);
+
+ DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+ DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", __FUNCTION__, addr));
+ return BCME_ERROR;
+ }
+
+ /* Read hndrte_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+ "is different than sdpcm_shared version %d in dongle\n",
+ __FUNCTION__, SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ trap_t tr;
+ sdpcm_shared_t sdpcm_shared;
+ struct bcmstrbuf strbuf;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (data == NULL) {
+ /*
+ * Called after an rx ctrl timeout; "data" is NULL.
+ * Allocate memory to trace the trap or assert.
+ */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+ }
+
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+ goto done;
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+ if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ if (sdpcm_shared.assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ if (sdpcm_shared.assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+ }
+
+ if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.trap_addr,
+ (uint8*)&tr, sizeof(trap_t))) < 0)
+ goto done;
+
+ bcm_bprintf(&strbuf,
+ "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+ "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n",
+ tr.type, tr.epc, tr.cpsr, tr.spsr, tr.r13, tr.r14, tr.pc,
+ sdpcm_shared.trap_addr,
+ tr.r0, tr.r1, tr.r2, tr.r3, tr.r4, tr.r5, tr.r6, tr.r7);
+ }
+ }
+
+ if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+ DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ }
+
+done:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+
+ return bcmerror;
+}
+#endif /* DHD_DEBUG_TRAP */
+
+#ifdef DHD_DEBUG
+#define CONSOLE_LINE_MAX 192
+
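+/* Copy the dongle's circular console log buffer over the bus and print any complete lines */
+/* added since the last read. */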
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ }
+
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ /* Read the console buffer */
+ addr = ltoh32(c->log.buf);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return BCME_OK;
+}
+#endif /* DHD_DEBUG */
+
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ int bcmerror = BCME_OK;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Basic sanity checks */
+ if (bus->dhd->up) {
+ bcmerror = BCME_NOTDOWN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the terminating double-null */
+ bcopy(arg, bus->vars, bus->varsz);
+err:
+ return bcmerror;
+}
+
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ bool bool_val = 0;
+
+ DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+ /* Some ioctls use the bus */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
+ /* Handle sleep stuff before any clock mucking */
+ if (vi->varid == IOV_SLEEP) {
+ if (IOV_ISSET(actionid)) {
+ bcmerror = dhdsdio_bussleep(bus, bool_val);
+ } else {
+ int_val = (int32)bus->sleeping;
+ bcopy(&int_val, arg, val_size);
+ }
+ goto exit;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ if (!bus->dhd->dongle_reset) {
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_INTR):
+ int_val = (int32)bus->intr;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_INTR):
+ bus->intr = bool_val;
+ bus->intdis = FALSE;
+ if (bus->dhd->up) {
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ }
+ break;
+
+ case IOV_GVAL(IOV_POLLRATE):
+ int_val = (int32)bus->pollrate;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POLLRATE):
+ bus->pollrate = (uint)int_val;
+ bus->poll = (bus->pollrate != 0);
+ break;
+
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_IDLECLOCK):
+ int_val = (int32)bus->idleclock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLECLOCK):
+ bus->idleclock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SD1IDLE):
+ int_val = (int32)sd1idle;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SD1IDLE):
+ sd1idle = bool_val;
+ break;
+
+
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address;
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+ (set ? "write" : "read"), size, address));
+
+ /* If we know about SOCRAM, check for a fit */
+ if ((bus->orig_ramsize) &&
+ ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) {
+ DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+ __FUNCTION__, bus->orig_ramsize, size, address));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_MEMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_SDIOD_DRIVE):
+ int_val = (int32)dhd_sdiod_drive_strength;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDIOD_DRIVE):
+ dhd_sdiod_drive_strength = int_val;
+ si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+ break;
+
+ case IOV_SVAL(IOV_DOWNLOAD):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdsdio_downloadvars(bus, arg, len);
+ break;
+
+ case IOV_GVAL(IOV_READAHEAD):
+ int_val = (int32)dhd_readahead;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_READAHEAD):
+ if (bool_val && !dhd_readahead)
+ bus->nextlen = 0;
+ dhd_readahead = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDRXCHAIN):
+ int_val = (int32)bus->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDRXCHAIN):
+ if (bool_val && !bus->sd_rxchain)
+ bcmerror = BCME_UNSUPPORTED;
+ else
+ bus->use_rxchain = bool_val;
+ break;
+ case IOV_GVAL(IOV_ALIGNCTL):
+ int_val = (int32)dhd_alignctl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ALIGNCTL):
+ dhd_alignctl = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDALIGN):
+ int_val = DHD_SDALIGN;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_VARS):
+ if (bus->varsz < (uint)len)
+ bcopy(bus->vars, arg, bus->varsz);
+ else
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ /* Same as above, but offset is not backplane (not SDIO core) */
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ case IOV_GVAL(IOV_SDCIS):
+ {
+ *(char *)arg = 0;
+
+ bcmstrcat(arg, "\nFunc 0\n");
+ bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 1\n");
+ bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 2\n");
+ bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FORCEEVEN):
+ int_val = (int32)forcealign;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCEEVEN):
+ forcealign = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TXMINMAX):
+ int_val = (int32)dhd_txminmax;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXMINMAX):
+ dhd_txminmax = (uint)int_val;
+ break;
+
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+ case IOV_GVAL(IOV_EXTLOOP):
+ int_val = (int32)bus->ext_loop;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_EXTLOOP):
+ bus->ext_loop = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_get(bus, arg);
+ break;
+
+ case IOV_SVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_set(bus, arg);
+ break;
+#endif /* SDTEST */
+
+
+ case IOV_SVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+ __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+ bus->dhd->busstate));
+
+ ASSERT(bus->dhd->osh);
+ /* ASSERT(bus->cl_devid); */
+
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+ break;
+
+ case IOV_GVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+ /* Get its status */
+ int_val = (bool) bus->dhd->dongle_reset;
+ bcopy(&int_val, arg, val_size);
+
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == FALSE)
+ dhd_preinit_ioctls((dhd_pub_t *) bus->dhd);
+
+ return bcmerror;
+}
+
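+/* Copy the NVRAM variable block to the top of dongle RAM and write the length token */
+/* (size in words in the low 16 bits, its one's complement in the high 16) into the */
+/* last word of RAM. */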
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ char *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+ /* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ if (bus->vars) {
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ nvram_ularray = (char*)MALLOC(bus->dhd->osh, varsize);
+ if (!nvram_ularray)
+ return BCME_NOMEM;
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ bus->orig_ramsize, bus->ramsize));
+ DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize));
+ varsize = ((bus->orig_ramsize - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dhdsdio_membytes(bus, TRUE, (bus->orig_ramsize - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+}
+
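+/* Enter download state by disabling the ARM core and resetting SOCRAM so the host can write */
+/* firmware and NVRAM; exit by writing the vars, clearing intstatus and resetting the ARM so */
+/* it boots from RAM. */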
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+ uint retries;
+ int bcmerror = 0;
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+
+ bus->alp_only = TRUE;
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_disable(bus->sih, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4);
+ }
+ } else {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: no vars written to RAM\n", __FUNCTION__));
+ bcmerror = 0;
+ }
+
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+ }
+
+fail:
+ /* Always return to SDIOD core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+ return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Turn on clock in case SD command needs backplane */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+ /* Check for bus configuration changes of interest */
+
+ /* If it was divisor change, read the new one */
+ if (set && strcmp(name, "sd_divisor") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_divisor = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_divisor));
+ }
+ }
+ /* If it was a mode change, read the new one */
+ if (set && strcmp(name, "sd_mode") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_mode = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_mode));
+ }
+ }
+ /* Similar check for blocksize change */
+ if (set && strcmp(name, "sd_blocksize") == 0) {
+ int32 fnum = 2;
+ if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+ }
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+ osl_t *osh = bus->dhd->osh;
+ uint32 local_hostintmask;
+ uint8 saveclk;
+ uint retries;
+ int err;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Change our idea of bus state */
+ bus->dhd->busstate = DHD_BUS_DOWN;
+
+ /* Enable clock for device interrupts */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Disable and clear interrupts at the chip level also */
+ W_SDREG(0, &bus->regs->hostintmask, retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ }
+
+ /* Turn off the bus (F2), free any pending packets */
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+
+ /* Turn off the backplane clock (only) */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Clear the data packet queues */
+ pktq_flush(osh, &bus->txq, TRUE);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ PKTFREE(osh, bus->glomd, FALSE);
+
+ if (bus->glom)
+ PKTFREE(osh, bus->glom, FALSE);
+
+ bus->glom = bus->glomd = NULL;
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ dhd_os_ioctl_resp_wake(bus->dhd);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = FALSE;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ dhd_timeout_t tmo;
+ uint retries = 0;
+ uint8 ready, enable;
+ int err, ret = BCME_ERROR;
+ uint8 saveclk;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(bus->dhd);
+ if (!bus->dhd)
+ return BCME_OK;
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ err = dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if ((err != BCME_OK) || (bus->clkstate != CLK_AVAIL)) {
+ DHD_ERROR(("%s: Failed to set backplane clock: err %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+ &bus->regs->tosbmailboxdata, retries);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+ /* Give the dongle some time to do its thing and set IOR2 */
+ dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+ ready = 0;
+ while (ready != enable && !dhd_timeout_expired(&tmo))
+ ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+
+ DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+ __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (ready == enable) {
+ /* Make sure we're talking to the core. */
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+ bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err);
+
+ /* Set bus state according to enable result */
+ dhdp->busstate = DHD_BUS_DATA;
+
+ /* bcmsdh_intr_unmask(bus->sdh); */
+
+ bus->intdis = FALSE;
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+
+ }
+
+
+ else {
+ /* Disable F2 again */
+ enable = SDIO_FUNC_ENABLE_1;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+ }
+
+ /* Restore previous clock setting */
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+
+ /* If we didn't come up, turn off backplane clock */
+ if (dhdp->busstate != DHD_BUS_DATA)
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+ ret = BCME_OK;
+exit:
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+
+ return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+ uint16 lastrbc;
+ uint8 hi, lo;
+ int err;
+
+ DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+ (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+ if (abort) {
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ }
+
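+ /* Ask the device to terminate the partially-received frame so the FIFO can be flushed */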
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+ bus->f1regdata++;
+
+ /* Wait until the packet has been flushed (device/FIFO stable) */
+ for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+
+ if ((hi == 0) && (lo == 0))
+ break;
+
+ if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+ DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+ __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+ }
+ lastrbc = (hi << 8) + lo;
+ }
+
+ if (!retries) {
+ DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+ } else {
+ DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+ }
+
+ if (rtx) {
+ bus->rxrtx++;
+ W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+ bus->f1regdata++;
+ if (retries <= retry_limit) {
+ bus->rxskip = TRUE;
+ }
+ }
+
+ /* Clear partial in any case */
+ bus->nextlen = 0;
+
+ /* If we can't reach the device, signal failure */
+ if (err || bcmsdh_regfail(sdh))
+ bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint rdlen, pad;
+
+ int sdret;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Control data already received in aligned rxctl */
+ if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+ goto gotpkt;
+
+ ASSERT(bus->rxbuf);
+ /* Set rxctl for frame (w/optional alignment) */
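+ /* Position rxctl so that the remainder read from SDIO (rxctl + firstread) lands DHD_SDALIGN-aligned */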
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+
+ /* Copy the already-read portion over */
+ bcopy(hdr, bus->rxctl, firstread);
+ if (len <= firstread)
+ goto gotpkt;
+
+ /* Copy the full data pkt in gSPI case and process ioctl. */
+ if (bus->bus == SPI_BUS) {
+ bcopy(hdr, bus->rxctl, len);
+ goto gotpkt;
+ }
+
+ /* Raise rdlen to next SDIO block to avoid tail command */
+ rdlen = len - firstread;
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((len + pad) < bus->dhd->maxctl))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ /* Drop if the read is too big or it exceeds our maximum */
+ if ((rdlen + firstread) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+ __FUNCTION__, rdlen, bus->dhd->maxctl));
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+ if ((len - doff) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+
+ /* Read remainder of frame body into the rxctl buffer */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+ bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ goto done;
+ }
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("RxCtrl", bus->rxctl, len);
+ }
+#endif
+
+ /* Point to valid data and indicate its length */
+ bus->rxctl += doff;
+ bus->rxlen = len - doff;
+
+done:
+ /* Awake any waiters */
+ dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+ uint16 dlen, totlen;
+ uint8 *dptr, num = 0;
+
+ uint16 sublen, check;
+ void *pfirst, *plast, *pnext, *save_pfirst;
+ osl_t *osh = bus->dhd->osh;
+
+ int errcode;
+ uint8 chan, seq, doff, sfdoff;
+ uint8 txmax;
+
+ int ifidx = 0;
+ bool usechain = bus->use_rxchain;
+
+ /* If packets, issue read(s) and send up packet chain */
+ /* Return sequence numbers consumed? */
+
+ DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+ /* If there's a descriptor, generate the packet chain */
+ if (bus->glomd) {
+ dhd_os_sdlock_rxq(bus->dhd);
+
+ pfirst = plast = pnext = NULL;
+ dlen = (uint16)PKTLEN(osh, bus->glomd);
+ dptr = PKTDATA(osh, bus->glomd);
+ if (!dlen || (dlen & 1)) {
+ DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+ __FUNCTION__, dlen));
+ dlen = 0;
+ }
+
+ for (totlen = num = 0; dlen; num++) {
+ /* Get (and move past) next length */
+ sublen = ltoh16_ua(dptr);
+ dlen -= sizeof(uint16);
+ dptr += sizeof(uint16);
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+ DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+ __FUNCTION__, num, sublen));
+ pnext = NULL;
+ break;
+ }
+ if (sublen % DHD_SDALIGN) {
+ DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+ __FUNCTION__, sublen, DHD_SDALIGN));
+ usechain = FALSE;
+ }
+ totlen += sublen;
+
+ /* For last frame, adjust read len so total is a block multiple */
+ if (!dlen) {
+ sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+ totlen = ROUNDUP(totlen, bus->blocksize);
+ }
+
+ /* Allocate/chain packet for next subframe */
+ if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+ DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+ __FUNCTION__, num, sublen));
+ break;
+ }
+ ASSERT(!PKTLINK(pnext));
+ if (!pfirst) {
+ ASSERT(!plast);
+ pfirst = plast = pnext;
+ } else {
+ ASSERT(plast);
+ PKTSETNEXT(osh, plast, pnext);
+ plast = pnext;
+ }
+
+ /* Adhere to start alignment requirements */
+ PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+ }
+
+ /* If all allocations succeeded, save packet chain in bus structure */
+ if (pnext) {
+ DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+ __FUNCTION__, totlen, num));
+ if (DHD_GLOM_ON() && bus->nextlen) {
+ if (totlen != bus->nextlen) {
+ DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+ "rxseq %d\n", __FUNCTION__, bus->nextlen,
+ totlen, rxseq));
+ }
+ }
+ bus->glom = pfirst;
+ pfirst = pnext = NULL;
+ } else {
+ if (pfirst)
+ PKTFREE(osh, pfirst, FALSE);
+ bus->glom = NULL;
+ num = 0;
+ }
+
+ /* Done with descriptor packet */
+ PKTFREE(osh, bus->glomd, FALSE);
+ bus->glomd = NULL;
+ bus->nextlen = 0;
+
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+
+ /* Ok -- either we just generated a packet chain, or had one from before */
+ if (bus->glom) {
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+ for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+ DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
+ pnext, (uint8*)PKTDATA(osh, pnext),
+ PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+ }
+ }
+
+ pfirst = bus->glom;
+ dlen = (uint16)pkttotlen(osh, pfirst);
+
+ /* Do an SDIO read for the superframe. Configurable iovar to
+ * read directly into the chained packet, or allocate a large
+ * packet and copy into the chain.
+ */
+ if (usechain) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+ dlen, pfirst, NULL, NULL);
+ } else if (bus->dataptr) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, bus->dataptr,
+ dlen, NULL, NULL, NULL);
+ sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+ if (sublen != dlen) {
+ DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+ __FUNCTION__, dlen, sublen));
+ errcode = -1;
+ }
+ pnext = NULL;
+ } else {
+ DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+ errcode = -1;
+ }
+ bus->f2rxdata++;
+ ASSERT(errcode != BCME_PENDING);
+
+ /* On failure, kill the superframe, allow a couple retries */
+ if (errcode < 0) {
+ DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+ __FUNCTION__, dlen, errcode));
+ bus->dhd->rx_errors++;
+
+ if (bus->glomerr++ < 3) {
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ return 0;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 48));
+ }
+#endif
+
+
+ /* Validate the superframe header */
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ errcode = 0;
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, sublen, check));
+ errcode = -1;
+ } else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+ DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+ errcode = -1;
+ } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+ DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+ SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+ errcode = -1;
+ } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+ DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+ DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+ __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN));
+ errcode = -1;
+ }
+
+ /* Check sequence number of superframe SW header */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+ /* Remove superframe header, remember offset */
+ PKTPULL(osh, pfirst, doff);
+ sfdoff = doff;
+
+ /* Validate all the subframe headers */
+ for (num = 0, pnext = pfirst; pnext && !errcode;
+ num++, pnext = PKTNEXT(osh, pnext)) {
+ dptr = (uint8 *)PKTDATA(osh, pnext);
+ dlen = (uint16)PKTLEN(osh, pnext);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("subframe", dptr, 32);
+ }
+#endif
+
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (subframe %d): HW hdr error: "
+ "len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, num, sublen, check));
+ errcode = -1;
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+ DHD_ERROR(("%s (subframe %d): length mismatch: "
+ "len 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, num, sublen, dlen));
+ errcode = -1;
+ } else if ((chan != SDPCM_DATA_CHANNEL) &&
+ (chan != SDPCM_EVENT_CHANNEL)) {
+ DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+ __FUNCTION__, num, chan));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+ DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+ __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+ errcode = -1;
+ }
+ }
+
+ if (errcode) {
+ /* Terminate frame on error, request a couple retries */
+ if (bus->glomerr++ < 3) {
+ /* Restore superframe header space */
+ PKTPUSH(osh, pfirst, sfdoff);
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ bus->nextlen = 0;
+ return 0;
+ }
+
+ /* Basic SD framing looks ok - process each packet (header) */
+ save_pfirst = pfirst;
+ bus->glom = NULL;
+ plast = NULL;
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+ pnext = PKTNEXT(osh, pfirst);
+ PKTSETNEXT(osh, pfirst, NULL);
+
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+ __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+ PKTLEN(osh, pfirst), sublen, chan, seq));
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+ if (rxseq != seq) {
+ DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Subframe Data", dptr, dlen);
+ }
+#endif
+
+ PKTSETLEN(osh, pfirst, sublen);
+ PKTPULL(osh, pfirst, doff);
+
+ if (PKTLEN(osh, pfirst) == 0) {
+ PKTFREE(bus->dhd->osh, pfirst, FALSE);
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ bus->dhd->rx_errors++;
+ PKTFREE(osh, pfirst, FALSE);
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+ }
+
+ /* this packet will go up, link back into chain and count it */
+ PKTSETNEXT(osh, pfirst, pnext);
+ plast = pfirst;
+ num++;
+
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+ __FUNCTION__, num, pfirst,
+ PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+ PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+ prhex("", (uint8 *)PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 32));
+ }
+#endif /* DHD_DEBUG */
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+ if (num) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num);
+ dhd_os_sdlock(bus->dhd);
+ }
+
+ bus->rxglomframes++;
+ bus->rxglompkts += num;
+ }
+ return num;
+}
+
+/* Read frames from the dongle; returns the number of frames read and sets *finished when no more are pending */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+ osl_t *osh = bus->dhd->osh;
+ bcmsdh_info_t *sdh = bus->sdh;
+
+ uint16 len, check; /* Extracted hardware header fields */
+ uint8 chan, seq, doff; /* Extracted software header fields */
+ uint8 fcbits; /* Extracted fcbits from software header */
+ uint8 delta;
+
+ void *pkt; /* Packet for event or data frames */
+ uint16 pad; /* Number of pad bytes to read */
+ uint16 rdlen; /* Total number of bytes to read */
+ uint8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from bcmsdh calls */
+ uint8 txmax; /* Maximum tx sequence offered */
+ bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+ uint8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+ bool sdtest = FALSE; /* To limit message spew from test mode */
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(maxframes);
+
+#ifdef SDTEST
+ /* Allow pktgen to override maxframes */
+ if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+ maxframes = bus->pktgen_count;
+ sdtest = TRUE;
+ }
+#endif
+
+ /* Not finished unless we encounter the no-more-frames indication */
+ *finished = FALSE;
+
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+ rxseq++, rxleft--) {
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ uint8 cnt;
+ DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+ __FUNCTION__, bus->glomd, bus->glom));
+ cnt = dhdsdio_rxglom(bus, rxseq);
+ DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
+ if (dhd_readahead && bus->nextlen) {
+ uint16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
+
+ if (bus->bus == SPI_BUS) {
+ rdlen = len = nextlen;
+ }
+ else {
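+ /* The readahead length from the SW header is in 16-byte units */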
+ rdlen = len = nextlen << 4;
+
+ /* Pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+ }
+
+ /* We use the bus->rxctl buffer in WinXP for initial control pkt receives.
+ * Later we use the buffer-pool for data as well as control packets.
+ * This is required because dhd receives the full frame in gSPI, unlike SDIO.
+ * After the frame is received we have to distinguish whether it is a data
+ * or non-data frame.
+ */
+ /* Allocate a packet buffer */
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+ if (bus->bus == SPI_BUS) {
+ bus->usebufpool = FALSE;
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+ rxbuf = bus->rxctl;
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ /* dhd.rx_ctlerrs is higher level */
+ bus->rxc_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ } else {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+ "expected rxseq %d\n",
+ __FUNCTION__, len, rdlen, rxseq));
+ /* Just go try again w/normal header read */
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ } else {
+ if (bus->bus == SPI_BUS)
+ bus->usebufpool = TRUE;
+
+ ASSERT(!PKTLINK(pkt));
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+ rxbuf = (uint8 *)PKTDATA(osh, pkt);
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ bus->dhd->rx_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ /* Force retry w/normal header read. Don't attempt NAK for
+ * gSPI
+ */
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ /* Now check the header */
+ bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means readahead info was bad */
+ if (!(len|check)) {
+ DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+ __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+ " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+ len, check));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+ __FUNCTION__, len));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Check for consistency with readahead info */
+ len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+ if (len_consistent) {
+ /* Mismatch, force retry w/normal header (may be >4K) */
+ DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+ "expected rxseq %d\n",
+ __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+ " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+ seq));
+ bus->nextlen = 0;
+ }
+
+ bus->dhd->rx_readahead_cnt ++;
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", rxbuf, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ if (bus->bus == SPI_BUS) {
+ dhdsdio_read_control(bus, rxbuf, len, doff);
+ if (bus->usebufpool) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+ continue;
+ } else {
+ DHD_ERROR(("%s (nextlen): readahead on control"
+ " packet %d?\n", __FUNCTION__, seq));
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ dhdsdio_rxfail(bus, FALSE, TRUE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ }
+
+ if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+ DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+ "rx pktbuf's or not yet malloced.\n", len, chan));
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+ /* gSPI frames should not be handled in fragments */
+ if (bus->bus == SPI_BUS) {
+ break;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ bus->rxhdr, firstread, NULL, NULL, NULL);
+ bus->f2rxhdrs++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+ bus->rx_hdrfail++;
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ continue;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means no more frames */
+ if (!(len|check)) {
+ *finished = TRUE;
+ break;
+ }
+
+ /* Validate check bytes */
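+ /* The second HW header word must be the bitwise inverse of the length */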
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, len, check));
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+ bus->rx_badhdr++;
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
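+ /* Bits newly set in fcbits indicate XOFF; bits newly cleared indicate XON */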
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
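+ /* Sequence space is modulo-256; a window more than 0x40 ahead of tx_seq is assumed bogus and clamped */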
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+ (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+ /* Length to read */
+ rdlen = (len > firstread) ? (len - firstread) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ if ((rdlen + firstread) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+ __FUNCTION__, rdlen, chan));
+ bus->dhd->rx_dropped++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+ continue;
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ ASSERT(!PKTLINK(pkt));
+
+ /* Leave room for what we already read, and align remainder */
+ ASSERT(firstread < (PKTLEN(osh, pkt)));
+ PKTPULL(osh, pkt, firstread);
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+ ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ PKTPUSH(osh, pkt, firstread);
+ bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+ __FUNCTION__, len));
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("Glom Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+ PKTSETLEN(osh, pkt, len);
+ ASSERT(doff == SDPCM_HDRLEN);
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ PKTSETLEN(osh, pkt, len);
+ PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+ /* Test channel packets are processed separately */
+ if (chan == SDPCM_TEST_CHANNEL) {
+ dhdsdio_testrcv(bus, pkt, seq);
+ continue;
+ }
+#endif /* SDTEST */
+
+ if (PKTLEN(osh, pkt) == 0) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ continue;
+ }
+
+
+ /* Unlock during rx call */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, pkt, 1);
+ dhd_os_sdlock(bus->dhd);
+ }
+ rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+ /* Message if we hit the limit */
+ if (!rxleft && !sdtest)
+ DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+ else
+#endif /* DHD_DEBUG */
+ DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus = 0;
+ uint32 hmb_data;
+ uint8 fcbits;
+ uint retries = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Read mailbox data and ack that we did so */
+ R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+ if (!bus->rxskip) {
+ DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+ }
+ bus->rxskip = FALSE;
+ intstatus |= I_HMB_FRAME_IND;
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION));
+ else
+ DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+ }
+
+ /*
+ * Flow Control has been moved into the RX headers and this out-of-band
+ * method isn't used any more. Leave this here to remain backward
+ * compatible with older dongles.
+ */
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK |
+ HMB_DATA_VERSION_MASK)) {
+ DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+ }
+
+ return intstatus;
+}
+
+bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+ uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = TRUE; /* Flag for no more read data */
+ bool resched = FALSE; /* Flag indicating resched wanted */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ dhd_os_sdlock(bus->dhd);
+
+ /* If waiting for HTAVAIL, check status */
+ if (bus->clkstate == CLK_PENDING) {
+ int err;
+ uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+ /* Check for inconsistent device control */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ } else {
+ ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+
+ DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+ __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+ __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+ if (bus->clkstate == CLK_PENDING)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = FALSE;
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ W_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata++;
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata += 2;
+ bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&bus->dhd->wow_wakelock, 3*HZ);
+#endif
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ if (intstatus & I_WR_OOSYNC) {
+ DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ if (intstatus & I_SBINT) {
+ DHD_ERROR(("Dongle reports SBINT\n"));
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip)
+ intstatus &= ~I_HMB_FRAME_IND;
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE()) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&bus->dhd->wow_wakelock, 3*HZ);
+#endif
+ framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+ if (rxdone || bus->rxskip)
+ intstatus &= ~I_HMB_FRAME_IND;
+ rxlimit -= MIN(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+ bus->intstatus = intstatus;
+
+clkwait:
+ /* Re-enable interrupts to detect new device events (mailbox, rx frame)
+ * or clock availability. (Allows tx loop to check ipend if desired.)
+ * (Unless register access seems hosed, as we may not be able to ACK...)
+ */
+ if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+ DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+ __FUNCTION__, rxdone, framecnt));
+ bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+ bcmsdh_intr_enable(sdh);
+ }
+
+ if (DATAOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+ int ret, i;
+
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+ NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
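+ /* Poll the write-frame byte count until it drains to zero (bounded retries) */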
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+
+ printf("Return_dpc value is : %d\n", ret);
+ bus->ctrl_frame_stat = FALSE;
+ dhd_wait_event_wakeup(bus->dhd);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+ framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+ framecnt = dhdsdio_sendfromq(bus, framecnt);
+ txlimit -= framecnt;
+ }
+
+ /* Resched if events or tx frames are pending, else await next interrupt */
+ /* On failed register access, all bets are off: no resched or interrupts */
+ if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+ DHD_ERROR(("%s: failed backplane access over SDIO, halting operation %d \n",
+ __FUNCTION__, bcmsdh_regfail(sdh)));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->intstatus = 0;
+ } else if (bus->clkstate == CLK_PENDING) {
+ DHD_INFO(("%s: rescheduled due to CLK_PENDING awaiting \
+ I_CHIPACTIVE interrupt", __FUNCTION__));
+ resched = TRUE;
+ } else if (bus->intstatus || bus->ipend ||
+ (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+ PKT_AVAILABLE()) { /* Read multiple frames */
+ resched = TRUE;
+ }
+
+
+ bus->dpc_sched = resched;
+
+ /* If we're done for now, turn off clock request. */
+ if ((bus->clkstate != CLK_PENDING) && bus->idletime == DHD_IDLE_IMMEDIATE) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+ bool resched;
+
+ /* Call the DPC directly. */
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ resched = dhdsdio_dpc(bus);
+
+ return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)arg;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!bus) {
+ DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+ return;
+ }
+ sdh = bus->sdh;
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return;
+ }
+ /* Count the interrupt call */
+ bus->intrcount++;
+ bus->ipend = TRUE;
+
+ /* Shouldn't get this interrupt if we're sleeping? */
+ if (bus->sleeping) {
+ DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+ return;
+ }
+
+ /* Disable additional interrupts (is this needed now)? */
+ if (bus->intr) {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+ }
+
+ bcmsdh_intr_disable(sdh);
+ bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ dhd_os_wake_lock(bus->dhd);
+ while (dhdsdio_dpc(bus));
+ dhd_os_wake_unlock(bus->dhd);
+#else
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+#endif
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+ /* Default to specified length, or full range */
+ if (dhd_pktgen_len) {
+ bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+ bus->pktgen_minlen = bus->pktgen_maxlen;
+ } else {
+ bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+ bus->pktgen_minlen = 0;
+ }
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Default to per-watchdog burst with 10s print time */
+ bus->pktgen_freq = 1;
+ bus->pktgen_print = 10000 / dhd_watchdog_ms;
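+ /* Scale the configured generation count to one watchdog interval, rounding up */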
+ bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+ /* Default to echo mode */
+ bus->pktgen_mode = DHD_PKTGEN_ECHO;
+ bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+ void *pkt;
+ uint8 *data;
+ uint pktcount;
+ uint fillbyte;
+ osl_t *osh = bus->dhd->osh;
+ uint16 len;
+
+ /* Display current count if appropriate */
+ if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+ bus->pktgen_ptick = 0;
+ printf("%s: send attempts %d rcvd %d\n",
+ __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd);
+ }
+
+ /* For recv mode, just make sure dongle has started sending */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (!bus->pktgen_rcvd)
+ dhdsdio_sdtest_set(bus, TRUE);
+ return;
+ }
+
+ /* Otherwise, generate or request the specified number of packets */
+ for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+ /* Stop if total has been reached */
+ if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ break;
+ }
+
+ /* Allocate an appropriate-sized packet */
+ len = bus->pktgen_len;
+ if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+ TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ break;
+ }
+ PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Write test header cmd and extra based on mode */
+ switch (bus->pktgen_mode) {
+ case DHD_PKTGEN_ECHO:
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_SEND:
+ *data++ = SDPCM_TEST_DISCARD;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_RXBURST:
+ *data++ = SDPCM_TEST_BURST;
+ *data++ = (uint8)bus->pktgen_count;
+ break;
+
+ default:
+ DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+ PKTFREE(osh, pkt, TRUE);
+ bus->pktgen_count = 0;
+ return;
+ }
+
+ /* Write test header length field */
+ *data++ = (len >> 0);
+ *data++ = (len >> 8);
+
+ /* Then fill in the remainder -- N/A for burst, but who cares... */
+ for (fillbyte = 0; fillbyte < len; fillbyte++)
+ *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+ prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) {
+ bus->pktgen_fail++;
+ if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+ bus->pktgen_count = 0;
+ }
+ bus->pktgen_sent++;
+
+ /* Bump length if not fixed, wrap at max */
+ if (++bus->pktgen_len > bus->pktgen_maxlen)
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Special case for burst mode: just send one request! */
+ if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+ break;
+ }
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
+{
+ void *pkt;
+ uint8 *data;
+ osl_t *osh = bus->dhd->osh;
+
+ /* Allocate the packet */
+ if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ return;
+ }
+ PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Fill in the test header */
+ *data++ = SDPCM_TEST_SEND;
+ *data++ = start;
+ *data++ = (bus->pktgen_maxlen >> 0);
+ *data++ = (bus->pktgen_maxlen >> 8);
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE))
+ bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+ osl_t *osh = bus->dhd->osh;
+ uint8 *data;
+ uint pktlen;
+
+ uint8 cmd;
+ uint8 extra;
+ uint16 len;
+ uint16 offset;
+
+ /* Check for min length */
+ if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+ DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+
+ /* Extract header fields */
+ data = PKTDATA(osh, pkt);
+ cmd = *data++;
+ extra = *data++;
+ len = *data++; len += *data++ << 8;
+
+ /* Check length for relevant commands */
+ if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+ if (pktlen != len + SDPCM_TEST_HDRLEN) {
+ DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+ }
+
+ /* Process as per command */
+ switch (cmd) {
+ case SDPCM_TEST_ECHOREQ:
+ /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+ *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) {
+ bus->pktgen_sent++;
+ } else {
+ bus->pktgen_fail++;
+ PKTFREE(osh, pkt, FALSE);
+ }
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_ECHORSP:
+ if (bus->ext_loop) {
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+ }
+
+ for (offset = 0; offset < len; offset++, data++) {
+ if (*data != SDPCM_TEST_FILL(offset, extra)) {
+ DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+ "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+ offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+ break;
+ }
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_DISCARD:
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_BURST:
+ case SDPCM_TEST_SEND:
+ default:
+ DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ break;
+ }
+
+ /* For recv mode, stop at the limit (and tell the dongle to stop sending) */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_total && (bus->pktgen_rcvd >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ dhdsdio_sdtest_set(bus, FALSE);
+ }
+ }
+}
+#endif /* SDTEST */
+
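+/*
+ * Periodic bus watchdog: polls the device for pending interrupts when polling
+ * is enabled, drains console output and drives the test packet generator when
+ * those features are compiled in, and releases the backplane clock once the
+ * configured idle time expires. Returns TRUE if an interrupt is pending.
+ */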
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus;
+
+ DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+ bus = dhdp->bus;
+
+ if (bus->dhd->dongle_reset)
+ return FALSE;
+
+ /* Ignore the timer if simulating bus down */
+ if (bus->sleeping)
+ return FALSE;
+
+ /* Poll period: check device if appropriate. */
+ if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+ uint32 intstatus = 0;
+
+ /* Reset poll tick */
+ bus->polltick = 0;
+
+ /* Check device if no interrupts */
+ if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+ if (!bus->dpc_sched) {
+ uint8 devpend;
+ devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+ SDIOD_CCCR_INTPEND, NULL);
+ intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+ }
+
+ /* If there is something, make like the ISR and schedule the DPC */
+ if (intstatus) {
+ bus->pollcnt++;
+ bus->ipend = TRUE;
+ if (bus->intr) {
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+
+ }
+ }
+
+ /* Update interrupt tracking */
+ bus->lastintrs = bus->intrcount;
+ }
+
+#ifdef DHD_DEBUG
+ /* Poll for console output periodically */
+ if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+ bus->console.count += dhd_watchdog_ms;
+ if (bus->console.count >= dhd_console_ms) {
+ bus->console.count -= dhd_console_ms;
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (dhdsdio_readconsole(bus) < 0)
+ dhd_console_ms = 0; /* On error, stop trying */
+ }
+ }
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+ /* Generate packets if configured */
+ if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ bus->pktgen_tick = 0;
+ dhdsdio_pktgen(bus);
+ }
+#endif
+
+ /* On idle timeout clear activity flag and/or turn off clock */
+ if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+ if (++bus->idlecount >= bus->idletime) {
+ bus->idlecount = 0;
+ if (bus->activity) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ }
+ }
+
+ return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
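+/* Feed a command line to the dongle console: copy the message into the
+ * dongle's console buffer over the backplane and send an empty event packet
+ * so the dongle notices the pending virtual console input.
+ */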
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint32 addr, val;
+ int rv;
+ void *pkt;
+
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0)
+ return BCME_UNSUPPORTED;
+
+ /* Exclusive bus access */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ dhd_os_sdunlock(bus->dhd);
+ return BCME_NOTREADY;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ BUS_WAKE(bus);
+ /* No pend allowed since txpkt is called later, ht clk has to be on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Zero cbuf_index */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+ val = htol32(0);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Write message into cbuf */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+ goto done;
+
+ /* Write length into vcons_in */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+ val = htol32(msglen);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Bump dongle by sending an empty event pkt.
+ * sdpcm_sendup (RX) checks for virtual console input.
+ */
+ if (((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) &&
+ bus->clkstate == CLK_AVAIL)
+ dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE);
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
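+/* Hex-dump one function's CIS, walking the tuple chain until the end tag */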
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+ uint byte, tag, tdata;
+ DHD_INFO(("Function %d CIS:\n", fn));
+
+ for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+ if ((byte % 16) == 0)
+ DHD_INFO((" "));
+ DHD_INFO(("%02x ", cis[byte]));
+ if ((byte % 16) == 15)
+ DHD_INFO(("\n"));
+ if (!tdata--) {
+ tag = cis[byte];
+ if (tag == 0xff)
+ break;
+ else if (!tag)
+ tdata = 0;
+ else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+ tdata = cis[byte + 1] + 1;
+ else
+ DHD_INFO(("]"));
+ }
+ }
+ if ((byte % 16) != 15)
+ DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
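+/* Return TRUE if the given chip ID is one this driver supports */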
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+ if (chipid == BCM4325_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4329_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4315_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4319_CHIP_ID)
+ return TRUE;
+ return FALSE;
+}
+
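+/*
+ * Bus probe entry point, called by bcmsdh when a matching SDIO device is
+ * found: validate the vendor/device IDs, allocate bus state, attach to the
+ * dongle and the network layer, register the SDIO interrupt handler and
+ * start the bus.
+ */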
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+ uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+ int ret;
+ dhd_bus_t *bus;
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver. Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behavior since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent initializations.
+ */
+ dhd_txbound = DHD_TXBOUND;
+ dhd_rxbound = DHD_RXBOUND;
+ dhd_alignctl = TRUE;
+ sd1idle = TRUE;
+ dhd_readahead = TRUE;
+ retrydata = FALSE;
+ dhd_doflow = TRUE;
+ dhd_dongle_memsize = 0;
+ dhd_txminmax = DHD_TXMINMAX;
+
+ forcealign = TRUE;
+
+
+ dhd_common_init();
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+ /* We make assumptions about address window mappings */
+ ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+ /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+ * means early parse could fail, so here we should get either an ID
+ * we recognize OR (-1) indicating we must request power first.
+ */
+ /* Check the Vendor ID */
+ switch (venid) {
+ case 0x0000:
+ case VENDOR_BROADCOM:
+ break;
+ default:
+ DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+ __FUNCTION__, venid));
+ return NULL;
+ }
+
+ /* Check the Device ID and make sure it's one that we support */
+ switch (devid) {
+ case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */
+ case BCM4325_D11G_ID: /* 4325 802.11g 2.4 GHz band id */
+ case BCM4325_D11A_ID: /* 4325 802.11a 5 GHz band id */
+ DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4329_D11NDUAL_ID: /* 4329 802.11n dualband device */
+ case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */
+ case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */
+ case 0x4329:
+ DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */
+ case BCM4315_D11G_ID: /* 4315 802.11g id */
+ case BCM4315_D11A_ID: /* 4315 802.11a id */
+ DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4319_D11N_ID: /* 4319 802.11n id */
+ case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */
+ case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */
+ DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+ break;
+ case 0:
+ DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+ __FUNCTION__));
+ break;
+
+ default:
+ DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+ __FUNCTION__, venid, devid));
+ return NULL;
+ }
+
+ if (osh == NULL) {
+ /* Ask the OS interface part for an OSL handle */
+ if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
+ DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+ return NULL;
+ }
+ }
+
+ /* Allocate private bus interface state */
+ if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+ DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+ goto fail;
+ }
+ bzero(bus, sizeof(dhd_bus_t));
+ bus->sdh = sdh;
+ bus->cl_devid = (uint16)devid;
+ bus->bus = DHD_BUS;
+ bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+ /* attempt to attach to the dongle */
+ if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+ DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Attach to the dhd/OS/network interface */
+ if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+ DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Allocate buffers */
+ if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+ bcmsdh_intr_disable(sdh);
+ if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+ DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+ DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+
+ DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+
+ /* if firmware path present try to download and bring up bus */
+ if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+#if 1
+ DHD_ERROR(("%s: failed\n", __FUNCTION__));
+ goto fail;
+#else
+ if (ret == BCME_NOTUP) {
+ DHD_ERROR(("%s: dongle is not responding\n", __FUNCTION__));
+ goto fail;
+ }
+#endif
+ }
+ /* Ok, have the per-port tell the stack we're open for business */
+ if (dhd_net_attach(bus->dhd, 0) != 0) {
+ DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ return bus;
+
+fail:
+ dhdsdio_release(bus, osh);
+ return NULL;
+}
+
+
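+/*
+ * First probe stage: bring up the chip clock, optionally dump the CIS,
+ * attach the SI utility layer, check that the chip is supported and locate
+ * the ARM, SOCRAM and SDPCM device cores.
+ */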
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+ uint16 devid)
+{
+ uint8 clkctl = 0;
+ int err = 0;
+
+ bus->alp_only = TRUE;
+
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+ DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+ }
+
+#ifdef DHD_DEBUG
+ printf("F1 signature read @0x18000000=0x%4x\n",
+ bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4));
+
+
+#endif /* DHD_DEBUG */
+
+
+ /* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+ if (!err)
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+ DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ err, DHD_INIT_CLKCTL1, clkctl));
+ goto fail;
+ }
+
+
+#ifdef DHD_DEBUG
+ if (DHD_INFO_ON()) {
+ uint fn, numfn;
+ uint8 *cis[SDIOD_MAX_IOFUNCS];
+ int err = 0;
+
+ numfn = bcmsdh_query_iofnum(sdh);
+ ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+ /* Make sure ALP is available before trying to read CIS */
+ SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+ /* Now request ALP be put on the bus */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ DHD_INIT_CLKCTL2, &err);
+ OSL_DELAY(65);
+
+ for (fn = 0; fn <= numfn; fn++) {
+ if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+ break;
+ }
+ bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+ if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+ dhd_dump_cis(fn, cis[fn]);
+ }
+
+ while (fn-- > 0) {
+ ASSERT(cis[fn]);
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ }
+
+ if (err) {
+ DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+ goto fail;
+ }
+ }
+#endif /* DHD_DEBUG */
+
+ /* si_attach() will provide an SI handle and scan the backplane */
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+ &bus->vars, &bus->varsz))) {
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+ if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+ DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+ __FUNCTION__, bus->sih->chip));
+ goto fail;
+ }
+
+ si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+ /* Get info on the ARM and SOCRAM cores... */
+ if (!DHD_NOPMU(bus)) {
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+ if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ bus->ramsize = bus->orig_ramsize;
+ if (dhd_dongle_memsize)
+ dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+ DHD_ERROR(("DHD: dongle ram size is set to %d (orig %d)\n",
+ bus->ramsize, bus->orig_ramsize));
+ }
+
+ /* ...but normally deal with the SDPCMDEV core */
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+ !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+ DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+ goto fail;
+ }
+ bus->sdpcmrev = si_corerev(bus->sih);
+
+ /* Set core control so an SDIO reset does a backplane reset */
+ OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+
+ pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+ /* Locate an appropriately-aligned portion of hdrbuf */
+ bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = (bool)dhd_intr;
+ if ((bus->poll = (bool)dhd_poll))
+ bus->pollrate = 1;
+
+ return TRUE;
+
+fail:
+ return FALSE;
+}
+
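+/* Second probe stage: allocate (or claim preallocated) control and glom
+ * receive buffers and align the data buffer pointer.
+ */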
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifndef DHD_USE_STATIC_BUF
+ if (bus->dhd->maxctl) {
+ bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+ if (!(bus->rxbuf = MALLOC(osh, bus->rxblen))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+ __FUNCTION__, bus->rxblen));
+ goto fail;
+ }
+ }
+
+ /* Allocate buffer to receive glomed packet */
+ if (!(bus->databuf = MALLOC(osh, MAX_DATA_BUF))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+ __FUNCTION__, MAX_DATA_BUF));
+ /* release rxbuf which was already allocated above */
+ if (bus->rxblen)
+ MFREE(osh, bus->rxbuf, bus->rxblen);
+ goto fail;
+ }
+#else
+ if (bus->dhd->maxctl) {
+ bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+ if (!(bus->rxbuf = dhd_os_prealloc(DHD_PREALLOC_RXBUF, bus->rxblen))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+ __FUNCTION__, bus->rxblen));
+ goto fail;
+ }
+ }
+ /* Allocate buffer to receive glomed packet */
+ if (!(bus->databuf = dhd_os_prealloc(DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+ __FUNCTION__, MAX_DATA_BUF));
+ goto fail;
+ }
+#endif /* DHD_USE_STATIC_BUF */
+
+ /* Align the buffer */
+ if ((uintptr)bus->databuf % DHD_SDALIGN)
+ bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+ else
+ bus->dataptr = bus->databuf;
+
+ return TRUE;
+
+fail:
+ return FALSE;
+}
+
+
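+/*
+ * Third probe stage: reset the bus state, drop the backplane clock and query
+ * the SD host (divisor, bus mode, F2 block size, rx chaining) via bcmsdh
+ * iovars.
+ */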
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ int32 fnum;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef SDTEST
+ dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->sleeping = FALSE;
+ bus->rxflow = FALSE;
+ bus->prev_rxlim_hit = 0;
+
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = (int32)dhd_idletime;
+ bus->idleclock = DHD_IDLE_ACTIVE;
+
+ /* Query the SD clock speed */
+ if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+ bus->sd_divisor = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_divisor", bus->sd_divisor));
+ }
+
+ /* Query the SD bus mode */
+ if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+ bus->sd_mode = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_mode", bus->sd_mode));
+ }
+
+ /* Query the F2 block size, set roundup accordingly */
+ fnum = 2;
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ /* Query if bus module supports packet chaining, default to use if supported */
+ if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+ &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_rxchain = FALSE;
+ } else {
+ DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+ __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+ }
+ bus->use_rxchain = (bool)bus->sd_rxchain;
+
+ return TRUE;
+}
+
+bool
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *fw_path, char *nv_path)
+{
+ bool ret;
+ bus->fw_path = fw_path;
+ bus->nv_path = nv_path;
+
+ ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+ return ret;
+}
+
+static bool
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+ bool ret;
+
+ /* Download the firmware */
+ dhd_os_wake_lock(bus->dhd);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ ret = _dhdsdio_download_firmware(bus) == 0;
+
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ dhd_os_wake_unlock(bus->dhd);
+ return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(osh);
+
+
+ /* De-register interrupt handler */
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_intr_dereg(bus->sdh);
+
+ if (bus->dhd) {
+
+ dhdsdio_release_dongle(bus, osh, TRUE);
+
+ dhd_detach(bus->dhd);
+ bus->dhd = NULL;
+ }
+
+ dhdsdio_release_malloc(bus, osh);
+
+
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+ }
+
+ if (osh)
+ dhd_osl_detach(osh);
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd && bus->dhd->dongle_reset)
+ return;
+
+ if (bus->rxbuf) {
+#ifndef DHD_USE_STATIC_BUF
+ MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+ bus->rxctl = bus->rxbuf = NULL;
+ bus->rxlen = 0;
+ }
+
+ if (bus->databuf) {
+#ifndef DHD_USE_STATIC_BUF
+ MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+ bus->databuf = NULL;
+ }
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, int reset_flag)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+ return;
+
+ if (bus->sih) {
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+#if !defined(BCMLXSDMMC)
+ si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ si_detach(bus->sih);
+ if (bus->vars && bus->varsz)
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(bus->dhd);
+ dhdsdio_release(bus, bus->dhd->osh);
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+ dhdsdio_probe,
+ dhdsdio_disconnect
+};
+
+int
+dhd_bus_register(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_unregister();
+}
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+
+ DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+ /* Download image */
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset, dlarray + offset, MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ dlarray + offset, sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ unsigned char *ularray;
+
+ ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+ /* Upload image to verify downloaded contents. */
+ offset = 0;
+ memset(ularray, 0xaa, bus->ramsize);
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+ ularray + offset, sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+ if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+ DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
+ ASSERT(0);
+ goto err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+
+ MFREE(bus->dhd->osh, ularray, bus->ramsize);
+ }
+#endif /* DHD_DEBUG */
+
+err:
+ return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
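+/* Stream a firmware image file into dongle RAM in MEMBLOCK-sized chunks */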
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *fw_path)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ uint len;
+ void *image = NULL;
+ uint8 *memblock = NULL, *memptr;
+
+ DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, fw_path));
+
+ image = dhd_os_open_image(fw_path);
+ if (image == NULL)
+ goto err;
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ /* Download image */
+ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, len, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
+/*
+ * process_nvram_vars: Takes a buffer of "<var>=<value>\n" lines read from a file, ending in a NUL.
+ * Removes carriage returns, empty lines and comment lines, and converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs. The end of the buffer is marked by two NULs.
+ */
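+/*
+ * Illustrative example (not part of the original source): the input
+ *   "macaddr=00:11:22:33:44:55\r\n# comment\n\nboardtype=0x4b9\n"
+ * is compacted in place to
+ *   "macaddr=00:11:22:33:44:55\0boardtype=0x4b9\0" followed by NUL padding;
+ * the returned length counts the compacted bytes (each assignment plus its
+ * NUL terminator), excluding the trailing padding.
+ */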
+
+static uint
+process_nvram_vars(char *varbuf, uint len)
+{
+ char *dp;
+ bool findNewline;
+ int column;
+ uint buf_len, n;
+
+ dp = varbuf;
+
+ findNewline = FALSE;
+ column = 0;
+
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == 0)
+ break;
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
+ }
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = dp - varbuf;
+
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
+
+/*
+ EXAMPLE: nvram_array
+ nvram_array format:
+ name=value
+ Terminate each assignment with a newline, and end the array with an empty
+ string containing only a newline.
+
+ For example:
+ unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
+ Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+ Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+ bus->nvram_params = nvram_params;
+}
+
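+/*
+ * Download NVRAM variables to the dongle, taking them from the file named by
+ * nv_path when one is given, otherwise from the in-memory nvram_params string.
+ */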
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ uint len;
+ void * image = NULL;
+ char * memblock = NULL;
+ char *bufp;
+ char *nv_path;
+ bool nvram_file_exists;
+
+ nv_path = bus->nv_path;
+
+ nvram_file_exists = ((nv_path != NULL) && (nv_path[0] != '\0'));
+ if (!nvram_file_exists && (bus->nvram_params == NULL))
+ return (0);
+
+ if (nvram_file_exists) {
+ image = dhd_os_open_image(nv_path);
+ if (image == NULL)
+ goto err;
+ }
+
+ memblock = MALLOC(bus->dhd->osh, MEMBLOCK);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+
+ /* Download variables */
+ if (nvram_file_exists) {
+ len = dhd_os_get_image_block(memblock, MEMBLOCK, image);
+ }
+ else {
+ len = strlen(bus->nvram_params);
+ ASSERT(len <= MEMBLOCK);
+ if (len > MEMBLOCK)
+ len = MEMBLOCK;
+ memcpy(memblock, bus->nvram_params, len);
+ }
+
+ if (len > 0 && len < MEMBLOCK) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+ len = process_nvram_vars(bufp, len);
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error downloading vars: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+ else {
+ DHD_ERROR(("%s: error reading nvram file: %d\n",
+ __FUNCTION__, len));
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
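+/*
+ * Full download sequence: hold the ARM core in reset, write the firmware
+ * image (external file or embedded array) and the NVRAM variables into
+ * dongle RAM, then release the ARM core from reset.
+ */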
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+
+ bool embed = FALSE; /* download embedded firmware */
+ bool dlok = FALSE; /* download firmware succeeded */
+
+ /* Out immediately if no image to download */
+ if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ return bcmerror;
+#endif
+ }
+
+ /* Keep arm in reset */
+ if (dhdsdio_download_state(bus, TRUE)) {
+ DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+ if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+ DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ goto err;
+#endif
+ }
+ else {
+ embed = FALSE;
+ dlok = TRUE;
+ }
+ }
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdsdio_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ }
+ else {
+ dlok = TRUE;
+ }
+ }
+#endif
+ if (!dlok) {
+ DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* EXAMPLE: nvram_array */
+ /* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+ /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+ /* External nvram takes precedence if specified */
+ if (dhdsdio_download_nvram(bus)) {
+ DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+ }
+
+ /* Take arm out of reset */
+ if (dhdsdio_download_state(bus, FALSE)) {
+ DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ int status;
+
+ /* 4329: GSPI check */
+ status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+ return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chip;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+ return bus->dhd;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+ return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+ return SDPCM_HDRLEN;
+}
+
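+/*
+ * Turn the dongle off or back on. flag==TRUE stops the bus and detaches from
+ * the dongle so the caller can remove power; flag==FALSE re-attaches,
+ * re-downloads firmware and re-initializes the bus after power is restored.
+ */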
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ int bcmerror = 0;
+ dhd_bus_t *bus;
+
+ bus = dhdp->bus;
+
+ if (flag == TRUE) {
+ if (!bus->dhd->dongle_reset) {
+ dhd_os_sdlock(dhdp);
+ /* Turning off watchdog */
+ dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Force flow control as protection when stop comes before ifconfig_down */
+ dhd_txflowcontrol(bus->dhd, 0, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+
+#if !defined(OOB_INTR_ONLY)
+ /* to avoid spurious client interrupts during the stop process */
+ bcmsdh_stop(bus->sdh);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ /* Expect app to have torn down any connection before calling */
+ /* Stop the bus, disable F2 */
+ dhd_bus_stop(bus, FALSE);
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_set_irq(FALSE);
+#endif /* defined(OOB_INTR_ONLY) */
+ /* Clean tx/rx buffer pointers, detach from the dongle */
+ dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE);
+
+ bus->dhd->dongle_reset = TRUE;
+ bus->dhd->up = FALSE;
+ dhd_os_sdunlock(dhdp);
+
+ DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__));
+ /* App can now remove power from device */
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else {
+ /* App must have restored power to device before calling */
+
+ DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset) {
+ /* Turn on WLAN */
+ dhd_os_sdlock(dhdp);
+
+ /* Reset SD client */
+ bcmsdh_reset(bus->sdh);
+
+ /* Attempt to re-attach & download */
+ if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+ (uint32 *)SI_ENUM_BASE,
+ bus->cl_devid)) {
+ /* Attempt to download binary to the dongle */
+ if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+ dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+
+ /* Re-init bus, enable F2 transfer */
+ bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+ if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_set_irq(TRUE);
+ dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+ bus->dhd->dongle_reset = FALSE;
+ bus->dhd->up = TRUE;
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Restore flow control */
+ dhd_txflowcontrol(bus->dhd, 0, OFF);
+#endif
+ /* Turn the watchdog back on */
+ dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+ DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+ } else {
+ dhd_bus_stop(bus, FALSE);
+ dhdsdio_release_dongle(bus, bus->dhd->osh, FALSE);
+ }
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+
+ dhd_os_sdunlock(dhdp);
+ } else {
+ bcmerror = BCME_NOTDOWN;
+ DHD_ERROR(("%s: Set DEVRESET=FALSE invoked when device is on\n",
+ __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ }
+ }
+ return bcmerror;
+}
diff --git a/drivers/net/wireless/bcm4329/dngl_stats.h b/drivers/net/wireless/bcm4329/dngl_stats.h
new file mode 100644
index 000000000000..e5db54e7edfe
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h,v 1.2.140.3 2008/05/26 16:52:08 Exp $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcm4329/hndpmu.c b/drivers/net/wireless/bcm4329/hndpmu.c
new file mode 100644
index 000000000000..307347a43bde
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/hndpmu.c
@@ -0,0 +1,131 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c,v 1.95.2.17.4.11.2.63 2010/07/21 13:55:09 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+/* debug/trace */
+#define PMU_ERROR(args)
+
+#define PMU_MSG(args)
+
+
+/* SDIO Pad drive strength to select value mappings */
+typedef struct {
+ uint8 strength; /* Pad Drive Strength in mA */
+ uint8 sel; /* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+ {4, 0x2},
+ {2, 0x3},
+ {1, 0x0},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0} };
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
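+/*
+ * Program the SDIO pad drive strength: pick the lookup table for this
+ * chip/PMU revision, map the requested milliamp value to a select code and
+ * write it into the chipcommon chipcontrol register.
+ */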
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+ chipcregs_t *cc;
+ uint origidx, intr_val = 0;
+ sdiod_drive_str_t *str_tab = NULL;
+ uint32 str_mask = 0;
+ uint32 str_shift = 0;
+
+ if (!(sih->cccaps & CC_CAP_PMU)) {
+ return;
+ }
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+ switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+ str_mask = 0x30000000;
+ str_shift = 28;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+ case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+
+ default:
+ PMU_MSG(("No SDIO Drive strength init done for chip %x rev %d pmurev %d\n",
+ sih->chip, sih->chiprev, sih->pmurev));
+
+ break;
+ }
+
+ if (str_tab != NULL) {
+ uint32 drivestrength_sel = 0;
+ uint32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+ cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+ PMU_MSG(("SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp));
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, intr_val);
+}
diff --git a/drivers/net/wireless/bcm4329/include/Makefile b/drivers/net/wireless/bcm4329/include/Makefile
new file mode 100644
index 000000000000..439ead14a0e6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/Makefile
@@ -0,0 +1,21 @@
+#
+# include/Makefile
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile,v 13.5 2005/02/17 19:11:31 Exp $
+#
+
+SRCBASE = ..
+
+TARGETS = epivers.h
+
+
+all release:
+ bash epivers.sh
+
+clean:
+ rm -rf ${TARGETS} *.prev
+
+
+.PHONY: all release clean
diff --git a/drivers/net/wireless/bcm4329/include/aidmp.h b/drivers/net/wireless/bcm4329/include/aidmp.h
new file mode 100644
index 000000000000..a927e5dae586
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/aidmp.h
@@ -0,0 +1,368 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h,v 13.2.10.1 2008/05/07 20:32:12 Exp $
+ */
+
+
+#ifndef _AIDMP_H
+#define _AIDMP_H
+
+
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+
+#define CC_SIM 0
+#define CC_EROM 1
+#define CC_CORESIGHT 9
+#define CC_VERIF 0xb
+#define CC_OPTIMO 0xd
+#define CC_GEN 0xe
+#define CC_PRIMECELL 0xf
+
+
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+
+
+#define CIA_MFG_MASK 0xfff00000
+#define CIA_MFG_SHIFT 20
+#define CIA_CID_MASK 0x000fff00
+#define CIA_CID_SHIFT 8
+#define CIA_CCL_MASK 0x000000f0
+#define CIA_CCL_SHIFT 4
+
+
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+#define CIB_NSW_MASK 0x00f80000
+#define CIB_NSW_SHIFT 19
+#define CIB_NMW_MASK 0x0007c000
+#define CIB_NMW_SHIFT 14
+#define CIB_NSP_MASK 0x00003e00
+#define CIB_NSP_SHIFT 9
+#define CIB_NMP_MASK 0x000001f0
+#define CIB_NMP_SHIFT 4
+
+
+#define MPD_MUI_MASK 0x0000ff00
+#define MPD_MUI_SHIFT 8
+#define MPD_MP_MASK 0x000000f0
+#define MPD_MP_SHIFT 4
+
+
+#define AD_ADDR_MASK 0xfffff000
+#define AD_SP_MASK 0x00000f00
+#define AD_SP_SHIFT 8
+#define AD_ST_MASK 0x000000c0
+#define AD_ST_SHIFT 6
+#define AD_ST_SLAVE 0x00000000
+#define AD_ST_BRIDGE 0x00000040
+#define AD_ST_SWRAP 0x00000080
+#define AD_ST_MWRAP 0x000000c0
+#define AD_SZ_MASK 0x00000030
+#define AD_SZ_SHIFT 4
+#define AD_SZ_4K 0x00000000
+#define AD_SZ_8K 0x00000010
+#define AD_SZ_16K 0x00000020
+#define AD_SZ_SZD 0x00000030
+#define AD_AG32 0x00000008
+#define AD_ADDR_ALIGN 0x00000fff
+#define AD_SZ_BASE 0x00001000
+
+
+#define SD_SZ_MASK 0xfffff000
+#define SD_SG32 0x00000008
+#define SD_SZ_ALIGN 0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _aidmp {
+ uint32 oobselina30;
+ uint32 oobselina74;
+ uint32 PAD[6];
+ uint32 oobselinb30;
+ uint32 oobselinb74;
+ uint32 PAD[6];
+ uint32 oobselinc30;
+ uint32 oobselinc74;
+ uint32 PAD[6];
+ uint32 oobselind30;
+ uint32 oobselind74;
+ uint32 PAD[38];
+ uint32 oobselouta30;
+ uint32 oobselouta74;
+ uint32 PAD[6];
+ uint32 oobseloutb30;
+ uint32 oobseloutb74;
+ uint32 PAD[6];
+ uint32 oobseloutc30;
+ uint32 oobseloutc74;
+ uint32 PAD[6];
+ uint32 oobseloutd30;
+ uint32 oobseloutd74;
+ uint32 PAD[38];
+ uint32 oobsynca;
+ uint32 oobseloutaen;
+ uint32 PAD[6];
+ uint32 oobsyncb;
+ uint32 oobseloutben;
+ uint32 PAD[6];
+ uint32 oobsyncc;
+ uint32 oobseloutcen;
+ uint32 PAD[6];
+ uint32 oobsyncd;
+ uint32 oobseloutden;
+ uint32 PAD[38];
+ uint32 oobaextwidth;
+ uint32 oobainwidth;
+ uint32 oobaoutwidth;
+ uint32 PAD[5];
+ uint32 oobbextwidth;
+ uint32 oobbinwidth;
+ uint32 oobboutwidth;
+ uint32 PAD[5];
+ uint32 oobcextwidth;
+ uint32 oobcinwidth;
+ uint32 oobcoutwidth;
+ uint32 PAD[5];
+ uint32 oobdextwidth;
+ uint32 oobdinwidth;
+ uint32 oobdoutwidth;
+ uint32 PAD[37];
+ uint32 ioctrlset;
+ uint32 ioctrlclear;
+ uint32 ioctrl;
+ uint32 PAD[61];
+ uint32 iostatus;
+ uint32 PAD[127];
+ uint32 ioctrlwidth;
+ uint32 iostatuswidth;
+ uint32 PAD[62];
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 PAD[60];
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 PAD[56];
+ uint32 intstatus;
+ uint32 PAD[127];
+ uint32 config;
+ uint32 PAD[63];
+ uint32 itcr;
+ uint32 PAD[3];
+ uint32 itipooba;
+ uint32 itipoobb;
+ uint32 itipoobc;
+ uint32 itipoobd;
+ uint32 PAD[4];
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+ uint32 PAD[4];
+ uint32 itopooba;
+ uint32 itopoobb;
+ uint32 itopoobc;
+ uint32 itopoobd;
+ uint32 PAD[4];
+ uint32 itopoobain;
+ uint32 itopoobbin;
+ uint32 itopoobcin;
+ uint32 itopoobdin;
+ uint32 PAD[4];
+ uint32 itopreset;
+ uint32 PAD[15];
+ uint32 peripherialid4;
+ uint32 peripherialid5;
+ uint32 peripherialid6;
+ uint32 peripherialid7;
+ uint32 peripherialid0;
+ uint32 peripherialid1;
+ uint32 peripherialid2;
+ uint32 peripherialid3;
+ uint32 componentid0;
+ uint32 componentid1;
+ uint32 componentid2;
+ uint32 componentid3;
+} aidmp_t;
+
+#endif
+
+
+#define OOB_BUSCONFIG 0x020
+#define OOB_STATUSA 0x100
+#define OOB_STATUSB 0x104
+#define OOB_STATUSC 0x108
+#define OOB_STATUSD 0x10c
+#define OOB_ENABLEA0 0x200
+#define OOB_ENABLEA1 0x204
+#define OOB_ENABLEA2 0x208
+#define OOB_ENABLEA3 0x20c
+#define OOB_ENABLEB0 0x280
+#define OOB_ENABLEB1 0x284
+#define OOB_ENABLEB2 0x288
+#define OOB_ENABLEB3 0x28c
+#define OOB_ENABLEC0 0x300
+#define OOB_ENABLEC1 0x304
+#define OOB_ENABLEC2 0x308
+#define OOB_ENABLEC3 0x30c
+#define OOB_ENABLED0 0x380
+#define OOB_ENABLED1 0x384
+#define OOB_ENABLED2 0x388
+#define OOB_ENABLED3 0x38c
+#define OOB_ITCR 0xf00
+#define OOB_ITIPOOBA 0xf10
+#define OOB_ITIPOOBB 0xf14
+#define OOB_ITIPOOBC 0xf18
+#define OOB_ITIPOOBD 0xf1c
+#define OOB_ITOPOOBA 0xf30
+#define OOB_ITOPOOBB 0xf34
+#define OOB_ITOPOOBC 0xf38
+#define OOB_ITOPOOBD 0xf3c
+
+
+#define AI_OOBSELINA30 0x000
+#define AI_OOBSELINA74 0x004
+#define AI_OOBSELINB30 0x020
+#define AI_OOBSELINB74 0x024
+#define AI_OOBSELINC30 0x040
+#define AI_OOBSELINC74 0x044
+#define AI_OOBSELIND30 0x060
+#define AI_OOBSELIND74 0x064
+#define AI_OOBSELOUTA30 0x100
+#define AI_OOBSELOUTA74 0x104
+#define AI_OOBSELOUTB30 0x120
+#define AI_OOBSELOUTB74 0x124
+#define AI_OOBSELOUTC30 0x140
+#define AI_OOBSELOUTC74 0x144
+#define AI_OOBSELOUTD30 0x160
+#define AI_OOBSELOUTD74 0x164
+#define AI_OOBSYNCA 0x200
+#define AI_OOBSELOUTAEN 0x204
+#define AI_OOBSYNCB 0x220
+#define AI_OOBSELOUTBEN 0x224
+#define AI_OOBSYNCC 0x240
+#define AI_OOBSELOUTCEN 0x244
+#define AI_OOBSYNCD 0x260
+#define AI_OOBSELOUTDEN 0x264
+#define AI_OOBAEXTWIDTH 0x300
+#define AI_OOBAINWIDTH 0x304
+#define AI_OOBAOUTWIDTH 0x308
+#define AI_OOBBEXTWIDTH 0x320
+#define AI_OOBBINWIDTH 0x324
+#define AI_OOBBOUTWIDTH 0x328
+#define AI_OOBCEXTWIDTH 0x340
+#define AI_OOBCINWIDTH 0x344
+#define AI_OOBCOUTWIDTH 0x348
+#define AI_OOBDEXTWIDTH 0x360
+#define AI_OOBDINWIDTH 0x364
+#define AI_OOBDOUTWIDTH 0x368
+#define AI_IOCTRLSET 0x400
+#define AI_IOCTRLCLEAR 0x404
+#define AI_IOCTRL 0x408
+#define AI_IOSTATUS 0x500
+#define AI_IOCTRLWIDTH 0x700
+#define AI_IOSTATUSWIDTH 0x704
+#define AI_RESETCTRL 0x800
+#define AI_RESETSTATUS 0x804
+#define AI_RESETREADID 0x808
+#define AI_RESETWRITEID 0x80c
+#define AI_ERRLOGCTRL 0xa00
+#define AI_ERRLOGDONE 0xa04
+#define AI_ERRLOGSTATUS 0xa08
+#define AI_ERRLOGADDRLO 0xa0c
+#define AI_ERRLOGADDRHI 0xa10
+#define AI_ERRLOGID 0xa14
+#define AI_ERRLOGUSER 0xa18
+#define AI_ERRLOGFLAGS 0xa1c
+#define AI_INTSTATUS 0xa00
+#define AI_CONFIG 0xe00
+#define AI_ITCR 0xf00
+#define AI_ITIPOOBA 0xf10
+#define AI_ITIPOOBB 0xf14
+#define AI_ITIPOOBC 0xf18
+#define AI_ITIPOOBD 0xf1c
+#define AI_ITIPOOBAOUT 0xf30
+#define AI_ITIPOOBBOUT 0xf34
+#define AI_ITIPOOBCOUT 0xf38
+#define AI_ITIPOOBDOUT 0xf3c
+#define AI_ITOPOOBA 0xf50
+#define AI_ITOPOOBB 0xf54
+#define AI_ITOPOOBC 0xf58
+#define AI_ITOPOOBD 0xf5c
+#define AI_ITOPOOBAIN 0xf70
+#define AI_ITOPOOBBIN 0xf74
+#define AI_ITOPOOBCIN 0xf78
+#define AI_ITOPOOBDIN 0xf7c
+#define AI_ITOPRESET 0xf90
+#define AI_PERIPHERIALID4 0xfd0
+#define AI_PERIPHERIALID5 0xfd4
+#define AI_PERIPHERIALID6 0xfd8
+#define AI_PERIPHERIALID7 0xfdc
+#define AI_PERIPHERIALID0 0xfe0
+#define AI_PERIPHERIALID1 0xfe4
+#define AI_PERIPHERIALID2 0xfe8
+#define AI_PERIPHERIALID3 0xfec
+#define AI_COMPONENTID0 0xff0
+#define AI_COMPONENTID1 0xff4
+#define AI_COMPONENTID2 0xff8
+#define AI_COMPONENTID3 0xffc
+
+
+#define AIRC_RESET 1
+
+
+#define AICFG_OOB 0x00000020
+#define AICFG_IOS 0x00000010
+#define AICFG_IOC 0x00000008
+#define AICFG_TO 0x00000004
+#define AICFG_ERRL 0x00000002
+#define AICFG_RST 0x00000001
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/bcmcdc.h b/drivers/net/wireless/bcm4329/include/bcmcdc.h
new file mode 100644
index 000000000000..c2a860beab24
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmcdc.h
@@ -0,0 +1,100 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h,v 13.14.16.3.16.4 2009/04/12 16:58:45 Exp $
+ */
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+ uint32 cmd; /* ioctl command value */
+ uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+ uint32 flags; /* flag defns given below */
+ uint32 status; /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */
+ /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT 0
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT 16
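+/* For illustration (not part of the protocol headers): a request carrying a
+ * 64-byte input buffer and expecting up to a 256-byte response would set
+ * len = (64 << CDCL_IOC_INLEN_SHIFT) | (256 << CDCL_IOC_OUTLEN_SHIFT).
+ */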
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
+#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ * The BDC header is used on data packets to convey priority across USB.
+ */
+
+#define BDC_HEADER_LEN 4
+
+#define BDC_PROTO_VER 1 /* Protocol version */
+
+#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
+
+#define BDC_FLAG__UNUSED 0x03 /* Unassigned */
+#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
+
+#define BDC_PRIORITY_MASK 0x7
+
+#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */
+ /* FLOW CONTROL info only */
+#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */
+
+#define BDC_FLAG2_IF_MASK 0x0f /* APSTA: interface on which the packet was received */
+#define BDC_FLAG2_IF_SHIFT 0
+
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+struct bdc_header {
+ uint8 flags; /* Flags */
+ uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 flow control info for usb */
+ uint8 flags2;
+ uint8 rssi;
+};
diff --git a/drivers/net/wireless/bcm4329/include/bcmdefs.h b/drivers/net/wireless/bcm4329/include/bcmdefs.h
new file mode 100644
index 000000000000..f4e99461971b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmdefs.h
@@ -0,0 +1,114 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmdefs.h,v 13.38.4.10.2.7.6.11 2010/02/01 05:51:55 Exp $
+ */
+
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+#define STATIC static
+
+#define SI_BUS 0
+#define PCI_BUS 1
+#define PCMCIA_BUS 2
+#define SDIO_BUS 3
+#define JTAG_BUS 4
+#define USB_BUS 5
+#define SPI_BUS 6
+
+
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) (BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) (bus)
+#endif
+
+
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) (BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) (bus)
+#endif
+
+
+
+#if defined(BCMSPROMBUS)
+#define SPROMBUS (BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS (PCMCIA_BUS)
+#else
+#define SPROMBUS (PCI_BUS)
+#endif
+
+
+#ifdef BCMCHIPID
+#define CHIPID(chip) (BCMCHIPID)
+#else
+#define CHIPID(chip) (chip)
+#endif
+
+
+#define DMADDR_MASK_32 0x0
+#define DMADDR_MASK_30 0xc0000000
+#define DMADDR_MASK_0 0xffffffff
+
+#define DMADDRWIDTH_30 30
+#define DMADDRWIDTH_32 32
+#define DMADDRWIDTH_63 63
+#define DMADDRWIDTH_64 64
+
+
+#define BCMEXTRAHDROOM 164
+
+
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
+
+
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE
+#else
+#define BCMSPACE
+#define bcmspace TRUE
+#endif
+
+
+#define MAXSZ_NVRAM_VARS 4096
+
+#define LOCATOR_EXTERN static
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/bcmdevs.h b/drivers/net/wireless/bcm4329/include/bcmdevs.h
new file mode 100644
index 000000000000..14853f17795c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmdevs.h
@@ -0,0 +1,124 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h,v 13.172.4.5.4.10.2.36 2010/05/25 08:33:44 Exp $
+ */
+
+
+#ifndef _BCMDEVS_H
+#define _BCMDEVS_H
+
+
+#define VENDOR_EPIGRAM 0xfeda
+#define VENDOR_BROADCOM 0x14e4
+#define VENDOR_SI_IMAGE 0x1095
+#define VENDOR_TI 0x104c
+#define VENDOR_RICOH 0x1180
+#define VENDOR_JMICRON 0x197b
+
+
+#define VENDOR_BROADCOM_PCMCIA 0x02d0
+
+
+#define VENDOR_BROADCOM_SDIO 0x00BF
+
+
+#define BCM_DNGL_VID 0xa5c
+#define BCM_DNGL_BL_PID_4320 0xbd11
+#define BCM_DNGL_BL_PID_4328 0xbd12
+#define BCM_DNGL_BL_PID_4322 0xbd13
+#define BCM_DNGL_BL_PID_4325 0xbd14
+#define BCM_DNGL_BL_PID_4315 0xbd15
+#define BCM_DNGL_BL_PID_4319 0xbd16
+#define BCM_DNGL_BDC_PID 0xbdc
+
+#define BCM4325_D11DUAL_ID 0x431b
+#define BCM4325_D11G_ID 0x431c
+#define BCM4325_D11A_ID 0x431d
+#define BCM4329_D11NDUAL_ID 0x432e
+#define BCM4329_D11N2G_ID 0x432f
+#define BCM4329_D11N5G_ID 0x4330
+#define BCM4336_D11N_ID 0x4343
+#define BCM4315_D11DUAL_ID 0x4334
+#define BCM4315_D11G_ID 0x4335
+#define BCM4315_D11A_ID 0x4336
+#define BCM4319_D11N_ID 0x4337
+#define BCM4319_D11N2G_ID 0x4338
+#define BCM4319_D11N5G_ID 0x4339
+
+
+#define SDIOH_FPGA_ID 0x43f2
+#define SPIH_FPGA_ID 0x43f5
+#define BCM4710_DEVICE_ID 0x4710
+#define BCM27XX_SDIOH_ID 0x2702
+#define PCIXX21_FLASHMEDIA0_ID 0x8033
+#define PCIXX21_SDIOH0_ID 0x8034
+#define PCIXX21_FLASHMEDIA_ID 0x803b
+#define PCIXX21_SDIOH_ID 0x803c
+#define R5C822_SDIOH_ID 0x0822
+#define JMICRON_SDIOH_ID 0x2381
+
+
+#define BCM4306_CHIP_ID 0x4306
+#define BCM4311_CHIP_ID 0x4311
+#define BCM4312_CHIP_ID 0x4312
+#define BCM4315_CHIP_ID 0x4315
+#define BCM4318_CHIP_ID 0x4318
+#define BCM4319_CHIP_ID 0x4319
+#define BCM4320_CHIP_ID 0x4320
+#define BCM4321_CHIP_ID 0x4321
+#define BCM4322_CHIP_ID 0x4322
+#define BCM4325_CHIP_ID 0x4325
+#define BCM4328_CHIP_ID 0x4328
+#define BCM4329_CHIP_ID 0x4329
+#define BCM4336_CHIP_ID 0x4336
+#define BCM4402_CHIP_ID 0x4402
+#define BCM4704_CHIP_ID 0x4704
+#define BCM4710_CHIP_ID 0x4710
+#define BCM4712_CHIP_ID 0x4712
+#define BCM4785_CHIP_ID 0x4785
+#define BCM5350_CHIP_ID 0x5350
+#define BCM5352_CHIP_ID 0x5352
+#define BCM5354_CHIP_ID 0x5354
+#define BCM5365_CHIP_ID 0x5365
+
+
+
+#define BCM4303_PKG_ID 2
+#define BCM4309_PKG_ID 1
+#define BCM4712LARGE_PKG_ID 0
+#define BCM4712SMALL_PKG_ID 1
+#define BCM4712MID_PKG_ID 2
+#define BCM4328USBD11G_PKG_ID 2
+#define BCM4328USBDUAL_PKG_ID 3
+#define BCM4328SDIOD11G_PKG_ID 4
+#define BCM4328SDIODUAL_PKG_ID 5
+#define BCM4329_289PIN_PKG_ID 0
+#define BCM4329_182PIN_PKG_ID 1
+#define BCM5354E_PKG_ID 1
+#define HDLSIM5350_PKG_ID 1
+#define HDLSIM_PKG_ID 14
+#define HWSIM_PKG_ID 15
+
+
+#endif
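These manifest constants are consumed by probe/match code; a minimal sketch of matching an SDIO device against the 4329 IDs above (the function is hypothetical; uint16 comes from the driver's typedefs.h):

    /* Hypothetical matcher; returns nonzero for a BCM4329 D11 function. */
    static int demo_is_bcm4329(uint16 vendor, uint16 device)
    {
            if (vendor != VENDOR_BROADCOM)
                    return 0;
            return (device == BCM4329_D11NDUAL_ID ||
                    device == BCM4329_D11N2G_ID ||
                    device == BCM4329_D11N5G_ID);
    }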
diff --git a/drivers/net/wireless/bcm4329/include/bcmendian.h b/drivers/net/wireless/bcm4329/include/bcmendian.h
new file mode 100644
index 000000000000..ae468383aa74
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmendian.h
@@ -0,0 +1,205 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmendian.h,v 1.31.302.1.16.1 2009/02/03 18:34:31 Exp $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+
+#define BCMSWAP16(val) \
+ ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+ (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+
+#define BCMSWAP32(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+ (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \
+ (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \
+ (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+
+#define BCMSWAP32BY16(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+ (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+ return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+ return BCMSWAP32(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+ return BCMSWAP32BY16(val);
+}
+
+
+
+
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+ len = len / 2;
+
+ while (len--) {
+ *buf = bcmswap16(*buf);
+ buf++;
+ }
+}
+
+#ifndef hton16
+#ifndef IL_BIGENDIAN
+#define HTON16(i) BCMSWAP16(i)
+#define HTON32(i) BCMSWAP32(i)
+#define hton16(i) bcmswap16(i)
+#define hton32(i) bcmswap32(i)
+#define ntoh16(i) bcmswap16(i)
+#define ntoh32(i) bcmswap32(i)
+#define HTOL16(i) (i)
+#define HTOL32(i) (i)
+#define ltoh16(i) (i)
+#define ltoh32(i) (i)
+#define htol16(i) (i)
+#define htol32(i) (i)
+#else
+#define HTON16(i) (i)
+#define HTON32(i) (i)
+#define hton16(i) (i)
+#define hton32(i) (i)
+#define ntoh16(i) (i)
+#define ntoh32(i) (i)
+#define HTOL16(i) BCMSWAP16(i)
+#define HTOL32(i) BCMSWAP32(i)
+#define ltoh16(i) bcmswap16(i)
+#define ltoh32(i) bcmswap32(i)
+#define htol16(i) bcmswap16(i)
+#define htol32(i) bcmswap32(i)
+#endif
+#endif
+
+#ifndef IL_BIGENDIAN
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+#else
+#define ltoh16_buf(buf, i) bcmswap16_buf((uint16 *)buf, i)
+#define htol16_buf(buf, i) bcmswap16_buf((uint16 *)buf, i)
+#endif
+
+
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = val >> 8;
+}
+
+
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = (val >> 16) & 0xff;
+ bytes[3] = val >> 24;
+}
+
+
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val >> 8;
+ bytes[1] = val & 0xff;
+}
+
+
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val >> 24;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+ return _LTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+ return _LTOH32_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+ return _NTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+ return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#define ltoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)ptr : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)ptr) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)ptr) : \
+ 0xfeedf00d)
+
+#define ntoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)ptr : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)ptr) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)ptr) : \
+ 0xfeedf00d)
+
+#endif
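A minimal sketch of how the unaligned store/load helpers above serialize little-endian fields into a byte buffer (the buffer layout and function names are hypothetical; uint8/uint16/uint32 come from typedefs.h):

    /* Pack a hypothetical 2-byte type and 4-byte length, little-endian. */
    static void demo_pack_hdr(uint8 *buf, uint16 type, uint32 len)
    {
            htol16_ua_store(type, buf);         /* bytes 0..1 */
            htol32_ua_store(len, buf + 2);      /* bytes 2..5 */
    }

    /* Read the length back; safe for unaligned addresses. */
    static uint32 demo_unpack_len(const uint8 *buf)
    {
            return ltoh32_ua(buf + 2);
    }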
diff --git a/drivers/net/wireless/bcm4329/include/bcmpcispi.h b/drivers/net/wireless/bcm4329/include/bcmpcispi.h
new file mode 100644
index 000000000000..7d98fb7cbdc8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmpcispi.h
@@ -0,0 +1,205 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h,v 13.11.8.3 2008/07/09 21:23:29 Exp $
+ */
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/*
++---------------------------------------------------------------------------+
+| |
+| 7 6 5 4 3 2 1 0 |
+| 0x0000 SPI_CTRL SPIE SPE 0 MSTR CPOL CPHA SPR1 SPR0 |
+| 0x0004 SPI_STAT SPIF WCOL ST1 ST0 WFFUL WFEMP RFFUL RFEMP |
+| 0x0008 SPI_DATA Bits 31:0, data to send out on MOSI |
+| 0x000C SPI_EXT ICNT1 ICNT0 BSWAP *HSMODE ESPR1 ESPR0 |
+| 0x0020 GPIO_OE 0=input, 1=output PWR_OE CS_OE |
+| 0x0024 GPIO_DATA CARD:1=missing, 0=present CARD PWR_DAT CS_DAT |
+| 0x0040 INT_EDGE 0=level, 1=edge DEV_E SPI_E |
+| 0x0044 INT_POL 1=active high, 0=active low DEV_P SPI_P |
+| 0x0048 INTMASK DEV SPI |
+| 0x004C INTSTATUS DEV SPI |
+| 0x0060 HEXDISP Reset value: 0x14e443f5. In hexdisp mode, value |
+| shows on the Raggedstone1 4-digit 7-segment display. |
+| 0x0064 CURRENT_MA Low 16 bits indicate card current consumption in mA |
+| 0x006C DISP_SEL Display mode (0=hexdisp, 1=current) DSP |
+| 0x00C0 PLL_CTL bit31=ext_clk, remainder unused. |
+| 0x00C4 PLL_STAT LOCK |
+| 0x00C8 CLK_FREQ |
+| 0x00CC CLK_CNT |
+| |
+| *Notes: HSMODE is not implemented, never set this bit! |
+| BSWAP is available in rev >= 8 |
+| |
++---------------------------------------------------------------------------+
+*/
+
+typedef volatile struct {
+ uint32 spih_ctrl; /* 0x00 SPI Control Register */
+ uint32 spih_stat; /* 0x04 SPI Status Register */
+ uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */
+ uint32 spih_ext; /* 0x0C SPI Extension Register */
+ uint32 PAD[4]; /* 0x10-0x1F PADDING */
+
+ uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */
+ uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */
+ uint32 PAD[6]; /* 0x28-0x3F PADDING */
+
+ uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+ uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+ /* 1=Active High) */
+ uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */
+ uint32 spih_int_status; /* 0x4C SPI Interrupt Status */
+ uint32 PAD[4]; /* 0x50-0x5F PADDING */
+
+ uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */
+ uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */
+ uint32 PAD[1]; /* 0x68 PADDING */
+ uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */
+ uint32 PAD[4]; /* 0x70-0x7F PADDING */
+ uint32 PAD[8]; /* 0x80-0x9F PADDING */
+ uint32 PAD[8]; /* 0xA0-0xBF PADDING */
+ uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */
+ uint32 spih_pll_status; /* 0xC4 PLL Status Register */
+ uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */
+ uint32 spih_clk_count; /* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+ uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */
+ uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */
+
+ uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */
+ uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */
+ uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */
+ uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */
+ uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */
+ uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */
+ uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */
+ uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */
+ uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */
+ uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */
+ uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */
+ uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */
+ uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */
+ uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */
+ uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */
+ uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */
+ uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */
+ uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */
+ uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */
+ uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */
+ uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */
+ uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */
+ uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */
+ uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */
+ uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */
+ uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */
+
+ uint32 PAD[5]; /* 0x16C-0x17F PADDING */
+
+ uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */
+ uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */
+ uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */
+ uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+ uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+ uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */
+ uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */
+ uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+ uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+ uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */
+ uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+ uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+ uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+ uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */
+ uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+ uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+ uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+ uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */
+ uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+ uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+ uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+ uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+ uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */
+ uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */
+ uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */
+ uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+ uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */
+ uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */
+ uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */
+#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */
+#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */
+#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND 0xf4240 /* 1 million */
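A minimal sketch of a bounded status poll using the spih_regs_t layout, the SPIH_WFEMPTY status bit and SPI_SPIN_BOUND above (the function is hypothetical; uint32 comes from typedefs.h):

    /* Spin until the SPI write FIFO drains or the bound is exceeded. */
    static int demo_wait_wfifo_empty(spih_regs_t *regs)
    {
            uint32 spin;

            for (spin = 0; spin < SPI_SPIN_BOUND; spin++) {
                    if (regs->spih_stat & SPIH_WFEMPTY)
                            return 0;           /* drained */
            }
            return -1;                          /* bound exceeded */
    }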
diff --git a/drivers/net/wireless/bcm4329/include/bcmperf.h b/drivers/net/wireless/bcm4329/include/bcmperf.h
new file mode 100644
index 000000000000..2a78784e85d3
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h,v 13.5 2007/09/14 22:00:59 Exp $
+ */
+/* test */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define BCMPERF_GETICACHE_MISS(x) ((x) = 0)
+#define BCMPERF_GETICACHE_HIT(x) ((x) = 0)
+#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0)
+#endif /* _BCMPERF_H_ */
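On this platform the performance-counter macros above compile away (or zero their output), so callers can invoke them unconditionally; a minimal sketch (hypothetical function, uint32 from typedefs.h):

    static uint32 demo_icache_misses(void)
    {
            uint32 misses;

            BCMPERF_ENABLE_ICACHE_MISS();
            BCMPERF_GETICACHE_MISS(misses);     /* expands to (misses = 0) here */
            return misses;
    }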
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdbus.h b/drivers/net/wireless/bcm4329/include/bcmsdbus.h
new file mode 100644
index 000000000000..b7b67bc66248
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdbus.h
@@ -0,0 +1,117 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h,v 13.11.14.2.6.6 2009/10/27 17:20:28 Exp $
+ */
+
+#ifndef _sdio_api_h_
+#define _sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS (0x00)
+#define SDIOH_API_RC_FAIL (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
+
+#define SDIOH_DATA_PIO 0 /* PIO mode */
+#define SDIOH_DATA_DMA 1 /* DMA mode */
+
+
+typedef int SDIOH_API_RC;
+
+/* SDIO Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+/* attach; returns a handle on success, NULL on failure.
+ * The handle must be passed to all subsequent calls; no local cache is kept.
+ * cfghdl points to the starting address of the PCI device's mapped memory.
+ */
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+ uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+ uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+ void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+/* Helper function */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+
+
+#endif /* _sdio_api_h_ */
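A minimal sketch of the typical bring-up sequence through the sdioh API declared above: attach, hook the interrupt callback, then enable device interrupts. The osh/cfghdl/irq values and the my_isr callback are placeholders; TRUE and the osl_t/uint types come from the driver's typedefs/OSL headers:

    static void my_isr(void *arg)
    {
            /* handle the SD interrupt for this client */
    }

    static sdioh_info_t *demo_bring_up(osl_t *osh, void *cfghdl, uint irq)
    {
            sdioh_info_t *si = sdioh_attach(osh, cfghdl, irq);

            if (si == NULL)
                    return NULL;
            if (sdioh_interrupt_register(si, my_isr, NULL) != SDIOH_API_RC_SUCCESS ||
                sdioh_interrupt_set(si, TRUE) != SDIOH_API_RC_SUCCESS) {
                    sdioh_detach(osh, si);
                    return NULL;
            }
            return si;
    }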
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdh.h b/drivers/net/wireless/bcm4329/include/bcmsdh.h
new file mode 100644
index 000000000000..f5dee5c58445
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdh.h
@@ -0,0 +1,208 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ * export functions to client drivers
+ * abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h,v 13.35.14.7.6.8 2009/10/14 04:22:25 Exp $
+ */
+
+#ifndef _bcmsdh_h_
+#define _bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL 0x0001 /* Error */
+#define BCMSDH_INFO_VAL 0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+/* Attach and build an interface to the underlying SD host driver.
+ * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
+ * - Returns the bcmsdh handle and virtual address base for register access.
+ * The returned handle should be used in all subsequent calls, but the bcmsdh
+ * implementation may maintain a single "default" handle (e.g. the first or
+ * most recent one) to enable single-instance implementations to pass NULL.
+ */
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+
+/* Detach - free up resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+#ifdef BCMLXSDMMC
+extern int bcmsdh_claim_host_and_lock(void *sdh);
+extern int bcmsdh_release_host_and_unlock(void *sdh);
+#endif /* BCMLXSDMMC */
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ * fn: function whose CIS is being requested (0 is common CIS)
+ * cis: pointer to memory location to place results
+ * length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete, void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
+
+/* Pending (non-error) return code */
+#define BCME_PENDING 1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+/* callback functions */
+typedef struct {
+ /* attach to device */
+ void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+ uint16 func, uint bustype, void * regsva, osl_t * osh,
+ void * param);
+ /* detach from device */
+ void (*detach)(void *ch);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_register_oob_intr(void * dhdp);
+extern void bcmsdh_unregister_oob_intr(void);
+extern void bcmsdh_oob_intr_set(bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+#endif /* _bcmsdh_h_ */
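A minimal sketch of a synchronous backplane register read through the bcmsdh API above, including the regfail check the interface provides (the address and the "0 means failure" convention are placeholders of this sketch):

    static uint32 demo_read_core_reg(void *sdh, uint32 backplane_addr)
    {
            uint32 val = bcmsdh_reg_read(sdh, backplane_addr, 4);   /* 4-byte read via F1 */

            if (bcmsdh_regfail(sdh))
                    return 0;       /* this sketch treats 0 as "read failed" */
            return val;
    }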
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h
new file mode 100644
index 000000000000..4e6d1b5bd94f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h
@@ -0,0 +1,122 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h,v 13.1.2.1.8.7 2009/10/27 18:22:52 Exp $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+
+/* Allocate/init/free per-OS private data */
+extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
+extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4 2
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+struct sdioh_info {
+ osl_t *osh; /* osh handler */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ uint max_dma_len;
+ uint max_dma_descriptors; /* DMA Descriptors supported by this controller. */
+// SDDMA_DESCRIPTOR SGList[32]; /* Scatter/Gather DMA List */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+ sdioh_info_t *sd;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdpcm.h b/drivers/net/wireless/bcm4329/include/bcmsdpcm.h
new file mode 100644
index 000000000000..77aca4500ad8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdpcm.h
@@ -0,0 +1,263 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h,v 1.1.2.4 2010/07/02 01:15:46 Exp $
+ */
+
+#ifndef _bcmsdpcm_h_
+#define _bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 1 /* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY 2 /* we're ready to talk to host after enable */
+#define HMB_DATA_FC 4 /* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY 8 /* firmware is ready for protocol activity */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET 6 /* Version # */
+#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET 7 /* Spare */
+#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL 15
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0 0x01
+#define SDPCM_FLAG_RESVD1 0x02
+#define SDPCM_FLAG_GSPI_TXENAB 0x04
+#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on command.
+ * Len is current or requested frame length, not
+ * including test header; sent little-endian.
+ */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */
+#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+ uint32 wroutofsync; /* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
+ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
+ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
+ uint32 rxdescuflo; /* receive descriptor underflows */
+ uint32 rxfifooflo; /* receive fifo overflows */
+ uint32 txfifouflo; /* transmit fifo underflows */
+ uint32 runt; /* runt (too short) frames recv'd from bus */
+ uint32 badlen; /* frame's rxh len does not match its hw tag len */
+ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
+ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */
+ uint32 rxfcrc; /* frame rx header indicates crc error */
+ uint32 rxfwoos; /* frame rx header indicates write out of sync */
+ uint32 rxfwft; /* frame rx header indicates write frame termination */
+ uint32 rxfabort; /* frame rx header indicates frame aborted */
+ uint32 woosint; /* write out of sync interrupt */
+ uint32 roosint; /* read out of sync interrupt */
+ uint32 rftermint; /* read frame terminate interrupt */
+ uint32 wftermint; /* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val) ((var) == (val))
+#define SDIODREV_GE(var, val) ((var) >= (val))
+#define SDIODREV_GT(var, val) ((var) > (val))
+#define SDIODREV_LT(var, val) ((var) < (val))
+#define SDIODREV_LE(var, val) ((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+ (SDIODREV_LT((h)->corerev, 1) ? \
+ SDIODDMAREG32((h), (dir), (chnl)) : \
+ SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODDMAREG(h, dir, chnl) : \
+ PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+ (SDIODREV_LT((corerev), 1) ? \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODFIFOREG(h, corerev) : \
+ PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host
+ * The structure contains pointers to trap or assert information shared with the host
+ */
+#define SDPCM_SHARED_VERSION 0x0002
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+
+typedef struct {
+ uint32 flags;
+ uint32 trap_addr;
+ uint32 assert_exp_addr;
+ uint32 assert_file_addr;
+ uint32 assert_line;
+ uint32 console_addr; /* Address of hndrte_cons_t */
+ uint32 msgtrace_addr;
+ uint8 tag[32];
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+#endif /* _bcmsdpcm_h_ */
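A minimal sketch of decoding the software frame header with the accessor macros above; p points at the start of the SW header in a received frame (the function is hypothetical; uint8/uint16 come from typedefs.h):

    static void demo_parse_swheader(const uint8 *p)
    {
            uint8  seq     = SDPCM_PACKET_SEQUENCE(p);  /* 8-bit rolling sequence number */
            uint8  chan    = SDPCM_PACKET_CHANNEL(p);   /* control/event/data/glom/test */
            uint16 nextlen = SDPCM_NEXTLEN_VALUE(p);    /* next-frame length hint, bytes (16-byte granularity) */
            uint8  doff    = SDPCM_DOFFSET_VALUE(p);    /* data offset from start of frame */

            (void)seq; (void)chan; (void)nextlen; (void)doff;
    }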
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdspi.h b/drivers/net/wireless/bcm4329/include/bcmsdspi.h
new file mode 100644
index 000000000000..eaae10d8bf19
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdspi.h
@@ -0,0 +1,131 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h,v 13.8.10.2 2008/06/30 21:09:40 Exp $
+ */
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of sdspi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initialized */
+ bool card_init_done; /* Client SDIO interface initialized */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ bool got_hcint; /* Host Controller interrupt. */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current register transfer size */
+ uint32 cmd53_wr_data; /* Used to pass CMD53 write data */
+ uint32 card_response; /* Used to pass back response status byte */
+ uint32 card_rsp_data; /* Used to pass back response data word */
+ uint16 card_rca; /* Current relative card address (RCA) */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdstd.h b/drivers/net/wireless/bcm4329/include/bcmsdstd.h
new file mode 100644
index 000000000000..974b3d41698d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdstd.h
@@ -0,0 +1,223 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h,v 13.16.18.1.16.3 2009/12/10 01:09:23 Exp $
+ */
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+#define SDIOH_MODE_SD1 1
+#define SDIOH_MODE_SD4 2
+
+#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK 1
+#define SDIOH_TYPE_BCM27XX 2
+#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS 0x00001E00
+
+#define RETRIES_LARGE 100000
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+#define USE_FIFO 0x8 /* Fifo vs non-fifo */
+
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint32 curr_caps; /* max current capabilities reg */
+
+ osl_t *osh; /* osh handler */
+ volatile char *mem_space; /* pci device memory va */
+ uint lockcount; /* nest count of sdstd_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint target_dev; /* Target device ID */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+ int local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf; /* DMA Buffer virtual address */
+ ulong dma_phys; /* DMA Buffer physical address */
+ void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */
+ ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */
+
+ /* adjustments needed to make the dma align properly */
+ void *dma_start_buf;
+ ulong dma_start_phys;
+ uint alloced_dma_size;
+ void *adma2_dscr_start_buf;
+ ulong adma2_dscr_start_phys;
+ uint alloced_adma2_dscr_size;
+
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ bool got_hcint; /* local interrupt flag */
+ uint16 last_intrstatus; /* to cache intrstatus */
+};
+
+#define DMA_MODE_NONE 0
+#define DMA_MODE_SDMA 1
+#define DMA_MODE_ADMA1 2
+#define DMA_MODE_ADMA2 3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO -1
+
+#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE 0
+#define SDIOH_ADMA1_MODE 1
+#define SDIOH_ADMA2_MODE 2
+#define SDIOH_ADMA2_64_MODE 3
+
+#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
+
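+/* A minimal sketch of filling one 32-bit ADMA2 descriptor line for a single
+ * contiguous buffer, assuming the standard SD Host Controller ADMA2 layout
+ * (transfer length in the upper 16 bits, attribute bits in the low bits) and
+ * that the typedefs.h types/INLINE are in scope as elsewhere in this driver.
+ * A real table chains several lines and sets END only on the last one.
+ */
+static INLINE void
+example_fill_adma2_dscr(adma2_dscr_32b_t *dscr, uint32 phys_addr, uint16 nbytes)
+{
+	dscr->phys_addr = phys_addr;
+	dscr->len_attr = ((uint32)nbytes << 16) |
+	        ADMA2_ATTRIBUTE_ACT_TRAN |	/* transfer the data in this line */
+	        ADMA2_ATTRIBUTE_VALID |		/* line is valid */
+	        ADMA2_ATTRIBUTE_END |		/* last (and only) line */
+	        ADMA2_ATTRIBUTE_INT;		/* interrupt when the line completes */
+}
+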
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern uint16 sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield);
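+
+/* A minimal sketch of how a command-completion wait can be built from the
+ * hooks above: spin when the driver is in polled mode, otherwise hand off to
+ * the per-port sleep/yield path. The polled branch assumes sdstd_spinbits()
+ * leaves the latched status in last_intrstatus.
+ */
+static INLINE uint16
+example_wait_intr(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield)
+{
+	if (sd->polled_mode) {
+		sdstd_spinbits(sd, norm, err);
+		return sd->last_intrstatus;
+	}
+	return sdstd_waitbits(sd, norm, err, yield);
+}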
diff --git a/drivers/net/wireless/bcm4329/include/bcmspi.h b/drivers/net/wireless/bcm4329/include/bcmspi.h
new file mode 100644
index 000000000000..2e2bc935716f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmspi.h
@@ -0,0 +1,36 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h,v 13.3.10.2 2008/06/30 21:09:40 Exp $
+ */
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
diff --git a/drivers/net/wireless/bcm4329/include/bcmspibrcm.h b/drivers/net/wireless/bcm4329/include/bcmspibrcm.h
new file mode 100644
index 000000000000..9dce878d11e2
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmspibrcm.h
@@ -0,0 +1,134 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.h,v 1.4.4.1.4.3.6.1 2008/09/27 17:03:25 Exp $
+ */
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_F1 64
+#define BLOCK_SIZE_F2 2048
+#define BLOCK_SIZE_F3 2048
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+#define ERROR_UF 2
+#define ERROR_OF 3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ void *bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of spi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 card_dstatus; /* 32bit device status */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SPI_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ uint32 wordlen; /* host processor 16/32bits */
+ uint32 prev_fun;
+ uint32 chip;
+ uint32 chiprev;
+ bool resp_delay_all;
+ bool dwordmode;
+
+ struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S 31
+#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S 30
+#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S 28
+#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S 11
+#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */
+#define SPI_LEN_S 0
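+
+/* A minimal sketch of packing a gSPI command word from the fields above:
+ * bit 31 read/write flag, bit 30 fixed/incremental access, bits 29:28
+ * function, bits 27:11 register address, bits 10:0 length. BITFIELD_MASK()
+ * is assumed to expand to ((1 << (width)) - 1) and to come from the driver's
+ * other headers.
+ */
+static INLINE uint32
+example_gspi_cmd(uint32 rw, uint32 access, uint32 func, uint32 addr, uint32 nbytes)
+{
+	return ((rw & SPI_RW_FLAG_M) << SPI_RW_FLAG_S) |
+	       ((access & SPI_ACCESS_M) << SPI_ACCESS_S) |
+	       ((func & SPI_FUNCTION_M) << SPI_FUNCTION_S) |
+	       ((addr & SPI_REG_ADDR_M) << SPI_REG_ADDR_S) |
+	       ((nbytes & SPI_LEN_M) << SPI_LEN_S);
+}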
diff --git a/drivers/net/wireless/bcm4329/include/bcmutils.h b/drivers/net/wireless/bcm4329/include/bcmutils.h
new file mode 100644
index 000000000000..f85ed351d663
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmutils.h
@@ -0,0 +1,637 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.h,v 13.184.4.6.2.1.18.25 2010/04/26 06:05:24 Exp $
+ */
+
+
+#ifndef _bcmutils_h_
+#define _bcmutils_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define _BCM_U 0x01
+#define _BCM_L 0x02
+#define _BCM_D 0x04
+#define _BCM_C 0x08
+#define _BCM_P 0x10
+#define _BCM_S 0x20
+#define _BCM_X 0x40
+#define _BCM_SP 0x80
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+
+
+struct bcmstrbuf {
+ char *buf;
+ unsigned int size;
+ char *origbuf;
+ unsigned int origsize;
+};
+
+
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 0x20
+
+
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) {\
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+}
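+
+/* A minimal usage sketch: poll a (hypothetical) memory-mapped status word
+ * for a ready bit, giving up after roughly one millisecond.
+ */
+static INLINE bool
+example_wait_ready(volatile uint32 *status, uint32 ready_bit)
+{
+	SPINWAIT(((*status & ready_bit) == 0), 1000);
+	return ((*status & ready_bit) != 0);
+}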
+
+
+
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16
+#endif
+
+typedef struct pktq_prec {
+ void *head;
+ void *tail;
+ uint16 len;
+ uint16 max;
+} pktq_prec_t;
+
+
+
+struct pktq {
+ uint16 num_prec;
+ uint16 hi_prec;
+ uint16 max;
+ uint16 len;
+
+ struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+
+struct spktq {
+ uint16 num_prec;
+ uint16 hi_prec;
+ uint16 max;
+ uint16 len;
+
+ struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+
+
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir);
+
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
+
+
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+
+
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+
+#define pktenq(pq, p) pktq_penq(((struct pktq *)pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq(((struct pktq *)pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)pq), 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
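+
+/* A minimal usage sketch of the precedence queue above: initialise a
+ * 4-precedence queue, tail-enqueue one packet at precedence 2, then drain it
+ * with pktq_deq(), which hands back the highest-precedence packets first.
+ * PKTFREE() comes from osl.h, included above.
+ */
+static INLINE void
+example_pktq(osl_t *osh, void *pkt)
+{
+	struct pktq q;
+	int prec_out;
+	void *p;
+
+	pktq_init(&q, 4, PKTQ_LEN_DEFAULT);
+	pktq_penq(&q, 2, pkt);
+
+	while ((p = pktq_deq(&q, &prec_out)) != NULL)
+		PKTFREE(osh, p, FALSE);
+}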
+
+
+
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+#define PKTPRIO_VDSCP 0x100
+#define PKTPRIO_VLAN 0x200
+#define PKTPRIO_UPD 0x400
+#define PKTPRIO_DSCP 0x800
+
+
+extern int bcm_atoi(char *s);
+extern ulong bcm_strtoul(char *cp, char **endp, uint base);
+extern char *bcmstrstr(char *haystack, char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(char *p, struct ether_addr *ea);
+
+
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+
+
+extern void bcm_mdelay(uint ms);
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define bcmlog(fmt, a1, a2)
+#define bcmdumplog(buf, size) *buf = '\0'
+#define bcmdumplogent(buf, idx) -1
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+
+
+
+
+typedef struct bcm_iovar {
+ const char *name;
+ uint16 varid;
+ uint16 flags;
+ uint16 type;
+ uint16 minlen;
+} bcm_iovar_t;
+
+
+
+
+#define IOV_GET 0
+#define IOV_SET 1
+
+
+#define IOV_GVAL(id) ((id)*2)
+#define IOV_SVAL(id) (((id)*2)+IOV_SET)
+#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
+
+
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+
+#endif
+
+
+#define IOVT_VOID 0
+#define IOVT_BOOL 1
+#define IOVT_INT8 2
+#define IOVT_UINT8 3
+#define IOVT_INT16 4
+#define IOVT_UINT16 5
+#define IOVT_INT32 6
+#define IOVT_UINT32 7
+#define IOVT_BUFFER 8
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+
+#define BCM_IOV_TYPE_INIT { \
+ "void", \
+ "bool", \
+ "int8", \
+ "uint8", \
+ "int16", \
+ "uint16", \
+ "int32", \
+ "uint32", \
+ "buffer", \
+ "" }
+
+#define BCM_IOVT_IS_INT(type) (\
+ (type == IOVT_BOOL) || \
+ (type == IOVT_INT8) || \
+ (type == IOVT_UINT8) || \
+ (type == IOVT_INT16) || \
+ (type == IOVT_UINT16) || \
+ (type == IOVT_INT32) || \
+ (type == IOVT_UINT32))
+
+
+
+#define BCME_STRLEN 64
+#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST))
+
+
+
+
+#define BCME_OK 0
+#define BCME_ERROR -1
+#define BCME_BADARG -2
+#define BCME_BADOPTION -3
+#define BCME_NOTUP -4
+#define BCME_NOTDOWN -5
+#define BCME_NOTAP -6
+#define BCME_NOTSTA -7
+#define BCME_BADKEYIDX -8
+#define BCME_RADIOOFF -9
+#define BCME_NOTBANDLOCKED -10
+#define BCME_NOCLK -11
+#define BCME_BADRATESET -12
+#define BCME_BADBAND -13
+#define BCME_BUFTOOSHORT -14
+#define BCME_BUFTOOLONG -15
+#define BCME_BUSY -16
+#define BCME_NOTASSOCIATED -17
+#define BCME_BADSSIDLEN -18
+#define BCME_OUTOFRANGECHAN -19
+#define BCME_BADCHAN -20
+#define BCME_BADADDR -21
+#define BCME_NORESOURCE -22
+#define BCME_UNSUPPORTED -23
+#define BCME_BADLEN -24
+#define BCME_NOTREADY -25
+#define BCME_EPERM -26
+#define BCME_NOMEM -27
+#define BCME_ASSOCIATED -28
+#define BCME_RANGE -29
+#define BCME_NOTFOUND -30
+#define BCME_WME_NOT_ENABLED -31
+#define BCME_TSPEC_NOTFOUND -32
+#define BCME_ACM_NOTSUPPORTED -33
+#define BCME_NOT_WME_ASSOCIATION -34
+#define BCME_SDIO_ERROR -35
+#define BCME_DONGLE_DOWN -36
+#define BCME_VERSION -37
+#define BCME_TXFAIL -38
+#define BCME_RXFAIL -39
+#define BCME_NODEVICE -40
+#define BCME_UNFINISHED -41
+#define BCME_LAST BCME_UNFINISHED
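+
+#ifdef BCMDRIVER
+/* A minimal dispatch sketch tying the iovar machinery above to these return
+ * codes: handlers conventionally switch on the actionid formed from the
+ * table varid with IOV_GVAL()/IOV_SVAL(). The "msglevel" iovar and its varid
+ * here are hypothetical.
+ */
+#define IOV_EXAMPLE_MSGLEVEL	1
+
+static INLINE int
+example_doiovar(uint32 actionid, void *arg, uint32 *msglevel)
+{
+	switch (actionid) {
+	case IOV_GVAL(IOV_EXAMPLE_MSGLEVEL):
+		*(uint32 *)arg = *msglevel;	/* GET: copy the value out */
+		return BCME_OK;
+	case IOV_SVAL(IOV_EXAMPLE_MSGLEVEL):
+		*msglevel = *(uint32 *)arg;	/* SET: take the new value */
+		return BCME_OK;
+	default:
+		return BCME_UNSUPPORTED;
+	}
+}
+#endif /* BCMDRIVER */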
+
+
+#define BCMERRSTRINGTABLE { \
+ "OK", \
+ "Undefined error", \
+ "Bad Argument", \
+ "Bad Option", \
+ "Not up", \
+ "Not down", \
+ "Not AP", \
+ "Not STA", \
+ "Bad Key Index", \
+ "Radio Off", \
+ "Not band locked", \
+ "No clock", \
+ "Bad Rate valueset", \
+ "Bad Band", \
+ "Buffer too short", \
+ "Buffer too long", \
+ "Busy", \
+ "Not Associated", \
+ "Bad SSID len", \
+ "Out of Range Channel", \
+ "Bad Channel", \
+ "Bad Address", \
+ "Not Enough Resources", \
+ "Unsupported", \
+ "Bad length", \
+ "Not Ready", \
+ "Not Permitted", \
+ "No Memory", \
+ "Associated", \
+ "Not In Range", \
+ "Not Found", \
+ "WME Not Enabled", \
+ "TSPEC Not Found", \
+ "ACM Not Supported", \
+ "Not WME Association", \
+ "SDIO Bus Error", \
+ "Dongle Not Accessible", \
+ "Incorrect version", \
+ "TX Failure", \
+ "RX Failure", \
+ "Device Not Present", \
+ "Command not finished", \
+}
+
+#ifndef ABS
+#define ABS(a) (((a) < 0)?-(a):(a))
+#endif
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b))?(a):(b))
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b))?(a):(b))
+#endif
+
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
+#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y))
+#define ISALIGNED(a, x) (((a) & ((x)-1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+ & ~((boundary) - 1))
+#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0)
+#define VALID_MASK(mask) !((mask) & ((mask) + 1))
+#ifndef OFFSETOF
+#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
+#endif
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a) (sizeof(a)/sizeof(a[0]))
+#endif
+
+
+#ifndef setbit
+#ifndef NBBY
+#define NBBY 8
+#endif
+#define setbit(a, i) (((uint8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define clrbit(a, i) (((uint8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define isset(a, i) (((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define isclr(a, i) ((((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif
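+
+/* A minimal usage sketch: mark an I/O function as ready in a small byte-array
+ * bitmap and read the bit back with the helpers above.
+ */
+static INLINE bool
+example_mark_ready(uint8 *bitmap, uint func)
+{
+	setbit(bitmap, func);
+	return (isset(bitmap, func) != 0);
+}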
+
+#define NBITS(type) (sizeof(type) * 8)
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+
+#define MODADD(x, y, bound) \
+ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+
+#define CRC8_INIT_VALUE 0xff
+#define CRC8_GOOD_VALUE 0x9f
+#define CRC16_INIT_VALUE 0xffff
+#define CRC16_GOOD_VALUE 0xf0b8
+#define CRC32_INIT_VALUE 0xffffffff
+#define CRC32_GOOD_VALUE 0xdebb20e3
+
+
+typedef struct bcm_bit_desc {
+ uint32 bit;
+ const char* name;
+} bcm_bit_desc_t;
+
+
+typedef struct bcm_tlv {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} bcm_tlv_t;
+
+
+#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
+
+
+#define ETHER_ADDR_STR_LEN 18
+
+
+#ifdef IL_BIGENDIAN
+static INLINE uint32
+load32_ua(uint8 *a)
+{
+ return ((a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]);
+}
+
+static INLINE void
+store32_ua(uint8 *a, uint32 v)
+{
+ a[0] = (v >> 24) & 0xff;
+ a[1] = (v >> 16) & 0xff;
+ a[2] = (v >> 8) & 0xff;
+ a[3] = v & 0xff;
+}
+
+static INLINE uint16
+load16_ua(uint8 *a)
+{
+ return ((a[0] << 8) | a[1]);
+}
+
+static INLINE void
+store16_ua(uint8 *a, uint16 v)
+{
+ a[0] = (v >> 8) & 0xff;
+ a[1] = v & 0xff;
+}
+
+#else
+
+static INLINE uint32
+load32_ua(uint8 *a)
+{
+ return ((a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]);
+}
+
+static INLINE void
+store32_ua(uint8 *a, uint32 v)
+{
+ a[3] = (v >> 24) & 0xff;
+ a[2] = (v >> 16) & 0xff;
+ a[1] = (v >> 8) & 0xff;
+ a[0] = v & 0xff;
+}
+
+static INLINE uint16
+load16_ua(uint8 *a)
+{
+ return ((a[1] << 8) | a[0]);
+}
+
+static INLINE void
+store16_ua(uint8 *a, uint16 v)
+{
+ a[1] = (v >> 8) & 0xff;
+ a[0] = v & 0xff;
+}
+
+#endif
+
+
+
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+ if (
+#ifdef __i386__
+ 1 ||
+#endif
+ (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+
+
+ ((uint32 *)dst)[0] = ((uint32 *)src1)[0] ^ ((uint32 *)src2)[0];
+ ((uint32 *)dst)[1] = ((uint32 *)src1)[1] ^ ((uint32 *)src2)[1];
+ ((uint32 *)dst)[2] = ((uint32 *)src1)[2] ^ ((uint32 *)src2)[2];
+ ((uint32 *)dst)[3] = ((uint32 *)src1)[3] ^ ((uint32 *)src2)[3];
+ } else {
+
+ int k;
+ for (k = 0; k < 16; k++)
+ dst[k] = src1[k] ^ src2[k];
+ }
+}
+
+
+
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
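+
+/* A minimal usage sketch: run hndcrc8() over a frame with the usual init
+ * value; for a frame that carries its CRC in-line, the result over
+ * payload-plus-CRC is conventionally compared against CRC8_GOOD_VALUE.
+ */
+static INLINE bool
+example_crc8_ok(uint8 *frame, uint frame_len_incl_crc)
+{
+	return (hndcrc8(frame, frame_len_incl_crc, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE);
+}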
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC)
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+extern void prhex(const char *msg, uchar *buf, uint len);
+#endif
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+
+extern const char *bcmerrorstr(int bcmerror);
+
+
+typedef uint32 mbool;
+#define mboolset(mb, bit) ((mb) |= (bit))
+#define mboolclr(mb, bit) ((mb) &= ~(bit))
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0)
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
+
+
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+
+
+struct fielddesc {
+ const char *nameandfmt;
+ uint32 offset;
+ uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(char *name, const uchar *cdata, int len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+ char *buf, uint32 bufsize);
+
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif
+
+
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/bcmwifi.h b/drivers/net/wireless/bcm4329/include/bcmwifi.h
new file mode 100644
index 000000000000..038aedcdb3c8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmwifi.h
@@ -0,0 +1,154 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver and the tools/apps.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi.h,v 1.15.30.4 2010/03/10 20:10:52 Exp $
+ */
+
+
+#ifndef _bcmwifi_h_
+#define _bcmwifi_h_
+
+
+
+typedef uint16 chanspec_t;
+
+
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
+#define CH_20MHZ_APART 4
+#define CH_10MHZ_APART 2
+#define CH_5MHZ_APART 1
+#define CH_MAX_2G_CHANNEL 14
+#define WLC_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL
+#define MAXCHANNEL 224
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_CHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_CHANSPEC_BW_MASK 0x0C00
+#define WL_CHANSPEC_BW_SHIFT 10
+#define WL_CHANSPEC_BW_10 0x0400
+#define WL_CHANSPEC_BW_20 0x0800
+#define WL_CHANSPEC_BW_40 0x0C00
+
+#define WL_CHANSPEC_BAND_MASK 0xf000
+#define WL_CHANSPEC_BAND_SHIFT 12
+#define WL_CHANSPEC_BAND_5G 0x1000
+#define WL_CHANSPEC_BAND_2G 0x2000
+#define INVCHANSPEC 255
+
+
+#define WF_CHAN_FACTOR_2_4_G 4814
+#define WF_CHAN_FACTOR_5_G 10000
+#define WF_CHAN_FACTOR_4_G 8000
+
+
+#define LOWER_20_SB(channel) ((channel > CH_10MHZ_APART) ? (channel - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) ((channel < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ (channel + CH_10MHZ_APART) : 0)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) ((channel < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ (channel + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CHSPEC_CHANNEL(chspec) ((uint8)(chspec & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec) (chspec & WL_CHANSPEC_BAND_MASK)
+
+#ifdef WL20MHZ_ONLY
+
+#define CHSPEC_CTL_SB(chspec) WL_CHANSPEC_CTL_SB_NONE
+#define CHSPEC_BW(chspec) WL_CHANSPEC_BW_20
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+
+#else
+
+#define CHSPEC_CTL_SB(chspec) (chspec & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) (chspec & WL_CHANSPEC_BW_MASK)
+#define CHSPEC_IS10(chspec) ((chspec & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) ((chspec & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#endif
+
+#define CHSPEC_IS5G(chspec) ((chspec & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) ((chspec & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_NONE(chspec) ((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+#define CHSPEC_SB_UPPER(chspec) ((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+#define CHSPEC_SB_LOWER(chspec) ((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
+ (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+ (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G((chspec))? WLC_BAND_5G: WLC_BAND_2G)
+
+#define CHANSPEC_STR_LEN 8
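+
+/* A minimal usage sketch, assuming the typedefs.h types/INLINE are in scope:
+ * build a 20 MHz chanspec for a channel number and check band and bandwidth
+ * with the accessor macros above. For channel 6 this yields a 2.4 GHz,
+ * 20 MHz chanspec with CHSPEC_CHANNEL() == 6.
+ */
+static INLINE bool
+example_is_2g_20mhz(uint channel)
+{
+	chanspec_t cs = CH20MHZ_CHSPEC(channel);
+
+	return (CHSPEC_IS2G(cs) && CHSPEC_IS20(cs));
+}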
+
+
+#define WLC_MAXRATE 108
+#define WLC_RATE_1M 2
+#define WLC_RATE_2M 4
+#define WLC_RATE_5M5 11
+#define WLC_RATE_11M 22
+#define WLC_RATE_6M 12
+#define WLC_RATE_9M 18
+#define WLC_RATE_12M 24
+#define WLC_RATE_18M 36
+#define WLC_RATE_24M 48
+#define WLC_RATE_36M 72
+#define WLC_RATE_48M 96
+#define WLC_RATE_54M 108
+
+#define WLC_2G_25MHZ_OFFSET 5
+
+
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+
+extern chanspec_t wf_chspec_aton(char *a);
+
+
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/dhdioctl.h b/drivers/net/wireless/bcm4329/include/dhdioctl.h
new file mode 100644
index 000000000000..980a14301003
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/dhdioctl.h
@@ -0,0 +1,123 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h,v 13.7.8.1.4.1.16.5 2010/05/21 21:49:38 Exp $
+ */
+
+#ifndef _dhdioctl_h_
+#define _dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ bool set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+ uint driver; /* to identify target driver */
+} dhd_ioctl_t;
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC 0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION 1
+
+#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC 0
+#define DHD_GET_VERSION 1
+#define DHD_GET_VAR 2
+#define DHD_SET_VAR 3
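+
+/* A minimal sketch of filling in dhd_ioctl_t for a DHD_GET_VAR request: the
+ * NUL-terminated iovar name goes at the start of the caller's buffer, which
+ * is also where the driver returns the value. How the request is actually
+ * delivered (e.g. through the dhd utility or the OS ioctl path) is outside
+ * this header; the buffer is assumed large enough for the name.
+ */
+static INLINE void
+example_fill_getvar(dhd_ioctl_t *ioc, char *buf, uint buflen, const char *name)
+{
+	uint i;
+
+	for (i = 0; (i + 1) < buflen && name[i] != '\0'; i++)
+		buf[i] = name[i];
+	buf[i] = '\0';
+
+	ioc->cmd = DHD_GET_VAR;
+	ioc->buf = buf;
+	ioc->len = buflen;
+	ioc->set = FALSE;
+	ioc->driver = DHD_IOCTL_MAGIC;	/* identifies the target driver */
+}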
+
+/* message levels */
+#define DHD_ERROR_VAL 0x0001
+#define DHD_TRACE_VAL 0x0002
+#define DHD_INFO_VAL 0x0004
+#define DHD_DATA_VAL 0x0008
+#define DHD_CTL_VAL 0x0010
+#define DHD_TIMER_VAL 0x0020
+#define DHD_HDRS_VAL 0x0040
+#define DHD_BYTES_VAL 0x0080
+#define DHD_INTR_VAL 0x0100
+#define DHD_LOG_VAL 0x0200
+#define DHD_GLOM_VAL 0x0400
+#define DHD_EVENT_VAL 0x0800
+#define DHD_BTA_VAL 0x1000
+#define DHD_ISCAN_VAL 0x2000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+ uint version; /* To allow structure change tracking */
+ uint freq; /* Max ticks between tx/rx attempts */
+ uint count; /* Test packets to send/rcv each attempt */
+ uint print; /* Print counts every <print> attempts */
+ uint total; /* Total packets (or bursts) */
+ uint minlen; /* Minimum length of packets to send */
+ uint maxlen; /* Maximum length of packets to send */
+ uint numsent; /* Count of test packets sent */
+ uint numrcvd; /* Count of test packets received */
+ uint numfail; /* Count of test send failures */
+ uint mode; /* Test mode (type of test packets) */
+ uint stop; /* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO 1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE (-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/epivers.h b/drivers/net/wireless/bcm4329/include/epivers.h
new file mode 100644
index 000000000000..cd66a9501cb6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/epivers.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.25 2005/10/28 18:35:33 Exp $
+ *
+*/
+
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define EPI_MAJOR_VERSION 4
+
+#define EPI_MINOR_VERSION 218
+
+#define EPI_RC_NUMBER 248
+
+#define EPI_INCREMENTAL_NUMBER 23
+
+#define EPI_BUILD_NUMBER 0
+
+#define EPI_VERSION 4, 218, 248, 23
+
+#define EPI_VERSION_NUM 0x04daf817
+
+
+#define EPI_VERSION_STR "4.218.248.23"
+#define EPI_ROUTER_VERSION_STR "4.219.248.23"
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/hndpmu.h b/drivers/net/wireless/bcm4329/include/hndpmu.h
new file mode 100644
index 000000000000..e829b3df2d0b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndpmu.h
@@ -0,0 +1,34 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h,v 13.14.4.3.4.3.8.7 2010/04/09 13:20:51 Exp $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h b/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h
new file mode 100644
index 000000000000..ca3281b6d901
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h,v 13.3.196.2 2010/07/15 19:06:11 Exp $
+ */
+
+#ifndef _hndrte_armtrap_h
+#define _hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define TRAP_STRIDE 4
+#define FIRST_TRAP TR_RST
+#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define MAX_TRAP_TYPE (TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+#define TRAP_T_SIZE 80
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+ uint32 type;
+ uint32 epc;
+ uint32 cpsr;
+ uint32 spsr;
+ uint32 r0;
+ uint32 r1;
+ uint32 r2;
+ uint32 r3;
+ uint32 r4;
+ uint32 r5;
+ uint32 r6;
+ uint32 r7;
+ uint32 r8;
+ uint32 r9;
+ uint32 r10;
+ uint32 r11;
+ uint32 r12;
+ uint32 r13;
+ uint32 r14;
+ uint32 pc;
+} trap_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _hndrte_armtrap_h */
diff --git a/drivers/net/wireless/bcm4329/include/hndrte_cons.h b/drivers/net/wireless/bcm4329/include/hndrte_cons.h
new file mode 100644
index 000000000000..a42417478a16
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndrte_cons.h
@@ -0,0 +1,63 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h,v 13.1.2.4 2010/07/15 19:06:11 Exp $
+ */
+
+#include <typedefs.h>
+
+#define CBUF_LEN (128)
+
+#define LOG_BUF_LEN 1024
+
+typedef struct {
+ uint32 buf; /* Can't be pointer on (64-bit) hosts */
+ uint buf_size;
+ uint idx;
+ char *_buf_compat; /* Redundant pointer for backward compat. */
+} hndrte_log_t;
+
+typedef struct {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn), the host should write a complete
+ * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host polls.
+ */
+ hndrte_log_t log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf until <CR> is received, then
+ * the buffer is processed as a command line. Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+} hndrte_cons_t;
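+
+/* A minimal host-side sketch of draining the output ring described above:
+ * remember the last log.idx seen and emit any bytes added since then,
+ * wrapping at buf_size. Fetching the shared structure and the log buffer out
+ * of dongle memory (e.g. over SDIO) is outside this header; local_log_buf is
+ * the host's copy of the ring.
+ */
+static INLINE uint
+example_drain_log(hndrte_cons_t *cons, const char *local_log_buf, uint last_idx,
+                  void (*emit)(char c))
+{
+	uint idx = cons->log.idx;
+
+	while (last_idx != idx) {
+		emit(local_log_buf[last_idx]);
+		last_idx = (last_idx + 1) % cons->log.buf_size;
+	}
+	return last_idx;	/* caller keeps this for the next poll */
+}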
diff --git a/drivers/net/wireless/bcm4329/include/hndsoc.h b/drivers/net/wireless/bcm4329/include/hndsoc.h
new file mode 100644
index 000000000000..35424175f55e
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndsoc.h
@@ -0,0 +1,195 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h,v 13.3.10.3 2008/08/06 03:43:25 Exp $
+ */
+
+#ifndef _HNDSOC_H
+#define _HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */
+#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
+#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
+
+#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
+#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
+#ifndef SI_MAXCORES
+#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips)
+ */
+#endif
+
+#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
+
+#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
+#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
+#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */
+#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
+#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
+#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */
+#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
+#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
+#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
+#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), low 32 bits
+ */
+#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define QNPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
+ * unused address ranges
+ */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define SI_CC_IDX 0
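As a rough illustration of how these constants fit together on an SB-style interconnect, where core register spaces sit back to back starting at SI_ENUM_BASE (AI-type chips instead discover cores through an enumeration ROM), a hypothetical helper could map one core's 4 KB register window; REG_MAP() here is the ioremap wrapper declared in linux_osl.h:

/* Sketch only: assumes an SB-style layout; si_core_regs() is a hypothetical name. */
static volatile void *si_core_regs(uint coreidx)
{
	/* each core owns SI_CORE_SIZE bytes of register space */
	return (volatile void *)REG_MAP(SI_ENUM_BASE + coreidx * SI_CORE_SIZE,
	                                SI_CORE_SIZE);
}

Chipcommon is always the first core, so its registers would be si_core_regs(SI_CC_IDX).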
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+
+/* clk_ctl_st register */
+#define CCS_FORCEALP 0x00000001 /* force ALP request */
+#define CCS_FORCEHT 0x00000002 /* force HT request */
+#define CCS_FORCEILP 0x00000004 /* force ILP request */
+#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
+#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
+#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
+#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
+#define CCS_HTAVAIL 0x00020000 /* HT is available */
+#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */
+#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */
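A minimal sketch of how a driver might use clk_ctl_st to ask the PMU for the HT clock, assuming core_regs points at a core's mapped register space and using the R_REG/OR_REG accessors declared in linux_osl.h (request_ht_clock() is a hypothetical name; real code would poll with a timeout):

static void request_ht_clock(osl_t *osh, volatile uint8 *core_regs)
{
	volatile uint32 *ccs = (volatile uint32 *)(core_regs + SI_CLK_CTL_ST);

	OR_REG(osh, ccs, CCS_HTAREQ);		/* request the HT clock */
	while (!(R_REG(osh, ccs) & CCS_HTAVAIL))
		;				/* wait until HT is reported available */
}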
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size */
+#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
+#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */
+#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
+#define BISZ_TXTST_IDX 1 /* 1: text start */
+#define BISZ_TXTEND_IDX 2 /* 2: text end */
+#define BISZ_DATAST_IDX 3 /* 3: data start */
+#define BISZ_DATAEND_IDX 4 /* 4: data end */
+#define BISZ_BSSST_IDX 5 /* 5: bss start */
+#define BISZ_BSSEND_IDX 6 /* 6: bss end */
+#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
+
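A small sketch of how a loader might look for the embedded size block described above, assuming the image is already mapped at base (read_bisz() is a hypothetical helper; only two of the fields are shown):

static int read_bisz(const uint32 *base, uint32 *text_start, uint32 *bss_end)
{
	const uint32 *bisz = (const uint32 *)((const uint8 *)base + BISZ_OFFSET);

	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
		return -1;			/* no descriptor present */
	*text_start = bisz[BISZ_TXTST_IDX];
	*bss_end = bisz[BISZ_BSSEND_IDX];
	return 0;
}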
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcm4329/include/linux_osl.h b/drivers/net/wireless/bcm4329/include/linux_osl.h
new file mode 100644
index 000000000000..b059c2adb17d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/linux_osl.h
@@ -0,0 +1,322 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h,v 13.131.30.8 2010/04/26 05:42:18 Exp $
+ */
+
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+
+#include <linuxver.h>
+
+
+#ifdef __GNUC__
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 30100
+#define ASSERT(exp) do {} while (0)
+#else
+
+#define ASSERT(exp)
+#endif
+#endif
+
+
+#define OSL_DELAY(usec) osl_delay(usec)
+extern void osl_delay(uint usec);
+
+
+
+#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+
+#define OSL_PCI_READ_CONFIG(osh, offset, size) \
+ osl_pci_read_config((osh), (offset), (size))
+#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+ osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+
+
+typedef struct {
+ bool pkttag;
+ uint pktalloced;
+ bool mmbus;
+ pktfree_cb_fn_t tx_fn;
+ void *tx_ctx;
+} osl_pubinfo_t;
+
+
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+ ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+ } while (0)
+
+
+#define BUS_SWAP32(v) (v)
+
+
+#define MALLOC(osh, size) osl_malloc((osh), (size))
+#define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size))
+#define MALLOCED(osh) osl_malloced((osh))
+
+
+#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
+
+extern void *osl_malloc(osl_t *osh, uint size);
+extern void osl_mfree(osl_t *osh, void *addr, uint size);
+extern uint osl_malloced(osl_t *osh);
+extern uint osl_malloc_failed(osl_t *osh);
+
+
+#define DMA_CONSISTENT_ALIGN PAGE_SIZE
+#define DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \
+ osl_dma_alloc_consistent((osh), (size), (pap))
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+
+
+#define DMA_TX 1
+#define DMA_RX 2
+
+
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+ osl_dma_map((osh), (va), (size), (direction))
+#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
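For illustration, a driver could wrap the streaming-DMA macros above like this, assuming osh came from osl_attach() and buf is a driver-owned buffer (map_tx_buffer() is a hypothetical name; the returned value is the bus address to program into a DMA descriptor):

static uint map_tx_buffer(osl_t *osh, void *buf, uint len)
{
	/* the packet and dma-handle arguments are unused by this OSL */
	return DMA_MAP(osh, buf, len, DMA_TX, NULL, NULL);
}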
+
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+
+
+#include <bcmsdh.h>
+#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
+#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+
+#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+ mmap_op else bus_op
+#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+ mmap_op : bus_op
+
+
+
+
+#ifndef printf
+#define printf(fmt, args...) printk(fmt, ## args)
+#endif
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+
+#ifndef IL_BIGENDIAN
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(uint8) ? readb((volatile uint8*)(r)) : \
+ sizeof(*(r)) == sizeof(uint16) ? readw((volatile uint16*)(r)) : \
+ readl((volatile uint32*)(r)), OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#else
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)((uintptr)(r)^3)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)((uintptr)(r)^2)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), \
+ (volatile uint8*)((uintptr)(r)^3)); break; \
+ case sizeof(uint16): writew((uint16)(v), \
+ (volatile uint16*)((uintptr)(r)^2)); break; \
+ case sizeof(uint32): writel((uint32)(v), \
+ (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#endif
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
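As a usage sketch for the accessors above, a driver could set a bit in a core's 32-bit control register like this, assuming regs points at a mapped register and using SICF_FGC from hndsoc.h (set_force_gated_clock() is a hypothetical name):

static void set_force_gated_clock(osl_t *osh, volatile uint32 *regs)
{
	/* read-modify-write through the OSL so it works on both mmio and SDIO paths */
	W_REG(osh, regs, R_REG(osh, regs) | SICF_FGC);
}

which is exactly what OR_REG(osh, regs, SICF_FGC) expands to.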
+
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+#define OSL_UNCACHED(va) ((void*)va)
+
+
+#if defined(__i386__)
+#define OSL_GETCYCLES(x) rdtscl((x))
+#else
+#define OSL_GETCYCLES(x) ((x) = 0)
+#endif
+
+
+#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
+
+
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif
+#define REG_UNMAP(va) iounmap((va))
+
+
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define BZERO_SM(r, len) memset((r), '\0', (len))
+
+
+#define PKTGET(osh, len, send) osl_pktget((osh), (len))
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#ifdef DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#endif
+#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data)
+#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next)
+#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len))
+#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes))
+#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh) ((osl_pubinfo_t *)(osh))->pktalloced
+#define PKTSETPOOL(osh, skb, x, y) do {} while (0)
+#define PKTPOOL(osh, skb) FALSE
+#define PKTPOOLLEN(osh, pktp) (0)
+#define PKTPOOLAVAIL(osh, pktp) (0)
+#define PKTPOOLADD(osh, pktp, p) BCME_ERROR
+#define PKTPOOLGET(osh, pktp) NULL
+#define PKTLIST_DUMP(osh, buf)
+
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void *osl_pktdup(osl_t *osh, void *skb);
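A minimal allocation/free round trip with the packet macros above, assuming osh came from osl_attach() (pkt_roundtrip() is a hypothetical helper; error handling is reduced to a NULL check):

static void pkt_roundtrip(osl_t *osh)
{
	void *p = PKTGET(osh, PKTBUFSZ, TRUE);		/* allocate a send-direction packet */

	if (p != NULL) {
		uchar *payload = PKTDATA(osh, p);	/* points at the sk_buff data area */
		(void)payload;
		PKTFREE(osh, p, TRUE);			/* hand it back */
	}
}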
+
+
+
+static INLINE void *
+osl_pkt_frmnative(osl_pubinfo_t *osh, struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ if (osh->pkttag)
+ bzero((void*)skb->cb, OSL_PKTTAG_SZ);
+
+
+ for (nskb = skb; nskb; nskb = nskb->next) {
+ osh->pktalloced++;
+ }
+
+ return (void *)skb;
+}
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb))
+
+
+static INLINE struct sk_buff *
+osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+
+ if (osh->pkttag)
+ bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ osh->pktalloced--;
+ }
+
+ return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+
+#define PKTBUFSZ 2048
+
+
+#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/linuxver.h b/drivers/net/wireless/bcm4329/include/linuxver.h
new file mode 100644
index 000000000000..6ed22658a72b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/linuxver.h
@@ -0,0 +1,447 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h,v 13.38.8.1.8.6 2010/04/29 05:00:46 Exp $
+ */
+
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#include <linux/autoconf.h>
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+ MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define MY_INIT_WORK(_work, _func, _data) INIT_WORK(_work, _func)
+#else
+#define MY_INIT_WORK(_work, _func, _data) INIT_WORK(_work, _func, _data)
+typedef void (*work_func_t)(void *work);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef CONFIG_NET_RADIO
+#define CONFIG_WIRELESS_EXT
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#ifndef SANDGATE2G
+#define MOD_INC_USE_COUNT
+#endif
+#endif
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#define __devinit __init
+#endif
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x) x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev) (dev)->sysdata
+#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
+
+
+
+struct pci_device_id {
+ unsigned int vendor, device;
+ unsigned int subvendor, subdevice;
+ unsigned int class, class_mask;
+ unsigned long driver_data;
+};
+
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table;
+ int (*probe)(struct pci_dev *dev,
+ const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *dev);
+ void (*suspend)(struct pci_dev *dev);
+ void (*resume)(struct pci_dev *dev);
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x) __initcall(x);
+#define module_exit(x) __exitcall(x);
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+
+
+#ifndef PCI_DMA_TODEVICE
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#endif
+
+typedef u32 dma_addr_t;
+
+
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a) dev_kfree_skb(a)
+#define netif_down(dev) do { (dev)->start = 0; } while (0)
+
+
+#ifndef _COMPAT_NETDEVICE_H
+
+
+
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+ do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+}
+
+#define netif_queue_stopped(dev) (dev)->tbusy
+#define netif_running(dev) (dev)->start
+
+#endif
+
+#define netif_device_attach(dev) netif_start_queue(dev)
+#define netif_device_detach(dev) netif_stop_queue(dev)
+
+
+#define tasklet_struct tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+ queue_task(tasklet, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+ void (*func)(unsigned long),
+ unsigned long data)
+{
+ tasklet->next = NULL;
+ tasklet->sync = 0;
+ tasklet->routine = (void (*)(void *))func;
+ tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet) { do {} while (0); }
+
+
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+
+
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+
+
+
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4, &buffer[i]);
+ }
+ return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(dev, i * 4, buffer[i]);
+ }
+
+ else {
+ for (i = 0; i < 6; i ++)
+ pci_write_config_dword(dev,
+ PCI_BASE_ADDRESS_0 + (i * 4),
+ pci_resource_start(dev, i));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+ return 0;
+}
+
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#else
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do {} while (0)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev) kfree(dev)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#define af_packet_priv data
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#define KILL_PROC(pid, sig) \
+{ \
+ struct task_struct *tsk; \
+ tsk = pid_task(find_vpid(pid), PIDTYPE_PID); \
+ if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+ kill_proc(pid, sig, 1); \
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/miniopt.h b/drivers/net/wireless/bcm4329/include/miniopt.h
new file mode 100644
index 000000000000..3667fb1e215b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h,v 1.1.6.2 2009/01/14 23:52:48 Exp $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY 128 /* Max options */
+typedef struct miniopt {
+
+ /* These are persistent after miniopt_init() */
+ const char* name; /* name for prompt in error strings */
+ const char* flags; /* option chars that take no args */
+ bool longflags; /* long options may be flags */
+ bool opt_end; /* at end of options (passed a "--") */
+
+ /* These are per-call to miniopt() */
+
+ int consumed; /* number of argv entries consumed in
+ * the most recent call to miniopt()
+ */
+ bool positional;
+ bool good_int; /* 'val' member is the result of a successful
+ * strtol conversion of the option value
+ */
+ char opt;
+ char key[MINIOPT_MAXKEY];
+ char* valstr; /* positional param, or value for the option,
+ * or null if the option had
+ * no accompanying value
+ */
+ uint uval; /* strtol translation of valstr */
+ int val; /* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
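A condensed parsing loop for this interface, assuming the conventional return values used by its callers (-1 once the argument list is exhausted, 1 on a parse error); parse_channel_arg() and the -c option are hypothetical:

static int parse_channel_arg(char **argv, int *channel)
{
	miniopt_t opt;
	int err;

	miniopt_init(&opt, "example", NULL, FALSE);
	while ((err = miniopt(&opt, argv)) != -1) {
		if (err == 1)
			return -1;			/* parse error */
		argv += opt.consumed;
		if (!opt.positional && opt.opt == 'c' && opt.good_int)
			*channel = opt.val;		/* "-c <n>" style option */
	}
	return 0;
}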
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* MINI_OPT_H */
diff --git a/drivers/net/wireless/bcm4329/include/msgtrace.h b/drivers/net/wireless/bcm4329/include/msgtrace.h
new file mode 100644
index 000000000000..1479086dba3e
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/msgtrace.h
@@ -0,0 +1,72 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h,v 1.1.2.4 2009/01/27 04:09:40 Exp $
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+ uint8 version;
+ uint8 spare;
+ uint16 len; /* Len of the trace */
+ uint32 seqnum; /* Sequence number of message. Useful if the message has been lost
+ * because of DMA error or a bus reset (ex: SDIO Func2)
+ */
+ uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */
+ uint32 discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
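For illustration, a host-side consumer could peel the header off a received trace buffer like this (msgtrace_payload() is a hypothetical helper; the multi-byte fields typically arrive in network byte order and would still need byte-swapping before use):

static const uint8 *msgtrace_payload(const uint8 *buf, uint16 buflen, uint16 *payload_len)
{
	const msgtrace_hdr_t *hdr = (const msgtrace_hdr_t *)buf;

	if (buflen < MSGTRACE_HDRLEN || hdr->version != MSGTRACE_VERSION)
		return NULL;			/* not a trace message we understand */
	*payload_len = buflen - MSGTRACE_HDRLEN;
	return buf + MSGTRACE_HDRLEN;
}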
+
+/* The hbus driver generates traces when sending a trace message, which would cause endless
+ * traces. This flag must be set to TRUE in any hbus trace path; it is reset in msgtrace_put().
+ * This prevents endless traces at the cost of a potentially hazardous loss of traces, but only
+ * in bus device code. It is recommended to set this flag in the SD_TRACE macro but not in
+ * SD_ERROR, to avoid missing hbus error traces; hbus error traces should not generate endless
+ * traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+ uint16 hdrlen, uint8 *buf, uint16 buflen);
+
+extern void msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcm4329/include/osl.h b/drivers/net/wireless/bcm4329/include/osl.h
new file mode 100644
index 000000000000..5599e536eeea
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/osl.h
@@ -0,0 +1,55 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: osl.h,v 13.37.32.1 2008/11/20 00:51:15 Exp $
+ */
+
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ 32
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+#include <linux_osl.h>
+
+
+
+
+#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif
+
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/packed_section_end.h b/drivers/net/wireless/bcm4329/include/packed_section_end.h
new file mode 100644
index 000000000000..5b61c18fcd08
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/packed_section_end.h
@@ -0,0 +1,54 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h,v 1.1.6.3 2008/12/10 00:27:54 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #undef BWL_PACKED_SECTION
+#else
+ #error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef BWL_PRE_PACKED_STRUCT
+#undef BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcm4329/include/packed_section_start.h b/drivers/net/wireless/bcm4329/include/packed_section_start.h
new file mode 100644
index 000000000000..cb93aa64079a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/packed_section_start.h
@@ -0,0 +1,61 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h,v 1.1.6.3 2008/12/10 00:27:54 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #error "BWL_PACKED_SECTION is already defined!"
+#else
+ #define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT __attribute__((packed))
+#elif defined(__CC_ARM)
+ #define BWL_PRE_PACKED_STRUCT __packed
+ #define BWL_POST_PACKED_STRUCT
+#else
+ #error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/pcicfg.h b/drivers/net/wireless/bcm4329/include/pcicfg.h
new file mode 100644
index 000000000000..898962c942a8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/pcicfg.h
@@ -0,0 +1,52 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h,v 1.41.12.3 2008/06/26 22:49:41 Exp $
+ */
+
+
+#ifndef _h_pcicfg_
+#define _h_pcicfg_
+
+
+#define PCI_CFG_VID 0
+#define PCI_CFG_CMD 4
+#define PCI_CFG_REV 8
+#define PCI_CFG_BAR0 0x10
+#define PCI_CFG_BAR1 0x14
+#define PCI_BAR0_WIN 0x80
+#define PCI_INT_STATUS 0x90
+#define PCI_INT_MASK 0x94
+
+#define PCIE_EXTCFG_OFFSET 0x100
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
+
+#define PCI_BAR0_WINSZ (16 * 1024)
+
+
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
+#define PCI_16KBB0_WINSZ (16 * 1024)
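As a usage sketch, PCI_BAR0_WIN is used as the config-space window register that a driver programs so the fixed BAR0 window exposes a chosen backplane address; with the OSL accessor from linux_osl.h this is a single write (set_bar0_window() is a hypothetical name):

static void set_bar0_window(osl_t *osh, uint32 backplane_addr)
{
	/* point the BAR0 window at the core we want to touch */
	OSL_PCI_WRITE_CONFIG(osh, PCI_BAR0_WIN, sizeof(uint32), backplane_addr);
}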
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/802.11.h b/drivers/net/wireless/bcm4329/include/proto/802.11.h
new file mode 100644
index 000000000000..fd26317361da
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/802.11.h
@@ -0,0 +1,1433 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h,v 9.219.4.1.4.5.6.11 2010/02/09 13:23:26 Exp $
+ */
+
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US 1024
+
+
+#define DOT11_A3_HDR_LEN 24
+#define DOT11_A4_HDR_LEN 30
+#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN
+#define DOT11_FCS_LEN 4
+#define DOT11_ICV_LEN 4
+#define DOT11_ICV_AES_LEN 8
+#define DOT11_QOS_LEN 2
+#define DOT11_HTC_LEN 4
+
+#define DOT11_KEY_INDEX_SHIFT 6
+#define DOT11_IV_LEN 4
+#define DOT11_IV_TKIP_LEN 8
+#define DOT11_IV_AES_OCB_LEN 4
+#define DOT11_IV_AES_CCM_LEN 8
+#define DOT11_IV_MAX_LEN 8
+
+
+#define DOT11_MAX_MPDU_BODY_LEN 2304
+
+#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ DOT11_IV_AES_CCM_LEN + \
+ DOT11_MAX_MPDU_BODY_LEN + \
+ DOT11_ICV_LEN + \
+ DOT11_FCS_LEN)
+
+#define DOT11_MAX_SSID_LEN 32
+
+
+#define DOT11_DEFAULT_RTS_LEN 2347
+#define DOT11_MAX_RTS_LEN 2347
+
+
+#define DOT11_MIN_FRAG_LEN 256
+#define DOT11_MAX_FRAG_LEN 2346
+#define DOT11_DEFAULT_FRAG_LEN 2346
+
+
+#define DOT11_MIN_BEACON_PERIOD 1
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF
+
+
+#define DOT11_MIN_DTIM_PERIOD 1
+#define DOT11_MAX_DTIM_PERIOD 0xFF
+
+
+#define DOT11_LLC_SNAP_HDR_LEN 8
+#define DOT11_OUI_LEN 3
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+ uint8 dsap;
+ uint8 ssap;
+ uint8 ctl;
+ uint8 oui[DOT11_OUI_LEN];
+ uint16 type;
+} BWL_POST_PACKED_STRUCT;
+
+
+#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr a1;
+ struct ether_addr a2;
+ struct ether_addr a3;
+ uint16 seq;
+ struct ether_addr a4;
+} BWL_POST_PACKED_STRUCT;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_RTS_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTS_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACK_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr bssid;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_PS_POLL_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CS_END_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1040];
+ struct dot11_action_wifi_vendor_specific* next_node;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+#define DOT11_BA_CTL_POLICY_NORMAL 0x0000
+#define DOT11_BA_CTL_POLICY_NOACK 0x0001
+#define DOT11_BA_CTL_POLICY_MASK 0x0001
+
+#define DOT11_BA_CTL_MTID 0x0002
+#define DOT11_BA_CTL_COMPRESSED 0x0004
+
+#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0
+#define DOT11_BA_CTL_NUMMSDU_SHIFT 6
+
+#define DOT11_BA_CTL_TID_MASK 0xF000
+#define DOT11_BA_CTL_TID_SHIFT 12
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+ uint16 bar_control;
+ uint16 seqnum;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN 4
+
+#define DOT11_BA_BITMAP_LEN 128
+#define DOT11_BA_CMP_BITMAP_LEN 8
+
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+ uint16 ba_control;
+ uint16 seqnum;
+ uint8 bitmap[DOT11_BA_BITMAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr da;
+ struct ether_addr sa;
+ struct ether_addr bssid;
+ uint16 seq;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_HDR_LEN 24
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+ uint32 timestamp[2];
+ uint16 beacon_interval;
+ uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BCN_PRB_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+ uint16 alg;
+ uint16 seq;
+ uint16 status;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+ uint16 capability;
+ uint16 listen;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+ uint16 capability;
+ uint16 listen;
+ struct ether_addr ap;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+ uint16 capability;
+ uint16 status;
+ uint16 aid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+ uint8 category;
+ uint8 action;
+ uint8 ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+ uint8 category;
+ uint8 action;
+ uint8 control;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE 1
+#define SM_PWRSAVE_MODE 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+ uint8 id;
+ uint8 len;
+ uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+ uint8 min;
+ uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+ uint8 id;
+ uint8 len;
+ uint8 tx_pwr;
+ uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+ uint8 id;
+ uint8 len;
+ uint8 first_channel;
+ uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+ uint8 id;
+ uint8 len;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN 5
+#define BRCM_EXTCH_IE_TYPE 53
+#define DOT11_EXTCH_IE_LEN 1
+#define DOT11_EXT_CH_MASK 0x03
+#define DOT11_EXT_CH_UPPER 0x01
+#define DOT11_EXT_CH_LOWER 0x03
+#define DOT11_EXT_CH_NONE 0x00
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+ uint8 category;
+ uint8 action;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+ uint8 id;
+ uint8 len;
+ uint8 mode;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3
+
+#define DOT11_CSA_MODE_ADVISORY 0
+#define DOT11_CSA_MODE_NO_TX 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+ uint8 category;
+ uint8 action;
+ dot11_chan_switch_ie_t chan_switch_ie;
+ dot11_brcm_extch_ie_t extch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+ uint8 mode;
+ uint8 reg;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+ uint8 id;
+ uint8 len;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ dot11_ext_csa_ie_t chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+ uint8 id;
+ uint8 len;
+ uint8 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1
+
+#define DOT11_OBSS_COEX_INFO_REQ 0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+ uint8 id;
+ uint8 len;
+ uint8 regclass;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+#define DOT11_EXTCAP_LEN 1
+
+
+
+#define DOT11_MEASURE_TYPE_BASIC 0
+#define DOT11_MEASURE_TYPE_CCA 1
+#define DOT11_MEASURE_TYPE_RPI 2
+
+
+#define DOT11_MEASURE_MODE_ENABLE (1<<1)
+#define DOT11_MEASURE_MODE_REQUEST (1<<2)
+#define DOT11_MEASURE_MODE_REPORT (1<<3)
+
+#define DOT11_MEASURE_MODE_LATE (1<<0)
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1)
+#define DOT11_MEASURE_MODE_REFUSED (1<<2)
+
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0))
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1))
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2))
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3))
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4))
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14
+
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+ } BWL_POST_PACKED_STRUCT basic;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+ uint8 id;
+ uint8 len;
+ uint8 count;
+ uint8 period;
+ uint16 duration;
+ uint16 offset;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+ uint8 channel;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+ uint8 id;
+ uint8 len;
+ uint8 eaddr[ETHER_ADDR_LEN];
+ uint8 interval;
+ chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+
+#define WME_OUI "\x00\x50\xf2"
+#define WME_VER 1
+#define WME_TYPE 2
+#define WME_SUBTYPE_IE 0
+#define WME_SUBTYPE_PARAM_IE 1
+#define WME_SUBTYPE_TSPEC 2
+
+
+#define AC_BE 0
+#define AC_BK 1
+#define AC_VI 2
+#define AC_VO 3
+#define AC_COUNT 4
+
+typedef uint8 ac_bitmap_t;
+
+#define AC_BITMAP_NONE 0x0
+#define AC_BITMAP_ALL 0xf
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
+
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+ uint8 ACI;
+ uint8 ECW;
+ uint16 TXOP;
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN 24
+
+
+#define WME_QI_AP_APSD_MASK 0x80
+#define WME_QI_AP_APSD_SHIFT 7
+#define WME_QI_AP_COUNT_MASK 0x0f
+#define WME_QI_AP_COUNT_SHIFT 0
+
+
+#define WME_QI_STA_MAXSPLEN_MASK 0x60
+#define WME_QI_STA_MAXSPLEN_SHIFT 5
+#define WME_QI_STA_APSD_ALL_MASK 0xf
+#define WME_QI_STA_APSD_ALL_SHIFT 0
+#define WME_QI_STA_APSD_BE_MASK 0x8
+#define WME_QI_STA_APSD_BE_SHIFT 3
+#define WME_QI_STA_APSD_BK_MASK 0x4
+#define WME_QI_STA_APSD_BK_SHIFT 2
+#define WME_QI_STA_APSD_VI_MASK 0x2
+#define WME_QI_STA_APSD_VI_SHIFT 1
+#define WME_QI_STA_APSD_VO_MASK 0x1
+#define WME_QI_STA_APSD_VO_SHIFT 0
+
+
+#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MAX 15
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_ACM_MASK 0x10
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_AIFSN_SHIFT 12
+
+
+#define EDCF_ECW_MIN 0
+#define EDCF_ECW_MAX 15
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_MASK 0xf0
+#define EDCF_ECWMAX_SHIFT 4
+
+
+#define EDCF_TXOP_MIN 0
+#define EDCF_TXOP_MAX 65535
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+
+
+#define NON_EDCF_AC_BE_ACI_STA 0x02
+
+
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VO_TXOP_STA 0x002f
+
+
+#define EDCF_AC_BE_ACI_AP 0x03
+#define EDCF_AC_BE_ECW_AP 0x64
+#define EDCF_AC_BE_TXOP_AP 0x0000
+#define EDCF_AC_BK_ACI_AP 0x27
+#define EDCF_AC_BK_ECW_AP 0xA4
+#define EDCF_AC_BK_TXOP_AP 0x0000
+#define EDCF_AC_VI_ACI_AP 0x41
+#define EDCF_AC_VI_ECW_AP 0x43
+#define EDCF_AC_VI_TXOP_AP 0x005e
+#define EDCF_AC_VO_ACI_AP 0x61
+#define EDCF_AC_VO_ECW_AP 0x32
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+ uint8 id;
+ uint8 length;
+ uint16 station_count;
+ uint8 channel_utilization;
+ uint16 aac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+
+
+#define FIXED_MSDU_SIZE 0x8000
+#define MSDU_SIZE_MASK 0x7fff
+
+
+
+#define INTEGER_SHIFT 13
+#define FRACTION_MASK 0x1FFF
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4
+
+
+#define WME_ADDTS_REQUEST 0
+#define WME_ADDTS_RESPONSE 1
+#define WME_DELTS_REQUEST 2
+
+
+#define WME_ADMISSION_ACCEPTED 0
+#define WME_INVALID_PARAMETERS 1
+#define WME_ADMISSION_REFUSED 3
+
+
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+
+#define DOT11_OPEN_SYSTEM 0
+#define DOT11_SHARED_KEY 1
+
+#define DOT11_OPEN_SHARED 2
+#define DOT11_CHALLENGE_LEN 128
+
+
+#define FC_PVER_MASK 0x3
+#define FC_PVER_SHIFT 0
+#define FC_TYPE_MASK 0xC
+#define FC_TYPE_SHIFT 2
+#define FC_SUBTYPE_MASK 0xF0
+#define FC_SUBTYPE_SHIFT 4
+#define FC_TODS 0x100
+#define FC_TODS_SHIFT 8
+#define FC_FROMDS 0x200
+#define FC_FROMDS_SHIFT 9
+#define FC_MOREFRAG 0x400
+#define FC_MOREFRAG_SHIFT 10
+#define FC_RETRY 0x800
+#define FC_RETRY_SHIFT 11
+#define FC_PM 0x1000
+#define FC_PM_SHIFT 12
+#define FC_MOREDATA 0x2000
+#define FC_MOREDATA_SHIFT 13
+#define FC_WEP 0x4000
+#define FC_WEP_SHIFT 14
+#define FC_ORDER 0x8000
+#define FC_ORDER_SHIFT 15
+
+
+#define SEQNUM_SHIFT 4
+#define SEQNUM_MAX 0x1000
+#define FRAGNUM_MASK 0xF
+
+
+
+
+#define FC_TYPE_MNG 0
+#define FC_TYPE_CTL 1
+#define FC_TYPE_DATA 2
+
+
+#define FC_SUBTYPE_ASSOC_REQ 0
+#define FC_SUBTYPE_ASSOC_RESP 1
+#define FC_SUBTYPE_REASSOC_REQ 2
+#define FC_SUBTYPE_REASSOC_RESP 3
+#define FC_SUBTYPE_PROBE_REQ 4
+#define FC_SUBTYPE_PROBE_RESP 5
+#define FC_SUBTYPE_BEACON 8
+#define FC_SUBTYPE_ATIM 9
+#define FC_SUBTYPE_DISASSOC 10
+#define FC_SUBTYPE_AUTH 11
+#define FC_SUBTYPE_DEAUTH 12
+#define FC_SUBTYPE_ACTION 13
+#define FC_SUBTYPE_ACTION_NOACK 14
+
+
+#define FC_SUBTYPE_CTL_WRAPPER 7
+#define FC_SUBTYPE_BLOCKACK_REQ 8
+#define FC_SUBTYPE_BLOCKACK 9
+#define FC_SUBTYPE_PS_POLL 10
+#define FC_SUBTYPE_RTS 11
+#define FC_SUBTYPE_CTS 12
+#define FC_SUBTYPE_ACK 13
+#define FC_SUBTYPE_CF_END 14
+#define FC_SUBTYPE_CF_END_ACK 15
+
+
+#define FC_SUBTYPE_DATA 0
+#define FC_SUBTYPE_DATA_CF_ACK 1
+#define FC_SUBTYPE_DATA_CF_POLL 2
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3
+#define FC_SUBTYPE_NULL 4
+#define FC_SUBTYPE_CF_ACK 5
+#define FC_SUBTYPE_CF_POLL 6
+#define FC_SUBTYPE_CF_ACK_POLL 7
+#define FC_SUBTYPE_QOS_DATA 8
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11
+#define FC_SUBTYPE_QOS_NULL 12
+#define FC_SUBTYPE_QOS_CF_POLL 14
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15
+
+
+#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
+
+
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK)
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)
+
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)
+
+
+
+
+#define QOS_PRIO_SHIFT 0
+#define QOS_PRIO_MASK 0x0007
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)
+
+
+#define QOS_TID_SHIFT 0
+#define QOS_TID_MASK 0x000f
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)
+
+
+#define QOS_EOSP_SHIFT 4
+#define QOS_EOSP_MASK 0x0010
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)
+
+
+#define QOS_ACK_NORMAL_ACK 0
+#define QOS_ACK_NO_ACK 1
+#define QOS_ACK_NO_EXP_ACK 2
+#define QOS_ACK_BLOCK_ACK 3
+#define QOS_ACK_SHIFT 5
+#define QOS_ACK_MASK 0x0060
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)
+
+
+#define QOS_AMSDU_SHIFT 7
+#define QOS_AMSDU_MASK 0x0080
+
+
+
+
+
+
+#define DOT11_MNG_AUTH_ALGO_LEN 2
+#define DOT11_MNG_AUTH_SEQ_LEN 2
+#define DOT11_MNG_BEACON_INT_LEN 2
+#define DOT11_MNG_CAP_LEN 2
+#define DOT11_MNG_AP_ADDR_LEN 6
+#define DOT11_MNG_LISTEN_INT_LEN 2
+#define DOT11_MNG_REASON_LEN 2
+#define DOT11_MNG_AID_LEN 2
+#define DOT11_MNG_STATUS_LEN 2
+#define DOT11_MNG_TIMESTAMP_LEN 8
+
+
+#define DOT11_AID_MASK 0x3fff
+
+
+#define DOT11_RC_RESERVED 0
+#define DOT11_RC_UNSPECIFIED 1
+#define DOT11_RC_AUTH_INVAL 2
+#define DOT11_RC_DEAUTH_LEAVING 3
+#define DOT11_RC_INACTIVITY 4
+#define DOT11_RC_BUSY 5
+#define DOT11_RC_INVAL_CLASS_2 6
+#define DOT11_RC_INVAL_CLASS_3 7
+#define DOT11_RC_DISASSOC_LEAVING 8
+#define DOT11_RC_NOT_AUTH 9
+#define DOT11_RC_BAD_PC 10
+#define DOT11_RC_BAD_CHANNELS 11
+
+
+
+#define DOT11_RC_UNSPECIFIED_QOS 32
+#define DOT11_RC_INSUFFCIENT_BW 33
+#define DOT11_RC_EXCESSIVE_FRAMES 34
+#define DOT11_RC_TX_OUTSIDE_TXOP 35
+#define DOT11_RC_LEAVING_QBSS 36
+#define DOT11_RC_BAD_MECHANISM 37
+#define DOT11_RC_SETUP_NEEDED 38
+#define DOT11_RC_TIMEOUT 39
+
+#define DOT11_RC_MAX 23
+
+
+#define DOT11_SC_SUCCESS 0
+#define DOT11_SC_FAILURE 1
+#define DOT11_SC_CAP_MISMATCH 10
+#define DOT11_SC_REASSOC_FAIL 11
+#define DOT11_SC_ASSOC_FAIL 12
+#define DOT11_SC_AUTH_MISMATCH 13
+#define DOT11_SC_AUTH_SEQ 14
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15
+#define DOT11_SC_AUTH_TIMEOUT 16
+#define DOT11_SC_ASSOC_BUSY_FAIL 17
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25
+#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26
+#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27
+
+#define DOT11_SC_DECLINED 37
+#define DOT11_SC_INVALID_PARAMS 38
+
+
+#define DOT11_MNG_DS_PARAM_LEN 1
+#define DOT11_MNG_IBSS_PARAM_LEN 2
+
+
+#define DOT11_MNG_TIM_FIXED_LEN 3
+#define DOT11_MNG_TIM_DTIM_COUNT 0
+#define DOT11_MNG_TIM_DTIM_PERIOD 1
+#define DOT11_MNG_TIM_BITMAP_CTL 2
+#define DOT11_MNG_TIM_PVB 3
+
+
+#define TLV_TAG_OFF 0
+#define TLV_LEN_OFF 1
+#define TLV_HDR_LEN 2
+#define TLV_BODY_OFF 2
+
+
+#define DOT11_MNG_SSID_ID 0
+#define DOT11_MNG_RATES_ID 1
+#define DOT11_MNG_FH_PARMS_ID 2
+#define DOT11_MNG_DS_PARMS_ID 3
+#define DOT11_MNG_CF_PARMS_ID 4
+#define DOT11_MNG_TIM_ID 5
+#define DOT11_MNG_IBSS_PARMS_ID 6
+#define DOT11_MNG_COUNTRY_ID 7
+#define DOT11_MNG_HOPPING_PARMS_ID 8
+#define DOT11_MNG_HOPPING_TABLE_ID 9
+#define DOT11_MNG_REQUEST_ID 10
+#define DOT11_MNG_QBSS_LOAD_ID 11
+#define DOT11_MNG_EDCA_PARAM_ID 12
+#define DOT11_MNG_CHALLENGE_ID 16
+#define DOT11_MNG_PWR_CONSTRAINT_ID 32
+#define DOT11_MNG_PWR_CAP_ID 33
+#define DOT11_MNG_TPC_REQUEST_ID 34
+#define DOT11_MNG_TPC_REPORT_ID 35
+#define DOT11_MNG_SUPP_CHANNELS_ID 36
+#define DOT11_MNG_CHANNEL_SWITCH_ID 37
+#define DOT11_MNG_MEASURE_REQUEST_ID 38
+#define DOT11_MNG_MEASURE_REPORT_ID 39
+#define DOT11_MNG_QUIET_ID 40
+#define DOT11_MNG_IBSS_DFS_ID 41
+#define DOT11_MNG_ERP_ID 42
+#define DOT11_MNG_TS_DELAY_ID 43
+#define DOT11_MNG_HT_CAP 45
+#define DOT11_MNG_QOS_CAP_ID 46
+#define DOT11_MNG_NONERP_ID 47
+#define DOT11_MNG_RSN_ID 48
+#define DOT11_MNG_EXT_RATES_ID 50
+#define DOT11_MNG_REGCLASS_ID 59
+#define DOT11_MNG_EXT_CSA_ID 60
+#define DOT11_MNG_HT_ADD 61
+#define DOT11_MNG_EXT_CHANNEL_OFFSET 62
+#define DOT11_MNG_WAPI_ID 68
+#define DOT11_MNG_HT_BSS_COEXINFO_ID 72
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73
+#define DOT11_MNG_HT_OBSS_ID 74
+#define DOT11_MNG_EXT_CAP 127
+#define DOT11_MNG_WPA_ID 221
+#define DOT11_MNG_PROPR_ID 221
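
These element IDs are looked up by walking the tagged-parameter area with the TLV_* offsets defined above. A minimal, illustrative walker (find_ie is not a function from this patch; bounds handling is deliberately simple and assumes the caller already skipped the fixed fields):

    static const uint8 *find_ie(const uint8 *tlvs, int tlvs_len, uint8 id)
    {
        while (tlvs_len >= TLV_HDR_LEN) {
            uint8 tag = tlvs[TLV_TAG_OFF];
            uint8 len = tlvs[TLV_LEN_OFF];

            if (TLV_HDR_LEN + len > tlvs_len)
                break;                    /* truncated element */
            if (tag == id)
                return tlvs;              /* points at the tag byte */
            tlvs     += TLV_HDR_LEN + len;
            tlvs_len -= TLV_HDR_LEN + len;
        }
        return NULL;
    }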
+
+
+#define DOT11_RATE_BASIC 0x80
+#define DOT11_RATE_MASK 0x7F
+
+
+#define DOT11_MNG_ERP_LEN 1
+#define DOT11_MNG_NONERP_PRESENT 0x01
+#define DOT11_MNG_USE_PROTECTION 0x02
+#define DOT11_MNG_BARKER_PREAMBLE 0x04
+
+#define DOT11_MGN_TS_DELAY_LEN 4
+#define TS_DELAY_FIELD_SIZE 4
+
+
+#define DOT11_CAP_ESS 0x0001
+#define DOT11_CAP_IBSS 0x0002
+#define DOT11_CAP_POLLABLE 0x0004
+#define DOT11_CAP_POLL_RQ 0x0008
+#define DOT11_CAP_PRIVACY 0x0010
+#define DOT11_CAP_SHORT 0x0020
+#define DOT11_CAP_PBCC 0x0040
+#define DOT11_CAP_AGILITY 0x0080
+#define DOT11_CAP_SPECTRUM 0x0100
+#define DOT11_CAP_SHORTSLOT 0x0400
+#define DOT11_CAP_CCK_OFDM 0x2000
+
+
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01
+
+
+#define DOT11_ACTION_HDR_LEN 2
+#define DOT11_ACTION_CAT_ERR_MASK 0x80
+#define DOT11_ACTION_CAT_MASK 0x7F
+#define DOT11_ACTION_CAT_SPECT_MNG 0
+#define DOT11_ACTION_CAT_BLOCKACK 3
+#define DOT11_ACTION_CAT_PUBLIC 4
+#define DOT11_ACTION_CAT_HT 7
+#define DOT11_ACTION_CAT_VS 127
+#define DOT11_ACTION_NOTIFICATION 0x11
+
+#define DOT11_ACTION_ID_M_REQ 0
+#define DOT11_ACTION_ID_M_REP 1
+#define DOT11_ACTION_ID_TPC_REQ 2
+#define DOT11_ACTION_ID_TPC_REP 3
+#define DOT11_ACTION_ID_CHANNEL_SWITCH 4
+#define DOT11_ACTION_ID_EXT_CSA 5
+
+
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0
+#define DOT11_ACTION_ID_HT_MIMO_PS 1
+
+
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4
+
+
+#define DOT11_BA_ACTION_ADDBA_REQ 0
+#define DOT11_BA_ACTION_ADDBA_RESP 1
+#define DOT11_BA_ACTION_DELBA 2
+
+
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6
+
+#define DOT11_ADDBA_POLICY_DELAYED 0
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1
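
Building the parameter-set field of an ADDBA request means packing the policy, TID and buffer size with the masks above. Illustrative sketch only (the helper name is invented; conversion to wire byte order is left to the caller):

    static uint16 make_addba_param_set(uint8 tid, uint16 bufsize)
    {
        uint16 param = 0;

        param |= (DOT11_ADDBA_POLICY_IMMEDIATE << DOT11_ADDBA_PARAM_POLICY_SHIFT) &
                 DOT11_ADDBA_PARAM_POLICY_MASK;
        param |= ((uint16)tid << DOT11_ADDBA_PARAM_TID_SHIFT) &
                 DOT11_ADDBA_PARAM_TID_MASK;
        param |= (uint16)(bufsize << DOT11_ADDBA_PARAM_BSIZE_SHIFT) &
                 DOT11_ADDBA_PARAM_BSIZE_MASK;
        return param;
    }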
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 addba_param_set;
+ uint16 timeout;
+ uint16 start_seqnum;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 status;
+ uint16 addba_param_set;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9
+
+
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000
+#define DOT11_DELBA_PARAM_TID_SHIFT 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+ uint8 category;
+ uint8 action;
+ uint16 delba_param_set;
+ uint16 reason;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN 6
+
+
+#define DOT11_BSSTYPE_INFRASTRUCTURE 0
+#define DOT11_BSSTYPE_INDEPENDENT 1
+#define DOT11_BSSTYPE_ANY 2
+#define DOT11_SCANTYPE_ACTIVE 0
+#define DOT11_SCANTYPE_PASSIVE 1
+
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 8
+#define PREN_PREAMBLE_EXT 4
+
+
+#define NPHY_RIFS_TIME 2
+
+
+#define APHY_SLOT_TIME 9
+#define APHY_SIFS_TIME 16
+#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SYMBOL_TIME 4
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define APHY_CWMIN 15
+
+
+#define BPHY_SLOT_TIME 20
+#define BPHY_SIFS_TIME 10
+#define BPHY_DIFS_TIME 50
+#define BPHY_PLCP_TIME 192
+#define BPHY_PLCP_SHORT_TIME 96
+#define BPHY_CWMIN 31
+
+
+#define DOT11_OFDM_SIGNAL_EXTENSION 6
+
+#define PHY_CWMAX 1023
+
+#define DOT11_MAXNUMFRAGS 16
+
+
+typedef struct d11cnt {
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretrie;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 rxfrag;
+ uint32 rxmulti;
+ uint32 rxcrc;
+ uint32 txfrmsnt;
+ uint32 rxundec;
+} d11cnt_t;
+
+
+#define BRCM_PROP_OUI "\x00\x90\x4C"
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ uint16 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_prop_ie_s brcm_prop_ie_t;
+
+#define BRCM_PROP_IE_LEN 6
+
+#define DPT_IE_TYPE 2
+
+
+#define BRCM_OUI "\x00\x10\x18"
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 ver;
+ uint8 assoc;
+ uint8 flags;
+ uint8 flags1;
+ uint16 amsdu_mtu_pref;
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN 11
+#define BRCM_IE_VER 2
+#define BRCM_IE_LEGACY_AES_VER 1
+
+
+#ifdef WLAFTERBURNER
+#define BRF_ABCAP 0x1
+#define BRF_ABRQRD 0x2
+#define BRF_ABCOUNTER_MASK 0xf0
+#define BRF_ABCOUNTER_SHIFT 4
+#endif
+#define BRF_LZWDS 0x4
+#define BRF_BLOCKACK 0x8
+
+
+#define BRF1_AMSDU 0x1
+#define BRF1_WMEPS 0x4
+#define BRF1_PSOFIX 0x8
+
+#ifdef WLAFTERBURNER
+#define AB_WDS_TIMEOUT_MAX 15
+#define AB_WDS_TIMEOUT_MIN 1
+#endif
+
+#define AB_GUARDCOUNT 10
+
+#define MCSSET_LEN 16
+#define MAX_MCS_NUM (128)
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+ uint16 cap;
+ uint8 params;
+ uint8 supp_mcs[MCSSET_LEN];
+ uint16 ext_htcap;
+ uint32 txbf_cap;
+ uint8 as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+#define HT_PROP_IE_OVERHEAD 4
+#define HT_CAP_IE_LEN 26
+#define HT_CAP_IE_TYPE 51
+
+#define HT_CAP_LDPC_CODING 0x0001
+#define HT_CAP_40MHZ 0x0002
+#define HT_CAP_MIMO_PS_MASK 0x000C
+#define HT_CAP_MIMO_PS_SHIFT 0x0002
+#define HT_CAP_MIMO_PS_OFF 0x0003
+#define HT_CAP_MIMO_PS_RTS 0x0001
+#define HT_CAP_MIMO_PS_ON 0x0000
+#define HT_CAP_GF 0x0010
+#define HT_CAP_SHORT_GI_20 0x0020
+#define HT_CAP_SHORT_GI_40 0x0040
+#define HT_CAP_TX_STBC 0x0080
+#define HT_CAP_RX_STBC_MASK 0x0300
+#define HT_CAP_RX_STBC_SHIFT 8
+#define HT_CAP_DELAYED_BA 0x0400
+#define HT_CAP_MAX_AMSDU 0x0800
+#define HT_CAP_DSSS_CCK 0x1000
+#define HT_CAP_PSMP 0x2000
+#define HT_CAP_40MHZ_INTOLERANT 0x4000
+#define HT_CAP_LSIG_TXOP 0x8000
+
+#define HT_CAP_RX_STBC_NO 0x0
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1
+#define HT_CAP_RX_STBC_TWO_STREAM 0x2
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3
+
+#define HT_MAX_AMSDU 7935
+#define HT_MIN_AMSDU 3835
+
+#define HT_PARAMS_RX_FACTOR_MASK 0x03
+#define HT_PARAMS_DENSITY_MASK 0x1C
+#define HT_PARAMS_DENSITY_SHIFT 2
+
+
+#define AMPDU_MAX_MPDU_DENSITY 7
+#define AMPDU_RX_FACTOR_64K 3
+#define AMPDU_RX_FACTOR_BASE 8*1024
+#define AMPDU_DELIMITER_LEN 4
+
+#define HT_CAP_EXT_PCO 0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT 1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8
+#define HT_CAP_EXT_HTC 0x0400
+#define HT_CAP_EXT_RD_RESP 0x0800
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+ uint8 ctl_ch;
+ uint8 byte1;
+ uint16 opmode;
+ uint16 misc_bits;
+ uint8 basic_mcs[MCSSET_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN 22
+#define HT_ADD_IE_TYPE 52
+
+
+#define HT_BW_ANY 0x04
+#define HT_RIFS_PERMITTED 0x08
+
+
+#define HT_OPMODE_MASK 0x0003
+#define HT_OPMODE_SHIFT 0
+#define HT_OPMODE_PURE 0x0000
+#define HT_OPMODE_OPTIONAL 0x0001
+#define HT_OPMODE_HT20IN40 0x0002
+#define HT_OPMODE_MIXED 0x0003
+#define HT_OPMODE_NONGF 0x0004
+#define DOT11N_TXBURST 0x0008
+#define DOT11N_OBSS_NONHT 0x0010
+
+
+#define HT_BASIC_STBC_MCS 0x007f
+#define HT_DUAL_STBC_PROT 0x0080
+#define HT_SECOND_BCN 0x0100
+#define HT_LSIG_TXOP 0x0200
+#define HT_PCO_ACTIVE 0x0400
+#define HT_PCO_PHASE 0x0800
+#define HT_DUALCTS_PROTECTION 0x0080
+
+
+#define DOT11N_2G_TXBURST_LIMIT 6160
+#define DOT11N_5G_TXBURST_LIMIT 3080
+
+
+#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ >> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_MIXED)
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_HT20IN40)
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_OPTIONAL)
+#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
+ HT_MIXEDMODE_PRESENT((add_ie)))
+#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+ == HT_OPMODE_NONGF)
+#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+ == DOT11N_TXBURST)
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+ == DOT11N_OBSS_NONHT)
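
The accessors above all read the little-endian opmode field of an HT operation (additional) IE. A short illustrative use, assuming add_ie points at a ht_add_ie_t and ltoh16_ua() comes from the driver's unaligned byte-order helpers:

    static void ht_opmode_summary(ht_add_ie_t *add_ie, uint16 *opmode,
                                  int *use_protection, int *nongf_present)
    {
        *opmode         = GET_HT_OPMODE(add_ie);       /* 0..3 operating mode */
        *use_protection = HT_USE_PROTECTION(add_ie);   /* HT20-in-40 or mixed */
        *nongf_present  = HT_NONGF_PRESENT(add_ie);    /* non-greenfield STAs present */
    }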
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+ uint16 passive_dwell;
+ uint16 active_dwell;
+ uint16 bss_widthscan_interval;
+ uint16 passive_total;
+ uint16 active_total;
+ uint16 chanwidth_transition_dly;
+ uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+ uint8 id;
+ uint8 len;
+ obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t)
+
+
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+ uchar id;
+ uchar len;
+ uchar oui [3];
+ uchar data [1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN 2
+#define VNDR_IE_MIN_LEN 3
+#define VNDR_IE_MAX_LEN 256
+
+
+#define WPA_VERSION 1
+#define WPA_OUI "\x00\x50\xF2"
+
+#define WPA2_VERSION 1
+#define WPA2_VERSION_LEN 2
+#define WPA2_OUI "\x00\x0F\xAC"
+
+#define WPA_OUI_LEN 3
+
+
+#define RSN_AKM_NONE 0
+#define RSN_AKM_UNSPECIFIED 1
+#define RSN_AKM_PSK 2
+
+
+#define DOT11_MAX_DEFAULT_KEYS 4
+#define DOT11_MAX_KEY_SIZE 32
+#define DOT11_MAX_IV_SIZE 16
+#define DOT11_EXT_IV_FLAG (1<<5)
+#define DOT11_WPA_KEY_RSC_LEN 8
+
+#define WEP1_KEY_SIZE 5
+#define WEP1_KEY_HEX_SIZE 10
+#define WEP128_KEY_SIZE 13
+#define WEP128_KEY_HEX_SIZE 26
+#define TKIP_MIC_SIZE 8
+#define TKIP_EOM_SIZE 7
+#define TKIP_EOM_FLAG 0x5a
+#define TKIP_KEY_SIZE 32
+#define TKIP_MIC_AUTH_TX 16
+#define TKIP_MIC_AUTH_RX 24
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX
+#define AES_KEY_SIZE 16
+#define AES_MIC_SIZE 8
+
+#define SMS4_KEY_LEN 16
+#define SMS4_WPI_CBC_MAC_LEN 16
+
+
+#include <packed_section_end.h>
+
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/802.11e.h b/drivers/net/wireless/bcm4329/include/proto/802.11e.h
new file mode 100644
index 000000000000..1dd6f45b1ed8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/802.11e.h
@@ -0,0 +1,131 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h,v 1.5.56.1 2008/11/20 00:51:18 Exp $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+ uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+ uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */
+ uint8 type; /* WME_TYPE */
+ uint8 subtype; /* WME_SUBTYPE_TSPEC */
+ uint8 version; /* WME_VERSION */
+ tsinfo_t tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint32 min_srv_interval; /* Minimum Service Interval (us) */
+ uint32 max_srv_interval; /* Maximum Service Interval (us) */
+ uint32 inactivity_interval; /* Inactivity Interval (us) */
+ uint32 suspension_interval; /* Suspension Interval (us) */
+ uint32 srv_start_time; /* Service Start Time (us) */
+ uint32 min_data_rate; /* Minimum Data Rate (bps) */
+ uint32 mean_data_rate; /* Mean Data Rate (bps) */
+ uint32 peak_data_rate; /* Peak Data Rate (bps) */
+ uint32 max_burst_size; /* Maximum Burst Size (bytes) */
+ uint32 delay_bound; /* Delay Bound (us) */
+ uint32 min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including 2 bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */
+#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */
+#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */
+#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \
+ TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \
+ TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+ ((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+ ((prio) << TS_INFO_USER_PRIO_SHIFT))
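
Reading fields back out of the packed three-octet TS Info is just the mirror of the accessors above. Illustrative only; the helper name is invented:

    static void tsinfo_fields(const tsinfo_t *ti, uint8 *tid, uint8 *dir, uint8 *prio)
    {
        *tid  = WLC_CAC_GET_TID(*ti);        /* octet 0, bits 4:1 */
        *dir  = WLC_CAC_GET_DIR(*ti);        /* octet 0, bits 6:5 */
        *prio = WLC_CAC_GET_USER_PRIO(*ti);  /* octet 1, bits 5:3 */
    }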
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT 300 /* default ADDTS response timeout in ms */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
+#define DOT11E_STATUS_END_TS 37 /* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcm4329/include/proto/802.1d.h b/drivers/net/wireless/bcm4329/include/proto/802.1d.h
new file mode 100644
index 000000000000..45c728bc2976
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/802.1d.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h,v 9.3 2007/04/10 21:33:06 Exp $
+ */
+
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+
+#define PRIO_8021D_NONE 2
+#define PRIO_8021D_BK 1
+#define PRIO_8021D_BE 0
+#define PRIO_8021D_EE 3
+#define PRIO_8021D_CL 4
+#define PRIO_8021D_VI 5
+#define PRIO_8021D_VO 6
+#define PRIO_8021D_NC 7
+#define MAXPRIO 7
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1
+
+
+#define PRIO2PREC(prio) \
+ (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio))
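
PRIO2PREC() swaps only the BE (0) and NONE (2) values, so that ordering by precedence runs NONE, BK, BE, EE, ..., NC while every other 802.1D priority maps to itself. A few worked values, evaluated from the macro above (illustrative only):

    PRIO2PREC(PRIO_8021D_BE)   /* == 2 */
    PRIO2PREC(PRIO_8021D_NONE) /* == 0 */
    PRIO2PREC(PRIO_8021D_BK)   /* == 1 */
    PRIO2PREC(PRIO_8021D_VO)   /* == 6 */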
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmeth.h b/drivers/net/wireless/bcm4329/include/proto/bcmeth.h
new file mode 100644
index 000000000000..fdb5a2a5648f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/bcmeth.h
@@ -0,0 +1,83 @@
+/*
+ * Broadcom Ethernettype protocol definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h,v 9.9.46.1 2008/11/20 00:51:20 Exp $
+ */
+
+
+
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+
+
+
+
+#define BCMILCP_SUBTYPE_RATE 1
+#define BCMILCP_SUBTYPE_LINK 2
+#define BCMILCP_SUBTYPE_CSA 3
+#define BCMILCP_SUBTYPE_LARQ 4
+#define BCMILCP_SUBTYPE_VENDOR 5
+#define BCMILCP_SUBTYPE_FLH 17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
+#define BCMILCP_SUBTYPE_CERT 32770
+#define BCMILCP_SUBTYPE_SES 32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED 0
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define BCMILCP_BCM_SUBTYPE_SES 2
+
+
+#define BCMILCP_BCM_SUBTYPE_DPT 4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+ uint16 subtype;
+ uint16 length;
+ uint8 version;
+ uint8 oui[3];
+
+ uint16 usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmevent.h b/drivers/net/wireless/bcm4329/include/proto/bcmevent.h
new file mode 100644
index 000000000000..1f8ecb14d97a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/bcmevent.h
@@ -0,0 +1,212 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h,v 9.34.4.1.20.16.64.1 2010/11/08 21:57:03 Exp $
+ *
+ */
+
+
+
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION 1
+#define BCM_MSG_IFNAME_MAX 16
+
+
+#define WLC_EVENT_MSG_LINK 0x01
+#define WLC_EVENT_MSG_FLUSHTXQ 0x02
+#define WLC_EVENT_MSG_GROUP 0x04
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+ struct ether_header eth;
+ bcmeth_hdr_t bcm_hdr;
+ wl_event_msg_t event;
+
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
+
+
+#define WLC_E_SET_SSID 0
+#define WLC_E_JOIN 1
+#define WLC_E_START 2
+#define WLC_E_AUTH 3
+#define WLC_E_AUTH_IND 4
+#define WLC_E_DEAUTH 5
+#define WLC_E_DEAUTH_IND 6
+#define WLC_E_ASSOC 7
+#define WLC_E_ASSOC_IND 8
+#define WLC_E_REASSOC 9
+#define WLC_E_REASSOC_IND 10
+#define WLC_E_DISASSOC 11
+#define WLC_E_DISASSOC_IND 12
+#define WLC_E_QUIET_START 13
+#define WLC_E_QUIET_END 14
+#define WLC_E_BEACON_RX 15
+#define WLC_E_LINK 16
+#define WLC_E_MIC_ERROR 17
+#define WLC_E_NDIS_LINK 18
+#define WLC_E_ROAM 19
+#define WLC_E_TXFAIL 20
+#define WLC_E_PMKID_CACHE 21
+#define WLC_E_RETROGRADE_TSF 22
+#define WLC_E_PRUNE 23
+#define WLC_E_AUTOAUTH 24
+#define WLC_E_EAPOL_MSG 25
+#define WLC_E_SCAN_COMPLETE 26
+#define WLC_E_ADDTS_IND 27
+#define WLC_E_DELTS_IND 28
+#define WLC_E_BCNSENT_IND 29
+#define WLC_E_BCNRX_MSG 30
+#define WLC_E_BCNLOST_MSG 31
+#define WLC_E_ROAM_PREP 32
+#define WLC_E_PFN_NET_FOUND 33
+#define WLC_E_PFN_NET_LOST 34
+#define WLC_E_RESET_COMPLETE 35
+#define WLC_E_JOIN_START 36
+#define WLC_E_ROAM_START 37
+#define WLC_E_ASSOC_START 38
+#define WLC_E_IBSS_ASSOC 39
+#define WLC_E_RADIO 40
+#define WLC_E_PSM_WATCHDOG 41
+#define WLC_E_PROBREQ_MSG 44
+#define WLC_E_SCAN_CONFIRM_IND 45
+#define WLC_E_PSK_SUP 46
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define WLC_E_EXCEEDED_MEDIUM_TIME 48
+#define WLC_E_ICV_ERROR 49
+#define WLC_E_UNICAST_DECODE_ERROR 50
+#define WLC_E_MULTICAST_DECODE_ERROR 51
+#define WLC_E_TRACE 52
+#define WLC_E_IF 54
+#define WLC_E_RSSI 56
+#define WLC_E_PFN_SCAN_COMPLETE 57
+#define WLC_E_ACTION_FRAME 58
+#define WLC_E_ACTION_FRAME_COMPLETE 59
+
+#define WLC_E_ESCAN_RESULT 69
+#define WLC_E_WAKE_EVENT 70
+#define WLC_E_RELOAD 71
+#define WLC_E_LAST 72
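
Received event frames carry the wl_event_msg_t fields in network byte order, so drivers typically run them through ntoh16()/ntoh32() before comparing against the codes above. A hedged sketch, assuming those conversion helpers exist in the driver's endian utilities (the function name is invented):

    static int bcm_event_is_link_up(const bcm_event_t *pkt)
    {
        const wl_event_msg_t *msg = &pkt->event;

        return ntoh32(msg->event_type) == WLC_E_LINK &&
               (ntoh16(msg->flags) & WLC_EVENT_MSG_LINK);
    }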
+
+
+
+#define WLC_E_STATUS_SUCCESS 0
+#define WLC_E_STATUS_FAIL 1
+#define WLC_E_STATUS_TIMEOUT 2
+#define WLC_E_STATUS_NO_NETWORKS 3
+#define WLC_E_STATUS_ABORT 4
+#define WLC_E_STATUS_NO_ACK 5
+#define WLC_E_STATUS_UNSOLICITED 6
+#define WLC_E_STATUS_ATTEMPT 7
+#define WLC_E_STATUS_PARTIAL 8
+#define WLC_E_STATUS_NEWSCAN 9
+#define WLC_E_STATUS_NEWASSOC 10
+#define WLC_E_STATUS_11HQUIET 11
+#define WLC_E_STATUS_SUPPRESS 12
+#define WLC_E_STATUS_NOCHANS 13
+#define WLC_E_STATUS_CCXFASTRM 14
+#define WLC_E_STATUS_CS_ABORT 15
+
+
+#define WLC_E_REASON_INITIAL_ASSOC 0
+#define WLC_E_REASON_LOW_RSSI 1
+#define WLC_E_REASON_DEAUTH 2
+#define WLC_E_REASON_DISASSOC 3
+#define WLC_E_REASON_BCNS_LOST 4
+#define WLC_E_REASON_FAST_ROAM_FAILED 5
+#define WLC_E_REASON_DIRECTED_ROAM 6
+#define WLC_E_REASON_TSPEC_REJECTED 7
+#define WLC_E_REASON_BETTER_AP 8
+
+
+#define WLC_E_PRUNE_ENCR_MISMATCH 1
+#define WLC_E_PRUNE_BCAST_BSSID 2
+#define WLC_E_PRUNE_MAC_DENY 3
+#define WLC_E_PRUNE_MAC_NA 4
+#define WLC_E_PRUNE_REG_PASSV 5
+#define WLC_E_PRUNE_SPCT_MGMT 6
+#define WLC_E_PRUNE_RADAR 7
+#define WLC_E_RSN_MISMATCH 8
+#define WLC_E_PRUNE_NO_COMMON_RATES 9
+#define WLC_E_PRUNE_BASIC_RATES 10
+#define WLC_E_PRUNE_CIPHER_NA 12
+#define WLC_E_PRUNE_KNOWN_STA 13
+#define WLC_E_PRUNE_WDS_PEER 15
+#define WLC_E_PRUNE_QBSS_LOAD 16
+#define WLC_E_PRUNE_HOME_AP 17
+
+
+#define WLC_E_SUP_OTHER 0
+#define WLC_E_SUP_DECRYPT_KEY_DATA 1
+#define WLC_E_SUP_BAD_UCAST_WEP128 2
+#define WLC_E_SUP_BAD_UCAST_WEP40 3
+#define WLC_E_SUP_UNSUP_KEY_LEN 4
+#define WLC_E_SUP_PW_KEY_CIPHER 5
+#define WLC_E_SUP_MSG3_TOO_MANY_IE 6
+#define WLC_E_SUP_MSG3_IE_MISMATCH 7
+#define WLC_E_SUP_NO_INSTALL_FLAG 8
+#define WLC_E_SUP_MSG3_NO_GTK 9
+#define WLC_E_SUP_GRP_KEY_CIPHER 10
+#define WLC_E_SUP_GRP_MSG1_NO_GTK 11
+#define WLC_E_SUP_GTK_DECRYPT_FAIL 12
+#define WLC_E_SUP_SEND_FAIL 13
+#define WLC_E_SUP_DEAUTH 14
+#define WLC_E_SUP_WPA_PSK_TMO 15
+
+
+#define WLC_E_IF_ADD 1
+#define WLC_E_IF_DEL 2
+
+#define WLC_E_RELOAD_STATUS1 1
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmip.h b/drivers/net/wireless/bcm4329/include/proto/bcmip.h
new file mode 100644
index 000000000000..9d2fd6fba484
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/bcmip.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h,v 9.16.186.4 2009/01/27 04:25:25 Exp $
+ */
+
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define IP_VER_OFFSET 0x0
+#define IP_VER_MASK 0xf0
+#define IP_VER_SHIFT 4
+#define IP_VER_4 4
+#define IP_VER_6 6
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1
+#define IP_PROT_TCP 0x6
+#define IP_PROT_UDP 0x11
+
+
+#define IPV4_VER_HL_OFFSET 0
+#define IPV4_TOS_OFFSET 1
+#define IPV4_PKTLEN_OFFSET 2
+#define IPV4_PKTFLAG_OFFSET 6
+#define IPV4_PROT_OFFSET 9
+#define IPV4_CHKSUM_OFFSET 10
+#define IPV4_SRC_IP_OFFSET 12
+#define IPV4_DEST_IP_OFFSET 16
+#define IPV4_OPTIONS_OFFSET 20
+
+
+#define IPV4_VER_MASK 0xf0
+#define IPV4_VER_SHIFT 4
+
+#define IPV4_HLEN_MASK 0x0f
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN 4
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define IPV4_TOS_DSCP_MASK 0xfc
+#define IPV4_TOS_DSCP_SHIFT 2
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0
+#define IPV4_TOS_PREC_SHIFT 5
+
+#define IPV4_TOS_LOWDELAY 0x10
+#define IPV4_TOS_THROUGHPUT 0x8
+#define IPV4_TOS_RELIABILITY 0x4
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV 0x8000
+#define IPV4_FRAG_DONT 0x4000
+#define IPV4_FRAG_MORE 0x2000
+#define IPV4_FRAG_OFFSET_MASK 0x1fff
+
+#define IPV4_ADDR_STR_LEN 16
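
For QoS mapping, drivers usually pull the DSCP codepoint (or the old precedence bits) out of the ToS octet with the masks above. Illustrative helpers only, relying on the IPV4_TOS() accessor from this header:

    static uint8 ipv4_dscp(const void *ipv4_body)
    {
        return (IPV4_TOS(ipv4_body) & IPV4_TOS_DSCP_MASK) >> IPV4_TOS_DSCP_SHIFT;
    }

    static uint8 ipv4_precedence(const void *ipv4_body)
    {
        /* top three ToS bits; often reused directly as an 802.1D priority */
        return (IPV4_TOS(ipv4_body) & IPV4_TOS_PREC_MASK) >> IPV4_TOS_PREC_SHIFT;
    }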
+
+
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+ uint8 addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+ uint8 version_ihl;
+ uint8 tos;
+ uint16 tot_len;
+ uint16 id;
+ uint16 frag;
+ uint8 ttl;
+ uint8 prot;
+ uint16 hdr_chksum;
+ uint8 src_ip[IPV4_ADDR_LEN];
+ uint8 dst_ip[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+
+#define IPV6_PAYLOAD_LEN_OFFSET 4
+#define IPV6_NEXT_HDR_OFFSET 6
+#define IPV6_HOP_LIMIT_OFFSET 7
+#define IPV6_SRC_IP_OFFSET 8
+#define IPV6_DEST_IP_OFFSET 24
+
+
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+ (((uint8 *)(ipv6_body))[2] << 8) | \
+ (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN 16
+
+
+#ifndef IP_TOS
+#define IP_TOS(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+#endif
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/eapol.h b/drivers/net/wireless/bcm4329/include/proto/eapol.h
new file mode 100644
index 000000000000..95e76ff18c6b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/eapol.h
@@ -0,0 +1,172 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * $Id: eapol.h,v 9.18.260.1.2.1.6.6 2009/04/08 05:00:08 Exp $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define AKW_BLOCK_LEN 8 /* The only def we need here */
+
+/* EAPOL for 802.3/Ethernet */
+typedef struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2
+#define WPA_EAPOL_VERSION 1
+#define LEAP_EAPOL_VERSION 1
+#define SES_EAPOL_VERSION 1
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8
+#define EAPOL_KEY_IV_LEN 16
+#define EAPOL_KEY_SIG_LEN 16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80
+#define EAPOL_KEY_BROADCAST 0
+#define EAPOL_KEY_UNICAST 0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN 8
+#define EAPOL_WPA_KEY_NONCE_LEN 32
+#define EAPOL_WPA_KEY_IV_LEN 16
+#define EAPOL_WPA_KEY_ID_LEN 8
+#define EAPOL_WPA_KEY_RSC_LEN 8
+#define EAPOL_WPA_KEY_MIC_LEN 16
+#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE 32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */
+ unsigned short data_len; /* Key Data Length */
+ unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1 0x01
+#define WPA_KEY_DESC_V2 0x02
+#define WPA_KEY_PAIRWISE 0x08
+#define WPA_KEY_INSTALL 0x40
+#define WPA_KEY_ACK 0x80
+#define WPA_KEY_MIC 0x100
+#define WPA_KEY_SECURE 0x200
+#define WPA_KEY_ERROR 0x400
+#define WPA_KEY_REQ 0x800
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0 0x00
+#define WPA_KEY_INDEX_1 0x10
+#define WPA_KEY_INDEX_2 0x20
+#define WPA_KEY_INDEX_3 0x30
+#define WPA_KEY_INDEX_MASK 0x30
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
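
During the 4-way handshake these key_info bits identify which message arrived; message 1, for example, is a pairwise frame with ACK set but MIC and INSTALL clear. A rough, illustrative check (key_info is assumed already in host order; the function name is invented):

    static int is_eapol_key_msg1(uint16 key_info)
    {
        return (key_info & WPA_KEY_PAIRWISE) &&
               (key_info & WPA_KEY_ACK) &&
               !(key_info & WPA_KEY_MIC) &&
               !(key_info & WPA_KEY_INSTALL);
    }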
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 type;
+ uint8 length;
+ uint8 oui[3];
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK 1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2
+#define WPA2_KEY_DATA_SUBTYPE_MAC 3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 flags;
+ uint8 reserved;
+ uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2
+
+#define WPA2_GTK_INDEX_MASK 0x03
+#define WPA2_GTK_INDEX_SHIFT 0x00
+
+#define WPA2_GTK_TRANSMIT 0x04
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 reserved[2];
+ uint8 mac[ETHER_ADDR_LEN];
+ uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD 0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/proto/ethernet.h b/drivers/net/wireless/bcm4329/include/proto/ethernet.h
new file mode 100644
index 000000000000..9ad2ea0c70fd
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/ethernet.h
@@ -0,0 +1,148 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h,v 9.45.56.5 2010/02/22 22:04:36 Exp $
+ */
+
+
+#ifndef _NET_ETHERNET_H_
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define ETHER_ADDR_LEN 6
+
+
+#define ETHER_TYPE_LEN 2
+
+
+#define ETHER_CRC_LEN 4
+
+
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+
+#define ETHER_MIN_LEN 64
+
+
+#define ETHER_MIN_DATA 46
+
+
+#define ETHER_MAX_LEN 1518
+
+
+#define ETHER_MAX_DATA 1500
+
+
+#define ETHER_TYPE_MIN 0x0600
+#define ETHER_TYPE_IP 0x0800
+#define ETHER_TYPE_ARP 0x0806
+#define ETHER_TYPE_8021Q 0x8100
+#define ETHER_TYPE_BRCM 0x886c
+#define ETHER_TYPE_802_1X 0x888e
+#define ETHER_TYPE_WAI 0x88b4
+#ifdef BCMWPA2
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7
+#endif
+
+
+#define ETHER_BRCM_SUBTYPE_LEN 4
+#define ETHER_BRCM_CRAM 1
+
+
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN)
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN)
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN)
+
+
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+
+#ifndef __INCif_etherh
+
+BWL_PRE_PACKED_STRUCT struct ether_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct ether_addr {
+ uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif
+
+
+#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+
+#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+
+#define ether_cmp(a, b) (!(((short*)a)[0] == ((short*)b)[0]) | \
+ !(((short*)a)[1] == ((short*)b)[1]) | \
+ !(((short*)a)[2] == ((short*)b)[2]))
+
+
+#define ether_copy(s, d) { \
+ ((short*)d)[0] = ((short*)s)[0]; \
+ ((short*)d)[1] = ((short*)s)[1]; \
+ ((short*)d)[2] = ((short*)s)[2]; }
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+
+#define ETHER_ISBCAST(ea) ((((uint8 *)(ea))[0] & \
+ ((uint8 *)(ea))[1] & \
+ ((uint8 *)(ea))[2] & \
+ ((uint8 *)(ea))[3] & \
+ ((uint8 *)(ea))[4] & \
+ ((uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea) ((((uint8 *)(ea))[0] | \
+ ((uint8 *)(ea))[1] | \
+ ((uint8 *)(ea))[2] | \
+ ((uint8 *)(ea))[3] | \
+ ((uint8 *)(ea))[4] | \
+ ((uint8 *)(ea))[5]) == 0)
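
The address helpers above compose naturally into receive-path checks. Illustrative only (the helper name is invented):

    static int ether_dst_class(const struct ether_header *eh)
    {
        if (ETHER_ISBCAST(eh->ether_dhost))
            return 2;                  /* broadcast */
        if (ETHER_ISMULTI(eh->ether_dhost))
            return 1;                  /* other multicast */
        return 0;                      /* unicast */
    }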
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/sdspi.h b/drivers/net/wireless/bcm4329/include/proto/sdspi.h
new file mode 100644
index 000000000000..7739e68a2440
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/sdspi.h
@@ -0,0 +1,71 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h,v 9.1.20.1 2008/05/06 22:59:19 Exp $
+ */
+
+#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */
+#define SPI_START_S 31
+#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */
+#define SPI_DIR_S 30
+#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S 24
+#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */
+#define SPI_RW_S 23
+#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */
+#define SPI_FUNC_S 20
+#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */
+#define SPI_RAW_S 19
+#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */
+#define SPI_STUFF_S 18
+#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */
+#define SPI_BLKMODE_S 19
+#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */
+#define SPI_OPCODE_S 18
+#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */
+#define SPI_ADDR_S 1
+#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */
+#define SPI_STUFF0_S 0
+
+#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */
+#define SPI_RSP_START_S 7
+#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */
+#define SPI_RSP_PARAM_ERR_S 6
+#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */
+#define SPI_RSP_RFU5_S 5
+#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */
+#define SPI_RSP_FUNC_ERR_S 4
+#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */
+#define SPI_RSP_CRC_ERR_S 3
+#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */
+#define SPI_RSP_ILL_CMD_S 2
+#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */
+#define SPI_RSP_RFU1_S 1
+#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */
+#define SPI_RSP_IDLE_S 0
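
The *_M/*_S pairs above follow the usual mask-plus-shift convention, so testing an R1 response byte for error bits looks like the sketch below; BITFIELD_MASK() itself is assumed to come from the driver's shared bit-field helpers (illustrative only):

    static int sdspi_r1_has_error(uint8 rsp)
    {
        return ((rsp >> SPI_RSP_PARAM_ERR_S) & SPI_RSP_PARAM_ERR_M) ||
               ((rsp >> SPI_RSP_FUNC_ERR_S)  & SPI_RSP_FUNC_ERR_M)  ||
               ((rsp >> SPI_RSP_CRC_ERR_S)   & SPI_RSP_CRC_ERR_M)   ||
               ((rsp >> SPI_RSP_ILL_CMD_S)   & SPI_RSP_ILL_CMD_M);
    }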
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */
+#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK 0x80
diff --git a/drivers/net/wireless/bcm4329/include/proto/vlan.h b/drivers/net/wireless/bcm4329/include/proto/vlan.h
new file mode 100644
index 000000000000..670bc44c6bd6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/vlan.h
@@ -0,0 +1,63 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h,v 9.4.196.2 2008/12/07 21:19:20 Exp $
+ */
+
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define VLAN_VID_MASK 0xfff
+#define VLAN_CFI_SHIFT 12
+#define VLAN_PRI_SHIFT 13
+
+#define VLAN_PRI_MASK 7
+
+#define VLAN_TAG_LEN 4
+#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN)
+
+#define VLAN_TPID 0x8100
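
An 802.1Q tag splits with the masks above into priority and VLAN ID. A minimal illustrative helper (tag is the host-order 16-bit tag control field; the function name is invented):

    static void vlan_tag_split(uint16 tag, uint8 *prio, uint16 *vid)
    {
        *prio = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;  /* 802.1p priority */
        *vid  = tag & VLAN_VID_MASK;                      /* 12-bit VLAN ID */
    }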
+
+struct ethervlan_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 vlan_type;
+ uint16 vlan_tag;
+ uint16 ether_type;
+};
+
+#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/proto/wpa.h b/drivers/net/wireless/bcm4329/include/proto/wpa.h
new file mode 100644
index 000000000000..f5d0cd539777
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/wpa.h
@@ -0,0 +1,159 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h,v 1.16.166.1.20.1 2008/11/20 00:51:31 Exp $
+ */
+
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+
+#include <packed_section_start.h>
+
+
+
+
+#define DOT11_RC_INVALID_WPA_IE 13
+#define DOT11_RC_MIC_FAILURE 14
+#define DOT11_RC_4WH_TIMEOUT 15
+#define DOT11_RC_GTK_UPDATE_TIMEOUT 16
+#define DOT11_RC_WPA_IE_MISMATCH 17
+#define DOT11_RC_INVALID_MC_CIPHER 18
+#define DOT11_RC_INVALID_UC_CIPHER 19
+#define DOT11_RC_INVALID_AKMP 20
+#define DOT11_RC_BAD_WPA_VERSION 21
+#define DOT11_RC_INVALID_WPA_CAP 22
+#define DOT11_RC_8021X_AUTH_FAIL 23
+
+#define WPA2_PMKID_LEN 16
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 tag;
+ uint8 length;
+ uint8 oui[3];
+ uint8 oui_type;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN 4
+#define WPA_IE_FIXED_LEN 8
+#define WPA_IE_TAG_FIXED_LEN 6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 tag;
+ uint8 length;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN 4
+#define WPA_RSN_IE_TAG_FIXED_LEN 2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 oui[3];
+ uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN 4
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN 2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+
+#define WPA_CIPHER_NONE 0
+#define WPA_CIPHER_WEP_40 1
+#define WPA_CIPHER_TKIP 2
+#define WPA_CIPHER_AES_OCB 3
+#define WPA_CIPHER_AES_CCM 4
+#define WPA_CIPHER_WEP_104 5
+
+#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
+ (cipher) == WPA_CIPHER_WEP_40 || \
+ (cipher) == WPA_CIPHER_WEP_104 || \
+ (cipher) == WPA_CIPHER_TKIP || \
+ (cipher) == WPA_CIPHER_AES_OCB || \
+ (cipher) == WPA_CIPHER_AES_CCM)
+
+
+#define WPA_TKIP_CM_DETECT 60
+#define WPA_TKIP_CM_BLOCK 60
+
+
+#define RSN_CAP_LEN 2
+
+
+#define RSN_CAP_PREAUTH 0x0001
+#define RSN_CAP_NOPAIRWISE 0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4
+#define RSN_CAP_1_REPLAY_CNTR 0
+#define RSN_CAP_2_REPLAY_CNTRS 1
+#define RSN_CAP_4_REPLAY_CNTRS 2
+#define RSN_CAP_16_REPLAY_CNTRS 3
+
+
+#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+
+#define WPA_CAP_LEN RSN_CAP_LEN
+
+#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
+
+
+
+#include <packed_section_end.h>
+
+#endif
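The low/high byte pairs in the IE structures above encode 16-bit counts little-endian, and IS_WPA_CIPHER() screens cipher IDs. A small standalone sketch (illustrative, not part of the patch), with the constants copied locally so it compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define WPA_CIPHER_NONE    0
#define WPA_CIPHER_WEP_40  1
#define WPA_CIPHER_TKIP    2
#define WPA_CIPHER_AES_OCB 3
#define WPA_CIPHER_AES_CCM 4
#define WPA_CIPHER_WEP_104 5

#define IS_WPA_CIPHER(c) ((c) == WPA_CIPHER_NONE || (c) == WPA_CIPHER_WEP_40 || \
                          (c) == WPA_CIPHER_WEP_104 || (c) == WPA_CIPHER_TKIP || \
                          (c) == WPA_CIPHER_AES_OCB || (c) == WPA_CIPHER_AES_CCM)

struct le16 { uint8_t low, high; };   /* same shape as the IE count fields above */

static uint16_t le16_val(struct le16 v) { return (uint16_t)(v.low | v.high << 8); }

int main(void)
{
    struct le16 count = { .low = 2, .high = 0 };   /* two unicast suites advertised */
    int cipher = WPA_CIPHER_AES_CCM;

    printf("suites=%u cipher_ok=%d\n", (unsigned)le16_val(count), IS_WPA_CIPHER(cipher));
    return 0;
}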
diff --git a/drivers/net/wireless/bcm4329/include/sbchipc.h b/drivers/net/wireless/bcm4329/include/sbchipc.h
new file mode 100644
index 000000000000..39e5c8d6aed0
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbchipc.h
@@ -0,0 +1,1026 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer,
+ * gpio interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h,v 13.103.2.5.4.5.2.9 2009/07/03 14:23:21 Exp $
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+
+#ifndef _SBCHIPC_H
+#define _SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+typedef volatile struct {
+ uint32 chipid;
+ uint32 capabilities;
+ uint32 corecontrol;
+ uint32 bist;
+
+
+ uint32 otpstatus;
+ uint32 otpcontrol;
+ uint32 otpprog;
+ uint32 PAD;
+
+
+ uint32 intstatus;
+ uint32 intmask;
+ uint32 chipcontrol;
+ uint32 chipstatus;
+
+
+ uint32 jtagcmd;
+ uint32 jtagir;
+ uint32 jtagdr;
+ uint32 jtagctrl;
+
+
+ uint32 flashcontrol;
+ uint32 flashaddress;
+ uint32 flashdata;
+ uint32 PAD[1];
+
+
+ uint32 broadcastaddress;
+ uint32 broadcastdata;
+
+
+ uint32 gpiopullup;
+ uint32 gpiopulldown;
+ uint32 gpioin;
+ uint32 gpioout;
+ uint32 gpioouten;
+ uint32 gpiocontrol;
+ uint32 gpiointpolarity;
+ uint32 gpiointmask;
+
+
+ uint32 gpioevent;
+ uint32 gpioeventintmask;
+
+
+ uint32 watchdog;
+
+
+ uint32 gpioeventintpolarity;
+
+
+ uint32 gpiotimerval;
+ uint32 gpiotimeroutmask;
+
+
+ uint32 clockcontrol_n;
+ uint32 clockcontrol_sb;
+ uint32 clockcontrol_pci;
+ uint32 clockcontrol_m2;
+ uint32 clockcontrol_m3;
+ uint32 clkdiv;
+ uint32 PAD[2];
+
+
+ uint32 pll_on_delay;
+ uint32 fref_sel_delay;
+ uint32 slow_clk_ctl;
+ uint32 PAD[1];
+
+
+ uint32 system_clk_ctl;
+ uint32 clkstatestretch;
+ uint32 PAD[13];
+
+
+ uint32 eromptr;
+
+
+ uint32 pcmcia_config;
+ uint32 pcmcia_memwait;
+ uint32 pcmcia_attrwait;
+ uint32 pcmcia_iowait;
+ uint32 ide_config;
+ uint32 ide_memwait;
+ uint32 ide_attrwait;
+ uint32 ide_iowait;
+ uint32 prog_config;
+ uint32 prog_waitcount;
+ uint32 flash_config;
+ uint32 flash_waitcount;
+ uint32 PAD[4];
+ uint32 PAD[40];
+
+
+
+ uint32 clk_ctl_st;
+ uint32 hw_war;
+ uint32 PAD[70];
+
+
+ uint8 uart0data;
+ uint8 uart0imr;
+ uint8 uart0fcr;
+ uint8 uart0lcr;
+ uint8 uart0mcr;
+ uint8 uart0lsr;
+ uint8 uart0msr;
+ uint8 uart0scratch;
+ uint8 PAD[248];
+
+ uint8 uart1data;
+ uint8 uart1imr;
+ uint8 uart1fcr;
+ uint8 uart1lcr;
+ uint8 uart1mcr;
+ uint8 uart1lsr;
+ uint8 uart1msr;
+ uint8 uart1scratch;
+ uint32 PAD[126];
+
+
+ uint32 pmucontrol;
+ uint32 pmucapabilities;
+ uint32 pmustatus;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmutimer;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 res_table_sel;
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel;
+ uint32 gpioenable;
+ uint32 res_req_timer_sel;
+ uint32 res_req_timer;
+ uint32 res_req_mask;
+ uint32 PAD;
+ uint32 chipcontrol_addr;
+ uint32 chipcontrol_data;
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 PAD[102];
+ uint16 otp[768];
+} chipcregs_t;
+
+#endif
+
+#define CC_CHIPID 0
+#define CC_CAPABILITIES 4
+#define CC_OTPST 0x10
+#define CC_CHIPST 0x2c
+#define CC_JTAGCMD 0x30
+#define CC_JTAGIR 0x34
+#define CC_JTAGDR 0x38
+#define CC_JTAGCTRL 0x3c
+#define CC_WATCHDOG 0x80
+#define CC_CLKC_N 0x90
+#define CC_CLKC_M0 0x94
+#define CC_CLKC_M1 0x98
+#define CC_CLKC_M2 0x9c
+#define CC_CLKC_M3 0xa0
+#define CC_CLKDIV 0xa4
+#define CC_SYS_CLK_CTL 0xc0
+#define CC_CLK_CTL_ST SI_CLK_CTL_ST
+#define CC_EROMPTR 0xfc
+#define PMU_CTL 0x600
+#define PMU_CAP 0x604
+#define PMU_ST 0x608
+#define PMU_RES_STATE 0x60c
+#define PMU_TIMER 0x614
+#define PMU_MIN_RES_MASK 0x618
+#define PMU_MAX_RES_MASK 0x61c
+#define PMU_REG_CONTROL_ADDR 0x658
+#define PMU_REG_CONTROL_DATA 0x65C
+#define PMU_PLL_CONTROL_ADDR 0x660
+#define PMU_PLL_CONTROL_DATA 0x664
+#define CC_OTP 0x800
+
+
+#define CID_ID_MASK 0x0000ffff
+#define CID_REV_MASK 0x000f0000
+#define CID_REV_SHIFT 16
+#define CID_PKG_MASK 0x00f00000
+#define CID_PKG_SHIFT 20
+#define CID_CC_MASK 0x0f000000
+#define CID_CC_SHIFT 24
+#define CID_TYPE_MASK 0xf0000000
+#define CID_TYPE_SHIFT 28
+
+
+#define CC_CAP_UARTS_MASK 0x00000003
+#define CC_CAP_MIPSEB 0x00000004
+#define CC_CAP_UCLKSEL 0x00000018
+#define CC_CAP_UINTCLK 0x00000008
+#define CC_CAP_UARTGPIO 0x00000020
+#define CC_CAP_EXTBUS_MASK 0x000000c0
+#define CC_CAP_EXTBUS_NONE 0x00000000
+#define CC_CAP_EXTBUS_FULL 0x00000040
+#define CC_CAP_EXTBUS_PROG 0x00000080
+#define CC_CAP_FLASH_MASK 0x00000700
+#define CC_CAP_PLL_MASK 0x00038000
+#define CC_CAP_PWR_CTL 0x00040000
+#define CC_CAP_OTPSIZE 0x00380000
+#define CC_CAP_OTPSIZE_SHIFT 19
+#define CC_CAP_OTPSIZE_BASE 5
+#define CC_CAP_JTAGP 0x00400000
+#define CC_CAP_ROM 0x00800000
+#define CC_CAP_BKPLN64 0x08000000
+#define CC_CAP_PMU 0x10000000
+#define CC_CAP_ECI 0x20000000
+
+
+#define PLL_NONE 0x00000000
+#define PLL_TYPE1 0x00010000
+#define PLL_TYPE2 0x00020000
+#define PLL_TYPE3 0x00030000
+#define PLL_TYPE4 0x00008000
+#define PLL_TYPE5 0x00018000
+#define PLL_TYPE6 0x00028000
+#define PLL_TYPE7 0x00038000
+
+
+#define ILP_CLOCK 32000
+
+
+#define ALP_CLOCK 20000000
+
+
+#define HT_CLOCK 80000000
+
+
+#define CC_UARTCLKO 0x00000001
+#define CC_SE 0x00000002
+#define CC_UARTCLKEN 0x00000008
+
+
+#define CHIPCTRL_4321A0_DEFAULT 0x3a4
+#define CHIPCTRL_4321A1_DEFAULT 0x0a4
+#define CHIPCTRL_4321_PLL_DOWN 0x800000
+
+
+#define OTPS_OL_MASK 0x000000ff
+#define OTPS_OL_MFG 0x00000001
+#define OTPS_OL_OR1 0x00000002
+#define OTPS_OL_OR2 0x00000004
+#define OTPS_OL_GU 0x00000008
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100
+#define OTPS_GUP_SW 0x00000200
+#define OTPS_GUP_CI 0x00000400
+#define OTPS_GUP_FUSE 0x00000800
+#define OTPS_READY 0x00001000
+#define OTPS_RV(x) (1 << (16 + (x)))
+#define OTPS_RV_MASK 0x0fff0000
+
+
+#define OTPC_PROGSEL 0x00000001
+#define OTPC_PCOUNT_MASK 0x0000000e
+#define OTPC_PCOUNT_SHIFT 1
+#define OTPC_VSEL_MASK 0x000000f0
+#define OTPC_VSEL_SHIFT 4
+#define OTPC_TMM_MASK 0x00000700
+#define OTPC_TMM_SHIFT 8
+#define OTPC_ODM 0x00000800
+#define OTPC_PROGEN 0x80000000
+
+
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+
+
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+
+#define JCMD_START 0x80000000
+#define JCMD_BUSY 0x80000000
+#define JCMD_STATE_MASK 0x60000000
+#define JCMD_STATE_TLR 0x00000000
+#define JCMD_STATE_PIR 0x20000000
+#define JCMD_STATE_PDR 0x40000000
+#define JCMD_STATE_RTI 0x60000000
+#define JCMD0_ACC_MASK 0x0000f000
+#define JCMD0_ACC_IRDR 0x00000000
+#define JCMD0_ACC_DR 0x00001000
+#define JCMD0_ACC_IR 0x00002000
+#define JCMD0_ACC_RESET 0x00003000
+#define JCMD0_ACC_IRPDR 0x00004000
+#define JCMD0_ACC_PDR 0x00005000
+#define JCMD0_IRW_MASK 0x00000f00
+#define JCMD_ACC_MASK 0x000f0000
+#define JCMD_ACC_IRDR 0x00000000
+#define JCMD_ACC_DR 0x00010000
+#define JCMD_ACC_IR 0x00020000
+#define JCMD_ACC_RESET 0x00030000
+#define JCMD_ACC_IRPDR 0x00040000
+#define JCMD_ACC_PDR 0x00050000
+#define JCMD_ACC_PIR 0x00060000
+#define JCMD_ACC_IRDR_I 0x00070000
+#define JCMD_ACC_DR_I 0x00080000
+#define JCMD_IRW_MASK 0x00001f00
+#define JCMD_IRW_SHIFT 8
+#define JCMD_DRW_MASK 0x0000003f
+
+
+#define JCTRL_FORCE_CLK 4
+#define JCTRL_EXT_EN 2
+#define JCTRL_EN 1
+
+
+#define CLKD_SFLASH 0x0f000000
+#define CLKD_SFLASH_SHIFT 24
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+#define CLKD_JTAG 0x00000f00
+#define CLKD_JTAG_SHIFT 8
+#define CLKD_UART 0x000000ff
+
+
+#define CI_GPIO 0x00000001
+#define CI_EI 0x00000002
+#define CI_TEMP 0x00000004
+#define CI_SIRQ 0x00000008
+#define CI_ECI 0x00000010
+#define CI_PMU 0x00000020
+#define CI_UART 0x00000040
+#define CI_WDRESET 0x80000000
+
+
+#define SCC_SS_MASK 0x00000007
+#define SCC_SS_LPO 0x00000000
+#define SCC_SS_XTAL 0x00000001
+#define SCC_SS_PCI 0x00000002
+#define SCC_LF 0x00000200
+#define SCC_LP 0x00000400
+#define SCC_FS 0x00000800
+#define SCC_IP 0x00001000
+#define SCC_XC 0x00002000
+#define SCC_XP 0x00004000
+#define SCC_CD_MASK 0xffff0000
+#define SCC_CD_SHIFT 16
+
+
+#define SYCC_IE 0x00000001
+#define SYCC_AE 0x00000002
+#define SYCC_FP 0x00000004
+#define SYCC_AR 0x00000008
+#define SYCC_HR 0x00000010
+#define SYCC_CD_MASK 0xffff0000
+#define SYCC_CD_SHIFT 16
+
+
+#define CF_EN 0x00000001
+#define CF_EM_MASK 0x0000000e
+#define CF_EM_SHIFT 1
+#define CF_EM_FLASH 0
+#define CF_EM_SYNC 2
+#define CF_EM_PCMCIA 4
+#define CF_DS 0x00000010
+#define CF_BS 0x00000020
+#define CF_CD_MASK 0x000000c0
+#define CF_CD_SHIFT 6
+#define CF_CD_DIV2 0x00000000
+#define CF_CD_DIV3 0x00000040
+#define CF_CD_DIV4 0x00000080
+#define CF_CE 0x00000100
+#define CF_SB 0x00000200
+
+
+#define PM_W0_MASK 0x0000003f
+#define PM_W1_MASK 0x00001f00
+#define PM_W1_SHIFT 8
+#define PM_W2_MASK 0x001f0000
+#define PM_W2_SHIFT 16
+#define PM_W3_MASK 0x1f000000
+#define PM_W3_SHIFT 24
+
+
+#define PA_W0_MASK 0x0000003f
+#define PA_W1_MASK 0x00001f00
+#define PA_W1_SHIFT 8
+#define PA_W2_MASK 0x001f0000
+#define PA_W2_SHIFT 16
+#define PA_W3_MASK 0x1f000000
+#define PA_W3_SHIFT 24
+
+
+#define PI_W0_MASK 0x0000003f
+#define PI_W1_MASK 0x00001f00
+#define PI_W1_SHIFT 8
+#define PI_W2_MASK 0x001f0000
+#define PI_W2_SHIFT 16
+#define PI_W3_MASK 0x1f000000
+#define PI_W3_SHIFT 24
+
+
+#define PW_W0_MASK 0x0000001f
+#define PW_W1_MASK 0x00001f00
+#define PW_W1_SHIFT 8
+#define PW_W2_MASK 0x001f0000
+#define PW_W2_SHIFT 16
+#define PW_W3_MASK 0x1f000000
+#define PW_W3_SHIFT 24
+
+#define PW_W0 0x0000000c
+#define PW_W1 0x00000a00
+#define PW_W2 0x00020000
+#define PW_W3 0x01000000
+
+
+#define FW_W0_MASK 0x0000003f
+#define FW_W1_MASK 0x00001f00
+#define FW_W1_SHIFT 8
+#define FW_W2_MASK 0x001f0000
+#define FW_W2_SHIFT 16
+#define FW_W3_MASK 0x1f000000
+#define FW_W3_SHIFT 24
+
+
+#define WATCHDOG_CLOCK 48000000
+#define WATCHDOG_CLOCK_5354 32000
+
+
+#define PCTL_ILP_DIV_MASK 0xffff0000
+#define PCTL_ILP_DIV_SHIFT 16
+#define PCTL_PLL_PLLCTL_UPD 0x00000400
+#define PCTL_NOILP_ON_WAIT 0x00000200
+#define PCTL_HT_REQ_EN 0x00000100
+#define PCTL_ALP_REQ_EN 0x00000080
+#define PCTL_XTALFREQ_MASK 0x0000007c
+#define PCTL_XTALFREQ_SHIFT 2
+#define PCTL_ILP_DIV_EN 0x00000002
+#define PCTL_LPO_SEL 0x00000001
+
+
+#define CSTRETCH_HT 0xffff0000
+#define CSTRETCH_ALP 0x0000ffff
+
+
+#define GPIO_ONTIME_SHIFT 16
+
+
+#define CN_N1_MASK 0x3f
+#define CN_N2_MASK 0x3f00
+#define CN_N2_SHIFT 8
+#define CN_PLLC_MASK 0xf0000
+#define CN_PLLC_SHIFT 16
+
+
+#define CC_M1_MASK 0x3f
+#define CC_M2_MASK 0x3f00
+#define CC_M2_SHIFT 8
+#define CC_M3_MASK 0x3f0000
+#define CC_M3_SHIFT 16
+#define CC_MC_MASK 0x1f000000
+#define CC_MC_SHIFT 24
+
+
+#define CC_F6_2 0x02
+#define CC_F6_3 0x03
+#define CC_F6_4 0x05
+#define CC_F6_5 0x09
+#define CC_F6_6 0x11
+#define CC_F6_7 0x21
+
+#define CC_F5_BIAS 5
+
+#define CC_MC_BYPASS 0x08
+#define CC_MC_M1 0x04
+#define CC_MC_M1M2 0x02
+#define CC_MC_M1M2M3 0x01
+#define CC_MC_M1M3 0x11
+
+
+#define CC_T2_BIAS 2
+#define CC_T2M2_BIAS 3
+
+#define CC_T2MC_M1BYP 1
+#define CC_T2MC_M2BYP 2
+#define CC_T2MC_M3BYP 4
+
+
+#define CC_T6_MMASK 1
+#define CC_T6_M0 120000000
+#define CC_T6_M1 100000000
+#define SB2MIPS_T6(sb) (2 * (sb))
+
+
+#define CC_CLOCK_BASE1 24000000
+#define CC_CLOCK_BASE2 12500000
+
+
+#define CLKC_5350_N 0x0311
+#define CLKC_5350_M 0x04020009
+
+
+#define FLASH_NONE 0x000
+#define SFLASH_ST 0x100
+#define SFLASH_AT 0x200
+#define PFLASH 0x700
+
+
+#define CC_CFG_EN 0x0001
+#define CC_CFG_EM_MASK 0x000e
+#define CC_CFG_EM_ASYNC 0x0000
+#define CC_CFG_EM_SYNC 0x0002
+#define CC_CFG_EM_PCMCIA 0x0004
+#define CC_CFG_EM_IDE 0x0006
+#define CC_CFG_DS 0x0010
+#define CC_CFG_CD_MASK 0x00e0
+#define CC_CFG_CE 0x0100
+#define CC_CFG_SB 0x0200
+#define CC_CFG_IS 0x0400
+
+
+#define CC_EB_BASE 0x1a000000
+#define CC_EB_PCMCIA_MEM 0x1a000000
+#define CC_EB_PCMCIA_IO 0x1a200000
+#define CC_EB_PCMCIA_CFG 0x1a400000
+#define CC_EB_IDE 0x1a800000
+#define CC_EB_PCMCIA1_MEM 0x1a800000
+#define CC_EB_PCMCIA1_IO 0x1aa00000
+#define CC_EB_PCMCIA1_CFG 0x1ac00000
+#define CC_EB_PROGIF 0x1b000000
+
+
+
+#define SFLASH_OPCODE 0x000000ff
+#define SFLASH_ACTION 0x00000700
+#define SFLASH_CS_ACTIVE 0x00001000
+#define SFLASH_START 0x80000000
+#define SFLASH_BUSY SFLASH_START
+
+
+#define SFLASH_ACT_OPONLY 0x0000
+#define SFLASH_ACT_OP1D 0x0100
+#define SFLASH_ACT_OP3A 0x0200
+#define SFLASH_ACT_OP3A1D 0x0300
+#define SFLASH_ACT_OP3A4D 0x0400
+#define SFLASH_ACT_OP3A4X4D 0x0500
+#define SFLASH_ACT_OP3A1X4D 0x0700
+
+
+#define SFLASH_ST_WREN 0x0006
+#define SFLASH_ST_WRDIS 0x0004
+#define SFLASH_ST_RDSR 0x0105
+#define SFLASH_ST_WRSR 0x0101
+#define SFLASH_ST_READ 0x0303
+#define SFLASH_ST_PP 0x0302
+#define SFLASH_ST_SE 0x02d8
+#define SFLASH_ST_BE 0x00c7
+#define SFLASH_ST_DP 0x00b9
+#define SFLASH_ST_RES 0x03ab
+#define SFLASH_ST_CSA 0x1000
+
+
+#define SFLASH_ST_WIP 0x01
+#define SFLASH_ST_WEL 0x02
+#define SFLASH_ST_BP_MASK 0x1c
+#define SFLASH_ST_BP_SHIFT 2
+#define SFLASH_ST_SRWD 0x80
+
+
+#define SFLASH_AT_READ 0x07e8
+#define SFLASH_AT_PAGE_READ 0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS 0x01d7
+#define SFLASH_AT_BUF1_WRITE 0x0384
+#define SFLASH_AT_BUF2_WRITE 0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286
+#define SFLASH_AT_BUF1_PROGRAM 0x0288
+#define SFLASH_AT_BUF2_PROGRAM 0x0289
+#define SFLASH_AT_PAGE_ERASE 0x0281
+#define SFLASH_AT_BLOCK_ERASE 0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define SFLASH_AT_BUF1_LOAD 0x0253
+#define SFLASH_AT_BUF2_LOAD 0x0255
+#define SFLASH_AT_BUF1_COMPARE 0x0260
+#define SFLASH_AT_BUF2_COMPARE 0x0261
+#define SFLASH_AT_BUF1_REPROGRAM 0x0258
+#define SFLASH_AT_BUF2_REPROGRAM 0x0259
+
+
+#define SFLASH_AT_READY 0x80
+#define SFLASH_AT_MISMATCH 0x40
+#define SFLASH_AT_ID_MASK 0x38
+#define SFLASH_AT_ID_SHIFT 3
+
+
+
+#define UART_RX 0
+#define UART_TX 0
+#define UART_DLL 0
+#define UART_IER 1
+#define UART_DLM 1
+#define UART_IIR 2
+#define UART_FCR 2
+#define UART_LCR 3
+#define UART_MCR 4
+#define UART_LSR 5
+#define UART_MSR 6
+#define UART_SCR 7
+#define UART_LCR_DLAB 0x80
+#define UART_LCR_WLEN8 0x03
+#define UART_MCR_OUT2 0x08
+#define UART_MCR_LOOP 0x10
+#define UART_LSR_RX_FIFO 0x80
+#define UART_LSR_TDHR 0x40
+#define UART_LSR_THRE 0x20
+#define UART_LSR_BREAK 0x10
+#define UART_LSR_FRAMING 0x08
+#define UART_LSR_PARITY 0x04
+#define UART_LSR_OVERRUN 0x02
+#define UART_LSR_RXRDY 0x01
+#define UART_FCR_FIFO_ENABLE 1
+
+
+#define UART_IIR_FIFO_MASK 0xc0
+#define UART_IIR_INT_MASK 0xf
+#define UART_IIR_MDM_CHG 0x0
+#define UART_IIR_NOINT 0x1
+#define UART_IIR_THRE 0x2
+#define UART_IIR_RCVD_DATA 0x4
+#define UART_IIR_RCVR_STATUS 0x6
+#define UART_IIR_CHAR_TIME 0xc
+
+
+#define UART_IER_EDSSI 8
+#define UART_IER_ELSI 4
+#define UART_IER_ETBEI 2
+#define UART_IER_ERBFI 1
+
+
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+
+
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
+
+
+
+#define PRRT_TIME_MASK 0x03ff
+#define PRRT_INTEN 0x0400
+#define PRRT_REQ_ACTIVE 0x0800
+#define PRRT_ALP_REQ 0x1000
+#define PRRT_HT_REQ 0x2000
+
+
+#define PMURES_BIT(bit) (1 << (bit))
+
+
+#define PMURES_MAX_RESNUM 30
+
+
+
+
+#define PMU0_PLL0_PLLCTL0 0
+#define PMU0_PLL0_PC0_PDIV_MASK 1
+#define PMU0_PLL0_PC0_PDIV_FREQ 25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
+
+
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
+
+
+#define PMU0_PLL0_PLLCTL1 1
+#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
+#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
+#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00
+#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
+#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
+
+
+#define PMU0_PLL0_PLLCTL2 2
+#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
+#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
+
+
+#define RES4328_EXT_SWITCHER_PWM 0
+#define RES4328_BB_SWITCHER_PWM 1
+#define RES4328_BB_SWITCHER_BURST 2
+#define RES4328_BB_EXT_SWITCHER_BURST 3
+#define RES4328_ILP_REQUEST 4
+#define RES4328_RADIO_SWITCHER_PWM 5
+#define RES4328_RADIO_SWITCHER_BURST 6
+#define RES4328_ROM_SWITCH 7
+#define RES4328_PA_REF_LDO 8
+#define RES4328_RADIO_LDO 9
+#define RES4328_AFE_LDO 10
+#define RES4328_PLL_LDO 11
+#define RES4328_BG_FILTBYP 12
+#define RES4328_TX_FILTBYP 13
+#define RES4328_RX_FILTBYP 14
+#define RES4328_XTAL_PU 15
+#define RES4328_XTAL_EN 16
+#define RES4328_BB_PLL_FILTBYP 17
+#define RES4328_RF_PLL_FILTBYP 18
+#define RES4328_BB_PLL_PU 19
+
+#define RES5354_EXT_SWITCHER_PWM 0
+#define RES5354_BB_SWITCHER_PWM 1
+#define RES5354_BB_SWITCHER_BURST 2
+#define RES5354_BB_EXT_SWITCHER_BURST 3
+#define RES5354_ILP_REQUEST 4
+#define RES5354_RADIO_SWITCHER_PWM 5
+#define RES5354_RADIO_SWITCHER_BURST 6
+#define RES5354_ROM_SWITCH 7
+#define RES5354_PA_REF_LDO 8
+#define RES5354_RADIO_LDO 9
+#define RES5354_AFE_LDO 10
+#define RES5354_PLL_LDO 11
+#define RES5354_BG_FILTBYP 12
+#define RES5354_TX_FILTBYP 13
+#define RES5354_RX_FILTBYP 14
+#define RES5354_XTAL_PU 15
+#define RES5354_XTAL_EN 16
+#define RES5354_BB_PLL_FILTBYP 17
+#define RES5354_RF_PLL_FILTBYP 18
+#define RES5354_BB_PLL_PU 19
+
+
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+
+#define PMU2_PHY_PLL_PLLCTL 4
+#define PMU2_SI_PLL_PLLCTL 10
+
+
+#define RES4325_BUCK_BOOST_BURST 0
+#define RES4325_CBUCK_BURST 1
+#define RES4325_CBUCK_PWM 2
+#define RES4325_CLDO_CBUCK_BURST 3
+#define RES4325_CLDO_CBUCK_PWM 4
+#define RES4325_BUCK_BOOST_PWM 5
+#define RES4325_ILP_REQUEST 6
+#define RES4325_ABUCK_BURST 7
+#define RES4325_ABUCK_PWM 8
+#define RES4325_LNLDO1_PU 9
+#define RES4325_OTP_PU 10
+#define RES4325_LNLDO3_PU 11
+#define RES4325_LNLDO4_PU 12
+#define RES4325_XTAL_PU 13
+#define RES4325_ALP_AVAIL 14
+#define RES4325_RX_PWRSW_PU 15
+#define RES4325_TX_PWRSW_PU 16
+#define RES4325_RFPLL_PWRSW_PU 17
+#define RES4325_LOGEN_PWRSW_PU 18
+#define RES4325_AFE_PWRSW_PU 19
+#define RES4325_BBPLL_PWRSW_PU 20
+#define RES4325_HT_AVAIL 21
+
+
+#define RES4325B0_CBUCK_LPOM 1
+#define RES4325B0_CBUCK_BURST 2
+#define RES4325B0_CBUCK_PWM 3
+#define RES4325B0_CLDO_PU 4
+
+
+#define RES4325C1_OTP_PWRSW_PU 10
+#define RES4325C1_LNLDO2_PU 12
+
+
+#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4325_DEFCIS_SEL 0
+#define CST4325_SPROM_SEL 1
+#define CST4325_OTP_SEL 2
+#define CST4325_OTP_PWRDN 3
+#define CST4325_SDIO_USB_MODE_MASK 0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT 2
+#define CST4325_RCAL_VALID_MASK 0x00000008
+#define CST4325_RCAL_VALID_SHIFT 3
+#define CST4325_RCAL_VALUE_MASK 0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT 4
+#define CST4325_PMUTOP_2B_MASK 0x00000200
+#define CST4325_PMUTOP_2B_SHIFT 9
+
+#define RES4329_RESERVED0 0
+#define RES4329_CBUCK_LPOM 1
+#define RES4329_CBUCK_BURST 2
+#define RES4329_CBUCK_PWM 3
+#define RES4329_CLDO_PU 4
+#define RES4329_PALDO_PU 5
+#define RES4329_ILP_REQUEST 6
+#define RES4329_RESERVED7 7
+#define RES4329_RESERVED8 8
+#define RES4329_LNLDO1_PU 9
+#define RES4329_OTP_PU 10
+#define RES4329_RESERVED11 11
+#define RES4329_LNLDO2_PU 12
+#define RES4329_XTAL_PU 13
+#define RES4329_ALP_AVAIL 14
+#define RES4329_RX_PWRSW_PU 15
+#define RES4329_TX_PWRSW_PU 16
+#define RES4329_RFPLL_PWRSW_PU 17
+#define RES4329_LOGEN_PWRSW_PU 18
+#define RES4329_AFE_PWRSW_PU 19
+#define RES4329_BBPLL_PWRSW_PU 20
+#define RES4329_HT_AVAIL 21
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0
+#define CST4329_SPROM_SEL 1
+#define CST4329_OTP_SEL 2
+#define CST4329_OTP_PWRDN 3
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+
+#define RES4312_SWITCHER_BURST 0
+#define RES4312_SWITCHER_PWM 1
+#define RES4312_PA_REF_LDO 2
+#define RES4312_CORE_LDO_BURST 3
+#define RES4312_CORE_LDO_PWM 4
+#define RES4312_RADIO_LDO 5
+#define RES4312_ILP_REQUEST 6
+#define RES4312_BG_FILTBYP 7
+#define RES4312_TX_FILTBYP 8
+#define RES4312_RX_FILTBYP 9
+#define RES4312_XTAL_PU 10
+#define RES4312_ALP_AVAIL 11
+#define RES4312_BB_PLL_FILTBYP 12
+#define RES4312_RF_PLL_FILTBYP 13
+#define RES4312_HT_AVAIL 14
+
+#define RES4322_RF_LDO 0
+#define RES4322_ILP_REQUEST 1
+#define RES4322_XTAL_PU 2
+#define RES4322_ALP_AVAIL 3
+#define RES4322_SI_PLL_ON 4
+#define RES4322_HT_SI_AVAIL 5
+#define RES4322_PHY_PLL_ON 6
+#define RES4322_HT_PHY_AVAIL 7
+#define RES4322_OTP_PU 8
+
+
+#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT 6
+#define CST4322_NO_SPROM_OTP 0
+#define CST4322_SPROM_PRESENT 1
+#define CST4322_OTP_PRESENT 2
+#define CST4322_PCI_OR_USB 0x00000100
+#define CST4322_BOOT_MASK 0x00000600
+#define CST4322_BOOT_SHIFT 9
+#define CST4322_BOOT_FROM_SRAM 0
+#define CST4322_BOOT_FROM_ROM 1
+#define CST4322_BOOT_FROM_FLASH 2
+#define CST4322_BOOT_FROM_INVALID 3
+#define CST4322_ILP_DIV_EN 0x00000800
+#define CST4322_FLASH_TYPE_MASK 0x00001000
+#define CST4322_FLASH_TYPE_SHIFT 12
+#define CST4322_FLASH_TYPE_SHIFT_ST 0
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1
+#define CST4322_ARM_TAP_SEL 0x00002000
+#define CST4322_RES_INIT_MODE_MASK 0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT 14
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0
+#define CST4322_RES_INIT_MODE_ILPREQ 1
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2
+#define CST4322_RES_INIT_MODE_HTAVAIL 3
+#define CST4322_PCIPLLCLK_GATING 0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
+#define CST4322_PCI_CARDBUS_MODE 0x00040000
+
+#define RES4315_CBUCK_LPOM 1
+#define RES4315_CBUCK_BURST 2
+#define RES4315_CBUCK_PWM 3
+#define RES4315_CLDO_PU 4
+#define RES4315_PALDO_PU 5
+#define RES4315_ILP_REQUEST 6
+#define RES4315_LNLDO1_PU 9
+#define RES4315_OTP_PU 10
+#define RES4315_LNLDO2_PU 12
+#define RES4315_XTAL_PU 13
+#define RES4315_ALP_AVAIL 14
+#define RES4315_RX_PWRSW_PU 15
+#define RES4315_TX_PWRSW_PU 16
+#define RES4315_RFPLL_PWRSW_PU 17
+#define RES4315_LOGEN_PWRSW_PU 18
+#define RES4315_AFE_PWRSW_PU 19
+#define RES4315_BBPLL_PWRSW_PU 20
+#define RES4315_HT_AVAIL 21
+
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4315_DEFCIS_SEL 0x00000000
+#define CST4315_SPROM_SEL 0x00000001
+#define CST4315_OTP_SEL 0x00000002
+#define CST4315_OTP_PWRDN 0x00000003
+#define CST4315_SDIO_MODE 0x00000004
+#define CST4315_RCAL_VALID 0x00000008
+#define CST4315_RCAL_VALUE_MASK 0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT 4
+#define CST4315_PALDO_EXTPNP 0x00000200
+#define CST4315_CBUCK_MODE_MASK 0x00000c00
+#define CST4315_CBUCK_MODE_BURST 0x00000400
+#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
+
+#define PMU_MAX_TRANSITION_DLY 15000
+
+
+#define PMURES_UP_TRANSITION 2
+
+
+
+
+
+#define ECI_BW_20 0x0
+#define ECI_BW_25 0x1
+#define ECI_BW_30 0x2
+#define ECI_BW_35 0x3
+#define ECI_BW_40 0x4
+#define ECI_BW_45 0x5
+#define ECI_BW_50 0x6
+#define ECI_BW_ALL 0x7
+
+
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+#endif
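As one worked example of the masks above, the chipcommon chipid register splits into chip ID, revision, and package fields. A standalone sketch (illustrative, not part of the patch), with the CID_* defines copied locally and a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

#define CID_ID_MASK   0x0000ffff
#define CID_REV_MASK  0x000f0000
#define CID_REV_SHIFT 16
#define CID_PKG_MASK  0x00f00000
#define CID_PKG_SHIFT 20

int main(void)
{
    uint32_t chipid = 0x00214329;   /* hypothetical readback */

    printf("chip=0x%04x rev=%u pkg=%u\n",
           (unsigned)(chipid & CID_ID_MASK),                    /* 0x4329 */
           (unsigned)((chipid & CID_REV_MASK) >> CID_REV_SHIFT),/* 1 */
           (unsigned)((chipid & CID_PKG_MASK) >> CID_PKG_SHIFT));/* 2 */
    return 0;
}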
diff --git a/drivers/net/wireless/bcm4329/include/sbconfig.h b/drivers/net/wireless/bcm4329/include/sbconfig.h
new file mode 100644
index 000000000000..da18ccbe9ab8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbconfig.h
@@ -0,0 +1,276 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h,v 13.67.30.1 2008/05/07 20:17:27 Exp $
+ */
+
+
+#ifndef _SBCONFIG_H
+#define _SBCONFIG_H
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+#define SB_BUS_SIZE 0x10000
+#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE)
+
+
+#define SBCONFIGOFF 0xf00
+#define SBCONFIGSIZE 256
+
+#define SBIPSFLAG 0x08
+#define SBTPSFLAG 0x18
+#define SBTMERRLOGA 0x48
+#define SBTMERRLOG 0x50
+#define SBADMATCH3 0x60
+#define SBADMATCH2 0x68
+#define SBADMATCH1 0x70
+#define SBIMSTATE 0x90
+#define SBINTVEC 0x94
+#define SBTMSTATELOW 0x98
+#define SBTMSTATEHIGH 0x9c
+#define SBBWA0 0xa0
+#define SBIMCONFIGLOW 0xa8
+#define SBIMCONFIGHIGH 0xac
+#define SBADMATCH0 0xb0
+#define SBTMCONFIGLOW 0xb8
+#define SBTMCONFIGHIGH 0xbc
+#define SBBCONFIG 0xc0
+#define SBBSTATE 0xc8
+#define SBACTCNFG 0xd8
+#define SBFLAGST 0xe8
+#define SBIDLOW 0xf8
+#define SBIDHIGH 0xfc
+
+
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _sbconfig {
+ uint32 PAD[2];
+ uint32 sbipsflag;
+ uint32 PAD[3];
+ uint32 sbtpsflag;
+ uint32 PAD[11];
+ uint32 sbtmerrloga;
+ uint32 PAD;
+ uint32 sbtmerrlog;
+ uint32 PAD[3];
+ uint32 sbadmatch3;
+ uint32 PAD;
+ uint32 sbadmatch2;
+ uint32 PAD;
+ uint32 sbadmatch1;
+ uint32 PAD[7];
+ uint32 sbimstate;
+ uint32 sbintvec;
+ uint32 sbtmstatelow;
+ uint32 sbtmstatehigh;
+ uint32 sbbwa0;
+ uint32 PAD;
+ uint32 sbimconfiglow;
+ uint32 sbimconfighigh;
+ uint32 sbadmatch0;
+ uint32 PAD;
+ uint32 sbtmconfiglow;
+ uint32 sbtmconfighigh;
+ uint32 sbbconfig;
+ uint32 PAD;
+ uint32 sbbstate;
+ uint32 PAD[3];
+ uint32 sbactcnfg;
+ uint32 PAD[3];
+ uint32 sbflagst;
+ uint32 PAD[3];
+ uint32 sbidlow;
+ uint32 sbidhigh;
+} sbconfig_t;
+
+#endif
+
+
+#define SBIPS_INT1_MASK 0x3f
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000
+#define SBIPS_INT4_SHIFT 24
+
+
+#define SBTPS_NUM0_MASK 0x3f
+#define SBTPS_F0EN0 0x40
+
+
+#define SBTMEL_CM 0x00000007
+#define SBTMEL_CI 0x0000ff00
+#define SBTMEL_EC 0x0f000000
+#define SBTMEL_ME 0x80000000
+
+
+#define SBIM_PC 0xf
+#define SBIM_AP_MASK 0x30
+#define SBIM_AP_BOTH 0x00
+#define SBIM_AP_TS 0x10
+#define SBIM_AP_TK 0x20
+#define SBIM_AP_RSV 0x30
+#define SBIM_IBE 0x20000
+#define SBIM_TO 0x40000
+#define SBIM_BY 0x01800000
+#define SBIM_RJ 0x02000000
+
+
+#define SBTML_RESET 0x0001
+#define SBTML_REJ_MASK 0x0006
+#define SBTML_REJ 0x0002
+#define SBTML_TMPREJ 0x0004
+
+#define SBTML_SICF_SHIFT 16
+
+
+#define SBTMH_SERR 0x0001
+#define SBTMH_INT 0x0002
+#define SBTMH_BUSY 0x0004
+#define SBTMH_TO 0x0020
+
+#define SBTMH_SISF_SHIFT 16
+
+
+#define SBBWA_TAB0_MASK 0xffff
+#define SBBWA_TAB1_MASK 0xffff
+#define SBBWA_TAB1_SHIFT 16
+
+
+#define SBIMCL_STO_MASK 0x7
+#define SBIMCL_RTO_MASK 0x70
+#define SBIMCL_RTO_SHIFT 4
+#define SBIMCL_CID_MASK 0xff0000
+#define SBIMCL_CID_SHIFT 16
+
+
+#define SBIMCH_IEM_MASK 0xc
+#define SBIMCH_TEM_MASK 0x30
+#define SBIMCH_TEM_SHIFT 4
+#define SBIMCH_BEM_MASK 0xc0
+#define SBIMCH_BEM_SHIFT 6
+
+
+#define SBAM_TYPE_MASK 0x3
+#define SBAM_AD64 0x4
+#define SBAM_ADINT0_MASK 0xf8
+#define SBAM_ADINT0_SHIFT 3
+#define SBAM_ADINT1_MASK 0x1f8
+#define SBAM_ADINT1_SHIFT 3
+#define SBAM_ADINT2_MASK 0x1f8
+#define SBAM_ADINT2_SHIFT 3
+#define SBAM_ADEN 0x400
+#define SBAM_ADNEG 0x800
+#define SBAM_BASE0_MASK 0xffffff00
+#define SBAM_BASE0_SHIFT 8
+#define SBAM_BASE1_MASK 0xfffff000
+#define SBAM_BASE1_SHIFT 12
+#define SBAM_BASE2_MASK 0xffff0000
+#define SBAM_BASE2_SHIFT 16
+
+
+#define SBTMCL_CD_MASK 0xff
+#define SBTMCL_CO_MASK 0xf800
+#define SBTMCL_CO_SHIFT 11
+#define SBTMCL_IF_MASK 0xfc0000
+#define SBTMCL_IF_SHIFT 18
+#define SBTMCL_IM_MASK 0x3000000
+#define SBTMCL_IM_SHIFT 24
+
+
+#define SBTMCH_BM_MASK 0x3
+#define SBTMCH_RM_MASK 0x3
+#define SBTMCH_RM_SHIFT 2
+#define SBTMCH_SM_MASK 0x30
+#define SBTMCH_SM_SHIFT 4
+#define SBTMCH_EM_MASK 0x300
+#define SBTMCH_EM_SHIFT 8
+#define SBTMCH_IM_MASK 0xc00
+#define SBTMCH_IM_SHIFT 10
+
+
+#define SBBC_LAT_MASK 0x3
+#define SBBC_MAX0_MASK 0xf0000
+#define SBBC_MAX0_SHIFT 16
+#define SBBC_MAX1_MASK 0xf00000
+#define SBBC_MAX1_SHIFT 20
+
+
+#define SBBS_SRD 0x1
+#define SBBS_HRD 0x2
+
+
+#define SBIDL_CS_MASK 0x3
+#define SBIDL_AR_MASK 0x38
+#define SBIDL_AR_SHIFT 3
+#define SBIDL_SYNCH 0x40
+#define SBIDL_INIT 0x80
+#define SBIDL_MINLAT_MASK 0xf00
+#define SBIDL_MINLAT_SHIFT 8
+#define SBIDL_MAXLAT 0xf000
+#define SBIDL_MAXLAT_SHIFT 12
+#define SBIDL_FIRST 0x10000
+#define SBIDL_CW_MASK 0xc0000
+#define SBIDL_CW_SHIFT 18
+#define SBIDL_TP_MASK 0xf00000
+#define SBIDL_TP_SHIFT 20
+#define SBIDL_IP_MASK 0xf000000
+#define SBIDL_IP_SHIFT 24
+#define SBIDL_RV_MASK 0xf0000000
+#define SBIDL_RV_SHIFT 28
+#define SBIDL_RV_2_2 0x00000000
+#define SBIDL_RV_2_3 0x10000000
+
+
+#define SBIDH_RC_MASK 0x000f
+#define SBIDH_RCE_MASK 0x7000
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000
+#define SBIDH_VC_SHIFT 16
+
+#define SB_COMMIT 0xfd8
+
+
+#define SB_VEND_BCM 0x4243
+
+#endif
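The SBCOREREV() macro above merges the 4-bit base revision with the revision-extension bits of sbidhigh. A standalone sketch of the arithmetic (illustrative, not part of the patch), with the masks copied locally:

#include <stdint.h>
#include <stdio.h>

#define SBIDH_RC_MASK   0x000f
#define SBIDH_RCE_MASK  0x7000
#define SBIDH_RCE_SHIFT 8
#define SBCOREREV(sbidh) \
    ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))

int main(void)
{
    uint32_t sbidhigh = 0x1002;   /* hypothetical: RCE field = 1, RC field = 2 */

    printf("corerev=%u\n", (unsigned)SBCOREREV(sbidhigh));   /* prints corerev=18 */
    return 0;
}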
diff --git a/drivers/net/wireless/bcm4329/include/sbhnddma.h b/drivers/net/wireless/bcm4329/include/sbhnddma.h
new file mode 100644
index 000000000000..7681395f5b3b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbhnddma.h
@@ -0,0 +1,294 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h,v 13.11.250.5.16.1 2009/07/21 14:04:51 Exp $
+ */
+
+
+#ifndef _sbhnddma_h_
+#define _sbhnddma_h_
+
+
+
+
+
+
+
+typedef volatile struct {
+ uint32 control;
+ uint32 addr;
+ uint32 ptr;
+ uint32 status;
+} dma32regs_t;
+
+typedef volatile struct {
+ dma32regs_t xmt;
+ dma32regs_t rcv;
+} dma32regp_t;
+
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma32diag_t;
+
+
+typedef volatile struct {
+ uint32 ctrl;
+ uint32 addr;
+} dma32dd_t;
+
+
+#define D32RINGALIGN_BITS 12
+#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
+#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
+
+
+#define XC_XE ((uint32)1 << 0)
+#define XC_SE ((uint32)1 << 1)
+#define XC_LE ((uint32)1 << 2)
+#define XC_FL ((uint32)1 << 4)
+#define XC_PD ((uint32)1 << 11)
+#define XC_AE ((uint32)3 << 16)
+#define XC_AE_SHIFT 16
+
+
+#define XP_LD_MASK 0xfff
+
+
+#define XS_CD_MASK 0x0fff
+#define XS_XS_MASK 0xf000
+#define XS_XS_SHIFT 12
+#define XS_XS_DISABLED 0x0000
+#define XS_XS_ACTIVE 0x1000
+#define XS_XS_IDLE 0x2000
+#define XS_XS_STOPPED 0x3000
+#define XS_XS_SUSP 0x4000
+#define XS_XE_MASK 0xf0000
+#define XS_XE_SHIFT 16
+#define XS_XE_NOERR 0x00000
+#define XS_XE_DPE 0x10000
+#define XS_XE_DFU 0x20000
+#define XS_XE_BEBR 0x30000
+#define XS_XE_BEDA 0x40000
+#define XS_AD_MASK 0xfff00000
+#define XS_AD_SHIFT 20
+
+
+#define RC_RE ((uint32)1 << 0)
+#define RC_RO_MASK 0xfe
+#define RC_RO_SHIFT 1
+#define RC_FM ((uint32)1 << 8)
+#define RC_SH ((uint32)1 << 9)
+#define RC_OC ((uint32)1 << 10)
+#define RC_PD ((uint32)1 << 11)
+#define RC_AE ((uint32)3 << 16)
+#define RC_AE_SHIFT 16
+
+
+#define RP_LD_MASK 0xfff
+
+
+#define RS_CD_MASK 0x0fff
+#define RS_RS_MASK 0xf000
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000
+#define RS_RS_ACTIVE 0x1000
+#define RS_RS_IDLE 0x2000
+#define RS_RS_STOPPED 0x3000
+#define RS_RE_MASK 0xf0000
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000
+#define RS_RE_DPE 0x10000
+#define RS_RE_DFO 0x20000
+#define RS_RE_BEBW 0x30000
+#define RS_RE_BEDA 0x40000
+#define RS_AD_MASK 0xfff00000
+#define RS_AD_SHIFT 20
+
+
+#define FA_OFF_MASK 0xffff
+#define FA_SEL_MASK 0xf0000
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000
+#define FA_SEL_XDP 0x10000
+#define FA_SEL_RDD 0x40000
+#define FA_SEL_RDP 0x50000
+#define FA_SEL_XFD 0x80000
+#define FA_SEL_XFP 0x90000
+#define FA_SEL_RFD 0xc0000
+#define FA_SEL_RFP 0xd0000
+#define FA_SEL_RSD 0xe0000
+#define FA_SEL_RSP 0xf0000
+
+
+#define CTRL_BC_MASK 0x1fff
+#define CTRL_AE ((uint32)3 << 16)
+#define CTRL_AE_SHIFT 16
+#define CTRL_EOT ((uint32)1 << 28)
+#define CTRL_IOC ((uint32)1 << 29)
+#define CTRL_EOF ((uint32)1 << 30)
+#define CTRL_SOF ((uint32)1 << 31)
+
+
+#define CTRL_CORE_MASK 0x0ff00000
+
+
+
+
+typedef volatile struct {
+ uint32 control;
+ uint32 ptr;
+ uint32 addrlow;
+ uint32 addrhigh;
+ uint32 status0;
+ uint32 status1;
+} dma64regs_t;
+
+typedef volatile struct {
+ dma64regs_t tx;
+ dma64regs_t rx;
+} dma64regp_t;
+
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma64diag_t;
+
+
+typedef volatile struct {
+ uint32 ctrl1;
+ uint32 ctrl2;
+ uint32 addrlow;
+ uint32 addrhigh;
+} dma64dd_t;
+
+
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+
+#define D64_XC_XE 0x00000001
+#define D64_XC_SE 0x00000002
+#define D64_XC_LE 0x00000004
+#define D64_XC_FL 0x00000010
+#define D64_XC_PD 0x00000800
+#define D64_XC_AE 0x00030000
+#define D64_XC_AE_SHIFT 16
+
+
+#define D64_XP_LD_MASK 0x00000fff
+
+
+#define D64_XS0_CD_MASK 0x00001fff
+#define D64_XS0_XS_MASK 0xf0000000
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000
+#define D64_XS0_XS_ACTIVE 0x10000000
+#define D64_XS0_XS_IDLE 0x20000000
+#define D64_XS0_XS_STOPPED 0x30000000
+#define D64_XS0_XS_SUSP 0x40000000
+
+#define D64_XS1_AD_MASK 0x0001ffff
+#define D64_XS1_XE_MASK 0xf0000000
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000
+#define D64_XS1_XE_DPE 0x10000000
+#define D64_XS1_XE_DFU 0x20000000
+#define D64_XS1_XE_DTE 0x30000000
+#define D64_XS1_XE_DESRE 0x40000000
+#define D64_XS1_XE_COREE 0x50000000
+
+
+#define D64_RC_RE 0x00000001
+#define D64_RC_RO_MASK 0x000000fe
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100
+#define D64_RC_SH 0x00000200
+#define D64_RC_OC 0x00000400
+#define D64_RC_PD 0x00000800
+#define D64_RC_AE 0x00030000
+#define D64_RC_AE_SHIFT 16
+
+
+#define D64_RP_LD_MASK 0x00000fff
+
+
+#define D64_RS0_CD_MASK 0x00001fff
+#define D64_RS0_RS_MASK 0xf0000000
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000
+#define D64_RS0_RS_ACTIVE 0x10000000
+#define D64_RS0_RS_IDLE 0x20000000
+#define D64_RS0_RS_STOPPED 0x30000000
+#define D64_RS0_RS_SUSP 0x40000000
+
+#define D64_RS1_AD_MASK 0x0001ffff
+#define D64_RS1_RE_MASK 0xf0000000
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000
+#define D64_RS1_RE_DPO 0x10000000
+#define D64_RS1_RE_DFU 0x20000000
+#define D64_RS1_RE_DTE 0x30000000
+#define D64_RS1_RE_DESRE 0x40000000
+#define D64_RS1_RE_COREE 0x50000000
+
+
+#define D64_FA_OFF_MASK 0xffff
+#define D64_FA_SEL_MASK 0xf0000
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000
+#define D64_FA_SEL_XDP 0x10000
+#define D64_FA_SEL_RDD 0x40000
+#define D64_FA_SEL_RDP 0x50000
+#define D64_FA_SEL_XFD 0x80000
+#define D64_FA_SEL_XFP 0x90000
+#define D64_FA_SEL_RFD 0xc0000
+#define D64_FA_SEL_RFP 0xd0000
+#define D64_FA_SEL_RSD 0xe0000
+#define D64_FA_SEL_RSP 0xf0000
+
+
+#define D64_CTRL1_EOT ((uint32)1 << 28)
+#define D64_CTRL1_IOC ((uint32)1 << 29)
+#define D64_CTRL1_EOF ((uint32)1 << 30)
+#define D64_CTRL1_SOF ((uint32)1 << 31)
+
+
+#define D64_CTRL2_BC_MASK 0x00007fff
+#define D64_CTRL2_AE 0x00030000
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000
+
+
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+
+#endif
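Putting the D64_CTRL* bits together, a single-buffer frame descriptor typically carries SOF|EOF, an interrupt-on-completion flag, and the byte count in ctrl2. A standalone sketch (illustrative, not part of the patch; the real driver additionally handles the end-of-table bit and address-extension fields), with the defines copied locally:

#include <stdint.h>
#include <stdio.h>

#define D64_CTRL1_EOT     ((uint32_t)1 << 28)
#define D64_CTRL1_IOC     ((uint32_t)1 << 29)
#define D64_CTRL1_EOF     ((uint32_t)1 << 30)
#define D64_CTRL1_SOF     ((uint32_t)1 << 31)
#define D64_CTRL2_BC_MASK 0x00007fff

struct dma64dd {                  /* same layout as dma64dd_t above */
    uint32_t ctrl1, ctrl2, addrlow, addrhigh;
};

int main(void)
{
    struct dma64dd dd;
    uint64_t pa  = 0x1234560ULL;  /* hypothetical buffer bus address */
    uint32_t len = 1536;          /* frame length in bytes */

    dd.ctrl1    = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC;
    dd.ctrl2    = len & D64_CTRL2_BC_MASK;
    dd.addrlow  = (uint32_t)pa;
    dd.addrhigh = (uint32_t)(pa >> 32);

    printf("ctrl1=0x%08x ctrl2=0x%08x\n", (unsigned)dd.ctrl1, (unsigned)dd.ctrl2);
    return 0;
}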
diff --git a/drivers/net/wireless/bcm4329/include/sbpcmcia.h b/drivers/net/wireless/bcm4329/include/sbpcmcia.h
new file mode 100644
index 000000000000..d6d80334258a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbpcmcia.h
@@ -0,0 +1,109 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h,v 13.31.4.1.2.3.8.7 2009/06/22 05:14:24 Exp $
+ */
+
+
+#ifndef _SBPCMCIA_H
+#define _SBPCMCIA_H
+
+
+
+
+#define PCMCIA_FCR (0x700 / 2)
+
+#define FCR0_OFF 0
+#define FCR1_OFF (0x40 / 2)
+#define FCR2_OFF (0x80 / 2)
+#define FCR3_OFF (0xc0 / 2)
+
+#define PCMCIA_FCR0 (0x700 / 2)
+#define PCMCIA_FCR1 (0x740 / 2)
+#define PCMCIA_FCR2 (0x780 / 2)
+#define PCMCIA_FCR3 (0x7c0 / 2)
+
+
+
+#define PCMCIA_COR 0
+
+#define COR_RST 0x80
+#define COR_LEV 0x40
+#define COR_IRQEN 0x04
+#define COR_BLREN 0x01
+#define COR_FUNEN 0x01
+
+
+#define PCICIA_FCSR (2 / 2)
+#define PCICIA_PRR (4 / 2)
+#define PCICIA_SCR (6 / 2)
+#define PCICIA_ESR (8 / 2)
+
+
+#define PCM_MEMOFF 0x0000
+#define F0_MEMOFF 0x1000
+#define F1_MEMOFF 0x2000
+#define F2_MEMOFF 0x3000
+#define F3_MEMOFF 0x4000
+
+
+#define MEM_ADDR0 (0x728 / 2)
+#define MEM_ADDR1 (0x72a / 2)
+#define MEM_ADDR2 (0x72c / 2)
+
+
+#define PCMCIA_ADDR0 (0x072e / 2)
+#define PCMCIA_ADDR1 (0x0730 / 2)
+#define PCMCIA_ADDR2 (0x0732 / 2)
+
+#define MEM_SEG (0x0734 / 2)
+#define SROM_CS (0x0736 / 2)
+#define SROM_DATAL (0x0738 / 2)
+#define SROM_DATAH (0x073a / 2)
+#define SROM_ADDRL (0x073c / 2)
+#define SROM_ADDRH (0x073e / 2)
+#define SROM_INFO2 (0x0772 / 2)
+#define SROM_INFO (0x07be / 2)
+
+
+#define SROM_IDLE 0
+#define SROM_WRITE 1
+#define SROM_READ 2
+#define SROM_WEN 4
+#define SROM_WDS 7
+#define SROM_DONE 8
+
+
+#define SRI_SZ_MASK 0x03
+#define SRI_BLANK 0x04
+#define SRI_OTP 0x80
+
+
+
+#define SBTML_INT_ACK 0x40000
+#define SBTML_INT_EN 0x20000
+
+
+#define SBTMH_INT_STATUS 0x40000
+
+#endif
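The FCRn offsets above are expressed in 16-bit words of attribute space, so, for example, resetting function 2 means writing COR_RST to the COR at word offset PCMCIA_FCR2 + PCMCIA_COR. A standalone sketch of that arithmetic (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define PCMCIA_FCR2 (0x780 / 2)
#define PCMCIA_COR  0
#define COR_RST     0x80
#define COR_IRQEN   0x04

int main(void)
{
    unsigned word_off = PCMCIA_FCR2 + PCMCIA_COR;   /* 0x3c0 words = byte offset 0x780 */
    uint8_t  val      = COR_RST | COR_IRQEN;        /* reset core, enable interrupts */

    printf("write 0x%02x at attribute word offset 0x%x\n", val, word_off);
    return 0;
}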
diff --git a/drivers/net/wireless/bcm4329/include/sbsdio.h b/drivers/net/wireless/bcm4329/include/sbsdio.h
new file mode 100644
index 000000000000..75aaf4d88f7d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbsdio.h
@@ -0,0 +1,166 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h,v 13.29.4.1.22.3 2009/03/11 20:26:57 Exp $
+ */
+
+#ifndef _SBSDIO_H
+#define _SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE 0
+#define SBSDIO_SPROM_WRITE 1
+#define SBSDIO_SPROM_READ 2
+#define SBSDIO_SPROM_WEN 4
+#define SBSDIO_SPROM_WDS 7
+#define SBSDIO_SPROM_DONE 8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+#define SROM_OTP 0x80 /* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
+ * 1: power on oscillator
+ * (for 4318 only)
+ */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
+ * to wait before sending data to host
+ */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
+
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL 0x40
+#define SBSDIO_Rev8_ALP_AVAIL 0x80
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, including tuple and
+ * link bytes
+ */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
+ * 8th byte
+ */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
+ * data command
+ */
+
+#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
+
+#endif /* _SBSDIO_H */
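Following the SBSDIO_SBADDR*_MASK and SBSDIO_SB_OFT_ADDR_MASK comments above, a 32-bit backplane address splits into a window base (written byte-wise to the three SBADDR registers) plus a 15-bit in-window offset. A standalone sketch (illustrative, not part of the patch), with the defines copied locally:

#include <stdint.h>
#include <stdio.h>

#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF
#define SBSDIO_SBWINDOW_MASK    0xffff8000u
#define SBSDIO_SBADDRLOW_MASK   0x80
#define SBSDIO_SBADDRMID_MASK   0xff
#define SBSDIO_SBADDRHIGH_MASK  0xffu

int main(void)
{
    uint32_t addr   = 0x18002034;                   /* hypothetical core register address */
    uint32_t window = addr & SBSDIO_SBWINDOW_MASK;  /* window base the SBADDR regs select */

    uint8_t low  = (window >>  8) & SBSDIO_SBADDRLOW_MASK;  /* only b15 is writable */
    uint8_t mid  = (window >> 16) & SBSDIO_SBADDRMID_MASK;
    uint8_t high = (window >> 24) & SBSDIO_SBADDRHIGH_MASK;

    printf("low=0x%02x mid=0x%02x high=0x%02x offset=0x%04x\n",
           low, mid, high, (unsigned)(addr & SBSDIO_SB_OFT_ADDR_MASK));
    return 0;   /* prints low=0x00 mid=0x00 high=0x18 offset=0x2034 */
}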
diff --git a/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h b/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h
new file mode 100644
index 000000000000..7c7c7e4de0f6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h
@@ -0,0 +1,288 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific device core support
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h,v 13.29.4.1.4.6.6.2 2008/12/31 21:16:51 Exp $
+ */
+
+#ifndef _sbsdpcmdev_h_
+#define _sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct {
+ dma64regs_t xmt; /* dma tx */
+ uint32 PAD[2];
+ dma64regs_t rcv; /* dma rx */
+ uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+ dma64p_t dma64regs[2];
+ dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
+ uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+ dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
+ uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+ dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
+ uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+ uint32 corecontrol; /* CoreControl, 0x000, rev8 */
+ uint32 corestatus; /* CoreStatus, 0x004, rev8 */
+ uint32 PAD[1];
+ uint32 biststatus; /* BistStatus, 0x00c, rev8 */
+
+ /* PCMCIA access */
+ uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
+ uint16 PAD[1];
+
+ /* interrupt */
+ uint32 intstatus; /* IntStatus, 0x020, rev8 */
+ uint32 hostintmask; /* IntHostMask, 0x024, rev8 */
+ uint32 intmask; /* IntSbMask, 0x028, rev8 */
+ uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
+ uint32 sbintmask; /* SBIntMask, 0x030, rev8 */
+ uint32 PAD[3];
+ uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
+ uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
+ uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
+ uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
+ uint32 PAD[3];
+
+ /* PCMCIA frame control */
+ uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
+ uint8 PAD[3];
+ uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
+ uint8 PAD[155];
+
+ /* interrupt batching control */
+ uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
+ uint32 PAD[3];
+
+ /* counters */
+ uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+ uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+ uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+ uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+ uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+ uint32 PAD[40];
+ uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
+ uint32 PAD[7];
+
+ /* DMA engines */
+ volatile union {
+ pcmdma32_t pcm32;
+ sdiodma32_t sdiod32;
+ sdiodma64_t sdiod64;
+ } dma;
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
+ uint16 PAD[55];
+
+ /* PCMCIA backplane access */
+ uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
+ uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
+ uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
+ uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
+ uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
+ uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
+ uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
+ uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
+ uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
+ uint16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
+ uint32 PAD[464];
+
+ /* Sonics SiliconBackplane registers */
+ sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
+
+/* corestatus */
+#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
+#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
+#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
+#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
+#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
+#define SDA_WRITE 0x01000000 /* Write bit */
+#define SDA_READ 0x00000000 /* Write bit cleared for Read */
+#define SDA_BUSY 0x80000000 /* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
+#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
+#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
+#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
+#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
+#define SDA_SBADDRMID 0x00b /* SbAddrMid */
+#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
+#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
+#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
+#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+
+/* intrcvlazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/* rx header */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC 0x0001 /* CRC error detected */
+#define RXF_WOOS 0x0002 /* write frame out of sync */
+#define RXF_WF_TERM 0x0004 /* write frame terminated */
+#define RXF_ABORT 0x0008 /* write frame aborted */
+#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
+
+#endif /* _sbsdpcmdev_h_ */
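
A minimal sketch of how the IRL_* and I_* masks above are meant to be combined: build an IntRcvLazy value from a timeout and a frame count, and classify a raw intstatus word. The direct register store stands in for whatever W_REG()-style accessor the driver actually uses (an assumption here).

/* illustrative sketch, not part of the patch */
static void sdpcm_config_lazy_intr(sdpcmd_regs_t *regs, uint32 timeout, uint32 framecnt)
{
	uint32 irl = (timeout & IRL_TO_MASK) |
	             ((framecnt << IRL_FC_SHIFT) & IRL_FC_MASK);

	regs->intrcvlazy = irl;	/* real code would go through the OSL register macros */
}

static bool sdpcm_intstatus_is_fatal(uint32 intstatus)
{
	/* DMA descriptor/fifo problems are grouped under I_ERRORS */
	return (intstatus & I_ERRORS) != 0;
}
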
diff --git a/drivers/net/wireless/bcm4329/include/sbsocram.h b/drivers/net/wireless/bcm4329/include/sbsocram.h
new file mode 100644
index 000000000000..5ede0b66d97f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbsocram.h
@@ -0,0 +1,150 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h,v 13.9.162.2 2008/12/12 14:13:27 Exp $
+ */
+
+
+#ifndef _SBSOCRAM_H
+#define _SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+typedef volatile struct sbsocramregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[17];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl;
+} sbsocramregs_t;
+
+#endif
+
+
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+
+#define SRCI_PT_MASK 0x00070000
+#define SRCI_PT_SHIFT 16
+
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
+
+
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+
+#endif
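
A minimal sketch of one plausible reading of the coreinfo fields above, mirroring the way si_socram_size() is usually derived from SRCI_SRNB/SRCI_SRBSZ. The 2^(code + SR_BSZ_BASE) bank-size rule is an assumption carried over from other Broadcom code rather than something stated in this header.

/* illustrative sketch, not part of the patch */
static uint32 socram_mem_size(uint32 coreinfo)
{
	uint32 nbanks  = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
	uint32 bszcode = (coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT;

	/* assumed: each bank holds 2^(bszcode + SR_BSZ_BASE) bytes */
	return nbanks * (1u << (bszcode + SR_BSZ_BASE));
}
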
diff --git a/drivers/net/wireless/bcm4329/include/sdio.h b/drivers/net/wireless/bcm4329/include/sdio.h
new file mode 100644
index 000000000000..280cb845fb04
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sdio.h
@@ -0,0 +1,566 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h,v 13.24.4.1.4.1.16.1 2009/08/12 01:08:02 Exp $
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS 7
+
+/* SDIO Device FBR Start Address */
+#define SDIOD_FBR_STARTADDR 0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE 0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n) ((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
+#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0 0x09
+#define SDIOD_FBR_CISPTR_1 0x0A
+#define SDIOD_FBR_CISPTR_2 0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0 0x0C
+#define SDIOD_FBR_CSA_ADDR_1 0x0D
+#define SDIOD_FBR_CSA_ADDR_2 0x0E
+#define SDIOD_FBR_CSA_DATA 0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0 0x10
+#define SDIOD_FBR_BLKSIZE_1 0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
+#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART 1
+#define SDIOD_DIC_BLUETOOTH_A 2
+#define SDIOD_DIC_BLUETOOTH_B 3
+#define SDIOD_DIC_GPS 4
+#define SDIOD_DIC_CAMERA 5
+#define SDIOD_DIC_PHS 6
+#define SDIOD_DIC_WLAN 7
+#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
+#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+#define SDIO_FUNC_3 3
+#define SDIO_FUNC_4 4
+#define SDIO_FUNC_5 5
+#define SDIO_FUNC_6 6
+#define SDIO_FUNC_7 7
+
+#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
+#define SD_CARD_TYPE_IO 1 /* IO only card */
+#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
+#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE 31
+#define CARDREG_STATUS_BIT_COMCRCERROR 23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
+#define CARDREG_STATUS_BIT_ERROR 19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+
+
+#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND 1
+#define SD_CMD_MMC_SET_RCA 3
+#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD 7
+#define SD_CMD_SEND_CSD 9
+#define SD_CMD_SEND_CID 10
+#define SD_CMD_STOP_TRANSMISSION 12
+#define SD_CMD_SEND_STATUS 13
+#define SD_CMD_GO_INACTIVE_STATE 15
+#define SD_CMD_SET_BLOCKLEN 16
+#define SD_CMD_READ_SINGLE_BLOCK 17
+#define SD_CMD_READ_MULTIPLE_BLOCK 18
+#define SD_CMD_WRITE_BLOCK 24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
+#define SD_CMD_PROGRAM_CSD 27
+#define SD_CMD_SET_WRITE_PROT 28
+#define SD_CMD_CLR_WRITE_PROT 29
+#define SD_CMD_SEND_WRITE_PROT 30
+#define SD_CMD_ERASE_WR_BLK_START 32
+#define SD_CMD_ERASE_WR_BLK_END 33
+#define SD_CMD_ERASE 38
+#define SD_CMD_LOCK_UNLOCK 42
+#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
+#define SD_CMD_APP_CMD 55
+#define SD_CMD_GEN_CMD 56
+#define SD_CMD_READ_OCR 58
+#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS 13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
+#define SD_ACMD_SD_SEND_OP_COND 41
+#define SD_ACMD_SET_CLR_CARD_DETECT 42
+#define SD_ACMD_SEND_SCR 51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ 0 /* Read_Write: Read */
+#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
+#define SD_IO_RW_NORMAL 0 /* no RAW */
+#define SD_IO_RW_RAW 1 /* RAW */
+#define SD_IO_BYTE_MODE 0 /* Byte Mode */
+#define SD_IO_BLOCK_MODE 1 /* BlockMode */
+#define SD_IO_FIXED_ADDRESS 0 /* FixedAddress */
+#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+ (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+ (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE 0
+#define SD_RSP_NO_1 1
+#define SD_RSP_NO_2 2
+#define SD_RSP_NO_3 3
+#define SD_RSP_NO_4 4
+#define SD_RSP_NO_5 5
+#define SD_RSP_NO_6 6
+
+ /* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
+#define SD_RSP_MR6_ERROR 0x2000
+
+ /* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT 0x80
+#define SD_RSP_MR1_PARAMETER_ERROR 0x40
+#define SD_RSP_MR1_RFU5 0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
+#define SD_RSP_MR1_RFU1 0x02
+#define SD_RSP_MR1_IDLE_STATE 0x01
+
+ /* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR 0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
+#define SD_RSP_R5_ERROR 0x08
+#define SD_RSP_R5_RFU 0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
+#define SD_RSP_R5_OUT_OF_RANGE 0x01
+
+#define SD_RSP_R5_ERRBITS 0xCB
+
+
+/* ------------------------------------------------
+ * SDIO Commands and responses
+ *
+ * I/O only commands are:
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0 0
+#define SDIOH_CMD_3 3
+#define SDIOH_CMD_5 5
+#define SDIOH_CMD_7 7
+#define SDIOH_CMD_15 15
+#define SDIOH_CMD_52 52
+#define SDIOH_CMD_53 53
+#define SDIOH_CMD_59 59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE 0
+#define SDIOH_RSP_R1 1
+#define SDIOH_RSP_R2 2
+#define SDIOH_RSP_R3 3
+#define SDIOH_RSP_R4 4
+#define SDIOH_RSP_R5 5
+#define SDIOH_RSP_R6 6
+
+/*
+ * SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS 0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M BITFIELD_MASK(24)
+#define CMD5_OCR_S 0
+
+#define CMD7_RCA_M BITFIELD_MASK(16)
+#define CMD7_RCA_S 16
+
+#define CMD_15_RCA_M BITFIELD_MASK(16)
+#define CMD_15_RCA_S 16
+
+#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
+ */
+#define CMD52_DATA_S 0
+#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD52_REG_ADDR_S 9
+#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
+#define CMD52_RAW_S 27
+#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD52_FUNCTION_S 28
+#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD52_RW_FLAG_S 31
+
+
+#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S 0
+#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD53_REG_ADDR_S 9
+#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
+#define CMD53_OP_CODE_S 26
+#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
+#define CMD53_BLK_MODE_S 27
+#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD53_FUNCTION_S 28
+#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD53_RW_FLAG_S 31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ * -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S 0
+#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S 24
+#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
+#define RSP4_MEM_PRESENT_S 27
+#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S 28
+#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
+#define RSP4_CARD_READY_S 31
+
+#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
+ */
+#define RSP6_STATUS_S 0
+#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S 16
+
+#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S 3
+#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP1_APP_CMD_S 5
+#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S 8
+#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
+ * when Cmd was received
+ */
+#define RSP1_CURR_STATE_S 9
+#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
+#define RSP1_EARSE_RESET_S 13
+#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S 14
+#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S 15
+#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
+ * of CSD
+ */
+#define RSP1_CID_CSD_OVERW_S 16
+#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
+#define RSP1_ERROR_S 19
+#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
+#define RSP1_CC_ERROR_S 20
+#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
+ * to correct data
+ */
+#define RSP1_CARD_ECC_FAILED_S 21
+#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S 22
+#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
+ */
+#define RSP1_COM_CRC_ERROR_S 23
+#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S 24
+#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
+#define RSP1_CARD_LOCKED_S 25
+#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
+ * write-protected blocks
+ */
+#define RSP1_WP_VIOLATION_S 26
+#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S 27
+#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S 28
+#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
+#define RSP1_BLK_LEN_ERR_S 29
+#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
+#define RSP1_ADDR_ERR_S 30
+#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S 31
+
+
+#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */
+#define RSP5_DATA_S 0
+#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
+#define RSP5_FLAGS_S 8
+#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S 16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S 0
+#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
+#define SPIRSP4_STUFF_S 16
+#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
+#define SPIRSP4_MEM_PRESENT_S 19
+#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S 20
+#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
+#define SPIRSP4_CARD_READY_S 23
+#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
+#define SPIRSP4_IDLE_STATE_S 24
+#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S 26
+#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S 27
+#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP4_FUNC_NUM_ERROR_S 28
+#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S 30
+#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP4_START_BIT_S 31
+
+#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
+#define SPIRSP5_DATA_S 16
+#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
+#define SPIRSP5_IDLE_STATE_S 24
+#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S 26
+#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S 27
+#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP5_FUNC_NUM_ERROR_S 28
+#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S 30
+#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP5_START_BIT_S 31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error
+ */
+#define RSP6STAT_AKE_SEQ_ERROR_S 3
+#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP6STAT_APP_CMD_S 5
+#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data
+ * (buff empty)
+ */
+#define RSP6STAT_READY_FOR_DATA_S 8
+#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at
+ * Cmd reception
+ */
+#define RSP6STAT_CURR_STATE_S 9
+#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19
+ */
+#define RSP6STAT_ERROR_S 13
+#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for
+ * card state Bit 22
+ */
+#define RSP6STAT_ILLEGAL_CMD_S 14
+#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command
+ * failed Bit 23
+ */
+#define RSP6STAT_COM_CRC_ERROR_S 15
+
+#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE
+
+#endif /* _SDIO_H */
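
A minimal sketch showing the intended use of the CMD52 argument-builder macro and the R5 response masks above. Function number, register address and data are caller-supplied; no particular host-controller API is implied.

/* illustrative sketch, not part of the patch */
static uint32 cmd52_write_arg(uint func, uint32 addr, uint8 data)
{
	/* R/W = write, no read-after-write, 17-bit register address, data byte */
	return SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL,
	                             func, addr, data);
}

static bool r5_response_ok(uint8 rsp_flags)
{
	/* SD_RSP_R5_ERRBITS (0xCB) collects every error flag in the R5 byte */
	return (rsp_flags & SD_RSP_R5_ERRBITS) == 0;
}
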
diff --git a/drivers/net/wireless/bcm4329/include/sdioh.h b/drivers/net/wireless/bcm4329/include/sdioh.h
new file mode 100644
index 000000000000..8123452eac2b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sdioh.h
@@ -0,0 +1,299 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h,v 13.13.18.1.16.3 2009/12/08 22:34:21 Exp $
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C
+#define SD_TimeoutCntrl 0x02E
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities_Reserved 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_SysAddr 0x58
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+#define CAP_BASECLK_M BITFIELD_MASK(6)
+#define CAP_BASECLK_S 8
+#define CAP_MAXBLOCK_M BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S 16
+#define CAP_ADMA2_M BITFIELD_MASK(1)
+#define CAP_ADMA2_S 19
+#define CAP_ADMA1_M BITFIELD_MASK(1)
+#define CAP_ADMA1_S 20
+#define CAP_HIGHSPEED_M BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S 21
+#define CAP_DMA_M BITFIELD_MASK(1)
+#define CAP_DMA_S 22
+#define CAP_SUSPEND_M BITFIELD_MASK(1)
+#define CAP_SUSPEND_S 23
+#define CAP_VOLT_3_3_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S 24
+#define CAP_VOLT_3_0_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S 25
+#define CAP_VOLT_1_8_M BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S 26
+#define CAP_64BIT_HOST_M BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S 28
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S 0
+#define CAP_CURR_3_0_M BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S 8
+#define CAP_CURR_1_8_M BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S 16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S 0
+#define BLKSZ_BNDRY_M BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S 12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S 0
+#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S 1
+#define XFER_CMD_12_EN_M BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 2
+#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S 4
+#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S 5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 0
+#define RESP_TYPE_136 1
+#define RESP_TYPE_48 2
+#define RESP_TYPE_48_BUSY 3
+/* type field */
+#define CMD_TYPE_NORMAL 0
+#define CMD_TYPE_SUSPEND 1
+#define CMD_TYPE_RESUME 2
+#define CMD_TYPE_ABORT 3
+
+#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */
+#define CMD_RESP_TYPE_S 0
+#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */
+#define CMD_CRC_EN_S 3
+#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */
+#define CMD_INDEX_EN_S 4
+#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */
+#define CMD_DATA_EN_S 5
+#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc
+ */
+#define CMD_TYPE_S 6
+#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */
+#define CMD_INDEX_S 8
+
+/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */
+/* SD_PresentState : Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */
+#define PRES_CMD_INHIBIT_S 0
+#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */
+#define PRES_DAT_INHIBIT_S 1
+#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */
+#define PRES_DAT_BUSY_S 2
+#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */
+#define PRES_PRESENT_RSVD_S 3
+#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */
+#define PRES_WRITE_ACTIVE_S 8
+#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */
+#define PRES_READ_ACTIVE_S 9
+#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S 10
+#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */
+#define PRES_READ_DATA_RDY_S 11
+#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */
+#define PRES_CARD_PRESENT_S 16
+#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */
+#define PRES_CARD_STABLE_S 17
+#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */
+#define PRES_CARD_PRESENT_RAW_S 18
+#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? */
+#define PRES_WRITE_ENABLED_S 19
+#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */
+#define PRES_DAT_SIGNAL_S 20
+#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */
+#define PRES_CMD_SIGNAL_S 24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */
+#define HOST_LED_S 0
+#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */
+#define HOST_DATA_WIDTH_S 1
+#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */
+#define HOST_HI_SPEED_EN_S 2
+#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */
+#define HOST_DMA_SEL_S 3
+
+/* misc defines */
+#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */
+#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */
+#define PWR_BUS_EN_S 0
+#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */
+#define PWR_VOLTS_S 1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */
+#define SW_RESET_ALL_S 0
+#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */
+#define SW_RESET_CMD_S 1
+#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */
+#define SW_RESET_DAT_S 2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S 0
+#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S 1
+#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S 2
+#define INTSTAT_DMA_INT_M BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S 3
+#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S 4
+#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S 5
+#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S 6
+#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S 7
+#define INTSTAT_CARD_INT_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S 8
+#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */
+#define INTSTAT_ERROR_INT_S 15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S 0
+#define ERRINT_CMD_CRC_M BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S 1
+#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S 2
+#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S 3
+#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S 4
+#define ERRINT_DATA_CRC_M BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S 5
+#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S 6
+#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S 7
+#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S 8
+#define ERRINT_VENDOR_M BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S 12
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT 0x0001
+#define ERRINT_CMD_CRC_BIT 0x0002
+#define ERRINT_CMD_ENDBIT_BIT 0x0004
+#define ERRINT_CMD_INDEX_BIT 0x0008
+#define ERRINT_DATA_TIMEOUT_BIT 0x0010
+#define ERRINT_DATA_CRC_BIT 0x0020
+#define ERRINT_DATA_ENDBIT_BIT 0x0040
+#define ERRINT_CURRENT_LIMIT_BIT 0x0080
+#define ERRINT_AUTO_CMD12_BIT 0x0100
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+ ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+ ERRINT_DATA_ENDBIT_BIT)
+#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl : Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */
+/* SD_IntrStatus : Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */
+/* SD_Capabilities : Offset 0x040 , size = bytes */
+/* SD_MaxCurCap : Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+#endif /* _SDIOH_H */
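
The _M/_S pairs above are built for shift-and-mask extraction. A minimal sketch follows, assuming BITFIELD_MASK(w) expands to ((1 << (w)) - 1) as elsewhere in this driver, and using a stand-in GFIELD_SKETCH() helper rather than the driver's real accessor.

/* illustrative sketch, not part of the patch */
#define GFIELD_SKETCH(val, field)  (((val) >> field ## _S) & field ## _M)

static uint32 sdioh_base_clock_code(uint32 caps)
{
	/* 6-bit base-clock field of SD_Capabilities, bits [13:8] */
	return GFIELD_SKETCH(caps, CAP_BASECLK);
}

static bool sdioh_adma2_capable(uint32 caps)
{
	return GFIELD_SKETCH(caps, CAP_ADMA2) != 0;
}
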
diff --git a/drivers/net/wireless/bcm4329/include/sdiovar.h b/drivers/net/wireless/bcm4329/include/sdiovar.h
new file mode 100644
index 000000000000..0179d4cb96db
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h,v 13.5.14.2.16.2 2009/12/08 22:34:21 Exp $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
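
A minimal sketch of the app-side use these definitions serve: filling an sdreg_t for a register read/write request and combining the SDH_* message-level bits. The ioctl/iovar names that would carry the structure are not shown in this header and are not assumed here.

/* illustrative sketch, not part of the patch */
static sdreg_t make_sdreg(int func, int offset, int value)
{
	sdreg_t r;

	r.func   = func;	/* SDIO function number */
	r.offset = offset;	/* register offset within that function */
	r.value  = value;	/* value to write; ignored for a read */
	return r;
}

/* a typical debug mask: errors plus trace messages */
static const uint default_sd_msglevel = SDH_ERROR_VAL | SDH_TRACE_VAL;
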
diff --git a/drivers/net/wireless/bcm4329/include/siutils.h b/drivers/net/wireless/bcm4329/include/siutils.h
new file mode 100644
index 000000000000..cb9f1407b73b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/siutils.h
@@ -0,0 +1,235 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h,v 13.197.4.2.4.3.8.16 2010/06/23 21:36:05 Exp $
+ */
+
+
+#ifndef _siutils_h_
+#define _siutils_h_
+
+
+struct si_pub {
+ uint socitype;
+
+ uint bustype;
+ uint buscoretype;
+ uint buscorerev;
+ uint buscoreidx;
+ int ccrev;
+ uint32 cccaps;
+ int pmurev;
+ uint32 pmucaps;
+ uint boardtype;
+ uint boardvendor;
+ uint boardflags;
+ uint chip;
+ uint chiprev;
+ uint chippkg;
+ uint32 chipst;
+ bool issim;
+ uint socirev;
+ bool pci_pr32414;
+};
+
+#if defined(WLC_HIGH) && !defined(WLC_LOW)
+typedef struct si_pub si_t;
+#else
+typedef const struct si_pub si_t;
+#endif
+
+
+#define SI_OSH NULL
+
+
+#define XTAL 0x1
+#define PLL 0x2
+
+
+#define CLK_FAST 0
+#define CLK_DYNAMIC 2
+
+
+#define GPIO_DRV_PRIORITY 0
+#define GPIO_APP_PRIORITY 1
+#define GPIO_HI_PRIORITY 2
+
+
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+
+#define GPIO_REGEVT 0
+#define GPIO_REGEVT_INTMSK 1
+#define GPIO_REGEVT_INTPOL 2
+
+
+#define SI_DEVPATH_BUFSZ 16
+
+
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2
+#define SI_PCIUP 3
+
+#define ISSIM_ENAB(sih) 0
+
+
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih) (BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih) (0)
+#define CCPLL_ENAB(sih) (0)
+#else
+#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+
+
+
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void *si_coreregs(si_t *sih);
+extern void si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_tofixup(si_t *sih);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint32 si_clock(si_t *sih);
+extern void si_clock_pmu_spuravoid(si_t *sih, bool spuravoid);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern bool si_backplane64(si_t *sih);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+
+
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci_init(sih) (0)
+#define si_eci_notify_bt(sih, type, val, interrupt) (0)
+
+
+
+extern int si_devpath(si_t *sih, char *path, int size);
+
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pcie_war_ovr_disable(si_t *sih);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+
+
+
+
+
+
+
+#endif
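
The declarations above form the SOC interconnect (si_*) accessor API used throughout the driver. As a quick orientation only, here is a minimal usage sketch; it is not part of this patch, the core id/unit and register window are hypothetical caller inputs, and SI_BUS / osl_t are assumed to come from the driver's other headers.

#include <typedefs.h>
#include <osl.h>
#include <siutils.h>

/*
 * Illustrative only: attach to the backplane, peek at one core, detach.
 * The register window, core id/unit and osl handle are caller-supplied
 * placeholders; SI_BUS is assumed from the driver's SoC headers.
 */
static int example_query_core(osl_t *osh, void *regs, uint coreid, uint coreunit)
{
	char *vars = NULL;
	uint varsz = 0;
	uint origidx;
	si_t *sih;

	sih = si_attach(0 /* pcidev */, osh, regs, SI_BUS, NULL, &vars, &varsz);
	if (sih == NULL)
		return -1;

	origidx = si_coreidx(sih);		/* remember the currently mapped core */
	if (si_setcore(sih, coreid, coreunit) != NULL) {
		uint rev = si_corerev(sih);	/* revision of the mapped core */
		bool up = si_iscoreup(sih);	/* is the core out of reset? */
		(void)rev;
		(void)up;
	}
	si_setcoreidx(sih, origidx);		/* restore the original mapping */

	si_detach(sih);
	return 0;
}
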
diff --git a/drivers/net/wireless/bcm4329/include/spid.h b/drivers/net/wireless/bcm4329/include/spid.h
new file mode 100644
index 000000000000..c740296de9af
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/spid.h
@@ -0,0 +1,153 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: spid.h,v 1.7.10.1.16.3 2009/04/09 19:23:14 Exp $
+ */
+
+#ifndef _SPI_H
+#define _SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+ uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */
+ uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */
+ uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay
+ * function selection, command/data error check
+ */
+ uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+ uint16 intr_reg; /* 0x04, Intr status register */
+ uint16 intr_en_reg; /* 0x06, Intr mask register */
+ uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */
+ uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */
+ uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */
+ uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */
+ uint32 test_read; /* 0x14, RO 0xfeedbead signature */
+ uint32 test_rw; /* 0x18, RW */
+ uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+ uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+ uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+ uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG 0x00
+#define SPID_RESPONSE_DELAY 0x01
+#define SPID_STATUS_ENABLE 0x02
+#define SPID_RESET_BP 0x03 /* (corerev >= 1) */
+#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG 0x08 /* 32 bits */
+#define SPID_F1_INFO_REG 0x0C /* 16 bits */
+#define SPID_F2_INFO_REG 0x0E /* 16 bits */
+#define SPID_F3_INFO_REG 0x10 /* 16 bits */
+#define SPID_TEST_READ 0x14 /* 32 bits */
+#define SPID_TEST_RW 0x18 /* 32 bits */
+#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */
+#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */
+#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */
+#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */
+#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */
+#define RESET_SPI 0x80 /* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW 0x0002
+#define F2_F3_FIFO_WR_OVERFLOW 0x0004
+#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */
+#define DATA_ERROR 0x0010 /* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE 0x0020
+#define F3_PACKET_AVAILABLE 0x0040
+#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */
+#define MISC_INTR0 0x0100
+#define MISC_INTR1 0x0200
+#define MISC_INTR2 0x0400
+#define MISC_INTR3 0x0800
+#define MISC_INTR4 0x1000
+#define F1_INTR 0x2000
+#define F2_INTR 0x4000
+#define F3_INTR 0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE 0x00000001
+#define STATUS_UNDERFLOW 0x00000002
+#define STATUS_OVERFLOW 0x00000004
+#define STATUS_F2_INTR 0x00000008
+#define STATUS_F3_INTR 0x00000010
+#define STATUS_F2_RX_READY 0x00000020
+#define STATUS_F3_RX_READY 0x00000040
+#define STATUS_HOST_CMD_DATA_ERR 0x00000080
+#define STATUS_F2_PKT_AVAILABLE 0x00000100
+#define STATUS_F2_PKT_LEN_MASK 0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT 9
+#define STATUS_F3_PKT_AVAILABLE 0x00100000
+#define STATUS_F3_PKT_LEN_MASK 0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT 21
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 0x0001
+#define F1_RDY_FOR_DATA_TRANSFER 0x0002
+#define F1_MAX_PKT_SIZE 0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 0x0001
+#define F2_RDY_FOR_DATA_TRANSFER 0x0002
+#define F2_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 0x0001
+#define F3_RDY_FOR_DATA_TRANSFER 0x0002
+#define F3_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+
+#define SPI_MAX_PKT_LEN (2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0 0
+#define SPI_FUNC_1 1
+#define SPI_FUNC_2 2
+#define SPI_FUNC_3 3
+
+#define WAIT_F2RXFIFORDY 100
+#define WAIT_F2RXFIFORDY_DELAY 20
+
+#endif /* _SPI_H */
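
The SPID_STATUS_REG layout above packs the F2/F3 packet-available flags and pending-length fields into one 32-bit word. A small decoding helper, shown purely to illustrate the masks and shifts defined here (it is not part of the patch), might look like this:

#include <typedefs.h>
#include <spid.h>

/* Returns TRUE when F2 has a packet pending and, if so, extracts the
 * length field from the 32-bit value read from SPID_STATUS_REG. */
static bool spi_f2_pkt_pending(uint32 status, uint32 *len_out)
{
	if (!(status & STATUS_F2_PKT_AVAILABLE))
		return FALSE;
	if (len_out != NULL)
		*len_out = (status & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT;
	return TRUE;
}
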
diff --git a/drivers/net/wireless/bcm4329/include/trxhdr.h b/drivers/net/wireless/bcm4329/include/trxhdr.h
new file mode 100644
index 000000000000..8f5eed9410eb
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/trxhdr.h
@@ -0,0 +1,46 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h,v 13.11.310.1 2008/08/17 12:58:58 Exp $
+ */
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_VERSION 1 /* Version 1 */
+#define TRX_MAX_LEN 0x3A0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_MAX_OFFSET 3 /* Max number of individual files */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+};
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
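
The struct above defines the on-media layout: flag_version carries the flags in bits 0:15 and the version in bits 16:31, and TRX_MAX_OFFSET bounds the partition table. A hedged validation sketch follows (not part of the patch; ltoh32() is assumed to be the driver's little-endian helper from bcmendian.h, and CRC verification is deliberately omitted):

#include <typedefs.h>
#include <bcmendian.h>
#include <trxhdr.h>

/* Minimal sanity check of a TRX header: magic, version and length only. */
static int trx_hdr_looks_valid(const struct trx_header *hdr)
{
	uint32 flag_version = ltoh32(hdr->flag_version);

	if (ltoh32(hdr->magic) != TRX_MAGIC)
		return 0;
	if ((flag_version >> 16) != TRX_VERSION)	/* version lives in bits 16:31 */
		return 0;
	if (ltoh32(hdr->len) > TRX_MAX_LEN)
		return 0;
	return 1;
}
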
diff --git a/drivers/net/wireless/bcm4329/include/typedefs.h b/drivers/net/wireless/bcm4329/include/typedefs.h
new file mode 100644
index 000000000000..4d9dd761ed64
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/typedefs.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h,v 1.85.34.1.2.5 2009/01/27 04:09:40 Exp $
+ */
+
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+
+
+#include "site_typedefs.h"
+
+#else
+
+
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE false
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+
+#else
+
+
+#endif
+
+#if defined(__x86_64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+#if defined(TARGETOS_nucleus)
+
+#include <stddef.h>
+
+
+#define TYPEDEF_FLOAT_T
+#endif
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+#ifdef __DJGPP__
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif
+#endif
+
+
+
+
+
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif
+
+
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif
+
+#if !defined(__DJGPP__) && !defined(TARGETOS_nucleus)
+
+
+#if defined(__KERNEL__)
+
+#include <linux/types.h>
+
+#else
+
+
+#include <sys/types.h>
+
+#endif
+
+#endif
+
+
+
+
+#define USE_TYPEDEF_DEFAULTS
+
+#endif
+
+
+
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef unsigned char bool;
+#endif
+
+
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long ulong;
+#endif
+
+
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT32
+typedef float float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double float64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else
+typedef float64 float_t;
+#endif
+
+#endif
+
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef OFF
+#define OFF 0
+#endif
+
+#ifndef ON
+#define ON 1
+#endif
+
+#define AUTO (-1)
+
+
+
+#ifndef PTRSZ
+#define PTRSZ sizeof(char*)
+#endif
+
+
+
+#if defined(__GNUC__)
+ #define BWL_COMPILER_GNU
+#elif defined(__CC_ARM)
+ #define BWL_COMPILER_ARMCC
+#else
+ #error "Unknown compiler!"
+#endif
+
+
+#ifndef INLINE
+ #if defined(BWL_COMPILER_MICROSOFT)
+ #define INLINE __inline
+ #elif defined(BWL_COMPILER_GNU)
+ #define INLINE __inline__
+ #elif defined(BWL_COMPILER_ARMCC)
+ #define INLINE __inline
+ #else
+ #define INLINE
+ #endif
+#endif
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif
+
+
+#define UNUSED_PARAMETER(x) (void)(x)
+
+
+#include <bcmdefs.h>
+
+#endif
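
The TYPEDEF_* guards above exist so a port can suppress individual fallback typedefs when its toolchain already supplies the same name; the guards are #undef'd again at the end of the header. A hypothetical port (not part of this patch) would use them like this:

/* Platform glue for a toolchain that already provides uint32 and uintptr:
 * pre-define the matching guards so typedefs.h keeps its other fallbacks
 * but skips these two. Hypothetical example only. */
#define TYPEDEF_UINT32
#define TYPEDEF_UINTPTR
#include <typedefs.h>
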
diff --git a/drivers/net/wireless/bcm4329/include/wlioctl.h b/drivers/net/wireless/bcm4329/include/wlioctl.h
new file mode 100644
index 000000000000..00c61f10782f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/wlioctl.h
@@ -0,0 +1,1673 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h,v 1.601.4.15.2.14.2.62.4.3 2011/02/09 23:31:02 Exp $
+ */
+
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <bcmwifi.h>
+
+
+
+#define ACTION_FRAME_SIZE 1040
+
+typedef struct wl_action_frame {
+ struct ether_addr da;
+ uint16 len;
+ uint32 packetId;
+ uint8 data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+#define RWL_ACTION_WIFI_CATEGORY 127
+#define RWL_WIFI_OUI_BYTE1 0x90
+#define RWL_WIFI_OUI_BYTE2 0x4C
+#define RWL_WIFI_OUI_BYTE3 0x0F
+#define RWL_WIFI_ACTION_FRAME_SIZE sizeof(struct dot11_action_wifi_vendor_specific)
+#define RWL_WIFI_DEFAULT 0x00
+#define RWL_WIFI_FIND_MY_PEER 0x09
+#define RWL_WIFI_FOUND_PEER 0x0A
+#define RWL_ACTION_WIFI_FRAG_TYPE 0x55
+
+typedef struct ssid_info
+{
+ uint8 ssid_len;
+ uint8 ssid[32];
+} ssid_info_t;
+
+typedef struct cnt_rx
+{
+ uint32 cnt_rxundec;
+ uint32 cnt_rxframe;
+} cnt_rx_t;
+
+
+
+#define RWL_REF_MAC_ADDRESS_OFFSET 17
+#define RWL_DUT_MAC_ADDRESS_OFFSET 23
+#define RWL_WIFI_CLIENT_CHANNEL_OFFSET 50
+#define RWL_WIFI_SERVER_CHANNEL_OFFSET 51
+
+
+
+
+
+#define WL_BSS_INFO_VERSION 108
+
+
+typedef struct wl_bss_info {
+ uint32 version;
+ uint32 length;
+ struct ether_addr BSSID;
+ uint16 beacon_period;
+ uint16 capability;
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count;
+ uint8 rates[16];
+ } rateset;
+ chanspec_t chanspec;
+ uint16 atim_window;
+ uint8 dtim_period;
+ int16 RSSI;
+ int8 phy_noise;
+
+ uint8 n_cap;
+ uint32 nbss_cap;
+ uint8 ctl_ch;
+ uint32 reserved32[1];
+ uint8 flags;
+ uint8 reserved[3];
+ uint8 basic_mcs[MCSSET_LEN];
+
+ uint16 ie_offset;
+ uint32 ie_length;
+
+
+} wl_bss_info_t;
+
+typedef struct wlc_ssid {
+ uint32 SSID_len;
+ uchar SSID[32];
+} wlc_ssid_t;
+
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY 2
+
+
+#define WL_SCANFLAGS_PASSIVE 0x01
+#define WL_SCANFLAGS_PROHIBITED 0x04
+
+typedef struct wl_scan_params {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ int8 bss_type;
+ int8 scan_type;
+ int32 nprobes;
+ int32 active_time;
+ int32 passive_time;
+ int32 home_time;
+ int32 channel_num;
+ uint16 channel_list[1];
+} wl_scan_params_t;
+
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+
+
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START 1
+#define WL_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+#define ISCAN_REQ_VERSION 1
+
+
+typedef struct wl_iscan_params {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ wl_scan_params_t params;
+} wl_iscan_params_t;
+
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+#define WL_SCAN_RESULTS_FIXED_SIZE 12
+
+
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM 4
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+ uint32 buflen;
+ uint32 version;
+ uint16 sync_id;
+ uint16 bss_count;
+ wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+
+typedef struct wl_iscan_results {
+ uint32 status;
+ wl_scan_results_t results;
+} wl_iscan_results_t;
+
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+#define WL_NUMRATES 16
+typedef struct wl_rateset {
+ uint32 count;
+ uint8 rates[WL_NUMRATES];
+} wl_rateset_t;
+
+
+typedef struct wl_uint32_list {
+
+ uint32 count;
+
+ uint32 element[1];
+} wl_uint32_list_t;
+
+
+typedef struct wl_assoc_params {
+ struct ether_addr bssid;
+ uint16 bssid_cnt;
+ int32 chanspec_num;
+ chanspec_t chanspec_list[1];
+} wl_assoc_params_t;
+#define WL_ASSOC_PARAMS_FIXED_SIZE (sizeof(wl_assoc_params_t) - sizeof(chanspec_t))
+
+
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+
+typedef struct wl_join_params {
+ wlc_ssid_t ssid;
+ wl_assoc_params_t params;
+} wl_join_params_t;
+#define WL_JOIN_PARAMS_FIXED_SIZE (sizeof(wl_join_params_t) - sizeof(chanspec_t))
+
+#define WLC_CNTRY_BUF_SZ 4
+
+typedef struct wl_country {
+ char country_abbrev[WLC_CNTRY_BUF_SZ];
+ int32 rev;
+ char ccode[WLC_CNTRY_BUF_SZ];
+} wl_country_t;
+
+typedef enum sup_auth_status {
+
+ WLC_SUP_DISCONNECTED = 0,
+ WLC_SUP_CONNECTING,
+ WLC_SUP_IDREQUIRED,
+ WLC_SUP_AUTHENTICATING,
+ WLC_SUP_AUTHENTICATED,
+ WLC_SUP_KEYXCHANGE,
+ WLC_SUP_KEYED,
+ WLC_SUP_TIMEOUT,
+ WLC_SUP_LAST_BASIC_STATE,
+
+
+ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+
+ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+
+ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+
+ WLC_SUP_KEYXCHANGE_PREP_M4,
+ WLC_SUP_KEYXCHANGE_WAIT_G1,
+ WLC_SUP_KEYXCHANGE_PREP_G2
+} sup_auth_status_t;
+
+
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_OCB_MSDU 5
+#define CRYPTO_ALGO_AES_OCB_MPDU 6
+#define CRYPTO_ALGO_NALG 7
+#define CRYPTO_ALGO_SMS4 11
+
+#define WSEC_GEN_MIC_ERROR 0x0001
+#define WSEC_GEN_REPLAY 0x0002
+#define WSEC_GEN_ICV_ERROR 0x0004
+
+#define WL_SOFT_KEY (1 << 0)
+#define WL_PRIMARY_KEY (1 << 1)
+#define WL_KF_RES_4 (1 << 4)
+#define WL_KF_RES_5 (1 << 5)
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6)
+
+typedef struct wl_wsec_key {
+ uint32 index;
+ uint32 len;
+ uint8 data[DOT11_MAX_KEY_SIZE];
+ uint32 pad_1[18];
+ uint32 algo;
+ uint32 flags;
+ uint32 pad_2[2];
+ int pad_3;
+ int iv_initialized;
+ int pad_4;
+
+ struct {
+ uint32 hi;
+ uint16 lo;
+ } rxiv;
+ uint32 pad_5[2];
+ struct ether_addr ea;
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN 8
+#define WSEC_MAX_PSK_LEN 64
+
+
+#define WSEC_PASSPHRASE (1<<0)
+
+
+typedef struct {
+ ushort key_len;
+ ushort flags;
+ uint8 key[WSEC_MAX_PSK_LEN];
+} wsec_pmk_t;
+
+
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#define SES_OW_ENABLED 0x0040
+#define SMS4_ENABLED 0x0100
+
+
+#define WPA_AUTH_DISABLED 0x0000
+#define WPA_AUTH_NONE 0x0001
+#define WPA_AUTH_UNSPECIFIED 0x0002
+#define WPA_AUTH_PSK 0x0004
+
+#define WPA2_AUTH_UNSPECIFIED 0x0040
+#define WPA2_AUTH_PSK 0x0080
+#define BRCM_AUTH_PSK 0x0100
+#define BRCM_AUTH_DPT 0x0200
+#define WPA_AUTH_WAPI 0x0400
+
+#define WPA_AUTH_PFN_ANY 0xffffffff
+
+
+#define MAXPMKID 16
+
+typedef struct _pmkid {
+ struct ether_addr BSSID;
+ uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+ uint32 npmkid;
+ pmkid_t pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+ struct ether_addr BSSID;
+ uint8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+ uint32 npmkid_cand;
+ pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+
+
+
+typedef struct {
+ uint32 val;
+ struct ether_addr ea;
+} scb_val_t;
+
+
+
+typedef struct channel_info {
+ int hw_channel;
+ int target_channel;
+ int scan_channel;
+} channel_info_t;
+
+
+struct maclist {
+ uint count;
+ struct ether_addr ea[1];
+};
+
+
+typedef struct get_pktcnt {
+ uint rx_good_pkt;
+ uint rx_bad_pkt;
+ uint tx_good_pkt;
+ uint tx_bad_pkt;
+ uint rx_ocast_good_pkt;
+} get_pktcnt_t;
+
+
+typedef struct wl_ioctl {
+ uint cmd;
+ void *buf;
+ uint len;
+ uint8 set;
+ uint used;
+ uint needed;
+} wl_ioctl_t;
+
+
+
+#define WLC_IOCTL_MAGIC 0x14e46c77
+
+
+#define WLC_IOCTL_VERSION 1
+
+#define WLC_IOCTL_MAXLEN 8192
+#define WLC_IOCTL_SMLEN 256
+#define WLC_IOCTL_MEDLEN 1536
+
+
+
+#define WLC_GET_MAGIC 0
+#define WLC_GET_VERSION 1
+#define WLC_UP 2
+#define WLC_DOWN 3
+#define WLC_GET_LOOP 4
+#define WLC_SET_LOOP 5
+#define WLC_DUMP 6
+#define WLC_GET_MSGLEVEL 7
+#define WLC_SET_MSGLEVEL 8
+#define WLC_GET_PROMISC 9
+#define WLC_SET_PROMISC 10
+
+#define WLC_GET_RATE 12
+
+#define WLC_GET_INSTANCE 14
+
+
+
+
+#define WLC_GET_INFRA 19
+#define WLC_SET_INFRA 20
+#define WLC_GET_AUTH 21
+#define WLC_SET_AUTH 22
+#define WLC_GET_BSSID 23
+#define WLC_SET_BSSID 24
+#define WLC_GET_SSID 25
+#define WLC_SET_SSID 26
+#define WLC_RESTART 27
+
+#define WLC_GET_CHANNEL 29
+#define WLC_SET_CHANNEL 30
+#define WLC_GET_SRL 31
+#define WLC_SET_SRL 32
+#define WLC_GET_LRL 33
+#define WLC_SET_LRL 34
+#define WLC_GET_PLCPHDR 35
+#define WLC_SET_PLCPHDR 36
+#define WLC_GET_RADIO 37
+#define WLC_SET_RADIO 38
+#define WLC_GET_PHYTYPE 39
+#define WLC_DUMP_RATE 40
+#define WLC_SET_RATE_PARAMS 41
+
+
+#define WLC_GET_KEY 44
+#define WLC_SET_KEY 45
+#define WLC_GET_REGULATORY 46
+#define WLC_SET_REGULATORY 47
+#define WLC_GET_PASSIVE_SCAN 48
+#define WLC_SET_PASSIVE_SCAN 49
+#define WLC_SCAN 50
+#define WLC_SCAN_RESULTS 51
+#define WLC_DISASSOC 52
+#define WLC_REASSOC 53
+#define WLC_GET_ROAM_TRIGGER 54
+#define WLC_SET_ROAM_TRIGGER 55
+#define WLC_GET_ROAM_DELTA 56
+#define WLC_SET_ROAM_DELTA 57
+#define WLC_GET_ROAM_SCAN_PERIOD 58
+#define WLC_SET_ROAM_SCAN_PERIOD 59
+#define WLC_EVM 60
+#define WLC_GET_TXANT 61
+#define WLC_SET_TXANT 62
+#define WLC_GET_ANTDIV 63
+#define WLC_SET_ANTDIV 64
+
+
+#define WLC_GET_CLOSED 67
+#define WLC_SET_CLOSED 68
+#define WLC_GET_MACLIST 69
+#define WLC_SET_MACLIST 70
+#define WLC_GET_RATESET 71
+#define WLC_SET_RATESET 72
+
+#define WLC_LONGTRAIN 74
+#define WLC_GET_BCNPRD 75
+#define WLC_SET_BCNPRD 76
+#define WLC_GET_DTIMPRD 77
+#define WLC_SET_DTIMPRD 78
+#define WLC_GET_SROM 79
+#define WLC_SET_SROM 80
+#define WLC_GET_WEP_RESTRICT 81
+#define WLC_SET_WEP_RESTRICT 82
+#define WLC_GET_COUNTRY 83
+#define WLC_SET_COUNTRY 84
+#define WLC_GET_PM 85
+#define WLC_SET_PM 86
+#define WLC_GET_WAKE 87
+#define WLC_SET_WAKE 88
+
+#define WLC_GET_FORCELINK 90
+#define WLC_SET_FORCELINK 91
+#define WLC_FREQ_ACCURACY 92
+#define WLC_CARRIER_SUPPRESS 93
+#define WLC_GET_PHYREG 94
+#define WLC_SET_PHYREG 95
+#define WLC_GET_RADIOREG 96
+#define WLC_SET_RADIOREG 97
+#define WLC_GET_REVINFO 98
+#define WLC_GET_UCANTDIV 99
+#define WLC_SET_UCANTDIV 100
+#define WLC_R_REG 101
+#define WLC_W_REG 102
+
+
+#define WLC_GET_MACMODE 105
+#define WLC_SET_MACMODE 106
+#define WLC_GET_MONITOR 107
+#define WLC_SET_MONITOR 108
+#define WLC_GET_GMODE 109
+#define WLC_SET_GMODE 110
+#define WLC_GET_LEGACY_ERP 111
+#define WLC_SET_LEGACY_ERP 112
+#define WLC_GET_RX_ANT 113
+#define WLC_GET_CURR_RATESET 114
+#define WLC_GET_SCANSUPPRESS 115
+#define WLC_SET_SCANSUPPRESS 116
+#define WLC_GET_AP 117
+#define WLC_SET_AP 118
+#define WLC_GET_EAP_RESTRICT 119
+#define WLC_SET_EAP_RESTRICT 120
+#define WLC_SCB_AUTHORIZE 121
+#define WLC_SCB_DEAUTHORIZE 122
+#define WLC_GET_WDSLIST 123
+#define WLC_SET_WDSLIST 124
+#define WLC_GET_ATIM 125
+#define WLC_SET_ATIM 126
+#define WLC_GET_RSSI 127
+#define WLC_GET_PHYANTDIV 128
+#define WLC_SET_PHYANTDIV 129
+#define WLC_AP_RX_ONLY 130
+#define WLC_GET_TX_PATH_PWR 131
+#define WLC_SET_TX_PATH_PWR 132
+#define WLC_GET_WSEC 133
+#define WLC_SET_WSEC 134
+#define WLC_GET_PHY_NOISE 135
+#define WLC_GET_BSS_INFO 136
+#define WLC_GET_PKTCNTS 137
+#define WLC_GET_LAZYWDS 138
+#define WLC_SET_LAZYWDS 139
+#define WLC_GET_BANDLIST 140
+#define WLC_GET_BAND 141
+#define WLC_SET_BAND 142
+#define WLC_SCB_DEAUTHENTICATE 143
+#define WLC_GET_SHORTSLOT 144
+#define WLC_GET_SHORTSLOT_OVERRIDE 145
+#define WLC_SET_SHORTSLOT_OVERRIDE 146
+#define WLC_GET_SHORTSLOT_RESTRICT 147
+#define WLC_SET_SHORTSLOT_RESTRICT 148
+#define WLC_GET_GMODE_PROTECTION 149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
+#define WLC_UPGRADE 152
+
+
+#define WLC_GET_IGNORE_BCNS 155
+#define WLC_SET_IGNORE_BCNS 156
+#define WLC_GET_SCB_TIMEOUT 157
+#define WLC_SET_SCB_TIMEOUT 158
+#define WLC_GET_ASSOCLIST 159
+#define WLC_GET_CLK 160
+#define WLC_SET_CLK 161
+#define WLC_GET_UP 162
+#define WLC_OUT 163
+#define WLC_GET_WPA_AUTH 164
+#define WLC_SET_WPA_AUTH 165
+#define WLC_GET_UCFLAGS 166
+#define WLC_SET_UCFLAGS 167
+#define WLC_GET_PWRIDX 168
+#define WLC_SET_PWRIDX 169
+#define WLC_GET_TSSI 170
+#define WLC_GET_SUP_RATESET_OVERRIDE 171
+#define WLC_SET_SUP_RATESET_OVERRIDE 172
+
+
+
+
+
+#define WLC_GET_PROTECTION_CONTROL 178
+#define WLC_SET_PROTECTION_CONTROL 179
+#define WLC_GET_PHYLIST 180
+#define WLC_ENCRYPT_STRENGTH 181
+#define WLC_DECRYPT_STATUS 182
+#define WLC_GET_KEY_SEQ 183
+#define WLC_GET_SCAN_CHANNEL_TIME 184
+#define WLC_SET_SCAN_CHANNEL_TIME 185
+#define WLC_GET_SCAN_UNASSOC_TIME 186
+#define WLC_SET_SCAN_UNASSOC_TIME 187
+#define WLC_GET_SCAN_HOME_TIME 188
+#define WLC_SET_SCAN_HOME_TIME 189
+#define WLC_GET_SCAN_NPROBES 190
+#define WLC_SET_SCAN_NPROBES 191
+#define WLC_GET_PRB_RESP_TIMEOUT 192
+#define WLC_SET_PRB_RESP_TIMEOUT 193
+#define WLC_GET_ATTEN 194
+#define WLC_SET_ATTEN 195
+#define WLC_GET_SHMEM 196
+#define WLC_SET_SHMEM 197
+
+
+#define WLC_SET_WSEC_TEST 200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define WLC_TKIP_COUNTERMEASURES 202
+#define WLC_GET_PIOMODE 203
+#define WLC_SET_PIOMODE 204
+#define WLC_SET_ASSOC_PREFER 205
+#define WLC_GET_ASSOC_PREFER 206
+#define WLC_SET_ROAM_PREFER 207
+#define WLC_GET_ROAM_PREFER 208
+#define WLC_SET_LED 209
+#define WLC_GET_LED 210
+#define WLC_GET_INTERFERENCE_MODE 211
+#define WLC_SET_INTERFERENCE_MODE 212
+#define WLC_GET_CHANNEL_QA 213
+#define WLC_START_CHANNEL_QA 214
+#define WLC_GET_CHANNEL_SEL 215
+#define WLC_START_CHANNEL_SEL 216
+#define WLC_GET_VALID_CHANNELS 217
+#define WLC_GET_FAKEFRAG 218
+#define WLC_SET_FAKEFRAG 219
+#define WLC_GET_PWROUT_PERCENTAGE 220
+#define WLC_SET_PWROUT_PERCENTAGE 221
+#define WLC_SET_BAD_FRAME_PREEMPT 222
+#define WLC_GET_BAD_FRAME_PREEMPT 223
+#define WLC_SET_LEAP_LIST 224
+#define WLC_GET_LEAP_LIST 225
+#define WLC_GET_CWMIN 226
+#define WLC_SET_CWMIN 227
+#define WLC_GET_CWMAX 228
+#define WLC_SET_CWMAX 229
+#define WLC_GET_WET 230
+#define WLC_SET_WET 231
+#define WLC_GET_PUB 232
+
+
+#define WLC_GET_KEY_PRIMARY 235
+#define WLC_SET_KEY_PRIMARY 236
+
+#define WLC_GET_ACI_ARGS 238
+#define WLC_SET_ACI_ARGS 239
+#define WLC_UNSET_CALLBACK 240
+#define WLC_SET_CALLBACK 241
+#define WLC_GET_RADAR 242
+#define WLC_SET_RADAR 243
+#define WLC_SET_SPECT_MANAGMENT 244
+#define WLC_GET_SPECT_MANAGMENT 245
+#define WLC_WDS_GET_REMOTE_HWADDR 246
+#define WLC_WDS_GET_WPA_SUP 247
+#define WLC_SET_CS_SCAN_TIMER 248
+#define WLC_GET_CS_SCAN_TIMER 249
+#define WLC_MEASURE_REQUEST 250
+#define WLC_INIT 251
+#define WLC_SEND_QUIET 252
+#define WLC_KEEPALIVE 253
+#define WLC_SEND_PWR_CONSTRAINT 254
+#define WLC_UPGRADE_STATUS 255
+#define WLC_CURRENT_PWR 256
+#define WLC_GET_SCAN_PASSIVE_TIME 257
+#define WLC_SET_SCAN_PASSIVE_TIME 258
+#define WLC_LEGACY_LINK_BEHAVIOR 259
+#define WLC_GET_CHANNELS_IN_COUNTRY 260
+#define WLC_GET_COUNTRY_LIST 261
+#define WLC_GET_VAR 262
+#define WLC_SET_VAR 263
+#define WLC_NVRAM_GET 264
+#define WLC_NVRAM_SET 265
+#define WLC_NVRAM_DUMP 266
+#define WLC_REBOOT 267
+#define WLC_SET_WSEC_PMK 268
+#define WLC_GET_AUTH_MODE 269
+#define WLC_SET_AUTH_MODE 270
+#define WLC_GET_WAKEENTRY 271
+#define WLC_SET_WAKEENTRY 272
+#define WLC_NDCONFIG_ITEM 273
+#define WLC_NVOTPW 274
+#define WLC_OTPW 275
+#define WLC_IOV_BLOCK_GET 276
+#define WLC_IOV_MODULES_GET 277
+#define WLC_SOFT_RESET 278
+#define WLC_GET_ALLOW_MODE 279
+#define WLC_SET_ALLOW_MODE 280
+#define WLC_GET_DESIRED_BSSID 281
+#define WLC_SET_DESIRED_BSSID 282
+#define WLC_DISASSOC_MYAP 283
+#define WLC_GET_NBANDS 284
+#define WLC_GET_BANDSTATES 285
+#define WLC_GET_WLC_BSS_INFO 286
+#define WLC_GET_ASSOC_INFO 287
+#define WLC_GET_OID_PHY 288
+#define WLC_SET_OID_PHY 289
+#define WLC_SET_ASSOC_TIME 290
+#define WLC_GET_DESIRED_SSID 291
+#define WLC_GET_CHANSPEC 292
+#define WLC_GET_ASSOC_STATE 293
+#define WLC_SET_PHY_STATE 294
+#define WLC_GET_SCAN_PENDING 295
+#define WLC_GET_SCANREQ_PENDING 296
+#define WLC_GET_PREV_ROAM_REASON 297
+#define WLC_SET_PREV_ROAM_REASON 298
+#define WLC_GET_BANDSTATES_PI 299
+#define WLC_GET_PHY_STATE 300
+#define WLC_GET_BSS_WPA_RSN 301
+#define WLC_GET_BSS_WPA2_RSN 302
+#define WLC_GET_BSS_BCN_TS 303
+#define WLC_GET_INT_DISASSOC 304
+#define WLC_SET_NUM_PEERS 305
+#define WLC_GET_NUM_BSS 306
+#define WLC_LAST 307
+
+
+
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3)
+
+
+#define WL_TXPWR_OVERRIDE (1U<<31)
+
+#define WL_PHY_PAVARS_LEN 6
+
+
+#define WL_DIAG_INTERRUPT 1
+#define WL_DIAG_LOOPBACK 2
+#define WL_DIAG_MEMORY 3
+#define WL_DIAG_LED 4
+#define WL_DIAG_REG 5
+#define WL_DIAG_SROM 6
+#define WL_DIAG_DMA 7
+
+#define WL_DIAGERR_SUCCESS 0
+#define WL_DIAGERR_FAIL_TO_RUN 1
+#define WL_DIAGERR_NOT_SUPPORTED 2
+#define WL_DIAGERR_INTERRUPT_FAIL 3
+#define WL_DIAGERR_LOOPBACK_FAIL 4
+#define WL_DIAGERR_SROM_FAIL 5
+#define WL_DIAGERR_SROM_BADCRC 6
+#define WL_DIAGERR_REG_FAIL 7
+#define WL_DIAGERR_MEMORY_FAIL 8
+#define WL_DIAGERR_NOMEM 9
+#define WL_DIAGERR_DMA_FAIL 10
+
+#define WL_DIAGERR_MEMORY_TIMEOUT 11
+#define WL_DIAGERR_MEMORY_BADPATTERN 12
+
+
+#define WLC_BAND_AUTO 0
+#define WLC_BAND_5G 1
+#define WLC_BAND_2G 2
+#define WLC_BAND_ALL 3
+
+
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_NULL 0xf
+
+
+#define WLC_MACMODE_DISABLED 0
+#define WLC_MACMODE_DENY 1
+#define WLC_MACMODE_ALLOW 2
+
+
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+
+#define WLC_PLCP_AUTO -1
+#define WLC_PLCP_SHORT 0
+#define WLC_PLCP_LONG 1
+
+
+#define WLC_PROTECTION_AUTO -1
+#define WLC_PROTECTION_OFF 0
+#define WLC_PROTECTION_ON 1
+#define WLC_PROTECTION_MMHDR_ONLY 2
+#define WLC_PROTECTION_CTS_ONLY 3
+
+
+#define WLC_PROTECTION_CTL_OFF 0
+#define WLC_PROTECTION_CTL_LOCAL 1
+#define WLC_PROTECTION_CTL_OVERLAP 2
+
+
+#define WLC_N_PROTECTION_OFF 0
+#define WLC_N_PROTECTION_OPTIONAL 1
+#define WLC_N_PROTECTION_20IN40 2
+#define WLC_N_PROTECTION_MIXEDMODE 3
+
+
+#define WLC_N_PREAMBLE_MIXEDMODE 0
+#define WLC_N_PREAMBLE_GF 1
+
+
+#define WLC_N_BW_20ALL 0
+#define WLC_N_BW_40ALL 1
+#define WLC_N_BW_20IN2G_40IN5G 2
+
+
+#define WLC_N_TXRX_CHAIN0 0
+#define WLC_N_TXRX_CHAIN1 1
+
+
+#define WLC_N_SGI_20 0x01
+#define WLC_N_SGI_40 0x02
+
+
+#define PM_OFF 0
+#define PM_MAX 1
+#define PM_FAST 2
+
+#define LISTEN_INTERVAL 10
+
+#define INTERFERE_NONE 0
+#define NON_WLAN 1
+#define WLAN_MANUAL 2
+#define WLAN_AUTO 3
+#define AUTO_ACTIVE (1 << 7)
+
+typedef struct wl_aci_args {
+ int enter_aci_thresh;
+ int exit_aci_thresh;
+ int usec_spin;
+ int glitch_delay;
+ uint16 nphy_adcpwr_enter_thresh;
+ uint16 nphy_adcpwr_exit_thresh;
+ uint16 nphy_repeat_ctr;
+ uint16 nphy_num_samples;
+ uint16 nphy_undetect_window_sz;
+ uint16 nphy_b_energy_lo_aci;
+ uint16 nphy_b_energy_md_aci;
+ uint16 nphy_b_energy_hi_aci;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH 16
+
+
+
+#define WL_ERROR_VAL 0x00000001
+#define WL_TRACE_VAL 0x00000002
+#define WL_PRHDRS_VAL 0x00000004
+#define WL_PRPKT_VAL 0x00000008
+#define WL_INFORM_VAL 0x00000010
+#define WL_TMP_VAL 0x00000020
+#define WL_OID_VAL 0x00000040
+#define WL_RATE_VAL 0x00000080
+#define WL_ASSOC_VAL 0x00000100
+#define WL_PRUSR_VAL 0x00000200
+#define WL_PS_VAL 0x00000400
+#define WL_TXPWR_VAL 0x00000800
+#define WL_PORT_VAL 0x00001000
+#define WL_DUAL_VAL 0x00002000
+#define WL_WSEC_VAL 0x00004000
+#define WL_WSEC_DUMP_VAL 0x00008000
+#define WL_LOG_VAL 0x00010000
+#define WL_NRSSI_VAL 0x00020000
+#define WL_LOFT_VAL 0x00040000
+#define WL_REGULATORY_VAL 0x00080000
+#define WL_PHYCAL_VAL 0x00100000
+#define WL_RADAR_VAL 0x00200000
+#define WL_MPC_VAL 0x00400000
+#define WL_APSTA_VAL 0x00800000
+#define WL_DFS_VAL 0x01000000
+#define WL_BA_VAL 0x02000000
+#define WL_MBSS_VAL 0x04000000
+#define WL_CAC_VAL 0x08000000
+#define WL_AMSDU_VAL 0x10000000
+#define WL_AMPDU_VAL 0x20000000
+#define WL_FFPLD_VAL 0x40000000
+
+
+#define WL_DPT_VAL 0x00000001
+#define WL_SCAN_VAL 0x00000002
+#define WL_WOWL_VAL 0x00000004
+#define WL_COEX_VAL 0x00000008
+#define WL_RTDC_VAL 0x00000010
+#define WL_BTA_VAL 0x00000040
+
+
+#define WL_LED_NUMGPIO 16
+
+
+#define WL_LED_OFF 0
+#define WL_LED_ON 1
+#define WL_LED_ACTIVITY 2
+#define WL_LED_RADIO 3
+#define WL_LED_ARADIO 4
+#define WL_LED_BRADIO 5
+#define WL_LED_BGMODE 6
+#define WL_LED_WI1 7
+#define WL_LED_WI2 8
+#define WL_LED_WI3 9
+#define WL_LED_ASSOC 10
+#define WL_LED_INACTIVE 11
+#define WL_LED_ASSOCACT 12
+#define WL_LED_NUMBEHAVIOR 13
+
+
+#define WL_LED_BEH_MASK 0x7f
+#define WL_LED_AL_MASK 0x80
+
+
+#define WL_NUMCHANNELS 64
+#define WL_NUMCHANSPECS 100
+
+
+#define WL_WDS_WPA_ROLE_AUTH 0
+#define WL_WDS_WPA_ROLE_SUP 1
+#define WL_WDS_WPA_ROLE_AUTO 255
+
+
+#define WL_EVENTING_MASK_LEN 16
+
+
+#define VNDR_IE_CMD_LEN 4
+
+
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG 0x4
+#define VNDR_IE_AUTHRSP_FLAG 0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG 0x20
+#define VNDR_IE_CUSTOM_FLAG 0x100
+
+#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32))
+
+typedef struct {
+ uint32 pktflag;
+ vndr_ie_t vndr_ie_data;
+} vndr_ie_info_t;
+
+typedef struct {
+ int iecount;
+ vndr_ie_info_t vndr_ie_list[1];
+} vndr_ie_buf_t;
+
+typedef struct {
+ char cmd[VNDR_IE_CMD_LEN];
+ vndr_ie_buf_t vndr_ie_buffer;
+} vndr_ie_setbuf_t;
+
+
+
+
+#define WL_JOIN_PREF_RSSI 1
+#define WL_JOIN_PREF_WPA 2
+#define WL_JOIN_PREF_BAND 3
+
+
+#define WLJP_BAND_ASSOC_PREF 255
+
+
+#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
+
+struct tsinfo_arg {
+ uint8 octets[3];
+};
+
+
+#define NFIFO 6
+
+#define WL_CNT_T_VERSION 5
+#define WL_CNT_EXT_T_VERSION 1
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+
+ uint32 txframe;
+ uint32 txbyte;
+ uint32 txretrans;
+ uint32 txerror;
+ uint32 txctl;
+ uint32 txprshort;
+ uint32 txserr;
+ uint32 txnobuf;
+ uint32 txnoassoc;
+ uint32 txrunt;
+ uint32 txchit;
+ uint32 txcmiss;
+
+
+ uint32 txuflo;
+ uint32 txphyerr;
+ uint32 txphycrs;
+
+
+ uint32 rxframe;
+ uint32 rxbyte;
+ uint32 rxerror;
+ uint32 rxctl;
+ uint32 rxnobuf;
+ uint32 rxnondata;
+ uint32 rxbadds;
+ uint32 rxbadcm;
+ uint32 rxfragerr;
+ uint32 rxrunt;
+ uint32 rxgiant;
+ uint32 rxnoscb;
+ uint32 rxbadproto;
+ uint32 rxbadsrcmac;
+ uint32 rxbadda;
+ uint32 rxfilter;
+
+
+ uint32 rxoflo;
+ uint32 rxuflo[NFIFO];
+
+ uint32 d11cnt_txrts_off;
+ uint32 d11cnt_rxcrc_off;
+ uint32 d11cnt_txnocts_off;
+
+
+ uint32 dmade;
+ uint32 dmada;
+ uint32 dmape;
+ uint32 reset;
+ uint32 tbtt;
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail;
+
+
+ uint32 txallfrm;
+ uint32 txrtsfrm;
+ uint32 txctsfrm;
+ uint32 txackfrm;
+ uint32 txdnlfrm;
+ uint32 txbcnfrm;
+ uint32 txfunfl[8];
+ uint32 txtplunfl;
+ uint32 txphyerror;
+ uint32 rxfrmtoolong;
+ uint32 rxfrmtooshrt;
+ uint32 rxinvmachdr;
+ uint32 rxbadfcs;
+ uint32 rxbadplcp;
+ uint32 rxcrsglitch;
+ uint32 rxstrt;
+ uint32 rxdfrmucastmbss;
+ uint32 rxmfrmucastmbss;
+ uint32 rxcfrmucast;
+ uint32 rxrtsucast;
+ uint32 rxctsucast;
+ uint32 rxackucast;
+ uint32 rxdfrmocast;
+ uint32 rxmfrmocast;
+ uint32 rxcfrmocast;
+ uint32 rxrtsocast;
+ uint32 rxctsocast;
+ uint32 rxdfrmmcast;
+ uint32 rxmfrmmcast;
+ uint32 rxcfrmmcast;
+ uint32 rxbeaconmbss;
+ uint32 rxdfrmucastobss;
+ uint32 rxbeaconobss;
+ uint32 rxrsptmout;
+ uint32 bcntxcancl;
+ uint32 rxf0ovfl;
+ uint32 rxf1ovfl;
+ uint32 rxf2ovfl;
+ uint32 txsfovfl;
+ uint32 pmqovfl;
+ uint32 rxcgprqfrm;
+ uint32 rxcgprsqovfl;
+ uint32 txcgprsfail;
+ uint32 txcgprssuc;
+ uint32 prs_timeout;
+ uint32 rxnack;
+ uint32 frmscons;
+ uint32 txnack;
+ uint32 txglitch_nack;
+ uint32 txburst;
+
+
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretrie;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 rxfrag;
+ uint32 rxmulti;
+ uint32 rxcrc;
+ uint32 txfrmsnt;
+ uint32 rxundec;
+
+
+ uint32 tkipmicfaill;
+ uint32 tkipcntrmsr;
+ uint32 tkipreplay;
+ uint32 ccmpfmterr;
+ uint32 ccmpreplay;
+ uint32 ccmpundec;
+ uint32 fourwayfail;
+ uint32 wepundec;
+ uint32 wepicverr;
+ uint32 decsuccess;
+ uint32 tkipicverr;
+ uint32 wepexcluded;
+
+ uint32 txchanrej;
+ uint32 psmwds;
+ uint32 phywatchdog;
+
+
+ uint32 prq_entries_handled;
+ uint32 prq_undirected_entries;
+ uint32 prq_bad_entries;
+ uint32 atim_suppress_count;
+ uint32 bcn_template_not_ready;
+ uint32 bcn_template_not_ready_done;
+ uint32 late_tbtt_dpc;
+
+
+ uint32 rx1mbps;
+ uint32 rx2mbps;
+ uint32 rx5mbps5;
+ uint32 rx6mbps;
+ uint32 rx9mbps;
+ uint32 rx11mbps;
+ uint32 rx12mbps;
+ uint32 rx18mbps;
+ uint32 rx24mbps;
+ uint32 rx36mbps;
+ uint32 rx48mbps;
+ uint32 rx54mbps;
+ uint32 rx108mbps;
+ uint32 rx162mbps;
+ uint32 rx216mbps;
+ uint32 rx270mbps;
+ uint32 rx324mbps;
+ uint32 rx378mbps;
+ uint32 rx432mbps;
+ uint32 rx486mbps;
+ uint32 rx540mbps;
+
+ uint32 pktengrxducast;
+ uint32 pktengrxdmcast;
+} wl_cnt_t;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+ uint32 rxampdu_sgi;
+ uint32 rxampdu_stbc;
+ uint32 rxmpdu_sgi;
+ uint32 rxmpdu_stbc;
+ uint32 rxmcs0_40M;
+ uint32 rxmcs1_40M;
+ uint32 rxmcs2_40M;
+ uint32 rxmcs3_40M;
+ uint32 rxmcs4_40M;
+ uint32 rxmcs5_40M;
+ uint32 rxmcs6_40M;
+ uint32 rxmcs7_40M;
+ uint32 rxmcs32_40M;
+
+ uint32 txfrmsnt_20Mlo;
+ uint32 txfrmsnt_20Mup;
+ uint32 txfrmsnt_40M;
+
+ uint32 rx_20ul;
+} wl_cnt_ext_t;
+
+#define WL_RXDIV_STATS_T_VERSION 1
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+ uint32 rxant[4];
+} wl_rxdiv_stats_t;
+
+#define WL_DELTA_STATS_T_VERSION 1
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+
+ uint32 txframe;
+ uint32 txbyte;
+ uint32 txretrans;
+ uint32 txfail;
+
+
+ uint32 rxframe;
+ uint32 rxbyte;
+
+
+ uint32 rx1mbps;
+ uint32 rx2mbps;
+ uint32 rx5mbps5;
+ uint32 rx6mbps;
+ uint32 rx9mbps;
+ uint32 rx11mbps;
+ uint32 rx12mbps;
+ uint32 rx18mbps;
+ uint32 rx24mbps;
+ uint32 rx36mbps;
+ uint32 rx48mbps;
+ uint32 rx54mbps;
+ uint32 rx108mbps;
+ uint32 rx162mbps;
+ uint32 rx216mbps;
+ uint32 rx270mbps;
+ uint32 rx324mbps;
+ uint32 rx378mbps;
+ uint32 rx432mbps;
+ uint32 rx486mbps;
+ uint32 rx540mbps;
+} wl_delta_stats_t;
+
+#define WL_WME_CNT_VERSION 1
+
+typedef struct {
+ uint32 packets;
+ uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+ wl_traffic_stats_t tx[AC_COUNT];
+ wl_traffic_stats_t tx_failed[AC_COUNT];
+ wl_traffic_stats_t rx[AC_COUNT];
+ wl_traffic_stats_t rx_failed[AC_COUNT];
+
+ wl_traffic_stats_t forward[AC_COUNT];
+
+ wl_traffic_stats_t tx_expired[AC_COUNT];
+
+} wl_wme_cnt_t;
+
+
+
+#define WLC_ROAM_TRIGGER_DEFAULT 0
+#define WLC_ROAM_TRIGGER_BANDWIDTH 1
+#define WLC_ROAM_TRIGGER_DISTANCE 2
+#define WLC_ROAM_TRIGGER_MAX_VALUE 2
+
+
+enum {
+ PFN_LIST_ORDER,
+ PFN_RSSI
+};
+
+enum {
+ DISABLE,
+ ENABLE
+};
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+
+#define SORT_CRITERIA_MASK 0x01
+#define AUTO_NET_SWITCH_MASK 0x02
+#define ENABLE_BKGRD_SCAN_MASK 0x04
+#define IMMEDIATE_SCAN_MASK 0x08
+#define AUTO_CONNECT_MASK 0x10
+#define ENABLE_BD_SCAN_MASK 0x20
+#define ENABLE_ADAPTSCAN_MASK 0x40
+
+#define PFN_VERSION 1
+
+#define MAX_PFN_LIST_COUNT 16
+
+
+typedef struct wl_pfn_param {
+ int32 version;
+ int32 scan_freq;
+ int32 lost_network_timeout;
+ int16 flags;
+ int16 rssi_margin;
+ int32 repeat_scan;
+ int32 max_freq_adjust;
+} wl_pfn_param_t;
+
+typedef struct wl_pfn {
+ wlc_ssid_t ssid;
+ int32 bss_type;
+ int32 infra;
+ int32 auth;
+ uint32 wpa_auth;
+ int32 wsec;
+} wl_pfn_t;
+
+#define PNO_SCAN_MAX_FW			(508 * 1000)
+#define PNO_SCAN_MAX_FW_SEC		(PNO_SCAN_MAX_FW / 1000)
+#define PNO_SCAN_MIN_FW_SEC 10
+
+
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2 0x00000004
+
+struct toe_ol_stats_t {
+
+ uint32 tx_summed;
+
+
+ uint32 tx_iph_fill;
+ uint32 tx_tcp_fill;
+ uint32 tx_udp_fill;
+ uint32 tx_icmp_fill;
+
+
+ uint32 rx_iph_good;
+ uint32 rx_iph_bad;
+ uint32 rx_tcp_good;
+ uint32 rx_tcp_bad;
+ uint32 rx_udp_good;
+ uint32 rx_udp_bad;
+ uint32 rx_icmp_good;
+ uint32 rx_icmp_bad;
+
+
+ uint32 tx_tcp_errinj;
+ uint32 tx_udp_errinj;
+ uint32 tx_icmp_errinj;
+
+
+ uint32 rx_tcp_errinj;
+ uint32 rx_udp_errinj;
+ uint32 rx_icmp_errinj;
+};
+
+
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+
+
+#define ARP_ERRTEST_REPLY_PEER 0x1
+#define ARP_ERRTEST_REPLY_HOST 0x2
+
+#define ARP_MULTIHOMING_MAX 8
+
+
+struct arp_ol_stats_t {
+ uint32 host_ip_entries;
+ uint32 host_ip_overflow;
+
+ uint32 arp_table_entries;
+ uint32 arp_table_overflow;
+
+ uint32 host_request;
+ uint32 host_reply;
+ uint32 host_service;
+
+ uint32 peer_request;
+ uint32 peer_request_drop;
+ uint32 peer_reply;
+ uint32 peer_reply_drop;
+ uint32 peer_service;
+};
+
+
+
+
+
+typedef struct wl_keep_alive_pkt {
+ uint32 period_msec;
+ uint16 len_bytes;
+ uint8 data[1];
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
+
+
+
+
+
+typedef enum wl_pkt_filter_type {
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+
+typedef struct wl_pkt_filter_pattern {
+ uint32 offset;
+ uint32 size_bytes;
+ uint8 mask_and_pattern[1];
+} wl_pkt_filter_pattern_t;
+
+
+typedef struct wl_pkt_filter {
+ uint32 id;
+ uint32 type;
+ uint32 negate_match;
+ union {
+ wl_pkt_filter_pattern_t pattern;
+ } u;
+} wl_pkt_filter_t;
+
+#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+
+
+typedef struct wl_pkt_filter_enable {
+ uint32 id;
+ uint32 enable;
+} wl_pkt_filter_enable_t;
+
+
+typedef struct wl_pkt_filter_list {
+ uint32 num;
+ wl_pkt_filter_t filter[1];
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
+
+
+typedef struct wl_pkt_filter_stats {
+ uint32 num_pkts_matched;
+ uint32 num_pkts_forwarded;
+ uint32 num_pkts_discarded;
+} wl_pkt_filter_stats_t;
+
+
+typedef struct wl_seq_cmd_ioctl {
+ uint32 cmd;
+ uint32 len;
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES 4
+
+
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+ (((cmd) == WLC_GET_MAGIC) || \
+ ((cmd) == WLC_GET_VERSION) || \
+ ((cmd) == WLC_GET_AP) || \
+ ((cmd) == WLC_GET_INSTANCE))
+
+
+
+#define WL_PKTENG_PER_TX_START 0x01
+#define WL_PKTENG_PER_TX_STOP 0x02
+#define WL_PKTENG_PER_RX_START 0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
+#define WL_PKTENG_PER_RX_STOP 0x08
+#define WL_PKTENG_PER_MASK 0xff
+
+#define WL_PKTENG_SYNCHRONOUS 0x100
+
+typedef struct wl_pkteng {
+ uint32 flags;
+ uint32 delay;
+ uint32 nframes;
+ uint32 length;
+ uint8 seqno;
+ struct ether_addr dest;
+ struct ether_addr src;
+} wl_pkteng_t;
+
+#define NUM_80211b_RATES 4
+#define NUM_80211ag_RATES 8
+#define NUM_80211n_RATES 32
+#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+typedef struct wl_pkteng_stats {
+ uint32 lostfrmcnt;
+ int32 rssi;
+ int32 snr;
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+} wl_pkteng_stats_t;
+
+#define WL_WOWL_MAGIC (1 << 0)
+#define WL_WOWL_NET (1 << 1)
+#define WL_WOWL_DIS (1 << 2)
+#define WL_WOWL_RETR (1 << 3)
+#define WL_WOWL_BCN (1 << 4)
+#define WL_WOWL_TST (1 << 5)
+#define WL_WOWL_BCAST (1 << 15)
+
+#define MAGIC_PKT_MINLEN 102
+
+typedef struct {
+ uint masksize;
+ uint offset;
+ uint patternoffset;
+ uint patternsize;
+
+
+} wl_wowl_pattern_t;
+
+typedef struct {
+ uint count;
+ wl_wowl_pattern_t pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct {
+ uint8 pci_wakeind;
+ uint16 ucode_wakeind;
+} wl_wowl_wakeind_t;
+
+
+typedef struct wl_txrate_class {
+ uint8 init_rate;
+ uint8 min_rate;
+ uint8 max_rate;
+} wl_txrate_class_t;
+
+
+
+
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 100
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 20
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100
+
+
+typedef struct wl_obss_scan_arg {
+ int16 passive_dwell;
+ int16 active_dwell;
+ int16 bss_widthscan_interval;
+ int16 passive_total;
+ int16 active_total;
+ int16 chanwidth_transition_delay;
+ int16 activity_threshold;
+} wl_obss_scan_arg_t;
+#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7
+
+#define WL_COEX_INFO_MASK 0x07
+#define WL_COEX_INFO_REQ 0x01
+#define WL_COEX_40MHZ_INTOLERANT 0x02
+#define WL_COEX_WIDTH20 0x04
+
+typedef struct wl_action_obss_coex_req {
+ uint8 info;
+ uint8 num;
+ uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+#define MAX_RSSI_LEVELS 8
+
+
+typedef struct wl_rssi_event {
+
+ uint32 rate_limit_msec;
+
+ uint8 num_rssi_levels;
+
+ int8 rssi_levels[MAX_RSSI_LEVELS];
+} wl_rssi_event_t;
+
+
+
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+
+
+#include <packed_section_end.h>
+
+
+#include <packed_section_start.h>
+
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+ struct ether_addr staAddr;
+ uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+ sta_prbreq_wps_ie_hdr_t hdr;
+ uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+ uint32 totLen;
+ uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#include <packed_section_end.h>
+
+#endif
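
All of the WLC_* codes above are issued through the wl_ioctl_t wrapper defined earlier in this header. As a hedged sketch of how a host-side utility might package a set-SSID request (dev_ioctl() is a hypothetical transport; the structures and the WLC_SET_SSID code are taken from this header):

#include <string.h>
#include <typedefs.h>
#include <wlioctl.h>

/* Hypothetical transport that hands a filled wl_ioctl_t to the driver. */
extern int dev_ioctl(void *dev, wl_ioctl_t *ioc);

static int example_set_ssid(void *dev, const char *name)
{
	wlc_ssid_t ssid;
	wl_ioctl_t ioc;
	size_t n = strlen(name);

	if (n > sizeof(ssid.SSID))
		n = sizeof(ssid.SSID);

	memset(&ssid, 0, sizeof(ssid));
	memcpy(ssid.SSID, name, n);
	ssid.SSID_len = (uint32)n;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_SSID;	/* ioctl code defined above */
	ioc.buf = &ssid;
	ioc.len = sizeof(ssid);
	ioc.set = 1;		/* "set" rather than "get" */

	return dev_ioctl(dev, &ioc);
}
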
diff --git a/drivers/net/wireless/bcm4329/linux_osl.c b/drivers/net/wireless/bcm4329/linux_osl.c
new file mode 100644
index 000000000000..cf72a077bd90
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/linux_osl.c
@@ -0,0 +1,625 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c,v 1.125.12.3.8.7 2010/05/04 21:10:04 Exp $
+ */
+
+
+#define LINUX_OSL
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+#include <linux/mutex.h>
+
+#define PCI_CFG_RETRY 10
+
+#define OS_HANDLE_MAGIC 0x1234abcd
+#define BCM_MEM_FILENAME_LEN 24
+
+#ifdef DHD_USE_STATIC_BUF
+#define MAX_STATIC_BUF_NUM 16
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE)
+typedef struct bcm_static_buf {
+ struct mutex static_sem;
+ unsigned char *buf_ptr;
+ unsigned char buf_use[MAX_STATIC_BUF_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define MAX_STATIC_PKT_NUM 8
+typedef struct bcm_static_pkt {
+ struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM];
+ struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM];
+ struct mutex osl_pkt_sem;
+ unsigned char pkt_use[MAX_STATIC_PKT_NUM*2];
+} bcm_static_pkt_t;
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+#endif
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_info {
+ osl_pubinfo_t pub;
+ uint magic;
+ void *pdev;
+ uint malloced;
+ uint failed;
+ uint bustype;
+ bcm_mem_link_t *dbgmem_list;
+};
+
+static int16 linuxbcmerrormap[] =
+{ 0,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -E2BIG,
+ -E2BIG,
+ -EBUSY,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EFAULT,
+ -ENOMEM,
+ -EOPNOTSUPP,
+ -EMSGSIZE,
+ -EINVAL,
+ -EPERM,
+ -ENOMEM,
+ -EINVAL,
+ -ERANGE,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -EIO,
+ -EIO,
+ -EINVAL,
+ -EINVAL,
+
+
+
+#if BCME_LAST != -41
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+ for new error code defined in bcmutils.h"
+#endif
+};
+
+
+int
+osl_error(int bcmerror)
+{
+ if (bcmerror > 0)
+ bcmerror = 0;
+ else if (bcmerror < BCME_LAST)
+ bcmerror = BCME_ERROR;
+
+
+ return linuxbcmerrormap[-bcmerror];
+}
+
+void * dhd_os_prealloc(int section, unsigned long size);
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+ osl_t *osh;
+ gfp_t flags;
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ osh = kmalloc(sizeof(osl_t), flags);
+ ASSERT(osh);
+
+ bzero(osh, sizeof(osl_t));
+
+
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+ osh->magic = OS_HANDLE_MAGIC;
+ osh->malloced = 0;
+ osh->failed = 0;
+ osh->dbgmem_list = NULL;
+ osh->pdev = pdev;
+ osh->pub.pkttag = pkttag;
+ osh->bustype = bustype;
+
+ switch (bustype) {
+ case PCI_BUS:
+ case SI_BUS:
+ case PCMCIA_BUS:
+ osh->pub.mmbus = TRUE;
+ break;
+ case JTAG_BUS:
+ case SDIO_BUS:
+ case USB_BUS:
+ case SPI_BUS:
+ osh->pub.mmbus = FALSE;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+#ifdef DHD_USE_STATIC_BUF
+
+
+	if (!bcm_static_buf) {
+		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE+
+			STATIC_BUF_TOTAL_LEN))) {
+			printk("can not alloc static buf!\n");
+		}
+		else {
+			/* printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf); */
+			/* only touch the pool when the preallocation actually succeeded */
+			mutex_init(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+	}
+
+ if (!bcm_static_skb)
+ {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = dhd_os_prealloc(4, 0);
+
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
+ for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++)
+ bcm_static_skb->pkt_use[i] = 0;
+
+ mutex_init(&bcm_static_skb->osl_pkt_sem);
+ }
+#endif
+ return osh;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+ if (osh == NULL)
+ return;
+
+#ifdef DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+}
+
+
+void*
+osl_pktget(osl_t *osh, uint len)
+{
+ struct sk_buff *skb;
+ gfp_t flags;
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ if ((skb = __dev_alloc_skb(len, flags))) {
+ skb_put(skb, len);
+ skb->priority = 0;
+
+
+ osh->pub.pktalloced++;
+ }
+
+ return ((void*) skb);
+}
+
+
+void
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+ struct sk_buff *skb, *nskb;
+
+ skb = (struct sk_buff*) p;
+
+ if (send && osh->pub.tx_fn)
+ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+
+ if (skb->destructor) {
+
+ dev_kfree_skb_any(skb);
+ } else {
+
+ dev_kfree_skb(skb);
+ }
+
+ osh->pub.pktalloced--;
+
+ skb = nskb;
+ }
+}
+
+#ifdef DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+ int i = 0;
+ struct sk_buff *skb;
+
+
+ if (len > (PAGE_SIZE*2))
+ {
+ printk("Do we really need this big skb??\n");
+ return osl_pktget(osh, len);
+ }
+
+
+ mutex_lock(&bcm_static_skb->osl_pkt_sem);
+ if (len <= PAGE_SIZE)
+ {
+
+ for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+ {
+ if (bcm_static_skb->pkt_use[i] == 0)
+ break;
+ }
+
+ if (i != MAX_STATIC_PKT_NUM)
+ {
+ bcm_static_skb->pkt_use[i] = 1;
+ mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+
+ skb = bcm_static_skb->skb_4k[i];
+ skb->tail = skb->data + len;
+ skb->len = len;
+
+ return skb;
+ }
+ }
+
+
+ for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+ {
+ if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0)
+ break;
+ }
+
+ if (i != MAX_STATIC_PKT_NUM)
+ {
+ bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1;
+ mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+ skb = bcm_static_skb->skb_8k[i];
+ skb->tail = skb->data + len;
+ skb->len = len;
+
+ return skb;
+ }
+
+
+
+ mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+ printk("all static pkt in use!\n");
+ return osl_pktget(osh, len);
+}
+
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+ int i;
+
+	for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++)
+	{
+		/* slots 0..MAX_STATIC_PKT_NUM-1 hold the 4k skbs, the rest hold the 8k skbs */
+		struct sk_buff *skb = (i < MAX_STATIC_PKT_NUM) ?
+			bcm_static_skb->skb_4k[i] :
+			bcm_static_skb->skb_8k[i - MAX_STATIC_PKT_NUM];
+
+		if (p == skb)
+		{
+			mutex_lock(&bcm_static_skb->osl_pkt_sem);
+			bcm_static_skb->pkt_use[i] = 0;
+			mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+ return osl_pktfree(osh, p, send);
+}
+#endif
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+ uint val = 0;
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+ ASSERT(size == 4);
+
+ do {
+ pci_read_config_dword(osh->pdev, offset, &val);
+ if (val != 0xffffffff)
+ break;
+ } while (retry--);
+
+
+ return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+ ASSERT(size == 4);
+
+ do {
+ pci_write_config_dword(osh->pdev, offset, val);
+ if (offset != PCI_BAR0_WIN)
+ break;
+ if (osl_pci_read_config(osh, offset, size) == val)
+ break;
+ } while (retry--);
+
+}
+
+
+uint
+osl_pci_bus(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+
+uint
+osl_pci_slot(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+
+
+void*
+osl_malloc(osl_t *osh, uint size)
+{
+ void *addr;
+ gfp_t flags;
+
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+#ifdef DHD_USE_STATIC_BUF
+ if (bcm_static_buf)
+ {
+ int i = 0;
+ if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+ {
+ mutex_lock(&bcm_static_buf->static_sem);
+
+ for (i = 0; i < MAX_STATIC_BUF_NUM; i++)
+ {
+ if (bcm_static_buf->buf_use[i] == 0)
+ break;
+ }
+
+ if (i == MAX_STATIC_BUF_NUM)
+ {
+ mutex_unlock(&bcm_static_buf->static_sem);
+ printk("all static buff in use!\n");
+ goto original;
+ }
+
+ bcm_static_buf->buf_use[i] = 1;
+ mutex_unlock(&bcm_static_buf->static_sem);
+
+ bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+ if (osh)
+ osh->malloced += size;
+
+ return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+ }
+ }
+original:
+#endif
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ if ((addr = kmalloc(size, flags)) == NULL) {
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh)
+ osh->malloced += size;
+
+ return (addr);
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef DHD_USE_STATIC_BUF
+ if (bcm_static_buf)
+ {
+ if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+ <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+ {
+ int buf_idx = 0;
+
+ buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+ mutex_lock(&bcm_static_buf->static_sem);
+ bcm_static_buf->buf_use[buf_idx] = 0;
+ mutex_unlock(&bcm_static_buf->static_sem);
+
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ osh->malloced -= size;
+ }
+ return;
+ }
+ }
+#endif
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ osh->malloced -= size;
+ }
+ kfree(addr);
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->malloced);
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->failed);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+uint
+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+
+void
+osl_delay(uint usec)
+{
+ uint d;
+
+ while (usec > 0) {
+ d = MIN(usec, 1000);
+ udelay(d);
+ usec -= d;
+ }
+}
+
+
+
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+ void * p;
+ gfp_t flags;
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ if ((p = skb_clone((struct sk_buff*)skb, flags)) == NULL)
+ return NULL;
+
+
+ if (osh->pub.pkttag)
+ bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+
+ osh->pub.pktalloced++;
+ return (p);
+}
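
Usage note (illustrative sketch, not part of the applied patch): the routines above form the memory side of the OS abstraction layer. A caller is expected to pair osl_malloc()/osl_mfree() on the same osl_t handle and to translate driver-internal BCME_* status codes through osl_error() before handing them back to the kernel. The sketch below assumes the osl.h declarations this file implements; osl_usage_sketch() and bcm_do_something() are hypothetical names standing in for any routine that returns a BCME_* status.

	static int osl_usage_sketch(osl_t *osh)
	{
		void *buf;
		int bcmerr;

		buf = osl_malloc(osh, 1024);	/* static pool for PAGE_SIZE..2*PAGE_SIZE requests, kmalloc() otherwise */
		if (buf == NULL)
			return -ENOMEM;		/* osl_malloc bumped osh->failed on the kmalloc path */

		bcmerr = bcm_do_something(buf);	/* hypothetical call returning a BCME_* status */

		osl_mfree(osh, buf, 1024);	/* size must match the allocation */
		return osl_error(bcmerr);	/* map the BCME_* status to a negative errno */
	}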
diff --git a/drivers/net/wireless/bcm4329/miniopt.c b/drivers/net/wireless/bcm4329/miniopt.c
new file mode 100644
index 000000000000..6a184a75f06b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/miniopt.c
@@ -0,0 +1,163 @@
+/*
+ * Description.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.c,v 1.1.6.4 2009/09/25 00:32:01 Exp $
+ */
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#include <typedefs.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "miniopt.h"
+
+
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+
+
+
+/* ---- Private Variables ------------------------------------------------ */
+/* ---- Private Function Prototypes -------------------------------------- */
+/* ---- Functions -------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------- */
+void
+miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags)
+{
+ static const char *null_flags = "";
+
+ memset(t, 0, sizeof(miniopt_t));
+ t->name = name;
+ if (flags == NULL)
+ t->flags = null_flags;
+ else
+ t->flags = flags;
+ t->longflags = longflags;
+}
+
+
+/* ----------------------------------------------------------------------- */
+int
+miniopt(miniopt_t *t, char **argv)
+{
+ int keylen;
+ char *p, *eq, *valstr, *endptr = NULL;
+ int err = 0;
+
+ t->consumed = 0;
+ t->positional = FALSE;
+ memset(t->key, 0, MINIOPT_MAXKEY);
+ t->opt = '\0';
+ t->valstr = NULL;
+ t->good_int = FALSE;
+ valstr = NULL;
+
+ if (*argv == NULL) {
+ err = -1;
+ goto exit;
+ }
+
+ p = *argv++;
+ t->consumed++;
+
+ if (!t->opt_end && !strcmp(p, "--")) {
+ t->opt_end = TRUE;
+ if (*argv == NULL) {
+ err = -1;
+ goto exit;
+ }
+ p = *argv++;
+ t->consumed++;
+ }
+
+ if (t->opt_end) {
+ t->positional = TRUE;
+ valstr = p;
+ }
+ else if (!strncmp(p, "--", 2)) {
+ eq = strchr(p, '=');
+ if (eq == NULL && !t->longflags) {
+ fprintf(stderr,
+ "%s: missing \" = \" in long param \"%s\"\n", t->name, p);
+ err = 1;
+ goto exit;
+ }
+ keylen = eq ? (eq - (p + 2)) : (int)strlen(p) - 2;
+ if (keylen > 63) keylen = 63;
+ memcpy(t->key, p + 2, keylen);
+
+ if (eq) {
+ valstr = eq + 1;
+ if (*valstr == '\0') {
+ fprintf(stderr,
+ "%s: missing value after \" = \" in long param \"%s\"\n",
+ t->name, p);
+ err = 1;
+ goto exit;
+ }
+ }
+ }
+ else if (!strncmp(p, "-", 1)) {
+ t->opt = p[1];
+ if (strlen(p) > 2) {
+ fprintf(stderr,
+ "%s: only single char options, error on param \"%s\"\n",
+ t->name, p);
+ err = 1;
+ goto exit;
+ }
+ if (strchr(t->flags, t->opt)) {
+ /* this is a flag option, no value expected */
+ valstr = NULL;
+ } else {
+ if (*argv == NULL) {
+ fprintf(stderr,
+ "%s: missing value parameter after \"%s\"\n", t->name, p);
+ err = 1;
+ goto exit;
+ }
+ valstr = *argv;
+ argv++;
+ t->consumed++;
+ }
+ } else {
+ t->positional = TRUE;
+ valstr = p;
+ }
+
+ /* parse valstr as int just in case */
+ if (valstr) {
+ t->uval = (uint)strtoul(valstr, &endptr, 0);
+ t->val = (int)t->uval;
+ t->good_int = (*endptr == '\0');
+ }
+
+ t->valstr = valstr;
+
+exit:
+ if (err == 1)
+ t->opt = '?';
+
+ return err;
+}
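
Usage note (illustrative sketch, not part of the applied patch): miniopt() is driven in a loop; it returns -1 when argv is exhausted, 1 on a parse error (with t.opt set to '?'), and 0 otherwise, and the caller advances argv by t.consumed after each call. A minimal sketch under those assumptions, using only the miniopt.h interface added above; parse_args_sketch(), verbose and count are illustrative names.

	static int parse_args_sketch(char **argv)
	{
		miniopt_t t;
		int ret;
		int count = 0;
		bool verbose = FALSE;

		miniopt_init(&t, "example", "v", FALSE);	/* "v" is the only value-less flag */
		argv++;						/* skip the program name */

		while ((ret = miniopt(&t, argv)) != -1) {	/* -1 means argv is exhausted */
			if (ret == 1)
				return -1;			/* parse error; t.opt was set to '?' */
			argv += t.consumed;			/* advance past what miniopt consumed */

			if (t.opt == 'v')
				verbose = TRUE;
			else if (t.positional && t.good_int)
				count = t.val;			/* bare integer argument */
		}

		return verbose ? count : 0;
	}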
diff --git a/drivers/net/wireless/bcm4329/sbutils.c b/drivers/net/wireless/bcm4329/sbutils.c
new file mode 100644
index 000000000000..46cd51010b78
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/sbutils.c
@@ -0,0 +1,1004 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c,v 1.662.4.10.2.7.4.2 2010/04/19 05:48:48 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+ uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+
+#define SET_SBREG(sii, r, mask, val) \
+ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
+#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
+#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+ uint8 tmp;
+ uint32 val, intr_val = 0;
+
+
+ /*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
+ val = R_REG(sii->osh, sbr);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+ uint8 tmp;
+ volatile uint32 dummy;
+ uint32 intr_val = 0;
+
+
+ /*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
+ if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+#ifdef IL_BIGENDIAN
+ dummy = R_REG(sii->osh, sbr);
+ W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+ dummy = R_REG(sii->osh, sbr);
+ W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+#else
+ dummy = R_REG(sii->osh, sbr);
+ W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+ dummy = R_REG(sii->osh, sbr);
+ W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+#endif /* IL_BIGENDIAN */
+ } else
+ W_REG(sii->osh, sbr, v);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_flag(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 vec;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ if (siflag == -1)
+ vec = 0;
+ else
+ vec = 1 << siflag;
+ W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+ uint i;
+
+ for (i = 0; i < sii->numcores; i ++)
+ if (sba == sii->common_info->coresba[i])
+ return i;
+ return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+ uint32 sbaddr;
+
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS: {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+ break;
+ }
+
+ case PCI_BUS:
+ sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ break;
+
+ case PCMCIA_BUS: {
+ uint8 tmp = 0;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+ sbaddr = (uint32)tmp << 12;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+ sbaddr |= (uint32)tmp << 16;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+ sbaddr |= (uint32)tmp << 24;
+ break;
+ }
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ sbaddr = (uint32)(uintptr)sii->curmap;
+ break;
+
+
+ default:
+ sbaddr = BADCOREADDR;
+ break;
+ }
+
+ return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint sbidh;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+ sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+ return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+ }
+
+	/* return the new value;
+	 * for a write operation, the following readback ensures the write has completed.
+	 */
+ return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+ (val << SBTMH_SISF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatehigh, w);
+ }
+
+ /* return the new value */
+ return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbtmstatelow) &
+ (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!sii->common_info->regs[coreidx]) {
+ sii->common_info->regs[coreidx] =
+ REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ if (regoff >= SBCONFIGOFF) {
+ w = (R_SBREG(sii, r) & ~mask) | val;
+ W_SBREG(sii, r, w);
+ } else {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+ }
+
+ /* readback */
+ if (regoff >= SBCONFIGOFF)
+ w = R_SBREG(sii, r);
+ else {
+ if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+ (coreidx == SI_CC_IDX) &&
+ (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+ w = val;
+ } else
+ w = R_REG(sii->osh, r);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ sb_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES 2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+ uint next;
+ uint ncc = 0;
+ uint i;
+
+ if (bus >= SB_MAXBUSES) {
+ SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+ return 0;
+ }
+ SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+ /* Scan all cores on the bus starting from core 0.
+ * Core addresses must be contiguous on each bus.
+ */
+ for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+ sii->common_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+ /* keep and reuse the initial register mapping */
+ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
+ (sii->common_info->coresba[next] == sba)) {
+ SI_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+ sii->common_info->regs[next] = regs;
+ }
+
+ /* change core to 'next' and read its coreid */
+ sii->curmap = _sb_setcoreidx(sii, next);
+ sii->curidx = next;
+
+ sii->common_info->coreid[next] = sb_coreid(&sii->pub);
+
+ /* core specific processing... */
+ /* chipc provides # cores */
+ if (sii->common_info->coreid[next] == CC_CORE_ID) {
+ chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+ uint32 ccrev = sb_corerev(&sii->pub);
+
+ /* determine numcores - this is the total # cores in the chip */
+ if (((ccrev == 4) || (ccrev >= 6)))
+ numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+ CID_CC_SHIFT;
+ else {
+ /* Older chips */
+ uint chip = sii->pub.chip;
+
+ if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
+ numcores = 6;
+ else if (chip == BCM4704_CHIP_ID)
+ numcores = 9;
+ else if (chip == BCM5365_CHIP_ID)
+ numcores = 7;
+ else {
+ SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+ chip));
+ ASSERT(0);
+ numcores = 1;
+ }
+ }
+ SI_MSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+ sii->pub.issim ? "QT" : ""));
+ }
+ /* scan bridged SB(s) and add results to the end of the list */
+ else if (sii->common_info->coreid[next] == OCP_CORE_ID) {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+ uint nsbcc;
+
+ sii->numcores = next + 1;
+
+ if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+ continue;
+ nsbba &= 0xfffff000;
+ if (_sb_coreidx(sii, nsbba) != BADIDX)
+ continue;
+
+ nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+ nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+ if (sbba == SI_ENUM_BASE)
+ numcores -= nsbcc;
+ ncc += nsbcc;
+ }
+ }
+
+ SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+ sii->numcores = i + ncc;
+ return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii;
+ uint32 origsba;
+
+ sii = SI_INFO(sih);
+
+ /* Save the current core info and validate it later till we know
+ * for sure what is good and what is bad.
+ */
+ origsba = _sb_coresba(sii);
+
+ /* scan all SB(s) starting from SI_ENUM_BASE */
+ sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ sii->curmap = _sb_setcoreidx(sii, coreidx);
+ sii->curidx = coreidx;
+
+ return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+ uint32 sbaddr = sii->common_info->coresba[coreidx];
+ void *regs;
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!sii->common_info->regs[coreidx]) {
+ sii->common_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+ }
+ regs = sii->common_info->regs[coreidx];
+ break;
+
+ case PCI_BUS:
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+ regs = sii->curmap;
+ break;
+
+ case PCMCIA_BUS: {
+ uint8 tmp = (sbaddr >> 12) & 0x0f;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+ tmp = (sbaddr >> 16) & 0xff;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+ tmp = (sbaddr >> 24) & 0xff;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+ regs = sii->curmap;
+ break;
+ }
+ case SPI_BUS:
+ case SDIO_BUS:
+ /* map new one */
+ if (!sii->common_info->regs[coreidx]) {
+ sii->common_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+ ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+ }
+ regs = sii->common_info->regs[coreidx];
+ break;
+
+
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+ sbconfig_t *sb;
+ volatile uint32 *addrm;
+
+ sb = REGS2SB(sii->curmap);
+
+ switch (asidx) {
+ case 0:
+ addrm = &sb->sbadmatch0;
+ break;
+
+ case 1:
+ addrm = &sb->sbadmatch1;
+ break;
+
+ case 2:
+ addrm = &sb->sbadmatch2;
+ break;
+
+ case 3:
+ addrm = &sb->sbadmatch3;
+ break;
+
+ default:
+ SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+ return 0;
+ }
+
+ return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ /* + 1 because of enumeration space */
+ return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ origidx = sii->curidx;
+ ASSERT(GOODIDX(origidx));
+
+ INTR_OFF(sii, intr_val);
+
+ /* switch over to chipcommon core if there is one, else use pci */
+ if (sii->pub.ccrev != NOREV) {
+ chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ /* do the buffer registers update */
+ W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+ W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+ } else
+ ASSERT(0);
+
+ /* restore core index */
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /* if core is already in reset, just return */
+ if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+ return;
+
+ /* if clocks are not enabled, put into reset and return */
+ if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+ goto disable;
+
+ /* set target reject and spin until busy is clear (preserve core-specific bits) */
+ OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+ SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+ OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+ dummy = R_SBREG(sii, &sb->sbimstate);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(10);
+
+ /* don't forget to clear the initiator reject bit */
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+ AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+ /* leave reset and reject asserted */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ sb_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+
+ /* set reset while enabling the clock and forcing them on throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+ W_SBREG(sii, &sb->sbtmstatehigh, 0);
+ }
+ if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+ AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+
+ /* leave clock enabled */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+}
+
+void
+sb_core_tofixup(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ if ((BUSTYPE(sii->pub.bustype) != PCI_BUS) || PCIE(sii) ||
+ (PCI(sii) && (sii->pub.buscorerev >= 5)))
+ return;
+
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ SET_SBREG(sii, &sb->sbimconfiglow,
+ SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
+ (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
+ } else {
+ if (sb_coreid(sih) == PCI_CORE_ID) {
+ SET_SBREG(sii, &sb->sbimconfiglow,
+ SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
+ (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
+ } else {
+ SET_SBREG(sii, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
+ }
+ }
+
+ sb_commit(sih);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ * SI_BUS => mips
+ * JTAG_BUS => chipc
+ * PCI_BUS => pci or pcie
+ * PCMCIA_BUS => pcmcia
+ * SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 tmp, ret = 0xffffffff;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ if ((to & ~TO_MASK) != 0)
+ return ret;
+
+ /* Figure out the master core */
+ if (idx == BADIDX) {
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case PCI_BUS:
+ idx = sii->pub.buscoreidx;
+ break;
+ case JTAG_BUS:
+ idx = SI_CC_IDX;
+ break;
+ case PCMCIA_BUS:
+ case SDIO_BUS:
+ idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+ break;
+ case SI_BUS:
+ idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+ break;
+ default:
+ ASSERT(0);
+ }
+ if (idx == BADIDX)
+ return ret;
+ }
+
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+ tmp = R_SBREG(sii, &sb->sbimconfiglow);
+ ret = tmp & TO_MASK;
+ W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+ sb_commit(sih);
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+ return ret;
+}
+
+uint32
+sb_base(uint32 admatch)
+{
+ uint32 base;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ base = 0;
+
+ if (type == 0) {
+ base = admatch & SBAM_BASE0_MASK;
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE1_MASK;
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE2_MASK;
+ }
+
+ return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+ uint32 size;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ size = 0;
+
+ if (type == 0) {
+ size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+ }
+
+ return (size);
+}
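
Usage note (illustrative sketch, not part of the applied patch): sb_corereg() above bundles the interrupt-off / switch-core / mask-and-set / readback / switch-back sequence into a single call, so callers do not have to open-code INTR_OFF() and sb_setcoreidx(). A minimal sketch against the chipcommon core, reusing SI_CC_IDX, OFFSETOF() and chipcregs_t from the driver headers already included here; cc_sketch() and ticks are illustrative names only.

	static uint32 cc_sketch(si_t *sih, uint32 ticks)
	{
		/* arm the chipcommon watchdog: write every bit of the register */
		sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);

		/* pure read: mask == 0 and val == 0 leave the register untouched
		 * and simply return its current contents
		 */
		return sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
	}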
diff --git a/drivers/net/wireless/bcm4329/siutils.c b/drivers/net/wireless/bcm4329/siutils.c
new file mode 100644
index 000000000000..1814db0f9dd6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/siutils.c
@@ -0,0 +1,1527 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c,v 1.662.4.4.4.16.4.28 2010/06/23 21:37:54 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs);
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+static void *common_info_alloced = NULL;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ si_info_t *sii;
+
+ /* alloc si_info_t */
+ if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+
+ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+ if (NULL != sii->common_info)
+ MFREE(osh, sii->common_info, sizeof(si_common_info_t));
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ sii->vars = vars ? *vars : NULL;
+ sii->varsz = varsz ? *varsz : 0;
+
+ return (si_t *)sii;
+}
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
+
+/* generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+ static bool ksii_attached = FALSE;
+
+ if (!ksii_attached) {
+ void *regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+
+ if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+ SI_BUS, NULL,
+ osh != SI_OSH ? &ksii.vars : NULL,
+ osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+ if (NULL != ksii.common_info)
+ MFREE(osh, ksii.common_info, sizeof(si_common_info_t));
+ SI_ERROR(("si_kattach: si_doattach failed\n"));
+ REG_UNMAP(regs);
+ return NULL;
+ }
+ REG_UNMAP(regs);
+
+ /* save ticks normalized to ms for si_watchdog_ms() */
+ if (PMUCTL_ENAB(&ksii.pub)) {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ } else {
+ wd_msticks = ALP_CLOCK / 1000;
+ }
+
+ ksii_attached = TRUE;
+ SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+ ksii.pub.ccrev, wd_msticks));
+ }
+
+ return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* need to set the memseg flag for CF cards before any sb register access */
+ if (BUSTYPE(bustype) == PCMCIA_BUS)
+ sii->memseg = TRUE;
+
+
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval));
+ return FALSE;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ }
+
+
+ return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs)
+{
+ bool pci, pcie;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ SI_MSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, sii->common_info->coresba[i], sii->common_info->regs[i]));
+
+ if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if (cid == PCIE_CORE_ID) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ }
+ } else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+ (cid == PCMCIA_CORE_ID)) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ ((cid == PCMCIA_CORE_ID) ||
+ (cid == SDIOD_CORE_ID))) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == sii->common_info->coresba[i])) ||
+ (regs == sii->common_info->regs[i]))
+ *origidx = i;
+ }
+
+
+ SI_MSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+ sii->pub.buscorerev));
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+ (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (sii->pub.chiprev <= 3))
+ OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+	/* Make sure any on-chip ARM is off (in case strapping is wrong or downloaded code
+	 * was already running).
+	 */
+ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+ si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+ si_core_disable(&sii->pub, 0);
+ }
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+
+
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+
+ ASSERT(GOODREGS(regs));
+
+ bzero((uchar*)sii, sizeof(si_info_t));
+
+
+ {
+ if (NULL == (common_info_alloced = (void *)MALLOC(osh, sizeof(si_common_info_t)))) {
+			SI_ERROR(("si_doattach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+ bzero((uchar*)(common_info_alloced), sizeof(si_common_info_t));
+ }
+ sii->common_info = (si_common_info_t *)common_info_alloced;
+ sii->common_info->attach_count++;
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+
+
+ /* find Chipcommon address */
+ if (bustype == PCI_BUS) {
+ savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+ cc = (chipcregs_t *)regs;
+ } else
+ if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+ cc = (chipcregs_t *)sii->curmap;
+ } else {
+ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+ }
+
+ sih->bustype = bustype;
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+ bustype, BUSTYPE(bustype)));
+ return NULL;
+ }
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_buscore_prep failed %d\n", bustype));
+ return NULL;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ w = R_REG(osh, &cc->chipid);
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id, rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chippkg != BCM4329_289PIN_PKG_ID))
+ sih->chippkg = BCM4329_182PIN_PKG_ID;
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+ SI_MSG(("Found chip type SB (0x%08x)\n", w));
+ sb_scan(&sii->pub, regs, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, (void *)cc, devid);
+ } else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ return NULL;
+ }
+
+ pvars = NULL;
+
+
+
+ if (sii->pub.ccrev >= 20) {
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(osh, &cc->gpiopullup, 0);
+ W_REG(osh, &cc->gpiopulldown, 0);
+ si_setcoreidx(sih, origidx);
+ }
+
+ /* Skip PMU initialization from the Dongle Host.
+ * Firmware will take care of it when it comes up.
+ */
+
+
+
+ return (sii);
+}
+
+/* may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+
+ sii = SI_INFO(sih);
+
+ if (sii == NULL)
+ return;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ for (idx = 0; idx < SI_MAXCORES; idx++)
+ if (sii->common_info->regs[idx]) {
+ REG_UNMAP(sii->common_info->regs[idx]);
+ sii->common_info->regs[idx] = NULL;
+ }
+
+
+ if (1 == sii->common_info->attach_count--) {
+ MFREE(sii->osh, sii->common_info, sizeof(si_common_info_t));
+ common_info_alloced = NULL;
+ }
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (sii != &ksii)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* save current core id. when this function is called, the current core
+	 * must be the core which provides driver functions (il, et, wl, etc.)
+	 */
+ sii->dev_coreid = sii->common_info->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intrsoff_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ if (CHIPTYPE(sih->socitype) == SOCI_SB) {
+ sbconfig_t *ccsbr = (sbconfig_t *)((uintptr)((ulong)
+ (sii->common_info->coresba[SI_CC_IDX]) + SBCONFIGOFF));
+ return R_REG(sii->osh, &ccsbr->sbflagst);
+ } else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return R_REG(sii->osh, ((uint32 *)(uintptr)
+ (sii->common_info->oob_router + OOB_STATUSA)));
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+si_flag(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_flag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_setint(sih, siflag);
+ else
+ ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->common_info->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ sii = SI_INFO(sih);
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = si_coreid(sih);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++)
+ if (sii->common_info->coreid[i] == coreid)
+ coreunit++;
+
+ return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corevendor(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+ return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corerev(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii;
+ uint found;
+ uint i;
+
+ sii = SI_INFO(sih);
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++)
+ if (sii->common_info->coreid[i] == coreid) {
+ if (found == coreunit)
+ return (i);
+ found++;
+ }
+
+ return (BADIDX);
+}
+
+/* return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ bcopy((uchar*)sii->common_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+ return (sii->numcores);
+}
+
+/* return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+
+ return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+ uint idx;
+
+ idx = si_findcoreidx(sih, coreid, coreunit);
+ if (!GOODIDX(idx))
+ return (NULL);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, idx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, coreidx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+/* Turn off interrupt as required by sb_setcore, before switch core */
+void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+ void *cc;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ INTR_OFF(sii, *intr_val);
+ *origidx = sii->curidx;
+ cc = si_setcore(sih, coreid, 0);
+ ASSERT(cc != NULL);
+
+ return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ si_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+int
+si_numaddrspaces(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_numaddrspaces(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspace(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspacesize(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_cflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_cflags_wo(sih, mask, val);
+ else
+ ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_sflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_iscoreup(sih);
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+}
+
+void
+si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val)
+{
+ /* only for 4319, no requirement for SOCI_SB */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_write_wrap_reg(sih, offset, val);
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_reset(sih, bits, resetbits);
+}
+
+void
+si_core_tofixup(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_tofixup(sih);
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+ uint32 cflags;
+ int result = 0;
+
+ /* Read core control flags */
+ cflags = si_core_cflags(sih, 0, 0);
+
+ /* Set bist & fgc */
+ si_core_cflags(sih, 0, (SICF_BIST_EN | SICF_FGC));
+
+ /* Wait for bist done */
+ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+ result = BCME_ERROR;
+
+ /* Reset core control flags */
+ si_core_cflags(sih, 0xffff, cflags);
+
+ return result;
+}
+
+static uint32
+factor6(uint32 x)
+{
+ switch (x) {
+ case CC_F6_2: return 2;
+ case CC_F6_3: return 3;
+ case CC_F6_4: return 4;
+ case CC_F6_5: return 5;
+ case CC_F6_6: return 6;
+ case CC_F6_7: return 7;
+ default: return 0;
+ }
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ return (100000000);
+ } else
+ ASSERT(0);
+ /* PLL types 3 and 7 use BASE2 (25 MHz) */
+ if ((pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ switch (mc) {
+ case CC_MC_BYPASS: return (clock);
+ case CC_MC_M1: return (clock / m1);
+ case CC_MC_M1M2: return (clock / (m1 * m2));
+ case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
+ case CC_MC_M1M3: return (clock / (m1 * m3));
+ default: return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+}
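+
+/*
+ * Worked example with made-up register values: for a PLL_TYPE1 part where
+ * factor6() decodes n1 to 4 and n2 + CC_F5_BIAS comes to 10, the undivided
+ * clock is CC_CLOCK_BASE1 * 4 * 10.  If mc is CC_MC_M1M2 and m1/m2 decode to
+ * 4 and 10 as well, si_clock_rate() returns clock / (4 * 10), i.e. the base
+ * frequency again.
+ */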
+
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+ if (PMUCTL_ENAB(sih)) {
+
+ if ((sih->chip == BCM4319_CHIP_ID) && (sih->chiprev == 0) && (ticks != 0)) {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+ si_setcore(sih, USB20D_CORE_ID, 0);
+ si_core_disable(sih, 1);
+ si_setcore(sih, CC_CORE_ID, 0);
+ }
+
+ if (ticks == 1)
+ ticks = 2;
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+ } else {
+ /* instant NMI */
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ }
+}
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+/* trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ si_watchdog(sih, wd_msticks * ms);
+}
+#endif
+
+
+
+/* initialize the sdio core */
+void
+si_sdio_init(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (((sih->buscoretype == PCMCIA_CORE_ID) && (sih->buscorerev >= 8)) ||
+ (sih->buscoretype == SDIOD_CORE_ID)) {
+ uint idx;
+ sdpcmd_regs_t *sdpregs;
+
+ /* get the current core index */
+ idx = sii->curidx;
+ ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0));
+
+ /* switch to sdio core */
+ if (!(sdpregs = (sdpcmd_regs_t *)si_setcore(sih, PCMCIA_CORE_ID, 0)))
+ sdpregs = (sdpcmd_regs_t *)si_setcore(sih, SDIOD_CORE_ID, 0);
+ ASSERT(sdpregs);
+
+ SI_MSG(("si_sdio_init: For PCMCIA/SDIO Corerev %d, enable ints from core %d "
+ "through SD core %d (%p)\n",
+ sih->buscorerev, idx, sii->curidx, sdpregs));
+
+ /* enable backplane error and core interrupts */
+ W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT);
+ W_REG(sii->osh, &sdpregs->sbintmask, (I_SB_SERR | I_SB_RESPERR | (1 << idx)));
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+ }
+
+ /* enable interrupts */
+ bcmsdh_intr_enable(sii->sdh);
+
+}
+
+
+/* change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+ return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/* mask&set gpiocontrol bits */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* GPIOs could be shared on router platforms;
+ * ignore the reservation if the request is high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
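+
+/*
+ * Illustrative note on the reservation masking above (numbers made up): with
+ * si_gpioreservation = 0x0C and a caller mask of 0x0F, a zero (driver)
+ * priority request is trimmed to (0x0C | 0x0F) & ~0x0C = 0x03, i.e. only the
+ * unreserved pins, while a non-zero application priority request is trimmed
+ * to 0x0C & 0x0F = 0x0C, i.e. only the pins that have been reserved.
+ */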
+
+/* mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* GPIOs could be shared on router platforms;
+ * ignore the reservation if the request is high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioouten);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* GPIOs could be shared on router platforms;
+ * ignore the reservation if the request is high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* only cores on SI_BUS share GPIOs, and only application users need to
+ * reserve/release a GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return -1;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return -1;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return -1;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* release one gpio */
+/*
+ * releasing the GPIO doesn't change its current value; the last written value
+ * persists until someone overwrites it
+ */
+
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* only cores on SI_BUS share GPIOs, and only application users need to
+ * reserve/release a GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return -1;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return -1;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return -1;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
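+
+/*
+ * Illustrative usage only: an application caller is expected to reserve a
+ * pin before driving it and to release it afterwards.  The bit position and
+ * the GPIO_APP_PRIORITY constant are assumptions for this sketch.
+ *
+ *	if (si_gpioreserve(sih, 1 << 4, GPIO_APP_PRIORITY) != (uint32)-1) {
+ *		si_gpioouten(sih, 1 << 4, 1 << 4, GPIO_APP_PRIORITY);
+ *		si_gpioout(sih, 1 << 4, 1 << 4, GPIO_APP_PRIORITY);
+ *		si_gpiorelease(sih, 1 << 4, GPIO_APP_PRIORITY);
+ *	}
+ */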
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an LED */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 16)
+ return -1;
+
+ /* gpio led powersave reg */
+ return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (sih->ccrev < 16)
+ return -1;
+
+ return (si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 20)
+ return -1;
+
+ offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return -1;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return -1;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+ bool level, gpio_handler_t cb, void *arg)
+{
+ si_info_t *sii;
+ gpioh_item_t *gi;
+
+ ASSERT(event);
+ ASSERT(cb != NULL);
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return NULL;
+
+ if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+ return NULL;
+
+ bzero(gi, sizeof(gpioh_item_t));
+ gi->event = event;
+ gi->handler = cb;
+ gi->arg = arg;
+ gi->level = level;
+
+ gi->next = sii->gpioh_head;
+ sii->gpioh_head = gi;
+
+ return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+ si_info_t *sii;
+ gpioh_item_t *p, *n;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return;
+
+ ASSERT(sii->gpioh_head != NULL);
+ if ((void*)sii->gpioh_head == gpioh) {
+ sii->gpioh_head = sii->gpioh_head->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ } else {
+ p = sii->gpioh_head;
+ n = p->next;
+ while (n) {
+ if ((void*)n == gpioh) {
+ p->next = n->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ }
+ p = n;
+ n = n->next;
+ }
+ }
+
+ ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+ si_info_t *sii;
+ gpioh_item_t *h;
+ uint32 status;
+ uint32 level = si_gpioin(sih);
+ uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+
+ sii = SI_INFO(sih);
+ for (h = sii->gpioh_head; h != NULL; h = h->next) {
+ if (h->handler) {
+ status = (h->level ? level : edge);
+
+ if (status & h->event)
+ h->handler(status, h->arg);
+ }
+ }
+
+ si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return -1;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
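+
+/*
+ * Illustrative usage only: register a level-triggered handler for one GPIO
+ * event bit and then enable the chipcommon GPIO interrupt.  The callback
+ * name, event bit and context pointer are placeholders.
+ *
+ *	void *gpioh = si_gpio_handler_register(sih, 1 << 2, TRUE, my_gpio_cb, ctx);
+ *	if (gpioh != NULL)
+ *		si_gpio_int_enable(sih, TRUE);
+ *	(later)
+ *	si_gpio_handler_unregister(sih, gpioh);
+ */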
+
+
+/* Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+ nb --;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
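+
+/*
+ * Worked example for the corerev >= 3 branch (made-up coreinfo fields): with
+ * nb = 4 banks, bsz = 2 and lss = 0 the size is 4 * (1 << (2 + SR_BSZ_BASE))
+ * bytes; with lss = 1 one bank is dropped and a smaller leading block of
+ * (1 << SR_BSZ_BASE) bytes is added instead.
+ */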
+
+
+void
+si_btcgpiowar(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ chipcregs_t *cc;
+
+ sii = SI_INFO(sih);
+
+ /* Make sure a ChipCommon core is present and
+ * UART_TX is strapped to 1
+ */
+ if (!(sih->cccaps & CC_CAP_UARTGPIO))
+ return;
+
+ /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+ uint32 w;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case PCI_BUS:
+ ASSERT(sii->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((w & 0xFFFF) != VENDOR_BROADCOM)
+ return TRUE;
+ else
+ return FALSE;
+ default:
+ return FALSE;
+ }
+ return FALSE;
+}
diff --git a/drivers/net/wireless/bcm4329/siutils_priv.h b/drivers/net/wireless/bcm4329/siutils_priv.h
new file mode 100644
index 000000000000..e8ad7e50958a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/siutils_priv.h
@@ -0,0 +1,213 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h,v 1.3.10.5.4.2 2009/09/22 13:28:16 Exp $
+ */
+
+#ifndef _siutils_priv_h_
+#define _siutils_priv_h_
+
+/* debug/trace */
+#define SI_ERROR(args)
+
+#define SI_MSG(args)
+
+#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+ void *arg;
+ bool level;
+ gpio_handler_t handler;
+ uint32 event;
+ struct gpioh_item *next;
+} gpioh_item_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_common_info {
+ void *regs[SI_MAXCORES]; /* other regs va */
+ void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */
+ uint coreid[SI_MAXCORES]; /* id of each core */
+ uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
+ uint32 cib[SI_MAXCORES]; /* erom cib entry for each core */
+ uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */
+ uint32 coresba2_size[SI_MAXCORES]; /* second address space size */
+ uint32 coresba[SI_MAXCORES]; /* backplane address of each core */
+ uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */
+ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
+ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+ uint32 oob_router; /* oob router registers for axi */
+ uint8 attach_count;
+} si_common_info_t;
+
+typedef struct si_info {
+ struct si_pub pub; /* backplane public state (must be the first field) */
+
+ void *osh; /* osl os handle */
+ void *sdh; /* bcmsdh handle */
+ void *pch; /* PCI/E core handle */
+ uint dev_coreid; /* the core provides driver functions */
+ void *intr_arg; /* interrupt callback function arg */
+ si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
+ si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+ si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+
+ gpioh_item_t *gpioh_head; /* GPIO event handlers list */
+
+ bool memseg; /* flag to toggle MEM_SEG register */
+
+ char *vars;
+ uint varsz;
+
+ void *curmap; /* current regs va */
+
+ uint curidx; /* current core index */
+ uint numcores; /* # discovered cores */
+ void *curwrap; /* current wrapper va */
+ si_common_info_t *common_info; /* Common information for all the cores in a chip */
+} si_info_t;
+
+#define SI_INFO(sih) (si_info_t *)(uintptr)sih
+
+#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ ISALIGNED((x), SI_CORE_SIZE))
+#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR 0
+#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES)
+#define BADIDX (SI_MAXCORES + 1)
+#define NOREV -1 /* Invalid rev */
+
+#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCI_CORE_ID))
+#define PCIE(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE_CORE_ID))
+#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIE and CC cores without having to change
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
+ (((si)->pub.buscoretype == PCI_CORE_ID) && (si)->pub.buscorerev >= 13))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register access inside the ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (si)->common_info->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (si)->common_info->coreid[(si)->curidx] == (si)->dev_coreid) {\
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
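+
+/*
+ * Typical (illustrative) use of these macros in siutils.c: block the
+ * function-core interrupt, remember the current core, do the cross-core
+ * work, then switch back and restore.  'sii' is SI_INFO(sih).
+ *
+ *	uint origidx, intr_val = 0;
+ *
+ *	INTR_OFF(sii, intr_val);
+ *	origidx = si_coreidx(sih);
+ *	(si_setcore() and register accesses here)
+ *	si_setcoreidx(sih, origidx);
+ *	INTR_RESTORE(sii, intr_val);
+ */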
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /* low power oscillator min */
+#define LPOMAXFREQ 43000 /* low power oscillator max */
+#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /* 25 MHz */
+#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
+
+#define PCI_FORCEHT(si) \
+ (((PCIE(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+ ((PCI(si) || PCIE(si)) && (si->pub.chip == BCM4321_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_tofixup(si_t *sih);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val);
+
+
+#endif /* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcm4329/wl_iw.c b/drivers/net/wireless/bcm4329/wl_iw.c
new file mode 100644
index 000000000000..434e584f830c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/wl_iw.c
@@ -0,0 +1,8455 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.78 2011/02/11 21:27:52 Exp $
+ */
+
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+
+typedef void wlc_info_t;
+typedef void wl_info_t;
+typedef const struct si_pub si_t;
+#include <wlioctl.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+#define WL_ASSOC(x)
+#define WL_INFORM(x)
+#define WL_WSEC(x)
+#define WL_SCAN(x)
+#define WL_PNO(x)
+#define WL_TRACE_COEX(x)
+
+#include <wl_iw.h>
+
+
+
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+
+
+#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+
+#include <linux/rtnetlink.h>
+#include <linux/mutex.h>
+
+#define WL_IW_USE_ISCAN 1
+#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS 1
+
+#if defined(SOFTAP)
+#define WL_SOFTAP(x) printk x
+static struct net_device *priv_dev;
+static bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+static long ap_cfg_pid = -1;
+struct net_device *ap_net_dev = NULL;
+struct semaphore ap_eth_sema;
+static struct completion ap_cfg_exited;
+static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap);
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac);
+#endif
+
+#define WL_IW_IOCTL_CALL(func_call) \
+ do { \
+ func_call; \
+ } while (0)
+
+static int g_onoff = G_WLAN_SET_ON;
+wl_iw_extra_params_t g_wl_iw_params;
+static struct mutex wl_cache_lock;
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+ uint32 reason, char* stringBuf, uint buflen);
+#include <bcmsdbus.h>
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+extern void dhd_dev_init_ioctl(struct net_device *dev);
+int dev_iw_write_cfg1_bss_var(struct net_device *dev, int val);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+
+#if defined(IL_BIGENDIAN)
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+#endif
+
+#ifdef CONFIG_WIRELESS_EXT
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#endif
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
+#endif
+
+static void *g_scan = NULL;
+static volatile uint g_scan_specified_ssid;
+static wlc_ssid_t g_specific_ssid;
+
+static wlc_ssid_t g_ssid;
+
+bool btcoex_is_sco_active(struct net_device *dev);
+static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl;
+#if defined(CONFIG_FIRST_SCAN)
+static volatile uint g_first_broadcast_scan;
+static volatile uint g_first_counter_scans;
+#define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+static void wl_iw_free_ss_cache(void);
+static int wl_iw_run_ss_cache_timer(int kick_off);
+#endif
+#if defined(CONFIG_FIRST_SCAN)
+int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+#endif
+static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len);
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+ struct net_device *dev;
+ struct timer_list timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+ int iscan_state;
+ iscan_buf_t * list_hdr;
+ iscan_buf_t * list_cur;
+
+
+ long sysioc_pid;
+ struct semaphore sysioc_sem;
+ struct completion sysioc_exited;
+
+ uint32 scan_flag;
+#if defined CSCAN
+ char ioctlbuf[WLC_IOCTL_MEDLEN];
+#else
+ char ioctlbuf[WLC_IOCTL_SMLEN];
+#endif
+ wl_iscan_params_t *iscan_ex_params_p;
+ int iscan_ex_param_size;
+} iscan_info_t;
+#define COEX_DHCP 1
+
+#define BT_DHCP_eSCO_FIX
+#define BT_DHCP_USE_FLAGS
+#define BT_DHCP_OPPORTUNITY_WINDOW_TIME 2500
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+static void wl_iw_bt_flag_set(struct net_device *dev, bool set);
+static void wl_iw_bt_release(void);
+
+typedef enum bt_coex_status {
+ BT_DHCP_IDLE = 0,
+ BT_DHCP_START,
+ BT_DHCP_OPPORTUNITY_WINDOW,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+} coex_status_t;
+
+typedef struct bt_info {
+ struct net_device *dev;
+ struct timer_list timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+ bool dhcp_done;
+ int bt_state;
+
+ long bt_pid;
+ struct semaphore bt_sem;
+ struct completion bt_exited;
+} bt_info_t;
+
+bt_info_t *g_bt = NULL;
+static void wl_iw_bt_timerfunc(ulong data);
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+#endif
+static int
+wl_iw_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+);
+
+#ifndef CSCAN
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+);
+
+static uint
+wl_iw_get_scan_prep(
+ wl_scan_results_t *list,
+ struct iw_request_info *info,
+ char *extra,
+ short max_size
+);
+#endif
+
+static void swap_key_from_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+ struct net_device *dev,
+ int cmd,
+ void *arg,
+ int len
+)
+{
+ struct ifreq ifr;
+ wl_ioctl_t ioc;
+ mm_segment_t fs;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return ret;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_INFORM(("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d ,\n",
+ __FUNCTION__, current->pid, cmd, arg, len));
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+
+ strcpy(ifr.ifr_name, dev->name);
+ ifr.ifr_data = (caddr_t) &ioc;
+
+ ret = dev_open(dev);
+ if (ret) {
+ WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret));
+ net_os_wake_unlock(dev);
+ return ret;
+ }
+
+ fs = get_fs();
+ set_fs(get_ds());
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+ ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+ set_fs(fs);
+ }
+ else {
+ WL_TRACE(("%s: call after driver stop : ignored\n", __FUNCTION__));
+ }
+
+ net_os_wake_unlock(dev);
+
+ return ret;
+}
+
+
+static int
+dev_wlc_intvar_get_reg(
+ struct net_device *dev,
+ char *name,
+ uint reg,
+ int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ uint len;
+ len = bcm_mkiovar(name, (char *)(&reg), sizeof(reg), (char *)(&var), sizeof(var.buf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
+
+static int
+dev_wlc_intvar_set_reg(
+ struct net_device *dev,
+ char *name,
+ char *addr,
+ char * val)
+{
+ char reg_addr[8];
+
+ memset(reg_addr, 0, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+
+static int
+dev_wlc_intvar_set(
+ struct net_device *dev,
+ char *name,
+ int val)
+{
+ char buf[WLC_IOCTL_SMLEN];
+ uint len;
+
+ val = htod32(val);
+ len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+ ASSERT(len);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
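+
+/*
+ * Illustrative usage only: dev_wlc_intvar_set() wraps a WLC_SET_VAR iovar
+ * round-trip, e.g. (the iovar name is one used later in this file):
+ *
+ *	dev_wlc_intvar_set(dev, "qtxpower", 10 | WL_TXPWR_OVERRIDE);
+ */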
+
+#if defined(WL_IW_USE_ISCAN)
+static int
+dev_iw_iovar_setbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+
+ if (iolen == 0)
+ return 0;
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+
+ return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+#endif
+
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+ struct net_device *dev,
+ char *name,
+ char *buf, int len)
+{
+ static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+ uint buflen;
+
+ buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf));
+ ASSERT(buflen);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen));
+}
+#endif
+
+
+static int
+dev_wlc_bufvar_get(
+ struct net_device *dev,
+ char *name,
+ char *buf, int buflen)
+{
+ static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+ int error;
+ uint len;
+
+ len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ if (!error)
+ bcopy(ioctlbuf, buf, buflen);
+
+ return (error);
+}
+
+
+
+static int
+dev_wlc_intvar_get(
+ struct net_device *dev,
+ char *name,
+ int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ uint len;
+ uint data_null;
+
+ len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+ *retval = dtoh32(var.val);
+
+ return (error);
+}
+
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_active_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int as = 0;
+ int error = 0;
+ char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+ if (g_iscan->iscan_state == ISCAN_STATE_IDLE)
+#endif
+ error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+#if defined(WL_IW_USE_ISCAN)
+ else
+ g_iscan->scan_flag = as;
+#endif
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_set_passive_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int ps = 1;
+ int error = 0;
+ char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+ if (g_iscan->iscan_state == ISCAN_STATE_IDLE) {
+#endif
+
+
+ if (g_scan_specified_ssid == 0) {
+ error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &ps, sizeof(ps));
+ }
+#if defined(WL_IW_USE_ISCAN)
+ }
+ else
+ g_iscan->scan_flag = ps;
+#endif
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+
+static int
+wl_iw_set_txpower(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ int txpower = -1;
+
+ txpower = bcm_atoi(extra + strlen(TXPOWER_SET_CMD) + 1);
+ if ((txpower >= 0) && (txpower <= 127)) {
+ txpower |= WL_TXPWR_OVERRIDE;
+ txpower = htod32(txpower);
+
+ error = dev_wlc_intvar_set(dev, "qtxpower", txpower);
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set TXpower 0x%X is OK\n", __FUNCTION__, txpower));
+ } else {
+ WL_ERROR(("%s: set tx power failed\n", __FUNCTION__));
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+ }
+
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_get_macaddr(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error;
+ char buf[128];
+ struct ether_addr *id;
+ char *p = extra;
+
+
+ strcpy(buf, "cur_etheraddr");
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, buf, sizeof(buf));
+ id = (struct ether_addr *) buf;
+ p += snprintf(p, MAX_WX_STRING, "Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ id->octet[0], id->octet[1], id->octet[2],
+ id->octet[3], id->octet[4], id->octet[5]);
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+
+
+static int
+wl_iw_set_country(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ char country_code[WLC_CNTRY_BUF_SZ];
+ int error = 0;
+ char *p = extra;
+ int country_offset;
+ int country_code_size;
+ wl_country_t cspec = {{0}, 0, {0}};
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ cspec.rev = -1;
+ memset(country_code, 0, sizeof(country_code));
+ memset(smbuf, 0, sizeof(smbuf));
+
+ country_offset = strcspn(extra, " ");
+ country_code_size = strlen(extra) - country_offset;
+
+ if (country_offset != 0) {
+ strncpy(country_code, extra + country_offset +1,
+ MIN(country_code_size, sizeof(country_code)));
+
+
+ memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+
+ get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+
+ if ((error = dev_iw_iovar_setbuf(dev, "country", &cspec, \
+ sizeof(cspec), smbuf, sizeof(smbuf))) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_ERROR(("%s: set country for %s as %s rev %d is OK\n", \
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ dhd_bus_country_set(dev, &cspec);
+ goto exit;
+ }
+ }
+
+ WL_ERROR(("%s: set country for %s as %s rev %d failed\n", \
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+#ifdef CUSTOMER_HW2
+static int
+wl_iw_set_power_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ static int pm = PM_FAST;
+ int pm_local = PM_OFF;
+ char powermode_val = 0;
+
+ WL_TRACE_COEX(("%s: DHCP session cmd:%s\n", __FUNCTION__, extra));
+
+ strncpy((char *)&powermode_val, extra + strlen("POWERMODE") + 1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+
+ /* Disable packet filtering if necessary */
+ net_os_set_packet_filter(dev, 0);
+
+ g_bt->dhcp_done = false;
+ WL_TRACE_COEX(("%s: DHCP start, pm:%d changed to pm:%d\n",
+ __FUNCTION__, pm, pm_local));
+
+ } else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+
+ /* Enable packet filtering if was turned off */
+ net_os_set_packet_filter(dev, 1);
+
+ g_bt->dhcp_done = true;
+
+ } else {
+ WL_ERROR(("%s: Unknown power setting, ignored\n",
+ __FUNCTION__));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+#endif
+
+
+bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = false;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE_COEX(("%s, sample[%d], btc params: 27:%x\n",
+ __FUNCTION__, i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) {
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE_COEX(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ __FUNCTION__, sco_id_cnt, i));
+ res = true;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = false;
+
+ char buf_reg50va_dhcp_on[8] = { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] = { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] = { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] = { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] = { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+
+ WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+
+ saved_status = TRUE;
+ WL_TRACE_COEX(("%s saved bt_params[50,51,64,65,71]:"
+ " 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __FUNCTION__, saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+
+ } else {
+ WL_ERROR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = false;
+ return -1;
+ }
+
+ WL_TRACE_COEX(("override with [50,51,64,65,71]:"
+ " 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = true;
+
+ } else if (saved_status) {
+
+ WL_TRACE_COEX(("Do new SCO/eSCO coex algo {restore} \n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE_COEX(("restore bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = false;
+ } else {
+ WL_ERROR((":%s: attempt to restore BTCOEX params that were not saved\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+static int
+wl_iw_get_power_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error;
+ char *p = extra;
+ int pm_local = PM_FAST;
+
+ error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local));
+ if (!error) {
+ WL_TRACE(("%s: Powermode = %d\n", __func__, pm_local));
+ if (pm_local == PM_OFF)
+ pm_local = 1; /* Active */
+ else
+ pm_local = 0; /* Auto */
+ p += snprintf(p, MAX_WX_STRING, "powermode = %d", pm_local);
+ }
+ else {
+ WL_TRACE(("%s: Error = %d\n", __func__, error));
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+ }
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_set_btcoex_dhcp(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+#ifndef CUSTOMER_HW2
+ static int pm = PM_FAST;
+ int pm_local = PM_OFF;
+#endif
+ char powermode_val = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+#ifdef CUSTOMER_HW2
+ strncpy((char *)&powermode_val, extra + strlen("BTCOEXMODE") + 1, 1);
+#else
+ strncpy((char *)&powermode_val, extra + strlen("POWERMODE") + 1, 1);
+#endif
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE_COEX(("%s: DHCP session start, cmd:%s\n", __FUNCTION__, extra));
+
+ if ((saved_status == FALSE) &&
+#ifndef CUSTOMER_HW2
+ (!dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))) &&
+#endif
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ WL_TRACE_COEX(("save regs {66,41,68} ->: 0x%x 0x%x 0x%x\n", \
+ saved_reg66, saved_reg41, saved_reg68));
+
+#ifndef CUSTOMER_HW2
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+#endif
+
+ if (btcoex_is_sco_active(dev)) {
+
+ dev_wlc_bufvar_set(dev, "btc_params", \
+ (char *)&buf_reg66va_dhcp_on[0], \
+ sizeof(buf_reg66va_dhcp_on));
+
+ dev_wlc_bufvar_set(dev, "btc_params", \
+ (char *)&buf_reg41va_dhcp_on[0], \
+ sizeof(buf_reg41va_dhcp_on));
+
+ dev_wlc_bufvar_set(dev, "btc_params", \
+ (char *)&buf_reg68va_dhcp_on[0], \
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ g_bt->bt_state = BT_DHCP_START;
+ g_bt->timer_on = 1;
+ mod_timer(&g_bt->timer, g_bt->timer.expires);
+ WL_TRACE_COEX(("%s enable BT DHCP Timer\n", \
+ __FUNCTION__));
+ }
+ }
+ else if (saved_status == TRUE) {
+ WL_ERROR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+ }
+ }
+#ifdef CUSTOMER_HW2
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+#else
+ else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+#endif
+
+#ifndef CUSTOMER_HW2
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+#endif
+
+ WL_TRACE_COEX(("%s disable BT DHCP Timer\n", __FUNCTION__));
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+
+ if (g_bt->bt_state != BT_DHCP_IDLE) {
+ WL_TRACE_COEX(("%s bt->bt_state:%d\n",
+ __FUNCTION__, g_bt->bt_state));
+
+ up(&g_bt->bt_sem);
+ }
+ }
+
+ if (saved_status == TRUE) {
+ dev_wlc_bufvar_set(dev, "btc_flags", \
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params", \
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params", \
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params", \
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE_COEX(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n", \
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+ }
+ else {
+ WL_ERROR(("%s: Unknown power setting, ignored\n",
+ __FUNCTION__));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+
+static int
+wl_iw_set_suspend(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(extra + strlen(SETSUSPEND_CMD) + 1) - '0';
+
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now)))
+ WL_ERROR(("%s: Suspend Flag %d -> %d\n", \
+ __FUNCTION__, ret_now, suspend_flag));
+ else
+ WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
+
+int
+wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len)
+{
+ int i, c;
+ char *p = ssid_buf;
+
+ if (ssid_len > 32) ssid_len = 32;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (int)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += sprintf(p, "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+
+ return p - ssid_buf;
+}
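+
+/*
+ * Illustrative example: an SSID containing a non-printable byte such as
+ * { 'a', 'p', 0x01 } is rendered by wl_format_ssid() as ap\x01, and a
+ * literal backslash in the SSID is doubled, so the output stays printable.
+ */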
+
+static int
+wl_iw_get_link_speed(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ static int link_speed;
+
+ net_os_wake_lock(dev);
+ if (g_onoff == G_WLAN_SET_ON) {
+ error = dev_wlc_ioctl(dev, WLC_GET_RATE, &link_speed, sizeof(link_speed));
+ link_speed *= 500000;
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "LinkSpeed %d", link_speed/1000000);
+
+ wrqu->data.length = p - extra + 1;
+
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+static int
+wl_iw_get_dtim_skip(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ char iovbuf[32];
+
+ net_os_wake_lock(dev);
+ if (g_onoff == G_WLAN_SET_ON) {
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ strcpy(iovbuf, "bcn_li_dtim");
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_VAR,
+ &iovbuf, sizeof(iovbuf))) >= 0) {
+
+ p += snprintf(p, MAX_WX_STRING, "Dtim_skip %d", iovbuf[0]);
+ WL_TRACE(("%s: get dtim_skip = %d\n", __FUNCTION__, iovbuf[0]));
+ wrqu->data.length = p - extra + 1;
+ }
+ else
+ WL_ERROR(("%s: get dtim_skip failed code %d\n", \
+ __FUNCTION__, error));
+ }
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+static int
+wl_iw_set_dtim_skip(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ int bcn_li_dtim;
+ char iovbuf[32];
+
+ net_os_wake_lock(dev);
+ if (g_onoff == G_WLAN_SET_ON) {
+
+ bcn_li_dtim = htod32((uint)*(extra + strlen(DTIM_SKIP_SET_CMD) + 1) - '0');
+
+ if ((bcn_li_dtim >= 0) && (bcn_li_dtim <= 5)) {
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+ 4, iovbuf, sizeof(iovbuf));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_VAR,
+ &iovbuf, sizeof(iovbuf))) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ net_os_set_dtim_skip(dev, bcn_li_dtim);
+
+ WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__, \
+ bcn_li_dtim));
+ goto exit;
+ }
+ else WL_ERROR(("%s: set dtim_skip %d failed code %d\n", \
+ __FUNCTION__, bcn_li_dtim, error));
+ }
+ else WL_ERROR(("%s Incorrect dtim_skip setting %d, ignored\n", \
+ __FUNCTION__, bcn_li_dtim));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+static int
+wl_iw_get_band(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ static int band;
+
+ net_os_wake_lock(dev);
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ error = dev_wlc_ioctl(dev, WLC_GET_BAND, &band, sizeof(band));
+
+ p += snprintf(p, MAX_WX_STRING, "Band %d", band);
+
+ wrqu->data.length = p - extra + 1;
+ }
+
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+static int
+wl_iw_set_band(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ uint band;
+
+ net_os_wake_lock(dev);
+
+ if (g_onoff == G_WLAN_SET_ON) {
+
+ band = htod32((uint)*(extra + strlen(BAND_SET_CMD) + 1) - '0');
+
+ if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_BAND,
+ &band, sizeof(band))) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set band %d OK\n", __FUNCTION__, band));
+ goto exit;
+ }
+ else WL_ERROR(("%s: set band %d failed code %d\n", __FUNCTION__, \
+ band, error));
+ }
+ else WL_ERROR(("%s Incorrect band setting %d, ignored\n", __FUNCTION__, band));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+#ifdef PNO_SUPPORT
+
+static int
+wl_iw_set_pno_reset(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+
+ net_os_wake_lock(dev);
+ if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+ if ((error = dhd_dev_pno_reset(dev)) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set OK\n", __FUNCTION__));
+ goto exit;
+ }
+ else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+
+static int
+wl_iw_set_pno_enable(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ int pfn_enabled;
+
+ net_os_wake_lock(dev);
+ pfn_enabled = htod32((uint)*(extra + strlen(PNOENABLE_SET_CMD) + 1) - '0');
+
+ if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+ if ((error = dhd_dev_pno_enable(dev, pfn_enabled)) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set OK\n", __FUNCTION__));
+ goto exit;
+ }
+ else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+
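+/*
+ * "PNOSETUP" private command: the payload is a small TLV stream carrying
+ * an SSID list, the scan interval, and optional repeat / frequency-expo-max
+ * fields.  The parsed values are handed to dhd_dev_pno_set().
+ */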
+static int
+wl_iw_set_pno_set(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int res = -1;
+ wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+ int nssid = 0;
+ cmd_tlv_t *cmd_tlv_temp;
+ char *str_ptr;
+ int tlv_size_left;
+ int pno_time;
+ int pno_repeat;
+ int pno_freq_expo_max;
+
+#ifdef PNO_SET_DEBUG
+ int i;
+ char pno_in_example[] = {'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', \
+ 'S', '1', '2', '0',
+ 'S',
+ 0x04,
+ 'B', 'R', 'C', 'M',
+ 'S',
+ 0x04,
+ 'G', 'O', 'O', 'G',
+ 'T',
+ '1','E',
+ 'R',
+ '2',
+ 'M',
+ '2',
+ 0x00
+ };
+#endif
+
+ net_os_wake_lock(dev);
+ WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+ if (wrqu->data.length < (strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))) {
+ WL_ERROR(("%s argument=%d less than %d\n", __FUNCTION__, \
+ wrqu->data.length, strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t)));
+ goto exit_proc;
+ }
+
+#ifdef PNO_SET_DEBUG
+ if (!(extra = kmalloc(sizeof(pno_in_example) +100, GFP_KERNEL))) {
+ res = -ENOMEM;
+ goto exit_proc;
+ }
+ memcpy(extra, pno_in_example, sizeof(pno_in_example));
+ wrqu->data.length = sizeof(pno_in_example);
+ for (i = 0; i < wrqu->data.length; i++)
+ printf("%02X ", extra[i]);
+ printf("\n");
+#endif
+
+ str_ptr = extra;
+#ifdef PNO_SET_DEBUG
+ str_ptr += strlen("PNOSETUP ");
+ tlv_size_left = wrqu->data.length - strlen("PNOSETUP ");
+#else
+ str_ptr += strlen(PNOSETUP_SET_CMD);
+ tlv_size_left = wrqu->data.length - strlen(PNOSETUP_SET_CMD);
+#endif
+
+ cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+ memset(ssids_local, 0, sizeof(ssids_local));
+ pno_repeat = pno_freq_expo_max = 0;
+
+ if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && \
+ (cmd_tlv_temp->version == PNO_TLV_VERSION) && \
+ (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION))
+ {
+ str_ptr += sizeof(cmd_tlv_t);
+ tlv_size_left -= sizeof(cmd_tlv_t);
+
+ if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, \
+ MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+ WL_ERROR(("SSID not present or corrupted, ret=%d\n", nssid));
+ goto exit_proc;
+ }
+ else {
+ if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+ WL_ERROR(("%s scan duration corrupted field size %d\n", \
+ __FUNCTION__, tlv_size_left));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+ WL_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+ if (str_ptr[0] != 0) {
+ if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+ WL_ERROR(("%s pno repeat : corrupted field\n", \
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+ WL_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+ if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+ WL_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", \
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+ WL_PNO(("%s: pno_freq_expo_max=%d\n", \
+ __FUNCTION__, pno_freq_expo_max));
+ }
+ }
+ }
+ else {
+ WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+ res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+ net_os_wake_unlock(dev);
+ return res;
+}
+#endif
+
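+/*
+ * "RSSI" private command: read the current RSSI with WLC_GET_RSSI and the
+ * associated SSID with WLC_GET_SSID, returning "<ssid> rssi <value>"
+ * (or "FAIL" when not associated or on error).
+ */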
+static int
+wl_iw_get_rssi(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ static int rssi = 0;
+ static wlc_ssid_t ssid = {0};
+ int error = 0;
+ char *p = extra;
+ static char ssidbuf[SSID_FMT_BUF_LEN];
+ scb_val_t scb_val;
+
+ net_os_wake_lock(dev);
+
+ bzero(&scb_val, sizeof(scb_val_t));
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ error = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+ if (error) {
+ WL_ERROR(("%s: Fails %d\n", __FUNCTION__, error));
+ } else {
+ rssi = dtoh32(scb_val.val);
+
+ error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
+ if (!error) {
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+ wl_format_ssid(ssidbuf, ssid.SSID, dtoh32(ssid.SSID_len));
+ }
+ }
+ }
+
+ WL_ASSOC(("%s ssid_len:%d, rssi:%d\n", __FUNCTION__, ssid.SSID_len, rssi));
+
+ if (error || (ssid.SSID_len == 0)) {
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+ } else {
+ p += snprintf(p, MAX_WX_STRING, "%s rssi %d ", ssidbuf, rssi);
+ }
+ wrqu->data.length = p - extra + 1;
+
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+int
+wl_iw_send_priv_event(
+ struct net_device *dev,
+ char *flag
+)
+{
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd;
+
+ cmd = IWEVCUSTOM;
+ memset(&wrqu, 0, sizeof(wrqu));
+ if (strlen(flag) >= sizeof(extra))
+ return -1;
+
+ strcpy(extra, flag);
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ net_os_wake_lock_timeout_enable(dev);
+ WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+ return 0;
+}
+
+
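+/*
+ * Power up and (re)initialize the dongle: assert the WLAN reset GPIO,
+ * bring up the SDIO host (when built with BCMLXSDMMC), reset the DHD bus
+ * and run the init ioctls.  On success g_onoff moves to G_WLAN_SET_ON.
+ */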
+int
+wl_control_wl_start(struct net_device *dev)
+{
+ int ret = 0;
+ wl_iw_t *iw;
+
+ WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+
+ if (!iw) {
+ WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+ return -1;
+ }
+ dhd_os_start_lock(iw->pub);
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+
+#if defined(BCMLXSDMMC)
+ sdioh_start(NULL, 0);
+#endif
+
+ ret = dhd_dev_reset(dev, 0);
+
+ if (ret == BCME_OK) {
+#if defined(BCMLXSDMMC)
+ sdioh_start(NULL, 1);
+#endif
+ dhd_dev_init_ioctl(dev);
+ g_onoff = G_WLAN_SET_ON;
+ }
+ }
+ WL_TRACE(("Exited %s \n", __FUNCTION__));
+
+ dhd_os_start_unlock(iw->pub);
+ return ret;
+}
+
+
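+/*
+ * "STOP" private command: tear the interface down - put the dongle in
+ * reset, stop SDIO, drop cached scan state, clear the DTIM skip setting
+ * and notify user space with a "STOP" custom event.
+ */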
+static int
+wl_iw_control_wl_off(
+ struct net_device *dev,
+ struct iw_request_info *info
+)
+{
+ int ret = 0;
+ wl_iw_t *iw;
+
+ WL_TRACE(("Enter %s\n", __FUNCTION__));
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+ if (!iw) {
+ WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+ return -1;
+ }
+ dhd_os_start_lock(iw->pub);
+
+#ifdef SOFTAP
+ ap_cfg_running = FALSE;
+#endif
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ g_onoff = G_WLAN_SET_OFF;
+#if defined(WL_IW_USE_ISCAN)
+ g_iscan->iscan_state = ISCAN_STATE_IDLE;
+#endif
+
+ dhd_dev_reset(dev, 1);
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+ wl_iw_free_ss_cache();
+ wl_iw_run_ss_cache_timer(0);
+
+ g_ss_cache_ctrl.m_link_down = 1;
+#endif
+ memset(g_scan, 0, G_SCAN_RESULTS);
+ g_scan_specified_ssid = 0;
+#if defined(CONFIG_FIRST_SCAN)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+ g_first_counter_scans = 0;
+#endif
+#endif
+
+#if defined(BCMLXSDMMC)
+ sdioh_stop(NULL);
+#endif
+
+ net_os_set_dtim_skip(dev, 0);
+
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+
+ wl_iw_send_priv_event(dev, "STOP");
+ }
+
+ dhd_os_start_unlock(iw->pub);
+
+ WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+ return ret;
+}
+
+static int
+wl_iw_control_wl_on(
+ struct net_device *dev,
+ struct iw_request_info *info
+)
+{
+ int ret = 0;
+
+ WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+ if ((ret = wl_control_wl_start(dev)) != BCME_OK) {
+ WL_ERROR(("%s failed first attempt\n", __FUNCTION__));
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ if ((ret = wl_control_wl_start(dev)) != BCME_OK) {
+ WL_ERROR(("%s failed second attempt\n", __FUNCTION__));
+ net_os_send_hang_message(dev);
+ return ret;
+ }
+ }
+
+ wl_iw_send_priv_event(dev, "START");
+
+#ifdef SOFTAP
+ if (!ap_fw_loaded) {
+ wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+ }
+#else
+ wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+#endif
+
+ WL_TRACE(("Exited %s \n", __FUNCTION__));
+
+ return ret;
+}
+
+#ifdef SOFTAP
+static struct ap_profile my_ap;
+static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap);
+static int get_assoc_sta_list(struct net_device *dev, char *buf, int len);
+static int set_ap_mac_list(struct net_device *dev, void *buf);
+
+#define PTYPE_STRING 0
+#define PTYPE_INTDEC 1
+#define PTYPE_INTHEX 2
+#define PTYPE_STR_HEX 3
+int get_parmeter_from_string(
+ char **str_ptr, const char *token, int param_type, void *dst, int param_max_len);
+
+#endif
+
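+/* ASCII-hex helpers: hex2num() converts one digit, hex2byte() one octet,
+ * and hstr_2_buf() a fixed-length hex string into a binary buffer.
+ */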
+int hex2num(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+int hex2byte(const char *hex)
+{
+ int a, b;
+ a = hex2num(*hex++);
+ if (a < 0)
+ return -1;
+ b = hex2num(*hex++);
+ if (b < 0)
+ return -1;
+ return (a << 4) | b;
+}
+
+
+
+int hstr_2_buf(const char *txt, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int a, b;
+
+ a = hex2num(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex2num(*txt++);
+ if (b < 0)
+ return -1;
+ *buf++ = (a << 4) | b;
+ }
+
+ return 0;
+}
+
+#if defined(SOFTAP) && defined(SOFTAP_TLV_CFG)
+
+static int wl_iw_softap_cfg_tlv(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int res = -1;
+ char *str_ptr;
+ int tlv_size_left;
+
+
+#define SOFTAP_TLV_DEBUG 1
+#ifdef SOFTAP_TLV_DEBUG
+char softap_cmd_example[] = {
+
+ 'S', 'O', 'F', 'T', 'A', 'P', 'S', 'E', 'T', ' ',
+
+ SOFTAP_TLV_PREFIX, SOFTAP_TLV_VERSION,
+ SOFTAP_TLV_SUBVERSION, SOFTAP_TLV_RESERVED,
+
+ TLV_TYPE_SSID, 9, 'B', 'R', 'C', 'M', ',', 'G', 'O', 'O', 'G',
+
+ TLV_TYPE_SECUR, 4, 'O', 'P', 'E', 'N',
+
+ TLV_TYPE_KEY, 4, 0x31, 0x32, 0x33, 0x34,
+
+ TLV_TYPE_CHANNEL, 4, 0x06, 0x00, 0x00, 0x00
+};
+#endif
+
+
+#ifdef SOFTAP_TLV_DEBUG
+ {
+ int i;
+ if (!(extra = kmalloc(sizeof(softap_cmd_example) +10, GFP_KERNEL)))
+ return -ENOMEM;
+ memcpy(extra, softap_cmd_example, sizeof(softap_cmd_example));
+ wrqu->data.length = sizeof(softap_cmd_example);
+ print_buf(extra, wrqu->data.length, 16);
+ for (i = 0; i < wrqu->data.length; i++)
+ printf("%c ", extra[i]);
+ printf("\n");
+ }
+#endif
+
+ WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ return -1;
+ }
+
+ if (wrqu->data.length < (strlen(SOFTAP_SET_CMD) + sizeof(cmd_tlv_t))) {
+ WL_ERROR(("%s argument=%d less %d\n", __FUNCTION__,
+ wrqu->data.length, strlen(SOFTAP_SET_CMD) + sizeof(cmd_tlv_t)));
+ return -1;
+ }
+
+ str_ptr = extra + strlen(SOFTAP_SET_CMD)+1;
+ tlv_size_left = wrqu->data.length - (strlen(SOFTAP_SET_CMD)+1);
+
+ memset(&my_ap, 0, sizeof(my_ap));
+
+ return res;
+}
+#endif
+
+
+#ifdef SOFTAP
+int init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg)
+{
+ char *str_ptr = param_str;
+ char sub_cmd[16];
+ int ret = 0;
+
+ memset(sub_cmd, 0, sizeof(sub_cmd));
+ memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+ if (get_parmeter_from_string(&str_ptr, "ASCII_CMD=",
+ PTYPE_STRING, sub_cmd, SSID_LEN) != 0) {
+ return -1;
+ }
+ if (strncmp(sub_cmd, "AP_CFG", 6)) {
+ WL_ERROR(("ERROR: sub_cmd:%s != 'AP_CFG'!\n", sub_cmd));
+ return -1;
+ }
+
+ ret = get_parmeter_from_string(&str_ptr, "SSID=", PTYPE_STRING, ap_cfg->ssid, SSID_LEN);
+
+ ret |= get_parmeter_from_string(&str_ptr, "SEC=", PTYPE_STRING, ap_cfg->sec, SEC_LEN);
+
+ ret |= get_parmeter_from_string(&str_ptr, "KEY=", PTYPE_STRING, ap_cfg->key, KEY_LEN);
+
+ ret |= get_parmeter_from_string(&str_ptr, "CHANNEL=", PTYPE_INTDEC, &ap_cfg->channel, 5);
+
+ get_parmeter_from_string(&str_ptr, "PREAMBLE=", PTYPE_INTDEC, &ap_cfg->preamble, 5);
+
+ get_parmeter_from_string(&str_ptr, "MAX_SCB=", PTYPE_INTDEC, &ap_cfg->max_scb, 5);
+
+ get_parmeter_from_string(&str_ptr, "HIDDEN=", PTYPE_INTDEC, &ap_cfg->closednet, 5);
+
+ get_parmeter_from_string(&str_ptr, "COUNTRY=", PTYPE_STRING, &ap_cfg->country_code, 3);
+
+ return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+static int iwpriv_set_ap_config(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+ char *extra = NULL;
+ struct ap_profile *ap_cfg = &my_ap;
+
+ WL_TRACE(("%s: info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+ __FUNCTION__,
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (wrqu->data.length != 0) {
+
+ char *str_ptr;
+
+ if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ extra[wrqu->data.length] = 0;
+ WL_SOFTAP((" Got str param in iw_point:\n %s\n", extra));
+
+ memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+ str_ptr = extra;
+
+ if ((res = init_ap_profile_from_string(extra, ap_cfg)) < 0) {
+ WL_ERROR(("%s failed to parse %d\n", __FUNCTION__, res));
+ kfree(extra);
+ return -1;
+ }
+
+ } else {
+ WL_ERROR(("IWPRIV argument len = 0 \n"));
+ return -1;
+ }
+
+ if ((res = set_ap_cfg(dev, ap_cfg)) < 0)
+ WL_ERROR(("%s failed to set_ap_cfg %d\n", __FUNCTION__, res));
+
+ kfree(extra);
+
+ return res;
+}
+#endif
+
+
+#ifdef SOFTAP
+static int iwpriv_get_assoc_list(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *p_iwrq,
+ char *extra)
+{
+ int i, ret = 0;
+ char mac_buf[256];
+ struct maclist *sta_maclist = (struct maclist *)mac_buf;
+
+ char mac_lst[384];
+ char *p_mac_str;
+ char *p_mac_str_end;
+
+ if ((!dev) || (!extra)) {
+ return -EINVAL;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_TRACE(("\n %s: IWPRIV IOCTL: cmd:%hx, flags:%hx, extra:%p, iwp.len:%d, \
+ iwp.ptr:%p, iwp.flags:%x \n", __FUNCTION__, info->cmd, info->flags, \
+ extra, p_iwrq->data.length, p_iwrq->data.pointer, p_iwrq->data.flags));
+
+ memset(sta_maclist, 0, sizeof(mac_buf));
+
+ sta_maclist->count = 8;
+
+ WL_SOFTAP(("%s: net device:%s, buf_sz:%d\n",
+ __FUNCTION__, dev->name, sizeof(mac_buf)));
+
+ if ((ret = get_assoc_sta_list(dev, mac_buf, sizeof(mac_buf))) < 0) {
+ WL_ERROR(("%s: sta list ioctl error:%d\n",
+ __FUNCTION__, ret));
+ goto func_exit;
+ }
+
+ WL_SOFTAP(("%s: got %d stations\n", __FUNCTION__,
+ sta_maclist->count));
+
+ memset(mac_lst, 0, sizeof(mac_lst));
+ p_mac_str = mac_lst;
+ p_mac_str_end = &mac_lst[sizeof(mac_lst)-1];
+
+ for (i = 0; i < 8; i++) {
+ struct ether_addr *id = &sta_maclist->ea[i];
+ if (!ETHER_ISNULLADDR(id->octet)) {
+ scb_val_t scb_val;
+ int rssi = 0;
+
+ bzero(&scb_val, sizeof(scb_val_t));
+
+ if ((p_mac_str_end - p_mac_str) <= 36) {
+ WL_ERROR(("%s: mac list buf is < 36 bytes for item[%d]\n",
+ __FUNCTION__, i));
+ break;
+ }
+
+ p_mac_str += snprintf(p_mac_str, MAX_WX_STRING,
+ "\nMac[%d]=%02X:%02X:%02X:%02X:%02X:%02X,", i,
+ id->octet[0], id->octet[1], id->octet[2],
+ id->octet[3], id->octet[4], id->octet[5]);
+
+ bcopy(id->octet, &scb_val.ea, 6);
+ ret = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+ if (ret < 0) {
+ snprintf(p_mac_str, MAX_WX_STRING, "RSSI:ERR");
+ WL_ERROR(("%s: RSSI ioctl error:%d\n",
+ __FUNCTION__, ret));
+ break;
+ }
+
+ rssi = dtoh32(scb_val.val);
+ p_mac_str += snprintf(p_mac_str, MAX_WX_STRING,
+ "RSSI:%d", rssi);
+ }
+ }
+
+ p_iwrq->data.length = strlen(mac_lst) + 1;
+
+ WL_SOFTAP(("%s: data to user:\n%s\n usr_ptr:%p\n", __FUNCTION__,
+ mac_lst, p_iwrq->data.pointer));
+
+ if (p_iwrq->data.length) {
+ bcopy(mac_lst, extra, p_iwrq->data.length);
+ }
+
+func_exit:
+ net_os_wake_unlock(dev);
+
+ WL_TRACE(("Exited %s \n", __FUNCTION__));
+ return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+#define MAC_FILT_MAX 8
+static int iwpriv_set_mac_filters(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int i, ret = -1;
+ char * extra = NULL;
+ int mac_cnt = 0;
+ int mac_mode = 0;
+ struct ether_addr *p_ea;
+ struct mac_list_set mflist_set;
+
+ WL_SOFTAP((">>> Got IWPRIV SET_MAC_FILTER IOCTL: info->cmd:%x, \
+ info->flags:%x, u.data:%p, u.len:%d\n",
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (wrqu->data.length != 0) {
+
+ char *str_ptr;
+
+ if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ extra[wrqu->data.length] = 0;
+ WL_SOFTAP((" Got parameter string in iw_point:\n %s \n", extra));
+
+ memset(&mflist_set, 0, sizeof(mflist_set));
+
+ str_ptr = extra;
+
+ if (get_parmeter_from_string(&str_ptr, "MAC_MODE=",
+ PTYPE_INTDEC, &mac_mode, 4) != 0) {
+ WL_ERROR(("ERROR: 'MAC_MODE=' token is missing\n"));
+ goto exit_proc;
+ }
+
+ p_ea = &mflist_set.mac_list.ea[0];
+
+ if (get_parmeter_from_string(&str_ptr, "MAC_CNT=",
+ PTYPE_INTDEC, &mac_cnt, 4) != 0) {
+ WL_ERROR(("ERROR: 'MAC_CNT=' token param is missing \n"));
+ goto exit_proc;
+ }
+
+ if (mac_cnt > MAC_FILT_MAX) {
+ WL_ERROR(("ERROR: number of MAC filters > MAX\n"));
+ goto exit_proc;
+ }
+
+ for (i=0; i < mac_cnt; i++)
+ if (get_parmeter_from_string(&str_ptr, "MAC=",
+ PTYPE_STR_HEX, &p_ea[i], 12) != 0) {
+ WL_ERROR(("ERROR: MAC_filter[%d] is missing !\n", i));
+ goto exit_proc;
+ }
+
+ WL_SOFTAP(("MAC_MODE=:%d, MAC_CNT=%d, MACs:..\n", mac_mode, mac_cnt));
+ for (i = 0; i < mac_cnt; i++) {
+ WL_SOFTAP(("mac_filt[%d]:", i));
+ print_buf(&p_ea[i], 6, 0);
+ }
+
+ mflist_set.mode = mac_mode;
+ mflist_set.mac_list.count = mac_cnt;
+ set_ap_mac_list(dev, &mflist_set);
+
+ wrqu->data.pointer = NULL;
+ wrqu->data.length = 0;
+ ret = 0;
+
+ } else {
+ WL_ERROR(("IWPRIV argument len is 0\n"));
+ return -1;
+ }
+
+ exit_proc:
+ kfree(extra);
+ return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+static int iwpriv_set_ap_sta_disassoc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+ char sta_mac[6] = {0, 0, 0, 0, 0, 0};
+ char cmd_buf[256];
+ char *str_ptr = cmd_buf;
+
+ WL_SOFTAP((">>%s called\n args: info->cmd:%x,"
+ " info->flags:%x, u.data.p:%p, u.data.len:%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if ((wrqu->data.length != 0) && (wrqu->data.length < sizeof(cmd_buf))) {
+
+ if (copy_from_user(cmd_buf, wrqu->data.pointer, wrqu->data.length)) {
+ return -EFAULT;
+ }
+ cmd_buf[wrqu->data.length] = '\0';
+
+ if (get_parmeter_from_string(&str_ptr,
+ "MAC=", PTYPE_STR_HEX, sta_mac, 12) == 0) {
+ res = wl_iw_softap_deassoc_stations(dev, sta_mac);
+ } else {
+ WL_ERROR(("ERROR: STA_MAC= token not found\n"));
+ }
+ }
+
+ return res;
+}
+#endif
+
+#endif
+
+
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+ __u16 cmd;
+ __u16 flags;
+};
+
+typedef int (*iw_handler)(struct net_device *dev,
+ struct iw_request_info *info,
+ void *wrqu,
+ char *extra);
+#endif
+
+static int
+wl_iw_config_commit(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ void *zwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+ struct sockaddr bssid;
+
+ WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+ return error;
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ if (!ssid.SSID_len)
+ return 0;
+
+ bzero(&bssid, sizeof(struct sockaddr));
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+ WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __FUNCTION__, ssid.SSID));
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_name(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ char *cwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+ strcpy(cwrq, "IEEE 802.11-DS");
+
+ return 0;
+}
+
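+/*
+ * SIOCSIWFREQ handler: the request may carry either a channel number
+ * directly or a frequency, which is normalized to MHz and converted with
+ * wf_mhz2channel() before WLC_SET_CHANNEL is issued.
+ */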
+static int
+wl_iw_set_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ int error, chan;
+ uint sf = 0;
+
+ WL_TRACE(("%s %s: SIOCSIWFREQ\n", __FUNCTION__, dev->name));
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("%s:>> not executed, 'SOFT_AP is active' \n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+
+ if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+ chan = fwrq->m;
+ } else {
+
+ if (fwrq->e >= 6) {
+ fwrq->e -= 6;
+ while (fwrq->e--)
+ fwrq->m *= 10;
+ } else if (fwrq->e < 6) {
+ while (fwrq->e++ < 6)
+ fwrq->m /= 10;
+ }
+
+ if (fwrq->m > 4000 && fwrq->m < 5000)
+ sf = WF_CHAN_FACTOR_4_G;
+
+ chan = wf_mhz2channel(fwrq->m, sf);
+ }
+ chan = htod32(chan);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+ return error;
+
+ g_wl_iw_params.target_channel = chan;
+
+ return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+
+ fwrq->m = dtoh32(ci.hw_channel);
+ fwrq->e = dtoh32(0);
+ return 0;
+}
+
+static int
+wl_iw_set_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int infra = 0, ap = 0, error = 0;
+
+ WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+ switch (*uwrq) {
+ case IW_MODE_MASTER:
+ infra = ap = 1;
+ break;
+ case IW_MODE_ADHOC:
+ case IW_MODE_AUTO:
+ break;
+ case IW_MODE_INFRA:
+ infra = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ infra = htod32(infra);
+ ap = htod32(ap);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+ return error;
+
+
+ return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int error, infra = 0, ap = 0;
+
+ WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+ return error;
+
+ infra = dtoh32(infra);
+ ap = dtoh32(ap);
+ *uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+ return 0;
+}
+
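+/*
+ * SIOCGIWRANGE handler: fill struct iw_range with the valid channel list,
+ * the current rate set (plus the 11n rate table when nmode is active on an
+ * SSN PHY), key sizes, tx-power range and supported WE event capabilities.
+ */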
+static int
+wl_iw_get_range(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ struct iw_range *range = (struct iw_range *) extra;
+ wl_uint32_list_t *list;
+ wl_rateset_t rateset;
+ int8 *channels;
+ int error, i, k;
+ uint sf, ch;
+
+ int phytype;
+ int bw_cap = 0, sgi_tx = 0, nmode = 0;
+ channel_info_t ci;
+ uint8 nrate_list2copy = 0;
+ uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+ {14, 29, 43, 58, 87, 116, 130, 144},
+ {27, 54, 81, 108, 162, 216, 243, 270},
+ {30, 60, 90, 120, 180, 240, 270, 300}};
+
+ WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ channels = kmalloc((MAXCHANNEL+1)*4, GFP_KERNEL);
+ if (!channels) {
+ WL_ERROR(("Could not alloc channels\n"));
+ return -ENOMEM;
+ }
+ list = (wl_uint32_list_t *)channels;
+
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(struct iw_range));
+
+ range->min_nwid = range->max_nwid = 0;
+
+ list->count = htod32(MAXCHANNEL);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, (MAXCHANNEL+1)*4))) {
+ kfree(channels);
+ return error;
+ }
+ for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+ range->freq[i].i = dtoh32(list->element[i]);
+
+ ch = dtoh32(list->element[i]);
+ if (ch <= CH_MAX_2G_CHANNEL)
+ sf = WF_CHAN_FACTOR_2_4_G;
+ else
+ sf = WF_CHAN_FACTOR_5_G;
+
+ range->freq[i].m = wf_channel2mhz(ch, sf);
+ range->freq[i].e = 6;
+ }
+ range->num_frequency = range->num_channels = i;
+
+ range->max_qual.qual = 5;
+
+ range->max_qual.level = 0x100 - 200;
+
+ range->max_qual.noise = 0x100 - 200;
+
+ range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+
+ range->avg_qual.qual = 3;
+
+ range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+
+ range->avg_qual.noise = 0x100 - 75;
+#endif
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) {
+ kfree(channels);
+ return error;
+ }
+ rateset.count = dtoh32(rateset.count);
+ range->num_bitrates = rateset.count;
+ for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = (rateset.rates[i]& 0x7f) * 500000;
+ dev_wlc_intvar_get(dev, "nmode", &nmode);
+ dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype));
+
+ if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) {
+ dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
+ dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
+ dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t));
+ ci.hw_channel = dtoh32(ci.hw_channel);
+
+ if (bw_cap == 0 ||
+ (bw_cap == 2 && ci.hw_channel <= 14)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 0;
+ else
+ nrate_list2copy = 1;
+ }
+ if (bw_cap == 1 ||
+ (bw_cap == 2 && ci.hw_channel >= 36)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 2;
+ else
+ nrate_list2copy = 3;
+ }
+ range->num_bitrates += 8;
+ for (k = 0; i < range->num_bitrates; k++, i++) {
+
+ range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+ }
+ }
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) {
+ kfree(channels);
+ return error;
+ }
+ i = dtoh32(i);
+ if (i == WLC_PHY_TYPE_A)
+ range->throughput = 24000000;
+ else
+ range->throughput = 1500000;
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+ range->num_encoding_sizes = 4;
+ range->encoding_size[0] = WEP1_KEY_SIZE;
+ range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+ range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+ range->encoding_size[2] = 0;
+#endif
+ range->encoding_size[3] = AES_KEY_SIZE;
+
+ range->min_pmp = 0;
+ range->max_pmp = 0;
+ range->min_pmt = 0;
+ range->max_pmt = 0;
+ range->pmp_flags = 0;
+ range->pm_capa = 0;
+
+ range->num_txpower = 2;
+ range->txpower[0] = 1;
+ range->txpower[1] = 255;
+ range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 19;
+
+ range->retry_capa = IW_RETRY_LIMIT;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = 0;
+
+ range->min_retry = 1;
+ range->max_retry = 255;
+
+ range->min_r_time = 0;
+ range->max_r_time = 0;
+#endif
+
+#if WIRELESS_EXT > 17
+ range->enc_capa = IW_ENC_CAPA_WPA;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+#ifdef BCMWPA2
+ range->enc_capa |= IW_ENC_CAPA_WPA2;
+#endif
+
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+#ifdef BCMWPA2
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+#endif
+#endif
+
+ kfree(channels);
+
+ return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
+
+static int
+wl_iw_set_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ int i;
+
+ WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+ for (i = 0; i < iw->spy_num; i++)
+ memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+ memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+ return 0;
+}
+
+static int
+wl_iw_get_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ dwrq->length = iw->spy_num;
+ for (i = 0; i < iw->spy_num; i++) {
+ memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+ addr[i].sa_family = AF_UNIX;
+ memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+ iw->spy_qual[i].updated = 0;
+ }
+
+ return 0;
+}
+
+
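+/*
+ * Build a 20 MHz chanspec for the requested channel and append it to the
+ * join parameters so the association is steered to that channel.
+ */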
+static int
+wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params, int *join_params_size)
+{
+ chanspec_t chanspec = 0;
+
+ if (ch != 0) {
+
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
+
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ htodchanspec(join_params->params.chanspec_list[0]);
+
+ join_params->params.chanspec_num = htod32(join_params->params.chanspec_num);
+
+ WL_TRACE(("%s join_params->params.chanspec_list[0]= %X\n", \
+ __FUNCTION__, join_params->params.chanspec_list[0]));
+ }
+ return 1;
+}
+
+static int
+wl_iw_set_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ int error = -EINVAL;
+ wl_join_params_t join_params;
+ int join_params_size;
+
+ WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+ if (awrq->sa_family != ARPHRD_ETHER) {
+ WL_ERROR(("Invalid Header...sa_family\n"));
+ return -EINVAL;
+ }
+
+
+ if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+ scb_val_t scbval;
+
+ bzero(&scbval, sizeof(scb_val_t));
+
+ (void) dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ return 0;
+ }
+
+
+
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
+ memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+
+ WL_ASSOC(("%s target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel));
+ wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) {
+ WL_ERROR(("%s Invalid ioctl data=%d\n", __FUNCTION__, error));
+ return error;
+ }
+
+ if (g_ssid.SSID_len) {
+ WL_ASSOC(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__, \
+ g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data), \
+ g_wl_iw_params.target_channel));
+ }
+
+
+ memset(&g_ssid, 0, sizeof(g_ssid));
+ return 0;
+}
+
+static int
+wl_iw_get_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+ awrq->sa_family = ARPHRD_ETHER;
+ memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+
+ (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ struct iw_mlme *mlme;
+ scb_val_t scbval;
+ int error = -EINVAL;
+
+ WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name));
+
+ mlme = (struct iw_mlme *)extra;
+ if (mlme == NULL) {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+
+ scbval.val = mlme->reason_code;
+ bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+ if (mlme->cmd == IW_MLME_DISASSOC) {
+ scbval.val = htod32(scbval.val);
+ error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ }
+ else if (mlme->cmd == IW_MLME_DEAUTH) {
+ scbval.val = htod32(scbval.val);
+ error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+ sizeof(scb_val_t));
+ }
+ else {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+
+ return error;
+}
+#endif
+
+#ifndef WL_IW_USE_ISCAN
+static int
+wl_iw_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int error, i;
+ uint buflen = dwrq->length;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ list = kmalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ memset(list, 0, buflen);
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s: list->version %d != WL_BSS_INFO_VERSION\n", \
+ __FUNCTION__, list->version));
+ kfree(list);
+ return -EINVAL;
+ }
+
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+ if ((dtoh32(bi->length) > buflen) ||
+ (((uintptr)bi + dtoh32(bi->length)) > ((uintptr)list + buflen))) {
+ WL_ERROR(("%s: Scan results out of bounds: %u\n",__FUNCTION__,dtoh32(bi->length)));
+ kfree(list);
+ return -E2BIG;
+ }
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+
+ kfree(list);
+ WL_ERROR(("%s: Null list pointer\n", __FUNCTION__));
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+ return 0;
+}
+#endif
+
+#ifdef WL_IW_USE_ISCAN
+static int
+wl_iw_iscan_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ iscan_buf_t * buf;
+ iscan_info_t *iscan = g_iscan;
+
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ WL_ERROR(("%s error\n", __FUNCTION__));
+ return 0;
+ }
+
+ buf = iscan->list_hdr;
+
+ while (buf) {
+ list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \
+ __FUNCTION__, list->version));
+ return -EINVAL;
+ }
+
+ bi = NULL;
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+ : list->bss_info;
+
+ if ((dtoh32(bi->length) > WLC_IW_ISCAN_MAXLEN) ||
+ (((uintptr)bi + dtoh32(bi->length)) > ((uintptr)list + WLC_IW_ISCAN_MAXLEN))) {
+ WL_ERROR(("%s: Scan results out of bounds: %u\n",__FUNCTION__,dtoh32(bi->length)));
+ return -E2BIG;
+ }
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+ buf = buf->next;
+ }
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+ return 0;
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+ int err = 0;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+ params->passive_time = 30;
+#endif
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+ if (ssid && ssid->SSID_len)
+ memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+ return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+ int err = 0;
+
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(action);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+ WL_SCAN(("%s : nprobes=%d\n", __FUNCTION__, iscan->iscan_ex_params_p->params.nprobes));
+ WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+ WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+ WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+ WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+ WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+
+ if ((err = dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p, \
+ iscan->iscan_ex_param_size, iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+ WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+ err = -1;
+ }
+
+ return err;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+ iscan_info_t *iscan = (iscan_info_t *)data;
+ if (iscan) {
+ iscan->timer_on = 0;
+ if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+ WL_SCAN(("timer trigger\n"));
+ up(&iscan->sysioc_sem);
+ }
+ }
+}
+static void wl_iw_set_event_mask(struct net_device *dev)
+{
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+ dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+ iscan_buf_t * buf;
+ iscan_buf_t * ptr;
+ wl_iscan_results_t * list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ uint32 status;
+ int res;
+
+ mutex_lock(&wl_cache_lock);
+ if (iscan->list_cur) {
+ buf = iscan->list_cur;
+ iscan->list_cur = buf->next;
+ }
+ else {
+ buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+ if (!buf) {
+ WL_ERROR(("%s can't alloc iscan_buf_t : going to abort current iscan\n", \
+ __FUNCTION__));
+ mutex_unlock(&wl_cache_lock);
+ return WL_SCAN_RESULTS_NO_MEM;
+ }
+ buf->next = NULL;
+ if (!iscan->list_hdr)
+ iscan->list_hdr = buf;
+ else {
+ ptr = iscan->list_hdr;
+ while (ptr->next) {
+ ptr = ptr->next;
+ }
+ ptr->next = buf;
+ }
+ }
+ memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ res = dev_iw_iovar_getbuf(
+ iscan->dev,
+ "iscanresults",
+ &list,
+ WL_ISCAN_RESULTS_FIXED_SIZE,
+ buf->iscan_buf,
+ WLC_IW_ISCAN_MAXLEN);
+ if (res == 0) {
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ results->count = dtoh32(results->count);
+ WL_SCAN(("results->count = %d\n", results->count));
+
+ WL_SCAN(("results->buflen = %d\n", results->buflen));
+ status = dtoh32(list_buf->status);
+ } else {
+ WL_ERROR(("%s returns error %d\n", __FUNCTION__, res));
+ status = WL_SCAN_RESULTS_NO_MEM;
+ }
+ mutex_unlock(&wl_cache_lock);
+ return status;
+}
+
+static void wl_iw_force_specific_scan(iscan_info_t *iscan)
+{
+ WL_SCAN(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+ (void) dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+#ifndef SANDGATE2G
+ union iwreq_data wrqu;
+
+ memset(&wrqu, 0, sizeof(wrqu));
+
+ wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY;
+#endif
+ WL_SCAN(("Send Event ISCAN complete\n"));
+#endif
+}
+
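+/*
+ * Incremental-scan worker thread: woken through sysioc_sem (the scan timer
+ * is one such waker), it pulls partial results with "iscanresults",
+ * re-arms the timer while results are partial or pending, and sends the
+ * SIOCGIWSCAN completion event once the scan finishes or is aborted.
+ */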
+static int
+_iscan_sysioc_thread(void *data)
+{
+ uint32 status;
+ iscan_info_t *iscan = (iscan_info_t *)data;
+ static bool iscan_pass_abort = FALSE;
+
+ DAEMONIZE("iscan_sysioc");
+
+ status = WL_SCAN_RESULTS_PARTIAL;
+ while (down_interruptible(&iscan->sysioc_sem) == 0) {
+
+ net_os_wake_lock(iscan->dev);
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_SCAN(("%s skipping SCAN ops in AP mode !!!\n", __FUNCTION__));
+ net_os_wake_unlock(iscan->dev);
+ continue;
+ }
+#endif
+
+ if (iscan->timer_on) {
+ iscan->timer_on = 0;
+ del_timer_sync(&iscan->timer);
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+ status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ if (g_scan_specified_ssid && (iscan_pass_abort == TRUE)) {
+ WL_SCAN(("%s Get results from specific scan status=%d\n", __FUNCTION__, status));
+ wl_iw_send_scan_complete(iscan);
+ iscan_pass_abort = FALSE;
+ status = -1;
+ }
+
+ switch (status) {
+ case WL_SCAN_RESULTS_PARTIAL:
+ WL_SCAN(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+
+ wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_SUCCESS:
+ WL_SCAN(("iscanresults complete\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ wl_iw_send_scan_complete(iscan);
+ break;
+ case WL_SCAN_RESULTS_PENDING:
+ WL_SCAN(("iscanresults pending\n"));
+
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_ABORTED:
+ WL_SCAN(("iscanresults aborted\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ if (g_scan_specified_ssid == 0)
+ wl_iw_send_scan_complete(iscan);
+ else {
+ iscan_pass_abort = TRUE;
+ wl_iw_force_specific_scan(iscan);
+ }
+ break;
+ case WL_SCAN_RESULTS_NO_MEM:
+ WL_SCAN(("iscanresults can't alloc memory: skip\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ break;
+ default:
+ WL_SCAN(("iscanresults returned unknown status %d\n", status));
+ break;
+ }
+
+ net_os_wake_unlock(iscan->dev);
+ }
+
+ if (iscan->timer_on) {
+ iscan->timer_on = 0;
+ del_timer_sync(&iscan->timer);
+ }
+
+ complete_and_exit(&iscan->sysioc_exited, 0);
+}
+#endif
+
+#if !defined(CSCAN)
+
+static void
+wl_iw_set_ss_cache_timer_flag(void)
+{
+ g_ss_cache_ctrl.m_timer_expired = 1;
+ WL_TRACE(("%s called\n", __FUNCTION__));
+}
+
+static int
+wl_iw_init_ss_cache_ctrl(void)
+{
+ WL_TRACE(("%s :\n", __FUNCTION__));
+ g_ss_cache_ctrl.m_prev_scan_mode = 0;
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+ g_ss_cache_ctrl.m_cache_head = NULL;
+ g_ss_cache_ctrl.m_link_down = 0;
+ g_ss_cache_ctrl.m_timer_expired = 0;
+ memset(g_ss_cache_ctrl.m_active_bssid, 0, ETHER_ADDR_LEN);
+
+ g_ss_cache_ctrl.m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+ if (!g_ss_cache_ctrl.m_timer) {
+ return -ENOMEM;
+ }
+ g_ss_cache_ctrl.m_timer->function = (void *)wl_iw_set_ss_cache_timer_flag;
+ init_timer(g_ss_cache_ctrl.m_timer);
+
+ return 0;
+}
+
+
+
+static void
+wl_iw_free_ss_cache(void)
+{
+ wl_iw_ss_cache_t *node, *cur;
+ wl_iw_ss_cache_t **spec_scan_head;
+
+ WL_TRACE(("%s called\n", __FUNCTION__));
+
+ mutex_lock(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+ node = *spec_scan_head;
+
+ for (;node;) {
+ WL_TRACE(("%s : SSID - %s\n", __FUNCTION__, node->bss_info->SSID));
+ cur = node;
+ node = cur->next;
+ kfree(cur);
+ }
+ *spec_scan_head = NULL;
+ mutex_unlock(&wl_cache_lock);
+}
+
+
+
+static int
+wl_iw_run_ss_cache_timer(int kick_off)
+{
+ struct timer_list **timer;
+
+ timer = &g_ss_cache_ctrl.m_timer;
+
+ if (*timer) {
+ if (kick_off) {
+ (*timer)->expires = jiffies + 30000 * HZ / 1000;
+ add_timer(*timer);
+ WL_TRACE(("%s : timer starts \n", __FUNCTION__));
+ } else {
+ del_timer_sync(*timer);
+ WL_TRACE(("%s : timer stops \n", __FUNCTION__));
+ }
+ }
+
+ return 0;
+}
+
+
+void
+wl_iw_release_ss_cache_ctrl(void)
+{
+ WL_TRACE(("%s :\n", __FUNCTION__));
+ wl_iw_free_ss_cache();
+ wl_iw_run_ss_cache_timer(0);
+ if (g_ss_cache_ctrl.m_timer) {
+ kfree(g_ss_cache_ctrl.m_timer);
+ }
+}
+
+
+
+static void
+wl_iw_reset_ss_cache(void)
+{
+ wl_iw_ss_cache_t *node, *prev, *cur;
+ wl_iw_ss_cache_t **spec_scan_head;
+
+ mutex_lock(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+ node = *spec_scan_head;
+ prev = node;
+
+ for (;node;) {
+ WL_TRACE(("%s : node SSID %s \n", __FUNCTION__, node->bss_info->SSID));
+ if (!node->dirty) {
+ cur = node;
+ if (cur == *spec_scan_head) {
+ *spec_scan_head = cur->next;
+ prev = *spec_scan_head;
+ }
+ else {
+ prev->next = cur->next;
+ }
+ node = cur->next;
+
+ WL_TRACE(("%s : Del node : SSID %s\n", __FUNCTION__, cur->bss_info->SSID));
+ kfree(cur);
+ continue;
+ }
+
+ node->dirty = 0;
+ prev = node;
+ node = node->next;
+ }
+ mutex_unlock(&wl_cache_lock);
+}
+
+
+static int
+wl_iw_add_bss_to_ss_cache(wl_scan_results_t *ss_list)
+{
+
+ wl_iw_ss_cache_t *node, *prev, *leaf;
+ wl_iw_ss_cache_t **spec_scan_head;
+ wl_bss_info_t *bi = NULL;
+ int i;
+
+ if (!ss_list->count) {
+ return 0;
+ }
+
+ mutex_lock(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+
+ for (i = 0; i < ss_list->count; i++) {
+
+ node = *spec_scan_head;
+ prev = node;
+
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+
+ WL_TRACE(("%s : find %d with specific SSID %s\n", __FUNCTION__, i, bi->SSID));
+ for (;node;) {
+ if (!memcmp(&node->bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+
+ WL_TRACE(("dirty marked : SSID %s\n", bi->SSID));
+ node->dirty = 1;
+ break;
+ }
+ prev = node;
+ node = node->next;
+ }
+
+ if (node) {
+ continue;
+ }
+ leaf = kmalloc(bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL);
+ if (!leaf) {
+ WL_ERROR(("Memory alloc failure %d\n", \
+ bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN));
+ mutex_unlock(&wl_cache_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(leaf->bss_info, bi, bi->length);
+ leaf->next = NULL;
+ leaf->dirty = 1;
+ leaf->count = 1;
+ leaf->version = ss_list->version;
+
+ if (!prev) {
+ *spec_scan_head = leaf;
+ }
+ else {
+ prev->next = leaf;
+ }
+ }
+ mutex_unlock(&wl_cache_lock);
+ return 0;
+}
+
+
+static int
+wl_iw_merge_scan_cache(struct iw_request_info *info, char *extra, uint buflen_from_user,
+__u16 *merged_len)
+{
+ wl_iw_ss_cache_t *node;
+ wl_scan_results_t *list_merge;
+
+ mutex_lock(&wl_cache_lock);
+ node = g_ss_cache_ctrl.m_cache_head;
+ for (;node;) {
+ list_merge = (wl_scan_results_t *)&node->buflen;
+ WL_TRACE(("%s: Cached Specific APs list=%d\n", __FUNCTION__, list_merge->count));
+ if (buflen_from_user - *merged_len > 0) {
+ *merged_len += (__u16) wl_iw_get_scan_prep(list_merge, info,
+ extra + *merged_len, buflen_from_user - *merged_len);
+ }
+ else {
+ WL_TRACE(("%s: exit with break\n", __FUNCTION__));
+ break;
+ }
+ node = node->next;
+ }
+ mutex_unlock(&wl_cache_lock);
+ return 0;
+}
+
+
+static int
+wl_iw_delete_bss_from_ss_cache(void *addr)
+{
+
+ wl_iw_ss_cache_t *node, *prev;
+ wl_iw_ss_cache_t **spec_scan_head;
+
+ mutex_lock(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+ node = *spec_scan_head;
+ prev = node;
+ for (;node;) {
+ if (!memcmp(&node->bss_info->BSSID, addr, ETHER_ADDR_LEN)) {
+ if (node == *spec_scan_head) {
+ *spec_scan_head = node->next;
+ }
+ else {
+ prev->next = node->next;
+ }
+
+ WL_TRACE(("%s : Del node : %s\n", __FUNCTION__, node->bss_info->SSID));
+ kfree(node);
+ break;
+ }
+
+ prev = node;
+ node = node->next;
+ }
+
+ memset(addr, 0, ETHER_ADDR_LEN);
+ mutex_unlock(&wl_cache_lock);
+ return 0;
+}
+
+#endif
+
+
+static int
+wl_iw_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error;
+ WL_TRACE(("%s dev:%s: SIOCSIWSCAN : SCAN\n", __FUNCTION__, dev->name));
+
+#if defined(CSCAN)
+ WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+ return -EINVAL;
+#endif
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+ if (g_onoff == G_WLAN_SET_OFF)
+ return 0;
+
+ memset(&g_specific_ssid, 0, sizeof(g_specific_ssid));
+#ifndef WL_IW_USE_ISCAN
+ g_scan_specified_ssid = 0;
+#endif
+
+#if WIRELESS_EXT > 17
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+ WL_ERROR(("%s Ignoring specific scan for %s, first broadcast scan not done = %d\n", \
+ __FUNCTION__, req->essid, \
+ g_first_broadcast_scan));
+ return -EBUSY;
+ }
+#endif
+ if (g_scan_specified_ssid) {
+ WL_SCAN(("%s Specific SCAN is not done ignore scan for = %s \n", \
+ __FUNCTION__, req->essid));
+ return -EBUSY;
+ }
+ else {
+ g_specific_ssid.SSID_len = MIN(sizeof(g_specific_ssid.SSID), \
+ req->essid_len);
+ memcpy(g_specific_ssid.SSID, req->essid, g_specific_ssid.SSID_len);
+ g_specific_ssid.SSID_len = htod32(g_specific_ssid.SSID_len);
+ g_scan_specified_ssid = 1;
+ WL_TRACE(("### Specific scan ssid=%s len=%d\n", \
+ g_specific_ssid.SSID, g_specific_ssid.SSID_len));
+ }
+ }
+ }
+#endif
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) {
+ WL_SCAN(("Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error));
+ g_scan_specified_ssid = 0;
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#ifdef WL_IW_USE_ISCAN
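+/*
+ * Kick off a broadcast incremental scan: program the event mask, reset the
+ * result list, issue WL_SCAN_ACTION_START and arm the polling timer.
+ */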
+int
+wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag)
+{
+ wlc_ssid_t ssid;
+ iscan_info_t *iscan = g_iscan;
+
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) {
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED;
+ WL_SCAN(("%s: First Broadcast scan was forced\n", __FUNCTION__));
+ }
+ else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) {
+ WL_SCAN(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (flag)
+ rtnl_lock();
+#endif
+
+ dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag));
+ wl_iw_set_event_mask(dev);
+
+ WL_SCAN(("+++: Set Broadcast ISCAN\n"));
+
+ memset(&ssid, 0, sizeof(ssid));
+
+ iscan->list_cur = iscan->list_hdr;
+ iscan->iscan_state = ISCAN_STATE_SCANING;
+
+ memset(&iscan->iscan_ex_params_p->params, 0, iscan->iscan_ex_param_size);
+ wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid);
+ wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (flag)
+ rtnl_unlock();
+#endif
+
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+ iscan->timer_on = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ iscan_info_t *iscan = g_iscan;
+ int ret = 0;
+
+ WL_SCAN(("%s: SIOCSIWSCAN : ISCAN\n", dev->name));
+
+#if defined(CSCAN)
+ WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+ return -EINVAL;
+#endif
+
+ net_os_wake_lock(dev);
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_SCAN(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ goto set_scan_end;
+ }
+#endif
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_SCAN(("%s: driver is not up yet after START\n", __FUNCTION__));
+ goto set_scan_end;
+ }
+
+#ifdef PNO_SUPPORT
+ if (dhd_dev_get_pno_status(dev)) {
+ WL_SCAN(("%s: Scan called when PNO is active\n", __FUNCTION__));
+ }
+#endif
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ WL_ERROR(("%s error\n", __FUNCTION__));
+ goto set_scan_end;
+ }
+
+ if (g_scan_specified_ssid) {
+ WL_SCAN(("%s Specific SCAN already running ignoring BC scan\n", \
+ __FUNCTION__));
+ ret = EBUSY;
+ goto set_scan_end;
+ }
+
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ int as = 0;
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+ wl_iw_set_event_mask(dev);
+ ret = wl_iw_set_scan(dev, info, wrqu, extra);
+ goto set_scan_end;
+ }
+ else {
+ g_scan_specified_ssid = 0;
+
+ if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+ WL_SCAN(("%s ISCAN already in progress \n", __FUNCTION__));
+ goto set_scan_end;
+ }
+ }
+ }
+#endif
+
+#if defined(CONFIG_FIRST_SCAN) && !defined(CSCAN)
+ if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+ if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+ WL_ERROR(("%s Clean up First scan flag which is %d\n", \
+ __FUNCTION__, g_first_broadcast_scan));
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+ }
+ else {
+ WL_ERROR(("%s Ignoring Broadcast Scan:First Scan is not done yet %d\n", \
+ __FUNCTION__, g_first_counter_scans));
+ ret = -EBUSY;
+ goto set_scan_end;
+ }
+ }
+#endif
+
+ wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+
+set_scan_end:
+ net_os_wake_unlock(dev);
+ return ret;
+}
+#endif
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+ uint8 *ie = *wpaie;
+
+ if ((ie[1] >= 6) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+ return TRUE;
+ }
+
+ ie += ie[1] + 2;
+
+ *tlvs_len -= (int)(ie - *tlvs);
+
+ *tlvs = ie;
+ return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+ uint8 *ie = *wpsie;
+
+ if ((ie[1] >= 4) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+ return TRUE;
+ }
+
+ ie += ie[1] + 2;
+
+ *tlvs_len -= (int)(ie - *tlvs);
+
+ *tlvs = ie;
+ return FALSE;
+}
+#endif
+
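+/* Render 'len' bytes of 'data' as a NUL-terminated hex string in 'buf',
+ * truncating if buf_size is too small; returns the number of characters written.
+ */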
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+ size_t len, int uppercase)
+{
+ size_t i;
+ char *pos = buf, *end = buf + buf_size;
+ int ret;
+ if (buf_size == 0)
+ return 0;
+ for (i = 0; i < len; i++) {
+ ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+ data[i]);
+ if (ret < 0 || ret >= end - pos) {
+ end[-1] = '\0';
+ return pos - buf;
+ }
+ pos += ret;
+ }
+ end[-1] = '\0';
+ return pos - buf;
+}
+
+
+int wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+ return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+ struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+ struct iw_event iwe;
+ char *event;
+ char *buf;
+ int custom_event_len;
+
+ event = *event_p;
+ if (bi->ie_length) {
+
+ bcm_tlv_t *ie;
+ uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ int ptr_len = bi->ie_length;
+
+#ifdef BCMWPA2
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+#endif
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+
+ if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+ if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+ WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else
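+			/* No IWEVGENIE reporting for WAPI: emit a custom event of the
+			 * form "wapi_ie=<hex dump of id, len and data>" instead.
+			 */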
+ iwe.cmd = IWEVCUSTOM;
+ custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+ iwe.u.data.length = custom_event_len;
+
+ buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+ if (buf == NULL)
+ {
+ WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+ break;
+ }
+
+ memcpy(buf, "wapi_ie=", 8);
+ wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+ wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+ wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+ kfree(buf);
+#endif
+ break;
+ }
+ *event_p = event;
+ }
+#endif
+
+ return 0;
+}
+
+#ifndef CSCAN
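+/* Translate one wl_scan_results_t list into wireless-extensions scan events
+ * in 'extra'; returns the number of bytes written (at most max_size).
+ */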
+static uint
+wl_iw_get_scan_prep(
+ wl_scan_results_t *list,
+ struct iw_request_info *info,
+ char *extra,
+ short max_size)
+{
+ int i, j;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+ int ret = 0;
+ int channel;
+
+ if (!list) {
+		WL_ERROR(("%s: Null list pointer\n", __FUNCTION__));
+ return ret;
+ }
+
+ for (i = 0; i < list->count && i < IW_MAX_AP; i++)
+ {
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \
+ __FUNCTION__, list->version));
+ return ret;
+ }
+
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+ WL_TRACE(("%s : %s\n", __FUNCTION__, bi->SSID));
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ iwe.cmd = SIOCGIWFREQ;
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ iwe.u.freq.m = wf_channel2mhz(channel,
+ channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ if (bi->rateset.count) {
+			if ((event + IW_EV_LCP_LEN) <= end) {
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ }
+
+ if ((ret = (event - extra)) < 0) {
+ WL_ERROR(("==> Wrong size\n"));
+ ret = 0;
+ }
+ WL_TRACE(("%s: size=%d bytes prepared \n", __FUNCTION__, (unsigned int)(event - extra)));
+ return (uint)ret;
+}
+
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ wl_scan_results_t *list_merge;
+ wl_scan_results_t *list = (wl_scan_results_t *) g_scan;
+ int error;
+ uint buflen_from_user = dwrq->length;
+ uint len = G_SCAN_RESULTS;
+ __u16 len_ret = 0;
+#if !defined(CSCAN)
+ __u16 merged_len = 0;
+#endif
+#if defined(WL_IW_USE_ISCAN)
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * p_buf;
+#if !defined(CSCAN)
+ uint32 counter = 0;
+#endif
+#endif
+ WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user));
+
+ if (!extra) {
+ WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name));
+ return -EINVAL;
+ }
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+ ci.scan_channel = dtoh32(ci.scan_channel);
+ if (ci.scan_channel)
+ return -EAGAIN;
+
+#if !defined(CSCAN)
+ if (g_ss_cache_ctrl.m_timer_expired) {
+ wl_iw_free_ss_cache();
+ g_ss_cache_ctrl.m_timer_expired ^= 1;
+ }
+ if ((!g_scan_specified_ssid && g_ss_cache_ctrl.m_prev_scan_mode) ||
+ g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+
+ wl_iw_reset_ss_cache();
+ }
+ g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+ if (g_scan_specified_ssid) {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+ }
+ else {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+ }
+#endif
+
+ if (g_scan_specified_ssid) {
+
+ list = kmalloc(len, GFP_KERNEL);
+ if (!list) {
+ WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n", dev->name));
+ g_scan_specified_ssid = 0;
+ return -ENOMEM;
+ }
+ }
+
+ memset(list, 0, len);
+ list->buflen = htod32(len);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len))) {
+ WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name, __FUNCTION__, error));
+ dwrq->length = len;
+ if (g_scan_specified_ssid) {
+ g_scan_specified_ssid = 0;
+ kfree(list);
+ }
+ return 0;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ if (g_scan_specified_ssid) {
+ g_scan_specified_ssid = 0;
+ kfree(list);
+ }
+ return -EINVAL;
+ }
+
+#if !defined(CSCAN)
+ if (g_scan_specified_ssid) {
+
+ wl_iw_add_bss_to_ss_cache(list);
+ kfree(list);
+ }
+
+ mutex_lock(&wl_cache_lock);
+#if defined(WL_IW_USE_ISCAN)
+ if (g_scan_specified_ssid)
+ WL_TRACE(("%s: Specified scan APs from scan=%d\n", __FUNCTION__, list->count));
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+ WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+ counter += list_merge->count;
+ if (list_merge->count > 0)
+ len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+ extra+len_ret, buflen_from_user -len_ret);
+ p_buf = p_buf->next;
+ }
+ WL_TRACE(("%s merged with total Bcast APs=%d\n", __FUNCTION__, counter));
+#else
+ list_merge = (wl_scan_results_t *) g_scan;
+ len_ret = (__u16) wl_iw_get_scan_prep(list_merge, info, extra, buflen_from_user);
+#endif
+ mutex_unlock(&wl_cache_lock);
+ if (g_ss_cache_ctrl.m_link_down) {
+ wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+ }
+
+ wl_iw_merge_scan_cache(info, extra+len_ret, buflen_from_user-len_ret, &merged_len);
+ len_ret += merged_len;
+ wl_iw_run_ss_cache_timer(0);
+ wl_iw_run_ss_cache_timer(1);
+#else
+
+ if (g_scan_specified_ssid) {
+ WL_TRACE(("%s: Specified scan APs in the list =%d\n", __FUNCTION__, list->count));
+ len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+ kfree(list);
+
+#if defined(WL_IW_USE_ISCAN)
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+ WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+ if (list_merge->count > 0)
+ len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+ extra+len_ret, buflen_from_user -len_ret);
+ p_buf = p_buf->next;
+ }
+#else
+ list_merge = (wl_scan_results_t *) g_scan;
+ WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+ if (list_merge->count > 0)
+ len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, extra+len_ret,
+ buflen_from_user -len_ret);
+#endif
+ }
+ else {
+ list = (wl_scan_results_t *) g_scan;
+ len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+ }
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
+
+ g_scan_specified_ssid = 0;
+#endif
+
+ if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user)
+ len = len_ret;
+
+ dwrq->length = len;
+ dwrq->flags = 0;
+
+ WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, list->count));
+ return 0;
+}
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
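+/* SIOCGIWSCAN handler for ISCAN: walk the chained iscan result buffers and
+ * translate each BSS into wireless-extensions events.
+ */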
+static int
+wl_iw_iscan_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int ii, j;
+ int apcnt;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * p_buf;
+ uint32 counter = 0;
+ uint8 channel;
+#if !defined(CSCAN)
+ __u16 merged_len = 0;
+ uint buflen_from_user = dwrq->length;
+#endif
+
+ WL_SCAN(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length));
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ return -EINVAL;
+ }
+#endif
+
+ if (!extra) {
+ WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n", dev->name));
+ return -EINVAL;
+ }
+
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) {
+ WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n", \
+ dev->name, __FUNCTION__));
+ return -EAGAIN;
+ }
+#endif
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		WL_ERROR(("%s: error - iscan not ready (sysioc_pid)\n", __FUNCTION__));
+ return -EAGAIN;
+ }
+
+#if !defined(CSCAN)
+ if (g_ss_cache_ctrl.m_timer_expired) {
+ wl_iw_free_ss_cache();
+ g_ss_cache_ctrl.m_timer_expired ^= 1;
+ }
+ if (g_scan_specified_ssid) {
+ return wl_iw_get_scan(dev, info, dwrq, extra);
+ }
+ else {
+ if (g_ss_cache_ctrl.m_link_down) {
+ wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+ }
+ if (g_ss_cache_ctrl.m_prev_scan_mode || g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+
+ wl_iw_reset_ss_cache();
+ }
+ g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+ g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+ }
+#endif
+
+ WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name));
+ apcnt = 0;
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+ counter += list->count;
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ return -EINVAL;
+ }
+
+ bi = NULL;
+ for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+ if ((dtoh32(bi->length) > WLC_IW_ISCAN_MAXLEN) ||
+ (((uintptr)bi + dtoh32(bi->length)) > ((uintptr)list + WLC_IW_ISCAN_MAXLEN))) {
+ WL_ERROR(("%s: Scan results out of bounds: %u\n",__FUNCTION__,dtoh32(bi->length)));
+ return -E2BIG;
+ }
+
+ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+ IW_EV_QUAL_LEN >= end)
+ return -E2BIG;
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ iwe.cmd = SIOCGIWFREQ;
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ iwe.u.freq.m = wf_channel2mhz(channel,
+ channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ if (bi->rateset.count) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+ return -E2BIG;
+
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ p_buf = p_buf->next;
+ }
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0;
+
+#if !defined(CSCAN)
+ wl_iw_merge_scan_cache(info, event, buflen_from_user - dwrq->length, &merged_len);
+ dwrq->length += merged_len;
+ wl_iw_run_ss_cache_timer(0);
+ wl_iw_run_ss_cache_timer(1);
+#endif /* CSCAN */
+#if defined(CONFIG_FIRST_SCAN)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+#endif
+
+ WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter));
+
+ return 0;
+}
+#endif
+
+static int
+wl_iw_set_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ int error;
+ wl_join_params_t join_params;
+ int join_params_size;
+
+ WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+
+ memset(&g_ssid, 0, sizeof(g_ssid));
+
+ CHECK_EXTRA_FOR_NULL(extra);
+
+ if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+ g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length);
+#else
+ g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length-1);
+#endif
+ memcpy(g_ssid.SSID, extra, g_ssid.SSID_len);
+ } else {
+
+ g_ssid.SSID_len = 0;
+ }
+ g_ssid.SSID_len = htod32(g_ssid.SSID_len);
+
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ memcpy(&join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
+ memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+
+ wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) {
+ WL_ERROR(("Invalid ioctl data=%d\n", error));
+ return error;
+ }
+
+ if (g_ssid.SSID_len) {
+ WL_TRACE(("%s: join SSID=%s ch=%d\n", __FUNCTION__, \
+ g_ssid.SSID, g_wl_iw_params.target_channel));
+ }
+ return 0;
+}
+
+static int
+wl_iw_get_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("Error getting the SSID\n"));
+ return error;
+ }
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+ dwrq->length = ssid.SSID_len;
+
+ dwrq->flags = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+ WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+	/* guard against a zero-length request before writing the terminating NUL */
+	if (dwrq->length == 0 || dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	iw->nickname[dwrq->length - 1] = '\0';
+
+ return 0;
+}
+
+static int
+wl_iw_get_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+ WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ strcpy(extra, iw->nickname);
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
+static int wl_iw_set_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ wl_rateset_t rateset;
+ int error, rate, i, error_bg, error_a;
+
+ WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+
+ rateset.count = dtoh32(rateset.count);
+
+ if (vwrq->value < 0) {
+
+ rate = rateset.rates[rateset.count - 1] & 0x7f;
+ } else if (vwrq->value < rateset.count) {
+
+ rate = rateset.rates[vwrq->value] & 0x7f;
+ } else {
+
+ rate = vwrq->value / 500000;
+ }
+
+ if (vwrq->fixed) {
+
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+ error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+ } else {
+
+
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+
+ error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+
+
+ for (i = 0; i < rateset.count; i++)
+ if ((rateset.rates[i] & 0x7f) > rate)
+ break;
+ rateset.count = htod32(i);
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ }
+
+ return 0;
+}
+
+static int wl_iw_get_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rate;
+
+ WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+ return error;
+ rate = dtoh32(rate);
+ vwrq->value = rate * 500000;
+
+ return 0;
+}
+
+static int
+wl_iw_set_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+ if (vwrq->disabled)
+ rts = DOT11_DEFAULT_RTS_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+ return -EINVAL;
+ else
+ rts = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+ return error;
+
+ vwrq->value = rts;
+ vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, frag;
+
+ WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+ if (vwrq->disabled)
+ frag = DOT11_DEFAULT_FRAG_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+ return -EINVAL;
+ else
+ frag = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, fragthreshold;
+
+ WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+ return error;
+
+ vwrq->value = fragthreshold;
+ vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable;
+ uint16 txpwrmw;
+ WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+
+ disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+ disable += WL_RADIO_SW_DISABLE << 16;
+
+ disable = htod32(disable);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+ return error;
+
+
+ if (disable & WL_RADIO_SW_DISABLE)
+ return 0;
+
+
+ if (!(vwrq->flags & IW_TXPOW_MWATT))
+ return -EINVAL;
+
+
+ if (vwrq->value < 0)
+ return 0;
+
+ if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+ else txpwrmw = (uint16)vwrq->value;
+
+
+ error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+ return error;
+}
+
+static int
+wl_iw_get_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable, txpwrdbm;
+ uint8 result;
+
+ WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+ (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+ return error;
+
+ disable = dtoh32(disable);
+ result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ vwrq->value = (int32)bcm_qdbm_to_mw(result);
+ vwrq->fixed = 0;
+ vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+ vwrq->flags = IW_TXPOW_MWATT;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+
+ if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+ return -EINVAL;
+
+
+ if (vwrq->flags & IW_RETRY_LIMIT) {
+
+
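+		/* Apply the value to the long retry limit unless the request is
+		 * restricted to the short/min limit only.
+		 */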
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+ !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif
+ lrl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+ return error;
+ }
+
+
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+ !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif
+ srl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+ return error;
+ }
+ }
+ return 0;
+}
+
+static int
+wl_iw_get_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+ vwrq->disabled = 0;
+
+
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+ return -EINVAL;
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+ return error;
+
+ lrl = dtoh32(lrl);
+ srl = dtoh32(srl);
+
+
+ if (vwrq->flags & IW_RETRY_MAX) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = lrl;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = srl;
+ if (srl != lrl)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+#endif
+
+static int
+wl_iw_set_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec;
+
+ WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+
+ if (key.index == DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+ } else {
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ return -EINVAL;
+ }
+
+
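+	/* No key material supplied - just select the given index as the primary key */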
+ if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ } else {
+ key.len = dwrq->length;
+
+ if (dwrq->length > sizeof(key.data))
+ return -EINVAL;
+
+ memcpy(key.data, extra, dwrq->length);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (key.len) {
+ case WEP1_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WEP128_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ case TKIP_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+#endif
+ case AES_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ swap_key_from_BE(&key);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+ return error;
+ }
+
+
+ val = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ wsec &= ~(WEP_ENABLED);
+ wsec |= val;
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+ return error;
+
+
+ val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+ val = htod32(val);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec, auth;
+
+ WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+
+ bzero(&key, sizeof(wl_wsec_key_t));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = key.index;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ } else
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+ return error;
+
+ swap_key_to_BE(&key);
+
+ wsec = dtoh32(wsec);
+ auth = dtoh32(auth);
+
+ dwrq->length = MIN(DOT11_MAX_KEY_SIZE, key.len);
+
+
+ dwrq->flags = key.index + 1;
+ if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+
+ dwrq->flags |= IW_ENCODE_DISABLED;
+ }
+ if (auth) {
+
+ dwrq->flags |= IW_ENCODE_RESTRICTED;
+ }
+
+
+ if (dwrq->length && extra)
+ memcpy(extra, key.data, dwrq->length);
+
+ return 0;
+}
+
+static int
+wl_iw_set_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+ pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+ pm = htod32(pm);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+ return error;
+
+ pm = dtoh32(pm);
+ vwrq->disabled = pm ? 0 : 1;
+ vwrq->flags = IW_POWER_ALL_R;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ uchar buf[WLC_IOCTL_SMLEN] = {0};
+ uchar *p = buf;
+ int wapi_ie_size;
+
+ WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+ CHECK_EXTRA_FOR_NULL(extra);
+
+ if (extra[0] == DOT11_MNG_WAPI_ID)
+ {
+ wapi_ie_size = iwp->length;
+ memcpy(p, extra, iwp->length);
+ dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+ }
+ else
+ dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+ return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+ iwp->length = 64;
+ dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+ return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error;
+ struct iw_encode_ext *iwe;
+
+ WL_WSEC(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+ CHECK_EXTRA_FOR_NULL(extra);
+
+ memset(&key, 0, sizeof(key));
+ iwe = (struct iw_encode_ext *)extra;
+
+
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+ }
+
+
+ key.index = 0;
+ if (dwrq->flags & IW_ENCODE_INDEX)
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ key.len = iwe->key_len;
+
+
+ if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+
+ if (key.len == 0) {
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the primary Key to %d\n", key.index));
+
+ key.index = htod32(key.index);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+ &key.index, sizeof(key.index));
+ if (error)
+ return error;
+ }
+
+ else {
+ swap_key_from_BE(&key);
+ dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ }
+ }
+ else {
+ if (iwe->key_len > sizeof(key.data))
+ return -EINVAL;
+
+ WL_WSEC(("Setting the key index %d\n", key.index));
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("key is a Primary Key\n"));
+ key.flags = WL_PRIMARY_KEY;
+ }
+
+ bcopy((void *)iwe->key, key.data, iwe->key_len);
+
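+		/* For TKIP, swap the Tx and Rx MIC halves of the key (bytes 16-23
+		 * and 24-31): the ordering used by wireless extensions differs from
+		 * the one the firmware expects.
+		 */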
+ if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+ uint8 keybuf[8];
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+
+
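+		/* Seed the receive IV/replay counter from the little-endian rx_seq */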
+ if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ uchar *ivptr;
+ ivptr = (uchar *)iwe->rx_seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = TRUE;
+ }
+
+ switch (iwe->alg) {
+ case IW_ENCODE_ALG_NONE:
+ key.algo = CRYPTO_ALGO_OFF;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (iwe->key_len == WEP1_KEY_SIZE)
+ key.algo = CRYPTO_ALGO_WEP1;
+ else
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ case IW_ENCODE_ALG_SM4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ key.flags &= ~WL_PRIMARY_KEY;
+ }
+ break;
+ default:
+ break;
+ }
+ swap_key_from_BE(&key);
+
+ dhd_wait_pend8021x(dev);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+#ifdef BCMWPA2
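+/* Local PMKID cache, pushed to the firmware with the "pmkid_info" iovar */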
+struct {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+
+static int
+wl_iw_set_pmksa(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ struct iw_pmksa *iwpmksa;
+ uint i;
+ int ret = 0;
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name));
+ CHECK_EXTRA_FOR_NULL(extra);
+
+ iwpmksa = (struct iw_pmksa *)extra;
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+
+ if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+ WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+ bzero((char *)&pmkid_list, sizeof(pmkid_list));
+ }
+
+ else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+ {
+ pmkid_list_t pmkid, *pmkidptr;
+ uint j;
+ pmkidptr = &pmkid;
+
+ bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+
+ WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+ WL_WSEC(("\n"));
+ }
+
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+
+ if ((pmkid_list.pmkids.npmkid > 0) && (i < pmkid_list.pmkids.npmkid)) {
+ bzero(&pmkid_list.pmkids.pmkid[i], sizeof(pmkid_t));
+ for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
+ bcopy(&pmkid_list.pmkids.pmkid[i+1].BSSID,
+ &pmkid_list.pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&pmkid_list.pmkids.pmkid[i+1].PMKID,
+ &pmkid_list.pmkids.pmkid[i].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ pmkid_list.pmkids.npmkid--;
+ }
+ else
+ ret = -EINVAL;
+ }
+
+ else if (iwpmksa->cmd == IW_PMKSA_ADD) {
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ if (i < MAXPMKID) {
+ bcopy(&iwpmksa->bssid.sa_data[0],
+ &pmkid_list.pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkid_list.pmkids.pmkid[i].PMKID,
+ WPA2_PMKID_LEN);
+ if (i == pmkid_list.pmkids.npmkid)
+ pmkid_list.pmkids.npmkid++;
+ }
+ else
+ ret = -EINVAL;
+
+ {
+ uint j;
+ uint k;
+ k = pmkid_list.pmkids.npmkid;
+ WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[k].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[k].PMKID[j]));
+ WL_WSEC(("\n"));
+ }
+ }
+ WL_WSEC(("PRINTING pmkid LIST - No of elements %d, ret = %d\n", pmkid_list.pmkids.npmkid, ret));
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+ uint j;
+ WL_WSEC(("PMKID[%d]: %s = ", i,
+ bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[i].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]));
+ WL_WSEC(("\n"));
+ }
+ WL_WSEC(("\n"));
+
+ if (!ret)
+ ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+ return ret;
+}
+#endif
+#endif
+
+static int
+wl_iw_get_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ WL_WSEC(("%s: SIOCGIWENCODEEXT\n", dev->name));
+ return 0;
+}
+
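+/* SIOCSIWAUTH handler: map IW_AUTH_* parameters onto the corresponding
+ * firmware iovars (wpa_auth, wsec, auth, ...).
+ */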
+static int
+wl_iw_set_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error = 0;
+ int paramid;
+ int paramval;
+ int val = 0;
+ wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+ WL_WSEC(("%s: SIOCSIWAUTH\n", dev->name));
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+ paramval = vwrq->value;
+
+ WL_WSEC(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+ dev->name, paramid, paramval));
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+
+ if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+ val = WPA_AUTH_DISABLED;
+ else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+#ifdef BCMWPA2
+ else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#endif
+ else if (paramval & IW_AUTH_WAPI_VERSION_1)
+ val = WPA_AUTH_WAPI;
+ WL_WSEC(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+
+
+ if (paramval & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ val = WEP_ENABLED;
+ if (paramval & IW_AUTH_CIPHER_TKIP)
+ val = TKIP_ENABLED;
+ if (paramval & IW_AUTH_CIPHER_CCMP)
+ val = AES_ENABLED;
+ if (paramval & IW_AUTH_CIPHER_SMS4)
+ val = SMS4_ENABLED;
+
+ if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+ iw->pwsec = val;
+ val |= iw->gwsec;
+ }
+ else {
+ iw->gwsec = val;
+ val |= iw->pwsec;
+ }
+
+ if (iw->privacy_invoked && !val) {
+ WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_ERROR(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else if (val) {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_ERROR(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+			WL_ERROR(("Failed to set 'wsec' iovar\n"));
+ return error;
+ }
+
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
+			WL_ERROR(("Failed to get 'wpa_auth' iovar\n"));
+ return error;
+ }
+
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ if (paramval & IW_AUTH_KEY_MGMT_PSK)
+ val = WPA_AUTH_PSK;
+ else
+ val = WPA_AUTH_UNSPECIFIED;
+ }
+#ifdef BCMWPA2
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ if (paramval & IW_AUTH_KEY_MGMT_PSK)
+ val = WPA2_AUTH_PSK;
+ else
+ val = WPA2_AUTH_UNSPECIFIED;
+ }
+#endif
+ if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+ val = WPA_AUTH_WAPI;
+ WL_WSEC(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) {
+			WL_ERROR(("Failed to set 'wpa_auth' iovar\n"));
+ return error;
+ }
+
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ if ((error = dev_wlc_bufvar_set(dev, "tkip_countermeasures", \
+ (char *)&paramval, sizeof(paramval))))
+ WL_WSEC(("%s: tkip_countermeasures failed %d\n", __FUNCTION__, error));
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ WL_WSEC(("Setting the D11auth %d\n", paramval));
+ if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
+ val = 0;
+ else if (paramval == IW_AUTH_ALG_SHARED_KEY)
+ val = 1;
+ else if (paramval == (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY))
+ val = 2;
+ else
+ error = 1;
+ if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (paramval == 0) {
+ iw->pwsec = 0;
+ iw->gwsec = 0;
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
+				WL_ERROR(("Failed to get 'wsec' iovar\n"));
+ return error;
+ }
+ if (val & (TKIP_ENABLED | AES_ENABLED)) {
+ val &= ~(TKIP_ENABLED | AES_ENABLED);
+ dev_wlc_intvar_set(dev, "wsec", val);
+ }
+ val = 0;
+
+ WL_INFORM(("%s: %d: setting wpa_auth to %d\n",
+ __FUNCTION__, __LINE__, val));
+ error = dev_wlc_intvar_set(dev, "wpa_auth", 0);
+ if (error)
+				WL_ERROR(("Failed to set 'wpa_auth' iovar\n"));
+ return error;
+ }
+
+
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ error = dev_wlc_bufvar_set(dev, "wsec_restrict", \
+ (char *)&paramval, sizeof(paramval));
+ if (error)
+ WL_ERROR(("%s: wsec_restrict %d\n", __FUNCTION__, error));
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ error = dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", \
+ (char *)&paramval, sizeof(paramval));
+ if (error)
+ WL_WSEC(("%s: rx_unencrypted_eapol %d\n", __FUNCTION__, error));
+ break;
+
+#if WIRELESS_EXT > 17
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+ break;
+ case IW_AUTH_PRIVACY_INVOKED: {
+ int wsec;
+
+ if (paramval == 0) {
+ iw->privacy_invoked = FALSE;
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ iw->privacy_invoked = TRUE;
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ if (!(IW_WSEC_ENABLED(wsec))) {
+
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+ }
+ break;
+ }
+#endif
+ case IW_AUTH_WAPI_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+ if (paramval) {
+ val |= SMS4_ENABLED;
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+ __FUNCTION__, val, error));
+ return error;
+ }
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WPA_AUTH_WAPI))) {
+ WL_ERROR(("%s: setting wpa_auth(WPA_AUTH_WAPI) returned %d\n",
+ __FUNCTION__, error));
+ return error;
+ }
+ }
+
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+#ifdef BCMWPA2
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+#else
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK))
+#endif
+
+static int
+wl_iw_get_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error;
+ int paramid;
+ int paramval = 0;
+ int val;
+ wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+ WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+ paramval = IW_AUTH_WPA_VERSION_DISABLED;
+ else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA;
+#ifdef BCMWPA2
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA2;
+#endif
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+ if (paramid == IW_AUTH_CIPHER_PAIRWISE)
+ val = iw->pwsec;
+ else
+ val = iw->gwsec;
+
+ paramval = 0;
+ if (val) {
+ if (val & WEP_ENABLED)
+ paramval |= (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104);
+ if (val & TKIP_ENABLED)
+ paramval |= (IW_AUTH_CIPHER_TKIP);
+ if (val & AES_ENABLED)
+ paramval |= (IW_AUTH_CIPHER_CCMP);
+ }
+ else
+ paramval = IW_AUTH_CIPHER_NONE;
+ break;
+ case IW_AUTH_KEY_MGMT:
+
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (VAL_PSK(val))
+ paramval = IW_AUTH_KEY_MGMT_PSK;
+ else
+ paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ error = dev_wlc_bufvar_get(dev, "tkip_countermeasures", \
+ (char *)&paramval, sizeof(paramval));
+ if (error)
+ WL_ERROR(("%s get tkip_countermeasures %d\n", __FUNCTION__, error));
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ error = dev_wlc_bufvar_get(dev, "wsec_restrict", \
+ (char *)&paramval, sizeof(paramval));
+ if (error)
+ WL_ERROR(("%s get wsec_restrict %d\n", __FUNCTION__, error));
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ error = dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", \
+ (char *)&paramval, sizeof(paramval));
+ if (error)
+ WL_ERROR(("%s get rx_unencrypted_eapol %d\n", __FUNCTION__, error));
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+ return error;
+ if (!val)
+ paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+ else
+ paramval = IW_AUTH_ALG_SHARED_KEY;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val)
+ paramval = TRUE;
+ else
+ paramval = FALSE;
+ break;
+#if WIRELESS_EXT > 17
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ paramval = iw->privacy_invoked;
+ break;
+#endif
+ }
+ vwrq->value = paramval;
+ return 0;
+}
+#endif
+
+
+#ifdef SOFTAP
+
+static int ap_macmode = MACLIST_MODE_DISABLED;
+static struct mflist ap_black_list;
+static int
+wl_iw_parse_wep(char *keystr, wl_wsec_key_t *key)
+{
+ char hex[] = "XX";
+ unsigned char *data = key->data;
+
+ switch (strlen(keystr)) {
+ case 5:
+ case 13:
+ case 16:
+ key->len = strlen(keystr);
+ memcpy(data, keystr, key->len + 1);
+ break;
+ case 12:
+ case 28:
+ case 34:
+ case 66:
+ if (!strnicmp(keystr, "0x", 2))
+ keystr += 2;
+ else
+ return -1;
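+		/* fall through - parse the remaining hex digits */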
+ case 10:
+ case 26:
+ case 32:
+ case 64:
+ key->len = strlen(keystr) / 2;
+ while (*keystr) {
+ strncpy(hex, keystr, 2);
+ *data++ = (char) bcm_strtoul(hex, NULL, 16);
+ keystr += 2;
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ switch (key->len) {
+ case 5:
+ key->algo = CRYPTO_ALGO_WEP1;
+ break;
+ case 13:
+ key->algo = CRYPTO_ALGO_WEP128;
+ break;
+ case 16:
+ key->algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ case 32:
+ key->algo = CRYPTO_ALGO_TKIP;
+ break;
+ default:
+ return -1;
+ }
+
+ key->flags |= WL_PRIMARY_KEY;
+
+ return 0;
+}
+
+#ifdef EXT_WPA_CRYPTO
+#define SHA1HashSize 20
+extern void pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+ int iterations, u8 *buf, size_t buflen);
+
+#else
+
+#define SHA1HashSize 20
+int pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+ int iterations, u8 *buf, size_t buflen)
+{
+ WL_ERROR(("WARNING: %s is not implemented !!!\n", __FUNCTION__));
+ return -1;
+}
+
+#endif
+
+
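+/* Bring the secondary BSS (bsscfg index 1) up or down via the "bss" iovar */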
+int dev_iw_write_cfg1_bss_var(struct net_device *dev, int val)
+{
+ struct {
+ int cfg;
+ int val;
+ } bss_setbuf;
+
+ int bss_set_res;
+ char smbuf[WLC_IOCTL_SMLEN];
+ memset(smbuf, 0, sizeof(smbuf));
+
+ bss_setbuf.cfg = 1;
+ bss_setbuf.val = val;
+
+ bss_set_res = dev_iw_iovar_setbuf(dev, "bss",
+ &bss_setbuf, sizeof(bss_setbuf), smbuf, sizeof(smbuf));
+ WL_TRACE(("%s: bss_set_result:%d set with %d\n", __FUNCTION__, bss_set_res, val));
+
+ return bss_set_res;
+}
+
+
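+/* Query the current state of bsscfg index 1 through the "bss" iovar */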
+int dev_iw_read_cfg1_bss_var(struct net_device *dev, int *val)
+{
+ int bsscfg_idx = 1;
+ int bss_set_res;
+ char smbuf[WLC_IOCTL_SMLEN];
+ memset(smbuf, 0, sizeof(smbuf));
+
+ bss_set_res = dev_iw_iovar_getbuf(dev, "bss", \
+ &bsscfg_idx, sizeof(bsscfg_idx), smbuf, sizeof(smbuf));
+ *val = *(int*)smbuf;
+ *val = dtoh32(*val);
+ WL_TRACE(("%s: status=%d bss_get_result=%d\n", __FUNCTION__, bss_set_res, *val));
+ return bss_set_res;
+}
+
+
+#ifndef AP_ONLY
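+/* Build a per-BSS iovar buffer of the form "bsscfg:<name>\0<bssidx><param>";
+ * returns the total length, or 0 with *perr = BCME_BUFTOOSHORT if it does not fit.
+ */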
+static int wl_bssiovar_mkbuf(
+ const char *iovar,
+ int bssidx,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen,
+ int *perr)
+{
+ const char *prefix = "bsscfg:";
+ int8 *p;
+ uint prefixlen;
+ uint namelen;
+ uint iolen;
+
+ prefixlen = strlen(prefix);
+ namelen = strlen(iovar) + 1;
+ iolen = prefixlen + namelen + sizeof(int) + paramlen;
+
+ if (buflen < 0 || iolen > (uint)buflen) {
+ *perr = BCME_BUFTOOSHORT;
+ return 0;
+ }
+
+ p = (int8 *)bufptr;
+
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ memcpy(p, iovar, namelen);
+ p += namelen;
+
+ bssidx = htod32(bssidx);
+ memcpy(p, &bssidx, sizeof(int32));
+ p += sizeof(int32);
+
+ if (paramlen)
+ memcpy(p, param, paramlen);
+
+ *perr = 0;
+ return iolen;
+}
+#endif
+
+
+int get_user_params(char *user_params, struct iw_point *dwrq)
+{
+ int ret = 0;
+
+ if (copy_from_user(user_params, dwrq->pointer, dwrq->length)) {
+ WL_ERROR(("\n%s: no user params: uptr:%p, ulen:%d\n",
+ __FUNCTION__, dwrq->pointer, dwrq->length));
+ return -EFAULT;
+ }
+
+ WL_TRACE(("\n%s: iwpriv user params:%s\n", __FUNCTION__, user_params));
+
+ return ret;
+}
+
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+#if defined(CSCAN)
+
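+/* Build the combined (SSID list + channel list) ISCAN request in
+ * iscan->iscan_ex_params_p and hand it to the firmware via the "iscan" iovar.
+ */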
+static int
+wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, int nchan)
+{
+ int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
+ int err = 0;
+ char *p;
+ int i;
+ iscan_info_t *iscan = g_iscan;
+
+ WL_SCAN(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan));
+
+	if ((!dev) || (!g_iscan) || (!iscan->iscan_ex_params_p)) {
+ WL_ERROR(("%s error exit\n", __FUNCTION__));
+ err = -1;
+ goto exit;
+ }
+
+#ifdef PNO_SUPPORT
+ if (dhd_dev_get_pno_status(dev)) {
+ WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__));
+ }
+#endif
+
+ params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+
+ if (nssid > 0) {
+ i = OFFSETOF(wl_scan_params_t, channel_list) + nchan * sizeof(uint16);
+ i = ROUNDUP(i, sizeof(uint32));
+ if (i + nssid * sizeof(wlc_ssid_t) > params_size) {
+ printf("additional ssids exceed params_size\n");
+ err = -1;
+ goto exit;
+ }
+
+ p = ((char*)&iscan->iscan_ex_params_p->params) + i;
+ memcpy(p, ssids_local, nssid * sizeof(wlc_ssid_t));
+ p += nssid * sizeof(wlc_ssid_t);
+ } else {
+ p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16);
+ }
+
+ iscan->iscan_ex_params_p->params.channel_num = \
+ htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) | \
+ (nchan & WL_SCAN_PARAMS_COUNT_MASK));
+
+ nssid = \
+ (uint)((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) & \
+ WL_SCAN_PARAMS_COUNT_MASK);
+
+ params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t));
+ iscan->iscan_ex_param_size = params_size;
+
+ iscan->list_cur = iscan->list_hdr;
+ iscan->iscan_state = ISCAN_STATE_SCANING;
+ wl_iw_set_event_mask(dev);
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+ iscan->timer_on = 1;
+
+#ifdef SCAN_DUMP
+ {
+ int i;
+ WL_SCAN(("\n### List of SSIDs to scan ###\n"));
+ for (i = 0; i < nssid; i++) {
+ if (!ssids_local[i].SSID_len)
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ else
+ WL_SCAN(("%d: scan for %s size =%d\n", i, \
+ ssids_local[i].SSID, ssids_local[i].SSID_len));
+ }
+ WL_SCAN(("### List of channels to scan ###\n"));
+ for (i = 0; i < nchan; i++)
+ {
+ WL_SCAN(("%d ", iscan->iscan_ex_params_p->params.channel_list[i]));
+ }
+ WL_SCAN(("\nnprobes=%d\n", iscan->iscan_ex_params_p->params.nprobes));
+ WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+ WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+ WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+ WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+ WL_SCAN(("\n###################\n"));
+ }
+#endif
+
+ if (params_size > WLC_IOCTL_MEDLEN) {
+		WL_ERROR(("%s: params_size=%d exceeds WLC_IOCTL_MEDLEN\n", \
+			__FUNCTION__, params_size));
+ err = -1;
+ }
+
+ if ((err = dev_iw_iovar_setbuf(dev, "iscan", iscan->iscan_ex_params_p, \
+ iscan->iscan_ex_param_size, \
+ iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+ WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+ err = -1;
+ }
+
+exit:
+
+ return err;
+}
+
+
+static int iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info, \
+ union iwreq_data *wrqu, char *ext)
+{
+ int res = 0;
+ char *extra = NULL;
+ iscan_info_t *iscan = g_iscan;
+ wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+ int nssid = 0;
+ int nchan = 0;
+
+	WL_TRACE(("\n%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ return -1;
+ }
+
+#ifdef PNO_SET_DEBUG
+ wl_iw_set_pno_set(dev, info, wrqu, extra);
+ return 0;
+#endif
+
+ if (wrqu->data.length != 0) {
+
+ char *str_ptr;
+
+ if (!iscan->iscan_ex_params_p) {
+ return -EFAULT;
+ }
+
+ if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ extra[wrqu->data.length] = 0;
+ WL_ERROR(("Got str param in iw_point:\n %s\n", extra));
+
+ str_ptr = extra;
+
+ if (strncmp(str_ptr, GET_SSID, strlen(GET_SSID))) {
+ WL_ERROR(("%s Error: extracting SSID='' string\n", __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr += strlen(GET_SSID);
+ nssid = wl_iw_parse_ssid_list(&str_ptr, ssids_local, nssid, \
+ WL_SCAN_PARAMS_SSID_MAX);
+		if (nssid == -1) {
+			WL_ERROR(("%s wrong ssid list\n", __FUNCTION__));
+			res = -1;
+			goto exit_proc;
+		}
+
+		if (iscan->iscan_ex_param_size > WLC_IOCTL_MAXLEN) {
+			WL_ERROR(("%s wrong ex_param_size %d\n", \
+				__FUNCTION__, iscan->iscan_ex_param_size));
+			res = -1;
+			goto exit_proc;
+		}
+ memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+
+
+ wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+
+		if ((nchan = wl_iw_parse_channel_list(&str_ptr, \
+			&iscan->iscan_ex_params_p->params.channel_list[0], \
+			WL_NUMCHANNELS)) == -1) {
+			WL_ERROR(("%s missing channel list\n", __FUNCTION__));
+			res = -1;
+			goto exit_proc;
+		}
+
+
+ get_parmeter_from_string(&str_ptr, \
+ GET_NPROBE, PTYPE_INTDEC, \
+ &iscan->iscan_ex_params_p->params.nprobes, 2);
+
+ get_parmeter_from_string(&str_ptr, GET_ACTIVE_ASSOC_DWELL, PTYPE_INTDEC, \
+ &iscan->iscan_ex_params_p->params.active_time, 4);
+
+ get_parmeter_from_string(&str_ptr, GET_PASSIVE_ASSOC_DWELL, PTYPE_INTDEC, \
+ &iscan->iscan_ex_params_p->params.passive_time, 4);
+
+ get_parmeter_from_string(&str_ptr, GET_HOME_DWELL, PTYPE_INTDEC, \
+ &iscan->iscan_ex_params_p->params.home_time, 4);
+
+ get_parmeter_from_string(&str_ptr, GET_SCAN_TYPE, PTYPE_INTDEC, \
+ &iscan->iscan_ex_params_p->params.scan_type, 1);
+
+ res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+ } else {
+ WL_ERROR(("IWPRIV argument len = 0 \n"));
+ return -1;
+ }
+
+exit_proc:
+
+ kfree(extra);
+
+ return res;
+}
+
+
+static int
+wl_iw_set_cscan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int res = -1;
+ iscan_info_t *iscan = g_iscan;
+ wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+ int nssid = 0;
+ int nchan = 0;
+ cscan_tlv_t *cscan_tlv_temp;
+ char type;
+ char *str_ptr;
+ int tlv_size_left;
+#ifdef TLV_DEBUG
+ int i;
+ char tlv_in_example[] = { 'C', 'S', 'C', 'A', 'N', ' ', \
+ 0x53, 0x01, 0x00, 0x00,
+ 'S',
+ 0x00,
+ 'S',
+ 0x04,
+ 'B', 'R', 'C', 'M',
+ 'C',
+ 0x06,
+ 'P',
+ 0x94,
+ 0x11,
+ 'T',
+ 0x01
+ };
+#endif
+
+ WL_TRACE(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ net_os_wake_lock(dev);
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+
+ if (wrqu->data.length < (strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))) {
+		WL_ERROR(("%s argument=%d is less than %d\n", __FUNCTION__, \
+			wrqu->data.length, strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t)));
+ goto exit_proc;
+ }
+
+#ifdef TLV_DEBUG
+ memcpy(extra, tlv_in_example, sizeof(tlv_in_example));
+ wrqu->data.length = sizeof(tlv_in_example);
+ for (i = 0; i < wrqu->data.length; i++)
+ printf("%02X ", extra[i]);
+ printf("\n");
+#endif
+
+ str_ptr = extra;
+ str_ptr += strlen(CSCAN_COMMAND);
+ tlv_size_left = wrqu->data.length - strlen(CSCAN_COMMAND);
+
+ cscan_tlv_temp = (cscan_tlv_t *)str_ptr;
+ memset(ssids_local, 0, sizeof(ssids_local));
+
+ if ((cscan_tlv_temp->prefix == CSCAN_TLV_PREFIX) && \
+ (cscan_tlv_temp->version == CSCAN_TLV_VERSION) && \
+ (cscan_tlv_temp->subver == CSCAN_TLV_SUBVERSION))
+ {
+ str_ptr += sizeof(cscan_tlv_t);
+ tlv_size_left -= sizeof(cscan_tlv_t);
+
+
+ if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, \
+ WL_SCAN_PARAMS_SSID_MAX, &tlv_size_left)) <= 0) {
+ WL_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+ goto exit_proc;
+ }
+ else {
+
+ memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+
+
+ wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+
+ while (tlv_size_left > 0)
+ {
+ type = str_ptr[0];
+ switch (type) {
+ case CSCAN_TLV_TYPE_CHANNEL_IE:
+
+ if ((nchan = wl_iw_parse_channel_list_tlv(&str_ptr, \
+ &iscan->iscan_ex_params_p->params.channel_list[0], \
+ WL_NUMCHANNELS, &tlv_size_left)) == -1) {
+ WL_ERROR(("%s missing channel list\n", \
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_NPROBE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+ &iscan->iscan_ex_params_p->params.nprobes, \
+ sizeof(iscan->iscan_ex_params_p->params.nprobes), \
+ type, sizeof(char), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n", \
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_ACTIVE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+ &iscan->iscan_ex_params_p->params.active_time, \
+ sizeof(iscan->iscan_ex_params_p->params.active_time), \
+ type, sizeof(short), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n", \
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_PASSIVE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+ &iscan->iscan_ex_params_p->params.passive_time, \
+ sizeof(iscan->iscan_ex_params_p->params.passive_time), \
+ type, sizeof(short), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n", \
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_HOME_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+ &iscan->iscan_ex_params_p->params.home_time, \
+ sizeof(iscan->iscan_ex_params_p->params.home_time), \
+ type, sizeof(short), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n", \
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_STYPE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+ &iscan->iscan_ex_params_p->params.scan_type, \
+ sizeof(iscan->iscan_ex_params_p->params.scan_type), \
+ type, sizeof(char), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n", \
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+
+ default :
+					WL_ERROR(("%s got unknown type %X\n", \
+						__FUNCTION__, type));
+ goto exit_proc;
+ break;
+ }
+ }
+ }
+ }
+ else {
+		WL_ERROR(("%s got wrong TLV command header\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+ if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+ WL_ERROR(("%s Clean up First scan flag which is %d\n", \
+ __FUNCTION__, g_first_broadcast_scan));
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+ }
+ else {
+ WL_ERROR(("%s Ignoring CSCAN : First Scan is not done yet %d\n", \
+ __FUNCTION__, g_first_counter_scans));
+ res = -EBUSY;
+ goto exit_proc;
+ }
+ }
+#endif
+
+ res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+exit_proc:
+ net_os_wake_unlock(dev);
+ return res;
+}
+
+#endif
+
+#ifdef SOFTAP
+#ifndef AP_ONLY
+
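+/*
+ * Kernel thread spawned by set_ap_cfg(): waits (up to 5 s on ap_eth_sema)
+ * for the secondary SoftAP interface (ap_net_dev) to be registered, then
+ * notifies the framework with the "AP_SET_CFG_OK" private event.
+ */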
+static int thr_wait_for_2nd_eth_dev(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ wl_iw_t *iw;
+ int ret = 0;
+ unsigned long flags;
+
+ net_os_wake_lock(dev);
+
+ DAEMONIZE("wl0_eth_wthread");
+
+ WL_TRACE(("\n>%s thread started:, PID:%x\n", __FUNCTION__, current->pid));
+ iw = *(wl_iw_t **)netdev_priv(dev);
+ if (!iw) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ ret = -1;
+ goto fail;
+ }
+
+#ifndef BCMSDIOH_STD
+ if (down_timeout(&ap_eth_sema, msecs_to_jiffies(5000)) != 0) {
+		WL_ERROR(("\n%s: ap_eth_sema timeout\n", __FUNCTION__));
+ ret = -1;
+ goto fail;
+ }
+#endif
+
+ flags = dhd_os_spin_lock(iw->pub);
+ if (!ap_net_dev) {
+ WL_ERROR((" ap_net_dev is null !!!"));
+ ret = -1;
+ dhd_os_spin_unlock(iw->pub, flags);
+ goto fail;
+ }
+
+ WL_TRACE(("\n>%s: Thread:'softap ethdev IF:%s is detected !!!'\n\n",
+ __FUNCTION__, ap_net_dev->name));
+
+ ap_cfg_running = TRUE;
+
+ dhd_os_spin_unlock(iw->pub, flags);
+
+ bcm_mdelay(500);
+
+ wl_iw_send_priv_event(priv_dev, "AP_SET_CFG_OK");
+
+fail:
+ WL_TRACE(("\n>%s, thread completed\n", __FUNCTION__));
+
+ net_os_wake_unlock(dev);
+
+ complete_and_exit(&ap_cfg_exited, 0);
+ return ret;
+}
+#endif
+#ifndef AP_ONLY
+static int last_auto_channel = 6;
+#endif
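+/*
+ * get_softap_auto_channel: lets the firmware pick a SoftAP channel.
+ * Brings the interface up with a null SSID, starts WLC_START_CHANNEL_SEL and
+ * polls WLC_GET_CHANNEL_SEL (up to 3 retries, one rescan); the result is
+ * stored in ap->channel and cached in last_auto_channel.
+ */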
+static int get_softap_auto_channel(struct net_device *dev, struct ap_profile *ap)
+{
+ int chosen = 0;
+ wl_uint32_list_t request;
+ int rescan = 0;
+ int retry = 0;
+ int updown = 0;
+ int ret = 0;
+ wlc_ssid_t null_ssid;
+ int res = 0;
+#ifndef AP_ONLY
+ int iolen = 0;
+ int mkvar_err = 0;
+ int bsscfg_index = 1;
+ char buf[WLC_IOCTL_SMLEN];
+#endif
+ WL_SOFTAP(("Enter %s\n", __FUNCTION__));
+
+#ifndef AP_ONLY
+ if (ap_cfg_running) {
+ ap->channel = last_auto_channel;
+ return res;
+ }
+#endif
+ memset(&null_ssid, 0, sizeof(wlc_ssid_t));
+ res |= dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown));
+#ifdef AP_ONLY
+ res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid));
+#else
+ iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&null_ssid), \
+ null_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ res |= dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen);
+#endif
+ auto_channel_retry:
+ request.count = htod32(0);
+ ret = dev_wlc_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request));
+ if (ret < 0) {
+ WL_ERROR(("can't start auto channel scan\n"));
+ goto fail;
+ }
+
+ get_channel_retry:
+ bcm_mdelay(500);
+
+ ret = dev_wlc_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+ if (ret < 0 || dtoh32(chosen) == 0) {
+ if (retry++ < 3)
+ goto get_channel_retry;
+ else {
+			WL_ERROR(("can't get auto channel sel, err = %d, chosen = %d\n",
+				ret, chosen));
+ goto fail;
+ }
+ }
+ if ((chosen == 1) && (!rescan++))
+ goto auto_channel_retry;
+ WL_SOFTAP(("Set auto channel = %d\n", chosen));
+ ap->channel = chosen;
+ if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown))) < 0) {
+		WL_ERROR(("%s fail to set down err=%d\n", __FUNCTION__, res));
+ goto fail;
+ }
+#ifndef AP_ONLY
+ if (!res)
+ last_auto_channel = ap->channel;
+#endif
+
+fail :
+ return res;
+}
+
+
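+/*
+ * set_ap_cfg: applies a SoftAP profile. On the first start it puts the
+ * firmware into AP/apsta mode, programs closednet, channel, maxassoc and
+ * SSID, and spawns thr_wait_for_2nd_eth_dev(); on a reconfiguration it
+ * deauthenticates associated stations and restarts the existing BSS with
+ * the new security settings.
+ */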
+static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap)
+{
+ int updown = 0;
+ int channel = 0;
+
+ wlc_ssid_t ap_ssid;
+ int max_assoc = 8;
+
+ int res = 0;
+ int apsta_var = 0;
+#ifndef AP_ONLY
+ int mpc = 0;
+ int iolen = 0;
+ int mkvar_err = 0;
+ int bsscfg_index = 1;
+ char buf[WLC_IOCTL_SMLEN];
+#endif
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_SOFTAP(("wl_iw: set ap profile:\n"));
+ WL_SOFTAP((" ssid = '%s'\n", ap->ssid));
+ WL_SOFTAP((" security = '%s'\n", ap->sec));
+ if (ap->key[0] != '\0')
+ WL_SOFTAP((" key = '%s'\n", ap->key));
+ WL_SOFTAP((" channel = %d\n", ap->channel));
+ WL_SOFTAP((" max scb = %d\n", ap->max_scb));
+
+#ifdef AP_ONLY
+ if (ap_cfg_running) {
+ wl_iw_softap_deassoc_stations(dev, NULL);
+ ap_cfg_running = FALSE;
+ }
+#endif
+
+ if (ap_cfg_running == FALSE) {
+
+#ifndef AP_ONLY
+ sema_init(&ap_eth_sema, 0);
+
+ mpc = 0;
+ if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) {
+ WL_ERROR(("%s fail to set mpc\n", __FUNCTION__));
+ goto fail;
+ }
+#endif
+
+ updown = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown)))) {
+ WL_ERROR(("%s fail to set updown\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef AP_ONLY
+ apsta_var = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+ WL_ERROR(("%s fail to set apsta_var 0\n", __FUNCTION__));
+ goto fail;
+ }
+ apsta_var = 1;
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+ WL_ERROR(("%s fail to set apsta_var 1\n", __FUNCTION__));
+ goto fail;
+ }
+ res = dev_wlc_ioctl(dev, WLC_GET_AP, &apsta_var, sizeof(apsta_var));
+#else
+ apsta_var = 1;
+ iolen = wl_bssiovar_mkbuf("apsta",
+ bsscfg_index, &apsta_var, sizeof(apsta_var)+4,
+ buf, sizeof(buf), &mkvar_err);
+
+ if (iolen <= 0)
+ goto fail;
+
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+ WL_ERROR(("%s fail to set apsta \n", __FUNCTION__));
+ goto fail;
+ }
+ WL_TRACE(("\n>in %s: apsta set result: %d \n", __FUNCTION__, res));
+#endif
+
+ updown = 1;
+ if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown))) < 0) {
+ WL_ERROR(("%s fail to set apsta \n", __FUNCTION__));
+ goto fail;
+ }
+
+ } else {
+
+ if (!ap_net_dev) {
+ WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__));
+ goto fail;
+ }
+
+ res = wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+
+
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+ WL_ERROR(("%s fail to set bss down\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (strlen(ap->country_code)) {
+		WL_ERROR(("%s: Ignored: country code must be set via the"
+			" COUNTRY command\n", __FUNCTION__));
+ } else {
+ WL_SOFTAP(("%s: Country code is not specified,"
+ " will use Radio's default\n",
+ __FUNCTION__));
+ }
+
+ iolen = wl_bssiovar_mkbuf("closednet",
+ bsscfg_index, &ap->closednet, sizeof(ap->closednet)+4,
+ buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+		WL_ERROR(("%s failed to set 'closednet' for apsta\n", __FUNCTION__));
+ goto fail;
+ }
+
+
+ if ((ap->channel == 0) && (get_softap_auto_channel(dev, ap) < 0)) {
+ ap->channel = 1;
+ WL_ERROR(("%s auto channel failed, pick up channel=%d\n", \
+ __FUNCTION__, ap->channel));
+ }
+
+ channel = ap->channel;
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel)))) {
+ WL_ERROR(("%s fail to set channel\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (ap_cfg_running == FALSE) {
+ updown = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)))) {
+ WL_ERROR(("%s fail to set up\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ max_assoc = ap->max_scb;
+ if ((res = dev_wlc_intvar_set(dev, "maxassoc", max_assoc))) {
+ WL_ERROR(("%s fail to set maxassoc\n", __FUNCTION__));
+ goto fail;
+ }
+
+ ap_ssid.SSID_len = strlen(ap->ssid);
+ strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+
+#ifdef AP_ONLY
+ if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+ WL_ERROR(("ERROR:%d in:%s, wl_iw_set_ap_security is skipped\n", \
+ res, __FUNCTION__));
+ goto fail;
+ }
+ wl_iw_send_priv_event(dev, "ASCII_CMD=AP_BSS_START");
+ ap_cfg_running = TRUE;
+#else
+ iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&ap_ssid),
+ ap_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) != 0) {
+ WL_ERROR(("ERROR:%d in:%s, Security & BSS reconfiguration is skipped\n", \
+ res, __FUNCTION__));
+ goto fail;
+ }
+ if (ap_cfg_running == FALSE) {
+ init_completion(&ap_cfg_exited);
+ ap_cfg_pid = kernel_thread(thr_wait_for_2nd_eth_dev, dev, 0);
+ } else {
+ ap_cfg_pid = -1;
+ if (ap_net_dev == NULL) {
+ WL_ERROR(("%s ERROR: ap_net_dev is NULL !!!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ WL_ERROR(("%s: %s Configure security & restart AP bss \n", \
+ __FUNCTION__, ap_net_dev->name));
+
+ if ((res = wl_iw_set_ap_security(ap_net_dev, &my_ap)) < 0) {
+ WL_ERROR(("%s fail to set security : %d\n", __FUNCTION__, res));
+ goto fail;
+ }
+
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) {
+ WL_ERROR(("%s fail to set bss up\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+#endif
+fail:
+ WL_SOFTAP(("%s exit with %d\n", __FUNCTION__, res));
+
+ net_os_wake_unlock(dev);
+
+ return res;
+}
+
+
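+/*
+ * wl_iw_set_ap_security: programs wsec/wpa_auth for the requested mode
+ * (open, wep, wpa-psk, wpa2-psk). Passphrases shorter than WSEC_MAX_PSK_LEN
+ * are expanded to a 256-bit PMK with pbkdf2_sha1() before being handed to
+ * WLC_SET_WSEC_PMK.
+ */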
+static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap)
+{
+ int wsec = 0;
+ int wpa_auth = 0;
+ int res = 0;
+ int i;
+ char *ptr;
+#ifdef AP_ONLY
+ int mpc = 0;
+ wlc_ssid_t ap_ssid;
+#endif
+ wl_wsec_key_t key;
+
+ WL_SOFTAP(("\nsetting SOFTAP security mode:\n"));
+ WL_SOFTAP(("wl_iw: set ap profile:\n"));
+ WL_SOFTAP((" ssid = '%s'\n", ap->ssid));
+ WL_SOFTAP((" security = '%s'\n", ap->sec));
+ if (ap->key[0] != '\0') {
+ WL_SOFTAP((" key = '%s'\n", ap->key));
+ }
+ WL_SOFTAP((" channel = %d\n", ap->channel));
+ WL_SOFTAP((" max scb = %d\n", ap->max_scb));
+
+ if (strnicmp(ap->sec, "open", strlen("open")) == 0) {
+ wsec = 0;
+ res = dev_wlc_intvar_set(dev, "wsec", wsec);
+ wpa_auth = WPA_AUTH_DISABLED;
+ res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+ WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & wpa_auth set 'OPEN', result: %d\n", res));
+ WL_SOFTAP(("=====================\n"));
+
+ } else if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+
+ memset(&key, 0, sizeof(key));
+
+ wsec = WEP_ENABLED;
+ res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+ key.index = 0;
+ if (wl_iw_parse_wep(ap->key, &key)) {
+ WL_SOFTAP(("wep key parse err!\n"));
+ return -1;
+ }
+
+ key.index = htod32(key.index);
+ key.len = htod32(key.len);
+ key.algo = htod32(key.algo);
+ key.flags = htod32(key.flags);
+
+ res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+
+ wpa_auth = WPA_AUTH_DISABLED;
+ res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+ WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & auth set 'WEP', result: %d\n", res));
+ WL_SOFTAP(("=====================\n"));
+
+ } else if (strnicmp(ap->sec, "wpa2-psk", strlen("wpa2-psk")) == 0) {
+ wsec_pmk_t psk;
+ size_t key_len;
+
+ wsec = AES_ENABLED;
+ dev_wlc_intvar_set(dev, "wsec", wsec);
+
+ key_len = strlen(ap->key);
+ if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+ WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+ WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+ return -1;
+ }
+
+ if (key_len < WSEC_MAX_PSK_LEN) {
+ unsigned char output[2*SHA1HashSize];
+ char key_str_buf[WSEC_MAX_PSK_LEN+1];
+
+ memset(output, 0, sizeof(output));
+ pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+
+ ptr = key_str_buf;
+ for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+ sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4], \
+ (uint)output[i*4+1], (uint)output[i*4+2], \
+ (uint)output[i*4+3]);
+ ptr += 8;
+ }
+			WL_SOFTAP(("%s: passphrase = %s\n", __FUNCTION__, key_str_buf));
+
+ psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+ memcpy(psk.key, key_str_buf, psk.key_len);
+ } else {
+ psk.key_len = htod16((ushort) key_len);
+ memcpy(psk.key, ap->key, key_len);
+ }
+ psk.flags = htod16(WSEC_PASSPHRASE);
+ dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+ wpa_auth = WPA2_AUTH_PSK;
+ dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+ } else if (strnicmp(ap->sec, "wpa-psk", strlen("wpa-psk")) == 0) {
+
+ wsec_pmk_t psk;
+ size_t key_len;
+
+ wsec = TKIP_ENABLED;
+ res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+ key_len = strlen(ap->key);
+ if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+ WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+ WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+ return -1;
+ }
+
+ if (key_len < WSEC_MAX_PSK_LEN) {
+ unsigned char output[2*SHA1HashSize];
+ char key_str_buf[WSEC_MAX_PSK_LEN+1];
+ bzero(output, 2*SHA1HashSize);
+
+			WL_SOFTAP(("%s: computing passphrase hash...\n", __FUNCTION__));
+
+ pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+
+ ptr = key_str_buf;
+ for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+ WL_SOFTAP(("[%02d]: %08x\n", i, *((unsigned int *)&output[i*4])));
+
+ sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4],
+ (uint)output[i*4+1], (uint)output[i*4+2],
+ (uint)output[i*4+3]);
+ ptr += 8;
+ }
+			WL_SOFTAP(("%s: passphrase = %s\n", __FUNCTION__, key_str_buf));
+
+ psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+ memcpy(psk.key, key_str_buf, psk.key_len);
+ } else {
+ psk.key_len = htod16((ushort) key_len);
+ memcpy(psk.key, ap->key, key_len);
+ }
+
+ psk.flags = htod16(WSEC_PASSPHRASE);
+ res |= dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+ wpa_auth = WPA_AUTH_PSK;
+ res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP((" wsec & auth set 'wpa-psk' (TKIP), result: %d\n", res));
+ }
+
+#ifdef AP_ONLY
+ ap_ssid.SSID_len = strlen(ap->ssid);
+ strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+ res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &ap_ssid, sizeof(ap_ssid));
+ mpc = 0;
+ res |= dev_wlc_intvar_set(dev, "mpc", mpc);
+ if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+ res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ }
+#endif
+ return res;
+}
+
+
+
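+/*
+ * get_parmeter_from_string: extracts a "TOKEN=value" field from an IWPRIV
+ * ASCII command string and stores it into dst as a decimal integer, a hex
+ * byte buffer or a plain string, depending on param_type. *str_ptr is
+ * advanced past the consumed field.
+ */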
+int get_parmeter_from_string(
+ char **str_ptr, const char *token,
+ int param_type, void *dst, int param_max_len)
+{
+ char int_str[7] = "0";
+ int parm_str_len;
+ char *param_str_begin;
+ char *param_str_end;
+
+ if ((*str_ptr) && !strncmp(*str_ptr, token, strlen(token))) {
+
+ strsep(str_ptr, "=,");
+ param_str_begin = *str_ptr;
+ strsep(str_ptr, "=,");
+
+ if (*str_ptr == NULL) {
+ parm_str_len = strlen(param_str_begin);
+ } else {
+ param_str_end = *str_ptr-1;
+ parm_str_len = param_str_end - param_str_begin;
+ }
+
+ WL_TRACE((" 'token:%s', len:%d, ", token, parm_str_len));
+
+ if (parm_str_len > param_max_len) {
+ WL_TRACE((" WARNING: extracted param len:%d is > MAX:%d\n",
+ parm_str_len, param_max_len));
+
+ parm_str_len = param_max_len;
+ }
+
+ switch (param_type) {
+
+ case PTYPE_INTDEC: {
+ int *pdst_int = dst;
+ char *eptr;
+
+			if (parm_str_len >= sizeof(int_str))
+				parm_str_len = sizeof(int_str) - 1;	/* keep room for the NUL terminator */
+
+ memcpy(int_str, param_str_begin, parm_str_len);
+
+ *pdst_int = simple_strtoul(int_str, &eptr, 10);
+
+ WL_TRACE((" written as integer:%d\n", *pdst_int));
+ }
+ break;
+ case PTYPE_STR_HEX: {
+ u8 *buf = dst;
+
+ param_max_len = param_max_len >> 1;
+ hstr_2_buf(param_str_begin, buf, param_max_len);
+ print_buf(buf, param_max_len, 0);
+ }
+ break;
+ default:
+ memcpy(dst, param_str_begin, parm_str_len);
+ *((char *)dst + parm_str_len) = 0;
+ WL_TRACE((" written as a string:%s\n", (char *)dst));
+ break;
+ }
+
+ return 0;
+ } else {
+ WL_ERROR(("\n %s: No token:%s in str:%s\n",
+ __FUNCTION__, token, *str_ptr));
+
+ return -1;
+ }
+}
+
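+/*
+ * wl_iw_softap_deassoc_stations: deauthenticates one station (mac) or, when
+ * mac is NULL, every entry returned by WLC_GET_ASSOCLIST.
+ */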
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac)
+{
+ int i;
+ int res = 0;
+ char mac_buf[128] = {0};
+ char z_mac[6] = {0, 0, 0, 0, 0, 0};
+ char *sta_mac;
+ struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+ bool deauth_all = false;
+
+ if (mac == NULL) {
+ deauth_all = true;
+ sta_mac = z_mac;
+ } else {
+ sta_mac = mac;
+ }
+
+ memset(assoc_maclist, 0, sizeof(mac_buf));
+ assoc_maclist->count = 8;
+
+ res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 128);
+ if (res != 0) {
+ WL_SOFTAP(("%s: Error:%d Couldn't get ASSOC List\n", __FUNCTION__, res));
+ return res;
+ }
+
+ if (assoc_maclist->count) {
+ for (i = 0; i < assoc_maclist->count; i++) {
+ scb_val_t scbval;
+
+ scbval.val = htod32(1);
+ bcopy(&assoc_maclist->ea[i], &scbval.ea, ETHER_ADDR_LEN);
+
+ if (deauth_all || (memcmp(&scbval.ea, sta_mac, ETHER_ADDR_LEN) == 0)) {
+ WL_SOFTAP(("%s, deauth STA:%d \n", __FUNCTION__, i));
+ res |= dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scb_val_t));
+ }
+ }
+ } else {
+ WL_SOFTAP((" STA ASSOC list is empty\n"));
+ }
+
+ if (res != 0) {
+ WL_ERROR(("%s: Error:%d\n", __FUNCTION__, res));
+ } else if (assoc_maclist->count) {
+ bcm_mdelay(200);
+ }
+ return res;
+}
+
+
+static int iwpriv_softap_stop(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+
+ WL_SOFTAP(("got iwpriv AP_BSS_STOP\n"));
+
+ if ((!dev) && (!ap_net_dev)) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return res;
+ }
+
+ net_os_wake_lock(dev);
+
+ if ((ap_cfg_running == TRUE)) {
+#ifdef AP_ONLY
+ wl_iw_softap_deassoc_stations(dev, NULL);
+#else
+ wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 2)) < 0)
+ WL_ERROR(("%s failed to del BSS err = %d", __FUNCTION__, res));
+#endif
+
+ bcm_mdelay(100);
+
+ wrqu->data.length = 0;
+ ap_cfg_running = FALSE;
+ }
+ else
+ WL_ERROR(("%s: was called when SoftAP is OFF : move on\n", __FUNCTION__));
+
+ WL_SOFTAP(("%s Done with %d\n", __FUNCTION__, res));
+
+ net_os_wake_unlock(dev);
+
+ return res;
+}
+
+
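+/*
+ * iwpriv_fw_reload: handler for the WL_FW_RELOAD private ioctl. Copies the
+ * "FW_PATH=..." argument from user space into fw_path and sets ap_fw_loaded
+ * depending on whether an apsta or a STA firmware image was selected.
+ */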
+static int iwpriv_fw_reload(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int ret = -1;
+ char extra[256];
+ char *fwstr = fw_path;
+
+ WL_SOFTAP(("current firmware_path[]=%s\n", fwstr));
+
+	WL_TRACE((">Got FW_RELOAD cmd: "
+		"info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d, "
+		"fw_path:%p, len:%d\n",
+		info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length, fwstr, strlen(fwstr)));
+
+ if ((wrqu->data.length > 4) && (wrqu->data.length < sizeof(extra))) {
+
+ char *str_ptr;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ ret = -EFAULT;
+ goto exit_proc;
+ }
+
+		extra[wrqu->data.length] = '\0';	/* terminate the copied user string */
+ str_ptr = extra;
+
+ if (get_parmeter_from_string(&str_ptr, "FW_PATH=", PTYPE_STRING, fwstr, 255) != 0) {
+ WL_ERROR(("Error: extracting FW_PATH='' string\n"));
+ goto exit_proc;
+ }
+
+ if (strstr(fwstr, "apsta") != NULL) {
+ WL_SOFTAP(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ WL_SOFTAP(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
+ }
+
+ WL_SOFTAP(("SET firmware_path[]=%s , str_p:%p\n", fwstr, fwstr));
+ ret = 0;
+ } else {
+		WL_ERROR(("Error: invalid param len:%d\n", wrqu->data.length));
+ }
+
+exit_proc:
+ return ret;
+}
+#endif
+
+#ifdef SOFTAP
+static int iwpriv_wpasupp_loop_tst(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+ char *params = NULL;
+
+ WL_TRACE((">Got IWPRIV wp_supp loopback cmd test:"
+ "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (wrqu->data.length != 0) {
+
+ if (!(params = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(params, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(params);
+ return -EFAULT;
+ }
+
+ params[wrqu->data.length] = 0;
+ WL_SOFTAP(("\n>> copied from user:\n %s\n", params));
+ } else {
+ WL_ERROR(("ERROR param length is 0\n"));
+ return -EFAULT;
+ }
+
+ res = wl_iw_send_priv_event(dev, params);
+ kfree(params);
+
+ return res;
+}
+#endif
+
+
+static int
+iwpriv_en_ap_bss(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ void *wrqu,
+ char *extra)
+{
+ int res = 0;
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_SOFTAP(("%s: rcvd IWPRIV IOCTL: for dev:%s\n", __FUNCTION__, dev->name));
+
+#ifndef AP_ONLY
+ if (ap_cfg_pid >= 0) {
+ wait_for_completion(&ap_cfg_exited);
+ ap_cfg_pid = -1;
+ }
+
+ if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+		WL_ERROR(("%s ERROR setting SoftAP security, err:%d\n", __FUNCTION__, res));
+ }
+ else {
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0)
+ WL_ERROR(("%s fail to set bss up err=%d\n", __FUNCTION__, res));
+ else
+ bcm_mdelay(100);
+ }
+
+#endif
+ WL_SOFTAP(("%s done with res %d \n", __FUNCTION__, res));
+
+ net_os_wake_unlock(dev);
+
+ return res;
+}
+
+static int
+get_assoc_sta_list(struct net_device *dev, char *buf, int len)
+{
+ WL_TRACE(("%s: dev_wlc_ioctl(dev:%p, cmd:%d, buf:%p, len:%d)\n",
+ __FUNCTION__, dev, WLC_GET_ASSOCLIST, buf, len));
+
+ return dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, buf, len);
+
+}
+
+
+void check_error(int res, const char *msg, const char *func, int line)
+{
+ if (res != 0)
+ WL_ERROR(("%s, %d function:%s, line:%d\n", msg, res, func, line));
+}
+
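+/*
+ * set_ap_mac_list: applies a MAC filter list (allow/deny/disabled) and
+ * deauthenticates any currently associated station that violates the new
+ * policy.
+ */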
+static int
+set_ap_mac_list(struct net_device *dev, void *buf)
+{
+ struct mac_list_set *mac_list_set = (struct mac_list_set *)buf;
+ struct maclist *maclist = (struct maclist *)&mac_list_set->mac_list;
+ int length;
+ int i;
+ int mac_mode = mac_list_set->mode;
+ int ioc_res = 0;
+ ap_macmode = mac_list_set->mode;
+
+ bzero(&ap_black_list, sizeof(struct mflist));
+
+ if (mac_mode == MACLIST_MODE_DISABLED) {
+
+ ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+ check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+ WL_SOFTAP(("%s: MAC filtering disabled\n", __FUNCTION__));
+ } else {
+
+ scb_val_t scbval;
+ char mac_buf[256] = {0};
+ struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+
+ bcopy(maclist, &ap_black_list, sizeof(ap_black_list));
+
+ ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+ check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+
+ length = sizeof(maclist->count) + maclist->count*ETHER_ADDR_LEN;
+ dev_wlc_ioctl(dev, WLC_SET_MACLIST, maclist, length);
+
+ WL_SOFTAP(("%s: applied MAC List, mode:%d, length %d:\n",
+ __FUNCTION__, mac_mode, length));
+ for (i = 0; i < maclist->count; i++)
+ WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ i, maclist->ea[i].octet[0], maclist->ea[i].octet[1], \
+ maclist->ea[i].octet[2], \
+ maclist->ea[i].octet[3], maclist->ea[i].octet[4], \
+ maclist->ea[i].octet[5]));
+
+ assoc_maclist->count = 8;
+ ioc_res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 256);
+ check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+ WL_SOFTAP((" Cur assoc clients:%d\n", assoc_maclist->count));
+
+ if (assoc_maclist->count)
+ for (i = 0; i < assoc_maclist->count; i++) {
+ int j;
+ bool assoc_mac_matched = false;
+
+				WL_SOFTAP(("\n Checking assoc STA: "));
+ print_buf(&assoc_maclist->ea[i], 6, 7);
+ WL_SOFTAP(("with the b/w list:"));
+
+ for (j = 0; j < maclist->count; j++)
+ if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j],
+ ETHER_ADDR_LEN)) {
+
+ assoc_mac_matched = true;
+ break;
+ }
+
+ if (((mac_mode == MACLIST_MODE_ALLOW) && !assoc_mac_matched) ||
+ ((mac_mode == MACLIST_MODE_DENY) && assoc_mac_matched)) {
+
+ WL_SOFTAP(("b-match or w-mismatch,"
+ " do deauth/disassoc \n"));
+ scbval.val = htod32(1);
+ bcopy(&assoc_maclist->ea[i], &scbval.ea, \
+ ETHER_ADDR_LEN);
+ ioc_res = dev_wlc_ioctl(dev,
+ WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scb_val_t));
+ check_error(ioc_res,
+ "ioctl ERROR:",
+ __FUNCTION__, __LINE__);
+
+ } else {
+ WL_SOFTAP((" no b/w list hits, let it be\n"));
+ }
+ } else {
+ WL_SOFTAP(("No ASSOC CLIENTS\n"));
+ }
+ }
+
+ WL_SOFTAP(("%s iocres:%d\n", __FUNCTION__, ioc_res));
+ return ioc_res;
+}
+#endif
+
+
+#ifdef SOFTAP
+int set_macfilt_from_string(struct mflist *pmflist, char **param_str)
+{
+ return 0;
+}
+#endif
+
+
+#ifdef SOFTAP
+#define PARAM_OFFSET PROFILE_OFFSET
+
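+/*
+ * wl_iw_process_private_ascii_cmd: dispatches "ASCII_CMD=..." SoftAP requests
+ * (AP_CFG, AP_BSS_START, ASSOC_LST, AP_BSS_STOP) received through the
+ * SIOCSIWPRIV private command path.
+ */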
+int wl_iw_process_private_ascii_cmd(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *dwrq,
+ char *cmd_str)
+{
+ int ret = 0;
+ char *sub_cmd = cmd_str + PROFILE_OFFSET + strlen("ASCII_CMD=");
+
+ WL_SOFTAP(("\n %s: ASCII_CMD: offs_0:%s, offset_32:\n'%s'\n",
+ __FUNCTION__, cmd_str, cmd_str + PROFILE_OFFSET));
+
+ if (strnicmp(sub_cmd, "AP_CFG", strlen("AP_CFG")) == 0) {
+
+ WL_SOFTAP((" AP_CFG \n"));
+
+
+ if (init_ap_profile_from_string(cmd_str+PROFILE_OFFSET, &my_ap) != 0) {
+			WL_ERROR(("ERROR: invalid SoftAP CFG params!\n"));
+ ret = -1;
+ } else {
+ ret = set_ap_cfg(dev, &my_ap);
+ }
+
+ } else if (strnicmp(sub_cmd, "AP_BSS_START", strlen("AP_BSS_START")) == 0) {
+
+ WL_SOFTAP(("\n SOFTAP - ENABLE BSS \n"));
+
+ WL_SOFTAP(("\n!!! got 'WL_AP_EN_BSS' from WPA supplicant, dev:%s\n", dev->name));
+
+#ifndef AP_ONLY
+ if (ap_net_dev == NULL) {
+ printf("\n ERROR: SOFTAP net_dev* is NULL !!!\n");
+ } else {
+ if ((ret = iwpriv_en_ap_bss(ap_net_dev, info, dwrq, cmd_str)) < 0)
+ WL_ERROR(("%s line %d fail to set bss up\n", \
+ __FUNCTION__, __LINE__));
+ }
+#else
+ if ((ret = iwpriv_en_ap_bss(dev, info, dwrq, cmd_str)) < 0)
+ WL_ERROR(("%s line %d fail to set bss up\n", \
+ __FUNCTION__, __LINE__));
+#endif
+ } else if (strnicmp(sub_cmd, "ASSOC_LST", strlen("ASSOC_LST")) == 0) {
+ /* no code yet */
+ } else if (strnicmp(sub_cmd, "AP_BSS_STOP", strlen("AP_BSS_STOP")) == 0) {
+ WL_SOFTAP((" \n temp DOWN SOFTAP\n"));
+#ifndef AP_ONLY
+ if ((ret = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+ WL_ERROR(("%s line %d fail to set bss down\n", \
+ __FUNCTION__, __LINE__));
+ }
+#endif
+ }
+
+ return ret;
+}
+#endif
+
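+/*
+ * wl_iw_set_priv: SIOCSIWPRIV entry point. Copies the command string from
+ * user space and dispatches the Android private commands (START/STOP,
+ * SCAN-ACTIVE/PASSIVE, RSSI, LINKSPEED, MACADDR, COUNTRY, PNO, CSCAN,
+ * BTCOEXMODE, SoftAP commands, ...).
+ */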
+static int wl_iw_set_priv(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *ext
+)
+{
+ int ret = 0;
+ char * extra;
+
+ if (!(extra = kmalloc(dwrq->length, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, dwrq->pointer, dwrq->length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+	WL_TRACE(("%s: SIOCSIWPRIV request %s, info->cmd:%x, info->flags:%d, dwrq->length:%d\n",
+		dev->name, extra, info->cmd, info->flags, dwrq->length));
+
+ net_os_wake_lock(dev);
+
+ if (dwrq->length && extra) {
+ if (strnicmp(extra, "START", strlen("START")) == 0) {
+ wl_iw_control_wl_on(dev, info);
+ WL_TRACE(("%s, Received regular START command\n", __FUNCTION__));
+ }
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s, missing START, Fail\n", __FUNCTION__));
+ kfree(extra);
+ net_os_wake_unlock(dev);
+ return -EFAULT;
+ }
+
+ if (strnicmp(extra, "SCAN-ACTIVE", strlen("SCAN-ACTIVE")) == 0) {
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+ WL_TRACE(("%s: active scan setting suppressed\n", dev->name));
+#else
+ ret = wl_iw_set_active_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+ } else if (strnicmp(extra, "SCAN-PASSIVE", strlen("SCAN-PASSIVE")) == 0)
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+ WL_TRACE(("%s: passive scan setting suppressed\n", dev->name));
+#else
+ ret = wl_iw_set_passive_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+ else if (strnicmp(extra, "RSSI", strlen("RSSI")) == 0)
+ ret = wl_iw_get_rssi(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "LINKSPEED", strlen("LINKSPEED")) == 0)
+ ret = wl_iw_get_link_speed(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "MACADDR", strlen("MACADDR")) == 0)
+ ret = wl_iw_get_macaddr(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "COUNTRY", strlen("COUNTRY")) == 0)
+ ret = wl_iw_set_country(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "STOP", strlen("STOP")) == 0)
+ ret = wl_iw_control_wl_off(dev, info);
+ else if (strnicmp(extra, BAND_GET_CMD, strlen(BAND_GET_CMD)) == 0)
+ ret = wl_iw_get_band(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, BAND_SET_CMD, strlen(BAND_SET_CMD)) == 0)
+ ret = wl_iw_set_band(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, DTIM_SKIP_GET_CMD, strlen(DTIM_SKIP_GET_CMD)) == 0)
+ ret = wl_iw_get_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, DTIM_SKIP_SET_CMD, strlen(DTIM_SKIP_SET_CMD)) == 0)
+ ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, SETSUSPEND_CMD, strlen(SETSUSPEND_CMD)) == 0)
+ ret = wl_iw_set_suspend(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, TXPOWER_SET_CMD, strlen(TXPOWER_SET_CMD)) == 0)
+ ret = wl_iw_set_txpower(dev, info, (union iwreq_data *)dwrq, extra);
+#if defined(PNO_SUPPORT)
+ else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0)
+ ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, PNOSETUP_SET_CMD, strlen(PNOSETUP_SET_CMD)) == 0)
+ ret = wl_iw_set_pno_set(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, PNOENABLE_SET_CMD, strlen(PNOENABLE_SET_CMD)) == 0)
+ ret = wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+#if defined(CSCAN)
+ else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0)
+ ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+#ifdef CUSTOMER_HW2
+ else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0)
+ ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) {
+			WL_TRACE_COEX(("%s: got framework cmd: 'BTCOEXMODE'\n", __FUNCTION__));
+ ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra);
+ }
+#else
+ else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0)
+ ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+ else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0)
+ ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, RXFILTER_START_CMD, strlen(RXFILTER_START_CMD)) == 0)
+ ret = net_os_set_packet_filter(dev, 1);
+ else if (strnicmp(extra, RXFILTER_STOP_CMD, strlen(RXFILTER_STOP_CMD)) == 0)
+ ret = net_os_set_packet_filter(dev, 0);
+ else if (strnicmp(extra, RXFILTER_ADD_CMD, strlen(RXFILTER_ADD_CMD)) == 0) {
+ int filter_num = *(extra + strlen(RXFILTER_ADD_CMD) + 1) - '0';
+ ret = net_os_rxfilter_add_remove(dev, TRUE, filter_num);
+ }
+ else if (strnicmp(extra, RXFILTER_REMOVE_CMD, strlen(RXFILTER_REMOVE_CMD)) == 0) {
+ int filter_num = *(extra + strlen(RXFILTER_REMOVE_CMD) + 1) - '0';
+ ret = net_os_rxfilter_add_remove(dev, FALSE, filter_num);
+ }
+#ifdef SOFTAP
+#ifdef SOFTAP_TLV_CFG
+ else if (strnicmp(extra, SOFTAP_SET_CMD, strlen(SOFTAP_SET_CMD)) == 0) {
+ wl_iw_softap_cfg_tlv(dev, info, (union iwreq_data *)dwrq, extra);
+ }
+#endif
+ else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) {
+ wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra);
+ } else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) {
+			WL_SOFTAP(("set AP_MAC_LIST_SET\n"));
+ set_ap_mac_list(dev, (extra + PROFILE_OFFSET));
+ }
+#endif
+ else {
+ WL_TRACE(("Unknown PRIVATE command: %s: ignored\n", extra));
+ snprintf(extra, MAX_WX_STRING, "OK");
+ dwrq->length = strlen("OK") + 1;
+ }
+ }
+
+ net_os_wake_unlock(dev);
+
+ if (extra) {
+ if (copy_to_user(dwrq->pointer, extra, dwrq->length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ kfree(extra);
+ }
+
+ return ret;
+}
+
+static const iw_handler wl_iw_handler[] =
+{
+ (iw_handler) wl_iw_config_commit,
+ (iw_handler) wl_iw_get_name,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_freq,
+ (iw_handler) wl_iw_get_freq,
+ (iw_handler) wl_iw_set_mode,
+ (iw_handler) wl_iw_get_mode,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_get_range,
+ (iw_handler) wl_iw_set_priv,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_spy,
+ (iw_handler) wl_iw_get_spy,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_wap,
+ (iw_handler) wl_iw_get_wap,
+#if WIRELESS_EXT > 17
+ (iw_handler) wl_iw_mlme,
+#else
+ (iw_handler) NULL,
+#endif
+#if defined(WL_IW_USE_ISCAN)
+ (iw_handler) wl_iw_iscan_get_aplist,
+#else
+ (iw_handler) wl_iw_get_aplist,
+#endif
+#if WIRELESS_EXT > 13
+#if defined(WL_IW_USE_ISCAN)
+ (iw_handler) wl_iw_iscan_set_scan,
+ (iw_handler) wl_iw_iscan_get_scan,
+#else
+ (iw_handler) wl_iw_set_scan,
+ (iw_handler) wl_iw_get_scan,
+#endif
+#else
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+#endif
+ (iw_handler) wl_iw_set_essid,
+ (iw_handler) wl_iw_get_essid,
+ (iw_handler) wl_iw_set_nick,
+ (iw_handler) wl_iw_get_nick,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_rate,
+ (iw_handler) wl_iw_get_rate,
+ (iw_handler) wl_iw_set_rts,
+ (iw_handler) wl_iw_get_rts,
+ (iw_handler) wl_iw_set_frag,
+ (iw_handler) wl_iw_get_frag,
+ (iw_handler) wl_iw_set_txpow,
+ (iw_handler) wl_iw_get_txpow,
+#if WIRELESS_EXT > 10
+ (iw_handler) wl_iw_set_retry,
+ (iw_handler) wl_iw_get_retry,
+#endif
+ (iw_handler) wl_iw_set_encode,
+ (iw_handler) wl_iw_get_encode,
+ (iw_handler) wl_iw_set_power,
+ (iw_handler) wl_iw_get_power,
+#if WIRELESS_EXT > 17
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_wpaie,
+ (iw_handler) wl_iw_get_wpaie,
+ (iw_handler) wl_iw_set_wpaauth,
+ (iw_handler) wl_iw_get_wpaauth,
+ (iw_handler) wl_iw_set_encodeext,
+ (iw_handler) wl_iw_get_encodeext,
+#ifdef BCMWPA2
+ (iw_handler) wl_iw_set_pmksa,
+#endif
+#endif
+};
+
+#if WIRELESS_EXT > 12
+static const iw_handler wl_iw_priv_handler[] = {
+ NULL,
+ (iw_handler)wl_iw_set_active_scan,
+ NULL,
+ (iw_handler)wl_iw_get_rssi,
+ NULL,
+ (iw_handler)wl_iw_set_passive_scan,
+ NULL,
+ (iw_handler)wl_iw_get_link_speed,
+ NULL,
+ (iw_handler)wl_iw_get_macaddr,
+ NULL,
+ (iw_handler)wl_iw_control_wl_off,
+ NULL,
+ (iw_handler)wl_iw_control_wl_on,
+#ifdef SOFTAP
+ NULL,
+ (iw_handler)iwpriv_set_ap_config,
+
+ NULL,
+ (iw_handler)iwpriv_get_assoc_list,
+
+ NULL,
+ (iw_handler)iwpriv_set_mac_filters,
+
+ NULL,
+ (iw_handler)iwpriv_en_ap_bss,
+
+ NULL,
+ (iw_handler)iwpriv_wpasupp_loop_tst,
+
+ NULL,
+ (iw_handler)iwpriv_softap_stop,
+
+ NULL,
+ (iw_handler)iwpriv_fw_reload,
+
+ NULL,
+ (iw_handler)iwpriv_set_ap_sta_disassoc,
+#endif
+#if defined(CSCAN)
+
+ NULL,
+ (iw_handler)iwpriv_set_cscan
+#endif
+};
+
+static const struct iw_priv_args wl_iw_priv_args[] = {
+ {
+ WL_IW_SET_ACTIVE_SCAN,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "SCAN-ACTIVE"
+ },
+ {
+ WL_IW_GET_RSSI,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "RSSI"
+ },
+ {
+ WL_IW_SET_PASSIVE_SCAN,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "SCAN-PASSIVE"
+ },
+ {
+ WL_IW_GET_LINK_SPEED,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "LINKSPEED"
+ },
+ {
+ WL_IW_GET_CURR_MACADDR,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "Macaddr"
+ },
+ {
+ WL_IW_SET_STOP,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "STOP"
+ },
+ {
+ WL_IW_SET_START,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "START"
+ },
+
+#ifdef SOFTAP
+ {
+ WL_SET_AP_CFG,
+ IW_PRIV_TYPE_CHAR | 256,
+ 0,
+ "AP_SET_CFG"
+ },
+
+ {
+ WL_AP_STA_LIST,
+ IW_PRIV_TYPE_CHAR | 0,
+ IW_PRIV_TYPE_CHAR | 1024,
+ "AP_GET_STA_LIST"
+ },
+
+ {
+ WL_AP_MAC_FLTR,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "AP_SET_MAC_FLTR"
+ },
+
+ {
+ WL_AP_BSS_START,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "AP_BSS_START"
+ },
+
+ {
+ AP_LPB_CMD,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "AP_LPB_CMD"
+ },
+
+ {
+ WL_AP_STOP,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "AP_BSS_STOP"
+ },
+
+ {
+ WL_FW_RELOAD,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "WL_FW_RELOAD"
+ },
+
+ {
+ WL_AP_STA_DISASSOC,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | 0,
+ "AP_STA_DISASSOC"
+ },
+#endif
+#if defined(CSCAN)
+ {
+ WL_COMBO_SCAN,
+ IW_PRIV_TYPE_CHAR | 1024,
+ 0,
+ "CSCAN"
+ },
+#endif
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+ .num_standard = ARRAYSIZE(wl_iw_handler),
+ .standard = (iw_handler *) wl_iw_handler,
+ .num_private = ARRAYSIZE(wl_iw_priv_handler),
+ .num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+ .private = (iw_handler *)wl_iw_priv_handler,
+ .private_args = (void *) wl_iw_priv_args,
+
+#if WIRELESS_EXT >= 19
+ get_wireless_stats: dhd_get_wireless_stats,
+#endif
+};
+#endif
+
+
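+/*
+ * wl_iw_ioctl: generic wireless-extensions ioctl entry called from the DHD
+ * layer. Validates the command, sizes and copies the user buffer, invokes
+ * the matching handler from wl_iw_handler[] and copies the result back.
+ */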
+int wl_iw_ioctl(
+ struct net_device *dev,
+ struct ifreq *rq,
+ int cmd
+)
+{
+ struct iwreq *wrq = (struct iwreq *) rq;
+ struct iw_request_info info;
+ iw_handler handler;
+ char *extra = NULL;
+ int token_size = 1, max_tokens = 0, ret = 0;
+
+ net_os_wake_lock(dev);
+
+	WL_TRACE(("%s: cmd:%x called via dhd->do_ioctl() entry point\n", __FUNCTION__, cmd));
+ if (cmd < SIOCIWFIRST ||
+ IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+ !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) {
+ WL_ERROR(("%s: error in cmd=%x : not supported\n", __FUNCTION__, cmd));
+ net_os_wake_unlock(dev);
+ return -EOPNOTSUPP;
+ }
+
+ switch (cmd) {
+
+ case SIOCSIWESSID:
+ case SIOCGIWESSID:
+ case SIOCSIWNICKN:
+ case SIOCGIWNICKN:
+ max_tokens = IW_ESSID_MAX_SIZE + 1;
+ break;
+
+ case SIOCSIWENCODE:
+ case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+ case SIOCSIWENCODEEXT:
+ case SIOCGIWENCODEEXT:
+#endif
+ max_tokens = wrq->u.data.length;
+ break;
+
+ case SIOCGIWRANGE:
+ max_tokens = sizeof(struct iw_range) + 500;
+ break;
+
+ case SIOCGIWAPLIST:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_AP;
+ break;
+
+#if WIRELESS_EXT > 13
+ case SIOCGIWSCAN:
+#if defined(WL_IW_USE_ISCAN)
+ if (g_iscan)
+ max_tokens = wrq->u.data.length;
+ else
+#endif
+ max_tokens = IW_SCAN_MAX_DATA;
+ break;
+#endif
+
+ case SIOCSIWSPY:
+ token_size = sizeof(struct sockaddr);
+ max_tokens = IW_MAX_SPY;
+ break;
+
+ case SIOCGIWSPY:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_SPY;
+ break;
+
+#if WIRELESS_EXT > 17
+ case SIOCSIWPMKSA:
+ case SIOCSIWGENIE:
+#endif
+ case SIOCSIWPRIV:
+ max_tokens = wrq->u.data.length;
+ break;
+ }
+
+ if (max_tokens && wrq->u.data.pointer) {
+ if (wrq->u.data.length > max_tokens) {
+ WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n", \
+ __FUNCTION__, cmd, wrq->u.data.length, max_tokens));
+ ret = -E2BIG;
+ goto wl_iw_ioctl_done;
+ }
+ if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto wl_iw_ioctl_done;
+ }
+
+ if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ ret = -EFAULT;
+ goto wl_iw_ioctl_done;
+ }
+ }
+
+ info.cmd = cmd;
+ info.flags = 0;
+
+ ret = handler(dev, &info, &wrq->u, extra);
+
+ if (extra) {
+ if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ ret = -EFAULT;
+ goto wl_iw_ioctl_done;
+ }
+
+ kfree(extra);
+ }
+
+wl_iw_ioctl_done:
+
+ net_os_wake_unlock(dev);
+
+ return ret;
+}
+
+
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+ char* stringBuf, uint buflen)
+{
+ typedef struct conn_fail_event_map_t {
+ uint32 inEvent;
+ uint32 inStatus;
+ uint32 inReason;
+ const char* outName;
+ const char* outCause;
+ } conn_fail_event_map_t;
+
+
+# define WL_IW_DONT_CARE 9999
+ const conn_fail_event_map_t event_map [] = {
+
+
+ {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE,
+ "Conn", "Success"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+ "Conn", "NoNetworks"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ConfigMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH,
+ "Conn", "EncrypMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH,
+ "Conn", "RsnMismatch"},
+ {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "AuthTimeout"},
+ {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "AuthFail"},
+ {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE,
+ "Conn", "AuthNoAck"},
+ {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ReassocFail"},
+ {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "ReassocTimeout"},
+ {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE,
+ "Conn", "ReassocAbort"},
+ {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE,
+ "Sup", "ConnSuccess"},
+ {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Sup", "WpaHandshakeFail"},
+ {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Deauth"},
+ {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "DisassocInd"},
+ {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Disassoc"}
+ };
+
+ const char* name = "";
+ const char* cause = NULL;
+ int i;
+
+
+ for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+ const conn_fail_event_map_t* row = &event_map[i];
+ if (row->inEvent == event_type &&
+ (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+ (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+ name = row->outName;
+ cause = row->outCause;
+ break;
+ }
+ }
+
+
+ if (cause) {
+ memset(stringBuf, 0, buflen);
+ snprintf(stringBuf, buflen, "%s %s %02d %02d",
+ name, cause, status, reason);
+ WL_INFORM(("Connection status: %s\n", stringBuf));
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+#if WIRELESS_EXT > 14
+
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+ uint32 event = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+
+ if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
+#endif
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256
+#endif
+
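+/*
+ * wl_iw_event: translates firmware events (link up/down, assoc/disassoc,
+ * scan complete, MIC failure, PMKID candidates, PNO, action frames, ...)
+ * into wireless_send_event() notifications. In SoftAP mode some events are
+ * instead forwarded to the framework as private "STA_JOIN"/"STA_LEAVE"/
+ * "AP_UP"/"AP_DOWN" strings.
+ */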
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd = 0;
+ uint32 event_type = ntoh32(e->event_type);
+ uint16 flags = ntoh16(e->flags);
+ uint32 datalen = ntoh32(e->datalen);
+ uint32 status = ntoh32(e->status);
+ uint32 toto;
+#if defined(ROAM_NOT_USED)
+ static uint32 roam_no_success = 0;
+ static bool roam_no_success_send = FALSE;
+#endif
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_TRACE(("%s: dev=%s event=%d \n", __FUNCTION__, dev->name, event_type));
+
+ switch (event_type) {
+
+ case WLC_E_RELOAD:
+ WL_ERROR(("%s: Firmware ERROR %d\n", __FUNCTION__, status));
+ net_os_send_hang_message(dev);
+ goto wl_iw_event_end;
+
+#if defined(SOFTAP)
+ case WLC_E_PRUNE:
+ if (ap_cfg_running) {
+ char *macaddr = (char *)&e->addr;
+ WL_SOFTAP(("PRUNE received, %02X:%02X:%02X:%02X:%02X:%02X!\n",
+ macaddr[0], macaddr[1], macaddr[2], macaddr[3], \
+ macaddr[4], macaddr[5]));
+
+ if (ap_macmode) {
+ int i;
+ for (i = 0; i < ap_black_list.count; i++) {
+ if (!bcmp(macaddr, &ap_black_list.ea[i], \
+ sizeof(struct ether_addr))) {
+ WL_SOFTAP(("mac in black list, ignore it\n"));
+ break;
+ }
+ }
+
+ if (i == ap_black_list.count) {
+ char mac_buf[32] = {0};
+ sprintf(mac_buf, "STA_BLOCK %02X:%02X:%02X:%02X:%02X:%02X",
+ macaddr[0], macaddr[1], macaddr[2],
+ macaddr[3], macaddr[4], macaddr[5]);
+ wl_iw_send_priv_event(priv_dev, mac_buf);
+ }
+ }
+ }
+ break;
+#endif
+ case WLC_E_TXFAIL:
+ cmd = IWEVTXDROP;
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ break;
+#if WIRELESS_EXT > 14
+ case WLC_E_JOIN:
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+#if defined(SOFTAP)
+ WL_SOFTAP(("STA connect received %d\n", event_type));
+ if (ap_cfg_running) {
+ wl_iw_send_priv_event(priv_dev, "STA_JOIN");
+ goto wl_iw_event_end;
+ }
+#endif
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ cmd = IWEVREGISTERED;
+ break;
+ case WLC_E_ROAM:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ WL_ASSOC(("%s: WLC_E_ROAM: success\n", __FUNCTION__));
+#if defined(ROAM_NOT_USED)
+ roam_no_success_send = FALSE;
+ roam_no_success = 0;
+#endif
+ goto wl_iw_event_end;
+ }
+#if defined(ROAM_NOT_USED)
+ else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ roam_no_success++;
+ if ((roam_no_success == 5) && (roam_no_success_send == FALSE)) {
+ roam_no_success_send = TRUE;
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+ cmd = SIOCGIWAP;
+				WL_ERROR(("%s roaming did not succeed, send Link Down\n", \
+					__FUNCTION__));
+ } else {
+				WL_TRACE(("##### roaming did not succeed %d\n", roam_no_success));
+ goto wl_iw_event_end;
+ }
+ }
+#endif
+ break;
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+#if defined(SOFTAP)
+ WL_SOFTAP(("STA disconnect received %d\n", event_type));
+ if (ap_cfg_running) {
+ wl_iw_send_priv_event(priv_dev, "STA_LEAVE");
+ goto wl_iw_event_end;
+ }
+#endif
+ cmd = SIOCGIWAP;
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ bzero(&extra, ETHER_ADDR_LEN);
+ break;
+ case WLC_E_LINK:
+ case WLC_E_NDIS_LINK:
+ cmd = SIOCGIWAP;
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+#ifdef SOFTAP
+#ifdef AP_ONLY
+ if (ap_cfg_running) {
+#else
+ if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif
+ WL_SOFTAP(("AP DOWN %d\n", event_type));
+ wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+ } else {
+ WL_TRACE(("STA_Link Down\n"));
+ g_ss_cache_ctrl.m_link_down = 1;
+ }
+#else
+ g_ss_cache_ctrl.m_link_down = 1;
+#endif
+ WL_TRACE(("Link Down\n"));
+
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+ }
+ else {
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ g_ss_cache_ctrl.m_link_down = 0;
+
+ memcpy(g_ss_cache_ctrl.m_active_bssid, &e->addr, ETHER_ADDR_LEN);
+
+#ifdef SOFTAP
+#ifdef AP_ONLY
+ if (ap_cfg_running) {
+#else
+ if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif
+ WL_SOFTAP(("AP UP %d\n", event_type));
+ wl_iw_send_priv_event(priv_dev, "AP_UP");
+ } else {
+ WL_TRACE(("STA_LINK_UP\n"));
+#if defined(ROAM_NOT_USED)
+ roam_no_success_send = FALSE;
+ roam_no_success = 0;
+#endif
+ }
+#endif
+ WL_TRACE(("Link UP\n"));
+
+ }
+ net_os_wake_lock_timeout_enable(dev);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ break;
+ case WLC_E_ACTION_FRAME:
+ cmd = IWEVCUSTOM;
+ if (datalen + 1 <= sizeof(extra)) {
+ wrqu.data.length = datalen + 1;
+ extra[0] = WLC_E_ACTION_FRAME;
+ memcpy(&extra[1], data, datalen);
+ WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+ }
+ break;
+
+ case WLC_E_ACTION_FRAME_COMPLETE:
+ cmd = IWEVCUSTOM;
+ memcpy(&toto, data, 4);
+ if (sizeof(status) + 1 <= sizeof(extra)) {
+ wrqu.data.length = sizeof(status) + 1;
+ extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+ memcpy(&extra[1], &status, sizeof(status));
+ printf("wl_iw_event status %d PacketId %d \n", status, toto);
+ printf("WLC_E_ACTION_FRAME_COMPLETE len %d \n", wrqu.data.length);
+ }
+ break;
+#endif
+#if WIRELESS_EXT > 17
+ case WLC_E_MIC_ERROR: {
+ struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+ cmd = IWEVMICHAELMICFAILURE;
+ wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ micerrevt->flags |= IW_MICFAILURE_GROUP;
+ else
+ micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+ memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+ break;
+ }
+#ifdef BCMWPA2
+ case WLC_E_PMKID_CACHE: {
+ if (data)
+ {
+ struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+ pmkid_cand_list_t *pmkcandlist;
+ pmkid_cand_t *pmkidcand;
+ int count;
+
+ cmd = IWEVPMKIDCAND;
+ pmkcandlist = data;
+ count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+ ASSERT(count >= 0);
+ wrqu.data.length = sizeof(struct iw_pmkid_cand);
+ pmkidcand = pmkcandlist->pmkid_cand;
+ while (count) {
+ bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+ if (pmkidcand->preauth)
+ iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+ bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+ ETHER_ADDR_LEN);
+#ifndef SANDGATE2G
+ wireless_send_event(dev, cmd, &wrqu, extra);
+#endif
+ pmkidcand++;
+ count--;
+ }
+ }
+ goto wl_iw_event_end;
+ }
+#endif
+#endif
+
+ case WLC_E_SCAN_COMPLETE:
+#if defined(WL_IW_USE_ISCAN)
+ if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+ (g_iscan->iscan_state != ISCAN_STATE_IDLE))
+ {
+ up(&g_iscan->sysioc_sem);
+ } else {
+ cmd = SIOCGIWSCAN;
+ wrqu.data.length = strlen(extra);
+ WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific scan %d\n", \
+ g_iscan->iscan_state));
+ }
+#else
+ cmd = SIOCGIWSCAN;
+ wrqu.data.length = strlen(extra);
+ WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n"));
+#endif
+ break;
+
+ case WLC_E_PFN_NET_FOUND:
+ {
+ wlc_ssid_t * ssid;
+ ssid = (wlc_ssid_t *)data;
+ WL_TRACE(("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n", \
+ __FUNCTION__, PNO_EVENT_UP, ssid->SSID, ssid->SSID_len));
+ net_os_wake_lock_timeout_enable(dev);
+ cmd = IWEVCUSTOM;
+ memset(&wrqu, 0, sizeof(wrqu));
+ strcpy(extra, PNO_EVENT_UP);
+ wrqu.data.length = strlen(extra);
+ }
+ break;
+
+ default:
+
+ WL_TRACE(("Unknown Event %d: ignoring\n", event_type));
+ break;
+ }
+#ifndef SANDGATE2G
+ if (cmd) {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+ if (cmd == SIOCGIWSCAN)
+ wireless_send_event(dev, cmd, &wrqu, NULL);
+ else
+#endif
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+#endif
+
+#if WIRELESS_EXT > 14
+ memset(extra, 0, sizeof(extra));
+ if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+ cmd = IWEVCUSTOM;
+ wrqu.data.length = strlen(extra);
+#ifndef SANDGATE2G
+ wireless_send_event(dev, cmd, &wrqu, extra);
+#endif
+ }
+#endif
+wl_iw_event_end:
+ net_os_wake_unlock(dev);
+#endif
+}
+
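+/*
+ * wl_iw_get_wireless_stats: fills struct iw_statistics from
+ * WLC_GET_PHY_NOISE, WLC_GET_RSSI and the firmware "counters" iovar.
+ */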
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+ int res = 0;
+ wl_cnt_t cnt;
+ int phy_noise;
+ int rssi;
+ scb_val_t scb_val;
+
+ phy_noise = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+ goto done;
+
+ phy_noise = dtoh32(phy_noise);
+ WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise));
+
+ bzero(&scb_val, sizeof(scb_val_t));
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+ goto done;
+
+ rssi = dtoh32(scb_val.val);
+ WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi));
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ wstats->qual.qual = 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ wstats->qual.qual = 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ wstats->qual.qual = 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ wstats->qual.qual = 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ wstats->qual.qual = 4;
+ else
+ wstats->qual.qual = 5;
+
+
+ wstats->qual.level = 0x100 + rssi;
+ wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+ wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+ wstats->qual.updated |= 7;
+#endif
+
+#if WIRELESS_EXT > 11
+ WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n", (int)sizeof(wl_cnt_t)));
+
+ memset(&cnt, 0, sizeof(wl_cnt_t));
+ res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+ if (res)
+ {
+ WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n", res));
+ goto done;
+ }
+
+ cnt.version = dtoh16(cnt.version);
+ if (cnt.version != WL_CNT_T_VERSION) {
+ WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+ WL_CNT_T_VERSION, cnt.version));
+ goto done;
+ }
+
+ wstats->discard.nwid = 0;
+ wstats->discard.code = dtoh32(cnt.rxundec);
+ wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+ wstats->discard.retries = dtoh32(cnt.txfail);
+ wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+ wstats->miss.beacon = 0;
+
+ WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+ dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif
+
+done:
+ return res;
+}
+static void
+wl_iw_bt_flag_set(
+ struct net_device *dev,
+ bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+
+#if defined(BT_DHCP_eSCO_FIX)
+ set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+ WL_TRACE_COEX(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE) {
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0], sizeof(buf_flag7_dhcp_on));
+ }
+ else {
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+}
+
+static void
+wl_iw_bt_timerfunc(ulong data)
+{
+ bt_info_t *bt_local = (bt_info_t *)data;
+ bt_local->timer_on = 0;
+ WL_TRACE(("%s\n", __FUNCTION__));
+
+ up(&bt_local->bt_sem);
+}
+
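+/*
+ * _bt_dhcp_sysioc_thread: BT-coex DHCP state machine. Gives Bluetooth an
+ * opportunity window (T1) first, then boosts Wi-Fi priority via the
+ * btc_flags iovar until DHCP completes or the force interval (T2) expires.
+ */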
+static int
+_bt_dhcp_sysioc_thread(void *data)
+{
+ DAEMONIZE("dhcp_sysioc");
+
+ while (down_interruptible(&g_bt->bt_sem) == 0) {
+
+ net_os_wake_lock(g_bt->dev);
+
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+ }
+
+ switch (g_bt->bt_state) {
+ case BT_DHCP_START:
+ WL_TRACE_COEX(("%s bt_dhcp stm: started \n", __FUNCTION__));
+ g_bt->bt_state = BT_DHCP_OPPORTUNITY_WINDOW;
+ mod_timer(&g_bt->timer, jiffies + BT_DHCP_OPPORTUNITY_WINDOW_TIME*HZ/1000);
+ g_bt->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPORTUNITY_WINDOW:
+ if (g_bt->dhcp_done) {
+ WL_TRACE_COEX(("%s DHCP Done before T1 expiration\n", \
+ __FUNCTION__));
+ g_bt->bt_state = BT_DHCP_IDLE;
+ g_bt->timer_on = 0;
+ break;
+ }
+
+ WL_TRACE_COEX(("%s DHCP T1:%d expired\n", \
+ __FUNCTION__, BT_DHCP_OPPORTUNITY_WINDOW_TIME));
+ if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, TRUE);
+ g_bt->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&g_bt->timer, jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+ g_bt->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (g_bt->dhcp_done) {
+ WL_TRACE_COEX(("%s DHCP Done before T2 expiration\n", \
+ __FUNCTION__));
+ } else {
+ WL_TRACE_COEX(("%s DHCP wait interval T2:%d msec expired\n",
+ __FUNCTION__, BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+ if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE);
+ g_bt->bt_state = BT_DHCP_IDLE;
+ g_bt->timer_on = 0;
+ break;
+
+ default:
+ WL_ERROR(("%s error g_status=%d !!!\n", __FUNCTION__, \
+ g_bt->bt_state));
+ if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE);
+ g_bt->bt_state = BT_DHCP_IDLE;
+ g_bt->timer_on = 0;
+ break;
+ }
+
+ net_os_wake_unlock(g_bt->dev);
+ }
+
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+ }
+
+ complete_and_exit(&g_bt->bt_exited, 0);
+}
+
+static void
+wl_iw_bt_release(void)
+{
+ bt_info_t *bt_local = g_bt;
+
+ if (!bt_local) {
+ return;
+ }
+
+ if (bt_local->bt_pid >= 0) {
+ KILL_PROC(bt_local->bt_pid, SIGTERM);
+ wait_for_completion(&bt_local->bt_exited);
+ }
+ kfree(bt_local);
+ g_bt = NULL;
+}
+
+static int
+wl_iw_bt_init(struct net_device *dev)
+{
+ bt_info_t *bt_dhcp = NULL;
+
+ bt_dhcp = kmalloc(sizeof(bt_info_t), GFP_KERNEL);
+ if (!bt_dhcp)
+ return -ENOMEM;
+
+ memset(bt_dhcp, 0, sizeof(bt_info_t));
+ bt_dhcp->bt_pid = -1;
+ g_bt = bt_dhcp;
+ bt_dhcp->dev = dev;
+ bt_dhcp->bt_state = BT_DHCP_IDLE;
+
+
+ bt_dhcp->timer_ms = 10;
+ init_timer(&bt_dhcp->timer);
+ bt_dhcp->timer.data = (ulong)bt_dhcp;
+ bt_dhcp->timer.function = wl_iw_bt_timerfunc;
+
+ sema_init(&bt_dhcp->bt_sem, 0);
+ init_completion(&bt_dhcp->bt_exited);
+ bt_dhcp->bt_pid = kernel_thread(_bt_dhcp_sysioc_thread, bt_dhcp, 0);
+ if (bt_dhcp->bt_pid < 0) {
+ WL_ERROR(("Failed in %s\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
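+/*
+ * Wireless Extensions attach: allocates the iscan state and its sysioc
+ * thread, the shared scan-results buffer, the specific-scan cache (when
+ * CSCAN is not used) and the BT/DHCP coexistence helper. Undone by
+ * wl_iw_detach() below.
+ */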
+int wl_iw_attach(struct net_device *dev, void *dhdp)
+{
+ int params_size;
+ wl_iw_t *iw;
+#if defined(WL_IW_USE_ISCAN)
+ iscan_info_t *iscan = NULL;
+#endif
+
+ mutex_init(&wl_cache_lock);
+
+#if defined(WL_IW_USE_ISCAN)
+ if (!dev)
+ return 0;
+
+ memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t));
+
+#ifdef CSCAN
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)) +
+ (WL_NUMCHANNELS * sizeof(uint16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+#else
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+#endif
+ iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+ if (!iscan)
+ return -ENOMEM;
+ memset(iscan, 0, sizeof(iscan_info_t));
+
+ iscan->iscan_ex_params_p = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+ if (!iscan->iscan_ex_params_p)
+ return -ENOMEM;
+ iscan->iscan_ex_param_size = params_size;
+ iscan->sysioc_pid = -1;
+
+ g_iscan = iscan;
+ iscan->dev = dev;
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+#if defined(CONFIG_FIRST_SCAN)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+ g_first_counter_scans = 0;
+ g_iscan->scan_flag = 0;
+#endif
+
+ iscan->timer_ms = 8000;
+ init_timer(&iscan->timer);
+ iscan->timer.data = (ulong)iscan;
+ iscan->timer.function = wl_iw_timerfunc;
+
+ sema_init(&iscan->sysioc_sem, 0);
+ init_completion(&iscan->sysioc_exited);
+ iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+ if (iscan->sysioc_pid < 0)
+ return -ENOMEM;
+#endif
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+ iw->pub = (dhd_pub_t *)dhdp;
+#ifdef SOFTAP
+ priv_dev = dev;
+#endif
+ g_scan = NULL;
+
+ g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+ if (!g_scan)
+ return -ENOMEM;
+
+ memset(g_scan, 0, G_SCAN_RESULTS);
+ g_scan_specified_ssid = 0;
+
+#if !defined(CSCAN)
+ wl_iw_init_ss_cache_ctrl();
+#endif
+
+ wl_iw_bt_init(dev);
+
+ return 0;
+}
+
+void wl_iw_detach(void)
+{
+#if defined(WL_IW_USE_ISCAN)
+ iscan_buf_t *buf;
+ iscan_info_t *iscan = g_iscan;
+
+ if (!iscan)
+ return;
+ if (iscan->sysioc_pid >= 0) {
+ KILL_PROC(iscan->sysioc_pid, SIGTERM);
+ wait_for_completion(&iscan->sysioc_exited);
+ }
+ mutex_lock(&wl_cache_lock);
+ while (iscan->list_hdr) {
+ buf = iscan->list_hdr->next;
+ kfree(iscan->list_hdr);
+ iscan->list_hdr = buf;
+ }
+ kfree(iscan->iscan_ex_params_p);
+ kfree(iscan);
+ g_iscan = NULL;
+ mutex_unlock(&wl_cache_lock);
+#endif
+
+ if (g_scan)
+ kfree(g_scan);
+
+ g_scan = NULL;
+#if !defined(CSCAN)
+ wl_iw_release_ss_cache_ctrl();
+#endif
+ wl_iw_bt_release();
+#ifdef SOFTAP
+ if (ap_cfg_running) {
+ WL_TRACE(("\n%s AP is going down\n", __FUNCTION__));
+ wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+ }
+#endif
+}
diff --git a/drivers/net/wireless/bcm4329/wl_iw.h b/drivers/net/wireless/bcm4329/wl_iw.h
new file mode 100644
index 000000000000..ee6c699936ea
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/wl_iw.h
@@ -0,0 +1,309 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h,v 1.5.34.1.6.36.4.18 2011/02/10 19:33:12 Exp $
+ */
+
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+#define GET_SSID "SSID="
+#define GET_CHANNEL "CH="
+#define GET_NPROBE "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
+#define GET_HOME_DWELL "HOME="
+#define GET_SCAN_TYPE "TYPE="
+
+#define BAND_GET_CMD "GETBAND"
+#define BAND_SET_CMD "SETBAND"
+#define DTIM_SKIP_GET_CMD "DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD "DTIMSKIPSET"
+#define SETSUSPEND_CMD "SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR"
+#define PNOSETUP_SET_CMD "PNOSETUP "
+#define PNOENABLE_SET_CMD "PNOFORCE"
+#define PNODEBUG_SET_CMD "PNODEBUG"
+#define TXPOWER_SET_CMD "TXPOWER"
+#define RXFILTER_START_CMD "RXFILTER-START"
+#define RXFILTER_STOP_CMD "RXFILTER-STOP"
+#define RXFILTER_ADD_CMD "RXFILTER-ADD"
+#define RXFILTER_REMOVE_CMD "RXFILTER-REMOVE"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+
+typedef struct wl_iw_extra_params {
+ int target_channel;
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+ char iso_abbrev[WLC_CNTRY_BUF_SZ];
+ char custom_locale[WLC_CNTRY_BUF_SZ];
+ int32 custom_locale_rev;
+};
+
+#define WL_IW_RSSI_MINVAL -200
+#define WL_IW_RSSI_NO_SIGNAL -91
+#define WL_IW_RSSI_VERY_LOW -80
+#define WL_IW_RSSI_LOW -70
+#define WL_IW_RSSI_GOOD -68
+#define WL_IW_RSSI_VERY_GOOD -58
+#define WL_IW_RSSI_EXCELLENT -57
+#define WL_IW_RSSI_INVALID 0
+#define MAX_WX_STRING 80
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13)
+
+
+#define WL_SET_AP_CFG (SIOCIWFIRSTPRIV+15)
+#define WL_AP_STA_LIST (SIOCIWFIRSTPRIV+17)
+#define WL_AP_MAC_FLTR (SIOCIWFIRSTPRIV+19)
+#define WL_AP_BSS_START (SIOCIWFIRSTPRIV+21)
+#define AP_LPB_CMD (SIOCIWFIRSTPRIV+23)
+#define WL_AP_STOP (SIOCIWFIRSTPRIV+25)
+#define WL_FW_RELOAD (SIOCIWFIRSTPRIV+27)
+#define WL_AP_STA_DISASSOC (SIOCIWFIRSTPRIV+29)
+#define WL_COMBO_SCAN (SIOCIWFIRSTPRIV+31)
+
+#define G_SCAN_RESULTS (8*1024)
+#define WE_ADD_EVENT_FIX 0x80
+#define G_WLAN_SET_ON 0
+#define G_WLAN_SET_OFF 1
+
+#define CHECK_EXTRA_FOR_NULL(extra) \
+if (!extra) { \
+ WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \
+ return -EINVAL; \
+}
+
+typedef struct wl_iw {
+ char nickname[IW_ESSID_MAX_SIZE];
+
+ struct iw_statistics wstats;
+
+ int spy_num;
+ uint32 pwsec;
+ uint32 gwsec;
+ bool privacy_invoked;
+
+ struct ether_addr spy_addr[IW_MAX_SPY];
+ struct iw_quality spy_qual[IW_MAX_SPY];
+ void *wlinfo;
+ dhd_pub_t * pub;
+} wl_iw_t;
+
+#define WLC_IW_SS_CACHE_MAXLEN 2048
+#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN 32
+#define WLC_IW_BSS_INFO_MAXLEN \
+ (WLC_IW_SS_CACHE_MAXLEN - WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN)
+
+typedef struct wl_iw_ss_cache {
+ struct wl_iw_ss_cache *next;
+ int dirty;
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_t bss_info[1];
+} wl_iw_ss_cache_t;
+
+typedef struct wl_iw_ss_cache_ctrl {
+ wl_iw_ss_cache_t *m_cache_head;
+ int m_link_down;
+ int m_timer_expired;
+ char m_active_bssid[ETHER_ADDR_LEN];
+ uint m_prev_scan_mode;
+ uint m_cons_br_scan_cnt;
+ struct timer_list *m_timer;
+} wl_iw_ss_cache_ctrl_t;
+
+typedef enum broadcast_first_scan {
+ BROADCAST_SCAN_FIRST_IDLE = 0,
+ BROADCAST_SCAN_FIRST_STARTED,
+ BROADCAST_SCAN_FIRST_RESULT_READY,
+ BROADCAST_SCAN_FIRST_RESULT_CONSUMED
+} broadcast_first_scan_t;
+#ifdef SOFTAP
+#define SSID_LEN 33
+#define SEC_LEN 16
+#define KEY_LEN 65
+#define PROFILE_OFFSET 32
+struct ap_profile {
+ uint8 ssid[SSID_LEN];
+ uint8 sec[SEC_LEN];
+ uint8 key[KEY_LEN];
+ uint32 channel;
+ uint32 preamble;
+ uint32 max_scb;
+ uint32 closednet;
+ char country_code[WLC_CNTRY_BUF_SZ];
+};
+
+
+#define MACLIST_MODE_DISABLED 0
+#define MACLIST_MODE_DENY 1
+#define MACLIST_MODE_ALLOW 2
+struct mflist {
+ uint count;
+ struct ether_addr ea[16];
+};
+
+struct mac_list_set {
+ uint32 mode;
+ struct mflist mac_list;
+};
+#endif
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+void wl_iw_detach(void);
+int wl_control_wl_start(struct net_device *dev);
+
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+extern char *dhd_bus_country_get(struct net_device *dev);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
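+
+/*
+ * The wrappers above hide the struct iw_request_info argument that the
+ * iwe_stream_add_*() helpers gained in 2.6.27. A minimal sketch of how a
+ * scan-result handler appends an event with them (the buffer pointers
+ * "event" and "end" and the bss_info pointer "bi" are the caller's own
+ * names):
+ *
+ *	struct iw_event iwe;
+ *
+ *	iwe.cmd = SIOCGIWAP;
+ *	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ *	memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ *	event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+ */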
+
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, \
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_dev_pno_reset(struct net_device *dev);
+extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, \
+ int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled);
+extern int dhd_dev_get_pno_status(struct net_device *dev);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+
+#define PNO_TLV_PREFIX 'S'
+#define PNO_TLV_VERSION '1'
+#define PNO_TLV_SUBVERSION '2'
+#define PNO_TLV_RESERVED '0'
+#define PNO_TLV_TYPE_SSID_IE 'S'
+#define PNO_TLV_TYPE_TIME 'T'
+#define PNO_TLV_FREQ_REPEAT 'R'
+#define PNO_TLV_FREQ_EXPO_MAX 'M'
+#define PNO_EVENT_UP "PNO_EVENT"
+
+typedef struct cmd_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cmd_tlv_t;
+
+#ifdef SOFTAP_TLV_CFG
+#define SOFTAP_SET_CMD "SOFTAPSET "
+#define SOFTAP_TLV_PREFIX 'A'
+#define SOFTAP_TLV_VERSION '1'
+#define SOFTAP_TLV_SUBVERSION '0'
+#define SOFTAP_TLV_RESERVED '0'
+
+#define TLV_TYPE_SSID 'S'
+#define TLV_TYPE_SECUR 'E'
+#define TLV_TYPE_KEY 'K'
+#define TLV_TYPE_CHANNEL 'C'
+#endif
+
+#if defined(CSCAN)
+
+typedef struct cscan_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cscan_tlv_t;
+
+#define CSCAN_COMMAND "CSCAN "
+#define CSCAN_TLV_PREFIX 'S'
+#define CSCAN_TLV_VERSION 1
+#define CSCAN_TLV_SUBVERSION 0
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE 'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE 'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE 'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE 'P'
+#define CSCAN_TLV_TYPE_HOME_IE 'H'
+#define CSCAN_TLV_TYPE_STYPE_IE 'T'
+
+extern int wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, \
+ int channel_num, int *bytes_left);
+
+extern int wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, \
+ const char token, int input_size, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, \
+ int max, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max);
+
+extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num);
+
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 000000000000..60f5a3adbc91
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,48 @@
+config BCMDHD
+	tristate "Broadcom 4329/30 wireless card support"
+	depends on MMC
+	---help---
+	  This module adds support for wireless adapters based on the
+	  Broadcom 4329/30 chipset.
+
+	  This driver uses the kernel's wireless extensions subsystem.
+
+	  If you choose to build a module, it will be called bcmdhd. Say M
+	  if unsure.
+
+config BCMDHD_FW_DIR
+ depends on BCMDHD
+ string "Firmware path"
+ default "/system/vendor/firmware"
+ ---help---
+ Path to the firmware file.
+
+config BCMDHD_NVRAM_DIR
+ depends on BCMDHD
+ string "NVRAM path"
+ default "/system/etc"
+ ---help---
+ Path to the calibration file.
+
+config BCMDHD_WEXT
+ bool "Enable WEXT support"
+ depends on BCMDHD && CFG80211 = n
+ select WIRELESS_EXT
+ select WEXT_PRIV
+	---help---
+	  Enables WEXT support
+
+config BCMDHD_WIFI_CONTROL_FUNC
+ bool "Use bcmdhd_wlan device"
+ depends on BCMDHD
+ default n
+ ---help---
+	  Use this option to get various parameters from the architecture-specific
+	  bcmdhd_wlan platform device. Say n if unsure.
+
+config BCMDHD_HW_OOB
+ bool "Use out of band interrupt"
+ depends on BCMDHD
+ default n
+ ---help---
+	  Use an out-of-band interrupt for the card interrupt and for wake on wireless.
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 000000000000..0eca2aba4f02
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,43 @@
+# bcmdhd
+DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
+ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DWLBTAMP -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_GPL -DDHD_SCHED -DDHD_DEBUG -DSDTEST -DBDC -DTOE \
+ -DDHD_BCMEVENTS -DSHOW_EVENTS -DDONGLEOVERLAYS -DBCMDBG \
+ -DCUSTOMER_HW2 \
+ -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \
+ -DNEW_COMPAT_WIRELESS -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
+ -DKEEP_ALIVE -DCSCAN -DPKT_FILTER_SUPPORT \
+ -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
+ -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include
+
+ifeq ($(CONFIG_BCMDHD_WIFI_CONTROL_FUNC),y)
+DHDCFLAGS += -DCONFIG_WIFI_CONTROL_FUNC
+else
+DHDCFLAGS += -DCUSTOM_OOB_GPIO_NUM=2
+endif
+
+ifeq ($(CONFIG_BCMDHD_HW_OOB),y)
+DHDCFLAGS += -DHW_OOB -DOOB_INTR_ONLY
+else
+DHDCFLAGS += -DSDIO_ISR_THREAD
+endif
+
+DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o \
+ dhd_linux_sched.o bcmwifi.o dhd_sdio.o bcmevent.o dhd_bta.o hndpmu.o \
+ bcmsdh.o dhd_cdc.o bcmsdh_linux.o dhd_common.o linux_osl.o \
+ bcmsdh_sdmmc.o dhd_custom_gpio.o sbutils.o wldev_common.o wl_android.o
+
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
+ifneq ($(CONFIG_WIRELESS_EXT),)
+bcmdhd-objs += wl_iw.o
+DHDCFLAGS += -DSOFTAP
+endif
+ifneq ($(CONFIG_CFG80211),)
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o dhd_linux_mon.o
+DHDCFLAGS += -DWL_CFG80211
+endif
+EXTRA_CFLAGS = $(DHDCFLAGS)
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 000000000000..059df8907928
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,675 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c,v 1.26.2.1 2010-03-09 18:41:21 Exp $
+ */
+
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+
+
+
+
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+ uint32 ent;
+ uint inv = 0, nom = 0;
+
+ while (TRUE) {
+ ent = R_REG(si_osh(sih), *eromptr);
+ (*eromptr)++;
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+ nom++;
+ }
+
+ SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+ if (inv + nom) {
+ SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
+ }
+ return ent;
+}
+
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+ uint32 *sizel, uint32 *sizeh)
+{
+ uint32 asd, sz, szd;
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+
+ (*eromptr)--;
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+ sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+ return asd;
+}
+
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
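+/*
+ * Walk the AXI/AI enumeration ROM (EROM): for each component read the
+ * CIA/CIB identifier pair and its address space descriptors, and record the
+ * core id, revision, base address and wrapper address in the si_info arrays
+ * so that cores can later be selected by index.
+ */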
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = (chipcregs_t *)regs;
+ uint32 erombase, *eromptr, *eromlim;
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ eromptr = regs;
+ break;
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ eromptr = (uint32 *)(uintptr)erombase;
+ break;
+
+ case PCMCIA_BUS:
+ default:
+		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
+ ASSERT(0);
+ return;
+ }
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+ SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+ regs, erombase, eromptr, eromlim));
+ while (eromptr < eromlim) {
+ uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+ uint32 *base;
+ uint i, j, idx;
+ bool br;
+
+ br = FALSE;
+
+
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+ ai_hwfixup(sii);
+ return;
+ }
+ base = eromptr - 1;
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ SI_ERROR(("CIA not followed by CIB\n"));
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+ SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+ "nsw = %d, nmp = %d & nsp = %d\n",
+ mfg, cid, crev, base, nmw, nsw, nmp, nsp));
+
+ if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+ continue;
+ if ((nmw + nsw == 0)) {
+
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0) {
+ sii->oob_router = addrl;
+ }
+ }
+ continue;
+ }
+
+ idx = sii->numcores;
+
+ sii->cia[idx] = cia;
+ sii->cib[idx] = cib;
+ sii->coreid[idx] = cid;
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+ goto error;
+ }
+ SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
+ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+ }
+
+
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd != 0)
+ br = TRUE;
+ else
+ if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("First Slave ASD for core 0x%04x malformed "
+ "(0x%08x)\n", cid, asd));
+ goto error;
+ }
+ }
+ sii->coresba[idx] = addrl;
+ sii->coresba_size[idx] = sizel;
+
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+ sii->coresba2[idx] = addrl;
+ sii->coresba2_size[idx] = sizel;
+ }
+ j++;
+ } while (asd != 0);
+
+
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ } while (asd != 0);
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ goto error;
+ }
+ }
+
+
+ for (i = 0; i < nmw; i++) {
+ asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for MW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if (i == 0)
+ sii->wrapba[idx] = addrl;
+ }
+
+
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp == 1) ? 0 : 1;
+ asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for SW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if ((nmw == 0) && (i == 0))
+ sii->wrapba[idx] = addrl;
+ }
+
+
+ if (br)
+ continue;
+
+
+ sii->numcores++;
+ }
+
+	SI_ERROR(("Reached end of erom without finding END\n"));
+
+error:
+ sii->numcores = 0;
+ return;
+}
+
+
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 addr = sii->coresba[coreidx];
+ uint32 wrap = sii->wrapba[coreidx];
+ void *regs;
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ sii->curmap = regs = sii->regs[coreidx];
+ if (!sii->wrappers[coreidx]) {
+ sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->wrappers[coreidx]));
+ }
+ sii->curwrap = sii->wrappers[coreidx];
+ break;
+
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ sii->curmap = regs = (void *)((uintptr)addr);
+ sii->curwrap = (void *)((uintptr)wrap);
+ break;
+
+ case PCMCIA_BUS:
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ sii->curmap = regs;
+ sii->curidx = coreidx;
+
+ return regs;
+}
+
+
+int
+ai_numaddrspaces(si_t *sih)
+{
+ return 2;
+}
+
+
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba[cidx];
+ else if (asidx == 1)
+ return sii->coresba2[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
+}
+
+
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba_size[cidx];
+ else if (asidx == 1)
+ return sii->coresba2_size[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
+}
+
+uint
+ai_flag(si_t *sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+
+ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 *map = (uint32 *) sii->curwrap;
+
+ if (mask || val) {
+ uint32 w = R_REG(sii->osh, map+(offset/4));
+ w &= ~mask;
+ w |= val;
+ W_REG(sii->osh, map+(offset/4), val);
+ }
+
+ return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 cia;
+
+ sii = SI_INFO(sih);
+ cia = sii->cia[sii->curidx];
+ return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 cib;
+
+ sii = SI_INFO(sih);
+ cib = sii->cib[sii->curidx];
+ return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+
+ return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+ ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+
+ fast = TRUE;
+
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+
+
+ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+
+ origidx = si_coreidx(&sii->pub);
+
+
+ r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+ ASSERT(r != NULL);
+
+
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+ return;
+
+ W_REG(sii->osh, &ai->ioctrl, bits);
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ OSL_DELAY(10);
+
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ OSL_DELAY(1);
+}
+
+
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+
+ ai_core_disable(sih, (bits | resetbits));
+
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ W_REG(sii->osh, &ai->resetctrl, 0);
+ OSL_DELAY(1);
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ OSL_DELAY(1);
+}
+
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+
+ return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+ W_REG(sii->osh, &ai->iostatus, w);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 000000000000..24581ddd353c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,125 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmevent.c,v 1.8.2.7 2011-02-01 06:23:39 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+#if WLC_E_LAST != 85
+#error "You need to add an entry to bcmevent_names[] for the new event"
+#endif
+
+const bcmevent_name_t bcmevent_names[] = {
+ { WLC_E_SET_SSID, "SET_SSID" },
+ { WLC_E_JOIN, "JOIN" },
+ { WLC_E_START, "START" },
+ { WLC_E_AUTH, "AUTH" },
+ { WLC_E_AUTH_IND, "AUTH_IND" },
+ { WLC_E_DEAUTH, "DEAUTH" },
+ { WLC_E_DEAUTH_IND, "DEAUTH_IND" },
+ { WLC_E_ASSOC, "ASSOC" },
+ { WLC_E_ASSOC_IND, "ASSOC_IND" },
+ { WLC_E_REASSOC, "REASSOC" },
+ { WLC_E_REASSOC_IND, "REASSOC_IND" },
+ { WLC_E_DISASSOC, "DISASSOC" },
+ { WLC_E_DISASSOC_IND, "DISASSOC_IND" },
+ { WLC_E_QUIET_START, "START_QUIET" },
+ { WLC_E_QUIET_END, "END_QUIET" },
+ { WLC_E_BEACON_RX, "BEACON_RX" },
+ { WLC_E_LINK, "LINK" },
+ { WLC_E_MIC_ERROR, "MIC_ERROR" },
+ { WLC_E_NDIS_LINK, "NDIS_LINK" },
+ { WLC_E_ROAM, "ROAM" },
+ { WLC_E_TXFAIL, "TXFAIL" },
+ { WLC_E_PMKID_CACHE, "PMKID_CACHE" },
+ { WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF" },
+ { WLC_E_PRUNE, "PRUNE" },
+ { WLC_E_AUTOAUTH, "AUTOAUTH" },
+ { WLC_E_EAPOL_MSG, "EAPOL_MSG" },
+ { WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE" },
+ { WLC_E_ADDTS_IND, "ADDTS_IND" },
+ { WLC_E_DELTS_IND, "DELTS_IND" },
+ { WLC_E_BCNSENT_IND, "BCNSENT_IND" },
+ { WLC_E_BCNRX_MSG, "BCNRX_MSG" },
+ { WLC_E_BCNLOST_MSG, "BCNLOST_IND" },
+ { WLC_E_ROAM_PREP, "ROAM_PREP" },
+ { WLC_E_PFN_NET_FOUND, "PFNFOUND_IND" },
+ { WLC_E_PFN_NET_LOST, "PFNLOST_IND" },
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+ { WLC_E_IBSS_ASSOC, "IBSS_ASSOC" },
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+ { WLC_E_RADIO, "RADIO" },
+ { WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG" },
+ { WLC_E_PROBREQ_MSG, "PROBE_REQ_MSG" },
+ { WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" },
+ { WLC_E_PSK_SUP, "PSK_SUP" },
+ { WLC_E_COUNTRY_CODE_CHANGED, "CNTRYCODE_IND" },
+ { WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" },
+ { WLC_E_ICV_ERROR, "ICV_ERROR" },
+ { WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" },
+ { WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR" },
+ { WLC_E_TRACE, "TRACE" },
+ { WLC_E_BTA_HCI_EVENT, "BTA_HCI_EVENT" },
+ { WLC_E_IF, "IF" },
+#ifdef WLP2P
+ { WLC_E_P2P_DISC_LISTEN_COMPLETE, "WLC_E_P2P_DISC_LISTEN_COMPLETE" },
+#endif
+ { WLC_E_RSSI, "RSSI" },
+ { WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE" },
+ { WLC_E_EXTLOG_MSG, "EXTERNAL LOG MESSAGE" },
+#ifdef WIFI_ACT_FRAME
+ { WLC_E_ACTION_FRAME, "ACTION_FRAME" },
+ { WLC_E_ACTION_FRAME_RX, "ACTION_FRAME_RX" },
+ { WLC_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" },
+#endif
+ { WLC_E_ESCAN_RESULT, "WLC_E_ESCAN_RESULT" },
+ { WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "WLC_E_AF_OFF_CHAN_COMPLETE" },
+#ifdef WLP2P
+ { WLC_E_PROBRESP_MSG, "PROBE_RESP_MSG" },
+ { WLC_E_P2P_PROBREQ_MSG, "P2P PROBE_REQ_MSG" },
+#endif
+#ifdef PROP_TXSTATUS
+ { WLC_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP" },
+#endif
+ { WLC_E_WAKE_EVENT, "WAKE_EVENT" },
+ { WLC_E_DCS_REQUEST, "DCS_REQUEST" },
+ { WLC_E_RM_COMPLETE, "RM_COMPLETE" },
+#ifdef WLMEDIA_HTSF
+ { WLC_E_HTSFSYNC, "HTSF_SYNC_EVENT" },
+#endif
+ { WLC_E_OVERLAY_REQ, "OVERLAY_REQ_EVENT" },
+ { WLC_E_CSA_COMPLETE_IND, "WLC_E_CSA_COMPLETE_IND" },
+ { WLC_E_EXCESS_PM_WAKE_EVENT, "EXCESS_PM_WAKE_EVENT" },
+ { WLC_E_PFN_SCAN_NONE, "PFN_SCAN_NONE" },
+ { WLC_E_PFN_SCAN_ALLGONE, "PFN_SCAN_ALLGONE" },
+#ifdef SOFTAP
+ { WLC_E_GTK_PLUMBED, "GTK_PLUMBED" }
+#endif
+};
+
+
+const int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 000000000000..918c8e648f13
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,690 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c 275784 2011-08-04 22:41:49Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h> /* common SDIO/controller interface */
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+ bool init_success; /* underlying driver successfully attached */
+ void *sdioh; /* handler for sdioh */
+ uint32 vendevid; /* Target Vendor and Device ID on SD bus */
+ osl_t *osh;
+ bool regfail; /* Save status of last reg_read/reg_write call */
+ uint32 sbwad; /* Save backplane window address */
+};
+/* local copy of the bcmsdh handle */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+ sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param cfghdl Configuration Handle.
+ * @param regsva Virtual address of controller registers.
+ * @param irq Interrupt number of SDIO controller.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+{
+ bcmsdh_info_t *bcmsdh;
+
+ if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+ BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+
+ /* save the handler locally */
+ l_bcmsdh = bcmsdh;
+
+ if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
+ bcmsdh_detach(osh, bcmsdh);
+ return NULL;
+ }
+
+ bcmsdh->osh = osh;
+ bcmsdh->init_success = TRUE;
+
+ *regsva = (uint32 *)SI_ENUM_BASE;
+
+ /* Report the BAR, to fix if needed */
+ bcmsdh->sbwad = SI_ENUM_BASE;
+ return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bcmsdh != NULL) {
+ if (bcmsdh->sdioh) {
+ sdioh_detach(osh, bcmsdh->sdioh);
+ bcmsdh->sdioh = NULL;
+ }
+ MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+ }
+
+ l_bcmsdh = NULL;
+ return 0;
+}
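+
+/*
+ * A minimal sketch of the attach/detach lifecycle as a bus layer might use
+ * it (the osh, cfghdl and irq values come from the caller; error handling
+ * is omitted):
+ *
+ *	void *regsva = NULL;
+ *	bcmsdh_info_t *sdh = bcmsdh_attach(osh, cfghdl, &regsva, irq);
+ *
+ *	if (sdh != NULL) {
+ *		... register interrupts and run bus traffic ...
+ *		bcmsdh_detach(osh, sdh);
+ *	}
+ */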
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ bool on;
+
+ ASSERT(bcmsdh);
+ status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+ if (SDIOH_API_SUCCESS(status))
+ return FALSE;
+ else
+ return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ ASSERT(sdh);
+ return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ ASSERT(sdh);
+
+	/* not supported yet */
+ return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+ uint8 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+}
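+
+/*
+ * A minimal sketch of a config-space read-modify-write using the two calls
+ * above (the CCCR offset and enable-bit names are assumptions taken from the
+ * usual Broadcom sdio.h definitions):
+ *
+ *	int err = 0;
+ *	uint8 ioen = bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, &err);
+ *
+ *	if (!err)
+ *		bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+ *			ioen | SDIO_FUNC_ENABLE_1, &err);
+ */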
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+ addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ uint8 *tmp_buf, *tmp_ptr;
+ uint8 *ptr;
+ bool ascii = func & ~0xf;
+ func &= 0x7;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+ ASSERT(cis);
+ ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+ status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+ if (ascii) {
+ /* Move binary bits to tmp and format them into the provided buffer. */
+ if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+ BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ bcopy(cis, tmp_buf, length);
+ for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+ ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff);
+ if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+ ptr += sprintf((char *)ptr, "\n");
+ }
+ MFREE(bcmsdh->osh, tmp_buf, length);
+ }
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+ int err = 0;
+ uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bar0 != bcmsdh->sbwad || force_set) {
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+ if (!err)
+ bcmsdh->sbwad = bar0;
+ else
+ /* invalidate cached window var */
+ bcmsdh->sbwad = 0;
+
+ }
+
+ return err;
+}
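+
+/*
+ * Backplane addresses are reached through a sliding window: the bits above
+ * the in-window offset (addr & SBSDIO_SB_OFT_ADDR_MASK) are programmed into
+ * the function-1 SBADDRLOW/MID/HIGH registers, and the window base is cached
+ * in sbwad so that repeated accesses within the same window skip the three
+ * config writes.
+ */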
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 word = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
+ return 0xFFFFFFFF;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+ /* if ok, return appropriately masked word */
+ if (SDIOH_API_SUCCESS(status)) {
+ switch (size) {
+ case sizeof(uint8):
+ return (word & 0xff);
+ case sizeof(uint16):
+ return (word & 0xffff);
+ case sizeof(uint32):
+ return word;
+ default:
+ bcmsdh->regfail = TRUE;
+
+ }
+ }
+
+ /* otherwise, bad sdio access or invalid size */
+ BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+ return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ int err = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ __FUNCTION__, addr, size*8, data));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+ addr, &data, size);
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ if (SDIOH_API_SUCCESS(status))
+ return 0;
+
+ BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+ __FUNCTION__, data, addr, size));
+ return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+ return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+ ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+ (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+ return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+ ASSERT(sdh);
+ return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+ return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+ return;
+}
+
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_sleep(sd, enab);
+#else
+ return BCME_UNSUPPORTED;
+#endif
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 000000000000..096abb824350
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,741 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c,v 1.72.6.5 2010-12-23 01:13:15 Exp $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/mmc/sdio_func.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+
+#if defined(OOB_INTR_ONLY)
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(OOB_INTR_ONLY) */
+
+/**
+ * SDIO Host Controller info
+ */
+typedef struct bcmsdh_hc bcmsdh_hc_t;
+
+struct bcmsdh_hc {
+ bcmsdh_hc_t *next;
+#ifdef BCMPLATFORM_BUS
+ struct device *dev; /* platform device handle */
+#else
+ struct pci_dev *dev; /* pci device handle */
+#endif /* BCMPLATFORM_BUS */
+ osl_t *osh;
+ void *regs; /* SDIO Host Controller address */
+ bcmsdh_info_t *sdh; /* SDIO Host Controller handle */
+ void *ch;
+ unsigned int oob_irq;
+ unsigned long oob_flags; /* OOB host IRQ specification, e.g. edge trigger type */
+ bool oob_irq_registered;
+ bool oob_irq_enable_flag;
+#if defined(OOB_INTR_ONLY)
+ spinlock_t irq_lock;
+#endif
+};
+static bcmsdh_hc_t *sdhcinfo = NULL;
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL};
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+ /* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+ /* Check for Arasan host controller */
+ if (vendor == VENDOR_SI_IMAGE) {
+ return (TRUE);
+ }
+ /* Check for BRCM 27XX Standard host controller */
+ if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for BRCM Standard host controller */
+ if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for TI PCIxx21 Standard host controller */
+ if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ /* Ricoh R5C822 Standard SDIO Host */
+ if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+ return (TRUE);
+ }
+ /* JMicron Standard SDIO Host */
+ if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+ return (TRUE);
+ }
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+ /* This is the PciSpiHost. */
+ if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ printf("Found PCI SPI Host Controller\n");
+ return (TRUE);
+ }
+
+#endif /* BCMSDIOH_SPI */
+
+ return (FALSE);
+}
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+/* forward declarations */
+int bcmsdh_probe(struct device *dev);
+int bcmsdh_remove(struct device *dev);
+
+EXPORT_SYMBOL(bcmsdh_probe);
+EXPORT_SYMBOL(bcmsdh_remove);
+
+#else
+/* forward declarations */
+static int __devinit bcmsdh_probe(struct device *dev);
+static int __devexit bcmsdh_remove(struct device *dev);
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static struct device_driver bcmsdh_driver = {
+ .name = "pxa2xx-mci",
+ .bus = &platform_bus_type,
+ .probe = bcmsdh_probe,
+ .remove = bcmsdh_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ };
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_probe(struct device *dev)
+{
+ osl_t *osh = NULL;
+ bcmsdh_hc_t *sdhc = NULL;
+ ulong regs = 0;
+ bcmsdh_info_t *sdh = NULL;
+ struct sdio_func *func = container_of(dev, struct sdio_func, dev);
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+ struct platform_device *pdev;
+ struct resource *r;
+#endif /* BCMLXSDMMC */
+ int irq = 0;
+ uint32 vendevid;
+ unsigned long irq_flags = 0;
+
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+ pdev = to_platform_device(dev);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ return -ENXIO;
+#endif /* BCMLXSDMMC */
+
+#if defined(OOB_INTR_ONLY)
+#ifdef HW_OOB
+ irq_flags =
+ IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+ irq_flags = IRQF_TRIGGER_FALLING;
+#endif /* HW_OOB */
+
+ /* Get customer-specific OOB IRQ parameters: IRQ number and IRQ type */
+ irq = dhd_customer_oob_irq_map(&irq_flags);
+ if (irq < 0) {
+ SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
+ return 1;
+ }
+#endif /* defined(OOB_INTR_ONLY) */
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+ SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+ __FUNCTION__,
+ MALLOCED(osh)));
+ goto err;
+ }
+ bzero(sdhc, sizeof(bcmsdh_hc_t));
+ sdhc->osh = osh;
+
+ sdhc->dev = (void *)dev;
+
+#ifdef BCMLXSDMMC
+ if (!(sdh = bcmsdh_attach(osh, (void *)0,
+ (void **)&regs, irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+#else
+ if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
+ (void **)&regs, irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+#endif /* BCMLXSDMMC */
+ sdhc->sdh = sdh;
+ sdhc->oob_irq = irq;
+ sdhc->oob_flags = irq_flags;
+ sdhc->oob_irq_registered = FALSE; /* to make sure.. */
+ sdhc->oob_irq_enable_flag = FALSE;
+#if defined(OOB_INTR_ONLY)
+ spin_lock_init(&sdhc->irq_lock);
+#endif
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+ /* Read the vendor/device ID from the CIS */
+ vendevid = bcmsdh_query_device(sdh);
+
+
+ /* try to attach to the target device */
+ if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
+ func->device, 0, 0, 0, 0,
+ (void *)regs, NULL, sdh))) {
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ return 0;
+
+ /* error handling */
+err:
+ if (sdhc) {
+ if (sdhc->sdh)
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ }
+ if (osh)
+ osl_detach(osh);
+ return -ENODEV;
+}
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_remove(struct device *dev)
+{
+ bcmsdh_hc_t *sdhc, *prev;
+ osl_t *osh;
+
+ sdhc = sdhcinfo;
+ drvinfo.detach(sdhc->ch);
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ /* find the SDIO Host Controller state for this dev and remove it from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == (void *)dev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+ sdhcinfo = NULL;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc) {
+ SDLX_MSG(("%s: failed\n", __FUNCTION__));
+ return 0;
+ }
+
+
+ /* release SDIO Host Controller info */
+ osh = sdhc->osh;
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ osl_detach(osh);
+
+#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
+ dev_set_drvdata(dev, NULL);
+#endif /* !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) */
+
+ return 0;
+}
+
+#else /* BCMPLATFORM_BUS */
+
+#if !defined(BCMLXSDMMC)
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+ { vendor: PCI_ANY_ID,
+ device: PCI_ANY_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+ node: {},
+ name: "bcmsdh",
+ id_table: bcmsdh_pci_devid,
+ probe: bcmsdh_pci_probe,
+ remove: bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+ suspend: NULL,
+ resume: NULL,
+ };
+
+
+extern uint sd_pci_slot; /* Force detection to a particular PCI slot only. */
+ /* Allows multiple WL devices in one PC, but */
+ /* only one instance of dhd is usable at a time. */
+ /* Upper word is the bus number, lower word is */
+ /* the slot number. The default value of */
+ /* 0xffffffff disables this filter. */
+module_param(sd_pci_slot, uint, 0);
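+
+/*
+ * Illustrative sketch (not part of the original driver): how a bus/slot pair
+ * would be packed into sd_pci_slot to match the decode in bcmsdh_pci_probe()
+ * below, where the upper 16 bits are compared against the PCI bus number and
+ * the lower 16 bits against the PCI slot number.
+ */
+#if 0
+static inline uint sd_pci_slot_pack(uint bus, uint slot)
+{
+	/* upper word: bus, lower word: slot */
+	return (bus << 16) | (slot & 0xffff);
+}
+#endif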
+
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller. If so, attach to it and attach to the target device.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ osl_t *osh = NULL;
+ bcmsdh_hc_t *sdhc = NULL;
+ ulong regs;
+ bcmsdh_info_t *sdh = NULL;
+ int rc;
+
+ if (sd_pci_slot != 0xFFFFffff) {
+ if (pdev->bus->number != (sd_pci_slot>>16) ||
+ PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Found compatible SDIOHC"
+ :"Probing unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+ pdev->device));
+ return -ENODEV;
+ }
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Using compatible SDIOHC"
+ :"WARNING, forced use of unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+ }
+
+ if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+ (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+ uint32 config_reg;
+
+ SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+ /*
+ * Set MMC_SD_DIS bit in FlashMedia Controller.
+ * Disabling the SD/MMC Controller in the FlashMedia Controller
+ * allows the Standard SD Host Controller to take over control
+ * of the SD Slot.
+ */
+ config_reg |= 0x02;
+ OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+ osl_detach(osh);
+ }
+ /* match this pci device with what we support */
+ /* we can't solely rely on this to believe it is our SDIO Host Controller! */
+ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+ return -ENODEV;
+ }
+
+ /* this is a pci device we might support */
+ SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+ __FUNCTION__,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq));
+
+ /* use bcmsdh_query_device() to get the vendor ID of the target device so
+ * it will eventually appear in the Broadcom string on the console
+ */
+
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+ SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+ __FUNCTION__,
+ MALLOCED(osh)));
+ goto err;
+ }
+ bzero(sdhc, sizeof(bcmsdh_hc_t));
+ sdhc->osh = osh;
+
+ sdhc->dev = pdev;
+
+ /* map to address where host can access */
+ pci_set_master(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
+ (void **)&regs, pdev->irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ sdhc->sdh = sdh;
+
+ /* try to attach to the target device */
+ if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
+ bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
+ (void *)regs, NULL, sdh))) {
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+
+ return 0;
+
+ /* error handling */
+err:
+ if (sdhc) {
+ if (sdhc->sdh)
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ }
+ if (osh)
+ osl_detach(osh);
+ return -ENODEV;
+}
+
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+ bcmsdh_hc_t *sdhc, *prev;
+ osl_t *osh;
+
+ /* find the SDIO Host Controller state for this pdev and take it out from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == pdev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+ sdhcinfo = NULL;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc)
+ return;
+
+ drvinfo.detach(sdhc->ch);
+
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+ /* release SDIO Host Controller info */
+ osh = sdhc->osh;
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ osl_detach(osh);
+}
+#endif /* BCMLXSDMMC */
+#endif /* BCMPLATFORM_BUS */
+
+extern int sdio_function_init(void);
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+ int error = 0;
+
+ drvinfo = *driver;
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+ SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+ error = sdio_function_init();
+#else
+ SDLX_MSG(("Intel PXA270 SDIO Driver\n"));
+ error = driver_register(&bcmsdh_driver);
+#endif /* defined(BCMLXSDMMC) */
+ return error;
+#endif /* defined(BCMPLATFORM_BUS) */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (!(error = pci_module_init(&bcmsdh_pci_driver)))
+ return 0;
+#else
+ if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
+ return 0;
+#endif
+
+ SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#endif /* BCMPLATFORM_BUS */
+
+ return error;
+}
+
+extern void sdio_function_cleanup(void);
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (bcmsdh_pci_driver.node.next)
+#endif
+
+#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+ driver_unregister(&bcmsdh_driver);
+#endif
+#if defined(BCMLXSDMMC)
+ sdio_function_cleanup();
+#endif /* BCMLXSDMMC */
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+ pci_unregister_driver(&bcmsdh_pci_driver);
+#endif /* BCMPLATFORM_BUS */
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bool enable)
+{
+ static bool curstate = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
+ if (curstate != enable) {
+ if (enable)
+ enable_irq(sdhcinfo->oob_irq);
+ else
+ disable_irq_nosync(sdhcinfo->oob_irq);
+ curstate = enable;
+ }
+ spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+ dhd_pub_t *dhdp;
+
+ dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
+
+ bcmsdh_oob_intr_set(0);
+
+ if (dhdp == NULL) {
+ SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
+ return IRQ_HANDLED;
+ }
+
+ dhdsdio_isr((void *)dhdp->bus);
+
+ return IRQ_HANDLED;
+}
+
+void *bcmsdh_get_drvdata(void)
+{
+ if (!sdhcinfo)
+ return NULL;
+ return dev_get_drvdata(sdhcinfo->dev);
+}
+
+int bcmsdh_register_oob_intr(void * dhdp)
+{
+ int error = 0;
+
+ SDLX_MSG(("%s Enter \n", __FUNCTION__));
+
+ /* IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
+
+ dev_set_drvdata(sdhcinfo->dev, dhdp);
+
+ if (!sdhcinfo->oob_irq_registered) {
+ SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__,
+ (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
+ /* Refer to customer Host IRQ docs about proper irqflags definition */
+ error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
+ "bcmsdh_sdmmc", NULL);
+ if (error)
+ return -ENODEV;
+
+ enable_irq_wake(sdhcinfo->oob_irq);
+ sdhcinfo->oob_irq_registered = TRUE;
+ sdhcinfo->oob_irq_enable_flag = TRUE;
+ }
+
+ return 0;
+}
+
+void bcmsdh_set_irq(int flag)
+{
+ if (sdhcinfo->oob_irq_registered && sdhcinfo->oob_irq_enable_flag != flag) {
+ SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+ sdhcinfo->oob_irq_enable_flag = flag;
+ if (flag) {
+ enable_irq(sdhcinfo->oob_irq);
+ enable_irq_wake(sdhcinfo->oob_irq);
+ } else {
+ disable_irq_wake(sdhcinfo->oob_irq);
+ disable_irq(sdhcinfo->oob_irq);
+ }
+ }
+}
+
+void bcmsdh_unregister_oob_intr(void)
+{
+ SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+
+ if (sdhcinfo->oob_irq_registered == TRUE) {
+ bcmsdh_set_irq(FALSE);
+ free_irq(sdhcinfo->oob_irq, NULL);
+ sdhcinfo->oob_irq_registered = FALSE;
+ }
+}
+#endif /* defined(OOB_INTR_ONLY) */
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel; /* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor; /* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok; /* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 000000000000..7499a1ec55fd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1331 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c 282820 2011-09-09 15:40:35Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 512; /* Default blocksize */
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK 0x03
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+ int err_ret;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Enable Function 1 */
+ sdio_claim_host(gInstance->func[1]);
+ err_ret = sdio_enable_func(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+ }
+
+ return FALSE;
+}
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+ int err_ret;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (gInstance == NULL) {
+ sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+ return NULL;
+ }
+
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
+ if (sdioh_sdmmc_osinit(sd) != 0) {
+ sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+
+ gInstance->sd = sd;
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[1]);
+
+ sd->client_block_size[1] = 64;
+ err_ret = sdio_set_block_size(gInstance->func[1], 64);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(gInstance->func[1]);
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
+ sd_f2_blocksize));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (sd) {
+
+ /* Disable Function 2 */
+ sdio_claim_host(gInstance->func[2]);
+ sdio_disable_func(gInstance->func[2]);
+ sdio_release_host(gInstance->func[2]);
+
+ /* Disable Function 1 */
+ if (gInstance->func[1]) {
+ sdio_claim_host(gInstance->func[1]);
+ sdio_disable_func(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ }
+
+ gInstance->func[1] = NULL;
+ gInstance->func[2] = NULL;
+
+ /* deregister irq */
+ sdioh_sdmmc_osfree(sd);
+
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(void)
+{
+ uint8 reg;
+ int err;
+
+ if (gInstance->func[0]) {
+ sdio_claim_host(gInstance->func[0]);
+
+ reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(gInstance->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Enable F1 and F2 interrupts, set master enable */
+ reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);
+
+ sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(gInstance->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(int func)
+{
+ uint8 reg;
+ int err;
+
+ if (gInstance->func[func]) {
+ sdio_claim_host(gInstance->func[func]);
+ reg = sdio_readb(gInstance->func[func], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(gInstance->func[func]);
+ return SDIOH_API_RC_FAIL;
+ }
+#if defined(HW_OOB)
+ reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#else
+ reg &= ~(1 << func);
+#endif
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+ sdio_writeb(gInstance->func[func], reg, SDIOD_CCCR_INTEN, &err);
+
+ sdio_release_host(gInstance->func[func]);
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ if (fn == NULL) {
+ sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+
+ /* register and unmask irq */
+ if (gInstance->func[2]) {
+ sdio_claim_host(gInstance->func[2]);
+ sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ if (gInstance->func[1]) {
+ sdio_claim_host(gInstance->func[1]);
+ sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_release_host(gInstance->func[1]);
+ }
+#elif defined(HW_OOB)
+ sdioh_enable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+ if (gInstance->func[1]) {
+ sdioh_disable_func_intr(1);
+ /*Wait for the pending interrupts to be cleared*/
+ msleep(300);
+ /* register and unmask irq */
+ sdio_claim_host(gInstance->func[1]);
+ sdio_release_irq(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ }
+
+ if (gInstance->func[2]) {
+ sdioh_disable_func_intr(2);
+ /*Wait for the pending interrupts to be cleared*/
+ msleep(300);
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+ sdio_release_irq(gInstance->func[2]);
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+ sdioh_disable_func_intr(0);
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 },
+ {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
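+
+/*
+ * Illustrative sketch (not part of the original driver): the sd_blocksize
+ * iovar carries the function number and block size packed into one 32-bit
+ * value, matching the ((fn << 16) | size) decode in IOV_SVAL(IOV_BLOCKSIZE)
+ * below.
+ */
+#if 0
+static inline uint32 sd_blocksize_pack(uint fn, uint16 size)
+{
+	/* upper 16 bits: function number, lower 16 bits: block size */
+	return ((uint32)fn << 16) | size;
+}
+#endif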
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode work with DMA */
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ si->client_block_size[func] = blksize;
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_RXCHAIN):
+ int_val = FALSE;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ si->use_client_ints = (bool)int_val;
+ if (si->use_client_ints)
+ si->intmask |= CLIENT_INTR;
+ else
+ si->intmask &= ~CLIENT_INTR;
+
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+ else if (sd_ptr->offset & 2)
+ int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+ else
+ int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = 0;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+ SDIOH_API_RC status;
+ uint8 data;
+
+ if (enable)
+ data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; /* enable hw oob interrupt */
+ else
+ data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ /* Needed for Android Linux Kernel 2.6.35 */
+ data |= SDIO_SEPINT_ACT_HI; /* Active HIGH */
+#endif
+
+ status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+ return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
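+
+/*
+ * Worked example for the pointer decode above (illustrative only): if the
+ * three CCCR pointer bytes read LSB-first are 0x34, 0x12, 0x01, the low 24
+ * bits of scratch hold 0x011234 after ltoh32(), and masking with 0x0001FFFF
+ * yields the CIS address 0x11234.
+ */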
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int err_ret;
+
+ sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ if(rw) { /* CMD52 Write */
+ if (func == 0) {
+ /* Can only directly write to some F0 registers. Handle F2 enable
+ * as a special case.
+ */
+ if (regaddr == SDIOD_CCCR_IOEN) {
+ if (gInstance->func[2]) {
+ sdio_claim_host(gInstance->func[2]);
+ if (*byte & SDIO_FUNC_ENABLE_2) {
+ /* Enable Function 2 */
+ err_ret = sdio_enable_func(gInstance->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+ err_ret));
+ }
+ } else {
+ /* Disable Function 2 */
+ err_ret = sdio_disable_func(gInstance->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+ err_ret));
+ }
+ }
+ sdio_release_host(gInstance->func[2]);
+ }
+ }
+#if defined(MMC_SDIO_ABORT)
+ /* to allow abort command through F1 */
+ else if (regaddr == SDIOD_CCCR_IOABORT) {
+ sdio_claim_host(gInstance->func[func]);
+ /*
+ * this sdio_writeb() call may be replaced with another API,
+ * depending upon MMC driver changes.
+ * For now, this is a temporary approach.
+ */
+ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+#endif /* MMC_SDIO_ABORT */
+ else if (regaddr < 0xF0) {
+ sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+ } else {
+ /* Claim host controller, perform F0 write, and release */
+ sdio_claim_host(gInstance->func[func]);
+ sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+ } else {
+ /* Claim host controller, perform Fn write, and release */
+ sdio_claim_host(gInstance->func[func]);
+ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+ } else { /* CMD52 Read */
+ /* Claim host controller, perform Fn read, and release */
+ sdio_claim_host(gInstance->func[func]);
+
+ if (func == 0) {
+ *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+ } else {
+ *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+ }
+
+ sdio_release_host(gInstance->func[func]);
+ }
+
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int err_ret = SDIOH_API_RC_FAIL;
+
+ if (func == 0) {
+ sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[func]);
+
+ if(rw) { /* Write */
+ if (nbytes == 4) {
+ sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+ } else if (nbytes == 2) {
+ sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ } else { /* Read */
+ if (nbytes == 4) {
+ *word = sdio_readl(gInstance->func[func], addr, &err_ret);
+ } else if (nbytes == 2) {
+ *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ }
+
+ /* Release host controller */
+ sdio_release_host(gInstance->func[func]);
+
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+ rw ? "Write" : "Read", err_ret));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, void *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ uint32 SGCount = 0;
+ int err_ret = 0;
+
+ void *pnext;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(pkt);
+ DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[func]);
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ uint pkt_len = PKTLEN(sd->osh, pnext);
+ pkt_len += 3;
+ pkt_len &= 0xFFFFFFFC;
+
+#ifdef CONFIG_MMC_MSM7X00A
+ if ((pkt_len % 64) == 32) {
+ sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+ pkt_len += 32;
+ }
+#endif /* CONFIG_MMC_MSM7X00A */
+ /* Make sure the packet is aligned properly. If it isn't, then this
+ * is the fault of sdioh_request_buffer() which is supposed to give
+ * us something we can work with.
+ */
+ ASSERT(((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) == 0);
+
+ if ((write) && (!fifo)) {
+ err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ pkt_len);
+ } else if (write) {
+ err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ pkt_len);
+ } else if (fifo) {
+ err_ret = sdio_readsb(gInstance->func[func],
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ addr,
+ pkt_len);
+ } else {
+ err_ret = sdio_memcpy_fromio(gInstance->func[func],
+ ((uint8*)PKTDATA(sd->osh, pnext)),
+ addr,
+ pkt_len);
+ }
+
+ if (err_ret) {
+ sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len, err_ret));
+ } else {
+ sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len));
+ }
+
+ if (!fifo) {
+ addr += pkt_len;
+ }
+ SGCount ++;
+
+ }
+
+ /* Release host controller */
+ sdio_release_host(gInstance->func[func]);
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ SDIOH_API_RC Status;
+ void *mypkt = NULL;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Case 1: we don't have a packet. */
+ if (pkt == NULL) {
+ sd_data(("%s: Creating new %s Packet, len=%d\n",
+ __FUNCTION__, write ? "TX" : "RX", buflen_u));
+#ifdef DHD_USE_STATIC_BUF
+ if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#else
+ if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+ sd_err(("%s: PKTGET failed: len %d\n",
+ __FUNCTION__, buflen_u));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write) {
+ bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
+ }
+
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write) {
+ bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
+ }
+#ifdef DHD_USE_STATIC_BUF
+ PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+ PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+ } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
+ /* Case 2: We have a packet, but it is unaligned. */
+
+ /* In this case, we cannot have a chain. */
+ ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+ sd_data(("%s: Creating aligned %s Packet, len=%d\n",
+ __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
+#ifdef DHD_USE_STATIC_BUF
+ if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#else
+ if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+ sd_err(("%s: PKTGET failed: len %d\n",
+ __FUNCTION__, PKTLEN(sd->osh, pkt)));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write) {
+ bcopy(PKTDATA(sd->osh, pkt),
+ PKTDATA(sd->osh, mypkt),
+ PKTLEN(sd->osh, pkt));
+ }
+
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write) {
+ bcopy(PKTDATA(sd->osh, mypkt),
+ PKTDATA(sd->osh, pkt),
+ PKTLEN(sd->osh, mypkt));
+ }
+#ifdef DHD_USE_STATIC_BUF
+ PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+ PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+ } else { /* case 3: We have a packet and it is aligned. */
+ sd_data(("%s: Aligned %s Packet, direct DMA\n",
+ __FUNCTION__, write ? "Tx" : "Rx"));
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+ }
+
+ return (Status);
+}
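+
+/*
+ * Alignment note for the cases above (illustrative only): with DMA_ALIGN_MASK
+ * of 0x03, a packet whose data pointer ends in 0x2 fails the
+ * (addr & DMA_ALIGN_MASK) == 0 test and is copied into a freshly allocated,
+ * aligned packet (case 2); a pointer ending in 0x0, 0x4, 0x8 or 0xC is
+ * DMA-able as-is (case 3).
+ */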
+
+/* this function performs "abort" for both of host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+ char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+ /* issue abort cmd52 command through F1 */
+ sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp = 0;
+
+ sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+ *data = temp;
+ *data &= 0xff;
+ sd_data(("%s: byte read data=0x%02x\n",
+ __FUNCTION__, *data));
+ } else {
+ sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+ if (regsize == 2)
+ *data &= 0xffff;
+
+ sd_data(("%s: word read data=0x%08x\n",
+ __FUNCTION__, *data));
+ }
+
+ return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+ sd = gInstance->sd;
+
+ ASSERT(sd != NULL);
+ sdio_release_host(gInstance->func[0]);
+
+ if (sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else {
+ sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+
+ sdio_claim_host(gInstance->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+
+ sd = gInstance->sd;
+
+ ASSERT(sd != NULL);
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp;
+
+ temp = data & 0xff;
+ sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+ sd_data(("%s: byte write data=0x%02x\n",
+ __FUNCTION__, data));
+ } else {
+ if (regsize == 2)
+ data &= 0xffff;
+
+ sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
+
+ sd_data(("%s: word write data=0x%08x\n",
+ __FUNCTION__, data));
+ }
+
+ return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *si, int stage)
+{
+ int ret;
+ sdioh_info_t *sd = gInstance->sd;
+
+ /* This must be done in stages: the interrupt cannot be enabled until
+ firmware download is complete, otherwise it would interfere with the
+ polled SDIO accesses
+ */
+ if (gInstance->func[0]) {
+ if (stage == 0) {
+ /* Since power to the chip was cut, the device must be
+ re-enumerated. Set the block size and enable
+ function 1 in preparation for
+ downloading the firmware
+ */
+ /* sdio_reset_comm() has been fixed in the latest kernel/msm.git for Linux
+ 2.6.27. Implementations prior to that are buggy and need Broadcom's
+ patch
+ */
+ if ((ret = sdio_reset_comm(gInstance->func[0]->card)))
+ sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+ else {
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[1]);
+
+ sd->client_block_size[1] = 64;
+ if (sdio_set_block_size(gInstance->func[1], 64)) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(gInstance->func[1]);
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ if (sdio_set_block_size(gInstance->func[2],
+ sd_f2_blocksize)) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+ "blocksize to %d\n", sd_f2_blocksize));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+ }
+ } else {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(gInstance->func[0]);
+ sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_enable_func_intr();
+#endif
+ bcmsdh_oob_intr_set(TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+
+ return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *si)
+{
+ /* The MSM7201A Android SDIO stack has an interrupt bug:
+ it polls internally within the SDIO stack,
+ which causes problems when the device is turned off. So
+ unregister the interrupt with the SDIO stack to stop the
+ polling
+ */
+ if (gInstance->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(gInstance->func[0]);
+ sdio_release_irq(gInstance->func[1]);
+ sdio_release_irq(gInstance->func[2]);
+ sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_disable_func_intr(0);
+#endif
+ bcmsdh_oob_intr_set(FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+ return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ return (1);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 000000000000..726b6391353d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,332 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c,v 1.8.6.2 2011-02-01 18:38:36 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+
+#include <bcmsdh_sdmmc.h>
+
+#include <dhd_dbg.h>
+
+#ifdef WL_CFG80211
+extern void wl_cfg80211_set_sdio_func(void *func);
+#endif
+
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern int dhd_os_check_wakelock(void *dhdp);
+extern int dhd_os_check_if_up(void *dhdp);
+extern void *bcmsdh_get_drvdata(void);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+
+extern volatile bool dhd_mmc_suspend;
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ static struct sdio_func sdio_func_0;
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_trace(("sdio_device: 0x%04x\n", func->device));
+ sd_trace(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 1) {
+ sdio_func_0.num = 0;
+ sdio_func_0.card = func->card;
+ gInstance->func[0] = &sdio_func_0;
+ if(func->device == 0x4) { /* 4318 */
+ gInstance->func[2] = NULL;
+ sd_trace(("NIC found, calling bcmsdh_probe...\n"));
+ ret = bcmsdh_probe(&func->dev);
+ }
+ }
+
+ gInstance->func[func->num] = func;
+
+ if (func->num == 2) {
+#ifdef WL_CFG80211
+ wl_cfg80211_set_sdio_func(func);
+#endif
+ sd_trace(("F2 found, calling bcmsdh_probe...\n"));
+ ret = bcmsdh_probe(&func->dev);
+ }
+
+ return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 2) {
+ sd_trace(("F2 found, calling bcmsdh_remove...\n"));
+ bcmsdh_remove(&func->dev);
+ } else if (func->num == 1) {
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ gInstance->func[1] = NULL;
+ }
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+
+ if (func->num != 2)
+ return 0;
+ if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
+ return -EBUSY;
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(0);
+#endif
+ dhd_mmc_suspend = TRUE;
+ smp_mb();
+
+ return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+
+ if (func->num != 2)
+ return 0;
+ dhd_mmc_suspend = FALSE;
+#if defined(OOB_INTR_ONLY)
+ if (dhd_os_check_if_up(bcmsdh_get_drvdata()))
+ bcmsdh_oob_intr_set(1);
+#endif
+ smp_mb();
+ return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+ .suspend = bcmsdh_sdmmc_suspend,
+ .resume = bcmsdh_sdmmc_resume,
+};
+#endif
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+ .probe = bcmsdh_sdmmc_probe,
+ .remove = bcmsdh_sdmmc_remove,
+ .name = "bcmsdh_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+ .drv = {
+ .pm = &bcmsdh_sdmmc_pm_ops,
+ },
+#endif
+};
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+};
+
+
+int
+sdioh_sdmmc_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ return BCME_OK;
+}
+
+void
+sdioh_sdmmc_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+#if !defined(OOB_INTR_ONLY)
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable) {
+ sdioh_sdmmc_devintr_on(sd);
+ } else {
+ sdioh_sdmmc_devintr_off(sd);
+ }
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+ int error = 0;
+ sdio_function_init();
+ return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+ sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+ */
+int sdio_function_init(void)
+{
+ int error = 0;
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+
+ gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
+ if (!gInstance)
+ return -ENOMEM;
+
+ error = sdio_register_driver(&bcmsdh_sdmmc_driver);
+
+ return error;
+}
+
+/*
+ * module cleanup
+ */
+extern int bcmsdh_remove(struct device *dev);
+void sdio_function_cleanup(void)
+{
+ sd_trace(("%s Enter\n", __FUNCTION__));
+
+
+ sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+
+ if (gInstance)
+ kfree(gInstance);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 000000000000..fbdd7cd2d19b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,1967 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c,v 1.277.2.18 2011-01-26 02:32:08 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+void *_bcmutils_dummy_fn = NULL;
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ if (len < 0)
+ len = 4096; /* "infinite" */
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(PKTDATA(osh, p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(buf, PKTDATA(osh, p) + offset, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+ uint total;
+
+ total = 0;
+ for (; p; p = PKTNEXT(osh, p))
+ total += PKTLEN(osh, p);
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p))
+ cnt++;
+
+ return cnt;
+}
+
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
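+/*
+ * Usage sketch (illustrative only): a queue with four precedences, enqueuing
+ * at precedence 2 and draining in priority order with the routines defined
+ * below. 'pkt' is whatever OSL packet pointer the caller owns and tx() is a
+ * hypothetical consumer.
+ *
+ *	struct pktq q;
+ *	void *p;
+ *	int prec;
+ *
+ *	pktq_init(&q, 4, 256);			// 4 precedences, max 256 pkts
+ *	pktq_penq(&q, 2, pkt);			// append 'pkt' at precedence 2
+ *	while ((p = pktq_deq(&q, &prec)) != NULL)
+ *		tx(p, prec);			// highest non-empty precedence first
+ */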
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+ }
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max = (uint16)max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+ int prec;
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir, fn, arg);
+ if (fn == NULL)
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+#endif /* BCMDRIVER */
+
+const unsigned char bcm_ctype[] = {
+
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */
+ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+ _BCM_C, /* 8-15 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */
+ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */
+ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */
+ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */
+ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */
+ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+ _BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */
+ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+ _BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
+ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(char *cp, char **endp, uint base)
+{
+ ulong result, last_result = 0, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base)
+ {
+ result = result*base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus)
+ return (ulong)-1;
+ last_result = result;
+ cp++;
+ }
+
+ if (minus)
+ result = (ulong)(-(long)result);
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return (result);
+}
+
+int
+bcm_atoi(char *s)
+{
+ return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char*
+bcmstrstr(char *haystack, char *needle)
+{
+ int len, nlen;
+ int i;
+
+ if ((haystack == NULL) || (needle == NULL))
+ return (haystack);
+
+ nlen = strlen(needle);
+ len = strlen(haystack) - nlen + 1;
+
+ for (i = 0; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return (&haystack[i]);
+ return (NULL);
+}
+
+char*
+bcmstrcat(char *dest, const char *src)
+{
+ char *p;
+
+ p = dest + strlen(dest);
+
+ while ((*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+char*
+bcmstrncat(char *dest, const char *src, uint size)
+{
+ char *endp;
+ char *p;
+
+ p = dest + strlen(dest);
+ endp = p + size;
+
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+* but keeps no static state, so it can be used on different strings or by
+* different callers at the same time. Each call modifies '*string' by
+* substituting a NUL character for the first delimiter that is encountered, and
+* updates 'string' to point to the char after the delimiter. Leading delimiters
+* are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+ unsigned char *str;
+ unsigned long map[8];
+ int count;
+ char *nextoken;
+
+ if (tokdelim != NULL) {
+ /* Prime the token delimiter */
+ *tokdelim = '\0';
+ }
+
+ /* Clear control map */
+ for (count = 0; count < 8; count++) {
+ map[count] = 0;
+ }
+
+ /* Set bits in delimiter table */
+ do {
+ map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+ }
+ while (*delimiters++);
+
+ str = (unsigned char*)*string;
+
+ /* Find beginning of token (skip over leading delimiters). Note that
+ * there is no token iff this loop sets str to point to the terminal
+ * null (*str == '\0')
+ */
+ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+ str++;
+ }
+
+ nextoken = (char*)str;
+
+ /* Find the end of the token. If it is not the end of the string,
+ * put a null there.
+ */
+ for (; *str; str++) {
+ if (map[*str >> 5] & (1 << (*str & 31))) {
+ if (tokdelim != NULL) {
+ *tokdelim = *str;
+ }
+
+ *str++ = '\0';
+ break;
+ }
+ }
+
+ *string = (char*)str;
+
+ /* Determine if a token has been found. */
+ if (nextoken == (char *) str) {
+ return NULL;
+ }
+ else {
+ return nextoken;
+ }
+}
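+/*
+ * Usage sketch (illustrative only; the buffer contents and option names are
+ * made up): splitting a "name=value name=value" string on spaces and '='.
+ *
+ *	char buf[] = "ssid=test wsec=1 auth=0";
+ *	char *cursor = buf;
+ *	char *tok;
+ *
+ *	while ((tok = bcmstrtok(&cursor, " =", NULL)) != NULL)
+ *		printf("token: %s\n", tok);	// ssid, test, wsec, 1, auth, 0
+ *
+ * All parsing state lives in 'cursor', so several such loops may run over
+ * different buffers concurrently.
+ */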
+
+
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose: Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+ char dc, sc;
+
+ while (*s2 && *s1) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ }
+
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose: Compare two strings case-insensitively, up to a maximum of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+ char dc, sc;
+
+ while (*s2 && *s1 && cnt) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ cnt--;
+ }
+
+ if (!cnt) return 0;
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(char *p, struct ether_addr *ea)
+{
+ int i = 0;
+
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &p, 16);
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
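+/*
+ * Usage sketch (illustrative only; the address is made up):
+ *
+ *	char mac[] = "00:90:4c:c5:12:38";
+ *	struct ether_addr ea;
+ *
+ *	if (bcm_ether_atoe(mac, &ea))
+ *		... ea.octet[] now holds 00 90 4c c5 12 38 ...
+ *
+ * The return value is TRUE only when six colon-separated hex octets were
+ * parsed.
+ */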
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+ ulong copyct = 1;
+ ushort i;
+
+ if (abuflen == 0)
+ return 0;
+
+ /* wbuflen is in bytes */
+ wbuflen /= sizeof(ushort);
+
+ for (i = 0; i < wbuflen; ++i) {
+ if (--abuflen == 0)
+ break;
+ *abuf++ = (char) *wbuf++;
+ ++copyct;
+ }
+ *abuf = '\0';
+
+ return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+ static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x";
+ snprintf(buf, 18, template,
+ ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff,
+ ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff);
+ return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+ snprintf(buf, 16, "%d.%d.%d.%d",
+ ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+ return (buf);
+}
+
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+ uint i;
+
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+ void *p;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ for (p = p0; p; p = PKTNEXT(osh, p))
+ prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif
+
+/* Takes an Ethernet frame and sets the out-of-band PKTPRIO.
+ * Also updates the in-place VLAN tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata;
+ int priority = 0;
+ int rc = 0;
+
+ pktdata = (uint8 *) PKTDATA(NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+ eh = (struct ether_header *) pktdata;
+
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
+ uint16 vlan_tag;
+ int vlan_prio, dscp_prio = 0;
+
+ evh = (struct ethervlan_header *)eh;
+
+ vlan_tag = ntoh16(evh->vlan_tag);
+ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+ if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ /* DSCP priority gets precedence over 802.1P (vlan tag) */
+ if (dscp_prio != 0) {
+ priority = dscp_prio;
+ rc |= PKTPRIO_VDSCP;
+ } else {
+ priority = vlan_prio;
+ rc |= PKTPRIO_VLAN;
+ }
+ /*
+ * If the DSCP priority is not the same as the VLAN priority,
+ * then overwrite the priority field in the VLAN tag with the
+ * DSCP priority value. This is required for Linux APs because
+ * the VLAN driver on Linux overwrites the skb->priority field
+ * with the priority value in the VLAN tag.
+ */
+ if (update_vtag && (priority != vlan_prio)) {
+ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+ evh->vlan_tag = hton16(vlan_tag);
+ rc |= PKTPRIO_UPD;
+ }
+ } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ether_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ rc |= PKTPRIO_DSCP;
+ }
+
+ ASSERT(priority >= 0 && priority <= MAXPRIO);
+ PKTSETPRIO(pkt, priority);
+ return (rc | priority);
+}
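+/*
+ * Worked example (illustrative only, assuming IPV4_TOS_PREC_SHIFT is 5, i.e.
+ * the shift that isolates the three IP-precedence bits): an IPv4 frame with
+ * TOS 0xA0 (DSCP CS5) inside an 802.1Q tag carrying user priority 2. The DSCP
+ * precedence (0xA0 >> 5 == 5) is non-zero, so it wins over the VLAN priority;
+ * the packet priority becomes 5 and, with update_vtag TRUE, the tag's priority
+ * bits are rewritten to 5 as well. The function would then return
+ * PKTPRIO_VDSCP | PKTPRIO_UPD | 5.
+ */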
+
+#ifndef BCM_BOOTLOADER
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings */
+const char *
+bcmerrorstr(int bcmerror)
+{
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+ if (bcmerror > 0 || bcmerror < BCME_LAST) {
+ snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+ return bcm_undeferrstr;
+ }
+
+ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+ return bcmerrorstrtable[-bcmerror];
+}
+
+#endif /* !BCM_BOOTLOADER */
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
+
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
+
+ ASSERT(table != NULL);
+
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
+ }
+ /* ran to end of table */
+
+ return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < (int)sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ } else if (len) {
+ /* Set is an action w/o parameters */
+ bcmerror = BCME_BUFTOOLONG;
+ }
+ break;
+
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+#endif /* BCMDRIVER */
+
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+)
+{
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
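+/*
+ * Usage sketch (illustrative only): generating and checking a CRC-8 over a
+ * buffer, following the header comment above. CRC8_INIT_VALUE and
+ * CRC8_GOOD_VALUE are the constants that comment refers to.
+ *
+ *	uint8 crc = hndcrc8(buf, len, CRC8_INIT_VALUE);
+ *	buf[len] = ~crc;			// append complemented CRC
+ *
+ *	// checking side: run over the data plus the appended CRC byte
+ *	if (hndcrc8(buf, len + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE)
+ *		... data is intact ...
+ */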
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 + x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++);
+ return crc;
+}
+
+static const uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+ uint8 *pend;
+#ifdef __mips__
+ uint8 tmp[4];
+ ulong *tptr = (ulong *)tmp;
+
+ /* in case the beginning of the buffer isn't aligned */
+ pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc);
+ nbytes -= (pend - pdata);
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+
+ /* handle bulk of data as 32-bit words */
+ pend = pdata + (nbytes & 0xfffffffc);
+ while (pdata < pend) {
+ *tptr = *(ulong *)pdata;
+ pdata += sizeof(ulong *);
+ CRC_INNER_LOOP(32, crc, tmp[0]);
+ CRC_INNER_LOOP(32, crc, tmp[1]);
+ CRC_INNER_LOOP(32, crc, tmp[2]);
+ CRC_INNER_LOOP(32, crc, tmp[3]);
+ }
+
+ /* 1-3 bytes at end of buffer */
+ pend = pdata + (nbytes & 0x03);
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+#else
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+#endif /* __mips__ */
+
+ return crc;
+}
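+/*
+ * Usage sketch (illustrative only): accumulating a CRC-32 over a chained
+ * packet by feeding each fragment in turn, per the comment above. 'osh' and
+ * 'pkt' are the usual OSL handle and packet chain head.
+ *
+ *	uint32 crc = CRC32_INIT_VALUE;
+ *	void *p;
+ *
+ *	for (p = pkt; p; p = PKTNEXT(osh, p))
+ *		crc = hndcrc32(PKTDATA(osh, p), PKTLEN(osh, p), crc);
+ */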
+
+#ifdef notdef
+#define CLEN 1499 /* CRC Length */
+#define CBUFSIZ (CLEN+4)
+#define CNBUFS 5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+ uint j, k, l;
+ uint8 *buf;
+ uint len[CNBUFS];
+ uint32 crcr;
+ uint32 crc32tv[CNBUFS] =
+ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+ /* step through all possible alignments */
+ for (l = 0; l <= 4; l++) {
+ for (j = 0; j < CNBUFS; j++) {
+ len[j] = CLEN;
+ for (k = 0; k < len[j]; k++)
+ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+ }
+
+ for (j = 0; j < CNBUFS; j++) {
+ crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+ ASSERT(crcr == crc32tv[j]);
+ }
+ }
+
+ MFREE(buf, CBUFSIZ*CNBUFS);
+ return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ /* advance to next elt */
+ len = elt->len;
+ elt = (bcm_tlv_t*)(elt->data + len);
+ *buflen -= (2 + len);
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ return elt;
+}
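+/*
+ * Usage sketch (illustrative only): walking every TLV in a buffer with
+ * bcm_next_tlv(). 'buf'/'buflen' describe the raw id/len/value stream and
+ * handle_tlv() is a hypothetical consumer.
+ *
+ *	bcm_tlv_t *elt = (bcm_tlv_t *)buf;
+ *	int remaining = buflen;
+ *
+ *	if (bcm_valid_tlv(elt, remaining)) {
+ *		do {
+ *			handle_tlv(elt->id, elt->len, elt->data);
+ *		} while ((elt = bcm_next_tlv(elt, &remaining)) != NULL);
+ *	}
+ */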
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+ totlen -= (len + 2);
+ }
+
+ return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ uint id = elt->id;
+ int len = elt->len;
+
+ /* Punt if we start seeing IDs greater than the target key */
+ if (id > key)
+ return (NULL);
+
+ /* validate remaining totlen */
+ if ((id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+ totlen -= (len + 2);
+ }
+ return NULL;
+}
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+ defined(DHD_DEBUG)
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+ int i;
+ char* p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ uint32 bit;
+ const char* name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+ /* count btwn flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+ /* copy btwn flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
+ len -= slen;
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len < 2)
+ p -= 2 - len; /* overwrite last char */
+ p += snprintf(p, 2, ">");
+ }
+
+ return (int)(p - buf);
+}
+#endif
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+ defined(DHD_DEBUG) || defined(WLMEDIA_PEAKRATE)
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const uint8 *src = (const uint8*)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+#endif
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+ char line[128], *p;
+ int len = sizeof(line);
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+ "NONE",
+ "WEP1",
+ "TKIP",
+ "WEP128",
+ "AES_CCM",
+ "AES_OCB_MSDU",
+ "AES_OCB_MPDU",
+ "NALG"
+ "UNDEF",
+ "UNDEF",
+ "UNDEF",
+ "UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+ return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+ return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+ uint len, max_len;
+ char c;
+
+ len = strlen(buf);
+
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+ char *buf, uint32 bufsize)
+{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
+
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
+ break;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32)len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
+ }
+ return filled_len;
+}
+
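+/* Pack an iovar request: copies the NUL-terminated iovar name into 'buf',
+ * appends 'datalen' bytes of 'data' immediately after the terminator, and
+ * returns the total length used (name length + 1 + datalen), or 0 if
+ * 'buflen' is too small. The result is the usual "name\0value" iovar
+ * buffer layout.
+ */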
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
+
+ /* scale the qdBm index up into the range of the table (0 to QDBM_TABLE_LEN-1),
+ * where each offset of 40 qdBm (10 dB) corresponds to a factor of 10 in mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
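+/*
+ * Worked example (illustrative only): qdbm = 68, i.e. 17 dBm.
+ * idx = 68 - QDBM_OFFSET = -85; adding 40 three times gives idx = 35 with
+ * factor = 1000. nqdBm_to_mW_map[35] = 50119, so the result is
+ * (50119 + 500) / 1000 = 50 mW, matching 10^(17/10) = 50.1 mW.
+ */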
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+ uint8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+ nqdBm_to_mW_map[qdbm])/2;
+ if (mw_uint < boundary)
+ break;
+ }
+
+ qdbm += (uint8)offset;
+
+ return (qdbm);
+}
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+
+ /* Non-C99-compliant vsnprintf implementations return -1,
+ * C99-compliant ones return r >= b->size,
+ * and bcmstdlib returns 0; handle all three.
+ */
+ if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
+ b->size = 0;
+ } else {
+ b->size -= r;
+ b->buf += r;
+ }
+
+ va_end(ap);
+
+ return r;
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+ int i;
+
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break;
+ amount = 1;
+ }
+}
+
+int
+bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes)
+{
+ int i;
+
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
+
+void
+bcm_print_bytes(char *name, const uchar *data, int len)
+{
+ int i;
+ int per_line = 0;
+
+ printf("%s: %d \n", name ? name : "", len);
+ for (i = 0; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
+ }
+ }
+ printf("\n");
+}
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
+
+ if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+ ASSERT(p < endp);
+
+ return (int)(p - buf);
+}
+#endif
+
+#endif /* BCMDRIVER */
+
+/*
+ * process_nvram_vars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Also accepts NVRAM files which are already in the format <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs. The end of the buffer is marked by two NULs.
+ */
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+ char *dp;
+ bool findNewline;
+ int column;
+ unsigned int buf_len, n;
+ unsigned int pad = 0;
+
+ dp = varbuf;
+
+ findNewline = FALSE;
+ column = 0;
+
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
+ }
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = (unsigned int)(dp - varbuf);
+ if (buf_len % 4) {
+ pad = 4 - buf_len % 4;
+ if (pad && (buf_len + pad <= len)) {
+ buf_len += pad;
+ }
+ }
+
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
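+/*
+ * Example (illustrative only; the variable names are made up): given the text
+ *
+ *	# board parameters\r\n
+ *	boardtype=0x4b9\r\n
+ *	\r\n
+ *	boardrev=0x1203\r\n
+ *
+ * process_nvram_vars() rewrites the buffer in place as
+ * "boardtype=0x4b9\0boardrev=0x1203\0", dropping the comment and blank lines,
+ * padding to a 4-byte multiple with NULs, and returning the resulting length.
+ */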
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi.c b/drivers/net/wireless/bcmdhd/bcmwifi.c
new file mode 100644
index 000000000000..70722170bdfd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi.c
@@ -0,0 +1,274 @@
+/*
+ * Misc utility routines used by kernel or app-level code.
+ * Contents are Wi-Fi-specific, for use by any kernel or app-level
+ * software that needs Wi-Fi-related helpers.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi.c,v 1.31.8.1 2010-08-03 17:47:05 Exp $
+ */
+
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif
+#include <bcmwifi.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>
+#endif
+
+
+
+
+
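+/*
+ * Convert a chanspec to its ASCII name, e.g. "11" for a 20 MHz channel or
+ * "36l"/"40u" for the lower/upper control sideband of a 40 MHz channel; an
+ * 'n' suffix marks a 10 MHz channel, and a 'b' (2.4 GHz) or 'a' (5 GHz) band
+ * letter is appended only when the channel number alone would not imply the
+ * band. 'buf' must hold at least 6 bytes. Returns 'buf'.
+ */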
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band, *bw, *sb;
+ uint channel;
+
+ band = "";
+ bw = "";
+ sb = "";
+ channel = CHSPEC_CHANNEL(chspec);
+
+ if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+ if (CHSPEC_IS40(chspec)) {
+ if (CHSPEC_SB_UPPER(chspec)) {
+ sb = "u";
+ channel += CH_10MHZ_APART;
+ } else {
+ sb = "l";
+ channel -= CH_10MHZ_APART;
+ }
+ } else if (CHSPEC_IS10(chspec)) {
+ bw = "n";
+ }
+
+
+ snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+ return (buf);
+}
+
+
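+/*
+ * Parse the ASCII channel form produced by wf_chspec_ntoa() back into a
+ * chanspec_t: a channel number, an optional 'a'/'b' band letter, and an
+ * optional 'n' (10 MHz) or 'l'/'u' (40 MHz lower/upper control sideband)
+ * suffix. Returns 0 if the string cannot be parsed.
+ */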
+chanspec_t
+wf_chspec_aton(char *a)
+{
+ char *endp = NULL;
+ uint channel, band, bw, ctl_sb;
+ char c;
+
+ channel = strtoul(a, &endp, 10);
+
+
+ if (endp == a)
+ return 0;
+
+ if (channel > MAXCHANNEL)
+ return 0;
+
+ band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+ a = endp;
+
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+
+
+ if (c == 'a' || c == 'b') {
+ band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+ a++;
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+ }
+
+
+ if (c == 'n') {
+ bw = WL_CHANSPEC_BW_10;
+ } else if (c == 'l') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+
+ if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+ channel += CH_10MHZ_APART;
+ else
+ return 0;
+ } else if (c == 'u') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+
+ if (channel > CH_20MHZ_APART)
+ channel -= CH_10MHZ_APART;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+
+done:
+ return (channel | band | bw | ctl_sb);
+}
+
+
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+
+ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+ return TRUE;
+
+ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+ return TRUE;
+
+
+ if (CHSPEC_IS20_UNCOND(chanspec)) {
+ if (!CHSPEC_SB_NONE(chanspec))
+ return TRUE;
+ } else {
+ if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+ uint8 ctl_chan;
+
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+
+ ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40);
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+
+ ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER);
+
+ ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ }
+
+ return ctl_chan;
+}
+
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+ chanspec_t ctl_chspec = 0;
+ uint8 channel;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return chspec;
+ } else {
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+ channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+ ctl_chspec |= CHSPEC_BAND(chspec);
+ }
+ return ctl_chspec;
+}
+
+
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base;
+ int offset;
+
+
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500)
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ else if (freq >= 5000 && freq <= 6000)
+ start_factor = WF_CHAN_FACTOR_5_G;
+ }
+
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+ return 14;
+
+ base = start_factor / 2;
+
+
+ if ((freq < base) || (freq > base + 1000))
+ return -1;
+
+ offset = freq - base;
+ ch = offset / 5;
+
+
+ if (offset != (ch * 5))
+ return -1;
+
+
+ if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+ return -1;
+
+ return ch;
+}
+
+
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+ int freq;
+
+ if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+ (ch > 200))
+ freq = -1;
+ else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+ freq = 2484;
+ else
+ freq = ch * 5 + start_factor / 2;
+
+ return freq;
+}
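
As a rough illustration of how the two conversion routines above pair up (not part of the patch; the function name below is hypothetical, and the usual bcmwifi.h values CH_10MHZ_APART == 2 and WF_CHAN_FACTOR_5_G == 10000 are assumed):

/* Hypothetical round-trip of a 40 MHz chanspec string. */
static void chanspec_roundtrip_example(void)
{
	char spec[] = "36l";	/* 5 GHz channel 36, 40 MHz, lower control sideband */
	char buf[6];
	chanspec_t cs;

	cs = wf_chspec_aton(spec);	/* stored channel becomes the 40 MHz centre, 36 + CH_10MHZ_APART = 38 */
	if (cs == 0 || wf_chspec_malformed(cs))
		return;			/* parse failure or inconsistent chanspec */

	wf_chspec_ntoa(cs, buf);	/* prints the control form "36l" again (at most 6 bytes incl. NUL) */

	/* Centre frequency of the 36/40 pair: 38 * 5 + 5000 = 5190 MHz */
	(void)wf_channel2mhz(CHSPEC_CHANNEL(cs), WF_CHAN_FACTOR_5_G);
}
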
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 000000000000..18d48d846d08
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,733 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h 290844 2011-10-20 08:54:39Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#if defined(CHROMIUMOS_COMPAT_WIRELESS)
+#include <linux/sched.h>
+#endif
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+
+#define ALL_INTERFACES 0xff
+
+#include <wlioctl.h>
+
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_cmn;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+ DHD_BUS_DOWN, /* Not ready for frame transfers */
+ DHD_BUS_LOAD, /* Download access only (CPU reset) */
+ DHD_BUS_DATA /* Ready for frame transfers */
+};
+
+/* Firmware requested operation mode */
+#define STA_MASK 0x0001
+#define HOSTAPD_MASK 0x0002
+#define WFD_MASK 0x0004
+#define SOFTAP_FW_MASK 0x0008
+
+/* max sequential rxcntl timeouts to set HANG event */
+#define MAX_CNTL_TIMEOUT 2
+
+enum dhd_bus_wake_state {
+ WAKE_LOCK_OFF,
+ WAKE_LOCK_PRIV,
+ WAKE_LOCK_DPC,
+ WAKE_LOCK_IOCTL,
+ WAKE_LOCK_DOWNLOAD,
+ WAKE_LOCK_TMOUT,
+ WAKE_LOCK_WATCHDOG,
+ WAKE_LOCK_LINK_DOWN_TMOUT,
+ WAKE_LOCK_PNO_FIND_TMOUT,
+ WAKE_LOCK_SOFTAP_SET,
+ WAKE_LOCK_SOFTAP_STOP,
+ WAKE_LOCK_SOFTAP_START,
+ WAKE_LOCK_SOFTAP_THREAD,
+ WAKE_LOCK_MAX
+};
+
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF,
+ DHD_PREALLOC_DATABUF,
+ DHD_PREALLOC_OSL_BUF
+};
+
+typedef enum {
+ DHD_IF_NONE = 0,
+ DHD_IF_ADD,
+ DHD_IF_DEL,
+ DHD_IF_CHANGE,
+ DHD_IF_DELETING
+} dhd_if_state_t;
+
+
+#if defined(DHD_USE_STATIC_BUF)
+
+uint8* dhd_os_prealloc(void *osh, int section, uint size);
+void dhd_os_prefree(void *osh, void *addr, uint size);
+#define DHD_OS_PREALLOC(osh, section, size) dhd_os_prealloc(osh, section, size)
+#define DHD_OS_PREFREE(osh, addr, size) dhd_os_prefree(osh, addr, size)
+
+#else
+
+#define DHD_OS_PREALLOC(osh, section, size) MALLOC(osh, size)
+#define DHD_OS_PREFREE(osh, addr, size) MFREE(osh, addr, size)
+
+#endif /* defined(DHD_USE_STATIC_BUF) */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+ osl_t *osh; /* OSL handle */
+ struct dhd_bus *bus; /* Bus module handle */
+ struct dhd_prot *prot; /* Protocol module handle */
+ struct dhd_info *info; /* Info module handle */
+ struct dhd_cmn *cmn; /* dhd_common module handle */
+
+ /* Internal dhd items */
+ bool up; /* Driver up/down (to OS) */
+ bool txoff; /* Transmit flow-controlled */
+ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */
+ enum dhd_bus_state busstate;
+ uint hdrlen; /* Total DHD header length (proto + bus) */
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ uint rxsz; /* Rx buffer size bus module should use */
+ uint8 wme_dp; /* wme discard priority */
+
+ /* Dongle media info */
+ bool iswl; /* Dongle-resident driver is wl */
+ ulong drv_version; /* Version of dongle-resident driver */
+ struct ether_addr mac; /* MAC address obtained from dongle */
+ dngl_stats_t dstats; /* Stats for dongle-based data */
+
+ /* Additional stats for the bus level */
+ ulong tx_packets; /* Data packets sent to dongle */
+ ulong tx_multicast; /* Multicast data packets sent to dongle */
+ ulong tx_errors; /* Errors in sending data to dongle */
+ ulong tx_ctlpkts; /* Control packets sent to dongle */
+ ulong tx_ctlerrs; /* Errors sending control frames to dongle */
+ ulong rx_packets; /* Packets sent up the network interface */
+ ulong rx_multicast; /* Multicast packets sent up the network interface */
+ ulong rx_errors; /* Errors processing rx data packets */
+ ulong rx_ctlpkts; /* Control frames processed from dongle */
+ ulong rx_ctlerrs; /* Errors in processing rx control frames */
+ ulong rx_dropped; /* Packets dropped locally (no memory) */
+ ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */
+ ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */
+
+ ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
+ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
+ ulong fc_packets; /* Number of flow control pkts recvd */
+
+ /* Last error return */
+ int bcmerror;
+ uint tickcnt;
+
+ /* Last error from dongle */
+ int dongle_error;
+
+ /* Suspend disable flag and "in suspend" flag */
+ int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+ int in_suspend; /* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;			/* PNO status: 1 = PNO enabled */
+#endif /* PNO_SUPPORT */
+	int dtim_skip;			/* DTIM skip; default 0 means wake on each DTIM */
+
+	/* Pkt filter definition */
+ char * pktfilter[100];
+ int pktfilter_count;
+
+ wl_country_t dhd_cspec; /* Current Locale info */
+ char eventmask[WL_EVENTING_MASK_LEN];
+ int op_mode; /* STA, HostAPD, WFD, SoftAP */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+ struct wake_lock wakelock[WAKE_LOCK_MAX];
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#endif
+
+ uint16 maxdatablks;
+#ifdef PROP_TXSTATUS
+ int wlfc_enabled;
+ void* wlfc_state;
+#endif
+ bool dongle_isolation;
+ int hang_was_sent;
+ int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
+ int txcnt_timeout; /* counter txcnt timeout to send HANG */
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+} dhd_pub_t;
+
+typedef struct dhd_cmn {
+ osl_t *osh; /* OSL handle */
+ dhd_pub_t *dhd;
+} dhd_cmn_t;
+
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+ #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define _DHD_PM_RESUME_WAIT(a, b) do {\
+ int retry = 0; \
+ SMP_RD_BARRIER_DEPENDS(); \
+ while (dhd_mmc_suspend && retry++ != b) { \
+ SMP_RD_BARRIER_DEPENDS(); \
+ wait_event_interruptible_timeout(a, !dhd_mmc_suspend, HZ/100); \
+ } \
+ } while (0)
+ #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0)
+ #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9999; \
+ while ((exp) && (countdown >= 10000)) { \
+ wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+ countdown -= 10000; \
+ } \
+ } while (0)
+
+ #else
+
+ #define DHD_PM_RESUME_WAIT_INIT(a)
+ #define DHD_PM_RESUME_WAIT(a)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a)
+ #define DHD_PM_RESUME_RETURN_ERROR(a)
+ #define DHD_PM_RESUME_RETURN
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a)
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) { \
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+ } while (0)
+
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+#ifndef DHDTHREAD
+#undef SPINWAIT_SLEEP
+#define SPINWAIT_SLEEP(a, exp, us) SPINWAIT(exp, us)
+#endif /* DHDTHREAD */
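
The resume-wait helpers above are meant to bracket bus accesses that must not race an in-progress MMC suspend. A minimal sketch of the intended pattern (not part of the patch; the function is hypothetical, and dhd_mmc_suspend is the flag the macros poll, defined in the SDIO glue):

/* Hypothetical caller: wait out an in-progress MMC suspend before
 * touching the bus, and give up if it never completes. */
static int example_bus_access(void)
{
	DHD_PM_RESUME_WAIT_INIT(example_wq);	/* declares and initializes a wait queue head */
	DHD_PM_RESUME_WAIT(example_wq);		/* polls dhd_mmc_suspend, up to ~200 attempts */
	DHD_PM_RESUME_RETURN_ERROR(-EBUSY);	/* still suspended: bail out */

	/* ... safe to issue SDIO transactions here ... */
	return 0;
}
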
+#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/* Wakelock Functions */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub, int val);
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_timeout_enable(pub, val)
+
+#define DHD_PACKET_TIMEOUT 1
+#define DHD_EVENT_TIMEOUT 2
+
+/* Interface operations (register, remove) should be atomic; use this lock to prevent
+ * races between wifi on/off and the interface operation functions.
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+typedef struct dhd_if_event {
+ uint8 ifidx;
+ uint8 action;
+ uint8 flags;
+ uint8 bssidx;
+ uint8 is_AP;
+} dhd_if_event_t;
+
+typedef enum dhd_attach_states
+{
+ DHD_ATTACH_STATE_INIT = 0x0,
+ DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+ DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+ DHD_ATTACH_STATE_ADD_IF = 0x4,
+ DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+ DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+ DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+ DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+ DHD_ATTACH_STATE_CFG80211 = 0x80,
+ DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+ DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID -1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID -2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* To allow osl_attach/detach calls from os-independent modules */
+osl_t *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(osl_t *osh);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void * dhd_os_open_image(char * filename);
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_dev_pno_reset(struct net_device *dev);
+extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local,
+ int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled);
+extern int dhd_dev_get_pno_status(struct net_device *dev);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+extern bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+
+#define DHD_UNICAST_FILTER_NUM 0
+#define DHD_BROADCAST_FILTER_NUM 1
+#define DHD_MULTICAST4_FILTER_NUM 2
+#define DHD_MULTICAST6_FILTER_NUM 3
+extern int net_os_set_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+#if defined(OOB_INTR_ONLY)
+extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
+#endif /* defined(OOB_INTR_ONLY) */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+typedef struct {
+ uint32 limit; /* Expiration time (usec) */
+ uint32 increment; /* Current expiration increment (usec) */
+ uint32 elapsed; /* Current elapsed time (usec) */
+ uint32 tick; /* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(struct dhd_pub *dhd_pub, int ifidx);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
+ wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+ int ifindex);
+
+extern struct dhd_cmn *dhd_common_init(uint16 devid, osl_t *osh);
+extern void dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn);
+
+extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+ char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
+extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf);
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+extern void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+extern void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+
+typedef enum cust_gpio_modes {
+ WLAN_RESET_ON,
+ WLAN_RESET_OFF,
+ WLAN_POWER_ON,
+ WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#define DHD_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE period is 55 sec, so the AP does not have to send keep-alive probe frames */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN 2048
+extern char fw_path[MOD_PARAM_PATHLEN];
+extern char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS 16
+#define DHD_DEL_IF -0xe
+#define DHD_BAD_IF -0xf
+
+#ifdef PROP_TXSTATUS
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+ /*
+ b[11 ] - 1 = this packet was sent in response to one time packet request,
+ do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+ b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+ b[9 ] - 1 = packet is host->firmware (transmit direction)
+ - 0 = packet received from firmware (firmware->host)
+ b[8 ] - 1 = packet was sent due to credit_request (pspoll),
+ packet does not count against FIFO credit.
+ - 0 = normal transaction, packet counts against FIFO credit
+ b[7 ] - 1 = AP, 0 = STA
+ b[6:4] - AC FIFO number
+ b[3:0] - interface index
+ */
+ uint16 if_flags;
+	/* Destination MAC address for this packet, so that individual
+	 * modules do not need to parse the packet to find it.
+	 */
+ uint8 dstn_ether[ETHER_ADDR_LEN];
+	/*
+	 * This 32-bit tag goes from host to device with every packet.
+	 */
+ uint32 htod_tag;
+ /* bus specific stuff */
+ union {
+ struct {
+ void* stuff;
+ uint32 thing1;
+ uint32 thing2;
+ } sd;
+ struct {
+ void* bus;
+ void* urb;
+ } usb;
+ } bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_IFMASK 0xf
+#define DHD_PKTTAG_IFTYPE_MASK 0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT 7
+#define DHD_PKTTAG_FIFO_MASK 0x7
+#define DHD_PKTTAG_FIFO_SHIFT 4
+
+#define DHD_PKTTAG_SIGNALONLY_MASK 0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT 10
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11
+
+#define DHD_PKTTAG_PKTDIR_MASK 0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT 9
+
+#define DHD_PKTTAG_CREDITCHECK_MASK 0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT 8
+
+#define DHD_PKTTAG_INVALID_FIFOID 0x7
+
+#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+ (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~DHD_PKTTAG_IFMASK) | ((if) & DHD_PKTTAG_IFMASK)
+#define DHD_PKTTAG_IF(tag) (((dhd_pkttag_t*)(tag))->if_flags & DHD_PKTTAG_IFMASK)
+
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+ (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+ (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+ (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+ (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+ (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+ (dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+int dhd_wlfc_enable(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(struct dhd_info *, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data);
+int dhd_wlfc_event(struct dhd_info *dhd);
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
+#endif
+
+#endif /* PROP_TXSTATUS */
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES 8
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#endif /* _dhd_h_ */
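
The DHD_PKTTAG_* accessors above pack everything into the 16-bit if_flags field of the tag (normally PKTTAG(p)). A minimal sketch of the intended set/get pairing (not part of the patch; the helper name is hypothetical and PROP_TXSTATUS is assumed enabled):

#ifdef PROP_TXSTATUS
/* Hypothetical helper: stamp a packet tag for host->firmware data on
 * interface 2, AC FIFO 1, STA role, normal credit accounting. */
static void pkttag_stamp_example(void *pkttag)
{
	DHD_PKTTAG_SETIF(pkttag, 2);		/* b[3:0]  interface index            */
	DHD_PKTTAG_SETFIFO(pkttag, 1);		/* b[6:4]  AC FIFO number             */
	DHD_PKTTAG_SETIFTYPE(pkttag, 0);	/* b[7]    0 = STA                    */
	DHD_PKTTAG_SETCREDITCHECK(pkttag, 0);	/* b[8]    0 = counts against credit  */
	DHD_PKTTAG_SETPKTDIR(pkttag, 1);	/* b[9]    1 = host -> firmware       */
	DHD_PKTTAG_SETSIGNALONLY(pkttag, 0);	/* b[10]   carries real data          */

	ASSERT(DHD_PKTTAG_IF(pkttag) == 2);
	ASSERT(DHD_PKTTAG_FIFO(pkttag) == 1);
	ASSERT(DHD_PKTTAG_PKTDIR(pkttag) == 1);
}
#endif /* PROP_TXSTATUS */
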
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c
new file mode 100644
index 000000000000..6b782ea4a4d2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.c
@@ -0,0 +1,335 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.c,v 1.10.4.2 2010-12-22 23:47:23 Exp $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+ amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+ uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+ uint len = sizeof(buf);
+ wl_ioctl_t ioc;
+
+ if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+ return BCME_BADLEN;
+
+ if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+ return BCME_BADLEN;
+
+ len = bcm_mkiovar("HCI_cmd",
+ (char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = TRUE;
+
+ return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+ int prec;
+ struct pktq *q;
+ uint count = 0;
+
+ q = dhd_bus_txq(pub->bus);
+ if (q == NULL)
+ return;
+
+ DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+ dhd_os_sdlock_txq(pub);
+
+ /* Walk through the txq and toss all HCI ACL data packets */
+ PKTQ_PREC_ITER(q, prec) {
+ void *head_pkt = NULL;
+
+ while (pktq_ppeek(q, prec) != head_pkt) {
+ void *pkt = pktq_pdeq(q, prec);
+ int ifidx;
+
+ PKTPULL(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+ dhd_prot_hdrpull(pub, &ifidx, pkt);
+
+ if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+ struct ether_header *eh =
+ (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+ if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+ struct dot11_llc_snap_header *lsh =
+ (struct dot11_llc_snap_header *)&eh[1];
+
+ if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+ DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+ ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+ amp_hci_ACL_data_t *ACL_data =
+ (amp_hci_ACL_data_t *)&lsh[1];
+ uint16 handle = ltoh16(ACL_data->handle);
+
+ if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+ PKTFREE(pub->osh, pkt, TRUE);
+ count ++;
+ continue;
+ }
+ }
+ }
+ }
+
+ dhd_prot_hdrpush(pub, ifidx, pkt);
+ PKTPUSH(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+
+ if (head_pkt == NULL)
+ head_pkt = pkt;
+ pktq_penq(q, prec, pkt);
+ }
+ }
+
+ dhd_os_sdunlock_txq(pub);
+
+ DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle HCI cmd locally.
+ * Return 0: continue to send the cmd across SDIO
+ * < 0: stop, fail
+ * > 0: stop, success
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+ int status = 0;
+
+ switch (ltoh16_ua((uint8 *)&cmd->opcode)) {
+ case HCI_Enhanced_Flush: {
+ eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+ dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+ break;
+ }
+ default:
+ break;
+ }
+
+ return status;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+ amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+ struct ether_header *eh;
+ struct dot11_llc_snap_header *lsh;
+ osl_t *osh = pub->osh;
+ uint len;
+ void *p;
+ int status;
+
+ if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+ DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+ return BCME_BADLEN;
+ }
+
+ if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+ DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+ len, cmd_len));
+ /* return BCME_BADLEN; */
+ }
+
+ p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+ if (p == NULL) {
+ DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+ return BCME_NOMEM;
+ }
+
+
+	/* intercept and handle the HCI cmd locally; free the packet we
+	 * already allocated if we are not going to send it
+	 */
+	if ((status = _dhd_bta_docmd(pub, cmd)) > 0) {
+		PKTFREE(osh, p, TRUE);
+		return 0;
+	} else if (status < 0) {
+		PKTFREE(osh, p, TRUE);
+		return status;
+	}
+
+ /* copy in HCI cmd */
+ PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+ bcopy(cmd, PKTDATA(osh, p), len);
+
+ /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+ PKTPUSH(osh, p, RFC1042_HDR_LEN);
+ eh = (struct ether_header *)PKTDATA(osh, p);
+ bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+ ETHER_SET_LOCALADDR(eh->ether_dhost);
+ bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+ eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+ lsh = (struct dot11_llc_snap_header *)&eh[1];
+ bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+ lsh->type = 0;
+
+ return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Send HCI ACL data to dongle via data channel */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+ amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf;
+ struct ether_header *eh;
+ struct dot11_llc_snap_header *lsh;
+ osl_t *osh = pub->osh;
+ uint len;
+ void *p;
+
+ if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+ DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+ return BCME_BADLEN;
+ }
+
+ if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) {
+ DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+ len, data_len));
+ /* return BCME_BADLEN; */
+ }
+
+ p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+ if (p == NULL) {
+ DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+ return BCME_NOMEM;
+ }
+
+
+ /* copy in HCI ACL data header and HCI ACL data */
+ PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+ bcopy(data, PKTDATA(osh, p), len);
+
+ /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+ PKTPUSH(osh, p, RFC1042_HDR_LEN);
+ eh = (struct ether_header *)PKTDATA(osh, p);
+ bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+ eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+ lsh = (struct dot11_llc_snap_header *)&eh[1];
+ bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+ lsh->type = HTON16(BTA_PROT_L2CAP);
+
+ return dhd_sendpkt(pub, 0, p);
+}
+
+/* txcomplete callback */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+ amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+ uint16 handle = ltoh16(ACL_data->handle);
+ uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+ wl_event_msg_t event;
+ uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+ amp_hci_event_t *evt;
+ num_completed_data_blocks_evt_parms_t *parms;
+
+ uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+ /* update the event struct */
+ memset(&event, 0, sizeof(event));
+ event.version = hton16(BCM_EVENT_MSG_VERSION);
+ event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+ event.status = 0;
+ event.reason = 0;
+ event.auth_type = 0;
+ event.datalen = hton32(len);
+ event.flags = 0;
+
+ /* generate Number of Completed Blocks event */
+ evt = (amp_hci_event_t *)data;
+ evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+ evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+ parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+ htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+ parms->num_handles = 1;
+ htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+ parms->completed[0].pkts = 1;
+ parms->completed[0].blocks = 1;
+
+ dhd_sendup_event_common(dhdp, &event, data);
+}
+
+/* event callback */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+ amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+ switch (evt->ecode) {
+ case HCI_Command_Complete: {
+ cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+ switch (ltoh16_ua((uint8 *)&parms->opcode)) {
+ case HCI_Read_Data_Block_Size: {
+ read_data_block_size_evt_parms_t *parms2 =
+ (read_data_block_size_evt_parms_t *)parms->parms;
+ dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+ break;
+ }
+ }
+ break;
+ }
+
+ case HCI_Flush_Occurred: {
+ flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+ dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+ break;
+ }
+ default:
+ break;
+ }
+}
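
Both transmit paths in dhd_bta.c wrap the HCI payload in the same BT-SIG encapsulation before handing the packet to dhd_sendpkt(). A sketch of the resulting layout and the allocation size used with PKTGET() (not part of the patch; the inline helper is hypothetical):

/*
 * Frame built by dhd_bta_docmd() / dhd_bta_tx_hcidata():
 *
 *   [ ether_header      ]  dhost zeroed (plus the locally-administered bit
 *                          for HCI cmds), shost = pub->mac,
 *                          ether_type = payload length
 *   [ LLC/SNAP (BT-SIG) ]  BT_SIG_SNAP_MPROT header; type = 0 for HCI cmds,
 *                          BTA_PROT_L2CAP for ACL data
 *   [ HCI preamble+data ]  'len' bytes copied from the caller's buffer
 *
 * RFC1042_HDR_LEN covers the Ethernet header plus the LLC/SNAP header:
 * the packet is reserved as below, PKTPULL'ed past the headers to copy
 * the HCI payload, then PKTPUSH'ed to prepend the Ethernet/LLC header.
 */
static inline uint dhd_bta_example_pktsize(dhd_pub_t *pub, uint hci_len)
{
	return pub->hdrlen + RFC1042_HDR_LEN + hci_len;	/* size passed to PKTGET() */
}
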
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h
new file mode 100644
index 000000000000..07d9cebb883a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.h
@@ -0,0 +1,39 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.h,v 1.2 2009-02-26 22:35:56 Exp $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 000000000000..bccb8b6603f8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,99 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h,v 1.14.28.1 2010-12-23 01:13:17 Exp $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+/* Send a data frame to the dongle. Callee disposes of txp. */
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+extern void dhd_disable_intr(dhd_pub_t *dhd);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus; returning TRUE requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+
+#endif /* _dhd_bus_h_ */
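
A rough sketch of the order in which the OS layer drives these entry points once a dongle is detected (not part of the patch; the helper name is hypothetical, error handling is trimmed, and the real sequencing lives in the bus modules and dhd_linux.c):

/* Hypothetical bring-up path for a freshly probed dongle. */
static int dhd_bus_bringup_example(dhd_pub_t *dhdp, osl_t *osh,
	char *fw, char *nv)
{
	/* 1. Push the firmware and nvram images into the dongle. */
	if (!dhd_bus_download_firmware(dhdp->bus, osh, fw, nv))
		return BCME_ERROR;

	/* 2. Bring the bus up for frame transfers (DHD_BUS_LOAD -> DHD_BUS_DATA). */
	if (dhd_bus_init(dhdp, TRUE) != 0)
		return BCME_ERROR;

	/* 3. From here on, data frames go out via dhd_bus_txdata() and
	 *    control messages via dhd_bus_txctl()/dhd_bus_rxctl(). */
	return 0;
}
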
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 000000000000..3a4de96c0028
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,2530 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c,v 1.51.6.31 2011-02-09 14:31:43 Exp $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
+ * defined in dhd_sdio.c (amount of header that might be added)
+ * plus any space that might be needed for alignment padding.
+ */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+ * round off at the end of buffer
+ */
+
+#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
+
+#ifdef PROP_TXSTATUS
+typedef struct dhd_wlfc_commit_info {
+ uint8 needs_hdr;
+ uint8 ac_fifo_credit_spent;
+ ewlfc_packet_state_t pkt_type;
+ wlfc_mac_descriptor_t* mac_entry;
+ void* p;
+} dhd_wlfc_commit_info_t;
+#endif /* PROP_TXSTATUS */
+
+typedef struct dhd_prot {
+ uint16 reqid;
+ uint8 pending;
+ uint32 lastcmd;
+ uint8 bus_header[BUS_HEADER_LEN];
+ cdc_ioctl_t msg;
+ unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+ int err = 0;
+ dhd_prot_t *prot = dhd->prot;
+ int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ DHD_OS_WAKE_LOCK(dhd);
+
+ /* NOTE : cdc->msg.len holds the desired length of the buffer to be
+ * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+ * is actually sent to the dongle
+ */
+ if (len > CDC_MAX_MSG_SIZE)
+ len = CDC_MAX_MSG_SIZE;
+
+ /* Send request */
+ err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+ int ret;
+ int cdc_len = len+sizeof(cdc_ioctl_t);
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ do {
+ ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+ if (ret < 0)
+ break;
+ } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+ return ret;
+}
+
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ void *info;
+ int ret = 0, retries = 0;
+ uint32 id, flags = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ if (cmd == WLC_GET_VAR && buf)
+ {
+ if (!strcmp((char *)buf, "bcmerrorstr"))
+ {
+ strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+ goto done;
+ }
+ else if (!strcmp((char *)buf, "bcmerror"))
+ {
+ *(int *)buf = dhd->dongle_error;
+ goto done;
+ }
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ /* add additional action bits */
+ action &= WL_IOCTL_ACTION_MASK;
+ msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ if (!dhd->hang_was_sent)
+ DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if ((id < prot->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check info buffer */
+ info = (void*)&msg[1];
+
+ /* Copy info buffer */
+ if (buf)
+ {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, info, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ int ret = 0;
+ uint32 flags, id;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ /* add additional action bits */
+ action &= WL_IOCTL_ACTION_MASK;
+ msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = -1;
+ uint8 action;
+
+ if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+ if (len > WLC_IOCTL_MAXLEN)
+ goto done;
+
+ if (prot->pending == TRUE) {
+ DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd));
+ if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+ DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+ }
+ goto done;
+ }
+
+ prot->pending = TRUE;
+ prot->lastcmd = ioc->cmd;
+ action = ioc->set;
+ if (action & WL_IOCTL_ACTION_SET)
+ ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ else {
+ ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ if (ret > 0)
+ ioc->used = ret - sizeof(cdc_ioctl_t);
+ }
+
+ /* Too many programs assume ioctl() returns 0 on success */
+ if (ret >= 0)
+ ret = 0;
+ else {
+ cdc_ioctl_t *msg = &prot->msg;
+ ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+ }
+
+ /* Intercept the wme_dp ioctl here */
+ if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+ prot->pending = FALSE;
+
+done:
+ return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ return BCME_UNSUPPORTED;
+}
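
A minimal sketch of how the CDC path above is normally driven from higher layers (not part of the patch; the helper name is hypothetical). dhd_wl_ioctl() ultimately lands in dhd_prot_ioctl(), which stamps the request id and action bits into msg.flags as shown above:

/* Hypothetical query of the firmware "ver" iovar over the CDC channel. */
static int cdc_query_version_example(dhd_pub_t *dhd, char *buf, int buflen)
{
	wl_ioctl_t ioc;

	memset(&ioc, 0, sizeof(ioc));
	strncpy(buf, "ver", buflen);	/* iovar name goes in the data buffer */
	ioc.cmd = WLC_GET_VAR;
	ioc.set = 0;			/* query (no WL_IOCTL_ACTION_SET bit) */
	ioc.buf = buf;
	ioc.len = buflen;

	/* ifidx 0 is the primary interface; on success the response string
	 * has been copied back into buf by dhdcdc_query_ioctl(). */
	return dhd_wl_ioctl(dhd, 0, &ioc, buf, buflen);
}
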
+
+#ifdef PROP_TXSTATUS
+void
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ int i;
+ uint8* ea;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhdp->wlfc_state;
+ wlfc_hanger_t* h;
+ wlfc_mac_descriptor_t* mac_table;
+ wlfc_mac_descriptor_t* interfaces;
+ char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+ if (wlfc == NULL) {
+ bcm_bprintf(strbuf, "wlfc not initialized yet\n");
+ return;
+ }
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h == NULL) {
+ bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+ }
+
+ mac_table = wlfc->destination_entries.nodes;
+ interfaces = wlfc->destination_entries.interfaces;
+ bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+ if (h) {
+ bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+ "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+ h->pushed,
+ h->popped,
+ h->failed_to_push,
+ h->failed_to_pop,
+ h->failed_slotfind,
+ (h->pushed - h->popped));
+ }
+
+ bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+ "(dq_full,sendq_full, rollback_fail) = (%d,%d,%d,%d), (%d,%d,%d)\n",
+ wlfc->stats.tlv_parse_failed,
+ wlfc->stats.credit_request_failed,
+ wlfc->stats.mac_update_failed,
+ wlfc->stats.psmode_update_failed,
+ wlfc->stats.delayq_full_error,
+ wlfc->stats.sendq_full_error,
+ wlfc->stats.rollback_failed);
+
+ bcm_bprintf(strbuf, "SENDQ (len,credit,sent) "
+ "(AC0[%d,%d,%d],AC1[%d,%d,%d],AC2[%d,%d,%d],AC3[%d,%d,%d],BC_MC[%d,%d,%d])\n",
+ wlfc->SENDQ.q[0].len, wlfc->FIFO_credit[0], wlfc->stats.sendq_pkts[0],
+ wlfc->SENDQ.q[1].len, wlfc->FIFO_credit[1], wlfc->stats.sendq_pkts[1],
+ wlfc->SENDQ.q[2].len, wlfc->FIFO_credit[2], wlfc->stats.sendq_pkts[2],
+ wlfc->SENDQ.q[3].len, wlfc->FIFO_credit[3], wlfc->stats.sendq_pkts[3],
+ wlfc->SENDQ.q[4].len, wlfc->FIFO_credit[4], wlfc->stats.sendq_pkts[4]);
+
+#ifdef PROP_TXSTATUS_DEBUG
+ bcm_bprintf(strbuf, "SENDQ dropped: AC[0-3]:(%d,%d,%d,%d), (bcmc,atim):(%d,%d)\n",
+ wlfc->stats.dropped_qfull[0], wlfc->stats.dropped_qfull[1],
+ wlfc->stats.dropped_qfull[2], wlfc->stats.dropped_qfull[3],
+ wlfc->stats.dropped_qfull[4], wlfc->stats.dropped_qfull[5]);
+#endif
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (interfaces[i].occupied) {
+ char* iftype_desc;
+
+ if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+ iftype_desc = "<Unknown";
+ else
+ iftype_desc = iftypes[interfaces[i].iftype];
+
+ ea = interfaces[i].ea;
+ bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s\n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ interfaces[i].interface_id,
+ iftype_desc);
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ(len,state,credit)"
+ "= (%d,%s,%d)\n",
+ i,
+ interfaces[i].psq.len,
+ ((interfaces[i].state ==
+ WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+ interfaces[i].requested_credit);
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ"
+ "(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+ "(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+ i,
+ interfaces[i].psq.q[0].len,
+ interfaces[i].psq.q[1].len,
+ interfaces[i].psq.q[2].len,
+ interfaces[i].psq.q[3].len,
+ interfaces[i].psq.q[4].len,
+ interfaces[i].psq.q[5].len,
+ interfaces[i].psq.q[6].len,
+ interfaces[i].psq.q[7].len);
+ }
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (mac_table[i].occupied) {
+ ea = mac_table[i].ea;
+ bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d\n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ mac_table[i].interface_id);
+
+ bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ(len,state,credit)"
+ "= (%d,%s,%d)\n",
+ i,
+ mac_table[i].psq.len,
+ ((mac_table[i].state ==
+ WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+ mac_table[i].requested_credit);
+#ifdef PROP_TXSTATUS_DEBUG
+ bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+ i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+ bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ"
+ "(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+ "(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+ i,
+ mac_table[i].psq.q[0].len,
+ mac_table[i].psq.q[1].len,
+ mac_table[i].psq.q[2].len,
+ mac_table[i].psq.q[3].len,
+ mac_table[i].psq.q[4].len,
+ mac_table[i].psq.q[5].len,
+ mac_table[i].psq.q[6].len,
+ mac_table[i].psq.q[7].len);
+ }
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int avg;
+ int moving_avg = 0;
+ int moving_samples;
+
+ if (wlfc->stats.latency_sample_count) {
+ moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+ for (i = 0; i < moving_samples; i++)
+ moving_avg += wlfc->stats.deltas[i];
+ moving_avg /= moving_samples;
+
+ avg = (100 * wlfc->stats.total_status_latency) /
+ wlfc->stats.latency_sample_count;
+ bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+ "(%d.%d, %03d, %03d)\n",
+ moving_samples, avg/100, (avg - (avg/100)*100),
+ wlfc->stats.latency_most_recent,
+ moving_avg);
+ }
+ }
+
+ bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+ "back = (%d,%d,%d,%d,%d,%d)\n",
+ wlfc->stats.fifo_credits_sent[0],
+ wlfc->stats.fifo_credits_sent[1],
+ wlfc->stats.fifo_credits_sent[2],
+ wlfc->stats.fifo_credits_sent[3],
+ wlfc->stats.fifo_credits_sent[4],
+ wlfc->stats.fifo_credits_sent[5],
+
+ wlfc->stats.fifo_credits_back[0],
+ wlfc->stats.fifo_credits_back[1],
+ wlfc->stats.fifo_credits_back[2],
+ wlfc->stats.fifo_credits_back[3],
+ wlfc->stats.fifo_credits_back[4],
+ wlfc->stats.fifo_credits_back[5]);
+ {
+ uint32 fifo_cr_sent = 0;
+ uint32 fifo_cr_acked = 0;
+ uint32 request_cr_sent = 0;
+ uint32 request_cr_ack = 0;
+ uint32 bc_mc_cr_ack = 0;
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+ fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+ }
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+ fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.nodes[i].dstncredit_acks;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.interfaces[i].dstncredit_acks;
+ }
+ }
+ bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+ "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+ fifo_cr_sent, fifo_cr_acked,
+ request_cr_sent, request_cr_ack,
+ wlfc->destination_entries.other.dstncredit_acks,
+ bc_mc_cr_ack,
+ wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull),(dropped,hdr_only,wlc_tossed)"
+ "(freed,free_err,rollback)) = "
+ "((%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+ wlfc->stats.pktin,
+ wlfc->stats.pkt2bus,
+ wlfc->stats.txstatus_in,
+ wlfc->stats.dhd_hdrpulls,
+
+ wlfc->stats.pktdropped,
+ wlfc->stats.wlfc_header_only_pkt,
+ wlfc->stats.wlc_tossed_pkts,
+
+ wlfc->stats.pkt_freed,
+ wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+ bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+ "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+
+ wlfc->stats.d11_suppress,
+ wlfc->stats.wl_suppress,
+ wlfc->stats.bad_suppress,
+
+ wlfc->stats.psq_d11sup_enq,
+ wlfc->stats.psq_wlsup_enq,
+ wlfc->stats.psq_hostq_enq,
+ wlfc->stats.mac_handle_notfound,
+
+ wlfc->stats.psq_d11sup_retx,
+ wlfc->stats.psq_wlsup_retx,
+ wlfc->stats.psq_hostq_retx);
+ return;
+}
+
+/* Create a place to store all packet pointers submitted to the firmware until
+ a status comes back, suppressed or otherwise.
+
+ hang-er: noun, a contrivance on which things are hung, as a hook.
+*/
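+
+/* Hanger slot lifecycle (summary of the helpers below): a free slot is taken
+ with dhd_wlfc_hanger_get_free_slot(), the packet pointer is parked there with
+ dhd_wlfc_hanger_pushpkt() when the packet is handed to the bus, and it is
+ retrieved (and the slot optionally freed) with dhd_wlfc_hanger_poppkt() when
+ the corresponding txstatus arrives from the firmware.
+*/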
+static void*
+dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+ int i;
+ wlfc_hanger_t* hanger;
+
+ /* allow only up to a specific size for now */
+ ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+ if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+ return NULL;
+
+ memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+ hanger->max_items = max_items;
+
+ for (i = 0; i < hanger->max_items; i++) {
+ hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+ }
+ return hanger;
+}
+
+static int
+dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h) {
+ MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+ return BCME_OK;
+ }
+ return BCME_BADARG;
+}
+
+static uint16
+dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h) {
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE)
+ return (uint16)i;
+ }
+ h->failed_slotfind++;
+ }
+ return WLFC_HANGER_MAXITEMS;
+}
+
+static int
+dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+ if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+ h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+ h->items[slot_id].pkt = pkt;
+ h->items[slot_id].identifier = slot_id;
+ h->pushed++;
+ }
+ else {
+ h->failed_to_push++;
+ rc = BCME_NOTFOUND;
+ }
+ }
+ else
+ rc = BCME_BADARG;
+ return rc;
+}
+
+static int
+dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ /* this packet was not pushed at the time it went to the firmware */
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return BCME_NOTFOUND;
+
+ if (h) {
+ if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ *pktout = h->items[slot_id].pkt;
+ if (remove_from_hanger) {
+ h->items[slot_id].state =
+ WLFC_HANGER_ITEM_STATE_FREE;
+ h->items[slot_id].pkt = NULL;
+ h->items[slot_id].identifier = 0;
+ h->popped++;
+ }
+ }
+ else {
+ h->failed_to_pop++;
+ rc = BCME_NOTFOUND;
+ }
+ }
+ else
+ rc = BCME_BADARG;
+ return rc;
+}
+
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+ uint8 tim_bmp, uint8 mac_handle, uint32 htodtag)
+{
+ uint32 wl_pktinfo = 0;
+ uint8* wlh;
+ uint8 dataOffset;
+ uint8 fillers;
+ uint8 tim_signal_len = 0;
+
+ struct bdc_header *h;
+
+ if (tim_signal) {
+ tim_signal_len = 1 + 1 + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ }
+
+ /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+ dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + 2 + tim_signal_len;
+ fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+ dataOffset += fillers;
+
+ PKTPUSH(ctx->osh, p, dataOffset);
+ wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+ wl_pktinfo = htol32(htodtag);
+
+ wlh[0] = WLFC_CTL_TYPE_PKTTAG;
+ wlh[1] = WLFC_CTL_VALUE_LEN_PKTTAG;
+ memcpy(&wlh[2], &wl_pktinfo, sizeof(uint32));
+
+ if (tim_signal_len) {
+ wlh[dataOffset - fillers - tim_signal_len ] =
+ WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 1] =
+ WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+ wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+ }
+ if (fillers)
+ memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+ PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+ h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(p))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+ h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = dataOffset >> 2;
+ BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+ return BCME_OK;
+}
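+
+/* Layout produced by _dhd_wlfc_pushheader() above, from the start of the
+ packet: [BDC header][PKTTAG TLV: type, len, 4-byte host-to-dongle tag]
+ [optional PENDING_TRAFFIC_BMP TLV: type, len, mac_handle, tim_bmp][filler
+ bytes up to a 4-byte boundary]. h->dataOffset records the wl-header length
+ in 4-byte words so that _dhd_wlfc_pullheader() can strip it again.
+*/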
+
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+ struct bdc_header *h;
+
+ if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+ WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+ h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+ /* pull BDC header */
+ PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+ /* pull wl-header */
+ PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+ return BCME_OK;
+}
+
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+ int i;
+ wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+ uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+ uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+
+ /* no lookup necessary, only if this packet belongs to STA interface */
+ if (((ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_STA) ||
+ ETHER_ISMULTI(dstn) ||
+ (ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_P2P_CLIENT)) &&
+ (ctx->destination_entries.interfaces[ifid].occupied)) {
+ return &ctx->destination_entries.interfaces[ifid];
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (table[i].occupied) {
+ if (table[i].interface_id == ifid) {
+ if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN))
+ return &table[i];
+ }
+ }
+ }
+ return &ctx->destination_entries.other;
+}
+
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+ void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+ /*
+ Put the packet back at the head of its queue:
+
+ - a packet from the send-q must go back to the send-q, not the delay-q,
+ since re-queueing it elsewhere would change the order of packets.
+ - a suppressed packet goes back to the suppress sub-queue.
+ - the header is pulled out for new or delayed packets.
+
+ Note: hslot is used only when header removal is done.
+ */
+ wlfc_mac_descriptor_t* entry;
+ void* pktout;
+ int rc = BCME_OK;
+ int prec;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+ if (entry != NULL) {
+ if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) {
+ /* wl-header is saved for suppressed packets */
+ if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ }
+ else {
+ /* remove header first */
+ _dhd_wlfc_pullheader(ctx, p);
+
+ if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+ /* delay-q packets are going to delay-q */
+ if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, (prec << 1), p) == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ }
+ else {
+ /* these are going to SENDQ */
+ if (WLFC_PKTQ_PENQ_HEAD(&ctx->SENDQ, prec, p) == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ }
+ /* free the hanger slot */
+ dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1);
+
+ /* decrement sequence count */
+ WLFC_DECR_SEQCOUNT(entry, prec);
+ }
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the firmware (for pspoll etc.)
+ */
+ if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+ entry->requested_credit++;
+ }
+ }
+ else {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ if (rc != BCME_OK)
+ ctx->stats.rollback_failed++;
+ else
+ ctx->stats.rollback++;
+
+ return rc;
+}
+
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+ if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+ /* start traffic */
+ ctx->hostif_flow_state[if_id] = OFF;
+ /*
+ WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+ pq->len, if_id, __FUNCTION__));
+ */
+ WLFC_DBGMESG(("F"));
+ /* dhd_txflowcontrol(ctx->dhdp, if_id, OFF); */
+ ctx->toggle_host_if = 0;
+ }
+ if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+ /* stop traffic */
+ ctx->hostif_flow_state[if_id] = ON;
+ /*
+ WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n",
+ pq->len, if_id, __FUNCTION__));
+ */
+ WLFC_DBGMESG(("N"));
+ /* dhd_txflowcontrol(ctx->dhdp, if_id, ON); */
+ ctx->host_ifidx = if_id;
+ ctx->toggle_host_if = 1;
+ }
+ return;
+}
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ uint8 ta_bmp)
+{
+ int rc = BCME_OK;
+ void* p = NULL;
+ int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 12;
+
+ /* allocate a dummy packet */
+ p = PKTGET(ctx->osh, dummylen, TRUE);
+ if (p) {
+ PKTPULL(ctx->osh, p, dummylen);
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+ _dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0);
+ DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+ ctx->stats.signal_only_pkts_sent++;
+#endif
+ rc = dhd_bus_txdata(((dhd_pub_t *)ctx->dhdp)->bus, p);
+ if (rc != BCME_OK) {
+ PKTFREE(ctx->osh, p, TRUE);
+ }
+ }
+ else {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, dummylen));
+ rc = BCME_NOMEM;
+ }
+ return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ int prec)
+{
+ bool rc = FALSE;
+
+ if (entry->state == WLFC_STATE_CLOSE) {
+ if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+ (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+ if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+ rc = TRUE;
+ entry->traffic_pending_bmp =
+ entry->traffic_pending_bmp & ~ NBITVAL(prec);
+ }
+ }
+ else {
+ if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+ rc = TRUE;
+ entry->traffic_pending_bmp =
+ entry->traffic_pending_bmp | NBITVAL(prec);
+ }
+ }
+ }
+ if (rc) {
+ /* request a TIM update to firmware at the next piggyback opportunity */
+ if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+ entry->send_tim_signal = 1;
+ _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+ entry->send_tim_signal = 0;
+ }
+ else {
+ rc = FALSE;
+ }
+ }
+ return rc;
+}
+
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+ wlfc_mac_descriptor_t* entry;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_NOTFOUND;
+ }
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (WLFC_PKTQ_PENQ(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+ ctx->stats.delayq_full_error++;
+ /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+ WLFC_DBGMESG(("s"));
+ return BCME_ERROR;
+ }
+ /* A packet has been pushed, update traffic availability bitmap, if applicable */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+ return BCME_OK;
+}
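+
+/* Example of the sub-queue mapping used above: for prec == 2, delayed packets
+ sit in psq sub-queue 4 (2*prec + 0) and suppressed packets in sub-queue 5
+ (2*prec + 1); the dequeue path drains the suppressed sub-queue first so that
+ packets which have already been on the bus keep their original order.
+*/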
+
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+ wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+ int rc = BCME_OK;
+ int hslot = WLFC_HANGER_MAXITEMS;
+ bool send_tim_update = FALSE;
+ uint32 htod = 0;
+ uint8 free_ctr;
+
+ *slot = hslot;
+
+ if (entry == NULL) {
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ }
+
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+ if (entry->send_tim_signal) {
+ send_tim_update = TRUE;
+ entry->send_tim_signal = 0;
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+ }
+ if (header_needed) {
+ hslot = dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+ free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+ }
+ else {
+ hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ }
+ WLFC_PKTID_HSLOT_SET(htod, hslot);
+ WLFC_PKTID_FREERUNCTR_SET(htod, free_ctr);
+ DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WLFC_PKTFLAG_SET_GENERATION(htod, entry->generation);
+
+ if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+ /*
+ Indicate that this packet is being sent in response to an
+ explicit request from the firmware side.
+ */
+ WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+ }
+ else {
+ WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+ }
+ if (header_needed) {
+ rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+ entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+ if (rc == BCME_OK) {
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+ /*
+ a new header was created for this packet.
+ push to hanger slot and scrub q. Since bus
+ send succeeded, increment seq number as well.
+ */
+ rc = dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ if (rc == BCME_OK) {
+ /* increment free running sequence count */
+ WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+#ifdef PROP_TXSTATUS_DEBUG
+ ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+ OSL_SYSUPTIME();
+#endif
+ }
+ else {
+ WLFC_DBGMESG(("%s() hanger_pushpkt() failed, rc: %d\n",
+ __FUNCTION__, rc));
+ }
+ }
+ }
+ else {
+ /* remove old header */
+ _dhd_wlfc_pullheader(ctx, p);
+
+ hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ /* push new header */
+ _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+ entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+ }
+ *slot = hslot;
+ return rc;
+}
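+
+/* The 32-bit host-to-dongle tag (htod) assembled above carries, via the
+ WLFC_PKTID_, WL_TXSTATUS_ and WLFC_PKTFLAG_ setter macros: the hanger slot,
+ the free-running per-AC sequence counter, the destination FIFO (AC), the
+ entry generation, a "packet from host" flag and a "sent on firmware request"
+ flag. The exact bit positions are defined by those macros (not shown here).
+*/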
+
+static int
+_dhd_wlfc_is_destination_closed(athost_wl_status_info_t* ctx,
+ wlfc_mac_descriptor_t* entry, int prec)
+{
+ if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+ WLC_E_IF_ROLE_P2P_GO) {
+ /* The destination interface is of type p2p GO.
+ For a p2pGO interface, if the destination is OPEN but the interface is
+ CLOSEd, do not send traffic. However, if the destination is CLOSEd while
+ there is destination-specific credit left, do send packets, because the
+ firmware is holding the destination-specific requested packets in its queue.
+ */
+ if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+ (entry->requested_packet == 0))
+ return 1;
+ }
+ /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+ if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+ (entry->requested_packet == 0)) ||
+ (!(entry->ac_bitmap & (1 << prec))))
+ return 1;
+
+ return 0;
+}
+
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx,
+ int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out)
+{
+ wlfc_mac_descriptor_t* entry;
+ wlfc_mac_descriptor_t* table;
+ uint8 token_pos;
+ int total_entries;
+ void* p = NULL;
+ int pout;
+ int i;
+
+ *entry_out = NULL;
+ token_pos = ctx->token_pos[prec];
+ /* most cases a packet will count against FIFO credit */
+ *ac_credit_spent = 1;
+ *needs_hdr = 1;
+
+ /* search all entries, include nodes as well as interfaces */
+ table = (wlfc_mac_descriptor_t*)&ctx->destination_entries;
+ total_entries = sizeof(ctx->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+
+ for (i = 0; i < total_entries; i++) {
+ entry = &table[(token_pos + i) % total_entries];
+ if (entry->occupied) {
+ if (!_dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+ p = pktq_mdeq(&entry->psq,
+ /* higher precedence will be picked up first,
+ i.e. suppressed packets before delayed ones
+ */
+ (NBITVAL((prec << 1) + 1) | NBITVAL((prec << 1))),
+ &pout);
+ if (p != NULL) {
+ /* did the packet come from suppress sub-queue? */
+ if (pout == ((prec << 1) + 1)) {
+ /*
+ this packet was suppressed and was sent on the bus
+ previously; this already has a header
+ */
+ *needs_hdr = 0;
+ }
+ if (entry->requested_credit > 0) {
+ entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_sent_packets++;
+#endif
+ /*
+ if the packet was pulled out while destination is in
+ closed state but had a non-zero packets requested,
+ then this should not count against the FIFO credit.
+ That is due to the fact that the firmware will
+ most likely hold onto this packet until a suitable
+ time later to push it to the appropriate AC FIFO.
+ */
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+ }
+ else if (entry->requested_packet > 0) {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+ }
+ /* move token to ensure fair round-robin */
+ ctx->token_pos[prec] =
+ (token_pos + i + 1) % total_entries;
+ *entry_out = entry;
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq,
+ DHD_PKTTAG_IF(PKTTAG(p)));
+ /*
+ A packet has been picked up, update traffic
+ availability bitmap, if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ return p;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+static void*
+_dhd_wlfc_deque_sendq(athost_wl_status_info_t* ctx, int prec, uint8* ac_credit_spent)
+{
+ wlfc_mac_descriptor_t* entry;
+ void* p;
+
+ /* most cases a packet will count against FIFO credit */
+ *ac_credit_spent = 1;
+
+ p = pktq_pdeq(&ctx->SENDQ, prec);
+ if (p != NULL) {
+ if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))
+ /* bc/mc packets do not have a delay queue */
+ return p;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return p;
+ }
+
+ while ((p != NULL) && _dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (WLFC_PKTQ_PENQ(&entry->psq, (prec << 1), p) == NULL) {
+ WLFC_DBGMESG(("D"));
+ /* dhd_txcomplete(ctx->dhdp, p, FALSE); */
+ PKTFREE(ctx->osh, p, TRUE);
+ ctx->stats.delayq_full_error++;
+ }
+ /*
+ A packet has been pushed, update traffic availability bitmap,
+ if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+ p = pktq_pdeq(&ctx->SENDQ, prec);
+ if (p == NULL)
+ break;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+ if ((entry == NULL) || (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))) {
+ return p;
+ }
+ }
+ if (p) {
+ if (entry->requested_packet == 0) {
+ if (entry->requested_credit > 0)
+ entry->requested_credit--;
+ }
+ else {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+ }
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_sent_packets++;
+#endif
+ }
+ if (p)
+ _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p)));
+ }
+ return p;
+}
+
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ int rc = BCME_OK;
+
+ if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+ entry->occupied = 1;
+ entry->state = WLFC_STATE_OPEN;
+ entry->requested_credit = 0;
+ entry->interface_id = ifid;
+ entry->iftype = iftype;
+ entry->ac_bitmap = 0xff; /* update this when handling APSD */
+ /* for an interface entry we may not care about the MAC address */
+ if (ea != NULL)
+ memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+ pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+ }
+ else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+ entry->occupied = 0;
+ entry->state = WLFC_STATE_CLOSE;
+ entry->requested_credit = 0;
+ /* enable once packets are queued/dequeued properly:
+ pktq_flush(dhd->osh, &entry->psq, FALSE, NULL, 0);
+ */
+ }
+ return rc;
+}
+
+int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, uint8 available_credit_map, int borrower_ac)
+{
+ int lender_ac;
+ int rc = BCME_ERROR;
+
+ if (ctx == NULL || available_credit_map == 0) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ /* Borrow from lowest priority available AC (including BC/MC credits) */
+ for (lender_ac = 0; lender_ac <= AC_COUNT; lender_ac++) {
+ if ((available_credit_map & (1 << lender_ac)) &&
+ (ctx->FIFO_credit[lender_ac] > 0)) {
+ ctx->credits_borrowed[borrower_ac][lender_ac]++;
+ ctx->FIFO_credit[lender_ac]--;
+ rc = BCME_OK;
+ break;
+ }
+ }
+
+ return rc;
+}
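+
+/* Borrowing bookkeeping: a successful borrow increments
+ credits_borrowed[borrower_ac][lender_ac] and decrements the lender's
+ FIFO_credit. The debt is repaid in dhd_wlfc_txcomplete(),
+ dhd_wlfc_txstatus_update() and dhd_wlfc_fifocreditback_indicate(), all of
+ which return credits to the highest-priority lender first.
+*/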
+
+int
+dhd_wlfc_interface_entry_update(void* state,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ wlfc_mac_descriptor_t* entry;
+
+ if (ifid >= WLFC_MAX_IFNUM)
+ return BCME_BADARG;
+
+ entry = &ctx->destination_entries.interfaces[ifid];
+ return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea);
+}
+
+int
+dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+ /* update the AC FIFO credit map */
+ ctx->FIFO_credit[0] = credits[0];
+ ctx->FIFO_credit[1] = credits[1];
+ ctx->FIFO_credit[2] = credits[2];
+ ctx->FIFO_credit[3] = credits[3];
+ /* credit for bc/mc packets */
+ ctx->FIFO_credit[4] = credits[4];
+ /* credit for ATIM FIFO is not used yet. */
+ ctx->FIFO_credit[5] = 0;
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_enque_sendq(void* state, int prec, void* p)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+ if ((state == NULL) ||
+ /* prec = AC_COUNT is used for bc/mc queue */
+ (prec > AC_COUNT) ||
+ (p == NULL)) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+ if (FALSE == dhd_prec_enq(ctx->dhdp, &ctx->SENDQ, p, prec)) {
+ ctx->stats.sendq_full_error++;
+ /*
+ WLFC_DBGMESG(("Error: %s():%d, qlen:%d\n",
+ __FUNCTION__, __LINE__, ctx->SENDQ.len));
+ */
+ WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, prec);
+ WLFC_DBGMESG(("Q"));
+ PKTFREE(ctx->osh, p, TRUE);
+ return BCME_ERROR;
+ }
+ ctx->stats.pktin++;
+ /* _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p))); */
+ return BCME_OK;
+}
+
+int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+ dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ uint32 hslot;
+ int rc;
+
+ /*
+ if ac_fifo_credit_spent = 0
+
+ This packet will not count against the FIFO credit.
+ To ensure the txstatus corresponding to this packet
+ does not provide an implied credit (default behavior)
+ mark the packet accordingly.
+
+ if ac_fifo_credit_spent = 1
+
+ This is a normal packet and it counts against the FIFO
+ credit count.
+ */
+ DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+ rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+ commit_info->needs_hdr, &hslot);
+
+ if (rc == BCME_OK)
+ rc = fcommit(commit_ctx, commit_info->p);
+ else
+ ctx->stats.generic_error++;
+
+ if (rc == BCME_OK) {
+ ctx->stats.pkt2bus++;
+ if (commit_info->ac_fifo_credit_spent) {
+ ctx->stats.sendq_pkts[ac]++;
+ WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+ }
+ }
+ else {
+ /*
+ bus commit has failed, rollback.
+ - remove wl-header for a delayed packet
+ - save wl-header header for suppressed packets
+ */
+ rc = _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p,
+ (commit_info->pkt_type), hslot);
+ if (rc != BCME_OK)
+ ctx->stats.rollback_failed++;
+
+ rc = BCME_ERROR;
+ }
+
+ return rc;
+}
+
+int
+dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ int ac;
+ int credit;
+ int rc;
+ dhd_wlfc_commit_info_t commit_info;
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ int credit_count = 0;
+ int bus_retry_count = 0;
+ uint8 ac_available = 0; /* Bitmask for 4 ACs + BC/MC */
+
+ if ((state == NULL) ||
+ (fcommit == NULL)) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ memset(&commit_info, 0, sizeof(commit_info));
+
+ /*
+ Commit packets for regular AC traffic. Higher priority first.
+ First, use up FIFO credits available to each AC. Based on distribution
+ and credits left, borrow from other ACs as applicable
+
+ -NOTE:
+ If the bus between the host and firmware is overwhelmed by the
+ traffic from host, it is possible that higher priority traffic
+ starves the lower priority queue. If that occurs often, we may
+ have to employ weighted round-robin or ucode scheme to avoid
+ low priority packet starvation.
+ */
+
+ for (ac = AC_COUNT; ac >= 0; ac--) {
+
+ int initial_credit_count = ctx->FIFO_credit[ac];
+
+ for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry));
+
+ if (commit_info.p == NULL)
+ break;
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ credit++;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ ctx->FIFO_credit[ac] -= credit;
+ return rc;
+ }
+ }
+ }
+
+ ctx->FIFO_credit[ac] -= credit;
+
+ /* packets from SENDQ are fresh; they need a header and have no MAC entry yet */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
+
+ for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+ commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent));
+ if (commit_info.p == NULL)
+ break;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ credit++;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ ctx->FIFO_credit[ac] -= credit;
+ return rc;
+ }
+ }
+ }
+
+ ctx->FIFO_credit[ac] -= credit;
+
+ /* If no credits were used for this AC, it is idle and its credits can be
+ lent out. Note that reserved credits cannot be borrowed.
+ */
+ if (initial_credit_count == ctx->FIFO_credit[ac]) {
+ ac_available |= (1 << ac);
+ credit_count += ctx->FIFO_credit[ac];
+ }
+ }
+
+ /* We borrow only for AC_BE and only if no other traffic seen for DEFER_PERIOD
+
+ Note that (ac_available & WLFC_AC_BE_TRAFFIC_ONLY) is done to:
+ a) ignore BC/MC for deferring borrow
+ b) ignore AC_BE being available along with other ACs
+ (this should happen only for pure BC/MC traffic)
+
+ i.e. AC_VI, AC_VO, AC_BK all MUST be available (i.e. no traffic) and
+ we do not care if AC_BE and BC/MC are available or not
+ */
+ if ((ac_available & WLFC_AC_BE_TRAFFIC_ONLY) == WLFC_AC_BE_TRAFFIC_ONLY) {
+
+ if (ctx->allow_credit_borrow) {
+ ac = 1; /* Set ac to AC_BE and borrow credits */
+ }
+ else {
+ int delta;
+ int curr_t = OSL_SYSUPTIME();
+
+ if (curr_t > ctx->borrow_defer_timestamp)
+ delta = curr_t - ctx->borrow_defer_timestamp;
+ else
+ delta = 0xffffffff + curr_t - ctx->borrow_defer_timestamp;
+
+ if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+ /* Defer period has expired: re-enable borrowing from the next iteration (defensive borrowing) */
+ ctx->allow_credit_borrow = TRUE;
+ ctx->borrow_defer_timestamp = 0;
+ }
+ return BCME_OK;
+ }
+ }
+ else {
+ /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+ ctx->allow_credit_borrow = FALSE;
+ ctx->borrow_defer_timestamp = OSL_SYSUPTIME();
+ return BCME_OK;
+ }
+
+ /* At this point, borrow all credits only for "ac" (which should be set above to AC_BE)
+ Generically use "ac" only in case we extend to all ACs in future
+ */
+ for (; (credit_count > 0);) {
+
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry));
+ if (commit_info.p == NULL)
+ break;
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
+ credit_count--;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ return rc;
+ }
+ }
+ }
+
+ /* packets from SENDQ are fresh; they need a header and have no MAC entry yet */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
+
+ for (; (credit_count > 0);) {
+
+ commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent));
+ if (commit_info.p == NULL)
+ break;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
+ credit_count--;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ return rc;
+ }
+ }
+ }
+
+ return BCME_OK;
+}
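+
+/* Commit flow summary: dhd_wlfc_commit_packets() walks the queues from
+ ac == AC_COUNT (bc/mc) down to 0, first spending each AC's own FIFO credits
+ on its delay/suppress queues and then on the SENDQ. Only when AC_BK, AC_VI
+ and AC_VO have carried no traffic for the defer period are the idle ACs'
+ credits lent to AC_BE via _dhd_wlfc_borrow_credit().
+*/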
+
+static uint8
+dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+ wlfc_mac_descriptor_t* table =
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+ uint8 table_index;
+
+ if (ea != NULL) {
+ for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+ if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+ table[table_index].occupied)
+ return table_index;
+ }
+ }
+ return WLFC_MAC_DESC_ID_INVALID;
+}
+
+void
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ void* p;
+ int fifo_id;
+
+ if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.signal_only_pkts_freed++;
+#endif
+ /* is this a signal-only packet? */
+ PKTFREE(wlfc->osh, txp, TRUE);
+ return;
+ }
+ if (!success) {
+ WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+ __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+ dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG
+ (PKTTAG(txp))), &p, 1);
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, txp, FALSE);
+
+ /* return the credit, if necessary */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) {
+ int lender, credit_returned = 0; /* Note that borrower is fifo_id */
+
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(txp));
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+
+ PKTFREE(wlfc->osh, txp, TRUE);
+ }
+ return;
+}
+
+/* Handle discard or suppress indication */
+static int
+dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info)
+{
+ uint8 status_flag;
+ uint32 status;
+ int ret;
+ int remove_from_hanger = 1;
+ void* pktbuf;
+ uint8 fifo_id;
+ wlfc_mac_descriptor_t* entry = NULL;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+
+ memcpy(&status, pkt_info, sizeof(uint32));
+ status_flag = WL_TXSTATUS_GET_FLAGS(status);
+ wlfc->stats.txstatus_in++;
+
+ if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+ wlfc->stats.pkt_freed++;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+ wlfc->stats.d11_suppress++;
+ remove_from_hanger = 0;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+ wlfc->stats.wl_suppress++;
+ remove_from_hanger = 0;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+ wlfc->stats.wlc_tossed_pkts++;
+ }
+
+ ret = dhd_wlfc_hanger_poppkt(wlfc->hanger,
+ WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger);
+ if (ret != BCME_OK) {
+ /* do something */
+ return ret;
+ }
+
+ if (!remove_from_hanger) {
+ /* this packet was suppressed */
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+ entry->generation = WLFC_PKTID_GEN(status);
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ uint32 new_t = OSL_SYSUPTIME();
+ uint32 old_t;
+ uint32 delta;
+ old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[
+ WLFC_PKTID_HSLOT_GET(status)].push_time;
+
+
+ wlfc->stats.latency_sample_count++;
+ if (new_t > old_t)
+ delta = new_t - old_t;
+ else
+ delta = 0xffffffff + new_t - old_t;
+ wlfc->stats.total_status_latency += delta;
+ wlfc->stats.latency_most_recent = delta;
+
+ wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+ if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+ wlfc->stats.idx_delta = 0;
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+ /* pick up the implicit credit from this packet */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+ if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) {
+
+ int lender, credit_returned = 0; /* Note that borrower is fifo_id */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+ }
+ else {
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the destination entry (for pspoll etc.)
+ */
+ if (!entry) {
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+ }
+ if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+ entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_acks++;
+#endif
+ }
+ if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+ (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+ ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+ if (ret != BCME_OK) {
+ /* delay q is full, drop this packet */
+ dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status),
+ &pktbuf, 1);
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, pktbuf, FALSE);
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ }
+ else {
+ dhd_txcomplete(dhd, pktbuf, TRUE);
+ /* free the packet */
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ return BCME_OK;
+}
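+
+/* Summary of the txstatus handling above: DISCARD and TOSSED_BYWLC statuses
+ complete the packet towards the stack and free it, while D11SUPPRESS and
+ WLSUPPRESS keep the hanger slot occupied and re-queue the packet on the
+ suppress sub-queue via _dhd_wlfc_enque_suppressed(), dropping it only if
+ that queue is full. Credit accounting depends on proptxstatus_mode and on
+ whether the packet consumed FIFO credit.
+*/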
+
+static int
+dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+ int i;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+ /* update FIFO credits */
+ if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+ {
+ int lender; /* Note that borrower is i */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+ if (wlfc->credits_borrowed[i][lender] > 0) {
+ if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+ credits[i] -= wlfc->credits_borrowed[i][lender];
+ wlfc->FIFO_credit[lender] +=
+ wlfc->credits_borrowed[i][lender];
+ wlfc->credits_borrowed[i][lender] = 0;
+ }
+ else {
+ wlfc->credits_borrowed[i][lender] -= credits[i];
+ wlfc->FIFO_credit[lender] += credits[i];
+ credits[i] = 0;
+ }
+ }
+ }
+
+ /* If we have more credits left over, these must belong to the AC */
+ if (credits[i] > 0) {
+ wlfc->FIFO_credit[i] += credits[i];
+ }
+ }
+ }
+
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+ (void)dhd;
+ (void)rssi;
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ int rc;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 existing_index;
+ uint8 table_index;
+ uint8 ifid;
+ uint8* ea;
+
+ WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+ __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+ ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+ WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+ table = wlfc->destination_entries.nodes;
+ table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+ ifid = value[1];
+ ea = &value[2];
+
+ if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+ existing_index = dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+ if (existing_index == WLFC_MAC_DESC_ID_INVALID) {
+ /* this MAC entry does not exist, create one */
+ if (!table[table_index].occupied) {
+ table[table_index].mac_handle = value[0];
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea);
+ }
+ else {
+ /* the space should have been empty, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ else {
+ /*
+ there is an existing entry, move it to new index
+ if necessary.
+ */
+ if (existing_index != table_index) {
+ /* if we already have an entry, free the old one */
+ table[existing_index].occupied = 0;
+ table[existing_index].state = WLFC_STATE_CLOSE;
+ table[existing_index].requested_credit = 0;
+ table[existing_index].interface_id = 0;
+ }
+ }
+ }
+ if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+ if (table[table_index].occupied) {
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea);
+ }
+ else {
+ /* the space should have been occupied, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ /* Handle PS on/off indication */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle = value[0];
+ int i;
+
+ table = wlfc->destination_entries.nodes;
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ /* a fresh PS mode should wipe old ps credits? */
+ desc->requested_credit = 0;
+ if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+ desc->state = WLFC_STATE_OPEN;
+ DHD_WLFC_CTRINC_MAC_OPEN(desc);
+ }
+ else {
+ desc->state = WLFC_STATE_CLOSE;
+ DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+ /*
+ Indicate to firmware if there is any traffic pending.
+ */
+ for (i = AC_BE; i < AC_COUNT; i++) {
+ _dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+ }
+ }
+ }
+ else {
+ wlfc->stats.psmode_update_failed++;
+ }
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ /* Handle PS on/off indication */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 if_id = value[0];
+
+ if (if_id < WLFC_MAX_IFNUM) {
+ table = wlfc->destination_entries.interfaces;
+ if (table[if_id].occupied) {
+ if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+ table[if_id].state = WLFC_STATE_OPEN;
+ /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+ }
+ else {
+ table[if_id].state = WLFC_STATE_CLOSE;
+ /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+ }
+ return BCME_OK;
+ }
+ }
+ wlfc->stats.interface_update_failed++;
+
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle;
+ uint8 credit;
+
+ table = wlfc->destination_entries.nodes;
+ mac_handle = value[1];
+ credit = value[0];
+
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ desc->requested_credit = credit;
+
+ desc->ac_bitmap = value[2];
+ }
+ else {
+ wlfc->stats.credit_request_failed++;
+ }
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle;
+ uint8 packet_count;
+
+ table = wlfc->destination_entries.nodes;
+ mac_handle = value[1];
+ packet_count = value[0];
+
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ desc->requested_packet = packet_count;
+
+ desc->ac_bitmap = value[2];
+ }
+ else {
+ wlfc->stats.packet_request_failed++;
+ }
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len)
+{
+ uint8 type, len;
+ uint8* value;
+ uint8* tmpbuf;
+ uint16 remainder = tlv_hdr_len;
+ uint16 processed = 0;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+ if (remainder) {
+ while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+ type = tmpbuf[processed];
+ if (type == WLFC_CTL_TYPE_FILLER) {
+ remainder -= 1;
+ processed += 1;
+ continue;
+ }
+
+ len = tmpbuf[processed + 1];
+ value = &tmpbuf[processed + 2];
+
+ if (remainder < (2 + len))
+ break;
+
+ remainder -= 2 + len;
+ processed += 2 + len;
+ if (type == WLFC_CTL_TYPE_TXSTATUS)
+ dhd_wlfc_txstatus_update(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+ dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_RSSI)
+ dhd_wlfc_rssi_indicate(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+ dhd_wlfc_credit_request(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+ dhd_wlfc_packet_request(dhd, value);
+
+ else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+ (type == WLFC_CTL_TYPE_MAC_CLOSE))
+ dhd_wlfc_psmode_update(dhd, value, type);
+
+ else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+ (type == WLFC_CTL_TYPE_MACDESC_DEL))
+ dhd_wlfc_mac_table_update(dhd, value, type);
+
+ else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+ (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+ dhd_wlfc_interface_update(dhd, value, type);
+ }
+ }
+ if (remainder != 0) {
+ /* trouble..., something is not right */
+ wlfc->stats.tlv_parse_failed++;
+ }
+ }
+ return BCME_OK;
+}
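+
+/* The signaling TLVs parsed above use a simple [type][len][value...] layout,
+ except for WLFC_CTL_TYPE_FILLER, which is a single padding byte with no
+ length or value. Anything left over after parsing indicates a malformed
+ header and is counted in stats.tlv_parse_failed.
+*/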
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+ char iovbuf[12]; /* Room for "tlv" + '\0' + parameter */
+ /* enable all signals & indicate host proptxstatus logic is active */
+ uint32 tlv = dhd->wlfc_enabled?
+ WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE : 0;
+
+
+ /*
+ try to enable/disable signaling by sending the "tlv" iovar. If that fails,
+ fall back to no flow control? Print a message for now.
+ */
+
+ /* enable proptxtstatus signaling by default */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+ }
+ else {
+ /*
+ Leaving the message for now, it should be removed after a while; once
+ the tlv situation is stable.
+ */
+ DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ dhd->wlfc_enabled?"enabled":"disabled", tlv));
+ }
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+ int i;
+ athost_wl_status_info_t* wlfc;
+
+ if (!dhd->wlfc_enabled || dhd->wlfc_state)
+ return BCME_OK;
+
+ /* allocate space to track txstatus propagated from firmware */
+ dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+ if (dhd->wlfc_state == NULL)
+ return BCME_NOMEM;
+
+ /* initialize state space */
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+ /* remember osh & dhdp */
+ wlfc->osh = dhd->osh;
+ wlfc->dhdp = dhd;
+
+ wlfc->hanger =
+ dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+ if (wlfc->hanger == NULL) {
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ return BCME_NOMEM;
+ }
+
+ /* initialize all interfaces to accept traffic */
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ wlfc->hostif_flow_state[i] = OFF;
+ }
+
+ /*
+ create the SENDQ containing
+ sub-queues for all AC precedences + 1 for bc/mc traffic
+ */
+ pktq_init(&wlfc->SENDQ, (AC_COUNT + 1), WLFC_SENDQ_LEN);
+
+ wlfc->destination_entries.other.state = WLFC_STATE_OPEN;
+ /* bc/mc FIFO is always open [credit aside], i.e. b[5] */
+ wlfc->destination_entries.other.ac_bitmap = 0x1f;
+ wlfc->destination_entries.other.interface_id = 0;
+
+ wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+
+ wlfc->allow_credit_borrow = TRUE;
+ wlfc->borrow_defer_timestamp = 0;
+
+ return BCME_OK;
+}
+
+/* release all packet resources */
+void
+dhd_wlfc_cleanup(dhd_pub_t *dhd)
+{
+ int i;
+ int total_entries;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_hanger_t* h;
+
+ if (dhd->wlfc_state == NULL)
+ return;
+
+ total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+ /* search all entries, include nodes as well as interfaces */
+ table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+ for (i = 0; i < total_entries; i++) {
+ if (table[i].occupied) {
+ if (table[i].psq.len) {
+ WLFC_DBGMESG(("%s(): DELAYQ[%d].len = %d\n",
+ __FUNCTION__, i, table[i].psq.len));
+ /* release packets held in DELAYQ */
+ pktq_flush(wlfc->osh, &table[i].psq, TRUE, NULL, 0);
+ }
+ table[i].occupied = 0;
+ }
+ }
+ /* release packets held in SENDQ */
+ if (wlfc->SENDQ.len)
+ pktq_flush(wlfc->osh, &wlfc->SENDQ, TRUE, NULL, 0);
+ /* any in the hanger? */
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
+ }
+ }
+ return;
+}
+
+void
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+ /* cleanup all psq related resources */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+
+ if (dhd->wlfc_state == NULL)
+ return;
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+ __FUNCTION__, i, h->items[i].pkt,
+ DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+ }
+ }
+ }
+#endif
+ /* delete hanger */
+ dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+
+ /* free top structure */
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ return;
+}
+#endif /* PROP_TXSTATUS */
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state)
+ dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif /* BDC */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ /* Push the BDC header, used to convey priority on buses that don't carry it themselves */
+
+ PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN);
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(pktbuf))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+ h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = 0;
+ BDC_SET_IF_IDX(h, ifidx);
+#endif /* BDC */
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ /* Pop the BDC header, used to convey priority on buses that don't carry it themselves */
+
+ if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+ __FUNCTION__, *ifidx));
+ return BCME_ERROR;
+ }
+
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+ DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+ h->dataOffset = 0;
+ else
+ return BCME_ERROR;
+ }
+
+ if (h->flags & BDC_FLAG_SUM_GOOD) {
+ DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ PKTSETSUMGOOD(pktbuf, TRUE);
+ }
+
+ PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+ PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+ if (PKTLEN(dhd->osh, pktbuf) < (uint32) (h->dataOffset << 2)) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), (h->dataOffset * 4)));
+ return BCME_ERROR;
+ }
+
+#ifdef PROP_TXSTATUS
+ if (dhd->wlfc_state &&
+ ((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode
+ != WLFC_FCMODE_NONE &&
+ (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf)))) {
+ /*
+ - parse txstatus only for packets that came from the firmware
+ */
+ dhd_os_wlfc_block(dhd);
+ dhd_wlfc_parse_header_info(dhd, pktbuf, (h->dataOffset << 2));
+ ((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++;
+ dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+ (void *)dhd->bus);
+ dhd_os_wlfc_unblock(dhd);
+ }
+#endif /* PROP_TXSTATUS */
+ PKTPULL(dhd->osh, pktbuf, (h->dataOffset << 2));
+ return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *cdc;
+
+ if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd->osh, DHD_PREALLOC_PROT,
+ sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(cdc, 0, sizeof(dhd_prot_t));
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+ DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+ goto fail;
+ }
+
+ dhd->prot = cdc;
+#ifdef BDC
+ dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+ dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+ return 0;
+
+fail:
+#ifndef DHD_USE_STATIC_BUF
+ if (cdc != NULL)
+ MFREE(dhd->osh, cdc, sizeof(dhd_prot_t));
+#endif
+ return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_deinit(dhd);
+#endif
+#ifndef DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif
+ dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ /* No stats from dongle added yet, copy bus stats */
+ dhd->dstats.tx_packets = dhd->tx_packets;
+ dhd->dstats.tx_errors = dhd->tx_errors;
+ dhd->dstats.rx_packets = dhd->rx_packets;
+ dhd->dstats.rx_errors = dhd->rx_errors;
+ dhd->dstats.rx_dropped = dhd->rx_dropped;
+ dhd->dstats.multicast = dhd->rx_multicast;
+ return;
+}
+
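+/* Late protocol init: query the dongle revision info and, where enabled,
+ * bring up proptxstatus flow control and run the pre-init ioctls.
+ */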
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ wlc_rev_info_t revinfo;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0)
+ goto done;
+
+
+#ifdef PROP_TXSTATUS
+ ret = dhd_wlfc_init(dhd);
+#endif
+
+#if !defined(WL_CFG80211)
+ ret = dhd_preinit_ioctls(dhd);
+#endif /* WL_CFG80211 */
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+
+done:
+ return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+ /* Nothing to do for CDC */
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 000000000000..372ec80c866a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,2306 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c 290546 2011-10-19 01:55:21Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+
+#include <proto/bcmevent.h>
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+char fw_path[MOD_PARAM_PATHLEN];
+char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+
+#ifdef DHD_DEBUG
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
+ __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+/* IOVar table */
+enum {
+ IOV_VERSION = 1,
+ IOV_MSGLEVEL,
+ IOV_BCMERRORSTR,
+ IOV_BCMERROR,
+ IOV_WDTICK,
+ IOV_DUMP,
+ IOV_CLEARCOUNTS,
+ IOV_LOGDUMP,
+ IOV_LOGCAL,
+ IOV_LOGSTAMP,
+ IOV_GPIOOB,
+ IOV_IOCTLTIMEOUT,
+ IOV_HCI_CMD, /* HCI command */
+ IOV_HCI_ACL_DATA, /* HCI data packet */
+#if defined(DHD_DEBUG)
+ IOV_CONS,
+ IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+ IOV_PROPTXSTATUS_ENABLE,
+ IOV_PROPTXSTATUS_MODE,
+#endif
+ IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+ IOV_WLPKTDLYSTAT_SZ,
+#endif
+ IOV_CHANGEMTU,
+ IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+ {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+ {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+ {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN },
+ {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 },
+ {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 },
+ {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+ {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 },
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 },
+#endif
+ {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 },
+ {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 },
+ {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 },
+ {"HCI_cmd", IOV_HCI_CMD, 0, IOVT_BUFFER, 0},
+ {"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0},
+#ifdef PROP_TXSTATUS
+ {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_UINT32, 0 },
+ /*
+ set the proptxstatus operation mode:
+ 0 - Do not do any proptxstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 },
+#endif
+ {"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+ {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+ {"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+struct dhd_cmn *
+dhd_common_init(uint16 devid, osl_t *osh)
+{
+ dhd_cmn_t *cmn;
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver. Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behavior since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent initializations.
+ */
+ /* Allocate private bus interface state */
+ if (!(cmn = MALLOC(osh, sizeof(dhd_cmn_t)))) {
+ DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
+ return NULL;
+ }
+ memset(cmn, 0, sizeof(dhd_cmn_t));
+ cmn->osh = osh;
+
+#ifdef CONFIG_BCMDHD_FW_PATH
+ bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
+#elif defined(CONFIG_BCMDHD_FW_DIR) /* CONFIG_BCMDHD_FW_PATH */
+ sprintf(fw_path, "%s/bcm%x/fw_bcmdhd.bin", CONFIG_BCMDHD_FW_DIR, devid);
+#else
+ fw_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_FW_DIR */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+#elif defined(CONFIG_BCMDHD_NVRAM_DIR) /* CONFIG_BCMDHD_NVRAM_PATH */
+ sprintf(nv_path, "%s/nvram_%x.txt", CONFIG_BCMDHD_NVRAM_DIR, devid);
+#else
+ nv_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+#ifdef SOFTAP
+ fw_path2[0] = '\0';
+#endif
+ DHD_ERROR(("bcmdhd: fw_path: %s nvram_path: %s\n", fw_path, nv_path));
+ return cmn;
+}
+
+void
+dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn)
+{
+ osl_t *osh;
+ dhd_cmn_t *cmn;
+
+ if (dhd_pub != NULL)
+ cmn = dhd_pub->cmn;
+ else
+ cmn = sa_cmn;
+
+ if (!cmn)
+ return;
+
+ osh = cmn->osh;
+
+ if (dhd_pub != NULL)
+ dhd_pub->cmn = NULL;
+ MFREE(osh, cmn, sizeof(dhd_cmn_t));
+}
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+
+ bcm_binit(strbuf, buf, buflen);
+
+ /* Base DHD info */
+ bcm_bprintf(strbuf, "%s\n", dhd_version);
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+ dhdp->up, dhdp->txoff, dhdp->busstate);
+ bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+ dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+ bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+ dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+ bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
+
+ bcm_bprintf(strbuf, "dongle stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
+ dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+ dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+ bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
+ dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+ dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+ bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
+
+ bcm_bprintf(strbuf, "bus stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+ dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+ bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+ dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+ bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
+ dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+ bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld\n",
+ dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+ bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld\n",
+ dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any prot info */
+ dhd_prot_dump(dhdp, strbuf);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any bus info */
+ dhd_bus_dump(dhdp, strbuf);
+
+ return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
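+/* Convenience wrapper: pack cmd/arg/len/set into a wl_ioctl_t and dispatch it
+ * through dhd_wl_ioctl().
+ */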
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
+{
+ wl_ioctl_t ioc;
+
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+
+ return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
+}
+
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
+{
+ int ret;
+
+ dhd_os_proto_block(dhd_pub);
+
+ ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
+ if (ret)
+ dhd_os_check_hang(dhd_pub, ifindex, ret);
+
+ dhd_os_proto_unblock(dhd_pub);
+ return ret;
+}
+
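+/* Perform a single get/set on one of the driver iovars in dhd_iovars[];
+ * integer-sized values travel through int_val, buffer values use arg/len.
+ */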
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_VERSION):
+ /* Need to have checked buffer length */
+ bcm_strncpy_s((char*)arg, len, dhd_version, len);
+ break;
+
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)dhd_msg_level;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ dhd_msg_level = int_val;
+ break;
+ case IOV_GVAL(IOV_BCMERRORSTR):
+ bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+ ((char *)arg)[BCME_STRLEN - 1] = 0x00;
+ break;
+
+ case IOV_GVAL(IOV_BCMERROR):
+ int_val = (int32)dhd_pub->bcmerror;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_WDTICK):
+ int_val = (int32)dhd_watchdog_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WDTICK):
+ if (!dhd_pub->up) {
+ bcmerror = BCME_NOTUP;
+ break;
+ }
+ dhd_os_wd_timer(dhd_pub, (uint)int_val);
+ break;
+
+ case IOV_GVAL(IOV_DUMP):
+ bcmerror = dhd_dump(dhd_pub, arg, len);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_DCONSOLE_POLL):
+ int_val = (int32)dhd_console_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DCONSOLE_POLL):
+ dhd_console_ms = (uint)int_val;
+ break;
+
+ case IOV_SVAL(IOV_CONS):
+ if (len > 0)
+ bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+ break;
+#endif /* DHD_DEBUG */
+
+ case IOV_SVAL(IOV_CLEARCOUNTS):
+ dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+ dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+ dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+ dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+ dhd_pub->rx_dropped = 0;
+ dhd_pub->rx_readahead_cnt = 0;
+ dhd_pub->tx_realloc = 0;
+ dhd_pub->wd_dpc_sched = 0;
+ memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+ dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+ /* clear proptxstatus related counters */
+ if (dhd_pub->wlfc_state) {
+ athost_wl_status_info_t *wlfc =
+ (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+ wlfc_hanger_t* hanger;
+
+ memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+ hanger = (wlfc_hanger_t*)wlfc->hanger;
+ hanger->pushed = 0;
+ hanger->popped = 0;
+ hanger->failed_slotfind = 0;
+ hanger->failed_to_pop = 0;
+ hanger->failed_to_push = 0;
+ }
+#endif /* PROP_TXSTATUS */
+ break;
+
+
+ case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+ int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+ if (int_val <= 0)
+ bcmerror = BCME_BADARG;
+ else
+ dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+ break;
+ }
+
+ case IOV_SVAL(IOV_HCI_CMD): {
+ amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
+
+ /* sanity check: command preamble present */
+ if (len < HCI_CMD_PREAMBLE_SIZE)
+ return BCME_BUFTOOSHORT;
+
+ /* sanity check: command parameters are present */
+ if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
+ return BCME_BUFTOOSHORT;
+
+ dhd_bta_docmd(dhd_pub, cmd, len);
+ break;
+ }
+
+ case IOV_SVAL(IOV_HCI_ACL_DATA): {
+ amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
+
+ /* sanity check: HCI header present */
+ if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
+ return BCME_BUFTOOSHORT;
+
+ /* sanity check: ACL data is present */
+ if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
+ return BCME_BUFTOOSHORT;
+
+ dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
+ break;
+ }
+
+#ifdef PROP_TXSTATUS
+ case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE):
+ int_val = dhd_pub->wlfc_enabled? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE):
+ dhd_pub->wlfc_enabled = int_val? 1 : 0;
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_MODE): {
+ athost_wl_status_info_t *wlfc =
+ (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+ int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+ if (dhd_pub->wlfc_state) {
+ athost_wl_status_info_t *wlfc =
+ (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+ wlfc->proptxstatus_mode = int_val & 0xff;
+ }
+ break;
+#endif /* PROP_TXSTATUS */
+
+ case IOV_GVAL(IOV_BUS_TYPE):
+ /* The dhd application queries the driver to check whether it is USB or SDIO. */
+#ifdef BCMDHDUSB
+ int_val = BUS_TYPE_USB;
+#else
+ int_val = BUS_TYPE_SDIO;
+#endif
+ bcopy(&int_val, arg, val_size);
+ break;
+
+
+#ifdef WLMEDIA_HTSF
+ case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+ int_val = dhd_pub->htsfdlystat_sz;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+ dhd_pub->htsfdlystat_sz = int_val & 0xff;
+ printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+ break;
+#endif
+ case IOV_SVAL(IOV_CHANGEMTU):
+ int_val &= 0xffff;
+ bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+ return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+ /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+ * because an encryption/rsn mismatch results in both events, and
+ * the important information is in the WLC_E_PRUNE.
+ */
+ if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+ dhd_conn_event == WLC_E_PRUNE)) {
+ dhd_conn_event = event;
+ dhd_conn_status = status;
+ dhd_conn_reason = reason;
+ }
+}
+
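+/* Enqueue pkt at precedence prec. When the queue is full, a packet of lower
+ * (or, depending on the WME discard policy, equal) precedence is evicted to
+ * make room. Returns TRUE if the packet was queued, FALSE if it was refused.
+ */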
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+ void *p;
+ int eprec = -1; /* precedence to evict from */
+ bool discard_oldest;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+ pktq_penq(q, prec, pkt);
+ return TRUE;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec))
+ eprec = prec;
+ else if (pktq_full(q)) {
+ p = pktq_peek_tail(q, &eprec);
+ ASSERT(p);
+ if (eprec > prec || eprec < 0)
+ return FALSE;
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ ASSERT(!pktq_pempty(q, eprec));
+ discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+ if (eprec == prec && !discard_oldest)
+ return FALSE; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+ ASSERT(p);
+
+ PKTFREE(dhdp->osh, p, TRUE);
+ }
+
+ /* Enqueue */
+ p = pktq_penq(q, prec, pkt);
+ ASSERT(p);
+
+ return TRUE;
+}
+
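+/* Look up the named iovar, validate the buffer length for its type and
+ * forward the request to dhd_doiovar().
+ */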
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+ int val_size;
+ const bcm_iovar_t *vi = NULL;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+ bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
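+/* Driver-private ioctl entry point: answers magic/version queries and routes
+ * DHD_GET_VAR/DHD_SET_VAR through the generic, protocol and bus iovar tables
+ * in turn.
+ */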
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+ int bcmerror = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!buf) {
+ return BCME_BADARG;
+ }
+
+ switch (ioc->cmd) {
+ case DHD_GET_MAGIC:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_MAGIC;
+ break;
+
+ case DHD_GET_VERSION:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_VERSION;
+ break;
+
+ case DHD_GET_VAR:
+ case DHD_SET_VAR: {
+ char *arg;
+ uint arglen;
+
+ /* scan past the name to any arguments */
+ for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+ ;
+
+ if (*arg) {
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+
+ /* account for the NUL terminator */
+ arg++, arglen--;
+
+ /* call with the appropriate arguments */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+ buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+ if (bcmerror != BCME_UNSUPPORTED)
+ break;
+
+ /* not in generic table, try protocol module */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ if (bcmerror != BCME_UNSUPPORTED)
+ break;
+
+ /* if still not found, try bus module */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ arg, arglen, buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ }
+
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+ uint i, status, reason;
+ bool group = FALSE, flush_txq = FALSE, link = FALSE;
+ const char *auth_str;
+ const char *event_name;
+ uchar *buf;
+ char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+ uint event_type, flags, auth_type, datalen;
+
+ event_type = ntoh32(event->event_type);
+ flags = ntoh16(event->flags);
+ status = ntoh32(event->status);
+ reason = ntoh32(event->reason);
+ auth_type = ntoh32(event->auth_type);
+ datalen = ntoh32(event->datalen);
+
+ /* debug dump of event messages */
+ sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ (uchar)event->addr.octet[0]&0xff,
+ (uchar)event->addr.octet[1]&0xff,
+ (uchar)event->addr.octet[2]&0xff,
+ (uchar)event->addr.octet[3]&0xff,
+ (uchar)event->addr.octet[4]&0xff,
+ (uchar)event->addr.octet[5]&0xff);
+
+ event_name = "UNKNOWN";
+ for (i = 0; i < (uint)bcmevent_names_size; i++)
+ if (bcmevent_names[i].event == event_type)
+ event_name = bcmevent_names[i].name;
+
+ if (flags & WLC_EVENT_MSG_LINK)
+ link = TRUE;
+ if (flags & WLC_EVENT_MSG_GROUP)
+ group = TRUE;
+ if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+ flush_txq = TRUE;
+
+ switch (event_type) {
+ case WLC_E_START:
+ case WLC_E_DEAUTH:
+ case WLC_E_DISASSOC:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_ASSOC:
+ case WLC_E_REASSOC:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+ event_name, eabuf, (int)reason));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+ event_name, eabuf, (int)status));
+ }
+ break;
+
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+ break;
+
+ case WLC_E_AUTH:
+ case WLC_E_AUTH_IND:
+ if (auth_type == DOT11_OPEN_SYSTEM)
+ auth_str = "Open System";
+ else if (auth_type == DOT11_SHARED_KEY)
+ auth_str = "Shared Key";
+ else {
+ sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+ auth_str = err_msg;
+ }
+ if (event_type == WLC_E_AUTH_IND) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+ event_name, eabuf, auth_str, (int)reason));
+ }
+
+ break;
+
+ case WLC_E_JOIN:
+ case WLC_E_ROAM:
+ case WLC_E_SET_SSID:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+ } else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status));
+ }
+ break;
+
+ case WLC_E_BEACON_RX:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+ }
+ break;
+
+ case WLC_E_LINK:
+ DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+ break;
+
+ case WLC_E_MIC_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+ event_name, eabuf, group, flush_txq));
+ break;
+
+ case WLC_E_ICV_ERROR:
+ case WLC_E_UNICAST_DECODE_ERROR:
+ case WLC_E_MULTICAST_DECODE_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+ event_name, eabuf));
+ break;
+
+ case WLC_E_TXFAIL:
+ DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_SCAN_COMPLETE:
+ case WLC_E_PMKID_CACHE:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PFN_NET_FOUND:
+ case WLC_E_PFN_NET_LOST:
+ case WLC_E_PFN_SCAN_COMPLETE:
+ case WLC_E_PFN_SCAN_NONE:
+ case WLC_E_PFN_SCAN_ALLGONE:
+ DHD_EVENT(("PNOEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PSK_SUP:
+ case WLC_E_PRUNE:
+ DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+ event_name, (int)status, (int)reason));
+ break;
+
+#ifdef WIFI_ACT_FRAME
+ case WLC_E_ACTION_FRAME:
+ DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+ break;
+#endif /* WIFI_ACT_FRAME */
+
+ case WLC_E_TRACE: {
+ static uint32 seqnum_prev = 0;
+ msgtrace_hdr_t hdr;
+ uint32 nblost;
+ char *s, *p;
+
+ buf = (uchar *) event_data;
+ memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+ if (hdr.version != MSGTRACE_VERSION) {
+ printf("\nMACEVENT: %s [unsupported version --> "
+ "dhd version:%d dongle version:%d]\n",
+ event_name, MSGTRACE_VERSION, hdr.version);
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+ }
+
+ /* There are 2 bytes available at the end of data */
+ buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+ if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+ printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+ "discarded_bytes %d discarded_printf %d]\n",
+ ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+ }
+
+ nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+ if (nblost > 0) {
+ printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
+ ntoh32(hdr.seqnum), nblost);
+ }
+ seqnum_prev = ntoh32(hdr.seqnum);
+
+ /* Display the trace buffer. Advance from one '\n' to the next to avoid
+ * one oversized printf (an issue with the Linux printk).
+ */
+ p = (char *)&buf[MSGTRACE_HDRLEN];
+ while ((s = strstr(p, "\n")) != NULL) {
+ *s = '\0';
+ printf("%s\n", p);
+ p = s+1;
+ }
+ printf("%s\n", p);
+
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+ }
+
+
+ case WLC_E_RSSI:
+ DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+ break;
+
+ default:
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type));
+ break;
+ }
+
+ /* show any appended data */
+ if (datalen) {
+ buf = (uchar *) event_data;
+ DHD_EVENT((" data (%d) : ", datalen));
+ for (i = 0; i < datalen; i++)
+ DHD_EVENT((" 0x%02x ", *buf++));
+ DHD_EVENT(("\n"));
+ }
+}
+#endif /* SHOW_EVENTS */
+
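+/* Validate a Broadcom event packet (OUI and subtype), extract the embedded
+ * wl_event_msg_t and payload pointer, handle interface add/delete and
+ * flow-control events, and push the event up to any external
+ * supplicant/authenticator.
+ */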
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data_ptr)
+{
+ /* check whether packet is a BRCM event pkt */
+ bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+ uint8 *event_data;
+ uint32 type, status, reason, datalen;
+ uint16 flags;
+ int evlen;
+
+ if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+ DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+ return (BCME_ERROR);
+ }
+
+ /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+ if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+ DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+ return (BCME_ERROR);
+ }
+
+ *data_ptr = &pvt_data[1];
+ event_data = *data_ptr;
+
+ /* memcpy since BRCM event pkt may be unaligned. */
+ memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+ type = ntoh32_ua((void *)&event->event_type);
+ flags = ntoh16_ua((void *)&event->flags);
+ status = ntoh32_ua((void *)&event->status);
+ reason = ntoh32_ua((void *)&event->reason);
+ datalen = ntoh32_ua((void *)&event->datalen);
+ evlen = datalen + sizeof(bcm_event_t);
+
+ switch (type) {
+#ifdef PROP_TXSTATUS
+ case WLC_E_FIFO_CREDIT_MAP:
+ dhd_wlfc_event(dhd_pub->info);
+ dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data);
+ WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+ "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+ event_data[2],
+ event_data[3], event_data[4], event_data[5]));
+ break;
+#endif
+
+ case WLC_E_IF:
+ {
+ dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+#ifdef PROP_TXSTATUS
+ {
+ uint8* ea = pvt_data->eth.ether_dhost;
+ WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+ "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ ifevent->ifidx,
+ ((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"),
+ ((ifevent->is_AP == 0) ? "STA":"AP "),
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+ (void)ea;
+
+ dhd_wlfc_interface_event(dhd_pub->info,
+ ((ifevent->action == WLC_E_IF_ADD) ?
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+ ifevent->ifidx, ifevent->is_AP, ea);
+
+ /* dhd has already created the default interface (index 0) */
+ if (ifevent->ifidx == 0)
+ break;
+ }
+#endif /* PROP_TXSTATUS */
+
+#ifdef WL_CFG80211
+ if (wl_cfg80211_is_progress_ifchange()) {
+ DHD_ERROR(("%s: ifidx %d for %s action %d\n",
+ __FUNCTION__, ifevent->ifidx,
+ event->ifname, ifevent->action));
+ if (ifevent->action == WLC_E_IF_ADD)
+ wl_cfg80211_notify_ifchange();
+ return (BCME_OK);
+ }
+#endif /* WL_CFG80211 */
+ if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+ if (ifevent->action == WLC_E_IF_ADD) {
+ if (dhd_add_if(dhd_pub->info, ifevent->ifidx,
+ NULL, event->ifname,
+ event->addr.octet,
+ ifevent->flags, ifevent->bssidx)) {
+ DHD_ERROR(("%s: dhd_add_if failed!!"
+ " ifidx: %d for %s\n",
+ __FUNCTION__,
+ ifevent->ifidx,
+ event->ifname));
+ return (BCME_ERROR);
+ }
+ }
+ else
+ dhd_del_if(dhd_pub->info, ifevent->ifidx);
+ } else {
+#ifndef PROP_TXSTATUS
+ DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+ __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS */
+ }
+ }
+ /* send up the if event: btamp user needs it */
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+ break;
+
+
+#ifdef WLMEDIA_HTSF
+ case WLC_E_HTSFSYNC:
+ htsf_update(dhd_pub->info, event_data);
+ break;
+#endif /* WLMEDIA_HTSF */
+ case WLC_E_NDIS_LINK: {
+ uint32 temp = hton32(WLC_E_LINK);
+
+ memcpy((void *)(&pvt_data->event.event_type), &temp,
+ sizeof(pvt_data->event.event_type));
+ }
+ /* These are what external supplicant/authenticator wants */
+ /* fall through */
+ case WLC_E_LINK:
+ case WLC_E_DEAUTH:
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC:
+ case WLC_E_DISASSOC_IND:
+ DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
+ /* fall through */
+ default:
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+ DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
+
+ /* put it back to WLC_E_NDIS_LINK */
+ if (type == WLC_E_NDIS_LINK) {
+ uint32 temp;
+
+ temp = ntoh32_ua((void *)&event->event_type);
+ DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+
+ temp = ntoh32(WLC_E_NDIS_LINK);
+ memcpy((void *)(&pvt_data->event.event_type), &temp,
+ sizeof(pvt_data->event.event_type));
+ }
+ break;
+ }
+
+#ifdef SHOW_EVENTS
+ wl_show_host_event(event, (void *)event_data);
+#endif /* SHOW_EVENTS */
+
+ return (BCME_OK);
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+ int i, j = 0;
+ unsigned char *buf = pbuf;
+
+ if (bytes_per_line == 0) {
+ bytes_per_line = len;
+ }
+
+ for (i = 0; i < len; i++) {
+ printf("%2.2x", *buf++);
+ j++;
+ if (j == bytes_per_line) {
+ printf("\n");
+ j = 0;
+ } else {
+ printf(":");
+ }
+ }
+ printf("\n");
+#endif /* DHD_DEBUG */
+}
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+/* Convert user's input in hex pattern to byte-size mask */
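+/* For example, the string "0xff00" yields the two bytes {0xff, 0x00} in dst
+ * and a return value of 2 (the values here are purely illustrative).
+ */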
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 &&
+ strncmp(src, "0X", 2) != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ bcm_strncpy_s(num, sizeof(num), src, 2);
+ num[2] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += 2;
+ }
+ return i;
+}
+
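+/* Enable or disable the packet filter whose numeric id is the first token of
+ * arg (e.g. an illustrative "100"), then program the global filter mode via
+ * "pkt_filter_mode".
+ */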
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+ char *argv[8];
+ int i = 0;
+ const char *str;
+ int buf_len;
+ int str_len;
+ char *arg_save = 0, *arg_org = 0;
+ int rc;
+ char buf[128];
+ wl_pkt_filter_enable_t enable_parm;
+ wl_pkt_filter_enable_t * pkt_filterp;
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ arg_org = arg_save;
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_enable";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, sizeof(buf), str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+ /* Parse enable/disable value. */
+ enable_parm.enable = htod32(enable);
+
+ buf_len += sizeof(enable_parm);
+ memcpy((char *)pkt_filterp,
+ &enable_parm,
+ sizeof(enable_parm));
+
+ /* Enable/disable the specified filter. */
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+ /* Control the master mode */
+ bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
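+/* Install a packet filter described by a space-separated arg string of the
+ * form "<id> <polarity> <type> <offset> <mask> <pattern>", where <mask> and
+ * <pattern> are 0x-prefixed hex strings of equal length (the field names are
+ * descriptive, taken from the parsing below).
+ */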
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc;
+ uint32 mask_size;
+ uint32 pattern_size;
+ char *argv[8], * buf = 0;
+ int i = 0;
+ char *arg_save = 0, *arg_org = 0;
+#define BUF_SIZE 2048
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ arg_org = arg_save;
+
+ if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ if (strlen(arg) > BUF_SIZE) {
+ DHD_ERROR(("Argument too long: %d > %d\n", (int)strlen(arg), BUF_SIZE));
+ goto fail;
+ }
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+ while (argv[i++])
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+ buf[ str_len ] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Polarity not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Filter type not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Offset not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Bitmask not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter mask. */
+ mask_size =
+ htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Pattern not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter pattern. */
+ pattern_size =
+ htod32(wl_pattern_atoh(argv[i],
+ (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ DHD_ERROR(("Mask and pattern not the same size\n"));
+ goto fail;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Filter attributes are set in a local variable (pkt_filter), and
+ ** then memcpy'ed into the buffer (pkt_filterp) since there is no
+ ** guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp,
+ &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+ if (buf)
+ MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ __FUNCTION__, arp_mode, retcode));
+ else
+ DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+ __FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+ __FUNCTION__, arp_enable, retcode));
+ else
+ DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+}
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
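+/* Add a single IPv4 address to the dongle's ARP-offload host IP table via the
+ * "arp_hostip" iovar.
+ */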
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr)
+{
+ int iov_len = 0;
+ char iovbuf[32];
+ int retcode;
+
+ iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+
+ if (retcode)
+ DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+ __FUNCTION__, retcode));
+ else
+ DHD_TRACE(("%s: sARP H ipaddr entry added \n",
+ __FUNCTION__));
+}
+
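+/* Read the dongle's ARP-offload host IP table into buf and zero out every
+ * entry after the first empty (all-zero) slot.
+ */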
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
+{
+ int retcode, i;
+ int iov_len = 0;
+ uint32 *ptr32 = buf;
+ bool clr_bottom = FALSE;
+
+ if (!buf)
+ return -1;
+
+ iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, TRUE, 0);
+
+ if (retcode) {
+ DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+ __FUNCTION__, retcode));
+
+ return -1;
+ }
+
+ /* clean up the rest of the buffer: zero every entry after the first empty (end-of-list) slot */
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (!clr_bottom) {
+ if (*ptr32 == 0)
+ clr_bottom = TRUE;
+ } else {
+ *ptr32 = 0;
+ }
+ ptr32++;
+ }
+
+ return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+ case WLC_E_BTA_HCI_EVENT:
+ break;
+ default:
+ break;
+ }
+
+ /* Call per-port handler. */
+ dhd_sendup_event(dhdp, event, data);
+}
+
+#ifdef SIMPLE_ISCAN
+
+uint iscan_thread_id = 0;
+iscan_buf_t * iscan_chain = 0;
+
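+/* Allocate a new iscan result node and append it to the tail of *iscanbuf;
+ * the chain head is created on the first call.
+ */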
+iscan_buf_t *
+dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
+{
+ iscan_buf_t *iscanbuf_alloc = 0;
+ iscan_buf_t *iscanbuf_head;
+
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+ dhd_iscan_lock();
+
+ iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
+ if (iscanbuf_alloc == NULL)
+ goto fail;
+
+ iscanbuf_alloc->next = NULL;
+ iscanbuf_head = *iscanbuf;
+
+ DHD_ISCAN(("%s: addr of allocated node = 0x%X"
+ "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
+ __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
+
+ if (iscanbuf_head == NULL) {
+ *iscanbuf = iscanbuf_alloc;
+ DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
+ goto fail;
+ }
+
+ while (iscanbuf_head->next)
+ iscanbuf_head = iscanbuf_head->next;
+
+ iscanbuf_head->next = iscanbuf_alloc;
+
+fail:
+ dhd_iscan_unlock();
+ return iscanbuf_alloc;
+}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+ iscan_buf_t *iscanbuf_free = 0;
+ iscan_buf_t *iscanbuf_prv = 0;
+ iscan_buf_t *iscanbuf_cur;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+
+ dhd_iscan_lock();
+
+ iscanbuf_cur = iscan_chain;
+
+ /* If iscan_delete is null then delete the entire
+ * chain or else delete specific one provided
+ */
+ if (!iscan_delete) {
+ while (iscanbuf_cur) {
+ iscanbuf_free = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ iscanbuf_free->next = 0;
+ MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+ }
+ iscan_chain = 0;
+ } else {
+ while (iscanbuf_cur) {
+ if (iscanbuf_cur == iscan_delete)
+ break;
+ iscanbuf_prv = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ }
+ if (iscanbuf_prv)
+ iscanbuf_prv->next = iscan_delete->next;
+
+ iscan_delete->next = 0;
+ MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+
+ if (!iscanbuf_prv)
+ iscan_chain = 0;
+ }
+ dhd_iscan_unlock();
+}
+
+iscan_buf_t *
+dhd_iscan_result_buf(void)
+{
+ return iscan_chain;
+}
+
+int
+dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
+{
+ int rc = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ char *buf = NULL;
+ char iovar[] = "iscan";
+ uint32 allocSize = 0;
+ wl_ioctl_t ioctl;
+
+ if (pParams) {
+ allocSize = (size + strlen(iovar) + 1);
+ if ((allocSize < size) || (allocSize < strlen(iovar)))
+ {
+ DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
+ __FUNCTION__, allocSize, size, strlen(iovar)));
+ goto cleanUp;
+ }
+ buf = MALLOC(dhd->osh, allocSize);
+
+ if (buf == NULL)
+ {
+ DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
+ goto cleanUp;
+ }
+ ioctl.cmd = WLC_SET_VAR;
+ bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, allocSize);
+ }
+
+cleanUp:
+ if (buf) {
+ MFREE(dhd->osh, buf, allocSize);
+ }
+
+ return rc;
+}
+
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+ wl_iscan_results_t *list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ iscan_buf_t *iscan_cur;
+ int status = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ int rc;
+ wl_ioctl_t ioctl;
+
+ DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
+
+ iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+ if (!iscan_cur) {
+ DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+ dhd_iscan_free_buf(dhdp, 0);
+ dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+ dhd_ind_scan_confirm(dhdp, FALSE);
+ goto fail;
+ }
+
+ dhd_iscan_lock();
+
+ memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+ iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+ ioctl.cmd = WLC_GET_VAR;
+ ioctl.set = FALSE;
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ *scan_count = results->count = dtoh32(results->count);
+ status = dtoh32(list_buf->status);
+ DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
+
+ dhd_iscan_unlock();
+
+ if (!(*scan_count)) {
+ /* TODO: race condition when FLUSH already called */
+ dhd_iscan_free_buf(dhdp, 0);
+ }
+fail:
+ return status;
+}
+
+#endif /* SIMPLE_ISCAN */
+
+/*
+ * returns = TRUE if associated, FALSE if not associated
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf)
+{
+ char bssid[6], zbuf[6];
+ int ret = -1;
+
+ bzero(bssid, 6);
+ bzero(zbuf, 6);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+ DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+ if (ret == BCME_NOTASSOCIATED) {
+ DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+ }
+
+ if (ret < 0)
+ return FALSE;
+
+ if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
+ /* STA is associated: BSSID is non-zero */
+
+ if (bss_buf) {
+ /* return bss if caller provided buf */
+ memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
+ }
+ return TRUE;
+ } else {
+ DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+ return FALSE;
+ }
+}
+
+
+/* Function to estimate possible DTIM_SKIP value */
+int
+dhd_get_dtim_skip(dhd_pub_t *dhd)
+{
+ int bcn_li_dtim;
+ int ret = -1;
+ int dtim_assoc = 0;
+
+ if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
+ bcn_li_dtim = 3;
+ else
+ bcn_li_dtim = dhd->dtim_skip;
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ /* if assoc grab ap's dtim value */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+ &dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+ __FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));
+
+ /* if the AP reported no DTIM period, just exit */
+ if (dtim_assoc == 0) {
+ goto exit;
+ }
+
+ /* check if sta listen interval fits into AP dtim */
+ if (dtim_assoc > LISTEN_INTERVAL) {
+ /* AP DTIM too big for our listen interval: no DTIM skipping */
+ bcn_li_dtim = 1;
+ DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+ __FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
+ goto exit;
+ }
+
+ if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
+ /* Round up dtim_skip to fit into STAs Listen Interval */
+ bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
+ DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+ }
+
+exit:
+ return bcn_li_dtim;
+}
+
+/* Check if HostAPD or WFD mode setup */
+bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd)
+{
+#ifdef WL_CFG80211
+ if (((dhd->op_mode & HOSTAPD_MASK) == HOSTAPD_MASK) ||
+ ((dhd->op_mode & WFD_MASK) == WFD_MASK))
+ return TRUE;
+ else
+#endif /* WL_CFG80211 */
+ return FALSE;
+}
+
+#ifdef PNO_SUPPORT
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+ char iovbuf[128];
+ int pfn_enabled = 0;
+ int iov_len = 0;
+ int ret;
+
+ /* Disable pfn */
+ iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) >= 0) {
+ /* clear pfn */
+ iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+ if (iov_len) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ iov_len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ }
+ }
+ else {
+ ret = -1;
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+ }
+ }
+ else
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+ char iovbuf[128];
+ int ret = -1;
+
+ if ((!dhd) || ((pfn_enabled != 0) && (pfn_enabled != 1))) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return ret;
+ }
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (ret);
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+
+ if ((pfn_enabled) && (dhd_is_associated(dhd, NULL) == TRUE)) {
+ DHD_ERROR(("%s pno is NOT enable : called in assoc mode , ignore\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Enable/disable PNO */
+ if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ else {
+ dhd->pno_enable = pfn_enabled;
+ DHD_TRACE(("%s set pno as %s\n",
+ __FUNCTION__, dhd->pno_enable ? "Enable" : "Disable"));
+ }
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+/* Function to execute combined scan */
+int
+dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr,
+ int pno_repeat, int pno_freq_expo_max)
+{
+ int err = -1;
+ char iovbuf[128];
+ int k, i;
+ wl_pfn_param_t pfn_param;
+ wl_pfn_t pfn_element;
+ uint len = 0;
+
+ DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
+
+ if ((!dhd) || (!ssids_local)) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return err;
+ }
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (err);
+
+ /* Check for broadcast ssid */
+ for (k = 0; k < nssid; k++) {
+ if (!ssids_local[k].SSID_len) {
+ DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
+ return err;
+ }
+ }
+/* #define PNO_DUMP 1 */
+#ifdef PNO_DUMP
+ {
+ int j;
+ for (j = 0; j < nssid; j++) {
+ DHD_ERROR(("%d: scan for %s size =%d\n", j,
+ ssids_local[j].SSID, ssids_local[j].SSID_len));
+ }
+ }
+#endif /* PNO_DUMP */
+
+ /* clean up everything */
+ if ((err = dhd_pno_clean(dhd)) < 0) {
+ DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
+ return err;
+ }
+ memset(iovbuf, 0, sizeof(iovbuf));
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ memset(&pfn_element, 0, sizeof(pfn_element));
+
+ /* set pfn parameters */
+ pfn_param.version = htod32(PFN_VERSION);
+ pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+ /* check and set extra pno params */
+ if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
+ pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat = (uchar) (pno_repeat);
+ pfn_param.exp = (uchar) (pno_freq_expo_max);
+ }
+ /* set up pno scan fr */
+ if (scan_fr != 0)
+ pfn_param.scan_freq = htod32(scan_fr);
+
+ if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
+ DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+ return err;
+ }
+ if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
+ DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ return err;
+ }
+
+ len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+ if ((err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s pfn_set failed for error=%d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+
+ /* set all pfn ssid */
+ for (i = 0; i < nssid; i++) {
+
+ pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+ pfn_element.auth = (DOT11_OPEN_SYSTEM);
+ pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+ pfn_element.wsec = htod32(0);
+ pfn_element.infra = htod32(1);
+ pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+ memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
+ pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
+
+ if ((len =
+ bcm_mkiovar("pfn_add", (char *)&pfn_element,
+ sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+ if ((err =
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for i=%d error=%d\n",
+ __FUNCTION__, i, err));
+ return err;
+ }
+ else
+ DHD_TRACE(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n",
+ __FUNCTION__, pfn_param.scan_freq,
+ pfn_param.repeat, pfn_param.exp));
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
+ }
+
+ /* Enable PNO */
+ /* dhd_pno_enable(dhd, 1); */
+ return err;
+}
+
+int
+dhd_pno_get_status(dhd_pub_t *dhd)
+{
+ int ret = -1;
+
+ if (!dhd)
+ return ret;
+ else
+ return (dhd->pno_enable);
+}
+
+#endif /* PNO_SUPPORT */
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+ char buf[256];
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int buf_len;
+ int str_len;
+ int res = -1;
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (res);
+
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[ str_len ] = '\0';
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+ mkeep_alive_pkt.period_msec = KEEP_ALIVE_PERIOD;
+ buf_len = str_len + 1;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ /* Setup keep alive zero for null packet generation */
+ mkeep_alive_pkt.keep_alive_id = 0;
+ mkeep_alive_pkt.len_bytes = 0;
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+ /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+ res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+ return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ * data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+ int input_size, int *bytes_left)
+{
+ char* str = *list_str;
+ uint16 short_temp;
+ uint32 int_temp;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ /* Clean all dest bytes */
+ memset(dst, 0, dst_size);
+ while (*bytes_left > 0) {
+
+ if (str[0] != token) {
+ DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+ __FUNCTION__, token, str[0], *bytes_left));
+ return -1;
+ }
+
+ *bytes_left -= 1;
+ str += 1;
+
+ if (input_size == 1) {
+ memcpy(dst, str, input_size);
+ }
+ else if (input_size == 2) {
+ memcpy(&short_temp, str, input_size);
+ short_temp = htod16(short_temp);
+ memcpy(dst, &short_temp, input_size);
+ }
+ else if (input_size == 4) {
+ memcpy(&int_temp, str, input_size);
+ int_temp = htod32(int_temp);
+ memcpy(dst, &int_temp, input_size);
+ }
+
+ *bytes_left -= input_size;
+ str += input_size;
+ *list_str = str;
+ return 1;
+ }
+ return 1;
+}
+
+/*
+ * channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+ int channel_num, int *bytes_left)
+{
+ char* str = *list_str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+ *list_str = str;
+ DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+ /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* All channels */
+ channel_list[idx] = 0x0;
+ }
+ else {
+ channel_list[idx] = (uint16)str[0];
+ DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
+ }
+ *bytes_left -= 1;
+ str += 1;
+
+ if (idx++ > 255) {
+ DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/*
+ * SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+ char* str = *list_str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+ *list_str = str;
+ DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+
+ /* Get proper CSCAN_TLV_TYPE_SSID_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* Broadcast SSID */
+ ssid[idx].SSID_len = 0;
+ memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+ *bytes_left -= 1;
+ str += 1;
+
+ DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+ }
+ else if (str[0] <= DOT11_MAX_SSID_LEN) {
+ /* Get proper SSID size */
+ ssid[idx].SSID_len = str[0];
+ *bytes_left -= 1;
+ str += 1;
+
+ /* Get SSID */
+ if (ssid[idx].SSID_len > *bytes_left) {
+ DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+ __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+ return -1;
+ }
+
+ memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+ *bytes_left -= ssid[idx].SSID_len;
+ str += ssid[idx].SSID_len;
+
+ DHD_TRACE(("%s :size=%d left=%d\n",
+ (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+ }
+ else {
+ DHD_ERROR(("### SSID size more that %d\n", str[0]));
+ return -1;
+ }
+
+ if (idx++ > max) {
+ DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
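
For reference, the SSID section consumed by wl_iw_parse_ssid_list_tlv() above is a flat byte stream: a CSCAN_TLV_TYPE_SSID_IE type byte, a one-byte SSID length (0 meaning broadcast scan), then the SSID bytes, repeated per entry. The self-contained sketch below walks such a stream; DEMO_TLV_TYPE_SSID_IE is an assumed placeholder value, not the real constant from the driver headers.

#include <stdint.h>
#include <stdio.h>

#define DEMO_TLV_TYPE_SSID_IE 'S'	/* placeholder for CSCAN_TLV_TYPE_SSID_IE */

int main(void)
{
	/* Two entries: a broadcast SSID (length 0) and the 4-byte SSID "home" */
	const uint8_t tlv[] = {
		DEMO_TLV_TYPE_SSID_IE, 0,
		DEMO_TLV_TYPE_SSID_IE, 4, 'h', 'o', 'm', 'e',
	};
	const uint8_t *p = tlv;
	int left = (int)sizeof(tlv);

	while (left >= 2 && p[0] == DEMO_TLV_TYPE_SSID_IE) {
		uint8_t len = p[1];

		printf("ssid len=%u \"%.*s\"\n", (unsigned int)len, (int)len,
		       (const char *)&p[2]);
		p += 2 + len;		/* skip type byte, length byte and SSID bytes */
		left -= 2 + len;
	}
	return 0;
}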
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx. max specifies the size of the ssid array. Returns the
+ * updated idx; if idx >= max, not all SSIDs fit and the excess have not
+ * been copied. Returns -1 on an empty string or an over-long SSID.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+ char* str, *ptr;
+
+ if ((list_str == NULL) || (*list_str == NULL))
+ return -1;
+
+ for (str = *list_str; str != NULL; str = ptr) {
+
+ /* check for next TAG */
+ if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+ *list_str = str + strlen(GET_CHANNEL);
+ return idx;
+ }
+
+ if ((ptr = strchr(str, ',')) != NULL) {
+ *ptr++ = '\0';
+ }
+
+ if (strlen(str) > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+ return -1;
+ }
+
+ if (strlen(str) == 0)
+ ssid[idx].SSID_len = 0;
+
+ if (idx < max) {
+ bcm_strcpy_s((char*)ssid[idx].SSID, sizeof(ssid[idx].SSID), str);
+ ssid[idx].SSID_len = strlen(str);
+ }
+ idx++;
+ }
+ return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+ int num;
+ int val;
+ char* str;
+ char* endptr = NULL;
+
+ if ((list_str == NULL)||(*list_str == NULL))
+ return -1;
+
+ str = *list_str;
+ num = 0;
+ while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+ val = (int)strtoul(str, &endptr, 0);
+ if (endptr == str) {
+ printf("could not parse channel number starting at"
+ " substring \"%s\" in list:\n%s\n",
+ str, *list_str);
+ return -1;
+ }
+ str = endptr + strspn(endptr, " ,");
+
+ if (num == channel_num) {
+ DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+ channel_num, *list_str));
+ return -1;
+ }
+
+ channel_list[num++] = (uint16)val;
+ }
+ *list_str = str;
+ return num;
+}
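
wl_iw_parse_channel_list() above relies on the standard strtoul()/strspn() idiom to walk a comma- or space-separated list of channel numbers. Below is a stand-alone illustration of that idiom, independent of the driver and of its GET_NPROBE terminator; the input string is invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *list = "1, 6,11 36";	/* example channel list */
	const char *str = list;
	char *endptr;

	while (*str != '\0') {
		unsigned long val = strtoul(str, &endptr, 0);

		if (endptr == str)
			break;				/* no digits parsed: stop */
		printf("channel %lu\n", val);
		str = endptr + strspn(endptr, " ,");	/* skip separators to next token */
	}
	return 0;
}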
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 000000000000..9750eeb23bce
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,293 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2011, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c,v 1.2.42.1 2010-10-19 00:41:09 Exp $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern void bcm_wlan_power_off(int);
+extern void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+#if defined(CUSTOMER_HW2)
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#else
+int wifi_set_power(int on, unsigned long msec) { return -1; }
+int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; }
+int wifi_get_mac_addr(unsigned char *buf) { return -1; }
+void *wifi_get_country_code(char *ccode) { return NULL; }
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+#endif /* CUSTOMER_HW2 */
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC) */
+
+#ifdef CUSTOMER_HW3
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function returns:
+ * 1) return value : host GPIO interrupt number for the customer platform
+ * 2) irq_flags_ptr : type of host interrupt (level or edge triggered)
+ *
+ * NOTE :
+ * Customers should check their platform definitions and their host
+ * interrupt specification to figure out the proper settings for their
+ * platform. Broadcom provides these only as example reference settings.
+ *
+ */
+int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+{
+ int host_oob_irq = 0;
+
+#ifdef CUSTOMER_HW2
+ host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+ if (dhd_oob_gpio_num < 0) {
+ dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+ }
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+ if (dhd_oob_gpio_num < 0) {
+ WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+ __FUNCTION__));
+ return (dhd_oob_gpio_num);
+ }
+
+ WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+ __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW
+ host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
+#elif defined CUSTOMER_HW3
+ gpio_request(dhd_oob_gpio_num, "oob irq");
+ host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+ gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW */
+#endif /* CUSTOMER_HW2 */
+
+ return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Customer function to control hw specific wlan gpios */
+void
+dhd_customer_gpio_wlan_ctrl(int onoff)
+{
+ switch (onoff) {
+ case WLAN_RESET_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(0, 0);
+#endif
+ WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+ break;
+
+ case WLAN_RESET_ON:
+ WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(1, 0);
+#endif
+ WL_ERROR(("=========== WLAN going back to live ========\n"));
+ break;
+
+ case WLAN_POWER_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(1);
+#endif /* CUSTOMER_HW */
+ break;
+
+ case WLAN_POWER_ON:
+ WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(1);
+ /* Allow customer power to stabilize */
+ OSL_DELAY(200);
+#endif /* CUSTOMER_HW */
+ break;
+ }
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+ int ret = 0;
+
+ WL_TRACE(("%s Enter\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+
+ /* Customer access to MAC address stored outside of DHD driver */
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ ret = wifi_get_mac_addr(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+ /* EXAMPLE code */
+ {
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+ }
+#endif /* EXAMPLE_GET_MAC */
+
+ return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+ {"", "XY", 4}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+ {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+ {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */
+ {"AT", "EU", 5},
+ {"BE", "EU", 5},
+ {"BG", "EU", 5},
+ {"CY", "EU", 5},
+ {"CZ", "EU", 5},
+ {"DK", "EU", 5},
+ {"EE", "EU", 5},
+ {"FI", "EU", 5},
+ {"FR", "EU", 5},
+ {"DE", "EU", 5},
+ {"GR", "EU", 5},
+ {"HU", "EU", 5},
+ {"IE", "EU", 5},
+ {"IT", "EU", 5},
+ {"LV", "EU", 5},
+ {"LI", "EU", 5},
+ {"LT", "EU", 5},
+ {"LU", "EU", 5},
+ {"MT", "EU", 5},
+ {"NL", "EU", 5},
+ {"PL", "EU", 5},
+ {"PT", "EU", 5},
+ {"RO", "EU", 5},
+ {"SK", "EU", 5},
+ {"SI", "EU", 5},
+ {"ES", "EU", 5},
+ {"SE", "EU", 5},
+ {"GB", "EU", 5},
+ {"KR", "XY", 3},
+ {"AU", "XY", 3},
+ {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+ {"TW", "XY", 3},
+ {"AR", "XY", 3},
+ {"MX", "XY", 3},
+ {"IL", "IL", 0},
+ {"CH", "CH", 0},
+ {"TR", "TR", 0},
+ {"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+
+/* Customized Locale converter
+* input : ISO 3166-1 country abbreviation
+* output: customized cspec
+*/
+void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+ struct cntry_locales_custom *cloc_ptr;
+
+ if (!cspec)
+ return;
+
+ cloc_ptr = wifi_get_country_code(country_iso_code);
+ if (cloc_ptr) {
+ strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = cloc_ptr->custom_locale_rev;
+ }
+ return;
+#else
+ int size, i;
+
+ size = ARRAYSIZE(translate_custom_table);
+
+ if (cspec == 0)
+ return;
+
+ if (size == 0)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+ memcpy(cspec->ccode,
+ translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[i].custom_locale_rev;
+ return;
+ }
+ }
+#ifdef EXAMPLE_TABLE
+ /* if no country code matched return first universal code from translate_custom_table */
+ memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+ return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
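
The fallback path above is a plain linear lookup of the ISO country code in translate_custom_table, with the first entry acting as the universal default when EXAMPLE_TABLE is built in. Below is a self-contained sketch of the same pattern; struct demo_locale mirrors the fields used here (iso_abbrev, custom_locale, custom_locale_rev) but is not the driver's actual cntry_locales_custom definition, and the table values are illustrative.

#include <stdio.h>
#include <string.h>

struct demo_locale {
	char iso_abbrev[4];
	char custom_locale[4];
	int  custom_locale_rev;
};

static const struct demo_locale demo_table[] = {
	{"",   "XY", 4},	/* index 0: universal fallback */
	{"US", "US", 69},
	{"DE", "EU", 5},
};

/* Return the matching entry, or the universal entry when nothing matches. */
static const struct demo_locale *demo_lookup_locale(const char *iso)
{
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if (strcmp(iso, demo_table[i].iso_abbrev) == 0)
			return &demo_table[i];
	return &demo_table[0];
}

int main(void)
{
	const struct demo_locale *loc = demo_lookup_locale("DE");

	printf("ccode=%s rev=%d\n", loc->custom_locale, loc->custom_locale_rev);
	return 0;
}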
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 000000000000..a195cbe88e5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,105 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h 285933 2011-09-23 21:45:31Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+ printf args;} while (0)
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+
+#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL)
+
+#else /* DHD_DEBUG */
+
+#define DHD_ERROR(args) do {if (net_ratelimit()) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+
+#define DHD_ERROR_ON() 0
+#define DHD_TRACE_ON() 0
+#define DHD_INFO_ON() 0
+#define DHD_DATA_ON() 0
+#define DHD_CTL_ON() 0
+#define DHD_TIMER_ON() 0
+#define DHD_HDRS_ON() 0
+#define DHD_BYTES_ON() 0
+#define DHD_INTR_ON() 0
+#define DHD_GLOM_ON() 0
+#define DHD_EVENT_ON() 0
+#define DHD_BTA_ON() 0
+#define DHD_ISCAN_ON() 0
+#define DHD_ARPOE_ON() 0
+#endif /* DHD_DEBUG */
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
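
The DHD_* macros above use the classic double-parenthesis trick so a plain C89 macro can forward a printf-style argument list, gated by bits in dhd_msg_level. A minimal stand-alone version of the same idiom follows; the DEMO_* names and the bit value are invented for the demo.

#include <stdio.h>

#define DEMO_TRACE_VAL 0x02
static int demo_msg_level = DEMO_TRACE_VAL;

/* The whole argument list, including its parentheses, is a single macro argument. */
#define DEMO_TRACE(args) do { if (demo_msg_level & DEMO_TRACE_VAL) printf args; } while (0)

int main(void)
{
	/* Note the double parentheses at the call site */
	DEMO_TRACE(("value=%d\n", 42));
	return 0;
}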
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 000000000000..b5a91eb4034c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,5079 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c 291449 2011-10-22 12:16:26Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200 /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
+#define TSMAX 1000 /* max no. of timing records kept */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+ uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+#ifdef ARP_OFFLOAD_SUPPORT
+static int dhd_device_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr);
+
+static struct notifier_block dhd_notifier = {
+ .notifier_call = dhd_device_event
+};
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#include <dhd_bus.h>
+
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+ return "";
+}
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(CONFIG_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t g_wl_iw_params;
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ struct net_device_stats stats;
+ int idx; /* iface idx in dongle */
+ dhd_if_state_t state; /* interface state */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool set_multicast;
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+ uint32 low;
+ uint32 high;
+} tsf_t;
+
+typedef struct {
+ uint32 last_cycle;
+ uint32 last_sec;
+ uint32 last_tsf;
+ uint32 coef; /* scaling factor */
+ uint32 coefdec1; /* first decimal */
+ uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+ uint32 t1;
+ uint32 t2;
+ uint32 t3;
+ uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif /* WLMEDIA_HTSF */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(CONFIG_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd_pub_t pub;
+
+ /* For supporting multiple interfaces */
+ dhd_if_t *iflist[DHD_MAX_IFS];
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+ htsf_t htsf;
+#endif
+ wait_queue_head_t ioctl_resp_wait;
+ struct timer_list timer;
+ bool wd_timer_valid;
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+#ifdef DHDTHREAD
+ /* Thread based operation */
+ bool threads_only;
+ struct semaphore sdsem;
+
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+
+#else
+ bool dhd_tasklet_create;
+#endif /* DHDTHREAD */
+ tsk_ctl_t thr_sysioc_ctl;
+
+ /* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+#endif
+ spinlock_t wakelock_spinlock;
+ int wakelock_counter;
+ int wakelock_timeout_enable;
+
+ /* Thread to issue ioctl for multicast */
+ bool set_macaddress;
+ struct ether_addr macvalue;
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+} dhd_info_t;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+extern int wl_control_wl_start(struct net_device *dev);
+extern int net_os_send_hang_message(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+struct semaphore dhd_registration_sem;
+#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finish dhd registration */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* Spawn a thread for system ioctls (set mac, set mcast) */
+uint dhd_sysioc = TRUE;
+module_param(dhd_sysioc, uint, 0);
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
+
+/* Watchdog interval */
+uint dhd_watchdog_ms = 10;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#endif /* defined(DHD_DEBUG) */
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+uint dhd_arp_mode = 0xb;
+module_param(dhd_arp_mode, uint, 0);
+
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 0);
+
+#ifdef DHDTHREAD
+/* Watchdog thread priority, -1 to use kernel timer */
+int dhd_watchdog_prio = 97;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int dhd_dpc_prio = 98;
+module_param(dhd_dpc_prio, int, 0);
+
+/* Dongle memory size override */
+extern int dhd_dongle_memsize;
+module_param(dhd_dongle_memsize, int, 0);
+#endif /* DHDTHREAD */
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif /* LINUX_VERSION_CODE */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#else
+#define DHD_COMPILED
+#endif /* DHD_DEBUG */
+
+static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+#ifdef DHD_DEBUG
+"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
+#endif
+;
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ dhd_mmc_suspend = TRUE;
+ ret = NOTIFY_OK;
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ dhd_mmc_suspend = FALSE;
+ ret = NOTIFY_OK;
+ break;
+ }
+ smp_mb();
+#endif
+ return ret;
+}
+
+static struct notifier_block dhd_sleep_pm_notifier = {
+ .notifier_call = dhd_sleep_pm_callback,
+ .priority = 0
+};
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
+ /* 1 - Enable packet filter, only allow unicast packet to send up */
+ /* 0 - Disable packet filter */
+ if (dhd_pkt_filter_enable) {
+ int i;
+
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ value, dhd_master_mode);
+ }
+ }
+#endif
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+ int power_mode = PM_MAX;
+ /* wl_pkt_filter_enable_t enable_parm; */
+ char iovbuf[32];
+ int bcn_li_dtim = 3;
+ uint roamvar = 1;
+
+ DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+ __FUNCTION__, value, dhd->in_suspend));
+
+ if (dhd && dhd->up) {
+ if (value && dhd->in_suspend) {
+
+ /* Kernel suspended */
+ DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+
+ /* Enable packet filter, only allow unicast packet to send up */
+ dhd_set_packet_filter(1, dhd);
+
+ /* If DTIM skip is set up as default, force it to wake
+ * each third DTIM for better power savings. Note that
+ * one side effect is a chance to miss BC/MC packet.
+ */
+ bcn_li_dtim = dhd_get_dtim_skip(dhd);
+ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* Disable firmware roaming during suspend */
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+ iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ } else {
+
+ /* Kernel resumed */
+ DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+ power_mode = PM_FAST;
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+
+ /* disable pkt filter */
+ dhd_set_packet_filter(0, dhd);
+
+ /* restore pre-suspend setting for dtim_skip */
+ bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
+ 4, iovbuf, sizeof(iovbuf));
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ roamvar = dhd_roam_disable;
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+ sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
+{
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ DHD_OS_WAKE_LOCK(dhdp);
+ /* Set flag when early suspend was called */
+ dhdp->in_suspend = val;
+ if ((!dhdp->suspend_disable_flag) && (dhd_check_ap_wfd_mode_set(dhdp) == FALSE))
+ dhd_set_suspend(val, dhdp);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+}
+
+static void dhd_early_suspend(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 1);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 0);
+}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+/*
+ * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay. Usage:
+ *
+ * dhd_timeout_start(&tmo, usec);
+ * while (!dhd_timeout_expired(&tmo))
+ * if (poll_something())
+ * break;
+ * if (dhd_timeout_expired(&tmo))
+ * fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+ tmo->limit = usec;
+ tmo->increment = 0;
+ tmo->elapsed = 0;
+ tmo->tick = 1000000 / HZ;
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+ /* Does nothing the first call */
+ if (tmo->increment == 0) {
+ tmo->increment = 1;
+ return 0;
+ }
+
+ if (tmo->elapsed >= tmo->limit)
+ return 1;
+
+ /* Add the delay that's about to take place */
+ tmo->elapsed += tmo->increment;
+
+ if (tmo->increment < tmo->tick) {
+ OSL_DELAY(tmo->increment);
+ tmo->increment *= 2;
+ if (tmo->increment > tmo->tick)
+ tmo->increment = tmo->tick;
+ } else {
+ wait_queue_head_t delay_wait;
+ DECLARE_WAITQUEUE(wait, current);
+ int pending;
+ init_waitqueue_head(&delay_wait);
+ add_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ pending = signal_pending(current);
+ remove_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_RUNNING);
+ if (pending)
+ return 1; /* Interrupted */
+ }
+
+ return 0;
+}
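
The timeout helpers above spin with an exponentially growing delay until the delay reaches one scheduler tick, then fall back to sleeping one jiffy at a time. Below is a user-space sketch of the same back-off idea, using usleep() in place of OSL_DELAY()/schedule_timeout(); poll_something() and the demo counter are made up for the example.

#include <unistd.h>

static int demo_calls;

static int poll_something(void)		/* made-up condition: becomes true on the 6th poll */
{
	return ++demo_calls > 5;
}

/* Wait up to limit_us for poll_something(), backing off exponentially up to tick_us. */
static int wait_with_backoff(unsigned int limit_us, unsigned int tick_us)
{
	unsigned int elapsed = 0, delay = 1;

	while (elapsed < limit_us) {
		if (poll_something())
			return 0;		/* condition met */
		usleep(delay);
		elapsed += delay;
		if (delay < tick_us)
			delay *= 2;		/* spin phase: exponential back-off */
		if (delay > tick_us)
			delay = tick_us;	/* sleep phase: clamp to one tick */
	}
	return -1;				/* timed out */
}

int main(void)
{
	return wait_with_backoff(1000000, 10000);
}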
+
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+ int i = 0;
+
+ ASSERT(dhd);
+ while (i < DHD_MAX_IFS) {
+ if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+ return i;
+ i++;
+ }
+
+ return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(struct dhd_pub *dhd_pub, int ifidx)
+{
+ struct dhd_info *dhd_info;
+
+ if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+ return NULL;
+ dhd_info = dhd_pub->info;
+ if (dhd_info && dhd_info->iflist[ifidx])
+ return dhd_info->iflist[ifidx]->net;
+ return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+ int i = DHD_MAX_IFS;
+
+ ASSERT(dhd);
+
+ if (name == NULL || *name == '\0')
+ return 0;
+
+ while (--i > 0)
+ if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+ break;
+
+ DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+ return i; /* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ ASSERT(dhd);
+
+ if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+ return "<if_bad>";
+ }
+
+ if (dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+ return "<if_null>";
+ }
+
+ if (dhd->iflist[ifidx]->net)
+ return dhd->iflist[ifidx]->net->name;
+
+ return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+ int i;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp;
+
+ ASSERT(dhd);
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+ return dhd->iflist[i]->mac_addr;
+
+ return NULL;
+}
+
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+ struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *mclist;
+#endif
+ uint32 allmulti, cnt;
+
+ wl_ioctl_t ioc;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ cnt = netdev_mc_count(dev);
+#else
+ cnt = dev->mc_count;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_unlock_bh(dev);
+#endif
+
+ /* Determine initial value of allmulti flag */
+ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+ /* Send down the multicast list first. */
+
+
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+ if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ return;
+ }
+
+ strcpy(bufp, "mcast_list");
+ bufp += strlen("mcast_list") + 1;
+
+ cnt = htol32(cnt);
+ memcpy(bufp, &cnt, sizeof(cnt));
+ bufp += sizeof(cnt);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
+ }
+#else
+ for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_unlock_bh(dev);
+#endif
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ allmulti = cnt ? TRUE : allmulti;
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Now send the allmulti setting. This is based on the setting in the
+ * net_device flags, but might be modified above to be turned on if we
+ * were trying to set some addresses and dongle rejected it...
+ */
+
+ buflen = sizeof("allmulti") + sizeof(allmulti);
+ if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+ return;
+ }
+ allmulti = htol32(allmulti);
+
+ if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+ DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+ dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+ MFREE(dhd->pub.osh, buf, buflen);
+ return;
+ }
+
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set allmulti %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+ allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+ allmulti = htol32(allmulti);
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_PROMISC;
+ ioc.buf = &allmulti;
+ ioc.len = sizeof(allmulti);
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set promisc %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+}
+
+static int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+{
+ char buf[32];
+ wl_ioctl_t ioc;
+ int ret;
+
+ if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+ DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+ return -1;
+ }
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = 32;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+ } else {
+ memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+ }
+
+ return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
+#endif
+
+static void
+dhd_op_if(dhd_if_t *ifp)
+{
+ dhd_info_t *dhd;
+ int ret = 0, err = 0;
+#ifdef SOFTAP
+ unsigned long flags;
+#endif
+
+ ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */
+
+ dhd = ifp->info;
+
+ DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+
+#ifdef WL_CFG80211
+ if (wl_cfg80211_is_progress_ifchange())
+ return;
+
+#endif
+ switch (ifp->state) {
+ case DHD_IF_ADD:
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the WLC_E_IF_DEL event.
+ */
+ if (ifp->net != NULL) {
+ DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
+ __FUNCTION__, ifp->net->name));
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ }
+ /* Allocate etherdev, including space for private structure */
+ if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ ret = -ENOMEM;
+ }
+ if (ret == 0) {
+ strncpy(ifp->net->name, ifp->name, IFNAMSIZ);
+ ifp->net->name[IFNAMSIZ - 1] = '\0';
+ memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
+ if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx,
+ dhd_net_attach)) {
+ ifp->state = DHD_IF_NONE;
+ return;
+ }
+#endif
+ if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
+ DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
+ __FUNCTION__, err));
+ ret = -EOPNOTSUPP;
+ } else {
+#if defined(SOFTAP)
+ if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ /* semaphore that the soft AP CODE waits on */
+ flags = dhd_os_spin_lock(&dhd->pub);
+
+ /* save ptr to wl0.1 netdev for use in wl_iw.c */
+ ap_net_dev = ifp->net;
+ /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
+ up(&ap_eth_ctl.sema);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ }
+#endif
+ DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
+ current->pid, ifp->net->name));
+ ifp->state = DHD_IF_NONE;
+ }
+ }
+ break;
+ case DHD_IF_DEL:
+ /* Make sure that we don't re-enter here if dhd_op_if() */
+ /* is called again from some other context */
+ ifp->state = DHD_IF_DELETING;
+ if (ifp->net != NULL) {
+ DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__));
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ wl_cfg80211_notify_ifdel(ifp->net);
+ }
+#endif
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */
+ }
+ break;
+ case DHD_IF_DELETING:
+ break;
+ default:
+ DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
+ ASSERT(!ifp->state);
+ break;
+ }
+
+ if (ret < 0) {
+ ifp->set_multicast = FALSE;
+ if (ifp->net) {
+ free_netdev(ifp->net);
+ }
+ dhd->iflist[ifp->idx] = NULL;
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ if (ifp->net == ap_net_dev)
+ ap_net_dev = NULL; /* NULL SOFTAP global wl0.1 as well */
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ }
+}
+
+static int
+_dhd_sysioc_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+
+ int i;
+#ifdef SOFTAP
+ bool in_ap = FALSE;
+ unsigned long flags;
+#endif
+
+ DAEMONIZE("dhd_sysioc");
+
+ complete(&tsk->completed);
+
+ while (down_interruptible(&tsk->sema) == 0) {
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i));
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ in_ap = (ap_net_dev != NULL);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+ if (dhd->iflist[i] && dhd->iflist[i]->state)
+ dhd_op_if(dhd->iflist[i]);
+
+ if (dhd->iflist[i] == NULL) {
+ DHD_TRACE(("\n\n %s: interface %d just been removed,"
+ "!\n\n", __FUNCTION__, i));
+ continue;
+ }
+#ifdef SOFTAP
+ if (in_ap && dhd->set_macaddress) {
+ DHD_TRACE(("attempt to set MAC for %s in AP Mode,"
+ "blocked. \n", dhd->iflist[i]->net->name));
+ dhd->set_macaddress = FALSE;
+ continue;
+ }
+
+ if (in_ap && dhd->iflist[i]->set_multicast) {
+ DHD_TRACE(("attempt to set MULTICAST list for %s"
+ "in AP Mode, blocked. \n", dhd->iflist[i]->net->name));
+ dhd->iflist[i]->set_multicast = FALSE;
+ continue;
+ }
+#endif /* SOFTAP */
+ if (dhd->iflist[i]->set_multicast) {
+ dhd->iflist[i]->set_multicast = FALSE;
+ _dhd_set_multicast_list(dhd, i);
+ }
+ if (dhd->set_macaddress) {
+ dhd->set_macaddress = FALSE;
+ _dhd_set_mac_address(dhd, i, &dhd->macvalue);
+ }
+ }
+ }
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+ }
+ DHD_TRACE(("%s: stopped\n", __FUNCTION__));
+ complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+ int ret = 0;
+
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return -1;
+
+ ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+ memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+ dhd->set_macaddress = TRUE;
+ up(&dhd->thr_sysioc_ctl.sema);
+
+ return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ifidx;
+
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return;
+
+ ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+ dhd->iflist[ifidx]->set_multicast = TRUE;
+ up(&dhd->thr_sysioc_ctl.sema);
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+ ASSERT(di != NULL);
+ spin_lock_bh(&di->wlfc_spinlock);
+ return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+ ASSERT(di != NULL);
+ spin_unlock_bh(&di->wlfc_spinlock);
+ return 1;
+}
+
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
+
+#endif /* PROP_TXSTATUS */
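
As a worked example of the mapping above (using the two tables as defined): WME_PRIO2AC(5) expands to wme_fifo2ac[prio2fifo[5]] = wme_fifo2ac[2] = 2, while WME_PRIO2AC(0) = wme_fifo2ac[prio2fifo[0]] = wme_fifo2ac[1] = 1.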
+int
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh = NULL;
+
+ /* Reject if down */
+ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+ /* free the packet here since the caller won't */
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+
+ /* Update multicast statistic */
+ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_ADDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ eh = (struct ether_header *)pktdata;
+
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ dhdp->tx_multicast++;
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+ atomic_inc(&dhd->pend_8021x_cnt);
+ }
+
+ /* Look into the packet and update the packet priority */
+ if (PKTPRIO(pktbuf) == 0)
+ pktsetprio(pktbuf, FALSE);
+
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state) {
+ /* store the interface ID */
+ DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+ /* store destination MAC in the tag as well */
+ DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+ /* decide which FIFO this packet belongs to */
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ /* one additional queue index (highest AC + 1) is used for bc/mc queue */
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+ else
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+ } else
+#endif /* PROP_TXSTATUS */
+ /* If the protocol uses a data header, apply it */
+ dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+ /* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+ dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode
+ != WLFC_FCMODE_NONE) {
+ dhd_os_wlfc_block(dhdp);
+ ret = dhd_wlfc_enque_sendq(dhdp->wlfc_state, DHD_PKTTAG_FIFO(PKTTAG(pktbuf)),
+ pktbuf);
+ dhd_wlfc_commit_packets(dhdp->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+ dhdp->bus);
+ if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) {
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0;
+ }
+ dhd_os_wlfc_unblock(dhdp);
+ }
+ else
+ /* non-proptxstatus way */
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* PROP_TXSTATUS */
+
+
+ return ret;
+}
+
+int
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+ int ret;
+ void *pktbuf;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ int ifidx;
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+ uint8 htsfdlystat_sz = 0;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ /* Reject if down */
+ if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
+ DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+ __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+ netif_stop_queue(net);
+ /* Send Event when bus down detected during data session */
+ if (dhd->pub.busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+ net_os_send_hang_message(net);
+ }
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -ENODEV;
+ }
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+ netif_stop_queue(net);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -ENODEV;
+ }
+
+ /* Make sure there's enough room for any header */
+
+ if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+ struct sk_buff *skb2;
+
+ DHD_INFO(("%s: insufficient headroom\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dhd->pub.tx_realloc++;
+
+ skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+ dev_kfree_skb(skb);
+ if ((skb = skb2) == NULL) {
+ DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ /* Convert to packet */
+ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+ DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto done;
+ }
+#ifdef WLMEDIA_HTSF
+ if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+ struct ether_header *eh = (struct ether_header *)pktdata;
+
+ if (!ETHER_ISMULTI(eh->ether_dhost) &&
+ (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+ eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+ }
+ }
+#endif
+
+ ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+
+done:
+ if (ret)
+ dhd->pub.dstats.tx_dropped++;
+ else
+ dhd->pub.tx_packets++;
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ /* Return ok: we always eat the packet */
+ return 0;
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+ struct net_device *net;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhdp->txoff = state;
+ ASSERT(dhd);
+
+ if (ifidx == ALL_INTERFACES) {
+ /* Flow control on all active interfaces */
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ net = dhd->iflist[i]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+ }
+ }
+ }
+ else {
+ if (dhd->iflist[ifidx]) {
+ net = dhd->iflist[ifidx]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+ }
+ }
+}
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ uchar *eth;
+ uint len;
+ void *data, *pnext = NULL, *save_pktbuf;
+ int i;
+ dhd_if_t *ifp;
+ wl_event_msg_t event;
+ int tout = DHD_PACKET_TIMEOUT;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ save_pktbuf = pktbuf;
+
+ for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+ struct ether_header *eh;
+ struct dot11_llc_snap_header *lsh;
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL) {
+ DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+ __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+
+ /* Dropping packets before registering net device to avoid kernel panic */
+ if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED ||
+ !dhd->pub.up) {
+ DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+ __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+
+ pnext = PKTNEXT(dhdp->osh, pktbuf);
+ PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+ if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
+ (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
+ bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+ lsh->type == HTON16(BTA_PROT_L2CAP)) {
+ amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
+ ((uint8 *)eh + RFC1042_HDR_LEN);
+ ACL_data = NULL;
+ }
+
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && PKTLEN(dhdp->osh, pktbuf) == 0) {
+ /* WLFC may send header only packet when
+ there is an urgent message but no packet to
+ piggy-back on
+ */
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++;
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+#endif
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+ /* Get the protocol, keeping the skb intact around eth_type_trans().
+ * The main reason for this hack is a limitation of Linux 2.4, where
+ * 'eth_type_trans' performs its internal skb_pull using
+ * 'net->hard_header_len' rather than ETH_HLEN. To avoid copying
+ * packets coming from the network stack when adding the BDC,
+ * hardware header, etc., we set 'net->hard_header_len' at network
+ * interface registration to ETH_HLEN plus the extra space required
+ * for those headers, not just ETH_HLEN.
+ */
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ dhd->pub.rx_multicast++;
+ }
+
+ skb->data = eth;
+ skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+ dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Process special event packets and then discard them */
+ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+ dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+ skb->mac_header,
+#else
+ skb->mac.raw,
+#endif
+ &event,
+ &data);
+
+ wl_event_to_host_order(&event);
+ if (event.event_type == WLC_E_BTA_HCI_EVENT) {
+ dhd_bta_doevt(dhdp, data, event.datalen);
+ }
+ tout = DHD_EVENT_TIMEOUT;
+ }
+
+ ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+ if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
+ ifp = dhd->iflist[ifidx];
+
+ if (ifp->net)
+ ifp->net->last_rx = jiffies;
+
+ dhdp->dstats.rx_bytes += skb->len;
+ dhdp->rx_packets++; /* Local count */
+
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+			/* If the receive is not processed inside an ISR,
+			 * ksoftirqd must be woken explicitly to service
+			 * NET_RX_SOFTIRQ. In 2.6 kernels this is handled by
+			 * netif_rx_ni(), but in earlier kernels we need to
+			 * do it manually.
+			 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
+#else
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+ }
+ }
+ DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(dhdp, tout);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+ /* Linux version has nothing to do */
+ return;
+}
+
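+/*
+ * Transmit-completion callback from the bus layer: decrements the pending
+ * 802.1X counter for EAPOL frames and generates a local BT-AMP HCI ACL
+ * completion event where applicable.
+ */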
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+ uint ifidx;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
+ uint len;
+
+ dhd_prot_hdrpull(dhdp, &ifidx, txp);
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
+
+ if (type == ETHER_TYPE_802_1X)
+ atomic_dec(&dhd->pend_8021x_cnt);
+
+ /* Crack open the packet and check to see if it is BT HCI ACL data packet.
+ * If yes generate packet completion event.
+ */
+ len = PKTLEN(dhdp->osh, txp);
+
+ /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
+ if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
+ struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+ if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+ ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+
+ dhd_bta_tx_hcidata_complete(dhdp, txp, success);
+ }
+ }
+}
+
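+/*
+ * get_stats handler: refreshes the dongle counters through the protocol
+ * layer when the device is up and mirrors them into the per-interface
+ * net_device stats.
+ */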
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_if_t *ifp;
+ int ifidx;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF)
+ return NULL;
+
+ ifp = dhd->iflist[ifidx];
+ ASSERT(dhd && ifp);
+
+ if (dhd->pub.up) {
+ /* Use the protocol to get dongle stats */
+ dhd_prot_dstats(&dhd->pub);
+ }
+
+ /* Copy dongle stats to net device stats */
+ ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
+ ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
+ ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
+ ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
+ ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
+ ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
+ ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
+ ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
+ ifp->stats.multicast = dhd->pub.dstats.multicast;
+
+ return &ifp->stats;
+}
+
+#ifdef DHDTHREAD
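+/*
+ * Watchdog kernel thread: woken from dhd_watchdog() each time the timer
+ * fires, it runs the bus watchdog and re-arms the timer while holding the
+ * SD lock.
+ */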
+static int
+dhd_watchdog_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_watchdog_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+ dhd_watchdog_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+ DAEMONIZE("dhd_watchdog");
+
+ /* Run until signal received */
+ complete(&tsk->completed);
+
+ while (1)
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ dhd_os_sdlock(&dhd->pub);
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+ flags = dhd_os_spin_lock(&dhd->pub);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer,
+ jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ }
+ dhd_os_sdunlock(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ break;
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
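+/* Watchdog timer callback: defers to the watchdog thread when one is running,
+ * otherwise runs the bus watchdog inline and re-arms the timer.
+ */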
+static void dhd_watchdog(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+ unsigned long flags;
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ if (dhd->pub.dongle_reset) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return;
+ }
+
+#ifdef DHDTHREAD
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ up(&dhd->thr_wdt_ctl.sema);
+ return;
+ }
+#endif /* DHDTHREAD */
+
+ dhd_os_sdlock(&dhd->pub);
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+ flags = dhd_os_spin_lock(&dhd->pub);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ dhd_os_sdunlock(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHDTHREAD
+static int
+dhd_dpc_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_dpc_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+ DAEMONIZE("dhd_dpc");
+ /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
+
+ /* signal: thread has started */
+ complete(&tsk->completed);
+
+ /* Run until signal received */
+ while (1) {
+ if (down_interruptible(&tsk->sema) == 0) {
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ if (dhd_bus_dpc(dhd->pub.bus)) {
+ up(&tsk->sema);
+ }
+ else {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+ } else {
+ if (dhd->pub.up)
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+ }
+ else
+ break;
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
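+/* DPC tasklet body: runs the bus DPC and reschedules itself while more work
+ * is pending, releasing the wake lock taken in dhd_sched_dpc() once done.
+ */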
+static void
+dhd_dpc(ulong data)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)data;
+
+	/* This tasklet is scheduled from dhd_sched_dpc() below, where the
+	 * wake lock is taken; the tasklet itself is initialized in
+	 * dhd_attach().
+	 */
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ if (dhd_bus_dpc(dhd->pub.bus))
+ tasklet_schedule(&dhd->tasklet);
+ else
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHDTHREAD
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ up(&dhd->thr_dpc_ctl.sema);
+ return;
+ }
+#endif /* DHDTHREAD */
+
+ tasklet_schedule(&dhd->tasklet);
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = FALSE;
+
+ strcpy(buf, "toe_ol");
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ /* Check for older dongle image that doesn't support toe_ol */
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: toe not supported by device\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return -EOPNOTSUPP;
+ }
+
+ DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ memcpy(toe_ol, buf, sizeof(uint32));
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int toe, ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = TRUE;
+
+ /* Set toe_ol as requested */
+
+ strcpy(buf, "toe_ol");
+ memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+ dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ /* Enable toe globally only if any components are enabled. */
+
+ toe = (toe_ol != 0);
+
+ strcpy(buf, "toe");
+ memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+ sprintf(info->driver, "wl");
+ sprintf(info->version, "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+ .get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ uint32 cmd;
+#ifdef TOE
+ struct ethtool_value edata;
+ uint32 toe_cmpnt, csum_dir;
+ int ret;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+		/* Copy out any requested driver name */
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ strncpy(drvname, info.driver, sizeof(info.driver));
+ drvname[sizeof(info.driver)-1] = '\0';
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if dhd requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ sprintf(info.driver, "dhd");
+ strcpy(info.version, EPI_VERSION_STR);
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!dhd->pub.up) {
+ DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (dhd->pub.iswl)
+ sprintf(info.driver, "wl");
+ else
+ sprintf(info.driver, "xx");
+
+ sprintf(info.version, "%lu", dhd->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+ (int)sizeof(drvname), drvname, info.driver));
+ break;
+
+#ifdef TOE
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ break;
+#endif /* TOE */
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
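+/*
+ * Decide whether an ioctl error indicates a hung dongle (a timeout, or the
+ * bus being down without a deliberate reset); if so, send the HANG event up
+ * so user space can trigger recovery.
+ */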
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+ if (!dhdp)
+ return FALSE;
+ if ((error == -ETIMEDOUT) || ((dhdp->busstate == DHD_BUS_DOWN) &&
+ (!dhdp->dongle_reset))) {
+		DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+ net_os_send_hang_message(net);
+ return TRUE;
+ }
+ return FALSE;
+}
+
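+/*
+ * net_device ioctl entry point: dispatches wireless-extension ioctls, the
+ * ethtool ioctl, Android private commands (SIOCDEVPRIVATE+1) and the
+ * driver's own SIOCDEVPRIVATE interface, which is forwarded to dhd_ioctl()
+ * or to the dongle via dhd_wl_ioctl().
+ */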
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_ioctl_t ioc;
+ int bcmerror = 0;
+ int buflen = 0;
+ void *buf = NULL;
+ uint driver = 0;
+ int ifidx;
+ int ret;
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhd->pub.hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+ if (ifidx == DHD_BAD_IF) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -1;
+ }
+
+#if defined(CONFIG_WIRELESS_EXT)
+ /* linux wireless extensions */
+ if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+ /* may recurse, do NOT lock */
+ ret = wl_iw_ioctl(net, ifr, cmd);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+ if (cmd == SIOCETHTOOL) {
+ ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+ if (cmd == SIOCDEVPRIVATE+1) {
+ ret = wl_android_priv_cmd(net, ifr, cmd);
+ dhd_check_hang(net, &dhd->pub, ret);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+ if (cmd != SIOCDEVPRIVATE) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -EOPNOTSUPP;
+ }
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+
+ /* Copy out any buffer passed */
+ if (ioc.buf) {
+ buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+ /* optimization for direct ioctl calls from kernel */
+ /*
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ buf = ioc.buf;
+ } else {
+ */
+ {
+ if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
+ bcmerror = -BCME_NOMEM;
+ goto done;
+ }
+ if (copy_from_user(buf, ioc.buf, buflen)) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+ }
+ }
+
+	/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ bcmerror = -BCME_EPERM;
+ goto done;
+ }
+
+ /* check for local dhd ioctl and handle it */
+ if (driver == DHD_IOCTL_MAGIC) {
+ bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+ if (bcmerror)
+ dhd->pub.bcmerror = bcmerror;
+ goto done;
+ }
+
+ /* send to dongle (must be up, and wl). */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+
+ if (!dhd->pub.iswl) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+
+	/*
+	 * Flush the TX queue where required for proper message serialization:
+	 * intercept the WLC_SET_KEY IOCTL to serialize the M4 send against the
+	 * set-key IOCTL (preventing the M4 frame from being encrypted), and
+	 * intercept the WLC_DISASSOC IOCTL to serialize WPS-DONE against it
+	 * (preventing the disassoc frame from being sent before WPS-DONE).
+	 */
+ if (ioc.cmd == WLC_SET_KEY ||
+ (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL &&
+ strncmp("wsec_key", ioc.buf, 9) == 0) ||
+ (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL &&
+ strncmp("bsscfg:wsec_key", ioc.buf, 15) == 0) ||
+ ioc.cmd == WLC_DISASSOC)
+ dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+ if (ioc.buf) {
+ /* short cut wl ioctl calls here */
+ if (strcmp("htsf", ioc.buf) == 0) {
+ dhd_ioctl_htsf_get(dhd, 0);
+ return BCME_OK;
+ }
+
+ if (strcmp("htsflate", ioc.buf) == 0) {
+ if (ioc.set) {
+ memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+ memset(&maxdelayts, 0, sizeof(tstamp_t));
+ maxdelay = 0;
+ tspktcnt = 0;
+ maxdelaypktno = 0;
+ memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+ } else {
+ dhd_dump_latency();
+ }
+ return BCME_OK;
+ }
+ if (strcmp("htsfclear", ioc.buf) == 0) {
+ memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+ htsf_seqnum = 0;
+ return BCME_OK;
+ }
+ if (strcmp("htsfhis", ioc.buf) == 0) {
+ dhd_dump_htsfhisto(&vi_d1, "H to D");
+ dhd_dump_htsfhisto(&vi_d2, "D to D");
+ dhd_dump_htsfhisto(&vi_d3, "D to H");
+ dhd_dump_htsfhisto(&vi_d4, "H to H");
+ return BCME_OK;
+ }
+ if (strcmp("tsport", ioc.buf) == 0) {
+ if (ioc.set) {
+ memcpy(&tsport, ioc.buf + 7, 4);
+ } else {
+ DHD_ERROR(("current timestamp port: %d \n", tsport));
+ }
+ return BCME_OK;
+ }
+ }
+#endif /* WLMEDIA_HTSF */
+
+ bcmerror = dhd_wl_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+
+done:
+ dhd_check_hang(net, &dhd->pub, bcmerror);
+
+ if (!bcmerror && buf && ioc.buf) {
+ if (copy_to_user(ioc.buf, buf, buflen))
+ bcmerror = -EFAULT;
+ }
+
+ if (buf)
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return OSL_ERROR(bcmerror);
+}
+
+#ifdef WL_CFG80211
+static int
+dhd_cleanup_virt_ifaces(dhd_info_t *dhd)
+{
+ int i = 1; /* Leave ifidx 0 [Primary Interface] */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ int rollback_lock = FALSE;
+#endif
+
+ DHD_TRACE(("%s: Enter \n", __func__));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ /* release lock for unregister_netdev */
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = TRUE;
+ }
+#endif
+
+ for (i = 1; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ DHD_TRACE(("Deleting IF: %d \n", i));
+ if ((dhd->iflist[i]->state != DHD_IF_DEL) &&
+ (dhd->iflist[i]->state != DHD_IF_DELETING)) {
+ dhd->iflist[i]->state = DHD_IF_DEL;
+ dhd->iflist[i]->idx = i;
+ dhd_op_if(dhd->iflist[i]);
+ }
+ }
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (rollback_lock)
+ rtnl_lock();
+#endif
+
+ return 0;
+}
+#endif /* WL_CFG80211 */
+
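+/* ndo_stop handler: brings down cfg80211 and any leftover virtual interfaces,
+ * stops the transmit queue and the protocol module, and turns the WLAN power
+ * off when firmware is not downloaded at driver load time.
+ */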
+static int
+dhd_stop(struct net_device *net)
+{
+ int ifidx;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (dhd->pub.up == 0) {
+ goto exit;
+ }
+ ifidx = dhd_net2idx(dhd, net);
+
+#ifdef WL_CFG80211
+ if (ifidx == 0) {
+ wl_cfg80211_down();
+
+ /*
+ * For CFG80211: Clean up all the left over virtual interfaces
+ * when the primary Interface is brought down. [ifconfig wlan0 down]
+ */
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ dhd_cleanup_virt_ifaces(dhd);
+ }
+ }
+#endif
+
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_cleanup(&dhd->pub);
+#endif
+ /* Set state and stop OS transmissions */
+ dhd->pub.up = 0;
+ netif_stop_queue(net);
+
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+#if defined(WL_CFG80211)
+ if (ifidx == 0 && !dhd_download_fw_on_driverload)
+ wl_android_wifi_off(net);
+#endif
+ dhd->pub.hang_was_sent = 0;
+ dhd->pub.rxcnt_timeout = 0;
+ dhd->pub.txcnt_timeout = 0;
+ OLD_MOD_DEC_USE_COUNT;
+exit:
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return 0;
+}
+
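+/* ndo_open handler: picks up an updated firmware path, brings up the bus on
+ * the primary interface, restores the MAC address and TOE settings, and
+ * enables the transmit queue.
+ */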
+static int
+dhd_open(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+#ifdef TOE
+ uint32 toe_ol;
+#endif
+ int ifidx;
+ int32 ret = 0;
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ /* Update FW path if it was changed */
+ if ((firmware_path != NULL) && (firmware_path[0] != '\0')) {
+ if (firmware_path[strlen(firmware_path)-1] == '\n')
+ firmware_path[strlen(firmware_path)-1] = '\0';
+ strcpy(fw_path, firmware_path);
+ firmware_path[0] = '\0';
+ }
+
+#if !defined(WL_CFG80211)
+ /*
+ * Force start if ifconfig_up gets called before START command
+ * We keep WEXT's wl_control_wl_start to provide backward compatibility
+ * This should be removed in the future
+ */
+ wl_control_wl_start(net);
+#endif
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ if (ifidx < 0) {
+ DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) {
+ DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ if (ifidx == 0) {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+ DHD_ERROR(("\n%s\n", dhd_version));
+ if (!dhd_download_fw_on_driverload)
+ wl_android_wifi_on(net);
+#endif /* defined(WL_CFG80211) */
+
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ int ret;
+
+ /* try to bring up bus */
+ if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
+ }
+
+ }
+
+ /* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
+ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+ /* Get current TOE mode from dongle */
+ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+ if (unlikely(wl_cfg80211_up())) {
+ DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+#endif /* WL_CFG80211 */
+ }
+
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ dhd->pub.up = 1;
+
+#ifdef BCMDBGFS
+ dhd_dbg_init(&dhd->pub);
+#endif
+
+ OLD_MOD_INC_USE_COUNT;
+exit:
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+}
+
+osl_t *
+dhd_osl_attach(void *pdev, uint bustype)
+{
+ return osl_attach(pdev, bustype, TRUE);
+}
+
+void
+dhd_osl_detach(osl_t *osh)
+{
+ if (MALLOCED(osh)) {
+ DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ }
+ osl_detach(osh);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ up(&dhd_registration_sem);
+#endif
+}
+
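+/*
+ * Record a new interface in the iflist. When no net_device handle is
+ * supplied, the actual device creation is deferred to the sysioc thread,
+ * which is woken here with the interface marked DHD_IF_ADD.
+ */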
+int
+dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+ uint8 *mac_addr, uint32 flags, uint8 bssidx)
+{
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+
+ ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ }
+ } else
+ if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) {
+ DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ memset(ifp, 0, sizeof(dhd_if_t));
+ ifp->info = dhd;
+ dhd->iflist[ifidx] = ifp;
+ strncpy(ifp->name, name, IFNAMSIZ);
+ ifp->name[IFNAMSIZ] = '\0';
+ if (mac_addr != NULL)
+ memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ if (handle == NULL) {
+ ifp->state = DHD_IF_ADD;
+ ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
+		ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+ up(&dhd->thr_sysioc_ctl.sema);
+ } else
+ ifp->net = (struct net_device *)handle;
+
+ return 0;
+}
+
+void
+dhd_del_if(dhd_info_t *dhd, int ifidx)
+{
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+
+ ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
+ ifp = dhd->iflist[ifidx];
+ if (!ifp) {
+ DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
+ return;
+ }
+
+ ifp->state = DHD_IF_DEL;
+ ifp->idx = ifidx;
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+ up(&dhd->thr_sysioc_ctl.sema);
+}
+
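+/*
+ * Allocate and initialize the per-driver dhd_info structure: the primary
+ * net_device, locks and wake locks, the protocol layer, cfg80211/wext, the
+ * watchdog timer and the worker threads (or tasklet). Returns the public
+ * dhd_pub_t on success, NULL on failure.
+ */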
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+ dhd_info_t *dhd = NULL;
+ struct net_device *net = NULL;
+
+ dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Update the firmware and nvram paths if they were provided as module parameters */
+ if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
+ strcpy(fw_path, firmware_path);
+ if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
+ strcpy(nv_path, nvram_path);
+
+ /* Allocate etherdev, including space for private structure */
+ if (!(net = alloc_etherdev(sizeof(dhd)))) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;
+
+ /* Allocate primary dhd_info */
+ if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
+ DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(dhd, 0, sizeof(dhd_info_t));
+
+#ifdef DHDTHREAD
+ dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+ dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#else
+ dhd->dhd_tasklet_create = FALSE;
+#endif /* DHDTHREAD */
+ dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID;
+ dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+ /*
+ * Save the dhd_info into the priv
+ */
+ memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd));
+ dhd->pub.osh = osh;
+
+ /* Link to info module */
+ dhd->pub.info = dhd;
+ /* Link to bus module */
+ dhd->pub.bus = bus;
+ dhd->pub.hdrlen = bus_hdrlen;
+
+ /* Set network interface name if it was provided as module parameter */
+ if (iface_name[0]) {
+ int len;
+ char ch;
+ strncpy(net->name, iface_name, IFNAMSIZ);
+ net->name[IFNAMSIZ - 1] = 0;
+ len = strlen(net->name);
+ ch = net->name[len - 1];
+ if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+ strcat(net->name, "%d");
+ }
+
+ if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+ goto fail;
+ dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+
+ sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+ spin_lock_init(&dhd->wlfc_spinlock);
+ dhd->pub.wlfc_enabled = TRUE;
+#endif /* PROP_TXSTATUS */
+
+ /* Initialize other structure content */
+ init_waitqueue_head(&dhd->ioctl_resp_wait);
+ init_waitqueue_head(&dhd->ctrl_wait);
+
+ /* Initialize the spinlocks */
+ spin_lock_init(&dhd->sdlock);
+ spin_lock_init(&dhd->txqlock);
+ spin_lock_init(&dhd->dhd_lock);
+
+ /* Initialize Wakelock stuff */
+ spin_lock_init(&dhd->wakelock_spinlock);
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+ wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_init(&dhd->dhd_net_if_mutex);
+#endif
+ dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+ /* Attach and link in the protocol */
+ if (dhd_prot_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_prot_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+ /* Attach and link in the cfg80211 */
+ if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+ DHD_ERROR(("wl_cfg80211_attach failed\n"));
+ goto fail;
+ }
+
+ dhd_monitor_init(&dhd->pub);
+ dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(CONFIG_WIRELESS_EXT)
+ /* Attach and link in the iw */
+ if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+ DHD_ERROR(("wl_iw_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+
+ /* Set up the watchdog timer */
+ init_timer(&dhd->timer);
+ dhd->timer.data = (ulong)dhd;
+ dhd->timer.function = dhd_watchdog;
+
+#ifdef DHDTHREAD
+ /* Initialize thread based operation and lock */
+ sema_init(&dhd->sdsem, 1);
+ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
+ dhd->threads_only = TRUE;
+ }
+ else {
+ dhd->threads_only = FALSE;
+ }
+
+	if (dhd_watchdog_prio >= 0) {
+ /* Initialize watchdog thread */
+ PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0);
+ } else {
+ dhd->thr_wdt_ctl.thr_pid = -1;
+ }
+
+ /* Set up the bottom half handler */
+ if (dhd_dpc_prio >= 0) {
+ /* Initialize DPC thread */
+ PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0);
+ } else {
+ /* use tasklet for dpc */
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->thr_dpc_ctl.thr_pid = -1;
+ }
+#else
+ /* Set up the bottom half handler */
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->dhd_tasklet_create = TRUE;
+#endif /* DHDTHREAD */
+
+ if (dhd_sysioc) {
+ PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0);
+ } else {
+ dhd->thr_sysioc_ctl.thr_pid = -1;
+ }
+ dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+ /*
+ * Save the dhd_info into the priv
+ */
+ memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ register_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+ dhd->early_suspend.suspend = dhd_early_suspend;
+ dhd->early_suspend.resume = dhd_late_resume;
+ register_early_suspend(&dhd->early_suspend);
+ dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ register_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ dhd_state |= DHD_ATTACH_STATE_DONE;
+ dhd->dhd_state = dhd_state;
+ return &dhd->pub;
+
+fail:
+ if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) {
+ if (net) free_netdev(net);
+ } else {
+ DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+ __FUNCTION__, dhd_state, &dhd->pub));
+ dhd->dhd_state = dhd_state;
+ dhd_detach(&dhd->pub);
+ dhd_free(&dhd->pub);
+ }
+
+ return NULL;
+}
+
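+/*
+ * Bring the bus up: download firmware and nvram if needed, start the
+ * watchdog timer, initialize the bus and, when configured, the out-of-band
+ * interrupt, then run the protocol initialization.
+ */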
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+ int ret = -1;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ unsigned long flags;
+
+ ASSERT(dhd);
+
+ DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+#ifdef DHDTHREAD
+ dhd_os_sdlock(dhdp);
+#endif /* DHDTHREAD */
+
+ /* try to download image and nvram to the dongle */
+ if ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+ (fw_path != NULL) && (fw_path[0] != '\0') &&
+ (nv_path != NULL) && (nv_path[0] != '\0')) {
+ /* wake lock moved to dhdsdio_download_firmware */
+ if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ fw_path, nv_path))) {
+ DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
+ __FUNCTION__, fw_path, nv_path));
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -1;
+ }
+ }
+ if (dhd->pub.busstate != DHD_BUS_LOAD) {
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -ENETDOWN;
+ }
+
+ /* Start the watchdog timer */
+ dhd->pub.tickcnt = 0;
+ dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+ /* Bring up the bus */
+ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+ DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return ret;
+ }
+#if defined(OOB_INTR_ONLY)
+ /* Host registration for OOB interrupt */
+ if (bcmsdh_register_oob_intr(dhdp)) {
+ /* deactivate timer and wait for the handler to finish */
+
+ flags = dhd_os_spin_lock(&dhd->pub);
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+
+ DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -ENODEV;
+ }
+
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* If bus is not ready, can't come up */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ flags = dhd_os_spin_lock(&dhd->pub);
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -ENODEV;
+ }
+
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+
+#ifdef READ_MACADDR
+ dhd_read_macaddr(dhd);
+#endif
+
+ /* Bus is ready, do any protocol initialization */
+ if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+ return ret;
+
+#ifdef WRITE_MACADDR
+ dhd_write_macaddr(dhd->pub.mac.octet);
+#endif
+
+ return 0;
+}
+
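+/*
+ * Issue the initial set of IOCTLs/IOVARs to the dongle after firmware load:
+ * MAC address, operating mode (STA/WFD/HostAPD), country code, power save,
+ * event mask, scan times, and optional ARP offload and packet filters.
+ */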
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+
+ uint up = 0;
+ uint power_mode = PM_FAST;
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = 0;
+ uint bcn_timeout = 4;
+ uint retry_max = 3;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ int arpoe = 1;
+#endif
+ int scan_assoc_time = 40;
+ int scan_unassoc_time = 40;
+ int scan_passive_time = 130;
+ char buf[WLC_IOCTL_SMLEN];
+ char *ptr;
+ uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+ uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+#endif
+
+#if defined(AP) || defined(WLP2P)
+ uint32 apsta = 1; /* Enable APSTA mode */
+#endif /* defined(AP) || defined(WLP2P) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+ dhd->op_mode = 0;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ ret = dhd_custom_get_mac_address(ea_addr.octet);
+ if (!ret) {
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
+ return BCME_NOTUP;
+ }
+ } else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ /* Get the default device MAC address directly from firmware */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
+ return BCME_NOTUP;
+ }
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+#ifdef GET_CUSTOM_MAC_ENABLE
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+ if (strstr(fw_path, "_apsta") != NULL) {
+ uint rand_mac;
+
+ srandom32((uint)jiffies);
+ rand_mac = random32();
+ iovbuf[0] = 0x02; /* locally administered bit */
+ iovbuf[1] = 0x1A;
+ iovbuf[2] = 0x11;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned char)(rand_mac >> 8);
+ iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+ bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+ }
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+ DHD_TRACE(("Firmware = %s\n", fw_path));
+#if !defined(AP) && defined(WLP2P)
+ /* Check if firmware with WFD support used */
+ if (strstr(fw_path, "_p2p") != NULL) {
+ bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s APSTA for WFD failed ret= %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->op_mode |= WFD_MASK;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* (ARP_OFFLOAD_SUPPORT) */
+ dhd_pkt_filter_enable = FALSE;
+ }
+ }
+#endif
+
+#if !defined(AP) && defined(WL_CFG80211)
+ /* Check if firmware with HostAPD support used */
+ if (strstr(fw_path, "_apsta") != NULL) {
+ /* Turn off MPC in AP mode */
+ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->op_mode |= HOSTAPD_MASK;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* (ARP_OFFLOAD_SUPPORT) */
+ dhd_pkt_filter_enable = FALSE;
+ }
+ }
+#endif
+
+ if ((dhd->op_mode != WFD_MASK) && (dhd->op_mode != HOSTAPD_MASK)) {
+ /* STA only operation mode */
+ dhd->op_mode |= STA_MASK;
+ dhd_pkt_filter_enable = TRUE;
+ }
+
+ DHD_ERROR(("Firmware up: fw_path=%s op_mode=%d, "
+ "Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ fw_path,
+ dhd->op_mode,
+ dhd->mac.octet[0], dhd->mac.octet[1], dhd->mac.octet[2],
+ dhd->mac.octet[3], dhd->mac.octet[4], dhd->mac.octet[5]));
+
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+ sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ }
+
+ /* Set Listen Interval */
+ bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+ /* Set PowerSave mode */
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+ /* Match Host and Dongle rx alignment */
+ bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* disable glom option per default */
+ bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* Setup timeout if Beacons are lost and roam is off to report link down */
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ /* Setup assoc_retry_max count to reconnect target AP in dongle */
+ bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(AP) && !defined(WLP2P)
+ /* Turn off MPC in AP mode */
+ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* defined(AP) && !defined(WLP2P) */
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ }
+#endif
+
+#if defined(KEEP_ALIVE)
+ {
+ /* Set Keep Alive : be sure to use FW with -keepalive */
+ int res;
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == FALSE)
+#endif
+ if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+			DHD_ERROR(("%s set keepalive failed %d\n",
+ __FUNCTION__, res));
+ }
+#endif /* defined(KEEP_ALIVE) */
+
+ /* Read event_msgs mask */
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+ /* Setup event_msgs */
+ setbit(eventmask, WLC_E_SET_SSID);
+ setbit(eventmask, WLC_E_PRUNE);
+ setbit(eventmask, WLC_E_AUTH);
+ setbit(eventmask, WLC_E_REASSOC);
+ setbit(eventmask, WLC_E_REASSOC_IND);
+ setbit(eventmask, WLC_E_DEAUTH);
+ setbit(eventmask, WLC_E_DEAUTH_IND);
+ setbit(eventmask, WLC_E_DISASSOC_IND);
+ setbit(eventmask, WLC_E_DISASSOC);
+ setbit(eventmask, WLC_E_JOIN);
+ setbit(eventmask, WLC_E_ASSOC_IND);
+ setbit(eventmask, WLC_E_PSK_SUP);
+ setbit(eventmask, WLC_E_LINK);
+ setbit(eventmask, WLC_E_NDIS_LINK);
+ setbit(eventmask, WLC_E_MIC_ERROR);
+ setbit(eventmask, WLC_E_PMKID_CACHE);
+ setbit(eventmask, WLC_E_TXFAIL);
+ setbit(eventmask, WLC_E_JOIN_START);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+#ifdef WLMEDIA_HTSF
+ setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+ setbit(eventmask, WLC_E_PFN_NET_FOUND);
+#endif /* PNO_SUPPORT */
+ /* enable dongle roaming event */
+ setbit(eventmask, WLC_E_ROAM);
+#ifdef WL_CFG80211
+ setbit(eventmask, WLC_E_ESCAN_RESULT);
+ if ((dhd->op_mode & WFD_MASK) == WFD_MASK) {
+ setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+ setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE);
+ setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE);
+ setbit(eventmask, WLC_E_P2P_PROBREQ_MSG);
+ setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ }
+#endif /* WL_CFG80211 */
+
+ /* Write updated Event mask */
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+ sizeof(scan_assoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+ sizeof(scan_unassoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+ sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* Set and enable ARP offload feature for STA only */
+#if defined(SOFTAP)
+ if (arpoe && !ap_fw_loaded) {
+#else
+ if (arpoe) {
+#endif
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ dhd_arp_offload_enable(dhd, arpoe);
+ } else {
+ dhd_arp_offload_set(dhd, 0);
+ dhd_arp_offload_enable(dhd, FALSE);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	/* Set up definitions for the packet filter; enabled in suspend */
+ dhd->pktfilter_count = 4;
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[0] = "100 0 0 0 0x01 0x00";
+ dhd->pktfilter[1] = NULL;
+ dhd->pktfilter[2] = NULL;
+ dhd->pktfilter[3] = NULL;
+#if defined(SOFTAP)
+ if (ap_fw_loaded) {
+ int i;
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ 0, dhd_master_mode);
+ }
+ }
+#endif /* defined(SOFTAP) */
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* Force STA UP */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Setting WL UP failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ DHD_ERROR(("Firmware version = %s\n", buf));
+ DHD_BLOG(buf, strlen(buf) + 1);
+ DHD_BLOG(dhd_version, strlen(dhd_version) + 1);
+ }
+
+done:
+ return ret;
+}
+
+
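+/*
+ * Generic IOVAR helper: packs "name" and cmd_buf with bcm_mkiovar() and
+ * issues WLC_SET_VAR or WLC_GET_VAR; on a get, the result is copied back
+ * into cmd_buf.
+ */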
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+ char buf[strlen(name) + 1 + cmd_len];
+ int len = sizeof(buf);
+ wl_ioctl_t ioc;
+ int ret;
+
+ len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (!set && ret >= 0)
+ memcpy(cmd_buf, buf, cmd_len);
+
+ return ret;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+ .ndo_open = dhd_open,
+ .ndo_stop = dhd_stop,
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+};
+
+static struct net_device_ops dhd_ops_virt = {
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+ struct dhd_info *dhd = dhdp->info;
+ struct net_device *dev = NULL;
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+ ASSERT(dev);
+
+ if (netif_running(dev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+ return BCME_NOTDOWN;
+ }
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+ if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+ DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+ return BCME_BADARG;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add)
+{
+ u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+ int i;
+ int ret;
+
+ bzero(ipv4_buf, sizeof(ipv4_buf));
+
+ /* display what we've got */
+ ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+ DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now that the host_ip table is saved, clear it in the dongle AOE */
+ dhd_aoe_hostip_clr(dhd_pub);
+
+ if (ret) {
+ DHD_ERROR(("%s failed\n", __FUNCTION__));
+ return;
+ }
+
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (add && (ipv4_buf[i] == 0)) {
+ ipv4_buf[i] = ipa;
+ add = FALSE; /* added ipa to local table */
+ DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+ __FUNCTION__, i));
+ } else if (ipv4_buf[i] == ipa) {
+ ipv4_buf[i] = 0;
+ DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+ __FUNCTION__, ipa, i));
+ }
+
+ if (ipv4_buf[i] != 0) {
+ /* add back host_ip entries from our local cache */
+ dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i]);
+ DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+ __FUNCTION__, ipv4_buf[i], i));
+ }
+ }
+#ifdef AOE_DBG
+ /* see the resulting hostip table */
+ dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+ DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+static int dhd_device_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dhd_info_t *dhd;
+ dhd_pub_t *dhd_pub;
+
+ if (!ifa)
+ return NOTIFY_DONE;
+
+ dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+ dhd_pub = &dhd->pub;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+ if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
+#else
+ if (ifa->ifa_dev->dev) {
+#endif
+ switch (event) {
+ case NETDEV_UP:
+ DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+ /* firmware not downloaded, do nothing */
+ if (dhd->pub.busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is down, exit\n", __FUNCTION__));
+ break;
+ }
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+ if (ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a) {
+ DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+ __FUNCTION__));
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+ }
+ else
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+#endif
+ break;
+
+ case NETDEV_DOWN:
+ DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+ if (!(ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a)) {
+ DHD_ARPOE(("%s: primary interface is down, AOE clr all\n",
+ __FUNCTION__));
+ dhd_aoe_hostip_clr(&dhd->pub);
+ dhd_aoe_arp_clr(&dhd->pub);
+ } else
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE);
+#else
+ dhd_aoe_hostip_clr(&dhd->pub);
+ dhd_aoe_arp_clr(&dhd->pub);
+#endif
+ break;
+
+ default:
+				DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+ __func__, ifa->ifa_label, event));
+ break;
+ }
+ }
+ return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
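+/*
+ * Hook up the net_device operations, ethtool and wireless handlers for an
+ * interface, assign its MAC address (the primary MAC with the locally
+ * administered bit set for virtual interfaces) and register it with the
+ * kernel.
+ */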
+int
+dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct net_device *net = NULL;
+ int err = 0;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+
+ net = dhd->iflist[ifidx]->net;
+ ASSERT(net);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->get_stats = dhd_get_stats;
+ net->do_ioctl = dhd_ioctl_entry;
+ net->hard_start_xmit = dhd_start_xmit;
+ net->set_mac_address = dhd_set_mac_address;
+ net->set_multicast_list = dhd_set_multicast_list;
+ net->open = net->stop = NULL;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &dhd_ops_virt;
+#endif
+
+ /* Ok, link into the network layer... */
+ if (ifidx == 0) {
+ /*
+ * device functions for the primary interface only
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ net->open = dhd_open;
+ net->stop = dhd_stop;
+#else
+ net->netdev_ops = &dhd_ops_pri;
+#endif
+ } else {
+ /*
+ * We have to use the primary MAC for virtual interfaces
+ */
+ memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
+ /*
+ * Android sets the locally administered bit to indicate that this is a
+ * portable hotspot. This will not work in simultaneous AP/STA mode,
+		 * nor with P2P. Need to set the dongle's MAC address, and then use that.
+ */
+ if (ifidx > 0) {
+ DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+ __func__, net->name));
+ temp_addr[0] |= 0x02;
+ }
+ }
+
+ net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+ net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+ net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+ if ((err = register_netdev(net)) != 0) {
+ DHD_ERROR(("couldn't register the net device, err %d\n", err));
+ goto fail;
+ }
+ printf("Broadcom Dongle Host Driver: register interface [%s]"
+ " MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ net->name,
+ net->dev_addr[0], net->dev_addr[1], net->dev_addr[2],
+ net->dev_addr[3], net->dev_addr[4], net->dev_addr[5]);
+
+#if defined(SOFTAP) && defined(CONFIG_WIRELESS_EXT) && !defined(WL_CFG80211)
+ wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (ifidx == 0) {
+ up(&dhd_registration_sem);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+ return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+ return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd) {
+
+ /*
+			 * In the case of the Android cfg80211 driver, the bus is already down
+			 * in dhd_stop, so calling stop again would cause SD read/write errors.
+ */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+ /* Stop the bus module */
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ }
+
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_unregister_oob_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+ }
+ }
+}
+
+
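+/*
+ * Tear down everything created in dhd_attach(), gated on dhd_state:
+ * notifiers, interfaces, the watchdog timer, threads/tasklet, protocol and
+ * bus, cfg80211 and wake locks.
+ */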
+void dhd_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ unsigned long flags;
+ int timer_valid = FALSE;
+
+ if (!dhdp)
+ return;
+
+ dhd = (dhd_info_t *)dhdp->info;
+ if (!dhd)
+ return;
+
+ DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+ /* Give sufficient time for threads to start running in case
+ * dhd_attach() has failed
+ */
+ osl_delay(1000*100);
+ }
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ unregister_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+ if (dhd->early_suspend.suspend)
+ unregister_early_suspend(&dhd->early_suspend);
+ }
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detach and unlink the iw */
+ wl_iw_detach();
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	if (dhd->thr_sysioc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_sysioc_ctl);
+ }
+
+ /* delete all interfaces, start with virtual */
+ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+ int i = 1;
+ dhd_if_t *ifp;
+
+ /* Cleanup virtual interfaces */
+ for (i = 1; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i]) {
+ dhd->iflist[i]->state = DHD_IF_DEL;
+ dhd->iflist[i]->idx = i;
+ dhd_op_if(dhd->iflist[i]);
+ }
+
+ /* delete primary interface 0 */
+ ifp = dhd->iflist[0];
+ ASSERT(ifp);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ if (ifp->net->open)
+#else
+ if (ifp->net->netdev_ops == &dhd_ops_pri)
+#endif
+ {
+ if (ifp->net) {
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ ifp->net = NULL;
+ }
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ dhd->iflist[0] = NULL;
+ }
+ }
+
+ /* Clear the watchdog timer */
+ flags = dhd_os_spin_lock(&dhd->pub);
+ timer_valid = dhd->wd_timer_valid;
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ if (timer_valid)
+ del_timer_sync(&dhd->timer);
+
+ if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHDTHREAD
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_wdt_ctl);
+ }
+
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_dpc_ctl);
+ }
+ else
+#endif /* DHDTHREAD */
+ tasklet_kill(&dhd->tasklet);
+ }
+ if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+ dhd_bus_detach(dhdp);
+
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+ }
+
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ wl_cfg80211_detach();
+ dhd_monitor_uninit();
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ unregister_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&dhd->wl_wifi);
+ wake_lock_destroy(&dhd->wl_rxwake);
+#endif
+ }
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd)
+ MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+ }
+}
+
+static void __exit
+dhd_module_cleanup(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_bus_unregister();
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ wl_android_wifictrl_func_del();
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+ wl_android_exit();
+
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+}
+
+
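+/*
+ * Module entry point: validates the thread-related module parameters, turns
+ * the WLAN power on, registers the bus driver and waits for the SDIO probe
+ * to complete before running the Android post-init.
+ */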
+static int __init
+dhd_module_init(void)
+{
+ int error = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ wl_android_init();
+
+#ifdef DHDTHREAD
+ /* Sanity check on the module parameters */
+ do {
+ /* Both watchdog and DPC as tasklets are ok */
+ if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
+ break;
+
+ /* If both watchdog and DPC are threads, TX must be deferred */
+ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
+ break;
+
+ DHD_ERROR(("Invalid module parameters.\n"));
+ return -EINVAL;
+ } while (0);
+#endif /* DHDTHREAD */
+
+ /* Call customer gpio to turn on power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ if (wl_android_wifictrl_func_add() < 0)
+ goto fail_1;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ sema_init(&dhd_registration_sem, 0);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+ error = dhd_bus_register();
+
+ if (!error)
+ printf("\n%s\n", dhd_version);
+ else {
+ DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+ goto fail_1;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ /*
+	 * Wait until the MMC core has invoked the sdio_register_driver() probe
+	 * callback and driver attach has completed, so that exit from dhd insmod
+	 * stays in sync with the kernel's MMC/SDIO device registration.
+ */
+ if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
+ error = -EINVAL;
+ DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
+ goto fail_2;
+ }
+#endif
+#if defined(WL_CFG80211)
+ error = wl_android_post_init();
+#endif
+
+ return error;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+fail_2:
+ dhd_bus_unregister();
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+fail_1:
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ wl_android_wifictrl_func_del();
+#endif
+
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+
+ return error;
+}
+
+late_initcall(dhd_module_init);
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ down(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ up(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+ return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+ dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ DECLARE_WAITQUEUE(wait, current);
+ int timeout = dhd_ioctl_timeout_msec;
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(timeout);
+#else
+ timeout = timeout * HZ / 1000;
+#endif
+
+ /* Wait until control frame is available */
+ add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+	/* Memory barrier for SMP: the variable behind "condition" (dhd->rxlen,
+	 * updated by dhd_bus_rxctl() in dhd_sdio.c) can be modified by another CPU.
+	 */
+ smp_mb();
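+	/* schedule_timeout() returns the remaining jiffies, so the loop below exits
+	 * when the condition becomes true, a signal is pending, or the timeout
+	 * reaches zero (which the caller treats as an ioctl response timeout).
+	 */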
+ while (!(*condition) && (!signal_pending(current) && timeout)) {
+ timeout = schedule_timeout(timeout);
+ smp_mb();
+ }
+
+ if (signal_pending(current))
+ *pending = TRUE;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+
+ return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (waitqueue_active(&dhd->ioctl_resp_wait)) {
+ wake_up_interruptible(&dhd->ioctl_resp_wait);
+ }
+
+ return 0;
+}
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ flags = dhd_os_spin_lock(pub);
+
+ /* don't start the wd until fw is loaded */
+ if (pub->busstate == DHD_BUS_DOWN) {
+ dhd_os_spin_unlock(pub, flags);
+ return;
+ }
+
+ /* Totally stop the timer */
+ if (!wdtick && dhd->wd_timer_valid == TRUE) {
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(pub, flags);
+#ifdef DHDTHREAD
+ del_timer_sync(&dhd->timer);
+#else
+ del_timer(&dhd->timer);
+#endif /* DHDTHREAD */
+ return;
+ }
+
+ if (wdtick) {
+ dhd_watchdog_ms = (uint)wdtick;
+		/* Re-arm the timer with the last watchdog period */
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd->wd_timer_valid = TRUE;
+ }
+ dhd_os_spin_unlock(pub, flags);
+}
+
+void *
+dhd_os_open_image(char *filename)
+{
+ struct file *fp;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+ /*
+ * 2.6.11 (FC4) supports filp_open() but later revs don't?
+ * Alternative:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ * ???
+ */
+ if (IS_ERR(fp))
+ fp = NULL;
+
+ return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+
+ if (!image)
+ return 0;
+
+ rdlen = kernel_read(fp, fp->f_pos, buf, len);
+ if (rdlen > 0)
+ fp->f_pos += rdlen;
+
+ return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+ if (image)
+ filp_close((struct file *)image, NULL);
+}
+
+
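+/* SDIO bus locking: when the watchdog and DPC run as kernel threads
+ * (dhd->threads_only) access is serialized with a semaphore that may sleep;
+ * in the tasklet configuration a bottom-half spinlock is used instead.
+ */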
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ down(&dhd->sdsem);
+ else
+#endif /* DHDTHREAD */
+ spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ up(&dhd->sdsem);
+ else
+#endif /* DHDTHREAD */
+ spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+ dhd_os_sdlock(pub);
+}
+
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+ dhd_os_sdunlock(pub);
+}
+
+#if defined(DHD_USE_STATIC_BUF)
+uint8* dhd_os_prealloc(void *osh, int section, uint size)
+{
+ return (uint8*)wl_android_prealloc(section, size);
+}
+
+void dhd_os_prefree(void *osh, void *addr, uint size)
+{
+}
+#endif /* DHD_USE_STATIC_BUF */
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+ int res = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (!dhd->pub.up) {
+ return NULL;
+ }
+
+ res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+ if (res == 0)
+ return &dhd->iw.wstats;
+ else
+ return NULL;
+}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
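+/* Route a firmware event: the common layer (wl_host_event) parses it first,
+ * then it is passed to wireless extensions (primary bsscfg only) and/or
+ * cfg80211; events on virtual interfaces are dropped while an IF_ADD or
+ * IF_CHANGE operation is in progress.
+ */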
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data)
+{
+ int bcmerror = 0;
+ ASSERT(dhd != NULL);
+
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
+ if (bcmerror != BCME_OK)
+ return (bcmerror);
+
+#if defined(CONFIG_WIRELESS_EXT)
+ if (event->bsscfgidx == 0) {
+ /*
+ * Wireless ext is on primary interface only
+ */
+
+ ASSERT(dhd->iflist[*ifidx] != NULL);
+ ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+ if (dhd->iflist[*ifidx]->net) {
+ wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+ }
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#ifdef WL_CFG80211
+
+ if ((wl_cfg80211_is_progress_ifchange() ||
+ wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) {
+ /*
+ * If IF_ADD/CHANGE operation is going on,
+ * discard any event received on the virtual I/F
+ */
+ return (BCME_OK);
+ }
+
+ ASSERT(dhd->iflist[*ifidx] != NULL);
+ ASSERT(dhd->iflist[*ifidx]->net != NULL);
+ if (dhd->iflist[*ifidx]->net) {
+ wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+ }
+#endif /* defined(WL_CFG80211) */
+
+ return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+ /* Send up locally generated AMP HCI Events */
+ case WLC_E_BTA_HCI_EVENT: {
+ struct sk_buff *p, *skb;
+ bcm_event_t *msg;
+ wl_event_msg_t *p_bcm_event;
+ char *ptr;
+ uint32 len;
+ uint32 pktlen;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd;
+ uchar *eth;
+ int ifidx;
+
+ len = ntoh32(event->datalen);
+ pktlen = sizeof(bcm_event_t) + len + 2;
+ dhd = dhdp->info;
+ ifidx = dhd_ifname2idx(dhd, event->ifname);
+
+ if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+ ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+ msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
+
+ bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
+ ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
+
+ msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+ /* BCM Vendor specific header... */
+ msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
+ msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
+ bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
+
+ /* vendor spec header length + pvt data length (private indication
+ * hdr + actual message itself)
+ */
+ msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
+ BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
+ msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
+
+ PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+ /* copy wl_event_msg_t into sk_buf */
+
+ /* pointer to wl_event_msg_t in sk_buf */
+ p_bcm_event = &msg->event;
+ bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
+
+ /* copy hci event into sk_buf */
+ bcopy(data, (p_bcm_event + 1), len);
+
+ msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
+ ntoh16(msg->bcm_hdr.length));
+ PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+ ptr = (char *)(msg + 1);
+			/* The last 2 bytes of the message are 0x00 0x00 to signal
+			 * that no further ethertypes follow.
+			 */
+ ptr[len+0] = 0x00;
+ ptr[len+1] = 0x00;
+
+ skb = PKTTONATIVE(dhdp->osh, p);
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ skb->data = eth;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ }
+ else {
+ /* Could not allocate a sk_buf */
+ DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+ }
+ break;
+ } /* case WLC_E_BTA_HCI_EVENT */
+
+ default:
+ break;
+ }
+}
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ dhd_os_sdunlock(dhd);
+ wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
+ dhd_os_sdlock(dhd);
+#endif
+ return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ if (waitqueue_active(&dhdinfo->ctrl_wait))
+ wake_up_interruptible(&dhdinfo->ctrl_wait);
+#endif
+ return;
+}
+
+int
+dhd_dev_reset(struct net_device *dev, uint8 flag)
+{
+ int ret;
+
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ ret = dhd_bus_devreset(&dhd->pub, flag);
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd) {
+ ret = dhd->pub.suspend_disable_flag;
+ dhd->pub.suspend_disable_flag = val;
+ }
+ return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val)
+{
+ int ret = 0;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd) {
+ ret = dhd_set_suspend(val, &dhd->pub);
+ }
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+ return ret;
+}
+
+int net_os_set_dtim_skip(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd)
+ dhd->pub.dtim_skip = val;
+
+ return 0;
+}
+
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ char *filterp = NULL;
+ int ret = 0;
+
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
+ return ret;
+ if (num >= dhd->pub.pktfilter_count)
+ return -EINVAL;
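+	/* The strings below follow the dongle pkt_filter_add format, which appears
+	 * to be "<id> <polarity> <type> <offset> <mask> <pattern>"; e.g. filter 102
+	 * matches the 01:00:5E IPv4 multicast prefix in the destination MAC.
+	 */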
+ if (add_remove) {
+ switch (num) {
+ case DHD_BROADCAST_FILTER_NUM:
+ filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ break;
+ case DHD_MULTICAST4_FILTER_NUM:
+ filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+ break;
+ case DHD_MULTICAST6_FILTER_NUM:
+ filterp = "103 0 0 0 0xFFFF 0x3333";
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ dhd->pub.pktfilter[num] = filterp;
+ return ret;
+}
+
+int net_os_set_packet_filter(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+	/* Packet filtering is applied only while we are still in early-suspend
+	 * and need to turn it either ON or OFF.
+	 * It can always be turned OFF during early-suspend, but it is turned
+	 * back ON only if suspend_disable_flag was not set.
+	 */
+ if (dhd && dhd->pub.up) {
+ if (dhd->pub.in_suspend) {
+ if (!val || (val && !dhd->pub.suspend_disable_flag))
+ dhd_set_packet_filter(val, &dhd->pub);
+ }
+ }
+ return ret;
+}
+
+
+void
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ dhd_preinit_ioctls(&dhd->pub);
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_clean */
+int
+dhd_dev_pno_reset(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_clean(&dhd->pub));
+}
+
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_enable(&dhd->pub, pfn_enabled));
+}
+
+
+/* Linux wrapper to call common dhd_pno_set */
+int
+dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
+}
+
+/* Linux wrapper to get pno status */
+int
+dhd_dev_get_pno_status(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_get_status(&dhd->pub));
+}
+
+#endif /* PNO_SUPPORT */
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd) {
+ if (!dhd->pub.hang_was_sent) {
+ dhd->pub.hang_was_sent = 1;
+#if defined(CONFIG_WIRELESS_EXT)
+ ret = wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+ ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+ }
+ }
+ return ret;
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd && dhd->pub.up)
+ memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+}
+
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (dhd)
+ mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (dhd)
+ mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags = 0;
+
+ if (dhd)
+ spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+ return flags;
+}
+
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+ return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX 10
+
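+/* Poll roughly every 10 ms, up to MAX_WAIT_FOR_8021X_TX times, for pending
+ * 802.1X (EAPOL) frames to be transmitted before proceeding.
+ */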
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int timeout = 10 * HZ / 1000;
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = dhd_get_pend_8021x_cnt(dhd);
+
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = dhd_get_pend_8021x_cnt(dhd);
+ }
+ return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+ int ret = 0;
+ struct file *fp;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* open file to write */
+ fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	if (IS_ERR(fp)) {
+		printf("%s: open file error\n", __FUNCTION__);
+		ret = -1;
+		fp = NULL;
+		goto exit;
+	}
+
+ /* Write buf to file */
+ fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+ /* free buf before return */
+ MFREE(dhd->osh, buf, size);
+ /* close file before return */
+ if (fp)
+ filp_close(fp, current->files);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
+ return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ ret = dhd->wakelock_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd->wakelock_timeout_enable)
+ wake_lock_timeout(&dhd->wl_rxwake,
+ dhd->wakelock_timeout_enable * HZ);
+#endif
+ dhd->wakelock_timeout_enable = 0;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_timeout_enable)
+ dhd->wakelock_timeout_enable = val;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
+}
+
+int net_os_wake_lock_timeout_enable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout_enable(&dhd->pub, val);
+ return ret;
+}
+
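+/* The wake lock is reference counted: the first dhd_os_wake_lock() acquires
+ * wl_wifi and the last matching dhd_os_wake_unlock() releases it;
+ * wakelock_counter is protected by wakelock_spinlock.
+ */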
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (!dhd->wakelock_counter)
+ wake_lock(&dhd->wl_wifi);
+#endif
+ dhd->wakelock_counter++;
+ ret = dhd->wakelock_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ dhd_os_wake_lock_timeout(pub);
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_counter) {
+ dhd->wakelock_counter--;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (!dhd->wakelock_counter)
+ wake_unlock(&dhd->wl_wifi);
+#endif
+ ret = dhd->wakelock_counter;
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int dhd_os_check_wakelock(void *dhdp)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+ dhd_info_t *dhd;
+
+ if (!pub)
+ return 0;
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd && wake_lock_active(&dhd->wl_wifi))
+ return 1;
+#endif
+ return 0;
+}
+
+int dhd_os_check_if_up(void *dhdp)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+
+ if (!pub)
+ return 0;
+ return pub->up;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+}
+
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+ int ifidx;
+ int ret = 0;
+ dhd_info_t *dhd = NULL;
+
+ if (!net || !netdev_priv(net)) {
+ DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd = *(dhd_info_t **)netdev_priv(net);
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+ dhd_check_hang(net, &dhd->pub, ret);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+ struct net_device *net;
+
+ net = dhd_idx2net(dhdp, ifidx);
+ return dhd_check_hang(net, dhdp, ret);
+}
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid,
+ uint8 iftype, uint8* ea);
+extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits);
+
+int dhd_wlfc_interface_event(struct dhd_info *dhd, uint8 action, uint8 ifid, uint8 iftype,
+ uint8* ea)
+{
+ if (dhd->pub.wlfc_state == NULL)
+ return BCME_OK;
+
+ return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea);
+}
+
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data)
+{
+ if (dhd->pub.wlfc_state == NULL)
+ return BCME_OK;
+
+ return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data);
+}
+
+int dhd_wlfc_event(struct dhd_info *dhd)
+{
+ return dhd_wlfc_enable(&dhd->pub);
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+typedef struct dhd_dbgfs {
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_mem;
+ dhd_pub_t *dhdp;
+ uint32 size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t rval;
+ uint32 tmp;
+ loff_t pos = *ppos;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+ tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+ ret = copy_to_user(ubuf, &tmp, 4);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+ rval = count;
+
+ return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ size_t ret;
+ uint32 buf;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+ if (ret == count)
+ return -EFAULT;
+
+ /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+ dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+ return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+ loff_t pos = -1;
+
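+	/* whence: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END */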
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = g_dbgfs.size - off;
+ }
+ return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+ .read = dhd_dbg_state_read,
+ .write = dhd_debugfs_write,
+ .open = dhd_dbg_state_open,
+ .llseek = dhd_debugfs_lseek
+};
+
+static void dhd_dbg_create(void)
+{
+ if (g_dbgfs.debugfs_dir) {
+ g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+ NULL, &dhd_dbg_state_ops);
+ }
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+ int err;
+
+ g_dbgfs.dhdp = dhdp;
+ g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+ if (IS_ERR(g_dbgfs.debugfs_dir)) {
+ err = PTR_ERR(g_dbgfs.debugfs_dir);
+ g_dbgfs.debugfs_dir = NULL;
+ return;
+ }
+
+ dhd_dbg_create();
+
+ return;
+}
+
+void dhd_dbg_remove(void)
+{
+ debugfs_remove(g_dbgfs.debugfs_mem);
+ debugfs_remove(g_dbgfs.debugfs_dir);
+
+ bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct sk_buff *skb;
+ uint32 htsf = 0;
+ uint16 dport = 0, oldmagic = 0xACAC;
+ char *p1;
+ htsfts_t ts;
+
+ /* timestamp packet */
+
+ p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+ if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/* memcpy(&proto, p1+26, 4); */
+ memcpy(&dport, p1+40, 2);
+/* proto = ((ntoh32(proto))>> 16) & 0xFF; */
+ dport = ntoh16(dport);
+ }
+
+	/* timestamp only if icmp or udp iperf with port 5555 */
+/* if (proto == 17 && dport == tsport) { */
+ if (dport >= tsport && dport <= tsport + 20) {
+
+ skb = (struct sk_buff *) pktbuf;
+
+ htsf = dhd_get_htsf(dhd, 0);
+ memset(skb->data + 44, 0, 2); /* clear checksum */
+ memcpy(skb->data+82, &oldmagic, 2);
+ memcpy(skb->data+84, &htsf, 4);
+
+ memset(&ts, 0, sizeof(htsfts_t));
+ ts.magic = HTSFMAGIC;
+ ts.prio = PKTPRIO(pktbuf);
+ ts.seqnum = htsf_seqnum++;
+ ts.c10 = get_cycles();
+ ts.t10 = htsf;
+ ts.endmagic = HTSFENDMAGIC;
+
+ memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+ }
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+ int pktcnt = 0, curval = 0, i;
+ for (i = 0; i < (NUMBIN-2); i++) {
+ curval += 500;
+ printf("%d ", his->bin[i]);
+ pktcnt += his->bin[i];
+ }
+ printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+ his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+ int i, binval = 0;
+
+ if (value < 0) {
+ histo->bin[NUMBIN-1]++;
+ return;
+ }
+ if (value > histo->bin[NUMBIN-2]) /* store the max value */
+ histo->bin[NUMBIN-2] = value;
+
+ for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500 us bins */
+ if (value <= binval) {
+ histo->bin[i]++;
+ return;
+ }
+ }
+ histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ char *p1;
+ uint16 old_magic;
+ int d1, d2, d3, end2end;
+ htsfts_t *htsf_ts;
+ uint32 htsf;
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+ p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+ memcpy(&old_magic, p1+78, 2);
+ htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+ }
+ else
+ return;
+
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+ htsf_ts->cE0 = get_cycles();
+ }
+
+ if (old_magic == 0xACAC) {
+
+ tspktcnt++;
+ htsf = dhd_get_htsf(dhd, 0);
+ memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+ memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+ d1 = ts[tsidx].t2 - ts[tsidx].t1;
+ d2 = ts[tsidx].t3 - ts[tsidx].t2;
+ d3 = ts[tsidx].t4 - ts[tsidx].t3;
+ end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+ sorttobin(d1, &vi_d1);
+ sorttobin(d2, &vi_d2);
+ sorttobin(d3, &vi_d3);
+ sorttobin(end2end, &vi_d4);
+
+ if (end2end > 0 && end2end > maxdelay) {
+ maxdelay = end2end;
+ maxdelaypktno = tspktcnt;
+ memcpy(&maxdelayts, &ts[tsidx], 16);
+ }
+ if (++tsidx >= TSMAX)
+ tsidx = 0;
+ }
+}
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+ uint32 htsf = 0, cur_cycle, delta, delta_us;
+ uint32 factor, baseval, baseval2;
+ cycles_t t;
+
+ t = get_cycles();
+ cur_cycle = t;
+
+ if (cur_cycle > dhd->htsf.last_cycle)
+ delta = cur_cycle - dhd->htsf.last_cycle;
+ else {
+ delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
+ }
+
+ delta = delta >> 4;
+
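+	/* The cycle-to-TSF ratio is kept as an integer plus two decimal digits
+	 * (coef.coefdec1 coefdec2); delta_us interpolates between delta/factor and
+	 * delta/(factor + 1) to account for the second decimal digit.
+	 */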
+ if (dhd->htsf.coef) {
+ /* times ten to get the first digit */
+ factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+ baseval = (delta*10)/factor;
+ baseval2 = (delta*10)/(factor+1);
+ delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+ htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+ }
+ else {
+ DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+ }
+
+ return htsf;
+}
+
+static void dhd_dump_latency(void)
+{
+ int i, max = 0;
+ int d1, d2, d3, d4, d5;
+
+ printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
+ for (i = 0; i < TSMAX; i++) {
+ d1 = ts[i].t2 - ts[i].t1;
+ d2 = ts[i].t3 - ts[i].t2;
+ d3 = ts[i].t4 - ts[i].t3;
+ d4 = ts[i].t4 - ts[i].t1;
+ d5 = ts[max].t4-ts[max].t1;
+ if (d4 > d5 && d4 > 0) {
+ max = i;
+ }
+ printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
+ ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+ d1, d2, d3, d4, i);
+ }
+
+ printf("current idx = %d \n", tsidx);
+
+ printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+ printf("%08X %08X %08X %08X \t%d %d %d %d\n",
+ maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+ maxdelayts.t2 - maxdelayts.t1,
+ maxdelayts.t3 - maxdelayts.t2,
+ maxdelayts.t4 - maxdelayts.t3,
+ maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int ret;
+ uint32 s1, s2;
+
+ struct tsf {
+ uint32 low;
+ uint32 high;
+ } tsf_buf;
+
+ memset(&ioc, 0, sizeof(ioc));
+ memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = FALSE;
+
+ strcpy(buf, "tsf");
+ s1 = dhd_get_htsf(dhd, 0);
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: tsf is not supported by device\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+ s2 = dhd_get_htsf(dhd, 0);
+
+ memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+ printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+ tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+ dhd->htsf.coefdec2, s2-tsf_buf.low);
+ printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+ return 0;
+}
+
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+ static ulong cur_cycle = 0, prev_cycle = 0;
+ uint32 htsf, tsf_delta = 0;
+ uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+ ulong b, a;
+ cycles_t t;
+
+	/* cycles_t in include/mips/timex.h */
+
+ t = get_cycles();
+
+ prev_cycle = cur_cycle;
+ cur_cycle = t;
+
+ if (cur_cycle > prev_cycle)
+ cyc_delta = cur_cycle - prev_cycle;
+ else {
+ b = cur_cycle;
+ a = prev_cycle;
+ cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+ }
+
+ if (data == NULL)
+ printf(" tsf update ata point er is null \n");
+
+ memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+ memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+ if (cur_tsf.low == 0) {
+ DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+ return;
+ }
+
+ if (cur_tsf.low > prev_tsf.low)
+ tsf_delta = (cur_tsf.low - prev_tsf.low);
+ else {
+ DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+ cur_tsf.low, prev_tsf.low));
+ if (cur_tsf.high > prev_tsf.high) {
+ tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+ DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
+ }
+ else
+ return; /* do not update */
+ }
+
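+	/* hfactor is the integer part of cycles-per-TSF-tick; dec1/dec2 are its
+	 * first two decimal digits, and dec3 is computed only to round dec2 up.
+	 */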
+ if (tsf_delta) {
+ hfactor = cyc_delta / tsf_delta;
+ tmp = (cyc_delta - (hfactor * tsf_delta))*10;
+ dec1 = tmp/tsf_delta;
+ dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+ tmp = (tmp - (dec1*tsf_delta))*10;
+ dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+ if (dec3 > 4) {
+ if (dec2 == 9) {
+ dec2 = 0;
+ if (dec1 == 9) {
+ dec1 = 0;
+ hfactor++;
+ }
+ else {
+ dec1++;
+ }
+ }
+ else
+ dec2++;
+ }
+ }
+
+ if (hfactor) {
+ htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
+ dhd->htsf.coef = hfactor;
+ dhd->htsf.last_cycle = cur_cycle;
+ dhd->htsf.last_tsf = cur_tsf.low;
+ dhd->htsf.coefdec1 = dec1;
+ dhd->htsf.coefdec2 = dec2;
+ }
+ else {
+ htsf = prev_tsf.low;
+ }
+}
+
+#endif /* WLMEDIA_HTSF */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_mon.c b/drivers/net/wireless/bcmdhd/dhd_linux_mon.c
new file mode 100644
index 000000000000..dd9c71f75be6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_mon.c
@@ -0,0 +1,393 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_mon.c,v 1.131.2.55 2011-02-09 05:31:56 Exp $
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+typedef enum monitor_states
+{
+ MONITOR_STATE_DEINIT = 0x0,
+ MONITOR_STATE_INIT = 0x1,
+ MONITOR_STATE_INTERFACE_ADDED = 0x2,
+ MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+ int radiotap_enabled;
+ struct net_device* real_ndev; /* The real interface that the monitor is on */
+ struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+ void *dhd_pub;
+ monitor_states_t monitor_state;
+ monitor_interface mon_if[DHD_MAX_IFS];
+ struct mutex lock; /* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+ .ndo_open = dhd_mon_if_open,
+ .ndo_stop = dhd_mon_if_stop,
+ .ndo_start_xmit = dhd_mon_if_subif_start_xmit,
+ .ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+ .ndo_set_mac_address = dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0",
+ * and "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+ int i;
+ int last_name_len = 0;
+ struct net_device *ndev;
+ struct net_device *ndev_found = NULL;
+
+	/* We want to find interface "p2p-eth0-0" for monitor interface "mon.p2p-eth0-0";
+	 * the longest matching name wins so that "eth0" does not shadow "p2p-eth0-0"
+ */
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+ if (ndev && strstr(name, ndev->name)) {
+ if (strlen(ndev->name) > last_name_len) {
+ ndev_found = ndev;
+ last_name_len = strlen(ndev->name);
+ }
+ }
+ }
+
+ return ndev_found;
+}
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (g_monitor.mon_if[i].mon_ndev == ndev)
+ return &g_monitor.mon_if[i];
+ }
+
+ return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+ int ret = 0;
+
+ MON_PRINT("enter\n");
+ return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+ int ret = 0;
+
+ MON_PRINT("enter\n");
+ return ret;
+}
+
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int ret = 0;
+ int rtap_len;
+ int qos_len = 0;
+ int dot11_hdr_len = 24;
+ int snap_len = 6;
+ unsigned char *pdata;
+ unsigned short frame_ctl;
+ unsigned char src_mac_addr[6];
+ unsigned char dst_mac_addr[6];
+ struct ieee80211_hdr *dot11_hdr;
+ struct ieee80211_radiotap_header *rtap_hdr;
+ monitor_interface* mon_if;
+
+ MON_PRINT("enter\n");
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ goto fail;
+ }
+
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ goto fail;
+
+ rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+ if (unlikely(rtap_hdr->it_version))
+ goto fail;
+
+ rtap_len = ieee80211_get_radiotap_len(skb->data);
+ if (unlikely(skb->len < rtap_len))
+ goto fail;
+
+ MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+ skb_pull(skb, rtap_len);
+
+ dot11_hdr = (struct ieee80211_hdr *)skb->data;
+ frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Process only data frames */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* A QoS-data subtype (bit 0x0080) carries a 2-byte QoS control field;
+		 * ToDS and FromDS both set (0x0300) marks a 4-address Wireless
+		 * Distribution System (WDS) frame with a 6-byte longer header.
+		 */
+ if (dot11_hdr->frame_control & 0x0080)
+ qos_len = 2;
+ if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+ dot11_hdr_len += 6;
+
+ memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+ memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+ /* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for
+ * for two MAC addresses
+ */
+ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+ pdata = (unsigned char*)skb->data;
+ memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+ memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+
+ MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+ /* Use the real net device to transmit the packet */
+ ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+ return ret;
+ }
+fail:
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+ monitor_interface* mon_if;
+
+ mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		return;
+	}
+
+ MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+ int ret = 0;
+ monitor_interface* mon_if;
+
+ mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		return -EINVAL;
+	}
+
+ MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+ return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+int dhd_add_monitor(char *name, struct net_device **new_ndev)
+{
+ int i;
+ int idx = -1;
+ int ret = 0;
+ struct net_device* ndev = NULL;
+ dhd_linux_monitor_t **dhd_mon;
+
+ mutex_lock(&g_monitor.lock);
+
+ MON_TRACE("enter, if name: %s\n", name);
+ if (!name || !new_ndev) {
+ MON_PRINT("invalid parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Find a vacancy
+ */
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (g_monitor.mon_if[i].mon_ndev == NULL) {
+ idx = i;
+ break;
+ }
+ if (idx == -1) {
+ MON_PRINT("exceeds maximum interfaces\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+ if (!ndev) {
+ MON_PRINT("failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ strncpy(ndev->name, name, IFNAMSIZ);
+ ndev->name[IFNAMSIZ - 1] = 0;
+ ndev->netdev_ops = &dhd_mon_if_ops;
+
+ ret = register_netdevice(ndev);
+ if (ret) {
+ MON_PRINT(" register_netdevice failed (%d)\n", ret);
+ goto out;
+ }
+
+ *new_ndev = ndev;
+ g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+ g_monitor.mon_if[idx].mon_ndev = ndev;
+ g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+ dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+ *dhd_mon = &g_monitor;
+ g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+ MON_PRINT("net device returned: 0x%p\n", ndev);
+ MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name);
+
+out:
+ if (ret && ndev)
+ free_netdev(ndev);
+
+ mutex_unlock(&g_monitor.lock);
+ return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+ int i;
+ bool rollback_lock = false;
+ if (!ndev)
+ return -EINVAL;
+ mutex_lock(&g_monitor.lock);
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (g_monitor.mon_if[i].mon_ndev == ndev ||
+ g_monitor.mon_if[i].real_ndev == ndev) {
+ g_monitor.mon_if[i].real_ndev = NULL;
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ unregister_netdev(g_monitor.mon_if[i].mon_ndev);
+ free_netdev(g_monitor.mon_if[i].mon_ndev);
+ g_monitor.mon_if[i].mon_ndev = NULL;
+ g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+ break;
+ }
+ }
+ if (rollback_lock) {
+ rtnl_lock();
+ rollback_lock = false;
+ }
+
+ if (g_monitor.monitor_state !=
+ MONITOR_STATE_INTERFACE_DELETED)
+ MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n",
+ ndev);
+ mutex_unlock(&g_monitor.lock);
+
+ return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+ if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+ g_monitor.dhd_pub = dhd_pub;
+ mutex_init(&g_monitor.lock);
+ g_monitor.monitor_state = MONITOR_STATE_INIT;
+ }
+ return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+ int i;
+ struct net_device *ndev;
+ bool rollback_lock = false;
+ mutex_lock(&g_monitor.lock);
+ if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = g_monitor.mon_if[i].mon_ndev;
+ if (ndev) {
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ g_monitor.mon_if[i].real_ndev = NULL;
+ g_monitor.mon_if[i].mon_ndev = NULL;
+ if (rollback_lock) {
+ rtnl_lock();
+ rollback_lock = false;
+ }
+ }
+ }
+ g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+ }
+ mutex_unlock(&g_monitor.lock);
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 000000000000..aadd122f5b07
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,39 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c,v 1.3 2009-04-10 04:14:49 Exp $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+ int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+ return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 000000000000..e0a54ad02692
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,105 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h,v 1.8.10.6 2010-12-22 23:47:24 Exp $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT 20000 /* in milliseconds */
+#endif
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_enque_sendq(void* state, int prec, void* p);
+extern int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx);
+extern void dhd_wlfc_cleanup(dhd_pub_t *dhd);
+#endif /* PROP_TXSTATUS */
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#elif defined(RNDIS)
+#define DHD_PROTOCOL "rndis"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 000000000000..0b90ed48a0f0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,6289 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c 288105 2011-10-06 01:58:02Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hndrte_armtrap.h>
+#include <hndrte_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
+#endif
+
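+/* FCHI/FCLOW are the high/low watermarks on the tx queue that the bus code
+ * uses to assert and release host flow control.
+ */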
+#define QLEN 256 /* bulk rx and tx queue lengths */
+#define FCHI (QLEN - 10)
+#define FCLOW (FCHI / 2)
+#define PRIOMASK 7
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+
+#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
+
+#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
+
+#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */
+#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD 32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#ifdef SDTEST
+#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ 32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ 2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY 3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free is unconditional for SDIO and SD-SPI; for the gSPI bus it is
+ * conditional on a bufpool being present.
+ */
+#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+#if defined(OOB_INTR_ONLY)
+extern void bcmsdh_set_irq(int flag);
+#endif /* defined(OOB_INTR_ONLY) */
+#ifdef PROP_TXSTATUS
+extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+#endif
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX 192
+#define CONSOLE_BUFFER_MAX 2024
+typedef struct dhd_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ hndrte_log_t log; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ uint8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+ dhd_pub_t *dhd;
+
+ bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+
+ sdpcmd_regs_t *regs; /* Registers for SDIO core */
+ uint sdpcmrev; /* SDIO core revision */
+ uint armrev; /* CPU core revision */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 hostintmask; /* Copy of Host Interrupt Mask */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+ const char *nvram_params; /* user specified nvram params. */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Queue length used for flow-control */
+ uint8 flowcontrol; /* per prio flow control bitmask */
+ uint8 tx_seq; /* Transmit sequence number (next) */
+ uint8 tx_max; /* Maximum transmit sequence allowed */
+
+ uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+ uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ uint16 nextlen; /* Next Read Len from last header */
+ uint8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ void *glomd; /* Packet containing glomming descriptor */
+ void *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ uint8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ uint8 *rxctl; /* Aligned pointer into rxbuf */
+ uint8 *databuf; /* Buffer for receiving big glom packet */
+ uint8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ uint8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+#ifdef DHD_DEBUG
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+ uint regfails; /* Count of R_REG/W_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ int32 idletime; /* Control for activity timeout */
+ int32 idlecount; /* Activity timeout counter */
+ int32 idleclock; /* How to set bus driver when idle */
+ int32 sd_divisor; /* Speed control to bus driver */
+ int32 sd_mode; /* Mode control to bus driver */
+ int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
+ bool use_rxchain; /* If dhd should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+ bool rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ uint prev_rxlim_hit; /* Rx limit exceeded during previous dpc schedule */
+ bool alp_only; /* Don't use HT clock (ALP only) */
+ /* Decides whether rx of control frames happens in rxbuf or the lb-pool */
+ bool usebufpool;
+
+#ifdef SDTEST
+ /* external loopback */
+ bool ext_loop;
+ uint8 loopid;
+
+ /* pktgen configuration */
+ uint pktgen_freq; /* Ticks between bursts */
+ uint pktgen_count; /* Packets to send each burst */
+ uint pktgen_print; /* Bursts between count displays */
+ uint pktgen_total; /* Stop after this many */
+ uint pktgen_minlen; /* Minimum packet data len */
+ uint pktgen_maxlen; /* Maximum packet data len */
+ uint pktgen_mode; /* Configured mode: tx, rx, or echo */
+ uint pktgen_stop; /* Number of tx failures causing stop */
+
+ /* active pktgen fields */
+ uint pktgen_tick; /* Tick counter for bursts */
+ uint pktgen_ptick; /* Burst counter for printing */
+ uint pktgen_sent; /* Number of test packets generated */
+ uint pktgen_rcvd; /* Number of test packets received */
+ uint pktgen_fail; /* Number of failed send attempts */
+ uint16 pktgen_len; /* Length of next packet to send */
+#define PKTGEN_RCV_IDLE (0)
+#define PKTGEN_RCV_ONGOING (1)
+ uint16 pktgen_rcv_state; /* receive state */
+ uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Received frames too long for the receive buffer */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+
+ uint8 *ctrl_frame_buf;
+ uint32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+ uint32 rxint_mode; /* rx interrupt mode */
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Not used yet */
+#define CLK_AVAIL 3
+
+#define DHD_NOPMU(dhd) (FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+int dhd_dongle_memsize;
+
+static bool dhd_doflow;
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static const uint watermark = 8;
+static const uint firstread = DHD_FIRSTREAD;
+
+#define HDATLEN (firstread - (SDPCM_HDRLEN))
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+#define ALIGNMENT 4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
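+/* Align the data pointer of packet 'p' to an 'align'-byte boundary by pulling
+ * off the leading pad bytes, then set the packet length to 'len'. The packet
+ * must already hold at least 'len' bytes plus the alignment pad.
+ */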
+#define PKTALIGN(osh, p, len, align) \
+ do { \
+ uint datalign; \
+ datalign = (uintptr)PKTDATA((osh), (p)); \
+ datalign = ROUNDUP(datalign, (align)) - datalign; \
+ ASSERT(datalign < (align)); \
+ ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
+ if (datalign) \
+ PKTPULL((osh), (p), datalign); \
+ PKTSETLEN((osh), (p), (len)); \
+ } while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+/* Check whether the dongle has offered transmit window for data frames */
+#define DATAOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) > 2) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Check whether there is transmit window available for a control frame */
+#define TXCTLOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
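+/* Both checks treat (tx_max - tx_seq), taken as an unsigned 8-bit value, as
+ * the number of transmit credits currently granted by the dongle; the 0x80
+ * test rejects a window that has gone "negative" after sequence wrap.
+ */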
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ regvar = R_REG(bus->dhd->osh, regaddr); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) { \
+ DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ regvar = 0; \
+ } \
+ } \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ W_REG(bus->dhd->osh, regaddr, regval); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) \
+ DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ } \
+} while (0)
+
+#define BUS_WAKE(bus) \
+ do { \
+ if ((bus)->sleeping) \
+ dhdsdio_bussleep((bus), FALSE); \
+ } while (0)
+
+/*
+ * Packet-available interrupts from the dongle to the host can be managed in
+ * three different ways whenever the dongle has a packet to transmit to the host.
+ *
+ * Mode 0: Dongle writes the software host mailbox and the host is interrupted.
+ * Mode 1: (sdiod core rev >= 4)
+ * Device sets a dedicated bit in intstatus whenever a packet is
+ * available in the FIFO. The host cannot clear this status bit until all
+ * packets have been read from the FIFO; no ack of the dongle intstatus
+ * is needed.
+ * Mode 2: (sdiod core rev >= 4)
+ * Device sets a bit in intstatus and the host acks it by writing a one
+ * to that bit. The dongle will not generate any more packet interrupts
+ * until the host has read all the packets and reads a zero, indicating
+ * that there are no more packets. Host interrupts need not be disabled,
+ * but the intstatus bit must be acked.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
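+/* SDIO_DEVICE_HMB_RXINT corresponds to "Mode 0" in the comment above,
+ * SDIO_DEVICE_RXDATAINT_MODE_0 to "Mode 1" and SDIO_DEVICE_RXDATAINT_MODE_1
+ * to "Mode 2".
+ */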
+
+
+#define FRAME_AVAIL_MASK(bus) \
+ ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
+
+#define DHD_BUS SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count);
+#endif
+
+#ifdef DHD_DEBUG
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+ void * regsva, uint16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+ bool reset_flag);
+
+static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+
+static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+static void
+dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+ int32 min_size = DONGLE_MIN_MEMSIZE;
+ /* Restrict the memsize to user specified limit */
+ DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+ dhd_dongle_memsize, min_size));
+ if ((dhd_dongle_memsize > min_size) &&
+ (dhd_dongle_memsize < (int32)bus->orig_ramsize))
+ bus->ramsize = dhd_dongle_memsize;
+}
+
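+/* Move the SDIO function-1 backplane address window so that 'address' falls
+ * inside it, by programming the SBADDRLOW/MID/HIGH config registers from the
+ * upper bits of the address.
+ */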
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+ int err = 0;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+ return err;
+}
+
+
+/* Turn backplane clock on or off */
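+/* When turning the clock on with 'pendok' set and HT is not yet available,
+ * the device is programmed to raise a clock-available interrupt and the
+ * function returns with clkstate == CLK_PENDING instead of spin-waiting.
+ */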
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+ int err;
+ uint8 clkctl, clkreq, devctl;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(OOB_INTR_ONLY)
+ pendok = FALSE;
+#endif
+ clkctl = 0;
+ sdh = bus->sdh;
+
+
+ if (on) {
+ /* Request HT Avail */
+ clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ if (pendok &&
+ ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) {
+ uint32 dummy, retries;
+ R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+ }
+
+ /* Check current status */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ /* Go to pending and await interrupt if appropriate */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+ /* Allow only clock-available interrupt */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ DHD_INFO(("CLKCTL: set PENDING\n"));
+ bus->clkstate = CLK_PENDING;
+ return BCME_OK;
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ /* Otherwise, wait here (polling) for HT Avail */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+ !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+ }
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+ __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+ return BCME_ERROR;
+ }
+
+
+ /* Mark clock available */
+ bus->clkstate = CLK_AVAIL;
+ DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+ if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+ if (!SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+ }
+#endif /* !defined(BCMLXSDMMC) */
+ } else {
+ if (SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+ }
+ }
+#endif /* defined (DHD_DEBUG) */
+
+ bus->activity = TRUE;
+ } else {
+ clkreq = 0;
+
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ bus->clkstate = CLK_SDONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ DHD_INFO(("CLKCTL: turned OFF\n"));
+ if (err) {
+ DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ return BCME_OK;
+}
+
+/* Change idle/active SD state */
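+/* bus->idleclock selects the idle behaviour: DHD_IDLE_STOP turns the SD clock
+ * off entirely (optionally dropping to SD1 mode first), DHD_IDLE_ACTIVE keeps
+ * it running, and any other value is applied as an idle sd_divisor setting.
+ */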
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+ int err;
+ int32 iovalue;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (on) {
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ /* Turn on clock and restore mode */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ iovalue = bus->sd_mode;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Restore clock speed */
+ iovalue = bus->sd_divisor;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_SDONLY;
+ } else {
+ /* Stop or slow the SD clock itself */
+ if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+ DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+ __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+ return BCME_ERROR;
+ }
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ if (sd1idle) {
+ /* Change to SD1 mode and turn off clock */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+
+ iovalue = 0;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Set divisor to idle value */
+ iovalue = bus->idleclock;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_NONE;
+ }
+
+ return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+ int ret = BCME_OK;
+#ifdef DHD_DEBUG
+ uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ return ret;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ dhdsdio_sdclk(bus, TRUE);
+ /* Now request HT Avail on the backplane */
+ ret = dhdsdio_htclk(bus, TRUE, pendok);
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ ret = dhdsdio_sdclk(bus, TRUE);
+ else if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ else
+ DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+ bus->clkstate, target));
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ }
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ /* Now remove the SD clock */
+ ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+ if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+ dhd_os_wd_timer(bus->dhd, 0);
+ break;
+ }
+#ifdef DHD_DEBUG
+ DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+ return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return BCME_OK;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+ return BCME_BUSY;
+
+
+ /* Disable SDIO interrupts (no longer interested) */
+ bcmsdh_intr_disable(bus->sdh);
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+
+ /* Change state */
+ bus->sleeping = TRUE;
+
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ 0, NULL);
+
+ /* Force pad isolation off if possible (in case power never toggled) */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Change state */
+ bus->sleeping = FALSE;
+
+ /* Enable interrupts again */
+ if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ }
+
+ return BCME_OK;
+}
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+ bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (enable == TRUE) {
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ } else {
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ }
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int
+dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+{
+ int ret;
+ osl_t *osh;
+ uint8 *frame;
+ uint16 len, pad1 = 0;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh;
+ void *new;
+ int i;
+#ifdef WLMEDIA_HTSF
+ char *p;
+ htsfts_t *htsf_ts;
+#endif
+
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+
+ if (bus->dhd->dongle_reset) {
+ ret = BCME_NOTREADY;
+ goto done;
+ }
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+ if (PKTLEN(osh, pkt) >= 100) {
+ p = PKTDATA(osh, pkt);
+ htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->c20 = get_cycles();
+ htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+ }
+ }
+#endif /* WLMEDIA_HTSF */
+
+ /* Add alignment padding, allocate new packet if needed */
+ if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
+ if (PKTHEADROOM(osh, pkt) < pad1) {
+ DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
+ __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
+ bus->dhd->tx_realloc++;
+ new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+ if (!new) {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ /* make sure the locally allocated replacement packet gets freed, too */
+ free_pkt = TRUE;
+ pkt = new;
+ frame = (uint8*)PKTDATA(osh, pkt);
+ ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+ pad1 = 0;
+ } else {
+ PKTPUSH(osh, pkt, pad1);
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
+ bzero(frame, pad1 + SDPCM_HDRLEN);
+ }
+ }
+ ASSERT(pad1 < DHD_SDALIGN);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (uint16)PKTLEN(osh, pkt);
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~len);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
+ tx_packets[PKTPRIO(pkt)]++;
+ }
+ if (DHD_BYTES_ON() &&
+ (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad2 = bus->blocksize - (len % bus->blocksize);
+ if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
+#ifdef NOTUSED
+ if (pad2 <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+ len += pad2;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Some controllers have trouble with odd bytes -- round to even */
+ if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+ if (PKTTAILROOM(osh, pkt))
+#endif
+ len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+ else
+ DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+ }
+
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, pkt, NULL, NULL);
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+
+done:
+ /* restore pkt buffer pointer before calling tx complete routine */
+ PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1);
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
+ dhd_os_sdlock(bus->dhd);
+ } else {
+#endif /* PROP_TXSTATUS */
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+
+#ifdef PROP_TXSTATUS
+ }
+#endif
+ return ret;
+}
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
+ }
+#endif /* SDTEST */
+
+ /* Add space for the header */
+ PKTPUSH(osh, pkt, SDPCM_HDRLEN);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+#ifndef DHDTHREAD
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+ pktq_len(&bus->txq)));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+#ifndef DHDTHREAD
+ /* Need to also release txqlock before releasing sdlock.
+ * This thread still has txqlock and releases sdlock.
+ * Deadlock happens when dpc() grabs sdlock first then
+ * attempts to grab txqlock.
+ */
+ dhd_os_sdunlock_txq(bus->dhd);
+ dhd_os_sdunlock(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state)
+ dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE);
+ else
+#endif
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+#ifndef DHDTHREAD
+ dhd_os_sdlock(bus->dhd);
+ dhd_os_sdlock_txq(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+ /* let the caller decide whether to free the packet */
+ if (!bus->dhd->wlfc_state)
+#endif
+ PKTFREE(osh, pkt, TRUE);
+ ret = BCME_NORESOURCE;
+ }
+ else
+ ret = BCME_OK;
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+
+#ifdef DHD_DEBUG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+#ifdef DHDTHREAD
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+ /* Make sure the backplane HT clock is on; pending state not allowed */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+ }
+
+#ifndef DHDTHREAD
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+ void *pkt;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ uint8 tx_prec_map;
+
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ tx_prec_map = ~bus->flowcontrol;
+
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+ dhd_os_sdlock_txq(bus->dhd);
+ if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+ dhd_os_sdunlock_txq(bus->dhd);
+ break;
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+ datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
+
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
+ R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
+ }
+
+ /* Deflow-control stack if needed */
+ if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+ dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+
+ return cnt;
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ uint8 *frame;
+ uint16 len;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint8 doff = 0;
+ int ret = -1;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Back up the pointer to make room for the bus header */
+ frame = msg - SDPCM_HDRLEN;
+ len = (msglen += SDPCM_HDRLEN);
+
+ /* Add alignment padding (optional for ctl frames) */
+ if (dhd_alignctl) {
+ if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ bzero(frame, doff + SDPCM_HDRLEN);
+ }
+ ASSERT(doff < DHD_SDALIGN);
+ }
+ doff += SDPCM_HDRLEN;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (len & (ALIGNMENT - 1)))
+ len = ROUNDUP(len, ALIGNMENT);
+
+ ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(uint16*)frame = htol16((uint16)msglen);
+ *(((uint16*)frame) + 1) = htol16(~msglen);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+ if (!TXCTLOK(bus)) {
+ DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq));
+ bus->ctrl_frame_stat = TRUE;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+ dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+
+ if (bus->ctrl_frame_stat == FALSE) {
+ DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+ ret = 0;
+ } else {
+ bus->dhd->txcnt_timeout++;
+ if (!bus->dhd->hang_was_sent)
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+ __FUNCTION__, bus->dhd->txcnt_timeout));
+ ret = -1;
+ bus->ctrl_frame_stat = FALSE;
+ goto done;
+ }
+ }
+
+ bus->dhd->txcnt_timeout = 0;
+
+ if (ret == -1) {
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retries++ < TXRETRIES);
+ }
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ if (ret)
+ bus->dhd->tx_ctlerrs++;
+ else
+ bus->dhd->tx_ctlpkts++;
+
+ if (bus->dhd->txcnt_timeout >= MAX_CNTL_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return ret ? -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+ dhd_os_sdlock(bus->dhd);
+ rxlen = bus->rxlen;
+ bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+ bus->rxlen = 0;
+ dhd_os_sdunlock(bus->dhd);
+
+ if (rxlen) {
+ DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+ __FUNCTION__, rxlen, msglen));
+ } else if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+ } else if (pending == TRUE) {
+ DHD_CTL(("%s: canceled\n", __FUNCTION__));
+ return -ERESTARTSYS;
+ } else {
+ DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+ }
+ if (timeleft == 0) {
+ bus->dhd->rxcnt_timeout++;
+ DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
+ }
+ else
+ bus->dhd->rxcnt_timeout = 0;
+
+ if (rxlen)
+ bus->dhd->rx_ctlpkts++;
+ else
+ bus->dhd->rx_ctlerrs++;
+
+ if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+enum {
+ IOV_INTR = 1,
+ IOV_POLLRATE,
+ IOV_SDREG,
+ IOV_SBREG,
+ IOV_SDCIS,
+ IOV_MEMBYTES,
+ IOV_MEMSIZE,
+#ifdef DHD_DEBUG
+ IOV_CHECKDIED,
+ IOV_SERIALCONS,
+#endif
+ IOV_DOWNLOAD,
+ IOV_SOCRAM_STATE,
+ IOV_FORCEEVEN,
+ IOV_SDIOD_DRIVE,
+ IOV_READAHEAD,
+ IOV_SDRXCHAIN,
+ IOV_ALIGNCTL,
+ IOV_SDALIGN,
+ IOV_DEVRESET,
+ IOV_CPU,
+#ifdef SDTEST
+ IOV_PKTGEN,
+ IOV_EXTLOOP,
+#endif /* SDTEST */
+ IOV_SPROM,
+ IOV_TXBOUND,
+ IOV_RXBOUND,
+ IOV_TXMINMAX,
+ IOV_IDLETIME,
+ IOV_IDLECLOCK,
+ IOV_SD1IDLE,
+ IOV_SLEEP,
+ IOV_DONGLEISOLATION,
+ IOV_VARS,
+#ifdef SOFTAP
+ IOV_FWPATH
+#endif
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+ {"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
+ {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 },
+ {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 },
+ {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 },
+ {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 },
+ {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 },
+ {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
+ {"download", IOV_DOWNLOAD, 0, IOVT_BOOL, 0 },
+ {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+ {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 },
+ {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 },
+ {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 },
+ {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 },
+ {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 },
+ {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 },
+ {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
+ {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
+ {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 },
+ {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 },
+ {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+ {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 },
+ {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
+#ifdef SOFTAP
+ {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+ {NULL, 0, 0, 0, 0 }
+};
+
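+/* Print 'desc' followed by num/div as a fixed-point value with two decimal
+ * places, or "N/A" when div is zero.
+ */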
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+ uint q1, q2;
+
+ if (!div) {
+ bcm_bprintf(strbuf, "%s N/A", desc);
+ } else {
+ q1 = num / div;
+ q2 = (100 * (num - (q1 * div))) / div;
+ bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+ }
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+ bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+ bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+ bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
+ bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+ bus->rxlen, bus->rx_seq);
+ bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+ bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+ bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+ bus->pollrate, bus->pollcnt, bus->regfails);
+
+ bcm_bprintf(strbuf, "\nAdditional counters:\n");
+ bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
+ bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+ bus->rxc_errors);
+ bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+ bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+ bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+ bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+ bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+ bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+ bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
+ (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+ bus->f2txdata, bus->f1regdata);
+ {
+ dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+ bus->dhd->rx_packets);
+ dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+ (bus->f2txdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+ bcm_bprintf(strbuf, "\n\n");
+ }
+
+#ifdef SDTEST
+ if (bus->pktgen_count) {
+ bcm_bprintf(strbuf, "pktgen config and count:\n");
+ bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n",
+ bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+ bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+ bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+ bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+ }
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+ bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+ bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+ bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+ bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+ bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+ bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+ bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+ bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+ bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+ bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+
+ pktgen.version = DHD_PKTGEN_VERSION;
+ pktgen.freq = bus->pktgen_freq;
+ pktgen.count = bus->pktgen_count;
+ pktgen.print = bus->pktgen_print;
+ pktgen.total = bus->pktgen_total;
+ pktgen.minlen = bus->pktgen_minlen;
+ pktgen.maxlen = bus->pktgen_maxlen;
+ pktgen.numsent = bus->pktgen_sent;
+ pktgen.numrcvd = bus->pktgen_rcvd;
+ pktgen.numfail = bus->pktgen_fail;
+ pktgen.mode = bus->pktgen_mode;
+ pktgen.stop = bus->pktgen_stop;
+
+ bcopy(&pktgen, arg, sizeof(pktgen));
+
+ return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+ uint oldcnt, oldmode;
+
+ bcopy(arg, &pktgen, sizeof(pktgen));
+ if (pktgen.version != DHD_PKTGEN_VERSION)
+ return BCME_BADARG;
+
+ oldcnt = bus->pktgen_count;
+ oldmode = bus->pktgen_mode;
+
+ bus->pktgen_freq = pktgen.freq;
+ bus->pktgen_count = pktgen.count;
+ bus->pktgen_print = pktgen.print;
+ bus->pktgen_total = pktgen.total;
+ bus->pktgen_minlen = pktgen.minlen;
+ bus->pktgen_maxlen = pktgen.maxlen;
+ bus->pktgen_mode = pktgen.mode;
+ bus->pktgen_stop = pktgen.stop;
+
+ bus->pktgen_tick = bus->pktgen_ptick = 0;
+ bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+ bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+ /* Clear counts for a new pktgen (mode change, or was stopped) */
+ if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode))
+ bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
+
+ return 0;
+}
+#endif /* SDTEST */
+
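+/* Read or write 'size' bytes of dongle memory starting at backplane address
+ * 'address'. Transfers are split at backplane window boundaries, with the
+ * window moved between chunks and restored to the current core on exit.
+ */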
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint32 sdaddr;
+ uint dsize;
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
+
+ /* Set the backplane window to include the start address */
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+ __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+ (address & SBSDIO_SBWINDOW_MASK)));
+ if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+ DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ break;
+ }
+ sdaddr = 0;
+ dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+ DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+ bcmsdh_cur_sbwad(bus->sdh)));
+ }
+
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+ uint32 addr;
+ int rv;
+
+ /* Read last word in memory to determine address of sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, bus->ramsize - 4, (uint8 *)&addr, 4)) < 0)
+ return rv;
+
+ addr = ltoh32(addr);
+
+ DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
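+ * The test below rejects an all-zero word and a word whose top half is the
+ * bitwise complement of its bottom half (the signature of the length token
+ * written by the variables download).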
+ */
+ if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+ DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", __FUNCTION__, addr));
+ return BCME_ERROR;
+ }
+
+ /* Read the sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+ return BCME_OK;
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+ "is different than sdpcm_shared version %d in dongle\n",
+ __FUNCTION__, SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ }
+
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ /* Read the console buffer */
+ addr = ltoh32(c->log.buf);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return BCME_OK;
+}
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ char *console_buffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ trap_t tr;
+ sdpcm_shared_t sdpcm_shared;
+ struct bcmstrbuf strbuf;
+ uint32 console_ptr, console_size, console_index;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, i, addr;
+ int rv;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (data == NULL) {
+ /*
+ * Called after an rx ctrl timeout. "data" is NULL.
+ * Allocate memory to trace the trap or assert.
+ */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+ }
+
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+ goto done;
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+ if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ if (sdpcm_shared.assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ if (sdpcm_shared.assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+ }
+
+ if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.trap_addr,
+ (uint8*)&tr, sizeof(trap_t))) < 0)
+ goto done;
+
+ bcm_bprintf(&strbuf,
+ "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+ "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+ ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+ ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+ ltoh32(sdpcm_shared.trap_addr),
+ ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+ ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+ goto printbuf;
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_size, sizeof(console_size))) < 0)
+ goto printbuf;
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_index, sizeof(console_index))) < 0)
+ goto printbuf;
+
+ console_ptr = ltoh32(console_ptr);
+ console_size = ltoh32(console_size);
+ console_index = ltoh32(console_index);
+
+ if (console_size > CONSOLE_BUFFER_MAX ||
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+ goto printbuf;
+
+ if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+ (uint8 *)console_buffer, console_size)) < 0)
+ goto printbuf;
+
+ for (i = 0, n = 0; i < console_size; i += n + 1) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ ch = console_buffer[(console_index + i + n) % console_size];
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ /* Don't use DHD_ERROR macro since we print
+ * a lot of information quickly. The macro
+ * will truncate a lot of the printfs
+ */
+
+ if (dhd_msg_level & DHD_ERROR_VAL) {
+ printf("CONSOLE: %s\n", line);
+ DHD_BLOG(line, strlen(line) + 1);
+ }
+ }
+ }
+ }
+ }
+
+printbuf:
+ if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+ DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ }
+
+
+done:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+ if (console_buffer)
+ MFREE(bus->dhd->osh, console_buffer, console_size);
+
+ return bcmerror;
+}
+#endif /* #ifdef DHD_DEBUG */
+
+
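+/* Cache a caller-supplied NVRAM variable block (double-null terminated) in
+ * bus->vars so it can be pushed to the dongle during firmware download.
+ */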
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ int bcmerror = BCME_OK;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Basic sanity checks */
+ if (bus->dhd->up) {
+ bcmerror = BCME_NOTDOWN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the terminating double-null */
+ bcopy(arg, bus->vars, bus->varsz);
+err:
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24)
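+/* Query (set == FALSE) or change the dongle's serial console enable bit by
+ * selecting chip control register 1 through the indirect chipcontrol_addr/
+ * chipcontrol_data pair; returns the resulting CC_PLL_CHIPCTRL_SERIAL_ENAB
+ * state, or -1 (with *bcmerror set) on register access failure.
+ */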
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+ int int_val;
+ uint32 addr, data;
+
+
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+ *bcmerror = 0;
+
+ bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ if (!set)
+ return (int_val & CC_PLL_CHIPCTRL_SERIAL_ENAB);
+ if (enable)
+ int_val |= CC_PLL_CHIPCTRL_SERIAL_ENAB;
+ else
+ int_val &= ~CC_PLL_CHIPCTRL_SERIAL_ENAB;
+ bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
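+	/* BCM4330: additionally adjust the chipcontrol register
+	 * (clear bit 3; when enabling, set bit 3 and clear bits 1:0).
+	 */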
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ uint32 chipcontrol;
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+ chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+ chipcontrol &= ~0x8;
+ if (enable) {
+ chipcontrol |= 0x8;
+ chipcontrol &= ~0x3;
+ }
+ bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+ }
+
+ return (int_val & CC_PLL_CHIPCTRL_SERIAL_ENAB);
+}
+#endif
+
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ bool bool_val = 0;
+
+ DHD_ERROR(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+ /* Some ioctls use the bus */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
+ /* Handle sleep stuff before any clock mucking */
+ if (vi->varid == IOV_SLEEP) {
+ if (IOV_ISSET(actionid)) {
+ bcmerror = dhdsdio_bussleep(bus, bool_val);
+ } else {
+ int_val = (int32)bus->sleeping;
+ bcopy(&int_val, arg, val_size);
+ }
+ goto exit;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ if (!bus->dhd->dongle_reset) {
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_INTR):
+ int_val = (int32)bus->intr;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_INTR):
+ bus->intr = bool_val;
+ bus->intdis = FALSE;
+ if (bus->dhd->up) {
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ }
+ break;
+
+ case IOV_GVAL(IOV_POLLRATE):
+ int_val = (int32)bus->pollrate;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POLLRATE):
+ bus->pollrate = (uint)int_val;
+ bus->poll = (bus->pollrate != 0);
+ break;
+
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_IDLECLOCK):
+ int_val = (int32)bus->idleclock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLECLOCK):
+ bus->idleclock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SD1IDLE):
+ int_val = (int32)sd1idle;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SD1IDLE):
+ sd1idle = bool_val;
+ break;
+
+
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address;
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+ (set ? "write" : "read"), size, address));
+
+ /* If we know about SOCRAM, check for a fit */
+ if ((bus->orig_ramsize) &&
+ ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+ {
+ uint8 enable, protect;
+ si_socdevram(bus->sih, FALSE, &enable, &protect);
+ if (!enable || protect) {
+ DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+ __FUNCTION__, bus->orig_ramsize, size, address));
+ DHD_ERROR(("%s: socram enable %d, protect %d\n",
+ __FUNCTION__, enable, protect));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (enable && (bus->sih->chip == BCM4330_CHIP_ID)) {
+ uint32 devramsize = si_socdevram_size(bus->sih);
+ if ((address < SOCDEVRAM_4330_ARM_ADDR) ||
+ (address + size > (SOCDEVRAM_4330_ARM_ADDR + devramsize))) {
+ DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+ __FUNCTION__, address, size));
+ DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+ __FUNCTION__, SOCDEVRAM_4330_ARM_ADDR, devramsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+				/* Remap the ARM-view address to the corresponding
+				 * backplane address before the access.
+				 */
+ address -= SOCDEVRAM_4330_ARM_ADDR;
+ address += SOCDEVRAM_4330_BP_ADDR;
+ DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+ __FUNCTION__, (set ? "write" : "read"), size, address));
+ }
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_MEMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_SDIOD_DRIVE):
+ int_val = (int32)dhd_sdiod_drive_strength;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDIOD_DRIVE):
+ dhd_sdiod_drive_strength = int_val;
+ si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+ break;
+
+ case IOV_SVAL(IOV_DOWNLOAD):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_SOCRAM_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdsdio_downloadvars(bus, arg, len);
+ break;
+
+ case IOV_GVAL(IOV_READAHEAD):
+ int_val = (int32)dhd_readahead;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_READAHEAD):
+ if (bool_val && !dhd_readahead)
+ bus->nextlen = 0;
+ dhd_readahead = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDRXCHAIN):
+ int_val = (int32)bus->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDRXCHAIN):
+ if (bool_val && !bus->sd_rxchain)
+ bcmerror = BCME_UNSUPPORTED;
+ else
+ bus->use_rxchain = bool_val;
+ break;
+ case IOV_GVAL(IOV_ALIGNCTL):
+ int_val = (int32)dhd_alignctl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ALIGNCTL):
+ dhd_alignctl = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDALIGN):
+ int_val = DHD_SDALIGN;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_VARS):
+ if (bus->varsz < (uint)len)
+ bcopy(bus->vars, arg, bus->varsz);
+ else
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+	/* Same as above, but the offset is relative to the backplane enumeration
+	 * base (SI_ENUM_BASE) rather than the SDIO core registers.
+	 */
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ case IOV_GVAL(IOV_SDCIS):
+ {
+ *(char *)arg = 0;
+
+ bcmstrcat(arg, "\nFunc 0\n");
+ bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 1\n");
+ bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 2\n");
+ bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FORCEEVEN):
+ int_val = (int32)forcealign;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCEEVEN):
+ forcealign = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TXMINMAX):
+ int_val = (int32)dhd_txminmax;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXMINMAX):
+ dhd_txminmax = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_SERIALCONS):
+ int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+ if (bcmerror != 0)
+ break;
+
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SERIALCONS):
+ dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+ break;
+
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+ case IOV_GVAL(IOV_EXTLOOP):
+ int_val = (int32)bus->ext_loop;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_EXTLOOP):
+ bus->ext_loop = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_get(bus, arg);
+ break;
+
+ case IOV_SVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_set(bus, arg);
+ break;
+#endif /* SDTEST */
+
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_SVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+ __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+ bus->dhd->busstate));
+
+ ASSERT(bus->dhd->osh);
+ /* ASSERT(bus->cl_devid); */
+
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+ break;
+#ifdef SOFTAP
+ case IOV_GVAL(IOV_FWPATH):
+ {
+ uint32 fw_path_len;
+
+ fw_path_len = strlen(bus->fw_path);
+ DHD_INFO(("[softap] get fwpath, l=%d\n", len));
+
+ if (fw_path_len > len-1) {
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+
+ if (fw_path_len) {
+ bcopy(bus->fw_path, arg, fw_path_len);
+ ((uchar*)arg)[fw_path_len] = 0;
+ }
+ break;
+ }
+
+ case IOV_SVAL(IOV_FWPATH):
+ DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val));
+
+ switch (int_val) {
+ case 1:
+ bus->fw_path = fw_path; /* ordinary one */
+ break;
+ case 2:
+ bus->fw_path = fw_path2;
+ break;
+ default:
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL")));
+ break;
+
+#endif /* SOFTAP */
+ case IOV_GVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+ /* Get its status */
+ int_val = (bool) bus->dhd->dongle_reset;
+ bcopy(&int_val, arg, val_size);
+
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == FALSE)
+ dhd_preinit_ioctls((dhd_pub_t *) bus->dhd);
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
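+	/* Vars are placed at the top of RAM, just below the 4-byte length token. */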
+
+ if (bus->vars) {
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
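+			/* PR85623 workaround: if the vars region would start at
+			 * offset 0x3C within a 64-byte window, grow it by one
+			 * word and start it one word earlier.
+			 */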
+ if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+ DHD_ERROR(("PR85623WAR in place\n"));
+ varsize += 4;
+ varaddr -= 4;
+ }
+ }
+
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+ if (!nvram_ularray)
+ return BCME_NOMEM;
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ bus->orig_ramsize, bus->ramsize));
+ DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize));
+ varsize = ((bus->orig_ramsize - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+	 * Varsize, converted to words, in the lower 16 bits; the bitwise
+	 * complement of that word count in the upper 16 bits (a simple checksum).
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dhdsdio_membytes(bus, TRUE, (bus->orig_ramsize - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+ uint retries;
+ int bcmerror = 0;
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+ bus->alp_only = TRUE;
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_disable(bus->sih, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset SOCRAM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
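+		/* Clear any pending interrupts before restarting the ARM */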
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+ }
+
+fail:
+ /* Always return to SDIOD core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+ return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Turn on clock in case SD command needs backplane */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+ /* Check for bus configuration changes of interest */
+
+ /* If it was divisor change, read the new one */
+ if (set && strcmp(name, "sd_divisor") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_divisor = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_divisor));
+ }
+ }
+ /* If it was a mode change, read the new one */
+ if (set && strcmp(name, "sd_mode") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_mode = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_mode));
+ }
+ }
+ /* Similar check for blocksize change */
+ if (set && strcmp(name, "sd_blocksize") == 0) {
+ int32 fnum = 2;
+ if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+ }
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+ osl_t *osh;
+ uint32 local_hostintmask;
+ uint8 saveclk;
+ uint retries;
+ int err;
+ if (!bus->dhd)
+ return;
+
+ osh = bus->dhd->osh;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_waitlockfree(NULL);
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Change our idea of bus state */
+ bus->dhd->busstate = DHD_BUS_DOWN;
+
+ /* Enable clock for device interrupts */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Disable and clear interrupts at the chip level also */
+ W_SDREG(0, &bus->regs->hostintmask, retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ }
+
+ /* Turn off the bus (F2), free any pending packets */
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+
+ /* Turn off the backplane clock (only) */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Clear the data packet queues */
+ pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ PKTFREE(osh, bus->glomd, FALSE);
+
+ if (bus->glom)
+ PKTFREE(osh, bus->glom, FALSE);
+
+ bus->glom = bus->glomd = NULL;
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ dhd_os_ioctl_resp_wake(bus->dhd);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = FALSE;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+}
+
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ dhd_timeout_t tmo;
+ uint retries = 0;
+ uint8 ready, enable;
+ int err, ret = 0;
+ uint8 saveclk;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(bus->dhd);
+ if (!bus->dhd)
+ return 0;
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (bus->clkstate != CLK_AVAIL) {
+ DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+ goto exit;
+ }
+
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+
+ /* Enable function 2 (frame transfers) */
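+	/* First announce the host's SDPCM protocol version to the dongle
+	 * through the to-SB mailbox data register.
+	 */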
+ W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+ &bus->regs->tosbmailboxdata, retries);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+ /* Give the dongle some time to do its thing and set IOR2 */
+ dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+ ready = 0;
+ while (ready != enable && !dhd_timeout_expired(&tmo))
+ ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+
+ DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+ __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (ready == enable) {
+ /* Make sure we're talking to the core. */
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+ bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+ ASSERT(bus->regs != NULL);
+
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ /* corerev 4 could use the newer interrupt logic to detect the frames */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+ (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+ bus->hostintmask &= ~I_HMB_FRAME_IND;
+ bus->hostintmask |= I_XMTDATA_AVAIL;
+ }
+ W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err);
+
+ /* Set bus state according to enable result */
+ dhdp->busstate = DHD_BUS_DATA;
+
+ /* bcmsdh_intr_unmask(bus->sdh); */
+
+ bus->intdis = FALSE;
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+	} else {
+ /* Disable F2 again */
+ enable = SDIO_FUNC_ENABLE_1;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+ }
+
+ /* Restore previous clock setting */
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+
+ /* If we didn't come up, turn off backplane clock */
+ if (dhdp->busstate != DHD_BUS_DATA)
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+
+ return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+ uint16 lastrbc;
+ uint8 hi, lo;
+ int err;
+
+ DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+ (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+ if (abort) {
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ }
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+ bus->f1regdata++;
+
+ /* Wait until the packet has been flushed (device/FIFO stable) */
+ for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+
+ if ((hi == 0) && (lo == 0))
+ break;
+
+ if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+ DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+ __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+ }
+ lastrbc = (hi << 8) + lo;
+ }
+
+ if (!retries) {
+ DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+ } else {
+ DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+ }
+
+ if (rtx) {
+ bus->rxrtx++;
+ W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+ bus->f1regdata++;
+ if (retries <= retry_limit) {
+ bus->rxskip = TRUE;
+ }
+ }
+
+ /* Clear partial in any case */
+ bus->nextlen = 0;
+
+ /* If we can't reach the device, signal failure */
+ if (err || bcmsdh_regfail(sdh))
+ bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint rdlen, pad;
+
+ int sdret;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Control data already received in aligned rxctl */
+ if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+ goto gotpkt;
+
+ ASSERT(bus->rxbuf);
+ /* Set rxctl for frame (w/optional alignment) */
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+
+ /* Copy the already-read portion over */
+ bcopy(hdr, bus->rxctl, firstread);
+ if (len <= firstread)
+ goto gotpkt;
+
+ /* Copy the full data pkt in gSPI case and process ioctl. */
+ if (bus->bus == SPI_BUS) {
+ bcopy(hdr, bus->rxctl, len);
+ goto gotpkt;
+ }
+
+ /* Raise rdlen to next SDIO block to avoid tail command */
+ rdlen = len - firstread;
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((len + pad) < bus->dhd->maxctl))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ /* Drop if the read is too big or it exceeds our maximum */
+ if ((rdlen + firstread) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+ __FUNCTION__, rdlen, bus->dhd->maxctl));
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+ if ((len - doff) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+
+ /* Read remainder of frame body into the rxctl buffer */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+ bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ goto done;
+ }
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("RxCtrl", bus->rxctl, len);
+ }
+#endif
+
+ /* Point to valid data and indicate its length */
+ bus->rxctl += doff;
+ bus->rxlen = len - doff;
+
+done:
+ /* Awake any waiters */
+ dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+ uint16 dlen, totlen;
+ uint8 *dptr, num = 0;
+
+ uint16 sublen, check;
+ void *pfirst, *plast, *pnext, *save_pfirst;
+ osl_t *osh = bus->dhd->osh;
+
+ int errcode;
+ uint8 chan, seq, doff, sfdoff;
+ uint8 txmax;
+
+ int ifidx = 0;
+ bool usechain = bus->use_rxchain;
+
+ /* If packets, issue read(s) and send up packet chain */
+ /* Return sequence numbers consumed? */
+
+ DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+ /* If there's a descriptor, generate the packet chain */
+ if (bus->glomd) {
+ dhd_os_sdlock_rxq(bus->dhd);
+
+ pfirst = plast = pnext = NULL;
+ dlen = (uint16)PKTLEN(osh, bus->glomd);
+ dptr = PKTDATA(osh, bus->glomd);
+ if (!dlen || (dlen & 1)) {
+ DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+ __FUNCTION__, dlen));
+ dlen = 0;
+ }
+
+ for (totlen = num = 0; dlen; num++) {
+ /* Get (and move past) next length */
+ sublen = ltoh16_ua(dptr);
+ dlen -= sizeof(uint16);
+ dptr += sizeof(uint16);
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+ DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+ __FUNCTION__, num, sublen));
+ pnext = NULL;
+ break;
+ }
+ if (sublen % DHD_SDALIGN) {
+ DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+ __FUNCTION__, sublen, DHD_SDALIGN));
+ usechain = FALSE;
+ }
+ totlen += sublen;
+
+ /* For last frame, adjust read len so total is a block multiple */
+ if (!dlen) {
+ sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+ totlen = ROUNDUP(totlen, bus->blocksize);
+ }
+
+ /* Allocate/chain packet for next subframe */
+ if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+ DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+ __FUNCTION__, num, sublen));
+ break;
+ }
+ ASSERT(!PKTLINK(pnext));
+ if (!pfirst) {
+ ASSERT(!plast);
+ pfirst = plast = pnext;
+ } else {
+ ASSERT(plast);
+ PKTSETNEXT(osh, plast, pnext);
+ plast = pnext;
+ }
+
+ /* Adhere to start alignment requirements */
+ PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+ }
+
+ /* If all allocations succeeded, save packet chain in bus structure */
+ if (pnext) {
+ DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+ __FUNCTION__, totlen, num));
+ if (DHD_GLOM_ON() && bus->nextlen) {
+ if (totlen != bus->nextlen) {
+ DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+ "rxseq %d\n", __FUNCTION__, bus->nextlen,
+ totlen, rxseq));
+ }
+ }
+ bus->glom = pfirst;
+ pfirst = pnext = NULL;
+ } else {
+ if (pfirst)
+ PKTFREE(osh, pfirst, FALSE);
+ bus->glom = NULL;
+ num = 0;
+ }
+
+ /* Done with descriptor packet */
+ PKTFREE(osh, bus->glomd, FALSE);
+ bus->glomd = NULL;
+ bus->nextlen = 0;
+
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+
+ /* Ok -- either we just generated a packet chain, or had one from before */
+ if (bus->glom) {
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+ for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+ DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
+ pnext, (uint8*)PKTDATA(osh, pnext),
+ PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+ }
+ }
+
+ pfirst = bus->glom;
+ dlen = (uint16)pkttotlen(osh, pfirst);
+
+ /* Do an SDIO read for the superframe. Configurable iovar to
+ * read directly into the chained packet, or allocate a large
+		 * packet and copy into the chain.
+ */
+ if (usechain) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+ dlen, pfirst, NULL, NULL);
+ } else if (bus->dataptr) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, bus->dataptr,
+ dlen, NULL, NULL, NULL);
+ sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+ if (sublen != dlen) {
+ DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+ __FUNCTION__, dlen, sublen));
+ errcode = -1;
+ }
+ pnext = NULL;
+ } else {
+ DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+ errcode = -1;
+ }
+ bus->f2rxdata++;
+ ASSERT(errcode != BCME_PENDING);
+
+ /* On failure, kill the superframe, allow a couple retries */
+ if (errcode < 0) {
+ DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+ __FUNCTION__, dlen, errcode));
+ bus->dhd->rx_errors++;
+
+ if (bus->glomerr++ < 3) {
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ return 0;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 48));
+ }
+#endif
+
+
+ /* Validate the superframe header */
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
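+		/* The readahead length hint (nextlen) is expressed in 16-byte units */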
+ bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ errcode = 0;
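+		/* The second hardware-header word is expected to be the bitwise
+		 * complement of the length word.
+		 */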
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, sublen, check));
+ errcode = -1;
+ } else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+ DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+ errcode = -1;
+ } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+ DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+ SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+ errcode = -1;
+ } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+ DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+ DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+ __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN));
+ errcode = -1;
+ }
+
+ /* Check sequence number of superframe SW header */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq;
+ }
+ bus->tx_max = txmax;
+
+ /* Remove superframe header, remember offset */
+ PKTPULL(osh, pfirst, doff);
+ sfdoff = doff;
+
+ /* Validate all the subframe headers */
+ for (num = 0, pnext = pfirst; pnext && !errcode;
+ num++, pnext = PKTNEXT(osh, pnext)) {
+ dptr = (uint8 *)PKTDATA(osh, pnext);
+ dlen = (uint16)PKTLEN(osh, pnext);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("subframe", dptr, 32);
+ }
+#endif
+
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (subframe %d): HW hdr error: "
+ "len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, num, sublen, check));
+ errcode = -1;
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+ DHD_ERROR(("%s (subframe %d): length mismatch: "
+ "len 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, num, sublen, dlen));
+ errcode = -1;
+ } else if ((chan != SDPCM_DATA_CHANNEL) &&
+ (chan != SDPCM_EVENT_CHANNEL)) {
+ DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+ __FUNCTION__, num, chan));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+ DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+ __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+ errcode = -1;
+ }
+ }
+
+ if (errcode) {
+ /* Terminate frame on error, request a couple retries */
+ if (bus->glomerr++ < 3) {
+ /* Restore superframe header space */
+ PKTPUSH(osh, pfirst, sfdoff);
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ bus->nextlen = 0;
+ return 0;
+ }
+
+ /* Basic SD framing looks ok - process each packet (header) */
+ save_pfirst = pfirst;
+ bus->glom = NULL;
+ plast = NULL;
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+ pnext = PKTNEXT(osh, pfirst);
+ PKTSETNEXT(osh, pfirst, NULL);
+
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+ __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+ PKTLEN(osh, pfirst), sublen, chan, seq));
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+ if (rxseq != seq) {
+ DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Subframe Data", dptr, dlen);
+ }
+#endif
+
+ PKTSETLEN(osh, pfirst, sublen);
+ PKTPULL(osh, pfirst, doff);
+
+ if (PKTLEN(osh, pfirst) == 0) {
+ PKTFREE(bus->dhd->osh, pfirst, FALSE);
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ bus->dhd->rx_errors++;
+ PKTFREE(osh, pfirst, FALSE);
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+ }
+
+ /* this packet will go up, link back into chain and count it */
+ PKTSETNEXT(osh, pfirst, pnext);
+ plast = pfirst;
+ num++;
+
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+ __FUNCTION__, num, pfirst,
+ PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+ PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+ prhex("", (uint8 *)PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 32));
+ }
+#endif /* DHD_DEBUG */
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+ if (num) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num, 0);
+ dhd_os_sdlock(bus->dhd);
+ }
+
+ bus->rxglomframes++;
+ bus->rxglompkts += num;
+ }
+ return num;
+}
+
+/* Read up to maxframes frames from the dongle; returns the number of frames
+ * read and sets *finished when no more frames are pending.
+ */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+ osl_t *osh = bus->dhd->osh;
+ bcmsdh_info_t *sdh = bus->sdh;
+
+ uint16 len, check; /* Extracted hardware header fields */
+ uint8 chan, seq, doff; /* Extracted software header fields */
+ uint8 fcbits; /* Extracted fcbits from software header */
+ uint8 delta;
+
+ void *pkt; /* Packet for event or data frames */
+ uint16 pad; /* Number of pad bytes to read */
+ uint16 rdlen; /* Total number of bytes to read */
+ uint8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from bcmsdh calls */
+ uint8 txmax; /* Maximum tx sequence offered */
+ bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+ uint8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+ bool sdtest = FALSE; /* To limit message spew from test mode */
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(maxframes);
+
+#ifdef SDTEST
+ /* Allow pktgen to override maxframes */
+ if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+ maxframes = bus->pktgen_count;
+ sdtest = TRUE;
+ }
+#endif
+
+ /* Not finished unless we encounter no more frames indication */
+ *finished = FALSE;
+
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+ rxseq++, rxleft--) {
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ uint8 cnt;
+ DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+ __FUNCTION__, bus->glomd, bus->glom));
+ cnt = dhdsdio_rxglom(bus, rxseq);
+ DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
+ if (dhd_readahead && bus->nextlen) {
+ uint16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
+
+ if (bus->bus == SPI_BUS) {
+ rdlen = len = nextlen;
+ }
+ else {
+ rdlen = len = nextlen << 4;
+
+ /* Pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+ }
+
+			/* We use the bus->rxctl buffer in WinXP for initial control
+			 * packet receives.  Later we use the buffer pool for data as
+			 * well as control packets.  This is required because dhd
+			 * receives the full frame in gSPI, unlike SDIO.  After the
+			 * frame is received we have to distinguish whether it is data
+			 * or a non-data frame.
+			 */
+ /* Allocate a packet buffer */
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+ if (bus->bus == SPI_BUS) {
+ bus->usebufpool = FALSE;
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+ rxbuf = bus->rxctl;
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ /* dhd.rx_ctlerrs is higher level */
+ bus->rxc_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ } else {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+ "expected rxseq %d\n",
+ __FUNCTION__, len, rdlen, rxseq));
+ /* Just go try again w/normal header read */
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ } else {
+ if (bus->bus == SPI_BUS)
+ bus->usebufpool = TRUE;
+
+ ASSERT(!PKTLINK(pkt));
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+ rxbuf = (uint8 *)PKTDATA(osh, pkt);
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ bus->dhd->rx_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ /* Force retry w/normal header read. Don't attempt NAK for
+ * gSPI
+ */
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ /* Now check the header */
+ bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means readahead info was bad */
+ if (!(len|check)) {
+ DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+ __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+ " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+ len, check));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+ __FUNCTION__, len));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+			/* Check for consistency with readahead info (the flag is
+			 * set when the lengths do not match).
+			 */
+			len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+ if (len_consistent) {
+ /* Mismatch, force retry w/normal header (may be >4K) */
+ DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+ "expected rxseq %d\n",
+ __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+ " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+ seq));
+ bus->nextlen = 0;
+ }
+
+ bus->dhd->rx_readahead_cnt ++;
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq;
+ }
+ bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", rxbuf, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ if (bus->bus == SPI_BUS) {
+ dhdsdio_read_control(bus, rxbuf, len, doff);
+ if (bus->usebufpool) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+ continue;
+ } else {
+ DHD_ERROR(("%s (nextlen): readahead on control"
+ " packet %d?\n", __FUNCTION__, seq));
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ dhdsdio_rxfail(bus, FALSE, TRUE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ }
+
+ if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+					"rx pktbufs or not yet malloced.\n", len, chan));
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+		/* gSPI frames should not be handled in fragments */
+ if (bus->bus == SPI_BUS) {
+ break;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ bus->rxhdr, firstread, NULL, NULL, NULL);
+ bus->f2rxhdrs++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+ bus->rx_hdrfail++;
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ continue;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means no more frames */
+ if (!(len|check)) {
+ *finished = TRUE;
+ break;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, len, check));
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+ bus->rx_badhdr++;
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+ (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+ /* Length to read */
+ rdlen = (len > firstread) ? (len - firstread) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ if ((rdlen + firstread) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+ __FUNCTION__, rdlen, chan));
+ bus->dhd->rx_dropped++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+ continue;
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ ASSERT(!PKTLINK(pkt));
+
+ /* Leave room for what we already read, and align remainder */
+ ASSERT(firstread < (PKTLEN(osh, pkt)));
+ PKTPULL(osh, pkt, firstread);
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+ ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ PKTPUSH(osh, pkt, firstread);
+ bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+ __FUNCTION__, len));
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("Glom Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+ PKTSETLEN(osh, pkt, len);
+ ASSERT(doff == SDPCM_HDRLEN);
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ PKTSETLEN(osh, pkt, len);
+ PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+ /* Test channel packets are processed separately */
+ if (chan == SDPCM_TEST_CHANNEL) {
+ dhdsdio_testrcv(bus, pkt, seq);
+ continue;
+ }
+#endif /* SDTEST */
+
+ if (PKTLEN(osh, pkt) == 0) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ continue;
+ }
+
+
+ /* Unlock during rx call */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, pkt, 1, chan);
+ dhd_os_sdlock(bus->dhd);
+ }
+ rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+ /* Message if we hit the limit */
+ if (!rxleft && !sdtest)
+ DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+ else
+#endif /* DHD_DEBUG */
+ DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus = 0;
+ uint32 hmb_data;
+ uint8 fcbits;
+ uint retries = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Read mailbox data and ack that we did so */
+ R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+ if (!bus->rxskip) {
+ DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+ }
+ bus->rxskip = FALSE;
+ intstatus |= FRAME_AVAIL_MASK(bus);
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION));
+ else
+ DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* make sure corecontrol is set properly for SDIO_DEVICE_RXDATAINT_MODE_1 */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+ uint32 val;
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ }
+
+#ifdef DHD_DEBUG
+ /* Retrieve console state address now that firmware should have updated it */
+ {
+ sdpcm_shared_t shared;
+ if (dhdsdio_readshared(bus, &shared) == 0)
+ bus->console_addr = shared.console_addr;
+ }
+#endif /* DHD_DEBUG */
+ }
+
+ /*
+	 * Flow control has been moved into the RX headers and this out-of-band
+	 * method is no longer used. It is kept here for backward compatibility
+	 * with older dongles.
+ */
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+#ifdef DHD_DEBUG
+ /* At least print a message if FW halted */
+ if (hmb_data & HMB_DATA_FWHALT) {
+ DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED\n"));
+ dhdsdio_checkdied(bus, NULL, 0);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_FWHALT |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK |
+ HMB_DATA_VERSION_MASK)) {
+ DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+ }
+
+ return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+ uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = TRUE; /* Flag for no more read data */
+ bool resched = FALSE; /* Flag indicating resched wanted */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+ bus->intstatus = 0;
+ return 0;
+ }
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ dhd_os_sdlock(bus->dhd);
+
+ /* If waiting for HTAVAIL, check status */
+ if (bus->clkstate == CLK_PENDING) {
+ int err;
+ uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+ /* Check for inconsistent device control */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ } else {
+ ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+
+ DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+ __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+ __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+ if (bus->clkstate != CLK_AVAIL)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = FALSE;
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ bus->f1regdata++;
+ if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+ (newstatus == I_XMTDATA_AVAIL)) {
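+				/* In RXDATAINT_MODE_0 a status that is only XMTDATA_AVAIL is
+				 * deliberately left unacknowledged here.
+				 */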
+ }
+ else
+ W_SDREG(newstatus, &regs->intstatus, retries);
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata += 2;
+ bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Just being here means nothing more to do for chipactive */
+ if (intstatus & I_CHIPACTIVE) {
+ /* ASSERT(bus->clkstate == CLK_AVAIL); */
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ if (intstatus & I_WR_OOSYNC) {
+ DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ if (intstatus & I_SBINT) {
+ DHD_ERROR(("Dongle reports SBINT\n"));
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip) {
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ }
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE(bus, intstatus)) {
+ framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+ if (rxdone || bus->rxskip)
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ rxlimit -= MIN(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+ bus->intstatus = intstatus;
+
+clkwait:
+ /* Re-enable interrupts to detect new device events (mailbox, rx frame)
+ * or clock availability. (Allows tx loop to check ipend if desired.)
+ * (Unless register access seems hosed, as we may not be able to ACK...)
+ */
+ if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+ DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+ __FUNCTION__, rxdone, framecnt));
+ bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+ bcmsdh_intr_enable(sdh);
+ }
+
+ if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+ int ret, i;
+
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+ NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+
+ bus->ctrl_frame_stat = FALSE;
+ dhd_wait_event_wakeup(bus->dhd);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+ framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+ framecnt = dhdsdio_sendfromq(bus, framecnt);
+ txlimit -= framecnt;
+ }
+ /* Resched the DPC if ctrl cmd is pending on bus credit */
+ if (bus->ctrl_frame_stat)
+ resched = TRUE;
+
+ /* Resched if events or tx frames are pending, else await next interrupt */
+ /* On failed register access, all bets are off: no resched or interrupts */
+ if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+ DHD_ERROR(("%s: failed backplane access over SDIO, halting operation %d \n",
+ __FUNCTION__, bcmsdh_regfail(sdh)));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->intstatus = 0;
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Awaiting I_CHIPACTIVE; don't resched */
+ } else if (bus->intstatus || bus->ipend ||
+ (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+ PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */
+ resched = TRUE;
+ }
+
+ bus->dpc_sched = resched;
+
+ /* If we're done for now, turn off clock request. */
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+ bool resched;
+
+ /* Call the DPC directly. */
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ resched = dhdsdio_dpc(bus);
+
+ return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)arg;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!bus) {
+		DHD_ERROR(("%s: bus is a null pointer, exiting\n", __FUNCTION__));
+ return;
+ }
+ sdh = bus->sdh;
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Count the interrupt call */
+ bus->intrcount++;
+ bus->ipend = TRUE;
+
+ /* Shouldn't get this interrupt if we're sleeping? */
+ if (bus->sleeping) {
+ DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+ return;
+ }
+
+	/* Disable additional interrupts (is this still needed?) */
+ if (bus->intr) {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+ }
+
+ bcmsdh_intr_disable(sdh);
+ bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ while (dhdsdio_dpc(bus));
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+#endif
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+ /* Default to specified length, or full range */
+ if (dhd_pktgen_len) {
+ bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+ bus->pktgen_minlen = bus->pktgen_maxlen;
+ } else {
+ bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+ bus->pktgen_minlen = 0;
+ }
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Default to per-watchdog burst with 10s print time */
+ bus->pktgen_freq = 1;
+ bus->pktgen_print = 10000 / dhd_watchdog_ms;
+ bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
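+	/* pktgen_count scales dhd_pktgen by the watchdog period (ms / 1000), rounded up */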
+
+ /* Default to echo mode */
+ bus->pktgen_mode = DHD_PKTGEN_ECHO;
+ bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+ void *pkt;
+ uint8 *data;
+ uint pktcount;
+ uint fillbyte;
+ osl_t *osh = bus->dhd->osh;
+ uint16 len;
+
+ /* Display current count if appropriate */
+ if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+ bus->pktgen_ptick = 0;
+ printf("%s: send attempts %d rcvd %d\n",
+ __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd);
+ }
+
+ /* For recv mode, just make sure dongle has started sending */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+ bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+ dhdsdio_sdtest_set(bus, (uint8)bus->pktgen_total);
+ }
+ return;
+ }
+
+ /* Otherwise, generate or request the specified number of packets */
+ for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+ /* Stop if total has been reached */
+ if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ break;
+ }
+
+ /* Allocate an appropriate-sized packet */
+ len = bus->pktgen_len;
+ if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ break;
+ }
+ PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Write test header cmd and extra based on mode */
+ switch (bus->pktgen_mode) {
+ case DHD_PKTGEN_ECHO:
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_SEND:
+ *data++ = SDPCM_TEST_DISCARD;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_RXBURST:
+ *data++ = SDPCM_TEST_BURST;
+ *data++ = (uint8)bus->pktgen_count;
+ break;
+
+ default:
+ DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+ PKTFREE(osh, pkt, TRUE);
+ bus->pktgen_count = 0;
+ return;
+ }
+
+ /* Write test header length field */
+ *data++ = (len >> 0);
+ *data++ = (len >> 8);
+
+ /* Then fill in the remainder -- N/A for burst, but who cares... */
+ for (fillbyte = 0; fillbyte < len; fillbyte++)
+ *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+ prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) {
+ bus->pktgen_fail++;
+ if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+ bus->pktgen_count = 0;
+ }
+ bus->pktgen_sent++;
+
+ /* Bump length if not fixed, wrap at max */
+ if (++bus->pktgen_len > bus->pktgen_maxlen)
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Special case for burst mode: just send one request! */
+ if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+ break;
+ }
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count)
+{
+ void *pkt;
+ uint8 *data;
+ osl_t *osh = bus->dhd->osh;
+
+ /* Allocate the packet */
+ if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ return;
+ }
+ PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Fill in the test header */
+ *data++ = SDPCM_TEST_SEND;
+ *data++ = count;
+ *data++ = (bus->pktgen_maxlen >> 0);
+ *data++ = (bus->pktgen_maxlen >> 8);
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE))
+ bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+ osl_t *osh = bus->dhd->osh;
+ uint8 *data;
+ uint pktlen;
+
+ uint8 cmd;
+ uint8 extra;
+ uint16 len;
+ uint16 offset;
+
+ /* Check for min length */
+ if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+
+ /* Extract header fields */
+ data = PKTDATA(osh, pkt);
+ cmd = *data++;
+ extra = *data++;
+ len = *data++; len += *data++ << 8;
+ DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+ /* Check length for relevant commands */
+ if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+ if (pktlen != len + SDPCM_TEST_HDRLEN) {
+ DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+ }
+
+ /* Process as per command */
+ switch (cmd) {
+ case SDPCM_TEST_ECHOREQ:
+ /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+ *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) {
+ bus->pktgen_sent++;
+ } else {
+ bus->pktgen_fail++;
+ PKTFREE(osh, pkt, FALSE);
+ }
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_ECHORSP:
+ if (bus->ext_loop) {
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+ }
+
+ for (offset = 0; offset < len; offset++, data++) {
+ if (*data != SDPCM_TEST_FILL(offset, extra)) {
+ DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+ "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+ offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+ break;
+ }
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_DISCARD:
+ {
+ int i = 0;
+ uint8 *prn = data;
+ uint8 testval = extra;
+ for (i = 0; i < len; i++) {
+ if (*prn != testval) {
+ DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+ i, bus->pktgen_rcvd_rcvsession, testval, *prn));
+ prn++; testval++;
+ }
+ }
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_BURST:
+ case SDPCM_TEST_SEND:
+ default:
+ DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ break;
+ }
+
+ /* For recv mode, stop at limit (and tell dongle to stop sending) */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+ bus->pktgen_rcvd_rcvsession++;
+
+ if (bus->pktgen_total &&
+ (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ DHD_ERROR(("Pktgen:rcv test complete!\n"));
+ bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+ dhdsdio_sdtest_set(bus, FALSE);
+ bus->pktgen_rcvd_rcvsession = 0;
+ }
+ }
+ }
+}
+#endif /* SDTEST */
+
+extern void
+dhd_disable_intr(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus;
+ bus = dhdp->bus;
+ bcmsdh_intr_disable(bus->sdh);
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus;
+
+ DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+ bus = dhdp->bus;
+
+ if (bus->dhd->dongle_reset)
+ return FALSE;
+
+ /* Ignore the timer if simulating bus down */
+ if (bus->sleeping)
+ return FALSE;
+
+ if (dhdp->busstate == DHD_BUS_DOWN)
+ return FALSE;
+
+ /* Poll period: check device if appropriate. */
+ if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+ uint32 intstatus = 0;
+
+ /* Reset poll tick */
+ bus->polltick = 0;
+
+ /* Check device if no interrupts */
+ if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+ if (!bus->dpc_sched) {
+ uint8 devpend;
+ devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+ SDIOD_CCCR_INTPEND, NULL);
+ intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+ }
+
+ /* If there is something, make like the ISR and schedule the DPC */
+ if (intstatus) {
+ bus->pollcnt++;
+ bus->ipend = TRUE;
+ if (bus->intr) {
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+
+ }
+ }
+
+ /* Update interrupt tracking */
+ bus->lastintrs = bus->intrcount;
+ }
+
+#ifdef DHD_DEBUG
+ /* Poll for console output periodically */
+ if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+ bus->console.count += dhd_watchdog_ms;
+ if (bus->console.count >= dhd_console_ms) {
+ bus->console.count -= dhd_console_ms;
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (dhdsdio_readconsole(bus) < 0)
+ dhd_console_ms = 0; /* On error, stop trying */
+ }
+ }
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+ /* Generate packets if configured */
+ if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ bus->pktgen_tick = 0;
+ dhdsdio_pktgen(bus);
+ }
+#endif
+
+ /* On idle timeout clear activity flag and/or turn off clock */
+ if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+ if (++bus->idlecount >= bus->idletime) {
+ bus->idlecount = 0;
+ if (bus->activity) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ }
+ }
+
+ return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint32 addr, val;
+ int rv;
+ void *pkt;
+
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0)
+ return BCME_UNSUPPORTED;
+
+ /* Exclusive bus access */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ dhd_os_sdunlock(bus->dhd);
+ return BCME_NOTREADY;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ BUS_WAKE(bus);
+	/* A pending clock state is not allowed since txpkt is called later; the HT clock has to be on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Zero cbuf_index */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+ val = htol32(0);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Write message into cbuf */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+ goto done;
+
+ /* Write length into vcons_in */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+ val = htol32(msglen);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Bump dongle by sending an empty packet on the event channel.
+ * sdpcm_sendup (RX) checks for virtual console input.
+ */
+ if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+ dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE);
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+ uint byte, tag, tdata;
+ DHD_INFO(("Function %d CIS:\n", fn));
+
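+	/* Hex-dump the CIS while walking its tuples: tdata counts the bytes left
+	 * in the current tuple; when it reaches zero the next byte is taken as a
+	 * new tuple tag (0xff ends the chain, a zero tag carries no data).
+	 */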
+ for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+ if ((byte % 16) == 0)
+ DHD_INFO((" "));
+ DHD_INFO(("%02x ", cis[byte]));
+ if ((byte % 16) == 15)
+ DHD_INFO(("\n"));
+ if (!tdata--) {
+ tag = cis[byte];
+ if (tag == 0xff)
+ break;
+ else if (!tag)
+ tdata = 0;
+ else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+ tdata = cis[byte + 1] + 1;
+ else
+ DHD_INFO(("]"));
+ }
+ }
+ if ((byte % 16) != 15)
+ DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+ if (chipid == BCM4325_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4329_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4315_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4319_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4336_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4330_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43237_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43362_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43239_CHIP_ID)
+ return TRUE;
+ return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+ uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+ int ret;
+ dhd_bus_t *bus;
+ dhd_cmn_t *cmn;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef PROP_TXSTATUS
+ uint up = 0;
+#endif
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver. Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behavior since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent initializations.
+ */
+ dhd_txbound = DHD_TXBOUND;
+ dhd_rxbound = DHD_RXBOUND;
+ dhd_alignctl = TRUE;
+ sd1idle = TRUE;
+ dhd_readahead = TRUE;
+ retrydata = FALSE;
+ dhd_doflow = FALSE;
+ dhd_dongle_memsize = 0;
+ dhd_txminmax = DHD_TXMINMAX;
+
+ forcealign = TRUE;
+
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+ /* We make assumptions about address window mappings */
+ ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+ /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+ * means early parse could fail, so here we should get either an ID
+ * we recognize OR (-1) indicating we must request power first.
+ */
+ /* Check the Vendor ID */
+ switch (venid) {
+ case 0x0000:
+ case VENDOR_BROADCOM:
+ break;
+ default:
+ DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+ __FUNCTION__, venid));
+ return NULL;
+ }
+
+ /* Check the Device ID and make sure it's one that we support */
+ switch (devid) {
+ case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */
+	case BCM4325_D11G_ID:		/* 4325 802.11g 2.4GHz band id */
+	case BCM4325_D11A_ID:		/* 4325 802.11a 5GHz band id */
+ DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4329_D11N_ID: /* 4329 802.11n dualband device */
+ case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */
+ case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */
+ case 0x4329:
+ DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */
+ case BCM4315_D11G_ID: /* 4315 802.11g id */
+ case BCM4315_D11A_ID: /* 4315 802.11a id */
+ DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4319_D11N_ID: /* 4319 802.11n id */
+ case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */
+ case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */
+ DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+ break;
+ case 0:
+ DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+ __FUNCTION__));
+ break;
+
+ default:
+ DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+ __FUNCTION__, venid, devid));
+ return NULL;
+ }
+
+ if (osh == NULL) {
+ /* Ask the OS interface part for an OSL handle */
+ if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
+ DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+ return NULL;
+ }
+ }
+
+ /* Allocate private bus interface state */
+ if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+ DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+ goto fail;
+ }
+ bzero(bus, sizeof(dhd_bus_t));
+ bus->sdh = sdh;
+ bus->cl_devid = (uint16)devid;
+ bus->bus = DHD_BUS;
+ bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+ /* attach the common module */
+ if (!(cmn = dhd_common_init(bus->cl_devid, osh))) {
+ DHD_ERROR(("%s: dhd_common_init failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* attempt to attach to the dongle */
+ if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+ DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+ dhd_common_deinit(NULL, cmn);
+ goto fail;
+ }
+
+ /* Attach to the dhd/OS/network interface */
+ if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+ DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ bus->dhd->cmn = cmn;
+ cmn->dhd = bus->dhd;
+
+ /* Allocate buffers */
+ if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+ bcmsdh_intr_disable(sdh);
+ if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+ DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+ DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+ } else {
+ DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+ __FUNCTION__));
+ }
+
+ DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ /* Read MAC address from external customer place */
+ memset(&ea_addr, 0, sizeof(ea_addr));
+ ret = dhd_custom_get_mac_address(ea_addr.octet);
+ if (!ret) {
+ memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ /* if firmware path present try to download and bring up bus */
+ if (dhd_download_fw_on_driverload && (ret = dhd_bus_start(bus->dhd)) != 0) {
+ DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+ if (ret == BCME_NOTUP)
+ goto fail;
+ }
+ /* Ok, have the per-port tell the stack we're open for business */
+ if (dhd_net_attach(bus->dhd, 0) != 0) {
+ DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef PROP_TXSTATUS
+ if (dhd_download_fw_on_driverload)
+ dhd_wl_ioctl_cmd(bus->dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
+#endif
+ return bus;
+
+fail:
+ dhdsdio_release(bus, osh);
+ return NULL;
+}
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+ uint16 devid)
+{
+ int err = 0;
+ uint8 clkctl = 0;
+
+ bus->alp_only = TRUE;
+
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+ DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+ }
+
+#ifdef DHD_DEBUG
+ DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+ bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+
+#endif /* DHD_DEBUG */
+
+
+ /* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+ if (!err)
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+ DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ err, DHD_INIT_CLKCTL1, clkctl));
+ goto fail;
+ }
+
+
+#ifdef DHD_DEBUG
+ if (DHD_INFO_ON()) {
+ uint fn, numfn;
+ uint8 *cis[SDIOD_MAX_IOFUNCS];
+ int err = 0;
+
+ numfn = bcmsdh_query_iofnum(sdh);
+ ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+ /* Make sure ALP is available before trying to read CIS */
+ SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+ /* Now request ALP be put on the bus */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ DHD_INIT_CLKCTL2, &err);
+ OSL_DELAY(65);
+
+ for (fn = 0; fn <= numfn; fn++) {
+ if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+ break;
+ }
+ bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+ if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+ dhd_dump_cis(fn, cis[fn]);
+ }
+
+ while (fn-- > 0) {
+ ASSERT(cis[fn]);
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ }
+
+ if (err) {
+ DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+ goto fail;
+ }
+ }
+#endif /* DHD_DEBUG */
+
+ /* si_attach() will provide an SI handle and scan the backplane */
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+ &bus->vars, &bus->varsz))) {
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+ if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+ DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+ __FUNCTION__, bus->sih->chip));
+ goto fail;
+ }
+
+
+ si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+ /* Get info on the ARM and SOCRAM cores... */
+ if (!DHD_NOPMU(bus)) {
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+ if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ bus->ramsize = bus->orig_ramsize;
+ if (dhd_dongle_memsize)
+ dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+ DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
+ bus->ramsize, bus->orig_ramsize));
+ }
+
+ /* ...but normally deal with the SDPCMDEV core */
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+ !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+ DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+ goto fail;
+ }
+ bus->sdpcmrev = si_corerev(bus->sih);
+
+ /* Set core control so an SDIO reset does a backplane reset */
+ OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+ bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1))
+ {
+ uint32 val;
+
+ val = R_REG(osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(osh, &bus->regs->corecontrol, val);
+ }
+
+
+ pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+ /* Locate an appropriately-aligned portion of hdrbuf */
+ bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = (bool)dhd_intr;
+ if ((bus->poll = (bool)dhd_poll))
+ bus->pollrate = 1;
+
+ return TRUE;
+
+fail:
+ if (bus->sih != NULL)
+ si_detach(bus->sih);
+ return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->maxctl) {
+ bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+ if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+ __FUNCTION__, bus->rxblen));
+ goto fail;
+ }
+ }
+ /* Allocate buffer to receive glomed packet */
+ if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+ __FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (bus->rxblen)
+			DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen);
+ goto fail;
+ }
+
+ /* Align the buffer */
+ if ((uintptr)bus->databuf % DHD_SDALIGN)
+ bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+ else
+ bus->dataptr = bus->databuf;
+
+ return TRUE;
+
+fail:
+ return FALSE;
+}
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ int32 fnum;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef SDTEST
+ dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->sleeping = FALSE;
+ bus->rxflow = FALSE;
+ bus->prev_rxlim_hit = 0;
+
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = (int32)dhd_idletime;
+ bus->idleclock = DHD_IDLE_ACTIVE;
+
+ /* Query the SD clock speed */
+ if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+ bus->sd_divisor = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_divisor", bus->sd_divisor));
+ }
+
+ /* Query the SD bus mode */
+ if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+ bus->sd_mode = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_mode", bus->sd_mode));
+ }
+
+ /* Query the F2 block size, set roundup accordingly */
+ fnum = 2;
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ /* Query if bus module supports packet chaining, default to use if supported */
+ if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+ &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_rxchain = FALSE;
+ } else {
+ DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+ __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+ }
+ bus->use_rxchain = (bool)bus->sd_rxchain;
+
+ return TRUE;
+}
+
+bool
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *pfw_path, char *pnv_path)
+{
+ bool ret;
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+
+ ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+
+ return ret;
+}
+
+static bool
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+ bool ret;
+
+ /* Download the firmware */
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ ret = _dhdsdio_download_firmware(bus) == 0;
+
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+ bool dongle_isolation = FALSE;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(osh);
+
+ /* De-register interrupt handler */
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_intr_dereg(bus->sdh);
+
+ if (bus->dhd) {
+ dhd_common_deinit(bus->dhd, NULL);
+ dongle_isolation = bus->dhd->dongle_isolation;
+ dhd_detach(bus->dhd);
+ dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_free(bus->dhd);
+ bus->dhd = NULL;
+ }
+
+ dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+ if (bus->console.buf != NULL)
+ MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+ }
+
+ if (osh)
+ dhd_osl_detach(osh);
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd && bus->dhd->dongle_reset)
+ return;
+
+ if (bus->rxbuf) {
+#ifndef DHD_USE_STATIC_BUF
+ MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+ bus->rxctl = bus->rxbuf = NULL;
+ bus->rxlen = 0;
+ }
+
+ if (bus->databuf) {
+#ifndef DHD_USE_STATIC_BUF
+ MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+ bus->databuf = NULL;
+ }
+
+ if (bus->vars && bus->varsz) {
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+ DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+ bus->dhd, bus->dhd->dongle_reset));
+
+ if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+ return;
+
+ if (bus->sih) {
+ if (bus->dhd) {
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+#if !defined(BCMLXSDMMC)
+ if (dongle_isolation == FALSE)
+ si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+ if (bus->dhd) {
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ si_detach(bus->sih);
+ if (bus->vars && bus->varsz)
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(bus->dhd);
+ dhdsdio_release(bus, bus->dhd->osh);
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+ dhdsdio_probe,
+ dhdsdio_disconnect
+};
+
+int
+dhd_bus_register(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_unregister();
+}
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *ularray = NULL;
+
+ DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+ /* Download image */
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+ /* Upload image to verify downloaded contents. */
+ offset = 0;
+ memset(ularray, 0xaa, bus->ramsize);
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+ ularray + offset, sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+ if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+ goto err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+ }
+#endif /* DHD_DEBUG */
+
+err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, bus->ramsize);
+ return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ uint len;
+ void *image = NULL;
+ uint8 *memblock = NULL, *memptr;
+
+ DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+ image = dhd_os_open_image(pfw_path);
+ if (image == NULL)
+ goto err;
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ /* Download image */
+ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
+/*
+ EXAMPLE: nvram_array
+ nvram_array format:
+ name=value
+ Terminate each assignment with a newline, and end the array with an empty
+ string containing only a newline.
+
+ For example:
+ unsigned char nvram_array[] = "name1=value1\n" "name2=value2\n" "\n";
+ Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+ Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+ bus->nvram_params = nvram_params;
+}
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ uint len;
+ void * image = NULL;
+ char * memblock = NULL;
+ char *bufp;
+ char *pnv_path;
+ bool nvram_file_exists;
+
+ pnv_path = bus->nv_path;
+
+ nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+ if (!nvram_file_exists && (bus->nvram_params == NULL))
+ return (0);
+
+ if (nvram_file_exists) {
+ image = dhd_os_open_image(pnv_path);
+ if (image == NULL)
+ goto err;
+ }
+
+ memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAX_NVRAMBUF_SIZE));
+ goto err;
+ }
+
+ /* Download variables */
+ if (nvram_file_exists) {
+ len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+ }
+ else {
+ len = strlen(bus->nvram_params);
+ ASSERT(len <= MAX_NVRAMBUF_SIZE);
+ memcpy(memblock, bus->nvram_params, len);
+ }
+ if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+ len = process_nvram_vars(bufp, len);
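+		/* Pad the processed variable block to a 4-byte boundary and append a
+		 * terminating NUL before downloading it.
+		 */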
+ if (len % 4) {
+ len += 4 - (len % 4);
+ }
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error downloading vars: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+ else {
+ DHD_ERROR(("%s: error reading nvram file: %d\n",
+ __FUNCTION__, len));
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ char *p;
+
+ bool embed = FALSE; /* download embedded firmware */
+ bool dlok = FALSE; /* download firmware succeeded */
+
+ /* Out immediately if no image to download */
+ if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ return 0;
+#endif
+ }
+
+ /* Keep arm in reset */
+ if (dhdsdio_download_state(bus, TRUE)) {
+ DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+
+ /* replace bcm43xx with bcm4330 or bcm4329 */
+ if ((p = strstr(bus->fw_path, "bcm43xx"))) {
+ if (bus->cl_devid == 0x4329) {
+ *(p + 5)='2';
+ *(p + 6)='9';
+ }
+ if (bus->cl_devid == 0x4330) {
+ *(p + 5)='3';
+ *(p + 6)='0';
+ }
+ }
+
+ if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+ DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ goto err;
+#endif
+ }
+ else {
+ embed = FALSE;
+ dlok = TRUE;
+ }
+ }
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdsdio_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ }
+ else {
+ dlok = TRUE;
+ }
+ }
+#endif
+ if (!dlok) {
+ DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+ /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+ /* External nvram takes precedence if specified */
+ if (dhdsdio_download_nvram(bus)) {
+ DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* Take arm out of reset */
+ if (dhdsdio_download_state(bus, FALSE)) {
+ DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ int status;
+
+ status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+
+ return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chip;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+ return bus->dhd;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+ return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+ return SDPCM_HDRLEN;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ int bcmerror = 0;
+ dhd_bus_t *bus;
+
+ bus = dhdp->bus;
+
+ if (flag == TRUE) {
+ if (!bus->dhd->dongle_reset) {
+ dhd_os_sdlock(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop comes before ifconfig_down */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+ /* Expect app to have torn down any connection before calling */
+ /* Stop the bus, disable F2 */
+ dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+ /* Clean up any pending IRQ */
+ bcmsdh_set_irq(FALSE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* Clean tx/rx buffer pointers, detach from the dongle */
+ dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+ bus->dhd->dongle_reset = TRUE;
+ bus->dhd->up = FALSE;
+ dhd_os_sdunlock(dhdp);
+
+ DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__));
+ /* App can now remove power from device */
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else {
+ /* App must have restored power to device before calling */
+
+ DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset) {
+ /* Turn on WLAN */
+#ifdef DHDTHREAD
+ dhd_os_sdlock(dhdp);
+#endif /* DHDTHREAD */
+ /* Reset SD client */
+ bcmsdh_reset(bus->sdh);
+
+ /* Attempt to re-attach & download */
+ if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+ (uint32 *)SI_ENUM_BASE,
+ bus->cl_devid)) {
+ /* Attempt to download binary to the dongle */
+ if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+ dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+
+ /* Re-init bus, enable F2 transfer */
+ bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+ if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_set_irq(TRUE);
+ dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ bus->dhd->dongle_reset = FALSE;
+ bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Restore flow control */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif
+ dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+ DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+ } else {
+ dhd_bus_stop(bus, FALSE);
+ dhdsdio_release_dongle(bus, bus->dhd->osh,
+ TRUE, FALSE);
+ }
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ } else {
+ bcmerror = BCME_SDIO_ERROR;
+ DHD_INFO(("%s called when dongle is not in reset\n",
+ __FUNCTION__));
+ DHD_INFO(("Will call dhd_bus_start instead\n"));
+ sdioh_start(NULL, 1);
+ if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+ DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+ return bcmerror;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+ dhd_bus_t *bus;
+
+ bus = dhdp->bus;
+ return dhdsdio_membytes(bus, set, address, data, size);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 000000000000..59d018b64c6f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,266 @@
+/*
+* Copyright (C) 1999-2011, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: dhd_wlfc.h,v 1.1.8.1 2010-09-09 22:41:08 Exp $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 1024
+
+#define WLFC_HANGER_ITEM_STATE_FREE 1
+#define WLFC_HANGER_ITEM_STATE_INUSE 2
+
+#define WLFC_PKTID_HSLOT_MASK 0xffff /* allow 16 bits only */
+#define WLFC_PKTID_HSLOT_SHIFT 8
+
+/* x -> TXSTATUS TAG to/from firmware */
+#define WLFC_PKTID_HSLOT_GET(x) \
+ (((x) >> WLFC_PKTID_HSLOT_SHIFT) & WLFC_PKTID_HSLOT_MASK)
+#define WLFC_PKTID_HSLOT_SET(var, slot) \
+ ((var) = ((var) & ~(WLFC_PKTID_HSLOT_MASK << WLFC_PKTID_HSLOT_SHIFT)) | \
+ (((slot) & WLFC_PKTID_HSLOT_MASK) << WLFC_PKTID_HSLOT_SHIFT))
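+/*
+ * Example (hypothetical local names): pack a hanger slot into the tag sent
+ * with a frame, and recover it from the txstatus word returned by firmware:
+ *	uint32 tag = 0, txstatus;
+ *	WLFC_PKTID_HSLOT_SET(tag, hslot);
+ *	...
+ *	hslot = WLFC_PKTID_HSLOT_GET(txstatus);
+ */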
+
+#define WLFC_PKTID_FREERUNCTR_MASK 0xff
+
+#define WLFC_PKTID_FREERUNCTR_GET(x) ((x) & WLFC_PKTID_FREERUNCTR_MASK)
+#define WLFC_PKTID_FREERUNCTR_SET(var, ctr) \
+ ((var) = (((var) & ~WLFC_PKTID_FREERUNCTR_MASK) | \
+ (((ctr) & WLFC_PKTID_FREERUNCTR_MASK))))
+
+#define WLFC_PKTQ_PENQ(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec)))? \
+ NULL : pktq_penq((pq), (prec), (p)))
+#define WLFC_PKTQ_PENQ_HEAD(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec))) ? \
+ NULL : pktq_penq_head((pq), (prec), (p)))
+
+typedef enum ewlfc_packet_state {
+ eWLFC_PKTTYPE_NEW,
+ eWLFC_PKTTYPE_DELAYED,
+ eWLFC_PKTTYPE_SUPPRESSED,
+ eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+ eWLFC_MAC_ENTRY_ACTION_ADD,
+ eWLFC_MAC_ENTRY_ACTION_DEL,
+ eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+ uint8 state;
+ uint8 pad[3];
+ uint32 identifier;
+ void* pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 push_time;
+#endif
+} wlfc_hanger_item_t;
+
+typedef struct wlfc_hanger {
+ int max_items;
+ uint32 pushed;
+ uint32 popped;
+ uint32 failed_to_push;
+ uint32 failed_to_pop;
+ uint32 failed_slotfind;
+ wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \
+ sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
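+/* Storage needed for a hanger holding n items: items[1] in wlfc_hanger_t acts
+ * as a flexible array, so one item is subtracted from the struct size before
+ * adding n items.
+ */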
+
+#define WLFC_STATE_OPEN 1
+#define WLFC_STATE_CLOSE 2
+
+#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */
+#define WLFC_PSQ_LEN 64
+#define WLFC_SENDQ_LEN 256
+
+#define WLFC_FLOWCONTROL_DELTA 8
+#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - WLFC_FLOWCONTROL_DELTA)
+#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER - WLFC_FLOWCONTROL_DELTA)
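+/* Flow-control hysteresis around the per-entry PSQ: with WLFC_PSQ_LEN 64 and
+ * delta 8, HIWATER evaluates to 56 and LOWATER to 48.
+ */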
+
+typedef struct wlfc_mac_descriptor {
+ uint8 occupied;
+ uint8 interface_id;
+ uint8 iftype;
+ uint8 state;
+ uint8 ac_bitmap; /* for APSD */
+ uint8 requested_credit;
+ uint8 requested_packet;
+ uint8 ea[ETHER_ADDR_LEN];
+ /*
+ Maintain a per-(MAC, AC) sequence count for packets going
+ to the device, plus one extra entry for bc/mc traffic.
+ */
+ uint8 seq[AC_COUNT + 1];
+ uint8 generation;
+ struct pktq psq;
+ /* The AC pending bitmap that was reported to the fw at last change */
+ uint8 traffic_lastreported_bmp;
+ /* The new AC pending bitmap */
+ uint8 traffic_pending_bmp;
+ /* 1= send on next opportunity */
+ uint8 send_tim_signal;
+ uint8 mac_handle;
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 dstncredit_sent_packets;
+ uint32 dstncredit_acks;
+ uint32 opened_ct;
+ uint32 closed_ct;
+#endif
+} wlfc_mac_descriptor_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+ entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+ uint32 pktin;
+ uint32 pkt2bus;
+ uint32 pktdropped;
+ uint32 tlv_parse_failed;
+ uint32 rollback;
+ uint32 rollback_failed;
+ uint32 sendq_full_error;
+ uint32 delayq_full_error;
+ uint32 credit_request_failed;
+ uint32 packet_request_failed;
+ uint32 mac_update_failed;
+ uint32 psmode_update_failed;
+ uint32 interface_update_failed;
+ uint32 wlfc_header_only_pkt;
+ uint32 txstatus_in;
+ uint32 d11_suppress;
+ uint32 wl_suppress;
+ uint32 bad_suppress;
+ uint32 pkt_freed;
+ uint32 pkt_free_err;
+ uint32 psq_wlsup_retx;
+ uint32 psq_wlsup_enq;
+ uint32 psq_d11sup_retx;
+ uint32 psq_d11sup_enq;
+ uint32 psq_hostq_retx;
+ uint32 psq_hostq_enq;
+ uint32 mac_handle_notfound;
+ uint32 wlc_tossed_pkts;
+ uint32 dhd_hdrpulls;
+ uint32 generic_error;
+ /* an extra one for bc/mc traffic */
+ uint32 sendq_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+ /* all pkt2bus -> txstatus latency accumulated */
+ uint32 latency_sample_count;
+ uint32 total_status_latency;
+ uint32 latency_most_recent;
+ int idx_delta;
+ uint32 deltas[10];
+ uint32 fifo_credits_sent[6];
+ uint32 fifo_credits_back[6];
+ uint32 dropped_qfull[6];
+ uint32 signal_only_pkts_sent;
+ uint32 signal_only_pkts_freed;
+#endif
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+ (ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE 0
+#define WLFC_FCMODE_IMPLIED_CREDIT 1
+#define WLFC_FCMODE_EXPLICIT_CREDIT 2
+
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/* Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+/* Mask to check for only on-going AC_BE traffic */
+#define WLFC_AC_BE_TRAFFIC_ONLY 0xD
+
+typedef struct athost_wl_status_info {
+ uint8 last_seqid_to_wlc;
+
+ /* OSL handle */
+ osl_t* osh;
+ /* dhd pub */
+ void* dhdp;
+
+ /* stats */
+ athost_wl_stat_counters_t stats;
+
+ /* the additional ones are for bc/mc and ATIM FIFO */
+ int FIFO_credit[AC_COUNT + 2];
+
+ /* Credit borrow counts for each FIFO from each of the other FIFOs */
+ int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+ struct pktq SENDQ;
+
+ /* packet hanger and MAC->handle lookup table */
+ void* hanger;
+ struct {
+ /* table for individual nodes */
+ wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE];
+ /* table for interfaces */
+ wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM];
+ /* OS may send packets to unknown (unassociated) destinations */
+ /* A placeholder for bc/mc and packets to unknown destinations */
+ wlfc_mac_descriptor_t other;
+ } destination_entries;
+ /* token position for different priority packets */
+ uint8 token_pos[AC_COUNT+1];
+ /* ON/OFF state for flow control to the host network interface */
+ uint8 hostif_flow_state[WLFC_MAX_IFNUM];
+ uint8 host_ifidx;
+ /* to flow control an OS interface */
+ uint8 toggle_host_if;
+
+ /*
+ Mode in which the dhd flow control shall operate. Must be set before
+ traffic starts to the device.
+ 0 - Do not do any proptxstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ uint8 proptxstatus_mode;
+
+ /* To borrow credits */
+ uint8 allow_credit_borrow;
+
+ /* Timestamp to compute how long to defer borrowing for */
+ uint32 borrow_defer_timestamp;
+} athost_wl_status_info_t;
+
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 000000000000..9cdf718b3990
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h,v 1.5 2008-06-02 16:56:20 Exp $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 000000000000..8b39b9ecb584
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,40 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_wlhdr.h,v 1.1 2009-01-08 01:21:12 Exp $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+ uint8 type; /* Header type */
+ uint8 version; /* Header version */
+ int8 rssi; /* RSSI */
+ uint8 pad; /* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN sizeof(wl_header_t)
+#define WL_HEADER_TYPE 0
+#define WL_HEADER_VER 1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 000000000000..b9586e40d0c2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,222 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c,v 1.228.2.56 2011-02-11 22:49:07 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define PMU_ERROR(args)
+
+#define PMU_MSG(args)
+
+/* Used for verbose debugging messages that are not intended
+ * to be enabled except on private builds.
+ */
+#define PMU_NONE(args)
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+ uint8 strength; /* Pad Drive Strength in mA */
+ uint8 sel; /* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+ {4, 0x2},
+ {2, 0x3},
+ {1, 0x0},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+ {32, 0x7},
+ {26, 0x6},
+ {22, 0x5},
+ {16, 0x4},
+ {12, 0x3},
+ {8, 0x2},
+ {4, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v2[] = {
+ {16, 0x3},
+ {13, 0x2},
+ {11, 0x1},
+ {8, 0x0},
+ {6, 0x7},
+ {4, 0x6},
+ {2, 0x5},
+ {0, 0x4} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_2v5[] = {
+ {80, 0x5},
+ {65, 0x4},
+ {55, 0x7},
+ {40, 0x6},
+ {30, 0x1},
+ {20, 0x0},
+ {10, 0x3},
+ {0, 0x2} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_3v3[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0} };
+
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+ chipcregs_t *cc;
+ uint origidx, intr_val = 0;
+ sdiod_drive_str_t *str_tab = NULL;
+ uint32 str_mask = 0;
+ uint32 str_shift = 0;
+
+ if (!(sih->cccaps & CC_CAP_PMU)) {
+ return;
+ }
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+ switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+ str_mask = 0x30000000;
+ str_shift = 28;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+ case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+ if (sih->pmurev == 8) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+ }
+ else if (sih->pmurev == 11) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+ }
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+
+ break;
+ }
+
+ if (str_tab != NULL) {
+ uint32 cc_data_temp;
+ int i;
+
+ /* Pick the lowest available drive strength equal to or greater than the
+ * requested strength. A drive strength of 0 requests tri-state.
+ */
+ for (i = 0; drivestrength < str_tab[i].strength; i++)
+ ;
+
+ if (i > 0 && drivestrength > str_tab[i].strength)
+ i--;
+
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+ cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+ cc_data_temp &= ~str_mask;
+ cc_data_temp |= str_tab[i].sel << str_shift;
+ W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+ PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+ drivestrength, str_tab[i].strength));
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, intr_val);
+}
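To make the selection loop above concrete, here is the same logic pulled out against the PMU rev 13 1.8 V table; this standalone copy is for illustration only and is not part of the patch.

    /* Illustrative copy of the selection logic, using sdiod_drive_strength_tab5_1v8. */
    static uint8 sdiod_sel_example(uint32 drivestrength)
    {
        const sdiod_drive_str_t *tab = sdiod_drive_strength_tab5_1v8;
        int i;

        /* walk the descending table until the entry is <= the request */
        for (i = 0; drivestrength < tab[i].strength; i++)
            ;
        /* if we overshot (entry strictly smaller) and can back up, do so */
        if (i > 0 && drivestrength > tab[i].strength)
            i--;

        /* e.g. a 4 mA request lands on {4, 0x5}; a 7 mA request clamps to {6, 0x7} */
        return tab[i].sel;
    }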
diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile
new file mode 100644
index 000000000000..c07266fd6fdc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/Makefile
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# This script serves the following purposes:
+#
+# 1. It generates native version information by querying the
+#    automerger-maintained database to see where src/include
+#    came from.
+# 2. For select components, as listed in compvers.sh,
+#    it generates component version files.
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 241702 2011-02-19 00:41:03Z automrgr $
+#
+
+SRCBASE := ..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch version
+epivers:
+ bash epivers.sh
+
+# Generate component version files (if compvers.sh is present)
+compvers:
+ @if [ -s "compvers.sh" ]; then \
+ echo "Generating component versions, if any"; \
+ bash compvers.sh; \
+ else \
+ echo "Skipping component version generation"; \
+ fi
+
+# Clean up generated component version files
+clean_compvers:
+ @if [ -s "compvers.sh" ]; then \
+ echo "bash compvers.sh clean"; \
+ bash compvers.sh clean; \
+ else \
+ echo "Skipping component version clean"; \
+ fi
+
+clean:
+ rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 000000000000..375df443a29a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,377 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h,v 13.4.14.1 2010-03-09 18:40:06 Exp $
+ */
+
+
+#ifndef _AIDMP_H
+#define _AIDMP_H
+
+
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+
+#define CC_SIM 0
+#define CC_EROM 1
+#define CC_CORESIGHT 9
+#define CC_VERIF 0xb
+#define CC_OPTIMO 0xd
+#define CC_GEN 0xe
+#define CC_PRIMECELL 0xf
+
+
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+
+
+#define CIA_MFG_MASK 0xfff00000
+#define CIA_MFG_SHIFT 20
+#define CIA_CID_MASK 0x000fff00
+#define CIA_CID_SHIFT 8
+#define CIA_CCL_MASK 0x000000f0
+#define CIA_CCL_SHIFT 4
+
+
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+#define CIB_NSW_MASK 0x00f80000
+#define CIB_NSW_SHIFT 19
+#define CIB_NMW_MASK 0x0007c000
+#define CIB_NMW_SHIFT 14
+#define CIB_NSP_MASK 0x00003e00
+#define CIB_NSP_SHIFT 9
+#define CIB_NMP_MASK 0x000001f0
+#define CIB_NMP_SHIFT 4
+
+
+#define MPD_MUI_MASK 0x0000ff00
+#define MPD_MUI_SHIFT 8
+#define MPD_MP_MASK 0x000000f0
+#define MPD_MP_SHIFT 4
+
+
+#define AD_ADDR_MASK 0xfffff000
+#define AD_SP_MASK 0x00000f00
+#define AD_SP_SHIFT 8
+#define AD_ST_MASK 0x000000c0
+#define AD_ST_SHIFT 6
+#define AD_ST_SLAVE 0x00000000
+#define AD_ST_BRIDGE 0x00000040
+#define AD_ST_SWRAP 0x00000080
+#define AD_ST_MWRAP 0x000000c0
+#define AD_SZ_MASK 0x00000030
+#define AD_SZ_SHIFT 4
+#define AD_SZ_4K 0x00000000
+#define AD_SZ_8K 0x00000010
+#define AD_SZ_16K 0x00000020
+#define AD_SZ_SZD 0x00000030
+#define AD_AG32 0x00000008
+#define AD_ADDR_ALIGN 0x00000fff
+#define AD_SZ_BASE 0x00001000
+
+
+#define SD_SZ_MASK 0xfffff000
+#define SD_SG32 0x00000008
+#define SD_SZ_ALIGN 0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _aidmp {
+ uint32 oobselina30;
+ uint32 oobselina74;
+ uint32 PAD[6];
+ uint32 oobselinb30;
+ uint32 oobselinb74;
+ uint32 PAD[6];
+ uint32 oobselinc30;
+ uint32 oobselinc74;
+ uint32 PAD[6];
+ uint32 oobselind30;
+ uint32 oobselind74;
+ uint32 PAD[38];
+ uint32 oobselouta30;
+ uint32 oobselouta74;
+ uint32 PAD[6];
+ uint32 oobseloutb30;
+ uint32 oobseloutb74;
+ uint32 PAD[6];
+ uint32 oobseloutc30;
+ uint32 oobseloutc74;
+ uint32 PAD[6];
+ uint32 oobseloutd30;
+ uint32 oobseloutd74;
+ uint32 PAD[38];
+ uint32 oobsynca;
+ uint32 oobseloutaen;
+ uint32 PAD[6];
+ uint32 oobsyncb;
+ uint32 oobseloutben;
+ uint32 PAD[6];
+ uint32 oobsyncc;
+ uint32 oobseloutcen;
+ uint32 PAD[6];
+ uint32 oobsyncd;
+ uint32 oobseloutden;
+ uint32 PAD[38];
+ uint32 oobaextwidth;
+ uint32 oobainwidth;
+ uint32 oobaoutwidth;
+ uint32 PAD[5];
+ uint32 oobbextwidth;
+ uint32 oobbinwidth;
+ uint32 oobboutwidth;
+ uint32 PAD[5];
+ uint32 oobcextwidth;
+ uint32 oobcinwidth;
+ uint32 oobcoutwidth;
+ uint32 PAD[5];
+ uint32 oobdextwidth;
+ uint32 oobdinwidth;
+ uint32 oobdoutwidth;
+ uint32 PAD[37];
+ uint32 ioctrlset;
+ uint32 ioctrlclear;
+ uint32 ioctrl;
+ uint32 PAD[61];
+ uint32 iostatus;
+ uint32 PAD[127];
+ uint32 ioctrlwidth;
+ uint32 iostatuswidth;
+ uint32 PAD[62];
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 PAD[60];
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 PAD[56];
+ uint32 intstatus;
+ uint32 PAD[127];
+ uint32 config;
+ uint32 PAD[63];
+ uint32 itcr;
+ uint32 PAD[3];
+ uint32 itipooba;
+ uint32 itipoobb;
+ uint32 itipoobc;
+ uint32 itipoobd;
+ uint32 PAD[4];
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+ uint32 PAD[4];
+ uint32 itopooba;
+ uint32 itopoobb;
+ uint32 itopoobc;
+ uint32 itopoobd;
+ uint32 PAD[4];
+ uint32 itopoobain;
+ uint32 itopoobbin;
+ uint32 itopoobcin;
+ uint32 itopoobdin;
+ uint32 PAD[4];
+ uint32 itopreset;
+ uint32 PAD[15];
+ uint32 peripherialid4;
+ uint32 peripherialid5;
+ uint32 peripherialid6;
+ uint32 peripherialid7;
+ uint32 peripherialid0;
+ uint32 peripherialid1;
+ uint32 peripherialid2;
+ uint32 peripherialid3;
+ uint32 componentid0;
+ uint32 componentid1;
+ uint32 componentid2;
+ uint32 componentid3;
+} aidmp_t;
+
+#endif
+
+
+#define OOB_BUSCONFIG 0x020
+#define OOB_STATUSA 0x100
+#define OOB_STATUSB 0x104
+#define OOB_STATUSC 0x108
+#define OOB_STATUSD 0x10c
+#define OOB_ENABLEA0 0x200
+#define OOB_ENABLEA1 0x204
+#define OOB_ENABLEA2 0x208
+#define OOB_ENABLEA3 0x20c
+#define OOB_ENABLEB0 0x280
+#define OOB_ENABLEB1 0x284
+#define OOB_ENABLEB2 0x288
+#define OOB_ENABLEB3 0x28c
+#define OOB_ENABLEC0 0x300
+#define OOB_ENABLEC1 0x304
+#define OOB_ENABLEC2 0x308
+#define OOB_ENABLEC3 0x30c
+#define OOB_ENABLED0 0x380
+#define OOB_ENABLED1 0x384
+#define OOB_ENABLED2 0x388
+#define OOB_ENABLED3 0x38c
+#define OOB_ITCR 0xf00
+#define OOB_ITIPOOBA 0xf10
+#define OOB_ITIPOOBB 0xf14
+#define OOB_ITIPOOBC 0xf18
+#define OOB_ITIPOOBD 0xf1c
+#define OOB_ITOPOOBA 0xf30
+#define OOB_ITOPOOBB 0xf34
+#define OOB_ITOPOOBC 0xf38
+#define OOB_ITOPOOBD 0xf3c
+
+
+#define AI_OOBSELINA30 0x000
+#define AI_OOBSELINA74 0x004
+#define AI_OOBSELINB30 0x020
+#define AI_OOBSELINB74 0x024
+#define AI_OOBSELINC30 0x040
+#define AI_OOBSELINC74 0x044
+#define AI_OOBSELIND30 0x060
+#define AI_OOBSELIND74 0x064
+#define AI_OOBSELOUTA30 0x100
+#define AI_OOBSELOUTA74 0x104
+#define AI_OOBSELOUTB30 0x120
+#define AI_OOBSELOUTB74 0x124
+#define AI_OOBSELOUTC30 0x140
+#define AI_OOBSELOUTC74 0x144
+#define AI_OOBSELOUTD30 0x160
+#define AI_OOBSELOUTD74 0x164
+#define AI_OOBSYNCA 0x200
+#define AI_OOBSELOUTAEN 0x204
+#define AI_OOBSYNCB 0x220
+#define AI_OOBSELOUTBEN 0x224
+#define AI_OOBSYNCC 0x240
+#define AI_OOBSELOUTCEN 0x244
+#define AI_OOBSYNCD 0x260
+#define AI_OOBSELOUTDEN 0x264
+#define AI_OOBAEXTWIDTH 0x300
+#define AI_OOBAINWIDTH 0x304
+#define AI_OOBAOUTWIDTH 0x308
+#define AI_OOBBEXTWIDTH 0x320
+#define AI_OOBBINWIDTH 0x324
+#define AI_OOBBOUTWIDTH 0x328
+#define AI_OOBCEXTWIDTH 0x340
+#define AI_OOBCINWIDTH 0x344
+#define AI_OOBCOUTWIDTH 0x348
+#define AI_OOBDEXTWIDTH 0x360
+#define AI_OOBDINWIDTH 0x364
+#define AI_OOBDOUTWIDTH 0x368
+
+
+#define AI_IOCTRLSET 0x400
+#define AI_IOCTRLCLEAR 0x404
+#define AI_IOCTRL 0x408
+#define AI_IOSTATUS 0x500
+#define AI_RESETCTRL 0x800
+#define AI_RESETSTATUS 0x804
+
+
+#define AI_IOCTRLWIDTH 0x700
+#define AI_IOSTATUSWIDTH 0x704
+
+#define AI_RESETREADID 0x808
+#define AI_RESETWRITEID 0x80c
+#define AI_ERRLOGCTRL 0xa00
+#define AI_ERRLOGDONE 0xa04
+#define AI_ERRLOGSTATUS 0xa08
+#define AI_ERRLOGADDRLO 0xa0c
+#define AI_ERRLOGADDRHI 0xa10
+#define AI_ERRLOGID 0xa14
+#define AI_ERRLOGUSER 0xa18
+#define AI_ERRLOGFLAGS 0xa1c
+#define AI_INTSTATUS 0xa00
+#define AI_CONFIG 0xe00
+#define AI_ITCR 0xf00
+#define AI_ITIPOOBA 0xf10
+#define AI_ITIPOOBB 0xf14
+#define AI_ITIPOOBC 0xf18
+#define AI_ITIPOOBD 0xf1c
+#define AI_ITIPOOBAOUT 0xf30
+#define AI_ITIPOOBBOUT 0xf34
+#define AI_ITIPOOBCOUT 0xf38
+#define AI_ITIPOOBDOUT 0xf3c
+#define AI_ITOPOOBA 0xf50
+#define AI_ITOPOOBB 0xf54
+#define AI_ITOPOOBC 0xf58
+#define AI_ITOPOOBD 0xf5c
+#define AI_ITOPOOBAIN 0xf70
+#define AI_ITOPOOBBIN 0xf74
+#define AI_ITOPOOBCIN 0xf78
+#define AI_ITOPOOBDIN 0xf7c
+#define AI_ITOPRESET 0xf90
+#define AI_PERIPHERIALID4 0xfd0
+#define AI_PERIPHERIALID5 0xfd4
+#define AI_PERIPHERIALID6 0xfd8
+#define AI_PERIPHERIALID7 0xfdc
+#define AI_PERIPHERIALID0 0xfe0
+#define AI_PERIPHERIALID1 0xfe4
+#define AI_PERIPHERIALID2 0xfe8
+#define AI_PERIPHERIALID3 0xfec
+#define AI_COMPONENTID0 0xff0
+#define AI_COMPONENTID1 0xff4
+#define AI_COMPONENTID2 0xff8
+#define AI_COMPONENTID3 0xffc
+
+
+#define AIRC_RESET 1
+
+
+#define AICFG_OOB 0x00000020
+#define AICFG_IOS 0x00000010
+#define AICFG_IOC 0x00000008
+#define AICFG_TO 0x00000004
+#define AICFG_ERRL 0x00000002
+#define AICFG_RST 0x00000001
+
+
+#define OOB_SEL_OUTEN_B_5 15
+#define OOB_SEL_OUTEN_B_6 23
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 000000000000..ce45c50d9641
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,121 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h,v 13.25.10.3 2010-12-22 23:47:26 Exp $
+ */
+
+#ifndef _bcmcdc_h_
+#define _bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+ uint32 cmd;
+ uint32 len;
+ uint32 flags;
+ uint32 status;
+} cdc_ioctl_t;
+
+
+#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
+
+
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF
+
+#define CDCL_IOC_OUTLEN_SHIFT 0
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000
+#define CDCL_IOC_INLEN_SHIFT 16
+
+
+#define CDCF_IOC_ERROR 0x01
+#define CDCF_IOC_SET 0x02
+#define CDCF_IOC_OVL_IDX_MASK 0x3c
+#define CDCF_IOC_OVL_RSV 0x40
+#define CDCF_IOC_OVL 0x80
+#define CDCF_IOC_ACTION_MASK 0xfe
+#define CDCF_IOC_ACTION_SHIFT 1
+#define CDCF_IOC_IF_MASK 0xF000
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000
+#define CDCF_IOC_ID_SHIFT 16
+
+#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
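A hedged sketch of how the CDC ioctl flag fields above are typically combined when building a request header. The function and parameter names are made up, and htol32()/ltoh32() are the byte-order helpers added later in this patch (bcmendian.h).

    /* Hypothetical: build a "set" request for interface 1, tagged with reqid. */
    static void cdc_ioctl_example(cdc_ioctl_t *msg, uint32 cmd, uint32 len, uint16 reqid)
    {
        uint32 flags;

        msg->cmd = htol32(cmd);
        msg->len = htol32(len);
        flags = CDCF_IOC_SET |
                (1 << CDCF_IOC_IF_SHIFT) |              /* interface index 1 */
                ((uint32)reqid << CDCF_IOC_ID_SHIFT);   /* id echoed back in the reply */
        msg->flags = htol32(flags);
        msg->status = 0;

        /* On the reply path, CDC_IOC_ID(ltoh32(msg->flags)) recovers reqid. */
    }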
+
+
+
+#define BDC_HEADER_LEN 4
+
+#define BDC_PROTO_VER_1 1
+#define BDC_PROTO_VER 2
+
+#define BDC_FLAG_VER_MASK 0xf0
+#define BDC_FLAG_VER_SHIFT 4
+
+#define BDC_FLAG__UNUSED 0x03
+#define BDC_FLAG_SUM_GOOD 0x04
+#define BDC_FLAG_SUM_NEEDED 0x08
+
+#define BDC_PRIORITY_MASK 0x7
+
+#define BDC_FLAG2_FC_FLAG 0x10
+
+#define BDC_PRIORITY_FC_SHIFT 4
+
+#define BDC_FLAG2_IF_MASK 0x0f
+#define BDC_FLAG2_IF_SHIFT 0
+#define BDC_FLAG2_PAD_MASK 0xf0
+#define BDC_FLAG_PAD_MASK 0x03
+#define BDC_FLAG2_PAD_SHIFT 2
+#define BDC_FLAG_PAD_SHIFT 0
+#define BDC_FLAG2_PAD_IDX 0x3c
+#define BDC_FLAG_PAD_IDX 0x03
+#define BDC_GET_PAD_LEN(hdr) \
+ ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+ ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+ (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+ ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+ (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+struct bdc_header {
+ uint8 flags;
+ uint8 priority;
+ uint8 flags2;
+ uint8 dataOffset;
+};
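For illustration, a hypothetical snippet showing how the BDC header accessors compose: BDC_SET_PAD_LEN() spreads the pad length across flags2 (upper bits) and flags (lower two bits), and the GET macros reassemble it.

    /* Hypothetical sketch; not part of the driver. */
    static void bdc_hdr_example(struct bdc_header *h)
    {
        memset(h, 0, sizeof(*h));
        h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);

        BDC_SET_IF_IDX(h, 2);   /* route the frame to interface 2 */
        BDC_SET_PAD_LEN(h, 5);  /* pad of 5: low two bits land in flags, the rest in flags2 */

        /* BDC_GET_IF_IDX(h) == 2 and BDC_GET_PAD_LEN(h) == 5 */
    }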
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 000000000000..da1fd5e4eac4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,196 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdefs.h,v 13.68.2.8 2011-01-08 04:04:19 Exp $
+ */
+
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+
+
+#define bcmreclaimed 0
+#define _data _data
+#define _fn _fn
+#define _data _data
+#define _fn _fn
+#define _fn _fn
+#define CONST const
+#define BCMFASTPATH
+
+
+
+
+#define _data _data
+#define _fn _fn
+#define _fn _fn
+#define STATIC static
+
+
+#define SI_BUS 0
+#define PCI_BUS 1
+#define PCMCIA_BUS 2
+#define SDIO_BUS 3
+#define JTAG_BUS 4
+#define USB_BUS 5
+#define SPI_BUS 6
+#define RPC_BUS 7
+
+
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) (BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) (bus)
+#endif
+
+
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) (BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) (bus)
+#endif
+
+
+
+#if defined(BCMSPROMBUS)
+#define SPROMBUS (BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS (PCMCIA_BUS)
+#else
+#define SPROMBUS (PCI_BUS)
+#endif
+
+
+#ifdef BCMCHIPID
+#define CHIPID(chip) (BCMCHIPID)
+#else
+#define CHIPID(chip) (chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev) (BCMCHIPREV)
+#else
+#define CHIPREV(rev) (rev)
+#endif
+
+
+#define DMADDR_MASK_32 0x0
+#define DMADDR_MASK_30 0xc0000000
+#define DMADDR_MASK_0 0xffffffff
+
+#define DMADDRWIDTH_30 30
+#define DMADDRWIDTH_32 32
+#define DMADDRWIDTH_63 63
+#define DMADDRWIDTH_64 64
+
+#ifdef BCMDMA64OSL
+typedef struct {
+ uint32 loaddr;
+ uint32 hiaddr;
+} dma64addr_t;
+
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) ((_pa).hiaddr)
+#define PHYSADDRHISET(_pa, _val) \
+ do { \
+ (_pa).hiaddr = (_val); \
+ } while (0)
+#define PHYSADDRLO(_pa) ((_pa).loaddr)
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa).loaddr = (_val); \
+ } while (0)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa) = (_val); \
+ } while (0)
+#endif
+
+
+typedef struct {
+ dmaaddr_t addr;
+ uint32 length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 4
+
+
+typedef struct {
+ void *oshdmah;
+ uint origsize;
+ uint nsegs;
+ hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+
+#define BCMEXTRAHDROOM 220
+#else
+#define BCMEXTRAHDROOM 172
+#endif
+
+
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif
+
+
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
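GFIELD()/SFIELD() rely on a naming convention: a field FOO is described by FOO_S (its bit offset) and FOO_M (its unshifted mask). The field below is purely hypothetical, named only to show the pattern.

    /* Hypothetical 3-bit field at bit offset 4, for illustration only. */
    #define EXAMPLE_SPEED_S  4
    #define EXAMPLE_SPEED_M  0x7

    /* With reg == 0x30:  GFIELD(reg, EXAMPLE_SPEED) == 0x3             */
    /* SFIELD(reg, EXAMPLE_SPEED, 0x5) == 0x50 (field replaced by 5)    */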
+
+
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE
+#else
+#define BCMSPACE
+#define bcmspace TRUE
+#endif
+
+
+#define MAXSZ_NVRAM_VARS 4096
+
+#define LOCATOR_EXTERN static
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 000000000000..4f707c0c6920
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,182 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h,v 13.285.2.39 2011-02-04 05:03:16 Exp $
+ */
+
+
+#ifndef _BCMDEVS_H
+#define _BCMDEVS_H
+
+
+#define VENDOR_EPIGRAM 0xfeda
+#define VENDOR_BROADCOM 0x14e4
+#define VENDOR_SI_IMAGE 0x1095
+#define VENDOR_TI 0x104c
+#define VENDOR_RICOH 0x1180
+#define VENDOR_JMICRON 0x197b
+
+
+
+#define VENDOR_BROADCOM_PCMCIA 0x02d0
+
+
+#define VENDOR_BROADCOM_SDIO 0x00BF
+
+
+#define BCM_DNGL_VID 0x0a5c
+#define BCM_DNGL_BL_PID_4328 0xbd12
+#define BCM_DNGL_BL_PID_4322 0xbd13
+#define BCM_DNGL_BL_PID_4319 0xbd16
+#define BCM_DNGL_BL_PID_43236 0xbd17
+#define BCM_DNGL_BL_PID_4332 0xbd18
+#define BCM_DNGL_BL_PID_4330 0xbd19
+#define BCM_DNGL_BL_PID_43239 0xbd1b
+#define BCM_DNGL_BDC_PID 0x0bdc
+#define BCM_DNGL_JTAG_PID 0x4a44
+#define BCM4325_D11DUAL_ID 0x431b
+#define BCM4325_D11G_ID 0x431c
+#define BCM4325_D11A_ID 0x431d
+#define BCM4321_D11N_ID 0x4328
+#define BCM4321_D11N2G_ID 0x4329
+#define BCM4321_D11N5G_ID 0x432a
+#define BCM4322_D11N_ID 0x432b
+#define BCM4322_D11N2G_ID 0x432c
+#define BCM4322_D11N5G_ID 0x432d
+#define BCM4329_D11N_ID 0x432e
+#define BCM4329_D11N2G_ID 0x432f
+#define BCM4329_D11N5G_ID 0x4330
+#define BCM4315_D11DUAL_ID 0x4334
+#define BCM4315_D11G_ID 0x4335
+#define BCM4315_D11A_ID 0x4336
+#define BCM4319_D11N_ID 0x4337
+#define BCM4319_D11N2G_ID 0x4338
+#define BCM4319_D11N5G_ID 0x4339
+#define BCM43231_D11N2G_ID 0x4340
+#define BCM43221_D11N2G_ID 0x4341
+#define BCM43222_D11N_ID 0x4350
+#define BCM43222_D11N2G_ID 0x4351
+#define BCM43222_D11N5G_ID 0x4352
+#define BCM43224_D11N_ID 0x4353
+#define BCM43224_D11N_ID_VEN1 0x0576
+#define BCM43226_D11N_ID 0x4354
+#define BCM43236_D11N_ID 0x4346
+#define BCM43236_D11N2G_ID 0x4347
+#define BCM43236_D11N5G_ID 0x4348
+#define BCM43225_D11N2G_ID 0x4357
+#define BCM43421_D11N_ID 0xA99D
+#define BCM4313_D11N2G_ID 0x4727
+#define BCM4330_D11N_ID 0x4360
+#define BCM4330_D11N2G_ID 0x4361
+#define BCM4330_D11N5G_ID 0x4362
+#define BCM4336_D11N_ID 0x4343
+#define BCM6362_D11N_ID 0x435f
+#define BCM4331_D11N_ID 0x4331
+#define BCM4331_D11N2G_ID 0x4332
+#define BCM4331_D11N5G_ID 0x4333
+#define BCM43237_D11N_ID 0x4355
+#define BCM43237_D11N5G_ID 0x4356
+#define BCM43227_D11N2G_ID 0x4358
+#define BCM43228_D11N_ID 0x4359
+#define BCM43228_D11N5G_ID 0x435a
+#define BCM43362_D11N_ID 0x4363
+#define BCM43239_D11N_ID 0x4370
+
+
+#define SDIOH_FPGA_ID 0x43f2
+#define SPIH_FPGA_ID 0x43f5
+#define BCM4710_DEVICE_ID 0x4710
+#define BCM27XX_SDIOH_ID 0x2702
+#define PCIXX21_FLASHMEDIA0_ID 0x8033
+#define PCIXX21_SDIOH0_ID 0x8034
+#define PCIXX21_FLASHMEDIA_ID 0x803b
+#define PCIXX21_SDIOH_ID 0x803c
+#define R5C822_SDIOH_ID 0x0822
+#define JMICRON_SDIOH_ID 0x2381
+
+
+#define BCM4306_CHIP_ID 0x4306
+#define BCM4311_CHIP_ID 0x4311
+#define BCM43111_CHIP_ID 43111
+#define BCM43112_CHIP_ID 43112
+#define BCM4312_CHIP_ID 0x4312
+#define BCM4313_CHIP_ID 0x4313
+#define BCM4315_CHIP_ID 0x4315
+#define BCM4318_CHIP_ID 0x4318
+#define BCM4319_CHIP_ID 0x4319
+#define BCM4320_CHIP_ID 0x4320
+#define BCM4321_CHIP_ID 0x4321
+#define BCM4322_CHIP_ID 0x4322
+#define BCM43221_CHIP_ID 43221
+#define BCM43222_CHIP_ID 43222
+#define BCM43224_CHIP_ID 43224
+#define BCM43225_CHIP_ID 43225
+#define BCM43227_CHIP_ID 43227
+#define BCM43228_CHIP_ID 43228
+#define BCM43226_CHIP_ID 43226
+#define BCM43231_CHIP_ID 43231
+#define BCM43234_CHIP_ID 43234
+#define BCM43235_CHIP_ID 43235
+#define BCM43236_CHIP_ID 43236
+#define BCM43237_CHIP_ID 43237
+#define BCM43238_CHIP_ID 43238
+#define BCM43239_CHIP_ID 43239
+#define BCM43420_CHIP_ID 43420
+#define BCM43421_CHIP_ID 43421
+#define BCM43428_CHIP_ID 43428
+#define BCM43431_CHIP_ID 43431
+#define BCM4325_CHIP_ID 0x4325
+#define BCM4328_CHIP_ID 0x4328
+#define BCM4329_CHIP_ID 0x4329
+#define BCM4331_CHIP_ID 0x4331
+#define BCM4336_CHIP_ID 0x4336
+#define BCM43362_CHIP_ID 43362
+#define BCM4330_CHIP_ID 0x4330
+#define BCM4402_CHIP_ID 0x4402
+#define BCM4704_CHIP_ID 0x4704
+#define BCM4710_CHIP_ID 0x4710
+#define BCM4712_CHIP_ID 0x4712
+#define BCM4785_CHIP_ID 0x4785
+#define BCM5350_CHIP_ID 0x5350
+#define BCM5352_CHIP_ID 0x5352
+#define BCM5354_CHIP_ID 0x5354
+#define BCM5365_CHIP_ID 0x5365
+
+
+#define BCM4303_PKG_ID 2
+#define BCM4309_PKG_ID 1
+#define BCM4712LARGE_PKG_ID 0
+#define BCM4712SMALL_PKG_ID 1
+#define BCM4712MID_PKG_ID 2
+#define BCM4328USBD11G_PKG_ID 2
+#define BCM4328USBDUAL_PKG_ID 3
+#define BCM4328SDIOD11G_PKG_ID 4
+#define BCM4328SDIODUAL_PKG_ID 5
+#define BCM4329_289PIN_PKG_ID 0
+#define BCM4329_182PIN_PKG_ID 1
+#define BCM5354E_PKG_ID 1
+#define HDLSIM5350_PKG_ID 1
+#define HDLSIM_PKG_ID 14
+#define HWSIM_PKG_ID 15
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 000000000000..04b07ecb8043
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,279 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmendian.h,v 1.36 2009-11-09 05:29:43 Exp $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+
+#define BCMSWAP16(val) \
+ ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+ (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+
+#define BCMSWAP32(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+ (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \
+ (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \
+ (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+
+#define BCMSWAP32BY16(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+ (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define hton16(i) bcmswap16(i)
+#define HTON32(i) BCMSWAP32(i)
+#define hton32(i) bcmswap32(i)
+#define NTOH16(i) BCMSWAP16(i)
+#define ntoh16(i) bcmswap16(i)
+#define NTOH32(i) BCMSWAP32(i)
+#define ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#endif
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+
+#define load32_ua(a) ltoh32_ua(a)
+#define store32_ua(a, v) htol32_ua_store(v, a)
+#define load16_ua(a) ltoh16_ua(a)
+#define store16_ua(a, v) htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#ifdef __GNUC__
+
+
+
+#define bcmswap16(val) ({ \
+ uint16 _val = (val); \
+ BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+ uint16 *_buf = (uint16 *)(buf); \
+ uint _wds = (len) / 2; \
+ while (_wds--) { \
+ *_buf = bcmswap16(*_buf); \
+ _buf++; \
+ } \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = (_val >> 8) & 0xff; \
+ _bytes[2] = (_val >> 16) & 0xff; \
+ _bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 8; \
+ _bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 24; \
+ _bytes[1] = (_val >> 16) & 0xff; \
+ _bytes[2] = (_val >> 8) & 0xff; \
+ _bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH32_UA(_bytes); \
+})
+
+#else
+
+
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+ return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+ return BCMSWAP32(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+ return BCMSWAP32BY16(val);
+}
+
+
+
+
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+ len = len / 2;
+
+ while (len--) {
+ *buf = bcmswap16(*buf);
+ buf++;
+ }
+}
+
+
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = val >> 8;
+}
+
+
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = (val >> 16) & 0xff;
+ bytes[3] = val >> 24;
+}
+
+
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val >> 8;
+ bytes[1] = val & 0xff;
+}
+
+
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val >> 24;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+ return _LTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+ return _LTOH32_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+ return _NTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+ return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif
+#endif
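A small, hypothetical round-trip showing the unaligned byte-order helpers defined above; the buffer and function name are made up.

    /* Illustrative only: round-trip a 16-bit value at an unaligned offset. */
    static void bcmendian_example(void)
    {
        uint8 buf[3];
        uint16 v;

        htol16_ua_store(0x1234, &buf[1]);   /* little-endian store at an odd address */
        v = ltoh16_ua(&buf[1]);             /* v == 0x1234 on any host endianness */

        hton16_ua_store(v, &buf[1]);        /* same value, network (big-endian) order */
        v = ntoh16_ua(&buf[1]);             /* still 0x1234 */
        (void)v;                            /* silence unused-value warnings in this sketch */
    }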
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 000000000000..fd148c591d88
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,181 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h,v 13.15.112.1 2010-11-15 18:22:12 Exp $
+ */
+#ifndef _BCM_PCI_SPI_H
+#define _BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct {
+ uint32 spih_ctrl; /* 0x00 SPI Control Register */
+ uint32 spih_stat; /* 0x04 SPI Status Register */
+ uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */
+ uint32 spih_ext; /* 0x0C SPI Extension Register */
+ uint32 PAD[4]; /* 0x10-0x1F PADDING */
+
+ uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */
+ uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */
+ uint32 PAD[6]; /* 0x28-0x3F PADDING */
+
+ uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+ uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+ /* 1=Active High) */
+ uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */
+ uint32 spih_int_status; /* 0x4C SPI Interrupt Status */
+ uint32 PAD[4]; /* 0x50-0x5F PADDING */
+
+ uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */
+ uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */
+ uint32 PAD[1]; /* 0x68 PADDING */
+ uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */
+ uint32 PAD[4]; /* 0x70-0x7F PADDING */
+ uint32 PAD[8]; /* 0x80-0x9F PADDING */
+ uint32 PAD[8]; /* 0xA0-0xBF PADDING */
+ uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */
+ uint32 spih_pll_status; /* 0xC4 PLL Status Register */
+ uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */
+ uint32 spih_clk_count; /* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+ uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */
+ uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */
+
+ uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */
+ uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */
+ uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */
+ uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */
+ uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */
+ uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */
+ uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */
+ uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */
+ uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */
+ uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */
+ uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */
+ uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */
+ uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */
+ uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */
+ uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */
+ uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */
+ uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */
+ uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */
+ uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */
+ uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */
+ uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */
+ uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */
+ uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */
+ uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */
+ uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */
+ uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */
+
+ uint32 PAD[5]; /* 0x16C-0x17F PADDING */
+
+ uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */
+ uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */
+ uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */
+ uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+ uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+ uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */
+ uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */
+ uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+ uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+ uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */
+ uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+ uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+ uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+ uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */
+ uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+ uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+ uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+ uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */
+ uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+ uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+ uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+ uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+ uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */
+ uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */
+ uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */
+ uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+ uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */
+ uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */
+ uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */
+#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */
+#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */
+#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND 0xf4240 /* 1 million */
+
+#endif /* _BCM_PCI_SPI_H */
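
For illustration only: the ICR enables and the SPI status bits above are plain bitmasks that a host driver combines with the SPI_SPIN_BOUND limit into a bounded poll loop. In the sketch below, spih_status_read() is a hypothetical accessor, and printf/uint32/uint follow the Broadcom typedefs used throughout these headers; only the bit names come from the header above.

static int spih_wait_rx_data(void)
{
	uint32 spin = 0;
	uint32 status = 0;

	/* Poll until the read FIFO has data or the spin bound is hit. */
	do {
		status = spih_status_read();	/* hypothetical status accessor */
		if (!(status & SPIH_RFEMPTY))
			return 0;		/* data available in the read FIFO */
	} while (++spin < SPI_SPIN_BOUND);

	/* The state-machine field helps diagnose a stuck transfer. */
	printf("SPI stuck, state %u\n",
	       (uint)((status & SPIH_STATE_MASK) >> SPIH_STATE_SHIFT));
	return -1;
}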
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 000000000000..a3985cf29375
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h,v 13.5 2007-09-14 22:00:59 Exp $
+ */
+/* test */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define BCMPERF_GETICACHE_MISS(x) ((x) = 0)
+#define BCMPERF_GETICACHE_HIT(x) ((x) = 0)
+#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0)
+#endif /* _BCMPERF_H_ */
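
The macros above compile to no-ops on this platform, so the counters always read back as zero; the intended usage is still to bracket a measured section, as in this rough sketch (uint32 is assumed to come from the driver's typedefs.h).

static uint32 measure_icache_misses(void)
{
	uint32 icache_miss = 0;

	BCMPERF_ENABLE_ICACHE_MISS();
	/* ... code under measurement ... */
	BCMPERF_GETICACHE_MISS(icache_miss);	/* expands to ((icache_miss) = 0) here */
	return icache_miss;
}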
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 000000000000..5fda5e9b5df4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,120 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h,v 13.17.86.2 2010-12-23 01:13:20 Exp $
+ */
+
+#ifndef _sdio_api_h_
+#define _sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS (0x00)
+#define SDIOH_API_RC_FAIL (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
+
+#define SDIOH_DATA_PIO 0 /* PIO mode */
+#define SDIOH_DATA_DMA 1 /* DMA mode */
+
+
+typedef int SDIOH_API_RC;
+
+/* SDio Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+/* attach, return handle on success, NULL if failed.
+ * The handle shall be provided in all subsequent calls; no local cache is kept.
+ * cfghdl points to the starting address of the PCI device's mapped memory.
+ */
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+ uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+ uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+ void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint func, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint func, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint func, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+/* Helper function */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+
+
+#endif /* _sdio_api_h_ */
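
A minimal usage sketch for the sdioh_* host API above: attach, hook the SD interrupt, and issue a CMD52 read. The function number and register address are illustrative, error handling is abbreviated, and osl_t/uint8/TRUE are assumed to come from the Broadcom osl.h/typedefs.h.

static void my_sd_isr(void *arg)
{
	/* ... dispatch the SDIO device interrupt ... */
}

static int example_sdioh_bringup(osl_t *osh, void *cfghdl, uint irq)
{
	sdioh_info_t *si;
	uint8 val = 0;

	si = sdioh_attach(osh, cfghdl, irq);
	if (si == NULL)
		return -1;

	/* Route device interrupts to my_sd_isr() and enable them. */
	sdioh_interrupt_register(si, my_sd_isr, NULL);
	sdioh_interrupt_set(si, TRUE);

	/* CMD52: read one byte from function 1, address 0x0 (illustrative). */
	if (sdioh_request_byte(si, SDIOH_READ, 1, 0x0, &val) != SDIOH_API_RC_SUCCESS)
		return -1;

	return 0;
}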
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 000000000000..6131d8ae4305
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,211 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ * exports functions to client drivers
+ * abstracts OS- and bus-specific details of SDIO
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h,v 13.46.52.3 2010-10-19 00:41:44 Exp $
+ */
+
+#ifndef _bcmsdh_h_
+#define _bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL 0x0001 /* Error */
+#define BCMSDH_INFO_VAL 0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+/* Attach and build an interface to the underlying SD host driver.
+ * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
+ * - Returns the bcmsdh handle and virtual address base for register access.
+ * The returned handle should be used in all subsequent calls, but the bcmsdh
+ * implementation may maintain a single "default" handle (e.g. the first or
+ * most recent one) to enable single-instance implementations to pass NULL.
+ */
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+#ifdef BCMLXSDMMC
+extern int bcmsdh_claim_host_and_lock(void *sdh);
+extern int bcmsdh_release_host_and_unlock(void *sdh);
+#endif /* BCMLXSDMMC */
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4 bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ * fn: function whose CIS is being requested (0 is common CIS)
+ * cis: pointer to memory location to place results
+ * length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete, void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
+
+/* Pending (non-error) return code */
+#define BCME_PENDING 1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+/* callback functions */
+typedef struct {
+ /* attach to device */
+ void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+ uint16 func, uint bustype, void * regsva, osl_t * osh,
+ void * param);
+ /* detach from device */
+ void (*detach)(void *ch);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_register_oob_intr(void * dhdp);
+extern void bcmsdh_unregister_oob_intr(void);
+extern void bcmsdh_oob_intr_set(bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+#endif /* _bcmsdh_h_ */
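
Two small sketches of the bcmsdh_* client API above: a 4-byte F1 register read checked with bcmsdh_regfail(), and a synchronous CMD53 buffer receive using the request flags. The function numbers, the enum_base address, and the choice of flags are illustrative only.

static uint32 example_read_core_reg(void *sdh, uint32 enum_base)
{
	uint32 val;

	/* CMD53 to F1: 4-byte read of a backplane register at enum_base. */
	val = bcmsdh_reg_read(sdh, enum_base, 4);
	if (bcmsdh_regfail(sdh))
		return 0;	/* read failed; 0 used as an error marker here */
	return val;
}

static int example_recv_frame(void *sdh, uint32 addr, uint8 *buf, uint nbytes)
{
	/* Synchronous CMD53 read from function 2: 4-byte backplane width,
	 * incrementing address (no SDIO_REQ_FIXED), no SDIO_REQ_ASYNC. */
	return bcmsdh_recv_buf(sdh, addr, 2, SDIO_REQ_4BYTE,
	                       buf, nbytes, NULL, NULL, NULL);
}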
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 000000000000..d188c4ec7d5a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,122 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h,v 13.5.88.1 2010-12-23 01:13:20 Exp $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+
+/* Allocate/init/free per-OS private data */
+extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
+extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4 2
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+struct sdioh_info {
+ osl_t *osh; /* osh handler */
+	bool client_intr_enabled;	/* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ uint max_dma_len;
+ uint max_dma_descriptors; /* DMA Descriptors supported by this controller. */
+// SDDMA_DESCRIPTOR SGList[32]; /* Scatter/Gather DMA List */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+ sdioh_info_t *sd;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+
+#endif /* __BCMSDH_SDMMC_H__ */
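
The BCMSDH_SDMMC_INSTANCE above simply pairs the sdioh_info state with the native Linux sdio_func objects, one per I/O function. The sketch below shows how such an instance maps onto the stock MMC-stack calls from <linux/mmc/sdio_func.h>; the function index and address are illustrative.

static int example_f1_readb(PBCMSDH_SDMMC_INSTANCE inst, unsigned int addr, u8 *val)
{
	struct sdio_func *f1 = inst->func[1];	/* I/O function 1 of the client */
	int err = 0;

	sdio_claim_host(f1);			/* serialize access to the host */
	*val = sdio_readb(f1, addr, &err);	/* CMD52 read via the MMC core */
	sdio_release_host(f1);

	return err;
}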
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 000000000000..ee29b5c08a5c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,274 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h,v 13.4.90.2 2010-05-12 04:14:25 Exp $
+ */
+
+#ifndef _bcmsdpcm_h_
+#define _bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	0x02	/* we're ready to talk to host after enable */
+#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET 6 /* Version # */
+#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET 7 /* Spare */
+#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL 15
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0 0x01
+#define SDPCM_FLAG_RESVD1 0x02
+#define SDPCM_FLAG_GSPI_TXENAB 0x04
+#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on command.
+ * Len is current or requested frame length, not
+ * including test header; sent little-endian.
+ */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */
+#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
+ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
+ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
+ uint32 rxdescuflo; /* receive descriptor underflows */
+ uint32 rxfifooflo; /* receive fifo overflows */
+ uint32 txfifouflo; /* transmit fifo underflows */
+ uint32 runt; /* runt (too short) frames recv'd from bus */
+ uint32 badlen; /* frame's rxh len does not match its hw tag len */
+ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
+ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */
+ uint32 rxfcrc; /* frame rx header indicates crc error */
+ uint32 rxfwoos; /* frame rx header indicates write out of sync */
+ uint32 rxfwft; /* frame rx header indicates write frame termination */
+ uint32 rxfabort; /* frame rx header indicates frame aborted */
+ uint32 woosint; /* write out of sync interrupt */
+ uint32 roosint; /* read out of sync interrupt */
+ uint32 rftermint; /* read frame terminate interrupt */
+ uint32 wftermint; /* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val) ((var) == (val))
+#define SDIODREV_GE(var, val) ((var) >= (val))
+#define SDIODREV_GT(var, val) ((var) > (val))
+#define SDIODREV_LT(var, val) ((var) < (val))
+#define SDIODREV_LE(var, val) ((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+ (SDIODREV_LT((h)->corerev, 1) ? \
+ SDIODDMAREG32((h), (dir), (chnl)) : \
+ SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODDMAREG(h, dir, chnl) : \
+ PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+ (SDIODREV_LT((corerev), 1) ? \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODFIFOREG(h, corerev) : \
+ PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0001
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+#define SDPCM_SHARED_IN_BRPT 0x0800
+#define SDPCM_SHARED_SET_BRPT 0x1000
+#define SDPCM_SHARED_PENDING_BRPT 0x2000
+
+typedef struct {
+ uint32 flags;
+ uint32 trap_addr;
+ uint32 assert_exp_addr;
+ uint32 assert_file_addr;
+ uint32 assert_line;
+ uint32 console_addr; /* Address of hndrte_cons_t */
+ uint32 msgtrace_addr;
+ uint32 brpt_addr;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+/* Function can be used to notify host of FW halt */
+extern void sdpcmd_fwhalt(void);
+
+#endif /* _bcmsdpcm_h_ */
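
The SW-header macros above fully describe how a received SDPCM frame is parsed; a rough decode sketch follows. Here hdr points at the 8-byte software header (just past the hardware length tag), and printf plus the uint/uint8 types follow the conventions of these headers.

static void example_decode_swheader(uint8 *hdr)
{
	uint seq     = SDPCM_PACKET_SEQUENCE(hdr);	/* 8-bit rolling sequence   */
	uint chan    = SDPCM_PACKET_CHANNEL(hdr);	/* control/event/data/glom  */
	uint nextlen = SDPCM_NEXTLEN_VALUE(hdr);	/* next frame length, bytes */
	uint doffset = SDPCM_DOFFSET_VALUE(hdr);	/* payload offset from SOF  */

	if (chan == SDPCM_GLOM_CHANNEL && SDPCM_GLOMDESC(hdr)) {
		/* superframe descriptor: payload carries sub-frame lengths */
	}

	printf("sdpcm: seq %u chan %u nextlen %u doffset %u\n",
	       seq, chan, nextlen, doffset);
}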
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 000000000000..0bff355f8ffd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,135 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h,v 13.11.86.1 2010-11-15 18:14:56 Exp $
+ */
+#ifndef _BCM_SD_SPI_H
+#define _BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of sdspi_lock() calls */
+	bool client_intr_enabled;	/* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ bool got_hcint; /* Host Controller interrupt. */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current register transfer size */
+ uint32 cmd53_wr_data; /* Used to pass CMD53 write data */
+ uint32 card_response; /* Used to pass back response status byte */
+ uint32 card_rsp_data; /* Used to pass back response data word */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 000000000000..0f4c0267dbc8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,267 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h,v 13.21.2.6 2010-11-15 18:14:01 Exp $
+ */
+#ifndef _BCM_SD_STD_H
+#define _BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+#define SDIOH_MODE_SD1 1
+#define SDIOH_MODE_SD4 2
+
+#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK 1
+#define SDIOH_TYPE_BCM27XX 2
+#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS 0x00001E00
+
+#define RETRIES_LARGE 100000
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+#define USE_FIFO 0x8 /* Fifo vs non-fifo */
+
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+#define HC_INTR_RETUNING 0x1000
+
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint32 curr_caps; /* max current capabilities reg */
+
+ osl_t *osh; /* osh handler */
+ volatile char *mem_space; /* pci device memory va */
+ uint lockcount; /* nest count of sdstd_lock() calls */
+	bool client_intr_enabled;	/* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint target_dev; /* Target device ID */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+ int local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf; /* DMA Buffer virtual address */
+ ulong dma_phys; /* DMA Buffer physical address */
+ void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */
+ ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */
+
+ /* adjustments needed to make the dma align properly */
+ void *dma_start_buf;
+ ulong dma_start_phys;
+ uint alloced_dma_size;
+ void *adma2_dscr_start_buf;
+ ulong adma2_dscr_start_phys;
+ uint alloced_adma2_dscr_size;
+
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ bool got_hcint; /* local interrupt flag */
+ uint16 last_intrstatus; /* to cache intrstatus */
+ int host_UHSISupported; /* whether UHSI is supported for HC. */
+ int card_UHSI_voltage_Supported; /* whether UHSI is supported for
+ * Card in terms of Voltage [1.8 or 3.3].
+ */
+ int global_UHSI_Supp; /* type of UHSI support in both host and card.
+ * HOST_SDR_UNSUPP: capabilities not supported/matched
+ * HOST_SDR_12_25: SDR12 and SDR25 supported
+ * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+ */
+ int sd3_dat_state; /* data transfer state used for retuning check */
+ int sd3_tun_state; /* tuning state used for retuning check */
+ bool sd3_tuning_reqd; /* tuning requirement parameter */
+ uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+};
+
+#define DMA_MODE_NONE 0
+#define DMA_MODE_SDMA 1
+#define DMA_MODE_ADMA1 2
+#define DMA_MODE_ADMA2 3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO -1
+
+#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE 0
+#define SDIOH_ADMA1_MODE 1
+#define SDIOH_ADMA2_MODE 2
+#define SDIOH_ADMA2_64_MODE 3
+
+#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 0
+#define TUNING_START 1
+#define TUNING_START_AFTER_DAT 2
+#define TUNING_ONGOING 3
+
+#define DATA_TRANSFER_IDLE 0
+#define DATA_TRANSFER_ONGOING 1
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_STD_H */
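
A sketch of filling one ADMA2 descriptor line with the attribute bits defined above. Per the SD Host Controller ADMA2 descriptor layout, the 32-bit len_attr word carries the transfer length in its upper 16 bits and the attribute bits in its lower 16 bits; phys and len here are illustrative inputs.

static void example_fill_adma2_last(adma2_dscr_32b_t *d, uint32 phys, uint16 len)
{
	d->phys_addr = phys;				/* DMA address of the buffer */
	d->len_attr  = ((uint32)len << 16) |
	               ADMA2_ATTRIBUTE_ACT_TRAN |	/* transfer data            */
	               ADMA2_ATTRIBUTE_END |		/* last line of the table   */
	               ADMA2_ATTRIBUTE_VALID;		/* line is valid            */
}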
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 000000000000..0eb2a30c9a84
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,40 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h,v 13.5.112.1 2010-11-15 18:13:09 Exp $
+ */
+#ifndef _BCM_SPI_H
+#define _BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 000000000000..530036f0ba77
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,708 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmutils.h,v 13.236.2.16 2011-01-26 00:45:06 Exp $
+ */
+
+
+#ifndef _bcmutils_h_
+#define _bcmutils_h_
+
+#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define _BCM_U 0x01
+#define _BCM_L 0x02
+#define _BCM_D 0x04
+#define _BCM_C 0x08
+#define _BCM_P 0x10
+#define _BCM_S 0x20
+#define _BCM_X 0x40
+#define _BCM_SP 0x80
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+
+
+struct bcmstrbuf {
+ char *buf;
+ unsigned int size;
+ char *origbuf;
+ unsigned int origsize;
+};
+
+
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 0x20
+
+
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) {\
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+}
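
/*
 * Illustrative usage of SPINWAIT (hypothetical regs/status_reg/BUSY_BIT names,
 * R_REG assumed from osl.h): poll in 10 us steps until the condition clears
 * or the budget expires, then re-check once to detect a timeout.
 */
#if 0	/* example, not compiled */
	SPINWAIT((R_REG(osh, &regs->status_reg) & BUSY_BIT), 100 * 1000);
	if (R_REG(osh, &regs->status_reg) & BUSY_BIT) {
		/* still busy after 100 ms: handle the timeout */
	}
#endif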
+
+
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16
+#endif
+
+typedef struct pktq_prec {
+ void *head;
+ void *tail;
+ uint16 len;
+ uint16 max;
+} pktq_prec_t;
+
+
+
+struct pktq {
+ uint16 num_prec;
+ uint16 hi_prec;
+ uint16 max;
+ uint16 len;
+
+ struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+
+struct spktq {
+ uint16 num_prec;
+ uint16 hi_prec;
+ uint16 max;
+ uint16 len;
+
+ struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool) ((pool) && (pool)->inited)
+#if defined(BCM4329C0)
+#define SHARED_POOL (pktpool_shared_ptr)
+#else
+#define SHARED_POOL (pktpool_shared)
+#endif
+#else
+#define POOL_ENAB(bus) 0
+#define SHARED_POOL ((struct pktpool *)NULL)
+#endif
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX 40
+#endif
+#define PKTPOOL_CB_MAX 3
+
+struct pktpool;
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+ pktpool_cb_t cb;
+ void *arg;
+} pktpool_cbinfo_t;
+
+#ifdef BCMDBG_POOL
+
+#define POOL_IDLE 0
+#define POOL_RXFILL 1
+#define POOL_RXDH 2
+#define POOL_RXD11 3
+#define POOL_TXDH 4
+#define POOL_TXD11 5
+#define POOL_AMPDU 6
+#define POOL_TXENQ 7
+
+typedef struct {
+ void *p;
+ uint32 cycles;
+ uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+ uint8 txdh;
+ uint8 txd11;
+ uint8 enq;
+ uint8 rxdh;
+ uint8 rxd11;
+ uint8 rxfill;
+ uint8 idle;
+} pktpool_stats_t;
+#endif
+
+typedef struct pktpool {
+ bool inited;
+ uint16 r;
+ uint16 w;
+ uint16 len;
+ uint16 maxlen;
+ uint16 plen;
+ bool istx;
+ bool empty;
+ uint8 cbtoggle;
+ uint8 cbcnt;
+ uint8 ecbcnt;
+ bool emptycb_disable;
+ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+ pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+ void *q[PKTPOOL_LEN_MAX + 1];
+
+#ifdef BCMDBG_POOL
+ uint8 dbg_cbcnt;
+ pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+ uint16 dbg_qlen;
+ pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+} pktpool_t;
+
+#if defined(BCM4329C0)
+extern pktpool_t *pktpool_shared_ptr;
+#else
+extern pktpool_t *pktpool_shared;
+#endif
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern uint16 pktpool_avail(pktpool_t *pktp);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+
+#define POOLPTR(pp) ((pktpool_t *)(pp))
+#define pktpool_len(pp) (POOLPTR(pp)->len - 1)
+#define pktpool_plen(pp) (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen)
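
/*
 * Minimal pktpool usage sketch (illustrative values; osh and FALSE follow the
 * osl.h/typedefs.h conventions): pre-allocate a pool of 2 KB packets, then
 * get and return one packet.
 */
#if 0	/* example, not compiled */
	pktpool_t pool;
	int n = 16;			/* updated with the count actually allocated */
	void *p;

	if (pktpool_init(osh, &pool, &n, 2048, FALSE) != 0)
		return;

	p = pktpool_get(&pool);		/* NULL when the pool is empty */
	if (p != NULL)
		pktpool_free(&pool, p);

	pktpool_deinit(osh, &pool);
#endif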
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif
+
+
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+ ifpkt_cb_t fn, int arg);
+
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+
+
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+
+#define pktenq(pq, p) pktq_penq(((struct pktq *)pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq(((struct pktq *)pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)pq), 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
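
The precedence-queue API above follows the usual init/enqueue/dequeue pattern. The sketch below only spells that pattern out: the queue depth and the caller-supplied packet are placeholders, and dequeue walks precedences from highest to lowest via PKTQ_PREC_ITER.

    static void *example_prec_queue(struct pktq *q, int prec, void *pkt)
    {
        int p;

        pktq_init(q, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT);

        if (!pktq_pfull(q, prec))
            pktq_penq(q, prec, pkt);    /* tail-enqueue at this precedence */

        PKTQ_PREC_ITER(q, p) {          /* highest precedence first */
            if (!pktq_pempty(q, p))
                return pktq_pdeq(q, p);
        }
        return NULL;
    }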
+
+
+
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+#define PKTPRIO_VDSCP 0x100
+#define PKTPRIO_VLAN 0x200
+#define PKTPRIO_UPD 0x400
+#define PKTPRIO_DSCP 0x800
+
+
+extern int bcm_atoi(char *s);
+extern ulong bcm_strtoul(char *cp, char **endp, uint base);
+extern char *bcmstrstr(char *haystack, char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(char *p, struct ether_addr *ea);
+
+
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+
+
+extern void bcm_mdelay(uint ms);
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define bcmlog(fmt, a1, a2)
+#define bcmdumplog(buf, size) *buf = '\0'
+#define bcmdumplogent(buf, idx) -1
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+
+
+
+typedef struct bcm_iovar {
+ const char *name;
+ uint16 varid;
+ uint16 flags;
+ uint16 type;
+ uint16 minlen;
+} bcm_iovar_t;
+
+
+
+
+#define IOV_GET 0
+#define IOV_SET 1
+
+
+#define IOV_GVAL(id) ((id)*2)
+#define IOV_SVAL(id) (((id)*2)+IOV_SET)
+#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid) (actionid >> 1)
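
The GET/SET action-id encoding above is normally consumed by a per-module "doiovar" handler. The sketch below shows that dispatch pattern only; IOV_FOO and the two accessors are hypothetical, and real handlers also validate lengths via bcm_iovar_lencheck().

    enum { IOV_FOO = 1 };                       /* hypothetical iovar id */
    extern int32 example_get_foo(void);         /* hypothetical accessors */
    extern int example_set_foo(int32 val);

    static int example_doiovar(uint32 actionid, void *arg, int len)
    {
        int32 int_val = 0;
        int err = BCME_OK;

        if (len < (int)sizeof(int_val))
            return BCME_BUFTOOSHORT;

        switch (actionid) {
        case IOV_GVAL(IOV_FOO):                 /* even id: "get" */
            int_val = example_get_foo();
            bcopy(&int_val, arg, sizeof(int_val));
            break;
        case IOV_SVAL(IOV_FOO):                 /* odd id: "set" */
            bcopy(arg, &int_val, sizeof(int_val));
            err = example_set_foo(int_val);
            break;
        default:
            err = BCME_UNSUPPORTED;
            break;
        }
        return err;
    }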
+
+
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif
+#endif
+
+
+#define IOVT_VOID 0
+#define IOVT_BOOL 1
+#define IOVT_INT8 2
+#define IOVT_UINT8 3
+#define IOVT_INT16 4
+#define IOVT_UINT16 5
+#define IOVT_INT32 6
+#define IOVT_UINT32 7
+#define IOVT_BUFFER 8
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+
+#define BCM_IOV_TYPE_INIT { \
+ "void", \
+ "bool", \
+ "int8", \
+ "uint8", \
+ "int16", \
+ "uint16", \
+ "int32", \
+ "uint32", \
+ "buffer", \
+ "" }
+
+#define BCM_IOVT_IS_INT(type) (\
+ (type == IOVT_BOOL) || \
+ (type == IOVT_INT8) || \
+ (type == IOVT_UINT8) || \
+ (type == IOVT_INT16) || \
+ (type == IOVT_UINT16) || \
+ (type == IOVT_INT32) || \
+ (type == IOVT_UINT32))
+
+
+
+#define BCME_STRLEN 64
+#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST))
+
+
+
+
+#define BCME_OK 0
+#define BCME_ERROR -1
+#define BCME_BADARG -2
+#define BCME_BADOPTION -3
+#define BCME_NOTUP -4
+#define BCME_NOTDOWN -5
+#define BCME_NOTAP -6
+#define BCME_NOTSTA -7
+#define BCME_BADKEYIDX -8
+#define BCME_RADIOOFF -9
+#define BCME_NOTBANDLOCKED -10
+#define BCME_NOCLK -11
+#define BCME_BADRATESET -12
+#define BCME_BADBAND -13
+#define BCME_BUFTOOSHORT -14
+#define BCME_BUFTOOLONG -15
+#define BCME_BUSY -16
+#define BCME_NOTASSOCIATED -17
+#define BCME_BADSSIDLEN -18
+#define BCME_OUTOFRANGECHAN -19
+#define BCME_BADCHAN -20
+#define BCME_BADADDR -21
+#define BCME_NORESOURCE -22
+#define BCME_UNSUPPORTED -23
+#define BCME_BADLEN -24
+#define BCME_NOTREADY -25
+#define BCME_EPERM -26
+#define BCME_NOMEM -27
+#define BCME_ASSOCIATED -28
+#define BCME_RANGE -29
+#define BCME_NOTFOUND -30
+#define BCME_WME_NOT_ENABLED -31
+#define BCME_TSPEC_NOTFOUND -32
+#define BCME_ACM_NOTSUPPORTED -33
+#define BCME_NOT_WME_ASSOCIATION -34
+#define BCME_SDIO_ERROR -35
+#define BCME_DONGLE_DOWN -36
+#define BCME_VERSION -37
+#define BCME_TXFAIL -38
+#define BCME_RXFAIL -39
+#define BCME_NODEVICE -40
+#define BCME_NMODE_DISABLED -41
+#define BCME_NONRESIDENT -42
+#define BCME_LAST BCME_NONRESIDENT
+
+
+#define BCMERRSTRINGTABLE { \
+ "OK", \
+ "Undefined error", \
+ "Bad Argument", \
+ "Bad Option", \
+ "Not up", \
+ "Not down", \
+ "Not AP", \
+ "Not STA", \
+ "Bad Key Index", \
+ "Radio Off", \
+ "Not band locked", \
+ "No clock", \
+ "Bad Rate valueset", \
+ "Bad Band", \
+ "Buffer too short", \
+ "Buffer too long", \
+ "Busy", \
+ "Not Associated", \
+ "Bad SSID len", \
+ "Out of Range Channel", \
+ "Bad Channel", \
+ "Bad Address", \
+ "Not Enough Resources", \
+ "Unsupported", \
+ "Bad length", \
+ "Not Ready", \
+ "Not Permitted", \
+ "No Memory", \
+ "Associated", \
+ "Not In Range", \
+ "Not Found", \
+ "WME Not Enabled", \
+ "TSPEC Not Found", \
+ "ACM Not Supported", \
+ "Not WME Association", \
+ "SDIO Bus Error", \
+ "Dongle Not Accessible", \
+ "Incorrect version", \
+ "TX Failure", \
+ "RX Failure", \
+ "Device Not Present", \
+ "NMODE Disabled", \
+ "Nonresident overlay access", \
+}
+
+#ifndef ABS
+#define ABS(a) (((a) < 0)?-(a):(a))
+#endif
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b))?(a):(b))
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b))?(a):(b))
+#endif
+
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
+#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y))
+#define ISALIGNED(a, x) (((uintptr)(a) & ((x)-1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+ & ~((boundary) - 1))
+#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0)
+#define VALID_MASK(mask) !((mask) & ((mask) + 1))
+#ifndef OFFSETOF
+#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
+#endif
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a) (sizeof(a)/sizeof(a[0]))
+#endif
+
+
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f))
+
+
+#ifndef setbit
+#ifndef NBBY
+#define NBBY 8
+#endif
+#define setbit(a, i) (((uint8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define clrbit(a, i) (((uint8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define isset(a, i) (((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define isclr(a, i) ((((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif
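
A small illustration of the byte-array bit helpers above; the 64-bit map size and the bit indices are arbitrary examples.

    static bool example_bitmap(void)
    {
        uint8 map[64 / NBBY] = {0};     /* room for 64 bits, all cleared */

        setbit(map, 36);
        clrbit(map, 37);                /* already clear; harmless */
        return isset(map, 36) && isclr(map, 37);
    }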
+
+#define NBITS(type) (sizeof(type) * 8)
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+
+#define MODADD(x, y, bound) \
+ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
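
These modular increment/decrement helpers are typically used for ring-buffer indices (the pktpool read/write counters above are one consumer). A short sketch with an arbitrary ring depth:

    #define EXAMPLE_RING_DEPTH 8        /* must be a power of two for the *_POW2 forms */

    static void example_ring_advance(uint16 *w_idx, uint16 *r_idx)
    {
        /* generic form: works for any positive depth */
        *w_idx = MODINC(*w_idx, EXAMPLE_RING_DEPTH);

        /* cheaper form when the depth is a power of two */
        *r_idx = MODINC_POW2(*r_idx, EXAMPLE_RING_DEPTH);
    }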
+
+
+#define CRC8_INIT_VALUE 0xff
+#define CRC8_GOOD_VALUE 0x9f
+#define CRC16_INIT_VALUE 0xffff
+#define CRC16_GOOD_VALUE 0xf0b8
+#define CRC32_INIT_VALUE 0xffffffff
+#define CRC32_GOOD_VALUE 0xdebb20e3
+
+
+typedef struct bcm_bit_desc {
+ uint32 bit;
+ const char* name;
+} bcm_bit_desc_t;
+
+
+typedef struct bcm_tlv {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} bcm_tlv_t;
+
+
+#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
+
+
+#define ETHER_ADDR_STR_LEN 18
+
+
+
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+ if (
+#ifdef __i386__
+ 1 ||
+#endif
+ (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+
+
+ ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+ ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+ ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+ ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+ } else {
+
+ int k;
+ for (k = 0; k < 16; k++)
+ dst[k] = src1[k] ^ src2[k];
+ }
+}
+
+
+
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC)
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
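
bcm_parse_tlvs() already implements the common id/len/data search loop; the sketch below just spells the pattern out with bcm_valid_tlv() and bcm_next_tlv() (the id value is a placeholder supplied by the caller).

    static bcm_tlv_t *example_find_tlv(void *buf, int buflen, uint8 id)
    {
        bcm_tlv_t *elt = (bcm_tlv_t *)buf;

        while (elt != NULL && bcm_valid_tlv(elt, buflen)) {
            if (elt->id == id)
                return elt;
            elt = bcm_next_tlv(elt, &buflen);   /* also shrinks buflen */
        }
        return NULL;
    }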
+
+
+extern const char *bcmerrorstr(int bcmerror);
+
+
+typedef uint32 mbool;
+#define mboolset(mb, bit) ((mb) |= (bit))
+#define mboolclr(mb, bit) ((mb) &= ~(bit))
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0)
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
+
+
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+
+
+struct fielddesc {
+ const char *nameandfmt;
+ uint32 offset;
+ uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(char *name, const uchar *cdata, int len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+ char *buf, uint32 bufsize);
+
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
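
bcm_mkiovar() above packs a NUL-terminated iovar name followed by its value into one buffer for the driver's GET/SET var paths. A hedged usage sketch; the transport helper is hypothetical, and bcm_mkiovar() is expected to return 0 when the name and value do not fit.

    extern int example_sendvar(void *ctx, char *buf, uint len);    /* hypothetical transport */

    static int example_set_int_iovar(void *ctx, const char *name, int val)
    {
        char iovbuf[32];
        uint len;

        len = bcm_mkiovar((char *)name, (char *)&val, sizeof(val),
                          iovbuf, sizeof(iovbuf));
        if (len == 0)
            return BCME_BUFTOOSHORT;            /* name + value did not fit */

        return example_sendvar(ctx, iovbuf, len);
    }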
+
+
+
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi.h b/drivers/net/wireless/bcmdhd/include/bcmwifi.h
new file mode 100644
index 000000000000..45f3c0312dcc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmwifi.h
@@ -0,0 +1,165 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the definitions and function prototypes used by
+ * both the wl driver, tools and Apps.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi.h,v 1.29.6.3 2010-08-03 17:47:04 Exp $
+ */
+
+
+#ifndef _bcmwifi_h_
+#define _bcmwifi_h_
+
+
+
+typedef uint16 chanspec_t;
+
+
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
+#define CH_20MHZ_APART 4
+#define CH_10MHZ_APART 2
+#define CH_5MHZ_APART 1
+#define CH_MAX_2G_CHANNEL 14
+#define WLC_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL
+#define MAXCHANNEL 224
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_CHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_CHANSPEC_BW_MASK 0x0C00
+#define WL_CHANSPEC_BW_SHIFT 10
+#define WL_CHANSPEC_BW_10 0x0400
+#define WL_CHANSPEC_BW_20 0x0800
+#define WL_CHANSPEC_BW_40 0x0C00
+
+#define WL_CHANSPEC_BAND_MASK 0xf000
+#define WL_CHANSPEC_BAND_SHIFT 12
+#define WL_CHANSPEC_BAND_5G 0x1000
+#define WL_CHANSPEC_BAND_2G 0x2000
+#define INVCHANSPEC 255
+
+
+#define WF_CHAN_FACTOR_2_4_G 4814
+#define WF_CHAN_FACTOR_5_G 10000
+#define WF_CHAN_FACTOR_4_G 8000
+
+
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+
+
+#define CHSPEC_CTL_SB(chspec) (chspec & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) (chspec & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+
+#else
+
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#endif
+
+#define CHSPEC_IS20_UNCOND(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
+ (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+ (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+#define CHANSPEC_STR_LEN 8
+
+
+#define WLC_MAXRATE 108
+#define WLC_RATE_1M 2
+#define WLC_RATE_2M 4
+#define WLC_RATE_5M5 11
+#define WLC_RATE_11M 22
+#define WLC_RATE_6M 12
+#define WLC_RATE_9M 18
+#define WLC_RATE_12M 24
+#define WLC_RATE_18M 36
+#define WLC_RATE_24M 48
+#define WLC_RATE_36M 72
+#define WLC_RATE_48M 96
+#define WLC_RATE_54M 108
+
+#define WLC_2G_25MHZ_OFFSET 5
+
+
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+
+extern chanspec_t wf_chspec_aton(char *a);
+
+
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+
+extern int wf_channel2mhz(uint channel, uint start_factor);
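
A short illustration of building and inspecting a chanspec with the macros above; channel 36 is an arbitrary example value.

    static int example_chanspec(void)
    {
        chanspec_t chspec = CH20MHZ_CHSPEC(36); /* 20 MHz, no sideband */

        if (wf_chspec_malformed(chspec))
            return -1;

        /* 36 > CH_MAX_2G_CHANNEL, so CH20MHZ_CHSPEC selected the 5 GHz band */
        return CHSPEC_IS5G(chspec) ?
            wf_channel2mhz(CHSPEC_CHANNEL(chspec), WF_CHAN_FACTOR_5_G) : 0;
    }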
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 000000000000..9661dac2603f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,129 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h,v 13.11.10.1 2010-12-22 23:47:26 Exp $
+ */
+
+#ifndef _dhdioctl_h_
+#define _dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ bool set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+ uint driver; /* to identify target driver */
+} dhd_ioctl_t;
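
A user-side sketch of filling this structure for a DHD_GET_VAR request. How the filled structure actually reaches the driver (device node and ioctl command number) is outside this header, and the buffer convention shown (name in, value out) is the usual one rather than anything this file mandates.

    static void example_fill_getvar(dhd_ioctl_t *ioc, char *buf, uint buflen)
    {
        ioc->cmd    = DHD_GET_VAR;
        ioc->buf    = buf;              /* in: iovar name, out: returned value */
        ioc->len    = buflen;           /* no more than DHD_IOCTL_MAXLEN */
        ioc->set    = FALSE;
        ioc->used   = 0;
        ioc->needed = 0;
        ioc->driver = DHD_IOCTL_MAGIC;  /* identifies the DHD as the target */
    }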
+
+/* Underlying BUS definition */
+enum {
+ BUS_TYPE_USB = 0, /* for USB dongles */
+ BUS_TYPE_SDIO /* for SDIO dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC 0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION 1
+
+#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC 0
+#define DHD_GET_VERSION 1
+#define DHD_GET_VAR 2
+#define DHD_SET_VAR 3
+
+/* message levels */
+#define DHD_ERROR_VAL 0x0001
+#define DHD_TRACE_VAL 0x0002
+#define DHD_INFO_VAL 0x0004
+#define DHD_DATA_VAL 0x0008
+#define DHD_CTL_VAL 0x0010
+#define DHD_TIMER_VAL 0x0020
+#define DHD_HDRS_VAL 0x0040
+#define DHD_BYTES_VAL 0x0080
+#define DHD_INTR_VAL 0x0100
+#define DHD_LOG_VAL 0x0200
+#define DHD_GLOM_VAL 0x0400
+#define DHD_EVENT_VAL 0x0800
+#define DHD_BTA_VAL 0x1000
+#define DHD_ISCAN_VAL 0x2000
+#define DHD_ARPOE_VAL 0x4000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+ uint version; /* To allow structure change tracking */
+ uint freq; /* Max ticks between tx/rx attempts */
+ uint count; /* Test packets to send/rcv each attempt */
+ uint print; /* Print counts every <print> attempts */
+ uint total; /* Total packets (or bursts) */
+ uint minlen; /* Minimum length of packets to send */
+ uint maxlen; /* Maximum length of packets to send */
+ uint numsent; /* Count of test packets sent */
+ uint numrcvd; /* Count of test packets received */
+ uint numfail; /* Count of test send failures */
+ uint mode; /* Test mode (type of test packets) */
+ uint stop; /* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO 1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE (-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 000000000000..ae1f975bdb64
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.32.4.1 2010-09-17 00:39:18 $
+ *
+*/
+
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define EPI_MAJOR_VERSION 5
+
+#define EPI_MINOR_VERSION 90
+
+#define EPI_RC_NUMBER 125
+
+#define EPI_INCREMENTAL_NUMBER 94
+
+#define EPI_BUILD_NUMBER 0
+
+#define EPI_VERSION 5, 90, 125, 94
+
+#define EPI_VERSION_NUM 0x055a7d5e
+
+#define EPI_VERSION_DEV 5.90.125
+
+
+#define EPI_VERSION_STR "5.90.125.94"
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 000000000000..51c51b9734a2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,34 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h,v 13.35.8.5 2011-02-11 00:56:32 Exp $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
new file mode 100644
index 000000000000..8b9615c35f35
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h,v 13.4.14.1 2011-02-05 00:04:30 Exp $
+ */
+
+#ifndef _hndrte_armtrap_h
+#define _hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define TRAP_STRIDE 4
+#define FIRST_TRAP TR_RST
+#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define MAX_TRAP_TYPE (TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+#define TRAP_T_SIZE 80
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+ uint32 type;
+ uint32 epc;
+ uint32 cpsr;
+ uint32 spsr;
+ uint32 r0;
+ uint32 r1;
+ uint32 r2;
+ uint32 r3;
+ uint32 r4;
+ uint32 r5;
+ uint32 r6;
+ uint32 r7;
+ uint32 r8;
+ uint32 r9;
+ uint32 r10;
+ uint32 r11;
+ uint32 r12;
+ uint32 r13;
+ uint32 r14;
+ uint32 pc;
+} trap_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _hndrte_armtrap_h */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_cons.h b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
new file mode 100644
index 000000000000..b9ede53af701
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
@@ -0,0 +1,68 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h,v 13.4.10.4 2011-02-05 00:08:20 Exp $
+ */
+
+#ifndef _HNDRTE_CONS_H
+#define _HNDRTE_CONS_H
+
+#include <typedefs.h>
+
+#define CBUF_LEN (128)
+
+#define LOG_BUF_LEN 1024
+
+typedef struct {
+ uint32 buf; /* Can't be a pointer on (64-bit) hosts */
+ uint buf_size;
+ uint idx;
+ char *_buf_compat; /* redundant pointer for backward compat. */
+} hndrte_log_t;
+
+typedef struct {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn), the host should write a complete
+ * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host polls.
+ */
+ hndrte_log_t log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf until <CR> is received, then
+ * the buffer is processed as a command line. Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+} hndrte_cons_t;
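
A host-side polling sketch for the log ring described above. It assumes the host has already located the console structure in dongle memory and has some way to read single bytes from the dongle address space (read_dongle_byte() below is hypothetical); the real driver copies the ring over the bus in larger chunks.

    extern uint8 read_dongle_byte(uint32 dongle_addr);  /* hypothetical accessor */

    static uint example_drain_console(hndrte_cons_t *c, uint last_idx,
                                      void (*emit)(char ch))
    {
        uint idx = c->log.idx;          /* snapshot of the producer index */

        while (last_idx != idx) {
            emit((char)read_dongle_byte(c->log.buf + last_idx));
            last_idx = (last_idx + 1) % c->log.buf_size;
        }
        return last_idx;                /* caller keeps this for the next poll */
    }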
+
+#endif /* _HNDRTE_CONS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 000000000000..4e26121c3881
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,207 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h,v 13.11 2009-12-03 23:52:31 Exp $
+ */
+
+#ifndef _HNDSOC_H
+#define _HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * Not all regions exist on all chips.
+ */
+#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */
+#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
+#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
+
+#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */
+#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
+#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips
+ */
+
+#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
+#define SI_FASTRAM_SWAPPED 0x19800000
+
+#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
+#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
+#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */
+#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
+#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
+#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */
+#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
+#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
+#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
+#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), low 32 bits
+ */
+#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
+#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
+ * unused address ranges
+ */
+
+/* Two things are constant on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon always being the first core:
+ */
+#define SI_CC_IDX 0
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+#define SOCI_UBUS 2
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+
+/* clk_ctl_st register */
+#define CCS_FORCEALP 0x00000001 /* force ALP request */
+#define CCS_FORCEHT 0x00000002 /* force HT request */
+#define CCS_FORCEILP 0x00000004 /* force ILP request */
+#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
+#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
+#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
+#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT 8
+#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
+#define CCS_HTAVAIL 0x00020000 /* HT is available */
+#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */
+#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
+#define CCS_ERSRC_STS_SHIFT 24
+
+#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */
+#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to the SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size */
+#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
+#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */
+#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
+#define BISZ_TXTST_IDX 1 /* 1: text start */
+#define BISZ_TXTEND_IDX 2 /* 2: text end */
+#define BISZ_DATAST_IDX 3 /* 3: data start */
+#define BISZ_DATAEND_IDX 4 /* 4: data end */
+#define BISZ_BSSST_IDX 5 /* 5: bss start */
+#define BISZ_BSSEND_IDX 6 /* 6: bss end */
+#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
+
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/htsf.h b/drivers/net/wireless/bcmdhd/include/htsf.h
new file mode 100644
index 000000000000..379fbbe37dbc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/htsf.h
@@ -0,0 +1,74 @@
+/*
+ * Time stamps for latency measurements
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: htsf.h,v 1.1.2.4 2011-01-21 08:27:03 Exp $
+ */
+#ifndef _HTSF_H_
+#define _HTSF_H_
+
+#define HTSFMAGIC 0xCDCDABAB /* in network order for tcpdump */
+#define HTSFENDMAGIC 0xEFEFABAB /* to distinguish from RT2 magic */
+#define HTSF_HOSTOFFSET 102
+#define HTSF_DNGLOFFSET (HTSF_HOSTOFFSET - 4)
+#define HTSF_DNGLOFFSET2 (HTSF_HOSTOFFSET + 106)
+#define HTSF_MIN_PKTLEN 200
+#define ETHER_TYPE_BRCM_PKTDLYSTATS 0x886d
+
+typedef enum htsfts_type {
+ T10,
+ T20,
+ T30,
+ T40,
+ T50,
+ T60,
+ T70,
+ T80,
+ T90,
+ TA0,
+ TE0
+} htsf_timestamp_t;
+
+typedef struct {
+ uint32 magic;
+ uint32 prio;
+ uint32 seqnum;
+ uint32 misc;
+ uint32 c10;
+ uint32 t10;
+ uint32 c20;
+ uint32 t20;
+ uint32 t30;
+ uint32 t40;
+ uint32 t50;
+ uint32 t60;
+ uint32 t70;
+ uint32 t80;
+ uint32 t90;
+ uint32 cA0;
+ uint32 tA0;
+ uint32 cE0;
+ uint32 tE0;
+ uint32 endmagic;
+} htsfts_t;
+
+#endif /* _HTSF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 000000000000..1ec136eb70dc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,431 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h,v 13.158.6.3 2010-12-22 23:47:26 Exp $
+ */
+
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+
+
+#ifdef BCMDRIVER
+
+
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+
+extern uint32 g_assert_type;
+
+
+#if defined(BCMASSERT_LOG)
+ #define ASSERT(exp) \
+ do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(char *exp, char *file, int line);
+#else
+ #ifdef __GNUC__
+ #define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+ #if GCC_VERSION > 30100
+ #define ASSERT(exp) do {} while (0)
+ #else
+
+ #define ASSERT(exp)
+ #endif
+ #endif
+#endif
+
+
+#define OSL_DELAY(usec) osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+
+#define OSL_PCI_READ_CONFIG(osh, offset, size) \
+ osl_pci_read_config((osh), (offset), (size))
+#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+ osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+
+
+typedef struct {
+ bool pkttag;
+ uint pktalloced;
+ bool mmbus;
+ pktfree_cb_fn_t tx_fn;
+ void *tx_ctx;
+} osl_pubinfo_t;
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+ ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+ } while (0)
+
+
+
+#define BUS_SWAP32(v) (v)
+
+ #define MALLOC(osh, size) osl_malloc((osh), (size))
+ #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size))
+ #define MALLOCED(osh) osl_malloced((osh))
+ extern void *osl_malloc(osl_t *osh, uint size);
+ extern void osl_mfree(osl_t *osh, void *addr, uint size);
+ extern uint osl_malloced(osl_t *osh);
+
+#define NATIVE_MALLOC(osh, size) kmalloc(size, GFP_ATOMIC)
+#define NATIVE_MFREE(osh, addr, size) kfree(addr)
+
+#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+
+#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
+#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+
+
+#define DMA_TX 1
+#define DMA_RX 2
+
+
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+ osl_dma_map((osh), (va), (size), (direction))
+#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+
+
+ #include <bcmsdh.h>
+ #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
+ #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+ mmap_op else bus_op
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+ mmap_op : bus_op
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+
+#define PKTBUFSZ 2048
+
+
+
+#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
+#define printf(fmt, args...) printk(fmt , ## args)
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+
+#ifndef __mips__
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(uint8) ? readb((volatile uint8*)(r)) : \
+ sizeof(*(r)) == sizeof(uint16) ? readw((volatile uint16*)(r)) : \
+ readl((volatile uint32*)(r)), OSL_READ_REG(osh, r)) \
+)
+#else
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ __asm__ __volatile__("sync"); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __asm__ __volatile__("sync"); \
+ __osl_v; \
+ }), \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ __asm__ __volatile__("sync"); \
+ __osl_v = OSL_READ_REG(osh, r); \
+ __asm__ __volatile__("sync"); \
+ __osl_v; \
+ })) \
+)
+#endif
+
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
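
R_REG()/W_REG() and the derived AND/OR forms take a pointer into a mapped register block and pick byte/word/dword access from the pointer type. The register layout below is hypothetical; real layouts come from the per-core headers.

    typedef volatile struct {
        uint32 intstatus;
        uint32 intmask;
    } example_regs_t;

    static void example_mask_irqs(osl_t *osh, example_regs_t *regs)
    {
        W_REG(osh, &regs->intmask, 0);          /* mask everything */
        (void)R_REG(osh, &regs->intstatus);     /* read back to post the write */
    }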
+
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+#ifdef __mips__
+#include <asm/addrspace.h>
+#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va)))
+#define OSL_CACHED(va) ((void *)KSEG0ADDR((va)))
+#else
+#define OSL_UNCACHED(va) ((void *)va)
+#define OSL_CACHED(va) ((void *)va)
+#endif
+
+
+#if defined(__i386__)
+#define OSL_GETCYCLES(x) rdtscl((x))
+#else
+#define OSL_GETCYCLES(x) ((x) = 0)
+#endif
+
+
+#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
+
+
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif
+#define REG_UNMAP(va) iounmap((va))
+
+
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define BZERO_SM(r, len) memset((r), '\0', (len))
+
+
+#include <linuxver.h>
+
+
+#define PKTGET(osh, len, send) osl_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#define PKTLIST_DUMP(osh, buf)
+#define PKTDBG_TRACE(osh, pkt, bit)
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#ifdef DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#endif
+#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data)
+#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next)
+#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len))
+#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes))
+#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes))
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh) ((osl_pubinfo_t *)(osh))->pktalloced
+#define PKTSETPOOL(osh, skb, x, y) do {} while (0)
+#define PKTPOOL(osh, skb) FALSE
+#define PKTSHRINK(osh, m) (m)
+
+#ifdef CTFPOOL
+#define CTFPOOL_REFILL_THRESH 3
+typedef struct ctfpool {
+ void *head;
+ spinlock_t lock;
+ uint max_obj;
+ uint curr_obj;
+ uint obj_size;
+ uint refills;
+ uint fast_allocs;
+ uint fast_frees;
+ uint slow_allocs;
+} ctfpool_t;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define FASTBUF (1 << 4)
+#define CTFBUF (1 << 5)
+#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF)
+#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF))
+#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= CTFBUF)
+#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~CTFBUF))
+#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & FASTBUF)
+#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & CTFBUF)
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len)
+#else
+#define FASTBUF (1 << 0)
+#define CTFBUF (1 << 1)
+#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= FASTBUF)
+#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF))
+#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= CTFBUF)
+#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~CTFBUF))
+#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) & FASTBUF)
+#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) & CTFBUF)
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
+#endif
+
+#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk)
+#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#endif
+
+#ifdef HNDCTF
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define SKIPCT (1 << 6)
+#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len |= SKIPCT)
+#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT))
+#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len & SKIPCT)
+#else
+#define SKIPCT (1 << 2)
+#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused |= SKIPCT)
+#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused &= (~SKIPCT))
+#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused & SKIPCT)
+#endif
+#else
+#define PKTSETSKIPCT(osh, skb)
+#define PKTCLRSKIPCT(osh, skb)
+#define PKTSKIPCT(osh, skb)
+#endif
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+
+
+static INLINE void *
+osl_pkt_frmnative(osl_pubinfo_t *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+
+ if (osh->pkttag)
+ bzero((void*)((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ osh->pktalloced++;
+ }
+
+ return (void *)pkt;
+}
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb))
+
+
+static INLINE struct sk_buff *
+osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+
+ if (osh->pkttag)
+ bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ osh->pktalloced--;
+ }
+
+ return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+
+
+#else
+
+
+
+ #define ASSERT(exp) do {} while (0)
+
+
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+
+#include <string.h>
+
+
+#include <stdio.h>
+
+
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 000000000000..96844db2f059
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,593 @@
+/*
+ * Linux-specific abstractions to gain some independence from Linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h,v 13.53.2.2 2010-12-22 23:47:26 Exp $
+ */
+
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+ MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
+#else
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
+typedef void (*work_func_t)(void *work);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef CONFIG_NET_RADIO
+#define CONFIG_WIRELESS_EXT
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#define __devinit __init
+#endif
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x) x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev) (dev)->sysdata
+#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
+
+
+
+struct pci_device_id {
+ unsigned int vendor, device;
+ unsigned int subvendor, subdevice;
+ unsigned int class, class_mask;
+ unsigned long driver_data;
+};
+
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table;
+ int (*probe)(struct pci_dev *dev,
+ const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *dev);
+ void (*suspend)(struct pci_dev *dev);
+ void (*resume)(struct pci_dev *dev);
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x) __initcall(x);
+#define module_exit(x) __exitcall(x);
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL_INPUT)
+#define WL_CONFIG_RFKILL_INPUT
+#else
+#undef WL_CONFIG_RFKILL_INPUT
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+
+
+#ifndef PCI_DMA_TODEVICE
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#endif
+
+typedef u32 dma_addr_t;
+
+
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a) dev_kfree_skb(a)
+#define netif_down(dev) do { (dev)->start = 0; } while (0)
+
+
+#ifndef _COMPAT_NETDEVICE_H
+
+
+
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+ do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+}
+
+#define netif_queue_stopped(dev) (dev)->tbusy
+#define netif_running(dev) (dev)->start
+
+#endif
+
+#define netif_device_attach(dev) netif_start_queue(dev)
+#define netif_device_detach(dev) netif_stop_queue(dev)
+
+
+#define tasklet_struct tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+ queue_task(tasklet, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+ void (*func)(unsigned long),
+ unsigned long data)
+{
+ tasklet->next = NULL;
+ tasklet->sync = 0;
+ tasklet->routine = (void (*)(void *))func;
+ tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet) { do {} while (0); }
+
+
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+
+
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+#endif
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define PCI_SAVE_STATE(a, b) pci_save_state(a)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
+#else
+#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4, &buffer[i]);
+ }
+ return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(dev, i * 4, buffer[i]);
+ }
+
+ else {
+ for (i = 0; i < 6; i ++)
+ pci_write_config_dword(dev,
+ PCI_BASE_ADDRESS_0 + (i * 4),
+ pci_resource_start(dev, i));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+ return 0;
+}
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#else
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do {} while (0)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev) kfree(dev)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#define af_packet_priv data
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+ void *parent;
+ struct task_struct *p_task;
+ long thr_pid;
+ int prio;
+ struct semaphore sema;
+ bool terminated;
+ struct completion completed;
+} tsk_ctl_t;
+
+
+
+
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags) \
+{ \
+ sema_init(&((tsk_ctl)->sema), 0); \
+ init_completion(&((tsk_ctl)->completed)); \
+ (tsk_ctl)->parent = owner; \
+ (tsk_ctl)->terminated = FALSE; \
+ (tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \
+ if ((tsk_ctl)->thr_pid > 0) \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+ (tsk_ctl)->terminated = TRUE; \
+ smp_wmb(); \
+ up(&((tsk_ctl)->sema)); \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->thr_pid = -1; \
+}
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid; \
+pid = find_get_pid((pid_t)nr); \
+tsk = pid_task(pid, PIDTYPE_PID); \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+ struct task_struct *tsk; \
+ tsk = find_task_by_vpid(pid); \
+ if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+ kill_proc(pid, sig, 1); \
+}
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#define WL_DEV_IF(dev) ((wl_if_t*)netdev_priv(dev))
+#else
+#define WL_DEV_IF(dev) ((wl_if_t*)(dev)->priv)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p) wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 000000000000..f468420f5346
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h,v 1.3 2009-01-15 00:06:54 Exp $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max key length */
+typedef struct miniopt {
+
+ /* These are persistent after miniopt_init() */
+ const char* name; /* name for prompt in error strings */
+ const char* flags; /* option chars that take no args */
+ bool longflags; /* long options may be flags */
+ bool opt_end; /* at end of options (passed a "--") */
+
+ /* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+ * the most recent call to miniopt()
+ */
+ bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+ * strtol conversion of the option value
+ */
+ char opt;
+ char key[MINIOPT_MAXKEY];
+ char* valstr; /* positional param, or value for the option,
+ * or null if the option had
+ * no accompanying value
+ */
+ uint uval; /* strtol translation of valstr */
+ int val; /* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
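+/*
+ * Illustrative usage sketch (an assumption for documentation purposes, not
+ * taken from the parser implementation): assuming miniopt() returns 0 when it
+ * consumes an argument, a positive value on a malformed option and -1 once
+ * argv is exhausted, and with handle_positional()/handle_int_opt() standing in
+ * for hypothetical caller-supplied helpers, a caller could loop as follows:
+ *
+ *	miniopt_t to;
+ *	int opt_err;
+ *
+ *	miniopt_init(&to, "mytool", NULL, FALSE);
+ *	while ((opt_err = miniopt(&to, argv)) != -1) {
+ *		if (opt_err > 0)
+ *			break;
+ *		argv += to.consumed;
+ *		if (to.positional)
+ *			handle_positional(to.valstr);
+ *		else if (to.good_int)
+ *			handle_int_opt(to.opt, to.val);
+ *	}
+ */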
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* MINI_OPT_H */
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 000000000000..721d42100f2a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,74 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h,v 1.4 2009-04-10 04:15:32 Exp $
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+ uint8 version;
+ uint8 spare;
+ uint16 len; /* Len of the trace */
+	uint32 seqnum;   /* Sequence number of the message. Useful if the message has been lost
+	                  * because of a DMA error or a bus reset (e.g. SDIO Func2)
+ */
+ uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */
+	uint32 discarded_printf; /* Number of discarded printf calls because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message, which would cause endless
+ * traces. This flag must therefore be set to TRUE in any hbus trace; it is reset in the
+ * function msgtrace_put. This prevents endless traces, at the cost of occasionally losing
+ * traces, but only in bus device code. It is recommended to set this flag in the SD_TRACE
+ * macro but not in SD_ERROR, so that hbus error traces are not missed; an hbus error trace
+ * should not generate endless traces.
+ */
+extern bool msgtrace_hbus_trace;
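+
+/* Illustrative sketch (an assumption, not taken from this header or its users):
+ * a bus driver trace macro would set the flag before emitting its own trace so
+ * that the resulting trace message is not traced again, e.g.:
+ *
+ *	#define SD_TRACE(args) \
+ *		do { msgtrace_hbus_trace = TRUE; printf args; } while (0)
+ *
+ * msgtrace_put() then resets the flag once the trace has been captured.
+ */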
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+ uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern void msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 000000000000..80248ee7604e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,66 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h,v 13.44.96.1 2010-05-20 11:09:18 Exp $
+ */
+
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ 32
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)
+#endif
+
+
+
+#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 000000000000..5d4a87678071
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,54 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler-specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h,v 1.4 2008-12-09 23:43:22 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #undef BWL_PACKED_SECTION
+#else
+ #error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef BWL_PRE_PACKED_STRUCT
+#undef BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 000000000000..da2fed68afac
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,61 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler-specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h,v 1.4.124.1 2010-09-17 00:47:03 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #error "BWL_PACKED_SECTION is already defined!"
+#else
+ #define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT __attribute__ ((packed))
+#elif defined(__CC_ARM)
+ #define BWL_PRE_PACKED_STRUCT __packed
+ #define BWL_POST_PACKED_STRUCT
+#else
+ #error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 000000000000..fae063a72f18
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,52 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h,v 1.50 2009-12-07 21:56:06 Exp $
+ */
+
+
+#ifndef _h_pcicfg_
+#define _h_pcicfg_
+
+
+#define PCI_CFG_VID 0
+#define PCI_CFG_CMD 4
+#define PCI_CFG_REV 8
+#define PCI_CFG_BAR0 0x10
+#define PCI_CFG_BAR1 0x14
+#define PCI_BAR0_WIN 0x80
+#define PCI_INT_STATUS 0x90
+#define PCI_INT_MASK 0x94
+
+#define PCIE_EXTCFG_OFFSET 0x100
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
+
+#define PCI_BAR0_WINSZ (16 * 1024)
+
+
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
+#define PCI_16KBB0_WINSZ (16 * 1024)
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
new file mode 100644
index 000000000000..2342cb383147
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
@@ -0,0 +1,1731 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h,v 9.260.2.6 2010-12-15 21:41:14 Exp $
+ */
+
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US 1024
+
+
+#define DOT11_A3_HDR_LEN 24
+#define DOT11_A4_HDR_LEN 30
+#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN
+#define DOT11_FCS_LEN 4
+#define DOT11_ICV_LEN 4
+#define DOT11_ICV_AES_LEN 8
+#define DOT11_QOS_LEN 2
+#define DOT11_HTC_LEN 4
+
+#define DOT11_KEY_INDEX_SHIFT 6
+#define DOT11_IV_LEN 4
+#define DOT11_IV_TKIP_LEN 8
+#define DOT11_IV_AES_OCB_LEN 4
+#define DOT11_IV_AES_CCM_LEN 8
+#define DOT11_IV_MAX_LEN 8
+
+
+#define DOT11_MAX_MPDU_BODY_LEN 2304
+
+#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ DOT11_IV_AES_CCM_LEN + \
+ DOT11_MAX_MPDU_BODY_LEN + \
+ DOT11_ICV_LEN + \
+ DOT11_FCS_LEN)
+
+#define DOT11_MAX_SSID_LEN 32
+
+
+#define DOT11_DEFAULT_RTS_LEN 2347
+#define DOT11_MAX_RTS_LEN 2347
+
+
+#define DOT11_MIN_FRAG_LEN 256
+#define DOT11_MAX_FRAG_LEN 2346
+#define DOT11_DEFAULT_FRAG_LEN 2346
+
+
+#define DOT11_MIN_BEACON_PERIOD 1
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF
+
+
+#define DOT11_MIN_DTIM_PERIOD 1
+#define DOT11_MAX_DTIM_PERIOD 0xFF
+
+
+#define DOT11_LLC_SNAP_HDR_LEN 8
+#define DOT11_OUI_LEN 3
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+ uint8 dsap;
+ uint8 ssap;
+ uint8 ctl;
+ uint8 oui[DOT11_OUI_LEN];
+ uint16 type;
+} BWL_POST_PACKED_STRUCT;
+
+
+#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr a1;
+ struct ether_addr a2;
+ struct ether_addr a3;
+ uint16 seq;
+ struct ether_addr a4;
+} BWL_POST_PACKED_STRUCT;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_RTS_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTS_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACK_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr bssid;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_PS_POLL_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CS_END_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+#define DOT11_ACTION_VS_HDR_LEN 6
+
+#define BCM_ACTION_OUI_BYTE0 0x00
+#define BCM_ACTION_OUI_BYTE1 0x90
+#define BCM_ACTION_OUI_BYTE2 0x4c
+
+
+#define DOT11_BA_CTL_POLICY_NORMAL 0x0000
+#define DOT11_BA_CTL_POLICY_NOACK 0x0001
+#define DOT11_BA_CTL_POLICY_MASK 0x0001
+
+#define DOT11_BA_CTL_MTID 0x0002
+#define DOT11_BA_CTL_COMPRESSED 0x0004
+
+#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0
+#define DOT11_BA_CTL_NUMMSDU_SHIFT 6
+
+#define DOT11_BA_CTL_TID_MASK 0xF000
+#define DOT11_BA_CTL_TID_SHIFT 12
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+ uint16 bar_control;
+ uint16 seqnum;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN 4
+
+#define DOT11_BA_BITMAP_LEN 128
+#define DOT11_BA_CMP_BITMAP_LEN 8
+
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+ uint16 ba_control;
+ uint16 seqnum;
+ uint8 bitmap[DOT11_BA_BITMAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr da;
+ struct ether_addr sa;
+ struct ether_addr bssid;
+ uint16 seq;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_HDR_LEN 24
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+ uint32 timestamp[2];
+ uint16 beacon_interval;
+ uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BCN_PRB_LEN 12
+#define DOT11_BCN_PRB_FIXED_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+ uint16 alg;
+ uint16 seq;
+ uint16 status;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+ uint16 capability;
+ uint16 listen;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+ uint16 capability;
+ uint16 listen;
+ struct ether_addr ap;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+ uint16 capability;
+ uint16 status;
+ uint16 aid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+ uint8 category;
+ uint8 action;
+ uint8 ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+ uint8 category;
+ uint8 action;
+ uint8 control;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE 1
+#define SM_PWRSAVE_MODE 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+ uint8 id;
+ uint8 len;
+ uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+ uint8 min;
+ uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+ uint8 id;
+ uint8 len;
+ uint8 tx_pwr;
+ uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+ uint8 id;
+ uint8 len;
+ uint8 first_channel;
+ uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+ uint8 id;
+ uint8 len;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN 5
+#define BRCM_EXTCH_IE_TYPE 53
+#define DOT11_EXTCH_IE_LEN 1
+#define DOT11_EXT_CH_MASK 0x03
+#define DOT11_EXT_CH_UPPER 0x01
+#define DOT11_EXT_CH_LOWER 0x03
+#define DOT11_EXT_CH_NONE 0x00
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+ uint8 category;
+ uint8 action;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+ uint8 id;
+ uint8 len;
+ uint8 mode;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3
+
+#define DOT11_CSA_MODE_ADVISORY 0
+#define DOT11_CSA_MODE_NO_TX 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+ uint8 category;
+ uint8 action;
+ dot11_chan_switch_ie_t chan_switch_ie;
+ dot11_brcm_extch_ie_t extch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+ uint8 mode;
+ uint8 reg;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+ uint8 id;
+ uint8 len;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ dot11_ext_csa_ie_t chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+ uint8 id;
+ uint8 len;
+ uint8 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1
+
+#define DOT11_OBSS_COEX_INFO_REQ 0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+ uint8 id;
+ uint8 len;
+ uint8 regclass;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+#define DOT11_EXTCAP_LEN 1
+
+
+
+#define DOT11_MEASURE_TYPE_BASIC 0
+#define DOT11_MEASURE_TYPE_CCA 1
+#define DOT11_MEASURE_TYPE_RPI 2
+#define DOT11_MEASURE_TYPE_CHLOAD 3
+#define DOT11_MEASURE_TYPE_NOISE 4
+#define DOT11_MEASURE_TYPE_BEACON 5
+#define DOT11_MEASURE_TYPE_FRAME 6
+#define DOT11_MEASURE_TYPE_STATS 7
+#define DOT11_MEASURE_TYPE_LCI 8
+#define DOT11_MEASURE_TYPE_TXSTREAM 9
+#define DOT11_MEASURE_TYPE_PAUSE 255
+
+
+#define DOT11_MEASURE_MODE_PARALLEL (1<<0)
+#define DOT11_MEASURE_MODE_ENABLE (1<<1)
+#define DOT11_MEASURE_MODE_REQUEST (1<<2)
+#define DOT11_MEASURE_MODE_REPORT (1<<3)
+#define DOT11_MEASURE_MODE_DUR (1<<4)
+
+#define DOT11_MEASURE_MODE_LATE (1<<0)
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1)
+#define DOT11_MEASURE_MODE_REFUSED (1<<2)
+
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0))
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1))
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2))
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3))
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4))
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14
+
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+ } BWL_POST_PACKED_STRUCT basic;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+ uint8 id;
+ uint8 len;
+ uint8 count;
+ uint8 period;
+ uint16 duration;
+ uint16 offset;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+ uint8 channel;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+ uint8 id;
+ uint8 len;
+ uint8 eaddr[ETHER_ADDR_LEN];
+ uint8 interval;
+ chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+
+#define WME_OUI "\x00\x50\xf2"
+#define WME_OUI_LEN 3
+#define WME_OUI_TYPE 2
+#define WME_VER 1
+#define WME_TYPE 2
+#define WME_SUBTYPE_IE 0
+#define WME_SUBTYPE_PARAM_IE 1
+#define WME_SUBTYPE_TSPEC 2
+
+
+#define AC_BE 0
+#define AC_BK 1
+#define AC_VI 2
+#define AC_VO 3
+#define AC_COUNT 4
+
+typedef uint8 ac_bitmap_t;
+
+#define AC_BITMAP_NONE 0x0
+#define AC_BITMAP_ALL 0xf
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
+
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+ uint8 ACI;
+ uint8 ECW;
+ uint16 TXOP;
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN 24
+
+
+#define WME_QI_AP_APSD_MASK 0x80
+#define WME_QI_AP_APSD_SHIFT 7
+#define WME_QI_AP_COUNT_MASK 0x0f
+#define WME_QI_AP_COUNT_SHIFT 0
+
+
+#define WME_QI_STA_MAXSPLEN_MASK 0x60
+#define WME_QI_STA_MAXSPLEN_SHIFT 5
+#define WME_QI_STA_APSD_ALL_MASK 0xf
+#define WME_QI_STA_APSD_ALL_SHIFT 0
+#define WME_QI_STA_APSD_BE_MASK 0x8
+#define WME_QI_STA_APSD_BE_SHIFT 3
+#define WME_QI_STA_APSD_BK_MASK 0x4
+#define WME_QI_STA_APSD_BK_SHIFT 2
+#define WME_QI_STA_APSD_VI_MASK 0x2
+#define WME_QI_STA_APSD_VI_SHIFT 1
+#define WME_QI_STA_APSD_VO_MASK 0x1
+#define WME_QI_STA_APSD_VO_SHIFT 0
+
+
+#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MAX 15
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_ACM_MASK 0x10
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_AIFSN_SHIFT 12
+
+
+#define EDCF_ECW_MIN 0
+#define EDCF_ECW_MAX 15
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_MASK 0xf0
+#define EDCF_ECWMAX_SHIFT 4
+
+
+#define EDCF_TXOP_MIN 0
+#define EDCF_TXOP_MAX 65535
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+
+
+#define NON_EDCF_AC_BE_ACI_STA 0x02
+
+
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VO_TXOP_STA 0x002f
+
+
+#define EDCF_AC_BE_ACI_AP 0x03
+#define EDCF_AC_BE_ECW_AP 0x64
+#define EDCF_AC_BE_TXOP_AP 0x0000
+#define EDCF_AC_BK_ACI_AP 0x27
+#define EDCF_AC_BK_ECW_AP 0xA4
+#define EDCF_AC_BK_TXOP_AP 0x0000
+#define EDCF_AC_VI_ACI_AP 0x41
+#define EDCF_AC_VI_ECW_AP 0x43
+#define EDCF_AC_VI_TXOP_AP 0x005e
+#define EDCF_AC_VO_ACI_AP 0x61
+#define EDCF_AC_VO_ECW_AP 0x32
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+ uint8 id;
+ uint8 length;
+ uint16 station_count;
+ uint8 channel_utilization;
+ uint16 aac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+
+
+#define FIXED_MSDU_SIZE 0x8000
+#define MSDU_SIZE_MASK 0x7fff
+
+
+
+#define INTEGER_SHIFT 13
+#define FRACTION_MASK 0x1FFF
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4
+
+
+#define WME_ADDTS_REQUEST 0
+#define WME_ADDTS_RESPONSE 1
+#define WME_DELTS_REQUEST 2
+
+
+#define WME_ADMISSION_ACCEPTED 0
+#define WME_INVALID_PARAMETERS 1
+#define WME_ADMISSION_REFUSED 3
+
+
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+
+#define DOT11_OPEN_SYSTEM 0
+#define DOT11_SHARED_KEY 1
+#define DOT11_OPEN_SHARED 2
+#define DOT11_FAST_BSS 3
+#define DOT11_CHALLENGE_LEN 128
+
+
+#define FC_PVER_MASK 0x3
+#define FC_PVER_SHIFT 0
+#define FC_TYPE_MASK 0xC
+#define FC_TYPE_SHIFT 2
+#define FC_SUBTYPE_MASK 0xF0
+#define FC_SUBTYPE_SHIFT 4
+#define FC_TODS 0x100
+#define FC_TODS_SHIFT 8
+#define FC_FROMDS 0x200
+#define FC_FROMDS_SHIFT 9
+#define FC_MOREFRAG 0x400
+#define FC_MOREFRAG_SHIFT 10
+#define FC_RETRY 0x800
+#define FC_RETRY_SHIFT 11
+#define FC_PM 0x1000
+#define FC_PM_SHIFT 12
+#define FC_MOREDATA 0x2000
+#define FC_MOREDATA_SHIFT 13
+#define FC_WEP 0x4000
+#define FC_WEP_SHIFT 14
+#define FC_ORDER 0x8000
+#define FC_ORDER_SHIFT 15
+
+
+#define SEQNUM_SHIFT 4
+#define SEQNUM_MAX 0x1000
+#define FRAGNUM_MASK 0xF
+
+
+
+
+#define FC_TYPE_MNG 0
+#define FC_TYPE_CTL 1
+#define FC_TYPE_DATA 2
+
+
+#define FC_SUBTYPE_ASSOC_REQ 0
+#define FC_SUBTYPE_ASSOC_RESP 1
+#define FC_SUBTYPE_REASSOC_REQ 2
+#define FC_SUBTYPE_REASSOC_RESP 3
+#define FC_SUBTYPE_PROBE_REQ 4
+#define FC_SUBTYPE_PROBE_RESP 5
+#define FC_SUBTYPE_BEACON 8
+#define FC_SUBTYPE_ATIM 9
+#define FC_SUBTYPE_DISASSOC 10
+#define FC_SUBTYPE_AUTH 11
+#define FC_SUBTYPE_DEAUTH 12
+#define FC_SUBTYPE_ACTION 13
+#define FC_SUBTYPE_ACTION_NOACK 14
+
+
+#define FC_SUBTYPE_CTL_WRAPPER 7
+#define FC_SUBTYPE_BLOCKACK_REQ 8
+#define FC_SUBTYPE_BLOCKACK 9
+#define FC_SUBTYPE_PS_POLL 10
+#define FC_SUBTYPE_RTS 11
+#define FC_SUBTYPE_CTS 12
+#define FC_SUBTYPE_ACK 13
+#define FC_SUBTYPE_CF_END 14
+#define FC_SUBTYPE_CF_END_ACK 15
+
+
+#define FC_SUBTYPE_DATA 0
+#define FC_SUBTYPE_DATA_CF_ACK 1
+#define FC_SUBTYPE_DATA_CF_POLL 2
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3
+#define FC_SUBTYPE_NULL 4
+#define FC_SUBTYPE_CF_ACK 5
+#define FC_SUBTYPE_CF_POLL 6
+#define FC_SUBTYPE_CF_ACK_POLL 7
+#define FC_SUBTYPE_QOS_DATA 8
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11
+#define FC_SUBTYPE_QOS_NULL 12
+#define FC_SUBTYPE_QOS_CF_POLL 14
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15
+
+
+#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
+
+
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK)
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)
+
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)
+
+
+
+
+#define QOS_PRIO_SHIFT 0
+#define QOS_PRIO_MASK 0x0007
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)
+
+
+#define QOS_TID_SHIFT 0
+#define QOS_TID_MASK 0x000f
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)
+
+
+#define QOS_EOSP_SHIFT 4
+#define QOS_EOSP_MASK 0x0010
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)
+
+
+#define QOS_ACK_NORMAL_ACK 0
+#define QOS_ACK_NO_ACK 1
+#define QOS_ACK_NO_EXP_ACK 2
+#define QOS_ACK_BLOCK_ACK 3
+#define QOS_ACK_SHIFT 5
+#define QOS_ACK_MASK 0x0060
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)
+
+
+#define QOS_AMSDU_SHIFT 7
+#define QOS_AMSDU_MASK 0x0080
+
+
+
+
+
+
+#define DOT11_MNG_AUTH_ALGO_LEN 2
+#define DOT11_MNG_AUTH_SEQ_LEN 2
+#define DOT11_MNG_BEACON_INT_LEN 2
+#define DOT11_MNG_CAP_LEN 2
+#define DOT11_MNG_AP_ADDR_LEN 6
+#define DOT11_MNG_LISTEN_INT_LEN 2
+#define DOT11_MNG_REASON_LEN 2
+#define DOT11_MNG_AID_LEN 2
+#define DOT11_MNG_STATUS_LEN 2
+#define DOT11_MNG_TIMESTAMP_LEN 8
+
+
+#define DOT11_AID_MASK 0x3fff
+
+
+#define DOT11_RC_RESERVED 0
+#define DOT11_RC_UNSPECIFIED 1
+#define DOT11_RC_AUTH_INVAL 2
+#define DOT11_RC_DEAUTH_LEAVING 3
+#define DOT11_RC_INACTIVITY 4
+#define DOT11_RC_BUSY 5
+#define DOT11_RC_INVAL_CLASS_2 6
+#define DOT11_RC_INVAL_CLASS_3 7
+#define DOT11_RC_DISASSOC_LEAVING 8
+#define DOT11_RC_NOT_AUTH 9
+#define DOT11_RC_BAD_PC 10
+#define DOT11_RC_BAD_CHANNELS 11
+
+
+
+#define DOT11_RC_UNSPECIFIED_QOS 32
+#define DOT11_RC_INSUFFCIENT_BW 33
+#define DOT11_RC_EXCESSIVE_FRAMES 34
+#define DOT11_RC_TX_OUTSIDE_TXOP 35
+#define DOT11_RC_LEAVING_QBSS 36
+#define DOT11_RC_BAD_MECHANISM 37
+#define DOT11_RC_SETUP_NEEDED 38
+#define DOT11_RC_TIMEOUT 39
+
+#define DOT11_RC_MAX 23
+
+
+#define DOT11_SC_SUCCESS 0
+#define DOT11_SC_FAILURE 1
+#define DOT11_SC_CAP_MISMATCH 10
+#define DOT11_SC_REASSOC_FAIL 11
+#define DOT11_SC_ASSOC_FAIL 12
+#define DOT11_SC_AUTH_MISMATCH 13
+#define DOT11_SC_AUTH_SEQ 14
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15
+#define DOT11_SC_AUTH_TIMEOUT 16
+#define DOT11_SC_ASSOC_BUSY_FAIL 17
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25
+#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26
+#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27
+
+#define DOT11_SC_DECLINED 37
+#define DOT11_SC_INVALID_PARAMS 38
+#define DOT11_SC_INVALID_AKMP 43
+#define DOT11_SC_INVALID_MDID 54
+#define DOT11_SC_INVALID_FTIE 55
+
+
+#define DOT11_MNG_DS_PARAM_LEN 1
+#define DOT11_MNG_IBSS_PARAM_LEN 2
+
+
+#define DOT11_MNG_TIM_FIXED_LEN 3
+#define DOT11_MNG_TIM_DTIM_COUNT 0
+#define DOT11_MNG_TIM_DTIM_PERIOD 1
+#define DOT11_MNG_TIM_BITMAP_CTL 2
+#define DOT11_MNG_TIM_PVB 3
+
+
+#define TLV_TAG_OFF 0
+#define TLV_LEN_OFF 1
+#define TLV_HDR_LEN 2
+#define TLV_BODY_OFF 2
+
+
+#define DOT11_MNG_SSID_ID 0
+#define DOT11_MNG_RATES_ID 1
+#define DOT11_MNG_FH_PARMS_ID 2
+#define DOT11_MNG_DS_PARMS_ID 3
+#define DOT11_MNG_CF_PARMS_ID 4
+#define DOT11_MNG_TIM_ID 5
+#define DOT11_MNG_IBSS_PARMS_ID 6
+#define DOT11_MNG_COUNTRY_ID 7
+#define DOT11_MNG_HOPPING_PARMS_ID 8
+#define DOT11_MNG_HOPPING_TABLE_ID 9
+#define DOT11_MNG_REQUEST_ID 10
+#define DOT11_MNG_QBSS_LOAD_ID 11
+#define DOT11_MNG_EDCA_PARAM_ID 12
+#define DOT11_MNG_CHALLENGE_ID 16
+#define DOT11_MNG_PWR_CONSTRAINT_ID 32
+#define DOT11_MNG_PWR_CAP_ID 33
+#define DOT11_MNG_TPC_REQUEST_ID 34
+#define DOT11_MNG_TPC_REPORT_ID 35
+#define DOT11_MNG_SUPP_CHANNELS_ID 36
+#define DOT11_MNG_CHANNEL_SWITCH_ID 37
+#define DOT11_MNG_MEASURE_REQUEST_ID 38
+#define DOT11_MNG_MEASURE_REPORT_ID 39
+#define DOT11_MNG_QUIET_ID 40
+#define DOT11_MNG_IBSS_DFS_ID 41
+#define DOT11_MNG_ERP_ID 42
+#define DOT11_MNG_TS_DELAY_ID 43
+#define DOT11_MNG_HT_CAP 45
+#define DOT11_MNG_QOS_CAP_ID 46
+#define DOT11_MNG_NONERP_ID 47
+#define DOT11_MNG_RSN_ID 48
+#define DOT11_MNG_EXT_RATES_ID 50
+#define DOT11_MNG_AP_CHREP_ID 51
+#define DOT11_MNG_NBR_REP_ID 52
+#define DOT11_MNG_MDIE_ID 54
+#define DOT11_MNG_FTIE_ID 55
+#define DOT11_MNG_FT_TI_ID 56
+#define DOT11_MNG_REGCLASS_ID 59
+#define DOT11_MNG_EXT_CSA_ID 60
+#define DOT11_MNG_HT_ADD 61
+#define DOT11_MNG_EXT_CHANNEL_OFFSET 62
+
+
+#define DOT11_MNG_RRM_CAP_ID 70
+#define DOT11_MNG_HT_BSS_COEXINFO_ID 72
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73
+#define DOT11_MNG_HT_OBSS_ID 74
+#define DOT11_MNG_EXT_CAP 127
+#define DOT11_MNG_WPA_ID 221
+#define DOT11_MNG_PROPR_ID 221
+
+#define DOT11_MNG_VS_ID 221
+
+
+#define DOT11_RATE_BASIC 0x80
+#define DOT11_RATE_MASK 0x7F
+
+
+#define DOT11_MNG_ERP_LEN 1
+#define DOT11_MNG_NONERP_PRESENT 0x01
+#define DOT11_MNG_USE_PROTECTION 0x02
+#define DOT11_MNG_BARKER_PREAMBLE 0x04
+
+#define DOT11_MGN_TS_DELAY_LEN 4
+#define TS_DELAY_FIELD_SIZE 4
+
+
+#define DOT11_CAP_ESS 0x0001
+#define DOT11_CAP_IBSS 0x0002
+#define DOT11_CAP_POLLABLE 0x0004
+#define DOT11_CAP_POLL_RQ 0x0008
+#define DOT11_CAP_PRIVACY 0x0010
+#define DOT11_CAP_SHORT 0x0020
+#define DOT11_CAP_PBCC 0x0040
+#define DOT11_CAP_AGILITY 0x0080
+#define DOT11_CAP_SPECTRUM 0x0100
+#define DOT11_CAP_SHORTSLOT 0x0400
+#define DOT11_CAP_RRM 0x1000
+#define DOT11_CAP_CCK_OFDM 0x2000
+
+
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01
+
+
+#define DOT11_ACTION_HDR_LEN 2
+#define DOT11_ACTION_CAT_OFF 0
+#define DOT11_ACTION_ACT_OFF 1
+
+
+#define DOT11_ACTION_CAT_ERR_MASK 0x80
+#define DOT11_ACTION_CAT_MASK 0x7F
+#define DOT11_ACTION_CAT_SPECT_MNG 0
+#define DOT11_ACTION_CAT_QOS 1
+#define DOT11_ACTION_CAT_DLS 2
+#define DOT11_ACTION_CAT_BLOCKACK 3
+#define DOT11_ACTION_CAT_PUBLIC 4
+#define DOT11_ACTION_CAT_RRM 5
+#define DOT11_ACTION_CAT_FBT 6
+#define DOT11_ACTION_CAT_HT 7
+#define DOT11_ACTION_CAT_BSSMGMT 10
+#define DOT11_ACTION_NOTIFICATION 17
+#define DOT11_ACTION_CAT_VS 127
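+
+/*
+ * Editor's illustrative sketch (not part of the original header): the first
+ * two octets of an Action frame body are the category and action codes at the
+ * offsets above; the header reserves the top category bit as an error flag,
+ * so the category proper is the low 7 bits. `body` is assumed to point at the
+ * frame body.
+ *
+ *	uint8 category = body[DOT11_ACTION_CAT_OFF] & DOT11_ACTION_CAT_MASK;
+ *	uint8 action   = body[DOT11_ACTION_ACT_OFF];
+ *	// e.g. category == DOT11_ACTION_CAT_BLOCKACK for ADDBA/DELBA frames
+ */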
+
+
+#define DOT11_SM_ACTION_M_REQ 0
+#define DOT11_SM_ACTION_M_REP 1
+#define DOT11_SM_ACTION_TPC_REQ 2
+#define DOT11_SM_ACTION_TPC_REP 3
+#define DOT11_SM_ACTION_CHANNEL_SWITCH 4
+#define DOT11_SM_ACTION_EXT_CSA 5
+
+
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0
+#define DOT11_ACTION_ID_HT_MIMO_PS 1
+
+
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4
+
+
+#define DOT11_BA_ACTION_ADDBA_REQ 0
+#define DOT11_BA_ACTION_ADDBA_RESP 1
+#define DOT11_BA_ACTION_DELBA 2
+
+
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6
+
+#define DOT11_ADDBA_POLICY_DELAYED 0
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 addba_param_set;
+ uint16 timeout;
+ uint16 start_seqnum;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 status;
+ uint16 addba_param_set;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9
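+
+/*
+ * Editor's illustrative sketch (not part of the original header): decoding the
+ * Block Ack parameter set of a received ADDBA request using the masks above.
+ * Assumes addba_param_set has already been converted from little-endian wire
+ * order to host order.
+ *
+ *	dot11_addba_req_t *req;
+ *	uint16 params = req->addba_param_set;
+ *	uint8 tid    = (params & DOT11_ADDBA_PARAM_TID_MASK) >> DOT11_ADDBA_PARAM_TID_SHIFT;
+ *	uint8 policy = (params & DOT11_ADDBA_PARAM_POLICY_MASK) >> DOT11_ADDBA_PARAM_POLICY_SHIFT;
+ *	uint16 bsize = (params & DOT11_ADDBA_PARAM_BSIZE_MASK) >> DOT11_ADDBA_PARAM_BSIZE_SHIFT;
+ *	// policy == DOT11_ADDBA_POLICY_IMMEDIATE selects immediate Block Ack
+ */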
+
+
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000
+#define DOT11_DELBA_PARAM_TID_SHIFT 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+ uint8 category;
+ uint8 action;
+ uint16 delba_param_set;
+ uint16 reason;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN 6
+
+
+
+
+
+#define DOT11_RRM_CAP_LEN 5
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+ uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+
+#define DOT11_RRM_CAP_LINK 0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
+#define DOT11_RRM_CAP_PARALLEL 2
+#define DOT11_RRM_CAP_REPEATED 3
+#define DOT11_RRM_CAP_BCN_PASSIVE 4
+#define DOT11_RRM_CAP_BCN_ACTIVE 5
+#define DOT11_RRM_CAP_BCN_TABLE 6
+#define DOT11_RRM_CAP_BCN_REP_COND 7
+#define DOT11_RRM_CAP_AP_CHANREP 16
+
+
+#define DOT11_RM_ACTION_RM_REQ 0
+#define DOT11_RM_ACTION_RM_REP 1
+#define DOT11_RM_ACTION_LM_REQ 2
+#define DOT11_RM_ACTION_LM_REP 3
+#define DOT11_RM_ACTION_NR_REQ 4
+#define DOT11_RM_ACTION_NR_REP 5
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 reps;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN 5
+
+
+#define DOT11_RMREQ_MODE_PARALLEL 1
+#define DOT11_RMREQ_MODE_ENABLE 2
+#define DOT11_RMREQ_MODE_REQUEST 4
+#define DOT11_RMREQ_MODE_REPORT 8
+#define DOT11_RMREQ_MODE_DURMAND 0x10
+
+
+#define DOT11_RMREP_MODE_LATE 1
+#define DOT11_RMREP_MODE_INCAPABLE 2
+#define DOT11_RMREP_MODE_REFUSED 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+ uint8 bcn_mode;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN 18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+ uint8 frame_info;
+ uint8 rcpi;
+ uint8 rsni;
+ struct ether_addr bssid;
+ uint8 antenna_id;
+ uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN 26
+
+
+#define DOT11_RMREQ_BCN_PASSIVE 0
+#define DOT11_RMREQ_BCN_ACTIVE 1
+#define DOT11_RMREQ_BCN_TABLE 2
+
+
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID 1
+#define DOT11_RMREQ_BCN_REPDET_ID 2
+#define DOT11_RMREQ_BCN_REQUEST_ID 10
+#define DOT11_RMREQ_BCN_APCHREP_ID 51
+
+
+#define DOT11_RMREQ_BCN_REPDET_FIXED 0
+#define DOT11_RMREQ_BCN_REPDET_REQUEST 1
+#define DOT11_RMREQ_BCN_REPDET_ALL 2
+
+
+#define DOT11_RMREP_BCN_FRM_BODY 1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_nbr {
+ struct ether_addr bssid;
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_nbr dot11_rmrep_nbr_t;
+#define DOT11_RMREP_NBR_LEN 13
+
+
+#define DOT11_BSSTYPE_INFRASTRUCTURE 0
+#define DOT11_BSSTYPE_INDEPENDENT 1
+#define DOT11_BSSTYPE_ANY 2
+#define DOT11_SCANTYPE_ACTIVE 0
+#define DOT11_SCANTYPE_PASSIVE 1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 txpwr;
+ uint8 maxtxpwr;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ dot11_tpc_rep_t tpc;
+ uint8 rxant;
+ uint8 txant;
+ uint8 rcpi;
+ uint8 rsni;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN 11
+
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
+
+
+#define RIFS_11N_TIME 2
+
+
+
+#define HT_SIG1_MCS_MASK 0x00007F
+#define HT_SIG1_CBW 0x000080
+#define HT_SIG1_HT_LENGTH 0xFFFF00
+
+
+#define HT_SIG2_SMOOTHING 0x000001
+#define HT_SIG2_NOT_SOUNDING 0x000002
+#define HT_SIG2_RESERVED 0x000004
+#define HT_SIG2_AGGREGATION 0x000008
+#define HT_SIG2_STBC_MASK 0x000030
+#define HT_SIG2_STBC_SHIFT 4
+#define HT_SIG2_FEC_CODING 0x000040
+#define HT_SIG2_SHORT_GI 0x000080
+#define HT_SIG2_ESS_MASK 0x000300
+#define HT_SIG2_ESS_SHIFT 8
+#define HT_SIG2_CRC 0x03FC00
+#define HT_SIG2_TAIL 0x1C0000
+
+
+#define APHY_SLOT_TIME 9
+#define APHY_SIFS_TIME 16
+#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SYMBOL_TIME 4
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define APHY_CWMIN 15
+
+
+#define BPHY_SLOT_TIME 20
+#define BPHY_SIFS_TIME 10
+#define BPHY_DIFS_TIME 50
+#define BPHY_PLCP_TIME 192
+#define BPHY_PLCP_SHORT_TIME 96
+#define BPHY_CWMIN 31
+
+
+#define DOT11_OFDM_SIGNAL_EXTENSION 6
+
+#define PHY_CWMAX 1023
+
+#define DOT11_MAXNUMFRAGS 16
+
+
+typedef struct d11cnt {
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretrie;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 rxfrag;
+ uint32 rxmulti;
+ uint32 rxcrc;
+ uint32 txfrmsnt;
+ uint32 rxundec;
+} d11cnt_t;
+
+
+#define BRCM_PROP_OUI "\x00\x90\x4C"
+
+
+
+#define BRCM_OUI "\x00\x10\x18"
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 ver;
+ uint8 assoc;
+ uint8 flags;
+ uint8 flags1;
+ uint16 amsdu_mtu_pref;
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN 11
+#define BRCM_IE_VER 2
+#define BRCM_IE_LEGACY_AES_VER 1
+
+
+#ifdef WLAFTERBURNER
+#define BRF_ABCAP 0x1
+#define BRF_ABRQRD 0x2
+#define BRF_ABCOUNTER_MASK 0xf0
+#define BRF_ABCOUNTER_SHIFT 4
+#endif
+#define BRF_LZWDS 0x4
+#define BRF_BLOCKACK 0x8
+
+
+#define BRF1_AMSDU 0x1
+#define BRF1_WMEPS 0x4
+#define BRF1_PSOFIX 0x8
+#define BRF1_RX_LARGE_AGG 0x10
+#define BRF1_SOFTAP 0x40
+
+#ifdef WLAFTERBURNER
+#define AB_WDS_TIMEOUT_MAX 15
+#define AB_WDS_TIMEOUT_MIN 1
+#endif
+
+#define AB_GUARDCOUNT 10
+
+
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+ uchar id;
+ uchar len;
+ uchar oui [3];
+ uchar data [1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN 2
+#define VNDR_IE_MIN_LEN 3
+#define VNDR_IE_MAX_LEN 256
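+
+/*
+ * Editor's illustrative sketch (not part of the original header): matching a
+ * vendor-specific IE by OUI. Assumes `vie` points at a vendor IE whose length
+ * has already been validated against the buffer it came from.
+ *
+ *	vndr_ie_t *vie;
+ *	if (vie->len >= VNDR_IE_MIN_LEN &&
+ *	    !memcmp(vie->oui, BRCM_OUI, sizeof(vie->oui))) {
+ *		// Broadcom vendor IE: vie->data holds (vie->len - 3) bytes of body
+ *	}
+ */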
+
+
+#define MCSSET_LEN 16
+#define MAX_MCS_NUM (128)
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+ uint16 cap;
+ uint8 params;
+ uint8 supp_mcs[MCSSET_LEN];
+ uint16 ext_htcap;
+ uint32 txbf_cap;
+ uint8 as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD 4
+#define HT_CAP_IE_LEN 26
+#define HT_CAP_IE_TYPE 51
+
+#define HT_CAP_LDPC_CODING 0x0001
+#define HT_CAP_40MHZ 0x0002
+#define HT_CAP_MIMO_PS_MASK 0x000C
+#define HT_CAP_MIMO_PS_SHIFT 0x0002
+#define HT_CAP_MIMO_PS_OFF 0x0003
+#define HT_CAP_MIMO_PS_RTS 0x0001
+#define HT_CAP_MIMO_PS_ON 0x0000
+#define HT_CAP_GF 0x0010
+#define HT_CAP_SHORT_GI_20 0x0020
+#define HT_CAP_SHORT_GI_40 0x0040
+#define HT_CAP_TX_STBC 0x0080
+#define HT_CAP_RX_STBC_MASK 0x0300
+#define HT_CAP_RX_STBC_SHIFT 8
+#define HT_CAP_DELAYED_BA 0x0400
+#define HT_CAP_MAX_AMSDU 0x0800
+#define HT_CAP_DSSS_CCK 0x1000
+#define HT_CAP_PSMP 0x2000
+#define HT_CAP_40MHZ_INTOLERANT 0x4000
+#define HT_CAP_LSIG_TXOP 0x8000
+
+#define HT_CAP_RX_STBC_NO 0x0
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1
+#define HT_CAP_RX_STBC_TWO_STREAM 0x2
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3
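+
+/*
+ * Editor's illustrative sketch (not part of the original header): testing HT
+ * capability bits from a received ht_cap_ie_t. Assumes `cap` is the cap field
+ * already converted from little-endian wire order to host order.
+ *
+ *	uint16 cap;                                  // ht_cap_ie_t::cap
+ *	int ht40   = (cap & HT_CAP_40MHZ) != 0;
+ *	int sgi20  = (cap & HT_CAP_SHORT_GI_20) != 0;
+ *	uint8 stbc = (cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT;
+ *	// stbc is one of HT_CAP_RX_STBC_NO .. HT_CAP_RX_STBC_THREE_STREAM
+ */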
+
+#define HT_MAX_AMSDU 7935
+#define HT_MIN_AMSDU 3835
+
+#define HT_PARAMS_RX_FACTOR_MASK 0x03
+#define HT_PARAMS_DENSITY_MASK 0x1C
+#define HT_PARAMS_DENSITY_SHIFT 2
+
+
+#define AMPDU_MAX_MPDU_DENSITY 7
+#define AMPDU_RX_FACTOR_8K 0
+#define AMPDU_RX_FACTOR_16K 1
+#define AMPDU_RX_FACTOR_32K 2
+#define AMPDU_RX_FACTOR_64K 3
+#define AMPDU_RX_FACTOR_BASE 8*1024
+
+#define AMPDU_DELIMITER_LEN 4
+#define AMPDU_DELIMITER_LEN_MAX 63
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+ uint8 ctl_ch;
+ uint8 byte1;
+ uint16 opmode;
+ uint16 misc_bits;
+ uint8 basic_mcs[MCSSET_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN 22
+#define HT_ADD_IE_TYPE 52
+
+
+#define HT_BW_ANY 0x04
+#define HT_RIFS_PERMITTED 0x08
+
+
+#define HT_OPMODE_MASK 0x0003
+#define HT_OPMODE_SHIFT 0
+#define HT_OPMODE_PURE 0x0000
+#define HT_OPMODE_OPTIONAL 0x0001
+#define HT_OPMODE_HT20IN40 0x0002
+#define HT_OPMODE_MIXED 0x0003
+#define HT_OPMODE_NONGF 0x0004
+#define DOT11N_TXBURST 0x0008
+#define DOT11N_OBSS_NONHT 0x0010
+
+
+#define HT_BASIC_STBC_MCS 0x007f
+#define HT_DUAL_STBC_PROT 0x0080
+#define HT_SECOND_BCN 0x0100
+#define HT_LSIG_TXOP 0x0200
+#define HT_PCO_ACTIVE 0x0400
+#define HT_PCO_PHASE 0x0800
+
+
+#define DOT11N_2G_TXBURST_LIMIT 6160
+#define DOT11N_5G_TXBURST_LIMIT 3080
+
+
+#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ >> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_MIXED)
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_HT20IN40)
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_OPTIONAL)
+#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
+ HT_MIXEDMODE_PRESENT((add_ie)))
+#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+ == HT_OPMODE_NONGF)
+#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+ == DOT11N_TXBURST)
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+ == DOT11N_OBSS_NONHT)
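+
+/*
+ * Editor's note (illustrative, not part of the original header): the helper
+ * macros above read the unaligned little-endian opmode field of an HT
+ * Operation (HT ADD) IE; ltoh16_ua() is assumed to be the driver's unaligned
+ * little-endian-to-host helper. A typical protection decision:
+ *
+ *	ht_add_ie_t *add_ie;
+ *	if (HT_USE_PROTECTION(add_ie)) {
+ *		// mixed or HT20-in-40 BSS: enable RTS/CTS or CTS-to-self
+ *	}
+ */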
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+ uint16 passive_dwell;
+ uint16 active_dwell;
+ uint16 bss_widthscan_interval;
+ uint16 passive_total;
+ uint16 active_total;
+ uint16 chanwidth_transition_dly;
+ uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+ uint8 id;
+ uint8 len;
+ obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t)
+
+
+#define HT_CTRL_LA_TRQ 0x00000002
+#define HT_CTRL_LA_MAI 0x0000003C
+#define HT_CTRL_LA_MAI_SHIFT 2
+#define HT_CTRL_LA_MAI_MRQ 0x00000004
+#define HT_CTRL_LA_MAI_MSI 0x00000038
+#define HT_CTRL_LA_MFSI 0x000001C0
+#define HT_CTRL_LA_MFSI_SHIFT 6
+#define HT_CTRL_LA_MFB_ASELC 0x0000FE00
+#define HT_CTRL_LA_MFB_ASELC_SH 9
+#define HT_CTRL_LA_ASELC_CMD 0x00000C00
+#define HT_CTRL_LA_ASELC_DATA 0x0000F000
+#define HT_CTRL_CAL_POS 0x00030000
+#define HT_CTRL_CAL_SEQ 0x000C0000
+#define HT_CTRL_CSI_STEERING 0x00C00000
+#define HT_CTRL_CSI_STEER_SHIFT 22
+#define HT_CTRL_CSI_STEER_NFB 0
+#define HT_CTRL_CSI_STEER_CSI 1
+#define HT_CTRL_CSI_STEER_NCOM 2
+#define HT_CTRL_CSI_STEER_COM 3
+#define HT_CTRL_NDP_ANNOUNCE 0x01000000
+#define HT_CTRL_AC_CONSTRAINT 0x40000000
+#define HT_CTRL_RDG_MOREPPDU 0x80000000
+
+#define HT_OPMODE_OPTIONAL 0x0001
+#define HT_OPMODE_HT20IN40 0x0002
+#define HT_OPMODE_MIXED 0x0003
+#define HT_OPMODE_NONGF 0x0004
+#define DOT11N_TXBURST 0x0008
+#define DOT11N_OBSS_NONHT 0x0010
+
+
+
+#define WPA_OUI "\x00\x50\xF2"
+#define WPA_OUI_LEN 3
+#define WPA_OUI_TYPE 1
+#define WPA_VERSION 1
+#define WPA2_OUI "\x00\x0F\xAC"
+#define WPA2_OUI_LEN 3
+#define WPA2_VERSION 1
+#define WPA2_VERSION_LEN 2
+
+
+#define WPS_OUI "\x00\x50\xF2"
+#define WPS_OUI_LEN 3
+#define WPS_OUI_TYPE 4
+
+
+#define WFA_OUI "\x50\x6F\x9A"
+#define WFA_OUI_LEN 3
+
+#define WFA_OUI_TYPE_WPA 1
+#define WFA_OUI_TYPE_WPS 4
+#define WFA_OUI_TYPE_TPC 8
+#define WFA_OUI_TYPE_P2P 9
+
+
+#define RSN_AKM_NONE 0
+#define RSN_AKM_UNSPECIFIED 1
+#define RSN_AKM_PSK 2
+#define RSN_AKM_FBT_1X 3
+#define RSN_AKM_FBT_PSK 4
+
+
+#define DOT11_MAX_DEFAULT_KEYS 4
+#define DOT11_MAX_KEY_SIZE 32
+#define DOT11_MAX_IV_SIZE 16
+#define DOT11_EXT_IV_FLAG (1<<5)
+#define DOT11_WPA_KEY_RSC_LEN 8
+
+#define WEP1_KEY_SIZE 5
+#define WEP1_KEY_HEX_SIZE 10
+#define WEP128_KEY_SIZE 13
+#define WEP128_KEY_HEX_SIZE 26
+#define TKIP_MIC_SIZE 8
+#define TKIP_EOM_SIZE 7
+#define TKIP_EOM_FLAG 0x5a
+#define TKIP_KEY_SIZE 32
+#define TKIP_MIC_AUTH_TX 16
+#define TKIP_MIC_AUTH_RX 24
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX
+#define AES_KEY_SIZE 16
+#define AES_MIC_SIZE 8
+
+
+#define WCN_OUI "\x00\x50\xf2"
+#define WCN_TYPE 4
+
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mdid;
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS 0x01
+#define FBT_MDID_CAP_RRP 0x02
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mic_control;
+ uint8 mic[16];
+ uint8 anonce[32];
+ uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+ uint8 id;
+ uint8 len;
+ uint16 key_info;
+ uint8 key_len;
+ uint8 rsc[8];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 000000000000..4ccfab02056b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,45 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11_bta.h,v 9.2 2008-10-28 23:27:13 Exp $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP 1
+#define BTA_PROT_ACTIVITY_REPORT 2
+#define BTA_PROT_SECURITY 3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST 4
+#define BTA_PROT_LINK_SUPERVISION_REPLY 5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS 1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS 2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS 3
+#define BTA_TYPE_ID_CAPABILITIES 4
+#define BTA_TYPE_ID_VERSION 5
+#endif /* _802_11_BTA_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 000000000000..ce8ad083f286
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,131 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h,v 1.6 2008-12-01 22:55:11 Exp $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+ uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+ uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */
+ uint8 type; /* WME_TYPE */
+ uint8 subtype; /* WME_SUBTYPE_TSPEC */
+ uint8 version; /* WME_VERSION */
+ tsinfo_t tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint32 min_srv_interval; /* Minimum Service Interval (us) */
+ uint32 max_srv_interval; /* Maximum Service Interval (us) */
+ uint32 inactivity_interval; /* Inactivity Interval (us) */
+ uint32 suspension_interval; /* Suspension Interval (us) */
+ uint32 srv_start_time; /* Service Start Time (us) */
+ uint32 min_data_rate; /* Minimum Data Rate (bps) */
+ uint32 mean_data_rate; /* Mean Data Rate (bps) */
+ uint32 peak_data_rate; /* Peak Data Rate (bps) */
+ uint32 max_burst_size; /* Maximum Burst Size (bytes) */
+ uint32 delay_bound; /* Delay Bound (us) */
+ uint32 min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */
+#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */
+#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */
+#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \
+ TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \
+ TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+ ((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+ ((prio) << TS_INFO_USER_PRIO_SHIFT))
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT 300 /* default ADDTS response timeout in ms */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
+#define DOT11E_STATUS_END_TS 37 /* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 000000000000..cf206250246f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h,v 9.3 2007-04-10 21:33:06 Exp $
+ */
+
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+
+#define PRIO_8021D_NONE 2
+#define PRIO_8021D_BK 1
+#define PRIO_8021D_BE 0
+#define PRIO_8021D_EE 3
+#define PRIO_8021D_CL 4
+#define PRIO_8021D_VI 5
+#define PRIO_8021D_VO 6
+#define PRIO_8021D_NC 7
+#define MAXPRIO 7
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1
+
+
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio) ^ 2) : (prio))
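+
+/*
+ * Editor's note (not part of the original header): PRIO2PREC() swaps the two
+ * lowest 802.1D priorities so precedence grows monotonically: BE (0) -> 2,
+ * BK (1) -> 1, NONE (2) -> 0, and every other priority maps to itself,
+ * e.g. PRIO2PREC(PRIO_8021D_VO) == 6.
+ */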
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 000000000000..46fa4c9f89e8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,83 @@
+/*
+ * Broadcom Ethernettype protocol definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h,v 9.12 2009-12-29 19:57:18 Exp $
+ */
+
+
+
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+
+
+
+
+#define BCMILCP_SUBTYPE_RATE 1
+#define BCMILCP_SUBTYPE_LINK 2
+#define BCMILCP_SUBTYPE_CSA 3
+#define BCMILCP_SUBTYPE_LARQ 4
+#define BCMILCP_SUBTYPE_VENDOR 5
+#define BCMILCP_SUBTYPE_FLH 17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
+#define BCMILCP_SUBTYPE_CERT 32770
+#define BCMILCP_SUBTYPE_SES 32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED 0
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define BCMILCP_BCM_SUBTYPE_SES 2
+
+
+#define BCMILCP_BCM_SUBTYPE_DPT 4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+ uint16 subtype;
+ uint16 length;
+ uint8 version;
+ uint8 oui[3];
+
+ uint16 usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 000000000000..30ec848c40ae
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,312 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h,v 9.64.2.9 2011-02-01 06:24:21 Exp $
+ *
+ */
+
+
+
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION 2
+#define BCM_MSG_IFNAME_MAX 16
+
+
+#define WLC_EVENT_MSG_LINK 0x01
+#define WLC_EVENT_MSG_FLUSHTXQ 0x02
+#define WLC_EVENT_MSG_GROUP 0x04
+#define WLC_EVENT_MSG_UNKBSS 0x08
+#define WLC_EVENT_MSG_UNKIF 0x10
+
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+ uint8 ifidx;
+ uint8 bsscfgidx;
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+ struct ether_header eth;
+ bcmeth_hdr_t bcm_hdr;
+ wl_event_msg_t event;
+
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
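+
+/*
+ * Editor's illustrative sketch (not part of the original header): a firmware
+ * event arrives as a bcm_event_t with the multi-byte wl_event_msg_t fields in
+ * network byte order on the wire; ntoh32() below stands for whatever
+ * byte-order helper the consumer has available (an assumption, not an API
+ * defined here).
+ *
+ *	bcm_event_t *evt;
+ *	uint32 type   = ntoh32(evt->event.event_type);  // one of the WLC_E_* codes below
+ *	uint32 status = ntoh32(evt->event.status);
+ *	uint32 dlen   = ntoh32(evt->event.datalen);     // payload follows the struct
+ */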
+
+
+#define WLC_E_SET_SSID 0
+#define WLC_E_JOIN 1
+#define WLC_E_START 2
+#define WLC_E_AUTH 3
+#define WLC_E_AUTH_IND 4
+#define WLC_E_DEAUTH 5
+#define WLC_E_DEAUTH_IND 6
+#define WLC_E_ASSOC 7
+#define WLC_E_ASSOC_IND 8
+#define WLC_E_REASSOC 9
+#define WLC_E_REASSOC_IND 10
+#define WLC_E_DISASSOC 11
+#define WLC_E_DISASSOC_IND 12
+#define WLC_E_QUIET_START 13
+#define WLC_E_QUIET_END 14
+#define WLC_E_BEACON_RX 15
+#define WLC_E_LINK 16
+#define WLC_E_MIC_ERROR 17
+#define WLC_E_NDIS_LINK 18
+#define WLC_E_ROAM 19
+#define WLC_E_TXFAIL 20
+#define WLC_E_PMKID_CACHE 21
+#define WLC_E_RETROGRADE_TSF 22
+#define WLC_E_PRUNE 23
+#define WLC_E_AUTOAUTH 24
+#define WLC_E_EAPOL_MSG 25
+#define WLC_E_SCAN_COMPLETE 26
+#define WLC_E_ADDTS_IND 27
+#define WLC_E_DELTS_IND 28
+#define WLC_E_BCNSENT_IND 29
+#define WLC_E_BCNRX_MSG 30
+#define WLC_E_BCNLOST_MSG 31
+#define WLC_E_ROAM_PREP 32
+#define WLC_E_PFN_NET_FOUND 33
+#define WLC_E_PFN_NET_LOST 34
+#define WLC_E_RESET_COMPLETE 35
+#define WLC_E_JOIN_START 36
+#define WLC_E_ROAM_START 37
+#define WLC_E_ASSOC_START 38
+#define WLC_E_IBSS_ASSOC 39
+#define WLC_E_RADIO 40
+#define WLC_E_PSM_WATCHDOG 41
+#define WLC_E_PROBREQ_MSG 44
+#define WLC_E_SCAN_CONFIRM_IND 45
+#define WLC_E_PSK_SUP 46
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define WLC_E_EXCEEDED_MEDIUM_TIME 48
+#define WLC_E_ICV_ERROR 49
+#define WLC_E_UNICAST_DECODE_ERROR 50
+#define WLC_E_MULTICAST_DECODE_ERROR 51
+#define WLC_E_TRACE 52
+#define WLC_E_BTA_HCI_EVENT 53
+#define WLC_E_IF 54
+#ifdef WLP2P
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55
+#endif
+#define WLC_E_RSSI 56
+#define WLC_E_PFN_SCAN_COMPLETE 57
+#define WLC_E_EXTLOG_MSG 58
+#define WLC_E_ACTION_FRAME 59
+#define WLC_E_ACTION_FRAME_COMPLETE 60
+#define WLC_E_PRE_ASSOC_IND 61
+#define WLC_E_PRE_REASSOC_IND 62
+#define WLC_E_CHANNEL_ADOPTED 63
+#define WLC_E_AP_STARTED 64
+#define WLC_E_DFS_AP_STOP 65
+#define WLC_E_DFS_AP_RESUME 66
+#define WLC_E_WAI_STA_EVENT 67
+#define WLC_E_WAI_MSG 68
+#define WLC_E_ESCAN_RESULT 69
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
+#if defined(WLP2P)
+#define WLC_E_PROBRESP_MSG 71
+#define WLC_E_P2P_PROBREQ_MSG 72
+#endif
+#define WLC_E_DCS_REQUEST 73
+
+#define WLC_E_FIFO_CREDIT_MAP 74
+
+#define WLC_E_ACTION_FRAME_RX 75
+#define WLC_E_WAKE_EVENT 76
+#define WLC_E_RM_COMPLETE 77
+#define WLC_E_HTSFSYNC 78
+#define WLC_E_OVERLAY_REQ 79
+#define WLC_E_CSA_COMPLETE_IND 80
+#define WLC_E_EXCESS_PM_WAKE_EVENT 81
+#define WLC_E_PFN_SCAN_NONE 82
+#define WLC_E_PFN_SCAN_ALLGONE 83
+#define WLC_E_GTK_PLUMBED 84
+#define WLC_E_LAST 85
+
+
+typedef struct {
+ uint event;
+ const char *name;
+} bcmevent_name_t;
+
+extern const bcmevent_name_t bcmevent_names[];
+extern const int bcmevent_names_size;
+
+
+#define WLC_E_STATUS_SUCCESS 0
+#define WLC_E_STATUS_FAIL 1
+#define WLC_E_STATUS_TIMEOUT 2
+#define WLC_E_STATUS_NO_NETWORKS 3
+#define WLC_E_STATUS_ABORT 4
+#define WLC_E_STATUS_NO_ACK 5
+#define WLC_E_STATUS_UNSOLICITED 6
+#define WLC_E_STATUS_ATTEMPT 7
+#define WLC_E_STATUS_PARTIAL 8
+#define WLC_E_STATUS_NEWSCAN 9
+#define WLC_E_STATUS_NEWASSOC 10
+#define WLC_E_STATUS_11HQUIET 11
+#define WLC_E_STATUS_SUPPRESS 12
+#define WLC_E_STATUS_NOCHANS 13
+#define WLC_E_STATUS_CS_ABORT 15
+#define WLC_E_STATUS_ERROR 16
+
+
+#define WLC_E_REASON_INITIAL_ASSOC 0
+#define WLC_E_REASON_LOW_RSSI 1
+#define WLC_E_REASON_DEAUTH 2
+#define WLC_E_REASON_DISASSOC 3
+#define WLC_E_REASON_BCNS_LOST 4
+#define WLC_E_REASON_MINTXRATE 9
+#define WLC_E_REASON_TXFAIL 10
+
+
+#define WLC_E_REASON_FAST_ROAM_FAILED 5
+#define WLC_E_REASON_DIRECTED_ROAM 6
+#define WLC_E_REASON_TSPEC_REJECTED 7
+#define WLC_E_REASON_BETTER_AP 8
+
+
+#define WLC_E_PRUNE_ENCR_MISMATCH 1
+#define WLC_E_PRUNE_BCAST_BSSID 2
+#define WLC_E_PRUNE_MAC_DENY 3
+#define WLC_E_PRUNE_MAC_NA 4
+#define WLC_E_PRUNE_REG_PASSV 5
+#define WLC_E_PRUNE_SPCT_MGMT 6
+#define WLC_E_PRUNE_RADAR 7
+#define WLC_E_RSN_MISMATCH 8
+#define WLC_E_PRUNE_NO_COMMON_RATES 9
+#define WLC_E_PRUNE_BASIC_RATES 10
+#define WLC_E_PRUNE_CIPHER_NA 12
+#define WLC_E_PRUNE_KNOWN_STA 13
+#define WLC_E_PRUNE_WDS_PEER 15
+#define WLC_E_PRUNE_QBSS_LOAD 16
+#define WLC_E_PRUNE_HOME_AP 17
+
+
+#define WLC_E_SUP_OTHER 0
+#define WLC_E_SUP_DECRYPT_KEY_DATA 1
+#define WLC_E_SUP_BAD_UCAST_WEP128 2
+#define WLC_E_SUP_BAD_UCAST_WEP40 3
+#define WLC_E_SUP_UNSUP_KEY_LEN 4
+#define WLC_E_SUP_PW_KEY_CIPHER 5
+#define WLC_E_SUP_MSG3_TOO_MANY_IE 6
+#define WLC_E_SUP_MSG3_IE_MISMATCH 7
+#define WLC_E_SUP_NO_INSTALL_FLAG 8
+#define WLC_E_SUP_MSG3_NO_GTK 9
+#define WLC_E_SUP_GRP_KEY_CIPHER 10
+#define WLC_E_SUP_GRP_MSG1_NO_GTK 11
+#define WLC_E_SUP_GTK_DECRYPT_FAIL 12
+#define WLC_E_SUP_SEND_FAIL 13
+#define WLC_E_SUP_DEAUTH 14
+#define WLC_E_SUP_WPA_PSK_TMO 15
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+ uint16 version;
+ uint16 channel;
+ int32 rssi;
+ uint32 mactime;
+ uint32 rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+
+typedef struct wl_event_data_if {
+ uint8 ifidx;
+ uint8 opcode;
+ uint8 reserved;
+ uint8 bssidx;
+ uint8 role;
+} wl_event_data_if_t;
+
+
+#define WLC_E_IF_ADD 1
+#define WLC_E_IF_DEL 2
+#define WLC_E_IF_CHANGE 3
+
+
+#define WLC_E_IF_ROLE_STA 0
+#define WLC_E_IF_ROLE_AP 1
+#define WLC_E_IF_ROLE_WDS 2
+#define WLC_E_IF_ROLE_P2P_GO 3
+#define WLC_E_IF_ROLE_P2P_CLIENT 4
+#define WLC_E_IF_ROLE_BTA_CREATOR 5
+#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6
+
+
+#define WLC_E_LINK_BCN_LOSS 1
+#define WLC_E_LINK_DISASSOC 2
+#define WLC_E_LINK_ASSOC_REC 3
+#define WLC_E_LINK_BSSCFG_DIS 4
+
+
+#define WLC_E_OVL_DOWNLOAD 0
+#define WLC_E_OVL_UPDATE_IND 1
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
new file mode 100644
index 000000000000..8a8f3146d56b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h,v 9.19 2009-11-10 20:08:33 Exp $
+ */
+
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define IP_VER_OFFSET 0x0
+#define IP_VER_MASK 0xf0
+#define IP_VER_SHIFT 4
+#define IP_VER_4 4
+#define IP_VER_6 6
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1
+#define IP_PROT_TCP 0x6
+#define IP_PROT_UDP 0x11
+
+
+#define IPV4_VER_HL_OFFSET 0
+#define IPV4_TOS_OFFSET 1
+#define IPV4_PKTLEN_OFFSET 2
+#define IPV4_PKTFLAG_OFFSET 6
+#define IPV4_PROT_OFFSET 9
+#define IPV4_CHKSUM_OFFSET 10
+#define IPV4_SRC_IP_OFFSET 12
+#define IPV4_DEST_IP_OFFSET 16
+#define IPV4_OPTIONS_OFFSET 20
+
+
+#define IPV4_VER_MASK 0xf0
+#define IPV4_VER_SHIFT 4
+
+#define IPV4_HLEN_MASK 0x0f
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
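+
+/*
+ * Editor's illustrative sketch (not part of the original header): classifying
+ * a raw IP packet with the accessors above. `ip` is assumed to point at the
+ * first byte of the IP header.
+ *
+ *	uint8 *ip;
+ *	if (IP_VER(ip) == IP_VER_4) {
+ *		uint hlen = IPV4_HLEN(ip);     // header length in bytes, 20..60
+ *		uint8 *payload = ip + hlen;    // start of TCP/UDP/ICMP header
+ *	}
+ */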
+
+#define IPV4_ADDR_LEN 4
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define IPV4_TOS_DSCP_MASK 0xfc
+#define IPV4_TOS_DSCP_SHIFT 2
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0
+#define IPV4_TOS_PREC_SHIFT 5
+
+#define IPV4_TOS_LOWDELAY 0x10
+#define IPV4_TOS_THROUGHPUT 0x8
+#define IPV4_TOS_RELIABILITY 0x4
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV 0x8000
+#define IPV4_FRAG_DONT 0x4000
+#define IPV4_FRAG_MORE 0x2000
+#define IPV4_FRAG_OFFSET_MASK 0x1fff
+
+#define IPV4_ADDR_STR_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+ uint8 addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+ uint8 version_ihl;
+ uint8 tos;
+ uint16 tot_len;
+ uint16 id;
+ uint16 frag;
+ uint8 ttl;
+ uint8 prot;
+ uint16 hdr_chksum;
+ uint8 src_ip[IPV4_ADDR_LEN];
+ uint8 dst_ip[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+
+#define IPV6_PAYLOAD_LEN_OFFSET 4
+#define IPV6_NEXT_HDR_OFFSET 6
+#define IPV6_HOP_LIMIT_OFFSET 7
+#define IPV6_SRC_IP_OFFSET 8
+#define IPV6_DEST_IP_OFFSET 24
+
+
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+ (((uint8 *)(ipv6_body))[2] << 8) | \
+ (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN 16
+
+
+#define IP_TOS46(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
new file mode 100644
index 000000000000..89c118179159
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
@@ -0,0 +1,442 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface)
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bt_amp_hci.h,v 9.14.8.2 2010-09-10 18:37:47 Exp $
+*/
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+ uint16 opcode;
+ uint8 plen;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE 255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF)
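+
+/*
+ * Editor's note (not part of the original header): an HCI opcode packs the
+ * 6-bit Opcode Group Field above the 10-bit Opcode Command Field, so e.g.
+ * HCI_CMD_OPCODE(0x05, 0x0009) == 0x1409 (the HCI_Read_Local_AMP_Info value
+ * below), HCI_CMD_OGF(0x1409) == 0x05 and HCI_CMD_OCF(0x1409) == 0x0009.
+ */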
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A)
+
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+ uint8 plh;
+ uint8 offset[2]; /* length so far */
+ uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+ uint8 plh;
+ uint8 offset[2];
+ uint8 len[2];
+ uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+ uint8 plh;
+ uint8 key_length;
+ uint8 key_type;
+ uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+ uint8 plh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+ uint8 plh;
+ uint8 txflow[16];
+ uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+ uint8 id;
+ uint8 service_type;
+ uint8 max_sdu[2];
+ uint8 sdu_ia_time[4];
+ uint8 access_latency[4];
+ uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+ uint8 llh[2];
+ uint8 txflow[16];
+ uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+ uint8 plh;
+ uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+ uint16 bredr;
+ plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+ hci_handle_t handle;
+ uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+ uint8 llh[2];
+ uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+ uint8 plh;
+ uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+ uint8 ld_aware;
+ uint8 ld[2];
+ uint8 ld_opts;
+ uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+ uint8 llh[2];
+ uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC 0
+#define EFS_SVCTYPE_BEST_EFFORT 1
+#define EFS_SVCTYPE_GUARANTEED 2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+ uint8 ecode;
+ uint8 plen;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete 0x0E
+#define HCI_Command_Status 0x0F
+#define HCI_Flush_Occurred 0x11
+#define HCI_Enhanced_Flush_Complete 0x39
+#define HCI_Physical_Link_Complete 0x40
+#define HCI_Channel_Select 0x41
+#define HCI_Disconnect_Physical_Link_Complete 0x42
+#define HCI_Logical_Link_Complete 0x45
+#define HCI_Disconnect_Logical_Link_Complete 0x46
+#define HCI_Flow_Spec_Modify_Complete 0x47
+#define HCI_Number_of_Completed_Data_Blocks 0x48
+#define HCI_Short_Range_Mode_Change_Complete 0x4C
+#define HCI_Status_Change_Event 0x4D
+#define HCI_Vendor_Specific 0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask 0x0001
+#define HCI_Channel_Select_Event_Mask 0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004
+#define HCI_Logical_Link_Complete_Event_Mask 0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000
+#define HCI_Status_Change_Event_Mask 0x2000
+#define HCI_All_Event_Mask 0x31e7
+
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+ uint8 status;
+ uint8 cmdpkts;
+ uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+ uint8 cmdpkts;
+ uint16 opcode;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+ uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+ uint8 status;
+ uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint16 len;
+ uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+ uint8 status;
+ uint8 AMP_status;
+ uint32 bandwidth;
+ uint32 gbandwidth;
+ uint32 latency;
+ uint32 PDU_size;
+ uint8 ctrl_type;
+ uint16 PAL_cap;
+ uint16 AMP_ASSOC_len;
+ uint32 max_flush_timeout;
+ uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+ uint8 status;
+ uint16 llh;
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+ uint8 status;
+ uint16 llh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+ uint8 status;
+ uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+ uint8 status;
+ uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+ uint8 status;
+ hci_handle_t handle;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+ uint8 status;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+ uint8 status;
+ uint16 ACL_pkt_len;
+ uint16 data_block_len;
+ uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+ uint16 handle;
+ uint16 pkts;
+ uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+ uint16 num_blocks;
+ uint8 num_handles;
+ data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+ uint8 status;
+ uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+ uint8 status;
+ uint8 llh[2];
+ uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+ uint8 status;
+ uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+ uint8 status;
+ hci_handle_t handle;
+ uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+ uint8 status;
+ uint8 ld_aware;
+ uint8 ld[2];
+ uint8 ld_opts;
+ uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+ uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+ uint8 len;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+ uint8 status;
+ uint8 hci_version;
+ uint16 hci_revision;
+ uint8 pal_version;
+ uint16 mfg_name;
+ uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE 64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+ uint8 status;
+ uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+ uint8 status;
+ uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS 0x00
+#define HCI_ERR_ILLEGAL_COMMAND 0x01
+#define HCI_ERR_NO_CONNECTION 0x02
+#define HCI_ERR_MEMORY_FULL 0x07
+#define HCI_ERR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09
+#define HCI_ERR_CONNECTION_EXISTS 0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED 0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10
+#define HCI_ERR_UNSUPPORTED_VALUE 0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16
+#define HCI_ERR_UNSPECIFIED 0x1F
+#define HCI_ERR_UNIT_KEY_USED 0x26
+#define HCI_ERR_QOS_REJECTED 0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39
+#define HCI_ERR_CHANNEL_MOVE 0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+ uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+ uint16 dlen; /* data total length */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12)
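As the two macros above imply, the first 16-bit field of an ACL data packet multiplexes a 12-bit connection handle with the PB and BC flag bits. A small illustrative helper (not part of the patch), assuming the value is already in host byte order:

#include <stdint.h>

static void acl_handle_split(uint16_t hdr, uint16_t *handle, uint8_t *pb, uint8_t *bc)
{
	*handle = hdr & 0x0fff;        /* HCI_ACL_DATA_HANDLE() */
	*pb = (hdr >> 12) & 0x3;       /* packet boundary flags, bits 13:12 */
	*bc = (hdr >> 14) & 0x3;       /* broadcast flags, bits 15:14 */
}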
+
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+ uint8 ScheduleKnown;
+ uint8 NumReports;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+ uint32 StartTime;
+ uint32 Duration;
+ uint32 Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN 0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
new file mode 100644
index 000000000000..5781d1312e35
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
@@ -0,0 +1,173 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * $Id: eapol.h,v 9.23.86.1 2010-09-02 18:09:39 Exp $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
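Since eapol_header_t is laid out as a 14-byte Ethernet header followed by version, type and a big-endian body length (18 bytes total, EAPOL_HEADER_LEN), a raw frame can be parsed with plain offsets. An illustrative sketch, not part of the patch:

#include <stdint.h>
#include <stddef.h>

struct eapol_hdr_fields {
	uint8_t version;
	uint8_t type;
	uint16_t body_len;             /* host order */
};

static int eapol_parse(const uint8_t *frame, size_t len,
		       struct eapol_hdr_fields *out)
{
	if (len < 18)                  /* EAPOL_HEADER_LEN */
		return -1;
	out->version = frame[14];
	out->type = frame[15];
	out->body_len = ((uint16_t)frame[16] << 8) | frame[17];  /* big-endian on the wire */
	return (len >= 18u + out->body_len) ? 0 : -1;
}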
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2
+#define WPA_EAPOL_VERSION 1
+#define LEAP_EAPOL_VERSION 1
+#define SES_EAPOL_VERSION 1
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8
+#define EAPOL_KEY_IV_LEN 16
+#define EAPOL_KEY_SIG_LEN 16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80
+#define EAPOL_KEY_BROADCAST 0
+#define EAPOL_KEY_UNICAST 0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN 8
+#define EAPOL_WPA_KEY_NONCE_LEN 32
+#define EAPOL_WPA_KEY_IV_LEN 16
+#define EAPOL_WPA_KEY_RSC_LEN 8
+#define EAPOL_WPA_KEY_ID_LEN 8
+#define EAPOL_WPA_KEY_MIC_LEN 16
+#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE 32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */
+ unsigned short data_len; /* Key Data Length */
+ unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1 0x01
+#define WPA_KEY_DESC_V2 0x02
+#define WPA_KEY_DESC_V3 0x03
+#define WPA_KEY_PAIRWISE 0x08
+#define WPA_KEY_INSTALL 0x40
+#define WPA_KEY_ACK 0x80
+#define WPA_KEY_MIC 0x100
+#define WPA_KEY_SECURE 0x200
+#define WPA_KEY_ERROR 0x400
+#define WPA_KEY_REQ 0x800
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0 0x00
+#define WPA_KEY_INDEX_1 0x10
+#define WPA_KEY_INDEX_2 0x20
+#define WPA_KEY_INDEX_3 0x30
+#define WPA_KEY_INDEX_MASK 0x30
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
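The key_info bits above are what distinguish the messages of the 4-way handshake (pairwise vs. group, MIC present, ACK expected, and so on). A decoding sketch for illustration only, assuming key_info has already been converted from its big-endian wire order:

#include <stdint.h>
#include <stdbool.h>

struct key_info_bits {
	uint8_t desc_ver;              /* WPA_KEY_DESC_V1/V2/V3 */
	bool pairwise, install, ack, mic, secure, encrypted;
};

static void key_info_decode(uint16_t key_info, struct key_info_bits *out)
{
	out->desc_ver  = key_info & 0x0007;   /* descriptor version field */
	out->pairwise  = key_info & 0x0008;   /* WPA_KEY_PAIRWISE */
	out->install   = key_info & 0x0040;   /* WPA_KEY_INSTALL */
	out->ack       = key_info & 0x0080;   /* WPA_KEY_ACK */
	out->mic       = key_info & 0x0100;   /* WPA_KEY_MIC */
	out->secure    = key_info & 0x0200;   /* WPA_KEY_SECURE */
	out->encrypted = key_info & 0x1000;   /* WPA_KEY_ENCRYPTED_DATA (WPA2 only) */
}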
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 type;
+ uint8 length;
+ uint8 oui[3];
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK 1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2
+#define WPA2_KEY_DATA_SUBTYPE_MAC 3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 flags;
+ uint8 reserved;
+ uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2
+
+#define WPA2_GTK_INDEX_MASK 0x03
+#define WPA2_GTK_INDEX_SHIFT 0x00
+
+#define WPA2_GTK_TRANSMIT 0x04
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 reserved[2];
+ uint8 mac[ETHER_ADDR_LEN];
+ uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD 0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
new file mode 100644
index 000000000000..6a6dd14c1bbb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
@@ -0,0 +1,162 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h,v 9.56 2009-10-15 22:54:58 Exp $
+ */
+
+
+#ifndef _NET_ETHERNET_H_
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define ETHER_ADDR_LEN 6
+
+
+#define ETHER_TYPE_LEN 2
+
+
+#define ETHER_CRC_LEN 4
+
+
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+
+#define ETHER_MIN_LEN 64
+
+
+#define ETHER_MIN_DATA 46
+
+
+#define ETHER_MAX_LEN 1518
+
+
+#define ETHER_MAX_DATA 1500
+
+
+#define ETHER_TYPE_MIN 0x0600
+#define ETHER_TYPE_IP 0x0800
+#define ETHER_TYPE_ARP 0x0806
+#define ETHER_TYPE_8021Q 0x8100
+#define ETHER_TYPE_BRCM 0x886c
+#define ETHER_TYPE_802_1X 0x888e
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7
+#define ETHER_TYPE_WAI 0x88b4
+
+
+
+#define ETHER_BRCM_SUBTYPE_LEN 4
+#define ETHER_BRCM_CRAM 1
+
+
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN)
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN)
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN)
+
+
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \
+ ((uint8 *)ea)[0] = 0x01; \
+ ((uint8 *)ea)[1] = 0x00; \
+ ((uint8 *)ea)[2] = 0x5e; \
+ ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \
+ ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \
+ ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \
+}
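ETHER_FILL_MCAST_ADDR_FROM_IP maps an IPv4 multicast group to its Ethernet address: the fixed 01:00:5e prefix plus the low 23 bits of the group address. The same mapping written out as a function, for illustration only:

#include <stdint.h>

static void mcast_mac_from_ip(uint8_t mac[6], uint32_t mgrp_ip)
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (mgrp_ip >> 16) & 0x7f;   /* only the low 23 bits of the group survive */
	mac[4] = (mgrp_ip >> 8) & 0xff;
	mac[5] = mgrp_ip & 0xff;
}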
+
+#ifndef __INCif_etherh
+
+BWL_PRE_PACKED_STRUCT struct ether_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct ether_addr {
+ uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif
+
+
+#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+
+#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+
+#define ether_cmp(a, b) (!(((short*)a)[0] == ((short*)b)[0]) | \
+ !(((short*)a)[1] == ((short*)b)[1]) | \
+ !(((short*)a)[2] == ((short*)b)[2]))
+
+
+#define ether_copy(s, d) { \
+ ((short*)d)[0] = ((short*)s)[0]; \
+ ((short*)d)[1] = ((short*)s)[1]; \
+ ((short*)d)[2] = ((short*)s)[2]; }
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+
+#define ETHER_ISBCAST(ea) ((((uint8 *)(ea))[0] & \
+ ((uint8 *)(ea))[1] & \
+ ((uint8 *)(ea))[2] & \
+ ((uint8 *)(ea))[3] & \
+ ((uint8 *)(ea))[4] & \
+ ((uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea) ((((uint8 *)(ea))[0] | \
+ ((uint8 *)(ea))[1] | \
+ ((uint8 *)(ea))[2] | \
+ ((uint8 *)(ea))[3] | \
+ ((uint8 *)(ea))[4] | \
+ ((uint8 *)(ea))[5]) == 0)
+
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+ struct ether_header t; \
+ t = *(struct ether_header *)(s); \
+ *(struct ether_header *)(d) = t; \
+} while (0)
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
new file mode 100644
index 000000000000..4a0c9d1ddc37
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h,v 9.17.2.4 2010-12-15 21:41:21 Exp $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WiFi P2P OUI values */
+#define P2P_OUI WFA_OUI /* WiFi P2P OUI */
+#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID 0xdd /* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+ uint8 id; /* IE ID: 0xDD */
+ uint8 len; /* IE length */
+ uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */
+ uint8 oui_type; /* Identifies P2P version: P2P_VER */
+ uint8 subelts[1]; /* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN 6
+
+#define P2P_ATTR_ID_OFF 0
+#define P2P_ATTR_LEN_OFF 1
+#define P2P_ATTR_DATA_OFF 3
+
+#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field, per P2P spec 1.02 */
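Each attribute in a P2P IE body is a 1-byte ID, a 2-byte little-endian length, and then the data, per the offsets above. A lookup sketch (not part of the patch); body/body_len are assumed to cover the subelts[] portion of wifi_p2p_ie_t, i.e. everything after the 6-byte fixed part:

#include <stdint.h>
#include <stddef.h>

static const uint8_t *p2p_find_attr(const uint8_t *body, size_t body_len,
				    uint8_t want_id, uint16_t *attr_len)
{
	size_t off = 0;

	while (off + 3 <= body_len) {          /* P2P_ATTR_HDR_LEN */
		uint8_t id = body[off];        /* P2P_ATTR_ID_OFF */
		uint16_t len = body[off + 1] | ((uint16_t)body[off + 2] << 8);

		if (off + 3 + len > body_len)
			break;                 /* malformed attribute */
		if (id == want_id) {
			*attr_len = len;
			return body + off + 3; /* P2P_ATTR_DATA_OFF */
		}
		off += 3 + len;
	}
	return NULL;
}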
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS 0 /* Status */
+#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */
+#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID 3 /* P2P Device ID */
+#define P2P_SEID_INTENT 4 /* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */
+#define P2P_SEID_CHANNEL 6 /* Channel */
+#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */
+#define P2P_SEID_CHAN_LIST 11 /* Channel List */
+#define P2P_SEID_ABSENCE 12 /* Notice of Absence */
+#define P2P_SEID_DEV_INFO 13 /* Device Info */
+#define P2P_SEID_GROUP_INFO 14 /* Group Info */
+#define P2P_SEID_GROUP_ID 15 /* Group ID */
+#define P2P_SEID_P2P_IF 16 /* P2P Interface */
+#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES 0x1b /* BRCM proprietary subel: L2 Services */
+
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 dev; /* Device Capability Bitmap */
+ uint8 group; /* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_INTENT */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 go_tmo; /* GO config timeout in units of 10 ms */
+ uint8 client_tmo; /* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_STATUS */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 status; /* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS 0
+ /* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1
+ /* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+ /* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2
+ /* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED 3
+ /* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS 4
+ /* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5
+ /* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR 6
+ /* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7
+ /* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8
+ /* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT 9
+ /* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10
+ /* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT 11
+ /* Failed, rejected by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_XT_TIMING */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 avail[2]; /* availability period */
+ uint8 interval[2]; /* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN 10 /* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 band; /* Regulatory Class (band) */
+ uint8 channel; /* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+ uint8 band; /* Regulatory Class (band) */
+ uint8 num_channels; /* # of channels in the channel list */
+ uint8 channels[WL_NUMCHANNELS]; /* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 num_entries; /* # of channel entries */
+ wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+ /* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_DEV_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P Device MAC address */
+ uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+ uint8 pri_devtype[8]; /* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN 8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+ uint8 len;
+ uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */
+ uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */
+ uint8 devcap; /* Device Capability */
+ uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+ uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */
+ uint8 secdts; /* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ struct ether_addr addr; /* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mg_bitmap; /* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+ uint8 category; /* P2P_AF_CATEGORY */
+ uint8 OUI[3]; /* OUI - P2P_OUI */
+ uint8 type; /* OUI Type - P2P_VER */
+ uint8 subtype; /* OUI Subtype - P2P_AF_* */
+ uint8 dialog_token; /* nonzero, identifies req/resp transaction */
+ uint8 elts[1]; /* Variable length information elements. Max size =
+ * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY 0x7f
+
+#define P2P_AF_FIXED_LEN 7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+ uint8 category; /* P2P_PUB_AF_CATEGORY */
+ uint8 action; /* P2P_PUB_AF_ACTION */
+ uint8 oui[3]; /* P2P_OUI */
+ uint8 oui_type; /* OUI type - P2P_VER */
+ uint8 subtype; /* OUI subtype - P2P_TYPE_* */
+ uint8 dialog_token; /* nonzero, identifies req/rsp transaction */
+ uint8 elts[1]; /* Variable length information elements. Max size =
+ * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN 8
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+ uint8 cnt_type; /* Count/Type */
+ uint32 duration; /* Duration */
+ uint32 interval; /* Interval */
+ uint32 start; /* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+ uint8 eltId; /* Subelement ID */
+ uint8 len[2]; /* Length */
+ uint8 index; /* Index */
+ uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */
+ wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN 5
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */
+
+/* ops_ctw_parms field values */
+#define P2P_NOA_CTW_MASK 0x7f
+#define P2P_NOA_OPS_MASK 0x80
+#define P2P_NOA_OPS_SHIFT 7
+
+#define P2P_CTW_MIN 10 /* minimum 10TU */
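The ops_ctw_parms octet of the Notice of Absence subelement packs the OppPS flag into bit 7 and the CTWindow into bits 6:0, per the masks above. A decoding sketch for illustration only:

#include <stdint.h>
#include <stdbool.h>

static void noa_parms_decode(uint8_t ops_ctw_parms, bool *oppps, uint8_t *ctwindow)
{
	*oppps = (ops_ctw_parms & 0x80) != 0;   /* P2P_NOA_OPS_MASK */
	*ctwindow = ops_ctw_parms & 0x7f;       /* P2P_NOA_CTW_MASK, >= P2P_CTW_MIN when OppPS is set */
}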
+
+/*
+ * P2P Service Discovery related
+ */
+#define P2PSD_ACTION_CATEGORY 0x04
+ /* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a
+ /* Action value for GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b
+ /* Action value for GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c
+ /* Action value for GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d
+ /* Action value for GAS Comeback Response AF */
+#define P2PSD_AD_EID 0x6c
+ /* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
+ /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID 0x00
+ /* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI P2P_OUI
+ /* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE P2P_VER
+ /* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID 0xDDDD
+ /* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY 0x00
+ /* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+ SVC_RPOTYPE_ALL = 0,
+ SVC_RPOTYPE_BONJOUR = 1,
+ SVC_RPOTYPE_UPNP = 2,
+ SVC_RPOTYPE_WSD = 3,
+ SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+ P2PSD_RESP_STATUS_SUCCESS = 0,
+ P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+ P2PSD_RESP_STATUS_DATA_NA = 2,
+ P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+ uint8 llm_pamebi; /* Query Response Length Limit (bits 0-6, set to 0) plus
+ * Pre-Associated Message Exchange BSSID Independent (bit 7, set to 0)
+ */
+ uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+ uint8 id; /* IE ID: 0x6c - 108 */
+ uint8 len; /* IE length */
+ wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. Only one
+ * tuple is defined for P2P Service Discovery
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+ uint8 oui_subtype; /* OUI Subtype: 0x09 */
+ uint16 svc_updi; /* Service Update Indicator */
+ uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request,
+ * wifi_p2psd_qresp_tlv_t type for service response
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+ uint16 len; /* Length: 5 plus size of Query Data */
+ uint8 svc_prot; /* Service Protocol Type */
+ uint8 svc_tscid; /* Service Transaction ID */
+ uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+ uint16 info_id; /* Info ID: 0xDDDD */
+ uint16 len; /* Length of service request TLV, 5 plus the size of request data */
+ uint8 oui[3]; /* WFA OUI: 0x0050F2 */
+ uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qreq_len; /* Query Request Length */
+ uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+ uint16 len; /* Length: 5 plus size of Query Data */
+ uint8 svc_prot; /* Service Protocol Type */
+ uint8 svc_tscid; /* Service Transaction ID */
+ uint8 status; /* Value defined in Table 57 of P2P spec. */
+ uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+ uint16 info_id; /* Info ID: 0xDDDD */
+ uint16 len; /* Length of service response TLV, 6 plus the size of resp data */
+ uint8 oui[3]; /* WFA OUI: 0x0050F2 */
+ uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+ uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */
+ uint16 cb_delay; /* GAS Comeback Delay */
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qresp_len; /* Query Response Length */
+ uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+ uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */
+ uint8 fragment_id; /* Fragmentation ID */
+ uint16 cb_delay; /* GAS Comeback Delay */
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qresp_len; /* Query Response Length */
+ uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+ uint8 category; /* 0x04 Public Action Frame */
+ uint8 action; /* 0x6c Advertisement Protocol */
+ uint8 dialog_token; /* nonzero, identifies req/rsp transaction */
+ uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t
+ * or wifi_p2psd_gas_iresp_frame_t format
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
new file mode 100644
index 000000000000..7fe4fbce310e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
@@ -0,0 +1,76 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h,v 9.2.120.1 2010-11-15 17:56:25 Exp $
+ */
+
+#ifndef _SD_SPI_H
+#define _SD_SPI_H
+
+#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */
+#define SPI_START_S 31
+#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */
+#define SPI_DIR_S 30
+#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S 24
+#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */
+#define SPI_RW_S 23
+#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */
+#define SPI_FUNC_S 20
+#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */
+#define SPI_RAW_S 19
+#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */
+#define SPI_STUFF_S 18
+#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */
+#define SPI_BLKMODE_S 19
+#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */
+#define SPI_OPCODE_S 18
+#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */
+#define SPI_ADDR_S 1
+#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */
+#define SPI_STUFF0_S 0
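The SPI_* masks and shifts above describe the first 32 bits of the 48-bit SD-SPI command frame for the IO_RW commands. A packing sketch (not part of the patch) using literal masks that match the BITFIELD_MASK() widths above; all field values are caller-supplied examples:

#include <stdint.h>

static uint32_t sdspi_cmd53_word(uint8_t cmd_index, int write, uint8_t func,
				 int blockmode, int opcode, uint32_t addr)
{
	uint32_t w = 0;

	w |= 0u << 31;                               /* SPI_START_S: start bit is 0 */
	w |= 1u << 30;                               /* SPI_DIR_S: host-to-card */
	w |= ((uint32_t)cmd_index & 0x3f) << 24;     /* SPI_CMD_INDEX_* */
	w |= (write ? 1u : 0u) << 23;                /* SPI_RW_* */
	w |= ((uint32_t)func & 0x7) << 20;           /* SPI_FUNC_* */
	w |= (blockmode ? 1u : 0u) << 19;            /* SPI_BLKMODE_* */
	w |= (opcode ? 1u : 0u) << 18;               /* SPI_OPCODE_* */
	w |= (addr & 0x1ffff) << 1;                  /* SPI_ADDR_*, 17 bits */
	return w;                                    /* bit 0 left as stuff bit */
}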
+
+#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */
+#define SPI_RSP_START_S 7
+#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */
+#define SPI_RSP_PARAM_ERR_S 6
+#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */
+#define SPI_RSP_RFU5_S 5
+#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */
+#define SPI_RSP_FUNC_ERR_S 4
+#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */
+#define SPI_RSP_CRC_ERR_S 3
+#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */
+#define SPI_RSP_ILL_CMD_S 2
+#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */
+#define SPI_RSP_RFU1_S 1
+#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */
+#define SPI_RSP_IDLE_S 0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */
+#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK 0x80
+
+#endif /* _SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
new file mode 100644
index 000000000000..07fa7e499c23
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
@@ -0,0 +1,70 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h,v 9.7 2009-03-13 01:11:50 Exp $
+ */
+
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define VLAN_VID_MASK 0xfff
+#define VLAN_CFI_SHIFT 12
+#define VLAN_PRI_SHIFT 13
+
+#define VLAN_PRI_MASK 7
+
+#define VLAN_TAG_LEN 4
+#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN)
+
+#define VLAN_TPID 0x8100
+
+struct ethervlan_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 vlan_type;
+ uint16 vlan_tag;
+ uint16 ether_type;
+};
+
+#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
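The vlan_tag field of struct ethervlan_header is the 802.1Q TCI: 3 priority bits, the CFI bit, and a 12-bit VLAN ID, per the masks and shifts above. A decoding sketch for illustration only, assuming the value is already in host order:

#include <stdint.h>

static void vlan_tci_decode(uint16_t tci, uint8_t *pri, uint8_t *cfi, uint16_t *vid)
{
	*pri = (tci >> 13) & 0x7;      /* VLAN_PRI_SHIFT / VLAN_PRI_MASK */
	*cfi = (tci >> 12) & 0x1;      /* VLAN_CFI_SHIFT */
	*vid = tci & 0x0fff;           /* VLAN_VID_MASK */
}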
+
+
+
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+ struct ethervlan_header t; \
+ t = *(struct ethervlan_header *)(s); \
+ *(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 000000000000..1ff06dc79423
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,160 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h,v 1.19 2009-07-13 08:29:58 Exp $
+ */
+
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+
+#include <packed_section_start.h>
+
+
+
+
+#define DOT11_RC_INVALID_WPA_IE 13
+#define DOT11_RC_MIC_FAILURE 14
+#define DOT11_RC_4WH_TIMEOUT 15
+#define DOT11_RC_GTK_UPDATE_TIMEOUT 16
+#define DOT11_RC_WPA_IE_MISMATCH 17
+#define DOT11_RC_INVALID_MC_CIPHER 18
+#define DOT11_RC_INVALID_UC_CIPHER 19
+#define DOT11_RC_INVALID_AKMP 20
+#define DOT11_RC_BAD_WPA_VERSION 21
+#define DOT11_RC_INVALID_WPA_CAP 22
+#define DOT11_RC_8021X_AUTH_FAIL 23
+
+#define WPA2_PMKID_LEN 16
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 tag;
+ uint8 length;
+ uint8 oui[3];
+ uint8 oui_type;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN 4
+#define WPA_IE_FIXED_LEN 8
+#define WPA_IE_TAG_FIXED_LEN 6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 tag;
+ uint8 length;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN 4
+#define WPA_RSN_IE_TAG_FIXED_LEN 2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 oui[3];
+ uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN 4
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN 2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
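The count field used by wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t and wpa_pmkid_list_t is stored as separate low and high bytes, i.e. little-endian regardless of host. A one-line illustrative helper, not part of the patch:

#include <stdint.h>

static uint16_t wpa_count_get(uint8_t low, uint8_t high)
{
	return (uint16_t)low | ((uint16_t)high << 8);   /* little-endian pair */
}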
+
+
+#define WPA_CIPHER_NONE 0
+#define WPA_CIPHER_WEP_40 1
+#define WPA_CIPHER_TKIP 2
+#define WPA_CIPHER_AES_OCB 3
+#define WPA_CIPHER_AES_CCM 4
+#define WPA_CIPHER_WEP_104 5
+
+
+#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
+ (cipher) == WPA_CIPHER_WEP_40 || \
+ (cipher) == WPA_CIPHER_WEP_104 || \
+ (cipher) == WPA_CIPHER_TKIP || \
+ (cipher) == WPA_CIPHER_AES_OCB || \
+ (cipher) == WPA_CIPHER_AES_CCM)
+
+
+#define WPA_TKIP_CM_DETECT 60
+#define WPA_TKIP_CM_BLOCK 60
+
+
+#define RSN_CAP_LEN 2
+
+
+#define RSN_CAP_PREAUTH 0x0001
+#define RSN_CAP_NOPAIRWISE 0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4
+#define RSN_CAP_1_REPLAY_CNTR 0
+#define RSN_CAP_2_REPLAY_CNTRS 1
+#define RSN_CAP_4_REPLAY_CNTRS 2
+#define RSN_CAP_16_REPLAY_CNTRS 3
+
+
+#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+
+#define WPA_CAP_LEN RSN_CAP_LEN
+
+#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 000000000000..cbd37490f1cb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,1615 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h,v 13.169.2.14 2011-02-10 23:43:55 Exp $
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+
+#ifndef _SBCHIPC_H
+#define _SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+typedef volatile struct {
+ uint32 chipid;
+ uint32 capabilities;
+ uint32 corecontrol;
+ uint32 bist;
+
+
+ uint32 otpstatus;
+ uint32 otpcontrol;
+ uint32 otpprog;
+ uint32 otplayout;
+
+
+ uint32 intstatus;
+ uint32 intmask;
+
+
+ uint32 chipcontrol;
+ uint32 chipstatus;
+
+
+ uint32 jtagcmd;
+ uint32 jtagir;
+ uint32 jtagdr;
+ uint32 jtagctrl;
+
+
+ uint32 flashcontrol;
+ uint32 flashaddress;
+ uint32 flashdata;
+ uint32 PAD[1];
+
+
+ uint32 broadcastaddress;
+ uint32 broadcastdata;
+
+
+ uint32 gpiopullup;
+ uint32 gpiopulldown;
+ uint32 gpioin;
+ uint32 gpioout;
+ uint32 gpioouten;
+ uint32 gpiocontrol;
+ uint32 gpiointpolarity;
+ uint32 gpiointmask;
+
+
+ uint32 gpioevent;
+ uint32 gpioeventintmask;
+
+
+ uint32 watchdog;
+
+
+ uint32 gpioeventintpolarity;
+
+
+ uint32 gpiotimerval;
+ uint32 gpiotimeroutmask;
+
+
+ uint32 clockcontrol_n;
+ uint32 clockcontrol_sb;
+ uint32 clockcontrol_pci;
+ uint32 clockcontrol_m2;
+ uint32 clockcontrol_m3;
+ uint32 clkdiv;
+ uint32 gpiodebugsel;
+ uint32 capabilities_ext;
+
+
+ uint32 pll_on_delay;
+ uint32 fref_sel_delay;
+ uint32 slow_clk_ctl;
+ uint32 PAD;
+
+
+ uint32 system_clk_ctl;
+ uint32 clkstatestretch;
+ uint32 PAD[2];
+
+
+ uint32 bp_addrlow;
+ uint32 bp_addrhigh;
+ uint32 bp_data;
+ uint32 PAD;
+ uint32 bp_indaccess;
+
+ uint32 gsioctrl;
+ uint32 gsioaddress;
+ uint32 gsiodata;
+
+
+ uint32 clkdiv2;
+ uint32 PAD[2];
+
+
+ uint32 eromptr;
+
+
+ uint32 pcmcia_config;
+ uint32 pcmcia_memwait;
+ uint32 pcmcia_attrwait;
+ uint32 pcmcia_iowait;
+ uint32 ide_config;
+ uint32 ide_memwait;
+ uint32 ide_attrwait;
+ uint32 ide_iowait;
+ uint32 prog_config;
+ uint32 prog_waitcount;
+ uint32 flash_config;
+ uint32 flash_waitcount;
+ uint32 PAD[4];
+ uint32 PAD[40];
+
+
+
+ uint32 clk_ctl_st;
+ uint32 hw_war;
+ uint32 PAD[70];
+
+
+ uint8 uart0data;
+ uint8 uart0imr;
+ uint8 uart0fcr;
+ uint8 uart0lcr;
+ uint8 uart0mcr;
+ uint8 uart0lsr;
+ uint8 uart0msr;
+ uint8 uart0scratch;
+ uint8 PAD[248];
+
+ uint8 uart1data;
+ uint8 uart1imr;
+ uint8 uart1fcr;
+ uint8 uart1lcr;
+ uint8 uart1mcr;
+ uint8 uart1lsr;
+ uint8 uart1msr;
+ uint8 uart1scratch;
+ uint32 PAD[126];
+
+
+
+ uint32 pmucontrol;
+ uint32 pmucapabilities;
+ uint32 pmustatus;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmutimer;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 res_table_sel;
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel;
+ uint32 gpioenable;
+ uint32 res_req_timer_sel;
+ uint32 res_req_timer;
+ uint32 res_req_mask;
+ uint32 PAD;
+ uint32 chipcontrol_addr;
+ uint32 chipcontrol_data;
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt;
+ uint32 pmu_xtalfreq;
+ uint32 PAD[100];
+ uint16 sromotp[768];
+} chipcregs_t;
+
+#endif
+
+
+#define CC_CHIPID 0
+#define CC_CAPABILITIES 4
+#define CC_CHIPST 0x2c
+#define CC_EROMPTR 0xfc
+
+
+#define CC_OTPST 0x10
+#define CC_JTAGCMD 0x30
+#define CC_JTAGIR 0x34
+#define CC_JTAGDR 0x38
+#define CC_JTAGCTRL 0x3c
+#define CC_GPIOPU 0x58
+#define CC_GPIOPD 0x5c
+#define CC_GPIOIN 0x60
+#define CC_GPIOOUT 0x64
+#define CC_GPIOOUTEN 0x68
+#define CC_GPIOCTRL 0x6c
+#define CC_GPIOPOL 0x70
+#define CC_GPIOINTM 0x74
+#define CC_WATCHDOG 0x80
+#define CC_CLKC_N 0x90
+#define CC_CLKC_M0 0x94
+#define CC_CLKC_M1 0x98
+#define CC_CLKC_M2 0x9c
+#define CC_CLKC_M3 0xa0
+#define CC_CLKDIV 0xa4
+#define CC_SYS_CLK_CTL 0xc0
+#define CC_CLK_CTL_ST SI_CLK_CTL_ST
+#define PMU_CTL 0x600
+#define PMU_CAP 0x604
+#define PMU_ST 0x608
+#define PMU_RES_STATE 0x60c
+#define PMU_TIMER 0x614
+#define PMU_MIN_RES_MASK 0x618
+#define PMU_MAX_RES_MASK 0x61c
+#define CC_CHIPCTL_ADDR 0x650
+#define CC_CHIPCTL_DATA 0x654
+#define PMU_REG_CONTROL_ADDR 0x658
+#define PMU_REG_CONTROL_DATA 0x65C
+#define PMU_PLL_CONTROL_ADDR 0x660
+#define PMU_PLL_CONTROL_DATA 0x664
+#define CC_SROM_OTP 0x800
+
+
+#define CID_ID_MASK 0x0000ffff
+#define CID_REV_MASK 0x000f0000
+#define CID_REV_SHIFT 16
+#define CID_PKG_MASK 0x00f00000
+#define CID_PKG_SHIFT 20
+#define CID_CC_MASK 0x0f000000
+#define CID_CC_SHIFT 24
+#define CID_TYPE_MASK 0xf0000000
+#define CID_TYPE_SHIFT 28
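The CID_* masks above split the chipcommon chipid register into chip number, revision, package option, core count and chip type. A decoding sketch, for illustration only:

#include <stdint.h>

struct chip_id_fields {
	uint16_t chip;                 /* CID_ID_MASK */
	uint8_t rev;                   /* CID_REV_* */
	uint8_t pkg;                   /* CID_PKG_* */
	uint8_t cores;                 /* CID_CC_* (core count) */
	uint8_t type;                  /* CID_TYPE_* */
};

static void chipid_decode(uint32_t chipid, struct chip_id_fields *out)
{
	out->chip  = chipid & 0x0000ffff;
	out->rev   = (chipid & 0x000f0000) >> 16;
	out->pkg   = (chipid & 0x00f00000) >> 20;
	out->cores = (chipid & 0x0f000000) >> 24;
	out->type  = (chipid & 0xf0000000) >> 28;
}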
+
+
+#define CC_CAP_UARTS_MASK 0x00000003
+#define CC_CAP_MIPSEB 0x00000004
+#define CC_CAP_UCLKSEL 0x00000018
+#define CC_CAP_UINTCLK 0x00000008
+#define CC_CAP_UARTGPIO 0x00000020
+#define CC_CAP_EXTBUS_MASK 0x000000c0
+#define CC_CAP_EXTBUS_NONE 0x00000000
+#define CC_CAP_EXTBUS_FULL 0x00000040
+#define CC_CAP_EXTBUS_PROG 0x00000080
+#define CC_CAP_FLASH_MASK 0x00000700
+#define CC_CAP_PLL_MASK 0x00038000
+#define CC_CAP_PWR_CTL 0x00040000
+#define CC_CAP_OTPSIZE 0x00380000
+#define CC_CAP_OTPSIZE_SHIFT 19
+#define CC_CAP_OTPSIZE_BASE 5
+#define CC_CAP_JTAGP 0x00400000
+#define CC_CAP_ROM 0x00800000
+#define CC_CAP_BKPLN64 0x08000000
+#define CC_CAP_PMU 0x10000000
+#define CC_CAP_ECI 0x20000000
+#define CC_CAP_SROM 0x40000000
+#define CC_CAP_NFLASH 0x80000000
+
+#define CC_CAP2_SECI 0x00000001
+#define CC_CAP2_GSIO 0x00000002
+
+
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001
+
+
+#define PLL_NONE 0x00000000
+#define PLL_TYPE1 0x00010000
+#define PLL_TYPE2 0x00020000
+#define PLL_TYPE3 0x00030000
+#define PLL_TYPE4 0x00008000
+#define PLL_TYPE5 0x00018000
+#define PLL_TYPE6 0x00028000
+#define PLL_TYPE7 0x00038000
+
+
+#define ILP_CLOCK 32000
+
+
+#define ALP_CLOCK 20000000
+
+
+#define HT_CLOCK 80000000
+
+
+#define CC_UARTCLKO 0x00000001
+#define CC_SE 0x00000002
+#define CC_ASYNCGPIO 0x00000004
+#define CC_UARTCLKEN 0x00000008
+
+
+#define CHIPCTRL_4321A0_DEFAULT 0x3a4
+#define CHIPCTRL_4321A1_DEFAULT 0x0a4
+#define CHIPCTRL_4321_PLL_DOWN 0x800000
+
+
+#define OTPS_OL_MASK 0x000000ff
+#define OTPS_OL_MFG 0x00000001
+#define OTPS_OL_OR1 0x00000002
+#define OTPS_OL_OR2 0x00000004
+#define OTPS_OL_GU 0x00000008
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100
+#define OTPS_GUP_SW 0x00000200
+#define OTPS_GUP_CI 0x00000400
+#define OTPS_GUP_FUSE 0x00000800
+#define OTPS_READY 0x00001000
+#define OTPS_RV(x) (1 << (16 + (x)))
+#define OTPS_RV_MASK 0x0fff0000
+
+
+#define OTPC_PROGSEL 0x00000001
+#define OTPC_PCOUNT_MASK 0x0000000e
+#define OTPC_PCOUNT_SHIFT 1
+#define OTPC_VSEL_MASK 0x000000f0
+#define OTPC_VSEL_SHIFT 4
+#define OTPC_TMM_MASK 0x00000700
+#define OTPC_TMM_SHIFT 8
+#define OTPC_ODM 0x00000800
+#define OTPC_PROGEN 0x80000000
+
+
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+#define OTPP_READ 0x40000000
+
+
+#define OTP_CISFORMAT_NEW 0x80000000
+
+
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+
+
+#define JTAGM_CREV_OLD 10
+#define JTAGM_CREV_IRP 22
+#define JTAGM_CREV_RTI 28
+
+
+#define JCMD_START 0x80000000
+#define JCMD_BUSY 0x80000000
+#define JCMD_STATE_MASK 0x60000000
+#define JCMD_STATE_TLR 0x00000000
+#define JCMD_STATE_PIR 0x20000000
+#define JCMD_STATE_PDR 0x40000000
+#define JCMD_STATE_RTI 0x60000000
+#define JCMD0_ACC_MASK 0x0000f000
+#define JCMD0_ACC_IRDR 0x00000000
+#define JCMD0_ACC_DR 0x00001000
+#define JCMD0_ACC_IR 0x00002000
+#define JCMD0_ACC_RESET 0x00003000
+#define JCMD0_ACC_IRPDR 0x00004000
+#define JCMD0_ACC_PDR 0x00005000
+#define JCMD0_IRW_MASK 0x00000f00
+#define JCMD_ACC_MASK 0x000f0000
+#define JCMD_ACC_IRDR 0x00000000
+#define JCMD_ACC_DR 0x00010000
+#define JCMD_ACC_IR 0x00020000
+#define JCMD_ACC_RESET 0x00030000
+#define JCMD_ACC_IRPDR 0x00040000
+#define JCMD_ACC_PDR 0x00050000
+#define JCMD_ACC_PIR 0x00060000
+#define JCMD_ACC_IRDR_I 0x00070000
+#define JCMD_ACC_DR_I 0x00080000
+#define JCMD_IRW_MASK 0x00001f00
+#define JCMD_IRW_SHIFT 8
+#define JCMD_DRW_MASK 0x0000003f
+
+
+#define JCTRL_FORCE_CLK 4
+#define JCTRL_EXT_EN 2
+#define JCTRL_EN 1
+
+
+#define CLKD_SFLASH 0x0f000000
+#define CLKD_SFLASH_SHIFT 24
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+#define CLKD_JTAG 0x00000f00
+#define CLKD_JTAG_SHIFT 8
+#define CLKD_UART 0x000000ff
+
+#define CLKD2_SROM 0x00000003
+
+
+#define CI_GPIO 0x00000001
+#define CI_EI 0x00000002
+#define CI_TEMP 0x00000004
+#define CI_SIRQ 0x00000008
+#define CI_ECI 0x00000010
+#define CI_PMU 0x00000020
+#define CI_UART 0x00000040
+#define CI_WDRESET 0x80000000
+
+
+#define SCC_SS_MASK 0x00000007
+#define SCC_SS_LPO 0x00000000
+#define SCC_SS_XTAL 0x00000001
+#define SCC_SS_PCI 0x00000002
+#define SCC_LF 0x00000200
+#define SCC_LP 0x00000400
+#define SCC_FS 0x00000800
+#define SCC_IP 0x00001000
+#define SCC_XC 0x00002000
+#define SCC_XP 0x00004000
+#define SCC_CD_MASK 0xffff0000
+#define SCC_CD_SHIFT 16
+
+
+#define SYCC_IE 0x00000001
+#define SYCC_AE 0x00000002
+#define SYCC_FP 0x00000004
+#define SYCC_AR 0x00000008
+#define SYCC_HR 0x00000010
+#define SYCC_CD_MASK 0xffff0000
+#define SYCC_CD_SHIFT 16
+
+
+#define BPIA_BYTEEN 0x0000000f
+#define BPIA_SZ1 0x00000001
+#define BPIA_SZ2 0x00000003
+#define BPIA_SZ4 0x00000007
+#define BPIA_SZ8 0x0000000f
+#define BPIA_WRITE 0x00000100
+#define BPIA_START 0x00000200
+#define BPIA_BUSY 0x00000200
+#define BPIA_ERROR 0x00000400
+
+
+#define CF_EN 0x00000001
+#define CF_EM_MASK 0x0000000e
+#define CF_EM_SHIFT 1
+#define CF_EM_FLASH 0
+#define CF_EM_SYNC 2
+#define CF_EM_PCMCIA 4
+#define CF_DS 0x00000010
+#define CF_BS 0x00000020
+#define CF_CD_MASK 0x000000c0
+#define CF_CD_SHIFT 6
+#define CF_CD_DIV2 0x00000000
+#define CF_CD_DIV3 0x00000040
+#define CF_CD_DIV4 0x00000080
+#define CF_CE 0x00000100
+#define CF_SB 0x00000200
+
+
+#define PM_W0_MASK 0x0000003f
+#define PM_W1_MASK 0x00001f00
+#define PM_W1_SHIFT 8
+#define PM_W2_MASK 0x001f0000
+#define PM_W2_SHIFT 16
+#define PM_W3_MASK 0x1f000000
+#define PM_W3_SHIFT 24
+
+
+#define PA_W0_MASK 0x0000003f
+#define PA_W1_MASK 0x00001f00
+#define PA_W1_SHIFT 8
+#define PA_W2_MASK 0x001f0000
+#define PA_W2_SHIFT 16
+#define PA_W3_MASK 0x1f000000
+#define PA_W3_SHIFT 24
+
+
+#define PI_W0_MASK 0x0000003f
+#define PI_W1_MASK 0x00001f00
+#define PI_W1_SHIFT 8
+#define PI_W2_MASK 0x001f0000
+#define PI_W2_SHIFT 16
+#define PI_W3_MASK 0x1f000000
+#define PI_W3_SHIFT 24
+
+
+#define PW_W0_MASK 0x0000001f
+#define PW_W1_MASK 0x00001f00
+#define PW_W1_SHIFT 8
+#define PW_W2_MASK 0x001f0000
+#define PW_W2_SHIFT 16
+#define PW_W3_MASK 0x1f000000
+#define PW_W3_SHIFT 24
+
+#define PW_W0 0x0000000c
+#define PW_W1 0x00000a00
+#define PW_W2 0x00020000
+#define PW_W3 0x01000000
+
+
+#define FW_W0_MASK 0x0000003f
+#define FW_W1_MASK 0x00001f00
+#define FW_W1_SHIFT 8
+#define FW_W2_MASK 0x001f0000
+#define FW_W2_SHIFT 16
+#define FW_W3_MASK 0x1f000000
+#define FW_W3_SHIFT 24
+
+
+#define SRC_START 0x80000000
+#define SRC_BUSY 0x80000000
+#define SRC_OPCODE 0x60000000
+#define SRC_OP_READ 0x00000000
+#define SRC_OP_WRITE 0x20000000
+#define SRC_OP_WRDIS 0x40000000
+#define SRC_OP_WREN 0x60000000
+#define SRC_OTPSEL 0x00000010
+#define SRC_LOCK 0x00000008
+#define SRC_SIZE_MASK 0x00000006
+#define SRC_SIZE_1K 0x00000000
+#define SRC_SIZE_4K 0x00000002
+#define SRC_SIZE_16K 0x00000004
+#define SRC_SIZE_SHIFT 1
+#define SRC_PRESENT 0x00000001
+
+
+#define PCTL_ILP_DIV_MASK 0xffff0000
+#define PCTL_ILP_DIV_SHIFT 16
+#define PCTL_PLL_PLLCTL_UPD 0x00000400
+#define PCTL_NOILP_ON_WAIT 0x00000200
+#define PCTL_HT_REQ_EN 0x00000100
+#define PCTL_ALP_REQ_EN 0x00000080
+#define PCTL_XTALFREQ_MASK 0x0000007c
+#define PCTL_XTALFREQ_SHIFT 2
+#define PCTL_ILP_DIV_EN 0x00000002
+#define PCTL_LPO_SEL 0x00000001
+
+
+#define CSTRETCH_HT 0xffff0000
+#define CSTRETCH_ALP 0x0000ffff
+
+
+#define GPIO_ONTIME_SHIFT 16
+
+
+#define CN_N1_MASK 0x3f
+#define CN_N2_MASK 0x3f00
+#define CN_N2_SHIFT 8
+#define CN_PLLC_MASK 0xf0000
+#define CN_PLLC_SHIFT 16
+
+
+#define CC_M1_MASK 0x3f
+#define CC_M2_MASK 0x3f00
+#define CC_M2_SHIFT 8
+#define CC_M3_MASK 0x3f0000
+#define CC_M3_SHIFT 16
+#define CC_MC_MASK 0x1f000000
+#define CC_MC_SHIFT 24
+
+
+#define CC_F6_2 0x02
+#define CC_F6_3 0x03
+#define CC_F6_4 0x05
+#define CC_F6_5 0x09
+#define CC_F6_6 0x11
+#define CC_F6_7 0x21
+
+#define CC_F5_BIAS 5
+
+#define CC_MC_BYPASS 0x08
+#define CC_MC_M1 0x04
+#define CC_MC_M1M2 0x02
+#define CC_MC_M1M2M3 0x01
+#define CC_MC_M1M3 0x11
+
+
+#define CC_T2_BIAS 2
+#define CC_T2M2_BIAS 3
+
+#define CC_T2MC_M1BYP 1
+#define CC_T2MC_M2BYP 2
+#define CC_T2MC_M3BYP 4
+
+
+#define CC_T6_MMASK 1
+#define CC_T6_M0 120000000
+#define CC_T6_M1 100000000
+#define SB2MIPS_T6(sb) (2 * (sb))
+
+
+#define CC_CLOCK_BASE1 24000000
+#define CC_CLOCK_BASE2 12500000
+
+
+#define CLKC_5350_N 0x0311
+#define CLKC_5350_M 0x04020009
+
+
+#define FLASH_NONE 0x000
+#define SFLASH_ST 0x100
+#define SFLASH_AT 0x200
+#define PFLASH 0x700
+
+
+#define CC_CFG_EN 0x0001
+#define CC_CFG_EM_MASK 0x000e
+#define CC_CFG_EM_ASYNC 0x0000
+#define CC_CFG_EM_SYNC 0x0002
+#define CC_CFG_EM_PCMCIA 0x0004
+#define CC_CFG_EM_IDE 0x0006
+#define CC_CFG_DS 0x0010
+#define CC_CFG_CD_MASK 0x00e0
+#define CC_CFG_CE 0x0100
+#define CC_CFG_SB 0x0200
+#define CC_CFG_IS 0x0400
+
+
+#define CC_EB_BASE 0x1a000000
+#define CC_EB_PCMCIA_MEM 0x1a000000
+#define CC_EB_PCMCIA_IO 0x1a200000
+#define CC_EB_PCMCIA_CFG 0x1a400000
+#define CC_EB_IDE 0x1a800000
+#define CC_EB_PCMCIA1_MEM 0x1a800000
+#define CC_EB_PCMCIA1_IO 0x1aa00000
+#define CC_EB_PCMCIA1_CFG 0x1ac00000
+#define CC_EB_PROGIF 0x1b000000
+
+
+
+#define SFLASH_OPCODE 0x000000ff
+#define SFLASH_ACTION 0x00000700
+#define SFLASH_CS_ACTIVE 0x00001000
+#define SFLASH_START 0x80000000
+#define SFLASH_BUSY SFLASH_START
+
+
+#define SFLASH_ACT_OPONLY 0x0000
+#define SFLASH_ACT_OP1D 0x0100
+#define SFLASH_ACT_OP3A 0x0200
+#define SFLASH_ACT_OP3A1D 0x0300
+#define SFLASH_ACT_OP3A4D 0x0400
+#define SFLASH_ACT_OP3A4X4D 0x0500
+#define SFLASH_ACT_OP3A1X4D 0x0700
+
+
+#define SFLASH_ST_WREN 0x0006
+#define SFLASH_ST_WRDIS 0x0004
+#define SFLASH_ST_RDSR 0x0105
+#define SFLASH_ST_WRSR 0x0101
+#define SFLASH_ST_READ 0x0303
+#define SFLASH_ST_PP 0x0302
+#define SFLASH_ST_SE 0x02d8
+#define SFLASH_ST_BE 0x00c7
+#define SFLASH_ST_DP 0x00b9
+#define SFLASH_ST_RES 0x03ab
+#define SFLASH_ST_CSA 0x1000
+#define SFLASH_ST_SSE 0x0220
+
+
+#define SFLASH_ST_WIP 0x01
+#define SFLASH_ST_WEL 0x02
+#define SFLASH_ST_BP_MASK 0x1c
+#define SFLASH_ST_BP_SHIFT 2
+#define SFLASH_ST_SRWD 0x80
+
+
+#define SFLASH_AT_READ 0x07e8
+#define SFLASH_AT_PAGE_READ 0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS 0x01d7
+#define SFLASH_AT_BUF1_WRITE 0x0384
+#define SFLASH_AT_BUF2_WRITE 0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286
+#define SFLASH_AT_BUF1_PROGRAM 0x0288
+#define SFLASH_AT_BUF2_PROGRAM 0x0289
+#define SFLASH_AT_PAGE_ERASE 0x0281
+#define SFLASH_AT_BLOCK_ERASE 0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define SFLASH_AT_BUF1_LOAD 0x0253
+#define SFLASH_AT_BUF2_LOAD 0x0255
+#define SFLASH_AT_BUF1_COMPARE 0x0260
+#define SFLASH_AT_BUF2_COMPARE 0x0261
+#define SFLASH_AT_BUF1_REPROGRAM 0x0258
+#define SFLASH_AT_BUF2_REPROGRAM 0x0259
+
+
+#define SFLASH_AT_READY 0x80
+#define SFLASH_AT_MISMATCH 0x40
+#define SFLASH_AT_ID_MASK 0x38
+#define SFLASH_AT_ID_SHIFT 3
+
+
+#define GSIO_START 0x80000000
+#define GSIO_BUSY GSIO_START
+
+
+
+#define UART_RX 0
+#define UART_TX 0
+#define UART_DLL 0
+#define UART_IER 1
+#define UART_DLM 1
+#define UART_IIR 2
+#define UART_FCR 2
+#define UART_LCR 3
+#define UART_MCR 4
+#define UART_LSR 5
+#define UART_MSR 6
+#define UART_SCR 7
+#define UART_LCR_DLAB 0x80
+#define UART_LCR_WLEN8 0x03
+#define UART_MCR_OUT2 0x08
+#define UART_MCR_LOOP 0x10
+#define UART_LSR_RX_FIFO 0x80
+#define UART_LSR_TDHR 0x40
+#define UART_LSR_THRE 0x20
+#define UART_LSR_BREAK 0x10
+#define UART_LSR_FRAMING 0x08
+#define UART_LSR_PARITY 0x04
+#define UART_LSR_OVERRUN 0x02
+#define UART_LSR_RXRDY 0x01
+#define UART_FCR_FIFO_ENABLE 1
+
+
+#define UART_IIR_FIFO_MASK 0xc0
+#define UART_IIR_INT_MASK 0xf
+#define UART_IIR_MDM_CHG 0x0
+#define UART_IIR_NOINT 0x1
+#define UART_IIR_THRE 0x2
+#define UART_IIR_RCVD_DATA 0x4
+#define UART_IIR_RCVR_STATUS 0x6
+#define UART_IIR_CHAR_TIME 0xc
+
+
+#define UART_IER_EDSSI 8
+#define UART_IER_ELSI 4
+#define UART_IER_ETBEI 2
+#define UART_IER_ERBFI 1
+
+
+#define PST_EXTLPOAVAIL 0x0100
+#define PST_WDRESET 0x0080
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+
+
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
+
+
+
+#define PRRT_TIME_MASK 0x03ff
+#define PRRT_INTEN 0x0400
+#define PRRT_REQ_ACTIVE 0x0800
+#define PRRT_ALP_REQ 0x1000
+#define PRRT_HT_REQ 0x2000
+
+
+#define PMURES_BIT(bit) (1 << (bit))
+
+
+#define PMURES_MAX_RESNUM 30
+
+
+#define PMU_CHIPCTL0 0
+
+
+#define PMU_CC1_CLKREQ_TYPE_SHIFT 19
+#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL 1
+
+
+#define PMU_CHIPCTL1 1
+#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
+
+#define PMU_CC1_IF_TYPE_MASK 0x00000030
+#define PMU_CC1_IF_TYPE_RMII 0x00000000
+#define PMU_CC1_IF_TYPE_MII 0x00000010
+#define PMU_CC1_IF_TYPE_RGMII 0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK 0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY 0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
+#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
+
+
+
+
+
+#define PMU0_PLL0_PLLCTL0 0
+#define PMU0_PLL0_PC0_PDIV_MASK 1
+#define PMU0_PLL0_PC0_PDIV_FREQ 25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
+
+
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
+
+
+#define PMU0_PLL0_PLLCTL1 1
+#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
+#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
+#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00
+#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
+#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
+
+
+#define PMU0_PLL0_PLLCTL2 2
+#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
+#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
+
+
+
+#define PMU1_PLL0_PLLCTL0 0
+#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT 20
+#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT 24
+
+
+#define PMU1_PLL0_PLLCTL1 1
+#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT 0
+#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT 8
+#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT 16
+#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT 24
+#define PMU1_PLL0_PC1_M4DIV_BY_9 9
+#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+
+#define PMU1_PLL0_PLLCTL2 2
+#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT 0
+#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT 8
+#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2
+#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
+
+#define PMU1_PLL0_PLLCTL3 3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0
+
+
+#define PMU1_PLL0_PLLCTL4 4
+
+
+#define PMU1_PLL0_PLLCTL5 5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+
+#define PMU2_PHY_PLL_PLLCTL 4
+#define PMU2_SI_PLL_PLLCTL 10
+
+
+
+
+#define PMU2_PLL_PLLCTL0 0
+#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT 20
+#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT 24
+
+
+#define PMU2_PLL_PLLCTL1 1
+#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT 0
+#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT 8
+#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT 16
+#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT 24
+
+
+#define PMU2_PLL_PLLCTL2 2
+#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT 0
+#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT 8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17
+#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20
+
+
+#define PMU2_PLL_PLLCTL3 3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0
+
+
+#define PMU2_PLL_PLLCTL4 4
+
+
+#define PMU2_PLL_PLLCTL5 5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28
+
+
+#define PMU5_PLL_P1P2_OFF 0
+#define PMU5_PLL_P1_MASK 0x0f000000
+#define PMU5_PLL_P1_SHIFT 24
+#define PMU5_PLL_P2_MASK 0x00f00000
+#define PMU5_PLL_P2_SHIFT 20
+#define PMU5_PLL_M14_OFF 1
+#define PMU5_PLL_MDIV_MASK 0x000000ff
+#define PMU5_PLL_MDIV_WIDTH 8
+#define PMU5_PLL_NM5_OFF 2
+#define PMU5_PLL_NDIV_MASK 0xfff00000
+#define PMU5_PLL_NDIV_SHIFT 20
+#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000
+#define PMU5_PLL_NDIV_MODE_SHIFT 17
+#define PMU5_PLL_FMAB_OFF 3
+#define PMU5_PLL_MRAT_MASK 0xf0000000
+#define PMU5_PLL_MRAT_SHIFT 28
+#define PMU5_PLL_ABRAT_MASK 0x08000000
+#define PMU5_PLL_ABRAT_SHIFT 27
+#define PMU5_PLL_FDIV_MASK 0x07ffffff
+#define PMU5_PLL_PLLCTL_OFF 4
+#define PMU5_PLL_PCHI_OFF 5
+#define PMU5_PLL_PCHI_MASK 0x0000003f
+
+
+#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
+#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
+#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
+
+
+#define PMU5_MAINPLL_CPU 1
+#define PMU5_MAINPLL_MEM 2
+#define PMU5_MAINPLL_SI 3
+
+#define PMU7_PLL_PLLCTL7 7
+#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 24
+#define PMU7_PLL_CTL7_M4DIV_BY_6 6
+#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL8 8
+#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT 0
+#define PMU7_PLL_CTL8_M5DIV_BY_8 8
+#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT 8
+#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL11 11
+#define PMU7_PLL_PLLCTL11_MASK 0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL 0x22222200
+
+
+#define PMU4716_MAINPLL_PLL0 12
+
+
+#define PMU5356_MAINPLL_PLL0 0
+#define PMU5357_MAINPLL_PLL0 0
+
+
+#define RES4716_PROC_PLL_ON 0x00000040
+#define RES4716_PROC_HT_AVAIL 0x00000080
+
+
+#define CCTRL_471X_I2S_PINS_ENABLE 0x0080
+
+
+
+#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000
+#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000
+
+
+#define RES5354_EXT_SWITCHER_PWM 0
+#define RES5354_BB_SWITCHER_PWM 1
+#define RES5354_BB_SWITCHER_BURST 2
+#define RES5354_BB_EXT_SWITCHER_BURST 3
+#define RES5354_ILP_REQUEST 4
+#define RES5354_RADIO_SWITCHER_PWM 5
+#define RES5354_RADIO_SWITCHER_BURST 6
+#define RES5354_ROM_SWITCH 7
+#define RES5354_PA_REF_LDO 8
+#define RES5354_RADIO_LDO 9
+#define RES5354_AFE_LDO 10
+#define RES5354_PLL_LDO 11
+#define RES5354_BG_FILTBYP 12
+#define RES5354_TX_FILTBYP 13
+#define RES5354_RX_FILTBYP 14
+#define RES5354_XTAL_PU 15
+#define RES5354_XTAL_EN 16
+#define RES5354_BB_PLL_FILTBYP 17
+#define RES5354_RF_PLL_FILTBYP 18
+#define RES5354_BB_PLL_PU 19
+
+
+#define CCTRL5357_EXTPA (1<<14)
+#define CCTRL5357_ANT_MUX_2o3 (1<<15)
+
+
+#define RES4328_EXT_SWITCHER_PWM 0
+#define RES4328_BB_SWITCHER_PWM 1
+#define RES4328_BB_SWITCHER_BURST 2
+#define RES4328_BB_EXT_SWITCHER_BURST 3
+#define RES4328_ILP_REQUEST 4
+#define RES4328_RADIO_SWITCHER_PWM 5
+#define RES4328_RADIO_SWITCHER_BURST 6
+#define RES4328_ROM_SWITCH 7
+#define RES4328_PA_REF_LDO 8
+#define RES4328_RADIO_LDO 9
+#define RES4328_AFE_LDO 10
+#define RES4328_PLL_LDO 11
+#define RES4328_BG_FILTBYP 12
+#define RES4328_TX_FILTBYP 13
+#define RES4328_RX_FILTBYP 14
+#define RES4328_XTAL_PU 15
+#define RES4328_XTAL_EN 16
+#define RES4328_BB_PLL_FILTBYP 17
+#define RES4328_RF_PLL_FILTBYP 18
+#define RES4328_BB_PLL_PU 19
+
+
+#define RES4325_BUCK_BOOST_BURST 0
+#define RES4325_CBUCK_BURST 1
+#define RES4325_CBUCK_PWM 2
+#define RES4325_CLDO_CBUCK_BURST 3
+#define RES4325_CLDO_CBUCK_PWM 4
+#define RES4325_BUCK_BOOST_PWM 5
+#define RES4325_ILP_REQUEST 6
+#define RES4325_ABUCK_BURST 7
+#define RES4325_ABUCK_PWM 8
+#define RES4325_LNLDO1_PU 9
+#define RES4325_OTP_PU 10
+#define RES4325_LNLDO3_PU 11
+#define RES4325_LNLDO4_PU 12
+#define RES4325_XTAL_PU 13
+#define RES4325_ALP_AVAIL 14
+#define RES4325_RX_PWRSW_PU 15
+#define RES4325_TX_PWRSW_PU 16
+#define RES4325_RFPLL_PWRSW_PU 17
+#define RES4325_LOGEN_PWRSW_PU 18
+#define RES4325_AFE_PWRSW_PU 19
+#define RES4325_BBPLL_PWRSW_PU 20
+#define RES4325_HT_AVAIL 21
+
+
+#define RES4325B0_CBUCK_LPOM 1
+#define RES4325B0_CBUCK_BURST 2
+#define RES4325B0_CBUCK_PWM 3
+#define RES4325B0_CLDO_PU 4
+
+
+#define RES4325C1_LNLDO2_PU 12
+
+
+#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4325_DEFCIS_SEL 0
+#define CST4325_SPROM_SEL 1
+#define CST4325_OTP_SEL 2
+#define CST4325_OTP_PWRDN 3
+#define CST4325_SDIO_USB_MODE_MASK 0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT 2
+#define CST4325_RCAL_VALID_MASK 0x00000008
+#define CST4325_RCAL_VALID_SHIFT 3
+#define CST4325_RCAL_VALUE_MASK 0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT 4
+#define CST4325_PMUTOP_2B_MASK 0x00000200
+#define CST4325_PMUTOP_2B_SHIFT 9
+
+#define RES4329_RESERVED0 0
+#define RES4329_CBUCK_LPOM 1
+#define RES4329_CBUCK_BURST 2
+#define RES4329_CBUCK_PWM 3
+#define RES4329_CLDO_PU 4
+#define RES4329_PALDO_PU 5
+#define RES4329_ILP_REQUEST 6
+#define RES4329_RESERVED7 7
+#define RES4329_RESERVED8 8
+#define RES4329_LNLDO1_PU 9
+#define RES4329_OTP_PU 10
+#define RES4329_RESERVED11 11
+#define RES4329_LNLDO2_PU 12
+#define RES4329_XTAL_PU 13
+#define RES4329_ALP_AVAIL 14
+#define RES4329_RX_PWRSW_PU 15
+#define RES4329_TX_PWRSW_PU 16
+#define RES4329_RFPLL_PWRSW_PU 17
+#define RES4329_LOGEN_PWRSW_PU 18
+#define RES4329_AFE_PWRSW_PU 19
+#define RES4329_BBPLL_PWRSW_PU 20
+#define RES4329_HT_AVAIL 21
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0
+#define CST4329_SPROM_SEL 1
+#define CST4329_OTP_SEL 2
+#define CST4329_OTP_PWRDN 3
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+
+#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4312_DEFCIS_SEL 0
+#define CST4312_SPROM_SEL 1
+#define CST4312_OTP_SEL 2
+#define CST4312_OTP_BAD 3
+
+
+#define RES4312_SWITCHER_BURST 0
+#define RES4312_SWITCHER_PWM 1
+#define RES4312_PA_REF_LDO 2
+#define RES4312_CORE_LDO_BURST 3
+#define RES4312_CORE_LDO_PWM 4
+#define RES4312_RADIO_LDO 5
+#define RES4312_ILP_REQUEST 6
+#define RES4312_BG_FILTBYP 7
+#define RES4312_TX_FILTBYP 8
+#define RES4312_RX_FILTBYP 9
+#define RES4312_XTAL_PU 10
+#define RES4312_ALP_AVAIL 11
+#define RES4312_BB_PLL_FILTBYP 12
+#define RES4312_RF_PLL_FILTBYP 13
+#define RES4312_HT_AVAIL 14
+
+
+#define RES4322_RF_LDO 0
+#define RES4322_ILP_REQUEST 1
+#define RES4322_XTAL_PU 2
+#define RES4322_ALP_AVAIL 3
+#define RES4322_SI_PLL_ON 4
+#define RES4322_HT_SI_AVAIL 5
+#define RES4322_PHY_PLL_ON 6
+#define RES4322_HT_PHY_AVAIL 7
+#define RES4322_OTP_PU 8
+
+
+#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT 6
+#define CST4322_NO_SPROM_OTP 0
+#define CST4322_SPROM_PRESENT 1
+#define CST4322_OTP_PRESENT 2
+#define CST4322_PCI_OR_USB 0x00000100
+#define CST4322_BOOT_MASK 0x00000600
+#define CST4322_BOOT_SHIFT 9
+#define CST4322_BOOT_FROM_SRAM 0
+#define CST4322_BOOT_FROM_ROM 1
+#define CST4322_BOOT_FROM_FLASH 2
+#define CST4322_BOOT_FROM_INVALID 3
+#define CST4322_ILP_DIV_EN 0x00000800
+#define CST4322_FLASH_TYPE_MASK 0x00001000
+#define CST4322_FLASH_TYPE_SHIFT 12
+#define CST4322_FLASH_TYPE_SHIFT_ST 0
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1
+#define CST4322_ARM_TAP_SEL 0x00002000
+#define CST4322_RES_INIT_MODE_MASK 0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT 14
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0
+#define CST4322_RES_INIT_MODE_ILPREQ 1
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2
+#define CST4322_RES_INIT_MODE_HTAVAIL 3
+#define CST4322_PCIPLLCLK_GATING 0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
+#define CST4322_PCI_CARDBUS_MODE 0x00040000
+
+
+#define CCTRL43224_GPIO_TOGGLE 0x8000
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
+
+
+#define RES43236_REGULATOR 0
+#define RES43236_ILP_REQUEST 1
+#define RES43236_XTAL_PU 2
+#define RES43236_ALP_AVAIL 3
+#define RES43236_SI_PLL_ON 4
+#define RES43236_HT_SI_AVAIL 5
+
+
+#define CCTRL43236_BT_COEXIST (1<<0)
+#define CCTRL43236_SECI (1<<1)
+#define CCTRL43236_EXT_LNA (1<<2)
+#define CCTRL43236_ANT_MUX_2o3 (1<<3)
+#define CCTRL43236_GSIO (1<<4)
+
+
+#define CST43236_SFLASH_MASK 0x00000040
+#define CST43236_OTP_SEL_MASK 0x00000080
+#define CST43236_OTP_SEL_SHIFT 7
+#define CST43236_HSIC_MASK 0x00000100
+#define CST43236_BP_CLK 0x00000200
+#define CST43236_BOOT_MASK 0x00001800
+#define CST43236_BOOT_SHIFT 11
+#define CST43236_BOOT_FROM_SRAM 0
+#define CST43236_BOOT_FROM_ROM 1
+#define CST43236_BOOT_FROM_FLASH 2
+#define CST43236_BOOT_FROM_INVALID 3
+
+
+#define RES43237_REGULATOR 0
+#define RES43237_ILP_REQUEST 1
+#define RES43237_XTAL_PU 2
+#define RES43237_ALP_AVAIL 3
+#define RES43237_SI_PLL_ON 4
+#define RES43237_HT_SI_AVAIL 5
+
+
+#define CCTRL43237_BT_COEXIST (1<<0)
+#define CCTRL43237_SECI (1<<1)
+#define CCTRL43237_EXT_LNA (1<<2)
+#define CCTRL43237_ANT_MUX_2o3 (1<<3)
+#define CCTRL43237_GSIO (1<<4)
+
+
+#define CST43237_SFLASH_MASK 0x00000040
+#define CST43237_OTP_SEL_MASK 0x00000080
+#define CST43237_OTP_SEL_SHIFT 7
+#define CST43237_HSIC_MASK 0x00000100
+#define CST43237_BP_CLK 0x00000200
+#define CST43237_BOOT_MASK 0x00001800
+#define CST43237_BOOT_SHIFT 11
+#define CST43237_BOOT_FROM_SRAM 0
+#define CST43237_BOOT_FROM_ROM 1
+#define CST43237_BOOT_FROM_FLASH 2
+#define CST43237_BOOT_FROM_INVALID 3
+
+
+#define RES43239_OTP_PU 9
+#define RES43239_MACPHY_CLKAVAIL 23
+#define RES43239_HT_AVAIL 24
+
+
+#define CST43239_SPROM_MASK 0x00000002
+#define CST43239_SFLASH_MASK 0x00000004
+#define CST43239_RES_INIT_MODE_SHIFT 7
+#define CST43239_RES_INIT_MODE_MASK 0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15))
+#define CST43239_CHIPMODE_USB20D(cs) ((cs) & !(1 << 15))
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0)
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0))
+
+
+#define CCTRL43239_XTAL_STRENGTH(ctl) ((ctl & 0x3F) << 12)
+
+
+
+
+#define RES4315_CBUCK_LPOM 1
+#define RES4315_CBUCK_BURST 2
+#define RES4315_CBUCK_PWM 3
+#define RES4315_CLDO_PU 4
+#define RES4315_PALDO_PU 5
+#define RES4315_ILP_REQUEST 6
+#define RES4315_LNLDO1_PU 9
+#define RES4315_OTP_PU 10
+#define RES4315_LNLDO2_PU 12
+#define RES4315_XTAL_PU 13
+#define RES4315_ALP_AVAIL 14
+#define RES4315_RX_PWRSW_PU 15
+#define RES4315_TX_PWRSW_PU 16
+#define RES4315_RFPLL_PWRSW_PU 17
+#define RES4315_LOGEN_PWRSW_PU 18
+#define RES4315_AFE_PWRSW_PU 19
+#define RES4315_BBPLL_PWRSW_PU 20
+#define RES4315_HT_AVAIL 21
+
+
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4315_DEFCIS_SEL 0x00000000
+#define CST4315_SPROM_SEL 0x00000001
+#define CST4315_OTP_SEL 0x00000002
+#define CST4315_OTP_PWRDN 0x00000003
+#define CST4315_SDIO_MODE 0x00000004
+#define CST4315_RCAL_VALID 0x00000008
+#define CST4315_RCAL_VALUE_MASK 0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT 4
+#define CST4315_PALDO_EXTPNP 0x00000200
+#define CST4315_CBUCK_MODE_MASK 0x00000c00
+#define CST4315_CBUCK_MODE_BURST 0x00000400
+#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
+
+
+#define RES4319_CBUCK_LPOM 1
+#define RES4319_CBUCK_BURST 2
+#define RES4319_CBUCK_PWM 3
+#define RES4319_CLDO_PU 4
+#define RES4319_PALDO_PU 5
+#define RES4319_ILP_REQUEST 6
+#define RES4319_LNLDO1_PU 9
+#define RES4319_OTP_PU 10
+#define RES4319_LNLDO2_PU 12
+#define RES4319_XTAL_PU 13
+#define RES4319_ALP_AVAIL 14
+#define RES4319_RX_PWRSW_PU 15
+#define RES4319_TX_PWRSW_PU 16
+#define RES4319_RFPLL_PWRSW_PU 17
+#define RES4319_LOGEN_PWRSW_PU 18
+#define RES4319_AFE_PWRSW_PU 19
+#define RES4319_BBPLL_PWRSW_PU 20
+#define RES4319_HT_AVAIL 21
+
+
+#define CST4319_SPI_CPULESSUSB 0x00000001
+#define CST4319_SPI_CLK_POL 0x00000002
+#define CST4319_SPI_CLK_PH 0x00000008
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4319_SPROM_OTP_SEL_SHIFT 6
+#define CST4319_DEFCIS_SEL 0x00000000
+#define CST4319_SPROM_SEL 0x00000040
+#define CST4319_OTP_SEL 0x00000080
+#define CST4319_OTP_PWRDN 0x000000c0
+#define CST4319_SDIO_USB_MODE 0x00000100
+#define CST4319_REMAP_SEL_MASK 0x00000600
+#define CST4319_ILPDIV_EN 0x00000800
+#define CST4319_XTAL_PD_POL 0x00001000
+#define CST4319_LPO_SEL 0x00002000
+#define CST4319_RES_INIT_MODE 0x0000c000
+#define CST4319_PALDO_EXTPNP 0x00010000
+#define CST4319_CBUCK_MODE_MASK 0x00060000
+#define CST4319_CBUCK_MODE_BURST 0x00020000
+#define CST4319_CBUCK_MODE_LPBURST 0x00060000
+#define CST4319_RCAL_VALID 0x01000000
+#define CST4319_RCAL_VALUE_MASK 0x3e000000
+#define CST4319_RCAL_VALUE_SHIFT 25
+
+#define PMU1_PLL0_CHIPCTL0 0
+#define PMU1_PLL0_CHIPCTL1 1
+#define PMU1_PLL0_CHIPCTL2 2
+#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT 19
+#define CCTL_4319USB_48MHZ_PLL_SEL 1
+#define CCTL_4319USB_24MHZ_PLL_SEL 2
+
+
+#define RES4336_CBUCK_LPOM 0
+#define RES4336_CBUCK_BURST 1
+#define RES4336_CBUCK_LP_PWM 2
+#define RES4336_CBUCK_PWM 3
+#define RES4336_CLDO_PU 4
+#define RES4336_DIS_INT_RESET_PD 5
+#define RES4336_ILP_REQUEST 6
+#define RES4336_LNLDO_PU 7
+#define RES4336_LDO3P3_PU 8
+#define RES4336_OTP_PU 9
+#define RES4336_XTAL_PU 10
+#define RES4336_ALP_AVAIL 11
+#define RES4336_RADIO_PU 12
+#define RES4336_BG_PU 13
+#define RES4336_VREG1p4_PU_PU 14
+#define RES4336_AFE_PWRSW_PU 15
+#define RES4336_RX_PWRSW_PU 16
+#define RES4336_TX_PWRSW_PU 17
+#define RES4336_BB_PWRSW_PU 18
+#define RES4336_SYNTH_PWRSW_PU 19
+#define RES4336_MISC_PWRSW_PU 20
+#define RES4336_LOGEN_PWRSW_PU 21
+#define RES4336_BBPLL_PWRSW_PU 22
+#define RES4336_MACPHY_CLKAVAIL 23
+#define RES4336_HT_AVAIL 24
+#define RES4336_RSVD 25
+
+
+#define CST4336_SPI_MODE_MASK 0x00000001
+#define CST4336_SPROM_PRESENT 0x00000002
+#define CST4336_OTP_PRESENT 0x00000004
+#define CST4336_ARMREMAP_0 0x00000008
+#define CST4336_ILPDIV_EN_MASK 0x00000010
+#define CST4336_ILPDIV_EN_SHIFT 4
+#define CST4336_XTAL_PD_POL_MASK 0x00000020
+#define CST4336_XTAL_PD_POL_SHIFT 5
+#define CST4336_LPO_SEL_MASK 0x00000040
+#define CST4336_LPO_SEL_SHIFT 6
+#define CST4336_RES_INIT_MODE_MASK 0x00000180
+#define CST4336_RES_INIT_MODE_SHIFT 7
+#define CST4336_CBUCK_MODE_MASK 0x00000600
+#define CST4336_CBUCK_MODE_SHIFT 9
+
+
+#define PCTL_4336_SERIAL_ENAB (1 << 24)
+
+
+#define RES4330_CBUCK_LPOM 0
+#define RES4330_CBUCK_BURST 1
+#define RES4330_CBUCK_LP_PWM 2
+#define RES4330_CBUCK_PWM 3
+#define RES4330_CLDO_PU 4
+#define RES4330_DIS_INT_RESET_PD 5
+#define RES4330_ILP_REQUEST 6
+#define RES4330_LNLDO_PU 7
+#define RES4330_LDO3P3_PU 8
+#define RES4330_OTP_PU 9
+#define RES4330_XTAL_PU 10
+#define RES4330_ALP_AVAIL 11
+#define RES4330_RADIO_PU 12
+#define RES4330_BG_PU 13
+#define RES4330_VREG1p4_PU_PU 14
+#define RES4330_AFE_PWRSW_PU 15
+#define RES4330_RX_PWRSW_PU 16
+#define RES4330_TX_PWRSW_PU 17
+#define RES4330_BB_PWRSW_PU 18
+#define RES4330_SYNTH_PWRSW_PU 19
+#define RES4330_MISC_PWRSW_PU 20
+#define RES4330_LOGEN_PWRSW_PU 21
+#define RES4330_BBPLL_PWRSW_PU 22
+#define RES4330_MACPHY_CLKAVAIL 23
+#define RES4330_HT_AVAIL 24
+#define RES4330_5gRX_PWRSW_PU 25
+#define RES4330_5gTX_PWRSW_PU 26
+#define RES4330_5g_LOGEN_PWRSW_PU 27
+
+
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6)
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6)
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0)
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4)
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6)
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7)
+#define CST4330_OTP_PRESENT 0x00000010
+#define CST4330_LPO_AUTODET_EN 0x00000020
+#define CST4330_ARMREMAP_0 0x00000040
+#define CST4330_SPROM_PRESENT 0x00000080
+#define CST4330_ILPDIV_EN 0x00000100
+#define CST4330_LPO_SEL 0x00000200
+#define CST4330_RES_INIT_MODE_SHIFT 10
+#define CST4330_RES_INIT_MODE_MASK 0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT 12
+#define CST4330_CBUCK_MODE_MASK 0x00003000
+#define CST4330_CBUCK_POWER_OK 0x00004000
+#define CST4330_BB_PLL_LOCKED 0x00008000
+#define SOCDEVRAM_4330_BP_ADDR 0x1E000000
+#define SOCDEVRAM_4330_ARM_ADDR 0x00800000
+
+
+#define PCTL_4330_SERIAL_ENAB (1 << 24)
+
+
+#define CCTRL_4330_GPIO_SEL 0x00000001
+#define CCTRL_4330_ERCX_SEL 0x00000002
+#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004
+#define CCTRL_4330_JTAG_DISABLE 0x00000008
+
+
+#define RES4313_BB_PU_RSRC 0
+#define RES4313_ILP_REQ_RSRC 1
+#define RES4313_XTAL_PU_RSRC 2
+#define RES4313_ALP_AVAIL_RSRC 3
+#define RES4313_RADIO_PU_RSRC 4
+#define RES4313_BG_PU_RSRC 5
+#define RES4313_VREG1P4_PU_RSRC 6
+#define RES4313_AFE_PWRSW_RSRC 7
+#define RES4313_RX_PWRSW_RSRC 8
+#define RES4313_TX_PWRSW_RSRC 9
+#define RES4313_BB_PWRSW_RSRC 10
+#define RES4313_SYNTH_PWRSW_RSRC 11
+#define RES4313_MISC_PWRSW_RSRC 12
+#define RES4313_BB_PLL_PWRSW_RSRC 13
+#define RES4313_HT_AVAIL_RSRC 14
+#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
+
+
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
+
+
+#define RES43228_NOT_USED 0
+#define RES43228_ILP_REQUEST 1
+#define RES43228_XTAL_PU 2
+#define RES43228_ALP_AVAIL 3
+#define RES43228_PLL_EN 4
+#define RES43228_HT_PHY_AVAIL 5
+
+
+#define CST43228_ILP_DIV_EN 0x1
+#define CST43228_OTP_PRESENT 0x2
+#define CST43228_SERDES_REFCLK_PADSEL 0x4
+#define CST43228_SDIO_MODE 0x8
+#define CST43228_SDIO_OTP_PRESENT 0x10
+#define CST43228_SDIO_RESET 0x20
+
+
+#define PMU_MAX_TRANSITION_DLY 15000
+
+
+#define PMURES_UP_TRANSITION 2
+
+
+
+
+
+#define ECI_BW_20 0x0
+#define ECI_BW_25 0x1
+#define ECI_BW_30 0x2
+#define ECI_BW_35 0x3
+#define ECI_BW_40 0x4
+#define ECI_BW_45 0x5
+#define ECI_BW_50 0x6
+#define ECI_BW_ALL 0x7
+
+
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+#endif
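The mask/shift pairs above are consumed in the usual way: mask first, then shift. A minimal sketch of two common patterns follows (illustrative only, not part of the patch; it assumes a pmucapabilities value has already been read from chipcommon and that uint32 comes from the driver's own typedefs):

/* Decode the PMU resource count from a pmucapabilities register value. */
static unsigned int pmu_res_count(uint32 pcap)
{
	return (pcap & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
}

/* Build a dependency mask covering two 4329 PMU resources. */
static uint32 pmu_res_deps_example(void)
{
	return PMURES_BIT(RES4329_OTP_PU) | PMURES_BIT(RES4329_XTAL_PU);
}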
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 000000000000..76f05ae34bd5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,276 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h,v 13.70 2008-03-28 19:17:04 Exp $
+ */
+
+
+#ifndef _SBCONFIG_H
+#define _SBCONFIG_H
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+#define SB_BUS_SIZE 0x10000
+#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE)
+
+
+#define SBCONFIGOFF 0xf00
+#define SBCONFIGSIZE 256
+
+#define SBIPSFLAG 0x08
+#define SBTPSFLAG 0x18
+#define SBTMERRLOGA 0x48
+#define SBTMERRLOG 0x50
+#define SBADMATCH3 0x60
+#define SBADMATCH2 0x68
+#define SBADMATCH1 0x70
+#define SBIMSTATE 0x90
+#define SBINTVEC 0x94
+#define SBTMSTATELOW 0x98
+#define SBTMSTATEHIGH 0x9c
+#define SBBWA0 0xa0
+#define SBIMCONFIGLOW 0xa8
+#define SBIMCONFIGHIGH 0xac
+#define SBADMATCH0 0xb0
+#define SBTMCONFIGLOW 0xb8
+#define SBTMCONFIGHIGH 0xbc
+#define SBBCONFIG 0xc0
+#define SBBSTATE 0xc8
+#define SBACTCNFG 0xd8
+#define SBFLAGST 0xe8
+#define SBIDLOW 0xf8
+#define SBIDHIGH 0xfc
+
+
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _sbconfig {
+ uint32 PAD[2];
+ uint32 sbipsflag;
+ uint32 PAD[3];
+ uint32 sbtpsflag;
+ uint32 PAD[11];
+ uint32 sbtmerrloga;
+ uint32 PAD;
+ uint32 sbtmerrlog;
+ uint32 PAD[3];
+ uint32 sbadmatch3;
+ uint32 PAD;
+ uint32 sbadmatch2;
+ uint32 PAD;
+ uint32 sbadmatch1;
+ uint32 PAD[7];
+ uint32 sbimstate;
+ uint32 sbintvec;
+ uint32 sbtmstatelow;
+ uint32 sbtmstatehigh;
+ uint32 sbbwa0;
+ uint32 PAD;
+ uint32 sbimconfiglow;
+ uint32 sbimconfighigh;
+ uint32 sbadmatch0;
+ uint32 PAD;
+ uint32 sbtmconfiglow;
+ uint32 sbtmconfighigh;
+ uint32 sbbconfig;
+ uint32 PAD;
+ uint32 sbbstate;
+ uint32 PAD[3];
+ uint32 sbactcnfg;
+ uint32 PAD[3];
+ uint32 sbflagst;
+ uint32 PAD[3];
+ uint32 sbidlow;
+ uint32 sbidhigh;
+} sbconfig_t;
+
+#endif
+
+
+#define SBIPS_INT1_MASK 0x3f
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000
+#define SBIPS_INT4_SHIFT 24
+
+
+#define SBTPS_NUM0_MASK 0x3f
+#define SBTPS_F0EN0 0x40
+
+
+#define SBTMEL_CM 0x00000007
+#define SBTMEL_CI 0x0000ff00
+#define SBTMEL_EC 0x0f000000
+#define SBTMEL_ME 0x80000000
+
+
+#define SBIM_PC 0xf
+#define SBIM_AP_MASK 0x30
+#define SBIM_AP_BOTH 0x00
+#define SBIM_AP_TS 0x10
+#define SBIM_AP_TK 0x20
+#define SBIM_AP_RSV 0x30
+#define SBIM_IBE 0x20000
+#define SBIM_TO 0x40000
+#define SBIM_BY 0x01800000
+#define SBIM_RJ 0x02000000
+
+
+#define SBTML_RESET 0x0001
+#define SBTML_REJ_MASK 0x0006
+#define SBTML_REJ 0x0002
+#define SBTML_TMPREJ 0x0004
+
+#define SBTML_SICF_SHIFT 16
+
+
+#define SBTMH_SERR 0x0001
+#define SBTMH_INT 0x0002
+#define SBTMH_BUSY 0x0004
+#define SBTMH_TO 0x0020
+
+#define SBTMH_SISF_SHIFT 16
+
+
+#define SBBWA_TAB0_MASK 0xffff
+#define SBBWA_TAB1_MASK 0xffff
+#define SBBWA_TAB1_SHIFT 16
+
+
+#define SBIMCL_STO_MASK 0x7
+#define SBIMCL_RTO_MASK 0x70
+#define SBIMCL_RTO_SHIFT 4
+#define SBIMCL_CID_MASK 0xff0000
+#define SBIMCL_CID_SHIFT 16
+
+
+#define SBIMCH_IEM_MASK 0xc
+#define SBIMCH_TEM_MASK 0x30
+#define SBIMCH_TEM_SHIFT 4
+#define SBIMCH_BEM_MASK 0xc0
+#define SBIMCH_BEM_SHIFT 6
+
+
+#define SBAM_TYPE_MASK 0x3
+#define SBAM_AD64 0x4
+#define SBAM_ADINT0_MASK 0xf8
+#define SBAM_ADINT0_SHIFT 3
+#define SBAM_ADINT1_MASK 0x1f8
+#define SBAM_ADINT1_SHIFT 3
+#define SBAM_ADINT2_MASK 0x1f8
+#define SBAM_ADINT2_SHIFT 3
+#define SBAM_ADEN 0x400
+#define SBAM_ADNEG 0x800
+#define SBAM_BASE0_MASK 0xffffff00
+#define SBAM_BASE0_SHIFT 8
+#define SBAM_BASE1_MASK 0xfffff000
+#define SBAM_BASE1_SHIFT 12
+#define SBAM_BASE2_MASK 0xffff0000
+#define SBAM_BASE2_SHIFT 16
+
+
+#define SBTMCL_CD_MASK 0xff
+#define SBTMCL_CO_MASK 0xf800
+#define SBTMCL_CO_SHIFT 11
+#define SBTMCL_IF_MASK 0xfc0000
+#define SBTMCL_IF_SHIFT 18
+#define SBTMCL_IM_MASK 0x3000000
+#define SBTMCL_IM_SHIFT 24
+
+
+#define SBTMCH_BM_MASK 0x3
+#define SBTMCH_RM_MASK 0x3
+#define SBTMCH_RM_SHIFT 2
+#define SBTMCH_SM_MASK 0x30
+#define SBTMCH_SM_SHIFT 4
+#define SBTMCH_EM_MASK 0x300
+#define SBTMCH_EM_SHIFT 8
+#define SBTMCH_IM_MASK 0xc00
+#define SBTMCH_IM_SHIFT 10
+
+
+#define SBBC_LAT_MASK 0x3
+#define SBBC_MAX0_MASK 0xf0000
+#define SBBC_MAX0_SHIFT 16
+#define SBBC_MAX1_MASK 0xf00000
+#define SBBC_MAX1_SHIFT 20
+
+
+#define SBBS_SRD 0x1
+#define SBBS_HRD 0x2
+
+
+#define SBIDL_CS_MASK 0x3
+#define SBIDL_AR_MASK 0x38
+#define SBIDL_AR_SHIFT 3
+#define SBIDL_SYNCH 0x40
+#define SBIDL_INIT 0x80
+#define SBIDL_MINLAT_MASK 0xf00
+#define SBIDL_MINLAT_SHIFT 8
+#define SBIDL_MAXLAT 0xf000
+#define SBIDL_MAXLAT_SHIFT 12
+#define SBIDL_FIRST 0x10000
+#define SBIDL_CW_MASK 0xc0000
+#define SBIDL_CW_SHIFT 18
+#define SBIDL_TP_MASK 0xf00000
+#define SBIDL_TP_SHIFT 20
+#define SBIDL_IP_MASK 0xf000000
+#define SBIDL_IP_SHIFT 24
+#define SBIDL_RV_MASK 0xf0000000
+#define SBIDL_RV_SHIFT 28
+#define SBIDL_RV_2_2 0x00000000
+#define SBIDL_RV_2_3 0x10000000
+
+
+#define SBIDH_RC_MASK 0x000f
+#define SBIDH_RCE_MASK 0x7000
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000
+#define SBIDH_VC_SHIFT 16
+
+#define SB_COMMIT 0xfd8
+
+
+#define SB_VEND_BCM 0x4243
+
+#endif
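As an illustration of how the sbconfig window is addressed, here is a minimal sketch (not part of the patch) that reads a core's revision; the base pointer is assumed to be the already-ioremapped start of the core's register space, with sbconfig_t sitting SBCONFIGOFF bytes into it:

/* Read a backplane core's revision via its sbconfig space. */
static unsigned int sb_core_rev(volatile void *base)
{
	volatile sbconfig_t *sb =
	    (volatile sbconfig_t *)((volatile unsigned char *)base + SBCONFIGOFF);
	uint32 idhigh = sb->sbidhigh;

	return SBCOREREV(idhigh);	/* low nibble plus extended rev bits */
}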
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 000000000000..05d0587bc205
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,327 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h,v 13.20.2.3 2010-10-14 22:21:29 Exp $
+ */
+
+
+#ifndef _sbhnddma_h_
+#define _sbhnddma_h_
+
+
+
+
+
+
+
+typedef volatile struct {
+ uint32 control;
+ uint32 addr;
+ uint32 ptr;
+ uint32 status;
+} dma32regs_t;
+
+typedef volatile struct {
+ dma32regs_t xmt;
+ dma32regs_t rcv;
+} dma32regp_t;
+
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma32diag_t;
+
+
+typedef volatile struct {
+ uint32 ctrl;
+ uint32 addr;
+} dma32dd_t;
+
+
+#define D32RINGALIGN_BITS 12
+#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
+#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
+
+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
+
+
+#define XC_XE ((uint32)1 << 0)
+#define XC_SE ((uint32)1 << 1)
+#define XC_LE ((uint32)1 << 2)
+#define XC_FL ((uint32)1 << 4)
+#define XC_PD ((uint32)1 << 11)
+#define XC_AE ((uint32)3 << 16)
+#define XC_AE_SHIFT 16
+#define XC_BL_MASK 0x001C0000
+#define XC_BL_SHIFT 18
+
+
+#define XP_LD_MASK 0xfff
+
+
+#define XS_CD_MASK 0x0fff
+#define XS_XS_MASK 0xf000
+#define XS_XS_SHIFT 12
+#define XS_XS_DISABLED 0x0000
+#define XS_XS_ACTIVE 0x1000
+#define XS_XS_IDLE 0x2000
+#define XS_XS_STOPPED 0x3000
+#define XS_XS_SUSP 0x4000
+#define XS_XE_MASK 0xf0000
+#define XS_XE_SHIFT 16
+#define XS_XE_NOERR 0x00000
+#define XS_XE_DPE 0x10000
+#define XS_XE_DFU 0x20000
+#define XS_XE_BEBR 0x30000
+#define XS_XE_BEDA 0x40000
+#define XS_AD_MASK 0xfff00000
+#define XS_AD_SHIFT 20
+
+
+#define RC_RE ((uint32)1 << 0)
+#define RC_RO_MASK 0xfe
+#define RC_RO_SHIFT 1
+#define RC_FM ((uint32)1 << 8)
+#define RC_SH ((uint32)1 << 9)
+#define RC_OC ((uint32)1 << 10)
+#define RC_PD ((uint32)1 << 11)
+#define RC_AE ((uint32)3 << 16)
+#define RC_AE_SHIFT 16
+#define RC_BL_MASK 0x001C0000
+#define RC_BL_SHIFT 18
+
+
+#define RP_LD_MASK 0xfff
+
+
+#define RS_CD_MASK 0x0fff
+#define RS_RS_MASK 0xf000
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000
+#define RS_RS_ACTIVE 0x1000
+#define RS_RS_IDLE 0x2000
+#define RS_RS_STOPPED 0x3000
+#define RS_RE_MASK 0xf0000
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000
+#define RS_RE_DPE 0x10000
+#define RS_RE_DFO 0x20000
+#define RS_RE_BEBW 0x30000
+#define RS_RE_BEDA 0x40000
+#define RS_AD_MASK 0xfff00000
+#define RS_AD_SHIFT 20
+
+
+#define FA_OFF_MASK 0xffff
+#define FA_SEL_MASK 0xf0000
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000
+#define FA_SEL_XDP 0x10000
+#define FA_SEL_RDD 0x40000
+#define FA_SEL_RDP 0x50000
+#define FA_SEL_XFD 0x80000
+#define FA_SEL_XFP 0x90000
+#define FA_SEL_RFD 0xc0000
+#define FA_SEL_RFP 0xd0000
+#define FA_SEL_RSD 0xe0000
+#define FA_SEL_RSP 0xf0000
+
+
+#define CTRL_BC_MASK 0x00001fff
+#define CTRL_AE ((uint32)3 << 16)
+#define CTRL_AE_SHIFT 16
+#define CTRL_PARITY ((uint32)3 << 18)
+#define CTRL_EOT ((uint32)1 << 28)
+#define CTRL_IOC ((uint32)1 << 29)
+#define CTRL_EOF ((uint32)1 << 30)
+#define CTRL_SOF ((uint32)1 << 31)
+
+
+#define CTRL_CORE_MASK 0x0ff00000
+
+
+
+
+typedef volatile struct {
+ uint32 control;
+ uint32 ptr;
+ uint32 addrlow;
+ uint32 addrhigh;
+ uint32 status0;
+ uint32 status1;
+} dma64regs_t;
+
+typedef volatile struct {
+ dma64regs_t tx;
+ dma64regs_t rx;
+} dma64regp_t;
+
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma64diag_t;
+
+
+typedef volatile struct {
+ uint32 ctrl1;
+ uint32 ctrl2;
+ uint32 addrlow;
+ uint32 addrhigh;
+} dma64dd_t;
+
+
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+
+#define D64_DEF_USBBURSTLEN 2
+#define D64_DEF_SDIOBURSTLEN 1
+
+
+#define D64_XC_XE 0x00000001
+#define D64_XC_SE 0x00000002
+#define D64_XC_LE 0x00000004
+#define D64_XC_FL 0x00000010
+#define D64_XC_PD 0x00000800
+#define D64_XC_AE 0x00030000
+#define D64_XC_AE_SHIFT 16
+#define D64_XC_BL_MASK 0x001C0000
+#define D64_XC_BL_SHIFT 18
+
+
+#define D64_XP_LD_MASK 0x00000fff
+
+
+#define D64_XS0_CD_MASK 0x00001fff
+#define D64_XS0_XS_MASK 0xf0000000
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000
+#define D64_XS0_XS_ACTIVE 0x10000000
+#define D64_XS0_XS_IDLE 0x20000000
+#define D64_XS0_XS_STOPPED 0x30000000
+#define D64_XS0_XS_SUSP 0x40000000
+
+#define D64_XS1_AD_MASK 0x00001fff
+#define D64_XS1_XE_MASK 0xf0000000
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000
+#define D64_XS1_XE_DPE 0x10000000
+#define D64_XS1_XE_DFU 0x20000000
+#define D64_XS1_XE_DTE 0x30000000
+#define D64_XS1_XE_DESRE 0x40000000
+#define D64_XS1_XE_COREE 0x50000000
+
+
+#define D64_RC_RE 0x00000001
+#define D64_RC_RO_MASK 0x000000fe
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100
+#define D64_RC_SH 0x00000200
+#define D64_RC_OC 0x00000400
+#define D64_RC_PD 0x00000800
+#define D64_RC_AE 0x00030000
+#define D64_RC_AE_SHIFT 16
+#define D64_RC_BL_MASK 0x001C0000
+#define D64_RC_BL_SHIFT 18
+
+
+#define DMA_CTRL_PEN (1 << 0)
+#define DMA_CTRL_ROC (1 << 1)
+#define DMA_CTRL_RXMULTI (1 << 2)
+#define DMA_CTRL_UNFRAMED (1 << 3)
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+
+
+#define D64_RP_LD_MASK 0x00000fff
+
+
+#define D64_RS0_CD_MASK 0x00001fff
+#define D64_RS0_RS_MASK 0xf0000000
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000
+#define D64_RS0_RS_ACTIVE 0x10000000
+#define D64_RS0_RS_IDLE 0x20000000
+#define D64_RS0_RS_STOPPED 0x30000000
+#define D64_RS0_RS_SUSP 0x40000000
+
+#define D64_RS1_AD_MASK 0x0001ffff
+#define D64_RS1_RE_MASK 0xf0000000
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000
+#define D64_RS1_RE_DPO 0x10000000
+#define D64_RS1_RE_DFU 0x20000000
+#define D64_RS1_RE_DTE 0x30000000
+#define D64_RS1_RE_DESRE 0x40000000
+#define D64_RS1_RE_COREE 0x50000000
+
+
+#define D64_FA_OFF_MASK 0xffff
+#define D64_FA_SEL_MASK 0xf0000
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000
+#define D64_FA_SEL_XDP 0x10000
+#define D64_FA_SEL_RDD 0x40000
+#define D64_FA_SEL_RDP 0x50000
+#define D64_FA_SEL_XFD 0x80000
+#define D64_FA_SEL_XFP 0x90000
+#define D64_FA_SEL_RFD 0xc0000
+#define D64_FA_SEL_RFP 0xd0000
+#define D64_FA_SEL_RSD 0xe0000
+#define D64_FA_SEL_RSP 0xf0000
+
+
+#define D64_CTRL_COREFLAGS 0x0ff00000
+#define D64_CTRL1_EOT ((uint32)1 << 28)
+#define D64_CTRL1_IOC ((uint32)1 << 29)
+#define D64_CTRL1_EOF ((uint32)1 << 30)
+#define D64_CTRL1_SOF ((uint32)1 << 31)
+
+
+#define D64_CTRL2_BC_MASK 0x00007fff
+#define D64_CTRL2_AE 0x00030000
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000
+
+
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff
+#define D64_RX_FRM_STS_OVFL 0x00800000
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000
+
+
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} dma_rxh_t;
+
+#endif
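To make the 64-bit descriptor layout concrete, a hedged sketch of filling one dma64dd_t for a single-fragment buffer follows (illustrative only; address extension, parity and ring-wrap handling are assumed to be done elsewhere in the driver's DMA code):

/* Populate one dma64dd_t for a single-fragment transmit buffer.
 * pa_lo/pa_hi form the 64-bit DMA address of the buffer, len its length.
 */
static void dma64_fill_dd(dma64dd_t *dd, uint32 pa_lo, uint32 pa_hi, uint32 len)
{
	dd->addrlow  = pa_lo;
	dd->addrhigh = pa_hi;
	/* sole fragment: mark start and end of frame, ask for an interrupt */
	dd->ctrl1 = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC;
	dd->ctrl2 = len & D64_CTRL2_BC_MASK;
}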
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 000000000000..aba914bd014f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,109 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h,v 13.48.12.6 2010-11-04 09:39:42 Exp $
+ */
+
+
+#ifndef _SBPCMCIA_H
+#define _SBPCMCIA_H
+
+
+
+
+#define PCMCIA_FCR (0x700 / 2)
+
+#define FCR0_OFF 0
+#define FCR1_OFF (0x40 / 2)
+#define FCR2_OFF (0x80 / 2)
+#define FCR3_OFF (0xc0 / 2)
+
+#define PCMCIA_FCR0 (0x700 / 2)
+#define PCMCIA_FCR1 (0x740 / 2)
+#define PCMCIA_FCR2 (0x780 / 2)
+#define PCMCIA_FCR3 (0x7c0 / 2)
+
+
+
+#define PCMCIA_COR 0
+
+#define COR_RST 0x80
+#define COR_LEV 0x40
+#define COR_IRQEN 0x04
+#define COR_BLREN 0x01
+#define COR_FUNEN 0x01
+
+
+#define PCICIA_FCSR (2 / 2)
+#define PCICIA_PRR (4 / 2)
+#define PCICIA_SCR (6 / 2)
+#define PCICIA_ESR (8 / 2)
+
+
+#define PCM_MEMOFF 0x0000
+#define F0_MEMOFF 0x1000
+#define F1_MEMOFF 0x2000
+#define F2_MEMOFF 0x3000
+#define F3_MEMOFF 0x4000
+
+
+#define MEM_ADDR0 (0x728 / 2)
+#define MEM_ADDR1 (0x72a / 2)
+#define MEM_ADDR2 (0x72c / 2)
+
+
+#define PCMCIA_ADDR0 (0x072e / 2)
+#define PCMCIA_ADDR1 (0x0730 / 2)
+#define PCMCIA_ADDR2 (0x0732 / 2)
+
+#define MEM_SEG (0x0734 / 2)
+#define SROM_CS (0x0736 / 2)
+#define SROM_DATAL (0x0738 / 2)
+#define SROM_DATAH (0x073a / 2)
+#define SROM_ADDRL (0x073c / 2)
+#define SROM_ADDRH (0x073e / 2)
+#define SROM_INFO2 (0x0772 / 2)
+#define SROM_INFO (0x07be / 2)
+
+
+#define SROM_IDLE 0
+#define SROM_WRITE 1
+#define SROM_READ 2
+#define SROM_WEN 4
+#define SROM_WDS 7
+#define SROM_DONE 8
+
+
+#define SRI_SZ_MASK 0x03
+#define SRI_BLANK 0x04
+#define SRI_OTP 0x80
+
+
+
+#define SBTML_INT_ACK 0x40000
+#define SBTML_INT_EN 0x20000
+
+
+#define SBTMH_INT_STATUS 0x40000
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 000000000000..4280d5bf9c1f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,166 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h,v 13.34 2009-03-11 20:27:16 Exp $
+ */
+
+#ifndef _SBSDIO_H
+#define _SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE 0
+#define SBSDIO_SPROM_WRITE 1
+#define SBSDIO_SPROM_READ 2
+#define SBSDIO_SPROM_WEN 4
+#define SBSDIO_SPROM_WDS 7
+#define SBSDIO_SPROM_DONE 8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+#define SROM_OTP 0x80 /* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
+ * 1: power on oscillator
+ * (for 4318 only)
+ */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
+ * to wait before sending data to host
+ */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
+
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL 0x40
+#define SBSDIO_Rev8_ALP_AVAIL 0x80
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
+ * link bytes
+ */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
+ * 8th byte
+ */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
+ * data command
+ */
+
+#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
+
+#endif /* _SBSDIO_H */
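The chip-clock CSR is normally driven with a short request/poll sequence; the sketch below shows the idea (sdio_rreg8()/sdio_wreg8() are hypothetical stand-ins for the host bus layer's single-byte function-1 register accessors):

/* Request the HT clock over SDIO function 1 and wait for it to come up. */
static int sdio_wait_ht_clock(void)
{
	unsigned char csr;
	int i;

	sdio_wreg8(SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ);

	for (i = 0; i < 1000; i++) {
		csr = sdio_rreg8(SBSDIO_FUNC1_CHIPCLKCSR);
		if (SBSDIO_CLKAV(csr, 0))	/* alponly == 0: require HT, not just ALP */
			return 0;
	}
	return -1;	/* timed out waiting for HT availability */
}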
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 000000000000..107a8b07c9e6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,293 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h,v 13.38 2009-09-22 22:56:45 Exp $
+ */
+
+#ifndef _sbsdpcmdev_h_
+#define _sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
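+
+/*
+ * Illustrative note (not from the original header): PAD expands to a field
+ * name unique per source line, e.g. on line 57 "uint32 PAD[2];" becomes
+ * "uint32 pad57[2];", which is why the register structs below can repeat
+ * "PAD" for every reserved hole without redefinition errors.
+ */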
+
+
+typedef volatile struct {
+ dma64regs_t xmt; /* dma tx */
+ uint32 PAD[2];
+ dma64regs_t rcv; /* dma rx */
+ uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+ dma64p_t dma64regs[2];
+ dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
+ uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+ dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
+ uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+ dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
+ uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+ uint32 corecontrol; /* CoreControl, 0x000, rev8 */
+ uint32 corestatus; /* CoreStatus, 0x004, rev8 */
+ uint32 PAD[1];
+ uint32 biststatus; /* BistStatus, 0x00c, rev8 */
+
+ /* PCMCIA access */
+ uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
+ uint16 PAD[1];
+
+ /* interrupt */
+ uint32 intstatus; /* IntStatus, 0x020, rev8 */
+ uint32 hostintmask; /* IntHostMask, 0x024, rev8 */
+ uint32 intmask; /* IntSbMask, 0x028, rev8 */
+ uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
+ uint32 sbintmask; /* SBIntMask, 0x030, rev8 */
+ uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */
+ uint32 PAD[2];
+ uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
+ uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
+ uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
+ uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
+ uint32 PAD[3];
+
+ /* PCMCIA frame control */
+ uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
+ uint8 PAD[3];
+ uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
+ uint8 PAD[155];
+
+ /* interrupt batching control */
+ uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
+ uint32 PAD[3];
+
+ /* counters */
+ uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+ uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+ uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+ uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+ uint32 PAD[40];
+ uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
+ uint32 PAD[7];
+
+ /* DMA engines */
+ volatile union {
+ pcmdma32_t pcm32;
+ sdiodma32_t sdiod32;
+ sdiodma64_t sdiod64;
+ } dma;
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
+ uint16 PAD[55];
+
+ /* PCMCIA backplane access */
+ uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
+ uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
+ uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
+ uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
+ uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
+ uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
+ uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
+ uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
+ uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
+ uint16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
+ uint32 PAD[464];
+
+ /* Sonics SiliconBackplane registers */
+ sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
+#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
+#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
+#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
+#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
+#define SDA_WRITE 0x01000000 /* Write bit */
+#define SDA_READ 0x00000000 /* Write bit cleared for Read */
+#define SDA_BUSY 0x80000000 /* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
+#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
+#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
+#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
+#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
+#define SDA_SBADDRMID 0x00b /* SbAddrMid */
+#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
+#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
+#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
+#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
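+
+/*
+ * Usage sketch (an inference from the masks above, not taken from Broadcom
+ * code): compose a SdioAccess word that writes 'val' to an F1 core register,
+ * e.g. offset = SDA_F1_REG_SPACE | SDA_FRAMECTRL; completion would be tracked
+ * via the SDA_BUSY bit.
+ */
+static unsigned int example_sdioaccess_write(unsigned int offset,
+	unsigned char val)
+{
+	return SDA_WRITE | ((offset << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) |
+	       ((unsigned int)val & SDA_DATA_MASK);
+}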
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+
+/* intrcvlazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/* rx header */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC 0x0001 /* CRC error detected */
+#define RXF_WOOS 0x0002 /* write frame out of sync */
+#define RXF_WF_TERM 0x0004 /* write frame terminated */
+#define RXF_ABORT 0x0008 /* write frame aborted */
+#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
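+
+/*
+ * Sketch under an assumption not stated in this header: in SDPCM the check
+ * value is conventionally the one's complement of the length, so a received
+ * frame tag can be sanity-checked as below.
+ */
+static int example_frametag_ok(unsigned short len, unsigned short check)
+{
+	/* valid when the check word is the bitwise inverse of the length */
+	return (unsigned short)~check == len;
+}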
+
+#endif /* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 000000000000..1cba42238905
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,186 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h,v 13.15 2009-10-02 16:55:44 Exp $
+ */
+
+
+#ifndef _SBSOCRAM_H
+#define _SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+typedef volatile struct sbsocramregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[15];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sbsocramregs_t;
+
+#endif
+
+
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+
+#define SRCI_PT_MASK 0x00070000
+#define SRCI_PT_SHIFT 16
+
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
+
+
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+
+#define SOCRAM_BANKINFO_SZMASK 0x3f
+#define SOCRAM_BANKIDX_ROM_MASK 0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
+
+#define SOCRAM_MEMTYPE_RAM 0
+#define SOCRAM_MEMTYPE_R0M 1
+#define SOCRAM_MEMTYPE_DEVRAM 2
+
+#define SOCRAM_BANKINFO_REG 0x40
+#define SOCRAM_BANKIDX_REG 0x10
+#define SOCRAM_BANKINFO_STDBY_MASK 0x400
+#define SOCRAM_BANKINFO_STDBY_TIMER 0x800
+
+
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000
+
+
+#define SOCRAM_DEVRAMBANK_MASK 0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT 12
+
+
+#define SOCRAM_BANKINFO_SZBASE 8192
+#define SOCRAM_BANKSIZE_SHIFT 13
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 000000000000..ca932266a1b2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,611 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h,v 13.27.14.1 2010-09-07 13:37:45 Exp $
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+#define SDIOD_CCCR_UHSI_SUPPORT 0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH 0x15
+#define SDIOD_CCCR_INTR_EXTN 0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S 1
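+
+/*
+ * Illustrative sketch (assumed convention, not a Broadcom definition): the
+ * *_M/*_S pairs in this header are mask/shift pairs, with BITFIELD_MASK(n)
+ * an n-bit mask of ones. A field is then read as below; e.g.
+ * EXAMPLE_GFIELD(reg13, SDIO_BUS_SPEED_UHSISEL) yields bits [3:1] of the
+ * bus-speed register.
+ */
+#define EXAMPLE_GFIELD(val, field)	(((val) >> field ## _S) & field ## _M)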
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S 0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S 0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S 4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S 0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S 1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS 7
+
+/* SDIO Device FBR Start Address */
+#define SDIOD_FBR_STARTADDR 0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE 0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n) ((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
+#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0 0x09
+#define SDIOD_FBR_CISPTR_1 0x0A
+#define SDIOD_FBR_CISPTR_2 0x0B
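+
+/*
+ * Usage sketch (illustrative helper, not part of the original header):
+ * compose a function's 17-bit CIS pointer from the three bytes read at
+ * SDIOD_FBR_BASE(func) + SDIOD_FBR_CISPTR_0/1/2, stored LSB first.
+ */
+static unsigned int example_fbr_cis_ptr(unsigned char b0, unsigned char b1,
+	unsigned char b2)
+{
+	return (unsigned int)b0 | ((unsigned int)b1 << 8) |
+	       ((unsigned int)b2 << 16);
+}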
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0 0x0C
+#define SDIOD_FBR_CSA_ADDR_1 0x0D
+#define SDIOD_FBR_CSA_ADDR_2 0x0E
+#define SDIOD_FBR_CSA_DATA 0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0 0x10
+#define SDIOD_FBR_BLKSIZE_1 0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
+#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART 1
+#define SDIOD_DIC_BLUETOOTH_A 2
+#define SDIOD_DIC_BLUETOOTH_B 3
+#define SDIOD_DIC_GPS 4
+#define SDIOD_DIC_CAMERA 5
+#define SDIOD_DIC_PHS 6
+#define SDIOD_DIC_WLAN 7
+#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
+#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+#define SDIO_FUNC_3 3
+#define SDIO_FUNC_4 4
+#define SDIO_FUNC_5 5
+#define SDIO_FUNC_6 6
+#define SDIO_FUNC_7 7
+
+#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
+#define SD_CARD_TYPE_IO 1 /* IO only card */
+#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
+#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE 31
+#define CARDREG_STATUS_BIT_COMCRCERROR 23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
+#define CARDREG_STATUS_BIT_ERROR 19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+
+
+#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND 1
+#define SD_CMD_MMC_SET_RCA 3
+#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD 7
+#define SD_CMD_SEND_CSD 9
+#define SD_CMD_SEND_CID 10
+#define SD_CMD_STOP_TRANSMISSION 12
+#define SD_CMD_SEND_STATUS 13
+#define SD_CMD_GO_INACTIVE_STATE 15
+#define SD_CMD_SET_BLOCKLEN 16
+#define SD_CMD_READ_SINGLE_BLOCK 17
+#define SD_CMD_READ_MULTIPLE_BLOCK 18
+#define SD_CMD_WRITE_BLOCK 24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
+#define SD_CMD_PROGRAM_CSD 27
+#define SD_CMD_SET_WRITE_PROT 28
+#define SD_CMD_CLR_WRITE_PROT 29
+#define SD_CMD_SEND_WRITE_PROT 30
+#define SD_CMD_ERASE_WR_BLK_START 32
+#define SD_CMD_ERASE_WR_BLK_END 33
+#define SD_CMD_ERASE 38
+#define SD_CMD_LOCK_UNLOCK 42
+#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
+#define SD_CMD_APP_CMD 55
+#define SD_CMD_GEN_CMD 56
+#define SD_CMD_READ_OCR 58
+#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS 13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
+#define SD_ACMD_SD_SEND_OP_COND 41
+#define SD_ACMD_SET_CLR_CARD_DETECT 42
+#define SD_ACMD_SEND_SCR 51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ 0 /* Read_Write: Read */
+#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
+#define SD_IO_RW_NORMAL 0 /* no RAW */
+#define SD_IO_RW_RAW 1 /* RAW */
+#define SD_IO_BYTE_MODE 0 /* Byte Mode */
+#define SD_IO_BLOCK_MODE 1 /* BlockMode */
+#define SD_IO_FIXED_ADDRESS 0 /* fix Address */
+#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+ (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+ (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
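+
+/*
+ * Usage sketch (illustrative values only, not part of the original header):
+ * a CMD52 write of SDIO_FUNC_ENABLE_1 to the CCCR I/O-enable register
+ * (function 0, no read-after-write) encodes to 0x80000402; a CMD53 block-mode
+ * read of 4 blocks from function 1 with an incrementing address is shown for
+ * comparison.
+ */
+#define EXAMPLE_CMD52_ARG \
+	SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL, SDIO_FUNC_0, \
+		SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1)
+#define EXAMPLE_CMD53_ARG \
+	SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BLOCK_MODE, SDIO_FUNC_1, \
+		0, SD_IO_INCREMENT_ADDRESS, 4)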
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE 0
+#define SD_RSP_NO_1 1
+#define SD_RSP_NO_2 2
+#define SD_RSP_NO_3 3
+#define SD_RSP_NO_4 4
+#define SD_RSP_NO_5 5
+#define SD_RSP_NO_6 6
+
+ /* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
+#define SD_RSP_MR6_ERROR 0x2000
+
+ /* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT 0x80
+#define SD_RSP_MR1_PARAMETER_ERROR 0x40
+#define SD_RSP_MR1_RFU5 0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
+#define SD_RSP_MR1_RFU1 0x02
+#define SD_RSP_MR1_IDLE_STATE 0x01
+
+ /* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR 0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
+#define SD_RSP_R5_ERROR 0x08
+#define SD_RSP_R5_RFU 0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
+#define SD_RSP_R5_OUT_OF_RANGE 0x01
+
+#define SD_RSP_R5_ERRBITS 0xCB
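+/* i.e. COM_CRC_ERROR | ILLEGAL_COMMAND | ERROR | FUNC_NUM_ERROR | OUT_OF_RANGE */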
+
+
+/* ------------------------------------------------
+ * SDIO Commands and responses
+ *
+ * I/O only commands are:
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0 0
+#define SDIOH_CMD_3 3
+#define SDIOH_CMD_5 5
+#define SDIOH_CMD_7 7
+#define SDIOH_CMD_11 11
+#define SDIOH_CMD_14 14
+#define SDIOH_CMD_15 15
+#define SDIOH_CMD_19 19
+#define SDIOH_CMD_52 52
+#define SDIOH_CMD_53 53
+#define SDIOH_CMD_59 59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE 0
+#define SDIOH_RSP_R1 1
+#define SDIOH_RSP_R2 2
+#define SDIOH_RSP_R3 3
+#define SDIOH_RSP_R4 4
+#define SDIOH_RSP_R5 5
+#define SDIOH_RSP_R6 6
+
+/*
+ * SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS 0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M BITFIELD_MASK(24)
+#define CMD5_OCR_S 0
+
+#define CMD5_S18R_M BITFIELD_MASK(1)
+#define CMD5_S18R_S 24
+
+#define CMD7_RCA_M BITFIELD_MASK(16)
+#define CMD7_RCA_S 16
+#define CMD14_RCA_M BITFIELD_MASK(16)
+#define CMD14_RCA_S 16
+#define CMD14_SLEEP_M BITFIELD_MASK(1)
+#define CMD14_SLEEP_S 15
+
+#define CMD_15_RCA_M BITFIELD_MASK(16)
+#define CMD_15_RCA_S 16
+
+#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
+ */
+#define CMD52_DATA_S 0
+#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD52_REG_ADDR_S 9
+#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
+#define CMD52_RAW_S 27
+#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD52_FUNCTION_S 28
+#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD52_RW_FLAG_S 31
+
+
+#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S 0
+#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD53_REG_ADDR_S 9
+#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
+#define CMD53_OP_CODE_S 26
+#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
+#define CMD53_BLK_MODE_S 27
+#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD53_FUNCTION_S 28
+#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD53_RW_FLAG_S 31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ * -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S 0
+
+#define RSP4_S18A_M BITFIELD_MASK(1) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_S18A_S 24
+
+#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S 24
+#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
+#define RSP4_MEM_PRESENT_S 27
+#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S 28
+#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
+#define RSP4_CARD_READY_S 31
+
+#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
+ */
+#define RSP6_STATUS_S 0
+#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S 16
+
+#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S 3
+#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP1_APP_CMD_S 5
+#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S 8
+#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
+ * when Cmd was received
+ */
+#define RSP1_CURR_STATE_S 9
+#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
+#define RSP1_EARSE_RESET_S 13
+#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S 14
+#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S 15
+#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
+ * of CSD
+ */
+#define RSP1_CID_CSD_OVERW_S 16
+#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
+#define RSP1_ERROR_S 19
+#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
+#define RSP1_CC_ERROR_S 20
+#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
+ * to correct data
+ */
+#define RSP1_CARD_ECC_FAILED_S 21
+#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S 22
+#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
+ */
+#define RSP1_COM_CRC_ERROR_S 23
+#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S 24
+#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
+#define RSP1_CARD_LOCKED_S 25
+#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
+ * write-protected blocks
+ */
+#define RSP1_WP_VIOLATION_S 26
+#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S 27
+#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S 28
+#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
+#define RSP1_BLK_LEN_ERR_S 29
+#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
+#define RSP1_ADDR_ERR_S 30
+#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S 31
+
+
+#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */
+#define RSP5_DATA_S 0
+#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
+#define RSP5_FLAGS_S 8
+#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S 16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S 0
+#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
+#define SPIRSP4_STUFF_S 16
+#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
+#define SPIRSP4_MEM_PRESENT_S 19
+#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S 20
+#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
+#define SPIRSP4_CARD_READY_S 23
+#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
+#define SPIRSP4_IDLE_STATE_S 24
+#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S 26
+#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S 27
+#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP4_FUNC_NUM_ERROR_S 28
+#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S 30
+#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP4_START_BIT_S 31
+
+#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
+#define SPIRSP5_DATA_S 16
+#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
+#define SPIRSP5_IDLE_STATE_S 24
+#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S 26
+#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S 27
+#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP5_FUNC_NUM_ERROR_S 28
+#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S 30
+#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP5_START_BIT_S 31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error
+ */
+#define RSP6STAT_AKE_SEQ_ERROR_S 3
+#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP6STAT_APP_CMD_S 5
+#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data
+ * (buff empty)
+ */
+#define RSP6STAT_READY_FOR_DATA_S 8
+#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at
+ * Cmd reception
+ */
+#define RSP6STAT_CURR_STATE_S 9
+#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19
+ */
+#define RSP6STAT_ERROR_S 13
+#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for
+ * card state Bit 22
+ */
+#define RSP6STAT_ILLEGAL_CMD_S 14
+#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command
+ * failed Bit 23
+ */
+#define RSP6STAT_COM_CRC_ERROR_S 15
+
+#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT 0
+#define CMD_OPTION_TUNING 1
+
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 000000000000..3d37c7a7e30b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,412 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h,v 13.17.2.3 2011-01-08 05:28:21 Exp $
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C
+#define SD_TimeoutCntrl 0x02E
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities3 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_ErrStatus 0x054
+#define SD_ADMA_SysAddr 0x58
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2 0x03E
+/* preset regs: start and count */
+#define SD3_PresetValStart 0x060
+#define SD3_PresetValCount 8
+/* preset-indiv regs */
+#define SD3_PresetVal_init 0x060
+#define SD3_PresetVal_default 0x062
+#define SD3_PresetVal_HS 0x064
+#define SD3_PresetVal_SDR12 0x066
+#define SD3_PresetVal_SDR25 0x068
+#define SD3_PresetVal_SDR50 0x06a
+#define SD3_PresetVal_SDR104 0x06c
+#define SD3_PresetVal_DDR50 0x06e
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX 0
+#define SD3_PRESETVAL_DESPEED_IX 1
+#define SD3_PRESETVAL_HISPEED_IX 2
+#define SD3_PRESETVAL_SDR12_IX 3
+#define SD3_PRESETVAL_SDR25_IX 4
+#define SD3_PRESETVAL_SDR50_IX 5
+#define SD3_PRESETVAL_SDR104_IX 6
+#define SD3_PRESETVAL_DDR50_IX 7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+/* Note: for the SDIO 2.0 case this mask would be 6 bits, with the two MSBs
+ * reserved; an 8-bit mask is used here since that is what 3.0 requires.
+ */
+#define CAP_BASECLK_M BITFIELD_MASK(8)
+#define CAP_BASECLK_S 8
+#define CAP_MAXBLOCK_M BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S 16
+#define CAP_ADMA2_M BITFIELD_MASK(1)
+#define CAP_ADMA2_S 19
+#define CAP_ADMA1_M BITFIELD_MASK(1)
+#define CAP_ADMA1_S 20
+#define CAP_HIGHSPEED_M BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S 21
+#define CAP_DMA_M BITFIELD_MASK(1)
+#define CAP_DMA_S 22
+#define CAP_SUSPEND_M BITFIELD_MASK(1)
+#define CAP_SUSPEND_S 23
+#define CAP_VOLT_3_3_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S 24
+#define CAP_VOLT_3_0_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S 25
+#define CAP_VOLT_1_8_M BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S 26
+#define CAP_64BIT_HOST_M BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S 28
+
+#define SDIO_OCR_READ_FAIL (2)
+
+
+#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S 29
+
+#define CAP_SLOTTYPE_M BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S 30
+
+#define CAP3_MSBits_OFFSET (32)
+/* Note: the following fields live in the upper 32 bits of the capabilities
+ * register, so their bit positions are given relative to bit 32; that is why
+ * CAP3_MSBits_OFFSET is subtracted from the absolute bit numbers below.
+ */
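+/* Worked example: CAP3_SDR50_SUP is overall capability bit 32, i.e. bit 0 of
+ * the upper 32-bit word, hence a shift of (32 - CAP3_MSBits_OFFSET) == 0.
+ */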
+#define CAP3_SDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S 14
+
+#define PRESET_CLK_DIV_M BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S 0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S 0
+#define CAP_CURR_3_0_M BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S 8
+#define CAP_CURR_1_8_M BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S 16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S 0
+#define BLKSZ_BNDRY_M BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S 12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S 0
+#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S 1
+#define XFER_CMD_12_EN_M BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 2
+#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S 4
+#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S 5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 0
+#define RESP_TYPE_136 1
+#define RESP_TYPE_48 2
+#define RESP_TYPE_48_BUSY 3
+/* type field */
+#define CMD_TYPE_NORMAL 0
+#define CMD_TYPE_SUSPEND 1
+#define CMD_TYPE_RESUME 2
+#define CMD_TYPE_ABORT 3
+
+#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */
+#define CMD_RESP_TYPE_S 0
+#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */
+#define CMD_CRC_EN_S 3
+#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */
+#define CMD_INDEX_EN_S 4
+#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */
+#define CMD_DATA_EN_S 5
+#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc
+ */
+#define CMD_TYPE_S 6
+#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */
+#define CMD_INDEX_S 8
+
+/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */
+/* SD_PresentState : Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */
+#define PRES_CMD_INHIBIT_S 0
+#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */
+#define PRES_DAT_INHIBIT_S 1
+#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */
+#define PRES_DAT_BUSY_S 2
+#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */
+#define PRES_PRESENT_RSVD_S 3
+#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */
+#define PRES_WRITE_ACTIVE_S 8
+#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */
+#define PRES_READ_ACTIVE_S 9
+#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S 10
+#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */
+#define PRES_READ_DATA_RDY_S 11
+#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */
+#define PRES_CARD_PRESENT_S 16
+#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */
+#define PRES_CARD_STABLE_S 17
+#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */
+#define PRES_CARD_PRESENT_RAW_S 18
+#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? */
+#define PRES_WRITE_ENABLED_S 19
+#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */
+#define PRES_DAT_SIGNAL_S 20
+#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */
+#define PRES_CMD_SIGNAL_S 24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */
+#define HOST_LED_S 0
+#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */
+#define HOST_DATA_WIDTH_S 1
+#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */
+#define HOST_HI_SPEED_EN_S 2
+#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */
+#define HOST_DMA_SEL_S 3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */
+
+#define HOST_CONTR_VER_2 (1)
+#define HOST_CONTR_VER_3 (2)
+
+/* misc defines */
+#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */
+#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */
+#define PWR_BUS_EN_S 0
+#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */
+#define PWR_VOLTS_S 1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */
+#define SW_RESET_ALL_S 0
+#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */
+#define SW_RESET_CMD_S 1
+#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */
+#define SW_RESET_DAT_S 2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S 0
+#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S 1
+#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S 2
+#define INTSTAT_DMA_INT_M BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S 3
+#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S 4
+#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S 5
+#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S 6
+#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S 7
+#define INTSTAT_CARD_INT_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S 8
+#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */
+#define INTSTAT_RETUNING_INT_S 12
+#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */
+#define INTSTAT_ERROR_INT_S 15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S 0
+#define ERRINT_CMD_CRC_M BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S 1
+#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S 2
+#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S 3
+#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S 4
+#define ERRINT_DATA_CRC_M BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S 5
+#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S 6
+#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S 7
+#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S 8
+#define ERRINT_ADMA_M BITFIELD_MASK(1)
+#define ERRINT_ADMA_S 9
+#define ERRINT_VENDOR_M BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S 12
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT 0x0001
+#define ERRINT_CMD_CRC_BIT 0x0002
+#define ERRINT_CMD_ENDBIT_BIT 0x0004
+#define ERRINT_CMD_INDEX_BIT 0x0008
+#define ERRINT_DATA_TIMEOUT_BIT 0x0010
+#define ERRINT_DATA_CRC_BIT 0x0020
+#define ERRINT_DATA_ENDBIT_BIT 0x0040
+#define ERRINT_CURRENT_LIMIT_BIT 0x0080
+#define ERRINT_AUTO_CMD12_BIT 0x0100
+#define ERRINT_ADMA_BIT 0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+ ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+ ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
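+
+/*
+ * Usage sketch (illustrative): classify a raw SD_ErrorIntrStatus value with
+ * the combined masks above.
+ */
+static int example_is_cmd_error(unsigned short errint_status)
+{
+	/* non-zero when any CMD-line error bit is set */
+	return (errint_status & ERRINT_CMD_ERRS) != 0;
+}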
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl : Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */
+/* SD_IntrStatus : Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */
+/* SD_Capabilities : Offset 0x040 , size = bytes */
+/* SD_MaxCurCap : Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 000000000000..2c5bcf97e910
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h,v 13.9 2009-12-08 22:30:15 Exp $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 000000000000..c5a33832b585
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,247 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h,v 13.251.2.10 2011-02-04 05:06:32 Exp $
+ */
+
+
+#ifndef _siutils_h_
+#define _siutils_h_
+
+
+struct si_pub {
+ uint socitype;
+
+ uint bustype;
+ uint buscoretype;
+ uint buscorerev;
+ uint buscoreidx;
+ int ccrev;
+ uint32 cccaps;
+ uint32 cccaps_ext;
+ int pmurev;
+ uint32 pmucaps;
+ uint boardtype;
+ uint boardvendor;
+ uint boardflags;
+ uint boardflags2;
+ uint chip;
+ uint chiprev;
+ uint chippkg;
+ uint32 chipst;
+ bool issim;
+ uint socirev;
+ bool pci_pr32414;
+
+};
+
+
+typedef const struct si_pub si_t;
+
+
+#define SI_OSH NULL
+
+#define BADIDX (SI_MAXCORES + 1)
+
+
+#define XTAL 0x1
+#define PLL 0x2
+
+
+#define CLK_FAST 0
+#define CLK_DYNAMIC 2
+
+
+#define GPIO_DRV_PRIORITY 0
+#define GPIO_APP_PRIORITY 1
+#define GPIO_HI_PRIORITY 2
+
+
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+
+#define GPIO_REGEVT 0
+#define GPIO_REGEVT_INTMSK 1
+#define GPIO_REGEVT_INTPOL 2
+
+
+#define SI_DEVPATH_BUFSZ 16
+
+
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2
+#define SI_PCIUP 3
+
+#define ISSIM_ENAB(sih) 0
+
+
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih) (BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih) (0)
+#define CCPLL_ENAB(sih) (0)
+#else
+#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+
+
+
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect);
+extern bool si_socdevram_pkg(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+
+
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+#define si_eci_init(sih) (0)
+#define si_eci_notify_bt(sih, type, val) (0)
+
+
+
+extern int si_devpath(si_t *sih, char *path, int size);
+
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
+
+
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 000000000000..397006ab005a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,52 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h,v 13.15.108.2 2010-11-15 17:57:30 Exp $
+ */
+
+#ifndef _TRX_HDR_H_
+#define _TRX_HDR_H_
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_VERSION 1 /* Version 1 */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_OVERLAYS 0x4 /* Contains an overlay header after the trx header */
+#define TRX_MAX_OFFSET 3 /* Max number of individual files */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+};
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
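+
+/*
+ * Illustrative helpers, not part of the original header: flag_version packs
+ * the flags in bits 0:15 and the version in bits 16:31, as noted in the
+ * struct comment above. The guard macro and the two helper names are
+ * introduced here for the example only.
+ */
+#ifdef TRXHDR_EXAMPLE
+static uint32 trx_hdr_flags(const struct trx_header *hdr)
+{
+	return hdr->flag_version & 0xffff;	/* bits 0:15 */
+}
+
+static uint32 trx_hdr_version(const struct trx_header *hdr)
+{
+	return hdr->flag_version >> 16;		/* bits 16:31 */
+}
+#endif /* TRXHDR_EXAMPLE */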
+
+#endif /* _TRX_HDR_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 000000000000..228b5dcf11c7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h,v 1.103.2.1 2010-05-11 18:19:28 Exp $
+ */
+
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+
+
+#include "site_typedefs.h"
+
+#else
+
+
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE false
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+
+#else
+
+
+#endif
+
+#if defined(__x86_64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif
+
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif
+#endif
+#endif
+
+
+
+
+
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif
+
+
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif
+
+#if !defined(__DJGPP__)
+
+
+#if defined(__KERNEL__)
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>
+#endif
+
+#else
+
+
+#include <sys/types.h>
+
+#endif
+
+#endif
+
+
+
+
+#define USE_TYPEDEF_DEFAULTS
+
+#endif
+
+
+
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef unsigned char bool;
+#endif
+
+
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long ulong;
+#endif
+
+
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT32
+typedef float float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double float64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else
+typedef float64 float_t;
+#endif
+
+#endif
+
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef OFF
+#define OFF 0
+#endif
+
+#ifndef ON
+#define ON 1
+#endif
+
+#define AUTO (-1)
+
+
+
+#ifndef PTRSZ
+#define PTRSZ sizeof(char*)
+#endif
+
+
+
+#if defined(__GNUC__)
+ #define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+ #define BWL_COMPILER_ARMCC
+#else
+ #error "Unknown compiler!"
+#endif
+
+
+#ifndef INLINE
+ #if defined(BWL_COMPILER_MICROSOFT)
+ #define INLINE __inline
+ #elif defined(BWL_COMPILER_GNU)
+ #define INLINE __inline__
+ #elif defined(BWL_COMPILER_ARMCC)
+ #define INLINE __inline
+ #else
+ #define INLINE
+ #endif
+#endif
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif
+
+
+#define UNUSED_PARAMETER(x) (void)(x)
+
+
+#include <bcmdefs.h>
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 000000000000..7230d3b67ab0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,198 @@
+/*
+* Copyright (C) 1999-2011, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: wlfc_proto.h,v 1.1.6.2 2010-05-08 01:30:41 Exp $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+ /* Use TLV to convey WLFC information.
+ ---------------------------------------------------------------------------
+ | Type | Len | value | Description
+ ---------------------------------------------------------------------------
+ | 1 | 1 | (handle) | MAC OPEN
+ ---------------------------------------------------------------------------
+ | 2 | 1 | (handle) | MAC CLOSE
+ ---------------------------------------------------------------------------
+ | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+ ---------------------------------------------------------------------------
+ | 4 | 4 | see pkttag comments | TXSTATUS
+ ---------------------------------------------------------------------------
+ | 5 | 4 | see pkttag comments | PKTTAG [host->firmware]
+ ---------------------------------------------------------------------------
+ | 6 | 8 | (handle, ifid, MAC) | MAC ADD
+ ---------------------------------------------------------------------------
+ | 7 | 8 | (handle, ifid, MAC) | MAC DEL
+ ---------------------------------------------------------------------------
+ | 8 | 1 | (rssi) | RSSI - RSSI value for the packet.
+ ---------------------------------------------------------------------------
+ | 9 | 1 | (interface ID) | Interface OPEN
+ ---------------------------------------------------------------------------
+ | 10 | 1 | (interface ID) | Interface CLOSE
+ ---------------------------------------------------------------------------
+ | 11 | 8 | fifo credit returns map | FIFO credits back to the host
+ | | | |
+ | | | | --------------------------------------
+ | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+ | | | | --------------------------------------
+ | | | |
+ ---------------------------------------------------------------------------
+ | 12 | 2 | MAC handle, | Host provides a bitmap of pending
+ | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn.
+ | | | | [host->firmware]
+ ---------------------------------------------------------------------------
+ | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific
+ | | | | MAC destination.
+ ---------------------------------------------------------------------------
+ | 255 | N/A | N/A | FILLER - This is a special type
+ | | | | that has no length or value.
+ | | | | Typically used for padding.
+ ---------------------------------------------------------------------------
+ */
+
+#define WLFC_CTL_TYPE_MAC_OPEN 1
+#define WLFC_CTL_TYPE_MAC_CLOSE 2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3
+#define WLFC_CTL_TYPE_TXSTATUS 4
+#define WLFC_CTL_TYPE_PKTTAG 5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD 6
+#define WLFC_CTL_TYPE_MACDESC_DEL 7
+#define WLFC_CTL_TYPE_RSSI 8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN 9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13
+
+#define WLFC_CTL_TYPE_FILLER 255
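+
+/*
+ * A minimal framing sketch, not part of the original header: walk a WLFC TLV
+ * buffer laid out as in the table above (one type octet, one length octet,
+ * then 'length' value octets; WLFC_CTL_TYPE_FILLER is a bare type octet used
+ * for padding). Endianness and the handling of each value are left to the
+ * caller; uint/uint8 are assumed to come from <typedefs.h>, and the guard
+ * macro and function name are introduced here for illustration only.
+ */
+#ifdef WLFC_TLV_EXAMPLE
+static void wlfc_walk_tlvs(const uint8 *buf, uint buflen)
+{
+	uint i = 0;
+
+	while (i < buflen) {
+		if (buf[i] == WLFC_CTL_TYPE_FILLER) {
+			i++;				/* no length, no value */
+			continue;
+		}
+		if ((i + 2 > buflen) || (i + 2 + buf[i + 1] > buflen))
+			break;				/* truncated TLV */
+		/* type is buf[i]; value starts at buf[i + 2], buf[i + 1] bytes long */
+		i += 2 + buf[i + 1];
+	}
+}
+#endif /* WLFC_TLV_EXAMPLE */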
+
+#define WLFC_CTL_VALUE_LEN_MACDESC 8 /* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC 1 /* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI 1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE 1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS 4
+#define WLFC_CTL_VALUE_LEN_PKTTAG 4
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
+
+
+#define WLFC_PKTID_GEN_MASK 0x80000000
+#define WLFC_PKTID_GEN_SHIFT 31
+
+#define WLFC_PKTID_GEN(x) (((x) & WLFC_PKTID_GEN_MASK) >> WLFC_PKTID_GEN_SHIFT)
+#define WLFC_PKTID_SETGEN(x, gen) (x) = ((x) & ~WLFC_PKTID_GEN_MASK) | \
+ (((gen) << WLFC_PKTID_GEN_SHIFT) & WLFC_PKTID_GEN_MASK)
+
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+
+#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT 27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+ WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT 24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \
+ ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK)
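+
+/*
+ * Illustrative composition of a PKTTAG/TXSTATUS word using the accessors
+ * above, not part of the original header: flags occupy bits 27..30, the FIFO
+ * id bits 24..26 and the packet id bits 0..23 (the generation bit, 31, is
+ * handled by the macros further below). The guard macro and helper name are
+ * introduced here for the example only.
+ */
+#ifdef WLFC_PKTTAG_EXAMPLE
+static uint32 wlfc_make_pkttag(uint8 flags, uint8 fifo, uint32 pktid)
+{
+	uint32 tag = 0;
+
+	WL_TXSTATUS_SET_FLAGS(tag, flags);
+	WL_TXSTATUS_SET_FIFO(tag, fifo);
+	WL_TXSTATUS_SET_PKTID(tag, pktid & WL_TXSTATUS_PKTID_MASK);
+	return tag;
+}
+#endif /* WLFC_PKTTAG_EXAMPLE */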
+
+/* 32 STAs should be enough(?); 6 bits; must be a power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE 32
+#define WLFC_MAX_IFNUM 16
+#define WLFC_MAC_DESC_ID_INVALID 0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \
+ (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
+ ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WL_TXSTATUS_GENERATION_MASK 1
+#define WL_TXSTATUS_GENERATION_SHIFT 31
+
+#define WLFC_PKTFLAG_SET_GENERATION(x, gen) ((x) = \
+ ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+ (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WLFC_PKTFLAG_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+ WL_TXSTATUS_GENERATION_MASK)
+
+#define WLFC_MAX_PENDING_DATALEN 120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD 0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1
+/* WL firmware suppressed a packet because MAC is
+ already in PSMode (short time window)
+*/
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
+
+#define WLFC_D11_STATUS_INTERPRET(txs) \
+	((((txs)->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT) ? \
+	WLFC_CTL_PKTFLAG_D11SUPPRESS : WLFC_CTL_PKTFLAG_DISCARD)
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+ {printf("WLFC: %s():%d:caller:%p\n", \
+ __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+ banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 000000000000..9357552c9194
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,2757 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h,v 1.767.2.38 2011-02-01 23:04:28 Exp $
+ */
+
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <bcmwifi.h>
+
+#include <bcmcdc.h>
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ 16
+#endif
+
+
+typedef struct remote_ioctl {
+ cdc_ioctl_t msg;
+ uint data_len;
+ char intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+
+#define ACTION_FRAME_SIZE 1040
+
+typedef struct wl_action_frame {
+ struct ether_addr da;
+ uint16 len;
+ uint32 packetId;
+ uint8 data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+ uint8 ssid_len;
+ uint8 ssid[32];
+} ssid_info_t;
+
+typedef struct wl_af_params {
+ uint32 channel;
+ int32 dwell_time;
+ struct ether_addr BSSID;
+ wl_action_frame_t action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+
+
+
+#define LEGACY2_WL_BSS_INFO_VERSION 108
+
+
+typedef struct wl_bss_info_108 {
+ uint32 version;
+ uint32 length;
+ struct ether_addr BSSID;
+ uint16 beacon_period;
+ uint16 capability;
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count;
+ uint8 rates[16];
+ } rateset;
+ chanspec_t chanspec;
+ uint16 atim_window;
+ uint8 dtim_period;
+ int16 RSSI;
+ int8 phy_noise;
+
+ uint8 n_cap;
+ uint32 nbss_cap;
+ uint8 ctl_ch;
+ uint32 reserved32[1];
+ uint8 flags;
+ uint8 reserved[3];
+ uint8 basic_mcs[MCSSET_LEN];
+
+ uint16 ie_offset;
+ uint32 ie_length;
+
+
+} wl_bss_info_108_t;
+
+#define WL_BSS_INFO_VERSION 109
+
+
+typedef struct wl_bss_info {
+ uint32 version;
+ uint32 length;
+ struct ether_addr BSSID;
+ uint16 beacon_period;
+ uint16 capability;
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count;
+ uint8 rates[16];
+ } rateset;
+ chanspec_t chanspec;
+ uint16 atim_window;
+ uint8 dtim_period;
+ int16 RSSI;
+ int8 phy_noise;
+
+ uint8 n_cap;
+ uint32 nbss_cap;
+ uint8 ctl_ch;
+ uint32 reserved32[1];
+ uint8 flags;
+ uint8 reserved[3];
+ uint8 basic_mcs[MCSSET_LEN];
+
+ uint16 ie_offset;
+ uint32 ie_length;
+ int16 SNR;
+
+
+} wl_bss_info_t;
+
+typedef struct wl_bsscfg {
+ uint32 wsec;
+ uint32 WPA_auth;
+ uint32 wsec_index;
+ uint32 associated;
+ uint32 BSS;
+ uint32 phytest_on;
+ struct ether_addr prev_BSSID;
+ struct ether_addr BSSID;
+} wl_bsscfg_t;
+
+typedef struct wl_bss_config {
+ uint32 atim_window;
+ uint32 beacon_period;
+ uint32 chanspec;
+} wl_bss_config_t;
+
+
+typedef struct wlc_ssid {
+ uint32 SSID_len;
+ uchar SSID[32];
+} wlc_ssid_t;
+
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY 2
+
+
+#define WL_SCANFLAGS_PASSIVE 0x01
+#define WL_SCANFLAGS_RESERVED 0x02
+#define WL_SCANFLAGS_PROHIBITED 0x04
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+typedef struct wl_scan_params {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ int8 bss_type;
+ uint8 scan_type;
+ int32 nprobes;
+ int32 active_time;
+ int32 passive_time;
+ int32 home_time;
+ int32 channel_num;
+ uint16 channel_list[1];
+} wl_scan_params_t;
+
+
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+
+
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
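+
+/*
+ * A packing sketch, not part of the original header: the two defines above
+ * suggest channel_num carries the channel count in its low 16 bits and an
+ * SSID count above WL_SCAN_PARAMS_NSSID_SHIFT; this helper only shows that
+ * packing (byte-order conversion is the caller's concern). The guard macro
+ * and helper name are introduced here for illustration only.
+ */
+#ifdef WL_SCAN_PARAMS_EXAMPLE
+static int32 wl_scan_params_channel_num(uint16 nchannels, uint16 nssid)
+{
+	return (int32)(((uint32)nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		((uint32)nchannels & WL_SCAN_PARAMS_COUNT_MASK));
+}
+#endif /* WL_SCAN_PARAMS_EXAMPLE */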
+
+#define WL_SCAN_ACTION_START 1
+#define WL_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+#define ISCAN_REQ_VERSION 1
+
+
+typedef struct wl_iscan_params {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ wl_scan_params_t params;
+} wl_iscan_params_t;
+
+
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
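+
+/*
+ * A walking sketch, not part of the original header: each wl_bss_info_t in
+ * bss_info[] records its own total size (fixed part plus trailing IEs) in
+ * 'length', so visiting all 'count' entries means advancing by that many
+ * bytes. Byte-order conversion is omitted; the guard macro and function name
+ * are introduced here for illustration only.
+ */
+#ifdef WL_SCAN_RESULTS_EXAMPLE
+static void wl_scan_results_walk(const wl_scan_results_t *list)
+{
+	const wl_bss_info_t *bi = list->bss_info;
+	uint32 i;
+
+	for (i = 0; i < list->count; i++) {
+		/* inspect bi here, e.g. bi->SSID, bi->RSSI, bi->chanspec */
+		bi = (const wl_bss_info_t *)((const uint8 *)bi + bi->length);
+	}
+}
+#endif /* WL_SCAN_RESULTS_EXAMPLE */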
+
+
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM 4
+
+
+#define DNGL_RXCTXT_SIZE 45
+
+#if defined(SIMPLE_ISCAN)
+#define ISCAN_RETRY_CNT 5
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+#define ISCAN_STATE_PENDING 2
+
+
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+#endif
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+ uint32 buflen;
+ uint32 version;
+ uint16 sync_id;
+ uint16 bss_count;
+ wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+
+typedef struct wl_iscan_results {
+ uint32 status;
+ wl_scan_results_t results;
+} wl_iscan_results_t;
+
+
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+typedef struct wl_probe_params {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_NUMRATES 16
+typedef struct wl_rateset {
+ uint32 count;
+ uint8 rates[WL_NUMRATES];
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+ uint32 count;
+ uint8 rates[WL_NUMRATES];
+ uint8 mcs[MCSSET_LEN];
+} wl_rateset_args_t;
+
+
+typedef struct wl_uint32_list {
+
+ uint32 count;
+
+ uint32 element[1];
+} wl_uint32_list_t;
+
+
+typedef struct wl_assoc_params {
+ struct ether_addr bssid;
+ uint16 bssid_cnt;
+ int32 chanspec_num;
+ chanspec_t chanspec_list[1];
+} wl_assoc_params_t;
+#define WL_ASSOC_PARAMS_FIXED_SIZE (sizeof(wl_assoc_params_t) - sizeof(chanspec_t))
+
+
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+
+typedef struct wl_join_params {
+ wlc_ssid_t ssid;
+ wl_assoc_params_t params;
+} wl_join_params_t;
+#define WL_JOIN_PARAMS_FIXED_SIZE (sizeof(wl_join_params_t) - sizeof(chanspec_t))
+
+
+typedef struct wl_join_scan_params {
+ uint8 scan_type;
+ int32 nprobes;
+ int32 active_time;
+ int32 passive_time;
+ int32 home_time;
+} wl_join_scan_params_t;
+
+
+typedef struct wl_extjoin_params {
+ wlc_ssid_t ssid;
+ wl_join_scan_params_t scan;
+ wl_join_assoc_params_t assoc;
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE (sizeof(wl_extjoin_params_t) - sizeof(chanspec_t))
+
+typedef struct {
+ uint32 num;
+ chanspec_t list[1];
+} chanspec_list_t;
+
+
+#define NRATE_MCS_INUSE 0x00000080
+#define NRATE_RATE_MASK 0x0000007f
+#define NRATE_STF_MASK 0x0000ff00
+#define NRATE_STF_SHIFT 8
+#define NRATE_OVERRIDE 0x80000000
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
+#define NRATE_SGI_MASK 0x00800000
+#define NRATE_SGI_SHIFT 23
+#define NRATE_LDPC_CODING 0x00400000
+#define NRATE_LDPC_SHIFT 22
+
+#define NRATE_STF_SISO 0
+#define NRATE_STF_CDD 1
+#define NRATE_STF_STBC 2
+#define NRATE_STF_SDM 3
+
+#define ANTENNA_NUM_1 1
+#define ANTENNA_NUM_2 2
+#define ANTENNA_NUM_3 3
+#define ANTENNA_NUM_4 4
+
+#define ANT_SELCFG_AUTO 0x80
+#define ANT_SELCFG_MASK 0x33
+#define ANT_SELCFG_MAX 4
+#define ANT_SELCFG_TX_UNICAST 0
+#define ANT_SELCFG_RX_UNICAST 1
+#define ANT_SELCFG_TX_DEF 2
+#define ANT_SELCFG_RX_DEF 3
+
+#define MAX_STREAMS_SUPPORTED 4
+
+typedef struct {
+ uint8 ant_config[ANT_SELCFG_MAX];
+ uint8 num_antcfg;
+} wlc_antselcfg_t;
+
+#define HIGHEST_SINGLE_STREAM_MCS 7
+
+#define MAX_CCA_CHANNELS 38
+#define MAX_CCA_SECS 60
+
+#define IBSS_MED 15
+#define IBSS_HI 25
+#define OBSS_MED 12
+#define OBSS_HI 25
+#define INTERFER_MED 5
+#define INTERFER_HI 10
+
+#define CCA_FLAG_2G_ONLY 0x01
+#define CCA_FLAG_5G_ONLY 0x02
+#define CCA_FLAG_IGNORE_DURATION 0x04
+#define CCA_FLAGS_PREFER_1_6_11 0x10
+#define CCA_FLAG_IGNORE_INTERFER 0x20
+
+#define CCA_ERRNO_BAND 1
+#define CCA_ERRNO_DURATION 2
+#define CCA_ERRNO_PREF_CHAN 3
+#define CCA_ERRNO_INTERFER 4
+#define CCA_ERRNO_TOO_FEW 5
+
+typedef struct {
+ uint32 duration;
+ uint32 congest_ibss;
+
+ uint32 congest_obss;
+ uint32 interference;
+ uint32 timestamp;
+} cca_congest_t;
+
+typedef struct {
+ chanspec_t chanspec;
+ uint8 num_secs;
+ cca_congest_t secs[1];
+} cca_congest_channel_req_t;
+
+#define WLC_CNTRY_BUF_SZ 4
+
+typedef struct wl_country {
+ char country_abbrev[WLC_CNTRY_BUF_SZ];
+ int32 rev;
+ char ccode[WLC_CNTRY_BUF_SZ];
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+ uint32 buflen;
+ uint32 band;
+ char country_abbrev[WLC_CNTRY_BUF_SZ];
+ uint32 count;
+ uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+ uint32 buflen;
+ uint32 band_set;
+ uint32 band;
+ uint32 count;
+ char country_abbrev[1];
+} wl_country_list_t;
+
+#define WL_NUM_RPI_BINS 8
+#define WL_RM_TYPE_BASIC 1
+#define WL_RM_TYPE_CCA 2
+#define WL_RM_TYPE_RPI 3
+
+#define WL_RM_FLAG_PARALLEL (1<<0)
+
+#define WL_RM_FLAG_LATE (1<<1)
+#define WL_RM_FLAG_INCAPABLE (1<<2)
+#define WL_RM_FLAG_REFUSED (1<<3)
+
+typedef struct wl_rm_req_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token;
+ uint32 tsf_h;
+ uint32 tsf_l;
+ uint32 dur;
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+ uint32 token;
+ uint32 count;
+ void *cb;
+ void *cb_arg;
+ wl_rm_req_elt_t req[1];
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token;
+ uint32 tsf_h;
+ uint32 tsf_l;
+ uint32 dur;
+ uint32 len;
+ uint8 data[1];
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN 24
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+ uint8 rpi[WL_RPI_REP_BIN_NUM];
+ int8 rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+ uint32 token;
+ uint32 len;
+ wl_rm_rep_elt_t rep[1];
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN 8
+
+
+typedef enum sup_auth_status {
+
+ WLC_SUP_DISCONNECTED = 0,
+ WLC_SUP_CONNECTING,
+ WLC_SUP_IDREQUIRED,
+ WLC_SUP_AUTHENTICATING,
+ WLC_SUP_AUTHENTICATED,
+ WLC_SUP_KEYXCHANGE,
+ WLC_SUP_KEYED,
+ WLC_SUP_TIMEOUT,
+ WLC_SUP_LAST_BASIC_STATE,
+
+
+
+ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+
+ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+
+ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+ WLC_SUP_KEYXCHANGE_PREP_M4,
+ WLC_SUP_KEYXCHANGE_WAIT_G1,
+ WLC_SUP_KEYXCHANGE_PREP_G2
+} sup_auth_status_t;
+
+
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_OCB_MSDU 5
+#define CRYPTO_ALGO_AES_OCB_MPDU 6
+#define CRYPTO_ALGO_NALG 7
+#define CRYPTO_ALGO_PMK 12
+
+#define WSEC_GEN_MIC_ERROR 0x0001
+#define WSEC_GEN_REPLAY 0x0002
+#define WSEC_GEN_ICV_ERROR 0x0004
+
+#define WL_SOFT_KEY (1 << 0)
+#define WL_PRIMARY_KEY (1 << 1)
+#define WL_KF_RES_4 (1 << 4)
+#define WL_KF_RES_5 (1 << 5)
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6)
+
+typedef struct wl_wsec_key {
+ uint32 index;
+ uint32 len;
+ uint8 data[DOT11_MAX_KEY_SIZE];
+ uint32 pad_1[18];
+ uint32 algo;
+ uint32 flags;
+ uint32 pad_2[2];
+ int pad_3;
+ int iv_initialized;
+ int pad_4;
+
+ struct {
+ uint32 hi;
+ uint16 lo;
+ } rxiv;
+ uint32 pad_5[2];
+ struct ether_addr ea;
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN 8
+#define WSEC_MAX_PSK_LEN 64
+
+
+#define WSEC_PASSPHRASE (1<<0)
+
+
+typedef struct {
+ ushort key_len;
+ ushort flags;
+ uint8 key[WSEC_MAX_PSK_LEN];
+} wsec_pmk_t;
+
+
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#define SES_OW_ENABLED 0x0040
+
+
+#define WPA_AUTH_DISABLED 0x0000
+#define WPA_AUTH_NONE 0x0001
+#define WPA_AUTH_UNSPECIFIED 0x0002
+#define WPA_AUTH_PSK 0x0004
+
+#define WPA2_AUTH_UNSPECIFIED 0x0040
+#define WPA2_AUTH_PSK 0x0080
+#define BRCM_AUTH_PSK 0x0100
+#define BRCM_AUTH_DPT 0x0200
+#define WPA2_AUTH_MFP 0x1000
+#define WPA2_AUTH_TPK 0x2000
+#define WPA2_AUTH_FT 0x4000
+
+
+#define MAXPMKID 16
+
+typedef struct _pmkid {
+ struct ether_addr BSSID;
+ uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+ uint32 npmkid;
+ pmkid_t pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+ struct ether_addr BSSID;
+ uint8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+ uint32 npmkid_cand;
+ pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+typedef struct wl_assoc_info {
+ uint32 req_len;
+ uint32 resp_len;
+ uint32 flags;
+ struct dot11_assoc_req req;
+ struct ether_addr reassoc_bssid;
+ struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01
+
+
+typedef struct {
+ uint16 ver;
+ uint16 len;
+ uint16 cap;
+ uint32 flags;
+ uint32 idle;
+ struct ether_addr ea;
+ wl_rateset_t rateset;
+ uint32 in;
+ uint32 listen_interval_inms;
+ uint32 tx_pkts;
+ uint32 tx_failures;
+ uint32 rx_ucast_pkts;
+ uint32 rx_mcast_pkts;
+ uint32 tx_rate;
+ uint32 rx_rate;
+ uint32 rx_decrypt_succeeds;
+ uint32 rx_decrypt_failures;
+} sta_info_t;
+
+#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_pkts)
+
+#define WL_STA_VER 3
+
+
+#define WL_STA_BRCM 0x1
+#define WL_STA_WME 0x2
+#define WL_STA_ABCAP 0x4
+#define WL_STA_AUTHE 0x8
+#define WL_STA_ASSOC 0x10
+#define WL_STA_AUTHO 0x20
+#define WL_STA_WDS 0x40
+#define WL_STA_WDS_LINKUP 0x80
+#define WL_STA_PS 0x100
+#define WL_STA_APSD_BE 0x200
+#define WL_STA_APSD_BK 0x400
+#define WL_STA_APSD_VI 0x800
+#define WL_STA_APSD_VO 0x1000
+#define WL_STA_N_CAP 0x2000
+#define WL_STA_SCBSTATS 0x4000
+
+#define WL_WDS_LINKUP WL_STA_WDS_LINKUP
+
+
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+
+typedef struct {
+ uint32 val;
+ struct ether_addr ea;
+} scb_val_t;
+
+
+typedef struct {
+ uint32 code;
+ scb_val_t ioctl_args;
+} authops_t;
+
+
+typedef struct channel_info {
+ int hw_channel;
+ int target_channel;
+ int scan_channel;
+} channel_info_t;
+
+
+struct maclist {
+ uint count;
+ struct ether_addr ea[1];
+};
+
+
+typedef struct get_pktcnt {
+ uint rx_good_pkt;
+ uint rx_bad_pkt;
+ uint tx_good_pkt;
+ uint tx_bad_pkt;
+ uint rx_ocast_good_pkt;
+} get_pktcnt_t;
+
+#define WL_IOCTL_ACTION_GET 0x0
+#define WL_IOCTL_ACTION_SET 0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
+#define WL_IOCTL_ACTION_OVL_RSV 0x20
+#define WL_IOCTL_ACTION_OVL 0x40
+#define WL_IOCTL_ACTION_MASK 0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT 1
+
+
+typedef struct wl_ioctl {
+ uint cmd;
+ void *buf;
+ uint len;
+ uint8 set;
+ uint used;
+ uint needed;
+} wl_ioctl_t;
+
+
+#define ioctl_subtype set
+#define ioctl_pid used
+#define ioctl_status needed
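+
+/*
+ * A fill-in sketch, not part of the original header, showing the get/set
+ * convention implied by WL_IOCTL_ACTION_GET/SET above; how the populated
+ * structure actually reaches the driver is outside the scope of this header.
+ * The guard macro and helper name are introduced here for illustration only.
+ */
+#ifdef WL_IOCTL_EXAMPLE
+static void wl_ioctl_prepare_get(wl_ioctl_t *ioc, uint cmd, void *buf, uint len)
+{
+	ioc->cmd = cmd;				/* e.g. WLC_GET_VAR, defined below */
+	ioc->buf = buf;				/* caller-provided result buffer */
+	ioc->len = len;				/* size of that buffer */
+	ioc->set = WL_IOCTL_ACTION_GET;		/* WL_IOCTL_ACTION_SET for a set */
+	ioc->used = 0;
+	ioc->needed = 0;
+}
+#endif /* WL_IOCTL_EXAMPLE */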
+
+
+typedef struct wlc_rev_info {
+ uint vendorid;
+ uint deviceid;
+ uint radiorev;
+ uint chiprev;
+ uint corerev;
+ uint boardid;
+ uint boardvendor;
+ uint boardrev;
+ uint driverrev;
+ uint ucoderev;
+ uint bus;
+ uint chipnum;
+ uint phytype;
+ uint phyrev;
+ uint anarev;
+ uint chippkg;
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH 48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+ uint instance;
+ char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+
+typedef struct wl_txfifo_sz {
+ uint16 magic;
+ uint16 fifo;
+ uint16 size;
+} wl_txfifo_sz_t;
+
+#define WL_TXFIFO_SZ_MAGIC 0xa5a5
+
+
+
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+ uint8 module;
+ uint8 type;
+ char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+
+#define WLC_IOCTL_MAGIC 0x14e46c77
+
+
+#define WLC_IOCTL_VERSION 1
+
+#define WLC_IOCTL_MAXLEN 8192
+#define WLC_IOCTL_SMLEN 256
+#define WLC_IOCTL_MEDLEN 1536
+#ifdef WLC_HIGH_ONLY
+#define WLC_SAMPLECOLLECT_MAXLEN 1024
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN 10240
+#endif
+
+
+#define WLC_GET_MAGIC 0
+#define WLC_GET_VERSION 1
+#define WLC_UP 2
+#define WLC_DOWN 3
+#define WLC_GET_LOOP 4
+#define WLC_SET_LOOP 5
+#define WLC_DUMP 6
+#define WLC_GET_MSGLEVEL 7
+#define WLC_SET_MSGLEVEL 8
+#define WLC_GET_PROMISC 9
+#define WLC_SET_PROMISC 10
+#define WLC_OVERLAY_IOCTL 11
+#define WLC_GET_RATE 12
+
+#define WLC_GET_INSTANCE 14
+
+
+
+
+#define WLC_GET_INFRA 19
+#define WLC_SET_INFRA 20
+#define WLC_GET_AUTH 21
+#define WLC_SET_AUTH 22
+#define WLC_GET_BSSID 23
+#define WLC_SET_BSSID 24
+#define WLC_GET_SSID 25
+#define WLC_SET_SSID 26
+#define WLC_RESTART 27
+
+#define WLC_GET_CHANNEL 29
+#define WLC_SET_CHANNEL 30
+#define WLC_GET_SRL 31
+#define WLC_SET_SRL 32
+#define WLC_GET_LRL 33
+#define WLC_SET_LRL 34
+#define WLC_GET_PLCPHDR 35
+#define WLC_SET_PLCPHDR 36
+#define WLC_GET_RADIO 37
+#define WLC_SET_RADIO 38
+#define WLC_GET_PHYTYPE 39
+#define WLC_DUMP_RATE 40
+#define WLC_SET_RATE_PARAMS 41
+#define WLC_GET_FIXRATE 42
+#define WLC_SET_FIXRATE 43
+
+
+#define WLC_GET_KEY 44
+#define WLC_SET_KEY 45
+#define WLC_GET_REGULATORY 46
+#define WLC_SET_REGULATORY 47
+#define WLC_GET_PASSIVE_SCAN 48
+#define WLC_SET_PASSIVE_SCAN 49
+#define WLC_SCAN 50
+#define WLC_SCAN_RESULTS 51
+#define WLC_DISASSOC 52
+#define WLC_REASSOC 53
+#define WLC_GET_ROAM_TRIGGER 54
+#define WLC_SET_ROAM_TRIGGER 55
+#define WLC_GET_ROAM_DELTA 56
+#define WLC_SET_ROAM_DELTA 57
+#define WLC_GET_ROAM_SCAN_PERIOD 58
+#define WLC_SET_ROAM_SCAN_PERIOD 59
+#define WLC_EVM 60
+#define WLC_GET_TXANT 61
+#define WLC_SET_TXANT 62
+#define WLC_GET_ANTDIV 63
+#define WLC_SET_ANTDIV 64
+
+
+#define WLC_GET_CLOSED 67
+#define WLC_SET_CLOSED 68
+#define WLC_GET_MACLIST 69
+#define WLC_SET_MACLIST 70
+#define WLC_GET_RATESET 71
+#define WLC_SET_RATESET 72
+
+#define WLC_LONGTRAIN 74
+#define WLC_GET_BCNPRD 75
+#define WLC_SET_BCNPRD 76
+#define WLC_GET_DTIMPRD 77
+#define WLC_SET_DTIMPRD 78
+#define WLC_GET_SROM 79
+#define WLC_SET_SROM 80
+#define WLC_GET_WEP_RESTRICT 81
+#define WLC_SET_WEP_RESTRICT 82
+#define WLC_GET_COUNTRY 83
+#define WLC_SET_COUNTRY 84
+#define WLC_GET_PM 85
+#define WLC_SET_PM 86
+#define WLC_GET_WAKE 87
+#define WLC_SET_WAKE 88
+
+#define WLC_GET_FORCELINK 90
+#define WLC_SET_FORCELINK 91
+#define WLC_FREQ_ACCURACY 92
+#define WLC_CARRIER_SUPPRESS 93
+#define WLC_GET_PHYREG 94
+#define WLC_SET_PHYREG 95
+#define WLC_GET_RADIOREG 96
+#define WLC_SET_RADIOREG 97
+#define WLC_GET_REVINFO 98
+#define WLC_GET_UCANTDIV 99
+#define WLC_SET_UCANTDIV 100
+#define WLC_R_REG 101
+#define WLC_W_REG 102
+
+
+#define WLC_GET_MACMODE 105
+#define WLC_SET_MACMODE 106
+#define WLC_GET_MONITOR 107
+#define WLC_SET_MONITOR 108
+#define WLC_GET_GMODE 109
+#define WLC_SET_GMODE 110
+#define WLC_GET_LEGACY_ERP 111
+#define WLC_SET_LEGACY_ERP 112
+#define WLC_GET_RX_ANT 113
+#define WLC_GET_CURR_RATESET 114
+#define WLC_GET_SCANSUPPRESS 115
+#define WLC_SET_SCANSUPPRESS 116
+#define WLC_GET_AP 117
+#define WLC_SET_AP 118
+#define WLC_GET_EAP_RESTRICT 119
+#define WLC_SET_EAP_RESTRICT 120
+#define WLC_SCB_AUTHORIZE 121
+#define WLC_SCB_DEAUTHORIZE 122
+#define WLC_GET_WDSLIST 123
+#define WLC_SET_WDSLIST 124
+#define WLC_GET_ATIM 125
+#define WLC_SET_ATIM 126
+#define WLC_GET_RSSI 127
+#define WLC_GET_PHYANTDIV 128
+#define WLC_SET_PHYANTDIV 129
+#define WLC_AP_RX_ONLY 130
+#define WLC_GET_TX_PATH_PWR 131
+#define WLC_SET_TX_PATH_PWR 132
+#define WLC_GET_WSEC 133
+#define WLC_SET_WSEC 134
+#define WLC_GET_PHY_NOISE 135
+#define WLC_GET_BSS_INFO 136
+#define WLC_GET_PKTCNTS 137
+#define WLC_GET_LAZYWDS 138
+#define WLC_SET_LAZYWDS 139
+#define WLC_GET_BANDLIST 140
+#define WLC_GET_BAND 141
+#define WLC_SET_BAND 142
+#define WLC_SCB_DEAUTHENTICATE 143
+#define WLC_GET_SHORTSLOT 144
+#define WLC_GET_SHORTSLOT_OVERRIDE 145
+#define WLC_SET_SHORTSLOT_OVERRIDE 146
+#define WLC_GET_SHORTSLOT_RESTRICT 147
+#define WLC_SET_SHORTSLOT_RESTRICT 148
+#define WLC_GET_GMODE_PROTECTION 149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
+#define WLC_UPGRADE 152
+
+
+#define WLC_GET_IGNORE_BCNS 155
+#define WLC_SET_IGNORE_BCNS 156
+#define WLC_GET_SCB_TIMEOUT 157
+#define WLC_SET_SCB_TIMEOUT 158
+#define WLC_GET_ASSOCLIST 159
+#define WLC_GET_CLK 160
+#define WLC_SET_CLK 161
+#define WLC_GET_UP 162
+#define WLC_OUT 163
+#define WLC_GET_WPA_AUTH 164
+#define WLC_SET_WPA_AUTH 165
+#define WLC_GET_UCFLAGS 166
+#define WLC_SET_UCFLAGS 167
+#define WLC_GET_PWRIDX 168
+#define WLC_SET_PWRIDX 169
+#define WLC_GET_TSSI 170
+#define WLC_GET_SUP_RATESET_OVERRIDE 171
+#define WLC_SET_SUP_RATESET_OVERRIDE 172
+
+
+
+
+
+#define WLC_GET_PROTECTION_CONTROL 178
+#define WLC_SET_PROTECTION_CONTROL 179
+#define WLC_GET_PHYLIST 180
+#define WLC_ENCRYPT_STRENGTH 181
+#define WLC_DECRYPT_STATUS 182
+#define WLC_GET_KEY_SEQ 183
+#define WLC_GET_SCAN_CHANNEL_TIME 184
+#define WLC_SET_SCAN_CHANNEL_TIME 185
+#define WLC_GET_SCAN_UNASSOC_TIME 186
+#define WLC_SET_SCAN_UNASSOC_TIME 187
+#define WLC_GET_SCAN_HOME_TIME 188
+#define WLC_SET_SCAN_HOME_TIME 189
+#define WLC_GET_SCAN_NPROBES 190
+#define WLC_SET_SCAN_NPROBES 191
+#define WLC_GET_PRB_RESP_TIMEOUT 192
+#define WLC_SET_PRB_RESP_TIMEOUT 193
+#define WLC_GET_ATTEN 194
+#define WLC_SET_ATTEN 195
+#define WLC_GET_SHMEM 196
+#define WLC_SET_SHMEM 197
+
+
+#define WLC_SET_WSEC_TEST 200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define WLC_TKIP_COUNTERMEASURES 202
+#define WLC_GET_PIOMODE 203
+#define WLC_SET_PIOMODE 204
+#define WLC_SET_ASSOC_PREFER 205
+#define WLC_GET_ASSOC_PREFER 206
+#define WLC_SET_ROAM_PREFER 207
+#define WLC_GET_ROAM_PREFER 208
+#define WLC_SET_LED 209
+#define WLC_GET_LED 210
+#define WLC_GET_INTERFERENCE_MODE 211
+#define WLC_SET_INTERFERENCE_MODE 212
+#define WLC_GET_CHANNEL_QA 213
+#define WLC_START_CHANNEL_QA 214
+#define WLC_GET_CHANNEL_SEL 215
+#define WLC_START_CHANNEL_SEL 216
+#define WLC_GET_VALID_CHANNELS 217
+#define WLC_GET_FAKEFRAG 218
+#define WLC_SET_FAKEFRAG 219
+#define WLC_GET_PWROUT_PERCENTAGE 220
+#define WLC_SET_PWROUT_PERCENTAGE 221
+#define WLC_SET_BAD_FRAME_PREEMPT 222
+#define WLC_GET_BAD_FRAME_PREEMPT 223
+#define WLC_SET_LEAP_LIST 224
+#define WLC_GET_LEAP_LIST 225
+#define WLC_GET_CWMIN 226
+#define WLC_SET_CWMIN 227
+#define WLC_GET_CWMAX 228
+#define WLC_SET_CWMAX 229
+#define WLC_GET_WET 230
+#define WLC_SET_WET 231
+#define WLC_GET_PUB 232
+
+
+#define WLC_GET_KEY_PRIMARY 235
+#define WLC_SET_KEY_PRIMARY 236
+
+#define WLC_GET_ACI_ARGS 238
+#define WLC_SET_ACI_ARGS 239
+#define WLC_UNSET_CALLBACK 240
+#define WLC_SET_CALLBACK 241
+#define WLC_GET_RADAR 242
+#define WLC_SET_RADAR 243
+#define WLC_SET_SPECT_MANAGMENT 244
+#define WLC_GET_SPECT_MANAGMENT 245
+#define WLC_WDS_GET_REMOTE_HWADDR 246
+#define WLC_WDS_GET_WPA_SUP 247
+#define WLC_SET_CS_SCAN_TIMER 248
+#define WLC_GET_CS_SCAN_TIMER 249
+#define WLC_MEASURE_REQUEST 250
+#define WLC_INIT 251
+#define WLC_SEND_QUIET 252
+#define WLC_KEEPALIVE 253
+#define WLC_SEND_PWR_CONSTRAINT 254
+#define WLC_UPGRADE_STATUS 255
+#define WLC_CURRENT_PWR 256
+#define WLC_GET_SCAN_PASSIVE_TIME 257
+#define WLC_SET_SCAN_PASSIVE_TIME 258
+#define WLC_LEGACY_LINK_BEHAVIOR 259
+#define WLC_GET_CHANNELS_IN_COUNTRY 260
+#define WLC_GET_COUNTRY_LIST 261
+#define WLC_GET_VAR 262
+#define WLC_SET_VAR 263
+#define WLC_NVRAM_GET 264
+#define WLC_NVRAM_SET 265
+#define WLC_NVRAM_DUMP 266
+#define WLC_REBOOT 267
+#define WLC_SET_WSEC_PMK 268
+#define WLC_GET_AUTH_MODE 269
+#define WLC_SET_AUTH_MODE 270
+#define WLC_GET_WAKEENTRY 271
+#define WLC_SET_WAKEENTRY 272
+#define WLC_NDCONFIG_ITEM 273
+#define WLC_NVOTPW 274
+#define WLC_OTPW 275
+#define WLC_IOV_BLOCK_GET 276
+#define WLC_IOV_MODULES_GET 277
+#define WLC_SOFT_RESET 278
+#define WLC_GET_ALLOW_MODE 279
+#define WLC_SET_ALLOW_MODE 280
+#define WLC_GET_DESIRED_BSSID 281
+#define WLC_SET_DESIRED_BSSID 282
+#define WLC_DISASSOC_MYAP 283
+#define WLC_GET_NBANDS 284
+#define WLC_GET_BANDSTATES 285
+#define WLC_GET_WLC_BSS_INFO 286
+#define WLC_GET_ASSOC_INFO 287
+#define WLC_GET_OID_PHY 288
+#define WLC_SET_OID_PHY 289
+#define WLC_SET_ASSOC_TIME 290
+#define WLC_GET_DESIRED_SSID 291
+#define WLC_GET_CHANSPEC 292
+#define WLC_GET_ASSOC_STATE 293
+#define WLC_SET_PHY_STATE 294
+#define WLC_GET_SCAN_PENDING 295
+#define WLC_GET_SCANREQ_PENDING 296
+#define WLC_GET_PREV_ROAM_REASON 297
+#define WLC_SET_PREV_ROAM_REASON 298
+#define WLC_GET_BANDSTATES_PI 299
+#define WLC_GET_PHY_STATE 300
+#define WLC_GET_BSS_WPA_RSN 301
+#define WLC_GET_BSS_WPA2_RSN 302
+#define WLC_GET_BSS_BCN_TS 303
+#define WLC_GET_INT_DISASSOC 304
+#define WLC_SET_NUM_PEERS 305
+#define WLC_GET_NUM_BSS 306
+#define WLC_NPHY_SAMPLE_COLLECT 307
+#define WLC_UM_PRIV 308
+#define WLC_GET_CMD 309
+
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312
+#define WLC_GET_WAI_RESTRICT 313
+#define WLC_SET_WAI_RESTRICT 314
+#define WLC_SET_WAI_REKEY 315
+#define WLC_SET_PEAKRATE 316
+#define WLC_GET_PEAKRATE 317
+#define WLC_LAST 318
+
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE 0xABADCEDE
+#endif
+
+
+#define CMN_IOCTL_OFF 0x180
+
+
+
+
+#define WL_OID_BASE 0xFFE41420
+
+
+#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK)
+#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+
+#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
+
+#define WL_DECRYPT_STATUS_SUCCESS 1
+#define WL_DECRYPT_STATUS_FAILURE 2
+#define WL_DECRYPT_STATUS_UNKNOWN 3
+
+
+#define WLC_UPGRADE_SUCCESS 0
+#define WLC_UPGRADE_PENDING 1
+
+#ifdef CONFIG_USBRNDIS_RETAIL
+
+typedef struct {
+ char *name;
+ void *param;
+} ndconfig_item_t;
+#endif
+
+
+
+#define WL_AUTH_OPEN_SYSTEM 0
+#define WL_AUTH_SHARED_KEY 1
+#define WL_AUTH_OPEN_SHARED 2
+
+
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3)
+
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
+
+
+#define WL_TXPWR_OVERRIDE (1U<<31)
+#define WL_TXPWR_NEG (1U<<30)
+
+#define WL_PHY_PAVARS_LEN 6
+
+#define WL_PHY_PAVARS2_NUM 3
+#define WL_PHY_PAVAR_VER 1
+typedef struct wl_pavars2 {
+ uint16 ver;
+ uint16 len;
+ uint16 inuse;
+ uint16 phy_type;
+ uint16 bandrange;
+ uint16 chain;
+ uint16 inpa[WL_PHY_PAVARS2_NUM];
+} wl_pavars2_t;
+
+typedef struct wl_po {
+ uint16 phy_type;
+ uint16 band;
+ uint16 cckpo;
+ uint32 ofdmpo;
+ uint16 mcspo[8];
+} wl_po_t;
+
+
+#define WLC_TXPWR_MAX (127)
+
+
+#define WL_DIAG_INTERRUPT 1
+#define WL_DIAG_LOOPBACK 2
+#define WL_DIAG_MEMORY 3
+#define WL_DIAG_LED 4
+#define WL_DIAG_REG 5
+#define WL_DIAG_SROM 6
+#define WL_DIAG_DMA 7
+
+#define WL_DIAGERR_SUCCESS 0
+#define WL_DIAGERR_FAIL_TO_RUN 1
+#define WL_DIAGERR_NOT_SUPPORTED 2
+#define WL_DIAGERR_INTERRUPT_FAIL 3
+#define WL_DIAGERR_LOOPBACK_FAIL 4
+#define WL_DIAGERR_SROM_FAIL 5
+#define WL_DIAGERR_SROM_BADCRC 6
+#define WL_DIAGERR_REG_FAIL 7
+#define WL_DIAGERR_MEMORY_FAIL 8
+#define WL_DIAGERR_NOMEM 9
+#define WL_DIAGERR_DMA_FAIL 10
+
+#define WL_DIAGERR_MEMORY_TIMEOUT 11
+#define WL_DIAGERR_MEMORY_BADPATTERN 12
+
+
+#define WLC_BAND_AUTO 0
+#define WLC_BAND_5G 1
+#define WLC_BAND_2G 2
+#define WLC_BAND_ALL 3
+
+
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_VER2 4
+#define WL_CHAN_FREQ_RANGE_5GLH_VER2 5
+#define WL_CHAN_FREQ_RANGE_5GML_VER2 6
+#define WL_CHAN_FREQ_RANGE_5GMH_VER2 7
+#define WL_CHAN_FREQ_RANGE_5GH_VER2 8
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8
+
+
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_NULL 0xf
+
+
+#define WLC_MACMODE_DISABLED 0
+#define WLC_MACMODE_DENY 1
+#define WLC_MACMODE_ALLOW 2
+
+
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+
+#define WLC_PLCP_AUTO -1
+#define WLC_PLCP_SHORT 0
+#define WLC_PLCP_LONG 1
+
+
+#define WLC_PROTECTION_AUTO -1
+#define WLC_PROTECTION_OFF 0
+#define WLC_PROTECTION_ON 1
+#define WLC_PROTECTION_MMHDR_ONLY 2
+#define WLC_PROTECTION_CTS_ONLY 3
+
+
+#define WLC_PROTECTION_CTL_OFF 0
+#define WLC_PROTECTION_CTL_LOCAL 1
+#define WLC_PROTECTION_CTL_OVERLAP 2
+
+
+#define WLC_N_PROTECTION_OFF 0
+#define WLC_N_PROTECTION_OPTIONAL 1
+#define WLC_N_PROTECTION_20IN40 2
+#define WLC_N_PROTECTION_MIXEDMODE 3
+
+
+#define WLC_N_PREAMBLE_MIXEDMODE 0
+#define WLC_N_PREAMBLE_GF 1
+#define WLC_N_PREAMBLE_GF_BRCM 2
+
+
+#define WLC_N_BW_20ALL 0
+#define WLC_N_BW_40ALL 1
+#define WLC_N_BW_20IN2G_40IN5G 2
+
+
+#define WLC_N_TXRX_CHAIN0 0
+#define WLC_N_TXRX_CHAIN1 1
+
+
+#define WLC_N_SGI_20 0x01
+#define WLC_N_SGI_40 0x02
+
+
+#define PM_OFF 0
+#define PM_MAX 1
+#define PM_FAST 2
+
+#define LISTEN_INTERVAL 10
+
+#define INTERFERE_OVRRIDE_OFF -1
+#define INTERFERE_NONE 0
+#define NON_WLAN 1
+#define WLAN_MANUAL 2
+#define WLAN_AUTO 3
+#define WLAN_AUTO_W_NOISE 4
+#define AUTO_ACTIVE (1 << 7)
+
+typedef struct wl_aci_args {
+ int enter_aci_thresh;
+ int exit_aci_thresh;
+ int usec_spin;
+ int glitch_delay;
+ uint16 nphy_adcpwr_enter_thresh;
+ uint16 nphy_adcpwr_exit_thresh;
+ uint16 nphy_repeat_ctr;
+ uint16 nphy_num_samples;
+ uint16 nphy_undetect_window_sz;
+ uint16 nphy_b_energy_lo_aci;
+ uint16 nphy_b_energy_md_aci;
+ uint16 nphy_b_energy_hi_aci;
+ uint16 nphy_noise_noassoc_glitch_th_up;
+ uint16 nphy_noise_noassoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_glitch_th_up;
+ uint16 nphy_noise_assoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_aci_glitch_th_up;
+ uint16 nphy_noise_assoc_aci_glitch_th_dn;
+ uint16 nphy_noise_assoc_enter_th;
+ uint16 nphy_noise_noassoc_enter_th;
+ uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+ uint16 nphy_noise_noassoc_crsidx_incr;
+ uint16 nphy_noise_assoc_crsidx_incr;
+ uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define TRIGGER_NOW 0
+#define TRIGGER_CRS 0x01
+#define TRIGGER_CRSDEASSERT 0x02
+#define TRIGGER_GOODFCS 0x04
+#define TRIGGER_BADFCS 0x08
+#define TRIGGER_BADPLCP 0x10
+#define TRIGGER_CRSGLITCH 0x20
+#define WL_ACI_ARGS_LEGACY_LENGTH 16
+#define WL_SAMPLECOLLECT_T_VERSION 1
+typedef struct wl_samplecollect_args {
+
+ uint8 coll_us;
+ int cores;
+
+ uint16 version;
+ uint16 length;
+ uint8 trigger;
+ uint16 timeout;
+ uint16 mode;
+ uint32 pre_dur;
+ uint32 post_dur;
+ uint8 gpio_sel;
+ bool downsamp;
+ bool be_deaf;
+ bool agc;
+ bool filter;
+} wl_samplecollect_args_t;
+
+#define WL_SAMPLEDATA_HEADER_TYPE 1
+#define WL_SAMPLEDATA_HEADER_SIZE 80
+#define WL_SAMPLEDATA_TYPE 2
+#define WL_SAMPLEDATA_SEQ 0xff
+#define WL_SAMPLEDATA_MORE_DATA 0x100
+#define WL_SAMPLEDATA_T_VERSION 1
+
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+ uint16 version;
+ uint16 size;
+ uint16 tag;
+ uint16 length;
+ uint32 flag;
+} wl_sampledata_t;
+
+
+
+#define WL_ERROR_VAL 0x00000001
+#define WL_TRACE_VAL 0x00000002
+#define WL_PRHDRS_VAL 0x00000004
+#define WL_PRPKT_VAL 0x00000008
+#define WL_INFORM_VAL 0x00000010
+#define WL_TMP_VAL 0x00000020
+#define WL_OID_VAL 0x00000040
+#define WL_RATE_VAL 0x00000080
+#define WL_ASSOC_VAL 0x00000100
+#define WL_PRUSR_VAL 0x00000200
+#define WL_PS_VAL 0x00000400
+#define WL_TXPWR_VAL 0x00000800
+#define WL_PORT_VAL 0x00001000
+#define WL_DUAL_VAL 0x00002000
+#define WL_WSEC_VAL 0x00004000
+#define WL_WSEC_DUMP_VAL 0x00008000
+#define WL_LOG_VAL 0x00010000
+#define WL_NRSSI_VAL 0x00020000
+#define WL_LOFT_VAL 0x00040000
+#define WL_REGULATORY_VAL 0x00080000
+#define WL_PHYCAL_VAL 0x00100000
+#define WL_RADAR_VAL 0x00200000
+#define WL_MPC_VAL 0x00400000
+#define WL_APSTA_VAL 0x00800000
+#define WL_DFS_VAL 0x01000000
+#define WL_BA_VAL 0x02000000
+#define WL_ACI_VAL 0x04000000
+#define WL_MBSS_VAL 0x04000000
+#define WL_CAC_VAL 0x08000000
+#define WL_AMSDU_VAL 0x10000000
+#define WL_AMPDU_VAL 0x20000000
+#define WL_FFPLD_VAL 0x40000000
+
+
+#define WL_DPT_VAL 0x00000001
+#define WL_SCAN_VAL 0x00000002
+#define WL_WOWL_VAL 0x00000004
+#define WL_COEX_VAL 0x00000008
+#define WL_RTDC_VAL 0x00000010
+#define WL_PROTO_VAL 0x00000020
+#define WL_BTA_VAL 0x00000040
+#define WL_CHANINT_VAL 0x00000080
+#define WL_THERMAL_VAL 0x00000100
+#define WL_P2P_VAL 0x00000200
+#define WL_TXRX_VAL 0x00000400
+#define WL_MCHAN_VAL 0x00000800
+
+
+#define WL_LED_NUMGPIO 16
+
+
+#define WL_LED_OFF 0
+#define WL_LED_ON 1
+#define WL_LED_ACTIVITY 2
+#define WL_LED_RADIO 3
+#define WL_LED_ARADIO 4
+#define WL_LED_BRADIO 5
+#define WL_LED_BGMODE 6
+#define WL_LED_WI1 7
+#define WL_LED_WI2 8
+#define WL_LED_WI3 9
+#define WL_LED_ASSOC 10
+#define WL_LED_INACTIVE 11
+#define WL_LED_ASSOCACT 12
+#define WL_LED_WI4 13
+#define WL_LED_WI5 14
+#define WL_LED_BLINKSLOW 15
+#define WL_LED_BLINKMED 16
+#define WL_LED_BLINKFAST 17
+#define WL_LED_BLINKCUSTOM 18
+#define WL_LED_BLINKPERIODIC 19
+#define WL_LED_ASSOC_WITH_SEC 20
+
+#define WL_LED_START_OFF 21
+#define WL_LED_NUMBEHAVIOR 22
+
+
+#define WL_LED_BEH_MASK 0x7f
+#define WL_LED_AL_MASK 0x80
+
+
+#define WL_NUMCHANNELS 64
+#define WL_NUMCHANSPECS 100
+
+
+#define WL_WDS_WPA_ROLE_AUTH 0
+#define WL_WDS_WPA_ROLE_SUP 1
+#define WL_WDS_WPA_ROLE_AUTO 255
+
+
+#define WL_EVENTING_MASK_LEN 16
+
+
+
+
+#define WL_JOIN_PREF_RSSI 1
+#define WL_JOIN_PREF_WPA 2
+#define WL_JOIN_PREF_BAND 3
+#define WL_JOIN_PREF_RSSI_DELTA 4
+#define WL_JOIN_PREF_TRANS_PREF 5
+
+
+#define WLJP_BAND_ASSOC_PREF 255
+
+
+#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
+
+struct tsinfo_arg {
+ uint8 octets[3];
+};
+
+#define NFIFO 6
+
+#define WL_CNT_T_VERSION 6
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+
+ uint32 txframe;
+ uint32 txbyte;
+ uint32 txretrans;
+ uint32 txerror;
+ uint32 txctl;
+ uint32 txprshort;
+ uint32 txserr;
+ uint32 txnobuf;
+ uint32 txnoassoc;
+ uint32 txrunt;
+ uint32 txchit;
+ uint32 txcmiss;
+
+
+ uint32 txuflo;
+ uint32 txphyerr;
+ uint32 txphycrs;
+
+
+ uint32 rxframe;
+ uint32 rxbyte;
+ uint32 rxerror;
+ uint32 rxctl;
+ uint32 rxnobuf;
+ uint32 rxnondata;
+ uint32 rxbadds;
+ uint32 rxbadcm;
+ uint32 rxfragerr;
+ uint32 rxrunt;
+ uint32 rxgiant;
+ uint32 rxnoscb;
+ uint32 rxbadproto;
+ uint32 rxbadsrcmac;
+ uint32 rxbadda;
+ uint32 rxfilter;
+
+
+ uint32 rxoflo;
+ uint32 rxuflo[NFIFO];
+
+ uint32 d11cnt_txrts_off;
+ uint32 d11cnt_rxcrc_off;
+ uint32 d11cnt_txnocts_off;
+
+
+ uint32 dmade;
+ uint32 dmada;
+ uint32 dmape;
+ uint32 reset;
+ uint32 tbtt;
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail;
+
+
+ uint32 txallfrm;
+ uint32 txrtsfrm;
+ uint32 txctsfrm;
+ uint32 txackfrm;
+ uint32 txdnlfrm;
+ uint32 txbcnfrm;
+ uint32 txfunfl[8];
+ uint32 txtplunfl;
+ uint32 txphyerror;
+ uint32 rxfrmtoolong;
+ uint32 rxfrmtooshrt;
+ uint32 rxinvmachdr;
+ uint32 rxbadfcs;
+ uint32 rxbadplcp;
+ uint32 rxcrsglitch;
+ uint32 rxstrt;
+ uint32 rxdfrmucastmbss;
+ uint32 rxmfrmucastmbss;
+ uint32 rxcfrmucast;
+ uint32 rxrtsucast;
+ uint32 rxctsucast;
+ uint32 rxackucast;
+ uint32 rxdfrmocast;
+ uint32 rxmfrmocast;
+ uint32 rxcfrmocast;
+ uint32 rxrtsocast;
+ uint32 rxctsocast;
+ uint32 rxdfrmmcast;
+ uint32 rxmfrmmcast;
+ uint32 rxcfrmmcast;
+ uint32 rxbeaconmbss;
+ uint32 rxdfrmucastobss;
+ uint32 rxbeaconobss;
+ uint32 rxrsptmout;
+ uint32 bcntxcancl;
+ uint32 rxf0ovfl;
+ uint32 rxf1ovfl;
+ uint32 rxf2ovfl;
+ uint32 txsfovfl;
+ uint32 pmqovfl;
+ uint32 rxcgprqfrm;
+ uint32 rxcgprsqovfl;
+ uint32 txcgprsfail;
+ uint32 txcgprssuc;
+ uint32 prs_timeout;
+ uint32 rxnack;
+ uint32 frmscons;
+ uint32 txnack;
+ uint32 txglitch_nack;
+ uint32 txburst;
+
+
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretrie;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 rxfrag;
+ uint32 rxmulti;
+ uint32 rxcrc;
+ uint32 txfrmsnt;
+ uint32 rxundec;
+
+
+ uint32 tkipmicfaill;
+ uint32 tkipcntrmsr;
+ uint32 tkipreplay;
+ uint32 ccmpfmterr;
+ uint32 ccmpreplay;
+ uint32 ccmpundec;
+ uint32 fourwayfail;
+ uint32 wepundec;
+ uint32 wepicverr;
+ uint32 decsuccess;
+ uint32 tkipicverr;
+ uint32 wepexcluded;
+
+ uint32 rxundec_mcst;
+
+
+ uint32 tkipmicfaill_mcst;
+ uint32 tkipcntrmsr_mcst;
+ uint32 tkipreplay_mcst;
+ uint32 ccmpfmterr_mcst;
+ uint32 ccmpreplay_mcst;
+ uint32 ccmpundec_mcst;
+ uint32 fourwayfail_mcst;
+ uint32 wepundec_mcst;
+ uint32 wepicverr_mcst;
+ uint32 decsuccess_mcst;
+ uint32 tkipicverr_mcst;
+ uint32 wepexcluded_mcst;
+
+ uint32 txchanrej;
+ uint32 txexptime;
+ uint32 psmwds;
+ uint32 phywatchdog;
+
+
+ uint32 prq_entries_handled;
+ uint32 prq_undirected_entries;
+ uint32 prq_bad_entries;
+ uint32 atim_suppress_count;
+ uint32 bcn_template_not_ready;
+ uint32 bcn_template_not_ready_done;
+ uint32 late_tbtt_dpc;
+
+
+ uint32 rx1mbps;
+ uint32 rx2mbps;
+ uint32 rx5mbps5;
+ uint32 rx6mbps;
+ uint32 rx9mbps;
+ uint32 rx11mbps;
+ uint32 rx12mbps;
+ uint32 rx18mbps;
+ uint32 rx24mbps;
+ uint32 rx36mbps;
+ uint32 rx48mbps;
+ uint32 rx54mbps;
+ uint32 rx108mbps;
+ uint32 rx162mbps;
+ uint32 rx216mbps;
+ uint32 rx270mbps;
+ uint32 rx324mbps;
+ uint32 rx378mbps;
+ uint32 rx432mbps;
+ uint32 rx486mbps;
+ uint32 rx540mbps;
+
+
+ uint32 pktengrxducast;
+ uint32 pktengrxdmcast;
+
+ uint32 rfdisable;
+ uint32 bphy_rxcrsglitch;
+
+ uint32 txmpdu_sgi;
+ uint32 rxmpdu_sgi;
+ uint32 txmpdu_stbc;
+ uint32 rxmpdu_stbc;
+} wl_cnt_t;
+
+
+#define WL_WME_CNT_VERSION 1
+
+typedef struct {
+ uint32 packets;
+ uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+ wl_traffic_stats_t tx[AC_COUNT];
+ wl_traffic_stats_t tx_failed[AC_COUNT];
+ wl_traffic_stats_t rx[AC_COUNT];
+ wl_traffic_stats_t rx_failed[AC_COUNT];
+
+ wl_traffic_stats_t forward[AC_COUNT];
+
+ wl_traffic_stats_t tx_expired[AC_COUNT];
+
+} wl_wme_cnt_t;
+
+struct wl_msglevel2 {
+ uint32 low;
+ uint32 high;
+};
+
+typedef struct wl_mkeep_alive_pkt {
+ uint16 version;
+ uint16 length;
+ uint32 period_msec;
+ uint16 len_bytes;
+ uint8 keep_alive_id;
+ uint8 data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION 1
+#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION 500
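+
+/* Editor's note -- illustrative only, not part of the upstream file: since
+ * 'data' is the variable tail of wl_mkeep_alive_pkt_t, the buffer passed down
+ * is presumably sized as WL_MKEEP_ALIVE_FIXED_LEN + len_bytes, with 'version'
+ * set to WL_MKEEP_ALIVE_VERSION and 'length' covering the fixed part.
+ */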
+
+#define WLC_ROAM_TRIGGER_DEFAULT 0
+#define WLC_ROAM_TRIGGER_BANDWIDTH 1
+#define WLC_ROAM_TRIGGER_DISTANCE 2
+#define WLC_ROAM_TRIGGER_AUTO 3
+#define WLC_ROAM_TRIGGER_MAX_VALUE 3
+
+
+#define WPA_AUTH_PFN_ANY 0xffffffff
+
+enum {
+ PFN_LIST_ORDER,
+ PFN_RSSI
+};
+
+enum {
+ DISABLE,
+ ENABLE
+};
+
+enum {
+ OFF_ADAPT,
+ SMART_ADAPT,
+ STRICT_ADAPT,
+ SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+
+#define PFN_VERSION 2
+#define PFN_SCANRESULT_VERSION 1
+#define MAX_PFN_LIST_COUNT 16
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+
+typedef struct wl_pfn_subnet_info {
+ struct ether_addr BSSID;
+ uint8 channel;
+ uint8 SSID_len;
+ uint8 SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+ wl_pfn_subnet_info_t pfnsubnet;
+ int16 RSSI;
+ uint16 timestamp;
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_scanresults {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
+
+
+typedef struct wl_pfn_param {
+ int32 version;
+ int32 scan_freq;
+ int32 lost_network_timeout;
+ int16 flags;
+ int16 rssi_margin;
+ uint8 bestn;
+ uint8 mscan;
+ uint8 repeat;
+ uint8 exp;
+ int32 slow_freq;
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+ struct ether_addr macaddr;
+
+ uint16 flags;
+} wl_pfn_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+
+typedef struct wl_pfn_cfg {
+ uint32 reporttype;
+ int32 channel_num;
+ uint16 channel_list[WL_NUMCHANNELS];
+} wl_pfn_cfg_t;
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+
+typedef struct wl_pfn {
+ wlc_ssid_t ssid;
+ int32 flags;
+ int32 infra;
+ int32 auth;
+ int32 wpa_auth;
+ int32 wsec;
+} wl_pfn_t;
+#define WL_PFN_HIDDEN_BIT 2
+#define PNO_SCAN_MAX_FW (508 * 1000)
+#define PNO_SCAN_MAX_FW_SEC (PNO_SCAN_MAX_FW / 1000)
+#define PNO_SCAN_MIN_FW_SEC 10
+#define WL_PFN_HIDDEN_MASK 0x4
+
+
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2 0x00000004
+
+struct toe_ol_stats_t {
+
+ uint32 tx_summed;
+
+
+ uint32 tx_iph_fill;
+ uint32 tx_tcp_fill;
+ uint32 tx_udp_fill;
+ uint32 tx_icmp_fill;
+
+
+ uint32 rx_iph_good;
+ uint32 rx_iph_bad;
+ uint32 rx_tcp_good;
+ uint32 rx_tcp_bad;
+ uint32 rx_udp_good;
+ uint32 rx_udp_bad;
+ uint32 rx_icmp_good;
+ uint32 rx_icmp_bad;
+
+
+ uint32 tx_tcp_errinj;
+ uint32 tx_udp_errinj;
+ uint32 tx_icmp_errinj;
+
+
+ uint32 rx_tcp_errinj;
+ uint32 rx_udp_errinj;
+ uint32 rx_icmp_errinj;
+};
+
+
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+
+
+#define ARP_ERRTEST_REPLY_PEER 0x1
+#define ARP_ERRTEST_REPLY_HOST 0x2
+
+#define ARP_MULTIHOMING_MAX 8
+
+
+struct arp_ol_stats_t {
+ uint32 host_ip_entries;
+ uint32 host_ip_overflow;
+
+ uint32 arp_table_entries;
+ uint32 arp_table_overflow;
+
+ uint32 host_request;
+ uint32 host_reply;
+ uint32 host_service;
+
+ uint32 peer_request;
+ uint32 peer_request_drop;
+ uint32 peer_reply;
+ uint32 peer_reply_drop;
+ uint32 peer_service;
+};
+
+
+
+
+typedef struct wl_keep_alive_pkt {
+ uint32 period_msec;
+ uint16 len_bytes;
+ uint8 data[1];
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
+
+
+
+
+typedef enum wl_pkt_filter_type {
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+
+typedef struct wl_pkt_filter_pattern {
+ uint32 offset;
+ uint32 size_bytes;
+ uint8 mask_and_pattern[1];
+} wl_pkt_filter_pattern_t;
+
+
+typedef struct wl_pkt_filter {
+ uint32 id;
+ uint32 type;
+ uint32 negate_match;
+ union {
+ wl_pkt_filter_pattern_t pattern;
+ } u;
+} wl_pkt_filter_t;
+
+#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
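+
+/* Editor's note -- illustrative only, not part of the upstream file: a
+ * pattern-match filter is handed to the driver as a wl_pkt_filter_t with
+ * type WL_PKT_FILTER_TYPE_PATTERN_MATCH, where u.pattern.mask_and_pattern
+ * presumably carries size_bytes of mask followed by size_bytes of pattern,
+ * so the whole buffer comes to roughly
+ *
+ *	WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * size_bytes
+ */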
+
+
+typedef struct wl_pkt_filter_enable {
+ uint32 id;
+ uint32 enable;
+} wl_pkt_filter_enable_t;
+
+
+typedef struct wl_pkt_filter_list {
+ uint32 num;
+ wl_pkt_filter_t filter[1];
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
+
+
+typedef struct wl_pkt_filter_stats {
+ uint32 num_pkts_matched;
+ uint32 num_pkts_forwarded;
+ uint32 num_pkts_discarded;
+} wl_pkt_filter_stats_t;
+
+
+typedef struct wl_seq_cmd_ioctl {
+ uint32 cmd;
+ uint32 len;
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES 4
+
+
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+ (((cmd) == WLC_GET_MAGIC) || \
+ ((cmd) == WLC_GET_VERSION) || \
+ ((cmd) == WLC_GET_AP) || \
+ ((cmd) == WLC_GET_INSTANCE))
+
+
+
+#define WL_PKTENG_PER_TX_START 0x01
+#define WL_PKTENG_PER_TX_STOP 0x02
+#define WL_PKTENG_PER_RX_START 0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
+#define WL_PKTENG_PER_RX_STOP 0x08
+#define WL_PKTENG_PER_MASK 0xff
+
+#define WL_PKTENG_SYNCHRONOUS 0x100
+
+typedef struct wl_pkteng {
+ uint32 flags;
+ uint32 delay;
+ uint32 nframes;
+ uint32 length;
+ uint8 seqno;
+ struct ether_addr dest;
+ struct ether_addr src;
+} wl_pkteng_t;
+
+#define NUM_80211b_RATES 4
+#define NUM_80211ag_RATES 8
+#define NUM_80211n_RATES 32
+#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+typedef struct wl_pkteng_stats {
+ uint32 lostfrmcnt;
+ int32 rssi;
+ int32 snr;
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+} wl_pkteng_stats_t;
+
+
+#define WL_WOWL_MAGIC (1 << 0)
+#define WL_WOWL_NET (1 << 1)
+#define WL_WOWL_DIS (1 << 2)
+#define WL_WOWL_RETR (1 << 3)
+#define WL_WOWL_BCN (1 << 4)
+#define WL_WOWL_TST (1 << 5)
+#define WL_WOWL_M1 (1 << 6)
+#define WL_WOWL_EAPID (1 << 7)
+#define WL_WOWL_KEYROT (1 << 14)
+#define WL_WOWL_BCAST (1 << 15)
+
+#define MAGIC_PKT_MINLEN 102
+
+typedef struct {
+ uint masksize;
+ uint offset;
+ uint patternoffset;
+ uint patternsize;
+ ulong id;
+
+
+} wl_wowl_pattern_t;
+
+typedef struct {
+ uint count;
+ wl_wowl_pattern_t pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct {
+ uint8 pci_wakeind;
+ uint16 ucode_wakeind;
+} wl_wowl_wakeind_t;
+
+
+typedef struct wl_txrate_class {
+ uint8 init_rate;
+ uint8 min_rate;
+ uint8 max_rate;
+} wl_txrate_class_t;
+
+
+
+
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100
+
+
+typedef struct wl_obss_scan_arg {
+ int16 passive_dwell;
+ int16 active_dwell;
+ int16 bss_widthscan_interval;
+ int16 passive_total;
+ int16 active_total;
+ int16 chanwidth_transition_delay;
+ int16 activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7
+
+#define WL_COEX_INFO_MASK 0x07
+#define WL_COEX_INFO_REQ 0x01
+#define WL_COEX_40MHZ_INTOLERANT 0x02
+#define WL_COEX_WIDTH20 0x04
+
+#define WLC_RSSI_INVALID 0
+
+#define MAX_RSSI_LEVELS 8
+
+
+typedef struct wl_rssi_event {
+ uint32 rate_limit_msec;
+ uint8 num_rssi_levels;
+ int8 rssi_levels[MAX_RSSI_LEVELS];
+} wl_rssi_event_t;
+
+typedef struct wl_action_obss_coex_req {
+ uint8 info;
+ uint8 num;
+ uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+#define EXTLOG_CUR_VER 0x0100
+
+#define MAX_ARGSTR_LEN 18
+
+
+#define LOG_MODULE_COMMON 0x0001
+#define LOG_MODULE_ASSOC 0x0002
+#define LOG_MODULE_EVENT 0x0004
+#define LOG_MODULE_MAX 3
+
+
+#define WL_LOG_LEVEL_DISABLE 0
+#define WL_LOG_LEVEL_ERR 1
+#define WL_LOG_LEVEL_WARN 2
+#define WL_LOG_LEVEL_INFO 3
+#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO
+
+
+#define LOG_FLAG_EVENT 1
+
+
+#define LOG_ARGTYPE_NULL 0
+#define LOG_ARGTYPE_STR 1
+#define LOG_ARGTYPE_INT 2
+#define LOG_ARGTYPE_INT_STR 3
+#define LOG_ARGTYPE_STR_INT 4
+
+typedef struct wlc_extlog_cfg {
+ int max_number;
+ uint16 module;
+ uint8 level;
+ uint8 flag;
+ uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+ uint32 time;
+ uint16 module;
+ uint16 id;
+ uint8 level;
+ uint8 sub_unit;
+ uint8 seq_num;
+ int32 arg;
+ char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+ uint32 from_last;
+ uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+ uint16 id;
+ uint16 flag;
+ uint8 arg_type;
+ const char *fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER 1
+
+
+typedef enum {
+ FMTSTR_DRIVER_UP_ID = 0,
+ FMTSTR_DRIVER_DOWN_ID = 1,
+ FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+ FMTSTR_NO_PROGRESS_ID = 3,
+ FMTSTR_RFDISABLE_ID = 4,
+ FMTSTR_REG_PRINT_ID = 5,
+ FMTSTR_EXPTIME_ID = 6,
+ FMTSTR_JOIN_START_ID = 7,
+ FMTSTR_JOIN_COMPLETE_ID = 8,
+ FMTSTR_NO_NETWORKS_ID = 9,
+ FMTSTR_SECURITY_MISMATCH_ID = 10,
+ FMTSTR_RATE_MISMATCH_ID = 11,
+ FMTSTR_AP_PRUNED_ID = 12,
+ FMTSTR_KEY_INSERTED_ID = 13,
+ FMTSTR_DEAUTH_ID = 14,
+ FMTSTR_DISASSOC_ID = 15,
+ FMTSTR_LINK_UP_ID = 16,
+ FMTSTR_LINK_DOWN_ID = 17,
+ FMTSTR_RADIO_HW_OFF_ID = 18,
+ FMTSTR_RADIO_HW_ON_ID = 19,
+ FMTSTR_EVENT_DESC_ID = 20,
+ FMTSTR_PNP_SET_POWER_ID = 21,
+ FMTSTR_RADIO_SW_OFF_ID = 22,
+ FMTSTR_RADIO_SW_ON_ID = 23,
+ FMTSTR_PWD_MISMATCH_ID = 24,
+ FMTSTR_FATAL_ERROR_ID = 25,
+ FMTSTR_AUTH_FAIL_ID = 26,
+ FMTSTR_ASSOC_FAIL_ID = 27,
+ FMTSTR_IBSS_FAIL_ID = 28,
+ FMTSTR_EXTAP_FAIL_ID = 29,
+ FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+ uint32 flags_idx;
+ uint32 offset;
+ uint32 len;
+
+} wl_ioctl_overlay_t;
+
+#define OVERLAY_IDX_MASK 0x000000ff
+#define OVERLAY_IDX_SHIFT 0
+#define OVERLAY_FLAGS_MASK 0xffffff00
+#define OVERLAY_FLAGS_SHIFT 8
+
+#define OVERLAY_FLAG_POSTLOAD 0x100
+
+#define OVERLAY_FLAG_DEFER_DL 0x200
+
+#define OVERLAY_FLAG_PRESLEEP 0x400
+
+#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024
+#endif
+
+
+#include <packed_section_end.h>
+
+
+#include <packed_section_start.h>
+
+#define VNDR_IE_CMD_LEN 4
+
+
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG 0x4
+#define VNDR_IE_AUTHRSP_FLAG 0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG 0x20
+#define VNDR_IE_CUSTOM_FLAG 0x100
+
+#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag;
+ vndr_ie_t vndr_ie_data;
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int iecount;
+ vndr_ie_info_t vndr_ie_list[1];
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN];
+ vndr_ie_buf_t vndr_ie_buffer;
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+ struct ether_addr staAddr;
+ uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+ sta_prbreq_wps_ie_hdr_t hdr;
+ uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+ uint32 totLen;
+ uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char dest[ETHER_ADDR_LEN];
+ uint8 prio;
+ uint8 flags;
+ uint32 tsf_l;
+ uint32 tsf_h;
+ uint16 rates;
+ uint16 txstatus;
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif
+
+#include <packed_section_end.h>
+
+
+#define ASSERTLOG_CUR_VER 0x0100
+#define MAX_ASSRTSTR_LEN 64
+
+typedef struct assert_record {
+ uint32 time;
+ uint8 seq_num;
+ char str[MAX_ASSRTSTR_LEN];
+} assert_record_t;
+
+typedef struct assertlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ assert_record_t logs[1];
+} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN 8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
+
+
+
+
+
+#define CHANIM_DISABLE 0
+#define CHANIM_DETECT 1
+#define CHANIM_ACT 2
+#define CHANIM_MODE_MAX 2
+
+
+#define APCS_IOCTL 1
+#define APCS_CHANIM 2
+#define APCS_CSTIMER 3
+#define APCS_BTA 4
+
+
+#define CHANIM_ACS_RECORD 10
+
+
+typedef struct {
+ bool valid;
+ uint8 trigger;
+ chanspec_t selected_chspc;
+ uint32 glitch_cnt;
+ uint8 ccastats;
+ uint timestamp;
+} chanim_acs_record_t;
+
+typedef struct {
+ chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+ uint8 count;
+ uint timestamp;
+} wl_acs_record_t;
+
+
+
+#define SMFS_VERSION 1
+
+typedef struct wl_smfs_elem {
+ uint32 count;
+ uint16 code;
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+ uint32 version;
+ uint16 length;
+ uint8 type;
+ uint8 codetype;
+ uint32 ignored_cnt;
+ uint32 malformed_cnt;
+ uint32 count_total;
+ wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+ SMFS_CODETYPE_SC,
+ SMFS_CODETYPE_RC
+};
+
+
+#define SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED 0xFFFD
+
+typedef enum smfs_type {
+ SMFS_TYPE_AUTH,
+ SMFS_TYPE_ASSOC,
+ SMFS_TYPE_REASSOC,
+ SMFS_TYPE_DISASSOC_TX,
+ SMFS_TYPE_DISASSOC_RX,
+ SMFS_TYPE_DEAUTH_TX,
+ SMFS_TYPE_DEAUTH_RX,
+ SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+
+ int16 tx_iqlocal_a;
+ int16 tx_iqlocal_b;
+ int8 tx_iqlocal_ci;
+ int8 tx_iqlocal_cq;
+ int8 tx_iqlocal_di;
+ int8 tx_iqlocal_dq;
+ int8 tx_iqlocal_ei;
+ int8 tx_iqlocal_eq;
+ int8 tx_iqlocal_fi;
+ int8 tx_iqlocal_fq;
+
+
+ int16 rx_iqcal_a;
+ int16 rx_iqcal_b;
+
+ uint8 tx_iqlocal_pwridx;
+ uint32 papd_epsilon_table[64];
+ int16 papd_epsilon_offset;
+ uint8 curr_tx_pwrindex;
+ int8 idle_tssi;
+ int8 est_tx_pwr;
+ int8 est_rx_pwr;
+ uint16 rx_gaininfo;
+ uint16 init_gaincode;
+ int8 estirr_tx;
+ int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+ int version;
+ int8 num_phy_cores;
+ int8 curr_temperature;
+ chanspec_t chspec;
+ bool aci_state;
+ uint16 crsminpower;
+ uint16 crsminpowerl;
+ uint16 crsminpoweru;
+ wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif
+
+#ifdef WLP2P
+
+typedef struct wl_p2p_disc_st {
+ uint8 state;
+ chanspec_t chspec;
+ uint16 dwell;
+} wl_p2p_disc_st_t;
+
+
+#define WL_P2P_DISC_ST_SCAN 0
+#define WL_P2P_DISC_ST_LISTEN 1
+#define WL_P2P_DISC_ST_SEARCH 2
+
+
+typedef struct wl_p2p_scan {
+ uint8 type;
+ uint8 reserved[3];
+
+} wl_p2p_scan_t;
+
+
+typedef struct wl_p2p_if {
+ struct ether_addr addr;
+ uint8 type;
+ chanspec_t chspec;
+} wl_p2p_if_t;
+
+
+#define WL_P2P_IF_CLIENT 0
+#define WL_P2P_IF_GO 1
+#define WL_P2P_IF_DYNBCN_GO 2
+#define WL_P2P_IF_DEV 3
+
+
+typedef struct wl_p2p_ifq {
+ uint bsscfgidx;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+
+typedef struct wl_p2p_ops {
+ uint8 ops;
+ uint8 ctw;
+} wl_p2p_ops_t;
+
+
+typedef struct wl_p2p_sched_desc {
+ uint32 start;
+ uint32 interval;
+ uint32 duration;
+ uint32 count;
+} wl_p2p_sched_desc_t;
+
+
+#define WL_P2P_SCHED_RSVD 0
+#define WL_P2P_SCHED_REPEAT 255
+
+typedef struct wl_p2p_sched {
+ uint8 type;
+ uint8 action;
+ uint8 option;
+ wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+#define WL_P2P_SCHED_FIXED_LEN 3
+
+
+#define WL_P2P_SCHED_TYPE_ABS 0
+#define WL_P2P_SCHED_TYPE_REQ_ABS 1
+
+
+#define WL_P2P_SCHED_ACTION_NONE 0
+#define WL_P2P_SCHED_ACTION_DOZE 1
+
+#define WL_P2P_SCHED_ACTION_GOOFF 2
+
+#define WL_P2P_SCHED_ACTION_RESET 255
+
+
+#define WL_P2P_SCHED_OPTION_NORMAL 0
+#define WL_P2P_SCHED_OPTION_BCNPCT 1
+
+#define WL_P2P_SCHED_OPTION_TSFOFS 2
+
+
+#define WL_P2P_FEAT_GO_CSA (1 << 0)
+#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1)
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2)
+#endif
+
+
+#define BCM_ACTION_RFAWARE 0x77
+#define BCM_ACTION_RFAWARE_DCS 0x01
+
+
+
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+
+#define LQ_IDX_LAST 3
+#define MCS_INDEX_SIZE 33
+
+#define LQ_IDX_MIN 0
+#define LQ_IDX_MAX 1
+#define LQ_IDX_AVG 2
+#define LQ_IDX_SUM 2
+#define LQ_IDX_LAST 3
+#define LQ_STOP_MONITOR 0
+#define LQ_START_MONITOR 1
+
+#define LINKQUAL_V1 0x01
+
+struct wl_lq {
+ int32 enable;
+ int32 rssi[LQ_IDX_LAST];
+ int32 rssicnt;
+ int32 snr[LQ_IDX_LAST];
+ uint32 nsamples;
+ uint8 isvalid;
+ uint8 version;
+};
+
+typedef struct wl_lq wl_lq_t;
+typedef struct wl_lq wl_lq_stats_t;
+
+typedef struct {
+ struct ether_addr ea;
+ uint8 ac_cat;
+ uint8 num_pkts;
+} wl_mac_ratehisto_cmd_t;
+
+
+typedef struct {
+ uint32 rate[WLC_MAXRATE + 1];
+ uint32 mcs_index[MCS_INDEX_SIZE];
+ uint32 tsf_timer[2][2];
+} wl_mac_ratehisto_res_t;
+
+#ifdef PROP_TXSTATUS
+
+
+#define WLFC_FLAGS_RSSI_SIGNALS 1
+
+
+#define WLFC_FLAGS_XONXOFF_SIGNALS 2
+
+
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 4
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 8
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 16
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 32
+#endif
+
+#define BTA_STATE_LOG_SZ 64
+
+
+enum {
+ HCIReset = 1,
+ HCIReadLocalAMPInfo,
+ HCIReadLocalAMPASSOC,
+ HCIWriteRemoteAMPASSOC,
+ HCICreatePhysicalLink,
+ HCIAcceptPhysicalLinkRequest,
+ HCIDisconnectPhysicalLink,
+ HCICreateLogicalLink,
+ HCIAcceptLogicalLink,
+ HCIDisconnectLogicalLink,
+ HCILogicalLinkCancel,
+ HCIAmpStateChange,
+ HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+ uint32 txfifobmp;
+ uint32 hwtxfifoflush;
+ struct ether_addr ea;
+} flush_txfifo_t;
+
+#define CHANNEL_5G_LOW_START 36
+#define CHANNEL_5G_MID_START 52
+#define CHANNEL_5G_HIGH_START 100
+#define CHANNEL_5G_UPPER_START 149
+
+enum {
+ SPATIAL_MODE_2G_IDX = 0,
+ SPATIAL_MODE_5G_LOW_IDX,
+ SPATIAL_MODE_5G_MID_IDX,
+ SPATIAL_MODE_5G_HIGH_IDX,
+ SPATIAL_MODE_5G_UPPER_IDX,
+ SPATIAL_MODE_MAX_IDX
+};
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 000000000000..1a544378c1e6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,919 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c,v 1.168.2.7 2011-01-27 17:01:13 Exp $
+ */
+
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+#ifdef BCMASSERT_LOG
+#include <bcm_assert_log.h>
+#endif
+
+#include <linux/fs.h>
+
+#define PCI_CFG_RETRY 10
+
+#define OS_HANDLE_MAGIC 0x1234abcd
+#define BCM_MEM_FILENAME_LEN 24
+
+#ifdef DHD_USE_STATIC_BUF
+#define STATIC_BUF_MAX_NUM 16
+#define STATIC_BUF_SIZE (PAGE_SIZE * 2)
+#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+ struct semaphore static_sem;
+ unsigned char *buf_ptr;
+ unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define STATIC_PKT_MAX_NUM 8
+
+typedef struct bcm_static_pkt {
+ struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
+ struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
+ struct semaphore osl_pkt_sem;
+ unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+#endif
+
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_info {
+ osl_pubinfo_t pub;
+#ifdef CTFPOOL
+ ctfpool_t *ctfpool;
+#endif
+ uint magic;
+ void *pdev;
+ atomic_t malloced;
+ uint failed;
+ uint bustype;
+ bcm_mem_link_t *dbgmem_list;
+};
+
+
+
+
+uint32 g_assert_type = FALSE;
+
+static int16 linuxbcmerrormap[] =
+{ 0,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -E2BIG,
+ -E2BIG,
+ -EBUSY,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EFAULT,
+ -ENOMEM,
+ -EOPNOTSUPP,
+ -EMSGSIZE,
+ -EINVAL,
+ -EPERM,
+ -ENOMEM,
+ -EINVAL,
+ -ERANGE,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -EIO,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -ENODATA,
+
+
+
+#if BCME_LAST != -42
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+ for new error code defined in bcmutils.h"
+#endif
+};
+
+
+int
+osl_error(int bcmerror)
+{
+ if (bcmerror > 0)
+ bcmerror = 0;
+ else if (bcmerror < BCME_LAST)
+ bcmerror = BCME_ERROR;
+
+
+ return linuxbcmerrormap[-bcmerror];
+}
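+
+/*
+ * Editor's note -- illustrative only, not part of the upstream file: callers
+ * typically fold a driver-internal BCME_* status (bcmutils.h) into a Linux
+ * errno with osl_error(), e.g.
+ *
+ *	err = osl_error(BCME_NOMEM);	returns -ENOMEM, assuming BCME_NOMEM
+ *					is -27 as defined in bcmutils.h
+ *
+ * Positive codes are treated as success (0) and anything below BCME_LAST is
+ * collapsed to BCME_ERROR before indexing linuxbcmerrormap[].
+ */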
+
+extern uint8* dhd_os_prealloc(void *osh, int section, int size);
+
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+ osl_t *osh;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ gfp_t flags;
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ osh = kmalloc(sizeof(osl_t), flags);
+#else
+ osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+#endif
+ ASSERT(osh);
+
+ bzero(osh, sizeof(osl_t));
+
+
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+ osh->magic = OS_HANDLE_MAGIC;
+ atomic_set(&osh->malloced, 0);
+ osh->failed = 0;
+ osh->dbgmem_list = NULL;
+ osh->pdev = pdev;
+ osh->pub.pkttag = pkttag;
+ osh->bustype = bustype;
+
+ switch (bustype) {
+ case PCI_BUS:
+ case SI_BUS:
+ case PCMCIA_BUS:
+ osh->pub.mmbus = TRUE;
+ break;
+ case JTAG_BUS:
+ case SDIO_BUS:
+ case USB_BUS:
+ case SPI_BUS:
+ case RPC_BUS:
+ osh->pub.mmbus = FALSE;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+#if defined(DHD_USE_STATIC_BUF)
+ if (!bcm_static_buf) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
+ STATIC_BUF_TOTAL_LEN))) {
+ printk("can not alloc static buf!\n");
+ } else {
+ /* only initialize the static pool when the preallocation succeeded */
+ printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+
+ sema_init(&bcm_static_buf->static_sem, 1);
+
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+ }
+ }
+
+ if (!bcm_static_skb) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
+
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * 16);
+ for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
+ bcm_static_skb->pkt_use[i] = 0;
+
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+ }
+#endif
+
+ return osh;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+ if (osh == NULL)
+ return;
+
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+}
+
+static struct sk_buff *osl_alloc_skb(unsigned int len)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+ gfp_t flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+
+ return __dev_alloc_skb(len, flags);
+#else
+ return dev_alloc_skb(len);
+#endif
+}
+
+#ifdef CTFPOOL
+
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+ struct sk_buff *skb;
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return NULL;
+
+ spin_lock_bh(&osh->ctfpool->lock);
+ ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+
+ if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+ spin_unlock_bh(&osh->ctfpool->lock);
+ return NULL;
+ }
+
+
+ skb = osl_alloc_skb(osh->ctfpool->obj_size);
+ if (skb == NULL) {
+ printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+ osh->ctfpool->obj_size);
+ spin_unlock_bh(&osh->ctfpool->lock);
+ return NULL;
+ }
+
+
+ skb->next = (struct sk_buff *)osh->ctfpool->head;
+ osh->ctfpool->head = skb;
+ osh->ctfpool->fast_frees++;
+ osh->ctfpool->curr_obj++;
+
+
+ CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+
+ PKTFAST(osh, skb) = FASTBUF;
+
+ spin_unlock_bh(&osh->ctfpool->lock);
+
+ return skb;
+}
+
+
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+
+ while ((osh->ctfpool->refills > 0) && (thresh--)) {
+ osl_ctfpool_add(osh);
+ osh->ctfpool->refills--;
+ }
+}
+
+
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ gfp_t flags;
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ osh->ctfpool = kmalloc(sizeof(ctfpool_t), flags);
+#else
+ osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
+#endif
+ ASSERT(osh->ctfpool);
+ bzero(osh->ctfpool, sizeof(ctfpool_t));
+
+ osh->ctfpool->max_obj = numobj;
+ osh->ctfpool->obj_size = size;
+
+ spin_lock_init(&osh->ctfpool->lock);
+
+ while (numobj--) {
+ if (!osl_ctfpool_add(osh))
+ return -1;
+ osh->ctfpool->fast_frees--;
+ }
+
+ return 0;
+}
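+
+/*
+ * Editor's note -- illustrative only, not part of the upstream file: a caller
+ * would typically prime the pool once, e.g.
+ *
+ *	osl_ctfpool_init(osh, 128, 2048);	(numobj and size are example values)
+ *
+ * after which osl_pktget() tries the osl_pktfastget() fast path before
+ * falling back to osl_alloc_skb(), and osl_pktfree() returns fast-pool
+ * buffers through osl_pktfastfree() instead of freeing them.
+ */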
+
+
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+ struct sk_buff *skb, *nskb;
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+ spin_lock_bh(&osh->ctfpool->lock);
+
+ skb = osh->ctfpool->head;
+
+ while (skb != NULL) {
+ nskb = skb->next;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ osh->ctfpool->curr_obj--;
+ }
+
+ ASSERT(osh->ctfpool->curr_obj == 0);
+ osh->ctfpool->head = NULL;
+ spin_unlock_bh(&osh->ctfpool->lock);
+
+ kfree(osh->ctfpool);
+ osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+ struct bcmstrbuf *bb;
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+#ifdef DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif
+
+ bb = b;
+
+ ASSERT((osh != NULL) && (bb != NULL));
+
+ bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+ osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+ osh->ctfpool->curr_obj, osh->ctfpool->refills);
+ bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+ osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+ osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+ struct sk_buff *skb;
+
+
+ if (osh->ctfpool == NULL)
+ return NULL;
+
+ spin_lock_bh(&osh->ctfpool->lock);
+ if (osh->ctfpool->head == NULL) {
+ ASSERT(osh->ctfpool->curr_obj == 0);
+ osh->ctfpool->slow_allocs++;
+ spin_unlock_bh(&osh->ctfpool->lock);
+ return NULL;
+ }
+
+ ASSERT(len <= osh->ctfpool->obj_size);
+
+
+ skb = (struct sk_buff *)osh->ctfpool->head;
+ osh->ctfpool->head = (void *)skb->next;
+
+ osh->ctfpool->fast_allocs++;
+ osh->ctfpool->curr_obj--;
+ ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+ spin_unlock_bh(&osh->ctfpool->lock);
+
+
+ skb->next = skb->prev = NULL;
+ skb->data = skb->head + 16;
+ skb->tail = skb->head + 16;
+
+ skb->len = 0;
+ skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+ skb->list = NULL;
+#endif
+ atomic_set(&skb->users, 1);
+
+ return skb;
+}
+#endif
+
+
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+{
+ struct sk_buff *skb;
+
+#ifdef CTFPOOL
+ skb = osl_pktfastget(osh, len);
+ if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
+#else
+ if ((skb = osl_alloc_skb(len))) {
+#endif
+ skb_put(skb, len);
+ skb->priority = 0;
+
+ osh->pub.pktalloced++;
+ }
+
+ return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+ ctfpool_t *ctfpool;
+
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+
+
+ spin_lock_bh(&ctfpool->lock);
+ skb->next = (struct sk_buff *)ctfpool->head;
+ ctfpool->head = (void *)skb;
+
+ ctfpool->fast_frees++;
+ ctfpool->curr_obj++;
+
+ ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+ spin_unlock_bh(&ctfpool->lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ skb->tstamp.tv.sec = 0;
+#else
+ skb->stamp.tv_sec = 0;
+#endif
+
+
+ skb->dev = NULL;
+ skb->dst = NULL;
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb->ip_summed = 0;
+ skb->destructor = NULL;
+}
+#endif
+
+
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+ struct sk_buff *skb, *nskb;
+
+ skb = (struct sk_buff*) p;
+
+ if (send && osh->pub.tx_fn)
+ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+
+#ifdef CTFPOOL
+ if (PKTISFAST(osh, skb))
+ osl_pktfastfree(osh, skb);
+ else {
+#else
+ {
+#endif
+
+ if (skb->destructor)
+
+ dev_kfree_skb_any(skb);
+ else
+
+ dev_kfree_skb(skb);
+ }
+
+ osh->pub.pktalloced--;
+
+ skb = nskb;
+ }
+}
+
+#ifdef DHD_USE_STATIC_BUF
+void *
+osl_pktget_static(osl_t *osh, uint len)
+{
+ int i;
+ struct sk_buff *skb;
+
+ if (len > (PAGE_SIZE * 2)) {
+ printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+ return osl_pktget(osh, len);
+ }
+
+ down(&bcm_static_skb->osl_pkt_sem);
+
+ if (len <= PAGE_SIZE) {
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (bcm_static_skb->pkt_use[i] == 0)
+ break;
+ }
+
+ if (i != STATIC_PKT_MAX_NUM) {
+ bcm_static_skb->pkt_use[i] = 1;
+ up(&bcm_static_skb->osl_pkt_sem);
+ skb = bcm_static_skb->skb_4k[i];
+ skb->tail = skb->data + len;
+ skb->len = len;
+ return skb;
+ }
+ }
+
+
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
+ break;
+ }
+
+ if (i != STATIC_PKT_MAX_NUM) {
+ bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
+ up(&bcm_static_skb->osl_pkt_sem);
+ skb = bcm_static_skb->skb_8k[i];
+ skb->tail = skb->data + len;
+ skb->len = len;
+ return skb;
+ }
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ printk("%s: all static pkt in use!\n", __FUNCTION__);
+ return osl_pktget(osh, len);
+}
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+ int i;
+
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (p == bcm_static_skb->skb_4k[i]) {
+ down(&bcm_static_skb->osl_pkt_sem);
+ bcm_static_skb->pkt_use[i] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i]) {
+ down(&bcm_static_skb->osl_pkt_sem);
+ bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+
+ return osl_pktfree(osh, p, send);
+}
+#endif
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+ uint val = 0;
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+ ASSERT(size == 4);
+
+ do {
+ pci_read_config_dword(osh->pdev, offset, &val);
+ if (val != 0xffffffff)
+ break;
+ } while (retry--);
+
+
+ return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+ ASSERT(size == 4);
+
+ do {
+ pci_write_config_dword(osh->pdev, offset, val);
+ if (offset != PCI_BAR0_WIN)
+ break;
+ if (osl_pci_read_config(osh, offset, size) == val)
+ break;
+ } while (retry--);
+
+}
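+
+/*
+ * Editor's note -- illustrative only, not part of the upstream file: writes
+ * to PCI_BAR0_WIN are verified by reading the value back and retried
+ * (bounded by PCI_CFG_RETRY); writes to any other config offset are issued
+ * once and not verified.
+ */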
+
+
+uint
+osl_pci_bus(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+
+uint
+osl_pci_slot(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+ void *addr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ gfp_t flags;
+
+
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ if ((addr = kmalloc(size, flags)) == NULL) {
+#else
+ if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
+#endif
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh)
+ atomic_add(size, &osh->malloced);
+
+ return (addr);
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ atomic_sub(size, &osh->malloced);
+ }
+ kfree(addr);
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (atomic_read(&osh->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->failed);
+}
+
+
+
+uint
+osl_dma_consistent_align(void)
+{
+ return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
+{
+ uint16 align = (1 << align_bits);
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+ size += align;
+ *alloced = size;
+
+ return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+uint BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(char *exp, char *file, int line)
+{
+ char tempbuf[256];
+ char *basename;
+
+ basename = strrchr(file, '/');
+
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+#ifdef BCMASSERT_LOG
+ snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
+ exp, basename, line);
+
+ bcm_assert_log(tempbuf);
+#endif
+
+
+}
+#endif
+
+void
+osl_delay(uint usec)
+{
+ uint d;
+
+ while (usec > 0) {
+ d = MIN(usec, 1000);
+ udelay(d);
+ usec -= d;
+ }
+}
+
+
+
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+ void * p;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ gfp_t flags;
+
+ flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ if ((p = skb_clone((struct sk_buff *)skb, flags)) == NULL)
+#else
+ if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
+#endif
+ return NULL;
+
+#ifdef CTFPOOL
+ if (PKTISFAST(osh, skb)) {
+ ctfpool_t *ctfpool;
+
+
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+ PKTCLRFAST(osh, p);
+ PKTCLRFAST(osh, skb);
+ ctfpool->refills++;
+ }
+#endif
+
+
+ if (osh->pub.pkttag)
+ bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+
+ osh->pub.pktalloced++;
+ return (p);
+}
+
+
+
+
+
+
+
+void *
+osl_os_open_image(char *filename)
+{
+ struct file *fp;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+
+ if (IS_ERR(fp))
+ fp = NULL;
+
+ return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+
+ if (!image)
+ return 0;
+
+ rdlen = kernel_read(fp, fp->f_pos, buf, len);
+ if (rdlen > 0)
+ fp->f_pos += rdlen;
+
+ return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+ if (image)
+ filp_close((struct file *)image, NULL);
+}
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 000000000000..02d1bc0a79d1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,992 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c,v 1.687.2.1 2010-11-29 20:21:56 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+ uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+
+#define SET_SBREG(sii, r, mask, val) \
+ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
+#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
+#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+ uint8 tmp;
+ uint32 val, intr_val = 0;
+
+
+ /*
+ * compact flash only has an 11-bit address, while we need a 12-bit address.
+ * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+ * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+ * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
+ val = R_REG(sii->osh, sbr);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+ uint8 tmp;
+ volatile uint32 dummy;
+ uint32 intr_val = 0;
+
+
+ /*
+ * compact flash only has an 11-bit address, while we need a 12-bit address.
+ * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+ * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+ * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
+ if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+ dummy = R_REG(sii->osh, sbr);
+ W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+ dummy = R_REG(sii->osh, sbr);
+ W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+ } else
+ W_REG(sii->osh, sbr, v);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+ si_info_t *sii;
+ void *corereg;
+ sbconfig_t *sb;
+ uint origidx, intflag, intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+ corereg = si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(corereg != NULL);
+ sb = REGS2SB(corereg);
+ intflag = R_SBREG(sii, &sb->sbflagst);
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+
+ return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 vec;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ if (siflag == -1)
+ vec = 0;
+ else
+ vec = 1 << siflag;
+ W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+ uint i;
+
+ for (i = 0; i < sii->numcores; i ++)
+ if (sba == sii->coresba[i])
+ return i;
+ return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+ uint32 sbaddr;
+
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS: {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+ break;
+ }
+
+ case PCI_BUS:
+ sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ break;
+
+ case PCMCIA_BUS: {
+ uint8 tmp = 0;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+ sbaddr = (uint32)tmp << 12;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+ sbaddr |= (uint32)tmp << 16;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+ sbaddr |= (uint32)tmp << 24;
+ break;
+ }
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ sbaddr = (uint32)(uintptr)sii->curmap;
+ break;
+
+
+ default:
+ sbaddr = BADCOREADDR;
+ break;
+ }
+
+ return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint sbidh;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+ sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+ return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+ }
+
+ /* return the new value;
+ * for a write operation, the following readback ensures completion of the write operation.
+ */
+ return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+ (val << SBTMH_SISF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatehigh, w);
+ }
+
+ /* return the new value */
+ return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbtmstatelow) &
+ (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if it does not exist */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13, at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ if (regoff >= SBCONFIGOFF) {
+ w = (R_SBREG(sii, r) & ~mask) | val;
+ W_SBREG(sii, r, w);
+ } else {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+ }
+
+ /* readback */
+ if (regoff >= SBCONFIGOFF)
+ w = R_SBREG(sii, r);
+ else {
+ if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+ (coreidx == SI_CC_IDX) &&
+ (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+ w = val;
+ } else
+ w = R_REG(sii->osh, r);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ sb_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
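The mask&set idiom above (read, clear the masked bits, OR in the new value, write back, then read again so the write completes before returning) can be checked in isolation. Below is a minimal standalone sketch of that read-modify-write pattern; the plain variable stands in for a memory-mapped register, and none of the names are the driver's own.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the read-modify-write pattern used by sb_corereg(); in the
     * driver the accesses go through R_SBREG/W_SBREG or R_REG/W_REG instead
     * of a plain pointer.
     */
    static uint32_t corereg_rmw(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        assert((val & ~mask) == 0);        /* same invariant the driver asserts */
        if (mask || val)
            *reg = (*reg & ~mask) | val;   /* clear masked bits, set the new ones */
        return *reg;                       /* readback returns the new value */
    }

    int main(void)
    {
        uint32_t reg = 0xdeadbeef;
        printf("0x%08x\n", corereg_rmw(&reg, 0x0000ff00, 0x00001200)); /* 0xdead12ef */
        return 0;
    }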
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'sii'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'numcores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES 2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+ uint next;
+ uint ncc = 0;
+ uint i;
+
+ if (bus >= SB_MAXBUSES) {
+ SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+ return 0;
+ }
+ SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+ /* Scan all cores on the bus starting from core 0.
+ * Core addresses must be contiguous on each bus.
+ */
+ for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+ sii->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+ /* keep and reuse the initial register mapping */
+ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
+ SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+ sii->regs[next] = regs;
+ }
+
+ /* change core to 'next' and read its coreid */
+ sii->curmap = _sb_setcoreidx(sii, next);
+ sii->curidx = next;
+
+ sii->coreid[next] = sb_coreid(&sii->pub);
+
+ /* core specific processing... */
+ /* chipc provides # cores */
+ if (sii->coreid[next] == CC_CORE_ID) {
+ chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+ uint32 ccrev = sb_corerev(&sii->pub);
+
+ /* determine numcores - this is the total # cores in the chip */
+ if (((ccrev == 4) || (ccrev >= 6)))
+ numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+ CID_CC_SHIFT;
+ else {
+ /* Older chips */
+ uint chip = CHIPID(sii->pub.chip);
+
+ if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
+ numcores = 6;
+ else if (chip == BCM4704_CHIP_ID)
+ numcores = 9;
+ else if (chip == BCM5365_CHIP_ID)
+ numcores = 7;
+ else {
+ SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+ chip));
+ ASSERT(0);
+ numcores = 1;
+ }
+ }
+ SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+ sii->pub.issim ? "QT" : ""));
+ }
+ /* scan bridged SB(s) and add results to the end of the list */
+ else if (sii->coreid[next] == OCP_CORE_ID) {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+ uint nsbcc;
+
+ sii->numcores = next + 1;
+
+ if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+ continue;
+ nsbba &= 0xfffff000;
+ if (_sb_coreidx(sii, nsbba) != BADIDX)
+ continue;
+
+ nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+ nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+ if (sbba == SI_ENUM_BASE)
+ numcores -= nsbcc;
+ ncc += nsbcc;
+ }
+ }
+
+ SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+ sii->numcores = i + ncc;
+ return sii->numcores;
+}
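The scan depends on cores being laid out contiguously, one SI_CORE_SIZE apart, starting at the bus base address. A toy version of that address arithmetic follows; the 4KB window size is an assumption chosen for illustration, not a quote of the real SI_CORE_SIZE.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CORE_SIZE 0x1000u          /* assumed 4KB per-core window */

    int main(void)
    {
        uint32_t sbba = 0x18000000u;        /* enumeration base of the bus being scanned */
        for (unsigned i = 0; i < 4; i++)
            printf("core %u at 0x%08x\n", i, sbba + i * DEMO_CORE_SIZE);
        return 0;
    }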
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii;
+ uint32 origsba;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+ /* Save the current core info; validate it later, once we know
+ * for sure what is good and what is bad.
+ */
+ origsba = _sb_coresba(sii);
+
+ /* scan all SB(s) starting from SI_ENUM_BASE */
+ sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ sii->curmap = _sb_setcoreidx(sii, coreidx);
+ sii->curidx = coreidx;
+
+ return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+ uint32 sbaddr = sii->coresba[coreidx];
+ void *regs;
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ regs = sii->regs[coreidx];
+ break;
+
+ case PCI_BUS:
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+ regs = sii->curmap;
+ break;
+
+ case PCMCIA_BUS: {
+ uint8 tmp = (sbaddr >> 12) & 0x0f;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+ tmp = (sbaddr >> 16) & 0xff;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+ tmp = (sbaddr >> 24) & 0xff;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+ regs = sii->curmap;
+ break;
+ }
+ case SPI_BUS:
+ case SDIO_BUS:
+ /* map new one */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = (void *)(uintptr)sbaddr;
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ regs = sii->regs[coreidx];
+ break;
+
+
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+ sbconfig_t *sb;
+ volatile uint32 *addrm;
+
+ sb = REGS2SB(sii->curmap);
+
+ switch (asidx) {
+ case 0:
+ addrm = &sb->sbadmatch0;
+ break;
+
+ case 1:
+ addrm = &sb->sbadmatch1;
+ break;
+
+ case 2:
+ addrm = &sb->sbadmatch2;
+ break;
+
+ case 3:
+ addrm = &sb->sbadmatch3;
+ break;
+
+ default:
+ SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+ return 0;
+ }
+
+ return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ /* + 1 because of enumeration space */
+ return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered register updates */
+void
+sb_commit(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ origidx = sii->curidx;
+ ASSERT(GOODIDX(origidx));
+
+ INTR_OFF(sii, intr_val);
+
+ /* switch over to chipcommon core if there is one, else use pci */
+ if (sii->pub.ccrev != NOREV) {
+ chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(ccregs != NULL);
+
+ /* do the buffered register update */
+ W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+ W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+ } else
+ ASSERT(0);
+
+ /* restore core index */
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /* if core is already in reset, just return */
+ if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+ return;
+
+ /* if clocks are not enabled, put into reset and return */
+ if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+ goto disable;
+
+ /* set target reject and spin until busy is clear (preserve core-specific bits) */
+ OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+ SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+ OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+ dummy = R_SBREG(sii, &sb->sbimstate);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(10);
+
+ /* don't forget to clear the initiator reject bit */
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+ AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+ /* leave reset and reject asserted */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ sb_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+
+ /* set reset while enabling the clock and forcing them on throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+ W_SBREG(sii, &sb->sbtmstatehigh, 0);
+ }
+ if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+ AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+
+ /* leave clock enabled */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of dma *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ * SI_BUS => mips
+ * JTAG_BUS => chipc
+ * PCI_BUS => pci or pcie
+ * PCMCIA_BUS => pcmcia
+ * SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 tmp, ret = 0xffffffff;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ if ((to & ~TO_MASK) != 0)
+ return ret;
+
+ /* Figure out the master core */
+ if (idx == BADIDX) {
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case PCI_BUS:
+ idx = sii->pub.buscoreidx;
+ break;
+ case JTAG_BUS:
+ idx = SI_CC_IDX;
+ break;
+ case PCMCIA_BUS:
+ case SDIO_BUS:
+ idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+ break;
+ case SI_BUS:
+ idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+ break;
+ default:
+ ASSERT(0);
+ }
+ if (idx == BADIDX)
+ return ret;
+ }
+
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+ tmp = R_SBREG(sii, &sb->sbimconfiglow);
+ ret = tmp & TO_MASK;
+ W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+ sb_commit(sih);
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+ return ret;
+}
+
+uint32
+sb_base(uint32 admatch)
+{
+ uint32 base;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ base = 0;
+
+ if (type == 0) {
+ base = admatch & SBAM_BASE0_MASK;
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE1_MASK;
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE2_MASK;
+ }
+
+ return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+ uint32 size;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ size = 0;
+
+ if (type == 0) {
+ size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+ }
+
+ return (size);
+}
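sb_base() and sb_size() decode a single 'admatch' register into a window base address and a power-of-two window size. A minimal standalone sketch of the type-0 case is below; the mask and shift values are placeholders invented for illustration, not the real SBAM_* constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder field layout (NOT the real SBAM_* values): assume the low
     * bits hold the size exponent and the upper bits hold the base address.
     */
    #define DEMO_ADINT0_MASK   0x0000003fu
    #define DEMO_ADINT0_SHIFT  0
    #define DEMO_BASE0_MASK    0xffffff00u

    static uint32_t demo_base(uint32_t admatch) { return admatch & DEMO_BASE0_MASK; }

    static uint32_t demo_size(uint32_t admatch)
    {
        /* size = 2^(field + 1), same shape as the type-0 branch of sb_size() */
        return 1u << (((admatch & DEMO_ADINT0_MASK) >> DEMO_ADINT0_SHIFT) + 1);
    }

    int main(void)
    {
        uint32_t admatch = 0x1800000cu;   /* base 0x18000000, exponent field 12 */
        printf("base 0x%08x size 0x%x\n", demo_base(admatch), demo_size(admatch));
        return 0;                         /* -> base 0x18000000 size 0x2000 */
    }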
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 000000000000..22aa41265763
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,1720 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c,v 1.813.2.36 2011-02-10 23:43:55 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs);
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ si_info_t *sii;
+
+ /* alloc si_info_t */
+ if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+
+ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ sii->vars = vars ? *vars : NULL;
+ sii->varsz = varsz ? *varsz : 0;
+
+ return (si_t *)sii;
+}
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
+
+/* generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+ static bool ksii_attached = FALSE;
+
+ if (!ksii_attached) {
+ void *regs;
+ regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+
+ if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+ SI_BUS, NULL,
+ osh != SI_OSH ? &ksii.vars : NULL,
+ osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+ SI_ERROR(("si_kattach: si_doattach failed\n"));
+ REG_UNMAP(regs);
+ return NULL;
+ }
+ REG_UNMAP(regs);
+
+ /* save ticks normalized to ms for si_watchdog_ms() */
+ if (PMUCTL_ENAB(&ksii.pub)) {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ } else {
+ wd_msticks = ALP_CLOCK / 1000;
+ }
+
+ ksii_attached = TRUE;
+ SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+ ksii.pub.ccrev, wd_msticks));
+ }
+
+ return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+ /* need to set the memseg flag for a CF card before any sb register access */
+ if (BUSTYPE(bustype) == PCMCIA_BUS)
+ sii->memseg = TRUE;
+
+
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval));
+ return FALSE;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ }
+
+
+ return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs)
+{
+ bool pci, pcie;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+ /* get chipcommon capabilities */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (sii->pub.ccrev >= 35)
+ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+
+ /* figure out bus/original core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, sii->coresba[i], sii->regs[i]));
+
+ if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if (cid == PCIE_CORE_ID) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ }
+ } else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+ (cid == PCMCIA_CORE_ID)) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ ((cid == PCMCIA_CORE_ID) ||
+ (cid == SDIOD_CORE_ID))) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+
+ /* find the idx of the core we were on before entering this func. */
+ if ((savewin && (savewin == sii->coresba[i])) ||
+ (regs == sii->regs[i]))
+ *origidx = i;
+ }
+
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = pcirev;
+ sii->pub.buscoreidx = pciidx;
+ } else if (pcie) {
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+
+ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+ sii->pub.buscorerev));
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+ (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+ OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+ /* Make sure any on-chip ARM is off (in case strapping is wrong, or downloaded code was
+ * already running).
+ */
+ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+ si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+ si_core_disable(&sii->pub, 0);
+ }
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+
+
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+
+ ASSERT(GOODREGS(regs));
+
+ bzero((uchar*)sii, sizeof(si_info_t));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+
+
+
+ /* find Chipcommon address */
+ if (bustype == PCI_BUS) {
+ savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+ cc = (chipcregs_t *)regs;
+ } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+ cc = (chipcregs_t *)sii->curmap;
+ } else {
+ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+ }
+
+ sih->bustype = bustype;
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+ bustype, BUSTYPE(bustype)));
+ return NULL;
+ }
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+ return NULL;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ w = R_REG(osh, &cc->chipid);
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as well fill in chip id, rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+ if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+ >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+ CST4322_SPROM_PRESENT))) {
+ SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+ return NULL;
+ }
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+ (sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+ sih->chippkg = BCM4329_182PIN_PKG_ID;
+ }
+
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+ SI_MSG(("Found chip type SB (0x%08x)\n", w));
+ sb_scan(&sii->pub, regs, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+ SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+ /* pass chipc address instead of original core base */
+ ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else {
+ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ goto exit;
+ }
+
+ /* assume current core is CC */
+ if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+ (CHIPREV(sii->pub.chiprev) == 0))) {
+
+ if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+ uint clkdiv;
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ /* otp_clk_div is an even number; 120/14 < 9 MHz */
+ clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+ SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+ }
+ OSL_DELAY(10);
+ }
+
+
+ pvars = NULL;
+
+
+
+ if (sii->pub.ccrev >= 20) {
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+ W_REG(osh, &cc->gpiopullup, 0);
+ W_REG(osh, &cc->gpiopulldown, 0);
+ si_setcoreidx(sih, origidx);
+ }
+
+
+
+
+ return (sii);
+
+exit:
+
+ return NULL;
+}
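si_doattach() splits the 32-bit chipid word into chip, revision and package fields with mask/shift pairs. The sketch below uses a placeholder field layout (NOT the real CID_* constants) purely to show the shape of that decode.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder layout: bits 15:0 id, 19:16 rev, 23:20 pkg (illustrative only) */
    #define DEMO_ID_MASK   0x0000ffffu
    #define DEMO_REV_MASK  0x000f0000u
    #define DEMO_REV_SHIFT 16
    #define DEMO_PKG_MASK  0x00f00000u
    #define DEMO_PKG_SHIFT 20

    int main(void)
    {
        uint32_t w = 0x00214329u;         /* made-up chipid register value */
        printf("chip 0x%x rev %u pkg %u\n",
               w & DEMO_ID_MASK,
               (w & DEMO_REV_MASK) >> DEMO_REV_SHIFT,
               (w & DEMO_PKG_MASK) >> DEMO_PKG_SHIFT);
        return 0;                         /* -> chip 0x4329 rev 1 pkg 2 */
    }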
+
+/* may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+
+
+ sii = SI_INFO(sih);
+
+ if (sii == NULL)
+ return;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ for (idx = 0; idx < SI_MAXCORES; idx++)
+ if (sii->regs[idx]) {
+ REG_UNMAP(sii->regs[idx]);
+ sii->regs[idx] = NULL;
+ }
+
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (sii != &ksii)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+ /* save the current core id. When this function is called, the current core
+ * must be the core which provides driver functions (il, et, wl, etc.)
+ */
+ sii->dev_coreid = sii->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intrsoff_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_intflag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return R_REG(sii->osh, ((uint32 *)(uintptr)
+ (sii->oob_router + OOB_STATUSA)));
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+si_flag(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_flag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_setint(sih, siflag);
+ else
+ ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ sii = SI_INFO(sih);
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = si_coreid(sih);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++)
+ if (sii->coreid[i] == coreid)
+ coreunit++;
+
+ return (coreunit);
+}
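si_coreunit() derives the instantiation number of the current core by counting how many earlier cores in the list share its core id. That is easy to model with a plain array; the id values below are arbitrary examples, not taken from any particular chip.

    #include <stdio.h>

    int main(void)
    {
        unsigned coreid[] = { 0x800, 0x812, 0x812, 0x80e, 0x812 };
        unsigned curidx = 4;                  /* third 0x812 core in the list */
        unsigned coreunit = 0;

        for (unsigned i = 0; i < curidx; i++)
            if (coreid[i] == coreid[curidx])
                coreunit++;

        printf("coreunit = %u\n", coreunit); /* -> 2 */
        return 0;
    }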
+
+uint
+si_corevendor(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corevendor(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+ return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corerev(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii;
+ uint found;
+ uint i;
+
+ sii = SI_INFO(sih);
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++)
+ if (sii->coreid[i] == coreid) {
+ if (found == coreunit)
+ return (i);
+ found++;
+ }
+
+ return (BADIDX);
+}
+
+/* return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+ return (sii->numcores);
+}
+
+/* return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+
+ return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+ uint idx;
+
+ idx = si_findcoreidx(sih, coreid, coreunit);
+ if (!GOODIDX(idx))
+ return (NULL);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, idx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, coreidx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+/* Turn off interrupts as required by sb_setcore, before switching cores */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+ void *cc;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (SI_FAST(sii)) {
+ /* Overload the origidx variable to remember the coreid;
+ * this works because core ids cannot be confused with
+ * core indices.
+ */
+ *origidx = coreid;
+ if (coreid == CC_CORE_ID)
+ return (void *)CCREGS_FAST(sii);
+ else if (coreid == sih->buscoretype)
+ return (void *)PCIEREGS(sii);
+ }
+ INTR_OFF(sii, *intr_val);
+ *origidx = sii->curidx;
+ cc = si_setcore(sih, coreid, 0);
+ ASSERT(cc != NULL);
+
+ return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+ return;
+
+ si_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+int
+si_numaddrspaces(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_numaddrspaces(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspace(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspacesize(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_cflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_cflags_wo(sih, mask, val);
+ else
+ ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_sflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_iscoreup(sih);
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ /* only for AI back plane chips */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return (ai_wrap_reg(sih, offset, mask, val));
+ return 0;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corereg(sih, coreidx, regoff, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_reset(sih, bits, resetbits);
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+ uint32 cflags;
+ int result = 0;
+
+ /* Read core control flags */
+ cflags = si_core_cflags(sih, 0, 0);
+
+ /* Set bist & fgc */
+ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+ /* Wait for bist done */
+ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+ result = BCME_ERROR;
+
+ /* Reset core control flags */
+ si_core_cflags(sih, 0xffff, cflags);
+
+ return result;
+}
+
+static uint32
+factor6(uint32 x)
+{
+ switch (x) {
+ case CC_F6_2: return 2;
+ case CC_F6_3: return 3;
+ case CC_F6_4: return 4;
+ case CC_F6_5: return 5;
+ case CC_F6_6: return 6;
+ case CC_F6_7: return 7;
+ default: return 0;
+ }
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ return (100000000);
+ } else
+ ASSERT(0);
+ /* PLL types 3 and 7 use BASE2 (25 MHz) */
+ if ((pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ switch (mc) {
+ case CC_MC_BYPASS: return (clock);
+ case CC_MC_M1: return (clock / m1);
+ case CC_MC_M1M2: return (clock / (m1 * m2));
+ case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
+ case CC_MC_M1M3: return (clock / (m1 * m3));
+ default: return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+}
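The divider math in si_clock_rate() boils down to clock = base * n1 * n2, followed by division by whichever m-dividers are not bypassed. A standalone sketch with made-up divider values follows; the reference frequency and the factor/bias encodings of the real CC_* constants are not reproduced here and should be treated as assumptions.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t base = 20000000u;        /* assumed reference frequency in Hz */
        uint32_t n1 = 4, n2 = 10;         /* already-decoded feedback dividers */
        uint32_t m1 = 2, m2 = 4;          /* output dividers actually applied */

        uint32_t clock = base * n1 * n2;  /* intermediate frequency */
        clock = clock / (m1 * m2);        /* CC_MC_M1M2 case in si_clock_rate() */
        printf("%u Hz\n", clock);         /* -> 100000000 */
        return 0;
    }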
+
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+ uint nb, maxt;
+
+ if (PMUCTL_ENAB(sih)) {
+
+ if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+ (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+ si_setcore(sih, USB20D_CORE_ID, 0);
+ si_core_disable(sih, 1);
+ si_setcore(sih, CC_CORE_ID, 0);
+ }
+
+ nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+ /* The mips compiler uses the sllv instruction,
+ * so we specially handle the 32-bit case.
+ */
+ if (nb == 32)
+ maxt = 0xffffffff;
+ else
+ maxt = ((1 << nb) - 1);
+
+ if (ticks == 1)
+ ticks = 2;
+ else if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+ } else {
+ maxt = (1 << 28) - 1;
+ if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ }
+}
+
+/* trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+ si_watchdog(sih, wd_msticks * ms);
+}
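si_watchdog() clamps the requested tick count to the width of the watchdog counter ((1 << nb) - 1, with the 32-bit case special-cased) and enforces a minimum of 2 ticks, while si_watchdog_ms() just scales milliseconds by wd_msticks. A standalone sketch of that clamping and scaling; the 32-ticks-per-ms figure matches the 32 kHz ILP case above, but treat the concrete numbers as assumptions.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clamp_ticks(uint32_t ticks, unsigned nb)
    {
        uint32_t maxt = (nb == 32) ? 0xffffffffu : ((1u << nb) - 1);
        if (ticks == 1)
            ticks = 2;                    /* same minimum the driver enforces */
        else if (ticks > maxt)
            ticks = maxt;
        return ticks;
    }

    int main(void)
    {
        uint32_t wd_msticks = 32;         /* ~32 ticks/ms on a 32 kHz ILP clock */
        uint32_t ms = 3000;
        printf("ticks = %u\n", clamp_ticks(ms * wd_msticks, 24)); /* 24-bit counter */
        return 0;
    }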
+
+
+
+
+/* change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+ return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/* mask&set gpiocontrol bits */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioouten);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* only cores on SI_BUS share GPIOs and only application users need to
+ * reserve/release GPIOs
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return 0xffffffff;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
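The reservation code rejects any bitmask that is zero or has more than one bit set, using the classic x & (x - 1) test. A tiny standalone check of that predicate:

    #include <stdint.h>
    #include <stdio.h>

    /* true iff exactly one bit is set -- the condition si_gpioreserve() enforces */
    static int one_bit_set(uint32_t x)
    {
        return x != 0 && (x & (x - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", one_bit_set(0x10), one_bit_set(0x18), one_bit_set(0));
        return 0;                         /* -> 1 0 0 */
    }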
+
+/* release one gpio */
+/*
+ * Releasing the gpio doesn't change the current value on the GPIO; the last write value
+ * persists till someone overwrites it.
+ */
+
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* only cores on SI_BUS share GPIOs and only application users need to
+ * reserve/release GPIOs
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return 0xffffffff;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ /* gpio led powersave reg */
+ return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 20)
+ return 0xffffffff;
+
+ offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+ bool level, gpio_handler_t cb, void *arg)
+{
+ si_info_t *sii;
+ gpioh_item_t *gi;
+
+ ASSERT(event);
+ ASSERT(cb != NULL);
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return NULL;
+
+ if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+ return NULL;
+
+ bzero(gi, sizeof(gpioh_item_t));
+ gi->event = event;
+ gi->handler = cb;
+ gi->arg = arg;
+ gi->level = level;
+
+ gi->next = sii->gpioh_head;
+ sii->gpioh_head = gi;
+
+ return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+ si_info_t *sii;
+ gpioh_item_t *p, *n;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return;
+
+ ASSERT(sii->gpioh_head != NULL);
+ if ((void*)sii->gpioh_head == gpioh) {
+ sii->gpioh_head = sii->gpioh_head->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ } else {
+ p = sii->gpioh_head;
+ n = p->next;
+ while (n) {
+ if ((void*)n == gpioh) {
+ p->next = n->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ }
+ p = n;
+ n = n->next;
+ }
+ }
+
+ ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+ si_info_t *sii;
+ gpioh_item_t *h;
+ uint32 level = si_gpioin(sih);
+ uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+ uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+ uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
+ sii = SI_INFO(sih);
+ for (h = sii->gpioh_head; h != NULL; h = h->next) {
+ if (h->handler) {
+ uint32 status = (h->level ? level : edge) & h->event;
+ uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+ /* polarity bitval is opposite of status bitval */
+ if (status ^ polarity)
+ h->handler(status, h->arg);
+ }
+ }
+
+ si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
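Each registered handler fires when the masked status differs from the masked polarity, i.e. when status ^ polarity is non-zero. A standalone sketch of that dispatch decision, with made-up bit values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t event = 0x00000004;      /* GPIO bit this handler cares about */
        uint32_t level = 0x00000004;      /* current gpioin value */
        uint32_t levelp = 0x00000000;     /* interrupt polarity register */

        uint32_t status = level & event;
        uint32_t polarity = levelp & event;

        if (status ^ polarity)            /* same test si_gpio_handler_process() uses */
            printf("handler would run, status 0x%x\n", status);
        return 0;
    }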
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/* Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 index, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = index | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
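socram_banksize() selects a bank via the bankidx register and converts the bankinfo size field into bytes as SZBASE * (field + 1). A standalone sketch of that conversion; the granularity and mask values are assumptions for illustration, not the real SOCRAM_BANKINFO_* constants.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SZBASE  8192u            /* assumed bank-size granularity in bytes */
    #define DEMO_SZMASK  0x7fu            /* assumed size field mask */

    static uint32_t demo_banksize(uint32_t bankinfo)
    {
        return DEMO_SZBASE * ((bankinfo & DEMO_SZMASK) + 1);
    }

    int main(void)
    {
        printf("%u bytes\n", demo_banksize(0x03)); /* field 3 -> 4 * 8KB = 32768 */
        return 0;
    }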
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ if (!set)
+ *enable = *protect = 0;
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (set) {
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+ if (*enable) {
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+ if (*protect)
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+ }
+ W_REG(sii->osh, &regs->bankinfo, bankinfo);
+ }
+ else if (i == 0) {
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+ *enable = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+ *protect = 1;
+ }
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+ if (si_socdevram_size(sih) > 0)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/* Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else if ((corerev <= 7) || (corerev == 12)) {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+ nb --;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+
+void
+si_btcgpiowar(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ chipcregs_t *cc;
+
+ sii = SI_INFO(sih);
+
+ /* Make sure that there is a ChipCommon core present and
+ * UART_TX is strapped to 1
+ */
+ if (!(sih->cccaps & CC_CAP_UARTGPIO))
+ return;
+
+ /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+uint
+si_pll_reset(si_t *sih)
+{
+ uint err = 0;
+
+ return (err);
+}
+
+/* check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+ uint32 w;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case PCI_BUS:
+ ASSERT(sii->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((w & 0xFFFF) != VENDOR_BROADCOM)
+ return TRUE;
+ break;
+ }
+ return FALSE;
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 000000000000..d80246e01d1b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,235 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h,v 1.17.4.3 2010-10-25 16:56:56 Exp $
+ */
+
+#ifndef _siutils_priv_h_
+#define _siutils_priv_h_
+
+#define SI_ERROR(args)
+
+#define SI_MSG(args)
+
+/* Define SI_VMSG to printf for verbose debugging, but don't check it in */
+#define SI_VMSG(args)
+
+#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+ void *arg;
+ bool level;
+ gpio_handler_t handler;
+ uint32 event;
+ struct gpioh_item *next;
+} gpioh_item_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_info {
+	struct si_pub pub;		/* backplane public state (must be the first field) */
+
+ void *osh; /* osl os handle */
+ void *sdh; /* bcmsdh handle */
+
+ uint dev_coreid; /* the core provides driver functions */
+ void *intr_arg; /* interrupt callback function arg */
+ si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
+ si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+ si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+ void *pch; /* PCI/E core handle */
+
+ gpioh_item_t *gpioh_head; /* GPIO event handlers list */
+
+ bool memseg; /* flag to toggle MEM_SEG register */
+
+ char *vars;
+ uint varsz;
+
+ void *curmap; /* current regs va */
+ void *regs[SI_MAXCORES]; /* other regs va */
+
+ uint curidx; /* current core index */
+ uint numcores; /* # discovered cores */
+ uint coreid[SI_MAXCORES]; /* id of each core */
+ uint32 coresba[SI_MAXCORES]; /* backplane address of each core */
+ void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */
+ uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */
+ uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */
+ uint32 coresba2_size[SI_MAXCORES]; /* second address space size */
+
+ void *curwrap; /* current wrapper va */
+ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
+ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+
+ uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
+	uint32 cib[SI_MAXCORES];	/* erom cib entry for each core */
+ uint32 oob_router; /* oob router registers for axi */
+} si_info_t;
+
+#define SI_INFO(sih) (si_info_t *)(uintptr)sih
+
+#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ ISALIGNED((x), SI_CORE_SIZE))
+#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR 0
+#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES)
+#define NOREV -1 /* Invalid rev */
+
+#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCI_CORE_ID))
+#define PCIE(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE_CORE_ID))
+#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIE and CC cores without requiring a change to
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
+ (((si)->pub.buscoretype == PCI_CORE_ID) && (si)->pub.buscorerev >= 13))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/
+ * after core switching, to avoid invalid register access inside the ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
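
A minimal sketch of the idiom these two macros protect, as used by the si_socram_size()-style routines earlier in this patch: block the client driver's interrupts, remember the current core, switch cores, do the register access, then switch back before re-enabling interrupts. All identifiers below come from siutils.c / siutils_priv.h in this diff; error handling is trimmed.

	/* Sketch only: canonical save/switch/restore pattern guarded by INTR_OFF/INTR_RESTORE */
	static uint32
	example_read_socram_coreinfo(si_t *sih)
	{
		si_info_t *sii = SI_INFO(sih);
		uint intr_val = 0;
		uint origidx;
		uint32 val = 0;
		sbsocramregs_t *regs;

		INTR_OFF(sii, intr_val);		/* block the client ISR while off-core */
		origidx = si_coreidx(sih);		/* remember the current core */

		regs = si_setcore(sih, SOCRAM_CORE_ID, 0);
		if (regs != NULL)
			val = R_REG(sii->osh, &regs->coreinfo);

		si_setcoreidx(sih, origidx);		/* switch back before re-enabling ints */
		INTR_RESTORE(sii, intr_val);
		return val;
	}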
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /* low power oscillator min */
+#define LPOMAXFREQ 43000 /* low power oscillator max */
+#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /* 25 MHz */
+#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
+
+#define PCI_FORCEHT(si) \
+ (((PCIE(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+ ((PCI(si) || PCIE(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+ (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b) (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif /* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h
new file mode 100644
index 000000000000..c51c68cd0eed
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/uamp_api.h
@@ -0,0 +1,176 @@
+/*
+ * Name: uamp_api.h
+ *
+ * Description: Universal AMP API
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: uamp_api.h,v 1.2.8.1 2011-02-05 00:16:14 Exp $
+ *
+ */
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+** Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool BOOLEAN;
+typedef uint8 UINT8;
+typedef uint16 UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1 1
+#define UAMP_ID_2 2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */
+#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */
+#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */
+#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+ tUAMP_CH channel; /* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function: UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters amp_id: AMP device identifier that generated the event
+** amp_evt: event id
+** p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+** external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function: UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters p_cback: Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters amp_id: Application specific AMP identifier. This value
+** will be included in AMP messages sent to the
+** BTU task, to identify the source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function: UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters amp_id: Application specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters: amp_id: AMP identifier.
+** p_buf: pointer to buffer to write
+** num_bytes: number of bytes to write
+** channel: UAMP_CH_HCI_DATA or UAMP_CH_HCI_CMD
+**
+** Returns: number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function: UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+** UAMP_EVT_RX_READY callback event.
+**
+** Parameters: amp_id: AMP identifier.
+** p_buf: pointer to buffer for holding incoming AMP data
+** buf_size: size of p_buf
+** channel: UAMP_CH_HCI_DATA or UAMP_CH_HCI_EVT
+**
+** Returns: number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
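
A hedged sketch of how a host Bluetooth stack might drive the UAMP API declared above: register a callback with UAMP_Init(), open the controller, and drain data on UAMP_EVT_RX_READY. Only functions and types from this header are used; the buffer size and hand-off to the stack are placeholders, and this caller is not part of the patch.

	/* Hypothetical UAMP consumer (sketch only, not from this diff) */
	#include "uamp_api.h"

	static UINT8 rx_buf[1024];	/* arbitrary size for the sketch */

	static void example_uamp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt,
		tUAMP_EVT_DATA *p_amp_evt_data)
	{
		if (amp_evt == UAMP_EVT_RX_READY) {
			/* Read whatever the AMP controller queued on the reported channel */
			UINT16 n = UAMP_Read(amp_id, rx_buf, sizeof(rx_buf),
				p_amp_evt_data->channel);
			(void)n;	/* hand the data off to the host stack here */
		} else if (amp_evt == UAMP_EVT_CTLR_REMOVED) {
			UAMP_Close(amp_id);
		}
	}

	static BOOLEAN example_uamp_start(void)
	{
		if (!UAMP_Init(example_uamp_cback))
			return FALSE;
		return UAMP_Open(UAMP_ID_1);
	}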
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 000000000000..9ca3d6020239
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,840 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <bcmsdbus.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/platform_device.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+#include <linux/wlan_plat.h>
+#else
+#include <linux/wifi_tiwlan.h>
+#endif
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START "START"
+#define CMD_STOP "STOP"
+#define CMD_SCAN_ACTIVE "SCAN-ACTIVE"
+#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
+#define CMD_RSSI "RSSI"
+#define CMD_LINKSPEED "LINKSPEED"
+#define CMD_RXFILTER_START "RXFILTER-START"
+#define CMD_RXFILTER_STOP "RXFILTER-STOP"
+#define CMD_RXFILTER_ADD "RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE "BTCOEXMODE"
+#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
+#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
+#define CMD_SETFWPATH "SETFWPATH"
+#define CMD_SETBAND "SETBAND"
+#define CMD_GETBAND "GETBAND"
+#define CMD_COUNTRY "COUNTRY"
+#define CMD_P2P_SET_NOA "P2P_SET_NOA"
+#define CMD_P2P_GET_NOA "P2P_GET_NOA"
+#define CMD_P2P_SET_PS "P2P_SET_PS"
+#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE"
+
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
+#define CMD_PNOSETUP_SET "PNOSETUP "
+#define CMD_PNOENABLE_SET "PNOFORCE"
+#define CMD_PNODEBUG_SET "PNODEBUG"
+
+#define PNO_TLV_PREFIX 'S'
+#define PNO_TLV_VERSION '1'
+#define PNO_TLV_SUBVERSION '2'
+#define PNO_TLV_RESERVED '0'
+#define PNO_TLV_TYPE_SSID_IE 'S'
+#define PNO_TLV_TYPE_TIME 'T'
+#define PNO_TLV_FREQ_REPEAT 'R'
+#define PNO_TLV_FREQ_EXPO_MAX 'M'
+
+typedef struct cmd_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cmd_tlv_t;
+#endif /* PNO_SUPPORT */
+
+typedef struct android_wifi_priv_cmd {
+ char *buf;
+ int used_len;
+ int total_len;
+} android_wifi_priv_cmd;
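
This struct is the user/kernel contract for wl_android_priv_cmd() below: userspace puts a pointer to it in ifr_data, with buf holding the command string on entry and the reply on return. A rough sketch of the userspace side follows; the exact ioctl number is an assumption (the dispatch lives in dhd_linux.c, outside this hunk, and is commonly wired to SIOCDEVPRIVATE + 1 in Android wpa_supplicant glue).

	/* Hypothetical userspace issuer of an Android private command (sketch only).
	 * Assumption: the driver routes SIOCDEVPRIVATE + 1 to wl_android_priv_cmd().
	 */
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/if.h>
	#include <linux/sockios.h>

	struct android_wifi_priv_cmd {	/* mirrors the kernel-side struct above */
		char *buf;
		int used_len;
		int total_len;
	};

	int send_priv_cmd(const char *ifname, char *buf, int buf_len)
	{
		struct android_wifi_priv_cmd priv_cmd = { buf, 0, buf_len };
		struct ifreq ifr;
		int sock, ret;

		sock = socket(AF_INET, SOCK_DGRAM, 0);
		if (sock < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&priv_cmd;

		ret = ioctl(sock, SIOCDEVPRIVATE + 1, &ifr);	/* assumed command number */
		close(sock);
		return ret;	/* on success, buf holds the reply string, e.g. "Band 0" */
	}

	/* Usage: char cmd[64] = "GETBAND"; send_priv_cmd("wlan0", cmd, sizeof(cmd)); */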
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+void dhd_customer_gpio_wlan_ctrl(int onoff);
+uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+void dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif
+extern int dhd_os_check_if_up(void *dhdp);
+extern void *bcmsdh_get_drvdata(void);
+
+extern bool ap_fw_loaded;
+#ifdef CUSTOMER_HW2
+extern char iface_name[IFNAMSIZ];
+#endif
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so that dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent Wi-Fi power-ons are handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+ int link_speed;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_link_speed(net, &link_speed);
+ if (error)
+ return -1;
+
+ /* Convert Kbps to Android Mbps */
+ link_speed = link_speed / 1000;
+ bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+ DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+ wlc_ssid_t ssid = {0};
+ int rssi;
+ int bytes_written = 0;
+ int error;
+
+ error = wldev_get_rssi(net, &rssi);
+ if (error)
+ return -1;
+
+ error = wldev_get_ssid(net, &ssid);
+ if (error)
+ return -1;
+ if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+ DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+ } else {
+ memcpy(command, ssid.SSID, ssid.SSID_len);
+ bytes_written = ssid.SSID_len;
+ }
+ bytes_written += snprintf(&command[bytes_written], total_len - bytes_written, " rssi %d", rssi);
+ DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+ return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now)))
+ DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+ __FUNCTION__, ret_now, suspend_flag));
+ else
+ DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+ return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+ uint band;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_band(dev, &band);
+ if (error)
+ return -1;
+ bytes_written = snprintf(command, total_len, "Band %d", band);
+ return bytes_written;
+}
+
+#ifdef PNO_SUPPORT
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+ wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+ int res = -1;
+ int nssid = 0;
+ cmd_tlv_t *cmd_tlv_temp;
+ char *str_ptr;
+ int tlv_size_left;
+ int pno_time = 0;
+ int pno_repeat = 0;
+ int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+ int i;
+ char pno_in_example[] = {
+ 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+ 'S', '1', '2', '0',
+ 'S',
+ 0x05,
+ 'd', 'l', 'i', 'n', 'k',
+ 'S',
+ 0x04,
+ 'G', 'O', 'O', 'G',
+ 'T',
+ '0', 'B',
+ 'R',
+ '2',
+ 'M',
+ '2',
+ 0x00
+ };
+#endif /* PNO_SET_DEBUG */
+
+ DHD_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+ if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+ DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+ goto exit_proc;
+ }
+
+#ifdef PNO_SET_DEBUG
+ memcpy(command, pno_in_example, sizeof(pno_in_example));
+ for (i = 0; i < sizeof(pno_in_example); i++)
+ printf("%02X ", command[i]);
+ printf("\n");
+ total_len = sizeof(pno_in_example);
+#endif
+
+ str_ptr = command + strlen(CMD_PNOSETUP_SET);
+ tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+ cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+ memset(ssids_local, 0, sizeof(ssids_local));
+
+ if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+ (cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+ (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION)) {
+
+ str_ptr += sizeof(cmd_tlv_t);
+ tlv_size_left -= sizeof(cmd_tlv_t);
+
+ if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+ MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+ DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+ goto exit_proc;
+ } else {
+ if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+ DHD_ERROR(("%s scan duration corrupted field size %d\n",
+ __FUNCTION__, tlv_size_left));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+ DHD_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+ if (str_ptr[0] != 0) {
+ if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+ DHD_ERROR(("%s pno repeat : corrupted field\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+ DHD_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+ if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+ DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+ DHD_INFO(("%s: pno_freq_expo_max=%d\n",
+ __FUNCTION__, pno_freq_expo_max));
+ }
+ }
+ } else {
+ DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+ res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+ return res;
+}
+#endif /* PNO_SUPPORT */
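
The parser above expects "PNOSETUP " followed by the 4-byte TLV header ('S','1','2','0'), one or more 'S'<len><ssid> records, a 'T' plus a hex scan period, and optional 'R'<repeat> / 'M'<exponent> fields, exactly as the pno_in_example[] array illustrates. Below is a small, hypothetical builder for such a buffer (not part of the driver), limited to a single SSID and the mandatory time field.

	/* Hypothetical builder for a PNOSETUP buffer in the TLV layout parsed by
	 * wl_android_set_pno_setup() above. Returns bytes written, or -1 on overflow.
	 */
	#include <string.h>

	static int build_pnosetup(char *buf, int buf_len,
		const char *ssid, const char *hex_time)
	{
		int len = 0;
		int ssid_len = (int)strlen(ssid);
		int time_len = (int)strlen(hex_time);

		if (buf_len < 9 + 4 + 2 + ssid_len + 1 + time_len + 1)
			return -1;

		memcpy(buf + len, "PNOSETUP ", 9); len += 9;	/* command keyword */
		buf[len++] = 'S';	/* cmd_tlv_t.prefix   */
		buf[len++] = '1';	/* cmd_tlv_t.version  */
		buf[len++] = '2';	/* cmd_tlv_t.subver   */
		buf[len++] = '0';	/* cmd_tlv_t.reserved */

		buf[len++] = 'S';			/* one SSID record */
		buf[len++] = (char)ssid_len;		/* raw length byte, not ASCII */
		memcpy(buf + len, ssid, ssid_len); len += ssid_len;

		buf[len++] = 'T';			/* scan period, hex digits */
		memcpy(buf + len, hex_time, time_len); len += time_len;

		buf[len++] = '\0';			/* terminator, as in pno_in_example[] */
		return len;
	}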
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+ int ret;
+ int bytes_written = 0;
+
+ ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+ if (ret)
+ return 0;
+ bytes_written = sizeof(struct ether_addr);
+ return bytes_written;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+int wl_android_wifi_on(struct net_device *dev)
+{
+ int ret = 0;
+
+ printk("%s in\n", __FUNCTION__);
+ if (!dev) {
+ DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd_net_if_lock(dev);
+ if (!g_wifi_on) {
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+ sdioh_start(NULL, 0);
+ ret = dhd_dev_reset(dev, FALSE);
+ sdioh_start(NULL, 1);
+ if (!ret)
+ dhd_dev_init_ioctl(dev);
+ g_wifi_on = 1;
+ }
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev)
+{
+ int ret = 0;
+
+ printk("%s in\n", __FUNCTION__);
+ if (!dev) {
+ DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd_net_if_lock(dev);
+ if (g_wifi_on) {
+ ret = dhd_dev_reset(dev, TRUE);
+ sdioh_stop(NULL);
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ g_wifi_on = 0;
+ }
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+ if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+ return -1;
+ bcm_strncpy_s(fw_path, sizeof(fw_path),
+ command + strlen(CMD_SETFWPATH) + 1, MOD_PARAM_PATHLEN - 1);
+ if (strstr(fw_path, "apsta") != NULL) {
+ DHD_INFO(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ DHD_INFO(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
+ }
+ return 0;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ char *command = NULL;
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+
+ net_os_wake_lock(net);
+
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
+ if (!command)
+ {
+ DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+ if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+ DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+ bytes_written = wl_android_wifi_on(net);
+ }
+ else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+ bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+ }
+
+ if (!g_wifi_on) {
+ DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+ __FUNCTION__, command, ifr->ifr_name));
+ ret = 0;
+ goto exit;
+ }
+
+ if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+ bytes_written = wl_android_wifi_off(net);
+ }
+ else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+ /* TBD: SCAN-ACTIVE */
+ }
+ else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+ /* TBD: SCAN-PASSIVE */
+ }
+ else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+ bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+ bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+ bytes_written = net_os_set_packet_filter(net, 1);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+ bytes_written = net_os_set_packet_filter(net, 0);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+ }
+ else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+ /* TBD: BTCOEXSCAN-START */
+ }
+ else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+ /* TBD: BTCOEXSCAN-STOP */
+ }
+ else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+ uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+ if (mode == 1)
+ net_os_set_packet_filter(net, 0); /* DHCP starts */
+ else
+ net_os_set_packet_filter(net, 1); /* DHCP ends */
+#ifdef WL_CFG80211
+ bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command);
+#endif
+ }
+ else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+ bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+ bytes_written = wldev_set_band(net, band);
+ }
+ else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+ bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ char *country_code = command + strlen(CMD_COUNTRY) + 1;
+ bytes_written = wldev_set_country(net, country_code);
+ }
+#ifdef PNO_SUPPORT
+ else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+ bytes_written = dhd_dev_pno_reset(net);
+ }
+ else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+ bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+ uint pfn_enabled = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+ bytes_written = dhd_dev_pno_enable(net, pfn_enabled);
+ }
+#endif
+ else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+ bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+ int skip = strlen(CMD_P2P_SET_NOA) + 1;
+ bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+ else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
+ bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+ int skip = strlen(CMD_P2P_SET_PS) + 1;
+ bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+ strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+ int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+ bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+ priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+ }
+#endif /* WL_CFG80211 */
+ else {
+ DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+ snprintf(command, 3, "OK");
+ bytes_written = strlen("OK");
+ }
+
+ if (bytes_written > 0) {
+ if (bytes_written > priv_cmd.total_len) {
+ DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+ bytes_written = priv_cmd.total_len;
+ } else {
+ bytes_written++;
+ }
+ priv_cmd.used_len = bytes_written;
+ if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+ DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+ ret = -EFAULT;
+ }
+ } else {
+ ret = bytes_written;
+ }
+
+exit:
+ net_os_wake_unlock(net);
+ if (command) {
+ kfree(command);
+ }
+
+ return ret;
+}
+
+int wl_android_init(void)
+{
+ int ret = 0;
+
+ dhd_msg_level = DHD_ERROR_VAL;
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+ dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#ifdef CUSTOMER_HW2
+ if (!iface_name[0]) {
+ memset(iface_name, 0, IFNAMSIZ);
+ bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+ }
+#endif /* CUSTOMER_HW2 */
+ return ret;
+}
+
+int wl_android_exit(void)
+{
+ int ret = 0;
+
+ return ret;
+}
+
+int wl_android_post_init(void)
+{
+ struct net_device *ndev;
+ int ret = 0;
+ char buf[IFNAMSIZ];
+ if (!dhd_download_fw_on_driverload) {
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ g_wifi_on = 0;
+ } else {
+ memset(buf, 0, IFNAMSIZ);
+#ifdef CUSTOMER_HW2
+ snprintf(buf, IFNAMSIZ, "%s%d", iface_name, 0);
+#else
+ snprintf(buf, IFNAMSIZ, "%s%d", "eth", 0);
+#endif
+ if ((ndev = dev_get_by_name (&init_net, buf)) != NULL) {
+ dhd_dev_init_ioctl(ndev);
+ dev_put(ndev);
+ }
+ }
+ return ret;
+}
+
+/**
+ * Functions for Android WiFi card detection
+ */
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+
+static int g_wifidev_registered = 0;
+static struct semaphore wifi_control_sem;
+static struct wifi_platform_data *wifi_control_data = NULL;
+static struct resource *wifi_irqres = NULL;
+
+static int wifi_add_dev(void);
+static void wifi_del_dev(void);
+
+int wl_android_wifictrl_func_add(void)
+{
+ int ret = 0;
+ sema_init(&wifi_control_sem, 0);
+
+ ret = wifi_add_dev();
+ if (ret) {
+ DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
+ return ret;
+ }
+ g_wifidev_registered = 1;
+
+ /* Wait for the probe callback after platform_driver_register completes, or exit with an error */
+ if (down_timeout(&wifi_control_sem, msecs_to_jiffies(1000)) != 0) {
+ ret = -EINVAL;
+ DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
+ }
+
+ return ret;
+}
+
+void wl_android_wifictrl_func_del(void)
+{
+ if (g_wifidev_registered)
+ {
+ wifi_del_dev();
+ g_wifidev_registered = 0;
+ }
+}
+
+void* wl_android_prealloc(int section, unsigned long size)
+{
+ void *alloc_ptr = NULL;
+ if (wifi_control_data && wifi_control_data->mem_prealloc) {
+ alloc_ptr = wifi_control_data->mem_prealloc(section, size);
+ if (alloc_ptr) {
+ DHD_INFO(("success alloc section %d\n", section));
+ bzero(alloc_ptr, size);
+ return alloc_ptr;
+ }
+ }
+
+ DHD_ERROR(("can't alloc section %d\n", section));
+ return 0;
+}
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr)
+{
+ if (wifi_irqres) {
+ *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
+ return (int)wifi_irqres->start;
+ }
+#ifdef CUSTOM_OOB_GPIO_NUM
+ return CUSTOM_OOB_GPIO_NUM;
+#else
+ return -1;
+#endif
+}
+
+int wifi_set_power(int on, unsigned long msec)
+{
+ DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ if (wifi_control_data && wifi_control_data->set_power) {
+ wifi_control_data->set_power(on);
+ }
+ if (msec)
+ msleep(msec);
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+int wifi_get_mac_addr(unsigned char *buf)
+{
+ DHD_ERROR(("%s\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+ if (wifi_control_data && wifi_control_data->get_mac_addr) {
+ return wifi_control_data->get_mac_addr(buf);
+ }
+ return -EOPNOTSUPP;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+void *wifi_get_country_code(char *ccode)
+{
+ DHD_TRACE(("%s\n", __FUNCTION__));
+ if (!ccode)
+ return NULL;
+ if (wifi_control_data && wifi_control_data->get_country_code) {
+ return wifi_control_data->get_country_code(ccode);
+ }
+ return NULL;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+static int wifi_set_carddetect(int on)
+{
+ DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ if (wifi_control_data && wifi_control_data->set_carddetect) {
+ wifi_control_data->set_carddetect(on);
+ }
+ return 0;
+}
+
+static int wifi_probe(struct platform_device *pdev)
+{
+ struct wifi_platform_data *wifi_ctrl =
+ (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ DHD_ERROR(("## %s\n", __FUNCTION__));
+ wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+ if (wifi_irqres == NULL)
+ wifi_irqres = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "bcm4329_wlan_irq");
+ wifi_control_data = wifi_ctrl;
+
+ wifi_set_power(1, 0); /* Power On */
+ wifi_set_carddetect(1); /* CardDetect (0->1) */
+
+ up(&wifi_control_sem);
+ return 0;
+}
+
+static int wifi_remove(struct platform_device *pdev)
+{
+ struct wifi_platform_data *wifi_ctrl =
+ (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ DHD_ERROR(("## %s\n", __FUNCTION__));
+ wifi_control_data = wifi_ctrl;
+
+ wifi_set_power(0, 0); /* Power Off */
+ wifi_set_carddetect(0); /* CardDetect (1->0) */
+
+ up(&wifi_control_sem);
+ return 0;
+}
+
+static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(0);
+#endif
+ return 0;
+}
+
+static int wifi_resume(struct platform_device *pdev)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY)
+ if (dhd_os_check_if_up(bcmsdh_get_drvdata()))
+ bcmsdh_oob_intr_set(1);
+#endif
+ return 0;
+}
+
+static struct platform_driver wifi_device = {
+ .probe = wifi_probe,
+ .remove = wifi_remove,
+ .suspend = wifi_suspend,
+ .resume = wifi_resume,
+ .driver = {
+ .name = "bcmdhd_wlan",
+ }
+};
+
+static struct platform_driver wifi_device_legacy = {
+ .probe = wifi_probe,
+ .remove = wifi_remove,
+ .suspend = wifi_suspend,
+ .resume = wifi_resume,
+ .driver = {
+ .name = "bcm4329_wlan",
+ }
+};
+
+static int wifi_add_dev(void)
+{
+ DHD_TRACE(("## Calling platform_driver_register\n"));
+ platform_driver_register(&wifi_device);
+ platform_driver_register(&wifi_device_legacy);
+ return 0;
+}
+
+static void wifi_del_dev(void)
+{
+ DHD_TRACE(("## Unregister platform_driver_register\n"));
+ platform_driver_unregister(&wifi_device);
+ platform_driver_unregister(&wifi_device_legacy);
+}
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
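
wifi_probe() above expects a platform device named "bcmdhd_wlan" (or the legacy "bcm4329_wlan") carrying a wifi_platform_data pointer and an IRQ resource named "bcmdhd_wlan_irq". A hedged sketch of the matching board-side registration follows, in the style of the Tegra board files elsewhere in this patch set; the GPIO/IRQ numbers and callback bodies are placeholders, not taken from this diff.

	/* Hypothetical board-file counterpart to wifi_probe() (sketch only).
	 * Field names follow struct wifi_platform_data from <linux/wlan_plat.h>.
	 */
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/wlan_plat.h>

	static int board_wifi_set_power(int on)       { /* toggle WL_REG_ON GPIO */ return 0; }
	static int board_wifi_set_carddetect(int val) { /* rescan the SDIO slot  */ return 0; }

	static struct wifi_platform_data board_wifi_control = {
		.set_power      = board_wifi_set_power,
		.set_carddetect = board_wifi_set_carddetect,
	};

	static struct resource board_wifi_resources[] = {
		{
			.name  = "bcmdhd_wlan_irq",	/* looked up by wifi_probe() */
			.start = 0 /* e.g. gpio_to_irq(WLAN_WOW_GPIO), placeholder */,
			.end   = 0 /* same as .start */,
			.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
		},
	};

	static struct platform_device board_wifi_device = {
		.name          = "bcmdhd_wlan",	/* matches wifi_device.driver.name */
		.id            = 1,
		.num_resources = ARRAY_SIZE(board_wifi_resources),
		.resource      = board_wifi_resources,
		.dev           = { .platform_data = &board_wifi_control },
	};

	static int __init board_wifi_init(void)
	{
		return platform_device_register(&board_wifi_device);
	}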
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 000000000000..17373b7f6d5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,57 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/**
+ * Android platform dependent functions; feel free to add Android-specific functions here
+ * (but keep the macros in dhd). Please do NOT declare functions here that are NOT exposed
+ * to dhd or cfg; define them as static in wl_android.c instead.
+ */
+
+/**
+ * wl_android_init will be called from the module init function (currently dhd_module_init);
+ * similarly, wl_android_exit will be called from the module exit function (currently dhd_module_cleanup)
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+int wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+int wl_android_wifictrl_func_add(void);
+void wl_android_wifictrl_func_del(void);
+void* wl_android_prealloc(int section, unsigned long size);
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
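
wl_android_priv_cmd() is declared here, but the ioctl hookup lives in dhd_linux.c, further up this patch. As a rough illustration only (an assumption, not the actual dhd code), a net_device ioctl handler would dispatch an assumed private command number to it like this:

	/* Hypothetical ndo_do_ioctl dispatch to wl_android_priv_cmd() (sketch only).
	 * The real routing is in dhd_linux.c; the command number is an assumption.
	 */
	#include <linux/netdevice.h>
	#include <linux/sockios.h>
	#include <wl_android.h>

	static int example_dev_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
	{
		if (cmd == SIOCDEVPRIVATE + 1)	/* assumed Android private-command slot */
			return wl_android_priv_cmd(net, ifr, cmd);
		return -EOPNOTSUPP;
	}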
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 000000000000..daa7d2605aaf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,7330 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+
+#include <net/rtnetlink.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/firmware.h>
+#include <bcmsdbus.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+static struct sdio_func *cfg80211_sdio_func;
+static struct wl_priv *wlcfg_drv_priv;
+
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define WL_4329_FW_FILE "brcm/bcm4329-fullmac-4-218-248-5.bin"
+#define WL_4329_NVRAM_FILE "brcm/bcm4329-fullmac-4-218-248-5.txt"
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAX_WAIT_TIME 1500
+static s8 ioctlbuf[WLC_IOCTL_MAXLEN];
+
+#define COEX_DHCP
+
+#if defined(COEX_DHCP)
+#define BT_DHCP_eSCO_FIX /* use New SCO/eSCO smart YG
+ * suppression
+ */
+#define BT_DHCP_USE_FLAGS /* this flag boosts wifi pkt priority
+ * to max; caution: not fair to SCO
+ */
+#define BT_DHCP_OPPR_WIN_TIME 2500 /* T1: start SCO/eSCO priority
+ * suppression
+ */
+#define BT_DHCP_FLAG_FORCE_TIME 5500 /* T2: turn off SCO/eSCO suppression
+ * (timeout)
+ */
+enum wl_cfg80211_btcoex_status {
+ BT_DHCP_IDLE,
+ BT_DHCP_START,
+ BT_DHCP_OPPR_WIN,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+};
+
+static int wl_cfg80211_btcoex_init(struct wl_priv *wl);
+static void wl_cfg80211_btcoex_deinit(struct wl_priv *wl);
+#endif
+
+/* This overrides the regulatory domains defined in the cfg80211 module (reg.c).
+ * By default, the world regulatory domain defined in reg.c sets the NL80211_RRF_PASSIVE_SCAN
+ * and NL80211_RRF_NO_IBSS flags for the 5 GHz channels (36..48 and 149..165).
+ * With those flags set, wpa_supplicant doesn't start P2P operations on 5 GHz channels.
+ * All changes to the world regulatory domain are to be made here.
+ */
+static const struct ieee80211_regdomain brcm_regdom = {
+ .n_reg_rules = 5,
+ .alpha2 = "99",
+ .reg_rules = {
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+ /* IEEE 802.11b/g, channels 12..13. No HT40
+ * channel fits here.
+ */
+ REG_RULE(2467-10, 2472+10, 20, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS),
+ /* IEEE 802.11 channel 14 - Only JP enables
+ * this and for 802.11b only
+ */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_OFDM),
+ /* IEEE 802.11a, channel 36..64 */
+ REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+ /* IEEE 802.11a, channel 100..165 */
+ REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid);
+static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac,
+ struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev, bool enabled,
+ s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code);
+static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ s32 dbm);
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ void *cookie, void (*callback) (void *cookie,
+ struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *dev);
+static void wl_notify_escan_complete(struct wl_priv *wl, bool aborted);
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct wl_priv *wl);
+static void wl_destroy_event_handler(struct wl_priv *wl);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct wl_priv *wl);
+static void wl_flush_eq(struct wl_priv *wl);
+static unsigned long wl_lock_eq(struct wl_priv *wl);
+static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags);
+static void wl_init_eq_lock(struct wl_priv *wl);
+static void wl_init_event_handler(struct wl_priv *wl);
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl);
+static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type,
+ const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct wl_priv *wl);
+static s32 wl_notify_connect_status(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+/*
+ * register/deregister sdio function
+ */
+struct sdio_func *wl_cfg80211_get_sdio_func(void);
+static void wl_clear_sdio_func(void);
+
+/*
+ * ioctl utilities
+ */
+static s32 wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
+ s32 buf_len);
+static __used s32 wl_dev_bufvar_set(struct net_device *dev, s8 *name,
+ s8 *buf, s32 len);
+static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val);
+static s32 wl_dev_intvar_get(struct net_device *dev, s8 *name,
+ s32 *retval);
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * wl profile utilities
+ */
+static s32 wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e,
+ void *data, s32 item);
+static void *wl_read_prof(struct wl_priv *wl, s32 item);
+static void wl_init_prof(struct wl_priv *wl);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+ struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct wl_priv *wl);
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v);
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct wl_priv *wl);
+
+static s32 wl_mode_to_nl80211_iftype(s32 mode);
+
+static struct wireless_dev *wl_alloc_wdev(struct device *sdiofunc_dev);
+static void wl_free_wdev(struct wl_priv *wl);
+
+static s32 wl_inform_bss(struct wl_priv *wl);
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi);
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev);
+
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr,
+ struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * wl_priv memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct wl_priv *wl);
+static void wl_deinit_priv_mem(struct wl_priv *wl);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct wl_priv *wl);
+
+/*
+ * dongle up/down , default configuration utilities
+ */
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev);
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e);
+static void wl_link_up(struct wl_priv *wl);
+static void wl_link_down(struct wl_priv *wl);
+static s32 wl_dongle_mode(struct wl_priv *wl, struct net_device *ndev, s32 iftype);
+static s32 __wl_cfg80211_up(struct wl_priv *wl);
+static s32 __wl_cfg80211_down(struct wl_priv *wl);
+static s32 wl_dongle_probecap(struct wl_priv *wl);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_dongle_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+
+/*
+ * dongle configuration utilities
+ */
+#ifndef EMBEDDED_PLATFORM
+static s32 wl_dongle_country(struct net_device *ndev, u8 ccode);
+static s32 wl_dongle_up(struct net_device *ndev, u32 up);
+static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode);
+static s32 wl_dongle_glom(struct net_device *ndev, u32 glom,
+ u32 dongle_align);
+static s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar,
+ u32 bcn_timeout);
+static s32 wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+ s32 scan_unassoc_time);
+static s32 wl_dongle_offload(struct net_device *ndev, s32 arpoe,
+ s32 arp_ol);
+static s32 wl_pattern_atoh(s8 *src, s8 *dst);
+static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode);
+static s32 wl_update_wiphybands(struct wl_priv *wl);
+#endif /* !EMBEDDED_PLATFORM */
+static __used void wl_dongle_poweron(struct wl_priv *wl);
+static __used void wl_dongle_poweroff(struct wl_priv *wl);
+static s32 wl_config_dongle(struct wl_priv *wl, bool need_lock);
+
+/*
+ * iscan handler
+ */
+static void wl_iscan_timer(unsigned long data);
+static void wl_term_iscan(struct wl_priv *wl);
+static s32 wl_init_scan(struct wl_priv *wl);
+static s32 wl_iscan_thread(void *data);
+static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request,
+ u16 action);
+static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request);
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan);
+static s32 wl_invoke_iscan(struct wl_priv *wl);
+static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+ struct wl_scan_results **bss_list);
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted);
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan);
+static s32 wl_iscan_done(struct wl_priv *wl);
+static s32 wl_iscan_pending(struct wl_priv *wl);
+static s32 wl_iscan_inprogress(struct wl_priv *wl);
+static s32 wl_iscan_aborted(struct wl_priv *wl);
+
+/*
+ * fw/nvram downloading handler
+ */
+static void wl_init_fw(struct wl_fw_ctrl *fw);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * update pmklist to dongle
+ */
+static __used s32 wl_update_pmklist(struct net_device *dev,
+ struct wl_pmk_list *pmk_list, s32 err);
+
+/*
+ * debugfs support
+ */
+static int wl_debugfs_add_netdev_params(struct wl_priv *wl);
+static void wl_debugfs_remove_netdev(struct wl_priv *wl);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct wl_priv *wl, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+#define CHECK_SYS_UP(wlpriv) \
+do { \
+ if (unlikely(!wl_get_drv_status(wlpriv, READY))) { \
+ WL_INFO(("device is not ready : status (%d)\n", \
+ (int)wlpriv->status)); \
+ return -EIO; \
+ } \
+} while (0)
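+/*
+ * Usage sketch: CHECK_SYS_UP() makes the calling cfg80211 op bail out with
+ * -EIO whenever the driver has not reached the READY state, e.g.
+ *
+ *	static s32 wl_cfg80211_foo(struct wiphy *wiphy)
+ *	{
+ *		struct wl_priv *wl = wiphy_priv(wiphy);
+ *
+ *		CHECK_SYS_UP(wl);
+ *		...
+ *	}
+ *
+ * (wl_cfg80211_foo() is a hypothetical example; the real ops below follow
+ * this pattern.)
+ */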
+
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX 50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+ "SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+ "DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+ "REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+ "BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+ "TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+ "EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+ "BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+ "PFN_NET_LOST",
+ "RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+ "IBSS_ASSOC",
+ "RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+ "PROBREQ_MSG",
+ "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+ "EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+ "UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+ "WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+ "RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+ "ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+ "WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+ "WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+ "WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+ "WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+ "WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
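+/*
+ * Note: the strings above appear to be indexed by the firmware WLC_E_*
+ * event code (i.e. wl_dbg_estr[event_type]), so their order must track the
+ * firmware's event numbering; this is an assumption based on the table
+ * layout, not something enforced here.
+ */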
+#endif /* WL_DBG_LEVEL */
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
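+/*
+ * CHAN2G() takes the center frequency directly from the caller, while
+ * CHAN5G() derives it from the channel number via the usual 5 GHz mapping
+ * (center_freq = 5000 + 5 * channel, in MHz). Both leave the regulatory
+ * flags to the caller and cap max_power at 30 dBm.
+ */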
+
+#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+ { \
+ .bitrate = RATE_TO_BASE100KBPS(_rateid), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
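+/*
+ * RATE_TO_BASE100KBPS() converts a WLC_RATE_* value (assumed to be encoded
+ * in 500 kb/s units, as is common for 802.11 rate fields) into the
+ * 100 kb/s units used by struct ieee80211_rate.bitrate; e.g. a value of 22
+ * (11 Mb/s) becomes a bitrate of 110.
+ */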
+
+static struct ieee80211_rate __wl_rates[] = {
+ RATETAB_ENT(WLC_RATE_1M, 0),
+ RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(WLC_RATE_6M, 0),
+ RATETAB_ENT(WLC_RATE_9M, 0),
+ RATETAB_ENT(WLC_RATE_12M, 0),
+ RATETAB_ENT(WLC_RATE_18M, 0),
+ RATETAB_ENT(WLC_RATE_24M, 0),
+ RATETAB_ENT(WLC_RATE_36M, 0),
+ RATETAB_ENT(WLC_RATE_48M, 0),
+ RATETAB_ENT(WLC_RATE_54M, 0)
+};
+
+#define wl_a_rates (__wl_rates + 4)
+#define wl_a_rates_size 8
+#define wl_g_rates (__wl_rates + 0)
+#define wl_g_rates_size 12
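+/*
+ * The 5 GHz table skips the four DSSS/CCK entries at the start of
+ * __wl_rates, so wl_a_rates starts at the 6 Mb/s OFDM rate and exposes
+ * 8 entries, while wl_g_rates covers all 12 entries for 2.4 GHz.
+ */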
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+ .channels = __wl_2ghz_channels,
+ .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+ .bitrates = wl_g_rates,
+ .n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+ .band = IEEE80211_BAND_5GHZ,
+ .channels = __wl_5ghz_a_channels,
+ .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_ADHOC] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_AP_VLAN] = {
+ /* copy AP */
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ }
+};
+
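+/*
+ * Despite the _BE suffix, these helpers convert the key fields between host
+ * and dongle byte order using the htod32()/dtoh32()/htod16()/dtoh16()
+ * macros; when host and dongle already share the same byte order they are
+ * presumably no-ops.
+ */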
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* For debug: dump the contents of the encoded WPS IE buffer */
+static void
+wl_validate_wps_ie(char *wps_ie, bool *pbc)
+{
+ #define WPS_IE_FIXED_LEN 6
+ u16 len = (u16) wps_ie[TLV_LEN_OFF];
+	u8 *subel = wps_ie + WPS_IE_FIXED_LEN;
+ u16 subelt_id;
+ u16 subelt_len;
+ u16 val;
+ u8 *valptr = (uint8*) &val;
+
+ WL_DBG(("wps_ie len=%d\n", len));
+
+ len -= 4; /* for the WPS IE's OUI, oui_type fields */
+
+ while (len >= 4) { /* must have attr id, attr len fields */
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_id = HTON16(val);
+
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_len = HTON16(val);
+
+ len -= 4; /* for the attr id, attr len fields */
+ len -= subelt_len; /* for the remaining fields in this attribute */
+ WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+ subel, subelt_id, subelt_len));
+
+ if (subelt_id == WPS_ID_VERSION) {
+ WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_REQ_TYPE) {
+ WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			size_t namelen = min_t(size_t, subelt_len,
+				sizeof(devname) - 1);
+			memcpy(devname, subel, namelen);
+			devname[namelen] = '\0';
+			WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+				devname, subelt_len));
+ } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+ *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+ } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+ ": cat=%u\n", HTON16(val)));
+ } else {
+ WL_DBG((" unknown attr 0x%x\n", subelt_id));
+ }
+
+ subel += subelt_len;
+ }
+}
+
+static struct net_device* wl_cfg80211_add_monitor_if(char *name)
+{
+ int ret = 0;
+ struct net_device* ndev = NULL;
+
+ ret = dhd_add_monitor(name, &ndev);
+ WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+ return ndev;
+}
+
+static struct net_device *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ s32 err;
+ s32 timeout = -1;
+ s32 wlif_type = -1;
+ s32 index = 0;
+ s32 mode = 0;
+ chanspec_t chspec;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *_ndev;
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ int (*net_attach)(dhd_pub_t *dhdp, int ifidx);
+ bool rollback_lock = false;
+
+ WL_DBG(("if name: %s, type: %d\n", name, type));
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ WL_ERR(("Unsupported interface type\n"));
+ mode = WL_MODE_IBSS;
+ return NULL;
+ case NL80211_IFTYPE_MONITOR:
+ return wl_cfg80211_add_monitor_if(name);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ wlif_type = WL_P2P_IF_CLIENT;
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ wlif_type = WL_P2P_IF_GO;
+ mode = WL_MODE_AP;
+ break;
+	default:
+		WL_ERR(("Unsupported interface type\n"));
+		return NULL;
+ }
+
+ if (!name) {
+ WL_ERR(("name is NULL\n"));
+ return NULL;
+ }
+ if (wl->p2p_supported && (wlif_type != -1)) {
+ if (wl_get_p2p_status(wl, IF_DELETING)) {
+			/* Wait till IF_DEL is complete;
+			 * release the lock so the unregister can proceed.
+			 */
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+			WL_INFO(("%s: Released the lock; waiting till IF_DEL completes\n",
+				__func__));
+ timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+ (wl_get_p2p_status(wl, IF_DELETING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+ /* put back the rtnl_lock again */
+ if (rollback_lock) {
+ rtnl_lock();
+ rollback_lock = false;
+ }
+ if (timeout > 0) {
+				WL_ERR(("IF_DEL completed successfully\n"));
+
+ } else {
+				WL_ERR(("IF_DEL wait timed out, return -EAGAIN\n"));
+ return ERR_PTR(-EAGAIN);
+ }
+ }
+ if (!p2p_on(wl) && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+ p2p_on(wl) = true;
+ wl_cfgp2p_set_firm_p2p(wl);
+ wl_cfgp2p_init_discovery(wl);
+ }
+ memset(wl->p2p->vir_ifname, 0, IFNAMSIZ);
+ strncpy(wl->p2p->vir_ifname, name, IFNAMSIZ - 1);
+ wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+
+		/* Temporarily use channel 11; the GO channel may be changed later via the set_channel API */
+ chspec = wf_chspec_aton(WL_P2P_TEMP_CHAN);
+
+ /* For P2P mode, use P2P-specific driver features to create the
+ * bss: "wl p2p_ifadd"
+ */
+ wl_set_p2p_status(wl, IF_ADD);
+ err = wl_cfgp2p_ifadd(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
+
+ if (unlikely(err))
+ return ERR_PTR(-ENOMEM);
+
+ timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+ (wl_get_p2p_status(wl, IF_ADD) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) {
+
+ struct wireless_dev *vwdev;
+ vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+ if (unlikely(!vwdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return ERR_PTR(-ENOMEM);
+ }
+ vwdev->wiphy = wl->wdev->wiphy;
+ WL_INFO((" virtual interface(%s) is created memalloc done \n",
+ wl->p2p->vir_ifname));
+ index = alloc_idx_vwdev(wl);
+ wl->vwdev[index] = vwdev;
+ vwdev->iftype =
+ (wlif_type == WL_P2P_IF_CLIENT) ? NL80211_IFTYPE_STATION
+ : NL80211_IFTYPE_AP;
+ _ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+ _ndev->ieee80211_ptr = vwdev;
+ SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy));
+ vwdev->netdev = _ndev;
+ wl_set_drv_status(wl, READY);
+ wl->p2p->vif_created = true;
+ set_mode_by_netdev(wl, _ndev, mode);
+ net_attach = wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION);
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ if (net_attach && !net_attach(dhd, _ndev->ifindex)) {
+ WL_DBG((" virtual interface(%s) is "
+ "created net attach done\n", wl->p2p->vir_ifname));
+ } else {
+ /* put back the rtnl_lock again */
+ if (rollback_lock)
+ rtnl_lock();
+ goto fail;
+ }
+ /* put back the rtnl_lock again */
+ if (rollback_lock)
+ rtnl_lock();
+ return _ndev;
+
+ } else {
+ wl_clr_p2p_status(wl, IF_ADD);
+ WL_ERR((" virtual interface(%s) is not created \n", wl->p2p->vir_ifname));
+ memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
+ wl->p2p->vif_created = false;
+ }
+ }
+fail:
+ return ERR_PTR(-ENODEV);
+}
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ether_addr p2p_mac;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 timeout = -1;
+ s32 ret = 0;
+ WL_DBG(("Enter\n"));
+ if (wl->p2p_supported) {
+ memcpy(p2p_mac.octet, wl->p2p->int_addr.octet, ETHER_ADDR_LEN);
+ if (wl->p2p->vif_created) {
+ if (wl_get_drv_status(wl, SCANNING)) {
+ wl_cfg80211_scan_abort(wl, dev);
+ }
+ wldev_iovar_setint(dev, "mpc", 1);
+ wl_set_p2p_status(wl, IF_DELETING);
+ ret = wl_cfgp2p_ifdel(wl, &p2p_mac);
+ if (ret) {
+				/* Firmware could not delete the interface, so we will not get a
+				 * WLC_E_IF event to clean up the dhd virtual network interface.
+				 * Do it here instead. A failure from the firmware forces the
+				 * application to do an ifconfig <inter> down/up sequence, which
+				 * reloads the firmware, but we should still clean up the Linux
+				 * virtual network interfaces here.
+				 */
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ WL_ERR(("Firmware returned an error from p2p_ifdel\n"));
+ WL_ERR(("try to remove linux virtual interface %s\n", dev->name));
+ dhd_del_if(dhd->info, dhd_net2idx(dhd->info, dev));
+ }
+
+ /* Wait for any pending scan req to get aborted from the sysioc context */
+ timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+ (wl_get_p2p_status(wl, IF_DELETING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(wl, IF_DELETING)) {
+ WL_DBG(("IFDEL operation done\n"));
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ }
+ ret = dhd_del_monitor(dev);
+ }
+ }
+ return ret;
+}
+
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ s32 ap = 0;
+ s32 infra = 0;
+ s32 err = BCME_OK;
+ s32 timeout = -1;
+ s32 wlif_type;
+ s32 mode = 0;
+ chanspec_t chspec;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ WL_DBG(("Enter \n"));
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ ap = 1;
+ WL_ERR(("type (%d) : currently we do not support this type\n",
+ type));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ ap = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ if (ap) {
+ set_mode_by_netdev(wl, ndev, mode);
+ if (wl->p2p_supported && wl->p2p->vif_created) {
+ WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", wl->p2p->vif_created,
+ p2p_on(wl)));
+ chspec = wf_chspec_aton(WL_P2P_TEMP_CHAN);
+ wlif_type = ap ? WL_P2P_IF_GO : WL_P2P_IF_CLIENT;
+ WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n",
+ ndev->name, ap, infra, type));
+ wl_set_p2p_status(wl, IF_CHANGING);
+ wl_clr_p2p_status(wl, IF_CHANGED);
+ err = wl_cfgp2p_ifchange(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
+ timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+ (wl_get_p2p_status(wl, IF_CHANGED) == true),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ set_mode_by_netdev(wl, ndev, mode);
+ wl_clr_p2p_status(wl, IF_CHANGING);
+ wl_clr_p2p_status(wl, IF_CHANGED);
+ } else if (ndev == wl_to_prmry_ndev(wl) &&
+ !wl_get_drv_status(wl, AP_CREATED)) {
+ wl_set_drv_status(wl, AP_CREATING);
+ if (!wl->ap_info &&
+ !(wl->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+ WL_ERR(("struct ap_saved_ie allocation failed\n"));
+ return -ENOMEM;
+ }
+ } else {
+ WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+ return -EINVAL;
+ }
+ }
+
+ ndev->ieee80211_ptr->iftype = type;
+ return 0;
+}
+
+s32
+wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
+int (*_net_attach)(dhd_pub_t *dhdp, int ifidx))
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ s32 ret = BCME_OK;
+ if (!ndev) {
+ WL_ERR(("net is NULL\n"));
+ return 0;
+ }
+ if (wl->p2p_supported) {
+		WL_DBG(("IF_ADD event called from dongle, old interface name: %s, "
+			"new name: %s\n", ndev->name, wl->p2p->vir_ifname));
+ /* Assign the net device to CONNECT BSSCFG */
+ strncpy(ndev->name, wl->p2p->vir_ifname, IFNAMSIZ - 1);
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = ndev;
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = bssidx;
+ wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION) = _net_attach;
+ ndev->ifindex = idx;
+ wl_clr_p2p_status(wl, IF_ADD);
+
+ wake_up_interruptible(&wl->dongle_event_wait);
+ }
+ return ret;
+}
+
+s32
+wl_cfg80211_notify_ifdel(struct net_device *ndev)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ bool rollback_lock = false;
+ s32 index = 0;
+
+ if (!ndev || !ndev->name) {
+ WL_ERR(("net is NULL\n"));
+ return 0;
+ }
+
+ if (p2p_is_on(wl) && wl->p2p->vif_created) {
+ if (wl->scan_request) {
+ /* Abort any pending scan requests */
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (!rtnl_is_locked()) {
+ rtnl_lock();
+ rollback_lock = true;
+ }
+ WL_DBG(("ESCAN COMPLETED\n"));
+ wl_notify_escan_complete(wl, true);
+ if (rollback_lock)
+ rtnl_unlock();
+ }
+		WL_ERR(("IF_DEL event called from dongle, net %p, vif name: %s\n",
+			ndev, wl->p2p->vir_ifname));
+
+ memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
+ index = wl_cfgp2p_find_idx(wl, ndev);
+ wl_to_p2p_bss_ndev(wl, index) = NULL;
+ wl_to_p2p_bss_bssidx(wl, index) = 0;
+ wl->p2p->vif_created = false;
+		wl_cfgp2p_clear_management_ie(wl, index);
+ wl_clr_p2p_status(wl, IF_DELETING);
+ WL_DBG(("index : %d\n", index));
+
+ }
+ /* Wake up any waiting thread */
+ wake_up_interruptible(&wl->dongle_event_wait);
+
+ return 0;
+}
+
+s32
+wl_cfg80211_is_progress_ifadd(void)
+{
+ s32 is_progress = 0;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_ADD))
+ is_progress = 1;
+ return is_progress;
+}
+
+s32
+wl_cfg80211_is_progress_ifchange(void)
+{
+ s32 is_progress = 0;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_CHANGING))
+ is_progress = 1;
+ return is_progress;
+}
+
+
+s32
+wl_cfg80211_notify_ifchange(void)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_CHANGING)) {
+ wl_set_p2p_status(wl, IF_CHANGED);
+ wake_up_interruptible(&wl->dongle_event_wait);
+ }
+ return 0;
+}
+
+static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
+{
+ u32 n_ssids = request->n_ssids;
+ u32 n_channels = request->n_channels;
+ u16 channel;
+ chanspec_t chanspec;
+ s32 i, offset;
+ char *ptr;
+ wlc_ssid_t ssid;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+ memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+ WL_SCAN(("Preparing Scan request\n"));
+ WL_SCAN(("nprobes=%d\n", params->nprobes));
+ WL_SCAN(("active_time=%d\n", params->active_time));
+ WL_SCAN(("passive_time=%d\n", params->passive_time));
+ WL_SCAN(("home_time=%d\n", params->home_time));
+ WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+
+ /* Copy channel array if applicable */
+ WL_SCAN(("### List of channelspecs to scan ###\n"));
+ if (n_channels > 0) {
+ for (i = 0; i < n_channels; i++) {
+ chanspec = 0;
+ channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ if (request->channels[i]->band == IEEE80211_BAND_2GHZ)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40) {
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ } else {
+ chanspec |= WL_CHANSPEC_BW_40;
+ if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+ else
+ chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+ }
+
+ params->channel_list[i] = channel;
+ params->channel_list[i] &= WL_CHANSPEC_CHAN_MASK;
+ params->channel_list[i] |= chanspec;
+ WL_SCAN(("Chan : %d, Channel spec: %x \n",
+ channel, params->channel_list[i]));
+ params->channel_list[i] = htod16(params->channel_list[i]);
+ }
+ } else {
+ WL_SCAN(("Scanning all channels\n"));
+ }
+
+ /* Copy ssid array if applicable */
+ WL_SCAN(("### List of SSIDs to scan ###\n"));
+ if (n_ssids > 0) {
+ offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+ offset = roundup(offset, sizeof(u32));
+ ptr = (char*)params + offset;
+ for (i = 0; i < n_ssids; i++) {
+ memset(&ssid, 0, sizeof(wlc_ssid_t));
+ ssid.SSID_len = request->ssids[i].ssid_len;
+ memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+ if (!ssid.SSID_len)
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ else
+ WL_SCAN(("%d: scan for %s size =%d\n", i,
+ ssid.SSID, ssid.SSID_len));
+ memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
+ }
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
+ }
+	/* Pack the SSID count (upper bits) and channel count (lower bits) into channel_num */
+ params->channel_num =
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+}
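+/*
+ * Layout note for wl_scan_prep(): the channel list is written in place in
+ * params->channel_list[], and any SSIDs follow it at the next 32-bit aligned
+ * offset, which is why the callers size their buffers as
+ * "fixed part + padded channel list + n_ssids * sizeof(wlc_ssid_t)".
+ */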
+
+static s32
+wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action)
+{
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size =
+ (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
+ struct wl_iscan_params *params;
+ s32 err = 0;
+
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+		/* Allocate space for the channel list in the wl_iscan_params struct */
+		if (n_channels % 2)
+			/* If n_channels is odd, add a u16 of padding */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+
+ /* Allocate space for populating ssids in wl_iscan_params struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ }
+ params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
+ if (!params) {
+ return -ENOMEM;
+ }
+
+ if (request != NULL)
+ wl_scan_prep(&params->params, request);
+
+ params->version = htod32(ISCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->scan_duration = htod16(0);
+
+ if (params_size + sizeof("iscan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length is not sufficient\n"));
+ err = -ENOMEM;
+ goto done;
+ }
+ err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+ iscan->ioctl_buf, WLC_IOCTL_MEDLEN);
+ if (unlikely(err)) {
+ if (err == -EBUSY) {
+ WL_ERR(("system busy : iscan canceled\n"));
+ } else {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+done:
+ kfree(params);
+ return err;
+}
+
+static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 passive_scan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_SCANING;
+
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_DBG(("error (%d)\n", err));
+ return err;
+ }
+ wl->iscan_kickstart = true;
+ wl_run_iscan(iscan, request, WL_SCAN_ACTION_START);
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32
+wl_run_escan(struct wl_priv *wl, struct net_device *ndev,
+ struct cfg80211_scan_request *request, uint16 action)
+{
+ s32 err = BCME_OK;
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+ wl_escan_params_t *params;
+ struct cfg80211_scan_request *scan_request = wl->scan_request;
+ u32 num_chans = 0;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ u32 i;
+ u16 *default_chan_list = NULL;
+ struct net_device *dev = NULL;
+ WL_DBG(("Enter \n"));
+
+
+ if (!wl->p2p_supported || ((ndev == wl_to_prmry_ndev(wl)) &&
+ !p2p_scan(wl))) {
+ /* LEGACY SCAN TRIGGER */
+ WL_SCAN((" LEGACY E-SCAN START\n"));
+
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+			/* Allocate space for the channel list in the wl_escan_params struct */
+			if (n_channels % 2)
+				/* If n_channels is odd, add a u16 of padding */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for the SSID list in the wl_escan_params struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ }
+ params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if (request != NULL)
+ wl_scan_prep(&params->params, request);
+ params->version = htod32(ESCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->sync_id = htod16(0x1234);
+ if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length not sufficient\n"));
+ kfree(params);
+ err = -ENOMEM;
+ goto exit;
+ }
+ err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+ wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN);
+ if (unlikely(err))
+ WL_ERR((" Escan set error (%d)\n", err));
+ kfree(params);
+ }
+ else if (p2p_on(wl) && p2p_scan(wl)) {
+ /* P2P SCAN TRIGGER */
+ if (scan_request && scan_request->n_channels) {
+ num_chans = scan_request->n_channels;
+			WL_SCAN((" number of channels : %d\n", num_chans));
+ default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+ GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ WL_ERR(("channel list allocation failed \n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ for (i = 0; i < num_chans; i++)
+ {
+ default_chan_list[i] =
+ ieee80211_frequency_to_channel(
+ scan_request->channels[i]->center_freq);
+ }
+ if (num_chans == 3 && (
+ (default_chan_list[0] == SOCIAL_CHAN_1) &&
+ (default_chan_list[1] == SOCIAL_CHAN_2) &&
+ (default_chan_list[2] == SOCIAL_CHAN_3))) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ WL_INFO(("P2P SEARCH PHASE START \n"));
+ } else if ((dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)) &&
+ (get_mode_by_netdev(wl, dev) == WL_MODE_AP)) {
+ /* If you are already a GO, then do SEARCH only */
+				WL_INFO(("Already a GO. Do SEARCH only\n"));
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ } else {
+ WL_INFO(("P2P SCAN STATE START \n"));
+ }
+
+ }
+ err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list,
+ search_state, action,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ kfree(default_chan_list);
+ }
+exit:
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ return err;
+}
+
+
+static s32
+wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = BCME_OK;
+ s32 passive_scan;
+ wl_scan_results_t *results;
+ WL_SCAN(("Enter \n"));
+
+ wl->escan_info.wiphy = wiphy;
+ wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ results = (wl_scan_results_t *) wl->escan_info.escan_buf;
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+ err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START);
+ return err;
+}
+
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct cfg80211_ssid *ssids;
+ struct wl_scan_req *sr = wl_to_sr(wl);
+ wpa_ie_fixed_t *wps_ie;
+ s32 passive_scan;
+ bool iscan_req;
+ bool escan_req;
+ bool spec_scan;
+ bool p2p_ssid;
+ s32 err = 0;
+ s32 i;
+ u32 wpsie_len = 0;
+ u8 wpsie[IE_MAX_LEN];
+
+ WL_DBG(("Enter wiphy (%p)\n", wiphy));
+ if (unlikely(wl_get_drv_status(wl, SCANNING))) {
+ WL_ERR(("Scanning already : status (%d)\n", (int)wl->status));
+ return -EAGAIN;
+ }
+ if (unlikely(wl_get_drv_status(wl, SCAN_ABORTING))) {
+ WL_ERR(("Scanning being aborted : status (%d)\n",
+ (int)wl->status));
+ return -EAGAIN;
+ }
+ if (request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+ WL_ERR(("n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+ return -EOPNOTSUPP;
+ }
+
+ /* Arm scan timeout timer */
+ mod_timer(&wl->scan_timeout, jiffies + WL_SCAN_TIMER_INTERVAL_MS * HZ / 1000);
+	iscan_req = false;
+	escan_req = false;
+	spec_scan = false;
+ if (request) { /* scan bss */
+ ssids = request->ssids;
+ if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) {
+ iscan_req = true;
+ } else if (wl->escan_on) {
+ escan_req = true;
+ p2p_ssid = false;
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len && IS_P2P_SSID(ssids[i].ssid)) {
+ p2p_ssid = true;
+ break;
+ }
+ }
+ if (p2p_ssid) {
+ if (wl->p2p_supported) {
+ /* p2p scan trigger */
+ if (p2p_on(wl) == false) {
+ /* p2p on at the first time */
+ p2p_on(wl) = true;
+ wl_cfgp2p_set_firm_p2p(wl);
+ }
+ p2p_scan(wl) = true;
+ }
+ } else {
+				/* Legacy scan trigger, so disable p2p discovery
+				 * if it is currently on.
+				 */
+ if (wl->p2p_supported) {
+ p2p_scan(wl) = false;
+					/* If the net device is not the primary one and p2p
+					 * is on, do the p2p scan using P2PAPI_BSSCFG_DEVICE.
+					 */
+ if (p2p_on(wl) && (ndev != wl_to_prmry_ndev(wl)))
+ p2p_scan(wl) = true;
+
+ if (p2p_scan(wl) == false) {
+ if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ err = wl_cfgp2p_discover_enable_search(wl,
+ false);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+
+ }
+ }
+ }
+ if (!wl->p2p_supported || !p2p_scan(wl)) {
+ if (ndev == wl_to_prmry_ndev(wl)) {
+ /* find the WPSIE */
+ memset(wpsie, 0, sizeof(wpsie));
+ if ((wps_ie = wl_cfgp2p_find_wpsie(
+ (u8 *)request->ie,
+ request->ie_len)) != NULL) {
+ wpsie_len =
+ wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ memcpy(wpsie, wps_ie, wpsie_len);
+ } else {
+ wpsie_len = 0;
+ }
+ err = wl_cfgp2p_set_management_ie(wl, ndev, -1,
+ VNDR_IE_PRBREQ_FLAG, wpsie, wpsie_len);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+ }
+ }
+ } else { /* scan in ibss */
+ /* we don't do iscan in ibss */
+ ssids = this_ssid;
+ }
+ wl->scan_request = request;
+ wl_set_drv_status(wl, SCANNING);
+ if (iscan_req) {
+ err = wl_do_iscan(wl, request);
+ if (likely(!err))
+ return err;
+ else
+ goto scan_out;
+ } else if (escan_req) {
+ if (wl->p2p_supported) {
+ if (p2p_on(wl) && p2p_scan(wl)) {
+
+ err = wl_cfgp2p_enable_discovery(wl, ndev,
+ request->ie, request->ie_len);
+
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+ err = wl_do_escan(wl, wiphy, ndev, request);
+ if (likely(!err))
+ return err;
+ else
+ goto scan_out;
+
+
+ } else {
+ memset(&sr->ssid, 0, sizeof(sr->ssid));
+ sr->ssid.SSID_len =
+ min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
+ if (sr->ssid.SSID_len) {
+ memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
+ sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
+ WL_SCAN(("Specific scan ssid=\"%s\" len=%d\n",
+ sr->ssid.SSID, sr->ssid.SSID_len));
+ spec_scan = true;
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
+ }
+ WL_SCAN(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_SCAN(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
+ goto scan_out;
+ }
+ err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid,
+ sizeof(sr->ssid), false);
+ if (err) {
+ if (err == -EBUSY) {
+ WL_ERR(("system busy : scan for \"%s\" "
+ "canceled\n", sr->ssid.SSID));
+ } else {
+ WL_ERR(("WLC_SCAN error (%d)\n", err));
+ }
+ goto scan_out;
+ }
+ }
+
+ return 0;
+
+scan_out:
+ wl_clr_drv_status(wl, SCANNING);
+ wl->scan_request = NULL;
+ return err;
+}
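+/*
+ * __wl_cfg80211_scan() picks one of three paths: the iscan thread
+ * (wl->iscan_on with a broadcast-style request), the escan/p2p escan
+ * machinery (wl->escan_on), or a one-shot WLC_SCAN ioctl for a single
+ * specific or broadcast SSID; scan_out undoes the SCANNING state on any
+ * failure.
+ */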
+
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ WL_DBG(("Enter \n"));
+ CHECK_SYS_UP(wl);
+
+ err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("scan error (%d)\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
+{
+ s8 buf[WLC_IOCTL_SMLEN];
+ u32 len;
+ s32 err = 0;
+
+ val = htod32(val);
+ len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+ BUG_ON(unlikely(!len));
+
+ err = wldev_ioctl(dev, WLC_SET_VAR, buf, len, false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+
+ return err;
+}
+
+static s32
+wl_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
+{
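+	/*
+	 * The iovar name and the returned 32-bit value share one buffer:
+	 * bcm_mkiovar() writes the name into var.buf, the firmware overwrites
+	 * the start of the buffer with the result, and the value is read back
+	 * through var.val and converted to host byte order.
+	 */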
+ union {
+ s8 buf[WLC_IOCTL_SMLEN];
+ s32 val;
+ } var;
+ u32 len;
+ u32 data_null;
+ s32 err = 0;
+
+ len = bcm_mkiovar(name, (char *)(&data_null), 0,
+ (char *)(&var), sizeof(var.buf));
+ BUG_ON(unlikely(!len));
+ err = wldev_ioctl(dev, WLC_GET_VAR, &var, len, false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ *retval = dtoh32(var.val);
+
+ return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+ s32 err = 0;
+
+ err = wl_dev_intvar_set(dev, "rtsthresh", rts_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+ s32 err = 0;
+
+ err = wl_dev_intvar_set(dev, "fragthresh", frag_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+ s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);	/* long vs. short retry limit */
+
+ retry = htod32(retry);
+ err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), false);
+ if (unlikely(err)) {
+ WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (wl->conf->rts_threshold != wiphy->rts_threshold)) {
+ wl->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, wl->conf->rts_threshold);
+		if (err)
+			return err;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (wl->conf->frag_threshold != wiphy->frag_threshold)) {
+ wl->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, wl->conf->frag_threshold);
+		if (err)
+			return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_LONG &&
+ (wl->conf->retry_long != wiphy->retry_long)) {
+ wl->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, wl->conf->retry_long, true);
+		if (err)
+			return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT &&
+ (wl->conf->retry_short != wiphy->retry_short)) {
+ wl->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, wl->conf->retry_short, false);
+		if (err) {
+			return err;
+		}
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct cfg80211_bss *bss;
+ struct ieee80211_channel *chan;
+ struct wl_join_params join_params;
+ struct cfg80211_ssid ssid;
+ s32 scan_retry = 0;
+ s32 err = 0;
+ bool rollback_lock = false;
+
+ WL_TRACE(("In\n"));
+ CHECK_SYS_UP(wl);
+ if (params->bssid) {
+ WL_ERR(("Invalid bssid\n"));
+ return -EOPNOTSUPP;
+ }
+ bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+ if (!bss) {
+ memcpy(ssid.ssid, params->ssid, params->ssid_len);
+ ssid.ssid_len = params->ssid_len;
+ do {
+ if (unlikely
+ (__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+ -EBUSY)) {
+ wl_delay(150);
+ } else {
+ break;
+ }
+ } while (++scan_retry < WL_SCAN_RETRY_MAX);
+ /* to allow scan_inform to propagate to cfg80211 plane */
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+
+		/* wait 4 seconds until the scan is done */
+ schedule_timeout_interruptible(4 * HZ);
+ if (rollback_lock)
+ rtnl_lock();
+ bss = cfg80211_get_ibss(wiphy, NULL,
+ params->ssid, params->ssid_len);
+ }
+ if (bss) {
+ wl->ibss_starter = false;
+ WL_DBG(("Found IBSS\n"));
+ } else {
+ wl->ibss_starter = true;
+ }
+ chan = params->channel;
+ if (chan)
+ wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ /*
+ * Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
+ memset(&join_params, 0, sizeof(join_params));
+ memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+ params->ssid_len);
+ join_params.ssid.SSID_len = htod32(params->ssid_len);
+ if (params->bssid)
+ memcpy(&join_params.params.bssid, params->bssid,
+ ETHER_ADDR_LEN);
+ else
+ memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+
+ err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+ sizeof(join_params), false);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ wl_link_down(wl);
+
+ return err;
+}
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ else
+ val = WPA_AUTH_DISABLED;
+
+ if (is_wps_conn(sme))
+ val = WPA_AUTH_DISABLED;
+
+ WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wpa_auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec->wpa_versions = sme->crypto.wpa_versions;
+ return err;
+}
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ switch (sme->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ val = 0;
+ WL_DBG(("open system\n"));
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ val = 1;
+ WL_DBG(("shared key\n"));
+ break;
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ val = 2;
+ WL_DBG(("automatic\n"));
+ break;
+	case NL80211_AUTHTYPE_NETWORK_EAP:
+		WL_DBG(("network eap\n"));
+		/* fall through */
+ default:
+ val = 2;
+ WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+ break;
+ }
+
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec->auth_type = sme->auth_type;
+ return err;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.n_ciphers_pairwise) {
+ switch (sme->crypto.ciphers_pairwise[0]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ pval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pval = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ }
+ if (sme->crypto.cipher_group) {
+ switch (sme->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ gval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ gval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ gval = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+
+ WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+ if (is_wps_conn(sme)) {
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+ } else {
+		WL_DBG(("Not a WPS connection; setting wsec to pval | gval\n"));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec",
+ pval | gval, bssidx);
+ }
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+ sec->cipher_group = sme->crypto.cipher_group;
+
+ return err;
+}
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.n_akm_suites) {
+ err = wl_dev_intvar_get(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ return err;
+ }
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA_AUTH_PSK;
+ break;
+ default:
+				WL_ERR(("invalid akm suite (%d)\n",
+					sme->crypto.akm_suites[0]));
+ return -EINVAL;
+ }
+ } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA2_AUTH_PSK;
+ break;
+ default:
+				WL_ERR(("invalid akm suite (%d)\n",
+					sme->crypto.akm_suites[0]));
+ return -EINVAL;
+ }
+ }
+ WL_DBG(("setting wpa_auth to %d\n", val));
+
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("could not set wpa_auth (%d)\n", err));
+ return err;
+ }
+ }
+ sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec->wpa_auth = sme->crypto.akm_suites[0];
+
+ return err;
+}
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ struct wl_wsec_key key;
+ s32 val;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key len (%d)\n", sme->key_len));
+ if (sme->key_len) {
+ sec = wl_read_prof(wl, WL_PROF_SEC);
+ WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise));
+ if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2)) &&
+ (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+ WLAN_CIPHER_SUITE_WEP104))) {
+ memset(&key, 0, sizeof(key));
+ key.len = (u32) sme->key_len;
+ key.index = (u32) sme->key_idx;
+ if (unlikely(key.len > sizeof(key.data))) {
+ WL_ERR(("Too long key length (%u)\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, sme->key, key.len);
+ key.flags = WL_PRIMARY_KEY;
+ switch (sec->cipher_pairwise) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ default:
+ WL_ERR(("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ /* Set the new key/index */
+ WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo));
+ WL_DBG(("key \"%s\"\n", key.data));
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ ioctlbuf, sizeof(ioctlbuf), bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
+ WL_DBG(("set auth_type to shared key\n"));
+ val = 1; /* shared key */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ }
+ }
+ }
+ return err;
+}
+
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct ieee80211_channel *chan = sme->channel;
+ wl_extjoin_params_t *ext_join_params;
+ struct wl_join_params join_params;
+ size_t join_params_size;
+ s32 err = 0;
+ wpa_ie_fixed_t *wpa_ie;
+ wpa_ie_fixed_t *wps_ie;
+ bcm_tlv_t *wpa2_ie;
+ u8* wpaie = 0;
+ u32 wpaie_len = 0;
+ u32 wpsie_len = 0;
+ u32 chan_cnt = 0;
+ u8 wpsie[IE_MAX_LEN];
+ struct ether_addr bssid;
+
+ WL_DBG(("In\n"));
+ CHECK_SYS_UP(wl);
+
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+ if (wl->scan_request) {
+ wl_cfg80211_scan_abort(wl, dev);
+ }
+ /* Clean BSSID */
+ bzero(&bssid, sizeof(bssid));
+ wl_update_prof(wl, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+ if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) {
+		/* For P2P, only allow connecting through the virtual interface */
+ if (p2p_on(wl) && is_wps_conn(sme)) {
+ WL_DBG(("ASSOC1 p2p index : %d sme->ie_len %d\n",
+ wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
+ /* Have to apply WPS IE + P2P IE in assoc req frame */
+ wl_cfgp2p_set_management_ie(wl, dev,
+ wl_cfgp2p_find_idx(wl, dev), VNDR_IE_PRBREQ_FLAG,
+ wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie,
+ wl_to_p2p_bss_saved_ie(wl,
+ P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ } else if (p2p_on(wl) && (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
+			/* This is the connect request after WPS is done (credentials
+			 * exchanged), currently identified by WPA_VERSION_2.
+			 * Update the previously set IEs with the newly received IEs
+			 * from the supplicant; this removes the WPS IE from the
+			 * assoc request.
+			 */
+ WL_DBG(("ASSOC2 p2p index : %d sme->ie_len %d\n",
+ wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
+ wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ }
+
+ } else if (dev == wl_to_prmry_ndev(wl)) {
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+ sme->ie_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if (wpa_ie != NULL || wpa2_ie != NULL) {
+ wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+ wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+ wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+ wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+ ioctlbuf, sizeof(ioctlbuf));
+ } else {
+ wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+ ioctlbuf, sizeof(ioctlbuf));
+ }
+
+ /* find the WPSIE */
+ memset(wpsie, 0, sizeof(wpsie));
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)sme->ie,
+ sme->ie_len)) != NULL) {
+			wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ memcpy(wpsie, wps_ie, wpsie_len);
+ } else {
+ wpsie_len = 0;
+ }
+ err = wl_cfgp2p_set_management_ie(wl, dev, -1,
+ VNDR_IE_ASSOCREQ_FLAG, wpsie, wpsie_len);
+ if (unlikely(err)) {
+ return err;
+ }
+ }
+ if (unlikely(!sme->ssid)) {
+ WL_ERR(("Invalid ssid\n"));
+ return -EOPNOTSUPP;
+ }
+ if (chan) {
+ wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ chan_cnt = 1;
+ WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
+ chan->center_freq));
+ } else
+ wl->channel = 0;
+ WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+ err = wl_set_wpa_version(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid wpa_version\n"));
+ return err;
+ }
+
+ err = wl_set_auth_type(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid auth type\n"));
+ return err;
+ }
+
+ err = wl_set_set_cipher(dev, sme);
+ if (unlikely(err)) {
+		WL_ERR(("Invalid cipher\n"));
+ return err;
+ }
+
+ err = wl_set_key_mgmt(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid key mgmt\n"));
+ return err;
+ }
+
+ err = wl_set_set_sharedkey(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid shared key\n"));
+ return err;
+ }
+
+ /*
+ * Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+ chan_cnt * sizeof(chanspec_t);
+ ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ wl_clr_drv_status(wl, CONNECTING);
+ goto exit;
+ }
+ ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+ memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+ ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+ ext_join_params->scan.nprobes = 2;
+	/* Increase dwell time to receive probe responses
+	 * from the target AP in a noisy environment.
+	 */
+ ext_join_params->scan.active_time = 150;
+ ext_join_params->scan.passive_time = 300;
+ ext_join_params->scan.home_time = -1;
+ if (sme->bssid)
+ memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+ else
+ memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ if (chan_cnt) {
+ u16 channel, band, bw, ctl_sb;
+ chanspec_t chspec;
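+		/*
+		 * Build a 20 MHz chanspec for the cached target channel: pick
+		 * the band from the channel number, force 20 MHz bandwidth
+		 * with no control sideband, and convert the result to dongle
+		 * byte order before handing it to the join iovar.
+		 */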
+ channel = wl->channel;
+ band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+ : WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+ chspec = (channel | band | bw | ctl_sb);
+ ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ ext_join_params->assoc.chanspec_list[0] |= chspec;
+ ext_join_params->assoc.chanspec_list[0] =
+ htodchanspec(ext_join_params->assoc.chanspec_list[0]);
+ }
+ ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+ if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ ext_join_params->ssid.SSID_len));
+ }
+ wl_set_drv_status(wl, CONNECTING);
+ err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size, ioctlbuf,
+ sizeof(ioctlbuf), wl_cfgp2p_find_idx(wl, dev));
+ kfree(ext_join_params);
+ if (err) {
+ wl_clr_drv_status(wl, CONNECTING);
+ if (err == BCME_UNSUPPORTED) {
+ WL_DBG(("join iovar is not supported\n"));
+ goto set_ssid;
+ } else
+ WL_ERR(("error (%d)\n", err));
+ } else
+ goto exit;
+
+set_ssid:
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+ memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+ wl_update_prof(wl, NULL, &join_params.ssid, WL_PROF_SSID);
+ if (sme->bssid)
+ memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+ else
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+ wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
+ WL_DBG(("join_param_size %d\n", join_params_size));
+
+ if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len));
+ }
+ wl_set_drv_status(wl, CONNECTING);
+ err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+ if (err) {
+ WL_ERR(("error (%d)\n", err));
+ wl_clr_drv_status(wl, CONNECTING);
+ }
+exit:
+ return err;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ scb_val_t scbval;
+ bool act = false;
+ s32 err = 0;
+ u8 *curbssid;
+ WL_ERR(("Reason %d\n", reason_code));
+ CHECK_SYS_UP(wl);
+ act = *(bool *) wl_read_prof(wl, WL_PROF_ACT);
+ curbssid = wl_read_prof(wl, WL_PROF_BSSID);
+ if (likely(act)) {
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+ if (wl->scan_request) {
+ wl_cfg80211_scan_abort(wl, dev);
+ }
+ wl_set_drv_status(wl, DISCONNECTING);
+ scbval.val = reason_code;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ if (unlikely(err)) {
+ wl_clr_drv_status(wl, DISCONNECTING);
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, s32 dbm)
+{
+
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ u16 txpwrmw;
+ s32 err = 0;
+ s32 disable = 0;
+
+ CHECK_SYS_UP(wl);
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ if (dbm < 0) {
+ WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+ return -EINVAL;
+ }
+ break;
+ case NL80211_TX_POWER_FIXED:
+ if (dbm < 0) {
+ WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+ return -EINVAL;
+ }
+ break;
+ }
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = WL_RADIO_SW_DISABLE << 16;
+ disable = htod32(disable);
+ err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+ return err;
+ }
+
+ if (dbm > 0xffff)
+ txpwrmw = 0xffff;
+ else
+ txpwrmw = (u16) dbm;
+ err = wl_dev_intvar_set(ndev, "qtxpower",
+ (s32) (bcm_mw_to_qdbm(txpwrmw)));
+ if (unlikely(err)) {
+ WL_ERR(("qtxpower error (%d)\n", err));
+ return err;
+ }
+ wl->conf->tx_power = dbm;
+
+ return err;
+}
+
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 txpwrdbm;
+ u8 result;
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ err = wl_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ *dbm = (s32) bcm_qdbm_to_mw(result);
+
+ return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ u32 index;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ if (wsec & WEP_ENABLED) {
+ /* Just select a new current key */
+ index = (u32) key_idx;
+ index = htod32(index);
+ err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+ sizeof(index), true);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+ return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_wsec_key key;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ s32 mode = get_mode_by_netdev(wl, dev);
+ memset(&key, 0, sizeof(key));
+ key.index = (u32) key_idx;
+
+ if (!ETHER_ISMULTI(mac_addr))
+ memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+ key.len = (u32) params->key_len;
+
+ /* check for key index change */
+ if (key.len == 0) {
+ /* key delete */
+ swap_key_from_BE(&key);
+ wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), ioctlbuf,
+ sizeof(ioctlbuf), bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("key delete error (%d)\n", err));
+ return err;
+ }
+ } else {
+ if (key.len > sizeof(key.data)) {
+ WL_ERR(("Invalid key length (%d)\n", key.len));
+ return -EINVAL;
+ }
+ WL_DBG(("Setting the key index %d\n", key.index));
+ memcpy(key.data, params->key, key.len);
+
+ if ((mode == WL_MODE_BSS) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
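+ /* The supplicant supplies the TKIP TX/RX MIC keys in the reverse
+ * order expected by the firmware, so swap bytes 16..23 and 24..31
+ */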
+ u8 keybuf[8];
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ }
+
+ /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+ if (params->seq && params->seq_len == 6) {
+ /* rx iv */
+ u8 *ivptr;
+ ivptr = (u8 *) params->seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = true;
+ }
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+ swap_key_from_BE(&key);
+#ifdef CONFIG_WIRELESS_EXT
+ dhd_wait_pend8021x(dev);
+#endif
+ wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), ioctlbuf,
+ sizeof(ioctlbuf), bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ }
+ return err;
+}
+
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wl_wsec_key key;
+ s32 val = 0;
+ s32 wsec = 0;
+ s32 err = 0;
+ u8 keybuf[8];
+ s32 bssidx = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 mode = get_mode_by_netdev(wl, dev);
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (mac_addr) {
+ wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+ goto exit;
+ }
+ memset(&key, 0, sizeof(key));
+
+ key.len = (u32) params->key_len;
+ key.index = (u32) key_idx;
+
+ if (unlikely(key.len > sizeof(key.data))) {
+ WL_ERR(("Too long key length (%u)\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, params->key, key.len);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ val = TKIP_ENABLED;
+ /* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+ if (mode == WL_MODE_BSS) {
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), ioctlbuf,
+ sizeof(ioctlbuf), bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+
+exit:
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("get wsec error (%d)\n", err));
+ return err;
+ }
+
+ wsec |= val;
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wsec error (%d)\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+ struct wl_wsec_key key;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("Enter\n"));
+ CHECK_SYS_UP(wl);
+ memset(&key, 0, sizeof(key));
+
+ key.index = (u32) key_idx;
+ key.flags = WL_PRIMARY_KEY;
+ key.algo = CRYPTO_ALGO_OFF;
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), ioctlbuf,
+ sizeof(ioctlbuf), bssidx);
+ if (unlikely(err)) {
+ if (err == -EINVAL) {
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ WL_DBG(("invalid key index (%d)\n", key_idx));
+ }
+ } else {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ }
+ return err;
+ }
+ return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie, struct key_params * params))
+{
+ struct key_params params;
+ struct wl_wsec_key key;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+ memset(&key, 0, sizeof(key));
+ key.index = key_idx;
+ swap_key_to_BE(&key);
+ memset(&params, 0, sizeof(params));
+ params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+ memcpy(params.key, key.data, params.key_len);
+
+ wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ switch (wsec & ~SES_OW_ENABLED) {
+ case WEP_ENABLED:
+ sec = wl_read_prof(wl, WL_PROF_SEC);
+ if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ }
+ break;
+ case TKIP_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case AES_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ default:
+ WL_ERR(("Invalid algo (0x%x)\n", wsec));
+ return -EINVAL;
+ }
+
+ callback(cookie, &params);
+ return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx)
+{
+ WL_INFO(("Not supported\n"));
+ return -EOPNOTSUPP;
+}
+
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ scb_val_t scb_val;
+ s32 rssi;
+ s32 rate;
+ s32 err = 0;
+ sta_info_t *sta;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+
+ CHECK_SYS_UP(wl);
+ if (get_mode_by_netdev(wl, dev) == WL_MODE_AP) {
+ err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+ ETHER_ADDR_LEN, ioctlbuf, sizeof(ioctlbuf));
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ return err;
+ }
+ sinfo->filled = STATION_INFO_INACTIVE_TIME;
+ sta = (sta_info_t *)ioctlbuf;
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->inactive_time = sta->idle * 1000;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+ sinfo->connected_time = sta->in;
+ }
+ WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n",
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+ sta->idle * 1000));
+#endif
+ } else if (get_mode_by_netdev(wl, dev) == WL_MODE_BSS) {
+ u8 *curmacp = wl_read_prof(wl, WL_PROF_BSSID);
+
+ if (!wl_get_drv_status(wl, CONNECTED) ||
+ (dhd_is_associated(dhd, NULL) == FALSE)) {
+ WL_ERR(("NOT assoc\n"));
+ err = -ENODEV;
+ goto get_station_err;
+ }
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ WL_ERR(("Wrong Mac address: "MACSTR" != "MACSTR"\n",
+ MAC2STR(mac), MAC2STR(curmacp)));
+ }
+
+ /* Report the current tx rate */
+ err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ } else {
+ rate = dtoh32(rate);
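+ /* the firmware reports the rate in 500 kbps units;
+ * cfg80211 expects the legacy rate in 100 kbps units
+ */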
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+ }
+
+ memset(&scb_val, 0, sizeof(scb_val));
+ scb_val.val = 0;
+ err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t), false);
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ goto get_station_err;
+ }
+
+ rssi = dtoh32(scb_val.val);
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+
+get_station_err:
+ if (err) {
+ /* Disconnect due to a zero BSSID or a failure to get the RSSI */
+ WL_ERR(("force cfg80211_disconnected\n"));
+ wl_clr_drv_status(wl, CONNECTED);
+ cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+ wl_link_down(wl);
+ }
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, s32 timeout)
+{
+ s32 pm;
+ s32 err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ CHECK_SYS_UP(wl);
+ pm = enabled ? PM_FAST : PM_OFF;
+ /* Do not enable power save after association on a P2P interface */
+ if (wl->p2p && wl->p2p->vif_created) {
+ WL_DBG(("Do not enable the power save for p2p interfaces even after assoc\n"));
+ pm = PM_OFF;
+ }
+ pm = htod32(pm);
+ WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
+ err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
+ if (unlikely(err)) {
+ if (err == -ENODEV)
+ WL_DBG(("net_device is not ready yet\n"));
+ else
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
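+/* Return the 1-based position of the most significant set bit in bit16 (0 if no bit is set) */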
+static __used u32 wl_find_msb(u16 bit16)
+{
+ u32 ret = 0;
+
+ if (bit16 & 0xff00) {
+ ret += 8;
+ bit16 >>= 8;
+ }
+
+ if (bit16 & 0xf0) {
+ ret += 4;
+ bit16 >>= 4;
+ }
+
+ if (bit16 & 0xc) {
+ ret += 2;
+ bit16 >>= 2;
+ }
+
+ if (bit16 & 2)
+ ret += bit16 & 2;
+ else if (bit16)
+ ret += bit16;
+
+ return ret;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+
+ if (unlikely(!wl_get_drv_status(wl, READY))) {
+ WL_INFO(("device is not ready : status (%d)\n",
+ (int)wl->status));
+ return 0;
+ }
+
+ wl_invoke_iscan(wl);
+
+ return err;
+}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+#ifdef DHD_CLEAR_ON_SUSPEND
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ unsigned long flags;
+
+ if (unlikely(!wl_get_drv_status(wl, READY))) {
+ WL_INFO(("device is not ready : status (%d)\n",
+ (int)wl->status));
+ return 0;
+ }
+
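+ /* Abort any in-progress scan and fail a pending connect attempt
+ * so that cfg80211 state stays consistent across suspend
+ */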
+ wl_set_drv_status(wl, SCAN_ABORTING);
+ wl_term_iscan(wl);
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ if (wl->scan_request) {
+ cfg80211_scan_done(wl->scan_request, true);
+ wl->scan_request = NULL;
+ }
+ wl_clr_drv_status(wl, SCANNING);
+ wl_clr_drv_status(wl, SCAN_ABORTING);
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+ if (wl_get_drv_status(wl, CONNECTING)) {
+ wl_bss_connect_done(wl, ndev, NULL, NULL, false);
+ }
+#endif
+ return 0;
+}
+
+static __used s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+ s32 err)
+{
+ int i, j;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct net_device *primary_dev = wl_to_prmry_ndev(wl);
+
+ /* Firmware supports the PMK list only on the STA (primary) interface.
+ * Refer to wlc_bsscfg.c:wlc_bsscfg_sta_init().
+ * Do we really need to support a PMK cache for P2P in firmware?
+ */
+ if (primary_dev != dev) {
+ WL_INFO(("Not supporting Flushing pmklist on virtual"
+ " interfaces than primary interface\n"));
+ return err;
+ }
+
+ WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+ for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+ WL_DBG(("PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].BSSID));
+ for (j = 0; j < WPA2_PMKID_LEN; j++) {
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+ }
+ }
+ if (likely(!err)) {
+ err = wl_dev_bufvar_set(dev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list));
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ int i;
+
+ CHECK_SYS_UP(wl);
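+ /* Look for an existing entry with this BSSID; update it in place,
+ * otherwise append a new entry at the end of the list
+ */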
+ for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ if (i < WL_NUM_PMKIDS_MAX) {
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+ ETHER_ADDR_LEN);
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+ WPA2_PMKID_LEN);
+ if (i == wl->pmk_list->pmkids.npmkid)
+ wl->pmk_list->pmkids.npmkid++;
+ } else {
+ err = -EINVAL;
+ }
+ WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n",
+ wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].
+ PMKID[i]));
+ }
+
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+ return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct _pmkid_list pmkid;
+ s32 err = 0;
+ int i;
+
+ CHECK_SYS_UP(wl);
+ memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+ memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+ WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+ }
+
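+ /* Find the matching entry and compact the list by shifting the
+ * remaining entries down by one
+ */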
+ for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp
+ (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+
+ if ((wl->pmk_list->pmkids.npmkid > 0) &&
+ (i < wl->pmk_list->pmkids.npmkid)) {
+ memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+ for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
+ &wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ ETHER_ADDR_LEN);
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
+ &wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ wl->pmk_list->pmkids.npmkid--;
+ } else {
+ err = -EINVAL;
+ }
+
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+ return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ CHECK_SYS_UP(wl);
+ memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+ return err;
+
+}
+
+wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+ wl_scan_params_t *params;
+ int params_size;
+ int num_chans;
+
+ *out_params_size = 0;
+
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ WL_ERR(("%s: mem alloc failed (%d bytes)\n", __func__, params_size));
+ return params;
+ }
+ memset(params, 0, params_size);
+ params->nprobes = nprobes;
+
+ num_chans = (channel == 0) ? 0 : 1;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(10);
+ params->channel_list[0] = htodchanspec(channel);
+
+ /* Our scan params have 1 channel and 0 ssids */
+ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ *out_params_size = params_size; /* rtn size to the caller */
+ return params;
+}
+
+s32
+wl_cfg80211_scan_abort(struct wl_priv *wl, struct net_device *ndev)
+{
+ wl_scan_params_t *params = NULL;
+ s32 params_size = 0;
+ s32 err = BCME_OK;
+ unsigned long flags;
+
+ WL_DBG(("Enter\n"));
+
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+ if (params == NULL) {
+ WL_ERR(("scan params allocation failed \n"));
+ err = -ENOMEM;
+ } else {
+ /* Do a scan abort to stop the driver's scan engine */
+ err = wldev_ioctl(ndev, WLC_SCAN, params, params_size, true);
+ if (err < 0) {
+ WL_ERR(("scan abort failed \n"));
+ }
+ }
+ del_timer_sync(&wl->scan_timeout);
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ if (wl->scan_request) {
+ cfg80211_scan_done(wl->scan_request, true);
+ wl->scan_request = NULL;
+ }
+ wl_clr_drv_status(wl, SCANNING);
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+ if (params)
+ kfree(params);
+ return err;
+}
+
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel * channel,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+{
+ s32 target_channel;
+
+ s32 err = BCME_OK;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ WL_DBG(("Enter, netdev_ifidx: %d \n", dev->ifindex));
+ if (likely(wl_get_drv_status(wl, SCANNING))) {
+ wl_cfg80211_scan_abort(wl, dev);
+ }
+
+ target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+ memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+ wl->remain_on_chan_type = channel_type;
+ wl->cache_cookie = *cookie;
+ cfg80211_ready_on_channel(dev, *cookie, channel,
+ channel_type, duration, GFP_KERNEL);
+ if (!p2p_on(wl)) {
+ wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+
+ /* In case of the p2p_listen command, the supplicant sends remain_on_channel
+ * without turning on P2P
+ */
+
+ p2p_on(wl) = true;
+ err = wl_cfgp2p_enable_discovery(wl, dev, NULL, 0);
+
+ if (unlikely(err)) {
+ goto exit;
+ }
+ }
+ if (p2p_on(wl))
+ wl_cfgp2p_discover_listen(wl, target_channel, duration);
+
+
+exit:
+ return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+ u64 cookie)
+{
+ s32 err = 0;
+ WL_DBG((" enter ) netdev_ifidx: %d \n", dev->ifindex));
+ return err;
+}
+
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *channel, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8* buf, size_t len, u64 *cookie)
+{
+ wl_action_frame_t *action_frame;
+ wl_af_params_t *af_params;
+ wifi_p2p_ie_t *p2p_ie;
+ wpa_ie_fixed_t *wps_ie;
+ const struct ieee80211_mgmt *mgmt;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ s32 err = BCME_OK;
+ s32 bssidx = 0;
+ u32 p2pie_len = 0;
+ u32 wpsie_len = 0;
+ u16 fc;
+ bool ack = false;
+ wifi_p2p_pub_act_frame_t *act_frm;
+ WL_DBG(("Enter \n"));
+ /* find bssidx based on ndev */
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+ /* cookie generation */
+ *cookie = (unsigned long) buf;
+
+ if (bssidx == -1) {
+
+ WL_ERR(("Can not find the bssidx for dev( %p )\n", dev));
+ return -ENODEV;
+ }
+ if (wl->p2p_supported && p2p_on(wl)) {
+ wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((err = wl_cfgp2p_discover_enable_search(wl, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return -EFAULT;
+ }
+ }
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+ fc = mgmt->frame_control;
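+ /* Probe responses are not transmitted from the host; their WPS/P2P
+ * IEs are handed to the firmware as probe response vendor IEs instead
+ */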
+ if (fc != IEEE80211_STYPE_ACTION) {
+ if (fc == IEEE80211_STYPE_PROBE_RESP) {
+ s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ s32 ie_len = len - ie_offset;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* Total length of P2P Information Element */
+ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+ /* Have to change the p2p device address in the dev_info attribute
+ * because the supplicant uses the primary eth0 address
+ */
+ #ifdef ENABLE_DRIVER_CHANGE_IFADDR /* We are now doing this in supplicant */
+ wl_cfg80211_change_ifaddr((u8 *)p2p_ie,
+ &wl->p2p_dev_addr, P2P_SEID_DEV_INFO);
+ #endif
+ }
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* The supplicant orders the vendor IEs as 1) WPS IE
+ * followed by 2) P2P IE, so it is sufficient to find
+ * the start address of the WPS IE when saving the IEs
+ * to firmware
+ */
+ wpsie_len = wps_ie->length + sizeof(wps_ie->length) +
+ sizeof(wps_ie->tag);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ VNDR_IE_PRBRSP_FLAG,
+ (u8 *)wps_ie, wpsie_len + p2pie_len);
+ }
+ }
+ cfg80211_mgmt_tx_status(dev, *cookie, buf, len, true, GFP_KERNEL);
+ goto exit;
+ } else {
+ /* Abort the dwell time of any previous off-channel action frame that may
+ * be still in effect. Sending off-channel action frames relies on the
+ * driver's scan engine. If a previous off-channel action frame tx is
+ * still in progress (including the dwell time), then this new action
+ * frame will not be sent out.
+ */
+ wl_cfg80211_scan_abort(wl, dev);
+ }
+ af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+ if (af_params == NULL)
+ {
+ WL_ERR(("unable to allocate frame\n"));
+ return -ENOMEM;
+ }
+
+ action_frame = &af_params->action_frame;
+
+ /* Add the packet Id */
+ action_frame->packetId = (u32) action_frame;
+ WL_DBG(("action frame %d\n", action_frame->packetId));
+ /* Add BSSID */
+ memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+ memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+ /* Add the length, excluding the 802.11 management header */
+ action_frame->len = len - DOT11_MGMT_HDR_LEN;
+ WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+ /* Add the channel */
+ af_params->channel =
+ ieee80211_frequency_to_channel(channel->center_freq);
+
+ /* Add the dwell time
+ * Dwell time to stay off-channel to wait for a response action frame
+ * after transmitting a GO Negotiation action frame
+ */
+ af_params->dwell_time = WL_DWELL_TIME;
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+ act_frm = (wifi_p2p_pub_act_frame_t *) (action_frame->data);
+ WL_DBG(("action_frame->len: %d chan %d category %d subtype %d\n",
+ action_frame->len, af_params->channel,
+ act_frm->category, act_frm->subtype));
+ if (wl->p2p->vif_created) {
+ /*
+ * To make sure the action frame is sent successfully, we have to turn off mpc
+ */
+ if ((act_frm->subtype == P2P_PAF_GON_REQ)||
+ (act_frm->subtype == P2P_PAF_GON_RSP)) {
+ wldev_iovar_setint(dev, "mpc", 0);
+ } else if (act_frm->subtype == P2P_PAF_GON_CONF) {
+ wldev_iovar_setint(dev, "mpc", 1);
+ } else if (act_frm->subtype == P2P_PAF_DEVDIS_REQ) {
+ af_params->dwell_time = WL_LONG_DWELL_TIME;
+ }
+ }
+
+ ack = (wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx)) ? false : true;
+ cfg80211_mgmt_tx_status(dev, *cookie, buf, len, ack, GFP_KERNEL);
+
+ kfree(af_params);
+exit:
+ return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev,
+ u16 frame_type, bool reg)
+{
+
+ WL_DBG(("%s: frame_type: %x, reg: %d\n", __func__, frame_type, reg));
+
+ if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+ return;
+
+ return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ if (params->use_cts_prot >= 0) {
+ }
+
+ if (params->use_short_preamble >= 0) {
+ }
+
+ if (params->use_short_slot_time >= 0) {
+ }
+
+ if (params->basic_rates) {
+ }
+
+ if (params->ap_isolate >= 0) {
+ }
+
+ if (params->ht_opmode >= 0) {
+ }
+
+ return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ s32 channel;
+ s32 err = BCME_OK;
+
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+ WL_DBG(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+ dev->ifindex, channel_type, channel));
+ err = wldev_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), true);
+ if (err < 0) {
+ WL_ERR(("WLC_SET_CHANNEL error %d chip may not be supporting this channel\n", err));
+ }
+ return err;
+}
+
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+ s32 len = 0;
+ s32 err = BCME_OK;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u8* tmp;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ if (wpa2ie == NULL)
+ goto exit;
+
+ WL_DBG(("Enter \n"));
+ len = wpa2ie->len;
+ /* check the mcast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ tmp = mcast->oui;
+ switch (tmp[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ gval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ gval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ gval = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ break;
+ }
+ len -= WPA_SUITE_LEN;
+ /* check the unicast cipher */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ tmp = ucast->list[0].oui;
+ switch (tmp[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ pval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ pval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ pval = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ /* For WPS, set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* check the AKM */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[1];
+ count = ltoh16_ua(&mgmt->count);
+ tmp = (u8 *)&mgmt->list[0];
+ switch (tmp[DOT11_OUI_LEN]) {
+ case RSN_AKM_NONE:
+ wpa_auth = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ wpa_auth = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ wpa_auth = WPA2_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = BCME_OK;
+ s32 len = 0;
+ u32 i;
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 tmp = 0;
+
+ if (wpaie == NULL)
+ goto exit;
+ WL_DBG(("Enter \n"));
+ len = wpaie->length; /* value length */
+ len -= WPA_IE_TAG_FIXED_LEN;
+ /* check for multicast cipher suite */
+ if (len < WPA_SUITE_LEN) {
+ WL_INFO(("no multicast cipher suite\n"));
+ goto exit;
+ }
+
+ /* pick up multicast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpaie[1];
+ len -= WPA_SUITE_LEN;
+ if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(mcast->type)) {
+ tmp = 0;
+ switch (mcast->type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ gval |= tmp;
+ }
+ }
+ /* Check for unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFO(("no unicast suite\n"));
+ goto exit;
+ }
+ /* walk thru unicast cipher list and pick up what we recognize */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ tmp = 0;
+ switch (ucast->list[i].type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ pval |= tmp;
+ }
+ }
+ }
+ len -= (count - i) * WPA_SUITE_LEN;
+ /* Check for auth key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFO((" no auth key mgmt suite\n"));
+ goto exit;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ count = ltoh16_ua(&mgmt->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_AKM(mgmt->list[i].type)) {
+ tmp = 0;
+ switch (mgmt->list[i].type) {
+ case RSN_AKM_NONE:
+ tmp = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ tmp = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ tmp = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ wpa_auth |= tmp;
+ }
+ }
+
+ }
+ /* For WPS, set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ s32 err = BCME_OK;
+ bcm_tlv_t *ssid_ie;
+ wlc_ssid_t ssid;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_join_params join_params;
+ wpa_ie_fixed_t *wps_ie;
+ wpa_ie_fixed_t *wpa_ie;
+ bcm_tlv_t *wpa2_ie;
+ wifi_p2p_ie_t *p2p_ie;
+ bool is_bssup = false;
+ bool update_bss = false;
+ bool pbc = false;
+ u16 wpsie_len = 0;
+ u16 p2pie_len = 0;
+ u8 beacon_ie[IE_MAX_LEN];
+ s32 ie_offset = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ s32 infra = 1;
+ s32 join_params_size = 0;
+ s32 ap = 0;
+ WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+ info->interval, info->dtim_period, info->head_len, info->tail_len));
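+ /* Three cases: P2P GO bring-up, initial softap creation (AP_CREATING),
+ * and a beacon/IE update on an already created AP (AP_CREATED)
+ */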
+ if (wl->p2p_supported && p2p_on(wl) &&
+ (bssidx == wl_to_p2p_bss_bssidx(wl,
+ P2PAPI_BSSCFG_CONNECTION))) {
+ memset(beacon_ie, 0, sizeof(beacon_ie));
+ /* We don't need to set the beacon for the P2P GO,
+ * but we do need to parse the ssid from beacon_parameters
+ * because there is no other way to set the ssid
+ */
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ memcpy(wl->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+ wl->p2p->ssid.SSID_len = ssid_ie->len;
+ WL_DBG(("SSID (%s) in Head \n", ssid_ie->data));
+
+ } else {
+ WL_ERR(("No SSID in beacon \n"));
+ }
+
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+
+
+ /* find the P2PIE */
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)info->tail, info->tail_len)) != NULL) {
+ /* Total length of P2P Information Element */
+ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+ #ifdef ENABLE_DRIVER_CHANGE_IFADDR /* We are now doing this in supplicant */
+ /* Have to change device address in dev_id attribute because Supplicant
+ * use primary eth0 address
+ */
+ wl_cfg80211_change_ifaddr((u8 *)p2p_ie, &wl->p2p_dev_addr, P2P_SEID_DEV_ID);
+ #endif
+ memcpy(&beacon_ie[wpsie_len], p2p_ie, p2pie_len);
+
+ } else {
+ WL_ERR(("No P2PIE in beacon \n"));
+ }
+ /* add WLC_E_PROBREQ_MSG event to respond to probe_request from STA */
+ wl_dongle_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len + p2pie_len);
+
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+
+ if (!is_bssup && (wpa2_ie != NULL)) {
+ wldev_iovar_setint(dev, "mpc", 0);
+ if ((err = wl_validate_wpa2ie(dev, wpa2_ie, bssidx)) < 0) {
+ WL_ERR(("WPA2 IE parsing error"));
+ goto exit;
+ }
+ err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ goto exit;
+ }
+ err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &wl->p2p->ssid,
+ sizeof(wl->p2p->ssid), ioctlbuf, sizeof(ioctlbuf), bssidx);
+ if (err < 0) {
+ WL_ERR(("GO SSID setting error %d\n", err));
+ goto exit;
+ }
+ if ((err = wl_cfgp2p_bss(dev, bssidx, 1)) < 0) {
+ WL_ERR(("GO Bring up error %d\n", err));
+ goto exit;
+ }
+ }
+ } else if (wl_get_drv_status(wl, AP_CREATING)) {
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ ap = 1;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ memset(&ssid, 0, sizeof(wlc_ssid_t));
+ memcpy(ssid.SSID, ssid_ie->data, ssid_ie->len);
+ WL_DBG(("SSID is (%s) in Head \n", ssid.SSID));
+ ssid.SSID_len = ssid_ie->len;
+ wldev_iovar_setint(dev, "mpc", 0);
+ wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+ wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
+ }
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if ((wpa_ie != NULL || wpa2_ie != NULL)) {
+ if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
+ wl->ap_info->security_mode = false;
+ return BCME_ERROR;
+ }
+ wl->ap_info->security_mode = true;
+ if (wl->ap_info->rsn_ie) {
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = NULL;
+ }
+ if (wl->ap_info->wpa_ie) {
+ kfree(wl->ap_info->wpa_ie);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ if (wl->ap_info->wps_ie) {
+ kfree(wl->ap_info->wps_ie);
+ wl->ap_info->wps_ie = NULL;
+ }
+ if (wpa_ie != NULL) {
+ /* WPAIE */
+ wl->ap_info->rsn_ie = NULL;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ /* RSNIE */
+ wl->ap_info->wpa_ie = NULL;
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ } else
+ wl->ap_info->security_mode = false;
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len);
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+ /* add WLC_E_PROBREQ_MSG event to respond to probe_request from STA */
+ wl_dongle_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ } else {
+ WL_DBG(("No WPSIE in beacon \n"));
+ }
+ if (info->interval) {
+ if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+ &info->interval, sizeof(s32), true)) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ if (info->dtim_period) {
+ if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+ &info->dtim_period, sizeof(s32), true)) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ return err;
+ }
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters start with the ssid */
+ join_params_size = sizeof(join_params.ssid);
+ memcpy(join_params.ssid.SSID, ssid.SSID, ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(ssid.SSID_len);
+ /* create softap */
+ if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+ join_params_size, true)) == 0) {
+ wl_clr_drv_status(wl, AP_CREATING);
+ wl_set_drv_status(wl, AP_CREATED);
+ }
+ }
+ } else if (wl_get_drv_status(wl, AP_CREATED)) {
+ ap = 1;
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len);
+ if (wl->ap_info->wps_ie &&
+ memcmp(wl->ap_info->wps_ie, wps_ie, wpsie_len)) {
+ WL_DBG((" WPS IE is changed\n"));
+ kfree(wl->ap_info->wps_ie);
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+ /* add WLC_E_PROBREQ_MSG event to respond to probe_request from STA */
+ wl_dongle_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ } else if (wl->ap_info->wps_ie == NULL) {
+ WL_DBG((" WPS IE is added\n"));
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+ /* add WLC_E_PROBREQ_MSG event to respond to probe_request from STA */
+ wl_dongle_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ }
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if ((wpa_ie != NULL || wpa2_ie != NULL)) {
+ if (!wl->ap_info->security_mode) {
+ /* change from open mode to security mode */
+ update_bss = true;
+ if (wpa_ie != NULL) {
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ } else if (wl->ap_info->wpa_ie) {
+ /* change from WPA mode to WPA2 mode */
+ if (wpa2_ie != NULL) {
+ update_bss = true;
+ kfree(wl->ap_info->wpa_ie);
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ else if (memcmp(wl->ap_info->wpa_ie,
+ wpa_ie, wpa_ie->length +
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+ kfree(wl->ap_info->wpa_ie);
+ update_bss = true;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->rsn_ie = NULL;
+ }
+ } else {
+ /* change from WPA2 mode to WPA mode */
+ if (wpa_ie != NULL) {
+ update_bss = true;
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = NULL;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else if (memcmp(wl->ap_info->rsn_ie,
+ wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
+ update_bss = true;
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ }
+ if (update_bss) {
+ wl->ap_info->security_mode = true;
+ wl_cfgp2p_bss(dev, bssidx, 0);
+ if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
+ return BCME_ERROR;
+ }
+ wl_cfgp2p_bss(dev, bssidx, 1);
+ }
+ }
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+ }
+exit:
+ if (err)
+ wldev_iovar_setint(dev, "mpc", 1);
+ return err;
+}
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+ .add_virtual_intf = wl_cfg80211_add_virtual_iface,
+ .del_virtual_intf = wl_cfg80211_del_virtual_iface,
+ .change_virtual_intf = wl_cfg80211_change_virtual_iface,
+ .scan = wl_cfg80211_scan,
+ .set_wiphy_params = wl_cfg80211_set_wiphy_params,
+ .join_ibss = wl_cfg80211_join_ibss,
+ .leave_ibss = wl_cfg80211_leave_ibss,
+ .get_station = wl_cfg80211_get_station,
+ .set_tx_power = wl_cfg80211_set_tx_power,
+ .get_tx_power = wl_cfg80211_get_tx_power,
+ .add_key = wl_cfg80211_add_key,
+ .del_key = wl_cfg80211_del_key,
+ .get_key = wl_cfg80211_get_key,
+ .set_default_key = wl_cfg80211_config_default_key,
+ .set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+ .set_power_mgmt = wl_cfg80211_set_power_mgmt,
+ .connect = wl_cfg80211_connect,
+ .disconnect = wl_cfg80211_disconnect,
+ .suspend = wl_cfg80211_suspend,
+ .resume = wl_cfg80211_resume,
+ .set_pmksa = wl_cfg80211_set_pmksa,
+ .del_pmksa = wl_cfg80211_del_pmksa,
+ .flush_pmksa = wl_cfg80211_flush_pmksa,
+ .remain_on_channel = wl_cfg80211_remain_on_channel,
+ .cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+ .mgmt_tx = wl_cfg80211_mgmt_tx,
+ .mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+ .change_bss = wl_cfg80211_change_bss,
+ .set_channel = wl_cfg80211_set_channel,
+ .set_beacon = wl_cfg80211_add_set_beacon,
+ .add_beacon = wl_cfg80211_add_set_beacon,
+};
+
+static s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+ s32 err = 0;
+
+ switch (mode) {
+ case WL_MODE_BSS:
+ return NL80211_IFTYPE_STATION;
+ case WL_MODE_IBSS:
+ return NL80211_IFTYPE_ADHOC;
+ case WL_MODE_AP:
+ return NL80211_IFTYPE_AP;
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
+
+ return err;
+}
+
+static struct wireless_dev *wl_alloc_wdev(struct device *sdiofunc_dev)
+{
+ struct wireless_dev *wdev;
+ s32 err = 0;
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return ERR_PTR(-ENOMEM);
+ }
+ wdev->wiphy =
+ wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv));
+ if (unlikely(!wdev->wiphy)) {
+ WL_ERR(("Couldn not allocate wiphy device\n"));
+ err = -ENOMEM;
+ goto wiphy_new_out;
+ }
+ set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+ wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+ /* Report how many SSIDs Driver can support per Scan request */
+ wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+ wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+ wdev->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC)
+ | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR);
+
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wdev->wiphy->cipher_suites = __wl_cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wdev->wiphy->max_remain_on_channel_duration = 5000;
+ wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+ wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+ wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif /* !WL_POWERSAVE_DISABLED */
+ wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+ WIPHY_FLAG_4ADDR_AP |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+ WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+ WIPHY_FLAG_4ADDR_STATION;
+
+ WL_DBG(("Registering custom regulatory)\n"));
+ wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+ /* Now we can register wiphy with cfg80211 module */
+ err = wiphy_register(wdev->wiphy);
+ if (unlikely(err < 0)) {
+ WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+ goto wiphy_register_out;
+ }
+ return wdev;
+
+wiphy_register_out:
+ wiphy_free(wdev->wiphy);
+
+wiphy_new_out:
+ kfree(wdev);
+
+ return ERR_PTR(err);
+}
+
+static void wl_free_wdev(struct wl_priv *wl)
+{
+ int i;
+ struct wireless_dev *wdev = wl->wdev;
+
+ if (unlikely(!wdev)) {
+ WL_ERR(("wdev is invalid\n"));
+ return;
+ }
+
+ for (i = 0; i < VWDEV_CNT; i++) {
+ if ((wl->vwdev[i] != NULL)) {
+ kfree(wl->vwdev[i]);
+ wl->vwdev[i] = NULL;
+ }
+ }
+ wiphy_unregister(wdev->wiphy);
+ wdev->wiphy->dev.parent = NULL;
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
+
+static s32 wl_inform_bss(struct wl_priv *wl)
+{
+ struct wl_scan_results *bss_list;
+ struct wl_bss_info *bi = NULL; /* must be initialized */
+ s32 err = 0;
+ s32 i;
+
+ bss_list = wl->bss_list;
+ WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ err = wl_inform_single_bss(wl, bi);
+ if (unlikely(err))
+ break;
+ }
+ return err;
+}
+
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+{
+ struct wiphy *wiphy = wiphy_from_scan(wl);
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *band;
+ struct wl_cfg80211_bss_info *notif_bss_info;
+ struct wl_scan_req *sr = wl_to_sr(wl);
+ struct beacon_proberesp *beacon_proberesp;
+ s32 mgmt_type;
+ s32 signal;
+ u32 freq;
+ s32 err = 0;
+
+ if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+ WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+ return err;
+ }
+ notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+ - sizeof(u8) + WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (unlikely(!notif_bss_info)) {
+ WL_ERR(("notif_bss_info alloc failed\n"));
+ return -ENOMEM;
+ }
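+ /* Build a synthetic beacon/probe-response management frame from the
+ * firmware bss_info so it can be reported via cfg80211_inform_bss_frame()
+ */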
+ mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+ notif_bss_info->channel =
+ bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec);
+
+ if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ notif_bss_info->rssi = dtoh16(bi->RSSI);
+ memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+ mgmt_type = wl->active_scan ?
+ IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+ if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+ }
+ beacon_proberesp = wl->active_scan ?
+ (struct beacon_proberesp *)&mgmt->u.probe_resp :
+ (struct beacon_proberesp *)&mgmt->u.beacon;
+ beacon_proberesp->timestamp = 0;
+ beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+ beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+ wl_rst_ie(wl);
+
+ wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+ wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+ offsetof(struct wl_cfg80211_bss_info, frame_buf));
+ notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable) + wl_get_ielen(wl);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+#else
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+ channel = ieee80211_get_channel(wiphy, freq);
+
+ WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
+ "mgmt_type %d frame_len %d\n", bi->SSID,
+ notif_bss_info->rssi, notif_bss_info->channel,
+ mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+ notif_bss_info->frame_len));
+
+ signal = notif_bss_info->rssi * 100;
+
+ if (unlikely(!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ le16_to_cpu(notif_bss_info->frame_len),
+ signal, GFP_KERNEL))) {
+ WL_ERR(("cfg80211_inform_bss_frame error\n"));
+ kfree(notif_bss_info);
+ return -EINVAL;
+ }
+ kfree(notif_bss_info);
+
+ return err;
+}
+
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev)
+{
+ u32 event = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ u16 flags = ntoh16(e->flags);
+
+ WL_DBG(("event %d, status %d\n", event, status));
+ if (event == WLC_E_SET_SSID) {
+ if (status == WLC_E_STATUS_SUCCESS) {
+ if (!wl_is_ibssmode(wl, ndev))
+ return true;
+ }
+ } else if (event == WLC_E_LINK) {
+ if (flags & WLC_EVENT_MSG_LINK)
+ return true;
+ }
+
+ WL_DBG(("wl_is_linkup false\n"));
+ return false;
+}
+
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+
+ if (event == WLC_E_DEAUTH_IND ||
+ event == WLC_E_DISASSOC_IND ||
+ event == WLC_E_DISASSOC ||
+ event == WLC_E_DEAUTH) {
+ return true;
+ } else if (event == WLC_E_LINK) {
+ if (!(flags & WLC_EVENT_MSG_LINK))
+ return true;
+ }
+
+ return false;
+}
+
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+ u32 event = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+
+ if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+ return true;
+ if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+ return true;
+
+ return false;
+}
+
+static s32
+wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ bool isfree = false;
+ s32 err = 0;
+ s32 freq;
+ s32 channel;
+ u8 body[200];
+ u32 event = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ u32 len = ntoh32(e->datalen);
+ u16 fc = 0;
+ u8 *mgmt_frame;
+ u8 bsscfgidx = e->bsscfgidx;
+ struct ieee80211_supported_band *band;
+ struct ether_addr da;
+ struct ether_addr bssid;
+ struct wiphy *wiphy = wl_to_wiphy(wl);
+ channel_info_t ci;
+
+ memset(body, 0, sizeof(body));
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ WL_DBG(("Enter \n"));
+
+ if (get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+ memcpy(body, data, len);
+ wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, ioctlbuf, sizeof(ioctlbuf), bsscfgidx);
+ memcpy(da.octet, ioctlbuf, ETHER_ADDR_LEN);
+ err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
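+ /* Map the firmware event to the corresponding 802.11 management
+ * frame subtype so a frame can be reconstructed for cfg80211
+ */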
+ switch (event) {
+ case WLC_E_ASSOC_IND:
+ fc = FC_ASSOC_REQ;
+ break;
+ case WLC_E_REASSOC_IND:
+ fc = FC_REASSOC_REQ;
+ break;
+ case WLC_E_DISASSOC_IND:
+ fc = FC_DISASSOC;
+ break;
+ case WLC_E_DEAUTH_IND:
+ fc = FC_DISASSOC;
+ break;
+ case WLC_E_DEAUTH:
+ fc = FC_DISASSOC;
+ break;
+ default:
+ fc = 0;
+ goto exit;
+ }
+ if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false)))
+ return err;
+
+ channel = dtoh32(ci.hw_channel);
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+ err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+ &mgmt_frame, &len, body);
+ if (err < 0)
+ goto exit;
+ isfree = true;
+
+ if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+ cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+ } else if (event == WLC_E_DISASSOC_IND) {
+ cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+ } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+ cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+ }
+
+ } else {
+ WL_DBG(("wl_notify_connect_status : event %d status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+ if (wl_is_linkup(wl, e, ndev)) {
+ wl_link_up(wl);
+ act = true;
+ wl_update_prof(wl, e, &act, WL_PROF_ACT);
+ wl_update_prof(wl, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ if (wl_is_ibssmode(wl, ndev)) {
+ printk("cfg80211_ibss_joined\n");
+ cfg80211_ibss_joined(ndev, (s8 *)&e->addr,
+ GFP_KERNEL);
+ WL_DBG(("joined in IBSS network\n"));
+ } else {
+ if (!wl_get_drv_status(wl, DISCONNECTING)) {
+ printk("wl_bss_connect_done succeeded status=(0x%x)\n",
+ (int)wl->status);
+ wl_bss_connect_done(wl, ndev, e, data, true);
+ WL_DBG(("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)
+ wl_read_prof(wl, WL_PROF_SSID))->SSID));
+ }
+ }
+
+ } else if (wl_is_linkdown(wl, e)) {
+ if (wl->scan_request) {
+ del_timer_sync(&wl->scan_timeout);
+ if (wl->escan_on) {
+ wl_notify_escan_complete(wl, true);
+ } else
+ wl_iscan_aborted(wl);
+ }
+ if (wl_get_drv_status(wl, CONNECTED)) {
+ scb_val_t scbval;
+ u8 *curbssid = wl_read_prof(wl, WL_PROF_BSSID);
+ printk("link down, call cfg80211_disconnected\n");
+ wl_clr_drv_status(wl, CONNECTED);
+ /* To make sure we disconnect, explicitly send a disassoc
+ * for the BSSID 00:00:00:00:00:00 issue
+ */
+ scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+ wl_link_down(wl);
+ wl_init_prof(wl);
+ } else if (wl_get_drv_status(wl, CONNECTING)) {
+ printk("link down, during connecting\n");
+ wl_bss_connect_done(wl, ndev, e, data, false);
+ }
+ wl_clr_drv_status(wl, DISCONNECTING);
+
+ } else if (wl_is_nonetwork(wl, e)) {
+ printk("connect failed event=%d e->status 0x%x\n",
+ event, (int)ntoh32(e->status));
+ /* Clean up any pending scan request */
+ if (wl->scan_request) {
+ del_timer_sync(&wl->scan_timeout);
+ if (wl->escan_on) {
+ wl_notify_escan_complete(wl, true);
+ } else
+ wl_iscan_aborted(wl);
+ }
+ if (wl_get_drv_status(wl, CONNECTING))
+ wl_bss_connect_done(wl, ndev, e, data, false);
+ } else {
+ printk("%s nothing\n", __FUNCTION__);
+ }
+ }
+exit:
+ if (isfree)
+ kfree(mgmt_frame);
+ return err;
+}
+
+static s32
+wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+ WL_DBG(("Enter \n"));
+ if (event == WLC_E_ROAM && status == WLC_E_STATUS_SUCCESS) {
+ if (wl_get_drv_status(wl, CONNECTED))
+ wl_bss_roaming_done(wl, ndev, e, data);
+ else
+ wl_bss_connect_done(wl, ndev, e, data, true);
+ act = true;
+ wl_update_prof(wl, e, &act, WL_PROF_ACT);
+ wl_update_prof(wl, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ }
+ return err;
+}
+
+static __used s32
+wl_dev_bufvar_set(struct net_device *dev, s8 *name, s8 *buf, s32 len)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ u32 buflen;
+
+ buflen = bcm_mkiovar(name, buf, len, wl->ioctl_buf, WL_IOCTL_LEN_MAX);
+ BUG_ON(unlikely(!buflen));
+
+ return wldev_ioctl(dev, WLC_SET_VAR, wl->ioctl_buf, buflen, true);
+}
+
+static s32
+wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
+ s32 buf_len)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ u32 len;
+ s32 err = 0;
+
+ len = bcm_mkiovar(name, NULL, 0, wl->ioctl_buf, WL_IOCTL_LEN_MAX);
+ BUG_ON(unlikely(!len));
+ err = wldev_ioctl(dev, WLC_GET_VAR, (void *)wl->ioctl_buf,
+ WL_IOCTL_LEN_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ memcpy(buf, wl->ioctl_buf, buf_len);
+
+ return err;
+}
+
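+/*
+ * Fetch the association request/response IEs from the dongle via the
+ * "assoc_info", "assoc_req_ies" and "assoc_resp_ies" iovars and cache them
+ * in the driver's connect info (wl_to_conn(wl)) for later use by
+ * cfg80211_connect_result()/cfg80211_roamed().
+ */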
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev)
+{
+ wl_assoc_info_t assoc_info;
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ err = wl_dev_bufvar_get(ndev, "assoc_info", wl->extra_buf,
+ WL_ASSOC_INFO_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc info (%d)\n", err));
+ return err;
+ }
+ memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t));
+ assoc_info.req_len = htod32(assoc_info.req_len);
+ assoc_info.resp_len = htod32(assoc_info.resp_len);
+ assoc_info.flags = htod32(assoc_info.flags);
+ if (conn_info->req_ie_len) {
+ conn_info->req_ie_len = 0;
+ bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+ }
+ if (conn_info->resp_ie_len) {
+ conn_info->resp_ie_len = 0;
+ bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+ }
+ if (assoc_info.req_len) {
+ err = wl_dev_bufvar_get(ndev, "assoc_req_ies", wl->extra_buf,
+ WL_ASSOC_INFO_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc req (%d)\n", err));
+ return err;
+ }
+ conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+ if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+ conn_info->req_ie_len -= ETHER_ADDR_LEN;
+ }
+ if (conn_info->req_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->req_ie, wl->extra_buf, conn_info->req_ie_len);
+ else {
+ WL_ERR(("%s IE size %d above max %d size \n",
+ __FUNCTION__, conn_info->req_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->req_ie_len = 0;
+ }
+ if (assoc_info.resp_len) {
+ err = wl_dev_bufvar_get(ndev, "assoc_resp_ies", wl->extra_buf,
+ WL_ASSOC_INFO_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc resp (%d)\n", err));
+ return err;
+ }
+ conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->resp_ie, wl->extra_buf, conn_info->resp_ie_len);
+ else {
+ WL_ERR(("%s IE size %d above max %d size \n",
+ __FUNCTION__, conn_info->resp_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->resp_ie_len = 0;
+ }
+ WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+ conn_info->resp_ie_len));
+
+ return err;
+}
+
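+/*
+ * Convert a plain channel number into a 20 MHz chanspec and append it to the
+ * join parameters, adjusting *join_params_size accordingly. A channel of 0
+ * leaves the join parameters untouched.
+ */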
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+ size_t *join_params_size)
+{
+ chanspec_t chanspec = 0;
+
+ if (ch != 0) {
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
+
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ htodchanspec(join_params->params.chanspec_list[0]);
+
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+
+ WL_DBG(("%s join_params->params.chanspec_list[0]= %X\n",
+ __FUNCTION__, join_params->params.chanspec_list[0]));
+
+ }
+}
+
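+/*
+ * Refresh the cached beacon interval and DTIM period for the current BSS,
+ * taking them from the cfg80211 BSS table if the AP is known there, or from
+ * WLC_GET_BSS_INFO/WLC_GET_DTIMPRD queries to the dongle otherwise.
+ */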
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev)
+{
+ struct cfg80211_bss *bss;
+ struct wl_bss_info *bi;
+ struct wlc_ssid *ssid;
+ struct bcm_tlv *tim;
+ u16 beacon_interval;
+ u8 dtim_period;
+ size_t ie_len;
+ u8 *ie;
+ u8 *curbssid;
+ s32 err = 0;
+ struct wiphy *wiphy;
+ wiphy = wl_to_wiphy(wl);
+
+ if (wl_is_ibssmode(wl, ndev))
+ return err;
+
+ ssid = (struct wlc_ssid *)wl_read_prof(wl, WL_PROF_SSID);
+ curbssid = wl_read_prof(wl, WL_PROF_BSSID);
+ bss = cfg80211_get_bss(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+
+ mutex_lock(&wl->usr_sync);
+ if (unlikely(!bss)) {
+ WL_DBG(("Could not find the AP\n"));
+ *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+ err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+ wl->extra_buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ goto update_bss_info_out;
+ }
+ bi = (struct wl_bss_info *)(wl->extra_buf + 4);
+ if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+ err = -EIO;
+ goto update_bss_info_out;
+ }
+ err = wl_inform_single_bss(wl, bi);
+ if (unlikely(err))
+ goto update_bss_info_out;
+
+ ie = ((u8 *)bi) + bi->ie_offset;
+ ie_len = bi->ie_length;
+ beacon_interval = cpu_to_le16(bi->beacon_period);
+ } else {
+ WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+ beacon_interval = bss->beacon_interval;
+ cfg80211_put_bss(bss);
+ }
+
+ tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ if (tim) {
+ dtim_period = tim->data[1];
+ } else {
+ /*
+ * An active scan was done, so the DTIM period could not be
+ * taken from the probe response. Query the dongle for the
+ * DTIM information explicitly instead.
+ */
+ err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+ &dtim_period, sizeof(dtim_period), false);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+ goto update_bss_info_out;
+ }
+ }
+
+ wl_update_prof(wl, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ wl_update_prof(wl, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+ mutex_unlock(&wl->usr_sync);
+ return err;
+}
+
+static s32
+wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+ s32 err = 0;
+ u8 *curbssid;
+
+ wl_get_assoc_ies(wl, ndev);
+ wl_update_prof(wl, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(wl, WL_PROF_BSSID);
+ wl_update_bss_info(wl, ndev);
+ wl_update_pmklist(ndev, wl->pmk_list, err);
+ cfg80211_roamed(ndev,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+ NULL,
+#endif
+ curbssid,
+ conn_info->req_ie, conn_info->req_ie_len,
+ conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+ WL_DBG(("Report roaming result\n"));
+
+ wl_set_drv_status(wl, CONNECTED);
+
+ return err;
+}
+
+static s32
+wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+ s32 err = 0;
+ u8 *curbssid = wl_read_prof(wl, WL_PROF_BSSID);
+ WL_DBG((" enter\n"));
+ if (wl->scan_request) {
+ wl_cfg80211_scan_abort(wl, ndev);
+ }
+ if (wl_get_drv_status(wl, CONNECTING)) {
+ wl_clr_drv_status(wl, CONNECTING);
+ if (completed) {
+ wl_get_assoc_ies(wl, ndev);
+ wl_update_prof(wl, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(wl, WL_PROF_BSSID);
+ wl_update_bss_info(wl, ndev);
+ wl_update_pmklist(ndev, wl->pmk_list, err);
+ wl_set_drv_status(wl, CONNECTED);
+ }
+ cfg80211_connect_result(ndev,
+ curbssid,
+ conn_info->req_ie,
+ conn_info->req_ie_len,
+ conn_info->resp_ie,
+ conn_info->resp_ie_len,
+ completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
+ GFP_KERNEL);
+ if (completed)
+ WL_INFO(("Report connect result - connection succeeded\n"));
+ else
+ WL_ERR(("Report connect result - connection failed\n"));
+ }
+ return err;
+}
+
+static s32
+wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ u16 flags = ntoh16(e->flags);
+ enum nl80211_key_type key_type;
+
+ mutex_lock(&wl->usr_sync);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ key_type = NL80211_KEYTYPE_GROUP;
+ else
+ key_type = NL80211_KEYTYPE_PAIRWISE;
+
+ cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+ NULL, GFP_KERNEL);
+ mutex_unlock(&wl->usr_sync);
+
+ return 0;
+}
+
+static s32
+wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct channel_info channel_inform;
+ struct wl_scan_results *bss_list;
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ if (wl->iscan_on && wl->iscan_kickstart)
+ return wl_wakeup_iscan(wl_to_iscan(wl));
+
+ mutex_lock(&wl->usr_sync);
+ wl_clr_drv_status(wl, SCANNING);
+ err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+ sizeof(channel_inform), false);
+ if (unlikely(err)) {
+ WL_ERR(("scan busy (%d)\n", err));
+ goto scan_done_out;
+ }
+ channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+ if (unlikely(channel_inform.scan_channel)) {
+ WL_DBG(("channel_inform.scan_channel (%d)\n",
+ channel_inform.scan_channel));
+ }
+ wl->bss_list = wl->scan_results;
+ bss_list = wl->bss_list;
+ memset(bss_list, 0, len);
+ bss_list->buflen = htod32(len);
+ err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+ if (unlikely(err)) {
+ WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+ bss_list->buflen = dtoh32(bss_list->buflen);
+ bss_list->version = dtoh32(bss_list->version);
+ bss_list->count = dtoh32(bss_list->count);
+
+ err = wl_inform_bss(wl);
+
+scan_done_out:
+ del_timer_sync(&wl->scan_timeout);
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ if (wl->scan_request) {
+ WL_DBG(("cfg80211_scan_done\n"));
+ cfg80211_scan_done(wl->scan_request, false);
+ wl->scan_request = NULL;
+ }
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+ mutex_unlock(&wl->usr_sync);
+ return err;
+}
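+
+/*
+ * Build an 802.11 management frame (header plus fixed fields and body) for
+ * the given frame control value. The frame is allocated with kzalloc() and
+ * returned through *pheader; the caller must free it. *body_len is updated
+ * to the total frame length.
+ */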
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody)
+{
+ struct dot11_management_header *hdr;
+ u32 totlen = 0;
+ s32 err = 0;
+ u8 *offset;
+ u32 prebody_len = *body_len;
+ switch (fc) {
+ case FC_ASSOC_REQ:
+ /* capability, listen interval */
+ totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+ break;
+
+ case FC_REASSOC_REQ:
+ /* capability, listen interval, AP address */
+ totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+ break;
+ }
+ totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+ *pheader = kzalloc(totlen, GFP_KERNEL);
+ if (*pheader == NULL) {
+ WL_ERR(("memory alloc failed \n"));
+ return -ENOMEM;
+ }
+ hdr = (struct dot11_management_header *) (*pheader);
+ hdr->fc = htol16(fc);
+ hdr->durid = 0;
+ hdr->seq = 0;
+ offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+ bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+ bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+ bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+ bcopy((const char*)pbody, offset, prebody_len);
+ *body_len = totlen;
+ return err;
+}
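+
+/*
+ * Handle received action and probe request frame events. Action frames are
+ * re-wrapped with a management header before being delivered to
+ * cfg80211_rx_mgmt(); after a P2P GO negotiation confirm the "mpc" iovar is
+ * set back to 1.
+ */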
+static s32
+wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct ieee80211_supported_band *band;
+ struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct ether_addr da;
+ struct ether_addr bssid;
+ bool isfree = false;
+ s32 err = 0;
+ s32 freq;
+ wifi_p2p_pub_act_frame_t *act_frm;
+ wl_event_rx_frame_data_t *rxframe =
+ (wl_event_rx_frame_data_t*)data;
+ u32 event = ntoh32(e->event_type);
+ u8 *mgmt_frame;
+ u8 bsscfgidx = e->bsscfgidx;
+ u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+ u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+ if (event == WLC_E_ACTION_FRAME_RX) {
+ wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, ioctlbuf, sizeof(ioctlbuf), bsscfgidx);
+
+ wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+ memcpy(da.octet, ioctlbuf, ETHER_ADDR_LEN);
+ err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+ &mgmt_frame, &mgmt_frame_len,
+ (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+ if (err < 0) {
+ WL_ERR(("%s: Error in receiving action frame len %d channel %d freq %d\n",
+ __func__, mgmt_frame_len, channel, freq));
+ goto exit;
+ }
+ isfree = true;
+ act_frm =
+ (wifi_p2p_pub_act_frame_t *) (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ /*
+ * After complete GO Negotiation, roll back to mpc mode
+ */
+ if (act_frm->subtype == P2P_PAF_GON_CONF) {
+ wldev_iovar_setint(ndev, "mpc", 1);
+ }
+ } else {
+ mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+ }
+
+ cfg80211_rx_mgmt(ndev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+ WL_DBG(("%s: mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", __func__,
+ mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+
+ if (isfree)
+ kfree(mgmt_frame);
+exit:
+ return 0;
+}
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+ s32 i = 0;
+ WL_DBG(("Enter \n"));
+ for (i = 0; i <= VWDEV_CNT; i++) {
+ conf->mode[i].type = -1;
+ conf->mode[i].ndev = NULL;
+ }
+ conf->frag_threshold = (u32)-1;
+ conf->rts_threshold = (u32)-1;
+ conf->retry_short = (u32)-1;
+ conf->retry_long = (u32)-1;
+ conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct wl_priv *wl)
+{
+ unsigned long flags;
+
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ memset(wl->profile, 0, sizeof(struct wl_profile));
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+}
+
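+/* Populate the firmware event -> handler dispatch table. */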
+static void wl_init_event_handler(struct wl_priv *wl)
+{
+ memset(wl->evt_handler, 0, sizeof(wl->evt_handler));
+
+ wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+ wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+ wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+ wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+ wl->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+ wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+ wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+
+}
+
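+/*
+ * Allocate the private buffers (scan results, ioctl/extra buffers, profile,
+ * iscan state, PMK list, ...) used by the cfg80211 layer. On any failure
+ * everything allocated so far is released and -ENOMEM is returned.
+ */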
+static s32 wl_init_priv_mem(struct wl_priv *wl)
+{
+ WL_DBG(("Enter \n"));
+ wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!wl->scan_results)) {
+ WL_ERR(("Scan results alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
+ if (unlikely(!wl->conf)) {
+ WL_ERR(("wl_conf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->profile = (void *)kzalloc(sizeof(*wl->profile), GFP_KERNEL);
+ if (unlikely(!wl->profile)) {
+ WL_ERR(("wl_profile alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->bss_info = (void *)kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (unlikely(!wl->bss_info)) {
+ WL_ERR(("Bss information alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->scan_req_int =
+ (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
+ if (unlikely(!wl->scan_req_int)) {
+ WL_ERR(("Scan req alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->ioctl_buf = (void *)kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
+ if (unlikely(!wl->ioctl_buf)) {
+ WL_ERR(("Ioctl buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!wl->escan_ioctl_buf)) {
+ WL_ERR(("Ioctl buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!wl->extra_buf)) {
+ WL_ERR(("Extra buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
+ if (unlikely(!wl->iscan)) {
+ WL_ERR(("Iscan buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->fw = (void *)kzalloc(sizeof(*wl->fw), GFP_KERNEL);
+ if (unlikely(!wl->fw)) {
+ WL_ERR(("fw object alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
+ if (unlikely(!wl->pmk_list)) {
+ WL_ERR(("pmk list alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL);
+ if (unlikely(!wl->sta_info)) {
+ WL_ERR(("sta info alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ return 0;
+
+init_priv_mem_out:
+ wl_deinit_priv_mem(wl);
+
+ return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct wl_priv *wl)
+{
+ kfree(wl->scan_results);
+ wl->scan_results = NULL;
+ kfree(wl->bss_info);
+ wl->bss_info = NULL;
+ kfree(wl->conf);
+ wl->conf = NULL;
+ kfree(wl->profile);
+ wl->profile = NULL;
+ kfree(wl->scan_req_int);
+ wl->scan_req_int = NULL;
+ kfree(wl->ioctl_buf);
+ wl->ioctl_buf = NULL;
+ kfree(wl->escan_ioctl_buf);
+ wl->escan_ioctl_buf = NULL;
+ kfree(wl->extra_buf);
+ wl->extra_buf = NULL;
+ kfree(wl->iscan);
+ wl->iscan = NULL;
+ kfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->pmk_list);
+ wl->pmk_list = NULL;
+ kfree(wl->sta_info);
+ wl->sta_info = NULL;
+ if (wl->ap_info) {
+ kfree(wl->ap_info->wpa_ie);
+ kfree(wl->ap_info->rsn_ie);
+ kfree(wl->ap_info->wps_ie);
+ kfree(wl->ap_info);
+ wl->ap_info = NULL;
+ }
+}
+
+static s32 wl_create_event_handler(struct wl_priv *wl)
+{
+ int ret = 0;
+ WL_DBG(("Enter \n"));
+
+ wl->event_tsk.thr_pid = DHD_PID_KT_INVALID;
+ PROC_START(wl_event_handler, wl, &wl->event_tsk, 0);
+ if (wl->event_tsk.thr_pid < 0)
+ ret = -ENOMEM;
+ return ret;
+}
+
+static void wl_destroy_event_handler(struct wl_priv *wl)
+{
+ if (wl->event_tsk.thr_pid >= 0)
+ PROC_STOP(&wl->event_tsk);
+}
+
+static void wl_term_iscan(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ WL_TRACE(("In\n"));
+ if (wl->iscan_on && iscan->tsk) {
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ WL_INFO(("SIGTERM\n"));
+ send_sig(SIGTERM, iscan->tsk, 1);
+ WL_DBG(("kthread_stop\n"));
+ kthread_stop(iscan->tsk);
+ iscan->tsk = NULL;
+ }
+}
+
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
+{
+ struct wl_priv *wl = iscan_to_wl(iscan);
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ if (unlikely(!wl_get_drv_status(wl, SCANNING))) {
+ wl_clr_drv_status(wl, SCANNING);
+ WL_ERR(("Scan complete while device not scanning\n"));
+ return;
+ }
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ wl_clr_drv_status(wl, SCANNING);
+ if (likely(wl->scan_request)) {
+ cfg80211_scan_done(wl->scan_request, aborted);
+ wl->scan_request = NULL;
+ }
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+ wl->iscan_kickstart = false;
+}
+
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
+{
+ if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
+ WL_DBG(("wake up iscan\n"));
+ up(&iscan->sync);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static s32
+wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+ struct wl_scan_results **bss_list)
+{
+ struct wl_iscan_results list;
+ struct wl_scan_results *results;
+ struct wl_iscan_results *list_buf;
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
+ list_buf = (struct wl_iscan_results *)iscan->scan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WL_ISCAN_BUF_MAX);
+ err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list,
+ WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
+ WL_ISCAN_BUF_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ results->count = dtoh32(results->count);
+ WL_DBG(("results->count = %d\n", results->count));
+ WL_DBG(("results->buflen = %d\n", results->buflen));
+ *status = dtoh32(list_buf->status);
+ *bss_list = results;
+
+ return err;
+}
+
+static s32 wl_iscan_done(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ mutex_lock(&wl->usr_sync);
+ wl_inform_bss(wl);
+ wl_notify_iscan_complete(iscan, false);
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
+static s32 wl_iscan_pending(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ /* Reschedule the timer */
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32 wl_iscan_inprogress(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ mutex_lock(&wl->usr_sync);
+ wl_inform_bss(wl);
+ wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+ mutex_unlock(&wl->usr_sync);
+ /* Reschedule the timer */
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32 wl_iscan_aborted(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ mutex_lock(&wl->usr_sync);
+ wl_notify_iscan_complete(iscan, true);
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
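+/*
+ * iscan worker thread: woken via iscan->sync, it polls "iscanresults" from
+ * the dongle and invokes the iscan state handler that matches the returned
+ * status (done/partial/pending/aborted).
+ */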
+static s32 wl_iscan_thread(void *data)
+{
+ struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
+ struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+ struct wl_priv *wl = iscan_to_wl(iscan);
+ u32 status;
+ int err = 0;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ allow_signal(SIGTERM);
+ status = WL_SCAN_RESULTS_PARTIAL;
+ while (likely(!down_interruptible(&iscan->sync))) {
+ if (kthread_should_stop())
+ break;
+ if (iscan->timer_on) {
+ del_timer_sync(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+ mutex_lock(&wl->usr_sync);
+ err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
+ if (unlikely(err)) {
+ status = WL_SCAN_RESULTS_ABORTED;
+ WL_ERR(("Abort iscan\n"));
+ }
+ mutex_unlock(&wl->usr_sync);
+ iscan->iscan_handler[status] (wl);
+ }
+ if (iscan->timer_on) {
+ del_timer_sync(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+ WL_DBG(("%s was terminated\n", __func__));
+
+ return 0;
+}
+
+static void wl_scan_timeout(unsigned long data)
+{
+ struct wl_priv *wl = (struct wl_priv *)data;
+
+ if (wl->scan_request) {
+ WL_ERR(("timer expired\n"));
+ if (wl->escan_on)
+ wl_notify_escan_complete(wl, true);
+ else
+ wl_notify_iscan_complete(wl_to_iscan(wl), true);
+ }
+}
+
+static void wl_iscan_timer(unsigned long data)
+{
+ struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+
+ if (iscan) {
+ iscan->timer_on = 0;
+ WL_DBG(("timer expired\n"));
+ wl_wakeup_iscan(iscan);
+ }
+}
+
+static s32 wl_invoke_iscan(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ int err = 0;
+
+ if (wl->iscan_on && !iscan->tsk) {
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ sema_init(&iscan->sync, 0);
+ iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+ if (IS_ERR(iscan->tsk)) {
+ WL_ERR(("Could not create iscan thread\n"));
+ iscan->tsk = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ return err;
+}
+
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan)
+{
+ memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler));
+ iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done;
+ iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress;
+ iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending;
+ iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted;
+ iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted;
+}
+
+static void wl_notify_escan_complete(struct wl_priv *wl, bool aborted)
+{
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ wl_clr_drv_status(wl, SCANNING);
+ if (wl->p2p_supported && p2p_on(wl))
+ wl_clr_p2p_status(wl, SCANNING);
+
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ if (likely(wl->scan_request)) {
+ cfg80211_scan_done(wl->scan_request, aborted);
+ wl->scan_request = NULL;
+ }
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+}
+
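+/*
+ * Handle WLC_E_ESCAN_RESULT events. Partial results are validated and merged
+ * into the escan buffer (keeping the strongest on-channel RSSI for duplicate
+ * BSSes); success/abort status completes the pending cfg80211 scan request.
+ */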
+static s32 wl_escan_handler(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ s32 status = ntoh32(e->status);
+ wl_bss_info_t *bi;
+ wl_escan_result_t *escan_result;
+ wl_bss_info_t *bss = NULL;
+ wl_scan_results_t *list;
+ u32 bi_length;
+ u32 i;
+ WL_DBG((" enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+ if (!wl->escan_on &&
+ !wl_get_drv_status(wl, SCANNING)) {
+ WL_ERR(("escan is not ready \n"));
+ return err;
+ }
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_INFO(("WLC_E_STATUS_PARTIAL \n"));
+ escan_result = (wl_escan_result_t *) data;
+ if (!escan_result) {
+ WL_ERR(("Invalid escan result (NULL pointer)\n"));
+ goto exit;
+ }
+ if (dtoh16(escan_result->bss_count) != 1) {
+ WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ goto exit;
+ }
+ bi = escan_result->bss_info;
+ if (!bi) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+ goto exit;
+ }
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
+ list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ WL_ERR(("Buffer is too small: ignoring\n"));
+ goto exit;
+ }
+#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+ : list->bss_info;
+
+ if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ CHSPEC_BAND(bi->chanspec) == CHSPEC_BAND(bss->chanspec) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+ if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+ (bi->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ bss->RSSI = MAX(bss->RSSI, bi->RSSI);
+ } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+ (bi->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ bss->RSSI = bi->RSSI;
+ bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+ }
+
+ goto exit;
+ }
+ }
+ memcpy(&(wl->escan_info.escan_buf[list->buflen]), bi, bi_length);
+ list->version = dtoh32(bi->version);
+ list->buflen += bi_length;
+ list->count++;
+
+ }
+ else if (status == WLC_E_STATUS_SUCCESS) {
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (likely(wl->scan_request)) {
+ mutex_lock(&wl->usr_sync);
+ del_timer_sync(&wl->scan_timeout);
+ WL_INFO(("ESCAN COMPLETED\n"));
+ wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ wl_inform_bss(wl);
+ wl_notify_escan_complete(wl, false);
+ mutex_unlock(&wl->usr_sync);
+ }
+ }
+ else if (status == WLC_E_STATUS_ABORT) {
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (likely(wl->scan_request)) {
+ mutex_lock(&wl->usr_sync);
+ del_timer_sync(&wl->scan_timeout);
+ WL_INFO(("ESCAN ABORTED\n"));
+ wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ wl_inform_bss(wl);
+ wl_notify_escan_complete(wl, true);
+ mutex_unlock(&wl->usr_sync);
+ }
+ }
+ else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (likely(wl->scan_request)) {
+ mutex_lock(&wl->usr_sync);
+ del_timer_sync(&wl->scan_timeout);
+ wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ wl_inform_bss(wl);
+ wl_notify_escan_complete(wl, true);
+ mutex_unlock(&wl->usr_sync);
+ }
+ }
+exit:
+ return err;
+}
+
+static s32 wl_init_scan(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ int err = 0;
+
+ if (wl->iscan_on) {
+ iscan->dev = wl_to_prmry_ndev(wl);
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ wl_init_iscan_handler(iscan);
+ iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
+ init_timer(&iscan->timer);
+ iscan->timer.data = (unsigned long) iscan;
+ iscan->timer.function = wl_iscan_timer;
+ sema_init(&iscan->sync, 0);
+ iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+ if (IS_ERR(iscan->tsk)) {
+ WL_ERR(("Could not create iscan thread\n"));
+ iscan->tsk = NULL;
+ return -ENOMEM;
+ }
+ iscan->data = wl;
+ } else if (wl->escan_on) {
+ wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ }
+ /* Init scan_timeout timer */
+ init_timer(&wl->scan_timeout);
+ wl->scan_timeout.data = (unsigned long) wl;
+ wl->scan_timeout.function = wl_scan_timeout;
+
+ return err;
+}
+
+static void wl_init_fw(struct wl_fw_ctrl *fw)
+{
+ fw->status = 0;
+}
+
+static s32 wl_init_priv(struct wl_priv *wl)
+{
+ struct wiphy *wiphy = wl_to_wiphy(wl);
+ s32 err = 0;
+ s32 i = 0;
+
+ wl->scan_request = NULL;
+ wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+ wl->iscan_on = false;
+ wl->escan_on = true;
+ wl->roam_on = false;
+ wl->iscan_kickstart = false;
+ wl->active_scan = true;
+ wl->dongle_up = false;
+ wl->rf_blocked = false;
+
+ for (i = 0; i < VWDEV_CNT; i++)
+ wl->vwdev[i] = NULL;
+
+ init_waitqueue_head(&wl->dongle_event_wait);
+ wl_init_eq(wl);
+ err = wl_init_priv_mem(wl);
+ if (unlikely(err))
+ return err;
+ if (unlikely(wl_create_event_handler(wl)))
+ return -ENOMEM;
+ wl_init_event_handler(wl);
+ mutex_init(&wl->usr_sync);
+ err = wl_init_scan(wl);
+ if (unlikely(err))
+ return err;
+ wl_init_fw(wl->fw);
+ wl_init_conf(wl->conf);
+ wl_init_prof(wl);
+ wl_link_down(wl);
+
+ return err;
+}
+
+static void wl_deinit_priv(struct wl_priv *wl)
+{
+ wl_destroy_event_handler(wl);
+ wl->dongle_up = false; /* dongle down */
+ wl_flush_eq(wl);
+ wl_link_down(wl);
+ del_timer_sync(&wl->scan_timeout);
+ wl_term_iscan(wl);
+ wl_deinit_priv_mem(wl);
+}
+
+#if defined(DHD_P2P_DEV_ADDR_FROM_SYSFS) && defined(CONFIG_SYSCTL)
+s32 wl_cfg80211_sysctl_export_devaddr(void *data)
+{
+ /* Export the p2p_dev_addr via sysctl interface
+ * so that wpa_supplicant can access it
+ */
+ dhd_pub_t *dhd = (dhd_pub_t *)data;
+ struct wl_priv *wl = wlcfg_drv_priv;
+
+ wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+
+ sprintf((char *)&wl_sysctl_macstring[0], MACSTR, MAC2STR(wl->p2p->dev_addr.octet));
+ sprintf((char *)&wl_sysctl_macstring[1], MACSTR, MAC2STR(wl->p2p->int_addr.octet));
+
+ return 0;
+}
+#endif /* DHD_P2P_DEV_ADDR_FROM_SYSFS && CONFIG_SYSCTL */
+
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+ struct wl_priv * wl = NULL;
+ s32 err = 0;
+ WL_TRACE(("In\n"));
+ if (unlikely(!ndev)) {
+ WL_ERR(("ndev is invaild\n"));
+ return -ENODEV;
+ }
+ wl = wlcfg_drv_priv;
+ if (wl && !wl_get_drv_status(wl, READY)) {
+ if (wl->wdev &&
+ wl_cfgp2p_supported(wl, ndev)) {
+ wl->wdev->wiphy->interface_modes |=
+ (BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO));
+ if ((err = wl_cfgp2p_init_priv(wl)) != 0)
+ goto fail;
+#if defined(DHD_P2P_DEV_ADDR_FROM_SYSFS) && defined(CONFIG_SYSCTL)
+ wl_cfg80211_sysctl_export_devaddr(wl->pub);
+#endif
+ wl->p2p_supported = true;
+ }
+ } else
+ return -ENODEV;
+
+ wl_set_drv_status(wl, READY);
+fail:
+ return err;
+}
+s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
+{
+ struct wireless_dev *wdev;
+ struct wl_priv *wl;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ if (unlikely(!ndev)) {
+ WL_ERR(("ndev is invaild\n"));
+ return -ENODEV;
+ }
+ WL_DBG(("func %p\n", wl_cfg80211_get_sdio_func()));
+ wdev = wl_alloc_wdev(&wl_cfg80211_get_sdio_func()->dev);
+ if (unlikely(IS_ERR(wdev)))
+ return -ENOMEM;
+
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+ wl = (struct wl_priv *)wiphy_priv(wdev->wiphy);
+ wl->wdev = wdev;
+ wl->pub = data;
+
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ err = wl_init_priv(wl);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+
+ err = wl_setup_rfkill(wl, TRUE);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to setup rfkill %d\n", err));
+ goto cfg80211_attach_out;
+ }
+
+#if defined(DHD_P2P_DEV_ADDR_FROM_SYSFS) && defined(CONFIG_SYSCTL)
+ if (!(wl_sysctl_hdr = register_sysctl_table(wl_sysctl_table))) {
+ WL_ERR(("%s: sysctl register failed!! \n", __func__));
+ goto cfg80211_attach_out;
+ }
+#endif
+#if defined(COEX_DHCP)
+ if (wl_cfg80211_btcoex_init(wl))
+ goto cfg80211_attach_out;
+#endif /* COEX_DHCP */
+
+ wlcfg_drv_priv = wl;
+ return err;
+
+cfg80211_attach_out:
+ err = wl_setup_rfkill(wl, FALSE);
+ wl_free_wdev(wl);
+ return err;
+}
+
+void wl_cfg80211_detach(void)
+{
+ struct wl_priv *wl;
+
+ wl = wlcfg_drv_priv;
+
+ WL_TRACE(("In\n"));
+
+#if defined(COEX_DHCP)
+ wl_cfg80211_btcoex_deinit(wl);
+#endif /* COEX_DHCP */
+
+#if defined(DHD_P2P_DEV_ADDR_FROM_SYSFS) && defined(CONFIG_SYSCTL)
+ if (wl_sysctl_hdr)
+ unregister_sysctl_table(wl_sysctl_hdr);
+#endif
+ wl_setup_rfkill(wl, FALSE);
+ if (wl->p2p_supported)
+ wl_cfgp2p_deinit_priv(wl);
+ wl_deinit_priv(wl);
+ wlcfg_drv_priv = NULL;
+ wl_clear_sdio_func();
+ wl_free_wdev(wl);
+}
+
+static void wl_wakeup_event(struct wl_priv *wl)
+{
+ if (wl->event_tsk.thr_pid >= 0) {
+ DHD_OS_WAKE_LOCK(wl->pub);
+ up(&wl->event_tsk.sema);
+ }
+}
+
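+/*
+ * Event worker thread: drains the event queue filled by wl_cfg80211_event()
+ * and calls the handler registered for each event type, resolving the target
+ * net_device from the event's interface index.
+ */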
+static s32 wl_event_handler(void *data)
+{
+ struct net_device *netdev;
+ struct wl_priv *wl = NULL;
+ struct wl_event_q *e;
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+
+ wl = (struct wl_priv *)tsk->parent;
+ complete(&tsk->completed);
+
+ while (down_interruptible (&tsk->sema) == 0) {
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated)
+ break;
+ while ((e = wl_deq_event(wl))) {
+ WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+ netdev = dhd_idx2net((struct dhd_pub *)(wl->pub), e->emsg.ifidx);
+ if (!netdev)
+ netdev = wl_to_prmry_ndev(wl);
+ if (e->etype < WLC_E_LAST && wl->evt_handler[e->etype]) {
+ wl->evt_handler[e->etype] (wl, netdev, &e->emsg, e->edata);
+ } else {
+ WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+ }
+ wl_put_event(e);
+ }
+ DHD_OS_WAKE_UNLOCK(wl->pub);
+ }
+ WL_DBG(("%s was terminated\n", __func__));
+ complete_and_exit(&tsk->completed, 0);
+ return 0;
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+ u32 event_type = ntoh32(e->event_type);
+ struct wl_priv *wl = wlcfg_drv_priv;
+
+#if (WL_DBG_LEVEL > 0)
+ s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+ wl_dbg_estr[event_type] : (s8 *) "Unknown";
+ WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+ if (event_type == WLC_E_PFN_NET_FOUND)
+ WL_ERR((" PNO Event\n"));
+
+ if (likely(!wl_enq_event(wl, ndev, event_type, e, data)))
+ wl_wakeup_event(wl);
+}
+
+static void wl_init_eq(struct wl_priv *wl)
+{
+ wl_init_eq_lock(wl);
+ INIT_LIST_HEAD(&wl->eq_list);
+}
+
+static void wl_flush_eq(struct wl_priv *wl)
+{
+ struct wl_event_q *e;
+ unsigned long flags;
+
+ flags = wl_lock_eq(wl);
+ while (!list_empty(&wl->eq_list)) {
+ e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ kfree(e);
+ }
+ wl_unlock_eq(wl, flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
+{
+ struct wl_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_lock_eq(wl);
+ if (likely(!list_empty(&wl->eq_list))) {
+ e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_unlock_eq(wl, flags);
+
+ return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg,
+ void *data)
+{
+ struct wl_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+ gfp_t aflags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = sizeof(struct wl_event_q) + data_len;
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
+ if (unlikely(!e)) {
+ WL_ERR(("event alloc failed\n"));
+ return -ENOMEM;
+ }
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ flags = wl_lock_eq(wl);
+ list_add_tail(&e->eq_list, &wl->eq_list);
+ wl_unlock_eq(wl, flags);
+
+ return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+ kfree(e);
+}
+
+void wl_cfg80211_set_sdio_func(void *func)
+{
+ cfg80211_sdio_func = (struct sdio_func *)func;
+}
+
+static void wl_clear_sdio_func(void)
+{
+ cfg80211_sdio_func = NULL;
+}
+
+struct sdio_func *wl_cfg80211_get_sdio_func(void)
+{
+ return cfg80211_sdio_func;
+}
+
+static s32 wl_dongle_mode(struct wl_priv *wl, struct net_device *ndev, s32 iftype)
+{
+ s32 infra = 0;
+ s32 err = 0;
+ s32 mode = 0;
+ switch (iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ WL_ERR(("type (%d) : currently we do not support this mode\n",
+ iftype));
+ err = -EINVAL;
+ return err;
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ infra = 1;
+ break;
+ default:
+ err = -EINVAL;
+ WL_ERR(("invalid type (%d)\n", iftype));
+ return err;
+ }
+ infra = htod32(infra);
+ err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ return err;
+ }
+
+ set_mode_by_netdev(wl, ndev, mode);
+
+ return 0;
+}
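+
+/* Set or clear a single event in the dongle's "event_msgs" bitmask. */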
+static s32 wl_dongle_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ s8 eventmask[WL_EVENTING_MASK_LEN];
+ s32 err = 0;
+
+ /* Setup event_msgs */
+ bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+ if (unlikely(err)) {
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ goto dongle_eventmsg_out;
+ }
+ memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+ if (add) {
+ setbit(eventmask, event);
+ } else {
+ clrbit(eventmask, event);
+ }
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ goto dongle_eventmsg_out;
+ }
+
+dongle_eventmsg_out:
+ return err;
+
+}
+
+
+#ifndef EMBEDDED_PLATFORM
+static s32 wl_dongle_country(struct net_device *ndev, u8 ccode)
+{
+
+ s32 err = 0;
+
+ return err;
+}
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up)
+{
+ s32 err = 0;
+
+ err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ }
+ return err;
+}
+
+static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode)
+{
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ err = wldev_ioctl(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_PM error (%d)\n", err));
+ }
+ return err;
+}
+
+static s32
+wl_dongle_glom(struct net_device *ndev, u32 glom, u32 dongle_align)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ s32 err = 0;
+
+ /* Match Host and Dongle rx alignment */
+ bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(err)) {
+ WL_ERR(("txglomalign error (%d)\n", err));
+ goto dongle_glom_out;
+ }
+ /* disable glom option per default */
+ bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(err)) {
+ WL_ERR(("txglom error (%d)\n", err));
+ goto dongle_glom_out;
+ }
+dongle_glom_out:
+ return err;
+}
+
+static s32
+wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ s32 err = 0;
+
+ /* Setup timeout if Beacons are lost and roam is off to report link down */
+ if (roamvar) {
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(err)) {
+ WL_ERR(("bcn_timeout error (%d)\n", err));
+ goto dongle_rom_out;
+ }
+ }
+ /* Enable/Disable built-in roaming to allow supplicant to take care of roaming */
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(err)) {
+ WL_ERR(("roam_off error (%d)\n", err));
+ goto dongle_rom_out;
+ }
+dongle_rom_out:
+ return err;
+}
+
+static s32
+wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+ s32 scan_unassoc_time)
+{
+ s32 err = 0;
+
+ err = wldev_ioctl(ndev, WLC_SET_SCAN_CHANNEL_TIME, &scan_assoc_time,
+ sizeof(scan_assoc_time), true);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFO(("Scan assoc time is not supported\n"));
+ } else {
+ WL_ERR(("Scan assoc time error (%d)\n", err));
+ }
+ goto dongle_scantime_out;
+ }
+ err = wldev_ioctl(ndev, WLC_SET_SCAN_UNASSOC_TIME, &scan_unassoc_time,
+ sizeof(scan_unassoc_time), true);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFO(("Scan unassoc time is not supported\n"));
+ } else {
+ WL_ERR(("Scan unassoc time error (%d)\n", err));
+ }
+ goto dongle_scantime_out;
+ }
+
+dongle_scantime_out:
+ return err;
+}
+
+static s32
+wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol)
+{
+ /* Room for "event_msgs" + '\0' + bitvec */
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ s32 err = 0;
+
+ /* Set ARP offload */
+ bcm_mkiovar("arpoe", (char *)&arpoe, 4, iovbuf, sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFO(("arpoe is not supported\n"));
+ else
+ WL_ERR(("arpoe error (%d)\n", err));
+
+ goto dongle_offload_out;
+ }
+ bcm_mkiovar("arp_ol", (char *)&arp_ol, 4, iovbuf, sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFO(("arp_ol is not supported\n"));
+ else
+ WL_ERR(("arp_ol error (%d)\n", err));
+
+ goto dongle_offload_out;
+ }
+
+dongle_offload_out:
+ return err;
+}
+
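+/*
+ * Convert a "0x.." hex string into raw bytes in dst. Returns the number of
+ * bytes written, or -1 if the string is not a well formed, even-length hex
+ * pattern.
+ */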
+static s32 wl_pattern_atoh(s8 *src, s8 *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
+ WL_ERR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ WL_ERR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ strncpy(num, src, 2);
+ num[2] = '\0';
+ dst[i] = (u8) simple_strtoul(num, NULL, 16);
+ src += 2;
+ }
+ return i;
+}
+
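+/*
+ * Install a default packet filter pattern ("pkt_filter_add") in the dongle
+ * and then select the requested filter mode via "pkt_filter_mode".
+ */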
+static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
+{
+ /* Room for "event_msgs" + '\0' + bitvec */
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ const s8 *str;
+ struct wl_pkt_filter pkt_filter;
+ struct wl_pkt_filter *pkt_filterp;
+ s32 buf_len;
+ s32 str_len;
+ u32 mask_size;
+ u32 pattern_size;
+ s8 buf[256];
+ s32 err = 0;
+
+ /* add a default packet filter pattern */
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (struct wl_pkt_filter *)(buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(100);
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(0);
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(0);
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(0);
+
+ /* Parse pattern filter mask. */
+ mask_size = htod32(wl_pattern_atoh("0xff",
+ (char *)pkt_filterp->u.pattern.
+ mask_and_pattern));
+
+ /* Parse pattern filter pattern. */
+ pattern_size = htod32(wl_pattern_atoh("0x00",
+ (char *)&pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ WL_ERR(("Mask and pattern not the same size\n"));
+ err = -EINVAL;
+ goto dongle_filter_out;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Filter attributes are set in a local
+ * variable (pkt_filter), and
+ * then memcpy'ed into the buffer (pkt_filterp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp, &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ err = wldev_ioctl(ndev, WLC_SET_VAR, buf, buf_len, true);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFO(("filter not supported\n"));
+ } else {
+ WL_ERR(("filter (%d)\n", err));
+ }
+ goto dongle_filter_out;
+ }
+
+ /* set mode to allow pattern */
+ bcm_mkiovar("pkt_filter_mode", (char *)&filter_mode, 4, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFO(("filter_mode not supported\n"));
+ } else {
+ WL_ERR(("filter_mode (%d)\n", err));
+ }
+ goto dongle_filter_out;
+ }
+
+dongle_filter_out:
+ return err;
+}
+#endif /* !EMBEDDED_PLATFORM */
+
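+/*
+ * Bring the dongle into its default configuration: up, country, power mode,
+ * glomming, roaming, scan times, ARP offload and packet filters (on
+ * non-embedded builds), followed by the interface mode and the PHY
+ * capability probe that fills in the wiphy bands.
+ */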
+s32 wl_config_dongle(struct wl_priv *wl, bool need_lock)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ if (wl->dongle_up) {
+ WL_ERR(("Dongle is already up\n"));
+ return err;
+ }
+
+ ndev = wl_to_prmry_ndev(wl);
+ wdev = ndev->ieee80211_ptr;
+ if (need_lock)
+ rtnl_lock();
+#ifndef EMBEDDED_PLATFORM
+ err = wl_dongle_up(ndev, 0);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_up failed\n"));
+ goto default_conf_out;
+ }
+ err = wl_dongle_country(ndev, 0);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_country failed\n"));
+ goto default_conf_out;
+ }
+ err = wl_dongle_power(ndev, PM_FAST);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_power failed\n"));
+ goto default_conf_out;
+ }
+ err = wl_dongle_glom(ndev, 0, DHD_SDALIGN);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_glom failed\n"));
+ goto default_conf_out;
+ }
+ err = wl_dongle_roam(ndev, (wl->roam_on ? 0 : 1), 3);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_roam failed\n"));
+ goto default_conf_out;
+ }
+ wl_dongle_scantime(ndev, 40, 80);
+ wl_dongle_offload(ndev, 1, 0xf);
+ wl_dongle_filter(ndev, 1);
+#endif /* !EMBEDDED_PLATFORM */
+
+ err = wl_dongle_mode(wl, ndev, wdev->iftype);
+ if (unlikely(err && err != -EINPROGRESS)) {
+ WL_ERR(("wl_dongle_mode failed\n"));
+ goto default_conf_out;
+ }
+ err = wl_dongle_probecap(wl);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_probecap failed\n"));
+ goto default_conf_out;
+ }
+
+ /* -EINPROGRESS: Call commit handler */
+
+default_conf_out:
+ if (need_lock)
+ rtnl_unlock();
+
+ wl->dongle_up = true;
+
+ return err;
+
+}
+
+static s32 wl_update_wiphybands(struct wl_priv *wl)
+{
+ struct wiphy *wiphy;
+ s8 phylist_buf[128];
+ s8 *phy;
+ s32 err = 0;
+
+ err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_PHYLIST, phylist_buf,
+ sizeof(phylist_buf), false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ phy = phylist_buf;
+ for (; *phy; phy++) {
+ if (*phy == 'a' || *phy == 'n') {
+ wiphy = wl_to_wiphy(wl);
+ wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &__wl_band_5ghz_a;
+ }
+ }
+ return err;
+}
+
+static s32 __wl_cfg80211_up(struct wl_priv *wl)
+{
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ wl_debugfs_add_netdev_params(wl);
+
+ err = wl_config_dongle(wl, false);
+ if (unlikely(err))
+ return err;
+ dhd_monitor_init(wl->pub);
+ wl_invoke_iscan(wl);
+ wl_set_drv_status(wl, READY);
+ return err;
+}
+
+static s32 __wl_cfg80211_down(struct wl_priv *wl)
+{
+ s32 err = 0;
+ unsigned long flags;
+
+ WL_TRACE(("In\n"));
+ /* Check if cfg80211 interface is already down */
+ if (!wl_get_drv_status(wl, READY))
+ return err; /* it is even not ready */
+
+ wl_set_drv_status(wl, SCAN_ABORTING);
+
+ wl_term_iscan(wl);
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ if (wl->scan_request) {
+ cfg80211_scan_done(wl->scan_request, true);
+ wl->scan_request = NULL;
+ }
+ wl_clr_drv_status(wl, READY);
+ wl_clr_drv_status(wl, SCANNING);
+ wl_clr_drv_status(wl, SCAN_ABORTING);
+ wl_clr_drv_status(wl, CONNECTING);
+ wl_clr_drv_status(wl, CONNECTED);
+ wl_clr_drv_status(wl, DISCONNECTING);
+ if (wl_get_drv_status(wl, AP_CREATED)) {
+ wl_clr_drv_status(wl, AP_CREATED);
+ wl_clr_drv_status(wl, AP_CREATING);
+ }
+ wl_to_prmry_ndev(wl)->ieee80211_ptr->iftype =
+ NL80211_IFTYPE_STATION;
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+
+ wl->dongle_up = false;
+ wl_flush_eq(wl);
+ wl_link_down(wl);
+ if (wl->p2p_supported)
+ wl_cfgp2p_down(wl);
+ dhd_monitor_uninit();
+
+ wl_debugfs_remove_netdev(wl);
+
+ return err;
+}
+
+s32 wl_cfg80211_up(void)
+{
+ struct wl_priv *wl;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ wl = wlcfg_drv_priv;
+ mutex_lock(&wl->usr_sync);
+ wl_cfg80211_attach_post(wl_to_prmry_ndev(wl));
+ err = __wl_cfg80211_up(wl);
+ if (err)
+ WL_ERR(("__wl_cfg80211_up failed\n"));
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
+/* Private Event to Supplicant with indication that FW hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ WL_ERR(("In : FW crash Eventing\n"));
+ cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+ if (wl != NULL) {
+ wl_link_down(wl);
+ }
+ return 0;
+}
+
+s32 wl_cfg80211_down(void)
+{
+ struct wl_priv *wl;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ wl = wlcfg_drv_priv;
+ mutex_lock(&wl->usr_sync);
+ err = __wl_cfg80211_down(wl);
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
+static s32 wl_dongle_probecap(struct wl_priv *wl)
+{
+ s32 err = 0;
+
+ err = wl_update_wiphybands(wl);
+ if (unlikely(err))
+ return err;
+
+ return err;
+}
+
+static void *wl_read_prof(struct wl_priv *wl, s32 item)
+{
+ unsigned long flags;
+ void *rptr = NULL;
+
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ switch (item) {
+ case WL_PROF_SEC:
+ rptr = &wl->profile->sec;
+ break;
+ case WL_PROF_ACT:
+ rptr = &wl->profile->active;
+ break;
+ case WL_PROF_BSSID:
+ rptr = &wl->profile->bssid;
+ break;
+ case WL_PROF_SSID:
+ rptr = &wl->profile->ssid;
+ break;
+ }
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+ if (!rptr)
+ WL_ERR(("invalid item (%d)\n", item));
+ return rptr;
+}
+
+static s32
+wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e, void *data,
+ s32 item)
+{
+ s32 err = 0;
+ struct wlc_ssid *ssid;
+ unsigned long flags;
+
+ flags = dhd_os_spin_lock((dhd_pub_t *)(wl->pub));
+ switch (item) {
+ case WL_PROF_SSID:
+ ssid = (wlc_ssid_t *) data;
+ memset(wl->profile->ssid.SSID, 0,
+ sizeof(wl->profile->ssid.SSID));
+ memcpy(wl->profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+ wl->profile->ssid.SSID_len = ssid->SSID_len;
+ break;
+ case WL_PROF_BSSID:
+ if (data)
+ memcpy(wl->profile->bssid, data, ETHER_ADDR_LEN);
+ else
+ memset(wl->profile->bssid, 0, ETHER_ADDR_LEN);
+ break;
+ case WL_PROF_SEC:
+ memcpy(&wl->profile->sec, data, sizeof(wl->profile->sec));
+ break;
+ case WL_PROF_ACT:
+ wl->profile->active = *(bool *)data;
+ break;
+ case WL_PROF_BEACONINT:
+ wl->profile->beacon_interval = *(u16 *)data;
+ break;
+ case WL_PROF_DTIMPERIOD:
+ wl->profile->dtim_period = *(u8 *)data;
+ break;
+ default:
+ WL_ERR(("unsupported item (%d)\n", item));
+ err = -EOPNOTSUPP;
+ break;
+ }
+ dhd_os_spin_unlock((dhd_pub_t *)(wl->pub), flags);
+ return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+ /*
+ * Changing the debug level through an insmod parameter is not
+ * allowed; the debug level is expected to be configured at
+ * compile time (e.g. via a CONFIG_XXX option).
+ */
+ /* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev)
+{
+ return get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct wl_priv *wl)
+{
+ return wl->ibss_starter;
+}
+
+static void wl_rst_ie(struct wl_priv *wl)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+
+ ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+ WL_ERR(("ei crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ ie->buf[ie->offset] = t;
+ ie->buf[ie->offset + 1] = l;
+ memcpy(&ie->buf[ie->offset + 2], v, l);
+ ie->offset += l + 2;
+
+ return err;
+}
+
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+ WL_ERR(("ei_stream crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+ ie->offset += ie_size;
+
+ return err;
+}
+
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+ s32 err = 0;
+
+ if (unlikely(ie->offset > dst_size)) {
+ WL_ERR(("dst_size is not enough\n"));
+ return -ENOSPC;
+ }
+ memcpy(dst, &ie->buf[0], ie->offset);
+
+ return err;
+}
+
+static u32 wl_get_ielen(struct wl_priv *wl)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+
+ return ie->offset;
+}
+
+static void wl_link_up(struct wl_priv *wl)
+{
+ wl->link_up = true;
+}
+
+static void wl_link_down(struct wl_priv *wl)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+
+ WL_DBG(("In\n"));
+ wl->link_up = false;
+ conn_info->req_ie_len = 0;
+ conn_info->resp_ie_len = 0;
+}
+
+static unsigned long wl_lock_eq(struct wl_priv *wl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->eq_lock, flags);
+ return flags;
+}
+
+static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags)
+{
+ spin_unlock_irqrestore(&wl->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct wl_priv *wl)
+{
+ spin_lock_init(&wl->eq_lock);
+}
+
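+/*
+ * For waits shorter than one jiffy, msleep() would round up to a full tick,
+ * so busy-wait with mdelay() instead; longer waits can sleep.
+ */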
+static void wl_delay(u32 ms)
+{
+ if (ms < 1000 / HZ) {
+ cond_resched();
+ mdelay(ms);
+ } else {
+ msleep(ms);
+ }
+}
+
+s32 wl_cfg80211_read_fw(s8 *buf, u32 size)
+{
+ const struct firmware *fw_entry;
+ struct wl_priv *wl;
+
+ wl = wlcfg_drv_priv;
+
+ fw_entry = wl->fw->fw_entry;
+
+ if (fw_entry->size < wl->fw->ptr + size)
+ size = fw_entry->size - wl->fw->ptr;
+
+ memcpy(buf, &fw_entry->data[wl->fw->ptr], size);
+ wl->fw->ptr += size;
+ return size;
+}
+
+void wl_cfg80211_release_fw(void)
+{
+ struct wl_priv *wl;
+
+ wl = wlcfg_drv_priv;
+ release_firmware(wl->fw->fw_entry);
+ wl->fw->ptr = 0;
+}
+
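+/*
+ * Two-stage loader: the first call fetches the firmware image, the second
+ * fetches the NVRAM file (tracked through the WL_FW_LOADING_DONE and
+ * WL_NVRAM_LOADING_DONE status bits); any further call returns NULL.
+ */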
+void *wl_cfg80211_request_fw(s8 *file_name)
+{
+ struct wl_priv *wl;
+ const struct firmware *fw_entry = NULL;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ WL_DBG(("file name : \"%s\"\n", file_name));
+ wl = wlcfg_drv_priv;
+
+ if (!test_bit(WL_FW_LOADING_DONE, &wl->fw->status)) {
+ err = request_firmware(&wl->fw->fw_entry, file_name,
+ &wl_cfg80211_get_sdio_func()->dev);
+ if (unlikely(err)) {
+ WL_ERR(("Could not download fw (%d)\n", err));
+ goto req_fw_out;
+ }
+ set_bit(WL_FW_LOADING_DONE, &wl->fw->status);
+ fw_entry = wl->fw->fw_entry;
+ if (fw_entry) {
+ WL_DBG(("fw size (%zd), data (%p)\n", fw_entry->size,
+ fw_entry->data));
+ }
+ } else if (!test_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status)) {
+ err = request_firmware(&wl->fw->fw_entry, file_name,
+ &wl_cfg80211_get_sdio_func()->dev);
+ if (unlikely(err)) {
+ WL_ERR(("Could not download nvram (%d)\n", err));
+ goto req_fw_out;
+ }
+ set_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status);
+ fw_entry = wl->fw->fw_entry;
+ if (fw_entry) {
+ WL_DBG(("nvram size (%zd), data (%p)\n", fw_entry->size,
+ fw_entry->data));
+ }
+ } else {
+		WL_DBG(("Downloading already done. Nothing more to do\n"));
+ err = -EPERM;
+ }
+
+req_fw_out:
+ if (unlikely(err)) {
+ return NULL;
+ }
+ wl->fw->ptr = 0;
+ return (void *)fw_entry->data;
+}
+
+s8 *wl_cfg80211_get_fwname(void)
+{
+ struct wl_priv *wl;
+
+ wl = wlcfg_drv_priv;
+ strcpy(wl->fw->fw_name, WL_4329_FW_FILE);
+ return wl->fw->fw_name;
+}
+
+s8 *wl_cfg80211_get_nvramname(void)
+{
+ struct wl_priv *wl;
+
+ wl = wlcfg_drv_priv;
+ strcpy(wl->fw->nvram_name, WL_4329_NVRAM_FILE);
+ return wl->fw->nvram_name;
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+ struct wl_priv *wl;
+ dhd_pub_t *dhd_pub;
+ struct ether_addr p2pif_addr;
+
+ wl = wlcfg_drv_priv;
+ dhd_pub = (dhd_pub_t *)wl->pub;
+ wl_cfgp2p_generate_bss_mac(&dhd_pub->mac, p2pdev_addr, &p2pif_addr);
+
+ return 0;
+}
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ return wl_cfgp2p_set_p2p_noa(wl, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ return wl_cfgp2p_get_p2p_noa(wl, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ return wl_cfgp2p_set_p2p_ps(wl, net, buf, len);
+}
+
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+ enum wl_management_type type)
+{
+ struct wl_priv *wl;
+ struct net_device *ndev = NULL;
+ s32 ret = 0;
+ s32 bssidx = 0;
+ s32 pktflag = 0;
+ wl = wlcfg_drv_priv;
+ if (wl->p2p && wl->p2p->vif_created) {
+ ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION);
+ } else if (wl_get_drv_status(wl, AP_CREATING) ||
+ wl_get_drv_status(wl, AP_CREATED)) {
+ ndev = net;
+ bssidx = 0;
+ }
+ if (ndev != NULL) {
+ switch (type) {
+ case WL_BEACON:
+ pktflag = VNDR_IE_BEACON_FLAG;
+ break;
+ case WL_PROBE_RESP:
+ pktflag = VNDR_IE_PRBRSP_FLAG;
+ break;
+ case WL_ASSOC_RESP:
+ pktflag = VNDR_IE_ASSOCRSP_FLAG;
+ break;
+ }
+ if (pktflag)
+ ret = wl_cfgp2p_set_management_ie(wl, ndev, bssidx, pktflag, buf, len);
+ }
+
+ return ret;
+}
+
+static __used void wl_dongle_poweron(struct wl_priv *wl)
+{
+ WL_DBG(("Enter \n"));
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+
+#if defined(BCMLXSDMMC)
+	sdioh_start(NULL, 0);
+	sdioh_start(NULL, 1);
+#endif
+ wl_cfg80211_resume(wl_to_wiphy(wl));
+}
+
+static __used void wl_dongle_poweroff(struct wl_priv *wl)
+{
+ WL_DBG(("Enter \n"));
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+ wl_cfg80211_suspend(wl_to_wiphy(wl), NULL);
+#else
+ wl_cfg80211_suspend(wl_to_wiphy(wl));
+#endif
+
+#if defined(BCMLXSDMMC)
+ sdioh_stop(NULL);
+#endif
+ /* clean up dtim_skip setting */
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+}
+
+static int wl_debugfs_add_netdev_params(struct wl_priv *wl)
+{
+ char buf[10+IFNAMSIZ];
+ struct dentry *fd;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ sprintf(buf, "netdev:%s", wl_to_prmry_ndev(wl)->name);
+ wl->debugfsdir = debugfs_create_dir(buf, wl_to_wiphy(wl)->debugfsdir);
+
+ fd = debugfs_create_u16("beacon_int", S_IRUGO, wl->debugfsdir,
+ (u16 *)&wl->profile->beacon_interval);
+ if (!fd) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ fd = debugfs_create_u8("dtim_period", S_IRUGO, wl->debugfsdir,
+ (u8 *)&wl->profile->dtim_period);
+ if (!fd) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+err_out:
+ return err;
+}
+
+static void wl_debugfs_remove_netdev(struct wl_priv *wl)
+{
+ WL_DBG(("Enter \n"));
+}
+
+static const struct rfkill_ops wl_rfkill_ops = {
+ .set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+ struct wl_priv *wl = (struct wl_priv *)data;
+
+ WL_DBG(("Enter \n"));
+ WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+ if (!wl)
+ return -EINVAL;
+
+ wl->rf_blocked = blocked;
+
+ return 0;
+}
+
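+/*
+ * setup == true allocates and registers the "brcmfmac-wifi" rfkill switch;
+ * setup == false unregisters and frees it again.
+ */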
+static int wl_setup_rfkill(struct wl_priv *wl, bool setup)
+{
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ if (!wl)
+ return -EINVAL;
+ if (setup) {
+ wl->rfkill = rfkill_alloc("brcmfmac-wifi",
+ &wl_cfg80211_get_sdio_func()->dev,
+ RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl);
+
+ if (!wl->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rfkill_register(wl->rfkill);
+
+ if (err)
+ rfkill_destroy(wl->rfkill);
+ } else {
+ if (!wl->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rfkill_unregister(wl->rfkill);
+ rfkill_destroy(wl->rfkill);
+ }
+
+err_out:
+ return err;
+}
+
+#if defined(COEX_DHCP)
+/*
+ * Read a named driver variable as a uint register value and return an error
+ * indication.
+ * Calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+ uint reg, int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+ (char *)(&var), sizeof(var.buf));
+ error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ char ioctlbuf[1024];
+#else
+ static char ioctlbuf[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+ bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf));
+
+ return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf, sizeof(ioctlbuf), true));
+}
+/*
+ * Write a uint register value into a named driver variable and return an
+ * error indication.
+ * Calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
+ */
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+ char reg_addr[8];
+
+ memset(reg_addr, 0, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
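+/*
+ * Heuristic SCO/eSCO detection: sample BT coex shared register 27 up to 12
+ * times, 5 ms apart, and report an active link once more than two samples
+ * carry an (e)SCO packet-type indication in bits 2:1.
+ */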
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = FALSE;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE(("%s, sample[%d], btc params: 27:%x\n",
+ __FUNCTION__, i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERR(("%s ioc read btc params error\n", __FUNCTION__));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) { /* count both sco & esco */
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ __FUNCTION__, sco_id_cnt, i));
+ res = TRUE;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = FALSE;
+
+ char buf_reg50va_dhcp_on[8] =
+ { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] =
+ { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] =
+ { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] =
+ { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] =
+ { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+		/* this should reduce aggressive eSCO retransmission
+		 * without breaking it
+		 */
+
+ /* 1st save current */
+		WL_TRACE(("Do new SCO/eSCO coex algo {save & override}\n"));
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+ saved_status = TRUE;
+ WL_TRACE(("%s saved bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __FUNCTION__, saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+ } else {
+ WL_ERR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ WL_TRACE(("override with [50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+ } else if (saved_status) {
+ /* restore previously saved bt params */
+		WL_TRACE(("Do new SCO/eSCO coex algo {restore}\n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+		WL_ERR(("%s attempted to restore BTCOEX params that were never saved\n",
+			__FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+#if defined(BT_DHCP_eSCO_FIX)
+	/* set = 1: save & override; set = 0: turn off & restore previous settings */
+ set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+ WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE)
+ /* Forcing bt_flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0],
+ sizeof(buf_flag7_dhcp_on));
+ else
+ /* Restoring default bt flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0],
+ sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+ struct btcoex_info *bt_local = (struct btcoex_info *)data;
+ WL_TRACE(("%s\n", __FUNCTION__));
+ bt_local->timer_on = 0;
+ schedule_work(&bt_local->work);
+}
+
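+/*
+ * BT/DHCP coexistence state machine, driven by the timer above:
+ * BT_DHCP_START -> BT_DHCP_OPPR_WIN (give DHCP an opportunity window) ->
+ * BT_DHCP_FLAG_FORCE_TIMEOUT (DHCP still running, lower BT priority) ->
+ * BT_DHCP_IDLE (restore default BT priority).
+ */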
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+ struct btcoex_info *btcx_inf;
+
+ btcx_inf = container_of(work, struct btcoex_info, work);
+
+ if (btcx_inf->timer_on) {
+ btcx_inf->timer_on = 0;
+ del_timer_sync(&btcx_inf->timer);
+ }
+
+ switch (btcx_inf->bt_state) {
+ case BT_DHCP_START:
+ /* DHCP started
+ * provide OPPORTUNITY window to get DHCP address
+ */
+ WL_TRACE(("%s bt_dhcp stm: started \n",
+ __FUNCTION__));
+ btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+ mod_timer(&btcx_inf->timer,
+ jiffies + BT_DHCP_OPPR_WIN_TIME*HZ/1000);
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPR_WIN:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("%s DHCP Done before T1 expiration\n",
+ __FUNCTION__));
+ goto btc_coex_idle;
+ }
+
+		/* DHCP is not over yet; start lowering BT priority and
+		 * enforce btc_params + flags if necessary
+		 */
+ WL_TRACE(("%s DHCP T1:%d expired\n", __FUNCTION__,
+ BT_DHCP_OPPR_WIN_TIME));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+ btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&btcx_inf->timer,
+ jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("%s DHCP Done before T2 expiration\n",
+ __FUNCTION__));
+ } else {
+			/* No DHCP response during T1+T2, restore BT priority */
+ WL_TRACE(("%s DHCP wait interval T2:%d"
+ "msec expired\n", __FUNCTION__,
+ BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+ /* Restoring default bt priority */
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+
+ default:
+ WL_ERR(("%s error g_status=%d !!!\n", __FUNCTION__,
+ btcx_inf->bt_state));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+ }
+
+ net_os_wake_unlock(btcx_inf->dev);
+}
+
+static int wl_cfg80211_btcoex_init(struct wl_priv *wl)
+{
+ struct btcoex_info *btco_inf = NULL;
+
+ btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+ if (!btco_inf)
+ return -ENOMEM;
+
+ btco_inf->bt_state = BT_DHCP_IDLE;
+ btco_inf->ts_dhcp_start = 0;
+ btco_inf->ts_dhcp_ok = 0;
+ /* Set up timer for BT */
+ btco_inf->timer_ms = 10;
+ init_timer(&btco_inf->timer);
+ btco_inf->timer.data = (ulong)btco_inf;
+ btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+ btco_inf->dev = wl->wdev->netdev;
+
+ INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+ wl->btcoex_info = btco_inf;
+ return 0;
+}
+
+static void
+wl_cfg80211_btcoex_deinit(struct wl_priv *wl)
+{
+ if (!wl->btcoex_info)
+ return;
+
+	if (wl->btcoex_info->timer_on) {
+ wl->btcoex_info->timer_on = 0;
+ del_timer_sync(&wl->btcoex_info->timer);
+ }
+
+ cancel_work_sync(&wl->btcoex_info->work);
+
+ kfree(wl->btcoex_info);
+ wl->btcoex_info = NULL;
+}
+#endif /* COEX_DHCP */
+
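+/*
+ * Handles the "BTCOEXMODE n" private command: "1" marks DHCP start (save
+ * btc_params 66/41/68 and, if an SCO/eSCO link is active, arm the coex
+ * state machine), "2" marks DHCP done (cancel the timer and restore the
+ * saved values). Always answers "OK".
+ */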
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command)
+{
+ char powermode_val = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+#ifdef COEX_DHCP
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+ struct btcoex_info *btco_inf = wlcfg_drv_priv->btcoex_info;
+#endif /* COEX_DHCP */
+
+	/* Figure out whether this is a powermode "1" or "2" command */
+ strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+		/* Retrieve and save the original register values */
+ if ((saved_status == FALSE) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+			/* Disable PM mode during the DHCP session */
+#ifdef COEX_DHCP
+ /* Start BT timer only for SCO connection */
+ if (btcoex_is_sco_active(dev)) {
+ /* btc_params 66 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+ /* btc_params 41 0x33 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+ /* btc_params 68 0x190 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ btco_inf->bt_state = BT_DHCP_START;
+ btco_inf->timer_on = 1;
+ mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+ WL_TRACE(("%s enable BT DHCP Timer\n",
+ __FUNCTION__));
+ }
+#endif /* COEX_DHCP */
+ }
+ else if (saved_status == TRUE) {
+ WL_ERR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+ }
+ }
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+ /* Restoring PM mode */
+
+#ifdef COEX_DHCP
+ /* Stop any bt timer because DHCP session is done */
+ WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__));
+ if (btco_inf->timer_on) {
+ btco_inf->timer_on = 0;
+ del_timer_sync(&btco_inf->timer);
+
+ if (btco_inf->bt_state != BT_DHCP_IDLE) {
+ /* need to restore original btc flags & extra btc params */
+ WL_TRACE(("%s bt->bt_state:%d\n",
+ __FUNCTION__, btco_inf->bt_state));
+				/* wake up the btcoex worker to restore bt flags + params */
+ schedule_work(&btco_inf->work);
+ }
+ }
+
+		/* Restore the btc_flags parameter anyway */
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+#endif /* COEX_DHCP */
+
+ /* Restore original values */
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+		WL_ERR(("%s Unknown power setting, ignored\n",
+			__FUNCTION__));
+ }
+
+ snprintf(command, 3, "OK");
+
+ return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 000000000000..262335ef99c2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,558 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h,v 1.1.4.1.2.8 2011/02/09 01:37:52 Exp $
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct wl_priv;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#define WL_DBG_NONE 0
+#define WL_DBG_TRACE (1 << 4)
+#define WL_DBG_SCAN (1 << 3)
+#define WL_DBG_DBG (1 << 2)
+#define WL_DBG_INFO (1 << 1)
+#define WL_DBG_ERR (1 << 0)
+
+/* Setting WL_DBG_LEVEL to 0 compiles out WL_DBG(); any non-zero value (default 0xFF) keeps it */
+#define WL_DBG_LEVEL 0xFF
+
+#define WL_ERR(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_ERR "CFG80211-ERROR) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#define WL_INFO(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printk(KERN_ERR "CFG80211-INFO) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#define WL_SCAN(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_SCAN) { \
+ printk(KERN_ERR "CFG80211-SCAN) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#define WL_TRACE(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_TRACE) { \
+ printk(KERN_ERR "CFG80211-TRACE) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_ERR "CFG80211-DEBUG) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#else /* !(WL_DBG_LEVEL > 0) */
+#define WL_DBG(args)
+#endif /* (WL_DBG_LEVEL > 0) */
+
+
+#define WL_SCAN_RETRY_MAX 3 /* used for ibss scan */
+#define WL_NUM_PMKIDS_MAX MAXPMKID /* will be used
+ * for 2.6.33 kernel
+ * or later
+ */
+#define WL_SCAN_BUF_MAX (1024 * 8)
+#define WL_TLV_INFO_MAX 1024
+#define WL_SCAN_IE_LEN_MAX 2048
+#define WL_BSS_INFO_MAX 2048
+#define WL_ASSOC_INFO_MAX 512 /*
+ * needs to grab assoc info from dongle to
+ * report it to cfg80211 through "connect"
+ * event
+ */
+#define WL_IOCTL_LEN_MAX 1024
+#define WL_EXTRA_BUF_MAX 2048
+#define WL_ISCAN_BUF_MAX 2048 /*
+					 * the buf length can be WLC_IOCTL_MAXLEN (8K)
+ * to reduce iteration
+ */
+#define WL_ISCAN_TIMER_INTERVAL_MS 3000
+#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX 256 /* virtually unlimited as long
+ * as kernel memory allows
+ */
+#define WL_FILE_NAME_MAX 256
+#define WL_DWELL_TIME 200
+#define WL_LONG_DWELL_TIME 1000
+#define VWDEV_CNT 3
+
+#define WL_SCAN_TIMER_INTERVAL_MS 8000 /* Scan timeout */
+
+/* dongle status */
+enum wl_status {
+ WL_STATUS_READY = 0,
+ WL_STATUS_SCANNING,
+ WL_STATUS_SCAN_ABORTING,
+ WL_STATUS_CONNECTING,
+ WL_STATUS_CONNECTED,
+ WL_STATUS_DISCONNECTING,
+ WL_STATUS_AP_CREATING,
+ WL_STATUS_AP_CREATED
+};
+
+/* wi-fi mode */
+enum wl_mode {
+ WL_MODE_BSS,
+ WL_MODE_IBSS,
+ WL_MODE_AP
+};
+
+/* dongle profile list */
+enum wl_prof_list {
+ WL_PROF_MODE,
+ WL_PROF_SSID,
+ WL_PROF_SEC,
+ WL_PROF_IBSS,
+ WL_PROF_BAND,
+ WL_PROF_BSSID,
+ WL_PROF_ACT,
+ WL_PROF_BEACONINT,
+ WL_PROF_DTIMPERIOD
+};
+
+/* dongle iscan state */
+enum wl_iscan_state {
+ WL_ISCAN_STATE_IDLE,
+ WL_ISCAN_STATE_SCANING
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+ WL_ESCAN_STATE_IDLE,
+ WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+ WL_FW_LOADING_DONE,
+ WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+ WL_BEACON = 0x1,
+ WL_PROBE_RESP = 0x2,
+ WL_ASSOC_RESP = 0x4
+};
+/* beacon / probe_response */
+struct beacon_proberesp {
+ __le64 timestamp;
+ __le16 beacon_int;
+ __le16 capab_info;
+ u8 variable[0];
+} __attribute__ ((packed));
+
+/* dongle configuration */
+struct wl_conf {
+ struct net_mode {
+ struct net_device *ndev;
+ s32 type;
+	} mode[VWDEV_CNT + 1]; /* adhoc, infrastructure or ap */
+ u32 frag_threshold;
+ u32 rts_threshold;
+ u32 retry_short;
+ u32 retry_long;
+ s32 tx_power;
+ struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct wl_priv *wl,
+ struct net_device *ndev, const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+ u16 band;
+ u16 channel;
+ s16 rssi;
+ u16 frame_len;
+ u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+ struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+ u16 offset;
+ u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ wl_event_msg_t emsg;
+ s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+ u32 wpa_versions;
+ u32 auth_type;
+ u32 cipher_pairwise;
+ u32 cipher_group;
+ u32 wpa_auth;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+ u8 beacon_interval; /* in millisecond */
+ u8 atim; /* in millisecond */
+ s8 join_only;
+ u8 band;
+ u8 channel;
+};
+
+/* dongle profile */
+struct wl_profile {
+ u32 mode;
+ struct wlc_ssid ssid;
+ u8 bssid[ETHER_ADDR_LEN];
+ u16 beacon_interval;
+ u8 dtim_period;
+ struct wl_security sec;
+ struct wl_ibss ibss;
+ s32 band;
+ bool active;
+};
+
+typedef s32(*ISCAN_HANDLER) (struct wl_priv *wl);
+
+/* dongle iscan controller */
+struct wl_iscan_ctrl {
+ struct net_device *dev;
+ struct timer_list timer;
+ u32 timer_ms;
+ u32 timer_on;
+ s32 state;
+ struct task_struct *tsk;
+ struct semaphore sync;
+ ISCAN_HANDLER iscan_handler[WL_SCAN_ERSULTS_LAST];
+ void *data;
+ s8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s8 scan_buf[WL_ISCAN_BUF_MAX];
+};
+
+/* association information */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+ u8 req_ie[MAX_REQ_LINE];
+ s32 req_ie_len;
+ u8 resp_ie[MAX_REQ_LINE];
+ s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+ const struct firmware *fw_entry;
+ unsigned long status;
+ u32 ptr;
+ s8 fw_name[WL_FILE_NAME_MAX];
+ s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+ u32 req_len;
+ u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+ u32 escan_state;
+ u8 escan_buf[ESCAN_BUF_SIZE];
+ struct wiphy *wiphy;
+};
+
+struct ap_info {
+/* Structure to hold WPS and WPA IEs for an AP */
+ u8 probe_res_ie[IE_MAX_LEN];
+ u8 beacon_ie[IE_MAX_LEN];
+ u32 probe_res_ie_len;
+ u32 beacon_ie_len;
+ u8 *wpa_ie;
+ u8 *rsn_ie;
+ u8 *wps_ie;
+ bool security_mode;
+};
+struct btcoex_info {
+ struct timer_list timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+	uint32 ts_dhcp_start;	/* DHCP start timestamp (ms), recorded for stats */
+	uint32 ts_dhcp_ok;	/* DHCP done timestamp (ms), recorded for stats */
+	bool dhcp_done;	/* flag, indicates the host finished DHCP
+					 * before T1/T2 expiration
+					 */
+ int bt_state;
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+struct sta_info {
+ /* Structure to hold WPS IE for a STA */
+ u8 probe_req_ie[IE_MAX_LEN];
+ u8 assoc_req_ie[IE_MAX_LEN];
+ u32 probe_req_ie_len;
+ u32 assoc_req_ie_len;
+};
+/* dongle private data of cfg80211 interface */
+struct wl_priv {
+ struct wireless_dev *wdev; /* representing wl cfg80211 device */
+ struct wireless_dev *vwdev[VWDEV_CNT];
+ struct wl_conf *conf; /* dongle configuration */
+ struct cfg80211_scan_request *scan_request; /* scan request object */
+ EVENT_HANDLER evt_handler[WLC_E_LAST];
+ struct list_head eq_list; /* used for event queue */
+ spinlock_t eq_lock; /* for event queue synchronization */
+	struct mutex usr_sync;	/* mainly for dongle up/down synchronization */
+ struct wl_scan_results *bss_list;
+ struct wl_scan_results *scan_results;
+
+ /* scan request object for internal purpose */
+ struct wl_scan_req *scan_req_int;
+
+ /* bss information for cfg80211 layer */
+ struct wl_cfg80211_bss_info *bss_info;
+ /* information element object for internal purpose */
+ struct wl_ie ie;
+
+ /* for synchronization of main event thread */
+ struct wl_profile *profile; /* holding dongle profile */
+ struct wl_iscan_ctrl *iscan; /* iscan controller */
+
+ /* association information container */
+ struct wl_connect_info conn_info;
+
+	/* controls firmware and nvram parameter downloading */
+ struct wl_fw_ctrl *fw;
+ struct wl_pmk_list *pmk_list; /* wpa2 pmk list */
+ tsk_ctl_t event_tsk; /* task of main event handler thread */
+ unsigned long status; /* current dongle status */
+ void *pub;
+ u32 channel; /* current channel */
+ bool iscan_on; /* iscan on/off switch */
+ bool iscan_kickstart; /* indicate iscan already started */
+ bool escan_on; /* escan on/off switch */
+ struct escan_info escan_info; /* escan information */
+ bool active_scan; /* current scan mode */
+ bool ibss_starter; /* indicates this sta is ibss starter */
+ bool link_up; /* link/connection up flag */
+
+	/* indicates whether the dongle supports power save mode */
+	bool pwr_save;
+	bool dongle_up;	/* indicates whether the dongle is up or not */
+ bool roam_on; /* on/off switch for dongle self-roaming */
+ bool scan_tried; /* indicates if first scan attempted */
+ u8 *ioctl_buf; /* ioctl buffer */
+ u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* mainly used to grab assoc information */
+ struct dentry *debugfsdir;
+ struct rfkill *rfkill;
+ bool rf_blocked;
+ struct ieee80211_channel remain_on_chan;
+ enum nl80211_channel_type remain_on_chan_type;
+ u64 cache_cookie;
+ wait_queue_head_t dongle_event_wait;
+ struct ap_info *ap_info;
+ struct sta_info *sta_info;
+ struct p2p_info *p2p;
+ bool p2p_supported;
+ struct btcoex_info *btcoex_info;
+	struct timer_list scan_timeout;   /* Timer to catch scan event timeouts */
+};
+
+#define wl_to_wiphy(w) (w->wdev->wiphy)
+#define wl_to_prmry_ndev(w) (w->wdev->netdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define wl_to_sr(w) (w->scan_req_int)
+#define wl_to_ie(w) (&w->ie)
+#define iscan_to_wl(i) ((struct wl_priv *)(i->data))
+#define wl_to_iscan(w) (w->iscan)
+#define wl_to_conn(w) (&w->conn_info)
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status(wl, stat) (test_bit(WL_STATUS_ ## stat, &(wl)->status))
+#define wl_set_drv_status(wl, stat) (set_bit(WL_STATUS_ ## stat, &(wl)->status))
+#define wl_clr_drv_status(wl, stat) (clear_bit(WL_STATUS_ ## stat, &(wl)->status))
+#define wl_chg_drv_status(wl, stat) (change_bit(WL_STATUS_ ## stat, &(wl)->status))
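+/* e.g. wl_set_drv_status(wl, CONNECTED) atomically sets the
+ * WL_STATUS_CONNECTED bit in wl->status, and
+ * wl_get_drv_status(wl, CONNECTED) tests it.
+ */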
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+ return bss = bss ?
+ (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+static inline s32 alloc_idx_vwdev(struct wl_priv *wl)
+{
+ s32 i = 0;
+ for (i = 0; i < VWDEV_CNT; i++) {
+ if (wl->vwdev[i] == NULL)
+ return i;
+ }
+ return -1;
+}
+
+static inline s32 get_idx_vwdev_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+ s32 i = 0;
+ for (i = 0; i < VWDEV_CNT; i++) {
+ if ((wl->vwdev[i] != NULL) && (wl->vwdev[i]->netdev == ndev))
+ return i;
+ }
+ return -1;
+}
+
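+/*
+ * wl->conf->mode[] maps each registered net_device to its WL_MODE_* type;
+ * a slot with ndev == NULL and type == -1 is free.
+ */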
+static inline s32 get_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+ s32 i = 0;
+ for (i = 0; i <= VWDEV_CNT; i++) {
+ if (wl->conf->mode[i].ndev != NULL && (wl->conf->mode[i].ndev == ndev))
+ return wl->conf->mode[i].type;
+ }
+ return -1;
+}
+static inline void set_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev, s32 type)
+{
+ s32 i = 0;
+ for (i = 0; i <= VWDEV_CNT; i++) {
+ if (type == -1) {
+ /* free the info of netdev */
+ if (wl->conf->mode[i].ndev == ndev) {
+ wl->conf->mode[i].ndev = NULL;
+ wl->conf->mode[i].type = -1;
+ break;
+ }
+
+ } else {
+ if ((wl->conf->mode[i].ndev != NULL)&&
+ (wl->conf->mode[i].ndev == ndev)) {
+ /* update type of ndev */
+ wl->conf->mode[i].type = type;
+ break;
+ }
+ else if ((wl->conf->mode[i].ndev == NULL)&&
+ (wl->conf->mode[i].type == -1)) {
+ wl->conf->mode[i].ndev = ndev;
+ wl->conf->mode[i].type = type;
+ break;
+ }
+ }
+ }
+}
+#define free_vwdev_by_index(wl, __i) do { \
+ if (wl->vwdev[__i] != NULL) \
+ kfree(wl->vwdev[__i]); \
+ wl->vwdev[__i] = NULL; \
+ } while (0)
+
+#define for_each_bss(list, bss, __i) \
+ for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
+ * In addition to that, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+ ((_sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) && \
+ (!_sme->crypto.n_ciphers_pairwise) && \
+ (!_sme->crypto.cipher_group))
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void);
+/* event handler from dongle */
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+ void *data);
+extern void wl_cfg80211_set_sdio_func(void *func); /* set sdio function info */
+extern struct sdio_func *wl_cfg80211_get_sdio_func(void);	/* get sdio function info */
+extern s32 wl_cfg80211_up(void); /* dongle up */
+extern s32 wl_cfg80211_down(void); /* dongle down */
+extern s32 wl_cfg80211_notify_ifadd(struct net_device *net, s32 idx, s32 bssidx,
+int (*_net_attach)(dhd_pub_t *dhdp, int ifidx));
+extern s32 wl_cfg80211_notify_ifdel(struct net_device *ndev);
+extern s32 wl_cfg80211_is_progress_ifadd(void);
+extern s32 wl_cfg80211_is_progress_ifchange(void);
+extern s32 wl_cfg80211_notify_ifchange(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern void *wl_cfg80211_request_fw(s8 *file_name);
+extern s32 wl_cfg80211_read_fw(s8 *buf, u32 size);
+extern void wl_cfg80211_release_fw(void);
+extern s8 *wl_cfg80211_get_fwname(void);
+extern s8 *wl_cfg80211_get_nvramname(void);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+ enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+
+/* do scan abort */
+extern s32
+wl_cfg80211_scan_abort(struct wl_priv *wl, struct net_device *ndev);
+
+extern s32
+wl_cfg80211_if_is_group_owner(void);
+#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 000000000000..4ee6557e17dd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,1469 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+
+
+static s8 ioctlbuf[WLC_IOCTL_MAXLEN];
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+static s8 *smbuf = ioctlbuf;
+
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static s32
+wl_cfgp2p_vndr_ie(struct net_device *ndev, s32 bssidx, s32 pktflag,
+ s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete);
+/*
+ * Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct wl_priv *wl)
+{
+ if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+ CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+ return -ENOMEM;
+ }
+#define INIT_IE(IE_TYPE, BSS_TYPE) \
+ do { \
+ memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ } while (0);
+
+ INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl);
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL;
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0;
+ spin_lock_init(&wl->p2p->timer_lock);
+ return BCME_OK;
+
+}
+/*
+ * Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct wl_priv *wl)
+{
+ if (wl->p2p) {
+ kfree(wl->p2p);
+ wl->p2p = NULL;
+ }
+ wl->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl)
+{
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+ s32 ret = BCME_OK;
+ s32 val = 0;
+ /* Do we have to check whether APSTA is enabled or not ? */
+ wldev_iovar_getint(ndev, "apsta", &val);
+ if (val == 0) {
+ val = 1;
+ wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+ wldev_iovar_setint(ndev, "apsta", val);
+ wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
+ }
+ val = 1;
+ /* Disable firmware roaming for P2P */
+ wldev_iovar_setint(ndev, "roam_off", val);
+	/* In case of a COB type, the firmware comes with a default MAC address.
+	 * After initializing the firmware, we have to push the current MAC
+	 * address to the firmware to use as the P2P device address.
+	 */
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+ sizeof(null_eth_addr), ioctlbuf, sizeof(ioctlbuf), 0);
+ if (ret && ret != BCME_UNSUPPORTED) {
+ CFGP2P_ERR(("failed to update device address\n"));
+ }
+ return ret;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the BSS to create
+ * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec)
+{
+ wl_p2p_if_t ifreq;
+ s32 err;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+
+ ifreq.type = if_type;
+ ifreq.chspec = chspec;
+ memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+ CFGP2P_INFO(("---wl p2p_ifadd %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+ ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2],
+ ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5],
+ (if_type == WL_P2P_IF_GO) ? "go" : "client",
+ (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+ err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+ ioctlbuf, sizeof(ioctlbuf));
+ return err;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac)
+{
+ s32 ret;
+ struct net_device *netdev = wl_to_prmry_ndev(wl);
+
+ CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel %02x:%02x:%02x:%02x:%02x:%02x\n",
+ netdev->ifindex, mac->octet[0], mac->octet[1], mac->octet[2],
+ mac->octet[3], mac->octet[4], mac->octet[5]));
+ ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+ ioctlbuf, sizeof(ioctlbuf));
+ if (unlikely(ret < 0)) {
+ printk("'wl p2p_ifdel' error %d\n", ret);
+ }
+ return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac : MAC address of the BSS to change a role
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec)
+{
+ wl_p2p_if_t ifreq;
+ s32 err;
+ struct net_device *netdev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+
+ ifreq.type = if_type;
+ ifreq.chspec = chspec;
+ memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+ CFGP2P_INFO(("---wl p2p_ifchange %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+ ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2],
+ ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5],
+ (if_type == WL_P2P_IF_GO) ? "go" : "client",
+ (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+ err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+ ioctlbuf, sizeof(ioctlbuf));
+
+ if (unlikely(err < 0)) {
+ printk("'wl p2p_ifupd' error %d\n", err);
+ }
+ return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the created BSS
+ * @index : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index)
+{
+ s32 ret;
+ u8 getbuf[64];
+ struct net_device *dev = wl_to_prmry_ndev(wl);
+
+ CFGP2P_INFO(("---wl p2p_if %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac->octet[0], mac->octet[1], mac->octet[2],
+ mac->octet[3], mac->octet[4], mac->octet[5]));
+
+ ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac),
+ getbuf, sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY));
+
+ if (ret == 0) {
+		memcpy(index, getbuf, sizeof(*index));
+ CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index));
+ }
+
+ return ret;
+}
+
+s32
+wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on)
+{
+ s32 ret = BCME_OK;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ CFGP2P_DBG(("enter\n"));
+
+ ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+ }
+
+ return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel  : the channel to listen on
+ * @listen_ms: the time (in milliseconds) to wait
+ * @bssidx : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+ wl_p2p_disc_st_t discovery_mode;
+ s32 ret;
+ struct net_device *dev;
+ CFGP2P_DBG(("enter\n"));
+
+ if (unlikely(bssidx >= P2PAPI_BSSCFG_MAX)) {
+ CFGP2P_ERR((" %d index out of range\n", bssidx));
+ return -1;
+ }
+
+ dev = wl_to_p2p_bss_ndev(wl, bssidx);
+ if (unlikely(dev == NULL)) {
+ CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+ return BCME_NOTFOUND;
+ }
+
+ /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+ discovery_mode.state = mode;
+ discovery_mode.chspec = CH20MHZ_CHSPEC(channel);
+ discovery_mode.dwell = listen_ms;
+ ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+ sizeof(discovery_mode), ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+ return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+s32
+wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index)
+{
+ s32 ret;
+ struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+
+ ret = wldev_iovar_getint(dev, "p2p_dev", index);
+ CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+ return ret;
+ }
+ return ret;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl)
+{
+
+ s32 index = 0;
+ s32 ret = BCME_OK;
+
+ CFGP2P_DBG(("enter\n"));
+
+ if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) {
+ CFGP2P_ERR(("do nothing, already initialized\n"));
+ return ret;
+ }
+
+ ret = wl_cfgp2p_set_discovery(wl, 1);
+ if (ret < 0) {
+ CFGP2P_ERR(("set discover error\n"));
+ return ret;
+ }
+ /* Enable P2P Discovery in the WL Driver */
+ ret = wl_cfgp2p_get_disc_idx(wl, &index);
+
+ if (ret < 0) {
+ return ret;
+ }
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) =
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index;
+
+ /* Set the initial discovery state to SCAN */
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+ if (unlikely(ret != 0)) {
+ CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+ wl_cfgp2p_set_discovery(wl, 0);
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+ return 0;
+ }
+ return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @wl : wl_private data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_deinit_discovery(struct wl_priv *wl)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG(("enter\n"));
+
+ if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+ CFGP2P_ERR(("do nothing, not initialized\n"));
+ return -1;
+ }
+ /* Set the discovery state to SCAN */
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+ ret = wl_cfgp2p_set_discovery(wl, 0);
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS. The driver
+ * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+ * BSS.
+ */
+
+ /* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+ * have no discovery BSS.
+ */
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+ return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @wl : wl_private data
+ * @ie : probe request ie (WPS IE + P2P IE)
+ * @ie_len : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len)
+{
+ s32 ret = BCME_OK;
+ if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+ goto set_ie;
+ }
+
+ wl_set_p2p_status(wl, DISCOVERY_ON);
+
+ CFGP2P_DBG(("enter\n"));
+
+ ret = wl_cfgp2p_init_discovery(wl);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR((" init discovery error %d\n", ret));
+ goto exit;
+ }
+ /* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+ * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+ * Some peer devices may not initiate WPS with us if this bit is not set.
+ */
+ ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE),
+ "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR((" wsec error %d\n", ret));
+ }
+set_ie:
+ ret = wl_cfgp2p_set_management_ie(wl, dev,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE),
+ VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+ if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("failed to set probe req ie, error %d\n", ret));
+ goto exit;
+ }
+exit:
+ return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @wl : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG((" enter\n"));
+ wl_clr_p2p_status(wl, DISCOVERY_ON);
+
+ if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+ CFGP2P_ERR((" do nothing, not initialized\n"));
+ goto exit;
+ }
+
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+ if (unlikely(ret < 0)) {
+
+ CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+ }
+ /* Do a scan abort to stop the driver's scan engine in case it is still
+ * waiting out an action frame tx dwell time.
+ */
+#ifdef NOT_YET
+ if (wl_get_p2p_status(wl, SCANNING)) {
+ p2pwlu_scan_abort(hdl, FALSE);
+ }
+#endif
+ wl_clr_p2p_status(wl, DISCOVERY_ON);
+ ret = wl_cfgp2p_deinit_discovery(wl);
+
+exit:
+ return ret;
+}
+
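+/*
+ * Build and fire a "p2p_scan" iovar: a wl_p2p_scan_t header (type 'E' for
+ * escan) immediately followed by wl_escan_params_t covering num_chans
+ * channels. Dwell times depend on whether this is the 3-channel social
+ * scan and on whether we are currently associated.
+ */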
+s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
+ u32 num_chans, u16 *channels,
+ s32 search_state, u16 action, u32 bssidx)
+{
+ s32 ret = BCME_OK;
+ s32 memsize;
+ s32 eparams_size;
+ u32 i;
+ s8 *memblk;
+ wl_p2p_scan_t *p2p_params;
+ wl_escan_params_t *eparams;
+ wlc_ssid_t ssid;
+ /* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 4
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 100
+#define P2PAPI_SCAN_HOME_TIME_MS 10
+ struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+ wl_set_p2p_status(wl, SCANNING);
+	/* Allocate scan params with space for num_chans channels and 0 ssids */
+ eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+
+ memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+ memblk = scanparambuf;
+ if (memsize > sizeof(scanparambuf)) {
+ CFGP2P_ERR((" scanpar buf too small (%u > %u)\n",
+ memsize, sizeof(scanparambuf)));
+ return -1;
+ }
+ memset(memblk, 0, memsize);
+ memset(ioctlbuf, 0, sizeof(ioctlbuf));
+ if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * If we are in SEARCH state, we don't need to set the SSID
+		 * explicitly because the dongle uses the P2P wildcard SSID
+		 * internally by default.
+		 */
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+ ssid.SSID_len = htod32(0);
+
+ } else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN state: plain 802.11 scan.
+		 * The WFD supplicant issues p2p_find with type=progressive or
+		 * type=full; for type=progressive we have to set the SSID to the
+		 * P2P wildcard, otherwise we would just do a broadcast scan
+		 * without an SSID.
+		 */
+ strcpy(ssid.SSID, WL_P2P_WILDCARD_SSID);
+ ssid.SSID_len = htod32(WL_P2P_WILDCARD_SSID_LEN);
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+ }
+
+
+ /* Fill in the P2P scan structure at the start of the iovar param block */
+ p2p_params = (wl_p2p_scan_t*) memblk;
+ p2p_params->type = 'E';
+ /* Fill in the Scan structure that follows the P2P scan structure */
+ eparams = (wl_escan_params_t*) (p2p_params + 1);
+ eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+ if (active)
+ eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+ else
+ eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+ memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+ if (ssid.SSID_len)
+ memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+ eparams->params.nprobes = htod32(P2PAPI_SCAN_NPROBES);
+ eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ if (wl_get_drv_status(wl, CONNECTED))
+ eparams->params.active_time = htod32(-1);
+ else if (num_chans == 3)
+ eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+ else
+ eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+ eparams->params.passive_time = htod32(-1);
+ eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ for (i = 0; i < num_chans; i++) {
+ eparams->params.channel_list[i] = htodchanspec(channels[i]);
+ }
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->sync_id = htod16(0x1234);
+ CFGP2P_INFO(("SCAN CHANNELS : "));
+
+ for (i = 0; i < num_chans; i++) {
+ if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+ else CFGP2P_INFO((",%d", channels[i]));
+ }
+
+ CFGP2P_INFO(("\n"));
+
+ ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+ memblk, memsize, smbuf, sizeof(ioctlbuf), bssidx);
+ return ret;
+}
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Delete and set a management vndr ie in the firmware.
+ * Parameters:
+ * @wl : wl_private data
+ * @ndev : net device for bssidx
+ * @bssidx : bssidx for BSS
+ * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+ * VNDR_IE_ASSOCREQ_FLAG)
+ * @ie : VNDR IE (such as P2P IE , WPS IE)
+ * @ie_len : VNDR IE Length
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+ s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+ /* Vendor-specific Information Element ID */
+#define VNDR_SPEC_ELEMENT_ID 0xdd
+ s32 ret = BCME_OK;
+ u32 pos;
+ u8 *ie_buf;
+ u8 *mgmt_ie_buf = NULL;
+ u32 mgmt_ie_buf_len = 0;
+ u32 *mgmt_ie_len = 0;
+ u8 ie_id, ie_len;
+ u8 delete = 0;
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len)
+ if (wl->p2p_supported && p2p_on(wl) && bssidx != -1) {
+ if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ switch (pktflag) {
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = IE_TYPE(probe_req, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(probe_req, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, bssidx));
+ break;
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = IE_TYPE(probe_res, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(probe_res, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, bssidx));
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = IE_TYPE(assoc_req, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(assoc_req, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, bssidx));
+ break;
+ case VNDR_IE_ASSOCRSP_FLAG :
+ mgmt_ie_buf = IE_TYPE(assoc_res, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(assoc_res, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, bssidx));
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = IE_TYPE(beacon, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(beacon, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, bssidx));
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+ } else if (get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+ switch (pktflag) {
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = wl->ap_info->probe_res_ie;
+ mgmt_ie_len = &wl->ap_info->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie);
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = wl->ap_info->beacon_ie;
+ mgmt_ie_len = &wl->ap_info->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+ bssidx = 0;
+ } else if (bssidx == -1 && get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) {
+ switch (pktflag) {
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = wl->sta_info->probe_req_ie;
+ mgmt_ie_len = &wl->sta_info->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie);
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = wl->sta_info->assoc_req_ie;
+ mgmt_ie_len = &wl->sta_info->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+ bssidx = 0;
+ } else {
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+
+ if (vndr_ie_len > mgmt_ie_buf_len) {
+ CFGP2P_ERR(("extra IE size too big\n"));
+ ret = -ENOMEM;
+ } else {
+ if (mgmt_ie_buf != NULL) {
+ if (vndr_ie_len && (vndr_ie_len == *mgmt_ie_len) &&
+ (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) {
+ CFGP2P_INFO(("Previous mgmt IE is equals to current IE"));
+ goto exit;
+ }
+ pos = 0;
+ delete = 1;
+ ie_buf = (u8 *) mgmt_ie_buf;
+ while (pos < *mgmt_ie_len) {
+ ie_id = ie_buf[pos++];
+ ie_len = ie_buf[pos++];
+ if ((ie_id == DOT11_MNG_VS_ID) &&
+ (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+ wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) {
+ CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :"
+ "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+ ie_buf[pos+1], ie_buf[pos+2]));
+ ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, ie_buf+pos,
+ VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, ie_len-3, delete);
+ }
+ pos += ie_len;
+ }
+
+ }
+ *mgmt_ie_len = 0;
+ /* Add if there is any extra IE */
+ if (vndr_ie && vndr_ie_len) {
+ /* save the current IE in wl struct */
+ memcpy(mgmt_ie_buf, vndr_ie, vndr_ie_len);
+ *mgmt_ie_len = vndr_ie_len;
+ pos = 0;
+ ie_buf = (u8 *) vndr_ie;
+ delete = 0;
+ while (pos < vndr_ie_len) {
+ ie_id = ie_buf[pos++];
+ ie_len = ie_buf[pos++];
+ if ((ie_id == DOT11_MNG_VS_ID) &&
+ (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+ wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) {
+ CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :"
+ "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+ ie_buf[pos+1], ie_buf[pos+2]));
+ ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, ie_buf+pos,
+ VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, ie_len-3, delete);
+ }
+ pos += ie_len;
+ }
+ }
+ }
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+ return ret;
+}
+
+/* Clear the management IE buffer of the BSSCFG
+ * Parameters:
+ * @wl : wl_private data
+ * @bssidx : bssidx for BSS
+ *
+ * Returns 0 on success.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx)
+{
+#define INIT_IE(IE_TYPE, BSS_TYPE) \
+ do { \
+ memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ } while (0);
+ if (bssidx < 0) {
+ CFGP2P_ERR(("invalid bssidx\n"));
+ return BCME_BADARG;
+ }
+ INIT_IE(probe_req, bssidx);
+ INIT_IE(probe_res, bssidx);
+ INIT_IE(assoc_req, bssidx);
+ INIT_IE(assoc_res, bssidx);
+ INIT_IE(beacon, bssidx);
+ return BCME_OK;
+}
+
+
+/* Does the given TLV match the expected OUI and type? If not, advance the
+ * tlvs buffer pointer past it and update the remaining length.
+ */
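+/* On a mismatch this helper advances *tlvs past the current IE and shrinks
+ * *tlvs_len accordingly, so the bcm_parse_tlvs() loops in the wl_cfgp2p_find_*
+ * helpers below resume scanning right after the rejected IE.
+ */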
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return TRUE;
+ }
+
+ if (tlvs == NULL)
+ return FALSE;
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return FALSE;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+ return (wifi_p2p_ie_t *)ie;
+ }
+ }
+ return NULL;
+}
+
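+/* Build a single-entry vndr_ie_setbuf_t ("add" or "del" command, pktflag,
+ * element ID, 3-byte OUI plus payload) and push it to the firmware through
+ * the per-bsscfg "vndr_ie" iovar.
+ */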
+static s32
+wl_cfgp2p_vndr_ie(struct net_device *ndev, s32 bssidx, s32 pktflag,
+ s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete)
+{
+ s32 err = BCME_OK;
+ s32 buf_len;
+ s32 iecount;
+
+ vndr_ie_setbuf_t *ie_setbuf;
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+ CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+ return -1;
+ }
+
+ buf_len = sizeof(vndr_ie_setbuf_t) + data_len - 1;
+ ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+ CFGP2P_INFO((" ie_id : %02x, data length : %d\n", ie_id, data_len));
+ if (!ie_setbuf) {
+
+ CFGP2P_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
+ }
+ if (delete)
+ strcpy(ie_setbuf->cmd, "del");
+ else
+ strcpy(ie_setbuf->cmd, "add");
+ /* Buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int));
+ pktflag = htod32(pktflag);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag,
+ &pktflag, sizeof(uint32));
+ ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+ ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len
+ = (uchar)(data_len + VNDR_IE_MIN_LEN);
+ memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, oui, 3);
+ memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, data, data_len);
+ err = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", ie_setbuf, buf_len,
+ ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+ CFGP2P_INFO(("vndr_ie iovar returns %d\n", err));
+ kfree(ie_setbuf);
+ return err;
+}
+
+/*
+ * Search the bssidx based on dev argument
+ * Parameters:
+ * @wl : wl_private data
+ * @ndev : net device to search bssidx
+ * Returns bssidx for ndev
+ */
+s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev)
+{
+ u32 i;
+ s32 index = -1;
+
+ if (ndev == NULL) {
+ CFGP2P_ERR((" ndev is NULL\n"));
+ goto exit;
+ }
+ if (!wl->p2p_supported) {
+ return P2PAPI_BSSCFG_PRIMARY;
+ }
+ for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (ndev == wl_to_p2p_bss_ndev(wl, i)) {
+ index = wl_to_p2p_bss_bssidx(wl, i);
+ break;
+ }
+ }
+ if (index == -1)
+ return P2PAPI_BSSCFG_PRIMARY;
+exit:
+ return index;
+}
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) {
+ wl_set_p2p_status(wl, LISTEN_EXPIRED);
+ if (timer_pending(&wl->p2p->listen_timer)) {
+ spin_lock_bh(&wl->p2p->timer_lock);
+ del_timer_sync(&wl->p2p->listen_timer);
+ spin_unlock_bh(&wl->p2p->timer_lock);
+ }
+ cfg80211_remain_on_channel_expired(ndev, wl->cache_cookie, &wl->remain_on_chan,
+ wl->remain_on_chan_type, GFP_KERNEL);
+ } else
+ wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+ return ret;
+
+}
+
+/*
+ * Timer expiry callback for LISTEN.
+ * We can't report cfg80211_remain_on_channel_expired() from timer (ISR) context,
+ * so let's do it from thread context by queueing the completion event.
+ */
+static void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+ wl_event_msg_t msg;
+ struct wl_priv *wl = (struct wl_priv *) data;
+
+ CFGP2P_DBG((" Enter\n"));
+ msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+}
+
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters :
+ * @wl : wl_private data
+ * @channel : channel to listen on
+ * @duration_ms : the time (in milliseconds) to listen
+ */
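+/* The listen is programmed by putting the discovery bsscfg into
+ * WL_P2P_DISC_ST_LISTEN; the timer armed below is a watchdog that reports
+ * the expiry itself if WLC_E_P2P_DISC_LISTEN_COMPLETE never arrives.
+ */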
+s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms)
+{
+#define INIT_TIMER(timer, func, duration, extra_delay) \
+ do { \
+ init_timer(timer); \
+ timer->function = func; \
+ timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ timer->data = (unsigned long) wl; \
+ add_timer(timer); \
+ } while (0);
+
+ s32 ret = BCME_OK;
+ struct timer_list *_timer;
+ CFGP2P_DBG((" Enter Channel : %d, Duration : %d\n", channel, duration_ms));
+ if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) {
+
+ CFGP2P_ERR((" Discovery is not set, so we have noting to do\n"));
+
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ if (timer_pending(&wl->p2p->listen_timer)) {
+ CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+ goto exit;
+
+ } else
+ wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ _timer = &wl->p2p->listen_timer;
+
+ /* We wait for WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+ * otherwise the timer below fires after duration_ms + 200 ms
+ */
+ INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, 200);
+
+#undef INIT_TIMER
+exit:
+ return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG((" Enter\n"));
+ if (!wl_get_p2p_status(wl, DISCOVERY_ON)) {
+
+ CFGP2P_DBG((" do nothing, discovery is off\n"));
+ return ret;
+ }
+ if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) {
+ CFGP2P_DBG(("already : %d\n", enable));
+ return ret;
+ }
+
+ wl_chg_p2p_status(wl, SEARCH_ENABLED);
+ /* When disabling Search, reset the WL driver's p2p discovery state to
+ * WL_P2P_DISC_ST_SCAN.
+ */
+ if (!enable) {
+ wl_clr_p2p_status(wl, SCANNING);
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ }
+
+ return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+ u32 event_type = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ CFGP2P_DBG((" Enter\n"));
+ if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+ if (status == WLC_E_STATUS_SUCCESS) {
+ wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
+ }
+ else {
+ wl_set_p2p_status(wl, ACTION_TX_NOACK);
+ CFGP2P_ERR(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+ }
+ wake_up_interruptible(&wl->dongle_event_wait);
+ } else {
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
+ "status : %d\n", status));
+ }
+ return ret;
+}
+/* Send an action frame immediately, without doing channel synchronization.
+ *
+ * This function waits up to 2 seconds for a completion event before returning.
+ * WLC_E_ACTION_FRAME_COMPLETE is received once the action frame has been
+ * transmitted; its status indicates whether an 802.11 ack was received.
+ * WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE is reported separately for off-channel
+ * transmissions and is only logged here.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx)
+{
+ s32 ret = BCME_OK;
+ s32 timeout = 0;
+
+
+ CFGP2P_INFO(("\n"));
+ CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+ af_params->channel, af_params->dwell_time));
+
+ wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(wl, ACTION_TX_NOACK);
+#define MAX_WAIT_TIME 2000
+ if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, "actframe",
+ af_params, sizeof(*af_params), ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+ if (ret < 0) {
+
+ CFGP2P_ERR((" sending action frame is failed\n"));
+ goto exit;
+ }
+ timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+ (wl_get_p2p_status(wl, ACTION_TX_COMPLETED) || wl_get_p2p_status(wl, ACTION_TX_NOACK)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+ if (timeout > 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
+ CFGP2P_INFO(("tx action frame operation is completed\n"));
+ ret = BCME_OK;
+ } else {
+ ret = BCME_ERROR;
+ CFGP2P_INFO(("tx action frame operation is failed\n"));
+ }
+exit:
+ CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+#undef MAX_WAIT_TIME
+ return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
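+/* For example (hypothetical primary MAC): 00:90:4c:aa:bb:cc becomes the
+ * device address 02:90:4c:aa:bb:cc (locally administered bit set) and the
+ * interface address 02:90:4c:aa:3b:cc (top bit of octet[4] toggled).
+ */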
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+ struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+ memset(out_dev_addr, 0, sizeof(*out_dev_addr));
+ memset(out_int_addr, 0, sizeof(*out_int_addr));
+
+ /* Generate the P2P Device Address. This consists of the device's
+ * primary MAC address with the locally administered bit set.
+ */
+ memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+ out_dev_addr->octet[0] |= 0x02;
+
+ /* Generate the P2P Interface Address. If the discovery and connection
+ * BSSCFGs need to simultaneously co-exist, then this address must be
+ * different from the P2P Device Address.
+ */
+ memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+ out_int_addr->octet[4] ^= 0x80;
+
+}
+
+/* Rewrite the P2P interface-address attributes inside a P2P IE with the
+ * virtual interface MAC address.
+ */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+ wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+ u16 len = ie->len;
+ u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+ CFGP2P_DBG((" Enter\n"));
+
+ /* Point subel to the P2P IE's subelt field.
+ * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+ */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
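+ /* Each P2P attribute is a 1-byte ID, a 2-byte little-endian length and a
+ * value; when the ID matches element_id the first ETHER_ADDR_LEN bytes of
+ * the value are overwritten with the interface address.
+ */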
+
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ if (subelt_id == P2P_SEID_INTINTADDR) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_INFO) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_GROUP_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+ }
+ return;
+ } else {
+ CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+ }
+ subel += subelt_len;
+ }
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup(). DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
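+/* The "bss" iovar is issued as a GET with the bsscfg index as its input
+ * argument; a non-zero result in getbuf means the BSS is up.
+ */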
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+ s32 result, val;
+ bool isup = false;
+ s8 getbuf[64];
+
+ /* Check if the BSS is up */
+ *(int*)getbuf = -1;
+ result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+ sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0);
+ if (result != 0) {
+ CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result));
+ CFGP2P_ERR(("NOTE: this ioctl error is normal "
+ "when the BSS has not been created yet.\n"));
+ } else {
+ val = *(int*)getbuf;
+ val = dtoh32(val);
+ CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, val));
+ isup = (val ? TRUE : FALSE);
+ }
+ return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+ s32 ret = BCME_OK;
+ s32 val = up ? 1 : 0;
+
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+ CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ ioctlbuf, sizeof(ioctlbuf));
+
+ if (ret != 0) {
+ CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+ }
+
+ return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ s32 p2p_supported = 0;
+ ret = wldev_iovar_getint(ndev, "p2p",
+ &p2p_supported);
+ if (ret < 0) {
+ CFGP2P_ERR(("wl p2p error %d\n", ret));
+ return 0;
+ }
+ if (p2p_supported == 1) {
+ CFGP2P_INFO(("p2p is supported\n"));
+ } else {
+ CFGP2P_INFO(("p2p is unsupported\n"));
+ p2p_supported = 0;
+ }
+ return p2p_supported;
+}
+
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct wl_priv *wl)
+{
+ if (timer_pending(&wl->p2p->listen_timer))
+ del_timer_sync(&wl->p2p->listen_timer);
+ wl_cfgp2p_deinit_priv(wl);
+ return 0;
+}
+
+s32 wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+{
+ s32 ret = -1;
+ int count, start, duration;
+ wl_p2p_sched_t dongle_noa;
+
+ CFGP2P_DBG((" Enter\n"));
+
+ memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+ if (wl->p2p && wl->p2p->vif_created) {
+
+ wl->p2p->noa.desc[0].start = 0;
+
+ sscanf(buf, "%d %d %d", &count, &start, &duration);
+ CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+ count, start, duration));
+ if (count != -1)
+ wl->p2p->noa.desc[0].count = count;
+
+ /* supplicant gives interval as start */
+ if (start != -1)
+ wl->p2p->noa.desc[0].interval = start;
+
+ if (duration != -1)
+ wl->p2p->noa.desc[0].duration = duration;
+
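+ /* A count of 255 denotes a continuous NoA schedule; any other count is
+ * programmed as a one-shot requested-absence ("GO off") schedule with a
+ * fixed start offset. For continuous schedules an interval of 100 or 102
+ * selects the beacon-percentage encoding.
+ */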
+ if (wl->p2p->noa.desc[0].count != 255) {
+ wl->p2p->noa.desc[0].start = 200;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+ }
+ else {
+ /* Continuous NoA interval. */
+ dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+ if ((wl->p2p->noa.desc[0].interval == 102) ||
+ (wl->p2p->noa.desc[0].interval == 100)) {
+ wl->p2p->noa.desc[0].start = 100 -
+ wl->p2p->noa.desc[0].duration;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+ }
+ else {
+ dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+ }
+ }
+ /* Convert the NoA descriptor into the dongle's expected format */
+ dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count);
+ if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+ dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start);
+ dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration);
+ }
+ else {
+ dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000);
+ dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000);
+ }
+ dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000);
+
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_noa", &dongle_noa, sizeof(dongle_noa), ioctlbuf, sizeof(ioctlbuf));
+
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+ }
+ return ret;
+}
+
+s32 wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len)
+{
+ wifi_p2p_noa_desc_t *noa_desc;
+ int len = 0, i;
+ char _buf[200];
+
+ CFGP2P_DBG((" Enter\n"));
+ buf[0] = '\0';
+ if (wl->p2p && wl->p2p->vif_created) {
+ if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) {
+ _buf[0] = 1; /* noa index */
+ _buf[1] = (wl->p2p->ops.ops ? 0x80: 0) |
+ (wl->p2p->ops.ctw & 0x7f); /* ops + ctw */
+ len += 2;
+ if (wl->p2p->noa.desc[0].count) {
+ noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+ noa_desc->cnt_type = wl->p2p->noa.desc[0].count;
+ noa_desc->duration = wl->p2p->noa.desc[0].duration;
+ noa_desc->interval = wl->p2p->noa.desc[0].interval;
+ noa_desc->start = wl->p2p->noa.desc[0].start;
+ len += sizeof(wifi_p2p_noa_desc_t);
+ }
+ if (buf_len <= len * 2) {
+ CFGP2P_ERR(("ERROR: buf_len %d in not enough for"
+ "returning noa in string format\n", buf_len));
+ return -1;
+ }
+ /* We have to convert the buffer data into ASCII strings */
+ for (i = 0; i < len; i++) {
+ sprintf(buf, "%02x", _buf[i]);
+ buf += 2;
+ }
+ *buf = '\0'; /* already terminated by the last sprintf; avoid indexing past the string */
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+ return -1;
+ }
+ return len * 2;
+}
+
+s32 wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+{
+ int ps, ctw;
+ int ret = -1;
+ s32 legacy_ps;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl->p2p && wl->p2p->vif_created) {
+ sscanf(buf, "%d %d %d", &legacy_ps, &ps, &ctw);
+ CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+ if (ctw != -1) {
+ wl->p2p->ops.ctw = ctw;
+ ret = 0;
+ }
+ if (ps != -1) {
+ wl->p2p->ops.ops = ps;
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops),
+ ioctlbuf, sizeof(ioctlbuf));
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+ }
+ }
+
+ if (legacy_ps != -1) {
+ s32 pm = legacy_ps ? PM_MAX : PM_OFF;
+ ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ WLC_SET_PM, &pm, sizeof(pm), true);
+ if (unlikely(ret)) {
+ CFGP2P_ERR(("error (%d)\n", ret));
+ }
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+ ret = -1;
+ }
+ return ret;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 000000000000..5a69168c6a3a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,247 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.h,v 1.1.4.1.2.8 2011/02/09 01:37:52 Exp $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct wl_priv;
+extern u32 wl_dbg_level;
+
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not
+ * confuse this with a bsscfg index. This value is an index into the
+ * bss_idx[] array of structures, each of which in turn contains a bsscfg
+ * index field.
+ */
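+/* Example: wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) returns the driver
+ * bsscfg index that was assigned to the P2P discovery BSS, which is what most
+ * of the iovar calls in wl_cfgp2p.c pass as their bsscfg argument.
+ */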
+typedef enum {
+ P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+#define IE_MAX_LEN 300
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+ u8 p2p_probe_req_ie[IE_MAX_LEN];
+ u8 p2p_probe_res_ie[IE_MAX_LEN];
+ u8 p2p_assoc_req_ie[IE_MAX_LEN];
+ u8 p2p_assoc_res_ie[IE_MAX_LEN];
+ u8 p2p_beacon_ie[IE_MAX_LEN];
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_res_ie_len;
+ u32 p2p_assoc_req_ie_len;
+ u32 p2p_assoc_res_ie_len;
+ u32 p2p_beacon_ie_len;
+};
+
+struct p2p_bss {
+ u32 bssidx;
+ struct net_device *dev;
+ struct p2p_saved_ie saved_ie;
+ void *private_data;
+};
+
+struct p2p_info {
+ bool on; /* p2p on/off switch */
+ bool scan;
+ bool vif_created;
+ s8 vir_ifname[IFNAMSIZ];
+ unsigned long status;
+ struct ether_addr dev_addr;
+ struct ether_addr int_addr;
+ struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+ struct timer_list listen_timer;
+ wl_p2p_sched_t noa;
+ wl_p2p_ops_t ops;
+ wlc_ssid_t ssid;
+ spinlock_t timer_lock;
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+ WLP2P_STATUS_DISCOVERY_ON = 0,
+ WLP2P_STATUS_SEARCH_ENABLED,
+ WLP2P_STATUS_IF_ADD,
+ WLP2P_STATUS_IF_DEL,
+ WLP2P_STATUS_IF_DELETING,
+ WLP2P_STATUS_IF_CHANGING,
+ WLP2P_STATUS_IF_CHANGED,
+ WLP2P_STATUS_LISTEN_EXPIRED,
+ WLP2P_STATUS_ACTION_TX_COMPLETED,
+ WLP2P_STATUS_ACTION_TX_NOACK,
+ WLP2P_STATUS_SCANNING
+};
+
+
+#define wl_to_p2p_bss_ndev(wl, type) ((wl)->p2p->bss_idx[type].dev)
+#define wl_to_p2p_bss_bssidx(wl, type) ((wl)->p2p->bss_idx[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(wl, type) ((wl)->p2p->bss_idx[type].saved_ie)
+#define wl_to_p2p_bss_private(wl, type) ((wl)->p2p->bss_idx[type].private_data)
+#define wl_to_p2p_bss(wl, type) ((wl)->p2p->bss_idx[type])
+#define wl_get_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : test_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_set_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? : set_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_clr_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? : clear_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_chg_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? : change_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define p2p_on(wl) ((wl)->p2p->on)
+#define p2p_scan(wl) ((wl)->p2p->scan)
+#define p2p_is_on(wl) ((wl)->p2p && (wl)->p2p->on)
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+#define CFGP2P_ERR(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_ERR "CFGP2P-ERROR) %s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#define CFGP2P_INFO(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printk(KERN_ERR "CFGP2P-INFO) %s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#define CFGP2P_DBG(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_ERR "CFGP2P-DEBUG) %s :", __func__); \
+ printk args; \
+ } \
+ } while (0)
+
+
+extern s32
+wl_cfgp2p_init_priv(struct wl_priv *wl);
+extern void
+wl_cfgp2p_deinit_priv(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode,
+ u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, u32 num_chans,
+ u16 *channels,
+ s32 search_state, u16 action, u32 bssidx);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern s32
+wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+ s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+ struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct wl_priv *wl);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN "11"
+#define IS_P2P_SSID(ssid) (memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) == 0)
+#endif /* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 000000000000..0b99557cbe8d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,49 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_dbg.h,v 1.115.6.3 2010-12-15 21:42:23 Exp $
+ */
+
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_PRINT(args) printf args
+
+
+
+#define WL_NONE(args)
+
+#define WL_ERROR(args)
+#define WL_TRACE(args)
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 000000000000..ba3cc6c876ca
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,8766 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c,v 1.132.2.18 2011-02-05 01:44:47 Exp $
+ */
+
+#include <wlioctl.h>
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+
+typedef void wlc_info_t;
+typedef void wl_info_t;
+typedef const struct si_pub si_t;
+#include <wlioctl.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+#define WL_ASSOC(x)
+#define WL_INFORM(x)
+#define WL_WSEC(x)
+#define WL_SCAN(x)
+
+
+#ifdef PNO_SET_DEBUG
+#define WL_PNO(x) printf x
+#else
+#define WL_PNO(x)
+#endif
+
+
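+/* Current uptime in milliseconds, derived from jiffies (whole seconds plus
+ * the sub-second remainder converted via HZ); used to timestamp coex traces.
+ */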
+#define JF2MS ((((jiffies / HZ) * 1000) + ((jiffies % HZ) * 1000) / HZ))
+
+#ifdef COEX_DBG
+#define WL_TRACE_COEX(x) printf("TS:%lu ", JF2MS); \
+ printf x
+#else
+#define WL_TRACE_COEX(x)
+#endif
+
+#ifdef SCAN_DBG
+#define WL_TRACE_SCAN(x) printf("TS:%lu ", JF2MS); \
+ printf x
+#else
+#define WL_TRACE_SCAN(x)
+#endif
+
+
+#include <wl_iw.h>
+
+
+
+
+#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+
+#include <linux/rtnetlink.h>
+
+#define WL_IW_USE_ISCAN 1
+#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS 1
+
+#ifdef OEM_CHROMIUMOS
+bool g_set_essid_before_scan = TRUE;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ struct mutex g_wl_ss_scan_lock;
+#endif
+
+#if defined(SOFTAP)
+#define WL_SOFTAP(x)
+static struct net_device *priv_dev;
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;
+static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap);
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac);
+#endif
+
+
+#define WL_IW_IOCTL_CALL(func_call) \
+ do { \
+ func_call; \
+ } while (0)
+
+#define RETURN_IF_EXTRA_NULL(extra) \
+ if (!extra) { \
+ WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \
+ return -EINVAL; \
+ }
+
+static int g_onoff = G_WLAN_SET_ON;
+wl_iw_extra_params_t g_wl_iw_params;
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+
+static struct mutex wl_cache_lock;
+static struct mutex wl_softap_lock;
+
+#define DHD_OS_MUTEX_INIT(a) mutex_init(a)
+#define DHD_OS_MUTEX_LOCK(a) mutex_lock(a)
+#define DHD_OS_MUTEX_UNLOCK(a) mutex_unlock(a)
+
+#else
+
+#define DHD_OS_MUTEX_INIT(a)
+#define DHD_OS_MUTEX_LOCK(a)
+#define DHD_OS_MUTEX_UNLOCK(a)
+
+#endif
+
+#include <bcmsdbus.h>
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+extern void dhd_dev_init_ioctl(struct net_device *dev);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#ifdef CONFIG_WIRELESS_EXT
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#endif
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
+#endif
+
+static void *g_scan = NULL;
+static volatile uint g_scan_specified_ssid;
+static wlc_ssid_t g_specific_ssid;
+
+static wlc_ssid_t g_ssid;
+
+#ifdef CONFIG_WPS2
+static char *g_wps_probe_req_ie;
+static int g_wps_probe_req_ie_len;
+#endif
+
+bool btcoex_is_sco_active(struct net_device *dev);
+static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl;
+#if defined(CONFIG_FIRST_SCAN)
+static volatile uint g_first_broadcast_scan;
+static volatile uint g_first_counter_scans;
+#define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+static void wl_iw_free_ss_cache(void);
+static int wl_iw_run_ss_cache_timer(int kick_off);
+#endif
+#if defined(CONFIG_FIRST_SCAN)
+int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+#endif
+static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len);
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+
+
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+ struct net_device *dev;
+ struct timer_list timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+ int iscan_state;
+ iscan_buf_t * list_hdr;
+ iscan_buf_t * list_cur;
+
+
+ tsk_ctl_t tsk_ctl;
+
+ uint32 scan_flag;
+#if defined CSCAN
+ char ioctlbuf[WLC_IOCTL_MEDLEN];
+#else
+ char ioctlbuf[WLC_IOCTL_SMLEN];
+#endif
+
+ wl_iscan_params_t *iscan_ex_params_p;
+ int iscan_ex_param_size;
+} iscan_info_t;
+
+
+
+#define COEX_DHCP 1
+#ifdef COEX_DHCP
+
+#define BT_DHCP_eSCO_FIX
+#define BT_DHCP_USE_FLAGS
+#define BT_DHCP_OPPORTUNITY_WINDOW_TIME 2500
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+
+
+static int wl_iw_set_btcoex_dhcp(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+);
+
+static void wl_iw_bt_flag_set(struct net_device *dev, bool set);
+static void wl_iw_bt_release(void);
+
+typedef enum bt_coex_status {
+ BT_DHCP_IDLE = 0,
+ BT_DHCP_START,
+ BT_DHCP_OPPORTUNITY_WINDOW,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+} coex_status_t;
+
+
+typedef struct bt_info {
+ struct net_device *dev;
+ struct timer_list timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+ uint32 ts_dhcp_start;
+ uint32 ts_dhcp_ok;
+ bool dhcp_done;
+ int bt_state;
+
+
+ tsk_ctl_t tsk_ctl;
+
+} bt_info_t;
+
+bt_info_t *g_bt = NULL;
+static void wl_iw_bt_timerfunc(ulong data);
+#endif
+iscan_info_t *g_iscan = NULL;
+void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+#endif
+
+static int
+wl_iw_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+);
+
+#ifndef CSCAN
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+);
+
+static uint
+wl_iw_get_scan_prep(
+ wl_scan_results_t *list,
+ struct iw_request_info *info,
+ char *extra,
+ short max_size
+);
+#endif
+
+static void
+swap_key_from_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void
+swap_key_to_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
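+/* Central ioctl path for this file: wraps the command in a wl_ioctl_t, takes
+ * an OS wake lock, makes sure the interface is open and forwards it to the
+ * DHD driver through the device's SIOCDEVPRIVATE handler.
+ */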
+static int
+dev_wlc_ioctl(
+ struct net_device *dev,
+ int cmd,
+ void *arg,
+ int len
+)
+{
+ struct ifreq ifr;
+ wl_ioctl_t ioc;
+ mm_segment_t fs;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return ret;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_INFORM(("%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d ,\n",
+ __FUNCTION__, current->pid, cmd, arg, len));
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+
+ strcpy(ifr.ifr_name, dev->name);
+ ifr.ifr_data = (caddr_t) &ioc;
+
+
+ ret = dev_open(dev);
+ if (ret) {
+ WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret));
+ net_os_wake_unlock(dev);
+ return ret;
+ }
+
+ fs = get_fs();
+ set_fs(get_ds());
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
+ ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+ set_fs(fs);
+ }
+ else {
+ WL_TRACE(("%s: call after driver stop : ignored\n", __FUNCTION__));
+ }
+
+ net_os_wake_unlock(dev);
+
+ return ret;
+}
+
+
+static int
+dev_wlc_intvar_get_reg(
+ struct net_device *dev,
+ char *name,
+ uint reg,
+ int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ uint len;
+ len = bcm_mkiovar(name, (char *)(&reg), sizeof(reg), (char *)(&var), sizeof(var.buf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
+
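+/* Write a per-register variable (e.g. "btc_params"): the 8-byte payload is
+ * the 4-byte register address followed by its 4-byte value.
+ */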
+static int
+dev_wlc_intvar_set_reg(
+ struct net_device *dev,
+ char *name,
+ char *addr,
+ char * val)
+{
+ char reg_addr[8];
+
+ memset(reg_addr, 0, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+
+
+
+static int
+dev_wlc_intvar_set(
+ struct net_device *dev,
+ char *name,
+ int val)
+{
+ char buf[WLC_IOCTL_SMLEN];
+ uint len;
+
+ val = htod32(val);
+ len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+ ASSERT(len);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+#if defined(WL_IW_USE_ISCAN)
+static int
+dev_iw_iovar_setbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+
+ if (iolen == 0)
+ return 0;
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+
+ return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+#endif
+
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+ struct net_device *dev,
+ char *name,
+ char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#else
+ static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#endif
+ uint buflen;
+
+ buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf));
+ ASSERT(buflen);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen));
+}
+#endif
+
+
+static int
+dev_wlc_bufvar_get(
+ struct net_device *dev,
+ char *name,
+ char *buf, int buflen)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#else
+ static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#endif
+ int error;
+ uint len;
+
+ len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ if (!error)
+ bcopy(ioctlbuf, buf, buflen);
+
+ return (error);
+}
+
+
+
+static int
+dev_wlc_intvar_get(
+ struct net_device *dev,
+ char *name,
+ int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ uint len;
+ uint data_null;
+
+ len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+ *retval = dtoh32(var.val);
+
+ return (error);
+}
+
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_active_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int as = 0;
+ int error = 0;
+ char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+ if (g_iscan->iscan_state == ISCAN_STATE_IDLE)
+#endif
+ error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+#if defined(WL_IW_USE_ISCAN)
+ else
+ g_iscan->scan_flag = as;
+#endif
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_set_passive_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int ps = 1;
+ int error = 0;
+ char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+ if (g_iscan->iscan_state == ISCAN_STATE_IDLE) {
+#endif
+
+
+ if (g_scan_specified_ssid == 0) {
+ error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &ps, sizeof(ps));
+ }
+#if defined(WL_IW_USE_ISCAN)
+ }
+ else
+ g_iscan->scan_flag = ps;
+#endif
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+
+static int
+wl_iw_set_txpower(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ int txpower = -1;
+
+ txpower = bcm_atoi(extra + strlen(TXPOWER_SET_CMD) + 1);
+ if ((txpower >= 0) && (txpower <= 127))
+ {
+ txpower |= WL_TXPWR_OVERRIDE;
+ txpower = htod32(txpower);
+
+ error = dev_wlc_intvar_set(dev, "qtxpower", txpower);
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set TXpower 0x%X is OK\n", __FUNCTION__, txpower));
+ } else {
+ WL_ERROR(("%s: set tx power failed\n", __FUNCTION__));
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+ }
+
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_get_macaddr(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error;
+ char buf[128];
+ struct ether_addr *id;
+ char *p = extra;
+
+
+ strcpy(buf, "cur_etheraddr");
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, buf, sizeof(buf));
+ id = (struct ether_addr *) buf;
+ p += snprintf(p, MAX_WX_STRING, "Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ id->octet[0], id->octet[1], id->octet[2],
+ id->octet[3], id->octet[4], id->octet[5]);
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+
+
+
+static int
+wl_iw_set_country(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ char country_code[WLC_CNTRY_BUF_SZ];
+ int error = 0;
+ char *p = extra;
+ int country_offset;
+ int country_code_size;
+ wl_country_t cspec = {{0}, 0, {0}};
+ char smbuf[WLC_IOCTL_SMLEN];
+ scb_val_t scbval;
+
+ cspec.rev = -1;
+ memset(country_code, 0, sizeof(country_code));
+ memset(smbuf, 0, sizeof(smbuf));
+
+
+ country_offset = strcspn(extra, " ");
+ country_code_size = strlen(extra) - country_offset;
+
+
+ if (country_offset != 0) {
+ strncpy(country_code, extra + country_offset +1,
+ MIN(country_code_size, sizeof(country_code)));
+
+
+ bzero(&scbval, sizeof(scb_val_t));
+ if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+ WL_ERROR(("%s: set country failed due to Disassoc error\n", __FUNCTION__));
+ goto exit_failed;
+ }
+
+ memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+
+ get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+
+
+ if ((error = dev_iw_iovar_setbuf(dev, "country", &cspec,
+ sizeof(cspec), smbuf, sizeof(smbuf))) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_ERROR(("%s: set country for %s as %s rev %d is OK\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ dhd_bus_country_set(dev, &cspec);
+ goto exit;
+ }
+ }
+
+ WL_ERROR(("%s: set country for %s as %s rev %d failed\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+
+exit_failed:
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_set_power_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ static int pm = PM_FAST;
+ int pm_local = PM_OFF;
+ char powermode_val = 0;
+
+ WL_TRACE_COEX(("%s: DHCP session cmd:%s\n", __FUNCTION__, extra));
+
+ strncpy((char *)&powermode_val, extra + strlen("POWERMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+ dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+
+
+ net_os_set_packet_filter(dev, 0);
+
+#ifdef COEX_DHCP
+ g_bt->ts_dhcp_start = JF2MS;
+ g_bt->dhcp_done = FALSE;
+ WL_TRACE_COEX(("%s: DHCP start, pm:%d changed to pm:%d\n",
+ __FUNCTION__, pm, pm_local));
+
+#endif
+ } else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+
+
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+
+
+ net_os_set_packet_filter(dev, 1);
+
+#ifdef COEX_DHCP
+ g_bt->dhcp_done = TRUE;
+ g_bt->ts_dhcp_ok = JF2MS;
+ WL_TRACE_COEX(("%s: DHCP done for:%d ms, restored pm:%d\n",
+ __FUNCTION__, (g_bt->ts_dhcp_ok - g_bt->ts_dhcp_start), pm));
+#endif
+
+ } else {
+ WL_ERROR(("%s Unkwown yet power setting, ignored\n",
+ __FUNCTION__));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+
+
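+/* Poll btc_params register 27 up to twelve times, 5 ms apart; if more than
+ * two samples carry the SCO/eSCO packet-type indication ((param27 & 0x6) == 2)
+ * an active SCO/eSCO link is assumed.
+ */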
+bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = FALSE;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE_COEX(("%s, sample[%d], btc params: 27:%x\n",
+ __FUNCTION__, i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) {
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE_COEX(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ __FUNCTION__, sco_id_cnt, i));
+ res = TRUE;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = FALSE;
+
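+ /* Each 8-byte buffer below is a btc_params register write: byte 0 is the
+ * register number and bytes 4-7 hold the 32-bit value to program during DHCP.
+ */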
+ char buf_reg50va_dhcp_on[8] = { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] = { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] = { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] = { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] = { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+
+
+ WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+
+ saved_status = TRUE;
+ WL_TRACE_COEX(("%s saved bt_params[50,51,64,65,71]:"
+ " 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __FUNCTION__, saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+
+ } else {
+ WL_ERROR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ WL_TRACE_COEX(("override with [50,51,64,65,71]:"
+ " 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+
+ } else if (saved_status) {
+
+ WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE_COEX(("restore bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+ WL_ERROR((":%s att to restore not saved BTCOEX params\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+
+static int
+wl_iw_get_power_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ int pm_local;
+ char *p = extra;
+
+ error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local));
+ if (!error) {
+ WL_TRACE(("%s: Powermode = %d\n", __func__, pm_local));
+ if (pm_local == PM_OFF)
+ pm_local = 1;
+ else
+ pm_local = 0;
+ p += snprintf(p, MAX_WX_STRING, "powermode = %d", pm_local);
+ }
+ else {
+ WL_TRACE(("%s: Error = %d\n", __func__, error));
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+ }
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_set_btcoex_dhcp(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ char powermode_val = 0;
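+ /* As in set_btc_esco_params(), each 8-byte buffer is a btc_params register
+ * write: byte 0 is the register number and bytes 4-7 the 32-bit value
+ * applied while the DHCP session is active.
+ */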
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+#ifdef COEX_DHCP
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+ strncpy((char *)&powermode_val, extra + strlen("BTCOEXMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+
+ if ((saved_status == FALSE) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+
+
+
+#ifdef COEX_DHCP
+
+ if (btcoex_is_sco_active(dev)) {
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ g_bt->bt_state = BT_DHCP_START;
+ g_bt->timer_on = 1;
+ mod_timer(&g_bt->timer, g_bt->timer.expires);
+ WL_TRACE_COEX(("%s enable BT DHCP Timer\n",
+ __FUNCTION__));
+ }
+#endif
+ }
+ else if (saved_status == TRUE) {
+ WL_ERROR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+ }
+ }
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+
+
+#ifdef COEX_DHCP
+
+ WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__));
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+
+ if (g_bt->bt_state != BT_DHCP_IDLE) {
+
+ WL_TRACE_COEX(("%s bt->bt_state:%d\n",
+ __FUNCTION__, g_bt->bt_state));
+
+ up(&g_bt->tsk_ctl.sema);
+ }
+ }
+
+
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+#endif
+
+
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE_COEX(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+		WL_ERROR(("%s Unknown power setting, ignored\n",
+			__FUNCTION__));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+
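+/*
+ * Parse the SETSUSPEND private command argument (0 or 1) and propagate the
+ * suspend setting through net_os_set_suspend_disable()/net_os_set_suspend().
+ */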
+static int
+wl_iw_set_suspend(
+struct net_device *dev,
+struct iw_request_info *info,
+union iwreq_data *wrqu,
+char *extra
+)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(extra + strlen(SETSUSPEND_CMD) + 1) - '0';
+
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now)))
+ WL_ERROR(("%s: Suspend Flag %d -> %d\n",
+ __FUNCTION__, ret_now, suspend_flag));
+ else
+ WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
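+/*
+ * Format an SSID for printing: backslashes are doubled and non-printable
+ * bytes are escaped as \xNN; returns the length of the formatted string.
+ */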
+static int
+wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len)
+{
+ int i, c;
+ char *p = ssid_buf;
+
+ if (ssid_len > 32) ssid_len = 32;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (int)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += sprintf(p, "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+
+ return p - ssid_buf;
+}
+
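+/*
+ * Report "LinkSpeed <Mbps>": WLC_GET_RATE returns the rate in 500 kbit/s
+ * units, so it is scaled by 500000 and divided by 1000000 for the reply.
+ */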
+static int
+wl_iw_get_link_speed(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ static int link_speed;
+
+
+ net_os_wake_lock(dev);
+ if (g_onoff == G_WLAN_SET_ON) {
+ error = dev_wlc_ioctl(dev, WLC_GET_RATE, &link_speed, sizeof(link_speed));
+ link_speed *= 500000;
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "LinkSpeed %d", link_speed/1000000);
+
+ wrqu->data.length = p - extra + 1;
+
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
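+/* Query the "bcn_li_dtim" iovar and report it as "Dtim_skip <n>". */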
+static int
+wl_iw_get_dtim_skip(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ char iovbuf[32];
+
+ net_os_wake_lock(dev);
+ if (g_onoff == G_WLAN_SET_ON) {
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ strcpy(iovbuf, "bcn_li_dtim");
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_VAR,
+ &iovbuf, sizeof(iovbuf))) >= 0) {
+
+ p += snprintf(p, MAX_WX_STRING, "Dtim_skip %d", iovbuf[0]);
+ WL_TRACE(("%s: get dtim_skip = %d\n", __FUNCTION__, iovbuf[0]));
+ wrqu->data.length = p - extra + 1;
+ }
+ else
+ WL_ERROR(("%s: get dtim_skip failed code %d\n",
+ __FUNCTION__, error));
+ }
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
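+/*
+ * Set the DTIM-skip value (0..5) through the "bcn_li_dtim" iovar and mirror
+ * it into the OS layer with net_os_set_dtim_skip().
+ */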
+static int
+wl_iw_set_dtim_skip(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ int bcn_li_dtim;
+ char iovbuf[32];
+
+ net_os_wake_lock(dev);
+ if (g_onoff == G_WLAN_SET_ON) {
+
+ bcn_li_dtim = htod32((uint)*(extra + strlen(DTIM_SKIP_SET_CMD) + 1) - '0');
+
+		if ((bcn_li_dtim >= 0) && (bcn_li_dtim <= 5)) {
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+ 4, iovbuf, sizeof(iovbuf));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_VAR,
+ &iovbuf, sizeof(iovbuf))) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+
+ net_os_set_dtim_skip(dev, bcn_li_dtim);
+
+ WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__,
+ bcn_li_dtim));
+ goto exit;
+ }
+ else WL_ERROR(("%s: set dtim_skip %d failed code %d\n",
+ __FUNCTION__, bcn_li_dtim, error));
+ }
+ else WL_ERROR(("%s Incorrect dtim_skip setting %d, ignored\n",
+ __FUNCTION__, bcn_li_dtim));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+static int
+wl_iw_get_band(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ static int band;
+
+ net_os_wake_lock(dev);
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ error = dev_wlc_ioctl(dev, WLC_GET_BAND, &band, sizeof(band));
+
+ p += snprintf(p, MAX_WX_STRING, "Band %d", band);
+
+ wrqu->data.length = p - extra + 1;
+ }
+
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+static int
+wl_iw_set_band(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ uint band;
+
+ net_os_wake_lock(dev);
+
+ if (g_onoff == G_WLAN_SET_ON) {
+
+ band = htod32((uint)*(extra + strlen(BAND_SET_CMD) + 1) - '0');
+
+ if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_BAND,
+ &band, sizeof(band))) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set band %d OK\n", __FUNCTION__, band));
+ goto exit;
+ } else {
+ WL_ERROR(("%s: set band %d failed code %d\n", __FUNCTION__,
+ band, error));
+ }
+ } else {
+ WL_ERROR(("%s Incorrect band setting %d, ignored\n", __FUNCTION__, band));
+ }
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+#ifdef PNO_SUPPORT
+
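+/*
+ * PNO (preferred network offload) private commands: reset, enable and
+ * program the PNO scan list in the dongle via the dhd_dev_pno_* helpers.
+ */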
+static int
+wl_iw_set_pno_reset(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+
+ net_os_wake_lock(dev);
+ if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+ if ((error = dhd_dev_pno_reset(dev)) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set OK\n", __FUNCTION__));
+ goto exit;
+ }
+ else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+
+static int
+wl_iw_set_pno_enable(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = -1;
+ char *p = extra;
+ int pfn_enabled;
+
+ net_os_wake_lock(dev);
+ pfn_enabled = htod32((uint)*(extra + strlen(PNOENABLE_SET_CMD) + 1) - '0');
+
+ if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+ if ((error = dhd_dev_pno_enable(dev, pfn_enabled)) >= 0) {
+ p += snprintf(p, MAX_WX_STRING, "OK");
+ WL_TRACE(("%s: set OK\n", __FUNCTION__));
+ goto exit;
+ }
+ else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+ wrqu->data.length = p - extra + 1;
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+
+
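+/*
+ * Parse the "PNOSETUP " TLV payload (pno_in_example shows the layout): a
+ * cmd_tlv_t header, one or more 'S'<len><ssid> entries, a scan-time field
+ * and optional repeat / max-frequency-expo fields (hex encoded), then
+ * program the dongle through dhd_dev_pno_set().
+ */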
+static int
+wl_iw_set_pno_set(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int res = -1;
+ wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+ int nssid = 0;
+ cmd_tlv_t *cmd_tlv_temp;
+ char *str_ptr;
+ int tlv_size_left;
+ int pno_time;
+ int pno_repeat;
+ int pno_freq_expo_max;
+#ifdef PNO_SET_DEBUG
+ int i;
+ char pno_in_example[] = {
+ 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+ 'S', '1', '2', '0',
+ 'S',
+ 0x04,
+ 'B', 'R', 'C', 'M',
+ 'S',
+ 0x04,
+ 'G', 'O', 'O', 'G',
+ 'T',
+ '1', 'E',
+ 'R',
+ '2',
+ 'M',
+ '2',
+ 0x00
+ };
+#endif
+
+ net_os_wake_lock(dev);
+ WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+ if (wrqu->data.length < (strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))) {
+		WL_ERROR(("%s argument length %d is less than %d\n", __FUNCTION__,
+			wrqu->data.length, (int)(strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))));
+ goto exit_proc;
+ }
+
+#ifdef PNO_SET_DEBUG
+ if (!(extra = kmalloc(sizeof(pno_in_example) +100, GFP_KERNEL))) {
+ res = -ENOMEM;
+ goto exit_proc;
+ }
+ memcpy(extra, pno_in_example, sizeof(pno_in_example));
+ wrqu->data.length = sizeof(pno_in_example);
+ for (i = 0; i < wrqu->data.length; i++)
+ printf("%02X ", extra[i]);
+ printf("\n");
+#endif
+
+ str_ptr = extra;
+#ifdef PNO_SET_DEBUG
+ str_ptr += strlen("PNOSETUP ");
+ tlv_size_left = wrqu->data.length - strlen("PNOSETUP ");
+#else
+ str_ptr += strlen(PNOSETUP_SET_CMD);
+ tlv_size_left = wrqu->data.length - strlen(PNOSETUP_SET_CMD);
+#endif
+
+ cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+ memset(ssids_local, 0, sizeof(ssids_local));
+ pno_repeat = pno_freq_expo_max = 0;
+
+ if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+ (cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+ (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION))
+ {
+ str_ptr += sizeof(cmd_tlv_t);
+ tlv_size_left -= sizeof(cmd_tlv_t);
+
+
+ if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+ MAX_PFN_LIST_COUNT,
+ &tlv_size_left)) <= 0) {
+			WL_ERROR(("SSID is not present or is corrupted, ret=%d\n", nssid));
+ goto exit_proc;
+ }
+ else {
+ if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+ WL_ERROR(("%s scan duration corrupted field size %d\n",
+ __FUNCTION__, tlv_size_left));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+ WL_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+
+ if (str_ptr[0] != 0) {
+ if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+ WL_ERROR(("%s pno repeat : corrupted field\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+ WL_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+ if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+ WL_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+ WL_PNO(("%s: pno_freq_expo_max=%d\n",
+ __FUNCTION__, pno_freq_expo_max));
+ }
+ }
+ }
+ else {
+		WL_ERROR(("%s: received wrong TLV command\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+
+ res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+ net_os_wake_unlock(dev);
+ return res;
+}
+#endif
+
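+/*
+ * Report the current association as "<ssid> rssi <value>" using
+ * WLC_GET_RSSI and WLC_GET_SSID.
+ */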
+static int
+wl_iw_get_rssi(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ static int rssi = 0;
+ static wlc_ssid_t ssid = {0};
+ int error = 0;
+ char *p = extra;
+ static char ssidbuf[SSID_FMT_BUF_LEN];
+ scb_val_t scb_val;
+
+ net_os_wake_lock(dev);
+
+ bzero(&scb_val, sizeof(scb_val_t));
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ error = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+ if (error) {
+ WL_ERROR(("%s: Fails %d\n", __FUNCTION__, error));
+ net_os_wake_unlock(dev);
+ return error;
+ }
+ rssi = dtoh32(scb_val.val);
+
+ error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
+ if (!error) {
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+ wl_format_ssid(ssidbuf, ssid.SSID, dtoh32(ssid.SSID_len));
+ }
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "%s rssi %d ", ssidbuf, rssi);
+ wrqu->data.length = p - extra + 1;
+
+ net_os_wake_unlock(dev);
+ return error;
+}
+
+int
+wl_iw_send_priv_event(
+ struct net_device *dev,
+ char *flag
+)
+{
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd;
+
+ cmd = IWEVCUSTOM;
+ memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) >= sizeof(extra))
+		return -1;
+
+ strcpy(extra, flag);
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ net_os_wake_lock_timeout_enable(dev, DHD_EVENT_TIMEOUT);
+ WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+ return 0;
+}
+
+
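+/*
+ * Power the WLAN chip on: toggle the reset GPIO, restart SDIO (BCMLXSDMMC
+ * builds), take the dongle out of reset, run the init ioctls and mark the
+ * driver state as G_WLAN_SET_ON.
+ */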
+int
+wl_control_wl_start(struct net_device *dev)
+{
+ wl_iw_t *iw;
+ int ret = 0;
+
+ WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+
+ if (!iw) {
+ WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ dhd_net_if_lock(dev);
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+
+#if defined(BCMLXSDMMC)
+ sdioh_start(NULL, 0);
+#endif
+
+ ret = dhd_dev_reset(dev, 0);
+
+#if defined(BCMLXSDMMC)
+ sdioh_start(NULL, 1);
+#endif
+ if (!ret)
+ dhd_dev_init_ioctl(dev);
+
+ g_onoff = G_WLAN_SET_ON;
+ }
+ WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+ dhd_net_if_unlock(dev);
+ return ret;
+}
+
+
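+/*
+ * Power the WLAN chip off: mark the driver off, abandon any iscan, drop the
+ * cached scan state, hold the dongle in reset, stop SDIO, pull the reset
+ * GPIO and send the "STOP" private event to user space.
+ */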
+static int
+wl_iw_control_wl_off(
+ struct net_device *dev,
+ struct iw_request_info *info
+)
+{
+ wl_iw_t *iw;
+ int ret = 0;
+
+ WL_TRACE(("Enter %s\n", __FUNCTION__));
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+
+ if (!iw) {
+ WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ dhd_net_if_lock(dev);
+
+#ifdef SOFTAP
+ ap_cfg_running = FALSE;
+#endif
+
+ if (g_onoff == G_WLAN_SET_ON) {
+ g_onoff = G_WLAN_SET_OFF;
+
+#if defined(WL_IW_USE_ISCAN)
+ g_iscan->iscan_state = ISCAN_STATE_IDLE;
+#endif
+
+ ret = dhd_dev_reset(dev, 1);
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+
+ wl_iw_free_ss_cache();
+ wl_iw_run_ss_cache_timer(0);
+
+ g_ss_cache_ctrl.m_link_down = 1;
+#endif
+ memset(g_scan, 0, G_SCAN_RESULTS);
+ g_scan_specified_ssid = 0;
+#if defined(CONFIG_FIRST_SCAN)
+
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+ g_first_counter_scans = 0;
+#endif
+#endif
+
+#if defined(BCMLXSDMMC)
+ sdioh_stop(NULL);
+#endif
+
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+
+ wl_iw_send_priv_event(dev, "STOP");
+ }
+
+ dhd_net_if_unlock(dev);
+
+ WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+ return ret;
+}
+
+static int
+wl_iw_control_wl_on(
+ struct net_device *dev,
+ struct iw_request_info *info
+)
+{
+ int ret = 0;
+
+ WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+ ret = wl_control_wl_start(dev);
+
+ wl_iw_send_priv_event(dev, "START");
+
+#ifdef SOFTAP
+ if (!ap_fw_loaded) {
+ wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+ }
+#else
+ wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+#endif
+
+ WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+ return ret;
+}
+
+#ifdef SOFTAP
+static struct ap_profile my_ap;
+static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap);
+static int get_assoc_sta_list(struct net_device *dev, char *buf, int len);
+static int set_ap_mac_list(struct net_device *dev, void *buf);
+
+#define PTYPE_STRING 0
+#define PTYPE_INTDEC 1
+#define PTYPE_INTHEX 2
+#define PTYPE_STR_HEX 3
+
+static int get_parameter_from_string(
+ char **str_ptr, const char *token, int param_type, void *dst, int param_max_len);
+
+static int
+hex2num(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+
+
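+/* Convert a hex string into 'len' bytes; returns -1 on a non-hex digit. */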
+static int
+hstr_2_buf(const char *txt, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int a, b;
+
+ a = hex2num(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex2num(*txt++);
+ if (b < 0)
+ return -1;
+ *buf++ = (a << 4) | b;
+ }
+
+ return 0;
+}
+
+
+
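+/*
+ * Build an ap_profile from the "ASCII_CMD=AP_CFG,SSID=...,SEC=...,KEY=...,
+ * CHANNEL=..." parameter string passed by the IWPRIV SET_AP ioctl; the
+ * PREAMBLE, MAX_SCB, HIDDEN and COUNTRY fields are optional.
+ */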
+static int
+init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg)
+{
+ char *str_ptr = param_str;
+ char sub_cmd[16];
+ int ret = 0;
+
+ memset(sub_cmd, 0, sizeof(sub_cmd));
+ memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+
+ if (get_parameter_from_string(&str_ptr, "ASCII_CMD=",
+ PTYPE_STRING, sub_cmd, SSID_LEN) != 0) {
+ return -1;
+ }
+ if (strncmp(sub_cmd, "AP_CFG", 6)) {
+ WL_ERROR(("ERROR: sub_cmd:%s != 'AP_CFG'!\n", sub_cmd));
+ return -1;
+ }
+
+
+
+ ret = get_parameter_from_string(&str_ptr, "SSID=", PTYPE_STRING, ap_cfg->ssid, SSID_LEN);
+
+ ret |= get_parameter_from_string(&str_ptr, "SEC=", PTYPE_STRING, ap_cfg->sec, SEC_LEN);
+
+ ret |= get_parameter_from_string(&str_ptr, "KEY=", PTYPE_STRING, ap_cfg->key, KEY_LEN);
+
+ ret |= get_parameter_from_string(&str_ptr, "CHANNEL=", PTYPE_INTDEC, &ap_cfg->channel, 5);
+
+
+ get_parameter_from_string(&str_ptr, "PREAMBLE=", PTYPE_INTDEC, &ap_cfg->preamble, 5);
+
+
+ get_parameter_from_string(&str_ptr, "MAX_SCB=", PTYPE_INTDEC, &ap_cfg->max_scb, 5);
+
+
+ get_parameter_from_string(&str_ptr, "HIDDEN=",
+ PTYPE_INTDEC, &ap_cfg->closednet, 5);
+
+
+ get_parameter_from_string(&str_ptr, "COUNTRY=",
+ PTYPE_STRING, &ap_cfg->country_code, 3);
+
+ return ret;
+}
+#endif
+
+
+
+#ifdef SOFTAP
+static int
+iwpriv_set_ap_config(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+ char *extra = NULL;
+ struct ap_profile *ap_cfg = &my_ap;
+
+ WL_TRACE(("> Got IWPRIV SET_AP IOCTL: info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (!ap_fw_loaded) {
+ WL_ERROR(("Can't execute %s(), SOFTAP fw is not Loaded\n",
+ __FUNCTION__));
+ return -1;
+ }
+
+ if (wrqu->data.length != 0) {
+
+ char *str_ptr;
+
+ if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ extra[wrqu->data.length] = 0;
+ WL_SOFTAP((" Got str param in iw_point:\n %s\n", extra));
+
+ memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+
+
+ str_ptr = extra;
+
+ if ((res = init_ap_profile_from_string(extra, ap_cfg)) < 0) {
+ WL_ERROR(("%s failed to parse %d\n", __FUNCTION__, res));
+ kfree(extra);
+ return -1;
+ }
+
+ } else {
+
+ WL_ERROR(("IWPRIV argument len = 0 \n"));
+ return -1;
+ }
+
+ if ((res = set_ap_cfg(dev, ap_cfg)) < 0)
+ WL_ERROR(("%s failed to set_ap_cfg %d\n", __FUNCTION__, res));
+
+ kfree(extra);
+
+ return res;
+}
+#endif
+
+
+
+#ifdef SOFTAP
+static int iwpriv_get_assoc_list(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *p_iwrq,
+ char *extra)
+{
+ int i, ret = 0;
+ char mac_buf[256];
+ struct maclist *sta_maclist = (struct maclist *)mac_buf;
+
+ char mac_lst[384];
+ char *p_mac_str;
+ char *p_mac_str_end;
+ wl_iw_t *iw;
+
+ if ((!dev) || (!extra)) {
+
+ return -EINVAL;
+ }
+
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+
+ net_os_wake_lock(dev);
+ DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+	WL_TRACE(("\n %s: IWPRIV IOCTL: cmd:%hx, flags:%hx, extra:%p, iwp.len:%d,"
+		" iwp.ptr:%p, iwp.flags:%x \n", __FUNCTION__, info->cmd, info->flags,
+		extra, p_iwrq->data.length, p_iwrq->data.pointer, p_iwrq->data.flags));
+
+
+ memset(sta_maclist, 0, sizeof(mac_buf));
+
+ sta_maclist->count = 8;
+
+ WL_SOFTAP(("%s: net device:%s, buf_sz:%d\n",
+ __FUNCTION__, dev->name, sizeof(mac_buf)));
+
+ if ((ret = get_assoc_sta_list(dev, mac_buf, sizeof(mac_buf))) < 0) {
+ WL_ERROR(("%s: sta list ioctl error:%d\n",
+ __FUNCTION__, ret));
+ goto func_exit;
+ }
+
+ WL_SOFTAP(("%s: got %d stations\n", __FUNCTION__,
+ sta_maclist->count));
+
+
+
+ memset(mac_lst, 0, sizeof(mac_lst));
+ p_mac_str = mac_lst;
+ p_mac_str_end = &mac_lst[sizeof(mac_lst)-1];
+
+ for (i = 0; i < 8; i++) {
+ struct ether_addr * id = &sta_maclist->ea[i];
+ if (!ETHER_ISNULLADDR(id->octet)) {
+ scb_val_t scb_val;
+ int rssi = 0;
+ bzero(&scb_val, sizeof(scb_val_t));
+
+
+			if ((p_mac_str_end - p_mac_str) <= 36) {
+				WL_ERROR(("%s: less than 36 bytes left in mac list buf for item[%d]\n",
+					__FUNCTION__, i));
+ break;
+ }
+
+ p_mac_str += snprintf(p_mac_str, MAX_WX_STRING,
+ "\nMac[%d]=%02X:%02X:%02X:%02X:%02X:%02X,", i,
+ id->octet[0], id->octet[1], id->octet[2],
+ id->octet[3], id->octet[4], id->octet[5]);
+
+
+ bcopy(id->octet, &scb_val.ea, 6);
+ ret = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+ if (ret < 0) {
+ snprintf(p_mac_str, MAX_WX_STRING, "RSSI:ERR");
+ WL_ERROR(("%s: RSSI ioctl error:%d\n",
+ __FUNCTION__, ret));
+ break;
+ }
+
+ rssi = dtoh32(scb_val.val);
+ p_mac_str += snprintf(p_mac_str, MAX_WX_STRING,
+ "RSSI:%d", rssi);
+ }
+ }
+
+ p_iwrq->data.length = strlen(mac_lst)+1;
+
+ WL_SOFTAP(("%s: data to user:\n%s\n usr_ptr:%p\n", __FUNCTION__,
+ mac_lst, p_iwrq->data.pointer));
+
+ if (p_iwrq->data.length) {
+ bcopy(mac_lst, extra, p_iwrq->data.length);
+ }
+
+func_exit:
+
+ DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+ net_os_wake_unlock(dev);
+
+ WL_SOFTAP(("%s: Exited\n", __FUNCTION__));
+ return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+
+#define MAC_FILT_MAX 8
+static int iwpriv_set_mac_filters(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int i, ret = -1;
+ char * extra = NULL;
+ int mac_cnt = 0;
+ int mac_mode = 0;
+ struct ether_addr *p_ea;
+ struct mac_list_set mflist_set;
+
+ WL_SOFTAP((">>> Got IWPRIV SET_MAC_FILTER IOCTL: info->cmd:%x,"
+ "info->flags:%x, u.data:%p, u.len:%d\n",
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (wrqu->data.length != 0) {
+
+ char *str_ptr;
+
+ if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ extra[wrqu->data.length] = 0;
+ WL_SOFTAP((" Got parameter string in iw_point:\n %s \n", extra));
+
+ memset(&mflist_set, 0, sizeof(mflist_set));
+
+
+ str_ptr = extra;
+
+
+
+ if (get_parameter_from_string(&str_ptr, "MAC_MODE=",
+ PTYPE_INTDEC, &mac_mode, 4) != 0) {
+ WL_ERROR(("ERROR: 'MAC_MODE=' token is missing\n"));
+ goto exit_proc;
+ }
+
+ p_ea = &mflist_set.mac_list.ea[0];
+
+ if (get_parameter_from_string(&str_ptr, "MAC_CNT=",
+ PTYPE_INTDEC, &mac_cnt, 4) != 0) {
+ WL_ERROR(("ERROR: 'MAC_CNT=' token param is missing \n"));
+ goto exit_proc;
+ }
+
+ if (mac_cnt > MAC_FILT_MAX) {
+ WL_ERROR(("ERROR: number of MAC filters > MAX\n"));
+ goto exit_proc;
+ }
+
+ for (i=0; i< mac_cnt; i++)
+ if (get_parameter_from_string(&str_ptr, "MAC=",
+ PTYPE_STR_HEX, &p_ea[i], 12) != 0) {
+ WL_ERROR(("ERROR: MAC_filter[%d] is missing !\n", i));
+ goto exit_proc;
+ }
+
+ WL_SOFTAP(("MAC_MODE=:%d, MAC_CNT=%d, MACs:..\n", mac_mode, mac_cnt));
+ for (i = 0; i < mac_cnt; i++) {
+ WL_SOFTAP(("mac_filt[%d]:", i));
+ dhd_print_buf(&p_ea[i], 6, 0);
+ }
+
+
+ mflist_set.mode = mac_mode;
+ mflist_set.mac_list.count = mac_cnt;
+ set_ap_mac_list(dev, &mflist_set);
+
+
+ wrqu->data.pointer = NULL;
+ wrqu->data.length = 0;
+ ret = 0;
+
+ } else {
+
+ WL_ERROR(("IWPRIV argument len is 0\n"));
+ return -1;
+ }
+
+ exit_proc:
+ kfree(extra);
+ return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+
+static int iwpriv_set_ap_sta_disassoc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+ char sta_mac[6] = {0, 0, 0, 0, 0, 0};
+ char cmd_buf[256];
+ char *str_ptr = cmd_buf;
+
+ WL_SOFTAP((">>%s called\n args: info->cmd:%x,"
+ " info->flags:%x, u.data.p:%p, u.data.len:%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		/* cmd_buf is parsed as a string below: bound and NUL-terminate it */
+		if (wrqu->data.length >= sizeof(cmd_buf))
+			return -EINVAL;
+
+		if (copy_from_user(cmd_buf, wrqu->data.pointer, wrqu->data.length)) {
+			return -EFAULT;
+		}
+		cmd_buf[wrqu->data.length] = '\0';
+
+ if (get_parameter_from_string(&str_ptr,
+ "MAC=", PTYPE_STR_HEX, sta_mac, 12) == 0) {
+ res = wl_iw_softap_deassoc_stations(dev, sta_mac);
+ } else {
+ WL_ERROR(("ERROR: STA_MAC= token not found\n"));
+ }
+ }
+
+ return res;
+}
+#endif
+
+#endif
+
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+ __u16 cmd;
+ __u16 flags;
+};
+
+typedef int (*iw_handler)(struct net_device *dev,
+ struct iw_request_info *info,
+ void *wrqu,
+ char *extra);
+#endif
+
+static int
+wl_iw_config_commit(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ void *zwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+ struct sockaddr bssid;
+
+ WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+ return error;
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ if (!ssid.SSID_len)
+ return 0;
+
+ bzero(&bssid, sizeof(struct sockaddr));
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+ WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __FUNCTION__, ssid.SSID));
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_name(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ char *cwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+ strcpy(cwrq, "IEEE 802.11-DS");
+
+ return 0;
+}
+
+static int
+wl_iw_set_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ int error, chan;
+ uint sf = 0;
+
+ WL_TRACE(("%s %s: SIOCSIWFREQ\n", __FUNCTION__, dev->name));
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("%s:>> not executed, 'SOFT_AP is active' \n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+
+ if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+ chan = fwrq->m;
+ }
+
+ else {
+
+ if (fwrq->e >= 6) {
+ fwrq->e -= 6;
+ while (fwrq->e--)
+ fwrq->m *= 10;
+ } else if (fwrq->e < 6) {
+ while (fwrq->e++ < 6)
+ fwrq->m /= 10;
+ }
+
+ if (fwrq->m > 4000 && fwrq->m < 5000)
+ sf = WF_CHAN_FACTOR_4_G;
+
+ chan = wf_mhz2channel(fwrq->m, sf);
+ }
+
+ chan = htod32(chan);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+ return error;
+
+ g_wl_iw_params.target_channel = chan;
+
+
+ return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+
+
+ fwrq->m = dtoh32(ci.hw_channel);
+ fwrq->e = dtoh32(0);
+ return 0;
+}
+
+static int
+wl_iw_set_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int infra = 0, ap = 0, error = 0;
+
+ WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+ switch (*uwrq) {
+ case IW_MODE_MASTER:
+ infra = ap = 1;
+ break;
+ case IW_MODE_ADHOC:
+ case IW_MODE_AUTO:
+ break;
+ case IW_MODE_INFRA:
+ infra = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ infra = htod32(infra);
+ ap = htod32(ap);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+ return error;
+
+
+ return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int error, infra = 0, ap = 0;
+
+ WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+ return error;
+
+ infra = dtoh32(infra);
+ ap = dtoh32(ap);
+ *uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+ return 0;
+}
+
+static int
+wl_iw_get_range(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ struct iw_range *range = (struct iw_range *) extra;
+ wl_uint32_list_t *list;
+ wl_rateset_t rateset;
+ int8 *channels;
+ int error, i, k;
+ uint sf, ch;
+
+ int phytype;
+ int bw_cap = 0, sgi_tx = 0, nmode = 0;
+ channel_info_t ci;
+ uint8 nrate_list2copy = 0;
+ uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+ {14, 29, 43, 58, 87, 116, 130, 144},
+ {27, 54, 81, 108, 162, 216, 243, 270},
+ {30, 60, 90, 120, 180, 240, 270, 300}};
+
+ WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ channels = kmalloc((MAXCHANNEL+1)*4, GFP_KERNEL);
+ if (!channels) {
+ WL_ERROR(("Could not alloc channels\n"));
+ return -ENOMEM;
+ }
+ list = (wl_uint32_list_t *)channels;
+
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(*range));
+
+
+ range->min_nwid = range->max_nwid = 0;
+
+
+ list->count = htod32(MAXCHANNEL);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, (MAXCHANNEL+1)*4))) {
+ kfree(channels);
+ return error;
+ }
+ for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+ range->freq[i].i = dtoh32(list->element[i]);
+
+ ch = dtoh32(list->element[i]);
+ if (ch <= CH_MAX_2G_CHANNEL)
+ sf = WF_CHAN_FACTOR_2_4_G;
+ else
+ sf = WF_CHAN_FACTOR_5_G;
+
+ range->freq[i].m = wf_channel2mhz(ch, sf);
+ range->freq[i].e = 6;
+ }
+ range->num_frequency = range->num_channels = i;
+
+
+ range->max_qual.qual = 5;
+
+ range->max_qual.level = 0x100 - 200;
+
+ range->max_qual.noise = 0x100 - 200;
+
+ range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+
+ range->avg_qual.qual = 3;
+
+ range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+
+ range->avg_qual.noise = 0x100 - 75;
+#endif
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) {
+ kfree(channels);
+ return error;
+ }
+ rateset.count = dtoh32(rateset.count);
+ range->num_bitrates = rateset.count;
+ for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = (rateset.rates[i]& 0x7f) * 500000;
+ dev_wlc_intvar_get(dev, "nmode", &nmode);
+ dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype));
+
+ if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) {
+ dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
+ dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
+ dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t));
+ ci.hw_channel = dtoh32(ci.hw_channel);
+
+ if (bw_cap == 0 ||
+ (bw_cap == 2 && ci.hw_channel <= 14)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 0;
+ else
+ nrate_list2copy = 1;
+ }
+ if (bw_cap == 1 ||
+ (bw_cap == 2 && ci.hw_channel >= 36)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 2;
+ else
+ nrate_list2copy = 3;
+ }
+ range->num_bitrates += 8;
+ for (k = 0; i < range->num_bitrates; k++, i++) {
+
+ range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+ }
+ }
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) {
+ kfree(channels);
+ return error;
+ }
+ i = dtoh32(i);
+ if (i == WLC_PHY_TYPE_A)
+ range->throughput = 24000000;
+ else
+ range->throughput = 1500000;
+
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+ range->num_encoding_sizes = 4;
+ range->encoding_size[0] = WEP1_KEY_SIZE;
+ range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+ range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+ range->encoding_size[2] = 0;
+#endif
+ range->encoding_size[3] = AES_KEY_SIZE;
+
+
+ range->min_pmp = 0;
+ range->max_pmp = 0;
+ range->min_pmt = 0;
+ range->max_pmt = 0;
+ range->pmp_flags = 0;
+ range->pm_capa = 0;
+
+
+ range->num_txpower = 2;
+ range->txpower[0] = 1;
+ range->txpower[1] = 255;
+ range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 19;
+
+
+ range->retry_capa = IW_RETRY_LIMIT;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = 0;
+
+ range->min_retry = 1;
+ range->max_retry = 255;
+
+ range->min_r_time = 0;
+ range->max_r_time = 0;
+#endif
+
+#if WIRELESS_EXT > 17
+ range->enc_capa = IW_ENC_CAPA_WPA;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+ range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+#endif
+
+ kfree(channels);
+
+ return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
+
+static int
+wl_iw_set_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ int i;
+
+ WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+ for (i = 0; i < iw->spy_num; i++)
+ memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+ memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+ return 0;
+}
+
+static int
+wl_iw_get_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ dwrq->length = iw->spy_num;
+ for (i = 0; i < iw->spy_num; i++) {
+ memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+ addr[i].sa_family = AF_UNIX;
+ memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+ iw->spy_qual[i].updated = 0;
+ }
+
+ return 0;
+}
+
+
+static int
+wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params, int *join_params_size)
+{
+ chanspec_t chanspec = 0;
+
+ if (ch != 0) {
+
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ htodchanspec(join_params->params.chanspec_list[0]);
+
+ join_params->params.chanspec_num = htod32(join_params->params.chanspec_num);
+
+ WL_TRACE(("%s join_params->params.chanspec_list[0]= %X\n",
+ __FUNCTION__, join_params->params.chanspec_list[0]));
+ }
+ return 1;
+}
+
+static int
+wl_iw_set_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ int error = -EINVAL;
+ wl_join_params_t join_params;
+ int join_params_size;
+
+ WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+ if (awrq->sa_family != ARPHRD_ETHER) {
+ WL_ERROR(("Invalid Header...sa_family\n"));
+ return -EINVAL;
+ }
+
+
+ if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+ scb_val_t scbval;
+
+ bzero(&scbval, sizeof(scb_val_t));
+
+ (void) dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ return 0;
+ }
+
+
+
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
+ memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+
+
+
+ WL_TRACE(("%s target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel));
+ wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) {
+ WL_ERROR(("%s Invalid ioctl data=%d\n", __FUNCTION__, error));
+ return error;
+ }
+
+ if (g_ssid.SSID_len) {
+ WL_TRACE(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__,
+ g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data),
+ g_wl_iw_params.target_channel));
+ }
+
+
+ memset(&g_ssid, 0, sizeof(g_ssid));
+ return 0;
+}
+
+static int
+wl_iw_get_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+ awrq->sa_family = ARPHRD_ETHER;
+ memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+
+ (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ struct iw_mlme *mlme;
+ scb_val_t scbval;
+ int error = -EINVAL;
+
+ WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name));
+
+ mlme = (struct iw_mlme *)extra;
+ if (mlme == NULL) {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+
+ scbval.val = mlme->reason_code;
+ bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+ if (mlme->cmd == IW_MLME_DISASSOC) {
+ scbval.val = htod32(scbval.val);
+ error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ }
+ else if (mlme->cmd == IW_MLME_DEAUTH) {
+ scbval.val = htod32(scbval.val);
+ error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+ sizeof(scb_val_t));
+ }
+ else {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+
+ return error;
+}
+#endif
+
+#ifndef WL_IW_USE_ISCAN
+static int
+wl_iw_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int error, i;
+ uint buflen = dwrq->length;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+
+ list = kmalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ memset(list, 0, buflen);
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ kfree(list);
+ return -EINVAL;
+ }
+
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ buflen));
+
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+
+ kfree(list);
+
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef WL_IW_USE_ISCAN
+static int
+wl_iw_iscan_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ iscan_buf_t * buf;
+ iscan_info_t *iscan = g_iscan;
+
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) {
+ WL_ERROR(("%s error\n", __FUNCTION__));
+ return 0;
+ }
+
+ buf = iscan->list_hdr;
+
+ while (buf) {
+ list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ return -EINVAL;
+ }
+
+ bi = NULL;
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+ : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+ buf = buf->next;
+ }
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+
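+/*
+ * Fill default broadcast iscan parameters (-1 lets the firmware use its
+ * defaults); a non-empty SSID narrows the scan to that network.
+ */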
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+ int err = 0;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+
+#if defined(CONFIG_FIRST_SCAN)
+
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+ params->passive_time = 30;
+#endif
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+ if (ssid && ssid->SSID_len)
+ memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+ return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+ int err = 0;
+
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(action);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+ WL_SCAN(("%s : nprobes=%d\n", __FUNCTION__, iscan->iscan_ex_params_p->params.nprobes));
+ WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+ WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+ WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+ WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+ WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+
+	err = dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p,
+		iscan->iscan_ex_param_size, iscan->ioctlbuf, sizeof(iscan->ioctlbuf));
+	if (err) {
+		WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+		err = -1;
+	}
+
+ return err;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+ iscan_info_t *iscan = (iscan_info_t *)data;
+ if (iscan) {
+ iscan->timer_on = 0;
+ if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+ WL_TRACE(("timer trigger\n"));
+ up(&iscan->tsk_ctl.sema);
+ }
+ }
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+ dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+ iscan_buf_t * buf;
+ iscan_buf_t * ptr;
+ wl_iscan_results_t * list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ uint32 status;
+ int res = 0;
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ if (iscan->list_cur) {
+ buf = iscan->list_cur;
+ iscan->list_cur = buf->next;
+ }
+ else {
+ buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+ if (!buf) {
+			WL_ERROR(("%s can't alloc iscan_buf_t : going to abort current iscan\n",
+				__FUNCTION__));
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ return WL_SCAN_RESULTS_NO_MEM;
+ }
+ buf->next = NULL;
+ if (!iscan->list_hdr)
+ iscan->list_hdr = buf;
+ else {
+ ptr = iscan->list_hdr;
+ while (ptr->next) {
+ ptr = ptr->next;
+ }
+ ptr->next = buf;
+ }
+ }
+ memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ res = dev_iw_iovar_getbuf(
+ iscan->dev,
+ "iscanresults",
+ &list,
+ WL_ISCAN_RESULTS_FIXED_SIZE,
+ buf->iscan_buf,
+ WLC_IW_ISCAN_MAXLEN);
+ if (res == 0) {
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ results->count = dtoh32(results->count);
+ WL_TRACE(("results->count = %d\n", results->count));
+ WL_TRACE(("results->buflen = %d\n", results->buflen));
+ status = dtoh32(list_buf->status);
+ } else {
+ WL_ERROR(("%s returns error %d\n", __FUNCTION__, res));
+
+ status = WL_SCAN_RESULTS_NO_MEM;
+ }
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ return status;
+}
+
+static void
+wl_iw_force_specific_scan(iscan_info_t *iscan)
+{
+ WL_TRACE(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+
+ (void) dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+}
+
+static void
+wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+ union iwreq_data wrqu;
+
+ memset(&wrqu, 0, sizeof(wrqu));
+
+
+ wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY;
+#endif
+ WL_TRACE(("Send Event ISCAN complete\n"));
+}
+
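+/*
+ * Kernel thread driving incremental scan: woken by the iscan timer or event
+ * handler, it fetches partial results with wl_iw_iscan_get() and, depending
+ * on the status, continues the scan, re-arms the timer or reports completion
+ * to user space.
+ */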
+static int
+_iscan_sysioc_thread(void *data)
+{
+ uint32 status;
+
+ tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data;
+ iscan_info_t *iscan = (iscan_info_t *) tsk_ctl->parent;
+
+
+ static bool iscan_pass_abort = FALSE;
+
+ DAEMONIZE("iscan_sysioc");
+
+ status = WL_SCAN_RESULTS_PARTIAL;
+
+
+ complete(&tsk_ctl->completed);
+
+ while (down_interruptible(&tsk_ctl->sema) == 0) {
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk_ctl->terminated) {
+ break;
+ }
+#if defined(SOFTAP)
+
+ if (ap_cfg_running) {
+ WL_TRACE(("%s skipping SCAN ops in AP mode !!!\n", __FUNCTION__));
+ net_os_wake_unlock(iscan->dev);
+ continue;
+ }
+#endif
+
+ if (iscan->timer_on) {
+
+ iscan->timer_on = 0;
+ del_timer_sync(&iscan->timer);
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+ status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ if (g_scan_specified_ssid && (iscan_pass_abort == TRUE)) {
+ WL_TRACE(("%s Get results from specific scan status=%d\n", __FUNCTION__, status));
+ wl_iw_send_scan_complete(iscan);
+ iscan_pass_abort = FALSE;
+ status = -1;
+ }
+
+ switch (status) {
+ case WL_SCAN_RESULTS_PARTIAL:
+ WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+
+ wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_SUCCESS:
+ WL_TRACE(("iscanresults complete\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ wl_iw_send_scan_complete(iscan);
+ break;
+ case WL_SCAN_RESULTS_PENDING:
+ WL_TRACE(("iscanresults pending\n"));
+
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_ABORTED:
+ WL_TRACE(("iscanresults aborted\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ if (g_scan_specified_ssid == 0)
+ wl_iw_send_scan_complete(iscan);
+ else {
+ iscan_pass_abort = TRUE;
+ wl_iw_force_specific_scan(iscan);
+ }
+ break;
+ case WL_SCAN_RESULTS_NO_MEM:
+ WL_TRACE(("iscanresults can't alloc memory: skip\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ break;
+ default:
+ WL_TRACE(("iscanresults returned unknown status %d\n", status));
+ break;
+ }
+
+ net_os_wake_unlock(iscan->dev);
+ }
+
+ if (iscan->timer_on) {
+ iscan->timer_on = 0;
+ del_timer_sync(&iscan->timer);
+ }
+ complete_and_exit(&tsk_ctl->completed, 0);
+}
+#endif
+
+#if !defined(CSCAN)
+
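+/*
+ * Specific-scan result cache: BSS entries found by directed scans are kept
+ * here so they can be merged into later scan results; entries are aged out
+ * when a reset finds them no longer marked dirty.
+ */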
+static void
+wl_iw_set_ss_cache_timer_flag(void)
+{
+ g_ss_cache_ctrl.m_timer_expired = 1;
+ WL_TRACE(("%s called\n", __FUNCTION__));
+}
+
+
+static int
+wl_iw_init_ss_cache_ctrl(void)
+{
+ WL_TRACE(("%s :\n", __FUNCTION__));
+ g_ss_cache_ctrl.m_prev_scan_mode = 0;
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+ g_ss_cache_ctrl.m_cache_head = NULL;
+ g_ss_cache_ctrl.m_link_down = 0;
+ g_ss_cache_ctrl.m_timer_expired = 0;
+ memset(g_ss_cache_ctrl.m_active_bssid, 0, ETHER_ADDR_LEN);
+
+ g_ss_cache_ctrl.m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+ if (!g_ss_cache_ctrl.m_timer) {
+ return -ENOMEM;
+ }
+ g_ss_cache_ctrl.m_timer->function = (void *)wl_iw_set_ss_cache_timer_flag;
+ init_timer(g_ss_cache_ctrl.m_timer);
+
+ return 0;
+}
+
+
+
+static void
+wl_iw_free_ss_cache(void)
+{
+ wl_iw_ss_cache_t *node, *cur;
+ wl_iw_ss_cache_t **spec_scan_head;
+
+ WL_TRACE(("%s called\n", __FUNCTION__));
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+ node = *spec_scan_head;
+
+ for (;node;) {
+ WL_TRACE(("%s : SSID - %s\n", __FUNCTION__, node->bss_info->SSID));
+ cur = node;
+ node = cur->next;
+ kfree(cur);
+ }
+ *spec_scan_head = NULL;
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+}
+
+
+
+static int
+wl_iw_run_ss_cache_timer(int kick_off)
+{
+ struct timer_list **timer;
+
+ timer = &g_ss_cache_ctrl.m_timer;
+
+ if (*timer) {
+ if (kick_off) {
+#ifdef CONFIG_PRESCANNED
+ (*timer)->expires = jiffies + 70000 * HZ / 1000;
+#else
+ (*timer)->expires = jiffies + 30000 * HZ / 1000;
+#endif
+ add_timer(*timer);
+ WL_TRACE(("%s : timer starts \n", __FUNCTION__));
+ } else {
+ del_timer_sync(*timer);
+ WL_TRACE(("%s : timer stops \n", __FUNCTION__));
+ }
+ }
+
+ return 0;
+}
+
+
+static void
+wl_iw_release_ss_cache_ctrl(void)
+{
+ WL_TRACE(("%s :\n", __FUNCTION__));
+ wl_iw_free_ss_cache();
+ wl_iw_run_ss_cache_timer(0);
+ if (g_ss_cache_ctrl.m_timer) {
+ kfree(g_ss_cache_ctrl.m_timer);
+ }
+}
+
+
+
+static void
+wl_iw_reset_ss_cache(void)
+{
+ wl_iw_ss_cache_t *node, *prev, *cur;
+ wl_iw_ss_cache_t **spec_scan_head;
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+ node = *spec_scan_head;
+ prev = node;
+
+ for (;node;) {
+ WL_TRACE(("%s : node SSID %s \n", __FUNCTION__, node->bss_info->SSID));
+ if (!node->dirty) {
+ cur = node;
+ if (cur == *spec_scan_head) {
+ *spec_scan_head = cur->next;
+ prev = *spec_scan_head;
+ }
+ else {
+ prev->next = cur->next;
+ }
+ node = cur->next;
+
+ WL_TRACE(("%s : Del node : SSID %s\n", __FUNCTION__, cur->bss_info->SSID));
+ kfree(cur);
+ continue;
+ }
+
+ node->dirty = 0;
+ prev = node;
+ node = node->next;
+ }
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+}
+
+
+static int
+wl_iw_add_bss_to_ss_cache(wl_scan_results_t *ss_list)
+{
+
+ wl_iw_ss_cache_t *node, *prev, *leaf;
+ wl_iw_ss_cache_t **spec_scan_head;
+ wl_bss_info_t *bi = NULL;
+ int i;
+
+
+ if (!ss_list->count) {
+ return 0;
+ }
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+
+ for (i = 0; i < ss_list->count; i++) {
+
+ node = *spec_scan_head;
+ prev = node;
+
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+
+ WL_TRACE(("%s : find %d with specific SSID %s\n", __FUNCTION__, i, bi->SSID));
+ for (;node;) {
+ if (!memcmp(&node->bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+
+ WL_TRACE(("dirty marked : SSID %s\n", bi->SSID));
+ node->dirty = 1;
+ break;
+ }
+ prev = node;
+ node = node->next;
+ }
+
+ if (node) {
+ continue;
+ }
+
+ leaf = kmalloc(bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL);
+ if (!leaf) {
+ WL_ERROR(("Memory alloc failure %d\n",
+ bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN));
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(leaf->bss_info, bi, bi->length);
+ leaf->next = NULL;
+ leaf->dirty = 1;
+ leaf->count = 1;
+ leaf->version = ss_list->version;
+
+ if (!prev) {
+ *spec_scan_head = leaf;
+ }
+ else {
+ prev->next = leaf;
+ }
+ }
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ return 0;
+}
+
+
+static int
+wl_iw_merge_scan_cache(struct iw_request_info *info, char *extra, uint buflen_from_user,
+__u16 *merged_len)
+{
+ wl_iw_ss_cache_t *node;
+ wl_scan_results_t *list_merge;
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ node = g_ss_cache_ctrl.m_cache_head;
+ for (;node;) {
+ list_merge = (wl_scan_results_t *)&node->buflen;
+ WL_TRACE(("%s: Cached Specific APs list=%d\n", __FUNCTION__, list_merge->count));
+ if (buflen_from_user - *merged_len > 0) {
+ *merged_len += (__u16) wl_iw_get_scan_prep(list_merge, info,
+ extra + *merged_len, buflen_from_user - *merged_len);
+ }
+ else {
+ WL_TRACE(("%s: exit with break\n", __FUNCTION__));
+ break;
+ }
+ node = node->next;
+ }
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ return 0;
+}
+
+
+static int
+wl_iw_delete_bss_from_ss_cache(void *addr)
+{
+
+ wl_iw_ss_cache_t *node, *prev;
+ wl_iw_ss_cache_t **spec_scan_head;
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+ node = *spec_scan_head;
+ prev = node;
+ for (;node;) {
+ if (!memcmp(&node->bss_info->BSSID, addr, ETHER_ADDR_LEN)) {
+ if (node == *spec_scan_head) {
+ *spec_scan_head = node->next;
+ }
+ else {
+ prev->next = node->next;
+ }
+
+ WL_TRACE(("%s : Del node : %s\n", __FUNCTION__, node->bss_info->SSID));
+ kfree(node);
+ break;
+ }
+
+ prev = node;
+ node = node->next;
+ }
+
+ memset(addr, 0, ETHER_ADDR_LEN);
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ return 0;
+}
+
+#endif
+
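+/*
+ * SIOCSIWSCAN handler (non-iscan path): honours IW_SCAN_THIS_ESSID by
+ * latching the requested SSID into g_specific_ssid before issuing WLC_SCAN.
+ */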
+static int
+wl_iw_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error;
+ WL_TRACE(("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __FUNCTION__, dev->name));
+
+#ifdef OEM_CHROMIUMOS
+ g_set_essid_before_scan = FALSE;
+#endif
+
+#if defined(CSCAN)
+ WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+ return -EINVAL;
+#endif
+
+#if defined(SOFTAP)
+
+ if (ap_cfg_running) {
+ WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+
+ if (g_onoff == G_WLAN_SET_OFF)
+ return 0;
+
+
+ memset(&g_specific_ssid, 0, sizeof(g_specific_ssid));
+#ifndef WL_IW_USE_ISCAN
+
+ g_scan_specified_ssid = 0;
+#endif
+
+#if WIRELESS_EXT > 17
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+
+				WL_TRACE(("%s Ignoring specific scan for %s, first broadcast "
+					"scan is not done yet (%d)\n",
+					__FUNCTION__, req->essid,
+					g_first_broadcast_scan));
+ return -EBUSY;
+ }
+#endif
+ if (g_scan_specified_ssid) {
+				WL_TRACE(("%s Specific scan still in progress, ignoring scan for %s\n",
+					__FUNCTION__, req->essid));
+
+ return -EBUSY;
+ }
+ else {
+ g_specific_ssid.SSID_len = MIN(sizeof(g_specific_ssid.SSID),
+ req->essid_len);
+ memcpy(g_specific_ssid.SSID, req->essid, g_specific_ssid.SSID_len);
+ g_specific_ssid.SSID_len = htod32(g_specific_ssid.SSID_len);
+ g_scan_specified_ssid = 1;
+ WL_TRACE(("### Specific scan ssid=%s len=%d\n",
+ g_specific_ssid.SSID, g_specific_ssid.SSID_len));
+ }
+ }
+ }
+#endif
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) {
+ WL_TRACE(("#### Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error));
+
+ g_scan_specified_ssid = 0;
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#ifdef WL_IW_USE_ISCAN
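+/*
+ * Start a broadcast iscan: program the event mask, reset the result list,
+ * issue WL_SCAN_ACTION_START and arm the iscan timer.
+ */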
+int
+wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag)
+{
+ wlc_ssid_t ssid;
+ iscan_info_t *iscan = g_iscan;
+
+#if defined(CONFIG_FIRST_SCAN)
+
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) {
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED;
+		WL_TRACE(("%s: First broadcast scan was forced\n", __FUNCTION__));
+ }
+ else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) {
+		WL_TRACE(("%s: ignoring ISCAN request, first broadcast scan is not done yet\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (flag)
+ rtnl_lock();
+#endif
+
+ dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag));
+ wl_iw_set_event_mask(dev);
+
+ WL_TRACE(("+++: Set Broadcast ISCAN\n"));
+
+ memset(&ssid, 0, sizeof(ssid));
+
+ iscan->list_cur = iscan->list_hdr;
+ iscan->iscan_state = ISCAN_STATE_SCANING;
+
+ memset(&iscan->iscan_ex_params_p->params, 0, iscan->iscan_ex_param_size);
+ wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid);
+ wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (flag)
+ rtnl_unlock();
+#endif
+
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+ iscan->timer_on = 1;
+
+ return 0;
+}
+
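+/* SIOCSIWSCAN handler when ISCAN is enabled: holds a wake lock for the
+ * duration of the request, skips the scan while SoftAP or a specific scan
+ * is active, and hands directed (ESSID) requests back to wl_iw_set_scan().
+ */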
+static int
+wl_iw_iscan_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ iscan_info_t *iscan = g_iscan;
+ int ret = 0;
+
+ WL_TRACE_SCAN(("%s: SIOCSIWSCAN : ISCAN\n", dev->name));
+
+#if defined(CSCAN)
+ WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+ return -EINVAL;
+#endif
+
+ net_os_wake_lock(dev);
+
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ goto set_scan_end;
+ }
+#endif
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ goto set_scan_end;
+ }
+
+#ifdef PNO_SUPPORT
+
+ if (dhd_dev_get_pno_status(dev)) {
+ WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__));
+ }
+#endif
+
+
+ if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) {
+		WL_ERROR(("%s: iscan is not initialized or its thread is not running\n", __FUNCTION__));
+ goto set_scan_end;
+ }
+
+ if (g_scan_specified_ssid) {
+ WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n",
+ __FUNCTION__));
+		ret = -EBUSY;
+ goto set_scan_end;
+ }
+
+
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ int as = 0;
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+ wl_iw_set_event_mask(dev);
+ ret = wl_iw_set_scan(dev, info, wrqu, extra);
+ goto set_scan_end;
+ }
+ else {
+ g_scan_specified_ssid = 0;
+
+ if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+ WL_TRACE(("%s ISCAN already in progress \n", __FUNCTION__));
+ goto set_scan_end;
+ }
+ }
+ }
+#endif
+
+#if defined(CONFIG_FIRST_SCAN) && !defined(CSCAN)
+ if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+ if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+ WL_ERROR(("%s Clean up First scan flag which is %d\n",
+ __FUNCTION__, g_first_broadcast_scan));
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+ }
+ else {
+ WL_ERROR(("%s Ignoring Broadcast Scan:First Scan is not done yet %d\n",
+ __FUNCTION__, g_first_counter_scans));
+ ret = -EBUSY;
+ goto set_scan_end;
+ }
+ }
+#endif
+
+ wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+
+set_scan_end:
+ net_os_wake_unlock(dev);
+ return ret;
+}
+#endif
+
+#if WIRELESS_EXT > 17
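+/* ie_is_wpa_ie()/ie_is_wps_ie(): given a vendor-specific IE inside a TLV
+ * chain, return TRUE if it carries the WPA (OUI type 1) or WPS (OUI type 4)
+ * OUI respectively; otherwise advance *tlvs/*tlvs_len past the IE.
+ */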
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+
+
+ uint8 *ie = *wpaie;
+
+
+ if ((ie[1] >= 6) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+ return TRUE;
+ }
+
+
+ ie += ie[1] + 2;
+
+ *tlvs_len -= (int)(ie - *tlvs);
+
+ *tlvs = ie;
+ return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+
+
+ uint8 *ie = *wpsie;
+
+
+ if ((ie[1] >= 4) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+ return TRUE;
+ }
+
+
+ ie += ie[1] + 2;
+
+ *tlvs_len -= (int)(ie - *tlvs);
+
+ *tlvs = ie;
+ return FALSE;
+}
+#endif
+
+
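+/* Append any RSN, WPS and WPA IEs found in the bss_info to the wireless
+ * extensions event stream as IWEVGENIE records (WIRELESS_EXT > 17 only).
+ */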
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+ struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+ struct iw_event iwe;
+ char *event;
+
+ event = *event_p;
+ if (bi->ie_length) {
+
+ bcm_tlv_t *ie;
+ uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ int ptr_len = bi->ie_length;
+
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+
+ if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+ if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+ *event_p = event;
+ }
+#endif
+
+ return 0;
+}
+
+#ifndef CSCAN
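+/* Format a wl_scan_results_t list as wireless-extensions events (BSSID,
+ * SSID, mode, frequency, quality, IEs, encryption flag and rate set) into
+ * 'extra', returning the number of bytes produced.
+ */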
+static uint
+wl_iw_get_scan_prep(
+ wl_scan_results_t *list,
+ struct iw_request_info *info,
+ char *extra,
+ short max_size)
+{
+ int i, j;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+ int ret = 0;
+
+ if (!list) {
+ WL_ERROR(("%s: Null list pointer", __FUNCTION__));
+ return ret;
+ }
+
+
+
+ for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ return ret;
+ }
+
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+ WL_TRACE(("%s : %s\n", __FUNCTION__, bi->SSID));
+
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+
+ if (bi->rateset.count) {
+			if ((event + IW_EV_LCP_LEN) <= end) {
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value =
+ (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ }
+
+ if ((ret = (event - extra)) < 0) {
+ WL_ERROR(("==> Wrong size\n"));
+ ret = 0;
+ }
+
+ WL_TRACE(("%s: size=%d bytes prepared \n", __FUNCTION__, (unsigned int)(event - extra)));
+ return (uint)ret;
+}
+
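+/* SIOCGIWSCAN handler (also used for specific scans from the ISCAN path):
+ * fetches results with WLC_SCAN_RESULTS (allocating a private buffer for
+ * specific scans), merges the ISCAN buffers and, in the cached-scan build,
+ * the specific-scan cache, and returns the formatted list to user space.
+ */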
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ wl_scan_results_t *list_merge;
+ wl_scan_results_t *list = (wl_scan_results_t *) g_scan;
+ int error;
+ uint buflen_from_user = dwrq->length;
+ uint len = G_SCAN_RESULTS;
+ __u16 len_ret = 0;
+#if !defined(CSCAN)
+ __u16 merged_len = 0;
+#endif
+#if defined(WL_IW_USE_ISCAN)
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * p_buf;
+#if !defined(CSCAN)
+ uint32 counter = 0;
+#endif
+#endif
+
+ WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user));
+
+ if (!extra) {
+ WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name));
+ return -EINVAL;
+ }
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+ ci.scan_channel = dtoh32(ci.scan_channel);
+ if (ci.scan_channel)
+ return -EAGAIN;
+
+#if !defined(CSCAN)
+ if (g_ss_cache_ctrl.m_timer_expired) {
+ wl_iw_free_ss_cache();
+ g_ss_cache_ctrl.m_timer_expired ^= 1;
+ }
+ if ((!g_scan_specified_ssid && g_ss_cache_ctrl.m_prev_scan_mode) ||
+ g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+
+ wl_iw_reset_ss_cache();
+ }
+ g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+ if (g_scan_specified_ssid) {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+ }
+ else {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+ }
+#endif
+
+
+
+ if (g_scan_specified_ssid) {
+
+ list = kmalloc(len, GFP_KERNEL);
+ if (!list) {
+ WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n", dev->name));
+ g_scan_specified_ssid = 0;
+ return -ENOMEM;
+ }
+ }
+
+ memset(list, 0, len);
+ list->buflen = htod32(len);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len))) {
+ WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name, __FUNCTION__, error));
+ dwrq->length = len;
+ if (g_scan_specified_ssid) {
+ g_scan_specified_ssid = 0;
+ kfree(list);
+ }
+ return 0;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ if (g_scan_specified_ssid) {
+ g_scan_specified_ssid = 0;
+ kfree(list);
+ }
+ return -EINVAL;
+ }
+
+#if !defined(CSCAN)
+ if (g_scan_specified_ssid) {
+
+ wl_iw_add_bss_to_ss_cache(list);
+ kfree(list);
+ }
+#endif
+
+#if !defined(CSCAN)
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+#if defined(WL_IW_USE_ISCAN)
+ if (g_scan_specified_ssid)
+ WL_TRACE(("%s: Specified scan APs from scan=%d\n", __FUNCTION__, list->count));
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+ WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+ counter += list_merge->count;
+ if (list_merge->count > 0)
+ len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+ extra+len_ret, buflen_from_user -len_ret);
+ p_buf = p_buf->next;
+ }
+ WL_TRACE(("%s merged with total Bcast APs=%d\n", __FUNCTION__, counter));
+#else
+ list_merge = (wl_scan_results_t *) g_scan;
+ len_ret = (__u16) wl_iw_get_scan_prep(list_merge, info, extra, buflen_from_user);
+#endif
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+ if (g_ss_cache_ctrl.m_link_down) {
+
+ wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+ }
+
+ wl_iw_merge_scan_cache(info, extra+len_ret, buflen_from_user-len_ret, &merged_len);
+ len_ret += merged_len;
+ wl_iw_run_ss_cache_timer(0);
+ wl_iw_run_ss_cache_timer(1);
+#else
+
+
+ if (g_scan_specified_ssid) {
+ WL_TRACE(("%s: Specified scan APs in the list =%d\n", __FUNCTION__, list->count));
+ len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+ kfree(list);
+
+#if defined(WL_IW_USE_ISCAN)
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+ WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+ if (list_merge->count > 0)
+ len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+ extra+len_ret, buflen_from_user -len_ret);
+ p_buf = p_buf->next;
+ }
+#else
+ list_merge = (wl_scan_results_t *) g_scan;
+ WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+ if (list_merge->count > 0)
+ len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, extra+len_ret,
+ buflen_from_user -len_ret);
+#endif
+ }
+ else {
+ list = (wl_scan_results_t *) g_scan;
+ len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+ }
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
+
+ g_scan_specified_ssid = 0;
+#endif
+
+ if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user)
+ len = len_ret;
+
+ dwrq->length = len;
+ dwrq->flags = 0;
+
+ WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, list->count));
+ return 0;
+}
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
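+/* SIOCGIWSCAN handler for ISCAN: walks the chained iscan result buffers and
+ * emits one set of WE records per BSS; specific-scan requests are deferred
+ * to wl_iw_get_scan() and cached results are merged in afterwards.
+ */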
+static int
+wl_iw_iscan_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int ii, j;
+ int apcnt;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * p_buf;
+ uint32 counter = 0;
+ uint8 channel;
+#if !defined(CSCAN)
+ __u16 merged_len = 0;
+ uint buflen_from_user = dwrq->length;
+#endif
+
+ WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length));
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ return -EINVAL;
+ }
+#endif
+
+ if (!extra) {
+ WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n", dev->name));
+ return -EINVAL;
+ }
+
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) {
+ WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n",
+ dev->name, __FUNCTION__));
+ return -EAGAIN;
+ }
+#endif
+
+ if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) {
+		WL_ERROR(("%s: iscan is not initialized or its thread is not running\n", __FUNCTION__));
+		return -EAGAIN;
+ }
+
+
+
+#if !defined(CSCAN)
+ if (g_ss_cache_ctrl.m_timer_expired) {
+ wl_iw_free_ss_cache();
+ g_ss_cache_ctrl.m_timer_expired ^= 1;
+ }
+ if (g_scan_specified_ssid) {
+ return wl_iw_get_scan(dev, info, dwrq, extra);
+ }
+ else {
+ if (g_ss_cache_ctrl.m_link_down) {
+
+ wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+ }
+ if (g_ss_cache_ctrl.m_prev_scan_mode || g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+ g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+
+ wl_iw_reset_ss_cache();
+ }
+ g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+ g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+ }
+#endif
+
+ WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name));
+ apcnt = 0;
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+ counter += list->count;
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __FUNCTION__, list->version));
+ return -EINVAL;
+ }
+
+ bi = NULL;
+ for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+ bi = (bi ?
+ (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) :
+ list->bss_info);
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+
+ if (event + ETHER_ADDR_LEN + bi->SSID_len +
+ IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >= end)
+ return -E2BIG;
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end,
+ &iwe, IW_EV_UINT_LEN);
+ }
+
+
+ iwe.cmd = SIOCGIWFREQ;
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ iwe.u.freq.m = wf_channel2mhz(channel,
+ channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+
+ if (bi->rateset.count) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+ return -E2BIG;
+
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value =
+ (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ p_buf = p_buf->next;
+ }
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0;
+
+#if !defined(CSCAN)
+
+ wl_iw_merge_scan_cache(info, event, buflen_from_user - dwrq->length, &merged_len);
+ dwrq->length += merged_len;
+ wl_iw_run_ss_cache_timer(0);
+ wl_iw_run_ss_cache_timer(1);
+#endif
+
+#if defined(CONFIG_FIRST_SCAN)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+#endif
+
+ WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter));
+
+ return 0;
+}
+#endif
+
+#define WL_JOIN_PARAMS_MAX 1600
+#ifdef CONFIG_PRESCANNED
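+/* Fill join_params->params.chanspec_list with the chanspecs of previously
+ * scanned APs matching the requested SSID (from the ISCAN buffers, else the
+ * specific-scan cache) so the join can skip a fresh scan. Returns the number
+ * of chanspecs copied, or 0 if none were found or they do not fit.
+ */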
+static int
+check_prescan(wl_join_params_t *join_params, int *join_params_size)
+{
+ int cnt = 0;
+ int indx = 0;
+ wl_iw_ss_cache_t *node = NULL;
+ wl_bss_info_t *bi = NULL;
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * buf;
+ wl_scan_results_t *list;
+ char *destbuf;
+
+ buf = iscan->list_hdr;
+
+ while (buf) {
+ list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+ bi = NULL;
+ for (indx = 0; indx < list->count; indx++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+ : list->bss_info;
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+ if ((dtoh32(bi->SSID_len) != join_params->ssid.SSID_len) ||
+ memcmp(bi->SSID, join_params->ssid.SSID,
+ join_params->ssid.SSID_len))
+ continue;
+ memcpy(&join_params->params.chanspec_list[cnt],
+ &bi->chanspec, sizeof(chanspec_t));
+ WL_ERROR(("iscan : chanspec :%d, count %d \n", bi->chanspec, cnt));
+ cnt++;
+ }
+ buf = buf->next;
+ }
+
+ if (!cnt) {
+ MUTEX_LOCK_WL_SCAN_SET();
+ node = g_ss_cache_ctrl.m_cache_head;
+		while (node) {
+ if (!memcmp(&node->bss_info->SSID, join_params->ssid.SSID,
+ join_params->ssid.SSID_len)) {
+ memcpy(&join_params->params.chanspec_list[cnt],
+ &node->bss_info->chanspec, sizeof(chanspec_t));
+ WL_ERROR(("cache_scan : chanspec :%d, count %d \n",
+ (int)node->bss_info->chanspec, cnt));
+ cnt++;
+ }
+ node = node->next;
+ }
+ MUTEX_UNLOCK_WL_SCAN_SET();
+ }
+
+ if (!cnt) {
+ return 0;
+ }
+
+ destbuf = (char *)&join_params->params.chanspec_list[cnt];
+ *join_params_size = destbuf - (char*)join_params;
+ join_params->ssid.SSID_len = htod32(g_ssid.SSID_len);
+ memcpy(&(join_params->params.bssid), &ether_bcast, ETHER_ADDR_LEN);
+ join_params->params.chanspec_num = htod32(cnt);
+
+	if ((*join_params_size) > WL_JOIN_PARAMS_MAX) {
+		WL_ERROR(("can't fit channel/bssid pairs for all %d APs found\n", cnt));
+		/* join_params is owned by the caller (wl_iw_set_essid); freeing it
+		 * here would leave the caller using, and later re-freeing, freed
+		 * memory, so just report that nothing usable was found.
+		 */
+		return 0;
+	}
+
+ WL_ERROR(("Passing %d channel/bssid pairs.\n", cnt));
+ return cnt;
+}
+#endif
+
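+/* SIOCSIWESSID handler: records the requested SSID in g_ssid and issues
+ * WLC_SET_SSID, using pre-scanned chanspecs when CONFIG_PRESCANNED finds a
+ * match, otherwise the target channel from g_wl_iw_params.
+ */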
+static int
+wl_iw_set_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ int error;
+ wl_join_params_t *join_params;
+ int join_params_size;
+
+ WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+ RETURN_IF_EXTRA_NULL(extra);
+
+#ifdef OEM_CHROMIUMOS
+ if (g_set_essid_before_scan)
+ return -EAGAIN;
+#endif
+ if (!(join_params = kmalloc(WL_JOIN_PARAMS_MAX, GFP_KERNEL))) {
+ WL_ERROR(("allocation failed for join_params size is %d\n", WL_JOIN_PARAMS_MAX));
+ return -ENOMEM;
+ }
+
+ memset(join_params, 0, WL_JOIN_PARAMS_MAX);
+
+
+ memset(&g_ssid, 0, sizeof(g_ssid));
+
+ if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+ g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length);
+#else
+ g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length-1);
+#endif
+ memcpy(g_ssid.SSID, extra, g_ssid.SSID_len);
+
+#ifdef CONFIG_PRESCANNED
+ memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+ join_params->ssid.SSID_len = g_ssid.SSID_len;
+
+ if (check_prescan(join_params, &join_params_size)) {
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID,
+ join_params, join_params_size))) {
+ WL_ERROR(("Invalid ioctl data=%d\n", error));
+ kfree(join_params);
+ return error;
+ }
+ kfree(join_params);
+ return 0;
+ } else {
+			WL_ERROR(("No match found\nTrying to join on the specified channel\n"));
+ }
+#endif
+ } else {
+
+ g_ssid.SSID_len = 0;
+ }
+ g_ssid.SSID_len = htod32(g_ssid.SSID_len);
+
+
+ memset(join_params, 0, sizeof(*join_params));
+ join_params_size = sizeof(join_params->ssid);
+
+ memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+ join_params->ssid.SSID_len = htod32(g_ssid.SSID_len);
+ memcpy(&(join_params->params.bssid), &ether_bcast, ETHER_ADDR_LEN);
+
+
+
+ wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, join_params, &join_params_size);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, join_params, join_params_size))) {
+		WL_ERROR(("Invalid ioctl data=%d\n", error));
+		kfree(join_params);
+		return error;
+	}
+
+ if (g_ssid.SSID_len) {
+ WL_ERROR(("%s: join SSID=%s ch=%d\n", __FUNCTION__,
+ g_ssid.SSID, g_wl_iw_params.target_channel));
+ }
+ kfree(join_params);
+ return 0;
+}
+
+static int
+wl_iw_get_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("Error getting the SSID\n"));
+ return error;
+ }
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+
+ memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+ dwrq->length = ssid.SSID_len;
+
+ dwrq->flags = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+
+ WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+
+ if (dwrq->length > sizeof(iw->nickname))
+ return -E2BIG;
+
+ memcpy(iw->nickname, extra, dwrq->length);
+ iw->nickname[dwrq->length - 1] = '\0';
+
+ return 0;
+}
+
+static int
+wl_iw_get_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+
+ WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ strcpy(extra, iw->nickname);
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ wl_rateset_t rateset;
+ int error, rate, i, error_bg, error_a;
+
+ WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+
+ rateset.count = dtoh32(rateset.count);
+
+ if (vwrq->value < 0) {
+
+ rate = rateset.rates[rateset.count - 1] & 0x7f;
+ } else if (vwrq->value < rateset.count) {
+
+ rate = rateset.rates[vwrq->value] & 0x7f;
+ } else {
+
+ rate = vwrq->value / 500000;
+ }
+
+ if (vwrq->fixed) {
+
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+ error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+ } else {
+
+
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+
+ error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+
+
+ for (i = 0; i < rateset.count; i++)
+ if ((rateset.rates[i] & 0x7f) > rate)
+ break;
+ rateset.count = htod32(i);
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rate;
+
+ WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+ return error;
+ rate = dtoh32(rate);
+ vwrq->value = rate * 500000;
+
+ return 0;
+}
+
+static int
+wl_iw_set_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+ if (vwrq->disabled)
+ rts = DOT11_DEFAULT_RTS_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+ return -EINVAL;
+ else
+ rts = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+ return error;
+
+ vwrq->value = rts;
+ vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, frag;
+
+ WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+ if (vwrq->disabled)
+ frag = DOT11_DEFAULT_FRAG_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+ return -EINVAL;
+ else
+ frag = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, fragthreshold;
+
+ WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+ return error;
+
+ vwrq->value = fragthreshold;
+ vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable;
+ uint16 txpwrmw;
+ WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+
+ disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+ disable += WL_RADIO_SW_DISABLE << 16;
+
+ disable = htod32(disable);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+ return error;
+
+
+ if (disable & WL_RADIO_SW_DISABLE)
+ return 0;
+
+
+ if (!(vwrq->flags & IW_TXPOW_MWATT))
+ return -EINVAL;
+
+
+ if (vwrq->value < 0)
+ return 0;
+
+ if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+ else txpwrmw = (uint16)vwrq->value;
+
+
+ error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+ return error;
+}
+
+static int
+wl_iw_get_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable, txpwrdbm;
+ uint8 result;
+
+ WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+ (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+ return error;
+
+ disable = dtoh32(disable);
+ result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ vwrq->value = (int32)bcm_qdbm_to_mw(result);
+ vwrq->fixed = 0;
+ vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+ vwrq->flags = IW_TXPOW_MWATT;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+
+ if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+ return -EINVAL;
+
+
+ if (vwrq->flags & IW_RETRY_LIMIT) {
+
+
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+ !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif
+ lrl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+ return error;
+ }
+
+
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+ !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif
+ srl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+ return error;
+ }
+ }
+ return 0;
+}
+
+static int
+wl_iw_get_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+ vwrq->disabled = 0;
+
+
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+ return -EINVAL;
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+ return error;
+
+ lrl = dtoh32(lrl);
+ srl = dtoh32(srl);
+
+
+ if (vwrq->flags & IW_RETRY_MAX) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = lrl;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = srl;
+ if (srl != lrl)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+#endif
+
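+/* SIOCSIWENCODE handler: selects the key index, programs a WEP/TKIP/AES key
+ * with WLC_SET_KEY (or just the primary-key index when no key material is
+ * given), and updates the "wsec" and WLC_SET_AUTH settings to match.
+ */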
+static int
+wl_iw_set_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec;
+
+ WL_TRACE(("%s: SIOCSIWENCODE index %d, len %d, flags %04x (%s%s%s%s%s)\n",
+ dev->name, dwrq->flags & IW_ENCODE_INDEX, dwrq->length, dwrq->flags,
+ dwrq->flags & IW_ENCODE_NOKEY ? "NOKEY" : "",
+ dwrq->flags & IW_ENCODE_DISABLED ? " DISABLED" : "",
+ dwrq->flags & IW_ENCODE_RESTRICTED ? " RESTRICTED" : "",
+ dwrq->flags & IW_ENCODE_OPEN ? " OPEN" : "",
+ dwrq->flags & IW_ENCODE_TEMP ? " TEMP" : ""));
+
+ memset(&key, 0, sizeof(key));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+
+ if (key.index == DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+ } else {
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ return -EINVAL;
+ }
+
+
+ if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ } else {
+ key.len = dwrq->length;
+
+ if (dwrq->length > sizeof(key.data))
+ return -EINVAL;
+
+ memcpy(key.data, extra, dwrq->length);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (key.len) {
+ case WEP1_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WEP128_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ case TKIP_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+#endif
+ case AES_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ swap_key_from_BE(&key);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+ return error;
+ }
+
+
+ val = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ wsec &= ~(WEP_ENABLED);
+ wsec |= val;
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+ return error;
+
+
+ val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+ val = htod32(val);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec, auth;
+
+ WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+
+ bzero(&key, sizeof(wl_wsec_key_t));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = key.index;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ } else
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+ return error;
+
+ swap_key_to_BE(&key);
+
+ wsec = dtoh32(wsec);
+ auth = dtoh32(auth);
+
+ dwrq->length = MIN(DOT11_MAX_KEY_SIZE, key.len);
+
+
+ dwrq->flags = key.index + 1;
+ if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+
+ dwrq->flags |= IW_ENCODE_DISABLED;
+ }
+ if (auth) {
+
+ dwrq->flags |= IW_ENCODE_RESTRICTED;
+ }
+
+
+ if (dwrq->length && extra)
+ memcpy(extra, key.data, dwrq->length);
+
+ return 0;
+}
+
+static int
+wl_iw_set_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+ pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+ pm = htod32(pm);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+ return error;
+
+ pm = dtoh32(pm);
+ vwrq->disabled = pm ? 0 : 1;
+ vwrq->flags = IW_POWER_ALL_R;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+
+ WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+ RETURN_IF_EXTRA_NULL(extra);
+
+#ifdef DHD_DEBUG
+ {
+ int i;
+
+ for (i = 0; i < iwp->length; i++)
+ WL_TRACE(("%02X ", extra[i]));
+ WL_TRACE(("\n"));
+ }
+#endif
+
+ dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+ return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+ iwp->length = 64;
+ dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+ return 0;
+}
+
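+/* SIOCSIWENCODEEXT handler: converts an iw_encode_ext request into a
+ * wl_wsec_key_t (key index, peer address, algorithm, optional receive IV)
+ * and pushes it to the firmware with WLC_SET_KEY; a zero-length key either
+ * selects the primary key or removes the key for that index.
+ */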
+static int
+wl_iw_set_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error;
+ struct iw_encode_ext *iwe;
+
+ WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+ RETURN_IF_EXTRA_NULL(extra);
+
+ memset(&key, 0, sizeof(key));
+ iwe = (struct iw_encode_ext *)extra;
+
+
+	if (dwrq->flags & IW_ENCODE_DISABLED) {
+		/* nothing to do here: a disabled key arrives with key_len == 0
+		 * and is handled by the zero-length-key path below
+		 */
+	}
+
+
+ key.index = 0;
+ if (dwrq->flags & IW_ENCODE_INDEX)
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ key.len = iwe->key_len;
+
+
+ if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+
+ if (key.len == 0) {
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the primary key to %d\n", key.index));
+
+ key.index = htod32(key.index);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+ &key.index, sizeof(key.index));
+ if (error)
+ return error;
+ }
+
+ else {
+ swap_key_from_BE(&key);
+ dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ }
+ }
+ else {
+ if (iwe->key_len > sizeof(key.data))
+ return -EINVAL;
+
+ WL_WSEC(("Setting the key index %d\n", key.index));
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("key is a Primary Key\n"));
+ key.flags = WL_PRIMARY_KEY;
+ }
+
+ bcopy((void *)iwe->key, key.data, iwe->key_len);
+
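+		/* The supplicant supplies TKIP keys as TK | Tx-MIC | Rx-MIC,
+		 * while the firmware expects the two MIC halves in the opposite
+		 * order, so swap bytes 16..23 with bytes 24..31 here.
+		 */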
+ if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+ uint8 keybuf[8];
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+
+
+ if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ uchar *ivptr;
+ ivptr = (uchar *)iwe->rx_seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = TRUE;
+ }
+
+ switch (iwe->alg) {
+ case IW_ENCODE_ALG_NONE:
+ key.algo = CRYPTO_ALGO_OFF;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (iwe->key_len == WEP1_KEY_SIZE)
+ key.algo = CRYPTO_ALGO_WEP1;
+ else
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ default:
+ break;
+ }
+ swap_key_from_BE(&key);
+
+ dhd_wait_pend8021x(dev);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
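+/* Local PMKID cache and SIOCSIWPMKSA handler: IW_PMKSA_FLUSH clears the
+ * cache, IW_PMKSA_REMOVE deletes one entry, IW_PMKSA_ADD inserts or updates
+ * one; the resulting list is pushed to the firmware via the "pmkid_info"
+ * iovar.
+ */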
+struct {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+
+static int
+wl_iw_set_pmksa(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ struct iw_pmksa *iwpmksa;
+ uint i;
+ int ret = 0;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
+
+ WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name));
+
+ RETURN_IF_EXTRA_NULL(extra);
+
+ iwpmksa = (struct iw_pmksa *)extra;
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+
+ if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+ WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+ bzero((char *)&pmkid_list, sizeof(pmkid_list));
+ }
+
+ else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+ {
+ pmkid_list_t pmkid, *pmkidptr;
+ uint j;
+ pmkidptr = &pmkid;
+
+ bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+
+ WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+ WL_WSEC(("\n"));
+ }
+
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+
+ if ((pmkid_list.pmkids.npmkid > 0) && (i < pmkid_list.pmkids.npmkid)) {
+ bzero(&pmkid_array[i], sizeof(pmkid_t));
+ for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
+ bcopy(&pmkid_array[i+1].BSSID,
+ &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&pmkid_array[i+1].PMKID,
+ &pmkid_array[i].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ pmkid_list.pmkids.npmkid--;
+ }
+ else
+ ret = -EINVAL;
+ }
+
+ else if (iwpmksa->cmd == IW_PMKSA_ADD) {
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ if (i < MAXPMKID) {
+ bcopy(&iwpmksa->bssid.sa_data[0],
+ &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkid_array[i].PMKID,
+ WPA2_PMKID_LEN);
+ if (i == pmkid_list.pmkids.npmkid)
+ pmkid_list.pmkids.npmkid++;
+ }
+ else
+ ret = -EINVAL;
+
+ {
+ uint j;
+ uint k;
+ k = pmkid_list.pmkids.npmkid;
+ WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkid_array[k].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_WSEC(("%02x ", pmkid_array[k].PMKID[j]));
+ WL_WSEC(("\n"));
+ }
+ }
+ WL_WSEC(("PRINTING pmkid LIST - No of elements %d", pmkid_list.pmkids.npmkid));
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+ uint j;
+ WL_WSEC(("\nPMKID[%d]: %s = ", i,
+ bcm_ether_ntoa(&pmkid_array[i].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_WSEC(("%02x ", pmkid_array[i].PMKID[j]));
+ }
+ WL_WSEC(("\n"));
+
+ if (!ret)
+ ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list,
+ sizeof(pmkid_list));
+ return ret;
+}
+#endif
+
+static int
+wl_iw_get_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+ return 0;
+}
+
+
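+/* Derive the driver "wsec" bitmask (WEP_ENABLED/TKIP_ENABLED/AES_ENABLED)
+ * from the pairwise and group ciphers collected through SIOCSIWAUTH; falls
+ * back to WEP when only privacy_invoked is set.
+ */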
+static uint32
+wl_iw_create_wpaauth_wsec(struct net_device *dev)
+{
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+ uint32 wsec;
+
+
+ if (iw->pcipher & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ wsec = WEP_ENABLED;
+ else if (iw->pcipher & IW_AUTH_CIPHER_TKIP)
+ wsec = TKIP_ENABLED;
+ else if (iw->pcipher & IW_AUTH_CIPHER_CCMP)
+ wsec = AES_ENABLED;
+ else
+ wsec = 0;
+
+
+ if (iw->gcipher & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ wsec |= WEP_ENABLED;
+ else if (iw->gcipher & IW_AUTH_CIPHER_TKIP)
+ wsec |= TKIP_ENABLED;
+ else if (iw->gcipher & IW_AUTH_CIPHER_CCMP)
+ wsec |= AES_ENABLED;
+
+
+ if (wsec == 0 && iw->privacy_invoked)
+ wsec = WEP_ENABLED;
+
+ WL_INFORM(("%s: returning wsec of %d\n", __FUNCTION__, wsec));
+
+ return wsec;
+}
+
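+/* SIOCSIWAUTH handler: caches the WPA version and cipher selections in the
+ * per-device wl_iw_t and programs "wsec", "wpa_auth", "auth" and related
+ * iovars to match the supplicant's configuration.
+ */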
+static int
+wl_iw_set_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error = 0;
+ int paramid;
+ int paramval;
+ int val = 0;
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+ paramval = vwrq->value;
+
+ WL_TRACE(("%s: SIOCSIWAUTH, %s(%d), paramval = 0x%0x\n",
+ dev->name,
+ paramid == IW_AUTH_WPA_VERSION ? "IW_AUTH_WPA_VERSION" :
+ paramid == IW_AUTH_CIPHER_PAIRWISE ? "IW_AUTH_CIPHER_PAIRWISE" :
+ paramid == IW_AUTH_CIPHER_GROUP ? "IW_AUTH_CIPHER_GROUP" :
+ paramid == IW_AUTH_KEY_MGMT ? "IW_AUTH_KEY_MGMT" :
+ paramid == IW_AUTH_TKIP_COUNTERMEASURES ? "IW_AUTH_TKIP_COUNTERMEASURES" :
+ paramid == IW_AUTH_DROP_UNENCRYPTED ? "IW_AUTH_DROP_UNENCRYPTED" :
+ paramid == IW_AUTH_80211_AUTH_ALG ? "IW_AUTH_80211_AUTH_ALG" :
+ paramid == IW_AUTH_WPA_ENABLED ? "IW_AUTH_WPA_ENABLED" :
+ paramid == IW_AUTH_RX_UNENCRYPTED_EAPOL ? "IW_AUTH_RX_UNENCRYPTED_EAPOL" :
+ paramid == IW_AUTH_ROAMING_CONTROL ? "IW_AUTH_ROAMING_CONTROL" :
+ paramid == IW_AUTH_PRIVACY_INVOKED ? "IW_AUTH_PRIVACY_INVOKED" :
+ "UNKNOWN",
+ paramid, paramval));
+
+#if defined(SOFTAP)
+ if (ap_cfg_running) {
+ WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+
+ iw->wpaversion = paramval;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ iw->pcipher = paramval;
+ val = wl_iw_create_wpaauth_wsec(dev);
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+ break;
+
+ case IW_AUTH_CIPHER_GROUP:
+ iw->gcipher = paramval;
+ val = wl_iw_create_wpaauth_wsec(dev);
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ if (paramval & IW_AUTH_KEY_MGMT_PSK) {
+ if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA)
+ val = WPA_AUTH_PSK;
+ else if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_PSK;
+ else
+ val = WPA_AUTH_DISABLED;
+ } else if (paramval & IW_AUTH_KEY_MGMT_802_1X) {
+ if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA)
+ val = WPA_AUTH_UNSPECIFIED;
+ else if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_UNSPECIFIED;
+ else
+ val = WPA_AUTH_DISABLED;
+ }
+ else
+ val = WPA_AUTH_DISABLED;
+
+ WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ WL_INFORM(("Setting the D11auth %d\n", paramval));
+ if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
+ val = 0;
+ else if (paramval == IW_AUTH_ALG_SHARED_KEY)
+ val = 1;
+ else if (paramval == (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY))
+ val = 2;
+ else
+ error = 1;
+ if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (paramval == 0) {
+ iw->privacy_invoked = 0;
+ iw->pcipher = 0;
+ iw->gcipher = 0;
+ val = wl_iw_create_wpaauth_wsec(dev);
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+ WL_INFORM(("%s: %d: setting wpa_auth to %d, wsec to %d\n",
+ __FUNCTION__, __LINE__, paramval, val));
+ dev_wlc_intvar_set(dev, "wpa_auth", paramval);
+ return error;
+ }
+
+
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ if ((error = dev_wlc_intvar_set(dev, "wsec_restrict", paramval)))
+ return error;
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+#if WIRELESS_EXT > 17
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED:
+ iw->privacy_invoked = paramval;
+ val = wl_iw_create_wpaauth_wsec(dev);
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+ break;
+
+#endif
+ default:
+ break;
+ }
+ return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error;
+ int paramid;
+ int paramval = 0;
+ int val;
+ wl_iw_t *iw = NETDEV_PRIV(dev);
+
+ WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+ paramval = iw->wpaversion;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ paramval = iw->pcipher;
+ break;
+
+ case IW_AUTH_CIPHER_GROUP:
+ paramval = iw->gcipher;
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (VAL_PSK(val))
+ paramval = IW_AUTH_KEY_MGMT_PSK;
+ else
+ paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_intvar_get(dev, "wsec_restrict", &paramval);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+ return error;
+ if (!val)
+ paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+ else
+ paramval = IW_AUTH_ALG_SHARED_KEY;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val)
+ paramval = TRUE;
+ else
+ paramval = FALSE;
+ break;
+#if WIRELESS_EXT > 17
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ paramval = iw->privacy_invoked;
+ break;
+
+#endif
+ }
+ vwrq->value = paramval;
+ return 0;
+}
+#endif
+
+
+#ifdef SOFTAP
+
+static int ap_macmode = MACLIST_MODE_DISABLED;
+static struct mflist ap_black_list;
+
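+/* Parse a SoftAP WEP/TKIP/AES key string, given either as raw characters or
+ * as hex digits (optionally "0x"-prefixed), into a wl_wsec_key_t and pick
+ * the crypto algorithm from the resulting key length.
+ */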
+static int
+wl_iw_parse_wep(char *keystr, wl_wsec_key_t *key)
+{
+ char hex[] = "XX";
+ unsigned char *data = key->data;
+
+ switch (strlen(keystr)) {
+ case 5:
+ case 13:
+ case 16:
+ key->len = strlen(keystr);
+ memcpy(data, keystr, key->len + 1);
+ break;
+ case 12:
+ case 28:
+ case 34:
+ case 66:
+
+ if (!strnicmp(keystr, "0x", 2))
+ keystr += 2;
+ else
+ return -1;
+
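+		/* fall through and convert the remaining hex digits */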
+ case 10:
+ case 26:
+ case 32:
+ case 64:
+ key->len = strlen(keystr) / 2;
+ while (*keystr) {
+ strncpy(hex, keystr, 2);
+ *data++ = (char) bcm_strtoul(hex, NULL, 16);
+ keystr += 2;
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ switch (key->len) {
+ case 5:
+ key->algo = CRYPTO_ALGO_WEP1;
+ break;
+ case 13:
+ key->algo = CRYPTO_ALGO_WEP128;
+ break;
+ case 16:
+
+ key->algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ case 32:
+ key->algo = CRYPTO_ALGO_TKIP;
+ break;
+ default:
+ return -1;
+ }
+
+
+ key->flags |= WL_PRIMARY_KEY;
+
+ return 0;
+}
+
+#ifdef EXT_WPA_CRYPTO
+#define SHA1HashSize 20
+extern void pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+ int iterations, u8 *buf, size_t buflen);
+
+#else
+
+#define SHA1HashSize 20
+static int
+pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+ int iterations, u8 *buf, size_t buflen)
+{
+ WL_ERROR(("WARNING: %s is not implemented !!!\n", __FUNCTION__));
+ return -1;
+}
+
+#endif
+
+
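+/* Enable or disable the secondary bsscfg (index 1, used for the SoftAP
+ * interface) by writing {cfg = 1, val} through the "bss" iovar.
+ */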
+static int
+dev_iw_write_cfg1_bss_var(struct net_device *dev, int val)
+{
+ struct {
+ int cfg;
+ int val;
+ } bss_setbuf;
+
+ int bss_set_res;
+ char smbuf[WLC_IOCTL_SMLEN];
+ memset(smbuf, 0, sizeof(smbuf));
+
+ bss_setbuf.cfg = 1;
+ bss_setbuf.val = val;
+
+ bss_set_res = dev_iw_iovar_setbuf(dev, "bss",
+ &bss_setbuf, sizeof(bss_setbuf), smbuf, sizeof(smbuf));
+ WL_TRACE(("%s: bss_set_result:%d set with %d\n", __FUNCTION__, bss_set_res, val));
+
+ return bss_set_res;
+}
+
+
+
+#ifndef AP_ONLY
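+/* Pack a per-bsscfg iovar request as "bsscfg:<name>\0<bssidx><params>" into
+ * bufptr; returns the total length, or 0 with *perr = BCME_BUFTOOSHORT if
+ * the buffer is too small.
+ */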
+static int
+wl_bssiovar_mkbuf(
+ const char *iovar,
+ int bssidx,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen,
+ int *perr)
+{
+ const char *prefix = "bsscfg:";
+ int8* p;
+ uint prefixlen;
+ uint namelen;
+ uint iolen;
+
+ prefixlen = strlen(prefix);
+ namelen = strlen(iovar) + 1;
+ iolen = prefixlen + namelen + sizeof(int) + paramlen;
+
+
+ if (buflen < 0 || iolen > (uint)buflen) {
+ *perr = BCME_BUFTOOSHORT;
+ return 0;
+ }
+
+ p = (int8*)bufptr;
+
+
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+
+ memcpy(p, iovar, namelen);
+ p += namelen;
+
+
+ bssidx = htod32(bssidx);
+ memcpy(p, &bssidx, sizeof(int32));
+ p += sizeof(int32);
+
+
+ if (paramlen)
+ memcpy(p, param, paramlen);
+
+ *perr = 0;
+ return iolen;
+}
+#endif
+
+
+
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+
+#if defined(CSCAN)
+
+
+
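+/* Program a combined SSID + channel ISCAN: the SSID list is appended after
+ * the channel list inside iscan_ex_params_p, channel_num encodes both
+ * counts, and the request is issued through the "iscan" iovar.
+ */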
+static int
+wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, int nchan)
+{
+ int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
+ int err = 0;
+ char *p;
+ int i;
+ iscan_info_t *iscan = g_iscan;
+
+ WL_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan));
+
+	if ((!dev) || (!g_iscan) || (!iscan->iscan_ex_params_p)) {
+ WL_ERROR(("%s error exit\n", __FUNCTION__));
+ err = -1;
+ goto exit;
+ }
+
+#ifdef PNO_SUPPORT
+
+ if (dhd_dev_get_pno_status(dev)) {
+ WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__));
+ }
+#endif
+
+ params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+
+
+ if (nssid > 0) {
+ i = OFFSETOF(wl_scan_params_t, channel_list) + nchan * sizeof(uint16);
+ i = ROUNDUP(i, sizeof(uint32));
+ if (i + nssid * sizeof(wlc_ssid_t) > params_size) {
+ printf("additional ssids exceed params_size\n");
+ err = -1;
+ goto exit;
+ }
+
+ p = ((char*)&iscan->iscan_ex_params_p->params) + i;
+ memcpy(p, ssids_local, nssid * sizeof(wlc_ssid_t));
+ p += nssid * sizeof(wlc_ssid_t);
+ } else {
+ p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16);
+ }
+
+
+ iscan->iscan_ex_params_p->params.channel_num =
+ htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (nchan & WL_SCAN_PARAMS_COUNT_MASK));
+
+ nssid = (uint)
+ ((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) &
+ WL_SCAN_PARAMS_COUNT_MASK);
+
+
+ params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t));
+ iscan->iscan_ex_param_size = params_size;
+
+ iscan->list_cur = iscan->list_hdr;
+ iscan->iscan_state = ISCAN_STATE_SCANING;
+ wl_iw_set_event_mask(dev);
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+ iscan->timer_on = 1;
+
+#ifdef SCAN_DUMP
+ {
+ int i;
+ WL_SCAN(("\n### List of SSIDs to scan ###\n"));
+ for (i = 0; i < nssid; i++) {
+ if (!ssids_local[i].SSID_len)
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ else
+ WL_SCAN(("%d: scan for %s size =%d\n", i,
+ ssids_local[i].SSID, ssids_local[i].SSID_len));
+ }
+ WL_SCAN(("### List of channels to scan ###\n"));
+ for (i = 0; i < nchan; i++)
+ {
+ WL_SCAN(("%d ", iscan->iscan_ex_params_p->params.channel_list[i]));
+ }
+ WL_SCAN(("\nnprobes=%d\n", iscan->iscan_ex_params_p->params.nprobes));
+ WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+ WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+ WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+ WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+ WL_SCAN(("\n###################\n"));
+ }
+#endif
+
+	if (params_size > WLC_IOCTL_MEDLEN) {
+		WL_ERROR(("%s: params_size=%d exceeds WLC_IOCTL_MEDLEN\n",
+			__FUNCTION__, params_size));
+ err = -1;
+ }
+
+ if ((err = dev_iw_iovar_setbuf(dev, "iscan", iscan->iscan_ex_params_p,
+ iscan->iscan_ex_param_size,
+ iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+ WL_TRACE(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+ err = -1;
+ }
+
+exit:
+ return err;
+}
+
+
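+/* iwpriv front end for combined scans: copies the argument string from user
+ * space, extracts the SSID list (after the GET_SSID prefix), the channel
+ * list and the dwell-time/scan-type parameters, then calls
+ * wl_iw_combined_scan_set().
+ */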
+static int
+iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *ext)
+{
+ int res;
+ char *extra = NULL;
+ iscan_info_t *iscan = g_iscan;
+ wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+ int nssid = 0;
+ int nchan = 0;
+ char *str_ptr;
+
+ WL_TRACE(("%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ if (wrqu->data.length == 0) {
+ WL_ERROR(("IWPRIV argument len = 0\n"));
+ return -EINVAL;
+ }
+
+ if (!iscan->iscan_ex_params_p) {
+ return -EFAULT;
+ }
+
+ if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ res = -EFAULT;
+ goto exit_proc;
+ }
+
+ extra[wrqu->data.length] = 0;
+ WL_ERROR(("Got str param in iw_point:\n %s\n", extra));
+
+ str_ptr = extra;
+
+
+ if (strncmp(str_ptr, GET_SSID, strlen(GET_SSID))) {
+ WL_ERROR(("%s Error: extracting SSID='' string\n", __FUNCTION__));
+ res = -EINVAL;
+ goto exit_proc;
+ }
+
+ str_ptr += strlen(GET_SSID);
+ nssid = wl_iw_parse_ssid_list(&str_ptr, ssids_local, nssid,
+ WL_SCAN_PARAMS_SSID_MAX);
+ if (nssid == -1) {
+		WL_ERROR(("%s: invalid ssid list\n", __FUNCTION__));
+ res = -EINVAL;
+ goto exit_proc;
+ }
+
+ memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+ ASSERT(iscan->iscan_ex_param_size < WLC_IOCTL_MAXLEN);
+
+
+ wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+
+ if ((nchan = wl_iw_parse_channel_list(&str_ptr,
+ &iscan->iscan_ex_params_p->params.channel_list[0],
+ WL_NUMCHANNELS)) == -1) {
+ WL_ERROR(("%s missing channel list\n", __FUNCTION__));
+ res = -EINVAL;
+ goto exit_proc;
+ }
+
+
+ get_parameter_from_string(&str_ptr,
+ GET_NPROBE, PTYPE_INTDEC,
+ &iscan->iscan_ex_params_p->params.nprobes, 2);
+
+ get_parameter_from_string(&str_ptr, GET_ACTIVE_ASSOC_DWELL, PTYPE_INTDEC,
+ &iscan->iscan_ex_params_p->params.active_time, 4);
+
+ get_parameter_from_string(&str_ptr, GET_PASSIVE_ASSOC_DWELL, PTYPE_INTDEC,
+ &iscan->iscan_ex_params_p->params.passive_time, 4);
+
+ get_parameter_from_string(&str_ptr, GET_HOME_DWELL, PTYPE_INTDEC,
+ &iscan->iscan_ex_params_p->params.home_time, 4);
+
+ get_parameter_from_string(&str_ptr, GET_SCAN_TYPE, PTYPE_INTDEC,
+ &iscan->iscan_ex_params_p->params.scan_type, 1);
+
+
+ res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+exit_proc:
+ kfree(extra);
+
+ return res;
+}
+
+
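+/* CSCAN command handler: validates the CSCAN_COMMAND prefix and cscan_tlv_t
+ * header, then walks the TLV payload (SSIDs, channels, nprobes, active/
+ * passive/home dwell times, scan type) into iscan_ex_params_p.
+ */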
+static int
+wl_iw_set_cscan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int res = -1;
+ iscan_info_t *iscan = g_iscan;
+ wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+ int nssid = 0;
+ int nchan = 0;
+ cscan_tlv_t *cscan_tlv_temp;
+ char type;
+ char *str_ptr;
+ int tlv_size_left;
+#ifdef TLV_DEBUG
+ int i;
+ char tlv_in_example[] = {
+ 'C', 'S', 'C', 'A', 'N', ' ',
+ 0x53, 0x01, 0x00, 0x00,
+ 'S',
+ 0x00,
+ 'S',
+ 0x04,
+ 'B', 'R', 'C', 'M',
+ 'C',
+ 0x06,
+ 'P',
+ 0x94,
+ 0x11,
+ 'T',
+ 0x01
+ };
+#endif
+
+ WL_TRACE(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+ __FUNCTION__, info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		net_os_wake_unlock(dev);
+		return -1;
+	}
+
+	if (wrqu->data.length < (strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))) {
+		WL_ERROR(("%s: argument length %d is less than the minimum %d\n", __FUNCTION__,
+			wrqu->data.length, (int)(strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))));
+		net_os_wake_unlock(dev);
+		return -1;
+	}
+
+#ifdef TLV_DEBUG
+ memcpy(extra, tlv_in_example, sizeof(tlv_in_example));
+ wrqu->data.length = sizeof(tlv_in_example);
+ for (i = 0; i < wrqu->data.length; i++)
+ printf("%02X ", extra[i]);
+ printf("\n");
+#endif
+
+ str_ptr = extra;
+ str_ptr += strlen(CSCAN_COMMAND);
+ tlv_size_left = wrqu->data.length - strlen(CSCAN_COMMAND);
+
+ cscan_tlv_temp = (cscan_tlv_t *)str_ptr;
+ memset(ssids_local, 0, sizeof(ssids_local));
+
+ if ((cscan_tlv_temp->prefix == CSCAN_TLV_PREFIX) &&
+ (cscan_tlv_temp->version == CSCAN_TLV_VERSION) &&
+ (cscan_tlv_temp->subver == CSCAN_TLV_SUBVERSION))
+ {
+ str_ptr += sizeof(cscan_tlv_t);
+ tlv_size_left -= sizeof(cscan_tlv_t);
+
+
+ if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+ WL_SCAN_PARAMS_SSID_MAX, &tlv_size_left)) <= 0) {
+			WL_ERROR(("SSID is not present or is corrupted, ret=%d\n", nssid));
+ goto exit_proc;
+ }
+ else {
+
+ memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+
+
+ wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+
+ while (tlv_size_left > 0)
+ {
+ type = str_ptr[0];
+ switch (type) {
+ case CSCAN_TLV_TYPE_CHANNEL_IE:
+
+ if ((nchan = wl_iw_parse_channel_list_tlv(&str_ptr,
+ &iscan->iscan_ex_params_p->params.channel_list[0],
+ WL_NUMCHANNELS, &tlv_size_left)) == -1) {
+ WL_ERROR(("%s missing channel list\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_NPROBE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr,
+ &iscan->iscan_ex_params_p->params.nprobes,
+ sizeof(iscan->iscan_ex_params_p->params.nprobes),
+ type, sizeof(char), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n",
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_ACTIVE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr,
+ &iscan->iscan_ex_params_p->params.active_time,
+ sizeof(iscan->iscan_ex_params_p->params.active_time),
+ type, sizeof(short), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n",
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_PASSIVE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr,
+ &iscan->iscan_ex_params_p->params.passive_time,
+ sizeof(iscan->iscan_ex_params_p->params.passive_time),
+ type, sizeof(short), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n",
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_HOME_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr,
+ &iscan->iscan_ex_params_p->params.home_time,
+ sizeof(iscan->iscan_ex_params_p->params.home_time),
+ type, sizeof(short), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n",
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+ case CSCAN_TLV_TYPE_STYPE_IE:
+ if ((res = wl_iw_parse_data_tlv(&str_ptr,
+ &iscan->iscan_ex_params_p->params.scan_type,
+ sizeof(iscan->iscan_ex_params_p->params.scan_type),
+ type, sizeof(char), &tlv_size_left)) == -1) {
+ WL_ERROR(("%s return %d\n",
+ __FUNCTION__, res));
+ goto exit_proc;
+ }
+ break;
+
+ default :
+					WL_ERROR(("%s got unknown TLV type 0x%X\n",
+ __FUNCTION__, type));
+ goto exit_proc;
+ break;
+ }
+ }
+ }
+ }
+ else {
+ WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+#if defined(CONFIG_FIRST_SCAN)
+ if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+ if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+ WL_ERROR(("%s Clean up First scan flag which is %d\n",
+ __FUNCTION__, g_first_broadcast_scan));
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+ }
+ else {
+ WL_ERROR(("%s Ignoring CSCAN : First Scan is not done yet %d\n",
+ __FUNCTION__, g_first_counter_scans));
+			res = -EBUSY;
+			/* release the wake lock before returning */
+			goto exit_proc;
+ }
+ }
+#endif
+
+
+ res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+exit_proc:
+ net_os_wake_unlock(dev);
+ return res;
+}
+
+#endif
+
+#ifdef CONFIG_WPS2
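+/*
+ * wl_iw_del_wps_probe_req_ie: if a WPS IE was previously added to probe
+ * requests, rewrite the cached "vndr_ie" buffer with the "del" sub-command,
+ * push it to the firmware via WLC_SET_VAR and free the cached copy.
+ */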
+static int
+wl_iw_del_wps_probe_req_ie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int ret;
+ vndr_ie_setbuf_t *ie_delbuf;
+
+ if (g_wps_probe_req_ie) {
+ ie_delbuf = (vndr_ie_setbuf_t *)(g_wps_probe_req_ie + strlen("vndr_ie "));
+ strncpy(ie_delbuf->cmd, "del", 3);
+ ie_delbuf->cmd[3] = '\0';
+
+ ret = dev_wlc_ioctl(dev, WLC_SET_VAR, g_wps_probe_req_ie, g_wps_probe_req_ie_len);
+ if (ret) {
+ WL_ERROR(("ioctl failed %d \n", ret));
+ }
+
+ kfree(g_wps_probe_req_ie);
+ g_wps_probe_req_ie = NULL;
+ g_wps_probe_req_ie_len = 0;
+ }
+
+ return 0;
+}
+
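+/*
+ * wl_iw_add_wps_probe_req_ie: builds a vndr_ie_setbuf_t ("add", one IE,
+ * pktflag 0x10 for probe-request frames) from the user-supplied data, wraps
+ * it in a "vndr_ie" iovar with bcm_mkiovar(), sends it with WLC_SET_VAR and
+ * caches the iovar buffer in g_wps_probe_req_ie so it can be deleted later.
+ */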
+static int
+wl_iw_add_wps_probe_req_ie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ char *str_ptr = NULL;
+ char *bufptr = NULL;
+ uint buflen, datalen, iecount, pktflag, iolen, total_len;
+ int ret = 0;
+ vndr_ie_setbuf_t *ie_setbuf = NULL;
+
+ if (!g_wps_probe_req_ie) {
+ ret = -1;
+ str_ptr = extra;
+ str_ptr += WPS_PROBE_REQ_IE_CMD_LENGTH;
+ datalen = wrqu->data.length - WPS_PROBE_REQ_IE_CMD_LENGTH;
+
+
+
+ buflen = sizeof(vndr_ie_setbuf_t) + datalen - sizeof(vndr_ie_t);
+ ie_setbuf = (vndr_ie_setbuf_t *)kmalloc(buflen, GFP_KERNEL);
+ if (!ie_setbuf) {
+ WL_ERROR(("memory alloc failure ie_setbuf\n"));
+ return ret;
+ }
+
+ memset(ie_setbuf, 0x00, buflen);
+
+
+ strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1);
+ ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+
+ iecount = htod32(1);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int));
+
+
+ pktflag = 0x10;
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag,
+ &pktflag, sizeof(uint32));
+
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data,
+ str_ptr, datalen);
+
+ total_len = strlen("vndr_ie ") + buflen;
+ bufptr = (char *)kmalloc(total_len, GFP_KERNEL);
+ if (!bufptr) {
+ WL_ERROR(("memory alloc failure bufptr\n"));
+ goto fail;
+ }
+
+ iolen = bcm_mkiovar("vndr_ie", (char *)ie_setbuf, buflen, bufptr, total_len);
+ if (iolen == 0) {
+ WL_ERROR(("Buffer length is illegal\n"));
+ goto fail2;
+ }
+
+ ret = dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen);
+ if (ret) {
+ WL_ERROR(("ioctl failed\n"));
+ goto fail2;
+ }
+
+ g_wps_probe_req_ie = (char *)kmalloc(iolen, GFP_KERNEL);
+ if (!g_wps_probe_req_ie) {
+ WL_ERROR(("memory alloc failure g_wps_probe_req_ie\n"));
+ goto fail2;
+ }
+
+ memcpy(g_wps_probe_req_ie, bufptr, iolen);
+ g_wps_probe_req_ie_len = iolen;
+ }
+
+fail2:
+ if (bufptr) {
+ kfree(bufptr);
+ bufptr = NULL;
+ }
+fail:
+ if (ie_setbuf) {
+ kfree(ie_setbuf);
+ ie_setbuf = NULL;
+ }
+ return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+#ifndef AP_ONLY
+
+
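+/*
+ * thr_wait_for_2nd_eth_dev: helper kernel thread spawned when the SoftAP is
+ * configured for the first time.  It waits on the task-control semaphore
+ * until the secondary SoftAP net_device (ap_net_dev) has been registered,
+ * marks ap_cfg_running and notifies user space with the "AP_SET_CFG_OK"
+ * private event.
+ */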
+static int
+thr_wait_for_2nd_eth_dev(void *data)
+{
+ wl_iw_t *iw;
+ int ret = 0;
+ unsigned long flags = 0;
+
+ tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data;
+ struct net_device *dev = (struct net_device *)tsk_ctl->parent;
+ iw = *(wl_iw_t **)netdev_priv(dev);
+
+ DAEMONIZE("wl0_eth_wthread");
+
+
+	WL_SOFTAP(("\n>%s thread started, PID:%x\n", __FUNCTION__, current->pid));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (!iw) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ tsk_ctl->thr_pid = -1;
+ complete(&tsk_ctl->completed);
+ return -1;
+ }
+ DHD_OS_WAKE_LOCK(iw->pub);
+ complete(&tsk_ctl->completed);
+ if (down_timeout(&tsk_ctl->sema, msecs_to_jiffies(1000)) != 0) {
+#else
+ if (down_interruptible(&tsk_ctl->sema) != 0) {
+#endif
+ WL_ERROR(("\n%s: sap_eth_sema timeout \n", __FUNCTION__));
+ ret = -1;
+ goto fail;
+ }
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk_ctl->terminated) {
+ ret = -1;
+ goto fail;
+ }
+
+ flags = dhd_os_spin_lock(iw->pub);
+ if (!ap_net_dev) {
+ WL_ERROR((" ap_net_dev is null !!!"));
+ ret = -1;
+ dhd_os_spin_unlock(iw->pub, flags);
+ goto fail;
+ }
+
+ WL_SOFTAP(("\n>%s: Thread:'softap ethdev IF:%s is detected!'\n\n",
+ __FUNCTION__, ap_net_dev->name));
+
+ ap_cfg_running = TRUE;
+
+ dhd_os_spin_unlock(iw->pub, flags);
+ bcm_mdelay(500);
+
+
+ wl_iw_send_priv_event(priv_dev, "AP_SET_CFG_OK");
+
+fail:
+
+ DHD_OS_WAKE_UNLOCK(iw->pub);
+
+ WL_SOFTAP(("\n>%s, thread completed\n", __FUNCTION__));
+
+ complete_and_exit(&tsk_ctl->completed, 0);
+ return ret;
+}
+#endif
+#ifndef AP_ONLY
+static int last_auto_channel = 6;
+#endif
+
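+/*
+ * get_softap_auto_channel: picks a channel for the SoftAP.  Brings the
+ * interface up with a null SSID, kicks off WLC_START_CHANNEL_SEL and polls
+ * WLC_GET_CHANNEL_SEL (up to 15 retries, 350 ms apart) until a channel is
+ * reported, then brings the interface back down.  The result is cached in
+ * last_auto_channel when not built with AP_ONLY.
+ */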
+static int
+get_softap_auto_channel(struct net_device *dev, struct ap_profile *ap)
+{
+ int chosen = 0;
+ wl_uint32_list_t request;
+ int retry = 0;
+ int updown = 0;
+ int ret = 0;
+ wlc_ssid_t null_ssid;
+ int res = 0;
+#ifndef AP_ONLY
+ int iolen = 0;
+ int mkvar_err = 0;
+ int bsscfg_index = 1;
+ char buf[WLC_IOCTL_SMLEN];
+#endif
+ WL_SOFTAP(("Enter %s\n", __FUNCTION__));
+
+#ifndef AP_ONLY
+ if (ap_cfg_running) {
+ ap->channel = last_auto_channel;
+ return res;
+ }
+#endif
+
+ memset(&null_ssid, 0, sizeof(wlc_ssid_t));
+ res |= dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown));
+
+#ifdef AP_ONLY
+ res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid));
+#else
+
+ iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&null_ssid),
+ null_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ res |= dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen);
+
+#endif
+
+ request.count = htod32(0);
+ ret = dev_wlc_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request));
+ if (ret < 0) {
+ WL_ERROR(("can't start auto channel scan\n"));
+ goto fail;
+ }
+
+ get_channel_retry:
+ bcm_mdelay(350);
+
+ ret = dev_wlc_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+ if (ret < 0 || dtoh32(chosen) == 0) {
+ if (retry++ < 15) {
+ goto get_channel_retry;
+ } else {
+ if (ret < 0) {
+ WL_ERROR(("can't get auto channel sel, err = %d, "
+ "chosen = 0x%04X\n", ret, (uint16)chosen));
+ goto fail;
+ } else {
+ ap->channel = (uint16)last_auto_channel;
+				WL_ERROR(("auto channel selection timed out, using channel %d\n",
+					ap->channel));
+ }
+ }
+ }
+
+ if (chosen) {
+ ap->channel = (uint16)chosen & 0x00FF;
+ WL_SOFTAP(("%s: Got auto channel = %d, attempt:%d\n",
+ __FUNCTION__, ap->channel, retry));
+ }
+
+ if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown))) < 0) {
+		WL_ERROR(("%s fail to bring interface down, err = %d\n", __FUNCTION__, res));
+ goto fail;
+ }
+
+#ifndef AP_ONLY
+ if (!res || !ret)
+ last_auto_channel = ap->channel;
+#endif
+
+fail :
+ if (ret < 0) {
+ WL_TRACE(("%s: return value %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ return res;
+}
+
+
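+/*
+ * set_ap_cfg: applies a SoftAP profile (SSID, security, channel, max_scb).
+ * On the first invocation it brings the interface down, enables apsta on
+ * bsscfg 1, disables mpc and brings the interface back up, then spawns
+ * thr_wait_for_2nd_eth_dev to wait for the secondary interface.  On
+ * subsequent calls it deauthenticates associated stations and takes the BSS
+ * down before security is reprogrammed and the BSS is restarted.  A channel
+ * of 0 requests automatic channel selection.
+ */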
+static int
+set_ap_cfg(struct net_device *dev, struct ap_profile *ap)
+{
+ int updown = 0;
+ int channel = 0;
+
+ wlc_ssid_t ap_ssid;
+ int max_assoc = 8;
+
+ int res = 0;
+ int apsta_var = 0;
+#ifndef AP_ONLY
+ int mpc = 0;
+ int iolen = 0;
+ int mkvar_err = 0;
+ int bsscfg_index = 1;
+ char buf[WLC_IOCTL_SMLEN];
+#endif
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ net_os_wake_lock(dev);
+ DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+ WL_SOFTAP(("wl_iw: set ap profile:\n"));
+ WL_SOFTAP((" ssid = '%s'\n", ap->ssid));
+ WL_SOFTAP((" security = '%s'\n", ap->sec));
+ if (ap->key[0] != '\0')
+ WL_SOFTAP((" key = '%s'\n", ap->key));
+ WL_SOFTAP((" channel = %d\n", ap->channel));
+ WL_SOFTAP((" max scb = %d\n", ap->max_scb));
+
+#ifdef AP_ONLY
+ if (ap_cfg_running) {
+ wl_iw_softap_deassoc_stations(dev, NULL);
+ ap_cfg_running = FALSE;
+ }
+#endif
+
+
+ if (ap_cfg_running == FALSE) {
+
+#ifndef AP_ONLY
+
+
+ sema_init(&ap_eth_ctl.sema, 0);
+
+ mpc = 0;
+ if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) {
+ WL_ERROR(("%s fail to set mpc\n", __FUNCTION__));
+ goto fail;
+ }
+#endif
+
+ updown = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown)))) {
+ WL_ERROR(("%s fail to set updown\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef AP_ONLY
+
+ apsta_var = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+ WL_ERROR(("%s fail to set apsta_var 0\n", __FUNCTION__));
+ goto fail;
+ }
+ apsta_var = 1;
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+ WL_ERROR(("%s fail to set apsta_var 1\n", __FUNCTION__));
+ goto fail;
+ }
+ res = dev_wlc_ioctl(dev, WLC_GET_AP, &apsta_var, sizeof(apsta_var));
+#else
+
+ apsta_var = 1;
+ iolen = wl_bssiovar_mkbuf("apsta",
+ bsscfg_index, &apsta_var, sizeof(apsta_var)+4,
+ buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+ WL_ERROR(("%s fail to set apsta \n", __FUNCTION__));
+ goto fail;
+ }
+ WL_TRACE(("\n>in %s: apsta set result: %d \n", __FUNCTION__, res));
+
+
+ mpc = 0;
+ if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) {
+ WL_ERROR(("%s fail to set mpc\n", __FUNCTION__));
+ goto fail;
+ }
+
+
+#endif
+
+ updown = 1;
+ if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown))) < 0) {
+			WL_ERROR(("%s fail to bring interface up\n", __FUNCTION__));
+ goto fail;
+ }
+
+ } else {
+
+ if (!ap_net_dev) {
+ WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__));
+ goto fail;
+ }
+
+ res = wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+
+
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+ WL_ERROR(("%s fail to set bss down\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+
+ if (strlen(ap->country_code)) {
+		WL_ERROR(("%s: Ignored: country code MUST be set with the"
+				" COUNTRY command\n", __FUNCTION__));
+ } else {
+ WL_SOFTAP(("%s: Country code is not specified,"
+ " will use Radio's default\n",
+ __FUNCTION__));
+
+ }
+ iolen = wl_bssiovar_mkbuf("closednet",
+ bsscfg_index, &ap->closednet, sizeof(ap->closednet)+4,
+ buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+		WL_ERROR(("%s failed to set 'closednet' for apsta\n", __FUNCTION__));
+ goto fail;
+ }
+
+
+ if ((ap->channel == 0) && (get_softap_auto_channel(dev, ap) < 0)) {
+ ap->channel = 1;
+ WL_ERROR(("%s auto channel failed, use channel=%d\n",
+ __FUNCTION__, ap->channel));
+ }
+
+ channel = ap->channel;
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel)))) {
+ WL_ERROR(("%s fail to set channel\n", __FUNCTION__));
+ }
+
+
+ if (ap_cfg_running == FALSE) {
+ updown = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)))) {
+ WL_ERROR(("%s fail to set up\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ max_assoc = ap->max_scb;
+ if ((res = dev_wlc_intvar_set(dev, "maxassoc", max_assoc))) {
+ WL_ERROR(("%s fail to set maxassoc\n", __FUNCTION__));
+ goto fail;
+ }
+
+ ap_ssid.SSID_len = strlen(ap->ssid);
+ strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+
+
+#ifdef AP_ONLY
+ if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+ WL_ERROR(("ERROR:%d in:%s, wl_iw_set_ap_security is skipped\n",
+ res, __FUNCTION__));
+ goto fail;
+ }
+ wl_iw_send_priv_event(dev, "ASCII_CMD=AP_BSS_START");
+ ap_cfg_running = TRUE;
+#else
+
+ iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&ap_ssid),
+ ap_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+ ASSERT(iolen);
+ if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) != 0) {
+ WL_ERROR(("ERROR:%d in:%s, Security & BSS reconfiguration is skipped\n",
+ res, __FUNCTION__));
+ goto fail;
+ }
+ if (ap_cfg_running == FALSE) {
+
+ PROC_START(thr_wait_for_2nd_eth_dev, dev, &ap_eth_ctl, 0);
+ } else {
+ ap_eth_ctl.thr_pid = -1;
+
+ if (ap_net_dev == NULL) {
+ WL_ERROR(("%s ERROR: ap_net_dev is NULL !!!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ WL_ERROR(("%s: %s Configure security & restart AP bss \n",
+ __FUNCTION__, ap_net_dev->name));
+
+
+ if ((res = wl_iw_set_ap_security(ap_net_dev, &my_ap)) < 0) {
+ WL_ERROR(("%s fail to set security : %d\n", __FUNCTION__, res));
+ goto fail;
+ }
+
+
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) {
+ WL_ERROR(("%s fail to set bss up\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+#endif
+fail:
+ WL_SOFTAP(("%s exit with %d\n", __FUNCTION__, res));
+
+ DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+ net_os_wake_unlock(dev);
+
+ return res;
+}
+#endif
+
+
+
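+/*
+ * wl_iw_set_ap_security: programs wsec/wpa_auth according to ap->sec
+ * ("open", "wep", "wpa-psk" -> TKIP, "wpa2-psk" -> AES).  For the PSK modes,
+ * a passphrase shorter than WSEC_MAX_PSK_LEN is first expanded with
+ * pbkdf2_sha1(key, ssid, 4096 iterations, 32 bytes) and hex-encoded before
+ * being handed to WLC_SET_WSEC_PMK with the WSEC_PASSPHRASE flag.
+ */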
+static int
+wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap)
+{
+ int wsec = 0;
+ int wpa_auth = 0;
+ int res = 0;
+ int i;
+ char *ptr;
+#ifdef AP_ONLY
+ int mpc = 0;
+ wlc_ssid_t ap_ssid;
+#endif
+ wl_wsec_key_t key;
+
+ WL_SOFTAP(("\nsetting SOFTAP security mode:\n"));
+ WL_SOFTAP(("wl_iw: set ap profile:\n"));
+ WL_SOFTAP((" ssid = '%s'\n", ap->ssid));
+ WL_SOFTAP((" security = '%s'\n", ap->sec));
+ if (ap->key[0] != '\0')
+ WL_SOFTAP((" key = '%s'\n", ap->key));
+ WL_SOFTAP((" channel = %d\n", ap->channel));
+ WL_SOFTAP((" max scb = %d\n", ap->max_scb));
+
+
+ if (strnicmp(ap->sec, "open", strlen("open")) == 0) {
+
+
+ wsec = 0;
+ res = dev_wlc_intvar_set(dev, "wsec", wsec);
+ wpa_auth = WPA_AUTH_DISABLED;
+ res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+ WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & wpa_auth set 'OPEN', result: %d\n", res));
+ WL_SOFTAP(("=====================\n"));
+
+ } else if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+
+
+ memset(&key, 0, sizeof(key));
+
+ wsec = WEP_ENABLED;
+ res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+ key.index = 0;
+ if (wl_iw_parse_wep(ap->key, &key)) {
+ WL_SOFTAP(("wep key parse err!\n"));
+ return -1;
+ }
+
+ key.index = htod32(key.index);
+ key.len = htod32(key.len);
+ key.algo = htod32(key.algo);
+ key.flags = htod32(key.flags);
+
+ res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+
+ wpa_auth = WPA_AUTH_DISABLED;
+ res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+ WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & auth set 'WEP', result: %d\n", res));
+ WL_SOFTAP(("=====================\n"));
+
+ } else if (strnicmp(ap->sec, "wpa2-psk", strlen("wpa2-psk")) == 0) {
+
+
+
+ wsec_pmk_t psk;
+ size_t key_len;
+
+ wsec = AES_ENABLED;
+ dev_wlc_intvar_set(dev, "wsec", wsec);
+
+ key_len = strlen(ap->key);
+ if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+ WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+ WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+ return -1;
+ }
+
+
+ if (key_len < WSEC_MAX_PSK_LEN) {
+ unsigned char output[2*SHA1HashSize];
+ char key_str_buf[WSEC_MAX_PSK_LEN+1];
+
+
+ memset(output, 0, sizeof(output));
+ pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+
+ ptr = key_str_buf;
+ for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+
+ sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4],
+ (uint)output[i*4+1], (uint)output[i*4+2],
+ (uint)output[i*4+3]);
+ ptr += 8;
+ }
+			WL_SOFTAP(("%s: passphrase = %s\n", __FUNCTION__, key_str_buf));
+
+ psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+ memcpy(psk.key, key_str_buf, psk.key_len);
+ } else {
+ psk.key_len = htod16((ushort) key_len);
+ memcpy(psk.key, ap->key, key_len);
+ }
+ psk.flags = htod16(WSEC_PASSPHRASE);
+ dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+ wpa_auth = WPA2_AUTH_PSK;
+ dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+ } else if (strnicmp(ap->sec, "wpa-psk", strlen("wpa-psk")) == 0) {
+
+
+ wsec_pmk_t psk;
+ size_t key_len;
+
+ wsec = TKIP_ENABLED;
+ res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+ key_len = strlen(ap->key);
+ if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+ WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+ WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+ return -1;
+ }
+
+
+ if (key_len < WSEC_MAX_PSK_LEN) {
+ unsigned char output[2*SHA1HashSize];
+ char key_str_buf[WSEC_MAX_PSK_LEN+1];
+ bzero(output, 2*SHA1HashSize);
+
+ WL_SOFTAP(("%s: do passhash...\n", __FUNCTION__));
+
+ pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+
+ ptr = key_str_buf;
+ for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+ WL_SOFTAP(("[%02d]: %08x\n", i, *((unsigned int*)&output[i*4])));
+
+ sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4],
+ (uint)output[i*4+1], (uint)output[i*4+2],
+ (uint)output[i*4+3]);
+ ptr += 8;
+ }
+			printk("%s: passphrase = %s\n", __FUNCTION__, key_str_buf);
+
+ psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+ memcpy(psk.key, key_str_buf, psk.key_len);
+ } else {
+ psk.key_len = htod16((ushort) key_len);
+ memcpy(psk.key, ap->key, key_len);
+ }
+
+ psk.flags = htod16(WSEC_PASSPHRASE);
+ res |= dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+ wpa_auth = WPA_AUTH_PSK;
+ res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP((" wsec & auth set 'wpa-psk' (TKIP), result: %d\n", res));
+ }
+
+#ifdef AP_ONLY
+ ap_ssid.SSID_len = strlen(ap->ssid);
+ strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+ res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &ap_ssid, sizeof(ap_ssid));
+ mpc = 0;
+ res |= dev_wlc_intvar_set(dev, "mpc", mpc);
+ if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+ res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ }
+#endif
+ return res;
+}
+
+
+
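+/*
+ * get_parameter_from_string: looks for "token=" at the current position of
+ * *str_ptr, extracts the value up to the next '=' or ',' separator and stores
+ * it in dst as a decimal integer (PTYPE_INTDEC), a hex byte string
+ * (PTYPE_STR_HEX) or a plain NUL-terminated string, advancing *str_ptr past
+ * the consumed characters.  Returns 0 on success, -1 if the token is missing.
+ */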
+static int
+get_parameter_from_string(
+ char **str_ptr, const char *token,
+ int param_type, void *dst, int param_max_len)
+{
+ char int_str[7] = "0";
+ int parm_str_len;
+ char *param_str_begin;
+ char *param_str_end;
+ char *orig_str = *str_ptr;
+
+ if ((*str_ptr) && !strncmp(*str_ptr, token, strlen(token))) {
+
+ strsep(str_ptr, "=,");
+ param_str_begin = *str_ptr;
+ strsep(str_ptr, "=,");
+
+ if (*str_ptr == NULL) {
+
+ parm_str_len = strlen(param_str_begin);
+ } else {
+ param_str_end = *str_ptr-1;
+ parm_str_len = param_str_end - param_str_begin;
+ }
+
+ WL_TRACE((" 'token:%s', len:%d, ", token, parm_str_len));
+
+ if (parm_str_len > param_max_len) {
+ WL_ERROR((" WARNING: extracted param len:%d is > MAX:%d\n",
+ parm_str_len, param_max_len));
+
+ parm_str_len = param_max_len;
+ }
+
+ switch (param_type) {
+
+ case PTYPE_INTDEC: {
+
+ int *pdst_int = dst;
+ char *eptr;
+
+			/* leave room for the NUL terminator in int_str */
+			if (parm_str_len > sizeof(int_str) - 1)
+				parm_str_len = sizeof(int_str) - 1;
+
+ memcpy(int_str, param_str_begin, parm_str_len);
+
+ *pdst_int = simple_strtoul(int_str, &eptr, 10);
+
+ WL_TRACE((" written as integer:%d\n", *pdst_int));
+ }
+ break;
+ case PTYPE_STR_HEX: {
+ u8 *buf = dst;
+
+ param_max_len = param_max_len >> 1;
+ hstr_2_buf(param_str_begin, buf, param_max_len);
+ dhd_print_buf(buf, param_max_len, 0);
+ }
+ break;
+ default:
+
+ memcpy(dst, param_str_begin, parm_str_len);
+ *((char *)dst + parm_str_len) = 0;
+ WL_ERROR((" written as a string:%s\n", (char *)dst));
+ break;
+
+ }
+
+ return 0;
+ } else {
+ WL_ERROR(("\n %s: ERROR: can't find token:%s in str:%s \n",
+ __FUNCTION__, token, orig_str));
+
+ return -1;
+ }
+}
+
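+/*
+ * wl_iw_softap_deassoc_stations: fetches the current association list with
+ * WLC_GET_ASSOCLIST and sends WLC_SCB_DEAUTHENTICATE_FOR_REASON either to the
+ * station matching 'mac' or, when mac is NULL, to every associated station.
+ */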
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac)
+{
+ int i;
+ int res = 0;
+ char mac_buf[128] = {0};
+ char z_mac[6] = {0, 0, 0, 0, 0, 0};
+ char *sta_mac;
+ struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+ bool deauth_all = FALSE;
+
+
+ if (mac == NULL) {
+ deauth_all = TRUE;
+ sta_mac = z_mac;
+ } else {
+ sta_mac = mac;
+ }
+
+ memset(assoc_maclist, 0, sizeof(mac_buf));
+ assoc_maclist->count = 8;
+
+ res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 128);
+ if (res != 0) {
+ WL_SOFTAP(("%s: Error:%d Couldn't get ASSOC List\n", __FUNCTION__, res));
+ return res;
+ }
+
+	if (assoc_maclist->count) {
+		for (i = 0; i < assoc_maclist->count; i++) {
+			scb_val_t scbval;
+			scbval.val = htod32(1);
+
+			bcopy(&assoc_maclist->ea[i], &scbval.ea, ETHER_ADDR_LEN);
+
+			if (deauth_all || (memcmp(&scbval.ea, sta_mac, ETHER_ADDR_LEN) == 0)) {
+
+				WL_SOFTAP(("%s, deauth STA:%d\n", __FUNCTION__, i));
+				res |= dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+					&scbval, sizeof(scb_val_t));
+			}
+		}
+	} else {
+		WL_SOFTAP(("%s: No Stations\n", __FUNCTION__));
+	}
+
+ if (res != 0) {
+ WL_ERROR(("%s: Error:%d\n", __FUNCTION__, res));
+ } else if (assoc_maclist->count) {
+
+ bcm_mdelay(200);
+ }
+ return res;
+}
+
+
+
+static int
+iwpriv_softap_stop(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+
+ WL_SOFTAP(("got iwpriv AP_BSS_STOP \n"));
+
+ if ((!dev) && (!ap_net_dev)) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return res;
+ }
+
+ net_os_wake_lock(dev);
+ DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+ if ((ap_cfg_running == TRUE)) {
+#ifdef AP_ONLY
+ wl_iw_softap_deassoc_stations(dev, NULL);
+#else
+ wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 2)) < 0)
+ WL_ERROR(("%s failed to del BSS err = %d", __FUNCTION__, res));
+#endif
+
+
+ bcm_mdelay(100);
+
+ wrqu->data.length = 0;
+ ap_cfg_running = FALSE;
+ } else
+ WL_ERROR(("%s: was called when SoftAP is OFF : move on\n", __FUNCTION__));
+
+ WL_SOFTAP(("%s Done with %d\n", __FUNCTION__, res));
+ DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+ net_os_wake_unlock(dev);
+
+ return res;
+}
+
+
+
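+/*
+ * iwpriv_fw_reload: handler for the "WL_FW_RELOAD" private ioctl.  Copies the
+ * command from user space, extracts the FW_PATH= parameter into fw_path and
+ * records in ap_fw_loaded whether the selected image is the apsta (SoftAP)
+ * firmware, based on an "apsta" substring match.
+ */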
+static int
+iwpriv_fw_reload(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int ret = -1;
+ char extra[256];
+ char *fwstr = fw_path ;
+
+ WL_SOFTAP(("current firmware_path[]=%s\n", fwstr));
+
+ WL_TRACE((">Got FW_RELOAD cmd:"
+ "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d, "
+ "fw_path:%p, len:%d \n",
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length, fwstr, strlen(fwstr)));
+
+ if ((wrqu->data.length > 4) && (wrqu->data.length < sizeof(extra))) {
+ char *str_ptr;
+
+ if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+ ret = -EFAULT;
+ goto exit_proc;
+ }
+
+
+		/* NUL-terminate the command string before parsing */
+		extra[wrqu->data.length] = '\0';
+ str_ptr = extra;
+
+ if (get_parameter_from_string(&str_ptr,
+ "FW_PATH=", PTYPE_STRING, fwstr, 255) != 0) {
+			WL_ERROR(("Error: missing FW_PATH= parameter\n"));
+ goto exit_proc;
+ }
+
+ if (strstr(fwstr, "apsta") != NULL) {
+ WL_SOFTAP(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ WL_SOFTAP(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
+ }
+
+ WL_SOFTAP(("SET firmware_path[]=%s , str_p:%p\n", fwstr, fwstr));
+ ret = 0;
+ } else {
+		WL_ERROR(("Error: invalid param len:%d\n", wrqu->data.length));
+ }
+
+exit_proc:
+ return ret;
+}
+
+#ifdef SOFTAP
+
+static int
+iwpriv_wpasupp_loop_tst(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *ext)
+{
+ int res = 0;
+ char *params = NULL;
+
+ WL_TRACE((">Got IWPRIV wp_supp loopback cmd test:"
+ "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+ info->cmd, info->flags,
+ wrqu->data.pointer, wrqu->data.length));
+
+ if (wrqu->data.length != 0) {
+
+ if (!(params = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+ return -ENOMEM;
+
+
+ if (copy_from_user(params, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(params);
+ return -EFAULT;
+ }
+
+ params[wrqu->data.length] = 0;
+ WL_SOFTAP(("\n>> copied from user:\n %s\n", params));
+ } else {
+ WL_ERROR(("ERROR param length is 0\n"));
+ return -EFAULT;
+ }
+
+
+ res = wl_iw_send_priv_event(dev, params);
+ kfree(params);
+
+ return res;
+}
+#endif
+
+
+static int
+iwpriv_en_ap_bss(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ void *wrqu,
+ char *extra)
+{
+ int res = 0;
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ net_os_wake_lock(dev);
+ DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+ WL_TRACE(("%s: rcvd IWPRIV IOCTL: for dev:%s\n", __FUNCTION__, dev->name));
+
+
+#ifndef AP_ONLY
+ if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+ WL_ERROR((" %s ERROR setting SOFTAP security in :%d\n", __FUNCTION__, res));
+ }
+ else {
+
+ if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0)
+ WL_ERROR(("%s fail to set bss up err=%d\n", __FUNCTION__, res));
+ else
+
+ bcm_mdelay(100);
+ }
+
+#endif
+ WL_SOFTAP(("%s done with res %d \n", __FUNCTION__, res));
+
+ DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+ net_os_wake_unlock(dev);
+
+ return res;
+}
+
+static int
+get_assoc_sta_list(struct net_device *dev, char *buf, int len)
+{
+
+ WL_TRACE(("%s: dev_wlc_ioctl(dev:%p, cmd:%d, buf:%p, len:%d)\n",
+ __FUNCTION__, dev, WLC_GET_ASSOCLIST, buf, len));
+
+ return dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, buf, len);
+
+}
+
+
+void check_error(int res, const char *msg, const char *func, int line)
+{
+ if (res != 0)
+ WL_ERROR(("%s, %d function:%s, line:%d\n", msg, res, func, line));
+}
+
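+/*
+ * set_ap_mac_list: applies a MAC filter configuration for the SoftAP.  Stores
+ * the list in ap_black_list, programs the mode with WLC_SET_MACMODE and the
+ * addresses with WLC_SET_MACLIST, then walks the current association list and
+ * deauthenticates any station that is not permitted under the new allow/deny
+ * mode.
+ */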
+static int
+set_ap_mac_list(struct net_device *dev, void *buf)
+{
+ struct mac_list_set *mac_list_set = (struct mac_list_set *)buf;
+ struct maclist *maclist = (struct maclist *)&mac_list_set->mac_list;
+ int length;
+ int i;
+ int mac_mode = mac_list_set->mode;
+ int ioc_res = 0;
+ ap_macmode = mac_list_set->mode;
+
+
+ bzero(&ap_black_list, sizeof(struct mflist));
+
+ if (mac_mode == MACLIST_MODE_DISABLED) {
+
+ ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+ check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+ WL_SOFTAP(("%s: MAC filtering disabled\n", __FUNCTION__));
+ } else {
+
+ scb_val_t scbval;
+ char mac_buf[256] = {0};
+ struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+
+
+ bcopy(maclist, &ap_black_list, sizeof(ap_black_list));
+
+
+ ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+ check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+
+
+ length = sizeof(maclist->count) + maclist->count*ETHER_ADDR_LEN;
+ dev_wlc_ioctl(dev, WLC_SET_MACLIST, maclist, length);
+
+ WL_SOFTAP(("%s: applied MAC List, mode:%d, length %d:\n",
+ __FUNCTION__, mac_mode, length));
+
+ for (i = 0; i < maclist->count; i++)
+ WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ i, maclist->ea[i].octet[0], maclist->ea[i].octet[1],
+ maclist->ea[i].octet[2],
+ maclist->ea[i].octet[3], maclist->ea[i].octet[4],
+ maclist->ea[i].octet[5]));
+
+
+ assoc_maclist->count = 8;
+ ioc_res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 256);
+ check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+ WL_SOFTAP((" Cur assoc clients:%d\n", assoc_maclist->count));
+
+
+ if (assoc_maclist->count)
+ for (i = 0; i < assoc_maclist->count; i++) {
+ int j;
+ bool assoc_mac_matched = FALSE;
+
+				WL_SOFTAP(("\n Checking assoc STA: "));
+ dhd_print_buf(&assoc_maclist->ea[i], 6, 7);
+ WL_SOFTAP(("with the b/w list:"));
+
+ for (j = 0; j < maclist->count; j++)
+ if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j],
+ ETHER_ADDR_LEN)) {
+
+ assoc_mac_matched = TRUE;
+ break;
+ }
+
+
+ if (((mac_mode == MACLIST_MODE_ALLOW) && !assoc_mac_matched) ||
+ ((mac_mode == MACLIST_MODE_DENY) && assoc_mac_matched)) {
+
+ WL_SOFTAP(("b-match or w-mismatch,"
+ " do deauth/disassoc \n"));
+ scbval.val = htod32(1);
+ bcopy(&assoc_maclist->ea[i], &scbval.ea,
+ ETHER_ADDR_LEN);
+ ioc_res = dev_wlc_ioctl(dev,
+ WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scb_val_t));
+ check_error(ioc_res,
+ "ioctl ERROR:",
+ __FUNCTION__, __LINE__);
+
+ } else {
+ WL_SOFTAP((" no b/w list hits, let it be\n"));
+ }
+ } else {
+ WL_SOFTAP(("No ASSOC CLIENTS\n"));
+ }
+
+ }
+
+ WL_SOFTAP(("%s iocres:%d\n", __FUNCTION__, ioc_res));
+ return ioc_res;
+}
+#endif
+
+
+
+#ifdef SOFTAP
+#define PARAM_OFFSET PROFILE_OFFSET
+
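+/*
+ * wl_iw_process_private_ascii_cmd: dispatches the "ASCII_CMD=" sub-commands
+ * embedded at PROFILE_OFFSET in the private command buffer: AP_CFG (parse and
+ * apply a SoftAP profile), AP_BSS_START (enable the AP BSS), ASSOC_LST
+ * (no action here) and AP_BSS_STOP (take the cfg1 BSS down).
+ */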
+static int
+wl_iw_process_private_ascii_cmd(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *dwrq,
+ char *cmd_str)
+{
+ int ret = 0;
+ char *sub_cmd = cmd_str + PROFILE_OFFSET + strlen("ASCII_CMD=");
+
+ WL_SOFTAP(("\n %s: ASCII_CMD: offs_0:%s, offset_32:\n'%s'\n",
+ __FUNCTION__, cmd_str, cmd_str + PROFILE_OFFSET));
+
+ if (strnicmp(sub_cmd, "AP_CFG", strlen("AP_CFG")) == 0) {
+
+ WL_SOFTAP((" AP_CFG \n"));
+
+
+ if (init_ap_profile_from_string(cmd_str+PROFILE_OFFSET, &my_ap) != 0) {
+			WL_ERROR(("ERROR: invalid SoftAP CFG params\n"));
+ ret = -1;
+ } else {
+ ret = set_ap_cfg(dev, &my_ap);
+ }
+
+ } else if (strnicmp(sub_cmd, "AP_BSS_START", strlen("AP_BSS_START")) == 0) {
+
+ WL_SOFTAP(("\n SOFTAP - ENABLE BSS \n"));
+
+
+ WL_SOFTAP(("\n!!! got 'WL_AP_EN_BSS' from WPA supplicant, dev:%s\n", dev->name));
+
+#ifndef AP_ONLY
+ if (ap_net_dev == NULL) {
+ printf("\n ERROR: SOFTAP net_dev* is NULL !!!\n");
+ } else {
+
+ if ((ret = iwpriv_en_ap_bss(ap_net_dev, info, dwrq, cmd_str)) < 0)
+ WL_ERROR(("%s line %d fail to set bss up\n",
+ __FUNCTION__, __LINE__));
+ }
+#else
+ if ((ret = iwpriv_en_ap_bss(dev, info, dwrq, cmd_str)) < 0)
+ WL_ERROR(("%s line %d fail to set bss up\n",
+ __FUNCTION__, __LINE__));
+#endif
+ } else if (strnicmp(sub_cmd, "ASSOC_LST", strlen("ASSOC_LST")) == 0) {
+
+
+
+ } else if (strnicmp(sub_cmd, "AP_BSS_STOP", strlen("AP_BSS_STOP")) == 0) {
+
+ WL_SOFTAP((" \n temp DOWN SOFTAP\n"));
+#ifndef AP_ONLY
+ if ((ret = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+ WL_ERROR(("%s line %d fail to set bss down\n",
+ __FUNCTION__, __LINE__));
+ }
+#endif
+ }
+
+ return ret;
+
+}
+#endif
+
+
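+/*
+ * wl_iw_set_priv: SIOCSIWPRIV entry point.  Copies the ASCII command from
+ * user space and routes it to the matching helper: START/STOP, SCAN-ACTIVE/
+ * SCAN-PASSIVE, RSSI, LINKSPEED, MACADDR, COUNTRY, band/DTIM/suspend/txpower
+ * settings, PNO, CSCAN, WPS probe-request IE management, power/BT-coex modes
+ * and the SoftAP ASCII_CMD/AP_MAC_LIST_SET commands.  Unknown commands are
+ * acknowledged with "OK".
+ */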
+static int
+wl_iw_set_priv(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *ext
+)
+{
+ int ret = 0;
+ char * extra;
+
+ if (!(extra = kmalloc(dwrq->length, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, dwrq->pointer, dwrq->length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ WL_TRACE(("%s: SIOCSIWPRIV request %s, info->cmd:%x, info->flags:%d\n dwrq->length:%d\n",
+ dev->name, extra, info->cmd, info->flags, dwrq->length));
+
+
+
+ net_os_wake_lock(dev);
+
+ if (dwrq->length && extra) {
+ if (strnicmp(extra, "START", strlen("START")) == 0) {
+ wl_iw_control_wl_on(dev, info);
+ WL_TRACE(("%s, Received regular START command\n", __FUNCTION__));
+ }
+
+ if (g_onoff == G_WLAN_SET_OFF) {
+ WL_TRACE(("%s, missing START, Fail\n", __FUNCTION__));
+ kfree(extra);
+ net_os_wake_unlock(dev);
+ return -EFAULT;
+ }
+
+ if (strnicmp(extra, "SCAN-ACTIVE", strlen("SCAN-ACTIVE")) == 0) {
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+ WL_TRACE(("%s: active scan setting suppressed\n", dev->name));
+#else
+ ret = wl_iw_set_active_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+ }
+ else if (strnicmp(extra, "SCAN-PASSIVE", strlen("SCAN-PASSIVE")) == 0)
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+ WL_TRACE(("%s: passive scan setting suppressed\n", dev->name));
+#else
+ ret = wl_iw_set_passive_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+ else if (strnicmp(extra, "RSSI", strlen("RSSI")) == 0)
+ ret = wl_iw_get_rssi(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "LINKSPEED", strlen("LINKSPEED")) == 0)
+ ret = wl_iw_get_link_speed(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "MACADDR", strlen("MACADDR")) == 0)
+ ret = wl_iw_get_macaddr(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "COUNTRY", strlen("COUNTRY")) == 0)
+ ret = wl_iw_set_country(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "STOP", strlen("STOP")) == 0)
+ ret = wl_iw_control_wl_off(dev, info);
+ else if (strnicmp(extra, BAND_GET_CMD, strlen(BAND_GET_CMD)) == 0)
+ ret = wl_iw_get_band(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, BAND_SET_CMD, strlen(BAND_SET_CMD)) == 0)
+ ret = wl_iw_set_band(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, DTIM_SKIP_GET_CMD, strlen(DTIM_SKIP_GET_CMD)) == 0)
+ ret = wl_iw_get_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, DTIM_SKIP_SET_CMD, strlen(DTIM_SKIP_SET_CMD)) == 0)
+ ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, SETSUSPEND_CMD, strlen(SETSUSPEND_CMD)) == 0)
+ ret = wl_iw_set_suspend(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, TXPOWER_SET_CMD, strlen(TXPOWER_SET_CMD)) == 0)
+ ret = wl_iw_set_txpower(dev, info, (union iwreq_data *)dwrq, extra);
+#if defined(PNO_SUPPORT)
+ else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0)
+ ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, PNOSETUP_SET_CMD, strlen(PNOSETUP_SET_CMD)) == 0)
+ ret = wl_iw_set_pno_set(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, PNOENABLE_SET_CMD, strlen(PNOENABLE_SET_CMD)) == 0)
+ ret = wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+#if defined(CSCAN)
+
+ else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0)
+ ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+#ifdef CONFIG_WPS2
+ else if (strnicmp(extra, WPS_ADD_PROBE_REQ_IE_CMD,
+ strlen(WPS_ADD_PROBE_REQ_IE_CMD)) == 0)
+ ret = wl_iw_add_wps_probe_req_ie(dev, info,
+ (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, WPS_DEL_PROBE_REQ_IE_CMD,
+ strlen(WPS_DEL_PROBE_REQ_IE_CMD)) == 0)
+ ret = wl_iw_del_wps_probe_req_ie(dev, info,
+ (union iwreq_data *)dwrq, extra);
+#endif
+ else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0)
+ ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0)
+ ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra);
+ else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0)
+ ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+#ifdef SOFTAP
+ else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) {
+ wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra);
+ }
+ else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) {
+			WL_SOFTAP(("Got AP_MAC_LIST_SET command\n"));
+ set_ap_mac_list(dev, (extra + PROFILE_OFFSET));
+ }
+#endif
+ else {
+ WL_ERROR(("Unknown PRIVATE command %s - ignored\n", extra));
+ snprintf(extra, MAX_WX_STRING, "OK");
+ dwrq->length = strlen("OK") + 1;
+ }
+ }
+
+ net_os_wake_unlock(dev);
+
+ if (extra) {
+ if (copy_to_user(dwrq->pointer, extra, dwrq->length)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ kfree(extra);
+ }
+
+ return ret;
+}
+
+static const iw_handler wl_iw_handler[] =
+{
+ (iw_handler) wl_iw_config_commit,
+ (iw_handler) wl_iw_get_name,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_freq,
+ (iw_handler) wl_iw_get_freq,
+ (iw_handler) wl_iw_set_mode,
+ (iw_handler) wl_iw_get_mode,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_get_range,
+ (iw_handler) wl_iw_set_priv,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_spy,
+ (iw_handler) wl_iw_get_spy,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_wap,
+ (iw_handler) wl_iw_get_wap,
+#if WIRELESS_EXT > 17
+ (iw_handler) wl_iw_mlme,
+#else
+ (iw_handler) NULL,
+#endif
+#if defined(WL_IW_USE_ISCAN)
+ (iw_handler) wl_iw_iscan_get_aplist,
+#else
+ (iw_handler) wl_iw_get_aplist,
+#endif
+#if WIRELESS_EXT > 13
+#if defined(WL_IW_USE_ISCAN)
+ (iw_handler) wl_iw_iscan_set_scan,
+ (iw_handler) wl_iw_iscan_get_scan,
+#else
+ (iw_handler) wl_iw_set_scan,
+ (iw_handler) wl_iw_get_scan,
+#endif
+#else
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+#endif
+ (iw_handler) wl_iw_set_essid,
+ (iw_handler) wl_iw_get_essid,
+ (iw_handler) wl_iw_set_nick,
+ (iw_handler) wl_iw_get_nick,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_rate,
+ (iw_handler) wl_iw_get_rate,
+ (iw_handler) wl_iw_set_rts,
+ (iw_handler) wl_iw_get_rts,
+ (iw_handler) wl_iw_set_frag,
+ (iw_handler) wl_iw_get_frag,
+ (iw_handler) wl_iw_set_txpow,
+ (iw_handler) wl_iw_get_txpow,
+#if WIRELESS_EXT > 10
+ (iw_handler) wl_iw_set_retry,
+ (iw_handler) wl_iw_get_retry,
+#endif
+ (iw_handler) wl_iw_set_encode,
+ (iw_handler) wl_iw_get_encode,
+ (iw_handler) wl_iw_set_power,
+ (iw_handler) wl_iw_get_power,
+#if WIRELESS_EXT > 17
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_wpaie,
+ (iw_handler) wl_iw_get_wpaie,
+ (iw_handler) wl_iw_set_wpaauth,
+ (iw_handler) wl_iw_get_wpaauth,
+ (iw_handler) wl_iw_set_encodeext,
+ (iw_handler) wl_iw_get_encodeext,
+ (iw_handler) wl_iw_set_pmksa,
+#endif
+};
+
+#if WIRELESS_EXT > 12
+static const iw_handler wl_iw_priv_handler[] = {
+ NULL,
+ (iw_handler)wl_iw_set_active_scan,
+ NULL,
+ (iw_handler)wl_iw_get_rssi,
+ NULL,
+ (iw_handler)wl_iw_set_passive_scan,
+ NULL,
+ (iw_handler)wl_iw_get_link_speed,
+ NULL,
+ (iw_handler)wl_iw_get_macaddr,
+ NULL,
+ (iw_handler)wl_iw_control_wl_off,
+ NULL,
+ (iw_handler)wl_iw_control_wl_on,
+#ifdef SOFTAP
+
+
+ NULL,
+ (iw_handler)iwpriv_set_ap_config,
+
+
+
+ NULL,
+ (iw_handler)iwpriv_get_assoc_list,
+
+
+ NULL,
+ (iw_handler)iwpriv_set_mac_filters,
+
+
+ NULL,
+ (iw_handler)iwpriv_en_ap_bss,
+
+
+ NULL,
+ (iw_handler)iwpriv_wpasupp_loop_tst,
+
+ NULL,
+ (iw_handler)iwpriv_softap_stop,
+
+ NULL,
+ (iw_handler)iwpriv_fw_reload,
+ NULL,
+ (iw_handler)iwpriv_set_ap_sta_disassoc,
+#endif
+#if defined(CSCAN)
+
+ NULL,
+ (iw_handler)iwpriv_set_cscan
+#endif
+};
+
+static const struct iw_priv_args wl_iw_priv_args[] =
+{
+ {
+ WL_IW_SET_ACTIVE_SCAN,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "SCAN-ACTIVE"
+ },
+ {
+ WL_IW_GET_RSSI,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "RSSI"
+ },
+ {
+ WL_IW_SET_PASSIVE_SCAN,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "SCAN-PASSIVE"
+ },
+ {
+ WL_IW_GET_LINK_SPEED,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "LINKSPEED"
+ },
+ {
+ WL_IW_GET_CURR_MACADDR,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "Macaddr"
+ },
+ {
+ WL_IW_SET_STOP,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "STOP"
+ },
+ {
+ WL_IW_SET_START,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "START"
+ },
+
+#ifdef SOFTAP
+
+
+ {
+ WL_SET_AP_CFG,
+ IW_PRIV_TYPE_CHAR | 256,
+ 0,
+ "AP_SET_CFG"
+ },
+
+ {
+ WL_AP_STA_LIST,
+ IW_PRIV_TYPE_CHAR | 0,
+ IW_PRIV_TYPE_CHAR | 1024,
+ "AP_GET_STA_LIST"
+ },
+
+ {
+ WL_AP_MAC_FLTR,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "AP_SET_MAC_FLTR"
+ },
+
+ {
+ WL_AP_BSS_START,
+ 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+ "AP_BSS_START"
+ },
+
+ {
+ AP_LPB_CMD,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "AP_LPB_CMD"
+ },
+
+ {
+ WL_AP_STOP,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "AP_BSS_STOP"
+ },
+ {
+ WL_FW_RELOAD,
+ IW_PRIV_TYPE_CHAR | 256,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+ "WL_FW_RELOAD"
+ },
+#endif
+#if defined(CSCAN)
+ {
+ WL_COMBO_SCAN,
+ IW_PRIV_TYPE_CHAR | 1024,
+ 0,
+ "CSCAN"
+ },
+#endif
+ };
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+ .num_standard = ARRAYSIZE(wl_iw_handler),
+ .standard = (iw_handler *) wl_iw_handler,
+ .num_private = ARRAYSIZE(wl_iw_priv_handler),
+ .num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+ .private = (iw_handler *)wl_iw_priv_handler,
+ .private_args = (void *) wl_iw_priv_args,
+
+#if WIRELESS_EXT >= 19
+ get_wireless_stats: dhd_get_wireless_stats,
+#endif
+ };
+#endif
+
+
+
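+/*
+ * wl_iw_ioctl: generic Wireless Extensions ioctl entry point.  Validates the
+ * command against wl_iw_handler[], sizes a kernel buffer according to the
+ * per-command token_size/max_tokens rules, copies the user data in, invokes
+ * the handler and copies the result back to user space.
+ */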
+int
+wl_iw_ioctl(
+ struct net_device *dev,
+ struct ifreq *rq,
+ int cmd
+)
+{
+ struct iwreq *wrq = (struct iwreq *) rq;
+ struct iw_request_info info;
+ iw_handler handler;
+ char *extra = NULL;
+ size_t token_size = 1;
+ int max_tokens = 0, ret = 0;
+
+ net_os_wake_lock(dev);
+
+ WL_TRACE(("\n%s, cmd:%x called via dhd->do_ioctl()entry point\n", __FUNCTION__, cmd));
+ if (cmd < SIOCIWFIRST ||
+ IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+ !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) {
+ WL_ERROR(("%s: error in cmd=%x : not supported\n", __FUNCTION__, cmd));
+ net_os_wake_unlock(dev);
+ return -EOPNOTSUPP;
+ }
+
+ switch (cmd) {
+
+ case SIOCSIWESSID:
+ case SIOCGIWESSID:
+ case SIOCSIWNICKN:
+ case SIOCGIWNICKN:
+ max_tokens = IW_ESSID_MAX_SIZE + 1;
+ break;
+
+ case SIOCSIWENCODE:
+ case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+ case SIOCSIWENCODEEXT:
+ case SIOCGIWENCODEEXT:
+#endif
+ max_tokens = wrq->u.data.length;
+ break;
+
+ case SIOCGIWRANGE:
+
+ max_tokens = sizeof(struct iw_range) + 500;
+ break;
+
+ case SIOCGIWAPLIST:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_AP;
+ break;
+
+#if WIRELESS_EXT > 13
+ case SIOCGIWSCAN:
+#if defined(WL_IW_USE_ISCAN)
+ if (g_iscan)
+ max_tokens = wrq->u.data.length;
+ else
+#endif
+ max_tokens = IW_SCAN_MAX_DATA;
+ break;
+#endif
+
+ case SIOCSIWSPY:
+ token_size = sizeof(struct sockaddr);
+ max_tokens = IW_MAX_SPY;
+ break;
+
+ case SIOCGIWSPY:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_SPY;
+ break;
+
+#if WIRELESS_EXT > 17
+ case SIOCSIWPMKSA:
+ case SIOCSIWGENIE:
+#endif
+ case SIOCSIWPRIV:
+ max_tokens = wrq->u.data.length;
+ break;
+ }
+
+ if (max_tokens && wrq->u.data.pointer) {
+ if (wrq->u.data.length > max_tokens) {
+ WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n",
+ __FUNCTION__, cmd, wrq->u.data.length, max_tokens));
+ ret = -E2BIG;
+ goto wl_iw_ioctl_done;
+ }
+ if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto wl_iw_ioctl_done;
+ }
+
+ if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ ret = -EFAULT;
+ goto wl_iw_ioctl_done;
+ }
+ }
+
+ info.cmd = cmd;
+ info.flags = 0;
+
+ ret = handler(dev, &info, &wrq->u, extra);
+
+ if (extra) {
+ if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ ret = -EFAULT;
+ goto wl_iw_ioctl_done;
+ }
+
+ kfree(extra);
+ }
+
+wl_iw_ioctl_done:
+
+ net_os_wake_unlock(dev);
+
+ return ret;
+}
+
+
+static bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+ char* stringBuf, uint buflen)
+{
+ typedef struct conn_fail_event_map_t {
+ uint32 inEvent;
+ uint32 inStatus;
+ uint32 inReason;
+ const char* outName;
+ const char* outCause;
+ } conn_fail_event_map_t;
+
+
+#define WL_IW_DONT_CARE 9999
+ const conn_fail_event_map_t event_map [] = {
+
+
+ {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE,
+ "Conn", "Success"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+ "Conn", "NoNetworks"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ConfigMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH,
+ "Conn", "EncrypMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH,
+ "Conn", "RsnMismatch"},
+ {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "AuthTimeout"},
+ {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "AuthFail"},
+ {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE,
+ "Conn", "AuthNoAck"},
+ {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ReassocFail"},
+ {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "ReassocTimeout"},
+ {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE,
+ "Conn", "ReassocAbort"},
+ {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE,
+ "Sup", "ConnSuccess"},
+ {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Sup", "WpaHandshakeFail"},
+ {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Deauth"},
+ {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "DisassocInd"},
+ {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Disassoc"}
+ };
+
+ const char* name = "";
+ const char* cause = NULL;
+ int i;
+
+
+ for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+ const conn_fail_event_map_t* row = &event_map[i];
+ if (row->inEvent == event_type &&
+ (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+ (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+ name = row->outName;
+ cause = row->outCause;
+ break;
+ }
+ }
+
+
+ if (cause) {
+ memset(stringBuf, 0, buflen);
+ snprintf(stringBuf, buflen, "%s %s %02d %02d",
+ name, cause, status, reason);
+ WL_INFORM(("Connection status: %s\n", stringBuf));
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+#if WIRELESS_EXT > 14
+
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+ uint32 event = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+
+ if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
+#endif
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256
+#endif
+
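+/*
+ * wl_iw_event: translates firmware event messages (wl_event_msg_t) into
+ * Wireless Extensions events and SoftAP private notifications.  Handles
+ * association/disassociation, link up/down (including AP_UP/AP_DOWN and
+ * STA_JOIN/STA_LEAVE for the SoftAP interface), MIC failures, PMKID
+ * candidates, scan completion, action frames and PNO network-found events,
+ * and finally reports connection-failure strings built by
+ * wl_iw_check_conn_fail().
+ */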
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd = 0;
+ uint32 event_type = ntoh32(e->event_type);
+ uint16 flags = ntoh16(e->flags);
+ uint32 datalen = ntoh32(e->datalen);
+ uint32 status = ntoh32(e->status);
+ uint32 toto;
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
+
+ if (!dev) {
+ WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return;
+ }
+
+ net_os_wake_lock(dev);
+
+ WL_TRACE(("%s: dev=%s event=%d \n", __FUNCTION__, dev->name, event_type));
+
+
+ switch (event_type) {
+#if defined(SOFTAP)
+ case WLC_E_PRUNE:
+ if (ap_cfg_running) {
+ char *macaddr = (char *)&e->addr;
+ WL_SOFTAP(("PRUNE received, %02X:%02X:%02X:%02X:%02X:%02X!\n",
+ macaddr[0], macaddr[1], macaddr[2], macaddr[3],
+ macaddr[4], macaddr[5]));
+
+
+ if (ap_macmode)
+ {
+ int i;
+ for (i = 0; i < ap_black_list.count; i++) {
+ if (!bcmp(macaddr, &ap_black_list.ea[i],
+ sizeof(struct ether_addr))) {
+ WL_SOFTAP(("mac in black list, ignore it\n"));
+ break;
+ }
+ }
+
+ if (i == ap_black_list.count) {
+
+ char mac_buf[32] = {0};
+ sprintf(mac_buf, "STA_BLOCK %02X:%02X:%02X:%02X:%02X:%02X",
+ macaddr[0], macaddr[1], macaddr[2],
+ macaddr[3], macaddr[4], macaddr[5]);
+ wl_iw_send_priv_event(priv_dev, mac_buf);
+ }
+ }
+ }
+ break;
+#endif
+ case WLC_E_TXFAIL:
+ cmd = IWEVTXDROP;
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ break;
+#if WIRELESS_EXT > 14
+ case WLC_E_JOIN:
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+#if defined(SOFTAP)
+ WL_SOFTAP(("STA connect received %d\n", event_type));
+ if (ap_cfg_running) {
+ wl_iw_send_priv_event(priv_dev, "STA_JOIN");
+ goto wl_iw_event_end;
+ }
+#endif
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ cmd = IWEVREGISTERED;
+ break;
+ case WLC_E_ROAM:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ WL_ASSOC((" WLC_E_ROAM : success \n"));
+ goto wl_iw_event_end;
+ }
+ break;
+
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+#if defined(SOFTAP)
+ WL_SOFTAP(("STA disconnect received %d\n", event_type));
+ if (ap_cfg_running) {
+ wl_iw_send_priv_event(priv_dev, "STA_LEAVE");
+ goto wl_iw_event_end;
+ }
+#endif
+ cmd = SIOCGIWAP;
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ bzero(&extra, ETHER_ADDR_LEN);
+ break;
+ case WLC_E_LINK:
+ case WLC_E_NDIS_LINK:
+ cmd = SIOCGIWAP;
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+
+
+#ifdef SOFTAP
+#ifdef AP_ONLY
+ if (ap_cfg_running) {
+#else
+ if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif
+
+ WL_SOFTAP(("AP DOWN %d\n", event_type));
+ wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+ } else {
+ WL_TRACE(("STA_Link Down\n"));
+ g_ss_cache_ctrl.m_link_down = 1;
+ }
+#else
+ g_ss_cache_ctrl.m_link_down = 1;
+#endif
+ WL_TRACE(("Link Down\n"));
+
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+ }
+ else {
+
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ g_ss_cache_ctrl.m_link_down = 0;
+
+ memcpy(g_ss_cache_ctrl.m_active_bssid, &e->addr, ETHER_ADDR_LEN);
+#ifdef SOFTAP
+
+#ifdef AP_ONLY
+ if (ap_cfg_running) {
+#else
+ if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif
+
+ WL_SOFTAP(("AP UP %d\n", event_type));
+ wl_iw_send_priv_event(priv_dev, "AP_UP");
+ } else {
+ WL_TRACE(("STA_LINK_UP\n"));
+ }
+#else
+#endif
+ WL_TRACE(("Link UP\n"));
+
+ }
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ break;
+ case WLC_E_ACTION_FRAME:
+ cmd = IWEVCUSTOM;
+ if (datalen + 1 <= sizeof(extra)) {
+ wrqu.data.length = datalen + 1;
+ extra[0] = WLC_E_ACTION_FRAME;
+ memcpy(&extra[1], data, datalen);
+ WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+ }
+ break;
+
+ case WLC_E_ACTION_FRAME_COMPLETE:
+ cmd = IWEVCUSTOM;
+ memcpy(&toto, data, 4);
+ if (sizeof(status) + 1 <= sizeof(extra)) {
+ wrqu.data.length = sizeof(status) + 1;
+ extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+ memcpy(&extra[1], &status, sizeof(status));
+ printf("wl_iw_event status %d PacketId %d \n", status, toto);
+ printf("WLC_E_ACTION_FRAME_COMPLETE len %d \n", wrqu.data.length);
+ }
+ break;
+#endif
+#if WIRELESS_EXT > 17
+ case WLC_E_MIC_ERROR: {
+ struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+ cmd = IWEVMICHAELMICFAILURE;
+ wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ micerrevt->flags |= IW_MICFAILURE_GROUP;
+ else
+ micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+ memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+ break;
+ }
+ case WLC_E_PMKID_CACHE: {
+ if (data)
+ {
+ struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+ pmkid_cand_list_t *pmkcandlist;
+ pmkid_cand_t *pmkidcand;
+ int count;
+
+ cmd = IWEVPMKIDCAND;
+ pmkcandlist = data;
+ count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+ ASSERT(count >= 0);
+ wrqu.data.length = sizeof(struct iw_pmkid_cand);
+ pmkidcand = pmkcandlist->pmkid_cand;
+ while (count) {
+ bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+ if (pmkidcand->preauth)
+ iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+ bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+ ETHER_ADDR_LEN);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ pmkidcand++;
+ count--;
+ }
+ }
+ goto wl_iw_event_end;
+ }
+#endif
+
+ case WLC_E_SCAN_COMPLETE:
+#if defined(WL_IW_USE_ISCAN)
+ if (!g_iscan) {
+			WL_ERROR(("Event WLC_E_SCAN_COMPLETE but g_iscan is NULL\n"));
+ goto wl_iw_event_end;
+ }
+
+ if ((g_iscan) && (g_iscan->tsk_ctl.thr_pid >= 0) &&
+ (g_iscan->iscan_state != ISCAN_STATE_IDLE))
+ {
+ up(&g_iscan->tsk_ctl.sema);
+ } else {
+ cmd = SIOCGIWSCAN;
+ wrqu.data.length = strlen(extra);
+ WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific scan %d\n",
+ g_iscan->iscan_state));
+ }
+#else
+ cmd = SIOCGIWSCAN;
+ wrqu.data.length = strlen(extra);
+ WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n"));
+#endif
+ break;
+
+
+ case WLC_E_PFN_NET_FOUND:
+ {
+ wl_pfn_net_info_t *netinfo;
+ netinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t) -
+ sizeof(wl_pfn_net_info_t));
+ WL_ERROR(("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n",
+ __FUNCTION__, PNO_EVENT_UP, netinfo->pfnsubnet.SSID,
+ netinfo->pfnsubnet.SSID_len));
+ cmd = IWEVCUSTOM;
+ memset(&wrqu, 0, sizeof(wrqu));
+ strcpy(extra, PNO_EVENT_UP);
+ wrqu.data.length = strlen(extra);
+ }
+ break;
+
+ default:
+
+ WL_TRACE(("Unknown Event %d: ignoring\n", event_type));
+ break;
+ }
+ if (cmd) {
+ if (cmd == SIOCGIWSCAN)
+ wireless_send_event(dev, cmd, &wrqu, NULL);
+ else
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+
+#if WIRELESS_EXT > 14
+
+ memset(extra, 0, sizeof(extra));
+ if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+ cmd = IWEVCUSTOM;
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+#endif
+
+ goto wl_iw_event_end;
+wl_iw_event_end:
+
+ net_os_wake_unlock(dev);
+#endif
+}
+
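+/*
+ * wl_iw_get_wireless_stats: fills struct iw_statistics from the firmware:
+ * noise from WLC_GET_PHY_NOISE, RSSI from WLC_GET_RSSI (mapped onto a 0-5
+ * quality scale), and discard/retry counters from the "counters" iovar.
+ */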
+int
+wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+ int res = 0;
+ wl_cnt_t cnt;
+ int phy_noise;
+ int rssi;
+ scb_val_t scb_val;
+
+ phy_noise = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+ goto done;
+
+ phy_noise = dtoh32(phy_noise);
+ WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise));
+
+ bzero(&scb_val, sizeof(scb_val_t));
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+ goto done;
+
+ rssi = dtoh32(scb_val.val);
+ WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi));
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ wstats->qual.qual = 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ wstats->qual.qual = 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ wstats->qual.qual = 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ wstats->qual.qual = 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ wstats->qual.qual = 4;
+ else
+ wstats->qual.qual = 5;
+
+
+ wstats->qual.level = 0x100 + rssi;
+ wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+ wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+ wstats->qual.updated |= 7;
+#endif
+
+#if WIRELESS_EXT > 11
+ WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n", (int)sizeof(wl_cnt_t)));
+
+ memset(&cnt, 0, sizeof(wl_cnt_t));
+ res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+ if (res)
+ {
+ WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n", res));
+ goto done;
+ }
+
+ cnt.version = dtoh16(cnt.version);
+ if (cnt.version != WL_CNT_T_VERSION) {
+ WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+ WL_CNT_T_VERSION, cnt.version));
+ goto done;
+ }
+
+ wstats->discard.nwid = 0;
+ wstats->discard.code = dtoh32(cnt.rxundec);
+ wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+ wstats->discard.retries = dtoh32(cnt.txfail);
+ wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+ wstats->miss.beacon = 0;
+
+ WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+ dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif
+
+done:
+ return res;
+}
+#if defined(COEX_DHCP)
+static void
+wl_iw_bt_flag_set(
+ struct net_device *dev,
+ bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+
+ set_btc_esco_params(dev, set);
+#endif
+
+
+#if defined(BT_DHCP_USE_FLAGS)
+ WL_TRACE_COEX(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE) {
+
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0], sizeof(buf_flag7_dhcp_on));
+ }
+ else {
+
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+}
+
+static void
+wl_iw_bt_timerfunc(ulong data)
+{
+ bt_info_t *bt_local = (bt_info_t *)data;
+ bt_local->timer_on = 0;
+ WL_TRACE(("%s\n", __FUNCTION__));
+
+ up(&bt_local->tsk_ctl.sema);
+}
+
+static int
+_bt_dhcp_sysioc_thread(void *data)
+{
+ tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data;
+
+ DAEMONIZE("dhcp_sysioc");
+
+ complete(&tsk_ctl->completed);
+
+ while (down_interruptible(&tsk_ctl->sema) == 0) {
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk_ctl->terminated) {
+ break;
+ }
+
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+ }
+
+ switch (g_bt->bt_state) {
+ case BT_DHCP_START:
+
+ WL_TRACE_COEX(("%s bt_dhcp stm: started \n", __FUNCTION__));
+ g_bt->bt_state = BT_DHCP_OPPORTUNITY_WINDOW;
+ mod_timer(&g_bt->timer,
+ jiffies + BT_DHCP_OPPORTUNITY_WINDOW_TIME*HZ/1000);
+ g_bt->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPORTUNITY_WINDOW:
+ if (g_bt->dhcp_done) {
+ WL_TRACE_COEX(("%s DHCP Done before T1 expiration\n",
+ __FUNCTION__));
+ goto btc_coex_idle;
+ }
+
+
+ WL_TRACE_COEX(("%s DHCP T1:%d expired\n",
+ __FUNCTION__, BT_DHCP_OPPORTUNITY_WINDOW_TIME));
+
+ if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, TRUE);
+ g_bt->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&g_bt->timer, jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+ g_bt->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (g_bt->dhcp_done) {
+ WL_TRACE_COEX(("%s DHCP Done before T2 expiration\n",
+ __FUNCTION__));
+ } else {
+
+ WL_TRACE_COEX(("%s DHCP wait interval T2:%d msec expired\n",
+ __FUNCTION__, BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+
+ if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE);
+ btc_coex_idle:
+ g_bt->bt_state = BT_DHCP_IDLE;
+ g_bt->timer_on = 0;
+ break;
+
+ default:
+ WL_ERROR(("%s error g_status=%d !!!\n", __FUNCTION__,
+ g_bt->bt_state));
+ if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE);
+ g_bt->bt_state = BT_DHCP_IDLE;
+ g_bt->timer_on = 0;
+ break;
+ }
+
+ net_os_wake_unlock(g_bt->dev);
+ }
+
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+ }
+ complete_and_exit(&tsk_ctl->completed, 0);
+}
+
+static void
+wl_iw_bt_release(void)
+{
+ bt_info_t *bt_local = g_bt;
+
+ if (!bt_local) {
+ return;
+ }
+
+ if (bt_local->tsk_ctl.thr_pid >= 0) {
+ PROC_STOP(&bt_local->tsk_ctl);
+ }
+ kfree(bt_local);
+ g_bt = NULL;
+}
+
+static int
+wl_iw_bt_init(struct net_device *dev)
+{
+ bt_info_t *bt_dhcp = NULL;
+
+ bt_dhcp = kmalloc(sizeof(bt_info_t), GFP_KERNEL);
+ if (!bt_dhcp)
+ return -ENOMEM;
+
+ memset(bt_dhcp, 0, sizeof(bt_info_t));
+
+ g_bt = bt_dhcp;
+ bt_dhcp->dev = dev;
+ bt_dhcp->bt_state = BT_DHCP_IDLE;
+
+
+ bt_dhcp->timer_ms = 10;
+ init_timer(&bt_dhcp->timer);
+ bt_dhcp->timer.data = (ulong)bt_dhcp;
+ bt_dhcp->timer.function = wl_iw_bt_timerfunc;
+ bt_dhcp->ts_dhcp_start = 0;
+ bt_dhcp->ts_dhcp_ok = 0;
+
+ PROC_START(_bt_dhcp_sysioc_thread, bt_dhcp, &bt_dhcp->tsk_ctl, 0);
+ if (bt_dhcp->tsk_ctl.thr_pid < 0) {
+ WL_ERROR(("Failed in %s\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#endif
+
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+#if defined(WL_IW_USE_ISCAN)
+ int params_size = 0;
+#endif
+ wl_iw_t *iw;
+#if defined(WL_IW_USE_ISCAN)
+ iscan_info_t *iscan = NULL;
+#endif
+
+ DHD_OS_MUTEX_INIT(&wl_cache_lock);
+ DHD_OS_MUTEX_INIT(&wl_softap_lock);
+
+#if defined(WL_IW_USE_ISCAN)
+ if (!dev)
+ return 0;
+
+
+ memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t));
+
+
+#ifdef CSCAN
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)) +
+ (WL_NUMCHANNELS * sizeof(uint16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+#else
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+#endif
+ iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+ if (!iscan)
+ return -ENOMEM;
+ memset(iscan, 0, sizeof(iscan_info_t));
+
+
+ iscan->iscan_ex_params_p = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+ if (!iscan->iscan_ex_params_p) {
+ kfree(iscan);
+ return -ENOMEM;
+ }
+ iscan->iscan_ex_param_size = params_size;
+
+
+ g_iscan = iscan;
+ iscan->dev = dev;
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+
+#if defined(CONFIG_FIRST_SCAN)
+ g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+ g_first_counter_scans = 0;
+ g_iscan->scan_flag = 0;
+#endif
+
+#ifdef CONFIG_WPS2
+ g_wps_probe_req_ie = NULL;
+ g_wps_probe_req_ie_len = 0;
+#endif
+
+ iscan->timer_ms = 8000;
+ init_timer(&iscan->timer);
+ iscan->timer.data = (ulong)iscan;
+ iscan->timer.function = wl_iw_timerfunc;
+
+ PROC_START(_iscan_sysioc_thread, iscan, &iscan->tsk_ctl, 0);
+ if (iscan->tsk_ctl.thr_pid < 0)
+ return -ENOMEM;
+#endif
+
+ iw = *(wl_iw_t **)netdev_priv(dev);
+ iw->pub = (dhd_pub_t *)dhdp;
+#ifdef SOFTAP
+ priv_dev = dev;
+#endif
+ g_scan = NULL;
+
+
+ g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+ if (!g_scan)
+ return -ENOMEM;
+
+ memset(g_scan, 0, G_SCAN_RESULTS);
+ g_scan_specified_ssid = 0;
+
+#if !defined(CSCAN)
+
+ wl_iw_init_ss_cache_ctrl();
+#endif
+#ifdef COEX_DHCP
+
+ wl_iw_bt_init(dev);
+#endif
+
+
+ return 0;
+}
+
+void
+wl_iw_detach(void)
+{
+#if defined(WL_IW_USE_ISCAN)
+ iscan_buf_t *buf;
+ iscan_info_t *iscan = g_iscan;
+
+ if (!iscan)
+ return;
+ if (iscan->tsk_ctl.thr_pid >= 0) {
+ PROC_STOP(&iscan->tsk_ctl);
+ }
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+ while (iscan->list_hdr) {
+ buf = iscan->list_hdr->next;
+ kfree(iscan->list_hdr);
+ iscan->list_hdr = buf;
+ }
+ kfree(iscan->iscan_ex_params_p);
+ kfree(iscan);
+ g_iscan = NULL;
+ DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+#endif
+
+ if (g_scan)
+ kfree(g_scan);
+
+ g_scan = NULL;
+#ifdef CONFIG_WPS2
+
+ if (g_wps_probe_req_ie) {
+ kfree(g_wps_probe_req_ie);
+ g_wps_probe_req_ie = NULL;
+ g_wps_probe_req_ie_len = 0;
+ }
+#endif
+#if !defined(CSCAN)
+ wl_iw_release_ss_cache_ctrl();
+#endif
+#ifdef COEX_DHCP
+ wl_iw_bt_release();
+#endif
+
+#ifdef SOFTAP
+ if (ap_cfg_running) {
+ WL_TRACE(("\n%s AP is going down\n", __FUNCTION__));
+
+ wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+ }
+#endif
+
+}
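A minimal sketch (not part of the patch) of how a DHCP-start path is expected to arm the coex state machine run by _bt_dhcp_sysioc_thread() above; the actual private-command handler lives in an earlier hunk of wl_iw.c, so the helper name here is illustrative only:

static void example_dhcp_session_begin(struct net_device *dev)
{
	if (!g_bt)
		return;

	net_os_wake_lock(dev);		/* balanced by net_os_wake_unlock() in the thread loop */
	g_bt->dev = dev;
	g_bt->dhcp_done = 0;
	g_bt->bt_state = BT_DHCP_START;	/* T1 (opportunity window) then T2 (flag force) run from here */
	up(&g_bt->tsk_ctl.sema);	/* wake _bt_dhcp_sysioc_thread() */
}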
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 000000000000..c0cc14bdde4e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,306 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h,v 1.15.80.6 2010-12-23 01:13:23 Exp $
+ */
+
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+#define GET_SSID "SSID="
+#define GET_CHANNEL "CH="
+#define GET_NPROBE "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
+#define GET_HOME_DWELL "HOME="
+#define GET_SCAN_TYPE "TYPE="
+
+#define BAND_GET_CMD "GETBAND"
+#define BAND_SET_CMD "SETBAND"
+#define DTIM_SKIP_GET_CMD "DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD "DTIMSKIPSET"
+#define SETSUSPEND_CMD "SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR"
+
+#define PNOSETUP_SET_CMD "PNOSETUP "
+#define PNOENABLE_SET_CMD "PNOFORCE"
+#define PNODEBUG_SET_CMD "PNODEBUG"
+#define TXPOWER_SET_CMD "TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+
+typedef struct wl_iw_extra_params {
+ int target_channel;
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+ char iso_abbrev[WLC_CNTRY_BUF_SZ];
+ char custom_locale[WLC_CNTRY_BUF_SZ];
+ int32 custom_locale_rev;
+};
+
+
+#define WL_IW_RSSI_MINVAL -200
+#define WL_IW_RSSI_NO_SIGNAL -91
+#define WL_IW_RSSI_VERY_LOW -80
+#define WL_IW_RSSI_LOW -70
+#define WL_IW_RSSI_GOOD -68
+#define WL_IW_RSSI_VERY_GOOD -58
+#define WL_IW_RSSI_EXCELLENT -57
+#define WL_IW_RSSI_INVALID 0
+#define MAX_WX_STRING 80
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13)
+
+
+#define WL_SET_AP_CFG (SIOCIWFIRSTPRIV+15)
+#define WL_AP_STA_LIST (SIOCIWFIRSTPRIV+17)
+#define WL_AP_MAC_FLTR (SIOCIWFIRSTPRIV+19)
+#define WL_AP_BSS_START (SIOCIWFIRSTPRIV+21)
+#define AP_LPB_CMD (SIOCIWFIRSTPRIV+23)
+#define WL_AP_STOP (SIOCIWFIRSTPRIV+25)
+#define WL_FW_RELOAD (SIOCIWFIRSTPRIV+27)
+#define WL_AP_STA_DISASSOC (SIOCIWFIRSTPRIV+29)
+#define WL_COMBO_SCAN (SIOCIWFIRSTPRIV+31)
+
+
+#define G_SCAN_RESULTS 8*1024
+#define WE_ADD_EVENT_FIX 0x80
+#define G_WLAN_SET_ON 0
+#define G_WLAN_SET_OFF 1
+
+#define CHECK_EXTRA_FOR_NULL(extra) \
+if (!extra) { \
+ WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \
+ return -EINVAL; \
+}
+
+typedef struct wl_iw {
+ char nickname[IW_ESSID_MAX_SIZE];
+
+ struct iw_statistics wstats;
+
+ int spy_num;
+ int wpaversion;
+ int pcipher;
+ int gcipher;
+ int privacy_invoked;
+
+ struct ether_addr spy_addr[IW_MAX_SPY];
+ struct iw_quality spy_qual[IW_MAX_SPY];
+ void *wlinfo;
+ dhd_pub_t * pub;
+} wl_iw_t;
+
+int wl_control_wl_start(struct net_device *dev);
+#define WLC_IW_SS_CACHE_MAXLEN 2048
+#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN 32
+#define WLC_IW_BSS_INFO_MAXLEN \
+ (WLC_IW_SS_CACHE_MAXLEN - WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN)
+
+typedef struct wl_iw_ss_cache {
+ struct wl_iw_ss_cache *next;
+ int dirty;
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_t bss_info[1];
+} wl_iw_ss_cache_t;
+
+typedef struct wl_iw_ss_cache_ctrl {
+ wl_iw_ss_cache_t *m_cache_head;
+ int m_link_down;
+ int m_timer_expired;
+ char m_active_bssid[ETHER_ADDR_LEN];
+ uint m_prev_scan_mode;
+ uint m_cons_br_scan_cnt;
+ struct timer_list *m_timer;
+} wl_iw_ss_cache_ctrl_t;
+
+typedef enum broadcast_first_scan {
+ BROADCAST_SCAN_FIRST_IDLE = 0,
+ BROADCAST_SCAN_FIRST_STARTED,
+ BROADCAST_SCAN_FIRST_RESULT_READY,
+ BROADCAST_SCAN_FIRST_RESULT_CONSUMED
+} broadcast_first_scan_t;
+#ifdef SOFTAP
+#define SSID_LEN 33
+#define SEC_LEN 16
+#define KEY_LEN 65
+#define PROFILE_OFFSET 32
+struct ap_profile {
+ uint8 ssid[SSID_LEN];
+ uint8 sec[SEC_LEN];
+ uint8 key[KEY_LEN];
+ uint32 channel;
+ uint32 preamble;
+ uint32 max_scb;
+ uint32 closednet;
+ char country_code[WLC_CNTRY_BUF_SZ];
+};
+
+
+#define MACLIST_MODE_DISABLED 0
+#define MACLIST_MODE_DENY 1
+#define MACLIST_MODE_ALLOW 2
+struct mflist {
+ uint count;
+ struct ether_addr ea[16];
+};
+struct mac_list_set {
+ uint32 mode;
+ struct mflist mac_list;
+};
+#endif
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+void wl_iw_detach(void);
+
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+
+#define PNO_TLV_PREFIX 'S'
+#define PNO_TLV_VERSION '1'
+#define PNO_TLV_SUBVERSION '2'
+#define PNO_TLV_RESERVED '0'
+#define PNO_TLV_TYPE_SSID_IE 'S'
+#define PNO_TLV_TYPE_TIME 'T'
+#define PNO_TLV_FREQ_REPEAT 'R'
+#define PNO_TLV_FREQ_EXPO_MAX 'M'
+#define PNO_EVENT_UP "PNO_EVENT"
+
+typedef struct cmd_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cmd_tlv_t;
+
+
+
+
+typedef struct cscan_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cscan_tlv_t;
+
+#define CSCAN_COMMAND "CSCAN "
+#define CSCAN_TLV_PREFIX 'S'
+#define CSCAN_TLV_VERSION 1
+#define CSCAN_TLV_SUBVERSION 0
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE 'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE 'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE 'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE 'P'
+#define CSCAN_TLV_TYPE_HOME_IE 'H'
+#define CSCAN_TLV_TYPE_STYPE_IE 'T'
+
+#ifdef SOFTAP_TLV_CFG
+
+#define SOFTAP_SET_CMD "SOFTAPSET "
+#define SOFTAP_TLV_PREFIX 'A'
+#define SOFTAP_TLV_VERSION '1'
+#define SOFTAP_TLV_SUBVERSION '0'
+#define SOFTAP_TLV_RESERVED '0'
+
+#define TLV_TYPE_SSID 'S'
+#define TLV_TYPE_SECUR 'E'
+#define TLV_TYPE_KEY 'K'
+#define TLV_TYPE_CHANNEL 'C'
+#endif
+
+extern int wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+ int channel_num, int *bytes_left);
+
+extern int wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size,
+ const char token, int input_size, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+ int max, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max);
+
+extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num);
+
+
+#define NETDEV_PRIV(dev) (*(wl_iw_t **)netdev_priv(dev))
+
+#ifdef CONFIG_WPS2
+#define WPS_ADD_PROBE_REQ_IE_CMD "ADD_WPS_PROBE_REQ_IE "
+#define WPS_DEL_PROBE_REQ_IE_CMD "DEL_WPS_PROBE_REQ_IE "
+#define WPS_PROBE_REQ_IE_CMD_LENGTH 21
+#endif
+
+#endif
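For orientation (not part of the patch), a sketch of how a combined-scan request buffer could be started with the TLV header declared above. It only assembles the "CSCAN " prefix and the cscan_tlv_t header; the per-SSID and per-channel encodings are consumed by wl_iw_parse_ssid_list_tlv() and wl_iw_parse_channel_list_tlv() and are not reproduced here.

static int example_cscan_header(char *buf, int buflen)
{
	cscan_tlv_t *hdr;
	int prefix_len = strlen(CSCAN_COMMAND);

	if (buflen < prefix_len + (int)sizeof(cscan_tlv_t))
		return -1;

	memcpy(buf, CSCAN_COMMAND, prefix_len);		/* "CSCAN " */
	hdr = (cscan_tlv_t *)(buf + prefix_len);
	hdr->prefix   = CSCAN_TLV_PREFIX;		/* 'S' */
	hdr->version  = CSCAN_TLV_VERSION;		/* 1 */
	hdr->subver   = CSCAN_TLV_SUBVERSION;		/* 0 */
	hdr->reserved = 0;

	return prefix_len + sizeof(cscan_tlv_t);	/* SSID/channel TLVs follow */
}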
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 000000000000..bb3eaea90d0f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,341 @@
+/*
+ * Common functions shared by the Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+ s32 ret = 0;
+ struct wl_ioctl ioc;
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+
+ ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+ return ret;
+}
+
+/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+ s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, u32 buflen)
+{
+ s32 iolen = 0;
+
+ iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+ return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen)
+{
+ s32 ret = 0;
+ s32 iovar_len = 0;
+
+ iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+ ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+ return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen)
+{
+ s32 ret = 0;
+ s32 iovar_len;
+
+ iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+ ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+ return ret;
+}
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ val = htod32(val);
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+ sizeof(iovar_buf));
+}
+
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 err;
+
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+ sizeof(iovar_buf));
+ if (err == 0)
+ {
+ memcpy(pval, iovar_buf, sizeof(*pval));
+ *pval = dtoh32(*pval);
+ }
+ return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+ const s8 *prefix = "bsscfg:";
+ s8 *p;
+ u32 prefixlen;
+ u32 namelen;
+ u32 iolen;
+
+ if (bssidx == 0) {
+ return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+ (s8 *) iovar_buf, buflen);
+ }
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+ iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+ if (buflen < 0 || iolen > (u32)buflen)
+ {
+ DHD_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+ return BCME_BUFTOOSHORT;
+ }
+
+ p = (s8 *)iovar_buf;
+
+ /* copy prefix, no null */
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ /* copy iovar name including null */
+ memcpy(p, iovar_name, namelen);
+ p += namelen;
+
+ /* bss config index as first param */
+ bssidx = htod32(bssidx);
+ memcpy(p, &bssidx, sizeof(u32));
+ p += sizeof(u32);
+
+ /* parameter buffer follows */
+ if (paramlen)
+ memcpy(p, param, paramlen);
+
+ return iolen;
+
+}
+
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx)
+{
+ s32 ret = 0;
+ s32 iovar_len = 0;
+
+ iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+ ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+ return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx)
+{
+ s32 ret = 0;
+ s32 iovar_len;
+
+ iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+ ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+ return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ val = htod32(val);
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+ sizeof(iovar_buf), bssidx);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 err;
+
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+ sizeof(iovar_buf), bssidx);
+ if (err == 0)
+ {
+ memcpy(pval, iovar_buf, sizeof(*pval));
+ *pval = dtoh32(*pval);
+ }
+ return err;
+}
+
+int wldev_get_link_speed(
+ struct net_device *dev, int *plink_speed)
+{
+ int error;
+
+ if (!plink_speed)
+ return -ENOMEM;
+ error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+ if (unlikely(error))
+ return error;
+
+	/* Convert from units of 500 kbps to kbps */
+ *plink_speed *= 500;
+ return error;
+}
+
+int wldev_get_rssi(
+ struct net_device *dev, int *prssi)
+{
+ scb_val_t scb_val;
+ int error;
+
+ if (!prssi)
+ return -ENOMEM;
+ bzero(&scb_val, sizeof(scb_val_t));
+
+ error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+ if (unlikely(error))
+ return error;
+
+ *prssi = dtoh32(scb_val.val);
+ return error;
+}
+
+int wldev_get_ssid(
+ struct net_device *dev, wlc_ssid_t *pssid)
+{
+ int error;
+
+ if (!pssid)
+ return -ENOMEM;
+ error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+ if (unlikely(error))
+ return error;
+ pssid->SSID_len = dtoh32(pssid->SSID_len);
+ return error;
+}
+
+int wldev_get_band(
+ struct net_device *dev, uint *pband)
+{
+ int error;
+
+ error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+ return error;
+}
+
+int wldev_set_band(
+ struct net_device *dev, uint band)
+{
+ int error = -1;
+
+ if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+ error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), 1);
+ }
+ return error;
+}
+
+int wldev_set_country(
+ struct net_device *dev, char *country_code)
+{
+ int error = -1;
+ wl_country_t cspec = {{0}, 0, {0}};
+ scb_val_t scbval;
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ if (!country_code)
+ return error;
+
+ error = wldev_iovar_getbuf(dev, "country", &cspec, sizeof(cspec),
+ smbuf, sizeof(smbuf));
+ if (error < 0)
+ DHD_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+
+ if ((error < 0) ||
+ (strncmp(country_code, smbuf, WLC_CNTRY_BUF_SZ) != 0)) {
+ bzero(&scbval, sizeof(scb_val_t));
+ error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), 1);
+ if (error < 0) {
+ DHD_ERROR(("%s: set country failed due to Disassoc error %d\n",
+ __FUNCTION__, error));
+ return error;
+ }
+ }
+ cspec.rev = -1;
+ memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+ get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+ error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+ smbuf, sizeof(smbuf));
+ if (error < 0) {
+ DHD_ERROR(("%s: set country for %s as %s rev %d failed\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ return error;
+ }
+ dhd_bus_country_set(dev, &cspec);
+ DHD_INFO(("%s: set country for %s as %s rev %d\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ return 0;
+}
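A usage sketch for the bsscfg-indexed path (not part of the patch): for a non-zero bssidx, wldev_mkiovar_bsscfg() above lays the buffer out as "bsscfg:" + iovar name with its terminating NUL + the 32-bit index + the parameter bytes, and the setint wrapper drives that through WLC_SET_VAR. The iovar name "wsec" and its value are illustrative only.

static int example_set_wsec_on_bss(struct net_device *ndev, s32 bssidx)
{
	s32 wsec = 4;	/* illustrative value */

	return wldev_iovar_setint_bsscfg(ndev, "wsec", wsec, bssidx);
}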
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 000000000000..46326803e216
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,110 @@
+/*
+ * Common functions shared by the Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.h,v 1.1.4.1.2.14 2011-02-09 01:40:07 Exp $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/** wldev_ioctl - get/set IOCTLs; calls the net_device's do_ioctl (or
+ * netdev_ops->ndo_do_ioctl on newer kernels)
+ * @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+/** Retrieve a named IOVAR; this function calls wldev_ioctl with the
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen);
+
+/** Set a named IOVAR; this function calls wldev_ioctl with the
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen);
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following function can be implemented if there is a need for bsscfg
+ * indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve a named, bsscfg-indexed IOVAR; this function calls wldev_ioctl
+ * with the WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx);
+
+/** Set a named, bsscfg-indexed IOVAR; this function calls wldev_ioctl
+ * with the WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx);
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+extern int wldev_set_country(struct net_device *dev, char *country_code);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+ int max, int *bytes_left);
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
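A short usage sketch of the accessors declared above (not part of the patch); error handling is reduced to a single check.

static void example_dump_link_state(struct net_device *ndev)
{
	int rssi = 0;
	int link_kbps = 0;

	if (wldev_get_rssi(ndev, &rssi) == 0 &&
	    wldev_get_link_speed(ndev, &link_kbps) == 0)
		printk(KERN_INFO "wl: rssi=%d dBm, link=%d kbps\n",
			rssi, link_kbps);
}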
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index 724f65d8f9e4..97a1f6b869f0 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -1,893 +1,392 @@
/*
- * Driver for the PN544 NFC chip.
+ * Copyright (C) 2010 Trusted Logic S.A.
*
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
*/
-#include <linux/completion.h>
-#include <linux/crc-ccitt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/nfc/pn544.h>
-#include <linux/poll.h>
-#include <linux/regulator/consumer.h>
-#include <linux/serial_core.h> /* for TCGETS */
-#include <linux/slab.h>
-
-#define DRIVER_CARD "PN544 NFC"
-#define DRIVER_DESC "NFC driver for PN544"
-
-static struct i2c_device_id pn544_id_table[] = {
- { PN544_DRIVER_NAME, 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pn544_id_table);
-
-#define HCI_MODE 0
-#define FW_MODE 1
-enum pn544_state {
- PN544_ST_COLD,
- PN544_ST_FW_READY,
- PN544_ST_READY,
+#define MAX_BUFFER_SIZE 512
+
+struct pn544_dev {
+ wait_queue_head_t read_wq;
+ struct mutex read_mutex;
+ struct i2c_client *client;
+ struct miscdevice pn544_device;
+ unsigned int ven_gpio;
+ unsigned int firm_gpio;
+ unsigned int irq_gpio;
+ bool irq_enabled;
+ spinlock_t irq_enabled_lock;
};
-enum pn544_irq {
- PN544_NONE,
- PN544_INT,
-};
-
-struct pn544_info {
- struct miscdevice miscdev;
- struct i2c_client *i2c_dev;
- struct regulator_bulk_data regs[3];
-
- enum pn544_state state;
- wait_queue_head_t read_wait;
- loff_t read_offset;
- enum pn544_irq read_irq;
- struct mutex read_mutex; /* Serialize read_irq access */
- struct mutex mutex; /* Serialize info struct access */
- u8 *buf;
- size_t buflen;
-};
-
-static const char reg_vdd_io[] = "Vdd_IO";
-static const char reg_vbat[] = "VBat";
-static const char reg_vsim[] = "VSim";
-
-/* sysfs interface */
-static ssize_t pn544_test(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pn544_info *info = dev_get_drvdata(dev);
- struct i2c_client *client = info->i2c_dev;
- struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
-}
-
-static int pn544_enable(struct pn544_info *info, int mode)
-{
- struct pn544_nfc_platform_data *pdata;
- struct i2c_client *client = info->i2c_dev;
-
- int r;
-
- r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
- if (r < 0)
- return r;
-
- pdata = client->dev.platform_data;
- info->read_irq = PN544_NONE;
- if (pdata->enable)
- pdata->enable(mode);
-
- if (mode) {
- info->state = PN544_ST_FW_READY;
- dev_dbg(&client->dev, "now in FW-mode\n");
- } else {
- info->state = PN544_ST_READY;
- dev_dbg(&client->dev, "now in HCI-mode\n");
- }
-
- usleep_range(10000, 15000);
-
- return 0;
-}
-
-static void pn544_disable(struct pn544_info *info)
-{
- struct pn544_nfc_platform_data *pdata;
- struct i2c_client *client = info->i2c_dev;
-
- pdata = client->dev.platform_data;
- if (pdata->disable)
- pdata->disable();
-
- info->state = PN544_ST_COLD;
-
- dev_dbg(&client->dev, "Now in OFF-mode\n");
-
- msleep(PN544_RESETVEN_TIME);
-
- info->read_irq = PN544_NONE;
- regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
-}
-
-static int check_crc(u8 *buf, int buflen)
+static void pn544_disable_irq(struct pn544_dev *pn544_dev)
{
- u8 len;
- u16 crc;
-
- len = buf[0] + 1;
- if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
- pr_err(PN544_DRIVER_NAME
- ": CRC; corrupt packet len %u (%d)\n", len, buflen);
- print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
- 16, 2, buf, buflen, false);
- return -EPERM;
- }
- crc = crc_ccitt(0xffff, buf, len - 2);
- crc = ~crc;
+ unsigned long flags;
- if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
- pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
- crc, buf[len-1], buf[len-2]);
-
- print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
- 16, 2, buf, buflen, false);
- return -EPERM;
+ spin_lock_irqsave(&pn544_dev->irq_enabled_lock, flags);
+ if (pn544_dev->irq_enabled) {
+ disable_irq_nosync(pn544_dev->client->irq);
+ pn544_dev->irq_enabled = false;
}
- return 0;
+ spin_unlock_irqrestore(&pn544_dev->irq_enabled_lock, flags);
}
-static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
+static irqreturn_t pn544_dev_irq_handler(int irq, void *dev_id)
{
- int r;
-
- if (len < 4 || len != (buf[0] + 1)) {
- dev_err(&client->dev, "%s: Illegal message length: %d\n",
- __func__, len);
- return -EINVAL;
- }
-
- if (check_crc(buf, len))
- return -EINVAL;
+ struct pn544_dev *pn544_dev = dev_id;
- usleep_range(3000, 6000);
-
- r = i2c_master_send(client, buf, len);
- dev_dbg(&client->dev, "send: %d\n", r);
-
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
- usleep_range(6000, 10000);
- r = i2c_master_send(client, buf, len);
- dev_dbg(&client->dev, "send2: %d\n", r);
- }
+ pn544_disable_irq(pn544_dev);
- if (r != len)
- return -EREMOTEIO;
+ /* Wake up waiting readers */
+ wake_up(&pn544_dev->read_wq);
- return r;
+ return IRQ_HANDLED;
}
-static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
+static ssize_t pn544_dev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offset)
{
- int r;
- u8 len;
-
- /*
- * You could read a packet in one go, but then you'd need to read
- * max size and rest would be 0xff fill, so we do split reads.
- */
- r = i2c_master_recv(client, &len, 1);
- dev_dbg(&client->dev, "recv1: %d\n", r);
-
- if (r != 1)
- return -EREMOTEIO;
-
- if (len < PN544_LLC_HCI_OVERHEAD)
- len = PN544_LLC_HCI_OVERHEAD;
- else if (len > (PN544_MSG_MAX_SIZE - 1))
- len = PN544_MSG_MAX_SIZE - 1;
-
- if (1 + len > buflen) /* len+(data+crc16) */
- return -EMSGSIZE;
-
- buf[0] = len;
+ struct pn544_dev *pn544_dev = filp->private_data;
+ char tmp[MAX_BUFFER_SIZE];
+ int ret;
- r = i2c_master_recv(client, buf + 1, len);
- dev_dbg(&client->dev, "recv2: %d\n", r);
+ if (count > MAX_BUFFER_SIZE)
+ count = MAX_BUFFER_SIZE;
- if (r != len)
- return -EREMOTEIO;
+ pr_debug("%s : reading %zu bytes.\n", __func__, count);
- usleep_range(3000, 6000);
+ mutex_lock(&pn544_dev->read_mutex);
- return r + 1;
-}
+ if (!gpio_get_value(pn544_dev->irq_gpio)) {
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto fail;
+ }
-static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
-{
- int r;
+ pn544_dev->irq_enabled = true;
+ enable_irq(pn544_dev->client->irq);
+ ret = wait_event_interruptible(pn544_dev->read_wq,
+ gpio_get_value(pn544_dev->irq_gpio));
- dev_dbg(&client->dev, "%s\n", __func__);
+ pn544_disable_irq(pn544_dev);
- if (len < PN544_FW_HEADER_SIZE ||
- (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
- return -EINVAL;
+ if (ret)
+ goto fail;
- r = i2c_master_send(client, buf, len);
- dev_dbg(&client->dev, "fw send: %d\n", r);
-
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
- usleep_range(6000, 10000);
- r = i2c_master_send(client, buf, len);
- dev_dbg(&client->dev, "fw send2: %d\n", r);
}
- if (r != len)
- return -EREMOTEIO;
-
- return r;
-}
-
-static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
-{
- int r, len;
-
- if (buflen < PN544_FW_HEADER_SIZE)
- return -EINVAL;
-
- r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
- dev_dbg(&client->dev, "FW recv1: %d\n", r);
-
- if (r < 0)
- return r;
-
- if (r < PN544_FW_HEADER_SIZE)
- return -EINVAL;
-
- len = (buf[1] << 8) + buf[2];
- if (len == 0) /* just header, no additional data */
- return r;
-
- if (len > buflen - PN544_FW_HEADER_SIZE)
- return -EMSGSIZE;
-
- r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
- dev_dbg(&client->dev, "fw recv2: %d\n", r);
-
- if (r != len)
- return -EINVAL;
-
- return r + PN544_FW_HEADER_SIZE;
-}
-
-static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
-{
- struct pn544_info *info = dev_id;
- struct i2c_client *client = info->i2c_dev;
-
- BUG_ON(!info);
- BUG_ON(irq != info->i2c_dev->irq);
-
- dev_dbg(&client->dev, "IRQ\n");
-
- mutex_lock(&info->read_mutex);
- info->read_irq = PN544_INT;
- mutex_unlock(&info->read_mutex);
-
- wake_up_interruptible(&info->read_wait);
+ /* Read data */
+ ret = i2c_master_recv(pn544_dev->client, tmp, count);
- return IRQ_HANDLED;
-}
-
-static enum pn544_irq pn544_irq_state(struct pn544_info *info)
-{
- enum pn544_irq irq;
-
- mutex_lock(&info->read_mutex);
- irq = info->read_irq;
- mutex_unlock(&info->read_mutex);
- /*
- * XXX: should we check GPIO-line status directly?
- * return pdata->irq_status() ? PN544_INT : PN544_NONE;
- */
-
- return irq;
-}
+ mutex_unlock(&pn544_dev->read_mutex);
-static ssize_t pn544_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- struct pn544_info *info = container_of(file->private_data,
- struct pn544_info, miscdev);
- struct i2c_client *client = info->i2c_dev;
- enum pn544_irq irq;
- size_t len;
- int r = 0;
-
- dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
- info, count);
-
- mutex_lock(&info->mutex);
-
- if (info->state == PN544_ST_COLD) {
- r = -ENODEV;
- goto out;
+ if (ret < 0) {
+ pr_err("%s: i2c_master_recv returned %d\n", __func__, ret);
+ return ret;
}
-
- irq = pn544_irq_state(info);
- if (irq == PN544_NONE) {
- if (file->f_flags & O_NONBLOCK) {
- r = -EAGAIN;
- goto out;
- }
-
- if (wait_event_interruptible(info->read_wait,
- (info->read_irq == PN544_INT))) {
- r = -ERESTARTSYS;
- goto out;
- }
+ if (ret > count) {
+ pr_err("%s: received too many bytes from i2c (%d)\n",
+ __func__, ret);
+ return -EIO;
}
-
- if (info->state == PN544_ST_FW_READY) {
- len = min(count, info->buflen);
-
- mutex_lock(&info->read_mutex);
- r = pn544_fw_read(info->i2c_dev, info->buf, len);
- info->read_irq = PN544_NONE;
- mutex_unlock(&info->read_mutex);
-
- if (r < 0) {
- dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
- goto out;
- }
-
- print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
- 16, 2, info->buf, r, false);
-
- *offset += r;
- if (copy_to_user(buf, info->buf, r)) {
- r = -EFAULT;
- goto out;
- }
- } else {
- len = min(count, info->buflen);
-
- mutex_lock(&info->read_mutex);
- r = pn544_i2c_read(info->i2c_dev, info->buf, len);
- info->read_irq = PN544_NONE;
- mutex_unlock(&info->read_mutex);
-
- if (r < 0) {
- dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
- goto out;
- }
- print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
- 16, 2, info->buf, r, false);
-
- *offset += r;
- if (copy_to_user(buf, info->buf, r)) {
- r = -EFAULT;
- goto out;
- }
+ if (copy_to_user(buf, tmp, ret)) {
+ pr_warning("%s : failed to copy to user space\n", __func__);
+ return -EFAULT;
}
+ return ret;
-out:
- mutex_unlock(&info->mutex);
-
- return r;
+fail:
+ mutex_unlock(&pn544_dev->read_mutex);
+ return ret;
}
-static unsigned int pn544_poll(struct file *file, poll_table *wait)
+static ssize_t pn544_dev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
{
- struct pn544_info *info = container_of(file->private_data,
- struct pn544_info, miscdev);
- struct i2c_client *client = info->i2c_dev;
- int r = 0;
+ struct pn544_dev *pn544_dev;
+ char tmp[MAX_BUFFER_SIZE];
+ int ret;
- dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
+ pn544_dev = filp->private_data;
- mutex_lock(&info->mutex);
+ if (count > MAX_BUFFER_SIZE)
+ count = MAX_BUFFER_SIZE;
- if (info->state == PN544_ST_COLD) {
- r = -ENODEV;
- goto out;
+ if (copy_from_user(tmp, buf, count)) {
+ pr_err("%s : failed to copy from user space\n", __func__);
+ return -EFAULT;
}
- poll_wait(file, &info->read_wait, wait);
-
- if (pn544_irq_state(info) == PN544_INT) {
- r = POLLIN | POLLRDNORM;
- goto out;
+ pr_debug("%s : writing %zu bytes.\n", __func__, count);
+ /* Write data */
+ ret = i2c_master_send(pn544_dev->client, tmp, count);
+ if (ret != count) {
+ pr_err("%s : i2c_master_send returned %d\n", __func__, ret);
+ ret = -EIO;
}
-out:
- mutex_unlock(&info->mutex);
- return r;
+ return ret;
}
-static ssize_t pn544_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static int pn544_dev_open(struct inode *inode, struct file *filp)
{
- struct pn544_info *info = container_of(file->private_data,
- struct pn544_info, miscdev);
- struct i2c_client *client = info->i2c_dev;
- ssize_t len;
- int r;
-
- dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
- info, count);
-
- mutex_lock(&info->mutex);
-
- if (info->state == PN544_ST_COLD) {
- r = -ENODEV;
- goto out;
- }
-
- /*
- * XXX: should we detect rset-writes and clean possible
- * read_irq state
- */
- if (info->state == PN544_ST_FW_READY) {
- size_t fw_len;
-
- if (count < PN544_FW_HEADER_SIZE) {
- r = -EINVAL;
- goto out;
- }
-
- len = min(count, info->buflen);
- if (copy_from_user(info->buf, buf, len)) {
- r = -EFAULT;
- goto out;
- }
-
- print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
- 16, 2, info->buf, len, false);
-
- fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
- info->buf[2];
-
- if (len > fw_len) /* 1 msg at a time */
- len = fw_len;
+ struct pn544_dev *pn544_dev = container_of(filp->private_data,
+ struct pn544_dev,
+ pn544_device);
- r = pn544_fw_write(info->i2c_dev, info->buf, len);
- } else {
- if (count < PN544_LLC_MIN_SIZE) {
- r = -EINVAL;
- goto out;
- }
-
- len = min(count, info->buflen);
- if (copy_from_user(info->buf, buf, len)) {
- r = -EFAULT;
- goto out;
- }
-
- print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
- 16, 2, info->buf, len, false);
-
- if (len > (info->buf[0] + 1)) /* 1 msg at a time */
- len = info->buf[0] + 1;
-
- r = pn544_i2c_write(info->i2c_dev, info->buf, len);
- }
-out:
- mutex_unlock(&info->mutex);
+ filp->private_data = pn544_dev;
- return r;
+ pr_debug("%s : %d,%d\n", __func__, imajor(inode), iminor(inode));
+ return 0;
}
-static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long pn544_dev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
- struct pn544_info *info = container_of(file->private_data,
- struct pn544_info, miscdev);
- struct i2c_client *client = info->i2c_dev;
- struct pn544_nfc_platform_data *pdata;
- unsigned int val;
- int r = 0;
-
- dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
+ struct pn544_dev *pn544_dev = filp->private_data;
- mutex_lock(&info->mutex);
-
- if (info->state == PN544_ST_COLD) {
- r = -ENODEV;
- goto out;
- }
-
- pdata = info->i2c_dev->dev.platform_data;
switch (cmd) {
- case PN544_GET_FW_MODE:
- dev_dbg(&client->dev, "%s: PN544_GET_FW_MODE\n", __func__);
-
- val = (info->state == PN544_ST_FW_READY);
- if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
- r = -EFAULT;
- goto out;
- }
-
- break;
-
- case PN544_SET_FW_MODE:
- dev_dbg(&client->dev, "%s: PN544_SET_FW_MODE\n", __func__);
-
- if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
- r = -EFAULT;
- goto out;
- }
-
- if (val) {
- if (info->state == PN544_ST_FW_READY)
- break;
-
- pn544_disable(info);
- r = pn544_enable(info, FW_MODE);
- if (r < 0)
- goto out;
+ case PN544_SET_PWR:
+ if (arg == 2) {
+ /* power on with firmware download (requires hw reset)
+ */
+ pr_info("%s power on with firmware\n", __func__);
+ gpio_set_value(pn544_dev->ven_gpio, 1);
+ msleep(20);
+ if (pn544_dev->firm_gpio)
+ gpio_set_value(pn544_dev->firm_gpio, 1);
+ msleep(20);
+ gpio_set_value(pn544_dev->ven_gpio, 0);
+ msleep(100);
+ gpio_set_value(pn544_dev->ven_gpio, 1);
+ msleep(20);
+ } else if (arg == 1) {
+ /* power on */
+ pr_info("%s power on\n", __func__);
+ if (pn544_dev->firm_gpio)
+ gpio_set_value(pn544_dev->firm_gpio, 0);
+ gpio_set_value(pn544_dev->ven_gpio, 1);
+ msleep(100);
+ } else if (arg == 0) {
+ /* power off */
+ pr_info("%s power off\n", __func__);
+ if (pn544_dev->firm_gpio)
+ gpio_set_value(pn544_dev->firm_gpio, 0);
+ gpio_set_value(pn544_dev->ven_gpio, 0);
+ msleep(100);
} else {
- if (info->state == PN544_ST_READY)
- break;
- pn544_disable(info);
- r = pn544_enable(info, HCI_MODE);
- if (r < 0)
- goto out;
+ pr_err("%s bad arg %lu\n", __func__, arg);
+ return -EINVAL;
}
- file->f_pos = info->read_offset;
break;
-
- case TCGETS:
- dev_dbg(&client->dev, "%s: TCGETS\n", __func__);
-
- r = -ENOIOCTLCMD;
- break;
-
default:
- dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
- r = -ENOIOCTLCMD;
- break;
- }
-
-out:
- mutex_unlock(&info->mutex);
-
- return r;
-}
-
-static int pn544_open(struct inode *inode, struct file *file)
-{
- struct pn544_info *info = container_of(file->private_data,
- struct pn544_info, miscdev);
- struct i2c_client *client = info->i2c_dev;
- int r = 0;
-
- dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
- info, info->i2c_dev);
-
- mutex_lock(&info->mutex);
-
- /*
- * Only 1 at a time.
- * XXX: maybe user (counter) would work better
- */
- if (info->state != PN544_ST_COLD) {
- r = -EBUSY;
- goto out;
+ pr_err("%s bad ioctl %u\n", __func__, cmd);
+ return -EINVAL;
}
- file->f_pos = info->read_offset;
- r = pn544_enable(info, HCI_MODE);
-
-out:
- mutex_unlock(&info->mutex);
- return r;
-}
-
-static int pn544_close(struct inode *inode, struct file *file)
-{
- struct pn544_info *info = container_of(file->private_data,
- struct pn544_info, miscdev);
- struct i2c_client *client = info->i2c_dev;
-
- dev_dbg(&client->dev, "%s: info: %p, client %p\n",
- __func__, info, info->i2c_dev);
-
- mutex_lock(&info->mutex);
- pn544_disable(info);
- mutex_unlock(&info->mutex);
-
return 0;
}
-static const struct file_operations pn544_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = pn544_read,
- .write = pn544_write,
- .poll = pn544_poll,
- .open = pn544_open,
- .release = pn544_close,
- .unlocked_ioctl = pn544_ioctl,
+static const struct file_operations pn544_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = pn544_dev_read,
+ .write = pn544_dev_write,
+ .open = pn544_dev_open,
+ .unlocked_ioctl = pn544_dev_ioctl,
};
-#ifdef CONFIG_PM
-static int pn544_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct pn544_info *info;
- int r = 0;
-
- dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
-
- info = i2c_get_clientdata(client);
- dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
- info, client);
-
- mutex_lock(&info->mutex);
-
- switch (info->state) {
- case PN544_ST_FW_READY:
- /* Do not suspend while upgrading FW, please! */
- r = -EPERM;
- break;
-
- case PN544_ST_READY:
- /*
- * CHECK: Device should be in standby-mode. No way to check?
- * Allowing low power mode for the regulator is potentially
- * dangerous if pn544 does not go to suspension.
- */
- break;
-
- case PN544_ST_COLD:
- break;
- };
-
- mutex_unlock(&info->mutex);
- return r;
-}
-
-static int pn544_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct pn544_info *info = i2c_get_clientdata(client);
- int r = 0;
-
- dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
- info, client);
-
- mutex_lock(&info->mutex);
-
- switch (info->state) {
- case PN544_ST_READY:
- /*
- * CHECK: If regulator low power mode is allowed in
- * pn544_suspend, we should go back to normal mode
- * here.
- */
- break;
-
- case PN544_ST_COLD:
- break;
-
- case PN544_ST_FW_READY:
- break;
- };
-
- mutex_unlock(&info->mutex);
-
- return r;
-}
-
-static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
-#endif
-
-static struct device_attribute pn544_attr =
- __ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
-
-static int __devinit pn544_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pn544_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- struct pn544_info *info;
- struct pn544_nfc_platform_data *pdata;
- int r = 0;
+ int ret;
+ struct pn544_i2c_platform_data *platform_data;
+ struct pn544_dev *pn544_dev;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+ platform_data = client->dev.platform_data;
- /* private data allocation */
- info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
- if (!info) {
- dev_err(&client->dev,
- "Cannot allocate memory for pn544_info.\n");
- r = -ENOMEM;
- goto err_info_alloc;
+ if (platform_data == NULL) {
+ pr_err("%s : nfc probe fail\n", __func__);
+ return -ENODEV;
}
- info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
- info->buf = kzalloc(info->buflen, GFP_KERNEL);
- if (!info->buf) {
- dev_err(&client->dev,
- "Cannot allocate memory for pn544_info->buf.\n");
- r = -ENOMEM;
- goto err_buf_alloc;
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("%s : need I2C_FUNC_I2C\n", __func__);
+ return -ENODEV;
}
- info->regs[0].supply = reg_vdd_io;
- info->regs[1].supply = reg_vbat;
- info->regs[2].supply = reg_vsim;
- r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
- info->regs);
- if (r < 0)
- goto err_kmalloc;
-
- info->i2c_dev = client;
- info->state = PN544_ST_COLD;
- info->read_irq = PN544_NONE;
- mutex_init(&info->read_mutex);
- mutex_init(&info->mutex);
- init_waitqueue_head(&info->read_wait);
- i2c_set_clientdata(client, info);
- pdata = client->dev.platform_data;
- if (!pdata) {
- dev_err(&client->dev, "No platform data\n");
- r = -EINVAL;
- goto err_reg;
+ ret = gpio_request(platform_data->irq_gpio, "nfc_int");
+ if (ret)
+ return -ENODEV;
+ ret = gpio_request(platform_data->ven_gpio, "nfc_ven");
+ if (ret)
+ goto err_ven;
+ if (platform_data->firm_gpio) {
+ ret = gpio_request(platform_data->firm_gpio, "nfc_firm");
+ if (ret)
+ goto err_firm;
}
- if (!pdata->request_resources) {
- dev_err(&client->dev, "request_resources() missing\n");
- r = -EINVAL;
- goto err_reg;
+ pn544_dev = kzalloc(sizeof(*pn544_dev), GFP_KERNEL);
+ if (pn544_dev == NULL) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ ret = -ENOMEM;
+ goto err_exit;
}
- r = pdata->request_resources(client);
- if (r) {
- dev_err(&client->dev, "Cannot get platform resources\n");
- goto err_reg;
- }
+ pn544_dev->irq_gpio = platform_data->irq_gpio;
+ pn544_dev->ven_gpio = platform_data->ven_gpio;
+ pn544_dev->firm_gpio = platform_data->firm_gpio;
+ pn544_dev->client = client;
- r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
- IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
- info);
- if (r < 0) {
- dev_err(&client->dev, "Unable to register IRQ handler\n");
- goto err_res;
+ ret = gpio_direction_input(pn544_dev->irq_gpio);
+ if (ret < 0) {
+ pr_err("%s :not able to set irq_gpio as input\n", __func__);
+ goto err_ven;
}
-
- /* If we don't have the test we don't need the sysfs file */
- if (pdata->test) {
- r = device_create_file(&client->dev, &pn544_attr);
- if (r) {
- dev_err(&client->dev,
- "sysfs registration failed, error %d\n", r);
- goto err_irq;
+ ret = gpio_direction_output(pn544_dev->ven_gpio, 0);
+ if (ret < 0) {
+ pr_err("%s : not able to set ven_gpio as output\n", __func__);
+ goto err_firm;
+ }
+ if (platform_data->firm_gpio) {
+ ret = gpio_direction_output(pn544_dev->firm_gpio, 0);
+ if (ret < 0) {
+ pr_err("%s : not able to set firm_gpio as output\n",
+ __func__);
+ goto err_exit;
}
}
- info->miscdev.minor = MISC_DYNAMIC_MINOR;
- info->miscdev.name = PN544_DRIVER_NAME;
- info->miscdev.fops = &pn544_fops;
- info->miscdev.parent = &client->dev;
- r = misc_register(&info->miscdev);
- if (r < 0) {
- dev_err(&client->dev, "Device registration failed\n");
- goto err_sysfs;
+ /* init mutex and queues */
+ init_waitqueue_head(&pn544_dev->read_wq);
+ mutex_init(&pn544_dev->read_mutex);
+ spin_lock_init(&pn544_dev->irq_enabled_lock);
+
+ pn544_dev->pn544_device.minor = MISC_DYNAMIC_MINOR;
+ pn544_dev->pn544_device.name = "pn544";
+ pn544_dev->pn544_device.fops = &pn544_dev_fops;
+
+ ret = misc_register(&pn544_dev->pn544_device);
+ if (ret) {
+ pr_err("%s : misc_register failed\n", __FILE__);
+ goto err_misc_register;
}
- dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
- __func__, info, pdata, client);
+ /* request irq. the irq is set whenever the chip has data available
+ * for reading. it is cleared when all data has been read.
+ */
+ pr_info("%s : requesting IRQ %d\n", __func__, client->irq);
+ pn544_dev->irq_enabled = true;
+ ret = request_irq(client->irq, pn544_dev_irq_handler,
+ IRQF_TRIGGER_HIGH, client->name, pn544_dev);
+ if (ret) {
+ dev_err(&client->dev, "request_irq failed\n");
+ goto err_request_irq_failed;
+ }
+ pn544_disable_irq(pn544_dev);
+ i2c_set_clientdata(client, pn544_dev);
return 0;
-err_sysfs:
- if (pdata->test)
- device_remove_file(&client->dev, &pn544_attr);
-err_irq:
- free_irq(client->irq, info);
-err_res:
- if (pdata->free_resources)
- pdata->free_resources();
-err_reg:
- regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
-err_kmalloc:
- kfree(info->buf);
-err_buf_alloc:
- kfree(info);
-err_info_alloc:
- return r;
+err_request_irq_failed:
+ misc_deregister(&pn544_dev->pn544_device);
+err_misc_register:
+ mutex_destroy(&pn544_dev->read_mutex);
+ kfree(pn544_dev);
+err_exit:
+ if (platform_data->firm_gpio)
+ gpio_free(platform_data->firm_gpio);
+err_firm:
+ gpio_free(platform_data->ven_gpio);
+err_ven:
+ gpio_free(platform_data->irq_gpio);
+ return ret;
}
-static __devexit int pn544_remove(struct i2c_client *client)
+static int pn544_remove(struct i2c_client *client)
{
- struct pn544_info *info = i2c_get_clientdata(client);
- struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
- dev_dbg(&client->dev, "%s\n", __func__);
-
- misc_deregister(&info->miscdev);
- if (pdata->test)
- device_remove_file(&client->dev, &pn544_attr);
-
- if (info->state != PN544_ST_COLD) {
- if (pdata->disable)
- pdata->disable();
-
- info->read_irq = PN544_NONE;
- }
-
- free_irq(client->irq, info);
- if (pdata->free_resources)
- pdata->free_resources();
-
- regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
- kfree(info->buf);
- kfree(info);
+ struct pn544_dev *pn544_dev;
+
+ pn544_dev = i2c_get_clientdata(client);
+ free_irq(client->irq, pn544_dev);
+ misc_deregister(&pn544_dev->pn544_device);
+ mutex_destroy(&pn544_dev->read_mutex);
+ gpio_free(pn544_dev->irq_gpio);
+ gpio_free(pn544_dev->ven_gpio);
+ if (pn544_dev->firm_gpio)
+ gpio_free(pn544_dev->firm_gpio);
+ kfree(pn544_dev);
return 0;
}
+static const struct i2c_device_id pn544_id[] = {
+ { "pn544", 0 },
+ { }
+};
+
static struct i2c_driver pn544_driver = {
- .driver = {
- .name = PN544_DRIVER_NAME,
-#ifdef CONFIG_PM
- .pm = &pn544_pm_ops,
-#endif
+ .id_table = pn544_id,
+ .probe = pn544_probe,
+ .remove = pn544_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pn544",
},
- .probe = pn544_probe,
- .id_table = pn544_id_table,
- .remove = __devexit_p(pn544_remove),
};
-static int __init pn544_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = i2c_add_driver(&pn544_driver);
- if (r) {
- pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
- return r;
- }
+/*
+ * module load/unload record keeping
+ */
- return 0;
+static int __init pn544_dev_init(void)
+{
+ pr_info("Loading pn544 driver\n");
+ return i2c_add_driver(&pn544_driver);
}
+module_init(pn544_dev_init);
-static void __exit pn544_exit(void)
+static void __exit pn544_dev_exit(void)
{
+ pr_info("Unloading pn544 driver\n");
i2c_del_driver(&pn544_driver);
- pr_info(DRIVER_DESC ", Exiting.\n");
}
+module_exit(pn544_dev_exit);
-module_init(pn544_init);
-module_exit(pn544_exit);
-
+MODULE_AUTHOR("Sylvain Fonteneau");
+MODULE_DESCRIPTION("NFC PN544 driver");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
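A note on the IRQ handling introduced above: the probe requests a level-triggered (IRQF_TRIGGER_HIGH) interrupt and immediately masks it with pn544_disable_irq(), so the line is only left enabled while a reader is actually waiting for data. The handler and disable helper are defined earlier in this patch; the sketch below is only a restatement of that gating pattern, assuming those bodies track irq_enabled under irq_enabled_lock the way the fields initialised in probe suggest.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Sketch only: mirrors the irq_enabled/irq_enabled_lock/read_wq fields
 * that pn544_probe() sets up; struct pn544_dev is the one from the patch. */
static void pn544_disable_irq_sketch(struct pn544_dev *pn544_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pn544_dev->irq_enabled_lock, flags);
	if (pn544_dev->irq_enabled) {
		/* _nosync so this is safe from within the handler itself */
		disable_irq_nosync(pn544_dev->client->irq);
		pn544_dev->irq_enabled = false;
	}
	spin_unlock_irqrestore(&pn544_dev->irq_enabled_lock, flags);
}

static irqreturn_t pn544_dev_irq_handler_sketch(int irq, void *dev_id)
{
	struct pn544_dev *pn544_dev = dev_id;

	/* Mask the level-triggered line until read() drains the chip,
	 * then wake anyone sleeping on read_wq. */
	pn544_disable_irq_sketch(pn544_dev);
	wake_up(&pn544_dev->read_wq);
	return IRQ_HANDLED;
}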
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 57de051a74b3..d70887fa2e1e 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -36,6 +36,13 @@ config MAX8925_POWER
Say Y here to enable support for the battery charger in the Maxim
MAX8925 PMIC.
+config MAX8907C_CHARGER
+ tristate "MAX8907c charger support"
+ depends on MFD_MAX8907C
+ help
+ Say Y here to enable support for the charger in the Maxim
+ MAX8907c PMIC.
+
config WM831X_BACKUP
tristate "WM831X backup battery charger support"
depends on MFD_WM831X
@@ -142,6 +149,21 @@ config BATTERY_BQ27X00_PLATFORM
help
Say Y here to enable support for batteries with BQ27000 (HDQ) chips.
+config CHARGER_TPS8003X
+ tristate "TPS8003x battery charger driver"
+ depends on MFD_TPS80031
+ default n
+ help
+ Say Y here to enable support for battery charging with TPS80031x chips.
+
+config BATTERY_GAUGE_TPS8003X
+ tristate "TPS8003x battery gauge driver"
+ depends on MFD_TPS80031
+ default n
+ help
+ Say Y here to enable support for the battery gauge on TPS80031x chips.
+
+
config BATTERY_DA9030
tristate "DA9030 battery driver"
depends on PMIC_DA903X
@@ -250,3 +272,10 @@ config CHARGER_MAX8998
platform data of MAX8998/LP3974 PMICs.
endif # POWER_SUPPLY
+
+config TEGRA_BPC_MGMT
+ tristate "Battery peak current management"
+ depends on ARCH_TEGRA
+ help
+ This driver reduces the CPU frequency/voltage on a GPIO event from the
+ battery current monitor device.
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b4af13dd8b66..82bdd18e3d81 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
+obj-$(CONFIG_CHARGER_TPS8003X) += tps80031-charger.o
+obj-$(CONFIG_BATTERY_GAUGE_TPS8003X) += tps80031_battery_gauge.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
@@ -38,3 +40,5 @@ obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_MAX8907C_CHARGER) += max8907c-charger.o
+obj-$(CONFIG_TEGRA_BPC_MGMT) += tegra_bpc_mgmt.o
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 9c5e5beda3a8..7f9dfbf61cde 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -147,7 +147,7 @@ static enum power_supply_property bq20z75_properties[] = {
struct bq20z75_info {
struct i2c_client *client;
struct power_supply power_supply;
- struct bq20z75_platform_data *pdata;
+ struct bq20z75_platform_data plat_data;
bool is_present;
bool gpio_detect;
bool enable_detection;
@@ -164,8 +164,7 @@ static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
s32 ret = 0;
int retries = 1;
- if (bq20z75_device->pdata)
- retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+ retries = max(bq20z75_device->plat_data.i2c_retry_count + 1, 1);
while (retries > 0) {
ret = i2c_smbus_read_word_data(client, address);
@@ -191,8 +190,7 @@ static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
s32 ret = 0;
int retries = 1;
- if (bq20z75_device->pdata)
- retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+ retries = max(bq20z75_device->plat_data.i2c_retry_count + 1, 1);
while (retries > 0) {
ret = i2c_smbus_write_word_data(client, address,
@@ -222,8 +220,8 @@ static int bq20z75_get_battery_presence_and_health(
if (psp == POWER_SUPPLY_PROP_PRESENT &&
bq20z75_device->gpio_detect) {
ret = gpio_get_value(
- bq20z75_device->pdata->battery_detect);
- if (ret == bq20z75_device->pdata->battery_detect_present)
+ bq20z75_device->plat_data.battery_detect);
+ if (ret == bq20z75_device->plat_data.battery_detect_present)
val->intval = 1;
else
val->intval = 0;
@@ -574,7 +572,7 @@ static void bq20z75_external_power_changed(struct power_supply *psy)
cancel_delayed_work_sync(&bq20z75_device->work);
schedule_delayed_work(&bq20z75_device->work, HZ);
- bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
+ bq20z75_device->poll_time = bq20z75_device->plat_data.poll_retry_count;
}
static void bq20z75_delayed_work(struct work_struct *work)
@@ -645,7 +643,7 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
if (pdata) {
bq20z75_device->gpio_detect =
gpio_is_valid(pdata->battery_detect);
- bq20z75_device->pdata = pdata;
+ memcpy(&bq20z75_device->plat_data, pdata, sizeof(struct bq20z75_platform_data));
}
i2c_set_clientdata(client, bq20z75_device);
@@ -724,7 +722,7 @@ static int __devexit bq20z75_remove(struct i2c_client *client)
if (bq20z75_device->irq)
free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
if (bq20z75_device->gpio_detect)
- gpio_free(bq20z75_device->pdata->battery_detect);
+ gpio_free(bq20z75_device->plat_data.battery_detect);
power_supply_unregister(&bq20z75_device->power_supply);
@@ -755,11 +753,18 @@ static int bq20z75_suspend(struct i2c_client *client,
return 0;
}
+
+static int bq20z75_resume(struct i2c_client *client)
+{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ return 0;
+}
#else
#define bq20z75_suspend NULL
-#endif
-/* any smbus transaction will wake up bq20z75 */
#define bq20z75_resume NULL
+#endif
static const struct i2c_device_id bq20z75_id[] = {
{ "bq20z75", 0 },
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index bb16f5b7e167..e392f4dc77de 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -5,6 +5,7 @@
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
* Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011 NVIDIA Corporation.
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
@@ -60,16 +61,36 @@
#define BQ27500_REG_SOC 0x2C
#define BQ27500_REG_DCAP 0x3C /* Design capacity */
#define BQ27500_FLAG_DSC BIT(0)
+#define BQ27500_FLAG_SOCF BIT(1)
+#define BQ27500_FLAG_BAT_DET BIT(3)
#define BQ27500_FLAG_FC BIT(9)
+#define BQ27500_FLAG_OTC BIT(15)
#define BQ27000_RS 20 /* Resistor sense */
+#define BQ27510_CNTL 0x00
+#define BQ27510_ATRATE 0x02
+#define BQ27510_ENERGY_AVAIL 0x22
+#define BQ27510_POWER_AVG 0x24
+
+/* bq27510-g2 control register sub-commands */
+#define BQ27510_CNTL_DEVICE_TYPE 0x0001
+#define BQ27510_CNTL_SET_SLEEP 0x0013
+#define BQ27510_CNTL_CLEAR_SLEEP 0x0014
+
+/* bq27x00 requires 3 to 4 seconds to update charging status */
+#define CHARGING_STATUS_UPDATE_DELAY_SECS 4
+
struct bq27x00_device_info;
struct bq27x00_access_methods {
int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
+ int (*ctrl_read)(struct bq27x00_device_info *di, u8 ctrl_reg,
+ u16 ctrl_func_reg);
+ int (*write)(struct bq27x00_device_info *di, u8 reg, u16 val,
+ bool single);
};
-enum bq27x00_chip { BQ27000, BQ27500 };
+enum bq27x00_chip { BQ27000, BQ27500, BQ27510 };
struct bq27x00_reg_cache {
int temperature;
@@ -94,12 +115,14 @@ struct bq27x00_device_info {
unsigned long last_update;
struct delayed_work work;
+ struct delayed_work external_power_changed_work;
struct power_supply bat;
struct bq27x00_access_methods bus;
struct mutex lock;
+ struct mutex update_lock;
};
static enum power_supply_property bq27x00_battery_props[] = {
@@ -118,6 +141,9 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_HEALTH,
};
static unsigned int poll_interval = 360;
@@ -135,6 +161,43 @@ static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
return di->bus.read(di, reg, single);
}
+static inline int bq27x00_ctrl_read(struct bq27x00_device_info *di,
+ u8 ctrl_reg, u16 ctrl_func_reg)
+{
+ return di->bus.ctrl_read(di, ctrl_reg, ctrl_func_reg);
+}
+
+static inline int bq27x00_write(struct bq27x00_device_info *di, u8 reg,
+ u16 val, bool single)
+{
+ return di->bus.write(di, reg, val, single);
+}
+
+static int bq27510_battery_health(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ if ((di->chip == BQ27500) || (di->chip == BQ27510)) {
+ ret = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+ if (ret < 0) {
+ dev_err(di->dev, "read failure\n");
+ return ret;
+ }
+
+ if (ret & BQ27500_FLAG_SOCF)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (ret & BQ27500_FLAG_OTC)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+ return 0;
+ }
+
+ return -1;
+}
+
/*
* Return the battery Relative State-of-Charge
* Or < 0 if something fails.
@@ -143,7 +206,7 @@ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
{
int rsoc;
- if (di->chip == BQ27500)
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
rsoc = bq27x00_read(di, BQ27500_REG_SOC, false);
else
rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
@@ -168,7 +231,7 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
return charge;
}
- if (di->chip == BQ27500)
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
charge *= 1000;
else
charge = charge * 3570 / BQ27000_RS;
@@ -202,7 +265,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
{
int ilmd;
- if (di->chip == BQ27500)
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
else
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
@@ -212,7 +275,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
return ilmd;
}
- if (di->chip == BQ27500)
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
ilmd *= 1000;
else
ilmd = ilmd * 256 * 3570 / BQ27000_RS;
@@ -258,8 +321,9 @@ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
static void bq27x00_update(struct bq27x00_device_info *di)
{
struct bq27x00_reg_cache cache = {0, };
- bool is_bq27500 = di->chip == BQ27500;
+ bool is_bq27500 = (di->chip == BQ27500 || di->chip == BQ27510);
+ mutex_lock(&di->update_lock);
cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
if (cache.flags >= 0) {
cache.capacity = bq27x00_battery_read_rsoc(di);
@@ -286,6 +350,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
}
di->last_update = jiffies;
+ mutex_unlock(&di->update_lock);
}
static void bq27x00_battery_poll(struct work_struct *work)
@@ -302,6 +367,14 @@ static void bq27x00_battery_poll(struct work_struct *work)
}
}
+static void bq27x00_external_power_changed_work(struct work_struct *work)
+{
+ struct bq27x00_device_info *di =
+ container_of(work, struct bq27x00_device_info,
+ external_power_changed_work.work);
+
+ bq27x00_update(di);
+}
/*
* Return the battery temperature in tenths of degree Celsius
@@ -313,7 +386,7 @@ static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
if (di->cache.temperature < 0)
return di->cache.temperature;
- if (di->chip == BQ27500)
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
val->intval = di->cache.temperature - 2731;
else
val->intval = ((di->cache.temperature * 5) - 5463) / 2;
@@ -331,15 +404,15 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di,
{
int curr;
- if (di->chip == BQ27500)
- curr = bq27x00_read(di, BQ27x00_REG_AI, false);
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
+ curr = bq27x00_read(di, BQ27x00_REG_AI, false);
else
- curr = di->cache.current_now;
+ curr = di->cache.current_now;
if (curr < 0)
return curr;
- if (di->chip == BQ27500) {
+ if ((di->chip == BQ27500) || (di->chip == BQ27510)) {
/* bq27500 returns signed value */
val->intval = (int)((s16)curr) * 1000;
} else {
@@ -359,7 +432,7 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
{
int status;
- if (di->chip == BQ27500) {
+ if ((di->chip == BQ27500) || (di->chip == BQ27510)) {
if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
else if (di->cache.flags & BQ27500_FLAG_DSC)
@@ -415,7 +488,7 @@ static int bq27x00_battery_energy(struct bq27x00_device_info *di,
return ae;
}
- if (di->chip == BQ27500)
+ if ((di->chip == BQ27500) || (di->chip == BQ27510))
ae *= 1000;
else
ae = ae * 29200 / BQ27000_RS;
@@ -437,6 +510,58 @@ static int bq27x00_simple_value(int value,
return 0;
}
+static int bq27510_battery_present(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+ if (ret < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return ret;
+ }
+
+ if (ret & BQ27500_FLAG_BAT_DET)
+ val->intval = 1;
+ else
+ val->intval = 0;
+
+ return 0;
+}
+
+static char bq27510_serial[5];
+static int bq27510_get_battery_serial_number(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ if (di->chip == BQ27510) {
+ ret = bq27x00_ctrl_read(di, BQ27510_CNTL,
+ BQ27510_CNTL_DEVICE_TYPE);
+ ret = sprintf(bq27510_serial, "%04x", ret);
+ val->strval = bq27510_serial;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static int bq27510_battery_power_avg(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int ret;
+ if (di->chip == BQ27510) {
+ ret = bq27x00_read(di, BQ27510_POWER_AVG, false);
+ if (ret < 0) {
+ dev_err(di->dev, "read failure\n");
+ return ret;
+ }
+ val->intval = ret;
+ return 0;
+ }
+ return -1;
+}
+
#define to_bq27x00_device_info(x) container_of((x), \
struct bq27x00_device_info, bat);
@@ -465,7 +590,7 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
ret = bq27x00_battery_voltage(di, val);
break;
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = di->cache.flags < 0 ? 0 : 1;
+ ret = bq27510_battery_present(di, val);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = bq27x00_battery_current(di, val);
@@ -503,6 +628,16 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ENERGY_NOW:
ret = bq27x00_battery_energy(di, val);
break;
+ case POWER_SUPPLY_PROP_POWER_AVG:
+ ret = bq27510_battery_power_avg(di, val);
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ if (bq27510_get_battery_serial_number(di, val))
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq27510_battery_health(di, val);
+ break;
default:
return -EINVAL;
}
@@ -510,12 +645,19 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
return ret;
}
+static unsigned int charging_update_delay_secs =
+ CHARGING_STATUS_UPDATE_DELAY_SECS;
+module_param(charging_update_delay_secs, uint, 0644);
+MODULE_PARM_DESC(charging_update_delay_secs, "battery charging "
+ "status update delay in seconds");
+
static void bq27x00_external_power_changed(struct power_supply *psy)
{
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
+ cancel_delayed_work_sync(&di->external_power_changed_work);
+ schedule_delayed_work(&di->external_power_changed_work,
+ charging_update_delay_secs * HZ);
}
static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
@@ -529,7 +671,10 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
di->bat.external_power_changed = bq27x00_external_power_changed;
INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll);
+ INIT_DELAYED_WORK(&di->external_power_changed_work,
+ bq27x00_external_power_changed_work);
mutex_init(&di->lock);
+ mutex_init(&di->update_lock);
ret = power_supply_register(di->dev, &di->bat);
if (ret) {
@@ -547,10 +692,11 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
{
cancel_delayed_work_sync(&di->work);
-
+ cancel_delayed_work_sync(&di->external_power_changed_work);
power_supply_unregister(&di->bat);
mutex_destroy(&di->lock);
+ mutex_destroy(&di->update_lock);
}
@@ -597,12 +743,55 @@ static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single)
return ret;
}
+static int bq27x00_write_i2c(struct bq27x00_device_info *di, u8 reg,
+ u16 val, bool single)
+{
+ struct i2c_client *client = to_i2c_client(di->dev);
+ unsigned char i2c_data[3];
+ int ret, len;
+
+ i2c_data[0] = reg;
+ i2c_data[1] = val & 0xff;
+
+ if (single) {
+ len = 2;
+ } else {
+ i2c_data[2] = (val >> 8) & 0xff;
+ len = 3;
+ }
+
+ ret = i2c_master_send(client, i2c_data, len);
+ if (ret == len)
+ return 0;
+
+ return (ret < 0) ? ret : -EIO;
+}
+
+static int bq27x00_ctrl_read_i2c(struct bq27x00_device_info *di,
+ u8 ctrl_reg, u16 ctrl_func_reg)
+{
+ int ret = bq27x00_write(di, ctrl_reg, ctrl_func_reg, false);
+ if (ret < 0) {
+ dev_err(di->dev, "write failure\n");
+ return ret;
+ }
+
+ ret = bq27x00_read(di, ctrl_reg, false);
+ if (ret < 0) {
+ dev_err(di->dev, "read failure\n");
+ return ret;
+ }
+
+ return ret;
+}
+
static int bq27x00_battery_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
char *name;
struct bq27x00_device_info *di;
int num;
+ u16 read_data;
int retval = 0;
/* Get new ID for the new battery device */
@@ -634,11 +823,29 @@ static int bq27x00_battery_probe(struct i2c_client *client,
di->chip = id->driver_data;
di->bat.name = name;
di->bus.read = &bq27x00_read_i2c;
+ di->bus.ctrl_read = &bq27x00_ctrl_read_i2c;
+ di->bus.write = &bq27x00_write_i2c;
+
+ i2c_set_clientdata(client, di);
- if (bq27x00_powersupply_init(di))
+ /* Let's see whether this adapter can support what we need. */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "insufficient functionality!\n");
+ retval = -ENODEV;
goto batt_failed_3;
+ }
- i2c_set_clientdata(client, di);
+ read_data = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+
+ if (!(read_data & BQ27500_FLAG_BAT_DET)) {
+ dev_err(&client->dev, "no battery present\n");
+ retval = -ENODEV;
+ goto batt_failed_3;
+ }
+
+ retval = bq27x00_powersupply_init(di);
+ if (retval < 0)
+ goto batt_failed_3;
return 0;
@@ -671,20 +878,76 @@ static int bq27x00_battery_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int bq27x00_battery_suspend(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bq27x00_device_info *di = platform_get_drvdata(pdev);
+
+ if (di->chip == BQ27510) {
+ ret = bq27x00_write(di, BQ27510_CNTL,
+ BQ27510_CNTL_SET_SLEEP, false);
+ if (ret < 0) {
+ dev_err(di->dev, "write failure\n");
+ return ret;
+ }
+ ret = bq27x00_write(di, BQ27510_CNTL, 0x01, false);
+ if (ret < 0) {
+ dev_err(di->dev, "write failure\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int bq27x00_battery_resume(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bq27x00_device_info *di = platform_get_drvdata(pdev);
+
+ if (di->chip == BQ27510) {
+ ret = bq27x00_write(di, BQ27510_CNTL,
+ BQ27510_CNTL_CLEAR_SLEEP, false);
+ if (ret < 0) {
+ dev_err(di->dev, "write failure\n");
+ return ret;
+ }
+ ret = bq27x00_write(di, BQ27510_CNTL, 0x01, false);
+ if (ret < 0) {
+ dev_err(di->dev, "write failure\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops bq27x00_battery_pm_ops = {
+ .suspend = bq27x00_battery_suspend,
+ .resume = bq27x00_battery_resume,
+};
+
+#endif
+
static const struct i2c_device_id bq27x00_id[] = {
{ "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
{ "bq27500", BQ27500 },
+ { "bq27510", BQ27510 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq27x00_id);
static struct i2c_driver bq27x00_battery_driver = {
+ .probe = bq27x00_battery_probe,
+ .remove = bq27x00_battery_remove,
+ .id_table = bq27x00_id,
.driver = {
.name = "bq27x00-battery",
+#if defined(CONFIG_PM)
+ .pm = &bq27x00_battery_pm_ops,
+#endif
},
- .probe = bq27x00_battery_probe,
- .remove = bq27x00_battery_remove,
- .id_table = bq27x00_id,
};
static inline int bq27x00_battery_i2c_init(void)
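The ctrl_read path added above fetches 16-bit Control() data by writing a sub-command to register 0x00 and then reading the same register back; this is how bq27510_get_battery_serial_number() obtains DEVICE_TYPE (sub-command 0x0001). A minimal illustrative equivalent using raw SMBus word transfers is shown below; the driver itself goes through its di->bus.write/di->bus.read helpers, so the i2c_smbus_* calls here are only a stand-in, not the patch's code path.

#include <linux/i2c.h>

#define BQ27510_CNTL_SKETCH	0x00	/* Control() register, as in the patch */

/* Illustrative only: same write-then-read sequence as bq27x00_ctrl_read_i2c(). */
static int bq27510_control_read_sketch(struct i2c_client *client, u16 subcmd)
{
	int ret;

	/* 1) Write the sub-command (e.g. 0x0001 = DEVICE_TYPE) to Control(). */
	ret = i2c_smbus_write_word_data(client, BQ27510_CNTL_SKETCH, subcmd);
	if (ret < 0)
		return ret;

	/* 2) Read Control() back; the gauge latches the 16-bit result there. */
	return i2c_smbus_read_word_data(client, BQ27510_CNTL_SKETCH);
}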
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index a64b8854cfd5..020b415ef598 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -54,7 +54,7 @@ static int gpio_charger_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = gpio_get_value(pdata->gpio);
+ val->intval = gpio_get_value_cansleep(pdata->gpio);
val->intval ^= pdata->gpio_active_low;
break;
default:
diff --git a/drivers/power/max8907c-charger.c b/drivers/power/max8907c-charger.c
new file mode 100644
index 000000000000..64855c589b15
--- /dev/null
+++ b/drivers/power/max8907c-charger.c
@@ -0,0 +1,228 @@
+/*
+ * Battery driver for Maxim MAX8907C
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8907c.h>
+#include <linux/power/max8907c-charger.h>
+#include <linux/slab.h>
+
+struct max8907c_charger {
+ struct max8907c_charger_pdata *pdata;
+ struct max8907c *chip;
+ struct i2c_client *i2c;
+ int online;
+};
+
+static void max8907c_set_charger(struct max8907c_charger *charger)
+{
+ struct max8907c_charger_pdata *pdata = charger->pdata;
+ int ret;
+ if (charger->online) {
+ ret = max8907c_reg_write(charger->i2c, MAX8907C_REG_CHG_CNTL1,
+ (pdata->topoff_threshold << 5) |
+ (pdata->restart_hysteresis << 3) |
+ (pdata->fast_charging_current));
+ if (unlikely(ret != 0))
+ pr_err("Failed to set CHG_CNTL1: %d\n", ret);
+
+ ret = max8907c_set_bits(charger->i2c, MAX8907C_REG_CHG_CNTL2,
+ 0x30, pdata->fast_charger_time << 4);
+ if (unlikely(ret != 0))
+ pr_err("Failed to set CHG_CNTL2: %d\n", ret);
+ } else {
+ ret = max8907c_set_bits(charger->i2c, MAX8907C_REG_CHG_CNTL1, 0x80, 0x1);
+ if (unlikely(ret != 0))
+ pr_err("Failed to set CHG_CNTL1: %d\n", ret);
+ }
+}
+
+static irqreturn_t max8907c_charger_isr(int irq, void *dev_id)
+{
+ struct max8907c_charger *charger = dev_id;
+ struct max8907c *chip = charger->chip;
+
+ switch (irq - chip->irq_base) {
+ case MAX8907C_IRQ_VCHG_DC_R:
+ charger->online = 1;
+ max8907c_set_charger(charger);
+ break;
+ case MAX8907C_IRQ_VCHG_DC_F:
+ charger->online = 0;
+ max8907c_set_charger(charger);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int max8907c_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ static const int types[] = {
+ POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
+ POWER_SUPPLY_CHARGE_TYPE_FAST,
+ POWER_SUPPLY_CHARGE_TYPE_FAST,
+ POWER_SUPPLY_CHARGE_TYPE_NONE,
+ };
+ int ret = -ENODEV;
+ int status;
+
+ struct max8907c_charger *charger = dev_get_drvdata(psy->dev->parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = charger->online;
+ ret = 0;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ /* Get charger status from CHG_EN_STAT */
+ status = max8907c_reg_read(charger->i2c, MAX8907C_REG_CHG_STAT);
+ val->intval = ((status & 0x10) == 0x10) ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ ret = 0;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ /* Get charging type from CHG_MODE */
+ status = max8907c_reg_read(charger->i2c, MAX8907C_REG_CHG_STAT);
+ val->intval = types[(status & 0x0C) >> 2];
+ ret = 0;
+ break;
+
+ default:
+ val->intval = 0;
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static enum power_supply_property max8907c_charger_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+};
+
+static struct power_supply max8907c_charger_ps = {
+ .name = "charger",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = max8907c_charger_props,
+ .num_properties = ARRAY_SIZE(max8907c_charger_props),
+ .get_property = max8907c_charger_get_property,
+};
+
+static __devinit int max8907c_charger_probe(struct platform_device *pdev)
+{
+ struct max8907c_charger_pdata *pdata = pdev->dev.platform_data;
+ struct max8907c_charger *charger = 0;
+ struct max8907c *chip = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ charger = kzalloc(sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->pdata = pdata;
+ charger->online = 0;
+ charger->chip = chip;
+ charger->i2c = chip->i2c_power;
+
+ platform_set_drvdata(pdev, charger);
+
+ ret = max8907c_reg_read(charger->i2c, MAX8907C_REG_CHG_STAT);
+ if (ret & (1 << 7)) {
+ charger->online = 1;
+ max8907c_set_charger(charger);
+ }
+
+ ret = request_threaded_irq(chip->irq_base + MAX8907C_IRQ_VCHG_DC_F, NULL,
+ max8907c_charger_isr, IRQF_ONESHOT,
+ "power-remove", charger);
+ if (unlikely(ret < 0)) {
+ pr_err("max8907c: failed to request IRQ: %d\n", ret);
+ goto out;
+ }
+
+ ret = request_threaded_irq(chip->irq_base + MAX8907C_IRQ_VCHG_DC_R, NULL,
+ max8907c_charger_isr, IRQF_ONESHOT,
+ "power-insert", charger);
+ if (unlikely(ret < 0)) {
+ pr_err("max8907c: failed to request IRQ: %d\n", ret);
+ goto out1;
+ }
+
+
+ ret = power_supply_register(&pdev->dev, &max8907c_charger_ps);
+ if (unlikely(ret != 0)) {
+ pr_err("Failed to register max8907c_charger driver: %d\n", ret);
+ goto out2;
+ }
+
+ return 0;
+out2:
+ free_irq(chip->irq_base + MAX8907C_IRQ_VCHG_DC_R, charger);
+out1:
+ free_irq(chip->irq_base + MAX8907C_IRQ_VCHG_DC_F, charger);
+out:
+ kfree(charger);
+ return ret;
+}
+
+static __devexit int max8907c_charger_remove(struct platform_device *pdev)
+{
+ struct max8907c_charger *charger = platform_get_drvdata(pdev);
+ struct max8907c *chip = charger->chip;
+ int ret;
+
+ ret = max8907c_reg_write(charger->i2c, MAX8907C_REG_CHG_IRQ1_MASK, 0xFF);
+ if (unlikely(ret != 0)) {
+ pr_err("Failed to set IRQ1_MASK: %d\n", ret);
+ goto out;
+ }
+
+ free_irq(chip->irq_base + MAX8907C_IRQ_VCHG_DC_R, charger);
+ free_irq(chip->irq_base + MAX8907C_IRQ_VCHG_DC_F, charger);
+ power_supply_unregister(&max8907c_charger_ps);
+out:
+ kfree(charger);
+ return 0;
+}
+
+static struct platform_driver max8907c_charger_driver = {
+ .probe = max8907c_charger_probe,
+ .remove = __devexit_p(max8907c_charger_remove),
+ .driver = {
+ .name = "max8907c-charger",
+ },
+};
+
+static int __init max8907c_charger_init(void)
+{
+ return platform_driver_register(&max8907c_charger_driver);
+}
+module_init(max8907c_charger_init);
+
+static void __exit max8907c_charger_exit(void)
+{
+ platform_driver_unregister(&max8907c_charger_driver);
+}
+module_exit(max8907c_charger_exit);
+
+MODULE_DESCRIPTION("Charger driver for MAX8907C");
+MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@maxim-ic.com>");
+MODULE_LICENSE("GPL");
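For reference, max8907c_charger_get_property() derives everything from a single CHG_STAT read: probe checks bit 7 for a present DC input, bit 4 reports whether charging is active, and bits 3:2 (CHG_MODE) index the types[] table. The helper below only restates the masks the code above already uses; the bit names are inferred from that usage, not taken from a datasheet.

#include <linux/types.h>
#include <linux/printk.h>

/* Example decode of a CHG_STAT value, using the same masks as the driver. */
static void max8907c_decode_chg_stat_sketch(u8 status)
{
	bool dc_present = status & (1 << 7);		/* checked once at probe time */
	bool charging = (status & 0x10) == 0x10;	/* -> POWER_SUPPLY_STATUS_CHARGING */
	u8 chg_mode = (status & 0x0C) >> 2;		/* 0..3, indexes types[] */

	pr_debug("dc=%d charging=%d chg_mode=%u\n", dc_present, charging, chg_mode);
}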
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 69f8aa3a6a4b..81b720107c3a 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/power_supply.h>
#include <linux/pda_power.h>
#include <linux/regulator/consumer.h>
@@ -38,9 +39,8 @@ static struct timer_list supply_timer;
static struct timer_list polling_timer;
static int polling;
-#ifdef CONFIG_USB_OTG_UTILS
static struct otg_transceiver *transceiver;
-#endif
+static struct notifier_block otg_nb;
static struct regulator *ac_draw;
enum {
@@ -222,7 +222,42 @@ static void polling_timer_func(unsigned long unused)
#ifdef CONFIG_USB_OTG_UTILS
static int otg_is_usb_online(void)
{
- return (transceiver->state == OTG_STATE_B_PERIPHERAL);
+ return (transceiver->last_event == USB_EVENT_VBUS ||
+ transceiver->last_event == USB_EVENT_ENUMERATED);
+}
+
+static int otg_is_ac_online(void)
+{
+ return (transceiver->last_event == USB_EVENT_CHARGER);
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ switch (event) {
+ case USB_EVENT_CHARGER:
+ ac_status = PDA_PSY_TO_CHANGE;
+ break;
+ case USB_EVENT_VBUS:
+ case USB_EVENT_ENUMERATED:
+ usb_status = PDA_PSY_TO_CHANGE;
+ break;
+ case USB_EVENT_NONE:
+ ac_status = PDA_PSY_TO_CHANGE;
+ usb_status = PDA_PSY_TO_CHANGE;
+ break;
+ default:
+ return NOTIFY_OK;
+ }
+
+ /*
+ * Wait a bit before reading ac/usb line status and setting charger,
+ * because ac/usb status readings may lag from irq.
+ */
+ mod_timer(&charger_timer,
+ jiffies + msecs_to_jiffies(pdata->wait_for_status));
+
+ return NOTIFY_OK;
}
#endif
@@ -282,6 +317,14 @@ static int pda_power_probe(struct platform_device *pdev)
ret = PTR_ERR(ac_draw);
}
+ transceiver = otg_get_transceiver();
+ if (transceiver && !pdata->is_usb_online) {
+ pdata->is_usb_online = otg_is_usb_online;
+ }
+ if (transceiver && !pdata->is_ac_online) {
+ pdata->is_ac_online = otg_is_ac_online;
+ }
+
if (pdata->is_ac_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_ac);
if (ret) {
@@ -303,13 +346,6 @@ static int pda_power_probe(struct platform_device *pdev)
}
}
-#ifdef CONFIG_USB_OTG_UTILS
- transceiver = otg_get_transceiver();
- if (transceiver && !pdata->is_usb_online) {
- pdata->is_usb_online = otg_is_usb_online;
- }
-#endif
-
if (pdata->is_usb_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_usb);
if (ret) {
@@ -331,6 +367,16 @@ static int pda_power_probe(struct platform_device *pdev)
}
}
+ if (transceiver && pdata->use_otg_notifier) {
+ otg_nb.notifier_call = otg_handle_notification;
+ ret = otg_register_notifier(transceiver, &otg_nb);
+ if (ret) {
+ dev_err(dev, "failed to register OTG notifier\n");
+ goto otg_reg_notifier_failed;
+ }
+ polling = 0;
+ }
+
if (polling) {
dev_dbg(dev, "will poll for status\n");
setup_timer(&polling_timer, polling_timer_func, 0);
@@ -343,16 +389,17 @@ static int pda_power_probe(struct platform_device *pdev)
return 0;
+otg_reg_notifier_failed:
+ if (pdata->is_usb_online && usb_irq)
+ free_irq(usb_irq->start, &pda_psy_usb);
usb_irq_failed:
if (pdata->is_usb_online)
power_supply_unregister(&pda_psy_usb);
usb_supply_failed:
if (pdata->is_ac_online && ac_irq)
free_irq(ac_irq->start, &pda_psy_ac);
-#ifdef CONFIG_USB_OTG_UTILS
if (transceiver)
otg_put_transceiver(transceiver);
-#endif
ac_irq_failed:
if (pdata->is_ac_online)
power_supply_unregister(&pda_psy_ac);
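The notifier path added above replaces polling when use_otg_notifier is set: the supplies are re-read only after the transceiver reports an event, and even then the read is deferred through charger_timer because the VBUS/charger line status can lag the interrupt. The producing side lives in the OTG transceiver driver, not in this diff; the sketch below assumes the notifier and last_event members of struct otg_transceiver that this patch registers against.

#include <linux/notifier.h>
#include <linux/usb/otg.h>

/* Sketch of the event source: a transceiver driver reporting a wall charger. */
static void example_report_charger(struct otg_transceiver *xceiv)
{
	xceiv->last_event = USB_EVENT_CHARGER;
	atomic_notifier_call_chain(&xceiv->notifier, USB_EVENT_CHARGER, NULL);
}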
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 329b46b2327d..03810ce5633f 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -41,23 +41,40 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
+ unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ if (psy->changed) {
+ psy->changed = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
- power_supply_update_leds(psy);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
- kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ power_supply_update_leds(psy);
+
+ kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+ if (!psy->changed)
+ wake_unlock(&psy->work_wake_lock);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
+ unsigned long flags;
+
dev_dbg(psy->dev, "%s\n", __func__);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->changed = true;
+ wake_lock(&psy->work_wake_lock);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -181,6 +198,9 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto device_add_failed;
+ spin_lock_init(&psy->changed_lock);
+ wake_lock_init(&psy->work_wake_lock, WAKE_LOCK_SUSPEND, "power-supply");
+
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
@@ -190,6 +210,7 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto success;
create_triggers_failed:
+ wake_lock_destroy(&psy->work_wake_lock);
device_del(dev);
kobject_set_name_failed:
device_add_failed:
@@ -203,6 +224,7 @@ void power_supply_unregister(struct power_supply *psy)
{
cancel_work_sync(&psy->changed_work);
power_supply_remove_triggers(psy);
+ wake_lock_destroy(&psy->work_wake_lock);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
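The changed flag and work_wake_lock pair above exist so that a state change reported from atomic context is not lost across suspend: power_supply_changed() marks the supply dirty and takes the wake lock, and the lock is dropped only after changed_work has emitted the uevent. A minimal sketch of the producing side follows; the ISR name is hypothetical, only power_supply_changed() is from this patch.

#include <linux/interrupt.h>
#include <linux/power_supply.h>

/* Hypothetical charger ISR: power_supply_changed() is safe in atomic context,
 * since it only sets the changed flag under an irqsave spinlock, grabs the
 * wake lock and schedules changed_work. */
static irqreturn_t example_charger_isr(int irq, void *dev_id)
{
	struct power_supply *psy = dev_id;

	power_supply_changed(psy);
	return IRQ_HANDLED;
}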
diff --git a/drivers/power/tegra_bpc_mgmt.c b/drivers/power/tegra_bpc_mgmt.c
new file mode 100644
index 000000000000..0d9ddeee282e
--- /dev/null
+++ b/drivers/power/tegra_bpc_mgmt.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqflags.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/platform_data/tegra_bpc_mgmt.h>
+
+#include <mach/edp.h>
+
+static irqreturn_t tegra_bpc_mgmt_bh(int irq, void *data)
+{
+ int gpio_val = 0;
+ struct tegra_bpc_mgmt_platform_data *bpc_platform_data;
+ bpc_platform_data = (struct tegra_bpc_mgmt_platform_data *)data;
+
+ tegra_system_edp_alarm(true);
+ /*
+ * Keep checking until the peak-current event has passed.
+ */
+ while (!gpio_val) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(
+ bpc_platform_data->bpc_mgmt_timeout));
+
+ gpio_val = gpio_get_value(bpc_platform_data->gpio_trigger);
+ }
+
+ tegra_system_edp_alarm(false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_bpc_mgmt_isr(int irq, void *data)
+{
+ tegra_edp_throttle_cpu_now(2);
+ return IRQ_WAKE_THREAD;
+}
+
+static __devinit int tegra_bpc_mgmt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct task_struct *bh_thread;
+ struct irq_desc *bat_desc;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ struct tegra_bpc_mgmt_platform_data *bpc_platform_data;
+
+ bpc_platform_data = pdev->dev.platform_data;
+ if (!bpc_platform_data)
+ return -ENODEV;
+
+ if (gpio_is_valid(bpc_platform_data->gpio_trigger)) {
+ ret = gpio_request(bpc_platform_data->gpio_trigger,
+ "tegra-bpc-mgmt");
+
+ if (ret < 0) {
+ pr_err("BPC: GPIO request failed\n");
+ return -ENODEV;
+ }
+ } else {
+ pr_err("BPC: GPIO check failed, gpio %d\n",
+ bpc_platform_data->gpio_trigger);
+ return -ENODEV;
+ }
+
+ gpio_direction_input(bpc_platform_data->gpio_trigger);
+
+ ret = request_threaded_irq(
+ gpio_to_irq(bpc_platform_data->gpio_trigger),
+ tegra_bpc_mgmt_isr,
+ tegra_bpc_mgmt_bh, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "tegra-bpc-mgmt", bpc_platform_data);
+ if (ret < 0) {
+ pr_err("BPC:IRQ Installation failed\n");
+ return -ENODEV;
+ }
+ bat_desc = irq_to_desc(
+ gpio_to_irq(bpc_platform_data->gpio_trigger));
+
+ if (bat_desc) {
+ bh_thread = bat_desc->action->thread;
+ if (bh_thread)
+ sched_setscheduler_nocheck(bh_thread,
+ SCHED_FIFO, &param);
+ }
+
+ return 0;
+}
+
+static __devexit int tegra_bpc_mgmt_remove(struct platform_device *pdev)
+{
+ struct tegra_bpc_mgmt_platform_data *bpc_platform_data;
+ bpc_platform_data = pdev->dev.platform_data;
+ free_irq(gpio_to_irq(bpc_platform_data->gpio_trigger), bpc_platform_data);
+ return 0;
+}
+
+static struct platform_driver tegra_bpc_mgmt_driver = {
+ .probe = tegra_bpc_mgmt_probe,
+ .remove = tegra_bpc_mgmt_remove,
+ .driver = {
+ .name = "tegra-bpc-mgmt",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_bpc_mgmt_init(void)
+{
+ return platform_driver_register(&tegra_bpc_mgmt_driver);
+}
+
+static void __exit tegra_bpc_mgmt_exit(void)
+{
+ platform_driver_unregister(&tegra_bpc_mgmt_driver);
+}
+
+module_init(tegra_bpc_mgmt_init);
+module_exit(tegra_bpc_mgmt_exit);
+
+MODULE_DESCRIPTION("Tegra battery peak current management");
+MODULE_AUTHOR("NVIDIA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/tps80031-charger.c b/drivers/power/tps80031-charger.c
new file mode 100644
index 000000000000..3da8eef2e01e
--- /dev/null
+++ b/drivers/power/tps80031-charger.c
@@ -0,0 +1,471 @@
+/*
+ * drivers/power/tps80031_charger.c
+ *
+ * Battery charger driver for TI's tps80031
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/tps80031-charger.h>
+
+#define CONTROLLER_CTRL1 0xe1
+#define CONTROLLER_STAT1 0xe3
+#define CHARGERUSB_CTRL2 0xe9
+#define CHARGERUSB_CTRL3 0xea
+#define CHARGERUSB_VOREG 0xec
+#define CHARGERUSB_VICHRG 0xed
+#define CHARGERUSB_CINLIMIT 0xee
+#define CHARGERUSB_CTRLLIMIT2 0xf0
+#define CHARGERUSB_CTRLLIMIT1 0xef
+#define CHARGERUSB_VICHRG_PC 0xdd
+#define CONTROLLER_WDG 0xe2
+#define LINEAR_CHRG_STS 0xde
+
+#define TPS80031_VBUS_DET BIT(2)
+#define TPS80031_VAC_DET BIT(3)
+
+struct tps80031_charger {
+ int max_charge_current_mA;
+ int max_charge_volt_mV;
+ struct device *dev;
+ struct regulator_dev *rdev;
+ struct regulator_desc reg_desc;
+ struct regulator_init_data reg_init_data;
+ struct tps80031_charger_platform_data *pdata;
+ int (*board_init)(void *board_data);
+ void *board_data;
+ int irq_base;
+ int watch_time_sec;
+ enum charging_states state;
+ int charging_term_current_mA;
+ charging_callback_t charger_cb;
+ void *charger_cb_data;
+};
+
+static struct tps80031_charger *charger_data;
+
+static int set_charge_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct tps80031_charger *charger = rdev_get_drvdata(rdev);
+ int max_vbus_current = 1500;
+ int max_charge_current = 1500;
+ int ret;
+
+ dev_info(charger->dev, "%s(): Min curr %dmA and max current %dmA\n",
+ __func__, min_uA/1000, max_uA/1000);
+
+ if (!max_uA) {
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_CTRL1, 0x0);
+ if (ret < 0)
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_CTRL1);
+
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_WDG, 0x0);
+ if (ret < 0)
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_WDG);
+ charger->state = charging_state_charging_stopped;
+ if (charger->charger_cb)
+ charger->charger_cb(charger->state,
+ charger->charger_cb_data);
+ return ret;
+ }
+
+ max_vbus_current = min(max_uA/1000, max_vbus_current);
+ max_vbus_current = max_vbus_current/50;
+ if (max_vbus_current)
+ max_vbus_current--;
+ ret = tps80031_update(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_CINLIMIT,
+ (uint8_t)max_vbus_current, 0x3F);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_CINLIMIT);
+ return ret;
+ }
+
+ max_charge_current = min(max_uA/1000, max_charge_current);
+ if (max_charge_current <= 300)
+ max_charge_current = 0;
+ else if ((max_charge_current > 300) && (max_charge_current <= 500))
+ max_charge_current = (max_charge_current - 300)/50;
+ else
+ max_charge_current = (max_charge_current - 500) / 100 + 4;
+ ret = tps80031_update(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_VICHRG, (uint8_t)max_charge_current, 0xF);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_VICHRG);
+ return ret;
+ }
+
+ /* Enable watchdog timer */
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_WDG, charger->watch_time_sec);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_WDG);
+ return ret;
+ }
+
+ /* Enable the charging */
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_CTRL1, 0x30);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_CTRL1);
+ return ret;
+ }
+ charger->state = charging_state_charging_in_progress;
+ if (charger->charger_cb)
+ charger->charger_cb(charger->state,
+ charger->charger_cb_data);
+ return 0;
+}
+
+static struct regulator_ops tegra_regulator_ops = {
+ .set_current_limit = set_charge_current_limit,
+};
+
+int register_charging_state_callback(charging_callback_t cb, void *args)
+{
+ struct tps80031_charger *charger = charger_data;
+ if (!charger_data)
+ return -ENODEV;
+
+ charger->charger_cb = cb;
+ charger->charger_cb_data = args;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_charging_state_callback);
+
+static int configure_charging_parameter(struct tps80031_charger *charger)
+{
+ int ret;
+ int max_charge_current;
+ int max_charge_volt;
+ int term_current;
+
+ /* Disable watchdog timer */
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_WDG, 0x0);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_WDG);
+ return ret;
+ }
+
+ /* Disable the charging if any */
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_CTRL1, 0x0);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_CTRL1);
+ return ret;
+ }
+
+ if (charger->board_init) {
+ ret = charger->board_init(charger->board_data);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in board init\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /* Unlock value */
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_CTRLLIMIT2, 0);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_CTRLLIMIT2);
+ return ret;
+ }
+
+ /* Set max current limit */
+ max_charge_current = min(1500, charger->max_charge_current_mA);
+ if (max_charge_current < 100)
+ max_charge_current = 0;
+ else
+ max_charge_current = (max_charge_current - 100)/100;
+ max_charge_current &= 0xF;
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_CTRLLIMIT2, (uint8_t)max_charge_current);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register "
+ "0x%02x\n", __func__, CHARGERUSB_CTRLLIMIT2);
+ return ret;
+ }
+
+ /* Set max voltage limit */
+ max_charge_volt = min(4760, charger->max_charge_volt_mV);
+ max_charge_volt = max(3500, max_charge_volt);
+ max_charge_volt -= 3500;
+ max_charge_volt = max_charge_volt/20;
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_CTRLLIMIT1, (uint8_t)max_charge_volt);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_CTRLLIMIT1);
+ return ret;
+ }
+
+ /* Lock value */
+ ret = tps80031_set_bits(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_CTRLLIMIT2, (1 << 4));
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_CTRLLIMIT2);
+ return ret;
+ }
+
+ /* Set pre-charge current to 400 mA */
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2, CHARGERUSB_VICHRG_PC, 0x3);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_VICHRG_PC);
+ return ret;
+ }
+
+ /* Set charging termination current */
+ if (charger->charging_term_current_mA > 400)
+ term_current = 7;
+ else
+ term_current = (charger->charging_term_current_mA - 50)/50;
+ term_current = term_current << 5;
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CHARGERUSB_CTRL2, term_current);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CHARGERUSB_CTRL2);
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t linch_status_isr(int irq, void *dev_id)
+{
+ struct tps80031_charger *charger = dev_id;
+ uint8_t linch_status;
+ int ret;
+ dev_info(charger->dev, "%s() got called\n", __func__);
+
+ ret = tps80031_read(charger->dev->parent, SLAVE_ID2,
+ LINEAR_CHRG_STS, &linch_status);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s(): Failed in reading register 0x%02x\n",
+ __func__, LINEAR_CHRG_STS);
+ } else {
+ dev_info(charger->dev, "%s():The status of LINEAR_CHRG_STS is 0x%02x\n",
+ __func__, linch_status);
+ if (linch_status & 0x20) {
+ charger->state = charging_state_charging_completed;
+ if (charger->charger_cb)
+ charger->charger_cb(charger->state,
+ charger->charger_cb_data);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t watchdog_expire_isr(int irq, void *dev_id)
+{
+ struct tps80031_charger *charger = dev_id;
+ int ret;
+
+ dev_info(charger->dev, "%s()\n", __func__);
+ if (charger->state != charging_state_charging_in_progress)
+ return IRQ_HANDLED;
+
+ /* Enable watchdog timer again*/
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2, CONTROLLER_WDG,
+ charger->watch_time_sec);
+ if (ret < 0)
+ dev_err(charger->dev, "%s(): Failed in writing register 0x%02x\n",
+ __func__, CONTROLLER_WDG);
+
+ /* Rewrite to enable the charging */
+ if (!ret) {
+ ret = tps80031_write(charger->dev->parent, SLAVE_ID2,
+ CONTROLLER_CTRL1, 0x30);
+ if (ret < 0)
+ dev_err(charger->dev, "%s(): Failed in writing "
+ "register 0x%02x\n",
+ __func__, CONTROLLER_CTRL1);
+ }
+ return IRQ_HANDLED;
+}
+
+static int tps80031_charger_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct tps80031_charger *charger;
+ struct tps80031_charger_platform_data *pdata = pdev->dev.platform_data;
+
+ dev_info(dev, "%s()\n", __func__);
+
+ if (!pdata) {
+ dev_err(dev, "%s() No platform data, exiting..\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!pdata->num_consumer_supplies) {
+ dev_err(dev, "%s() No consumer supply list, exiting..\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ charger = kzalloc(sizeof(*charger), GFP_KERNEL);
+ if (!charger) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ charger->dev = &pdev->dev;
+
+ charger->max_charge_current_mA = (pdata->max_charge_current_mA) ?
+ pdata->max_charge_current_mA : 1000;
+ charger->max_charge_volt_mV = (pdata->max_charge_volt_mV) ?
+ pdata->max_charge_volt_mV : 4200;
+ charger->irq_base = pdata->irq_base;
+ charger->watch_time_sec = min(pdata->watch_time_sec, 127);
+ if (!charger->watch_time_sec)
+ charger->watch_time_sec = 127;
+ charger->charging_term_current_mA =
+ min(400, pdata->charging_term_current_mA);
+ if (charger->charging_term_current_mA < 50)
+ charger->charging_term_current_mA = 50;
+
+ charger->reg_desc.name = "vbus_charger";
+ charger->reg_desc.id = pdata->regulator_id;
+ charger->reg_desc.ops = &tegra_regulator_ops;
+ charger->reg_desc.type = REGULATOR_CURRENT;
+ charger->reg_desc.owner = THIS_MODULE;
+
+ charger->reg_init_data.supply_regulator = NULL;
+ charger->reg_init_data.num_consumer_supplies =
+ pdata->num_consumer_supplies;
+ charger->reg_init_data.consumer_supplies = pdata->consumer_supplies;
+ charger->reg_init_data.regulator_init = NULL;
+ charger->reg_init_data.driver_data = charger;
+ charger->reg_init_data.constraints.name = "vbus_charger";
+ charger->reg_init_data.constraints.min_uA = 0;
+ charger->reg_init_data.constraints.max_uA =
+ pdata->max_charge_current_mA * 1000;
+ charger->reg_init_data.constraints.valid_modes_mask =
+ REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_STANDBY;
+ charger->reg_init_data.constraints.valid_ops_mask =
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_CURRENT;
+
+ charger->board_init = pdata->board_init;
+ charger->board_data = pdata->board_data;
+ charger->state = charging_state_idle;
+
+ charger->rdev = regulator_register(&charger->reg_desc, &pdev->dev,
+ &charger->reg_init_data, charger);
+ if (IS_ERR(charger->rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ charger->reg_desc.name);
+ ret = PTR_ERR(charger->rdev);
+ goto regulator_fail;
+ }
+
+ ret = request_threaded_irq(charger->irq_base + TPS80031_INT_LINCH_GATED,
+ NULL, linch_status_isr, 0, "tps80031-linch", charger);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register irq %d; error %d\n",
+ charger->irq_base + TPS80031_INT_LINCH_GATED, ret);
+ goto irq_linch_fail;
+ }
+
+ ret = request_threaded_irq(charger->irq_base + TPS80031_INT_FAULT_WDG,
+ NULL, watchdog_expire_isr, 0, "tps80031-wdg", charger);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register irq %d; error %d\n",
+ charger->irq_base + TPS80031_INT_FAULT_WDG, ret);
+ goto irq_wdg_fail;
+ }
+
+ ret = configure_charging_parameter(charger);
+ if (ret)
+ goto config_fail;
+
+ dev_set_drvdata(&pdev->dev, charger);
+ charger_data = charger;
+ return ret;
+
+config_fail:
+ free_irq(charger->irq_base + TPS80031_INT_FAULT_WDG, charger);
+irq_wdg_fail:
+ free_irq(charger->irq_base + TPS80031_INT_LINCH_GATED, charger);
+irq_linch_fail:
+ regulator_unregister(charger->rdev);
+regulator_fail:
+ kfree(charger);
+ return ret;
+}
+
+static int tps80031_charger_remove(struct platform_device *pdev)
+{
+ struct tps80031_charger *charger = dev_get_drvdata(&pdev->dev);
+
+ regulator_unregister(charger->rdev);
+ kfree(charger);
+ return 0;
+}
+
+static struct platform_driver tps80031_charger_driver = {
+ .driver = {
+ .name = "tps80031-charger",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_charger_probe,
+ .remove = tps80031_charger_remove,
+};
+
+static int __init tps80031_charger_init(void)
+{
+ return platform_driver_register(&tps80031_charger_driver);
+}
+
+static void __exit tps80031_charger_exit(void)
+{
+ platform_driver_unregister(&tps80031_charger_driver);
+}
+
+subsys_initcall(tps80031_charger_init);
+module_exit(tps80031_charger_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("tps80031 battery charger driver");
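A worked example of the current encoding in set_charge_current_limit() above: a request of 500000 uA becomes 500 mA, giving a CHARGERUSB_CINLIMIT code of 500/50 - 1 = 9 and a CHARGERUSB_VICHRG code of (500 - 300)/50 = 4. The helper below only restates that arithmetic; it is not part of the driver.

#include <linux/kernel.h>

/* Restatement of the register-code mapping used by set_charge_current_limit(). */
static void tps80031_encode_example(int max_uA)
{
	int max_ma = min(max_uA / 1000, 1500);
	int cinlimit = max_ma / 50;		/* VBUS input current limit code */
	int vichrg;				/* charge current code */

	if (cinlimit)
		cinlimit--;			/* 500 mA -> 9 */

	if (max_ma <= 300)
		vichrg = 0;
	else if (max_ma <= 500)
		vichrg = (max_ma - 300) / 50;	/* 500 mA -> 4 */
	else
		vichrg = (max_ma - 500) / 100 + 4;

	pr_debug("CINLIMIT=%d VICHRG=%d\n", cinlimit, vichrg);
}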
diff --git a/drivers/power/tps80031_battery_gauge.c b/drivers/power/tps80031_battery_gauge.c
new file mode 100644
index 000000000000..9ccfaad41441
--- /dev/null
+++ b/drivers/power/tps80031_battery_gauge.c
@@ -0,0 +1,606 @@
+/*
+ * drivers/power/tps80031_battery_gauge.c
+ *
+ * Gas Gauge driver for TI's tps80031
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/err.h>
+#include <linux/regulator/machine.h>
+#include <linux/mutex.h>
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/tps80031-charger.h>
+
+#define CHARGERUSB_CINLIMIT 0xee
+#define CONTROLLER_STAT1 0xe3
+#define LINEAR_CHARGE_STS 0xde
+#define STS_HW_CONDITIONS 0x21
+#define TOGGLE1 0x90
+#define TOGGLE1_FGS BIT(5)
+#define TOGGLE1_GPADCR BIT(1)
+#define GPCH0_LSB 0x3b
+#define GPCH0_MSB 0x3c
+#define GPCH0_MSB_COLLISION_GP BIT(4)
+#define GPSELECT_ISB 0x35
+#define GPADC_CTRL 0x2e
+#define MISC1 0xe4
+#define CTRL_P1 0x36
+#define CTRL_P1_SP1 BIT(3)
+#define CTRL_P1_EOCRT BIT(2)
+#define CTRL_P1_EOCP1 BIT(1)
+#define CTRL_P1_BUSY BIT(0)
+#define FG_REG_00 0xc0
+#define FG_REG_00_CC_CAL_EN BIT(1)
+#define FG_REG_00_CC_AUTOCLEAR BIT(2)
+#define FG_REG_01 0xc1 /* CONV_NR (unsigned) 0 - 7 */
+#define FG_REG_02 0xc2 /* CONV_NR (unsigned) 8 - 15 */
+#define FG_REG_03 0xc3 /* CONV_NR (unsigned) 16 - 23 */
+#define FG_REG_04 0xc4 /* ACCM (signed) 0 - 7 */
+#define FG_REG_05 0xc5 /* ACCM (signed) 8 - 15 */
+#define FG_REG_06 0xc6 /* ACCM (signed) 16 - 23 */
+#define FG_REG_07 0xc7 /* ACCM (signed) 24 - 31 */
+#define FG_REG_08 0xc8 /* OFFSET (signed) 0 - 7 */
+#define FG_REG_09 0xc9 /* OFFSET (signed) 8 - 9 */
+#define FG_REG_10 0xca /* LAST_READ (signed) 0 - 7 */
+#define FG_REG_11 0xcb /* LAST_READ (signed) 8 - 13 */
+
+#define TPS80031_VBUS_DET BIT(2)
+#define TPS80031_VAC_DET BIT(3)
+#define TPS80031_STS_VYSMIN_HI BIT(4)
+#define END_OF_CHARGE BIT(5)
+
+#define DRIVER_VERSION "1.1.0"
+#define BATTERY_POLL_PERIOD 30000
+
+static int tps80031_temp_table[] = {
+ /* adc code for temperature in degree C */
+ 929, 925, /* -2, -1 */
+ 920, 917, 912, 908, 904, 899, 895, 890, 885, 880, /* 00 - 09 */
+ 875, 869, 864, 858, 853, 847, 841, 835, 829, 823, /* 10 - 19 */
+ 816, 810, 804, 797, 790, 783, 776, 769, 762, 755, /* 20 - 29 */
+ 748, 740, 732, 725, 718, 710, 703, 695, 687, 679, /* 30 - 39 */
+ 671, 663, 655, 647, 639, 631, 623, 615, 607, 599, /* 40 - 49 */
+ 591, 583, 575, 567, 559, 551, 543, 535, 527, 519, /* 50 - 59 */
+ 511, 504, 496 /* 60 - 62 */
+};
+
+struct tps80031_device_info {
+ struct device *dev;
+ struct i2c_client *client;
+ struct power_supply bat;
+ struct power_supply ac;
+ struct power_supply usb;
+ struct timer_list battery_poll_timer;
+ uint32_t vsys;
+ uint8_t usb_online;
+ uint8_t ac_online;
+ uint8_t usb_status;
+ uint8_t capacity_sts;
+ uint8_t health;
+ uint8_t sys_vlow_intr;
+ uint8_t fg_calib_intr;
+ int16_t fg_offset;
+ struct mutex adc_lock;
+};
+
+static enum power_supply_property tps80031_bat_props[] = {
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+};
+
+static enum power_supply_property tps80031_usb_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property tps80031_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int tps80031_reg_read(struct tps80031_device_info *di, int sid, int reg,
+ uint8_t *val)
+{
+ int ret;
+
+ ret = tps80031_read(di->dev->parent, sid, reg, val);
+ if (ret < 0)
+ dev_err(di->dev, "Failed read register 0x%02x\n",
+ reg);
+ return ret;
+}
+
+static int tps80031_reg_write(struct tps80031_device_info *di, int sid, int reg,
+ uint8_t val)
+{
+ int ret;
+
+ ret = tps80031_write(di->dev->parent, sid, reg, val);
+ if (ret < 0)
+ dev_err(di->dev, "Failed write register 0x%02x\n",
+ reg);
+ return ret;
+}
+
+static int tps80031_battery_capacity(struct tps80031_device_info *di,
+ union power_supply_propval *val)
+{
+ uint8_t hwsts;
+ int ret;
+
+ ret = tps80031_reg_read(di, SLAVE_ID2, LINEAR_CHARGE_STS, &hwsts);
+ if (ret < 0)
+ return ret;
+
+ di->capacity_sts = di->vsys;
+ if (hwsts & END_OF_CHARGE)
+ di->capacity_sts = 100;
+
+ if (di->sys_vlow_intr) {
+ di->capacity_sts = 10;
+ di->sys_vlow_intr = 0;
+ }
+
+ if (di->capacity_sts <= 10)
+ di->health = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ di->health = POWER_SUPPLY_HEALTH_GOOD;
+
+ return di->capacity_sts;
+}
+
+static int tps80031_battery_voltage(struct tps80031_device_info *di,
+ union power_supply_propval *val)
+{
+ int voltage;
+
+ voltage = tps80031_gpadc_conversion(SYSTEM_SUPPLY);
+ if (voltage < 0)
+ return voltage;
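+ /* The conversion below multiplies the GPADC result by 1000 and
+ * then by 5/4 (x1250 overall); the 5/4 factor is assumed to undo
+ * the internal scaler on the VSYS GPADC channel.
+ */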
+ voltage = ((voltage * 1000) / 4) * 5;
+
+ if (voltage < 3700000)
+ di->vsys = 10;
+ else if (voltage > 3700000 && voltage <= 3800000)
+ di->vsys = 20;
+ else if (voltage > 3800000 && voltage <= 3900000)
+ di->vsys = 50;
+ else if (voltage > 3900000 && voltage <= 4000000)
+ di->vsys = 75;
+ else if (voltage >= 4000000)
+ di->vsys = 90;
+
+ return voltage;
+}
+
+static int tps80031_battery_charge_now(struct tps80031_device_info *di,
+ union power_supply_propval *val)
+{
+ int charge;
+
+ charge = tps80031_gpadc_conversion(BATTERY_CHARGING_CURRENT);
+ if (charge < 0)
+ return charge;
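+ /* 78125 / 40 = 1953.125, so the raw GPADC code is scaled by
+ * roughly 1953 per LSB below; the result is assumed to be in uA.
+ */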
+ charge = charge * 78125 / 40;
+
+ return charge;
+}
+
+static int tps80031_battery_charge_counter(struct tps80031_device_info *di,
+ union power_supply_propval *val)
+{
+ int retval, ret;
+ uint32_t cnt_byte;
+ uint32_t acc_byte;
+
+ /* check if calibrated */
+ if (di->fg_calib_intr == 0)
+ return 0;
+
+ /* read the current accumulator value */
+ ret = tps80031_reads(di->dev->parent, SLAVE_ID2, FG_REG_04, 4,
+ (uint8_t *) &acc_byte);
+ if (ret < 0)
+ return ret;
+ /* the accumulator counts in mAs; report the value in uAh */
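+ /* 1 mAs = 1000/3600 uAh = 5/18 uAh, hence the "/ 18 * 5" below */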
+ retval = (int32_t) acc_byte / 18 * 5;
+
+ /* get counter */
+ ret = tps80031_reads(di->dev->parent, SLAVE_ID2, FG_REG_01, 3,
+ (uint8_t *) &cnt_byte);
+ if (ret < 0)
+ return ret;
+ /* compensate for the calibrated offset current (per-sample offset
+ * times sample count), in uAh */
+ retval = retval - (di->fg_offset / 4 * cnt_byte);
+
+ /* @todo: the counter value will overflow if the battery is
+ * continuously charged or discharged for more than 108 Ah with a
+ * 250 ms integration period, although that is highly unlikely.
+ */
+
+ return retval;
+}
+
+static int tps80031_battery_temp(struct tps80031_device_info *di,
+ union power_supply_propval *val)
+{
+ int adc_code, temp;
+
+ adc_code = tps80031_gpadc_conversion(BATTERY_TEMPERATURE);
+ if (adc_code < 0)
+ return adc_code;
+
+ for (temp = 0; temp < ARRAY_SIZE(tps80031_temp_table); temp++) {
+ if (adc_code >= tps80031_temp_table[temp])
+ break;
+ }
+ /* first 2 values are for negative temperature */
+ val->intval = (temp - 2) * 10; /* in tenths of degree Celsius */
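+ /* e.g. an ADC code of 816 matches table index 22, giving
+ * (22 - 2) * 10 = 200, i.e. 20.0 degree C
+ */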
+
+ return val->intval;
+}
+
+#define to_tps80031_device_info_bat(x) container_of((x), \
+ struct tps80031_device_info, bat)
+
+static int tps80031_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct tps80031_device_info *di = to_tps80031_device_info_bat(psy);
+
+ switch (psp) {
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = di->health;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = tps80031_battery_capacity(di, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = tps80031_battery_charge_now(di, val);
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = tps80031_battery_voltage(di, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = tps80031_battery_charge_counter(di, val);
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = di->usb_status;
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = tps80031_battery_temp(di, val);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define to_tps80031_device_info_usb(x) container_of((x), \
+ struct tps80031_device_info, usb)
+
+static int tps80031_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct tps80031_device_info *di = to_tps80031_device_info_usb(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb_online;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define to_tps80031_device_info_ac(x) container_of((x), \
+ struct tps80031_device_info, ac)
+
+static int tps80031_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct tps80031_device_info *di = to_tps80031_device_info_ac(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->ac_online;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static irqreturn_t tps80031_sys_vlow(int irq, void *data)
+{
+ struct tps80031_device_info *di = data;
+
+ di->sys_vlow_intr = 1;
+ power_supply_changed(&di->bat);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tps80031_fg_calibrated(int irq, void *data)
+{
+ struct tps80031_device_info *di = data;
+ uint8_t acc_byte0;
+ uint8_t acc_byte1;
+ int ret;
+
+ ret = tps80031_reg_read(di, SLAVE_ID2, FG_REG_08, &acc_byte0);
+ if (ret < 0)
+ return IRQ_HANDLED;
+ ret = tps80031_reg_read(di, SLAVE_ID2, FG_REG_09, &acc_byte1);
+ if (ret < 0)
+ return IRQ_HANDLED;
+ /* sign-extend the 10-bit offset: FG_REG_09 carries bits 8-9, so
+ * bit 1 here is the sign bit */
+ if (acc_byte1 & 0x02)
+ acc_byte1 = acc_byte1 | 0xFC;
+ else
+ acc_byte1 = acc_byte1 & 0x03;
+
+ di->fg_offset = (int16_t) ((acc_byte1 << 8) | acc_byte0);
+ /* fuel gauge auto calibration finished */
+ di->fg_calib_intr = 1;
+ return IRQ_HANDLED;
+}
+
+static int tps80031_fg_start_gas_gauge(struct tps80031_device_info *di)
+{
+ int ret = 0;
+ di->fg_calib_intr = 0;
+
+ /* start gas gauge */
+ ret = tps80031_reg_write(di, SLAVE_ID2, TOGGLE1, 0x20);
+ if (ret < 0)
+ return ret;
+ /* set ADC update time to 3.9ms and start calibration */
+ ret = tps80031_reg_write(di, SLAVE_ID2, FG_REG_00, FG_REG_00_CC_CAL_EN);
+ if (ret < 0)
+ return ret;
+ return ret;
+}
+
+void tps80031_battery_status(enum charging_states status, void *data)
+{
+ struct tps80031_device_info *di = data;
+ int ret;
+ uint8_t retval;
+
+ if (status == charging_state_charging_in_progress) {
+ di->usb_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->health = POWER_SUPPLY_HEALTH_GOOD;
+ ret = tps80031_reg_read(di, SLAVE_ID2,
+ CHARGERUSB_CINLIMIT, &retval);
+ if (ret < 0) {
+ /* cannot identify the charger source; report both offline */
+ di->ac_online = 0;
+ di->usb_online = 0;
+ } else if (retval == 0x9) {
+ di->ac_online = 0;
+ di->usb_online = 1;
+ } else {
+ di->usb_online = 0;
+ di->ac_online = 1;
+ }
+ } else if (status == charging_state_charging_stopped) {
+ di->usb_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ di->ac_online = 0;
+ di->usb_online = 0;
+ }
+ power_supply_changed(&di->usb);
+ power_supply_changed(&di->bat);
+ power_supply_changed(&di->ac);
+}
+
+static void battery_poll_timer_func(unsigned long pdi)
+{
+ struct tps80031_device_info *di = (void *)pdi;
+ power_supply_changed(&di->bat);
+ mod_timer(&di->battery_poll_timer,
+ jiffies + msecs_to_jiffies(BATTERY_POLL_PERIOD));
+}
+
+static int tps80031_battery_probe(struct platform_device *pdev)
+{
+ int ret;
+ uint8_t retval;
+ struct device *dev = &pdev->dev;
+ struct tps80031_device_info *di;
+ struct tps80031_bg_platform_data *pdata = pdev->dev.platform_data;
+
+ di = devm_kzalloc(&pdev->dev, sizeof *di, GFP_KERNEL);
+ if (!di) {
+ dev_err(dev->parent, "failed to allocate device info data\n");
+ return -ENOMEM;
+ }
+
+ if (!pdata->battery_present) {
+ dev_err(dev, "%s() No battery detected, exiting..\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ di->dev = &pdev->dev;
+
+ ret = tps80031_reg_read(di, SLAVE_ID2, CONTROLLER_STAT1, &retval);
+ if (ret < 0)
+ return ret;
+
+ if ((retval & TPS80031_VAC_DET) | (retval & TPS80031_VBUS_DET)) {
+ di->usb_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->usb_online = 1;
+ } else {
+ di->usb_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ di->usb_online = 0;
+ }
+
+ di->capacity_sts = 50;
+ di->health = POWER_SUPPLY_HEALTH_GOOD;
+
+ di->bat.name = "tps80031-bat";
+ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->bat.properties = tps80031_bat_props;
+ di->bat.num_properties = ARRAY_SIZE(tps80031_bat_props);
+ di->bat.get_property = tps80031_bat_get_property;
+
+ ret = power_supply_register(dev->parent, &di->bat);
+ if (ret) {
+ dev_err(dev->parent, "failed to register bat power supply\n");
+ return ret;
+ }
+
+ di->usb.name = "tps80031-usb";
+ di->usb.type = POWER_SUPPLY_TYPE_USB;
+ di->usb.properties = tps80031_usb_props;
+ di->usb.num_properties = ARRAY_SIZE(tps80031_usb_props);
+ di->usb.get_property = tps80031_usb_get_property;
+
+ ret = power_supply_register(dev->parent, &di->usb);
+ if (ret) {
+ dev_err(dev->parent, "failed to register ac power supply\n");
+ goto power_supply_fail2;
+ }
+
+ di->ac.name = "tps80031-ac";
+ di->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ di->ac.properties = tps80031_ac_props;
+ di->ac.num_properties = ARRAY_SIZE(tps80031_ac_props);
+ di->ac.get_property = tps80031_ac_get_property;
+
+ ret = power_supply_register(dev->parent, &di->ac);
+ if (ret) {
+ dev_err(dev->parent, "failed to register ac power supply\n");
+ goto power_supply_fail1;
+ }
+
+ dev_set_drvdata(&pdev->dev, di);
+
+ ret = register_charging_state_callback(tps80031_battery_status, di);
+ if (ret < 0)
+ goto power_supply_fail0;
+
+ ret = request_threaded_irq(pdata->irq_base + TPS80031_INT_SYS_VLOW,
+ NULL, tps80031_sys_vlow,
+ IRQF_ONESHOT, "tps80031_sys_vlow", di);
+ if (ret < 0) {
+ dev_err(dev->parent, "request IRQ %d fail\n", pdata->irq_base);
+ goto power_supply_fail0;
+ }
+
+ ret = request_threaded_irq(pdata->irq_base + TPS80031_INT_CC_AUTOCAL,
+ NULL, tps80031_fg_calibrated, IRQF_ONESHOT,
+ "tps80031_fuel_gauge_calibration", di);
+ if (ret < 0) {
+ dev_err(dev->parent, "request IRQ %d fail\n", pdata->irq_base);
+ goto irq_fail2;
+ }
+ setup_timer(&di->battery_poll_timer,
+ battery_poll_timer_func, (unsigned long) di);
+ mod_timer(&di->battery_poll_timer,
+ jiffies + msecs_to_jiffies(BATTERY_POLL_PERIOD));
+
+ ret = tps80031_fg_start_gas_gauge(di);
+ if (ret < 0) {
+ dev_err(dev->parent, "failed to start fuel-gauge\n");
+ goto irq_fail1;
+ }
+ dev_info(dev->parent, "support ver. %s enabled\n", DRIVER_VERSION);
+
+ return ret;
+
+irq_fail1:
+ free_irq(pdata->irq_base + TPS80031_INT_CC_AUTOCAL, di);
+irq_fail2:
+ free_irq(pdata->irq_base + TPS80031_INT_SYS_VLOW, di);
+power_supply_fail0:
+ power_supply_unregister(&di->ac);
+power_supply_fail1:
+ power_supply_unregister(&di->usb);
+power_supply_fail2:
+ power_supply_unregister(&di->bat);
+ return ret;
+}
+
+static int tps80031_battery_remove(struct platform_device *pdev)
+{
+ struct tps80031_device_info *di = dev_get_drvdata(&pdev->dev);
+
+ power_supply_unregister(&di->bat);
+ power_supply_unregister(&di->usb);
+ power_supply_unregister(&di->ac);
+
+ return 0;
+}
+
+static struct platform_driver tps80031_battery_driver = {
+ .driver = {
+ .name = "tps80031-battery-gauge",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_battery_probe,
+ .remove = tps80031_battery_remove,
+};
+
+static int __init tps80031_battery_init(void)
+{
+ return platform_driver_register(&tps80031_battery_driver);
+}
+
+static void __exit tps80031_battery_exit(void)
+{
+ platform_driver_unregister(&tps80031_battery_driver);
+}
+
+module_init(tps80031_battery_init);
+module_exit(tps80031_battery_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Syed Rafiuddin <srafiuddin@nvidia.com> ");
+MODULE_DESCRIPTION("tps80031 battery gauge driver");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c7fd2c0e3f2b..fe311c2af398 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -125,6 +125,22 @@ config REGULATOR_MAX8998
via I2C bus. The provided regulator is suitable for S3C6410
and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX8907C
+ tristate "Maxim 8907C voltage regulator"
+ depends on MFD_MAX8907C
+ help
+ This driver controls a Maxim 8907C voltage output regulator
+ via I2C bus. The provided regulator is suitable for Tegra
+ chip to control Step-Down DC-DC and LDOs.
+
+config REGULATOR_MAX77663
+ tristate "Maxim 77663 voltage regulator"
+ depends on MFD_MAX77663
+ help
+ This driver controls a Maxim 77663 voltage output regulator
+ via I2C bus. The provided regulators are suitable for Tegra
+ chips, controlling the step-down DC-DC converters and LDOs.
+
config REGULATOR_TWL4030
bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
@@ -317,5 +333,53 @@ config REGULATOR_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
regulator driver.
+config REGULATOR_TPS6591X
+ tristate "TI TPS6591X Power regulators"
+ depends on MFD_TPS6591X
+ default n
+ help
+ This driver supports TPS6591X voltage regulator chips.
+
+config REGULATOR_TPS6236X
+ tristate "TI TPS6236X DC-DC core power suply regulators"
+ default n
+ help
+ This driver supports TPS6236X DC-DC Processor core supply.
+
+config REGULATOR_TPS80031
+ tristate "TI TPS80031 Power regulators"
+ depends on MFD_TPS80031
+ default n
+ help
+ This driver supports TPS80031 voltage regulator chips.
+
+config REGULATOR_RICOH583
+ tristate "RICOH 583 Power regulators"
+ depends on MFD_RICOH583
+ default n
+ help
+ This driver supports the regulators on the RICOH583 PMIC.
+
+config REGULATOR_GPIO_SWITCH
+ bool "GPIO based enable/disable of power rails."
+ default n
+ help
+ This driver supports GPIO based switch control of power rails.
+ Say Y if the given platform enables and disables its power rails
+ through GPIOs.
+
+config REGULATOR_AAT2870
+ tristate "AnalogicTech AAT2870 Regulators"
+ depends on MFD_AAT2870_CORE
+ help
+ If you have a AnalogicTech AAT2870 say Y to enable the
+ regulator driver.
+
+config REGULATOR_FAN53555
+ tristate "Fairchild FAN53555 DC-DC CPU power supply regulators"
+ default n
+ help
+ This driver supports FAN53555 DC-DC CPU power supply.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 040d5aa63535..3de5dafa0a0f 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
+obj-$(CONFIG_REGULATOR_MAX8907C) += max8907c-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77663) += max77663-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
@@ -27,6 +29,7 @@ obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6591X) += tps6591x-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
@@ -39,11 +42,15 @@ obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6236X) += tps6236x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
+obj-$(CONFIG_REGULATOR_RICOH583) += ricoh583-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
-
+obj-$(CONFIG_REGULATOR_GPIO_SWITCH) += gpio-switch-regulator.o
+obj-$(CONFIG_REGULATOR_FAN53555) += fan53555-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 11d1ab4abefa..896361f03e03 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -27,6 +27,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/aat2870.h>
struct aat2870_regulator {
@@ -186,8 +187,8 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
}
ri->pdev = pdev;
- rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ rdev = regulator_register(&ri->desc, &pdev->dev, mfd_get_data(pdev),
+ ri);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d8e6a429e8ba..f3247f7ccefc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -31,6 +31,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/regulator.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
#include "dummy.h"
@@ -1368,6 +1371,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
}
trace_regulator_enable(rdev_get_name(rdev));
+ _notifier_call_chain(
+ rdev, REGULATOR_EVENT_PRE_ENABLE, NULL);
/* Allow the regulator to ramp; it would be useful
* to extend this for bulk operations so that the
@@ -1385,6 +1390,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
udelay(delay);
}
+ _notifier_call_chain(
+ rdev, REGULATOR_EVENT_POST_ENABLE, NULL);
trace_regulator_enable_complete(rdev_get_name(rdev));
} else if (ret < 0) {
@@ -1676,6 +1683,10 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
min_uV += rdev->constraints->uV_offset;
max_uV += rdev->constraints->uV_offset;
+ if (_regulator_is_enabled(rdev))
+ _notifier_call_chain(rdev, REGULATOR_EVENT_OUT_PRECHANGE,
+ NULL);
+
if (rdev->desc->ops->set_voltage) {
ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
&selector);
@@ -1743,6 +1754,10 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
NULL);
+ if (_regulator_is_enabled(rdev))
+ _notifier_call_chain(rdev, REGULATOR_EVENT_OUT_POSTCHANGE,
+ NULL);
+
trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector);
return ret;
@@ -2988,4 +3003,59 @@ unlock:
return 0;
}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t regulator_syncevent(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regulator_dev *rdev;
+ char buffer[40];
+ int buf_size;
+
+ memset(buffer, 0, sizeof(buffer));
+ buf_size = min(count, (sizeof(buffer)-1));
+
+ if (copy_from_user(buffer, user_buf, buf_size))
+ return -EFAULT;
+
+ if (!strnicmp("all", buffer, 3)) {
+
+ mutex_lock(&regulator_list_mutex);
+
+ list_for_each_entry(rdev, &regulator_list, list) {
+ mutex_lock(&rdev->mutex);
+
+ if (_regulator_is_enabled(rdev))
+ trace_regulator_enable(rdev_get_name(rdev));
+ else
+ trace_regulator_disable(rdev_get_name(rdev));
+
+ trace_regulator_set_voltage(rdev_get_name(rdev),
+ _regulator_get_voltage(rdev),
+ _regulator_get_voltage(rdev));
+
+ mutex_unlock(&rdev->mutex);
+ }
+
+ mutex_unlock(&regulator_list_mutex);
+ }
+
+ return count;
+}
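+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
+ *   echo all > /sys/kernel/debug/syncevent_regulators
+ * re-emits enable/disable and set_voltage trace events for every
+ * registered regulator, giving a newly started trace a baseline.
+ */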
+
+static const struct file_operations regulator_syncevent_fops = {
+ .write = regulator_syncevent,
+};
+
+static int __init regulator_init_debugfs(void)
+{
+ debugfs_create_file("syncevent_regulators", S_IWUSR, NULL, NULL,
+ &regulator_syncevent_fops);
+
+ return 0;
+}
+
+late_initcall(regulator_init_debugfs);
+#endif
+
late_initcall(regulator_init_complete);
diff --git a/drivers/regulator/fan53555-regulator.c b/drivers/regulator/fan53555-regulator.c
new file mode 100644
index 000000000000..13fa79c4ba3a
--- /dev/null
+++ b/drivers/regulator/fan53555-regulator.c
@@ -0,0 +1,567 @@
+/*
+ * drivers/regulator/fan53555-regulator.c
+ *
+ * Driver for FAN53555UC00X, FAN53555UC01X, FAN53555UC03X,
+ * FAN53555UC04X, FAN53555UC05X
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fan53555-regulator.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+#define FAN53555_REG_VSEL0 0
+#define FAN53555_REG_VSEL1 1
+#define FAN53555_REG_CONTROL 2
+#define FAN53555_REG_ID1 3
+#define FAN53555_REG_ID2 4
+#define FAN53555_REG_MONITOR 5
+
+#define FAN53555_VSEL_BUCK_EN BIT(7)
+#define FAN53555_VSEL_MODE BIT(6)
+#define FAN53555_VSEL_NSEL_SHIFT 0
+#define FAN53555_VSEL_NSEL_MASK 0x3F
+
+#define FAN53555_CONTROL_DISCHARGE BIT(7)
+#define FAN53555_CONTROL_SLEW_SHIFT 4
+#define FAN53555_CONTROL_SLEW_MASK 0x70
+#define FAN53555_CONTROL_RESET BIT(2)
+
+#define FAN53555_ID1_VENDOR_SHIFT 4
+#define FAN53555_ID1_VENDOR_MASK 0xF0
+#define FAN53555_ID1_DIE_ID_SHIFT 0
+#define FAN53555_ID1_DIE_ID_MASK 0x0F
+
+#define FAN53555_ID2_REV_SHIFT 0
+#define FAN53555_ID2_REV_MASK 0x0F
+
+#define FAN53555_MONITOR_ILIM BIT(7)
+#define FAN53555_MONITOR_UVLO BIT(6)
+#define FAN53555_MONITOR_OVP BIT(5)
+#define FAN53555_MONITOR_POS BIT(4)
+#define FAN53555_MONITOR_NEG BIT(3)
+#define FAN53555_MONITOR_RESET_STAT BIT(2)
+#define FAN53555_MONITOR_OT BIT(1)
+#define FAN53555_MONITOR_BUCK_STATUS BIT(0)
+
+#define FAN53555_VSEL0_ID 0
+#define FAN53555_VSEL1_ID 1
+
+#define FAN53555UC00X_ID 0x80
+#define FAN53555UC01X_ID 0x81
+#define FAN53555UC03X_ID 0x83
+#define FAN53555UC04X_ID 0x84
+#define FAN53555UC05X_ID 0x85
+
+#define FAN53555_N_VOLTAGES 64
+
+/* FAN53555 chip information */
+struct fan53555_chip {
+ const char *name;
+ struct device *dev;
+ struct regulator_desc desc;
+ struct i2c_client *client;
+ struct regulator_dev *rdev;
+ struct mutex io_lock;
+ int chip_id;
+ int vsel_id;
+ u8 shadow[6];
+};
+
+#define FAN53555_VOLTAGE(chip_id, vsel) \
+ (((chip_id) == FAN53555UC04X_ID) ? \
+ ((vsel) * 12826 + 600000) : ((vsel) * 10000 + 600000))
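+
+/*
+ * Example: on the 10 mV-step parts a selector of 39 (0x27) maps to
+ * 600000 + 39 * 10000 = 990000 uV; the UC04X variant uses a 12.826 mV
+ * step instead.
+ */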
+
+static int fan53555_read(struct fan53555_chip *fan, u8 reg)
+{
+ u8 data;
+ u8 val;
+ int ret;
+
+ data = reg;
+
+ ret = i2c_master_send(fan->client, &data, 1);
+ if (ret < 0)
+ goto out;
+
+ ret = i2c_master_recv(fan->client, &val, 1);
+ if (ret < 0)
+ goto out;
+
+ ret = val;
+out:
+ return ret;
+}
+
+static inline int fan53555_write(struct fan53555_chip *fan, u8 reg, u8 val)
+{
+ u8 msg[2];
+ int ret;
+
+ msg[0] = reg;
+ msg[1] = val;
+
+ ret = i2c_master_send(fan->client, msg, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+ return 0;
+}
+
+static int fan53555_read_reg(struct fan53555_chip *fan, u8 reg)
+{
+ int data;
+
+ mutex_lock(&fan->io_lock);
+ data = fan53555_read(fan, reg);
+ if (data < 0)
+ dev_err(fan->dev, "Read from reg 0x%x failed\n", reg);
+ mutex_unlock(&fan->io_lock);
+
+ return data;
+}
+
+static int fan53555_set_bits(struct fan53555_chip *fan, u8 reg, u8 mask, u8 val)
+{
+ int err;
+ u8 data;
+
+ mutex_lock(&fan->io_lock);
+ data = fan->shadow[reg];
+ data &= ~mask;
+ val &= mask;
+ data |= val;
+ err = fan53555_write(fan, reg, data);
+ if (err)
+ dev_err(fan->dev, "write for reg 0x%x failed\n", reg);
+ else
+ fan->shadow[reg] = data;
+ mutex_unlock(&fan->io_lock);
+
+ return err;
+}
+
+static int __fan53555_dcdc_set_voltage(struct fan53555_chip *fan,
+ int vsel_id, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int nsel;
+ int uV;
+ int chip_id;
+ int n_voltages;
+
+ chip_id = fan->chip_id;
+ n_voltages = fan->desc.n_voltages;
+
+ if (max_uV < min_uV) {
+ dev_err(fan->dev, "max_uV(%d) < min_uV(%d)\n", max_uV, min_uV);
+ return -EINVAL;
+ }
+ if (min_uV > FAN53555_VOLTAGE(chip_id, n_voltages - 1)) {
+ dev_err(fan->dev, "min_uV(%d) > %d[uV]\n",
+ min_uV, FAN53555_VOLTAGE(chip_id, n_voltages - 1));
+ return -EINVAL;
+ }
+ if (max_uV < FAN53555_VOLTAGE(chip_id, 0)) {
+ dev_err(fan->dev, "max_uV(%d) < %d[uV]\n",
+ max_uV, FAN53555_VOLTAGE(chip_id, 0));
+ return -EINVAL;
+ }
+ if ((vsel_id != FAN53555_VSEL0_ID) && (vsel_id != FAN53555_VSEL1_ID)) {
+ dev_err(fan->dev,
+ "%d is not valid VSEL register ID\n", vsel_id);
+ return -EINVAL;
+ }
+ for (nsel = 0; nsel < n_voltages; nsel++) {
+ uV = FAN53555_VOLTAGE(chip_id, nsel);
+ if (min_uV <= uV && uV <= max_uV) {
+ if (selector)
+ *selector = nsel;
+ return fan53555_set_bits(fan,
+ FAN53555_REG_VSEL0 + vsel_id,
+ FAN53555_VSEL_NSEL_MASK,
+ nsel <<
+ FAN53555_VSEL_NSEL_SHIFT);
+ }
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_fan_show(struct seq_file *s, void *unused)
+{
+ struct fan53555_chip *fan = s->private;
+ int val;
+
+ seq_printf(s, "FAN53555 Registers\n");
+ seq_printf(s, "------------------\n");
+
+ val = fan53555_read_reg(fan, FAN53555_REG_VSEL0);
+ if (val >= 0)
+ seq_printf(s, "Reg VSEL0 Value 0x%02x\n", val);
+
+ val = fan53555_read_reg(fan, FAN53555_REG_VSEL1);
+ if (val >= 0)
+ seq_printf(s, "Reg VSEL1 Value 0x%02x\n", val);
+
+ val = fan53555_read_reg(fan, FAN53555_REG_CONTROL);
+ if (val >= 0)
+ seq_printf(s, "Reg CONTROL Value 0x%02x\n", val);
+
+ val = fan53555_read_reg(fan, FAN53555_REG_ID1);
+ if (val >= 0)
+ seq_printf(s, "Reg ID1 Value 0x%02x\n", val);
+
+ val = fan53555_read_reg(fan, FAN53555_REG_ID2);
+ if (val >= 0)
+ seq_printf(s, "Reg ID2 Value 0x%02x\n", val);
+
+ val = fan53555_read_reg(fan, FAN53555_REG_MONITOR);
+ if (val >= 0)
+ seq_printf(s, "Reg MONITOR Value 0x%02x\n", val);
+
+ return 0;
+}
+
+static int dbg_fan_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_fan_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_fan_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __devinit fan53555_debuginit(struct fan53555_chip *fan)
+{
+ (void)debugfs_create_file("fan53555", S_IRUGO, NULL, fan, &debug_fops);
+}
+#else
+static void __devinit fan53555_debuginit(struct fan53555_chip *fan)
+{
+}
+#endif
+
+static int fan53555_dcdc_init(struct fan53555_chip *fan,
+ struct i2c_client *client,
+ struct fan53555_regulator_platform_data *pdata)
+{
+ int err;
+ int val;
+
+ err = fan53555_read_reg(fan, FAN53555_REG_VSEL0);
+ if (err < 0)
+ return err;
+ fan->shadow[FAN53555_REG_VSEL0] = (u8)err;
+
+ err = fan53555_read_reg(fan, FAN53555_REG_VSEL1);
+ if (err < 0)
+ return err;
+ fan->shadow[FAN53555_REG_VSEL1] = (u8)err;
+
+ err = fan53555_read_reg(fan, FAN53555_REG_CONTROL);
+ if (err < 0)
+ return err;
+ fan->shadow[FAN53555_REG_CONTROL] = (u8)err;
+
+ err = __fan53555_dcdc_set_voltage(fan,
+ FAN53555_VSEL0_ID,
+ pdata->init_vsel0_min_uV,
+ pdata->init_vsel0_max_uV,
+ NULL);
+ if (err < 0)
+ return err;
+
+ val = pdata->vsel0_buck_en ? FAN53555_VSEL_BUCK_EN : 0;
+ val |= pdata->vsel0_mode ? FAN53555_VSEL_MODE : 0;
+ err = fan53555_set_bits(fan,
+ FAN53555_REG_VSEL0,
+ FAN53555_VSEL_BUCK_EN | FAN53555_VSEL_MODE,
+ val);
+ if (err < 0)
+ return err;
+
+ err = __fan53555_dcdc_set_voltage(fan,
+ FAN53555_VSEL1_ID,
+ pdata->init_vsel1_min_uV,
+ pdata->init_vsel1_max_uV,
+ NULL);
+ if (err < 0)
+ return err;
+
+ val = pdata->vsel1_buck_en ? FAN53555_VSEL_BUCK_EN : 0;
+ val |= pdata->vsel1_mode ? FAN53555_VSEL_MODE : 0;
+ err = fan53555_set_bits(fan,
+ FAN53555_REG_VSEL1,
+ FAN53555_VSEL_BUCK_EN | FAN53555_VSEL_MODE,
+ val);
+ if (err < 0)
+ return err;
+
+ val = pdata->slew_rate;
+ val <<= FAN53555_CONTROL_SLEW_SHIFT;
+ val |= pdata->output_discharge ? FAN53555_CONTROL_DISCHARGE : 0;
+ err = fan53555_set_bits(fan,
+ FAN53555_REG_CONTROL,
+ FAN53555_CONTROL_DISCHARGE |
+ FAN53555_CONTROL_SLEW_MASK, val);
+ return err;
+}
+
+static int fan53555_dcdc_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct fan53555_chip *fan = rdev_get_drvdata(dev);
+
+ if ((selector < 0) || (selector >= fan->desc.n_voltages))
+ return -EINVAL;
+
+ return FAN53555_VOLTAGE(fan->chip_id, selector);
+}
+
+static int fan53555_dcdc_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct fan53555_chip *fan = rdev_get_drvdata(dev);
+
+ return __fan53555_dcdc_set_voltage(fan, fan->vsel_id, min_uV, max_uV,
+ selector);
+}
+
+static int fan53555_dcdc_get_voltage(struct regulator_dev *dev)
+{
+ struct fan53555_chip *fan = rdev_get_drvdata(dev);
+ u8 data;
+
+ if ((fan->vsel_id != FAN53555_VSEL0_ID) &&
+ (fan->vsel_id != FAN53555_VSEL1_ID)) {
+ dev_err(fan->dev,
+ "%d is not valid VSEL register ID\n", fan->vsel_id);
+ return -EINVAL;
+ }
+ data = fan->shadow[FAN53555_REG_VSEL0 + fan->vsel_id];
+ data &= FAN53555_VSEL_NSEL_MASK;
+ data >>= FAN53555_VSEL_NSEL_SHIFT;
+
+ return FAN53555_VOLTAGE(fan->chip_id, data);
+}
+
+static int fan53555_dcdc_enable(struct regulator_dev *dev)
+{
+ struct fan53555_chip *fan = rdev_get_drvdata(dev);
+
+ if ((fan->vsel_id != FAN53555_VSEL0_ID) &&
+ (fan->vsel_id != FAN53555_VSEL1_ID)) {
+ dev_err(fan->dev,
+ "%d is not valid VSEL register ID\n", fan->vsel_id);
+ return -EINVAL;
+ }
+
+ return fan53555_set_bits(fan,
+ FAN53555_REG_VSEL0 + fan->vsel_id,
+ FAN53555_VSEL_BUCK_EN, FAN53555_VSEL_BUCK_EN);
+}
+
+static int fan53555_dcdc_disable(struct regulator_dev *dev)
+{
+ struct fan53555_chip *fan = rdev_get_drvdata(dev);
+
+ if ((fan->vsel_id != FAN53555_VSEL0_ID) &&
+ (fan->vsel_id != FAN53555_VSEL1_ID)) {
+ dev_err(fan->dev,
+ "%d is not valid VSEL register ID\n", fan->vsel_id);
+ return -EINVAL;
+ }
+
+ return fan53555_set_bits(fan,
+ FAN53555_REG_VSEL0 + fan->vsel_id,
+ FAN53555_VSEL_BUCK_EN, 0);
+}
+
+static int fan53555_dcdc_is_enabled(struct regulator_dev *dev)
+{
+ struct fan53555_chip *fan = rdev_get_drvdata(dev);
+ u8 data;
+
+ if ((fan->vsel_id != FAN53555_VSEL0_ID) &&
+ (fan->vsel_id != FAN53555_VSEL1_ID)) {
+ dev_err(fan->dev,
+ "%d is not valid VSEL register ID\n", fan->vsel_id);
+ return -EINVAL;
+ }
+ data = fan->shadow[FAN53555_REG_VSEL0 + fan->vsel_id];
+
+ return (data & FAN53555_VSEL_BUCK_EN) ? 1 : 0;
+}
+
+static struct regulator_ops fan53555_dcdc_ops = {
+ .list_voltage = fan53555_dcdc_list_voltage,
+ .set_voltage = fan53555_dcdc_set_voltage,
+ .get_voltage = fan53555_dcdc_get_voltage,
+ .enable = fan53555_dcdc_enable,
+ .disable = fan53555_dcdc_disable,
+ .is_enabled = fan53555_dcdc_is_enabled,
+};
+
+static int __devinit fan53555_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct fan53555_regulator_platform_data *pdata;
+ struct regulator_init_data *init_data;
+ struct regulator_dev *rdev;
+ struct fan53555_chip *fan;
+ int chip_id;
+ int err;
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "Err: Platform data not found\n");
+ return -EIO;
+ }
+ init_data = &pdata->reg_init_data;
+ fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+ if (!fan) {
+ dev_err(&client->dev, "Err: Memory allocation fails\n");
+ return -ENOMEM;
+ }
+ mutex_init(&fan->io_lock);
+ fan->client = client;
+ fan->dev = &client->dev;
+ fan->vsel_id = pdata->vsel_id;
+ fan->name = id->name;
+ fan->desc.name = id->name;
+ fan->desc.id = 0;
+ fan->desc.irq = 0;
+ fan->desc.ops = &fan53555_dcdc_ops;
+ fan->desc.type = REGULATOR_VOLTAGE;
+ fan->desc.owner = THIS_MODULE;
+ fan->desc.n_voltages = FAN53555_N_VOLTAGES;
+ i2c_set_clientdata(client, fan);
+
+ chip_id = fan53555_read_reg(fan, FAN53555_REG_ID1);
+ if (chip_id < 0) {
+ err = chip_id;
+ dev_err(fan->dev, "Error in reading device %d\n", err);
+ goto fail;
+ }
+
+ switch (chip_id) {
+ case FAN53555UC00X_ID:
+ case FAN53555UC01X_ID:
+ case FAN53555UC03X_ID:
+ case FAN53555UC04X_ID:
+ case FAN53555UC05X_ID:
+ fan->chip_id = chip_id;
+ break;
+ default:
+ dev_err(fan->dev, "Err: not supported device chip id 0x%x",
+ chip_id);
+ err = -ENODEV;
+ goto fail;
+ }
+
+ err = fan53555_dcdc_init(fan, client, pdata);
+ if (err < 0) {
+ dev_err(fan->dev, "FAN53555 init fails with %d\n", err);
+ goto fail;
+ }
+
+ rdev = regulator_register(&fan->desc, &client->dev, init_data, fan);
+ if (IS_ERR(rdev)) {
+ dev_err(fan->dev, "Failed to register %s\n", id->name);
+ err = PTR_ERR(rdev);
+ goto fail;
+ }
+ fan->rdev = rdev;
+
+ fan53555_debuginit(fan);
+ return 0;
+
+fail:
+ kfree(fan);
+ return err;
+}
+
+/**
+ * fan53555_remove - fan53555 driver i2c remove handler
+ * @client: i2c driver client device structure
+ *
+ * Unregister fan53555 driver as an i2c client device driver
+ */
+static int __devexit fan53555_remove(struct i2c_client *client)
+{
+ struct fan53555_chip *chip = i2c_get_clientdata(client);
+
+ regulator_unregister(chip->rdev);
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id fan53555_id[] = {
+ {.name = "fan53555", .driver_data = 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, fan53555_id);
+
+static struct i2c_driver fan53555_i2c_driver = {
+ .driver = {
+ .name = "fan53555",
+ .owner = THIS_MODULE,
+ },
+ .probe = fan53555_probe,
+ .remove = __devexit_p(fan53555_remove),
+ .id_table = fan53555_id,
+};
+
+/* Module init function */
+static int __init fan53555_init(void)
+{
+ return i2c_add_driver(&fan53555_i2c_driver);
+}
+subsys_initcall_sync(fan53555_init);
+
+/* Module exit function */
+static void __exit fan53555_cleanup(void)
+{
+ i2c_del_driver(&fan53555_i2c_driver);
+}
+module_exit(fan53555_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jake Park<jakep@nvidia.com>");
+MODULE_DESCRIPTION("Regulator Driver for Fairchild FAN53555 Regulator");
+MODULE_ALIAS("platform:fan53555-regulator");
diff --git a/drivers/regulator/gpio-switch-regulator.c b/drivers/regulator/gpio-switch-regulator.c
new file mode 100644
index 000000000000..55dd63675a06
--- /dev/null
+++ b/drivers/regulator/gpio-switch-regulator.c
@@ -0,0 +1,412 @@
+/*
+ * drivers/regulator/gpio-switch-regulator.c
+ * GPIO based switch regulator to enable/disable power rails.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/regulator/gpio-switch-regulator.h>
+
+struct gpio_switch_regulator {
+ struct regulator_desc reg_desc;
+ struct regulator_init_data reg_init_data;
+ struct regulator *input_regulator;
+ struct regulator_dev *rdev;
+ struct device *dev;
+ int gpio_nr;
+ int pin_group;
+ bool is_gpio_init;
+ bool is_enable;
+ bool active_low;
+ bool is_init_success;
+ int *voltages;
+ unsigned curr_vol_sel;
+ struct gpio_switch_regulator_subdev_data *psubdev_data;
+ int (*enable_rail)(struct gpio_switch_regulator_subdev_data *sdata);
+ int (*disable_rail)(struct gpio_switch_regulator_subdev_data *sdata);
+};
+
+static int _gpio_regulator_enable(struct device *dev,
+ struct gpio_switch_regulator *ri)
+{
+ int init_val;
+ int ret;
+
+ if (ri->enable_rail) {
+ ret = ri->enable_rail(ri->psubdev_data);
+ if (ret < 0)
+ dev_err(dev, "Unable to enable rail through board api"
+ " error %d\n", ret);
+ } else {
+ init_val = (ri->active_low) ? 0 : 1;
+ ret = gpio_direction_output(ri->gpio_nr, init_val);
+ if (ret < 0)
+ dev_err(dev, "Unable to set direction %d\n",
+ ri->gpio_nr);
+ }
+ return ret;
+}
+
+static int _gpio_regulator_disable(struct device *dev,
+ struct gpio_switch_regulator *ri)
+{
+ int init_val;
+ int ret;
+
+ if (ri->disable_rail) {
+ ret = ri->disable_rail(ri->psubdev_data);
+ if (ret < 0)
+ dev_err(dev, "Unable to disable rail through "
+ "board api %d\n", ret);
+ } else {
+ init_val = (ri->active_low) ? 1 : 0;
+ ret = gpio_direction_output(ri->gpio_nr, init_val);
+ if (ret < 0)
+ dev_err(dev, "Unable to set direction %d\n",
+ ri->gpio_nr);
+ }
+ return ret;
+}
+
+static int gpio_switch_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct gpio_switch_regulator *ri = rdev_get_drvdata(rdev);
+
+ if (selector < ri->reg_desc.n_voltages)
+ return ri->voltages[selector] * 1000;
+ else
+ return 0;
+}
+
+static int gpio_switch_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct gpio_switch_regulator *ri = rdev_get_drvdata(rdev);
+ int uV;
+ bool found = false;
+ unsigned val;
+
+ for (val = 0; val < ri->reg_desc.n_voltages; val++) {
+ uV = ri->voltages[val] * 1000;
+ if (min_uV <= uV && uV <= max_uV) {
+ found = true;
+ *selector = ri->curr_vol_sel = val;
+ break;
+ }
+ }
+ if (!found) {
+ ri->curr_vol_sel = 0;
+ return -EINVAL;
+ }
+ if (ri->input_regulator)
+ return regulator_set_voltage(ri->input_regulator, min_uV,
+ max_uV);
+ return 0;
+}
+
+static int gpio_switch_get_voltage(struct regulator_dev *rdev)
+{
+ struct gpio_switch_regulator *ri = rdev_get_drvdata(rdev);
+ if (ri->input_regulator)
+ return regulator_get_voltage(ri->input_regulator);
+
+ if (ri->curr_vol_sel < ri->reg_desc.n_voltages)
+ return ri->voltages[ri->curr_vol_sel] * 1000;
+ return 0;
+}
+
+static int gpio_switch_regulator_enable(struct regulator_dev *rdev)
+{
+ struct gpio_switch_regulator *ri = rdev_get_drvdata(rdev);
+ int ret = 0;
+ if (ri->is_enable)
+ return 0;
+
+ if (ri->input_regulator) {
+ ret = regulator_enable(ri->input_regulator);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "%s:Failed to enable regulator"
+ " Error %d\n", __func__, ret);
+ return ret;
+ }
+ }
+
+ ret = _gpio_regulator_enable(&rdev->dev, ri);
+ if (ret < 0)
+ return ret;
+ ri->is_enable = true;
+ return 0;
+}
+
+static int gpio_switch_regulator_disable(struct regulator_dev *rdev)
+{
+ struct gpio_switch_regulator *ri = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ if (!ri->is_enable)
+ return 0;
+
+ ret = _gpio_regulator_disable(&rdev->dev, ri);
+ if (ret < 0)
+ return ret;
+
+ if (ri->input_regulator) {
+ ret = regulator_disable(ri->input_regulator);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "%s:Failed to disable regulator"
+ " Error %d\n", __func__, ret);
+ return ret;
+ }
+ }
+
+ ri->is_enable = false;
+ return 0;
+}
+
+static int gpio_switch_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct gpio_switch_regulator *ri = rdev_get_drvdata(rdev);
+ int ret = 0;
+ if (ri->input_regulator) {
+ ret = regulator_is_enabled(ri->input_regulator);
+ if (!ret)
+ return ret;
+ }
+ return (ri->is_enable) ? 1 : 0;
+}
+
+static struct regulator_ops gpio_switch_regulator_ops = {
+ .list_voltage = gpio_switch_list_voltage,
+ .get_voltage = gpio_switch_get_voltage,
+ .set_voltage = gpio_switch_set_voltage,
+ .is_enabled = gpio_switch_regulator_is_enabled,
+ .enable = gpio_switch_regulator_enable,
+ .disable = gpio_switch_regulator_disable,
+};
+
+static int __devinit gpio_switch_regulator_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_regulator *ri = NULL;
+ struct gpio_switch_regulator *gswitch_reg = NULL;
+ struct gpio_switch_regulator_platform_data *pdata;
+ struct gpio_switch_regulator_subdev_data *sdata = NULL;
+ int id = pdev->id;
+ int ret = 0;
+ int rcount;
+
+ dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s:No platform data Exiting\n", __func__);
+ return -ENODEV;
+ }
+
+ BUG_ON(!pdata->num_subdevs);
+
+ gswitch_reg = kzalloc(sizeof(struct gpio_switch_regulator) *
+ pdata->num_subdevs, GFP_KERNEL);
+ if (!gswitch_reg) {
+ dev_err(&pdev->dev, "%s:Failed to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (rcount = 0; rcount < pdata->num_subdevs; ++rcount) {
+ ri = &gswitch_reg[rcount];
+ sdata = pdata->subdevs[rcount];
+
+ /* Initialize the regulator parameter */
+ ri->reg_desc.name = sdata->regulator_name;
+ ri->reg_desc.ops = &gpio_switch_regulator_ops;
+ ri->reg_desc.type = REGULATOR_VOLTAGE;
+ ri->reg_desc.id = sdata->id;
+ ri->reg_desc.n_voltages = sdata->n_voltages;
+ ri->reg_desc.owner = THIS_MODULE;
+ ri->is_init_success = false;
+
+ memcpy(&ri->reg_init_data.constraints, &sdata->constraints,
+ sizeof(struct regulation_constraints));
+
+ /* Initialize the minimum and maximum constraint voltages if
+ * they are not defined in the platform data */
+ if (!sdata->constraints.min_uV)
+ ri->reg_init_data.constraints.min_uV = 1000 *
+ sdata->voltages[0];
+
+ if (!sdata->constraints.max_uV)
+ ri->reg_init_data.constraints.max_uV = 1000 *
+ sdata->voltages[sdata->n_voltages - 1];
+
+ ri->reg_init_data.num_consumer_supplies =
+ sdata->num_consumer_supplies;
+ ri->reg_init_data.consumer_supplies = sdata->consumer_supplies;
+
+ ri->input_regulator = NULL;
+ ri->is_gpio_init = false;
+ ri->is_enable = (sdata->init_state) ? true : false;
+ ri->voltages = sdata->voltages;
+ ri->psubdev_data = sdata;
+ ri->gpio_nr = sdata->gpio_nr;
+ ri->active_low = sdata->active_low;
+ ri->dev = &pdev->dev;
+ ri->enable_rail = sdata->enable_rail;
+ ri->disable_rail = sdata->disable_rail;
+ ri->pin_group = sdata->pin_group;
+
+ /* Checking for board APIs enable/disable rail */
+ if (ri->enable_rail || ri->disable_rail)
+ BUG_ON(!(ri->enable_rail && ri->disable_rail));
+
+ /* Get the regulator structure if input supply is available */
+ if (sdata->input_supply) {
+ ri->input_regulator = regulator_get(NULL,
+ sdata->input_supply);
+ if (IS_ERR_OR_NULL(ri->input_regulator)) {
+ dev_err(&pdev->dev, "Unable to get regu"
+ "lator %s\n", sdata->input_supply);
+ ret = -ENODEV;
+ goto reg_get_fail;
+ }
+ if (ri->is_enable) {
+ ret = regulator_enable(ri->input_regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to enable "
+ "regulator %s\n",
+ sdata->input_supply);
+ goto reg_en_fail;
+ }
+ }
+ }
+
+ /* Initialize gpios */
+ ret = gpio_request(ri->gpio_nr, sdata->regulator_name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to request gpio %d\n",
+ ri->gpio_nr);
+ goto gpio_req_fail;
+ }
+
+ if (ri->is_enable)
+ ret = _gpio_regulator_enable(&pdev->dev, ri);
+ else
+ ret = _gpio_regulator_disable(&pdev->dev, ri);
+ if (ret < 0)
+ goto reg_cont_fail;
+
+ ri->is_gpio_init = true;
+
+ ri->rdev = regulator_register(&ri->reg_desc, &pdev->dev,
+ &ri->reg_init_data, ri);
+ if (IS_ERR_OR_NULL(ri->rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ ri->reg_desc.name);
+ ret = PTR_ERR(ri->rdev);
+ goto reg_reg_fail;
+ }
+
+ /* If everything success then continue for next registration */
+ ri->is_init_success = true;
+ continue;
+
+ /* Cleanup the current registration and continue for next
+ * registration*/
+reg_reg_fail:
+ if (ri->is_enable)
+ _gpio_regulator_disable(&pdev->dev, ri);
+reg_cont_fail:
+ gpio_free(ri->gpio_nr);
+gpio_req_fail:
+ if (ri->is_enable && ri->input_regulator)
+ regulator_disable(ri->input_regulator);
+reg_en_fail:
+ if (ri->input_regulator) {
+ regulator_put(ri->input_regulator);
+ ri->input_regulator = NULL;
+ }
+reg_get_fail:
+ dev_err(&pdev->dev, "Unable to register regulator %s\n",
+ sdata->regulator_name);
+ }
+
+ platform_set_drvdata(pdev, gswitch_reg);
+ return 0;
+}
+
+static int __devexit gpio_switch_regulator_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_regulator *ri = NULL;
+ struct gpio_switch_regulator *gswitch_reg = platform_get_drvdata(pdev);
+ int i;
+ struct gpio_switch_regulator_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ /* Unregister devices in reverse order */
+ for (i = pdata->num_subdevs; i; --i) {
+ ri = &gswitch_reg[i - 1];
+ /* If registration was not success, then do not release */
+ if (!ri->is_init_success)
+ continue;
+
+ if (ri->is_enable)
+ _gpio_regulator_disable(&pdev->dev, ri);
+
+ if (ri->input_regulator) {
+ if (ri->is_enable)
+ regulator_disable(ri->input_regulator);
+ regulator_put(ri->input_regulator);
+ }
+
+ regulator_unregister(ri->rdev);
+ gpio_free(ri->gpio_nr);
+ }
+
+ kfree(gswitch_reg);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver gpio_switch_regulator_driver = {
+ .driver = {
+ .name = "gpio-switch-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = gpio_switch_regulator_probe,
+ .remove = __devexit_p(gpio_switch_regulator_remove),
+};
+
+static int __init gpio_switch_regulator_init(void)
+{
+ return platform_driver_register(&gpio_switch_regulator_driver);
+}
+
+static void __exit gpio_switch_regulator_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_regulator_driver);
+}
+
+subsys_initcall_sync(gpio_switch_regulator_init);
+module_exit(gpio_switch_regulator_exit);
diff --git a/drivers/regulator/max77663-regulator.c b/drivers/regulator/max77663-regulator.c
new file mode 100644
index 000000000000..68b13dec1a66
--- /dev/null
+++ b/drivers/regulator/max77663-regulator.c
@@ -0,0 +1,895 @@
+/*
+ * drivers/regulator/max77663-regulator.c
+ * Maxim LDO and Buck regulators driver
+ *
+ * Copyright 2011 Maxim Integrated Products, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77663-core.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/max77663-regulator.h>
+
+/* Regulator types */
+#define REGULATOR_TYPE_SD 0
+#define REGULATOR_TYPE_LDO 1
+
+/* SD and LDO Registers */
+#define MAX77663_REG_SD0 0x16
+#define MAX77663_REG_SD1 0x17
+#define MAX77663_REG_SD2 0x18
+#define MAX77663_REG_SD3 0x19
+#define MAX77663_REG_SD4 0x1A
+#define MAX77663_REG_DVSSD0 0x1B
+#define MAX77663_REG_DVSSD1 0x1C
+#define MAX77663_REG_SD0_CFG 0x1D
+#define MAX77663_REG_DVSSD0_CFG MAX77663_REG_SD0_CFG
+#define MAX77663_REG_SD1_CFG 0x1E
+#define MAX77663_REG_DVSSD1_CFG MAX77663_REG_SD1_CFG
+#define MAX77663_REG_SD2_CFG 0x1F
+#define MAX77663_REG_SD3_CFG 0x20
+#define MAX77663_REG_SD4_CFG 0x21
+#define MAX77663_REG_LDO0_CFG 0x23
+#define MAX77663_REG_LDO0_CFG2 0x24
+#define MAX77663_REG_LDO1_CFG 0x25
+#define MAX77663_REG_LDO1_CFG2 0x26
+#define MAX77663_REG_LDO2_CFG 0x27
+#define MAX77663_REG_LDO2_CFG2 0x28
+#define MAX77663_REG_LDO3_CFG 0x29
+#define MAX77663_REG_LDO3_CFG2 0x2A
+#define MAX77663_REG_LDO4_CFG 0x2B
+#define MAX77663_REG_LDO4_CFG2 0x2C
+#define MAX77663_REG_LDO5_CFG 0x2D
+#define MAX77663_REG_LDO5_CFG2 0x2E
+#define MAX77663_REG_LDO6_CFG 0x2F
+#define MAX77663_REG_LDO6_CFG2 0x30
+#define MAX77663_REG_LDO7_CFG 0x31
+#define MAX77663_REG_LDO7_CFG2 0x32
+#define MAX77663_REG_LDO8_CFG 0x33
+#define MAX77663_REG_LDO8_CFG2 0x34
+
+/* Power Mode */
+#define POWER_MODE_NORMAL 3
+#define POWER_MODE_LPM 2
+#define POWER_MODE_GLPM 1
+#define POWER_MODE_DISABLE 0
+#define SD_POWER_MODE_MASK 0x30
+#define SD_POWER_MODE_SHIFT 4
+#define LDO_POWER_MODE_MASK 0xC0
+#define LDO_POWER_MODE_SHIFT 6
+
+/* SD Slew Rate */
+#define SD_SR_13_75 0
+#define SD_SR_27_5 1
+#define SD_SR_55 2
+#define SD_SR_100 3
+#define SD_SR_MASK 0xC0
+#define SD_SR_SHIFT 6
+
+/* SD Forced PWM Mode */
+#define SD_FPWM_MASK 0x04
+#define SD_FPWM_SHIFT 2
+
+/* SD Falling slew rate Active-Discharge Mode */
+#define SD_FSRADE_MASK 0x01
+#define SD_FSRADE_SHIFT 0
+
+/* Voltage */
+#define SDX_VOLT_MASK 0xFF
+#define SD1_VOLT_MASK 0x3F
+#define LDO_VOLT_MASK 0x3F
+
+/* FPS Registers */
+#define MAX77663_REG_FPS_CFG0 0x43
+#define MAX77663_REG_FPS_CFG1 0x44
+#define MAX77663_REG_FPS_CFG2 0x45
+#define MAX77663_REG_FPS_LDO0 0x46
+#define MAX77663_REG_FPS_LDO1 0x47
+#define MAX77663_REG_FPS_LDO2 0x48
+#define MAX77663_REG_FPS_LDO3 0x49
+#define MAX77663_REG_FPS_LDO4 0x4A
+#define MAX77663_REG_FPS_LDO5 0x4B
+#define MAX77663_REG_FPS_LDO6 0x4C
+#define MAX77663_REG_FPS_LDO7 0x4D
+#define MAX77663_REG_FPS_LDO8 0x4E
+#define MAX77663_REG_FPS_SD0 0x4F
+#define MAX77663_REG_FPS_SD1 0x50
+#define MAX77663_REG_FPS_SD2 0x51
+#define MAX77663_REG_FPS_SD3 0x52
+#define MAX77663_REG_FPS_SD4 0x53
+#define MAX77663_REG_FPS_NONE 0
+
+#define FPS_TIME_PERIOD_MASK 0x38
+#define FPS_TIME_PERIOD_SHIFT 3
+#define FPS_EN_SRC_MASK 0x06
+#define FPS_EN_SRC_SHIFT 1
+#define FPS_SW_EN_MASK 0x01
+#define FPS_SW_EN_SHIFT 0
+#define FPS_SRC_MASK 0xC0
+#define FPS_SRC_SHIFT 6
+#define FPS_PU_PERIOD_MASK 0x38
+#define FPS_PU_PERIOD_SHIFT 3
+#define FPS_PD_PERIOD_MASK 0x07
+#define FPS_PD_PERIOD_SHIFT 0
+
+/* Chip Identification Register */
+#define MAX77663_REG_CID5 0x5D
+
+#define CID_DIDM_MASK 0xF0
+#define CID_DIDM_SHIFT 4
+
+#define SD_SAFE_DOWN_UV 50000 /* 50mV */
+
+enum {
+ VOLT_REG = 0,
+ CFG_REG,
+ FPS_REG,
+};
+
+struct max77663_register {
+ u8 addr;
+ u8 val;
+};
+
+struct max77663_regulator {
+ struct regulator_dev *rdev;
+ struct device *dev;
+ struct max77663_regulator_platform_data *pdata;
+
+ u8 id;
+ u8 type;
+ u32 min_uV;
+ u32 max_uV;
+ u32 step_uV;
+ int safe_down_uV; /* for stable down scaling */
+ u32 regulator_mode;
+
+ struct max77663_register regs[3]; /* volt, cfg, fps */
+ enum max77663_regulator_fps_src fps_src;
+
+ u8 volt_mask;
+
+ u8 power_mode;
+ u8 power_mode_mask;
+ u8 power_mode_shift;
+};
+
+#define fps_src_name(fps_src) \
+ (fps_src == FPS_SRC_0 ? "FPS_SRC_0" : \
+ fps_src == FPS_SRC_1 ? "FPS_SRC_1" : \
+ fps_src == FPS_SRC_2 ? "FPS_SRC_2" : "FPS_SRC_NONE")
+
+static int fps_cfg_init;
+static struct max77663_register fps_cfg_regs[] = {
+ {
+ .addr = MAX77663_REG_FPS_CFG0,
+ },
+ {
+ .addr = MAX77663_REG_FPS_CFG1,
+ },
+ {
+ .addr = MAX77663_REG_FPS_CFG2,
+ },
+};
+
+static inline struct max77663_regulator_platform_data
+*_to_pdata(struct max77663_regulator *reg)
+{
+ return reg->pdata;
+}
+
+static inline struct device *_to_parent(struct max77663_regulator *reg)
+{
+ return reg->dev->parent;
+}
+
+static inline int max77663_regulator_cache_write(struct max77663_regulator *reg,
+ u8 addr, u8 mask, u8 val, u8 *cache)
+{
+ struct device *parent = _to_parent(reg);
+ u8 new_val;
+ int ret;
+
+ new_val = (*cache & ~mask) | (val & mask);
+ if (*cache != new_val) {
+ ret = max77663_write(parent, addr, &new_val, 1, 0);
+ if (ret < 0)
+ return ret;
+
+ *cache = new_val;
+ }
+ return 0;
+}
+
+static int
+max77663_regulator_set_fps_src(struct max77663_regulator *reg,
+ enum max77663_regulator_fps_src fps_src)
+{
+ int ret;
+
+ if ((reg->regs[FPS_REG].addr == MAX77663_REG_FPS_NONE) ||
+ (reg->fps_src == fps_src))
+ return 0;
+
+ switch (fps_src) {
+ case FPS_SRC_0:
+ case FPS_SRC_1:
+ case FPS_SRC_2:
+ case FPS_SRC_NONE:
+ break;
+ case FPS_SRC_DEF:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ ret = max77663_regulator_cache_write(reg, reg->regs[FPS_REG].addr,
+ FPS_SRC_MASK, fps_src << FPS_SRC_SHIFT,
+ &reg->regs[FPS_REG].val);
+ if (ret < 0)
+ return ret;
+
+ reg->fps_src = fps_src;
+ return 0;
+}
+
+static int max77663_regulator_set_fps(struct max77663_regulator *reg)
+{
+ struct max77663_regulator_platform_data *pdata = _to_pdata(reg);
+ u8 fps_val = 0, fps_mask = 0;
+ int ret = 0;
+
+ if (reg->regs[FPS_REG].addr == MAX77663_REG_FPS_NONE)
+ return 0;
+
+ if (reg->fps_src == FPS_SRC_NONE)
+ return 0;
+
+ /* FPS power up period setting */
+ if (pdata->fps_pu_period != FPS_POWER_PERIOD_DEF) {
+ fps_val |= (pdata->fps_pu_period << FPS_PU_PERIOD_SHIFT);
+ fps_mask |= FPS_PU_PERIOD_MASK;
+ }
+
+ /* FPS power down period setting */
+ if (pdata->fps_pd_period != FPS_POWER_PERIOD_DEF) {
+ fps_val |= (pdata->fps_pd_period << FPS_PD_PERIOD_SHIFT);
+ fps_mask |= FPS_PD_PERIOD_MASK;
+ }
+
+ if (fps_val)
+ ret = max77663_regulator_cache_write(reg,
+ reg->regs[FPS_REG].addr, fps_mask,
+ fps_val, &reg->regs[FPS_REG].val);
+
+ return ret;
+}
+
+static int
+max77663_regulator_set_fps_cfg(struct max77663_regulator *reg,
+ struct max77663_regulator_fps_cfg *fps_cfg)
+{
+ u8 val, mask;
+
+ if ((fps_cfg->src < FPS_SRC_0) || (fps_cfg->src > FPS_SRC_2))
+ return -EINVAL;
+
+ val = (fps_cfg->en_src << FPS_EN_SRC_SHIFT);
+ mask = FPS_EN_SRC_MASK;
+
+ if (fps_cfg->time_period != FPS_TIME_PERIOD_DEF) {
+ val |= (fps_cfg->time_period << FPS_TIME_PERIOD_SHIFT);
+ mask |= FPS_TIME_PERIOD_MASK;
+ }
+
+ return max77663_regulator_cache_write(reg,
+ fps_cfg_regs[fps_cfg->src].addr, mask,
+ val, &fps_cfg_regs[fps_cfg->src].val);
+}
+
+static int
+max77663_regulator_set_fps_cfgs(struct max77663_regulator *reg,
+ struct max77663_regulator_fps_cfg *fps_cfgs,
+ int num_fps_cfgs)
+{
+ struct device *parent = _to_parent(reg);
+ int i, ret;
+
+ if (fps_cfg_init)
+ return 0;
+
+ for (i = 0; i <= FPS_SRC_2; i++) {
+ ret = max77663_read(parent, fps_cfg_regs[i].addr,
+ &fps_cfg_regs[i].val, 1, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < num_fps_cfgs; i++) {
+ ret = max77663_regulator_set_fps_cfg(reg, &fps_cfgs[i]);
+ if (ret < 0)
+ return ret;
+ }
+ fps_cfg_init = 1;
+
+ return 0;
+}
+
+static int
+max77663_regulator_set_power_mode(struct max77663_regulator *reg, u8 power_mode)
+{
+ u8 mask = reg->power_mode_mask;
+ u8 shift = reg->power_mode_shift;
+ int ret;
+
+ if (reg->type == REGULATOR_TYPE_SD)
+ ret = max77663_regulator_cache_write(reg,
+ reg->regs[CFG_REG].addr,
+ mask, power_mode << shift,
+ &reg->regs[CFG_REG].val);
+ else
+ ret = max77663_regulator_cache_write(reg,
+ reg->regs[VOLT_REG].addr,
+ mask, power_mode << shift,
+ &reg->regs[VOLT_REG].val);
+
+ if (ret < 0)
+ return ret;
+
+ reg->power_mode = power_mode;
+ return ret;
+}
+
+static u8 max77663_regulator_get_power_mode(struct max77663_regulator *reg)
+{
+ u8 mask = reg->power_mode_mask;
+ u8 shift = reg->power_mode_shift;
+
+ if (reg->type == REGULATOR_TYPE_SD)
+ reg->power_mode = (reg->regs[CFG_REG].val & mask) >> shift;
+ else
+ reg->power_mode = (reg->regs[VOLT_REG].val & mask) >> shift;
+
+ return reg->power_mode;
+}
+
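+/*
+ * Program VOLT_REG through the register cache. When safe_down_uV is
+ * non-zero (set in preinit for pre-rev.3 SD rails), the output is
+ * lowered in steps of at most safe_down_uV rather than in one jump.
+ */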
+static int max77663_regulator_do_set_voltage(struct max77663_regulator *reg,
+ int min_uV, int max_uV)
+{
+ u8 addr = reg->regs[VOLT_REG].addr;
+ u8 mask = reg->volt_mask;
+ u8 *cache = &reg->regs[VOLT_REG].val;
+ u8 val;
+ int old_uV, new_uV, safe_uV;
+ int i, steps = 1;
+ int ret = 0;
+
+ if (min_uV < reg->min_uV || max_uV > reg->max_uV)
+ return -EDOM;
+
+ old_uV = (*cache & mask) * reg->step_uV + reg->min_uV;
+
+ if ((old_uV > min_uV) && (reg->safe_down_uV >= reg->step_uV)) {
+ steps = DIV_ROUND_UP(old_uV - min_uV, reg->safe_down_uV);
+ safe_uV = -reg->safe_down_uV;
+ }
+
+ if (steps == 1) {
+ val = (min_uV - reg->min_uV) / reg->step_uV;
+ ret = max77663_regulator_cache_write(reg, addr, mask, val,
+ cache);
+ } else {
+ for (i = 0; i < steps; i++) {
+ if (abs(min_uV - old_uV) > abs(safe_uV))
+ new_uV = old_uV + safe_uV;
+ else
+ new_uV = min_uV;
+
+ dev_dbg(&reg->rdev->dev, "do_set_voltage: name=%s, "
+ "%d/%d, old_uV=%d, new_uV=%d\n",
+ reg->rdev->desc->name, i + 1, steps, old_uV,
+ new_uV);
+
+ val = (new_uV - reg->min_uV) / reg->step_uV;
+ ret = max77663_regulator_cache_write(reg, addr, mask,
+ val, cache);
+ if (ret < 0)
+ return ret;
+
+ old_uV = new_uV;
+ }
+ }
+
+ return ret;
+}
+
+static int max77663_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+
+ dev_dbg(&rdev->dev, "set_voltage: name=%s, min_uV=%d, max_uV=%d\n",
+ rdev->desc->name, min_uV, max_uV);
+ return max77663_regulator_do_set_voltage(reg, min_uV, max_uV);
+}
+
+static int max77663_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+ int volt;
+
+ volt = (reg->regs[VOLT_REG].val & reg->volt_mask)
+ * reg->step_uV + reg->min_uV;
+
+ dev_dbg(&rdev->dev, "get_voltage: name=%s, volt=%d, val=0x%02x\n",
+ rdev->desc->name, volt, reg->regs[VOLT_REG].val);
+ return volt;
+}
+
+static int max77663_regulator_enable(struct regulator_dev *rdev)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+ struct max77663_regulator_platform_data *pdata = _to_pdata(reg);
+ int power_mode = POWER_MODE_NORMAL;
+
+ if (reg->fps_src != FPS_SRC_NONE) {
+ dev_dbg(&rdev->dev, "enable: Regulator %s using %s\n",
+ rdev->desc->name, fps_src_name(reg->fps_src));
+ return 0;
+ }
+
+ if ((reg->id == MAX77663_REGULATOR_ID_SD0)
+ && (pdata->flags & EN2_CTRL_SD0)) {
+ dev_dbg(&rdev->dev,
+ "enable: Regulator %s is controlled by EN2\n",
+ rdev->desc->name);
+ return 0;
+ }
+
+ if (reg->regulator_mode == REGULATOR_MODE_STANDBY)
+ power_mode = POWER_MODE_LPM;
+
+ return max77663_regulator_set_power_mode(reg, power_mode);
+}
+
+static int max77663_regulator_disable(struct regulator_dev *rdev)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+ struct max77663_regulator_platform_data *pdata = _to_pdata(reg);
+ int power_mode = POWER_MODE_DISABLE;
+
+ if (reg->fps_src != FPS_SRC_NONE) {
+ dev_dbg(&rdev->dev, "disable: Regulator %s using %s\n",
+ rdev->desc->name, fps_src_name(reg->fps_src));
+ return 0;
+ }
+
+ if ((reg->id == MAX77663_REGULATOR_ID_SD0)
+ && (pdata->flags & EN2_CTRL_SD0)) {
+ dev_dbg(&rdev->dev,
+ "disable: Regulator %s is controlled by EN2\n",
+ rdev->desc->name);
+ return 0;
+ }
+
+ return max77663_regulator_set_power_mode(reg, power_mode);
+}
+
+static int max77663_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+ struct max77663_regulator_platform_data *pdata = _to_pdata(reg);
+ int ret = 1;
+
+ if (reg->fps_src != FPS_SRC_NONE) {
+ dev_dbg(&rdev->dev, "is_enable: Regulator %s using %s\n",
+ rdev->desc->name, fps_src_name(reg->fps_src));
+ return 1;
+ }
+
+ if ((reg->id == MAX77663_REGULATOR_ID_SD0)
+ && (pdata->flags & EN2_CTRL_SD0)) {
+ dev_dbg(&rdev->dev,
+ "is_enable: Regulator %s is controlled by EN2\n",
+ rdev->desc->name);
+ return 1;
+ }
+
+ if (max77663_regulator_get_power_mode(reg) == POWER_MODE_DISABLE)
+ ret = 0;
+
+ return ret;
+}
+
+static int max77663_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+ u8 power_mode;
+ int ret;
+
+ if (mode == REGULATOR_MODE_NORMAL)
+ power_mode = POWER_MODE_NORMAL;
+ else if (mode == REGULATOR_MODE_STANDBY)
+ power_mode = POWER_MODE_LPM;
+ else
+ return -EINVAL;
+
+ ret = max77663_regulator_set_power_mode(reg, power_mode);
+ if (!ret)
+ reg->regulator_mode = mode;
+
+ return ret;
+}
+
+static unsigned int max77663_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct max77663_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->regulator_mode;
+}
+
+static struct regulator_ops max77663_ldo_ops = {
+ .set_voltage = max77663_regulator_set_voltage,
+ .get_voltage = max77663_regulator_get_voltage,
+ .enable = max77663_regulator_enable,
+ .disable = max77663_regulator_disable,
+ .is_enabled = max77663_regulator_is_enabled,
+ .set_mode = max77663_regulator_set_mode,
+ .get_mode = max77663_regulator_get_mode,
+};
+
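+/*
+ * Called from probe before registration: snapshot the VOLT/CFG/FPS
+ * registers, derive the current FPS source and power mode, then apply
+ * the FPS, voltage and enable settings requested in platform data.
+ */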
+static int max77663_regulator_preinit(struct max77663_regulator *reg)
+{
+ struct max77663_regulator_platform_data *pdata = _to_pdata(reg);
+ struct device *parent = _to_parent(reg);
+ int i;
+ u8 val, mask;
+ int ret;
+
+ /* Update registers */
+ for (i = 0; i <= FPS_REG; i++) {
+ ret = max77663_read(parent, reg->regs[i].addr,
+ &reg->regs[i].val, 1, 0);
+ if (ret < 0) {
+ dev_err(reg->dev,
+ "preinit: Failed to get register 0x%x\n",
+ reg->regs[i].addr);
+ return ret;
+ }
+ }
+
+ /* Update FPS source */
+ if (reg->regs[FPS_REG].addr == MAX77663_REG_FPS_NONE)
+ reg->fps_src = FPS_SRC_NONE;
+ else
+ reg->fps_src = (reg->regs[FPS_REG].val & FPS_SRC_MASK)
+ >> FPS_SRC_SHIFT;
+
+ dev_dbg(reg->dev, "preinit: initial fps_src=%s\n",
+ fps_src_name(reg->fps_src));
+
+ /* Update power mode */
+ max77663_regulator_get_power_mode(reg);
+
+ /* Check Chip Identification */
+ ret = max77663_read(parent, MAX77663_REG_CID5, &val, 1, 0);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: Failed to get register 0x%x\n",
+ MAX77663_REG_CID5);
+ return ret;
+ }
+
+ /* If metal revision is less than rev.3,
+ * set safe_down_uV for stable down scaling. */
+ if ((reg->type == REGULATOR_TYPE_SD) &&
+ ((val & CID_DIDM_MASK) >> CID_DIDM_SHIFT) <= 2)
+ reg->safe_down_uV = SD_SAFE_DOWN_UV;
+ else
+ reg->safe_down_uV = 0;
+
+ /* Set FPS */
+ ret = max77663_regulator_set_fps_cfgs(reg, pdata->fps_cfgs,
+ pdata->num_fps_cfgs);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: Failed to set FPSCFG\n");
+ return ret;
+ }
+
+	/* To prevent the power rail from turning off while the FPS source is
+	 * changed, set the power mode to NORMAL before switching the FPS
+	 * source from SRC_0, SRC_1 or SRC_2 to NONE. */
+ if ((reg->fps_src != FPS_SRC_NONE) && (pdata->fps_src == FPS_SRC_NONE)
+ && (reg->power_mode != POWER_MODE_NORMAL)) {
+ ret = max77663_regulator_set_power_mode(reg, POWER_MODE_NORMAL);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: Failed to "
+ "set power mode to POWER_MODE_NORMAL\n");
+ return ret;
+ }
+ }
+
+ ret = max77663_regulator_set_fps_src(reg, pdata->fps_src);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: Failed to set FPSSRC to %d\n",
+ pdata->fps_src);
+ return ret;
+ }
+
+ ret = max77663_regulator_set_fps(reg);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: Failed to set FPS\n");
+ return ret;
+ }
+
+ /* Set initial state */
+ if (!pdata->init_apply)
+ goto skip_init_apply;
+
+ if (pdata->init_uV >= 0) {
+ ret = max77663_regulator_do_set_voltage(reg, pdata->init_uV,
+ pdata->init_uV);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: Failed to set voltage to "
+ "%d\n", pdata->init_uV);
+ return ret;
+ }
+ }
+
+ if (pdata->init_enable)
+ val = POWER_MODE_NORMAL;
+ else
+ val = POWER_MODE_DISABLE;
+
+ ret = max77663_regulator_set_power_mode(reg, val);
+ if (ret < 0) {
+ dev_err(reg->dev,
+ "preinit: Failed to set power mode to %d\n", val);
+ return ret;
+ }
+
+skip_init_apply:
+ if (reg->type == REGULATOR_TYPE_SD) {
+ val = 0;
+ mask = 0;
+
+ if (pdata->flags & SD_SLEW_RATE_MASK) {
+ mask |= SD_SR_MASK;
+ if (pdata->flags & SD_SLEW_RATE_SLOWEST)
+ val |= (SD_SR_13_75 << SD_SR_SHIFT);
+ else if (pdata->flags & SD_SLEW_RATE_SLOW)
+ val |= (SD_SR_27_5 << SD_SR_SHIFT);
+ else if (pdata->flags & SD_SLEW_RATE_FAST)
+ val |= (SD_SR_55 << SD_SR_SHIFT);
+ else
+ val |= (SD_SR_100 << SD_SR_SHIFT);
+ }
+
+ if (pdata->flags & SD_FORCED_PWM_MODE) {
+ mask |= SD_FPWM_MASK;
+ val |= SD_FPWM_MASK;
+ }
+
+ if (pdata->flags & SD_FSRADE_DISABLE) {
+ mask |= SD_FSRADE_MASK;
+ val |= SD_FSRADE_MASK;
+ }
+
+ ret = max77663_regulator_cache_write(reg,
+ reg->regs[CFG_REG].addr, mask, val,
+ &reg->regs[CFG_REG].val);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: "
+ "Failed to set register 0x%x\n",
+ reg->regs[CFG_REG].addr);
+ return ret;
+ }
+
+ if ((reg->id == MAX77663_REGULATOR_ID_SD0)
+ && (pdata->flags & EN2_CTRL_SD0)) {
+ val = POWER_MODE_DISABLE;
+ ret = max77663_regulator_set_power_mode(reg, val);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: "
+ "Failed to set power mode to %d for "
+ "EN2_CTRL_SD0\n", val);
+ return ret;
+ }
+
+ ret = max77663_regulator_set_fps_src(reg, FPS_SRC_NONE);
+ if (ret < 0) {
+ dev_err(reg->dev, "preinit: "
+ "Failed to set FPSSRC to FPS_SRC_NONE "
+ "for EN2_CTRL_SD0\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+#define REGULATOR_SD(_id, _volt_mask, _fps_reg, _min_uV, _max_uV, _step_uV) \
+ [MAX77663_REGULATOR_ID_##_id] = { \
+ .id = MAX77663_REGULATOR_ID_##_id, \
+ .type = REGULATOR_TYPE_SD, \
+ .volt_mask = _volt_mask##_VOLT_MASK, \
+ .regs = { \
+ [VOLT_REG] = { \
+ .addr = MAX77663_REG_##_id, \
+ }, \
+ [CFG_REG] = { \
+ .addr = MAX77663_REG_##_id##_CFG, \
+ }, \
+ [FPS_REG] = { \
+ .addr = MAX77663_REG_FPS_##_fps_reg, \
+ }, \
+ }, \
+ .min_uV = _min_uV, \
+ .max_uV = _max_uV, \
+ .step_uV = _step_uV, \
+ .regulator_mode = REGULATOR_MODE_NORMAL, \
+ .power_mode = POWER_MODE_NORMAL, \
+ .power_mode_mask = SD_POWER_MODE_MASK, \
+ .power_mode_shift = SD_POWER_MODE_SHIFT, \
+ }
+
+#define REGULATOR_LDO(_id, _min_uV, _max_uV, _step_uV) \
+ [MAX77663_REGULATOR_ID_##_id] = { \
+ .id = MAX77663_REGULATOR_ID_##_id, \
+ .type = REGULATOR_TYPE_LDO, \
+ .volt_mask = LDO_VOLT_MASK, \
+ .regs = { \
+ [VOLT_REG] = { \
+ .addr = MAX77663_REG_##_id##_CFG, \
+ }, \
+ [CFG_REG] = { \
+ .addr = MAX77663_REG_##_id##_CFG2, \
+ }, \
+ [FPS_REG] = { \
+ .addr = MAX77663_REG_FPS_##_id, \
+ }, \
+ }, \
+ .min_uV = _min_uV, \
+ .max_uV = _max_uV, \
+ .step_uV = _step_uV, \
+ .regulator_mode = REGULATOR_MODE_NORMAL, \
+ .power_mode = POWER_MODE_NORMAL, \
+ .power_mode_mask = LDO_POWER_MODE_MASK, \
+ .power_mode_shift = LDO_POWER_MODE_SHIFT, \
+ }
+
+static struct max77663_regulator max77663_regs[MAX77663_REGULATOR_ID_NR] = {
+ REGULATOR_SD(SD0, SDX, SD0, 600000, 3387500, 12500),
+ REGULATOR_SD(DVSSD0, SDX, NONE, 600000, 3387500, 12500),
+ REGULATOR_SD(SD1, SD1, SD1, 800000, 1587500, 12500),
+ REGULATOR_SD(DVSSD1, SD1, NONE, 800000, 1587500, 12500),
+ REGULATOR_SD(SD2, SDX, SD2, 600000, 3387500, 12500),
+ REGULATOR_SD(SD3, SDX, SD3, 600000, 3387500, 12500),
+ REGULATOR_SD(SD4, SDX, SD4, 600000, 3387500, 12500),
+
+ REGULATOR_LDO(LDO0, 800000, 2350000, 25000),
+ REGULATOR_LDO(LDO1, 800000, 2350000, 25000),
+ REGULATOR_LDO(LDO2, 800000, 3950000, 50000),
+ REGULATOR_LDO(LDO3, 800000, 3950000, 50000),
+ REGULATOR_LDO(LDO4, 800000, 1587500, 12500),
+ REGULATOR_LDO(LDO5, 800000, 3950000, 50000),
+ REGULATOR_LDO(LDO6, 800000, 3950000, 50000),
+ REGULATOR_LDO(LDO7, 800000, 3950000, 50000),
+ REGULATOR_LDO(LDO8, 800000, 3950000, 50000),
+};
+
+#define REGULATOR_DESC(_id, _name) \
+ [MAX77663_REGULATOR_ID_##_id] = { \
+ .name = max77663_rails(_name), \
+ .id = MAX77663_REGULATOR_ID_##_id, \
+ .ops = &max77663_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+static struct regulator_desc max77663_rdesc[MAX77663_REGULATOR_ID_NR] = {
+ REGULATOR_DESC(SD0, sd0),
+ REGULATOR_DESC(DVSSD0, dvssd0),
+ REGULATOR_DESC(SD1, sd1),
+ REGULATOR_DESC(DVSSD1, dvssd1),
+ REGULATOR_DESC(SD2, sd2),
+ REGULATOR_DESC(SD3, sd3),
+ REGULATOR_DESC(SD4, sd4),
+ REGULATOR_DESC(LDO0, ldo0),
+ REGULATOR_DESC(LDO1, ldo1),
+ REGULATOR_DESC(LDO2, ldo2),
+ REGULATOR_DESC(LDO3, ldo3),
+ REGULATOR_DESC(LDO4, ldo4),
+ REGULATOR_DESC(LDO5, ldo5),
+ REGULATOR_DESC(LDO6, ldo6),
+ REGULATOR_DESC(LDO7, ldo7),
+ REGULATOR_DESC(LDO8, ldo8),
+};
+
+static int max77663_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_desc *rdesc;
+ struct max77663_regulator *reg;
+ int ret = 0;
+
+ if ((pdev->id < 0) || (pdev->id >= MAX77663_REGULATOR_ID_NR)) {
+ dev_err(&pdev->dev, "Invalid device id %d\n", pdev->id);
+ return -ENODEV;
+ }
+
+ rdesc = &max77663_rdesc[pdev->id];
+ reg = &max77663_regs[pdev->id];
+ reg->dev = &pdev->dev;
+ reg->pdata = dev_get_platdata(&pdev->dev);
+
+ dev_dbg(&pdev->dev, "probe: name=%s\n", rdesc->name);
+
+ ret = max77663_regulator_preinit(reg);
+ if (ret) {
+ dev_err(&pdev->dev, "probe: Failed to preinit regulator %s\n",
+ rdesc->name);
+ return ret;
+ }
+
+ reg->rdev = regulator_register(rdesc, &pdev->dev,
+ &reg->pdata->init_data, reg);
+ if (IS_ERR(reg->rdev)) {
+ dev_err(&pdev->dev, "probe: Failed to register regulator %s\n",
+ rdesc->name);
+ return PTR_ERR(reg->rdev);
+ }
+
+ return 0;
+}
+
+static int max77663_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver max77663_regulator_driver = {
+ .probe = max77663_regulator_probe,
+ .remove = __devexit_p(max77663_regulator_remove),
+ .driver = {
+ .name = "max77663-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init max77663_regulator_init(void)
+{
+ return platform_driver_register(&max77663_regulator_driver);
+}
+subsys_initcall(max77663_regulator_init);
+
+static void __exit max77663_reg_exit(void)
+{
+ platform_driver_unregister(&max77663_regulator_driver);
+}
+module_exit(max77663_reg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("max77663 regulator driver");
+MODULE_VERSION("1.0");
diff --git a/drivers/regulator/max8907c-regulator.c b/drivers/regulator/max8907c-regulator.c
new file mode 100644
index 000000000000..925f161d9922
--- /dev/null
+++ b/drivers/regulator/max8907c-regulator.c
@@ -0,0 +1,421 @@
+/*
+ * max8907c-regulator.c -- support regulators in max8907c
+ *
+ * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/max8907c.h>
+#include <linux/regulator/max8907c-regulator.h>
+
+#define MAX8907C_II2RR_VERSION_MASK 0xF0
+#define MAX8907C_II2RR_VERSION_REV_A 0x00
+#define MAX8907C_II2RR_VERSION_REV_B 0x10
+#define MAX8907C_II2RR_VERSION_REV_C 0x30
+
+#define MAX8907C_REGULATOR_CNT (ARRAY_SIZE(max8907c_regulators))
+
+struct max8907c_regulator_info {
+ u32 min_uV;
+ u32 max_uV;
+ u32 step_uV;
+ u8 reg_base;
+ struct regulator_desc desc;
+ struct i2c_client *i2c;
+};
+
+#define REG_LDO(ids, base, min, max, step) \
+ { \
+ .min_uV = (min), \
+ .max_uV = (max), \
+ .step_uV = (step), \
+ .reg_base = (base), \
+ .desc = { \
+ .name = #ids, \
+ .id = MAX8907C_##ids, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &max8907c_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define REG_FIXED(ids, voltage) \
+ { \
+ .min_uV = (voltage), \
+ .max_uV = (voltage), \
+ .desc = { \
+ .name = #ids, \
+ .id = MAX8907C_##ids, \
+ .n_voltages = 1, \
+ .ops = &max8907c_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define REG_OUT5V(ids, base, voltage) \
+ { \
+ .min_uV = (voltage), \
+ .max_uV = (voltage), \
+ .reg_base = (base), \
+ .desc = { \
+ .name = #ids, \
+ .id = MAX8907C_##ids, \
+ .n_voltages = 1, \
+ .ops = &max8907c_out5v_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define REG_BBAT(ids, base, min, max, step) \
+ { \
+ .min_uV = (min), \
+ .max_uV = (max), \
+ .step_uV = (step), \
+ .reg_base = (base), \
+ .desc = { \
+ .name = #ids, \
+ .id = MAX8907C_##ids, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &max8907c_bbat_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define REG_WLED(ids, base, voltage) \
+ { \
+ .min_uV = (voltage), \
+ .max_uV = (voltage), \
+ .reg_base = (base), \
+ .desc = { \
+ .name = #ids, \
+ .id = MAX8907C_##ids, \
+ .n_voltages = 1, \
+ .ops = &max8907c_wled_ops, \
+ .type = REGULATOR_CURRENT, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define LDO_750_50(id, base) REG_LDO(id, (base), 750000, 3900000, 50000)
+#define LDO_650_25(id, base) REG_LDO(id, (base), 650000, 2225000, 25000)
+
+static int max8907c_regulator_list_voltage(struct regulator_dev *dev,
+ unsigned index);
+static int max8907c_regulator_ldo_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV);
+static int max8907c_regulator_bbat_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV);
+static int max8907c_regulator_ldo_get_voltage(struct regulator_dev *dev);
+static int max8907c_regulator_fixed_get_voltage(struct regulator_dev *dev);
+static int max8907c_regulator_bbat_get_voltage(struct regulator_dev *dev);
+static int max8907c_regulator_wled_set_current_limit(struct regulator_dev *dev,
+ int min_uA, int max_uA);
+static int max8907c_regulator_wled_get_current_limit(struct regulator_dev *dev);
+static int max8907c_regulator_ldo_enable(struct regulator_dev *dev);
+static int max8907c_regulator_out5v_enable(struct regulator_dev *dev);
+static int max8907c_regulator_ldo_disable(struct regulator_dev *dev);
+static int max8907c_regulator_out5v_disable(struct regulator_dev *dev);
+static int max8907c_regulator_ldo_is_enabled(struct regulator_dev *dev);
+static int max8907c_regulator_out5v_is_enabled(struct regulator_dev *dev);
+
+static struct regulator_ops max8907c_ldo_ops = {
+ .list_voltage = max8907c_regulator_list_voltage,
+ .set_voltage = max8907c_regulator_ldo_set_voltage,
+ .get_voltage = max8907c_regulator_ldo_get_voltage,
+ .enable = max8907c_regulator_ldo_enable,
+ .disable = max8907c_regulator_ldo_disable,
+ .is_enabled = max8907c_regulator_ldo_is_enabled,
+};
+
+static struct regulator_ops max8907c_fixed_ops = {
+ .list_voltage = max8907c_regulator_list_voltage,
+ .get_voltage = max8907c_regulator_fixed_get_voltage,
+};
+
+static struct regulator_ops max8907c_out5v_ops = {
+ .list_voltage = max8907c_regulator_list_voltage,
+ .get_voltage = max8907c_regulator_fixed_get_voltage,
+ .enable = max8907c_regulator_out5v_enable,
+ .disable = max8907c_regulator_out5v_disable,
+ .is_enabled = max8907c_regulator_out5v_is_enabled,
+};
+
+static struct regulator_ops max8907c_bbat_ops = {
+ .list_voltage = max8907c_regulator_list_voltage,
+ .set_voltage = max8907c_regulator_bbat_set_voltage,
+ .get_voltage = max8907c_regulator_bbat_get_voltage,
+};
+
+static struct regulator_ops max8907c_wled_ops = {
+ .list_voltage = max8907c_regulator_list_voltage,
+ .set_current_limit = max8907c_regulator_wled_set_current_limit,
+ .get_current_limit = max8907c_regulator_wled_get_current_limit,
+ .get_voltage = max8907c_regulator_fixed_get_voltage,
+};
+
+static struct max8907c_regulator_info max8907c_regulators[] = {
+ REG_LDO(SD1, MAX8907C_REG_SDCTL1, 650000, 2225000, 25000),
+ REG_LDO(SD2, MAX8907C_REG_SDCTL2, 637500, 1425000, 12500),
+ REG_LDO(SD3, MAX8907C_REG_SDCTL3, 750000, 3900000, 50000),
+ LDO_750_50(LDO1, MAX8907C_REG_LDOCTL1),
+ LDO_650_25(LDO2, MAX8907C_REG_LDOCTL2),
+ LDO_650_25(LDO3, MAX8907C_REG_LDOCTL3),
+ LDO_750_50(LDO4, MAX8907C_REG_LDOCTL4),
+ LDO_750_50(LDO5, MAX8907C_REG_LDOCTL5),
+ LDO_750_50(LDO6, MAX8907C_REG_LDOCTL6),
+ LDO_750_50(LDO7, MAX8907C_REG_LDOCTL7),
+ LDO_750_50(LDO8, MAX8907C_REG_LDOCTL8),
+ LDO_750_50(LDO9, MAX8907C_REG_LDOCTL9),
+ LDO_750_50(LDO10, MAX8907C_REG_LDOCTL10),
+ LDO_750_50(LDO11, MAX8907C_REG_LDOCTL11),
+ LDO_750_50(LDO12, MAX8907C_REG_LDOCTL12),
+ LDO_750_50(LDO13, MAX8907C_REG_LDOCTL13),
+ LDO_750_50(LDO14, MAX8907C_REG_LDOCTL14),
+ LDO_750_50(LDO15, MAX8907C_REG_LDOCTL15),
+ LDO_750_50(LDO16, MAX8907C_REG_LDOCTL16),
+ LDO_650_25(LDO17, MAX8907C_REG_LDOCTL17),
+ LDO_650_25(LDO18, MAX8907C_REG_LDOCTL18),
+ LDO_750_50(LDO19, MAX8907C_REG_LDOCTL19),
+ LDO_750_50(LDO20, MAX8907C_REG_LDOCTL20),
+ REG_OUT5V(OUT5V, MAX8907C_REG_OUT5VEN, 5000000),
+ REG_OUT5V(OUT33V, MAX8907C_REG_OUT33VEN, 3300000),
+ REG_BBAT(BBAT, MAX8907C_REG_BBAT_CNFG, 2400000, 3000000, 200000),
+ REG_FIXED(SDBY, 1200000),
+ REG_FIXED(VRTC, 3300000),
+ REG_WLED(WLED, MAX8907C_REG_ILED_CNTL, 0),
+};
+
+static int max8907c_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned index)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return info->min_uV + info->step_uV * index;
+}
+
+static int max8907c_regulator_ldo_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ if (min_uV < info->min_uV || max_uV > info->max_uV)
+ return -EDOM;
+
+ val = (min_uV - info->min_uV) / info->step_uV;
+
+ return max8907c_reg_write(info->i2c, info->reg_base + MAX8907C_VOUT, val);
+}
+
+static int max8907c_regulator_bbat_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ if (min_uV < info->min_uV || max_uV > info->max_uV)
+ return -EDOM;
+
+ val = (min_uV - info->min_uV) / info->step_uV;
+
+ return max8907c_set_bits(info->i2c, info->reg_base, MAX8907C_MASK_VBBATTCV,
+ val);
+}
+
+static int max8907c_regulator_ldo_get_voltage(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = max8907c_reg_read(info->i2c, info->reg_base + MAX8907C_VOUT);
+ return val * info->step_uV + info->min_uV;
+}
+
+static int max8907c_regulator_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return info->min_uV;
+}
+
+static int max8907c_regulator_bbat_get_voltage(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val =
+ max8907c_reg_read(info->i2c, info->reg_base) & MAX8907C_MASK_VBBATTCV;
+ return val * info->step_uV + info->min_uV;
+}
+
+static int max8907c_regulator_wled_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (min_uA > 25500)
+ return -EDOM;
+
+ return max8907c_reg_write(info->i2c, info->reg_base, min_uA / 100);
+}
+
+static int max8907c_regulator_wled_get_current_limit(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = max8907c_reg_read(info->i2c, info->reg_base);
+ return val * 100;
+}
+
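+/*
+ * LDO on/off is handled through the EN and SEQ bits of the CTL register:
+ * enable sets both bits, disable clears EN while leaving SEQ set, and
+ * is_enabled reports on when EN is set or SEQ is clear.
+ */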
+static int max8907c_regulator_ldo_enable(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8907c_set_bits(info->i2c, info->reg_base + MAX8907C_CTL,
+ MAX8907C_MASK_LDO_EN | MAX8907C_MASK_LDO_SEQ,
+ MAX8907C_MASK_LDO_EN | MAX8907C_MASK_LDO_SEQ);
+}
+
+static int max8907c_regulator_out5v_enable(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8907c_set_bits(info->i2c, info->reg_base,
+ MAX8907C_MASK_OUT5V_VINEN |
+ MAX8907C_MASK_OUT5V_ENSRC |
+ MAX8907C_MASK_OUT5V_EN,
+ MAX8907C_MASK_OUT5V_ENSRC |
+ MAX8907C_MASK_OUT5V_EN);
+}
+
+static int max8907c_regulator_ldo_disable(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8907c_set_bits(info->i2c, info->reg_base + MAX8907C_CTL,
+ MAX8907C_MASK_LDO_EN | MAX8907C_MASK_LDO_SEQ,
+ MAX8907C_MASK_LDO_SEQ);
+}
+
+static int max8907c_regulator_out5v_disable(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8907c_set_bits(info->i2c, info->reg_base,
+ MAX8907C_MASK_OUT5V_VINEN |
+ MAX8907C_MASK_OUT5V_ENSRC |
+ MAX8907C_MASK_OUT5V_EN,
+ MAX8907C_MASK_OUT5V_ENSRC);
+}
+
+static int max8907c_regulator_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = max8907c_reg_read(info->i2c, info->reg_base + MAX8907C_CTL);
+ if (val < 0)
+ return -EDOM;
+
+ return (val & MAX8907C_MASK_LDO_EN) || !(val & MAX8907C_MASK_LDO_SEQ);
+}
+
+static int max8907c_regulator_out5v_is_enabled(struct regulator_dev *rdev)
+{
+ const struct max8907c_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = max8907c_reg_read(info->i2c, info->reg_base);
+ if (val < 0)
+ return -EDOM;
+
+ if ((val &
+ (MAX8907C_MASK_OUT5V_VINEN | MAX8907C_MASK_OUT5V_ENSRC |
+ MAX8907C_MASK_OUT5V_EN))
+ == MAX8907C_MASK_OUT5V_ENSRC)
+ return 1;
+
+ return 0;
+}
+
+static int max8907c_regulator_probe(struct platform_device *pdev)
+{
+ struct max8907c *max8907c = dev_get_drvdata(pdev->dev.parent);
+ struct max8907c_regulator_info *info;
+ struct regulator_dev *rdev;
+ u8 version;
+
+ /* Backwards compatibility with max8907b, SD1 uses different voltages */
+ version = max8907c_reg_read(max8907c->i2c_power, MAX8907C_REG_II2RR);
+ if ((version & MAX8907C_II2RR_VERSION_MASK) == MAX8907C_II2RR_VERSION_REV_B) {
+ max8907c_regulators[MAX8907C_SD1].min_uV = 637500;
+ max8907c_regulators[MAX8907C_SD1].max_uV = 1425000;
+ max8907c_regulators[MAX8907C_SD1].step_uV = 12500;
+ }
+
+ info = &max8907c_regulators[pdev->id];
+ info->i2c = max8907c->i2c_power;
+
+ rdev = regulator_register(&info->desc,
+ &pdev->dev, pdev->dev.platform_data, info);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Cannot register regulator \"%s\", %ld\n",
+ info->desc.name, PTR_ERR(rdev));
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, rdev);
+ return 0;
+
+error:
+ return PTR_ERR(rdev);
+}
+
+static int max8907c_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver max8907c_regulator_driver = {
+ .driver = {
+ .name = "max8907c-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8907c_regulator_probe,
+ .remove = __devexit_p(max8907c_regulator_remove),
+};
+
+static int __init max8907c_regulator_init(void)
+{
+ return platform_driver_register(&max8907c_regulator_driver);
+}
+
+subsys_initcall(max8907c_regulator_init);
+
+static void __exit max8907c_reg_exit(void)
+{
+ platform_driver_unregister(&max8907c_regulator_driver);
+}
+
+module_exit(max8907c_reg_exit);
+
+MODULE_DESCRIPTION("MAX8907C regulator driver");
+MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@maxim-ic.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/ricoh583-regulator.c b/drivers/regulator/ricoh583-regulator.c
new file mode 100644
index 000000000000..867ffe629ed2
--- /dev/null
+++ b/drivers/regulator/ricoh583-regulator.c
@@ -0,0 +1,412 @@
+/*
+ * drivers/regulator/ricoh583-regulator.c
+ *
+ * Regulator driver for RICOH583 power management chip.
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/ricoh583.h>
+#include <linux/regulator/ricoh583-regulator.h>
+
+struct ricoh583_regulator {
+ int id;
+ int deepsleep_id;
+ /* Regulator register address.*/
+ u8 reg_en_reg;
+ u8 en_bit;
+ u8 reg_disc_reg;
+ u8 disc_bit;
+ u8 vout_reg;
+ u8 vout_mask;
+ u8 vout_reg_cache;
+ u8 deepsleep_reg;
+
+ /* chip constraints on regulator behavior */
+ int min_uV;
+ int max_uV;
+ int step_uV;
+ int nsteps;
+
+ /* regulator specific turn-on delay */
+ u16 delay;
+
+ /* used by regulator core */
+ struct regulator_desc desc;
+
+ /* Device */
+ struct device *dev;
+};
+
+
+static inline struct device *to_ricoh583_dev(struct regulator_dev *rdev)
+{
+ return rdev_get_dev(rdev)->parent->parent;
+}
+
+static int ricoh583_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->delay;
+}
+
+static int ricoh583_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_ricoh583_dev(rdev);
+ uint8_t control;
+ int ret;
+
+ ret = ricoh583_read(parent, ri->reg_en_reg, &control);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Error in reading the control register\n");
+ return ret;
+ }
+ return (((control >> ri->en_bit) & 1) == 1);
+}
+
+static int ricoh583_reg_enable(struct regulator_dev *rdev)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_ricoh583_dev(rdev);
+ int ret;
+
+ ret = ricoh583_set_bits(parent, ri->reg_en_reg, (1 << ri->en_bit));
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Error in updating the STATE register\n");
+ return ret;
+ }
+ udelay(ri->delay);
+ return ret;
+}
+
+static int ricoh583_reg_disable(struct regulator_dev *rdev)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_ricoh583_dev(rdev);
+ int ret;
+
+ ret = ricoh583_clr_bits(parent, ri->reg_en_reg, (1 << ri->en_bit));
+ if (ret < 0)
+ dev_err(&rdev->dev, "Error in updating the STATE register\n");
+
+ return ret;
+}
+
+static int ricoh583_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->min_uV + (ri->step_uV * index);
+}
+
+static int __ricoh583_set_ds_voltage(struct device *parent,
+ struct ricoh583_regulator *ri, int min_uV, int max_uV)
+{
+ int vsel;
+ int ret;
+
+ if ((min_uV < ri->min_uV) || (max_uV > ri->max_uV))
+ return -EDOM;
+
+ vsel = (min_uV - ri->min_uV + ri->step_uV - 1)/ri->step_uV;
+ if (vsel > ri->nsteps)
+ return -EDOM;
+
+ ret = ricoh583_update(parent, ri->deepsleep_reg, vsel, ri->vout_mask);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing the deepsleep register\n");
+ return ret;
+}
+
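+/*
+ * Round min_uV up to the next supported step and write the selector
+ * through the cached VOUT register, so get_voltage can avoid an extra
+ * I2C read.
+ */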
+static int __ricoh583_set_voltage(struct device *parent,
+ struct ricoh583_regulator *ri, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int vsel;
+ int ret;
+ uint8_t vout_val;
+
+ if ((min_uV < ri->min_uV) || (max_uV > ri->max_uV))
+ return -EDOM;
+
+ vsel = (min_uV - ri->min_uV + ri->step_uV - 1)/ri->step_uV;
+ if (vsel > ri->nsteps)
+ return -EDOM;
+
+ if (selector)
+ *selector = vsel;
+
+ vout_val = (ri->vout_reg_cache & ~ri->vout_mask) |
+ (vsel & ri->vout_mask);
+ ret = ricoh583_write(parent, ri->vout_reg, vout_val);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing the Voltage register\n");
+ else
+ ri->vout_reg_cache = vout_val;
+
+ return ret;
+}
+
+static int ricoh583_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_ricoh583_dev(rdev);
+
+ return __ricoh583_set_voltage(parent, ri, min_uV, max_uV, selector);
+}
+
+static int ricoh583_get_voltage(struct regulator_dev *rdev)
+{
+ struct ricoh583_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t vsel;
+
+ vsel = ri->vout_reg_cache & ri->vout_mask;
+ return ri->min_uV + vsel * ri->step_uV;
+}
+
+static struct regulator_ops ricoh583_ops = {
+ .list_voltage = ricoh583_list_voltage,
+ .set_voltage = ricoh583_set_voltage,
+ .get_voltage = ricoh583_get_voltage,
+ .enable = ricoh583_reg_enable,
+ .disable = ricoh583_reg_disable,
+ .is_enabled = ricoh583_reg_is_enabled,
+ .enable_time = ricoh583_regulator_enable_time,
+};
+
+#define RICOH583_REG(_id, _en_reg, _en_bit, _disc_reg, _disc_bit, _vout_reg, \
+ _vout_mask, _ds_reg, _min_mv, _max_mv, _step_uV, _nsteps, \
+ _ops, _delay) \
+{ \
+ .reg_en_reg = _en_reg, \
+ .en_bit = _en_bit, \
+ .reg_disc_reg = _disc_reg, \
+ .disc_bit = _disc_bit, \
+ .vout_reg = _vout_reg, \
+ .vout_mask = _vout_mask, \
+ .deepsleep_reg = _ds_reg, \
+ .min_uV = _min_mv * 1000, \
+ .max_uV = _max_mv * 1000, \
+ .step_uV = _step_uV, \
+ .nsteps = _nsteps, \
+ .delay = _delay, \
+ .id = RICOH583_ID_##_id, \
+ .deepsleep_id = RICOH583_DS_##_id, \
+ .desc = { \
+ .name = ricoh583_rails(_id), \
+ .id = RICOH583_ID_##_id, \
+ .n_voltages = _nsteps, \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+}
+
+static struct ricoh583_regulator ricoh583_regulator[] = {
+ RICOH583_REG(DC0, 0x30, 0, 0x30, 1, 0x31, 0x7F, 0x60,
+ 700, 1500, 12500, 0x41, ricoh583_ops, 500),
+ RICOH583_REG(DC1, 0x34, 0, 0x34, 1, 0x35, 0x7F, 0x61,
+ 700, 1500, 12500, 0x41, ricoh583_ops, 500),
+ RICOH583_REG(DC2, 0x38, 0, 0x38, 1, 0x39, 0x7F, 0x62,
+ 900, 2400, 12500, 0x79, ricoh583_ops, 500),
+ RICOH583_REG(DC3, 0x3C, 0, 0x3C, 1, 0x3D, 0x7F, 0x63,
+ 900, 2400, 12500, 0x79, ricoh583_ops, 500),
+ RICOH583_REG(LDO0, 0x51, 0, 0x53, 0, 0x54, 0x7F, 0x64,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO1, 0x51, 1, 0x53, 1, 0x55, 0x7F, 0x65,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO2, 0x51, 2, 0x53, 2, 0x56, 0x7F, 0x66,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO3, 0x51, 3, 0x53, 3, 0x57, 0x7F, 0x67,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO4, 0x51, 4, 0x53, 4, 0x58, 0x3F, 0x68,
+ 750, 1500, 12500, 0x3D, ricoh583_ops, 500),
+ RICOH583_REG(LDO5, 0x51, 5, 0x53, 5, 0x59, 0x7F, 0x69,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO6, 0x51, 6, 0x53, 6, 0x5A, 0x7F, 0x6A,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO7, 0x51, 7, 0x53, 7, 0x5B, 0x7F, 0x6B,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO8, 0x50, 0, 0x52, 0, 0x5C, 0x7F, 0x6C,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+ RICOH583_REG(LDO9, 0x50, 1, 0x52, 1, 0x5D, 0x7F, 0x6D,
+ 900, 3400, 25000, 0x65, ricoh583_ops, 500),
+};
+
+static inline struct ricoh583_regulator *find_regulator_info(int id)
+{
+ struct ricoh583_regulator *ri;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ricoh583_regulator); i++) {
+ ri = &ricoh583_regulator[i];
+ if (ri->desc.id == id)
+ return ri;
+ }
+ return NULL;
+}
+
+static int ricoh583_regulator_preinit(struct device *parent,
+ struct ricoh583_regulator *ri,
+ struct ricoh583_regulator_platform_data *ricoh583_pdata)
+{
+ int ret = 0;
+
+ if (ri->deepsleep_id != RICOH583_DS_NONE) {
+ ret = ricoh583_ext_power_req_config(parent, ri->deepsleep_id,
+ ricoh583_pdata->ext_pwr_req,
+ ricoh583_pdata->deepsleep_slots);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!ricoh583_pdata->init_apply)
+ return 0;
+
+ if (ricoh583_pdata->deepsleep_uV) {
+ ret = __ricoh583_set_ds_voltage(parent, ri,
+ ricoh583_pdata->deepsleep_uV,
+ ricoh583_pdata->deepsleep_uV);
+ if (ret < 0) {
+ dev_err(ri->dev, "Not able to initialize ds voltage %d"
+ " for rail %d err %d\n",
+ ricoh583_pdata->deepsleep_uV, ri->desc.id, ret);
+ return ret;
+ }
+ }
+
+ if (ricoh583_pdata->init_uV >= 0) {
+ ret = __ricoh583_set_voltage(parent, ri,
+ ricoh583_pdata->init_uV,
+ ricoh583_pdata->init_uV, 0);
+ if (ret < 0) {
+ dev_err(ri->dev, "Not able to initialize voltage %d "
+ "for rail %d err %d\n", ricoh583_pdata->init_uV,
+ ri->desc.id, ret);
+ return ret;
+ }
+ }
+
+ if (ricoh583_pdata->init_enable)
+ ret = ricoh583_set_bits(parent, ri->reg_en_reg,
+ (1 << ri->en_bit));
+ else
+ ret = ricoh583_clr_bits(parent, ri->reg_en_reg,
+ (1 << ri->en_bit));
+ if (ret < 0)
+ dev_err(ri->dev, "Not able to %s rail %d err %d\n",
+ (ricoh583_pdata->init_enable) ? "enable" : "disable",
+ ri->desc.id, ret);
+
+ return ret;
+}
+
+static inline int ricoh583_cache_regulator_register(struct device *parent,
+ struct ricoh583_regulator *ri)
+{
+ ri->vout_reg_cache = 0;
+ return ricoh583_read(parent, ri->vout_reg, &ri->vout_reg_cache);
+}
+
+static int __devinit ricoh583_regulator_probe(struct platform_device *pdev)
+{
+ struct ricoh583_regulator *ri = NULL;
+ struct regulator_dev *rdev;
+ struct ricoh583_regulator_platform_data *tps_pdata;
+ int id = pdev->id;
+ int err;
+
+ dev_dbg(&pdev->dev, "Probing reulator %d\n", id);
+
+ ri = find_regulator_info(id);
+ if (ri == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+ tps_pdata = pdev->dev.platform_data;
+ ri->dev = &pdev->dev;
+
+ err = ricoh583_cache_regulator_register(pdev->dev.parent, ri);
+ if (err) {
+ dev_err(&pdev->dev, "Fail in caching register\n");
+ return err;
+ }
+
+ err = ricoh583_regulator_preinit(pdev->dev.parent, ri, tps_pdata);
+ if (err) {
+ dev_err(&pdev->dev, "Fail in pre-initialisation\n");
+ return err;
+ }
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ &tps_pdata->regulator, ri);
+ if (IS_ERR_OR_NULL(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ platform_set_drvdata(pdev, rdev);
+ return 0;
+}
+
+static int __devexit ricoh583_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver ricoh583_regulator_driver = {
+ .driver = {
+ .name = "ricoh583-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = ricoh583_regulator_probe,
+ .remove = __devexit_p(ricoh583_regulator_remove),
+};
+
+static int __init ricoh583_regulator_init(void)
+{
+ return platform_driver_register(&ricoh583_regulator_driver);
+}
+subsys_initcall(ricoh583_regulator_init);
+
+static void __exit ricoh583_regulator_exit(void)
+{
+ platform_driver_unregister(&ricoh583_regulator_driver);
+}
+module_exit(ricoh583_regulator_exit);
+
+MODULE_DESCRIPTION("RICOH583 regulator driver");
+MODULE_ALIAS("platform:ricoh583-regulator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/tps6236x-regulator.c b/drivers/regulator/tps6236x-regulator.c
new file mode 100644
index 000000000000..1928b30c8802
--- /dev/null
+++ b/drivers/regulator/tps6236x-regulator.c
@@ -0,0 +1,521 @@
+/*
+ * drivers/regulator/tps6236x-regulator.c
+ *
+ * Driver for processor core supply tps62360 and tps62361B
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/tps6236x-regulator.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+#define REG_VSET0 0
+#define REG_VSET1 1
+#define REG_VSET2 2
+#define REG_VSET3 3
+#define REG_CONTROL 4
+#define REG_TEMP 5
+#define REG_RAMPCTRL 6
+#define REG_CHIPID 8
+
+enum chips {TPS62360, TPS62361B};
+
+/* Supported voltage values for regulators */
+static const u16 TPS62360_VOLTAGES[] = {
+ 770, 780, 790, 800, 810, 820, 830, 840, 850, 860,
+ 870, 880, 890, 900, 910, 920, 930, 940, 950, 960,
+ 970, 980, 990, 1000, 1010, 1020, 1030, 1040, 1050, 1060,
+	1070, 1080, 1090, 1100, 1110, 1120, 1130, 1140, 1150, 1160,
+ 1170, 1180, 1190, 1200, 1210, 1220, 1230, 1240, 1250, 1260,
+ 1270, 1280, 1290, 1300, 1310, 1320, 1330, 1340, 1350, 1360,
+ 1370, 1380, 1390, 1400,
+};
+
+static const u16 TPS62361_VOLTAGES[] = {
+ 500, 510, 520, 530, 540, 550, 560, 570, 580, 590,
+ 600, 610, 620, 630, 640, 650, 660, 670, 680, 690,
+ 700, 710, 720, 730, 740, 750, 760, 770, 780, 790,
+ 800, 810, 820, 830, 840, 850, 860, 870, 880, 890,
+ 900, 910, 920, 930, 940, 950, 960, 970, 980, 990,
+ 1000, 1010, 1020, 1030, 1040, 1050, 1060, 1070, 1080, 1090,
+	1100, 1110, 1120, 1130, 1140, 1150, 1160, 1170, 1180, 1190,
+ 1200, 1210, 1220, 1230, 1240, 1250, 1260, 1270, 1280, 1290,
+ 1300, 1310, 1320, 1330, 1340, 1350, 1360, 1370, 1380, 1390,
+ 1400, 1410, 1420, 1430, 1440, 1450, 1460, 1470, 1480, 1490,
+ 1500, 1510, 1520, 1530, 1540, 1550, 1560, 1570, 1580, 1590,
+ 1600, 1610, 1620, 1630, 1640, 1650, 1660, 1670, 1680, 1690,
+ 1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770,
+};
+
+/* tps 6236x chip information */
+struct tps6236x_chip {
+ const char *name;
+ struct device *dev;
+ struct regulator_desc desc;
+ struct i2c_client *client;
+ struct regulator_dev *rdev;
+ struct mutex io_lock;
+ int chip_id;
+ unsigned int curr_uV;
+ int vsel_id;
+ int internal_pulldn_en;
+ const u16 *voltages;
+ u8 voltage_reg_mask;
+ bool is_force_pwm;
+ bool enable_discharge;
+};
+
+static inline int tps6236x_read(struct tps6236x_chip *tps, u8 reg)
+{
+ return i2c_smbus_read_byte_data(tps->client, reg);
+}
+
+static inline int tps6236x_write(struct tps6236x_chip *tps, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(tps->client, reg, val);
+}
+
+static int tps6236x_reg_read(struct tps6236x_chip *tps, u8 reg)
+{
+ int data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps6236x_read(tps, reg);
+ if (data < 0)
+ dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps->io_lock);
+
+ return data;
+}
+
+static int tps6236x_reg_write(struct tps6236x_chip *tps, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&tps->io_lock);
+
+ err = tps6236x_write(tps, reg, val);
+ if (err < 0)
+ dev_err(tps->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps->io_lock);
+
+ return err;
+}
+
+static int tps6236x_reg_update(struct tps6236x_chip *tps, u8 reg, u8 val,
+ u8 mask)
+{
+ int err;
+ int data;
+ u8 reg_val;
+
+ mutex_lock(&tps->io_lock);
+ data = tps6236x_read(tps, reg);
+ if (data < 0) {
+ dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+ reg_val = (u8)data;
+ reg_val = (reg_val & ~mask) | (val & mask);
+
+ err = tps6236x_write(tps, reg, reg_val);
+ if (err < 0)
+ dev_err(tps->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps->io_lock);
+ return err;
+}
+
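+/*
+ * Walk the voltage table and program the first entry that falls inside
+ * [min_uV, max_uV]; bit 7 of the VSETx value is set when forced-PWM
+ * mode has been requested.
+ */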
+static int __tps6236x_dcdc_set_voltage(struct tps6236x_chip *tps,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int vsel;
+
+ if (max_uV < min_uV)
+ return -EINVAL;
+
+ if (min_uV > tps->voltages[tps->desc.n_voltages - 1] * 1000)
+ return -EINVAL;
+
+ if (max_uV < tps->voltages[0] * 1000)
+ return -EINVAL;
+
+ for (vsel = 0; vsel < tps->desc.n_voltages; ++vsel) {
+ int mV = tps->voltages[vsel];
+ int uV = mV * 1000;
+ if (min_uV <= uV && uV <= max_uV) {
+ if (selector)
+ *selector = vsel;
+ if (tps->is_force_pwm)
+ vsel |= (1 << 7);
+ return tps6236x_reg_write(tps, REG_VSET0 + tps->vsel_id,
+ vsel);
+ }
+ }
+ return -EINVAL;
+}
+
+static int tps6236x_dcdc_is_enabled(struct regulator_dev *dev)
+{
+	/* Always return 1 as the EN is not controlled by register
+	 * programming */
+ return 1;
+}
+
+static int tps6236x_dcdc_enable(struct regulator_dev *dev)
+{
+ /* No way to enable dc-dc converter through register programming */
+	return 0;
+}
+
+static int tps6236x_dcdc_disable(struct regulator_dev *dev)
+{
+ /* No way to disable dc-dc converter through register programming */
+ return 0;
+}
+
+static int tps6236x_dcdc_get_voltage(struct regulator_dev *dev)
+{
+ struct tps6236x_chip *tps = rdev_get_drvdata(dev);
+ int data;
+
+ data = tps6236x_reg_read(tps, REG_VSET0 + tps->vsel_id);
+ if (data < 0)
+ return data;
+ data &= tps->voltage_reg_mask;
+
+ return tps->voltages[data] * 1000;
+}
+
+static int tps6236x_dcdc_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct tps6236x_chip *tps = rdev_get_drvdata(dev);
+
+ return __tps6236x_dcdc_set_voltage(tps, min_uV, max_uV, selector);
+}
+
+static int tps6236x_dcdc_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps6236x_chip *tps = rdev_get_drvdata(dev);
+
+	if (selector >= tps->desc.n_voltages)
+		return -EINVAL;
+
+ return tps->voltages[selector] * 1000;
+}
+
+/* Operations permitted on VDCDCx */
+static struct regulator_ops tps6236x_dcdc_ops = {
+ .is_enabled = tps6236x_dcdc_is_enabled,
+ .enable = tps6236x_dcdc_enable,
+ .disable = tps6236x_dcdc_disable,
+ .get_voltage = tps6236x_dcdc_get_voltage,
+ .set_voltage = tps6236x_dcdc_set_voltage,
+ .list_voltage = tps6236x_dcdc_list_voltage,
+};
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+static void print_regs(const char *header, struct seq_file *s,
+ struct tps6236x_chip *tps, int start_offset,
+ int end_offset)
+{
+ int reg_val;
+ int i;
+
+ seq_printf(s, "%s\n", header);
+ for (i = start_offset; i <= end_offset; ++i) {
+ reg_val = tps6236x_reg_read(tps, i);
+ if (reg_val >= 0)
+ seq_printf(s, "Reg 0x%02x Value 0x%02x\n", i, reg_val);
+ }
+ seq_printf(s, "------------------\n");
+}
+
+static int dbg_tps_show(struct seq_file *s, void *unused)
+{
+ struct tps6236x_chip *tps = s->private;
+
+ seq_printf(s, "TPS6236x Registers\n");
+ seq_printf(s, "------------------\n");
+
+ print_regs("Voltage Regs", s, tps, 0x0, 0x3);
+ print_regs("Config Regs", s, tps, 0x4, 0x6);
+ print_regs("ManId Regs", s, tps, 0x8, 0x9);
+ return 0;
+}
+
+static int dbg_tps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_tps_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_tps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __init tps6236x_debuginit(struct tps6236x_chip *tps)
+{
+ (void)debugfs_create_file("tps6236x", S_IRUGO, NULL,
+ tps, &debug_fops);
+}
+#else
+static void __init tps6236x_debuginit(struct tps6236x_chip *tps)
+{
+ return;
+}
+#endif
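+
+/*
+ * One-time setup at probe: program the internal pull-down, the force-PWM
+ * bit of the active VSET register, reset the output discharge path and,
+ * if requested, apply the initial voltage from platform data.
+ */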
+static int tps6236x_init_dcdc(struct i2c_client *client,
+ struct tps6236x_regulator_platform_data *pdata,
+ struct tps6236x_chip *tps)
+{
+ int st;
+	int init_uV;
+ int data;
+
+ if (pdata->internal_pd_enable)
+ st = tps6236x_write(tps, REG_CONTROL, 0xE0);
+ else
+ st = tps6236x_write(tps, REG_CONTROL, 0x0);
+ if (st < 0) {
+ dev_err(tps->dev, "%s() fails in writing reg %d\n",
+ __func__, REG_CONTROL);
+ return st;
+ }
+
+ data = tps6236x_reg_read(tps, REG_VSET0 + tps->vsel_id);
+ if (data < 0) {
+ dev_err(tps->dev, "%s() fails in reading reg %d\n",
+ __func__, REG_VSET0 + tps->vsel_id);
+ return data;
+ }
+ if (pdata->is_force_pwm)
+ data |= (1 << 7);
+ else
+ data &= ~(1 << 7);
+ st = tps6236x_reg_write(tps, REG_VSET0 + tps->vsel_id, data);
+ if (st < 0) {
+ dev_err(tps->dev, "%s() fails in writing reg %d\n",
+ __func__, REG_VSET0 + tps->vsel_id);
+ return st;
+ }
+
+ /* Reset output discharge path */
+ st = tps6236x_reg_update(tps, REG_RAMPCTRL, 0, 1 << 2);
+ if (st < 0) {
+ dev_err(tps->dev, "%s() fails in updating reg %d\n",
+ __func__, REG_RAMPCTRL);
+ return st;
+ }
+
+ if (!pdata->init_apply)
+ return 0;
+
+	init_uV = pdata->init_uV;
+	return __tps6236x_dcdc_set_voltage(tps, init_uV, init_uV, NULL);
+}
+
+static int __devinit tps6236x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps6236x_regulator_platform_data *pdata;
+ struct regulator_init_data *init_data;
+ struct regulator_dev *rdev;
+ struct tps6236x_chip *tps;
+ int err;
+ int chip_id;
+ int part_id;
+
+ dev_dbg(&client->dev, "%s() is called\n", __func__);
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "Err: The I2c functionality is"
+ " not supported\n");
+ return -EIO;
+ }
+
+	/*
+	 * init_data points to the regulator_init_data structure supplied
+	 * through the board's platform data.
+	 */
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "Err: Platform data not found\n");
+ return -EIO;
+ }
+
+ init_data = &pdata->reg_init_data;
+ tps = kzalloc(sizeof(*tps), GFP_KERNEL);
+ if (!tps) {
+ dev_err(&client->dev, "Err: Memory allocation fails\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&tps->io_lock);
+
+ tps->is_force_pwm = pdata->is_force_pwm;
+ tps->enable_discharge = pdata->enable_discharge;
+ tps->chip_id = id->driver_data;
+ tps->client = client;
+ tps->dev = &client->dev;
+ tps->internal_pulldn_en = pdata->internal_pd_enable;
+ tps->vsel_id = pdata->vsel;
+ tps->name = id->name;
+ tps->voltages = (tps->chip_id == TPS62360) ?
+ TPS62360_VOLTAGES : TPS62361_VOLTAGES;
+ tps->voltage_reg_mask = (tps->chip_id == TPS62360) ? 0x3F : 0x7F;
+
+ tps->desc.name = id->name;
+ tps->desc.id = 0;
+ tps->desc.n_voltages = (tps->chip_id == TPS62360) ?
+ ARRAY_SIZE(TPS62360_VOLTAGES) :
+ ARRAY_SIZE(TPS62361_VOLTAGES);
+ tps->desc.ops = &tps6236x_dcdc_ops;
+ tps->desc.type = REGULATOR_VOLTAGE;
+ tps->desc.owner = THIS_MODULE;
+
+ i2c_set_clientdata(client, tps);
+
+ /* Read version number and compare with chipid */
+ chip_id = tps6236x_read(tps, REG_CHIPID);
+ if (chip_id < 0) {
+ err = chip_id;
+ dev_err(tps->dev, "Error in reading device %d\n", err);
+ goto fail;
+ }
+ part_id = (chip_id >> 2) & 0x3;
+ if (((part_id == 0) && (tps->chip_id != TPS62360)) ||
+ ((part_id == 1) && (tps->chip_id != TPS62361B)) ||
+ (part_id == 2) || (part_id == 3)) {
+ dev_err(tps->dev, "Err: Mismatch of partid and driver chip-id"
+ " 0x%x\n", chip_id);
+ err = -ENODEV;
+ goto fail;
+ }
+
+ err = tps6236x_init_dcdc(client, pdata, tps);
+ if (err < 0) {
+ dev_err(tps->dev, "TPS6236X init fails with = %d\n", err);
+ goto fail;
+ }
+
+ /* Register the regulators */
+ rdev = regulator_register(&tps->desc, &client->dev, init_data, tps);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "Failed to register %s\n", id->name);
+ err = PTR_ERR(rdev);
+ goto fail;
+ }
+
+ tps->rdev = rdev;
+
+ tps6236x_debuginit(tps);
+ return 0;
+
+fail:
+ kfree(tps);
+ return err;
+}
+
+/**
+ * tps6236x_remove - tps6236x driver i2c remove handler
+ * @client: i2c driver client device structure
+ *
+ * Unregister the regulator and release the driver data
+ */
+static int __devexit tps6236x_remove(struct i2c_client *client)
+{
+ struct tps6236x_chip *tps = i2c_get_clientdata(client);
+
+ regulator_unregister(tps->rdev);
+ kfree(tps);
+ return 0;
+}
+
+static void tps6236x_shutdown(struct i2c_client *client)
+{
+ struct tps6236x_chip *tps = i2c_get_clientdata(client);
+ int st;
+
+ if (!tps->enable_discharge)
+ return;
+
+ /* Configure the output discharge path */
+ st = tps6236x_reg_update(tps, REG_RAMPCTRL, (1 << 2), (1 << 2));
+ if (st < 0)
+ dev_err(tps->dev, "%s() fails in updating reg %d\n",
+ __func__, REG_RAMPCTRL);
+}
+
+static const struct i2c_device_id tps6236x_id[] = {
+ {.name = "tps62360", .driver_data = TPS62360},
+ {.name = "tps62361B", .driver_data = TPS62361B},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, tps6236x_id);
+
+static struct i2c_driver tps6236x_i2c_driver = {
+ .driver = {
+ .name = "tps6236x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6236x_probe,
+ .remove = __devexit_p(tps6236x_remove),
+ .shutdown = tps6236x_shutdown,
+ .id_table = tps6236x_id,
+};
+
+/* Module init function */
+static int __init tps6236x_init(void)
+{
+ return i2c_add_driver(&tps6236x_i2c_driver);
+}
+subsys_initcall_sync(tps6236x_init);
+
+/* Module exit function */
+static void __exit tps6236x_cleanup(void)
+{
+ i2c_del_driver(&tps6236x_i2c_driver);
+}
+module_exit(tps6236x_cleanup);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index bb04a75a4c98..9ab8f3fb68ef 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -21,6 +21,7 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/tps6586x.h>
+#include <linux/delay.h>
/* supply control and voltage setting */
#define TPS6586X_SUPPLYENA 0x10
@@ -61,8 +62,8 @@ struct tps6586x_regulator {
int volt_nbits;
int enable_bit[2];
int enable_reg[2];
-
int *voltages;
+ int delay; /* delay in us for regulator to stabilize */
/* for DVM regulators */
int go_reg;
@@ -94,10 +95,6 @@ static int __tps6586x_ldo_set_voltage(struct device *parent,
for (val = 0; val < ri->desc.n_voltages; val++) {
uV = ri->voltages[val] * 1000;
- /* LDO0 has minimal voltage 1.2 rather than 1.25 */
- if (ri->desc.id == TPS6586X_ID_LDO_0 && val == 0)
- uV -= 50 * 1000;
-
/* use the first in-range value */
if (min_uV <= uV && uV <= max_uV) {
@@ -190,11 +187,18 @@ static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev)
return !!(reg_val & (1 << ri->enable_bit[0]));
}
+static int tps6586x_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->delay;
+}
+
static struct regulator_ops tps6586x_regulator_ldo_ops = {
.list_voltage = tps6586x_ldo_list_voltage,
.get_voltage = tps6586x_ldo_get_voltage,
.set_voltage = tps6586x_ldo_set_voltage,
-
+ .enable_time = tps6586x_regulator_enable_time,
.is_enabled = tps6586x_regulator_is_enabled,
.enable = tps6586x_regulator_enable,
.disable = tps6586x_regulator_disable,
@@ -204,7 +208,7 @@ static struct regulator_ops tps6586x_regulator_dvm_ops = {
.list_voltage = tps6586x_ldo_list_voltage,
.get_voltage = tps6586x_ldo_get_voltage,
.set_voltage = tps6586x_dvm_set_voltage,
-
+ .enable_time = tps6586x_regulator_enable_time,
.is_enabled = tps6586x_regulator_is_enabled,
.enable = tps6586x_regulator_enable,
.disable = tps6586x_regulator_disable,
@@ -214,6 +218,10 @@ static int tps6586x_ldo_voltages[] = {
1250, 1500, 1800, 2500, 2700, 2850, 3100, 3300,
};
+static int tps6586x_ldo0_voltages[] = {
+ 1200, 1500, 1800, 2500, 2700, 2850, 3100, 3300,
+};
+
static int tps6586x_ldo4_voltages[] = {
1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
1900, 1925, 1950, 1975, 2000, 2025, 2050, 2075,
@@ -236,7 +244,7 @@ static int tps6586x_dvm_voltages[] = {
};
#define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
+ ereg0, ebit0, ereg1, ebit1, en_time) \
.desc = { \
.name = "REG-" #_id, \
.ops = &tps6586x_regulator_##_ops, \
@@ -252,43 +260,44 @@ static int tps6586x_dvm_voltages[] = {
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1), \
- .voltages = tps6586x_##vdata##_voltages,
+ .voltages = tps6586x_##vdata##_voltages, \
+ .delay = en_time,
#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
.go_reg = TPS6586X_##goreg, \
.go_bit = (gobit),
#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
+ ereg0, ebit0, ereg1, ebit1, en_time) \
{ \
TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
+ ereg0, ebit0, ereg1, ebit1, en_time) \
}
#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
+ ereg0, ebit0, ereg1, ebit1, goreg, gobit, en_time) \
{ \
TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
+ ereg0, ebit0, ereg1, ebit1, en_time) \
TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
}
static struct tps6586x_regulator tps6586x_regulator[] = {
- TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
- TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
- TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
- TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
- TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
- TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
- TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
- TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
- TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
-
- TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
- TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
- TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
- TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
+ TPS6586X_LDO(LDO_0, ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0, 4000),
+ TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1, 4000),
+ TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2, 3000),
+ TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6, 3000),
+ TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4, 15000),
+ TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5, 15000),
+ TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6, 15000),
+ TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7, 3000),
+ TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7, 0),
+ TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7, 0),
+
+ TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6, 3000),
+ TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2, 4000),
+ TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0, 4000),
+ TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6, 15000),
};
/*
@@ -332,6 +341,64 @@ static inline int tps6586x_regulator_preinit(struct device *parent,
1 << ri->enable_bit[1]);
}
+static inline int tps6586x_regulator_set_pwm_mode(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct regulator_init_data *p = pdev->dev.platform_data;
+ struct tps6586x_settings *setting = p->driver_data;
+ int ret = 0;
+ uint8_t mask;
+
+ if (setting == NULL)
+ return 0;
+
+ switch (pdev->id) {
+ case TPS6586X_ID_SM_0:
+ mask = 1 << SM0_PWM_BIT;
+ break;
+ case TPS6586X_ID_SM_1:
+ mask = 1 << SM1_PWM_BIT;
+ break;
+ case TPS6586X_ID_SM_2:
+ mask = 1 << SM2_PWM_BIT;
+ break;
+ default:
+ /* not all regulators have PWM/PFM option */
+ return 0;
+ }
+
+ if (setting->sm_pwm_mode == PWM_ONLY)
+ ret = tps6586x_set_bits(parent, TPS6586X_SMODE1, mask);
+ else if (setting->sm_pwm_mode == AUTO_PWM_PFM)
+ ret = tps6586x_clr_bits(parent, TPS6586X_SMODE1, mask);
+
+ return ret;
+}
+
+static inline int tps6586x_regulator_set_slew_rate(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct regulator_init_data *p = pdev->dev.platform_data;
+ struct tps6586x_settings *setting = p->driver_data;
+ uint8_t reg;
+
+ if (setting == NULL)
+ return 0;
+
+ /* only SM0 and SM1 can have the slew rate settings */
+ switch (pdev->id) {
+ case TPS6586X_ID_SM_0:
+ reg = TPS6586X_SM0SL;
+ break;
+ case TPS6586X_ID_SM_1:
+ reg = TPS6586X_SM1SL;
+ break;
+ default:
+ return 0;
+ }
+ return tps6586x_write(parent, reg, setting->slew_rate);
+}
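+
+/*
+ * Illustrative sketch (hypothetical values): the tps6586x_settings used by
+ * the two helpers above come from regulator_init_data->driver_data, so a
+ * board file might pass something like:
+ *
+ *   static struct tps6586x_settings sm0_config = {
+ *       .sm_pwm_mode = PWM_ONLY,
+ *       .slew_rate   = 0x02,   (written verbatim to TPS6586X_SM0SL)
+ *   };
+ *
+ * and set .driver_data = &sm0_config in the SM0 regulator_init_data.
+ */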
+
static inline struct tps6586x_regulator *find_regulator_info(int id)
{
struct tps6586x_regulator *ri;
@@ -374,7 +441,11 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rdev);
- return 0;
+ err = tps6586x_regulator_set_slew_rate(pdev);
+ if (err)
+ return err;
+
+ return tps6586x_regulator_set_pwm_mode(pdev);
}
static int __devexit tps6586x_regulator_remove(struct platform_device *pdev)
diff --git a/drivers/regulator/tps6591x-regulator.c b/drivers/regulator/tps6591x-regulator.c
new file mode 100644
index 000000000000..5336f3d82576
--- /dev/null
+++ b/drivers/regulator/tps6591x-regulator.c
@@ -0,0 +1,955 @@
+/*
+ * drivers/regulator/tps6591x-regulator.c
+ *
+ * Regulator driver for TI TPS6591x PMIC family
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/tps6591x-regulator.h>
+#include <linux/mfd/tps6591x.h>
+
+/* supply control and voltage setting */
+#define TPS6591X_VIO_ADD 0x20
+#define TPS6591X_VDD1_ADD 0x21
+#define TPS6591X_VDD1_OP_ADD 0x22
+#define TPS6591X_VDD1_SR_ADD 0x23
+#define TPS6591X_VDD2_ADD 0x24
+#define TPS6591X_VDD2_OP_ADD 0x25
+#define TPS6591X_VDD2_SR_ADD 0x26
+#define TPS6591X_VDDCTRL_ADD 0x27
+#define TPS6591X_VDDCTRL_OP_ADD 0x28
+#define TPS6591X_VDDCTRL_SR_ADD 0x29
+#define TPS6591X_LDO1_ADD 0x30
+#define TPS6591X_LDO2_ADD 0x31
+#define TPS6591X_LDO3_ADD 0x37
+#define TPS6591X_LDO4_ADD 0x36
+#define TPS6591X_LDO5_ADD 0x32
+#define TPS6591X_LDO6_ADD 0x35
+#define TPS6591X_LDO7_ADD 0x34
+#define TPS6591X_LDO8_ADD 0x33
+#define TPS6591X_SLEEP_SET_LDO_OFF_ADD 0x43
+#define TPS6591X_SLEEP_SET_RES_OFF_ADD 0x44
+#define TPS6591X_EN1_LDO_ADD 0x45
+#define TPS6591X_EN1_SMPS_ADD 0x46
+#define TPS6591X_EN2_LDO_ADD 0x47
+#define TPS6591X_EN2_SMPS_ADD 0x48
+#define TPS6591X_INVALID_ADD 0xFF
+
+#define EN1_EN2_OFFSET 2
+
+struct tps6591x_register_info {
+ unsigned char addr;
+ unsigned char nbits;
+ unsigned char shift_bits;
+ uint8_t cache_val;
+};
+
+enum {
+ supply_type_none = 0x0,
+ supply_type_single_reg,
+ supply_type_sr_op_reg
+};
+
+struct tps6591x_regulator {
+ struct regulator_desc desc;
+ int supply_type;
+
+ struct tps6591x_register_info supply_reg;
+ struct tps6591x_register_info op_reg;
+ struct tps6591x_register_info sr_reg;
+ struct tps6591x_register_info en1_reg;
+ struct tps6591x_register_info slp_off_reg;
+
+ int *voltages;
+
+ int enable_delay; /* delay in us for regulator to stabilize */
+ enum tps6591x_ext_control ectrl;
+ int current_volt_uv;
+
+ /* Voltage change rate in uV per us */
+ int voltage_change_uv_per_us;
+ unsigned int config_flags;
+};
+
+static inline struct device *to_tps6591x_dev(struct regulator_dev *rdev)
+{
+ return rdev_get_dev(rdev)->parent->parent;
+}
+
+static int tps6591x_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->enable_delay;
+}
+
+static int __tps6591x_ext_control_set(struct device *parent,
+ struct tps6591x_regulator *ri,
+ enum tps6591x_ext_control ectrl)
+{
+ int ret;
+ uint8_t mask, reg_val, addr, offset;
+ struct tps6591x_register_info *ext_reg;
+
+ /* For regulators that have separate operational and sleep registers,
+    make sure the operational register is in use and clear the sleep
+    register so the regulator turns off when external control is inactive */
+ if (ri->supply_type == supply_type_sr_op_reg) {
+ reg_val = ri->op_reg.cache_val;
+ if (reg_val & 0x80) { /* boot has used sr - switch to op */
+ reg_val = ri->sr_reg.cache_val;
+ mask = ((1 << ri->sr_reg.nbits) - 1)
+ << ri->sr_reg.shift_bits;
+ reg_val &= mask;
+ ret = tps6591x_write(parent, ri->op_reg.addr, reg_val);
+ if (ret)
+ return ret;
+ ri->op_reg.cache_val = reg_val;
+ }
+ ret = tps6591x_write(parent, ri->sr_reg.addr, 0);
+ if (ret)
+ return ret;
+ ri->sr_reg.cache_val = 0;
+ }
+
+ offset = 0;
+ switch (ectrl) {
+ case EXT_CTRL_EN2:
+ offset = EN1_EN2_OFFSET;
+ /* fall through to EXT_CTRL_EN1 */
+ case EXT_CTRL_EN1:
+ ext_reg = &(ri->en1_reg);
+ break;
+ case EXT_CTRL_SLEEP_OFF:
+ ext_reg = &(ri->slp_off_reg);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ addr = ext_reg->addr + offset;
+ mask = ((1 << ext_reg->nbits) - 1) << ext_reg->shift_bits;
+
+ return tps6591x_update(parent, addr, mask, mask);
+}
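+
+/*
+ * Note: EN2 shares the EN1 bit layout, and its registers sit
+ * EN1_EN2_OFFSET (2) addresses above EN1 (EN1_LDO 0x45 vs EN2_LDO 0x47,
+ * EN1_SMPS 0x46 vs EN2_SMPS 0x48), which is why the EN2 case above only
+ * adds an address offset before falling through to the EN1 handling.
+ */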
+
+static void wait_for_voltage_change(struct tps6591x_regulator *ri, int uV)
+{
+ int change_uv;
+ int change_us;
+
+ change_uv = abs(uV - ri->current_volt_uv);
+ change_us = change_uv/ri->voltage_change_uv_per_us + 1;
+ if (change_us >= 1000) {
+ mdelay(change_us/1000);
+ change_us -= (change_us/1000) * 1000;
+ }
+ if (change_us)
+ udelay(change_us);
+ ri->current_volt_uv = uV;
+}
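+
+/*
+ * Example: stepping VDD1 from 1.0 V to 1.2 V at its 5000 uV/us default
+ * rate gives change_us = 200000/5000 + 1 = 41, so only udelay(41) runs;
+ * a 2500 us total would instead be split into mdelay(2) plus udelay(500).
+ */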
+
+static int __tps6591x_vio_set_voltage(struct device *parent,
+ struct tps6591x_regulator *ri,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int uV;
+ uint8_t mask;
+ uint8_t val;
+ int ret;
+ uint8_t reg_val;
+
+ for (val = 0; val < ri->desc.n_voltages; val++) {
+ uV = ri->voltages[val] * 1000;
+
+ /* use the first in-range value */
+ if (min_uV <= uV && uV <= max_uV) {
+ if (selector)
+ *selector = val;
+
+ reg_val = ri->supply_reg.cache_val;
+ val <<= ri->supply_reg.shift_bits;
+
+ mask = ((1 << ri->supply_reg.nbits) - 1) <<
+ ri->supply_reg.shift_bits;
+ reg_val = (reg_val & ~mask) | (val & mask);
+
+ ret = tps6591x_write(parent, ri->supply_reg.addr,
+ reg_val);
+ if (ret >= 0) {
+ wait_for_voltage_change(ri, uV);
+ ri->supply_reg.cache_val = reg_val;
+ }
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tps6591x_vio_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+
+ return __tps6591x_vio_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
+}
+
+static int tps6591x_vio_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t val, mask;
+
+ val = ri->supply_reg.cache_val;
+
+ mask = ((1 << ri->supply_reg.nbits) - 1) << ri->supply_reg.shift_bits;
+ val = (val & mask) >> ri->supply_reg.shift_bits;
+
+ if (val >= ri->desc.n_voltages)
+ BUG();
+
+ return ri->voltages[val] * 1000;
+}
+
+
+static int tps6591x_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct tps6591x_regulator *info = rdev_get_drvdata(rdev);
+
+ return info->voltages[selector] * 1000;
+}
+
+static int __tps6591x_ldo1_set_voltage(struct device *parent,
+ struct tps6591x_regulator *ri,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int val, uV;
+ uint8_t mask;
+ uint8_t reg_val;
+ int ret;
+
+ for (val = 0; val < ri->desc.n_voltages; val++) {
+ uV = ri->voltages[val] * 1000;
+
+ /* use the first in-range value */
+ if (min_uV <= uV && uV <= max_uV) {
+ if (selector)
+ *selector = val;
+ reg_val = ri->supply_reg.cache_val;
+ val += 4;
+ val <<= ri->supply_reg.shift_bits;
+ mask = ((1 << ri->supply_reg.nbits) - 1) <<
+ ri->supply_reg.shift_bits;
+
+ reg_val = (reg_val & ~mask) | (val & mask);
+ ret = tps6591x_write(parent, ri->supply_reg.addr,
+ reg_val);
+ if (ret >= 0) {
+ wait_for_voltage_change(ri, uV);
+ ri->supply_reg.cache_val = reg_val;
+ }
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
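+
+/*
+ * Example: the LDO1/2/4 selector is offset by 4 in the register, so table
+ * index 0 (1000 mV) is written as code 4 and a request for 1.2 V (index 4)
+ * becomes code 8, matching the SEL[7:2] encoding noted above
+ * tps6591x_ldo124_voltages.  LDO3/5/6/7/8 use an offset of 2 the same way.
+ */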
+
+static int tps6591x_ldo1_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+
+ return __tps6591x_ldo1_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
+}
+
+static int tps6591x_ldo1_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t val, mask;
+
+ val = ri->supply_reg.cache_val;
+ mask = ((1 << ri->supply_reg.nbits) - 1) << ri->supply_reg.shift_bits;
+ val = (val & mask) >> ri->supply_reg.shift_bits;
+
+ if (val < 4)
+ return 1000 * 1000;
+ else if (val > 0x32)
+ return 3300 * 1000;
+ else
+ val -= 4;
+ if (val >= ri->desc.n_voltages)
+ BUG();
+
+ return ri->voltages[val] * 1000;
+}
+
+static int __tps6591x_ldo3_set_voltage(struct device *parent,
+ struct tps6591x_regulator *ri,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int val, uV;
+ uint8_t mask;
+ int ret;
+ uint8_t reg_val;
+
+ for (val = 0; val < ri->desc.n_voltages; val++) {
+ uV = ri->voltages[val] * 1000;
+
+ /* use the first in-range value */
+ if (min_uV <= uV && uV <= max_uV) {
+ if (selector)
+ *selector = val;
+ reg_val = ri->supply_reg.cache_val;
+ val += 2;
+ val <<= ri->supply_reg.shift_bits;
+ mask = ((1 << ri->supply_reg.nbits) - 1) <<
+ ri->supply_reg.shift_bits;
+
+ reg_val = (reg_val & ~mask) | (val & mask);
+
+ ret = tps6591x_write(parent, ri->supply_reg.addr,
+ reg_val);
+ if (ret >= 0) {
+ wait_for_voltage_change(ri, uV);
+ ri->supply_reg.cache_val = reg_val;
+ }
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tps6591x_ldo3_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+
+ return __tps6591x_ldo3_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
+}
+
+static int tps6591x_ldo3_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t val, mask;
+
+ val = ri->supply_reg.cache_val;
+ mask = ((1 << ri->supply_reg.nbits) - 1) << ri->supply_reg.shift_bits;
+ val = (val & mask) >> ri->supply_reg.shift_bits;
+
+ if (val < 2)
+ return 1000 * 1000;
+ else if (val > 0x19)
+ return 3300 * 1000;
+ else
+ val -= 2;
+ if (val >= ri->desc.n_voltages)
+ BUG();
+
+ return ri->voltages[val] * 1000;
+}
+
+static int __tps6591x_vdd_set_voltage(struct device *parent,
+ struct tps6591x_regulator *ri,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int val, uV, ret;
+ uint8_t mask;
+ uint8_t op_reg_val;
+ uint8_t sr_reg_val;
+
+ for (val = 0; val < ri->desc.n_voltages; val++) {
+ uV = ri->voltages[val] * 1000;
+
+ /* use the first in-range value */
+ if (min_uV <= uV && uV <= max_uV) {
+ if (selector)
+ *selector = val;
+ op_reg_val = ri->op_reg.cache_val;
+ val += 3;
+ if (op_reg_val & 0x80) {
+ sr_reg_val = ri->sr_reg.cache_val;
+ val <<= ri->sr_reg.shift_bits;
+ mask = ((1 << ri->sr_reg.nbits) - 1)
+ << ri->sr_reg.shift_bits;
+ sr_reg_val = (sr_reg_val & ~mask) |
+ (val & mask);
+ ret = tps6591x_write(parent,
+ ri->sr_reg.addr, sr_reg_val);
+ if (!ret)
+ ri->sr_reg.cache_val = sr_reg_val;
+ } else {
+ val <<= ri->op_reg.shift_bits;
+ mask = ((1 << ri->op_reg.nbits) - 1)
+ << ri->op_reg.shift_bits;
+ op_reg_val = (op_reg_val & ~mask) |
+ (val & mask);
+ ret = tps6591x_write(parent,
+ ri->op_reg.addr, op_reg_val);
+ if (!ret)
+ ri->op_reg.cache_val = op_reg_val;
+ }
+ if (ret >= 0)
+ wait_for_voltage_change(ri, uV);
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
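+
+/*
+ * Example: VDD selectors are offset by 3, so table index 0 (600 mV) is
+ * written as code 3.  Bit 7 of the OP register indicates that the PMIC is
+ * currently following the SR register (see the "boot has used sr" handling
+ * above), so the new selector is written to whichever register is active.
+ */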
+
+static int tps6591x_vdd_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+
+ return __tps6591x_vdd_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
+}
+
+static int tps6591x_vdd_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t op_val, sr_val, val;
+
+ op_val = ri->op_reg.cache_val;
+ sr_val = ri->sr_reg.cache_val;
+
+ val = (op_val & 0x80) ? (sr_val & 0x7F) : (op_val & 0x7F);
+
+ if (!val)
+ return 0;
+ else if (val < 0x3)
+ return 600 * 1000;
+ else if (val > 0x4B)
+ return 1500 * 1000;
+ else
+ val -= 3;
+
+ if (val >= ri->desc.n_voltages)
+ BUG();
+
+ return ri->voltages[val] * 1000;
+}
+
+static int tps6591x_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+ uint8_t reg_val;
+ int ret;
+
+ reg_val = ri->supply_reg.cache_val;
+ reg_val |= 0x1;
+
+ ret = tps6591x_write(parent, ri->supply_reg.addr, reg_val);
+ if (!ret)
+ ri->supply_reg.cache_val = reg_val;
+ return ret;
+}
+
+static int tps6591x_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+ uint8_t reg_val;
+ int ret;
+
+ reg_val = ri->supply_reg.cache_val;
+ reg_val &= ~0x1;
+ ret = tps6591x_write(parent, ri->supply_reg.addr, reg_val);
+ if (!ret)
+ ri->supply_reg.cache_val = reg_val;
+ return ret;
+}
+
+static int tps6591x_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t reg_val;
+
+ reg_val = ri->supply_reg.cache_val;
+ reg_val &= 0x1;
+ return reg_val & 0x1;
+}
+
+static struct regulator_ops tps6591x_regulator_vio_ops = {
+ .list_voltage = tps6591x_ldo_list_voltage,
+ .get_voltage = tps6591x_vio_get_voltage,
+ .set_voltage = tps6591x_vio_set_voltage,
+
+ .enable_time = tps6591x_regulator_enable_time,
+ .is_enabled = tps6591x_regulator_is_enabled,
+ .enable = tps6591x_regulator_enable,
+ .disable = tps6591x_regulator_disable,
+};
+
+static struct regulator_ops tps6591x_regulator_ldo1_ops = {
+ .list_voltage = tps6591x_ldo_list_voltage,
+ .get_voltage = tps6591x_ldo1_get_voltage,
+ .set_voltage = tps6591x_ldo1_set_voltage,
+
+ .enable_time = tps6591x_regulator_enable_time,
+ .is_enabled = tps6591x_regulator_is_enabled,
+ .enable = tps6591x_regulator_enable,
+ .disable = tps6591x_regulator_disable,
+};
+
+static struct regulator_ops tps6591x_regulator_ldo3_ops = {
+ .list_voltage = tps6591x_ldo_list_voltage,
+ .get_voltage = tps6591x_ldo3_get_voltage,
+ .set_voltage = tps6591x_ldo3_set_voltage,
+
+ .enable_time = tps6591x_regulator_enable_time,
+ .is_enabled = tps6591x_regulator_is_enabled,
+ .enable = tps6591x_regulator_enable,
+ .disable = tps6591x_regulator_disable,
+};
+
+static struct regulator_ops tps6591x_regulator_vdd_ops = {
+ .list_voltage = tps6591x_ldo_list_voltage,
+ .get_voltage = tps6591x_vdd_get_voltage,
+ .set_voltage = tps6591x_vdd_set_voltage,
+
+ .enable_time = tps6591x_regulator_enable_time,
+ .is_enabled = tps6591x_regulator_is_enabled,
+ .enable = tps6591x_regulator_enable,
+ .disable = tps6591x_regulator_disable,
+};
+
+static int tps6591x_vio_voltages[] = {
+ 1500, 1800, 2500, 3300,
+};
+
+/* SEL[7:2]=000100:1000mV --> 110010:3300mV */
+static int tps6591x_ldo124_voltages[] = {
+ 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, 1400, 1450,
+ 1500, 1550, 1600, 1650, 1700, 1750, 1800, 1850, 1900, 1950,
+ 2000, 2050, 2100, 2150, 2200, 2250, 2300, 2350, 2400, 2450,
+ 2500, 2550, 2600, 2650, 2700, 2750, 2800, 2850, 2900, 2950,
+ 3000, 3050, 3100, 3150, 3200, 3250, 3300,
+};
+
+/* SEL[6:2]=00010:1000mv --> 11001:3300mV */
+static int tps6591x_ldo35678_voltages[] = {
+ 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900,
+ 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900,
+ 3000, 3100, 3200, 3300,
+};
+
+static int tps6591x_vdd_voltages[] = {
+ 600, 612, 625, 637, 650, 662, 675, 687, 700, 712, 725, 737,
+ 750, 762, 775, 787, 800, 812, 825, 837, 850, 862, 875, 887,
+ 900, 912, 925, 937, 950, 962, 975, 987, 1000, 1012, 1025,
+ 1037, 1050, 1062, 1075, 1087, 1100, 1112, 1125, 1137, 1150,
+ 1162, 1175, 1187, 1200, 1212, 1225, 1237, 1250, 1262, 1275,
+ 1287, 1300, 1312, 1325, 1337, 1350, 1362, 1375, 1387, 1400,
+ 1412, 1425, 1437, 1450, 1462, 1475, 1487, 1500,
+};
+
+static int tps6591x_vddctrl_voltages[] = {
+ 600, 612, 625, 637, 650, 662, 675, 687, 700, 712, 725, 737,
+ 750, 762, 775, 787, 800, 812, 825, 837, 850, 862, 875, 887,
+ 900, 912, 925, 937, 950, 962, 975, 987, 1000, 1012, 1025,
+ 1037, 1050, 1062, 1075, 1087, 1100, 1112, 1125, 1137, 1150,
+ 1162, 1175, 1187, 1200, 1212, 1225, 1237, 1250, 1262, 1275,
+ 1287, 1300, 1312, 1325, 1337, 1350, 1362, 1375, 1387, 1400,
+};
+
+#define TPS6591X_REGULATOR(_id, vdata, _ops, s_addr, s_nbits, s_shift, \
+ s_type, op_addr, op_nbits, op_shift, sr_addr, \
+ sr_nbits, sr_shift, en1_addr, en1_shift, \
+ slp_off_addr, slp_off_shift, en_time, \
+ change_rate) \
+ .desc = { \
+ .name = tps6591x_rails(_id), \
+ .ops = &tps6591x_regulator_##_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = TPS6591X_ID_##_id, \
+ .n_voltages = ARRAY_SIZE(tps6591x_##vdata##_voltages), \
+ .owner = THIS_MODULE, \
+ }, \
+ .supply_type = supply_type_##s_type, \
+ .supply_reg = { \
+ .addr = TPS6591X_##s_addr##_ADD, \
+ .nbits = s_nbits, \
+ .shift_bits = s_shift, \
+ }, \
+ .op_reg = { \
+ .addr = TPS6591X_##op_addr##_ADD, \
+ .nbits = op_nbits, \
+ .shift_bits = op_shift, \
+ }, \
+ .sr_reg = { \
+ .addr = TPS6591X_##sr_addr##_ADD, \
+ .nbits = sr_nbits, \
+ .shift_bits = sr_shift, \
+ }, \
+ .en1_reg = { \
+ .addr = TPS6591X_##en1_addr##_ADD, \
+ .nbits = 1, \
+ .shift_bits = en1_shift, \
+ }, \
+ .slp_off_reg = { \
+ .addr = TPS6591X_SLEEP_SET_##slp_off_addr##_ADD, \
+ .nbits = 1, \
+ .shift_bits = slp_off_shift, \
+ }, \
+ .voltages = tps6591x_##vdata##_voltages, \
+ .enable_delay = en_time, \
+ .voltage_change_uv_per_us = change_rate,
+
+#define TPS6591X_VIO(_id, vdata, s_addr, s_nbits, s_shift, s_type, \
+ en1_shift, slp_off_shift, en_time) \
+{ \
+ TPS6591X_REGULATOR(_id, vdata, vio_ops, s_addr, s_nbits, \
+ s_shift, s_type, INVALID, 0, 0, INVALID, 0, 0, \
+ EN1_SMPS, en1_shift, RES_OFF, slp_off_shift, \
+ en_time, 10000) \
+}
+
+#define TPS6591X_LDO1(_id, vdata, s_addr, s_nbits, s_shift, s_type, \
+ en1_shift, slp_off_shift, en_time) \
+{ \
+ TPS6591X_REGULATOR(_id, vdata, ldo1_ops, s_addr, s_nbits, \
+ s_shift, s_type, INVALID, 0, 0, INVALID, 0, 0, \
+ EN1_LDO, en1_shift, LDO_OFF, slp_off_shift, \
+ en_time, 6000) \
+}
+
+#define TPS6591X_LDO3(_id, vdata, s_addr, s_nbits, s_shift, s_type, \
+ en1_shift, slp_off_shift, en_time) \
+{ \
+ TPS6591X_REGULATOR(_id, vdata, ldo3_ops, s_addr, s_nbits, \
+ s_shift, s_type, INVALID, 0, 0, INVALID, 0, 0, \
+ EN1_LDO, en1_shift, LDO_OFF, slp_off_shift, \
+ en_time, 11000) \
+}
+
+#define TPS6591X_VDD(_id, vdata, s_addr, s_nbits, s_shift, s_type, \
+ op_addr, op_nbits, op_shift, sr_addr, sr_nbits, \
+ sr_shift, en1_shift, slp_off_shift, en_time) \
+{ \
+ TPS6591X_REGULATOR(_id, vdata, vdd_ops, s_addr, s_nbits, \
+ s_shift, s_type, op_addr, op_nbits, op_shift, \
+ sr_addr, sr_nbits, sr_shift, EN1_SMPS, \
+ en1_shift, RES_OFF, slp_off_shift, en_time, \
+ 5000) \
+}
+
+static struct tps6591x_regulator tps6591x_regulator[] = {
+ TPS6591X_VIO(VIO, vio, VIO, 2, 2, single_reg, 0, 0, 350),
+ TPS6591X_LDO1(LDO_1, ldo124, LDO1, 6, 2, single_reg, 1, 1, 420),
+ TPS6591X_LDO1(LDO_2, ldo124, LDO2, 6, 2, single_reg, 2, 2, 420),
+ TPS6591X_LDO3(LDO_3, ldo35678, LDO3, 5, 2, single_reg, 7, 7, 230),
+ TPS6591X_LDO1(LDO_4, ldo124, LDO4, 6, 2, single_reg, 6, 6, 230),
+ TPS6591X_LDO3(LDO_5, ldo35678, LDO5, 5, 2, single_reg, 3, 3, 230),
+ TPS6591X_LDO3(LDO_6, ldo35678, LDO6, 5, 2, single_reg, 0, 0, 230),
+ TPS6591X_LDO3(LDO_7, ldo35678, LDO7, 5, 2, single_reg, 5, 5, 230),
+ TPS6591X_LDO3(LDO_8, ldo35678, LDO8, 5, 2, single_reg, 4, 4, 230),
+ TPS6591X_VDD(VDD_1, vdd, VDD1, 2, 0, sr_op_reg, VDD1_OP,
+ 7, 0, VDD1_SR, 7, 0, 1, 1, 350),
+ TPS6591X_VDD(VDD_2, vdd, VDD2, 2, 0, sr_op_reg, VDD2_OP,
+ 7, 0, VDD2_SR, 7, 0, 2, 2, 350),
+ TPS6591X_VDD(VDDCTRL, vddctrl, VDDCTRL, 2, 0, sr_op_reg,
+ VDDCTRL_OP, 7, 0, VDDCTRL_SR, 7, 0, 3, 3, 900),
+};
+
+static inline int tps6591x_regulator_preinit(struct device *parent,
+ struct tps6591x_regulator *ri,
+ struct tps6591x_regulator_platform_data *tps6591x_pdata)
+{
+ int ret;
+ uint8_t reg_val;
+
+ if (tps6591x_pdata->ectrl != EXT_CTRL_NONE) {
+ ret = __tps6591x_ext_control_set(
+ parent, ri, tps6591x_pdata->ectrl);
+ if (ret < 0) {
+ pr_err("Not able to configure external control %d"
+ " for rail %d err %d\n", tps6591x_pdata->ectrl,
+ ri->desc.id, ret);
+ return ret;
+ }
+ }
+
+ if (!tps6591x_pdata->init_apply)
+ return 0;
+
+ if (tps6591x_pdata->init_uV >= 0) {
+ switch (ri->desc.id) {
+ case TPS6591X_ID_VIO:
+ ret = __tps6591x_vio_set_voltage(parent, ri,
+ tps6591x_pdata->init_uV,
+ tps6591x_pdata->init_uV, 0);
+ break;
+
+ case TPS6591X_ID_LDO_1:
+ case TPS6591X_ID_LDO_2:
+ case TPS6591X_ID_LDO_4:
+ ret = __tps6591x_ldo1_set_voltage(parent, ri,
+ tps6591x_pdata->init_uV,
+ tps6591x_pdata->init_uV, 0);
+ break;
+
+ case TPS6591X_ID_LDO_3:
+ case TPS6591X_ID_LDO_5:
+ case TPS6591X_ID_LDO_6:
+ case TPS6591X_ID_LDO_7:
+ case TPS6591X_ID_LDO_8:
+ ret = __tps6591x_ldo3_set_voltage(parent, ri,
+ tps6591x_pdata->init_uV,
+ tps6591x_pdata->init_uV, 0);
+ break;
+
+ case TPS6591X_ID_VDD_1:
+ case TPS6591X_ID_VDD_2:
+ case TPS6591X_ID_VDDCTRL:
+ ret = __tps6591x_vdd_set_voltage(parent, ri,
+ tps6591x_pdata->init_uV,
+ tps6591x_pdata->init_uV, 0);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret < 0) {
+ pr_err("Not able to initialize voltage %d for rail "
+ "%d err %d\n", tps6591x_pdata->init_uV,
+ ri->desc.id, ret);
+ return ret;
+ }
+ }
+
+ reg_val = ri->supply_reg.cache_val;
+ if (tps6591x_pdata->init_enable)
+ reg_val |= 0x1;
+ else
+ reg_val &= ~0x1;
+ ret = tps6591x_write(parent, ri->supply_reg.addr, reg_val);
+
+ if (ret < 0)
+ pr_err("Not able to %s rail %d err %d\n",
+ (tps6591x_pdata->init_enable) ? "enable" : "disable",
+ ri->desc.id, ret);
+ else
+ ri->supply_reg.cache_val = reg_val;
+ return ret;
+}
+
+static inline int tps6591x_cache_regulator_register(struct device *parent,
+ struct tps6591x_regulator *ri)
+{
+ int ret;
+ ret = tps6591x_read(parent, ri->supply_reg.addr,
+ &ri->supply_reg.cache_val);
+ if (!ret && (ri->supply_type == supply_type_sr_op_reg)) {
+ ret = tps6591x_read(parent, ri->op_reg.addr,
+ &ri->op_reg.cache_val);
+ if (!ret)
+ ret = tps6591x_read(parent, ri->sr_reg.addr,
+ &ri->sr_reg.cache_val);
+ }
+ return ret;
+}
+
+static inline struct tps6591x_regulator *find_regulator_info(int id)
+{
+ struct tps6591x_regulator *ri;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6591x_regulator); i++) {
+ ri = &tps6591x_regulator[i];
+ if (ri->desc.id == id)
+ return ri;
+ }
+ return NULL;
+}
+
+
+static int __devinit tps6591x_regulator_probe(struct platform_device *pdev)
+{
+ struct tps6591x_regulator *ri = NULL;
+ struct regulator_dev *rdev;
+ struct tps6591x_regulator_platform_data *tps_pdata;
+ int id = pdev->id;
+ int err;
+
+ dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+
+ ri = find_regulator_info(id);
+ if (ri == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+ tps_pdata = pdev->dev.platform_data;
+ ri->ectrl = tps_pdata->ectrl;
+ ri->config_flags = tps_pdata->flags;
+
+ if (tps_pdata->slew_rate_uV_per_us)
+ ri->voltage_change_uv_per_us = tps_pdata->slew_rate_uV_per_us;
+
+ err = tps6591x_cache_regulator_register(pdev->dev.parent, ri);
+ if (err) {
+ dev_err(&pdev->dev, "Error in caching registers error %d\n",
+ err);
+ return err;
+ }
+
+ err = tps6591x_regulator_preinit(pdev->dev.parent, ri, tps_pdata);
+ if (err) {
+ dev_err(&pdev->dev, "Error in pre-initialization of regulator "
+ "error %d\n", err);
+ return err;
+ }
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ &tps_pdata->regulator, ri);
+ if (IS_ERR_OR_NULL(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+ ri->current_volt_uv = ri->desc.ops->get_voltage(rdev);
+
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit tps6591x_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static void tps6591x_regulator_shutdown(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+ int ret;
+
+ if (ri->ectrl == EXT_CTRL_EN1) {
+ ret = tps6591x_clr_bits(parent, ri->en1_reg.addr,
+ (1 << ri->en1_reg.shift_bits));
+ if (ret < 0)
+ dev_err(&pdev->dev, "Error in clearing external control\n");
+ }
+}
+
+static int tps6591x_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+ int ret = 0;
+ uint8_t reg_val;
+
+ if (ri->config_flags & LDO_LOW_POWER_ON_SUSPEND) {
+ ret = tps6591x_clr_bits(parent, ri->en1_reg.addr,
+ (1 << ri->en1_reg.shift_bits));
+ reg_val = ri->supply_reg.cache_val;
+ reg_val = (reg_val & ~0x3) | (0x3);
+ ret = tps6591x_write(parent, ri->supply_reg.addr, reg_val);
+ if (ret >= 0)
+ ri->supply_reg.cache_val = reg_val;
+ else
+ dev_err(&pdev->dev, "Error in updating the supply state\n");
+ }
+ return ret;
+}
+
+static int tps6591x_resume(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ struct tps6591x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6591x_dev(rdev);
+ int ret = 0;
+ uint8_t reg_val;
+
+ if (ri->config_flags & LDO_LOW_POWER_ON_SUSPEND) {
+ ret = tps6591x_clr_bits(parent, ri->en1_reg.addr,
+ (1 << ri->en1_reg.shift_bits));
+ reg_val = ri->supply_reg.cache_val;
+ reg_val = (reg_val & ~0x3) | (0x1);
+ ret = tps6591x_write(parent, ri->supply_reg.addr, reg_val);
+ if (ret >= 0)
+ ri->supply_reg.cache_val = reg_val;
+ else
+ dev_err(&pdev->dev, "Error in updating the supply state\n");
+ }
+ return ret;
+}
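+
+/*
+ * Note: bits [1:0] of the supply register hold the rail state; the
+ * enable/disable paths use 0x1 for on and 0x0 for off, so the 0x3 written
+ * on suspend is assumed to select the low-power state implied by
+ * LDO_LOW_POWER_ON_SUSPEND, with 0x1 restoring normal operation on resume.
+ */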
+
+static struct platform_driver tps6591x_regulator_driver = {
+ .driver = {
+ .name = "tps6591x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6591x_regulator_probe,
+ .remove = __devexit_p(tps6591x_regulator_remove),
+ .shutdown = tps6591x_regulator_shutdown,
+ .suspend = tps6591x_suspend,
+ .resume = tps6591x_resume,
+};
+
+static int __init tps6591x_regulator_init(void)
+{
+ return platform_driver_register(&tps6591x_regulator_driver);
+}
+subsys_initcall(tps6591x_regulator_init);
+
+static void __exit tps6591x_regulator_exit(void)
+{
+ platform_driver_unregister(&tps6591x_regulator_driver);
+}
+module_exit(tps6591x_regulator_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Regulator Driver for TI TPS6591X PMIC");
+MODULE_ALIAS("platform:tps6591x-regulator");
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
new file mode 100644
index 000000000000..874abebd0e43
--- /dev/null
+++ b/drivers/regulator/tps80031-regulator.c
@@ -0,0 +1,1082 @@
+/*
+ * drivers/regulator/tps80031-regulator.c
+ *
+ * Regulator driver for TI TPS80031
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/tps80031-regulator.h>
+#include <linux/mfd/tps80031.h>
+
+/* Flags for DCDC Voltage reading */
+#define DCDC_OFFSET_EN BIT(0)
+#define DCDC_EXTENDED_EN BIT(1)
+#define TRACK_MODE_ENABLE BIT(2)
+
+#define SMPS_MULTOFFSET_VIO BIT(1)
+#define SMPS_MULTOFFSET_SMPS1 BIT(3)
+#define SMPS_MULTOFFSET_SMPS2 BIT(4)
+#define SMPS_MULTOFFSET_SMPS3 BIT(6)
+#define SMPS_MULTOFFSET_SMPS4 BIT(0)
+
+#define PMC_SMPS_OFFSET_ADD 0xE0
+#define PMC_SMPS_MULT_ADD 0xE3
+
+#define STATE_OFF 0x00
+#define STATE_ON 0x01
+#define STATE_MASK 0x03
+
+#define TRANS_SLEEP_OFF 0x00
+#define TRANS_SLEEP_ON 0x04
+#define TRANS_SLEEP_MASK 0x0C
+
+#define SMPS_CMD_MASK 0xC0
+#define SMPS_VSEL_MASK 0x3F
+#define LDO_VSEL_MASK 0x1F
+
+#define TPS80031_MISC2_ADD 0xE5
+#define MISC2_LDOUSB_IN_VSYS 0x10
+#define MISC2_LDOUSB_IN_PMID 0x08
+#define MISC2_LDOUSB_IN_MASK 0x18
+
+#define MISC2_LDO3_SEL_VIB_VAL BIT(0)
+#define MISC2_LDO3_SEL_VIB_MASK 0x1
+
+#define CHARGERUSB_CTRL3_ADD 0xEA
+#define BOOST_HW_PWR_EN BIT(5)
+#define BOOST_HW_PWR_EN_MASK BIT(5)
+
+#define CHARGERUSB_CTRL1_ADD 0xE8
+#define OPA_MODE_EN BIT(6)
+#define OPA_MODE_EN_MASK BIT(6)
+
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_VBUS_CTRL_CLR 0x05
+#define VBUS_DISCHRG 0x20
+
+#define EXT_PWR_REQ (PWR_REQ_INPUT_PREQ1 | PWR_REQ_INPUT_PREQ2 | \
+ PWR_REQ_INPUT_PREQ3)
+
+struct tps80031_regulator {
+
+ /* Regulator register addresses. */
+ u8 trans_reg;
+ u8 state_reg;
+ u8 force_reg;
+ u8 volt_reg;
+ u8 volt_id;
+ uint8_t trans_reg_cache;
+ uint8_t state_reg_cache;
+ uint8_t force_reg_cache;
+ uint8_t volt_reg_cache;
+
+ /* twl resource ID, for resource control state machine */
+ u8 id;
+
+ /* chip constraints on regulator behavior */
+ u16 min_mV;
+ u16 max_mV;
+
+ /* regulator specific turn-on delay */
+ int delay;
+
+ u8 flags;
+ unsigned int platform_flags;
+ unsigned int ext_ctrl_flag;
+
+ /* used by regulator core */
+ struct regulator_desc desc;
+
+ /* Device */
+ struct device *dev;
+
+ /* Power request bit */
+ int preq_bit;
+};
+
+static inline struct device *to_tps80031_dev(struct regulator_dev *rdev)
+{
+ return rdev_get_dev(rdev)->parent->parent;
+}
+
+static int tps80031_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->delay;
+}
+
+static u8 tps80031_get_smps_offset(struct device *parent)
+{
+ u8 value;
+ int ret;
+
+ ret = tps80031_read(parent, SLAVE_ID1, PMC_SMPS_OFFSET_ADD, &value);
+ if (ret < 0) {
+ dev_err(parent, "Error in reading smps offset register\n");
+ return 0;
+ }
+ return value;
+}
+
+static u8 tps80031_get_smps_mult(struct device *parent)
+{
+ u8 value;
+ int ret;
+
+ ret = tps80031_read(parent, SLAVE_ID1, PMC_SMPS_MULT_ADD, &value);
+ if (ret < 0) {
+ dev_err(parent, "Error in reading smps mult register\n");
+ return 0;
+ }
+ return value;
+}
+
+static int tps80031_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+
+ if (ri->ext_ctrl_flag & EXT_PWR_REQ)
+ return true;
+ return ((ri->state_reg_cache & STATE_MASK) == STATE_ON);
+}
+
+static int tps80031_reg_enable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+ uint8_t reg_val;
+
+ if (ri->ext_ctrl_flag & EXT_PWR_REQ)
+ return 0;
+
+ reg_val = (ri->state_reg_cache & ~STATE_MASK) |
+ (STATE_ON & STATE_MASK);
+ ret = tps80031_write(parent, SLAVE_ID1, ri->state_reg, reg_val);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Error in writing the STATE register\n");
+ return ret;
+ }
+ ri->state_reg_cache = reg_val;
+ udelay(ri->delay);
+ return ret;
+}
+
+static int tps80031_reg_disable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+ uint8_t reg_val;
+
+ if (ri->ext_ctrl_flag & EXT_PWR_REQ)
+ return 0;
+
+ reg_val = (ri->state_reg_cache & ~STATE_MASK) |
+ (STATE_OFF & STATE_MASK);
+ ret = tps80031_write(parent, SLAVE_ID1, ri->state_reg, reg_val);
+ if (ret < 0)
+ dev_err(&rdev->dev, "Error in writing the STATE register\n");
+ else
+ ri->state_reg_cache = reg_val;
+ return ret;
+}
+
+/*
+ * DCDC status and control
+ */
+static int tps80031dcdc_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ int voltage = 0;
+
+ switch (ri->flags) {
+ case 0:
+ if (index == 0)
+ voltage = 0;
+ else if (index < 58)
+ voltage = (607700 + (12660 * (index - 1)));
+ else if (index == 58)
+ voltage = 1350 * 1000;
+ else if (index == 59)
+ voltage = 1500 * 1000;
+ else if (index == 60)
+ voltage = 1800 * 1000;
+ else if (index == 61)
+ voltage = 1900 * 1000;
+ else if (index == 62)
+ voltage = 2100 * 1000;
+ break;
+
+ case DCDC_OFFSET_EN:
+ if (index == 0)
+ voltage = 0;
+ else if (index < 58)
+ voltage = (700000 + (12500 * (index - 1)));
+ else if (index == 58)
+ voltage = 1350 * 1000;
+ else if (index == 59)
+ voltage = 1500 * 1000;
+ else if (index == 60)
+ voltage = 1800 * 1000;
+ else if (index == 61)
+ voltage = 1900 * 1000;
+ else if (index == 62)
+ voltage = 2100 * 1000;
+ break;
+
+ case DCDC_EXTENDED_EN:
+ if (index == 0)
+ voltage = 0;
+ else if (index < 58)
+ voltage = (1852000 + (38600 * (index - 1)));
+ else if (index == 58)
+ voltage = 2084 * 1000;
+ else if (index == 59)
+ voltage = 2315 * 1000;
+ else if (index == 60)
+ voltage = 2778 * 1000;
+ else if (index == 61)
+ voltage = 2932 * 1000;
+ else if (index == 62)
+ voltage = 3241 * 1000;
+ break;
+
+ case DCDC_OFFSET_EN|DCDC_EXTENDED_EN:
+ if (index == 0)
+ voltage = 0;
+ else if (index < 58)
+ voltage = (2161000 + (38600 * (index - 1)));
+ else if (index == 58)
+ voltage = 4167 * 1000;
+ else if (index == 59)
+ voltage = 2315 * 1000;
+ else if (index == 60)
+ voltage = 2778 * 1000;
+ else if (index == 61)
+ voltage = 2932 * 1000;
+ else if (index == 62)
+ voltage = 3241 * 1000;
+ break;
+ }
+
+ return voltage;
+}
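+
+/*
+ * Example: with flags == 0 the linear range starts at 607.7 mV in 12.66 mV
+ * steps, so index 10 maps to 607700 + 12660 * 9 = 721640 uV; with
+ * DCDC_OFFSET_EN the base moves to 700 mV in 12.5 mV steps.
+ */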
+
+static int __tps80031_dcdc_set_voltage(struct device *parent,
+ struct tps80031_regulator *ri,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int vsel = 0;
+ int ret;
+
+ switch (ri->flags) {
+ case 0:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 607700) && (max_uV <= 1300000)) {
+ vsel = (10 * (min_uV - 607700)) / 1266;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ } else if ((min_uV > 1900000) && (max_uV >= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (max_uV >= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1500000) && (max_uV >= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (max_uV >= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (max_uV >= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+
+ case DCDC_OFFSET_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 700000) && (max_uV <= 1420000)) {
+ vsel = (min_uV - 600000) / 125;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ } else if ((min_uV > 1900000) && (max_uV >= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (max_uV >= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1350000) && (max_uV >= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (max_uV >= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (max_uV >= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+
+ case DCDC_EXTENDED_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
+ vsel = (min_uV - 1852000) / 386;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ break;
+
+ case DCDC_OFFSET_EN|DCDC_EXTENDED_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
+ vsel = (min_uV - 1852000) / 386;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ break;
+ }
+
+ if (selector)
+ *selector = vsel;
+
+ if (ri->force_reg) {
+ if (((ri->force_reg_cache >> 6) & 0x3) == 0) {
+ ret = tps80031_write(parent, ri->volt_id,
+ ri->force_reg, vsel);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing the "
+ "force register\n");
+ else
+ ri->force_reg_cache = vsel;
+ return ret;
+ }
+ }
+ ret = tps80031_write(parent, ri->volt_id, ri->volt_reg, vsel);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing the Voltage register\n");
+ else
+ ri->volt_reg_cache = vsel;
+ return ret;
+}
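+
+/*
+ * Example (flags == 0): min_uV = 1000000 gives
+ * 10 * (1000000 - 607700) / 1266 = 3098, rounded up to 3198, divided by
+ * 100 and incremented to vsel = 32, i.e. the first step at or above the
+ * requested minimum (607700 + 12660 * 31 = 1000160 uV).
+ */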
+
+static int tps80031dcdc_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ return __tps80031_dcdc_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
+}
+
+static int tps80031dcdc_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t vsel = 0;
+ int voltage = 0;
+
+ if (ri->force_reg) {
+ vsel = ri->force_reg_cache;
+ if ((vsel & SMPS_CMD_MASK) == 0)
+ goto decode;
+ }
+
+ vsel = ri->volt_reg_cache;
+
+decode:
+ vsel &= SMPS_VSEL_MASK;
+
+ switch (ri->flags) {
+ case 0:
+ if (vsel == 0)
+ voltage = 0;
+ else if (vsel < 58)
+ voltage = (607700 + (12660 * (vsel - 1)));
+ else if (vsel == 58)
+ voltage = 1350 * 1000;
+ else if (vsel == 59)
+ voltage = 1500 * 1000;
+ else if (vsel == 60)
+ voltage = 1800 * 1000;
+ else if (vsel == 61)
+ voltage = 1900 * 1000;
+ else if (vsel == 62)
+ voltage = 2100 * 1000;
+ break;
+
+ case DCDC_OFFSET_EN:
+ if (vsel == 0)
+ voltage = 0;
+ else if (vsel < 58)
+ voltage = (700000 + (12500 * (vsel - 1)));
+ else if (vsel == 58)
+ voltage = 1350 * 1000;
+ else if (vsel == 59)
+ voltage = 1500 * 1000;
+ else if (vsel == 60)
+ voltage = 1800 * 1000;
+ else if (vsel == 61)
+ voltage = 1900 * 1000;
+ else if (vsel == 62)
+ voltage = 2100 * 1000;
+ break;
+
+ case DCDC_EXTENDED_EN:
+ if (vsel == 0)
+ voltage = 0;
+ else if (vsel < 58)
+ voltage = (1852000 + (38600 * (vsel - 1)));
+ else if (vsel == 58)
+ voltage = 2084 * 1000;
+ else if (vsel == 59)
+ voltage = 2315 * 1000;
+ else if (vsel == 60)
+ voltage = 2778 * 1000;
+ else if (vsel == 61)
+ voltage = 2932 * 1000;
+ else if (vsel == 62)
+ voltage = 3241 * 1000;
+ break;
+
+ case DCDC_EXTENDED_EN|DCDC_OFFSET_EN:
+ if (vsel == 0)
+ voltage = 0;
+ else if (vsel < 58)
+ voltage = (2161000 + (38600 * (vsel - 1)));
+ else if (vsel == 58)
+ voltage = 4167 * 1000;
+ else if (vsel == 59)
+ voltage = 2315 * 1000;
+ else if (vsel == 60)
+ voltage = 2778 * 1000;
+ else if (vsel == 61)
+ voltage = 2932 * 1000;
+ else if (vsel == 62)
+ voltage = 3241 * 1000;
+ break;
+ }
+
+ return voltage;
+}
+
+static int tps80031ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+
+ if (index == 0)
+ return 0;
+
+ if ((ri->desc.id == TPS80031_ID_LDO2) &&
+ (ri->flags & TRACK_MODE_ENABLE))
+ return (ri->min_mV + (((index - 1) * 125))/10) * 1000;
+
+ return (ri->min_mV + ((index - 1) * 100)) * 1000;
+}
+
+static int __tps80031_ldo2_set_voltage_track_mode(struct device *parent,
+ struct tps80031_regulator *ri, int min_uV, int max_uV)
+{
+ int vsel = 0;
+ int ret;
+ int nvsel;
+
+ if (min_uV < 600000) {
+ vsel = 0;
+ } else if ((min_uV >= 600000) && (max_uV <= 1300000)) {
+ vsel = (min_uV - 600000) / 125;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Check for valid setting for TPS80031 or TPS80032-ES1.0 */
+ if ((tps80031_get_chip_info(parent) == TPS80031) ||
+ ((tps80031_get_chip_info(parent) == TPS80032) &&
+ (tps80031_get_pmu_version(parent) == 0x0))) {
+ nvsel = vsel & 0x1F;
+ if ((nvsel == 0x0) || (nvsel >= 0x19 && nvsel <= 0x1F)) {
+ dev_err(ri->dev, "Invalid value for track mode LDO2 "
+ "configuration for TPS8003x PMU\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = tps80031_write(parent, ri->volt_id, ri->volt_reg, vsel);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing the Voltage register\n");
+ else
+ ri->volt_reg_cache = vsel;
+ return ret;
+}
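+
+/*
+ * Example: in track mode LDO2 steps in 12.5 mV from 600 mV, so a request
+ * for 1.0 V gives vsel = (1000000 - 600000)/125 = 3200, i.e. 3200/100 + 1
+ * = 33 after the rounding step, and tps80031ldo_list_voltage() decodes 33
+ * back to 600 + (32 * 125)/10 = 1000 mV.  33 & 0x1F = 1, which also passes
+ * the TPS80031/TPS80032-ES1.0 validity check above.
+ */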
+
+
+static int __tps80031_ldo_set_voltage(struct device *parent,
+ struct tps80031_regulator *ri,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ int vsel;
+ int ret;
+
+ if ((min_uV/1000 < ri->min_mV) || (max_uV/1000 > ri->max_mV))
+ return -EDOM;
+
+ if ((ri->desc.id == TPS80031_ID_LDO2) &&
+ (ri->flags & TRACK_MODE_ENABLE))
+ return __tps80031_ldo2_set_voltage_track_mode(parent, ri,
+ min_uV, max_uV);
+
+ /*
+ * Use the formula below to calculate vsel:
+ * Vout(mV) = 1000 + 100 * (vsel - 1)
+ */
+ vsel = (min_uV/1000 - 1000)/100 + 1;
+ if (selector)
+ *selector = vsel;
+ ret = tps80031_write(parent, ri->volt_id, ri->volt_reg, vsel);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing the Voltage register\n");
+ else
+ ri->volt_reg_cache = vsel;
+ return ret;
+}
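+
+/*
+ * Example: for the regular LDO formula above, a request for 1.8 V gives
+ * vsel = (1800 - 1000)/100 + 1 = 9, and tps80031ldo_get_voltage() decodes
+ * code 9 back to 1000 + 100 * (9 - 1) = 1800 mV.
+ */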
+
+static int tps80031ldo_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+
+ return __tps80031_ldo_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
+}
+
+static int tps80031ldo_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ uint8_t vsel;
+
+
+ if ((ri->desc.id == TPS80031_ID_LDO2) &&
+ (ri->flags & TRACK_MODE_ENABLE)) {
+ vsel = ri->volt_reg_cache & 0x3F;
+ return (ri->min_mV + (((vsel - 1) * 125))/10) * 1000;
+ }
+
+ vsel = ri->volt_reg_cache & LDO_VSEL_MASK;
+ /*
+ * Use the formula below to calculate vsel:
+ * Vout(mV) = 1000 + 100 * (vsel - 1)
+ */
+ return (1000 + (100 * (vsel - 1))) * 1000;
+}
+
+/* VBUS */
+static int tps80031_vbus_enable_time(struct regulator_dev *rdev)
+{
+ /* Enable and settling time for vbus is 3ms */
+ return 3000;
+}
+static int tps80031_vbus_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ uint8_t ctrl1, ctrl3;
+ int ret;
+
+ if (ri->platform_flags & VBUS_SW_ONLY) {
+ ret = tps80031_read(parent, SLAVE_ID2,
+ CHARGERUSB_CTRL1_ADD, &ctrl1);
+ if (!ret)
+ ret = tps80031_read(parent, SLAVE_ID2,
+ CHARGERUSB_CTRL3_ADD, &ctrl3);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Error in reading control reg\n");
+ return ret;
+ }
+ if ((ctrl1 & OPA_MODE_EN) && (ctrl3 & BOOST_HW_PWR_EN))
+ return 1;
+ return 0;
+ } else {
+ return -EIO;
+ }
+}
+
+static int tps80031_vbus_enable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+
+ if (ri->platform_flags & VBUS_SW_ONLY) {
+ ret = tps80031_set_bits(parent, SLAVE_ID2,
+ CHARGERUSB_CTRL1_ADD, OPA_MODE_EN);
+ if (!ret)
+ ret = tps80031_set_bits(parent, SLAVE_ID2,
+ CHARGERUSB_CTRL3_ADD, BOOST_HW_PWR_EN);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Error in writing control reg\n");
+ return ret;
+ }
+ udelay(ri->delay);
+ return ret;
+ }
+ dev_err(&rdev->dev, "%s() is not supported with flag 0x%08x\n",
+ __func__, ri->platform_flags);
+ return -EIO;
+}
+
+static int tps80031_vbus_disable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret = 0;
+
+ if (ri->platform_flags & VBUS_SW_ONLY) {
+
+ if (ri->platform_flags & VBUS_DISCHRG_EN_PDN)
+ ret = tps80031_write(parent, SLAVE_ID2,
+ USB_VBUS_CTRL_SET, VBUS_DISCHRG);
+ if (!ret)
+ ret = tps80031_clr_bits(parent, SLAVE_ID2,
+ CHARGERUSB_CTRL1_ADD, OPA_MODE_EN);
+ if (!ret)
+ ret = tps80031_clr_bits(parent, SLAVE_ID2,
+ CHARGERUSB_CTRL3_ADD, BOOST_HW_PWR_EN);
+ if (!ret)
+ mdelay((ri->delay + 999)/1000);
+
+ if (ri->platform_flags & VBUS_DISCHRG_EN_PDN)
+ tps80031_write(parent, SLAVE_ID2,
+ USB_VBUS_CTRL_CLR, VBUS_DISCHRG);
+
+ if (ret < 0)
+ dev_err(&rdev->dev, "Error in writing control reg\n");
+ return ret;
+ }
+ dev_err(&rdev->dev, "%s() is not supported with flag 0x%08x\n",
+ __func__, ri->platform_flags);
+ return -EIO;
+}
+
+static int tps80031vbus_get_voltage(struct regulator_dev *rdev)
+{
+ int ret;
+ ret = tps80031_vbus_is_enabled(rdev);
+ if (ret > 0)
+ return 5000000;
+ return ret;
+}
+
+static struct regulator_ops tps80031dcdc_ops = {
+ .list_voltage = tps80031dcdc_list_voltage,
+ .set_voltage = tps80031dcdc_set_voltage,
+ .get_voltage = tps80031dcdc_get_voltage,
+ .enable = tps80031_reg_enable,
+ .disable = tps80031_reg_disable,
+ .is_enabled = tps80031_reg_is_enabled,
+ .enable_time = tps80031_regulator_enable_time,
+};
+
+static struct regulator_ops tps80031ldo_ops = {
+ .list_voltage = tps80031ldo_list_voltage,
+ .set_voltage = tps80031ldo_set_voltage,
+ .get_voltage = tps80031ldo_get_voltage,
+ .enable = tps80031_reg_enable,
+ .disable = tps80031_reg_disable,
+ .is_enabled = tps80031_reg_is_enabled,
+ .enable_time = tps80031_regulator_enable_time,
+};
+
+static struct regulator_ops tps80031vbus_ops = {
+ .get_voltage = tps80031vbus_get_voltage,
+ .enable = tps80031_vbus_enable,
+ .disable = tps80031_vbus_disable,
+ .is_enabled = tps80031_vbus_is_enabled,
+ .enable_time = tps80031_vbus_enable_time,
+};
+
+#define TPS80031_REG(_id, _trans_reg, _state_reg, _force_reg, _volt_reg, \
+ _volt_id, min_mVolts, max_mVolts, _ops, _n_volt, _delay, \
+ _preq_bit) \
+{ \
+ .trans_reg = _trans_reg, \
+ .state_reg = _state_reg, \
+ .force_reg = _force_reg, \
+ .volt_reg = _volt_reg, \
+ .volt_id = _volt_id, \
+ .id = TPS80031_ID_##_id, \
+ .min_mV = min_mVolts, \
+ .max_mV = max_mVolts, \
+ .desc = { \
+ .name = tps80031_rails(_id), \
+ .id = TPS80031_ID_##_id, \
+ .n_voltages = _n_volt, \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .delay = _delay, \
+ .preq_bit = _preq_bit, \
+}
+
+static struct tps80031_regulator tps80031_regulator[] = {
+ TPS80031_REG(VIO, 0x47, 0x48, 0x49, 0x4A, SLAVE_ID0, 600, 2100,
+ tps80031dcdc_ops, 63, 500, 4),
+ TPS80031_REG(SMPS1, 0x53, 0x54, 0x55, 0x56, SLAVE_ID0, 600, 2100,
+ tps80031dcdc_ops, 63, 500, 0),
+ TPS80031_REG(SMPS2, 0x59, 0x5A, 0x5B, 0x5C, SLAVE_ID0, 600, 2100,
+ tps80031dcdc_ops, 63, 500, 1),
+ TPS80031_REG(SMPS3, 0x65, 0x66, 0x00, 0x68, SLAVE_ID1, 600, 2100,
+ tps80031dcdc_ops, 63, 500, 2),
+ TPS80031_REG(SMPS4, 0x41, 0x42, 0x00, 0x44, SLAVE_ID1, 600, 2100,
+ tps80031dcdc_ops, 63, 500, 3),
+
+ TPS80031_REG(LDO1, 0x9D, 0x9E, 0x00, 0x9F, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 8),
+ TPS80031_REG(LDO2, 0x85, 0x86, 0x00, 0x87, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 9),
+ TPS80031_REG(LDO3, 0x8D, 0x8E, 0x00, 0x8F, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 10),
+ TPS80031_REG(LDO4, 0x89, 0x8A, 0x00, 0x8B, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 11),
+ TPS80031_REG(LDO5, 0x99, 0x9A, 0x00, 0x9B, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 12),
+ TPS80031_REG(LDO6, 0x91, 0x92, 0x00, 0x93, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 13),
+ TPS80031_REG(LDO7, 0xA5, 0xA6, 0x00, 0xA7, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 14),
+ TPS80031_REG(LDOUSB, 0xA1, 0xA2, 0x00, 0xA3, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 5),
+ TPS80031_REG(LDOLN, 0x95, 0x96, 0x00, 0x97, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, 15),
+ TPS80031_REG(VANA, 0x81, 0x82, 0x00, 0x83, SLAVE_ID1, 1000, 3300,
+ tps80031ldo_ops, 25, 500, -1),
+ TPS80031_REG(VBUS, 0x0, 0x0, 0x00, 0x0, SLAVE_ID1, 0, 5000,
+ tps80031vbus_ops, 2, 200000, -1),
+};
+
+static int tps80031_power_req_config(struct device *parent,
+ struct tps80031_regulator *ri,
+ struct tps80031_regulator_platform_data *tps80031_pdata)
+{
+ int ret;
+ uint8_t reg_val;
+
+ if (ri->preq_bit < 0)
+ return 0;
+
+ ret = tps80031_ext_power_req_config(parent, ri->ext_ctrl_flag,
+ ri->preq_bit, ri->state_reg, ri->trans_reg);
+ if (!ret)
+ ret = tps80031_read(parent, SLAVE_ID1, ri->trans_reg,
+ &ri->trans_reg_cache);
+
+ if (!ret && ri->state_reg)
+ ret = tps80031_read(parent, SLAVE_ID1, ri->state_reg,
+ &ri->state_reg_cache);
+ if (ret < 0) {
+ dev_err(ri->dev, "%s() fails\n", __func__);
+ return ret;
+ }
+
+ if (tps80031_pdata->ext_ctrl_flag &
+ (PWR_OFF_ON_SLEEP | PWR_ON_ON_SLEEP)) {
+ reg_val = (ri->trans_reg_cache & ~0xC);
+ if (tps80031_pdata->ext_ctrl_flag & PWR_ON_ON_SLEEP)
+ reg_val |= 0x4;
+
+ ret = tps80031_write(parent, SLAVE_ID1, ri->trans_reg,
+ reg_val);
+ if (ret < 0)
+ dev_err(ri->dev, "Not able to write reg 0x%02x\n",
+ ri->trans_reg);
+ else
+ ri->trans_reg_cache = reg_val;
+ }
+ return ret;
+}
+
+static int tps80031_regulator_preinit(struct device *parent,
+ struct tps80031_regulator *ri,
+ struct tps80031_regulator_platform_data *tps80031_pdata)
+{
+ int ret = 0;
+ uint8_t reg_val;
+
+ if (ri->desc.id == TPS80031_ID_LDOUSB) {
+ if (ri->platform_flags & USBLDO_INPUT_VSYS)
+ ret = tps80031_update(parent, SLAVE_ID1,
+ TPS80031_MISC2_ADD,
+ MISC2_LDOUSB_IN_VSYS, MISC2_LDOUSB_IN_MASK);
+ if (ri->platform_flags & USBLDO_INPUT_PMID)
+ ret = tps80031_update(parent, SLAVE_ID1,
+ TPS80031_MISC2_ADD,
+ MISC2_LDOUSB_IN_PMID, MISC2_LDOUSB_IN_MASK);
+ if (ret < 0) {
+ dev_err(ri->dev, "Not able to configure the rail "
+ "LDOUSB as per platform data error %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ri->desc.id == TPS80031_ID_LDO3) {
+ if (ri->platform_flags & LDO3_OUTPUT_VIB)
+ ret = tps80031_update(parent, SLAVE_ID1,
+ TPS80031_MISC2_ADD,
+ MISC2_LDO3_SEL_VIB_VAL,
+ MISC2_LDO3_SEL_VIB_MASK);
+ if (ret < 0) {
+ dev_err(ri->dev, "Not able to configure the rail "
+ "LDO3 as per platform data error %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!tps80031_pdata->init_apply)
+ return 0;
+
+ if (tps80031_pdata->init_uV >= 0) {
+ switch (ri->desc.id) {
+ case TPS80031_ID_VIO:
+ case TPS80031_ID_SMPS1:
+ case TPS80031_ID_SMPS2:
+ case TPS80031_ID_SMPS3:
+ case TPS80031_ID_SMPS4:
+ ret = __tps80031_dcdc_set_voltage(parent, ri,
+ tps80031_pdata->init_uV,
+ tps80031_pdata->init_uV, 0);
+ break;
+
+ case TPS80031_ID_LDO1:
+ case TPS80031_ID_LDO2:
+ case TPS80031_ID_LDO3:
+ case TPS80031_ID_LDO4:
+ case TPS80031_ID_LDO5:
+ case TPS80031_ID_LDO6:
+ case TPS80031_ID_LDO7:
+ case TPS80031_ID_LDOUSB:
+ case TPS80031_ID_LDOLN:
+ case TPS80031_ID_VANA:
+ ret = __tps80031_ldo_set_voltage(parent, ri,
+ tps80031_pdata->init_uV,
+ tps80031_pdata->init_uV, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(ri->dev, "Not able to initialize voltage %d "
+ "for rail %d err %d\n", tps80031_pdata->init_uV,
+ ri->desc.id, ret);
+ return ret;
+ }
+ }
+
+ if (tps80031_pdata->init_enable)
+ reg_val = (ri->state_reg_cache & ~STATE_MASK) |
+ (STATE_ON & STATE_MASK);
+ else
+ reg_val = (ri->state_reg_cache & ~STATE_MASK) |
+ (STATE_OFF & STATE_MASK);
+
+ ret = tps80031_write(parent, SLAVE_ID1, ri->state_reg, reg_val);
+ if (ret < 0)
+ dev_err(ri->dev, "Not able to %s rail %d err %d\n",
+ (tps80031_pdata->init_enable) ? "enable" : "disable",
+ ri->desc.id, ret);
+ else
+ ri->state_reg_cache = reg_val;
+ return ret;
+}
+
+static inline struct tps80031_regulator *find_regulator_info(int id)
+{
+ struct tps80031_regulator *ri;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps80031_regulator); i++) {
+ ri = &tps80031_regulator[i];
+ if (ri->desc.id == id)
+ return ri;
+ }
+ return NULL;
+}
+static void check_smps_mode_mult(struct device *parent,
+ struct tps80031_regulator *ri)
+{
+ int mult_offset;
+ switch (ri->desc.id) {
+ case TPS80031_ID_VIO:
+ mult_offset = SMPS_MULTOFFSET_VIO;
+ break;
+ case TPS80031_ID_SMPS1:
+ mult_offset = SMPS_MULTOFFSET_SMPS1;
+ break;
+ case TPS80031_ID_SMPS2:
+ mult_offset = SMPS_MULTOFFSET_SMPS2;
+ break;
+ case TPS80031_ID_SMPS3:
+ mult_offset = SMPS_MULTOFFSET_SMPS3;
+ break;
+ case TPS80031_ID_SMPS4:
+ mult_offset = SMPS_MULTOFFSET_SMPS4;
+ break;
+ case TPS80031_ID_LDO2:
+ ri->flags = (tps80031_get_smps_mult(parent) & (1 << 5)) ?
+ TRACK_MODE_ENABLE : 0;
+ /* In TRACK mode, LDO2 varies from 600mV to 1300mV */
+ if (ri->flags & TRACK_MODE_ENABLE) {
+ ri->min_mV = 600;
+ ri->max_mV = 1300;
+ ri->desc.n_voltages = 57;
+ }
+ return;
+ default:
+ return;
+ }
+
+ ri->flags = (tps80031_get_smps_offset(parent) & mult_offset) ?
+ DCDC_OFFSET_EN : 0;
+ ri->flags |= (tps80031_get_smps_mult(parent) & mult_offset) ?
+ DCDC_EXTENDED_EN : 0;
+ return;
+}
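+
+/*
+ * Example: for SMPS1 the multoffset bit is BIT(3), so bit 3 of the PMC
+ * offset register (0xE0) sets DCDC_OFFSET_EN and bit 3 of the mult
+ * register (0xE3) sets DCDC_EXTENDED_EN, while bit 5 of the mult register
+ * switches LDO2 into track mode as handled above.
+ */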
+
+static inline int tps80031_cache_regulator_register(struct device *parent,
+ struct tps80031_regulator *ri)
+{
+ int ret;
+
+ ret = tps80031_read(parent, SLAVE_ID1, ri->trans_reg,
+ &ri->trans_reg_cache);
+ if (!ret && ri->state_reg)
+ ret = tps80031_read(parent, SLAVE_ID1, ri->state_reg,
+ &ri->state_reg_cache);
+ if (!ret && ri->force_reg)
+ ret = tps80031_read(parent, ri->volt_id, ri->force_reg,
+ &ri->force_reg_cache);
+ if (!ret && ri->volt_reg)
+ ret = tps80031_read(parent, ri->volt_id, ri->volt_reg,
+ &ri->volt_reg_cache);
+ return ret;
+}
+
+static int __devinit tps80031_regulator_probe(struct platform_device *pdev)
+{
+ struct tps80031_regulator *ri = NULL;
+ struct regulator_dev *rdev;
+ struct tps80031_regulator_platform_data *tps_pdata;
+ int id = pdev->id;
+ int err;
+
+ dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+
+ ri = find_regulator_info(id);
+ if (ri == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+ tps_pdata = pdev->dev.platform_data;
+ ri->dev = &pdev->dev;
+ if (tps_pdata->delay_us > 0)
+ ri->delay = tps_pdata->delay_us;
+
+ check_smps_mode_mult(pdev->dev.parent, ri);
+ ri->platform_flags = tps_pdata->flags;
+ ri->ext_ctrl_flag = tps_pdata->ext_ctrl_flag;
+
+ err = tps80031_cache_regulator_register(pdev->dev.parent, ri);
+ if (err) {
+ dev_err(&pdev->dev, "Register access for caching failed\n");
+ return err;
+ }
+ err = tps80031_regulator_preinit(pdev->dev.parent, ri, tps_pdata);
+ if (err)
+ return err;
+
+ err = tps80031_power_req_config(pdev->dev.parent, ri, tps_pdata);
+ if (err)
+ return err;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ &tps_pdata->regulator, ri);
+ if (IS_ERR_OR_NULL(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit tps80031_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver tps80031_regulator_driver = {
+ .driver = {
+ .name = "tps80031-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_regulator_probe,
+ .remove = __devexit_p(tps80031_regulator_remove),
+};
+
+static int __init tps80031_regulator_init(void)
+{
+ return platform_driver_register(&tps80031_regulator_driver);
+}
+subsys_initcall(tps80031_regulator_init);
+
+static void __exit tps80031_regulator_exit(void)
+{
+ platform_driver_unregister(&tps80031_regulator_driver);
+}
+module_exit(tps80031_regulator_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Regulator Driver for TI TPS80031 PMIC");
+MODULE_ALIAS("platform:tps80031-regulator");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 5a538fc1cc85..45f4c8040845 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -106,6 +106,24 @@ config RTC_INTF_DEV_UIE_EMUL
clock several times per second, please enable this option
only if you know that you really need it.
+config RTC_INTF_ALARM
+ bool "Android alarm driver"
+ depends on RTC_CLASS
+ default y
+ help
+	  Provides non-wakeup and RTC-backed wakeup alarms based on RTC or
+	  elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+	  Also provides an interface to set the wall time, which must be used
+	  for elapsed realtime to work.
+
+config RTC_INTF_ALARM_DEV
+ bool "Android alarm device"
+ depends on RTC_INTF_ALARM
+ default y
+ help
+ Exports the alarm interface to user-space.
+
+
config RTC_DRV_TEST
tristate "Test driver/device"
help
@@ -213,6 +231,26 @@ config RTC_DRV_MAX8998
This driver can also be built as a module. If so, the module
will be called rtc-max8998.
+config RTC_DRV_MAX8907C
+ tristate "Maxim MAX8907C"
+ depends on MFD_MAX8907C
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX8907C PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max8907c.
+
+config RTC_DRV_MAX77663
+ tristate "Maxim MAX77663"
+ depends on MFD_MAX77663
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX77663 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max77663.
+
config RTC_DRV_RS5C372
tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
help
@@ -304,6 +342,12 @@ config RTC_DRV_DM355EVM
help
Supports the RTC firmware in the MSP430 on the DM355 EVM.
+config RTC_DRV_TPS6586X
+ tristate "TI TPS6586X RTC"
+ depends on MFD_TPS6586X
+ help
+	  This driver supports the TPS6586X RTC.
+
config RTC_DRV_TWL92330
boolean "TI TWL92330/Menelaus"
depends on MENELAUS
@@ -1048,8 +1092,7 @@ config RTC_DRV_TEGRA
tristate "NVIDIA Tegra Internal RTC driver"
depends on RTC_CLASS && ARCH_TEGRA
help
- If you say yes here you get support for the
- Tegra 200 series internal RTC module.
+ If you say yes here you get support for the Tegra internal RTC module.
This drive can also be built as a module. If so, the module
will be called rtc-tegra.
@@ -1061,6 +1104,26 @@ config RTC_DRV_TILE
Enable support for the Linux driver side of the Tilera
hypervisor's real-time clock interface.
+config RTC_DRV_TPS6591x
+ tristate "TPS6591x RTC driver"
+ depends on MFD_TPS6591X
+ default n
+ help
+ If you say yes here you get support for the TPS6591x RTC module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-tps6591x.
+
+config RTC_DRV_TPS80031
+ tristate "TPS80031 RTC driver"
+ depends on MFD_TPS80031
+ default n
+ help
+ If you say yes here you get support for the TPS80031 RTC module.
+
+ This driver can also be built as a module. If so, the module
+	  will be called rtc-tps80031.
+
config RTC_DRV_PUV3
tristate "PKUnity v3 RTC support"
depends on ARCH_PUV3
@@ -1070,4 +1133,14 @@ config RTC_DRV_PUV3
This drive can also be built as a module. If so, the module
will be called rtc-puv3.
+config RTC_DRV_RC5T583
+ tristate "RICOH RC5T583 PMU RTC driver"
+ depends on MFD_RICOH583
+ default n
+ help
+ If you say yes here you get support for the RICOH RC5T583 RTC module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rc5t583.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 6e6982335c10..be5b16284db0 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
rtc-core-y := class.o interface.o
+obj-$(CONFIG_RTC_INTF_ALARM) += alarm.o
+obj-$(CONFIG_RTC_INTF_ALARM_DEV) += alarm-dev.o
rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
@@ -63,8 +65,10 @@ obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
+obj-$(CONFIG_RTC_DRV_MAX8907C) += rtc-max8907c.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_MC13XXX) += rtc-mc13xxx.o
+obj-$(CONFIG_RTC_DRV_MAX77663) += rtc-max77663.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
@@ -101,6 +105,10 @@ obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
+obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
+obj-$(CONFIG_RTC_DRV_TPS6591x) += rtc-tps6591x.o
+obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o
+obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-ricoh583.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
diff --git a/drivers/rtc/alarm-dev.c b/drivers/rtc/alarm-dev.c
new file mode 100644
index 000000000000..686e6f7ed480
--- /dev/null
+++ b/drivers/rtc/alarm-dev.c
@@ -0,0 +1,286 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wake_lock alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ unsigned long flags;
+ struct timespec new_alarm_time;
+ struct timespec new_rtc_time;
+ struct timespec tmp_time;
+ enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+ if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+ return -EINVAL;
+
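+	/* Non-GET_TIME commands require a writable fd; all of them except
+	 * SET_RTC also claim exclusive ownership of the alarm device. */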
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+ if (file->private_data == NULL &&
+ cmd != ANDROID_ALARM_SET_RTC) {
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_opened) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return -EBUSY;
+ }
+ alarm_opened = 1;
+ file->private_data = (void *)1;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_CLEAR(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d clear\n", alarm_type);
+ alarm_try_to_cancel(&alarms[alarm_type]);
+ if (alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if (!alarm_pending && !wait_pending)
+ wake_unlock(&alarm_wake_lock);
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+
+ case ANDROID_ALARM_SET_OLD:
+ case ANDROID_ALARM_SET_AND_WAIT_OLD:
+ if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ new_alarm_time.tv_nsec = 0;
+ goto from_old_alarm_set;
+
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ if (copy_from_user(&new_alarm_time, (void __user *)arg,
+ sizeof(new_alarm_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+from_old_alarm_set:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+ new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+ alarm_enabled |= alarm_type_mask;
+ alarm_start_range(&alarms[alarm_type],
+ timespec_to_ktime(new_alarm_time),
+ timespec_to_ktime(new_alarm_time));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+ && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+ break;
+		/* fall through */
+ case ANDROID_ALARM_WAIT:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm wait\n");
+ if (!alarm_pending && wait_pending) {
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if (rv)
+ goto err1;
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+ case ANDROID_ALARM_SET_RTC:
+ if (copy_from_user(&new_rtc_time, (void __user *)arg,
+ sizeof(new_rtc_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ rv = alarm_set_rtc(new_rtc_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (rv < 0)
+ goto err1;
+ break;
+ case ANDROID_ALARM_GET_TIME(0):
+ switch (alarm_type) {
+ case ANDROID_ALARM_RTC_WAKEUP:
+ case ANDROID_ALARM_RTC:
+ getnstimeofday(&tmp_time);
+ break;
+ case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+ case ANDROID_ALARM_ELAPSED_REALTIME:
+ tmp_time =
+ ktime_to_timespec(alarm_get_elapsed_realtime());
+ break;
+ case ANDROID_ALARM_TYPE_COUNT:
+ case ANDROID_ALARM_SYSTEMTIME:
+ ktime_get_ts(&tmp_time);
+ break;
+ }
+ if (copy_to_user((void __user *)arg, &tmp_time,
+ sizeof(tmp_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto err1;
+ }
+err1:
+ return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (file->private_data != 0) {
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+ uint32_t alarm_type_mask = 1U << i;
+ if (alarm_enabled & alarm_type_mask) {
+ pr_alarm(INFO, "alarm_release: clear alarm, "
+ "pending %d\n",
+ !!(alarm_pending & alarm_type_mask));
+ alarm_enabled &= ~alarm_type_mask;
+ }
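+			/* Drop the lock while cancelling so a running
+			 * alarm handler can complete. */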
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm_cancel(&alarms[i]);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (alarm_pending | wait_pending) {
+ if (alarm_pending)
+ pr_alarm(INFO, "alarm_release: clear "
+ "pending alarms %x\n", alarm_pending);
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ alarm_pending = 0;
+ }
+ alarm_opened = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static void alarm_triggered(struct alarm *alarm)
+{
+ unsigned long flags;
+ uint32_t alarm_type_mask = 1U << alarm->type;
+
+ pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_enabled & alarm_type_mask) {
+ wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
+ alarm_enabled &= ~alarm_type_mask;
+ alarm_pending |= alarm_type_mask;
+ wake_up(&alarm_wait_queue);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = alarm_ioctl,
+ .open = alarm_open,
+ .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "alarm",
+ .fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+ int err;
+ int i;
+
+ err = misc_register(&alarm_device);
+ if (err)
+ return err;
+
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+ alarm_init(&alarms[i], i, alarm_triggered);
+ wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
+
+ return 0;
+}
+
+static void __exit alarm_dev_exit(void)
+{
+ misc_deregister(&alarm_device);
+ wake_lock_destroy(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
new file mode 100644
index 000000000000..28b0df836a30
--- /dev/null
+++ b/drivers/rtc/alarm.c
@@ -0,0 +1,590 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/wakelock.h>
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+ ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+struct alarm_queue {
+ struct rb_root alarms;
+ struct rb_node *first;
+ struct hrtimer timer;
+ ktime_t delta;
+ bool stopped;
+ ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct wake_lock alarm_rtc_wake_lock;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
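+/* Reprogram the queue's hrtimer for its earliest alarm.
+ * Callers must hold alarm_slock. */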
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+ struct alarm *alarm;
+ bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+ base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+ if (base->stopped) {
+ pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+ return;
+ }
+
+ if (is_wakeup && !suspended && head_removed)
+ wake_unlock(&alarm_rtc_wake_lock);
+
+ if (!base->first)
+ return;
+
+ alarm = container_of(base->first, struct alarm, node);
+
+ pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (is_wakeup && suspended) {
+ pr_alarm(FLOW, "changed alarm while suspened\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+ return;
+ }
+
+ hrtimer_try_to_cancel(&base->timer);
+ base->timer.node.expires = ktime_add(base->delta, alarm->expires);
+ base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+ hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ struct rb_node **link = &base->alarms.rb_node;
+ struct rb_node *parent = NULL;
+ struct alarm *entry;
+ int leftmost = 1;
+ bool was_first = false;
+
+ pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ was_first = true;
+ }
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ }
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct alarm, node);
+ /*
+		 * We don't care about collisions. Nodes with
+ * the same expiry time stay together.
+ */
+ if (alarm->expires.tv64 < entry->expires.tv64) {
+ link = &(*link)->rb_left;
+ } else {
+ link = &(*link)->rb_right;
+ leftmost = 0;
+ }
+ }
+ if (leftmost)
+ base->first = &alarm->node;
+ if (leftmost || was_first)
+ update_timer_locked(base, was_first);
+
+ rb_link_node(&alarm->node, parent, link);
+ rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * alarm_init - initialize an alarm
+ * @alarm: the alarm to be initialized
+ * @type: the alarm type to be used
+ * @function: alarm callback function
+ */
+void alarm_init(struct alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct alarm *))
+{
+ RB_CLEAR_NODE(&alarm->node);
+ alarm->type = type;
+ alarm->function = function;
+
+ pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * alarm_start_range - (re)start an alarm
+ * @alarm: the alarm to be added
+ * @start: earliest expiry time
+ * @end: expiry time
+ */
+void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm->softexpires = start;
+ alarm->expires = end;
+ alarm_enqueue_locked(alarm);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+/**
+ * alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm: alarm to stop
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ * -1 when the alarm may currently be executing the callback function and
+ * cannot be stopped (it may also be inactive)
+ */
+int alarm_try_to_cancel(struct alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ unsigned long flags;
+ bool first = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires));
+ ret = 1;
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ first = true;
+ }
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ if (first)
+ update_timer_locked(base, true);
+ } else
+ pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+ alarm->type, alarm->function);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (!ret && hrtimer_callback_running(&base->timer))
+ ret = -1;
+ return ret;
+}
+
+/**
+ * alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm: the alarm to be cancelled
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ */
+int alarm_cancel(struct alarm *alarm)
+{
+ for (;;) {
+ int ret = alarm_try_to_cancel(alarm);
+ if (ret >= 0)
+ return ret;
+ cpu_relax();
+ }
+}
+
+/**
+ * alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time: timespec value containing the new time
+ */
+int alarm_set_rtc(struct timespec new_time)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct rtc_time rtc_new_rtc_time;
+ struct timespec tmp_time;
+
+ rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+ pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+ new_time.tv_sec, new_time.tv_nsec,
+ rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+ rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+ rtc_new_rtc_time.tm_mday,
+ rtc_new_rtc_time.tm_year + 1900);
+
+ mutex_lock(&alarm_setrtc_mutex);
+ spin_lock_irqsave(&alarm_slock, flags);
+ wake_lock(&alarm_rtc_wake_lock);
+ getnstimeofday(&tmp_time);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_try_to_cancel(&alarms[i].timer);
+ alarms[i].stopped = true;
+ alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+ }
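+	/* Shift the elapsed-realtime delta by the wall-clock change so that
+	 * elapsed realtime stays continuous across the update. */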
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+ timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ ret = do_settimeofday(&new_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ alarms[i].stopped = false;
+ update_timer_locked(&alarms[i], false);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ret < 0) {
+ pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+ goto err;
+ }
+ if (!alarm_rtc_dev) {
+ pr_alarm(ERROR,
+ "alarm_set_rtc: no RTC, time will be lost on reboot\n");
+ goto err;
+ }
+ ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+ if (ret < 0)
+ pr_alarm(ERROR, "alarm_set_rtc: "
+ "Failed to set RTC, time will be lost on reboot\n");
+err:
+ wake_unlock(&alarm_rtc_wake_lock);
+ mutex_unlock(&alarm_setrtc_mutex);
+ return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+ ktime_t now;
+ unsigned long flags;
+ struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ now = base->stopped ? base->stopped_time : ktime_get_real();
+ now = ktime_sub(now, base->delta);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return now;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+ struct alarm_queue *base;
+ struct alarm *alarm;
+ unsigned long flags;
+ ktime_t now;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+
+ base = container_of(timer, struct alarm_queue, timer);
+ now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+ now = ktime_sub(now, base->delta);
+
+ pr_alarm(INT, "alarm_timer_triggered type %d at %lld\n",
+ base - alarms, ktime_to_ns(now));
+
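+	/* Dispatch every alarm whose soft expiry has passed, dropping the
+	 * lock around each callback. */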
+ while (base->first) {
+ alarm = container_of(base->first, struct alarm, node);
+ if (alarm->softexpires.tv64 > now.tv64) {
+ pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+ alarm->function, ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ break;
+ }
+ base->first = rb_next(&alarm->node);
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm->function(alarm);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (!base->first)
+ pr_alarm(FLOW, "no more alarms of type %d\n", base - alarms);
+ update_timer_locked(base, true);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+ struct rtc_device *rtc = alarm_rtc_dev;
+ if (!(rtc->irq_data & RTC_AF))
+ return;
+ pr_alarm(INT, "rtc alarm triggered\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+}
+
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rtc_current_rtc_time;
+ unsigned long rtc_current_time;
+ unsigned long rtc_alarm_time;
+ struct timespec rtc_delta;
+ struct timespec wall_time;
+ struct alarm_queue *wakeup_queue = NULL;
+ struct alarm_queue *tmp_queue = NULL;
+
+ pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = true;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+ hrtimer_cancel(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+ tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+ if (tmp_queue->first)
+ wakeup_queue = tmp_queue;
+ tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+ if (tmp_queue->first && (!wakeup_queue ||
+ hrtimer_get_expires(&tmp_queue->timer).tv64 <
+ hrtimer_get_expires(&wakeup_queue->timer).tv64))
+ wakeup_queue = tmp_queue;
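+	/* Convert the earliest wakeup alarm from wall-clock time to RTC time
+	 * using the current wall/RTC offset, then program the RTC alarm. */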
+ if (wakeup_queue) {
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ getnstimeofday(&wall_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ set_normalized_timespec(&rtc_delta,
+ wall_time.tv_sec - rtc_current_time,
+ wall_time.tv_nsec);
+
+ rtc_alarm_time = timespec_sub(ktime_to_timespec(
+ hrtimer_get_expires(&wakeup_queue->timer)),
+ rtc_delta).tv_sec;
+
+ rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+ rtc_alarm.enabled = 1;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ pr_alarm(SUSPEND,
+ "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+ rtc_alarm_time, rtc_current_time,
+ rtc_delta.tv_sec, rtc_delta.tv_nsec);
+ if (rtc_current_time + 1 >= rtc_alarm_time) {
+ pr_alarm(SUSPEND, "alarm about to go off\n");
+ memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+ rtc_alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+ false);
+ update_timer_locked(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+ err = -EBUSY;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+ return err;
+}
+
+static int alarm_resume(struct platform_device *pdev)
+{
+ struct rtc_wkalrm alarm;
+ unsigned long flags;
+
+ pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+ memset(&alarm, 0, sizeof(alarm));
+ alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+ update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+ false);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+ .func = alarm_triggered_func
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int err;
+ struct rtc_device *rtc = to_rtc_device(dev);
+
+ mutex_lock(&alarm_setrtc_mutex);
+
+ if (alarm_rtc_dev) {
+ err = -EBUSY;
+ goto err1;
+ }
+
+ alarm_platform_dev =
+ platform_device_register_simple("alarm", -1, NULL, 0);
+ if (IS_ERR(alarm_platform_dev)) {
+ err = PTR_ERR(alarm_platform_dev);
+ goto err2;
+ }
+ err = rtc_irq_register(rtc, &alarm_rtc_task);
+ if (err)
+ goto err3;
+ alarm_rtc_dev = rtc;
+ pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+ mutex_unlock(&alarm_setrtc_mutex);
+
+ return 0;
+
+err3:
+ platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+ mutex_unlock(&alarm_setrtc_mutex);
+ return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if (dev == &alarm_rtc_dev->dev) {
+ pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+ rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+ platform_device_unregister(alarm_platform_dev);
+ alarm_rtc_dev = NULL;
+ }
+}
+
+static struct class_interface rtc_alarm_interface = {
+ .add_dev = &rtc_alarm_add_device,
+ .remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+ .suspend = alarm_suspend,
+ .resume = alarm_resume,
+ .driver = {
+ .name = "alarm"
+ }
+};
+
+static int __init alarm_late_init(void)
+{
+ unsigned long flags;
+ struct timespec tmp_time, system_time;
+
+ /* this needs to run after the rtc is read at boot */
+ spin_lock_irqsave(&alarm_slock, flags);
+	/* We read the current rtc and system time so we can later calculate
+	 * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+ * (rtc - (boot_rtc - boot_systemtime))
+ */
+ getnstimeofday(&tmp_time);
+ ktime_get_ts(&system_time);
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static int __init alarm_driver_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_init(&alarms[i].timer,
+ CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ alarms[i].timer.function = alarm_timer_triggered;
+ }
+ hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+ err = platform_driver_register(&alarm_driver);
+ if (err < 0)
+ goto err1;
+ wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
+ rtc_alarm_interface.class = rtc_class;
+ err = class_interface_register(&rtc_alarm_interface);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+err1:
+ return err;
+}
+
+static void __exit alarm_exit(void)
+{
+ class_interface_unregister(&rtc_alarm_interface);
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
diff --git a/drivers/rtc/rtc-max77663.c b/drivers/rtc/rtc-max77663.c
new file mode 100644
index 000000000000..a2ecf9327353
--- /dev/null
+++ b/drivers/rtc/rtc-max77663.c
@@ -0,0 +1,611 @@
+/*
+ * drivers/rtc/rtc-max77663.c
+ * Max77663 RTC driver
+ *
+ * Copyright 2011 Maxim Integrated Products, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/mfd/max77663-core.h>
+
+/* RTC Registers */
+#define MAX77663_RTC_IRQ 0x00
+#define MAX77663_RTC_IRQ_MASK 0x01
+#define MAX77663_RTC_CTRL_MODE 0x02
+#define MAX77663_RTC_CTRL 0x03
+#define MAX77663_RTC_UPDATE0 0x04
+#define MAX77663_RTC_UPDATE1 0x05
+#define MAX77663_RTC_SEC 0x07
+#define MAX77663_RTC_MIN 0x08
+#define MAX77663_RTC_HOUR 0x09
+#define MAX77663_RTC_WEEKDAY 0x0A
+#define MAX77663_RTC_MONTH 0x0B
+#define MAX77663_RTC_YEAR 0x0C
+#define MAX77663_RTC_MONTHDAY 0x0D
+#define MAX77663_RTC_ALARM_SEC1 0x0E
+#define MAX77663_RTC_ALARM_MIN1 0x0F
+#define MAX77663_RTC_ALARM_HOUR1 0x10
+#define MAX77663_RTC_ALARM_WEEKDAY1 0x11
+#define MAX77663_RTC_ALARM_MONTH1 0x12
+#define MAX77663_RTC_ALARM_YEAR1 0x13
+#define MAX77663_RTC_ALARM_MONTHDAY1 0x14
+
+#define RTC_IRQ_60SEC_MASK (1 << 0)
+#define RTC_IRQ_ALARM1_MASK (1 << 1)
+#define RTC_IRQ_ALARM2_MASK (1 << 2)
+#define RTC_IRQ_SMPL_MASK (1 << 3)
+#define RTC_IRQ_1SEC_MASK (1 << 4)
+#define RTC_IRQ_MASK 0x1F
+
+#define BCD_MODE_MASK (1 << 0)
+#define HR_MODE_MASK (1 << 1)
+
+#define WB_UPDATE_MASK (1 << 0)
+#define FLAG_AUTO_CLEAR_MASK (1 << 1)
+#define FREEZE_SEC_MASK (1 << 2)
+#define RTC_WAKE_MASK (1 << 3)
+#define RB_UPDATE_MASK (1 << 4)
+
+#define WB_UPDATE_FLAG_MASK (1 << 0)
+#define RB_UPDATE_FLAG_MASK (1 << 1)
+
+#define SEC_MASK 0x7F
+#define MIN_MASK 0x7F
+#define HOUR_MASK 0x3F
+#define WEEKDAY_MASK 0x7F
+#define MONTH_MASK 0x1F
+#define YEAR_MASK 0xFF
+#define MONTHDAY_MASK 0x3F
+
+#define ALARM_EN_MASK 0x80
+#define ALARM_EN_SHIFT 7
+
+#define RTC_YEAR_BASE 100
+#define RTC_YEAR_MAX 99
+
+/* ON/OFF Registers */
+#define MAX77663_REG_ONOFF_CFG2 0x42
+
+#define ONOFF_WK_ALARM1_MASK (1 << 2)
+
+enum {
+ RTC_SEC,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_MONTH,
+ RTC_YEAR,
+ RTC_MONTHDAY,
+ RTC_NR
+};
+
+struct max77663_rtc {
+ struct rtc_device *rtc;
+ struct device *dev;
+
+ struct mutex io_lock;
+ int irq;
+ u8 irq_mask;
+};
+
+static inline struct device *_to_parent(struct max77663_rtc *rtc)
+{
+ return rtc->dev->parent;
+}
+
+static inline int max77663_rtc_update_buffer(struct max77663_rtc *rtc,
+ int write)
+{
+ struct device *parent = _to_parent(rtc);
+ u8 val = FLAG_AUTO_CLEAR_MASK | RTC_WAKE_MASK;
+ int ret;
+
+ if (write)
+ val |= WB_UPDATE_MASK;
+ else
+ val |= RB_UPDATE_MASK;
+
+ dev_dbg(rtc->dev, "rtc_update_buffer: write=%d, addr=0x%x, val=0x%x\n",
+ write, MAX77663_RTC_UPDATE0, val);
+ ret = max77663_write(parent, MAX77663_RTC_UPDATE0, &val, 1, 1);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_update_buffer: "
+ "Failed to get rtc update0\n");
+ return ret;
+ }
+
+ /*
+ * Must wait 14ms for buffer update.
+ * If the sleeping time is 10us - 20ms, usleep_range() is recommended.
+	 * Please refer to Documentation/timers/timers-howto.txt.
+ */
+ usleep_range(14000, 14000);
+
+ return 0;
+}
+
+static inline int max77663_rtc_write(struct max77663_rtc *rtc, u8 addr,
+ void *values, u32 len, int update_buffer)
+{
+ struct device *parent = _to_parent(rtc);
+ int ret;
+
+ mutex_lock(&rtc->io_lock);
+
+ dev_dbg(rtc->dev, "rtc_write: addr=0x%x, values=0x%x, len=%u, "
+ "update_buffer=%d\n",
+ addr, *((u8 *)values), len, update_buffer);
+ ret = max77663_write(parent, addr, values, len, 1);
+ if (ret < 0)
+ goto out;
+
+ if (update_buffer)
+ ret = max77663_rtc_update_buffer(rtc, 1);
+
+out:
+ mutex_unlock(&rtc->io_lock);
+ return ret;
+}
+
+static inline int max77663_rtc_read(struct max77663_rtc *rtc, u8 addr,
+ void *values, u32 len, int update_buffer)
+{
+ struct device *parent = _to_parent(rtc);
+ int ret;
+
+ mutex_lock(&rtc->io_lock);
+
+ if (update_buffer) {
+ ret = max77663_rtc_update_buffer(rtc, 0);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = max77663_read(parent, addr, values, len, 1);
+ dev_dbg(rtc->dev, "rtc_read: addr=0x%x, values=0x%x, len=%u, "
+ "update_buffer=%d\n",
+ addr, *((u8 *)values), len, update_buffer);
+
+out:
+ mutex_unlock(&rtc->io_lock);
+ return ret;
+}
+
+static inline int max77663_rtc_reg_to_tm(struct max77663_rtc *rtc, u8 *buf,
+ struct rtc_time *tm)
+{
+ int wday = buf[RTC_WEEKDAY] & WEEKDAY_MASK;
+
+ if (unlikely(!wday)) {
+ dev_err(rtc->dev,
+ "rtc_reg_to_tm: Invalid day of week, %d\n", wday);
+ return -EINVAL;
+ }
+
+ tm->tm_sec = (int)(buf[RTC_SEC] & SEC_MASK);
+ tm->tm_min = (int)(buf[RTC_MIN] & MIN_MASK);
+ tm->tm_hour = (int)(buf[RTC_HOUR] & HOUR_MASK);
+ tm->tm_mday = (int)(buf[RTC_MONTHDAY] & MONTHDAY_MASK);
+ tm->tm_mon = (int)(buf[RTC_MONTH] & MONTH_MASK) - 1;
+ tm->tm_year = (int)(buf[RTC_YEAR] & YEAR_MASK) + RTC_YEAR_BASE;
+ tm->tm_wday = ffs(wday) - 1;
+
+ return 0;
+}
+
+static inline int max77663_rtc_tm_to_reg(struct max77663_rtc *rtc, u8 *buf,
+ struct rtc_time *tm, int alarm)
+{
+ u8 alarm_mask = alarm ? ALARM_EN_MASK : 0;
+
+ if (unlikely((tm->tm_year < RTC_YEAR_BASE) ||
+ (tm->tm_year > RTC_YEAR_BASE + RTC_YEAR_MAX))) {
+ dev_err(rtc->dev,
+ "rtc_tm_to_reg: Invalid year, %d\n", tm->tm_year);
+ return -EINVAL;
+ }
+
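+	/* For alarm writes, the enable bit (bit 7) is set in each time field. */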
+ buf[RTC_SEC] = tm->tm_sec | alarm_mask;
+ buf[RTC_MIN] = tm->tm_min | alarm_mask;
+ buf[RTC_HOUR] = tm->tm_hour | alarm_mask;
+ buf[RTC_MONTHDAY] = tm->tm_mday | alarm_mask;
+ buf[RTC_MONTH] = (tm->tm_mon + 1) | alarm_mask;
+ buf[RTC_YEAR] = (tm->tm_year - RTC_YEAR_BASE) | alarm_mask;
+
+	/* The weekday field is programmed only when setting the time, not an alarm. */
+ if (!alarm)
+ buf[RTC_WEEKDAY] = (1 << tm->tm_wday);
+ else
+ buf[RTC_WEEKDAY] = 0;
+
+ return 0;
+}
+
+static inline int max77663_rtc_irq_mask(struct max77663_rtc *rtc, u8 irq)
+{
+ struct device *parent = _to_parent(rtc);
+ u8 irq_mask = rtc->irq_mask | irq;
+ int ret = 0;
+
+ ret = max77663_write(parent, MAX77663_RTC_IRQ_MASK, &irq_mask, 1, 1);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_irq_mask: Failed to set rtc irq mask\n");
+ goto out;
+ }
+ rtc->irq_mask = irq_mask;
+
+out:
+ return ret;
+}
+
+static inline int max77663_rtc_irq_unmask(struct max77663_rtc *rtc, u8 irq)
+{
+ struct device *parent = _to_parent(rtc);
+ u8 irq_mask = rtc->irq_mask & ~irq;
+ int ret = 0;
+
+ ret = max77663_write(parent, MAX77663_RTC_IRQ_MASK, &irq_mask, 1, 1);
+ if (ret < 0) {
+ dev_err(rtc->dev,
+ "rtc_irq_unmask: Failed to set rtc irq mask\n");
+ goto out;
+ }
+ rtc->irq_mask = irq_mask;
+
+out:
+ return ret;
+}
+
+static inline int max77663_rtc_do_irq(struct max77663_rtc *rtc)
+{
+ struct device *parent = _to_parent(rtc);
+ u8 irq_status;
+ int ret;
+
+ ret = max77663_read(parent, MAX77663_RTC_IRQ, &irq_status, 1, 1);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_irq: Failed to get rtc irq status\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_do_irq: irq_mask=0x%02x, irq_status=0x%02x\n",
+ rtc->irq_mask, irq_status);
+
+ if (!(rtc->irq_mask & RTC_IRQ_ALARM1_MASK) &&
+ (irq_status & RTC_IRQ_ALARM1_MASK))
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+ if (!(rtc->irq_mask & RTC_IRQ_1SEC_MASK) &&
+ (irq_status & RTC_IRQ_1SEC_MASK))
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_UF);
+
+ return ret;
+}
+
+static irqreturn_t max77663_rtc_irq(int irq, void *data)
+{
+ struct max77663_rtc *rtc = (struct max77663_rtc *)data;
+
+ max77663_rtc_do_irq(rtc);
+
+ return IRQ_HANDLED;
+}
+
+static int max77663_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct max77663_rtc *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (rtc->irq < 0)
+ return -ENXIO;
+
+ mutex_lock(&rtc->io_lock);
+
+ /* Handle pending interrupt */
+ ret = max77663_rtc_do_irq(rtc);
+ if (ret < 0)
+ goto out;
+
+ /* Config alarm interrupt */
+ if (enabled) {
+ ret = max77663_rtc_irq_unmask(rtc, RTC_IRQ_ALARM1_MASK);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = max77663_rtc_irq_mask(rtc, RTC_IRQ_ALARM1_MASK);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ mutex_unlock(&rtc->io_lock);
+ return ret;
+}
+
+static int max77663_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max77663_rtc *rtc = dev_get_drvdata(dev);
+ u8 buf[RTC_NR];
+ int ret;
+
+ ret = max77663_rtc_read(rtc, MAX77663_RTC_SEC, buf, sizeof(buf), 1);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_read_time: Failed to read rtc time\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_read_time: "
+ "buf: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ buf[RTC_SEC], buf[RTC_MIN], buf[RTC_HOUR], buf[RTC_WEEKDAY],
+ buf[RTC_MONTH], buf[RTC_YEAR], buf[RTC_MONTHDAY]);
+
+ ret = max77663_rtc_reg_to_tm(rtc, buf, tm);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_read_time: "
+ "Failed to convert register format into time format\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_read_time: "
+ "tm: %d-%02d-%02d %02d:%02d:%02d, wday=%d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+ tm->tm_sec, tm->tm_wday);
+
+ return ret;
+}
+
+static int max77663_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max77663_rtc *rtc = dev_get_drvdata(dev);
+ u8 buf[RTC_NR];
+ int ret;
+
+ dev_dbg(rtc->dev, "rtc_set_time: "
+ "tm: %d-%02d-%02d %02d:%02d:%02d, wday=%d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+ tm->tm_sec, tm->tm_wday);
+
+ ret = max77663_rtc_tm_to_reg(rtc, buf, tm, 0);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_set_time: "
+ "Failed to convert time format into register format\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_set_time: "
+ "buf: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ buf[RTC_SEC], buf[RTC_MIN], buf[RTC_HOUR], buf[RTC_WEEKDAY],
+ buf[RTC_MONTH], buf[RTC_YEAR], buf[RTC_MONTHDAY]);
+
+ return max77663_rtc_write(rtc, MAX77663_RTC_SEC, buf, sizeof(buf), 1);
+}
+
+static int max77663_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max77663_rtc *rtc = dev_get_drvdata(dev);
+ u8 buf[RTC_NR];
+ int ret;
+
+ ret = max77663_rtc_read(rtc, MAX77663_RTC_ALARM_SEC1, buf, sizeof(buf),
+ 1);
+ if (ret < 0) {
+ dev_err(rtc->dev,
+ "rtc_read_alarm: Failed to read rtc alarm time\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_read_alarm: "
+ "buf: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ buf[RTC_SEC], buf[RTC_MIN], buf[RTC_HOUR], buf[RTC_WEEKDAY],
+ buf[RTC_MONTH], buf[RTC_YEAR], buf[RTC_MONTHDAY]);
+
+ ret = max77663_rtc_reg_to_tm(rtc, buf, &alrm->time);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_read_alarm: "
+ "Failed to convert register format into time format\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_read_alarm: "
+ "tm: %d-%02d-%02d %02d:%02d:%02d, wday=%d\n",
+ alrm->time.tm_year, alrm->time.tm_mon, alrm->time.tm_mday,
+ alrm->time.tm_hour, alrm->time.tm_min, alrm->time.tm_sec,
+ alrm->time.tm_wday);
+
+ if (rtc->irq_mask & RTC_IRQ_ALARM1_MASK)
+ alrm->enabled = 1;
+ else
+ alrm->enabled = 0;
+
+ return 0;
+}
+
+static int max77663_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max77663_rtc *rtc = dev_get_drvdata(dev);
+ u8 buf[RTC_NR];
+ int ret;
+
+ dev_dbg(rtc->dev, "rtc_set_alarm: "
+ "tm: %d-%02d-%02d %02d:%02d:%02d, wday=%d\n",
+ alrm->time.tm_year, alrm->time.tm_mon, alrm->time.tm_mday,
+ alrm->time.tm_hour, alrm->time.tm_min, alrm->time.tm_sec,
+ alrm->time.tm_wday);
+
+ ret = max77663_rtc_tm_to_reg(rtc, buf, &alrm->time, 1);
+ if (ret < 0) {
+ dev_err(rtc->dev, "rtc_set_alarm: "
+ "Failed to convert time format into register format\n");
+ return ret;
+ }
+
+ dev_dbg(rtc->dev, "rtc_set_alarm: "
+ "buf: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ buf[RTC_SEC], buf[RTC_MIN], buf[RTC_HOUR], buf[RTC_WEEKDAY],
+ buf[RTC_MONTH], buf[RTC_YEAR], buf[RTC_MONTHDAY]);
+
+ ret = max77663_rtc_write(rtc, MAX77663_RTC_ALARM_SEC1, buf, sizeof(buf),
+ 1);
+ if (ret < 0) {
+ dev_err(rtc->dev,
+ "rtc_set_alarm: Failed to write rtc alarm time\n");
+ return ret;
+ }
+
+ ret = max77663_rtc_alarm_irq_enable(dev, 1);
+ if (ret < 0) {
+ dev_err(rtc->dev,
+ "rtc_set_alarm: Failed to enable rtc alarm\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct rtc_class_ops max77663_rtc_ops = {
+ .read_time = max77663_rtc_read_time,
+ .set_time = max77663_rtc_set_time,
+ .read_alarm = max77663_rtc_read_alarm,
+ .set_alarm = max77663_rtc_set_alarm,
+ .alarm_irq_enable = max77663_rtc_alarm_irq_enable,
+};
+
+static int max77663_rtc_preinit(struct max77663_rtc *rtc)
+{
+ struct device *parent = _to_parent(rtc);
+ u8 val;
+ int ret;
+
+ /* Mask all interrupts */
+ rtc->irq_mask = 0xFF;
+ ret = max77663_rtc_write(rtc, MAX77663_RTC_IRQ_MASK, &rtc->irq_mask, 1,
+ 0);
+ if (ret < 0) {
+ dev_err(rtc->dev, "preinit: Failed to set rtc irq mask\n");
+ return ret;
+ }
+
+ /* Configure Binary mode and 24hour mode */
+ val = HR_MODE_MASK;
+ ret = max77663_rtc_write(rtc, MAX77663_RTC_CTRL, &val, 1, 0);
+ if (ret < 0) {
+ dev_err(rtc->dev, "preinit: Failed to set rtc control\n");
+ return ret;
+ }
+
+	/* Alarm wakeup should be disabled so that wakeup from sleep
+	 * is driven by the EN1 input signal */
+ ret = max77663_set_bits(parent, MAX77663_REG_ONOFF_CFG2,
+ ONOFF_WK_ALARM1_MASK, 0, 0);
+ if (ret < 0) {
+ dev_err(rtc->dev, "preinit: Failed to set onoff cfg2\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77663_rtc_probe(struct platform_device *pdev)
+{
+ struct max77663_platform_data *parent_pdata =
+ pdev->dev.parent->platform_data;
+ static struct max77663_rtc *rtc;
+ int ret = 0;
+
+ rtc = kzalloc(sizeof(struct max77663_rtc), GFP_KERNEL);
+ if (!rtc) {
+ dev_err(&pdev->dev, "probe: kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(&pdev->dev, rtc);
+ rtc->dev = &pdev->dev;
+ mutex_init(&rtc->io_lock);
+
+ ret = max77663_rtc_preinit(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "probe: Failed to rtc preinit\n");
+ goto out_kfree;
+ }
+
+ rtc->rtc = rtc_device_register("max77663-rtc", &pdev->dev,
+ &max77663_rtc_ops, THIS_MODULE);
+ if (IS_ERR_OR_NULL(rtc->rtc)) {
+ dev_err(&pdev->dev, "probe: Failed to register rtc\n");
+ ret = PTR_ERR(rtc->rtc);
+ goto out_kfree;
+ }
+
+ if (parent_pdata->irq_base < 0)
+ goto out;
+
+ rtc->irq = parent_pdata->irq_base + MAX77663_IRQ_RTC;
+ ret = request_threaded_irq(rtc->irq, NULL, max77663_rtc_irq,
+ IRQF_ONESHOT, "max77663-rtc", rtc);
+ if (ret < 0) {
+ dev_err(rtc->dev, "probe: Failed to request irq %d\n",
+ rtc->irq);
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(rtc->dev, 1);
+ enable_irq_wake(rtc->irq);
+ }
+
+ return 0;
+
+out_kfree:
+ mutex_destroy(&rtc->io_lock);
+	kfree(rtc);
+out:
+ return ret;
+}
+
+static int __devexit max77663_rtc_remove(struct platform_device *pdev)
+{
+ struct max77663_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+ if (rtc->irq != -1)
+ free_irq(rtc->irq, rtc);
+
+ rtc_device_unregister(rtc->rtc);
+ mutex_destroy(&rtc->io_lock);
+ kfree(rtc);
+
+ return 0;
+}
+
+static struct platform_driver max77663_rtc_driver = {
+ .probe = max77663_rtc_probe,
+ .remove = __devexit_p(max77663_rtc_remove),
+ .driver = {
+ .name = "max77663-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init max77663_rtc_init(void)
+{
+ return platform_driver_register(&max77663_rtc_driver);
+}
+module_init(max77663_rtc_init);
+
+static void __exit max77663_rtc_exit(void)
+{
+ platform_driver_unregister(&max77663_rtc_driver);
+}
+module_exit(max77663_rtc_exit);
+
+MODULE_DESCRIPTION("max77663 RTC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/rtc/rtc-max8907c.c b/drivers/rtc/rtc-max8907c.c
new file mode 100644
index 000000000000..668bc43461ba
--- /dev/null
+++ b/drivers/rtc/rtc-max8907c.c
@@ -0,0 +1,318 @@
+/*
+ * RTC driver for Maxim MAX8907c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Based on drivers/rtc/rtc-max8925.c, Copyright (C) 2009-2010 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max8907c.h>
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_DATE,
+ RTC_MONTH,
+ RTC_YEAR1,
+ RTC_YEAR2,
+};
+
+#define TIME_NUM 8
+#define ALARM_1SEC (1 << 7)
+#define HOUR_12 (1 << 7)
+#define HOUR_AM_PM (1 << 5)
+#define ALARM0_IRQ (1 << 3)
+#define ALARM1_IRQ (1 << 2)
+#define ALARM0_STATUS (1 << 2)
+#define ALARM1_STATUS (1 << 1)
+
+struct max8907c_rtc_info {
+ struct rtc_device *rtc_dev;
+ struct i2c_client *i2c;
+ struct max8907c *chip;
+};
+
+static irqreturn_t rtc_update_handler(int irq, void *data)
+{
+ struct max8907c_rtc_info *info = (struct max8907c_rtc_info *)data;
+
+ /* disable ALARM0 except for 1SEC alarm */
+ max8907c_set_bits(info->i2c, MAX8907C_REG_ALARM0_CNTL, 0x7f, 0);
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
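+/* Decode the packed-BCD register dump into a struct rtc_time (tens digit
+ * in the high nibble of each byte). */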
+static int tm_calc(struct rtc_time *tm, u8 *buf, int len)
+{
+ if (len < TIME_NUM)
+ return -EINVAL;
+ tm->tm_year = (buf[RTC_YEAR2] >> 4) * 1000
+ + (buf[RTC_YEAR2] & 0xf) * 100
+ + (buf[RTC_YEAR1] >> 4) * 10
+ + (buf[RTC_YEAR1] & 0xf);
+ tm->tm_year -= 1900;
+ tm->tm_mon = ((buf[RTC_MONTH] >> 4) & 0x01) * 10
+ + (buf[RTC_MONTH] & 0x0f);
+ tm->tm_mday = ((buf[RTC_DATE] >> 4) & 0x03) * 10
+ + (buf[RTC_DATE] & 0x0f);
+ tm->tm_wday = buf[RTC_WEEKDAY] & 0x07;
+ if (buf[RTC_HOUR] & HOUR_12) {
+ tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x1) * 10
+ + (buf[RTC_HOUR] & 0x0f);
+ if (buf[RTC_HOUR] & HOUR_AM_PM)
+ tm->tm_hour += 12;
+ } else {
+ tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x03) * 10
+ + (buf[RTC_HOUR] & 0x0f);
+ }
+ tm->tm_min = ((buf[RTC_MIN] >> 4) & 0x7) * 10
+ + (buf[RTC_MIN] & 0x0f);
+ tm->tm_sec = ((buf[RTC_SEC] >> 4) & 0x7) * 10
+ + (buf[RTC_SEC] & 0x0f);
+ return 0;
+}
+
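+/* Encode a struct rtc_time into the packed-BCD register layout expected by
+ * the MAX8907C (tens digit in the high nibble of each byte). */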
+static int data_calc(u8 *buf, struct rtc_time *tm, int len)
+{
+ u8 high, low;
+
+ if (len < TIME_NUM)
+ return -EINVAL;
+
+ high = (tm->tm_year + 1900) / 1000;
+ low = (tm->tm_year + 1900) / 100;
+ low = low - high * 10;
+ buf[RTC_YEAR2] = (high << 4) + low;
+ high = (tm->tm_year + 1900) / 10;
+ low = tm->tm_year + 1900;
+ low = low - high * 10;
+ high = high - (high / 10) * 10;
+ buf[RTC_YEAR1] = (high << 4) + low;
+ high = tm->tm_mon / 10;
+ low = tm->tm_mon;
+ low = low - high * 10;
+ buf[RTC_MONTH] = (high << 4) + low;
+ high = tm->tm_mday / 10;
+ low = tm->tm_mday;
+ low = low - high * 10;
+ buf[RTC_DATE] = (high << 4) + low;
+ buf[RTC_WEEKDAY] = tm->tm_wday;
+ high = tm->tm_hour / 10;
+ low = tm->tm_hour;
+ low = low - high * 10;
+ buf[RTC_HOUR] = (high << 4) + low;
+ high = tm->tm_min / 10;
+ low = tm->tm_min;
+ low = low - high * 10;
+ buf[RTC_MIN] = (high << 4) + low;
+ high = tm->tm_sec / 10;
+ low = tm->tm_sec;
+ low = low - high * 10;
+ buf[RTC_SEC] = (high << 4) + low;
+ return 0;
+}
+
+static int max8907c_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8907c_rtc_info *info = dev_get_drvdata(dev);
+ u8 buf[TIME_NUM];
+ int ret;
+
+ ret = max8907c_reg_bulk_read(info->i2c, MAX8907C_REG_RTC_SEC, TIME_NUM, buf);
+
+ if (ret < 0)
+ return ret;
+ ret = tm_calc(tm, buf, TIME_NUM);
+
+ return ret;
+}
+
+static int max8907c_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8907c_rtc_info *info = dev_get_drvdata(dev);
+ u8 buf[TIME_NUM];
+ int ret;
+
+ ret = data_calc(buf, tm, TIME_NUM);
+
+ if (ret < 0)
+ return ret;
+ ret = max8907c_reg_bulk_write(info->i2c, MAX8907C_REG_RTC_SEC, TIME_NUM, buf);
+
+ return ret;
+}
+
+static int max8907c_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8907c_rtc_info *info = dev_get_drvdata(dev);
+ unsigned char buf[TIME_NUM];
+ int ret;
+
+ ret = max8907c_reg_bulk_read(info->i2c, MAX8907C_REG_ALARM0_SEC, TIME_NUM, buf);
+ if (ret < 0)
+ return ret;
+ ret = tm_calc(&alrm->time, buf, TIME_NUM);
+ if (ret < 0)
+ return ret;
+ ret = max8907c_reg_read(info->i2c, MAX8907C_REG_RTC_IRQ_MASK);
+ if (ret < 0)
+ return ret;
+ if ((ret & ALARM0_IRQ) == 0)
+ alrm->enabled = 1;
+ else
+ alrm->enabled = 0;
+ ret = max8907c_reg_read(info->i2c, MAX8907C_REG_RTC_STATUS);
+ if (ret < 0)
+ return ret;
+ if (ret & ALARM0_STATUS)
+ alrm->pending = 1;
+ else
+ alrm->pending = 0;
+
+ return ret;
+}
+
+static int max8907c_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8907c_rtc_info *info = dev_get_drvdata(dev);
+ unsigned char buf[TIME_NUM];
+ int ret;
+
+ ret = data_calc(buf, &alrm->time, TIME_NUM);
+ if (ret < 0)
+ return ret;
+ ret = max8907c_reg_bulk_write(info->i2c, MAX8907C_REG_ALARM0_SEC, TIME_NUM, buf);
+ if (ret < 0)
+ return ret;
+ /* only enable alarm on year/month/day/hour/min/sec */
+ ret = max8907c_reg_write(info->i2c, MAX8907C_REG_ALARM0_CNTL, 0x77);
+
+ return ret;
+}
+
+static const struct rtc_class_ops max8907c_rtc_ops = {
+ .read_time = max8907c_rtc_read_time,
+ .set_time = max8907c_rtc_set_time,
+ .read_alarm = max8907c_rtc_read_alarm,
+ .set_alarm = max8907c_rtc_set_alarm,
+};
+
+static int __devinit max8907c_rtc_probe(struct platform_device *pdev)
+{
+ struct max8907c *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8907c_rtc_info *info;
+ int irq, ret;
+
+ info = kzalloc(sizeof(struct max8907c_rtc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->i2c = chip->i2c_rtc;
+ info->chip = chip;
+
+ irq = chip->irq_base + MAX8907C_IRQ_RTC_ALARM0;
+
+ ret = request_threaded_irq(irq, NULL, rtc_update_handler,
+ IRQF_ONESHOT, "rtc-alarm0", info);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ irq, ret);
+ goto out_irq;
+ }
+
+ dev_set_drvdata(&pdev->dev, info);
+ info->rtc_dev = rtc_device_register("max8907c-rtc", &pdev->dev,
+ &max8907c_rtc_ops, THIS_MODULE);
+ ret = PTR_ERR(info->rtc_dev);
+ if (IS_ERR(info->rtc_dev)) {
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ goto out_rtc;
+ }
+
+ max8907c_set_bits(chip->i2c_power, MAX8907C_REG_SYSENSEL, 0x2, 0x2);
+
+ platform_set_drvdata(pdev, info);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ return 0;
+out_rtc:
+ free_irq(chip->irq_base + MAX8907C_IRQ_RTC_ALARM0, info);
+
+out_irq:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit max8907c_rtc_remove(struct platform_device *pdev)
+{
+ struct max8907c_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (info) {
+ free_irq(info->chip->irq_base + MAX8907C_IRQ_RTC_ALARM0, info);
+
+ rtc_device_unregister(info->rtc_dev);
+ kfree(info);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int max8907c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct device *dev = &pdev->dev;
+ struct max8907c_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(info->chip->irq_base + MAX8907C_IRQ_RTC_ALARM0);
+ return 0;
+}
+
+static int max8907c_rtc_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+ struct max8907c_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(info->chip->irq_base + MAX8907C_IRQ_RTC_ALARM0);
+ return 0;
+}
+#endif
+
+static struct platform_driver max8907c_rtc_driver = {
+ .driver = {
+ .name = "max8907c-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8907c_rtc_probe,
+ .remove = __devexit_p(max8907c_rtc_remove),
+#ifdef CONFIG_PM
+ .suspend = max8907c_rtc_suspend,
+ .resume = max8907c_rtc_resume,
+#endif
+};
+
+static int __init max8907c_rtc_init(void)
+{
+ return platform_driver_register(&max8907c_rtc_driver);
+}
+module_init(max8907c_rtc_init);
+
+static void __exit max8907c_rtc_exit(void)
+{
+ platform_driver_unregister(&max8907c_rtc_driver);
+}
+module_exit(max8907c_rtc_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8907C RTC driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/rtc/rtc-ricoh583.c b/drivers/rtc/rtc-ricoh583.c
new file mode 100644
index 000000000000..8bc17d9a1013
--- /dev/null
+++ b/drivers/rtc/rtc-ricoh583.c
@@ -0,0 +1,403 @@
+/*
+ * drivers/rtc/rtc_ricoh583.c
+ *
+ * rtc driver for ricoh rc5t583 pmu
+ *
+ * copyright (c) 2011, nvidia corporation.
+ *
+ * this program is free software; you can redistribute it and/or modify
+ * it under the terms of the gnu general public license as published by
+ * the free software foundation; either version 2 of the license, or
+ * (at your option) any later version.
+ *
+ * this program is distributed in the hope that it will be useful, but without
+ * any warranty; without even the implied warranty of merchantability or
+ * fitness for a particular purpose. see the gnu general public license for
+ * more details.
+ *
+ * you should have received a copy of the gnu general public license along
+ * with this program; if not, write to the free software foundation, inc.,
+ * 51 franklin street, fifth floor, boston, ma 02110-1301, usa.
+ */
+
+/* #define debug 1 */
+/* #define verbose_debug 1 */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/ricoh583.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define rtc_ctrl1 0xED
+#define rtc_ctrl2 0xEE
+#define rtc_seconds_reg 0xE0
+#define rtc_alarm_y 0xF0
+#define rtc_adjust 0xE7
+
+/*
+The Linux RTC core uses 1900 as the base year in many calculations
+(e.g. see drivers/rtc/rtc-lib.c).
+*/
+#define os_ref_year 1900
+
+/*
+ The PMU RTC has only 2 nibbles to store year information, so an
+ offset of 100 is used to set the base year to 2000 for this driver.
+*/
+#define rtc_year_offset 100
+
+struct ricoh583_rtc {
+ unsigned long epoch_start;
+ int irq;
+ struct rtc_device *rtc;
+ bool irq_en;
+};
+
+static int ricoh583_read_regs(struct device *dev, int reg, int len,
+ uint8_t *val)
+{
+ int ret;
+
+ ret = ricoh583_bulk_reads(dev->parent, reg, len, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "\n %s failed reading from 0x%02x\n",
+ __func__, reg);
+ WARN_ON(1);
+ }
+ return ret;
+}
+
+static int ricoh583_write_regs(struct device *dev, int reg, int len,
+ uint8_t *val)
+{
+ int ret;
+ ret = ricoh583_bulk_writes(dev->parent, reg, len, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "\n %s failed writing\n", __func__);
+ WARN_ON(1);
+ }
+
+ return ret;
+}
+
+static int ricoh583_rtc_valid_tm(struct device *dev, struct rtc_time *tm)
+{
+ if (tm->tm_year >= (rtc_year_offset + 99)
+ || tm->tm_mon >= 12
+ || tm->tm_mday < 1
+ || tm->tm_mday > rtc_month_days(tm->tm_mon,
+ tm->tm_year + os_ref_year)
+ || tm->tm_hour >= 24
+ || tm->tm_min >= 60
+ || tm->tm_sec >= 60) {
+ dev_err(dev->parent, "\n returning error due to time"
+ "%d/%d/%d %d:%d:%d", tm->tm_mon, tm->tm_mday,
+ tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u8 dec2bcd(u8 dec)
+{
+ return ((dec/10)<<4)+(dec%10);
+}
+
+static u8 bcd2dec(u8 bcd)
+{
+ return (bcd >> 4)*10+(bcd & 0xf);
+}
+
+static void convert_bcd_to_decimal(u8 *buf, u8 len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ buf[i] = bcd2dec(buf[i]);
+}
+
+static void convert_decimal_to_bcd(u8 *buf, u8 len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ buf[i] = dec2bcd(buf[i]);
+}
+
+static void print_time(struct device *dev, struct rtc_time *tm)
+{
+ dev_info(dev, "rtc-time : %d/%d/%d %d:%d\n",
+ (tm->tm_mon + 1), tm->tm_mday, (tm->tm_year + os_ref_year),
+ tm->tm_hour, tm->tm_min);
+}
+
+static int ricoh583_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int err;
+ err = ricoh583_read_regs(dev, rtc_seconds_reg, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev, "\n %s :: failed to read time\n", __FILE__);
+ return err;
+ }
+ convert_bcd_to_decimal(buff, sizeof(buff));
+ tm->tm_sec = buff[0];
+ tm->tm_min = buff[1];
+ tm->tm_hour = buff[2];
+ tm->tm_wday = buff[3];
+ tm->tm_mday = buff[4];
+ tm->tm_mon = buff[5] - 1;
+ tm->tm_year = buff[6] + rtc_year_offset;
+ print_time(dev, tm);
+ return ricoh583_rtc_valid_tm(dev, tm);
+}
+
+static int ricoh583_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int err;
+
+ print_time(dev, tm);
+ buff[0] = tm->tm_sec;
+ buff[1] = tm->tm_min;
+ buff[2] = tm->tm_hour;
+ buff[3] = tm->tm_wday;
+ buff[4] = tm->tm_mday;
+ buff[5] = tm->tm_mon + 1;
+ buff[6] = tm->tm_year - rtc_year_offset;
+
+ convert_decimal_to_bcd(buff, sizeof(buff));
+ err = ricoh583_write_regs(dev, rtc_seconds_reg, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to program new time\n");
+ return err;
+ }
+
+ return 0;
+}
+static int ricoh583_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm);
+
+static int ricoh583_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct ricoh583_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long seconds;
+ u8 buff[5];
+ int err;
+ struct rtc_time tm;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ rtc_tm_to_time(&alrm->time, &seconds);
+ ricoh583_rtc_read_time(dev, &tm);
+ rtc_tm_to_time(&tm, &rtc->epoch_start);
+ /*
+ Workaround: the YAL alarm has no seconds register, so when the
+ alarm is requested within a minute of the current time, push it
+ out to the next minute.
+ */
+ if (seconds - rtc->epoch_start < 60) {
+ seconds += 60;
+ rtc_time_to_tm(seconds, &alrm->time);
+ }
+ dev_info(dev->parent, "\n setting alarm to requested time::\n");
+ print_time(dev->parent, &alrm->time);
+
+ if (WARN_ON(alrm->enabled && (seconds < rtc->epoch_start))) {
+ dev_err(dev->parent, "\n can't set alarm to requested time\n");
+ return -EINVAL;
+ }
+
+ if (alrm->enabled && !rtc->irq_en)
+ rtc->irq_en = true;
+ else if (!alrm->enabled && rtc->irq_en)
+ rtc->irq_en = false;
+
+ buff[0] = alrm->time.tm_min;
+ buff[1] = alrm->time.tm_hour;
+ buff[2] = alrm->time.tm_mday;
+ buff[3] = alrm->time.tm_mon + 1;
+ buff[4] = alrm->time.tm_year - rtc_year_offset;
+ convert_decimal_to_bcd(buff, sizeof(buff));
+ err = ricoh583_write_regs(dev, rtc_alarm_y, sizeof(buff), buff);
+ if (err) {
+ dev_err(dev->parent, "\n unable to set alarm\n");
+ return -EBUSY;
+ }
+ buff[0] = 0x20; /* to enable alarm_y */
+ buff[1] = 0x20; /* to enable 24-hour format */
+ err = ricoh583_write_regs(dev, rtc_ctrl1, 2, buff);
+ if (err) {
+ dev_err(dev, "failed programming rtc ctrl regs\n");
+ return -EBUSY;
+ }
+ return err;
+}
+
+static int ricoh583_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[5];
+ int err;
+
+ err = ricoh583_read_regs(dev, rtc_alarm_y, sizeof(buff), buff);
+ if (err)
+ return err;
+ convert_bcd_to_decimal(buff, sizeof(buff));
+
+ alrm->time.tm_min = buff[0];
+ alrm->time.tm_hour = buff[1];
+ alrm->time.tm_mday = buff[2];
+ alrm->time.tm_mon = buff[3] - 1;
+ alrm->time.tm_year = buff[4] + rtc_year_offset;
+
+ dev_info(dev->parent, "\n getting alarm time::\n");
+ print_time(dev, &alrm->time);
+
+ return 0;
+}
+
+static const struct rtc_class_ops ricoh583_rtc_ops = {
+ .read_time = ricoh583_rtc_read_time,
+ .set_time = ricoh583_rtc_set_time,
+ .set_alarm = ricoh583_rtc_set_alarm,
+ .read_alarm = ricoh583_rtc_read_alarm,
+};
+
+static irqreturn_t ricoh583_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ricoh583_rtc *rtc = dev_get_drvdata(dev);
+ u8 reg;
+ int err;
+
+ /* clear alarm-Y status bits.*/
+ err = ricoh583_read_regs(dev, rtc_ctrl2, 1, &reg);
+ if (err) {
+ dev_err(dev->parent, "unable to read rtc_ctrl2 reg\n");
+ return IRQ_NONE;
+ }
+ reg &= ~0x8;
+ err = ricoh583_write_regs(dev, rtc_ctrl2, 1, &reg);
+ if (err) {
+ dev_err(dev->parent, "unable to program rtc_status reg\n");
+ return -EBUSY;
+ }
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int __devinit ricoh583_rtc_probe(struct platform_device *pdev)
+{
+ struct ricoh583_rtc_platform_data *pdata = pdev->dev.platform_data;
+ struct ricoh583_rtc *rtc;
+ struct rtc_time tm;
+ int err;
+ u8 reg[2];
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data specified\n");
+ return -EINVAL;
+ }
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = -1;
+
+ if (pdata->irq < 0)
+ dev_warn(&pdev->dev, "no IRQ specified, wakeup is disabled\n");
+
+ dev_set_drvdata(&pdev->dev, rtc);
+ device_init_wakeup(&pdev->dev, 1);
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &ricoh583_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto fail;
+ }
+ reg[0] = 0; /* clearing RTC Adjust register */
+ err = ricoh583_write_regs(&pdev->dev, rtc_adjust, 1, reg);
+ if (err) {
+ dev_err(&pdev->dev, "unable to program rtc_adjust reg\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ reg[0] = 0x20; /* to enable alarm_y */
+ reg[1] = 0x20; /* to enable 24-hour format */
+ err = ricoh583_write_regs(&pdev->dev, rtc_ctrl1, 2, reg);
+ if (err) {
+ dev_err(&pdev->dev, "failed rtc setup\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ ricoh583_rtc_read_time(&pdev->dev, &tm);
+ if (ricoh583_rtc_valid_tm(&pdev->dev, &tm)) {
+ if (pdata->time.tm_year < 2000 || pdata->time.tm_year > 2100) {
+ memset(&pdata->time, 0, sizeof(pdata->time));
+ pdata->time.tm_year = rtc_year_offset;
+ pdata->time.tm_mday = 1;
+ } else
+ pdata->time.tm_year -= os_ref_year;
+ ricoh583_rtc_set_time(&pdev->dev, &pdata->time);
+ }
+ if (pdata && (pdata->irq >= 0)) {
+ rtc->irq = pdata->irq;
+ err = request_threaded_irq(pdata->irq, NULL, ricoh583_rtc_irq,
+ IRQF_ONESHOT, "rtc_ricoh583",
+ &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "request IRQ:%d fail\n", rtc->irq);
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(&pdev->dev, 1);
+ enable_irq_wake(rtc->irq);
+ }
+ }
+ return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(rtc->rtc))
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return err;
+}
+
+static int __devexit ricoh583_rtc_remove(struct platform_device *pdev)
+{
+ struct ricoh583_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+ if (rtc->irq != -1)
+ free_irq(rtc->irq, &pdev->dev);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return 0;
+}
+
+static struct platform_driver ricoh583_rtc_driver = {
+ .driver = {
+ .name = "rtc_ricoh583",
+ .owner = THIS_MODULE,
+ },
+ .probe = ricoh583_rtc_probe,
+ .remove = __devexit_p(ricoh583_rtc_remove),
+};
+
+static int __init ricoh583_rtc_init(void)
+{
+ return platform_driver_register(&ricoh583_rtc_driver);
+}
+module_init(ricoh583_rtc_init);
+
+static void __exit ricoh583_rtc_exit(void)
+{
+ platform_driver_unregister(&ricoh583_rtc_driver);
+}
+module_exit(ricoh583_rtc_exit);
+
+MODULE_DESCRIPTION("RICOH PMU ricoh583 RTC driver");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc_ricoh583");
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe38602..773adffac277 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -1,7 +1,8 @@
/*
* An RTC driver for the NVIDIA Tegra 200 series internal RTC.
*
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ * Copyright (c) 2010 Jon Mayo <jmayo@nvidia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -376,6 +377,36 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
+#ifndef CONFIG_TEGRA_SILICON_PLATFORM
+ {
+ struct rtc_time tm;
+
+ /* Get the current time from the RTC. */
+ ret = tegra_rtc_read_time(&pdev->dev, &tm);
+ if (ret) {
+ /* Report but ignore this error. */
+ dev_err(&pdev->dev,
+ "Failed to get FPGA internal RTC time (err=%d)\n",
+ ret);
+ } else if (tm.tm_year < 2010 - 1900) {
+ /* The RTC's default reset time is soooo last century. */
+ tm.tm_year = 2010-1900;
+ tm.tm_mon = 0;
+ tm.tm_mday = 1;
+ tm.tm_hour = 0;
+ tm.tm_min = 0;
+ tm.tm_sec = 0;
+ ret = tegra_rtc_set_time(&pdev->dev, &tm);
+ if (ret) {
+ /* Report but ignore this error. */
+ dev_err(&pdev->dev,
+ "Failed to set FPGA internal RTC time (err=%d)\n",
+ ret);
+ }
+ }
+ }
+#endif
+
return 0;
err_dev_unreg:
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
new file mode 100644
index 000000000000..c41edabf0b2c
--- /dev/null
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -0,0 +1,387 @@
+/*
+ * drivers/rtc/rtc-tps6586x.c
+ *
+ * RTC driver for TI TPS6586x
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define RTC_CTRL 0xc0
+#define POR_RESET_N BIT(7)
+#define OSC_SRC_SEL BIT(6)
+#define RTC_ENABLE BIT(5) /* enables alarm */
+#define RTC_BUF_ENABLE BIT(4) /* 32 KHz buffer enable */
+#define PRE_BYPASS BIT(3) /* 0=1KHz or 1=32KHz updates */
+#define CL_SEL_MASK (BIT(2)|BIT(1))
+#define CL_SEL_POS 1
+#define RTC_ALARM1_HI 0xc1
+#define RTC_COUNT4 0xc6
+#define RTC_COUNT4_DUMMYREAD 0xc5 /* start a PMU RTC access by reading the register prior to the RTC_COUNT4 */
+#define ALM1_VALID_RANGE_IN_SEC 0x3FFF /* alarm covers only 14 bits of seconds */
+
+struct tps6586x_rtc {
+ unsigned long epoch_start;
+ int irq;
+ struct rtc_device *rtc;
+ bool irq_en;
+};
+
+static inline struct device *to_tps6586x_dev(struct device *dev)
+{
+ return dev->parent;
+}
+
+static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long long ticks = 0;
+ unsigned long seconds;
+ u8 buff[6];
+ int err;
+ int i;
+
+ err = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev, "failed to read counter\n");
+ return err;
+ }
+
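+ /*
+ * buff[0] is the dummy read that latches the counter; the remaining
+ * five bytes hold the 40-bit tick count, MSB first. The counter
+ * appears to tick at 1024 Hz, hence the >> 10 below to get seconds.
+ */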
+ for (i = 1; i < sizeof(buff); i++) {
+ ticks <<= 8;
+ ticks |= buff[i];
+ }
+
+ seconds = ticks >> 10;
+
+ seconds += rtc->epoch_start;
+ rtc_time_to_tm(seconds, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int tps6586x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long long ticks;
+ unsigned long seconds;
+ u8 buff[5];
+ int err;
+
+ rtc_tm_to_time(tm, &seconds);
+
+ if (WARN_ON(seconds < rtc->epoch_start)) {
+ dev_err(dev, "requested time unsupported\n");
+ return -EINVAL;
+ }
+
+ seconds -= rtc->epoch_start;
+
+ ticks = (unsigned long long)seconds << 10;
+ buff[0] = (ticks >> 32) & 0xff;
+ buff[1] = (ticks >> 24) & 0xff;
+ buff[2] = (ticks >> 16) & 0xff;
+ buff[3] = (ticks >> 8) & 0xff;
+ buff[4] = ticks & 0xff;
+
+ err = tps6586x_clr_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (err < 0) {
+ dev_err(dev, "failed to clear RTC_ENABLE\n");
+ return err;
+ }
+
+ err = tps6586x_writes(tps_dev, RTC_COUNT4, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev, "failed to program new time\n");
+ return err;
+ }
+
+ err = tps6586x_set_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (err < 0) {
+ dev_err(dev, "failed to set RTC_ENABLE\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long seconds;
+ unsigned long ticks;
+ unsigned long rtc_current_time;
+ unsigned long long rticks = 0;
+ u8 buff[3];
+ u8 rbuff[6];
+ int err;
+ int i;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ rtc_tm_to_time(&alrm->time, &seconds);
+
+ if (WARN_ON(alrm->enabled && (seconds < rtc->epoch_start))) {
+ dev_err(dev, "can't set alarm to requested time\n");
+ return -EINVAL;
+ }
+
+ if (alrm->enabled && !rtc->irq_en) {
+ enable_irq(rtc->irq);
+ rtc->irq_en = true;
+ } else if (!alrm->enabled && rtc->irq_en) {
+ disable_irq(rtc->irq);
+ rtc->irq_en = false;
+ }
+
+ seconds -= rtc->epoch_start;
+
+ err = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD, sizeof(rbuff), rbuff);
+ if (err < 0) {
+ dev_err(dev, "failed to read counter\n");
+ return err;
+ }
+
+ for (i = 1; i < sizeof(rbuff); i++) {
+ rticks <<= 8;
+ rticks |= rbuff[i];
+ }
+
+ rtc_current_time = rticks >> 10;
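+ /*
+ * ALARM1 compares against only the upper bytes of the counter,
+ * giving a 14-bit window of seconds (ALM1_VALID_RANGE_IN_SEC);
+ * requests beyond that window are clamped to just behind the
+ * current count.
+ */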
+ if ((seconds - rtc_current_time) > ALM1_VALID_RANGE_IN_SEC)
+ seconds = rtc_current_time - 1;
+
+ ticks = (unsigned long long)seconds << 10;
+
+ buff[0] = (ticks >> 16) & 0xff;
+ buff[1] = (ticks >> 8) & 0xff;
+ buff[2] = ticks & 0xff;
+
+ err = tps6586x_writes(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+ if (err)
+ dev_err(tps_dev, "unable to program alarm\n");
+
+ return err;
+}
+
+static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long ticks;
+ unsigned long seconds;
+ u8 buff[3];
+ int err;
+
+ err = tps6586x_reads(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+ if (err)
+ return err;
+
+ ticks = (buff[0] << 16) | (buff[1] << 8) | buff[2];
+ seconds = ticks >> 10;
+ seconds += rtc->epoch_start;
+
+ rtc_time_to_tm(seconds, &alrm->time);
+
+ return 0;
+}
+
+static int tps6586x_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ u8 buff;
+ int err;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ err = tps6586x_read(tps_dev, RTC_CTRL, &buff);
+ if (err < 0) {
+ dev_err(dev, "failed to read RTC_CTRL\n");
+ return err;
+ }
+
+ if ((enabled && (buff & RTC_ENABLE)) ||
+ (!enabled && !(buff & RTC_ENABLE)))
+ return 0;
+
+ if (enabled) {
+ err = tps6586x_set_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (err < 0) {
+ dev_err(dev, "failed to set RTC_ENABLE\n");
+ return err;
+ }
+
+ if (!rtc->irq_en) {
+ enable_irq(rtc->irq);
+ rtc->irq_en = true;
+ }
+ } else {
+ err = tps6586x_clr_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (err < 0) {
+ dev_err(dev, "failed to clear RTC_ENABLE\n");
+ return err;
+ }
+
+ if (rtc->irq_en) {
+ disable_irq(rtc->irq);
+ rtc->irq_en = false;
+ }
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops tps6586x_rtc_ops = {
+ .read_time = tps6586x_rtc_read_time,
+ .set_time = tps6586x_rtc_set_time,
+ .set_alarm = tps6586x_rtc_set_alarm,
+ .read_alarm = tps6586x_rtc_read_alarm,
+ .alarm_irq_enable = tps6586x_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t tps6586x_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps6586x_rtc_probe(struct platform_device *pdev)
+{
+ struct tps6586x_rtc_platform_data *pdata = pdev->dev.platform_data;
+ struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
+ struct tps6586x_rtc *rtc;
+ int err;
+ struct tps6586x_epoch_start *epoch;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data specified\n");
+ return -EINVAL;
+ }
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = -1;
+
+ if (pdata->irq < 0)
+ dev_warn(&pdev->dev, "no IRQ specified, wakeup is disabled\n");
+
+ epoch = &pdata->start;
+ rtc->epoch_start = mktime(epoch->year, epoch->month, epoch->day,
+ epoch->hour, epoch->min, epoch->sec);
+
+ dev_set_drvdata(&pdev->dev, rtc);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ rtc->rtc = rtc_device_register("tps6586x-rtc", &pdev->dev,
+ &tps6586x_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto fail;
+ }
+
+ /* 1 kHz tick mode, enable tick counting */
+ err = tps6586x_update(tps_dev, RTC_CTRL,
+ RTC_ENABLE | OSC_SRC_SEL | ((pdata->cl_sel << CL_SEL_POS) &
+ CL_SEL_MASK),
+ RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to start counter\n");
+ goto fail;
+ }
+
+ if (pdata && (pdata->irq >= 0)) {
+ rtc->irq = pdata->irq;
+ err = request_threaded_irq(pdata->irq, NULL, tps6586x_rtc_irq,
+ IRQF_ONESHOT, "tps6586x-rtc",
+ &pdev->dev);
+ if (err) {
+ dev_warn(&pdev->dev, "unable to request IRQ(%d)\n", rtc->irq);
+ rtc->irq = -1;
+ } else {
+ enable_irq_wake(rtc->irq);
+ disable_irq(rtc->irq);
+ }
+ }
+
+ return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(rtc->rtc))
+ rtc_device_unregister(rtc->rtc);
+ device_init_wakeup(&pdev->dev, 0);
+ kfree(rtc);
+ return err;
+}
+
+static int __devexit tps6586x_rtc_remove(struct platform_device *pdev)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+ if (rtc->irq != -1)
+ free_irq(rtc->irq, &pdev->dev);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return 0;
+}
+
+static struct platform_driver tps6586x_rtc_driver = {
+ .driver = {
+ .name = "tps6586x-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6586x_rtc_probe,
+ .remove = __devexit_p(tps6586x_rtc_remove),
+};
+
+static int __init tps6586x_rtc_init(void)
+{
+ return platform_driver_register(&tps6586x_rtc_driver);
+}
+module_init(tps6586x_rtc_init);
+
+static void __exit tps6586x_rtc_exit(void)
+{
+ platform_driver_unregister(&tps6586x_rtc_driver);
+}
+module_exit(tps6586x_rtc_exit);
+
+MODULE_DESCRIPTION("TI TPS6586x RTC driver");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc-tps6586x");
diff --git a/drivers/rtc/rtc-tps6591x.c b/drivers/rtc/rtc-tps6591x.c
new file mode 100644
index 000000000000..878c2046aac7
--- /dev/null
+++ b/drivers/rtc/rtc-tps6591x.c
@@ -0,0 +1,546 @@
+/*
+ * drivers/rtc/rtc-tps6591x.c
+ *
+ * RTC driver for TI TPS6591x
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6591x.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define RTC_CTRL 0x10
+#define RTC_STATUS 0x11
+#define RTC_SECONDS_REG 0x0
+#define RTC_ALARM 0x8
+#define RTC_INT 0x12
+#define RTC_RESET_STATUS 0x16
+#define RTC_BBCH_REG 0x39
+
+#define RTC_BBCH_SEL 0x02
+#define RTC_BBCH_EN 0x01
+#define ENABLE_ALARM_INT 0x8
+#define RTC_RESET_VALUE 0x80
+#define ALARM_INT_STATUS 0x40
+
+/*
+The Linux RTC core uses 1900 as the base year in many calculations
+(e.g. see drivers/rtc/rtc-lib.c).
+*/
+#define OS_REF_YEAR 1900
+
+/*
+ The PMU RTC has only 2 nibbles to store year information, so an offset
+ of 100 is used to set the base year to 2000 for this driver.
+*/
+#define RTC_YEAR_OFFSET 100
+
+struct tps6591x_rtc {
+ unsigned long epoch_start;
+ int irq;
+ struct rtc_device *rtc;
+ bool irq_en;
+};
+
+static int tps6591x_read_regs(struct device *dev, int reg, int len,
+ uint8_t *val)
+{
+ int ret;
+
+ /* dummy read of STATUS_REG as per data sheet */
+ ret = tps6591x_reads(dev->parent, RTC_STATUS, 1, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "\n %s failed reading from RTC_STATUS\n",
+ __func__);
+ WARN_ON(1);
+ return ret;
+ }
+
+ ret = tps6591x_reads(dev->parent, reg, len, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "\n %s failed reading from 0x%02x\n",
+ __func__, reg);
+ WARN_ON(1);
+ return ret;
+ }
+ return 0;
+}
+
+static int tps6591x_write_regs(struct device *dev, int reg, int len,
+ uint8_t *val)
+{
+ int ret;
+ ret = tps6591x_writes(dev->parent, reg, len, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "\n %s failed writing\n", __func__);
+ WARN_ON(1);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps6591x_rtc_valid_tm(struct rtc_time *tm)
+{
+ if (tm->tm_year >= (RTC_YEAR_OFFSET + 99)
+ || tm->tm_mon >= 12
+ || tm->tm_mday < 1
+ || tm->tm_mday > rtc_month_days(tm->tm_mon, tm->tm_year + OS_REF_YEAR)
+ || tm->tm_hour >= 24
+ || tm->tm_min >= 60
+ || tm->tm_sec >= 60)
+ return -EINVAL;
+ return 0;
+}
+
+static u8 dec2bcd(u8 dec)
+{
+ return ((dec/10)<<4)+(dec%10);
+}
+
+static u8 bcd2dec(u8 bcd)
+{
+ return (bcd >> 4)*10+(bcd & 0xF);
+}
+
+static void convert_bcd_to_decimal(u8 *buf, u8 len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ buf[i] = bcd2dec(buf[i]);
+}
+
+static void convert_decimal_to_bcd(u8 *buf, u8 len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ buf[i] = dec2bcd(buf[i]);
+}
+
+static void print_time(struct device *dev, struct rtc_time *tm)
+{
+ dev_info(dev, "RTC Time : %d/%d/%d %d:%d:%d\n",
+ (tm->tm_mon + 1), tm->tm_mday, (tm->tm_year + OS_REF_YEAR),
+ tm->tm_hour, tm->tm_min , tm->tm_sec);
+}
+
+static int tps6591x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int err;
+ err = tps6591x_read_regs(dev, RTC_SECONDS_REG, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev, "\n %s :: failed to read time\n", __FILE__);
+ return err;
+ }
+ convert_bcd_to_decimal(buff, sizeof(buff));
+ tm->tm_sec = buff[0];
+ tm->tm_min = buff[1];
+ tm->tm_hour = buff[2];
+ tm->tm_mday = buff[3];
+ tm->tm_mon = buff[4];
+ tm->tm_year = buff[5] + RTC_YEAR_OFFSET;
+ tm->tm_wday = buff[6];
+ print_time(dev, tm);
+ return tps6591x_rtc_valid_tm(tm);
+}
+
+static int tps6591x_rtc_stop(struct device *dev)
+{
+ u8 reg = 0;
+ u8 retries = 0;
+ int err;
+ do {
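+ /*
+ * Clear the run/stop control bit, then poll bit 1 of RTC_STATUS
+ * until the RTC reports it has stopped; give up after 5 retries.
+ */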
+ err = tps6591x_read_regs(dev, RTC_CTRL, 1, &reg);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to read RTC_CTRL reg\n");
+ return err;
+ }
+
+ /* clear STOP bit alone */
+ reg &= ~0x1;
+
+ err = tps6591x_write_regs(dev, RTC_CTRL, 1, &reg);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to program RTC_CTRL reg\n");
+ return err;
+ }
+
+ err = tps6591x_read_regs(dev, RTC_STATUS, 1, &reg);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to read RTC_CTRL reg\n");
+ return err;
+ }
+ /* FixMe: Is allowing up to 5 retries sufficient?? */
+ if (retries++ == 5) {
+ dev_err(dev->parent, "\n failed to stop RTC\n");
+ return -EBUSY;
+ }
+ } while (reg & 2);
+ return 0;
+}
+
+static int tps6591x_rtc_start(struct device *dev)
+{
+ u8 reg = 0;
+ u8 retries = 0;
+ int err;
+
+ do {
+ err = tps6591x_read_regs(dev, RTC_CTRL, 1, &reg);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to read RTC_CTRL reg\n");
+ return err;
+ }
+
+ /* set STOP bit alone */
+ reg |= 0x1;
+
+ err = tps6591x_write_regs(dev, RTC_CTRL, 1, &reg);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to program RTC_CTRL reg\n");
+ return err;
+ }
+
+ err = tps6591x_read_regs(dev, RTC_STATUS, 1, &reg);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to read RTC_CTRL reg\n");
+ return err;
+ }
+ /* FixMe: Is allowing up to 5 retries sufficient?? */
+ if (retries++ == 5) {
+ dev_err(dev->parent, "\n failed to stop RTC\n");
+ return -EBUSY;
+ }
+ } while (!(reg & 2));
+ return 0;
+}
+
+
+static int tps6591x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int err;
+
+ buff[0] = tm->tm_sec;
+ buff[1] = tm->tm_min;
+ buff[2] = tm->tm_hour;
+ buff[3] = tm->tm_mday;
+ buff[4] = tm->tm_mon;
+ buff[5] = tm->tm_year % RTC_YEAR_OFFSET;
+ buff[6] = tm->tm_wday;
+
+ print_time(dev, tm);
+ convert_decimal_to_bcd(buff, sizeof(buff));
+ err = tps6591x_rtc_stop(dev);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to clear RTC_ENABLE\n");
+ return err;
+ }
+
+ err = tps6591x_write_regs(dev, RTC_SECONDS_REG, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to program new time\n");
+ return err;
+ }
+
+ err = tps6591x_rtc_start(dev);
+ if (err < 0) {
+ dev_err(dev->parent, "\n failed to set RTC_ENABLE\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int tps6591x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6591x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long seconds;
+ u8 buff[6];
+ int err;
+ struct rtc_time tm;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ dev_info(dev->parent, "\n setting alarm to requested time::\n");
+ print_time(dev->parent, &alrm->time);
+ rtc_tm_to_time(&alrm->time, &seconds);
+ tps6591x_rtc_read_time(dev, &tm);
+ rtc_tm_to_time(&tm, &rtc->epoch_start);
+
+ if (WARN_ON(alrm->enabled && (seconds < rtc->epoch_start))) {
+ dev_err(dev->parent, "\n can't set alarm to requested time\n");
+ return -EINVAL;
+ }
+
+ if (alrm->enabled && !rtc->irq_en) {
+ rtc->irq_en = true;
+ } else if (!alrm->enabled && rtc->irq_en) {
+ rtc->irq_en = false;
+ }
+
+ buff[0] = alrm->time.tm_sec;
+ buff[1] = alrm->time.tm_min;
+ buff[2] = alrm->time.tm_hour;
+ buff[3] = alrm->time.tm_mday;
+ buff[4] = alrm->time.tm_mon;
+ buff[5] = alrm->time.tm_year % RTC_YEAR_OFFSET;
+ convert_decimal_to_bcd(buff, sizeof(buff));
+ err = tps6591x_write_regs(dev, RTC_ALARM, sizeof(buff), buff);
+ if (err)
+ dev_err(dev->parent, "\n unable to program alarm\n");
+
+ return err;
+}
+
+static int tps6591x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[6];
+ int err;
+
+ err = tps6591x_read_regs(dev, RTC_ALARM, sizeof(buff), buff);
+ if (err)
+ return err;
+ convert_bcd_to_decimal(buff, sizeof(buff));
+
+ alrm->time.tm_sec = buff[0];
+ alrm->time.tm_min = buff[1];
+ alrm->time.tm_hour = buff[2];
+ alrm->time.tm_mday = buff[3];
+ alrm->time.tm_mon = buff[4];
+ alrm->time.tm_year = buff[5] + RTC_YEAR_OFFSET;
+
+ dev_info(dev->parent, "\n getting alarm time::\n");
+ print_time(dev, &alrm->time);
+
+ return 0;
+}
+
+static int tps6591x_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enable)
+{
+ struct tps6591x_rtc *rtc = dev_get_drvdata(dev);
+ u8 reg;
+ int err;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ if (enable) {
+ if (rtc->irq_en == true)
+ return 0;
+ err = tps6591x_read_regs(dev, RTC_INT, 1, &reg);
+ if (err)
+ return err;
+ reg |= 0x8;
+ err = tps6591x_write_regs(dev, RTC_INT, 1, &reg);
+ if (err)
+ return err;
+ rtc->irq_en = true;
+ } else {
+ if (rtc->irq_en == false)
+ return 0;
+ err = tps6591x_read_regs(dev, RTC_INT, 1, &reg);
+ if (err)
+ return err;
+ reg &= ~0x8;
+ err = tps6591x_write_regs(dev, RTC_INT, 1, &reg);
+ if (err)
+ return err;
+ rtc->irq_en = false;
+ }
+ return 0;
+}
+
+static const struct rtc_class_ops tps6591x_rtc_ops = {
+ .read_time = tps6591x_rtc_read_time,
+ .set_time = tps6591x_rtc_set_time,
+ .set_alarm = tps6591x_rtc_set_alarm,
+ .read_alarm = tps6591x_rtc_read_alarm,
+ .alarm_irq_enable = tps6591x_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t tps6591x_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tps6591x_rtc *rtc = dev_get_drvdata(dev);
+ u8 reg;
+ int err;
+
+ /* clear Alarm status bits.*/
+ err = tps6591x_read_regs(dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(dev->parent, "unable to read RTC_STATUS reg\n");
+ return IRQ_NONE;
+ }
+
+ reg = ALARM_INT_STATUS;
+ err = tps6591x_write_regs(dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(dev->parent, "unable to program RTC_STATUS reg\n");
+ return IRQ_NONE;
+ }
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps6591x_rtc_probe(struct platform_device *pdev)
+{
+ struct tps6591x_rtc_platform_data *pdata = pdev->dev.platform_data;
+ struct tps6591x_rtc *rtc;
+ struct rtc_time tm;
+ int err;
+ u8 reg;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data specified\n");
+ return -EINVAL;
+ }
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = -1;
+
+ if (pdata->irq < 0)
+ dev_warn(&pdev->dev, "no IRQ specified, wakeup is disabled\n");
+
+ dev_set_drvdata(&pdev->dev, rtc);
+ device_init_wakeup(&pdev->dev, 1);
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &tps6591x_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto fail;
+ }
+
+ err = tps6591x_read_regs(&pdev->dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "%s: unable to read status\n", __func__);
+ err = -EBUSY;
+ goto fail;
+ }
+
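+ /*
+ * Enable the backup-battery charger so the RTC keeps time when the
+ * main supply is removed (RTC_BBCH_SEL/RTC_BBCH_EN meanings assumed
+ * from the register field names).
+ */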
+ reg = RTC_BBCH_SEL | RTC_BBCH_EN;
+ err = tps6591x_write_regs(&pdev->dev, RTC_BBCH_REG, 1, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "unable to program Charger reg\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ tps6591x_rtc_read_time(&pdev->dev, &tm);
+ if (tm.tm_year < RTC_YEAR_OFFSET || tm.tm_year > (RTC_YEAR_OFFSET + 99)) {
+ if (pdata->time.tm_year < 2000 || pdata->time.tm_year > 2100) {
+ memset(&pdata->time, 0, sizeof(pdata->time));
+ pdata->time.tm_year = RTC_YEAR_OFFSET;
+ pdata->time.tm_mday = 1;
+ } else
+ pdata->time.tm_year -= OS_REF_YEAR;
+ tps6591x_rtc_set_time(&pdev->dev, &pdata->time);
+ }
+
+ reg = ALARM_INT_STATUS;
+ err = tps6591x_write_regs(&pdev->dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "unable to program RTC_STATUS reg\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ reg = ENABLE_ALARM_INT;
+ err = tps6591x_write_regs(&pdev->dev, RTC_INT, 1, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "unable to program Interrupt Mask reg\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ if (pdata && (pdata->irq >= 0)) {
+ rtc->irq = pdata->irq;
+ err = request_threaded_irq(pdata->irq, NULL, tps6591x_rtc_irq,
+ IRQF_ONESHOT, "rtc_tps6591x",
+ &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "request IRQ:%d fail\n", rtc->irq);
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(&pdev->dev, 1);
+ enable_irq_wake(rtc->irq);
+ }
+ }
+ return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(rtc->rtc))
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return err;
+}
+
+static int __devexit tps6591x_rtc_remove(struct platform_device *pdev)
+{
+ struct tps6591x_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+ if (rtc->irq != -1)
+ free_irq(rtc->irq, &pdev->dev);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return 0;
+}
+
+static struct platform_driver tps6591x_rtc_driver = {
+ .driver = {
+ .name = "rtc_tps6591x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6591x_rtc_probe,
+ .remove = __devexit_p(tps6591x_rtc_remove),
+};
+
+static int __init tps6591x_rtc_init(void)
+{
+ return platform_driver_register(&tps6591x_rtc_driver);
+}
+module_init(tps6591x_rtc_init);
+
+static void __exit tps6591x_rtc_exit(void)
+{
+ platform_driver_unregister(&tps6591x_rtc_driver);
+}
+module_exit(tps6591x_rtc_exit);
+
+MODULE_DESCRIPTION("TI TPS6591x RTC driver");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc_tps6591x");
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
new file mode 100644
index 000000000000..7d32032266fd
--- /dev/null
+++ b/drivers/rtc/rtc-tps80031.c
@@ -0,0 +1,452 @@
+/*
+ * drivers/rtc/rtc-tps80031.c
+ *
+ * RTC driver for TI TPS80031
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define RTC_CTRL 0x10
+#define RTC_STATUS 0x11
+#define RTC_SECONDS_REG 0x0
+#define RTC_ALARM 0x8
+#define RTC_INT 0x12
+#define RTC_RESET_STATUS 0x16
+
+#define ENABLE_ALARM_INT 0x8
+#define ALARM_INT_STATUS 0x40
+#define STOP_RTC 1
+
+/* Power on reset Values of RTC registers */
+#define RTC_POR_YEAR 0
+#define RTC_POR_MONTH 1
+#define RTC_POR_DAY 1
+
+/*
+The Linux RTC core uses 1900 as the base year in many
+calculations (e.g. see drivers/rtc/rtc-lib.c).
+*/
+#define OS_REF_YEAR 1900
+
+/*
+ The PMU RTC has only 2 nibbles to store year information, so an
+ offset of 100 is used to set the base year to 2000 for this driver.
+*/
+#define RTC_YEAR_OFFSET 100
+
+struct tps80031_rtc {
+ unsigned long epoch_start;
+ int irq;
+ struct rtc_device *rtc;
+ bool irq_en;
+};
+
+static int tps80031_read_regs(struct device *dev, int reg, int len,
+ uint8_t *val)
+{
+ int ret;
+
+ /* dummy read of STATUS_REG as per data sheet */
+ ret = tps80031_reads(dev->parent, 1, RTC_STATUS, 1, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "failed reading RTC_STATUS\n");
+ WARN_ON(1);
+ return ret;
+ }
+
+ ret = tps80031_reads(dev->parent, 1, reg, len, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "failed reading from reg %d\n", reg);
+ WARN_ON(1);
+ return ret;
+ }
+ return 0;
+}
+
+static int tps80031_write_regs(struct device *dev, int reg, int len,
+ uint8_t *val)
+{
+ int ret;
+ ret = tps80031_writes(dev->parent, 1, reg, len, val);
+ if (ret < 0) {
+ dev_err(dev->parent, "failed writing reg: %d\n", reg);
+ WARN_ON(1);
+ return ret;
+ }
+
+ return 0;
+}
+
+static u8 dec2bcd(u8 dec)
+{
+ return ((dec/10)<<4)+(dec%10);
+}
+
+static u8 bcd2dec(u8 bcd)
+{
+ return (bcd >> 4)*10+(bcd & 0xF);
+}
+
+static void convert_bcd_to_decimal(u8 *buf, u8 len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ buf[i] = bcd2dec(buf[i]);
+}
+
+static void convert_decimal_to_bcd(u8 *buf, u8 len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ buf[i] = dec2bcd(buf[i]);
+}
+
+static int tps80031_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int err;
+ err = tps80031_read_regs(dev, RTC_SECONDS_REG, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev->parent, "failed reading time\n");
+ return err;
+ }
+ convert_bcd_to_decimal(buff, sizeof(buff));
+ tm->tm_sec = buff[0];
+ tm->tm_min = buff[1];
+ tm->tm_hour = buff[2];
+ tm->tm_mday = buff[3];
+ tm->tm_mon = buff[4];
+ tm->tm_year = buff[5] + RTC_YEAR_OFFSET;
+ tm->tm_wday = buff[6];
+ return 0;
+}
+
+static int tps80031_rtc_stop(struct device *dev)
+{
+ int err;
+ err = tps80031_clr_bits(dev->parent, 1, RTC_CTRL, STOP_RTC);
+ if (err < 0)
+ dev_err(dev->parent, "failed to stop RTC. err: %d\n", err);
+ return err;
+}
+
+static int tps80031_rtc_start(struct device *dev)
+{
+ int err;
+ err = tps80031_set_bits(dev->parent, 1, RTC_CTRL, STOP_RTC);
+ if (err < 0)
+ dev_err(dev->parent, "failed to start RTC. err: %d\n", err);
+ return err;
+}
+
+
+static int tps80031_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int err;
+
+ buff[0] = tm->tm_sec;
+ buff[1] = tm->tm_min;
+ buff[2] = tm->tm_hour;
+ buff[3] = tm->tm_mday;
+ buff[4] = tm->tm_mon;
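+ /* keep only the two-digit year; the PMU stores years relative to 2000 */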
+ buff[5] = tm->tm_year % RTC_YEAR_OFFSET;
+ buff[6] = tm->tm_wday;
+
+ convert_decimal_to_bcd(buff, sizeof(buff));
+ err = tps80031_rtc_stop(dev);
+ if (err < 0)
+ return err;
+
+ err = tps80031_write_regs(dev, RTC_SECONDS_REG, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev->parent, "failed to program new time\n");
+ return err;
+ }
+
+ err = tps80031_rtc_start(dev);
+ return err;
+}
+
+static int tps80031_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long seconds;
+ u8 buff[6];
+ int err;
+ struct rtc_time tm;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ rtc_tm_to_time(&alrm->time, &seconds);
+ tps80031_rtc_read_time(dev, &tm);
+ rtc_tm_to_time(&tm, &rtc->epoch_start);
+
+ if (WARN_ON(alrm->enabled && (seconds < rtc->epoch_start))) {
+ dev_err(dev->parent, "can't set alarm to requested time\n");
+ return -EINVAL;
+ }
+
+ if (alrm->enabled && !rtc->irq_en)
+ rtc->irq_en = true;
+ else if (!alrm->enabled && rtc->irq_en)
+ rtc->irq_en = false;
+
+ buff[0] = alrm->time.tm_sec;
+ buff[1] = alrm->time.tm_min;
+ buff[2] = alrm->time.tm_hour;
+ buff[3] = alrm->time.tm_mday;
+ buff[4] = alrm->time.tm_mon;
+ buff[5] = alrm->time.tm_year % RTC_YEAR_OFFSET;
+ convert_decimal_to_bcd(buff, sizeof(buff));
+ err = tps80031_write_regs(dev, RTC_ALARM, sizeof(buff), buff);
+ if (err)
+ dev_err(dev->parent, "unable to program alarm\n");
+
+ return err;
+}
+
+static int tps80031_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[6];
+ int err;
+
+ err = tps80031_read_regs(dev, RTC_ALARM, sizeof(buff), buff);
+ if (err)
+ return err;
+ convert_bcd_to_decimal(buff, sizeof(buff));
+
+ alrm->time.tm_sec = buff[0];
+ alrm->time.tm_min = buff[1];
+ alrm->time.tm_hour = buff[2];
+ alrm->time.tm_mday = buff[3];
+ alrm->time.tm_mon = buff[4];
+ alrm->time.tm_year = buff[5] + RTC_YEAR_OFFSET;
+
+ return 0;
+}
+
+static int tps80031_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enable)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+ struct device *p = dev->parent;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ if (enable) {
+ if (rtc->irq_en == true)
+ return 0;
+
+ err = tps80031_set_bits(p, 1, RTC_INT, ENABLE_ALARM_INT);
+ if (err < 0) {
+ dev_err(p, "failed to set ALRM int. err: %d\n", err);
+ return err;
+ }
+ rtc->irq_en = true;
+ } else {
+ if (rtc->irq_en == false)
+ return 0;
+ err = tps80031_clr_bits(p, 1, RTC_INT, ENABLE_ALARM_INT);
+ if (err < 0) {
+ dev_err(p, "failed to clear ALRM int. err: %d\n", err);
+ return err;
+ }
+ rtc->irq_en = false;
+ }
+ return 0;
+}
+
+static const struct rtc_class_ops tps80031_rtc_ops = {
+ .read_time = tps80031_rtc_read_time,
+ .set_time = tps80031_rtc_set_time,
+ .set_alarm = tps80031_rtc_set_alarm,
+ .read_alarm = tps80031_rtc_read_alarm,
+ .alarm_irq_enable = tps80031_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t tps80031_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+ u8 reg;
+ int err;
+
+ /* clear Alarm status bits.*/
+ err = tps80031_read_regs(dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(dev->parent, "unable to read RTC_STATUS reg\n");
+ return IRQ_NONE;
+ }
+
+ err = tps80031_force_update(dev->parent, 1, RTC_STATUS,
+ ALARM_INT_STATUS, ALARM_INT_STATUS);
+ if (err) {
+ dev_err(dev->parent, "unable to set Alarm INT\n");
+ return IRQ_NONE;
+ }
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps80031_rtc_probe(struct platform_device *pdev)
+{
+ struct tps80031_rtc_platform_data *pdata = pdev->dev.platform_data;
+ struct tps80031_rtc *rtc;
+ struct rtc_time tm;
+ int err;
+ u8 reg;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data specified\n");
+ return -EINVAL;
+ }
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = -1;
+ if (pdata->irq < 0)
+ dev_err(&pdev->dev, "no IRQ specified, wakeup is disabled\n");
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &tps80031_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto fail;
+ }
+
+ err = tps80031_read_regs(&pdev->dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "%s: unable to read status\n", __func__);
+ err = -EBUSY;
+ goto fail;
+ }
+
+ /* If the RTC still holds its power-on-reset values, set the time from platform data */
+ tps80031_rtc_read_time(&pdev->dev, &tm);
+ if ((tm.tm_year == RTC_YEAR_OFFSET + RTC_POR_YEAR) &&
+ (tm.tm_mon == RTC_POR_MONTH) &&
+ (tm.tm_mday == RTC_POR_DAY)) {
+ if (pdata->time.tm_year < 2000 ||
+ pdata->time.tm_year > 2100) {
+ dev_err(&pdev->dev, "Invalid platform data\n");
+ memset(&pdata->time, 0, sizeof(pdata->time));
+ pdata->time.tm_year = 2011;
+ pdata->time.tm_mday = 1;
+ }
+ tps80031_rtc_set_time(&pdev->dev, &pdata->time);
+ }
+
+ reg = ALARM_INT_STATUS;
+ err = tps80031_write_regs(&pdev->dev, RTC_STATUS, 1, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "unable to program RTC_STATUS reg\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ err = tps80031_set_bits(pdev->dev.parent, 1, RTC_INT, ENABLE_ALARM_INT);
+ if (err) {
+ dev_err(&pdev->dev, "unable to program Interrupt Mask reg\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ dev_set_drvdata(&pdev->dev, rtc);
+ if (pdata && (pdata->irq >= 0)) {
+ rtc->irq = pdata->irq;
+ err = request_threaded_irq(pdata->irq, NULL, tps80031_rtc_irq,
+ IRQF_ONESHOT, "rtc_tps80031",
+ &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "request IRQ:%d fail\n", rtc->irq);
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(&pdev->dev, 1);
+ enable_irq_wake(rtc->irq);
+ }
+ }
+ return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(rtc->rtc))
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return err;
+}
+
+static int __devexit tps80031_rtc_remove(struct platform_device *pdev)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+ if (rtc->irq != -1)
+ free_irq(rtc->irq, &pdev->dev);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return 0;
+}
+
+static struct platform_driver tps80031_rtc_driver = {
+ .driver = {
+ .name = "rtc_tps80031",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_rtc_probe,
+ .remove = __devexit_p(tps80031_rtc_remove),
+};
+
+static int __init tps80031_rtc_init(void)
+{
+ return platform_driver_register(&tps80031_rtc_driver);
+}
+module_init(tps80031_rtc_init);
+
+static void __exit tps80031_rtc_exit(void)
+{
+ platform_driver_unregister(&tps80031_rtc_driver);
+}
+module_exit(tps80031_rtc_exit);
+
+MODULE_DESCRIPTION("TI TPS80031 RTC driver");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc_tps80031");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 52e2900d9d8e..8f7ee0e1ee46 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -338,6 +338,16 @@ config SPI_TEGRA
help
SPI driver for NVidia Tegra SoCs
+config SPI_SLAVE_TEGRA
+ tristate "Nvidia Tegra SPI slave controller"
+ depends on ARCH_TEGRA
+ select TEGRA_SYSTEM_DMA
+ default n
+ help
+ SPI slave driver for NVIDIA Tegra SoCs.
+ Say Y if SPI slave functionality is required from the Tegra SPI
+ controller. The interface is the same as for the master SPI driver.
+
config SPI_TI_SSP
tristate "TI Sequencer Serial Port - SPI Support"
depends on MFD_TI_SSP
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 61c3261c388c..430f8200108a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for kernel SPI drivers.
#
+GCOV_PROFILE := y
ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
@@ -53,6 +54,7 @@ obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
obj-$(CONFIG_SPI_TEGRA) += spi-tegra.o
+obj-$(CONFIG_SPI_SLAVE_TEGRA) += spi_slave_tegra.o
obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
index a5a6302dc8e0..38cb8d11562d 100644
--- a/drivers/spi/spi-tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -6,6 +6,8 @@
* Author:
* Erik Gilling <konkers@android.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -17,6 +19,9 @@
*
*/
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -27,10 +32,14 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
#include <linux/spi/spi.h>
+#include <linux/spi-tegra.h>
#include <mach/dma.h>
+#include <mach/clk.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -125,259 +134,645 @@
#define SLINK_STATUS2 0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
-#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
#define SLINK_TX_FIFO 0x100
#define SLINK_RX_FIFO 0x180
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_FIFO_DEPTH 32
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+
static const unsigned long spi_tegra_req_sels[] = {
TEGRA_DMA_REQ_SEL_SL2B1,
TEGRA_DMA_REQ_SEL_SL2B2,
TEGRA_DMA_REQ_SEL_SL2B3,
TEGRA_DMA_REQ_SEL_SL2B4,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ TEGRA_DMA_REQ_SEL_SL2B5,
+ TEGRA_DMA_REQ_SEL_SL2B6,
+#endif
+
};
-#define BB_LEN 32
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | \
+ RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 4
struct spi_tegra_data {
struct spi_master *master;
struct platform_device *pdev;
spinlock_t lock;
+ char port_name[32];
struct clk *clk;
void __iomem *base;
- unsigned long phys;
+ phys_addr_t phys;
+ unsigned irq;
u32 cur_speed;
struct list_head queue;
struct spi_transfer *cur;
+ struct spi_device *cur_spi;
unsigned cur_pos;
unsigned cur_len;
- unsigned cur_bytes_per_word;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+
+ unsigned cur_direction;
+
+ bool is_dma_allowed;
- /* The tegra spi controller has a bug which causes the first word
- * in PIO transactions to be garbage. Since packed DMA transactions
- * require transfers to be 4 byte aligned we need a bounce buffer
- * for the generic case.
- */
struct tegra_dma_req rx_dma_req;
struct tegra_dma_channel *rx_dma;
- u32 *rx_bb;
- dma_addr_t rx_bb_phys;
+ u32 *rx_buf;
+ dma_addr_t rx_buf_phys;
+ unsigned cur_rx_pos;
+
+ struct tegra_dma_req tx_dma_req;
+ struct tegra_dma_channel *tx_dma;
+ u32 *tx_buf;
+ dma_addr_t tx_buf_phys;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ bool is_clkon_always;
+ bool clk_state;
+ bool is_suspended;
+
+ bool is_hw_based_cs;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+ bool is_transfer_in_progress;
+
+ u32 rx_complete;
+ u32 tx_complete;
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ unsigned long packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ struct spi_clk_parent *parent_clk_list;
+ int parent_clk_count;
+ unsigned long max_rate;
+ unsigned long max_parent_rate;
+ int min_div;
+ struct workqueue_struct *spi_workqueue;
+ struct work_struct spi_transfer_work;
};
-
static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
- unsigned long reg)
+ unsigned long reg)
{
+ BUG_ON(!tspi->clk_state);
return readl(tspi->base + reg);
}
static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
- unsigned long val,
- unsigned long reg)
+ unsigned long val, unsigned long reg)
{
+ BUG_ON(!tspi->clk_state);
writel(val, tspi->base + reg);
}
-static void spi_tegra_go(struct spi_tegra_data *tspi)
+static void spi_tegra_clear_status(struct spi_tegra_data *tspi)
{
unsigned long val;
+ unsigned long val_write = 0;
- wmb();
-
- val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
- val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
- val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1);
- spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
-
- tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+ val = spi_tegra_readl(tspi, SLINK_STATUS);
- val |= SLINK_DMA_EN;
- spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
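+ /*
+ * The status bits are assumed to be write-one-to-clear: write back
+ * RDY plus whichever error flags are currently set to acknowledge
+ * them.
+ */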
+ val_write = SLINK_RDY;
+ if (val & SLINK_TX_OVF)
+ val_write |= SLINK_TX_OVF;
+ if (val & SLINK_RX_OVF)
+ val_write |= SLINK_RX_OVF;
+ if (val & SLINK_RX_UNF)
+ val_write |= SLINK_RX_UNF;
+ if (val & SLINK_TX_UNF)
+ val_write |= SLINK_TX_UNF;
+
+ spi_tegra_writel(tspi, val_write, SLINK_STATUS);
}
-static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
+static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
struct spi_transfer *t)
{
- unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
- tspi->cur_bytes_per_word);
- u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
- int i, j;
unsigned long val;
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
- val &= ~SLINK_WORD_SIZE(~0);
- val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
- for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+ switch (tspi->bytes_per_word) {
+ case 0:
+ val = SLINK_PACK_SIZE_4;
+ break;
+ case 1:
+ val = SLINK_PACK_SIZE_8;
+ break;
+ case 2:
+ val = SLINK_PACK_SIZE_16;
+ break;
+ case 4:
+ val = SLINK_PACK_SIZE_32;
+ break;
+ default:
val = 0;
- for (j = 0; j < tspi->cur_bytes_per_word; j++)
- val |= tx_buf[i + j] << j * 8;
+ }
+ return val;
+}
- spi_tegra_writel(tspi, val, SLINK_TX_FIFO);
+static unsigned spi_tegra_calculate_curr_xfer_param(
+ struct spi_device *spi, struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+ tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
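+ /*
+ * 8- and 16-bit words can be packed several per 32-bit FIFO entry;
+ * any other word size uses one FIFO entry per word (unpacked mode).
+ */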
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
}
+ tspi->packed_size = spi_tegra_get_packed_size(tspi, t);
- tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4;
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = remain_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = remain_len/tspi->bytes_per_word;
+ }
+ return total_fifo_words;
+}
- return len;
+static unsigned spi_tegra_fill_tx_fifo_from_client_txbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ unsigned long fifo_status;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int written_words;
+
+ fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ nbytes = tspi->curr_dma_words * tspi->bytes_per_word;
+ max_n_32bit = (min(nbytes, tx_empty_count*4) - 1)/4 + 1;
+ for (count = 0; count < max_n_32bit; ++count) {
+ x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (*tx_buf++) << (i*8);
+ spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ written_words = min(max_n_32bit * tspi->words_per_32bit,
+ tspi->curr_dma_words);
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ nbytes = max_n_32bit * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; ++count) {
+ x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ ++i, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ written_words = max_n_32bit;
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
}
-static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
- struct spi_transfer *t)
+static unsigned int spi_tegra_read_rx_fifo_to_client_rxbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
{
- unsigned len = tspi->cur_len;
- u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
- int i, j;
- unsigned long val;
+ unsigned rx_full_count;
+ unsigned long fifo_status;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int read_words = 0;
+ unsigned len;
+
+ fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ dev_dbg(&tspi->pdev->dev, "Rx fifo count %d\n", rx_full_count);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; ++count) {
+ x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); ++i, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ unsigned int rx_mask, bits_per_word;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ tspi->cur_spi->bits_per_word;
+ rx_mask = (1 << bits_per_word) - 1;
+ for (count = 0; count < rx_full_count; ++count) {
+ x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
+ x &= rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); ++i)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
- for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
- val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
- for (j = 0; j < tspi->cur_bytes_per_word; j++)
- rx_buf[i + j] = (val >> (j * 8)) & 0xff;
+static void spi_tegra_copy_client_txbuf_to_spi_txbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int x;
+
+ for (count = 0; count < tspi->curr_dma_words; ++count) {
+ x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ ++i, consume--)
+ x |= ((*tx_buf++) << i*8);
+ tspi->tx_buf[count] = x;
+ }
}
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+}
- return len;
+static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ unsigned int x;
+ unsigned int rx_mask, bits_per_word;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ tspi->cur_spi->bits_per_word;
+		rx_mask = (1 << bits_per_word) - 1;
+ for (count = 0; count < tspi->curr_dma_words; ++count) {
+ x = tspi->rx_buf[count];
+ x &= rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); ++i)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
}
-static void spi_tegra_start_transfer(struct spi_device *spi,
- struct spi_transfer *t)
+static int spi_tegra_start_dma_based_transfer(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
{
- struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
- u32 speed;
- u8 bits_per_word;
unsigned long val;
+ unsigned long test_val;
+ unsigned int len;
+ int ret = 0;
+
+ INIT_COMPLETION(tspi->rx_dma_complete);
+ INIT_COMPLETION(tspi->tx_dma_complete);
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
- speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
- bits_per_word = t->bits_per_word ? t->bits_per_word :
- spi->bits_per_word;
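+	/*
+	 * Pick the DMA FIFO trigger level from the transfer length: lengths
+	 * that are not a multiple of 16 bytes use the 1-word trigger,
+	 * 16-byte-aligned (but not 32-byte-aligned) lengths use 4 words,
+	 * and everything else uses the 8-word trigger.
+	 */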
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
- tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
- if (speed != tspi->cur_speed)
- clk_set_rate(tspi->clk, speed);
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
- if (tspi->cur_speed == 0)
- clk_enable(tspi->clk);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ tspi->tx_dma_req.size = len;
+ ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
+ if (ret < 0) {
+			dev_err(&tspi->pdev->dev, "Error in starting tx dma, "
+				"error = %d\n", ret);
+ return ret;
+ }
- tspi->cur_speed = speed;
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND2);
- val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN;
- if (t->rx_buf)
- val |= SLINK_RXEN;
- if (t->tx_buf)
- val |= SLINK_TXEN;
- val |= SLINK_SS_EN_CS(spi->chip_select);
- val |= SLINK_SPIE;
- spi_tegra_writel(tspi, val, SLINK_COMMAND2);
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
- val &= ~SLINK_BIT_LENGTH(~0);
- val |= SLINK_BIT_LENGTH(bits_per_word - 1);
-
- /* FIXME: should probably control CS manually so that we can be sure
- * it does not go low between transfer and to support delay_usecs
- * correctly.
- */
- val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;
-
- if (spi->mode & SPI_CPHA)
- val |= SLINK_CK_SDA;
-
- if (spi->mode & SPI_CPOL)
- val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
- else
- val |= SLINK_IDLE_SCLK_DRIVE_LOW;
+		/* Wait for tx fifo to be filled before starting slink */
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ while (!(test_val & SLINK_TX_FULL))
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ tspi->rx_dma_req.size = len;
+ ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+ if (ret < 0) {
+			dev_err(&tspi->pdev->dev, "Error in starting rx dma, "
+				"error = %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tegra_dma_dequeue_req(tspi->tx_dma,
+ &tspi->tx_dma_req);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
- val |= SLINK_M_S;
+static int spi_tegra_start_cpu_based_transfer(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned curr_words;
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
- spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
- tspi->cur = t;
- tspi->cur_pos = 0;
- tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
- spi_tegra_go(tspi);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ curr_words = spi_tegra_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ curr_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(curr_words - 1);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
}
-static void spi_tegra_start_message(struct spi_device *spi,
- struct spi_message *m)
+static void set_best_clk_source(struct spi_tegra_data *tspi,
+ unsigned long speed)
{
- struct spi_transfer *t;
+ long new_rate;
+ unsigned long err_rate;
+ int rate = speed * 4;
+ unsigned int fin_err = speed * 4;
+ int final_index = -1;
+ int count;
+ int ret;
+ struct clk *pclk;
+ unsigned long prate, crate, nrate;
+ unsigned long cdiv;
+
+ if (!tspi->parent_clk_count || !tspi->parent_clk_list)
+ return;
+
+	/* make sure the divisor is not less than min_div */
+ pclk = clk_get_parent(tspi->clk);
+ prate = clk_get_rate(pclk);
+ crate = clk_get_rate(tspi->clk);
+ cdiv = DIV_ROUND_UP(prate, crate);
+ if (cdiv < tspi->min_div) {
+ nrate = DIV_ROUND_UP(prate, tspi->min_div);
+ clk_set_rate(tspi->clk, nrate);
+ }
- m->actual_length = 0;
- m->status = 0;
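+	/*
+	 * Walk the allowed parent clocks and pick the one whose rounded
+	 * rate comes closest to 4x the requested SPI speed, matching the
+	 * rate programmed in spi_tegra_start_transfer().
+	 */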
+ for (count = 0; count < tspi->parent_clk_count; ++count) {
+ if (!tspi->parent_clk_list[count].parent_clk)
+ continue;
+ ret = clk_set_parent(tspi->clk,
+ tspi->parent_clk_list[count].parent_clk);
+ if (ret < 0) {
+ dev_warn(&tspi->pdev->dev, "Error in setting parent "
+				"clk src %s\n",
+ tspi->parent_clk_list[count].name);
+ continue;
+ }
- t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
- spi_tegra_start_transfer(spi, t);
-}
+ new_rate = clk_round_rate(tspi->clk, rate);
+ if (new_rate < 0)
+ continue;
-static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
-{
- struct spi_tegra_data *tspi = req->dev;
- unsigned long flags;
- struct spi_message *m;
- struct spi_device *spi;
- int timeout = 0;
- unsigned long val;
+ err_rate = abs(new_rate - rate);
+ if (err_rate < fin_err) {
+ final_index = count;
+ fin_err = err_rate;
+ }
+ }
- /* the SPI controller may come back with both the BSY and RDY bits
- * set. In this case we need to wait for the BSY bit to clear so
- * that we are sure the DMA is finished. 1000 reads was empirically
- * determined to be long enough.
- */
- while (timeout++ < 1000) {
- if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY))
- break;
+ if (final_index >= 0) {
+ dev_info(&tspi->pdev->dev, "Setting clk_src %s\n",
+ tspi->parent_clk_list[final_index].name);
+ clk_set_parent(tspi->clk,
+ tspi->parent_clk_list[final_index].parent_clk);
}
+}
- spin_lock_irqsave(&tspi->lock, flags);
+static void spi_tegra_start_transfer(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ struct tegra_spi_device_controller_data *cdata = spi->controller_data;
+ unsigned long command;
+ unsigned long command2;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long status2;
+#endif
+ int cs_setup_count;
+ int cs_hold_count;
+
+ unsigned int cs_pol_bit[] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
- val = spi_tegra_readl(tspi, SLINK_STATUS);
- val |= SLINK_RDY;
- spi_tegra_writel(tspi, val, SLINK_STATUS);
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (speed != tspi->cur_speed) {
+ set_best_clk_source(tspi, speed);
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
+ }
- m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ tspi->cur = t;
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->rx_complete = 0;
+ tspi->tx_complete = 0;
+ total_fifo_words = spi_tegra_calculate_curr_xfer_param(spi, tspi, t);
+
+ command2 = tspi->def_command2_reg;
+ if (is_first_of_msg) {
+ if (!tspi->is_clkon_always) {
+ if (!tspi->clk_state) {
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ }
+ }
- if (timeout >= 1000)
- m->status = -EIO;
+ spi_tegra_clear_status(tspi);
+
+ command = tspi->def_command_reg;
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ /* possibly use the hw based chip select */
+ tspi->is_hw_based_cs = false;
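+		/*
+		 * Hardware chip select is only used when the board data asks
+		 * for it, the message is a single transfer and the whole
+		 * transfer fits in one shot; otherwise CS stays under
+		 * software control.
+		 */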
+ if (cdata && cdata->is_hw_based_cs && is_single_xfer) {
+ if ((tspi->curr_dma_words * tspi->bytes_per_word) ==
+ (t->len - tspi->cur_pos)) {
+ cs_setup_count = cdata->cs_setup_clk_count >> 1;
+ if (cs_setup_count > 3)
+ cs_setup_count = 3;
+ cs_hold_count = cdata->cs_hold_clk_count;
+ if (cs_hold_count > 0xF)
+ cs_hold_count = 0xF;
+ tspi->is_hw_based_cs = true;
+
+ command &= ~SLINK_CS_SW;
+ command2 &= ~SLINK_SS_SETUP(3);
+ command2 |= SLINK_SS_SETUP(cs_setup_count);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ status2 = spi_tegra_readl(tspi, SLINK_STATUS2);
+ status2 &= ~SLINK_SS_HOLD_TIME(0xF);
+ status2 |= SLINK_SS_HOLD_TIME(cs_hold_count);
+ spi_tegra_writel(tspi, status2, SLINK_STATUS2);
+#endif
+ }
+ }
+ if (!tspi->is_hw_based_cs) {
+ command |= SLINK_CS_SW;
+ command ^= cs_pol_bit[spi->chip_select];
+ }
- spi = m->state;
+ command &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA;
+ if (spi->mode & SPI_CPHA)
+ command |= SLINK_CK_SDA;
- tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur);
- m->actual_length += tspi->cur_pos;
-
- if (tspi->cur_pos < tspi->cur->len) {
- tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur);
- spi_tegra_go(tspi);
- } else if (!list_is_last(&tspi->cur->transfer_list,
- &m->transfers)) {
- tspi->cur = list_first_entry(&tspi->cur->transfer_list,
- struct spi_transfer,
- transfer_list);
- spi_tegra_start_transfer(spi, tspi->cur);
+ if (spi->mode & SPI_CPOL)
+ command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ command |= SLINK_IDLE_SCLK_DRIVE_LOW;
} else {
- list_del(&m->queue);
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+ }
- m->complete(m->context);
+ spi_tegra_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
- if (!list_empty(&tspi->queue)) {
- m = list_first_entry(&tspi->queue, struct spi_message,
- queue);
- spi = m->state;
- spi_tegra_start_message(spi, m);
- } else {
- clk_disable(tspi->clk);
- tspi->cur_speed = 0;
- }
+ dev_dbg(&tspi->pdev->dev, "The def 0x%x and written 0x%lx\n",
+ tspi->def_command_reg, command);
+
+ command2 &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
}
+ command2 |= SLINK_SS_EN_CS(spi->chip_select);
+ spi_tegra_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
- spin_unlock_irqrestore(&tspi->lock, flags);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = spi_tegra_start_dma_based_transfer(tspi, t);
+ else
+ ret = spi_tegra_start_cpu_based_transfer(tspi, t);
+ WARN_ON(ret < 0);
}
static int spi_tegra_setup(struct spi_device *spi)
@@ -393,7 +788,7 @@ static int spi_tegra_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
-
+ BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
switch (spi->chip_select) {
case 0:
cs_bit = SLINK_CS_POLARITY;
@@ -407,7 +802,7 @@ static int spi_tegra_setup(struct spi_device *spi)
cs_bit = SLINK_CS_POLARITY2;
break;
- case 4:
+ case 3:
cs_bit = SLINK_CS_POLARITY3;
break;
@@ -416,25 +811,70 @@ static int spi_tegra_setup(struct spi_device *spi)
}
spin_lock_irqsave(&tspi->lock, flags);
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ val = tspi->def_command_reg;
if (spi->mode & SPI_CS_HIGH)
val |= cs_bit;
else
val &= ~cs_bit;
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
- spin_unlock_irqrestore(&tspi->lock, flags);
+ tspi->def_command_reg |= val;
+ if (!tspi->is_clkon_always && !tspi->clk_state) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ clk_enable(tspi->clk);
+ spin_lock_irqsave(&tspi->lock, flags);
+ tspi->clk_state = 1;
+ }
+ spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ if (!tspi->is_clkon_always && tspi->clk_state) {
+ tspi->clk_state = 0;
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ clk_disable(tspi->clk);
+ } else
+ spin_unlock_irqrestore(&tspi->lock, flags);
return 0;
}
+static void tegra_spi_transfer_work(struct work_struct *work)
+{
+ struct spi_tegra_data *tspi;
+ struct spi_device *spi;
+ struct spi_message *m;
+ struct spi_transfer *t;
+ int single_xfer = 0;
+ unsigned long flags;
+
+ tspi = container_of(work, struct spi_tegra_data, spi_transfer_work);
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ if (tspi->is_transfer_in_progress || tspi->is_suspended) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return;
+ }
+ if (list_empty(&tspi->queue)) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return;
+ }
+
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ spi = m->state;
+ single_xfer = list_is_singular(&m->transfers);
+ m->actual_length = 0;
+ m->status = 0;
+ t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
+ tspi->is_transfer_in_progress = true;
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ spi_tegra_start_transfer(spi, t, true, single_xfer);
+}
+
static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
{
struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
struct spi_transfer *t;
unsigned long flags;
int was_empty;
+ int bytes_per_word;
if (list_empty(&m->transfers) || !m->complete)
return -EINVAL;
@@ -446,30 +886,291 @@ static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
if (t->len == 0)
return -EINVAL;
+		/* Check that the transfer length is a whole number of words */
+ if (t->bits_per_word)
+ bytes_per_word = (t->bits_per_word + 7)/8;
+ else
+ bytes_per_word = (spi->bits_per_word + 7)/8;
+
+ if (t->len % bytes_per_word != 0)
+ return -EINVAL;
+
if (!t->rx_buf && !t->tx_buf)
return -EINVAL;
}
- m->state = spi;
-
spin_lock_irqsave(&tspi->lock, flags);
+
+ if (WARN_ON(tspi->is_suspended)) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return -EBUSY;
+ }
+
+ m->state = spi;
was_empty = list_empty(&tspi->queue);
list_add_tail(&m->queue, &tspi->queue);
-
if (was_empty)
- spi_tegra_start_message(spi, m);
+ queue_work(tspi->spi_workqueue, &tspi->spi_transfer_work);
spin_unlock_irqrestore(&tspi->lock, flags);
-
return 0;
}
+static void spi_tegra_curr_transfer_complete(struct spi_tegra_data *tspi,
+ unsigned err, unsigned cur_xfer_size, unsigned long *irq_flags)
+{
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t;
+ int single_xfer = 0;
+
+	/* Check if CS needs to be toggled here */
+ if (tspi->cur && tspi->cur->cs_change &&
+ tspi->cur->delay_usecs) {
+ udelay(tspi->cur->delay_usecs);
+ }
+
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ if (err)
+ m->status = -EIO;
+ spi = m->state;
+
+ m->actual_length += cur_xfer_size;
+
+ if (!list_is_last(&tspi->cur->transfer_list, &m->transfers)) {
+ tspi->cur = list_first_entry(&tspi->cur->transfer_list,
+ struct spi_transfer, transfer_list);
+ spin_unlock_irqrestore(&tspi->lock, *irq_flags);
+ spi_tegra_start_transfer(spi, tspi->cur, false, 0);
+ spin_lock_irqsave(&tspi->lock, *irq_flags);
+ } else {
+ list_del(&m->queue);
+ m->complete(m->context);
+ if (!list_empty(&tspi->queue)) {
+ if (tspi->is_suspended) {
+ spi_tegra_writel(tspi, tspi->def_command_reg,
+ SLINK_COMMAND);
+ spi_tegra_writel(tspi, tspi->def_command2_reg,
+ SLINK_COMMAND2);
+ tspi->is_transfer_in_progress = false;
+ return;
+ }
+ m = list_first_entry(&tspi->queue, struct spi_message,
+ queue);
+ spi = m->state;
+ single_xfer = list_is_singular(&m->transfers);
+ m->actual_length = 0;
+ m->status = 0;
+
+ t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ spin_unlock_irqrestore(&tspi->lock, *irq_flags);
+ spi_tegra_start_transfer(spi, t, true, single_xfer);
+ spin_lock_irqsave(&tspi->lock, *irq_flags);
+ } else {
+ spi_tegra_writel(tspi, tspi->def_command_reg,
+ SLINK_COMMAND);
+ spi_tegra_writel(tspi, tspi->def_command2_reg,
+ SLINK_COMMAND2);
+ if (!tspi->is_clkon_always) {
+ if (tspi->clk_state) {
+				/* Provide delay to stabilize the signal
+				 * state */
+ spin_unlock_irqrestore(&tspi->lock,
+ *irq_flags);
+ udelay(10);
+ clk_disable(tspi->clk);
+ spin_lock_irqsave(&tspi->lock,
+ *irq_flags);
+ tspi->clk_state = 0;
+ }
+ }
+ tspi->is_transfer_in_progress = false;
+			/* Check if any new request arrived while the
+			 * clock was being disabled */
+ queue_work(tspi->spi_workqueue,
+ &tspi->spi_transfer_work);
+ }
+ }
+ return;
+}
+
+static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ complete(&tspi->tx_dma_complete);
+}
+
+static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ complete(&tspi->rx_dma_complete);
+}
+
+static void handle_cpu_based_xfer(void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+ struct spi_transfer *t = tspi->cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
+ __func__, tspi->status_reg);
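+		/* Reset the controller to clear the error condition before
+		 * completing the transfer with -EIO. */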
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ WARN_ON(1);
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len, &flags);
+ goto exit;
+ }
+
+ dev_vdbg(&tspi->pdev->dev, " Current direction %x\n",
+ tspi->cur_direction);
+ if (tspi->cur_direction & DATA_DIR_RX)
+ spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->cur_pos = tspi->cur_rx_pos;
+ else
+ WARN_ON(1);
+
+ dev_vdbg(&tspi->pdev->dev, "current position %d and length of the "
+ "transfer %d\n", tspi->cur_pos, t->len);
+ if (tspi->cur_pos == t->len) {
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len, &flags);
+ goto exit;
+ }
+
+ spi_tegra_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ spi_tegra_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return;
+}
+
+static irqreturn_t spi_tegra_isr_thread(int irq, void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+ struct spi_transfer *t = tspi->cur;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ if (!tspi->is_curr_dma_xfer) {
+ handle_cpu_based_xfer(context_data);
+ return IRQ_HANDLED;
+ }
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ tegra_dma_dequeue(tspi->tx_dma);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ tegra_dma_dequeue(tspi->tx_dma);
+ dev_err(&tspi->pdev->dev, "Error in Dma Tx "
+ "transfer\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ tegra_dma_dequeue(tspi->rx_dma);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ tegra_dma_dequeue(tspi->rx_dma);
+ dev_err(&tspi->pdev->dev, "Error in Dma Rx "
+ "transfer\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
+ __func__, tspi->status_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ WARN_ON(1);
+ spi_tegra_curr_transfer_complete(tspi, err, t->len, &flags);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ spi_tegra_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->cur_pos = tspi->cur_rx_pos;
+ else
+ WARN_ON(1);
+
+ if (tspi->cur_pos == t->len) {
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len, &flags);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = spi_tegra_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ err = spi_tegra_start_dma_based_transfer(tspi, t);
+ else
+ err = spi_tegra_start_cpu_based_transfer(tspi, t);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ WARN_ON(err < 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t spi_tegra_isr(int irq, void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+
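+	/*
+	 * Hard-IRQ half: latch the controller status, record any FIFO
+	 * over/underrun bits and clear the status, then let the threaded
+	 * handler do the FIFO/DMA work.
+	 */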
+ tspi->status_reg = spi_tegra_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ spi_tegra_clear_status(tspi);
+
+
+ return IRQ_WAKE_THREAD;
+}
+
static int __init spi_tegra_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct spi_tegra_data *tspi;
struct resource *r;
- int ret;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret, spi_irq;
+ int i;
+ char spi_wq_name[20];
master = spi_alloc_master(&pdev->dev, sizeof *tspi);
if (master == NULL) {
@@ -480,28 +1181,31 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->bus_num = pdev->id;
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
master->setup = spi_tegra_setup;
master->transfer = spi_tegra_transfer;
- master->num_chipselect = 4;
+ master->num_chipselect = MAX_CHIP_SELECT;
dev_set_drvdata(&pdev->dev, master);
tspi = spi_master_get_devdata(master);
tspi->master = master;
tspi->pdev = pdev;
+ tspi->is_transfer_in_progress = false;
+ tspi->is_suspended = false;
spin_lock_init(&tspi->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
ret = -ENODEV;
- goto err0;
+ goto fail_no_mem;
}
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
ret = -EBUSY;
- goto err0;
+ goto fail_no_mem;
}
tspi->phys = r->start;
@@ -509,63 +1213,192 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
if (!tspi->base) {
dev_err(&pdev->dev, "can't ioremap iomem\n");
ret = -ENOMEM;
- goto err1;
+ goto fail_io_map;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+ if (unlikely(spi_irq < 0)) {
+ dev_err(&pdev->dev, "can't find irq resource\n");
+ ret = -ENXIO;
+ goto fail_irq_req;
+ }
+ tspi->irq = spi_irq;
+
+ sprintf(tspi->port_name, "tegra_spi_%d", pdev->id);
+ ret = request_threaded_irq(tspi->irq, spi_tegra_isr,
+ spi_tegra_isr_thread, IRQF_DISABLED,
+ tspi->port_name, tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto fail_irq_req;
}
tspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto err2;
+ goto fail_clk_get;
}
INIT_LIST_HEAD(&tspi->queue);
- tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
+ if (pdata) {
+ tspi->is_clkon_always = pdata->is_clkon_always;
+ tspi->is_dma_allowed = pdata->is_dma_based;
+ tspi->dma_buf_size = (pdata->max_dma_buffer) ?
+ pdata->max_dma_buffer : DEFAULT_SPI_DMA_BUF_LEN;
+ tspi->parent_clk_count = pdata->parent_clk_count;
+ tspi->parent_clk_list = pdata->parent_clk_list;
+ tspi->max_rate = pdata->max_rate;
+ } else {
+ tspi->is_clkon_always = false;
+ tspi->is_dma_allowed = true;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+ tspi->parent_clk_count = 0;
+ tspi->parent_clk_list = NULL;
+ tspi->max_rate = 0;
+ }
+
+ tspi->max_parent_rate = 0;
+ tspi->min_div = 0;
+
+ if (tspi->parent_clk_count) {
+ tspi->max_parent_rate = tspi->parent_clk_list[0].fixed_clk_rate;
+ for (i = 1; i < tspi->parent_clk_count; ++i) {
+ tspi->max_parent_rate = max(tspi->max_parent_rate,
+ tspi->parent_clk_list[i].fixed_clk_rate);
+ }
+ if (tspi->max_rate)
+ tspi->min_div = DIV_ROUND_UP(tspi->max_parent_rate,
+ tspi->max_rate);
+ }
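+	/* Without DMA, a transfer is limited to the FIFO depth expressed in
+	 * bytes (4 bytes per FIFO word); the DMA path raises this to
+	 * dma_buf_size further below. */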
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+
+ if (!tspi->is_dma_allowed)
+ goto skip_dma_alloc;
+
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+
+ tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
+ "spi_rx_%d", pdev->id);
if (!tspi->rx_dma) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
ret = -ENODEV;
- goto err3;
+ goto fail_rx_dma_alloc;
}
- tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- &tspi->rx_bb_phys, GFP_KERNEL);
- if (!tspi->rx_bb) {
+ tspi->rx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
+ &tspi->rx_buf_phys, GFP_KERNEL);
+ if (!tspi->rx_buf) {
dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
ret = -ENOMEM;
- goto err4;
+ goto fail_rx_buf_alloc;
}
+ memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
tspi->rx_dma_req.to_memory = 1;
- tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys;
+ tspi->rx_dma_req.dest_addr = tspi->rx_buf_phys;
+ tspi->rx_dma_req.virt_addr = tspi->rx_buf;
tspi->rx_dma_req.dest_bus_width = 32;
tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
tspi->rx_dma_req.source_bus_width = 32;
tspi->rx_dma_req.source_wrap = 4;
+ tspi->rx_dma_req.dest_wrap = 0;
tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
tspi->rx_dma_req.dev = tspi;
+ tspi->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
+ "spi_tx_%d", pdev->id);
+ if (!tspi->tx_dma) {
+ dev_err(&pdev->dev, "can not allocate tx dma channel\n");
+ ret = -ENODEV;
+ goto fail_tx_dma_alloc;
+ }
+
+ tspi->tx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
+ &tspi->tx_buf_phys, GFP_KERNEL);
+ if (!tspi->tx_buf) {
+ dev_err(&pdev->dev, "can not allocate tx bounce buffer\n");
+ ret = -ENOMEM;
+ goto fail_tx_buf_alloc;
+ }
+
+ memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
+ tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
+ tspi->tx_dma_req.to_memory = 0;
+ tspi->tx_dma_req.dest_addr = tspi->phys + SLINK_TX_FIFO;
+ tspi->tx_dma_req.virt_addr = tspi->tx_buf;
+ tspi->tx_dma_req.dest_bus_width = 32;
+ tspi->tx_dma_req.dest_wrap = 4;
+ tspi->tx_dma_req.source_wrap = 0;
+ tspi->tx_dma_req.source_addr = tspi->tx_buf_phys;
+ tspi->tx_dma_req.source_bus_width = 32;
+ tspi->tx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
+ tspi->tx_dma_req.dev = tspi;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ tspi->def_command_reg = SLINK_CS_SW | SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+
+skip_dma_alloc:
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
+ if (!tspi->is_clkon_always) {
+ if (tspi->clk_state) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ }
- if (ret < 0)
- goto err5;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto fail_master_register;
+ }
+
+	/* create the workqueue for the spi transfer path */
+ snprintf(spi_wq_name, sizeof(spi_wq_name), "spi_tegra-%d", pdev->id);
+ tspi->spi_workqueue = create_singlethread_workqueue(spi_wq_name);
+ if (!tspi->spi_workqueue) {
+ dev_err(&pdev->dev, "Failed to create work queue\n");
+ ret = -ENODEV;
+ goto fail_workqueue;
+ }
+
+ INIT_WORK(&tspi->spi_transfer_work, tegra_spi_transfer_work);
return ret;
-err5:
- dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- tspi->rx_bb, tspi->rx_bb_phys);
-err4:
- tegra_dma_free_channel(tspi->rx_dma);
-err3:
+fail_workqueue:
+ spi_unregister_master(master);
+
+fail_master_register:
+ if (tspi->tx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->tx_buf, tspi->tx_buf_phys);
+fail_tx_buf_alloc:
+ if (tspi->tx_dma)
+ tegra_dma_free_channel(tspi->tx_dma);
+fail_tx_dma_alloc:
+ if (tspi->rx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->rx_buf, tspi->rx_buf_phys);
+fail_rx_buf_alloc:
+ if (tspi->rx_dma)
+ tegra_dma_free_channel(tspi->rx_dma);
+fail_rx_dma_alloc:
clk_put(tspi->clk);
-err2:
+fail_clk_get:
+ free_irq(tspi->irq, tspi);
+fail_irq_req:
iounmap(tspi->base);
-err1:
+fail_io_map:
release_mem_region(r->start, resource_size(r));
-err0:
+fail_no_mem:
spi_master_put(master);
return ret;
}
@@ -580,20 +1413,128 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
tspi = spi_master_get_devdata(master);
spi_unregister_master(master);
- tegra_dma_free_channel(tspi->rx_dma);
-
- dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- tspi->rx_bb, tspi->rx_bb_phys);
+ if (tspi->tx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->tx_buf, tspi->tx_buf_phys);
+ if (tspi->tx_dma)
+ tegra_dma_free_channel(tspi->tx_dma);
+ if (tspi->rx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->rx_buf, tspi->rx_buf_phys);
+ if (tspi->rx_dma)
+ tegra_dma_free_channel(tspi->rx_dma);
+
+ if (tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
clk_put(tspi->clk);
iounmap(tspi->base);
+ destroy_workqueue(tspi->spi_workqueue);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
return 0;
}
+#ifdef CONFIG_PM
+static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned limit = 50;
+ unsigned long flags;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+ spin_lock_irqsave(&tspi->lock, flags);
+
+	/* Wait for all transfers to complete */
+ if (!list_empty(&tspi->queue))
+		dev_warn(&pdev->dev, "The transfer list is not empty. "
+			"Waiting up to %d ms for transfers to complete\n",
+ limit * 20);
+
+ while (!list_empty(&tspi->queue) && limit--) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&tspi->lock, flags);
+ }
+
+	/* Wait only for the current transfer to complete */
+ tspi->is_suspended = true;
+ if (!list_empty(&tspi->queue)) {
+ limit = 50;
+		dev_err(&pdev->dev, "All transfers have not completed. "
+			"Waiting up to %d ms for the current transfer to complete\n",
+ limit * 20);
+ while (tspi->is_transfer_in_progress && limit--) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&tspi->lock, flags);
+ }
+ }
+
+ if (tspi->is_transfer_in_progress) {
+		dev_err(&pdev->dev, "SPI transfer is in progress. "
+			"Avoiding suspend\n");
+ tspi->is_suspended = false;
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ if (tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ return 0;
+}
+
+static int spi_tegra_resume(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int single_xfer = 0;
+ unsigned long flags;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ spi_tegra_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ if (!tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ tspi->cur_speed = 0;
+ tspi->is_suspended = false;
+ if (!list_empty(&tspi->queue)) {
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ spi = m->state;
+ single_xfer = list_is_singular(&m->transfers);
+ m->actual_length = 0;
+ m->status = 0;
+ t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ tspi->is_transfer_in_progress = true;
+ }
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ if (t)
+ spi_tegra_start_transfer(spi, t, true, single_xfer);
+ return 0;
+}
+#endif
+
MODULE_ALIAS("platform:spi_tegra");
#ifdef CONFIG_OF
@@ -613,13 +1554,17 @@ static struct platform_driver spi_tegra_driver = {
.of_match_table = spi_tegra_of_match_table,
},
.remove = __devexit_p(spi_tegra_remove),
+#ifdef CONFIG_PM
+ .suspend = spi_tegra_suspend,
+ .resume = spi_tegra_resume,
+#endif
};
static int __init spi_tegra_init(void)
{
return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
}
-module_init(spi_tegra_init);
+subsys_initcall(spi_tegra_init);
static void __exit spi_tegra_exit(void)
{
diff --git a/drivers/spi/spi_slave_tegra.c b/drivers/spi/spi_slave_tegra.c
new file mode 100644
index 000000000000..3153ad54fb01
--- /dev/null
+++ b/drivers/spi/spi_slave_tegra.c
@@ -0,0 +1,1399 @@
+/*
+ * Driver for Nvidia TEGRA spi controller in slave mode.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi-tegra.h>
+
+#include <mach/dma.h>
+#include <mach/clk.h>
+#include <mach/spi.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_FIFO_DEPTH 32
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+
+static const unsigned long spi_tegra_req_sels[] = {
+ TEGRA_DMA_REQ_SEL_SL2B1,
+ TEGRA_DMA_REQ_SEL_SL2B2,
+ TEGRA_DMA_REQ_SEL_SL2B3,
+ TEGRA_DMA_REQ_SEL_SL2B4,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ TEGRA_DMA_REQ_SEL_SL2B5,
+ TEGRA_DMA_REQ_SEL_SL2B6,
+#endif
+
+};
+
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | \
+ RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 4
+
+struct spi_tegra_data {
+ struct spi_master *master;
+ struct platform_device *pdev;
+ spinlock_t lock;
+ char port_name[32];
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long phys;
+ unsigned irq;
+
+ u32 cur_speed;
+
+ struct list_head queue;
+ struct spi_transfer *cur;
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+
+ unsigned cur_direction;
+
+ bool is_dma_allowed;
+
+ struct tegra_dma_req rx_dma_req;
+ struct tegra_dma_channel *rx_dma;
+ u32 *rx_buf;
+ dma_addr_t rx_buf_phys;
+ unsigned cur_rx_pos;
+
+ struct tegra_dma_req tx_dma_req;
+ struct tegra_dma_channel *tx_dma;
+ u32 *tx_buf;
+ dma_addr_t tx_buf_phys;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ bool is_clkon_always;
+ bool clk_state;
+ bool is_suspended;
+
+ bool is_hw_based_cs;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 rx_complete;
+ u32 tx_complete;
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ unsigned long packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ callback client_slave_ready_cb;
+ void *client_data;
+
+ struct spi_clk_parent *parent_clk_list;
+ int parent_clk_count;
+ unsigned long max_rate;
+ unsigned long max_parent_rate;
+ int min_div;
+};
+
+static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
+ unsigned long reg)
+{
+	BUG_ON(!tspi->clk_state);
+	return readl(tspi->base + reg);
+}
+
+static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
+ unsigned long val, unsigned long reg)
+{
+	BUG_ON(!tspi->clk_state);
+	writel(val, tspi->base + reg);
+}
+
+int spi_tegra_register_callback(struct spi_device *spi, callback func,
+ void *client_data)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+
+ if (!tspi || !func)
+ return -EINVAL;
+ tspi->client_slave_ready_cb = func;
+ tspi->client_data = client_data;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_tegra_register_callback);
+
+static void spi_tegra_clear_status(struct spi_tegra_data *tspi)
+{
+ unsigned long val;
+ unsigned long val_write = 0;
+
+ val = spi_tegra_readl(tspi, SLINK_STATUS);
+
+ val_write = SLINK_RDY;
+ if (val & SLINK_TX_OVF)
+ val_write |= SLINK_TX_OVF;
+ if (val & SLINK_RX_OVF)
+ val_write |= SLINK_RX_OVF;
+ if (val & SLINK_RX_UNF)
+ val_write |= SLINK_RX_UNF;
+ if (val & SLINK_TX_UNF)
+ val_write |= SLINK_TX_UNF;
+
+ spi_tegra_writel(tspi, val_write, SLINK_STATUS);
+}
+
+static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned long val;
+
+ switch (tspi->bytes_per_word) {
+ case 0:
+ val = SLINK_PACK_SIZE_4;
+ break;
+ case 1:
+ val = SLINK_PACK_SIZE_8;
+ break;
+ case 2:
+ val = SLINK_PACK_SIZE_16;
+ break;
+ case 4:
+ val = SLINK_PACK_SIZE_32;
+ break;
+ default:
+ val = 0;
+ }
+ return val;
+}
+
+static unsigned spi_tegra_calculate_curr_xfer_param(
+ struct spi_device *spi, struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+	unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+ tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
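+	/* Packed mode packs multiple words into each 32-bit FIFO entry and
+	 * is only usable when words tile it evenly, i.e. for 8-bit and
+	 * 16-bit words; other word sizes use one FIFO entry per word. */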
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = spi_tegra_get_packed_size(tspi, t);
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = remain_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = remain_len/tspi->bytes_per_word;
+ }
+	/* The whole transfer must complete in one shot */
+ if (tspi->curr_dma_words * tspi->bytes_per_word != t->len) {
+		dev_err(&tspi->pdev->dev, "The requested length cannot be "
+			"transferred in one shot\n");
+ BUG();
+ }
+ return total_fifo_words;
+}
+
+static unsigned spi_tegra_fill_tx_fifo_from_client_txbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ unsigned long fifo_status;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int written_words;
+
+ fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ nbytes = tspi->curr_dma_words * tspi->bytes_per_word;
+ max_n_32bit = (min(nbytes, tx_empty_count*4) - 1)/4 + 1;
+ for (count = 0; count < max_n_32bit; ++count) {
+ x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (*tx_buf++) << (i*8);
+ spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ written_words = min(max_n_32bit * tspi->words_per_32bit,
+ tspi->curr_dma_words);
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ nbytes = max_n_32bit * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; ++count) {
+ x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ ++i, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ written_words = max_n_32bit;
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int spi_tegra_read_rx_fifo_to_client_rxbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ unsigned long fifo_status;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+ unsigned i, count;
+ unsigned long x;
+	unsigned int read_words = 0;
+ unsigned len;
+
+ fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ dev_dbg(&tspi->pdev->dev, "Rx fifo count %d\n", rx_full_count);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; ++count) {
+ x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); ++i, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ for (count = 0; count < rx_full_count; ++count) {
+ x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); ++i)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void spi_tegra_copy_client_txbuf_to_spi_txbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int x;
+
+ for (count = 0; count < tspi->curr_dma_words; ++count) {
+ x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ ++i, consume--)
+ x |= ((*tx_buf++) << i*8);
+ tspi->tx_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+}
+
+static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ unsigned int x;
+ for (count = 0; count < tspi->curr_dma_words; ++count) {
+ x = tspi->rx_buf[count];
+ for (i = 0; (i < tspi->bytes_per_word); ++i)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+}
+
+static int spi_tegra_start_dma_based_transfer(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned long test_val;
+ unsigned int len;
+ int ret = 0;
+
+ INIT_COMPLETION(tspi->rx_dma_complete);
+ INIT_COMPLETION(tspi->tx_dma_complete);
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ tspi->tx_dma_req.size = len;
+ ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
+ if (ret < 0) {
+			dev_err(&tspi->pdev->dev, "Error in starting tx dma, "
+				"error = %d\n", ret);
+ return ret;
+ }
+
+		/* Wait for tx fifo to be filled before starting slink */
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ while (!(test_val & SLINK_TX_FULL))
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ tspi->rx_dma_req.size = len;
+ ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+ if (ret < 0) {
+			dev_err(&tspi->pdev->dev, "Error in starting rx dma, "
+				"error = %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tegra_dma_dequeue_req(tspi->tx_dma,
+ &tspi->tx_dma_req);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
+
+static int spi_tegra_start_cpu_based_transfer(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned curr_words;
+
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ curr_words = spi_tegra_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ curr_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(curr_words - 1);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
+}
+
+static void set_best_clk_source(struct spi_tegra_data *tspi,
+ unsigned long speed)
+{
+ long new_rate;
+ unsigned long err_rate;
+ int rate = speed * 4;
+ unsigned int fin_err = speed * 4;
+ int final_index = -1;
+ int count;
+ int ret;
+ struct clk *pclk;
+ unsigned long prate, crate, nrate;
+ unsigned long cdiv;
+
+ if (!tspi->parent_clk_count || !tspi->parent_clk_list)
+ return;
+
+	/* make sure the divisor is not less than min_div */
+ pclk = clk_get_parent(tspi->clk);
+ prate = clk_get_rate(pclk);
+ crate = clk_get_rate(tspi->clk);
+ cdiv = DIV_ROUND_UP(prate, crate);
+ if (cdiv < tspi->min_div) {
+ nrate = DIV_ROUND_UP(prate, tspi->min_div);
+ clk_set_rate(tspi->clk, nrate);
+ }
+
+ for (count = 0; count < tspi->parent_clk_count; ++count) {
+ if (!tspi->parent_clk_list[count].parent_clk)
+ continue;
+ ret = clk_set_parent(tspi->clk,
+ tspi->parent_clk_list[count].parent_clk);
+ if (ret < 0) {
+ dev_warn(&tspi->pdev->dev, "Error in setting parent "
+				"clk src %s\n",
+ tspi->parent_clk_list[count].name);
+ continue;
+ }
+
+ new_rate = clk_round_rate(tspi->clk, rate);
+ if (new_rate < 0)
+ continue;
+
+ err_rate = abs(new_rate - rate);
+ if (err_rate < fin_err) {
+ final_index = count;
+ fin_err = err_rate;
+ }
+ }
+
+ if (final_index >= 0) {
+ dev_info(&tspi->pdev->dev, "Setting clk_src %s\n",
+ tspi->parent_clk_list[final_index].name);
+ clk_set_parent(tspi->clk,
+ tspi->parent_clk_list[final_index].parent_clk);
+ }
+}
+
+static void spi_tegra_start_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ unsigned long command;
+ unsigned long command2;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (speed != tspi->cur_speed) {
+ set_best_clk_source(tspi, speed);
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur = t;
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->rx_complete = 0;
+ tspi->tx_complete = 0;
+ total_fifo_words = spi_tegra_calculate_curr_xfer_param(spi, tspi, t);
+
+ command2 = tspi->def_command2_reg;
+ if (!tspi->is_clkon_always) {
+ if (!tspi->clk_state) {
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ }
+ }
+
+ spi_tegra_clear_status(tspi);
+
+ command = tspi->def_command_reg;
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command |= SLINK_CS_SW;
+
+ command &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA;
+ if (spi->mode & SPI_CPHA)
+ command |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ command |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ spi_tegra_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
+ dev_dbg(&tspi->pdev->dev, "The def 0x%x and written 0x%lx\n",
+ tspi->def_command_reg, command);
+
+ command2 &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ command2 |= SLINK_SS_EN_CS(spi->chip_select);
+ spi_tegra_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
+
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = spi_tegra_start_dma_based_transfer(tspi, t);
+ else
+ ret = spi_tegra_start_cpu_based_transfer(tspi, t);
+ WARN_ON(ret < 0);
+
+ if (tspi->client_slave_ready_cb)
+ tspi->client_slave_ready_cb(tspi->client_data);
+}
+
+static void spi_tegra_start_message(struct spi_device *spi,
+ struct spi_message *m)
+{
+ struct spi_transfer *t;
+ m->actual_length = 0;
+ m->status = 0;
+ t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
+ spi_tegra_start_transfer(spi, t);
+}
+
+static int spi_tegra_setup(struct spi_device *spi)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long cs_bit;
+ unsigned long val;
+ unsigned long flags;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
+ switch (spi->chip_select) {
+ case 0:
+ cs_bit = SLINK_CS_POLARITY;
+ break;
+
+ case 1:
+ cs_bit = SLINK_CS_POLARITY1;
+ break;
+
+ case 2:
+ cs_bit = SLINK_CS_POLARITY2;
+ break;
+
+ case 3:
+ cs_bit = SLINK_CS_POLARITY3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_bit;
+ else
+ val &= ~cs_bit;
+ tspi->def_command_reg |= val;
+
+ if (!tspi->is_clkon_always && !tspi->clk_state) {
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ }
+ spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ if (!tspi->is_clkon_always && tspi->clk_state) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return 0;
+}
+
+static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ unsigned long flags;
+ int was_empty;
+ int bytes_per_word;
+ u8 bits_per_word;
+ int fifo_word;
+
+ /* Support only one transfer per message */
+ if (!list_is_singular(&m->transfers))
+ return -EINVAL;
+
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
+ if (t->bits_per_word < 0 || t->bits_per_word > 32)
+ return -EINVAL;
+
+ if (t->len == 0)
+ return -EINVAL;
+
+ bits_per_word = (t->bits_per_word) ? : spi->bits_per_word;
+
+	/* Check that the transfer length is a whole number of words */
+ bytes_per_word = (bits_per_word + 7)/8;
+
+ if (t->len % bytes_per_word != 0)
+ return -EINVAL;
+
+ if (!t->rx_buf && !t->tx_buf)
+ return -EINVAL;
+
+ if ((bits_per_word == 8) || (bits_per_word == 16))
+ fifo_word = t->len/4;
+ else
+ fifo_word = t->len/bytes_per_word;
+ if (fifo_word >= tspi->max_buf_size/4)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ if (WARN_ON(tspi->is_suspended)) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return -EBUSY;
+ }
+
+ m->state = spi;
+
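+ /*
+ * If the queue was idle, start this message immediately; otherwise the
+ * completion handler picks it up when the current transfer finishes.
+ */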
+ was_empty = list_empty(&tspi->queue);
+ list_add_tail(&m->queue, &tspi->queue);
+
+ if (was_empty)
+ spi_tegra_start_message(spi, m);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static void spi_tegra_curr_transfer_complete(struct spi_tegra_data *tspi,
+ unsigned err, unsigned cur_xfer_size)
+{
+ struct spi_message *m;
+ struct spi_device *spi;
+
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ if (err)
+ m->status = -EIO;
+ spi = m->state;
+
+ m->actual_length += cur_xfer_size;
+ list_del(&m->queue);
+ m->complete(m->context);
+ if (!list_empty(&tspi->queue)) {
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ spi = m->state;
+ spi_tegra_start_message(spi, m);
+ } else {
+ spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ spi_tegra_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ if (!tspi->is_clkon_always) {
+ if (tspi->clk_state) {
+ /* Provide delay to stabilize the signal state */
+ udelay(10);
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ }
+ }
+}
+
+static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ complete(&tspi->tx_dma_complete);
+}
+
+static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ complete(&tspi->rx_dma_complete);
+}
+
+static void handle_cpu_based_xfer(void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+ struct spi_transfer *t = tspi->cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
+ __func__, tspi->status_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ WARN_ON(1);
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len);
+ goto exit;
+ }
+
+ dev_vdbg(&tspi->pdev->dev, " Current direction %x\n",
+ tspi->cur_direction);
+ if (tspi->cur_direction & DATA_DIR_RX)
+ spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->cur_pos = tspi->cur_rx_pos;
+ else
+ WARN_ON(1);
+
+ dev_vdbg(&tspi->pdev->dev, "current position %d and length of the "
+ "transfer %d\n", tspi->cur_pos, t->len);
+ if (tspi->cur_pos == t->len) {
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len);
+ goto exit;
+ }
+
+ /* No transfer data should remain at this point */
+ BUG();
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return;
+}
+
+static irqreturn_t spi_tegra_isr_thread(int irq, void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+ struct spi_transfer *t = tspi->cur;
+ long wait_status;
+ int err = 0;
+ unsigned long flags;
+
+ if (!tspi->is_curr_dma_xfer) {
+ handle_cpu_based_xfer(context_data);
+ return IRQ_HANDLED;
+ }
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ tegra_dma_dequeue(tspi->tx_dma);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ tegra_dma_dequeue(tspi->tx_dma);
+ dev_err(&tspi->pdev->dev, "Error in Dma Tx "
+ "transfer\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ tegra_dma_dequeue(tspi->rx_dma);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ tegra_dma_dequeue(tspi->rx_dma);
+ dev_err(&tspi->pdev->dev, "Error in Dma Rx "
+ "transfer\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
+ __func__, tspi->status_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ WARN_ON(1);
+ spi_tegra_curr_transfer_complete(tspi, err, t->len);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ spi_tegra_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->cur_pos = tspi->cur_rx_pos;
+ else
+ WARN_ON(1);
+
+ if (tspi->cur_pos == t->len) {
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ /* No transfer data should remain at this point */
+ BUG();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t spi_tegra_isr(int irq, void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+
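+ /*
+ * Hard-IRQ half: latch the status register, record any FIFO
+ * overflow/underflow errors and clear them, then let the threaded
+ * handler finish the transfer.
+ */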
+ tspi->status_reg = spi_tegra_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ spi_tegra_clear_status(tspi);
+
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int __init spi_tegra_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+ int i;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *tspi);
+ if (master == NULL) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = spi_tegra_setup;
+ master->transfer = spi_tegra_transfer;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->pdev = pdev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto fail_no_mem;
+ }
+
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ dev_name(&pdev->dev))) {
+ ret = -EBUSY;
+ goto fail_no_mem;
+ }
+
+ tspi->phys = r->start;
+ tspi->base = ioremap(r->start, r->end - r->start + 1);
+ if (!tspi->base) {
+ dev_err(&pdev->dev, "can't ioremap iomem\n");
+ ret = -ENOMEM;
+ goto fail_io_map;
+ }
+
+ tspi->irq = platform_get_irq(pdev, 0);
+ if (unlikely(tspi->irq < 0)) {
+ dev_err(&pdev->dev, "can't find irq resource\n");
+ ret = -ENXIO;
+ goto fail_irq_req;
+ }
+
+ sprintf(tspi->port_name, "tegra_spi_%d", pdev->id);
+ ret = request_threaded_irq(tspi->irq, spi_tegra_isr,
+ spi_tegra_isr_thread, IRQF_DISABLED,
+ tspi->port_name, tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto fail_irq_req;
+ }
+
+ tspi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto fail_clk_get;
+ }
+
+ INIT_LIST_HEAD(&tspi->queue);
+
+ if (pdata) {
+ tspi->is_clkon_always = pdata->is_clkon_always;
+ tspi->is_dma_allowed = pdata->is_dma_based;
+ tspi->dma_buf_size = (pdata->max_dma_buffer) ?
+ pdata->max_dma_buffer : DEFAULT_SPI_DMA_BUF_LEN;
+ tspi->parent_clk_count = pdata->parent_clk_count;
+ tspi->parent_clk_list = pdata->parent_clk_list;
+ tspi->max_rate = pdata->max_rate;
+ } else {
+ tspi->is_clkon_always = false;
+ tspi->is_dma_allowed = true;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+ tspi->parent_clk_count = 0;
+ tspi->parent_clk_list = NULL;
+ tspi->max_rate = 0;
+ }
+
+ tspi->max_parent_rate = 0;
+ tspi->min_div = 0;
+
+ if (tspi->parent_clk_count) {
+ tspi->max_parent_rate = tspi->parent_clk_list[0].fixed_clk_rate;
+ for (i = 1; i < tspi->parent_clk_count; ++i) {
+ tspi->max_parent_rate = max(tspi->max_parent_rate,
+ tspi->parent_clk_list[i].fixed_clk_rate);
+ }
+ if (tspi->max_rate)
+ tspi->min_div = DIV_ROUND_UP(tspi->max_parent_rate,
+ tspi->max_rate);
+ }
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+
+ if (!tspi->is_dma_allowed)
+ goto skip_dma_alloc;
+
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+
+ tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
+ "spi_rx_%d", pdev->id);
+ if (!tspi->rx_dma) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto fail_rx_dma_alloc;
+ }
+
+ tspi->rx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
+ &tspi->rx_buf_phys, GFP_KERNEL);
+ if (!tspi->rx_buf) {
+ dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
+ ret = -ENOMEM;
+ goto fail_rx_buf_alloc;
+ }
+
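+ /*
+ * RX DMA request: copy from the fixed SLINK RX FIFO address (wrapped
+ * every 4 bytes) into the coherent bounce buffer.
+ */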
+ memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
+ tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
+ tspi->rx_dma_req.to_memory = 1;
+ tspi->rx_dma_req.dest_addr = tspi->rx_buf_phys;
+ tspi->rx_dma_req.virt_addr = tspi->rx_buf;
+ tspi->rx_dma_req.dest_bus_width = 32;
+ tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
+ tspi->rx_dma_req.source_bus_width = 32;
+ tspi->rx_dma_req.source_wrap = 4;
+ tspi->rx_dma_req.dest_wrap = 0;
+ tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
+ tspi->rx_dma_req.dev = tspi;
+
+ tspi->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
+ "spi_tx_%d", pdev->id);
+ if (!tspi->tx_dma) {
+ dev_err(&pdev->dev, "can not allocate tx dma channel\n");
+ ret = -ENODEV;
+ goto fail_tx_dma_alloc;
+ }
+
+ tspi->tx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
+ &tspi->tx_buf_phys, GFP_KERNEL);
+ if (!tspi->tx_buf) {
+ dev_err(&pdev->dev, "can not allocate tx bounce buffer\n");
+ ret = -ENOMEM;
+ goto fail_tx_buf_alloc;
+ }
+
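+ /*
+ * TX DMA request: copy from the coherent bounce buffer into the fixed
+ * SLINK TX FIFO address (wrapped every 4 bytes).
+ */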
+ memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
+ tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
+ tspi->tx_dma_req.to_memory = 0;
+ tspi->tx_dma_req.dest_addr = tspi->phys + SLINK_TX_FIFO;
+ tspi->tx_dma_req.virt_addr = tspi->tx_buf;
+ tspi->tx_dma_req.dest_bus_width = 32;
+ tspi->tx_dma_req.dest_wrap = 4;
+ tspi->tx_dma_req.source_wrap = 0;
+ tspi->tx_dma_req.source_addr = tspi->tx_buf_phys;
+ tspi->tx_dma_req.source_bus_width = 32;
+ tspi->tx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
+ tspi->tx_dma_req.dev = tspi;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ tspi->def_command_reg = SLINK_CS_SW;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+
+skip_dma_alloc:
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ spi_tegra_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ ret = spi_register_master(master);
+ if (!tspi->is_clkon_always) {
+ if (tspi->clk_state) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ }
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto fail_master_register;
+ }
+ return ret;
+
+fail_master_register:
+ if (tspi->tx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->tx_buf, tspi->tx_buf_phys);
+fail_tx_buf_alloc:
+ if (tspi->tx_dma)
+ tegra_dma_free_channel(tspi->tx_dma);
+fail_tx_dma_alloc:
+ if (tspi->rx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->rx_buf, tspi->rx_buf_phys);
+fail_rx_buf_alloc:
+ if (tspi->rx_dma)
+ tegra_dma_free_channel(tspi->rx_dma);
+fail_rx_dma_alloc:
+ clk_put(tspi->clk);
+fail_clk_get:
+ free_irq(tspi->irq, tspi);
+fail_irq_req:
+ iounmap(tspi->base);
+fail_io_map:
+ release_mem_region(r->start, (r->end - r->start) + 1);
+fail_no_mem:
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit spi_tegra_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+
+ if (tspi->tx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->tx_buf, tspi->tx_buf_phys);
+ if (tspi->tx_dma)
+ tegra_dma_free_channel(tspi->tx_dma);
+ if (tspi->rx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->rx_buf, tspi->rx_buf_phys);
+ if (tspi->rx_dma)
+ tegra_dma_free_channel(tspi->rx_dma);
+
+ if (tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+
+ clk_put(tspi->clk);
+ iounmap(tspi->base);
+
+ spi_master_put(master);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, (r->end - r->start) + 1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned long flags;
+ unsigned limit = 50;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+ spin_lock_irqsave(&tspi->lock, flags);
+ tspi->is_suspended = true;
+
+ WARN_ON(!list_empty(&tspi->queue));
+
+ while (!list_empty(&tspi->queue) && limit--) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&tspi->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ if (tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ return 0;
+}
+
+static int spi_tegra_resume(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned long flags;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ spi_tegra_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ if (!tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+
+ tspi->cur_speed = 0;
+ tspi->is_suspended = false;
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return 0;
+}
+#endif
+
+MODULE_ALIAS("platform:spi_slave_tegra");
+
+static struct platform_driver spi_tegra_driver = {
+ .driver = {
+ .name = "spi_slave_tegra",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(spi_tegra_remove),
+#ifdef CONFIG_PM
+ .suspend = spi_tegra_suspend,
+ .resume = spi_tegra_resume,
+#endif
+};
+
+static int __init spi_tegra_init(void)
+{
+ return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
+}
+subsys_initcall(spi_tegra_init);
+
+static void __exit spi_tegra_exit(void)
+{
+ platform_driver_unregister(&spi_tegra_driver);
+}
+module_exit(spi_tegra_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 06c9081d596d..8541202a32e4 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,6 +66,8 @@ source "drivers/staging/rts_pstor/Kconfig"
source "drivers/staging/frontier/Kconfig"
+source "drivers/staging/android/Kconfig"
+
source "drivers/staging/pohmelfs/Kconfig"
source "drivers/staging/phison/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index f3c5e33bb263..c491642760f4 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
+obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 000000000000..247194992374
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,95 @@
+menu "Android"
+
+config ANDROID
+ bool "Android Drivers"
+ default n
+ ---help---
+ Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+ bool "Android Binder IPC Driver"
+ default n
+
+config ANDROID_LOGGER
+ tristate "Android log driver"
+ default n
+
+config ANDROID_RAM_CONSOLE
+ bool "Android RAM buffer console"
+ default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ bool "Enable verbose console messages on Android RAM console"
+ default y
+ depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ bool "Android RAM Console Enable error correction"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+ depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC8
+ select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+ int "Android RAM Console Data data size"
+ default 128
+ help
+ Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+ int "Android RAM Console ECC size"
+ default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+ int "Android RAM Console Symbol size"
+ default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+ hex "Android RAM Console Polynomial"
+ default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+ default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+ default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+ default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+ default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
+
+endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+ bool "Start Android RAM console early"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+ hex "Android RAM console virtual address"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+ hex "Android RAM console buffer size"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_TIMED_OUTPUT
+ bool "Timed output class driver"
+ default y
+
+config ANDROID_TIMED_GPIO
+ tristate "Android timed gpio driver"
+ depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+ default n
+
+config ANDROID_LOW_MEMORY_KILLER
+ bool "Android Low Memory Killer"
+ default n
+ ---help---
+ Register processes to be killed when memory is low
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 000000000000..8e057e626d11
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
new file mode 100644
index 000000000000..e13b4c483407
--- /dev/null
+++ b/drivers/staging/android/binder.c
@@ -0,0 +1,3600 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = binder_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K 0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M 0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
+ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
+ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
+ BINDER_DEBUG_DEAD_BINDER = 1U << 4,
+ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
+ BINDER_DEBUG_READ_WRITE = 1U << 6,
+ BINDER_DEBUG_USER_REFS = 1U << 7,
+ BINDER_DEBUG_THREADS = 1U << 8,
+ BINDER_DEBUG_TRANSACTION = 1U << 9,
+ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
+ BINDER_DEBUG_FREE_BUFFER = 1U << 11,
+ BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+ struct kernel_param *kp)
+{
+ int ret;
+ ret = param_set_int(val, kp);
+ if (binder_stop_on_user_error < 2)
+ wake_up(&binder_user_error_wait);
+ return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+ do { \
+ if (binder_debug_mask & mask) \
+ printk(KERN_INFO x); \
+ } while (0)
+
+#define binder_user_error(x...) \
+ do { \
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+ printk(KERN_INFO x); \
+ if (binder_stop_on_user_error) \
+ binder_stop_on_user_error = 2; \
+ } while (0)
+
+enum binder_stat_types {
+ BINDER_STAT_PROC,
+ BINDER_STAT_THREAD,
+ BINDER_STAT_NODE,
+ BINDER_STAT_REF,
+ BINDER_STAT_DEATH,
+ BINDER_STAT_TRANSACTION,
+ BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+ int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int obj_created[BINDER_STAT_COUNT];
+ int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+ binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+ binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+};
+struct binder_transaction_log {
+ int next;
+ int full;
+ struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_transaction_log *log)
+{
+ struct binder_transaction_log_entry *e;
+ e = &log->entry[log->next];
+ memset(e, 0, sizeof(*e));
+ log->next++;
+ if (log->next == ARRAY_SIZE(log->entry)) {
+ log->next = 0;
+ log->full = 1;
+ }
+ return e;
+}
+
+struct binder_work {
+ struct list_head entry;
+ enum {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_NODE,
+ BINDER_WORK_DEAD_BINDER,
+ BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+ BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ } type;
+};
+
+struct binder_node {
+ int debug_id;
+ struct binder_work work;
+ union {
+ struct rb_node rb_node;
+ struct hlist_node dead_node;
+ };
+ struct binder_proc *proc;
+ struct hlist_head refs;
+ int internal_strong_refs;
+ int local_weak_refs;
+ int local_strong_refs;
+ void __user *ptr;
+ void __user *cookie;
+ unsigned has_strong_ref:1;
+ unsigned pending_strong_ref:1;
+ unsigned has_weak_ref:1;
+ unsigned pending_weak_ref:1;
+ unsigned has_async_transaction:1;
+ unsigned accept_fds:1;
+ unsigned min_priority:8;
+ struct list_head async_todo;
+};
+
+struct binder_ref_death {
+ struct binder_work work;
+ void __user *cookie;
+};
+
+struct binder_ref {
+ /* Lookups needed: */
+ /* node + proc => ref (transaction) */
+ /* desc + proc => ref (transaction, inc/dec ref) */
+ /* node => refs + procs (proc exit) */
+ int debug_id;
+ struct rb_node rb_node_desc;
+ struct rb_node rb_node_node;
+ struct hlist_node node_entry;
+ struct binder_proc *proc;
+ struct binder_node *node;
+ uint32_t desc;
+ int strong;
+ int weak;
+ struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned debug_id:29;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ uint8_t data[0];
+};
+
+enum binder_deferred_state {
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
+};
+
+struct binder_proc {
+ struct hlist_node proc_node;
+ struct rb_root threads;
+ struct rb_root nodes;
+ struct rb_root refs_by_desc;
+ struct rb_root refs_by_node;
+ int pid;
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct files_struct *files;
+ struct hlist_node deferred_work_node;
+ int deferred_work;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ struct list_head todo;
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+ int max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int ready_threads;
+ long default_priority;
+ struct dentry *debugfs_entry;
+};
+
+enum {
+ BINDER_LOOPER_STATE_REGISTERED = 0x01,
+ BINDER_LOOPER_STATE_ENTERED = 0x02,
+ BINDER_LOOPER_STATE_EXITED = 0x04,
+ BINDER_LOOPER_STATE_INVALID = 0x08,
+ BINDER_LOOPER_STATE_WAITING = 0x10,
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+ struct binder_proc *proc;
+ struct rb_node rb_node;
+ int pid;
+ int looper;
+ struct binder_transaction *transaction_stack;
+ struct list_head todo;
+ uint32_t return_error; /* Write failed, return error code in read buf */
+ uint32_t return_error2; /* Write failed, return error code in read */
+ /* buffer. Used when sending a reply to a dead process that */
+ /* we are also waiting on */
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+};
+
+struct binder_transaction {
+ int debug_id;
+ struct binder_work work;
+ struct binder_thread *from;
+ struct binder_transaction *from_parent;
+ struct binder_proc *to_proc;
+ struct binder_thread *to_thread;
+ struct binder_transaction *to_parent;
+ unsigned need_reply:1;
+ /* unsigned is_dead:1; */ /* not used at the moment */
+
+ struct binder_buffer *buffer;
+ unsigned int code;
+ unsigned int flags;
+ long priority;
+ long saved_priority;
+ uid_t sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+ struct files_struct *files = proc->files;
+ int fd, error;
+ struct fdtable *fdt;
+ unsigned long rlim_cur;
+ unsigned long irqs;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ error = -EMFILE;
+ spin_lock(&files->file_lock);
+
+repeat:
+ fdt = files_fdtable(files);
+ fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+ files->next_fd);
+
+ /*
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
+ rlim_cur = 0;
+ if (lock_task_sighand(proc->tsk, &irqs)) {
+ rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+ unlock_task_sighand(proc->tsk, &irqs);
+ }
+ if (fd >= rlim_cur)
+ goto out;
+
+ /* Do we need to expand the fd array or fd set? */
+ error = expand_files(files, fd);
+ if (error < 0)
+ goto out;
+
+ if (error) {
+ /*
+ * If we needed to expand the fs array we
+ * might have blocked - try again.
+ */
+ error = -EMFILE;
+ goto repeat;
+ }
+
+ FD_SET(fd, fdt->open_fds);
+ if (flags & O_CLOEXEC)
+ FD_SET(fd, fdt->close_on_exec);
+ else
+ FD_CLR(fd, fdt->close_on_exec);
+ files->next_fd = fd + 1;
+#if 1
+ /* Sanity check */
+ if (fdt->fd[fd] != NULL) {
+ printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+ fdt->fd[fd] = NULL;
+ }
+#endif
+ error = fd;
+
+out:
+ spin_unlock(&files->file_lock);
+ return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+ struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+
+ if (files == NULL)
+ return;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ BUG_ON(fdt->fd[fd] != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+ struct fdtable *fdt = files_fdtable(files);
+ __FD_CLR(fd, fdt->open_fds);
+ if (fd < files->next_fd)
+ files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+ struct file *filp;
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+ int retval;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+ goto out_unlock;
+ filp = fdt->fd[fd];
+ if (!filp)
+ goto out_unlock;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ FD_CLR(fd, fdt->close_on_exec);
+ __put_unused_fd(files, fd);
+ spin_unlock(&files->file_lock);
+ retval = filp_close(filp, files);
+
+ /* can't restart close syscall because file table entry was cleared */
+ if (unlikely(retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK))
+ retval = -EINTR;
+
+ return retval;
+
+out_unlock:
+ spin_unlock(&files->file_lock);
+ return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+ long min_nice;
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
+ }
+ min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "binder: %d: nice value %ld not allowed use "
+ "%ld instead\n", current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice < 20)
+ return;
+ binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
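+/*
+ * The usable size of a buffer is the gap between its data[] and the start
+ * of the next buffer in the address-ordered list, or the end of the mmapped
+ * area for the last buffer.
+ */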
+static size_t binder_buffer_size(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &proc->buffers))
+ return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ else
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: add free buffer, size %zd, "
+ "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+ void __user *user_ptr)
+{
+ struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else
+ return buffer;
+ }
+ return NULL;
+}
+
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct vm_struct tmp_area;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: %s pages %p-%p\n", proc->pid,
+ allocate ? "allocate" : "free", start, end);
+
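+ /*
+ * allocate != 0: back [start, end) with pages and map them into both
+ * the kernel area and the process's binder vma. allocate == 0: undo
+ * that mapping and free the pages again.
+ */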
+ if (end <= start)
+ return 0;
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(proc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = proc->vma;
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+ "map pages in userspace, no vma\n", proc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ struct page **page_array_ptr;
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (*page == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "for page at %p\n", proc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ tmp_area.addr = page_addr;
+ tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+ page_array_ptr = page;
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %p in kernel\n",
+ proc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + proc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %lx in userspace\n",
+ proc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return -ENOMEM;
+}
+
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+ size_t data_size,
+ size_t offsets_size, int is_async)
+{
+ struct rb_node *n = proc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size;
+
+ if (proc->vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+ proc->pid);
+ return NULL;
+ }
+
+ size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (size < data_size || size < offsets_size) {
+ binder_user_error("binder: %d: got transaction with invalid "
+ "size %zd-%zd\n", proc->pid, data_size, offsets_size);
+ return NULL;
+ }
+
+ if (is_async &&
+ proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd"
+ "failed, no async space left\n", proc->pid, size);
+ return NULL;
+ }
+
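+ /* Best-fit search of the free-buffer rb-tree, which is ordered by size. */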
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+ "no address space\n", proc->pid, size);
+ return NULL;
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_buffer_size(proc, buffer);
+ }
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got buff"
+ "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ if (binder_update_page_range(proc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+ return NULL;
+
+ rb_erase(best_fit, &proc->free_buffers);
+ buffer->free = 0;
+ binder_insert_allocated_buffer(proc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(proc, new_buffer);
+ }
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got "
+ "%p\n", proc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ if (is_async) {
+ proc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_alloc_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
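+/*
+ * Unlink a free buffer from the address-ordered list and release any page
+ * that is no longer shared with the neighbouring buffers.
+ */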
+static void binder_delete_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(proc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p "
+ "share page with %p\n", proc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer"
+ " %p share page with %p\n", proc->pid,
+ buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p do "
+ "not share page%s%s with with %p or %p\n",
+ proc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(proc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
+static void binder_free_buf(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *));
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_free_buf %p size %zd buffer"
+ "_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < proc->buffer);
+ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+ if (buffer->async_transaction) {
+ proc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_free_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ binder_update_page_range(proc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+ rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (next->free) {
+ rb_erase(&next->rb_node, &proc->free_buffers);
+ binder_delete_free_buffer(proc, next);
+ }
+ }
+ if (proc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+ if (prev->free) {
+ binder_delete_free_buffer(proc, buffer);
+ rb_erase(&prev->rb_node, &proc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ void __user *ptr)
+{
+ struct rb_node *n = proc->nodes.rb_node;
+ struct binder_node *node;
+
+ while (n) {
+ node = rb_entry(n, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ n = n->rb_left;
+ else if (ptr > node->ptr)
+ n = n->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ void __user *ptr,
+ void __user *cookie)
+{
+ struct rb_node **p = &proc->nodes.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_node *node;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ p = &(*p)->rb_left;
+ else if (ptr > node->ptr)
+ p = &(*p)->rb_right;
+ else
+ return NULL;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_NODE);
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &proc->nodes);
+ node->debug_id = ++binder_last_id;
+ node->proc = proc;
+ node->ptr = ptr;
+ node->cookie = cookie;
+ node->work.type = BINDER_WORK_NODE;
+ INIT_LIST_HEAD(&node->work.entry);
+ INIT_LIST_HEAD(&node->async_todo);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p created\n",
+ proc->pid, current->pid, node->debug_id,
+ node->ptr, node->cookie);
+ return node;
+}
+
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ if (strong) {
+ if (internal) {
+ if (target_list == NULL &&
+ node->internal_strong_refs == 0 &&
+ !(node == binder_context_mgr_node &&
+ node->has_strong_ref)) {
+ printk(KERN_ERR "binder: invalid inc strong "
+ "node for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ node->internal_strong_refs++;
+ } else
+ node->local_strong_refs++;
+ if (!node->has_strong_ref && target_list) {
+ list_del_init(&node->work.entry);
+ list_add_tail(&node->work.entry, target_list);
+ }
+ } else {
+ if (!internal)
+ node->local_weak_refs++;
+ if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+ if (target_list == NULL) {
+ printk(KERN_ERR "binder: invalid inc weak node "
+ "for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ list_add_tail(&node->work.entry, target_list);
+ }
+ }
+ return 0;
+}
+
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ if (strong) {
+ if (internal)
+ node->internal_strong_refs--;
+ else
+ node->local_strong_refs--;
+ if (node->local_strong_refs || node->internal_strong_refs)
+ return 0;
+ } else {
+ if (!internal)
+ node->local_weak_refs--;
+ if (node->local_weak_refs || !hlist_empty(&node->refs))
+ return 0;
+ }
+ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+ if (list_empty(&node->work.entry)) {
+ list_add_tail(&node->work.entry, &node->proc->todo);
+ wake_up_interruptible(&node->proc->wait);
+ }
+ } else {
+ if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+ !node->local_weak_refs) {
+ list_del_init(&node->work.entry);
+ if (node->proc) {
+ rb_erase(&node->rb_node, &node->proc->nodes);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: refless node %d deleted\n",
+ node->debug_id);
+ } else {
+ hlist_del(&node->dead_node);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: dead node %d deleted\n",
+ node->debug_id);
+ }
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ }
+ }
+
+ return 0;
+}
+
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ uint32_t desc)
+{
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if (desc < ref->desc)
+ n = n->rb_left;
+ else if (desc > ref->desc)
+ n = n->rb_right;
+ else
+ return ref;
+ }
+ return NULL;
+}
+
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node)
+{
+ struct rb_node *n;
+ struct rb_node **p = &proc->refs_by_node.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_ref *ref, *new_ref;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+ if (node < ref->node)
+ p = &(*p)->rb_left;
+ else if (node > ref->node)
+ p = &(*p)->rb_right;
+ else
+ return ref;
+ }
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (new_ref == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_REF);
+ new_ref->debug_id = ++binder_last_id;
+ new_ref->proc = proc;
+ new_ref->node = node;
+ rb_link_node(&new_ref->rb_node_node, parent, p);
+ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
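+ /*
+ * Assign the lowest unused descriptor; descriptor 0 is reserved for
+ * references to the context manager node.
+ */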
+ new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->desc > new_ref->desc)
+ break;
+ new_ref->desc = ref->desc + 1;
+ }
+
+ p = &proc->refs_by_desc.rb_node;
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+ if (new_ref->desc < ref->desc)
+ p = &(*p)->rb_left;
+ else if (new_ref->desc > ref->desc)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_ref->rb_node_desc, parent, p);
+ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+ if (node) {
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "node %d\n", proc->pid, new_ref->debug_id,
+ new_ref->desc, node->debug_id);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "dead node\n", proc->pid, new_ref->debug_id,
+ new_ref->desc);
+ }
+ return new_ref;
+}
+
+static void binder_delete_ref(struct binder_ref *ref)
+{
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d delete ref %d desc %d for "
+ "node %d\n", ref->proc->pid, ref->debug_id,
+ ref->desc, ref->node->debug_id);
+
+ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+ if (ref->strong)
+ binder_dec_node(ref->node, 1, 1);
+ hlist_del(&ref->node_entry);
+ binder_dec_node(ref->node, 0, 1);
+ if (ref->death) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d delete ref %d desc %d "
+ "has death notification\n", ref->proc->pid,
+ ref->debug_id, ref->desc);
+ list_del(&ref->death->work.entry);
+ kfree(ref->death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ }
+ kfree(ref);
+ binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
+{
+ int ret;
+ if (strong) {
+ if (ref->strong == 0) {
+ ret = binder_inc_node(ref->node, 1, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->strong++;
+ } else {
+ if (ref->weak == 0) {
+ ret = binder_inc_node(ref->node, 0, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->weak++;
+ }
+ return 0;
+}
+
+
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+ if (strong) {
+ if (ref->strong == 0) {
+ binder_user_error("binder: %d invalid dec strong, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->strong--;
+ if (ref->strong == 0) {
+ int ret;
+ ret = binder_dec_node(ref->node, strong, 1);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (ref->weak == 0) {
+ binder_user_error("binder: %d invalid dec weak, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->weak--;
+ }
+ if (ref->strong == 0 && ref->weak == 0)
+ binder_delete_ref(ref);
+ return 0;
+}
+
+static void binder_pop_transaction(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ if (target_thread) {
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+ }
+ t->need_reply = 0;
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
+static void binder_send_failed_reply(struct binder_transaction *t,
+ uint32_t error_code)
+{
+ struct binder_thread *target_thread;
+ BUG_ON(t->flags & TF_ONE_WAY);
+ while (1) {
+ target_thread = t->from;
+ if (target_thread) {
+ if (target_thread->return_error != BR_OK &&
+ target_thread->return_error2 == BR_OK) {
+ target_thread->return_error2 =
+ target_thread->return_error;
+ target_thread->return_error = BR_OK;
+ }
+ if (target_thread->return_error == BR_OK) {
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply for "
+ "transaction %d to %d:%d\n",
+ t->debug_id, target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction(target_thread, t);
+ target_thread->return_error = error_code;
+ wake_up_interruptible(&target_thread->wait);
+ } else {
+ printk(KERN_ERR "binder: reply failed, target "
+ "thread, %d:%d, has error code %d "
+ "already\n", target_thread->proc->pid,
+ target_thread->pid,
+ target_thread->return_error);
+ }
+ return;
+ } else {
+ struct binder_transaction *next = t->from_parent;
+
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply "
+ "for transaction %d, target dead\n",
+ t->debug_id);
+
+ binder_pop_transaction(target_thread, t);
+ if (next == NULL) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed,"
+ " no target thread at root\n");
+ return;
+ }
+ t = next;
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed, no target "
+ "thread -- retry %d\n", t->debug_id);
+ }
+ }
+}
+
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_buffer *buffer,
+ size_t *failed_at)
+{
+ size_t *offp, *off_end;
+ int debug_id = buffer->debug_id;
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+ proc->pid, buffer->debug_id,
+ buffer->data_size, buffer->offsets_size, failed_at);
+
+ if (buffer->target_node)
+ binder_dec_node(buffer->target_node, 1, 0);
+
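+ /*
+ * The offsets array follows the pointer-aligned data and gives the
+ * position of each flat_binder_object in the buffer; drop the reference
+ * that each object holds.
+ */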
+ offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ if (failed_at)
+ off_end = failed_at;
+ else
+ off_end = (void *)offp + buffer->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > buffer->data_size - sizeof(*fp) ||
+ buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ printk(KERN_ERR "binder: transaction release %d bad"
+ "offset %zd, size %zd\n", debug_id,
+ *offp, buffer->data_size);
+ continue;
+ }
+ fp = (struct flat_binder_object *)(buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad node %p\n", debug_id, fp->binder);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p\n",
+ node->debug_id, node->ptr);
+ binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad handle %ld\n", debug_id,
+ fp->handle);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, ref->node->debug_id);
+ binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ } break;
+
+ case BINDER_TYPE_FD:
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld\n", fp->handle);
+ if (failed_at)
+ task_close_fd(proc, fp->handle);
+ break;
+
+ default:
+ printk(KERN_ERR "binder: transaction release %d bad "
+ "object type %lx\n", debug_id, fp->type);
+ break;
+ }
+ }
+}
+
+static void binder_transaction(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_transaction_data *tr, int reply)
+{
+ struct binder_transaction *t;
+ struct binder_work *tcomplete;
+ size_t *offp, *off_end;
+ struct binder_proc *target_proc;
+ struct binder_thread *target_thread = NULL;
+ struct binder_node *target_node = NULL;
+ struct list_head *target_list;
+ wait_queue_head_t *target_wait;
+ struct binder_transaction *in_reply_to = NULL;
+ struct binder_transaction_log_entry *e;
+ uint32_t return_error;
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+ e->from_proc = proc->pid;
+ e->from_thread = thread->pid;
+ e->target_handle = tr->target.handle;
+ e->data_size = tr->data_size;
+ e->offsets_size = tr->offsets_size;
+
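+ /*
+ * For a reply, the target is taken from the caller's transaction stack;
+ * for a new transaction it is found via the target handle, falling back
+ * to the context manager node when the handle is 0.
+ */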
+ if (reply) {
+ in_reply_to = thread->transaction_stack;
+ if (in_reply_to == NULL) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with no transaction stack\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_empty_call_stack;
+ }
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ goto err_bad_call_stack;
+ }
+ thread->transaction_stack = in_reply_to->to_parent;
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ target_thread = NULL;
+ goto err_dead_binder;
+ }
+ target_proc = target_thread->proc;
+ } else {
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+ ref = binder_get_ref(proc, tr->target.handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction to invalid handle\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
+ target_node = ref->node;
+ } else {
+ target_node = binder_context_mgr_node;
+ if (target_node == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_no_context_mgr_node;
+ }
+ }
+ e->to_node = target_node->debug_id;
+ target_proc = target_node->proc;
+ if (target_proc == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+ struct binder_transaction *tmp;
+ tmp = thread->transaction_stack;
+ if (tmp->to_thread != thread) {
+ binder_user_error("binder: %d:%d got new "
+ "transaction with bad transaction stack"
+ ", transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, tmp->debug_id,
+ tmp->to_proc ? tmp->to_proc->pid : 0,
+ tmp->to_thread ?
+ tmp->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_call_stack;
+ }
+ while (tmp) {
+ if (tmp->from && tmp->from->proc == target_proc)
+ target_thread = tmp->from;
+ tmp = tmp->from_parent;
+ }
+ }
+ }
+ if (target_thread) {
+ e->to_thread = target_thread->pid;
+ target_list = &target_thread->todo;
+ target_wait = &target_thread->wait;
+ } else {
+ target_list = &target_proc->todo;
+ target_wait = &target_proc->wait;
+ }
+ e->to_proc = target_proc->pid;
+
+ /* TODO: reuse incoming transaction for reply */
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_t_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ if (tcomplete == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_tcomplete_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+ t->debug_id = ++binder_last_id;
+ e->debug_id = t->debug_id;
+
+ if (reply)
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_REPLY %d -> %d:%d, "
+ "data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_thread->pid,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ else
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_TRANSACTION %d -> "
+ "%d - node %d, data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_node->debug_id,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+ else
+ t->from = NULL;
+ t->sender_euid = proc->tsk->cred->euid;
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ if (t->buffer == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_alloc_buf_failed;
+ }
+ t->buffer->allow_user_free = 0;
+ t->buffer->debug_id = t->debug_id;
+ t->buffer->transaction = t;
+ t->buffer->target_node = target_node;
+ if (target_node)
+ binder_inc_node(target_node, 1, 0, NULL);
+
+ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "data ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "offsets ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offsets size, %zd\n",
+ proc->pid, thread->pid, tr->offsets_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)offp + tr->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ t->buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offset, %zd\n",
+ proc->pid, thread->pid, *offp);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_ref *ref;
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (node == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_new_node_failed;
+ }
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("binder: %d:%d sending u%p "
+ "node %d, cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ fp->binder, node->debug_id,
+ fp->cookie, node->cookie);
+ goto err_binder_get_ref_for_node_failed;
+ }
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (fp->type == BINDER_TYPE_BINDER)
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->handle = ref->desc;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ &thread->todo);
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p -> ref %d desc %d\n",
+ node->debug_id, node->ptr, ref->debug_id,
+ ref->desc);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction with invalid "
+ "handle, %ld\n", proc->pid,
+ thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
+ if (ref->node->proc == target_proc) {
+ if (fp->type == BINDER_TYPE_HANDLE)
+ fp->type = BINDER_TYPE_BINDER;
+ else
+ fp->type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%p\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (new_ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ fp->handle = new_ref->desc;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ } break;
+
+ case BINDER_TYPE_FD: {
+ int target_fd;
+ struct file *file;
+
+ if (reply) {
+ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+ binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+ } else if (!target_node->accept_fds) {
+ binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+
+ file = fget(fp->handle);
+ if (file == NULL) {
+ binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fget_failed;
+ }
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
+ fp->handle = target_fd;
+ } break;
+
+ default:
+ binder_user_error("binder: %d:%d got transactio"
+ "n with invalid object type, %lx\n",
+ proc->pid, thread->pid, fp->type);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_object_type;
+ }
+ }
+ if (reply) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ binder_pop_transaction(target_thread, in_reply_to);
+ } else if (!(t->flags & TF_ONE_WAY)) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ t->need_reply = 1;
+ t->from_parent = thread->transaction_stack;
+ thread->transaction_stack = t;
+ } else {
+ BUG_ON(target_node == NULL);
+ BUG_ON(t->buffer->async_transaction != 1);
+ if (target_node->has_async_transaction) {
+ target_list = &target_node->async_todo;
+ target_wait = NULL;
+ } else
+ target_node->has_async_transaction = 1;
+ }
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+ if (target_wait)
+ wake_up_interruptible(target_wait);
+ return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+ binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+ kfree(tcomplete);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+ proc->pid, thread->pid, return_error,
+ tr->data_size, tr->offsets_size);
+
+ {
+ struct binder_transaction_log_entry *fe;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
+ *fe = *e;
+ }
+
+ BUG_ON(thread->return_error != BR_OK);
+ if (in_reply_to) {
+ thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ } else
+ thread->return_error = return_error;
+}
+
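+/*
+ * Consume BC_* commands from the user-space write buffer: reference count
+ * changes, node incref/acquire completions, buffer frees, transactions and
+ * replies (via binder_transaction), looper registration and death
+ * notification requests.  *consumed is advanced as each command is handled.
+ */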
+int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ void __user *buffer, int size, signed long *consumed)
+{
+ uint32_t cmd;
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ while (ptr < end && thread->return_error == BR_OK) {
+ if (get_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+ binder_stats.bc[_IOC_NR(cmd)]++;
+ proc->stats.bc[_IOC_NR(cmd)]++;
+ thread->stats.bc[_IOC_NR(cmd)]++;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ case BC_ACQUIRE:
+ case BC_RELEASE:
+ case BC_DECREFS: {
+ uint32_t target;
+ struct binder_ref *ref;
+ const char *debug_string;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (target == 0 && binder_context_mgr_node &&
+ (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+ ref = binder_get_ref_for_node(proc,
+ binder_context_mgr_node);
+ if (ref->desc != target) {
+ binder_user_error("binder: %d:"
+ "%d tried to acquire "
+ "reference to desc 0, "
+ "got %d instead\n",
+ proc->pid, thread->pid,
+ ref->desc);
+ }
+ } else
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d refcou"
+ "nt change on invalid ref %d\n",
+ proc->pid, thread->pid, target);
+ break;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ debug_string = "IncRefs";
+ binder_inc_ref(ref, 0, NULL);
+ break;
+ case BC_ACQUIRE:
+ debug_string = "Acquire";
+ binder_inc_ref(ref, 1, NULL);
+ break;
+ case BC_RELEASE:
+ debug_string = "Release";
+ binder_dec_ref(ref, 1);
+ break;
+ case BC_DECREFS:
+ default:
+ debug_string = "DecRefs";
+ binder_dec_ref(ref, 0);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid, debug_string, ref->debug_id,
+ ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ break;
+ }
+ case BC_INCREFS_DONE:
+ case BC_ACQUIRE_DONE: {
+ void __user *node_ptr;
+ void *cookie;
+ struct binder_node *node;
+
+ if (get_user(node_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (get_user(cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ node = binder_get_node(proc, node_ptr);
+ if (node == NULL) {
+ binder_user_error("binder: %d:%d "
+ "%s u%p no match\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" :
+ "BC_ACQUIRE_DONE",
+ node_ptr);
+ break;
+ }
+ if (cookie != node->cookie) {
+ binder_user_error("binder: %d:%d %s u%p node %d"
+ " cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node_ptr, node->debug_id,
+ cookie, node->cookie);
+ break;
+ }
+ if (cmd == BC_ACQUIRE_DONE) {
+ if (node->pending_strong_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_ACQUIRE_DONE node %d has "
+ "no pending acquire request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_strong_ref = 0;
+ } else {
+ if (node->pending_weak_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_INCREFS_DONE node %d has "
+ "no pending increfs request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_weak_ref = 0;
+ }
+ binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s node %d ls %d lw %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ break;
+ }
+ case BC_ATTEMPT_ACQUIRE:
+ printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+ return -EINVAL;
+ case BC_ACQUIRE_RESULT:
+ printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+ return -EINVAL;
+
+ case BC_FREE_BUFFER: {
+ void __user *data_ptr;
+ struct binder_buffer *buffer;
+
+ if (get_user(data_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ buffer = binder_buffer_lookup(proc, data_ptr);
+ if (buffer == NULL) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p no match\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ if (!buffer->allow_user_free) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p matched "
+ "unreturned buffer\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_FREE_BUFFER,
+ "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ BUG_ON(!buffer->target_node->has_async_transaction);
+ if (list_empty(&buffer->target_node->async_todo))
+ buffer->target_node->has_async_transaction = 0;
+ else
+ list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ }
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_free_buf(proc, buffer);
+ break;
+ }
+
+ case BC_TRANSACTION:
+ case BC_REPLY: {
+ struct binder_transaction_data tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ break;
+ }
+
+ case BC_REGISTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "after BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ } else if (proc->requested_threads == 0) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "without request\n",
+ proc->pid, thread->pid);
+ } else {
+ proc->requested_threads--;
+ proc->requested_threads_started++;
+ }
+ thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ break;
+ case BC_ENTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_ENTER_LOOPER called after "
+ "BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ }
+ thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+ break;
+ case BC_EXIT_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_EXIT_LOOPER\n",
+ proc->pid, thread->pid);
+ thread->looper |= BINDER_LOOPER_STATE_EXITED;
+ break;
+
+ case BC_REQUEST_DEATH_NOTIFICATION:
+ case BC_CLEAR_DEATH_NOTIFICATION: {
+ uint32_t target;
+ void __user *cookie;
+ struct binder_ref *ref;
+ struct binder_ref_death *death;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d %s "
+ "invalid ref %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ target);
+ break;
+ }
+
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ cookie, ref->debug_id, ref->desc,
+ ref->strong, ref->weak, ref->node->debug_id);
+
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ if (ref->death) {
+ binder_user_error("binder: %d:%"
+ "d BC_REQUEST_DEATH_NOTI"
+ "FICATION death notific"
+ "ation already set\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ thread->return_error = BR_ERROR;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d "
+ "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ binder_stats_created(BINDER_STAT_DEATH);
+ INIT_LIST_HEAD(&death->work.entry);
+ death->cookie = cookie;
+ ref->death = death;
+ if (ref->node->proc == NULL) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&ref->death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&ref->death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } else {
+ if (ref->death == NULL) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion not active\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = ref->death;
+ if (death->cookie != cookie) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion cookie mismatch "
+ "%p != %p\n",
+ proc->pid, thread->pid,
+ death->cookie, cookie);
+ break;
+ }
+ ref->death = NULL;
+ if (list_empty(&death->work.entry)) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ } else {
+ BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+ death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+ }
+ }
+ } break;
+ case BC_DEAD_BINDER_DONE: {
+ struct binder_work *w;
+ void __user *cookie;
+ struct binder_ref_death *death = NULL;
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(void *);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ if (tmp_death->cookie == cookie) {
+ death = tmp_death;
+ break;
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+ proc->pid, thread->pid, cookie, death);
+ if (death == NULL) {
+ binder_user_error("binder: %d:%d BC_DEAD"
+ "_BINDER_DONE %p not found\n",
+ proc->pid, thread->pid, cookie);
+ break;
+ }
+
+ list_del_init(&death->work.entry);
+ if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } break;
+
+ default:
+ printk(KERN_ERR "binder: %d:%d unknown command %d\n",
+ proc->pid, thread->pid, cmd);
+ return -EINVAL;
+ }
+ *consumed = ptr - buffer;
+ }
+ return 0;
+}
+
+void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+ uint32_t cmd)
+{
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+ binder_stats.br[_IOC_NR(cmd)]++;
+ proc->stats.br[_IOC_NR(cmd)]++;
+ thread->stats.br[_IOC_NR(cmd)]++;
+ }
+}
+
+static int binder_has_proc_work(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ return !list_empty(&proc->todo) ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+ return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
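+/*
+ * Fill the user-space read buffer with BR_* commands.  Pending errors are
+ * delivered first; otherwise the thread sleeps (unless non_block) on its
+ * own todo list, or on the process todo list when it has no transaction
+ * stack and nothing queued, with binder_lock dropped across the wait.
+ * Queued binder_work items are converted into transaction, reference and
+ * death-notification commands, and BR_SPAWN_LOOPER is written at the start
+ * of the buffer when the process has run out of ready threads.
+ */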
+static int binder_thread_read(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user *buffer, int size,
+ signed long *consumed, int non_block)
+{
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ int ret = 0;
+ int wait_for_proc_work;
+
+ if (*consumed == 0) {
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ }
+
+retry:
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo);
+
+ if (thread->return_error != BR_OK && ptr < end) {
+ if (thread->return_error2 != BR_OK) {
+ if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (ptr == end)
+ goto done;
+ thread->return_error2 = BR_OK;
+ }
+ if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ thread->return_error = BR_OK;
+ goto done;
+ }
+
+
+ thread->looper |= BINDER_LOOPER_STATE_WAITING;
+ if (wait_for_proc_work)
+ proc->ready_threads++;
+ mutex_unlock(&binder_lock);
+ if (wait_for_proc_work) {
+ if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))) {
+ binder_user_error("binder: %d:%d ERROR: Thread waiting "
+ "for process work before calling BC_REGISTER_"
+ "LOOPER or BC_ENTER_LOOPER (state %x)\n",
+ proc->pid, thread->pid, thread->looper);
+ wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ }
+ binder_set_nice(proc->default_priority);
+ if (non_block) {
+ if (!binder_has_proc_work(proc, thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ } else {
+ if (non_block) {
+ if (!binder_has_thread_work(thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ }
+ mutex_lock(&binder_lock);
+ if (wait_for_proc_work)
+ proc->ready_threads--;
+ thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+ if (ret)
+ return ret;
+
+ while (1) {
+ uint32_t cmd;
+ struct binder_transaction_data tr;
+ struct binder_work *w;
+ struct binder_transaction *t = NULL;
+
+ if (!list_empty(&thread->todo))
+ w = list_first_entry(&thread->todo, struct binder_work, entry);
+ else if (!list_empty(&proc->todo) && wait_for_proc_work)
+ w = list_first_entry(&proc->todo, struct binder_work, entry);
+ else {
+ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+ goto retry;
+ break;
+ }
+
+ if (end - ptr < sizeof(tr) + 4)
+ break;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ t = container_of(w, struct binder_transaction, work);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ cmd = BR_TRANSACTION_COMPLETE;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+ "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+ proc->pid, thread->pid);
+
+ list_del(&w->entry);
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ case BINDER_WORK_NODE: {
+ struct binder_node *node = container_of(w, struct binder_node, work);
+ uint32_t cmd = BR_NOOP;
+ const char *cmd_name;
+ int strong = node->internal_strong_refs || node->local_strong_refs;
+ int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ if (weak && !node->has_weak_ref) {
+ cmd = BR_INCREFS;
+ cmd_name = "BR_INCREFS";
+ node->has_weak_ref = 1;
+ node->pending_weak_ref = 1;
+ node->local_weak_refs++;
+ } else if (strong && !node->has_strong_ref) {
+ cmd = BR_ACQUIRE;
+ cmd_name = "BR_ACQUIRE";
+ node->has_strong_ref = 1;
+ node->pending_strong_ref = 1;
+ node->local_strong_refs++;
+ } else if (!strong && node->has_strong_ref) {
+ cmd = BR_RELEASE;
+ cmd_name = "BR_RELEASE";
+ node->has_strong_ref = 0;
+ } else if (!weak && node->has_weak_ref) {
+ cmd = BR_DECREFS;
+ cmd_name = "BR_DECREFS";
+ node->has_weak_ref = 0;
+ }
+ if (cmd != BR_NOOP) {
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(node->ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (put_user(node->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s %d u%p c%p\n",
+ proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ } else {
+ list_del_init(&w->entry);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p deleted\n",
+ proc->pid, thread->pid, node->debug_id,
+ node->ptr, node->cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p state unchanged\n",
+ proc->pid, thread->pid, node->debug_id, node->ptr,
+ node->cookie);
+ }
+ }
+ } break;
+ case BINDER_WORK_DEAD_BINDER:
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+ struct binder_ref_death *death;
+ uint32_t cmd;
+
+ death = container_of(w, struct binder_ref_death, work);
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+ cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+ else
+ cmd = BR_DEAD_BINDER;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(death->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p\n",
+ proc->pid, thread->pid,
+ cmd == BR_DEAD_BINDER ?
+ "BR_DEAD_BINDER" :
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ death->cookie);
+
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+ list_del(&w->entry);
+ kfree(death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ } else
+ list_move(&w->entry, &proc->delivered_death);
+ if (cmd == BR_DEAD_BINDER)
+ goto done; /* DEAD_BINDER notifications can cause transactions */
+ } break;
+ }
+
+ if (!t)
+ continue;
+
+ BUG_ON(t->buffer == NULL);
+ if (t->buffer->target_node) {
+ struct binder_node *target_node = t->buffer->target_node;
+ tr.target.ptr = target_node->ptr;
+ tr.cookie = target_node->cookie;
+ t->saved_priority = task_nice(current);
+ if (t->priority < target_node->min_priority &&
+ !(t->flags & TF_ONE_WAY))
+ binder_set_nice(t->priority);
+ else if (!(t->flags & TF_ONE_WAY) ||
+ t->saved_priority > target_node->min_priority)
+ binder_set_nice(target_node->min_priority);
+ cmd = BR_TRANSACTION;
+ } else {
+ tr.target.ptr = NULL;
+ tr.cookie = NULL;
+ cmd = BR_REPLY;
+ }
+ tr.code = t->code;
+ tr.flags = t->flags;
+ tr.sender_euid = t->sender_euid;
+
+ if (t->from) {
+ struct task_struct *sender = t->from->proc->tsk;
+ tr.sender_pid = task_tgid_nr_ns(sender,
+ current->nsproxy->pid_ns);
+ } else {
+ tr.sender_pid = 0;
+ }
+
+ tr.data_size = t->buffer->data_size;
+ tr.offsets_size = t->buffer->offsets_size;
+ tr.data.ptr.buffer = (void *)t->buffer->data +
+ proc->user_buffer_offset;
+ tr.data.ptr.offsets = tr.data.ptr.buffer +
+ ALIGN(t->buffer->data_size,
+ sizeof(void *));
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &tr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d %s %d %d:%d, cmd %d"
+ "size %zd-%zd ptr %p-%p\n",
+ proc->pid, thread->pid,
+ (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+ "BR_REPLY",
+ t->debug_id, t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0, cmd,
+ t->buffer->data_size, t->buffer->offsets_size,
+ tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+ list_del(&t->work.entry);
+ t->buffer->allow_user_free = 1;
+ if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ t->to_parent = thread->transaction_stack;
+ t->to_thread = thread;
+ thread->transaction_stack = t;
+ } else {
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
+ break;
+ }
+
+done:
+
+ *consumed = ptr - buffer;
+ if (proc->requested_threads + proc->ready_threads == 0 &&
+ proc->requested_threads_started < proc->max_threads &&
+ (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ /* the user-space code fails to spawn a new thread if we leave this out */) {
+ proc->requested_threads++;
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BR_SPAWN_LOOPER\n",
+ proc->pid, thread->pid);
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ return -EFAULT;
+ }
+ return 0;
+}
+
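+/*
+ * Discard work still queued when its thread or process goes away, sending
+ * a failed (dead) reply for any transaction that still expects one.
+ */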
+static void binder_release_work(struct list_head *list)
+{
+ struct binder_work *w;
+ while (!list_empty(list)) {
+ w = list_first_entry(list, struct binder_work, entry);
+ list_del_init(&w->entry);
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ struct binder_transaction *t;
+
+ t = container_of(w, struct binder_transaction, work);
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+ binder_send_failed_reply(t, BR_DEAD_REPLY);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ default:
+ break;
+ }
+ }
+
+}
+
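+/*
+ * Look up the calling thread in proc->threads (an rbtree keyed by pid),
+ * creating and inserting a new binder_thread on first use.
+ */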
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread = NULL;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &proc->threads.rb_node;
+
+ while (*p) {
+ parent = *p;
+ thread = rb_entry(parent, struct binder_thread, rb_node);
+
+ if (current->pid < thread->pid)
+ p = &(*p)->rb_left;
+ else if (current->pid > thread->pid)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+ if (*p == NULL) {
+ thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (thread == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->return_error = BR_OK;
+ thread->return_error2 = BR_OK;
+ }
+ return thread;
+}
+
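+/*
+ * Detach a thread from the transactions it is still involved in (failing
+ * an incoming transaction that expects a reply), release its queued work
+ * and free it.  Returns the number of transactions that were still active.
+ */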
+static int binder_free_thread(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct binder_transaction *t;
+ struct binder_transaction *send_reply = NULL;
+ int active_transactions = 0;
+
+ rb_erase(&thread->rb_node, &proc->threads);
+ t = thread->transaction_stack;
+ if (t && t->to_thread == thread)
+ send_reply = t;
+ while (t) {
+ active_transactions++;
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: release %d:%d transaction %d "
+ "%s, still active\n", proc->pid, thread->pid,
+ t->debug_id,
+ (t->to_thread == thread) ? "in" : "out");
+
+ if (t->to_thread == thread) {
+ t->to_proc = NULL;
+ t->to_thread = NULL;
+ if (t->buffer) {
+ t->buffer->transaction = NULL;
+ t->buffer = NULL;
+ }
+ t = t->to_parent;
+ } else if (t->from == thread) {
+ t->from = NULL;
+ t = t->from_parent;
+ } else
+ BUG();
+ }
+ if (send_reply)
+ binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+ binder_release_work(&thread->todo);
+ kfree(thread);
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ return active_transactions;
+}
+
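+/*
+ * Report POLLIN when the thread (or, for an idle thread with no
+ * transaction stack, its process) has work to read.
+ */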
+static unsigned int binder_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread = NULL;
+ int wait_for_proc_work;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo) && thread->return_error == BR_OK;
+ mutex_unlock(&binder_lock);
+
+ if (wait_for_proc_work) {
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ poll_wait(filp, &proc->wait, wait);
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ } else {
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ poll_wait(filp, &thread->wait, wait);
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ }
+ return 0;
+}
+
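+/*
+ * Main ioctl entry point.  BINDER_WRITE_READ drives binder_thread_write()
+ * and binder_thread_read(); the remaining commands set the thread limit,
+ * register the context manager, retire the calling thread or report the
+ * protocol version.  Runs under the global binder_lock, which
+ * binder_thread_read() drops while it waits for work.
+ */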
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread;
+ unsigned int size = _IOC_SIZE(cmd);
+ void __user *ubuf = (void __user *)arg;
+
+ /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret)
+ return ret;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+ if (thread == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ switch (cmd) {
+ case BINDER_WRITE_READ: {
+ struct binder_write_read bwr;
+ if (size != sizeof(struct binder_write_read)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+ proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
+ bwr.read_size, bwr.read_buffer);
+
+ if (bwr.write_size > 0) {
+ ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ if (ret < 0) {
+ bwr.read_consumed = 0;
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ if (bwr.read_size > 0) {
+ ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ if (!list_empty(&proc->todo))
+ wake_up_interruptible(&proc->wait);
+ if (ret < 0) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
+ bwr.read_consumed, bwr.read_size);
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_SET_MAX_THREADS:
+ if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case BINDER_SET_CONTEXT_MGR:
+ if (binder_context_mgr_node != NULL) {
+ printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ if (binder_context_mgr_uid != -1) {
+ if (binder_context_mgr_uid != current->cred->euid) {
+ printk(KERN_ERR "binder: BINDER_SET_"
+ "CONTEXT_MGR bad uid %d != %d\n",
+ current->cred->euid,
+ binder_context_mgr_uid);
+ ret = -EPERM;
+ goto err;
+ }
+ } else
+ binder_context_mgr_uid = current->cred->euid;
+ binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ if (binder_context_mgr_node == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ binder_context_mgr_node->local_weak_refs++;
+ binder_context_mgr_node->local_strong_refs++;
+ binder_context_mgr_node->has_strong_ref = 1;
+ binder_context_mgr_node->has_weak_ref = 1;
+ break;
+ case BINDER_THREAD_EXIT:
+ binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
+ proc->pid, thread->pid);
+ binder_free_thread(proc, thread);
+ thread = NULL;
+ break;
+ case BINDER_VERSION:
+ if (size != sizeof(struct binder_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = 0;
+err:
+ if (thread)
+ thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+ mutex_unlock(&binder_lock);
+ wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret && ret != -ERESTARTSYS)
+ printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+ return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ dump_stack();
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ proc->vma = NULL;
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+ .open = binder_vma_open,
+ .close = binder_vma_close,
+};
+
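+/*
+ * Set up the per-process transaction buffer (capped at 4MB): reserve a
+ * matching kernel vm area, record the user/kernel address offset, allocate
+ * the page-pointer array, map the first page and register the whole range
+ * as one free buffer.  Mappings with FORBIDDEN_MMAP_FLAGS set are rejected.
+ */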
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ struct binder_proc *proc = filp->private_data;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+ vma->vm_end = vma->vm_start + SZ_4M;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+
+ if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+ ret = -EPERM;
+ failure_string = "bad vm_flags";
+ goto err_bad_arg;
+ }
+ vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+ if (proc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ proc->buffer = area->addr;
+ proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+ if (proc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ proc->buffer_size = vma->vm_end - vma->vm_start;
+
+ vma->vm_ops = &binder_vm_ops;
+ vma->vm_private_data = proc;
+
+ if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = proc->buffer;
+ INIT_LIST_HEAD(&proc->buffers);
+ list_add(&buffer->entry, &proc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(proc, buffer);
+ proc->free_async_space = proc->buffer_size / 2;
+ barrier();
+ proc->files = get_files_struct(current);
+ proc->vma = vma;
+
+ /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
+ proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(proc->pages);
+ proc->pages = NULL;
+err_alloc_pages_failed:
+ vfree(proc->buffer);
+ proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+err_bad_arg:
+ printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
+ proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
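+/*
+ * Allocate and initialize a binder_proc for the opening process, add it to
+ * the global binder_procs list and create its debugfs entry.
+ */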
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ current->group_leader->pid, current->pid);
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+ get_task_struct(current);
+ proc->tsk = current;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+ mutex_lock(&binder_lock);
+ binder_stats_created(BINDER_STAT_PROC);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ proc->pid = current->group_leader->pid;
+ INIT_LIST_HEAD(&proc->delivered_death);
+ filp->private_data = proc;
+ mutex_unlock(&binder_lock);
+
+ if (binder_debugfs_dir_entry_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ }
+
+ return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+ struct binder_proc *proc = filp->private_data;
+
+ binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+ return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ int wake_count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+ wake_up_interruptible(&thread->wait);
+ wake_count++;
+ }
+ }
+ wake_up_interruptible_all(&proc->wait);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_flush: %d woke %d threads\n", proc->pid,
+ wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc = filp->private_data;
+ debugfs_remove(proc->debugfs_entry);
+ binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+ return 0;
+}
+
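+/*
+ * Final teardown of a binder_proc once the file is released: free all
+ * threads, move still-referenced nodes onto the dead-node list (queuing
+ * death notifications to their refs), delete outgoing refs, reclaim any
+ * buffers and pages still allocated, then free the proc itself.
+ */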
+static void binder_deferred_release(struct binder_proc *proc)
+{
+ struct hlist_node *pos;
+ struct binder_transaction *t;
+ struct rb_node *n;
+ int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+ BUG_ON(proc->vma);
+ BUG_ON(proc->files);
+
+ hlist_del(&proc->proc_node);
+ if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder_release: %d context_mgr_node gone\n",
+ proc->pid);
+ binder_context_mgr_node = NULL;
+ }
+
+ threads = 0;
+ active_transactions = 0;
+ while ((n = rb_first(&proc->threads))) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ threads++;
+ active_transactions += binder_free_thread(proc, thread);
+ }
+ nodes = 0;
+ incoming_refs = 0;
+ while ((n = rb_first(&proc->nodes))) {
+ struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+ nodes++;
+ rb_erase(&node->rb_node, &proc->nodes);
+ list_del_init(&node->work.entry);
+ if (hlist_empty(&node->refs)) {
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ struct binder_ref *ref;
+ int death = 0;
+
+ node->proc = NULL;
+ node->local_strong_refs = 0;
+ node->local_weak_refs = 0;
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ incoming_refs++;
+ if (ref->death) {
+ death++;
+ if (list_empty(&ref->death->work.entry)) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ } else
+ BUG();
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: node %d now dead, "
+ "refs %d, death %d\n", node->debug_id,
+ incoming_refs, death);
+ }
+ }
+ outgoing_refs = 0;
+ while ((n = rb_first(&proc->refs_by_desc))) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ outgoing_refs++;
+ binder_delete_ref(ref);
+ }
+ binder_release_work(&proc->todo);
+ buffers = 0;
+
+ while ((n = rb_first(&proc->allocated_buffers))) {
+ struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
+ rb_node);
+ t = buffer->transaction;
+ if (t) {
+ t->buffer = NULL;
+ buffer->transaction = NULL;
+ printk(KERN_ERR "binder: release proc %d, "
+ "transaction %d, not freed\n",
+ proc->pid, t->debug_id);
+ /*BUG();*/
+ }
+ binder_free_buf(proc, buffer);
+ buffers++;
+ }
+
+ binder_stats_deleted(BINDER_STAT_PROC);
+
+ page_count = 0;
+ if (proc->pages) {
+ int i;
+ for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ if (proc->pages[i]) {
+ void *page_addr = proc->buffer + i * PAGE_SIZE;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder_release: %d: "
+ "page %d at %p not freed\n",
+ proc->pid, i,
+ page_addr);
+ unmap_kernel_range((unsigned long)page_addr,
+ PAGE_SIZE);
+ __free_page(proc->pages[i]);
+ page_count++;
+ }
+ }
+ kfree(proc->pages);
+ vfree(proc->buffer);
+ }
+
+ put_task_struct(proc->tsk);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_release: %d threads %d, nodes %d (ref %d), "
+ "refs %d, active transactions %d, buffers %d, "
+ "pages %d\n",
+ proc->pid, threads, nodes, incoming_refs, outgoing_refs,
+ active_transactions, buffers, page_count);
+
+ kfree(proc);
+}
+
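+/*
+ * Worker for the deferred-work mechanism: put-files, flush and release
+ * requests queued by binder_defer_work() are drained here one proc at a
+ * time under binder_lock; the files_struct reference is dropped after the
+ * lock is released.
+ */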
+static void binder_deferred_func(struct work_struct *work)
+{
+ struct binder_proc *proc;
+ struct files_struct *files;
+
+ int defer;
+ do {
+ mutex_lock(&binder_lock);
+ mutex_lock(&binder_deferred_lock);
+ if (!hlist_empty(&binder_deferred_list)) {
+ proc = hlist_entry(binder_deferred_list.first,
+ struct binder_proc, deferred_work_node);
+ hlist_del_init(&proc->deferred_work_node);
+ defer = proc->deferred_work;
+ proc->deferred_work = 0;
+ } else {
+ proc = NULL;
+ defer = 0;
+ }
+ mutex_unlock(&binder_deferred_lock);
+
+ files = NULL;
+ if (defer & BINDER_DEFERRED_PUT_FILES) {
+ files = proc->files;
+ if (files)
+ proc->files = NULL;
+ }
+
+ if (defer & BINDER_DEFERRED_FLUSH)
+ binder_deferred_flush(proc);
+
+ if (defer & BINDER_DEFERRED_RELEASE)
+ binder_deferred_release(proc); /* frees proc */
+
+ mutex_unlock(&binder_lock);
+ if (files)
+ put_files_struct(files);
+ } while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+ mutex_lock(&binder_deferred_lock);
+ proc->deferred_work |= defer;
+ if (hlist_unhashed(&proc->deferred_work_node)) {
+ hlist_add_head(&proc->deferred_work_node,
+ &binder_deferred_list);
+ queue_work(binder_deferred_workqueue, &binder_deferred_work);
+ }
+ mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+ struct binder_transaction *t)
+{
+ seq_printf(m,
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ prefix, t->debug_id, t,
+ t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0,
+ t->to_proc ? t->to_proc->pid : 0,
+ t->to_thread ? t->to_thread->pid : 0,
+ t->code, t->flags, t->priority, t->need_reply);
+ if (t->buffer == NULL) {
+ seq_puts(m, " buffer free\n");
+ return;
+ }
+ if (t->buffer->target_node)
+ seq_printf(m, " node %d",
+ t->buffer->target_node->debug_id);
+ seq_printf(m, " size %zd:%zd data %p\n",
+ t->buffer->data_size, t->buffer->offsets_size,
+ t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
+{
+ struct binder_node *node;
+ struct binder_transaction *t;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ t = container_of(w, struct binder_transaction, work);
+ print_binder_transaction(m, transaction_prefix, t);
+ break;
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ seq_printf(m, "%stransaction complete\n", prefix);
+ break;
+ case BINDER_WORK_NODE:
+ node = container_of(w, struct binder_node, work);
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id, node->ptr, node->cookie);
+ break;
+ case BINDER_WORK_DEAD_BINDER:
+ seq_printf(m, "%shas dead binder\n", prefix);
+ break;
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ seq_printf(m, "%shas cleared dead binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+ seq_printf(m, "%shas cleared death notification\n", prefix);
+ break;
+ default:
+ seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+ break;
+ }
+}
+
+static void print_binder_thread(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
+{
+ struct binder_transaction *t;
+ struct binder_work *w;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ header_pos = m->count;
+ t = thread->transaction_stack;
+ while (t) {
+ if (t->from == thread) {
+ print_binder_transaction(m,
+ " outgoing transaction", t);
+ t = t->from_parent;
+ } else if (t->to_thread == thread) {
+ print_binder_transaction(m,
+ " incoming transaction", t);
+ t = t->to_parent;
+ } else {
+ print_binder_transaction(m, " bad transaction", t);
+ t = NULL;
+ }
+ }
+ list_for_each_entry(w, &thread->todo, entry) {
+ print_binder_work(m, " ", " pending transaction", w);
+ }
+ if (!print_always && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+ struct binder_ref *ref;
+ struct hlist_node *pos;
+ struct binder_work *w;
+ int count;
+
+ count = 0;
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ count++;
+
+ seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, node->ptr, node->cookie,
+ node->has_strong_ref, node->has_weak_ref,
+ node->local_strong_refs, node->local_weak_refs,
+ node->internal_strong_refs, count);
+ if (count) {
+ seq_puts(m, " proc");
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ seq_printf(m, " %d", ref->proc->pid);
+ }
+ seq_puts(m, "\n");
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work(m, " ",
+ " pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
+ ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+ struct binder_proc *proc, int print_all)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ header_pos = m->count;
+
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ print_binder_thread(m, rb_entry(n, struct binder_thread,
+ rb_node), print_all);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (print_all || node->has_async_transaction)
+ print_binder_node(m, node);
+ }
+ if (print_all) {
+ for (n = rb_first(&proc->refs_by_desc);
+ n != NULL;
+ n = rb_next(n))
+ print_binder_ref(m, rb_entry(n, struct binder_ref,
+ rb_node_desc));
+ }
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ list_for_each_entry(w, &proc->todo, entry)
+ print_binder_work(m, " ", " pending transaction", w);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ seq_puts(m, " has delivered dead binder\n");
+ break;
+ }
+ if (!print_all && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static const char *binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+ "proc",
+ "thread",
+ "node",
+ "ref",
+ "death",
+ "transaction",
+ "transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+ struct binder_stats *stats)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+ ARRAY_SIZE(binder_command_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+ if (stats->bc[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_command_strings[i], stats->bc[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+ ARRAY_SIZE(binder_return_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+ if (stats->br[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_return_strings[i], stats->br[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(binder_objstat_strings));
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(stats->obj_deleted));
+ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+ if (stats->obj_created[i] || stats->obj_deleted[i])
+ seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ binder_objstat_strings[i],
+ stats->obj_created[i] - stats->obj_deleted[i],
+ stats->obj_created[i]);
+ }
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+ struct binder_proc *proc)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ int count, strong, weak;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " threads: %d\n", count);
+ seq_printf(m, " requested threads: %d+%d/%d\n"
+ " ready threads %d\n"
+ " free async space %zd\n", proc->requested_threads,
+ proc->requested_threads_started, proc->max_threads,
+ proc->ready_threads, proc->free_async_space);
+ count = 0;
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " nodes: %d\n", count);
+ count = 0;
+ strong = 0;
+ weak = 0;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ count++;
+ strong += ref->strong;
+ weak += ref->weak;
+ }
+ seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
+
+ count = 0;
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " buffers: %d\n", count);
+
+ count = 0;
+ list_for_each_entry(w, &proc->todo, entry) {
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ count++;
+ break;
+ default:
+ break;
+ }
+ }
+ seq_printf(m, " pending transactions: %d\n", count);
+
+ print_binder_stats(m, " ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ struct binder_node *node;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder state:\n");
+
+ if (!hlist_empty(&binder_dead_nodes))
+ seq_puts(m, "dead nodes:\n");
+ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+ print_binder_node(m, node);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder stats:\n");
+
+ print_binder_stats(m, "", &binder_stats);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc_stats(m, proc);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder transactions:\n");
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 0);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc = m->private;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+ struct binder_transaction_log_entry *e)
+{
+ seq_printf(m,
+ "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ e->debug_id, (e->call_type == 2) ? "reply" :
+ ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+ e->from_thread, e->to_proc, e->to_thread, e->to_node,
+ e->target_handle, e->data_size, e->offsets_size);
+}
+
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+ struct binder_transaction_log *log = m->private;
+ int i;
+
+ if (log->full) {
+ for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ }
+ for (i = 0; i < log->next; i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ return 0;
+}
+
+static const struct file_operations binder_fops = {
+ .owner = THIS_MODULE,
+ .poll = binder_poll,
+ .unlocked_ioctl = binder_ioctl,
+ .mmap = binder_mmap,
+ .open = binder_open,
+ .flush = binder_flush,
+ .release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "binder",
+ .fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+ int ret;
+
+ binder_deferred_workqueue = create_singlethread_workqueue("binder");
+ if (!binder_deferred_workqueue)
+ return -ENOMEM;
+
+ binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+ if (binder_debugfs_dir_entry_root)
+ binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+ binder_debugfs_dir_entry_root);
+ ret = misc_register(&binder_miscdev);
+ if (binder_debugfs_dir_entry_root) {
+ debugfs_create_file("state",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_state_fops);
+ debugfs_create_file("stats",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_stats_fops);
+ debugfs_create_file("transactions",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_transactions_fops);
+ debugfs_create_file("transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log,
+ &binder_transaction_log_fops);
+ debugfs_create_file("failed_transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log_failed,
+ &binder_transaction_log_fops);
+ }
+ return ret;
+}
+
+device_initcall(binder_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
new file mode 100644
index 000000000000..863ae1ad5d55
--- /dev/null
+++ b/drivers/staging/android/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void *cookie;
+};
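+
+/*
+ * Illustrative sketch (not part of the ABI; names below are hypothetical):
+ * a transaction that passes one binder object embeds a flat_binder_object
+ * in its data buffer and records its position in the offsets buffer, e.g.
+ *
+ *	struct flat_binder_object obj = {
+ *		.type   = BINDER_TYPE_BINDER,
+ *		.flags  = FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ *		.binder = local_object,
+ *		.cookie = local_cookie,
+ *	};
+ *	memcpy(data + obj_pos, &obj, sizeof(obj));
+ *	offsets[0] = obj_pos;		(byte offset of the object within data)
+ *
+ * The driver rewrites the type and binder/handle fields as the buffer is
+ * delivered to the receiving process, as described above.
+ */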
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
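+
+/*
+ * Illustrative sketch of the user-space side (not normative; binder_fd,
+ * out_buf and in_buf are hypothetical): a thread drives the protocol by
+ * filling in a binder_write_read and issuing BINDER_WRITE_READ, e.g.
+ *
+ *	struct binder_write_read bwr = {
+ *		.write_buffer = (unsigned long) out_buf,  (BC_* commands)
+ *		.write_size   = out_len,
+ *		.read_buffer  = (unsigned long) in_buf,   (filled with BR_* commands)
+ *		.read_size    = sizeof(in_buf),
+ *	};
+ *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
+ *
+ * On return, write_consumed and read_consumed report how much of each
+ * buffer the driver consumed or filled in.
+ */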
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
+#define BINDER_THREAD_EXIT _IOW('b', 8, int)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
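+
+/*
+ * Illustrative sketch (not normative; binder_fd is a hypothetical descriptor
+ * for the opened binder device): user space should verify the protocol
+ * version before relying on the interface, roughly:
+ *
+ *	struct binder_version vers;
+ *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
+ *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
+ *		(refuse to use this driver)
+ */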
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * into the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
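+
+/*
+ * A minimal retry sketch for the cases above (illustrative only; bwr and
+ * binder_fd are as in the earlier sketch, other error handling omitted):
+ *
+ *	do {
+ *		ret = ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
+ *	} while (ret < 0 && errno == EINTR);
+ *	if (ret < 0 && errno == ECONNREFUSED)
+ *		exit(0);	(the driver is shutting this process down)
+ */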
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum BinderDriverCommandProtocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644
index 000000000000..fa76ce7678a6
--- /dev/null
+++ b/drivers/staging/android/logger.c
@@ -0,0 +1,616 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+ unsigned char *buffer;/* the ring buffer itself */
+ struct miscdevice misc; /* misc device representing the log */
+ wait_queue_head_t wq; /* wait queue for readers */
+ struct list_head readers; /* this log's readers */
+ struct mutex mutex; /* mutex protecting buffer */
+ size_t w_off; /* current write head offset */
+ size_t head; /* new readers start here */
+ size_t size; /* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+ struct logger_log *log; /* associated log */
+ struct list_head list; /* entry in logger_log's list */
+ size_t r_off; /* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+#define logger_offset(n) ((n) & (log->size - 1))
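+
+/*
+ * This relies on log->size being a power of two (see DEFINE_LOGGER_DEVICE
+ * below); e.g. for a 256KB log, logger_offset(256*1024 + 5) == 5.
+ */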
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 1) Need to quickly obtain the associated log during an I/O operation
+ * 2) Readers need to maintain state (logger_reader)
+ * 3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ return reader->log;
+ } else
+ return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the total length (header plus payload) of the entry
+ * starting at 'off', handling a length field that wraps around the end of
+ * the buffer.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+ __u16 val;
+
+ switch (log->size - off) {
+ case 1:
+ memcpy(&val, log->buffer + off, 1);
+ memcpy(((char *) &val) + 1, log->buffer, 1);
+ break;
+ default:
+ memcpy(&val, log->buffer + off, 2);
+ }
+
+ return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+ struct logger_reader *reader,
+ char __user *buf,
+ size_t count)
+{
+ size_t len;
+
+ /*
+ * We read from the log in two disjoint operations. First, we read from
+ * the current read head offset up to 'count' bytes or to the end of
+ * the log, whichever comes first.
+ */
+ len = min(count, log->size - reader->r_off);
+ if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ return -EFAULT;
+
+ /*
+ * Second, we read any remaining bytes, starting back at the head of
+ * the log.
+ */
+ if (count != len)
+ if (copy_to_user(buf + len, log->buffer, count - len))
+ return -EFAULT;
+
+ reader->r_off = logger_offset(reader->r_off + count);
+
+ return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is written to
+ * - Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+start:
+ while (1) {
+ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&log->mutex);
+ ret = (log->w_off == reader->r_off);
+ mutex_unlock(&log->mutex);
+ if (!ret)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&log->wq, &wait);
+ if (ret)
+ return ret;
+
+ mutex_lock(&log->mutex);
+
+ /* is there still something to read or did we race? */
+ if (unlikely(log->w_off == reader->r_off)) {
+ mutex_unlock(&log->mutex);
+ goto start;
+ }
+
+ /* get the size of the next entry */
+ ret = get_entry_len(log, reader->r_off);
+ if (count < ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* get exactly one entry from the log */
+ ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+ size_t count = 0;
+
+ do {
+ size_t nr = get_entry_len(log, off);
+ off = logger_offset(off + nr);
+ count += nr;
+ } while (count < len);
+
+ return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does the line
+ * from a to b cross c?
+ */
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+ if (b < a) {
+ if (a < c || b >= c)
+ return 1;
+ } else {
+ if (a < c && b >= c)
+ return 1;
+ }
+
+ return 0;
+}
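+
+/*
+ * Example: clock_interval(14, 2, 0) is true because the span from 14 to 2
+ * wraps around past offset 0, while clock_interval(2, 6, 8) is false
+ * because 8 does not lie between 2 and 6.
+ */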
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+ size_t old = log->w_off;
+ size_t new = logger_offset(old + len);
+ struct logger_reader *reader;
+
+ if (clock_interval(old, new, log->head))
+ log->head = get_next_entry(log, log->head, len);
+
+ list_for_each_entry(reader, &log->readers, list)
+ if (clock_interval(old, new, reader->r_off))
+ reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, buf, len);
+
+ if (count != len)
+ memcpy(log->buffer, buf + len, count - len);
+
+ log->w_off = logger_offset(log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+ const void __user *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+ return -EFAULT;
+
+ if (count != len)
+ if (copy_from_user(log->buffer, buf + len, count - len))
+ return -EFAULT;
+
+ log->w_off = logger_offset(log->w_off + count);
+
+ return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t ppos)
+{
+ struct logger_log *log = file_get_log(iocb->ki_filp);
+ size_t orig = log->w_off;
+ struct logger_entry header;
+ struct timespec now;
+ ssize_t ret = 0;
+
+ now = current_kernel_time();
+
+ header.pid = current->tgid;
+ header.tid = current->pid;
+ header.sec = now.tv_sec;
+ header.nsec = now.tv_nsec;
+ header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+ /* null writes succeed, return zero */
+ if (unlikely(!header.len))
+ return 0;
+
+ mutex_lock(&log->mutex);
+
+ /*
+ * Fix up any readers, pulling them forward to the first readable
+ * entry after (what will be) the new write offset. We do this now
+ * because if we partially fail, we can end up with clobbered log
+ * entries that encroach on the readable buffer.
+ */
+ fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+ do_write_log(log, &header, sizeof(struct logger_entry));
+
+ while (nr_segs-- > 0) {
+ size_t len;
+ ssize_t nr;
+
+ /* figure out how much of this vector we can keep */
+ len = min_t(size_t, iov->iov_len, header.len - ret);
+
+ /* write out this segment's payload */
+ nr = do_write_log_from_user(log, iov->iov_base, len);
+ if (unlikely(nr < 0)) {
+ log->w_off = orig;
+ mutex_unlock(&log->mutex);
+ return nr;
+ }
+
+ iov++;
+ ret += nr;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ /* wake up any blocked readers */
+ wake_up_interruptible(&log->wq);
+
+ return ret;
+}
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+ struct logger_log *log;
+ int ret;
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ log = get_log_from_minor(MINOR(inode->i_rdev));
+ if (!log)
+ return -ENODEV;
+
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader;
+
+ reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->log = log;
+ INIT_LIST_HEAD(&reader->list);
+
+ mutex_lock(&log->mutex);
+ reader->r_off = log->head;
+ list_add_tail(&reader->list, &log->readers);
+ mutex_unlock(&log->mutex);
+
+ file->private_data = reader;
+ } else
+ file->private_data = log;
+
+ return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ list_del(&reader->list);
+ kfree(reader);
+ }
+
+ return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+ struct logger_reader *reader;
+ struct logger_log *log;
+ unsigned int ret = POLLOUT | POLLWRNORM;
+
+ if (!(file->f_mode & FMODE_READ))
+ return ret;
+
+ reader = file->private_data;
+ log = reader->log;
+
+ poll_wait(file, &log->wq, wait);
+
+ mutex_lock(&log->mutex);
+ if (log->w_off != reader->r_off)
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct logger_log *log = file_get_log(file);
+ struct logger_reader *reader;
+ long ret = -ENOTTY;
+
+ mutex_lock(&log->mutex);
+
+ switch (cmd) {
+ case LOGGER_GET_LOG_BUF_SIZE:
+ ret = log->size;
+ break;
+ case LOGGER_GET_LOG_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off >= reader->r_off)
+ ret = log->w_off - reader->r_off;
+ else
+ ret = (log->size - reader->r_off) + log->w_off;
+ break;
+ case LOGGER_GET_NEXT_ENTRY_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off != reader->r_off)
+ ret = get_entry_len(log, reader->r_off);
+ else
+ ret = 0;
+ break;
+ case LOGGER_FLUSH_LOG:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ break;
+ }
+ list_for_each_entry(reader, &log->readers, list)
+ reader->r_off = log->w_off;
+ log->head = log->w_off;
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static const struct file_operations logger_fops = {
+ .owner = THIS_MODULE,
+ .read = logger_read,
+ .aio_write = logger_aio_write,
+ .poll = logger_poll,
+ .unlocked_ioctl = logger_ioctl,
+ .compat_ioctl = logger_ioctl,
+ .open = logger_open,
+ .release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+ .buffer = _buf_ ## VAR, \
+ .misc = { \
+ .minor = MISC_DYNAMIC_MINOR, \
+ .name = NAME, \
+ .fops = &logger_fops, \
+ .parent = NULL, \
+ }, \
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+ .readers = LIST_HEAD_INIT(VAR .readers), \
+ .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+ .w_off = 0, \
+ .head = 0, \
+ .size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
+DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+ if (log_main.misc.minor == minor)
+ return &log_main;
+ if (log_events.misc.minor == minor)
+ return &log_events;
+ if (log_radio.misc.minor == minor)
+ return &log_radio;
+ if (log_system.misc.minor == minor)
+ return &log_system;
+ return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+ int ret;
+
+ ret = misc_register(&log->misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "logger: failed to register misc "
+ "device for log '%s'!\n", log->misc.name);
+ return ret;
+ }
+
+ printk(KERN_INFO "logger: created %luK log '%s'\n",
+ (unsigned long) log->size >> 10, log->misc.name);
+
+ return 0;
+}
+
+static int __init logger_init(void)
+{
+ int ret;
+
+ ret = init_log(&log_main);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_events);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_radio);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_system);
+ if (unlikely(ret))
+ goto out;
+
+out:
+ return ret;
+}
+device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644
index 000000000000..2cb06e9d8f98
--- /dev/null
+++ b/drivers/staging/android/logger.h
@@ -0,0 +1,49 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+ __u16 len; /* length of the payload */
+ __u16 __pad; /* no matter what, we get 2 bytes of padding */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char msg[0]; /* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
+#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
+#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
+#define LOGGER_LOG_MAIN "log_main" /* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN (4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD \
+ (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO 0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
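+
+/*
+ * Illustrative reader sketch (not normative; the device path depends on how
+ * the misc device node is exposed):
+ *
+ *	int fd = open("/dev/log_main", O_RDONLY);
+ *	long next = ioctl(fd, LOGGER_GET_NEXT_ENTRY_LEN);
+ *	if (next > 0)
+ *		read(fd, buf, next);	(buf: hypothetical buffer of >= next bytes)
+ */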
+
+#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
new file mode 100644
index 000000000000..86d51959b29f
--- /dev/null
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -0,0 +1,213 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * The lowmemorykiller driver lets user-space specify a set of memory thresholds
+ * where processes with a range of oom_adj values will get killed. Specify the
+ * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
+ * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
+ * files take a comma separated list of numbers in ascending order.
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes
+ * with an oom_adj value of 8 or higher when the free memory drops below 4096 pages
+ * and kill processes with an oom_adj value of 0 or higher when the free memory
+ * drops below 1024 pages.
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked this can be very inaccurate
+ * and processes may not get killed until the normal oom killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+ 0,
+ 1,
+ 6,
+ 12,
+};
+static int lowmem_adj_size = 4;
+static size_t lowmem_minfree[6] = {
+ 3 * 512, /* 6MB */
+ 2 * 1024, /* 8MB */
+ 4 * 1024, /* 16MB */
+ 16 * 1024, /* 64MB */
+};
+static int lowmem_minfree_size = 4;
+
+static struct task_struct *lowmem_deathpending;
+static unsigned long lowmem_deathpending_timeout;
+
+#define lowmem_print(level, x...) \
+ do { \
+ if (lowmem_debug_level >= (level)) \
+ printk(x); \
+ } while (0)
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data);
+
+static struct notifier_block task_nb = {
+ .notifier_call = task_notify_func,
+};
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct task_struct *task = data;
+
+ if (task == lowmem_deathpending)
+ lowmem_deathpending = NULL;
+
+ return NOTIFY_OK;
+}
+
+static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct task_struct *p;
+ struct task_struct *selected = NULL;
+ int rem = 0;
+ int tasksize;
+ int i;
+ int min_adj = OOM_ADJUST_MAX + 1;
+ int selected_tasksize = 0;
+ int selected_oom_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+ int other_free = global_page_state(NR_FREE_PAGES);
+ int other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM);
+
+ /*
+ * If we already have a death outstanding, then bail out right away,
+ * indicating to vmscan that we have nothing further to offer on
+ * this pass.
+ */
+ if (lowmem_deathpending &&
+ time_before_eq(jiffies, lowmem_deathpending_timeout))
+ return 0;
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+ for (i = 0; i < array_size; i++) {
+ if (other_free < lowmem_minfree[i] &&
+ other_file < lowmem_minfree[i]) {
+ min_adj = lowmem_adj[i];
+ break;
+ }
+ }
+ if (sc->nr_to_scan > 0)
+ lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free, other_file,
+ min_adj);
+ rem = global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+ if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+ lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ return rem;
+ }
+ selected_oom_adj = min_adj;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ struct mm_struct *mm;
+ struct signal_struct *sig;
+ int oom_adj;
+
+ task_lock(p);
+ mm = p->mm;
+ sig = p->signal;
+ if (!mm || !sig) {
+ task_unlock(p);
+ continue;
+ }
+ oom_adj = sig->oom_adj;
+ if (oom_adj < min_adj) {
+ task_unlock(p);
+ continue;
+ }
+ tasksize = get_mm_rss(mm);
+ task_unlock(p);
+ if (tasksize <= 0)
+ continue;
+ if (selected) {
+ if (oom_adj < selected_oom_adj)
+ continue;
+ if (oom_adj == selected_oom_adj &&
+ tasksize <= selected_tasksize)
+ continue;
+ }
+ selected = p;
+ selected_tasksize = tasksize;
+ selected_oom_adj = oom_adj;
+ lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+ p->pid, p->comm, oom_adj, tasksize);
+ }
+ if (selected) {
+ lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+ selected->pid, selected->comm,
+ selected_oom_adj, selected_tasksize);
+ lowmem_deathpending = selected;
+ lowmem_deathpending_timeout = jiffies + HZ;
+ force_sig(SIGKILL, selected);
+ rem -= selected_tasksize;
+ }
+ lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ read_unlock(&tasklist_lock);
+ return rem;
+}
+
+static struct shrinker lowmem_shrinker = {
+ .shrink = lowmem_shrink,
+ .seeks = DEFAULT_SEEKS * 16
+};
+
+static int __init lowmem_init(void)
+{
+ task_free_register(&task_nb);
+ register_shrinker(&lowmem_shrinker);
+ return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+ unregister_shrinker(&lowmem_shrinker);
+ task_free_unregister(&task_nb);
+}
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+ S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+ S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
new file mode 100644
index 000000000000..cb42d899822e
--- /dev/null
+++ b/drivers/staging/android/ram_console.c
@@ -0,0 +1,443 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/platform_data/ram_console.h>
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+ uint32_t sig;
+ uint32_t start;
+ uint32_t size;
+ uint8_t data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+ ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+ for (i = 0; i < ECC_SIZE; i++)
+ ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ for (i = 0; i < ECC_SIZE; i++)
+ par[i] = ecc[i];
+ return decode_rs8(ram_console_rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+#endif
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+ struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int size = ECC_BLOCK_SIZE;
+#endif
+ memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+ par = ram_console_par_buffer +
+ (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+ do {
+ if (block + ECC_BLOCK_SIZE > buffer_end)
+ size = buffer_end - block;
+ ram_console_encode_rs8(block, size, par);
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ } while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ struct ram_console_buffer *buffer = ram_console_buffer;
+ uint8_t *par;
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
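+
+/*
+ * Parity layout (when error correction is enabled): ram_console_par_buffer
+ * holds one ECC_SIZE parity block per ECC_BLOCK_SIZE bytes of log data,
+ * followed by one extra parity block covering the buffer header itself
+ * (written by ram_console_update_header() above).
+ */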
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+ int rem;
+ struct ram_console_buffer *buffer = ram_console_buffer;
+
+ if (count > ram_console_buffer_size) {
+ s += count - ram_console_buffer_size;
+ count = ram_console_buffer_size;
+ }
+ rem = ram_console_buffer_size - buffer->start;
+ if (rem < count) {
+ ram_console_update(s, rem);
+ s += rem;
+ count -= rem;
+ buffer->start = 0;
+ buffer->size = ram_console_buffer_size;
+ }
+ ram_console_update(s, count);
+
+ buffer->start += count;
+ if (buffer->size < ram_console_buffer_size)
+ buffer->size += count;
+ ram_console_update_header();
+}
+
+static struct console ram_console = {
+ .name = "ram",
+ .write = ram_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+void ram_console_enable_console(int enabled)
+{
+ if (enabled)
+ ram_console.flags |= CON_ENABLED;
+ else
+ ram_console.flags &= ~CON_ENABLED;
+}
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
+ char *dest)
+{
+ size_t old_log_size = buffer->size;
+ size_t bootinfo_size = 0;
+ size_t total_size = old_log_size;
+ char *ptr;
+ const char *bootinfo_label = "Boot info:\n";
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *block;
+ uint8_t *par;
+ char strbuf[80];
+ int strbuf_len = 0;
+
+ block = buffer->data;
+ par = ram_console_par_buffer;
+ while (block < buffer->data + buffer->size) {
+ int numerr;
+ int size = ECC_BLOCK_SIZE;
+ if (block + size > buffer->data + ram_console_buffer_size)
+ size = buffer->data + ram_console_buffer_size - block;
+ numerr = ram_console_decode_rs8(block, size, par);
+ if (numerr > 0) {
+#if 0
+ printk(KERN_INFO "ram_console: error in block %p, %d\n",
+ block, numerr);
+#endif
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+#if 0
+ printk(KERN_INFO "ram_console: uncorrectable error in "
+ "block %p\n", block);
+#endif
+ ram_console_bad_blocks++;
+ }
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ }
+ if (ram_console_corrected_bytes || ram_console_bad_blocks)
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ ram_console_corrected_bytes, ram_console_bad_blocks);
+ else
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\nNo errors detected\n");
+ if (strbuf_len >= sizeof(strbuf))
+ strbuf_len = sizeof(strbuf) - 1;
+ total_size += strbuf_len;
+#endif
+
+ if (bootinfo)
+ bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
+ total_size += bootinfo_size;
+
+ if (dest == NULL) {
+ dest = kmalloc(total_size, GFP_KERNEL);
+ if (dest == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer\n");
+ return;
+ }
+ }
+
+ ram_console_old_log = dest;
+ ram_console_old_log_size = total_size;
+ memcpy(ram_console_old_log,
+ &buffer->data[buffer->start], buffer->size - buffer->start);
+ memcpy(ram_console_old_log + buffer->size - buffer->start,
+ &buffer->data[0], buffer->start);
+ ptr = ram_console_old_log + old_log_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ memcpy(ptr, strbuf, strbuf_len);
+ ptr += strbuf_len;
+#endif
+ if (bootinfo) {
+ memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
+ ptr += strlen(bootinfo_label);
+ memcpy(ptr, bootinfo, bootinfo_size);
+ ptr += bootinfo_size;
+ }
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+ size_t buffer_size, const char *bootinfo,
+ char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ int numerr;
+ uint8_t *par;
+#endif
+ ram_console_buffer = buffer;
+ ram_console_buffer_size =
+ buffer_size - sizeof(struct ram_console_buffer);
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "datasize %zu\n", buffer, buffer_size,
+ ram_console_buffer_size);
+ return 0;
+ }
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+ ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "non-ecc datasize %zu\n",
+ buffer, buffer_size, ram_console_buffer_size);
+ return 0;
+ }
+
+ ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+
+ /* first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+ if (ram_console_rs_decoder == NULL) {
+ printk(KERN_INFO "ram_console: init_rs failed\n");
+ return 0;
+ }
+
+ ram_console_corrected_bytes = 0;
+ ram_console_bad_blocks = 0;
+
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+ numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+ if (numerr > 0) {
+ printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ printk(KERN_INFO
+ "ram_console: uncorrectable error in header\n");
+ ram_console_bad_blocks++;
+ }
+#endif
+
+ if (buffer->sig == RAM_CONSOLE_SIG) {
+ if (buffer->size > ram_console_buffer_size
+ || buffer->start > buffer->size)
+ printk(KERN_INFO "ram_console: found existing invalid "
+ "buffer, size %d, start %d\n",
+ buffer->size, buffer->start);
+ else {
+ printk(KERN_INFO "ram_console: found existing buffer, "
+ "size %d, start %d\n",
+ buffer->size, buffer->start);
+ ram_console_save_old(buffer, bootinfo, old_buf);
+ }
+ } else {
+ printk(KERN_INFO "ram_console: no valid data in buffer "
+ "(sig = 0x%08x)\n", buffer->sig);
+ }
+
+ buffer->sig = RAM_CONSOLE_SIG;
+ buffer->start = 0;
+ buffer->size = 0;
+
+ register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ console_verbose();
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+ return ram_console_init((struct ram_console_buffer *)
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+ NULL,
+ ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res = pdev->resource;
+ size_t start;
+ size_t buffer_size;
+ void *buffer;
+ const char *bootinfo = NULL;
+ struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+
+ if (res == NULL || pdev->num_resources != 1 ||
+ !(res->flags & IORESOURCE_MEM)) {
+ printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+ "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+ return -ENXIO;
+ }
+ buffer_size = res->end - res->start + 1;
+ start = res->start;
+ printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
+ start, buffer_size);
+ buffer = ioremap(res->start, buffer_size);
+ if (buffer == NULL) {
+ printk(KERN_ERR "ram_console: failed to map memory\n");
+ return -ENOMEM;
+ }
+
+ if (pdata)
+ bootinfo = pdata->bootinfo;
+
+ return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+ .probe = ram_console_driver_probe,
+ .driver = {
+ .name = "ram_console",
+ },
+};
+
+static int __init ram_console_module_init(void)
+{
+ int err;
+ err = platform_driver_register(&ram_console_driver);
+ return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= ram_console_old_log_size)
+ return 0;
+
+ count = min(len, (size_t)(ram_console_old_log_size - pos));
+ if (copy_to_user(buf, ram_console_old_log + pos, count))
+ return -EFAULT;
+
+ *offset += count;
+ return count;
+}
+
+static const struct file_operations ram_console_file_ops = {
+ .owner = THIS_MODULE,
+ .read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (ram_console_old_log == NULL)
+ return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+ ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+ if (ram_console_old_log == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer for old log\n");
+ ram_console_old_log_size = 0;
+ return 0;
+ }
+ memcpy(ram_console_old_log,
+ ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+ entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ printk(KERN_ERR "ram_console: failed to create proc entry\n");
+ kfree(ram_console_old_log);
+ ram_console_old_log = NULL;
+ return 0;
+ }
+
+ entry->proc_fops = &ram_console_file_ops;
+ entry->size = ram_console_old_log_size;
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+postcore_initcall(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644
index 000000000000..a64481c3e86d
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.c
@@ -0,0 +1,176 @@
+/* drivers/misc/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+ struct timed_output_dev dev;
+ struct hrtimer timer;
+ spinlock_t lock;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+ struct timed_gpio_data *data =
+ container_of(timer, struct timed_gpio_data, timer);
+
+ gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+ return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+
+ if (hrtimer_active(&data->timer)) {
+ ktime_t r = hrtimer_get_remaining(&data->timer);
+ struct timeval t = ktime_to_timeval(r);
+ return t.tv_sec * 1000 + t.tv_usec / 1000;
+ } else
+ return 0;
+}
+
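+/*
+ * gpio_enable - drive the GPIO active for 'value' milliseconds (clamped to
+ * max_timeout); a value of 0 just cancels any pending timer and returns the
+ * GPIO to its inactive level.
+ */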
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* cancel previous timer and set GPIO according to value */
+ hrtimer_cancel(&data->timer);
+ gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+ if (value > 0) {
+ if (value > data->max_timeout)
+ value = data->max_timeout;
+
+ hrtimer_start(&data->timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio *cur_gpio;
+ struct timed_gpio_data *gpio_data, *gpio_dat;
+ int i, j, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
+ GFP_KERNEL);
+ if (!gpio_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ cur_gpio = &pdata->gpios[i];
+ gpio_dat = &gpio_data[i];
+
+ hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ gpio_dat->timer.function = gpio_timer_func;
+ spin_lock_init(&gpio_dat->lock);
+
+ gpio_dat->dev.name = cur_gpio->name;
+ gpio_dat->dev.get_time = gpio_get_time;
+ gpio_dat->dev.enable = gpio_enable;
+ ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+ if (ret >= 0) {
+ ret = timed_output_dev_register(&gpio_dat->dev);
+ if (ret < 0)
+ gpio_free(cur_gpio->gpio);
+ }
+ if (ret < 0) {
+ for (j = 0; j < i; j++) {
+ timed_output_dev_unregister(&gpio_data[j].dev);
+ gpio_free(gpio_data[j].gpio);
+ }
+ kfree(gpio_data);
+ return ret;
+ }
+
+ gpio_dat->gpio = cur_gpio->gpio;
+ gpio_dat->max_timeout = cur_gpio->max_timeout;
+ gpio_dat->active_low = cur_gpio->active_low;
+ gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+ }
+
+ platform_set_drvdata(pdev, gpio_data);
+
+ return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ timed_output_dev_unregister(&gpio_data[i].dev);
+ gpio_free(gpio_data[i].gpio);
+ }
+
+ kfree(gpio_data);
+
+ return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+ .probe = timed_gpio_probe,
+ .remove = timed_gpio_remove,
+ .driver = {
+ .name = TIMED_GPIO_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init timed_gpio_init(void)
+{
+ return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+ platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
new file mode 100644
index 000000000000..a0e15f8be3f7
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.h
@@ -0,0 +1,33 @@
+/* include/linux/timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
+
+#define TIMED_GPIO_NAME "timed-gpio"
+
+struct timed_gpio {
+ const char *name;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+struct timed_gpio_platform_data {
+ int num_gpios;
+ struct timed_gpio *gpios;
+};
+
+#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644
index 000000000000..f373422308e0
--- /dev/null
+++ b/drivers/staging/android/timed_output.c
@@ -0,0 +1,123 @@
+/* drivers/misc/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
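+/*
+ * The sysfs "enable" attribute: reads report the time remaining (in ms)
+ * via the driver's get_time() hook, writes pass the parsed value straight
+ * to the driver's enable() hook.
+ */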
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int remaining = tdev->get_time(tdev);
+
+ return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(
+ struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int value;
+
+ if (sscanf(buf, "%d", &value) != 1)
+ return -EINVAL;
+
+ tdev->enable(tdev, value);
+
+ return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+
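+/*
+ * The "timed_output" class is created lazily on first use; each registered
+ * device gets a unique index which is used as the minor number passed to
+ * device_create().
+ */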
+static int create_timed_output_class(void)
+{
+ if (!timed_output_class) {
+ timed_output_class = class_create(THIS_MODULE, "timed_output");
+ if (IS_ERR(timed_output_class))
+ return PTR_ERR(timed_output_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+ int ret;
+
+ if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+ return -EINVAL;
+
+ ret = create_timed_output_class();
+ if (ret < 0)
+ return ret;
+
+ tdev->index = atomic_inc_return(&device_count);
+ tdev->dev = device_create(timed_output_class, NULL,
+ MKDEV(0, tdev->index), NULL, tdev->name);
+ if (IS_ERR(tdev->dev))
+ return PTR_ERR(tdev->dev);
+
+ ret = device_create_file(tdev->dev, &dev_attr_enable);
+ if (ret < 0)
+ goto err_create_file;
+
+ dev_set_drvdata(tdev->dev, tdev);
+ tdev->state = 0;
+ return 0;
+
+err_create_file:
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ printk(KERN_ERR "timed_output: Failed to register driver %s\n",
+ tdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+ device_remove_file(tdev->dev, &dev_attr_enable);
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ dev_set_drvdata(tdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+ return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+ class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644
index 000000000000..ec907ab2ff54
--- /dev/null
+++ b/drivers/staging/android/timed_output.h
@@ -0,0 +1,37 @@
+/* include/linux/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+ const char *name;
+
+ /* enable the output and set the timer */
+ void (*enable)(struct timed_output_dev *sdev, int timeout);
+
+ /* returns the current number of milliseconds remaining on the timer */
+ int (*get_time)(struct timed_output_dev *sdev);
+
+ /* private data */
+ struct device *dev;
+ int index;
+ int state;
+};
+
+extern int timed_output_dev_register(struct timed_output_dev *dev);
+extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index 1ad2d56c8ba8..92ee85fc85d6 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -4,15 +4,26 @@
comment "Light sensors"
config SENSORS_ISL29018
- tristate "ISL 29018 light and proximity sensor"
- depends on I2C
- default n
- help
- If you say yes here you get support for ambient light sensing and
- proximity infrared sensing from Intersil ISL29018.
- This driver will provide the measurements of ambient light intensity
- in lux, proximity infrared sensing and normal infrared sensing.
- Data from sensor is accessible via sysfs.
+ tristate "ISL 29018 light and proximity sensor"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for ambient light sensing and
+ proximity infrared sensing from Intersil ISL29018.
+ This driver will provide the measurements of ambient light intensity
+ in lux, proximity infrared sensing and normal infrared sensing.
+ Data from sensor is accessible via sysfs.
+
+config SENSORS_ISL29028
+ tristate "ISL 29028 light and proximity sensor"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for ambient light sensing and
+ proximity IR sensing from the Intersil ISL29028.
+ This driver will provide the measurements of ambient light intensity
+ in lux, proximity infrared sensing and normal infrared sensing.
+ Data from sensor is accessible via sysfs.
config SENSORS_TSL2563
tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 3011fbfa8dc2..6f02c4c3f722 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -1,7 +1,10 @@
#
# Makefile for industrial I/O Light sensors
#
+GCOV_PROFILE := y
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_TSL2583) += tsl2583.o
+obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
+
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
new file mode 100644
index 000000000000..0b1d2f2f73aa
--- /dev/null
+++ b/drivers/staging/iio/light/isl29028.c
@@ -0,0 +1,1269 @@
+/*
+ * An IIO driver for the ISL29028 light sensor.
+ *
+ * IIO light driver for monitoring ambient light intensity in lux and
+ * proximity IR.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include "../iio.h"
+
+#define CONVERSION_TIME_MS 100
+
+#define ISL29028_REG_ADD_CONFIGURE 0x01
+
+#define CONFIGURE_PROX_EN_MASK (1 << 7)
+#define CONFIGURE_PROX_EN_SH 7
+
+#define CONFIGURE_PROX_SLP_SH 4
+#define CONFIGURE_PROX_SLP_MASK (7 << CONFIGURE_PROX_SLP_SH)
+
+#define CONFIGURE_PROX_DRIVE (1 << 3)
+
+#define CONFIGURE_ALS_EN 1
+#define CONFIGURE_ALS_DIS 0
+#define CONFIGURE_ALS_EN_SH 2
+#define CONFIGURE_ALS_EN_MASK (1 << CONFIGURE_ALS_EN_SH)
+
+
+#define CONFIGURE_ALS_RANGE_LOW_LUX 0
+#define CONFIGURE_ALS_RANGE_HIGH_LUX 1
+#define CONFIGURE_ALS_RANGE_SH 1
+#define CONFIGURE_ALS_RANGE_MASK (1 << CONFIGURE_ALS_RANGE_SH)
+
+#define CONFIGURE_ALS_IR_MODE_MASK 1
+#define CONFIGURE_ALS_IR_MODE_SH 0
+#define CONFIGURE_ALS_IR_MODE_IR 1
+#define CONFIGURE_ALS_IR_MODE_ALS 0
+
+#define ISL29028_REG_ADD_INTERRUPT 0x02
+#define INTERRUPT_PROX_FLAG_MASK (1 << 7)
+#define INTERRUPT_PROX_FLAG_SH 7
+#define INTERRUPT_PROX_FLAG_EN 1
+#define INTERRUPT_PROX_FLAG_DIS 0
+
+#define INTERRUPT_PROX_PERSIST_SH 5
+#define INTERRUPT_PROX_PERSIST_MASK (3 << 5)
+
+#define INTERRUPT_ALS_FLAG_MASK (1 << 3)
+#define INTERRUPT_ALS_FLAG_SH 3
+#define INTERRUPT_ALS_FLAG_EN 1
+#define INTERRUPT_ALS_FLAG_DIS 0
+
+#define INTERRUPT_ALS_PERSIST_SH 1
+#define INTERRUPT_ALS_PERSIST_MASK (3 << 1)
+
+#define ISL29028_REG_ADD_PROX_LOW_THRES 0x03
+#define ISL29028_REG_ADD_PROX_HIGH_THRES 0x04
+
+#define ISL29028_REG_ADD_ALSIR_LOW_THRES 0x05
+#define ISL29028_REG_ADD_ALSIR_LH_THRES 0x06
+#define ISL29028_REG_ADD_ALSIR_LH_THRES_L_SH 0
+#define ISL29028_REG_ADD_ALSIR_LH_THRES_H_SH 4
+#define ISL29028_REG_ADD_ALSIR_HIGH_THRES 0x07
+
+#define ISL29028_REG_ADD_PROX_DATA 0x08
+#define ISL29028_REG_ADD_ALSIR_L 0x09
+#define ISL29028_REG_ADD_ALSIR_U 0x0A
+
+#define ISL29028_REG_ADD_TEST1_MODE 0x0E
+#define ISL29028_REG_ADD_TEST2_MODE 0x0F
+
+#define ISL29028_MAX_REGS ISL29028_REG_ADD_TEST2_MODE
+
+enum {
+ MODE_NONE = 0,
+ MODE_ALS,
+ MODE_IR
+};
+
+struct isl29028_chip {
+ struct iio_dev *indio_dev;
+ struct i2c_client *client;
+ struct mutex lock;
+ int irq;
+
+ int prox_period;
+ int prox_low_thres;
+ int prox_high_thres;
+ int prox_persist;
+ bool is_prox_enable;
+ int prox_reading;
+
+ int als_high_thres;
+ int als_low_thres;
+ int als_persist;
+ int als_range;
+ int als_reading;
+ int als_ir_mode;
+
+ int ir_high_thres;
+ int ir_low_thres;
+ int ir_reading;
+
+ bool is_int_enable;
+ bool is_proxim_int_waiting;
+ bool is_als_int_waiting;
+ struct completion prox_completion;
+ struct completion als_completion;
+ u8 reg_cache[ISL29028_MAX_REGS];
+};
+
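+/*
+ * Read-modify-write helper: the current register value is taken from the
+ * local shadow cache, masked and updated, written over I2C, and the cache
+ * is refreshed only if the write succeeds.
+ */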
+static bool isl29028_write_data(struct i2c_client *client, u8 reg,
+ u8 val, u8 mask, u8 shift)
+{
+ u8 regval;
+ int ret = 0;
+ struct isl29028_chip *chip = i2c_get_clientdata(client);
+
+ regval = chip->reg_cache[reg];
+ regval &= ~mask;
+ regval |= val << shift;
+
+ ret = i2c_smbus_write_byte_data(client, reg, regval);
+ if (ret) {
+ dev_err(&client->dev, "Write to device reg %d fails status "
+ "%x\n", reg, ret);
+ return false;
+ }
+ chip->reg_cache[reg] = regval;
+ return true;
+}
+
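+/*
+ * Map the requested proximity sampling period (ms) onto the 3-bit PROX_SLP
+ * field; the encoding is inverted (a larger field value selects a shorter
+ * period), hence the "7 - i" below.
+ */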
+static bool isl29018_set_proxim_period(struct i2c_client *client,
+ bool is_enable, int period)
+{
+ int prox_period[] = {0, 12, 50, 75, 100, 200, 400, 800};
+ int i;
+ int sel;
+ bool st;
+ if (period < 12)
+ sel = 7;
+ else {
+ for (i = 1; i < ARRAY_SIZE(prox_period) - 1; ++i) {
+ if ((prox_period[i] <= period) &&
+ period < prox_period[i + 1])
+ break;
+ }
+ sel = 7 - i;
+ }
+
+ if (!is_enable) {
+ dev_dbg(&client->dev, "Disabling proximity sensing\n");
+ st = isl29028_write_data(client, ISL29028_REG_ADD_CONFIGURE,
+ 0, CONFIGURE_PROX_EN_MASK, CONFIGURE_PROX_EN_SH);
+ } else {
+ dev_dbg(&client->dev, "Enabling proximity sensing with period "
+ "of %d ms sel %d period %d\n", prox_period[7 - sel],
+ sel, period);
+ st = isl29028_write_data(client, ISL29028_REG_ADD_CONFIGURE,
+ sel, CONFIGURE_PROX_SLP_MASK, CONFIGURE_PROX_SLP_SH);
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_CONFIGURE, 1,
+ CONFIGURE_PROX_EN_MASK, CONFIGURE_PROX_EN_SH);
+ }
+ return st;
+}
+
+static bool isl29018_set_proxim_persist(struct i2c_client *client,
+ bool is_enable, int persist)
+{
+ int persist_vals[] = {1, 4, 8, 16};
+ int i;
+ int sel;
+ bool st;
+ if (is_enable) {
+ for (i = 0; i < ARRAY_SIZE(persist_vals) - 1; ++i) {
+ if ((persist_vals[i] <= persist) &&
+ persist < persist_vals[i+1])
+ break;
+ }
+ sel = i;
+ }
+
+ if (is_enable) {
+ dev_dbg(&client->dev, "Enabling proximity threshold interrupt\n");
+ st = isl29028_write_data(client, ISL29028_REG_ADD_INTERRUPT,
+ sel, INTERRUPT_PROX_PERSIST_MASK,
+ INTERRUPT_PROX_PERSIST_SH);
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_INTERRUPT,
+ INTERRUPT_PROX_FLAG_EN,
+ INTERRUPT_PROX_FLAG_MASK,
+ INTERRUPT_PROX_FLAG_SH);
+ } else {
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_INTERRUPT, INTERRUPT_PROX_FLAG_DIS,
+ INTERRUPT_PROX_FLAG_MASK, INTERRUPT_PROX_FLAG_SH);
+ }
+ return st;
+}
+
+static bool isl29018_set_als_persist(struct i2c_client *client, bool is_enable,
+ int persist)
+{
+ int persist_vals[] = {1, 4, 8, 16};
+ int i;
+ int sel;
+ bool st;
+ if (is_enable) {
+ for (i = 0; i < ARRAY_SIZE(persist_vals) - 1; ++i) {
+ if ((persist_vals[i] <= persist) &&
+ persist < persist_vals[i+1])
+ break;
+ }
+ sel = i;
+ }
+
+ if (is_enable) {
+ dev_dbg(&client->dev, "Enabling als threshold interrupt\n");
+ st = isl29028_write_data(client, ISL29028_REG_ADD_INTERRUPT,
+ sel, INTERRUPT_ALS_PERSIST_MASK,
+ INTERRUPT_ALS_PERSIST_SH);
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_INTERRUPT,
+ INTERRUPT_ALS_FLAG_EN,
+ INTERRUPT_ALS_FLAG_MASK,
+ INTERRUPT_ALS_FLAG_SH);
+ } else {
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_INTERRUPT, INTERRUPT_ALS_FLAG_DIS,
+ INTERRUPT_ALS_FLAG_MASK, INTERRUPT_ALS_FLAG_SH);
+ }
+ return st;
+}
+
+static bool isl29018_set_proxim_high_threshold(struct i2c_client *client, u8 th)
+{
+ return isl29028_write_data(client, ISL29028_REG_ADD_PROX_HIGH_THRES,
+ th, 0xFF, 0);
+}
+
+static bool isl29018_set_proxim_low_threshold(struct i2c_client *client, u8 th)
+{
+ return isl29028_write_data(client, ISL29028_REG_ADD_PROX_LOW_THRES,
+ th, 0xFF, 0);
+}
+
+static bool isl29018_set_irals_high_threshold(struct i2c_client *client,
+ u32 als)
+{
+ bool st;
+ st = isl29028_write_data(client, ISL29028_REG_ADD_ALSIR_HIGH_THRES,
+ (als >> 4) & 0xFF, 0xFF, 0);
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_ALSIR_LH_THRES, als & 0xF,
+ 0xF << ISL29028_REG_ADD_ALSIR_LH_THRES_H_SH,
+ ISL29028_REG_ADD_ALSIR_LH_THRES_H_SH);
+ return st;
+}
+
+static bool isl29018_set_irals_low_threshold(struct i2c_client *client, u32 als)
+{
+ bool st;
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_ALSIR_LH_THRES, (als >> 8) & 0xF,
+ 0xF << ISL29028_REG_ADD_ALSIR_LH_THRES_L_SH,
+ ISL29028_REG_ADD_ALSIR_LH_THRES_L_SH);
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_ALSIR_LOW_THRES,
+ als & 0xFF, 0xFF, 0);
+ return st;
+}
+
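+/*
+ * Switch the ALS/IR engine between ALS and IR mode (or disable it); the
+ * matching high/low thresholds are reprogrammed whenever a mode is enabled.
+ */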
+static bool isl29018_set_als_ir_mode(struct i2c_client *client, bool is_enable,
+ bool is_als)
+{
+ struct isl29028_chip *chip = i2c_get_clientdata(client);
+ bool st;
+ if (is_enable) {
+ if (is_als) {
+ dev_dbg(&client->dev, "Enabling ALS mode\n");
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_CONFIGURE,
+ CONFIGURE_ALS_IR_MODE_ALS,
+ CONFIGURE_ALS_IR_MODE_MASK,
+ CONFIGURE_ALS_IR_MODE_SH);
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_CONFIGURE,
+ CONFIGURE_ALS_RANGE_HIGH_LUX,
+ CONFIGURE_ALS_RANGE_MASK,
+ CONFIGURE_ALS_RANGE_SH);
+ if (st)
+ st = isl29018_set_irals_high_threshold(client,
+ chip->als_high_thres);
+ if (st)
+ st = isl29018_set_irals_low_threshold(client,
+ chip->als_low_thres);
+ } else {
+ dev_dbg(&client->dev, "Enabling IR mode\n");
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_CONFIGURE,
+ CONFIGURE_ALS_IR_MODE_IR,
+ CONFIGURE_ALS_IR_MODE_MASK,
+ CONFIGURE_ALS_IR_MODE_SH);
+ if (st)
+ st = isl29018_set_irals_high_threshold(client,
+ chip->ir_high_thres);
+ if (st)
+ st = isl29018_set_irals_low_threshold(client,
+ chip->ir_low_thres);
+ }
+ if (st)
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_CONFIGURE,
+ CONFIGURE_ALS_EN,
+ CONFIGURE_ALS_EN_MASK,
+ CONFIGURE_ALS_EN_SH);
+ } else {
+ st = isl29028_write_data(client,
+ ISL29028_REG_ADD_CONFIGURE,
+ CONFIGURE_ALS_DIS,
+ CONFIGURE_ALS_EN_MASK,
+ CONFIGURE_ALS_EN_SH);
+ }
+ return st;
+}
+
+static bool isl29028_read_als_ir(struct i2c_client *client, int *als_ir)
+{
+ s32 lsb;
+ s32 msb;
+
+ lsb = i2c_smbus_read_byte_data(client, ISL29028_REG_ADD_ALSIR_L);
+ if (lsb < 0) {
+ dev_err(&client->dev, "Error in reading register %d, error %d\n",
+ ISL29028_REG_ADD_ALSIR_L, lsb);
+ return false;
+ }
+
+ msb = i2c_smbus_read_byte_data(client, ISL29028_REG_ADD_ALSIR_U);
+ if (msb < 0) {
+ dev_err(&client->dev, "Error in reading register %d, error %d\n",
+ ISL29028_REG_ADD_ALSIR_U, lsb);
+ return false;
+ }
+ *als_ir = ((msb & 0xF) << 8) | (lsb & 0xFF);
+ return true;
+}
+
+static bool isl29028_read_proxim(struct i2c_client *client, int *prox)
+{
+ s32 data;
+
+ data = i2c_smbus_read_byte_data(client, ISL29028_REG_ADD_PROX_DATA);
+ if (data < 0) {
+ dev_err(&client->dev, "Error in reading register %d, error %d\n",
+ ISL29028_REG_ADD_PROX_DATA, data);
+ return false;
+ }
+ *prox = (int)data;
+ return true;
+}
+
+/* Sysfs interface */
+/* proximity period */
+static ssize_t show_prox_period(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_period);
+}
+
+static ssize_t store_prox_period(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
+ st = isl29018_set_proxim_period(client, chip->is_prox_enable,
+ (int)lval);
+ if (st)
+ chip->prox_period = (int)lval;
+ else
+ dev_err(dev, "Error in setting the proximity period\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* proximity enable/disable */
+static ssize_t show_prox_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ if (chip->is_prox_enable)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_prox_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+ if ((lval != 1) && (lval != 0)) {
+ dev_err(dev, "illegal value %lu\n", lval);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (lval == 1)
+ st = isl29018_set_proxim_period(client, true,
+ chip->prox_period);
+ else
+ st = isl29018_set_proxim_period(client, false,
+ chip->prox_period);
+ if (st)
+ chip->is_prox_enable = (lval) ? true : false;
+ else
+ dev_err(dev, "Error in enabling proximity\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* als/ir enable/disable */
+static ssize_t show_als_ir_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "Current Mode: %d [0:None, 1:ALS, 2:IR]\n",
+ chip->als_ir_mode);
+}
+
+static ssize_t store_als_ir_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+ if (lval > 2) {
+ dev_err(dev, "illegal value %lu\n", lval);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (lval == 0)
+ st = isl29018_set_als_ir_mode(client, false, false);
+ else if (lval == 1)
+ st = isl29018_set_als_ir_mode(client, true, true);
+ else
+ st = isl29018_set_als_ir_mode(client, true, false);
+ if (st)
+ chip->als_ir_mode = (int)lval;
+ else
+ dev_err(dev, "Error in enabling als/ir mode\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Proximity low thresholds */
+static ssize_t show_proxim_low_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_low_thres);
+}
+
+static ssize_t store_proxim_low_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 0xFF) || (lval < 0x0)) {
+ dev_err(dev, "The threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ st = isl29018_set_proxim_low_threshold(client, (u8)lval);
+ if (st)
+ chip->prox_low_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting proximity low threshold\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Proximity high thresholds */
+static ssize_t show_proxim_high_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_high_thres);
+}
+
+static ssize_t store_proxim_high_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 0xFF) || (lval < 0x0)) {
+ dev_err(dev, "The threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ st = isl29018_set_proxim_high_threshold(client, (u8)lval);
+ if (st)
+ chip->prox_high_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting proximity high threshold\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* als low thresholds */
+static ssize_t show_als_low_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->als_low_thres);
+}
+
+static ssize_t store_als_low_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 0xFFFF) || (lval < 0x0)) {
+ dev_err(dev, "The ALS threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (chip->als_ir_mode == MODE_ALS) {
+ st = isl29018_set_irals_low_threshold(client, (int)lval);
+ if (st)
+ chip->als_low_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting als low threshold\n");
+ } else
+ chip->als_low_thres = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Als high thresholds */
+static ssize_t show_als_high_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->als_high_thres);
+}
+
+static ssize_t store_als_high_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 0xFFFF) || (lval < 0x0)) {
+ dev_err(dev, "The als threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (chip->als_ir_mode == MODE_ALS) {
+ st = isl29018_set_irals_high_threshold(client, (int)lval);
+ if (st)
+ chip->als_high_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting als high threshold\n");
+ } else
+ chip->als_high_thres = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* IR low thresholds */
+static ssize_t show_ir_low_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->ir_low_thres);
+}
+
+static ssize_t store_ir_low_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 0xFFFF) || (lval < 0x0)) {
+ dev_err(dev, "The IR threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (chip->als_ir_mode == MODE_IR) {
+ st = isl29018_set_irals_low_threshold(client, (int)lval);
+ if (st)
+ chip->ir_low_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting als low threshold\n");
+ } else
+ chip->ir_low_thres = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* IR high thresholds */
+static ssize_t show_ir_high_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->ir_high_thres);
+}
+
+static ssize_t store_ir_high_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 0xFFFF) || (lval < 0x0)) {
+ dev_err(dev, "The als threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (chip->als_ir_mode == MODE_IR) {
+ st = isl29018_set_irals_high_threshold(client, (int)lval);
+ if (st)
+ chip->ir_high_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting als high threshold\n");
+ } else
+ chip->ir_high_thres = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Proximity persist */
+static ssize_t show_proxim_persist(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_persist);
+}
+
+static ssize_t store_proxim_persist(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 16) || (lval < 0x0)) {
+ dev_err(dev, "The proximity persist is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ chip->prox_persist = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* als/ir persist */
+static ssize_t show_als_persist(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->als_persist);
+}
+
+static ssize_t store_als_persist(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+ if ((lval > 16) || (lval < 0x0)) {
+ dev_err(dev, "The als persist is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ chip->als_persist = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Display proxim data */
+static ssize_t show_proxim_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ int prox_data;
+ bool st;
+ ssize_t buf_count = 0;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ mutex_lock(&chip->lock);
+
+ if (chip->is_prox_enable) {
+ st = isl29028_read_proxim(chip->client, &prox_data);
+ if (st) {
+ buf_count = sprintf(buf, "%d\n", prox_data);
+ chip->prox_reading = prox_data;
+ }
+ } else
+ buf_count = sprintf(buf, "%d\n", chip->prox_reading);
+
+ mutex_unlock(&chip->lock);
+ return buf_count;
+}
+
+/* Display als data */
+static ssize_t show_als_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ int als_ir_data;
+ bool st;
+ ssize_t buf_count = 0;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ mutex_lock(&chip->lock);
+
+ if (chip->als_ir_mode == MODE_ALS) {
+ st = isl29028_read_als_ir(chip->client, &als_ir_data);
+ if (st) {
+ /* convert als data count to lux */
+ /* if als_range = 0, lux = count * 0.0326 */
+ /* if als_range = 1, lux = count * 0.522 */
+ if (!chip->als_range)
+ als_ir_data = (als_ir_data * 326) / 10000;
+ else
+ als_ir_data = (als_ir_data * 522) / 1000;
+
+ buf_count = sprintf(buf, "%d\n", als_ir_data);
+ chip->als_reading = als_ir_data;
+ }
+ } else
+ buf_count = sprintf(buf, "%d\n", chip->als_reading);
+ mutex_unlock(&chip->lock);
+ return buf_count;
+}
+
+/* Display IR data */
+static ssize_t show_ir_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ int als_ir_data;
+ bool st;
+ ssize_t buf_count = 0;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ mutex_lock(&chip->lock);
+
+ if (chip->als_ir_mode == MODE_IR) {
+ st = isl29028_read_als_ir(chip->client, &als_ir_data);
+ if (st) {
+ buf_count = sprintf(buf, "%d\n", als_ir_data);
+ chip->ir_reading = als_ir_data;
+ }
+ } else
+ buf_count = sprintf(buf, "%d\n", chip->ir_reading);
+ mutex_unlock(&chip->lock);
+ return buf_count;
+}
+
+/* Wait for the proximity threshold interrupt */
+static ssize_t show_wait_proxim_int(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ if (!chip->is_int_enable) {
+ dev_err(dev, "%s() Interrupt mode not supported\n", __func__);
+ return sprintf(buf, "error\n");
+ }
+
+ mutex_lock(&chip->lock);
+ st = isl29018_set_proxim_persist(client, true, chip->prox_persist);
+ if (!st) {
+ dev_err(dev, "%s() Error in configuration\n", __func__);
+ mutex_unlock(&chip->lock);
+ return sprintf(buf, "error\n");
+ }
+
+ chip->is_proxim_int_waiting = true;
+ mutex_unlock(&chip->lock);
+ wait_for_completion(&chip->prox_completion);
+ mutex_lock(&chip->lock);
+ chip->is_proxim_int_waiting = false;
+ isl29018_set_proxim_persist(client, false, chip->prox_persist);
+ mutex_unlock(&chip->lock);
+ return sprintf(buf, "done\n");
+}
+
+/* Wait for the als/ir interrupt */
+static ssize_t show_wait_als_ir_int(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ struct i2c_client *client = chip->client;
+ bool st;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ if (!chip->is_int_enable) {
+ dev_err(dev, "%s() Interrupt mode not supported\n", __func__);
+ return sprintf(buf, "error\n");
+ }
+
+ mutex_lock(&chip->lock);
+
+ st = isl29018_set_als_persist(client, true, chip->als_persist);
+ if (!st) {
+ dev_err(dev, "%s() Error in als ir int configuration\n",
+ __func__);
+ mutex_unlock(&chip->lock);
+ return sprintf(buf, "error\n");
+ }
+
+ chip->is_als_int_waiting = true;
+ mutex_unlock(&chip->lock);
+ wait_for_completion(&chip->als_completion);
+ mutex_lock(&chip->lock);
+ chip->is_als_int_waiting = false;
+ st = isl29018_set_als_persist(client, false, chip->als_persist);
+ mutex_unlock(&chip->lock);
+ return sprintf(buf, "done\n");
+}
+
+/* Read name */
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct isl29028_chip *chip = indio_dev->dev_data;
+ return sprintf(buf, "%s\n", chip->client->name);
+}
+
+static IIO_DEVICE_ATTR(proximity_low_threshold, S_IRUGO | S_IWUSR,
+ show_proxim_low_threshold, store_proxim_low_threshold, 0);
+static IIO_DEVICE_ATTR(proximity_high_threshold, S_IRUGO | S_IWUSR,
+ show_proxim_high_threshold, store_proxim_high_threshold, 0);
+static IIO_DEVICE_ATTR(proximity_persist, S_IRUGO | S_IWUSR,
+ show_proxim_persist, store_proxim_persist, 0);
+static IIO_DEVICE_ATTR(proximity_period, S_IRUGO | S_IWUSR,
+ show_prox_period, store_prox_period, 0);
+static IIO_DEVICE_ATTR(proximity_enable, S_IRUGO | S_IWUSR,
+ show_prox_enable, store_prox_enable, 0);
+static IIO_DEVICE_ATTR(wait_proxim_thres, S_IRUGO,
+ show_wait_proxim_int, NULL, 0);
+static IIO_DEVICE_ATTR(proximity_value, S_IRUGO,
+ show_proxim_data, NULL, 0);
+
+static IIO_DEVICE_ATTR(als_low_threshold, S_IRUGO | S_IWUSR,
+ show_als_low_threshold, store_als_low_threshold, 0);
+static IIO_DEVICE_ATTR(als_high_threshold, S_IRUGO | S_IWUSR,
+ show_als_high_threshold, store_als_high_threshold, 0);
+static IIO_DEVICE_ATTR(als_persist, S_IRUGO | S_IWUSR,
+ show_als_persist, store_als_persist, 0);
+static IIO_DEVICE_ATTR(als_ir_mode, S_IRUGO | S_IWUSR,
+ show_als_ir_mode, store_als_ir_mode, 0);
+static IIO_DEVICE_ATTR(als_value, S_IRUGO,
+ show_als_data, NULL, 0);
+static IIO_DEVICE_ATTR(wait_als_ir_thres, S_IRUGO,
+ show_wait_als_ir_int, NULL, 0);
+
+static IIO_DEVICE_ATTR(ir_value, S_IRUGO,
+ show_ir_data, NULL, 0);
+static IIO_DEVICE_ATTR(ir_low_threshold, S_IRUGO | S_IWUSR,
+ show_ir_low_threshold, store_ir_low_threshold, 0);
+static IIO_DEVICE_ATTR(ir_high_threshold, S_IRUGO | S_IWUSR,
+ show_ir_high_threshold, store_ir_high_threshold, 0);
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+static struct attribute *isl29028_attributes[] = {
+ &iio_dev_attr_name.dev_attr.attr,
+
+ &iio_dev_attr_ir_value.dev_attr.attr,
+
+ &iio_dev_attr_als_low_threshold.dev_attr.attr,
+ &iio_dev_attr_als_high_threshold.dev_attr.attr,
+ &iio_dev_attr_als_persist.dev_attr.attr,
+ &iio_dev_attr_als_ir_mode.dev_attr.attr,
+ &iio_dev_attr_als_value.dev_attr.attr,
+ &iio_dev_attr_wait_als_ir_thres.dev_attr.attr,
+ &iio_dev_attr_ir_low_threshold.dev_attr.attr,
+ &iio_dev_attr_ir_high_threshold.dev_attr.attr,
+
+ &iio_dev_attr_proximity_low_threshold.dev_attr.attr,
+ &iio_dev_attr_proximity_high_threshold.dev_attr.attr,
+ &iio_dev_attr_proximity_enable.dev_attr.attr,
+ &iio_dev_attr_proximity_period.dev_attr.attr,
+ &iio_dev_attr_proximity_persist.dev_attr.attr,
+ &iio_dev_attr_proximity_value.dev_attr.attr,
+ &iio_dev_attr_wait_proxim_thres.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group isl29028_group = {
+ .attrs = isl29028_attributes,
+};
+
+static int isl29028_chip_init(struct i2c_client *client)
+{
+ struct isl29028_chip *chip = i2c_get_clientdata(client);
+ int i;
+ bool st;
+
+ for (i = 0; i < ARRAY_SIZE(chip->reg_cache); i++)
+ chip->reg_cache[i] = 0;
+
+ chip->is_prox_enable = 0;
+ chip->prox_low_thres = 0;
+ chip->prox_high_thres = 0xFF;
+ chip->prox_period = 0;
+ chip->prox_reading = 0;
+
+ chip->als_low_thres = 0;
+ chip->als_high_thres = 0xFFF;
+ chip->als_range = 1;
+ chip->als_reading = 0;
+ chip->als_ir_mode = 0;
+
+ chip->ir_high_thres = 0xFFF;
+ chip->ir_low_thres = 0;
+ chip->ir_reading = 0;
+
+ chip->is_int_enable = false;
+ chip->prox_persist = 1;
+ chip->als_persist = 1;
+ chip->is_proxim_int_waiting = false;
+ chip->is_als_int_waiting = false;
+
+ st = isl29028_write_data(client, ISL29028_REG_ADD_TEST1_MODE,
+ 0x0, 0xFF, 0);
+ if (st)
+ st = isl29028_write_data(client, ISL29028_REG_ADD_TEST2_MODE,
+ 0x0, 0xFF, 0);
+ if (st)
+ st = isl29028_write_data(client, ISL29028_REG_ADD_CONFIGURE,
+ 0x0, 0xFF, 0);
+ if (st)
+ msleep(1);
+ if (!st) {
+ dev_err(&client->dev, "%s(): fails\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
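+/*
+ * Threaded interrupt handler: clears whichever interrupt flag fired and
+ * completes the corresponding sysfs waiter (wait_proxim_thres or
+ * wait_als_ir_thres).
+ */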
+static irqreturn_t threshold_isr(int irq, void *irq_data)
+{
+ struct isl29028_chip *chip = (struct isl29028_chip *)irq_data;
+ s32 int_reg;
+ struct i2c_client *client = chip->client;
+
+ int_reg = i2c_smbus_read_byte_data(client, ISL29028_REG_ADD_INTERRUPT);
+ if (int_reg < 0) {
+ dev_err(&client->dev, "Error in reading register %d, error %d\n",
+ ISL29028_REG_ADD_INTERRUPT, int_reg);
+ return IRQ_HANDLED;
+ }
+
+ if (int_reg & INTERRUPT_PROX_FLAG_MASK) {
+ /* Write 0 to clear */
+ isl29028_write_data(client,
+ ISL29028_REG_ADD_INTERRUPT, INTERRUPT_PROX_FLAG_DIS,
+ INTERRUPT_PROX_FLAG_MASK, INTERRUPT_PROX_FLAG_SH);
+ if (chip->is_proxim_int_waiting)
+ complete(&chip->prox_completion);
+ }
+
+ if (int_reg & INTERRUPT_ALS_FLAG_MASK) {
+ /* Write 0 to clear */
+ isl29028_write_data(client,
+ ISL29028_REG_ADD_INTERRUPT, INTERRUPT_ALS_FLAG_DIS,
+ INTERRUPT_ALS_FLAG_MASK, INTERRUPT_ALS_FLAG_SH);
+ if (chip->is_als_int_waiting)
+ complete(&chip->als_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info isl29028_info = {
+ .attrs = &isl29028_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit isl29028_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct isl29028_chip *chip;
+ int err;
+
+ dev_dbg(&client->dev, "%s() called\n", __func__);
+
+ chip = kzalloc(sizeof(struct isl29028_chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&client->dev, "Memory allocation fails\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+ chip->irq = client->irq;
+
+ mutex_init(&chip->lock);
+
+ err = isl29028_chip_init(client);
+ if (err)
+ goto exit_free;
+
+ init_completion(&chip->prox_completion);
+ init_completion(&chip->als_completion);
+
+ if (chip->irq > 0) {
+ err = request_threaded_irq(chip->irq, NULL, threshold_isr,
+ IRQF_SHARED, "ISL29028", chip);
+ if (err) {
+ dev_err(&client->dev, "Unable to register irq %d; "
+ "error %d\n", chip->irq, err);
+ goto exit_free;
+ }
+ }
+
+ chip->is_int_enable = (chip->irq > 0);
+ chip->indio_dev = iio_allocate_device(0);
+ if (!chip->indio_dev) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ err = -ENOMEM;
+ goto exit_irq;
+ }
+
+ chip->indio_dev->info = &isl29028_info;
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->dev_data = (void *)(chip);
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ err = iio_device_register(chip->indio_dev);
+ if (err) {
+ dev_err(&client->dev, "iio registration fails\n");
+ goto exit_iio_free;
+ }
+ dev_dbg(&client->dev, "%s() success\n", __func__);
+ return 0;
+
+exit_iio_free:
+ iio_free_device(chip->indio_dev);
+exit_irq:
+ if (chip->irq > 0)
+ free_irq(chip->irq, chip);
+exit_free:
+ kfree(chip);
+exit:
+ return err;
+}
+
+static int __devexit isl29028_remove(struct i2c_client *client)
+{
+ struct isl29028_chip *chip = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "%s()\n", __func__);
+ iio_device_unregister(chip->indio_dev);
+ if (chip->irq > 0)
+ free_irq(chip->irq, chip);
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id isl29028_id[] = {
+ {"isl29028", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29028_id);
+
+static struct i2c_driver isl29028_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "isl29028",
+ .owner = THIS_MODULE,
+ },
+ .probe = isl29028_probe,
+ .remove = __devexit_p(isl29028_remove),
+ .id_table = isl29028_id,
+};
+
+static int __init isl29028_init(void)
+{
+ return i2c_add_driver(&isl29028_driver);
+}
+
+static void __exit isl29028_exit(void)
+{
+ i2c_del_driver(&isl29028_driver);
+}
+
+module_init(isl29028_init);
+module_exit(isl29028_exit);
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 33919e87e7ce..c3481a560375 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -517,12 +517,6 @@ static int ak8975_probe(struct i2c_client *client,
goto exit_gpio;
}
data = iio_priv(indio_dev);
- /* Perform some basic start-of-day setup of the device. */
- err = ak8975_setup(client);
- if (err < 0) {
- dev_err(&client->dev, "AK8975 initialization fails\n");
- goto exit_gpio;
- }
i2c_set_clientdata(client, indio_dev);
data->client = client;
@@ -533,6 +527,13 @@ static int ak8975_probe(struct i2c_client *client,
indio_dev->info = &ak8975_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ /* Perform some basic start-of-day setup of the device. */
+ err = ak8975_setup(client);
+ if (err < 0) {
+ dev_err(&client->dev, "AK8975 initialization fails\n");
+ goto exit_gpio;
+ }
+
err = iio_device_register(indio_dev);
if (err < 0)
goto exit_free_iio;
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 000000000000..52385914b9ae
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+ tristate "Switch class support"
+ help
+ Say Y here to enable switch class support. This allows
+ userspace to monitor switches via sysfs and uevents.
+
+if SWITCH
+
+config SWITCH_GPIO
+ tristate "GPIO Swith support"
+ depends on GENERIC_GPIO
+ help
+ Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 000000000000..f7606ed4a719
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH) += switch_class.o
+obj-$(CONFIG_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644
index 000000000000..e05fc2591147
--- /dev/null
+++ b/drivers/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
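+/*
+ * Update the switch state and, if it changed, emit a KOBJ_CHANGE uevent
+ * carrying SWITCH_NAME and SWITCH_STATE so userspace can react without
+ * polling sysfs.
+ */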
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644
index 000000000000..7e9faa211e48
--- /dev/null
+++ b/drivers/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
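+/*
+ * The interrupt handler only schedules this work item; the GPIO level is
+ * sampled here, in process context, and the result is pushed to the switch
+ * core via switch_set_state().
+ */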
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+ /* Perform initial detection */
+ gpio_switch_work(&switch_data->work);
+
+ return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = __devexit_p(gpio_switch_remove),
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index 7f50999eebc2..4358847af756 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -307,7 +307,8 @@ static const struct serial8250_config uart_config[] = {
.tx_loadsz = 8,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
UART_FCR_T_TRIG_01,
- .flags = UART_CAP_FIFO | UART_CAP_RTOIE,
+ .flags = UART_CAP_FIFO | UART_CAP_RTOIE |
+ UART_CAP_HW_CTSRTS,
},
};
@@ -1902,8 +1903,13 @@ static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
container_of(port, struct uart_8250_port, port);
unsigned char mcr = 0;
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
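+ /*
+ * On Tegra ports RTS is requested through UART_MCR_HW_RTS so that
+ * the hardware flow-control logic, rather than software, drives
+ * the line.
+ */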
+ if (up->port.type == PORT_TEGRA) {
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_HW_RTS;
+ } else {
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ }
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
@@ -2461,6 +2467,19 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_outp(up, UART_EFR, efr);
}
+ if (up->capabilities & UART_CAP_HW_CTSRTS) {
+ unsigned char mcr = serial_inp(up, UART_MCR);
+ /*
+ * The Tegra UART core supports automatic hardware control of the
+ * RTS and CTS flow-control lines.
+ */
+ if (termios->c_cflag & CRTSCTS)
+ mcr |= UART_MCR_HW_CTS;
+ else
+ mcr &= ~UART_MCR_HW_CTS;
+ serial_outp(up, UART_MCR, mcr);
+ }
+
#ifdef CONFIG_ARCH_OMAP
/* Workaround to enable 115200 baud on OMAP1510 internal ports */
if (cpu_is_omap1510() && is_omap_port(up)) {
@@ -2716,6 +2735,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
+ if (up->port.type == PORT_TEGRA)
+ up->bugs |= UART_BUG_NOMSR;
+
if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
index 6edf4a6a22d4..66a0c936b937 100644
--- a/drivers/tty/serial/8250.h
+++ b/drivers/tty/serial/8250.h
@@ -43,6 +43,7 @@ struct serial8250_config {
#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HW_CTSRTS (1 << 14) /* UART core supports hw control of RTS and CTS */
#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index abf6fa49e07f..5c9d9890c9a5 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -528,6 +528,14 @@ config SERIAL_S5PV210
help
Serial port support for Samsung's S5P Family of SoC's
+config SERIAL_TEGRA
+ boolean "High speed serial support for NVIDIA Tegra SoCs"
+ depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
+ select SERIAL_CORE
+ help
+ Support for the on-chip UARTs on NVIDIA Tegra SoCs, providing
+ /dev/ttyHSx, where x is determined by the number of UARTs on the
+ platform.
config SERIAL_MAX3100
tristate "MAX3100 support"
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 83b4da6a1062..438af89db55e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the kernel serial device drivers.
#
+GCOV_PROFILE_tegra_hsuart.o := y
obj-$(CONFIG_SERIAL_CORE) += serial_core.o
obj-$(CONFIG_SERIAL_21285) += 21285.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
+obj-$(CONFIG_SERIAL_TEGRA) += tegra_hsuart.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 25f3094ec743..23b1f46c9663 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -91,6 +91,9 @@ static void __uart_start(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
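+ /*
+ * Give the low-level driver a chance to wake up its transfer peer
+ * (for example an external chip sharing the UART) before data is
+ * queued for transmission.
+ */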
+ if (port->ops->wake_peer)
+ port->ops->wake_peer(port);
+
if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
!tty->stopped && !tty->hw_stopped)
port->ops->start_tx(port);
diff --git a/drivers/tty/serial/tegra_hsuart.c b/drivers/tty/serial/tegra_hsuart.c
new file mode 100644
index 000000000000..f94dc24b5229
--- /dev/null
+++ b/drivers/tty/serial/tegra_hsuart.c
@@ -0,0 +1,1682 @@
+/*
+ * drivers/serial/tegra_hsuart.c
+ *
+ * High-speed serial driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2009-2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/termios.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/tegra_uart.h>
+
+#include <mach/dma.h>
+#include <mach/clk.h>
+
+#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
+
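+/*
+ * BYTES_TO_ALIGN(x) is the number of bytes (0..3) needed to advance x to
+ * the next 32-bit boundary; it is used to drain the unaligned head of the
+ * circular Tx buffer by PIO before handing the aligned remainder to DMA.
+ */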
+#define BYTES_TO_ALIGN(x) ((unsigned long)(ALIGN((x), sizeof(u32))) - \
+ (unsigned long)(x))
+
+#define UART_RX_DMA_BUFFER_SIZE (2048*8)
+
+#define UART_LSR_FIFOE 0x80
+#define UART_LSR_TXFIFO_FULL 0x100
+#define UART_IER_EORD 0x20
+#define UART_MCR_RTS_EN 0x40
+#define UART_MCR_CTS_EN 0x20
+#define UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
+ UART_LSR_PE | UART_LSR_FE)
+
+#define TX_FORCE_PIO 0
+#define RX_FORCE_PIO 0
+
+static const int dma_req_sel[] = {
+ TEGRA_DMA_REQ_SEL_UARTA,
+ TEGRA_DMA_REQ_SEL_UARTB,
+ TEGRA_DMA_REQ_SEL_UARTC,
+ TEGRA_DMA_REQ_SEL_UARTD,
+ TEGRA_DMA_REQ_SEL_UARTE,
+};
+
+#define TEGRA_TX_PIO 1
+#define TEGRA_TX_DMA 2
+
+#define TEGRA_UART_MIN_DMA 16
+#define TEGRA_UART_FIFO_SIZE 8
+
+#define TEGRA_UART_CLOSED 0
+#define TEGRA_UART_OPENED 1
+#define TEGRA_UART_CLOCK_OFF 2
+#define TEGRA_UART_SUSPEND 3
+
+/* The Tx FIFO trigger level encoding in the Tegra UART is the
+ * reverse of a conventional UART. */
+#define TEGRA_UART_TX_TRIG_16B 0x00
+#define TEGRA_UART_TX_TRIG_8B 0x10
+#define TEGRA_UART_TX_TRIG_4B 0x20
+#define TEGRA_UART_TX_TRIG_1B 0x30
+
+struct tegra_uart_port {
+ struct uart_port uport;
+ char port_name[32];
+
+ /* Module info */
+ unsigned long size;
+ struct clk *clk;
+ unsigned int baud;
+
+ /* Register shadow */
+ unsigned char fcr_shadow;
+ unsigned char mcr_shadow;
+ unsigned char lcr_shadow;
+ unsigned char ier_shadow;
+ bool use_cts_control;
+ bool rts_active;
+
+ int tx_in_progress;
+ unsigned int tx_bytes;
+
+ dma_addr_t xmit_dma_addr;
+
+ /* TX DMA */
+ struct tegra_dma_req tx_dma_req;
+ struct tegra_dma_channel *tx_dma;
+
+ /* RX DMA */
+ struct tegra_dma_req rx_dma_req;
+ struct tegra_dma_channel *rx_dma;
+
+ bool use_rx_dma;
+ bool use_tx_dma;
+ int uart_state;
+ bool rx_timeout;
+ int rx_in_progress;
+};
+
+static inline u8 uart_readb(struct tegra_uart_port *t, unsigned long reg)
+{
+ u8 val = readb(t->uport.membase + (reg << t->uport.regshift));
+ dev_vdbg(t->uport.dev, "%s: %p %03lx = %02x\n", __func__,
+ t->uport.membase, reg << t->uport.regshift, val);
+ return val;
+}
+
+static inline u32 uart_readl(struct tegra_uart_port *t, unsigned long reg)
+{
+ u32 val = readl(t->uport.membase + (reg << t->uport.regshift));
+ dev_vdbg(t->uport.dev, "%s: %p %03lx = %02x\n", __func__,
+ t->uport.membase, reg << t->uport.regshift, val);
+ return val;
+}
+
+static inline void uart_writeb(struct tegra_uart_port *t, u8 val,
+ unsigned long reg)
+{
+ dev_vdbg(t->uport.dev, "%s: %p %03lx %02x\n",
+ __func__, t->uport.membase, reg << t->uport.regshift, val);
+ writeb(val, t->uport.membase + (reg << t->uport.regshift));
+}
+
+static inline void uart_writel(struct tegra_uart_port *t, u32 val,
+ unsigned long reg)
+{
+ dev_vdbg(t->uport.dev, "%s: %p %03lx %08x\n",
+ __func__, t->uport.membase, reg << t->uport.regshift, val);
+ writel(val, t->uport.membase + (reg << t->uport.regshift));
+}
+
+static void tegra_set_baudrate(struct tegra_uart_port *t, unsigned int baud);
+static void tegra_set_mctrl(struct uart_port *u, unsigned int mctrl);
+static void do_handle_rx_pio(struct tegra_uart_port *t);
+static void do_handle_rx_dma(struct tegra_uart_port *t);
+static void set_rts(struct tegra_uart_port *t, bool active);
+static void set_dtr(struct tegra_uart_port *t, bool active);
+
+static void fill_tx_fifo(struct tegra_uart_port *t, int max_bytes)
+{
+ int i;
+ struct circ_buf *xmit = &t->uport.state->xmit;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ unsigned long lsr;
+#endif
+
+ for (i = 0; i < max_bytes; i++) {
+ BUG_ON(uart_circ_empty(xmit));
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
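+ /* Chips other than Tegra2 report a Tx FIFO full bit in the
+ * LSR; stop filling once it is set. */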
+ lsr = uart_readl(t, UART_LSR);
+ if ((lsr & UART_LSR_TXFIFO_FULL))
+ break;
+#endif
+ uart_writeb(t, xmit->buf[xmit->tail], UART_TX);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ t->uport.icount.tx++;
+ }
+}
+
+static void tegra_start_pio_tx(struct tegra_uart_port *t, unsigned int bytes)
+{
+ if (bytes > TEGRA_UART_FIFO_SIZE)
+ bytes = TEGRA_UART_FIFO_SIZE;
+
+ t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
+ t->fcr_shadow |= TEGRA_UART_TX_TRIG_8B;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ t->tx_in_progress = TEGRA_TX_PIO;
+ t->tx_bytes = bytes;
+ t->ier_shadow |= UART_IER_THRI;
+ uart_writeb(t, t->ier_shadow, UART_IER);
+}
+
+static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes)
+{
+ struct circ_buf *xmit;
+ xmit = &t->uport.state->xmit;
+
+ dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
+ t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+
+ t->tx_bytes = bytes & ~(sizeof(u32)-1);
+ t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail;
+ t->tx_dma_req.size = t->tx_bytes;
+
+ t->tx_in_progress = TEGRA_TX_DMA;
+
+ tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req);
+}
+
+/* Called with u->lock taken */
+static void tegra_start_next_tx(struct tegra_uart_port *t)
+{
+ unsigned long tail;
+ unsigned long count;
+
+ struct circ_buf *xmit;
+
+ xmit = &t->uport.state->xmit;
+ tail = (unsigned long)&xmit->buf[xmit->tail];
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+
+ dev_vdbg(t->uport.dev, "+%s %lu %d\n", __func__, count,
+ t->tx_in_progress);
+
+ if (count == 0)
+ goto out;
+
+ if (!t->use_tx_dma || count < TEGRA_UART_MIN_DMA)
+ tegra_start_pio_tx(t, count);
+ else if (BYTES_TO_ALIGN(tail) > 0)
+ tegra_start_pio_tx(t, BYTES_TO_ALIGN(tail));
+ else
+ tegra_start_dma_tx(t, count);
+
+out:
+ dev_vdbg(t->uport.dev, "-%s", __func__);
+}
+
+/* Called by serial core driver with u->lock taken. */
+static void tegra_start_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+ struct circ_buf *xmit;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ xmit = &u->state->xmit;
+
+ if (!uart_circ_empty(xmit) && !t->tx_in_progress)
+ tegra_start_next_tx(t);
+}
+
+static int tegra_start_dma_rx(struct tegra_uart_port *t)
+{
+ wmb();
+ if (tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req)) {
+ dev_err(t->uport.dev, "Could not enqueue Rx DMA req\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void tegra_rx_dma_threshold_callback(struct tegra_dma_req *req)
+{
+ struct tegra_uart_port *t = req->dev;
+ struct uart_port *u = &t->uport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+
+ do_handle_rx_dma(t);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+}
+
+/* It is expected that the callers take the UART lock when this API is called.
+ *
+ * There are 2 contexts when this function is called:
+ *
+ * 1. DMA ISR - the DMA ISR triggers the threshold complete callback, which
+ * calls the dequeue API, which in turn calls this callback. The UART lock
+ * is taken during the call to the threshold callback.
+ *
+ * 2. UART ISR - the UART ISR calls the dequeue API, which in turn calls
+ * this callback. In this case, the UART ISR takes the UART lock.
+ */
+static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
+{
+ struct tegra_uart_port *t = req->dev;
+ struct uart_port *u = &t->uport;
+ struct tty_struct *tty = u->state->port.tty;
+ int copied;
+
+ /* If we are here, DMA is stopped */
+
+ dev_dbg(t->uport.dev, "%s: %d %d\n", __func__, req->bytes_transferred,
+ req->status);
+ if (req->bytes_transferred) {
+ t->uport.icount.rx += req->bytes_transferred;
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(req->virt_addr)),
+ req->bytes_transferred);
+ if (copied != req->bytes_transferred) {
+ WARN_ON(1);
+ dev_err(t->uport.dev, "Not able to copy uart data "
+ "to tty layer: req %d, copied %d\n",
+ req->bytes_transferred, copied);
+ }
+ }
+
+ do_handle_rx_pio(t);
+
+ /* If the request was aborted, the caller pushes the read data instead. */
+ if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
+ return;
+
+ spin_unlock(&u->lock);
+ tty_flip_buffer_push(u->state->port.tty);
+ spin_lock(&u->lock);
+}
+
+/* Lock already taken */
+static void do_handle_rx_dma(struct tegra_uart_port *t)
+{
+ struct uart_port *u = &t->uport;
+ if (t->rts_active)
+ set_rts(t, false);
+ tegra_dma_dequeue(t->rx_dma);
+ tty_flip_buffer_push(u->state->port.tty);
+ /* enqueue the request again */
+ tegra_start_dma_rx(t);
+ if (t->rts_active)
+ set_rts(t, true);
+}
+
+/* Wait for a symbol-time. */
+static void wait_sym_time(struct tegra_uart_port *t, unsigned int syms)
+{
+
+ /* Definitely have a start bit. */
+ unsigned int bits = 1;
+ switch (t->lcr_shadow & 3) {
+ case UART_LCR_WLEN5:
+ bits += 5;
+ break;
+ case UART_LCR_WLEN6:
+ bits += 6;
+ break;
+ case UART_LCR_WLEN7:
+ bits += 7;
+ break;
+ default:
+ bits += 8;
+ break;
+ }
+
+ /* Technically, 5 data bits get 1.5 stop bits... */
+ if (t->lcr_shadow & UART_LCR_STOP) {
+ bits += 2;
+ } else {
+ bits++;
+ }
+
+ if (t->lcr_shadow & UART_LCR_PARITY)
+ bits++;
+
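+ /* For example, 8N1 at 115200 baud is 10 bits per symbol, so one
+ * symbol time is roughly 87 microseconds. */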
+ if (likely(t->baud))
+ udelay(DIV_ROUND_UP(syms * bits * 1000000, t->baud));
+}
+
+/* Flush desired FIFO. */
+static void tegra_fifo_reset(struct tegra_uart_port *t, u8 fcr_bits)
+{
+ unsigned char fcr = t->fcr_shadow;
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ uart_writeb(t, fcr, UART_FCR);
+#else
+ /* HW issue: reset the Tx FIFO in non-FIFO
+ mode to avoid sending any extra characters. */
+ fcr &= ~UART_FCR_ENABLE_FIFO;
+ uart_writeb(t, fcr, UART_FCR);
+ udelay(60);
+ fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ uart_writeb(t, fcr, UART_FCR);
+ fcr |= UART_FCR_ENABLE_FIFO;
+ uart_writeb(t, fcr, UART_FCR);
+#endif
+ uart_readb(t, UART_SCR); /* Dummy read to ensure the write is posted */
+ wait_sym_time(t, 1); /* Wait for the flush to propagate. */
+}
+
+static char do_decode_rx_error(struct tegra_uart_port *t, u8 lsr)
+{
+ char flag = TTY_NORMAL;
+
+ if (unlikely(lsr & UART_LSR_ANY)) {
+ if (lsr & UART_LSR_OE) {
+ /* Overrun error */
+ flag |= TTY_OVERRUN;
+ t->uport.icount.overrun++;
+ dev_err(t->uport.dev, "Got overrun errors\n");
+ } else if (lsr & UART_LSR_PE) {
+ /* Parity error */
+ flag |= TTY_PARITY;
+ t->uport.icount.parity++;
+ dev_err(t->uport.dev, "Got Parity errors\n");
+ } else if (lsr & UART_LSR_FE) {
+ flag |= TTY_FRAME;
+ t->uport.icount.frame++;
+ dev_err(t->uport.dev, "Got frame errors\n");
+ } else if (lsr & UART_LSR_BI) {
+ dev_err(t->uport.dev, "Got Break\n");
+ t->uport.icount.brk++;
+ /* If FIFO read error without any data, reset Rx FIFO */
+ if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
+ tegra_fifo_reset(t, UART_FCR_CLEAR_RCVR);
+ }
+ }
+ return flag;
+}
+
+static void do_handle_rx_pio(struct tegra_uart_port *t)
+{
+ int count = 0;
+ do {
+ char flag = TTY_NORMAL;
+ unsigned char lsr = 0;
+ unsigned char ch;
+
+
+ lsr = uart_readb(t, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ break;
+
+ flag = do_decode_rx_error(t, lsr);
+ ch = uart_readb(t, UART_RX);
+ t->uport.icount.rx++;
+ count++;
+
+ if (!uart_handle_sysrq_char(&t->uport, ch))
+ uart_insert_char(&t->uport, lsr, UART_LSR_OE, ch, flag);
+ } while (1);
+
+ dev_dbg(t->uport.dev, "PIO received %d bytes\n", count);
+
+ return;
+}
+
+static void do_handle_modem_signal(struct uart_port *u)
+{
+ unsigned char msr;
+ struct tegra_uart_port *t;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ msr = uart_readb(t, UART_MSR);
+ if (msr & UART_MSR_CTS)
+ dev_dbg(u->dev, "CTS triggered\n");
+ if (msr & UART_MSR_DSR)
+ dev_dbg(u->dev, "DSR enabled\n");
+ if (msr & UART_MSR_DCD)
+ dev_dbg(u->dev, "CD enabled\n");
+ if (msr & UART_MSR_RI)
+ dev_dbg(u->dev, "RI enabled\n");
+ return;
+}
+
+static void do_handle_tx_pio(struct tegra_uart_port *t)
+{
+ struct circ_buf *xmit = &t->uport.state->xmit;
+
+ fill_tx_fifo(t, t->tx_bytes);
+
+ t->tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&t->uport);
+
+ tegra_start_next_tx(t);
+ return;
+}
+
+static void tegra_tx_dma_complete_callback(struct tegra_dma_req *req)
+{
+ struct tegra_uart_port *t = req->dev;
+ struct circ_buf *xmit = &t->uport.state->xmit;
+ int count = req->bytes_transferred;
+ unsigned long flags;
+
+ dev_vdbg(t->uport.dev, "%s: %d\n", __func__, count);
+
+ /* Update xmit pointers without lock if dma aborted. */
+ if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED) {
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ t->tx_in_progress = 0;
+ return;
+ }
+
+ spin_lock_irqsave(&t->uport.lock, flags);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ t->tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&t->uport);
+
+ tegra_start_next_tx(t);
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+static irqreturn_t tegra_uart_isr(int irq, void *data)
+{
+ struct tegra_uart_port *t = data;
+ struct uart_port *u = &t->uport;
+ unsigned char iir;
+ unsigned char ier;
+ bool is_rx_int = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+ t = container_of(u, struct tegra_uart_port, uport);
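+ /*
+ * Service interrupt causes until the IIR reports none pending. In DMA
+ * mode, Rx interrupts only mark is_rx_int and are then masked; the
+ * actual Rx DMA drain runs once the controller is idle, just before
+ * returning.
+ */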
+ while (1) {
+ iir = uart_readb(t, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ if (likely(t->use_rx_dma) && is_rx_int) {
+ do_handle_rx_dma(t);
+
+ if (t->rx_in_progress) {
+ ier = t->ier_shadow;
+ ier |= (UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+ }
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(u->dev, "tegra_uart_isr iir = 0x%x (%d)\n", iir,
+ (iir >> 1) & 0x7);
+ switch ((iir >> 1) & 0x7) {
+ case 0: /* Modem signal change interrupt */
+ do_handle_modem_signal(u);
+ break;
+ case 1: /* Transmit interrupt only triggered when using PIO */
+ t->ier_shadow &= ~UART_IER_THRI;
+ uart_writeb(t, t->ier_shadow, UART_IER);
+ do_handle_tx_pio(t);
+ break;
+ case 4: /* End of data */
+ case 6: /* Rx timeout */
+ case 2: /* Receive */
+ if (likely(t->use_rx_dma)) {
+ if (!is_rx_int) {
+ is_rx_int = true;
+ /* Disable interrupts */
+ ier = t->ier_shadow;
+ ier |= UART_IER_RDI;
+ uart_writeb(t, ier, UART_IER);
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+ }
+ } else {
+ do_handle_rx_pio(t);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+ tty_flip_buffer_push(u->state->port.tty);
+ spin_lock_irqsave(&u->lock, flags);
+ }
+ break;
+ case 3: /* Receive error */
+ /* FIXME how to handle this? Why do we get here */
+ do_decode_rx_error(t, uart_readb(t, UART_LSR));
+ break;
+ case 5: /* break nothing to handle */
+ case 7: /* break nothing to handle */
+ break;
+ }
+ }
+}
+
+static void tegra_stop_rx(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+ unsigned char ier;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ if (t->rts_active)
+ set_rts(t, false);
+
+ if (t->rx_in_progress) {
+ wait_sym_time(t, 1); /* wait a character interval */
+
+ ier = t->ier_shadow;
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+ t->rx_in_progress = 0;
+
+ if (t->use_rx_dma && t->rx_dma)
+ tegra_dma_dequeue(t->rx_dma);
+ else
+ do_handle_rx_pio(t);
+
+ tty_flip_buffer_push(u->state->port.tty);
+ }
+
+ return;
+}
+
+static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
+{
+ unsigned long flags;
+ int retry = 0;
+ unsigned long char_time = DIV_ROUND_UP(10000000, t->baud);
+ unsigned long fifo_empty_time = t->uport.fifosize * char_time;
+ unsigned long wait_time;
+ unsigned char lsr;
+ unsigned char msr;
+ unsigned char mcr;
+
+ /* Disable interrupts */
+ uart_writeb(t, 0, UART_IER);
+
+ lsr = uart_readb(t, UART_LSR);
+ if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
+ msr = uart_readb(t, UART_MSR);
+ mcr = uart_readb(t, UART_MCR);
+ if ((mcr & UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
+ dev_err(t->uport.dev, "%s: Tx fifo not empty and "
+ "slave disabled CTS; waiting for slave to"
+ " be ready\n", __func__);
+
+ /* Wait for Tx fifo to be empty */
+ while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
+ wait_time = min_t(unsigned long, fifo_empty_time, 100);
+ udelay(wait_time);
+ fifo_empty_time -= wait_time;
+ if (!fifo_empty_time) {
+ msr = uart_readb(t, UART_MSR);
+ mcr = uart_readb(t, UART_MCR);
+ if ((mcr & UART_MCR_CTS_EN) &&
+ (msr & UART_MSR_CTS))
+ dev_err(t->uport.dev, "%s: Slave is "
+ "still not ready!\n", __func__);
+ break;
+ }
+ lsr = uart_readb(t, UART_LSR);
+ }
+ }
+
+ spin_lock_irqsave(&t->uport.lock, flags);
+
+ /* Reset the Rx and Tx FIFOs */
+ tegra_fifo_reset(t, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
+
+ t->baud = 0;
+ t->uart_state = TEGRA_UART_CLOSED;
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
+
+ clk_disable(t->clk);
+}
+
+static void tegra_uart_free_rx_dma_buffer(struct tegra_uart_port *t)
+{
+ if (likely(t->rx_dma_req.dest_addr))
+ dma_free_coherent(t->uport.dev, t->rx_dma_req.size,
+ t->rx_dma_req.virt_addr, t->rx_dma_req.dest_addr);
+ t->rx_dma_req.dest_addr = 0;
+ t->rx_dma_req.virt_addr = NULL;
+}
+
+static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
+{
+ if (!t->use_rx_dma)
+ return;
+
+ tegra_dma_free_channel(t->rx_dma);
+ t->rx_dma = NULL;
+ t->use_rx_dma = false;
+}
+
+static int tegra_uart_hw_init(struct tegra_uart_port *t)
+{
+ unsigned char ier;
+
+ dev_vdbg(t->uport.dev, "+tegra_uart_hw_init\n");
+
+ t->fcr_shadow = 0;
+ t->mcr_shadow = 0;
+ t->lcr_shadow = 0;
+ t->ier_shadow = 0;
+ t->baud = 0;
+
+ clk_enable(t->clk);
+
+ /* Reset the UART controller to clear all previous status.*/
+ tegra_periph_reset_assert(t->clk);
+ udelay(100);
+ tegra_periph_reset_deassert(t->clk);
+ udelay(100);
+
+ t->rx_in_progress = 0;
+
+ /* Set the trigger levels
+ *
+ * For PIO mode:
+ *
+ * For receive, this interrupts the CPU after that many bytes
+ * have been received; the receive timeout interrupt covers any
+ * remaining bytes.
+ *
+ * The Rx high watermark is set to 4.
+ *
+ * For transmit, if the transmit interrupt is enabled, this
+ * interrupts the CPU when the number of entries in the FIFO reaches
+ * the low watermark.
+ *
+ * The Tx low watermark is set to 8.
+ *
+ * For DMA mode:
+ *
+ * Set the Tx trigger to 4. This should match the DMA burst size
+ * programmed in the DMA registers.
+ */
+ t->fcr_shadow = UART_FCR_ENABLE_FIFO;
+ t->fcr_shadow |= UART_FCR_R_TRIG_01;
+ t->fcr_shadow |= TEGRA_UART_TX_TRIG_8B;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+
+ if (t->use_rx_dma) {
+ /* initialize the UART for a simple default configuration
+ * so that the receive DMA buffer may be enqueued */
+ t->lcr_shadow = 3; /* no parity, stop, 8 data bits */
+ tegra_set_baudrate(t, 115200);
+ t->fcr_shadow |= UART_FCR_DMA_SELECT;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ if (tegra_start_dma_rx(t)) {
+ dev_err(t->uport.dev, "Rx DMA enqueue failed\n");
+ tegra_uart_free_rx_dma(t);
+ t->fcr_shadow &= ~UART_FCR_DMA_SELECT;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ }
+ } else {
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ }
+
+ t->rx_in_progress = 1;
+
+ /*
+ * Enable IE_RXS for the receive status interrupts like line errors.
+ * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+ *
+ * If using DMA mode, enable EORD instead of receive interrupt which
+ * will interrupt after the UART is done with the receive instead of
+ * the interrupt when the FIFO "threshold" is reached.
+ *
+ * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs when
+ * data is sitting in the FIFO and cannot be transferred to the
+ * DMA because the DMA size alignment (4 bytes) is not met. EORD is
+ * triggered when the incoming data stream pauses for 4
+ * character times.
+ *
+ * For pauses in data that is not 4-byte aligned, we get
+ * both EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first,
+ * then EORD.
+ *
+ * Don't get confused, believe in the magic of nvidia hw...:-)
+ */
+ ier = 0;
+ ier |= UART_IER_RLSI | UART_IER_RTOIE;
+ if (t->use_rx_dma)
+ ier |= UART_IER_EORD;
+ else
+ ier |= UART_IER_RDI;
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+
+ t->uart_state = TEGRA_UART_OPENED;
+ dev_vdbg(t->uport.dev, "-tegra_uart_hw_init\n");
+ return 0;
+}
+
+static int tegra_uart_init_rx_dma_buffer(struct tegra_uart_port *t)
+{
+ dma_addr_t rx_dma_phys;
+ void *rx_dma_virt;
+
+ t->rx_dma_req.size = UART_RX_DMA_BUFFER_SIZE;
+ rx_dma_virt = dma_alloc_coherent(t->uport.dev,
+ t->rx_dma_req.size, &rx_dma_phys, GFP_KERNEL);
+ if (!rx_dma_virt) {
+ dev_err(t->uport.dev, "DMA buffer allocation failed\n");
+ return -ENOMEM;
+ }
+ t->rx_dma_req.dest_addr = rx_dma_phys;
+ t->rx_dma_req.virt_addr = rx_dma_virt;
+
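+ /* Continuous Rx DMA: the source is the UART Rx FIFO register
+ * (8-bit reads, wrapping every 4 bytes) and the destination is the
+ * coherent ring buffer in memory (32-bit writes, no wrap). */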
+ t->rx_dma_req.source_addr = (unsigned long)t->uport.mapbase;
+ t->rx_dma_req.source_wrap = 4;
+ t->rx_dma_req.dest_wrap = 0;
+ t->rx_dma_req.to_memory = 1;
+ t->rx_dma_req.source_bus_width = 8;
+ t->rx_dma_req.dest_bus_width = 32;
+ t->rx_dma_req.req_sel = dma_req_sel[t->uport.line];
+ t->rx_dma_req.complete = tegra_rx_dma_complete_callback;
+ t->rx_dma_req.threshold = tegra_rx_dma_threshold_callback;
+ t->rx_dma_req.dev = t;
+
+ return 0;
+}
+
+static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
+{
+ t->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS,
+ "uart_rx_%d", t->uport.line);
+ if (!t->rx_dma) {
+ dev_err(t->uport.dev, "%s: failed to allocate RX DMA.\n",
+ __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int tegra_startup(struct uart_port *u)
+{
+ struct tegra_uart_port *t = container_of(u,
+ struct tegra_uart_port, uport);
+ int ret = 0;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ sprintf(t->port_name, "tegra_uart_%d", u->line);
+
+ t->use_tx_dma = false;
+ if (!TX_FORCE_PIO) {
+ t->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
+ "uart_tx_%d", u->line);
+ if (t->tx_dma)
+ t->use_tx_dma = true;
+ else
+ pr_err("%s: failed to allocate TX DMA.\n", __func__);
+ }
+ if (t->use_tx_dma) {
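+ /* One-shot Tx DMA: the source is the circular xmit buffer in
+ * memory (32-bit reads, no wrap) and the destination is the UART
+ * Tx FIFO register (8-bit writes, wrapping every 4 bytes). */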
+ t->tx_dma_req.instance = u->line;
+ t->tx_dma_req.complete = tegra_tx_dma_complete_callback;
+ t->tx_dma_req.to_memory = 0;
+
+ t->tx_dma_req.dest_addr = (unsigned long)t->uport.mapbase;
+ t->tx_dma_req.dest_wrap = 4;
+ t->tx_dma_req.source_wrap = 0;
+ t->tx_dma_req.source_bus_width = 32;
+ t->tx_dma_req.dest_bus_width = 8;
+ t->tx_dma_req.req_sel = dma_req_sel[t->uport.line];
+ t->tx_dma_req.dev = t;
+ t->tx_dma_req.size = 0;
+ t->xmit_dma_addr = dma_map_single(t->uport.dev,
+ t->uport.state->xmit.buf, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ }
+ t->tx_in_progress = 0;
+
+ t->use_rx_dma = false;
+ if (!RX_FORCE_PIO && t->rx_dma_req.virt_addr) {
+ if (!tegra_uart_init_rx_dma(t))
+ t->use_rx_dma = true;
+ }
+
+ ret = tegra_uart_hw_init(t);
+ if (ret)
+ goto fail;
+
+ dev_dbg(u->dev, "Requesting IRQ %d\n", u->irq);
+ msleep(1);
+
+ ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+ t->port_name, t);
+ if (ret) {
+ dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+ goto fail;
+ }
+ dev_dbg(u->dev, "Started UART port %d\n", u->line);
+
+ return 0;
+fail:
+ dev_err(u->dev, "Tegra UART startup failed\n");
+ return ret;
+}
+
+static void tegra_shutdown(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ dev_vdbg(u->dev, "+tegra_shutdown\n");
+
+ tegra_uart_hw_deinit(t);
+
+ t->rx_in_progress = 0;
+ t->tx_in_progress = 0;
+
+ tegra_uart_free_rx_dma(t);
+ if (t->use_tx_dma) {
+ tegra_dma_free_channel(t->tx_dma);
+ t->tx_dma = NULL;
+ t->use_tx_dma = false;
+ dma_unmap_single(t->uport.dev, t->xmit_dma_addr, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ t->xmit_dma_addr = 0;
+ }
+
+ free_irq(u->irq, t);
+ dev_vdbg(u->dev, "-tegra_shutdown\n");
+}
+
+static void tegra_wake_peer(struct uart_port *u)
+{
+ struct tegra_uart_platform_data *pdata = u->dev->platform_data;
+
+ if (pdata && pdata->wake_peer)
+ pdata->wake_peer(u);
+}
+
+static unsigned int tegra_get_mctrl(struct uart_port *u)
+{
+ /* RI - Ring detector is active.
+ * CD/DCD/CAR - Carrier detect is always active. For some reason
+ * Linux has different names for carrier detect.
+ * DSR - Data Set Ready is reported active as the hardware doesn't
+ * support it; it is unclear whether Linux supports this yet.
+ * CTS - Clear To Send. Always reported active, as the hardware
+ * handles CTS automatically.
+ */
+ return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void set_rts(struct tegra_uart_port *t, bool active)
+{
+ unsigned char mcr;
+ mcr = t->mcr_shadow;
+ if (active)
+ mcr |= UART_MCR_RTS_EN;
+ else
+ mcr &= ~UART_MCR_RTS_EN;
+ if (mcr != t->mcr_shadow) {
+ uart_writeb(t, mcr, UART_MCR);
+ t->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void set_dtr(struct tegra_uart_port *t, bool active)
+{
+ unsigned char mcr;
+ mcr = t->mcr_shadow;
+ if (active)
+ mcr |= UART_MCR_DTR;
+ else
+ mcr &= ~UART_MCR_DTR;
+ if (mcr != t->mcr_shadow) {
+ uart_writeb(t, mcr, UART_MCR);
+ t->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void tegra_set_mctrl(struct uart_port *u, unsigned int mctrl)
+{
+ unsigned char mcr;
+ struct tegra_uart_port *t;
+
+ dev_dbg(u->dev, "tegra_set_mctrl called with %d\n", mctrl);
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ mcr = t->mcr_shadow;
+ if (mctrl & TIOCM_RTS) {
+ t->rts_active = true;
+ set_rts(t, true);
+ } else {
+ t->rts_active = false;
+ set_rts(t, false);
+ }
+
+ if (mctrl & TIOCM_DTR)
+ set_dtr(t, true);
+ else
+ set_dtr(t, false);
+ return;
+}
+
+static void tegra_break_ctl(struct uart_port *u, int break_ctl)
+{
+ struct tegra_uart_port *t;
+ unsigned char lcr;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ lcr = t->lcr_shadow;
+ if (break_ctl)
+ lcr |= UART_LCR_SBC;
+ else
+ lcr &= ~UART_LCR_SBC;
+ uart_writeb(t, lcr, UART_LCR);
+ t->lcr_shadow = lcr;
+}
+
+static int tegra_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static void tegra_release_port(struct uart_port *u)
+{
+
+}
+
+static unsigned int tegra_tx_empty(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+ unsigned int ret = 0;
+ unsigned long flags;
+ unsigned char lsr;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ dev_vdbg(u->dev, "+tegra_tx_empty\n");
+
+ spin_lock_irqsave(&u->lock, flags);
+ if (!t->tx_in_progress) {
+ lsr = uart_readb(t, UART_LSR);
+ if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
+ ret = TIOCSER_TEMT;
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+
+ dev_vdbg(u->dev, "-tegra_tx_empty\n");
+ return ret;
+}
+
+static void tegra_stop_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ if (t->use_tx_dma)
+ tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
+
+ return;
+}
+
+static void tegra_enable_ms(struct uart_port *u)
+{
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
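+/*
+ * Tegra 7.1 fractional dividers generate rate = parent_rate * 2 / (n + 2).
+ * Return the divider setting n, rounded so the resulting rate does not
+ * exceed the requested rate, or -EINVAL if n does not fit in 8 bits.
+ */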
+static int clk_div71_get_divider(unsigned long parent_rate,
+ unsigned long rate)
+{
+ s64 divider_u71 = parent_rate;
+ if (!rate)
+ return -EINVAL;
+
+ divider_u71 *= 2;
+ divider_u71 += rate - 1;
+ do_div(divider_u71, rate);
+
+ if ((divider_u71 - 2) < 0)
+ return 0;
+
+ if ((divider_u71 - 2) > 255)
+ return -EINVAL;
+
+ return divider_u71 - 2;
+}
+#endif
+
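+/*
+ * Standard 16-bit DLL/DLM divisor: rate = parent_rate / n. Return n
+ * rounded up, or -EINVAL if it does not fit in 16 bits.
+ */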
+static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate)
+{
+ s64 divider_u16;
+
+ divider_u16 = parent_rate;
+ if (!rate)
+ return -EINVAL;
+ divider_u16 += rate - 1;
+ do_div(divider_u16, rate);
+
+ if (divider_u16 > 0xFFFF)
+ return -EINVAL;
+
+ return divider_u16;
+}
+
+static unsigned long find_best_clock_source(struct tegra_uart_port *t,
+ unsigned long rate)
+{
+ struct uart_port *u = &t->uport;
+ struct tegra_uart_platform_data *pdata;
+ int i;
+ int divider;
+ unsigned long parent_rate;
+ unsigned long new_rate;
+ unsigned long err_rate;
+ unsigned int fin_err = rate;
+ unsigned long fin_rate = rate;
+ int final_index = -1;
+ int count;
+ unsigned long error_2perc;
+
+ pdata = u->dev->platform_data;
+ if (!pdata || !pdata->parent_clk_count)
+ return fin_rate;
+
+ error_2perc = (rate / 50);
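+ /* Stop searching once a candidate clock is within 2% of the
+ * requested rate. */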
+
+ for (count = 0; count < pdata->parent_clk_count; ++count) {
+ parent_rate = pdata->parent_clk_list[count].fixed_clk_rate;
+
+ if (parent_rate < rate)
+ continue;
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ divider = clk_div71_get_divider(parent_rate, rate);
+
+ /* Get the best divider around calculated value */
+ if (divider > 2) {
+ for (i = divider - 2; i < (divider + 2); ++i) {
+ new_rate = ((parent_rate << 1) + i + 1) /
+ (i + 2);
+ err_rate = abs(new_rate - rate);
+ if (err_rate < fin_err) {
+ final_index = count;
+ fin_err = err_rate;
+ fin_rate = new_rate;
+ if (fin_err < error_2perc)
+ break;
+ }
+ }
+ if (fin_err < error_2perc)
+ break;
+ }
+#endif
+ /* Get the divisor by uart controller dll/dlm */
+ divider = clk_div16_get_divider(parent_rate, rate);
+
+ /* Get the best divider around calculated value */
+ if (divider > 2) {
+ for (i = divider - 2; i < (divider + 2); ++i) {
+ new_rate = parent_rate/i;
+ err_rate = abs(new_rate - rate);
+ if (err_rate < fin_err) {
+ final_index = count;
+ fin_err = err_rate;
+ fin_rate = parent_rate;
+ if (fin_err < error_2perc)
+ break;
+ }
+ }
+ if (fin_err < error_2perc)
+ break;
+ }
+ }
+
+ if (final_index >= 0) {
+ dev_info(t->uport.dev, "Setting clk_src %s\n",
+ pdata->parent_clk_list[final_index].name);
+ clk_set_parent(t->clk,
+ pdata->parent_clk_list[final_index].parent_clk);
+ }
+ return fin_rate;
+}
+
+#define UART_CLOCK_ACCURACY 5
+static void tegra_set_baudrate(struct tegra_uart_port *t, unsigned int baud)
+{
+ unsigned long rate;
+ unsigned int divisor;
+ unsigned char lcr;
+ unsigned int baud_actual;
+ unsigned int baud_delta;
+ unsigned long best_rate;
+
+ if (t->baud == baud)
+ return;
+
+ rate = baud * 16;
+ best_rate = find_best_clock_source(t, rate);
+ clk_set_rate(t->clk, best_rate);
+
+ rate = clk_get_rate(t->clk);
+
+ divisor = rate;
+ do_div(divisor, 16);
+ divisor += baud/2;
+ do_div(divisor, baud);
+
+ /* The allowable baudrate error from desired baudrate is 5% */
+ baud_actual = divisor ? rate / (16 * divisor) : 0;
+ baud_delta = abs(baud_actual - baud);
+ if (WARN_ON(baud_delta * 20 > baud)) {
+ dev_err(t->uport.dev, "requested baud %u, actual %u\n",
+ baud, baud_actual);
+ }
+
+ lcr = t->lcr_shadow;
+ lcr |= UART_LCR_DLAB;
+ uart_writeb(t, lcr, UART_LCR);
+
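+ /*
+ * With DLAB set, register offset 0 is DLL and offset 1 is DLM, which
+ * is why the divisor latch is written through the UART_TX and
+ * UART_IER offsets here.
+ */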
+ uart_writel(t, divisor & 0xFF, UART_TX);
+ uart_writel(t, ((divisor >> 8) & 0xFF), UART_IER);
+
+ lcr &= ~UART_LCR_DLAB;
+ uart_writeb(t, lcr, UART_LCR);
+ uart_readb(t, UART_SCR); /* Dummy read to ensure the write is posted */
+
+ t->baud = baud;
+ wait_sym_time(t, 2); /* wait two character intervals at new rate */
+ dev_dbg(t->uport.dev, "Baud %u clock freq %lu and divisor of %u\n",
+ baud, rate, divisor);
+}
+
+static void tegra_set_termios(struct uart_port *u, struct ktermios *termios,
+ struct ktermios *oldtermios)
+{
+ struct tegra_uart_port *t;
+ unsigned int baud;
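+ /*
+ * Use PIO when Tx DMA is unavailable or the transfer is smaller than
+ * TEGRA_UART_MIN_DMA bytes. Otherwise, first PIO out any bytes before
+ * the next 32-bit boundary so the DMA source address is word aligned,
+ * then DMA the aligned remainder.
+ */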
+ unsigned long flags;
+ unsigned int lcr;
+ unsigned int c_cflag = termios->c_cflag;
+ unsigned char mcr;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ dev_vdbg(t->uport.dev, "+tegra_set_termios\n");
+
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Changing configuration, it is safe to stop any rx now */
+ if (t->rts_active)
+ set_rts(t, false);
+
+ /* Parity */
+ lcr = t->lcr_shadow;
+ lcr &= ~UART_LCR_PARITY;
+ if (PARENB == (c_cflag & PARENB)) {
+ if (CMSPAR == (c_cflag & CMSPAR)) {
+ /* FIXME What is space parity? */
+ /* data |= SPACE_PARITY; */
+ } else if (c_cflag & PARODD) {
+ lcr |= UART_LCR_PARITY;
+ lcr &= ~UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ } else {
+ lcr |= UART_LCR_PARITY;
+ lcr |= UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ }
+ }
+
+ lcr &= ~UART_LCR_WLEN8;
+ switch (c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ /* Stop bits */
+ if (termios->c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+ else
+ lcr &= ~UART_LCR_STOP;
+
+ uart_writeb(t, lcr, UART_LCR);
+ t->lcr_shadow = lcr;
+
+ /* Baud rate. */
+ baud = uart_get_baud_rate(u, termios, oldtermios, 200, 4000000);
+ spin_unlock_irqrestore(&u->lock, flags);
+ tegra_set_baudrate(t, baud);
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Flow control */
+ if (termios->c_cflag & CRTSCTS) {
+ mcr = t->mcr_shadow;
+ mcr |= UART_MCR_CTS_EN;
+ mcr &= ~UART_MCR_RTS_EN;
+ t->mcr_shadow = mcr;
+ uart_writeb(t, mcr, UART_MCR);
+ t->use_cts_control = true;
+ /* if top layer has asked to set rts active then do so here */
+ if (t->rts_active)
+ set_rts(t, true);
+ } else {
+ mcr = t->mcr_shadow;
+ mcr &= ~UART_MCR_CTS_EN;
+ mcr &= ~UART_MCR_RTS_EN;
+ t->mcr_shadow = mcr;
+ uart_writeb(t, mcr, UART_MCR);
+ t->use_cts_control = false;
+ }
+
+ /* update the port timeout based on new settings */
+ uart_update_timeout(u, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+ dev_vdbg(t->uport.dev, "-tegra_set_termios\n");
+ return;
+}
+
+/*
+ * Flush any TX data submitted for DMA and PIO. Called when the
+ * TX circular buffer is reset.
+ */
+static void tegra_flush_buffer(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+
+ dev_vdbg(u->dev, "%s called", __func__);
+
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ t->tx_bytes = 0;
+
+ if (t->use_tx_dma) {
+ tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
+ t->tx_dma_req.size = 0;
+ }
+ return;
+}
+
+
+static void tegra_pm(struct uart_port *u, unsigned int state,
+ unsigned int oldstate)
+{
+
+}
+
+static const char *tegra_type(struct uart_port *u)
+{
+ return 0;
+}
+
+static struct uart_ops tegra_uart_ops = {
+ .tx_empty = tegra_tx_empty,
+ .set_mctrl = tegra_set_mctrl,
+ .get_mctrl = tegra_get_mctrl,
+ .stop_tx = tegra_stop_tx,
+ .start_tx = tegra_start_tx,
+ .stop_rx = tegra_stop_rx,
+ .flush_buffer = tegra_flush_buffer,
+ .enable_ms = tegra_enable_ms,
+ .break_ctl = tegra_break_ctl,
+ .startup = tegra_startup,
+ .shutdown = tegra_shutdown,
+ .wake_peer = tegra_wake_peer,
+ .set_termios = tegra_set_termios,
+ .pm = tegra_pm,
+ .type = tegra_type,
+ .request_port = tegra_request_port,
+ .release_port = tegra_release_port,
+};
+
+static int tegra_uart_probe(struct platform_device *pdev);
+static int __devexit tegra_uart_remove(struct platform_device *pdev);
+static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state);
+static int tegra_uart_resume(struct platform_device *pdev);
+
+static struct platform_driver tegra_uart_platform_driver = {
+ .remove = tegra_uart_remove,
+ .probe = tegra_uart_probe,
+ .suspend = tegra_uart_suspend,
+ .resume = tegra_uart_resume,
+ .driver = {
+ .name = "tegra_uart"
+ }
+};
+
+static struct uart_driver tegra_uart_driver =
+{
+ .owner = THIS_MODULE,
+ .driver_name = "tegra_uart",
+ .dev_name = "ttyHS",
+ .cons = 0,
+ .nr = 5,
+};
+
+static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_uart_port *t = platform_get_drvdata(pdev);
+ struct uart_port *u;
+
+ if (pdev->id < 0 || pdev->id > tegra_uart_driver.nr)
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+ u = &t->uport;
+ dev_dbg(t->uport.dev, "tegra_uart_suspend called\n");
+
+ /* Enable the clock before calling suspend so that controller
+ registers remain accessible */
+ if (t->uart_state == TEGRA_UART_CLOCK_OFF) {
+ clk_enable(t->clk);
+ t->uart_state = TEGRA_UART_OPENED;
+ }
+
+ uart_suspend_port(&tegra_uart_driver, u);
+ t->uart_state = TEGRA_UART_SUSPEND;
+
+ return 0;
+}
+
+static int tegra_uart_resume(struct platform_device *pdev)
+{
+ struct tegra_uart_port *t = platform_get_drvdata(pdev);
+ struct uart_port *u;
+
+ if (pdev->id < 0 || pdev->id > tegra_uart_driver.nr)
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+ u = &t->uport;
+ dev_dbg(t->uport.dev, "tegra_uart_resume called\n");
+
+ if (t->uart_state == TEGRA_UART_SUSPEND) {
+ uart_resume_port(&tegra_uart_driver, u);
+ }
+ return 0;
+}
+
+
+
+static int __devexit tegra_uart_remove(struct platform_device *pdev)
+{
+ struct tegra_uart_port *t = platform_get_drvdata(pdev);
+ struct uart_port *u;
+
+ if (pdev->id < 0 || pdev->id > tegra_uart_driver.nr)
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+ u = &t->uport;
+ uart_remove_one_port(&tegra_uart_driver, u);
+
+ tegra_uart_free_rx_dma_buffer(t);
+
+ platform_set_drvdata(pdev, NULL);
+
+ pr_info("Unregistered UART port %s%d\n",
+ tegra_uart_driver.dev_name, u->line);
+ kfree(t);
+ return 0;
+}
+
+static int tegra_uart_probe(struct platform_device *pdev)
+{
+ struct tegra_uart_port *t;
+ struct uart_port *u;
+ struct resource *resource;
+ int ret;
+ char name[64];
+ if (pdev->id < 0 || pdev->id > tegra_uart_driver.nr) {
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+ return -ENODEV;
+ }
+
+ t = kzalloc(sizeof(struct tegra_uart_port), GFP_KERNEL);
+ if (!t) {
+ pr_err("%s: Failed to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+ u = &t->uport;
+ u->dev = &pdev->dev;
+ platform_set_drvdata(pdev, u);
+ u->line = pdev->id;
+ u->ops = &tegra_uart_ops;
+ u->type = ~PORT_UNKNOWN;
+ u->fifosize = 32;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource)) {
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ u->mapbase = resource->start;
+ u->membase = IO_ADDRESS(u->mapbase);
+ if (unlikely(!u->membase)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ u->irq = platform_get_irq(pdev, 0);
+ if (unlikely(u->irq < 0)) {
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ u->regshift = 2;
+
+ t->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(t->clk)) {
+ dev_err(&pdev->dev, "Couldn't get the clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = uart_add_one_port(&tegra_uart_driver, u);
+ if (ret) {
+ pr_err("%s: Failed(%d) to add uart port %s%d\n",
+ __func__, ret, tegra_uart_driver.dev_name, u->line);
+ goto fail;
+ }
+
+ snprintf(name, sizeof(name), "tegra_hsuart_%d", u->line);
+ pr_info("Registered UART port %s%d\n",
+ tegra_uart_driver.dev_name, u->line);
+ t->uart_state = TEGRA_UART_CLOSED;
+
+ if (!RX_FORCE_PIO) {
+ ret = tegra_uart_init_rx_dma_buffer(t);
+ if (ret < 0) {
+ pr_err("%s: Failed(%d) to allocate rx dma buffer "
+ "%s%d\n", __func__, ret,
+ tegra_uart_driver.dev_name, u->line);
+ goto rx_dma_buff_fail;
+ }
+ }
+ return ret;
+
+rx_dma_buff_fail:
+ uart_remove_one_port(&tegra_uart_driver, u);
+fail:
+ if (t->clk)
+ clk_put(t->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(t);
+ return ret;
+}
+
+/* Switch off the clock of the uart controller. */
+void tegra_uart_request_clock_off(struct uart_port *uport)
+{
+ unsigned long flags;
+ struct tegra_uart_port *t;
+ bool is_clk_disable = false;
+
+ if (IS_ERR_OR_NULL(uport))
+ BUG();
+
+ dev_vdbg(uport->dev, "tegra_uart_request_clock_off");
+
+ t = container_of(uport, struct tegra_uart_port, uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ if (t->uart_state == TEGRA_UART_OPENED) {
+ is_clk_disable = true;
+ t->uart_state = TEGRA_UART_CLOCK_OFF;
+ }
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ if (is_clk_disable)
+ clk_disable(t->clk);
+
+ return;
+}
+
+/* Switch on the clock of the uart controller */
+void tegra_uart_request_clock_on(struct uart_port *uport)
+{
+ unsigned long flags;
+ struct tegra_uart_port *t;
+ bool is_clk_enable = false;
+
+ if (IS_ERR_OR_NULL(uport))
+ BUG();
+
+ t = container_of(uport, struct tegra_uart_port, uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ if (t->uart_state == TEGRA_UART_CLOCK_OFF) {
+ is_clk_enable = true;
+ t->uart_state = TEGRA_UART_OPENED;
+ }
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ if (is_clk_enable)
+ clk_enable(t->clk);
+
+ return;
+}
+
+/* Set the modem control signals state of uart controller. */
+void tegra_uart_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ unsigned long flags;
+ struct tegra_uart_port *t;
+
+ t = container_of(uport, struct tegra_uart_port, uport);
+ if (t->uart_state != TEGRA_UART_OPENED) {
+ dev_err(t->uport.dev, "Uart is in invalid state\n");
+ return;
+ }
+
+ spin_lock_irqsave(&uport->lock, flags);
+ if (mctrl & TIOCM_RTS) {
+ t->rts_active = true;
+ set_rts(t, true);
+ } else {
+ t->rts_active = false;
+ set_rts(t, false);
+ }
+
+ if (mctrl & TIOCM_DTR)
+ set_dtr(t, true);
+ else
+ set_dtr(t, false);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ return;
+}
+
+/* Report whether the transmit fifo is empty.
+ * Return 0 if the tx fifo is not empty.
+ * Return TIOCSER_TEMT if the tx fifo is empty.
+ */
+int tegra_uart_is_tx_empty(struct uart_port *uport)
+{
+ return tegra_tx_empty(uport);
+}
+
+static int __init tegra_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&tegra_uart_driver);
+ if (unlikely(ret)) {
+ pr_err("Could not register %s driver\n",
+ tegra_uart_driver.driver_name);
+ return ret;
+ }
+
+ ret = platform_driver_register(&tegra_uart_platform_driver);
+ if (unlikely(ret)) {
+ pr_err("Could not register the UART platform "
+ "driver\n");
+ uart_unregister_driver(&tegra_uart_driver);
+ return ret;
+ }
+
+ pr_info("Initialized tegra uart driver\n");
+ return 0;
+}
+
+static void __exit tegra_uart_exit(void)
+{
+ pr_info("Unloading tegra uart driver\n");
+ platform_driver_unregister(&tegra_uart_platform_driver);
+ uart_unregister_driver(&tegra_uart_driver);
+}
+
+module_init(tegra_uart_init);
+module_exit(tegra_uart_exit);
+MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8faa23cd74f1..a9193e3de7e1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -155,7 +155,6 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
wb->urb->transfer_dma = wb->dmah;
wb->urb->transfer_buffer_length = wb->len;
wb->urb->dev = acm->dev;
-
rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
if (rc < 0) {
dev_err(&acm->data->dev,
@@ -183,6 +182,15 @@ static int acm_write_start(struct acm *acm, int wbn)
acm->susp_count);
usb_autopm_get_interface_async(acm->control);
if (acm->susp_count) {
+#ifdef CONFIG_PM
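+ /*
+ * The device is suspended: fully prepare the write urb and park
+ * it on the acm->deferred anchor so acm_resume() can submit it
+ * once the device is awake.
+ */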
+ printk(KERN_DEBUG "%s: buffer urb\n", __func__);
+ acm->transmitting++;
+ wb->urb->transfer_buffer = wb->buf;
+ wb->urb->transfer_dma = wb->dmah;
+ wb->urb->transfer_buffer_length = wb->len;
+ wb->urb->dev = acm->dev;
+ usb_anchor_urb(wb->urb, &acm->deferred);
+#endif
if (!acm->delayed_wb)
acm->delayed_wb = wb;
else
@@ -476,7 +484,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
if (usb_autopm_get_interface(acm->control) < 0)
goto early_bail;
else
- acm->control->needs_remote_wakeup = 1;
+ acm->control->needs_remote_wakeup = 0;
mutex_lock(&acm->mutex);
if (acm->port.count++) {
@@ -865,8 +873,12 @@ static int acm_probe(struct usb_interface *intf,
quirks = (unsigned long)id->driver_info;
num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
+ /* not a real CDC ACM device */
+ if (quirks & NOT_REAL_ACM)
+ return -ENODEV;
+
/* handle quirks deadly to normal probing*/
- if (quirks == NO_UNION_NORMAL) {
+ if (quirks & NO_UNION_NORMAL) {
data_interface = usb_ifnum_to_if(usb_dev, 1);
control_interface = usb_ifnum_to_if(usb_dev, 0);
goto skip_normal_probe;
@@ -1076,6 +1088,7 @@ made_compressed_probe:
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
INIT_WORK(&acm->work, acm_softint);
+ init_usb_anchor(&acm->deferred);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
@@ -1083,6 +1096,8 @@ made_compressed_probe:
acm->is_int_ep = usb_endpoint_xfer_int(epread);
if (acm->is_int_ep)
acm->bInterval = epread->bInterval;
+ if (quirks & NO_HANGUP_IN_RESET_RESUME)
+ acm->no_hangup_in_reset_resume = 1;
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
@@ -1343,6 +1358,7 @@ static int acm_resume(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct acm_wb *wb;
int rv = 0;
+ struct urb *res;
int cnt;
spin_lock_irq(&acm->read_lock);
@@ -1353,10 +1369,21 @@ static int acm_resume(struct usb_interface *intf)
if (cnt)
return 0;
+
mutex_lock(&acm->mutex);
+
+#ifdef CONFIG_PM
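+ /* Submit any write urbs that were parked on acm->deferred while
+ * the device was suspended. */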
+ while ((res = usb_get_from_anchor(&acm->deferred))) {
+ printk(KERN_DEBUG "%s: process buffered request\n", __func__);
+ rv = usb_submit_urb(res, GFP_ATOMIC);
+ if (rv < 0) {
+ dbg("usb_submit_urb(pending request) failed: %d", rv);
+ }
+ }
+#endif
+
if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
-
spin_lock_irq(&acm->write_lock);
if (acm->delayed_wb) {
wb = acm->delayed_wb;
@@ -1367,6 +1394,7 @@ static int acm_resume(struct usb_interface *intf)
spin_unlock_irq(&acm->write_lock);
}
+
/*
* delayed error checking because we must
* do the write path at all cost
@@ -1391,7 +1419,8 @@ static int acm_reset_resume(struct usb_interface *intf)
if (acm->port.count) {
tty = tty_port_tty_get(&acm->port);
if (tty) {
- tty_hangup(tty);
+ if (!acm->no_hangup_in_reset_resume)
+ tty_hangup(tty);
tty_kref_put(tty);
}
}
@@ -1481,6 +1510,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x1519, 0x0020),
+ .driver_info = NO_UNION_NORMAL | NO_HANGUP_IN_RESET_RESUME, /* has no union descriptor */
+ },
/* Nokia S60 phones expose two ACM channels. The first is
* a modem and is picked up by the standard AT-command
@@ -1561,6 +1593,16 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = NO_DATA_INTERFACE,
},
+ /* Exclude XMM6260 boot rom (not running modem software yet) */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = NOT_REAL_ACM,
+ },
+
+ /* Icera 450 */
+ { USB_DEVICE(0x1983, 0x0321),
+ .driver_info = NO_HANGUP_IN_RESET_RESUME,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca7937f26e27..ec59fda787fe 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -115,8 +115,10 @@ struct acm {
unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */
unsigned int throttled:1; /* actually throttled */
unsigned int throttle_req:1; /* throttle requested */
+ unsigned int no_hangup_in_reset_resume:1; /* do not call tty_hangup in acm_reset_resume */
u8 bInterval;
struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
+ struct usb_anchor deferred;
};
#define CDC_DATA_INTERFACE_TYPE 0x0a
@@ -127,3 +129,5 @@ struct acm {
#define NO_CAP_LINE 4
#define NOT_A_MODEM 8
#define NO_DATA_INTERFACE 16
+#define NOT_REAL_ACM 32
+#define NO_HANGUP_IN_RESET_RESUME 64
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 6f5049156e17..f92c3df69195 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -141,7 +141,7 @@ config USB_ATMEL_USBA
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC || ARCH_MXC || ARCH_TEGRA
select USB_GADGET_DUALSPEED
select USB_FSL_MPH_DR_OF if OF
help
@@ -823,6 +823,14 @@ config USB_G_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
+config USB_G_ANDROID
+ boolean "Android Gadget"
+ depends on SWITCH
+ help
+ The Android gadget driver supports multiple USB functions.
+ The functions can be configured via a board file and may be
+ enabled and disabled dynamically.
+
config USB_CDC_COMPOSITE
tristate "CDC Composite Device (Ethernet and ACM)"
depends on NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9ba725af4a08..581a5ae7337e 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -1,6 +1,9 @@
#
# USB peripheral controller drivers
#
+GCOV_PROFILE_fsl_tegra_udc.o := y
+GCOV_PROFILE_fsl_udc_core.o := y
+
ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
obj-$(CONFIG_USB_GADGET) += udc-core.o
@@ -19,6 +22,7 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
+fsl_usb2_udc-$(CONFIG_ARCH_TEGRA) += fsl_tegra_udc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
@@ -51,6 +55,7 @@ g_dbgp-y := dbgp.o
g_nokia-y := nokia.o
g_webcam-y := webcam.o
g_ncm-y := ncm.o
+g_android-y := android.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -69,3 +74,4 @@ obj-$(CONFIG_USB_G_MULTI) += g_multi.o
obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
obj-$(CONFIG_USB_G_NCM) += g_ncm.o
+obj-$(CONFIG_USB_G_ANDROID) += g_android.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644
index 000000000000..034672ff5683
--- /dev/null
+++ b/drivers/usb/gadget/android.c
@@ -0,0 +1,1165 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "composite.c"
+
+#include "f_mass_storage.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_adb.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID 0x18D1
+#define PRODUCT_ID 0x0001
+
+struct android_usb_function {
+ char *name;
+ void *config;
+
+ struct device *dev;
+ char *dev_name;
+ struct device_attribute **attributes;
+
+ /* for android_dev.enabled_functions */
+ struct list_head enabled_list;
+
+ /* Optional: initialization during gadget bind */
+ int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+ /* Optional: cleanup during gadget unbind */
+ void (*cleanup)(struct android_usb_function *);
+
+ int (*bind_config)(struct android_usb_function *, struct usb_configuration *);
+
+ /* Optional: called when the configuration is removed */
+ void (*unbind_config)(struct android_usb_function *, struct usb_configuration *);
+ /* Optional: handle ctrl requests before the device is configured */
+ int (*ctrlrequest)(struct android_usb_function *,
+ struct usb_composite_dev *,
+ const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+ struct android_usb_function **functions;
+ struct list_head enabled_functions;
+ struct usb_composite_dev *cdev;
+ struct device *dev;
+
+ bool enabled;
+ bool connected;
+ bool sw_connected;
+ struct work_struct work;
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_SERIAL_IDX 2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_string,
+ [STRING_PRODUCT_IDX].s = product_string,
+ [STRING_SERIAL_IDX].s = serial_string,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof(device_desc),
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+ .idVendor = __constant_cpu_to_le16(VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(PRODUCT_ID),
+ .bcdDevice = __constant_cpu_to_le16(0xffff),
+ .bNumConfigurations = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+ .label = "android",
+ .unbind = android_unbind_config,
+ .bConfigurationValue = 1,
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = 0xFA, /* 500 mA */
+};
+
+static void android_work(struct work_struct *data)
+{
+ struct android_dev *dev = container_of(data, struct android_dev, work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+ char *connected[2] = { "USB_STATE=CONNECTED", NULL };
+ char *configured[2] = { "USB_STATE=CONFIGURED", NULL };
+ char **uevent_envp = NULL;
+ unsigned long flags;
+
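+ /* Pick at most one state-change uevent: CONFIGURED wins if a config
+ * is active; otherwise report CONNECTED/DISCONNECTED only when the
+ * connection state changed since the last notification. */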
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (cdev->config)
+ uevent_envp = configured;
+ else if (dev->connected != dev->sw_connected)
+ uevent_envp = dev->connected ? connected : disconnected;
+ dev->sw_connected = dev->connected;
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ if (uevent_envp) {
+ kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+ pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+ } else {
+ pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+ dev->connected, dev->sw_connected, cdev->config);
+ }
+}
+
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+static int adb_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+ return adb_setup();
+}
+
+static void adb_function_cleanup(struct android_usb_function *f)
+{
+ adb_cleanup();
+}
+
+static int adb_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+ return adb_bind_config(c);
+}
+
+static struct android_usb_function adb_function = {
+ .name = "adb",
+ .init = adb_function_init,
+ .cleanup = adb_function_cleanup,
+ .bind_config = adb_function_bind_config,
+};
+
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+ int instances;
+};
+
+static int acm_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return gserial_setup(cdev->gadget, MAX_ACM_INSTANCES);
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+ gserial_cleanup();
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int acm_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+ int i;
+ int ret = 0;
+ struct acm_function_config *config = f->config;
+
+ for (i = 0; i < config->instances; i++) {
+ ret = acm_bind_config(c, i);
+ if (ret) {
+ pr_err("Could not bind acm%u config\n", i);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct acm_function_config *config = f->config;
+ return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct acm_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%d", &value) != 1)
+ return -EINVAL;
+ if (value > MAX_ACM_INSTANCES)
+ value = MAX_ACM_INSTANCES;
+ config->instances = value;
+ return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show, acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = { &dev_attr_instances, NULL };
+
+static struct android_usb_function acm_function = {
+ .name = "acm",
+ .init = acm_function_init,
+ .cleanup = acm_function_cleanup,
+ .bind_config = acm_function_bind_config,
+ .attributes = acm_function_attributes,
+};
+
+
+static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+ return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+ mtp_cleanup();
+}
+
+static int mtp_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+ return mtp_bind_config(c, false);
+}
+
+static int ptp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+ /* nothing to do - initialization is handled by mtp_function_init */
+ return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+ /* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int ptp_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+ return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+ struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *c)
+{
+ return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+ .name = "mtp",
+ .init = mtp_function_init,
+ .cleanup = mtp_function_cleanup,
+ .bind_config = mtp_function_bind_config,
+ .ctrlrequest = mtp_function_ctrlrequest,
+};
+
+/* PTP function is same as MTP with slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+ .name = "ptp",
+ .init = ptp_function_init,
+ .cleanup = ptp_function_cleanup,
+ .bind_config = ptp_function_bind_config,
+};
+
+
+struct rndis_function_config {
+ u8 ethaddr[ETH_ALEN];
+ u32 vendorID;
+ char manufacturer[256];
+ bool wceis;
+};
+
+static int rndis_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+ return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int rndis_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int ret;
+ struct rndis_function_config *rndis = f->config;
+
+ if (!rndis) {
+ pr_err("%s: rndis config is null\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+ ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+ if (ret) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
+
+ if (rndis->wceis) {
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ rndis_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_control_intf.bInterfaceSubClass = 0x01;
+ rndis_control_intf.bInterfaceProtocol = 0x03;
+ }
+
+ return rndis_bind_config(c, rndis->ethaddr, rndis->vendorID,
+ rndis->manufacturer);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ gether_cleanup();
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+
+ if (size >= sizeof(config->manufacturer))
+ return -EINVAL;
+ if (sscanf(buf, "%s", config->manufacturer) == 1)
+ return size;
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+ rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%d", &value) == 1) {
+ config->wceis = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+ rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *rndis = f->config;
+ return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *rndis = f->config;
+ int addr[ETH_ALEN];
+ int i;
+
+ /* sscanf("%02x") stores a full int, so parse into int temporaries
+ * rather than pointing it at the u8 ethaddr bytes directly */
+ if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ &addr[0], &addr[1], &addr[2],
+ &addr[3], &addr[4], &addr[5]) != 6)
+ return -EINVAL;
+ for (i = 0; i < ETH_ALEN; i++)
+ rndis->ethaddr[i] = (u8)addr[i];
+ return size;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+ rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%04x", &value) == 1) {
+ config->vendorID = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+ rndis_vendorID_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+ &dev_attr_manufacturer,
+ &dev_attr_wceis,
+ &dev_attr_ethaddr,
+ &dev_attr_vendorID,
+ NULL
+};
+
+static struct android_usb_function rndis_function = {
+ .name = "rndis",
+ .init = rndis_function_init,
+ .cleanup = rndis_function_cleanup,
+ .bind_config = rndis_function_bind_config,
+ .unbind_config = rndis_function_unbind_config,
+ .attributes = rndis_function_attributes,
+};
+
+
+struct mass_storage_function_config {
+ struct fsg_config fsg;
+ struct fsg_common *common;
+};
+
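+/* Mass storage: one removable LUN handled by the fsg_common code, with a
+ * "lun" symlink to the LUN device placed under the f_mass_storage device. */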
+static int mass_storage_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct mass_storage_function_config *config;
+ struct fsg_common *common;
+ int err;
+
+ config = kzalloc(sizeof(struct mass_storage_function_config),
+ GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ config->fsg.nluns = 1;
+ config->fsg.luns[0].removable = 1;
+
+ common = fsg_common_init(NULL, cdev, &config->fsg);
+ if (IS_ERR(common)) {
+ kfree(config);
+ return PTR_ERR(common);
+ }
+
+ err = sysfs_create_link(&f->dev->kobj,
+ &common->luns[0].dev.kobj,
+ "lun");
+ if (err) {
+ kfree(config);
+ return err;
+ }
+
+ config->common = common;
+ f->config = config;
+ return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct mass_storage_function_config *config = f->config;
+ return fsg_bind_config(c->cdev, c, config->common);
+}
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct mass_storage_function_config *config = f->config;
+ return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct mass_storage_function_config *config = f->config;
+ if (size >= sizeof(config->common->inquiry_string))
+ return -EINVAL;
+ if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+ return -EINVAL;
+ return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+ mass_storage_inquiry_show,
+ mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+ &dev_attr_inquiry_string,
+ NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+ .name = "mass_storage",
+ .init = mass_storage_function_init,
+ .cleanup = mass_storage_function_cleanup,
+ .bind_config = mass_storage_function_bind_config,
+ .attributes = mass_storage_function_attributes,
+};
+
+
+static int accessory_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+ acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+ struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *c)
+{
+ return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+ .name = "accessory",
+ .init = accessory_function_init,
+ .cleanup = accessory_function_cleanup,
+ .bind_config = accessory_function_bind_config,
+ .ctrlrequest = accessory_function_ctrlrequest,
+};
+
+
+static struct android_usb_function *supported_functions[] = {
+ &adb_function,
+ &acm_function,
+ &mtp_function,
+ &ptp_function,
+ &rndis_function,
+ &mass_storage_function,
+ &accessory_function,
+ NULL
+};
+
+
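+/*
+ * Create a device (f_<name>) under android_class for each supported
+ * function, run its init() hook and publish its attribute files; on
+ * failure the current function's device and name are released and the
+ * error is propagated.
+ */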
+static int android_init_functions(struct android_usb_function **functions,
+ struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+ struct android_usb_function *f;
+ struct device_attribute **attrs;
+ struct device_attribute *attr;
+ int err = 0;
+ int index = 0;
+
+ for (; (f = *functions++); index++) {
+ f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+ f->dev = device_create(android_class, dev->dev,
+ MKDEV(0, index), f, f->dev_name);
+ if (IS_ERR(f->dev)) {
+ pr_err("%s: Failed to create dev %s", __func__,
+ f->dev_name);
+ err = PTR_ERR(f->dev);
+ goto err_create;
+ }
+
+ if (f->init) {
+ err = f->init(f, cdev);
+ if (err) {
+ pr_err("%s: Failed to init %s", __func__,
+ f->name);
+ goto err_out;
+ }
+ }
+
+ attrs = f->attributes;
+ if (attrs) {
+ while ((attr = *attrs++) && !err)
+ err = device_create_file(f->dev, attr);
+ }
+ if (err) {
+ pr_err("%s: Failed to create function %s attributes",
+ __func__, f->name);
+ goto err_out;
+ }
+ }
+ return 0;
+
+err_out:
+ device_destroy(android_class, f->dev->devt);
+err_create:
+ kfree(f->dev_name);
+ return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+ struct android_usb_function *f;
+
+ while (*functions) {
+ f = *functions++;
+
+ if (f->dev) {
+ device_destroy(android_class, f->dev->devt);
+ kfree(f->dev_name);
+ }
+
+ if (f->cleanup)
+ f->cleanup(f);
+ }
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+ struct usb_configuration *c)
+{
+ struct android_usb_function *f;
+ int ret;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ ret = f->bind_config(f, c);
+ if (ret) {
+ pr_err("%s: %s failed", __func__, f->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+ struct usb_configuration *c)
+{
+ struct android_usb_function *f;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->unbind_config)
+ f->unbind_config(f, c);
+ }
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+ struct android_usb_function **functions = dev->functions;
+ struct android_usb_function *f;
+ while ((f = *functions++)) {
+ if (!strcmp(name, f->name)) {
+ list_add_tail(&f->enabled_list, &dev->enabled_functions);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
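+/*
+ * Illustrative usage from userspace (names under android0 as created in
+ * android_create_device() below):
+ *
+ *   echo 0       > /sys/class/android_usb/android0/enable
+ *   echo adb,mtp > /sys/class/android_usb/android0/functions
+ *   echo 1       > /sys/class/android_usb/android0/enable
+ *
+ * functions_store() rebuilds the enabled-function list from the
+ * comma-separated names; enable_store() adds or removes the configuration
+ * and connects or disconnects the gadget.
+ */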
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct android_usb_function *f;
+ char *buff = buf;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+ buff += sprintf(buff, "%s,", f->name);
+ if (buff != buf)
+ *(buff-1) = '\n';
+ return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ char *name;
+ char buf[256], *b;
+ int err;
+
+ INIT_LIST_HEAD(&dev->enabled_functions);
+
+ strlcpy(buf, buff, sizeof(buf));
+ b = strim(buf);
+
+ while (b) {
+ name = strsep(&b, ",");
+ if (name) {
+ err = android_enable_function(dev, name);
+ if (err)
+ pr_err("android_usb: Cannot enable '%s'", name);
+ }
+ }
+
+ return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int enabled = 0;
+
+ sscanf(buff, "%d", &enabled);
+ if (enabled && !dev->enabled) {
+ cdev->next_string_id = 0;
+ /* update values in composite driver's copy of device descriptor */
+ cdev->desc.idVendor = device_desc.idVendor;
+ cdev->desc.idProduct = device_desc.idProduct;
+ cdev->desc.bcdDevice = device_desc.bcdDevice;
+ cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+ cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+ cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+ usb_add_config(cdev, &android_config_driver,
+ android_bind_config);
+ usb_gadget_connect(cdev->gadget);
+ dev->enabled = true;
+ } else if (!enabled && dev->enabled) {
+ usb_gadget_disconnect(cdev->gadget);
+ /* Cancel pending control requests */
+ usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+ usb_remove_config(cdev, &android_config_driver);
+ dev->enabled = false;
+ } else {
+ pr_err("android_usb: already %s\n",
+ dev->enabled ? "enabled" : "disabled");
+ }
+ return size;
+}
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct usb_composite_dev *cdev = dev->cdev;
+ char *state = "DISCONNECTED";
+ unsigned long flags;
+
+ if (!cdev)
+ goto out;
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (cdev->config)
+ state = "CONFIGURED";
+ else if (dev->connected)
+ state = "CONNECTED";
+ spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+ return sprintf(buf, "%s\n", state);
+}
+
+#define DESCRIPTOR_ATTR(field, format_string) \
+static ssize_t \
+field ## _show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, format_string, device_desc.field); \
+} \
+static ssize_t \
+field ## _store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ int value; \
+ if (sscanf(buf, format_string, &value) == 1) { \
+ device_desc.field = value; \
+ return size; \
+ } \
+ return -EINVAL; \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer) \
+static ssize_t \
+field ## _show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%s", buffer); \
+} \
+static ssize_t \
+field ## _store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ if (size >= sizeof(buffer)) return -EINVAL; \
+ if (sscanf(buf, "%s", buffer) == 1) { \
+ return size; \
+ } \
+ return -EINVAL; \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+
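+/* Expand the macros above into sysfs show/store pairs for the numeric
+ * device-descriptor fields and the manufacturer/product/serial strings. */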
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show, functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+ &dev_attr_idVendor,
+ &dev_attr_idProduct,
+ &dev_attr_bcdDevice,
+ &dev_attr_bDeviceClass,
+ &dev_attr_bDeviceSubClass,
+ &dev_attr_bDeviceProtocol,
+ &dev_attr_iManufacturer,
+ &dev_attr_iProduct,
+ &dev_attr_iSerial,
+ &dev_attr_functions,
+ &dev_attr_enable,
+ &dev_attr_state,
+ NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+ struct android_dev *dev = _android_dev;
+ int ret = 0;
+
+ ret = android_bind_enabled_functions(dev, c);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+ struct android_dev *dev = _android_dev;
+
+ android_unbind_enabled_functions(dev, c);
+}
+
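+/*
+ * Composite bind: keep the gadget disconnected until userspace enables it,
+ * create the per-function devices, allocate string descriptor IDs and fill
+ * in default manufacturer/product/serial strings.
+ */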
+static int android_bind(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_gadget *gadget = cdev->gadget;
+ int gcnum, id, ret;
+
+ usb_gadget_disconnect(gadget);
+
+ ret = android_init_functions(dev->functions, cdev);
+ if (ret)
+ return ret;
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_MANUFACTURER_IDX].id = id;
+ device_desc.iManufacturer = id;
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_PRODUCT_IDX].id = id;
+ device_desc.iProduct = id;
+
+ /* Default strings - should be updated by userspace */
+ strncpy(manufacturer_string, "Android", sizeof(manufacturer_string) - 1);
+ strncpy(product_string, "Android", sizeof(product_string) - 1);
+ strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_SERIAL_IDX].id = id;
+ device_desc.iSerialNumber = id;
+
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+ else {
+ /* gadget zero is so simple (for now, no altsettings) that
+ * it SHOULD NOT have problems with bulk-capable hardware.
+ * so just warn about unrecognized controllers -- don't panic.
+ *
+ * things like configuration and altsetting numbering
+ * can need hardware-specific attention though.
+ */
+ pr_warning("%s: controller '%s' not recognized\n",
+ longname, gadget->name);
+ device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+ }
+
+ usb_gadget_set_selfpowered(gadget);
+ dev->cdev = cdev;
+
+ return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+
+ cancel_work_sync(&dev->work);
+ android_cleanup_functions(dev->functions);
+ return 0;
+}
+
+static struct usb_composite_driver android_usb_driver = {
+ .name = "android_usb",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .unbind = android_usb_unbind,
+ .max_speed = USB_SPEED_HIGH,
+};
+
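+/*
+ * Replaces composite_setup() (see init() below): offer the control request
+ * to each enabled function, then to the accessory function (which must see
+ * its vendor requests before it is enabled), then fall back to the
+ * composite core; connect/configure transitions are reported via
+ * android_work().
+ */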
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_request *req = cdev->req;
+ struct android_usb_function *f;
+ int value = -EOPNOTSUPP;
+ unsigned long flags;
+
+ req->zero = 0;
+ req->complete = composite_setup_complete;
+ req->length = 0;
+ gadget->ep0->driver_data = cdev;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->ctrlrequest) {
+ value = f->ctrlrequest(f, cdev, c);
+ if (value >= 0)
+ break;
+ }
+ }
+
+ /* Special case the accessory function.
+ * It needs to handle control requests before it is enabled.
+ */
+ if (value < 0)
+ value = acc_ctrlrequest(cdev, c);
+
+ if (value < 0)
+ value = composite_setup(gadget, c);
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (!dev->connected) {
+ dev->connected = 1;
+ schedule_work(&dev->work);
+ }
+ else if (c->bRequest == USB_REQ_SET_CONFIGURATION && cdev->config) {
+ schedule_work(&dev->work);
+ }
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ unsigned long flags;
+
+ composite_disconnect(gadget);
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ dev->connected = 0;
+ schedule_work(&dev->work);
+ spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
+static int android_create_device(struct android_dev *dev)
+{
+ struct device_attribute **attrs = android_usb_attributes;
+ struct device_attribute *attr;
+ int err;
+
+ dev->dev = device_create(android_class, NULL,
+ MKDEV(0, 0), NULL, "android0");
+ if (IS_ERR(dev->dev))
+ return PTR_ERR(dev->dev);
+
+ dev_set_drvdata(dev->dev, dev);
+
+ while ((attr = *attrs++)) {
+ err = device_create_file(dev->dev, attr);
+ if (err) {
+ device_destroy(android_class, dev->dev->devt);
+ return err;
+ }
+ }
+ return 0;
+}
+
+
+static int __init init(void)
+{
+ struct android_dev *dev;
+ int err;
+
+ android_class = class_create(THIS_MODULE, "android_usb");
+ if (IS_ERR(android_class))
+ return PTR_ERR(android_class);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->functions = supported_functions;
+ INIT_LIST_HEAD(&dev->enabled_functions);
+ INIT_WORK(&dev->work, android_work);
+
+ err = android_create_device(dev);
+ if (err) {
+ class_destroy(android_class);
+ kfree(dev);
+ return err;
+ }
+
+ _android_dev = dev;
+
+ /* Override composite driver functions */
+ composite_driver.setup = android_setup;
+ composite_driver.disconnect = android_disconnect;
+
+ return usb_composite_probe(&android_usb_driver, android_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+ usb_composite_unregister(&android_usb_driver);
+ class_destroy(android_class);
+ kfree(_android_dev);
+ _android_dev = NULL;
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index aef47414f5d5..9dac72cff73d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -712,6 +712,7 @@ static int set_config(struct usb_composite_dev *cdev,
power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
done:
usb_gadget_vbus_draw(gadget, power);
+
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
return result;
@@ -759,6 +760,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
INIT_LIST_HEAD(&config->functions);
config->next_interface_id = 0;
+ memset(config->interface, '\0', sizeof(config->interface));
status = bind(config);
if (status < 0) {
@@ -799,6 +801,45 @@ done:
return status;
}
+static int remove_config(struct usb_composite_dev *cdev,
+ struct usb_configuration *config)
+{
+ while (!list_empty(&config->functions)) {
+ struct usb_function *f;
+
+ f = list_first_entry(&config->functions,
+ struct usb_function, list);
+ list_del(&f->list);
+ if (f->unbind) {
+ DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+ f->unbind(config, f);
+ /* may free memory for "f" */
+ }
+ }
+ list_del(&config->list);
+ if (config->unbind) {
+ DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+ config->unbind(config);
+ /* may free memory for "c" */
+ }
+ return 0;
+}
+
+int usb_remove_config(struct usb_composite_dev *cdev,
+ struct usb_configuration *config)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdev->lock, flags);
+
+ if (cdev->config == config)
+ reset_config(cdev);
+
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ return remove_config(cdev, config);
+}
+
/*-------------------------------------------------------------------------*/
/* We support strings in multiple languages ... string descriptor zero
@@ -1353,28 +1394,9 @@ composite_unbind(struct usb_gadget *gadget)
while (!list_empty(&cdev->configs)) {
struct usb_configuration *c;
-
c = list_first_entry(&cdev->configs,
struct usb_configuration, list);
- while (!list_empty(&c->functions)) {
- struct usb_function *f;
-
- f = list_first_entry(&c->functions,
- struct usb_function, list);
- list_del(&f->list);
- if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n",
- f->name, f);
- f->unbind(c, f);
- /* may free memory for "f" */
- }
- }
- list_del(&c->list);
- if (c->unbind) {
- DBG(cdev, "unbind config '%s'/%p\n", c->label, c);
- c->unbind(c);
- /* may free memory for "c" */
- }
+ remove_config(cdev, c);
}
if (composite->unbind)
composite->unbind(cdev);
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 000000000000..ae65faaf3d77
--- /dev/null
+++ b/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,788 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE 16384
+#define ACC_STRING_SIZE 256
+
+#define PROTOCOL_VERSION 1
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ /* set to 1 when we connect */
+ unsigned int online:1;
+ /* Set to 1 when we disconnect.
+ * Not cleared until our file is closed.
+ */
+ unsigned int disconnected:1;
+
+ /* strings sent by the host */
+ char manufacturer[ACC_STRING_SIZE];
+ char model[ACC_STRING_SIZE];
+ char description[ACC_STRING_SIZE];
+ char version[ACC_STRING_SIZE];
+ char uri[ACC_STRING_SIZE];
+ char serial[ACC_STRING_SIZE];
+
+ /* for acc_complete_set_string */
+ int string_index;
+
+ /* set to 1 if we have a pending start request */
+ int start_requested;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+
+ struct list_head tx_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+ struct delayed_work work;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_out_desc,
+ NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+ [INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+ &acc_string_table,
+ NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+ return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+ dev->online = 0;
+ dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = _acc_dev;
+
+ if (req->status != 0)
+ acc_set_disconnected(dev);
+
+ req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = _acc_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ acc_set_disconnected(dev);
+
+ wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = ep->driver_data;
+ char *string_dest = NULL;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_set_string, err %d\n", req->status);
+ return;
+ }
+
+ switch (dev->string_index) {
+ case ACCESSORY_STRING_MANUFACTURER:
+ string_dest = dev->manufacturer;
+ break;
+ case ACCESSORY_STRING_MODEL:
+ string_dest = dev->model;
+ break;
+ case ACCESSORY_STRING_DESCRIPTION:
+ string_dest = dev->description;
+ break;
+ case ACCESSORY_STRING_VERSION:
+ string_dest = dev->version;
+ break;
+ case ACCESSORY_STRING_URI:
+ string_dest = dev->uri;
+ break;
+ case ACCESSORY_STRING_SERIAL:
+ string_dest = dev->serial;
+ break;
+ }
+ if (string_dest) {
+ unsigned long flags;
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ } else {
+ pr_err("unknown accessory string index %d\n",
+ dev->string_index);
+ }
+}
+
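+/*
+ * Pick bulk IN/OUT endpoints for the accessory interface and preallocate
+ * TX_REQ_MAX transmit and RX_REQ_MAX receive requests, each with a
+ * BULK_BUFFER_SIZE buffer.
+ */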
+static int __init create_bulk_endpoints(struct acc_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = acc_complete_in;
+ req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = acc_complete_out;
+ dev->rx_req[i] = req;
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "acc_bind() could not allocate requests\n");
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ acc_request_free(dev->rx_req[i], dev->ep_out);
+ return -ENOMEM;
+}
+
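+/*
+ * Blocking read/write on /dev/usb_accessory: acc_read() waits for the
+ * interface to come online, then queues one RX request at a time;
+ * acc_write() takes requests from the tx_idle list and queues up to
+ * BULK_BUFFER_SIZE bytes per request.
+ */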
+static ssize_t acc_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct acc_dev *dev = fp->private_data;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = 0;
+
+ pr_debug("acc_read(%d)\n", count);
+
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (count > BULK_BUFFER_SIZE)
+ count = BULK_BUFFER_SIZE;
+
+ /* we will block until we're online */
+ pr_debug("acc_read: waiting for online\n");
+ ret = wait_event_interruptible(dev->read_wq, dev->online);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ pr_debug("rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (dev->online) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ pr_debug("rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+done:
+ pr_debug("acc_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct acc_dev *dev = fp->private_data;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int ret;
+
+ pr_debug("acc_write(%d)\n", count);
+
+ if (!dev->online || dev->disconnected)
+ return -ENODEV;
+
+ while (count > 0) {
+ if (!dev->online) {
+ pr_debug("acc_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > BULK_BUFFER_SIZE)
+ xfer = BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ pr_debug("acc_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ req_put(dev, &dev->tx_idle, req);
+
+ pr_debug("acc_write returning %d\n", r);
+ return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct acc_dev *dev = fp->private_data;
+ char *src = NULL;
+ int ret;
+
+ switch (code) {
+ case ACCESSORY_GET_STRING_MANUFACTURER:
+ src = dev->manufacturer;
+ break;
+ case ACCESSORY_GET_STRING_MODEL:
+ src = dev->model;
+ break;
+ case ACCESSORY_GET_STRING_DESCRIPTION:
+ src = dev->description;
+ break;
+ case ACCESSORY_GET_STRING_VERSION:
+ src = dev->version;
+ break;
+ case ACCESSORY_GET_STRING_URI:
+ src = dev->uri;
+ break;
+ case ACCESSORY_GET_STRING_SERIAL:
+ src = dev->serial;
+ break;
+ case ACCESSORY_IS_START_REQUESTED:
+ return dev->start_requested;
+ }
+ if (!src)
+ return -EINVAL;
+
+ ret = strlen(src) + 1;
+ if (copy_to_user((void __user *)value, src, ret))
+ ret = -EFAULT;
+ return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "acc_open\n");
+ if (atomic_xchg(&_acc_dev->open_excl, 1))
+ return -EBUSY;
+
+ _acc_dev->disconnected = 0;
+ fp->private_data = _acc_dev;
+ return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "acc_release\n");
+
+ WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+ _acc_dev->disconnected = 0;
+ return 0;
+}
+
+/* file operations for /dev/acc_usb */
+static const struct file_operations acc_fops = {
+ .owner = THIS_MODULE,
+ .read = acc_read,
+ .write = acc_write,
+ .unlocked_ioctl = acc_ioctl,
+ .open = acc_open,
+ .release = acc_release,
+};
+
+static struct miscdevice acc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usb_accessory",
+ .fops = &acc_fops,
+};
+
+
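+/*
+ * ep0 handler for the Android Open Accessory vendor requests:
+ * ACCESSORY_GET_PROTOCOL returns PROTOCOL_VERSION and clears any cached
+ * strings, ACCESSORY_SEND_STRING stores a host-supplied string (completed
+ * in acc_complete_set_string()), and ACCESSORY_START schedules the
+ * "ACCESSORY=START" uevent via acc_work().
+ */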
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct acc_dev *dev = _acc_dev;
+ int value = -EOPNOTSUPP;
+ u8 b_requestType = ctrl->bRequestType;
+ u8 b_request = ctrl->bRequest;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+/*
+ printk(KERN_INFO "acc_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ b_requestType, b_request,
+ w_value, w_index, w_length);
+*/
+
+ if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+ if (b_request == ACCESSORY_START) {
+ dev->start_requested = 1;
+ schedule_delayed_work(
+ &dev->work, msecs_to_jiffies(10));
+ value = 0;
+ } else if (b_request == ACCESSORY_SEND_STRING) {
+ dev->string_index = w_index;
+ cdev->gadget->ep0->driver_data = dev;
+ cdev->req->complete = acc_complete_set_string;
+ value = w_length;
+ }
+ } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+ if (b_request == ACCESSORY_GET_PROTOCOL) {
+ *((__le16 *)cdev->req->buf) = cpu_to_le16(PROTOCOL_VERSION);
+ value = sizeof(u16);
+
+ /* clear any strings left over from a previous session */
+ memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+ memset(dev->model, 0, sizeof(dev->model));
+ memset(dev->description, 0, sizeof(dev->description));
+ memset(dev->version, 0, sizeof(dev->version));
+ memset(dev->uri, 0, sizeof(dev->uri));
+ memset(dev->serial, 0, sizeof(dev->serial));
+ dev->start_requested = 0;
+ }
+ }
+
+ if (value >= 0) {
+ cdev->req->zero = 0;
+ cdev->req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "%s setup response queue error\n",
+ __func__);
+ }
+
+ if (value == -EOPNOTSUPP)
+ VDBG(cdev,
+ "unknown class-specific control req "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct acc_dev *dev = func_to_dev(f);
+ int id;
+ int ret;
+
+ DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+ dev->start_requested = 0;
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ acc_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+ &acc_fullspeed_out_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ acc_highspeed_in_desc.bEndpointAddress =
+ acc_fullspeed_in_desc.bEndpointAddress;
+ acc_highspeed_out_desc.bEndpointAddress =
+ acc_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_request *req;
+ int i;
+
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ acc_request_free(dev->rx_req[i], dev->ep_out);
+}
+
+static void acc_work(struct work_struct *data)
+{
+ char *envp[2] = { "ACCESSORY=START", NULL };
+ kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+ config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+ config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ dev->online = 1;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "acc_function_disable\n");
+ acc_set_disconnected(dev);
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+ struct acc_dev *dev = _acc_dev;
+ int ret;
+
+ printk(KERN_INFO "acc_bind_config\n");
+
+ /* allocate a string ID for our interface */
+ if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ acc_interface_desc.iInterface = ret;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "accessory";
+ dev->function.strings = acc_strings;
+ dev->function.descriptors = fs_acc_descs;
+ dev->function.hs_descriptors = hs_acc_descs;
+ dev->function.bind = acc_function_bind;
+ dev->function.unbind = acc_function_unbind;
+ dev->function.set_alt = acc_function_set_alt;
+ dev->function.disable = acc_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+ struct acc_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ atomic_set(&dev->open_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_DELAYED_WORK(&dev->work, acc_work);
+
+ /* _acc_dev must be set before calling usb_gadget_register_driver */
+ _acc_dev = dev;
+
+ ret = misc_register(&acc_device);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(dev);
+ printk(KERN_ERR "USB accessory gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void acc_cleanup(void)
+{
+ misc_deregister(&acc_device);
+ kfree(_acc_dev);
+ _acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 3f8849339ade..d84debc42215 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -677,6 +677,7 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
gs_free_req(acm->notify, acm->notify_req);
+ kfree(acm->port.func.name);
kfree(acm);
}
@@ -748,7 +749,11 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num)
acm->port.disconnect = acm_disconnect;
acm->port.send_break = acm_send_break;
- acm->port.func.name = "acm";
+ acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num);
+ if (!acm->port.func.name) {
+ kfree(acm);
+ return -ENOMEM;
+ }
acm->port.func.strings = acm_strings;
/* descriptors are per-instance copies */
acm->port.func.bind = acm_bind;
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
new file mode 100644
index 000000000000..94a793f43901
--- /dev/null
+++ b/drivers/usb/gadget/f_adb.c
@@ -0,0 +1,635 @@
+/*
+ * Gadget Driver for Android ADB
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define ADB_BULK_BUFFER_SIZE 4096
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+static const char adb_shortname[] = "android_adb";
+
+struct adb_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ int online;
+ int error;
+
+ atomic_t read_excl;
+ atomic_t write_excl;
+ atomic_t open_excl;
+
+ struct list_head tx_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req;
+ int rx_done;
+};
+
+static struct usb_interface_descriptor adb_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0xFF,
+ .bInterfaceSubClass = 0x42,
+ .bInterfaceProtocol = 1,
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_adb_descs[] = {
+ (struct usb_descriptor_header *) &adb_interface_desc,
+ (struct usb_descriptor_header *) &adb_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &adb_fullspeed_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_adb_descs[] = {
+ (struct usb_descriptor_header *) &adb_interface_desc,
+ (struct usb_descriptor_header *) &adb_highspeed_in_desc,
+ (struct usb_descriptor_header *) &adb_highspeed_out_desc,
+ NULL,
+};
+
+
+/* temporary variable used between adb_open() and adb_gadget_bind() */
+static struct adb_dev *_adb_dev;
+
+static inline struct adb_dev *func_to_adb(struct usb_function *f)
+{
+ return container_of(f, struct adb_dev, function);
+}
+
+
+static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
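+/*
+ * Simple try-lock on an atomic counter: the caller that raises the count
+ * to 1 gets the lock (return 0), everybody else backs off with -1, keeping
+ * open/read/write single-user.
+ */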
+static inline int adb_lock(atomic_t *excl)
+{
+ int ret = -1;
+
+ preempt_disable();
+ if (atomic_inc_return(excl) == 1) {
+ ret = 0;
+ } else
+ atomic_dec(excl);
+
+ preempt_enable();
+ return ret;
+}
+
+static inline void adb_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void adb_req_put(struct adb_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *adb_req_get(struct adb_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct adb_dev *dev = _adb_dev;
+
+ if (req->status != 0)
+ dev->error = 1;
+
+ adb_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct adb_dev *dev = _adb_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ dev->error = 1;
+
+ wake_up(&dev->read_wq);
+}
+
+static int adb_create_bulk_endpoints(struct adb_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* now allocate requests for our endpoints */
+ req = adb_request_new(dev->ep_out, ADB_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = adb_complete_out;
+ dev->rx_req = req;
+
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = adb_request_new(dev->ep_in, ADB_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = adb_complete_in;
+ adb_req_put(dev, &dev->tx_idle, req);
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "adb_bind() could not allocate requests\n");
+ return -ENOMEM;
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adb_dev *dev = fp->private_data;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret;
+
+ pr_debug("adb_read(%d)\n", count);
+ if (!_adb_dev)
+ return -ENODEV;
+
+ if (count > ADB_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (adb_lock(&dev->read_excl))
+ return -EBUSY;
+
+ /* we will block until we're online */
+ while (!(dev->online || dev->error)) {
+ pr_debug("adb_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ (dev->online || dev->error));
+ if (ret < 0) {
+ adb_unlock(&dev->read_excl);
+ return ret;
+ }
+ }
+ if (dev->error) {
+ r = -EIO;
+ goto done;
+ }
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req;
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_debug("adb_read: failed to queue req %p (%d)\n", req, ret);
+ r = -EIO;
+ dev->error = 1;
+ goto done;
+ } else {
+ pr_debug("rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ dev->error = 1;
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (!dev->error) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ pr_debug("rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+
+ } else
+ r = -EIO;
+
+done:
+ adb_unlock(&dev->read_excl);
+ pr_debug("adb_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adb_dev *dev = fp->private_data;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int ret;
+
+ if (!_adb_dev)
+ return -ENODEV;
+ pr_debug("adb_write(%d)\n", count);
+
+ if (adb_lock(&dev->write_excl))
+ return -EBUSY;
+
+ while (count > 0) {
+ if (dev->error) {
+ pr_debug("adb_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = adb_req_get(dev, &dev->tx_idle)) || dev->error);
+
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+
+ if (req != 0) {
+ if (count > ADB_BULK_BUFFER_SIZE)
+ xfer = ADB_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_debug("adb_write: xfer error %d\n", ret);
+ dev->error = 1;
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+ }
+
+ if (req)
+ adb_req_put(dev, &dev->tx_idle, req);
+
+ adb_unlock(&dev->write_excl);
+ pr_debug("adb_write returning %d\n", r);
+ return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+ static unsigned long last_print;
+ static unsigned long count = 0;
+
+ if (!_adb_dev)
+ return -ENODEV;
+
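+ /* Throttle the open/release logging: the counter is reset once half a
+ * second has passed, so only the first few opens in a burst are printed.
+ */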
+ if (++count == 1)
+ last_print = jiffies;
+ else {
+ if (!time_before(jiffies, last_print + HZ/2))
+ count = 0;
+ last_print = jiffies;
+ }
+
+ if (adb_lock(&_adb_dev->open_excl)) {
+ cpu_relax();
+ return -EBUSY;
+ }
+
+ if (count < 5)
+ printk(KERN_INFO "adb_open(%s)\n", current->comm);
+
+ fp->private_data = _adb_dev;
+
+ /* clear the error latch */
+ _adb_dev->error = 0;
+
+ return 0;
+}
+
+static int adb_release(struct inode *ip, struct file *fp)
+{
+ static unsigned long last_print;
+ static unsigned long count = 0;
+
+ if (++count == 1)
+ last_print = jiffies;
+ else {
+ if (!time_before(jiffies, last_print + HZ/2))
+ count = 0;
+ last_print = jiffies;
+ }
+
+ if (count < 5)
+ printk(KERN_INFO "adb_release\n");
+ adb_unlock(&_adb_dev->open_excl);
+ return 0;
+}
+
+/* file operations for ADB device /dev/android_adb */
+static struct file_operations adb_fops = {
+ .owner = THIS_MODULE,
+ .read = adb_read,
+ .write = adb_write,
+ .open = adb_open,
+ .release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = adb_shortname,
+ .fops = &adb_fops,
+};
+
+static int
+adb_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct adb_dev *dev = func_to_adb(f);
+ int id;
+ int ret;
+
+ dev->cdev = cdev;
+ DBG(cdev, "adb_function_bind dev: %p\n", dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ adb_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = adb_create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
+ &adb_fullspeed_out_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ adb_highspeed_in_desc.bEndpointAddress =
+ adb_fullspeed_in_desc.bEndpointAddress;
+ adb_highspeed_out_desc.bEndpointAddress =
+ adb_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_request *req;
+
+ dev->online = 0;
+ dev->error = 1;
+
+ wake_up(&dev->read_wq);
+
+ adb_request_free(dev->rx_req, dev->ep_out);
+ while ((req = adb_req_get(dev, &dev->tx_idle)))
+ adb_request_free(req, dev->ep_in);
+}
+
+static int adb_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);
+ config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+ config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->online = 1;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void adb_function_disable(struct usb_function *f)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "adb_function_disable cdev %p\n", cdev);
+ dev->online = 0;
+ dev->error = 1;
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int adb_bind_config(struct usb_configuration *c)
+{
+ struct adb_dev *dev = _adb_dev;
+
+ printk(KERN_INFO "adb_bind_config\n");
+
+ dev->cdev = c->cdev;
+ dev->function.name = "adb";
+ dev->function.descriptors = fs_adb_descs;
+ dev->function.hs_descriptors = hs_adb_descs;
+ dev->function.bind = adb_function_bind;
+ dev->function.unbind = adb_function_unbind;
+ dev->function.set_alt = adb_function_set_alt;
+ dev->function.disable = adb_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int adb_setup(void)
+{
+ struct adb_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->read_excl, 0);
+ atomic_set(&dev->write_excl, 0);
+
+ INIT_LIST_HEAD(&dev->tx_idle);
+
+ _adb_dev = dev;
+
+ ret = misc_register(&adb_device);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(dev);
+ printk(KERN_ERR "adb gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void adb_cleanup(void)
+{
+ misc_deregister(&adb_device);
+
+ kfree(_adb_dev);
+ _adb_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5b9339582007..497a216188ad 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -3045,7 +3045,7 @@ static int fsg_bind_config(struct usb_composite_dev *cdev,
if (unlikely(!fsg))
return -ENOMEM;
- fsg->function.name = FSG_DRIVER_DESC;
+ fsg->function.name = "mass_storage";
fsg->function.strings = fsg_strings_array;
fsg->function.bind = fsg_bind;
fsg->function.unbind = fsg_unbind;
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 000000000000..5aa5214297d1
--- /dev/null
+++ b/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1264 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE 16384
+#define INTR_BUFFER_SIZE 28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE 0 /* initial state, disconnected */
+#define STATE_READY 1 /* ready for userspace calls */
+#define STATE_BUSY 2 /* processing userspace calls */
+#define STATE_CANCELED 3 /* transaction canceled by host */
+#define STATE_ERROR 4 /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID 0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL 0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
+#define MTP_REQ_RESET 0x66
+#define MTP_REQ_GET_DEVICE_STATUS 0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK 0x2001
+#define MTP_RESPONSE_DEVICE_BUSY 0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+ struct usb_ep *ep_intr;
+
+ int state;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+ /* to enforce only one ioctl at a time */
+ atomic_t ioctl_excl;
+
+ struct list_head tx_idle;
+ struct list_head intr_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ wait_queue_head_t intr_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+ * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+ */
+ struct workqueue_struct *wq;
+ struct work_struct send_file_work;
+ struct work_struct receive_file_work;
+ struct file *xfer_file;
+ loff_t xfer_file_offset;
+ int64_t xfer_file_length;
+ unsigned xfer_send_header;
+ uint16_t xfer_command;
+ uint32_t xfer_transaction_id;
+ int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
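+/* PTP configuration: the same endpoints are used, but the interface
+ * advertises the standard still-image class codes instead of the
+ * vendor-specific MTP interface.
+ */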
+static struct usb_interface_descriptor ptp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+ .bInterval = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+ /* Naming interface "MTP" so libmtp will recognize us */
+ [INTERFACE_STRING_INDEX].s = "MTP",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+ &mtp_string_table,
+ NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+ 18, /* sizeof(mtp_os_string) */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ 1,
+ /* padding */
+ 0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+ __le32 dwLength;
+ __le16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+ struct mtp_ext_config_desc_header header;
+ struct mtp_ext_config_desc_function function;
+} mtp_ext_config_desc = {
+ .header = {
+ .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = __constant_cpu_to_le16(0x0100),
+ .wIndex = __constant_cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'M', 'T', 'P' },
+ },
+};
+
+struct mtp_device_status {
+ __le16 wLength;
+ __le16 wCode;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+ return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
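+/* Simple non-blocking exclusion: the first caller to raise the counter
+ * to one owns the lock; any other caller backs the counter off and
+ * reports failure.
+ */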
+static inline int mtp_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
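+/* Completion callbacks run in interrupt context: they record errors,
+ * return the request to its idle list (or mark the read done) and wake
+ * up the waiting thread.
+ */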
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc,
+ struct usb_endpoint_descriptor *intr_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_intr = ep;
+
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_in;
+ mtp_req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_out;
+ dev->rx_req[i] = req;
+ }
+ for (i = 0; i < INTR_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_intr;
+ mtp_req_put(dev, &dev->intr_idle, req);
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+ return -1;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = 0;
+
+ DBG(cdev, "mtp_read(%d)\n", count);
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
+ /* we will block until we're online */
+ DBG(cdev, "mtp_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->state != STATE_OFFLINE);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
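+ /* Synchronous reads use the first RX request only; zero-length
+ * packets are thrown away and the request is queued again.
+ */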
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ DBG(cdev, "rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (dev->state == STATE_BUSY) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ DBG(cdev, "rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+done:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "mtp_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int sendZLP = 0;
+ int ret;
+
+ DBG(cdev, "mtp_write(%d)\n", count);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0) {
+ sendZLP = 1;
+ }
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ if (dev->state != STATE_BUSY) {
+ DBG(cdev, "mtp_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY));
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ xfer = MTP_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (xfer && copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "mtp_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "mtp_write returning %d\n", r);
+ return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev, send_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = 0;
+ struct mtp_data_header *header;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int xfer, ret, hdr_size;
+ int r = 0;
+ int sendZLP = 0;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
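+ /* When a header was requested, the MTP data header is prepended to
+ * the first bulk packet and its size is included in the total count.
+ */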
+ if (dev->xfer_send_header) {
+ hdr_size = sizeof(struct mtp_data_header);
+ count += hdr_size;
+ } else {
+ hdr_size = 0;
+ }
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0) {
+ sendZLP = 1;
+ }
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ break;
+ }
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ xfer = MTP_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+
+ if (hdr_size) {
+ /* prepend MTP data header */
+ header = (struct mtp_data_header *)req->buf;
+ header->length = __cpu_to_le32(count);
+ header->type = __cpu_to_le16(2); /* data packet */
+ header->command = __cpu_to_le16(dev->xfer_command);
+ header->transaction_id = __cpu_to_le32(dev->xfer_transaction_id);
+ }
+
+ ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, &offset);
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+ xfer = ret + hdr_size;
+ hdr_size = 0;
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "send_file_work: xfer error %d\n", ret);
+ dev->state = STATE_ERROR;
+ r = -EIO;
+ break;
+ }
+
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ DBG(cdev, "send_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev, receive_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *read_req = NULL, *write_req = NULL;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int ret, cur_buf = 0;
+ int r = 0;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "receive_file_work(%lld)\n", count);
+
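+ /* Ping-pong between the RX requests: queue the next USB read, then
+ * write the previously completed buffer out to the file while the new
+ * read is in flight.
+ */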
+ while (count > 0 || write_req) {
+ if (count > 0) {
+ /* queue a request */
+ read_req = dev->rx_req[cur_buf];
+ cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+ read_req->length = (count > MTP_BULK_BUFFER_SIZE
+ ? MTP_BULK_BUFFER_SIZE : count);
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ dev->state = STATE_ERROR;
+ break;
+ }
+ }
+
+ if (write_req) {
+ DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+ ret = vfs_write(filp, write_req->buf, write_req->actual,
+ &offset);
+ DBG(cdev, "vfs_write %d\n", ret);
+ if (ret != write_req->actual) {
+ r = -EIO;
+ dev->state = STATE_ERROR;
+ break;
+ }
+ write_req = NULL;
+ }
+
+ if (read_req) {
+ /* wait for our last read to complete */
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, read_req);
+ break;
+ }
+ /* if xfer_file_length is 0xFFFFFFFF, then we read until
+ * we get a zero length packet
+ */
+ if (count != 0xFFFFFFFF)
+ count -= read_req->actual;
+ if (read_req->actual < read_req->length) {
+ /* short packet is used to signal EOF for sizes > 4 gig */
+ DBG(cdev, "got short packet\n");
+ count = 0;
+ }
+
+ write_req = read_req;
+ read_req = NULL;
+ }
+ }
+
+ DBG(cdev, "receive_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
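+/* Send an MTP event to the host on the interrupt IN endpoint.  The
+ * payload is copied from userspace and must fit in a single interrupt
+ * transfer.
+ */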
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+ struct usb_request *req = NULL;
+ int ret;
+ int length = event->length;
+
+ DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);
+
+ if (length < 0 || length > INTR_BUFFER_SIZE)
+ return -EINVAL;
+ if (dev->state == STATE_OFFLINE)
+ return -ENODEV;
+
+ ret = wait_event_interruptible_timeout(dev->intr_wq,
+ (req = mtp_req_get(dev, &dev->intr_idle)), msecs_to_jiffies(1000));
+ if (!req)
+ return -ETIME;
+
+ if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+ mtp_req_put(dev, &dev->intr_idle, req);
+ return -EFAULT;
+ }
+ req->length = length;
+ ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+ if (ret)
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct file *filp = NULL;
+ int ret = -EINVAL;
+
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+
+ switch (code) {
+ case MTP_SEND_FILE:
+ case MTP_RECEIVE_FILE:
+ case MTP_SEND_FILE_WITH_HEADER:
+ {
+ struct mtp_file_range mfr;
+ struct work_struct *work;
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr.fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr.offset;
+ dev->xfer_file_length = mfr.length;
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr.command;
+ dev->xfer_transaction_id = mfr.transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+ break;
+ }
+ case MTP_SEND_EVENT:
+ {
+ struct mtp_event event;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+ ret = -EFAULT;
+ else
+ ret = mtp_send_event(dev, &event);
+ goto out;
+ }
+ }
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d\n", ret);
+ return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_open\n");
+ if (mtp_lock(&_mtp_dev->open_excl))
+ return -EBUSY;
+
+ /* clear any error condition */
+ if (_mtp_dev->state != STATE_OFFLINE)
+ _mtp_dev->state = STATE_READY;
+
+ fp->private_data = _mtp_dev;
+ return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_release\n");
+
+ mtp_unlock(&_mtp_dev->open_excl);
+ return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+ .owner = THIS_MODULE,
+ .read = mtp_read,
+ .write = mtp_write,
+ .unlocked_ioctl = mtp_ioctl,
+ .open = mtp_open,
+ .release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = mtp_shortname,
+ .fops = &mtp_fops,
+};
+
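+/* Handle control requests addressed to the MTP function: the Microsoft
+ * OS string and extended configuration descriptor used for host driver
+ * matching, plus the MTP class cancel and device status requests.
+ */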
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
+
+ VDBG(cdev, "mtp_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MTP OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MTP_OS_STRING_ID) {
+ value = (w_length < sizeof(mtp_os_string)
+ ? w_length : sizeof(mtp_os_string));
+ memcpy(cdev->req->buf, mtp_os_string, value);
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+ /* Handle MTP OS descriptor */
+ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == 1
+ && (ctrl->bRequestType & USB_DIR_IN)
+ && (w_index == 4 || w_index == 5)) {
+ value = (w_length < sizeof(mtp_ext_config_desc) ?
+ w_length : sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+ }
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+ DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+ && w_value == 0) {
+ DBG(cdev, "MTP_REQ_CANCEL\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state == STATE_BUSY) {
+ dev->state = STATE_CANCELED;
+ wake_up(&dev->read_wq);
+ wake_up(&dev->write_wq);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* We need to queue a request to read the remaining
+ * bytes, but we don't actually need to look at
+ * the contents.
+ */
+ value = w_length;
+ } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+ && w_index == 0 && w_value == 0) {
+ struct mtp_device_status *status = cdev->req->buf;
+ status->wLength =
+ __constant_cpu_to_le16(sizeof(*status));
+
+ DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ /* device status is "busy" until we report
+ * the cancelation to userspace
+ */
+ if (dev->state == STATE_CANCELED)
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+ else
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_OK);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ value = sizeof(*status);
+ }
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ ERROR(cdev, "%s setup response queue error\n", __func__);
+ }
+ return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct mtp_dev *dev = func_to_mtp(f);
+ int id;
+ int ret;
+
+ dev->cdev = cdev;
+ DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ mtp_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+ &mtp_fullspeed_out_desc, &mtp_intr_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ mtp_highspeed_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_highspeed_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_request *req;
+ int i;
+
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ while ((req = mtp_req_get(dev, &dev->intr_idle)))
+ mtp_request_free(req, dev->ep_intr);
+ dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+ config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+ config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->ep_intr->desc = &mtp_intr_desc;
+ ret = usb_ep_enable(dev->ep_intr);
+ if (ret) {
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->state = STATE_READY;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "mtp_function_disable\n");
+ dev->state = STATE_OFFLINE;
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_intr);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int ret = 0;
+
+ printk(KERN_INFO "mtp_bind_config\n");
+
+ /* allocate a string ID for our interface */
+ if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ mtp_interface_desc.iInterface = ret;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "mtp";
+ dev->function.strings = mtp_strings;
+ if (ptp_config) {
+ dev->function.descriptors = fs_ptp_descs;
+ dev->function.hs_descriptors = hs_ptp_descs;
+ } else {
+ dev->function.descriptors = fs_mtp_descs;
+ dev->function.hs_descriptors = hs_mtp_descs;
+ }
+ dev->function.bind = mtp_function_bind;
+ dev->function.unbind = mtp_function_unbind;
+ dev->function.set_alt = mtp_function_set_alt;
+ dev->function.disable = mtp_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+ struct mtp_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ init_waitqueue_head(&dev->intr_wq);
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->ioctl_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->intr_idle);
+
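+ /* File transfers run on a dedicated single-threaded workqueue so
+ * vfs_read/vfs_write execute in process context, one transfer at a
+ * time.
+ */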
+ dev->wq = create_singlethread_workqueue("f_mtp");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ INIT_WORK(&dev->send_file_work, send_file_work);
+ INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+ _mtp_dev = dev;
+
+ ret = misc_register(&mtp_device);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ destroy_workqueue(dev->wq);
+err1:
+ _mtp_dev = NULL;
+ kfree(dev);
+ printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void mtp_cleanup(void)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (!dev)
+ return;
+
+ misc_deregister(&mtp_device);
+ destroy_workqueue(dev->wq);
+ _mtp_dev = NULL;
+ kfree(dev);
+}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 3ea4666be3d0..ac350235b27d 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/atomic.h>
@@ -80,6 +80,8 @@ struct f_rndis {
struct gether port;
u8 ctrl_id, data_id;
u8 ethaddr[ETH_ALEN];
+ u32 vendorID;
+ const char *manufacturer;
int config;
struct usb_ep *notify;
@@ -179,12 +181,11 @@ static struct usb_interface_assoc_descriptor
rndis_iad_descriptor = {
.bLength = sizeof rndis_iad_descriptor,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
-
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
.bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ .bFunctionProtocol = USB_CDC_ACM_PROTO_VENDOR,
/* .iFunction = DYNAMIC */
};
@@ -776,12 +777,9 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
rndis_set_host_mac(rndis->config, rndis->ethaddr);
-#if 0
-// FIXME
- if (rndis_set_param_vendor(rndis->config, vendorID,
- manufacturer))
- goto fail0;
-#endif
+ if (rndis_set_param_vendor(rndis->config, rndis->vendorID,
+ rndis->manufacturer))
+ goto fail;
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
@@ -831,6 +829,9 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
if (gadget_is_superspeed(c->cdev->gadget))
usb_free_descriptors(f->ss_descriptors);
+
+ rndis_string_defs[0].id = 0;
+
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
@@ -861,7 +862,8 @@ static inline bool can_support_rndis(struct usb_configuration *c)
* for calling @gether_cleanup() before module unload.
*/
int
-rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer)
{
struct f_rndis *rndis;
int status;
@@ -906,6 +908,8 @@ rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
goto fail;
memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+ rndis->vendorID = vendorID;
+ rndis->manufacturer = manufacturer;
/* RNDIS activates when the host changes this filter */
rndis->port.cdc_filter = 0;
diff --git a/drivers/usb/gadget/fsl_tegra_udc.c b/drivers/usb/gadget/fsl_tegra_udc.c
new file mode 100644
index 000000000000..b254258726fa
--- /dev/null
+++ b/drivers/usb/gadget/fsl_tegra_udc.c
@@ -0,0 +1,155 @@
+/*
+ * Description:
+ * Helper functions to support the tegra USB controller
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <mach/usb_phy.h>
+
+static struct tegra_usb_phy *phy;
+static struct clk *udc_clk;
+static struct clk *emc_clk;
+static struct clk *sclk_clk;
+static void *udc_base;
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+ struct resource *res;
+ int err;
+ int instance;
+ struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+ udc_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc_clk)) {
+ dev_err(&pdev->dev, "Can't get udc clock\n");
+ return PTR_ERR(udc_clk);
+ }
+
+ clk_enable(udc_clk);
+
+ sclk_clk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(sclk_clk)) {
+ dev_err(&pdev->dev, "Can't get sclk clock\n");
+ err = PTR_ERR(sclk_clk);
+ goto err_sclk;
+ }
+
+ clk_set_rate(sclk_clk, 80000000);
+ clk_enable(sclk_clk);
+
+ emc_clk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc_clk)) {
+ dev_err(&pdev->dev, "Can't get emc clock\n");
+ err = PTR_ERR(emc_clk);
+ goto err_emc;
+ }
+
+ clk_enable(emc_clk);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Set DDR busy hints to 150MHz. For Tegra 2x SOC, DDR rate is half of EMC rate */
+ clk_set_rate(emc_clk, 300000000);
+#else
+ /* Set DDR busy hints to 100MHz. For Tegra 3x SOC DDR rate equals to EMC rate */
+ clk_set_rate(emc_clk, 100000000);
+#endif
+
+ /* we have to remap the registers ourselves as fsl_udc does not
+ * export them for us.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENXIO;
+ goto err0;
+ }
+ udc_base = ioremap(res->start, resource_size(res));
+ if (!udc_base) {
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ instance = pdev->id;
+ if (instance == -1)
+ instance = 0;
+
+ phy = tegra_usb_phy_open(instance, udc_base, pdata->phy_config,
+ TEGRA_USB_PHY_MODE_DEVICE, pdata->usb_phy_type);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "Can't open phy\n");
+ err = PTR_ERR(phy);
+ goto err1;
+ }
+ tegra_usb_phy_power_on(phy, true);
+
+ return 0;
+err1:
+ iounmap(udc_base);
+err0:
+ clk_disable(emc_clk);
+ clk_put(emc_clk);
+err_emc:
+ clk_disable(sclk_clk);
+ clk_put(sclk_clk);
+err_sclk:
+ clk_disable(udc_clk);
+ clk_put(udc_clk);
+ return err;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+
+void fsl_udc_clk_release(void)
+{
+ tegra_usb_phy_close(phy);
+
+ iounmap(udc_base);
+
+ clk_disable(udc_clk);
+ clk_put(udc_clk);
+
+ clk_disable(sclk_clk);
+ clk_put(sclk_clk);
+
+ clk_disable(emc_clk);
+ clk_put(emc_clk);
+}
+
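+/* Suspend/resume helpers: power the PHY off or on and gate or ungate
+ * the controller, system and memory clocks.  is_dpd is passed through
+ * to the PHY code to indicate a deep power down transition.
+ */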
+void fsl_udc_clk_suspend(bool is_dpd)
+{
+ tegra_usb_phy_power_off(phy, is_dpd);
+ clk_disable(udc_clk);
+ clk_disable(sclk_clk);
+ clk_disable(emc_clk);
+}
+
+void fsl_udc_clk_resume(bool is_dpd)
+{
+ clk_enable(emc_clk);
+ clk_enable(sclk_clk);
+ clk_enable(udc_clk);
+ tegra_usb_phy_power_on(phy, is_dpd);
+}
+
+void fsl_udc_clk_enable(void)
+{
+ clk_enable(udc_clk);
+}
+
+void fsl_udc_clk_disable(void)
+{
+ clk_disable(udc_clk);
+}
+
+bool fsl_udc_charger_detect(void)
+{
+ return tegra_usb_phy_charger_detect(phy);
+}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index de24a4233c25..813d290cdc5c 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -5,6 +5,8 @@
* Author: Li Yang <leoli@freescale.com>
* Jiang Bo <tanya.jiang@freescale.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* Description:
* Freescale high-speed USB SOC DR module device controller driver.
* This can be found on MPC8349E/MPC8313E/MPC5121E cpus.
@@ -40,6 +42,8 @@
#include <linux/fsl_devices.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -49,13 +53,21 @@
#include "fsl_usb2_udc.h"
+#ifdef CONFIG_ARCH_TEGRA
+#define DRIVER_DESC "NVidia Tegra High-Speed USB SOC Device Controller driver"
+#else
#define DRIVER_DESC "Freescale High-Speed USB SOC Device Controller driver"
+#endif
#define DRIVER_AUTHOR "Li Yang/Jiang Bo"
#define DRIVER_VERSION "Apr 20, 2007"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#ifdef CONFIG_ARCH_TEGRA
+static const char driver_name[] = "fsl-tegra-udc";
+#else
static const char driver_name[] = "fsl-usb2-udc";
+#endif
static const char driver_desc[] = DRIVER_DESC;
static struct usb_dr_device *dr_regs;
@@ -63,6 +75,11 @@ static struct usb_dr_device *dr_regs;
static struct usb_sys_interface *usb_sys_regs;
#endif
+/* Charger current limit=1800mA, as per the USB charger spec */
+#define USB_CHARGING_CURRENT_LIMIT_MA 1800
+/* 1 sec wait time for charger detection after vbus is detected */
+#define USB_CHARGER_DETECTION_WAIT_TIME_MS 1000
+
/* it is initialized in probe() */
static struct fsl_udc *udc_controller = NULL;
@@ -75,7 +92,9 @@ fsl_ep0_desc = {
.wMaxPacketSize = USB_MAX_CTRL_PAYLOAD,
};
+static u32 *control_reg = NULL;
static void fsl_ep_fifo_flush(struct usb_ep *_ep);
+static int reset_queues(struct fsl_udc *udc);
#ifdef CONFIG_PPC32
/*
@@ -150,10 +169,44 @@ static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
#define hc32_to_cpu(x) le32_to_cpu(x)
#endif /* CONFIG_PPC32 */
+/*
+ * High speed test mode packet(53 bytes).
+ * See USB 2.0 spec, section 7.1.20.
+ */
+static const u8 fsl_udc_test_packet[53] = {
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+};
+
/********************************************************************
* Internal Used Function
********************************************************************/
/*-----------------------------------------------------------------
+ * vbus_enabled() - checks vbus status
+ *--------------------------------------------------------------*/
+static inline bool vbus_enabled(void)
+{
+ bool status = false;
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ status = (fsl_readl(&usb_sys_regs->vbus_wakeup) & USB_SYS_VBUS_STATUS);
+#else
+ /*On FPGA VBUS is detected through VBUS A Session instead of VBUS status. */
+ status = (fsl_readl(&usb_sys_regs->vbus_sensors) & USB_SYS_VBUS_ASESSION);
+#endif
+ return status;
+}
+
+/*-----------------------------------------------------------------
* done() - retire a request; caller blocked irqs
* @status : request status to be set, only works when
* request is still in progress.
@@ -241,18 +294,47 @@ static void nuke(struct fsl_ep *ep, int status)
Internal Hardware related function
------------------------------------------------------------------*/
+#define FSL_UDC_RESET_TIMEOUT 1000
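+/* Stop the controller and issue a core reset, polling until the
+ * hardware clears the reset bit or the timeout expires.
+ */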
+static int dr_controller_reset(struct fsl_udc *udc)
+{
+ unsigned int tmp;
+ unsigned long timeout;
+
+ /* Stop and reset the usb controller */
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp &= ~USB_CMD_RUN_STOP;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp |= USB_CMD_CTRL_RESET;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ /* Wait for reset to complete */
+ timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+ while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc reset timeout!\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
static int dr_controller_setup(struct fsl_udc *udc)
{
unsigned int tmp, portctrl, ep_num;
unsigned int max_no_of_ep;
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
unsigned int ctrl;
#endif
+#ifdef CONFIG_ARCH_TEGRA
unsigned long timeout;
-#define FSL_UDC_RESET_TIMEOUT 1000
+#endif
+ int status;
/* Config PHY interface */
- portctrl = fsl_readl(&dr_regs->portsc1);
+ portctrl = fsl_readl(control_reg);
portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
switch (udc->phy_mode) {
case FSL_USB2_PHY_ULPI:
@@ -270,26 +352,11 @@ static int dr_controller_setup(struct fsl_udc *udc)
default:
return -EINVAL;
}
- fsl_writel(portctrl, &dr_regs->portsc1);
-
- /* Stop and reset the usb controller */
- tmp = fsl_readl(&dr_regs->usbcmd);
- tmp &= ~USB_CMD_RUN_STOP;
- fsl_writel(tmp, &dr_regs->usbcmd);
-
- tmp = fsl_readl(&dr_regs->usbcmd);
- tmp |= USB_CMD_CTRL_RESET;
- fsl_writel(tmp, &dr_regs->usbcmd);
+ fsl_writel(portctrl, control_reg);
- /* Wait for reset to complete */
- timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
- while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
- if (time_after(jiffies, timeout)) {
- ERR("udc reset timeout!\n");
- return -ETIMEDOUT;
- }
- cpu_relax();
- }
+ status = dr_controller_reset(udc);
+ if (status)
+ return status;
/* Set the controller as device mode */
tmp = fsl_readl(&dr_regs->usbmode);
@@ -301,6 +368,19 @@ static int dr_controller_setup(struct fsl_udc *udc)
tmp |= USB_MODE_ES;
fsl_writel(tmp, &dr_regs->usbmode);
+#ifdef CONFIG_ARCH_TEGRA
+ /* Wait for controller to switch to device mode */
+ timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+ while ((fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_DEVICE) !=
+ USB_MODE_CTRL_MODE_DEVICE) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc device mode setup timeout!\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+#endif
+
/* Clear the setup status */
fsl_writel(0, &dr_regs->usbsts);
@@ -321,7 +401,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
fsl_writel(tmp, &dr_regs->endptctrl[ep_num]);
}
/* Config control enable i/o output, cpu endian register */
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
if (udc->pdata->have_sysif_regs) {
ctrl = __raw_readl(&usb_sys_regs->control);
ctrl |= USB_CTRL_IOENB;
@@ -349,7 +429,32 @@ static int dr_controller_setup(struct fsl_udc *udc)
static void dr_controller_run(struct fsl_udc *udc)
{
u32 temp;
+#ifdef CONFIG_ARCH_TEGRA
+ unsigned long timeout;
+#define FSL_UDC_RUN_TIMEOUT 1000
+#endif
+ /* Clear stopped bit */
+ udc->stopped = 0;
+/* If OTG transceiver is available, then it handles the VBUS detection */
+ if (!udc_controller->transceiver) {
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ /* Enable cable detection interrupt, without setting the
+ * USB_SYS_VBUS_WAKEUP_INT bit. USB_SYS_VBUS_WAKEUP_INT is
+ * clear on write */
+ temp = fsl_readl(&usb_sys_regs->vbus_wakeup);
+ temp |= (USB_SYS_VBUS_WAKEUP_INT_ENABLE | USB_SYS_VBUS_WAKEUP_ENABLE);
+ temp &= ~USB_SYS_VBUS_WAKEUP_INT_STATUS;
+ fsl_writel(temp, &usb_sys_regs->vbus_wakeup);
+#else
+ /*On FPGA VBUS is detected through VBUS A Session instead of VBUS
+ * status. */
+ temp = fsl_readl(&usb_sys_regs->vbus_sensors);
+ temp |= USB_SYS_VBUS_ASESSION_INT_EN;
+ temp &= ~USB_SYS_VBUS_ASESSION_CHANGED;
+ fsl_writel(temp, &usb_sys_regs->vbus_sensors);
+#endif
+ }
/* Enable DR irq reg */
temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
| USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
@@ -357,9 +462,6 @@ static void dr_controller_run(struct fsl_udc *udc)
fsl_writel(temp, &dr_regs->usbintr);
- /* Clear stopped bit */
- udc->stopped = 0;
-
/* Set the controller as device mode */
temp = fsl_readl(&dr_regs->usbmode);
temp |= USB_MODE_CTRL_MODE_DEVICE;
@@ -369,24 +471,30 @@ static void dr_controller_run(struct fsl_udc *udc)
temp = fsl_readl(&dr_regs->usbcmd);
temp |= USB_CMD_RUN_STOP;
fsl_writel(temp, &dr_regs->usbcmd);
+
+#ifdef CONFIG_ARCH_TEGRA
+ /* Wait for controller to start */
+ timeout = jiffies + FSL_UDC_RUN_TIMEOUT;
+ while ((fsl_readl(&dr_regs->usbcmd) & USB_CMD_RUN_STOP) !=
+ USB_CMD_RUN_STOP) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc start timeout!\n");
+ return;
+ }
+ cpu_relax();
+ }
+#endif
+
+ return;
}
static void dr_controller_stop(struct fsl_udc *udc)
{
unsigned int tmp;
- pr_debug("%s\n", __func__);
-
- /* if we're in OTG mode, and the Host is currently using the port,
- * stop now and don't rip the controller out from under the
- * ehci driver
- */
- if (udc->gadget.is_otg) {
- if (!(fsl_readl(&dr_regs->otgsc) & OTGSC_STS_USB_ID)) {
- pr_debug("udc: Leaving early\n");
- return;
- }
- }
+ /* Clear pending interrupt status bits */
+ tmp = fsl_readl(&dr_regs->usbsts);
+ fsl_writel(tmp, &dr_regs->usbsts);
/* disable all INTR */
fsl_writel(0, &dr_regs->usbintr);
@@ -517,9 +625,9 @@ static void ep0_setup(struct fsl_udc *udc)
/* the intialization of an ep includes: fields in QH, Regs,
* fsl_ep struct */
struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL,
- USB_MAX_CTRL_PAYLOAD, 0, 0);
+ USB_MAX_CTRL_PAYLOAD, 1, 0);
struct_ep_qh_setup(udc, 0, USB_SEND, USB_ENDPOINT_XFER_CONTROL,
- USB_MAX_CTRL_PAYLOAD, 0, 0);
+ USB_MAX_CTRL_PAYLOAD, 1, 0);
dr_ep_setup(0, USB_RECV, USB_ENDPOINT_XFER_CONTROL);
dr_ep_setup(0, USB_SEND, USB_ENDPOINT_XFER_CONTROL);
@@ -642,15 +750,21 @@ static int fsl_ep_disable(struct usb_ep *_ep)
/* disable ep on controller */
ep_num = ep_index(ep);
- epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
- if (ep_is_in(ep)) {
- epctrl &= ~(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE);
- epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT;
- } else {
- epctrl &= ~(EPCTRL_RX_ENABLE | EPCTRL_TX_TYPE);
- epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT;
+#if defined(CONFIG_ARCH_TEGRA)
+ /* Touch the registers if cable is connected and phy is on */
+ if (vbus_enabled())
+#endif
+ {
+ epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (ep_is_in(ep)) {
+ epctrl &= ~(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE);
+ epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT;
+ } else {
+ epctrl &= ~(EPCTRL_RX_ENABLE | EPCTRL_TX_TYPE);
+ epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT;
+ }
+ fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
}
- fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
udc = (struct fsl_udc *)ep->udc;
spin_lock_irqsave(&udc->lock, flags);
@@ -710,6 +824,9 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
? (1 << (ep_index(ep) + 16))
: (1 << (ep_index(ep)));
+ /* Flush all the dTD structs out to memory */
+ wmb();
+
/* check if the pipe is empty */
if (!(list_empty(&ep->queue))) {
/* Add td to the end */
@@ -717,6 +834,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
lastreq->tail->next_td_ptr =
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
+ wmb();
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
goto out;
@@ -767,7 +885,7 @@ out:
* @is_last: return flag if it is the last dTD of the request
* return: pointer to the built dTD */
static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
- dma_addr_t *dma, int *is_last)
+ dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
{
u32 swap_temp;
struct ep_td_struct *dtd;
@@ -776,7 +894,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
*length = min(req->req.length - req->req.actual,
(unsigned)EP_MAX_LENGTH_TRANSFER);
- dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
+ dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
if (dtd == NULL)
return dtd;
@@ -826,7 +944,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
}
/* Generate dtd chain for a request */
-static int fsl_req_to_dtd(struct fsl_req *req)
+static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
{
unsigned count;
int is_last;
@@ -835,7 +953,7 @@ static int fsl_req_to_dtd(struct fsl_req *req)
dma_addr_t dma;
do {
- dtd = fsl_build_dtd(req, &count, &dma, &is_last);
+ dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
if (dtd == NULL)
return -ENOMEM;
@@ -864,8 +982,10 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
+ enum dma_data_direction dir;
+ int status;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
@@ -873,16 +993,26 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad params", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->desc)) {
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (unlikely(!ep->desc)) {
VDBG("%s, bad ep", __func__);
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EINVAL;
}
+
if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
- if (req->req.length > ep->ep.maxpacket)
+ if (req->req.length > ep->ep.maxpacket) {
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EMSGSIZE;
+ }
}
- udc = ep->udc;
+ dir = ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
@@ -890,18 +1020,12 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* map virtual address to hardware */
if (req->req.dma == DMA_ADDR_INVALID) {
- req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
- req->req.buf,
- req->req.length, ep_is_in(ep)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ req->req.dma = dma_map_single(udc->gadget.dev.parent,
+ req->req.buf, req->req.length, dir);
req->mapped = 1;
} else {
- dma_sync_single_for_device(ep->udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- ep_is_in(ep)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ dma_sync_single_for_device(udc->gadget.dev.parent,
+ req->req.dma, req->req.length, dir);
req->mapped = 0;
}
@@ -909,16 +1033,23 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
req->req.actual = 0;
req->dtd_count = 0;
- spin_lock_irqsave(&udc->lock, flags);
/* build dtds and push them to device queue */
- if (!fsl_req_to_dtd(req)) {
- fsl_queue_td(ep, req);
- } else {
+ status = fsl_req_to_dtd(req, gfp_flags);
+ if (status)
+ goto err_unmap;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* re-check that the ep has not been disabled in the meantime */
+ if (unlikely(!ep->desc)) {
spin_unlock_irqrestore(&udc->lock, flags);
- return -ENOMEM;
+ status = -EINVAL;
+ goto err_unmap;
}
+ fsl_queue_td(ep, req);
+
/* Update ep0 state */
if ((ep_index(ep) == 0))
udc->ep0_state = DATA_STATE_XMIT;
@@ -929,6 +1060,15 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
+
+err_unmap:
+ if (req->mapped) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ req->req.dma, req->req.length, dir);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ }
+ return status;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
@@ -949,12 +1089,19 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* Stop the ep before we deal with the queue */
ep->stopped = 1;
ep_num = ep_index(ep);
- epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
- if (ep_is_in(ep))
- epctrl &= ~EPCTRL_TX_ENABLE;
- else
- epctrl &= ~EPCTRL_RX_ENABLE;
- fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+#if defined(CONFIG_ARCH_TEGRA)
+ /* Only touch the registers if the cable is connected and the PHY is on */
+ if (vbus_enabled())
+#endif
+ {
+ epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (ep_is_in(ep))
+ epctrl &= ~EPCTRL_TX_ENABLE;
+ else
+ epctrl &= ~EPCTRL_RX_ENABLE;
+ fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+ }
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
@@ -997,12 +1144,19 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
done(ep, req, -ECONNRESET);
/* Enable EP */
-out: epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
- if (ep_is_in(ep))
- epctrl |= EPCTRL_TX_ENABLE;
- else
- epctrl |= EPCTRL_RX_ENABLE;
- fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+out:
+#if defined(CONFIG_ARCH_TEGRA)
+ /* Only touch the registers if the cable is connected and the PHY is on */
+ if (vbus_enabled())
+#endif
+ {
+ epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (ep_is_in(ep))
+ epctrl |= EPCTRL_TX_ENABLE;
+ else
+ epctrl |= EPCTRL_RX_ENABLE;
+ fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+ }
ep->stopped = stopped;
spin_unlock_irqrestore(&ep->udc->lock, flags);
@@ -1100,6 +1254,12 @@ static void fsl_ep_fifo_flush(struct usb_ep *_ep)
unsigned long timeout;
#define FSL_UDC_FLUSH_TIMEOUT 1000
+#if defined(CONFIG_ARCH_TEGRA)
+ /* Only touch the registers if the cable is connected and the PHY is on */
+ if (!vbus_enabled())
+ return;
+#endif
+
if (!_ep) {
return;
} else {
@@ -1163,6 +1323,7 @@ static int fsl_get_frame(struct usb_gadget *gadget)
/*-----------------------------------------------------------------------
* Tries to wake up the host connected to this gadget
-----------------------------------------------------------------------*/
+#ifndef CONFIG_USB_ANDROID
static int fsl_wakeup(struct usb_gadget *gadget)
{
struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
@@ -1181,6 +1342,7 @@ static int fsl_wakeup(struct usb_gadget *gadget)
fsl_writel(portsc, &dr_regs->portsc1);
return 0;
}
+#endif
static int can_pullup(struct fsl_udc *udc)
{
@@ -1195,8 +1357,55 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
unsigned long flags;
udc = container_of(gadget, struct fsl_udc, gadget);
- spin_lock_irqsave(&udc->lock, flags);
+
VDBG("VBUS %s", is_active ? "on" : "off");
+
+ if (udc->transceiver) {
+ if (udc->vbus_active && !is_active) {
+ /* If the cable is disconnected, cancel any delayed work */
+ cancel_delayed_work(&udc->work);
+ spin_lock_irqsave(&udc->lock, flags);
+ /* reset all internal Queues and inform client driver */
+ reset_queues(udc);
+ /* stop the controller and turn off the clocks */
+ dr_controller_stop(udc);
+ dr_controller_reset(udc);
+ udc->vbus_active = 0;
+ udc->usb_state = USB_STATE_DEFAULT;
+ spin_unlock_irqrestore(&udc->lock, flags);
+ fsl_udc_clk_suspend(false);
+ if (udc->vbus_regulator) {
+ /* set the current limit to 0mA */
+ regulator_set_current_limit(
+ udc->vbus_regulator, 0, 0);
+ }
+ } else if (!udc->vbus_active && is_active) {
+ fsl_udc_clk_resume(false);
+ /* setup the controller in the device mode */
+ dr_controller_setup(udc);
+ /* setup EP0 for setup packet */
+ ep0_setup(udc);
+ /* initialize the USB and EP states */
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ udc->vbus_active = 1;
+ /* start the controller */
+ dr_controller_run(udc);
+ if (udc->vbus_regulator) {
+ /* set the current limit to 100mA */
+ regulator_set_current_limit(
+ udc->vbus_regulator, 0, 100);
+ }
+ /* Schedule delayed work to check for a charger if no setup
+ * packet is received within the detection wait time */
+ schedule_delayed_work(&udc->work,
+ USB_CHARGER_DETECTION_WAIT_TIME_MS);
+ }
+ return 0;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -1220,6 +1429,12 @@ static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
struct fsl_udc *udc;
udc = container_of(gadget, struct fsl_udc, gadget);
+ /* check whether a UDC regulator is available for drawing VBUS current */
+ if (udc->vbus_regulator) {
+ udc->current_limit = mA;
+ schedule_work(&udc->charger_work);
+ }
+
if (udc->transceiver)
return otg_set_power(udc->transceiver, mA);
return -ENOTSUPP;
@@ -1234,13 +1449,16 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
udc = container_of(gadget, struct fsl_udc, gadget);
udc->softconnect = (is_on != 0);
- if (can_pullup(udc))
- fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
- &dr_regs->usbcmd);
- else
- fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
- &dr_regs->usbcmd);
-
+ if (udc_controller->transceiver) {
+ if (udc_controller->transceiver->state == OTG_STATE_B_PERIPHERAL) {
+ if (can_pullup(udc))
+ fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+ &dr_regs->usbcmd);
+ else
+ fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+ &dr_regs->usbcmd);
+ }
+ }
return 0;
}
@@ -1250,7 +1468,9 @@ static int fsl_stop(struct usb_gadget_driver *driver);
/* defined in gadget.h */
static struct usb_gadget_ops fsl_gadget_ops = {
.get_frame = fsl_get_frame,
+#ifndef CONFIG_USB_ANDROID
.wakeup = fsl_wakeup,
+#endif
/* .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
.vbus_session = fsl_vbus_session,
.vbus_draw = fsl_vbus_draw,
@@ -1299,7 +1519,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->mapped = 1;
- if (fsl_req_to_dtd(req) == 0)
+ if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
fsl_queue_td(ep, req);
else
return -ENOMEM;
@@ -1377,13 +1597,25 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
req->req.complete = NULL;
req->dtd_count = 0;
- req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
- req->req.buf, req->req.length,
- ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- req->mapped = 1;
+ /* map virtual address to hardware */
+ if (req->req.dma == DMA_ADDR_INVALID) {
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf,
+ req->req.length, ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->mapped = 1;
+ } else {
+ dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->mapped = 0;
+ }
/* prime the data phase */
- if ((fsl_req_to_dtd(req) == 0))
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
fsl_queue_td(ep, req);
else /* no mem */
goto stall;
@@ -1395,6 +1627,107 @@ stall:
ep0stall(udc);
}
+static void udc_test_mode(struct fsl_udc *udc, u32 test_mode)
+{
+ struct fsl_req *req;
+ struct fsl_ep *ep;
+ u32 portsc, bitmask;
+ unsigned long timeout;
+
+ /* Ack the ep0 IN */
+ if (ep0_prime_status(udc, EP_DIR_IN))
+ ep0stall(udc);
+
+ /* get the ep0 */
+ ep = &udc->eps[0];
+ bitmask = ep_is_in(ep)
+ ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep)));
+
+ timeout = jiffies + HZ;
+ /* Wait until the ep0 IN transfer is complete */
+ while (!(fsl_readl(&dr_regs->endptcomplete) & bitmask)) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("Timeout for Ep0 IN Ack\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ switch (test_mode << PORTSCX_PTC_BIT_POS) {
+ case PORTSCX_PTC_JSTATE:
+ VDBG("TEST_J\n");
+ break;
+ case PORTSCX_PTC_KSTATE:
+ VDBG("TEST_K\n");
+ break;
+ case PORTSCX_PTC_SEQNAK:
+ VDBG("TEST_SE0_NAK\n");
+ break;
+ case PORTSCX_PTC_PACKET:
+ VDBG("TEST_PACKET\n");
+
+ /* get the ep and configure for IN direction */
+ ep = &udc->eps[0];
+ udc->ep0_dir = USB_DIR_IN;
+
+ /* Initialize ep0 status request structure */
+ req = container_of(fsl_alloc_request(NULL, GFP_ATOMIC),
+ struct fsl_req, req);
+ /* allocate a small amount of memory to get a valid address */
+ req->req.buf = kmalloc(sizeof(fsl_udc_test_packet), GFP_ATOMIC);
+ req->req.dma = virt_to_phys(req->req.buf);
+
+ /* Fill in the request structure */
+ memcpy(req->req.buf, fsl_udc_test_packet, sizeof(fsl_udc_test_packet));
+ req->ep = ep;
+ req->req.length = sizeof(fsl_udc_test_packet);
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = NULL;
+ req->dtd_count = 0;
+ req->mapped = 0;
+
+ dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ /* prime the data phase */
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
+ fsl_queue_td(ep, req);
+ else /* no mem */
+ goto stall;
+
+ list_add_tail(&req->queue, &ep->queue);
+ udc->ep0_state = DATA_STATE_XMIT;
+ break;
+ case PORTSCX_PTC_FORCE_EN:
+ VDBG("TEST_FORCE_EN\n");
+ break;
+ default:
+ ERR("udc unknown test mode[%d]!\n", test_mode);
+ goto stall;
+ }
+
+ /* read the portsc register */
+ portsc = fsl_readl(&dr_regs->portsc1);
+ /* set the test mode selector */
+ portsc |= test_mode << PORTSCX_PTC_BIT_POS;
+ fsl_writel(portsc, &dr_regs->portsc1);
+
+ /*
+ * The device must have its power cycled to exit test mode.
+ * See USB 2.0 spec, section 9.4.9 for test mode operation via Set Feature
+ * and section 7.1.20 for the test modes themselves.
+ */
+ pr_info("udc entering the test mode, power cycle to exit test mode\n");
+ return;
+stall:
+ ep0stall(udc);
+}
+
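The PTC write performed by udc_test_mode() above is the core of test-mode entry. As a rough, self-contained sketch (not part of the patch; local names are hypothetical, and unlike the driver, which only ORs the selector in, this version also clears the field first), the selection boils down to:

#include <stdint.h>

#define PTC_BIT_POS 16                        /* mirrors PORTSCX_PTC_BIT_POS */
#define PTC_MASK    (0xFu << PTC_BIT_POS)     /* Port Test Control field */

static uint32_t apply_test_mode(uint32_t portsc, uint16_t wIndex)
{
	uint32_t selector = wIndex >> 8;      /* 1=TEST_J .. 4=TEST_PACKET, 5=FORCE_ENABLE */

	portsc &= ~PTC_MASK;                  /* clear any previous selection */
	portsc |= selector << PTC_BIT_POS;    /* program the new test mode */
	return portsc;
}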
static void setup_received_irq(struct fsl_udc *udc,
struct usb_ctrlrequest *setup)
{
@@ -1419,6 +1752,11 @@ static void setup_received_irq(struct fsl_udc *udc,
if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
| USB_RECIP_DEVICE))
break;
+#ifdef CONFIG_ARCH_TEGRA
+ /* This delay is necessary for some Windows drivers to
+ * recognize the device properly */
+ mdelay(1);
+#endif
ch9setaddress(udc, wValue, wIndex, wLength);
return;
@@ -1429,7 +1767,17 @@ static void setup_received_irq(struct fsl_udc *udc,
int rc = -EOPNOTSUPP;
u16 ptc = 0;
- if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+ if (setup->bRequestType == USB_RECIP_DEVICE &&
+ wValue == USB_DEVICE_TEST_MODE) {
+ /*
+ * If the feature selector is TEST_MODE, the most significant
+ * byte of wIndex selects the test mode and the lower byte
+ * must be zero.
+ */
+ udc_test_mode(udc, wIndex >> 8);
+ return;
+
+ } else if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
int pipe = get_pipe_by_windex(wIndex);
struct fsl_ep *ep;
@@ -1492,7 +1840,7 @@ static void setup_received_irq(struct fsl_udc *udc,
udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
? USB_DIR_IN : USB_DIR_OUT;
spin_unlock(&udc->lock);
- if (udc->driver->setup(&udc->gadget,
+ if (udc->driver && udc->driver->setup(&udc->gadget,
&udc->local_setup_buff) < 0)
ep0stall(udc);
spin_lock(&udc->lock);
@@ -1502,7 +1850,7 @@ static void setup_received_irq(struct fsl_udc *udc,
/* No data phase, IN status from gadget */
udc->ep0_dir = USB_DIR_IN;
spin_unlock(&udc->lock);
- if (udc->driver->setup(&udc->gadget,
+ if (udc->driver && udc->driver->setup(&udc->gadget,
&udc->local_setup_buff) < 0)
ep0stall(udc);
spin_lock(&udc->lock);
@@ -1677,7 +2025,12 @@ static void dtd_complete_irq(struct fsl_udc *udc)
if (!bit_pos)
return;
+#ifdef CONFIG_ARCH_TEGRA
+ /* XXX what's going on here */
+ for (i = 0; i < udc->max_ep; i++) {
+#else
for (i = 0; i < udc->max_ep * 2; i++) {
+#endif
ep_num = i >> 1;
direction = i % 2;
@@ -1720,13 +2073,10 @@ static void port_change_irq(struct fsl_udc *udc)
{
u32 speed;
- if (udc->bus_reset)
- udc->bus_reset = 0;
-
/* Bus resetting is finished */
if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET)) {
/* Get the speed */
- speed = (fsl_readl(&dr_regs->portsc1)
+ speed = (fsl_readl(control_reg)
& PORTSCX_PORT_SPEED_MASK);
switch (speed) {
case PORTSCX_PORT_SPEED_HIGH:
@@ -1756,7 +2106,7 @@ static void suspend_irq(struct fsl_udc *udc)
udc->usb_state = USB_STATE_SUSPENDED;
/* report suspend to the driver, serial.c does not support this */
- if (udc->driver->suspend)
+ if (udc->driver && udc->driver->suspend)
udc->driver->suspend(&udc->gadget);
}
@@ -1766,7 +2116,7 @@ static void bus_resume(struct fsl_udc *udc)
udc->resume_state = 0;
/* report resume to the driver, serial.c does not support this */
- if (udc->driver->resume)
+ if (udc->driver && udc->driver->resume)
udc->driver->resume(&udc->gadget);
}
@@ -1780,7 +2130,8 @@ static int reset_queues(struct fsl_udc *udc)
/* report disconnect; the driver is already quiesced */
spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
+ if (udc->driver && udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
spin_lock(&udc->lock);
return 0;
@@ -1828,10 +2179,17 @@ static void reset_irq(struct fsl_udc *udc)
/* Write 1s to the flush register */
fsl_writel(0xffffffff, &dr_regs->endptflush);
+#if defined(CONFIG_ARCH_TEGRA)
+ /* When the bus reset is seen on Tegra, the PORTSCX_PORT_RESET bit
+ * is not set */
+ VDBG("Bus reset");
+ /* Reset all the queues, include XD, dTD, EP queue
+ * head and TR Queue */
+ reset_queues(udc);
+ udc->usb_state = USB_STATE_DEFAULT;
+#else
if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
VDBG("Bus reset");
- /* Bus is reseting */
- udc->bus_reset = 1;
/* Reset all the queues, include XD, dTD, EP queue
* head and TR Queue */
reset_queues(udc);
@@ -1851,9 +2209,64 @@ static void reset_irq(struct fsl_udc *udc)
dr_controller_run(udc);
udc->usb_state = USB_STATE_ATTACHED;
}
+#endif
+}
+
+static void fsl_udc_set_current_limit_work(struct work_struct *work)
+{
+ struct fsl_udc *udc = container_of(work, struct fsl_udc, charger_work);
+
+ /* check whether a UDC regulator is available for drawing VBUS current */
+ if (udc->vbus_regulator) {
+ /* the regulator API takes the limit in uA; current_limit is in mA */
+ regulator_set_current_limit(
+ udc->vbus_regulator, 0,
+ udc->current_limit * 1000);
+ }
+}
/*
+ * If VBUS is detected and no setup packet is received within
+ * USB_CHARGER_DETECTION_WAIT_TIME_MS, this delayed work runs and
+ * checks for a USB charger.
+ */
+static void fsl_udc_charger_detect_work(struct work_struct *work)
+{
+ struct fsl_udc *udc = container_of(work, struct fsl_udc, work.work);
+
+ /* run the platform charger detection */
+ if (fsl_udc_charger_detect()) {
+ printk(KERN_INFO "USB compliant charger detected\n");
+ /* check whether a UDC regulator is available for drawing VBUS current */
+ if (udc->vbus_regulator) {
+ /* the regulator API takes the limit in uA */
+ regulator_set_current_limit(
+ udc->vbus_regulator, 0,
+ USB_CHARGING_CURRENT_LIMIT_MA * 1000);
+ }
+ }
+}
+
+#if defined(CONFIG_ARCH_TEGRA)
+/*
+ * Restart device controller in the OTG mode on VBUS detection
+ */
+static void fsl_udc_restart(struct fsl_udc *udc)
+{
+ /* setup the controller in the device mode */
+ dr_controller_setup(udc);
+ /* setup EP0 for setup packet */
+ ep0_setup(udc);
+ /* start the controller */
+ dr_controller_run(udc);
+ /* initialize the USB and EP states */
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ udc->vbus_active = 1;
+}
+#endif
+
+/*
* USB device controller interrupt handler
*/
static irqreturn_t fsl_udc_irq(int irq, void *_udc)
@@ -1863,10 +2276,21 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
irqreturn_t status = IRQ_NONE;
unsigned long flags;
+ spin_lock_irqsave(&udc->lock, flags);
+
/* Disable ISR for OTG host mode */
- if (udc->stopped)
+ if (udc->stopped) {
+ spin_unlock_irqrestore(&udc->lock, flags);
return IRQ_NONE;
- spin_lock_irqsave(&udc->lock, flags);
+ }
+#ifndef CONFIG_TEGRA_SILICON_PLATFORM
+ {
+ u32 temp = fsl_readl(&usb_sys_regs->vbus_sensors);
+ udc->vbus_active = (temp & USB_SYS_VBUS_ASESSION) ? true : false;
+ /* write back the register to clear the interrupt */
+ fsl_writel(temp, &usb_sys_regs->vbus_sensors);
+ }
+#endif
irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
/* Clear notification bits */
fsl_writel(irq_src, &dr_regs->usbsts);
@@ -1883,6 +2307,9 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
VDBG("Packet int");
/* Setup package, we only support ep0 as control ep */
if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
+ /* A setup packet was received, so we are connected to a host,
+ * not a charger; cancel any pending delayed work */
+ __cancel_delayed_work(&udc->work);
tripwire_handler(udc, 0,
(u8 *) (&udc->local_setup_buff));
setup_received_irq(udc, &udc->local_setup_buff);
@@ -1909,7 +2336,6 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Reset Received */
if (irq_src & USB_STS_RESET) {
- VDBG("reset int");
reset_irq(udc);
status = IRQ_HANDLED;
}
@@ -1967,30 +2393,14 @@ static int fsl_start(struct usb_gadget_driver *driver,
goto out;
}
- if (udc_controller->transceiver) {
- /* Suspend the controller until OTG enable it */
- udc_controller->stopped = 1;
- printk(KERN_INFO "Suspend udc for OTG auto detect\n");
-
- /* connect to bus through transceiver */
- if (udc_controller->transceiver) {
- retval = otg_set_peripheral(udc_controller->transceiver,
- &udc_controller->gadget);
- if (retval < 0) {
- ERR("can't bind to transceiver\n");
- driver->unbind(&udc_controller->gadget);
- udc_controller->gadget.dev.driver = 0;
- udc_controller->driver = 0;
- return retval;
- }
- }
- } else {
- /* Enable DR IRQ reg and set USBCMD reg Run bit */
+ /* Enable the DR IRQ register and set the USBCMD Run bit */
+ if (!udc_controller->transceiver) {
dr_controller_run(udc_controller);
udc_controller->usb_state = USB_STATE_ATTACHED;
udc_controller->ep0_state = WAIT_FOR_SETUP;
udc_controller->ep0_dir = 0;
}
+
printk(KERN_INFO "%s: bind to driver %s\n",
udc_controller->gadget.name, driver->driver.name);
@@ -2013,9 +2423,6 @@ static int fsl_stop(struct usb_gadget_driver *driver)
if (!driver || driver != udc_controller->driver || !driver->unbind)
return -EINVAL;
- if (udc_controller->transceiver)
- otg_set_peripheral(udc_controller->transceiver, NULL);
-
/* stop DR, disable intr */
dr_controller_stop(udc_controller);
@@ -2053,7 +2460,11 @@ static int fsl_stop(struct usb_gadget_driver *driver)
#include <linux/seq_file.h>
+#ifdef CONFIG_ARCH_TEGRA
+static const char proc_filename[] = "driver/fsl_tegra_udc";
+#else
static const char proc_filename[] = "driver/fsl_usb2_udc";
+#endif
static int fsl_proc_read(char *page, char **start, off_t off, int count,
int *eof, void *_dev)
@@ -2064,6 +2475,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
unsigned long flags;
int t, i;
u32 tmp_reg;
+ u32 tmp_reg2;
struct fsl_ep *ep = NULL;
struct fsl_req *req;
@@ -2147,6 +2559,13 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
next += t;
tmp_reg = fsl_readl(&dr_regs->portsc1);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ tmp_reg2 = tmp_reg;
+#else
+ /* On Tegra3 the PHY Type Select (PTS) and Port Speed fields live in the
+ * hostpc1devlc register instead of the portsc1 register. */
+ tmp_reg2 = fsl_readl(&dr_regs->hostpc1devlc);
+#endif
t = scnprintf(next, size,
"USB Port Status&Control Reg:\n"
"Port Transceiver Type : %s Port Speed: %s\n"
@@ -2157,7 +2576,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
"Port Enabled/Disabled: %s "
"Current Connect Status: %s\n\n", ( {
char *s;
- switch (tmp_reg & PORTSCX_PTS_FSLS) {
+ switch (tmp_reg2 & PORTSCX_PTS_FSLS) {
case PORTSCX_PTS_UTMI:
s = "UTMI"; break;
case PORTSCX_PTS_ULPI:
@@ -2169,7 +2588,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
}
s;} ), ( {
char *s;
- switch (tmp_reg & PORTSCX_PORT_SPEED_UNDEF) {
+ switch (tmp_reg2 & PORTSCX_PORT_SPEED_UNDEF) {
case PORTSCX_PORT_SPEED_FULL:
s = "Full Speed"; break;
case PORTSCX_PORT_SPEED_LOW:
@@ -2235,7 +2654,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
size -= t;
next += t;
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
if (udc->pdata->have_sysif_regs) {
tmp_reg = usb_sys_regs->snoop1;
t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
@@ -2352,6 +2771,13 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
return -1;
}
+#ifdef CONFIG_ARCH_TEGRA
+ /* Tegra uses hardware queue heads */
+ size = udc->max_ep * sizeof(struct ep_queue_head);
+ udc->ep_qh = (struct ep_queue_head *)((u8 *)dr_regs + QH_OFFSET);
+ udc->ep_qh_dma = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start +
+ QH_OFFSET;
+#else
/* initialized QHs, take care of alignment */
size = udc->max_ep * sizeof(struct ep_queue_head);
if (size < QH_ALIGNMENT)
@@ -2367,6 +2793,7 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
kfree(udc->eps);
return -1;
}
+#endif
udc->ep_qh_size = size;
@@ -2431,6 +2858,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
int ret = -ENODEV;
unsigned int i;
u32 dccparams;
+#if defined(CONFIG_ARCH_TEGRA)
+ struct resource *res_sys = NULL;
+#endif
if (strcmp(pdev->name, driver_name)) {
VDBG("Wrong device");
@@ -2448,30 +2878,17 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
spin_lock_init(&udc_controller->lock);
udc_controller->stopped = 1;
-#ifdef CONFIG_USB_OTG
- if (pdata->operating_mode == FSL_USB2_DR_OTG) {
- udc_controller->transceiver = otg_get_transceiver();
- if (!udc_controller->transceiver) {
- ERR("Can't find OTG driver!\n");
- ret = -ENODEV;
- goto err_kfree;
- }
- }
-#endif
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENXIO;
goto err_kfree;
}
- if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
- if (!request_mem_region(res->start, resource_size(res),
- driver_name)) {
- ERR("request mem region for %s failed\n", pdev->name);
- ret = -EBUSY;
- goto err_kfree;
- }
+ if (!request_mem_region(res->start, resource_size(res),
+ driver_name)) {
+ ERR("request mem region for %s failed\n", pdev->name);
+ ret = -EBUSY;
+ goto err_kfree;
}
dr_regs = ioremap(res->start, resource_size(res));
@@ -2493,7 +2910,26 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* Set accessors only after pdata->init() ! */
fsl_set_accessors(pdata);
-#ifndef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_TEGRA)
+ /* If the PHY registers are NOT provided as a separate aperture,
+ * use the registers inside the controller aperture. */
+ res_sys = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_sys) {
+ usb_sys_regs = ioremap(res_sys->start, resource_size(res_sys));
+ if (!usb_sys_regs)
+ goto err_release_mem_region;
+ } else {
+ usb_sys_regs = (struct usb_sys_interface *)
+ ((u32)dr_regs + USB_DR_SYS_OFFSET);
+ }
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ control_reg = &dr_regs->portsc1;
+#else
+ control_reg = &dr_regs->hostpc1devlc;
+#endif
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
if (pdata->have_sysif_regs)
usb_sys_regs = (struct usb_sys_interface *)
((u32)dr_regs + USB_DR_SYS_OFFSET);
@@ -2536,11 +2972,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
goto err_free_irq;
}
- if (!udc_controller->transceiver) {
- /* initialize usb hw reg except for regs for EP,
- * leave usbintr reg untouched */
- dr_controller_setup(udc_controller);
- }
+ /* initialize usb hw reg except for regs for EP,
+ * leave usbintr reg untouched */
+ dr_controller_setup(udc_controller);
fsl_udc_clk_finalize(pdev);
@@ -2560,9 +2994,6 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_free_irq;
- if (udc_controller->transceiver)
- udc_controller->gadget.is_otg = 1;
-
/* setup QH and epctrl for ep0 */
ep0_setup(udc_controller);
@@ -2599,6 +3030,38 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
goto err_del_udc;
create_proc_file();
+
+ /* create a delayed work for detecting the USB charger */
+ INIT_DELAYED_WORK(&udc_controller->work, fsl_udc_charger_detect_work);
+ INIT_WORK(&udc_controller->charger_work, fsl_udc_set_current_limit_work);
+
+ /* Get the regulator for drawing the vbus current in udc driver */
+ udc_controller->vbus_regulator = regulator_get(NULL, "usb_bat_chg");
+ if (IS_ERR(udc_controller->vbus_regulator)) {
+ dev_err(&pdev->dev,
+ "can't get charge regulator,err:%ld\n",
+ PTR_ERR(udc_controller->vbus_regulator));
+ udc_controller->vbus_regulator = NULL;
+ }
+
+#ifdef CONFIG_USB_OTG_UTILS
+ udc_controller->transceiver = otg_get_transceiver();
+ if (udc_controller->transceiver) {
+ dr_controller_stop(udc_controller);
+ dr_controller_reset(udc_controller);
+ fsl_udc_clk_suspend(false);
+ udc_controller->vbus_active = 0;
+ udc_controller->usb_state = USB_STATE_DEFAULT;
+ otg_set_peripheral(udc_controller->transceiver, &udc_controller->gadget);
+ }
+#else
+#ifdef CONFIG_ARCH_TEGRA
+ /* Power down the PHY if the cable is not connected */
+ if (!vbus_enabled())
+ fsl_udc_clk_suspend(false);
+#endif
+#endif
+
return 0;
err_del_udc:
@@ -2614,8 +3077,7 @@ err_iounmap:
err_iounmap_noclk:
iounmap(dr_regs);
err_release_mem_region:
- if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
- release_mem_region(res->start, resource_size(res));
+ release_mem_region(res->start, resource_size(res));
err_kfree:
kfree(udc_controller);
udc_controller = NULL;
@@ -2638,6 +3100,13 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
usb_del_gadget_udc(&udc_controller->gadget);
udc_controller->done = &done;
+ cancel_delayed_work(&udc_controller->work);
+ if (udc_controller->vbus_regulator)
+ regulator_put(udc_controller->vbus_regulator);
+
+ if (udc_controller->transceiver)
+ otg_set_peripheral(udc_controller->transceiver, NULL);
+
fsl_udc_clk_release();
/* DR has been stopped in usb_gadget_unregister_driver() */
@@ -2651,8 +3120,7 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
dma_pool_destroy(udc_controller->td_pool);
free_irq(udc_controller->irq, udc_controller);
iounmap(dr_regs);
- if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
- release_mem_region(res->start, resource_size(res));
+ release_mem_region(res->start, resource_size(res));
device_unregister(&udc_controller->gadget.dev);
/* free udc --wait for the release() finished */
@@ -2674,8 +3142,27 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
-----------------------------------------------------------------*/
static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
- dr_controller_stop(udc_controller);
- return 0;
+ if (udc_controller->transceiver) {
+ if (udc_controller->transceiver->state != OTG_STATE_B_PERIPHERAL) {
+ /* we are not in device mode, return */
+ return 0;
+ }
+ }
+ if (udc_controller->vbus_active) {
+ spin_lock(&udc_controller->lock);
+ /* Reset all internal Queues and inform client driver */
+ reset_queues(udc_controller);
+ udc_controller->vbus_active = 0;
+ udc_controller->usb_state = USB_STATE_DEFAULT;
+ spin_unlock(&udc_controller->lock);
+ }
+ /* stop the controller and turn off the clocks */
+ dr_controller_stop(udc_controller);
+ if (udc_controller->transceiver) {
+ udc_controller->transceiver->state = OTG_STATE_UNDEFINED;
+ }
+ fsl_udc_clk_suspend(true);
+ return 0;
}
/*-----------------------------------------------------------------
@@ -2684,6 +3171,30 @@ static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
*-----------------------------------------------------------------*/
static int fsl_udc_resume(struct platform_device *pdev)
{
+ if (udc_controller->transceiver) {
+ fsl_udc_clk_enable();
+ if (!(fsl_readl(&usb_sys_regs->vbus_wakeup) & USB_SYS_ID_PIN_STATUS)) {
+ /* ID pin low means a host is connected; return */
+ fsl_udc_clk_disable();
+ return 0;
+ }
+ /* check for VBUS */
+ if (!(fsl_readl(&usb_sys_regs->vbus_wakeup) & USB_SYS_VBUS_STATUS)) {
+ /* if there is no VBUS then power down the clocks and return */
+ fsl_udc_clk_disable();
+ return 0;
+ } else {
+ fsl_udc_clk_disable();
+ if (udc_controller->transceiver->state == OTG_STATE_A_HOST)
+ return 0;
+ /* VBUS detected; set the transceiver state to device mode */
+ udc_controller->transceiver->state = OTG_STATE_B_PERIPHERAL;
+ }
+ }
+ fsl_udc_clk_resume(true);
+#if defined(CONFIG_ARCH_TEGRA)
+ fsl_udc_restart(udc_controller);
+#else
/* Enable DR irq reg and set controller Run */
if (udc_controller->stopped) {
dr_controller_setup(udc_controller);
@@ -2692,65 +3203,14 @@ static int fsl_udc_resume(struct platform_device *pdev)
udc_controller->usb_state = USB_STATE_ATTACHED;
udc_controller->ep0_state = WAIT_FOR_SETUP;
udc_controller->ep0_dir = 0;
- return 0;
-}
-
-static int fsl_udc_otg_suspend(struct device *dev, pm_message_t state)
-{
- struct fsl_udc *udc = udc_controller;
- u32 mode, usbcmd;
-
- mode = fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_MASK;
-
- pr_debug("%s(): mode 0x%x stopped %d\n", __func__, mode, udc->stopped);
-
- /*
- * If the controller is already stopped, then this must be a
- * PM suspend. Remember this fact, so that we will leave the
- * controller stopped at PM resume time.
- */
- if (udc->stopped) {
- pr_debug("gadget already stopped, leaving early\n");
- udc->already_stopped = 1;
- return 0;
- }
-
- if (mode != USB_MODE_CTRL_MODE_DEVICE) {
- pr_debug("gadget not in device mode, leaving early\n");
- return 0;
- }
-
- /* stop the controller */
- usbcmd = fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP;
- fsl_writel(usbcmd, &dr_regs->usbcmd);
-
- udc->stopped = 1;
-
- pr_info("USB Gadget suspended\n");
+#endif
+ /* Power down the PHY if the cable is not connected */
+ if (!(fsl_readl(&usb_sys_regs->vbus_wakeup) & USB_SYS_VBUS_STATUS))
+ fsl_udc_clk_suspend(false);
return 0;
}
-static int fsl_udc_otg_resume(struct device *dev)
-{
- pr_debug("%s(): stopped %d already_stopped %d\n", __func__,
- udc_controller->stopped, udc_controller->already_stopped);
-
- /*
- * If the controller was stopped at suspend time, then
- * don't resume it now.
- */
- if (udc_controller->already_stopped) {
- udc_controller->already_stopped = 0;
- pr_debug("gadget was already stopped, leaving early\n");
- return 0;
- }
-
- pr_info("USB Gadget resume\n");
-
- return fsl_udc_resume(NULL);
-}
-
/*-------------------------------------------------------------------------
Register entry point for the peripheral controller driver
--------------------------------------------------------------------------*/
@@ -2763,9 +3223,6 @@ static struct platform_driver udc_driver = {
.driver = {
.name = (char *)driver_name,
.owner = THIS_MODULE,
- /* udc suspend/resume called from OTG driver */
- .suspend = fsl_udc_otg_suspend,
- .resume = fsl_udc_otg_resume,
},
};
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda8..0386be60ff79 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -9,6 +9,7 @@
#define USB_MAX_CTRL_PAYLOAD 64
#define USB_DR_SYS_OFFSET 0x400
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/* USB DR device mode registers (Little Endian) */
struct usb_dr_device {
/* Capability register */
@@ -82,8 +83,106 @@ struct usb_dr_host {
u32 endptcomplete; /* Endpoint Complete Register */
u32 endptctrl[6]; /* Endpoint Control Registers */
};
+#else
+/* Tegra3 support.
+ * The following changes were made to the Tegra3 USB registers:
+ * 1. Registers usbcmd through portsc1 have been shifted up by 16 bytes.
+ * 2. Registers otgsc and usbmode have been shifted down by 80 bytes.
+ * 3. The hostpc1devlc register has been added at offset 0x1b4 (436).
+ * 4. Registers endptsetupstat through endptctrl have been shifted down by 92 bytes.
+ */
+
+/* USB DR device mode registers (Little Endian) */
+struct usb_dr_device {
+ /* Capability register */
+ u8 res1[256];
+ u16 caplength; /* Capability Register Length */
+ u16 hciversion; /* Host Controller Interface Version */
+ u32 hcsparams; /* Host Controller Structural Parameters */
+ u32 hccparams; /* Host Controller Capability Parameters */
+ u8 res2[20];
+ u32 dciversion; /* Device Controller Interface Version */
+ u32 dccparams; /* Device Controller Capability Parameters */
+ u8 res3[8];
+ /* Operation register */
+ u32 usbcmd; /* USB Command Register */
+ u32 usbsts; /* USB Status Register */
+ u32 usbintr; /* USB Interrupt Enable Register */
+ u32 frindex; /* Frame Index Register */
+ u8 res4[4];
+ u32 deviceaddr; /* Device Address */
+ u32 endpointlistaddr; /* Endpoint List Address Register */
+ u8 res5[4];
+ u32 burstsize; /* Master Interface Data Burst Size Register */
+ u32 txttfilltuning; /* Transmit FIFO Tuning Controls Register */
+ u8 res6[24];
+ u32 configflag; /* Configure Flag Register */
+ u32 portsc1; /* Port 1 Status and Control Register */
+ u8 res7[60];
+ u32 hostpc1devlc; /* USB LPM Behavior and Control Register */
+ u8 res8[60];
+ u32 otgsc; /* On-The-Go Status and Control */
+ u32 usbmode; /* USB Mode Register */
+ u8 res9[12];
+ u32 endptsetupstat; /* Endpoint Setup Status Register */
+ u32 endpointprime; /* Endpoint Initialization Register */
+ u32 endptflush; /* Endpoint Flush Register */
+ u32 endptstatus; /* Endpoint Status Register */
+ u32 endptcomplete; /* Endpoint Complete Register */
+ u32 endptctrl[6]; /* Endpoint Control Registers */
+};
+
+ /* USB DR host mode registers (Little Endian) */
+struct usb_dr_host {
+ /* Capability register */
+ u8 res1[256];
+ u16 caplength; /* Capability Register Length */
+ u16 hciversion; /* Host Controller Interface Version */
+ u32 hcsparams; /* Host Controller Structural Parameters */
+ u32 hccparams; /* Host Controller Capability Parameters */
+ u8 res2[20];
+ u32 dciversion; /* Device Controller Interface Version */
+ u32 dccparams; /* Device Controller Capability Parameters */
+ u8 res3[8];
+ /* Operation register */
+ u32 usbcmd; /* USB Command Register */
+ u32 usbsts; /* USB Status Register */
+ u32 usbintr; /* USB Interrupt Enable Register */
+ u32 frindex; /* Frame Index Register */
+ u8 res4[4];
+ u32 periodiclistbase; /* Periodic Frame List Base Address Register */
+ u32 asynclistaddr; /* Current Asynchronous List Address Register */
+ u8 res5[4];
+ u32 burstsize; /* Master Interface Data Burst Size Register */
+ u32 txttfilltuning; /* Transmit FIFO Tuning Controls Register */
+ u8 res6[24];
+ u32 configflag; /* Configure Flag Register */
+ u32 portsc1; /* Port 1 Status and Control Register */
+ u8 res7[60];
+ u32 hostpc1devlc; /* USB LPM Behavior and Control Register */
+ u8 res8[60];
+ u32 otgsc; /* On-The-Go Status and Control */
+ u32 usbmode; /* USB Mode Register */
+ u8 res9[12];
+ u32 endptsetupstat; /* Endpoint Setup Status Register */
+ u32 endpointprime; /* Endpoint Initialization Register */
+ u32 endptflush; /* Endpoint Flush Register */
+ u32 endptstatus; /* Endpoint Status Register */
+ u32 endptcomplete; /* Endpoint Complete Register */
+ u32 endptctrl[6]; /* Endpoint Control Registers */
+};
+#endif /* CONFIG_ARCH_TEGRA_2x_SOC */
/* non-EHCI USB system interface registers (Big Endian) */
+#ifdef CONFIG_ARCH_TEGRA
+struct usb_sys_interface {
+ u32 suspend_ctrl;
+ u32 vbus_sensors;
+ u32 vbus_wakeup;
+ u32 vbus_alt_status;
+ u32 legacy_ctrl;
+};
+#else
struct usb_sys_interface {
u32 snoop1;
u32 snoop2;
@@ -93,6 +192,7 @@ struct usb_sys_interface {
u8 res[236];
u32 control; /* General Purpose Control Register */
};
+#endif
/* ep0 transfer state */
#define WAIT_FOR_SETUP 0
@@ -198,11 +298,59 @@ struct usb_sys_interface {
#define PORTSCX_WAKE_ON_CONNECT_DIS 0x00200000
#define PORTSCX_WAKE_ON_OVER_CURRENT 0x00400000
#define PORTSCX_PHY_LOW_POWER_SPD 0x00800000
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
#define PORTSCX_PORT_FORCE_FULL_SPEED 0x01000000
#define PORTSCX_PORT_SPEED_MASK 0x0C000000
#define PORTSCX_PORT_WIDTH 0x10000000
#define PORTSCX_PHY_TYPE_SEL 0xC0000000
+/* bits 27-26: port speed */
+#define PORTSCX_PORT_SPEED_FULL 0x00000000
+#define PORTSCX_PORT_SPEED_LOW 0x04000000
+#define PORTSCX_PORT_SPEED_HIGH 0x08000000
+#define PORTSCX_PORT_SPEED_UNDEF 0x0C000000
+#define PORTSCX_SPEED_BIT_POS 26
+
+/* bit 28 is parallel transceiver width for UTMI interface */
+#define PORTSCX_PTW 0x10000000
+#define PORTSCX_PTW_8BIT 0x00000000
+#define PORTSCX_PTW_16BIT 0x10000000
+
+/* bits 31-30: port transceiver select */
+#define PORTSCX_PTS_UTMI 0x00000000
+#define PORTSCX_PTS_ULPI 0x80000000
+#define PORTSCX_PTS_FSLS 0xC0000000
+#define PORTSCX_PTS_BIT_POS 30
+#else
+/* On Tegra3 the following fields have moved to the new HOSTPC1_DEVLC
+ * register and their offsets have changed.  The bit-mask names are kept
+ * the same as before (PORTSCX_*) to minimize code changes. */
+#define PORTSCX_PORT_FORCE_FULL_SPEED 0x00800000
+#define PORTSCX_PORT_SPEED_MASK 0x06000000
+#define PORTSCX_PORT_WIDTH 0x08000000
+#define PORTSCX_PHY_TYPE_SEL 0xE0000000
+
+/* bits 26-25: port speed */
+#define PORTSCX_PORT_SPEED_FULL 0x00000000
+#define PORTSCX_PORT_SPEED_LOW 0x02000000
+#define PORTSCX_PORT_SPEED_HIGH 0x04000000
+#define PORTSCX_PORT_SPEED_UNDEF 0x06000000
+#define PORTSCX_SPEED_BIT_POS 25
+
+/* bit 27 is parallel transceiver width for UTMI interface */
+#define PORTSCX_PTW 0x08000000
+#define PORTSCX_PTW_8BIT 0x00000000
+#define PORTSCX_PTW_16BIT 0x08000000
+
+/* bits 31-29: port transceiver select */
+#define PORTSCX_PTS_UTMI 0x00000000
+#define PORTSCX_PTS_ULPI 0x40000000
+#define PORTSCX_PTS_FSLS 0x60000000
+#define PORTSCX_PTS_BIT_POS 29
+#endif
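A minimal sketch (not part of the patch, using plain C types and a hypothetical helper name) of how the same logical field is decoded under the two layouts above; the shift values mirror PORTSCX_SPEED_BIT_POS in each branch:

#include <stdint.h>

enum port_speed { PORT_FULL = 0, PORT_LOW = 1, PORT_HIGH = 2, PORT_UNDEF = 3 };

static enum port_speed decode_port_speed(uint32_t reg, int is_tegra3)
{
	unsigned int shift = is_tegra3 ? 25 : 26;   /* PORTSCX_SPEED_BIT_POS */

	/* two-bit speed field: PORTSC1 on Tegra2, HOSTPC1_DEVLC on Tegra3 */
	return (enum port_speed)((reg >> shift) & 0x3);
}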
+
/* bit 11-10 are line status */
#define PORTSCX_LINE_STATUS_SE0 0x00000000
#define PORTSCX_LINE_STATUS_JSTATE 0x00000400
@@ -226,24 +374,6 @@ struct usb_sys_interface {
#define PORTSCX_PTC_FORCE_EN 0x00050000
#define PORTSCX_PTC_BIT_POS 16
-/* bit 27-26 are port speed */
-#define PORTSCX_PORT_SPEED_FULL 0x00000000
-#define PORTSCX_PORT_SPEED_LOW 0x04000000
-#define PORTSCX_PORT_SPEED_HIGH 0x08000000
-#define PORTSCX_PORT_SPEED_UNDEF 0x0C000000
-#define PORTSCX_SPEED_BIT_POS 26
-
-/* bit 28 is parallel transceiver width for UTMI interface */
-#define PORTSCX_PTW 0x10000000
-#define PORTSCX_PTW_8BIT 0x00000000
-#define PORTSCX_PTW_16BIT 0x10000000
-
-/* bit 31-30 are port transceiver select */
-#define PORTSCX_PTS_UTMI 0x00000000
-#define PORTSCX_PTS_ULPI 0x80000000
-#define PORTSCX_PTS_FSLS 0xC0000000
-#define PORTSCX_PTS_BIT_POS 30
-
/* otgsc Register Bit Masks */
#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
@@ -420,12 +550,25 @@ struct ep_td_struct {
DTD_STATUS_DATA_BUFF_ERR | \
DTD_STATUS_TRANSACTION_ERR)
/* Alignment requirements; must be a power of two */
+#if defined(CONFIG_ARCH_TEGRA)
+#define DTD_ALIGNMENT 0x80
+#else
#define DTD_ALIGNMENT 0x20
+#endif
#define QH_ALIGNMENT 2048
+#define QH_OFFSET 0x1000
/* Controller dma boundary */
#define UDC_DMA_BOUNDARY 0x1000
+#define USB_SYS_VBUS_ASESSION_INT_EN 0x10000
+#define USB_SYS_VBUS_ASESSION_CHANGED 0x20000
+#define USB_SYS_VBUS_ASESSION 0x40000
+#define USB_SYS_VBUS_WAKEUP_ENABLE 0x40000000
+#define USB_SYS_VBUS_WAKEUP_INT_ENABLE 0x100
+#define USB_SYS_VBUS_WAKEUP_INT_STATUS 0x200
+#define USB_SYS_VBUS_STATUS 0x400
+#define USB_SYS_ID_PIN_STATUS (0x4)
/*-------------------------------------------------------------------------*/
/* ### driver private data
@@ -476,7 +619,6 @@ struct fsl_udc {
unsigned vbus_active:1;
unsigned stopped:1;
unsigned remote_wakeup:1;
- unsigned already_stopped:1;
unsigned big_endian_desc:1;
struct ep_queue_head *ep_qh; /* Endpoints Queue-Head */
@@ -488,13 +630,16 @@ struct fsl_udc {
dma_addr_t ep_qh_dma; /* dma address of QH */
u32 max_pipes; /* Device max pipes */
- u32 bus_reset; /* Device is bus resetting */
u32 resume_state; /* USB state to resume */
u32 usb_state; /* USB current state */
u32 ep0_state; /* Endpoint zero state */
u32 ep0_dir; /* Endpoint zero direction: can be
USB_DIR_IN or USB_DIR_OUT */
u8 device_address; /* Device USB address */
+ struct delayed_work work; /* delayed work for charger detection */
+ struct regulator *vbus_regulator; /* regulator for drawing VBUS */
+ u32 current_limit;
+ struct work_struct charger_work; /* work for setting the regulator current limit */
};
/*-------------------------------------------------------------------------*/
@@ -570,10 +715,15 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
struct platform_device;
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_ARCH_TEGRA)
int fsl_udc_clk_init(struct platform_device *pdev);
void fsl_udc_clk_finalize(struct platform_device *pdev);
void fsl_udc_clk_release(void);
+void fsl_udc_clk_suspend(bool is_dpd);
+void fsl_udc_clk_resume(bool is_dpd);
+void fsl_udc_clk_enable(void);
+void fsl_udc_clk_disable(void);
+bool fsl_udc_charger_detect(void);
#else
static inline int fsl_udc_clk_init(struct platform_device *pdev)
{
@@ -585,6 +735,22 @@ static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
static inline void fsl_udc_clk_release(void)
{
}
+static inline void fsl_udc_clk_suspend(bool is_dpd)
+{
+}
+static inline void fsl_udc_clk_resume(bool is_dpd)
+{
+}
+static inline void fsl_udc_clk_enable(void)
+{
+}
+static inline void fsl_udc_clk_disable(void)
+{
+}
+static inline bool fsl_udc_charger_detect(void)
+{
+ return false;
+}
#endif
#endif
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index d3cdffea9c8a..6cea2e17b32b 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -159,6 +159,25 @@ static const u32 oid_supported_list[] =
#endif /* RNDIS_PM */
};
+/* HACK: copied from net/core/dev.c to replace dev_get_stats since
+ * dev_get_stats cannot be called from atomic context */
+static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats)
+{
+#if BITS_PER_LONG == 64
+ BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+ memcpy(stats64, netdev_stats, sizeof(*stats64));
+#else
+ size_t i, n = sizeof(*stats64) / sizeof(u64);
+ const unsigned long *src = (const unsigned long *)netdev_stats;
+ u64 *dst = (u64 *)stats64;
+
+ BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
+ sizeof(*stats64) / sizeof(u64));
+ for (i = 0; i < n; i++)
+ dst[i] = src[i];
+#endif
+}
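On 32-bit builds the loop above widens each unsigned long counter into a u64 slot. A self-contained sketch of that widening copy (outside the patch; the struct names are hypothetical stand-ins for net_device_stats and rtnl_link_stats64):

#include <stdint.h>
#include <stddef.h>

struct stats32 { unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes; };
struct stats64 { uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes; };

static void widen_stats(struct stats64 *dst, const struct stats32 *src)
{
	const unsigned long *s = (const unsigned long *)src;
	uint64_t *d = (uint64_t *)dst;
	size_t i, n = sizeof(*dst) / sizeof(uint64_t);

	for (i = 0; i < n; i++)      /* element-wise copy widens each counter */
		d[i] = s[i];
}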
/* NDIS Functions */
static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
@@ -171,7 +190,7 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
rndis_query_cmplt_type *resp;
struct net_device *net;
struct rtnl_link_stats64 temp;
- const struct rtnl_link_stats64 *stats;
+ struct rtnl_link_stats64 *stats = &temp;
if (!r) return -ENOMEM;
resp = (rndis_query_cmplt_type *)r->buf;
@@ -194,7 +213,7 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
resp->InformationBufferOffset = cpu_to_le32(16);
net = rndis_per_dev_params[configNr].dev;
- stats = dev_get_stats(net, &temp);
+ netdev_stats_to_stats64(stats, &net->stats);
switch (OID) {
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index d3dd227a2bfc..cdd407a252d1 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -763,10 +763,16 @@ static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
struct rw_semaphore *filesem = dev_get_drvdata(dev);
int rc = 0;
+
+#ifndef CONFIG_USB_ANDROID_MASS_STORAGE
+ /* disabled on Android because we need to allow closing the backing file
+ * if the media was removed
+ */
if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
LDBG(curlun, "eject attempt prevented\n");
return -EBUSY; /* "Door is locked" */
}
+#endif
/* Remove a trailing newline */
if (count > 0 && buf[count-1] == '\n')
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index dfed4c1d96c0..4f0f71116cfe 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -767,6 +767,26 @@ static struct device_type gadget_type = {
*/
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
+ return gether_setup_name(g, ethaddr, "usb");
+}
+
+/**
+ * gether_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associate with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname)
+{
struct eth_dev *dev;
struct net_device *net;
int status;
@@ -789,7 +809,7 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
/* network device setup */
dev->net = net;
- strcpy(net->name, "usb%d");
+ snprintf(net->name, sizeof(net->name), "%s%%d", netname);
if (get_ether_addr(dev_addr, net->dev_addr))
dev_warn(&g->dev,
@@ -945,7 +965,6 @@ void gether_disconnect(struct gether *link)
struct eth_dev *dev = link->ioport;
struct usb_request *req;
- WARN_ON(!dev);
if (!dev)
return;
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index c966440ddd70..46772413f0db 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -82,6 +82,9 @@ struct gether {
/* netdev setup/teardown as directed by the gadget driver */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
void gether_cleanup(void);
+/* variant of gether_setup that allows customizing the network device name */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname);
/* connect/disconnect is handled by individual functions */
struct net_device *gether_connect(struct gether *);
@@ -108,12 +111,14 @@ int eem_bind_config(struct usb_configuration *c);
#ifdef USB_ETH_RNDIS
-int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer);
#else
static inline int
-rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer)
{
return 0;
}
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index a8aa46962d81..ac0ef4b7deeb 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -122,7 +122,7 @@ struct gs_port {
};
/* increase N_PORTS if you need more */
-#define N_PORTS 4
+#define N_PORTS 8
static struct portmaster {
struct mutex lock; /* protect open/close */
struct gs_port *port;
@@ -1028,7 +1028,7 @@ static const struct tty_operations gs_tty_ops = {
static struct tty_driver *gs_tty_driver;
-static int __init
+static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
struct gs_port *port;
@@ -1074,7 +1074,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
*
* Returns negative errno or zero.
*/
-int __init gserial_setup(struct usb_gadget *g, unsigned count)
+int gserial_setup(struct usb_gadget *g, unsigned count)
{
unsigned i;
struct usb_cdc_line_coding coding;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ab085f12d570..9179076801c7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -578,3 +578,9 @@ config USB_OCTEON_OHCI
config USB_OCTEON2_COMMON
bool
default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_EHCI_ONOFF_FEATURE
+ boolean "EHCI ON/OFF Feature"
+ depends on USB && USB_EHCI_HCD
+ help
+ Enable support for turning the EHCI controller off and on.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 624a362f2fee..ea6d2e8f6596 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for USB Host Controller Drivers
#
+GCOV_PROFILE := y
ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 34a3140d1e5f..ec39f8b57351 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -117,9 +117,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
pdata->regs = hcd->regs;
- if (pdata->power_budget)
- hcd->power_budget = pdata->power_budget;
-
/*
* do platform specific init: check the clock, grab/config pins, etc.
*/
@@ -137,30 +134,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (retval != 0)
goto err4;
-
-#ifdef CONFIG_USB_OTG
- if (pdata->operating_mode == FSL_USB2_DR_OTG) {
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- ehci->transceiver = otg_get_transceiver();
- dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, transceiver=0x%p\n",
- hcd, ehci, ehci->transceiver);
-
- if (ehci->transceiver) {
- retval = otg_set_host(ehci->transceiver,
- &ehci_to_hcd(ehci)->self);
- if (retval) {
- if (ehci->transceiver)
- put_device(ehci->transceiver->dev);
- goto err4;
- }
- } else {
- dev_err(&pdev->dev, "can't find transceiver\n");
- retval = -ENODEV;
- goto err4;
- }
- }
-#endif
return retval;
err4:
@@ -191,12 +164,6 @@ static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- if (ehci->transceiver) {
- otg_set_host(ehci->transceiver, NULL);
- put_device(ehci->transceiver->dev);
- }
usb_remove_hcd(hcd);
@@ -577,38 +544,6 @@ static struct dev_pm_ops ehci_fsl_pm_ops = {
#define EHCI_FSL_PM_OPS NULL
#endif /* CONFIG_PM */
-#ifdef CONFIG_USB_OTG
-static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- u32 status;
-
- if (!port)
- return -EINVAL;
-
- port--;
-
- /* start port reset before HNP protocol time out */
- status = readl(&ehci->regs->port_status[port]);
- if (!(status & PORT_CONNECT))
- return -ENODEV;
-
- /* khubd will finish the reset later */
- if (ehci_is_TDI(ehci)) {
- writel(PORT_RESET |
- (status & ~(PORT_CSC | PORT_PEC | PORT_OCC)),
- &ehci->regs->port_status[port]);
- } else {
- writel(PORT_RESET, &ehci->regs->port_status[port]);
- }
-
- return 0;
-}
-#else
-#define ehci_start_port_reset NULL
-#endif /* CONFIG_USB_OTG */
-
-
static const struct hc_driver ehci_fsl_hc_driver = {
.description = hcd_name,
.product_desc = "Freescale On-Chip EHCI Host Controller",
@@ -648,7 +583,6 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
- .start_port_reset = ehci_start_port_reset,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d7318e321706..ab94b512e0ea 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -203,7 +203,10 @@ static int tdi_in_host_mode (struct ehci_hcd *ehci)
u32 __iomem *reg_ptr;
u32 tmp;
- reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
+ if (ehci->has_hostpc)
+ reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE_EX);
+ else
+ reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
tmp = ehci_readl(ehci, reg_ptr);
return (tmp & 3) == USBMODE_CM_HC;
}
@@ -277,7 +280,10 @@ static int ehci_reset (struct ehci_hcd *ehci)
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
- ehci_writel(ehci, command, &ehci->regs->command);
+#ifdef CONFIG_USB_EHCI_TEGRA
+ if (!ehci->controller_resets_phy)
+#endif
+ ehci_writel(ehci, command, &ehci->regs->command);
ehci_to_hcd(ehci)->state = HC_STATE_HALT;
ehci->next_statechange = jiffies;
retval = handshake (ehci, &ehci->regs->command,
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4c32cb19b405..25ed607aab9a 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -27,7 +27,6 @@
*/
/*-------------------------------------------------------------------------*/
-#include <linux/usb/otg.h>
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
@@ -285,7 +284,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
changed = 1;
}
}
-
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (changed && ehci->has_hostpc) {
spin_unlock_irq(&ehci->lock);
msleep(5); /* 5 ms for HCD to enter low-power mode */
@@ -306,7 +305,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
"succeeded" : "failed");
}
}
-
+#endif
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
udelay(150);
@@ -726,13 +725,6 @@ static int ehci_hub_control (
goto error;
if (ehci->no_selective_suspend)
break;
-#ifdef CONFIG_USB_OTG
- if ((hcd->self.otg_port == (wIndex + 1))
- && hcd->self.b_hnp_enable) {
- otg_start_hnp(ehci->transceiver);
- break;
- }
-#endif
if (!(temp & PORT_SUSPEND))
break;
if ((temp & PORT_PE) == 0)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 0917e3a32465..aaf48c5d633f 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -995,9 +995,16 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
head->qh_next.qh = qh;
head->hw->hw_next = dma;
+ /*
+ * Flush the QH descriptor to memory immediately; see the
+ * comment in qh_append_tds().
+ */
+ ehci_sync_mem();
+
qh_get(qh);
qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
+ wmb();
/* qtd completions reported later by interrupt */
}
@@ -1082,6 +1089,18 @@ static struct ehci_qh *qh_append_tds (
wmb ();
dummy->hw_token = token;
+ /*
+ * Writes to a DMA-coherent buffer on ARM may be delayed in
+ * reaching memory, so the HC may not see the hw_token of the
+ * dummy qtd in time.  That can cause the qtd transaction to be
+ * executed very late and badly degrade performance.
+ * ehci_sync_mem() flushes 'token' to memory immediately so that
+ * the HC can execute the transaction as soon as possible.
+ */
+ ehci_sync_mem();
+
urb->hcpriv = qh_get (qh);
}
}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 02b2bfd49a10..c76f495b6bd2 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -2,7 +2,7 @@
* EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
*
* Copyright (C) 2010 Google, Inc.
- * Copyright (C) 2009 NVIDIA Corporation
+ * Copyright (C) 2009 - 2011 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -22,40 +22,79 @@
#include <linux/irq.h>
#include <linux/usb/otg.h>
#include <mach/usb_phy.h>
+#include <mach/iomap.h>
+
+#define TEGRA_USB_PORTSC_PHCD (1 << 23)
+
+#define TEGRA_USB_SUSP_CTRL_OFFSET 0x400
+#define TEGRA_USB_SUSP_CLR (1 << 5)
+#define TEGRA_USB_PHY_CLK_VALID (1 << 7)
+#define TEGRA_USB_SRT (1 << 25)
+#define TEGRA_USB_PHY_CLK_VALID_INT_ENB (1 << 9)
+#define TEGRA_USB_PHY_CLK_VALID_INT_STS (1 << 8)
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define TEGRA_USB_PORTSC1_OFFSET 0x184
+#else
+#define TEGRA_USB_PORTSC1_OFFSET 0x174
+#endif
+#define TEGRA_USB_PORTSC1_WKCN (1 << 20)
+
+#define TEGRA_LVL2_CLK_GATE_OVRB 0xfc
+#define TEGRA_USB2_CLK_OVR_ON (1 << 10)
#define TEGRA_USB_DMA_ALIGN 32
+#define STS_SRI	(1<<7)	/* SOF Received */
+
+#define TEGRA_HSIC_CONNECTION_MAX_RETRIES 5
+#define HOSTPC_REG_OFFSET 0x1b4
+
+#define HOSTPC1_DEVLC_STS (1 << 28)
+#define HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
+
struct tegra_ehci_hcd {
struct ehci_hcd *ehci;
struct tegra_usb_phy *phy;
struct clk *clk;
struct clk *emc_clk;
+ struct clk *sclk_clk;
struct otg_transceiver *transceiver;
int host_resumed;
int bus_suspended;
int port_resuming;
int power_down_on_bus_suspend;
+ struct delayed_work work;
enum tegra_usb_phy_port_speed port_speed;
+ struct work_struct clk_timer_work;
+ struct timer_list clk_timer;
+ bool clock_enabled;
+ bool timer_event;
+ int hsic_connect_retries;
+ struct mutex tegra_ehci_hcd_mutex;
+ unsigned int irq;
};
-static void tegra_ehci_power_up(struct usb_hcd *hcd)
+static void tegra_ehci_power_up(struct usb_hcd *hcd, bool is_dpd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- clk_enable(tegra->emc_clk);
+#ifndef CONFIG_USB_HOTPLUG
clk_enable(tegra->clk);
- tegra_usb_phy_power_on(tegra->phy);
+#endif
+ tegra_usb_phy_power_on(tegra->phy, is_dpd);
tegra->host_resumed = 1;
}
-static void tegra_ehci_power_down(struct usb_hcd *hcd)
+static void tegra_ehci_power_down(struct usb_hcd *hcd, bool is_dpd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
tegra->host_resumed = 0;
- tegra_usb_phy_power_off(tegra->phy);
+ tegra_usb_phy_power_off(tegra->phy, is_dpd);
+#ifndef CONFIG_USB_HOTPLUG
clk_disable(tegra->clk);
- clk_disable(tegra->emc_clk);
+#endif
}
static int tegra_ehci_internal_port_reset(
@@ -123,6 +162,44 @@ static int tegra_ehci_internal_port_reset(
return retval;
}
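+/*
+ * IRQ handler wrapper: resume the root hub when the UTMIP PHY reports a
+ * remote-wakeup event, acknowledge hotplug PHY-clock-valid interrupts,
+ * then hand off to the generic ehci_irq() handler.
+ */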
+static irqreturn_t tegra_ehci_irq (struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+ struct ehci_regs __iomem *hw = ehci->regs;
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ u32 val;
+
+ if ((tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) &&
+ (tegra->ehci->has_hostpc)) {
+ /* check if there is any remote wake event */
+ if (tegra_usb_phy_is_remotewake_detected(tegra->phy)) {
+ spin_lock (&ehci->lock);
+ usb_hcd_resume_root_hub(hcd);
+ spin_unlock (&ehci->lock);
+ }
+ }
+ if (tegra->phy->hotplug) {
+ spin_lock(&ehci->lock);
+ val = readl(hcd->regs + TEGRA_USB_SUSP_CTRL_OFFSET);
+ if ((val & TEGRA_USB_PHY_CLK_VALID_INT_STS)) {
+ val &= ~TEGRA_USB_PHY_CLK_VALID_INT_ENB |
+ TEGRA_USB_PHY_CLK_VALID_INT_STS;
+ writel(val , (hcd->regs + TEGRA_USB_SUSP_CTRL_OFFSET));
+
+ val = readl(&hw->status);
+ if (!(val & STS_PCD)) {
+ spin_unlock(&ehci->lock);
+ return 0;
+ }
+ val = readl(hcd->regs + TEGRA_USB_PORTSC1_OFFSET);
+ val &= ~(TEGRA_USB_PORTSC1_WKCN | PORT_RWC_BITS);
+ writel(val , (hcd->regs + TEGRA_USB_PORTSC1_OFFSET));
+ }
+ spin_unlock(&ehci->lock);
+ }
+ return ehci_irq(hcd);
+}
+
static int tegra_ehci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
@@ -133,11 +210,24 @@ static int tegra_ehci_hub_control(
)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ int ports = HCS_N_PORTS(ehci->hcs_params);
+ u32 temp, status;
u32 __iomem *status_reg;
- u32 temp;
+ u32 usbsts_reg;
+
unsigned long flags;
int retval = 0;
+ unsigned selector;
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ bool hsic = false;
+
+ if (!tegra->host_resumed) {
+ if (buf)
+ memset (buf, 0, wLength);
+ return retval;
+ }
+
+ hsic = (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC);
status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
@@ -152,18 +242,28 @@ static int tegra_ehci_hub_control(
temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS;
ehci_writel(ehci, temp & ~PORT_PE, status_reg);
goto done;
- }
-
- else if (typeReq == GetPortStatus) {
+ } else if (typeReq == GetPortStatus) {
temp = ehci_readl(ehci, status_reg);
- if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
+ if (tegra->port_resuming && !(temp & PORT_SUSPEND) &&
+ time_after_eq(jiffies, ehci->reset_done[wIndex-1])) {
/* Resume completed, re-enable disconnect detection */
tegra->port_resuming = 0;
- tegra_usb_phy_postresume(tegra->phy);
+ clear_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ ehci->reset_done[wIndex-1] = 0;
+ tegra_usb_phy_postresume(tegra->phy, false);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) {
+ ehci->command |= CMD_RUN;
+ /*
+			 * The EHCI run bit was disabled to avoid SOFs; the 2LS WAR
+			 * has completed by now, so re-enable the run bit.
+ */
+ ehci_writel(ehci, ehci->command,
+ &ehci->regs->command);
+ }
+#endif
}
- }
-
- else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ } else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
temp = ehci_readl(ehci, status_reg);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
retval = -EPIPE;
@@ -174,6 +274,9 @@ static int tegra_ehci_hub_control(
temp |= PORT_WKDISC_E | PORT_WKOC_E;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+ /* Need a 4ms delay before the controller goes to suspend */
+ mdelay(4);
+
/*
* If a transaction is in progress, there may be a delay in
* suspending the port. Poll until the port is suspended.
@@ -183,6 +286,9 @@ static int tegra_ehci_hub_control(
pr_err("%s: timeout waiting for SUSPEND\n", __func__);
set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+
+ tegra_usb_phy_postsuspend(tegra->phy, false);
+
goto done;
}
@@ -211,28 +317,108 @@ static int tegra_ehci_hub_control(
if (!(temp & PORT_SUSPEND))
goto done;
+ tegra->port_resuming = 1;
+
/* Disable disconnect detection during port resume */
- tegra_usb_phy_preresume(tegra->phy);
+ tegra_usb_phy_preresume(tegra->phy, false);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (tegra->phy->usb_phy_type != TEGRA_USB_PHY_TYPE_UTMIP) {
+#endif
+ ehci_dbg(ehci, "%s:USBSTS = 0x%x", __func__,
+ ehci_readl(ehci, &ehci->regs->status));
+ usbsts_reg = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, usbsts_reg, &ehci->regs->status);
+ usbsts_reg = ehci_readl(ehci, &ehci->regs->status);
+ udelay(20);
- ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+ if (handshake(ehci, &ehci->regs->status, STS_SRI, STS_SRI, 2000))
+ pr_err("%s: timeout set for STS_SRI\n", __func__);
+ usbsts_reg = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, usbsts_reg, &ehci->regs->status);
+
+ if (handshake(ehci, &ehci->regs->status, STS_SRI, 0, 2000))
+ pr_err("%s: timeout clear STS_SRI\n", __func__);
+
+ if (handshake(ehci, &ehci->regs->status, STS_SRI, STS_SRI, 2000))
+ pr_err("%s: timeout set STS_SRI\n", __func__);
+
+ udelay(20);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ }
+#endif
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- /* start resume signalling */
+ /* start resume signaling */
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
- spin_unlock_irqrestore(&ehci->lock, flags);
- msleep(20);
- spin_lock_irqsave(&ehci->lock, flags);
-
- /* Poll until the controller clears RESUME and SUSPEND */
- if (handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
- pr_err("%s: timeout waiting for RESUME\n", __func__);
- if (handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
- pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+ ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+ /* whoever resumes must GetPortStatus to complete it!! */
+ goto done;
+ }
- ehci->reset_done[wIndex-1] = 0;
+ /* Handle port reset here */
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if ((hsic) && (typeReq == SetPortFeature) &&
+ ((wValue == USB_PORT_FEAT_RESET) || (wValue == USB_PORT_FEAT_POWER))) {
+#else
+ if ((hsic) && (typeReq == SetPortFeature) &&
+ (wValue == USB_PORT_FEAT_POWER)) {
+#endif
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+ if (!wIndex || wIndex > ports) {
+ retval = -EPIPE;
+ goto done;
+ }
+ wIndex--;
+ status = 0;
+ temp = ehci_readl(ehci, status_reg);
+ if (temp & PORT_OWNER)
+ goto done;
+ temp &= ~PORT_RWC_BITS;
+
+ switch (wValue) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ case USB_PORT_FEAT_RESET:
+ {
+ if (temp & PORT_RESUME) {
+ retval = -EPIPE;
+ goto done;
+ }
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
+ && !ehci_is_TDI(ehci) && PORT_USB11 (temp)) {
+ ehci_dbg (ehci, "port %d low speed --> companion\n", wIndex + 1);
+ temp |= PORT_OWNER;
+ ehci_writel(ehci, temp, status_reg);
+ } else {
+ ehci_vdbg(ehci, "port %d reset\n", wIndex + 1);
+ temp &= ~PORT_PE;
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(50);
+ ehci_writel(ehci, temp, status_reg);
+ if (hsic && (wIndex == 0))
+ tegra_usb_phy_bus_reset(tegra->phy);
+ }
- tegra->port_resuming = 1;
+ break;
+ }
+#endif
+ case USB_PORT_FEAT_POWER:
+ {
+ if (HCS_PPC(ehci->hcs_params))
+ ehci_writel(ehci, temp | PORT_POWER, status_reg);
+ if (hsic && (wIndex == 0))
+ tegra_usb_phy_bus_connect(tegra->phy);
+ break;
+ }
+ }
goto done;
}
@@ -245,11 +431,20 @@ done:
return retval;
}
-static void tegra_ehci_restart(struct usb_hcd *hcd)
+#ifdef CONFIG_PM
+static void tegra_ehci_restart(struct usb_hcd *hcd, bool is_dpd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ unsigned int temp;
+ ehci->controller_resets_phy = 0;
+ tegra_ehci_pre_reset(tegra->phy, false);
ehci_reset(ehci);
+ tegra_ehci_post_reset(tegra->phy, false);
+
+ if (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_NULL_ULPI)
+ ehci->controller_resets_phy = 1;
/* setup the frame list and Async q heads */
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
@@ -259,60 +454,111 @@ static void tegra_ehci_restart(struct usb_hcd *hcd)
ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ /* Enable the root Port Power */
+ if (HCS_PPC(ehci->hcs_params)) {
+ temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ ehci_writel(ehci, temp | PORT_POWER, &ehci->regs->port_status[0]);
+ }
+
down_write(&ehci_cf_port_reset_rwsem);
+ if(is_dpd)
+ hcd->state = HC_STATE_SUSPENDED;
+ else
+ hcd->state = HC_STATE_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
/* flush posted writes */
ehci_readl(ehci, &ehci->regs->command);
up_write(&ehci_cf_port_reset_rwsem);
+
+ /* Turn On Interrupts */
+ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
}
-static int tegra_usb_suspend(struct usb_hcd *hcd)
+static int tegra_usb_suspend(struct usb_hcd *hcd, bool is_dpd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
struct ehci_regs __iomem *hw = tegra->ehci->regs;
unsigned long flags;
+ int hsic = 0;
+
+ hsic = (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC);
spin_lock_irqsave(&tegra->ehci->lock, flags);
- tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
+ if (tegra->ehci->has_hostpc)
+ tegra->port_speed = (readl(hcd->regs + HOSTPC_REG_OFFSET) >> 25) & 0x3;
+ else
+ tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
ehci_halt(tegra->ehci);
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) {
+ /*
+		 * The EHCI run bit is disabled by now; read the register into the
+		 * command variable so that bus resume will not re-enable the run bit
+		 * immediately. This is required for the 2LS WAR on the UTMIP interface.
+ */
+ tegra->ehci->command = ehci_readl(tegra->ehci,
+ &tegra->ehci->regs->command);
+ }
+#endif
spin_unlock_irqrestore(&tegra->ehci->lock, flags);
- tegra_ehci_power_down(hcd);
+ tegra_ehci_power_down(hcd, is_dpd);
return 0;
}
-static int tegra_usb_resume(struct usb_hcd *hcd)
+static int tegra_usb_resume(struct usb_hcd *hcd, bool is_dpd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct usb_device *udev = hcd->self.root_hub;
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_regs __iomem *hw = ehci->regs;
unsigned long val;
+ bool hsic;
+ bool null_ulpi;
+ null_ulpi = (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_NULL_ULPI);
+ hsic = (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_HSIC);
+
+ tegra_ehci_power_up(hcd, is_dpd);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- tegra_ehci_power_up(hcd);
- if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
- /* Wait for the phy to detect new devices
- * before we restart the controller */
- msleep(10);
+ if ((tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) || (hsic) ||
+ (null_ulpi))
goto restart;
- }
/* Force the phy to keep data lines in suspend state */
tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+ if ((tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) &&
+ (tegra->ehci->has_hostpc)) {
+ ehci_reset(ehci);
+ }
+
/* Enable host mode */
tdi_reset(ehci);
+ if ((tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) &&
+ (tegra->ehci->has_hostpc)) {
+ val = readl(hcd->regs + HOSTPC_REG_OFFSET);
+ val &= ~HOSTPC1_DEVLC_PTS(~0);
+ val |= HOSTPC1_DEVLC_STS;
+ writel(val, hcd->regs + HOSTPC_REG_OFFSET);
+ }
+
/* Enable Port Power */
val = readl(&hw->port_status[0]);
val |= PORT_POWER;
writel(val, &hw->port_status[0]);
udelay(10);
+ if ((tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_UTMIP) &&
+ (tegra->ehci->has_hostpc) && (tegra->phy->remote_wakeup)) {
+ ehci->command |= CMD_RUN;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ }
+
/* Check if the phy resume from LP0. When the phy resume from LP0
* USB register will be reset. */
if (!readl(&hw->async_next)) {
@@ -360,6 +606,9 @@ static int tegra_usb_resume(struct usb_hcd *hcd)
val |= PORT_SUSPEND;
writel(val, &hw->port_status[0]);
+ /* Need a 4ms delay before the controller goes to suspend */
+ mdelay(4);
+
/* Wait until port suspend completes */
if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
PORT_SUSPEND, 1000)) {
@@ -373,28 +622,68 @@ static int tegra_usb_resume(struct usb_hcd *hcd)
return 0;
restart:
- if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ if (null_ulpi) {
+ bool LP0 = !readl(&hw->async_next);
+
+ if (LP0) {
+ static int cnt = 1;
+
+ pr_info("LP0 restart %d\n", cnt++);
+ tegra_ehci_phy_restore_start(tegra->phy,
+ tegra->port_speed);
+ }
+
+ val = readl(&hw->port_status[0]);
+ if (!((val & PORT_POWER) && (val & PORT_PE))) {
+ tegra_ehci_restart(hcd, is_dpd);
+ }
+
+ if (LP0)
+ tegra_ehci_phy_restore_end(tegra->phy);
+
+ return 0;
+ }
+
+ if ((tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH) && (!hsic))
tegra_ehci_phy_restore_end(tegra->phy);
+ if (hsic) {
+ val = readl(&hw->port_status[0]);
+ if (!((val & PORT_POWER) && (val & PORT_PE)))
+ tegra_ehci_restart(hcd, false);
+
+ tegra_usb_phy_bus_idle(tegra->phy);
+ tegra->hsic_connect_retries = 0;
+ if (!tegra_usb_phy_is_device_connected(tegra->phy))
+ schedule_delayed_work(&tegra->work, 50);
+ } else {
+ tegra_ehci_restart(hcd, false);
+ }
- tegra_ehci_restart(hcd);
return 0;
}
+#endif
static void tegra_ehci_shutdown(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ mutex_lock(&tegra->tegra_ehci_hcd_mutex);
/* ehci_shutdown touches the USB controller registers, make sure
* controller has clocks to it */
if (!tegra->host_resumed)
- tegra_ehci_power_up(hcd);
+ tegra_ehci_power_up(hcd, false);
ehci_shutdown(hcd);
+
+	/* we are ready to shut down, power down the phy */
+ tegra_ehci_power_down(hcd, false);
+ mutex_unlock(&tegra->tegra_ehci_hcd_mutex);
}
static int tegra_ehci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
int retval;
/* EHCI registers start at offset 0x100 */
@@ -408,9 +697,15 @@ static int tegra_ehci_setup(struct usb_hcd *hcd)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = readl(&ehci->caps->hcs_params);
- /* switch to host mode */
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ ehci->has_hostpc = 1;
+#endif
hcd->has_tt = 1;
- ehci_reset(ehci);
+
+ if (tegra->phy->usb_phy_type != TEGRA_USB_PHY_TYPE_NULL_ULPI) {
+ ehci_reset(ehci);
+ tegra_ehci_post_reset(tegra->phy, false);
+ }
retval = ehci_halt(ehci);
if (retval)
@@ -423,6 +718,19 @@ static int tegra_ehci_setup(struct usb_hcd *hcd)
ehci->sbrn = 0x20;
+ if (tegra->phy->usb_phy_type == TEGRA_USB_PHY_TYPE_NULL_ULPI) {
+ tegra_ehci_pre_reset(tegra->phy, false);
+ ehci_reset(ehci);
+ tegra_ehci_post_reset(tegra->phy, false);
+
+ /*
+ * Resetting the controller has the side effect of resetting the PHY.
+		 * So never reset the controller after calling the
+		 * tegra_ehci_reinit API.
+ */
+ ehci->controller_resets_phy = 1;
+ }
+
ehci_port_power(ehci, 1);
return retval;
}
@@ -433,11 +741,18 @@ static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
int error_status = 0;
+ mutex_lock(&tegra->tegra_ehci_hcd_mutex);
+	/* Bus suspend touches the USB controller registers, so make sure
+	 * the controller has clocks to it */
+ if (!tegra->host_resumed)
+ tegra_ehci_power_up(hcd, false);
error_status = ehci_bus_suspend(hcd);
if (!error_status && tegra->power_down_on_bus_suspend) {
- tegra_usb_suspend(hcd);
+ tegra_usb_suspend(hcd, false);
tegra->bus_suspended = 1;
}
+ tegra_usb_phy_postsuspend(tegra->phy, false);
+ mutex_unlock(&tegra->tegra_ehci_hcd_mutex);
return error_status;
}
@@ -445,15 +760,17 @@ static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ int ehci_bus_resumed;
+ mutex_lock(&tegra->tegra_ehci_hcd_mutex);
if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
- tegra_usb_resume(hcd);
+ tegra_usb_resume(hcd, false);
tegra->bus_suspended = 0;
}
- tegra_usb_phy_preresume(tegra->phy);
- tegra->port_resuming = 1;
- return ehci_bus_resume(hcd);
+ ehci_bus_resumed = ehci_bus_resume(hcd);
+ mutex_unlock(&tegra->tegra_ehci_hcd_mutex);
+ return ehci_bus_resumed;
}
#endif
@@ -543,6 +860,114 @@ static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
free_temp_buffer(urb);
}
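+/*
+ * Delayed work that polls for an HSIC device after resume; it reschedules
+ * itself until a device is detected or TEGRA_HSIC_CONNECTION_MAX_RETRIES
+ * is exceeded.
+ */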
+static void tegra_hsic_connection_work(struct work_struct *work)
+{
+ struct tegra_ehci_hcd *tegra =
+ container_of(work, struct tegra_ehci_hcd, work.work);
+ if (tegra_usb_phy_is_device_connected(tegra->phy)) {
+ cancel_delayed_work(&tegra->work);
+ return;
+ }
+	/* In a few cases the HSIC device may not be connected, so
+	 * skip this check after the configured max retries. */
+ if (tegra->hsic_connect_retries++ > TEGRA_HSIC_CONNECTION_MAX_RETRIES)
+ return;
+
+ schedule_delayed_work(&tegra->work, jiffies + msecs_to_jiffies(50));
+ return;
+}
+
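+/*
+ * Timer callback: when the busy-hint window expires, flag the event and
+ * schedule clk_timer_work to drop the EMC/SCLK busy-hint clocks.
+ */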
+void clk_timer_callback(unsigned long data)
+{
+ struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd*) data;
+ unsigned long flags;
+
+ if (!timer_pending(&tegra->clk_timer)) {
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+ tegra->timer_event = 1;
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+ schedule_work(&tegra->clk_timer_work);
+ }
+}
+
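+/*
+ * Work handler for the USB busy hints: on a timer event it releases the
+ * EMC and SCLK clocks; otherwise it enables them and (re)arms the 2-second
+ * timer so the hints stay up while transfers are active.
+ */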
+static void clk_timer_work_handler(struct work_struct *clk_timer_work)
+{
+ struct tegra_ehci_hcd *tegra = container_of(clk_timer_work,
+ struct tegra_ehci_hcd, clk_timer_work);
+ int ret;
+ unsigned long flags;
+ bool clock_enabled, timer_event;
+
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+ clock_enabled = tegra->clock_enabled;
+ timer_event = tegra->timer_event;
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+
+ if (timer_event) {
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+ tegra->clock_enabled = 0;
+ tegra->timer_event = 0;
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+ clk_disable(tegra->emc_clk);
+ clk_disable(tegra->sclk_clk);
+ return;
+ }
+
+	if (!clock_enabled) {
+ ret = mod_timer(&tegra->clk_timer, jiffies + msecs_to_jiffies(2000));
+ if (ret)
+ pr_err("tegra_ehci_urb_enqueue timer modify failed \n");
+ clk_enable(tegra->emc_clk);
+ clk_enable(tegra->sclk_clk);
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+ tegra->clock_enabled = 1;
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+ } else {
+ if (timer_pending(&tegra->clk_timer)) {
+ mod_timer_pending (&tegra->clk_timer, jiffies
+ + msecs_to_jiffies(2000));
+ }
+ }
+}
+
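+/*
+ * urb_enqueue wrapper: raise the memory/system-clock busy hints for bulk,
+ * isochronous, and large interrupt transfers before queuing the URB via
+ * ehci_urb_enqueue().
+ */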
+static int tegra_ehci_urb_enqueue (
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct tegra_ehci_hcd *pdata;
+ int xfertype;
+ int transfer_buffer_length;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ unsigned long flags;
+ pdata = dev_get_drvdata(hcd->self.controller);
+
+ xfertype = usb_endpoint_type(&urb->ep->desc);
+ transfer_buffer_length = urb->transfer_buffer_length;
+ spin_lock_irqsave(&ehci->lock,flags);
+ /* Turn on the USB busy hints */
+ switch (xfertype) {
+ case USB_ENDPOINT_XFER_INT:
+ if (transfer_buffer_length < 255) {
+ /* Do nothing for interrupt buffers < 255 */
+ } else {
+ /* signal to set the busy hints */
+ schedule_work(&pdata->clk_timer_work);
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_BULK:
+ /* signal to set the busy hints */
+ schedule_work(&pdata->clk_timer_work);
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ default:
+ /* Do nothing special here */
+ break;
+ }
+ spin_unlock_irqrestore(&ehci->lock,flags);
+ return ehci_urb_enqueue(hcd, urb, mem_flags);
+}
+
static const struct hc_driver tegra_ehci_hc_driver = {
.description = hcd_name,
.product_desc = "Tegra EHCI Host Controller",
@@ -551,12 +976,12 @@ static const struct hc_driver tegra_ehci_hc_driver = {
.flags = HCD_USB2 | HCD_MEMORY,
.reset = tegra_ehci_setup,
- .irq = ehci_irq,
+ .irq = tegra_ehci_irq,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = tegra_ehci_shutdown,
- .urb_enqueue = ehci_urb_enqueue,
+ .urb_enqueue = tegra_ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.map_urb_for_dma = tegra_ehci_map_urb_for_dma,
.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma,
@@ -594,6 +1019,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
if (!tegra)
return -ENOMEM;
+ mutex_init(&tegra->tegra_ehci_hcd_mutex);
+
hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!hcd) {
@@ -615,15 +1042,33 @@ static int tegra_ehci_probe(struct platform_device *pdev)
if (err)
goto fail_clken;
+
+ tegra->sclk_clk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(tegra->sclk_clk)) {
+ dev_err(&pdev->dev, "Can't get sclk clock\n");
+ err = PTR_ERR(tegra->sclk_clk);
+ goto fail_sclk_clk;
+ }
+
+ clk_set_rate(tegra->sclk_clk, 80000000);
+
tegra->emc_clk = clk_get(&pdev->dev, "emc");
if (IS_ERR(tegra->emc_clk)) {
dev_err(&pdev->dev, "Can't get emc clock\n");
err = PTR_ERR(tegra->emc_clk);
goto fail_emc_clk;
}
-
- clk_enable(tegra->emc_clk);
- clk_set_rate(tegra->emc_clk, 400000000);
+ init_timer(&tegra->clk_timer);
+ tegra->clk_timer.function = clk_timer_callback;
+ tegra->clk_timer.data = (unsigned long) tegra;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* Set DDR busy hints to 150MHz. For Tegra 2x SOC, DDR rate is half of EMC rate */
+ clk_set_rate(tegra->emc_clk, 300000000);
+#else
+	/* Set DDR busy hints to 100MHz. For Tegra 3x SOC the DDR rate equals the EMC rate */
+ clk_set_rate(tegra->emc_clk, 100000000);
+#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -640,15 +1085,20 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
+ INIT_DELAYED_WORK(&tegra->work, tegra_hsic_connection_work);
+
+ INIT_WORK(&tegra->clk_timer_work, clk_timer_work_handler);
+
tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
- TEGRA_USB_PHY_MODE_HOST);
+ TEGRA_USB_PHY_MODE_HOST, pdata->phy_type);
if (IS_ERR(tegra->phy)) {
dev_err(&pdev->dev, "Failed to open USB phy\n");
err = -ENXIO;
goto fail_phy;
}
+ tegra->phy->hotplug = pdata->hotplug;
- err = tegra_usb_phy_power_on(tegra->phy);
+ err = tegra_usb_phy_power_on(tegra->phy, true);
if (err) {
dev_err(&pdev->dev, "Failed to power on the phy\n");
goto fail;
@@ -665,6 +1115,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail;
}
set_irq_flags(irq, IRQF_VALID);
+ tegra->irq = irq;
#ifdef CONFIG_USB_OTG_UTILS
if (pdata->operating_mode == TEGRA_USB_OTG) {
@@ -676,10 +1127,19 @@ static int tegra_ehci_probe(struct platform_device *pdev)
err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (err) {
- dev_err(&pdev->dev, "Failed to add USB HCD\n");
+		dev_err(&pdev->dev, "Failed to add USB HCD, error = %d\n", err);
goto fail;
}
+ err = enable_irq_wake(tegra->irq);
+ if (err < 0) {
+ dev_warn(&pdev->dev,
+ "Couldn't enable USB host mode wakeup, irq=%d, "
+ "error=%d\n", tegra->irq, err);
+ err = 0;
+ tegra->irq = 0;
+ }
+
return err;
fail:
@@ -696,6 +1156,9 @@ fail_io:
clk_disable(tegra->emc_clk);
clk_put(tegra->emc_clk);
fail_emc_clk:
+ clk_disable(tegra->sclk_clk);
+ clk_put(tegra->sclk_clk);
+fail_sclk_clk:
clk_disable(tegra->clk);
fail_clken:
clk_put(tegra->clk);
@@ -712,24 +1175,40 @@ static int tegra_ehci_resume(struct platform_device *pdev)
struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
- if (tegra->bus_suspended)
+ if ((tegra->bus_suspended) && (tegra->power_down_on_bus_suspend)) {
+#ifdef CONFIG_USB_HOTPLUG
+ clk_enable(tegra->clk);
+#endif
return 0;
+ }
- return tegra_usb_resume(hcd);
+#ifdef CONFIG_USB_HOTPLUG
+ clk_enable(tegra->clk);
+#endif
+ return tegra_usb_resume(hcd, true);
}
static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
{
struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+ int ret;
- if (tegra->bus_suspended)
+ if ((tegra->bus_suspended) && (tegra->power_down_on_bus_suspend)) {
+#ifdef CONFIG_USB_HOTPLUG
+ clk_disable(tegra->clk);
+#endif
return 0;
+ }
if (time_before(jiffies, tegra->ehci->next_statechange))
msleep(10);
- return tegra_usb_suspend(hcd);
+ ret = tegra_usb_suspend(hcd, true);
+#ifdef CONFIG_USB_HOTPLUG
+ clk_disable(tegra->clk);
+#endif
+ return ret;
}
#endif
@@ -740,6 +1219,9 @@ static int tegra_ehci_remove(struct platform_device *pdev)
if (tegra == NULL || hcd == NULL)
return -EINVAL;
+ /* make sure controller is on as we will touch its registers */
+ if (!tegra->host_resumed)
+ tegra_ehci_power_up(hcd, true);
#ifdef CONFIG_USB_OTG_UTILS
if (tegra->transceiver) {
@@ -748,16 +1230,28 @@ static int tegra_ehci_remove(struct platform_device *pdev)
}
#endif
+ /* Turn Off Interrupts */
+ ehci_writel(tegra->ehci, 0, &tegra->ehci->regs->intr_enable);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ if (tegra->irq)
+ disable_irq_wake(tegra->irq);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-
+ cancel_delayed_work(&tegra->work);
+ tegra_usb_phy_power_off(tegra->phy, true);
tegra_usb_phy_close(tegra->phy);
iounmap(hcd->regs);
+ del_timer_sync(&tegra->clk_timer);
+
clk_disable(tegra->clk);
clk_put(tegra->clk);
- clk_disable(tegra->emc_clk);
+ if (tegra->clock_enabled) {
+ clk_disable(tegra->sclk_clk);
+ clk_disable(tegra->emc_clk);
+ }
+ clk_put(tegra->sclk_clk);
clk_put(tegra->emc_clk);
kfree(tegra);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index d92ed5c52243..7f0e828631ed 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -140,6 +140,9 @@ struct ehci_hcd { /* one per controller */
unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+#ifdef CONFIG_USB_EHCI_TEGRA
+ unsigned controller_resets_phy:1;
+#endif
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -166,10 +169,6 @@ struct ehci_hcd { /* one per controller */
#ifdef DEBUG
struct dentry *debug_dir;
#endif
- /*
- * OTG controllers and transceivers need software interaction
- */
- struct otg_transceiver *transceiver;
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -755,6 +754,23 @@ static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
#endif
+/*
+ * Writes to DMA-coherent memory on ARM may be delayed in the L2
+ * write buffer, so introduce a helper that flushes the L2 write
+ * buffer to memory immediately; it is mainly used to flush EHCI
+ * descriptors to memory.
+ */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+static inline void ehci_sync_mem(void)
+{
+ mb();
+}
+#else
+static inline void ehci_sync_mem(void)
+{
+}
+#endif
+
/*-------------------------------------------------------------------------*/
#ifndef DEBUG
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index c66481ad98d7..bcb3e8680337 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -12,6 +12,14 @@ config USB_OTG_UTILS
Select this to make sure the build includes objects from
the OTG infrastructure directory.
+config USB_OTG_WAKELOCK
+ bool "Hold a wakelock when USB connected"
+ depends on WAKELOCK
+ select USB_OTG_UTILS
+ help
+ Select this to automatically hold a wakelock when USB is
+ connected, preventing suspend.
+
if USB || USB_GADGET
#
@@ -113,6 +121,14 @@ config USB_MSM_OTG
This driver is not supported on boards like trout which
has an external PHY.
+config USB_TEGRA_OTG
+ boolean "Tegra OTG Driver"
+ depends on USB && ARCH_TEGRA
+ select USB_OTG_UTILS
+ help
+ Enable this driver on boards which use the internal VBUS and ID
+ sensing of the Tegra USB PHY.
+
config AB8500_USB
tristate "AB8500 USB Transceiver Driver"
depends on AB8500_CORE
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 566655c53331..0bc9935c2180 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -1,15 +1,19 @@
#
# OTG infrastructure and transceiver drivers
#
+GCOV_PROFILE_tegra-otg.o := y
ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
# infrastructure
obj-$(CONFIG_USB_OTG_UTILS) += otg.o
+obj-$(CONFIG_USB_OTG_WAKELOCK) += otg-wakelock.o
+obj-$(CONFIG_USB_OTG_UTILS) += otg_id.o
# transceiver drivers
obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
+obj-$(CONFIG_USB_TEGRA_OTG) += tegra-otg.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
obj-$(CONFIG_TWL6030_USB) += twl6030-usb.o
diff --git a/drivers/usb/otg/otg-wakelock.c b/drivers/usb/otg/otg-wakelock.c
new file mode 100644
index 000000000000..2f11472dd2b3
--- /dev/null
+++ b/drivers/usb/otg/otg-wakelock.c
@@ -0,0 +1,169 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME 2000
+
+static bool enabled = true;
+static struct otg_transceiver *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock, but since these 3 fields are associated with each other...
+ */
+
+struct otgwl_lock {
+ char name[40];
+ struct wake_lock wakelock;
+ bool held;
+};
+
+/*
+ * VBUS present lock. Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+ if (!lock->held) {
+ wake_lock(&lock->wakelock);
+ lock->held = true;
+ }
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+ wake_lock_timeout(&lock->wakelock,
+ msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+ lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+ if (lock->held) {
+ wake_unlock(&lock->wakelock);
+ lock->held = false;
+ }
+}
+
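+/*
+ * Map OTG events to wakelock actions: hold the VBUS lock while VBUS is
+ * present or a device is enumerated, and hold it briefly on disconnect,
+ * ID, and charger events so the system can react to the power change.
+ */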
+static void otgwl_handle_event(unsigned long event)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+ if (!enabled) {
+ otgwl_drop(&vbus_lock);
+ spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+ return;
+ }
+
+ switch (event) {
+ case USB_EVENT_VBUS:
+ case USB_EVENT_ENUMERATED:
+ otgwl_hold(&vbus_lock);
+ break;
+
+ case USB_EVENT_NONE:
+ case USB_EVENT_ID:
+ case USB_EVENT_CHARGER:
+ otgwl_temporary_hold(&vbus_lock);
+ break;
+
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ otgwl_handle_event(event);
+ return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_bool(val, kp);
+
+ if (rv)
+ return rv;
+
+ if (otgwl_xceiv)
+ otgwl_handle_event(otgwl_xceiv->last_event);
+
+ return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+ .set = set_enabled,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+ int ret;
+
+ otgwl_xceiv = otg_get_transceiver();
+
+ if (!otgwl_xceiv) {
+ pr_err("%s: No OTG transceiver found\n", __func__);
+ return -ENODEV;
+ }
+
+ snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+ dev_name(otgwl_xceiv->dev));
+ wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+ vbus_lock.name);
+
+ otgwl_nb.notifier_call = otgwl_otg_notifications;
+ ret = otg_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+ if (ret) {
+ pr_err("%s: otg_register_notifier on transceiver %s"
+ " failed\n", __func__,
+ dev_name(otgwl_xceiv->dev));
+ otgwl_xceiv = NULL;
+ wake_lock_destroy(&vbus_lock.wakelock);
+ return ret;
+ }
+
+ otgwl_handle_event(otgwl_xceiv->last_event);
+ return ret;
+}
+
+late_initcall(otg_wakelock_init);
diff --git a/drivers/usb/otg/otg_id.c b/drivers/usb/otg/otg_id.c
new file mode 100644
index 000000000000..8037edbf3141
--- /dev/null
+++ b/drivers/usb/otg/otg_id.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg_id.h>
+
+static DEFINE_MUTEX(otg_id_lock);
+static struct plist_head otg_id_plist =
+ PLIST_HEAD_INIT(otg_id_plist);
+static struct otg_id_notifier_block *otg_id_active;
+static bool otg_id_cancelling;
+static bool otg_id_inited;
+static int otg_id_suspended;
+static bool otg_id_pending;
+
+static void otg_id_cancel(void)
+{
+ if (otg_id_active) {
+ otg_id_cancelling = true;
+ mutex_unlock(&otg_id_lock);
+
+ otg_id_active->cancel(otg_id_active);
+
+ mutex_lock(&otg_id_lock);
+ otg_id_cancelling = false;
+ }
+}
+
+static void __otg_id_notify(void)
+{
+ int ret;
+ struct otg_id_notifier_block *otg_id_nb;
+ bool proxy_wait = false;
+ if (plist_head_empty(&otg_id_plist))
+ return;
+
+ plist_for_each_entry(otg_id_nb, &otg_id_plist, p) {
+ if (proxy_wait) {
+ if (otg_id_nb->proxy_wait)
+ ret = otg_id_nb->proxy_wait(otg_id_nb);
+ } else {
+ ret = otg_id_nb->detect(otg_id_nb);
+ }
+ if (ret == OTG_ID_HANDLED) {
+ otg_id_active = otg_id_nb;
+ return;
+ }
+ if (ret == OTG_ID_PROXY_WAIT)
+ proxy_wait = true;
+
+ }
+
+ WARN(1, "otg id event not handled");
+ otg_id_active = NULL;
+}
+
+int otg_id_init(void)
+{
+ mutex_lock(&otg_id_lock);
+
+ otg_id_inited = true;
+ __otg_id_notify();
+
+ mutex_unlock(&otg_id_lock);
+ return 0;
+}
+late_initcall(otg_id_init);
+
+/**
+ * otg_id_register_notifier
+ * @otg_id_nb: notifier block containing priority and callback function
+ *
+ * Register a notifier that will be called on any USB cable state change.
+ * The priority determines the order in which callbacks are called; a higher
+ * number is called first. A callback function needs to determine the
+ * type of USB cable that is connected. If it can determine the type, it
+ * should notify the appropriate drivers (for example, call an otg notifier
+ * with USB_EVENT_VBUS), and return OTG_ID_HANDLED. Once a callback has
+ * returned OTG_ID_HANDLED, it is responsible for calling otg_id_notify() when
+ * the detected USB cable is disconnected.
+ */
+int otg_id_register_notifier(struct otg_id_notifier_block *otg_id_nb)
+{
+ plist_node_init(&otg_id_nb->p, otg_id_nb->priority);
+
+ mutex_lock(&otg_id_lock);
+ plist_add(&otg_id_nb->p, &otg_id_plist);
+
+ if (otg_id_inited) {
+ otg_id_cancel();
+ __otg_id_notify();
+ }
+
+ mutex_unlock(&otg_id_lock);
+
+ return 0;
+}
+
+void otg_id_unregister_notifier(struct otg_id_notifier_block *otg_id_nb)
+{
+ mutex_lock(&otg_id_lock);
+
+ plist_del(&otg_id_nb->p, &otg_id_plist);
+
+ if (otg_id_inited && (otg_id_active == otg_id_nb)) {
+ otg_id_cancel();
+ __otg_id_notify();
+ }
+
+ mutex_unlock(&otg_id_lock);
+}
+
+/**
+ * otg_id_notify
+ *
+ * Notify listeners on any USB cable state change.
+ *
+ * A driver may only call otg_id_notify if it returned OTG_ID_HANDLED the last
+ * time its notifier was called, and its cancel function has not been called.
+ */
+void otg_id_notify(void)
+{
+ mutex_lock(&otg_id_lock);
+
+ if (otg_id_cancelling)
+ goto out;
+
+ if (otg_id_suspended != 0) {
+ otg_id_pending = true;
+ goto out;
+ }
+
+ __otg_id_notify();
+out:
+ mutex_unlock(&otg_id_lock);
+}
+
+/**
+ * otg_id_suspend
+ *
+ * Mark the otg_id subsystem as going into suspend. From here on out,
+ * any notifications will be deferred until the last otg_id client resumes.
+ * If there is a pending notification when this function is called, it
+ * returns a negative errno and the caller is expected to abort suspend.
+ * Returns 0 on success.
+ */
+int otg_id_suspend(void)
+{
+ int ret = 0;
+
+ mutex_lock(&otg_id_lock);
+
+ /*
+ * if there's a pending notification, tell the caller to abort suspend
+ */
+ if (otg_id_suspended != 0 && otg_id_pending) {
+ pr_info("otg_id: pending notification, should abort suspend\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ otg_id_suspended++;
+out:
+ mutex_unlock(&otg_id_lock);
+ return ret;
+}
+
+/**
+ * otg_id_resume
+ *
+ * Inform the otg_id subsystem that a client is resuming. If this is the
+ * last client to be resumed and there's a pending notification,
+ * otg_id_notify() is called.
+ */
+void otg_id_resume(void)
+{
+ mutex_lock(&otg_id_lock);
+ if (WARN(!otg_id_suspended, "unbalanced otg_id_resume\n"))
+ goto out;
+ if (--otg_id_suspended == 0) {
+ if (otg_id_pending) {
+ pr_info("otg_id: had pending notification\n");
+ otg_id_pending = false;
+ __otg_id_notify();
+ }
+ }
+out:
+ mutex_unlock(&otg_id_lock);
+}
diff --git a/drivers/usb/otg/tegra-otg.c b/drivers/usb/otg/tegra-otg.c
new file mode 100644
index 000000000000..a063630e529d
--- /dev/null
+++ b/drivers/usb/otg/tegra-otg.c
@@ -0,0 +1,510 @@
+/*
+ * drivers/usb/otg/tegra-otg.c
+ *
+ * OTG transceiver driver for Tegra UTMI phy
+ *
+ * Copyright (C) 2010 NVIDIA Corp.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#define USB_PHY_WAKEUP 0x408
+#define USB_ID_INT_EN (1 << 0)
+#define USB_ID_INT_STATUS (1 << 1)
+#define USB_ID_STATUS (1 << 2)
+#define USB_ID_PIN_WAKEUP_EN (1 << 6)
+#define USB_VBUS_WAKEUP_EN (1 << 30)
+#define USB_VBUS_INT_EN (1 << 8)
+#define USB_VBUS_INT_STATUS (1 << 9)
+#define USB_VBUS_STATUS (1 << 10)
+#define USB_INTS (USB_VBUS_INT_STATUS | USB_ID_INT_STATUS)
+
+struct tegra_otg_data {
+ struct otg_transceiver otg;
+ unsigned long int_status;
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct platform_device *pdev;
+ struct work_struct work;
+ unsigned int intr_reg_data;
+ bool detect_vbus;
+ bool clk_enabled;
+};
+static struct tegra_otg_data *tegra_clone;
+
+static inline unsigned long otg_readl(struct tegra_otg_data *tegra,
+ unsigned int offset)
+{
+ return readl(tegra->regs + offset);
+}
+
+static inline void otg_writel(struct tegra_otg_data *tegra, unsigned long val,
+ unsigned int offset)
+{
+ writel(val, tegra->regs + offset);
+}
+
+static void tegra_otg_enable_clk(void)
+{
+ if (!tegra_clone->clk_enabled)
+ clk_enable(tegra_clone->clk);
+ tegra_clone->clk_enabled = true;
+}
+
+static void tegra_otg_disable_clk(void)
+{
+ if (tegra_clone->clk_enabled)
+ clk_disable(tegra_clone->clk);
+ tegra_clone->clk_enabled = false;
+}
+
+static const char *tegra_state_name(enum usb_otg_state state)
+{
+ if (state == OTG_STATE_A_HOST)
+ return "HOST";
+ if (state == OTG_STATE_B_PERIPHERAL)
+ return "PERIPHERAL";
+ if (state == OTG_STATE_A_SUSPEND)
+ return "SUSPEND";
+ return "INVALID";
+}
+
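+/*
+ * Clone the EHCI platform device (resources, DMA masks, platform data) and
+ * register the copy; used when switching the OTG port into host mode.
+ */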
+static struct platform_device *
+tegra_usb_otg_host_register(struct platform_device *ehci_device,
+ struct tegra_ehci_platform_data *pdata)
+{
+ struct platform_device *pdev;
+ void *platform_data;
+ int val;
+
+ pdev = platform_device_alloc(ehci_device->name, ehci_device->id);
+ if (!pdev)
+ return NULL;
+
+ val = platform_device_add_resources(pdev, ehci_device->resource,
+ ehci_device->num_resources);
+ if (val)
+ goto error;
+
+ pdev->dev.dma_mask = ehci_device->dev.dma_mask;
+ pdev->dev.coherent_dma_mask = ehci_device->dev.coherent_dma_mask;
+
+ platform_data = kmalloc(sizeof(struct tegra_ehci_platform_data),
+ GFP_KERNEL);
+ if (!platform_data)
+ goto error;
+
+ memcpy(platform_data, pdata, sizeof(struct tegra_ehci_platform_data));
+ pdev->dev.platform_data = platform_data;
+
+ val = platform_device_add(pdev);
+ if (val)
+ goto error_add;
+
+ return pdev;
+
+error_add:
+ kfree(platform_data);
+error:
+ pr_err("%s: failed to add the host controller device\n", __func__);
+ platform_device_put(pdev);
+ return NULL;
+}
+
+static void tegra_usb_otg_host_unregister(struct platform_device *pdev)
+{
+ kfree(pdev->dev.platform_data);
+ pdev->dev.platform_data = NULL;
+ platform_device_unregister(pdev);
+}
+
+void tegra_start_host(struct tegra_otg_data *tegra)
+{
+ struct tegra_otg_platform_data *pdata = tegra->otg.dev->platform_data;
+ if (!tegra->pdev) {
+ tegra->pdev = tegra_usb_otg_host_register(pdata->ehci_device,
+ pdata->ehci_pdata);
+ }
+}
+
+void tegra_stop_host(struct tegra_otg_data *tegra)
+{
+ if (tegra->pdev) {
+ tegra_usb_otg_host_unregister(tegra->pdev);
+ tegra->pdev = NULL;
+ }
+}
+
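+/*
+ * Work handler that turns the latched ID/VBUS interrupt status into an OTG
+ * state transition and starts or stops the host controller or gadget
+ * accordingly.
+ */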
+static void irq_work(struct work_struct *work)
+{
+ struct tegra_otg_data *tegra =
+ container_of(work, struct tegra_otg_data, work);
+ struct otg_transceiver *otg = &tegra->otg;
+ enum usb_otg_state from = otg->state;
+ enum usb_otg_state to = OTG_STATE_UNDEFINED;
+ unsigned long flags;
+ unsigned long status;
+
+ if (tegra->detect_vbus) {
+ tegra->detect_vbus = false;
+ tegra_otg_enable_clk();
+ return;
+ }
+
+ clk_enable(tegra->clk);
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ status = tegra->int_status;
+
+ if (tegra->int_status & USB_ID_INT_STATUS) {
+ if (status & USB_ID_STATUS) {
+ if ((status & USB_VBUS_STATUS) && (from != OTG_STATE_A_HOST))
+ to = OTG_STATE_B_PERIPHERAL;
+ else
+ to = OTG_STATE_A_SUSPEND;
+ }
+ else
+ to = OTG_STATE_A_HOST;
+ }
+ if (from != OTG_STATE_A_HOST) {
+ if (tegra->int_status & USB_VBUS_INT_STATUS) {
+ if (status & USB_VBUS_STATUS)
+ to = OTG_STATE_B_PERIPHERAL;
+ else
+ to = OTG_STATE_A_SUSPEND;
+ }
+ }
+ spin_unlock_irqrestore(&tegra->lock, flags);
+
+ if (to != OTG_STATE_UNDEFINED) {
+ otg->state = to;
+
+ dev_info(tegra->otg.dev, "%s --> %s\n", tegra_state_name(from),
+ tegra_state_name(to));
+
+ if (to == OTG_STATE_A_SUSPEND) {
+ if (from == OTG_STATE_A_HOST)
+ tegra_stop_host(tegra);
+ else if (from == OTG_STATE_B_PERIPHERAL && otg->gadget)
+ usb_gadget_vbus_disconnect(otg->gadget);
+ } else if (to == OTG_STATE_B_PERIPHERAL && otg->gadget) {
+ if (from == OTG_STATE_A_SUSPEND)
+ usb_gadget_vbus_connect(otg->gadget);
+ } else if (to == OTG_STATE_A_HOST) {
+ if (from == OTG_STATE_A_SUSPEND)
+ tegra_start_host(tegra);
+ }
+ }
+ clk_disable(tegra->clk);
+ tegra_otg_disable_clk();
+}
+
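+/*
+ * OTG interrupt handler: acknowledge pending ID/VBUS status bits and defer
+ * the actual cable-state handling to irq_work.
+ */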
+static irqreturn_t tegra_otg_irq(int irq, void *data)
+{
+ struct tegra_otg_data *tegra = data;
+ unsigned long flags;
+ unsigned long val;
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ if (val & (USB_VBUS_INT_EN | USB_ID_INT_EN)) {
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ if ((val & USB_ID_INT_STATUS) || (val & USB_VBUS_INT_STATUS)) {
+ tegra->int_status = val;
+ tegra->detect_vbus = false;
+ schedule_work(&tegra->work);
+ }
+ }
+
+ spin_unlock_irqrestore(&tegra->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void tegra_otg_check_vbus_detection(void)
+{
+ tegra_clone->detect_vbus = true;
+ schedule_work(&tegra_clone->work);
+}
+EXPORT_SYMBOL(tegra_otg_check_vbus_detection);
+
+static int tegra_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct tegra_otg_data *tegra;
+ unsigned long val;
+
+ tegra = container_of(otg, struct tegra_otg_data, otg);
+ otg->gadget = gadget;
+
+ clk_enable(tegra->clk);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ val |= (USB_VBUS_INT_EN | USB_VBUS_WAKEUP_EN);
+ val |= (USB_ID_INT_EN | USB_ID_PIN_WAKEUP_EN);
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ /* Add delay to make sure register is updated */
+ udelay(1);
+ clk_disable(tegra->clk);
+
+ if ((val & USB_ID_STATUS) && (val & USB_VBUS_STATUS)) {
+ val |= USB_VBUS_INT_STATUS;
+ } else if (!(val & USB_ID_STATUS)) {
+ val |= USB_ID_INT_STATUS;
+ } else {
+ val &= ~(USB_ID_INT_STATUS | USB_VBUS_INT_STATUS);
+ }
+
+ if ((val & USB_ID_INT_STATUS) || (val & USB_VBUS_INT_STATUS)) {
+ tegra->int_status = val;
+ tegra->detect_vbus = false;
+ schedule_work (&tegra->work);
+ }
+
+ return 0;
+}
+
+static int tegra_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ struct tegra_otg_data *tegra;
+ unsigned long val;
+
+ tegra = container_of(otg, struct tegra_otg_data, otg);
+ otg->host = host;
+
+ clk_enable(tegra->clk);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ val &= ~(USB_VBUS_INT_STATUS | USB_ID_INT_STATUS);
+
+ val |= (USB_ID_INT_EN | USB_ID_PIN_WAKEUP_EN);
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ clk_disable(tegra->clk);
+
+ return 0;
+}
+
+static int tegra_otg_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ return 0;
+}
+
+static int tegra_otg_set_suspend(struct otg_transceiver *otg, int suspend)
+{
+ return 0;
+}
+
+static int tegra_otg_probe(struct platform_device *pdev)
+{
+ struct tegra_otg_data *tegra;
+ struct resource *res;
+ int err;
+
+ tegra = kzalloc(sizeof(struct tegra_otg_data), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->otg.dev = &pdev->dev;
+ tegra->otg.label = "tegra-otg";
+ tegra->otg.state = OTG_STATE_UNDEFINED;
+ tegra->otg.set_host = tegra_otg_set_host;
+ tegra->otg.set_peripheral = tegra_otg_set_peripheral;
+ tegra->otg.set_suspend = tegra_otg_set_suspend;
+ tegra->otg.set_power = tegra_otg_set_power;
+ spin_lock_init(&tegra->lock);
+
+ platform_set_drvdata(pdev, tegra);
+ tegra_clone = tegra;
+ tegra->clk_enabled = false;
+
+ tegra->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get otg clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto err_clk;
+ }
+
+ err = clk_enable(tegra->clk);
+ if (err)
+ goto err_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto err_io;
+ }
+ tegra->regs = ioremap(res->start, resource_size(res));
+ if (!tegra->regs) {
+ err = -ENOMEM;
+ goto err_io;
+ }
+
+ tegra->otg.state = OTG_STATE_A_SUSPEND;
+
+ err = otg_set_transceiver(&tegra->otg);
+ if (err) {
+ dev_err(&pdev->dev, "can't register transceiver (%d)\n", err);
+ goto err_otg;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENXIO;
+ goto err_irq;
+ }
+ tegra->irq = res->start;
+ err = request_threaded_irq(tegra->irq, tegra_otg_irq,
+ NULL,
+ IRQF_SHARED, "tegra-otg", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register IRQ\n");
+ goto err_irq;
+ }
+ INIT_WORK (&tegra->work, irq_work);
+
+#ifndef CONFIG_USB_HOTPLUG
+ clk_disable(tegra->clk);
+#endif
+ dev_info(&pdev->dev, "otg transceiver registered\n");
+ return 0;
+
+err_irq:
+ otg_set_transceiver(NULL);
+err_otg:
+ iounmap(tegra->regs);
+err_io:
+ clk_disable(tegra->clk);
+err_clken:
+ clk_put(tegra->clk);
+err_clk:
+ platform_set_drvdata(pdev, NULL);
+ kfree(tegra);
+ return err;
+}
+
+static int __exit tegra_otg_remove(struct platform_device *pdev)
+{
+ struct tegra_otg_data *tegra = platform_get_drvdata(pdev);
+
+ free_irq(tegra->irq, tegra);
+ otg_set_transceiver(NULL);
+ iounmap(tegra->regs);
+ clk_disable(tegra->clk);
+ clk_put(tegra->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(tegra);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_otg_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_otg_data *tegra_otg = platform_get_drvdata(pdev);
+ struct otg_transceiver *otg = &tegra_otg->otg;
+ enum usb_otg_state from = otg->state;
+	/* store the interrupt enable for cable ID and VBUS */
+ clk_enable(tegra_otg->clk);
+ tegra_otg->intr_reg_data = readl(tegra_otg->regs + USB_PHY_WAKEUP);
+ clk_disable(tegra_otg->clk);
+
+ if (from == OTG_STATE_B_PERIPHERAL && otg->gadget) {
+ usb_gadget_vbus_disconnect(otg->gadget);
+ otg->state = OTG_STATE_A_SUSPEND;
+ }
+ tegra_otg_disable_clk();
+ return 0;
+}
+
+static void tegra_otg_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_otg_data *tegra_otg = platform_get_drvdata(pdev);
+ int val;
+ unsigned long flags;
+
+ tegra_otg_enable_clk();
+
+	/* The following delay is intentional. It was added after a
+	 * system hang was observed; the root cause has not been
+	 * confirmed.
+ */
+ msleep(1);
+	/* restore the interrupt enable for cable ID and VBUS */
+ clk_enable(tegra_otg->clk);
+ writel(tegra_otg->intr_reg_data, (tegra_otg->regs + USB_PHY_WAKEUP));
+ val = readl(tegra_otg->regs + USB_PHY_WAKEUP);
+ clk_disable(tegra_otg->clk);
+
+	/* A device might have been connected while the CPU was in sleep
+	 * mode, in which case no interrupt is triggered. Force irq_work
+	 * to recheck for connected devices.
+ */
+ if (!(val & USB_ID_STATUS)) {
+ spin_lock_irqsave(&tegra_otg->lock, flags);
+ tegra_otg->int_status = (val | USB_ID_INT_STATUS );
+ schedule_work(&tegra_otg->work);
+ spin_unlock_irqrestore(&tegra_otg->lock, flags);
+ }
+
+ return;
+}
+
+static const struct dev_pm_ops tegra_otg_pm_ops = {
+ .complete = tegra_otg_resume,
+ .suspend = tegra_otg_suspend,
+};
+#endif
+
+static struct platform_driver tegra_otg_driver = {
+ .driver = {
+ .name = "tegra-otg",
+#ifdef CONFIG_PM
+ .pm = &tegra_otg_pm_ops,
+#endif
+ },
+ .remove = __exit_p(tegra_otg_remove),
+ .probe = tegra_otg_probe,
+};
+
+static int __init tegra_otg_init(void)
+{
+ return platform_driver_register(&tegra_otg_driver);
+}
+subsys_initcall(tegra_otg_init);
+
+static void __exit tegra_otg_exit(void)
+{
+ platform_driver_unregister(&tegra_otg_driver);
+}
+module_exit(tegra_otg_exit);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index b71e309116a3..f6a54356c29d 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -661,4 +661,17 @@ config USB_SERIAL_DEBUG
To compile this driver as a module, choose M here: the
module will be called usb-debug.
+config USB_SERIAL_BASEBAND
+ tristate "USB Baseband Driver"
+ help
+	  Say Y here if you want to use the USB baseband character driver.
+
+	  This driver may be used as a raw character device driver for
+	  USB modems: for example, to download modem firmware at modem
+	  boot (for flashless modems), or to send AT commands to the modem
+	  after its software is running on the device.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called baseband_usb_chr.
+
endif # USB_SERIAL
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 9e536eefb32c..2654c327aaf2 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -13,6 +13,7 @@ usbserial-$(CONFIG_USB_EZUSB) += ezusb.o
obj-$(CONFIG_USB_SERIAL_AIRCABLE) += aircable.o
obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o
+obj-$(CONFIG_USB_SERIAL_BASEBAND) += baseband_usb_chr.o
obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o
obj-$(CONFIG_USB_SERIAL_CH341) += ch341.o
obj-$(CONFIG_USB_SERIAL_CP210X) += cp210x.o
diff --git a/drivers/usb/serial/baseband_usb_chr.c b/drivers/usb/serial/baseband_usb_chr.c
new file mode 100644
index 000000000000..bb2649dc4c36
--- /dev/null
+++ b/drivers/usb/serial/baseband_usb_chr.c
@@ -0,0 +1,1105 @@
+/*
+ * baseband_usb_chr.c
+ *
+ * USB character driver to communicate with baseband modems.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <asm/ioctls.h>
+#include <linux/uaccess.h>
+#include "baseband_usb_chr.h"
+
+MODULE_LICENSE("GPL");
+
+unsigned long baseband_usb_chr_vid = 0x058b;
+unsigned long baseband_usb_chr_pid = 0x0041;
+unsigned long baseband_usb_chr_intf = 0x01;
+
+module_param(baseband_usb_chr_vid, ulong, 0644);
+MODULE_PARM_DESC(baseband_usb_chr_vid, "baseband (usb chr) - USB VID");
+module_param(baseband_usb_chr_pid, ulong, 0644);
+MODULE_PARM_DESC(baseband_usb_chr_pid, "baseband (usb chr) - USB PID");
+module_param(baseband_usb_chr_intf, ulong, 0644);
+MODULE_PARM_DESC(baseband_usb_chr_intf, "baseband (usb chr) - USB interface");
+
+static struct baseband_usb *baseband_usb_chr;
+
+static atomic_t g_rx_count = ATOMIC_INIT(0);
+
+/* baseband ipc functions */
+
+static void baseband_ipc_dump(const char *prefix, unsigned long int offset,
+ const void *buf, size_t bufsiz)
+{
+ size_t i;
+
+ for (i = 0; i < bufsiz; i += 16) {
+ pr_debug("%s"
+ "[%lx+%x] %p "
+ "%02x %02x %02x %02x "
+ "%02x %02x %02x %02x "
+ "%02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ prefix,
+ offset,
+ i,
+ ((const unsigned char *) buf) + i,
+ (i + 0 < bufsiz) ? ((const unsigned char *) buf)[i+0]
+ : 0xff,
+ (i + 1 < bufsiz) ? ((const unsigned char *) buf)[i+1]
+ : 0xff,
+ (i + 2 < bufsiz) ? ((const unsigned char *) buf)[i+2]
+ : 0xff,
+ (i + 3 < bufsiz) ? ((const unsigned char *) buf)[i+3]
+ : 0xff,
+ (i + 4 < bufsiz) ? ((const unsigned char *) buf)[i+4]
+ : 0xff,
+ (i + 5 < bufsiz) ? ((const unsigned char *) buf)[i+5]
+ : 0xff,
+ (i + 6 < bufsiz) ? ((const unsigned char *) buf)[i+6]
+ : 0xff,
+ (i + 7 < bufsiz) ? ((const unsigned char *) buf)[i+7]
+ : 0xff,
+ (i + 8 < bufsiz) ? ((const unsigned char *) buf)[i+8]
+ : 0xff,
+ (i + 9 < bufsiz) ? ((const unsigned char *) buf)[i+9]
+ : 0xff,
+ (i + 10 < bufsiz) ? ((const unsigned char *) buf)[i+10]
+ : 0xff,
+ (i + 11 < bufsiz) ? ((const unsigned char *) buf)[i+11]
+ : 0xff,
+ (i + 12 < bufsiz) ? ((const unsigned char *) buf)[i+12]
+ : 0xff,
+ (i + 13 < bufsiz) ? ((const unsigned char *) buf)[i+13]
+ : 0xff,
+ (i + 14 < bufsiz) ? ((const unsigned char *) buf)[i+14]
+ : 0xff,
+ (i + 15 < bufsiz) ? ((const unsigned char *) buf)[i+15]
+ : 0xff);
+ }
+
+}
+
+static size_t peek_ipc_tx_bufsiz(struct baseband_ipc *ipc,
+ size_t bufsiz)
+{
+ struct baseband_ipc_buf *ipc_buf, *ipc_buf_next;
+ size_t tx_bufsiz;
+
+ pr_debug("peek_ipc_tx_bufsiz\n");
+
+ /* check input */
+ if (!ipc) {
+ pr_err("!ipc\n");
+ return 0;
+ }
+
+ /* acquire tx buffer semaphores */
+ if (down_interruptible(&ipc->buf_sem)) {
+ pr_err("peek_ipc_tx_bufsiz - "
+ "cannot acquire buffer semaphore\n");
+ return 0; /* size_t return type, so do not return a negative errno */
+ }
+
+ /* calculate maximum number of tx buffers which can be sent */
+ tx_bufsiz = 0;
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->tx.buf, list)
+ {
+ pr_debug("peek_ipc_tx_bufsiz - "
+ "ipc_buf %p ipc_buf->offset %x ipc_buf->count %x\n",
+ ipc_buf, ipc_buf->offset, ipc_buf->count);
+ if (ipc_buf->count > bufsiz - tx_bufsiz)
+ break;
+ else
+ tx_bufsiz += ipc_buf->count;
+ }
+
+ /* release tx buffer semaphores */
+ up(&ipc->buf_sem);
+
+ return tx_bufsiz;
+}
+
+static size_t get_ipc_tx_buf(struct baseband_ipc *ipc,
+ void *buf, size_t bufsiz)
+{
+ struct baseband_ipc_buf *ipc_buf, *ipc_buf_next;
+ size_t tx_bufsiz;
+
+ pr_debug("get_ipc_tx_buf\n");
+
+ /* check input */
+ if (!ipc || !buf) {
+ pr_err("!ipc || !buf\n");
+ return 0;
+ }
+ if (!bufsiz)
+ return 0;
+
+ /* acquire tx buffer semaphores */
+ if (down_interruptible(&ipc->buf_sem)) {
+ pr_err("get_ipc_tx_buf - "
+ "cannot acquire buffer semaphore\n");
+ return 0;
+ }
+
+ /* get tx data from tx linked list */
+ tx_bufsiz = 0;
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->tx.buf, list)
+ {
+ pr_debug("get_ipc_tx_buf - "
+ "ipc_buf %p ipc_buf->offset %x ipc_buf->count %x\n",
+ ipc_buf, ipc_buf->offset, ipc_buf->count);
+ pr_debug("get_ipc_tx_buf - "
+ "ipc_buf->data [0] %x [1] %x [2] %x [3] %x\n",
+ ipc_buf->data[0],
+ ipc_buf->data[1],
+ ipc_buf->data[2],
+ ipc_buf->data[3]);
+ if (ipc_buf->count > bufsiz - tx_bufsiz) {
+ /* copy part of tx buffer */
+ memcpy(buf + tx_bufsiz,
+ ipc_buf->data + ipc_buf->offset,
+ bufsiz - tx_bufsiz);
+ ipc_buf->offset += bufsiz - tx_bufsiz;
+ ipc_buf->count -= bufsiz - tx_bufsiz;
+ tx_bufsiz = bufsiz;
+ } else {
+ /* copy all data from tx buffer */
+ memcpy(buf + tx_bufsiz,
+ ipc_buf->data + ipc_buf->offset,
+ ipc_buf->count);
+ tx_bufsiz += ipc_buf->count;
+ ipc_buf->offset = 0;
+ ipc_buf->count = 0;
+ /* add tx buffer to tx free list */
+ list_move_tail(&ipc_buf->list, &ipc->tx_free.buf);
+ wake_up(&ipc->tx_free.wait);
+ }
+ /* check if done */
+ if (tx_bufsiz == bufsiz)
+ break;
+ }
+
+ /* release tx buffer semaphores */
+ up(&ipc->buf_sem);
+
+ return tx_bufsiz;
+}
+
+static size_t put_ipc_rx_buf(struct baseband_ipc *ipc,
+ const void *buf, size_t bufsiz)
+{
+ struct baseband_ipc_buf *ipc_buf, *ipc_buf_next;
+ size_t rx_bufsiz;
+
+ pr_debug("put_ipc_rx_buf\n");
+
+ /* check input */
+ if (!ipc || !buf) {
+ pr_err("!ipc || !buf\n");
+ return 0;
+ }
+ if (!bufsiz)
+ return 0;
+
+ /* acquire rx buffer semaphores */
+retry:
+ if (down_interruptible(&ipc->buf_sem)) {
+ pr_err("put_ipc_rx_buf - "
+ "cannot acquire buffer semaphore\n");
+ return 0;
+ }
+
+ /* put rx data in rx linked list */
+ rx_bufsiz = 0;
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->rx_free.buf, list)
+ {
+ pr_debug("put_ipc_rx_buf - "
+ "ipc_buf %p ipc_buf->offset %x ipc_buf->count %x\n",
+ ipc_buf, ipc_buf->offset, ipc_buf->count);
+ if (sizeof(ipc_buf->data) > bufsiz - rx_bufsiz) {
+ /* partially fill rx free buffer */
+ memcpy(ipc_buf->data,
+ buf + rx_bufsiz,
+ bufsiz - rx_bufsiz);
+ ipc_buf->offset = 0;
+ ipc_buf->count = bufsiz - rx_bufsiz;
+ rx_bufsiz = bufsiz;
+ } else {
+ /* fill entire rx free buffer */
+ memcpy(ipc_buf->data,
+ buf + rx_bufsiz,
+ sizeof(ipc_buf->data));
+ ipc_buf->offset = 0;
+ ipc_buf->count = sizeof(ipc_buf->data);
+ rx_bufsiz += sizeof(ipc_buf->data);
+ }
+ /* add filled rx free buffer to rx linked list */
+ list_move_tail(&ipc_buf->list, &ipc->rx.buf);
+ wake_up(&ipc->rx.wait);
+ /* check if done */
+ if (rx_bufsiz == bufsiz)
+ break;
+ }
+
+ /* release rx buffer semaphores */
+ up(&ipc->buf_sem);
+
+ /* wait for rx free buffer available */
+ if (!rx_bufsiz) {
+ if (wait_event_interruptible(ipc->rx_free.wait,
+ !list_empty(&ipc->rx_free.buf))) {
+ pr_err("put_ipc_rx_buf - "
+ "interrupted wait\n");
+ return 0;
+ }
+ goto retry;
+ }
+
+ return rx_bufsiz;
+
+}
+
+static ssize_t baseband_ipc_file_read(struct baseband_ipc *ipc,
+ struct file *file, char *buf, size_t count, loff_t *pos)
+{
+ struct baseband_ipc_buf *ipc_buf, *ipc_buf_next;
+ size_t read_count;
+
+ pr_debug("baseband_ipc_file_read\n");
+
+ /* check input */
+ if (!ipc) {
+ pr_err("!ipc\n");
+ return -EIO;
+ }
+
+ /* acquire rx buffer semaphores */
+retry:
+ if (down_interruptible(&ipc->buf_sem)) {
+ pr_err("baseband_ipc_file_read - "
+ "cannot acquire buffer semaphore\n");
+ return -ERESTARTSYS;
+ }
+
+ /* get read data from rx linked list */
+ read_count = 0;
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->rx.buf, list)
+ {
+ pr_debug("baseband_ipc_file_read - "
+ "ipc_buf %p ipc_buf->offset %x ipc_buf->count %x\n",
+ ipc_buf, ipc_buf->offset, ipc_buf->count);
+ pr_debug("baseband_ipc_file_read - "
+ "ipc_buf->data [0] %x [1] %x [2] %x [3] %x\n",
+ ipc_buf->data[0],
+ ipc_buf->data[1],
+ ipc_buf->data[2],
+ ipc_buf->data[3]);
+ if (ipc_buf->count > count - read_count) {
+ /* copy part of rx buffer */
+ if (copy_to_user(buf + read_count,
+ ipc_buf->data + ipc_buf->offset,
+ count - read_count)) {
+ pr_err("copy_to_user failed\n");
+ up(&ipc->buf_sem);
+ return -EFAULT;
+ }
+ ipc_buf->offset += count - read_count;
+ ipc_buf->count -= count - read_count;
+ read_count = count;
+ } else {
+ /* copy all data from rx buffer */
+ if (copy_to_user(buf + read_count,
+ ipc_buf->data + ipc_buf->offset,
+ ipc_buf->count)) {
+ pr_err("copy_to_user failed\n");
+ up(&ipc->buf_sem);
+ return -EFAULT;
+ }
+ read_count += ipc_buf->count;
+ ipc_buf->offset = 0;
+ ipc_buf->count = 0;
+ /* add rx buffer to rx free list */
+ list_move_tail(&ipc_buf->list, &ipc->rx_free.buf);
+ wake_up(&ipc->rx_free.wait);
+ }
+ /* check if done */
+ if (read_count == count)
+ break;
+ }
+
+ /* release rx buffer semaphores */
+ up(&ipc->buf_sem);
+
+ /* wait for rx buffer available */
+ if (!read_count) {
+ if (wait_event_interruptible(ipc->rx.wait,
+ !list_empty(&ipc->rx.buf))) {
+ pr_err("baseband_ipc_file_read - "
+ "interrupted wait\n");
+ return -ERESTARTSYS;
+ }
+ goto retry;
+ }
+
+ return read_count;
+}
+
+static ssize_t baseband_ipc_file_write(struct baseband_ipc *ipc,
+ struct file *file, const char *buf, size_t count, loff_t *pos)
+{
+ struct baseband_ipc_buf *ipc_buf, *ipc_buf_next;
+ size_t write_count;
+
+ pr_debug("baseband_ipc_file_write\n");
+
+ /* check input */
+ if (!ipc) {
+ pr_err("!ipc\n");
+ return -EIO;
+ }
+
+ /* acquire tx buffer semaphores */
+retry:
+ if (down_interruptible(&ipc->buf_sem)) {
+ pr_err("baseband_ipc_file_write - "
+ "cannot acquire buffer semaphore\n");
+ return -ERESTARTSYS;
+ }
+
+ /* put write data in tx linked list */
+ write_count = 0;
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->tx_free.buf, list)
+ {
+ pr_debug("baseband_ipc_file_write - "
+ "ipc_buf %p ipc_buf->offset %x ipc_buf->count %x\n",
+ ipc_buf, ipc_buf->offset, ipc_buf->count);
+ if (sizeof(ipc_buf->data) > count - write_count) {
+ /* partially fill tx free buffer */
+ if (copy_from_user(ipc_buf->data,
+ buf + write_count,
+ count - write_count)) {
+ pr_err("copy_from_user failed\n");
+ up(&ipc->buf_sem);
+ return -EFAULT;
+ }
+ ipc_buf->offset = 0;
+ ipc_buf->count = count - write_count;
+ write_count = count;
+ } else {
+ /* fill entire tx free buffer */
+ if (copy_from_user(ipc_buf->data,
+ buf + write_count,
+ sizeof(ipc_buf->data))) {
+ pr_err("copy_from_user failed\n");
+ up(&ipc->buf_sem);
+ return -EFAULT;
+ }
+ ipc_buf->offset = 0;
+ ipc_buf->count = sizeof(ipc_buf->data);
+ write_count += sizeof(ipc_buf->data);
+ }
+ /* add filled tx free buffer to tx linked list */
+ pr_debug("baseband_ipc_file_write - "
+ "ipc_buf->data [0] %x [1] %x [2] %x [3] %x\n",
+ ipc_buf->data[0],
+ ipc_buf->data[1],
+ ipc_buf->data[2],
+ ipc_buf->data[3]);
+ list_move_tail(&ipc_buf->list, &ipc->tx.buf);
+ wake_up(&ipc->tx.wait);
+ /* check if done */
+ if (write_count == count)
+ break;
+ }
+
+ /* release tx buffer semaphores */
+ up(&ipc->buf_sem);
+
+ /* wait for tx buffer available */
+ if (!write_count) {
+ if (wait_event_interruptible(ipc->tx_free.wait,
+ !list_empty(&ipc->tx_free.buf))) {
+ pr_err("baseband_ipc_file_write - "
+ "interrupted wait\n");
+ return -ERESTARTSYS;
+ }
+ goto retry;
+ }
+
+ /* queue ipc transaction work */
+ queue_work(ipc->workqueue, &ipc->work);
+
+ return write_count;
+}
+
+static void baseband_ipc_close(struct baseband_ipc *ipc)
+{
+ struct baseband_ipc_buf *ipc_buf, *ipc_buf_next;
+
+ pr_debug("baseband_ipc_close {\n");
+
+ /* check input */
+ if (!ipc)
+ return;
+
+ /* destroy work queue */
+ if (ipc->workqueue) {
+ pr_debug("destroy workqueue {\n");
+ destroy_workqueue(ipc->workqueue);
+ ipc->workqueue = (struct workqueue_struct *) 0;
+ pr_debug("destroy workqueue }\n");
+ }
+ memset(&ipc->work, 0, sizeof(ipc->work));
+
+ /* destroy wait queues */
+ memset(&ipc->tx_free.wait, 0, sizeof(ipc->tx_free.wait));
+ memset(&ipc->rx_free.wait, 0, sizeof(ipc->rx_free.wait));
+ memset(&ipc->tx.wait, 0, sizeof(ipc->tx.wait));
+ memset(&ipc->rx.wait, 0, sizeof(ipc->rx.wait));
+
+ /* destroy data buffers */
+ kfree(ipc->ipc_tx);
+ ipc->ipc_tx = (unsigned char *) 0;
+ kfree(ipc->ipc_rx);
+ ipc->ipc_rx = (unsigned char *) 0;
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->tx_free.buf, list)
+ {
+ kfree(ipc_buf);
+ }
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->rx_free.buf, list)
+ {
+ kfree(ipc_buf);
+ }
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->tx.buf, list)
+ {
+ kfree(ipc_buf);
+ }
+ list_for_each_entry_safe(ipc_buf, ipc_buf_next, &ipc->rx.buf, list)
+ {
+ kfree(ipc_buf);
+ }
+
+ /* destroy semaphores */
+ memset(&ipc->buf_sem, 0, sizeof(ipc->buf_sem));
+
+ /* free baseband ipc structure */
+ kfree(ipc);
+
+ pr_debug("baseband_ipc_close }\n");
+}
+
+static struct baseband_ipc *baseband_ipc_open(work_func_t work_func,
+ work_func_t rx_work_func,
+ work_func_t tx_work_func)
+{
+ struct baseband_ipc *ipc;
+ struct baseband_ipc_buf *ipc_buf;
+ int i;
+
+ pr_debug("baseband_ipc_open {\n");
+
+ /* allocate baseband ipc structure */
+ ipc = kzalloc(sizeof(struct baseband_ipc), GFP_KERNEL);
+ if (!ipc)
+ return (struct baseband_ipc *) 0;
+
+ /* create semaphores */
+ sema_init(&ipc->buf_sem, 1);
+
+ /* create data buffers */
+ INIT_LIST_HEAD(&ipc->rx.buf);
+ INIT_LIST_HEAD(&ipc->tx.buf);
+ INIT_LIST_HEAD(&ipc->rx_free.buf);
+ INIT_LIST_HEAD(&ipc->tx_free.buf);
+ for (i = 0; i < BASEBAND_IPC_NUM_RX_BUF; i++) {
+ ipc_buf = (struct baseband_ipc_buf *)
+ kzalloc(sizeof(struct baseband_ipc_buf), GFP_KERNEL);
+ if (!ipc_buf) {
+ pr_err("cannot allocate baseband ipc rx buffer #%d\n",
+ i);
+ goto error_exit;
+ }
+ pr_debug("baseband_ipc_open - "
+ "rx_free: ipc_buf %p\n",
+ ipc_buf);
+ list_add_tail(&ipc_buf->list, &ipc->rx_free.buf);
+ }
+ for (i = 0; i < BASEBAND_IPC_NUM_TX_BUF; i++) {
+ ipc_buf = (struct baseband_ipc_buf *)
+ kzalloc(sizeof(struct baseband_ipc_buf), GFP_KERNEL);
+ if (!ipc_buf) {
+ pr_err("cannot allocate baseband ipc tx buffer #%d\n",
+ i);
+ goto error_exit;
+ }
+ pr_debug("baseband_ipc_open - "
+ "tx_free: ipc_buf %p\n",
+ ipc_buf);
+ list_add_tail(&ipc_buf->list, &ipc->tx_free.buf);
+ }
+ ipc->ipc_rx = (unsigned char *) 0;
+ ipc->ipc_tx = (unsigned char *) 0;
+
+ /* create wait queues */
+ init_waitqueue_head(&ipc->rx.wait);
+ init_waitqueue_head(&ipc->tx.wait);
+ init_waitqueue_head(&ipc->rx_free.wait);
+ init_waitqueue_head(&ipc->tx_free.wait);
+
+ /* create work queue */
+ ipc->workqueue = create_singlethread_workqueue
+ ("baseband_usb_chr_ipc_workqueue");
+ if (!ipc->workqueue) {
+ pr_err("cannot create workqueue\n");
+ goto error_exit;
+ }
+ if (work_func)
+ INIT_WORK(&ipc->work, work_func);
+ if (rx_work_func)
+ INIT_WORK(&ipc->rx_work, rx_work_func);
+ if (tx_work_func)
+ INIT_WORK(&ipc->tx_work, tx_work_func);
+
+ pr_debug("baseband_ipc_open }\n");
+ return ipc;
+
+error_exit:
+ baseband_ipc_close(ipc);
+ return (struct baseband_ipc *) 0;
+}
+
+/* usb rx */
+
+static void baseband_usb_chr_rx_urb_comp(struct urb *urb)
+{
+ struct baseband_usb *usb = (struct baseband_usb *) urb->context;
+
+ pr_debug("baseband_usb_chr_rx_urb_comp { urb %p\n", urb);
+
+ /* queue rx urb completion work */
+ queue_work(usb->ipc->workqueue, &usb->ipc->rx_work);
+
+ pr_debug("baseband_usb_chr_rx_urb_comp }\n");
+}
+
+static int baseband_usb_chr_rx_urb_submit(struct baseband_usb *usb)
+{
+ struct urb *urb;
+ void *buf;
+ int err;
+
+ pr_debug("baseband_usb_chr_rx_urb_submit { usb %p\n", usb);
+
+ /* check input */
+ if (usb->usb.rx_urb) {
+ pr_err("previous urb still active\n");
+ return -EBUSY;
+ }
+
+ /* allocate rx urb */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ pr_err("usb_alloc_urb() failed\n");
+ return -ENOMEM;
+ }
+ buf = kzalloc(USB_CHR_RX_BUFSIZ, GFP_ATOMIC);
+ if (!buf) {
+ pr_err("usb buffer kzalloc() failed\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+ usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.in,
+ buf, USB_CHR_RX_BUFSIZ,
+ baseband_usb_chr_rx_urb_comp,
+ usb);
+ urb->transfer_flags = 0;
+
+ /* submit rx urb */
+ usb->usb.rx_urb = urb;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ pr_err("usb_submit_urb() failed - err %d\n", err);
+ usb->usb.rx_urb = (struct urb *) 0;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ return err;
+ }
+
+ pr_debug("baseband_usb_chr_rx_urb_submit }\n");
+ return err;
+}
+
+static void baseband_usb_chr_rx_urb_comp_work(struct work_struct *work)
+{
+ struct baseband_usb *usb = baseband_usb_chr;
+ struct urb *urb = usb->usb.rx_urb;
+ size_t len;
+
+ pr_debug("baseband_usb_chr_rx_urb_comp_work { work %p\n", work);
+
+ /* put rx urb data in rx buffer */
+ if (urb->actual_length) {
+ pr_debug("baseband_usb_chr_rx_urb_comp_work - "
+ "urb->actual_length %d\n", urb->actual_length);
+ len = put_ipc_rx_buf(usb->ipc,
+ urb->transfer_buffer, urb->actual_length);
+ baseband_ipc_dump("baseband_usb_chr_rx_urb_comp_work"
+ " - rx buf ", 0,
+ urb->transfer_buffer, len > 16 ? 16 : len);
+ if (len != urb->actual_length) {
+ pr_err("baseband_usb_chr_rx_urb_comp_work - "
+ "put_ipx_rx_buf() only put %d/%d bytes\n",
+ len, urb->actual_length);
+ }
+ /* increment count of available rx bytes */
+ atomic_add(len, &g_rx_count);
+ }
+
+ /* free rx urb */
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = (void *) 0;
+ usb_free_urb(urb);
+ usb->usb.rx_urb = (struct urb *) 0;
+
+ /* submit next rx urb */
+ baseband_usb_chr_rx_urb_submit(usb);
+
+ pr_debug("baseband_usb_chr_rx_urb_comp_work }\n");
+}
+
+/* usb functions */
+
+static void find_usb_pipe(struct baseband_usb *usb)
+{
+ struct usb_device *usbdev = usb->usb.device;
+ struct usb_interface *intf = usb->usb.interface;
+ unsigned char numendpoint = intf->cur_altsetting->desc.bNumEndpoints;
+ struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
+ unsigned char n;
+
+ for (n = 0; n < numendpoint; n++) {
+ if (usb_endpoint_is_isoc_in(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] isochronous in\n", n);
+ usb->usb.pipe.isoch.in = usb_rcvisocpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_isoc_out(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] isochronous out\n", n);
+ usb->usb.pipe.isoch.out = usb_sndisocpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_bulk_in(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] bulk in\n", n);
+ usb->usb.pipe.bulk.in = usb_rcvbulkpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_bulk_out(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] bulk out\n", n);
+ usb->usb.pipe.bulk.out = usb_sndbulkpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_int_in(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] interrupt in\n", n);
+ usb->usb.pipe.interrupt.in = usb_rcvintpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else if (usb_endpoint_is_int_out(&endpoint[n].desc)) {
+ pr_debug("endpoint[%d] interrupt out\n", n);
+ usb->usb.pipe.interrupt.out = usb_sndintpipe(usbdev,
+ endpoint[n].desc.bEndpointAddress);
+ } else {
+ pr_debug("endpoint[%d] skipped\n", n);
+ }
+ }
+}
+
+static int baseband_usb_driver_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int err;
+
+ pr_debug("%s(%d) { intf %p id %p\n", __func__, __LINE__, intf, id);
+
+ pr_debug("intf->cur_altsetting->desc.bInterfaceNumber %02x\n",
+ intf->cur_altsetting->desc.bInterfaceNumber);
+ pr_debug("intf->cur_altsetting->desc.bAlternateSetting %02x\n",
+ intf->cur_altsetting->desc.bAlternateSetting);
+ pr_debug("intf->cur_altsetting->desc.bNumEndpoints %02x\n",
+ intf->cur_altsetting->desc.bNumEndpoints);
+ pr_debug("intf->cur_altsetting->desc.bInterfaceClass %02x\n",
+ intf->cur_altsetting->desc.bInterfaceClass);
+ pr_debug("intf->cur_altsetting->desc.bInterfaceSubClass %02x\n",
+ intf->cur_altsetting->desc.bInterfaceSubClass);
+ pr_debug("intf->cur_altsetting->desc.bInterfaceProtocol %02x\n",
+ intf->cur_altsetting->desc.bInterfaceProtocol);
+ pr_debug("intf->cur_altsetting->desc.iInterface %02x\n",
+ intf->cur_altsetting->desc.iInterface);
+
+ /* usb interface mismatch */
+ if (baseband_usb_chr_intf !=
+ intf->cur_altsetting->desc.bInterfaceNumber) {
+ pr_debug("%s(%d) } -ENODEV\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ /* usb interface match */
+ baseband_usb_chr->usb.device = interface_to_usbdev(intf);
+ baseband_usb_chr->usb.interface = intf;
+ find_usb_pipe(baseband_usb_chr);
+ baseband_usb_chr->usb.rx_urb = (struct urb *) 0;
+ baseband_usb_chr->usb.tx_urb = (struct urb *) 0;
+ pr_debug("baseband_usb_chr->usb.driver->name %s\n",
+ baseband_usb_chr->usb.driver->name);
+ pr_debug("baseband_usb_chr->usb.device %p\n",
+ baseband_usb_chr->usb.device);
+ pr_debug("baseband_usb_chr->usb.interface %p\n",
+ baseband_usb_chr->usb.interface);
+ pr_debug("baseband_usb_chr->usb.pipe.isoch.in %x\n",
+ baseband_usb_chr->usb.pipe.isoch.in);
+ pr_debug("baseband_usb_chr->usb.pipe.isoch.out %x\n",
+ baseband_usb_chr->usb.pipe.isoch.out);
+ pr_debug("baseband_usb_chr->usb.pipe.bulk.in %x\n",
+ baseband_usb_chr->usb.pipe.bulk.in);
+ pr_debug("baseband_usb_chr->usb.pipe.bulk.out %x\n",
+ baseband_usb_chr->usb.pipe.bulk.out);
+ pr_debug("baseband_usb_chr->usb.pipe.interrupt.in %x\n",
+ baseband_usb_chr->usb.pipe.interrupt.in);
+ pr_debug("baseband_usb_chr->usb.pipe.interrupt.out %x\n",
+ baseband_usb_chr->usb.pipe.interrupt.out);
+
+ /* start usb rx */
+ err = baseband_usb_chr_rx_urb_submit(baseband_usb_chr);
+ if (err < 0) {
+ pr_err("submit rx failed - err %d\n", err);
+ return -ENODEV;
+ }
+
+ pr_debug("%s(%d) }\n", __func__, __LINE__);
+ return 0;
+}
+
+static void baseband_usb_driver_disconnect(struct usb_interface *intf)
+{
+ pr_debug("%s(%d) { intf %p\n", __func__, __LINE__, intf);
+ pr_debug("%s(%d) }\n", __func__, __LINE__);
+}
+
+static char baseband_usb_driver_name[32];
+
+static struct usb_device_id baseband_usb_driver_id_table[2];
+
+static struct usb_driver baseband_usb_driver = {
+ .name = baseband_usb_driver_name,
+ .probe = baseband_usb_driver_probe,
+ .disconnect = baseband_usb_driver_disconnect,
+ .id_table = baseband_usb_driver_id_table,
+};
+
+static void baseband_usb_chr_work(struct work_struct *work)
+{
+ struct baseband_usb *usb = baseband_usb_chr;
+ struct {
+ unsigned char *buf;
+ unsigned int bufsiz_byte;
+ } rx, tx;
+ int ipc_tx_byte;
+ int err;
+
+ pr_debug("baseband_usb_chr_work {\n");
+
+ /* check input */
+ if (!usb || !usb->ipc) {
+ pr_err("baseband_usb_chr_work - "
+ "usb not open\n");
+ return;
+ }
+ if (!usb->usb.device) {
+ pr_err("baseband_usb_chr_work - "
+ "usb device not probed yet\n");
+ mdelay(10);
+ queue_work(usb->ipc->workqueue, &usb->ipc->work);
+ return;
+ }
+
+ /* allocate buffers on first transaction (will be freed on close) */
+ if (!usb->ipc->ipc_rx) {
+ usb->ipc->ipc_rx = kzalloc(USB_CHR_RX_BUFSIZ, GFP_KERNEL);
+ if (!usb->ipc->ipc_rx) {
+ pr_err("baseband_usb_chr_work - "
+ "cannot allocate usb->ipc->ipc_rx\n");
+ return;
+ }
+ }
+ if (!usb->ipc->ipc_tx) {
+ usb->ipc->ipc_tx = kzalloc(USB_CHR_TX_BUFSIZ, GFP_KERNEL);
+ if (!usb->ipc->ipc_tx) {
+ pr_err("baseband_usb_chr_work - "
+ "cannot allocate usb->ipc->ipc_tx\n");
+ return;
+ }
+ }
+
+ /* usb transaction loop */
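+ /* drain every queued tx buffer through the bulk-out pipe; received
+ * data is handled separately by baseband_usb_chr_rx_urb_comp_work() */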
+ rx.buf = usb->ipc->ipc_rx;
+ tx.buf = usb->ipc->ipc_tx;
+ while ((tx.bufsiz_byte = peek_ipc_tx_bufsiz(usb->ipc,
+ USB_CHR_TX_BUFSIZ)) != 0) {
+ get_ipc_tx_buf(usb->ipc, tx.buf, tx.bufsiz_byte);
+ err = usb_bulk_msg(usb->usb.device, usb->usb.pipe.bulk.out,
+ tx.buf, tx.bufsiz_byte, &ipc_tx_byte, USB_CHR_TIMEOUT);
+ if (err < 0) {
+ pr_err("baseband_usb_chr_work - "
+ "usb_bulk_msg err %d\n", err);
+ continue;
+ }
+ if (tx.bufsiz_byte != ipc_tx_byte) {
+ pr_err("tx.bufsiz_byte %d != ipc_tx_byte %d\n",
+ tx.bufsiz_byte, ipc_tx_byte);
+ continue;
+ }
+ }
+
+ pr_debug("baseband_usb_chr_work }\n");
+}
+
+/* usb character file operations */
+
+static int baseband_usb_chr_open(struct inode *inode, struct file *file)
+{
+ pr_debug("baseband_usb_chr_open\n");
+ return 0;
+}
+
+static int baseband_usb_chr_release(struct inode *inode, struct file *file)
+{
+ pr_debug("baseband_usb_chr_release\n");
+ return 0;
+}
+
+static ssize_t baseband_usb_chr_read(struct file *file, char *buf,
+ size_t count, loff_t *pos)
+{
+ ssize_t ret;
+
+ pr_debug("baseband_usb_chr_read\n");
+
+ ret = baseband_ipc_file_read(baseband_usb_chr->ipc,
+ file, buf, count, pos);
+ if (ret > 0) {
+ /* decrement count of available rx bytes */
+ int val = atomic_read(&g_rx_count);
+ pr_debug("baseband_usb_chr_read - read %d unread %d\n",
+ ret, val - ret);
+ atomic_sub(ret, &g_rx_count);
+ }
+ return ret;
+}
+
+static ssize_t baseband_usb_chr_write(struct file *file, const char *buf,
+ size_t count, loff_t *pos)
+{
+ pr_debug("baseband_usb_chr_write\n");
+ return baseband_ipc_file_write(baseband_usb_chr->ipc,
+ file, buf, count, pos);
+}
+
+static long baseband_usb_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ pr_debug("baseband_usb_chr_ioctl\n");
+ switch (cmd) {
+ case TCFLSH:
+ pr_debug("TCFLSH\n");
+ /* flush queued ipc transaction work */
+ flush_workqueue(baseband_usb_chr->ipc->workqueue);
+ return 0;
+ case FIONREAD:
+ pr_debug("FIONREAD\n");
+ /* return count of available rx bytes */
+ {
+ int __user *p = (int __user *) arg;
+ int val = atomic_read(&g_rx_count);
+ if (put_user(val, p))
+ break;
+ }
+ return 0;
+ default:
+ pr_err("unsupported ioctl cmd %x\n", cmd);
+ }
+ return -ENODEV;
+}
+
+static const struct file_operations baseband_usb_chr_fops = {
+ .open = baseband_usb_chr_open,
+ .release = baseband_usb_chr_release,
+ .read = baseband_usb_chr_read,
+ .write = baseband_usb_chr_write,
+ .unlocked_ioctl = baseband_usb_chr_ioctl,
+};
+
+/* usb device driver functions */
+
+static void baseband_usb_close(struct baseband_usb *usb)
+{
+ pr_debug("baseband_usb_close {\n");
+
+ /* check input */
+ if (!usb)
+ return;
+
+ /* close usb driver */
+ if (usb->usb.driver) {
+ pr_debug("close usb driver {\n");
+ usb_deregister(usb->usb.driver);
+ usb->usb.driver = (struct usb_driver *) 0;
+ pr_debug("close usb driver }\n");
+ }
+
+ /* close baseband ipc */
+ if (usb->ipc) {
+ baseband_ipc_close(usb->ipc);
+ usb->ipc = (struct baseband_ipc *) 0;
+ }
+
+ /* free baseband usb structure */
+ kfree(usb);
+
+ pr_debug("baseband_usb_close }\n");
+}
+
+static struct baseband_usb *baseband_usb_open(unsigned int vid,
+ unsigned int pid,
+ unsigned int intf,
+ work_func_t work_func,
+ work_func_t rx_work_func,
+ work_func_t tx_work_func)
+{
+ struct baseband_usb *usb;
+ int err;
+
+ pr_debug("baseband_usb_open {\n");
+
+ /* allocate baseband usb structure */
+ usb = kzalloc(sizeof(struct baseband_usb), GFP_KERNEL);
+ if (!usb)
+ return (struct baseband_usb *) 0;
+ baseband_usb_chr = usb;
+
+ /* open baseband ipc */
+ usb->ipc = baseband_ipc_open(work_func,
+ rx_work_func,
+ tx_work_func);
+ if (!usb->ipc) {
+ pr_err("open baseband ipc failed\n");
+ goto error_exit;
+ }
+
+ /* open usb driver */
+ sprintf(baseband_usb_driver_name,
+ "baseband_usb_%x_%x_%x",
+ vid, pid, intf);
+ baseband_usb_driver_id_table[0].match_flags
+ = USB_DEVICE_ID_MATCH_DEVICE;
+ baseband_usb_driver_id_table[0].idVendor = vid;
+ baseband_usb_driver_id_table[0].idProduct = pid;
+ usb->usb.driver = &baseband_usb_driver;
+ err = usb_register(&baseband_usb_driver);
+ if (err < 0) {
+ pr_err("cannot open usb driver - err %d\n", err);
+ goto error_exit;
+ }
+
+ pr_debug("baseband_usb_open }\n");
+ return usb;
+
+error_exit:
+ baseband_usb_close(usb);
+ baseband_usb_chr = (struct baseband_usb *) 0;
+ return (struct baseband_usb *) 0;
+}
+
+/* module init / exit functions */
+
+static int baseband_usb_chr_init(void)
+{
+ int err;
+
+ pr_debug("baseband_usb_chr_init {\n");
+
+ /* open baseband usb */
+ baseband_usb_chr = baseband_usb_open
+ (baseband_usb_chr_vid,
+ baseband_usb_chr_pid,
+ baseband_usb_chr_intf,
+ baseband_usb_chr_work,
+ baseband_usb_chr_rx_urb_comp_work,
+ (work_func_t) 0);
+ if (!baseband_usb_chr) {
+ pr_err("cannot open baseband usb chr\n");
+ err = -1;
+ goto err1;
+ }
+
+ /* register character device */
+ err = register_chrdev(BASEBAND_USB_CHR_DEV_MAJOR,
+ BASEBAND_USB_CHR_DEV_NAME,
+ &baseband_usb_chr_fops);
+ if (err < 0) {
+ pr_err("cannot register character device - %d\n", err);
+ goto err2;
+ }
+ pr_debug("registered baseband usb character device - major %d\n",
+ BASEBAND_USB_CHR_DEV_MAJOR);
+
+ pr_debug("baseband_usb_chr_init }\n");
+ return 0;
+err2: baseband_usb_close(baseband_usb_chr);
+ baseband_usb_chr = (struct baseband_usb *) 0;
+err1: return err;
+}
+
+static void baseband_usb_chr_exit(void)
+{
+ pr_debug("baseband_usb_chr_exit {\n");
+
+ /* unregister character device */
+ unregister_chrdev(BASEBAND_USB_CHR_DEV_MAJOR,
+ BASEBAND_USB_CHR_DEV_NAME);
+
+ /* close baseband usb */
+ if (baseband_usb_chr) {
+ baseband_usb_close(baseband_usb_chr);
+ baseband_usb_chr = (struct baseband_usb *) 0;
+ }
+
+ pr_debug("baseband_usb_chr_exit }\n");
+}
+
+module_init(baseband_usb_chr_init);
+module_exit(baseband_usb_chr_exit);
+
diff --git a/drivers/usb/serial/baseband_usb_chr.h b/drivers/usb/serial/baseband_usb_chr.h
new file mode 100644
index 000000000000..7935e795a54d
--- /dev/null
+++ b/drivers/usb/serial/baseband_usb_chr.h
@@ -0,0 +1,106 @@
+/*
+ * baseband_usb_chr.h
+ *
+ * USB character driver to communicate with baseband modems.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __BASEBAND_USB_CHR_H__
+#define __BASEBAND_USB_CHR_H__
+
+#define BASEBAND_USB_CHR_DEV_NAME "baseband_usb_chr"
+#define BASEBAND_USB_CHR_DEV_MAJOR 66
+
+#ifndef USB_CHR_RX_BUFSIZ
+#define USB_CHR_RX_BUFSIZ (128*1024)
+#endif /* USB_CHR_RX_BUFSIZ */
+
+#ifndef USB_CHR_TX_BUFSIZ
+#define USB_CHR_TX_BUFSIZ (128*1024)
+#endif /* USB_CHR_TX_BUFSIZ */
+
+#ifndef USB_CHR_TIMEOUT
+#define USB_CHR_TIMEOUT 5000 /* ms */
+#endif /* USB_CHR_TIMEOUT */
+
+#ifndef BASEBAND_IPC_NUM_RX_BUF
+#define BASEBAND_IPC_NUM_RX_BUF 32
+#endif /* BASEBAND_IPC_NUM_RX_BUF */
+
+#ifndef BASEBAND_IPC_NUM_TX_BUF
+#define BASEBAND_IPC_NUM_TX_BUF 16
+#endif /* BASEBAND_IPC_NUM_TX_BUF */
+
+#ifndef BASEBAND_IPC_BUFSIZ
+#define BASEBAND_IPC_BUFSIZ 65536
+#endif /* BASEBAND_IPC_BUFSIZ */
+
+struct baseband_ipc {
+ /* rx / tx data */
+ struct semaphore buf_sem;
+ struct {
+ /* linked list of data buffers */
+ struct list_head buf;
+ /* wait queue of processes trying to access data buffers */
+ wait_queue_head_t wait;
+ } rx, tx, rx_free, tx_free;
+ unsigned char *ipc_rx;
+ unsigned char *ipc_tx;
+ /* work queue
+ * - queued per ipc transaction
+ * - initiated by either:
+ * = interrupt on gpio line (rx data available)
+ * = tx data packet being added to tx linked list
+ */
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ struct work_struct rx_work;
+ struct work_struct tx_work;
+};
+
+struct baseband_ipc_buf {
+ struct list_head list;
+ /* data buffer */
+ unsigned char data[BASEBAND_IPC_BUFSIZ];
+ /* offset of first data byte */
+ size_t offset;
+ /* number of valid data bytes */
+ size_t count;
+};
+
+struct baseband_usb {
+ struct baseband_ipc *ipc;
+ struct {
+ struct usb_driver *driver;
+ struct usb_device *device;
+ struct usb_interface *interface;
+ struct {
+ struct {
+ unsigned int in;
+ unsigned int out;
+ } isoch, bulk, interrupt;
+ } pipe;
+ /* currently active rx urb */
+ struct urb *rx_urb;
+ /* currently active tx urb */
+ struct urb *tx_urb;
+ } usb;
+};
+
+#endif /* __BASEBAND_USB_CHR_H__ */
+
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 24caba79d722..f5dcc4f214fa 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1840,6 +1840,12 @@ UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
0),
+UNUSUAL_DEV( 0x12d1, 0x1446, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+
/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001,
"Minolta",
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 549b960667c8..97e8171020ad 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/stub/Kconfig"
+source "drivers/gpu/ion/Kconfig"
+
config VGASTATE
tristate
default n
@@ -2385,6 +2387,7 @@ config FB_PUV3_UNIGFX
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
+source "drivers/video/tegra/Kconfig"
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8b83129e209c..6ba964094fd4 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
+obj-y += tegra/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 278aeaa92505..19db75bc1097 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -238,6 +238,15 @@ config BACKLIGHT_PWM
If you have a LCD backlight adjustable by PWM, say Y to enable
this driver.
+config BACKLIGHT_TEGRA_PWM
+ bool "Tegra PMx based PWM Backlight Driver"
+ depends on TEGRA_DC
+ help
+ Enable support for the Tegra2 DC PWM backlight.
+
+ If you have a PWM backlight controlled by the DC PM0 or PM1
+ signal on Tegra, say Y to enable this driver.
+
config BACKLIGHT_DA903X
tristate "Backlight Driver for DA9030/DA9034 using WLED"
depends on PMIC_DA903X
@@ -338,6 +347,7 @@ config BACKLIGHT_PCF50633
config BACKLIGHT_AAT2870
tristate "AnalogicTech AAT2870 Backlight"
depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
+ default n
help
If you have a AnalogicTech AAT2870 say Y to enable the
backlight driver.
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index fdd1fc4b2770..debd41c9313e 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
+obj-$(CONFIG_BACKLIGHT_TEGRA_PWM) += tegra_pwm_bl.o
obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index 331f1ef1dad5..203eaa65717e 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/backlight.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/aat2870.h>
struct aat2870_bl_driver_data {
@@ -127,7 +128,7 @@ static const struct backlight_ops aat2870_bl_ops = {
static int aat2870_bl_probe(struct platform_device *pdev)
{
- struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
+ struct aat2870_bl_platform_data *pdata = mfd_get_data(pdev);
struct aat2870_bl_driver_data *aat2870_bl;
struct backlight_device *bd;
struct backlight_properties props;
diff --git a/drivers/video/backlight/tegra_pwm_bl.c b/drivers/video/backlight/tegra_pwm_bl.c
new file mode 100644
index 000000000000..4be691c54d3a
--- /dev/null
+++ b/drivers/video/backlight/tegra_pwm_bl.c
@@ -0,0 +1,177 @@
+/*
+ * linux/drivers/video/backlight/tegra_pwm_bl.c
+ *
+ * Tegra PWM backlight driver
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ * Author: Renuka Apte <rapte@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/tegra_pwm_bl.h>
+#include <mach/dc.h>
+
+struct tegra_pwm_bl_data {
+ struct device *dev;
+ int which_dc;
+ int (*notify)(struct device *, int brightness);
+ struct tegra_dc_pwm_params params;
+ int (*check_fb)(struct device *dev, struct fb_info *info);
+};
+
+static int tegra_pwm_backlight_update_status(struct backlight_device *bl)
+{
+ struct tegra_pwm_bl_data *tbl = dev_get_drvdata(&bl->dev);
+ int brightness = bl->props.brightness;
+ int max = bl->props.max_brightness;
+ struct tegra_dc *dc;
+
+ if (bl->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (tbl->notify)
+ brightness = tbl->notify(tbl->dev, brightness);
+
+ if (brightness > max)
+ dev_err(&bl->dev, "Invalid brightness value: %d max: %d\n",
+ brightness, max);
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ /* map API brightness range from (0~255) to hw range (0~128) */
+ tbl->params.duty_cycle = (brightness * 128) / 255;
+#else
+ tbl->params.duty_cycle = brightness & 0xFF;
+#endif
+
+ /* Call tegra display controller function to update backlight */
+ dc = tegra_dc_get_dc(tbl->which_dc);
+ if (dc)
+ tegra_dc_config_pwm(dc, &tbl->params);
+ else
+ dev_err(&bl->dev, "tegra display controller not available\n");
+
+ return 0;
+}
+
+static int tegra_pwm_backlight_get_brightness(struct backlight_device *bl)
+{
+ return bl->props.brightness;
+}
+
+static int tegra_pwm_backlight_check_fb(struct backlight_device *bl,
+ struct fb_info *info)
+{
+ struct tegra_pwm_bl_data *tbl = dev_get_drvdata(&bl->dev);
+ return !tbl->check_fb || tbl->check_fb(tbl->dev, info);
+}
+
+static const struct backlight_ops tegra_pwm_backlight_ops = {
+ .update_status = tegra_pwm_backlight_update_status,
+ .get_brightness = tegra_pwm_backlight_get_brightness,
+ .check_fb = tegra_pwm_backlight_check_fb,
+};
+
+static int tegra_pwm_backlight_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props;
+ struct platform_tegra_pwm_backlight_data *data;
+ struct backlight_device *bl;
+ struct tegra_pwm_bl_data *tbl;
+ int ret;
+
+ data = pdev->dev.platform_data;
+ if (!data) {
+ dev_err(&pdev->dev, "failed to find platform data\n");
+ return -EINVAL;
+ }
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl) {
+ dev_err(&pdev->dev, "no memory for state\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ tbl->dev = &pdev->dev;
+ tbl->which_dc = data->which_dc;
+ tbl->notify = data->notify;
+ tbl->check_fb = data->check_fb;
+ tbl->params.which_pwm = data->which_pwm;
+ tbl->params.gpio_conf_to_sfio = data->gpio_conf_to_sfio;
+ tbl->params.switch_to_sfio = data->switch_to_sfio;
+ tbl->params.period = data->period;
+ tbl->params.clk_div = data->clk_div;
+ tbl->params.clk_select = data->clk_select;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = data->max_brightness;
+ bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, tbl,
+ &tegra_pwm_backlight_ops, &props);
+ if (IS_ERR(bl)) {
+ dev_err(&pdev->dev, "failed to register backlight\n");
+ ret = PTR_ERR(bl);
+ goto err_bl;
+ }
+
+ bl->props.brightness = data->dft_brightness;
+ backlight_update_status(bl);
+
+ platform_set_drvdata(pdev, bl);
+ return 0;
+
+err_bl:
+ kfree(tbl);
+err_alloc:
+ return ret;
+}
+
+static int tegra_pwm_backlight_remove(struct platform_device *pdev)
+{
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct tegra_pwm_bl_data *tbl = dev_get_drvdata(&bl->dev);
+
+ backlight_device_unregister(bl);
+ kfree(tbl);
+ return 0;
+}
+
+static struct platform_driver tegra_pwm_backlight_driver = {
+ .driver = {
+ .name = "tegra-pwm-bl",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_pwm_backlight_probe,
+ .remove = tegra_pwm_backlight_remove,
+};
+
+static int __init tegra_pwm_backlight_init(void)
+{
+ return platform_driver_register(&tegra_pwm_backlight_driver);
+}
+late_initcall(tegra_pwm_backlight_init);
+
+static void __exit tegra_pwm_backlight_exit(void)
+{
+ platform_driver_unregister(&tegra_pwm_backlight_driver);
+}
+module_exit(tegra_pwm_backlight_exit);
+
+MODULE_DESCRIPTION("Tegra PWM Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tegra-pwm-backlight");
+
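
For reference, the probe above takes everything it needs from platform data declared in <linux/tegra_pwm_bl.h> (the header itself is not part of this hunk). A minimal board-file sketch, using only the field names the probe reads; the numeric values and the PM0 selection are placeholders, not values taken from this patch:

    /* hedged sketch: values are illustrative placeholders */
    #include <linux/platform_device.h>
    #include <linux/tegra_pwm_bl.h>

    static struct platform_tegra_pwm_backlight_data board_bl_data = {
            .which_dc       = 0,    /* drive the backlight from DC0 */
            .which_pwm      = 0,    /* placeholder PM0/PM1 selector */
            .max_brightness = 255,
            .dft_brightness = 200,
            .period         = 0x3f, /* placeholder PWM timing */
            .clk_div        = 1,
            .clk_select     = 0,
            /* .notify, .check_fb, .gpio_conf_to_sfio and .switch_to_sfio
             * are optional hooks read by the probe and left unset here */
    };

    static struct platform_device board_bl_device = {
            .name = "tegra-pwm-bl",  /* must match the driver name above */
            .id   = -1,
            .dev  = {
                    .platform_data = &board_bl_data,
            },
    };

Registering board_bl_device from board init (platform_device_register()) binds it to the "tegra-pwm-bl" driver registered above.
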
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 4f57485f8c54..f3fa4469dedc 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -546,6 +546,9 @@ static int get_dst_timing(unsigned char *block,
static void get_detailed_timing(unsigned char *block,
struct fb_videomode *mode)
{
+ int v_size = V_SIZE;
+ int h_size = H_SIZE;
+
mode->xres = H_ACTIVE;
mode->yres = V_ACTIVE;
mode->pixclock = PIXEL_CLOCK;
@@ -574,11 +577,18 @@ static void get_detailed_timing(unsigned char *block,
}
mode->flag = FB_MODE_IS_DETAILED;
+ /* get aspect ratio */
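+ /* 16:9 = 32:18 and 4:3 = 24:18, so accept h:v within 1:18 of either */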
+ if (h_size * 18 > v_size * 31 && h_size * 18 < v_size * 33)
+ mode->flag |= FB_FLAG_RATIO_16_9;
+ if (h_size * 18 > v_size * 23 && h_size * 18 < v_size * 25)
+ mode->flag |= FB_FLAG_RATIO_4_3;
+
DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
+ DPRINTK("%dmm %dmm ", H_SIZE, V_SIZE);
DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
(VSYNC_POSITIVE) ? "+" : "-");
}
@@ -976,7 +986,7 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
/**
* fb_edid_add_monspecs() - add monitor video modes from E-EDID data
* @edid: 128 byte array with an E-EDID block
- * @spacs: monitor specs to be extended
+ * @specs: monitor specs to be extended
*/
void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
@@ -1001,14 +1011,23 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
while (pos < edid[2]) {
u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7;
pr_debug("Data block %u of %u bytes\n", type, len);
- if (type == 2)
+
+ pos++;
+ if (type == 2) {
for (i = pos; i < pos + len; i++) {
- u8 idx = edid[pos + i] & 0x7f;
+ u8 idx = edid[i] & 0x7f;
svd[svd_n++] = idx;
pr_debug("N%sative mode #%d\n",
- edid[pos + i] & 0x80 ? "" : "on-n", idx);
+ edid[i] & 0x80 ? "" : "on-n", idx);
}
- pos += len + 1;
+ } else if (type == 3 && len >= 3) {
+ u32 ieee_reg = edid[pos] | (edid[pos + 1] << 8) |
+ (edid[pos + 2] << 16);
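+ /* 0x000c03 is the IEEE OUI of HDMI Licensing LLC, so this
+ * vendor-specific data block identifies an HDMI sink */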
+ if (ieee_reg == 0x000c03)
+ specs->misc |= FB_MISC_HDMI;
+ }
+
+ pos += len;
}
block = edid + edid[2];
@@ -1041,10 +1060,8 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
int idx = svd[i - specs->modedb_len - num];
- if (!idx || idx > 63) {
+ if (!idx || idx > (CEA_MODEDB_SIZE - 1)) {
pr_warning("Reserved SVD code %d\n", idx);
- } else if (idx > ARRAY_SIZE(cea_modes) || !cea_modes[idx].xres) {
- pr_warning("Unimplemented SVD code %d\n", idx);
} else {
memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
pr_debug("Adding SVD #%d: %ux%u@%u\n", idx,
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index cb175fe7abc0..34048e2b124a 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -292,64 +292,524 @@ static const struct fb_videomode modedb[] = {
};
#ifdef CONFIG_FB_MODE_HELPERS
-const struct fb_videomode cea_modes[64] = {
- /* #1: 640x480p@59.94/60Hz */
- [1] = {
- NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #3: 720x480p@59.94/60Hz */
- [3] = {
- NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #5: 1920x1080i@59.94/60Hz */
- [5] = {
- NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_INTERLACED, 0,
- },
- /* #7: 720(1440)x480iH@59.94/60Hz */
- [7] = {
- NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
- FB_VMODE_INTERLACED, 0,
- },
- /* #9: 720(1440)x240pH@59.94/60Hz */
- [9] = {
- NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #18: 720x576pH@50Hz */
- [18] = {
- NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #19: 1280x720p@50Hz */
- [19] = {
- NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #20: 1920x1080i@50Hz */
- [20] = {
- NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_INTERLACED, 0,
- },
- /* #32: 1920x1080p@23.98/24Hz */
- [32] = {
- NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #35: (2880)x480p4x@59.94/60Hz */
- [35] = {
- NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
+const struct fb_videomode cea_modes[CEA_MODEDB_SIZE] = {
+ {},
+ /* 1: 640x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 640, .yres = 480, .pixclock = 39721,
+ .left_margin = 48, .right_margin = 16,
+ .upper_margin = 33, .lower_margin = 1,
+ .hsync_len = 96, .vsync_len = 2,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 2: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 3: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 4: 1280x720p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 5: 1920x1080i @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 6: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 7: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 8: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 9: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 10: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 11: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 12: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 13: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 14: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 15: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 16: 1920x1080p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 17: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 18: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 19: 1280x720p @ 50Hz */
+ {.refresh = 50, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 20: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 21: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 22: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 23: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 24: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 25: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 26: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 27: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 28: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 29: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 30: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 31: 1920x1080p @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 32: 1920x1080p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 638,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 33: 1920x1080p @ 25Hz */
+ {.refresh = 25, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 34: 1920x1080p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 35: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 36: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 37: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 38: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 39: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13888,
+ .left_margin = 184, .right_margin = 32,
+ .upper_margin = 57, .lower_margin = 2,
+ .hsync_len = 168, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 40: 1920x1080i @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 41: 1280x720p @ 100Hz */
+ {.refresh = 100, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 42: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 43: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 44: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 45: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 46: 1920x1080i @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 47: 1280x720p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 48: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 49: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 50: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 51: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 52: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 53: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 54: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 55: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 56: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 57: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 58: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 59: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 60: 1280x720p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1280, .yres = 720, .pixclock = 16835,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 61: 1280x720p @ 25Hz */
+ {.refresh = 25, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 2420,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 62: 1280x720p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 63: 1920x1080p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 64: 1920x1080p @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
};
+EXPORT_SYMBOL(cea_modes);
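/*
 * A quick, illustrative sanity check on the table above: fb_videomode
 * .pixclock holds a pixel period in picoseconds, not a frequency, so the
 * standard PICOS2KHZ() helper from <linux/fb.h> recovers the CEA clocks:
 *
 *   PICOS2KHZ(13468) ~= 74250 kHz   (74.25 MHz: 1080p24/25/30)
 *   PICOS2KHZ(6734)  ~= 148500 kHz  (148.5 MHz: 1080p50/60, 1080i100/120)
 *   PICOS2KHZ(3367)  ~= 297000 kHz  (297 MHz: 1080p100/120)
 */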
-const struct fb_videomode vesa_modes[] = {
+const struct fb_videomode vesa_modes[VESA_MODEDB_SIZE] = {
/* 0 640x350-85 VESA */
{ NULL, 85, 640, 350, 31746, 96, 32, 60, 32, 64, 3,
FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA},
@@ -781,7 +1241,7 @@ void fb_var_to_videomode(struct fb_videomode *mode,
mode->upper_margin = var->upper_margin;
mode->lower_margin = var->lower_margin;
mode->sync = var->sync;
- mode->vmode = var->vmode & FB_VMODE_MASK;
+ mode->vmode = var->vmode & (FB_VMODE_MASK | FB_VMODE_STEREO_MASK);
mode->flag = FB_MODE_IS_FROM_VAR;
mode->refresh = 0;
@@ -826,7 +1286,7 @@ void fb_videomode_to_var(struct fb_var_screeninfo *var,
var->hsync_len = mode->hsync_len;
var->vsync_len = mode->vsync_len;
var->sync = mode->sync;
- var->vmode = mode->vmode & FB_VMODE_MASK;
+ var->vmode = mode->vmode & (FB_VMODE_MASK | FB_VMODE_STEREO_MASK);
}
/**
@@ -995,7 +1455,7 @@ int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head)
if (!modelist)
return -ENOMEM;
modelist->mode = *mode;
- list_add(&modelist->list, head);
+ list_add_tail(&modelist->list, head);
}
return 0;
}
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
new file mode 100644
index 000000000000..041afeda3b86
--- /dev/null
+++ b/drivers/video/tegra/Kconfig
@@ -0,0 +1,125 @@
+if ARCH_TEGRA
+
+comment "NVIDIA Tegra Display Driver options"
+
+config TEGRA_GRHOST
+ tristate "Tegra graphics host driver"
+ help
+ Driver for the Tegra graphics host hardware.
+
+config TEGRA_DC
+ tristate "Tegra Display Contoller"
+ depends on ARCH_TEGRA && TEGRA_GRHOST
+ select FB_MODE_HELPERS
+ select I2C
+ help
+ Tegra display controller support.
+
+config TEGRA_OVERLAY
+ tristate "Tegra Overlay Device Node"
+ depends on TEGRA_DC && !TEGRA_DC_EXTENSIONS
+ help
+ Device node for multi-client overlay support.
+
+config FB_TEGRA
+ tristate "Tegra Framebuffer driver"
+ depends on TEGRA_DC && FB = y
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default FB
+ help
+ Framebuffer device support for the Tegra display controller.
+
+config TEGRA_DC_EXTENSIONS
+ bool "Tegra Display Controller Extensions"
+ depends on TEGRA_DC
+ default y
+ help
+ This exposes support for extended capabilities of the Tegra display
+ controller to userspace drivers.
+
+config TEGRA_NVMAP
+ bool "Tegra GPU memory management driver (nvmap)"
+ default y
+ help
+ Say Y here to include the memory management driver for the Tegra
+ GPU, multimedia and display subsystems.
+
+config NVMAP_RECLAIM_UNPINNED_VM
+ bool "Virtualize IOVMM memory in nvmap"
+ depends on TEGRA_NVMAP && TEGRA_IOVMM
+ default y
+ help
+ Say Y here to enable nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other handles. This can
+ allow a larger I/O virtual memory space than would normally be
+ supported by the hardware, at a slight cost in performance.
+
+config NVMAP_ALLOW_SYSMEM
+ bool "Allow physical system memory to be used by nvmap"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ Say Y here to allow nvmap to use physical system memory (i.e.,
+ shared with the operating system but not translated through
+ an IOVMM device) for allocations.
+
+config NVMAP_HIGHMEM_ONLY
+ bool "Use only HIGHMEM for nvmap"
+ depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM) && HIGHMEM
+ default n
+ help
+ Say Y here to restrict nvmap system memory allocations (both
+ physical system memory and IOVMM) to just HIGHMEM pages.
+
+config NVMAP_CARVEOUT_KILLER
+ bool "Reclaim nvmap carveout by killing processes"
+ depends on TEGRA_NVMAP
+ default n
+ help
+ Say Y here to allow the system to reclaim carveout space by killing
+ processes. This will kill the largest, lowest-priority consumers
+ first.
+
+config NVMAP_CARVEOUT_COMPACTOR
+ bool "Compact carveout when it gets fragmented"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ When a carveout allocation attempt fails, the compactor defragments
+ the heap and retries the failed allocation.
+ Say Y here to let nvmap keep carveout fragmentation under control.
+
+
+config NVMAP_VPR
+ bool "Enable VPR Heap."
+ depends on TEGRA_NVMAP
+ default n
+ help
+ Say Y here to enable the Video Protection Region (VPR) heap.
+ If unsure, say N.
+
+config TEGRA_DSI
+ bool "Enable DSI panel."
+ default n
+ help
+ Say Y here to enable the DSI panel.
+
+config NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+ bool "Convert carveout to IOVMM"
+ depends on TEGRA_NVMAP && TEGRA_IOVMM_SMMU
+ default y
+ help
+ Say Y here to force conversion of carveout memory requests into
+ I/O virtual memory requests.
+
+config TEGRA_NVHDCP
+ bool "Support NVHDCP content protection on HDMI"
+ default n
+ help
+ Say Y here to support NVHDCP upstream and downstream protocols. This
+ requires a correctly fused chip to negotiate keys.
+
+endif
+
diff --git a/drivers/video/tegra/Makefile b/drivers/video/tegra/Makefile
new file mode 100644
index 000000000000..2299a3c5eaa3
--- /dev/null
+++ b/drivers/video/tegra/Makefile
@@ -0,0 +1,5 @@
+GCOV_PROFILE := y
+obj-$(CONFIG_TEGRA_GRHOST) += host/
+obj-$(CONFIG_TEGRA_DC) += dc/
+obj-$(CONFIG_FB_TEGRA) += fb.o
+obj-$(CONFIG_TEGRA_NVMAP) += nvmap/
diff --git a/drivers/video/tegra/dc/Makefile b/drivers/video/tegra/dc/Makefile
new file mode 100644
index 000000000000..efef0d65053e
--- /dev/null
+++ b/drivers/video/tegra/dc/Makefile
@@ -0,0 +1,11 @@
+GCOV_PROFILE := y
+obj-y += dc.o
+obj-y += rgb.o
+obj-y += hdmi.o
+obj-$(CONFIG_TEGRA_NVHDCP) += nvhdcp.o
+obj-y += edid.o
+obj-y += nvsd.o
+obj-y += dsi.o
+obj-y += dc_sysfs.o
+obj-$(CONFIG_TEGRA_OVERLAY) += overlay.o
+obj-$(CONFIG_TEGRA_DC_EXTENSIONS) += ext/
diff --git a/drivers/video/tegra/dc/dc.c b/drivers/video/tegra/dc/dc.c
new file mode 100644
index 000000000000..1ae7633eb9da
--- /dev/null
+++ b/drivers/video/tegra/dc/dc.c
@@ -0,0 +1,2989 @@
+/*
+ * drivers/video/tegra/dc/dc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/backlight.h>
+#include <video/tegrafb.h>
+#include <drm/drm_fixed.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/mc.h>
+#include <linux/nvhost.h>
+#include <mach/latency_allowance.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "overlay.h"
+#include "nvsd.h"
+
+#define TEGRA_CRC_LATCHED_DELAY 34
+
+#define DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL 0x01000000
+#define DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL 0x0
+
+#ifndef CONFIG_TEGRA_FPGA_PLATFORM
+#define ALL_UF_INT (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)
+#else
+/* ignore underflows on simulation and FPGA platforms */
+#define ALL_UF_INT (0)
+#endif
+
+static int no_vsync;
+
+module_param_named(no_vsync, no_vsync, int, S_IRUGO | S_IWUSR);
+
+static int use_dynamic_emc = 1;
+
+module_param_named(use_dynamic_emc, use_dynamic_emc, int, S_IRUGO | S_IWUSR);
+
+struct tegra_dc *tegra_dcs[TEGRA_MAX_DC];
+
+DEFINE_MUTEX(tegra_dc_lock);
+DEFINE_MUTEX(shared_lock);
+
+static const struct {
+ bool h;
+ bool v;
+} can_filter[] = {
+ /* Window A has no filtering */
+ { false, false },
+ /* Window B has both H and V filtering */
+ { true, true },
+ /* Window C has only V filtering */
+ { false, true },
+};
+static inline bool win_use_v_filter(const struct tegra_dc_win *win)
+{
+ return can_filter[win->idx].v &&
+ win->h.full != dfixed_const(win->out_h);
+}
+static inline bool win_use_h_filter(const struct tegra_dc_win *win)
+{
+ return can_filter[win->idx].h &&
+ win->w.full != dfixed_const(win->out_w);
+}
+
+static inline int tegra_dc_fmt_bpp(int fmt)
+{
+ switch (fmt) {
+ case TEGRA_WIN_FMT_P1:
+ return 1;
+
+ case TEGRA_WIN_FMT_P2:
+ return 2;
+
+ case TEGRA_WIN_FMT_P4:
+ return 4;
+
+ case TEGRA_WIN_FMT_P8:
+ return 8;
+
+ case TEGRA_WIN_FMT_B4G4R4A4:
+ case TEGRA_WIN_FMT_B5G5R5A:
+ case TEGRA_WIN_FMT_B5G6R5:
+ case TEGRA_WIN_FMT_AB5G5R5:
+ return 16;
+
+ case TEGRA_WIN_FMT_B8G8R8A8:
+ case TEGRA_WIN_FMT_R8G8B8A8:
+ case TEGRA_WIN_FMT_B6x2G6x2R6x2A8:
+ case TEGRA_WIN_FMT_R6x2G6x2B6x2A8:
+ return 32;
+
+ /* for planar formats, size of the Y plane, 8bit */
+ case TEGRA_WIN_FMT_YCbCr420P:
+ case TEGRA_WIN_FMT_YUV420P:
+ case TEGRA_WIN_FMT_YCbCr422P:
+ case TEGRA_WIN_FMT_YUV422P:
+ return 8;
+
+ case TEGRA_WIN_FMT_YCbCr422:
+ case TEGRA_WIN_FMT_YUV422:
+ case TEGRA_WIN_FMT_YCbCr422R:
+ case TEGRA_WIN_FMT_YUV422R:
+ case TEGRA_WIN_FMT_YCbCr422RA:
+ case TEGRA_WIN_FMT_YUV422RA:
+ /* FIXME: need to know the bpp of these formats */
+ return 0;
+ }
+ return 0;
+}
+
+static inline bool tegra_dc_is_yuv_planar(int fmt)
+{
+ switch (fmt) {
+ case TEGRA_WIN_FMT_YUV420P:
+ case TEGRA_WIN_FMT_YCbCr420P:
+ case TEGRA_WIN_FMT_YCbCr422P:
+ case TEGRA_WIN_FMT_YUV422P:
+ return true;
+ }
+ return false;
+}
+
+#define DUMP_REG(a) do { \
+ snprintf(buff, sizeof(buff), "%-32s\t%03x\t%08lx\n", \
+ #a, a, tegra_dc_readl(dc, a)); \
+ print(data, buff); \
+ } while (0)
+
+static void _dump_regs(struct tegra_dc *dc, void *data,
+ void (*print)(void *data, const char *str))
+{
+ int i;
+ char buff[256];
+
+ tegra_dc_io_start(dc);
+ clk_enable(dc->clk);
+
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE);
+ DUMP_REG(DC_CMD_INT_STATUS);
+ DUMP_REG(DC_CMD_INT_MASK);
+ DUMP_REG(DC_CMD_INT_ENABLE);
+ DUMP_REG(DC_CMD_INT_TYPE);
+ DUMP_REG(DC_CMD_INT_POLARITY);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+ DUMP_REG(DC_CMD_STATE_ACCESS);
+ DUMP_REG(DC_CMD_STATE_CONTROL);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+ DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+ DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY);
+ DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+ DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+ DUMP_REG(DC_DISP_REF_TO_SYNC);
+ DUMP_REG(DC_DISP_SYNC_WIDTH);
+ DUMP_REG(DC_DISP_BACK_PORCH);
+ DUMP_REG(DC_DISP_DISP_ACTIVE);
+ DUMP_REG(DC_DISP_FRONT_PORCH);
+ DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+ DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+ DUMP_REG(DC_DISP_M0_CONTROL);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_DISP_DI_CONTROL);
+ DUMP_REG(DC_DISP_PP_CONTROL);
+ DUMP_REG(DC_DISP_PP_SELECT_A);
+ DUMP_REG(DC_DISP_PP_SELECT_B);
+ DUMP_REG(DC_DISP_PP_SELECT_C);
+ DUMP_REG(DC_DISP_PP_SELECT_D);
+ DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+ DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+ DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+ DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+ DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+ DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+ DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+ DUMP_REG(DC_DISP_BORDER_COLOR);
+ DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+ DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+ DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+ DUMP_REG(DC_DISP_CURSOR_POSITION);
+ DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+ DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+ DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0C_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+ DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+ DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+
+
+ for (i = 0; i < 3; i++) {
+ print(data, "\n");
+ snprintf(buff, sizeof(buff), "WINDOW %c:\n", 'A' + i);
+ print(data, buff);
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_WIN_WIN_OPTIONS);
+ DUMP_REG(DC_WIN_BYTE_SWAP);
+ DUMP_REG(DC_WIN_BUFFER_CONTROL);
+ DUMP_REG(DC_WIN_COLOR_DEPTH);
+ DUMP_REG(DC_WIN_POSITION);
+ DUMP_REG(DC_WIN_SIZE);
+ DUMP_REG(DC_WIN_PRESCALED_SIZE);
+ DUMP_REG(DC_WIN_H_INITIAL_DDA);
+ DUMP_REG(DC_WIN_V_INITIAL_DDA);
+ DUMP_REG(DC_WIN_DDA_INCREMENT);
+ DUMP_REG(DC_WIN_LINE_STRIDE);
+ DUMP_REG(DC_WIN_BUF_STRIDE);
+ DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+ DUMP_REG(DC_WIN_BLEND_NOKEY);
+ DUMP_REG(DC_WIN_BLEND_1WIN);
+ DUMP_REG(DC_WIN_BLEND_2WIN_X);
+ DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+ DUMP_REG(DC_WIN_BLEND_3WIN_XY);
+ DUMP_REG(DC_WINBUF_START_ADDR);
+ DUMP_REG(DC_WINBUF_START_ADDR_U);
+ DUMP_REG(DC_WINBUF_START_ADDR_V);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+ DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+ DUMP_REG(DC_WIN_CSC_YOF);
+ DUMP_REG(DC_WIN_CSC_KYRGB);
+ DUMP_REG(DC_WIN_CSC_KUR);
+ DUMP_REG(DC_WIN_CSC_KVR);
+ DUMP_REG(DC_WIN_CSC_KUG);
+ DUMP_REG(DC_WIN_CSC_KVG);
+ DUMP_REG(DC_WIN_CSC_KUB);
+ DUMP_REG(DC_WIN_CSC_KVB);
+ }
+
+ DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE2);
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY2);
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA2);
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE2);
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT5);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_COM_PM1_CONTROL);
+ DUMP_REG(DC_COM_PM1_DUTY_CYCLE);
+ DUMP_REG(DC_DISP_SD_CONTROL);
+
+ clk_disable(dc->clk);
+ tegra_dc_io_end(dc);
+}
+
+#undef DUMP_REG
+
+#ifdef DEBUG
+static void dump_regs_print(void *data, const char *str)
+{
+ struct tegra_dc *dc = data;
+ dev_dbg(&dc->ndev->dev, "%s", str);
+}
+
+static void dump_regs(struct tegra_dc *dc)
+{
+ _dump_regs(dc, dc, dump_regs_print);
+}
+#else /* !DEBUG */
+
+static void dump_regs(struct tegra_dc *dc) {}
+
+#endif /* DEBUG */
+
+#ifdef CONFIG_DEBUG_FS
+
+static void dbg_regs_print(void *data, const char *str)
+{
+ struct seq_file *s = data;
+
+ seq_printf(s, "%s", str);
+}
+
+#undef DUMP_REG
+
+static int dbg_dc_show(struct seq_file *s, void *unused)
+{
+ struct tegra_dc *dc = s->private;
+
+ _dump_regs(dc, s, dbg_regs_print);
+
+ return 0;
+}
+
+
+static int dbg_dc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_dc_show, inode->i_private);
+}
+
+static const struct file_operations regs_fops = {
+ .open = dbg_dc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dbg_dc_mode_show(struct seq_file *s, void *unused)
+{
+ struct tegra_dc *dc = s->private;
+ struct tegra_dc_mode *m;
+
+ mutex_lock(&dc->lock);
+ m = &dc->mode;
+ seq_printf(s,
+ "pclk: %d\n"
+ "h_ref_to_sync: %d\n"
+ "v_ref_to_sync: %d\n"
+ "h_sync_width: %d\n"
+ "v_sync_width: %d\n"
+ "h_back_porch: %d\n"
+ "v_back_porch: %d\n"
+ "h_active: %d\n"
+ "v_active: %d\n"
+ "h_front_porch: %d\n"
+ "v_front_porch: %d\n"
+ "stereo_mode: %d\n",
+ m->pclk, m->h_ref_to_sync, m->v_ref_to_sync,
+ m->h_sync_width, m->v_sync_width,
+ m->h_back_porch, m->v_back_porch,
+ m->h_active, m->v_active,
+ m->h_front_porch, m->v_front_porch,
+ m->stereo_mode);
+ mutex_unlock(&dc->lock);
+ return 0;
+}
+
+static int dbg_dc_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_dc_mode_show, inode->i_private);
+}
+
+static const struct file_operations mode_fops = {
+ .open = dbg_dc_mode_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dbg_dc_stats_show(struct seq_file *s, void *unused)
+{
+ struct tegra_dc *dc = s->private;
+
+ mutex_lock(&dc->lock);
+ seq_printf(s,
+ "underflows: %llu\n"
+ "underflows_a: %llu\n"
+ "underflows_b: %llu\n"
+ "underflows_c: %llu\n",
+ dc->stats.underflows,
+ dc->stats.underflows_a,
+ dc->stats.underflows_b,
+ dc->stats.underflows_c);
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+static int dbg_dc_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_dc_stats_show, inode->i_private);
+}
+
+static const struct file_operations stats_fops = {
+ .open = dbg_dc_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __devexit tegra_dc_remove_debugfs(struct tegra_dc *dc)
+{
+ if (dc->debugdir)
+ debugfs_remove_recursive(dc->debugdir);
+ dc->debugdir = NULL;
+}
+
+static void tegra_dc_create_debugfs(struct tegra_dc *dc)
+{
+ struct dentry *retval;
+
+ dc->debugdir = debugfs_create_dir(dev_name(&dc->ndev->dev), NULL);
+ if (!dc->debugdir)
+ goto remove_out;
+
+ retval = debugfs_create_file("regs", S_IRUGO, dc->debugdir, dc,
+ &regs_fops);
+ if (!retval)
+ goto remove_out;
+
+ retval = debugfs_create_file("mode", S_IRUGO, dc->debugdir, dc,
+ &mode_fops);
+ if (!retval)
+ goto remove_out;
+
+ retval = debugfs_create_file("stats", S_IRUGO, dc->debugdir, dc,
+ &stats_fops);
+ if (!retval)
+ goto remove_out;
+
+ return;
+remove_out:
+ dev_err(&dc->ndev->dev, "could not create debugfs\n");
+ tegra_dc_remove_debugfs(dc);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+static inline void tegra_dc_create_debugfs(struct tegra_dc *dc) { }
+static inline void __devexit tegra_dc_remove_debugfs(struct tegra_dc *dc) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int tegra_dc_set(struct tegra_dc *dc, int index)
+{
+ int ret = 0;
+
+ mutex_lock(&tegra_dc_lock);
+ if (index >= TEGRA_MAX_DC) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (dc != NULL && tegra_dcs[index] != NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tegra_dcs[index] = dc;
+
+out:
+ mutex_unlock(&tegra_dc_lock);
+
+ return ret;
+}
+
+unsigned int tegra_dc_has_multiple_dc(void)
+{
+ unsigned int idx;
+ unsigned int cnt = 0;
+ struct tegra_dc *dc;
+
+ mutex_lock(&tegra_dc_lock);
+ for (idx = 0; idx < TEGRA_MAX_DC; idx++)
+ cnt += ((dc = tegra_dcs[idx]) != NULL && dc->enabled) ? 1 : 0;
+ mutex_unlock(&tegra_dc_lock);
+
+ return (cnt > 1);
+}
+
+struct tegra_dc *tegra_dc_get_dc(unsigned idx)
+{
+ if (idx < TEGRA_MAX_DC)
+ return tegra_dcs[idx];
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(tegra_dc_get_dc);
+
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win)
+{
+ if (win >= dc->n_windows)
+ return NULL;
+
+ return &dc->windows[win];
+}
+EXPORT_SYMBOL(tegra_dc_get_window);
+
+static int get_topmost_window(u32 *depths, unsigned long *wins)
+{
+ int idx, best = -1;
+
+ for_each_set_bit(idx, wins, DC_N_WINDOWS) {
+ if (best == -1 || depths[idx] < depths[best])
+ best = idx;
+ }
+ clear_bit(best, wins);
+ return best;
+}
+
+bool tegra_dc_get_connected(struct tegra_dc *dc)
+{
+ return dc->connected;
+}
+EXPORT_SYMBOL(tegra_dc_get_connected);
+
+static u32 blend_topwin(u32 flags)
+{
+ if (flags & TEGRA_WIN_FLAG_BLEND_COVERAGE)
+ return BLEND(NOKEY, ALPHA, 0xff, 0xff);
+ else if (flags & TEGRA_WIN_FLAG_BLEND_PREMULT)
+ return BLEND(NOKEY, PREMULT, 0xff, 0xff);
+ else
+ return BLEND(NOKEY, FIX, 0xff, 0xff);
+}
+
+static u32 blend_2win(int idx, unsigned long behind_mask, u32 *flags, int xy)
+{
+ int other;
+
+ for (other = 0; other < DC_N_WINDOWS; other++) {
+ if (other != idx && (xy-- == 0))
+ break;
+ }
+ if (BIT(other) & behind_mask)
+ return blend_topwin(flags[idx]);
+ else if (flags[other])
+ return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+ else
+ return BLEND(NOKEY, FIX, 0x00, 0x00);
+}
+
+static u32 blend_3win(int idx, unsigned long behind_mask, u32 *flags)
+{
+ unsigned long infront_mask;
+ int first;
+
+ infront_mask = ~(behind_mask | BIT(idx));
+ infront_mask &= (BIT(DC_N_WINDOWS) - 1);
+ first = ffs(infront_mask) - 1;
+
+ if (!infront_mask)
+ return blend_topwin(flags[idx]);
+ else if (behind_mask && first != -1 && flags[first])
+ return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+ else
+ return BLEND(NOKEY, FIX, 0x0, 0x0);
+}
+
+static void tegra_dc_set_blending(struct tegra_dc *dc, struct tegra_dc_blend *blend)
+{
+ unsigned long mask = BIT(DC_N_WINDOWS) - 1;
+
+ while (mask) {
+ int idx = get_topmost_window(blend->z, &mask);
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+ DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+ DC_WIN_BLEND_1WIN);
+ tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 0),
+ DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 1),
+ DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, blend_3win(idx, mask, blend->flags),
+ DC_WIN_BLEND_3WIN_XY);
+ }
+}
+
+static void tegra_dc_init_csc_defaults(struct tegra_dc_csc *csc)
+{
+ csc->yof = 0x00f0;
+ csc->kyrgb = 0x012a;
+ csc->kur = 0x0000;
+ csc->kvr = 0x0198;
+ csc->kug = 0x039b;
+ csc->kvg = 0x032f;
+ csc->kub = 0x0204;
+ csc->kvb = 0x0000;
+}
+
+static void tegra_dc_set_csc(struct tegra_dc *dc, struct tegra_dc_csc *csc)
+{
+ tegra_dc_writel(dc, csc->yof, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, csc->kyrgb, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, csc->kur, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, csc->kvr, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, csc->kug, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, csc->kvg, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, csc->kub, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, csc->kvb, DC_WIN_CSC_KVB);
+}
+
+int tegra_dc_update_csc(struct tegra_dc *dc, int win_idx)
+{
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -EFAULT;
+ }
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << win_idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_set_csc(dc, &dc->windows[win_idx].csc);
+
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_csc);
+
+static void tegra_dc_init_lut_defaults(struct tegra_dc_lut *lut)
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ lut->r[i] = lut->g[i] = lut->b[i] = (u8)i;
+}
+
+static int tegra_dc_loop_lut(struct tegra_dc *dc,
+ struct tegra_dc_win *win,
+ int(*lambda)(struct tegra_dc *dc, int i, u32 rgb))
+{
+ struct tegra_dc_lut *lut = &win->lut;
+ struct tegra_dc_lut *global_lut = &dc->fb_lut;
+ int i;
+ for (i = 0; i < 256; i++) {
+
+ u32 r = (u32)lut->r[i];
+ u32 g = (u32)lut->g[i];
+ u32 b = (u32)lut->b[i];
+
+ if (!(win->ppflags & TEGRA_WIN_PPFLAG_CP_FBOVERRIDE)) {
+ r = (u32)global_lut->r[r];
+ g = (u32)global_lut->g[g];
+ b = (u32)global_lut->b[b];
+ }
+
+ if (!lambda(dc, i, r | (g<<8) | (b<<16)))
+ return 0;
+ }
+ return 1;
+}
+
+static int tegra_dc_lut_isdefaults_lambda(struct tegra_dc *dc, int i, u32 rgb)
+{
+ if (rgb != (i | (i<<8) | (i<<16)))
+ return 0;
+ return 1;
+}
+
+static int tegra_dc_set_lut_setreg_lambda(struct tegra_dc *dc, int i, u32 rgb)
+{
+ tegra_dc_writel(dc, rgb, DC_WIN_COLOR_PALETTE(i));
+ return 1;
+}
+
+static void tegra_dc_set_lut(struct tegra_dc *dc, struct tegra_dc_win *win)
+{
+ unsigned long val = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_loop_lut(dc, win, tegra_dc_set_lut_setreg_lambda);
+
+ if (win->ppflags & TEGRA_WIN_PPFLAG_CP_ENABLE)
+ val |= CP_ENABLE;
+ else
+ val &= ~CP_ENABLE;
+
+ tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
+}
+
+static int tegra_dc_update_winlut(struct tegra_dc *dc, int win_idx, int fbovr)
+{
+ struct tegra_dc_win *win = &dc->windows[win_idx];
+
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -EFAULT;
+ }
+
+ if (fbovr > 0)
+ win->ppflags |= TEGRA_WIN_PPFLAG_CP_FBOVERRIDE;
+ else if (fbovr == 0)
+ win->ppflags &= ~TEGRA_WIN_PPFLAG_CP_FBOVERRIDE;
+
+ if (!tegra_dc_loop_lut(dc, win, tegra_dc_lut_isdefaults_lambda))
+ win->ppflags |= TEGRA_WIN_PPFLAG_CP_ENABLE;
+ else
+ win->ppflags &= ~TEGRA_WIN_PPFLAG_CP_ENABLE;
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << win_idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_set_lut(dc, win);
+
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+int tegra_dc_update_lut(struct tegra_dc *dc, int win_idx, int fboverride)
+{
+ if (win_idx > -1)
+ return tegra_dc_update_winlut(dc, win_idx, fboverride);
+
+ for (win_idx = 0; win_idx < DC_N_WINDOWS; win_idx++) {
+ int err = tegra_dc_update_winlut(dc, win_idx, fboverride);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_lut);
+
+static void tegra_dc_set_scaling_filter(struct tegra_dc *dc)
+{
+ unsigned i;
+ unsigned v0 = 128;
+ unsigned v1 = 0;
+ /* linear horizontal and vertical filters */
+ for (i = 0; i < 16; i++) {
+ tegra_dc_writel(dc, (v1 << 16) | (v0 << 8),
+ DC_WIN_H_FILTER_P(i));
+
+ tegra_dc_writel(dc, v0,
+ DC_WIN_V_FILTER_P(i));
+ v0 -= 8;
+ v1 += 8;
+ }
+}
+
+static void tegra_dc_set_latency_allowance(struct tegra_dc *dc,
+ struct tegra_dc_win *w)
+{
+ /* windows A, B, C for first and second display */
+ static const enum tegra_la_id la_id_tab[2][3] = {
+ /* first display */
+ { TEGRA_LA_DISPLAY_0A, TEGRA_LA_DISPLAY_0B,
+ TEGRA_LA_DISPLAY_0C },
+ /* second display */
+ { TEGRA_LA_DISPLAY_0AB, TEGRA_LA_DISPLAY_0BB,
+ TEGRA_LA_DISPLAY_0CB },
+ };
+ /* window B V-filter tap for first and second display. */
+ static const enum tegra_la_id vfilter_tab[2] = {
+ TEGRA_LA_DISPLAY_1B, TEGRA_LA_DISPLAY_1BB,
+ };
+ unsigned long bw;
+
+ BUG_ON(dc->ndev->id >= ARRAY_SIZE(la_id_tab));
+ BUG_ON(dc->ndev->id >= ARRAY_SIZE(vfilter_tab));
+ BUG_ON(w->idx >= ARRAY_SIZE(*la_id_tab));
+
+ bw = w->new_bandwidth;
+
+ /* tegra_dc_get_bandwidth() treats V filter windows as double
+ * bandwidth, but LA has a separate client for the V filter */
+ if (w->idx == 1 && win_use_v_filter(w))
+ bw /= 2;
+
+ /* our bandwidth is in bytes/sec, but LA takes MBps.
+ * round bandwidth up to the next whole MBps */
+ bw = bw / 1000000 + 1;
+
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ tegra_set_latency_allowance(la_id_tab[dc->ndev->id][w->idx], bw);
+ /* if window B, also set the 1B client for the 2-tap V filter. */
+ if (w->idx == 1)
+ tegra_set_latency_allowance(vfilter_tab[dc->ndev->id], bw);
+#endif
+
+ w->bandwidth = w->new_bandwidth;
+}
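/*
 * For illustration, with hypothetical numbers: a window needing 74,250,000
 * bytes/sec is programmed as 74250000 / 1000000 + 1 = 75 MBps, i.e. the
 * bytes/sec figure is rounded up to the next whole MBps before being handed
 * to tegra_set_latency_allowance().  For window B with the V filter active,
 * the figure is halved first, since the separate 1B client above receives
 * the same allowance for the second fetch.
 */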
+
+static unsigned int tegra_dc_windows_is_overlapped(struct tegra_dc_win *a,
+ struct tegra_dc_win *b)
+{
+ if (!WIN_IS_ENABLED(a) || !WIN_IS_ENABLED(b))
+ return 0;
+
+ /* because memory accesses to load the fifo can overlap, we only care
+ * whether windows overlap vertically */
+ return ((a->out_y + a->out_h > b->out_y) && (a->out_y <= b->out_y)) ||
+ ((b->out_y + b->out_h > a->out_y) && (b->out_y <= a->out_y));
+}
+
+static unsigned long tegra_dc_find_max_bandwidth(struct tegra_dc_win *wins[],
+ int n)
+{
+ unsigned i;
+ unsigned j;
+ unsigned overlap_count;
+ unsigned max_bw = 0;
+
+ WARN_ONCE(n > 3, "Code assumes at most 3 windows, bandwidth is likely"
+ "inaccurate.\n");
+
+ /* If we had a large number of windows, we would compute an adjacency
+ * graph representing 2-window overlaps, find all cliques in the graph,
+ * assign a bandwidth to each clique, and then select the clique with
+ * the maximum bandwidth. But because we have at most 3 windows,
+ * implementing a proper Bron-Kerbosch algorithm would be overkill;
+ * brute force will suffice.
+ *
+ * Thus: find the maximum bandwidth for either a single window or a
+ * pair of windows, and count the number of window-pair overlaps. If
+ * there are three pairs, all 3 windows overlap.
+ */
+
+ overlap_count = 0;
+ for (i = 0; i < n; i++) {
+ unsigned int bw1;
+
+ if (wins[i] == NULL)
+ continue;
+ bw1 = wins[i]->new_bandwidth;
+ if (bw1 > max_bw)
+ /* Single window */
+ max_bw = bw1;
+
+ for (j = i + 1; j < n; j++) {
+ if (wins[j] == NULL)
+ continue;
+ if (tegra_dc_windows_is_overlapped(wins[i], wins[j])) {
+ unsigned int bw2 = wins[j]->new_bandwidth;
+ if (bw1 + bw2 > max_bw)
+ /* Window pair overlaps */
+ max_bw = bw1 + bw2;
+ overlap_count++;
+ }
+ }
+ }
+
+ if (overlap_count == 3)
+ /* All three windows overlap */
+ max_bw = wins[0]->new_bandwidth + wins[1]->new_bandwidth +
+ wins[2]->new_bandwidth;
+
+ return max_bw;
+}
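/*
 * A worked example of the brute force above, with hypothetical bandwidths:
 * if windows A/B/C need 100/60/40 units, A overlaps B vertically and B
 * overlaps C, but A and C do not overlap, then the candidate sums are 100
 * (single), 160 (A+B) and 100 (B+C); overlap_count is 2, so 160 is
 * returned.  Only if all three pairs overlapped (overlap_count == 3) would
 * the result be 100 + 60 + 40 = 200.
 */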
+
+/*
+ * Calculate peak EMC bandwidth for each enabled window:
+ *   pixel_clock * win_bpp * (use_v_filter ? 2 : 1) * H_scale_factor *
+ *   (windows_tiling ? 2 : 1)
+ *
+ * notes:
+ * (*) We use a 2-tap V filter, so bandwidth is doubled when the V filter
+ *     is in use
+ * (*) Tiling mode on T30 and DDR3 requires double bandwidth
+ */
+static unsigned long tegra_dc_calc_win_bandwidth(struct tegra_dc *dc,
+ struct tegra_dc_win *w)
+{
+ unsigned long ret;
+ int tiled_windows_bw_multiplier;
+ unsigned long bpp;
+
+ if (!WIN_IS_ENABLED(w))
+ return 0;
+
+ if (dfixed_trunc(w->w) == 0 || dfixed_trunc(w->h) == 0 ||
+ w->out_w == 0 || w->out_h == 0)
+ return 0;
+
+ tiled_windows_bw_multiplier =
+ tegra_mc_get_tiled_memory_bandwidth_multiplier();
+
+ /* all of Tegra's YUV formats (420 and 422) fetch 2 bytes per pixel,
+ * but the size reported by tegra_dc_fmt_bpp for the planar versions
+ * covers only the luma plane. */
+ bpp = tegra_dc_is_yuv_planar(w->fmt) ?
+ 2 * tegra_dc_fmt_bpp(w->fmt) : tegra_dc_fmt_bpp(w->fmt);
+ /* perform calculations on the most significant bits of the pixel clock
+ * to prevent unsigned long overflow. */
+ ret = (unsigned long)(dc->pixel_clk >> 16) *
+ bpp / 8 *
+ (win_use_v_filter(w) ? 2 : 1) * dfixed_trunc(w->w) / w->out_w *
+ (WIN_IS_TILED(w) ? tiled_windows_bw_multiplier : 1);
+
+ /*
+  * Assuming 48% efficiency: i.e. if we calculate we need 70MBps, we
+  * will request 147MBps from EMC.
+  */
+ ret = ret * 2 + ret / 10;
+
+ /* if overflowed */
+ if (ret > (1UL << 31))
+ return ULONG_MAX;
+
+ return ret << 16; /* restore the scaling we did above */
+}
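/*
 * A minimal sketch of the arithmetic above, with hypothetical values (a
 * full-screen 32 bpp window at a 148.5 MHz pixel clock, no V filter, no
 * scaling, linear layout).  example_win_bandwidth() is illustrative only
 * and is not used by the driver.
 */
static unsigned long example_win_bandwidth(void)
{
	unsigned long pixel_clk = 148500000UL;	/* hypothetical 1080p60 pclk */
	unsigned long ret;

	/* work on pclk >> 16 to keep the intermediate products small */
	ret = (pixel_clk >> 16) * 32 / 8;	/* 2265 * 4 = 9060 */
	/* 48% efficiency assumption: request roughly 2.1x the raw need */
	ret = ret * 2 + ret / 10;		/* 19026 */
	return ret << 16;			/* ~1.25 GB/s */
}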
+
+unsigned long tegra_dc_get_bandwidth(struct tegra_dc_win *windows[], int n)
+{
+ int i;
+
+ BUG_ON(n > DC_N_WINDOWS);
+
+ /* emc rate and latency allowance both need to know the per-window
+ * bandwidths */
+ for (i = 0; i < n; i++) {
+ struct tegra_dc_win *w = windows[i];
+ if (w)
+ w->new_bandwidth = tegra_dc_calc_win_bandwidth(w->dc, w);
+ }
+
+ return tegra_dc_find_max_bandwidth(windows, n);
+}
+
+/* program bandwidth needs if higher than old bandwidth */
+static void tegra_dc_increase_bandwidth(struct tegra_dc *dc)
+{
+ unsigned i;
+
+ if (dc->emc_clk_rate < dc->new_emc_clk_rate) {
+ dc->emc_clk_rate = dc->new_emc_clk_rate;
+ clk_set_rate(dc->emc_clk, dc->emc_clk_rate);
+ }
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ struct tegra_dc_win *w = &dc->windows[i];
+ if (w->bandwidth < w->new_bandwidth && w->new_bandwidth != 0)
+ tegra_dc_set_latency_allowance(dc, w);
+ }
+}
+
+/* program the current bandwidth */
+static void tegra_dc_program_bandwidth(struct tegra_dc *dc)
+{
+ unsigned i;
+
+ if (dc->emc_clk_rate != dc->new_emc_clk_rate) {
+ dc->emc_clk_rate = dc->new_emc_clk_rate;
+ clk_set_rate(dc->emc_clk, dc->emc_clk_rate);
+ }
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ struct tegra_dc_win *w = &dc->windows[i];
+ if (w->bandwidth != w->new_bandwidth && w->new_bandwidth != 0)
+ tegra_dc_set_latency_allowance(dc, w);
+ }
+}
+
+static int tegra_dc_set_dynamic_emc(struct tegra_dc_win *windows[], int n)
+{
+ unsigned long new_rate;
+ struct tegra_dc *dc;
+
+ if (!use_dynamic_emc)
+ return 0;
+
+ dc = windows[0]->dc;
+
+ /* calculate the new rate based on this POST */
+ new_rate = tegra_dc_get_bandwidth(windows, n);
+ new_rate = EMC_BW_TO_FREQ(new_rate);
+
+ if (tegra_dc_has_multiple_dc())
+ new_rate = ULONG_MAX;
+
+ dc->new_emc_clk_rate = new_rate;
+
+ return 0;
+}
+
+static inline u32 compute_dda_inc(fixed20_12 in, unsigned out_int,
+ bool v, unsigned Bpp)
+{
+ /*
+ * min(round((prescaled_size_in_pixels - 1) * 0x1000 /
+ * (post_scaled_size_in_pixels - 1)), MAX)
+ * Where the value of MAX is as follows:
+ * For V_DDA_INCREMENT: 15.0 (0xF000)
+ * For H_DDA_INCREMENT: 4.0 (0x4000) for 4 Bytes/pix formats.
+ * 8.0 (0x8000) for 2 Bytes/pix formats.
+ */
+
+ fixed20_12 out = dfixed_init(out_int);
+ u32 dda_inc;
+ int max;
+
+ if (v) {
+ max = 15;
+ } else {
+ switch (Bpp) {
+ default:
+ WARN_ON_ONCE(1);
+ /* fallthrough */
+ case 4:
+ max = 4;
+ break;
+ case 2:
+ max = 8;
+ break;
+ }
+ }
+
+ out.full = max_t(u32, out.full - dfixed_const(1), dfixed_const(1));
+ in.full -= dfixed_const(1);
+
+ dda_inc = dfixed_div(in, out);
+
+ dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+ return dda_inc;
+}
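/*
 * A worked example of the formula above, with hypothetical values:
 * downscaling a 1920 pixel wide source into a 1280 pixel window in a
 * 4 bytes/pixel format gives
 *
 *   dda_inc ~= (1920 - 1) * 0x1000 / (1280 - 1) ~= 0x1801
 *
 * i.e. a step of roughly 1.5 source pixels per output pixel, well under
 * the 4 Bpp horizontal limit of 0x4000 (4.0).  A 1920 -> 240 downscale
 * would compute roughly 8.0 and be clamped to that 0x4000 limit.
 */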
+
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+ return dfixed_frac(in);
+}
+
+/* does not support updating windows on multiple dcs in one call */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n)
+{
+ struct tegra_dc *dc;
+ unsigned long update_mask = GENERAL_ACT_REQ;
+ unsigned long val;
+ bool update_blend = false;
+ int i;
+
+ dc = windows[0]->dc;
+
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -EFAULT;
+ }
+
+ if (no_vsync)
+ tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE, DC_CMD_STATE_ACCESS);
+ else
+ tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS);
+
+ for (i = 0; i < n; i++) {
+ struct tegra_dc_win *win = windows[i];
+ unsigned h_dda;
+ unsigned v_dda;
+ fixed20_12 h_offset, v_offset;
+ bool invert_h = (win->flags & TEGRA_WIN_FLAG_INVERT_H) != 0;
+ bool invert_v = (win->flags & TEGRA_WIN_FLAG_INVERT_V) != 0;
+ bool yuvp = tegra_dc_is_yuv_planar(win->fmt);
+ unsigned Bpp = tegra_dc_fmt_bpp(win->fmt) / 8;
+ /* Bytes per pixel of bandwidth, used for dda_inc calculation */
+ unsigned Bpp_bw = Bpp * (yuvp ? 2 : 1);
+ const bool filter_h = win_use_h_filter(win);
+ const bool filter_v = win_use_v_filter(win);
+
+ if (win->z != dc->blend.z[win->idx]) {
+ dc->blend.z[win->idx] = win->z;
+ update_blend = true;
+ }
+ if ((win->flags & TEGRA_WIN_BLEND_FLAGS_MASK) !=
+ dc->blend.flags[win->idx]) {
+ dc->blend.flags[win->idx] =
+ win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
+ update_blend = true;
+ }
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << win->idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ if (!no_vsync)
+ update_mask |= WIN_A_ACT_REQ << win->idx;
+
+ if (!WIN_IS_ENABLED(win)) {
+ tegra_dc_writel(dc, 0, DC_WIN_WIN_OPTIONS);
+ continue;
+ }
+
+ tegra_dc_writel(dc, win->fmt, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ tegra_dc_writel(dc,
+ V_POSITION(win->out_y) | H_POSITION(win->out_x),
+ DC_WIN_POSITION);
+ tegra_dc_writel(dc,
+ V_SIZE(win->out_h) | H_SIZE(win->out_w),
+ DC_WIN_SIZE);
+ tegra_dc_writel(dc,
+ V_PRESCALED_SIZE(dfixed_trunc(win->h)) |
+ H_PRESCALED_SIZE(dfixed_trunc(win->w) * Bpp),
+ DC_WIN_PRESCALED_SIZE);
+
+ h_dda = compute_dda_inc(win->w, win->out_w, false, Bpp_bw);
+ v_dda = compute_dda_inc(win->h, win->out_h, true, Bpp_bw);
+ tegra_dc_writel(dc, V_DDA_INC(v_dda) | H_DDA_INC(h_dda),
+ DC_WIN_DDA_INCREMENT);
+ h_dda = compute_initial_dda(win->x);
+ v_dda = compute_initial_dda(win->y);
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr,
+ DC_WINBUF_START_ADDR);
+
+ if (!yuvp) {
+ tegra_dc_writel(dc, win->stride, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr_u,
+ DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr_v,
+ DC_WINBUF_START_ADDR_V);
+ tegra_dc_writel(dc,
+ LINE_STRIDE(win->stride) |
+ UV_LINE_STRIDE(win->stride_uv),
+ DC_WIN_LINE_STRIDE);
+ }
+
+ h_offset = win->x;
+ if (invert_h) {
+ h_offset.full += win->w.full - dfixed_const(1);
+ }
+
+ v_offset = win->y;
+ if (invert_v) {
+ v_offset.full += win->h.full - dfixed_const(1);
+ }
+
+ tegra_dc_writel(dc, dfixed_trunc(h_offset) * Bpp,
+ DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, dfixed_trunc(v_offset),
+ DC_WINBUF_ADDR_V_OFFSET);
+
+ if (WIN_IS_TILED(win))
+ tegra_dc_writel(dc,
+ DC_WIN_BUFFER_ADDR_MODE_TILE |
+ DC_WIN_BUFFER_ADDR_MODE_TILE_UV,
+ DC_WIN_BUFFER_ADDR_MODE);
+ else
+ tegra_dc_writel(dc,
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV,
+ DC_WIN_BUFFER_ADDR_MODE);
+
+ val = WIN_ENABLE;
+ if (yuvp)
+ val |= CSC_ENABLE;
+ else if (tegra_dc_fmt_bpp(win->fmt) < 24)
+ val |= COLOR_EXPAND;
+
+ if (win->ppflags & TEGRA_WIN_PPFLAG_CP_ENABLE)
+ val |= CP_ENABLE;
+
+ if (filter_h)
+ val |= H_FILTER_ENABLE;
+ if (filter_v)
+ val |= V_FILTER_ENABLE;
+
+ if (invert_h)
+ val |= H_DIRECTION_DECREMENT;
+ if (invert_v)
+ val |= V_DIRECTION_DECREMENT;
+
+ tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
+
+ win->dirty = no_vsync ? 0 : 1;
+
+ dev_dbg(&dc->ndev->dev, "%s():idx=%d z=%d x=%d y=%d w=%d h=%d "
+ "out_x=%u out_y=%u out_w=%u out_h=%u "
+ "fmt=%d yuvp=%d Bpp=%u filter_h=%d filter_v=%d",
+ __func__, win->idx, win->z,
+ dfixed_trunc(win->x), dfixed_trunc(win->y),
+ dfixed_trunc(win->w), dfixed_trunc(win->h),
+ win->out_x, win->out_y, win->out_w, win->out_h,
+ win->fmt, yuvp, Bpp, filter_h, filter_v);
+ }
+
+ if (update_blend) {
+ tegra_dc_set_blending(dc, &dc->blend);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (!no_vsync)
+ dc->windows[i].dirty = 1;
+ update_mask |= WIN_A_ACT_REQ << i;
+ }
+ }
+
+ tegra_dc_set_dynamic_emc(windows, n);
+ tegra_dc_increase_bandwidth(dc);
+
+ tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+ tegra_dc_writel(dc, FRAME_END_INT | V_BLANK_INT, DC_CMD_INT_STATUS);
+ if (!no_vsync) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val |= (FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ } else {
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val &= ~(FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ }
+
+ tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+
+ if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ tegra_dc_writel(dc, NC_HOST_TRIG, DC_CMD_STATE_CONTROL);
+
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_windows);
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc, int i)
+{
+ return dc->syncpt[i].id;
+}
+EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
+
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc, int i)
+{
+ u32 max;
+
+ mutex_lock(&dc->lock);
+ max = nvhost_syncpt_incr_max(&dc->ndev->host->syncpt,
+ dc->syncpt[i].id, ((dc->enabled) ? 1 : 0));
+ dc->syncpt[i].max = max;
+ mutex_unlock(&dc->lock);
+
+ return max;
+}
+
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, int i, u32 val)
+{
+ mutex_lock(&dc->lock);
+ if (dc->enabled)
+ while (dc->syncpt[i].min < val) {
+ dc->syncpt[i].min++;
+ nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt,
+ dc->syncpt[i].id);
+ }
+ mutex_unlock(&dc->lock);
+}
+
+static bool tegra_dc_windows_are_clean(struct tegra_dc_win *windows[],
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (windows[i]->dirty)
+ return false;
+ }
+
+ return true;
+}
+
+/* does not support syncing windows on multiple dcs in one call */
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n)
+{
+ if (n < 1 || n > DC_N_WINDOWS)
+ return -EINVAL;
+
+ if (!windows[0]->dc->enabled)
+ return -EFAULT;
+
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ /* Don't want to timeout on simulator */
+ return wait_event_interruptible(windows[0]->dc->wq,
+ tegra_dc_windows_are_clean(windows, n));
+#else
+ return wait_event_interruptible_timeout(windows[0]->dc->wq,
+ tegra_dc_windows_are_clean(windows, n),
+ HZ);
+#endif
+}
+EXPORT_SYMBOL(tegra_dc_sync_windows);
+
+static unsigned long tegra_dc_clk_get_rate(struct tegra_dc *dc)
+{
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ return clk_get_rate(dc->clk);
+#else
+ return 27000000;
+#endif
+}
+
+static unsigned long tegra_dc_pclk_round_rate(struct tegra_dc *dc, int pclk)
+{
+ unsigned long rate;
+ unsigned long div;
+
+ rate = tegra_dc_clk_get_rate(dc);
+
+ div = DIV_ROUND_CLOSEST(rate * 2, pclk);
+
+ if (div < 2)
+ return 0;
+
+ return rate * 2 / div;
+}
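/*
 * A worked example with hypothetical rates: with the DC clock at 216 MHz
 * and a requested pclk of 25.2 MHz, div = DIV_ROUND_CLOSEST(432 MHz,
 * 25.2 MHz) = 17 and the function returns 432 / 17 ~= 25.41 MHz, about
 * +0.8% and therefore inside the -1%/+9% window that
 * tegra_dc_program_mode() checks.  Working with rate * 2 is what allows
 * half-step shift clock dividers, an effective 8.5 in this case.
 */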
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk)
+{
+ int pclk;
+
+ if (dc->out->type == TEGRA_DC_OUT_RGB) {
+ unsigned long rate;
+ struct clk *parent_clk =
+ clk_get_sys(NULL, dc->out->parent_clk ? : "pll_p");
+
+ if (clk_get_parent(clk) != parent_clk)
+ clk_set_parent(clk, parent_clk);
+
+ if (parent_clk != clk_get_sys(NULL, "pll_p")) {
+ struct clk *base_clk = clk_get_parent(parent_clk);
+
+ /* Assuming either pll_d or pll_d2 is used */
+ rate = dc->mode.pclk * 2;
+
+ if (rate != clk_get_rate(base_clk))
+ clk_set_rate(base_clk, rate);
+ }
+ }
+
+ if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+ unsigned long rate;
+ struct clk *parent_clk =
+ clk_get_sys(NULL, dc->out->parent_clk ? : "pll_d_out0");
+ struct clk *base_clk = clk_get_parent(parent_clk);
+
+ /* needs to match tegra_dc_hdmi_supported_modes[]
+ and tegra_pll_d_freq_table[] */
+ if (dc->mode.pclk > 70000000)
+ rate = 594000000;
+ else if (dc->mode.pclk > 25200000)
+ rate = 216000000;
+ else
+ rate = 504000000;
+
+ if (rate != clk_get_rate(base_clk))
+ clk_set_rate(base_clk, rate);
+
+ if (clk_get_parent(clk) != parent_clk)
+ clk_set_parent(clk, parent_clk);
+ }
+
+ if (dc->out->type == TEGRA_DC_OUT_DSI) {
+ unsigned long rate;
+ struct clk *parent_clk;
+ struct clk *base_clk;
+
+ if (clk == dc->clk) {
+ parent_clk = clk_get_sys(NULL,
+ dc->out->parent_clk ? : "pll_d_out0");
+ base_clk = clk_get_parent(parent_clk);
+ tegra_clk_cfg_ex(base_clk,
+ TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);
+ } else {
+ if (dc->pdata->default_out->dsi->dsi_instance) {
+ parent_clk = clk_get_sys(NULL,
+ dc->out->parent_clk ? : "pll_d2_out0");
+ base_clk = clk_get_parent(parent_clk);
+ tegra_clk_cfg_ex(base_clk,
+ TEGRA_CLK_PLLD_CSI_OUT_ENB, 1);
+ } else {
+ parent_clk = clk_get_sys(NULL,
+ dc->out->parent_clk ? : "pll_d_out0");
+ base_clk = clk_get_parent(parent_clk);
+ tegra_clk_cfg_ex(base_clk,
+ TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);
+ }
+ }
+
+ rate = dc->mode.pclk * 2;
+ if (rate != clk_get_rate(base_clk))
+ clk_set_rate(base_clk, rate);
+
+ if (clk_get_parent(clk) != parent_clk)
+ clk_set_parent(clk, parent_clk);
+ }
+
+ pclk = tegra_dc_pclk_round_rate(dc, dc->mode.pclk);
+ tegra_dvfs_set_rate(clk, pclk);
+}
+
+/* return non-zero if constraint is violated */
+static int calc_h_ref_to_sync(const struct tegra_dc_mode *mode, int *href)
+{
+ long a, b;
+
+ /* Constraint 5: H_REF_TO_SYNC >= 0 */
+ a = 0;
+
+ /* Constraint 6: H_FRONT_PORCH >= (H_REF_TO_SYNC + 1) */
+ b = mode->h_front_porch - 1;
+
+ /* Constraint 1: H_REF_TO_SYNC + H_SYNC_WIDTH + H_BACK_PORCH > 11 */
+ if (a + mode->h_sync_width + mode->h_back_porch <= 11)
+ a = 1 + 11 - mode->h_sync_width - mode->h_back_porch;
+ /* check Constraint 1 and 6 */
+ if (a > b)
+ return 1;
+
+ /* Constraint 4: H_SYNC_WIDTH >= 1 */
+ if (mode->h_sync_width < 1)
+ return 4;
+
+ /* Constraint 7: H_DISP_ACTIVE >= 16 */
+ if (mode->h_active < 16)
+ return 7;
+
+ if (href) {
+ if (b > a && a % 2)
+ *href = a + 1; /* use smallest even value */
+ else
+ *href = a; /* even or only possible value */
+ }
+
+ return 0;
+}
+
+static int calc_v_ref_to_sync(const struct tegra_dc_mode *mode, int *vref)
+{
+ long a;
+ a = 1; /* Constraint 5: V_REF_TO_SYNC >= 1 */
+
+ /* Constraint 2: V_REF_TO_SYNC + V_SYNC_WIDTH + V_BACK_PORCH > 1 */
+ if (a + mode->v_sync_width + mode->v_back_porch <= 1)
+ a = 1 + 1 - mode->v_sync_width - mode->v_back_porch;
+
+ /* Constraint 6 */
+ if (mode->v_front_porch < a + 1)
+ a = mode->v_front_porch - 1;
+
+ /* Constraint 4: V_SYNC_WIDTH >= 1 */
+ if (mode->v_sync_width < 1)
+ return 4;
+
+ /* Constraint 7: V_DISP_ACTIVE >= 16 */
+ if (mode->v_active < 16)
+ return 7;
+
+ if (vref)
+ *vref = a;
+ return 0;
+}
+
+static int calc_ref_to_sync(struct tegra_dc_mode *mode)
+{
+ int ret;
+ ret = calc_h_ref_to_sync(mode, &mode->h_ref_to_sync);
+ if (ret)
+ return ret;
+ ret = calc_v_ref_to_sync(mode, &mode->v_ref_to_sync);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool check_ref_to_sync(struct tegra_dc_mode *mode)
+{
+ /* Constraint 1: H_REF_TO_SYNC + H_SYNC_WIDTH + H_BACK_PORCH > 11. */
+ if (mode->h_ref_to_sync + mode->h_sync_width + mode->h_back_porch <= 11)
+ return false;
+
+ /* Constraint 2: V_REF_TO_SYNC + V_SYNC_WIDTH + V_BACK_PORCH > 1. */
+ if (mode->v_ref_to_sync + mode->v_sync_width + mode->v_back_porch <= 1)
+ return false;
+
+ /* Constraint 3: V_FRONT_PORCH + V_SYNC_WIDTH + V_BACK_PORCH > 1
+ * (vertical blank). */
+ if (mode->v_front_porch + mode->v_sync_width + mode->v_back_porch <= 1)
+ return false;
+
+ /* Constraint 4: V_SYNC_WIDTH >= 1; H_SYNC_WIDTH >= 1. */
+ if (mode->v_sync_width < 1 || mode->h_sync_width < 1)
+ return false;
+
+ /* Constraint 5: V_REF_TO_SYNC >= 1; H_REF_TO_SYNC >= 0. */
+ if (mode->v_ref_to_sync < 1 || mode->h_ref_to_sync < 0)
+ return false;
+
+ /* Constraint 6: V_FRONT_PORCH >= (V_REF_TO_SYNC + 1);
+ * H_FRONT_PORCH >= (H_REF_TO_SYNC + 1). */
+ if (mode->v_front_porch < mode->v_ref_to_sync + 1 ||
+ mode->h_front_porch < mode->h_ref_to_sync + 1)
+ return false;
+
+ /* Constraint 7: H_DISP_ACTIVE >= 16; V_DISP_ACTIVE >= 16. */
+ if (mode->h_active < 16 || mode->v_active < 16)
+ return false;
+
+ return true;
+}
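/*
 * A worked example of the constraints above, for a typical CEA 1080p
 * timing (h: active 1920, front 88, sync 44, back 148; v: active 1080,
 * front 4, sync 5, back 36): calc_h_ref_to_sync() picks H_REF_TO_SYNC = 0
 * (0 + 44 + 148 > 11 already holds) and calc_v_ref_to_sync() picks
 * V_REF_TO_SYNC = 1.  check_ref_to_sync() then passes: 192 > 11, 42 > 1,
 * 45 > 1, both sync widths >= 1, both front porches exceed ref_to_sync + 1
 * and both actives are >= 16.  For HDMI the driver forces
 * h_ref = v_ref = 1 instead, which also satisfies every constraint here.
 */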
+
+#ifdef DEBUG
+/* return in 1000ths of a Hertz */
+static int calc_refresh(struct tegra_dc *dc, const struct tegra_dc_mode *m)
+{
+ long h_total, v_total, refresh;
+ h_total = m->h_active + m->h_front_porch + m->h_back_porch +
+ m->h_sync_width;
+ v_total = m->v_active + m->v_front_porch + m->v_back_porch +
+ m->v_sync_width;
+ refresh = dc->pixel_clk / h_total;
+ refresh *= 1000;
+ refresh /= v_total;
+ return refresh;
+}
+
+static void print_mode(struct tegra_dc *dc,
+ const struct tegra_dc_mode *mode, const char *note)
+{
+ if (mode) {
+ int refresh = calc_refresh(dc, mode);
+ dev_info(&dc->ndev->dev, "%s():MODE:%dx%d@%d.%03uHz pclk=%d\n",
+ note ? note : "",
+ mode->h_active, mode->v_active,
+ refresh / 1000, refresh % 1000,
+ mode->pclk);
+ }
+}
+#else /* !DEBUG */
+static inline void print_mode(struct tegra_dc *dc,
+ const struct tegra_dc_mode *mode, const char *note) { }
+#endif /* DEBUG */
+
+static inline void enable_dc_irq(unsigned int irq)
+{
+#ifndef CONFIG_TEGRA_FPGA_PLATFORM
+ enable_irq(irq);
+#else
+ /* Always disable DC interrupts on FPGA. */
+ disable_irq(irq);
+#endif
+}
+
+static inline void disable_dc_irq(unsigned int irq)
+{
+ disable_irq(irq);
+}
+
+static int tegra_dc_program_mode(struct tegra_dc *dc, struct tegra_dc_mode *mode)
+{
+ unsigned long val;
+ unsigned long rate;
+ unsigned long div;
+ unsigned long pclk;
+
+ print_mode(dc, mode, __func__);
+
+ /* use default EMC rate when switching modes */
+ dc->new_emc_clk_rate = tegra_dc_get_default_emc_clk_rate(dc);
+ tegra_dc_program_bandwidth(dc);
+
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, mode->h_ref_to_sync | (mode->v_ref_to_sync << 16),
+ DC_DISP_REF_TO_SYNC);
+ tegra_dc_writel(dc, mode->h_sync_width | (mode->v_sync_width << 16),
+ DC_DISP_SYNC_WIDTH);
+ tegra_dc_writel(dc, mode->h_back_porch | (mode->v_back_porch << 16),
+ DC_DISP_BACK_PORCH);
+ tegra_dc_writel(dc, mode->h_active | (mode->v_active << 16),
+ DC_DISP_DISP_ACTIVE);
+ tegra_dc_writel(dc, mode->h_front_porch | (mode->v_front_porch << 16),
+ DC_DISP_FRONT_PORCH);
+
+ tegra_dc_writel(dc, DE_SELECT_ACTIVE | DE_CONTROL_NORMAL,
+ DC_DISP_DATA_ENABLE_OPTIONS);
+
+ /* TODO: MIPI/CRT/HDMI clock cals */
+
+ val = DISP_DATA_FORMAT_DF1P1C;
+
+ if (dc->out->align == TEGRA_DC_ALIGN_MSB)
+ val |= DISP_DATA_ALIGNMENT_MSB;
+ else
+ val |= DISP_DATA_ALIGNMENT_LSB;
+
+ if (dc->out->order == TEGRA_DC_ORDER_RED_BLUE)
+ val |= DISP_DATA_ORDER_RED_BLUE;
+ else
+ val |= DISP_DATA_ORDER_BLUE_RED;
+
+ tegra_dc_writel(dc, val, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ rate = tegra_dc_clk_get_rate(dc);
+
+ pclk = tegra_dc_pclk_round_rate(dc, mode->pclk);
+ if (pclk < (mode->pclk / 100 * 99) ||
+ pclk > (mode->pclk / 100 * 109)) {
+ dev_err(&dc->ndev->dev,
+ "can't divide %ld clock to %d -1/+9%% %ld %d %d\n",
+ rate, mode->pclk,
+ pclk, (mode->pclk / 100 * 99),
+ (mode->pclk / 100 * 109));
+ return -EINVAL;
+ }
+
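+ /* div is chosen so that pclk == rate * 2 / (div + 2), i.e. the shift
+ * clock divider counts in half steps of the parent clock. */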
+ div = (rate * 2 / pclk) - 2;
+
+ tegra_dc_writel(dc, 0x00010001,
+ DC_DISP_SHIFT_CLOCK_OPTIONS);
+ tegra_dc_writel(dc, PIXEL_CLK_DIVIDER_PCD1 | SHIFT_CLK_DIVIDER(div),
+ DC_DISP_DISP_CLOCK_CONTROL);
+
+#ifdef CONFIG_SWITCH
+ switch_set_state(&dc->modeset_switch,
+ (mode->h_active << 16) | mode->v_active);
+#endif
+
+ dc->pixel_clk = dc->mode.pclk;
+
+ return 0;
+}
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode)
+{
+ memcpy(&dc->mode, mode, sizeof(dc->mode));
+
+ print_mode(dc, mode, __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_set_mode);
+
+int tegra_dc_set_fb_mode(struct tegra_dc *dc,
+ const struct fb_videomode *fbmode, bool stereo_mode)
+{
+ struct tegra_dc_mode mode;
+
+ if (!fbmode->pixclock)
+ return -EINVAL;
+
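+ /* fb_videomode gives the pixel clock in picoseconds per pixel;
+ * convert it to Hz. */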
+ mode.pclk = PICOS2KHZ(fbmode->pixclock) * 1000;
+ mode.h_sync_width = fbmode->hsync_len;
+ mode.v_sync_width = fbmode->vsync_len;
+ mode.h_back_porch = fbmode->left_margin;
+ mode.v_back_porch = fbmode->upper_margin;
+ mode.h_active = fbmode->xres;
+ mode.v_active = fbmode->yres;
+ mode.h_front_porch = fbmode->right_margin;
+ mode.v_front_porch = fbmode->lower_margin;
+ mode.stereo_mode = stereo_mode;
+ if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+ /* HDMI controller requires h_ref=1, v_ref=1 */
+ mode.h_ref_to_sync = 1;
+ mode.v_ref_to_sync = 1;
+ } else {
+ calc_ref_to_sync(&mode);
+ }
+ if (!check_ref_to_sync(&mode)) {
+ dev_err(&dc->ndev->dev,
+ "Display timing doesn't meet restrictions.\n");
+ return -EINVAL;
+ }
+ dev_info(&dc->ndev->dev, "Using mode %dx%d pclk=%d href=%d vref=%d\n",
+ mode.h_active, mode.v_active, mode.pclk,
+ mode.h_ref_to_sync, mode.v_ref_to_sync
+ );
+
+ if (mode.stereo_mode) {
+ mode.pclk *= 2;
+ /* total v_active = yres*2 plus the vertical blanking (active space) */
+ mode.v_active = fbmode->yres*2 +
+ fbmode->vsync_len +
+ fbmode->upper_margin +
+ fbmode->lower_margin;
+ }
+
+ mode.flags = 0;
+
+ if (!(fbmode->sync & FB_SYNC_HOR_HIGH_ACT))
+ mode.flags |= TEGRA_DC_MODE_FLAG_NEG_H_SYNC;
+
+ if (!(fbmode->sync & FB_SYNC_VERT_HIGH_ACT))
+ mode.flags |= TEGRA_DC_MODE_FLAG_NEG_V_SYNC;
+
+ return tegra_dc_set_mode(dc, &mode);
+}
+EXPORT_SYMBOL(tegra_dc_set_fb_mode);
+
+void
+tegra_dc_config_pwm(struct tegra_dc *dc, struct tegra_dc_pwm_params *cfg)
+{
+ unsigned int ctrl;
+ unsigned long out_sel;
+ unsigned long cmd_state;
+
+ mutex_lock(&dc->lock);
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return;
+ }
+
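+ /* Pack the PWM period, clock divider and clock source into the
+ * PM control value. */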
+ ctrl = ((cfg->period << PM_PERIOD_SHIFT) |
+ (cfg->clk_div << PM_CLK_DIVIDER_SHIFT) |
+ cfg->clk_select);
+
+ /* Write through the ACTIVE registers so the new value takes effect immediately */
+ cmd_state = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
+ tegra_dc_writel(dc, (cmd_state | (1 << 2)), DC_CMD_STATE_ACCESS);
+
+ if (cfg->switch_to_sfio && cfg->gpio_conf_to_sfio)
+ cfg->switch_to_sfio(cfg->gpio_conf_to_sfio);
+ else
+ dev_err(&dc->ndev->dev, "Error: Need gpio_conf_to_sfio\n");
+
+ switch (cfg->which_pwm) {
+ case TEGRA_PWM_PM0:
+ /* Select the LM0 on PM0 */
+ out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
+ out_sel &= ~(7 << 0);
+ out_sel |= (3 << 0);
+ tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
+ tegra_dc_writel(dc, ctrl, DC_COM_PM0_CONTROL);
+ tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM0_DUTY_CYCLE);
+ break;
+ case TEGRA_PWM_PM1:
+ /* Select the LM1 on PM1 */
+ out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
+ out_sel &= ~(7 << 4);
+ out_sel |= (3 << 4);
+ tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
+ tegra_dc_writel(dc, ctrl, DC_COM_PM1_CONTROL);
+ tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM1_DUTY_CYCLE);
+ break;
+ default:
+ dev_err(&dc->ndev->dev, "Error: invalid which_pwm\n");
+ break;
+ }
+ tegra_dc_writel(dc, cmd_state, DC_CMD_STATE_ACCESS);
+ mutex_unlock(&dc->lock);
+}
+EXPORT_SYMBOL(tegra_dc_config_pwm);
+
+void tegra_dc_set_out_pin_polars(struct tegra_dc *dc,
+ const struct tegra_dc_out_pin *pins,
+ const unsigned int n_pins)
+{
+ unsigned int i;
+
+ int name;
+ int pol;
+
+ u32 pol1, pol3;
+
+ u32 set1, unset1;
+ u32 set3, unset3;
+
+ set1 = set3 = unset1 = unset3 = 0;
+
+ for (i = 0; i < n_pins; i++) {
+ name = (pins + i)->name;
+ pol = (pins + i)->pol;
+
+ /* set polarity by name */
+ switch (name) {
+ case TEGRA_DC_OUT_PIN_DATA_ENABLE:
+ if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+ set3 |= LSPI_OUTPUT_POLARITY_LOW;
+ else
+ unset3 |= LSPI_OUTPUT_POLARITY_LOW;
+ break;
+ case TEGRA_DC_OUT_PIN_H_SYNC:
+ if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+ set1 |= LHS_OUTPUT_POLARITY_LOW;
+ else
+ unset1 |= LHS_OUTPUT_POLARITY_LOW;
+ break;
+ case TEGRA_DC_OUT_PIN_V_SYNC:
+ if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+ set1 |= LVS_OUTPUT_POLARITY_LOW;
+ else
+ unset1 |= LVS_OUTPUT_POLARITY_LOW;
+ break;
+ case TEGRA_DC_OUT_PIN_PIXEL_CLOCK:
+ if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+ set1 |= LSC0_OUTPUT_POLARITY_LOW;
+ else
+ unset1 |= LSC0_OUTPUT_POLARITY_LOW;
+ break;
+ default:
+ printk("Invalid argument in function %s\n",
+ __FUNCTION__);
+ break;
+ }
+ }
+
+ pol1 = DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL;
+ pol3 = DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL;
+
+ pol1 |= set1;
+ pol1 &= ~unset1;
+
+ pol3 |= set3;
+ pol3 &= ~unset3;
+
+ tegra_dc_writel(dc, pol1, DC_COM_PIN_OUTPUT_POLARITY1);
+ tegra_dc_writel(dc, pol3, DC_COM_PIN_OUTPUT_POLARITY3);
+}
+
+static void tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out)
+{
+ dc->out = out;
+
+ if (out->n_modes > 0)
+ tegra_dc_set_mode(dc, &dc->out->modes[0]);
+
+ switch (out->type) {
+ case TEGRA_DC_OUT_RGB:
+ dc->out_ops = &tegra_dc_rgb_ops;
+ break;
+
+ case TEGRA_DC_OUT_HDMI:
+ dc->out_ops = &tegra_dc_hdmi_ops;
+ break;
+
+ case TEGRA_DC_OUT_DSI:
+ dc->out_ops = &tegra_dc_dsi_ops;
+ break;
+
+ default:
+ dc->out_ops = NULL;
+ break;
+ }
+
+ if (dc->out_ops && dc->out_ops->init)
+ dc->out_ops->init(dc);
+}
+
+unsigned tegra_dc_get_out_height(const struct tegra_dc *dc)
+{
+ if (dc->out)
+ return dc->out->height;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_height);
+
+unsigned tegra_dc_get_out_width(const struct tegra_dc *dc)
+{
+ if (dc->out)
+ return dc->out->width;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_width);
+
+unsigned tegra_dc_get_out_max_pixclock(const struct tegra_dc *dc)
+{
+ if (dc->out && dc->out->max_pixclock)
+ return dc->out->max_pixclock;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_max_pixclock);
+
+void tegra_dc_enable_crc(struct tegra_dc *dc)
+{
+ u32 val;
+ tegra_dc_io_start(dc);
+
+ val = CRC_ALWAYS_ENABLE | CRC_INPUT_DATA_ACTIVE_DATA |
+ CRC_ENABLE_ENABLE;
+ tegra_dc_writel(dc, val, DC_COM_CRC_CONTROL);
+ tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+void tegra_dc_disable_crc(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, 0x0, DC_COM_CRC_CONTROL);
+ tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ tegra_dc_io_end(dc);
+}
+
+u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc)
+{
+ int crc = 0;
+
+ if (!dc) {
+ pr_err("%s: dc is NULL\n", __func__);
+ goto crc_error;
+ }
+
+ /* TODO: replace mdelay with code that waits for VBLANK, since
+ * DC_COM_CRC_CHECKSUM_LATCHED is available after VBLANK */
+ mdelay(TEGRA_CRC_LATCHED_DELAY);
+
+ crc = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM_LATCHED);
+crc_error:
+ return crc;
+}
+
+static void tegra_dc_vblank(struct work_struct *work)
+{
+ struct tegra_dc *dc = container_of(work, struct tegra_dc, vblank_work);
+ bool nvsd_updated = false;
+
+ mutex_lock(&dc->lock);
+
+ /* update EMC clock if calculated bandwidth has changed */
+ tegra_dc_program_bandwidth(dc);
+
+ /* Update the SD brightness */
+ if (dc->enabled && dc->out->sd_settings)
+ nvsd_updated = nvsd_update_brightness(dc);
+
+ mutex_unlock(&dc->lock);
+
+ /* Do the actual brightness update outside of the mutex */
+ if (nvsd_updated && dc->out->sd_settings &&
+ dc->out->sd_settings->bl_device) {
+
+ struct platform_device *pdev = dc->out->sd_settings->bl_device;
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ if (bl)
+ backlight_update_status(bl);
+ }
+}
+
+#ifndef CONFIG_TEGRA_FPGA_PLATFORM
+static void tegra_dc_underflow_handler(struct tegra_dc *dc)
+{
+ u32 val, i;
+
+ /* Check for any underflow reset conditions */
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (dc->underflow_mask & (WIN_A_UF_INT << i)) {
+ dc->windows[i].underflows++;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ if (dc->windows[i].underflows > 4)
+ schedule_work(&dc->reset_work);
+#endif
+ } else {
+ dc->windows[i].underflows = 0;
+ }
+ }
+
+ if (!dc->underflow_mask) {
+ /* If we have no underflow to check, go ahead
+ and disable the interrupt */
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ val &= ~FRAME_END_INT;
+ else
+ val &= ~V_BLANK_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ }
+
+ /* Clear the underflow mask now that we've checked it. */
+ dc->underflow_mask = 0;
+}
+
+static void tegra_dc_trigger_windows(struct tegra_dc *dc)
+{
+ u32 val, i;
+ u32 completed = 0;
+ u32 dirty = 0;
+
+ val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ /* FIXME: this is not needed when the simulator
+ clears WIN_x_UPDATE bits as in HW */
+ dc->windows[i].dirty = 0;
+ completed = 1;
+#else
+ if (!(val & (WIN_A_UPDATE << i))) {
+ dc->windows[i].dirty = 0;
+ completed = 1;
+ } else {
+ dirty = 1;
+ }
+#endif
+ }
+
+ if (!dirty) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ val &= ~V_BLANK_INT;
+ else
+ val &= ~FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ }
+
+ if (completed) {
+ if (!dirty) {
+ /* With the last completed window, go ahead
+ and enable the vblank interrupt for nvsd. */
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val |= V_BLANK_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ }
+
+ wake_up(&dc->wq);
+ }
+}
+
+static void tegra_dc_one_shot_irq(struct tegra_dc *dc, unsigned long status)
+{
+ if (status & V_BLANK_INT) {
+ /* Sync up windows. */
+ tegra_dc_trigger_windows(dc);
+
+ /* Schedule any additional bottom-half vblank activities. */
+ schedule_work(&dc->vblank_work);
+ }
+
+ /* Check underflow at frame end */
+ if (status & FRAME_END_INT) {
+ tegra_dc_underflow_handler(dc);
+
+ /* Mark the frame_end as complete. */
+ if (!completion_done(&dc->frame_end_complete))
+ complete(&dc->frame_end_complete);
+ }
+}
+
+static void tegra_dc_continuous_irq(struct tegra_dc *dc, unsigned long status)
+{
+ if (status & V_BLANK_INT) {
+ /* Check underflow */
+ tegra_dc_underflow_handler(dc);
+
+ /* Schedule any additional bottom-half vblank activities. */
+ schedule_work(&dc->vblank_work);
+ }
+
+ if (status & FRAME_END_INT) {
+ /* Mark the frame_end as complete. */
+ if (!completion_done(&dc->frame_end_complete))
+ complete(&dc->frame_end_complete);
+
+ tegra_dc_trigger_windows(dc);
+ }
+}
+#endif
+
+/* return an arbitrarily large number if count overflow occurs.
+ * make it a nice base-10 number to show up in stats output */
+static u64 tegra_dc_underflow_count(struct tegra_dc *dc, unsigned reg)
+{
+ unsigned count = tegra_dc_readl(dc, reg);
+ tegra_dc_writel(dc, 0, reg);
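+ /* Bit 31 indicates the hardware counter itself overflowed; report
+ * the large sentinel value instead of the raw count in that case. */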
+ return ((count & 0x80000000) == 0) ? count : 10000000000ll;
+}
+
+static irqreturn_t tegra_dc_irq(int irq, void *ptr)
+{
+#ifndef CONFIG_TEGRA_FPGA_PLATFORM
+ struct tegra_dc *dc = ptr;
+ unsigned long status;
+ unsigned long val;
+ unsigned long underflow_mask;
+
+ if (!nvhost_module_powered(&dc->ndev->host->mod)) {
+ WARN(1, "IRQ when DC not powered!\n");
+ tegra_dc_io_start(dc);
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+ tegra_dc_io_end(dc);
+ return IRQ_HANDLED;
+ }
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+ /*
+ * Overlays can get their internal state corrupted during an underflow
+ * condition. The only way to fix this state is to reset the DC.
+ * If we get 4 consecutive frames with underflows, assume we're
+ * hosed and reset.
+ */
+ underflow_mask = status & ALL_UF_INT;
+
+ if (underflow_mask) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val |= V_BLANK_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ dc->underflow_mask |= underflow_mask;
+ dc->stats.underflows++;
+ if (status & WIN_A_UF_INT)
+ dc->stats.underflows_a += tegra_dc_underflow_count(dc,
+ DC_WINBUF_AD_UFLOW_STATUS);
+ if (status & WIN_B_UF_INT)
+ dc->stats.underflows_b += tegra_dc_underflow_count(dc,
+ DC_WINBUF_BD_UFLOW_STATUS);
+ if (status & WIN_C_UF_INT)
+ dc->stats.underflows_c += tegra_dc_underflow_count(dc,
+ DC_WINBUF_CD_UFLOW_STATUS);
+ }
+
+ if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+ tegra_dc_one_shot_irq(dc, status);
+ else
+ tegra_dc_continuous_irq(dc, status);
+
+ return IRQ_HANDLED;
+#else /* CONFIG_TEGRA_FPGA_PLATFORM */
+ return IRQ_NONE;
+#endif /* !CONFIG_TEGRA_FPGA_PLATFORM */
+}
+
+static void tegra_dc_set_color_control(struct tegra_dc *dc)
+{
+ u32 color_control;
+
+ switch (dc->out->depth) {
+ case 3:
+ color_control = BASE_COLOR_SIZE111;
+ break;
+
+ case 6:
+ color_control = BASE_COLOR_SIZE222;
+ break;
+
+ case 8:
+ color_control = BASE_COLOR_SIZE332;
+ break;
+
+ case 9:
+ color_control = BASE_COLOR_SIZE333;
+ break;
+
+ case 12:
+ color_control = BASE_COLOR_SIZE444;
+ break;
+
+ case 15:
+ color_control = BASE_COLOR_SIZE555;
+ break;
+
+ case 16:
+ color_control = BASE_COLOR_SIZE565;
+ break;
+
+ case 18:
+ color_control = BASE_COLOR_SIZE666;
+ break;
+
+ default:
+ color_control = BASE_COLOR_SIZE888;
+ break;
+ }
+
+ switch (dc->out->dither) {
+ case TEGRA_DC_DISABLE_DITHER:
+ color_control |= DITHER_CONTROL_DISABLE;
+ break;
+ case TEGRA_DC_ORDERED_DITHER:
+ color_control |= DITHER_CONTROL_ORDERED;
+ break;
+ case TEGRA_DC_ERRDIFF_DITHER:
+ /* The line buffer for error-diffusion dither is limited
+ * to 1280 pixels per line. This limits the maximum
+ * horizontal active area size to 1280 pixels when error
+ * diffusion is enabled.
+ */
+ BUG_ON(dc->mode.h_active > 1280);
+ color_control |= DITHER_CONTROL_ERRDIFF;
+ break;
+ }
+
+ tegra_dc_writel(dc, color_control, DC_DISP_DISP_COLOR_CONTROL);
+}
+
+static u32 get_syncpt(struct tegra_dc *dc, int idx)
+{
+ u32 syncpt_id;
+
+ switch (dc->ndev->id) {
+ case 0:
+ switch (idx) {
+ case 0:
+ syncpt_id = NVSYNCPT_DISP0_A;
+ break;
+ case 1:
+ syncpt_id = NVSYNCPT_DISP0_B;
+ break;
+ case 2:
+ syncpt_id = NVSYNCPT_DISP0_C;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ break;
+ case 1:
+ switch (idx) {
+ case 0:
+ syncpt_id = NVSYNCPT_DISP1_A;
+ break;
+ case 1:
+ syncpt_id = NVSYNCPT_DISP1_B;
+ break;
+ case 2:
+ syncpt_id = NVSYNCPT_DISP1_C;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return syncpt_id;
+}
+
+static void tegra_dc_init(struct tegra_dc *dc)
+{
+ int i;
+
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ if (dc->ndev->id == 0) {
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0B,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0C,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1B,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHC,
+ TEGRA_MC_PRIO_HIGH);
+ } else if (dc->ndev->id == 1) {
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0AB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0BB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0CB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1BB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHCB,
+ TEGRA_MC_PRIO_HIGH);
+ }
+ tegra_dc_writel(dc, 0x00000100 | dc->vblank_syncpt,
+ DC_CMD_CONT_SYNCPT_VSYNC);
+ tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
+ tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
+ tegra_dc_writel(dc, 0x00202020, DC_DISP_MEM_HIGH_PRIORITY);
+ tegra_dc_writel(dc, 0x00010101, DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ /* enable interrupts for vblank, frame_end and underflows */
+ tegra_dc_writel(dc, (FRAME_END_INT | V_BLANK_INT | ALL_UF_INT),
+ DC_CMD_INT_ENABLE);
+ tegra_dc_writel(dc, ALL_UF_INT, DC_CMD_INT_MASK);
+
+ tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);
+
+ tegra_dc_set_color_control(dc);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ struct tegra_dc_win *win = &dc->windows[i];
+ tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ tegra_dc_set_csc(dc, &win->csc);
+ tegra_dc_set_lut(dc, win);
+ tegra_dc_set_scaling_filter(dc);
+ }
+
+ for (i = 0; i < dc->n_windows; i++) {
+ u32 syncpt = get_syncpt(dc, i);
+
+ dc->syncpt[i].id = syncpt;
+
+ dc->syncpt[i].min = dc->syncpt[i].max =
+ nvhost_syncpt_read(&dc->ndev->host->syncpt, syncpt);
+ }
+
+ print_mode(dc, &dc->mode, __func__);
+
+ if (dc->mode.pclk)
+ tegra_dc_program_mode(dc, &dc->mode);
+
+ /* Initialize SD AFTER the modeset.
+ nvsd_init handles the sd_settings = NULL case. */
+ nvsd_init(dc, dc->out->sd_settings);
+}
+
+static bool _tegra_dc_controller_enable(struct tegra_dc *dc)
+{
+ if (dc->out->enable)
+ dc->out->enable();
+
+ tegra_dc_setup_clk(dc, dc->clk);
+ clk_enable(dc->clk);
+ clk_enable(dc->emc_clk);
+
+ /* do not accept interrupts during initialization */
+ tegra_dc_writel(dc, 0, DC_CMD_INT_ENABLE);
+ tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
+
+ enable_dc_irq(dc->irq);
+
+ tegra_dc_init(dc);
+
+ if (dc->out_ops && dc->out_ops->enable)
+ dc->out_ops->enable(dc);
+
+ if (dc->out->postpoweron)
+ dc->out->postpoweron();
+
+ /* force a full blending update */
+ dc->blend.z[0] = -1;
+
+ tegra_dc_ext_enable(dc->ext);
+
+ return true;
+}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static bool _tegra_dc_controller_reset_enable(struct tegra_dc *dc)
+{
+ if (dc->out->enable)
+ dc->out->enable();
+
+ tegra_dc_setup_clk(dc, dc->clk);
+ clk_enable(dc->clk);
+ clk_enable(dc->emc_clk);
+
+ if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
+ mutex_lock(&tegra_dcs[1]->lock);
+ disable_irq(tegra_dcs[1]->irq);
+ } else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
+ mutex_lock(&tegra_dcs[0]->lock);
+ disable_irq(tegra_dcs[0]->irq);
+ }
+
+ msleep(5);
+ tegra_periph_reset_assert(dc->clk);
+ msleep(2);
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ tegra_periph_reset_deassert(dc->clk);
+ msleep(1);
+#endif
+
+ if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
+ enable_dc_irq(tegra_dcs[1]->irq);
+ mutex_unlock(&tegra_dcs[1]->lock);
+ } else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
+ enable_dc_irq(tegra_dcs[0]->irq);
+ mutex_unlock(&tegra_dcs[0]->lock);
+ }
+
+ enable_dc_irq(dc->irq);
+
+ tegra_dc_init(dc);
+
+ if (dc->out_ops && dc->out_ops->enable)
+ dc->out_ops->enable(dc);
+
+ if (dc->out->postpoweron)
+ dc->out->postpoweron();
+
+ /* force a full blending update */
+ dc->blend.z[0] = -1;
+
+ return true;
+}
+#endif
+
+static bool _tegra_dc_enable(struct tegra_dc *dc)
+{
+ if (dc->mode.pclk == 0)
+ return false;
+
+ if (!dc->out)
+ return false;
+
+ tegra_dc_io_start(dc);
+
+ return _tegra_dc_controller_enable(dc);
+}
+
+void tegra_dc_enable(struct tegra_dc *dc)
+{
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled)
+ dc->enabled = _tegra_dc_enable(dc);
+
+ mutex_unlock(&dc->lock);
+}
+
+static void _tegra_dc_controller_disable(struct tegra_dc *dc)
+{
+ unsigned i;
+
+ tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
+ tegra_dc_writel(dc, 0, DC_CMD_INT_ENABLE);
+ disable_irq(dc->irq);
+
+ if (dc->out_ops && dc->out_ops->disable)
+ dc->out_ops->disable(dc);
+
+ clk_disable(dc->emc_clk);
+ clk_disable(dc->clk);
+ tegra_dvfs_set_rate(dc->clk, 0);
+
+ if (dc->out && dc->out->disable)
+ dc->out->disable();
+
+ for (i = 0; i < dc->n_windows; i++) {
+ struct tegra_dc_win *w = &dc->windows[i];
+
+ /* reset window bandwidth */
+ w->bandwidth = 0;
+ w->new_bandwidth = 0;
+
+ /* disable windows */
+ w->flags &= ~TEGRA_WIN_FLAG_ENABLED;
+
+ /* flush any pending syncpt waits */
+ while (dc->syncpt[i].min < dc->syncpt[i].max) {
+ dc->syncpt[i].min++;
+ nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt,
+ dc->syncpt[i].id);
+ }
+ }
+}
+
+void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable)
+{
+#if 0 /* underflow interrupt is already enabled by dc reset worker */
+ u32 val;
+ if (dc->enabled) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ if (enable)
+ val |= (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
+ else
+ val &= ~(WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+ }
+#endif
+}
+
+bool tegra_dc_stats_get(struct tegra_dc *dc)
+{
+#if 0 /* right now it is always enabled */
+ u32 val;
+ bool res;
+
+ if (dc->enabled) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ res = !!(val & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT));
+ } else {
+ res = false;
+ }
+
+ return res;
+#endif
+ return true;
+}
+
+/* make the screen blank by disabling all windows */
+void tegra_dc_blank(struct tegra_dc *dc)
+{
+ struct tegra_dc_win *dcwins[DC_N_WINDOWS];
+ unsigned i;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ dcwins[i] = tegra_dc_get_window(dc, i);
+ dcwins[i]->flags &= ~TEGRA_WIN_FLAG_ENABLED;
+ }
+
+ tegra_dc_update_windows(dcwins, DC_N_WINDOWS);
+ tegra_dc_sync_windows(dcwins, DC_N_WINDOWS);
+}
+
+static void _tegra_dc_disable(struct tegra_dc *dc)
+{
+ _tegra_dc_controller_disable(dc);
+ tegra_dc_io_end(dc);
+}
+
+void tegra_dc_disable(struct tegra_dc *dc)
+{
+ if (dc->overlay)
+ tegra_overlay_disable(dc->overlay);
+
+ tegra_dc_ext_disable(dc->ext);
+
+ mutex_lock(&dc->lock);
+
+ if (dc->enabled) {
+ dc->enabled = false;
+
+ if (!dc->suspended)
+ _tegra_dc_disable(dc);
+ }
+
+#ifdef CONFIG_SWITCH
+ switch_set_state(&dc->modeset_switch, 0);
+#endif
+
+ mutex_unlock(&dc->lock);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static void tegra_dc_reset_worker(struct work_struct *work)
+{
+ struct tegra_dc *dc =
+ container_of(work, struct tegra_dc, reset_work);
+
+ unsigned long val = 0;
+
+ dev_warn(&dc->ndev->dev, "overlay stuck in underflow state. resetting.\n");
+
+ tegra_dc_ext_disable(dc->ext);
+
+ mutex_lock(&shared_lock);
+ mutex_lock(&dc->lock);
+
+ if (dc->enabled == false)
+ goto unlock;
+
+ dc->enabled = false;
+
+ /*
+ * Turn off the host read bus.
+ */
+ val = tegra_dc_readl(dc, DC_CMD_CONT_SYNCPT_VSYNC);
+ val &= ~(0x00000100);
+ tegra_dc_writel(dc, val, DC_CMD_CONT_SYNCPT_VSYNC);
+
+ /*
+ * set DC to STOP mode
+ */
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+
+ msleep(10);
+
+ _tegra_dc_controller_disable(dc);
+
+ /* _tegra_dc_controller_reset_enable deasserts reset */
+ _tegra_dc_controller_reset_enable(dc);
+
+ dc->enabled = true;
+unlock:
+ mutex_unlock(&dc->lock);
+ mutex_unlock(&shared_lock);
+}
+#endif
+
+#ifdef CONFIG_SWITCH
+static ssize_t switch_modeset_print_mode(struct switch_dev *sdev, char *buf)
+{
+ struct tegra_dc *dc =
+ container_of(sdev, struct tegra_dc, modeset_switch);
+
+ if (!sdev->state)
+ return sprintf(buf, "offline\n");
+
+ return sprintf(buf, "%dx%d\n", dc->mode.h_active, dc->mode.v_active);
+}
+#endif
+
+static int tegra_dc_probe(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc;
+ struct clk *clk;
+ struct clk *emc_clk;
+ struct resource *res;
+ struct resource *base_res;
+ struct resource *fb_mem = NULL;
+ int ret = 0;
+ void __iomem *base;
+ int irq;
+ int i;
+
+ if (!ndev->dev.platform_data) {
+ dev_err(&ndev->dev, "no platform data\n");
+ return -ENOENT;
+ }
+
+ dc = kzalloc(sizeof(struct tegra_dc), GFP_KERNEL);
+ if (!dc) {
+ dev_err(&ndev->dev, "can't allocate memory for tegra_dc\n");
+ return -ENOMEM;
+ }
+
+ irq = nvhost_get_irq_byname(ndev, "irq");
+ if (irq <= 0) {
+ dev_err(&ndev->dev, "no irq\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ res = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ dev_err(&ndev->dev, "no mem resource\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ base_res = request_mem_region(res->start, resource_size(res), ndev->name);
+ if (!base_res) {
+ dev_err(&ndev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&ndev->dev, "registers can't be mapped\n");
+ ret = -EBUSY;
+ goto err_release_resource_reg;
+ }
+
+ fb_mem = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem");
+
+ clk = clk_get(&ndev->dev, NULL);
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(&ndev->dev, "can't get clock\n");
+ ret = -ENOENT;
+ goto err_iounmap_reg;
+ }
+
+ emc_clk = clk_get(&ndev->dev, "emc");
+ if (IS_ERR_OR_NULL(emc_clk)) {
+ dev_err(&ndev->dev, "can't get emc clock\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ dc->clk = clk;
+ dc->emc_clk = emc_clk;
+
+ dc->base_res = base_res;
+ dc->base = base;
+ dc->irq = irq;
+ dc->ndev = ndev;
+ dc->pdata = ndev->dev.platform_data;
+
+ /*
+ * The emc is a shared clock, it will be set based on
+ * the requirements for each user on the bus.
+ */
+ dc->emc_clk_rate = tegra_dc_get_default_emc_clk_rate(dc);
+ clk_set_rate(emc_clk, dc->emc_clk_rate);
+
+ if (dc->pdata->flags & TEGRA_DC_FLAG_ENABLED)
+ dc->enabled = true;
+
+ mutex_init(&dc->lock);
+ init_completion(&dc->frame_end_complete);
+ init_waitqueue_head(&dc->wq);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ INIT_WORK(&dc->reset_work, tegra_dc_reset_worker);
+#endif
+ INIT_WORK(&dc->vblank_work, tegra_dc_vblank);
+
+ tegra_dc_init_lut_defaults(&dc->fb_lut);
+
+ dc->n_windows = DC_N_WINDOWS;
+ for (i = 0; i < dc->n_windows; i++) {
+ struct tegra_dc_win *win = &dc->windows[i];
+ win->idx = i;
+ win->dc = dc;
+ tegra_dc_init_csc_defaults(&win->csc);
+ tegra_dc_init_lut_defaults(&win->lut);
+ }
+
+ ret = tegra_dc_set(dc, ndev->id);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "can't add dc\n");
+ goto err_free_irq;
+ }
+
+ nvhost_set_drvdata(ndev, dc);
+
+#ifdef CONFIG_SWITCH
+ dc->modeset_switch.name = dev_name(&ndev->dev);
+ dc->modeset_switch.state = 0;
+ dc->modeset_switch.print_state = switch_modeset_print_mode;
+ switch_dev_register(&dc->modeset_switch);
+#endif
+
+ if (dc->pdata->default_out)
+ tegra_dc_set_out(dc, dc->pdata->default_out);
+ else
+ dev_err(&ndev->dev, "No default output specified. Leaving output disabled.\n");
+
+ dc->vblank_syncpt = (dc->ndev->id == 0) ?
+ NVSYNCPT_VBLANK0 : NVSYNCPT_VBLANK1;
+
+ dc->ext = tegra_dc_ext_register(ndev, dc);
+ if (IS_ERR_OR_NULL(dc->ext)) {
+ dev_warn(&ndev->dev, "Failed to enable Tegra DC extensions.\n");
+ dc->ext = NULL;
+ }
+
+ /* interrupt handler must be registered before tegra_fb_register() */
+ if (request_irq(irq, tegra_dc_irq, IRQF_DISABLED,
+ dev_name(&ndev->dev), dc)) {
+ dev_err(&ndev->dev, "request_irq %d failed\n", irq);
+ ret = -EBUSY;
+ goto err_put_emc_clk;
+ }
+
+ /* hack to balance enable_irq calls in _tegra_dc_enable() */
+ disable_dc_irq(dc->irq);
+
+ mutex_lock(&dc->lock);
+ if (dc->enabled)
+ _tegra_dc_enable(dc);
+ mutex_unlock(&dc->lock);
+
+ tegra_dc_create_debugfs(dc);
+
+ dev_info(&ndev->dev, "probed\n");
+
+ if (dc->pdata->fb) {
+ if (dc->pdata->fb->bits_per_pixel == -1) {
+ unsigned long fmt;
+ tegra_dc_writel(dc,
+ WINDOW_A_SELECT << dc->pdata->fb->win,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ fmt = tegra_dc_readl(dc, DC_WIN_COLOR_DEPTH);
+ dc->pdata->fb->bits_per_pixel =
+ tegra_dc_fmt_bpp(fmt);
+ }
+
+ dc->fb = tegra_fb_register(ndev, dc, dc->pdata->fb, fb_mem);
+ if (IS_ERR_OR_NULL(dc->fb))
+ dc->fb = NULL;
+ }
+
+ if (dc->fb) {
+ dc->overlay = tegra_overlay_register(ndev, dc);
+ if (IS_ERR_OR_NULL(dc->overlay))
+ dc->overlay = NULL;
+ }
+
+ if (dc->out && dc->out->hotplug_init)
+ dc->out->hotplug_init();
+
+ if (dc->out_ops && dc->out_ops->detect)
+ dc->out_ops->detect(dc);
+ else
+ dc->connected = true;
+
+ tegra_dc_create_sysfs(&dc->ndev->dev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, dc);
+err_put_emc_clk:
+ clk_put(emc_clk);
+err_put_clk:
+ clk_put(clk);
+err_iounmap_reg:
+ iounmap(base);
+ if (fb_mem)
+ release_resource(fb_mem);
+err_release_resource_reg:
+ release_resource(base_res);
+err_free:
+ kfree(dc);
+
+ return ret;
+}
+
+static int tegra_dc_remove(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ tegra_dc_remove_sysfs(&dc->ndev->dev);
+ tegra_dc_remove_debugfs(dc);
+
+ if (dc->overlay)
+ tegra_overlay_unregister(dc->overlay);
+
+ if (dc->fb) {
+ tegra_fb_unregister(dc->fb);
+ if (dc->fb_mem)
+ release_resource(dc->fb_mem);
+ }
+
+ tegra_dc_ext_disable(dc->ext);
+
+ if (dc->ext)
+ tegra_dc_ext_unregister(dc->ext);
+
+ if (dc->enabled)
+ _tegra_dc_disable(dc);
+
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&dc->modeset_switch);
+#endif
+ free_irq(dc->irq, dc);
+ clk_put(dc->emc_clk);
+ clk_put(dc->clk);
+ iounmap(dc->base);
+ release_resource(dc->base_res);
+ kfree(dc);
+ tegra_dc_set(NULL, ndev->id);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct nvhost_device *ndev, pm_message_t state)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ dev_info(&ndev->dev, "suspend\n");
+
+ if (dc->overlay)
+ tegra_overlay_disable(dc->overlay);
+
+ tegra_dc_ext_disable(dc->ext);
+
+ mutex_lock(&dc->lock);
+
+ if (dc->out_ops && dc->out_ops->suspend)
+ dc->out_ops->suspend(dc);
+
+ if (dc->enabled) {
+ _tegra_dc_disable(dc);
+
+ dc->suspended = true;
+ }
+
+ if (dc->out && dc->out->postsuspend) {
+ dc->out->postsuspend();
+ msleep(100); /* avoid resume event due to voltage falling */
+ }
+
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+static int tegra_dc_resume(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ dev_info(&ndev->dev, "resume\n");
+
+ mutex_lock(&dc->lock);
+ dc->suspended = false;
+
+ if (dc->enabled)
+ _tegra_dc_enable(dc);
+
+ if (dc->out && dc->out->hotplug_init)
+ dc->out->hotplug_init();
+
+ if (dc->out_ops && dc->out_ops->resume)
+ dc->out_ops->resume(dc);
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+extern int suspend_set(const char *val, struct kernel_param *kp)
+{
+ if (!strcmp(val, "dump"))
+ dump_regs(tegra_dcs[0]);
+#ifdef CONFIG_PM
+ else if (!strcmp(val, "suspend"))
+ tegra_dc_suspend(tegra_dcs[0]->ndev, PMSG_SUSPEND);
+ else if (!strcmp(val, "resume"))
+ tegra_dc_resume(tegra_dcs[0]->ndev);
+#endif
+
+ return 0;
+}
+
+extern int suspend_get(char *buffer, struct kernel_param *kp)
+{
+ return 0;
+}
+
+int suspend;
+
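+/* Debug hook: writing "dump", "suspend" or "resume" to this module parameter
+ * exercises the corresponding path on the first display controller. */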
+module_param_call(suspend, suspend_set, suspend_get, &suspend, 0644);
+
+struct nvhost_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegradc",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_dc_suspend,
+ .resume = tegra_dc_resume,
+#endif
+};
+
+static int __init tegra_dc_module_init(void)
+{
+ int ret = tegra_dc_ext_module_init();
+ if (ret)
+ return ret;
+ return nvhost_driver_register(&tegra_dc_driver);
+}
+
+static void __exit tegra_dc_module_exit(void)
+{
+ nvhost_driver_unregister(&tegra_dc_driver);
+ tegra_dc_ext_module_exit();
+}
+
+module_exit(tegra_dc_module_exit);
+module_init(tegra_dc_module_init);
diff --git a/drivers/video/tegra/dc/dc_priv.h b/drivers/video/tegra/dc/dc_priv.h
new file mode 100644
index 000000000000..994edfe46d31
--- /dev/null
+++ b/drivers/video/tegra/dc/dc_priv.h
@@ -0,0 +1,222 @@
+/*
+ * drivers/video/tegra/dc/dc_priv.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include <linux/switch.h>
+
+#include <mach/dc.h>
+
+#include "../host/dev.h"
+#include "../host/host1x/host1x_syncpt.h"
+
+#include <mach/tegra_dc_ext.h>
+
+#define WIN_IS_TILED(win) ((win)->flags & TEGRA_WIN_FLAG_TILED)
+#define WIN_IS_ENABLED(win) ((win)->flags & TEGRA_WIN_FLAG_ENABLED)
+
+#define NEED_UPDATE_EMC_ON_EVERY_FRAME (windows_idle_detection_time == 0)
+
+/* DDR: 8 bytes transfer per clock */
+#define DDR_BW_TO_FREQ(bw) ((bw) / 8)
+
+#if defined(CONFIG_TEGRA_EMC_TO_DDR_CLOCK)
+#define EMC_BW_TO_FREQ(bw) (DDR_BW_TO_FREQ(bw) * CONFIG_TEGRA_EMC_TO_DDR_CLOCK)
+#else
+#define EMC_BW_TO_FREQ(bw) (DDR_BW_TO_FREQ(bw) * 2)
+#endif
+
+struct tegra_dc;
+
+struct tegra_dc_blend {
+ unsigned z[DC_N_WINDOWS];
+ unsigned flags[DC_N_WINDOWS];
+};
+
+struct tegra_dc_out_ops {
+ /* initialize output. dc clocks are not on at this point */
+ int (*init)(struct tegra_dc *dc);
+ /* destroy output. dc clocks are not on at this point */
+ void (*destroy)(struct tegra_dc *dc);
+ /* detect connected display. can sleep.*/
+ bool (*detect)(struct tegra_dc *dc);
+ /* enable output. dc clocks are on at this point */
+ void (*enable)(struct tegra_dc *dc);
+ /* disable output. dc clocks are on at this point */
+ void (*disable)(struct tegra_dc *dc);
+
+ /* suspend output. dc clocks are on at this point */
+ void (*suspend)(struct tegra_dc *dc);
+ /* resume output. dc clocks are on at this point */
+ void (*resume)(struct tegra_dc *dc);
+};
+
+struct tegra_dc {
+ struct nvhost_device *ndev;
+ struct tegra_dc_platform_data *pdata;
+
+ struct resource *base_res;
+ void __iomem *base;
+ int irq;
+
+ int pixel_clk;
+ struct clk *clk;
+ struct clk *emc_clk;
+ int emc_clk_rate;
+ int new_emc_clk_rate;
+
+ bool connected;
+ bool enabled;
+ bool suspended;
+
+ struct tegra_dc_out *out;
+ struct tegra_dc_out_ops *out_ops;
+ void *out_data;
+
+ struct tegra_dc_mode mode;
+
+ struct tegra_dc_win windows[DC_N_WINDOWS];
+ struct tegra_dc_blend blend;
+ int n_windows;
+
+ wait_queue_head_t wq;
+
+ struct mutex lock;
+
+ struct resource *fb_mem;
+ struct tegra_fb_info *fb;
+
+ struct tegra_overlay_info *overlay;
+
+ struct {
+ u32 id;
+ u32 min;
+ u32 max;
+ } syncpt[DC_N_WINDOWS];
+ u32 vblank_syncpt;
+
+ unsigned long underflow_mask;
+ struct work_struct reset_work;
+
+#ifdef CONFIG_SWITCH
+ struct switch_dev modeset_switch;
+#endif
+
+ struct completion frame_end_complete;
+
+ struct work_struct vblank_work;
+
+ struct {
+ u64 underflows;
+ u64 underflows_a;
+ u64 underflows_b;
+ u64 underflows_c;
+ } stats;
+
+ struct tegra_dc_ext *ext;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugdir;
+#endif
+ struct tegra_dc_lut fb_lut;
+};
+
+static inline void tegra_dc_io_start(struct tegra_dc *dc)
+{
+ nvhost_module_busy(&dc->ndev->host->mod);
+}
+
+static inline void tegra_dc_io_end(struct tegra_dc *dc)
+{
+ nvhost_module_idle(&dc->ndev->host->mod);
+}
+
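+/* DC registers are indexed as 32-bit words; multiply by 4 for the byte offset. */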
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+ unsigned long reg)
+{
+ BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
+ return readl(dc->base + reg * 4);
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long val,
+ unsigned long reg)
+{
+ BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
+ writel(val, dc->base + reg * 4);
+}
+
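+/* Tables are laid out as { reg, val } pairs; see tegra_dc_write_table(). */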
+static inline void _tegra_dc_write_table(struct tegra_dc *dc, const u32 *table,
+ unsigned len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ tegra_dc_writel(dc, table[i * 2 + 1], table[i * 2]);
+}
+
+#define tegra_dc_write_table(dc, table) \
+ _tegra_dc_write_table(dc, table, ARRAY_SIZE(table) / 2)
+
+static inline void tegra_dc_set_outdata(struct tegra_dc *dc, void *data)
+{
+ dc->out_data = data;
+}
+
+static inline void *tegra_dc_get_outdata(struct tegra_dc *dc)
+{
+ return dc->out_data;
+}
+
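+/* Fall back to the maximum EMC rate when platform data does not specify one. */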
+static inline unsigned long tegra_dc_get_default_emc_clk_rate(
+ struct tegra_dc *dc)
+{
+ return dc->pdata->emc_clk_rate ? dc->pdata->emc_clk_rate : ULONG_MAX;
+}
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk);
+
+extern struct tegra_dc_out_ops tegra_dc_rgb_ops;
+extern struct tegra_dc_out_ops tegra_dc_hdmi_ops;
+extern struct tegra_dc_out_ops tegra_dc_dsi_ops;
+
+/* defined in dc_sysfs.c, used by dc.c */
+void __devexit tegra_dc_remove_sysfs(struct device *dev);
+void tegra_dc_create_sysfs(struct device *dev);
+
+/* defined in dc.c, used by dc_sysfs.c */
+void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable);
+bool tegra_dc_stats_get(struct tegra_dc *dc);
+
+/* defined in dc.c, used by overlay.c */
+unsigned int tegra_dc_has_multiple_dc(void);
+unsigned long tegra_dc_get_bandwidth(struct tegra_dc_win *wins[], int n);
+
+/* defined in dc.c, used by dc_sysfs.c */
+u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc);
+void tegra_dc_enable_crc(struct tegra_dc *dc);
+void tegra_dc_disable_crc(struct tegra_dc *dc);
+
+void tegra_dc_set_out_pin_polars(struct tegra_dc *dc,
+ const struct tegra_dc_out_pin *pins,
+ const unsigned int n_pins);
+#endif
+
diff --git a/drivers/video/tegra/dc/dc_reg.h b/drivers/video/tegra/dc/dc_reg.h
new file mode 100644
index 000000000000..22379a194082
--- /dev/null
+++ b/drivers/video/tegra/dc/dc_reg.h
@@ -0,0 +1,555 @@
+/*
+ * drivers/video/tegra/dc/dc_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define MSF_POLARITY_HIGH (0 << 0)
+#define MSF_POLARITY_LOW (1 << 0)
+#define MSF_DISABLE (0 << 1)
+#define MSF_ENABLE (1 << 1)
+#define MSF_LSPI (0 << 2)
+#define MSF_LDC (1 << 2)
+#define MSF_LSDI (2 << 2)
+
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_COMMAND_RAISE (1 << 0)
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_COMMAND_RAISE_VECTOR(x) (((x) & 0x1f) << 22)
+#define DISP_COMMAND_RAISE_CHANNEL_ID(x) (((x) & 0xf) << 27)
+
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+#define SPI_ENABLE (1 << 24)
+#define HSPI_ENABLE (1 << 25)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define V_BLANK_INT (1 << 2)
+#define H_BLANK_INT (1 << 3)
+#define V_PULSE3_INT (1 << 4)
+#define SPI_BUSY_INT (1 << 7)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define MSF_INT (1 << 12)
+#define SSF_INT (1 << 13)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+#define GPIO_0_INT (1 << 18)
+#define GPIO_1_INT (1 << 19)
+#define GPIO_2_INT (1 << 20)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX_ASSEMBLY (0 << 0)
+#define READ_MUX_ACTIVE (1 << 0)
+#define WRITE_MUX_ASSEMBLY (0 << 2)
+#define WRITE_MUX_ACTIVE (1 << 2)
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+#define NC_HOST_TRIG (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define CRC_ALWAYS_ENABLE (1 << 3)
+#define CRC_ALWAYS_DISABLE (0 << 3)
+#define CRC_INPUT_DATA_ACTIVE_DATA (1 << 2)
+#define CRC_INPUT_DATA_FULL_FRAME (0 << 2)
+#define CRC_WAIT_TWO_VSYNC (1 << 1)
+#define CRC_WAIT_ONE_VSYNC (0 << 1)
+#define CRC_ENABLE_ENABLE (1 << 0)
+#define CRC_ENABLE_DISABLE (0 << 0)
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE0 0x302
+#define DC_COM_PIN_OUTPUT_ENABLE1 0x303
+#define DC_COM_PIN_OUTPUT_ENABLE2 0x304
+#define DC_COM_PIN_OUTPUT_ENABLE3 0x305
+#define PIN_OUTPUT_LSPI_OUTPUT_EN (1 << 8)
+#define PIN_OUTPUT_LSPI_OUTPUT_DIS (1 << 8)
+#define DC_COM_PIN_OUTPUT_POLARITY0 0x306
+
+#define DC_COM_PIN_OUTPUT_POLARITY1 0x307
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LSC0_OUTPUT_POLARITY_LOW (1 << 24)
+
+#define DC_COM_PIN_OUTPUT_POLARITY2 0x308
+
+#define DC_COM_PIN_OUTPUT_POLARITY3 0x309
+#define LSPI_OUTPUT_POLARITY_LOW (1 << 8)
+
+#define DC_COM_PIN_OUTPUT_DATA0 0x30a
+#define DC_COM_PIN_OUTPUT_DATA1 0x30b
+#define DC_COM_PIN_OUTPUT_DATA2 0x30c
+#define DC_COM_PIN_OUTPUT_DATA3 0x30d
+#define DC_COM_PIN_INPUT_ENABLE0 0x30e
+#define DC_COM_PIN_INPUT_ENABLE1 0x30f
+#define DC_COM_PIN_INPUT_ENABLE2 0x310
+#define DC_COM_PIN_INPUT_ENABLE3 0x311
+#define PIN_INPUT_LSPI_INPUT_EN (1 << 8)
+#define PIN_INPUT_LSPI_INPUT_DIS (1 << 8)
+#define DC_COM_PIN_INPUT_DATA0 0x312
+#define DC_COM_PIN_INPUT_DATA1 0x313
+#define DC_COM_PIN_OUTPUT_SELECT0 0x314
+#define DC_COM_PIN_OUTPUT_SELECT1 0x315
+#define DC_COM_PIN_OUTPUT_SELECT2 0x316
+#define DC_COM_PIN_OUTPUT_SELECT3 0x317
+#define DC_COM_PIN_OUTPUT_SELECT4 0x318
+#define DC_COM_PIN_OUTPUT_SELECT5 0x319
+#define DC_COM_PIN_OUTPUT_SELECT6 0x31a
+
+#define PIN5_LM1_LCD_M1_OUTPUT_MASK (7 << 4)
+#define PIN5_LM1_LCD_M1_OUTPUT_M1 (0 << 4)
+#define PIN5_LM1_LCD_M1_OUTPUT_LD21 (2 << 4)
+#define PIN5_LM1_LCD_M1_OUTPUT_PM1 (3 << 4)
+
+#define PIN1_LHS_OUTPUT (1 << 30)
+#define PIN1_LVS_OUTPUT (1 << 28)
+
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PM0_CONTROL 0x31c
+#define DC_COM_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PM1_CONTROL 0x31e
+#define DC_COM_PM1_DUTY_CYCLE 0x31f
+
+#define PM_PERIOD_SHIFT 18
+#define PM_CLK_DIVIDER_SHIFT 4
+
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE_0_ENABLE (1 << 8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+#define V_PULSE_0_ENABLE (1 << 16)
+#define V_PULSE_1_ENABLE (1 << 18)
+#define V_PULSE_2_ENABLE (1 << 19)
+#define V_PULSE_3_ENABLE (1 << 20)
+#define M0_ENABLE (1 << 24)
+#define M1_ENABLE (1 << 26)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+#define DI_ENABLE (1 << 16)
+#define PP_ENABLE (1 << 18)
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define CURSOR_ENABLE (1 << 16)
+#define TVO_ENABLE (1 << 28)
+#define DSI_ENABLE (1 << 29)
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_MEM_HIGH_PRIORITY 0x403
+#define DC_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) ((x) & 0xfff)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (5 << 0)
+#define DISP_DATA_FORMAT_DF3S (6 << 0)
+#define DISP_DATA_FORMAT_DFSPI (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (8 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (9 << 0)
+#define DISP_DATA_ALIGNMENT_MSB (0 << 8)
+#define DISP_DATA_ALIGNMENT_LSB (1 << 8)
+#define DISP_DATA_ORDER_RED_BLUE (0 << 9)
+#define DISP_DATA_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 (0 << 0)
+#define BASE_COLOR_SIZE111 (1 << 0)
+#define BASE_COLOR_SIZE222 (2 << 0)
+#define BASE_COLOR_SIZE333 (3 << 0)
+#define BASE_COLOR_SIZE444 (4 << 0)
+#define BASE_COLOR_SIZE555 (5 << 0)
+#define BASE_COLOR_SIZE565 (6 << 0)
+#define BASE_COLOR_SIZE332 (7 << 0)
+#define BASE_COLOR_SIZE888 (8 << 0)
+
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK 0x0
+#define DE_SELECT_ACTIVE 0x1
+#define DE_SELECT_ACTIVE_IS 0x2
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+#define CURSOR_COLOR(_r, _g, _b) ((_r) | ((_g) << 8) | ((_b) << 16))
+
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+#define CURSOR_START_ADDR_MASK (((1 << 22) - 1) << 10)
+#define CURSOR_START_ADDR(_addr) ((_addr) >> 10)
+#define CURSOR_SIZE_64 (1 << 24)
+
+#define DC_DISP_CURSOR_POSITION 0x440
+#define CURSOR_POSITION(_x, _y) \
+ (((_x) & ((1 << 16) - 1)) | \
+ (((_y) & ((1 << 16) - 1)) << 16))
+
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY0C_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+
+#define DC_WIN_COLOR_PALETTE(x) (0x500 + (x))
+
+#define DC_WIN_PALETTE_COLOR_EXT 0x600
+#define DC_WIN_H_FILTER_P(x) (0x601 + (x))
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+#define DC_WIN_V_FILTER_P(x) (0x619 + (x))
+#define DC_WIN_WIN_OPTIONS 0x700
+#define H_DIRECTION_INCREMENT (0 << 0)
+#define H_DIRECTION_DECREMENT (1 << 0)
+#define V_DIRECTION_INCREMENT (0 << 2)
+#define V_DIRECTION_DECREMENT (1 << 2)
+#define COLOR_EXPAND (1 << 6)
+#define H_FILTER_ENABLE (1 << 8)
+#define V_FILTER_ENABLE (1 << 10)
+#define CP_ENABLE (1 << 16)
+#define CSC_ENABLE (1 << 18)
+#define DV_ENABLE (1 << 20)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP 0
+#define BYTE_SWAP_SWAP2 1
+#define BYTE_SWAP_SWAP4 2
+#define BYTE_SWAP_SWAP4HW 3
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST 0
+#define BUFFER_CONTROL_VI 1
+#define BUFFER_CONTROL_EPP 2
+#define BUFFER_CONTROL_MPEGE 3
+#define BUFFER_CONTROL_SB2D 4
+
+#define DC_WIN_COLOR_DEPTH 0x703
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0xfff) << 0)
+#define V_POSITION(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0xfff) << 0)
+#define V_SIZE(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x3fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INCREMENT 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define LINE_STRIDE(x) (x)
+#define UV_LINE_STRIDE(x) (((x) & 0xffff) << 16)
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
+#define DC_WIN_DV_CONTROL 0x70e
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define DC_WIN_BLEND_1WIN 0x710
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND_3WIN_XY 0x713
+#define CKEY_NOKEY (0 << 0)
+#define CKEY_KEY0 (1 << 0)
+#define CKEY_KEY1 (2 << 0)
+#define CKEY_KEY01 (3 << 0)
+#define BLEND_CONTROL_FIX (0 << 2)
+#define BLEND_CONTROL_ALPHA (1 << 2)
+#define BLEND_CONTROL_DEPENDANT (2 << 2)
+#define BLEND_CONTROL_PREMULT (3 << 2)
+#define BLEND_WEIGHT0(x) (((x) & 0xff) << 8)
+#define BLEND_WEIGHT1(x) (((x) & 0xff) << 16)
+#define BLEND(key, control, weight0, weight1) \
+ (CKEY_ ## key | BLEND_CONTROL_ ## control | \
+ BLEND_WEIGHT0(weight0) | BLEND_WEIGHT1(weight1))
+
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+
+/* direct versions of DC_WINBUF_UFLOW_STATUS */
+#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
+
+#define DC_DISP_SD_CONTROL 0x4c2
+#define SD_ENABLE_NORMAL (1 << 0)
+#define SD_ENABLE_ONESHOT (2 << 0)
+#define SD_USE_VID_LUMA (1 << 2)
+#define SD_BIN_WIDTH_ONE (0 << 3)
+#define SD_BIN_WIDTH_TWO (1 << 3)
+#define SD_BIN_WIDTH_FOUR (2 << 3)
+#define SD_BIN_WIDTH_EIGHT (3 << 3)
+#define SD_BIN_WIDTH_MASK (3 << 3)
+#define SD_AGGRESSIVENESS(x) (((x) & 0x7) << 5)
+#define SD_HW_UPDATE_DLY(x) (((x) & 0x3) << 8)
+#define SD_ONESHOT_ENABLE (1 << 10)
+#define SD_CORRECTION_MODE_AUTO (0 << 11)
+#define SD_CORRECTION_MODE_MAN (1 << 11)
+
+#define NUM_BIN_WIDTHS 4
+#define STEPS_PER_AGG_LVL 64
+#define STEPS_PER_AGG_CHG_LOG2 5
+#define STEPS_PER_AGG_CHG (1<<STEPS_PER_AGG_CHG_LOG2)
+#define ADJ_PHASE_STEP 8
+#define K_STEP 4
+
+#define DC_DISP_SD_CSC_COEFF 0x4c3
+#define SD_CSC_COEFF_R(x) (((x) & 0xf) << 4)
+#define SD_CSC_COEFF_G(x) (((x) & 0xf) << 12)
+#define SD_CSC_COEFF_B(x) (((x) & 0xf) << 20)
+
+#define DC_DISP_SD_LUT(i) (0x4c4 + i)
+#define DC_DISP_SD_LUT_NUM 9
+#define SD_LUT_R(x) (((x) & 0xff) << 0)
+#define SD_LUT_G(x) (((x) & 0xff) << 8)
+#define SD_LUT_B(x) (((x) & 0xff) << 16)
+
+#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
+#define SD_FC_TIME_LIMIT(x) (((x) & 0xff) << 0)
+#define SD_FC_THRESHOLD(x) (((x) & 0xff) << 8)
+
+#define DC_DISP_SD_PIXEL_COUNT 0x4ce
+
+#define DC_DISP_SD_HISTOGRAM(i) (0x4cf + i)
+#define DC_DISP_SD_HISTOGRAM_NUM 8
+#define SD_HISTOGRAM_BIN_0(val) (((val) & (0xff << 0)) >> 0)
+#define SD_HISTOGRAM_BIN_1(val) (((val) & (0xff << 8)) >> 8)
+#define SD_HISTOGRAM_BIN_2(val) (((val) & (0xff << 16)) >> 16)
+#define SD_HISTOGRAM_BIN_3(val) (((val) & (0xff << 24)) >> 24)
+
+#define DC_DISP_SD_BL_PARAMETERS 0x4d7
+#define SD_BLP_TIME_CONSTANT(x) (((x) & 0x7ff) << 0)
+#define SD_BLP_STEP(x) (((x) & 0xff) << 16)
+
+#define DC_DISP_SD_BL_TF(i) (0x4d8 + i)
+#define DC_DISP_SD_BL_TF_NUM 4
+#define SD_BL_TF_POINT_0(x) (((x) & 0xff) << 0)
+#define SD_BL_TF_POINT_1(x) (((x) & 0xff) << 8)
+#define SD_BL_TF_POINT_2(x) (((x) & 0xff) << 16)
+#define SD_BL_TF_POINT_3(x) (((x) & 0xff) << 24)
+
+#define DC_DISP_SD_BL_CONTROL 0x4dc
+#define SD_BLC_MODE_MAN (0 << 0)
+#define SD_BLC_MODE_AUTO (1 << 1)
+#define SD_BLC_BRIGHTNESS(val) (((val) & (0xff << 8)) >> 8)
+
+#define DC_DISP_SD_HW_K_VALUES 0x4dd
+#define SD_HW_K_R(val) (((val) & (0x3ff << 0)) >> 0)
+#define SD_HW_K_G(val) (((val) & (0x3ff << 10)) >> 10)
+#define SD_HW_K_B(val) (((val) & (0x3ff << 20)) >> 20)
+
+#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define SD_MAN_K_R(x) (((x) & 0x3ff) << 0)
+#define SD_MAN_K_G(x) (((x) & 0x3ff) << 10)
+#define SD_MAN_K_B(x) (((x) & 0x3ff) << 20)
+
+#define NUM_AGG_PRI_LVLS 4
+#define SD_AGG_PRI_LVL(x) ((x) >> 3)
+#define SD_GET_AGG(x) ((x) & 0x7)
+
+#endif
diff --git a/drivers/video/tegra/dc/dc_sysfs.c b/drivers/video/tegra/dc/dc_sysfs.c
new file mode 100644
index 000000000000..6bb18382e6ee
--- /dev/null
+++ b/drivers/video/tegra/dc/dc_sysfs.c
@@ -0,0 +1,327 @@
+/*
+ * drivers/video/tegra/dc/dc_sysfs.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "nvsd.h"
+
+static ssize_t mode_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvhost_device *ndev = to_nvhost_device(device);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_mode *m;
+ ssize_t res;
+
+ mutex_lock(&dc->lock);
+ m = &dc->mode;
+ res = snprintf(buf, PAGE_SIZE,
+ "pclk: %d\n"
+ "h_ref_to_sync: %d\n"
+ "v_ref_to_sync: %d\n"
+ "h_sync_width: %d\n"
+ "v_sync_width: %d\n"
+ "h_back_porch: %d\n"
+ "v_back_porch: %d\n"
+ "h_active: %d\n"
+ "v_active: %d\n"
+ "h_front_porch: %d\n"
+ "v_front_porch: %d\n"
+ "stereo_mode: %d\n",
+ m->pclk, m->h_ref_to_sync, m->v_ref_to_sync,
+ m->h_sync_width, m->v_sync_width,
+ m->h_back_porch, m->v_back_porch,
+ m->h_active, m->v_active,
+ m->h_front_porch, m->v_front_porch,
+ m->stereo_mode);
+ mutex_unlock(&dc->lock);
+
+ return res;
+}
+
+static DEVICE_ATTR(mode, S_IRUGO, mode_show, NULL);
+
+static ssize_t stats_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ bool enabled;
+
+ if (mutex_lock_killable(&dc->lock))
+ return -EINTR;
+ enabled = tegra_dc_stats_get(dc);
+ mutex_unlock(&dc->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d", enabled);
+}
+
+static ssize_t stats_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ unsigned long val = 0;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&dc->lock))
+ return -EINTR;
+ tegra_dc_stats_enable(dc, !!val);
+ mutex_unlock(&dc->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(stats_enable, S_IRUGO|S_IWUSR,
+ stats_enable_show, stats_enable_store);
+
+static ssize_t enable_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvhost_device *ndev = to_nvhost_device(device);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ ssize_t res;
+
+ mutex_lock(&dc->lock);
+ res = snprintf(buf, PAGE_SIZE, "%d\n", dc->enabled);
+ mutex_unlock(&dc->lock);
+ return res;
+}
+
+static ssize_t enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ unsigned long val = 0;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ tegra_dc_enable(dc);
+ } else {
+ tegra_dc_disable(dc);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO|S_IWUSR, enable_show, enable_store);
+
+static ssize_t crc_checksum_latched_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvhost_device *ndev = to_nvhost_device(device);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ u32 crc;
+
+ if (!dc->enabled) {
+ dev_err(&dc->ndev->dev, "Failed to get dc.\n");
+ return -EFAULT;
+ }
+
+ crc = tegra_dc_read_checksum_latched(dc);
+
+ return snprintf(buf, PAGE_SIZE, "%u", crc);
+}
+
+static ssize_t crc_checksum_latched_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ unsigned long val = 0;
+
+ if (!dc->enabled) {
+ dev_err(&dc->ndev->dev, "Failed to get dc.\n");
+ return -EFAULT;
+ }
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (val == 1) {
+ tegra_dc_enable_crc(dc);
+ dev_err(&dc->ndev->dev, "crc is enabled.\n");
+ } else if (val == 0) {
+ tegra_dc_disable_crc(dc);
+ dev_err(&dc->ndev->dev, "crc is disabled.\n");
+ } else
+ dev_err(&dc->ndev->dev, "Invalid input.\n");
+
+ return count;
+}
+static DEVICE_ATTR(crc_checksum_latched, S_IRUGO|S_IWUSR,
+ crc_checksum_latched_show, crc_checksum_latched_store);
+
+#define ORIENTATION_PORTRAIT "portrait"
+#define ORIENTATION_LANDSCAPE "landscape"
+
+static ssize_t orientation_3d_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_out *dc_out = dc->out;
+ const char *orientation;
+ switch (dc_out->stereo->orientation) {
+ case TEGRA_DC_STEREO_LANDSCAPE:
+ orientation = ORIENTATION_LANDSCAPE;
+ break;
+ case TEGRA_DC_STEREO_PORTRAIT:
+ orientation = ORIENTATION_PORTRAIT;
+ break;
+ default:
+ pr_err("Invalid value is stored for stereo_orientation.\n");
+ return -EINVAL;
+ }
+ return snprintf(buf, PAGE_SIZE, "%s\n", orientation);
+}
+
+static ssize_t orientation_3d_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t cnt)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_out *dc_out = dc->out;
+ struct tegra_stereo_out *stereo = dc_out->stereo;
+ int orientation;
+
+ if (0 == strncmp(buf, ORIENTATION_PORTRAIT,
+ min(cnt, ARRAY_SIZE(ORIENTATION_PORTRAIT) - 1))) {
+ orientation = TEGRA_DC_STEREO_PORTRAIT;
+ } else if (0 == strncmp(buf, ORIENTATION_LANDSCAPE,
+ min(cnt, ARRAY_SIZE(ORIENTATION_LANDSCAPE) - 1))) {
+ orientation = TEGRA_DC_STEREO_LANDSCAPE;
+ } else {
+ pr_err("Invalid property value for stereo_orientation.\n");
+ return -EINVAL;
+ }
+ stereo->orientation = orientation;
+ stereo->set_orientation(orientation);
+ return cnt;
+}
+
+static DEVICE_ATTR(stereo_orientation,
+ S_IRUGO|S_IWUSR, orientation_3d_show, orientation_3d_store);
+
+#define MODE_2D "2d"
+#define MODE_3D "3d"
+
+static ssize_t mode_3d_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_out *dc_out = dc->out;
+ const char *mode;
+ switch (dc_out->stereo->mode_2d_3d) {
+ case TEGRA_DC_STEREO_MODE_2D:
+ mode = MODE_2D;
+ break;
+ case TEGRA_DC_STEREO_MODE_3D:
+ mode = MODE_3D;
+ break;
+ default:
+ pr_err("Invalid value is stored for stereo_mode.\n");
+ return -EINVAL;
+ }
+ return snprintf(buf, PAGE_SIZE, "%s\n", mode);
+}
+
+static ssize_t mode_3d_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t cnt)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_out *dc_out = dc->out;
+ struct tegra_stereo_out *stereo = dc_out->stereo;
+ int mode;
+
+ if (0 == strncmp(buf, MODE_2D, min(cnt, ARRAY_SIZE(MODE_2D) - 1))) {
+ mode = TEGRA_DC_STEREO_MODE_2D;
+ } else if (0 == strncmp(buf, MODE_3D,
+ min(cnt, ARRAY_SIZE(MODE_3D) - 1))) {
+ mode = TEGRA_DC_STEREO_MODE_3D;
+ } else {
+ pr_err("Invalid property value for stereo_mode.\n");
+ return -EINVAL;
+ }
+ stereo->mode_2d_3d = mode;
+ stereo->set_mode(mode);
+ return cnt;
+}
+
+static DEVICE_ATTR(stereo_mode,
+ S_IRUGO|S_IWUSR, mode_3d_show, mode_3d_store);
+
+void __devexit tegra_dc_remove_sysfs(struct device *dev)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+
+ device_remove_file(dev, &dev_attr_mode);
+ device_remove_file(dev, &dev_attr_enable);
+ device_remove_file(dev, &dev_attr_stats_enable);
+ device_remove_file(dev, &dev_attr_crc_checksum_latched);
+
+ if (dc->out->stereo) {
+ device_remove_file(dev, &dev_attr_stereo_orientation);
+ device_remove_file(dev, &dev_attr_stereo_mode);
+ }
+
+ if (sd_settings)
+ nvsd_remove_sysfs(dev);
+}
+
+void tegra_dc_create_sysfs(struct device *dev)
+{
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+ int error = 0;
+
+ error |= device_create_file(dev, &dev_attr_mode);
+ error |= device_create_file(dev, &dev_attr_enable);
+ error |= device_create_file(dev, &dev_attr_stats_enable);
+ error |= device_create_file(dev, &dev_attr_crc_checksum_latched);
+
+ if (dc->out->stereo) {
+ error |= device_create_file(dev, &dev_attr_stereo_orientation);
+ error |= device_create_file(dev, &dev_attr_stereo_mode);
+ }
+
+ if (sd_settings)
+ error |= nvsd_create_sysfs(dev);
+
+ if (error)
+ dev_err(&ndev->dev, "Failed to create sysfs attributes!\n");
+}
diff --git a/drivers/video/tegra/dc/dsi.c b/drivers/video/tegra/dc/dsi.c
new file mode 100644
index 000000000000..5ee7671a79fa
--- /dev/null
+++ b/drivers/video/tegra/dc/dsi.c
@@ -0,0 +1,2642 @@
+/*
+ * drivers/video/tegra/dc/dsi.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/csi.h>
+#include <linux/nvhost.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "dsi_regs.h"
+#include "dsi.h"
+
+#define DSI_USE_SYNC_POINTS 1
+#define S_TO_MS(x) (1000 * (x))
+
+#define DSI_MODULE_NOT_INIT 0x0
+#define DSI_MODULE_INIT 0x1
+
+#define DSI_LPHS_NOT_INIT 0x0
+#define DSI_LPHS_IN_LP_MODE 0x1
+#define DSI_LPHS_IN_HS_MODE 0x2
+
+#define DSI_VIDEO_TYPE_NOT_INIT 0x0
+#define DSI_VIDEO_TYPE_VIDEO_MODE 0x1
+#define DSI_VIDEO_TYPE_CMD_MODE 0x2
+
+#define DSI_DRIVEN_MODE_NOT_INIT 0x0
+#define DSI_DRIVEN_MODE_DC 0x1
+#define DSI_DRIVEN_MODE_HOST 0x2
+
+#define DSI_PHYCLK_OUT_DIS 0x0
+#define DSI_PHYCLK_OUT_EN 0x1
+
+#define DSI_PHYCLK_NOT_INIT 0x0
+#define DSI_PHYCLK_CONTINUOUS 0x1
+#define DSI_PHYCLK_TX_ONLY 0x2
+
+#define DSI_CLK_BURST_NOT_INIT 0x0
+#define DSI_CLK_BURST_NONE_BURST 0x1
+#define DSI_CLK_BURST_BURST_MODE 0x2
+
+#define DSI_DC_STREAM_DISABLE 0x0
+#define DSI_DC_STREAM_ENABLE 0x1
+
+#define DSI_LP_OP_NOT_INIT 0x0
+#define DSI_LP_OP_WRITE 0x1
+#define DSI_LP_OP_READ 0x2
+
+static bool enable_read_debug;
+module_param(enable_read_debug, bool, 0644);
+MODULE_PARM_DESC(enable_read_debug,
+ "Enable to print read fifo and return packet type");
+
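+/* Software view of the current controller state. Each field tracks one
+ * aspect of the link (module init, LP/HS mode, video vs. command mode,
+ * driven mode, clock output/mode/burst, LP operation and DC stream state)
+ * using the DSI_* state defines above.
+ */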
+struct dsi_status {
+ unsigned init:2;
+
+ unsigned lphs:2;
+
+ unsigned vtype:2;
+ unsigned driven:2;
+
+ unsigned clk_out:2;
+ unsigned clk_mode:2;
+ unsigned clk_burst:2;
+
+ unsigned lp_op:2;
+
+ unsigned dc_stream:1;
+};
+
+/* source of video data */
+enum {
+ TEGRA_DSI_DRIVEN_BY_DC,
+ TEGRA_DSI_DRIVEN_BY_HOST,
+};
+
+struct tegra_dc_dsi_data {
+ struct tegra_dc *dc;
+ void __iomem *base;
+ struct resource *base_res;
+
+ struct clk *dc_clk;
+ struct clk *dsi_clk;
+ bool clk_ref;
+
+ struct mutex lock;
+
+ /* data from board info */
+ struct tegra_dsi_out info;
+
+ struct dsi_status status;
+
+ u8 driven_mode;
+ u8 controller_index;
+
+ u8 pixel_scaler_mul;
+ u8 pixel_scaler_div;
+
+ u32 default_shift_clk_div;
+ u32 default_pixel_clk_khz;
+ u32 default_hs_clk_khz;
+
+ u32 shift_clk_div;
+ u32 target_hs_clk_khz;
+ u32 target_lp_clk_khz;
+
+ u32 syncpt_id;
+ u32 syncpt_val;
+
+ u16 current_bit_clk_ns;
+ u32 current_dsi_clk_khz;
+
+ u32 dsi_control_val;
+
+ bool ulpm;
+ bool enabled;
+};
+
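+/* Register offsets of the six packet sequence LO/HI register pairs, in the
+ * same order as the entries of the packet sequence tables below.
+ */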
+const u32 dsi_pkt_seq_reg[NUMOF_PKT_SEQ] = {
+ DSI_PKT_SEQ_0_LO,
+ DSI_PKT_SEQ_0_HI,
+ DSI_PKT_SEQ_1_LO,
+ DSI_PKT_SEQ_1_HI,
+ DSI_PKT_SEQ_2_LO,
+ DSI_PKT_SEQ_2_HI,
+ DSI_PKT_SEQ_3_LO,
+ DSI_PKT_SEQ_3_HI,
+ DSI_PKT_SEQ_4_LO,
+ DSI_PKT_SEQ_4_HI,
+ DSI_PKT_SEQ_5_LO,
+ DSI_PKT_SEQ_5_HI,
+};
+
+const u32 dsi_pkt_seq_video_non_burst_syne[NUMOF_PKT_SEQ] = {
+ PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_VE) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+ PKT_ID2(CMD_HE) | PKT_LEN2(0),
+ PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB) | PKT_LEN4(3) |
+ PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+ PKT_ID2(CMD_HE) | PKT_LEN2(0),
+ PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB) | PKT_LEN4(3) |
+ PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
+};
+
+const u32 dsi_pkt_seq_video_non_burst[NUMOF_PKT_SEQ] = {
+ PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+ PKT_ID2(CMD_RGB) | PKT_LEN2(3),
+ PKT_ID3(CMD_BLNK) | PKT_LEN3(4),
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+ PKT_ID2(CMD_RGB) | PKT_LEN2(3),
+ PKT_ID3(CMD_BLNK) | PKT_LEN3(4),
+};
+
+static const u32 dsi_pkt_seq_video_burst[NUMOF_PKT_SEQ] = {
+ PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
+ PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+ PKT_ID0(CMD_EOT) | PKT_LEN0(7),
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
+ PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+ PKT_ID0(CMD_EOT) | PKT_LEN0(7),
+};
+
+static const u32 dsi_pkt_seq_video_burst_no_eot[NUMOF_PKT_SEQ] = {
+ PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
+ PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+ PKT_ID0(CMD_EOT) | PKT_LEN0(0),
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+ 0,
+ PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
+ PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+ PKT_ID0(CMD_EOT) | PKT_LEN0(0),
+};
+
+/* TODO: verify with hw about this format */
+const u32 dsi_pkt_seq_cmd_mode[NUMOF_PKT_SEQ] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ PKT_ID0(CMD_LONGW) | PKT_LEN0(3) | PKT_ID1(CMD_EOT) | PKT_LEN1(7),
+ 0,
+ 0,
+ 0,
+ PKT_ID0(CMD_LONGW) | PKT_LEN0(3) | PKT_ID1(CMD_EOT) | PKT_LEN1(7),
+ 0,
+};
+
+const u32 init_reg[] = {
+ DSI_INT_ENABLE,
+ DSI_INT_STATUS,
+ DSI_INT_MASK,
+ DSI_INIT_SEQ_DATA_0,
+ DSI_INIT_SEQ_DATA_1,
+ DSI_INIT_SEQ_DATA_2,
+ DSI_INIT_SEQ_DATA_3,
+ DSI_INIT_SEQ_DATA_4,
+ DSI_INIT_SEQ_DATA_5,
+ DSI_INIT_SEQ_DATA_6,
+ DSI_INIT_SEQ_DATA_7,
+ DSI_DCS_CMDS,
+ DSI_PKT_SEQ_0_LO,
+ DSI_PKT_SEQ_1_LO,
+ DSI_PKT_SEQ_2_LO,
+ DSI_PKT_SEQ_3_LO,
+ DSI_PKT_SEQ_4_LO,
+ DSI_PKT_SEQ_5_LO,
+ DSI_PKT_SEQ_0_HI,
+ DSI_PKT_SEQ_1_HI,
+ DSI_PKT_SEQ_2_HI,
+ DSI_PKT_SEQ_3_HI,
+ DSI_PKT_SEQ_4_HI,
+ DSI_PKT_SEQ_5_HI,
+ DSI_CONTROL,
+ DSI_HOST_DSI_CONTROL,
+ DSI_PAD_CONTROL,
+ DSI_PAD_CONTROL_CD,
+ DSI_SOL_DELAY,
+ DSI_MAX_THRESHOLD,
+ DSI_TRIGGER,
+ DSI_TX_CRC,
+ DSI_INIT_SEQ_CONTROL,
+ DSI_PKT_LEN_0_1,
+ DSI_PKT_LEN_2_3,
+ DSI_PKT_LEN_4_5,
+ DSI_PKT_LEN_6_7,
+};
+
+inline unsigned long tegra_dsi_readl(struct tegra_dc_dsi_data *dsi, u32 reg)
+{
+ return readl(dsi->base + reg * 4);
+}
+EXPORT_SYMBOL(tegra_dsi_readl);
+
+inline void tegra_dsi_writel(struct tegra_dc_dsi_data *dsi, u32 val, u32 reg)
+{
+ writel(val, dsi->base + reg * 4);
+}
+EXPORT_SYMBOL(tegra_dsi_writel);
+
+static int tegra_dsi_syncpt(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ int ret;
+
+ ret = 0;
+
+ dsi->syncpt_val = nvhost_syncpt_read(
+ &dsi->dc->ndev->host->syncpt, dsi->syncpt_id);
+
+ val = DSI_INCR_SYNCPT_COND(OP_DONE) |
+ DSI_INCR_SYNCPT_INDX(dsi->syncpt_id);
+ tegra_dsi_writel(dsi, val, DSI_INCR_SYNCPT);
+
+ /* TODO: Use interrupt rather than polling */
+ ret = nvhost_syncpt_wait(&dsi->dc->ndev->host->syncpt,
+ dsi->syncpt_id, dsi->syncpt_val + 1);
+ if (ret < 0) {
+ dev_err(&dsi->dc->ndev->dev, "DSI sync point failure\n");
+ goto fail;
+ }
+
+ (dsi->syncpt_val)++;
+ return 0;
+fail:
+ return ret;
+}
+
+static u32 tegra_dsi_get_hs_clk_rate(struct tegra_dc_dsi_data *dsi)
+{
+ u32 dsi_clock_rate_khz;
+
+ switch (dsi->info.video_burst_mode) {
+ case TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED:
+ /* Calculate DSI HS clock rate for DSI burst mode */
+ dsi_clock_rate_khz = dsi->default_pixel_clk_khz *
+ dsi->shift_clk_div;
+ break;
+ case TEGRA_DSI_VIDEO_NONE_BURST_MODE:
+ case TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END:
+ case TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED:
+ default:
+ /* Clock rate is default DSI clock rate for non-burst mode */
+ dsi_clock_rate_khz = dsi->default_hs_clk_khz;
+ break;
+ }
+
+ return dsi_clock_rate_khz;
+}
+
+static u32 tegra_dsi_get_lp_clk_rate(struct tegra_dc_dsi_data *dsi, u8 lp_op)
+{
+ u32 dsi_clock_rate_khz;
+
+ if (dsi->info.enable_hs_clock_on_lp_cmd_mode)
+ if (dsi->info.hs_clk_in_lp_cmd_mode_freq_khz)
+ dsi_clock_rate_khz =
+ dsi->info.hs_clk_in_lp_cmd_mode_freq_khz;
+ else
+ dsi_clock_rate_khz = tegra_dsi_get_hs_clk_rate(dsi);
+ else
+ if (lp_op == DSI_LP_OP_READ)
+ dsi_clock_rate_khz =
+ dsi->info.lp_read_cmd_mode_freq_khz;
+ else
+ dsi_clock_rate_khz =
+ dsi->info.lp_cmd_mode_freq_khz;
+
+ return dsi_clock_rate_khz;
+}
+
+static u32 tegra_dsi_get_shift_clk_div(struct tegra_dc_dsi_data *dsi)
+{
+ u32 shift_clk_div;
+ u32 max_shift_clk_div;
+ u32 burst_width;
+ u32 burst_width_max;
+
+ /* Start from default_shift_clk_div, which holds the real value
+ * of shift_clk_div.
+ */
+ shift_clk_div = dsi->default_shift_clk_div;
+
+ /* Calculate a shift_clk_div that matches the video_burst_mode. */
+ if (dsi->info.video_burst_mode >=
+ TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED) {
+ /* The max_shift_clk_div is multiplied by 10 to preserve the
+ * fractional part.
+ */
+ if (dsi->info.max_panel_freq_khz >= dsi->default_hs_clk_khz)
+ max_shift_clk_div = dsi->info.max_panel_freq_khz
+ * shift_clk_div * 10 / dsi->default_hs_clk_khz;
+ else
+ max_shift_clk_div = shift_clk_div * 10;
+
+ burst_width = dsi->info.video_burst_mode
+ - TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED;
+ burst_width_max = TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED
+ - TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED;
+
+ shift_clk_div = (max_shift_clk_div - shift_clk_div * 10) *
+ burst_width / (burst_width_max * 10) + shift_clk_div;
+ }
+
+ return shift_clk_div;
+}
+
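+/* One-time software setup: derive the bytes-per-pixel scaling factors,
+ * the default shift_clk_div and the default/target pixel, LP and HS clock
+ * rates from the board info and the current DC mode.
+ */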
+static void tegra_dsi_init_sw(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 h_width_pixels;
+ u32 v_width_lines;
+ u32 pixel_clk_hz;
+ u32 byte_clk_hz;
+ u32 plld_clk_mhz;
+
+ switch (dsi->info.pixel_format) {
+ case TEGRA_DSI_PIXEL_FORMAT_16BIT_P:
+ /* 2 bytes per pixel */
+ dsi->pixel_scaler_mul = 2;
+ dsi->pixel_scaler_div = 1;
+ break;
+ case TEGRA_DSI_PIXEL_FORMAT_18BIT_P:
+ /* 2.25 bytes per pixel */
+ dsi->pixel_scaler_mul = 9;
+ dsi->pixel_scaler_div = 4;
+ break;
+ case TEGRA_DSI_PIXEL_FORMAT_18BIT_NP:
+ case TEGRA_DSI_PIXEL_FORMAT_24BIT_P:
+ /* 3 bytes per pixel */
+ dsi->pixel_scaler_mul = 3;
+ dsi->pixel_scaler_div = 1;
+ break;
+ default:
+ break;
+ }
+
+ dsi->controller_index = dc->ndev->id;
+ dsi->ulpm = false;
+ dsi->enabled = false;
+ dsi->clk_ref = false;
+
+ dsi->dsi_control_val =
+ DSI_CONTROL_VIRTUAL_CHANNEL(dsi->info.virtual_channel) |
+ DSI_CONTROL_NUM_DATA_LANES(dsi->info.n_data_lanes - 1) |
+ DSI_CONTROL_VID_SOURCE(dsi->controller_index) |
+ DSI_CONTROL_DATA_FORMAT(dsi->info.pixel_format);
+
+ /* Below we are going to calculate the DSI and DC clock rates.
+ * Calculate the horizontal and vertical width first.
+ */
+ h_width_pixels = dc->mode.h_back_porch + dc->mode.h_front_porch +
+ dc->mode.h_sync_width + dc->mode.h_active;
+ v_width_lines = dc->mode.v_back_porch + dc->mode.v_front_porch +
+ dc->mode.v_sync_width + dc->mode.v_active;
+
+ /* Calculate minimum required pixel rate. */
+ pixel_clk_hz = h_width_pixels * v_width_lines * dsi->info.refresh_rate;
+
+ dc->pixel_clk = pixel_clk_hz;
+
+ /* Calculate minimum byte rate on DSI interface. */
+ byte_clk_hz = (pixel_clk_hz * dsi->pixel_scaler_mul) /
+ (dsi->pixel_scaler_div * dsi->info.n_data_lanes);
+
+ /* Round up to a whole number of MHz. */
+ plld_clk_mhz = DIV_ROUND_UP((byte_clk_hz * NUMOF_BIT_PER_BYTE),
+ 1000000);
+
+ /* Calculate default real shift_clk_div. */
+ dsi->default_shift_clk_div = (NUMOF_BIT_PER_BYTE / 2) *
+ dsi->pixel_scaler_mul / (dsi->pixel_scaler_div *
+ dsi->info.n_data_lanes);
+ /* Calculate the default DSI HS clock. The DSI interface is double data
+ * rate: data is transferred on both the rising and falling edges of the
+ * clock, so divide by 2 to get the actual clock rate.
+ */
+ dsi->default_hs_clk_khz = plld_clk_mhz * 1000 / 2;
+ dsi->default_pixel_clk_khz = plld_clk_mhz * 1000 / 2
+ / dsi->default_shift_clk_div;
+
+ /* Get the actual shift_clk_div and clock rates. */
+ dsi->shift_clk_div = tegra_dsi_get_shift_clk_div(dsi);
+ dsi->target_lp_clk_khz =
+ tegra_dsi_get_lp_clk_rate(dsi, DSI_LP_OP_WRITE);
+ dsi->target_hs_clk_khz = tegra_dsi_get_hs_clk_rate(dsi);
+
+ dev_info(&dc->ndev->dev, "DSI: HS clock rate is %d\n",
+ dsi->target_hs_clk_khz);
+
+ dsi->controller_index = dc->ndev->id;
+
+#if DSI_USE_SYNC_POINTS
+ dsi->syncpt_id = NVSYNCPT_DSI;
+#endif
+
+ /*
+ * Force video clock to be continuous mode if
+ * enable_hs_clock_on_lp_cmd_mode is set
+ */
+ if (dsi->info.enable_hs_clock_on_lp_cmd_mode) {
+ if (dsi->info.video_clock_mode !=
+ TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS)
+ dev_warn(&dc->ndev->dev,
+ "Force clock continuous mode\n");
+
+ dsi->info.video_clock_mode = TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS;
+ }
+
+}
+
+static void tegra_dsi_get_phy_timing(struct tegra_dc_dsi_data *dsi,
+ struct dsi_phy_timing_inclk *phy_timing_clk,
+ u32 clk_ns)
+{
+
+ phy_timing_clk->t_hsdexit = dsi->info.phy_timing.t_hsdexit_ns ?
+ (dsi->info.phy_timing.t_hsdexit_ns / clk_ns) :
+ (T_HSEXIT_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_hstrail = dsi->info.phy_timing.t_hstrail_ns ?
+ (dsi->info.phy_timing.t_hstrail_ns / clk_ns) :
+ (T_HSTRAIL_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_datzero = dsi->info.phy_timing.t_datzero_ns ?
+ (dsi->info.phy_timing.t_datzero_ns / clk_ns) :
+ (T_DATZERO_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_hsprepr = dsi->info.phy_timing.t_hsprepr_ns ?
+ (dsi->info.phy_timing.t_hsprepr_ns / clk_ns) :
+ (T_HSPREPR_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_clktrail = dsi->info.phy_timing.t_clktrail_ns ?
+ (dsi->info.phy_timing.t_clktrail_ns / clk_ns) :
+ (T_CLKTRAIL_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_clkpost = dsi->info.phy_timing.t_clkpost_ns ?
+ (dsi->info.phy_timing.t_clkpost_ns / clk_ns) :
+ (T_CLKPOST_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_clkzero = dsi->info.phy_timing.t_clkzero_ns ?
+ (dsi->info.phy_timing.t_clkzero_ns / clk_ns) :
+ (T_CLKZERO_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_tlpx = dsi->info.phy_timing.t_tlpx_ns ?
+ (dsi->info.phy_timing.t_tlpx_ns / clk_ns) :
+ (T_TLPX_DEFAULT(clk_ns));
+
+ phy_timing_clk->t_clkpre = T_CLKPRE_DEFAULT(clk_ns);
+ phy_timing_clk->t_clkprepare = T_CLKPREPARE_DEFAULT(clk_ns);
+ phy_timing_clk->t_wakeup = T_WAKEUP_DEFAULT(clk_ns);
+
+ phy_timing_clk->t_taget = 5 * phy_timing_clk->t_tlpx;
+ phy_timing_clk->t_tasure = 2 * phy_timing_clk->t_tlpx;
+ phy_timing_clk->t_tago = 4 * phy_timing_clk->t_tlpx;
+}
+
+static void tegra_dsi_set_phy_timing(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ struct dsi_phy_timing_inclk phy_timing;
+
+ tegra_dsi_get_phy_timing(dsi, &phy_timing, dsi->current_bit_clk_ns);
+
+ val = DSI_PHY_TIMING_0_THSDEXIT(phy_timing.t_hsdexit) |
+ DSI_PHY_TIMING_0_THSTRAIL(phy_timing.t_hstrail) |
+ DSI_PHY_TIMING_0_TDATZERO(phy_timing.t_datzero) |
+ DSI_PHY_TIMING_0_THSPREPR(phy_timing.t_hsprepr);
+ tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_0);
+
+ val = DSI_PHY_TIMING_1_TCLKTRAIL(phy_timing.t_clktrail) |
+ DSI_PHY_TIMING_1_TCLKPOST(phy_timing.t_clkpost) |
+ DSI_PHY_TIMING_1_TCLKZERO(phy_timing.t_clkzero) |
+ DSI_PHY_TIMING_1_TTLPX(phy_timing.t_tlpx);
+ tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_1);
+
+ val = DSI_PHY_TIMING_2_TCLKPREPARE(phy_timing.t_clkprepare) |
+ DSI_PHY_TIMING_2_TCLKPRE(phy_timing.t_clkpre) |
+ DSI_PHY_TIMING_2_TWAKEUP(phy_timing.t_wakeup);
+ tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_2);
+
+ val = DSI_BTA_TIMING_TTAGET(phy_timing.t_taget) |
+ DSI_BTA_TIMING_TTASURE(phy_timing.t_tasure) |
+ DSI_BTA_TIMING_TTAGO(phy_timing.t_tago);
+ tegra_dsi_writel(dsi, val, DSI_BTA_TIMING);
+}
+
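+/* Compute the start-of-line delay for burst mode from the DSI-to-pixel
+ * clock ratio and the horizontal timings, rounded to byte clocks. If the
+ * result exceeds the assumed SOL FIFO depth, it is clamped and the target
+ * HS clock is adjusted instead.
+ */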
+static u32 tegra_dsi_sol_delay_burst(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 dsi_to_pixel_clk_ratio;
+ u32 temp;
+ u32 temp1;
+ u32 mipi_clk_adj_kHz;
+ u32 sol_delay;
+ struct tegra_dc_mode *dc_modes = &dc->mode;
+
+ /* Get the Fdsi/Fpixel ratio (note: Fdsi is in bit format) */
+ dsi_to_pixel_clk_ratio = (dsi->current_dsi_clk_khz * 2 +
+ dsi->default_pixel_clk_khz - 1) / dsi->default_pixel_clk_khz;
+
+ /* Convert Fdsi to byte format */
+ dsi_to_pixel_clk_ratio *= 1000/8;
+
+ /* Multiply by 1000 so that we don't lose the fractional part */
+ temp = dc_modes->h_active * 1000;
+ temp1 = dc_modes->h_active + dc_modes->h_back_porch +
+ dc_modes->h_sync_width;
+
+ sol_delay = temp1 * dsi_to_pixel_clk_ratio -
+ temp * dsi->pixel_scaler_mul /
+ (dsi->pixel_scaler_div * dsi->info.n_data_lanes);
+
+ /* Do rounding on sol delay */
+ sol_delay = (sol_delay + 1000 - 1)/1000;
+
+ /* TODO:
+ * 1. find out the correct sol fifo depth to use
+ * 2. verify with hw about the clamping function
+ */
+ if (sol_delay > (480 * 4)) {
+ sol_delay = (480 * 4);
+ mipi_clk_adj_kHz = sol_delay +
+ (dc_modes->h_active * dsi->pixel_scaler_mul) /
+ (dsi->info.n_data_lanes * dsi->pixel_scaler_div);
+
+ mipi_clk_adj_kHz *= (dsi->default_pixel_clk_khz / temp1);
+
+ mipi_clk_adj_kHz *= 4;
+
+ /* Only adjust the target HS clock when sol_delay was clamped;
+ * mipi_clk_adj_kHz is not computed otherwise.
+ */
+ dsi->target_hs_clk_khz = mipi_clk_adj_kHz;
+ }
+
+ return sol_delay;
+}
+
+static void tegra_dsi_set_sol_delay(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 sol_delay;
+
+ if (dsi->info.video_burst_mode == TEGRA_DSI_VIDEO_NONE_BURST_MODE ||
+ dsi->info.video_burst_mode ==
+ TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END) {
+ sol_delay = NUMOF_BIT_PER_BYTE * dsi->pixel_scaler_mul /
+ (dsi->pixel_scaler_div * dsi->info.n_data_lanes);
+ dsi->status.clk_burst = DSI_CLK_BURST_NONE_BURST;
+ } else {
+ sol_delay = tegra_dsi_sol_delay_burst(dc, dsi);
+ dsi->status.clk_burst = DSI_CLK_BURST_BURST_MODE;
+ }
+
+ tegra_dsi_writel(dsi, DSI_SOL_DELAY_SOL_DELAY(sol_delay),
+ DSI_SOL_DELAY);
+}
+
+static void tegra_dsi_set_timeout(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ u32 bytes_per_frame;
+ u32 timeout = 0;
+
+ /* TODO: verify the following equation */
+ bytes_per_frame = dsi->current_dsi_clk_khz * 1000 * 2 /
+ (dsi->info.refresh_rate * 8);
+ timeout = bytes_per_frame / DSI_CYCLE_COUNTER_VALUE;
+ timeout = (timeout + DSI_HTX_TO_MARGIN) & 0xffff;
+
+ val = DSI_TIMEOUT_0_LRXH_TO(DSI_LRXH_TO_VALUE) |
+ DSI_TIMEOUT_0_HTX_TO(timeout);
+ tegra_dsi_writel(dsi, val, DSI_TIMEOUT_0);
+
+ if (dsi->info.panel_reset_timeout_msec)
+ timeout = (dsi->info.panel_reset_timeout_msec * 1000*1000)
+ / dsi->current_bit_clk_ns;
+ else
+ timeout = DSI_PR_TO_VALUE;
+
+ val = DSI_TIMEOUT_1_PR_TO(timeout) |
+ DSI_TIMEOUT_1_TA_TO(DSI_TA_TO_VALUE);
+ tegra_dsi_writel(dsi, val, DSI_TIMEOUT_1);
+
+ val = DSI_TO_TALLY_P_RESET_STATUS(IN_RESET) |
+ DSI_TO_TALLY_TA_TALLY(DSI_TA_TALLY_VALUE)|
+ DSI_TO_TALLY_LRXH_TALLY(DSI_LRXH_TALLY_VALUE)|
+ DSI_TO_TALLY_HTX_TALLY(DSI_HTX_TALLY_VALUE);
+ tegra_dsi_writel(dsi, val, DSI_TO_TALLY);
+}
+
+static void tegra_dsi_setup_video_mode_pkt_length(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ u32 hact_pkt_len;
+ u32 hsa_pkt_len;
+ u32 hbp_pkt_len;
+ u32 hfp_pkt_len;
+
+ hact_pkt_len = dc->mode.h_active * dsi->pixel_scaler_mul /
+ dsi->pixel_scaler_div;
+ hsa_pkt_len = dc->mode.h_sync_width * dsi->pixel_scaler_mul /
+ dsi->pixel_scaler_div;
+ hbp_pkt_len = dc->mode.h_back_porch * dsi->pixel_scaler_mul /
+ dsi->pixel_scaler_div;
+ hfp_pkt_len = dc->mode.h_front_porch * dsi->pixel_scaler_mul /
+ dsi->pixel_scaler_div;
+
+ if (dsi->info.video_burst_mode !=
+ TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END)
+ hbp_pkt_len += hsa_pkt_len;
+
+ hsa_pkt_len -= DSI_HSYNC_BLNK_PKT_OVERHEAD;
+ hbp_pkt_len -= DSI_HBACK_PORCH_PKT_OVERHEAD;
+ hfp_pkt_len -= DSI_HFRONT_PORCH_PKT_OVERHEAD;
+
+ val = DSI_PKT_LEN_0_1_LENGTH_0(0) |
+ DSI_PKT_LEN_0_1_LENGTH_1(hsa_pkt_len);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_0_1);
+
+ val = DSI_PKT_LEN_2_3_LENGTH_2(hbp_pkt_len) |
+ DSI_PKT_LEN_2_3_LENGTH_3(hact_pkt_len);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_2_3);
+
+ val = DSI_PKT_LEN_4_5_LENGTH_4(hfp_pkt_len) |
+ DSI_PKT_LEN_4_5_LENGTH_5(0);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_4_5);
+
+ val = DSI_PKT_LEN_6_7_LENGTH_6(0) | DSI_PKT_LEN_6_7_LENGTH_7(0);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_6_7);
+}
+
+static void tegra_dsi_setup_cmd_mode_pkt_length(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ unsigned long val;
+ unsigned long act_bytes;
+
+ act_bytes = dc->mode.h_active * dsi->pixel_scaler_mul /
+ dsi->pixel_scaler_div + 1;
+
+ val = DSI_PKT_LEN_0_1_LENGTH_0(0) | DSI_PKT_LEN_0_1_LENGTH_1(0);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_0_1);
+
+ val = DSI_PKT_LEN_2_3_LENGTH_2(0) | DSI_PKT_LEN_2_3_LENGTH_3(act_bytes);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_2_3);
+
+ val = DSI_PKT_LEN_4_5_LENGTH_4(0) | DSI_PKT_LEN_4_5_LENGTH_5(act_bytes);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_4_5);
+
+ val = DSI_PKT_LEN_6_7_LENGTH_6(0) | DSI_PKT_LEN_6_7_LENGTH_7(0x0f0f);
+ tegra_dsi_writel(dsi, val, DSI_PKT_LEN_6_7);
+}
+
+static void tegra_dsi_set_pkt_length(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST)
+ return;
+
+ if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_VIDEO_MODE)
+ tegra_dsi_setup_video_mode_pkt_length(dc, dsi);
+ else
+ tegra_dsi_setup_cmd_mode_pkt_length(dc, dsi);
+}
+
+static void tegra_dsi_set_pkt_seq(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ const u32 *pkt_seq;
+ u32 rgb_info;
+ u32 pkt_seq_3_5_rgb_lo;
+ u32 pkt_seq_3_5_rgb_hi;
+ u32 val;
+ u32 reg;
+ u8 i;
+
+ if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST)
+ return;
+
+ switch (dsi->info.pixel_format) {
+ case TEGRA_DSI_PIXEL_FORMAT_16BIT_P:
+ rgb_info = CMD_RGB_16BPP;
+ break;
+ case TEGRA_DSI_PIXEL_FORMAT_18BIT_P:
+ rgb_info = CMD_RGB_18BPP;
+ break;
+ case TEGRA_DSI_PIXEL_FORMAT_18BIT_NP:
+ rgb_info = CMD_RGB_18BPPNP;
+ break;
+ case TEGRA_DSI_PIXEL_FORMAT_24BIT_P:
+ default:
+ rgb_info = CMD_RGB_24BPP;
+ break;
+ }
+
+ pkt_seq_3_5_rgb_lo = 0;
+ pkt_seq_3_5_rgb_hi = 0;
+ if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE)
+ pkt_seq = dsi_pkt_seq_cmd_mode;
+ else {
+ switch (dsi->info.video_burst_mode) {
+ case TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED:
+ case TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED:
+ pkt_seq_3_5_rgb_lo =
+ DSI_PKT_SEQ_3_LO_PKT_32_ID(rgb_info);
+ if (!dsi->info.no_pkt_seq_eot)
+ pkt_seq = dsi_pkt_seq_video_burst;
+ else
+ pkt_seq = dsi_pkt_seq_video_burst_no_eot;
+ break;
+ case TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END:
+ pkt_seq_3_5_rgb_hi =
+ DSI_PKT_SEQ_3_HI_PKT_34_ID(rgb_info);
+ pkt_seq = dsi_pkt_seq_video_non_burst_syne;
+ break;
+ case TEGRA_DSI_VIDEO_NONE_BURST_MODE:
+ default:
+ pkt_seq_3_5_rgb_lo =
+ DSI_PKT_SEQ_3_LO_PKT_32_ID(rgb_info);
+ pkt_seq = dsi_pkt_seq_video_non_burst;
+ break;
+ }
+ }
+
+ for (i = 0; i < NUMOF_PKT_SEQ; i++) {
+ val = pkt_seq[i];
+ reg = dsi_pkt_seq_reg[i];
+ if ((reg == DSI_PKT_SEQ_3_LO) || (reg == DSI_PKT_SEQ_5_LO))
+ val |= pkt_seq_3_5_rgb_lo;
+ if ((reg == DSI_PKT_SEQ_3_HI) || (reg == DSI_PKT_SEQ_5_HI))
+ val |= pkt_seq_3_5_rgb_hi;
+ tegra_dsi_writel(dsi, val, reg);
+ }
+}
+
+static void tegra_dsi_stop_dc_stream(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, 0, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ , DC_CMD_STATE_CONTROL);
+
+ dsi->status.dc_stream = DSI_DC_STREAM_DISABLE;
+}
+
+static void tegra_dsi_stop_dc_stream_at_frame_end(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ int val;
+ long timeout;
+ u32 frame_period = DIV_ROUND_UP(S_TO_MS(1), dsi->info.refresh_rate);
+
+ /* stop dc */
+ tegra_dsi_stop_dc_stream(dc, dsi);
+
+ /* enable frame end interrupt */
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val |= FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+
+ /* Wait for frame_end completion.
+ * The timeout is two frame durations to accommodate
+ * internal delays.
+ */
+ timeout = wait_for_completion_interruptible_timeout(
+ &dc->frame_end_complete,
+ msecs_to_jiffies(2 * frame_period));
+
+ /* disable frame end interrupt */
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val &= ~FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+
+ if (timeout == 0)
+ dev_warn(&dc->ndev->dev,
+ "DC doesn't stop at end of frame.\n");
+}
+
+static void tegra_dsi_start_dc_stream(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+
+ tegra_dc_writel(dc, DSI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+ /* TODO: clean up */
+ tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+ DC_CMD_DISPLAY_POWER_CONTROL);
+
+ /* Configure one-shot mode or continuous mode */
+ if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
+ /* disable LSPI/LCD_DE output */
+ val = PIN_OUTPUT_LSPI_OUTPUT_DIS;
+ tegra_dc_writel(dc, val, DC_COM_PIN_OUTPUT_ENABLE3);
+
+ /* enable MSF & set MSF polarity */
+ val = MSF_ENABLE | MSF_LSPI;
+ if (!dsi->info.te_polarity_low)
+ val |= MSF_POLARITY_HIGH;
+ else
+ val |= MSF_POLARITY_LOW;
+ tegra_dc_writel(dc, val, DC_CMD_DISPLAY_COMMAND_OPTION0);
+
+ /* set non-continuous mode */
+ tegra_dc_writel(dc, DISP_CTRL_MODE_NC_DISPLAY,
+ DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ | NC_HOST_TRIG,
+ DC_CMD_STATE_CONTROL);
+ } else {
+ /* set continuous mode */
+ tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY,
+ DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ }
+
+ dsi->status.dc_stream = DSI_DC_STREAM_ENABLE;
+}
+
+static void tegra_dsi_set_dc_clk(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 shift_clk_div_register;
+ u32 val;
+
+ /* Get the corresponding register value of shift_clk_div. */
+ shift_clk_div_register = dsi->shift_clk_div * 2 - 2;
+
+#ifndef CONFIG_TEGRA_SILICON_PLATFORM
+ shift_clk_div_register = 1;
+#endif
+
+ /* TODO: find out if PCD3 option is required */
+ val = PIXEL_CLK_DIVIDER_PCD1 |
+ SHIFT_CLK_DIVIDER(shift_clk_div_register);
+ tegra_dc_writel(dc, val, DC_DISP_DISP_CLOCK_CONTROL);
+}
+
+static void tegra_dsi_set_dsi_clk(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi, u32 clk)
+{
+ u32 rm;
+
+ rm = clk % 1000;
+ if (rm != 0)
+ clk -= rm;
+
+ dc->mode.pclk = clk*1000;
+ tegra_dc_setup_clk(dc, dsi->dsi_clk);
+ if (dsi->clk_ref == true)
+ clk_disable(dsi->dsi_clk);
+ else
+ dsi->clk_ref = true;
+ clk_enable(dsi->dsi_clk);
+ tegra_periph_reset_deassert(dsi->dsi_clk);
+
+ dsi->current_dsi_clk_khz = clk_get_rate(dsi->dsi_clk) / 1000;
+
+ dsi->current_bit_clk_ns = 1000*1000 / (dsi->current_dsi_clk_khz * 2);
+}
+
+static void tegra_dsi_hs_clk_out_enable(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+
+ val = tegra_dsi_readl(dsi, DSI_CONTROL);
+ val &= ~DSI_CONTROL_HS_CLK_CTRL(1);
+
+ if (dsi->info.video_clock_mode == TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS) {
+ val |= DSI_CONTROL_HS_CLK_CTRL(CONTINUOUS);
+ dsi->status.clk_mode = DSI_PHYCLK_CONTINUOUS;
+ } else {
+ val |= DSI_CONTROL_HS_CLK_CTRL(TX_ONLY);
+ dsi->status.clk_mode = DSI_PHYCLK_TX_ONLY;
+ }
+ tegra_dsi_writel(dsi, val, DSI_CONTROL);
+
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
+ val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_HIGH);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+ dsi->status.clk_out = DSI_PHYCLK_OUT_EN;
+}
+
+static void tegra_dsi_hs_clk_out_enable_in_lp(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ tegra_dsi_hs_clk_out_enable(dsi);
+
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
+ val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+}
+
+static void tegra_dsi_hs_clk_out_disable(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+ val = tegra_dsi_readl(dsi, DSI_CONTROL);
+ val &= ~DSI_CONTROL_HS_CLK_CTRL(1);
+ val |= DSI_CONTROL_HS_CLK_CTRL(TX_ONLY);
+ tegra_dsi_writel(dsi, val, DSI_CONTROL);
+
+ /* TODO: issue a cmd */
+
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
+ val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+ dsi->status.clk_mode = DSI_PHYCLK_NOT_INIT;
+ dsi->status.clk_out = DSI_PHYCLK_OUT_DIS;
+}
+
+static void tegra_dsi_set_control_reg_lp(struct tegra_dc_dsi_data *dsi)
+{
+ u32 dsi_control;
+ u32 host_dsi_control;
+ u32 max_threshold;
+
+ dsi_control = dsi->dsi_control_val | DSI_CTRL_HOST_DRIVEN;
+ host_dsi_control = HOST_DSI_CTRL_COMMON |
+ HOST_DSI_CTRL_HOST_DRIVEN |
+ DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
+ max_threshold = DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_HOST_FIFO_DEPTH);
+
+ tegra_dsi_writel(dsi, max_threshold, DSI_MAX_THRESHOLD);
+ tegra_dsi_writel(dsi, dsi_control, DSI_CONTROL);
+ tegra_dsi_writel(dsi, host_dsi_control, DSI_HOST_DSI_CONTROL);
+
+ dsi->status.driven = DSI_DRIVEN_MODE_HOST;
+ dsi->status.clk_burst = DSI_CLK_BURST_NOT_INIT;
+ dsi->status.vtype = DSI_VIDEO_TYPE_NOT_INIT;
+}
+
+static void tegra_dsi_set_control_reg_hs(struct tegra_dc_dsi_data *dsi)
+{
+ u32 dsi_control;
+ u32 host_dsi_control;
+ u32 max_threshold;
+ u32 dcs_cmd;
+
+ dsi_control = dsi->dsi_control_val;
+ host_dsi_control = HOST_DSI_CTRL_COMMON;
+ max_threshold = 0;
+ dcs_cmd = 0;
+
+ if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST) {
+ dsi_control |= DSI_CTRL_HOST_DRIVEN;
+ host_dsi_control |= HOST_DSI_CTRL_HOST_DRIVEN;
+ max_threshold =
+ DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_HOST_FIFO_DEPTH);
+ dsi->status.driven = DSI_DRIVEN_MODE_HOST;
+ } else {
+ dsi_control |= DSI_CTRL_DC_DRIVEN;
+ host_dsi_control |= HOST_DSI_CTRL_DC_DRIVEN;
+ max_threshold =
+ DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_VIDEO_FIFO_DEPTH);
+ dsi->status.driven = DSI_DRIVEN_MODE_DC;
+ }
+
+ if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE) {
+ dsi_control |= DSI_CTRL_CMD_MODE;
+ dcs_cmd = DSI_DCS_CMDS_LT5_DCS_CMD(DSI_WRITE_MEMORY_START)|
+ DSI_DCS_CMDS_LT3_DCS_CMD(DSI_WRITE_MEMORY_CONTINUE);
+ dsi->status.vtype = DSI_VIDEO_TYPE_CMD_MODE;
+
+ } else {
+ dsi_control |= DSI_CTRL_VIDEO_MODE;
+ dsi->status.vtype = DSI_VIDEO_TYPE_VIDEO_MODE;
+ }
+
+ tegra_dsi_writel(dsi, max_threshold, DSI_MAX_THRESHOLD);
+ tegra_dsi_writel(dsi, dcs_cmd, DSI_DCS_CMDS);
+ tegra_dsi_writel(dsi, dsi_control, DSI_CONTROL);
+ tegra_dsi_writel(dsi, host_dsi_control, DSI_HOST_DSI_CONTROL);
+}
+
+static void tegra_dsi_pad_calibration(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+
+ val = DSI_PAD_CONTROL_PAD_LPUPADJ(0x1) |
+ DSI_PAD_CONTROL_PAD_LPDNADJ(0x1) |
+ DSI_PAD_CONTROL_PAD_PREEMP_EN(0x1) |
+ DSI_PAD_CONTROL_PAD_SLEWDNADJ(0x6) |
+ DSI_PAD_CONTROL_PAD_SLEWUPADJ(0x6);
+ if (!dsi->ulpm) {
+ val |= DSI_PAD_CONTROL_PAD_PDIO(0) |
+ DSI_PAD_CONTROL_PAD_PDIO_CLK(0) |
+ DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_DISABLE);
+ } else {
+ val |= DSI_PAD_CONTROL_PAD_PDIO(0x3) |
+ DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
+ DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_ENABLE);
+ }
+ tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+
+ val = MIPI_CAL_TERMOSA(0x4);
+ tegra_vi_csi_writel(val, CSI_CILA_MIPI_CAL_CONFIG_0);
+
+ val = MIPI_CAL_TERMOSB(0x4);
+ tegra_vi_csi_writel(val, CSI_CILB_MIPI_CAL_CONFIG_0);
+
+ val = MIPI_CAL_HSPUOSD(0x3) | MIPI_CAL_HSPDOSD(0x4);
+ tegra_vi_csi_writel(val, CSI_DSI_MIPI_CAL_CONFIG);
+
+ val = PAD_DRIV_DN_REF(0x5) | PAD_DRIV_UP_REF(0x7);
+ tegra_vi_csi_writel(val, CSI_MIPIBIAS_PAD_CONFIG);
+}
+
+static int tegra_dsi_init_hw(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ u32 i;
+
+ tegra_dsi_writel(dsi,
+ DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE),
+ DSI_POWER_CONTROL);
+ /* stabilization delay */
+ udelay(300);
+
+ tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_lp_clk_khz);
+ if (dsi->info.dsi_instance) {
+ /* TODO: set the misc register */
+ }
+
+ /* TODO: only need to change the timing for bta */
+ tegra_dsi_set_phy_timing(dsi);
+
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+ /* Initializing DSI registers */
+ for (i = 0; i < ARRAY_SIZE(init_reg); i++)
+ tegra_dsi_writel(dsi, 0, init_reg[i]);
+
+ tegra_dsi_writel(dsi, dsi->dsi_control_val, DSI_CONTROL);
+
+ tegra_dsi_pad_calibration(dsi);
+
+ tegra_dsi_writel(dsi,
+ DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
+ DSI_POWER_CONTROL);
+ /* stabilization delay */
+ udelay(300);
+
+ dsi->status.init = DSI_MODULE_INIT;
+ dsi->status.lphs = DSI_LPHS_NOT_INIT;
+ dsi->status.vtype = DSI_VIDEO_TYPE_NOT_INIT;
+ dsi->status.driven = DSI_DRIVEN_MODE_NOT_INIT;
+ dsi->status.clk_out = DSI_PHYCLK_OUT_DIS;
+ dsi->status.clk_mode = DSI_PHYCLK_NOT_INIT;
+ dsi->status.clk_burst = DSI_CLK_BURST_NOT_INIT;
+ dsi->status.dc_stream = DSI_DC_STREAM_DISABLE;
+ dsi->status.lp_op = DSI_LP_OP_NOT_INIT;
+
+ return 0;
+}
+
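+/* Put the link into low power mode for the requested operation (read or
+ * write): stop the DC stream if needed, reprogram the DSI clock and
+ * timeouts, and write the LP control registers.
+ */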
+static int tegra_dsi_set_to_lp_mode(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi, u8 lp_op)
+{
+ int err;
+
+ if (dsi->status.init != DSI_MODULE_INIT) {
+ err = -EPERM;
+ goto fail;
+ }
+
+ if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE &&
+ dsi->status.lp_op == lp_op)
+ goto success;
+
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+ /* disable/enable hs clk according to enable_hs_clock_on_lp_cmd_mode */
+ if ((dsi->status.clk_out == DSI_PHYCLK_OUT_EN) &&
+ (!dsi->info.enable_hs_clock_on_lp_cmd_mode))
+ tegra_dsi_hs_clk_out_disable(dc, dsi);
+
+ dsi->target_lp_clk_khz = tegra_dsi_get_lp_clk_rate(dsi, lp_op);
+ if (dsi->current_dsi_clk_khz != dsi->target_lp_clk_khz) {
+ tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_lp_clk_khz);
+ tegra_dsi_set_timeout(dsi);
+ }
+
+ tegra_dsi_set_control_reg_lp(dsi);
+
+ if ((dsi->status.clk_out == DSI_PHYCLK_OUT_DIS) &&
+ (dsi->info.enable_hs_clock_on_lp_cmd_mode))
+ tegra_dsi_hs_clk_out_enable_in_lp(dsi);
+
+ dsi->status.lphs = DSI_LPHS_IN_LP_MODE;
+ dsi->status.lp_op = lp_op;
+success:
+ err = 0;
+fail:
+ return err;
+}
+
+static int tegra_dsi_set_to_hs_mode(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ int err;
+
+ if (dsi->status.init != DSI_MODULE_INIT) {
+ err = -EPERM;
+ goto fail;
+ }
+
+ if (dsi->status.lphs == DSI_LPHS_IN_HS_MODE)
+ goto success;
+
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+ if ((dsi->status.clk_out == DSI_PHYCLK_OUT_EN) &&
+ (!dsi->info.enable_hs_clock_on_lp_cmd_mode))
+ tegra_dsi_hs_clk_out_disable(dc, dsi);
+
+ if (dsi->current_dsi_clk_khz != dsi->target_hs_clk_khz) {
+ tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_hs_clk_khz);
+ tegra_dsi_set_timeout(dsi);
+ }
+
+ tegra_dsi_set_phy_timing(dsi);
+
+ if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_DC) {
+ tegra_dsi_set_pkt_seq(dc, dsi);
+ tegra_dsi_set_pkt_length(dc, dsi);
+ tegra_dsi_set_sol_delay(dc, dsi);
+ tegra_dsi_set_dc_clk(dc, dsi);
+ }
+
+ tegra_dsi_set_control_reg_hs(dsi);
+
+ if (dsi->status.clk_out == DSI_PHYCLK_OUT_DIS ||
+ dsi->info.enable_hs_clock_on_lp_cmd_mode)
+ tegra_dsi_hs_clk_out_enable(dsi);
+
+ dsi->status.lphs = DSI_LPHS_IN_HS_MODE;
+success:
+ dsi->status.lp_op = DSI_LP_OP_NOT_INIT;
+ err = 0;
+fail:
+ return err;
+}
+
+static bool tegra_dsi_write_busy(struct tegra_dc_dsi_data *dsi)
+{
+ u32 timeout = 0;
+ bool retVal = true;
+
+ while (timeout <= DSI_MAX_COMMAND_DELAY_USEC) {
+ if (!(DSI_TRIGGER_HOST_TRIGGER(0x1) &
+ tegra_dsi_readl(dsi, DSI_TRIGGER))) {
+ retVal = false;
+ break;
+ }
+ udelay(DSI_COMMAND_DELAY_STEPS_USEC);
+ timeout += DSI_COMMAND_DELAY_STEPS_USEC;
+ }
+
+ return retVal;
+}
+
+static bool tegra_dsi_read_busy(struct tegra_dc_dsi_data *dsi)
+{
+ u32 timeout = 0;
+ bool retVal = true;
+
+ while (timeout < DSI_STATUS_POLLING_DURATION_USEC) {
+ if (!(DSI_HOST_DSI_CONTROL_IMM_BTA(0x1) &
+ tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL))) {
+ retVal = false;
+ break;
+ }
+ udelay(DSI_STATUS_POLLING_DELAY_USEC);
+ timeout += DSI_STATUS_POLLING_DELAY_USEC;
+ }
+
+ return retVal;
+}
+
+static bool tegra_dsi_host_busy(struct tegra_dc_dsi_data *dsi)
+{
+ int err = 0;
+
+ if (tegra_dsi_write_busy(dsi)) {
+ err = -EBUSY;
+ dev_err(&dsi->dc->ndev->dev,
+ "DSI trigger bit already set\n");
+ goto fail;
+ }
+
+ if (tegra_dsi_read_busy(dsi)) {
+ err = -EBUSY;
+ dev_err(&dsi->dc->ndev->dev,
+ "DSI immediate bta bit already set\n");
+ goto fail;
+ }
+fail:
+ return err;
+}
+
+static void tegra_dsi_reset_underflow_overflow
+ (struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+
+ val = tegra_dsi_readl(dsi, DSI_STATUS);
+ val &= (DSI_STATUS_LB_OVERFLOW(0x1) | DSI_STATUS_LB_UNDERFLOW(0x1));
+ if (val) {
+ dev_warn(&dsi->dc->ndev->dev, "Reset overflow/underflow\n");
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val |= DSI_HOST_CONTROL_FIFO_STAT_RESET(0x1);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+ ndelay(200);
+ }
+}
+
+static void tegra_dsi_soft_reset(struct tegra_dc_dsi_data *dsi)
+{
+ tegra_dsi_writel(dsi,
+ DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE),
+ DSI_POWER_CONTROL);
+ /* stabilization delay */
+ udelay(300);
+
+ tegra_dsi_writel(dsi,
+ DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
+ DSI_POWER_CONTROL);
+ /* stabilization delay */
+ udelay(300);
+}
+
+static void tegra_dsi_reset_read_count(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+
+ val = tegra_dsi_readl(dsi, DSI_STATUS);
+ val &= DSI_STATUS_RD_FIFO_COUNT(0x1f);
+ if (val) {
+ dev_warn(&dsi->dc->ndev->dev,
+ "DSI read count not zero, resetting\n");
+ tegra_dsi_soft_reset(dsi);
+ }
+}
+
+static struct dsi_status *tegra_dsi_save_state_switch_to_host_cmd_mode(
+ struct tegra_dc_dsi_data *dsi,
+ struct tegra_dc *dc,
+ u8 lp_op)
+{
+ struct dsi_status *init_status;
+ int err;
+
+ init_status = kzalloc(sizeof(*init_status), GFP_KERNEL);
+ if (!init_status)
+ return ERR_PTR(-ENOMEM);
+
+ *init_status = dsi->status;
+
+ if (dsi->status.lphs == DSI_LPHS_IN_HS_MODE) {
+ if (dsi->status.driven == DSI_DRIVEN_MODE_DC) {
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+ dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_HOST;
+ if (dsi->info.hs_cmd_mode_supported) {
+ err = tegra_dsi_set_to_hs_mode(dc, dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "Switch to HS host mode failed\n");
+ goto fail;
+ }
+ }
+ }
+ if (!dsi->info.hs_cmd_mode_supported) {
+ err =
+ tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to go to LP mode\n");
+ goto fail;
+ }
+ }
+ } else if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE) {
+ if (dsi->status.lp_op != lp_op) {
+ err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to go to LP mode\n");
+ goto fail;
+ }
+ }
+ }
+
+ return init_status;
+fail:
+ kfree(init_status);
+ return ERR_PTR(err);
+}
+
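+/* Check that the controller is initialized and idle, clear any FIFO error
+ * state, and switch the link into host driven command mode. Returns a
+ * snapshot of the previous state for tegra_dsi_restore_state().
+ */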
+static struct dsi_status *tegra_dsi_prepare_host_transmission(
+ struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi,
+ u8 lp_op)
+{
+ int err = 0;
+ struct dsi_status *init_status;
+
+ if (dsi->status.init != DSI_MODULE_INIT ||
+ dsi->ulpm) {
+ err = -EPERM;
+ goto fail;
+ }
+
+ if (tegra_dsi_host_busy(dsi)) {
+ err = -EBUSY;
+ dev_err(&dc->ndev->dev, "DSI host busy\n");
+ goto fail;
+ }
+
+ tegra_dsi_reset_underflow_overflow(dsi);
+
+ if (lp_op == DSI_LP_OP_READ)
+ tegra_dsi_reset_read_count(dsi);
+
+ if (dsi->status.lphs == DSI_LPHS_NOT_INIT) {
+ err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "Failed to config LP write\n");
+ goto fail;
+ }
+ }
+
+ init_status = tegra_dsi_save_state_switch_to_host_cmd_mode
+ (dsi, dc, lp_op);
+ if (IS_ERR_OR_NULL(init_status)) {
+ err = PTR_ERR(init_status);
+ dev_err(&dc->ndev->dev, "DSI state saving failed\n");
+ goto fail;
+ }
+
+ return init_status;
+fail:
+ return ERR_PTR(err);
+}
+
+static int tegra_dsi_restore_state(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi,
+ struct dsi_status *init_status)
+{
+ bool switch_back_to_dc_mode = false;
+ bool switch_back_to_hs_mode = false;
+ bool restart_dc_stream;
+ int err = 0;
+
+ switch_back_to_dc_mode = (dsi->status.driven ==
+ DSI_DRIVEN_MODE_HOST &&
+ init_status->driven ==
+ DSI_DRIVEN_MODE_DC);
+ switch_back_to_hs_mode = (dsi->status.lphs ==
+ DSI_LPHS_IN_LP_MODE &&
+ init_status->lphs ==
+ DSI_LPHS_IN_HS_MODE);
+ restart_dc_stream = (dsi->status.dc_stream ==
+ DSI_DC_STREAM_DISABLE &&
+ init_status->dc_stream ==
+ DSI_DC_STREAM_ENABLE);
+
+ if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE &&
+ init_status->lphs == DSI_LPHS_IN_LP_MODE) {
+ if (dsi->status.lp_op != init_status->lp_op) {
+ err =
+ tegra_dsi_set_to_lp_mode(dc, dsi, init_status->lp_op);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "Failed to config LP mode\n");
+ goto fail;
+ }
+ }
+ goto success;
+ }
+
+ if (switch_back_to_dc_mode)
+ dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_DC;
+ if (switch_back_to_dc_mode || switch_back_to_hs_mode) {
+ err = tegra_dsi_set_to_hs_mode(dc, dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "Failed to config HS mode\n");
+ goto fail;
+ }
+ }
+ if (restart_dc_stream)
+ tegra_dsi_start_dc_stream(dc, dsi);
+
+success:
+fail:
+ kfree(init_status);
+ return err;
+}
+
+static int tegra_dsi_host_trigger(struct tegra_dc_dsi_data *dsi)
+{
+ int status = 0;
+
+ if (tegra_dsi_readl(dsi, DSI_TRIGGER)) {
+ status = -EBUSY;
+ goto fail;
+ }
+
+ tegra_dsi_writel(dsi,
+ DSI_TRIGGER_HOST_TRIGGER(TEGRA_DSI_ENABLE), DSI_TRIGGER);
+
+#if DSI_USE_SYNC_POINTS
+ status = tegra_dsi_syncpt(dsi);
+ if (status < 0) {
+ dev_err(&dsi->dc->ndev->dev,
+ "DSI syncpt for host trigger failed\n");
+ goto fail;
+ }
+#else
+ if (tegra_dsi_write_busy(dsi)) {
+ status = -EBUSY;
+ dev_err(&dsi->dc->ndev->dev,
+ "Timeout waiting on write completion\n");
+ }
+#endif
+
+fail:
+ return status;
+}
+
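+/* Queue one packet in the host write FIFO: the header word (data ID,
+ * virtual channel and payload length) first, then any long packet payload
+ * packed four bytes per write, and finally trigger the host transfer.
+ */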
+static int _tegra_dsi_write_data(struct tegra_dc_dsi_data *dsi,
+ u8 *pdata, u8 data_id, u16 data_len)
+{
+ u8 virtual_channel;
+ u8 *pval;
+ u32 val;
+ int err;
+
+ err = 0;
+
+ virtual_channel = dsi->info.virtual_channel <<
+ DSI_VIR_CHANNEL_BIT_POSITION;
+
+ /* always use hw for ecc */
+ val = (virtual_channel | data_id) << 0 |
+ data_len << 8;
+ tegra_dsi_writel(dsi, val, DSI_WR_DATA);
+
+ /* if pdata != NULL, pkt type is long pkt */
+ if (pdata != NULL) {
+ while (data_len) {
+ if (data_len >= 4) {
+ val = ((u32 *) pdata)[0];
+ data_len -= 4;
+ pdata += 4;
+ } else {
+ val = 0;
+ pval = (u8 *) &val;
+ do
+ *pval++ = *pdata++;
+ while (--data_len);
+ }
+ tegra_dsi_writel(dsi, val, DSI_WR_DATA);
+ }
+ }
+
+ err = tegra_dsi_host_trigger(dsi);
+ if (err < 0)
+ dev_err(&dsi->dc->ndev->dev, "DSI host trigger failed\n");
+
+ return err;
+}
+
+int tegra_dsi_write_data(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi,
+ u8 *pdata, u8 data_id, u16 data_len)
+{
+ int err = 0;
+ struct dsi_status *init_status;
+
+ tegra_dc_io_start(dc);
+
+ init_status = tegra_dsi_prepare_host_transmission(
+ dc, dsi, DSI_LP_OP_WRITE);
+ if (IS_ERR_OR_NULL(init_status)) {
+ err = PTR_ERR(init_status);
+ dev_err(&dc->ndev->dev, "DSI host config failed\n");
+ goto fail;
+ }
+
+ err = _tegra_dsi_write_data(dsi, pdata, data_id, data_len);
+
+ /* init_status is an ERR_PTR on the failure path, so only restore the
+ * saved state when host transmission was prepared successfully.
+ */
+ err = tegra_dsi_restore_state(dc, dsi, init_status);
+ if (err < 0)
+ dev_err(&dc->ndev->dev, "Failed to restore prev state\n");
+fail:
+ tegra_dc_io_end(dc);
+ return err;
+}
+EXPORT_SYMBOL(tegra_dsi_write_data);
+
+static int tegra_dsi_send_panel_cmd(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi,
+ struct tegra_dsi_cmd *cmd,
+ u32 n_cmd)
+{
+ u32 i;
+ int err;
+
+ err = 0;
+ for (i = 0; i < n_cmd; i++) {
+ struct tegra_dsi_cmd *cur_cmd;
+ cur_cmd = &cmd[i];
+
+ if (cur_cmd->cmd_type == TEGRA_DSI_DELAY_MS)
+ mdelay(cur_cmd->sp_len_dly.delay_ms);
+ else {
+ err = tegra_dsi_write_data(dc, dsi,
+ cur_cmd->pdata,
+ cur_cmd->data_id,
+ cur_cmd->sp_len_dly.data_len);
+ if (err < 0)
+ break;
+ }
+ }
+ return err;
+}
+
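+/* Compute the 8-bit ECC for a 24-bit packet header: every header bit that
+ * is set XORs its parity mask from ecc_parity[] into the result.
+ */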
+static u8 get_8bit_ecc(u32 header)
+{
+ char ecc_parity[24] = {
+ 0x07, 0x0b, 0x0d, 0x0e, 0x13, 0x15, 0x16, 0x19,
+ 0x1a, 0x1c, 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c,
+ 0x31, 0x32, 0x34, 0x38, 0x1f, 0x2f, 0x37, 0x3b
+ };
+ u8 ecc_byte;
+ int i;
+
+ ecc_byte = 0;
+ for (i = 0; i < 24; i++)
+ ecc_byte ^= ((header >> i) & 1) ? ecc_parity[i] : 0x00;
+
+ return ecc_byte;
+}
+
+/* This function is written to send DCS short write (1 parameter) only.
+ * This means the cmd will contain only 1 byte of index and 1 byte of value.
+ * The data type ID is fixed at 0x15 and the ECC is calculated based on the
+ * data in pdata.
+ * The command will be sent by hardware every frame.
+ * pdata should contain both the index + value for each cmd.
+ * data_len will be the total number of bytes in pdata.
+ */
+int tegra_dsi_send_panel_short_cmd(struct tegra_dc *dc, u8 *pdata, u8 data_len)
+{
+ u8 ecc8bits = 0, data_len_orig = 0;
+ u32 val = 0, pkthdr = 0;
+ int err = 0, count = 0;
+ struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+ data_len_orig = data_len;
+ if (pdata != NULL) {
+ while (data_len) {
+ if (data_len >= 2) {
+ pkthdr = (CMD_SHORTW |
+ (((u16 *)pdata)[0]) << 8 | 0x00 << 24);
+ ecc8bits = get_8bit_ecc(pkthdr);
+ val = (pkthdr | (ecc8bits << 24));
+ data_len -= 2;
+ pdata += 2;
+ count++;
+ }
+ switch (count) {
+ case 1:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_0);
+ break;
+ case 2:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_1);
+ break;
+ case 3:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_2);
+ break;
+ case 4:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_3);
+ break;
+ case 5:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_4);
+ break;
+ case 6:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_5);
+ break;
+ case 7:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_6);
+ break;
+ case 8:
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_7);
+ break;
+ default:
+ err = 1;
+ break;
+ }
+ }
+ }
+
+ val = DSI_INIT_SEQ_CONTROL_DSI_FRAME_INIT_BYTE_COUNT(data_len_orig * 2)
+ | DSI_INIT_SEQ_CONTROL_DSI_SEND_INIT_SEQUENCE(1);
+ tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_CONTROL);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_dsi_send_panel_short_cmd);
+
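+/* Issue an immediate bus turn-around (BTA) and wait for its completion,
+ * either through a DSI sync point or by polling the IMM_BTA bit.
+ */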
+static int tegra_dsi_bta(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ u32 poll_time;
+ int err;
+
+ poll_time = 0;
+ err = 0;
+
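+ /* Issue an immediate bus turn-around (BTA) so that the peripheral can
+ * drive the read response back to the host.
+ */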
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val |= DSI_HOST_DSI_CONTROL_IMM_BTA(TEGRA_DSI_ENABLE);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+#if DSI_USE_SYNC_POINTS
+ /* FIXME: Workaround for nvhost_syncpt_read */
+ dsi->syncpt_val = nvhost_syncpt_update_min(
+ &dsi->dc->ndev->host->syncpt, dsi->syncpt_id);
+
+ val = DSI_INCR_SYNCPT_COND(OP_DONE) |
+ DSI_INCR_SYNCPT_INDX(dsi->syncpt_id);
+ tegra_dsi_writel(dsi, val, DSI_INCR_SYNCPT);
+
+ /* TODO: Use interrupt rather than polling */
+ err = nvhost_syncpt_wait(&dsi->dc->ndev->host->syncpt,
+ dsi->syncpt_id, dsi->syncpt_val + 1);
+ if (err < 0)
+ dev_err(&dsi->dc->ndev->dev,
+ "DSI sync point failure\n");
+ else
+ (dsi->syncpt_val)++;
+#else
+ if (tegra_dsi_read_busy(dsi)) {
+ err = -EBUSY;
+ dev_err(&dsi->dc->ndev->dev,
+ "Timeout waiting on read completion\n");
+ }
+#endif
+
+ return err;
+}
+
+static int tegra_dsi_parse_read_response(struct tegra_dc *dc,
+ u32 rd_fifo_cnt, u8 *read_fifo)
+{
+ int err;
+ u32 payload_size;
+
+ payload_size = 0;
+ err = 0;
+
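+ /* Byte 0 of the fifo holds the trigger message, byte 4 the data type
+ * of the returned packet.
+ */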
+ switch (read_fifo[0]) {
+ case DSI_ESCAPE_CMD:
+ dev_info(&dc->ndev->dev, "escape cmd[0x%x]\n", read_fifo[0]);
+ break;
+ case DSI_ACK_NO_ERR:
+ dev_info(&dc->ndev->dev,
+ "Panel ack, no err[0x%x]\n", read_fifo[0]);
+ return err;
+ default:
+ dev_info(&dc->ndev->dev, "Invalid read response\n");
+ break;
+ }
+
+ switch (read_fifo[4] & 0xff) {
+ case GEN_LONG_RD_RES:
+ /* Fall through */
+ case DCS_LONG_RD_RES:
+ payload_size = (read_fifo[5] |
+ (read_fifo[6] << 8)) & 0xFFFF;
+ dev_info(&dc->ndev->dev, "Long read response Packet\n"
+ "payload_size[0x%x]\n", payload_size);
+ break;
+ case GEN_1_BYTE_SHORT_RD_RES:
+ /* Fall through */
+ case DCS_1_BYTE_SHORT_RD_RES:
+ payload_size = 1;
+ dev_info(&dc->ndev->dev, "Short read response Packet\n"
+ "payload_size[0x%x]\n", payload_size);
+ break;
+ case GEN_2_BYTE_SHORT_RD_RES:
+ /* Fall through */
+ case DCS_2_BYTE_SHORT_RD_RES:
+ payload_size = 2;
+ dev_info(&dc->ndev->dev, "Short read response Packet\n"
+ "payload_size[0x%x]\n", payload_size);
+ break;
+ case ACK_ERR_RES:
+ payload_size = 2;
+ dev_info(&dc->ndev->dev, "Acknowledge error report response\n"
+ "Packet payload_size[0x%x]\n", payload_size);
+ break;
+ default:
+ dev_info(&dc->ndev->dev, "Invalid response packet\n");
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int tegra_dsi_read_fifo(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi,
+ u8 *read_fifo)
+{
+ u32 val;
+ u32 i;
+ u32 poll_time = 0;
+ u32 rd_fifo_cnt;
+ int err = 0;
+ u8 *read_fifo_cp = read_fifo;
+
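+ /* Poll for read data; RD_FIFO_COUNT is reported in 32-bit words, so
+ * shift left by 2 to compare against the fifo depth in bytes.
+ */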
+ while (poll_time < DSI_DELAY_FOR_READ_FIFO) {
+ mdelay(1);
+ val = tegra_dsi_readl(dsi, DSI_STATUS);
+ rd_fifo_cnt = val & DSI_STATUS_RD_FIFO_COUNT(0x1f);
+ if (rd_fifo_cnt << 2 > DSI_READ_FIFO_DEPTH) {
+ dev_err(&dc->ndev->dev,
+ "DSI RD_FIFO_CNT is greater than RD_FIFO_DEPTH\n");
+ break;
+ }
+ poll_time++;
+ }
+
+ if (rd_fifo_cnt == 0) {
+ dev_info(&dc->ndev->dev,
+ "DSI RD_FIFO_CNT is zero\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (val & (DSI_STATUS_LB_UNDERFLOW(0x1) |
+ DSI_STATUS_LB_OVERFLOW(0x1))) {
+ dev_warn(&dc->ndev->dev,
+ "DSI overflow/underflow error\n");
+ }
+
+ /* Read data from FIFO */
+ for (i = 0; i < rd_fifo_cnt; i++) {
+ val = tegra_dsi_readl(dsi, DSI_RD_DATA);
+ if (enable_read_debug)
+ dev_info(&dc->ndev->dev,
+ "Read data[%d]: 0x%x\n", i, val);
+ memcpy(read_fifo, &val, 4);
+ read_fifo += 4;
+ }
+
+ /* Make sure all the data is read from the FIFO */
+ val = tegra_dsi_readl(dsi, DSI_STATUS);
+ val &= DSI_STATUS_RD_FIFO_COUNT(0x1f);
+ if (val)
+ dev_err(&dc->ndev->dev, "DSI FIFO_RD_CNT not zero"
+ " even after reading FIFO_RD_CNT words from read fifo\n");
+
+ if (enable_read_debug) {
+ err =
+ tegra_dsi_parse_read_response(dc, rd_fifo_cnt, read_fifo_cp);
+ if (err < 0)
+ dev_warn(&dc->ndev->dev, "Unexpected read data\n");
+ }
+fail:
+ return err;
+}
+
+int tegra_dsi_read_data(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi,
+ u32 max_ret_payload_size,
+ u32 panel_reg_addr, u8 *read_data)
+{
+ int err = 0;
+ struct dsi_status *init_status;
+
+ tegra_dc_io_start(dc);
+
+ init_status = tegra_dsi_prepare_host_transmission(
+ dc, dsi, DSI_LP_OP_WRITE);
+ if (IS_ERR_OR_NULL(init_status)) {
+ err = PTR_ERR(init_status);
+ dev_err(&dc->ndev->dev, "DSI host config failed\n");
+ goto fail;
+ }
+
+ /* Set max return payload size in words */
+ err = _tegra_dsi_write_data(dsi, NULL,
+ dsi_command_max_return_pkt_size,
+ max_ret_payload_size);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI write failed\n");
+ goto fail;
+ }
+
+ /* DCS to read given panel register */
+ err = _tegra_dsi_write_data(dsi, NULL,
+ dsi_command_dcs_read_with_no_params,
+ panel_reg_addr);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI write failed\n");
+ goto fail;
+ }
+
+ tegra_dsi_reset_read_count(dsi);
+
+ if (dsi->status.lp_op == DSI_LP_OP_WRITE) {
+ err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_READ);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to go to LP read mode\n");
+ goto fail;
+ }
+ }
+
+ err = tegra_dsi_bta(dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI IMM BTA timeout\n");
+ goto fail;
+ }
+
+ err = tegra_dsi_read_fifo(dc, dsi, read_data);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "DSI read fifo failure\n");
+ goto fail;
+ }
+fail:
+ /* Keep the read status; a restore failure is only logged */
+ if (tegra_dsi_restore_state(dc, dsi, init_status) < 0)
+ dev_err(&dc->ndev->dev, "Failed to restore prev state\n");
+ tegra_dc_io_end(dc);
+ return err;
+}
+EXPORT_SYMBOL(tegra_dsi_read_data);
+
+int tegra_dsi_panel_sanity_check(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ int err = 0;
+ u8 read_fifo[DSI_READ_FIFO_DEPTH];
+ struct dsi_status *init_status;
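+ /* 0x05: DCS short write with no parameters, used here as a nop */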
+ static struct tegra_dsi_cmd dsi_nop_cmd =
+ DSI_CMD_SHORT(0x05, 0x0, 0x0);
+
+ tegra_dc_io_start(dc);
+
+ init_status = tegra_dsi_prepare_host_transmission(
+ dc, dsi, DSI_LP_OP_WRITE);
+ if (IS_ERR_OR_NULL(init_status)) {
+ err = PTR_ERR(init_status);
+ dev_err(&dc->ndev->dev, "DSI host config failed\n");
+ goto fail;
+ }
+
+ err = _tegra_dsi_write_data(dsi, NULL, dsi_nop_cmd.data_id, 0x0);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "DSI nop write failed\n");
+ goto fail;
+ }
+
+ tegra_dsi_reset_read_count(dsi);
+
+ if (dsi->status.lp_op == DSI_LP_OP_WRITE) {
+ err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_READ);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to go to LP read mode\n");
+ goto fail;
+ }
+ }
+
+ err = tegra_dsi_bta(dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "DSI BTA failed\n");
+ goto fail;
+ }
+
+ err = tegra_dsi_read_fifo(dc, dsi, read_fifo);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "DSI read fifo failure\n");
+ goto fail;
+ }
+
+ if (read_fifo[0] != DSI_ACK_NO_ERR) {
+ dev_warn(&dc->ndev->dev,
+ "Ack no error trigger message not received\n");
+ err = -EAGAIN;
+ }
+fail:
+ /* Keep the sanity check status; a restore failure is only logged */
+ if (tegra_dsi_restore_state(dc, dsi, init_status) < 0)
+ dev_err(&dc->ndev->dev, "Failed to restore prev state\n");
+ tegra_dc_io_end(dc);
+ return err;
+}
+EXPORT_SYMBOL(tegra_dsi_panel_sanity_check);
+
+static int tegra_dsi_enter_ulpm(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ int ret;
+
+ ret = 0;
+
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(3);
+ val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(ENTER_ULPM);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+#if DSI_USE_SYNC_POINTS
+ ret = tegra_dsi_syncpt(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dc->ndev->dev,
+ "DSI syncpt for ulpm enter failed\n");
+ goto fail;
+ }
+#else
+ /* TODO: Find exact delay required */
+ mdelay(10);
+#endif
+ dsi->ulpm = true;
+fail:
+ return ret;
+}
+
+static int tegra_dsi_exit_ulpm(struct tegra_dc_dsi_data *dsi)
+{
+ u32 val;
+ int ret;
+
+ ret = 0;
+
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(3);
+ val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(EXIT_ULPM);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+#if DSI_USE_SYNC_POINTS
+ ret = tegra_dsi_syncpt(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dc->ndev->dev,
+ "DSI syncpt for ulpm exit failed\n");
+ goto fail;
+ }
+#else
+ /* TODO: Find exact delay required */
+ mdelay(10);
+#endif
+ dsi->ulpm = false;
+
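+ /* Return the ULPM request field to normal operation */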
+ val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+ val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(0x3);
+ val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(NORMAL);
+ tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+fail:
+ return ret;
+}
+
+static void tegra_dc_dsi_enable(struct tegra_dc *dc)
+{
+ struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+ int err;
+ u32 val;
+
+ tegra_dc_io_start(dc);
+ mutex_lock(&dsi->lock);
+
+ /* Stop DC stream before configuring DSI registers
+ * to avoid visible glitches on panel during transition
+ * from bootloader to kernel driver
+ */
+ tegra_dsi_stop_dc_stream(dc, dsi);
+
+ if (dsi->enabled) {
+ if (dsi->ulpm) {
+ if (tegra_dsi_exit_ulpm(dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to exit ulpm\n");
+ goto fail;
+ }
+ }
+
+ if (dsi->info.panel_reset) {
+ err = tegra_dsi_send_panel_cmd(dc, dsi,
+ dsi->info.dsi_init_cmd,
+ dsi->info.n_init_cmd);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: error sending dsi init cmd\n");
+ goto fail;
+ }
+ } else if (dsi->info.dsi_late_resume_cmd) {
+ err = tegra_dsi_send_panel_cmd(dc, dsi,
+ dsi->info.dsi_late_resume_cmd,
+ dsi->info.n_late_resume_cmd);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: error sending late resume cmd\n");
+ goto fail;
+ }
+ }
+ } else {
+ err = tegra_dsi_init_hw(dc, dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: not able to init dsi hardware\n");
+ goto fail;
+ }
+
+ if (dsi->ulpm) {
+ if (tegra_dsi_enter_ulpm(dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to enter ulpm\n");
+ goto fail;
+ }
+ val = DSI_PAD_CONTROL_PAD_PDIO(0) |
+ DSI_PAD_CONTROL_PAD_PDIO_CLK(0) |
+ DSI_PAD_CONTROL_PAD_PULLDN_ENAB
+ (TEGRA_DSI_DISABLE);
+ tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+ if (tegra_dsi_exit_ulpm(dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to exit ulpm\n");
+ goto fail;
+ }
+ }
+
+ err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_WRITE);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: not able to set to lp mode\n");
+ goto fail;
+ }
+
+ err = tegra_dsi_send_panel_cmd(dc, dsi, dsi->info.dsi_init_cmd,
+ dsi->info.n_init_cmd);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: error while sending dsi init cmd\n");
+ goto fail;
+ }
+
+ err = tegra_dsi_set_to_hs_mode(dc, dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: not able to set to hs mode\n");
+ goto fail;
+ }
+
+ dsi->enabled = true;
+ }
+
+ if (dsi->status.driven == DSI_DRIVEN_MODE_DC)
+ tegra_dsi_start_dc_stream(dc, dsi);
+fail:
+ mutex_unlock(&dsi->lock);
+ tegra_dc_io_end(dc);
+}
+
+static void _tegra_dc_dsi_init(struct tegra_dc *dc)
+{
+ struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+ tegra_dsi_init_sw(dc, dsi);
+ /* TODO: Configure the CSI pad configuration */
+}
+
+static int tegra_dc_dsi_cp_p_cmd(struct tegra_dsi_cmd *src,
+ struct tegra_dsi_cmd *dst, u16 n_cmd)
+{
+ u16 i;
+ u16 len;
+
+ memcpy(dst, src, sizeof(*dst) * n_cmd);
+
+ for (i = 0; i < n_cmd; i++)
+ if (src[i].pdata) {
+ len = sizeof(*src[i].pdata) *
+ src[i].sp_len_dly.data_len;
+ dst[i].pdata = kzalloc(len, GFP_KERNEL);
+ if (!dst[i].pdata)
+ goto free_cmd_pdata;
+ memcpy(dst[i].pdata, src[i].pdata, len);
+ }
+
+ return 0;
+
+free_cmd_pdata:
+ /* i is unsigned, so count down with a post-decrement test */
+ while (i--)
+ kfree(dst[i].pdata);
+ return -ENOMEM;
+}
+
+static int tegra_dc_dsi_cp_info(struct tegra_dc_dsi_data *dsi,
+ struct tegra_dsi_out *p_dsi)
+{
+ struct tegra_dsi_cmd *p_init_cmd;
+ struct tegra_dsi_cmd *p_early_suspend_cmd = NULL;
+ struct tegra_dsi_cmd *p_late_resume_cmd = NULL;
+ struct tegra_dsi_cmd *p_suspend_cmd;
+ int err;
+
+ if (p_dsi->n_data_lanes > MAX_DSI_DATA_LANES)
+ return -EINVAL;
+
+ p_init_cmd = kzalloc(sizeof(*p_init_cmd) *
+ p_dsi->n_init_cmd, GFP_KERNEL);
+ if (!p_init_cmd)
+ return -ENOMEM;
+
+ if (p_dsi->dsi_early_suspend_cmd) {
+ p_early_suspend_cmd = kzalloc(sizeof(*p_early_suspend_cmd) *
+ p_dsi->n_early_suspend_cmd,
+ GFP_KERNEL);
+ if (!p_early_suspend_cmd) {
+ err = -ENOMEM;
+ goto err_free_init_cmd;
+ }
+ }
+
+ if (p_dsi->dsi_late_resume_cmd) {
+ p_late_resume_cmd = kzalloc(sizeof(*p_late_resume_cmd) *
+ p_dsi->n_late_resume_cmd,
+ GFP_KERNEL);
+ if (!p_late_resume_cmd) {
+ err = -ENOMEM;
+ goto err_free_p_early_suspend_cmd;
+ }
+ }
+
+ p_suspend_cmd = kzalloc(sizeof(*p_suspend_cmd) * p_dsi->n_suspend_cmd,
+ GFP_KERNEL);
+ if (!p_suspend_cmd) {
+ err = -ENOMEM;
+ goto err_free_p_late_resume_cmd;
+ }
+
+ memcpy(&dsi->info, p_dsi, sizeof(dsi->info));
+
+ /* Copy panel init cmd */
+ err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_init_cmd,
+ p_init_cmd, p_dsi->n_init_cmd);
+ if (err < 0)
+ goto err_free;
+ dsi->info.dsi_init_cmd = p_init_cmd;
+
+ /* Copy panel early suspend cmd */
+ if (p_dsi->dsi_early_suspend_cmd) {
+ err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_early_suspend_cmd,
+ p_early_suspend_cmd,
+ p_dsi->n_early_suspend_cmd);
+ if (err < 0)
+ goto err_free;
+ dsi->info.dsi_early_suspend_cmd = p_early_suspend_cmd;
+ }
+
+ /* Copy panel late resume cmd */
+ if (p_dsi->dsi_late_resume_cmd) {
+ err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_late_resume_cmd,
+ p_late_resume_cmd,
+ p_dsi->n_late_resume_cmd);
+ if (err < 0)
+ goto err_free;
+ dsi->info.dsi_late_resume_cmd = p_late_resume_cmd;
+ }
+
+ /* Copy panel suspend cmd */
+ err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_suspend_cmd, p_suspend_cmd,
+ p_dsi->n_suspend_cmd);
+ if (err < 0)
+ goto err_free;
+ dsi->info.dsi_suspend_cmd = p_suspend_cmd;
+
+ if (!dsi->info.panel_reset_timeout_msec)
+ dsi->info.panel_reset_timeout_msec =
+ DEFAULT_PANEL_RESET_TIMEOUT;
+
+ if (!dsi->info.panel_buffer_size_byte)
+ dsi->info.panel_buffer_size_byte = DEFAULT_PANEL_BUFFER_BYTE;
+
+ if (!dsi->info.max_panel_freq_khz) {
+ dsi->info.max_panel_freq_khz = DEFAULT_MAX_DSI_PHY_CLK_KHZ;
+
+ if (dsi->info.video_burst_mode >
+ TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END) {
+ dev_err(&dsi->dc->ndev->dev, "DSI: max_panel_freq_khz"
+ " is not set for DSI burst mode.\n");
+ dsi->info.video_burst_mode =
+ TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED;
+ }
+ }
+
+ if (!dsi->info.lp_cmd_mode_freq_khz)
+ dsi->info.lp_cmd_mode_freq_khz = DEFAULT_LP_CMD_MODE_CLK_KHZ;
+
+ if (!dsi->info.chip_id || !dsi->info.chip_rev)
+ dev_warn(&dsi->dc->ndev->dev,
+ "DSI: Failed to get chip info\n");
+
+ if (!dsi->info.lp_read_cmd_mode_freq_khz)
+ dsi->info.lp_read_cmd_mode_freq_khz =
+ dsi->info.lp_cmd_mode_freq_khz;
+
+ /* host mode is for testing only */
+ dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_DC;
+ return 0;
+
+err_free:
+ kfree(p_suspend_cmd);
+err_free_p_late_resume_cmd:
+ kfree(p_late_resume_cmd);
+err_free_p_early_suspend_cmd:
+ kfree(p_early_suspend_cmd);
+err_free_init_cmd:
+ kfree(p_init_cmd);
+ return err;
+}
+
+static int tegra_dc_dsi_init(struct tegra_dc *dc)
+{
+ struct tegra_dc_dsi_data *dsi;
+ struct resource *res;
+ struct resource *base_res;
+ void __iomem *base;
+ struct clk *dc_clk = NULL;
+ struct clk *dsi_clk = NULL;
+ struct tegra_dsi_out *dsi_pdata;
+ int err;
+
+ err = 0;
+
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM,
+ "dsi_regs");
+ if (!res) {
+ dev_err(&dc->ndev->dev, "dsi: no mem resource\n");
+ err = -ENOENT;
+ goto err_free_dsi;
+ }
+
+ base_res = request_mem_region(res->start, resource_size(res),
+ dc->ndev->name);
+ if (!base_res) {
+ dev_err(&dc->ndev->dev, "dsi: request_mem_region failed\n");
+ err = -EBUSY;
+ goto err_free_dsi;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&dc->ndev->dev, "dsi: registers can't be mapped\n");
+ err = -EBUSY;
+ goto err_release_regs;
+ }
+
+ dsi_pdata = dc->pdata->default_out->dsi;
+ if (!dsi_pdata) {
+ dev_err(&dc->ndev->dev, "dsi: dsi data not available\n");
+ err = -ENOENT;
+ goto err_release_regs;
+ }
+
+ if (dsi_pdata->dsi_instance)
+ dsi_clk = clk_get(&dc->ndev->dev, "dsib");
+ else
+ dsi_clk = clk_get(&dc->ndev->dev, "dsia");
+
+ if (IS_ERR_OR_NULL(dsi_clk)) {
+ dev_err(&dc->ndev->dev, "dsi: can't get clock\n");
+ err = -EBUSY;
+ goto err_release_regs;
+ }
+
+ dc_clk = clk_get_sys(dev_name(&dc->ndev->dev), NULL);
+ if (IS_ERR_OR_NULL(dc_clk)) {
+ dev_err(&dc->ndev->dev, "dsi: dc clock %s unavailable\n",
+ dev_name(&dc->ndev->dev));
+ err = -EBUSY;
+ goto err_clk_put;
+ }
+
+ mutex_init(&dsi->lock);
+ dsi->dc = dc;
+ dsi->base = base;
+ dsi->base_res = base_res;
+ dsi->dc_clk = dc_clk;
+ dsi->dsi_clk = dsi_clk;
+
+ err = tegra_dc_dsi_cp_info(dsi, dsi_pdata);
+ if (err < 0)
+ goto err_dsi_data;
+
+ tegra_dc_set_outdata(dc, dsi);
+ _tegra_dc_dsi_init(dc);
+
+ return 0;
+
+err_dsi_data:
+ clk_put(dc_clk);
+err_clk_put:
+ clk_put(dsi_clk);
+err_release_regs:
+ release_resource(base_res);
+err_free_dsi:
+ kfree(dsi);
+
+ return err;
+}
+
+static void tegra_dc_dsi_destroy(struct tegra_dc *dc)
+{
+ struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+ u16 i;
+ u32 val;
+
+ mutex_lock(&dsi->lock);
+
+ /* free up the pdata */
+ for (i = 0; i < dsi->info.n_init_cmd; i++) {
+ if (dsi->info.dsi_init_cmd[i].pdata)
+ kfree(dsi->info.dsi_init_cmd[i].pdata);
+ }
+ kfree(dsi->info.dsi_init_cmd);
+
+ /* Disable dc stream */
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+ /* Disable dsi phy clock */
+ if (dsi->status.clk_out == DSI_PHYCLK_OUT_EN)
+ tegra_dsi_hs_clk_out_disable(dc, dsi);
+
+ val = DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE);
+ tegra_dsi_writel(dsi, val, DSI_POWER_CONTROL);
+
+ iounmap(dsi->base);
+ release_resource(dsi->base_res);
+
+ clk_put(dsi->dc_clk);
+ clk_put(dsi->dsi_clk);
+
+ mutex_unlock(&dsi->lock);
+
+ mutex_destroy(&dsi->lock);
+ kfree(dsi);
+}
+
+static int tegra_dsi_deep_sleep(struct tegra_dc *dc,
+ struct tegra_dc_dsi_data *dsi)
+{
+ int err = 0;
+ int val;
+ struct clk *parent_clk = NULL;
+ struct clk *base_clk = NULL;
+
+ if (!dsi->enabled) {
+ err = -EPERM;
+ goto fail;
+ }
+
+ err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_WRITE);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to go to LP mode\n");
+ goto fail;
+ }
+
+ /* Suspend panel */
+ err = tegra_dsi_send_panel_cmd(dc, dsi,
+ dsi->info.dsi_suspend_cmd,
+ dsi->info.n_suspend_cmd);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: Error sending suspend cmd\n");
+ goto fail;
+ }
+
+ if (!dsi->ulpm) {
+ err = tegra_dsi_enter_ulpm(dsi);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to enter ulpm\n");
+ goto fail;
+ }
+ }
+
+ /* Suspend pad */
+ val = tegra_dsi_readl(dsi, DSI_PAD_CONTROL);
+ val = DSI_PAD_CONTROL_PAD_PDIO(0x3) |
+ DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
+ DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_ENABLE);
+ tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+
+ /* Suspend core-logic */
+ val = DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE);
+ tegra_dsi_writel(dsi, val, DSI_POWER_CONTROL);
+
+ /* Disable dsi fast and slow clock */
+ parent_clk = clk_get_parent(dsi->dsi_clk);
+ base_clk = clk_get_parent(parent_clk);
+ if (dsi->info.dsi_instance)
+ tegra_clk_cfg_ex(base_clk,
+ TEGRA_CLK_PLLD_CSI_OUT_ENB,
+ 0);
+ else
+ tegra_clk_cfg_ex(base_clk,
+ TEGRA_CLK_PLLD_DSI_OUT_ENB,
+ 0);
+
+ /* Disable dsi source clock */
+ clk_disable(dsi->dsi_clk);
+
+ dsi->clk_ref = false;
+ dsi->enabled = false;
+
+ return 0;
+fail:
+ return err;
+}
+
+static void tegra_dc_dsi_disable(struct tegra_dc *dc)
+{
+ int err;
+ struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+ tegra_dc_io_start(dc);
+ mutex_lock(&dsi->lock);
+
+ if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+ tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+ if (dsi->info.power_saving_suspend) {
+ if (tegra_dsi_deep_sleep(dc, dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to enter deep sleep\n");
+ goto fail;
+ }
+ } else {
+ if (dsi->info.dsi_early_suspend_cmd) {
+ err = tegra_dsi_send_panel_cmd(dc, dsi,
+ dsi->info.dsi_early_suspend_cmd,
+ dsi->info.n_early_suspend_cmd);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev,
+ "dsi: Error sending early suspend cmd\n");
+ goto fail;
+ }
+ }
+
+ if (!dsi->ulpm) {
+ if (tegra_dsi_enter_ulpm(dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to enter ulpm\n");
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ mutex_unlock(&dsi->lock);
+ tegra_dc_io_end(dc);
+}
+
+#ifdef CONFIG_PM
+static void tegra_dc_dsi_suspend(struct tegra_dc *dc)
+{
+ struct tegra_dc_dsi_data *dsi;
+
+ dsi = tegra_dc_get_outdata(dc);
+
+ tegra_dc_io_start(dc);
+ mutex_lock(&dsi->lock);
+
+ if (!dsi->enabled)
+ goto fail;
+
+ if (!dsi->info.power_saving_suspend) {
+ if (dsi->ulpm) {
+ if (tegra_dsi_exit_ulpm(dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to exit ulpm\n");
+ goto fail;
+ }
+ }
+
+ if (tegra_dsi_deep_sleep(dc, dsi) < 0) {
+ dev_err(&dc->ndev->dev,
+ "DSI failed to enter deep sleep\n");
+ goto fail;
+ }
+ }
+fail:
+ mutex_unlock(&dsi->lock);
+ tegra_dc_io_end(dc);
+}
+
+static void tegra_dc_dsi_resume(struct tegra_dc *dc)
+{
+ /* Not required since tegra_dc_dsi_enable
+ * will reconfigure the controller from scratch
+ */
+}
+#endif
+
+struct tegra_dc_out_ops tegra_dc_dsi_ops = {
+ .init = tegra_dc_dsi_init,
+ .destroy = tegra_dc_dsi_destroy,
+ .enable = tegra_dc_dsi_enable,
+ .disable = tegra_dc_dsi_disable,
+#ifdef CONFIG_PM
+ .suspend = tegra_dc_dsi_suspend,
+ .resume = tegra_dc_dsi_resume,
+#endif
+};
diff --git a/drivers/video/tegra/dc/dsi.h b/drivers/video/tegra/dc/dsi.h
new file mode 100644
index 000000000000..d86a60a50037
--- /dev/null
+++ b/drivers/video/tegra/dc/dsi.h
@@ -0,0 +1,279 @@
+/*
+ * drivers/video/tegra/dc/dsi.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DSI_H__
+#define __DRIVERS_VIDEO_TEGRA_DC_DSI_H__
+
+/* source of video data */
+enum {
+ TEGRA_DSI_VIDEO_DRIVEN_BY_DC,
+ TEGRA_DSI_VIDEO_DRIVEN_BY_HOST,
+};
+
+/* Max number of data lanes supported */
+#define MAX_DSI_DATA_LANES 2
+/* Default Peripheral reset timeout */
+#define DSI_PR_TO_VALUE 0x2000
+
+/* DCS commands for command mode */
+#define DSI_ENTER_PARTIAL_MODE 0x12
+#define DSI_SET_PIXEL_FORMAT 0x3A
+#define DSI_AREA_COLOR_MODE 0x4C
+#define DSI_SET_PARTIAL_AREA 0x30
+#define DSI_SET_PAGE_ADDRESS 0x2B
+#define DSI_SET_ADDRESS_MODE 0x36
+#define DSI_SET_COLUMN_ADDRESS 0x2A
+#define DSI_WRITE_MEMORY_START 0x2C
+#define DSI_WRITE_MEMORY_CONTINUE 0x3C
+#define DSI_MAX_COMMAND_DELAY_USEC 250000
+#define DSI_COMMAND_DELAY_STEPS_USEC 10
+
+/* Trigger message */
+#define DSI_ESCAPE_CMD 0x87
+#define DSI_ACK_NO_ERR 0x84
+
+/* DSI return packet types */
+#define GEN_LONG_RD_RES 0x1A
+#define DCS_LONG_RD_RES 0x1C
+#define GEN_1_BYTE_SHORT_RD_RES 0x11
+#define DCS_1_BYTE_SHORT_RD_RES 0x21
+#define GEN_2_BYTE_SHORT_RD_RES 0x12
+#define DCS_2_BYTE_SHORT_RD_RES 0x22
+#define ACK_ERR_RES 0x02
+
+/* End of Transmit command for HS mode */
+#define DSI_CMD_HS_EOT_PACKAGE 0x000F0F08
+
+/* Delay required after issuing the trigger*/
+#define DSI_COMMAND_COMPLETION_DELAY_USEC 5
+
+#define DSI_DELAY_FOR_READ_FIFO 5
+
+/* Dsi virtual channel bit position, refer to the DSI specs */
+#define DSI_VIR_CHANNEL_BIT_POSITION 6
+
+/* DSI packet commands from Host to peripherals */
+enum {
+ dsi_command_v_sync_start = 0x01,
+ dsi_command_v_sync_end = 0x11,
+ dsi_command_h_sync_start = 0x21,
+ dsi_command_h_sync_end = 0x31,
+ dsi_command_end_of_transaction = 0x08,
+ dsi_command_blanking = 0x19,
+ dsi_command_null_packet = 0x09,
+ dsi_command_h_active_length_16bpp = 0x0E,
+ dsi_command_h_active_length_18bpp = 0x1E,
+ dsi_command_h_active_length_18bpp_np = 0x2E,
+ dsi_command_h_active_length_24bpp = 0x3E,
+ dsi_command_h_sync_active = dsi_command_blanking,
+ dsi_command_h_back_porch = dsi_command_blanking,
+ dsi_command_h_front_porch = dsi_command_blanking,
+ dsi_command_writ_no_param = 0x05,
+ dsi_command_long_write = 0x39,
+ dsi_command_max_return_pkt_size = 0x37,
+ dsi_command_generic_read_request_with_2_param = 0x24,
+ dsi_command_dcs_read_with_no_params = 0x06,
+};
+
+/* Maximum polling time for reading the dsi status register */
+#define DSI_STATUS_POLLING_DURATION_USEC 100000
+#define DSI_STATUS_POLLING_DELAY_USEC 100
+
+/*
+ * Horizontal Sync Blank Packet Over head
+ * DSI_overhead = size_of(HS packet header)
+ * + size_of(BLANK packet header) + size_of(checksum)
+ * DSI_overhead = 4 + 4 + 2 = 10
+ */
+#define DSI_HSYNC_BLNK_PKT_OVERHEAD 10
+
+/*
+ * Horizontal Front Porch Packet Overhead
+ * DSI_overhead = size_of(checksum)
+ * + size_of(BLANK packet header) + size_of(checksum)
+ * DSI_overhead = 2 + 4 + 2 = 8
+ */
+#define DSI_HFRONT_PORCH_PKT_OVERHEAD 8
+
+/*
+ * Horizontal Back Porch Packet
+ * DSI_overhead = size_of(HE packet header)
+ * + size_of(BLANK packet header) + size_of(checksum)
+ * + size_of(RGB packet header)
+ * DSI_overhead = 4 + 4 + 2 + 4 = 14
+ */
+#define DSI_HBACK_PORCH_PKT_OVERHEAD 14
+
+/* Additional Hs TX timeout margin */
+#define DSI_HTX_TO_MARGIN 720
+
+#define DSI_CYCLE_COUNTER_VALUE 512
+
+#define DSI_LRXH_TO_VALUE 0x2000
+
+/* Turn around timeout terminal count */
+#define DSI_TA_TO_VALUE 0x2000
+
+/* Turn around timeout tally */
+#define DSI_TA_TALLY_VALUE 0x0
+/* LP Rx timeout tally */
+#define DSI_LRXH_TALLY_VALUE 0x0
+/* HS Tx Timeout tally */
+#define DSI_HTX_TALLY_VALUE 0x0
+
+/* DSI Power control settle time 10 micro seconds */
+#define DSI_POWER_CONTROL_SETTLE_TIME_US 10
+
+#define DSI_HOST_FIFO_DEPTH 64
+#define DSI_VIDEO_FIFO_DEPTH 480
+#define DSI_READ_FIFO_DEPTH (32 << 2)
+
+#define NUMOF_BIT_PER_BYTE 8
+#define DEFAULT_LP_CMD_MODE_CLK_KHZ 10000
+#define DEFAULT_MAX_DSI_PHY_CLK_KHZ (500*1000)
+#define DEFAULT_PANEL_RESET_TIMEOUT 2
+#define DEFAULT_PANEL_BUFFER_BYTE 512
+
+/*
+ * TODO: are DSI_HOST_DSI_CONTROL_CRC_RESET(RESET_CRC) and
+ * DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(IMMEDIATE) required for everyone?
+ */
+#define HOST_DSI_CTRL_COMMON \
+ (DSI_HOST_DSI_CONTROL_PHY_CLK_DIV(DSI_PHY_CLK_DIV1) | \
+ DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(NORMAL) | \
+ DSI_HOST_DSI_CONTROL_PERIPH_RESET(TEGRA_DSI_DISABLE) | \
+ DSI_HOST_DSI_CONTROL_RAW_DATA(TEGRA_DSI_DISABLE) | \
+ DSI_HOST_DSI_CONTROL_IMM_BTA(TEGRA_DSI_DISABLE) | \
+ DSI_HOST_DSI_CONTROL_PKT_BTA(TEGRA_DSI_DISABLE) | \
+ DSI_HOST_DSI_CONTROL_CS_ENABLE(TEGRA_DSI_ENABLE) | \
+ DSI_HOST_DSI_CONTROL_ECC_ENABLE(TEGRA_DSI_ENABLE) | \
+ DSI_HOST_DSI_CONTROL_PKT_WR_FIFO_SEL(HOST_ONLY))
+
+#define HOST_DSI_CTRL_HOST_DRIVEN \
+ (DSI_HOST_DSI_CONTROL_CRC_RESET(RESET_CRC) | \
+ DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(IMMEDIATE))
+
+#define HOST_DSI_CTRL_DC_DRIVEN 0
+
+#define DSI_CTRL_HOST_DRIVEN (DSI_CONTROL_VID_ENABLE(TEGRA_DSI_DISABLE) | \
+ DSI_CONTROL_HOST_ENABLE(TEGRA_DSI_ENABLE))
+
+#define DSI_CTRL_DC_DRIVEN (DSI_CONTROL_VID_TX_TRIG_SRC(SOL) | \
+ DSI_CONTROL_VID_ENABLE(TEGRA_DSI_ENABLE) | \
+ DSI_CONTROL_HOST_ENABLE(TEGRA_DSI_DISABLE))
+
+#define DSI_CTRL_CMD_MODE (DSI_CONTROL_VID_DCS_ENABLE(TEGRA_DSI_ENABLE))
+
+#define DSI_CTRL_VIDEO_MODE (DSI_CONTROL_VID_DCS_ENABLE(TEGRA_DSI_DISABLE))
+
+
+enum {
+ CMD_VS = 0x01,
+ CMD_VE = 0x11,
+
+ CMD_HS = 0x21,
+ CMD_HE = 0x31,
+
+ CMD_EOT = 0x08,
+ CMD_NULL = 0x09,
+ CMD_SHORTW = 0x15,
+ CMD_BLNK = 0x19,
+ CMD_LONGW = 0x39,
+
+ CMD_RGB = 0x00,
+ CMD_RGB_16BPP = 0x0E,
+ CMD_RGB_18BPP = 0x1E,
+ CMD_RGB_18BPPNP = 0x2E,
+ CMD_RGB_24BPP = 0x3E,
+};
+
+#define PKT_ID0(id) DSI_PKT_SEQ_0_LO_PKT_00_ID(id) | \
+ DSI_PKT_SEQ_1_LO_PKT_10_EN(TEGRA_DSI_ENABLE)
+#define PKT_LEN0(len) DSI_PKT_SEQ_0_LO_PKT_00_SIZE(len)
+
+#define PKT_ID1(id) DSI_PKT_SEQ_0_LO_PKT_01_ID(id) | \
+ DSI_PKT_SEQ_1_LO_PKT_11_EN(TEGRA_DSI_ENABLE)
+#define PKT_LEN1(len) DSI_PKT_SEQ_0_LO_PKT_01_SIZE(len)
+
+#define PKT_ID2(id) DSI_PKT_SEQ_0_LO_PKT_02_ID(id) | \
+ DSI_PKT_SEQ_1_LO_PKT_12_EN(TEGRA_DSI_ENABLE)
+#define PKT_LEN2(len) DSI_PKT_SEQ_0_LO_PKT_02_SIZE(len)
+
+#define PKT_ID3(id) DSI_PKT_SEQ_0_HI_PKT_03_ID(id) | \
+ DSI_PKT_SEQ_1_HI_PKT_13_EN(TEGRA_DSI_ENABLE)
+#define PKT_LEN3(len) DSI_PKT_SEQ_0_HI_PKT_03_SIZE(len)
+
+#define PKT_ID4(id) DSI_PKT_SEQ_0_HI_PKT_04_ID(id) | \
+ DSI_PKT_SEQ_1_HI_PKT_14_EN(TEGRA_DSI_ENABLE)
+#define PKT_LEN4(len) DSI_PKT_SEQ_0_HI_PKT_04_SIZE(len)
+
+#define PKT_ID5(id) DSI_PKT_SEQ_0_HI_PKT_05_ID(id) | \
+ DSI_PKT_SEQ_1_HI_PKT_15_EN(TEGRA_DSI_ENABLE)
+#define PKT_LEN5(len) DSI_PKT_SEQ_0_HI_PKT_05_SIZE(len)
+
+#define PKT_LP DSI_PKT_SEQ_0_LO_SEQ_0_FORCE_LP(TEGRA_DSI_ENABLE)
+
+#define NUMOF_PKT_SEQ 12
+
+
+/* Macros for calculating the phy timings */
+#define T_HSEXIT_DEFAULT(clkns) (100 / ((clkns) * 8) + 1)
+#define T_HSTRAIL_DEFAULT(clkns) (3 + max((8 * (clkns)), \
+ (60 + 4 * (clkns))) / ((clkns) * 8) + 1)
+#define T_HSPREPR_ORG(clkns) ((65 + 5 * (clkns)) / ((clkns) * 8))
+#define T_HSPREPR_DEFAULT(clkns) ((T_HSPREPR_ORG(clkns) == 0) ? \
+ 1 : T_HSPREPR_ORG(clkns))
+#define T_DATZERO_DEFAULT(clkns) ((145 + 5 * (clkns)) / ((clkns) * 8) + 1)
+
+#define T_CLKTRAIL_DEFAULT(clkns) (60 / ((clkns) * 8) + 1)
+#define T_CLKPOST_DEFAULT(clkns) ((60 + 52 * (clkns)) / ((clkns) * 8) + 1)
+#define T_CLKZERO_DEFAULT(clkns) (170 / ((clkns) * 8) + 1)
+#define T_TLPX_ORG(clkns) (50 / ((clkns) * 8) + 1)
+#define T_TLPX_DEFAULT(clkns) ((T_TLPX_ORG(clkns) == 0) ? \
+ 1 : T_TLPX_ORG(clkns))
+
+#define T_CLKPRE_DEFAULT(clkns) 1
+#define T_CLKPREPARE_DEFAULT(clkns) 4
+
+/* Minimum ULPM wakeup time as per the spec is 1msec */
+#define T_WAKEUP_DEFAULT(clkns) (2*1000*1000 / (clkns))
+
+#define DSI_CYCLE_COUNTER_VALUE 512
+
+/* Defines the DSI phy timing parameters */
+struct dsi_phy_timing_inclk {
+ unsigned t_hsdexit;
+ unsigned t_hstrail;
+ unsigned t_hsprepr;
+ unsigned t_datzero;
+
+ unsigned t_clktrail;
+ unsigned t_clkpost;
+ unsigned t_clkzero;
+ unsigned t_tlpx;
+
+ unsigned t_clkpre;
+ unsigned t_clkprepare;
+ unsigned t_wakeup;
+
+ unsigned t_taget;
+ unsigned t_tasure;
+ unsigned t_tago;
+};
+
+#endif
diff --git a/drivers/video/tegra/dc/dsi_regs.h b/drivers/video/tegra/dc/dsi_regs.h
new file mode 100644
index 000000000000..203ac32bd92d
--- /dev/null
+++ b/drivers/video/tegra/dc/dsi_regs.h
@@ -0,0 +1,351 @@
+/*
+ * drivers/video/tegra/dc/dsi_regs.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DSI_REG_H__
+#define __DRIVERS_VIDEO_TEGRA_DC_DSI_REG_H__
+
+enum {
+ TEGRA_DSI_DISABLE,
+ TEGRA_DSI_ENABLE,
+};
+
+/* These are word offsets from base (not byte offsets) */
+enum {
+ OP_DONE = 1,
+};
+#define DSI_INCR_SYNCPT 0x00
+#define DSI_INCR_SYNCPT_COND(x) (((x) & 0xff) << 8)
+#define DSI_INCR_SYNCPT_INDX(x) (((x) & 0xff) << 0)
+
+#define DSI_INCR_SYNCPT_CNTRL 0x01
+#define DSI_INCR_SYNCPT_ERROR 0x02
+#define DSI_CTXSW 0x08
+#define DSI_RD_DATA 0x09
+#define DSI_WR_DATA 0x0a
+
+#define DSI_POWER_CONTROL 0x0b
+#define DSI_POWER_CONTROL_LEG_DSI_ENABLE(x) (((x) & 0x1) << 0)
+
+#define DSI_INT_ENABLE 0x0c
+#define DSI_INT_STATUS 0x0d
+#define DSI_INT_MASK 0x0e
+
+#define DSI_HOST_DSI_CONTROL 0x0f
+enum {
+ RESET_CRC = 1,
+};
+#define DSI_HOST_CONTROL_FIFO_STAT_RESET(x) (((x) & 0x1) << 21)
+#define DSI_HOST_DSI_CONTROL_CRC_RESET(x) (((x) & 0x1) << 20)
+enum {
+ DSI_PHY_CLK_DIV1,
+ DSI_PHY_CLK_DIV2,
+};
+#define DSI_HOST_DSI_CONTROL_PHY_CLK_DIV(x) (((x) & 0x7) << 16)
+enum {
+ SOL,
+ FIFO_LEVEL,
+ IMMEDIATE,
+};
+#define DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(x) (((x) & 0x3) << 12)
+enum {
+ NORMAL,
+ ENTER_ULPM,
+ EXIT_ULPM,
+};
+#define DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(x) (((x) & 0x3) << 8)
+#define DSI_HOST_DSI_CONTROL_PERIPH_RESET(x) (((x) & 0x1) << 7)
+#define DSI_HOST_DSI_CONTROL_RAW_DATA(x) (((x) & 0x1) << 6)
+enum {
+ TEGRA_DSI_LOW,
+ TEGRA_DSI_HIGH,
+};
+#define DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(x) (((x) & 0x1) << 5)
+enum {
+ HOST_ONLY,
+ VIDEO_HOST,
+};
+#define DSI_HOST_DSI_CONTROL_PKT_WR_FIFO_SEL(x) (((x) & 0x1) << 4)
+#define DSI_HOST_DSI_CONTROL_IMM_BTA(x) (((x) & 0x1) << 3)
+#define DSI_HOST_DSI_CONTROL_PKT_BTA(x) (((x) & 0x1) << 2)
+#define DSI_HOST_DSI_CONTROL_CS_ENABLE(x) (((x) & 0x1) << 1)
+#define DSI_HOST_DSI_CONTROL_ECC_ENABLE(x) (((x) & 0x1) << 0)
+
+#define DSI_CONTROL 0x10
+#define DSI_CONTROL_DBG_ENABLE(x) (((x) & 0x1) << 31)
+enum {
+ CONTINUOUS,
+ TX_ONLY,
+};
+#define DSI_CONTROL_HS_CLK_CTRL(x) (((x) & 0x1) << 20)
+#define DSI_CONTROL_VIRTUAL_CHANNEL(x) (((x) & 0x3) << 16)
+#define DSI_CONTROL_DATA_FORMAT(x) (((x) & 0x3) << 12)
+#define DSI_CONTROL_VID_TX_TRIG_SRC(x) (((x) & 0x3) << 8)
+#define DSI_CONTROL_NUM_DATA_LANES(x) (((x) & 0x3) << 4)
+#define DSI_CONTROL_VID_DCS_ENABLE(x) (((x) & 0x1) << 3)
+#define DSI_CONTROL_VID_SOURCE(x) (((x) & 0x1) << 2)
+#define DSI_CONTROL_VID_ENABLE(x) (((x) & 0x1) << 1)
+#define DSI_CONTROL_HOST_ENABLE(x) (((x) & 0x1) << 0)
+
+#define DSI_SOL_DELAY 0x11
+#define DSI_SOL_DELAY_SOL_DELAY(x) (((x) & 0xffff) << 0)
+
+#define DSI_MAX_THRESHOLD 0x12
+#define DSI_MAX_THRESHOLD_MAX_THRESHOLD(x) (((x) & 0xffff) << 0)
+
+#define DSI_TRIGGER 0x13
+#define DSI_TRIGGER_HOST_TRIGGER(x) (((x) & 0x1) << 1)
+#define DSI_TRIGGER_VID_TRIGGER(x) (((x) & 0x1) << 0)
+
+#define DSI_TX_CRC 0x14
+#define DSI_TX_CRC_TX_CRC(x) (((x) & 0xffffffff) << 0)
+
+#define DSI_STATUS 0x15
+#define DSI_STATUS_IDLE(x) (((x) & 0x1) << 10)
+#define DSI_STATUS_LB_UNDERFLOW(x) (((x) & 0x1) << 9)
+#define DSI_STATUS_LB_OVERFLOW(x) (((x) & 0x1) << 8)
+#define DSI_STATUS_RD_FIFO_COUNT(x) (((x) & 0x1f) << 0)
+
+#define DSI_INIT_SEQ_CONTROL 0x1a
+#define DSI_INIT_SEQ_CONTROL_DSI_FRAME_INIT_BYTE_COUNT(x) \
+ (((x) & 0x3f) << 8)
+#define DSI_INIT_SEQ_CONTROL_DSI_SEND_INIT_SEQUENCE(x) \
+ (((x) & 0xff) << 0)
+
+#define DSI_INIT_SEQ_DATA_0 0x1b
+#define DSI_INIT_SEQ_DATA_1 0x1c
+#define DSI_INIT_SEQ_DATA_2 0x1d
+#define DSI_INIT_SEQ_DATA_3 0x1e
+#define DSI_INIT_SEQ_DATA_4 0x1f
+#define DSI_INIT_SEQ_DATA_5 0x20
+#define DSI_INIT_SEQ_DATA_6 0x21
+#define DSI_INIT_SEQ_DATA_7 0x22
+
+#define DSI_PKT_SEQ_0_LO 0x23
+#define DSI_PKT_SEQ_0_LO_SEQ_0_FORCE_LP(x) (((x) & 0x1) << 30)
+#define DSI_PKT_SEQ_0_LO_PKT_02_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_0_LO_PKT_02_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_0_LO_PKT_02_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_0_LO_PKT_01_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_0_LO_PKT_01_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_0_LO_PKT_01_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_0_LO_PKT_00_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_0_LO_PKT_00_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_0_LO_PKT_00_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_0_HI 0x24
+#define DSI_PKT_SEQ_0_HI_PKT_05_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_0_HI_PKT_05_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_0_HI_PKT_05_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_0_HI_PKT_04_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_0_HI_PKT_04_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_0_HI_PKT_04_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_0_HI_PKT_03_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_0_HI_PKT_03_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_0_HI_PKT_03_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_1_LO 0x25
+#define DSI_PKT_SEQ_1_LO_SEQ_1_FORCE_LP(x) (((x) & 0x1) << 30)
+#define DSI_PKT_SEQ_1_LO_PKT_12_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_1_LO_PKT_12_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_1_LO_PKT_12_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_1_LO_PKT_11_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_1_LO_PKT_11_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_1_LO_PKT_11_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_1_LO_PKT_10_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_1_LO_PKT_10_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_1_LO_PKT_10_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_1_HI 0x26
+#define DSI_PKT_SEQ_1_HI_PKT_15_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_1_HI_PKT_15_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_1_HI_PKT_15_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_1_HI_PKT_14_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_1_HI_PKT_14_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_1_HI_PKT_14_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_1_HI_PKT_13_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_1_HI_PKT_13_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_1_HI_PKT_13_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_2_LO 0x27
+#define DSI_PKT_SEQ_2_LO_SEQ_2_FORCE_LP(x) (((x) & 0x1) << 30)
+#define DSI_PKT_SEQ_2_LO_PKT_22_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_2_LO_PKT_22_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_2_LO_PKT_22_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_2_LO_PKT_21_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_2_LO_PKT_21_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_2_LO_PKT_21_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_2_LO_PKT_20_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_2_LO_PKT_20_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_2_LO_PKT_20_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_2_HI 0x28
+#define DSI_PKT_SEQ_2_HI_PKT_25_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_2_HI_PKT_25_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_2_HI_PKT_25_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_2_HI_PKT_24_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_2_HI_PKT_24_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_2_HI_PKT_24_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_2_HI_PKT_23_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_2_HI_PKT_23_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_2_HI_PKT_23_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_3_LO 0x29
+#define DSI_PKT_SEQ_3_LO_SEQ_3_FORCE_LP(x) (((x) & 0x1) << 30)
+#define DSI_PKT_SEQ_3_LO_PKT_32_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_3_LO_PKT_32_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_3_LO_PKT_32_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_3_LO_PKT_31_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_3_LO_PKT_31_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_3_LO_PKT_31_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_3_LO_PKT_30_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_3_LO_PKT_30_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_3_LO_PKT_30_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_3_HI 0x2a
+#define DSI_PKT_SEQ_3_HI_PKT_35_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_3_HI_PKT_35_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_3_HI_PKT_35_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_3_HI_PKT_34_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_3_HI_PKT_34_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_3_HI_PKT_34_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_3_HI_PKT_33_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_3_HI_PKT_33_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_3_HI_PKT_33_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_4_LO 0x2b
+#define DSI_PKT_SEQ_4_LO_SEQ_4_FORCE_LP(x) (((x) & 0x1) << 30)
+#define DSI_PKT_SEQ_4_LO_PKT_42_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_4_LO_PKT_42_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_4_LO_PKT_42_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_4_LO_PKT_41_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_4_LO_PKT_41_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_4_LO_PKT_41_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_4_LO_PKT_40_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_4_LO_PKT_40_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_4_LO_PKT_40_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_4_HI 0x2c
+#define DSI_PKT_SEQ_4_HI_PKT_45_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_4_HI_PKT_45_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_4_HI_PKT_45_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_4_HI_PKT_44_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_4_HI_PKT_44_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_4_HI_PKT_44_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_4_HI_PKT_43_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_4_HI_PKT_43_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_4_HI_PKT_43_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_5_LO 0x2d
+#define DSI_PKT_SEQ_5_LO_SEQ_5_FORCE_LP(x) (((x) & 0x1) << 30)
+#define DSI_PKT_SEQ_5_LO_PKT_52_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_5_LO_PKT_52_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_5_LO_PKT_52_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_5_LO_PKT_51_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_5_LO_PKT_51_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_5_LO_PKT_51_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_5_LO_PKT_50_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_5_LO_PKT_50_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_5_LO_PKT_50_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_5_HI 0x2e
+#define DSI_PKT_SEQ_5_HI_PKT_55_EN(x) (((x) & 0x1) << 29)
+#define DSI_PKT_SEQ_5_HI_PKT_55_ID(x) (((x) & 0x3f) << 23)
+#define DSI_PKT_SEQ_5_HI_PKT_55_SIZE(x) (((x) & 0x7) << 20)
+#define DSI_PKT_SEQ_5_HI_PKT_54_EN(x) (((x) & 0x1) << 19)
+#define DSI_PKT_SEQ_5_HI_PKT_54_ID(x) (((x) & 0x3f) << 13)
+#define DSI_PKT_SEQ_5_HI_PKT_54_SIZE(x) (((x) & 0x7) << 10)
+#define DSI_PKT_SEQ_5_HI_PKT_53_EN(x) (((x) & 0x1) << 9)
+#define DSI_PKT_SEQ_5_HI_PKT_53_ID(x) (((x) & 0x3f) << 3)
+#define DSI_PKT_SEQ_5_HI_PKT_53_SIZE(x) (((x) & 0x7) << 0)
+
+#define DSI_DCS_CMDS 0x33
+#define DSI_DCS_CMDS_LT5_DCS_CMD(x) (((x) & 0xff) << 8)
+#define DSI_DCS_CMDS_LT3_DCS_CMD(x) (((x) & 0xff) << 0)
+
+#define DSI_PKT_LEN_0_1 0x34
+#define DSI_PKT_LEN_0_1_LENGTH_1(x) (((x) & 0xffff) << 16)
+#define DSI_PKT_LEN_0_1_LENGTH_0(x) (((x) & 0xffff) << 0)
+
+#define DSI_PKT_LEN_2_3 0x35
+#define DSI_PKT_LEN_2_3_LENGTH_3(x) (((x) & 0xffff) << 16)
+#define DSI_PKT_LEN_2_3_LENGTH_2(x) (((x) & 0xffff) << 0)
+
+
+#define DSI_PKT_LEN_4_5 0x36
+#define DSI_PKT_LEN_4_5_LENGTH_5(x) (((x) & 0xffff) << 16)
+#define DSI_PKT_LEN_4_5_LENGTH_4(x) (((x) & 0xffff) << 0)
+
+#define DSI_PKT_LEN_6_7 0x37
+#define DSI_PKT_LEN_6_7_LENGTH_7(x) (((x) & 0xffff) << 16)
+#define DSI_PKT_LEN_6_7_LENGTH_6(x) (((x) & 0xffff) << 0)
+
+#define DSI_PHY_TIMING_0 0x3c
+#define DSI_PHY_TIMING_0_THSDEXIT(x) (((x) & 0xff) << 24)
+#define DSI_PHY_TIMING_0_THSTRAIL(x) (((x) & 0xff) << 16)
+#define DSI_PHY_TIMING_0_TDATZERO(x) (((x) & 0xff) << 8)
+#define DSI_PHY_TIMING_0_THSPREPR(x) (((x) & 0xff) << 0)
+
+#define DSI_PHY_TIMING_1 0x3d
+#define DSI_PHY_TIMING_1_TCLKTRAIL(x) (((x) & 0xff) << 24)
+#define DSI_PHY_TIMING_1_TCLKPOST(x) (((x) & 0xff) << 16)
+#define DSI_PHY_TIMING_1_TCLKZERO(x) (((x) & 0xff) << 8)
+#define DSI_PHY_TIMING_1_TTLPX(x) (((x) & 0xff) << 0)
+
+#define DSI_PHY_TIMING_2 0x3e
+#define DSI_PHY_TIMING_2_TCLKPREPARE(x) (((x) & 0xff) << 16)
+#define DSI_PHY_TIMING_2_TCLKPRE(x) (((x) & 0xff) << 8)
+#define DSI_PHY_TIMING_2_TWAKEUP(x) (((x) & 0xff) << 0)
+
+#define DSI_BTA_TIMING 0x3f
+#define DSI_BTA_TIMING_TTAGET(x) (((x) & 0xff) << 16)
+#define DSI_BTA_TIMING_TTASURE(x) (((x) & 0xff) << 8)
+#define DSI_BTA_TIMING_TTAGO(x) (((x) & 0xff) << 0)
+
+
+#define DSI_TIMEOUT_0 0x44
+#define DSI_TIMEOUT_0_LRXH_TO(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_0_HTX_TO(x) (((x) & 0xffff) << 0)
+
+#define DSI_TIMEOUT_1 0x45
+#define DSI_TIMEOUT_1_PR_TO(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_1_TA_TO(x) (((x) & 0xffff) << 0)
+
+#define DSI_TO_TALLY 0x46
+enum {
+ IN_RESET,
+ READY,
+};
+#define DSI_TO_TALLY_P_RESET_STATUS(x) (((x) & 0x1) << 24)
+#define DSI_TO_TALLY_TA_TALLY(x) (((x) & 0xff) << 16)
+#define DSI_TO_TALLY_LRXH_TALLY(x) (((x) & 0xff) << 8)
+#define DSI_TO_TALLY_HTX_TALLY(x) (((x) & 0xff) << 0)
+
+#define DSI_PAD_CONTROL 0x4b
+#define DSI_PAD_CONTROL_PAD_PULLDN_ENAB(x) (((x) & 0x1) << 28)
+#define DSI_PAD_CONTROL_PAD_SLEWUPADJ(x) (((x) & 0x7) << 24)
+#define DSI_PAD_CONTROL_PAD_SLEWDNADJ(x) (((x) & 0x7) << 20)
+#define DSI_PAD_CONTROL_PAD_PREEMP_EN(x) (((x) & 0x1) << 19)
+#define DSI_PAD_CONTROL_PAD_PDIO_CLK(x) (((x) & 0x1) << 18)
+#define DSI_PAD_CONTROL_PAD_PDIO(x) (((x) & 0x3) << 16)
+#define DSI_PAD_CONTROL_PAD_LPUPADJ(x) (((x) & 0x3) << 14)
+#define DSI_PAD_CONTROL_PAD_LPDNADJ(x) (((x) & 0x3) << 12)
+
+#define DSI_PAD_CONTROL_CD 0x4c
+#define DSI_PAD_CD_STATUS 0x4d
+#define DSI_VID_MODE_CONTROL 0x4e
+
+#endif
+
diff --git a/drivers/video/tegra/dc/edid.c b/drivers/video/tegra/dc/edid.c
new file mode 100644
index 000000000000..dd92154c1d19
--- /dev/null
+++ b/drivers/video/tegra/dc/edid.c
@@ -0,0 +1,611 @@
+/*
+ * drivers/video/tegra/dc/edid.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include "edid.h"
+
+struct tegra_edid_pvt {
+ struct kref refcnt;
+ struct tegra_edid_hdmi_eld eld;
+ bool support_stereo;
+ bool support_underscan;
+ /* Note: dc_edid must remain the last member */
+ struct tegra_dc_edid dc_edid;
+};
+
+struct tegra_edid {
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ int bus;
+
+ struct tegra_edid_pvt *data;
+
+ struct mutex lock;
+};
+
+#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
+static int tegra_edid_show(struct seq_file *s, void *unused)
+{
+ struct tegra_edid *edid = s->private;
+ struct tegra_dc_edid *data;
+ u8 *buf;
+ int i;
+
+ data = tegra_edid_get_data(edid);
+ if (!data) {
+ seq_printf(s, "No EDID\n");
+ return 0;
+ }
+
+ buf = data->buf;
+
+ for (i = 0; i < data->len; i++) {
+ if (i % 16 == 0)
+ seq_printf(s, "edid[%03x] =", i);
+
+ seq_printf(s, " %02x", buf[i]);
+
+ if (i % 16 == 15)
+ seq_printf(s, "\n");
+ }
+
+ tegra_edid_put_data(data);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_edid_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_edid_show, inode->i_private);
+}
+
+static const struct file_operations tegra_edid_debug_fops = {
+ .open = tegra_edid_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+ char name[] = "edidX";
+
+ snprintf(name, sizeof(name), "edid%1d", edid->bus);
+ debugfs_create_file(name, S_IRUGO, NULL, edid, &tegra_edid_debug_fops);
+}
+#else
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+}
+#endif
+
+#ifdef DEBUG
+static char tegra_edid_dump_buff[16 * 1024];
+
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+ struct seq_file s;
+ int i;
+ char c;
+
+ memset(&s, 0x0, sizeof(s));
+
+ s.buf = tegra_edid_dump_buff;
+ s.size = sizeof(tegra_edid_dump_buff);
+ s.private = edid;
+
+ tegra_edid_show(&s, NULL);
+
+ i = 0;
+ while (i < s.count) {
+ if ((s.count - i) > 256) {
+ c = s.buf[i + 256];
+ s.buf[i + 256] = 0;
+ printk("%s", s.buf + i);
+ s.buf[i + 256] = c;
+ } else {
+ printk("%s", s.buf + i);
+ }
+ i += 256;
+ }
+}
+#else
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+}
+#endif
+
+int tegra_edid_read_block(struct tegra_edid *edid, int block, u8 *data)
+{
+ u8 block_buf[] = {block >> 1};
+ u8 cmd_buf[] = {(block & 0x1) * 128};
+ int status;
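+ /* E-DDC read: the segment pointer at address 0x30 selects a 256-byte
+ * segment (two EDID blocks) and is only needed for blocks above 1;
+ * 0x50 is the standard EDID slave address and cmd_buf selects the
+ * 128-byte block offset within the segment.
+ */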
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x30,
+ .flags = 0,
+ .len = 1,
+ .buf = block_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = cmd_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 128,
+ .buf = data,
+ }};
+ struct i2c_msg *m;
+ int msg_len;
+
+ if (block > 1) {
+ msg_len = 3;
+ m = msg;
+ } else {
+ msg_len = 2;
+ m = &msg[1];
+ }
+
+ status = i2c_transfer(edid->client->adapter, m, msg_len);
+
+ if (status < 0)
+ return status;
+
+ if (status != msg_len)
+ return -EIO;
+
+ return 0;
+}
+
+int tegra_edid_parse_ext_block(const u8 *raw, int idx,
+ struct tegra_edid_pvt *edid)
+{
+ const u8 *ptr;
+ u8 tmp;
+ u8 code;
+ int len;
+ int i;
+ bool basic_audio = false;
+
+ ptr = &raw[0];
+
+ /* If CEA 861 block get info for eld struct */
+ if (edid && ptr) {
+ if (*ptr <= 3)
+ edid->eld.eld_ver = 0x02;
+ edid->eld.cea_edid_ver = ptr[1];
+
+ /* check for basic audio support in CEA 861 block */
+ if (raw[3] & (1 << 6)) {
+ /* For basic audio, set spk_alloc to Left+Right.
+ * If there is a Speaker Alloc block this will
+ * get overwritten with that value */
+ basic_audio = true;
+ }
+ }
+
+ if (raw[3] & 0x80)
+ edid->support_underscan = 1;
+ else
+ edid->support_underscan = 0;
+
+ ptr = &raw[4];
+
+ while (ptr < &raw[idx]) {
+ tmp = *ptr;
+ len = tmp & 0x1f;
+
+ /* HDMI Specification v1.4a, section 8.3.2:
+ * see Table 8-16 for HDMI VSDB format.
+ * data blocks have tags in top 3 bits:
+ * tag code 2: video data block
+ * tag code 3: vendor specific data block
+ */
+ code = (tmp >> 5) & 0x7;
+ switch (code) {
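+ /* tag code 1: audio data block (short audio descriptors) */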
+ case 1:
+ {
+ edid->eld.sad_count = len;
+ edid->eld.conn_type = 0x00;
+ edid->eld.support_hdcp = 0x00;
+ for (i = 0; (i < len) && (i < ELD_MAX_SAD); i++)
+ edid->eld.sad[i] = ptr[i + 1];
+ len++;
+ ptr += len; /* adding the header */
+ /* Got an audio data block so enable audio */
+ if (basic_audio)
+ edid->eld.spk_alloc = 1;
+ break;
+ }
+ /* case 2 is commented out for now */
+ case 3:
+ {
+ int j = 0;
+
+ if ((ptr[1] == 0x03) &&
+ (ptr[2] == 0x0c) &&
+ (ptr[3] == 0)) {
+ edid->eld.port_id[0] = ptr[4];
+ edid->eld.port_id[1] = ptr[5];
+ }
+ if ((len >= 8) &&
+ (ptr[1] == 0x03) &&
+ (ptr[2] == 0x0c) &&
+ (ptr[3] == 0)) {
+ j = 8;
+ tmp = ptr[j++];
+ /* HDMI_Video_present? */
+ if (tmp & 0x20) {
+ /* Latency_Fields_present? */
+ if (tmp & 0x80)
+ j += 2;
+ /* I_Latency_Fields_present? */
+ if (tmp & 0x40)
+ j += 2;
+ /* 3D_present? */
+ if (j <= len && (ptr[j] & 0x80))
+ edid->support_stereo = 1;
+ }
+ }
+ if ((len > 5) &&
+ (ptr[1] == 0x03) &&
+ (ptr[2] == 0x0c) &&
+ (ptr[3] == 0)) {
+
+ edid->eld.support_ai = (ptr[6] & 0x80);
+ }
+
+ if ((len > 9) &&
+ (ptr[1] == 0x03) &&
+ (ptr[2] == 0x0c) &&
+ (ptr[3] == 0)) {
+
+ edid->eld.aud_synch_delay = ptr[10];
+ }
+ len++;
+ ptr += len; /* adding the header */
+ break;
+ }
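+ /* tag code 4: speaker allocation data block */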
+ case 4:
+ {
+ edid->eld.spk_alloc = ptr[1];
+ len++;
+ ptr += len; /* adding the header */
+ break;
+ }
+ default:
+ len++; /* len does not include header */
+ ptr += len;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int tegra_edid_mode_support_stereo(struct fb_videomode *mode)
+{
+ if (!mode)
+ return 0;
+
+ if (mode->xres == 1280 &&
+ mode->yres == 720 &&
+ ((mode->refresh == 60) || (mode->refresh == 50)))
+ return 1;
+
+ /* Disabling 1080p stereo mode due to bug 869099. */
+ /* Must re-enable this to 1 once it is fixed. */
+ if (mode->xres == 1920 && mode->yres == 1080 && mode->refresh == 24)
+ return 0;
+
+ return 0;
+}
+
+static void data_release(struct kref *ref)
+{
+ struct tegra_edid_pvt *data =
+ container_of(ref, struct tegra_edid_pvt, refcnt);
+ vfree(data);
+}
+
+int tegra_edid_get_monspecs_test(struct tegra_edid *edid,
+ struct fb_monspecs *specs, unsigned char *edid_ptr)
+{
+ int i, j, ret;
+ int extension_blocks;
+ struct tegra_edid_pvt *new_data, *old_data;
+ u8 *data;
+
+ new_data = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt));
+ if (!new_data)
+ return -ENOMEM;
+
+ kref_init(&new_data->refcnt);
+
+ new_data->support_stereo = 0;
+ new_data->support_underscan = 0;
+
+ data = new_data->dc_edid.buf;
+ memcpy(data, edid_ptr, 128);
+
+ memset(specs, 0x0, sizeof(struct fb_monspecs));
+ memset(&new_data->eld, 0x0, sizeof(new_data->eld));
+ fb_edid_to_monspecs(data, specs);
+ if (specs->modedb == NULL) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ memcpy(new_data->eld.monitor_name, specs->monitor,
+ sizeof(specs->monitor));
+
+ new_data->eld.mnl = strlen(new_data->eld.monitor_name) + 1;
+ new_data->eld.product_id[0] = data[0x8];
+ new_data->eld.product_id[1] = data[0x9];
+ new_data->eld.manufacture_id[0] = data[0xA];
+ new_data->eld.manufacture_id[1] = data[0xB];
+
+ extension_blocks = data[0x7e];
+ for (i = 1; i <= extension_blocks; i++) {
+ memcpy(data + i * 128, edid_ptr + i * 128, 128);
+
+ if (data[i * 128] == 0x2) {
+ fb_edid_add_monspecs(data + i * 128, specs);
+
+ tegra_edid_parse_ext_block(data + i * 128,
+ data[i * 128 + 2], new_data);
+
+ if (new_data->support_stereo) {
+ for (j = 0; j < specs->modedb_len; j++) {
+ if (tegra_edid_mode_support_stereo(
+ &specs->modedb[j]))
+ specs->modedb[j].vmode |=
+ FB_VMODE_STEREO_FRAME_PACK;
+ }
+ }
+ }
+ }
+
+ new_data->dc_edid.len = i * 128;
+
+ mutex_lock(&edid->lock);
+ old_data = edid->data;
+ edid->data = new_data;
+ mutex_unlock(&edid->lock);
+
+ if (old_data)
+ kref_put(&old_data->refcnt, data_release);
+
+ tegra_edid_dump(edid);
+ return 0;
+fail:
+ vfree(new_data);
+ return ret;
+}
+
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs)
+{
+ int i;
+ int j;
+ int ret;
+ int extension_blocks;
+ struct tegra_edid_pvt *new_data, *old_data;
+ u8 *data;
+
+ new_data = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt));
+ if (!new_data)
+ return -ENOMEM;
+
+ kref_init(&new_data->refcnt);
+
+ new_data->support_stereo = 0;
+
+ data = new_data->dc_edid.buf;
+
+ ret = tegra_edid_read_block(edid, 0, data);
+ if (ret)
+ goto fail;
+
+ memset(specs, 0x0, sizeof(struct fb_monspecs));
+ memset(&new_data->eld, 0x0, sizeof(new_data->eld));
+ fb_edid_to_monspecs(data, specs);
+ if (specs->modedb == NULL) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ memcpy(new_data->eld.monitor_name, specs->monitor, sizeof(specs->monitor));
+ new_data->eld.mnl = strlen(new_data->eld.monitor_name) + 1;
+ new_data->eld.product_id[0] = data[0x8];
+ new_data->eld.product_id[1] = data[0x9];
+ new_data->eld.manufacture_id[0] = data[0xA];
+ new_data->eld.manufacture_id[1] = data[0xB];
+
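+ /* Byte 0x7e of the base EDID block gives the number of extension
+ * blocks that follow.
+ */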
+ extension_blocks = data[0x7e];
+
+ for (i = 1; i <= extension_blocks; i++) {
+ ret = tegra_edid_read_block(edid, i, data + i * 128);
+ if (ret < 0)
+ break;
+
+ if (data[i * 128] == 0x2) {
+ fb_edid_add_monspecs(data + i * 128, specs);
+
+ tegra_edid_parse_ext_block(data + i * 128,
+ data[i * 128 + 2], new_data);
+
+ if (new_data->support_stereo) {
+ for (j = 0; j < specs->modedb_len; j++) {
+ if (tegra_edid_mode_support_stereo(
+ &specs->modedb[j]))
+ specs->modedb[j].vmode |=
+ FB_VMODE_STEREO_FRAME_PACK;
+ }
+ }
+ }
+ }
+
+ new_data->dc_edid.len = i * 128;
+
+ mutex_lock(&edid->lock);
+ old_data = edid->data;
+ edid->data = new_data;
+ mutex_unlock(&edid->lock);
+
+ if (old_data)
+ kref_put(&old_data->refcnt, data_release);
+
+ tegra_edid_dump(edid);
+ return 0;
+
+fail:
+ vfree(new_data);
+ return ret;
+}
+
+int tegra_edid_underscan_supported(struct tegra_edid *edid)
+{
+ if ((!edid) || (!edid->data))
+ return 0;
+
+ return edid->data->support_underscan;
+}
+
+int tegra_edid_get_eld(struct tegra_edid *edid, struct tegra_edid_hdmi_eld *elddata)
+{
+ if (!elddata || !edid->data)
+ return -EFAULT;
+
+ memcpy(elddata,&edid->data->eld,sizeof(struct tegra_edid_hdmi_eld));
+
+ return 0;
+}
+
+struct tegra_edid *tegra_edid_create(int bus)
+{
+ struct tegra_edid *edid;
+ struct i2c_adapter *adapter;
+ int err;
+
+ edid = kzalloc(sizeof(struct tegra_edid), GFP_KERNEL);
+ if (!edid)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&edid->lock);
+ strlcpy(edid->info.type, "tegra_edid", sizeof(edid->info.type));
+ edid->bus = bus;
+ edid->info.addr = 0x50;
+ edid->info.platform_data = edid;
+
+ adapter = i2c_get_adapter(bus);
+ if (!adapter) {
+ pr_err("can't get adapter for bus %d\n", bus);
+ err = -EBUSY;
+ goto free_edid;
+ }
+
+ edid->client = i2c_new_device(adapter, &edid->info);
+ i2c_put_adapter(adapter);
+
+ if (!edid->client) {
+ pr_err("can't create new device\n");
+ err = -EBUSY;
+ goto free_edid;
+ }
+
+ tegra_edid_debug_add(edid);
+
+ return edid;
+
+free_edid:
+ kfree(edid);
+
+ return ERR_PTR(err);
+}
+
+void tegra_edid_destroy(struct tegra_edid *edid)
+{
+ i2c_release_client(edid->client);
+ if (edid->data)
+ kref_put(&edid->data->refcnt, data_release);
+ kfree(edid);
+}
+
+struct tegra_dc_edid *tegra_edid_get_data(struct tegra_edid *edid)
+{
+ struct tegra_edid_pvt *data;
+
+ mutex_lock(&edid->lock);
+ data = edid->data;
+ if (data)
+ kref_get(&data->refcnt);
+ mutex_unlock(&edid->lock);
+
+ return data ? &data->dc_edid : NULL;
+}
+
+void tegra_edid_put_data(struct tegra_dc_edid *data)
+{
+ struct tegra_edid_pvt *pvt;
+
+ if (!data)
+ return;
+
+ pvt = container_of(data, struct tegra_edid_pvt, dc_edid);
+
+ kref_put(&pvt->refcnt, data_release);
+}
+
+static const struct i2c_device_id tegra_edid_id[] = {
+ { "tegra_edid", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, tegra_edid_id);
+
+static struct i2c_driver tegra_edid_driver = {
+ .id_table = tegra_edid_id,
+ .driver = {
+ .name = "tegra_edid",
+ },
+};
+
+static int __init tegra_edid_init(void)
+{
+ return i2c_add_driver(&tegra_edid_driver);
+}
+
+static void __exit tegra_edid_exit(void)
+{
+ i2c_del_driver(&tegra_edid_driver);
+}
+
+module_init(tegra_edid_init);
+module_exit(tegra_edid_exit);
diff --git a/drivers/video/tegra/dc/edid.h b/drivers/video/tegra/dc/edid.h
new file mode 100644
index 000000000000..77db36f4adbf
--- /dev/null
+++ b/drivers/video/tegra/dc/edid.h
@@ -0,0 +1,62 @@
+/*
+ * drivers/video/tegra/dc/edid.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+#define __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+
+#include <linux/i2c.h>
+#include <linux/wait.h>
+#include <mach/dc.h>
+
+#define ELD_MAX_MNL 16
+#define ELD_MAX_SAD 16
+struct tegra_edid;
+
+/*
+ * ELD: EDID Like Data
+ */
+struct tegra_edid_hdmi_eld {
+ u8 baseline_len;
+ u8 eld_ver;
+ u8 cea_edid_ver;
+ char monitor_name[ELD_MAX_MNL + 1];
+ u8 mnl;
+ u8 manufacture_id[2];
+ u8 product_id[2];
+ u8 port_id[8];
+ u8 support_hdcp;
+ u8 support_ai;
+ u8 conn_type;
+ u8 aud_synch_delay;
+ u8 spk_alloc;
+ u8 sad_count;
+ u8 sad[ELD_MAX_SAD];
+};
+
+struct tegra_edid *tegra_edid_create(int bus);
+void tegra_edid_destroy(struct tegra_edid *edid);
+
+int tegra_edid_get_monspecs_test(struct tegra_edid *edid,
+ struct fb_monspecs *specs, u8 *edid_ptr);
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs);
+int tegra_edid_get_eld(struct tegra_edid *edid, struct tegra_edid_hdmi_eld *elddata);
+
+struct tegra_dc_edid *tegra_edid_get_data(struct tegra_edid *edid);
+void tegra_edid_put_data(struct tegra_dc_edid *data);
+
+int tegra_edid_underscan_supported(struct tegra_edid *edid);
+#endif
diff --git a/drivers/video/tegra/dc/ext/Makefile b/drivers/video/tegra/dc/ext/Makefile
new file mode 100644
index 000000000000..19860ab5db11
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/Makefile
@@ -0,0 +1,5 @@
+obj-y += dev.o
+obj-y += util.o
+obj-y += cursor.o
+obj-y += events.o
+obj-y += control.o
diff --git a/drivers/video/tegra/dc/ext/control.c b/drivers/video/tegra/dc/ext/control.c
new file mode 100644
index 000000000000..f6fb3c0d9006
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/control.c
@@ -0,0 +1,261 @@
+/*
+ * drivers/video/tegra/dc/ext/control.c
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "tegra_dc_ext_priv.h"
+
+static struct tegra_dc_ext_control g_control;
+
+int tegra_dc_ext_process_hotplug(int output)
+{
+ return tegra_dc_ext_queue_hotplug(&g_control, output);
+}
+
+static int
+get_output_properties(struct tegra_dc_ext_control_output_properties *properties)
+{
+ struct tegra_dc *dc;
+
+ /* TODO: this should be more dynamic */
+ if (properties->handle > 2)
+ return -EINVAL;
+
+ switch (properties->handle) {
+ case 0:
+ properties->type = TEGRA_DC_EXT_LVDS;
+ break;
+ case 1:
+ properties->type = TEGRA_DC_EXT_HDMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ properties->associated_head = properties->handle;
+ properties->head_mask = (1 << properties->associated_head);
+
+ dc = tegra_dc_get_dc(properties->associated_head);
+ properties->connected = tegra_dc_get_connected(dc);
+
+ return 0;
+}
+
+static int get_output_edid(struct tegra_dc_ext_control_output_edid *edid)
+{
+ struct tegra_dc *dc;
+ size_t user_size = edid->size;
+ struct tegra_dc_edid *dc_edid = NULL;
+ int ret = 0;
+
+ /* TODO: this should be more dynamic */
+ if (edid->handle > 2)
+ return -EINVAL;
+
+ dc = tegra_dc_get_dc(edid->handle);
+
+ dc_edid = tegra_dc_get_edid(dc);
+ if (IS_ERR(dc_edid))
+ return PTR_ERR(dc_edid);
+
+ if (!dc_edid) {
+ edid->size = 0;
+ } else {
+ edid->size = dc_edid->len;
+
+ if (user_size < edid->size) {
+ ret = -EFBIG;
+ goto done;
+ }
+
+ if (copy_to_user(edid->data, dc_edid->buf, edid->size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ }
+
+done:
+ if (dc_edid)
+ tegra_dc_put_edid(dc_edid);
+
+ return ret;
+}
+
+static int set_event_mask(struct tegra_dc_ext_control_user *user, u32 mask)
+{
+ struct list_head *list, *tmp;
+
+ if (mask & ~TEGRA_DC_EXT_EVENT_MASK_ALL)
+ return -EINVAL;
+
+ mutex_lock(&user->lock);
+
+ user->event_mask = mask;
+
+ list_for_each_safe(list, tmp, &user->event_list) {
+ struct tegra_dc_ext_event_list *ev_list;
+ ev_list = list_entry(list, struct tegra_dc_ext_event_list,
+ list);
+ if (!(mask & ev_list->event.type)) {
+ list_del(list);
+ kfree(ev_list);
+ atomic_dec(&user->num_events);
+ }
+ }
+ mutex_unlock(&user->lock);
+
+ return 0;
+}
+
+static long tegra_dc_ext_control_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *user_arg = (void __user *)arg;
+ struct tegra_dc_ext_control_user *user = filp->private_data;
+
+ switch (cmd) {
+ case TEGRA_DC_EXT_CONTROL_GET_NUM_OUTPUTS:
+ {
+ u32 num = tegra_dc_ext_get_num_outputs();
+
+ if (copy_to_user(user_arg, &num, sizeof(num)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case TEGRA_DC_EXT_CONTROL_GET_OUTPUT_PROPERTIES:
+ {
+ struct tegra_dc_ext_control_output_properties args;
+ int ret;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ ret = get_output_properties(&args);
+
+ if (copy_to_user(user_arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return ret;
+ }
+ case TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID:
+ {
+ struct tegra_dc_ext_control_output_edid args;
+ int ret;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ ret = get_output_edid(&args);
+
+ if (copy_to_user(user_arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return ret;
+ }
+ case TEGRA_DC_EXT_CONTROL_SET_EVENT_MASK:
+ return set_event_mask(user, (u32) arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tegra_dc_ext_control_open(struct inode *inode, struct file *filp)
+{
+ struct tegra_dc_ext_control_user *user;
+ struct tegra_dc_ext_control *control;
+
+ user = kzalloc(sizeof(*user), GFP_KERNEL);
+ if (!user)
+ return -ENOMEM;
+
+ control = container_of(inode->i_cdev, struct tegra_dc_ext_control,
+ cdev);
+ user->control = control;
+
+ INIT_LIST_HEAD(&user->event_list);
+ mutex_init(&user->lock);
+
+ filp->private_data = user;
+
+ mutex_lock(&control->lock);
+ list_add(&user->list, &control->users);
+ mutex_unlock(&control->lock);
+
+ return 0;
+}
+
+static int tegra_dc_ext_control_release(struct inode *inode, struct file *filp)
+{
+ struct tegra_dc_ext_control_user *user = filp->private_data;
+ struct tegra_dc_ext_control *control = user->control;
+
+ /* This will free any pending events for this user */
+ set_event_mask(user, 0);
+
+ mutex_lock(&control->lock);
+ list_del(&user->list);
+ mutex_unlock(&control->lock);
+
+ kfree(user);
+
+ return 0;
+}
+
+static const struct file_operations tegra_dc_ext_event_devops = {
+ .owner = THIS_MODULE,
+ .open = tegra_dc_ext_control_open,
+ .release = tegra_dc_ext_control_release,
+ .read = tegra_dc_ext_event_read,
+ .poll = tegra_dc_ext_event_poll,
+ .unlocked_ioctl = tegra_dc_ext_control_ioctl,
+};
+
+int tegra_dc_ext_control_init(void)
+{
+ struct tegra_dc_ext_control *control = &g_control;
+ int ret;
+
+ cdev_init(&control->cdev, &tegra_dc_ext_event_devops);
+ control->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&control->cdev, tegra_dc_ext_devno, 1);
+ if (ret)
+ return ret;
+
+ control->dev = device_create(tegra_dc_ext_class,
+ NULL,
+ tegra_dc_ext_devno,
+ NULL,
+ "tegra_dc_ctrl");
+ if (IS_ERR(control->dev)) {
+ ret = PTR_ERR(control->dev);
+ cdev_del(&control->cdev);
+ }
+
+ mutex_init(&control->lock);
+
+ INIT_LIST_HEAD(&control->users);
+
+ return ret;
+}
diff --git a/drivers/video/tegra/dc/ext/cursor.c b/drivers/video/tegra/dc/ext/cursor.c
new file mode 100644
index 000000000000..d8fa5fd8e6d9
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/cursor.c
@@ -0,0 +1,203 @@
+/*
+ * drivers/video/tegra/dc/ext/cursor.c
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <video/tegra_dc_ext.h>
+
+#include "tegra_dc_ext_priv.h"
+
+/* ugh */
+#include "../dc_priv.h"
+#include "../dc_reg.h"
+
+int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ int ret = 0;
+
+ mutex_lock(&ext->cursor.lock);
+
+ if (!ext->cursor.user)
+ ext->cursor.user = user;
+ else if (ext->cursor.user != user)
+ ret = -EBUSY;
+
+ mutex_unlock(&ext->cursor.lock);
+
+ return ret;
+}
+
+int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ int ret = 0;
+
+ mutex_lock(&ext->cursor.lock);
+
+ if (ext->cursor.user == user)
+ ext->cursor.user = NULL;
+ else
+ ret = -EACCES;
+
+ mutex_unlock(&ext->cursor.lock);
+
+ return ret;
+}
+
+static void set_cursor_image_hw(struct tegra_dc *dc,
+ struct tegra_dc_ext_cursor_image *args,
+ dma_addr_t phys_addr)
+{
+ tegra_dc_writel(dc,
+ CURSOR_COLOR(args->foreground.r,
+ args->foreground.g,
+ args->foreground.b),
+ DC_DISP_CURSOR_FOREGROUND);
+ tegra_dc_writel(dc,
+ CURSOR_COLOR(args->background.r,
+ args->background.g,
+ args->background.b),
+ DC_DISP_CURSOR_BACKGROUND);
+
+ BUG_ON(phys_addr & ~CURSOR_START_ADDR_MASK);
+
+ tegra_dc_writel(dc,
+ CURSOR_START_ADDR(((unsigned long) phys_addr)) |
+ ((args->flags & TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64) ?
+ CURSOR_SIZE_64 : 0),
+ DC_DISP_CURSOR_START_ADDR);
+}
+
+int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_cursor_image *args)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ struct tegra_dc *dc = ext->dc;
+ struct nvmap_handle_ref *handle, *old_handle;
+ dma_addr_t phys_addr;
+ u32 size;
+ int ret;
+
+ if (!user->nvmap)
+ return -EFAULT;
+
+ size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
+ TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);
+
+ if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
+ size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
+ return -EINVAL;
+
+ mutex_lock(&ext->cursor.lock);
+
+ if (ext->cursor.user != user) {
+ ret = -EACCES;
+ goto unlock;
+ }
+
+ if (!ext->enabled) {
+ ret = -ENXIO;
+ goto unlock;
+ }
+
+ old_handle = ext->cursor.cur_handle;
+
+ ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
+ if (ret)
+ goto unlock;
+
+ ext->cursor.cur_handle = handle;
+
+ mutex_lock(&dc->lock);
+
+ set_cursor_image_hw(dc, args, phys_addr);
+
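+ /*
+ * Two-stage commit: the first write (GENERAL_ACT_REQ << 8 is the
+ * corresponding UPDATE bit) latches the new cursor state, the
+ * second one requests its activation.
+ */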
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* XXX sync here? */
+
+ mutex_unlock(&dc->lock);
+
+ mutex_unlock(&ext->cursor.lock);
+
+ if (old_handle) {
+ nvmap_unpin(ext->nvmap, old_handle);
+ nvmap_free(ext->nvmap, old_handle);
+ }
+
+ return 0;
+
+unlock:
+ mutex_unlock(&ext->cursor.lock);
+
+ return ret;
+}
+
+int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_cursor *args)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ struct tegra_dc *dc = ext->dc;
+ u32 win_options;
+ bool enable;
+ int ret;
+
+ mutex_lock(&ext->cursor.lock);
+
+ if (ext->cursor.user != user) {
+ ret = -EACCES;
+ goto unlock;
+ }
+
+ if (!ext->enabled) {
+ ret = -ENXIO;
+ goto unlock;
+ }
+
+ enable = !!(args->flags & TEGRA_DC_EXT_CURSOR_FLAGS_VISIBLE);
+
+ mutex_lock(&dc->lock);
+
+ win_options = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ if (!!(win_options & CURSOR_ENABLE) != enable) {
+ win_options &= ~CURSOR_ENABLE;
+ if (enable)
+ win_options |= CURSOR_ENABLE;
+ tegra_dc_writel(dc, win_options, DC_DISP_DISP_WIN_OPTIONS);
+ }
+
+ tegra_dc_writel(dc, CURSOR_POSITION(args->x, args->y),
+ DC_DISP_CURSOR_POSITION);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* TODO: need to sync here? hopefully can avoid this, but need to
+ * figure out interaction w/ rest of GENERAL_ACT_REQ */
+
+ mutex_unlock(&dc->lock);
+
+ mutex_unlock(&ext->cursor.lock);
+
+ return 0;
+
+unlock:
+ mutex_unlock(&ext->cursor.lock);
+
+ return ret;
+}
diff --git a/drivers/video/tegra/dc/ext/dev.c b/drivers/video/tegra/dc/ext/dev.c
new file mode 100644
index 000000000000..2148c0b18c71
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/dev.c
@@ -0,0 +1,919 @@
+/*
+ * drivers/video/tegra/dc/dev.c
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ * Some code based on fbdev extensions written by:
+ * Erik Gilling <konkers@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <video/tegra_dc_ext.h>
+
+#include <mach/dc.h>
+#include <mach/nvmap.h>
+#include <mach/tegra_dc_ext.h>
+
+/* XXX ew */
+#include "../dc_priv.h"
+/* XXX ew 2 */
+#include "../../host/dev.h"
+/* XXX ew 3 */
+#include "../../nvmap/nvmap.h"
+#include "tegra_dc_ext_priv.h"
+
+int tegra_dc_ext_devno;
+struct class *tegra_dc_ext_class;
+static int head_count;
+
+struct tegra_dc_ext_flip_win {
+ struct tegra_dc_ext_flip_windowattr attr;
+ struct nvmap_handle_ref *handle[TEGRA_DC_NUM_PLANES];
+ dma_addr_t phys_addr;
+ dma_addr_t phys_addr_u;
+ dma_addr_t phys_addr_v;
+ u32 syncpt_max;
+};
+
+struct tegra_dc_ext_flip_data {
+ struct tegra_dc_ext *ext;
+ struct work_struct work;
+ struct tegra_dc_ext_flip_win win[DC_N_WINDOWS];
+};
+
+int tegra_dc_ext_get_num_outputs(void)
+{
+ /* TODO: decouple output count from head count */
+ return head_count;
+}
+
+static int tegra_dc_ext_set_nvmap_fd(struct tegra_dc_ext_user *user,
+ int fd)
+{
+ struct nvmap_client *nvmap = NULL;
+
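+ /* A negative fd detaches the user from its current nvmap client. */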
+ if (fd >= 0) {
+ nvmap = nvmap_client_get_file(fd);
+ if (IS_ERR(nvmap))
+ return PTR_ERR(nvmap);
+ }
+
+ if (user->nvmap)
+ nvmap_client_put(user->nvmap);
+
+ user->nvmap = nvmap;
+
+ return 0;
+}
+
+static int tegra_dc_ext_get_window(struct tegra_dc_ext_user *user,
+ unsigned int n)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ struct tegra_dc_ext_win *win;
+ int ret = 0;
+
+ if (n >= DC_N_WINDOWS)
+ return -EINVAL;
+
+ win = &ext->win[n];
+
+ mutex_lock(&win->lock);
+
+ if (!win->user)
+ win->user = user;
+ else if (win->user != user)
+ ret = -EBUSY;
+
+ mutex_unlock(&win->lock);
+
+ return ret;
+}
+
+static int tegra_dc_ext_put_window(struct tegra_dc_ext_user *user,
+ unsigned int n)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ struct tegra_dc_ext_win *win;
+ int ret = 0;
+
+ if (n >= DC_N_WINDOWS)
+ return -EINVAL;
+
+ win = &ext->win[n];
+
+ mutex_lock(&win->lock);
+
+ if (win->user == user) {
+ flush_workqueue(win->flip_wq);
+ win->user = NULL;
+ } else {
+ ret = -EACCES;
+ }
+
+ mutex_unlock(&win->lock);
+
+ return ret;
+}
+
+static void set_enable(struct tegra_dc_ext *ext, bool en)
+{
+ int i;
+
+ /*
+ * Take all locks to make sure any flip requests or cursor moves are
+ * out of their critical sections
+ */
+ for (i = 0; i < ext->dc->n_windows; i++)
+ mutex_lock(&ext->win[i].lock);
+ mutex_lock(&ext->cursor.lock);
+
+ ext->enabled = en;
+
+ mutex_unlock(&ext->cursor.lock);
+ for (i = ext->dc->n_windows - 1; i >= 0 ; i--)
+ mutex_unlock(&ext->win[i].lock);
+}
+
+void tegra_dc_ext_enable(struct tegra_dc_ext *ext)
+{
+ set_enable(ext, true);
+}
+
+void tegra_dc_ext_disable(struct tegra_dc_ext *ext)
+{
+ int i;
+ set_enable(ext, false);
+
+ /*
+ * Flush the flip queue -- note that this must be called with dc->lock
+ * unlocked or else it will hang.
+ */
+ for (i = 0; i < ext->dc->n_windows; i++) {
+ struct tegra_dc_ext_win *win = &ext->win[i];
+
+ flush_workqueue(win->flip_wq);
+ }
+}
+
+static int tegra_dc_ext_set_windowattr(struct tegra_dc_ext *ext,
+ struct tegra_dc_win *win,
+ const struct tegra_dc_ext_flip_win *flip_win)
+{
+ struct tegra_dc_ext_win *ext_win = &ext->win[win->idx];
+
+ if (flip_win->handle[TEGRA_DC_Y] == NULL) {
+ win->flags = 0;
+ memset(ext_win->cur_handle, 0, sizeof(ext_win->cur_handle));
+ return 0;
+ }
+
+ win->flags = TEGRA_WIN_FLAG_ENABLED;
+ if (flip_win->attr.blend == TEGRA_DC_EXT_BLEND_PREMULT)
+ win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
+ else if (flip_win->attr.blend == TEGRA_DC_EXT_BLEND_COVERAGE)
+ win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
+ if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_TILED)
+ win->flags |= TEGRA_WIN_FLAG_TILED;
+ if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_INVERT_H)
+ win->flags |= TEGRA_WIN_FLAG_INVERT_H;
+ if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_INVERT_V)
+ win->flags |= TEGRA_WIN_FLAG_INVERT_V;
+ win->fmt = flip_win->attr.pixformat;
+ win->x.full = flip_win->attr.x;
+ win->y.full = flip_win->attr.y;
+ win->w.full = flip_win->attr.w;
+ win->h.full = flip_win->attr.h;
+ /* XXX verify that this doesn't go outside display's active region */
+ win->out_x = flip_win->attr.out_x;
+ win->out_y = flip_win->attr.out_y;
+ win->out_w = flip_win->attr.out_w;
+ win->out_h = flip_win->attr.out_h;
+ win->z = flip_win->attr.z;
+ memcpy(ext_win->cur_handle, flip_win->handle,
+ sizeof(ext_win->cur_handle));
+
+ /* XXX verify that this won't read outside of the surface */
+ win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;
+
+ win->phys_addr_u = flip_win->handle[TEGRA_DC_U] ?
+ flip_win->phys_addr_u : flip_win->phys_addr;
+ win->phys_addr_u += flip_win->attr.offset_u;
+
+ win->phys_addr_v = flip_win->handle[TEGRA_DC_V] ?
+ flip_win->phys_addr_v : flip_win->phys_addr;
+ win->phys_addr_v += flip_win->attr.offset_v;
+
+ win->stride = flip_win->attr.stride;
+ win->stride_uv = flip_win->attr.stride_uv;
+
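+ /*
+ * If the client passed a pre-syncpt (acquire fence), wait for it
+ * before programming the window; the 500ms timeout keeps a stuck
+ * fence from wedging the flip worker indefinitely.
+ */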
+ if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
+ nvhost_syncpt_wait_timeout(&ext->dc->ndev->host->syncpt,
+ flip_win->attr.pre_syncpt_id,
+ flip_win->attr.pre_syncpt_val,
+ msecs_to_jiffies(500), NULL);
+ }
+
+ return 0;
+}
+
+static void tegra_dc_ext_flip_worker(struct work_struct *work)
+{
+ struct tegra_dc_ext_flip_data *data =
+ container_of(work, struct tegra_dc_ext_flip_data, work);
+ struct tegra_dc_ext *ext = data->ext;
+ struct tegra_dc_win *wins[DC_N_WINDOWS];
+ struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
+ TEGRA_DC_NUM_PLANES];
+ int i, nr_unpin = 0, nr_win = 0;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
+ int index = flip_win->attr.index;
+ struct tegra_dc_win *win;
+ struct tegra_dc_ext_win *ext_win;
+
+ if (index < 0)
+ continue;
+
+ win = tegra_dc_get_window(ext->dc, index);
+ ext_win = &ext->win[index];
+
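+ /*
+ * Collect the handles currently being scanned out by this
+ * window; they are unpinned only after the new configuration
+ * has been latched and synced below.
+ */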
+ if (win->flags & TEGRA_WIN_FLAG_ENABLED) {
+ int j;
+ for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
+ if (!ext_win->cur_handle[j])
+ continue;
+
+ unpin_handles[nr_unpin++] =
+ ext_win->cur_handle[j];
+ }
+ }
+
+ tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);
+
+ wins[nr_win++] = win;
+ }
+
+ tegra_dc_update_windows(wins, nr_win);
+ /* TODO: implement swapinterval here */
+ tegra_dc_sync_windows(wins, nr_win);
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
+ int index = flip_win->attr.index;
+
+ if (index < 0)
+ continue;
+
+ tegra_dc_incr_syncpt_min(ext->dc, index,
+ flip_win->syncpt_max);
+ }
+
+ /* unpin and deref previous front buffers */
+ for (i = 0; i < nr_unpin; i++) {
+ nvmap_unpin(ext->nvmap, unpin_handles[i]);
+ nvmap_free(ext->nvmap, unpin_handles[i]);
+ }
+
+ kfree(data);
+}
+
+static int lock_windows_for_flip(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_flip *args)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ int i;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ int index = args->win[i].index;
+ struct tegra_dc_ext_win *win;
+
+ if (index < 0)
+ continue;
+
+ win = &ext->win[index];
+
+ mutex_lock(&win->lock);
+
+ if (win->user != user)
+ goto fail_unlock;
+ }
+
+ return 0;
+
+fail_unlock:
+ do {
+ int index = args->win[i].index;
+
+ if (index < 0)
+ continue;
+
+ mutex_unlock(&ext->win[index].lock);
+ } while (i--);
+
+ return -EACCES;
+}
+
+static void unlock_windows_for_flip(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_flip *args)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ int i;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ int index = args->win[i].index;
+
+ if (index < 0)
+ continue;
+
+ mutex_unlock(&ext->win[index].lock);
+ }
+}
+
+static int sanitize_flip_args(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_flip *args)
+{
+ int i, used_windows = 0;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ int index = args->win[i].index;
+
+ if (index < 0)
+ continue;
+
+ if (index >= DC_N_WINDOWS)
+ return -EINVAL;
+
+ if (used_windows & BIT(index))
+ return -EINVAL;
+
+ used_windows |= BIT(index);
+ }
+
+ if (!used_windows)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_flip *args)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ struct tegra_dc_ext_flip_data *data;
+ int work_index;
+ int i, ret = 0;
+
+ if (!user->nvmap)
+ return -EFAULT;
+
+ ret = sanitize_flip_args(user, args);
+ if (ret)
+ return ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_WORK(&data->work, tegra_dc_ext_flip_worker);
+ data->ext = ext;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
+ int index = args->win[i].index;
+
+ memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
+
+ if (index < 0)
+ continue;
+
+ ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id,
+ &flip_win->handle[TEGRA_DC_Y],
+ &flip_win->phys_addr);
+ if (ret)
+ goto fail_pin;
+
+ if (flip_win->attr.buff_id_u) {
+ ret = tegra_dc_ext_pin_window(user,
+ flip_win->attr.buff_id_u,
+ &flip_win->handle[TEGRA_DC_U],
+ &flip_win->phys_addr_u);
+ if (ret)
+ goto fail_pin;
+ } else {
+ flip_win->handle[TEGRA_DC_U] = NULL;
+ flip_win->phys_addr_u = 0;
+ }
+
+ if (flip_win->attr.buff_id_v) {
+ ret = tegra_dc_ext_pin_window(user,
+ flip_win->attr.buff_id_v,
+ &flip_win->handle[TEGRA_DC_V],
+ &flip_win->phys_addr_v);
+ if (ret)
+ goto fail_pin;
+ } else {
+ flip_win->handle[TEGRA_DC_V] = NULL;
+ flip_win->phys_addr_v = 0;
+ }
+ }
+
+ ret = lock_windows_for_flip(user, args);
+ if (ret)
+ goto fail_pin;
+
+ if (!ext->enabled) {
+ ret = -ENXIO;
+ goto unlock;
+ }
+
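+ /*
+ * Reserve one syncpoint increment per participating window; the
+ * flip worker raises the syncpoint once the windows have been
+ * updated and synced, so post_syncpt_id/val serve as the client's
+ * release fence.
+ */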
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ u32 syncpt_max;
+ int index = args->win[i].index;
+
+ if (index < 0)
+ continue;
+
+ syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index);
+
+ data->win[i].syncpt_max = syncpt_max;
+
+ /*
+ * Any of these windows' syncpoints should be equivalent for
+ * the client, so we just send back an arbitrary one of them
+ */
+ args->post_syncpt_val = syncpt_max;
+ args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index);
+ work_index = index;
+ }
+ queue_work(ext->win[work_index].flip_wq, &data->work);
+
+ unlock_windows_for_flip(user, args);
+
+ return 0;
+
+unlock:
+ unlock_windows_for_flip(user, args);
+
+fail_pin:
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ int j;
+ for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
+ if (!data->win[i].handle[j])
+ continue;
+
+ nvmap_unpin(ext->nvmap, data->win[i].handle[j]);
+ nvmap_free(ext->nvmap, data->win[i].handle[j]);
+ }
+ }
+ kfree(data);
+
+ return ret;
+}
+
+static int tegra_dc_ext_set_csc(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_csc *new_csc)
+{
+ unsigned int index = new_csc->win_index;
+ struct tegra_dc *dc = user->ext->dc;
+ struct tegra_dc_ext_win *ext_win;
+ struct tegra_dc_csc *csc;
+
+ if (index >= DC_N_WINDOWS)
+ return -EINVAL;
+
+ ext_win = &user->ext->win[index];
+ csc = &dc->windows[index].csc;
+
+ mutex_lock(&ext_win->lock);
+
+ if (ext_win->user != user) {
+ mutex_unlock(&ext_win->lock);
+ return -EACCES;
+ }
+
+ csc->yof = new_csc->yof;
+ csc->kyrgb = new_csc->kyrgb;
+ csc->kur = new_csc->kur;
+ csc->kvr = new_csc->kvr;
+ csc->kug = new_csc->kug;
+ csc->kvg = new_csc->kvg;
+ csc->kub = new_csc->kub;
+ csc->kvb = new_csc->kvb;
+
+ tegra_dc_update_csc(dc, index);
+
+ mutex_unlock(&ext_win->lock);
+
+ return 0;
+}
+
+static int set_lut_channel(u16 *channel_from_user,
+ u8 *channel_to,
+ u32 start,
+ u32 len)
+{
+ int i;
+ u16 lut16bpp[256];
+
+ if (channel_from_user) {
+ if (copy_from_user(lut16bpp, channel_from_user, len<<1))
+ return 1;
+
+ for (i = 0; i < len; i++)
+ channel_to[start+i] = lut16bpp[i]>>8;
+ } else {
+ for (i = 0; i < len; i++)
+ channel_to[start+i] = start+i;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_ext_set_lut(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_lut *new_lut)
+{
+ int err;
+ unsigned int index = new_lut->win_index;
+ u32 start = new_lut->start;
+ u32 len = new_lut->len;
+
+ struct tegra_dc *dc = user->ext->dc;
+ struct tegra_dc_ext_win *ext_win;
+ struct tegra_dc_lut *lut;
+
+ if (index >= DC_N_WINDOWS)
+ return -EINVAL;
+
+ if ((start >= 256) || (len > 256) || ((start + len) > 256))
+ return -EINVAL;
+
+ ext_win = &user->ext->win[index];
+ lut = &dc->windows[index].lut;
+
+ mutex_lock(&ext_win->lock);
+
+ if (ext_win->user != user) {
+ mutex_unlock(&ext_win->lock);
+ return -EACCES;
+ }
+
+ err = set_lut_channel(new_lut->r, lut->r, start, len) |
+ set_lut_channel(new_lut->g, lut->g, start, len) |
+ set_lut_channel(new_lut->b, lut->b, start, len);
+
+ if (err) {
+ mutex_unlock(&ext_win->lock);
+ return -EFAULT;
+ }
+
+ tegra_dc_update_lut(dc, index,
+ new_lut->flags & TEGRA_DC_EXT_LUT_FLAGS_FBOVERRIDE);
+
+ mutex_unlock(&ext_win->lock);
+
+ return 0;
+}
+
+static u32 tegra_dc_ext_get_vblank_syncpt(struct tegra_dc_ext_user *user)
+{
+ struct tegra_dc *dc = user->ext->dc;
+
+ return dc->vblank_syncpt;
+}
+
+static int tegra_dc_ext_get_status(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_status *status)
+{
+ struct tegra_dc *dc = user->ext->dc;
+
+ memset(status, 0, sizeof(*status));
+
+ if (dc->enabled)
+ status->flags |= TEGRA_DC_EXT_FLAGS_ENABLED;
+
+ return 0;
+}
+
+static long tegra_dc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *user_arg = (void __user *)arg;
+ struct tegra_dc_ext_user *user = filp->private_data;
+
+ switch (cmd) {
+ case TEGRA_DC_EXT_SET_NVMAP_FD:
+ return tegra_dc_ext_set_nvmap_fd(user, arg);
+
+ case TEGRA_DC_EXT_GET_WINDOW:
+ return tegra_dc_ext_get_window(user, arg);
+ case TEGRA_DC_EXT_PUT_WINDOW:
+ return tegra_dc_ext_put_window(user, arg);
+
+ case TEGRA_DC_EXT_FLIP:
+ {
+ struct tegra_dc_ext_flip args;
+ int ret;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ ret = tegra_dc_ext_flip(user, &args);
+
+ if (copy_to_user(user_arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return ret;
+ }
+
+ case TEGRA_DC_EXT_GET_CURSOR:
+ return tegra_dc_ext_get_cursor(user);
+ case TEGRA_DC_EXT_PUT_CURSOR:
+ return tegra_dc_ext_put_cursor(user);
+ case TEGRA_DC_EXT_SET_CURSOR_IMAGE:
+ {
+ struct tegra_dc_ext_cursor_image args;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ return tegra_dc_ext_set_cursor_image(user, &args);
+ }
+ case TEGRA_DC_EXT_SET_CURSOR:
+ {
+ struct tegra_dc_ext_cursor args;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ return tegra_dc_ext_set_cursor(user, &args);
+ }
+
+ case TEGRA_DC_EXT_SET_CSC:
+ {
+ struct tegra_dc_ext_csc args;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ return tegra_dc_ext_set_csc(user, &args);
+ }
+
+ case TEGRA_DC_EXT_GET_VBLANK_SYNCPT:
+ {
+ u32 syncpt = tegra_dc_ext_get_vblank_syncpt(user);
+
+ if (copy_to_user(user_arg, &syncpt, sizeof(syncpt)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case TEGRA_DC_EXT_GET_STATUS:
+ {
+ struct tegra_dc_ext_status args;
+ int ret;
+
+ ret = tegra_dc_ext_get_status(user, &args);
+
+ if (copy_to_user(user_arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return ret;
+ }
+
+ case TEGRA_DC_EXT_SET_LUT:
+ {
+ struct tegra_dc_ext_lut args;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ return tegra_dc_ext_set_lut(user, &args);
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tegra_dc_open(struct inode *inode, struct file *filp)
+{
+ struct tegra_dc_ext_user *user;
+ struct tegra_dc_ext *ext;
+
+ user = kzalloc(sizeof(*user), GFP_KERNEL);
+ if (!user)
+ return -ENOMEM;
+
+ ext = container_of(inode->i_cdev, struct tegra_dc_ext, cdev);
+ user->ext = ext;
+
+ filp->private_data = user;
+
+ return 0;
+}
+
+static int tegra_dc_release(struct inode *inode, struct file *filp)
+{
+ struct tegra_dc_ext_user *user = filp->private_data;
+ struct tegra_dc_ext *ext = user->ext;
+ unsigned int i;
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (ext->win[i].user == user)
+ tegra_dc_ext_put_window(user, i);
+ }
+ if (ext->cursor.user == user)
+ tegra_dc_ext_put_cursor(user);
+
+ if (user->nvmap)
+ nvmap_client_put(user->nvmap);
+
+ kfree(user);
+
+ return 0;
+}
+
+static int tegra_dc_ext_setup_windows(struct tegra_dc_ext *ext)
+{
+ int i, ret;
+
+ for (i = 0; i < ext->dc->n_windows; i++) {
+ struct tegra_dc_ext_win *win = &ext->win[i];
+ char name[32];
+
+ win->ext = ext;
+ win->idx = i;
+
+ snprintf(name, sizeof(name), "tegradc.%d/%c",
+ ext->dc->ndev->id, 'a' + i);
+ win->flip_wq = create_singlethread_workqueue(name);
+ if (!win->flip_wq) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ mutex_init(&win->lock);
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct tegra_dc_ext_win *win = &ext->win[i];
+ destroy_workqueue(win->flip_wq);
+ }
+
+ return ret;
+}
+
+static const struct file_operations tegra_dc_devops = {
+ .owner = THIS_MODULE,
+ .open = tegra_dc_open,
+ .release = tegra_dc_release,
+ .unlocked_ioctl = tegra_dc_ioctl,
+};
+
+struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc)
+{
+ int ret;
+ struct tegra_dc_ext *ext;
+ int devno;
+
+ ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+ if (!ext)
+ return ERR_PTR(-ENOMEM);
+
+ BUG_ON(!tegra_dc_ext_devno);
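+ /*
+ * Minor 0 of the region is the shared control node set up by
+ * tegra_dc_ext_control_init(); each head takes the next minor.
+ */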
+ devno = tegra_dc_ext_devno + head_count + 1;
+
+ cdev_init(&ext->cdev, &tegra_dc_devops);
+ ext->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&ext->cdev, devno, 1);
+ if (ret) {
+ dev_err(&ndev->dev, "Failed to create character device\n");
+ goto cleanup_alloc;
+ }
+
+ ext->dev = device_create(tegra_dc_ext_class,
+ &ndev->dev,
+ devno,
+ NULL,
+ "tegra_dc_%d",
+ ndev->id);
+
+ if (IS_ERR(ext->dev)) {
+ ret = PTR_ERR(ext->dev);
+ goto cleanup_cdev;
+ }
+
+ ext->dc = dc;
+
+ ext->nvmap = nvmap_create_client(nvmap_dev, "tegra_dc_ext");
+ if (!ext->nvmap) {
+ ret = -ENOMEM;
+ goto cleanup_device;
+ }
+
+ ret = tegra_dc_ext_setup_windows(ext);
+ if (ret)
+ goto cleanup_nvmap;
+
+ mutex_init(&ext->cursor.lock);
+
+ head_count++;
+
+ return ext;
+
+cleanup_nvmap:
+ nvmap_client_put(ext->nvmap);
+
+cleanup_device:
+ device_del(ext->dev);
+
+cleanup_cdev:
+ cdev_del(&ext->cdev);
+
+cleanup_alloc:
+ kfree(ext);
+
+ return ERR_PTR(ret);
+}
+
+void tegra_dc_ext_unregister(struct tegra_dc_ext *ext)
+{
+ int i;
+
+ for (i = 0; i < ext->dc->n_windows; i++) {
+ struct tegra_dc_ext_win *win = &ext->win[i];
+
+ flush_workqueue(win->flip_wq);
+ destroy_workqueue(win->flip_wq);
+ }
+
+ nvmap_client_put(ext->nvmap);
+ device_del(ext->dev);
+ cdev_del(&ext->cdev);
+
+ kfree(ext);
+
+ head_count--;
+}
+
+int __init tegra_dc_ext_module_init(void)
+{
+ int ret;
+
+ tegra_dc_ext_class = class_create(THIS_MODULE, "tegra_dc_ext");
+ if (IS_ERR(tegra_dc_ext_class)) {
+ printk(KERN_ERR "tegra_dc_ext: failed to create class\n");
+ return PTR_ERR(tegra_dc_ext_class);
+ }
+
+ /* Reserve one character device per head, plus the control device */
+ ret = alloc_chrdev_region(&tegra_dc_ext_devno,
+ 0, TEGRA_MAX_DC + 1,
+ "tegra_dc_ext");
+ if (ret)
+ goto cleanup_class;
+
+ ret = tegra_dc_ext_control_init();
+ if (ret)
+ goto cleanup_region;
+
+ return 0;
+
+cleanup_region:
+ unregister_chrdev_region(tegra_dc_ext_devno, TEGRA_MAX_DC + 1);
+
+cleanup_class:
+ class_destroy(tegra_dc_ext_class);
+
+ return ret;
+}
+
+void __exit tegra_dc_ext_module_exit(void)
+{
+ unregister_chrdev_region(tegra_dc_ext_devno, TEGRA_MAX_DC + 1);
+ class_destroy(tegra_dc_ext_class);
+}
diff --git a/drivers/video/tegra/dc/ext/events.c b/drivers/video/tegra/dc/ext/events.c
new file mode 100644
index 000000000000..150a1501fced
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/events.c
@@ -0,0 +1,197 @@
+/*
+ * drivers/video/tegra/dc/ext/events.c
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "tegra_dc_ext_priv.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(event_wait);
+
+unsigned int tegra_dc_ext_event_poll(struct file *filp, poll_table *wait)
+{
+ struct tegra_dc_ext_control_user *user = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &event_wait, wait);
+
+ if (atomic_read(&user->num_events))
+ mask |= POLLIN;
+
+ return mask;
+}
+
+static int get_next_event(struct tegra_dc_ext_control_user *user,
+ struct tegra_dc_ext_event_list *event,
+ bool block)
+{
+ struct list_head *list = &user->event_list;
+ struct tegra_dc_ext_event_list *next_event;
+ int ret;
+
+ if (block) {
+ ret = wait_event_interruptible(event_wait,
+ atomic_read(&user->num_events));
+
+ if (unlikely(ret)) {
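+ /* Map an interrupted wait to -EAGAIN instead of restarting. */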
+ if (ret == -ERESTARTSYS)
+ return -EAGAIN;
+ return ret;
+ }
+ } else {
+ if (!atomic_read(&user->num_events))
+ return 0;
+ }
+
+ mutex_lock(&user->lock);
+
+ BUG_ON(list_empty(list));
+ next_event = list_first_entry(list, struct tegra_dc_ext_event_list,
+ list);
+ *event = *next_event;
+ list_del(&next_event->list);
+ kfree(next_event);
+
+ atomic_dec(&user->num_events);
+
+ mutex_unlock(&user->lock);
+
+ return 1;
+}
+
+ssize_t tegra_dc_ext_event_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct tegra_dc_ext_control_user *user = filp->private_data;
+ struct tegra_dc_ext_event_list event_elem;
+ struct tegra_dc_ext_event *event = &event_elem.event;
+ ssize_t retval = 0, to_copy, event_size, pending;
+ loff_t previously_copied = 0;
+ char *to_copy_ptr;
+
+ if (size == 0)
+ return 0;
+
+ if (user->partial_copy) {
+ /*
+ * We didn't transfer the entire event last time, need to
+ * finish it up
+ */
+ event_elem = user->event_to_copy;
+ previously_copied = user->partial_copy;
+ } else {
+ /* Get the next event, if any */
+ pending = get_next_event(user, &event_elem,
+ !(filp->f_flags & O_NONBLOCK));
+ if (pending <= 0)
+ return pending;
+ }
+
+ /* Write the event to the user */
+ event_size = sizeof(*event) + event->data_size;
+ BUG_ON(event_size <= previously_copied);
+ event_size -= previously_copied;
+
+ to_copy_ptr = (char *)event + previously_copied;
+ to_copy = min_t(ssize_t, size, event_size);
+ if (copy_to_user(buf, to_copy_ptr, to_copy)) {
+ retval = -EFAULT;
+ to_copy = 0;
+ }
+
+ /* Note that we currently only deliver one event at a time */
+
+ if (event_size > to_copy) {
+ /*
+ * We were only able to copy part of this event. Stash it for
+ * next time.
+ */
+ user->event_to_copy = event_elem;
+ user->partial_copy = previously_copied + to_copy;
+ } else {
+ user->partial_copy = 0;
+ }
+
+ return to_copy ? to_copy : retval;
+}
+
+static int tegra_dc_ext_queue_event(struct tegra_dc_ext_control *control,
+ struct tegra_dc_ext_event *event)
+{
+ struct list_head *cur;
+ int retval = 0;
+
+ mutex_lock(&control->lock);
+ list_for_each(cur, &control->users) {
+ struct tegra_dc_ext_control_user *user;
+ struct tegra_dc_ext_event_list *ev_list;
+
+ user = container_of(cur, struct tegra_dc_ext_control_user,
+ list);
+ mutex_lock(&user->lock);
+
+ if (!(user->event_mask & event->type)) {
+ mutex_unlock(&user->lock);
+ continue;
+ }
+
+ ev_list = kmalloc(sizeof(*ev_list), GFP_KERNEL);
+ if (!ev_list) {
+ retval = -ENOMEM;
+ mutex_unlock(&user->lock);
+ continue;
+ }
+
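+ /*
+ * Copy the header and its inline payload; the list element
+ * reserves TEGRA_DC_EXT_EVENT_MAX_SZ bytes of payload right
+ * after the event field.
+ */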
+ memcpy(&ev_list->event, event,
+ sizeof(*event) + event->data_size);
+
+ list_add_tail(&ev_list->list, &user->event_list);
+
+ atomic_inc(&user->num_events);
+
+ mutex_unlock(&user->lock);
+ }
+ mutex_unlock(&control->lock);
+
+ /* Is it worth it to track waiters with more granularity? */
+ wake_up(&event_wait);
+
+ return retval;
+}
+
+int tegra_dc_ext_queue_hotplug(struct tegra_dc_ext_control *control, int output)
+{
+ struct {
+ struct tegra_dc_ext_event event;
+ struct tegra_dc_ext_control_event_hotplug hotplug;
+ } __packed pack;
+
+ pack.event.type = TEGRA_DC_EXT_EVENT_HOTPLUG;
+ pack.event.data_size = sizeof(pack.hotplug);
+
+ pack.hotplug.handle = output;
+
+ tegra_dc_ext_queue_event(control, &pack.event);
+
+ return 0;
+}
diff --git a/drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h b/drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h
new file mode 100644
index 000000000000..54a10b2c8682
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h
@@ -0,0 +1,142 @@
+/*
+ * drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_DC_EXT_PRIV_H
+#define __TEGRA_DC_EXT_PRIV_H
+
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <mach/dc.h>
+#include <mach/nvmap.h>
+
+#include <video/tegra_dc_ext.h>
+
+struct tegra_dc_ext;
+
+struct tegra_dc_ext_user {
+ struct tegra_dc_ext *ext;
+ struct nvmap_client *nvmap;
+};
+
+enum {
+ TEGRA_DC_Y,
+ TEGRA_DC_U,
+ TEGRA_DC_V,
+ TEGRA_DC_NUM_PLANES,
+};
+
+struct tegra_dc_ext_win {
+ struct tegra_dc_ext *ext;
+
+ int idx;
+
+ struct tegra_dc_ext_user *user;
+
+ struct mutex lock;
+
+ /* Current nvmap handle (if any) for Y, U, V planes */
+ struct nvmap_handle_ref *cur_handle[TEGRA_DC_NUM_PLANES];
+
+ struct workqueue_struct *flip_wq;
+};
+
+struct tegra_dc_ext {
+ struct tegra_dc *dc;
+
+ struct cdev cdev;
+ struct device *dev;
+
+ struct nvmap_client *nvmap;
+
+ struct tegra_dc_ext_win win[DC_N_WINDOWS];
+
+ struct {
+ struct tegra_dc_ext_user *user;
+ struct nvmap_handle_ref *cur_handle;
+ struct mutex lock;
+ } cursor;
+
+ bool enabled;
+};
+
+#define TEGRA_DC_EXT_EVENT_MASK_ALL \
+ TEGRA_DC_EXT_EVENT_HOTPLUG
+
+#define TEGRA_DC_EXT_EVENT_MAX_SZ 8
+
+struct tegra_dc_ext_event_list {
+ struct tegra_dc_ext_event event;
+ /* The data field _must_ follow the event field. */
+ char data[TEGRA_DC_EXT_EVENT_MAX_SZ];
+
+ struct list_head list;
+};
+
+struct tegra_dc_ext_control_user {
+ struct tegra_dc_ext_control *control;
+
+ struct list_head event_list;
+ atomic_t num_events;
+
+ u32 event_mask;
+
+ struct tegra_dc_ext_event_list event_to_copy;
+ loff_t partial_copy;
+
+ struct mutex lock;
+
+ struct list_head list;
+};
+
+struct tegra_dc_ext_control {
+ struct cdev cdev;
+ struct device *dev;
+
+ struct list_head users;
+
+ struct mutex lock;
+};
+
+extern int tegra_dc_ext_devno;
+extern struct class *tegra_dc_ext_class;
+
+extern int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
+ struct nvmap_handle_ref **handle,
+ dma_addr_t *phys_addr);
+
+extern int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user);
+extern int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user);
+extern int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_cursor_image *);
+extern int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user,
+ struct tegra_dc_ext_cursor *);
+
+extern int tegra_dc_ext_control_init(void);
+
+extern int tegra_dc_ext_queue_hotplug(struct tegra_dc_ext_control *,
+ int output);
+extern ssize_t tegra_dc_ext_event_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *ppos);
+extern unsigned int tegra_dc_ext_event_poll(struct file *, poll_table *);
+
+extern int tegra_dc_ext_get_num_outputs(void);
+
+#endif /* __TEGRA_DC_EXT_PRIV_H */
diff --git a/drivers/video/tegra/dc/ext/util.c b/drivers/video/tegra/dc/ext/util.c
new file mode 100644
index 000000000000..747085579f15
--- /dev/null
+++ b/drivers/video/tegra/dc/ext/util.c
@@ -0,0 +1,78 @@
+/*
+ * drivers/video/tegra/dc/ext/util.c
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include <mach/dc.h>
+#include <mach/nvmap.h>
+
+/* ugh */
+#include "../../nvmap/nvmap.h"
+
+#include "tegra_dc_ext_priv.h"
+
+int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
+ struct nvmap_handle_ref **handle,
+ dma_addr_t *phys_addr)
+{
+ struct tegra_dc_ext *ext = user->ext;
+ struct nvmap_handle_ref *win_dup;
+ struct nvmap_handle *win_handle;
+ dma_addr_t phys;
+
+ if (!id) {
+ *handle = NULL;
+ *phys_addr = -1;
+
+ return 0;
+ }
+
+ /*
+ * Take a reference to the buffer using the user's nvmap context, to
+ * make sure they have permissions to access it.
+ */
+ win_handle = nvmap_get_handle_id(user->nvmap, id);
+ if (!win_handle)
+ return -EACCES;
+
+ /*
+ * Duplicate the buffer's handle into the dc_ext driver's nvmap
+ * context, to ensure that the handle won't be freed as long as it is
+ * in use by display.
+ */
+ win_dup = nvmap_duplicate_handle_id(ext->nvmap, id);
+
+ /* Release the reference we took in the user's context above */
+ nvmap_handle_put(win_handle);
+
+ if (IS_ERR(win_dup))
+ return PTR_ERR(win_dup);
+
+ phys = nvmap_pin(ext->nvmap, win_dup);
+ /* XXX this isn't correct for non-pointers... */
+ if (IS_ERR((void *)phys)) {
+ nvmap_free(ext->nvmap, win_dup);
+ return PTR_ERR((void *)phys);
+ }
+
+ *phys_addr = phys;
+ *handle = win_dup;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/dc/hdmi.c b/drivers/video/tegra/dc/hdmi.c
new file mode 100644
index 000000000000..413a3df8c39b
--- /dev/null
+++ b/drivers/video/tegra/dc/hdmi.c
@@ -0,0 +1,1838 @@
+/*
+ * drivers/video/tegra/dc/hdmi.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <linux/nvhost.h>
+#include <mach/hdmi-audio.h>
+
+#include <video/tegrafb.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+#include "edid.h"
+#include "nvhdcp.h"
+
+/* datasheet claims this will always be 216MHz */
+#define HDMI_AUDIOCLK_FREQ 216000000
+
+#define HDMI_REKEY_DEFAULT 56
+
+#define HDMI_ELD_RESERVED1_INDEX 1
+#define HDMI_ELD_RESERVED2_INDEX 3
+#define HDMI_ELD_VER_INDEX 0
+#define HDMI_ELD_BASELINE_LEN_INDEX 2
+#define HDMI_ELD_CEA_VER_MNL_INDEX 4
+#define HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX 5
+#define HDMI_ELD_AUD_SYNC_DELAY_INDEX 6
+#define HDMI_ELD_SPK_ALLOC_INDEX 7
+#define HDMI_ELD_PORT_ID_INDEX 8
+#define HDMI_ELD_MANF_NAME_INDEX 16
+#define HDMI_ELD_PRODUCT_CODE_INDEX 18
+#define HDMI_ELD_MONITOR_NAME_INDEX 20
+
+struct tegra_dc_hdmi_data {
+ struct tegra_dc *dc;
+ struct tegra_edid *edid;
+ struct tegra_edid_hdmi_eld eld;
+ struct tegra_nvhdcp *nvhdcp;
+ struct delayed_work work;
+
+ struct resource *base_res;
+ void __iomem *base;
+ struct clk *clk;
+
+ struct clk *disp1_clk;
+ struct clk *disp2_clk;
+ struct clk *hda_clk;
+ struct clk *hda2codec_clk;
+ struct clk *hda2hdmi_clk;
+
+#ifdef CONFIG_SWITCH
+ struct switch_dev hpd_switch;
+#endif
+
+ spinlock_t suspend_lock;
+ bool suspended;
+ bool eld_retrieved;
+ bool clk_enabled;
+ unsigned audio_freq;
+ unsigned audio_source;
+
+ bool dvi;
+};
+
+struct tegra_dc_hdmi_data *dc_hdmi;
+
+const struct fb_videomode tegra_dc_hdmi_supported_modes[] = {
+ /* 1280x720p 60hz: EIA/CEA-861-B Format 4 */
+ {
+ .xres = 1280,
+ .yres = 720,
+ .pixclock = KHZ2PICOS(74250),
+ .hsync_len = 40, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 220, /* h_back_porch */
+ .upper_margin = 20, /* v_back_porch */
+ .right_margin = 110, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 1280x720p 60hz: EIA/CEA-861-B Format 4 (Stereo)*/
+ {
+ .xres = 1280,
+ .yres = 720,
+ .pixclock = KHZ2PICOS(74250),
+ .hsync_len = 40, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 220, /* h_back_porch */
+ .upper_margin = 20, /* v_back_porch */
+ .right_margin = 110, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED |
+ FB_VMODE_STEREO_FRAME_PACK,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 720x480p 59.94hz: EIA/CEA-861-B Formats 2 & 3 */
+ {
+ .xres = 720,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(27000),
+ .hsync_len = 62, /* h_sync_width */
+ .vsync_len = 6, /* v_sync_width */
+ .left_margin = 60, /* h_back_porch */
+ .upper_margin = 30, /* v_back_porch */
+ .right_margin = 16, /* h_front_porch */
+ .lower_margin = 9, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+
+ /* 640x480p 60hz: EIA/CEA-861-B Format 1 */
+ {
+ .xres = 640,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(25200),
+ .hsync_len = 96, /* h_sync_width */
+ .vsync_len = 2, /* v_sync_width */
+ .left_margin = 48, /* h_back_porch */
+ .upper_margin = 33, /* v_back_porch */
+ .right_margin = 16, /* h_front_porch */
+ .lower_margin = 10, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+
+ /* 720x576p 50hz EIA/CEA-861-B Formats 17 & 18 */
+ {
+ .xres = 720,
+ .yres = 576,
+ .pixclock = KHZ2PICOS(27000),
+ .hsync_len = 64, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 68, /* h_back_porch */
+ .upper_margin = 39, /* v_back_porch */
+ .right_margin = 12, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+
+ /* 1920x1080p 23.98/24hz: EIA/CEA-861-B Format 32 (Stereo)*/
+ {
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = KHZ2PICOS(74250),
+ .hsync_len = 44, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 148, /* h_back_porch */
+ .upper_margin = 36, /* v_back_porch */
+ .right_margin = 638, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED |
+ FB_VMODE_STEREO_FRAME_PACK,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 1920x1080p 30Hz EIA/CEA-861-B Format 34 */
+ {
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = KHZ2PICOS(74250),
+ .hsync_len = 44, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 148, /* h_back_porch */
+ .upper_margin = 36, /* v_back_porch */
+ .right_margin = 88, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+ /* 1920x1080p 59.94/60hz EIA/CEA-861-B Format 16 */
+ {
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = KHZ2PICOS(148500),
+ .hsync_len = 44, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 148, /* h_back_porch */
+ .upper_margin = 36, /* v_back_porch */
+ .right_margin = 88, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+};
+
+/* Table of TMDS electrical settings; entries must be in ascending order of pclk. */
+struct tdms_config {
+ int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current; /* pre-emphasis */
+ u32 drive_current;
+};
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+const struct tdms_config tdms_config[] = {
+ { /* 480p modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+ { /* 720p modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+ { /* 1080p modes */
+ .pclk = INT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+ SOR_PLL_VCOCAP(3) | SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+};
+#else /* CONFIG_ARCH_TEGRA_2x_SOC */
+const struct tdms_config tdms_config[] = {
+ { /* 480p modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+ { /* 720p modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+ PE_CURRENT1(PE_CURRENT_6_0_mA) |
+ PE_CURRENT2(PE_CURRENT_6_0_mA) |
+ PE_CURRENT3(PE_CURRENT_6_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+ { /* 1080p modes */
+ .pclk = INT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+ PE_CURRENT1(PE_CURRENT_6_0_mA) |
+ PE_CURRENT2(PE_CURRENT_6_0_mA) |
+ PE_CURRENT3(PE_CURRENT_6_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+};
+#endif
+
+struct tegra_hdmi_audio_config {
+ unsigned pix_clock;
+ unsigned n;
+ unsigned cts;
+ unsigned aval;
+};
+
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+ {25200000, 4096, 25200, 24000},
+ {27000000, 4096, 27000, 24000},
+ {74250000, 4096, 74250, 24000},
+ {148500000, 4096, 148500, 24000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+ {25200000, 5880, 26250, 25000},
+ {27000000, 5880, 28125, 25000},
+ {74250000, 4704, 61875, 20000},
+ {148500000, 4704, 123750, 20000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+ {25200000, 6144, 25200, 24000},
+ {27000000, 6144, 27000, 24000},
+ {74250000, 6144, 74250, 24000},
+ {148500000, 6144, 148500, 24000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+ {25200000, 11760, 26250, 25000},
+ {27000000, 11760, 28125, 25000},
+ {74250000, 9408, 61875, 20000},
+ {148500000, 9408, 123750, 20000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+ {25200000, 12288, 25200, 24000},
+ {27000000, 12288, 27000, 24000},
+ {74250000, 12288, 74250, 24000},
+ {148500000, 12288, 148500, 24000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+ {25200000, 23520, 26250, 25000},
+ {27000000, 23520, 28125, 25000},
+ {74250000, 18816, 61875, 20000},
+ {148500000, 18816, 123750, 20000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+ {25200000, 24576, 25200, 24000},
+ {27000000, 24576, 27000, 24000},
+ {74250000, 24576, 74250, 24000},
+ {148500000, 24576, 148500, 24000},
+ {0, 0, 0},
+};
+
+static const struct tegra_hdmi_audio_config
+*tegra_hdmi_get_audio_config(unsigned audio_freq, unsigned pix_clock)
+{
+ const struct tegra_hdmi_audio_config *table;
+
+ switch (audio_freq) {
+ case AUDIO_FREQ_32K:
+ table = tegra_hdmi_audio_32k;
+ break;
+ case AUDIO_FREQ_44_1K:
+ table = tegra_hdmi_audio_44_1k;
+ break;
+ case AUDIO_FREQ_48K:
+ table = tegra_hdmi_audio_48k;
+ break;
+ case AUDIO_FREQ_88_2K:
+ table = tegra_hdmi_audio_88_2k;
+ break;
+ case AUDIO_FREQ_96K:
+ table = tegra_hdmi_audio_96k;
+ break;
+ case AUDIO_FREQ_176_4K:
+ table = tegra_hdmi_audio_176_4k;
+ break;
+ case AUDIO_FREQ_192K:
+ table = tegra_hdmi_audio_192k;
+ break;
+ default:
+ return NULL;
+ }
+
+ while (table->pix_clock) {
+ if (table->pix_clock == pix_clock)
+ return table;
+ table++;
+ }
+
+ return NULL;
+}
+
+
+unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg)
+{
+ return readl(hdmi->base + reg * 4);
+}
+
+void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, hdmi->base + reg * 4);
+}
+
+static inline void tegra_hdmi_clrsetbits(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long val = tegra_hdmi_readl(hdmi, reg);
+ val &= ~clr;
+ val |= set;
+ tegra_hdmi_writel(hdmi, val, reg);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dbg_hdmi_show(struct seq_file *s, void *unused)
+{
+ struct tegra_dc_hdmi_data *hdmi = s->private;
+
+#define DUMP_REG(a) do { \
+ seq_printf(s, "%-32s\t%03x\t%08lx\n", \
+ #a, a, tegra_hdmi_readl(hdmi, a)); \
+ } while (0)
+
+ tegra_dc_io_start(hdmi->dc);
+ clk_enable(hdmi->clk);
+
+ DUMP_REG(HDMI_CTXSW);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST3);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST4);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST5);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST6);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST7);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST8);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST9);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTC);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTD);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTE);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTF);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+ DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+ DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+ DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+#undef DUMP_REG
+
+ clk_disable(hdmi->clk);
+ tegra_dc_io_end(hdmi->dc);
+
+ return 0;
+}
+
+static int dbg_hdmi_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_hdmi_show, inode->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+ .open = dbg_hdmi_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *hdmidir;
+
+static void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi)
+{
+ struct dentry *retval;
+
+ hdmidir = debugfs_create_dir("tegra_hdmi", NULL);
+ if (!hdmidir)
+ return;
+ retval = debugfs_create_file("regs", S_IRUGO, hdmidir, hdmi,
+ &dbg_fops);
+ if (!retval)
+ goto free_out;
+ return;
+free_out:
+ debugfs_remove_recursive(hdmidir);
+ hdmidir = NULL;
+ return;
+}
+#else
+static inline void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi)
+{ }
+#endif
+
+#define PIXCLOCK_TOLERANCE 200
+
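+/* total pixel clocks per frame: horizontal total times vertical total */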
+static int tegra_dc_calc_clock_per_frame(const struct fb_videomode *mode)
+{
+ return (mode->left_margin + mode->xres +
+ mode->right_margin + mode->hsync_len) *
+ (mode->upper_margin + mode->yres +
+ mode->lower_margin + mode->vsync_len);
+}
+
+static bool tegra_dc_hdmi_mode_equal(const struct fb_videomode *mode1,
+ const struct fb_videomode *mode2)
+{
+ int clock_per_frame = tegra_dc_calc_clock_per_frame(mode1);
+
+ /* allows up to 1Hz of pixclock difference */
+ if (mode1->pixclock != mode2->pixclock) {
+ return (mode1->xres == mode2->xres &&
+ mode1->yres == mode2->yres &&
+ mode1->vmode == mode2->vmode &&
+ (abs(PICOS2KHZ(mode1->pixclock) -
+ PICOS2KHZ(mode2->pixclock)) *
+ 1000 / clock_per_frame <= 1));
+ } else {
+ return (mode1->xres == mode2->xres &&
+ mode1->yres == mode2->yres &&
+ mode1->vmode == mode2->vmode);
+ }
+}
+
+static bool tegra_dc_hdmi_valid_pixclock(const struct tegra_dc *dc,
+ const struct fb_videomode *mode)
+{
+ unsigned max_pixclock = tegra_dc_get_out_max_pixclock(dc);
+ if (max_pixclock) {
+ /* this might look counter-intuitive,
+ * but pixclock's unit is picoseconds (not KHz)
+ */
+ return mode->pixclock >= max_pixclock;
+ } else {
+ return true;
+ }
+}
+
+static bool tegra_dc_hdmi_mode_filter(const struct tegra_dc *dc,
+ struct fb_videomode *mode)
+{
+ int i;
+ int clock_per_frame;
+
+ if (!mode->pixclock)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_modes); i++) {
+ const struct fb_videomode *supported_mode
+ = &tegra_dc_hdmi_supported_modes[i];
+ if (tegra_dc_hdmi_mode_equal(supported_mode, mode) &&
+ tegra_dc_hdmi_valid_pixclock(dc, supported_mode)) {
+ memcpy(mode, supported_mode, sizeof(*mode));
+ mode->flag = FB_MODE_IS_DETAILED;
+ clock_per_frame = tegra_dc_calc_clock_per_frame(mode);
+ mode->refresh = (PICOS2KHZ(mode->pixclock) * 1000)
+ / clock_per_frame;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+static bool tegra_dc_hdmi_hpd(struct tegra_dc *dc)
+{
+ int sense;
+ int level;
+
+ level = gpio_get_value(dc->out->hotplug_gpio);
+
+ sense = dc->out->flags & TEGRA_DC_OUT_HOTPLUG_MASK;
+
+ return (sense == TEGRA_DC_OUT_HOTPLUG_HIGH && level) ||
+ (sense == TEGRA_DC_OUT_HOTPLUG_LOW && !level);
+}
+
+
+void tegra_dc_hdmi_detect_config(struct tegra_dc *dc,
+ struct fb_monspecs *specs)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ /* monitors like to lie about these but they are still useful for
+ * detecting aspect ratios
+ */
+ dc->out->h_size = specs->max_x * 1000;
+ dc->out->v_size = specs->max_y * 1000;
+
+ hdmi->dvi = !(specs->misc & FB_MISC_HDMI);
+
+ tegra_fb_update_monspecs(dc->fb, specs, tegra_dc_hdmi_mode_filter);
+#ifdef CONFIG_SWITCH
+ hdmi->hpd_switch.state = 0;
+ switch_set_state(&hdmi->hpd_switch, 1);
+#endif
+ dev_info(&dc->ndev->dev, "display detected\n");
+
+ dc->connected = true;
+ tegra_dc_ext_process_hotplug(dc->ndev->id);
+}
+
+/* This function is used to enable DC1 and HDMI for the purpose of testing. */
+bool tegra_dc_hdmi_detect_test(struct tegra_dc *dc, unsigned char *edid_ptr)
+{
+ int err;
+ struct fb_monspecs specs;
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ if (!dc || !hdmi || !edid_ptr) {
+ dev_err(&dc->ndev->dev, "HDMI test failed to get arguments.\n");
+ return false;
+ }
+
+ err = tegra_edid_get_monspecs_test(hdmi->edid, &specs, edid_ptr);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "error reading edid\n");
+ goto fail;
+ }
+
+ err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "error populating eld\n");
+ goto fail;
+ }
+ hdmi->eld_retrieved = true;
+
+ tegra_dc_hdmi_detect_config(dc, &specs);
+
+ return true;
+
+fail:
+ hdmi->eld_retrieved = false;
+#ifdef CONFIG_SWITCH
+ switch_set_state(&hdmi->hpd_switch, 0);
+#endif
+ tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+ return false;
+}
+EXPORT_SYMBOL(tegra_dc_hdmi_detect_test);
+
+static bool tegra_dc_hdmi_detect(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct fb_monspecs specs;
+ int err;
+
+ if (!tegra_dc_hdmi_hpd(dc))
+ goto fail;
+
+ err = tegra_edid_get_monspecs(hdmi->edid, &specs);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "error reading edid\n");
+ goto fail;
+ }
+
+ err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "error populating eld\n");
+ goto fail;
+ }
+ hdmi->eld_retrieved = true;
+
+ tegra_dc_hdmi_detect_config(dc, &specs);
+
+ return true;
+
+fail:
+ hdmi->eld_retrieved = false;
+#ifdef CONFIG_SWITCH
+ switch_set_state(&hdmi->hpd_switch, 0);
+#endif
+ tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+ return false;
+}
+
+
+static void tegra_dc_hdmi_detect_worker(struct work_struct *work)
+{
+ struct tegra_dc_hdmi_data *hdmi =
+ container_of(to_delayed_work(work), struct tegra_dc_hdmi_data, work);
+ struct tegra_dc *dc = hdmi->dc;
+
+ tegra_dc_enable(dc);
+ msleep(5);
+ if (!tegra_dc_hdmi_detect(dc)) {
+ tegra_dc_disable(dc);
+ tegra_fb_update_monspecs(dc->fb, NULL, NULL);
+
+ dc->connected = false;
+ tegra_dc_ext_process_hotplug(dc->ndev->id);
+ }
+}
+
+static irqreturn_t tegra_dc_hdmi_irq(int irq, void *ptr)
+{
+ struct tegra_dc *dc = ptr;
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->suspend_lock, flags);
+ if (!hdmi->suspended) {
+ __cancel_delayed_work(&hdmi->work);
+ if (tegra_dc_hdmi_hpd(dc))
+ queue_delayed_work(system_nrt_wq, &hdmi->work,
+ msecs_to_jiffies(100));
+ else
+ queue_delayed_work(system_nrt_wq, &hdmi->work,
+ msecs_to_jiffies(30));
+ }
+ spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_dc_hdmi_suspend(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ unsigned long flags;
+
+ tegra_nvhdcp_suspend(hdmi->nvhdcp);
+ spin_lock_irqsave(&hdmi->suspend_lock, flags);
+ hdmi->suspended = true;
+ spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+}
+
+static void tegra_dc_hdmi_resume(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->suspend_lock, flags);
+ hdmi->suspended = false;
+
+ if (tegra_dc_hdmi_hpd(dc))
+ queue_delayed_work(system_nrt_wq, &hdmi->work,
+ msecs_to_jiffies(100));
+ else
+ queue_delayed_work(system_nrt_wq, &hdmi->work,
+ msecs_to_jiffies(30));
+
+ spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+ tegra_nvhdcp_resume(hdmi->nvhdcp);
+}
+
+static ssize_t underscan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef CONFIG_SWITCH
+ struct tegra_dc_hdmi_data *hdmi =
+ container_of(dev_get_drvdata(dev), struct tegra_dc_hdmi_data, hpd_switch);
+
+ if (hdmi->edid)
+ return sprintf(buf, "%d\n", tegra_edid_underscan_supported(hdmi->edid));
+ else
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+static DEVICE_ATTR(underscan, S_IRUGO, underscan_show, NULL);
+
+static int tegra_dc_hdmi_init(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi;
+ struct resource *res;
+ struct resource *base_res;
+ int ret;
+ void __iomem *base;
+ struct clk *clk = NULL;
+ struct clk *disp1_clk = NULL;
+ struct clk *disp2_clk = NULL;
+ int err;
+
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, "hdmi_regs");
+ if (!res) {
+ dev_err(&dc->ndev->dev, "hdmi: no mem resource\n");
+ err = -ENOENT;
+ goto err_free_hdmi;
+ }
+
+ base_res = request_mem_region(res->start, resource_size(res), dc->ndev->name);
+ if (!base_res) {
+ dev_err(&dc->ndev->dev, "hdmi: request_mem_region failed\n");
+ err = -EBUSY;
+ goto err_free_hdmi;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&dc->ndev->dev, "hdmi: registers can't be mapped\n");
+ err = -EBUSY;
+ goto err_release_resource_reg;
+ }
+
+ clk = clk_get(&dc->ndev->dev, "hdmi");
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get clock\n");
+ err = -ENOENT;
+ goto err_iounmap_reg;
+ }
+
+ disp1_clk = clk_get_sys("tegradc.0", NULL);
+ if (IS_ERR_OR_NULL(disp1_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get disp1 clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+
+ disp2_clk = clk_get_sys("tegradc.1", NULL);
+ if (IS_ERR_OR_NULL(disp2_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get disp2 clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ hdmi->hda_clk = clk_get_sys("tegra30-hda", "hda");
+ if (IS_ERR_OR_NULL(hdmi->hda_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get hda clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+
+ hdmi->hda2codec_clk = clk_get_sys("tegra30-hda", "hda2codec");
+ if (IS_ERR_OR_NULL(hdmi->hda2codec_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get hda2codec clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+
+ hdmi->hda2hdmi_clk = clk_get_sys("tegra30-hda", "hda2hdmi");
+ if (IS_ERR_OR_NULL(hdmi->hda2hdmi_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get hda2hdmi clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+#endif
+
+ /* TODO: support non-hotplug */
+ if (request_irq(gpio_to_irq(dc->out->hotplug_gpio), tegra_dc_hdmi_irq,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(&dc->ndev->dev), dc)) {
+ dev_err(&dc->ndev->dev, "hdmi: request_irq %d failed\n",
+ gpio_to_irq(dc->out->hotplug_gpio));
+ err = -EBUSY;
+ goto err_put_clock;
+ }
+
+ hdmi->edid = tegra_edid_create(dc->out->dcc_bus);
+ if (IS_ERR_OR_NULL(hdmi->edid)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't create edid\n");
+ err = PTR_ERR(hdmi->edid);
+ goto err_free_irq;
+ }
+
+#ifdef CONFIG_TEGRA_NVHDCP
+ hdmi->nvhdcp = tegra_nvhdcp_create(hdmi, dc->ndev->id,
+ dc->out->dcc_bus);
+ if (IS_ERR_OR_NULL(hdmi->nvhdcp)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't create nvhdcp\n");
+ err = PTR_ERR(hdmi->nvhdcp);
+ goto err_edid_destroy;
+ }
+#else
+ hdmi->nvhdcp = NULL;
+#endif
+
+ INIT_DELAYED_WORK(&hdmi->work, tegra_dc_hdmi_detect_worker);
+
+ hdmi->dc = dc;
+ hdmi->base = base;
+ hdmi->base_res = base_res;
+ hdmi->clk = clk;
+ hdmi->disp1_clk = disp1_clk;
+ hdmi->disp2_clk = disp2_clk;
+ hdmi->suspended = false;
+ hdmi->eld_retrieved = false;
+ hdmi->clk_enabled = false;
+ hdmi->audio_freq = 44100;
+ hdmi->audio_source = AUTO;
+ spin_lock_init(&hdmi->suspend_lock);
+
+#ifdef CONFIG_SWITCH
+ hdmi->hpd_switch.name = "hdmi";
+ ret = switch_dev_register(&hdmi->hpd_switch);
+
+ if (!ret)
+ device_create_file(hdmi->hpd_switch.dev, &dev_attr_underscan);
+#endif
+
+ dc->out->depth = 24;
+
+ tegra_dc_set_outdata(dc, hdmi);
+
+ dc_hdmi = hdmi;
+ /* boards can select default content protection policy */
+ if (dc->out->flags & TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND) {
+ tegra_nvhdcp_set_policy(hdmi->nvhdcp,
+ TEGRA_NVHDCP_POLICY_ON_DEMAND);
+ } else {
+ tegra_nvhdcp_set_policy(hdmi->nvhdcp,
+ TEGRA_NVHDCP_POLICY_ALWAYS_ON);
+ }
+
+ tegra_dc_hdmi_debug_create(hdmi);
+
+ return 0;
+
+err_edid_destroy:
+ tegra_edid_destroy(hdmi->edid);
+err_free_irq:
+ free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+err_put_clock:
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ if (!IS_ERR_OR_NULL(hdmi->hda2hdmi_clk))
+ clk_put(hdmi->hda2hdmi_clk);
+ if (!IS_ERR_OR_NULL(hdmi->hda2codec_clk))
+ clk_put(hdmi->hda2codec_clk);
+ if (!IS_ERR_OR_NULL(hdmi->hda_clk))
+ clk_put(hdmi->hda_clk);
+#endif
+ if (!IS_ERR_OR_NULL(disp2_clk))
+ clk_put(disp2_clk);
+ if (!IS_ERR_OR_NULL(disp1_clk))
+ clk_put(disp1_clk);
+ if (!IS_ERR_OR_NULL(clk))
+ clk_put(clk);
+err_iounmap_reg:
+ iounmap(base);
+err_release_resource_reg:
+ release_resource(base_res);
+err_free_hdmi:
+ kfree(hdmi);
+ return err;
+}
+
+static void tegra_dc_hdmi_destroy(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+ cancel_delayed_work_sync(&hdmi->work);
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&hdmi->hpd_switch);
+#endif
+ iounmap(hdmi->base);
+ release_resource(hdmi->base_res);
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ clk_put(hdmi->hda2hdmi_clk);
+ clk_put(hdmi->hda2codec_clk);
+ clk_put(hdmi->hda_clk);
+#endif
+ clk_put(hdmi->clk);
+ clk_put(hdmi->disp1_clk);
+ clk_put(hdmi->disp2_clk);
+ tegra_edid_destroy(hdmi->edid);
+ tegra_nvhdcp_destroy(hdmi->nvhdcp);
+
+ kfree(hdmi);
+}
+
+static void tegra_dc_hdmi_setup_audio_fs_tables(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ int i;
+ unsigned freqs[] = {
+ 32000,
+ 44100,
+ 48000,
+ 88200,
+ 96000,
+ 176400,
+ 192000,
+ };
+
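+ /* For each supported sample rate, program a low/high window around the
+ * nominal ratio of the HDMI audio reference clock to 128 * fs; lower
+ * sample rates get a wider tolerance (delta).
+ */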
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned f = freqs[i];
+ unsigned eight_half;
+ unsigned delta;
+
+ if (f > 96000)
+ delta = 2;
+ else if (f > 48000)
+ delta = 6;
+ else
+ delta = 9;
+
+ eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+ tegra_hdmi_writel(hdmi, AUDIO_FS_LOW(eight_half - delta) |
+ AUDIO_FS_HIGH(eight_half + delta),
+ HDMI_NV_PDISP_AUDIO_FS(i));
+ }
+}
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static void tegra_dc_hdmi_setup_eld_buff(struct tegra_dc *dc)
+{
+ int i;
+ int j;
+ u8 tmp;
+
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ /* program ELD stuff */
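+ /* Each write programs one byte of the ELD (EDID-Like Data) buffer read
+ * by the HDA codec: bits 15:8 carry the byte index, bits 7:0 the data.
+ */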
+ for (i = 0; i < HDMI_ELD_MONITOR_NAME_INDEX; i++) {
+ switch (i) {
+ case HDMI_ELD_VER_INDEX:
+ tmp = (hdmi->eld.eld_ver << 3);
+ tegra_hdmi_writel(hdmi, (i << 8) | tmp,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ break;
+ case HDMI_ELD_BASELINE_LEN_INDEX:
+ break;
+ case HDMI_ELD_CEA_VER_MNL_INDEX:
+ tmp = (hdmi->eld.cea_edid_ver << 5);
+ tmp |= (hdmi->eld.mnl & 0x1f);
+ tegra_hdmi_writel(hdmi, (i << 8) | tmp,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ break;
+ case HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX:
+ tmp = (hdmi->eld.sad_count << 4);
+ tmp |= (hdmi->eld.conn_type & 0xC);
+ tmp |= (hdmi->eld.support_ai & 0x2);
+ tmp |= (hdmi->eld.support_hdcp & 0x1);
+ tegra_hdmi_writel(hdmi, (i << 8) | tmp,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ break;
+ case HDMI_ELD_AUD_SYNC_DELAY_INDEX:
+ tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.aud_synch_delay),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ break;
+ case HDMI_ELD_SPK_ALLOC_INDEX:
+ tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.spk_alloc),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ break;
+ case HDMI_ELD_PORT_ID_INDEX:
+ for (j = 0; j < 8; j++) {
+ tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.port_id[j]),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ }
+ break;
+ case HDMI_ELD_MANF_NAME_INDEX:
+ for (j = 0; j < 2; j++) {
+ tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.manufacture_id[j]),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ }
+ break;
+ case HDMI_ELD_PRODUCT_CODE_INDEX:
+ for (j = 0; j < 2; j++) {
+ tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.product_id[j]),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ }
+ break;
+ }
+ }
+ for (j = 0; j < hdmi->eld.mnl; j++) {
+ tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX) << 8) |
+ (hdmi->eld.monitor_name[j]),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ }
+ for (j = 0; j < hdmi->eld.sad_count; j++) {
+ tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX + hdmi->eld.mnl) << 8) |
+ (hdmi->eld.sad[j]),
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+ }
+ /* set the presence and valid bits */
+ tegra_hdmi_writel(hdmi, 3, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
+}
+#endif
+
+static int tegra_dc_hdmi_setup_audio(struct tegra_dc *dc, unsigned audio_freq,
+ unsigned audio_source)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ const struct tegra_hdmi_audio_config *config;
+ unsigned long audio_n;
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ unsigned long reg_addr = 0;
+#endif
+ unsigned a_source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+
+ if (HDA == audio_source)
+ a_source = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ else if (SPDIF == audio_source)
+ a_source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ tegra_hdmi_writel(hdmi, a_source,
+ HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0);
+ tegra_hdmi_writel(hdmi,
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0),
+ HDMI_NV_PDISP_AUDIO_CNTRL0);
+#else
+ tegra_hdmi_writel(hdmi,
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+ a_source,
+ HDMI_NV_PDISP_AUDIO_CNTRL0);
+#endif
+ config = tegra_hdmi_get_audio_config(audio_freq, dc->mode.pclk);
+ if (!config) {
+ dev_err(&dc->ndev->dev,
+ "hdmi: can't set audio to %d at %d pix_clock",
+ audio_freq, dc->mode.pclk);
+ return -EINVAL;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+ audio_n = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNALTE |
+ AUDIO_N_VALUE(config->n - 1);
+ tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+ tegra_hdmi_writel(hdmi, SPARE_HW_CTS | SPARE_FORCE_SW_CTS |
+ SPARE_CTS_RESET_VAL(1),
+ HDMI_NV_PDISP_HDMI_SPARE);
+
+ audio_n &= ~AUDIO_N_RESETF;
+ tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ switch (audio_freq) {
+ case AUDIO_FREQ_32K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0;
+ break;
+ case AUDIO_FREQ_44_1K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0;
+ break;
+ case AUDIO_FREQ_48K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0;
+ break;
+ case AUDIO_FREQ_88_2K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0;
+ break;
+ case AUDIO_FREQ_96K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0;
+ break;
+ case AUDIO_FREQ_176_4K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0;
+ break;
+ case AUDIO_FREQ_192K:
+ reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0;
+ break;
+ }
+
+ tegra_hdmi_writel(hdmi, config->aval, reg_addr);
+#endif
+ tegra_dc_hdmi_setup_audio_fs_tables(dc);
+
+ return 0;
+}
+
+int tegra_hdmi_setup_audio_freq_source(unsigned audio_freq, unsigned audio_source)
+{
+ struct tegra_dc_hdmi_data *hdmi = dc_hdmi;
+
+ if (!hdmi)
+ return -EAGAIN;
+
+ /* check for a known freq */
+ if (AUDIO_FREQ_32K == audio_freq ||
+ AUDIO_FREQ_44_1K == audio_freq ||
+ AUDIO_FREQ_48K == audio_freq ||
+ AUDIO_FREQ_88_2K == audio_freq ||
+ AUDIO_FREQ_96K == audio_freq ||
+ AUDIO_FREQ_176_4K == audio_freq ||
+ AUDIO_FREQ_192K == audio_freq) {
+ /* If we can program HDMI, then proceed */
+ if (hdmi->clk_enabled)
+ tegra_dc_hdmi_setup_audio(hdmi->dc, audio_freq, audio_source);
+
+ /* Store it for use in enable */
+ hdmi->audio_freq = audio_freq;
+ hdmi->audio_source = audio_source;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_hdmi_setup_audio_freq_source);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+int tegra_hdmi_setup_hda_presence(void)
+{
+ struct tegra_dc_hdmi_data *hdmi = dc_hdmi;
+
+ if (!hdmi)
+ return -EAGAIN;
+
+ if (hdmi->clk_enabled && hdmi->eld_retrieved) {
+ /* If HDA_PRESENCE is already set reset it */
+ if (tegra_hdmi_readl(hdmi,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0))
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
+
+ tegra_dc_hdmi_setup_eld_buff(hdmi->dc);
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_hdmi_setup_hda_presence);
+#endif
+
+static void tegra_dc_hdmi_write_infopack(struct tegra_dc *dc, int header_reg,
+ u8 type, u8 version, void *data, int len)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ u32 subpack[2]; /* extra byte for zero padding of subpack */
+ int i;
+ u32 subpack[2]; /* one extra byte beyond the 7-byte subpack for zero padding */
+
+ /* first byte of data is the checksum */
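+ /* (chosen so that the byte sum of type, version, length and payload
+ * is zero modulo 256, as required for infoframes) */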
+ csum = type + version + len - 1;
+ for (i = 1; i < len; i++)
+ csum += ((u8 *)data)[i];
+ ((u8 *)data)[0] = 0x100 - csum;
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_HEADER_TYPE(type) |
+ INFOFRAME_HEADER_VERSION(version) |
+ INFOFRAME_HEADER_LEN(len - 1),
+ header_reg);
+
+ /* The audio infoframe only has one set of subpack registers. The HDMI
+ * block pads the rest of the data as per the spec, so we have to fix up
+ * the length before filling in the subpacks.
+ */
+ if (header_reg == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+ len = 6;
+
+ /* each subpack is 7 bytes, divided into:
+ * subpack_low - bytes 0 - 3
+ * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 0; i < len; i++) {
+ int subpack_idx = i % 7;
+
+ if (subpack_idx == 0)
+ memset(subpack, 0x0, sizeof(subpack));
+
+ ((u8 *)subpack)[subpack_idx] = ((u8 *)data)[i];
+
+ if (subpack_idx == 6 || (i + 1 == len)) {
+ int reg = header_reg + 1 + (i / 7) * 2;
+
+ tegra_hdmi_writel(hdmi, subpack[0], reg);
+ tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+ }
+ }
+}
+
+static void tegra_dc_hdmi_setup_avi_infoframe(struct tegra_dc *dc, bool dvi)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct hdmi_avi_infoframe avi;
+
+ if (dvi) {
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&avi, 0x0, sizeof(avi));
+
+ avi.r = HDMI_AVI_R_SAME;
+
+ if (dc->mode.v_active == 480) {
+ if (dc->mode.h_active == 640) {
+ avi.m = HDMI_AVI_M_4_3;
+ avi.vic = 1;
+ } else {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 3;
+ }
+ } else if (dc->mode.v_active == 576) {
+ /* CEA modes 17 and 18 differ only by the physical size of the
+ * screen, so we have to calculate the physical aspect
+ * ratio. 4 * 10 / 3 is 13
+ */
+ if ((dc->out->h_size * 10) / dc->out->v_size > 14) {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 18;
+ } else {
+ avi.m = HDMI_AVI_M_4_3;
+ avi.vic = 17;
+ }
+ } else if (dc->mode.v_active == 720 ||
+ (dc->mode.v_active == 1470 && dc->mode.stereo_mode)) {
+ /* VIC for both 720p and 720p 3D mode */
+ avi.m = HDMI_AVI_M_16_9;
+ if (dc->mode.h_front_porch == 110)
+ avi.vic = 4; /* 60 Hz */
+ else
+ avi.vic = 19; /* 50 Hz */
+ } else if (dc->mode.v_active == 1080 ||
+ (dc->mode.v_active == 2205 && dc->mode.stereo_mode)) {
+ /* VIC for both 1080p and 1080p 3D mode */
+ avi.m = HDMI_AVI_M_16_9;
+ if (dc->mode.h_front_porch == 88)
+ avi.vic = 16; /* 60 Hz */
+ else if (dc->mode.h_front_porch == 528)
+ avi.vic = 31; /* 50 Hz */
+ else
+ avi.vic = 32; /* 24 Hz */
+ } else {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 0;
+ }
+
+
+ tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AVI,
+ HDMI_AVI_VERSION,
+ &avi, sizeof(avi));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_stereo_infoframe(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct hdmi_stereo_infoframe stereo;
+ u32 val;
+
+ if (!dc->mode.stereo_mode) {
+ val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ val &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ return;
+ }
+
+ memset(&stereo, 0x0, sizeof(stereo));
+
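+ /* HDMI Vendor Specific InfoFrame: IEEE OUI 0x000C03, LSB first */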
+ stereo.regid0 = 0x03;
+ stereo.regid1 = 0x0c;
+ stereo.regid2 = 0x00;
+ stereo.hdmi_video_format = 2; /* 3D_Structure present */
+ stereo._3d_structure = 0; /* frame packing */
+
+ tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_VENDOR_VERSION,
+ &stereo, 6);
+
+ val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ val |= GENERIC_CTRL_ENABLE;
+
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_audio_infoframe(struct tegra_dc *dc, bool dvi)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct hdmi_audio_infoframe audio;
+
+ if (dvi) {
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&audio, 0x0, sizeof(audio));
+
+ audio.cc = HDMI_AUDIO_CC_2;
+ tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AUDIO,
+ HDMI_AUDIO_VERSION,
+ &audio, sizeof(audio));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_tdms(struct tegra_dc_hdmi_data *hdmi,
+ const struct tdms_config *tc)
+{
+ tegra_hdmi_writel(hdmi, tc->pll0, HDMI_NV_PDISP_SOR_PLL0);
+ tegra_hdmi_writel(hdmi, tc->pll1, HDMI_NV_PDISP_SOR_PLL1);
+
+ tegra_hdmi_writel(hdmi, tc->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+ tegra_hdmi_writel(hdmi,
+ tc->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE,
+ HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static void tegra_dc_hdmi_enable(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ int pulse_start;
+ int dispclk_div_8_2;
+ int retries;
+ int rekey;
+ int err;
+ unsigned long val;
+ unsigned i;
+ unsigned long oldrate;
+
+ /* enable power, clocks, resets, etc. */
+
+ /* The upstream DC needs to be clocked for accesses to HDMI to not
+ * hard lock the system. Because we don't know if HDMI is connected
+ * to disp1 or disp2, we need to enable both until we set the DC mux.
+ */
+ clk_enable(hdmi->disp1_clk);
+ clk_enable(hdmi->disp2_clk);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ /* Enabling HDA clocks before asserting HDA PD and ELDV bits */
+ clk_enable(hdmi->hda_clk);
+ clk_enable(hdmi->hda2codec_clk);
+ clk_enable(hdmi->hda2hdmi_clk);
+#endif
+
+ /* back off multiplier before attaching to parent at new rate. */
+ oldrate = clk_get_rate(hdmi->clk);
+ clk_set_rate(hdmi->clk, oldrate / 2);
+
+ tegra_dc_setup_clk(dc, hdmi->clk);
+ clk_set_rate(hdmi->clk, dc->mode.pclk);
+
+ clk_enable(hdmi->clk);
+ tegra_periph_reset_assert(hdmi->clk);
+ mdelay(1);
+ tegra_periph_reset_deassert(hdmi->clk);
+
+ /* TODO: copy HDCP keys from KFUSE to HDMI */
+
+ /* Program display timing registers: handled by dc */
+
+ /* program HDMI registers and SOR sequencer */
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ DC_DISP_DISP_COLOR_CONTROL);
+
+ /* video_preamble uses h_pulse2 */
+ pulse_start = dc->mode.h_ref_to_sync + dc->mode.h_sync_width +
+ dc->mode.h_back_porch - 10;
+ tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ tegra_dc_writel(dc,
+ PULSE_MODE_NORMAL |
+ PULSE_POLARITY_HIGH |
+ PULSE_QUAL_VACTIVE |
+ PULSE_LAST_END_A,
+ DC_DISP_H_PULSE2_CONTROL);
+ tegra_dc_writel(dc, PULSE_START(pulse_start) | PULSE_END(pulse_start + 8),
+ DC_DISP_H_PULSE2_POSITION_A);
+
+ tegra_hdmi_writel(hdmi,
+ VSYNC_WINDOW_END(0x210) |
+ VSYNC_WINDOW_START(0x200) |
+ VSYNC_WINDOW_ENABLE,
+ HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+ tegra_hdmi_writel(hdmi,
+ (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) |
+ ARM_VIDEO_RANGE_LIMITED,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+
+ clk_disable(hdmi->disp1_clk);
+ clk_disable(hdmi->disp2_clk);
+
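+ /* The SOR reference clock divider is the HDMI clock in MHz as a
+ * fixed-point value with two fractional bits: the integer MHz goes to
+ * DIV_INT and the quarter-MHz remainder to DIV_FRAC.
+ */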
+ dispclk_div_8_2 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+ tegra_hdmi_writel(hdmi,
+ SOR_REFCLK_DIV_INT(dispclk_div_8_2 >> 2) |
+ SOR_REFCLK_DIV_FRAC(dispclk_div_8_2),
+ HDMI_NV_PDISP_SOR_REFCLK);
+
+ hdmi->clk_enabled = true;
+
+ if (!hdmi->dvi) {
+ err = tegra_dc_hdmi_setup_audio(dc, hdmi->audio_freq,
+ hdmi->audio_source);
+
+ if (err < 0)
+ hdmi->dvi = true;
+ }
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ if (hdmi->eld_retrieved)
+ tegra_dc_hdmi_setup_eld_buff(dc);
+#endif
+
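+ /* Fit as many 32-pixel data island packets as possible into the
+ * horizontal blanking interval, after subtracting the rekey window and
+ * the pixels reserved around each island (the 18).
+ */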
+ rekey = HDMI_REKEY_DEFAULT;
+ val = HDMI_CTRL_REKEY(rekey);
+ val |= HDMI_CTRL_MAX_AC_PACKET((dc->mode.h_sync_width +
+ dc->mode.h_back_porch +
+ dc->mode.h_front_porch -
+ rekey - 18) / 32);
+ if (!hdmi->dvi)
+ val |= HDMI_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_CTRL);
+
+ if (hdmi->dvi)
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ else
+ tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+ tegra_dc_hdmi_setup_avi_infoframe(dc, hdmi->dvi);
+ tegra_dc_hdmi_setup_audio_infoframe(dc, hdmi->dvi);
+ tegra_dc_hdmi_setup_stereo_infoframe(dc);
+
+ /* TMDS CONFIG */
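+ /* pick the first entry whose pclk limit covers the mode's pixel clock;
+ * the table is sorted by ascending pclk */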
+ for (i = 0; i < ARRAY_SIZE(tdms_config); i++) {
+ if (dc->mode.pclk <= tdms_config[i].pclk) {
+ tegra_dc_hdmi_setup_tdms(hdmi, &tdms_config[i]);
+ break;
+ }
+ }
+
+ tegra_hdmi_writel(hdmi,
+ SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC_ALT(0) |
+ SOR_SEQ_PD_PC(8) |
+ SOR_SEQ_PD_PC_ALT(8),
+ HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+ val = SOR_SEQ_INST_WAIT_TIME(1) |
+ SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+ SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_PIN_A_LOW |
+ SOR_SEQ_INST_PIN_B_LOW |
+ SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST0);
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST8);
+
+ val = 0x1c800;
+ val &= ~SOR_CSTM_ROTCLK(~0);
+ val |= SOR_CSTM_ROTCLK(2);
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_CSTM);
+
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+
+ /* start SOR */
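+ /* request the new power state with SETTING_NEW_TRIGGER, commit it with
+ * SETTING_NEW_DONE, then poll until the pending bit clears */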
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_TRIGGER,
+ HDMI_NV_PDISP_SOR_PWR);
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_DONE,
+ HDMI_NV_PDISP_SOR_PWR);
+
+ retries = 1000;
+ do {
+ BUG_ON(--retries < 0);
+ val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+ } while (val & SOR_PWR_SETTING_NEW_PENDING);
+
+ val = SOR_STATE_ASY_CRCMODE_COMPLETE |
+ SOR_STATE_ASY_OWNER_HEAD0 |
+ SOR_STATE_ASY_SUBOWNER_BOTH |
+ SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+ SOR_STATE_ASY_DEPOL_POS;
+
+ if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_H_SYNC)
+ val |= SOR_STATE_ASY_HSYNCPOL_NEG;
+ else
+ val |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+ if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_V_SYNC)
+ val |= SOR_STATE_ASY_VSYNCPOL_NEG;
+ else
+ val |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE2);
+
+ val = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE1);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, val | SOR_STATE_ATTACHED,
+ HDMI_NV_PDISP_SOR_STATE1);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+ tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+ DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ tegra_nvhdcp_set_plug(hdmi->nvhdcp, 1);
+}
+
+static void tegra_dc_hdmi_disable(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
+ /* sleep 1ms before disabling clocks to ensure HDA gets the interrupt */
+ msleep(1);
+ clk_disable(hdmi->hda2hdmi_clk);
+ clk_disable(hdmi->hda2codec_clk);
+ clk_disable(hdmi->hda_clk);
+#endif
+ tegra_periph_reset_assert(hdmi->clk);
+ hdmi->clk_enabled = false;
+ clk_disable(hdmi->clk);
+ tegra_dvfs_set_rate(hdmi->clk, 0);
+}
+
+struct tegra_dc_out_ops tegra_dc_hdmi_ops = {
+ .init = tegra_dc_hdmi_init,
+ .destroy = tegra_dc_hdmi_destroy,
+ .enable = tegra_dc_hdmi_enable,
+ .disable = tegra_dc_hdmi_disable,
+ .detect = tegra_dc_hdmi_detect,
+ .suspend = tegra_dc_hdmi_suspend,
+ .resume = tegra_dc_hdmi_resume,
+};
+
+struct tegra_dc_edid *tegra_dc_get_edid(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi;
+
+ /* TODO: Support EDID on non-HDMI devices */
+ if (dc->out->type != TEGRA_DC_OUT_HDMI)
+ return ERR_PTR(-ENODEV);
+
+ hdmi = tegra_dc_get_outdata(dc);
+
+ return tegra_edid_get_data(hdmi->edid);
+}
+EXPORT_SYMBOL(tegra_dc_get_edid);
+
+void tegra_dc_put_edid(struct tegra_dc_edid *edid)
+{
+ tegra_edid_put_data(edid);
+}
+EXPORT_SYMBOL(tegra_dc_put_edid);
diff --git a/drivers/video/tegra/dc/hdmi.h b/drivers/video/tegra/dc/hdmi.h
new file mode 100644
index 000000000000..702ab16e87f0
--- /dev/null
+++ b/drivers/video/tegra/dc/hdmi.h
@@ -0,0 +1,222 @@
+/*
+ * drivers/video/tegra/dc/hdmi.h
+ *
+ * non-Tegra-specific HDMI declarations
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HDMI_INFOFRAME_TYPE_AVI 0x82
+#define HDMI_INFOFRAME_TYPE_SPD 0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned s:2; /* scan information */
+ unsigned b:2; /* bar info data valid */
+ unsigned a:1; /* active info present */
+ unsigned y:2; /* RGB or YCbCr */
+ unsigned res1:1;
+
+ /* PB2 */
+ unsigned r:4; /* active format aspect ratio */
+ unsigned m:2; /* picture aspect ratio */
+ unsigned c:2; /* colorimetry */
+
+ /* PB3 */
+ unsigned sc:2; /* scan information */
+ unsigned q:2; /* quantization range */
+ unsigned ec:3; /* extended colorimetry */
+ unsigned itc:1; /* it content */
+
+ /* PB4 */
+ unsigned vic:7; /* video format id code */
+ unsigned res4:1;
+
+ /* PB5 */
+ unsigned pr:4; /* pixel repetition factor */
+ unsigned cn:2; /* IT content type */
+ unsigned yq:2; /* ycc quantization range */
+
+ /* PB6-7 */
+ u16 top_bar_end_line;
+
+ /* PB8-9 */
+ u16 bot_bar_start_line;
+
+ /* PB10-11 */
+ u16 left_bar_end_pixel;
+
+ /* PB12-13 */
+ u16 right_bar_start_pixel;
+} __attribute__((packed));
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB 0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT 0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE 0x0
+#define HDMI_AVI_S_OVERSCAN 0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE 0x0
+#define HDMI_AVI_C_SMPTE 0x1
+#define HDMI_AVI_C_ITU_R 0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3 0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME 0x8
+#define HDMI_AVI_R_4_3_CENTER 0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned cc:3; /* channel count */
+ unsigned res1:1;
+ unsigned ct:4; /* coding type */
+
+ /* PB2 */
+ unsigned ss:2; /* sample size */
+ unsigned sf:3; /* sample frequency */
+ unsigned res2:3;
+
+ /* PB3 */
+ unsigned cxt:5; /* coding extension type */
+ unsigned res3:3;
+
+ /* PB4 */
+ u8 ca; /* channel/speaker allocation */
+
+ /* PB5 */
+ unsigned res5:3;
+ unsigned lsv:4; /* level shift value */
+ unsigned dm_inh:1; /* downmix inhibit */
+
+ /* PB6-10 reserved */
+ u8 res6;
+ u8 res7;
+ u8 res8;
+ u8 res9;
+ u8 res10;
+} __attribute__((packed));
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2 0x1
+#define HDMI_AUDIO_CC_3 0x2
+#define HDMI_AUDIO_CC_4 0x3
+#define HDMI_AUDIO_CC_5 0x4
+#define HDMI_AUDIO_CC_6 0x5
+#define HDMI_AUDIO_CC_7 0x6
+#define HDMI_AUDIO_CC_8 0x7
+
+#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM 0x1
+#define HDMI_AUDIO_CT_AC3 0x2
+#define HDMI_AUDIO_CT_MPEG1 0x3
+#define HDMI_AUDIO_CT_MP3 0x4
+#define HDMI_AUDIO_CT_MPEG2 0x5
+#define HDMI_AUDIO_CT_AAC_LC 0x6
+#define HDMI_AUDIO_CT_DTS 0x7
+#define HDMI_AUDIO_CT_ATRAC 0x8
+#define HDMI_AUDIO_CT_DSD 0x9
+#define HDMI_AUDIO_CT_E_AC3 0xa
+#define HDMI_AUDIO_CT_DTS_HD 0xb
+#define HDMI_AUDIO_CT_MLP 0xc
+#define HDMI_AUDIO_CT_DST 0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT 0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUIDO_SF_32K 0x1
+#define HDMI_AUDIO_SF_44_1K 0x2
+#define HDMI_AUDIO_SF_48K 0x3
+#define HDMI_AUDIO_SF_88_2K 0x4
+#define HDMI_AUDIO_SF_96K 0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K 0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT 0x1
+#define HDMI_AUDIO_SS_20BIT 0x2
+#define HDMI_AUDIO_SS_24BIT 0x3
+
+#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC 0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ u8 regid0;
+
+ /* PB2 */
+ u8 regid1;
+
+ /* PB3 */
+ u8 regid2;
+
+ /* PB4 */
+ unsigned res1:5;
+ unsigned hdmi_video_format:3;
+
+ /* PB5 */
+ unsigned res2:4;
+ unsigned _3d_structure:4;
+
+ /* PB6*/
+ unsigned res3:4;
+ unsigned _3d_ext_data:4;
+
+} __attribute__((packed));
+
+#define HDMI_VENDOR_VERSION 0x01
+
+struct tegra_dc_hdmi_data;
+
+unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg);
+void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long val, unsigned long reg);
+
+#endif
diff --git a/drivers/video/tegra/dc/hdmi_reg.h b/drivers/video/tegra/dc/hdmi_reg.h
new file mode 100644
index 000000000000..7de5d869b93a
--- /dev/null
+++ b/drivers/video/tegra/dc/hdmi_reg.h
@@ -0,0 +1,477 @@
+/*
+ * drivers/video/tegra/dc/hdmi_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+
+#define HDMI_CTXSW 0x00
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_SLEEP (0 << 0)
+#define SOR_STATE_ASY_HEAD_OPMODE_SNOOSE (1 << 0)
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_SAFE (0 << 2)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+#define SOR_STATE_ARM_SHOW_VGA (1 << 4)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define REPEATER (1 << 31)
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDCP_RUN_YES (1 << 0)
+#define CRYPT_ENABLED (1 << 1)
+#define ONEONE_ENABLED (1 << 3)
+#define AN_VALID (1 << 8)
+#define R0_VALID (1 << 9)
+#define SPRIME_VALID (1 << 10)
+#define MPRIME_VALID (1 << 11)
+#define SROM_ERR (1 << 13)
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define TMDS0_LINK0 (1 << 4)
+#define READ_S (1 << 0)
+#define READ_M (2 << 0)
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define STATUS_CS (1 << 6)
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+#define INFOFRAME_CTRL_OTHER (1 << 4)
+#define INFOFRAME_CTRL_SINGLE (1 << 8)
+
+#define INFOFRAME_HEADER_TYPE(x) ((x) & 0xff)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0xf) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+#define ACR_SB3(x) (((x) & 0xff) << 8)
+#define ACR_SB2(x) (((x) & 0xff) << 16)
+#define ACR_SB1(x) (((x) & 0xff) << 24)
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+
+#define ACR_SB6(x) (((x) & 0xff) << 0)
+#define ACR_SB5(x) (((x) & 0xff) << 8)
+#define ACR_SB4(x) (((x) & 0xff) << 16)
+#define ACR_ENABLE (1 << 31)
+#define ACR_SUBPACK_N(x) ((x) & 0xffffff)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_AUDIO_LAYOUT (1 << 8)
+#define HDMI_CTRL_SAMPLE_FLAT (1 << 12)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+#define SPARE_ACR_PRIORITY_HIGH (0 << 31)
+#define SPARE_ACR_PRIORITY_LOW (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SAFE_START_NORMAL (0 << 17)
+#define SOR_PWR_SAFE_START_ALT (1 << 17)
+#define SOR_PWR_HALT_DELAY (1 << 24)
+#define SOR_PWR_MODE (1 << 28)
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCOPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0x3) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_PD_TXDA_0 (1 << 0)
+#define SOR_CSTM_PD_TXDA_1 (1 << 1)
+#define SOR_CSTM_PD_TXDA_2 (1 << 2)
+#define SOR_CSTM_PD_TXDA_3 (1 << 3)
+#define SOR_CSTM_PD_TXDB_0 (1 << 4)
+#define SOR_CSTM_PD_TXDB_1 (1 << 5)
+#define SOR_CSTM_PD_TXDB_2 (1 << 6)
+#define SOR_CSTM_PD_TXDB_3 (1 << 7)
+#define SOR_CSTM_PD_TXCA (1 << 8)
+#define SOR_CSTM_PD_TXCB (1 << 9)
+#define SOR_CSTM_UPPER (1 << 11)
+#define SOR_CSTM_MODE(x) (((x) & 0x3) << 12)
+#define SOR_CSTM_LINKACTA (1 << 14)
+#define SOR_CSTM_LINKACTB (1 << 15)
+#define SOR_CSTM_LVDS_EN (1 << 16)
+#define SOR_CSTM_DUP_SYNC (1 << 17)
+#define SOR_CSTM_NEW_MODE (1 << 18)
+#define SOR_CSTM_BALANCED (1 << 19)
+#define SOR_CSTM_PLLDIV (1 << 21)
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+#define SOR_CSTM_ROTDAT(x) (((x) & 0x7) << 28)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST0 0x60
+#define HDMI_NV_PDISP_SOR_SEQ_INST1 0x61
+#define HDMI_NV_PDISP_SOR_SEQ_INST2 0x62
+#define HDMI_NV_PDISP_SOR_SEQ_INST3 0x63
+#define HDMI_NV_PDISP_SOR_SEQ_INST4 0x64
+#define HDMI_NV_PDISP_SOR_SEQ_INST5 0x65
+#define HDMI_NV_PDISP_SOR_SEQ_INST6 0x66
+#define HDMI_NV_PDISP_SOR_SEQ_INST7 0x67
+#define HDMI_NV_PDISP_SOR_SEQ_INST8 0x68
+#define HDMI_NV_PDISP_SOR_SEQ_INST9 0x69
+#define HDMI_NV_PDISP_SOR_SEQ_INSTA 0x6a
+#define HDMI_NV_PDISP_SOR_SEQ_INSTB 0x6b
+#define HDMI_NV_PDISP_SOR_SEQ_INSTC 0x6c
+#define HDMI_NV_PDISP_SOR_SEQ_INSTD 0x6d
+#define HDMI_NV_PDISP_SOR_SEQ_INSTE 0x6e
+#define HDMI_NV_PDISP_SOR_SEQ_INSTF 0x6f
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_US (0 << 12)
+#define SOR_SEQ_INST_WAIT_UNITS_MS (1 << 12)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define SOR_SEQ_INST_SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define SOR_SEQ_INST_BLANK_H (1 << 27)
+#define SOR_SEQ_INST_BLANK_V (1 << 28)
+#define SOR_SEQ_INST_ASSERT_PLL_RESETV (1 << 29)
+#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+/* note: datasheet defines FS1..FS7. we have FS(0)..FS(6) */
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0 0xac
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0 0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0 0xbd
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0 0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0 0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0 0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0 0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0 0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0 0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0 0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT_0 0xc6
+#endif
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOFT_RESET (1 << 8)
+#define AUDIO_CNTRL0_SOFT_RESET_ALL (1 << 12)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_UNKNOWN (1 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_32K (2 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_44_1K (0 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_48K (2 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_88_2K (8 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_96K (10 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_176_4K (12 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_192K (14 << 16)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNALTE (1 << 24)
+#define AUDIO_N_LOOKUP_ENABLE (1 << 28)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define LOCAL_KEYS (1 << 0)
+#define AUTOINC (1 << 1)
+#define WRITE16 (1 << 4)
+#define PKEY_REQUEST_RELOAD_TRIGGER (1 << 5)
+#define PKEY_LOADED (1 << 6)
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#endif
diff --git a/drivers/video/tegra/dc/nvhdcp.c b/drivers/video/tegra/dc/nvhdcp.c
new file mode 100644
index 000000000000..263de07a3da0
--- /dev/null
+++ b/drivers/video/tegra/dc/nvhdcp.c
@@ -0,0 +1,1259 @@
+/*
+ * drivers/video/tegra/dc/nvhdcp.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#include <mach/dc.h>
+#include <mach/kfuse.h>
+
+#include <video/nvhdcp.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+
+DECLARE_WAIT_QUEUE_HEAD(wq_worker);
+
+/* for 0x40 Bcaps */
+#define BCAPS_REPEATER (1 << 6)
+#define BCAPS_READY (1 << 5)
+#define BCAPS_11 (1 << 1) /* used for both Bcaps and Ainfo */
+
+/* for 0x41 Bstatus */
+#define BSTATUS_MAX_DEVS_EXCEEDED (1 << 7)
+#define BSTATUS_MAX_CASCADE_EXCEEDED (1 << 11)
+
+#ifdef VERBOSE_DEBUG
+#define nvhdcp_vdbg(...) \
+ printk("nvhdcp: " __VA_ARGS__)
+#else
+#define nvhdcp_vdbg(...) \
+({ \
+ if(0) \
+ printk("nvhdcp: " __VA_ARGS__); \
+ 0; \
+})
+#endif
+#define nvhdcp_debug(...) \
+ pr_debug("nvhdcp: " __VA_ARGS__)
+#define nvhdcp_err(...) \
+ pr_err("nvhdcp: Error: " __VA_ARGS__)
+#define nvhdcp_info(...) \
+ pr_info("nvhdcp: " __VA_ARGS__)
+
+
+/* for nvhdcp.state */
+enum tegra_nvhdcp_state {
+ STATE_OFF,
+ STATE_UNAUTHENTICATED,
+ STATE_LINK_VERIFY,
+ STATE_RENEGOTIATE,
+};
+
+struct tegra_nvhdcp {
+ struct delayed_work work;
+ struct tegra_dc_hdmi_data *hdmi;
+ struct workqueue_struct *downstream_wq;
+ struct mutex lock;
+ struct miscdevice miscdev;
+ char name[12];
+ unsigned id;
+ bool plugged; /* true if hotplug detected */
+ atomic_t policy; /* set policy */
+ enum tegra_nvhdcp_state state; /* STATE_xxx */
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ int bus;
+ u32 b_status;
+ u64 a_n;
+ u64 c_n;
+ u64 a_ksv;
+ u64 b_ksv;
+ u64 c_ksv;
+ u64 d_ksv;
+ u8 v_prime[20];
+ u64 m_prime;
+ u32 num_bksv_list;
+ u64 bksv_list[TEGRA_NVHDCP_MAX_DEVS];
+ int fail_count;
+};
+
+static inline bool nvhdcp_is_plugged(struct tegra_nvhdcp *nvhdcp)
+{
+ rmb();
+ return nvhdcp->plugged;
+}
+
+static inline bool nvhdcp_set_plugged(struct tegra_nvhdcp *nvhdcp, bool plugged)
+{
+ nvhdcp->plugged = plugged;
+ wmb();
+ return plugged;
+}
+
+static int nvhdcp_i2c_read(struct tegra_nvhdcp *nvhdcp, u8 reg,
+ size_t len, void *data)
+{
+ int status;
+ int retries = 15;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x74 >> 1, /* primary link */
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ },
+ {
+ .addr = 0x74 >> 1, /* primary link */
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ },
+ };
+
+ do {
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("disconnect during i2c xfer\n");
+ return -EIO;
+ }
+ status = i2c_transfer(nvhdcp->client->adapter,
+ msg, ARRAY_SIZE(msg));
+ if ((status < 0) && (retries > 1))
+ msleep(250);
+ } while ((status < 0) && retries--);
+
+ if (status < 0) {
+ nvhdcp_err("i2c xfer error %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+
+static int nvhdcp_i2c_write(struct tegra_nvhdcp *nvhdcp, u8 reg,
+ size_t len, const void *data)
+{
+ int status;
+ u8 buf[len + 1];
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x74 >> 1, /* primary link */
+ .flags = 0,
+ .len = len + 1,
+ .buf = buf,
+ },
+ };
+ int retries = 15;
+
+ buf[0] = reg;
+ memcpy(buf + 1, data, len);
+
+ do {
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("disconnect during i2c xfer\n");
+ return -EIO;
+ }
+ status = i2c_transfer(nvhdcp->client->adapter,
+ msg, ARRAY_SIZE(msg));
+ if ((status < 0) && (retries > 1))
+ msleep(250);
+ } while ((status < 0) && retries--);
+
+ if (status < 0) {
+ nvhdcp_err("i2c xfer error %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+
+static inline int nvhdcp_i2c_read8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 *val)
+{
+ return nvhdcp_i2c_read(nvhdcp, reg, 1, val);
+}
+
+static inline int nvhdcp_i2c_write8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 val)
+{
+ return nvhdcp_i2c_write(nvhdcp, reg, 1, &val);
+}
+
+static inline int nvhdcp_i2c_read16(struct tegra_nvhdcp *nvhdcp,
+ u8 reg, u16 *val)
+{
+ u8 buf[2];
+ int e;
+
+ e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf);
+ if (e)
+ return e;
+
+ if (val)
+ *val = buf[0] | (u16)buf[1] << 8;
+
+ return 0;
+}
+
+static int nvhdcp_i2c_read40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 *val)
+{
+ u8 buf[5];
+ int e, i;
+ u64 n;
+
+ e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf);
+ if (e)
+ return e;
+
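+	/* assemble the 40-bit value LSB first: buf[0] ends up in the low byte */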
+ for(i = 0, n = 0; i < 5; i++ ) {
+ n <<= 8;
+ n |= buf[4 - i];
+ }
+
+ if (val)
+ *val = n;
+
+ return 0;
+}
+
+static int nvhdcp_i2c_write40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val)
+{
+ char buf[5];
+ int i;
+ for(i = 0; i < 5; i++ ) {
+ buf[i] = val;
+ val >>= 8;
+ }
+ return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf);
+}
+
+static int nvhdcp_i2c_write64(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val)
+{
+ char buf[8];
+ int i;
+ for(i = 0; i < 8; i++ ) {
+ buf[i] = val;
+ val >>= 8;
+ }
+ return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf);
+}
+
+
+/* 64-bit link encryption session random number */
+static inline u64 get_an(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ return r;
+}
+
+/* 64-bit upstream exchange random number */
+static inline void set_cn(struct tegra_dc_hdmi_data *hdmi, u64 c_n)
+{
+ tegra_hdmi_writel(hdmi, (u32)c_n, HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ tegra_hdmi_writel(hdmi, c_n >> 32, HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+}
+
+
+/* 40-bit transmitter's key selection vector */
+static inline u64 get_aksv(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ return r;
+}
+
+/* 40-bit receiver's key selection vector */
+static inline void set_bksv(struct tegra_dc_hdmi_data *hdmi, u64 b_ksv, bool repeater)
+{
+ if (repeater)
+ b_ksv |= (u64)REPEATER << 32;
+ tegra_hdmi_writel(hdmi, (u32)b_ksv, HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ tegra_hdmi_writel(hdmi, b_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+}
+
+
+/* 40-bit software's key selection vector */
+static inline void set_cksv(struct tegra_dc_hdmi_data *hdmi, u64 c_ksv)
+{
+ tegra_hdmi_writel(hdmi, (u32)c_ksv, HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ tegra_hdmi_writel(hdmi, c_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+}
+
+/* 40-bit connection state */
+static inline u64 get_cs(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ return r;
+}
+
+/* 40-bit upstream key selection vector */
+static inline u64 get_dksv(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ return r;
+}
+
+/* 64-bit encrypted M0 value */
+static inline u64 get_mprime(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ return r;
+}
+
+static inline u16 get_transmitter_ri(struct tegra_dc_hdmi_data *hdmi)
+{
+ return tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_RI);
+}
+
+static inline int get_receiver_ri(struct tegra_nvhdcp *nvhdcp, u16 *r)
+{
+ return nvhdcp_i2c_read16(nvhdcp, 0x8, r); /* long read */
+}
+
+static int get_bcaps(struct tegra_nvhdcp *nvhdcp, u8 *b_caps)
+{
+ return nvhdcp_i2c_read8(nvhdcp, 0x40, b_caps);
+}
+
+static int get_ksvfifo(struct tegra_nvhdcp *nvhdcp,
+ unsigned num_bksv_list, u64 *ksv_list)
+{
+ u8 *buf, *p;
+ int e;
+ unsigned i;
+ size_t buf_len = num_bksv_list * 5;
+
+ if (!ksv_list || num_bksv_list > TEGRA_NVHDCP_MAX_DEVS)
+ return -EINVAL;
+
+ if (num_bksv_list == 0)
+ return 0;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(buf))
+ return -ENOMEM;
+
+ e = nvhdcp_i2c_read(nvhdcp, 0x43, buf_len, buf);
+ if (e) {
+ kfree(buf);
+ return e;
+ }
+
+ /* load 40-bit keys from repeater into array of u64 */
+ p = buf;
+ for (i = 0; i < num_bksv_list; i++) {
+ ksv_list[i] = p[0] | ((u64)p[1] << 8) | ((u64)p[2] << 16)
+ | ((u64)p[3] << 24) | ((u64)p[4] << 32);
+ p += 5;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+/* get V' 160-bit SHA-1 hash from repeater */
+static int get_vprime(struct tegra_nvhdcp *nvhdcp, u8 *v_prime)
+{
+ int e, i;
+
+ for (i = 0; i < 20; i += 4) {
+ e = nvhdcp_i2c_read(nvhdcp, 0x20 + i, 4, v_prime + i);
+ if (e)
+ return e;
+ }
+ return 0;
+}
+
+
+/* set or clear RUN_YES */
+static void hdcp_ctrl_run(struct tegra_dc_hdmi_data *hdmi, bool v)
+{
+ u32 ctrl;
+
+ if (v) {
+ ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+ ctrl |= HDCP_RUN_YES;
+ } else {
+ ctrl = 0;
+ }
+
+ tegra_hdmi_writel(hdmi, ctrl, HDMI_NV_PDISP_RG_HDCP_CTRL);
+}
+
+/* wait for any bits in mask to be set in HDMI_NV_PDISP_RG_HDCP_CTRL
+ * sleeps up to 120 ms */
+static int wait_hdcp_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 *v)
+{
+ int retries = 13;
+ u32 ctrl;
+
+ do {
+ ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+ if ((ctrl & mask)) {
+ if (v)
+ *v = ctrl;
+ break;
+ }
+ if (retries > 1)
+ msleep(10);
+ } while (--retries);
+ if (!retries) {
+ nvhdcp_err("ctrl read timeout (mask=0x%x)\n", mask);
+ return -EIO;
+ }
+ return 0;
+}
+
+/* wait for bits in mask to be set to value in HDMI_NV_PDISP_KEY_CTRL
+ * waits up to 100 ms */
+static int wait_key_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 value)
+{
+ int retries = 101;
+ u32 ctrl;
+
+ do {
+ msleep(1);
+ ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
+ if (((ctrl ^ value) & mask) == 0)
+ break;
+ } while (--retries);
+ if (!retries) {
+ nvhdcp_err("key ctrl read timeout (mask=0x%x)\n", mask);
+ return -EIO;
+ }
+ return 0;
+}
+
+/* check that key selection vector is well formed.
+ * NOTE: this function assumes KSV has already been checked against
+ * revocation list.
+ */
+static int verify_ksv(u64 k)
+{
+ unsigned i;
+
+ /* count set bits, must be exactly 20 set to be valid */
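+	/* k & -k isolates the lowest set bit; XOR clears it, so i counts set bits */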
+ for(i = 0; k; i++)
+ k ^= k & -k;
+
+ return (i != 20) ? -EINVAL : 0;
+}
+
+/* get Status and Kprime signature - READ_S on TMDS0_LINK0 only */
+static int get_s_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
+{
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ u32 sp_msb, sp_lsb1, sp_lsb2;
+ int e;
+
+ /* if connection isn't authenticated ... */
+ mutex_lock(&nvhdcp->lock);
+ if (nvhdcp->state != STATE_LINK_VERIFY) {
+ memset(pkt, 0, sizeof *pkt);
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
+ e = 0;
+ goto err;
+ }
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+
+ /* we will be taking c_n, c_ksv as input */
+ if (!(pkt->value_flags & TEGRA_NVHDCP_FLAG_CN)
+ || !(pkt->value_flags & TEGRA_NVHDCP_FLAG_CKSV)) {
+ nvhdcp_err("missing value_flags (0x%x)\n", pkt->value_flags);
+ e = -EINVAL;
+ goto err;
+ }
+
+ pkt->value_flags = 0;
+
+ pkt->a_ksv = nvhdcp->a_ksv;
+ pkt->a_n = nvhdcp->a_n;
+ pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;
+
+ nvhdcp_vdbg("%s():cn %llx cksv %llx\n", __func__, pkt->c_n, pkt->c_ksv);
+
+ set_cn(hdmi, pkt->c_n);
+
+ tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_S,
+ HDMI_NV_PDISP_RG_HDCP_CMODE);
+
+ set_cksv(hdmi, pkt->c_ksv);
+
+ e = wait_hdcp_ctrl(hdmi, SPRIME_VALID, NULL);
+ if (e) {
+ nvhdcp_err("Sprime read timeout\n");
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+ e = -EIO;
+ goto err;
+ }
+
+ msleep(50);
+
+ /* read 56-bit Sprime plus 16 status bits */
+ sp_msb = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ sp_lsb1 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ sp_lsb2 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+
+ /* top 8 bits of LSB2 and bottom 8 bits of MSB hold status bits. */
+ pkt->hdcp_status = ( sp_msb << 8 ) | ( sp_lsb2 >> 24);
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_S;
+
+ /* 56-bit Kprime */
+ pkt->k_prime = ((u64)(sp_lsb2 & 0xffffff) << 32) | sp_lsb1;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_KP;
+
+ /* is connection state supported? */
+ if (sp_msb & STATUS_CS) {
+ pkt->cs = get_cs(hdmi);
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_CS;
+ }
+
+ /* load Dksv */
+ pkt->d_ksv = get_dksv(hdmi);
+ if (verify_ksv(pkt->d_ksv)) {
+ nvhdcp_err("Dksv invalid!\n");
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+ e = -EIO; /* treat bad Dksv as I/O error */
+ }
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;
+
+ /* copy current Bksv */
+ pkt->b_ksv = nvhdcp->b_ksv;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
+ mutex_unlock(&nvhdcp->lock);
+ return 0;
+
+err:
+ mutex_unlock(&nvhdcp->lock);
+ return e;
+}
+
+/* get M prime - READ_M on TMDS0_LINK0 only */
+static inline int get_m_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
+{
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ int e;
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+
+ /* if connection isn't authenticated ... */
+ mutex_lock(&nvhdcp->lock);
+ if (nvhdcp->state != STATE_LINK_VERIFY) {
+ memset(pkt, 0, sizeof *pkt);
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
+ e = 0;
+ goto err;
+ }
+
+ pkt->a_ksv = nvhdcp->a_ksv;
+ pkt->a_n = nvhdcp->a_n;
+ pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;
+
+ set_cn(hdmi, pkt->c_n);
+
+ tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_M,
+ HDMI_NV_PDISP_RG_HDCP_CMODE);
+
+ /* Cksv write triggers Mprime update */
+ set_cksv(hdmi, pkt->c_ksv);
+
+ e = wait_hdcp_ctrl(hdmi, MPRIME_VALID, NULL);
+ if (e) {
+ nvhdcp_err("Mprime read timeout\n");
+ e = -EIO;
+ goto err;
+ }
+ msleep(50);
+
+ /* load Mprime */
+ pkt->m_prime = get_mprime(hdmi);
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_MP;
+
+ pkt->b_status = nvhdcp->b_status;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BSTATUS;
+
+ /* copy most recent KSVFIFO, if it is non-zero */
+ pkt->num_bksv_list = nvhdcp->num_bksv_list;
+ if( nvhdcp->num_bksv_list ) {
+ BUILD_BUG_ON(sizeof(pkt->bksv_list) != sizeof(nvhdcp->bksv_list));
+ memcpy(pkt->bksv_list, nvhdcp->bksv_list,
+ nvhdcp->num_bksv_list * sizeof(*pkt->bksv_list));
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSVLIST;
+ }
+
+ /* copy v_prime */
+ BUILD_BUG_ON(sizeof(pkt->v_prime) != sizeof(nvhdcp->v_prime));
+ memcpy(pkt->v_prime, nvhdcp->v_prime, sizeof(nvhdcp->v_prime));
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_V;
+
+ /* load Dksv */
+ pkt->d_ksv = get_dksv(hdmi);
+ if (verify_ksv(pkt->d_ksv)) {
+ nvhdcp_err("Dksv invalid!\n");
+ e = -EIO;
+ goto err;
+ }
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;
+
+ /* copy current Bksv */
+ pkt->b_ksv = nvhdcp->b_ksv;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
+ mutex_unlock(&nvhdcp->lock);
+ return 0;
+
+err:
+ mutex_unlock(&nvhdcp->lock);
+ return e;
+}
+
+static int load_kfuse(struct tegra_dc_hdmi_data *hdmi)
+{
+ unsigned buf[KFUSE_DATA_SZ / 4];
+ int e, i;
+ u32 ctrl;
+ u32 tmp;
+ int retries;
+
+	/* read the kfuse data into a buffer - only needed for early Tegra parts */
+ e = tegra_kfuse_read(buf, sizeof buf);
+ if (e) {
+ nvhdcp_err("Kfuse read failure\n");
+ return e;
+ }
+
+ /* write the kfuse to HDMI SRAM */
+
+ tegra_hdmi_writel(hdmi, 1, HDMI_NV_PDISP_KEY_CTRL); /* LOAD_KEYS */
+
+ /* issue a reload */
+ ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
+ tegra_hdmi_writel(hdmi, ctrl | PKEY_REQUEST_RELOAD_TRIGGER
+ | LOCAL_KEYS , HDMI_NV_PDISP_KEY_CTRL);
+
+ e = wait_key_ctrl(hdmi, PKEY_LOADED, PKEY_LOADED);
+ if (e) {
+ nvhdcp_err("key reload timeout\n");
+ return -EIO;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_KEY_SKEY_INDEX);
+
+ /* wait for SRAM to be cleared */
+ retries = 6;
+ do {
+ tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_DEBUG0);
+ if ((tmp & 1) == 0) break;
+ if (retries > 1)
+ mdelay(1);
+ } while (--retries);
+ if (!retries) {
+ nvhdcp_err("key SRAM clear timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < KFUSE_DATA_SZ / 4; i += 4) {
+
+		/* load 128 bits */
+ tegra_hdmi_writel(hdmi, buf[i], HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ tegra_hdmi_writel(hdmi, buf[i+1], HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ tegra_hdmi_writel(hdmi, buf[i+2], HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ tegra_hdmi_writel(hdmi, buf[i+3], HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+
+ /* trigger LOAD_HDCP_KEY */
+ tegra_hdmi_writel(hdmi, 0x100, HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+
+ tmp = LOCAL_KEYS | WRITE16;
+ if (i)
+ tmp |= AUTOINC;
+ tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_KEY_CTRL);
+
+ /* wait for WRITE16 to complete */
+ e = wait_key_ctrl(hdmi, 0x10, 0); /* WRITE16 */
+ if (e) {
+ nvhdcp_err("key write timeout\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int verify_link(struct tegra_nvhdcp *nvhdcp, bool wait_ri)
+{
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ int retries = 3;
+ u16 old, rx, tx;
+ int e;
+
+ old = 0;
+ rx = 0;
+ tx = 0;
+ /* retry 3 times to deal with I2C link issues */
+ do {
+ if (wait_ri)
+ old = get_transmitter_ri(hdmi);
+
+ e = get_receiver_ri(nvhdcp, &rx);
+ if (!e) {
+ if (!rx) {
+ nvhdcp_err("Ri is 0!\n");
+ return -EINVAL;
+ }
+
+ tx = get_transmitter_ri(hdmi);
+ } else {
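+			/* read failed: force rx != tx so a stale match cannot pass the check below */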
+ rx = ~tx;
+ msleep(50);
+ }
+
+ } while (wait_ri && --retries && old != tx);
+
+ nvhdcp_debug("R0 Ri poll:rx=0x%04x tx=0x%04x\n", rx, tx);
+
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("aborting verify links - lost hdmi connection\n");
+ return -EIO;
+ }
+
+ if (rx != tx)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int get_repeater_info(struct tegra_nvhdcp *nvhdcp)
+{
+ int e, retries;
+ u8 b_caps;
+ u16 b_status;
+
+ nvhdcp_vdbg("repeater found:fetching repeater info\n");
+
+ /* wait up to 5 seconds for READY on repeater */
+ retries = 51;
+ do {
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("disconnect while waiting for repeater\n");
+ return -EIO;
+ }
+
+ e = get_bcaps(nvhdcp, &b_caps);
+ if (!e && (b_caps & BCAPS_READY)) {
+ nvhdcp_debug("Bcaps READY from repeater\n");
+ break;
+ }
+ if (retries > 1)
+ msleep(100);
+ } while (--retries);
+ if (!retries) {
+ nvhdcp_err("repeater Bcaps read timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ memset(nvhdcp->v_prime, 0, sizeof nvhdcp->v_prime);
+ e = get_vprime(nvhdcp, nvhdcp->v_prime);
+ if (e) {
+ nvhdcp_err("repeater Vprime read failure!\n");
+ return e;
+ }
+
+ e = nvhdcp_i2c_read16(nvhdcp, 0x41, &b_status);
+ if (e) {
+ nvhdcp_err("Bstatus read failure!\n");
+ return e;
+ }
+
+ if (b_status & BSTATUS_MAX_DEVS_EXCEEDED) {
+ nvhdcp_err("repeater:max devices (0x%04x)\n", b_status);
+ return -EINVAL;
+ }
+
+ if (b_status & BSTATUS_MAX_CASCADE_EXCEEDED) {
+ nvhdcp_err("repeater:max cascade (0x%04x)\n", b_status);
+ return -EINVAL;
+ }
+
+ nvhdcp->b_status = b_status;
+ nvhdcp->num_bksv_list = b_status & 0x7f;
+ nvhdcp_vdbg("Bstatus 0x%x (devices: %d)\n",
+ b_status, nvhdcp->num_bksv_list);
+
+ memset(nvhdcp->bksv_list, 0, sizeof nvhdcp->bksv_list);
+ e = get_ksvfifo(nvhdcp, nvhdcp->num_bksv_list, nvhdcp->bksv_list);
+ if (e) {
+ nvhdcp_err("repeater:could not read KSVFIFO (err %d)\n", e);
+ return e;
+ }
+
+ return 0;
+}
+
+static void nvhdcp_downstream_worker(struct work_struct *work)
+{
+ struct tegra_nvhdcp *nvhdcp =
+ container_of(to_delayed_work(work), struct tegra_nvhdcp, work);
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ int e;
+ u8 b_caps;
+ u32 tmp;
+ u32 res;
+
+ nvhdcp_vdbg("%s():started thread %s\n", __func__, nvhdcp->name);
+
+ mutex_lock(&nvhdcp->lock);
+ if (nvhdcp->state == STATE_OFF) {
+ nvhdcp_err("nvhdcp failure - giving up\n");
+ goto err;
+ }
+ nvhdcp->state = STATE_UNAUTHENTICATED;
+
+	/* check plug state to terminate early in case flush_workqueue() is waiting */
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("worker started while unplugged!\n");
+ goto lost_hdmi;
+ }
+ nvhdcp_vdbg("%s():hpd=%d\n", __func__, nvhdcp->plugged);
+
+ nvhdcp->a_ksv = 0;
+ nvhdcp->b_ksv = 0;
+ nvhdcp->a_n = 0;
+
+ e = get_bcaps(nvhdcp, &b_caps);
+ if (e) {
+ nvhdcp_err("Bcaps read failure\n");
+ goto failure;
+ }
+
+ nvhdcp_vdbg("read Bcaps = 0x%02x\n", b_caps);
+
+ nvhdcp_vdbg("kfuse loading ...\n");
+
+	/* repeater flag in Bksv must be configured before loading fuses */
+ set_bksv(hdmi, 0, (b_caps & BCAPS_REPEATER));
+
+ e = load_kfuse(hdmi);
+ if (e) {
+ nvhdcp_err("kfuse could not be loaded\n");
+ goto failure;
+ }
+
+ hdcp_ctrl_run(hdmi, 1);
+
+ nvhdcp_vdbg("wait AN_VALID ...\n");
+
+ /* wait for hardware to generate HDCP values */
+ e = wait_hdcp_ctrl(hdmi, AN_VALID | SROM_ERR, &res);
+ if (e) {
+ nvhdcp_err("An key generation timeout\n");
+ goto failure;
+ }
+ if (res & SROM_ERR) {
+ nvhdcp_err("SROM error\n");
+ goto failure;
+ }
+
+ msleep(25);
+
+ nvhdcp->a_ksv = get_aksv(hdmi);
+ nvhdcp->a_n = get_an(hdmi);
+ nvhdcp_vdbg("Aksv is 0x%016llx\n", nvhdcp->a_ksv);
+ nvhdcp_vdbg("An is 0x%016llx\n", nvhdcp->a_n);
+ if (verify_ksv(nvhdcp->a_ksv)) {
+ nvhdcp_err("Aksv verify failure! (0x%016llx)\n", nvhdcp->a_ksv);
+ goto disable;
+ }
+
+ /* write Ainfo to receiver - set 1.1 only if b_caps supports it */
+ e = nvhdcp_i2c_write8(nvhdcp, 0x15, b_caps & BCAPS_11);
+ if (e) {
+ nvhdcp_err("Ainfo write failure\n");
+ goto failure;
+ }
+
+ /* write An to receiver */
+ e = nvhdcp_i2c_write64(nvhdcp, 0x18, nvhdcp->a_n);
+ if (e) {
+ nvhdcp_err("An write failure\n");
+ goto failure;
+ }
+
+ nvhdcp_vdbg("wrote An = 0x%016llx\n", nvhdcp->a_n);
+
+ /* write Aksv to receiver - triggers auth sequence */
+ e = nvhdcp_i2c_write40(nvhdcp, 0x10, nvhdcp->a_ksv);
+ if (e) {
+ nvhdcp_err("Aksv write failure\n");
+ goto failure;
+ }
+
+ nvhdcp_vdbg("wrote Aksv = 0x%010llx\n", nvhdcp->a_ksv);
+
+ /* bail out if unplugged in the middle of negotiation */
+ if (!nvhdcp_is_plugged(nvhdcp))
+ goto lost_hdmi;
+
+ /* get Bksv from receiver */
+ e = nvhdcp_i2c_read40(nvhdcp, 0x00, &nvhdcp->b_ksv);
+ if (e) {
+ nvhdcp_err("Bksv read failure\n");
+ goto failure;
+ }
+ nvhdcp_vdbg("Bksv is 0x%016llx\n", nvhdcp->b_ksv);
+ if (verify_ksv(nvhdcp->b_ksv)) {
+ nvhdcp_err("Bksv verify failure!\n");
+ goto failure;
+ }
+
+ nvhdcp_vdbg("read Bksv = 0x%010llx from device\n", nvhdcp->b_ksv);
+
+ set_bksv(hdmi, nvhdcp->b_ksv, (b_caps & BCAPS_REPEATER));
+
+ nvhdcp_vdbg("loaded Bksv into controller\n");
+
+ e = wait_hdcp_ctrl(hdmi, R0_VALID, NULL);
+ if (e) {
+ nvhdcp_err("R0 read failure!\n");
+ goto failure;
+ }
+
+ nvhdcp_vdbg("R0 valid\n");
+
+ msleep(100); /* can't read R0' within 100ms of writing Aksv */
+
+ nvhdcp_vdbg("verifying links ...\n");
+
+ e = verify_link(nvhdcp, false);
+ if (e) {
+ nvhdcp_err("link verification failed err %d\n", e);
+ goto failure;
+ }
+
+ /* if repeater then get repeater info */
+ if (b_caps & BCAPS_REPEATER) {
+ e = get_repeater_info(nvhdcp);
+ if (e) {
+ nvhdcp_err("get repeater info failed\n");
+ goto failure;
+ }
+ }
+
+ tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+ tmp |= CRYPT_ENABLED;
+ if (b_caps & BCAPS_11) /* HDCP 1.1 ? */
+ tmp |= ONEONE_ENABLED;
+ tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_RG_HDCP_CTRL);
+
+ nvhdcp_vdbg("CRYPT enabled\n");
+
+ nvhdcp->state = STATE_LINK_VERIFY;
+ nvhdcp_info("link verified!\n");
+
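+	/* link is up: re-verify Ri every 1.5 seconds until unplug or a state change */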
+ while (1) {
+ if (!nvhdcp_is_plugged(nvhdcp))
+ goto lost_hdmi;
+
+ if (nvhdcp->state != STATE_LINK_VERIFY)
+ goto failure;
+
+ e = verify_link(nvhdcp, true);
+ if (e) {
+ nvhdcp_err("link verification failed err %d\n", e);
+ goto failure;
+ }
+ mutex_unlock(&nvhdcp->lock);
+ wait_event_interruptible_timeout(wq_worker,
+ !nvhdcp_is_plugged(nvhdcp), msecs_to_jiffies(1500));
+ mutex_lock(&nvhdcp->lock);
+
+ }
+
+failure:
+ nvhdcp->fail_count++;
+ if(nvhdcp->fail_count > 5) {
+ nvhdcp_err("nvhdcp failure - too many failures, giving up!\n");
+ } else {
+ nvhdcp_err("nvhdcp failure - renegotiating in 1 second\n");
+ if (!nvhdcp_is_plugged(nvhdcp))
+ goto lost_hdmi;
+ queue_delayed_work(nvhdcp->downstream_wq, &nvhdcp->work,
+ msecs_to_jiffies(1000));
+ }
+
+lost_hdmi:
+ nvhdcp->state = STATE_UNAUTHENTICATED;
+ hdcp_ctrl_run(hdmi, 0);
+
+err:
+ mutex_unlock(&nvhdcp->lock);
+ return;
+disable:
+ nvhdcp->state = STATE_OFF;
+ nvhdcp_set_plugged(nvhdcp, false);
+ mutex_unlock(&nvhdcp->lock);
+ return;
+}
+
+static int tegra_nvhdcp_on(struct tegra_nvhdcp *nvhdcp)
+{
+ nvhdcp->state = STATE_UNAUTHENTICATED;
+ if (nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp->fail_count = 0;
+ queue_delayed_work(nvhdcp->downstream_wq, &nvhdcp->work,
+ msecs_to_jiffies(100));
+ }
+ return 0;
+}
+
+static int tegra_nvhdcp_off(struct tegra_nvhdcp *nvhdcp)
+{
+ mutex_lock(&nvhdcp->lock);
+ nvhdcp->state = STATE_OFF;
+ nvhdcp_set_plugged(nvhdcp, false);
+ mutex_unlock(&nvhdcp->lock);
+ wake_up_interruptible(&wq_worker);
+ flush_workqueue(nvhdcp->downstream_wq);
+ return 0;
+}
+
+void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd)
+{
+ nvhdcp_debug("hdmi hotplug detected (hpd = %d)\n", hpd);
+
+ if (hpd) {
+ nvhdcp_set_plugged(nvhdcp, true);
+ tegra_nvhdcp_on(nvhdcp);
+ } else {
+ tegra_nvhdcp_off(nvhdcp);
+ }
+}
+
+int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol)
+{
+ if (pol == TEGRA_NVHDCP_POLICY_ALWAYS_ON) {
+ nvhdcp_info("using \"always on\" policy.\n");
+ if (atomic_xchg(&nvhdcp->policy, pol) != pol) {
+ /* policy changed, start working */
+ tegra_nvhdcp_on(nvhdcp);
+ }
+ } else {
+ /* unsupported policy */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_nvhdcp_renegotiate(struct tegra_nvhdcp *nvhdcp)
+{
+ mutex_lock(&nvhdcp->lock);
+ nvhdcp->state = STATE_RENEGOTIATE;
+ mutex_unlock(&nvhdcp->lock);
+ tegra_nvhdcp_on(nvhdcp);
+ return 0;
+}
+
+void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp)
+{
+ if (!nvhdcp) return;
+ tegra_nvhdcp_off(nvhdcp);
+}
+
+void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp)
+{
+ if (!nvhdcp) return;
+ tegra_nvhdcp_renegotiate(nvhdcp);
+}
+
+static long nvhdcp_dev_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tegra_nvhdcp *nvhdcp = filp->private_data;
+ struct tegra_nvhdcp_packet *pkt;
+ int e = -ENOTTY;
+
+ switch (cmd) {
+ case TEGRAIO_NVHDCP_ON:
+ return tegra_nvhdcp_on(nvhdcp);
+
+ case TEGRAIO_NVHDCP_OFF:
+ return tegra_nvhdcp_off(nvhdcp);
+
+ case TEGRAIO_NVHDCP_SET_POLICY:
+ return tegra_nvhdcp_set_policy(nvhdcp, arg);
+
+ case TEGRAIO_NVHDCP_READ_M:
+ pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+ if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
+ e = -EFAULT;
+ goto kfree_pkt;
+ }
+ e = get_m_prime(nvhdcp, pkt);
+ if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
+ e = -EFAULT;
+ goto kfree_pkt;
+ }
+ kfree(pkt);
+ return e;
+
+ case TEGRAIO_NVHDCP_READ_S:
+ pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+ if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
+ e = -EFAULT;
+ goto kfree_pkt;
+ }
+ e = get_s_prime(nvhdcp, pkt);
+ if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
+ e = -EFAULT;
+ goto kfree_pkt;
+ }
+ kfree(pkt);
+ return e;
+
+ case TEGRAIO_NVHDCP_RENEGOTIATE:
+ e = tegra_nvhdcp_renegotiate(nvhdcp);
+ break;
+ }
+
+ return e;
+kfree_pkt:
+ kfree(pkt);
+ return e;
+}
+
+static int nvhdcp_dev_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct tegra_nvhdcp *nvhdcp =
+ container_of(miscdev, struct tegra_nvhdcp, miscdev);
+ filp->private_data = nvhdcp;
+ return 0;
+}
+
+static int nvhdcp_dev_release(struct inode *inode, struct file *filp)
+{
+ filp->private_data = NULL;
+ return 0;
+}
+
+static const struct file_operations nvhdcp_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = nvhdcp_dev_ioctl,
+ .open = nvhdcp_dev_open,
+ .release = nvhdcp_dev_release,
+};
+
+/* we only support one AP right now, so this should only be called once. */
+struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi,
+ int id, int bus)
+{
+ static struct tegra_nvhdcp *nvhdcp; /* prevent multiple calls */
+ struct i2c_adapter *adapter;
+ int e;
+
+ if (nvhdcp)
+ return ERR_PTR(-EMFILE);
+
+ nvhdcp = kzalloc(sizeof(*nvhdcp), GFP_KERNEL);
+ if (!nvhdcp)
+ return ERR_PTR(-ENOMEM);
+
+ nvhdcp->id = id;
+ snprintf(nvhdcp->name, sizeof(nvhdcp->name), "nvhdcp%u", id);
+ nvhdcp->hdmi = hdmi;
+ mutex_init(&nvhdcp->lock);
+
+ strlcpy(nvhdcp->info.type, nvhdcp->name, sizeof(nvhdcp->info.type));
+ nvhdcp->bus = bus;
+ nvhdcp->info.addr = 0x74 >> 1;
+ nvhdcp->info.platform_data = nvhdcp;
+ nvhdcp->fail_count = 0;
+
+ adapter = i2c_get_adapter(bus);
+ if (!adapter) {
+ nvhdcp_err("can't get adapter for bus %d\n", bus);
+ e = -EBUSY;
+ goto free_nvhdcp;
+ }
+
+ nvhdcp->client = i2c_new_device(adapter, &nvhdcp->info);
+ i2c_put_adapter(adapter);
+
+ if (!nvhdcp->client) {
+ nvhdcp_err("can't create new device\n");
+ e = -EBUSY;
+ goto free_nvhdcp;
+ }
+
+ nvhdcp->state = STATE_UNAUTHENTICATED;
+
+ nvhdcp->downstream_wq = create_singlethread_workqueue(nvhdcp->name);
+ INIT_DELAYED_WORK(&nvhdcp->work, nvhdcp_downstream_worker);
+
+ nvhdcp->miscdev.minor = MISC_DYNAMIC_MINOR;
+ nvhdcp->miscdev.name = nvhdcp->name;
+ nvhdcp->miscdev.fops = &nvhdcp_fops;
+
+ e = misc_register(&nvhdcp->miscdev);
+ if (e)
+ goto free_workqueue;
+
+ nvhdcp_vdbg("%s(): created misc device %s\n", __func__, nvhdcp->name);
+
+ return nvhdcp;
+free_workqueue:
+ destroy_workqueue(nvhdcp->downstream_wq);
+ i2c_release_client(nvhdcp->client);
+free_nvhdcp:
+ kfree(nvhdcp);
+ nvhdcp_err("unable to create device.\n");
+ return ERR_PTR(e);
+}
+
+void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp)
+{
+ misc_deregister(&nvhdcp->miscdev);
+ tegra_nvhdcp_off(nvhdcp);
+ destroy_workqueue(nvhdcp->downstream_wq);
+ i2c_release_client(nvhdcp->client);
+ kfree(nvhdcp);
+}
diff --git a/drivers/video/tegra/dc/nvhdcp.h b/drivers/video/tegra/dc/nvhdcp.h
new file mode 100644
index 000000000000..90ea0be36d19
--- /dev/null
+++ b/drivers/video/tegra/dc/nvhdcp.h
@@ -0,0 +1,46 @@
+/*
+ * drivers/video/tegra/dc/nvhdcp.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H
+#define __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H
+#include <video/nvhdcp.h>
+
+struct tegra_nvhdcp;
+#ifdef CONFIG_TEGRA_NVHDCP
+void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd);
+int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol);
+void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp);
+void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp);
+struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi,
+ int id, int bus);
+void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp);
+#else
+static inline void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd) { }
+static inline int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol)
+{
+	return 0;
+}
+static inline void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp) { }
+static inline void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp) { }
+static inline struct tegra_nvhdcp *tegra_nvhdcp_create(
+		struct tegra_dc_hdmi_data *hdmi, int id, int bus)
+{
+	return NULL;
+}
+static inline void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp) { }
+#endif
+
+#endif
diff --git a/drivers/video/tegra/dc/nvsd.c b/drivers/video/tegra/dc/nvsd.c
new file mode 100644
index 000000000000..3f0ed4ab933d
--- /dev/null
+++ b/drivers/video/tegra/dc/nvsd.c
@@ -0,0 +1,904 @@
+/*
+ * drivers/video/tegra/dc/nvsd.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <mach/dc.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/backlight.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "nvsd.h"
+
+/* Elements for sysfs access */
+#define NVSD_ATTR(__name) static struct kobj_attribute nvsd_attr_##__name = \
+ __ATTR(__name, S_IRUGO|S_IWUSR, nvsd_settings_show, nvsd_settings_store)
+#define NVSD_ATTRS_ENTRY(__name) (&nvsd_attr_##__name.attr)
+#define IS_NVSD_ATTR(__name) (attr == &nvsd_attr_##__name)
+
+static ssize_t nvsd_settings_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
+static ssize_t nvsd_settings_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count);
+
+static ssize_t nvsd_registers_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
+NVSD_ATTR(enable);
+NVSD_ATTR(aggressiveness);
+NVSD_ATTR(phase_in_settings);
+NVSD_ATTR(phase_in_adjustments);
+NVSD_ATTR(bin_width);
+NVSD_ATTR(hw_update_delay);
+NVSD_ATTR(use_vid_luma);
+NVSD_ATTR(coeff);
+NVSD_ATTR(blp_time_constant);
+NVSD_ATTR(blp_step);
+NVSD_ATTR(fc_time_limit);
+NVSD_ATTR(fc_threshold);
+NVSD_ATTR(lut);
+NVSD_ATTR(bltf);
+static struct kobj_attribute nvsd_attr_registers =
+ __ATTR(registers, S_IRUGO, nvsd_registers_show, NULL);
+
+static struct attribute *nvsd_attrs[] = {
+ NVSD_ATTRS_ENTRY(enable),
+ NVSD_ATTRS_ENTRY(aggressiveness),
+ NVSD_ATTRS_ENTRY(phase_in_settings),
+ NVSD_ATTRS_ENTRY(phase_in_adjustments),
+ NVSD_ATTRS_ENTRY(bin_width),
+ NVSD_ATTRS_ENTRY(hw_update_delay),
+ NVSD_ATTRS_ENTRY(use_vid_luma),
+ NVSD_ATTRS_ENTRY(coeff),
+ NVSD_ATTRS_ENTRY(blp_time_constant),
+ NVSD_ATTRS_ENTRY(blp_step),
+ NVSD_ATTRS_ENTRY(fc_time_limit),
+ NVSD_ATTRS_ENTRY(fc_threshold),
+ NVSD_ATTRS_ENTRY(lut),
+ NVSD_ATTRS_ENTRY(bltf),
+ NVSD_ATTRS_ENTRY(registers),
+ NULL,
+};
+
+static struct attribute_group nvsd_attr_group = {
+ .attrs = nvsd_attrs,
+};
+
+static struct kobject *nvsd_kobj;
+
+/* shared brightness variable */
+static atomic_t *sd_brightness = NULL;
+/* shared boolean for manual K workaround */
+static atomic_t man_k_until_blank = ATOMIC_INIT(0);
+
+static u8 nvsd_get_bw_idx(struct tegra_dc_sd_settings *settings)
+{
+ u8 bw;
+
+ switch (settings->bin_width) {
+ default:
+ case -1:
+ /* A -1 bin-width indicates 'automatic'
+ based upon aggressiveness. */
+ settings->bin_width = -1;
+ switch (settings->aggressiveness) {
+ default:
+ case 0:
+ case 1:
+ bw = SD_BIN_WIDTH_ONE;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ bw = SD_BIN_WIDTH_TWO;
+ break;
+ case 5:
+ bw = SD_BIN_WIDTH_FOUR;
+ break;
+ }
+ break;
+ case 1:
+ bw = SD_BIN_WIDTH_ONE;
+ break;
+ case 2:
+ bw = SD_BIN_WIDTH_TWO;
+ break;
+ case 4:
+ bw = SD_BIN_WIDTH_FOUR;
+ break;
+ case 8:
+ bw = SD_BIN_WIDTH_EIGHT;
+ break;
+ }
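+	/* SD_BIN_WIDTH_* are pre-shifted register field values; shift down to get an array index */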
+ return bw >> 3;
+
+}
+
+static bool nvsd_phase_in_adjustments(struct tegra_dc *dc,
+ struct tegra_dc_sd_settings *settings)
+{
+ u8 step, cur_sd_brightness;
+ u16 target_k, cur_k;
+ u32 man_k, val;
+
+ cur_sd_brightness = atomic_read(sd_brightness);
+
+ target_k = tegra_dc_readl(dc, DC_DISP_SD_HW_K_VALUES);
+ target_k = SD_HW_K_R(target_k);
+ cur_k = tegra_dc_readl(dc, DC_DISP_SD_MAN_K_VALUES);
+ cur_k = SD_HW_K_R(cur_k);
+
+ /* read brightness value */
+ val = tegra_dc_readl(dc, DC_DISP_SD_BL_CONTROL);
+ val = SD_BLC_BRIGHTNESS(val);
+
+ step = settings->phase_adj_step;
+ if (cur_sd_brightness != val || target_k != cur_k) {
+ if (!step)
+ step = ADJ_PHASE_STEP;
+
+ /* Phase in Backlight and Pixel K
+			every ADJ_PHASE_STEP frames */
+ if (step-- & ADJ_PHASE_STEP == ADJ_PHASE_STEP) {
+
+ if (val != cur_sd_brightness)
+ val > cur_sd_brightness ?
+ (cur_sd_brightness++) :
+ (cur_sd_brightness--);
+
+ if (target_k != cur_k)
+ if (target_k > cur_k)
+ cur_k += K_STEP;
+ else
+ cur_k -= K_STEP;
+
+ /* Set manual k value */
+ man_k = SD_MAN_K_R(cur_k) |
+ SD_MAN_K_G(cur_k) | SD_MAN_K_B(cur_k);
+ tegra_dc_writel(dc, man_k, DC_DISP_SD_MAN_K_VALUES);
+ /* Set manual brightness value */
+ atomic_set(sd_brightness, cur_sd_brightness);
+ }
+ settings->phase_adj_step = step;
+ return true;
+ } else
+ return false;
+}
+
+/* phase in the luts based on the current and max step */
+static void nvsd_phase_in_luts(struct tegra_dc_sd_settings *settings,
+ struct tegra_dc *dc)
+{
+ u32 val;
+ u8 bw_idx;
+ int i;
+ u16 phase_settings_step = settings->phase_settings_step;
+ u16 num_phase_in_steps = settings->num_phase_in_steps;
+
+ bw_idx = nvsd_get_bw_idx(settings);
+
+ /* Phase in Final LUT */
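+	/* scale each LUT entry by phase_settings_step / num_phase_in_steps */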
+ for (i = 0; i < DC_DISP_SD_LUT_NUM; i++) {
+ val = SD_LUT_R((settings->lut[bw_idx][i].r *
+ phase_settings_step)/num_phase_in_steps) |
+ SD_LUT_G((settings->lut[bw_idx][i].g *
+ phase_settings_step)/num_phase_in_steps) |
+ SD_LUT_B((settings->lut[bw_idx][i].b *
+ phase_settings_step)/num_phase_in_steps);
+
+ tegra_dc_writel(dc, val, DC_DISP_SD_LUT(i));
+ }
+ /* Phase in Final BLTF */
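+	/* each point interpolates between 255 (pass-through) and its target value */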
+ for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
+ val = SD_BL_TF_POINT_0(255-((255-settings->bltf[bw_idx][i][0])
+ * phase_settings_step)/num_phase_in_steps) |
+ SD_BL_TF_POINT_1(255-((255-settings->bltf[bw_idx][i][1])
+ * phase_settings_step)/num_phase_in_steps) |
+ SD_BL_TF_POINT_2(255-((255-settings->bltf[bw_idx][i][2])
+ * phase_settings_step)/num_phase_in_steps) |
+ SD_BL_TF_POINT_3(255-((255-settings->bltf[bw_idx][i][3])
+ * phase_settings_step)/num_phase_in_steps);
+
+ tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
+ }
+}
+
+/* handle the commands that may be invoked for phase_in_settings */
+static void nvsd_cmd_handler(struct tegra_dc_sd_settings *settings,
+ struct tegra_dc *dc)
+{
+ u32 val;
+ u8 bw_idx, bw;
+
+ if (settings->cmd & ENABLE) {
+ settings->phase_settings_step++;
+ if (settings->phase_settings_step >=
+ settings->num_phase_in_steps)
+ settings->cmd &= ~ENABLE;
+
+ nvsd_phase_in_luts(settings, dc);
+ }
+ if (settings->cmd & DISABLE) {
+ settings->phase_settings_step--;
+ nvsd_phase_in_luts(settings, dc);
+ if (settings->phase_settings_step == 0) {
+ /* finish up aggressiveness phase in */
+ if (settings->cmd & AGG_CHG)
+ settings->aggressiveness = settings->final_agg;
+ settings->cmd = NO_CMD;
+ settings->enable = 0;
+ nvsd_init(dc, settings);
+ }
+ }
+ if (settings->cmd & AGG_CHG) {
+ if (settings->aggressiveness == settings->final_agg)
+ settings->cmd &= ~AGG_CHG;
+ if ((settings->cur_agg_step++ & (STEPS_PER_AGG_CHG - 1)) == 0) {
+ settings->final_agg > settings->aggressiveness ?
+ settings->aggressiveness++ :
+ settings->aggressiveness--;
+
+ /* Update aggressiveness value in HW */
+ val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
+ val &= ~SD_AGGRESSIVENESS(0x7);
+ val |= SD_AGGRESSIVENESS(settings->aggressiveness);
+
+ /* Adjust bin_width for automatic setting */
+ if (settings->bin_width == -1) {
+ bw_idx = nvsd_get_bw_idx(settings);
+
+ bw = bw_idx << 3;
+
+ val &= ~SD_BIN_WIDTH_MASK;
+ val |= bw;
+ }
+ tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+
+ nvsd_phase_in_luts(settings, dc);
+ }
+ }
+}
+
+static bool nvsd_update_enable(struct tegra_dc_sd_settings *settings,
+ int enable_val)
+{
+
+ if (enable_val != 1 && enable_val != 0)
+ return false;
+
+ if (!settings->cmd && settings->enable != enable_val) {
+ settings->num_phase_in_steps =
+ STEPS_PER_AGG_LVL*settings->aggressiveness;
+ settings->phase_settings_step = enable_val ?
+ 0 : settings->num_phase_in_steps;
+ }
+
+ if (settings->enable != enable_val || settings->cmd & DISABLE) {
+ settings->cmd &= ~(ENABLE | DISABLE);
+ if (!settings->enable && enable_val)
+ settings->cmd |= PHASE_IN;
+ settings->cmd |= enable_val ? ENABLE : DISABLE;
+ return true;
+ }
+
+ return false;
+}
+
+static bool nvsd_update_agg(struct tegra_dc_sd_settings *settings, int agg_val)
+{
+ int i;
+ int pri_lvl = SD_AGG_PRI_LVL(agg_val);
+ int agg_lvl = SD_GET_AGG(agg_val);
+ struct tegra_dc_sd_agg_priorities *sd_agg_priorities =
+ &settings->agg_priorities;
+
+ if (agg_lvl > 5 || agg_lvl < 0)
+ return false;
+ else if (agg_lvl == 0 && pri_lvl == 0)
+ return false;
+
+ if (pri_lvl >= 0 && pri_lvl < 4)
+ sd_agg_priorities->agg[pri_lvl] = agg_lvl;
+
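+	/* pick the aggressiveness requested by the highest non-zero priority level */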
+ for (i = NUM_AGG_PRI_LVLS - 1; i >= 0; i--) {
+ if (sd_agg_priorities->agg[i])
+ break;
+ }
+
+ sd_agg_priorities->pri_lvl = i;
+ pri_lvl = i;
+ agg_lvl = sd_agg_priorities->agg[i];
+
+ if (settings->phase_in_settings && settings->enable &&
+ settings->aggressiveness != agg_lvl) {
+
+ settings->final_agg = agg_lvl;
+ settings->cmd |= AGG_CHG;
+ settings->cur_agg_step = 0;
+ return true;
+ } else if (settings->aggressiveness != agg_lvl) {
+ settings->aggressiveness = agg_lvl;
+ return true;
+ }
+
+ return false;
+}
+
+/* Functional initialization */
+void nvsd_init(struct tegra_dc *dc, struct tegra_dc_sd_settings *settings)
+{
+ u32 i = 0;
+ u32 val = 0;
+ u32 bw_idx = 0;
+ /* TODO: check if HW says SD's available */
+
+ /* If SD's not present or disabled, clear the register and return. */
+ if (!settings || settings->enable == 0) {
+ /* clear the brightness val, too. */
+ if (sd_brightness)
+ atomic_set(sd_brightness, 255);
+
+ sd_brightness = NULL;
+
+ if (settings)
+ settings->phase_settings_step = 0;
+ tegra_dc_writel(dc, 0, DC_DISP_SD_CONTROL);
+ return;
+ }
+
+ dev_dbg(&dc->ndev->dev, "NVSD Init:\n");
+
+ /* init agg_priorities */
+ if (!settings->agg_priorities.agg[0])
+ settings->agg_priorities.agg[0] = settings->aggressiveness;
+
+ /* WAR: Settings will not be valid until the next flip.
+ * Thus, set manual K to either HW's current value (if
+ * we're already enabled) or a non-effective value (if
+ * we're about to enable). */
+ val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
+
+ if (val & SD_ENABLE_NORMAL)
+ i = tegra_dc_readl(dc, DC_DISP_SD_HW_K_VALUES);
+ else
+ i = 0; /* 0 values for RGB = 1.0, i.e. non-affected */
+
+ tegra_dc_writel(dc, i, DC_DISP_SD_MAN_K_VALUES);
+ /* Enable manual correction mode here so that changing the
+	 * settings won't immediately impact display behavior. */
+ val |= SD_CORRECTION_MODE_MAN;
+ tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+
+ bw_idx = nvsd_get_bw_idx(settings);
+
+ /* Write LUT */
+ if (!settings->cmd) {
+ dev_dbg(&dc->ndev->dev, " LUT:\n");
+
+ for (i = 0; i < DC_DISP_SD_LUT_NUM; i++) {
+ val = SD_LUT_R(settings->lut[bw_idx][i].r) |
+ SD_LUT_G(settings->lut[bw_idx][i].g) |
+ SD_LUT_B(settings->lut[bw_idx][i].b);
+ tegra_dc_writel(dc, val, DC_DISP_SD_LUT(i));
+
+ dev_dbg(&dc->ndev->dev, " %d: 0x%08x\n", i, val);
+ }
+ }
+
+ /* Write BL TF */
+ if (!settings->cmd) {
+ dev_dbg(&dc->ndev->dev, " BL_TF:\n");
+
+ for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
+ val = SD_BL_TF_POINT_0(settings->bltf[bw_idx][i][0]) |
+ SD_BL_TF_POINT_1(settings->bltf[bw_idx][i][1]) |
+ SD_BL_TF_POINT_2(settings->bltf[bw_idx][i][2]) |
+ SD_BL_TF_POINT_3(settings->bltf[bw_idx][i][3]);
+
+ tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
+
+ dev_dbg(&dc->ndev->dev, " %d: 0x%08x\n", i, val);
+ }
+ } else if ((settings->cmd & PHASE_IN)) {
+ settings->cmd &= ~PHASE_IN;
+ /* Write NO_OP values for BLTF */
+ for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
+ val = SD_BL_TF_POINT_0(0xFF) |
+ SD_BL_TF_POINT_1(0xFF) |
+ SD_BL_TF_POINT_2(0xFF) |
+ SD_BL_TF_POINT_3(0xFF);
+
+ tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
+
+ dev_dbg(&dc->ndev->dev, " %d: 0x%08x\n", i, val);
+ }
+ }
+
+ /* Set step correctly on init */
+ if (!settings->cmd && settings->phase_in_settings) {
+ settings->num_phase_in_steps = STEPS_PER_AGG_LVL *
+ settings->aggressiveness;
+ settings->phase_settings_step = settings->enable ?
+ settings->num_phase_in_steps : 0;
+ }
+
+ /* Write Coeff */
+ val = SD_CSC_COEFF_R(settings->coeff.r) |
+ SD_CSC_COEFF_G(settings->coeff.g) |
+ SD_CSC_COEFF_B(settings->coeff.b);
+ tegra_dc_writel(dc, val, DC_DISP_SD_CSC_COEFF);
+ dev_dbg(&dc->ndev->dev, " COEFF: 0x%08x\n", val);
+
+ /* Write BL Params */
+ val = SD_BLP_TIME_CONSTANT(settings->blp.time_constant) |
+ SD_BLP_STEP(settings->blp.step);
+ tegra_dc_writel(dc, val, DC_DISP_SD_BL_PARAMETERS);
+ dev_dbg(&dc->ndev->dev, " BLP: 0x%08x\n", val);
+
+ /* Write Auto/Manual PWM */
+ val = (settings->use_auto_pwm) ? SD_BLC_MODE_AUTO : SD_BLC_MODE_MAN;
+ tegra_dc_writel(dc, val, DC_DISP_SD_BL_CONTROL);
+ dev_dbg(&dc->ndev->dev, " BL_CONTROL: 0x%08x\n", val);
+
+ /* Write Flicker Control */
+ val = SD_FC_TIME_LIMIT(settings->fc.time_limit) |
+ SD_FC_THRESHOLD(settings->fc.threshold);
+ tegra_dc_writel(dc, val, DC_DISP_SD_FLICKER_CONTROL);
+ dev_dbg(&dc->ndev->dev, " FLICKER_CONTROL: 0x%08x\n", val);
+
+ /* Manage SD Control */
+ val = 0;
+ /* Stay in manual correction mode until the next flip. */
+ val |= SD_CORRECTION_MODE_MAN;
+ /* Enable / One-Shot */
+ val |= (settings->enable == 2) ?
+ (SD_ENABLE_ONESHOT | SD_ONESHOT_ENABLE) :
+ SD_ENABLE_NORMAL;
+ /* HW Update Delay */
+ val |= SD_HW_UPDATE_DLY(settings->hw_update_delay);
+ /* Video Luma */
+ val |= (settings->use_vid_luma) ? SD_USE_VID_LUMA : 0;
+ /* Aggressiveness */
+ val |= SD_AGGRESSIVENESS(settings->aggressiveness);
+ /* Bin Width (value derived from bw_idx) */
+ val |= bw_idx << 3;
+ /* Finally, Write SD Control */
+ tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+ dev_dbg(&dc->ndev->dev, " SD_CONTROL: 0x%08x\n", val);
+
+ /* set the brightness pointer */
+ sd_brightness = settings->sd_brightness;
+
+ /* note that we're in manual K until the next flip */
+ atomic_set(&man_k_until_blank, 1);
+}
+
+/* Periodic update */
+bool nvsd_update_brightness(struct tegra_dc *dc)
+{
+ u32 val = 0;
+ int cur_sd_brightness;
+ struct tegra_dc_sd_settings *settings = dc->out->sd_settings;
+
+ if (sd_brightness) {
+ if (atomic_read(&man_k_until_blank) &&
+ !settings->phase_in_adjustments) {
+ val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
+ val &= ~SD_CORRECTION_MODE_MAN;
+ tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+ atomic_set(&man_k_until_blank, 0);
+ }
+
+ if (settings->cmd)
+ nvsd_cmd_handler(settings, dc);
+
+ /* nvsd_cmd_handler may turn off didim */
+ if (!settings->enable)
+ return true;
+
+ cur_sd_brightness = atomic_read(sd_brightness);
+
+ /* read brightness value */
+ val = tegra_dc_readl(dc, DC_DISP_SD_BL_CONTROL);
+ val = SD_BLC_BRIGHTNESS(val);
+
+ if (settings->phase_in_adjustments) {
+ return nvsd_phase_in_adjustments(dc, settings);
+ } else if (val != (u32)cur_sd_brightness) {
+ /* set brightness value and note the update */
+ atomic_set(sd_brightness, (int)val);
+ return true;
+ }
+ }
+
+ /* No update needed. */
+ return false;
+}
+
+static ssize_t nvsd_lut_show(struct tegra_dc_sd_settings *sd_settings,
+ char *buf, ssize_t res)
+{
+ u32 i;
+ u32 j;
+
+ for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+ res += snprintf(buf + res, PAGE_SIZE - res,
+ "Bin Width: %d\n", 1 << i);
+
+ for (j = 0; j < DC_DISP_SD_LUT_NUM; j++) {
+ res += snprintf(buf + res,
+ PAGE_SIZE - res,
+ "%d: R: %3d / G: %3d / B: %3d\n",
+ j,
+ sd_settings->lut[i][j].r,
+ sd_settings->lut[i][j].g,
+ sd_settings->lut[i][j].b);
+ }
+ }
+ return res;
+}
+
+static ssize_t nvsd_bltf_show(struct tegra_dc_sd_settings *sd_settings,
+ char *buf, ssize_t res)
+{
+ u32 i;
+ u32 j;
+
+ for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+ res += snprintf(buf + res, PAGE_SIZE - res,
+ "Bin Width: %d\n", 1 << i);
+
+ for (j = 0; j < DC_DISP_SD_BL_TF_NUM; j++) {
+ res += snprintf(buf + res,
+ PAGE_SIZE - res,
+ "%d: 0: %3d / 1: %3d / 2: %3d / 3: %3d\n",
+ j,
+ sd_settings->bltf[i][j][0],
+ sd_settings->bltf[i][j][1],
+ sd_settings->bltf[i][j][2],
+ sd_settings->bltf[i][j][3]);
+ }
+ }
+ return res;
+}
+
+/* Sysfs accessors */
+static ssize_t nvsd_settings_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct device *dev = container_of((kobj->parent), struct device, kobj);
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+ ssize_t res = 0;
+
+ if (sd_settings) {
+ if (IS_NVSD_ATTR(enable))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->enable);
+ else if (IS_NVSD_ATTR(aggressiveness))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->aggressiveness);
+ else if (IS_NVSD_ATTR(phase_in_settings))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->phase_in_settings);
+ else if (IS_NVSD_ATTR(phase_in_adjustments))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->phase_in_adjustments);
+ else if (IS_NVSD_ATTR(bin_width))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->bin_width);
+ else if (IS_NVSD_ATTR(hw_update_delay))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->hw_update_delay);
+ else if (IS_NVSD_ATTR(use_vid_luma))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->use_vid_luma);
+ else if (IS_NVSD_ATTR(coeff))
+ res = snprintf(buf, PAGE_SIZE,
+ "R: %d / G: %d / B: %d\n",
+ sd_settings->coeff.r,
+ sd_settings->coeff.g,
+ sd_settings->coeff.b);
+ else if (IS_NVSD_ATTR(blp_time_constant))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->blp.time_constant);
+ else if (IS_NVSD_ATTR(blp_step))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->blp.step);
+ else if (IS_NVSD_ATTR(fc_time_limit))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->fc.time_limit);
+ else if (IS_NVSD_ATTR(fc_threshold))
+ res = snprintf(buf, PAGE_SIZE, "%d\n",
+ sd_settings->fc.threshold);
+ else if (IS_NVSD_ATTR(lut))
+ res = nvsd_lut_show(sd_settings, buf, res);
+ else if (IS_NVSD_ATTR(bltf))
+ res = nvsd_bltf_show(sd_settings, buf, res);
+ else
+ res = -EINVAL;
+ } else {
+ /* This shouldn't be reachable. But just in case... */
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+#define nvsd_check_and_update(_min, _max, _varname) { \
+ int val = simple_strtol(buf, NULL, 10); \
+ if (val >= _min && val <= _max) { \
+ sd_settings->_varname = val; \
+ settings_updated = true; \
+ } }
+
+#define nvsd_get_multi(_ele, _num, _act, _min, _max) { \
+ char *b, *c, *orig_b; \
+ b = orig_b = kstrdup(buf, GFP_KERNEL); \
+ for (_act = 0; _act < _num; _act++) { \
+ if (!b) \
+ break; \
+ b = strim(b); \
+ c = strsep(&b, " "); \
+ if (!strlen(c)) \
+ break; \
+ _ele[_act] = simple_strtol(c, NULL, 10); \
+ if (_ele[_act] < _min || _ele[_act] > _max) \
+ break; \
+ } \
+ kfree(orig_b); \
+}
+
+static int nvsd_lut_store(struct tegra_dc_sd_settings *sd_settings,
+ const char *buf)
+{
+ int ele[3 * DC_DISP_SD_LUT_NUM * NUM_BIN_WIDTHS];
+ int i = 0;
+ int j = 0;
+ int num = 3 * DC_DISP_SD_LUT_NUM * NUM_BIN_WIDTHS;
+
+ nvsd_get_multi(ele, num, i, 0, 255);
+
+ if (i != num)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+ for (j = 0; j < DC_DISP_SD_LUT_NUM; j++) {
+ sd_settings->lut[i][j].r =
+ ele[(i * DC_DISP_SD_LUT_NUM + j) * 3 + 0];
+ sd_settings->lut[i][j].g =
+ ele[(i * DC_DISP_SD_LUT_NUM + j) * 3 + 1];
+ sd_settings->lut[i][j].b =
+ ele[(i * DC_DISP_SD_LUT_NUM + j) * 3 + 2];
+ }
+ }
+ return 0;
+}
+
+static int nvsd_bltf_store(struct tegra_dc_sd_settings *sd_settings,
+ const char *buf)
+{
+ int ele[4 * DC_DISP_SD_BL_TF_NUM * NUM_BIN_WIDTHS];
+ int i = 0, j = 0, num = ARRAY_SIZE(ele);
+
+ nvsd_get_multi(ele, num, i, 0, 255);
+
+ if (i != num)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+ for (j = 0; j < DC_DISP_SD_BL_TF_NUM; j++) {
+ size_t base = (i * DC_DISP_SD_BL_TF_NUM + j) * 4;
+ sd_settings->bltf[i][j][0] = ele[base + 0];
+ sd_settings->bltf[i][j][1] = ele[base + 1];
+ sd_settings->bltf[i][j][2] = ele[base + 2];
+ sd_settings->bltf[i][j][3] = ele[base + 3];
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t nvsd_settings_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct device *dev = container_of((kobj->parent), struct device, kobj);
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+ ssize_t res = count;
+ bool settings_updated = false;
+ long int result;
+ int err;
+
+ if (sd_settings) {
+ if (IS_NVSD_ATTR(enable)) {
+ if (sd_settings->phase_in_settings) {
+ err = strict_strtol(buf, 10, &result);
+ if (err)
+ return err;
+
+ if (nvsd_update_enable(sd_settings, result))
+ nvsd_check_and_update(1, 1, enable);
+
+ } else {
+ nvsd_check_and_update(0, 1, enable);
+ }
+ } else if (IS_NVSD_ATTR(aggressiveness)) {
+ err = strict_strtol(buf, 10, &result);
+ if (err)
+ return err;
+
+ if (nvsd_update_agg(sd_settings, result)
+ && !sd_settings->phase_in_settings)
+ settings_updated = true;
+
+ } else if (IS_NVSD_ATTR(phase_in_settings)) {
+ nvsd_check_and_update(0, 1, phase_in_settings);
+ } else if (IS_NVSD_ATTR(phase_in_adjustments)) {
+ nvsd_check_and_update(0, 1, phase_in_adjustments);
+ } else if (IS_NVSD_ATTR(bin_width)) {
+ nvsd_check_and_update(0, 8, bin_width);
+ } else if (IS_NVSD_ATTR(hw_update_delay)) {
+ nvsd_check_and_update(0, 2, hw_update_delay);
+ } else if (IS_NVSD_ATTR(use_vid_luma)) {
+ nvsd_check_and_update(0, 1, use_vid_luma);
+ } else if (IS_NVSD_ATTR(coeff)) {
+ int ele[3], i = 0, num = 3;
+ nvsd_get_multi(ele, num, i, 0, 15);
+
+ if (i == num) {
+ sd_settings->coeff.r = ele[0];
+ sd_settings->coeff.g = ele[1];
+ sd_settings->coeff.b = ele[2];
+ settings_updated = true;
+ } else {
+ res = -EINVAL;
+ }
+ } else if (IS_NVSD_ATTR(blp_time_constant)) {
+ nvsd_check_and_update(0, 1024, blp.time_constant);
+ } else if (IS_NVSD_ATTR(blp_step)) {
+ nvsd_check_and_update(0, 255, blp.step);
+ } else if (IS_NVSD_ATTR(fc_time_limit)) {
+ nvsd_check_and_update(0, 255, fc.time_limit);
+ } else if (IS_NVSD_ATTR(fc_threshold)) {
+ nvsd_check_and_update(0, 255, fc.threshold);
+ } else if (IS_NVSD_ATTR(lut)) {
+ if (nvsd_lut_store(sd_settings, buf))
+ res = -EINVAL;
+ else
+ settings_updated = true;
+ } else if (IS_NVSD_ATTR(bltf)) {
+ if (nvsd_bltf_store(sd_settings, buf))
+ res = -EINVAL;
+ else
+ settings_updated = true;
+ } else {
+ res = -EINVAL;
+ }
+
+ /* Re-init if our settings were updated. */
+ if (settings_updated) {
+ mutex_lock(&dc->lock);
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&dc->lock);
+
+ nvsd_init(dc, sd_settings);
+
+ /* Update backlight state IFF we're disabling! */
+ if (!sd_settings->enable && sd_settings->bl_device) {
+ /* Do the actual brightness update outside of
+ * the mutex */
+ struct platform_device *pdev =
+ sd_settings->bl_device;
+ struct backlight_device *bl =
+ platform_get_drvdata(pdev);
+
+ if (bl)
+ backlight_update_status(bl);
+ }
+ }
+ } else {
+ /* This shouldn't be reachable. But just in case... */
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+#define NVSD_PRINT_REG(__name) { \
+ u32 val = tegra_dc_readl(dc, __name); \
+ res += snprintf(buf + res, PAGE_SIZE - res, #__name ": 0x%08x\n", \
+ val); \
+}
+
+#define NVSD_PRINT_REG_ARRAY(__name) { \
+ u32 val = 0, i = 0; \
+ res += snprintf(buf + res, PAGE_SIZE - res, #__name ":\n"); \
+ for (i = 0; i < __name##_NUM; i++) { \
+ val = tegra_dc_readl(dc, __name(i)); \
+ res += snprintf(buf + res, PAGE_SIZE - res, " %d: 0x%08x\n", \
+ i, val); \
+ } \
+}
+
+static ssize_t nvsd_registers_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct device *dev = container_of((kobj->parent), struct device, kobj);
+ struct nvhost_device *ndev = to_nvhost_device(dev);
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+ ssize_t res = 0;
+
+ mutex_lock(&dc->lock);
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -ENODEV;
+ }
+
+ mutex_unlock(&dc->lock);
+ NVSD_PRINT_REG(DC_DISP_SD_CONTROL);
+ NVSD_PRINT_REG(DC_DISP_SD_CSC_COEFF);
+ NVSD_PRINT_REG_ARRAY(DC_DISP_SD_LUT);
+ NVSD_PRINT_REG(DC_DISP_SD_FLICKER_CONTROL);
+ NVSD_PRINT_REG(DC_DISP_SD_PIXEL_COUNT);
+ NVSD_PRINT_REG_ARRAY(DC_DISP_SD_HISTOGRAM);
+ NVSD_PRINT_REG(DC_DISP_SD_BL_PARAMETERS);
+ NVSD_PRINT_REG_ARRAY(DC_DISP_SD_BL_TF);
+ NVSD_PRINT_REG(DC_DISP_SD_BL_CONTROL);
+ NVSD_PRINT_REG(DC_DISP_SD_HW_K_VALUES);
+ NVSD_PRINT_REG(DC_DISP_SD_MAN_K_VALUES);
+
+ return res;
+}
+
+/* Sysfs initializer */
+int nvsd_create_sysfs(struct device *dev)
+{
+ int retval = 0;
+
+ nvsd_kobj = kobject_create_and_add("smartdimmer", &dev->kobj);
+
+ if (!nvsd_kobj)
+ return -ENOMEM;
+
+ retval = sysfs_create_group(nvsd_kobj, &nvsd_attr_group);
+
+ if (retval) {
+ kobject_put(nvsd_kobj);
+ dev_err(dev, "%s: failed to create attributes\n", __func__);
+ }
+
+ return retval;
+}
+
+/* Sysfs destructor */
+void __devexit nvsd_remove_sysfs(struct device *dev)
+{
+ if (nvsd_kobj) {
+ sysfs_remove_group(nvsd_kobj, &nvsd_attr_group);
+ kobject_put(nvsd_kobj);
+ }
+}
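The "smartdimmer" sysfs group created above accepts single decimal values for the scalar attributes (enable, aggressiveness, bin_width, hw_update_delay, ...) and space-separated decimal lists for coeff, lut, and bltf, which are parsed by nvsd_get_multi. A minimal userspace sketch of driving one of those nodes follows; the sysfs path is an assumption (it depends on how the nvhost display device is named on a given board), so it is not taken from this patch.

/* Hypothetical example: program the smartdimmer CSC coefficients from
 * userspace. The path below is an assumption; find the real node with
 * "find /sys -name smartdimmer". */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/devices/platform/host1x/tegradc.0/smartdimmer/coeff";
        const char *coeff = "4 8 4\n";  /* R G B, each 0..15 per nvsd_settings_store() */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, coeff, strlen(coeff)) < 0)
                perror("write");
        close(fd);
        return 0;
}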
diff --git a/drivers/video/tegra/dc/nvsd.h b/drivers/video/tegra/dc/nvsd.h
new file mode 100644
index 000000000000..f7fc4a1ead6e
--- /dev/null
+++ b/drivers/video/tegra/dc/nvsd.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/video/tegra/dc/nvsd.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_NVSD_H
+#define __DRIVERS_VIDEO_TEGRA_DC_NVSD_H
+
+void nvsd_init(struct tegra_dc *dc, struct tegra_dc_sd_settings *settings);
+bool nvsd_update_brightness(struct tegra_dc *dc);
+int nvsd_create_sysfs(struct device *dev);
+void __devexit nvsd_remove_sysfs(struct device *dev);
+
+#endif
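The header keeps the smartdimmer interface to four entry points. The sketch below shows the call sequence a display-controller driver is expected to wire up; the surrounding hook names (example_dc_probe and friends) are illustrative only, since the real hookup lives in dc.c, which is not part of this hunk.

/* Illustrative call sequence only; assumes <mach/dc.h>, <linux/nvhost.h>
 * and "nvsd.h" are already included by the calling file. */
static int example_dc_probe(struct nvhost_device *ndev)
{
        /* Expose /sys/.../smartdimmer once per display controller. */
        return nvsd_create_sysfs(&ndev->dev);
}

static void example_dc_enable(struct tegra_dc *dc)
{
        /* Program the SD registers whenever the head is (re)enabled. */
        if (dc->out && dc->out->sd_settings)
                nvsd_init(dc, dc->out->sd_settings);
}

static void example_dc_vblank(struct tegra_dc *dc)
{
        /* Returns true when the shared brightness value changed and the
         * panel backlight should be re-evaluated. */
        if (nvsd_update_brightness(dc)) {
                /* schedule backlight_update_status() on the panel backlight */
        }
}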
diff --git a/drivers/video/tegra/dc/overlay.c b/drivers/video/tegra/dc/overlay.c
new file mode 100644
index 000000000000..446fc4d407eb
--- /dev/null
+++ b/drivers/video/tegra/dc/overlay.c
@@ -0,0 +1,898 @@
+/*
+ * drivers/video/tegra/dc/overlay.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_overlay.h>
+#include <linux/uaccess.h>
+#include <drm/drm_fixed.h>
+
+#include <asm/atomic.h>
+
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <linux/nvhost.h>
+
+#include "dc_priv.h"
+#include "../nvmap/nvmap.h"
+#include "overlay.h"
+
+/* Minimum extra shot for DIDIM if n shot is enabled. */
+#define TEGRA_DC_DIDIM_MIN_SHOT 1
+
+DEFINE_MUTEX(tegra_flip_lock);
+
+struct overlay_client;
+
+struct overlay {
+ struct overlay_client *owner;
+};
+
+struct tegra_overlay_info {
+ struct miscdevice dev;
+
+ struct list_head clients;
+ spinlock_t clients_lock;
+
+ struct overlay overlays[DC_N_WINDOWS];
+ struct mutex overlays_lock;
+
+ struct nvhost_device *ndev;
+
+ struct nvmap_client *overlay_nvmap;
+
+ struct tegra_dc *dc;
+
+ struct tegra_dc_blend blend;
+
+ u32 n_shot;
+ u32 overlay_ref;
+ struct mutex lock;
+ struct workqueue_struct *flip_wq;
+
+ /* Big enough for tegra_dc%u when %u < 10 */
+ char name[10];
+};
+
+struct overlay_client {
+ struct tegra_overlay_info *dev;
+ struct list_head list;
+ struct task_struct *task;
+ struct nvmap_client *user_nvmap;
+};
+
+struct tegra_overlay_flip_win {
+ struct tegra_overlay_windowattr attr;
+ struct nvmap_handle_ref *handle;
+ dma_addr_t phys_addr;
+};
+
+struct tegra_overlay_flip_data {
+ bool didim_work;
+ u32 flags;
+ u32 nr_unpin;
+ u32 syncpt_max;
+ struct work_struct work;
+ struct tegra_overlay_info *overlay;
+ struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
+ struct tegra_overlay_flip_win win[TEGRA_FB_FLIP_N_WINDOWS];
+};
+
+static void tegra_overlay_flip_worker(struct work_struct *work);
+
+/* Overlay window manipulation */
+static int tegra_overlay_pin_window(struct tegra_overlay_info *overlay,
+ struct tegra_overlay_flip_win *flip_win,
+ struct nvmap_client *user_nvmap)
+{
+ struct nvmap_handle_ref *win_dupe;
+ struct nvmap_handle *win_handle;
+ unsigned long buff_id = flip_win->attr.buff_id;
+
+ if (!buff_id)
+ return 0;
+
+ win_handle = nvmap_get_handle_id(user_nvmap, buff_id);
+ if (win_handle == NULL) {
+ dev_err(&overlay->ndev->dev, "%s: flip invalid "
+ "handle %08lx\n", current->comm, buff_id);
+ return -EPERM;
+ }
+
+ /* duplicate the new framebuffer's handle into the fb driver's
+ * nvmap context, to ensure that the handle won't be freed as
+ * long as it is in-use by the fb driver */
+ win_dupe = nvmap_duplicate_handle_id(overlay->overlay_nvmap, buff_id);
+ nvmap_handle_put(win_handle);
+
+ if (IS_ERR(win_dupe)) {
+ dev_err(&overlay->ndev->dev, "couldn't duplicate handle\n");
+ return PTR_ERR(win_dupe);
+ }
+
+ flip_win->handle = win_dupe;
+
+ flip_win->phys_addr = nvmap_pin(overlay->overlay_nvmap, win_dupe);
+ if (IS_ERR((void *)flip_win->phys_addr)) {
+ dev_err(&overlay->ndev->dev, "couldn't pin handle\n");
+ nvmap_free(overlay->overlay_nvmap, win_dupe);
+ return PTR_ERR((void *)flip_win->phys_addr);
+ }
+
+ return 0;
+}
+
+static int tegra_overlay_set_windowattr(struct tegra_overlay_info *overlay,
+ struct tegra_dc_win *win,
+ const struct tegra_overlay_flip_win *flip_win)
+{
+ int xres, yres;
+ if (flip_win->handle == NULL) {
+ win->flags = 0;
+ win->cur_handle = NULL;
+ return 0;
+ }
+
+ xres = overlay->dc->mode.h_active;
+ yres = overlay->dc->mode.v_active;
+
+ win->flags = TEGRA_WIN_FLAG_ENABLED;
+ if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_PREMULT)
+ win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
+ else if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_COVERAGE)
+ win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
+ if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_INVERT_H)
+ win->flags |= TEGRA_WIN_FLAG_INVERT_H;
+ if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_INVERT_V)
+ win->flags |= TEGRA_WIN_FLAG_INVERT_V;
+ if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_TILED)
+ win->flags |= TEGRA_WIN_FLAG_TILED;
+
+ win->fmt = flip_win->attr.pixformat;
+ win->x.full = dfixed_const(flip_win->attr.x);
+ win->y.full = dfixed_const(flip_win->attr.y);
+ win->w.full = dfixed_const(flip_win->attr.w);
+ win->h.full = dfixed_const(flip_win->attr.h);
+ win->out_x = flip_win->attr.out_x;
+ win->out_y = flip_win->attr.out_y;
+ win->out_w = flip_win->attr.out_w;
+ win->out_h = flip_win->attr.out_h;
+
+ WARN_ONCE(win->out_x >= xres,
+ "%s:application window x offset(%d) exceeds display width(%d)\n",
+ dev_name(&win->dc->ndev->dev), win->out_x, xres);
+ WARN_ONCE(win->out_y >= yres,
+ "%s:application window y offset(%d) exceeds display height(%d)\n",
+ dev_name(&win->dc->ndev->dev), win->out_y, yres);
+ WARN_ONCE(win->out_x + win->out_w > xres && win->out_x < xres,
+ "%s:application window width(%d) exceeds display width(%d)\n",
+ dev_name(&win->dc->ndev->dev), win->out_x + win->out_w, xres);
+ WARN_ONCE(win->out_y + win->out_h > yres && win->out_y < yres,
+ "%s:application window height(%d) exceeds display height(%d)\n",
+ dev_name(&win->dc->ndev->dev), win->out_y + win->out_h, yres);
+
+ if (((win->out_x + win->out_w) > xres) && (win->out_x < xres)) {
+ long new_w = xres - win->out_x;
+ u64 in_w = win->w.full * new_w;
+ do_div(in_w, win->out_w);
+ win->w.full = lower_32_bits(in_w);
+ win->out_w = new_w;
+ }
+ if (((win->out_y + win->out_h) > yres) && (win->out_y < yres)) {
+ long new_h = yres - win->out_y;
+ u64 in_h = win->h.full * new_h;
+ do_div(in_h, win->out_h);
+ win->h.full = lower_32_bits(in_h);
+ win->out_h = new_h;
+ }
+
+ win->z = flip_win->attr.z;
+ win->cur_handle = flip_win->handle;
+
+ /* STOPSHIP verify that this won't read outside of the surface */
+ win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;
+ win->phys_addr_u = flip_win->phys_addr + flip_win->attr.offset_u;
+ win->phys_addr_v = flip_win->phys_addr + flip_win->attr.offset_v;
+ win->stride = flip_win->attr.stride;
+ win->stride_uv = flip_win->attr.stride_uv;
+
+ if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
+ nvhost_syncpt_wait_timeout(&overlay->ndev->host->syncpt,
+ flip_win->attr.pre_syncpt_id,
+ flip_win->attr.pre_syncpt_val,
+ msecs_to_jiffies(500),
+ NULL);
+ }
+
+ /* Store the blend state in case we need to reorder later */
+ overlay->blend.z[win->idx] = win->z;
+ overlay->blend.flags[win->idx] = win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
+
+ return 0;
+}
+
+/*
+ * Overlay policy for premult is dst alpha, which needs reassignment
+ * of blend settings for the DC.
+ */
+static void tegra_overlay_blend_reorder(struct tegra_dc_blend *blend,
+ struct tegra_dc_win *windows[])
+{
+ int idx, below;
+
+ /* Copy across the original blend state to each window */
+ for (idx = 0; idx < DC_N_WINDOWS; idx++) {
+ windows[idx]->z = blend->z[idx];
+ windows[idx]->flags &= ~TEGRA_WIN_BLEND_FLAGS_MASK;
+ windows[idx]->flags |= blend->flags[idx];
+ }
+
+ /* Find a window with PreMult */
+ for (idx = 0; idx < DC_N_WINDOWS; idx++) {
+ if (blend->flags[idx] == TEGRA_WIN_FLAG_BLEND_PREMULT)
+ break;
+ }
+ if (idx == DC_N_WINDOWS)
+ return;
+
+ /* Find the window directly below it */
+ for (below = 0; below < DC_N_WINDOWS; below++) {
+ if (below == idx)
+ continue;
+ if (blend->z[below] > blend->z[idx])
+ break;
+ }
+ if (below == DC_N_WINDOWS)
+ return;
+
+ /* Switch the flags and the ordering */
+ windows[idx]->z = blend->z[below];
+ windows[idx]->flags &= ~TEGRA_WIN_BLEND_FLAGS_MASK;
+ windows[idx]->flags |= blend->flags[below];
+ windows[below]->z = blend->z[idx];
+ windows[below]->flags &= ~TEGRA_WIN_BLEND_FLAGS_MASK;
+ windows[below]->flags |= blend->flags[idx];
+}
+
+static int tegra_overlay_flip_didim(struct tegra_overlay_flip_data *data)
+{
+ mutex_lock(&tegra_flip_lock);
+ INIT_WORK(&data->work, tegra_overlay_flip_worker);
+
+ queue_work(data->overlay->flip_wq, &data->work);
+
+ mutex_unlock(&tegra_flip_lock);
+
+ return 0;
+}
+
+static void tegra_overlay_n_shot(struct tegra_overlay_flip_data *data,
+ struct nvmap_handle_ref **unpin_handles, int *nr_unpin)
+{
+ int i;
+ struct tegra_overlay_info *overlay = data->overlay;
+ u32 didim_delay = overlay->dc->out->sd_settings->hw_update_delay;
+ u32 didim_enable = overlay->dc->out->sd_settings->enable;
+
+ mutex_lock(&overlay->lock);
+
+ if (data->didim_work) {
+ /* Increment sync point if we finish n shot;
+ * otherwise send overlay flip request. */
+ if (overlay->n_shot)
+ overlay->n_shot--;
+
+ if (overlay->n_shot && didim_enable) {
+ tegra_overlay_flip_didim(data);
+ mutex_unlock(&overlay->lock);
+ return;
+ } else {
+ *nr_unpin = data->nr_unpin;
+ for (i = 0; i < *nr_unpin; i++)
+ unpin_handles[i] = data->unpin_handles[i];
+ tegra_dc_incr_syncpt_min(overlay->dc, 0,
+ data->syncpt_max);
+ }
+ } else {
+ overlay->overlay_ref--;
+ /* If no new flip request in the queue, we will send
+ * the last frame n times for DIDIM */
+ if (!overlay->overlay_ref && didim_enable)
+ overlay->n_shot = TEGRA_DC_DIDIM_MIN_SHOT + didim_delay;
+
+ if (overlay->n_shot && didim_enable) {
+ data->nr_unpin = *nr_unpin;
+ data->didim_work = true;
+ for (i = 0; i < *nr_unpin; i++)
+ data->unpin_handles[i] = unpin_handles[i];
+ tegra_overlay_flip_didim(data);
+ mutex_unlock(&overlay->lock);
+ return;
+ } else {
+ tegra_dc_incr_syncpt_min(overlay->dc, 0,
+ data->syncpt_max);
+ }
+ }
+
+ mutex_unlock(&overlay->lock);
+
+ /* unpin and deref previous front buffers */
+ for (i = 0; i < *nr_unpin; i++) {
+ nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
+ nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
+ }
+
+ kfree(data);
+}
+
+static void tegra_overlay_flip_worker(struct work_struct *work)
+{
+ struct tegra_overlay_flip_data *data =
+ container_of(work, struct tegra_overlay_flip_data, work);
+ struct tegra_overlay_info *overlay = data->overlay;
+ struct tegra_dc_win *win;
+ struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
+ struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
+ int i, nr_win = 0, nr_unpin = 0;
+
+ for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+ struct tegra_overlay_flip_win *flip_win = &data->win[i];
+ int idx = flip_win->attr.index;
+
+ if (idx == -1)
+ continue;
+
+ win = tegra_dc_get_window(overlay->dc, idx);
+
+ if (!win)
+ continue;
+
+ if (win->flags && win->cur_handle && !data->didim_work)
+ unpin_handles[nr_unpin++] = win->cur_handle;
+
+ tegra_overlay_set_windowattr(overlay, win, &data->win[i]);
+
+ wins[nr_win++] = win;
+
+#if 0
+ if (flip_win->attr.pre_syncpt_id < 0)
+ continue;
+ printk("%08x %08x\n",
+ flip_win->attr.pre_syncpt_id,
+ flip_win->attr.pre_syncpt_val);
+
+ nvhost_syncpt_wait_timeout(&overlay->ndev->host->syncpt,
+ flip_win->attr.pre_syncpt_id,
+ flip_win->attr.pre_syncpt_val,
+ msecs_to_jiffies(500));
+#endif
+ }
+
+ if (data->flags & TEGRA_OVERLAY_FLIP_FLAG_BLEND_REORDER) {
+ struct tegra_dc_win *dcwins[DC_N_WINDOWS];
+
+ for (i = 0; i < DC_N_WINDOWS; i++)
+ dcwins[i] = tegra_dc_get_window(overlay->dc, i);
+
+ tegra_overlay_blend_reorder(&overlay->blend, dcwins);
+ tegra_dc_update_windows(dcwins, DC_N_WINDOWS);
+ tegra_dc_sync_windows(dcwins, DC_N_WINDOWS);
+ } else {
+ tegra_dc_update_windows(wins, nr_win);
+ /* TODO: implement swapinterval here */
+ tegra_dc_sync_windows(wins, nr_win);
+ }
+
+ if ((overlay->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) &&
+ (overlay->dc->out->flags & TEGRA_DC_OUT_N_SHOT_MODE)) {
+ tegra_overlay_n_shot(data, unpin_handles, &nr_unpin);
+ } else {
+ tegra_dc_incr_syncpt_min(overlay->dc, 0, data->syncpt_max);
+
+ /* unpin and deref previous front buffers */
+ for (i = 0; i < nr_unpin; i++) {
+ nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
+ nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
+ }
+
+ kfree(data);
+ }
+}
+
+static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
+ struct tegra_overlay_flip_args *args,
+ struct nvmap_client *user_nvmap)
+{
+ struct tegra_overlay_flip_data *data;
+ struct tegra_overlay_flip_win *flip_win;
+ u32 syncpt_max;
+ int i, err;
+
+ if (WARN_ON(!overlay->ndev))
+ return -EFAULT;
+
+ mutex_lock(&tegra_flip_lock);
+ mutex_lock(&overlay->dc->lock);
+ if (!overlay->dc->enabled) {
+ mutex_unlock(&overlay->dc->lock);
+ mutex_unlock(&tegra_flip_lock);
+ return -EFAULT;
+ }
+ mutex_unlock(&overlay->dc->lock);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&overlay->ndev->dev,
+ "can't allocate memory for flip\n");
+ mutex_unlock(&tegra_flip_lock);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&data->work, tegra_overlay_flip_worker);
+ data->overlay = overlay;
+ data->flags = args->flags;
+ data->didim_work = false;
+
+ if ((overlay->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) &&
+ (overlay->dc->out->flags & TEGRA_DC_OUT_N_SHOT_MODE)) {
+ mutex_lock(&overlay->lock);
+ overlay->overlay_ref++;
+ overlay->n_shot = 0;
+ mutex_unlock(&overlay->lock);
+ }
+
+ for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+ flip_win = &data->win[i];
+
+ memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
+
+ if (flip_win->attr.index == -1)
+ continue;
+
+ err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
+ if (err < 0) {
+ dev_err(&overlay->ndev->dev,
+ "error setting window attributes\n");
+ goto surf_err;
+ }
+ }
+
+ syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc, 0);
+ data->syncpt_max = syncpt_max;
+
+ queue_work(overlay->flip_wq, &data->work);
+
+ args->post_syncpt_val = syncpt_max;
+ args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc, 0);
+ mutex_unlock(&tegra_flip_lock);
+
+ return 0;
+
+surf_err:
+ while (i--) {
+ if (data->win[i].handle) {
+ nvmap_unpin(overlay->overlay_nvmap,
+ data->win[i].handle);
+ nvmap_free(overlay->overlay_nvmap,
+ data->win[i].handle);
+ }
+ }
+ kfree(data);
+ mutex_unlock(&tegra_flip_lock);
+ return err;
+}
+
+static void tegra_overlay_set_emc_freq(struct tegra_overlay_info *dev)
+{
+ unsigned long new_rate;
+ int i;
+ struct tegra_dc_win *win;
+ struct tegra_dc_win *wins[DC_N_WINDOWS];
+
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ win = tegra_dc_get_window(dev->dc, i);
+ wins[i] = win;
+ }
+
+ new_rate = tegra_dc_get_bandwidth(wins, dev->dc->n_windows);
+ new_rate = EMC_BW_TO_FREQ(new_rate);
+
+ if (tegra_dc_has_multiple_dc())
+ new_rate = ULONG_MAX;
+
+ clk_set_rate(dev->dc->emc_clk, new_rate);
+}
+
+/* Overlay functions */
+static bool tegra_overlay_get(struct overlay_client *client, int idx)
+{
+ struct tegra_overlay_info *dev = client->dev;
+ bool ret = false;
+
+ if (idx < 0 || idx >= dev->dc->n_windows)
+ return ret;
+
+ mutex_lock(&dev->overlays_lock);
+ if (dev->overlays[idx].owner == NULL) {
+ dev->overlays[idx].owner = client;
+ ret = true;
+ if (dev->dc->mode.pclk != 0)
+ tegra_overlay_set_emc_freq(dev);
+
+ dev_dbg(&client->dev->ndev->dev,
+ "%s(): idx=%d pid=%d comm=%s\n",
+ __func__, idx, client->task->pid, client->task->comm);
+ }
+ mutex_unlock(&dev->overlays_lock);
+
+ return ret;
+}
+
+static void tegra_overlay_put_locked(struct overlay_client *client, int idx)
+{
+ struct tegra_overlay_flip_args flip_args;
+ struct tegra_overlay_info *dev = client->dev;
+
+ if (idx < 0 || idx >= dev->dc->n_windows)
+ return;
+
+ if (dev->overlays[idx].owner != client)
+ return;
+
+ dev_dbg(&client->dev->ndev->dev,
+ "%s(): idx=%d pid=%d comm=%s\n",
+ __func__, idx, client->task->pid, client->task->comm);
+
+ dev->overlays[idx].owner = NULL;
+
+ flip_args.win[0].index = idx;
+ flip_args.win[0].buff_id = 0;
+ flip_args.win[1].index = -1;
+ flip_args.win[2].index = -1;
+ flip_args.flags = 0;
+
+ tegra_overlay_flip(dev, &flip_args, NULL);
+ if (dev->dc->mode.pclk != 0)
+ tegra_overlay_set_emc_freq(dev);
+}
+
+static void tegra_overlay_put(struct overlay_client *client, int idx)
+{
+ mutex_lock(&client->dev->overlays_lock);
+ tegra_overlay_put_locked(client, idx);
+ mutex_unlock(&client->dev->overlays_lock);
+}
+
+/* Ioctl implementations */
+static int tegra_overlay_ioctl_open(struct overlay_client *client,
+ void __user *arg)
+{
+ int idx = -1;
+
+ if (copy_from_user(&idx, arg, sizeof(idx)))
+ return -EFAULT;
+
+ if (!tegra_overlay_get(client, idx))
+ return -EBUSY;
+
+ if (copy_to_user(arg, &idx, sizeof(idx))) {
+ tegra_overlay_put(client, idx);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int tegra_overlay_ioctl_close(struct overlay_client *client,
+ void __user *arg)
+{
+ int err = 0;
+ int idx;
+
+ if (copy_from_user(&idx, arg, sizeof(idx)))
+ return -EFAULT;
+
+ if (idx < 0 || idx >= client->dev->dc->n_windows)
+ return -EINVAL;
+
+ mutex_lock(&client->dev->overlays_lock);
+ if (client->dev->overlays[idx].owner == client)
+ tegra_overlay_put_locked(client, idx);
+ else
+ err = -EINVAL;
+ mutex_unlock(&client->dev->overlays_lock);
+
+ return err;
+}
+
+static int tegra_overlay_ioctl_flip(struct overlay_client *client,
+ void __user *arg)
+{
+ int i = 0;
+ int idx = 0;
+ int err;
+ bool found_one = false;
+ struct tegra_overlay_flip_args flip_args;
+
+ mutex_lock(&client->dev->dc->lock);
+ if (!client->dev->dc->enabled) {
+ mutex_unlock(&client->dev->dc->lock);
+ return -EPIPE;
+ }
+ mutex_unlock(&client->dev->dc->lock);
+
+ if (copy_from_user(&flip_args, arg, sizeof(flip_args)))
+ return -EFAULT;
+
+ for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+ idx = flip_args.win[i].index;
+ if (idx == -1) {
+ flip_args.win[i].buff_id = 0;
+ continue;
+ }
+
+ if (idx < 0 || idx >= client->dev->dc->n_windows) {
+ dev_err(&client->dev->ndev->dev,
+ "Flipping an invalid overlay! %d\n", idx);
+ flip_args.win[i].index = -1;
+ flip_args.win[i].buff_id = 0;
+ continue;
+ }
+
+ if (client->dev->overlays[idx].owner != client) {
+ dev_err(&client->dev->ndev->dev,
+ "Flipping a non-owned overlay! %d\n", idx);
+ flip_args.win[i].index = -1;
+ flip_args.win[i].buff_id = 0;
+ continue;
+ }
+
+ found_one = true;
+ }
+
+ if (!found_one)
+ return -EFAULT;
+
+ err = tegra_overlay_flip(client->dev, &flip_args, client->user_nvmap);
+
+ if (err)
+ return err;
+
+ if (copy_to_user(arg, &flip_args, sizeof(flip_args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tegra_overlay_ioctl_set_nvmap_fd(struct overlay_client *client,
+ void __user *arg)
+{
+ int fd;
+ struct nvmap_client *nvmap = NULL;
+
+ if (copy_from_user(&fd, arg, sizeof(fd)))
+ return -EFAULT;
+
+ if (fd < 0)
+ return -EINVAL;
+
+ nvmap = nvmap_client_get_file(fd);
+ if (IS_ERR(nvmap))
+ return PTR_ERR(nvmap);
+
+ if (client->user_nvmap)
+ nvmap_client_put(client->user_nvmap);
+
+ client->user_nvmap = nvmap;
+
+ return 0;
+}
+
+/* File operations */
+static int tegra_overlay_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct tegra_overlay_info *dev = container_of(miscdev,
+ struct tegra_overlay_info,
+ dev);
+ struct overlay_client *priv;
+ unsigned long flags;
+ int ret;
+
+ ret = nonseekable_open(inode, filp);
+ if (unlikely(ret))
+ return ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ get_task_struct(current);
+ priv->task = current;
+
+ spin_lock_irqsave(&dev->clients_lock, flags);
+ list_add(&priv->list, &dev->clients);
+ spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+ filp->private_data = priv;
+ return 0;
+}
+
+static int tegra_overlay_release(struct inode *inode, struct file *filp)
+{
+ struct overlay_client *client = filp->private_data;
+ unsigned long flags;
+ int i;
+
+ mutex_lock(&client->dev->overlays_lock);
+ for (i = 0; i < client->dev->dc->n_windows; i++)
+ if (client->dev->overlays[i].owner == client)
+ tegra_overlay_put_locked(client, i);
+ mutex_unlock(&client->dev->overlays_lock);
+
+ spin_lock_irqsave(&client->dev->clients_lock, flags);
+ list_del(&client->list);
+ spin_unlock_irqrestore(&client->dev->clients_lock, flags);
+
+ nvmap_client_put(client->user_nvmap);
+ put_task_struct(client->task);
+
+ kfree(client);
+ return 0;
+}
+
+static long tegra_overlay_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct overlay_client *client = filp->private_data;
+ int err = 0;
+ void __user *uarg = (void __user *)arg;
+
+ if (_IOC_TYPE(cmd) != TEGRA_OVERLAY_IOCTL_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_NR(cmd) < TEGRA_OVERLAY_IOCTL_MIN_NR)
+ return -ENOTTY;
+
+ if (_IOC_NR(cmd) > TEGRA_OVERLAY_IOCTL_MAX_NR)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err |= !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case TEGRA_OVERLAY_IOCTL_OPEN_WINDOW:
+ err = tegra_overlay_ioctl_open(client, uarg);
+ break;
+ case TEGRA_OVERLAY_IOCTL_CLOSE_WINDOW:
+ err = tegra_overlay_ioctl_close(client, uarg);
+ break;
+ case TEGRA_OVERLAY_IOCTL_FLIP:
+ err = tegra_overlay_ioctl_flip(client, uarg);
+ break;
+ case TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD:
+ err = tegra_overlay_ioctl_set_nvmap_fd(client, uarg);
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return err;
+}
+
+static const struct file_operations overlay_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_overlay_open,
+ .release = tegra_overlay_release,
+ .unlocked_ioctl = tegra_overlay_ioctl,
+};
+
+/* Registration */
+struct tegra_overlay_info *tegra_overlay_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc)
+{
+ struct tegra_overlay_info *dev;
+ int e;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&ndev->dev, "out of memory for device\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ snprintf(dev->name, sizeof(dev->name), "tegra_dc%u", ndev->id);
+
+ dev->ndev = ndev;
+ dev->dev.minor = MISC_DYNAMIC_MINOR;
+ dev->dev.name = dev->name;
+ dev->dev.fops = &overlay_fops;
+ dev->dev.parent = &ndev->dev;
+
+ spin_lock_init(&dev->clients_lock);
+ INIT_LIST_HEAD(&dev->clients);
+
+ mutex_init(&dev->overlays_lock);
+
+ e = misc_register(&dev->dev);
+ if (e) {
+ dev_err(&ndev->dev, "unable to register miscdevice %s\n",
+ dev->dev.name);
+ goto fail;
+ }
+
+ dev->overlay_nvmap = nvmap_create_client(nvmap_dev, "overlay");
+ if (!dev->overlay_nvmap) {
+ dev_err(&ndev->dev, "couldn't create nvmap client\n");
+ e = -ENOMEM;
+ goto err_free;
+ }
+
+ dev->flip_wq = create_singlethread_workqueue(dev_name(&ndev->dev));
+ if (!dev->flip_wq) {
+ dev_err(&ndev->dev, "couldn't create flip work-queue\n");
+ e = -ENOMEM;
+ goto err_delete_wq;
+ }
+ mutex_init(&dev->lock);
+ dev->overlay_ref = 0;
+ dev->n_shot = 0;
+
+ dev->dc = dc;
+
+ dev_info(&ndev->dev, "registered overlay\n");
+
+ return dev;
+
+err_delete_wq:
+err_free:
+fail:
+ if (dev->dev.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev);
+ kfree(dev);
+ return ERR_PTR(e);
+}
+
+void tegra_overlay_unregister(struct tegra_overlay_info *info)
+{
+ misc_deregister(&info->dev);
+
+ kfree(info);
+}
+
+void tegra_overlay_disable(struct tegra_overlay_info *overlay_info)
+{
+ mutex_lock(&tegra_flip_lock);
+ mutex_lock(&overlay_info->lock);
+ overlay_info->n_shot = 0;
+ flush_workqueue(overlay_info->flip_wq);
+ mutex_unlock(&overlay_info->lock);
+ mutex_unlock(&tegra_flip_lock);
+}
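From userspace the overlay node is driven with the four ioctls handled above: bind an nvmap client, claim a window, submit flips, then release the window. The following is only a hedged sketch of that sequence: the device node name is derived from dev->name (so "tegra_dc0" for the first controller), the ioctl numbers and structures come from linux/tegra_overlay.h, and the nvmap fd plus buffer handle id must be obtained through the nvmap API, none of which is shown in this patch. A real flip would also have to fill in the window geometry and pixel format fields, which are left zeroed here.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tegra_overlay.h>        /* TEGRA_OVERLAY_IOCTL_*, flip args */

/* Sketch only: nvmap_fd is an open fd on the nvmap device, buff_id a handle
 * id valid in that nvmap client. */
static int overlay_flip_once(int nvmap_fd, unsigned int buff_id)
{
        struct tegra_overlay_flip_args flip;
        int win = 1;    /* ask for overlay window 1 */
        int fd = open("/dev/tegra_dc0", O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD, &nvmap_fd) < 0 ||
            ioctl(fd, TEGRA_OVERLAY_IOCTL_OPEN_WINDOW, &win) < 0)
                goto out;

        memset(&flip, 0, sizeof(flip));
        flip.win[0].index = win;
        flip.win[0].buff_id = buff_id;
        flip.win[1].index = -1;         /* unused slots are marked with -1 */
        flip.win[2].index = -1;
        if (ioctl(fd, TEGRA_OVERLAY_IOCTL_FLIP, &flip) == 0)
                printf("post syncpt id %u val %u\n",
                       (unsigned)flip.post_syncpt_id,
                       (unsigned)flip.post_syncpt_val);

        ioctl(fd, TEGRA_OVERLAY_IOCTL_CLOSE_WINDOW, &win);
out:
        close(fd);
        return 0;
}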
diff --git a/drivers/video/tegra/dc/overlay.h b/drivers/video/tegra/dc/overlay.h
new file mode 100644
index 000000000000..812bc0237562
--- /dev/null
+++ b/drivers/video/tegra/dc/overlay.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/video/tegra/dc/overlay.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_OVERLAY_H
+#define __DRIVERS_VIDEO_TEGRA_OVERLAY_H
+
+struct tegra_overlay_info;
+
+#ifdef CONFIG_TEGRA_OVERLAY
+struct tegra_overlay_info *tegra_overlay_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc);
+void tegra_overlay_unregister(struct tegra_overlay_info *overlay_info);
+void tegra_overlay_disable(struct tegra_overlay_info *overlay_info);
+#else
+static inline struct tegra_overlay_info *tegra_overlay_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc)
+{
+ return NULL;
+}
+
+static inline void tegra_overlay_unregister(struct tegra_overlay_info *overlay_info)
+{
+}
+
+static inline void tegra_overlay_disable(struct tegra_overlay_info *overlay_info)
+{
+}
+#endif
+
+#endif
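One subtlety the stubs introduce: with CONFIG_TEGRA_OVERLAY disabled, tegra_overlay_register() returns NULL, while the real implementation returns an ERR_PTR() on failure, so a caller has to cope with both. A minimal caller-side sketch, assuming <linux/err.h> is available; the real caller sits in dc.c, outside this patch.

static struct tegra_overlay_info *example_register_overlay(
        struct nvhost_device *ndev, struct tegra_dc *dc)
{
        struct tegra_overlay_info *ov = tegra_overlay_register(ndev, dc);

        /* Disabled stub returns NULL; the real code returns ERR_PTR() on
         * failure. Normalize both to NULL and keep running without the
         * overlay device node. */
        if (IS_ERR(ov))
                ov = NULL;
        return ov;
}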
diff --git a/drivers/video/tegra/dc/rgb.c b/drivers/video/tegra/dc/rgb.c
new file mode 100644
index 000000000000..2112643058f4
--- /dev/null
+++ b/drivers/video/tegra/dc/rgb.c
@@ -0,0 +1,160 @@
+/*
+ * drivers/video/tegra/dc/rgb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <mach/dc.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+
+static const u32 tegra_dc_rgb_enable_partial_pintable[] = {
+ DC_COM_PIN_OUTPUT_ENABLE0, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE1, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE2, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE3, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY0, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY2, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA0, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA1, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA2, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA3, 0x00000000,
+};
+
+static const u32 tegra_dc_rgb_enable_pintable[] = {
+ DC_COM_PIN_OUTPUT_ENABLE0, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE1, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE2, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE3, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY0, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY1, 0x01000000,
+ DC_COM_PIN_OUTPUT_POLARITY2, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY3, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA0, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA1, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA2, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA3, 0x00000000,
+};
+
+static const u32 tegra_dc_rgb_enable_out_sel_pintable[] = {
+ DC_COM_PIN_OUTPUT_SELECT0, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT1, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT2, 0x00000000,
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+ DC_COM_PIN_OUTPUT_SELECT3, 0x00000000,
+#else
+ /* The display panel sub-board used on FPGA platforms (panel 86)
+ is non-standard. It expects the Data Enable signal on the WR
+ pin instead of the DE pin. */
+ DC_COM_PIN_OUTPUT_SELECT3, 0x00200000,
+#endif
+ DC_COM_PIN_OUTPUT_SELECT4, 0x00210222,
+ DC_COM_PIN_OUTPUT_SELECT5, 0x00002200,
+ DC_COM_PIN_OUTPUT_SELECT6, 0x00020000,
+};
+
+static const u32 tegra_dc_rgb_disable_pintable[] = {
+ DC_COM_PIN_OUTPUT_ENABLE0, 0x55555555,
+ DC_COM_PIN_OUTPUT_ENABLE1, 0x55150005,
+ DC_COM_PIN_OUTPUT_ENABLE2, 0x55555555,
+ DC_COM_PIN_OUTPUT_ENABLE3, 0x55555555,
+ DC_COM_PIN_OUTPUT_POLARITY0, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY1, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY2, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY3, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA0, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_DATA1, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_DATA2, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_DATA3, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_SELECT0, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT1, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT2, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT3, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT4, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT5, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT6, 0x00000000,
+};
+
+void tegra_dc_rgb_enable(struct tegra_dc *dc)
+{
+ int i;
+ u32 out_sel_pintable[ARRAY_SIZE(tegra_dc_rgb_enable_out_sel_pintable)];
+
+ tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+ DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+
+ if (dc->out->out_pins) {
+ tegra_dc_set_out_pin_polars(dc, dc->out->out_pins,
+ dc->out->n_out_pins);
+ tegra_dc_write_table(dc, tegra_dc_rgb_enable_partial_pintable);
+ } else {
+ tegra_dc_write_table(dc, tegra_dc_rgb_enable_pintable);
+ }
+
+ memcpy(out_sel_pintable, tegra_dc_rgb_enable_out_sel_pintable,
+ sizeof(tegra_dc_rgb_enable_out_sel_pintable));
+
+ if (dc->out && dc->out->out_sel_configs) {
+ u8 *out_sels = dc->out->out_sel_configs;
+ for (i = 0; i < dc->out->n_out_sel_configs; i++) {
+ switch (out_sels[i]) {
+ case TEGRA_PIN_OUT_CONFIG_SEL_LM1_M1:
+ out_sel_pintable[5*2+1] =
+ (out_sel_pintable[5*2+1] &
+ ~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
+ PIN5_LM1_LCD_M1_OUTPUT_M1;
+ break;
+ case TEGRA_PIN_OUT_CONFIG_SEL_LM1_LD21:
+ out_sel_pintable[5*2+1] =
+ (out_sel_pintable[5*2+1] &
+ ~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
+ PIN5_LM1_LCD_M1_OUTPUT_LD21;
+ break;
+ case TEGRA_PIN_OUT_CONFIG_SEL_LM1_PM1:
+ out_sel_pintable[5*2+1] =
+ (out_sel_pintable[5*2+1] &
+ ~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
+ PIN5_LM1_LCD_M1_OUTPUT_PM1;
+ break;
+ default:
+ dev_err(&dc->ndev->dev,
+ "Invalid pin config[%d]: %d\n",
+ i, out_sels[i]);
+ break;
+ }
+ }
+ }
+
+ tegra_dc_write_table(dc, out_sel_pintable);
+}
+
+void tegra_dc_rgb_disable(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, 0x00000000, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_write_table(dc, tegra_dc_rgb_disable_pintable);
+}
+
+struct tegra_dc_out_ops tegra_dc_rgb_ops = {
+ .enable = tegra_dc_rgb_enable,
+ .disable = tegra_dc_rgb_disable,
+};
+
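The pin tables above are flat arrays of (register, value) pairs, which is why the out_sel fixups index entry 5 as out_sel_pintable[5*2+1], i.e. the value half of the DC_COM_PIN_OUTPUT_SELECT5 pair. The helper that consumes them, tegra_dc_write_table(), is defined elsewhere in the DC driver and is called here without an explicit length, so it is presumably a macro around something like the sketch below; the shape is an assumption inferred from the tables, not the actual implementation.

/* Assumed pair-walking writer implied by the pin tables. */
static void example_write_table(struct tegra_dc *dc,
                                const u32 *table, unsigned int len)
{
        unsigned int i;

        /* table[i] is the register offset, table[i + 1] the value. */
        for (i = 0; i < len; i += 2)
                tegra_dc_writel(dc, table[i + 1], table[i]);
}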
diff --git a/drivers/video/tegra/fb.c b/drivers/video/tegra/fb.c
new file mode 100644
index 000000000000..e3fec88966f7
--- /dev/null
+++ b/drivers/video/tegra/fb.c
@@ -0,0 +1,499 @@
+/*
+ * drivers/video/tegra/fb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * Colin Cross <ccross@android.com>
+ * Travis Geiselbrecht <travis@palm.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+
+#include <video/tegrafb.h>
+
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+
+#include "host/dev.h"
+#include "nvmap/nvmap.h"
+#include "dc/dc_priv.h"
+
+/* Pad pitch to 16-byte boundary. */
+#define TEGRA_LINEAR_PITCH_ALIGNMENT 16
+
+struct tegra_fb_info {
+ struct tegra_dc_win *win;
+ struct nvhost_device *ndev;
+ struct fb_info *info;
+ bool valid;
+
+ struct resource *fb_mem;
+
+ int xres;
+ int yres;
+};
+
+/* palette array used by the fbcon */
+static u32 pseudo_palette[16];
+
+static int tegra_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if ((var->yres * var->xres * var->bits_per_pixel / 8 * 2) >
+ info->screen_size)
+ return -EINVAL;
+
+ /* double yres_virtual to allow double buffering through pan_display */
+ var->yres_virtual = var->yres * 2;
+
+ return 0;
+}
+
+static int tegra_fb_set_par(struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+
+ if (var->bits_per_pixel) {
+ /* we only support RGB ordering for now */
+ switch (var->bits_per_pixel) {
+ case 32:
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 16;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+ /* Pad the stride to 16-byte boundary. */
+ info->fix.line_length = round_up(info->fix.line_length,
+ TEGRA_LINEAR_PITCH_ALIGNMENT);
+ tegra_fb->win->stride = info->fix.line_length;
+ tegra_fb->win->stride_uv = 0;
+ tegra_fb->win->phys_addr_u = 0;
+ tegra_fb->win->phys_addr_v = 0;
+ }
+
+ if (var->pixclock) {
+ bool stereo;
+ struct fb_videomode m;
+
+ fb_var_to_videomode(&m, var);
+
+ info->mode = (struct fb_videomode *)
+ fb_find_nearest_mode(&m, &info->modelist);
+ if (!info->mode) {
+ dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * only enable stereo if the mode supports it and
+ * client requests it
+ */
+ stereo = !!(var->vmode & info->mode->vmode &
+ FB_VMODE_STEREO_FRAME_PACK);
+
+ tegra_dc_set_fb_mode(tegra_fb->win->dc, info->mode, stereo);
+
+ tegra_fb->win->w.full = dfixed_const(info->mode->xres);
+ tegra_fb->win->h.full = dfixed_const(info->mode->yres);
+ tegra_fb->win->out_w = info->mode->xres;
+ tegra_fb->win->out_h = info->mode->yres;
+ }
+ return 0;
+}
+
+static int tegra_fb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+ struct tegra_dc *dc = tegra_fb->win->dc;
+ int i;
+ u16 *red = cmap->red;
+ u16 *green = cmap->green;
+ u16 *blue = cmap->blue;
+ int start = cmap->start;
+
+ if (((unsigned)start > 255) || ((start + cmap->len) > 256))
+ return -EINVAL;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ for (i = 0; i < cmap->len; i++) {
+ dc->fb_lut.r[start+i] = *red++ >> 8;
+ dc->fb_lut.g[start+i] = *green++ >> 8;
+ dc->fb_lut.b[start+i] = *blue++ >> 8;
+ }
+
+ tegra_dc_update_lut(dc, -1, -1);
+ }
+
+ return 0;
+}
+
+static int tegra_fb_blank(int blank, struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ dev_dbg(&tegra_fb->ndev->dev, "unblank\n");
+ tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED;
+ tegra_dc_enable(tegra_fb->win->dc);
+ return 0;
+
+ case FB_BLANK_NORMAL:
+ dev_dbg(&tegra_fb->ndev->dev, "blank - normal\n");
+ tegra_dc_blank(tegra_fb->win->dc);
+ return 0;
+
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ dev_dbg(&tegra_fb->ndev->dev, "blank - powerdown\n");
+ tegra_dc_disable(tegra_fb->win->dc);
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int tegra_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+ char __iomem *flush_start;
+ char __iomem *flush_end;
+ u32 addr;
+
+ if (!tegra_fb->win->cur_handle) {
+ flush_start = info->screen_base + (var->yoffset * info->fix.line_length);
+ flush_end = flush_start + (var->yres * info->fix.line_length);
+
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+
+ addr = info->fix.smem_start + (var->yoffset * info->fix.line_length) +
+ (var->xoffset * (var->bits_per_pixel/8));
+
+ tegra_fb->win->phys_addr = addr;
+ /* TODO: update virt_addr */
+
+ tegra_dc_update_windows(&tegra_fb->win, 1);
+ tegra_dc_sync_windows(&tegra_fb->win, 1);
+ }
+
+ return 0;
+}
+
+static void tegra_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+}
+
+static void tegra_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ cfb_copyarea(info, region);
+}
+
+static void tegra_fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+}
+
+static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct tegra_fb_modedb modedb;
+ struct fb_modelist *modelist;
+ int i;
+
+ switch (cmd) {
+ case FBIO_TEGRA_GET_MODEDB:
+ if (copy_from_user(&modedb, (void __user *)arg, sizeof(modedb)))
+ return -EFAULT;
+
+ i = 0;
+ list_for_each_entry(modelist, &info->modelist, list) {
+ struct fb_var_screeninfo var;
+
+ if (i >= modedb.modedb_len)
+ break;
+
+ /* fb_videomode_to_var doesn't fill out all the members
+ of fb_var_screeninfo */
+ memset(&var, 0x0, sizeof(var));
+
+ fb_videomode_to_var(&var, &modelist->mode);
+
+ if (copy_to_user((void __user *)&modedb.modedb[i],
+ &var, sizeof(var)))
+ return -EFAULT;
+ i++;
+
+ if (var.vmode & FB_VMODE_STEREO_MASK) {
+ if (i >= modedb.modedb_len)
+ break;
+ var.vmode &= ~FB_VMODE_STEREO_MASK;
+ if (copy_to_user(
+ (void __user *)&modedb.modedb[i],
+ &var, sizeof(var)))
+ return -EFAULT;
+ i++;
+ }
+ }
+ modedb.modedb_len = i;
+
+ if (copy_to_user((void __user *)arg, &modedb, sizeof(modedb)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static struct fb_ops tegra_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = tegra_fb_check_var,
+ .fb_set_par = tegra_fb_set_par,
+ .fb_setcmap = tegra_fb_setcmap,
+ .fb_blank = tegra_fb_blank,
+ .fb_pan_display = tegra_fb_pan_display,
+ .fb_fillrect = tegra_fb_fillrect,
+ .fb_copyarea = tegra_fb_copyarea,
+ .fb_imageblit = tegra_fb_imageblit,
+ .fb_ioctl = tegra_fb_ioctl,
+};
+
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+ struct fb_monspecs *specs,
+ bool (*mode_filter)(const struct tegra_dc *dc,
+ struct fb_videomode *mode))
+{
+ struct fb_event event;
+ int i;
+
+ mutex_lock(&fb_info->info->lock);
+ fb_destroy_modedb(fb_info->info->monspecs.modedb);
+
+ fb_destroy_modelist(&fb_info->info->modelist);
+
+ if (specs == NULL) {
+ struct tegra_dc_mode mode;
+ memset(&fb_info->info->monspecs, 0x0,
+ sizeof(fb_info->info->monspecs));
+ memset(&mode, 0x0, sizeof(mode));
+
+ /*
+ * Reset the video mode properties to prevent garbage from being
+ * displayed on the 'mode' device.
+ */
+ fb_info->info->mode = NULL;
+
+ tegra_dc_set_mode(fb_info->win->dc, &mode);
+ mutex_unlock(&fb_info->info->lock);
+ return;
+ }
+
+ memcpy(&fb_info->info->monspecs, specs,
+ sizeof(fb_info->info->monspecs));
+
+ for (i = 0; i < specs->modedb_len; i++) {
+ if (mode_filter) {
+ if (mode_filter(fb_info->win->dc, &specs->modedb[i]))
+ fb_add_videomode(&specs->modedb[i],
+ &fb_info->info->modelist);
+ } else {
+ fb_add_videomode(&specs->modedb[i],
+ &fb_info->info->modelist);
+ }
+ }
+
+ event.info = fb_info->info;
+ fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+ mutex_unlock(&fb_info->info->lock);
+}
+
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc,
+ struct tegra_fb_data *fb_data,
+ struct resource *fb_mem)
+{
+ struct tegra_dc_win *win;
+ struct fb_info *info;
+ struct tegra_fb_info *tegra_fb;
+ void __iomem *fb_base = NULL;
+ unsigned long fb_size = 0;
+ unsigned long fb_phys = 0;
+ int ret = 0;
+
+ win = tegra_dc_get_window(dc, fb_data->win);
+ if (!win) {
+ dev_err(&ndev->dev, "dc does not have a window at index %d\n",
+ fb_data->win);
+ return ERR_PTR(-ENOENT);
+ }
+
+ info = framebuffer_alloc(sizeof(struct tegra_fb_info), &ndev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tegra_fb = info->par;
+ tegra_fb->win = win;
+ tegra_fb->ndev = ndev;
+ tegra_fb->fb_mem = fb_mem;
+ tegra_fb->xres = fb_data->xres;
+ tegra_fb->yres = fb_data->yres;
+
+ if (fb_mem) {
+ fb_size = resource_size(fb_mem);
+ fb_phys = fb_mem->start;
+ fb_base = ioremap_nocache(fb_phys, fb_size);
+ if (!fb_base) {
+ dev_err(&ndev->dev, "fb can't be mapped\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+ tegra_fb->valid = true;
+ }
+
+ info->fbops = &tegra_fb_ops;
+ info->pseudo_palette = pseudo_palette;
+ info->screen_base = fb_base;
+ info->screen_size = fb_size;
+
+ strlcpy(info->fix.id, "tegra_fb", sizeof(info->fix.id));
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.xpanstep = 1;
+ info->fix.ypanstep = 1;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.smem_start = fb_phys;
+ info->fix.smem_len = fb_size;
+ info->fix.line_length = fb_data->xres * fb_data->bits_per_pixel / 8;
+ /* Pad the stride to 16-byte boundary. */
+ info->fix.line_length = round_up(info->fix.line_length,
+ TEGRA_LINEAR_PITCH_ALIGNMENT);
+
+ info->var.xres = fb_data->xres;
+ info->var.yres = fb_data->yres;
+ info->var.xres_virtual = fb_data->xres;
+ info->var.yres_virtual = fb_data->yres * 2;
+ info->var.bits_per_pixel = fb_data->bits_per_pixel;
+ info->var.activate = FB_ACTIVATE_VBL;
+ info->var.height = tegra_dc_get_out_height(dc);
+ info->var.width = tegra_dc_get_out_width(dc);
+ info->var.pixclock = 0;
+ info->var.left_margin = 0;
+ info->var.right_margin = 0;
+ info->var.upper_margin = 0;
+ info->var.lower_margin = 0;
+ info->var.hsync_len = 0;
+ info->var.vsync_len = 0;
+ info->var.vmode = FB_VMODE_NONINTERLACED;
+
+ win->x.full = dfixed_const(0);
+ win->y.full = dfixed_const(0);
+ win->w.full = dfixed_const(fb_data->xres);
+ win->h.full = dfixed_const(fb_data->yres);
+ /* TODO: set to output res dc */
+ win->out_x = 0;
+ win->out_y = 0;
+ win->out_w = fb_data->xres;
+ win->out_h = fb_data->yres;
+ win->z = 0;
+ win->phys_addr = fb_phys;
+ win->virt_addr = fb_base;
+ win->phys_addr_u = 0;
+ win->phys_addr_v = 0;
+ win->stride = info->fix.line_length;
+ win->stride_uv = 0;
+ win->flags = TEGRA_WIN_FLAG_ENABLED;
+
+ if (fb_mem)
+ tegra_fb_set_par(info);
+
+ if (register_framebuffer(info)) {
+ dev_err(&ndev->dev, "failed to register framebuffer\n");
+ ret = -ENODEV;
+ goto err_iounmap_fb;
+ }
+
+ tegra_fb->info = info;
+
+ dev_info(&ndev->dev, "probed\n");
+
+ if (fb_data->flags & TEGRA_FB_FLIP_ON_PROBE) {
+ tegra_dc_update_windows(&tegra_fb->win, 1);
+ tegra_dc_sync_windows(&tegra_fb->win, 1);
+ }
+
+ return tegra_fb;
+
+err_iounmap_fb:
+ if (fb_base)
+ iounmap(fb_base);
+err_free:
+ framebuffer_release(info);
+err:
+ return ERR_PTR(ret);
+}
+
+void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+ struct fb_info *info = fb_info->info;
+
+ unregister_framebuffer(info);
+
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+}
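The FBIO_TEGRA_GET_MODEDB ioctl handled in tegra_fb_ioctl() hands userspace a caller-supplied array of fb_var_screeninfo entries and trims modedb_len to the number actually filled in. A hedged sketch of querying it follows; the exact definition of struct tegra_fb_modedb (a user pointer plus a length, as implied by the handler) and the ioctl number live in video/tegrafb.h, outside this patch, and /dev/fb0 is assumed to be the Tegra framebuffer.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>
#include <video/tegrafb.h>      /* FBIO_TEGRA_GET_MODEDB, struct tegra_fb_modedb */

int main(void)
{
        struct fb_var_screeninfo modes[32];
        struct tegra_fb_modedb mdb = {
                .modedb = modes,
                .modedb_len = 32,
        };
        int i, fd = open("/dev/fb0", O_RDWR);

        if (fd < 0)
                return 1;
        if (ioctl(fd, FBIO_TEGRA_GET_MODEDB, &mdb) == 0)
                for (i = 0; i < (int)mdb.modedb_len; i++)
                        printf("%ux%u\n",
                               (unsigned)modes[i].xres,
                               (unsigned)modes[i].yres);
        close(fd);
        return 0;
}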
diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile
new file mode 100644
index 000000000000..4fd19ac809b8
--- /dev/null
+++ b/drivers/video/tegra/host/Makefile
@@ -0,0 +1,19 @@
+GCOV_PROFILE := y
+nvhost-objs = \
+ nvhost_acm.o \
+ nvhost_syncpt.o \
+ nvhost_cdma.o \
+ nvhost_cpuaccess.o \
+ nvhost_intr.o \
+ nvhost_channel.o \
+ nvhost_job.o \
+ dev.o \
+ bus.o \
+ debug.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += mpe/
+obj-$(CONFIG_TEGRA_GRHOST) += gr3d/
+obj-$(CONFIG_TEGRA_GRHOST) += host1x/
+obj-$(CONFIG_TEGRA_GRHOST) += t20/
+obj-$(CONFIG_TEGRA_GRHOST) += t30/
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
diff --git a/drivers/video/tegra/host/bus.c b/drivers/video/tegra/host/bus.c
new file mode 100644
index 000000000000..7e21bcbd7490
--- /dev/null
+++ b/drivers/video/tegra/host/bus.c
@@ -0,0 +1,567 @@
+/*
+ * drivers/video/tegra/host/bus.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/nvhost.h>
+
+#include "dev.h"
+
+struct nvhost_master *nvhost;
+struct device nvhost_bus = {
+ .init_name = "nvhost",
+};
+
+struct resource *nvhost_get_resource(struct nvhost_device *dev,
+ unsigned int type, unsigned int num)
+{
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource);
+
+int nvhost_get_irq(struct nvhost_device *dev, unsigned int num)
+{
+ struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq);
+
+struct resource *nvhost_get_resource_byname(struct nvhost_device *dev,
+ unsigned int type,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && !strcmp(r->name, name))
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource_byname);
+
+int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name)
+{
+ struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ,
+ name);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq_byname);
+
+static int nvhost_drv_probe(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ dev->host = nvhost;
+
+ return drv->probe(dev);
+}
+
+static int nvhost_drv_remove(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ return drv->remove(dev);
+}
+
+static void nvhost_drv_shutdown(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ drv->shutdown(dev);
+}
+
+int nvhost_driver_register(struct nvhost_driver *drv)
+{
+ drv->driver.bus = &nvhost_bus_type;
+ if (drv->probe)
+ drv->driver.probe = nvhost_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = nvhost_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = nvhost_drv_shutdown;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(nvhost_driver_register);
+
+void nvhost_driver_unregister(struct nvhost_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
+
+int nvhost_device_register(struct nvhost_device *dev)
+{
+ int i, ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ device_initialize(&dev->dev);
+
+ if (!dev->dev.parent)
+ dev->dev.parent = &nvhost_bus;
+
+ dev->dev.bus = &nvhost_bus_type;
+
+ if (dev->id != -1)
+ dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+ else
+ dev_set_name(&dev->dev, "%s", dev->name);
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *p, *r = &dev->resource[i];
+
+ if (r->name == NULL)
+ r->name = dev_name(&dev->dev);
+
+ p = r->parent;
+ if (!p) {
+ if (resource_type(r) == IORESOURCE_MEM)
+ p = &iomem_resource;
+ else if (resource_type(r) == IORESOURCE_IO)
+ p = &ioport_resource;
+ }
+
+ if (p && insert_resource(p, r)) {
+ pr_err("%s: failed to claim resource %d\n",
+ dev_name(&dev->dev), i);
+ ret = -EBUSY;
+ goto failed;
+ }
+ }
+
+ ret = device_add(&dev->dev);
+ if (ret == 0)
+ return ret;
+
+failed:
+ while (--i >= 0) {
+ struct resource *r = &dev->resource[i];
+ unsigned long type = resource_type(r);
+
+ if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ release_resource(r);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_device_register);
+
+void nvhost_device_unregister(struct nvhost_device *dev)
+{
+ int i;
+ if (dev) {
+ device_del(&dev->dev);
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+ unsigned long type = resource_type(r);
+
+ if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ release_resource(r);
+ }
+
+ put_device(&dev->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(nvhost_device_unregister);
+
+
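+/*
+ * Devices and drivers on this bus are matched by name prefix, so a driver
+ * named "gr3d" binds to any device whose name starts with "gr3d".
+ */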
+static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
+{
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ return !strncmp(dev->name, drv->name, strlen(drv->name));
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+ struct nvhost_device *pdev = to_nvhost_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->suspend)
+ ret = pdrv->suspend(pdev, mesg);
+
+ return ret;
+}
+
+static int nvhost_legacy_resume(struct device *dev)
+{
+ struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+ struct nvhost_device *pdev = to_nvhost_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->resume)
+ ret = pdrv->resume(pdev);
+
+ return ret;
+}
+
+static int nvhost_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void nvhost_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define nvhost_pm_prepare NULL
+#define nvhost_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int __weak nvhost_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define nvhost_pm_suspend NULL
+#define nvhost_pm_resume NULL
+#define nvhost_pm_suspend_noirq NULL
+#define nvhost_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int nvhost_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define nvhost_pm_freeze NULL
+#define nvhost_pm_thaw NULL
+#define nvhost_pm_poweroff NULL
+#define nvhost_pm_restore NULL
+#define nvhost_pm_freeze_noirq NULL
+#define nvhost_pm_thaw_noirq NULL
+#define nvhost_pm_poweroff_noirq NULL
+#define nvhost_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak nvhost_pm_runtime_suspend(struct device *dev)
+{
+ return pm_generic_runtime_suspend(dev);
+}
+
+int __weak nvhost_pm_runtime_resume(struct device *dev)
+{
+ return pm_generic_runtime_resume(dev);
+}
+
+int __weak nvhost_pm_runtime_idle(struct device *dev)
+{
+ return pm_generic_runtime_idle(dev);
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define nvhost_pm_runtime_suspend NULL
+#define nvhost_pm_runtime_resume NULL
+#define nvhost_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops nvhost_dev_pm_ops = {
+ .prepare = nvhost_pm_prepare,
+ .complete = nvhost_pm_complete,
+ .suspend = nvhost_pm_suspend,
+ .resume = nvhost_pm_resume,
+ .freeze = nvhost_pm_freeze,
+ .thaw = nvhost_pm_thaw,
+ .poweroff = nvhost_pm_poweroff,
+ .restore = nvhost_pm_restore,
+ .suspend_noirq = nvhost_pm_suspend_noirq,
+ .resume_noirq = nvhost_pm_resume_noirq,
+ .freeze_noirq = nvhost_pm_freeze_noirq,
+ .thaw_noirq = nvhost_pm_thaw_noirq,
+ .poweroff_noirq = nvhost_pm_poweroff_noirq,
+ .restore_noirq = nvhost_pm_restore_noirq,
+ .runtime_suspend = nvhost_pm_runtime_suspend,
+ .runtime_resume = nvhost_pm_runtime_resume,
+ .runtime_idle = nvhost_pm_runtime_idle,
+};
+
+struct bus_type nvhost_bus_type = {
+ .name = "nvhost",
+ .match = nvhost_bus_match,
+ .pm = &nvhost_dev_pm_ops,
+};
+EXPORT_SYMBOL(nvhost_bus_type);
+
+int nvhost_bus_register(struct nvhost_master *host)
+{
+ nvhost = host;
+
+ return 0;
+}
+
+
+int nvhost_bus_init(void)
+{
+ int err;
+
+ pr_info("host1x bus init\n");
+ err = device_register(&nvhost_bus);
+ if (err)
+ return err;
+
+ err = bus_register(&nvhost_bus_type);
+ if (err)
+ device_unregister(&nvhost_bus);
+
+ return err;
+}
+postcore_initcall(nvhost_bus_init);
+
diff --git a/drivers/video/tegra/host/chip_support.h b/drivers/video/tegra/host/chip_support.h
new file mode 100644
index 000000000000..3b8c4dc46b45
--- /dev/null
+++ b/drivers/video/tegra/host/chip_support.h
@@ -0,0 +1,146 @@
+/*
+ * drivers/video/tegra/host/chip_support.h
+ *
+ * Tegra Graphics Host Chip Support
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _NVHOST_CHIP_SUPPORT_H_
+#define _NVHOST_CHIP_SUPPORT_H_
+
+#include <linux/types.h>
+struct output;
+struct nvhost_waitchk;
+struct nvhost_userctx_timeout;
+struct nvhost_master;
+struct nvhost_channel;
+struct nvmap_handle;
+struct nvmap_client;
+struct nvhost_hwctx;
+struct nvhost_cdma;
+struct nvhost_intr;
+struct push_buffer;
+struct nvhost_syncpt;
+struct nvhost_cpuaccess;
+struct nvhost_module;
+struct dentry;
+struct nvhost_job;
+
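+/*
+ * Per-chip operations table; nvhost_init_t20_support() and
+ * nvhost_init_t30_support() below fill in these hooks for their chip.
+ */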
+struct nvhost_chip_support {
+ struct {
+ int (*init)(struct nvhost_channel *,
+ struct nvhost_master *,
+ int chid);
+ int (*submit)(struct nvhost_job *job);
+ int (*read3dreg)(struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset,
+ u32 *value);
+ } channel;
+
+ struct {
+ void (*start)(struct nvhost_cdma *);
+ void (*stop)(struct nvhost_cdma *);
+ void (*kick)(struct nvhost_cdma *);
+ int (*timeout_init)(struct nvhost_cdma *,
+ u32 syncpt_id);
+ void (*timeout_destroy)(struct nvhost_cdma *);
+ void (*timeout_teardown_begin)(struct nvhost_cdma *);
+ void (*timeout_teardown_end)(struct nvhost_cdma *,
+ u32 getptr);
+ void (*timeout_cpu_incr)(struct nvhost_cdma *,
+ u32 getptr,
+ u32 syncpt_incrs,
+ u32 syncval,
+ u32 nr_slots);
+ void (*timeout_pb_incr)(struct nvhost_cdma *,
+ u32 getptr,
+ u32 syncpt_incrs,
+ u32 nr_slots,
+ bool exec_ctxsave);
+ } cdma;
+
+ struct {
+ void (*reset)(struct push_buffer *);
+ int (*init)(struct push_buffer *);
+ void (*destroy)(struct push_buffer *);
+ void (*push_to)(struct push_buffer *,
+ struct nvmap_client *,
+ struct nvmap_handle *,
+ u32 op1, u32 op2);
+ void (*pop_from)(struct push_buffer *,
+ unsigned int slots);
+ u32 (*space)(struct push_buffer *);
+ u32 (*putptr)(struct push_buffer *);
+ } push_buffer;
+
+ struct {
+ void (*debug_init)(struct dentry *de);
+ void (*show_channel_cdma)(struct nvhost_master *,
+ struct output *,
+ int chid);
+ void (*show_channel_fifo)(struct nvhost_master *,
+ struct output *,
+ int chid);
+ void (*show_mlocks)(struct nvhost_master *m,
+ struct output *o);
+
+ } debug;
+
+ struct {
+ void (*reset)(struct nvhost_syncpt *, u32 id);
+ void (*reset_wait_base)(struct nvhost_syncpt *, u32 id);
+ void (*read_wait_base)(struct nvhost_syncpt *, u32 id);
+ u32 (*update_min)(struct nvhost_syncpt *, u32 id);
+ void (*cpu_incr)(struct nvhost_syncpt *, u32 id);
+ int (*wait_check)(struct nvhost_syncpt *sp,
+ struct nvmap_client *nvmap,
+ u32 waitchk_mask,
+ struct nvhost_waitchk *wait,
+ int num_waitchk);
+ void (*debug)(struct nvhost_syncpt *);
+ const char * (*name)(struct nvhost_syncpt *, u32 id);
+ } syncpt;
+
+ struct {
+ void (*init_host_sync)(struct nvhost_intr *);
+ void (*set_host_clocks_per_usec)(
+ struct nvhost_intr *, u32 clocks);
+ void (*set_syncpt_threshold)(
+ struct nvhost_intr *, u32 id, u32 thresh);
+ void (*enable_syncpt_intr)(struct nvhost_intr *, u32 id);
+ void (*disable_all_syncpt_intrs)(struct nvhost_intr *);
+ int (*request_host_general_irq)(struct nvhost_intr *);
+ void (*free_host_general_irq)(struct nvhost_intr *);
+ int (*request_syncpt_irq)(struct nvhost_intr_syncpt *syncpt);
+ } intr;
+
+ struct {
+ int (*mutex_try_lock)(struct nvhost_cpuaccess *,
+ unsigned int idx);
+ void (*mutex_unlock)(struct nvhost_cpuaccess *,
+ unsigned int idx);
+ } cpuaccess;
+
+};
+
+
+int nvhost_init_t20_support(struct nvhost_master *host);
+int nvhost_init_t30_support(struct nvhost_master *host);
+
+#endif /* _NVHOST_CHIP_SUPPORT_H_ */
diff --git a/drivers/video/tegra/host/debug.c b/drivers/video/tegra/host/debug.c
new file mode 100644
index 000000000000..aa4b41d96574
--- /dev/null
+++ b/drivers/video/tegra/host/debug.c
@@ -0,0 +1,159 @@
+/*
+ * drivers/video/tegra/host/debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+
+pid_t nvhost_debug_null_kickoff_pid;
+unsigned int nvhost_debug_trace_cmdbuf;
+
+pid_t nvhost_debug_force_timeout_pid;
+u32 nvhost_debug_force_timeout_val;
+u32 nvhost_debug_force_timeout_channel;
+
+void nvhost_debug_output(struct output *o, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, fmt);
+ len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+ va_end(args);
+ /* vsnprintf() reports the would-be length; clamp it to the buffer */
+ len = min(len, (int)sizeof(o->buf) - 1);
+ o->fn(o->ctx, o->buf, len);
+}
+
+
+static void show_channels(struct nvhost_master *m, struct output *o)
+{
+ int i;
+ nvhost_debug_output(o, "---- channels ----\n");
+ for (i = 0; i < m->nb_channels; i++) {
+ struct nvhost_channel *ch = &m->channels[i];
+ mutex_lock(&ch->reflock);
+ if (ch->refcount) {
+ mutex_lock(&ch->cdma.lock);
+ m->op.debug.show_channel_fifo(m, o, i);
+ m->op.debug.show_channel_cdma(m, o, i);
+ mutex_unlock(&ch->cdma.lock);
+ }
+ mutex_unlock(&ch->reflock);
+ }
+}
+
+
+static void show_syncpts(struct nvhost_master *m, struct output *o)
+{
+ int i;
+ BUG_ON(!m->op.syncpt.name);
+ nvhost_debug_output(o, "---- syncpts ----\n");
+ for (i = 0; i < m->syncpt.nb_pts; i++) {
+ u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
+ if (!max)
+ continue;
+ nvhost_debug_output(o, "id %d (%s) min %d max %d\n",
+ i, m->op.syncpt.name(&m->syncpt, i),
+ nvhost_syncpt_update_min(&m->syncpt, i), max);
+ }
+
+ for (i = 0; i < m->syncpt.nb_bases; i++) {
+ u32 base_val;
+ base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i);
+ if (base_val)
+ nvhost_debug_output(o, "waitbase id %d val %d\n",
+ i, base_val);
+ }
+
+ nvhost_debug_output(o, "\n");
+}
+
+static void show_all(struct nvhost_master *m, struct output *o)
+{
+ nvhost_module_busy(&m->mod);
+
+ m->op.debug.show_mlocks(m, o);
+ show_syncpts(m, o);
+ show_channels(m, o);
+
+ nvhost_module_idle(&m->mod);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+static int nvhost_debug_show(struct seq_file *s, void *unused)
+{
+ struct output o = {
+ .fn = write_to_seqfile,
+ .ctx = s
+ };
+ show_all(s->private, &o);
+ return 0;
+}
+
+static int nvhost_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvhost_debug_show, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_fops = {
+ .open = nvhost_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nvhost_debug_init(struct nvhost_master *master)
+{
+ struct dentry *de = debugfs_create_dir("tegra_host", NULL);
+
+ debugfs_create_file("status", S_IRUGO, de,
+ master, &nvhost_debug_fops);
+
+ debugfs_create_u32("null_kickoff_pid", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_null_kickoff_pid);
+ debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_trace_cmdbuf);
+
+ if (master->op.debug.debug_init)
+ master->op.debug.debug_init(de);
+
+ debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_force_timeout_pid);
+ debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_force_timeout_val);
+ debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
+ &nvhost_debug_force_timeout_channel);
+}
+#else
+void nvhost_debug_init(struct nvhost_master *master)
+{
+}
+#endif
+
+void nvhost_debug_dump(struct nvhost_master *master)
+{
+ struct output o = {
+ .fn = write_to_printk
+ };
+ show_all(master, &o);
+}
diff --git a/drivers/video/tegra/host/debug.h b/drivers/video/tegra/host/debug.h
new file mode 100644
index 000000000000..874d5c87d57b
--- /dev/null
+++ b/drivers/video/tegra/host/debug.h
@@ -0,0 +1,52 @@
+/*
+ * drivers/video/tegra/host/debug.h
+ *
+ * Tegra Graphics Host Debug
+ *
+ * Copyright (c) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __NVHOST_DEBUG_H
+#define __NVHOST_DEBUG_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct output {
+ void (*fn)(void *ctx, const char *str, size_t len);
+ void *ctx;
+ char buf[256];
+};
+
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+{
+ seq_write((struct seq_file *)ctx, str, len);
+}
+
+static inline void write_to_printk(void *ctx, const char *str, size_t len)
+{
+ printk(KERN_INFO "%s", str);
+}
+
+void nvhost_debug_output(struct output *o, const char *fmt, ...);
+
+extern pid_t nvhost_debug_null_kickoff_pid;
+extern pid_t nvhost_debug_force_timeout_pid;
+extern u32 nvhost_debug_force_timeout_val;
+extern u32 nvhost_debug_force_timeout_channel;
+extern unsigned int nvhost_debug_trace_cmdbuf;
+
+#endif /* __NVHOST_DEBUG_H */
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c
new file mode 100644
index 000000000000..bdbb21fe9e7c
--- /dev/null
+++ b/drivers/video/tegra/host/dev.c
@@ -0,0 +1,1076 @@
+/*
+ * drivers/video/tegra/host/dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "dev.h"
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/nvhost.h>
+
+#include <linux/io.h>
+
+#include <linux/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+#include <mach/nvmap.h>
+#include <mach/gpufuse.h>
+#include <mach/hardware.h>
+
+#include "debug.h"
+#include "nvhost_job.h"
+
+#define DRIVER_NAME "tegra_grhost"
+#define IFACE_NAME "nvhost"
+#define TRACE_MAX_LENGTH 128U
+
+static int nvhost_major = NVHOST_MAJOR;
+static int nvhost_minor;
+static unsigned int register_sets;
+
+struct nvhost_channel_userctx {
+ struct nvhost_channel *ch;
+ struct nvhost_hwctx *hwctx;
+ struct nvhost_submit_hdr_ext hdr;
+ int num_relocshifts;
+ struct nvhost_job *job;
+ struct nvmap_client *nvmap;
+ u32 timeout;
+ u32 priority;
+ int clientid;
+};
+
+struct nvhost_ctrl_userctx {
+ struct nvhost_master *dev;
+ u32 *mod_locks;
+};
+
+/*
+ * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output
+ * and mmaps the cmdbuf contents if required.
+ */
+static void trace_write_cmdbufs(struct nvhost_job *job)
+{
+#if defined(CONFIG_TEGRA_NVMAP)
+ struct nvmap_handle_ref handle;
+ void *mem = NULL;
+ int i = 0;
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct nvhost_channel_gather *gather = &job->gathers[i];
+ if (nvhost_debug_trace_cmdbuf) {
+ handle.handle = nvmap_id_to_handle(gather->mem_id);
+ mem = nvmap_mmap(&handle);
+ if (IS_ERR_OR_NULL(mem))
+ mem = NULL;
+ }
+
+ if (mem) {
+ u32 w;
+ /*
+ * Write in batches of TRACE_MAX_LENGTH words, as there seems
+ * to be a limit on how much can be output to ftrace at once.
+ */
+ for (w = 0; w < gather->words; w += TRACE_MAX_LENGTH) {
+ trace_nvhost_channel_write_cmdbuf_data(
+ job->ch->desc->name,
+ gather->mem_id,
+ min(gather->words - w,
+ TRACE_MAX_LENGTH),
+ gather->offset + w * sizeof(u32),
+ mem);
+ }
+ nvmap_munmap(&handle, mem);
+ }
+ }
+#endif
+}
+
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+
+ trace_nvhost_channel_release(priv->ch->desc->name);
+
+ filp->private_data = NULL;
+
+ nvhost_module_remove_client(priv->ch->dev, &priv->ch->mod, priv);
+ nvhost_putchannel(priv->ch, priv->hwctx);
+
+ if (priv->hwctx)
+ priv->ch->ctxhandler.put(priv->hwctx);
+
+ if (priv->job)
+ nvhost_job_put(priv->job);
+
+ nvmap_client_put(priv->nvmap);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv;
+ struct nvhost_channel *ch;
+
+ ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+ ch = nvhost_getchannel(ch);
+ if (!ch)
+ return -ENOMEM;
+ trace_nvhost_channel_open(ch->desc->name);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ nvhost_putchannel(ch, NULL);
+ return -ENOMEM;
+ }
+ filp->private_data = priv;
+ priv->ch = ch;
+ nvhost_module_add_client(ch->dev, &ch->mod, priv);
+
+ if (ch->ctxhandler.alloc) {
+ priv->hwctx = ch->ctxhandler.alloc(ch);
+ if (!priv->hwctx)
+ goto fail;
+ }
+ priv->priority = NVHOST_PRIORITY_MEDIUM;
+ priv->clientid = atomic_add_return(1, &ch->dev->clientid);
+
+ priv->job = nvhost_job_alloc(ch, priv->hwctx, &priv->hdr,
+ NULL, priv->priority, priv->clientid);
+ if (!priv->job)
+ goto fail;
+
+ return 0;
+fail:
+ nvhost_channelrelease(inode, filp);
+ return -ENOMEM;
+}
+
+static int set_submit(struct nvhost_channel_userctx *ctx)
+{
+ struct device *device = &ctx->ch->dev->pdev->dev;
+
+ /* submit should have at least 1 cmdbuf */
+ if (!ctx->hdr.num_cmdbufs)
+ return -EIO;
+
+ if (!ctx->nvmap) {
+ dev_err(device, "no nvmap context set\n");
+ return -EFAULT;
+ }
+
+ ctx->job = nvhost_job_realloc(ctx->job,
+ &ctx->hdr,
+ ctx->nvmap,
+ ctx->priority,
+ ctx->clientid);
+ if (!ctx->job)
+ return -ENOMEM;
+ ctx->job->timeout = ctx->timeout;
+
+ if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
+ ctx->num_relocshifts = ctx->hdr.num_relocs;
+
+ return 0;
+}
+
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+ ctx->hdr.num_cmdbufs = 0;
+ ctx->hdr.num_relocs = 0;
+ ctx->num_relocshifts = 0;
+ ctx->hdr.num_waitchks = 0;
+}
+
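+/*
+ * A submit is streamed into the channel node: a nvhost_submit_hdr (or the
+ * NVHOST_IOCTL_CHANNEL_SUBMIT_EXT ioctl for newer clients) announces the
+ * counts, then the cmdbuf, reloc and waitchk entries follow in that order.
+ * The loop below consumes the stream incrementally, so one write() may
+ * carry any partial slice of it. A rough userspace sketch:
+ *
+ *   write(fd, &hdr, sizeof(struct nvhost_submit_hdr));
+ *   write(fd, cmdbufs, hdr.num_cmdbufs * sizeof(struct nvhost_cmdbuf));
+ *   write(fd, relocs, hdr.num_relocs * sizeof(struct nvhost_reloc));
+ *   write(fd, waitchks, hdr.num_waitchks * sizeof(struct nvhost_waitchk));
+ *   ioctl(fd, NVHOST_IOCTL_CHANNEL_FLUSH, &flush_args);
+ */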
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ size_t remaining = count;
+ int err = 0;
+ struct nvhost_job *job = priv->job;
+ struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
+ const char *chname = priv->ch->desc->name;
+
+ while (remaining) {
+ size_t consumed;
+ if (!hdr->num_relocs &&
+ !priv->num_relocshifts &&
+ !hdr->num_cmdbufs &&
+ !hdr->num_waitchks) {
+ consumed = sizeof(struct nvhost_submit_hdr);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(hdr, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
+ err = set_submit(priv);
+ if (err)
+ break;
+ trace_nvhost_channel_write_submit(chname,
+ count, hdr->num_cmdbufs, hdr->num_relocs,
+ hdr->syncpt_id, hdr->syncpt_incrs);
+ } else if (hdr->num_cmdbufs) {
+ struct nvhost_cmdbuf cmdbuf;
+ consumed = sizeof(cmdbuf);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(&cmdbuf, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ trace_nvhost_channel_write_cmdbuf(chname,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+ nvhost_job_add_gather(job,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+ hdr->num_cmdbufs--;
+ } else if (hdr->num_relocs) {
+ struct nvmap_pinarray_elem *elem =
+ &job->pinarray[job->num_pins];
+ consumed = sizeof(struct nvhost_reloc);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(elem, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ elem->patch_mem =
+ nvmap_convert_handle_u2k(elem->patch_mem);
+ elem->pin_mem =
+ nvmap_convert_handle_u2k(elem->pin_mem);
+ trace_nvhost_channel_write_reloc(chname);
+ job->num_pins++;
+ hdr->num_relocs--;
+ } else if (hdr->num_waitchks) {
+ struct nvhost_waitchk *waitchk =
+ &job->waitchk[job->num_waitchk];
+ consumed = sizeof(struct nvhost_waitchk);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(waitchk, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ waitchk->mem = nvmap_convert_handle_u2k(waitchk->mem);
+ trace_nvhost_channel_write_waitchks(
+ chname, 1,
+ hdr->waitchk_mask);
+ job->num_waitchk++;
+ hdr->num_waitchks--;
+ } else if (priv->num_relocshifts) {
+ int next_shift =
+ job->num_pins - priv->num_relocshifts;
+ consumed = sizeof(struct nvhost_reloc_shift);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(
+ &job->pinarray[next_shift].reloc_shift,
+ buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ priv->num_relocshifts--;
+ } else {
+ err = -EFAULT;
+ break;
+ }
+ remaining -= consumed;
+ buf += consumed;
+ }
+
+ if (err < 0) {
+ dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
+ reset_submit(priv);
+ return err;
+ }
+
+ return count - remaining;
+}
+
+static int nvhost_ioctl_channel_flush(
+ struct nvhost_channel_userctx *ctx,
+ struct nvhost_get_param_args *args,
+ int null_kickoff)
+{
+ struct device *device = &ctx->ch->dev->pdev->dev;
+ int err;
+
+ trace_nvhost_ioctl_channel_flush(ctx->ch->desc->name);
+
+ if (!ctx->job ||
+ ctx->hdr.num_relocs ||
+ ctx->hdr.num_cmdbufs ||
+ ctx->hdr.num_waitchks) {
+ reset_submit(ctx);
+ dev_err(device, "channel submit out of sync\n");
+ return -EFAULT;
+ }
+
+ err = nvhost_job_pin(ctx->job);
+ if (err) {
+ dev_warn(device, "nvhost_job_pin failed: %d\n", err);
+ return err;
+ }
+
+ if (nvhost_debug_null_kickoff_pid == current->tgid)
+ null_kickoff = 1;
+ ctx->job->null_kickoff = null_kickoff;
+
+ if ((nvhost_debug_force_timeout_pid == current->tgid) &&
+ (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
+ ctx->timeout = nvhost_debug_force_timeout_val;
+ }
+
+ trace_write_cmdbufs(ctx->job);
+
+ /* context switch if needed, and submit user's gathers to the channel */
+ err = nvhost_channel_submit(ctx->job);
+ args->value = ctx->job->syncpt_end;
+ if (err)
+ nvhost_job_unpin(ctx->job);
+
+ return err;
+}
+
+static int nvhost_ioctl_channel_read_3d_reg(
+ struct nvhost_channel_userctx *ctx,
+ struct nvhost_read_3d_reg_args *args)
+{
+ BUG_ON(!channel_op(ctx->ch).read3dreg);
+ return channel_op(ctx->ch).read3dreg(ctx->ch, ctx->hwctx,
+ args->offset, &args->value);
+}
+
+static long nvhost_channelctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
+ return -EFAULT;
+
+ BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CHANNEL_FLUSH:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
+ break;
+ case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
+ break;
+ case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
+ {
+ struct nvhost_submit_hdr_ext *hdr;
+
+ if (priv->hdr.num_relocs ||
+ priv->num_relocshifts ||
+ priv->hdr.num_cmdbufs ||
+ priv->hdr.num_waitchks) {
+ reset_submit(priv);
+ dev_err(&priv->ch->dev->pdev->dev,
+ "channel submit out of sync\n");
+ err = -EIO;
+ break;
+ }
+
+ hdr = (struct nvhost_submit_hdr_ext *)buf;
+ if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
+ dev_err(&priv->ch->dev->pdev->dev,
+ "submit version %d > max supported %d\n",
+ hdr->submit_version,
+ NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
+ err = -EINVAL;
+ break;
+ }
+ memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
+ err = set_submit(priv);
+ trace_nvhost_ioctl_channel_submit(priv->ch->desc->name,
+ priv->hdr.submit_version,
+ priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
+ priv->hdr.num_waitchks,
+ priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+ /* the host syncpt ID is used by the RM and must never be given out */
+ BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->syncpts;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->waitbases;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->modulemutexes;
+ break;
+ case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+ {
+ int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+ struct nvmap_client *new_client = nvmap_client_get_file(fd);
+
+ if (IS_ERR(new_client)) {
+ err = PTR_ERR(new_client);
+ break;
+ }
+
+ if (priv->nvmap)
+ nvmap_client_put(priv->nvmap);
+
+ priv->nvmap = new_client;
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
+ err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
+ {
+ unsigned long rate;
+ struct nvhost_clk_rate_args *arg =
+ (struct nvhost_clk_rate_args *)buf;
+
+ err = nvhost_module_get_rate(priv->ch->dev,
+ &priv->ch->mod, &rate, 0);
+ if (err == 0)
+ arg->rate = rate;
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
+ {
+ struct nvhost_clk_rate_args *arg =
+ (struct nvhost_clk_rate_args *)buf;
+ unsigned long rate = (unsigned long)arg->rate;
+
+ err = nvhost_module_set_rate(priv->ch->dev,
+ &priv->ch->mod, priv, rate, 0);
+ break;
+ }
+ case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
+ priv->timeout =
+ (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+ dev_dbg(&priv->ch->dev->pdev->dev,
+ "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
+ __func__, priv->timeout, priv);
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->hwctx->has_timedout;
+ break;
+ case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
+ priv->priority =
+ (u32)((struct nvhost_set_priority_args *)buf)->priority;
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
+
+ return err;
+}
+
+static const struct file_operations nvhost_channelops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_channelrelease,
+ .open = nvhost_channelopen,
+ .write = nvhost_channelwrite,
+ .unlocked_ioctl = nvhost_channelctl
+};
+
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ int i;
+
+ trace_nvhost_ctrlrelease(priv->dev->mod.name);
+
+ filp->private_data = NULL;
+ if (priv->mod_locks[0])
+ nvhost_module_idle(&priv->dev->mod);
+ for (i = 1; i < priv->dev->nb_mlocks; i++)
+ if (priv->mod_locks[i])
+ nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
+ kfree(priv->mod_locks);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
+ struct nvhost_ctrl_userctx *priv;
+ u32 *mod_locks;
+
+ trace_nvhost_ctrlopen(host->mod.name);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ mod_locks = kzalloc(sizeof(u32)*host->nb_mlocks, GFP_KERNEL);
+
+ if (!(priv && mod_locks)) {
+ kfree(priv);
+ kfree(mod_locks);
+ return -ENOMEM;
+ }
+
+ priv->dev = host;
+ priv->mod_locks = mod_locks;
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_read(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_read_args *args)
+{
+ if (args->id >= ctx->dev->syncpt.nb_pts)
+ return -EINVAL;
+ trace_nvhost_ioctl_ctrl_syncpt_read(args->id);
+ args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_incr(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_incr_args *args)
+{
+ if (args->id >= ctx->dev->syncpt.nb_pts)
+ return -EINVAL;
+ trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
+ nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_waitex(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_waitex_args *args)
+{
+ u32 timeout;
+ if (args->id >= ctx->dev->syncpt.nb_pts)
+ return -EINVAL;
+ if (args->timeout == NVHOST_NO_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = (u32)msecs_to_jiffies(args->timeout);
+
+ trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
+ args->timeout);
+ return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+ args->thresh, timeout, &args->value);
+}
+
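+/*
+ * Module lock 0 stands for the host1x module itself and is taken as a
+ * busy/idle reference; any other index maps to a hardware module mutex.
+ */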
+static int nvhost_ioctl_ctrl_module_mutex(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_mutex_args *args)
+{
+ int err = 0;
+ if (args->id >= ctx->dev->nb_mlocks ||
+ args->lock > 1)
+ return -EINVAL;
+
+ trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
+ if (args->lock && !ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_busy(&ctx->dev->mod);
+ else
+ err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
+ if (!err)
+ ctx->mod_locks[args->id] = 1;
+ } else if (!args->lock && ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_idle(&ctx->dev->mod);
+ else
+ nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
+ ctx->mod_locks[args->id] = 0;
+ }
+ return err;
+}
+
+static int nvhost_ioctl_ctrl_module_regrdwr(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_regrdwr_args *args)
+{
+ u32 num_offsets = args->num_offsets;
+ u32 *offsets = args->offsets;
+ void *values = args->values;
+ u32 vals[64];
+
+ trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
+ args->num_offsets, args->write);
+ if (!(args->id < ctx->dev->nb_modules) ||
+ (num_offsets == 0))
+ return -EINVAL;
+
+ while (num_offsets--) {
+ u32 remaining = args->block_size;
+ u32 offs;
+ if (get_user(offs, offsets))
+ return -EFAULT;
+ offsets++;
+ while (remaining) {
+ u32 batch = min(remaining, 64*sizeof(u32));
+ if (args->write) {
+ if (copy_from_user(vals, values, batch))
+ return -EFAULT;
+ nvhost_write_module_regs(&ctx->dev->cpuaccess,
+ args->id, offs, batch, vals);
+ } else {
+ nvhost_read_module_regs(&ctx->dev->cpuaccess,
+ args->id, offs, batch, vals);
+ if (copy_to_user(values, vals, batch))
+ return -EFAULT;
+ }
+ remaining -= batch;
+ offs += batch;
+ values += batch;
+ }
+ }
+
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_get_version(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_get_param_args *args)
+{
+ args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
+ return 0;
+}
+
+static long nvhost_ctrlctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
+ return -EFAULT;
+
+ BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+ err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+ err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+ err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+ err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+ err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
+ err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_GET_VERSION:
+ err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
+
+ return err;
+}
+
+static const struct file_operations nvhost_ctrlops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_ctrlrelease,
+ .open = nvhost_ctrlopen,
+ .unlocked_ioctl = nvhost_ctrlctl
+};
+
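+/*
+ * Power hooks wired up through "hostdesc" below: syncpt state is saved
+ * before host1x is powered down and restored when it comes back up.
+ */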
+static void power_on_host(struct nvhost_module *mod)
+{
+ struct nvhost_master *dev =
+ container_of(mod, struct nvhost_master, mod);
+
+ nvhost_intr_start(&dev->intr, clk_get_rate(mod->clk[0]));
+ nvhost_syncpt_reset(&dev->syncpt);
+}
+
+static int power_off_host(struct nvhost_module *mod)
+{
+ struct nvhost_master *dev =
+ container_of(mod, struct nvhost_master, mod);
+
+ nvhost_syncpt_save(&dev->syncpt);
+ nvhost_intr_stop(&dev->intr);
+ return 0;
+}
+
+static int __devinit nvhost_user_init(struct nvhost_master *host)
+{
+ int i, err;
+ dev_t devno;
+
+ host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+ if (IS_ERR(host->nvhost_class)) {
+ err = PTR_ERR(host->nvhost_class);
+ dev_err(&host->pdev->dev, "failed to create class\n");
+ goto fail;
+ }
+
+ if (nvhost_major) {
+ devno = MKDEV(nvhost_major, nvhost_minor);
+ err = register_chrdev_region(devno, host->nb_channels + 1,
+ IFACE_NAME);
+ } else {
+ err = alloc_chrdev_region(&devno, nvhost_minor,
+ host->nb_channels + 1, IFACE_NAME);
+ nvhost_major = MAJOR(devno);
+ }
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
+ goto fail;
+ }
+
+ for (i = 0; i < host->nb_channels; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+
+ cdev_init(&ch->cdev, &nvhost_channelops);
+ ch->cdev.owner = THIS_MODULE;
+
+ devno = MKDEV(nvhost_major, nvhost_minor + i);
+ err = cdev_add(&ch->cdev, devno, 1);
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
+ goto fail;
+ }
+ ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-%s", ch->desc->name);
+ if (IS_ERR(ch->node)) {
+ err = PTR_ERR(ch->node);
+ dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
+ goto fail;
+ }
+ }
+
+ cdev_init(&host->cdev, &nvhost_ctrlops);
+ host->cdev.owner = THIS_MODULE;
+ devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels);
+ err = cdev_add(&host->cdev, devno, 1);
+ if (err < 0)
+ goto fail;
+ host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-ctrl");
+ if (IS_ERR(host->ctrl)) {
+ err = PTR_ERR(host->ctrl);
+ dev_err(&host->pdev->dev, "failed to create ctrl device\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static void nvhost_remove_chip_support(struct nvhost_master *host)
+{
+
+ kfree(host->channels);
+ host->channels = NULL;
+
+ kfree(host->syncpt.min_val);
+ host->syncpt.min_val = NULL;
+
+ kfree(host->syncpt.max_val);
+ host->syncpt.max_val = NULL;
+
+ kfree(host->syncpt.base_val);
+ host->syncpt.base_val = NULL;
+
+ kfree(host->intr.syncpt);
+ host->intr.syncpt = NULL;
+
+ kfree(host->cpuaccess.regs);
+ host->cpuaccess.regs = NULL;
+
+ kfree(host->cpuaccess.reg_mem);
+ host->cpuaccess.reg_mem = NULL;
+
+ kfree(host->cpuaccess.lock_counts);
+ host->cpuaccess.lock_counts = NULL;
+}
+
+static int __devinit nvhost_init_chip_support(struct nvhost_master *host)
+{
+ int err;
+ switch (tegra_get_chipid()) {
+ case TEGRA_CHIPID_TEGRA2:
+ err = nvhost_init_t20_support(host);
+ break;
+
+ case TEGRA_CHIPID_TEGRA3:
+ err = nvhost_init_t30_support(host);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (err)
+ return err;
+
+ /* allocate items sized in chip specific support init */
+ host->channels = kzalloc(sizeof(struct nvhost_channel) *
+ host->nb_channels, GFP_KERNEL);
+
+ host->syncpt.min_val = kzalloc(sizeof(atomic_t) *
+ host->syncpt.nb_pts, GFP_KERNEL);
+
+ host->syncpt.max_val = kzalloc(sizeof(atomic_t) *
+ host->syncpt.nb_pts, GFP_KERNEL);
+
+ host->syncpt.base_val = kzalloc(sizeof(u32) *
+ host->syncpt.nb_bases, GFP_KERNEL);
+
+ host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
+ host->syncpt.nb_pts, GFP_KERNEL);
+
+ host->cpuaccess.reg_mem = kzalloc(sizeof(struct resource *) *
+ host->nb_modules, GFP_KERNEL);
+
+ host->cpuaccess.regs = kzalloc(sizeof(void __iomem *) *
+ host->nb_modules, GFP_KERNEL);
+
+ host->cpuaccess.lock_counts = kzalloc(sizeof(atomic_t) *
+ host->nb_mlocks, GFP_KERNEL);
+
+ if (!(host->channels && host->syncpt.min_val &&
+ host->syncpt.max_val && host->syncpt.base_val &&
+ host->intr.syncpt && host->cpuaccess.reg_mem &&
+ host->cpuaccess.regs && host->cpuaccess.lock_counts)) {
+ /* frees happen in the support removal phase */
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+const struct nvhost_moduledesc hostdesc = {
+ .finalize_poweron = power_on_host,
+ .prepare_poweroff = power_off_host,
+ .clocks = {{"host1x", UINT_MAX}, {} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+};
+
+static int __devinit nvhost_probe(struct platform_device *pdev)
+{
+ struct nvhost_master *host;
+ struct resource *regs, *intr0, *intr1;
+ int i, err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!regs || !intr0 || !intr1) {
+ dev_err(&pdev->dev, "missing required platform resources\n");
+ return -ENXIO;
+ }
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+
+ host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
+ if (!host->nvmap) {
+ dev_err(&pdev->dev, "unable to create nvmap client\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ host->reg_mem = request_mem_region(regs->start,
+ resource_size(regs), pdev->name);
+ if (!host->reg_mem) {
+ dev_err(&pdev->dev, "failed to get host register memory\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->aperture = ioremap(regs->start, resource_size(regs));
+ if (!host->aperture) {
+ dev_err(&pdev->dev, "failed to remap host registers\n");
+ err = -ENXIO;
+ goto fail;
+ }
+
+ err = nvhost_init_chip_support(host);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init chip support\n");
+ goto fail;
+ }
+
+ for (i = 0; i < host->nb_channels; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+ BUG_ON(!host_channel_op(host).init);
+ err = host_channel_op(host).init(ch, host, i);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init channel %d\n", i);
+ goto fail;
+ }
+ }
+
+ err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
+ if (err)
+ goto fail;
+
+ err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+ if (err)
+ goto fail;
+
+ err = nvhost_user_init(host);
+ if (err)
+ goto fail;
+
+ err = nvhost_module_init(&host->mod, "host1x",
+ &hostdesc, NULL, &pdev->dev);
+ for (i = 0; i < host->nb_channels; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+ nvhost_module_preinit(ch->desc->name,
+ &ch->desc->module);
+ }
+
+ if (err)
+ goto fail;
+
+
+ platform_set_drvdata(pdev, host);
+
+ clk_enable(host->mod.clk[0]);
+ nvhost_syncpt_reset(&host->syncpt);
+ clk_disable(host->mod.clk[0]);
+
+ nvhost_bus_register(host);
+
+ nvhost_debug_init(host);
+
+ dev_info(&pdev->dev, "initialized\n");
+ return 0;
+
+fail:
+ nvhost_remove_chip_support(host);
+ if (host->nvmap)
+ nvmap_client_put(host->nvmap);
+ kfree(host);
+ return err;
+}
+
+static int __exit nvhost_remove(struct platform_device *pdev)
+{
+ struct nvhost_master *host = platform_get_drvdata(pdev);
+ nvhost_remove_chip_support(host);
+ return 0;
+}
+
+static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nvhost_master *host = platform_get_drvdata(pdev);
+ int i, ret;
+ dev_info(&pdev->dev, "suspending\n");
+
+ for (i = 0; i < host->nb_channels; i++) {
+ ret = nvhost_channel_suspend(&host->channels[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = nvhost_module_suspend(&host->mod, true);
+ dev_info(&pdev->dev, "suspend status: %d\n", ret);
+ return ret;
+}
+
+static int nvhost_resume(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "resuming\n");
+ return 0;
+}
+
+static struct platform_driver nvhost_driver = {
+ .remove = __exit_p(nvhost_remove),
+ .suspend = nvhost_suspend,
+ .resume = nvhost_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME
+ }
+};
+
+static int __init nvhost_mod_init(void)
+{
+ register_sets = tegra_gpu_register_sets();
+ return platform_driver_probe(&nvhost_driver, nvhost_probe);
+}
+
+static void __exit nvhost_mod_exit(void)
+{
+ platform_driver_unregister(&nvhost_driver);
+}
+
+module_init(nvhost_mod_init);
+module_exit(nvhost_mod_exit);
+
+module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
+MODULE_PARM_DESC(register_sets, "Number of register sets");
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Graphics host driver for Tegra products");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/video/tegra/host/dev.h b/drivers/video/tegra/host/dev.h
new file mode 100644
index 000000000000..3d05da8c53e8
--- /dev/null
+++ b/drivers/video/tegra/host/dev.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/video/tegra/host/dev.h
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_DEV_H
+#define __NVHOST_DEV_H
+
+#include "nvhost_acm.h"
+#include "nvhost_syncpt.h"
+#include "nvhost_intr.h"
+#include "nvhost_cpuaccess.h"
+#include "nvhost_channel.h"
+#include "chip_support.h"
+
+#define NVHOST_MAJOR 0 /* dynamic */
+struct nvhost_hwctx;
+
+struct nvhost_master {
+ void __iomem *aperture;
+ void __iomem *sync_aperture;
+ struct resource *reg_mem;
+ struct platform_device *pdev;
+ struct class *nvhost_class;
+ struct cdev cdev;
+ struct device *ctrl;
+ struct nvhost_syncpt syncpt;
+ struct nvmap_client *nvmap;
+ struct nvhost_cpuaccess cpuaccess;
+ u32 nb_mlocks;
+ struct nvhost_intr intr;
+ struct nvhost_module mod;
+ struct nvhost_channel *channels;
+ u32 nb_channels;
+ u32 nb_modules;
+
+ u32 sync_queue_size;
+
+ struct nvhost_chip_support op;
+
+ atomic_t clientid;
+};
+
+void nvhost_debug_init(struct nvhost_master *master);
+void nvhost_debug_dump(struct nvhost_master *master);
+
+extern pid_t nvhost_debug_null_kickoff_pid;
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/Makefile b/drivers/video/tegra/host/gr3d/Makefile
new file mode 100644
index 000000000000..dfbd078ab423
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/Makefile
@@ -0,0 +1,10 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-gr3d-objs = \
+ gr3d.o \
+ gr3d_t20.o \
+ gr3d_t30.o \
+ scale3d.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr3d.o
diff --git a/drivers/video/tegra/host/gr3d/gr3d.c b/drivers/video/tegra/host/gr3d/gr3d.c
new file mode 100644
index 000000000000..f7f892b883a0
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d.c
@@ -0,0 +1,154 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d.c
+ *
+ * Tegra Graphics Host 3D
+ *
+ * Copyright (c) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*** restore ***/
+
+#include <mach/nvmap.h>
+#include <linux/slab.h>
+#include "t20/t20.h"
+#include "host1x/host1x_channel.h"
+#include "host1x/host1x_hardware.h"
+#include "host1x/host1x_syncpt.h"
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "gr3d.h"
+
+unsigned int nvhost_3dctx_restore_size;
+unsigned int nvhost_3dctx_restore_incrs;
+struct nvmap_handle_ref *nvhost_3dctx_save_buf;
+unsigned int nvhost_3dctx_save_incrs;
+unsigned int nvhost_3dctx_save_thresh;
+unsigned int nvhost_3dctx_save_slots;
+
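+/*
+ * The restore_* helpers below emit host1x opcodes into a context's restore
+ * buffer; the chip-specific files (gr3d_t20.c, gr3d_t30.c) decide which
+ * register ranges they cover.
+ */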
+void nvhost_3dctx_restore_begin(u32 *ptr)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D,
+ nvhost_3dctx_restore_incrs);
+ /* set class to 3D */
+ ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* program PSEQ_QUAD_ID */
+ ptr[3] = nvhost_opcode_imm(AR3D_PSEQ_QUAD_ID, 0);
+}
+
+void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+
+void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, u32 offset,
+ u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+ ptr[1] = nvhost_opcode_nonincr(data_reg, count);
+}
+
+void nvhost_3dctx_restore_end(u32 *ptr)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm_incr_syncpt(
+ NV_SYNCPT_OP_DONE, NVSYNCPT_3D);
+}
+
+/*** ctx3d ***/
+
+struct nvhost_hwctx *nvhost_3dctx_alloc_common(struct nvhost_channel *ch,
+ bool map_restore)
+{
+ struct nvmap_client *nvmap = ch->dev->nvmap;
+ struct nvhost_hwctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->restore = nvmap_alloc(nvmap, nvhost_3dctx_restore_size * 4, 32,
+ map_restore ? NVMAP_HANDLE_WRITE_COMBINE
+ : NVMAP_HANDLE_UNCACHEABLE);
+ if (IS_ERR_OR_NULL(ctx->restore))
+ goto fail;
+
+ if (map_restore) {
+ ctx->restore_virt = nvmap_mmap(ctx->restore);
+ if (!ctx->restore_virt)
+ goto fail;
+ } else
+ ctx->restore_virt = NULL;
+
+ kref_init(&ctx->ref);
+ ctx->channel = ch;
+ ctx->valid = false;
+ ctx->save = nvhost_3dctx_save_buf;
+ ctx->save_incrs = nvhost_3dctx_save_incrs;
+ ctx->save_thresh = nvhost_3dctx_save_thresh;
+ ctx->save_slots = nvhost_3dctx_save_slots;
+ ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
+ if (IS_ERR_VALUE(ctx->restore_phys))
+ goto fail;
+
+ ctx->restore_size = nvhost_3dctx_restore_size;
+ ctx->restore_incrs = nvhost_3dctx_restore_incrs;
+ return ctx;
+
+fail:
+ if (map_restore && ctx->restore_virt) {
+ nvmap_munmap(ctx->restore, ctx->restore_virt);
+ ctx->restore_virt = NULL;
+ }
+ /* nvmap_alloc() returns an ERR_PTR on failure; only free a real handle */
+ if (!IS_ERR_OR_NULL(ctx->restore))
+ nvmap_free(nvmap, ctx->restore);
+ ctx->restore = NULL;
+ kfree(ctx);
+ return NULL;
+}
+
+void nvhost_3dctx_get(struct nvhost_hwctx *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+void nvhost_3dctx_free(struct kref *ref)
+{
+ struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
+ struct nvmap_client *nvmap = ctx->channel->dev->nvmap;
+
+ if (ctx->restore_virt) {
+ nvmap_munmap(ctx->restore, ctx->restore_virt);
+ ctx->restore_virt = NULL;
+ }
+ nvmap_unpin(nvmap, ctx->restore);
+ ctx->restore_phys = 0;
+ nvmap_free(nvmap, ctx->restore);
+ ctx->restore = NULL;
+ kfree(ctx);
+}
+
+void nvhost_3dctx_put(struct nvhost_hwctx *ctx)
+{
+ kref_put(&ctx->ref, nvhost_3dctx_free);
+}
+
+int nvhost_gr3d_prepare_power_off(struct nvhost_module *mod)
+{
+ return host1x_save_context(mod, NVSYNCPT_3D);
+}
diff --git a/drivers/video/tegra/host/gr3d/gr3d.h b/drivers/video/tegra/host/gr3d/gr3d.h
new file mode 100644
index 000000000000..cdaea188ebf2
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d.h
@@ -0,0 +1,62 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d.h
+ *
+ * Tegra Graphics Host 3D
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_H
+#define __NVHOST_GR3D_GR3D_H
+
+#include <linux/types.h>
+
+/* Registers of 3D unit */
+
+#define AR3D_PSEQ_QUAD_ID 0x545
+#define AR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
+#define AR3D_DW_MEMORY_OUTPUT_DATA 0x905
+#define AR3D_GSHIM_WRITE_MASK 0xb00
+#define AR3D_GSHIM_READ_SELECT 0xb01
+#define AR3D_GLOBAL_MEMORY_OUTPUT_READS 0xe40
+
+/* Internal variables used by common 3D context switch functions */
+extern unsigned int nvhost_3dctx_restore_size;
+extern unsigned int nvhost_3dctx_restore_incrs;
+extern struct nvmap_handle_ref *nvhost_3dctx_save_buf;
+extern unsigned int nvhost_3dctx_save_incrs;
+extern unsigned int nvhost_3dctx_save_thresh;
+extern unsigned int nvhost_3dctx_save_slots;
+
+struct nvhost_hwctx;
+struct nvhost_channel;
+struct kref;
+struct nvhost_module;
+
+/* Functions used commonly by all 3D context switch modules */
+void nvhost_3dctx_restore_begin(u32 *ptr);
+void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count);
+void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg,
+ u32 offset, u32 data_reg, u32 count);
+void nvhost_3dctx_restore_end(u32 *ptr);
+struct nvhost_hwctx *nvhost_3dctx_alloc_common(
+ struct nvhost_channel *ch, bool map_restore);
+void nvhost_3dctx_get(struct nvhost_hwctx *ctx);
+void nvhost_3dctx_free(struct kref *ref);
+void nvhost_3dctx_put(struct nvhost_hwctx *ctx);
+int nvhost_gr3d_prepare_power_off(struct nvhost_module *mod);
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.c b/drivers/video/tegra/host/gr3d/gr3d_t20.c
new file mode 100644
index 000000000000..c1b25bd164c3
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t20.c
@@ -0,0 +1,385 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t20.c
+ *
+ * Tegra Graphics Host 3D for Tegra2
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "host1x/host1x_channel.h"
+#include "host1x/host1x_hardware.h"
+#include "host1x/host1x_syncpt.h"
+#include "gr3d.h"
+
+#include <linux/slab.h>
+
+static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
+ HWCTX_REGINFO(0xe00, 4, DIRECT),
+ HWCTX_REGINFO(0xe05, 30, DIRECT),
+ HWCTX_REGINFO(0xe25, 2, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0x001, 2, DIRECT),
+ HWCTX_REGINFO(0x00c, 10, DIRECT),
+ HWCTX_REGINFO(0x100, 34, DIRECT),
+ HWCTX_REGINFO(0x124, 2, DIRECT),
+ HWCTX_REGINFO(0x200, 5, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 25, DIRECT),
+ HWCTX_REGINFO(0x363, 2, DIRECT),
+ HWCTX_REGINFO(0x400, 16, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 4, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 50, DIRECT),
+ HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 2, DIRECT),
+ HWCTX_REGINFO(0xa02, 10, DIRECT),
+ HWCTX_REGINFO(0xe04, 1, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+};
+
+/* the same context save command sequence is used for all contexts. */
+static phys_addr_t save_phys;
+static unsigned int save_size;
+
+#define SAVE_BEGIN_V0_SIZE 5
+#define SAVE_DIRECT_V0_SIZE 3
+#define SAVE_INDIRECT_V0_SIZE 5
+#define SAVE_END_V0_SIZE 5
+#define SAVE_INCRS 3
+#define SAVE_THRESH_OFFSET 1
+#define RESTORE_BEGIN_SIZE 4
+#define RESTORE_DIRECT_SIZE 1
+#define RESTORE_INDIRECT_SIZE 2
+#define RESTORE_END_SIZE 1
+
+struct save_info {
+ u32 *ptr;
+ unsigned int save_count;
+ unsigned int restore_count;
+ unsigned int save_incrs;
+ unsigned int restore_incrs;
+};
+
+static u32 *setup_restore_regs_v0(u32 *ptr,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ u32 indoff = offset + 1;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ nvhost_3dctx_restore_direct(ptr, offset, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ++indoff;
+ /* fall through */
+ case HWCTX_REGINFO_INDIRECT:
+ nvhost_3dctx_restore_indirect(ptr,
+ offset, 0, indoff, count);
+ ptr += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ ptr += count;
+ }
+ return ptr;
+}
+
+static void setup_restore_v0(u32 *ptr)
+{
+ nvhost_3dctx_restore_begin(ptr);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ ptr = setup_restore_regs_v0(ptr,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ nvhost_3dctx_restore_end(ptr);
+
+ wmb();
+}
+
+/*** v0 saver ***/
+
+static void save_push_v0(struct nvhost_cdma *cdma,
+ struct nvhost_hwctx *ctx)
+{
+ nvhost_cdma_push_gather(cdma,
+ (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
+ (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
+ nvhost_opcode_gather(save_size),
+ save_phys);
+}
+
+static void __init save_begin_v0(u32 *ptr)
+{
+ /* 3d: when done, increment syncpt to base+1 */
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
+ NVSYNCPT_3D); /* incr 1 */
+ /* host: wait for syncpt base+1 */
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ ptr[3] = nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
+ NVWAITBASE_3D, 1);
+ /* host: signal context read thread to start reading */
+ ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
+ NVSYNCPT_3D); /* incr 2 */
+}
+
+static void __init save_direct_v0(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ start_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+
+static void __init save_indirect_v0(u32 *ptr, u32 offset_reg, u32 offset,
+ u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+ offset_reg, 1);
+ ptr[1] = offset;
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INDOFF, 1);
+ ptr[3] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ data_reg, false);
+ ptr[4] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+
+static void __init save_end_v0(u32 *ptr)
+{
+ /* Wait for context read service to finish (cpu incr 3) */
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ ptr[1] = nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
+ NVWAITBASE_3D, nvhost_3dctx_save_incrs);
+ /* Advance syncpoint base */
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ ptr[3] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D,
+ nvhost_3dctx_save_incrs);
+ /* set class back to the unit */
+ ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+
+static u32 *save_regs_v0(u32 *ptr, unsigned int *pending,
+ void __iomem *chan_regs,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ int drain_result = 0;
+
+ for ( ; regs != rend; ++regs) {
+ u32 count = regs->count;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ptr += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ drain_result = host1x_drain_read_fifo(chan_regs,
+ ptr, count, pending);
+ BUG_ON(drain_result < 0);
+ ptr += count;
+ }
+ return ptr;
+}
+
+/*** save ***/
+
+static void __init setup_save_regs(struct save_info *info,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ u32 indoff = offset + 1;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ if (ptr) {
+ save_direct_v0(ptr, offset, count);
+ ptr += SAVE_DIRECT_V0_SIZE;
+ }
+ save_count += SAVE_DIRECT_V0_SIZE;
+ restore_count += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ++indoff;
+ /* fall through */
+ case HWCTX_REGINFO_INDIRECT:
+ if (ptr) {
+ save_indirect_v0(ptr, offset, 0,
+ indoff, count);
+ ptr += SAVE_INDIRECT_V0_SIZE;
+ }
+ save_count += SAVE_INDIRECT_V0_SIZE;
+ restore_count += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ if (ptr) {
+ /* SAVE cases only: reserve room for incoming data */
+ u32 k = 0;
+ /*
+ * Create a signature pattern for indirect data (which
+ * will be overwritten by true incoming data) for
+ * better deducing where we are in a long command
+ * sequence, when given only a FIFO snapshot for debug
+ * purposes.
+ */
+ for (k = 0; k < count; k++)
+ *(ptr + k) = 0xd000d000 | (offset << 16) | k;
+ ptr += count;
+ }
+ save_count += count;
+ restore_count += count;
+ }
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void __init setup_save(u32 *ptr)
+{
+ struct save_info info = {
+ ptr,
+ SAVE_BEGIN_V0_SIZE,
+ RESTORE_BEGIN_SIZE,
+ SAVE_INCRS,
+ 1
+ };
+
+ if (info.ptr) {
+ save_begin_v0(info.ptr);
+ info.ptr += SAVE_BEGIN_V0_SIZE;
+ }
+
+ /* save regs */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ if (info.ptr) {
+ save_end_v0(info.ptr);
+ info.ptr += SAVE_END_V0_SIZE;
+ }
+
+ wmb();
+
+ save_size = info.save_count + SAVE_END_V0_SIZE;
+ nvhost_3dctx_restore_size = info.restore_count + RESTORE_END_SIZE;
+ nvhost_3dctx_save_incrs = info.save_incrs;
+ nvhost_3dctx_save_thresh =
+ nvhost_3dctx_save_incrs - SAVE_THRESH_OFFSET;
+ nvhost_3dctx_restore_incrs = info.restore_incrs;
+}
+
+
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc_v0(struct nvhost_channel *ch)
+{
+ struct nvhost_hwctx *ctx = nvhost_3dctx_alloc_common(ch, true);
+ if (ctx)
+ setup_restore_v0(ctx->restore_virt);
+ return ctx;
+}
+
+static void ctx3d_save_service(struct nvhost_hwctx *ctx)
+{
+ u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
+ unsigned int pending = 0;
+
+ ptr = save_regs_v0(ptr, &pending, ctx->channel->aperture,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ wmb();
+ nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_3D);
+}
+
+int __init nvhost_gr3d_t20_ctxhandler_init(struct nvhost_hwctx_handler *h)
+{
+ struct nvhost_channel *ch;
+ struct nvmap_client *nvmap;
+ u32 *save_ptr;
+
+ ch = container_of(h, struct nvhost_channel, ctxhandler);
+ nvmap = ch->dev->nvmap;
+
+ setup_save(NULL);
+
+ nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * sizeof(u32), 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR(nvhost_3dctx_save_buf)) {
+ int err = PTR_ERR(nvhost_3dctx_save_buf);
+ nvhost_3dctx_save_buf = NULL;
+ return err;
+ }
+
+ nvhost_3dctx_save_slots = 1;
+
+ save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
+ if (!save_ptr) {
+ nvmap_free(nvmap, nvhost_3dctx_save_buf);
+ nvhost_3dctx_save_buf = NULL;
+ return -ENOMEM;
+ }
+
+ save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);
+
+ setup_save(save_ptr);
+
+ h->alloc = ctx3d_alloc_v0;
+ h->save_push = save_push_v0;
+ h->save_service = ctx3d_save_service;
+ h->get = nvhost_3dctx_get;
+ h->put = nvhost_3dctx_put;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.h b/drivers/video/tegra/host/gr3d/gr3d_t20.h
new file mode 100644
index 000000000000..21c437c30ec1
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t20.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t20.h
+ *
+ * Tegra Graphics Host 3D for Tegra2
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_T20_H
+#define __NVHOST_GR3D_GR3D_T20_H
+
+struct nvhost_hwctx_handler;
+
+int nvhost_gr3d_t20_ctxhandler_init(struct nvhost_hwctx_handler *h);
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.c b/drivers/video/tegra/host/gr3d/gr3d_t30.c
new file mode 100644
index 000000000000..5f991a41db85
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t30.c
@@ -0,0 +1,425 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t30.c
+ *
+ * Tegra Graphics Host 3D for Tegra3
+ *
+ * Copyright (c) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "host1x/host1x_hardware.h"
+#include "host1x/host1x_syncpt.h"
+#include "gr3d.h"
+
+#include <mach/gpufuse.h>
+#include <mach/hardware.h>
+#include <linux/slab.h>
+
+/* default to an invalid count (> 2) so the BUG_ON checks trip if this is never set */
+static int register_sets = 99;
+
+static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
+ HWCTX_REGINFO(0xe00, 4, DIRECT),
+ HWCTX_REGINFO(0xe05, 30, DIRECT),
+ HWCTX_REGINFO(0xe25, 2, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0xe30, 16, DIRECT),
+ HWCTX_REGINFO(0x001, 2, DIRECT),
+ HWCTX_REGINFO(0x00c, 10, DIRECT),
+ HWCTX_REGINFO(0x100, 34, DIRECT),
+ HWCTX_REGINFO(0x124, 2, DIRECT),
+ HWCTX_REGINFO(0x200, 5, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 25, DIRECT),
+ HWCTX_REGINFO(0x363, 2, DIRECT),
+ HWCTX_REGINFO(0x400, 16, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x412, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 4, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 50, DIRECT),
+ HWCTX_REGINFO(0x750, 16, DIRECT),
+ HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 2, DIRECT),
+ HWCTX_REGINFO(0x90a, 1, DIRECT),
+ HWCTX_REGINFO(0xa02, 10, DIRECT),
+ HWCTX_REGINFO(0xb04, 1, DIRECT),
+ HWCTX_REGINFO(0xb06, 13, DIRECT),
+};
+
+static const struct hwctx_reginfo ctxsave_regs_3d_perset[] = {
+ HWCTX_REGINFO(0xe04, 1, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+ HWCTX_REGINFO(0x413, 1, DIRECT),
+ HWCTX_REGINFO(0x90b, 1, DIRECT),
+ HWCTX_REGINFO(0xe41, 1, DIRECT),
+};
+
+static unsigned int restore_set1_offset;
+
+/* the same context save command sequence is used for all contexts. */
+static phys_addr_t save_phys;
+static unsigned int save_size;
+
+#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
+#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
+#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
+#define SAVE_END_V1_SIZE (9 + RESTORE_END_SIZE)
+#define SAVE_INCRS 3
+#define SAVE_THRESH_OFFSET 0
+#define RESTORE_BEGIN_SIZE 4
+#define RESTORE_DIRECT_SIZE 1
+#define RESTORE_INDIRECT_SIZE 2
+#define RESTORE_END_SIZE 1
+
+struct save_info {
+ u32 *ptr;
+ unsigned int save_count;
+ unsigned int restore_count;
+ unsigned int save_incrs;
+ unsigned int restore_incrs;
+};
+
+/*** v1 saver ***/
+
+static void save_push_v1(struct nvhost_cdma *cdma,
+ struct nvhost_hwctx *ctx)
+{
+ /* wait for 3d idle */
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+ nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
+ NVSYNCPT_3D));
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
+ nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
+ NVWAITBASE_3D, 1));
+ /* back to 3d */
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+ NVHOST_OPCODE_NOOP);
+ /* set register set 0 and 1 register read memory output addresses,
+ and send their reads to memory */
+ if (register_sets == 2) {
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
+ nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS,
+ 1));
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_nonincr(0x904, 1),
+ ctx->restore_phys + restore_set1_offset * 4);
+ }
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
+ nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
+ ctx->restore_phys);
+ /* gather the save buffer */
+ nvhost_cdma_push_gather(cdma,
+ (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
+ (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
+ nvhost_opcode_gather(save_size),
+ save_phys);
+}
+
+static void __init save_begin_v1(u32 *ptr)
+{
+ ptr[0] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
+ RESTORE_BEGIN_SIZE);
+ nvhost_3dctx_restore_begin(ptr + 1);
+ ptr += RESTORE_BEGIN_SIZE;
+}
+
+static void __init save_direct_v1(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+ AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+ nvhost_3dctx_restore_direct(ptr + 1, start_reg, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INDOFF, 1);
+ ptr[2] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ start_reg, true);
+ /* TODO could do this in the setclass if count < 6 */
+ ptr[3] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+
+static void __init save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
+ u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ ptr[1] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
+ RESTORE_INDIRECT_SIZE);
+ nvhost_3dctx_restore_indirect(ptr + 2, offset_reg, offset, data_reg,
+ count);
+ ptr += RESTORE_INDIRECT_SIZE;
+ ptr[2] = nvhost_opcode_imm(offset_reg, offset);
+ ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INDOFF, 1);
+ ptr[4] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ data_reg, false);
+ ptr[5] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+
+static void __init save_end_v1(u32 *ptr)
+{
+ /* write end of restore buffer */
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+ AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+ nvhost_3dctx_restore_end(ptr + 1);
+ ptr += RESTORE_END_SIZE;
+ /* reset to dual reg if necessary */
+ ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+ (1 << register_sets) - 1);
+ /* op_done syncpt incr to flush FDC */
+ ptr[2] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, NVSYNCPT_3D);
+ /* host wait for that syncpt incr, and advance the wait base */
+ ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE,
+ nvhost_mask2(
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE));
+ ptr[4] = nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
+ NVWAITBASE_3D, nvhost_3dctx_save_incrs - 1);
+ ptr[5] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D,
+ nvhost_3dctx_save_incrs);
+ /* set class back to 3d */
+ ptr[6] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* send reg reads back to host */
+ ptr[7] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
+ /* final syncpt increment to release waiters */
+ ptr[8] = nvhost_opcode_imm(0, NVSYNCPT_3D);
+}
+
+/*** save ***/
+
+
+
+static void __init setup_save_regs(struct save_info *info,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ u32 indoff = offset + 1;
+ switch (regs->type) {
+ case HWCTX_REGINFO_DIRECT:
+ if (ptr) {
+ save_direct_v1(ptr, offset, count);
+ ptr += SAVE_DIRECT_V1_SIZE;
+ }
+ save_count += SAVE_DIRECT_V1_SIZE;
+ restore_count += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_4X:
+ ++indoff;
+ /* fall through */
+ case HWCTX_REGINFO_INDIRECT:
+ if (ptr) {
+ save_indirect_v1(ptr, offset, 0,
+ indoff, count);
+ ptr += SAVE_INDIRECT_V1_SIZE;
+ }
+ save_count += SAVE_INDIRECT_V1_SIZE;
+ restore_count += RESTORE_INDIRECT_SIZE;
+ break;
+ }
+ if (ptr) {
+ /* SAVE cases only: reserve room for incoming data */
+ u32 k = 0;
+ /*
+ * Create a signature pattern for indirect data (which
+ * will be overwritten by true incoming data) for
+ * better deducing where we are in a long command
+ * sequence, when given only a FIFO snapshot for debug
+ * purposes.
+ */
+ for (k = 0; k < count; k++)
+ *(ptr + k) = 0xd000d000 | (offset << 16) | k;
+ ptr += count;
+ }
+ save_count += count;
+ restore_count += count;
+ }
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void __init switch_gpu(struct save_info *info,
+ unsigned int save_src_set,
+ u32 save_dest_sets,
+ u32 restore_dest_sets)
+{
+ if (info->ptr) {
+ info->ptr[0] = nvhost_opcode_setclass(
+ NV_GRAPHICS_3D_CLASS_ID,
+ AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+ info->ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+ restore_dest_sets);
+ info->ptr[2] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+ save_dest_sets);
+ info->ptr[3] = nvhost_opcode_imm(AR3D_GSHIM_READ_SELECT,
+ save_src_set);
+ info->ptr += 4;
+ }
+ info->save_count += 4;
+ info->restore_count += 1;
+}
+
+static void __init setup_save(u32 *ptr)
+{
+ struct save_info info = {
+ ptr,
+ SAVE_BEGIN_V1_SIZE,
+ RESTORE_BEGIN_SIZE,
+ SAVE_INCRS,
+ 1
+ };
+ int save_end_size = SAVE_END_V1_SIZE;
+
+ BUG_ON(register_sets > 2);
+
+ if (info.ptr) {
+ save_begin_v1(info.ptr);
+ info.ptr += SAVE_BEGIN_V1_SIZE;
+ }
+
+ /* read from set0, write cmds through set0, restore to set0 and 1 */
+ if (register_sets == 2)
+ switch_gpu(&info, 0, 1, 3);
+
+ /* save regs that are common to both sets */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_global,
+ ARRAY_SIZE(ctxsave_regs_3d_global));
+
+ /* read from set 0, write cmds through set0, restore to set0 */
+ if (register_sets == 2)
+ switch_gpu(&info, 0, 1, 1);
+
+ /* save set 0 specific regs */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_perset,
+ ARRAY_SIZE(ctxsave_regs_3d_perset));
+
+ if (register_sets == 2) {
+ /* read from set1, write cmds through set1, restore to set1 */
+ switch_gpu(&info, 1, 2, 2);
+ /* note offset at which set 1 restore starts */
+ restore_set1_offset = info.restore_count;
+ /* save set 1 specific regs */
+ setup_save_regs(&info,
+ ctxsave_regs_3d_perset,
+ ARRAY_SIZE(ctxsave_regs_3d_perset));
+ }
+
+ /* read from set0, write cmds through set1, restore to set0 and 1 */
+ if (register_sets == 2)
+ switch_gpu(&info, 0, 2, 3);
+
+ if (info.ptr) {
+ save_end_v1(info.ptr);
+ info.ptr += SAVE_END_V1_SIZE;
+ }
+
+ wmb();
+
+ save_size = info.save_count + save_end_size;
+ nvhost_3dctx_restore_size = info.restore_count + RESTORE_END_SIZE;
+ nvhost_3dctx_save_incrs = info.save_incrs;
+ nvhost_3dctx_save_thresh = nvhost_3dctx_save_incrs
+ - SAVE_THRESH_OFFSET;
+ nvhost_3dctx_restore_incrs = info.restore_incrs;
+}
+
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_channel *ch)
+{
+ return nvhost_3dctx_alloc_common(ch, false);
+}
+
+int __init nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h)
+{
+ struct nvhost_channel *ch;
+ struct nvmap_client *nvmap;
+ u32 *save_ptr;
+
+ ch = container_of(h, struct nvhost_channel, ctxhandler);
+ nvmap = ch->dev->nvmap;
+
+ register_sets = tegra_gpu_register_sets();
+ BUG_ON(register_sets == 0 || register_sets > 2);
+
+ setup_save(NULL);
+
+ nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR(nvhost_3dctx_save_buf)) {
+ int err = PTR_ERR(nvhost_3dctx_save_buf);
+ nvhost_3dctx_save_buf = NULL;
+ return err;
+ }
+
+ nvhost_3dctx_save_slots = 6;
+ if (register_sets == 2)
+ nvhost_3dctx_save_slots += 2;
+
+ save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
+ if (!save_ptr) {
+ nvmap_free(nvmap, nvhost_3dctx_save_buf);
+ nvhost_3dctx_save_buf = NULL;
+ return -ENOMEM;
+ }
+
+ save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);
+
+ setup_save(save_ptr);
+
+ h->alloc = ctx3d_alloc_v1;
+ h->save_push = save_push_v1;
+ h->save_service = NULL;
+ h->get = nvhost_3dctx_get;
+ h->put = nvhost_3dctx_put;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.h b/drivers/video/tegra/host/gr3d/gr3d_t30.h
new file mode 100644
index 000000000000..933fb24025b1
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t30.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t30.h
+ *
+ * Tegra Graphics Host 3D for Tegra3
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_T30_H
+#define __NVHOST_GR3D_GR3D_T30_H
+
+struct nvhost_hwctx_handler;
+
+int nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h);
+
+#endif
diff --git a/drivers/video/tegra/host/gr3d/scale3d.c b/drivers/video/tegra/host/gr3d/scale3d.c
new file mode 100644
index 000000000000..a8d7dec27f8c
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/scale3d.c
@@ -0,0 +1,651 @@
+/*
+ * drivers/video/tegra/host/gr3d/scale3d.c
+ *
+ * Tegra Graphics Host 3D clock scaling
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * 3d clock scaling
+ *
+ * module3d_notify_busy() is called upon submit, module3d_notify_idle() is
+ * called when all outstanding submits are completed. Idle times are measured
+ * over a fixed time period (scale3d.p_period). If the 3d module idle time
+ * percentage goes over the limit (set in scale3d.p_idle_max), 3d clocks are
+ * scaled down. If the percentage goes under the minimum limit (set in
+ * scale3d.p_idle_min), 3d clocks are scaled up. An additional test is made
+ * over the time frame given in scale3d.p_fast_response for clocking up
+ * quickly in response to load peaks.
+ *
+ * The 3d.emc clock is scaled proportionally to the 3d clock, with a
+ * quadratic dip factor subtracted to pull the 3d.emc rate a bit lower
+ * around mid-range 3d frequencies (see the derivation in
+ * nvhost_scale3d_init()).
+ */
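The decision rule described in the comment above can be sanity-checked with a minimal userspace sketch. This is illustrative only: the window lengths and idle times below are invented, and the 10/15 thresholds simply mirror the idle_min/idle_max defaults set later in nvhost_scale3d_init().

/* Illustrative sketch of the idle-percentage decision rule (not driver code). */
#include <stdio.h>

struct window {
	unsigned long idle_us;   /* accumulated idle time inside the window */
	unsigned long span_us;   /* total window length */
};

/* Returns +1 to clock up, -1 to clock down, 0 to hold. */
static int scaling_decision(struct window w, unsigned int idle_min,
			    unsigned int idle_max)
{
	unsigned long idleness = (w.idle_us * 100) / w.span_us;

	if (idleness < idle_min)
		return +1;      /* busy over the fast window: raise 3d clock */
	if (idleness > idle_max)
		return -1;      /* mostly idle over the long window: lower it */
	return 0;
}

int main(void)
{
	struct window fast = { .idle_us = 500,   .span_us = 7000 };   /* ~7% idle  */
	struct window slow = { .idle_us = 30000, .span_us = 100000 }; /* ~30% idle */

	printf("fast window: %d\n", scaling_decision(fast, 10, 15)); /* prints +1 */
	printf("slow window: %d\n", scaling_decision(slow, 10, 15)); /* prints -1 */
	return 0;
}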
+
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+#include "scale3d.h"
+#include "../dev.h"
+
+static int scale3d_is_enabled(void);
+static void scale3d_enable(int enable);
+
+#define POW2(x) ((x) * (x))
+
+/*
+ * debugfs parameters to control 3d clock scaling test
+ *
+ * period - time period for clock rate evaluation
+ * fast_response - time period for evaluation of 'busy' spikes
+ * idle_min - if less than [idle_min] percent idle over [fast_response]
+ * microseconds, clock up.
+ * idle_max - if over [idle_max] percent idle over [period] microseconds,
+ * clock down.
+ * max_scale - limits rate changes to no less than (100 - max_scale)% or
+ * (100 + 2 * max_scale)% of current clock rate
+ * verbosity - set above 5 for debug printouts
+ */
+
+struct scale3d_info_rec {
+ struct mutex lock; /* lock for timestamps etc */
+ int enable;
+ int init;
+ ktime_t idle_frame;
+ ktime_t fast_frame;
+ ktime_t last_idle;
+ ktime_t last_short_term_idle;
+ int is_idle;
+ ktime_t last_tweak;
+ ktime_t last_down;
+ int fast_up_count;
+ int slow_down_count;
+ int is_scaled;
+ int fast_responses;
+ unsigned long idle_total;
+ unsigned long idle_short_term_total;
+ unsigned long max_rate_3d;
+ long emc_slope;
+ long emc_offset;
+ long emc_dip_slope;
+ long emc_dip_offset;
+ long emc_xmid;
+ unsigned long min_rate_3d;
+ struct work_struct work;
+ struct delayed_work idle_timer;
+ unsigned int scale;
+ unsigned int p_period;
+ unsigned int period;
+ unsigned int p_idle_min;
+ unsigned int idle_min;
+ unsigned int p_idle_max;
+ unsigned int idle_max;
+ unsigned int p_fast_response;
+ unsigned int fast_response;
+ unsigned int p_adjust;
+ unsigned int p_scale_emc;
+ unsigned int p_emc_dip;
+ unsigned int p_verbosity;
+ struct clk *clk_3d;
+ struct clk *clk_3d2;
+ struct clk *clk_3d_emc;
+};
+
+static struct scale3d_info_rec scale3d;
+
+static void scale3d_clocks(unsigned long percent)
+{
+ unsigned long hz, curr;
+
+ if (!tegra_is_clk_enabled(scale3d.clk_3d))
+ return;
+
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
+ if (!tegra_is_clk_enabled(scale3d.clk_3d2))
+ return;
+
+ curr = clk_get_rate(scale3d.clk_3d);
+ hz = percent * (curr / 100);
+
+ if (!(hz >= scale3d.max_rate_3d && curr == scale3d.max_rate_3d)) {
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
+ clk_set_rate(scale3d.clk_3d2, 0);
+ clk_set_rate(scale3d.clk_3d, hz);
+
+ if (scale3d.p_scale_emc) {
+ long after = (long) clk_get_rate(scale3d.clk_3d);
+ hz = after * scale3d.emc_slope + scale3d.emc_offset;
+ if (scale3d.p_emc_dip)
+ hz -=
+ (scale3d.emc_dip_slope *
+ POW2(after / 1000 - scale3d.emc_xmid) +
+ scale3d.emc_dip_offset);
+ clk_set_rate(scale3d.clk_3d_emc, hz);
+ }
+ }
+}
+
+static void scale3d_clocks_handler(struct work_struct *work)
+{
+ unsigned int scale;
+
+ mutex_lock(&scale3d.lock);
+ scale = scale3d.scale;
+ mutex_unlock(&scale3d.lock);
+
+ if (scale != 0)
+ scale3d_clocks(scale);
+}
+
+void nvhost_scale3d_suspend(struct nvhost_module *mod)
+{
+ cancel_work_sync(&scale3d.work);
+ cancel_delayed_work(&scale3d.idle_timer);
+}
+
+/* set 3d clocks to max */
+static void reset_3d_clocks(void)
+{
+ if (clk_get_rate(scale3d.clk_3d) != scale3d.max_rate_3d) {
+ clk_set_rate(scale3d.clk_3d, scale3d.max_rate_3d);
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
+ clk_set_rate(scale3d.clk_3d2, scale3d.max_rate_3d);
+ if (scale3d.p_scale_emc)
+ clk_set_rate(scale3d.clk_3d_emc,
+ clk_round_rate(scale3d.clk_3d_emc, UINT_MAX));
+ }
+}
+
+static int scale3d_is_enabled(void)
+{
+ int enable;
+
+ mutex_lock(&scale3d.lock);
+ enable = scale3d.enable;
+ mutex_unlock(&scale3d.lock);
+
+ return enable;
+}
+
+static void scale3d_enable(int enable)
+{
+ int disable = 0;
+
+ mutex_lock(&scale3d.lock);
+
+ if (enable) {
+ if (scale3d.max_rate_3d != scale3d.min_rate_3d)
+ scale3d.enable = 1;
+ } else {
+ scale3d.enable = 0;
+ disable = 1;
+ }
+
+ mutex_unlock(&scale3d.lock);
+
+ if (disable)
+ reset_3d_clocks();
+}
+
+static void reset_scaling_counters(ktime_t time)
+{
+ scale3d.idle_total = 0;
+ scale3d.idle_short_term_total = 0;
+ scale3d.last_idle = time;
+ scale3d.last_short_term_idle = time;
+ scale3d.idle_frame = time;
+}
+
+/* scaling_adjust - use scale up / scale down hint counts to adjust scaling
+ * parameters.
+ *
+ * hint_ratio is 100 x the ratio of scale up to scale down hints. Three cases
+ * are distinguished:
+ *
+ * hint_ratio < HINT_RATIO_MIN - set parameters to maximize scaling effect
+ * hint_ratio > HINT_RATIO_MAX - set parameters to minimize scaling effect
+ * hint_ratio between limits - scale parameters linearly
+ *
+ * the parameters adjusted are
+ *
+ * * fast_response time
+ * * period - time for scaling down estimate
+ * * idle_min percentage
+ * * idle_max percentage
+ */
+#define SCALING_ADJUST_PERIOD 1000000
+#define HINT_RATIO_MAX 400
+#define HINT_RATIO_MIN 100
+#define HINT_RATIO_MID ((HINT_RATIO_MAX + HINT_RATIO_MIN) / 2)
+#define HINT_RATIO_DIFF (HINT_RATIO_MAX - HINT_RATIO_MIN)
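A small standalone sketch of the hint_ratio bucketing described in the comment above; the up/down counts below are invented, and the +1 terms mirror how scaling_adjust() avoids division by zero.

/* Illustrative only: classifies a scale-up/scale-down hint ratio. */
#include <stdio.h>

#define RATIO_MIN 100
#define RATIO_MAX 400

static const char *classify(int fast_up_count, int slow_down_count)
{
	/* 100 x ratio of scale-up to scale-down hints (+1 avoids /0) */
	long hint_ratio = (100L * (fast_up_count + 1)) / (slow_down_count + 1);

	if (hint_ratio > RATIO_MAX)
		return "minimize scaling effect";
	if (hint_ratio < RATIO_MIN)
		return "maximize scaling effect";
	return "interpolate parameters linearly";
}

int main(void)
{
	printf("%s\n", classify(20, 3)); /* ratio 525 -> minimize    */
	printf("%s\n", classify(1, 9));  /* ratio  20 -> maximize    */
	printf("%s\n", classify(5, 2));  /* ratio 200 -> interpolate */
	return 0;
}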
+
+static void scaling_adjust(ktime_t time)
+{
+ long hint_ratio;
+ long fast_response_adjustment;
+ long period_adjustment;
+ int idle_min_adjustment;
+ int idle_max_adjustment;
+ unsigned long dt;
+
+ dt = (unsigned long) ktime_us_delta(time, scale3d.last_tweak);
+ if (dt < SCALING_ADJUST_PERIOD)
+ return;
+
+ hint_ratio = (100 * (scale3d.fast_up_count + 1)) /
+ (scale3d.slow_down_count + 1);
+
+ if (hint_ratio > HINT_RATIO_MAX) {
+ fast_response_adjustment = -((int) scale3d.p_fast_response) / 4;
+ period_adjustment = scale3d.p_period / 2;
+ idle_min_adjustment = scale3d.p_idle_min;
+ idle_max_adjustment = scale3d.p_idle_max;
+ } else if (hint_ratio < HINT_RATIO_MIN) {
+ fast_response_adjustment = scale3d.p_fast_response / 2;
+ period_adjustment = -((int) scale3d.p_period) / 4;
+ idle_min_adjustment = -((int) scale3d.p_idle_min) / 2;
+ idle_max_adjustment = -((int) scale3d.p_idle_max) / 2;
+ } else {
+ int diff;
+ int factor;
+
+ diff = HINT_RATIO_MID - hint_ratio;
+ if (diff < 0)
+ factor = -diff * 2;
+ else {
+ factor = -diff;
+ diff *= 2;
+ }
+
+ fast_response_adjustment = diff *
+ (scale3d.p_fast_response / (HINT_RATIO_DIFF * 2));
+ period_adjustment =
+ diff * (scale3d.p_period / HINT_RATIO_DIFF);
+ idle_min_adjustment =
+ (factor * (int) scale3d.p_idle_min) / HINT_RATIO_DIFF;
+ idle_max_adjustment =
+ (factor * (int) scale3d.p_idle_max) / HINT_RATIO_DIFF;
+ }
+
+ scale3d.fast_response =
+ scale3d.p_fast_response + fast_response_adjustment;
+ scale3d.period = scale3d.p_period + period_adjustment;
+ scale3d.idle_min = scale3d.p_idle_min + idle_min_adjustment;
+ scale3d.idle_max = scale3d.p_idle_max + idle_max_adjustment;
+
+ if (scale3d.p_verbosity >= 10)
+ pr_info("scale3d stats: + %d - %d (/ %d) f %u p %u min %u max %u\n",
+ scale3d.fast_up_count, scale3d.slow_down_count,
+ scale3d.fast_responses, scale3d.fast_response,
+ scale3d.period, scale3d.idle_min, scale3d.idle_max);
+
+ scale3d.fast_up_count = 0;
+ scale3d.slow_down_count = 0;
+ scale3d.fast_responses = 0;
+ scale3d.last_down = time;
+ scale3d.last_tweak = time;
+}
+
+#undef SCALING_ADJUST_PERIOD
+#undef HINT_RATIO_MAX
+#undef HINT_RATIO_MIN
+#undef HINT_RATIO_MID
+#undef HINT_RATIO_DIFF
+
+static void scaling_state_check(ktime_t time)
+{
+ unsigned long dt;
+
+ /* adjustment: set scale parameters (fast_response, period) +/- 25%
+ * based on ratio of scale up to scale down hints
+ */
+ if (scale3d.p_adjust)
+ scaling_adjust(time);
+ else {
+ scale3d.fast_response = scale3d.p_fast_response;
+ scale3d.period = scale3d.p_period;
+ scale3d.idle_min = scale3d.p_idle_min;
+ scale3d.idle_max = scale3d.p_idle_max;
+ }
+
+ /* check for load peaks */
+ dt = (unsigned long) ktime_us_delta(time, scale3d.fast_frame);
+ if (dt > scale3d.fast_response) {
+ unsigned long idleness =
+ (scale3d.idle_short_term_total * 100) / dt;
+ scale3d.fast_responses++;
+ scale3d.fast_frame = time;
+ /* if too busy, scale up */
+ if (idleness < scale3d.idle_min) {
+ scale3d.is_scaled = 0;
+ scale3d.fast_up_count++;
+ if (scale3d.p_verbosity >= 5)
+ pr_info("scale3d: %ld%% busy\n",
+ 100 - idleness);
+
+ reset_3d_clocks();
+ reset_scaling_counters(time);
+ return;
+ }
+ scale3d.idle_short_term_total = 0;
+ scale3d.last_short_term_idle = time;
+ }
+
+ dt = (unsigned long) ktime_us_delta(time, scale3d.idle_frame);
+ if (dt > scale3d.period) {
+ unsigned long idleness = (scale3d.idle_total * 100) / dt;
+
+ if (scale3d.p_verbosity >= 5)
+ pr_info("scale3d: idle %lu, ~%lu%%\n",
+ scale3d.idle_total, idleness);
+
+ if (idleness > scale3d.idle_max) {
+ if (!scale3d.is_scaled) {
+ scale3d.is_scaled = 1;
+ scale3d.last_down = time;
+ }
+ scale3d.slow_down_count++;
+ /* if idle time is high, clock down */
+ scale3d.scale = 100 - (idleness - scale3d.idle_min);
+ schedule_work(&scale3d.work);
+ }
+
+ reset_scaling_counters(time);
+ }
+}
+
+void nvhost_scale3d_notify_idle(struct nvhost_module *mod)
+{
+ ktime_t t;
+ unsigned long dt;
+
+ mutex_lock(&scale3d.lock);
+
+ if (!scale3d.enable)
+ goto done;
+
+ t = ktime_get();
+
+ if (scale3d.is_idle) {
+ dt = ktime_us_delta(t, scale3d.last_idle);
+ scale3d.idle_total += dt;
+ dt = ktime_us_delta(t, scale3d.last_short_term_idle);
+ scale3d.idle_short_term_total += dt;
+ } else
+ scale3d.is_idle = 1;
+
+ scale3d.last_idle = t;
+ scale3d.last_short_term_idle = t;
+
+ scaling_state_check(scale3d.last_idle);
+
+ /* delay for idle_max percent of (2 * fast_response) microseconds; the
+ * division by 50000 converts that product to milliseconds */
+ schedule_delayed_work(&scale3d.idle_timer,
+ msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response)
+ / 50000));
+
+done:
+ mutex_unlock(&scale3d.lock);
+}
+
+void nvhost_scale3d_notify_busy(struct nvhost_module *mod)
+{
+ unsigned long idle;
+ unsigned long short_term_idle;
+ ktime_t t;
+
+ mutex_lock(&scale3d.lock);
+
+ if (!scale3d.enable)
+ goto done;
+
+ cancel_delayed_work(&scale3d.idle_timer);
+
+ t = ktime_get();
+
+ if (scale3d.is_idle) {
+ idle = (unsigned long)
+ ktime_us_delta(t, scale3d.last_idle);
+ scale3d.idle_total += idle;
+ short_term_idle =
+ ktime_us_delta(t, scale3d.last_short_term_idle);
+ scale3d.idle_short_term_total += short_term_idle;
+ scale3d.is_idle = 0;
+ }
+
+ scaling_state_check(t);
+
+done:
+ mutex_unlock(&scale3d.lock);
+}
+
+static void scale3d_idle_handler(struct work_struct *work)
+{
+ int notify_idle = 0;
+
+ mutex_lock(&scale3d.lock);
+
+ if (scale3d.enable && scale3d.is_idle &&
+ tegra_is_clk_enabled(scale3d.clk_3d)) {
+ unsigned long curr = clk_get_rate(scale3d.clk_3d);
+ if (curr > scale3d.min_rate_3d)
+ notify_idle = 1;
+ }
+
+ mutex_unlock(&scale3d.lock);
+
+ if (notify_idle)
+ nvhost_scale3d_notify_idle(NULL);
+}
+
+void nvhost_scale3d_reset(void)
+{
+ ktime_t t = ktime_get();
+ mutex_lock(&scale3d.lock);
+ reset_scaling_counters(t);
+ mutex_unlock(&scale3d.lock);
+}
+
+/*
+ * debugfs parameters to control 3d clock scaling
+ */
+
+void nvhost_scale3d_debug_init(struct dentry *de)
+{
+ struct dentry *d, *f;
+
+ d = debugfs_create_dir("scaling", de);
+ if (!d) {
+ pr_err("scale3d: can\'t create debugfs directory\n");
+ return;
+ }
+
+#define CREATE_SCALE3D_FILE(fname) \
+ do {\
+ f = debugfs_create_u32(#fname, S_IRUGO | S_IWUSR, d,\
+ &scale3d.p_##fname);\
+ if (NULL == f) {\
+ pr_err("scale3d: can\'t create file " #fname "\n");\
+ return;\
+ } \
+ } while (0)
+
+ CREATE_SCALE3D_FILE(fast_response);
+ CREATE_SCALE3D_FILE(idle_min);
+ CREATE_SCALE3D_FILE(idle_max);
+ CREATE_SCALE3D_FILE(period);
+ CREATE_SCALE3D_FILE(adjust);
+ CREATE_SCALE3D_FILE(scale_emc);
+ CREATE_SCALE3D_FILE(emc_dip);
+ CREATE_SCALE3D_FILE(verbosity);
+#undef CREATE_SCALE3D_FILE
+}
+
+static ssize_t enable_3d_scaling_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t res;
+
+ res = snprintf(buf, PAGE_SIZE, "%d\n", scale3d_is_enabled());
+
+ return res;
+}
+
+static ssize_t enable_3d_scaling_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long val = 0;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ scale3d_enable(val);
+
+ return count;
+}
+
+static DEVICE_ATTR(enable_3d_scaling, S_IRUGO | S_IWUGO,
+ enable_3d_scaling_show, enable_3d_scaling_store);
+
+void nvhost_scale3d_init(struct device *d, struct nvhost_module *mod)
+{
+ if (!scale3d.init) {
+ int error;
+ unsigned long max_emc, min_emc;
+ long correction;
+ mutex_init(&scale3d.lock);
+
+ scale3d.clk_3d = mod->clk[0];
+ if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
+ scale3d.clk_3d2 = mod->clk[1];
+ scale3d.clk_3d_emc = mod->clk[2];
+ } else
+ scale3d.clk_3d_emc = mod->clk[1];
+
+ scale3d.max_rate_3d = clk_round_rate(scale3d.clk_3d, UINT_MAX);
+ scale3d.min_rate_3d = clk_round_rate(scale3d.clk_3d, 0);
+
+ if (scale3d.max_rate_3d == scale3d.min_rate_3d) {
+ pr_warn("scale3d: 3d max rate = min rate (%lu), "
+ "disabling\n", scale3d.max_rate_3d);
+ scale3d.enable = 0;
+ return;
+ }
+
+ /* emc scaling:
+ *
+ * Remc = S * R3d + O - (Sd * (R3d - Rm)^2 + Od)
+ *
+ * Remc - 3d.emc rate
+ * R3d - 3d.cbus rate
+ * Rm - 3d.cbus 'middle' rate = (max + min)/2
+ * S - emc_slope
+ * O - emc_offset
+ * Sd - emc_dip_slope
+ * Od - emc_dip_offset
+ *
+ * this superposes a quadratic dip centered around the middle 3d
+ * frequency over a linear correlation of 3d.emc to 3d clock
+ * rates.
+ *
+ * S, O are chosen so that the maximum 3d rate produces the
+ * maximum 3d.emc rate exactly, and the minimum 3d rate produces
+ * at least the minimum 3d.emc rate.
+ *
+ * Sd and Od are chosen to produce the largest dip that will
+ * keep 3d.emc frequencies monotonously decreasing with 3d
+ * frequencies. To achieve this, the first derivative of Remc
+ * with respect to R3d should be zero for the minimal 3d rate:
+ *
+ * R'emc = S - 2 * Sd * (R3d - Rm)
+ * R'emc(R3d-min) = 0
+ * S = 2 * Sd * (R3d-min - Rm)
+ * = 2 * Sd * (R3d-min - R3d-max) / 2
+ * Sd = S / (R3d-min - R3d-max)
+ *
+ * +---------------------------------------------------+
+ * | Sd = -(emc-max - emc-min) / (R3d-min - R3d-max)^2 |
+ * +---------------------------------------------------+
+ *
+ * dip = Sd * (R3d - Rm)^2 + Od
+ *
+ * requiring dip(R3d-min) = 0 and dip(R3d-max) = 0 gives
+ *
+ * Sd * (R3d-min - Rm)^2 + Od = 0
+ * Od = -Sd * ((R3d-min - R3d-max) / 2)^2
+ * = -Sd * ((R3d-min - R3d-max)^2) / 4
+ *
+ * +------------------------------+
+ * | Od = (emc-max - emc-min) / 4 |
+ * +------------------------------+
+ */
+
+ max_emc = clk_round_rate(scale3d.clk_3d_emc, UINT_MAX);
+ min_emc = clk_round_rate(scale3d.clk_3d_emc, 0);
+
+ scale3d.emc_slope = (max_emc - min_emc) /
+ (scale3d.max_rate_3d - scale3d.min_rate_3d);
+ scale3d.emc_offset = max_emc -
+ scale3d.emc_slope * scale3d.max_rate_3d;
+ /* guarantee max 3d rate maps to max emc rate */
+ scale3d.emc_offset += max_emc -
+ (scale3d.emc_slope * scale3d.max_rate_3d +
+ scale3d.emc_offset);
+
+ scale3d.emc_dip_offset = (max_emc - min_emc) / 4;
+ scale3d.emc_dip_slope =
+ -4 * (scale3d.emc_dip_offset /
+ (POW2(scale3d.max_rate_3d - scale3d.min_rate_3d)));
+ scale3d.emc_xmid =
+ (scale3d.max_rate_3d + scale3d.min_rate_3d) / 2;
+ correction =
+ scale3d.emc_dip_offset +
+ scale3d.emc_dip_slope *
+ POW2(scale3d.max_rate_3d - scale3d.emc_xmid);
+ scale3d.emc_dip_offset -= correction;
+
+ INIT_WORK(&scale3d.work, scale3d_clocks_handler);
+ INIT_DELAYED_WORK(&scale3d.idle_timer, scale3d_idle_handler);
+
+ /* set scaling parameter defaults */
+ scale3d.enable = 1;
+ scale3d.period = scale3d.p_period = 100000;
+ scale3d.idle_min = scale3d.p_idle_min = 10;
+ scale3d.idle_max = scale3d.p_idle_max = 15;
+ scale3d.fast_response = scale3d.p_fast_response = 7000;
+ scale3d.p_scale_emc = 1;
+ scale3d.p_emc_dip = 1;
+ scale3d.p_verbosity = 0;
+ scale3d.p_adjust = 1;
+
+ error = device_create_file(d, &dev_attr_enable_3d_scaling);
+ if (error)
+ dev_err(d, "failed to create sysfs attributes");
+
+ scale3d.init = 1;
+ }
+
+ nvhost_scale3d_reset();
+}
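The emc slope/dip derivation in the comment inside nvhost_scale3d_init() can be checked with concrete numbers. The sketch below is illustrative only: rates are hypothetical and expressed in MHz using doubles, whereas the driver works in Hz with integer arithmetic (where the correction step matters for rounding; here it works out to zero).

/* Illustrative only: recompute the 3d.emc curve from the derivation above. */
#include <stdio.h>

int main(void)
{
	double r3d_max = 400.0, r3d_min = 200.0;   /* hypothetical 3d rates     */
	double emc_max = 600.0, emc_min = 200.0;   /* hypothetical 3d.emc rates */

	double S  = (emc_max - emc_min) / (r3d_max - r3d_min);        /*  2.0  */
	double O  = emc_max - S * r3d_max;                            /* -200  */
	double Od = (emc_max - emc_min) / 4.0;                        /*  100  */
	double Sd = -4.0 * Od / ((r3d_max - r3d_min) * (r3d_max - r3d_min));
	double Rm = (r3d_max + r3d_min) / 2.0;                        /*  300  */

	for (double r3d = r3d_min; r3d <= r3d_max; r3d += 100.0) {
		double dip  = Sd * (r3d - Rm) * (r3d - Rm) + Od;
		double remc = S * r3d + O - dip;
		/* prints 200->200, 300->300 (dip of 100 below linear), 400->600 */
		printf("R3d %.0f -> Remc %.0f\n", r3d, remc);
	}
	return 0;
}

The endpoints hit emc_min and emc_max exactly, and the mid-range rate is pulled 100 MHz below the linear fit, which is the intended shape of the dip.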
+
+void nvhost_scale3d_deinit(struct device *dev, struct nvhost_module *mod)
+{
+ device_remove_file(dev, &dev_attr_enable_3d_scaling);
+ scale3d.init = 0;
+}
diff --git a/drivers/video/tegra/host/gr3d/scale3d.h b/drivers/video/tegra/host/gr3d/scale3d.h
new file mode 100644
index 000000000000..e6d1a40f53e0
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/scale3d.h
@@ -0,0 +1,49 @@
+/*
+ * drivers/video/tegra/host/gr3d/scale3d.h
+ *
+ * Tegra Graphics Host 3D Clock Scaling
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef NVHOST_T30_SCALE3D_H
+#define NVHOST_T30_SCALE3D_H
+
+struct nvhost_module;
+struct device;
+struct dentry;
+
+/* Initialization and de-initialization for module */
+void nvhost_scale3d_init(struct device *, struct nvhost_module *);
+void nvhost_scale3d_deinit(struct device *, struct nvhost_module *);
+
+/* Suspend is called when powering down module */
+void nvhost_scale3d_suspend(struct nvhost_module *);
+
+/* reset 3d module load counters, called on resume */
+void nvhost_scale3d_reset(void);
+
+/*
+ * call when performing submit to notify scaling mechanism that 3d module is
+ * in use
+ */
+void nvhost_scale3d_notify_busy(struct nvhost_module *);
+void nvhost_scale3d_notify_idle(struct nvhost_module *);
+
+void nvhost_scale3d_debug_init(struct dentry *de);
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/Makefile b/drivers/video/tegra/host/host1x/Makefile
new file mode 100644
index 000000000000..ba59d870d15b
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/Makefile
@@ -0,0 +1,13 @@
+GCOV_PROFILE := y
+
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-host1x-objs = \
+ host1x_syncpt.o \
+ host1x_cpuaccess.o \
+ host1x_channel.o \
+ host1x_intr.o \
+ host1x_cdma.o \
+ host1x_debug.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-host1x.o
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.c b/drivers/video/tegra/host/host1x/host1x_cdma.c
new file mode 100644
index 000000000000..008c8bfcde15
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cdma.c
@@ -0,0 +1,668 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include "nvhost_cdma.h"
+#include "dev.h"
+
+#include "host1x_hardware.h"
+#include "host1x_syncpt.h"
+#include "host1x_cdma.h"
+
+static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get)
+{
+ return HOST1X_CREATE(CHANNEL_DMACTRL, DMASTOP, stop)
+ | HOST1X_CREATE(CHANNEL_DMACTRL, DMAGETRST, get_rst)
+ | HOST1X_CREATE(CHANNEL_DMACTRL, DMAINITGET, init_get);
+}
+
+static void cdma_timeout_handler(struct work_struct *work);
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
+ */
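The "fence == cur means full" convention can be modelled outside the driver. The sketch below is illustrative only: it uses a hypothetical 64-byte buffer instead of PUSH_BUFFER_SIZE and tracks only the two indices, mirroring the reset, space, push and pop arithmetic of the functions that follow.

/* Illustrative model of the push buffer fence/cur bookkeeping. */
#include <stdio.h>

#define PB_SIZE 64   /* power of two; each slot holds two 4-byte words */

struct pb { unsigned int fence, cur; };

static unsigned int pb_space(struct pb *p)   /* free two-word slots */
{
	return ((p->fence - p->cur) & (PB_SIZE - 1)) / 8;
}

static void pb_push(struct pb *p) { p->cur   = (p->cur   + 8) & (PB_SIZE - 1); }
static void pb_pop(struct pb *p)  { p->fence = (p->fence + 8) & (PB_SIZE - 1); }

int main(void)
{
	struct pb p = { .fence = PB_SIZE - 8, .cur = 0 };   /* reset state */

	printf("after reset: %u slots free\n", pb_space(&p));    /* 7 */
	while (p.cur != p.fence)
		pb_push(&p);                  /* fill until fence == cur */
	printf("full: %u slots free\n", pb_space(&p));            /* 0 */
	pb_pop(&p);
	printf("after one pop: %u slots free\n", pb_space(&p));   /* 1 */
	return 0;
}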
+
+
+/**
+ * Reset to empty push buffer
+ */
+static void push_buffer_reset(struct push_buffer *pb)
+{
+ pb->fence = PUSH_BUFFER_SIZE - 8;
+ pb->cur = 0;
+}
+
+/**
+ * Init push buffer resources
+ */
+static int push_buffer_init(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+ pb->nvmap = NULL;
+
+ BUG_ON(!cdma_pb_op(cdma).reset);
+ cdma_pb_op(cdma).reset(pb);
+
+ /* allocate and map pushbuffer memory */
+ pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR_OR_NULL(pb->mem)) {
+ pb->mem = NULL;
+ goto fail;
+ }
+ pb->mapped = nvmap_mmap(pb->mem);
+ if (pb->mapped == NULL)
+ goto fail;
+
+ /* pin pushbuffer and get physical address */
+ pb->phys = nvmap_pin(nvmap, pb->mem);
+ if (pb->phys >= 0xfffff000) {
+ pb->phys = 0;
+ goto fail;
+ }
+
+ /* memory for storing nvmap client and handles for each opcode pair */
+ pb->nvmap = kzalloc(NVHOST_GATHER_QUEUE_SIZE *
+ sizeof(struct nvmap_client_handle),
+ GFP_KERNEL);
+ if (!pb->nvmap)
+ goto fail;
+
+ /* put the restart at the end of pushbuffer memory */
+ *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) =
+ nvhost_opcode_restart(pb->phys);
+
+ return 0;
+
+fail:
+ cdma_pb_op(cdma).destroy(pb);
+ return -ENOMEM;
+}
+
+/**
+ * Clean up push buffer resources
+ */
+static void push_buffer_destroy(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ if (pb->mapped)
+ nvmap_munmap(pb->mem, pb->mapped);
+
+ if (pb->phys != 0)
+ nvmap_unpin(nvmap, pb->mem);
+
+ if (pb->mem)
+ nvmap_free(nvmap, pb->mem);
+
+ kfree(pb->nvmap);
+
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+ pb->nvmap = 0;
+}
+
+/**
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void push_buffer_push_to(struct push_buffer *pb,
+ struct nvmap_client *client,
+ struct nvmap_handle *handle, u32 op1, u32 op2)
+{
+ u32 cur = pb->cur;
+ u32 *p = (u32 *)((u32)pb->mapped + cur);
+ u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1);
+ BUG_ON(cur == pb->fence);
+ *(p++) = op1;
+ *(p++) = op2;
+ pb->nvmap[cur_nvmap].client = client;
+ pb->nvmap[cur_nvmap].handle = handle;
+ pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void push_buffer_pop_from(struct push_buffer *pb,
+ unsigned int slots)
+{
+ /* Clear the nvmap references for old items from pb */
+ unsigned int i;
+ u32 fence_nvmap = pb->fence/8;
+ for (i = 0; i < slots; i++) {
+ int cur_fence_nvmap = (fence_nvmap+i)
+ & (NVHOST_GATHER_QUEUE_SIZE - 1);
+ struct nvmap_client_handle *h =
+ &pb->nvmap[cur_fence_nvmap];
+ h->client = NULL;
+ h->handle = NULL;
+ }
+ /* Advance the next write position */
+ pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 push_buffer_space(struct push_buffer *pb)
+{
+ return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
+}
+
+static u32 push_buffer_putptr(struct push_buffer *pb)
+{
+ return pb->phys + pb->cur;
+}
+
+/*
+ * The syncpt incr buffer is filled with methods to increment syncpts, which
+ * is later GATHER-ed into the mainline PB. It's used when a timed out context
+ * is interleaved with other work, so needs to inline the syncpt increments
+ * to maintain the count (but otherwise does no work).
+ */
+
+/**
+ * Init timeout and syncpt incr buffer resources
+ */
+static int cdma_timeout_init(struct nvhost_cdma *cdma,
+ u32 syncpt_id)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ struct syncpt_buffer *sb = &cdma->syncpt_buffer;
+ struct nvhost_channel *ch = cdma_to_channel(cdma);
+ u32 i = 0;
+
+ if (syncpt_id == NVSYNCPT_INVALID)
+ return -EINVAL;
+
+ /* allocate and map syncpt incr memory */
+ sb->mem = nvmap_alloc(nvmap,
+ (SYNCPT_INCR_BUFFER_SIZE_WORDS * sizeof(u32)), 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR_OR_NULL(sb->mem)) {
+ sb->mem = NULL;
+ goto fail;
+ }
+ sb->mapped = nvmap_mmap(sb->mem);
+ if (sb->mapped == NULL)
+ goto fail;
+
+ /* pin syncpt buffer and get physical address */
+ sb->phys = nvmap_pin(nvmap, sb->mem);
+ if (sb->phys >= 0xfffff000) {
+ sb->phys = 0;
+ goto fail;
+ }
+
+ dev_dbg(&dev->pdev->dev, "%s: SYNCPT_INCR buffer at 0x%x\n",
+ __func__, sb->phys);
+
+ sb->words_per_incr = (syncpt_id == NVSYNCPT_3D) ? 5 : 3;
+ sb->incr_per_buffer = (SYNCPT_INCR_BUFFER_SIZE_WORDS /
+ sb->words_per_incr);
+
+ /* init buffer with SETCL and INCR_SYNCPT methods */
+ while (i < sb->incr_per_buffer) {
+ sb->mapped[i++] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ 0, 0);
+ sb->mapped[i++] = nvhost_opcode_imm_incr_syncpt(
+ NV_SYNCPT_IMMEDIATE,
+ syncpt_id);
+ if (syncpt_id == NVSYNCPT_3D) {
+ /* also contains base increments */
+ sb->mapped[i++] = nvhost_opcode_nonincr(
+ NV_CLASS_HOST_INCR_SYNCPT_BASE,
+ 1);
+ sb->mapped[i++] = nvhost_class_host_incr_syncpt_base(
+ NVWAITBASE_3D, 1);
+ }
+ sb->mapped[i++] = nvhost_opcode_setclass(ch->desc->class,
+ 0, 0);
+ }
+ wmb();
+
+ INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
+ cdma->timeout.initialized = true;
+
+ return 0;
+fail:
+ cdma_op(cdma).timeout_destroy(cdma);
+ return -ENOMEM;
+}
+
+/**
+ * Clean up timeout syncpt buffer resources
+ */
+static void cdma_timeout_destroy(struct nvhost_cdma *cdma)
+{
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ struct syncpt_buffer *sb = &cdma->syncpt_buffer;
+
+ if (sb->mapped)
+ nvmap_munmap(sb->mem, sb->mapped);
+
+ if (sb->phys != 0)
+ nvmap_unpin(nvmap, sb->mem);
+
+ if (sb->mem)
+ nvmap_free(nvmap, sb->mem);
+
+ sb->mem = NULL;
+ sb->mapped = NULL;
+ sb->phys = 0;
+
+ if (cdma->timeout.initialized)
+ cancel_delayed_work(&cdma->timeout.wq);
+ cdma->timeout.initialized = false;
+}
+
+/**
+ * Increment timedout buffer's syncpt via CPU.
+ */
+static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr,
+ u32 syncpt_incrs, u32 syncval, u32 nr_slots)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct push_buffer *pb = &cdma->push_buffer;
+ u32 i, getidx;
+
+ for (i = 0; i < syncpt_incrs; i++)
+ nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id);
+
+ /* after CPU incr, ensure shadow is up to date */
+ nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id);
+
+ /* update WAITBASE_3D by same number of incrs */
+ if (cdma->timeout.syncpt_id == NVSYNCPT_3D) {
+ void __iomem *p;
+ p = dev->sync_aperture + HOST1X_SYNC_SYNCPT_BASE_0 +
+ (NVWAITBASE_3D * sizeof(u32));
+ writel(syncval, p);
+ }
+
+ /* NOP all the PB slots */
+ getidx = getptr - pb->phys;
+ while (nr_slots--) {
+ u32 *p = (u32 *)((u32)pb->mapped + getidx);
+ *(p++) = NVHOST_OPCODE_NOOP;
+ *(p++) = NVHOST_OPCODE_NOOP;
+ dev_dbg(&dev->pdev->dev, "%s: NOP at 0x%x\n",
+ __func__, pb->phys + getidx);
+ getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
+ }
+ wmb();
+}
+
+/**
+ * This routine is called at the point we transition back into a timed
+ * ctx. The syncpts are incremented via pushbuffer with a flag indicating
+ * whether there's a CTXSAVE that should be still executed (for the
+ * preceding HW ctx).
+ */
+static void cdma_timeout_pb_incr(struct nvhost_cdma *cdma, u32 getptr,
+ u32 syncpt_incrs, u32 nr_slots,
+ bool exec_ctxsave)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct syncpt_buffer *sb = &cdma->syncpt_buffer;
+ struct push_buffer *pb = &cdma->push_buffer;
+ struct nvhost_hwctx *hwctx = cdma->timeout.ctx;
+ u32 getidx, *p;
+
+ /* should have enough slots to incr to desired count */
+ BUG_ON(syncpt_incrs > (nr_slots * sb->incr_per_buffer));
+
+ getidx = getptr - pb->phys;
+ if (exec_ctxsave) {
+ /* don't disrupt the CTXSAVE of a good/non-timed out ctx */
+ nr_slots -= hwctx->save_slots;
+ syncpt_incrs -= hwctx->save_incrs;
+
+ getidx += (hwctx->save_slots * 8);
+ getidx &= (PUSH_BUFFER_SIZE - 1);
+
+ dev_dbg(&dev->pdev->dev,
+ "%s: exec CTXSAVE of prev ctx (slots %d, incrs %d)\n",
+ __func__, nr_slots, syncpt_incrs);
+ }
+
+ while (syncpt_incrs) {
+ u32 incrs, count;
+
+ /* GATHER count is incrs * number of DWORDs per incr */
+ incrs = min(syncpt_incrs, sb->incr_per_buffer);
+ count = incrs * sb->words_per_incr;
+
+ p = (u32 *)((u32)pb->mapped + getidx);
+ *(p++) = nvhost_opcode_gather(count);
+ *(p++) = sb->phys;
+
+ dev_dbg(&dev->pdev->dev,
+ "%s: GATHER at 0x%x, from 0x%x, dcount = %d\n",
+ __func__,
+ pb->phys + getidx, sb->phys,
+ (incrs * sb->words_per_incr));
+
+ syncpt_incrs -= incrs;
+ getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
+ nr_slots--;
+ }
+
+ /* NOP remaining slots */
+ while (nr_slots--) {
+ p = (u32 *)((u32)pb->mapped + getidx);
+ *(p++) = NVHOST_OPCODE_NOOP;
+ *(p++) = NVHOST_OPCODE_NOOP;
+ dev_dbg(&dev->pdev->dev, "%s: NOP at 0x%x\n",
+ __func__, pb->phys + getidx);
+ getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
+ }
+ wmb();
+}
+
+/**
+ * Start channel DMA
+ */
+static void cdma_start(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (cdma->running)
+ return;
+
+ BUG_ON(!cdma_pb_op(cdma).putptr);
+ cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
+
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* set base, put, end pointer (all of memory) */
+ writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
+ writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
+
+ /* reset GET */
+ writel(host1x_channel_dmactrl(true, true, true),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* start the command DMA */
+ writel(host1x_channel_dmactrl(false, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ cdma->running = true;
+}
+
+/**
+ * Similar to cdma_start(), but rather than starting from an idle
+ * state (where DMA GET is set to DMA PUT), on a timeout we restore
+ * DMA GET from an explicit value (so DMA may again be pending).
+ */
+static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (cdma->running)
+ return;
+
+ BUG_ON(!cdma_pb_op(cdma).putptr);
+ cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
+
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* set base, end pointer (all of memory) */
+ writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
+ writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
+
+ /* set GET by loading the value into PUT (then reset GET) */
+ writel(getptr, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ writel(host1x_channel_dmactrl(true, true, true),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ dev_dbg(&dev->pdev->dev,
+ "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+ __func__,
+ readl(chan_regs + HOST1X_CHANNEL_DMAGET),
+ readl(chan_regs + HOST1X_CHANNEL_DMAPUT),
+ cdma->last_put);
+
+ /* deassert GET reset and set PUT */
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+ writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+
+ /* start the command DMA */
+ writel(host1x_channel_dmactrl(false, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ cdma->running = true;
+}
+
+/**
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void cdma_kick(struct nvhost_cdma *cdma)
+{
+ u32 put;
+ BUG_ON(!cdma_pb_op(cdma).putptr);
+
+ put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
+
+ if (put != cdma->last_put) {
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+ wmb();
+ writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ cdma->last_put = put;
+ }
+}
+
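+/**
+ * Stop channel DMA; waits for the sync queue to drain before halting
+ */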
+static void cdma_stop(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ mutex_lock(&cdma->lock);
+ if (cdma->running) {
+ nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ writel(host1x_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+ cdma->running = false;
+ }
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Retrieve the op pair at a slot offset from a DMA address
+ */
+void cdma_peek(struct nvhost_cdma *cdma,
+ u32 dmaget, int slot, u32 *out)
+{
+ u32 offset = dmaget - cdma->push_buffer.phys;
+ u32 *p = cdma->push_buffer.mapped;
+
+ offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2;
+ out[0] = p[offset];
+ out[1] = p[offset + 1];
+}
+
+/**
+ * Stops both the channel's command processor and its CDMA immediately.
+ * Also tears down the channel and resets the corresponding module.
+ */
+void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct nvhost_channel *ch = cdma_to_channel(cdma);
+ u32 cmdproc_stop;
+
+ BUG_ON(cdma->torndown);
+
+ dev_dbg(&dev->pdev->dev,
+ "begin channel teardown (channel id %d)\n", ch->chid);
+
+ cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+ cmdproc_stop |= BIT(ch->chid);
+ writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+
+ dev_dbg(&dev->pdev->dev,
+ "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+ __func__,
+ readl(ch->aperture + HOST1X_CHANNEL_DMAGET),
+ readl(ch->aperture + HOST1X_CHANNEL_DMAPUT),
+ cdma->last_put);
+
+ writel(host1x_channel_dmactrl(true, false, false),
+ ch->aperture + HOST1X_CHANNEL_DMACTRL);
+
+ writel(BIT(ch->chid), dev->sync_aperture + HOST1X_SYNC_CH_TEARDOWN);
+ nvhost_module_reset(&dev->pdev->dev, &ch->mod);
+
+ cdma->running = false;
+ cdma->torndown = true;
+}
+
+void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr)
+{
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+ struct nvhost_channel *ch = cdma_to_channel(cdma);
+ u32 cmdproc_stop;
+
+ BUG_ON(!cdma->torndown || cdma->running);
+
+ dev_dbg(&dev->pdev->dev,
+ "end channel teardown (id %d, DMAGET restart = 0x%x)\n",
+ ch->chid, getptr);
+
+ cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+ cmdproc_stop &= ~(BIT(ch->chid));
+ writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+
+ cdma->torndown = false;
+ cdma_timeout_restart(cdma, getptr);
+}
+
+/**
+ * If this timeout fires, the current sync_queue entry has exceeded its
+ * TTL: the userctx should be timed out, any submits already issued should
+ * be cleaned up, and future submits should return an error.
+ */
+static void cdma_timeout_handler(struct work_struct *work)
+{
+ struct nvhost_cdma *cdma;
+ struct nvhost_master *dev;
+ struct nvhost_syncpt *sp;
+ struct nvhost_channel *ch;
+
+ u32 syncpt_val;
+
+ u32 prev_cmdproc, cmdproc_stop;
+
+ cdma = container_of(to_delayed_work(work), struct nvhost_cdma,
+ timeout.wq);
+ dev = cdma_to_dev(cdma);
+ sp = &dev->syncpt;
+ ch = cdma_to_channel(cdma);
+
+ mutex_lock(&cdma->lock);
+
+ if (!cdma->timeout.clientid) {
+ dev_dbg(&dev->pdev->dev,
+ "cdma_timeout: expired, but has no clientid\n");
+ mutex_unlock(&cdma->lock);
+ return;
+ }
+
+ /* stop processing to get a clean snapshot */
+ prev_cmdproc = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+ cmdproc_stop = prev_cmdproc | BIT(ch->chid);
+ writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+
+ dev_dbg(&dev->pdev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
+ prev_cmdproc, cmdproc_stop);
+
+ syncpt_val = nvhost_syncpt_update_min(&dev->syncpt,
+ cdma->timeout.syncpt_id);
+
+ /* has buffer actually completed? */
+ if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
+ dev_dbg(&dev->pdev->dev,
+ "cdma_timeout: expired, but buffer had completed\n");
+ /* restore */
+ cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid));
+ writel(cmdproc_stop,
+ dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
+ mutex_unlock(&cdma->lock);
+ return;
+ }
+
+ dev_warn(&dev->pdev->dev,
+ "%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n",
+ __func__,
+ cdma->timeout.syncpt_id,
+ syncpt_op(sp).name(sp, cdma->timeout.syncpt_id),
+ cdma->timeout.ctx,
+ syncpt_val, cdma->timeout.syncpt_val);
+
+ /* stop HW, resetting channel/module */
+ cdma_op(cdma).timeout_teardown_begin(cdma);
+
+ nvhost_cdma_update_sync_queue(cdma, sp, &dev->pdev->dev);
+ mutex_unlock(&cdma->lock);
+}
+
+int host1x_init_cdma_support(struct nvhost_master *host)
+{
+ host->op.cdma.start = cdma_start;
+ host->op.cdma.stop = cdma_stop;
+ host->op.cdma.kick = cdma_kick;
+
+ host->op.cdma.timeout_init = cdma_timeout_init;
+ host->op.cdma.timeout_destroy = cdma_timeout_destroy;
+ host->op.cdma.timeout_teardown_begin = cdma_timeout_teardown_begin;
+ host->op.cdma.timeout_teardown_end = cdma_timeout_teardown_end;
+ host->op.cdma.timeout_cpu_incr = cdma_timeout_cpu_incr;
+ host->op.cdma.timeout_pb_incr = cdma_timeout_pb_incr;
+
+ host->sync_queue_size = NVHOST_SYNC_QUEUE_SIZE;
+
+ host->op.push_buffer.reset = push_buffer_reset;
+ host->op.push_buffer.init = push_buffer_init;
+ host->op.push_buffer.destroy = push_buffer_destroy;
+ host->op.push_buffer.push_to = push_buffer_push_to;
+ host->op.push_buffer.pop_from = push_buffer_pop_from;
+ host->op.push_buffer.space = push_buffer_space;
+ host->op.push_buffer.putptr = push_buffer_putptr;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.h b/drivers/video/tegra/host/host1x/host1x_cdma.h
new file mode 100644
index 000000000000..e66da0f19772
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cdma.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cdma.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_CDMA_H
+#define __NVHOST_HOST1X_HOST1X_CDMA_H
+
+/* Size of the sync queue. If it is too small, we won't be able to queue up
+ * many command buffers. If it is too large, we waste memory. */
+#define NVHOST_SYNC_QUEUE_SIZE 512
+
+/* Number of gathers we allow to be queued up per channel. Must be a
+ * power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
+#define NVHOST_GATHER_QUEUE_SIZE 512
+
+/* 8 bytes per slot. (This number does not include the final RESTART.) */
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
+
+/* 4K page containing GATHERed methods to increment channel syncpts;
+ * it replaces the timed-out context's original GATHER slots */
+#define SYNCPT_INCR_BUFFER_SIZE_WORDS (4096 / sizeof(u32))
+
+int host1x_init_cdma_support(struct nvhost_master *);
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.c b/drivers/video/tegra/host/host1x/host1x_channel.c
new file mode 100644
index 000000000000..f5f36ca6a001
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_channel.c
@@ -0,0 +1,570 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_hwctx.h"
+#include <trace/events/nvhost.h>
+#include <linux/slab.h>
+
+#include "host1x_syncpt.h"
+#include "host1x_channel.h"
+#include "host1x_hardware.h"
+#include "nvhost_intr.h"
+
+#define NV_FIFO_READ_TIMEOUT 200000
+
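+/* If the unit requires it (waitbasesync), push a LOAD_SYNCPT_BASE so the
+ * wait base tracks the given syncpt value */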
+static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val)
+{
+ unsigned long waitbase;
+ unsigned long int waitbase_mask = ch->desc->waitbases;
+ if (ch->desc->waitbasesync) {
+ waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG);
+ nvhost_cdma_push(&ch->cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_LOAD_SYNCPT_BASE,
+ 1),
+ nvhost_class_host_load_syncpt_base(waitbase,
+ syncpt_val));
+ }
+}
+
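+/**
+ * Submit a job to the channel: handles the context save/restore, pushes
+ * the user gathers and schedules the completion interrupts
+ */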
+int host1x_channel_submit(struct nvhost_job *job)
+{
+ struct nvhost_hwctx *hwctx_to_save = NULL;
+ struct nvhost_channel *channel = job->ch;
+ struct nvhost_syncpt *sp = &job->ch->dev->syncpt;
+ u32 user_syncpt_incrs = job->syncpt_incrs;
+ bool need_restore = false;
+ u32 syncval;
+ int err;
+ void *ctxrestore_waiter = NULL;
+ void *ctxsave_waiter, *completed_waiter;
+
+ if (job->hwctx && job->hwctx->has_timedout)
+ return -ETIMEDOUT;
+
+ ctxsave_waiter = nvhost_intr_alloc_waiter();
+ completed_waiter = nvhost_intr_alloc_waiter();
+ if (!ctxsave_waiter || !completed_waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* keep module powered */
+ nvhost_module_busy(&channel->mod);
+ if (channel->mod.desc->busy)
+ channel->mod.desc->busy(&channel->mod);
+
+ /* before error checks, return current max */
+ job->syncpt_end = nvhost_syncpt_read_max(sp, job->syncpt_id);
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&channel->submitlock);
+ if (err) {
+ nvhost_module_idle(&channel->mod);
+ goto done;
+ }
+
+ /* If we are going to need a restore, allocate a waiter for it */
+ if (channel->cur_ctx != job->hwctx && job->hwctx && job->hwctx->valid) {
+ ctxrestore_waiter = nvhost_intr_alloc_waiter();
+ if (!ctxrestore_waiter) {
+ mutex_unlock(&channel->submitlock);
+ nvhost_module_idle(&channel->mod);
+ err = -ENOMEM;
+ goto done;
+ }
+ need_restore = true;
+ }
+
+ /* remove stale waits */
+ if (job->num_waitchk) {
+ err = nvhost_syncpt_wait_check(sp,
+ job->nvmap,
+ job->waitchk_mask,
+ job->waitchk,
+ job->num_waitchk);
+ if (err) {
+ dev_warn(&channel->dev->pdev->dev,
+ "nvhost_syncpt_wait_check failed: %d\n", err);
+ mutex_unlock(&channel->submitlock);
+ nvhost_module_idle(&channel->mod);
+ goto done;
+ }
+ }
+
+ /* begin a CDMA submit */
+ err = nvhost_cdma_begin(&channel->cdma, job);
+ if (err) {
+ mutex_unlock(&channel->submitlock);
+ nvhost_module_idle(&channel->mod);
+ goto done;
+ }
+
+ sync_waitbases(channel, job->syncpt_end);
+
+ /* context switch */
+ if (channel->cur_ctx != job->hwctx) {
+ trace_nvhost_channel_context_switch(channel->desc->name,
+ channel->cur_ctx, job->hwctx);
+ hwctx_to_save = channel->cur_ctx;
+ if (hwctx_to_save &&
+ hwctx_to_save->has_timedout) {
+ hwctx_to_save = NULL;
+ dev_dbg(&channel->dev->pdev->dev,
+ "%s: skip save of timed out context (0x%p)\n",
+ __func__, channel->cur_ctx);
+ }
+ if (hwctx_to_save) {
+ job->syncpt_incrs += hwctx_to_save->save_incrs;
+ hwctx_to_save->valid = true;
+ channel->ctxhandler.get(hwctx_to_save);
+ }
+ channel->cur_ctx = job->hwctx;
+ if (need_restore)
+ job->syncpt_incrs += channel->cur_ctx->restore_incrs;
+ }
+
+ /* get absolute sync value */
+ if (BIT(job->syncpt_id) & sp->client_managed)
+ syncval = nvhost_syncpt_set_max(sp,
+ job->syncpt_id, job->syncpt_incrs);
+ else
+ syncval = nvhost_syncpt_incr_max(sp,
+ job->syncpt_id, job->syncpt_incrs);
+
+ job->syncpt_end = syncval;
+
+ /* push save buffer (pre-gather setup depends on unit) */
+ if (hwctx_to_save)
+ channel->ctxhandler.save_push(&channel->cdma, hwctx_to_save);
+
+ /* gather restore buffer */
+ if (need_restore) {
+ nvhost_cdma_push_gather(&channel->cdma,
+ channel->dev->nvmap,
+ nvmap_ref_to_handle(channel->cur_ctx->restore),
+ nvhost_opcode_gather(channel->cur_ctx->restore_size),
+ channel->cur_ctx->restore_phys);
+ channel->ctxhandler.get(channel->cur_ctx);
+ }
+
+ /* add a setclass for modules that require it (unless ctxsw added it) */
+ if (!hwctx_to_save && !need_restore && channel->desc->class)
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_setclass(channel->desc->class, 0, 0),
+ NVHOST_OPCODE_NOOP);
+
+ if (job->null_kickoff) {
+ int incr;
+ u32 op_incr;
+
+ /* TODO ideally we'd also perform host waits here */
+
+ /* push increments that correspond to nulled out commands */
+ op_incr = nvhost_opcode_imm(0, 0x100 | job->syncpt_id);
+ for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
+ nvhost_cdma_push(&channel->cdma, op_incr, op_incr);
+ if (user_syncpt_incrs & 1)
+ nvhost_cdma_push(&channel->cdma,
+ op_incr, NVHOST_OPCODE_NOOP);
+
+ /* for 3d, waitbase needs to be incremented after each submit */
+ if (channel->desc->class == NV_GRAPHICS_3D_CLASS_ID)
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_setclass(
+ NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE,
+ 1),
+ nvhost_class_host_incr_syncpt_base(
+ NVWAITBASE_3D,
+ user_syncpt_incrs));
+ } else {
+ /* push user gathers */
+ int i = 0;
+ for ( ; i < job->num_gathers; i++) {
+ u32 op1 = nvhost_opcode_gather(job->gathers[i].words);
+ u32 op2 = job->gathers[i].mem;
+ nvhost_cdma_push_gather(&channel->cdma,
+ job->nvmap, job->unpins[i/2],
+ op1, op2);
+ }
+ }
+
+ /* end CDMA submit & stash pinned hMems into sync queue */
+ nvhost_cdma_end(&channel->cdma, job);
+
+ trace_nvhost_channel_submitted(channel->desc->name,
+ syncval - job->syncpt_incrs, syncval);
+
+ /*
+ * schedule a context save interrupt (to drain the host FIFO
+ * if necessary, and to release the restore buffer)
+ */
+ if (hwctx_to_save) {
+ err = nvhost_intr_add_action(&channel->dev->intr,
+ job->syncpt_id,
+ syncval - job->syncpt_incrs
+ + hwctx_to_save->save_thresh,
+ NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+ ctxsave_waiter,
+ NULL);
+ ctxsave_waiter = NULL;
+ WARN(err, "Failed to set ctx save interrupt");
+ }
+
+ if (need_restore) {
+ BUG_ON(!ctxrestore_waiter);
+ err = nvhost_intr_add_action(&channel->dev->intr,
+ job->syncpt_id,
+ syncval - user_syncpt_incrs,
+ NVHOST_INTR_ACTION_CTXRESTORE, channel->cur_ctx,
+ ctxrestore_waiter,
+ NULL);
+ ctxrestore_waiter = NULL;
+ WARN(err, "Failed to set ctx restore interrupt");
+ }
+
+ /* schedule a submit complete interrupt */
+ err = nvhost_intr_add_action(&channel->dev->intr, job->syncpt_id,
+ syncval,
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
+ completed_waiter,
+ NULL);
+ completed_waiter = NULL;
+ WARN(err, "Failed to set submit complete interrupt");
+
+ mutex_unlock(&channel->submitlock);
+
+done:
+ kfree(ctxrestore_waiter);
+ kfree(ctxsave_waiter);
+ kfree(completed_waiter);
+ return err;
+}
+
+int host1x_channel_read_3d_reg(
+ struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset,
+ u32 *value)
+{
+ struct nvhost_hwctx *hwctx_to_save = NULL;
+ bool need_restore = false;
+ u32 syncpt_incrs = 4;
+ unsigned int pending = 0;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ void *ctx_waiter, *read_waiter, *completed_waiter;
+ struct nvhost_job *job;
+ u32 syncval;
+ int err;
+
+ if (hwctx && hwctx->has_timedout)
+ return -ETIMEDOUT;
+
+ ctx_waiter = nvhost_intr_alloc_waiter();
+ read_waiter = nvhost_intr_alloc_waiter();
+ completed_waiter = nvhost_intr_alloc_waiter();
+ if (!ctx_waiter || !read_waiter || !completed_waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ job = nvhost_job_alloc(channel, hwctx,
+ NULL,
+ channel->dev->nvmap, 0, 0);
+ if (!job) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* keep module powered */
+ nvhost_module_busy(&channel->mod);
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&channel->submitlock);
+ if (err) {
+ nvhost_module_idle(&channel->mod);
+ /* free the allocated waiters rather than leaking them */
+ goto done;
+ }
+
+ /* context switch */
+ if (channel->cur_ctx != hwctx) {
+ hwctx_to_save = channel->cur_ctx;
+ if (hwctx_to_save) {
+ syncpt_incrs += hwctx_to_save->save_incrs;
+ hwctx_to_save->valid = true;
+ channel->ctxhandler.get(hwctx_to_save);
+ }
+ channel->cur_ctx = hwctx;
+ if (channel->cur_ctx && channel->cur_ctx->valid) {
+ need_restore = true;
+ syncpt_incrs += channel->cur_ctx->restore_incrs;
+ }
+ }
+
+ syncval = nvhost_syncpt_incr_max(&channel->dev->syncpt,
+ NVSYNCPT_3D, syncpt_incrs);
+
+ job->syncpt_id = NVSYNCPT_3D;
+ job->syncpt_incrs = syncpt_incrs;
+ job->syncpt_end = syncval;
+
+ /* begin a CDMA submit */
+ nvhost_cdma_begin(&channel->cdma, job);
+
+ /* push save buffer (pre-gather setup depends on unit) */
+ if (hwctx_to_save)
+ channel->ctxhandler.save_push(&channel->cdma, hwctx_to_save);
+
+ /* gather restore buffer */
+ if (need_restore)
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_gather(channel->cur_ctx->restore_size),
+ channel->cur_ctx->restore_phys);
+
+ /* Switch to 3D - wait for it to complete what it was doing */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+ nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, NVSYNCPT_3D));
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
+ nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
+ NVWAITBASE_3D, 1));
+ /* Tell 3D to send register value to FIFO */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1),
+ nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ offset, false));
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0),
+ NVHOST_OPCODE_NOOP);
+ /* Increment syncpt to indicate that FIFO can be read */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
+ NVSYNCPT_3D),
+ NVHOST_OPCODE_NOOP);
+ /* Wait for value to be read from FIFO */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
+ nvhost_class_host_wait_syncpt_base(NVSYNCPT_3D,
+ NVWAITBASE_3D, 3));
+ /* Indicate submit complete */
+ nvhost_cdma_push(&channel->cdma,
+ nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1),
+ nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D, 4));
+ nvhost_cdma_push(&channel->cdma,
+ NVHOST_OPCODE_NOOP,
+ nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
+ NVSYNCPT_3D));
+
+ /* end CDMA submit */
+ nvhost_cdma_end(&channel->cdma, job);
+ nvhost_job_put(job);
+ job = NULL;
+
+ /*
+ * schedule a context save interrupt (to drain the host FIFO
+ * if necessary, and to release the restore buffer)
+ */
+ if (hwctx_to_save) {
+ err = nvhost_intr_add_action(&channel->dev->intr, NVSYNCPT_3D,
+ syncval - syncpt_incrs + hwctx_to_save->save_incrs - 1,
+ NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+ ctx_waiter,
+ NULL);
+ ctx_waiter = NULL;
+ WARN(err, "Failed to set context save interrupt");
+ }
+
+ /* Wait for FIFO to be ready */
+ err = nvhost_intr_add_action(&channel->dev->intr, NVSYNCPT_3D,
+ syncval - 2,
+ NVHOST_INTR_ACTION_WAKEUP, &wq,
+ read_waiter,
+ &ref);
+ read_waiter = NULL;
+ WARN(err, "Failed to set wakeup interrupt");
+ wait_event(wq,
+ nvhost_syncpt_min_cmp(&channel->dev->syncpt,
+ NVSYNCPT_3D, syncval - 2));
+ nvhost_intr_put_ref(&channel->dev->intr, ref);
+
+ /* Read the register value from FIFO */
+ err = host1x_drain_read_fifo(channel->aperture,
+ value, 1, &pending);
+
+ /* Indicate we've read the value */
+ nvhost_syncpt_cpu_incr(&channel->dev->syncpt, NVSYNCPT_3D);
+
+ /* Schedule a submit complete interrupt */
+ err = nvhost_intr_add_action(&channel->dev->intr, NVSYNCPT_3D, syncval,
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
+ completed_waiter, NULL);
+ completed_waiter = NULL;
+ WARN(err, "Failed to set submit complete interrupt");
+
+ mutex_unlock(&channel->submitlock);
+
+done:
+ kfree(ctx_waiter);
+ kfree(read_waiter);
+ kfree(completed_waiter);
+ return err;
+}
+
+
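+/**
+ * Read 'count' words from the channel's output FIFO into 'ptr'.
+ * '*pending' carries the number of unread FIFO entries across calls.
+ */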
+int host1x_drain_read_fifo(void __iomem *chan_regs,
+ u32 *ptr, unsigned int count, unsigned int *pending)
+{
+ unsigned int entries = *pending;
+ unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
+ while (count) {
+ unsigned int num;
+
+ while (!entries && time_before(jiffies, timeout)) {
+ /* query host for number of entries in fifo */
+ entries = HOST1X_VAL(CHANNEL_FIFOSTAT, OUTFENTRIES,
+ readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
+ if (!entries)
+ cpu_relax();
+ }
+
+ /* timeout -> return error */
+ if (!entries)
+ return -EIO;
+
+ num = min(entries, count);
+ entries -= num;
+ count -= num;
+
+ while (num & ~0x3) {
+ u32 arr[4];
+ arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ memcpy(ptr, arr, 4*sizeof(u32));
+ ptr += 4;
+ num -= 4;
+ }
+ while (num--)
+ *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ }
+ *pending = entries;
+
+ return 0;
+}
+
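+/**
+ * Push a context save for the channel's current hwctx and wait for the
+ * save to complete before returning
+ */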
+int host1x_save_context(struct nvhost_module *mod, u32 syncpt_id)
+{
+ struct nvhost_channel *ch =
+ container_of(mod, struct nvhost_channel, mod);
+ struct nvhost_hwctx *hwctx_to_save;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ u32 syncpt_incrs, syncpt_val;
+ int err = 0;
+ void *ref;
+ void *ctx_waiter = NULL, *wakeup_waiter = NULL;
+ struct nvhost_job *job;
+
+ ctx_waiter = nvhost_intr_alloc_waiter();
+ wakeup_waiter = nvhost_intr_alloc_waiter();
+ if (!ctx_waiter || !wakeup_waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (mod->desc->busy)
+ mod->desc->busy(mod);
+
+ mutex_lock(&ch->submitlock);
+ hwctx_to_save = ch->cur_ctx;
+ if (!hwctx_to_save) {
+ mutex_unlock(&ch->submitlock);
+ goto done;
+ }
+
+ job = nvhost_job_alloc(ch, hwctx_to_save,
+ NULL,
+ ch->dev->nvmap, 0, 0);
+ if (IS_ERR_OR_NULL(job)) {
+ /* a NULL job would otherwise leave err == 0 (success) */
+ err = job ? PTR_ERR(job) : -ENOMEM;
+ mutex_unlock(&ch->submitlock);
+ goto done;
+ }
+
+ hwctx_to_save->valid = true;
+ ch->ctxhandler.get(hwctx_to_save);
+ ch->cur_ctx = NULL;
+
+ syncpt_incrs = hwctx_to_save->save_incrs;
+ syncpt_val = nvhost_syncpt_incr_max(&ch->dev->syncpt,
+ syncpt_id, syncpt_incrs);
+
+ job->syncpt_id = syncpt_id;
+ job->syncpt_incrs = syncpt_incrs;
+ job->syncpt_end = syncpt_val;
+
+ err = nvhost_cdma_begin(&ch->cdma, job);
+ if (err) {
+ mutex_unlock(&ch->submitlock);
+ goto done;
+ }
+
+ ch->ctxhandler.save_push(&ch->cdma, hwctx_to_save);
+ nvhost_cdma_end(&ch->cdma, job);
+ nvhost_job_put(job);
+ job = NULL;
+
+ err = nvhost_intr_add_action(&ch->dev->intr, syncpt_id,
+ syncpt_val - syncpt_incrs + hwctx_to_save->save_thresh,
+ NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+ ctx_waiter,
+ NULL);
+ ctx_waiter = NULL;
+ WARN(err, "Failed to set context save interrupt");
+
+ err = nvhost_intr_add_action(&ch->dev->intr, syncpt_id, syncpt_val,
+ NVHOST_INTR_ACTION_WAKEUP, &wq,
+ wakeup_waiter,
+ &ref);
+ wakeup_waiter = NULL;
+ WARN(err, "Failed to set wakeup interrupt");
+ wait_event(wq,
+ nvhost_syncpt_min_cmp(&ch->dev->syncpt,
+ syncpt_id, syncpt_val));
+
+ nvhost_intr_put_ref(&ch->dev->intr, ref);
+
+ nvhost_cdma_update(&ch->cdma);
+
+ mutex_unlock(&ch->submitlock);
+
+done:
+ kfree(ctx_waiter);
+ kfree(wakeup_waiter);
+ return err;
+}
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.h b/drivers/video/tegra/host/host1x/host1x_channel.h
new file mode 100644
index 000000000000..3c4cf4f9cbcd
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_channel.h
@@ -0,0 +1,46 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HOST1X_CHANNEL_H
+#define __NVHOST_HOST1X_CHANNEL_H
+
+struct nvhost_job;
+struct nvhost_channel;
+struct nvhost_hwctx;
+
+/* Submit job to a host1x client */
+int host1x_channel_submit(struct nvhost_job *job);
+
+/* Read 3d register via FIFO */
+int host1x_channel_read_3d_reg(
+ struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset,
+ u32 *value);
+
+/* Reads words from FIFO */
+int host1x_drain_read_fifo(void __iomem *chan_regs,
+ u32 *ptr, unsigned int count, unsigned int *pending);
+
+int host1x_save_context(struct nvhost_module *mod, u32 syncpt_id);
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_cpuaccess.c b/drivers/video/tegra/host/host1x/host1x_cpuaccess.c
new file mode 100644
index 000000000000..927f4ca85bdc
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cpuaccess.c
@@ -0,0 +1,54 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cpuaccess.c
+ *
+ * Tegra Graphics Host CPU Register Access
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cpuaccess.h"
+#include "dev.h"
+#include "host1x_hardware.h"
+
+static int t20_cpuaccess_mutex_try_lock(struct nvhost_cpuaccess *ctx,
+ unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *sync_regs = dev->sync_aperture;
+ /* mlock registers return 0 when the lock is acquired.
+ * writing 0 clears the lock. */
+ return !!readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+}
+
+static void t20_cpuaccess_mutex_unlock(struct nvhost_cpuaccess *ctx,
+ unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *sync_regs = dev->sync_aperture;
+
+ writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+}
+
+int nvhost_init_t20_cpuaccess_support(struct nvhost_master *host)
+{
+ host->nb_modules = NVHOST_MODULE_NUM;
+
+ host->op.cpuaccess.mutex_try_lock = t20_cpuaccess_mutex_try_lock;
+ host->op.cpuaccess.mutex_unlock = t20_cpuaccess_mutex_unlock;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/host1x/host1x_debug.c b/drivers/video/tegra/host/host1x/host1x_debug.c
new file mode 100644
index 000000000000..f47a2a5c275f
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_debug.c
@@ -0,0 +1,404 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_cdma.h"
+#include "../../nvmap/nvmap.h"
+
+#include "host1x_hardware.h"
+#include "host1x_cdma.h"
+
+#define NVHOST_DEBUG_MAX_PAGE_OFFSET 102400
+
+enum {
+ NVHOST_DBG_STATE_CMD = 0,
+ NVHOST_DBG_STATE_DATA = 1,
+ NVHOST_DBG_STATE_GATHER = 2
+};
+
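+/* Decode and print one command word; returns the parser state for the
+ * following words and sets *count to the number of data words expected */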
+static int show_channel_command(struct output *o, u32 addr, u32 val, int *count)
+{
+ unsigned mask;
+ unsigned subop;
+
+ switch (val >> 28) {
+ case 0x0:
+ mask = val & 0x3f;
+ if (mask) {
+ nvhost_debug_output(o,
+ "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
+ *count = hweight8(mask);
+ return NVHOST_DBG_STATE_DATA;
+ } else {
+ nvhost_debug_output(o, "SETCL(class=%03x)\n",
+ val >> 6 & 0x3ff);
+ return NVHOST_DBG_STATE_CMD;
+ }
+
+ case 0x1:
+ nvhost_debug_output(o, "INCR(offset=%03x, [",
+ val >> 16 & 0xfff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x2:
+ nvhost_debug_output(o, "NONINCR(offset=%03x, [",
+ val >> 16 & 0xfff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x3:
+ mask = val & 0xffff;
+ nvhost_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+ val >> 16 & 0xfff, mask);
+ *count = hweight16(mask);
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x4:
+ nvhost_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+ val >> 16 & 0xfff, val & 0xffff);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x5:
+ nvhost_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x6:
+ nvhost_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1,
+ val & 0x3fff);
+ *count = val & 0x3fff; /* TODO: insert */
+ return NVHOST_DBG_STATE_GATHER;
+
+ case 0xe:
+ subop = val >> 24 & 0xf;
+ if (subop == 0)
+ nvhost_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+ val & 0xff);
+ else if (subop == 1)
+ nvhost_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+ val & 0xff);
+ else
+ nvhost_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+ return NVHOST_DBG_STATE_CMD;
+
+ default:
+ return NVHOST_DBG_STATE_CMD;
+ }
+}
+
+static void show_channel_gather(struct output *o, u32 addr,
+ phys_addr_t phys_addr, u32 words, struct nvhost_cdma *cdma);
+
+static void show_channel_word(struct output *o, int *state, int *count,
+ u32 addr, u32 val, struct nvhost_cdma *cdma)
+{
+ static int start_count, dont_print;
+
+ switch (*state) {
+ case NVHOST_DBG_STATE_CMD:
+ if (addr)
+ nvhost_debug_output(o, "%08x: %08x:", addr, val);
+ else
+ nvhost_debug_output(o, "%08x:", val);
+
+ *state = show_channel_command(o, addr, val, count);
+ dont_print = 0;
+ start_count = *count;
+ if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
+ *state = NVHOST_DBG_STATE_CMD;
+ nvhost_debug_output(o, "])\n");
+ }
+ break;
+
+ case NVHOST_DBG_STATE_DATA:
+ (*count)--;
+ if (start_count - *count < 64)
+ nvhost_debug_output(o, "%08x%s",
+ val, *count > 0 ? ", " : "])\n");
+ else if (!dont_print && (*count > 0)) {
+ nvhost_debug_output(o, "[truncated; %d more words]\n",
+ *count);
+ dont_print = 1;
+ }
+ if (*count == 0)
+ *state = NVHOST_DBG_STATE_CMD;
+ break;
+
+ case NVHOST_DBG_STATE_GATHER:
+ *state = NVHOST_DBG_STATE_CMD;
+ nvhost_debug_output(o, "%08x]):\n", val);
+ if (cdma) {
+ show_channel_gather(o, addr, val,
+ *count, cdma);
+ }
+ break;
+ }
+}
+
+static void show_channel_gather(struct output *o, u32 addr,
+ phys_addr_t phys_addr,
+ u32 words, struct nvhost_cdma *cdma)
+{
+#if defined(CONFIG_TEGRA_NVMAP)
+ /* Map dmaget cursor to corresponding nvmap_handle */
+ struct push_buffer *pb = &cdma->push_buffer;
+ u32 cur = addr - pb->phys;
+ struct nvmap_client_handle *nvmap = &pb->nvmap[cur/8];
+ struct nvmap_handle_ref ref;
+ u32 *map_addr, offset;
+ phys_addr_t pin_addr;
+ int state, count, i;
+
+ if ((u32)nvmap->handle == NVHOST_CDMA_PUSH_GATHER_CTXSAVE) {
+ nvhost_debug_output(o, "[context save]\n");
+ return;
+ }
+
+ if (!nvmap->handle || !nvmap->client
+ || atomic_read(&nvmap->handle->ref) < 1) {
+ nvhost_debug_output(o, "[already deallocated]\n");
+ return;
+ }
+
+ /* Create a fake nvmap_handle_ref - nvmap requires it
+ * but accesses only the first field - nvmap_handle */
+ ref.handle = nvmap->handle;
+
+ map_addr = nvmap_mmap(&ref);
+ if (!map_addr) {
+ nvhost_debug_output(o, "[could not mmap]\n");
+ return;
+ }
+
+ /* Get base address from nvmap */
+ pin_addr = nvmap_pin(nvmap->client, &ref);
+ if (IS_ERR_VALUE(pin_addr)) {
+ nvhost_debug_output(o, "[couldn't pin]\n");
+ nvmap_munmap(&ref, map_addr);
+ return;
+ }
+
+ offset = phys_addr - pin_addr;
+ /*
+ * Sometimes we're given a different hardware address for the same
+ * page - in these cases the offset becomes invalid and we just
+ * have to bail out.
+ */
+ if (offset > NVHOST_DEBUG_MAX_PAGE_OFFSET) {
+ nvhost_debug_output(o, "[address mismatch]\n");
+ } else {
+ /* GATHER buffer always starts with commands */
+ state = NVHOST_DBG_STATE_CMD;
+ for (i = 0; i < words; i++)
+ show_channel_word(o, &state, &count,
+ phys_addr + i * 4,
+ *(map_addr + offset/4 + i),
+ cdma);
+ }
+ nvmap_unpin(nvmap->client, &ref);
+ nvmap_munmap(&ref, map_addr);
+#endif
+}
+
+static void show_channel_pair(struct output *o, u32 addr,
+ u32 w0, u32 w1, struct nvhost_cdma *cdma)
+{
+ int state = NVHOST_DBG_STATE_CMD;
+ int count;
+
+ show_channel_word(o, &state, &count, addr, w0, cdma);
+ show_channel_word(o, &state, &count, addr+4, w1, cdma);
+}
+
+/**
+ * Retrieve the op pair at a slot offset from a DMA address
+ */
+static void cdma_peek(struct nvhost_cdma *cdma,
+ u32 dmaget, int slot, u32 *out)
+{
+ u32 offset = dmaget - cdma->push_buffer.phys;
+ u32 *p = cdma->push_buffer.mapped;
+
+ offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2;
+ out[0] = p[offset];
+ out[1] = p[offset + 1];
+}
+
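+/* Return the push buffer address of the op pair preceding 'cur',
+ * wrapping around at the start of the buffer */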
+u32 previous_oppair(struct nvhost_cdma *cdma, u32 cur)
+{
+ u32 pb = cdma->push_buffer.phys;
+ u32 prev = cur-8;
+ if (prev < pb)
+ prev += PUSH_BUFFER_SIZE;
+ return prev;
+}
+
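+/* Dump a channel's CDMA state: DMA pointers, class/wait status and the
+ * most recently fetched op pair */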
+static void t20_debug_show_channel_cdma(struct nvhost_master *m,
+ struct output *o, int chid)
+{
+ struct nvhost_channel *channel = m->channels + chid;
+ struct nvhost_cdma *cdma = &channel->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 val, base, baseval;
+ u32 pbw[2];
+
+ dmaput = readl(channel->aperture + HOST1X_CHANNEL_DMAPUT);
+ dmaget = readl(channel->aperture + HOST1X_CHANNEL_DMAGET);
+ dmactrl = readl(channel->aperture + HOST1X_CHANNEL_DMACTRL);
+ cbread = readl(m->sync_aperture + HOST1X_SYNC_CBREAD_x(chid));
+ cbstat = readl(m->sync_aperture + HOST1X_SYNC_CBSTAT_x(chid));
+
+ nvhost_debug_output(o, "%d-%s (%d): ", chid,
+ channel->mod.name,
+ channel->mod.refcount);
+
+ if (HOST1X_VAL(CHANNEL_DMACTRL, DMASTOP, dmactrl)
+ || !channel->cdma.push_buffer.mapped) {
+ nvhost_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ switch (cbstat) {
+ case 0x00010008:
+ nvhost_debug_output(o, "waiting on syncpt %d val %d\n",
+ cbread >> 24, cbread & 0xffffff);
+ break;
+
+ case 0x00010009:
+ base = (cbread >> 16) & 0xff;
+ val = readl(m->sync_aperture +
+ HOST1X_SYNC_SYNCPT_BASE_x(base));
+ baseval = HOST1X_VAL(SYNC_SYNCPT_BASE_0, BASE, val);
+ val = cbread & 0xffff;
+ nvhost_debug_output(o, "waiting on syncpt %d val %d "
+ "(base %d = %d; offset = %d)\n",
+ cbread >> 24, baseval + val,
+ base, baseval, val);
+ break;
+
+ default:
+ nvhost_debug_output(o,
+ "active class %02x, offset %04x, val %08x\n",
+ HOST1X_VAL(SYNC_CBSTAT_0, CBCLASS0, cbstat),
+ HOST1X_VAL(SYNC_CBSTAT_0, CBOFFSET0, cbstat),
+ cbread);
+ break;
+ }
+
+ nvhost_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ nvhost_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+ cdma_peek(cdma, dmaget, -1, pbw);
+ show_channel_pair(o, previous_oppair(cdma, dmaget),
+ pbw[0], pbw[1], &channel->cdma);
+ nvhost_debug_output(o, "\n");
+}
+
+void t20_debug_show_channel_fifo(struct nvhost_master *m,
+ struct output *o, int chid)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ struct nvhost_channel *channel = m->channels + chid;
+ int state, count;
+
+ nvhost_debug_output(o, "%d: fifo:\n", chid);
+
+ val = readl(channel->aperture + HOST1X_CHANNEL_FIFOSTAT);
+ nvhost_debug_output(o, "FIFOSTAT %08x\n", val);
+ if (HOST1X_VAL(CHANNEL_FIFOSTAT, CFEMPTY, val)) {
+ nvhost_debug_output(o, "[empty]\n");
+ return;
+ }
+
+ writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1)
+ | HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid),
+ m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
+
+ val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_PTRS);
+ rd_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_RD_PTR, val);
+ wr_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_WR_PTR, val);
+
+ val = readl(m->sync_aperture + HOST1X_SYNC_CFx_SETUP(chid));
+ start = HOST1X_VAL(SYNC_CF0_SETUP, BASE, val);
+ end = HOST1X_VAL(SYNC_CF0_SETUP, LIMIT, val);
+
+ state = NVHOST_DBG_STATE_CMD;
+
+ do {
+ writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1)
+ | HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid)
+ | HOST1X_CREATE(SYNC_CFPEEK_CTRL, ADDR, rd_ptr),
+ m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_READ);
+
+ show_channel_word(o, &state, &count, 0, val, NULL);
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (state == NVHOST_DBG_STATE_DATA)
+ nvhost_debug_output(o, ", ...])\n");
+ nvhost_debug_output(o, "\n");
+
+ writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
+}
+
+static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o)
+{
+ u32 __iomem *mlo_regs = m->sync_aperture + HOST1X_SYNC_MLOCK_OWNER_0;
+ int i;
+
+ nvhost_debug_output(o, "---- mlocks ----\n");
+ for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
+ u32 owner = readl(mlo_regs + i);
+ if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CH_OWNS, owner))
+ nvhost_debug_output(o, "%d: locked by channel %d\n",
+ i, HOST1X_VAL(SYNC_MLOCK_OWNER_0, CHID, owner));
+ else if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CPU_OWNS, owner))
+ nvhost_debug_output(o, "%d: locked by cpu\n", i);
+ else
+ nvhost_debug_output(o, "%d: unlocked\n", i);
+ }
+ nvhost_debug_output(o, "\n");
+}
+
+int nvhost_init_t20_debug_support(struct nvhost_master *host)
+{
+ host->op.debug.show_channel_cdma = t20_debug_show_channel_cdma;
+ host->op.debug.show_channel_fifo = t20_debug_show_channel_fifo;
+ host->op.debug.show_mlocks = t20_debug_show_mlocks;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/host1x/host1x_hardware.h b/drivers/video/tegra/host/host1x/host1x_hardware.h
new file mode 100644
index 000000000000..da0cf18b6a2e
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_hardware.h
@@ -0,0 +1,276 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets
+ *
+ * Copyright (c) 2010,2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_HARDWARE_H
+#define __NVHOST_HOST1X_HOST1X_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+/* class ids */
+enum {
+ NV_HOST1X_CLASS_ID = 0x1,
+ NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
+ NV_GRAPHICS_3D_CLASS_ID = 0x60
+};
+
+
+/* channel registers */
+#define NV_HOST1X_CHANNELS 8
+#define NV_HOST1X_CHANNEL0_BASE 0
+#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
+#define NV_HOST1X_SYNC_MLOCK_NUM 16
+
+#define HOST1X_VAL(reg, field, regdata) \
+ ((regdata >> HOST1X_##reg##_##field##_SHIFT) \
+ & HOST1X_##reg##_##field##_MASK)
+#define HOST1X_CREATE(reg, field, data) \
+ ((data & HOST1X_##reg##_##field##_MASK) \
+ << HOST1X_##reg##_##field##_SHIFT)
+
+#define HOST1X_CHANNEL_FIFOSTAT 0x00
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_SHIFT 10
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_MASK 0x1
+#define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_SHIFT 24
+#define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_MASK 0x1f
+#define HOST1X_CHANNEL_INDDATA 0x0c
+#define HOST1X_CHANNEL_DMASTART 0x14
+#define HOST1X_CHANNEL_DMAPUT 0x18
+#define HOST1X_CHANNEL_DMAGET 0x1c
+#define HOST1X_CHANNEL_DMAEND 0x20
+#define HOST1X_CHANNEL_DMACTRL 0x24
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_SHIFT 0
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_MASK 0x1
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST_SHIFT 1
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST_MASK 0x1
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET_SHIFT 2
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET_MASK 0x1
+
+#define HOST1X_CHANNEL_SYNC_REG_BASE 0x3000
+
+#define HOST1X_SYNC_INTMASK 0x4
+#define HOST1X_SYNC_INTC0MASK 0x8
+#define HOST1X_SYNC_HINTSTATUS 0x20
+#define HOST1X_SYNC_HINTMASK 0x24
+#define HOST1X_SYNC_HINTSTATUS_EXT 0x28
+#define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_SHIFT 30
+#define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_MASK 0x1
+#define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_SHIFT 31
+#define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_MASK 0x1
+#define HOST1X_SYNC_HINTMASK_EXT 0x2c
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS 0x40
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS 0x48
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE 0x60
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 0x68
+#define HOST1X_SYNC_CF0_SETUP 0x80
+#define HOST1X_SYNC_CF0_SETUP_BASE_SHIFT 0
+#define HOST1X_SYNC_CF0_SETUP_BASE_MASK 0x1ff
+#define HOST1X_SYNC_CF0_SETUP_LIMIT_SHIFT 16
+#define HOST1X_SYNC_CF0_SETUP_LIMIT_MASK 0x1ff
+#define HOST1X_SYNC_CFx_SETUP(x) (HOST1X_SYNC_CF0_SETUP + (4 * (x)))
+
+#define HOST1X_SYNC_CMDPROC_STOP 0xac
+#define HOST1X_SYNC_CH_TEARDOWN 0xb0
+#define HOST1X_SYNC_USEC_CLK 0x1a4
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG 0x1a8
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT 0x1bc
+#define HOST1X_SYNC_IP_READ_TIMEOUT_ADDR 0x1c0
+#define HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR 0x1c4
+#define HOST1X_SYNC_MLOCK_0 0x2c0
+#define HOST1X_SYNC_MLOCK_OWNER_0 0x340
+#define HOST1X_SYNC_MLOCK_OWNER_0_CHID_SHIFT 8
+#define HOST1X_SYNC_MLOCK_OWNER_0_CHID_MASK 0xf
+#define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_SHIFT 1
+#define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_MASK 0x1
+#define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_SHIFT 0
+#define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_MASK 0x1
+#define HOST1X_SYNC_SYNCPT_0 0x400
+#define HOST1X_SYNC_SYNCPT_INT_THRESH_0 0x500
+
+#define HOST1X_SYNC_SYNCPT_BASE_0 0x600
+#define HOST1X_SYNC_SYNCPT_BASE_0_BASE_SHIFT 0
+#define HOST1X_SYNC_SYNCPT_BASE_0_BASE_MASK 0xffff
+#define HOST1X_SYNC_SYNCPT_BASE_x(x) (HOST1X_SYNC_SYNCPT_BASE_0 + (4 * (x)))
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR 0x700
+
+#define HOST1X_SYNC_CBREAD_0 0x720
+#define HOST1X_SYNC_CBREAD_x(x) (HOST1X_SYNC_CBREAD_0 + (4 * (x)))
+#define HOST1X_SYNC_CFPEEK_CTRL 0x74c
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_SHIFT 0
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_MASK 0x1ff
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_SHIFT 16
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_MASK 0x7
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_SHIFT 31
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_MASK 0x1
+#define HOST1X_SYNC_CFPEEK_READ 0x750
+#define HOST1X_SYNC_CFPEEK_PTRS 0x754
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_SHIFT 0
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_MASK 0x1ff
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_SHIFT 16
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_MASK 0x1ff
+#define HOST1X_SYNC_CBSTAT_0 0x758
+#define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_SHIFT 0
+#define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_MASK 0xffff
+#define HOST1X_SYNC_CBSTAT_0_CBCLASS0_SHIFT 16
+#define HOST1X_SYNC_CBSTAT_0_CBCLASS0_MASK 0xffff
+#define HOST1X_SYNC_CBSTAT_x(x) (HOST1X_SYNC_CBSTAT_0 + (4 * (x)))
+
+/* sync registers */
+#define NV_HOST1X_SYNCPT_NB_PTS 32
+#define NV_HOST1X_SYNCPT_NB_BASES 8
+#define NV_HOST1X_NB_MLOCKS 16
+
+/* host class methods */
+enum {
+ NV_CLASS_HOST_INCR_SYNCPT = 0x0,
+ NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
+ NV_CLASS_HOST_LOAD_SYNCPT_BASE = 0xb,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
+ NV_CLASS_HOST_INDOFF = 0x2d,
+ NV_CLASS_HOST_INDDATA = 0x2e
+};
+/* sync point conditionals */
+enum {
+ NV_SYNCPT_IMMEDIATE = 0x0,
+ NV_SYNCPT_OP_DONE = 0x1,
+ NV_SYNCPT_RD_DONE = 0x2,
+ NV_SYNCPT_REG_WR_SAFE = 0x3,
+};
+
+static inline u32 nvhost_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return (indx << 24) | (threshold & 0xffffff);
+}
+
+static inline u32 nvhost_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return (indx << 24) | (threshold & 0xffffff);
+}
+
+static inline u32 nvhost_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return (indx << 24) | (base_indx << 16) | offset;
+}
+
+static inline u32 nvhost_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return (base_indx << 24) | offset;
+}
+
+static inline u32 nvhost_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return (cond << 8) | indx;
+}
+
+enum {
+ NV_HOST_MODULE_HOST1X = 0,
+ NV_HOST_MODULE_MPE = 1,
+ NV_HOST_MODULE_GR3D = 6
+};
+
+static inline u32 nvhost_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2);
+ if (auto_inc)
+ v |= BIT(27);
+ return v;
+}
+
+static inline u32 nvhost_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = (mod_id << 18) | (offset << 2) | 1;
+ if (auto_inc)
+ v |= BIT(27);
+ return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 nvhost_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return nvhost_opcode_imm(NV_CLASS_HOST_INCR_SYNCPT,
+ nvhost_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 nvhost_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 nvhost_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
+
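+/* Build a two-bit mask covering offsets x and y, relative to x (y >= x) */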
+static inline u32 nvhost_mask2(unsigned x, unsigned y)
+{
+ return 1 | (1 << (y - x));
+}
+
+#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_intr.c b/drivers/video/tegra/host/host1x/host1x_intr.c
new file mode 100644
index 000000000000..a2a78d79c704
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_intr.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include "host1x_hardware.h"
+
+
+/*** HW host sync management ***/
+
+static void t20_intr_init_host_sync(struct nvhost_intr *intr)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ /* disable the ip_busy_timeout. this prevents write drops, etc.
+ * there's no real way to recover from a hung client anyway.
+ */
+ writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+ /* increase the auto-ack timeout to the maximum value. 2d will hang
+ * otherwise on ap20.
+ */
+ writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+}
+
+static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ /* write microsecond clock register */
+ writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK);
+}
+
+static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
+ u32 id, u32 thresh)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ thresh &= 0xffff;
+ writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
+}
+
+static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
+}
+
+static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
+{
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+ /* disable interrupts for both CPUs */
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+
+ /* clear status for both CPUs */
+ writel(0xffffffffu, sync_regs +
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+ writel(0xffffffffu, sync_regs +
+ HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
+}
+
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers in interrupt context
+ */
+irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
+
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ writel(BIT(id),
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+ writel(BIT(id),
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * Host general interrupt service function
+ * Handles read / write failures
+ */
+static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr *intr = dev_id;
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ u32 stat;
+ u32 ext_stat;
+ u32 addr;
+
+ stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
+ ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+
+ if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_READ_INT, ext_stat)) {
+ addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
+ pr_err("Host read timeout at address %x\n", addr);
+ }
+
+ if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_WRITE_INT, ext_stat)) {
+ addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
+ pr_err("Host write timeout at address %x\n", addr);
+ }
+
+ writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+ writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
+{
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ int err;
+
+ if (intr->host_general_irq_requested)
+ return 0;
+
+ /* master disable for general (not syncpt) host interrupts */
+ writel(0, sync_regs + HOST1X_SYNC_INTMASK);
+
+ /* clear status & extstatus */
+ writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+ writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);
+
+ err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
+ "host_status", intr);
+ if (err)
+ return err;
+
+ /* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
+ writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);
+
+ /* enable extra interrupt sources */
+ writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);
+
+ /* enable host module interrupt to CPU0 */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);
+
+ /* master enable for general (not syncpt) host interrupts */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);
+
+ intr->host_general_irq_requested = true;
+
+ return err;
+}
+
+static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
+{
+ if (intr->host_general_irq_requested) {
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ /* master disable for general (not syncpt) host interrupts */
+ writel(0, sync_regs + HOST1X_SYNC_INTMASK);
+
+ free_irq(intr->host_general_irq, intr);
+ intr->host_general_irq_requested = false;
+ }
+}
+
+static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+ int err;
+ if (syncpt->irq_requested)
+ return 0;
+
+ err = request_threaded_irq(syncpt->irq,
+ t20_intr_syncpt_thresh_isr,
+ nvhost_syncpt_thresh_fn,
+ 0, syncpt->thresh_irq_name, syncpt);
+ if (err)
+ return err;
+
+ syncpt->irq_requested = 1;
+ return 0;
+}
+
+int nvhost_init_t20_intr_support(struct nvhost_master *host)
+{
+ host->op.intr.init_host_sync = t20_intr_init_host_sync;
+ host->op.intr.set_host_clocks_per_usec =
+ t20_intr_set_host_clocks_per_usec;
+ host->op.intr.set_syncpt_threshold = t20_intr_set_syncpt_threshold;
+ host->op.intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
+ host->op.intr.disable_all_syncpt_intrs =
+ t20_intr_disable_all_syncpt_intrs;
+ host->op.intr.request_host_general_irq =
+ t20_intr_request_host_general_irq;
+ host->op.intr.free_host_general_irq =
+ t20_intr_free_host_general_irq;
+ host->op.intr.request_syncpt_irq =
+ t20_request_syncpt_irq;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.c b/drivers/video/tegra/host/host1x/host1x_syncpt.c
new file mode 100644
index 000000000000..ecfa08c31e94
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.c
@@ -0,0 +1,244 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints for HOST1X
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include "nvhost_syncpt.h"
+#include "dev.h"
+#include "host1x_syncpt.h"
+#include "host1x_hardware.h"
+
+/**
+ * Write the current syncpoint value back to hw.
+ */
+static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ int min = nvhost_syncpt_read_min(sp, id);
+ writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+}
+
+/**
+ * Write the current waitbase value back to hw.
+ */
+static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ writel(sp->base_val[id],
+ dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Read waitbase value from hw.
+ */
+static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ sp->base_val[id] = readl(dev->sync_aperture +
+ (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Updates the last value read from hardware.
+ * (was nvhost_syncpt_update_min)
+ */
+static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 old, live;
+
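+ /* Lock-free refresh of the cached minimum: retry the cmpxchg until the
+ * cached value is swapped in without racing another updater.
+ */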
+ do {
+ old = nvhost_syncpt_read_min(sp, id);
+ live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+ } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+ if (!nvhost_syncpt_check_max(sp, id, live)) {
+ dev_err(&syncpt_to_dev(sp)->pdev->dev,
+ "%s failed: id=%u\n",
+ __func__,
+ id);
+ nvhost_debug_dump(syncpt_to_dev(sp));
+ BUG();
+ }
+ return live;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ BUG_ON(!nvhost_module_powered(&dev->mod));
+ if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
+ dev_err(&syncpt_to_dev(sp)->pdev->dev,
+ "Syncpoint id %d\n",
+ id);
+ nvhost_debug_dump(syncpt_to_dev(sp));
+ BUG();
+ }
+ writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
+ wmb();
+}
+
+/* returns true if a <= b < c, using wrapping (mod 2^32) comparison */
+static inline bool nvhost_syncpt_is_between(u32 a, u32 b, u32 c)
+{
+ return b - a < c - a;
+}
+
+/* returns true if syncpt >= threshold (mod 2^32) */
+static bool nvhost_syncpt_wrapping_comparison(u32 syncpt, u32 threshold)
+{
+ return nvhost_syncpt_is_between(threshold, syncpt,
+ (1UL<<31UL)+threshold);
+}
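+/* For example, with syncpt = 3 and threshold = 0xfffffffe the subtraction
+ * wraps (3 - 0xfffffffe == 5 < 2^31), so the threshold counts as reached;
+ * values more than 2^31 behind the threshold count as not yet reached.
+ */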
+
+/* check for old WAITs to be removed (avoiding a wrap) */
+static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
+ struct nvmap_client *nvmap,
+ u32 waitchk_mask,
+ struct nvhost_waitchk *wait,
+ int num_waitchk)
+{
+ u32 idx;
+ int err = 0;
+
+ /* get current syncpt values */
+ for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
+ if (BIT(idx) & waitchk_mask)
+ nvhost_syncpt_update_min(sp, idx);
+ }
+
+ BUG_ON(!wait && !num_waitchk);
+
+ /* compare syncpt vs wait threshold */
+ while (num_waitchk) {
+ u32 syncpt, override;
+
+ BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
+
+ syncpt = atomic_read(&sp->min_val[wait->syncpt_id]);
+ if (nvhost_syncpt_wrapping_comparison(syncpt, wait->thresh)) {
+ /*
+ * NULL out an already satisfied WAIT_SYNCPT host method by
+ * patching its args in the command stream. The method data
+ * is changed to reference a reserved (never given out or
+ * incremented) NVSYNCPT_GRAPHICS_HOST syncpt with a matching
+ * threshold value of 0, so it is guaranteed to be popped by
+ * the host HW.
+ */
+ dev_dbg(&syncpt_to_dev(sp)->pdev->dev,
+ "drop WAIT id %d (%s) thresh 0x%x, syncpt 0x%x\n",
+ wait->syncpt_id,
+ syncpt_op(sp).name(sp, wait->syncpt_id),
+ wait->thresh, syncpt);
+
+ /* patch the wait */
+ override = nvhost_class_host_wait_syncpt(
+ NVSYNCPT_GRAPHICS_HOST, 0);
+ err = nvmap_patch_word(nvmap,
+ (struct nvmap_handle *)wait->mem,
+ wait->offset, override);
+ if (err)
+ break;
+ }
+
+ wait++;
+ num_waitchk--;
+ }
+ return err;
+}
+
+
+static const char *s_syncpt_names[32] = {
+ "gfx_host",
+ "", "", "", "", "", "", "",
+ "disp0_a", "disp1_a", "avp_0",
+ "csi_vi_0", "csi_vi_1",
+ "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
+ "2d_0", "2d_1",
+ "disp0_b", "disp1_b",
+ "3d",
+ "mpe",
+ "disp0_c", "disp1_c",
+ "vblank0", "vblank1",
+ "mpe_ebm_eof", "mpe_wr_safe",
+ "2d_tinyblt",
+ "dsi"
+};
+
+static const char *t20_syncpt_name(struct nvhost_syncpt *s, u32 id)
+{
+ BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
+ return s_syncpt_names[id];
+}
+
+static void t20_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ u32 max = nvhost_syncpt_read_max(sp, i);
+ if (!max)
+ continue;
+ dev_info(&syncpt_to_dev(sp)->pdev->dev,
+ "id %d (%s) min %d max %d\n",
+ i, syncpt_op(sp).name(sp, i),
+ nvhost_syncpt_update_min(sp, i), max);
+
+ }
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++) {
+ u32 base_val;
+ t20_syncpt_read_wait_base(sp, i);
+ base_val = sp->base_val[i];
+ if (base_val)
+ dev_info(&syncpt_to_dev(sp)->pdev->dev,
+ "waitbase id %d val %d\n",
+ i, base_val);
+
+ }
+}
+
+int host1x_init_syncpt_support(struct nvhost_master *host)
+{
+
+ host->sync_aperture = host->aperture +
+ (NV_HOST1X_CHANNEL0_BASE +
+ HOST1X_CHANNEL_SYNC_REG_BASE);
+
+ host->op.syncpt.reset = t20_syncpt_reset;
+ host->op.syncpt.reset_wait_base = t20_syncpt_reset_wait_base;
+ host->op.syncpt.read_wait_base = t20_syncpt_read_wait_base;
+ host->op.syncpt.update_min = t20_syncpt_update_min;
+ host->op.syncpt.cpu_incr = t20_syncpt_cpu_incr;
+ host->op.syncpt.wait_check = t20_syncpt_wait_check;
+ host->op.syncpt.debug = t20_syncpt_debug;
+ host->op.syncpt.name = t20_syncpt_name;
+
+ host->syncpt.nb_pts = NV_HOST1X_SYNCPT_NB_PTS;
+ host->syncpt.nb_bases = NV_HOST1X_SYNCPT_NB_BASES;
+ host->syncpt.client_managed = NVSYNCPTS_CLIENT_MANAGED;
+
+ return 0;
+}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.h b/drivers/video/tegra/host/host1x/host1x_syncpt.h
new file mode 100644
index 000000000000..324ca8b45e49
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.h
@@ -0,0 +1,79 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints for HOST1X
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_SYNCPT_H
+#define __NVHOST_HOST1X_HOST1X_SYNCPT_H
+
+#define NVSYNCPT_DISP0_A (8)
+#define NVSYNCPT_DISP1_A (9)
+#define NVSYNCPT_AVP_0 (10)
+#define NVSYNCPT_CSI_VI_0 (11)
+#define NVSYNCPT_CSI_VI_1 (12)
+#define NVSYNCPT_VI_ISP_0 (13)
+#define NVSYNCPT_VI_ISP_1 (14)
+#define NVSYNCPT_VI_ISP_2 (15)
+#define NVSYNCPT_VI_ISP_3 (16)
+#define NVSYNCPT_VI_ISP_4 (17)
+#define NVSYNCPT_2D_0 (18)
+#define NVSYNCPT_2D_1 (19)
+#define NVSYNCPT_DISP0_B (20)
+#define NVSYNCPT_DISP1_B (21)
+#define NVSYNCPT_3D (22)
+#define NVSYNCPT_MPE (23)
+#define NVSYNCPT_DISP0_C (24)
+#define NVSYNCPT_DISP1_C (25)
+#define NVSYNCPT_VBLANK0 (26)
+#define NVSYNCPT_VBLANK1 (27)
+#define NVSYNCPT_MPE_EBM_EOF (28)
+#define NVSYNCPT_MPE_WR_SAFE (29)
+#define NVSYNCPT_DSI (31)
+
+
+/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
+/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
+/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
+/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+ BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) | \
+ BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) | \
+ BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) | \
+ BIT(NVSYNCPT_DSI) | \
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1) | \
+ BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) | \
+ BIT(NVSYNCPT_VI_ISP_1) | BIT(NVSYNCPT_VI_ISP_2) | \
+ BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | \
+ BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+ BIT(NVSYNCPT_2D_1) | BIT(NVSYNCPT_AVP_0))
+
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D (3)
+#define NVWAITBASE_MPE (4)
+
+struct nvhost_master;
+int host1x_init_syncpt(struct nvhost_master *host);
+int host1x_init_syncpt_support(struct nvhost_master *host);
+
+#endif
diff --git a/drivers/video/tegra/host/mpe/Makefile b/drivers/video/tegra/host/mpe/Makefile
new file mode 100644
index 000000000000..efd77bb88fe7
--- /dev/null
+++ b/drivers/video/tegra/host/mpe/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-mpe-objs = \
+ mpe.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-mpe.o
diff --git a/drivers/video/tegra/host/mpe/mpe.c b/drivers/video/tegra/host/mpe/mpe.c
new file mode 100644
index 000000000000..3e89e6989e44
--- /dev/null
+++ b/drivers/video/tegra/host/mpe/mpe.c
@@ -0,0 +1,570 @@
+/*
+ * drivers/video/tegra/host/mpe/mpe.c
+ *
+ * Tegra Graphics Host MPE
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "host1x/host1x_hardware.h"
+#include "host1x/host1x_channel.h"
+#include "host1x/host1x_syncpt.h"
+#include "t20/t20.h"
+#include <linux/slab.h>
+
+enum {
+ HWCTX_REGINFO_NORMAL = 0,
+ HWCTX_REGINFO_STASH,
+ HWCTX_REGINFO_CALCULATE,
+ HWCTX_REGINFO_WRITEBACK
+};
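+
+/* How each register type is handled on the save path (see save_regs()):
+ * NORMAL    - drained from the channel FIFO straight into the restore buffer
+ * STASH     - drained into the restore buffer and also kept in msi->in
+ * CALCULATE - drained, then transformed by calculate_mpe() before storing
+ * WRITEBACK - not read from HW; filled in from the values in msi->out
+ */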
+
+const struct hwctx_reginfo ctxsave_regs_mpe[] = {
+ HWCTX_REGINFO(0x124, 1, STASH),
+ HWCTX_REGINFO(0x123, 1, STASH),
+ HWCTX_REGINFO(0x103, 1, STASH),
+ HWCTX_REGINFO(0x074, 1, STASH),
+ HWCTX_REGINFO(0x021, 1, NORMAL),
+ HWCTX_REGINFO(0x020, 1, STASH),
+ HWCTX_REGINFO(0x024, 2, NORMAL),
+ HWCTX_REGINFO(0x0e6, 1, NORMAL),
+ HWCTX_REGINFO(0x3fc, 1, NORMAL),
+ HWCTX_REGINFO(0x3d0, 1, NORMAL),
+ HWCTX_REGINFO(0x3d4, 1, NORMAL),
+ HWCTX_REGINFO(0x013, 1, NORMAL),
+ HWCTX_REGINFO(0x022, 1, NORMAL),
+ HWCTX_REGINFO(0x030, 4, NORMAL),
+ HWCTX_REGINFO(0x023, 1, NORMAL),
+ HWCTX_REGINFO(0x070, 1, NORMAL),
+ HWCTX_REGINFO(0x0a0, 9, NORMAL),
+ HWCTX_REGINFO(0x071, 1, NORMAL),
+ HWCTX_REGINFO(0x100, 4, NORMAL),
+ HWCTX_REGINFO(0x104, 2, NORMAL),
+ HWCTX_REGINFO(0x108, 9, NORMAL),
+ HWCTX_REGINFO(0x112, 2, NORMAL),
+ HWCTX_REGINFO(0x114, 1, STASH),
+ HWCTX_REGINFO(0x014, 1, NORMAL),
+ HWCTX_REGINFO(0x072, 1, NORMAL),
+ HWCTX_REGINFO(0x200, 1, NORMAL),
+ HWCTX_REGINFO(0x0d1, 1, NORMAL),
+ HWCTX_REGINFO(0x0d0, 1, NORMAL),
+ HWCTX_REGINFO(0x0c0, 1, NORMAL),
+ HWCTX_REGINFO(0x0c3, 2, NORMAL),
+ HWCTX_REGINFO(0x0d2, 1, NORMAL),
+ HWCTX_REGINFO(0x0d8, 1, NORMAL),
+ HWCTX_REGINFO(0x0e0, 2, NORMAL),
+ HWCTX_REGINFO(0x07f, 2, NORMAL),
+ HWCTX_REGINFO(0x084, 8, NORMAL),
+ HWCTX_REGINFO(0x0d3, 1, NORMAL),
+ HWCTX_REGINFO(0x040, 13, NORMAL),
+ HWCTX_REGINFO(0x050, 6, NORMAL),
+ HWCTX_REGINFO(0x058, 1, NORMAL),
+ HWCTX_REGINFO(0x057, 1, NORMAL),
+ HWCTX_REGINFO(0x111, 1, NORMAL),
+ HWCTX_REGINFO(0x130, 3, NORMAL),
+ HWCTX_REGINFO(0x201, 1, NORMAL),
+ HWCTX_REGINFO(0x068, 2, NORMAL),
+ HWCTX_REGINFO(0x08c, 1, NORMAL),
+ HWCTX_REGINFO(0x0cf, 1, NORMAL),
+ HWCTX_REGINFO(0x082, 2, NORMAL),
+ HWCTX_REGINFO(0x075, 1, NORMAL),
+ HWCTX_REGINFO(0x0e8, 1, NORMAL),
+ HWCTX_REGINFO(0x056, 1, NORMAL),
+ HWCTX_REGINFO(0x057, 1, NORMAL),
+ HWCTX_REGINFO(0x073, 1, CALCULATE),
+ HWCTX_REGINFO(0x074, 1, NORMAL),
+ HWCTX_REGINFO(0x075, 1, NORMAL),
+ HWCTX_REGINFO(0x076, 1, STASH),
+ HWCTX_REGINFO(0x11a, 9, NORMAL),
+ HWCTX_REGINFO(0x123, 1, NORMAL),
+ HWCTX_REGINFO(0x124, 1, NORMAL),
+ HWCTX_REGINFO(0x12a, 5, NORMAL),
+ HWCTX_REGINFO(0x12f, 1, STASH),
+ HWCTX_REGINFO(0x125, 2, NORMAL),
+ HWCTX_REGINFO(0x034, 1, NORMAL),
+ HWCTX_REGINFO(0x133, 2, NORMAL),
+ HWCTX_REGINFO(0x127, 1, NORMAL),
+ HWCTX_REGINFO(0x106, 1, WRITEBACK),
+ HWCTX_REGINFO(0x107, 1, WRITEBACK)
+};
+
+#define NR_STASHES 8
+#define NR_WRITEBACKS 2
+
+#define RC_RAM_LOAD_CMD 0x115
+#define RC_RAM_LOAD_DATA 0x116
+#define RC_RAM_READ_CMD 0x128
+#define RC_RAM_READ_DATA 0x129
+#define RC_RAM_SIZE 692
+
+#define IRFR_RAM_LOAD_CMD 0xc5
+#define IRFR_RAM_LOAD_DATA 0xc6
+#define IRFR_RAM_READ_CMD 0xcd
+#define IRFR_RAM_READ_DATA 0xce
+#define IRFR_RAM_SIZE 408
+
+struct mpe_save_info {
+ u32 in[NR_STASHES];
+ u32 out[NR_WRITEBACKS];
+ unsigned in_pos;
+ unsigned out_pos;
+ u32 h264_mode;
+};
+
+
+/*** restore ***/
+
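+/* setup_restore() lays the restore gather out as:
+ *   restore_begin | incr header + data for each ctxsave register |
+ *   RC RAM load | IRFR RAM load | restore_end
+ * restore_size is the total number of words in that gather.
+ */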
+static unsigned int restore_size;
+
+static void restore_begin(u32 *ptr)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_MPE, 1);
+ /* set class to MPE */
+ ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+}
+#define RESTORE_BEGIN_SIZE 3
+
+static void restore_ram(u32 *ptr, unsigned words,
+ unsigned cmd_reg, unsigned data_reg)
+{
+ ptr[0] = nvhost_opcode_imm(cmd_reg, words);
+ ptr[1] = nvhost_opcode_nonincr(data_reg, words);
+}
+#define RESTORE_RAM_SIZE 2
+
+static void restore_end(u32 *ptr)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
+ NVSYNCPT_MPE);
+}
+#define RESTORE_END_SIZE 1
+
+static u32 *setup_restore_regs(u32 *ptr,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ *ptr++ = nvhost_opcode_incr(offset, count);
+ ptr += count;
+ }
+ return ptr;
+}
+
+static u32 *setup_restore_ram(u32 *ptr, unsigned words,
+ unsigned cmd_reg, unsigned data_reg)
+{
+ restore_ram(ptr, words, cmd_reg, data_reg);
+ return ptr + (RESTORE_RAM_SIZE + words);
+}
+
+static void setup_restore(u32 *ptr)
+{
+ restore_begin(ptr);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
+ ARRAY_SIZE(ctxsave_regs_mpe));
+
+ ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
+ RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);
+
+ ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
+ IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);
+
+ restore_end(ptr);
+
+ wmb();
+}
+
+
+/*** save ***/
+
+/* the same context save command sequence is used for all contexts. */
+static struct nvmap_handle_ref *save_buf;
+static phys_addr_t save_phys;
+static unsigned int save_size;
+
+struct save_info {
+ u32 *ptr;
+ unsigned int save_count;
+ unsigned int restore_count;
+};
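+
+/* The save sequence is a three-way handshake: MPE increments the syncpt when
+ * it is done (base+1), the host signals the CPU context-read service, and the
+ * service drains the register/RAM reads and performs the final CPU increment;
+ * save_end() then waits for base+3 before advancing the wait base.
+ */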
+
+static void __init save_begin(u32 *ptr)
+{
+ /* MPE: when done, increment syncpt to base+1 */
+ ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+ ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
+ NVSYNCPT_MPE);
+ /* host: wait for syncpt base+1 */
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ ptr[3] = nvhost_class_host_wait_syncpt_base(NVSYNCPT_MPE,
+ NVWAITBASE_MPE, 1);
+ /* host: signal context read thread to start reading */
+ ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
+ NVSYNCPT_MPE);
+}
+#define SAVE_BEGIN_SIZE 5
+
+static void __init save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
+ start_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+static void __init save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ cmd_reg, 1);
+ ptr[1] = count;
+}
+#define SAVE_SET_RAM_CMD_SIZE 2
+
+static void __init save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
+{
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
+ data_reg, false);
+ ptr[2] = nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0);
+ /* write junk data to work around a caching problem with register memory */
+ ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ data_reg, 1);
+ ptr[4] = 0x99;
+}
+#define SAVE_READ_RAM_DATA_NASTY_SIZE 5
+
+static void __init save_end(u32 *ptr)
+{
+ /* Wait for context read service to finish (cpu incr 3) */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ ptr[1] = nvhost_class_host_wait_syncpt_base(NVSYNCPT_MPE,
+ NVWAITBASE_MPE, 3);
+ /* Advance syncpoint base */
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ ptr[3] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_MPE, 3);
+ /* set class back to the unit */
+ ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
+
+static void __init setup_save_regs(struct save_info *info,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+
+ for ( ; regs != rend; ++regs) {
+ u32 offset = regs->offset;
+ u32 count = regs->count;
+ if (regs->type != HWCTX_REGINFO_WRITEBACK) {
+ if (ptr) {
+ save_direct(ptr, offset, count);
+ ptr += SAVE_DIRECT_SIZE;
+ memset(ptr, 0, count * 4);
+ ptr += count;
+ }
+ save_count += (SAVE_DIRECT_SIZE + count);
+ }
+ restore_count += (1 + count);
+ }
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
+static void __init setup_save_ram_nasty(struct save_info *info, unsigned words,
+ unsigned cmd_reg, unsigned data_reg)
+{
+ u32 *ptr = info->ptr;
+ unsigned int save_count = info->save_count;
+ unsigned int restore_count = info->restore_count;
+ unsigned i;
+
+ if (ptr) {
+ save_set_ram_cmd(ptr, cmd_reg, words);
+ ptr += SAVE_SET_RAM_CMD_SIZE;
+ for (i = words; i; --i) {
+ save_read_ram_data_nasty(ptr, data_reg);
+ ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
+ }
+ }
+
+ save_count += SAVE_SET_RAM_CMD_SIZE;
+ save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
+ restore_count += (RESTORE_RAM_SIZE + words);
+
+ info->ptr = ptr;
+ info->save_count = save_count;
+ info->restore_count = restore_count;
+}
+
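+/* setup_save() runs in two passes: first with ptr == NULL purely to compute
+ * save_size and restore_size, then again with the allocated buffer to emit
+ * the actual save command sequence (see nvhost_mpe_ctxhandler_init()).
+ */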
+static void __init setup_save(u32 *ptr)
+{
+ struct save_info info = {
+ ptr,
+ SAVE_BEGIN_SIZE,
+ RESTORE_BEGIN_SIZE
+ };
+
+ if (info.ptr) {
+ save_begin(info.ptr);
+ info.ptr += SAVE_BEGIN_SIZE;
+ }
+
+ setup_save_regs(&info, ctxsave_regs_mpe,
+ ARRAY_SIZE(ctxsave_regs_mpe));
+
+ setup_save_ram_nasty(&info, RC_RAM_SIZE,
+ RC_RAM_READ_CMD, RC_RAM_READ_DATA);
+
+ setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
+ IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
+
+ if (info.ptr) {
+ save_end(info.ptr);
+ info.ptr += SAVE_END_SIZE;
+ }
+
+ wmb();
+
+ save_size = info.save_count + SAVE_END_SIZE;
+ restore_size = info.restore_count + RESTORE_END_SIZE;
+}
+
+
+static u32 calculate_mpe(u32 word, struct mpe_save_info *msi)
+{
+ u32 buffer_full_read = msi->in[0] & 0x01ffffff;
+ u32 byte_len = msi->in[1];
+ u32 drain = (msi->in[2] >> 2) & 0x007fffff;
+ u32 rep_frame = msi->in[3] & 0x0000ffff;
+ u32 h264_mode = (msi->in[4] >> 11) & 1;
+ int new_buffer_full;
+
+ if (h264_mode)
+ byte_len >>= 3;
+ new_buffer_full = buffer_full_read + byte_len - (drain * 4);
+ msi->out[0] = max(0, new_buffer_full);
+ msi->out[1] = rep_frame;
+ if (rep_frame == 0)
+ word &= 0xffff0000;
+ return word;
+}
+
+static u32 *save_regs(u32 *ptr, unsigned int *pending,
+ struct nvhost_channel *channel,
+ const struct hwctx_reginfo *regs,
+ unsigned int nr_regs,
+ struct mpe_save_info *msi)
+{
+ const struct hwctx_reginfo *rend = regs + nr_regs;
+
+ for ( ; regs != rend; ++regs) {
+ u32 count = regs->count;
+ ++ptr; /* restore incr */
+ if (regs->type == HWCTX_REGINFO_NORMAL) {
+ host1x_drain_read_fifo(channel->aperture,
+ ptr, count, pending);
+ ptr += count;
+ } else {
+ u32 word;
+ if (regs->type == HWCTX_REGINFO_WRITEBACK) {
+ BUG_ON(msi->out_pos >= NR_WRITEBACKS);
+ word = msi->out[msi->out_pos++];
+ } else {
+ host1x_drain_read_fifo(channel->aperture,
+ &word, 1, pending);
+ if (regs->type == HWCTX_REGINFO_STASH) {
+ BUG_ON(msi->in_pos >= NR_STASHES);
+ msi->in[msi->in_pos++] = word;
+ } else {
+ word = calculate_mpe(word, msi);
+ }
+ }
+ *ptr++ = word;
+ }
+ }
+ return ptr;
+}
+
+static u32 *save_ram(u32 *ptr, unsigned int *pending,
+ struct nvhost_channel *channel,
+ unsigned words, unsigned cmd_reg, unsigned data_reg)
+{
+ int err = 0;
+ ptr += RESTORE_RAM_SIZE;
+ err = host1x_drain_read_fifo(channel->aperture, ptr, words, pending);
+ WARN_ON(err);
+ return ptr + words;
+}
+
+
+/*** ctxmpe ***/
+
+static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_channel *ch)
+{
+ struct nvmap_client *nvmap = ch->dev->nvmap;
+ struct nvhost_hwctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->restore = nvmap_alloc(nvmap, restore_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR_OR_NULL(ctx->restore)) {
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->restore_virt = nvmap_mmap(ctx->restore);
+ if (!ctx->restore_virt) {
+ nvmap_free(nvmap, ctx->restore);
+ kfree(ctx);
+ return NULL;
+ }
+
+ kref_init(&ctx->ref);
+ ctx->channel = ch;
+ ctx->valid = false;
+ ctx->save = save_buf;
+ ctx->save_incrs = 3;
+ ctx->save_thresh = 2;
+ ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
+ ctx->restore_size = restore_size;
+ ctx->restore_incrs = 1;
+
+ setup_restore(ctx->restore_virt);
+
+ return ctx;
+}
+
+static void ctxmpe_get(struct nvhost_hwctx *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+static void ctxmpe_free(struct kref *ref)
+{
+ struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
+ struct nvmap_client *nvmap = ctx->channel->dev->nvmap;
+
+ if (ctx->restore_virt)
+ nvmap_munmap(ctx->restore, ctx->restore_virt);
+ nvmap_unpin(nvmap, ctx->restore);
+ nvmap_free(nvmap, ctx->restore);
+ kfree(ctx);
+}
+
+static void ctxmpe_put(struct nvhost_hwctx *ctx)
+{
+ kref_put(&ctx->ref, ctxmpe_free);
+}
+
+static void ctxmpe_save_push(struct nvhost_cdma *cdma, struct nvhost_hwctx *ctx)
+{
+ nvhost_cdma_push(cdma,
+ nvhost_opcode_gather(save_size),
+ save_phys);
+}
+
+static void ctxmpe_save_service(struct nvhost_hwctx *ctx)
+{
+ u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
+ unsigned int pending = 0;
+ struct mpe_save_info msi;
+
+ msi.in_pos = 0;
+ msi.out_pos = 0;
+
+ ptr = save_regs(ptr, &pending, ctx->channel,
+ ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);
+
+ ptr = save_ram(ptr, &pending, ctx->channel,
+ RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);
+
+ ptr = save_ram(ptr, &pending, ctx->channel,
+ IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
+
+ wmb();
+ nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_MPE);
+}
+
+int __init nvhost_mpe_ctxhandler_init(struct nvhost_hwctx_handler *h)
+{
+ struct nvhost_channel *ch;
+ struct nvmap_client *nvmap;
+ u32 *save_ptr;
+
+ ch = container_of(h, struct nvhost_channel, ctxhandler);
+ nvmap = ch->dev->nvmap;
+
+ setup_save(NULL);
+
+ save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR_OR_NULL(save_buf)) {
+ int err = save_buf ? PTR_ERR(save_buf) : -ENOMEM;
+ save_buf = NULL;
+ return err;
+ }
+
+ save_ptr = nvmap_mmap(save_buf);
+ if (!save_ptr) {
+ nvmap_free(nvmap, save_buf);
+ save_buf = NULL;
+ return -ENOMEM;
+ }
+
+ save_phys = nvmap_pin(nvmap, save_buf);
+
+ setup_save(save_ptr);
+
+ h->alloc = ctxmpe_alloc;
+ h->save_push = ctxmpe_save_push;
+ h->save_service = ctxmpe_save_service;
+ h->get = ctxmpe_get;
+ h->put = ctxmpe_put;
+
+ return 0;
+}
+
+int nvhost_mpe_prepare_power_off(struct nvhost_module *mod)
+{
+ return host1x_save_context(mod, NVSYNCPT_MPE);
+}
diff --git a/drivers/video/tegra/host/mpe/mpe.h b/drivers/video/tegra/host/mpe/mpe.h
new file mode 100644
index 000000000000..dfc7259f1c67
--- /dev/null
+++ b/drivers/video/tegra/host/mpe/mpe.h
@@ -0,0 +1,31 @@
+/*
+ * drivers/video/tegra/host/mpe/mpe.h
+ *
+ * Tegra Graphics Host MPE
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_MPE_MPE_H
+#define __NVHOST_MPE_MPE_H
+
+struct nvhost_hwctx_handler;
+
+int nvhost_mpe_ctxhandler_init(struct nvhost_hwctx_handler *h);
+int nvhost_mpe_prepare_power_off(struct nvhost_module *mod);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c
new file mode 100644
index 000000000000..c3b4085df738
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.c
@@ -0,0 +1,510 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_acm.h"
+#include "dev.h"
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <mach/powergate.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+
+#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ)
+#define POWERGATE_DELAY 10
+#define MAX_DEVID_LENGTH 16
+
+DEFINE_MUTEX(client_list_lock);
+
+struct nvhost_module_client {
+ struct list_head node;
+ unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
+ void *priv;
+};
+
+static void do_powergate_locked(int id)
+{
+ if (id != -1 && tegra_powergate_is_powered(id))
+ tegra_powergate_partition(id);
+}
+
+static void do_unpowergate_locked(int id)
+{
+ if (id != -1)
+ tegra_unpowergate_partition(id);
+}
+
+void nvhost_module_reset(struct device *dev, struct nvhost_module *mod)
+{
+ dev_dbg(dev,
+ "%s: asserting %s module reset (id %d, id2 %d)\n",
+ __func__, mod->name,
+ mod->desc->powergate_ids[0], mod->desc->powergate_ids[1]);
+
+ mutex_lock(&mod->lock);
+
+ /* assert module and mc client reset */
+ if (mod->desc->powergate_ids[0] != -1) {
+ tegra_powergate_mc_disable(mod->desc->powergate_ids[0]);
+ tegra_periph_reset_assert(mod->clk[0]);
+ tegra_powergate_mc_flush(mod->desc->powergate_ids[0]);
+ }
+ if (mod->desc->powergate_ids[1] != -1) {
+ tegra_powergate_mc_disable(mod->desc->powergate_ids[1]);
+ tegra_periph_reset_assert(mod->clk[1]);
+ tegra_powergate_mc_flush(mod->desc->powergate_ids[1]);
+ }
+
+ udelay(POWERGATE_DELAY);
+
+ /* deassert reset */
+ if (mod->desc->powergate_ids[0] != -1) {
+ tegra_powergate_mc_flush_done(mod->desc->powergate_ids[0]);
+ tegra_periph_reset_deassert(mod->clk[0]);
+ tegra_powergate_mc_enable(mod->desc->powergate_ids[0]);
+ }
+ if (mod->desc->powergate_ids[1] != -1) {
+ tegra_powergate_mc_flush_done(mod->desc->powergate_ids[1]);
+ tegra_periph_reset_deassert(mod->clk[1]);
+ tegra_powergate_mc_enable(mod->desc->powergate_ids[1]);
+ }
+
+ mutex_unlock(&mod->lock);
+
+ dev_dbg(dev, "%s: module %s out of reset\n",
+ __func__, mod->name);
+}
+
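+/* Module power is a three-state machine driven by the busy/idle refcount:
+ * RUNNING -> CLOCKGATED after clockgate_delay of idleness, and
+ * CLOCKGATED -> POWERGATED after powergate_delay (only if can_powergate).
+ * nvhost_module_busy() brings a module back up to RUNNING.
+ */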
+static void to_state_clockgated_locked(struct nvhost_module *mod)
+{
+ const struct nvhost_moduledesc *desc = mod->desc;
+ if (mod->powerstate == NVHOST_POWER_STATE_RUNNING) {
+ int i;
+ for (i = 0; i < mod->num_clks; i++)
+ clk_disable(mod->clk[i]);
+ if (mod->parent)
+ nvhost_module_idle(mod->parent);
+ } else if (mod->powerstate == NVHOST_POWER_STATE_POWERGATED
+ && mod->desc->can_powergate) {
+ do_unpowergate_locked(desc->powergate_ids[0]);
+ do_unpowergate_locked(desc->powergate_ids[1]);
+ }
+ mod->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
+}
+
+static void to_state_running_locked(struct nvhost_module *mod)
+{
+ int prev_state = mod->powerstate;
+ if (mod->powerstate == NVHOST_POWER_STATE_POWERGATED)
+ to_state_clockgated_locked(mod);
+ if (mod->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
+ int i;
+
+ if (mod->parent)
+ nvhost_module_busy(mod->parent);
+
+ for (i = 0; i < mod->num_clks; i++) {
+ int err = clk_enable(mod->clk[i]);
+ BUG_ON(err);
+ }
+
+ if (prev_state == NVHOST_POWER_STATE_POWERGATED
+ && mod->desc->finalize_poweron)
+ mod->desc->finalize_poweron(mod);
+ }
+ mod->powerstate = NVHOST_POWER_STATE_RUNNING;
+}
+
+/* This gets called from powerstate_down_handler() and from module suspend.
+ * Module suspend is done for all modules; runtime power gating only
+ * for modules with can_powergate set.
+ */
+static int to_state_powergated_locked(struct nvhost_module *mod)
+{
+ int err = 0;
+
+ if (mod->desc->prepare_poweroff
+ && mod->powerstate != NVHOST_POWER_STATE_POWERGATED) {
+ /* Clock needs to be on in prepare_poweroff */
+ to_state_running_locked(mod);
+ err = mod->desc->prepare_poweroff(mod);
+ if (err)
+ return err;
+ }
+
+ if (mod->powerstate == NVHOST_POWER_STATE_RUNNING)
+ to_state_clockgated_locked(mod);
+
+ if (mod->desc->can_powergate) {
+ do_powergate_locked(mod->desc->powergate_ids[0]);
+ do_powergate_locked(mod->desc->powergate_ids[1]);
+ }
+
+ mod->powerstate = NVHOST_POWER_STATE_POWERGATED;
+ return 0;
+}
+
+static void schedule_powergating_locked(struct nvhost_module *mod)
+{
+ if (mod->desc->can_powergate)
+ schedule_delayed_work(&mod->powerstate_down,
+ msecs_to_jiffies(mod->desc->powergate_delay));
+}
+
+static void schedule_clockgating_locked(struct nvhost_module *mod)
+{
+ schedule_delayed_work(&mod->powerstate_down,
+ msecs_to_jiffies(mod->desc->clockgate_delay));
+}
+
+void nvhost_module_busy(struct nvhost_module *mod)
+{
+ if (mod->desc->busy)
+ mod->desc->busy(mod);
+
+ mutex_lock(&mod->lock);
+ cancel_delayed_work(&mod->powerstate_down);
+
+ mod->refcount++;
+ if (mod->refcount > 0 && !nvhost_module_powered(mod))
+ to_state_running_locked(mod);
+ mutex_unlock(&mod->lock);
+}
+
+static void powerstate_down_handler(struct work_struct *work)
+{
+ struct nvhost_module *mod;
+
+ mod = container_of(to_delayed_work(work),
+ struct nvhost_module,
+ powerstate_down);
+
+ mutex_lock(&mod->lock);
+ if (mod->refcount == 0) {
+ switch (mod->powerstate) {
+ case NVHOST_POWER_STATE_RUNNING:
+ to_state_clockgated_locked(mod);
+ schedule_powergating_locked(mod);
+ break;
+ case NVHOST_POWER_STATE_CLOCKGATED:
+ if (to_state_powergated_locked(mod))
+ schedule_powergating_locked(mod);
+ break;
+ default:
+ break;
+ }
+ }
+ mutex_unlock(&mod->lock);
+}
+
+
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
+{
+ bool kick = false;
+
+ mutex_lock(&mod->lock);
+ mod->refcount -= refs;
+ if (mod->refcount == 0) {
+ if (nvhost_module_powered(mod))
+ schedule_clockgating_locked(mod);
+ kick = true;
+ }
+ mutex_unlock(&mod->lock);
+
+ if (kick) {
+ wake_up(&mod->idle);
+
+ if (mod->desc->idle)
+ mod->desc->idle(mod);
+ }
+}
+
+int nvhost_module_get_rate(struct nvhost_master *host,
+ struct nvhost_module *mod, unsigned long *rate,
+ int index)
+{
+ struct clk *c;
+
+ c = mod->clk[index];
+ if (IS_ERR_OR_NULL(c))
+ return -EINVAL;
+
+ /* Need to power the module on to read the correct clock rate */
+ nvhost_module_busy(mod);
+ *rate = clk_get_rate(c);
+ nvhost_module_idle(mod);
+ return 0;
+}
+
+static int nvhost_module_update_rate(struct nvhost_module *mod, int index)
+{
+ unsigned long rate = 0;
+ struct nvhost_module_client *m;
+
+ if (!mod->clk[index])
+ return -EINVAL;
+
+ list_for_each_entry(m, &mod->client_list, node) {
+ rate = max(m->rate[index], rate);
+ }
+ if (!rate)
+ rate = clk_round_rate(mod->clk[index],
+ mod->desc->clocks[index].default_rate);
+
+ return clk_set_rate(mod->clk[index], rate);
+}
+
+int nvhost_module_set_rate(struct nvhost_master *host,
+ struct nvhost_module *mod, void *priv,
+ unsigned long rate, int index)
+{
+ struct nvhost_module_client *m;
+ int ret;
+
+ mutex_lock(&client_list_lock);
+ list_for_each_entry(m, &mod->client_list, node) {
+ if (m->priv == priv) {
+ rate = clk_round_rate(mod->clk[index], rate);
+ m->rate[index] = rate;
+ break;
+ }
+ }
+ ret = nvhost_module_update_rate(mod, index);
+ mutex_unlock(&client_list_lock);
+ return ret;
+}
+
+int nvhost_module_add_client(struct nvhost_master *host,
+ struct nvhost_module *mod, void *priv)
+{
+ int i;
+ unsigned long rate;
+ struct nvhost_module_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&client->node);
+ client->priv = priv;
+
+ for (i = 0; i < mod->num_clks; i++) {
+ rate = clk_round_rate(mod->clk[i],
+ mod->desc->clocks[i].default_rate);
+ client->rate[i] = rate;
+ }
+ mutex_lock(&client_list_lock);
+ list_add_tail(&client->node, &mod->client_list);
+ mutex_unlock(&client_list_lock);
+ return 0;
+}
+
+void nvhost_module_remove_client(struct nvhost_master *host,
+ struct nvhost_module *mod, void *priv)
+{
+ int i;
+ struct nvhost_module_client *m, *found = NULL;
+
+ mutex_lock(&client_list_lock);
+ list_for_each_entry(m, &mod->client_list, node) {
+ if (priv == m->priv) {
+ /* unlink and remember the match; the list cursor is not
+ * a valid client pointer after a full, matchless walk */
+ found = m;
+ list_del(&m->node);
+ break;
+ }
+ }
+ if (found) {
+ kfree(found);
+ for (i = 0; i < mod->num_clks; i++)
+ nvhost_module_update_rate(mod, i);
+ }
+ mutex_unlock(&client_list_lock);
+}
+
+void nvhost_module_preinit(const char *name,
+ const struct nvhost_moduledesc *desc)
+{
+ int i = 0;
+
+ /* initialize clocks to known state */
+ while (i < NVHOST_MODULE_MAX_CLOCKS && desc->clocks[i].name) {
+ char devname[MAX_DEVID_LENGTH];
+ long rate = desc->clocks[i].default_rate;
+ struct clk *c;
+
+ snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", name);
+ c = clk_get_sys(devname, desc->clocks[i].name);
+ BUG_ON(IS_ERR_OR_NULL(c));
+
+ rate = clk_round_rate(c, rate);
+ clk_enable(c);
+ clk_set_rate(c, rate);
+ clk_disable(c);
+ i++;
+ }
+
+ if (desc->can_powergate) {
+ do_powergate_locked(desc->powergate_ids[0]);
+ do_powergate_locked(desc->powergate_ids[1]);
+ } else {
+ do_unpowergate_locked(desc->powergate_ids[0]);
+ do_unpowergate_locked(desc->powergate_ids[1]);
+ }
+}
+
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ const struct nvhost_moduledesc *desc,
+ struct nvhost_module *parent,
+ struct device *dev)
+{
+ int i = 0;
+ int err;
+
+ /* register to kernel */
+ mod->drv.driver.name = name;
+ mod->drv.driver.owner = THIS_MODULE;
+ err = nvhost_driver_register(&mod->drv);
+ if (err)
+ return err;
+
+ nvhost_module_preinit(name, desc);
+ mod->name = name;
+
+ INIT_LIST_HEAD(&mod->client_list);
+ while (i < NVHOST_MODULE_MAX_CLOCKS && desc->clocks[i].name) {
+ char devname[MAX_DEVID_LENGTH];
+
+ snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", name);
+ mod->clk[i] = clk_get_sys(devname, desc->clocks[i].name);
+ BUG_ON(IS_ERR_OR_NULL(mod->clk[i]));
+ i++;
+ }
+ mod->num_clks = i;
+ mod->desc = desc;
+ mod->parent = parent;
+
+ mutex_init(&mod->lock);
+ init_waitqueue_head(&mod->idle);
+ INIT_DELAYED_WORK(&mod->powerstate_down, powerstate_down_handler);
+
+ if (desc->can_powergate) {
+ mod->powerstate = NVHOST_POWER_STATE_POWERGATED;
+ } else {
+ mod->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
+ }
+
+ if (desc->init)
+ desc->init(dev, mod);
+
+ return 0;
+}
+
+static int is_module_idle(struct nvhost_module *mod)
+{
+ int count;
+ mutex_lock(&mod->lock);
+ count = mod->refcount;
+ mutex_unlock(&mod->lock);
+ return (count == 0);
+}
+
+static void debug_not_idle(struct nvhost_master *dev)
+{
+ int i;
+ bool lock_released = true;
+
+ for (i = 0; i < dev->nb_channels; i++) {
+ struct nvhost_module *mod = &dev->channels[i].mod;
+ mutex_lock(&mod->lock);
+ if (mod->name)
+ dev_warn(&dev->pdev->dev,
+ "tegra_grhost: %s: refcnt %d\n",
+ mod->name, mod->refcount);
+ mutex_unlock(&mod->lock);
+ }
+
+ for (i = 0; i < dev->nb_mlocks; i++) {
+ int c = atomic_read(&dev->cpuaccess.lock_counts[i]);
+ if (c) {
+ dev_warn(&dev->pdev->dev,
+ "tegra_grhost: lock id %d: refcnt %d\n",
+ i, c);
+ lock_released = false;
+ }
+ }
+ if (lock_released)
+ dev_dbg(&dev->pdev->dev, "tegra_grhost: all locks released\n");
+}
+
+int nvhost_module_suspend(struct nvhost_module *mod, bool system_suspend)
+{
+ int ret;
+ struct nvhost_master *dev;
+
+ if (system_suspend) {
+ dev = container_of(mod, struct nvhost_master, mod);
+ if (!is_module_idle(mod))
+ debug_not_idle(dev);
+ } else {
+ dev = container_of(mod, struct nvhost_channel, mod)->dev;
+ }
+
+ ret = wait_event_timeout(mod->idle, is_module_idle(mod),
+ ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
+ if (ret == 0) {
+ dev_info(&dev->pdev->dev, "%s prevented suspend\n", mod->name);
+ return -EBUSY;
+ }
+
+ if (system_suspend)
+ dev_dbg(&dev->pdev->dev, "tegra_grhost: entered idle\n");
+
+ mutex_lock(&mod->lock);
+ cancel_delayed_work(&mod->powerstate_down);
+ to_state_powergated_locked(mod);
+ mutex_unlock(&mod->lock);
+
+ if (mod->desc->suspend)
+ mod->desc->suspend(mod);
+
+ return 0;
+}
+
+void nvhost_module_deinit(struct device *dev, struct nvhost_module *mod)
+{
+ int i;
+
+ nvhost_driver_unregister(&mod->drv);
+
+ if (mod->desc->deinit)
+ mod->desc->deinit(dev, mod);
+
+ nvhost_module_suspend(mod, false);
+ for (i = 0; i < mod->num_clks; i++)
+ clk_put(mod->clk[i]);
+ mod->powerstate = NVHOST_POWER_STATE_DEINIT;
+}
+
diff --git a/drivers/video/tegra/host/nvhost_acm.h b/drivers/video/tegra/host/nvhost_acm.h
new file mode 100644
index 000000000000..548f3e6a5c25
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.h
@@ -0,0 +1,121 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_ACM_H
+#define __NVHOST_ACM_H
+
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/nvhost.h>
+
+#define NVHOST_MODULE_MAX_CLOCKS 3
+#define NVHOST_MODULE_MAX_POWERGATE_IDS 2
+struct nvhost_module;
+struct nvhost_master;
+
+struct nvhost_moduledesc_clock {
+ char *name;
+ long default_rate;
+};
+
+#define NVHOST_MODULE_NO_POWERGATE_IDS .powergate_ids = {-1, -1}
+#define NVHOST_DEFAULT_CLOCKGATE_DELAY .clockgate_delay = 25
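+/* Hypothetical example of how these fragments slot into a module descriptor
+ * (the clock name and rate below are illustrative only):
+ *
+ *	static const struct nvhost_moduledesc mpe_desc = {
+ *		NVHOST_MODULE_NO_POWERGATE_IDS,
+ *		NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ *		.clocks = { { "mpe", UINT_MAX } },
+ *	};
+ */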
+
+struct nvhost_moduledesc {
+ int (*prepare_poweroff)(struct nvhost_module *mod);
+ void (*finalize_poweron)(struct nvhost_module *mod);
+ void (*busy)(struct nvhost_module *);
+ void (*idle)(struct nvhost_module *);
+ void (*suspend)(struct nvhost_module *);
+ void (*init)(struct device *dev, struct nvhost_module *);
+ void (*deinit)(struct device *dev, struct nvhost_module *);
+
+ int powergate_ids[NVHOST_MODULE_MAX_POWERGATE_IDS];
+ bool can_powergate;
+ int clockgate_delay;
+ int powergate_delay;
+ struct nvhost_moduledesc_clock clocks[NVHOST_MODULE_MAX_CLOCKS];
+};
+
+enum nvhost_module_powerstate_t {
+ NVHOST_POWER_STATE_DEINIT,
+ NVHOST_POWER_STATE_RUNNING,
+ NVHOST_POWER_STATE_CLOCKGATED,
+ NVHOST_POWER_STATE_POWERGATED
+};
+
+struct nvhost_module {
+ struct nvhost_driver drv;
+ const char *name;
+ struct delayed_work powerstate_down;
+ int num_clks;
+ struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
+ struct mutex lock;
+ int powerstate;
+ int refcount;
+ wait_queue_head_t idle;
+ struct nvhost_module *parent;
+ const struct nvhost_moduledesc *desc;
+ struct list_head client_list;
+};
+
+/* Sets clocks and powergating state for a module */
+void nvhost_module_preinit(const char *name, const struct nvhost_moduledesc *desc);
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ const struct nvhost_moduledesc *desc,
+ struct nvhost_module *parent,
+ struct device *dev);
+void nvhost_module_deinit(struct device *dev, struct nvhost_module *mod);
+int nvhost_module_suspend(struct nvhost_module *mod, bool system_suspend);
+
+void nvhost_module_reset(struct device *dev, struct nvhost_module *mod);
+void nvhost_module_busy(struct nvhost_module *mod);
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs);
+int nvhost_module_add_client(struct nvhost_master *host,
+ struct nvhost_module *mod,
+ void *priv);
+void nvhost_module_remove_client(struct nvhost_master *host,
+ struct nvhost_module *mod,
+ void *priv);
+int nvhost_module_get_rate(struct nvhost_master *host,
+ struct nvhost_module *mod,
+ unsigned long *rate,
+ int index);
+int nvhost_module_set_rate(struct nvhost_master *host,
+ struct nvhost_module *mod, void *priv,
+ unsigned long rate, int index);
+
+
+static inline bool nvhost_module_powered(struct nvhost_module *mod)
+{
+ return mod->powerstate == NVHOST_POWER_STATE_RUNNING;
+}
+
+static inline void nvhost_module_idle(struct nvhost_module *mod)
+{
+ nvhost_module_idle_mult(mod, 1);
+}
+
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cdma.c b/drivers/video/tegra/host/nvhost_cdma.c
new file mode 100644
index 000000000000..63ce365e990e
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.c
@@ -0,0 +1,565 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cdma.h"
+#include "dev.h"
+#include <asm/cacheflush.h>
+
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <trace/events/nvhost.h>
+#include <linux/interrupt.h>
+
+/*
+ * TODO:
+ * stats
+ * - for figuring out what to optimize further
+ * resizable push buffer & sync queue
+ * - some channels hardly need any, some channels (3d) could use more
+ */
+
+/**
+ * kfifo_save - save current out pointer
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_save(fifo) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __kfifo->out; \
+}) \
+)
+
+/**
+ * kfifo_restore - restore previously saved pointer
+ * @fifo: address of the fifo to be used
+ * @out: output pointer
+ */
+#define kfifo_restore(fifo, restore) \
+(void)({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __kfifo->out = (restore); \
+})
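+
+/*
+ * kfifo_save()/kfifo_restore() snapshot and restore the sync queue's consumer
+ * index so nvhost_cdma_update_sync_queue() can walk the queue with
+ * kfifo_peek()/kfifo_skip() during timeout cleanup and then rewind it.
+ */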
+
+/**
+ * Add an entry to the sync queue.
+ */
+static void add_to_sync_queue(struct nvhost_cdma *cdma,
+ struct nvhost_job *job,
+ u32 nr_slots,
+ u32 first_get)
+{
+ BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+ job->first_get = first_get;
+ job->num_slots = nr_slots;
+ nvhost_job_get(job);
+ kfifo_in(&cdma->sync_queue, (void *)&job, 1);
+}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given event
+ * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-)
+ * - sq space: returns the number of handles that can be stored in the queue
+ * - pb space: returns the number of free slots in the channel's push buffer
+ * Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status_locked(struct nvhost_cdma *cdma,
+ enum cdma_event event)
+{
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
+ return kfifo_len(&cdma->sync_queue) == 0 ? 1 : 0;
+ case CDMA_EVENT_SYNC_QUEUE_SPACE:
+ return kfifo_avail(&cdma->sync_queue);
+ case CDMA_EVENT_PUSH_BUFFER_SPACE: {
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op(cdma).space);
+ return cdma_pb_op(cdma).space(pb);
+ }
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Sleep (if necessary) until the requested event happens
+ * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ * - Returns 1
+ * - CDMA_EVENT_SYNC_QUEUE_SPACE : there is space in the sync queue.
+ * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ * - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
+ enum cdma_event event)
+{
+ for (;;) {
+ unsigned int space = cdma_status_locked(cdma, event);
+ if (space)
+ return space;
+
+ trace_nvhost_wait_cdma(cdma_to_channel(cdma)->desc->name,
+ event);
+
+ BUG_ON(cdma->event != CDMA_EVENT_NONE);
+ cdma->event = event;
+
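+ /* Drop the lock while sleeping; update_cdma_locked() ups cdma->sem
+ * once the requested event has occurred.
+ */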
+ mutex_unlock(&cdma->lock);
+ down(&cdma->sem);
+ mutex_lock(&cdma->lock);
+ }
+ return 0;
+}
+
+/**
+ * Start the timeout timer for a buffer submission that has not completed yet.
+ * Must be called with the cdma lock held.
+ */
+static void cdma_start_timer_locked(struct nvhost_cdma *cdma,
+ struct nvhost_job *job)
+{
+ BUG_ON(!job);
+ if (cdma->timeout.clientid) {
+ /* timer already started */
+ return;
+ }
+
+ cdma->timeout.ctx = job->hwctx;
+ cdma->timeout.clientid = job->clientid;
+ cdma->timeout.syncpt_id = job->syncpt_id;
+ cdma->timeout.syncpt_val = job->syncpt_end;
+ cdma->timeout.start_ktime = ktime_get();
+
+ schedule_delayed_work(&cdma->timeout.wq,
+ msecs_to_jiffies(job->timeout));
+}
+
+/**
+ * Stop the timer when a buffer submission completes.
+ * Must be called with the cdma lock held.
+ */
+static void stop_cdma_timer_locked(struct nvhost_cdma *cdma)
+{
+ cancel_delayed_work(&cdma->timeout.wq);
+ cdma->timeout.ctx = NULL;
+ cdma->timeout.clientid = 0;
+}
+
+/**
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ * - unpin & unref their mems
+ * - pop their push buffer slots
+ * - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma_locked(struct nvhost_cdma *cdma)
+{
+ bool signal = false;
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+
+ BUG_ON(!cdma->running);
+
+ /*
+ * Walk the sync queue, reading the sync point registers as necessary,
+ * to consume as many sync queue entries as possible without blocking
+ */
+ for (;;) {
+ struct nvhost_syncpt *sp = &dev->syncpt;
+ struct nvhost_job *job;
+ int result;
+
+ result = kfifo_peek(&cdma->sync_queue, &job);
+ if (!result) {
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ signal = true;
+ break;
+ }
+
+ BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+ /* Check whether this syncpt has completed, and bail if not */
+ if (!nvhost_syncpt_min_cmp(sp,
+ job->syncpt_id, job->syncpt_end)) {
+ /* Start timer on next pending syncpt */
+ if (job->timeout)
+ cdma_start_timer_locked(cdma, job);
+ break;
+ }
+
+ /* Cancel timeout, when a buffer completes */
+ if (cdma->timeout.clientid)
+ stop_cdma_timer_locked(cdma);
+
+ /* Unpin the memory */
+ nvhost_job_unpin(job);
+
+ /* Pop push buffer slots */
+ if (job->num_slots) {
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op(cdma).pop_from);
+ cdma_pb_op(cdma).pop_from(pb, job->num_slots);
+ if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+ signal = true;
+ }
+
+ nvhost_job_put(job);
+ kfifo_skip(&cdma->sync_queue);
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
+ signal = true;
+ }
+
+ /* Wake up CdmaWait() if the requested event happened */
+ if (signal) {
+ cdma->event = CDMA_EVENT_NONE;
+ up(&cdma->sem);
+ }
+}
+
+void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
+ struct nvhost_syncpt *syncpt, struct device *dev)
+{
+ u32 get_restart;
+ u32 syncpt_incrs;
+ bool exec_ctxsave;
+ unsigned int queue_restore;
+ struct nvhost_job *job = NULL;
+ int result;
+ u32 syncpt_val;
+
+ syncpt_val = nvhost_syncpt_update_min(syncpt, cdma->timeout.syncpt_id);
+ queue_restore = kfifo_save(&cdma->sync_queue);
+
+ dev_dbg(dev,
+ "%s: starting cleanup (thresh %d, queue length %d)\n",
+ __func__,
+ syncpt_val, kfifo_len(&cdma->sync_queue));
+
+ /*
+ * Move the sync_queue read pointer to the first entry that hasn't
+ * completed based on the current HW syncpt value. It's likely there
+ * won't be any (i.e. we're still at the head), but covers the case
+ * where a syncpt incr happens just prior/during the teardown.
+ */
+
+ dev_dbg(dev,
+ "%s: skip completed buffers still in sync_queue\n",
+ __func__);
+
+ result = kfifo_peek(&cdma->sync_queue, &job);
+ while (result && syncpt_val >= job->syncpt_end) {
+ nvhost_job_dump(dev, job);
+ kfifo_skip(&cdma->sync_queue);
+ result = kfifo_peek(&cdma->sync_queue, &job);
+ }
+
+ /*
+ * Walk the sync_queue, first incrementing with the CPU syncpts that
+ * are partially executed (the first buffer) or fully skipped while
+ * still in the current context (slots are also NOP-ed).
+ *
+ * At the point contexts are interleaved, syncpt increments must be
+ * done inline with the pushbuffer from a GATHER buffer to maintain
+ * the order (slots are modified to be a GATHER of syncpt incrs).
+ *
+ * Note: save in get_restart the location where the timed out buffer
+ * started in the PB, so we can start the refetch from there (with the
+ * modified NOP-ed PB slots). This lets things appear to have completed
+ * properly for this buffer and resources are freed.
+ */
+
+ dev_dbg(dev,
+ "%s: perform CPU incr on pending same ctx buffers\n",
+ __func__);
+
+ get_restart = cdma->last_put;
+ if (kfifo_len(&cdma->sync_queue) > 0)
+ get_restart = job->first_get;
+
+ /* do CPU increments as long as this context continues */
+ while (result && job->clientid == cdma->timeout.clientid) {
+ /* won't need a timeout when replayed */
+ job->timeout = 0;
+
+ syncpt_incrs = job->syncpt_end - syncpt_val;
+ dev_dbg(dev,
+ "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
+
+ nvhost_job_dump(dev, job);
+
+ /* safe to use CPU to incr syncpts */
+ cdma_op(cdma).timeout_cpu_incr(cdma,
+ job->first_get,
+ syncpt_incrs,
+ job->syncpt_end,
+ job->num_slots);
+
+ kfifo_skip(&cdma->sync_queue);
+ result = kfifo_peek(&cdma->sync_queue, &job);
+ }
+
+ dev_dbg(dev,
+ "%s: GPU incr blocked interleaved ctx buffers\n",
+ __func__);
+
+ exec_ctxsave = false;
+
+ /* setup GPU increments */
+ while (result) {
+ /* same context, increment in the pushbuffer */
+ if (job->clientid == cdma->timeout.clientid) {
+ /* won't need a timeout when replayed */
+ job->timeout = 0;
+
+ /* update buffer's syncpts in the pushbuffer */
+ cdma_op(cdma).timeout_pb_incr(cdma,
+ job->first_get,
+ job->syncpt_incrs,
+ job->num_slots,
+ exec_ctxsave);
+
+ exec_ctxsave = false;
+ } else {
+ dev_dbg(dev,
+ "%s: switch to a different userctx\n",
+ __func__);
+ /*
+ * If previous context was the timed out context
+ * then clear its CTXSAVE in this slot.
+ */
+ exec_ctxsave = true;
+ }
+
+ nvhost_job_dump(dev, job);
+
+ kfifo_skip(&cdma->sync_queue);
+ result = kfifo_peek(&cdma->sync_queue, &job);
+ }
+
+ dev_dbg(dev,
+ "%s: finished sync_queue modification\n", __func__);
+
+ kfifo_restore(&cdma->sync_queue, queue_restore);
+
+ /* roll back DMAGET and start up channel again */
+ cdma_op(cdma).timeout_teardown_end(cdma, get_restart);
+
+ if (cdma->timeout.ctx)
+ cdma->timeout.ctx->has_timedout = true;
+}
+
+/**
+ * Create a cdma
+ */
+int nvhost_cdma_init(struct nvhost_cdma *cdma)
+{
+ int err;
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op(cdma).init);
+ mutex_init(&cdma->lock);
+ sema_init(&cdma->sem, 0);
+
+ err = kfifo_alloc(&cdma->sync_queue,
+ cdma_to_dev(cdma)->sync_queue_size
+ * sizeof(struct nvhost_job *),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ cdma->event = CDMA_EVENT_NONE;
+ cdma->running = false;
+ cdma->torndown = false;
+
+ err = cdma_pb_op(cdma).init(pb);
+ if (err)
+ return err;
+ return 0;
+}
+
+/**
+ * Destroy a cdma
+ */
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
+{
+ struct push_buffer *pb = &cdma->push_buffer;
+
+ BUG_ON(!cdma_pb_op(cdma).destroy);
+ BUG_ON(cdma->running);
+ kfifo_free(&cdma->sync_queue);
+ cdma_pb_op(cdma).destroy(pb);
+ cdma_op(cdma).timeout_destroy(cdma);
+}
+
+/**
+ * Begin a cdma submit
+ */
+int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job)
+{
+ mutex_lock(&cdma->lock);
+
+ if (job->timeout) {
+ /* init state on first submit with timeout value */
+ if (!cdma->timeout.initialized) {
+ int err;
+ BUG_ON(!cdma_op(cdma).timeout_init);
+ err = cdma_op(cdma).timeout_init(cdma,
+ job->syncpt_id);
+ if (err) {
+ mutex_unlock(&cdma->lock);
+ return err;
+ }
+ }
+ }
+ if (!cdma->running) {
+ BUG_ON(!cdma_op(cdma).start);
+ cdma_op(cdma).start(cdma);
+ }
+ cdma->slots_free = 0;
+ cdma->slots_used = 0;
+ cdma->first_get = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
+ return 0;
+}
+
+/**
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
+{
+ nvhost_cdma_push_gather(cdma, NULL, NULL, op1, op2);
+}
+
+/**
+ * Push two words into a push buffer slot and record the nvmap client/handle
+ * that backs them. Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
+ struct nvmap_client *client,
+ struct nvmap_handle *handle, u32 op1, u32 op2)
+{
+ u32 slots_free = cdma->slots_free;
+ struct push_buffer *pb = &cdma->push_buffer;
+ BUG_ON(!cdma_pb_op(cdma).push_to);
+ BUG_ON(!cdma_op(cdma).kick);
+ if (slots_free == 0) {
+ cdma_op(cdma).kick(cdma);
+ slots_free = nvhost_cdma_wait_locked(cdma,
+ CDMA_EVENT_PUSH_BUFFER_SPACE);
+ }
+ cdma->slots_free = slots_free - 1;
+ cdma->slots_used++;
+ cdma_pb_op(cdma).push_to(pb, client, handle, op1, op2);
+}
+
+/**
+ * End a cdma submit
+ * Kick off DMA, add the job to the sync queue, and record the number of
+ * pushbuffer slots to free once the job completes.
+ * Blocks as necessary if the sync queue is full.
+ * The handles for a submit must all be pinned at the same time, but they
+ * can be unpinned in smaller chunks.
+ */
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+ struct nvhost_job *job)
+{
+ bool was_idle = kfifo_len(&cdma->sync_queue) == 0;
+
+ BUG_ON(!cdma_op(cdma).kick);
+ cdma_op(cdma).kick(cdma);
+
+ BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+ nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_SPACE);
+ add_to_sync_queue(cdma,
+ job,
+ cdma->slots_used,
+ cdma->first_get);
+
+ /* start timer on idle -> active transitions */
+ if (job->timeout && was_idle)
+ cdma_start_timer_locked(cdma, job);
+
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Update cdma state according to current sync point values
+ */
+void nvhost_cdma_update(struct nvhost_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ update_cdma_locked(cdma);
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Wait for the sync queue to drain, i.e. for all submitted jobs to complete.
+ * @cdma pointer to channel cdma
+ * @timeout timeout in ms
+ * Returns -ETIME if the timeout was reached, zero once the sync queue is empty.
+ */
+int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout)
+{
+ unsigned int space, err = 0;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+
+ /*
+ * Wait for at most timeout ms. Recalculate timeout at each iteration
+ * to better keep within given timeout.
+ */
+	while (!err && time_before(jiffies, end_jiffies)) {
+ int timeout_jiffies = end_jiffies - jiffies;
+
+ mutex_lock(&cdma->lock);
+ space = cdma_status_locked(cdma,
+ CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ if (space) {
+ mutex_unlock(&cdma->lock);
+ return 0;
+ }
+
+ /*
+ * Wait for sync queue to become empty. If there is already
+ * an event pending, we need to poll.
+ */
+ if (cdma->event != CDMA_EVENT_NONE) {
+ mutex_unlock(&cdma->lock);
+ schedule();
+ } else {
+ cdma->event = CDMA_EVENT_SYNC_QUEUE_EMPTY;
+
+ mutex_unlock(&cdma->lock);
+			err = down_timeout(&cdma->sem, timeout_jiffies);
+ }
+ }
+ return err;
+}
diff --git a/drivers/video/tegra/host/nvhost_cdma.h b/drivers/video/tegra/host/nvhost_cdma.h
new file mode 100644
index 000000000000..87b6a14d60e3
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.h
@@ -0,0 +1,136 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CDMA_H
+#define __NVHOST_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <linux/kfifo.h>
+
+#include "nvhost_acm.h"
+
+struct nvhost_syncpt;
+struct nvhost_userctx_timeout;
+struct nvhost_job;
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ * begin
+ * push - send ops to the push buffer
+ * end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ * update - call to update sync queue and push buffer, unpin memory
+ */
+
+struct nvmap_client_handle {
+ struct nvmap_client *client;
+ struct nvmap_handle *handle;
+};
+
+struct push_buffer {
+ struct nvmap_handle_ref *mem; /* handle to pushbuffer memory */
+ u32 *mapped; /* mapped pushbuffer memory */
+ u32 phys; /* physical address of pushbuffer */
+ u32 fence; /* index we've written */
+ u32 cur; /* index to write to */
+ struct nvmap_client_handle *nvmap;
+ /* nvmap handle for each opcode pair */
+};
+
+struct syncpt_buffer {
+	struct nvmap_handle_ref *mem; /* handle to syncpt gather buffer memory */
+	u32 *mapped;			/* mapped gather buffer (at channel offset) */
+ u32 phys; /* physical address (at channel offset) */
+ u32 incr_per_buffer; /* max # of incrs per GATHER */
+ u32 words_per_incr; /* # of DWORDS in buffer to incr a syncpt */
+};
+
+struct buffer_timeout {
+ struct delayed_work wq; /* work queue */
+ bool initialized; /* timer one-time setup flag */
+ u32 syncpt_id; /* buffer completion syncpt id */
+ u32 syncpt_val; /* syncpt value when completed */
+ ktime_t start_ktime; /* starting time */
+ /* context timeout information */
+ struct nvhost_hwctx *ctx;
+ int clientid;
+};
+
+enum cdma_event {
+ CDMA_EVENT_NONE, /* not waiting for any event */
+ CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
+ CDMA_EVENT_SYNC_QUEUE_SPACE, /* wait for space in sync queue */
+ CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
+};
+
+struct nvhost_cdma {
+ struct mutex lock; /* controls access to shared state */
+ struct semaphore sem; /* signalled when event occurs */
+ enum cdma_event event; /* event that sem is waiting for */
+ unsigned int slots_used; /* pb slots used in current submit */
+ unsigned int slots_free; /* pb slots free in current submit */
+ unsigned int first_get; /* DMAGET value, where submit begins */
+ unsigned int last_put; /* last value written to DMAPUT */
+ struct push_buffer push_buffer; /* channel's push buffer */
+ struct syncpt_buffer syncpt_buffer; /* syncpt incr buffer */
+ DECLARE_KFIFO_PTR(sync_queue, struct nvhost_job *); /* job queue */
+ struct buffer_timeout timeout; /* channel's timeout state/wq */
+ bool running;
+ bool torndown;
+};
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) ((cdma_to_channel(cdma))->dev)
+#define cdma_op(cdma) (cdma_to_dev(cdma)->op.cdma)
+#define cdma_to_nvmap(cdma) ((cdma_to_dev(cdma))->nvmap)
+#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
+#define cdma_pb_op(cdma) (cdma_to_dev(cdma)->op.push_buffer)
+
+int nvhost_cdma_init(struct nvhost_cdma *cdma);
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
+void nvhost_cdma_stop(struct nvhost_cdma *cdma);
+int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job);
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
+#define NVHOST_CDMA_PUSH_GATHER_CTXSAVE 0xffffffff
+void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
+ struct nvmap_client *client,
+ struct nvmap_handle *handle, u32 op1, u32 op2);
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+ struct nvhost_job *job);
+void nvhost_cdma_update(struct nvhost_cdma *cdma);
+int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout);
+void nvhost_cdma_peek(struct nvhost_cdma *cdma,
+ u32 dmaget, int slot, u32 *out);
+unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
+ enum cdma_event event);
+void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
+ struct nvhost_syncpt *syncpt, struct device *dev);
+#endif
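
A minimal producer-side sketch of the begin/push/end sequence documented above,
assuming the caller already has a prepared nvhost_job for this channel; op1/op2
are placeholder opcode words and error handling is reduced to the minimum.

static int example_cdma_submit(struct nvhost_cdma *cdma,
			       struct nvhost_job *job, u32 op1, u32 op2)
{
	int err;

	err = nvhost_cdma_begin(cdma, job);	/* takes cdma->lock */
	if (err)
		return err;

	nvhost_cdma_push(cdma, op1, op2);	/* may block for push buffer space */
	nvhost_cdma_end(cdma, job);		/* kicks DMA, releases cdma->lock */
	return 0;
}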
diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c
new file mode 100644
index 000000000000..85256016ad70
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.c
@@ -0,0 +1,117 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_hwctx.h"
+#include "nvhost_job.h"
+#include <trace/events/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+
+#define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50
+
+int nvhost_channel_submit(struct nvhost_job *job)
+{
+	/* Low priority submits wait until the sync queue is empty. The result
+	 * of nvhost_cdma_flush is ignored: we submit either once the queue
+	 * has drained or once the timeout expires. */
+ if (job->priority < NVHOST_PRIORITY_MEDIUM)
+ (void)nvhost_cdma_flush(&job->ch->cdma,
+ NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);
+
+ return channel_op(job->ch).submit(job);
+}
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+ int err = 0;
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 0) {
+ err = nvhost_module_init(&ch->mod, ch->desc->name,
+ &ch->desc->module,
+ &ch->dev->mod,
+ &ch->dev->pdev->dev);
+ if (!err) {
+ err = nvhost_cdma_init(&ch->cdma);
+ if (err)
+ nvhost_module_deinit(&ch->dev->pdev->dev,
+ &ch->mod);
+ }
+ } else if (ch->desc->exclusive) {
+ err = -EBUSY;
+ }
+ if (!err)
+ ch->refcount++;
+
+ mutex_unlock(&ch->reflock);
+
+	/* Keep alive modules that need to stay powered while a channel is open */
+ if (!err && ch->desc->keepalive)
+ nvhost_module_busy(&ch->mod);
+
+ return err ? NULL : ch;
+}
+
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+ BUG_ON(!channel_cdma_op(ch).stop);
+
+ if (ctx) {
+ mutex_lock(&ch->submitlock);
+ if (ch->cur_ctx == ctx)
+ ch->cur_ctx = NULL;
+ mutex_unlock(&ch->submitlock);
+ }
+
+ /* Allow keep-alive'd module to be turned off */
+ if (ch->desc->keepalive)
+ nvhost_module_idle(&ch->mod);
+
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 1) {
+ channel_cdma_op(ch).stop(&ch->cdma);
+ nvhost_cdma_deinit(&ch->cdma);
+ nvhost_module_deinit(&ch->dev->pdev->dev, &ch->mod);
+ }
+ ch->refcount--;
+ mutex_unlock(&ch->reflock);
+}
+
+int nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+ int ret = 0;
+
+ mutex_lock(&ch->reflock);
+ BUG_ON(!channel_cdma_op(ch).stop);
+
+ if (ch->refcount) {
+ ret = nvhost_module_suspend(&ch->mod, false);
+ if (!ret)
+ channel_cdma_op(ch).stop(&ch->cdma);
+ }
+ mutex_unlock(&ch->reflock);
+
+ return ret;
+}
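
A sketch of the reference-counted pairing these helpers expect around a submit;
the job is assumed to already target ch, the hwctx argument to
nvhost_putchannel() is left NULL, and -EBUSY stands in for whatever made
nvhost_getchannel() fail.

static int example_channel_use(struct nvhost_channel *ch,
			       struct nvhost_job *job)
{
	int err;

	if (!nvhost_getchannel(ch))	/* powers up modules on first open */
		return -EBUSY;

	err = nvhost_channel_submit(job);

	nvhost_putchannel(ch, NULL);	/* powers down again on last close */
	return err;
}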
diff --git a/drivers/video/tegra/host/nvhost_channel.h b/drivers/video/tegra/host/nvhost_channel.h
new file mode 100644
index 000000000000..c6d60fbf1189
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.h
@@ -0,0 +1,101 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include "nvhost_cdma.h"
+#include "nvhost_acm.h"
+#include "nvhost_hwctx.h"
+#include "nvhost_job.h"
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+
+#define NVHOST_MAX_WAIT_CHECKS 256
+#define NVHOST_MAX_GATHERS 512
+#define NVHOST_MAX_HANDLES 1280
+#define NVHOST_MAX_POWERGATE_IDS 2
+
+struct nvhost_master;
+struct nvhost_waitchk;
+
+struct nvhost_channeldesc {
+ const char *name;
+ u32 syncpts;
+ u32 waitbases;
+ u32 modulemutexes;
+ u32 class;
+ bool exclusive;
+ bool keepalive;
+ bool waitbasesync;
+ struct nvhost_moduledesc module;
+};
+
+struct nvhost_channel_gather {
+ u32 words;
+ phys_addr_t mem;
+ u32 mem_id;
+ int offset;
+};
+
+struct nvhost_channel {
+ int refcount;
+ int chid;
+ u32 syncpt_id;
+ struct mutex reflock;
+ struct mutex submitlock;
+ void __iomem *aperture;
+ struct nvhost_master *dev;
+ const struct nvhost_channeldesc *desc;
+ struct nvhost_hwctx *cur_ctx;
+ struct device *node;
+ struct cdev cdev;
+ struct nvhost_hwctx_handler ctxhandler;
+ struct nvhost_module mod;
+ struct nvhost_cdma cdma;
+};
+
+int nvhost_channel_init(
+ struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index);
+
+int nvhost_channel_submit(struct nvhost_job *job);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+int nvhost_channel_suspend(struct nvhost_channel *ch);
+
+#define channel_cdma_op(ch) (ch->dev->op.cdma)
+#define channel_op(ch) (ch->dev->op.channel)
+#define host_channel_op(host) (host->op.channel)
+
+int nvhost_channel_drain_read_fifo(void __iomem *chan_regs,
+ u32 *ptr, unsigned int count, unsigned int *pending);
+
+int nvhost_channel_read_3d_reg(
+ struct nvhost_channel *channel,
+ struct nvhost_hwctx *hwctx,
+ u32 offset,
+ u32 *value);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cpuaccess.c b/drivers/video/tegra/host/nvhost_cpuaccess.c
new file mode 100644
index 000000000000..6c876b332001
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cpuaccess.c
@@ -0,0 +1,120 @@
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.c
+ *
+ * Tegra Graphics Host CPU Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cpuaccess.h"
+#include "dev.h"
+#include <linux/string.h>
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev)
+{
+ struct nvhost_master *host = cpuaccess_to_dev(ctx);
+ int i;
+
+ for (i = 0; i < host->nb_modules; i++) {
+ struct resource *mem;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i+1);
+ if (!mem) {
+ dev_err(&pdev->dev, "missing module memory resource\n");
+ return -ENXIO;
+ }
+ ctx->reg_mem[i] = mem;
+ ctx->regs[i] = ioremap(mem->start, resource_size(mem));
+ if (!ctx->regs[i]) {
+ dev_err(&pdev->dev, "failed to map module registers\n");
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx)
+{
+ struct nvhost_master *host = cpuaccess_to_dev(ctx);
+ int i;
+
+ for (i = 0; i < host->nb_modules; i++) {
+ iounmap(ctx->regs[i]);
+ release_resource(ctx->reg_mem[i]);
+ }
+}
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ u32 reg;
+ BUG_ON(!cpuaccess_op(ctx).mutex_try_lock);
+
+ nvhost_module_busy(&dev->mod);
+ reg = cpuaccess_op(ctx).mutex_try_lock(ctx, idx);
+ if (reg) {
+ nvhost_module_idle(&dev->mod);
+ return -EBUSY;
+ }
+ atomic_inc(&ctx->lock_counts[idx]);
+ return 0;
+}
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ BUG_ON(!cpuaccess_op(ctx).mutex_unlock);
+
+ cpuaccess_op(ctx).mutex_unlock(ctx, idx);
+ nvhost_module_idle(&dev->mod);
+ atomic_dec(&ctx->lock_counts[idx]);
+}
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *p = ctx->regs[module] + offset;
+ u32 *out = (u32 *)values;
+ BUG_ON(size & 3);
+ size >>= 2;
+ nvhost_module_busy(&dev->mod);
+ while (size--) {
+ *(out++) = readl(p);
+ p += 4;
+ }
+ rmb();
+ nvhost_module_idle(&dev->mod);
+}
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *p = ctx->regs[module] + offset;
+ const u32 *in = (const u32 *)values;
+ BUG_ON(size & 3);
+ size >>= 2;
+ nvhost_module_busy(&dev->mod);
+ while (size--) {
+ writel(*(in++), p);
+ p += 4;
+ }
+ wmb();
+ nvhost_module_idle(&dev->mod);
+}
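
An illustrative read through the cpuaccess helpers above; NVHOST_MODULE_MPE
comes from nvhost_cpuaccess.h below, the 0x80 offset is a made-up example, and
the size must stay a multiple of four bytes.

static void example_read_mpe_regs(struct nvhost_cpuaccess *ctx, u32 out[4])
{
	/* read four consecutive 32-bit registers starting at offset 0x80 */
	nvhost_read_module_regs(ctx, NVHOST_MODULE_MPE, 0x80,
				4 * sizeof(u32), out);
}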
diff --git a/drivers/video/tegra/host/nvhost_cpuaccess.h b/drivers/video/tegra/host/nvhost_cpuaccess.h
new file mode 100644
index 000000000000..2e210b7477af
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cpuaccess.h
@@ -0,0 +1,65 @@
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.h
+ *
+ * Tegra Graphics Host CPU Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CPUACCESS_H
+#define __NVHOST_CPUACCESS_H
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+enum nvhost_module_id {
+ NVHOST_MODULE_DISPLAY_A = 0,
+ NVHOST_MODULE_DISPLAY_B,
+ NVHOST_MODULE_VI,
+ NVHOST_MODULE_ISP,
+ NVHOST_MODULE_MPE,
+#if 0
+ /* TODO: [ahatala 2010-07-02] find out if these are needed */
+ NVHOST_MODULE_FUSE,
+ NVHOST_MODULE_APB_MISC,
+ NVHOST_MODULE_CLK_RESET,
+#endif
+ NVHOST_MODULE_NUM
+};
+
+struct nvhost_cpuaccess {
+ struct resource **reg_mem;
+ void __iomem **regs;
+ atomic_t *lock_counts;
+};
+
+#define cpuaccess_to_dev(ctx) container_of(ctx, struct nvhost_master, cpuaccess)
+#define cpuaccess_op(ctx) (cpuaccess_to_dev(ctx)->op.cpuaccess)
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev);
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values);
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h
new file mode 100644
index 000000000000..9fbab78c8331
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_hwctx.h
@@ -0,0 +1,78 @@
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/kref.h>
+
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+
+struct nvhost_channel;
+struct nvhost_cdma;
+
+struct nvhost_hwctx {
+ struct kref ref;
+
+ struct nvhost_channel *channel;
+ bool valid;
+
+ struct nvmap_handle_ref *save;
+ u32 save_incrs;
+ u32 save_thresh;
+ u32 save_slots;
+
+ struct nvmap_handle_ref *restore;
+ u32 *restore_virt;
+ phys_addr_t restore_phys;
+ u32 restore_size;
+ u32 restore_incrs;
+
+ bool has_timedout;
+};
+
+struct nvhost_hwctx_handler {
+ struct nvhost_hwctx * (*alloc) (struct nvhost_channel *ch);
+ void (*get) (struct nvhost_hwctx *ctx);
+ void (*put) (struct nvhost_hwctx *ctx);
+ void (*save_push) (struct nvhost_cdma *cdma, struct nvhost_hwctx *ctx);
+ void (*save_service) (struct nvhost_hwctx *ctx);
+};
+
+
+struct hwctx_reginfo {
+ unsigned int offset:12;
+ unsigned int count:16;
+ unsigned int type:2;
+};
+
+enum {
+ HWCTX_REGINFO_DIRECT = 0,
+ HWCTX_REGINFO_INDIRECT,
+ HWCTX_REGINFO_INDIRECT_4X
+};
+
+#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
+
+#endif
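
A sketch of how a per-module context save/restore table might be built from
HWCTX_REGINFO(); the offsets and counts below are placeholders, not real 3D
register numbers.

static const struct hwctx_reginfo example_ctxsave_regs[] = {
	HWCTX_REGINFO(0x00e, 4, DIRECT),	/* 4 registers read directly */
	HWCTX_REGINFO(0x700, 16, INDIRECT),	/* 16 registers via indirect access */
};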
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c
new file mode 100644
index 000000000000..1a1a9b3fa4c3
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.c
@@ -0,0 +1,440 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <trace/events/nvhost.h>
+
+
+
+
+
+/*** Wait list management ***/
+
+struct nvhost_waitlist {
+ struct list_head list;
+ struct kref refcount;
+ u32 thresh;
+ enum nvhost_intr_action action;
+ atomic_t state;
+ void *data;
+ int count;
+};
+
+enum waitlist_state {
+ WLS_PENDING,
+ WLS_REMOVED,
+ WLS_CANCELLED,
+ WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct nvhost_waitlist, refcount));
+}
+
+/**
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
+ struct list_head *queue)
+{
+ struct nvhost_waitlist *pos;
+ u32 thresh = waiter->thresh;
+
+ list_for_each_entry_reverse(pos, queue, list)
+ if ((s32)(pos->thresh - thresh) <= 0) {
+ list_add(&waiter->list, &pos->list);
+ return false;
+ }
+
+ list_add(&waiter->list, queue);
+ return true;
+}
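
The (s32) cast in the comparison above is what keeps the ordering correct
across 32-bit syncpt wraparound; a self-contained restatement of that check,
with illustrative values in the comment:

#include <stdbool.h>
#include <stdint.h>

/* "a is at or after b" in wraparound-safe syncpt ordering, mirroring
 * (s32)(pos->thresh - thresh) <= 0 above. With a = 0x00000002 and
 * b = 0xfffffffe the difference is 4, so a is ordered after b even
 * though a < b as plain unsigned numbers. */
static bool thresh_at_or_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}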
+
+/**
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into per-action lists
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *dest;
+ struct nvhost_waitlist *waiter, *next, *prev;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ if ((s32)(waiter->thresh - sync) > 0)
+ break;
+
+ dest = completed + waiter->action;
+
+ /* consolidate submit cleanups */
+ if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+ && !list_empty(dest)) {
+ prev = list_entry(dest->prev,
+ struct nvhost_waitlist, list);
+ if (prev->data == waiter->data) {
+ prev->count++;
+ dest = NULL;
+ }
+ }
+
+ /* PENDING->REMOVED or CANCELLED->HANDLED */
+ if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ } else {
+ list_move_tail(&waiter->list, dest);
+ }
+ }
+}
+
+void reset_threshold_interrupt(struct nvhost_intr *intr,
+ struct list_head *head,
+ unsigned int id)
+{
+ u32 thresh = list_first_entry(head,
+ struct nvhost_waitlist, list)->thresh;
+ BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
+ intr_op(intr).enable_syncpt_intr));
+
+ intr_op(intr).set_syncpt_threshold(intr, id, thresh);
+ intr_op(intr).enable_syncpt_intr(intr, id);
+}
+
+
+static void action_submit_complete(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_channel *channel = waiter->data;
+ int nr_completed = waiter->count;
+
+ /* Add nr_completed to trace */
+ trace_nvhost_channel_submit_complete(channel->desc->name,
+ nr_completed);
+
+ nvhost_cdma_update(&channel->cdma);
+ nvhost_module_idle_mult(&channel->mod, nr_completed);
+}
+
+static void action_ctxsave(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_hwctx *hwctx = waiter->data;
+ struct nvhost_channel *channel = hwctx->channel;
+
+ if (channel->ctxhandler.save_service)
+ channel->ctxhandler.save_service(hwctx);
+ channel->ctxhandler.put(hwctx);
+}
+
+static void action_ctxrestore(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_hwctx *hwctx = waiter->data;
+ struct nvhost_channel *channel = hwctx->channel;
+
+ channel->ctxhandler.put(hwctx);
+}
+
+static void action_wakeup(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct nvhost_waitlist *waiter);
+
+static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
+ action_submit_complete,
+ action_ctxsave,
+ action_ctxrestore,
+ action_wakeup,
+ action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *head = completed;
+ int i;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+ action_handler handler = action_handlers[i];
+ struct nvhost_waitlist *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ list_del(&waiter->list);
+ handler(waiter);
+ WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+}
+
+/**
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct nvhost_intr *intr,
+ struct nvhost_intr_syncpt *syncpt,
+ u32 threshold)
+{
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT];
+ unsigned int i;
+ int empty;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
+ INIT_LIST_HEAD(completed + i);
+
+ spin_lock(&syncpt->lock);
+
+ remove_completed_waiters(&syncpt->wait_head, threshold, completed);
+
+ empty = list_empty(&syncpt->wait_head);
+ if (!empty)
+ reset_threshold_interrupt(intr, &syncpt->wait_head,
+ syncpt->id);
+
+ spin_unlock(&syncpt->lock);
+
+ run_handlers(completed);
+
+ return empty;
+}
+
+/*** host syncpt interrupt service functions ***/
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
+ struct nvhost_master *dev = intr_to_dev(intr);
+
+ (void)process_wait_list(intr, syncpt,
+ nvhost_syncpt_update_min(&dev->syncpt, id));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * free a syncpt's irq. syncpt interrupt should be disabled first.
+ */
+static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+ if (syncpt->irq_requested) {
+ free_irq(syncpt->irq, syncpt);
+ syncpt->irq_requested = 0;
+ }
+}
+
+
+/*** host general interrupt service functions ***/
+
+
+/*** Main API ***/
+
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void *_waiter,
+ void **ref)
+{
+ struct nvhost_waitlist *waiter = _waiter;
+ struct nvhost_intr_syncpt *syncpt;
+ int queue_was_empty;
+ int err;
+
+ BUG_ON(waiter == NULL);
+
+ BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
+ intr_op(intr).enable_syncpt_intr));
+
+ /* initialize a new waiter */
+ INIT_LIST_HEAD(&waiter->list);
+ kref_init(&waiter->refcount);
+ if (ref)
+ kref_get(&waiter->refcount);
+ waiter->thresh = thresh;
+ waiter->action = action;
+ atomic_set(&waiter->state, WLS_PENDING);
+ waiter->data = data;
+ waiter->count = 1;
+
+ BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
+ syncpt = intr->syncpt + id;
+
+ spin_lock(&syncpt->lock);
+
+ /* lazily request irq for this sync point */
+ if (!syncpt->irq_requested) {
+ spin_unlock(&syncpt->lock);
+
+ mutex_lock(&intr->mutex);
+ BUG_ON(!(intr_op(intr).request_syncpt_irq));
+ err = intr_op(intr).request_syncpt_irq(syncpt);
+ mutex_unlock(&intr->mutex);
+
+ if (err) {
+ kfree(waiter);
+ return err;
+ }
+
+ spin_lock(&syncpt->lock);
+ }
+
+ queue_was_empty = list_empty(&syncpt->wait_head);
+
+ if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
+ /* added at head of list - new threshold value */
+ intr_op(intr).set_syncpt_threshold(intr, id, thresh);
+
+ /* added as first waiter - enable interrupt */
+ if (queue_was_empty)
+ intr_op(intr).enable_syncpt_intr(intr, id);
+ }
+
+ spin_unlock(&syncpt->lock);
+
+ if (ref)
+ *ref = waiter;
+ return 0;
+}
+
+void *nvhost_intr_alloc_waiter(void)
+{
+ return kzalloc(sizeof(struct nvhost_waitlist),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
+{
+ struct nvhost_waitlist *waiter = ref;
+
+ while (atomic_cmpxchg(&waiter->state,
+ WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
+ schedule();
+
+ kref_put(&waiter->refcount, waiter_release);
+}
+
+
+/*** Init & shutdown ***/
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+ struct nvhost_master *host =
+ container_of(intr, struct nvhost_master, intr);
+ u32 nb_pts = host->syncpt.nb_pts;
+
+ mutex_init(&intr->mutex);
+ intr->host_general_irq = irq_gen;
+ intr->host_general_irq_requested = false;
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < nb_pts;
+ ++id, ++syncpt) {
+ syncpt->intr = &host->intr;
+ syncpt->id = id;
+ syncpt->irq = irq_sync + id;
+ syncpt->irq_requested = 0;
+ spin_lock_init(&syncpt->lock);
+ INIT_LIST_HEAD(&syncpt->wait_head);
+ snprintf(syncpt->thresh_irq_name,
+ sizeof(syncpt->thresh_irq_name),
+ "host_sp_%02d", id);
+ }
+
+ return 0;
+}
+
+void nvhost_intr_deinit(struct nvhost_intr *intr)
+{
+ nvhost_intr_stop(intr);
+}
+
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
+{
+ BUG_ON(!(intr_op(intr).init_host_sync &&
+ intr_op(intr).set_host_clocks_per_usec &&
+ intr_op(intr).request_host_general_irq));
+
+ mutex_lock(&intr->mutex);
+
+ intr_op(intr).init_host_sync(intr);
+ intr_op(intr).set_host_clocks_per_usec(intr,
+ (hz + 1000000 - 1)/1000000);
+
+ intr_op(intr).request_host_general_irq(intr);
+
+ mutex_unlock(&intr->mutex);
+}
+
+void nvhost_intr_stop(struct nvhost_intr *intr)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+ u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;
+
+ BUG_ON(!(intr_op(intr).disable_all_syncpt_intrs &&
+ intr_op(intr).free_host_general_irq));
+
+ mutex_lock(&intr->mutex);
+
+ intr_op(intr).disable_all_syncpt_intrs(intr);
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < nb_pts;
+ ++id, ++syncpt) {
+ struct nvhost_waitlist *waiter, *next;
+ list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
+ if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
+ == WLS_CANCELLED) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+
+ if (!list_empty(&syncpt->wait_head)) { /* output diagnostics */
+ printk(KERN_DEBUG "%s id=%d\n", __func__, id);
+			BUG();
+ }
+
+ free_syncpt_irq(syncpt);
+ }
+
+ intr_op(intr).free_host_general_irq(intr);
+
+ mutex_unlock(&intr->mutex);
+}
diff --git a/drivers/video/tegra/host/nvhost_intr.h b/drivers/video/tegra/host/nvhost_intr.h
new file mode 100644
index 000000000000..95fd94c522ad
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.h
@@ -0,0 +1,123 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+struct nvhost_channel;
+
+enum nvhost_intr_action {
+ /**
+ * Perform cleanup after a submit has completed.
+ * 'data' points to a channel
+ */
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+ /**
+ * Save a HW context.
+ * 'data' points to a context
+ */
+ NVHOST_INTR_ACTION_CTXSAVE,
+
+ /**
+ * Restore a HW context.
+ * 'data' points to a context
+ */
+ NVHOST_INTR_ACTION_CTXRESTORE,
+
+ /**
+ * Wake up a task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP,
+
+ /**
+	 * Wake up an interruptible task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+ NVHOST_INTR_ACTION_COUNT
+};
+
+struct nvhost_intr;
+
+struct nvhost_intr_syncpt {
+ struct nvhost_intr *intr;
+ u8 id;
+ u8 irq_requested;
+ u16 irq;
+ spinlock_t lock;
+ struct list_head wait_head;
+ char thresh_irq_name[12];
+};
+
+struct nvhost_intr {
+ struct nvhost_intr_syncpt *syncpt;
+ struct mutex mutex;
+ int host_general_irq;
+ bool host_general_irq_requested;
+};
+#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
+#define intr_op(intr) (intr_to_dev(intr)->op.intr)
+#define intr_syncpt_to_intr(is) (is->intr)
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter allocated with nvhost_intr_alloc_waiter - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void *waiter,
+ void **ref);
+
+/**
+ * Allocate a waiter.
+ */
+void *nvhost_intr_alloc_waiter(void);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref);
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz);
+void nvhost_intr_stop(struct nvhost_intr *intr);
+
+irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id);
+#endif
diff --git a/drivers/video/tegra/host/nvhost_job.c b/drivers/video/tegra/host/nvhost_job.c
new file mode 100644
index 000000000000..c7e700fce264
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_job.c
@@ -0,0 +1,321 @@
+/*
+ * drivers/video/tegra/host/nvhost_job.c
+ *
+ * Tegra Graphics Host Job
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/err.h>
+#include <mach/nvmap.h>
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "dev.h"
+
+/* Magic to use to fill freed handle slots */
+#define BAD_MAGIC 0xdeadbeef
+
+static int job_size(struct nvhost_submit_hdr_ext *hdr)
+{
+ int num_pins = hdr ? (hdr->num_relocs + hdr->num_cmdbufs)*2 : 0;
+ int num_waitchks = hdr ? hdr->num_waitchks : 0;
+
+ return sizeof(struct nvhost_job)
+ + num_pins * sizeof(struct nvmap_pinarray_elem)
+ + num_pins * sizeof(struct nvmap_handle *)
+ + num_waitchks * sizeof(struct nvhost_waitchk);
+}
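
A worked example of the single allocation computed here, with illustrative
header values: 2 cmdbufs, 3 relocs and 1 waitchk give
num_pins = (3 + 2) * 2 = 10, so the job is allocated as

/*
 *	sizeof(struct nvhost_job)
 *	+ 10 * sizeof(struct nvmap_pinarray_elem)	(pinarray)
 *	+ 10 * sizeof(struct nvmap_handle *)		(unpins)
 *	+  1 * sizeof(struct nvhost_waitchk)		(waitchk)
 *
 * and init_fields() later carves pinarray, unpins and waitchk out of this
 * one block in that order.
 */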
+
+static int gather_size(int num_cmdbufs)
+{
+ return num_cmdbufs * sizeof(struct nvhost_channel_gather);
+}
+
+static void free_gathers(struct nvhost_job *job)
+{
+ if (job->gathers) {
+ nvmap_munmap(job->gather_mem, job->gathers);
+ job->gathers = NULL;
+ }
+ if (job->gather_mem) {
+ nvmap_free(job->nvmap, job->gather_mem);
+ job->gather_mem = NULL;
+ }
+}
+
+static int alloc_gathers(struct nvhost_job *job,
+ int num_cmdbufs)
+{
+ int err = 0;
+
+ job->gather_mem = NULL;
+ job->gathers = NULL;
+ job->gather_mem_size = 0;
+
+ if (num_cmdbufs) {
+ /* Allocate memory */
+ job->gather_mem = nvmap_alloc(job->nvmap,
+ gather_size(num_cmdbufs),
+ 32, NVMAP_HANDLE_CACHEABLE);
+ if (IS_ERR_OR_NULL(job->gather_mem)) {
+ err = PTR_ERR(job->gather_mem);
+ job->gather_mem = NULL;
+ goto error;
+ }
+ job->gather_mem_size = gather_size(num_cmdbufs);
+
+ /* Map memory to kernel */
+ job->gathers = nvmap_mmap(job->gather_mem);
+ if (IS_ERR_OR_NULL(job->gathers)) {
+ err = PTR_ERR(job->gathers);
+ job->gathers = NULL;
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ free_gathers(job);
+ return err;
+}
+
+static int realloc_gathers(struct nvhost_job *oldjob,
+ struct nvhost_job *newjob,
+ int num_cmdbufs)
+{
+ int err = 0;
+
+ /* Check if we can reuse gather buffer */
+ if (oldjob->gather_mem_size < gather_size(num_cmdbufs)
+ || oldjob->nvmap != newjob->nvmap) {
+ free_gathers(oldjob);
+ err = alloc_gathers(newjob, num_cmdbufs);
+ } else {
+ newjob->gather_mem = oldjob->gather_mem;
+ newjob->gathers = oldjob->gathers;
+ newjob->gather_mem_size = oldjob->gather_mem_size;
+
+ oldjob->gather_mem = NULL;
+ oldjob->gathers = NULL;
+ oldjob->gather_mem_size = 0;
+ }
+ return err;
+}
+
+static void init_fields(struct nvhost_job *job,
+ struct nvhost_submit_hdr_ext *hdr,
+ int priority, int clientid)
+{
+ int num_pins = hdr ? (hdr->num_relocs + hdr->num_cmdbufs)*2 : 0;
+ int num_waitchks = hdr ? hdr->num_waitchks : 0;
+ void *mem = job;
+
+ /* First init state to zero */
+ job->num_gathers = 0;
+ job->num_pins = 0;
+ job->num_unpins = 0;
+ job->num_waitchk = 0;
+ job->waitchk_mask = 0;
+ job->syncpt_id = 0;
+ job->syncpt_incrs = 0;
+ job->syncpt_end = 0;
+ job->priority = priority;
+ job->clientid = clientid;
+ job->null_kickoff = false;
+ job->first_get = 0;
+ job->num_slots = 0;
+
+ /* Redistribute memory to the structs */
+ mem += sizeof(struct nvhost_job);
+ if (num_pins) {
+ job->pinarray = mem;
+ mem += num_pins * sizeof(struct nvmap_pinarray_elem);
+ job->unpins = mem;
+ mem += num_pins * sizeof(struct nvmap_handle *);
+ } else {
+ job->pinarray = NULL;
+ job->unpins = NULL;
+ }
+
+ job->waitchk = num_waitchks ? mem : NULL;
+
+ /* Copy information from header */
+ if (hdr) {
+ job->waitchk_mask = hdr->waitchk_mask;
+ job->syncpt_id = hdr->syncpt_id;
+ job->syncpt_incrs = hdr->syncpt_incrs;
+ }
+}
+
+struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
+ struct nvhost_hwctx *hwctx,
+ struct nvhost_submit_hdr_ext *hdr,
+ struct nvmap_client *nvmap,
+ int priority,
+ int clientid)
+{
+ struct nvhost_job *job = NULL;
+ int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
+ int err = 0;
+
+ job = kzalloc(job_size(hdr), GFP_KERNEL);
+ if (!job)
+ goto error;
+
+ kref_init(&job->ref);
+ job->ch = ch;
+ job->hwctx = hwctx;
+ job->nvmap = nvmap ? nvmap_client_get(nvmap) : NULL;
+
+ err = alloc_gathers(job, num_cmdbufs);
+ if (err)
+ goto error;
+
+ init_fields(job, hdr, priority, clientid);
+
+ return job;
+
+error:
+ if (job)
+ nvhost_job_put(job);
+ return NULL;
+}
+
+struct nvhost_job *nvhost_job_realloc(
+ struct nvhost_job *oldjob,
+ struct nvhost_submit_hdr_ext *hdr,
+ struct nvmap_client *nvmap,
+ int priority, int clientid)
+{
+ struct nvhost_job *newjob = NULL;
+ int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
+ int err = 0;
+
+ newjob = kzalloc(job_size(hdr), GFP_KERNEL);
+ if (!newjob)
+ goto error;
+ kref_init(&newjob->ref);
+ newjob->ch = oldjob->ch;
+ newjob->hwctx = oldjob->hwctx;
+ newjob->timeout = oldjob->timeout;
+ newjob->nvmap = nvmap ? nvmap_client_get(nvmap) : NULL;
+
+ err = realloc_gathers(oldjob, newjob, num_cmdbufs);
+ if (err)
+ goto error;
+
+ nvhost_job_put(oldjob);
+
+ init_fields(newjob, hdr, priority, clientid);
+
+ return newjob;
+
+error:
+ if (newjob)
+ nvhost_job_put(newjob);
+ if (oldjob)
+ nvhost_job_put(oldjob);
+ return NULL;
+}
+
+void nvhost_job_get(struct nvhost_job *job)
+{
+ kref_get(&job->ref);
+}
+
+static void job_free(struct kref *ref)
+{
+ struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);
+
+ if (job->gathers)
+ nvmap_munmap(job->gather_mem, job->gathers);
+ if (job->gather_mem)
+ nvmap_free(job->nvmap, job->gather_mem);
+ if (job->nvmap)
+ nvmap_client_put(job->nvmap);
+ kfree(job);
+}
+
+void nvhost_job_put(struct nvhost_job *job)
+{
+ kref_put(&job->ref, job_free);
+}
+
+void nvhost_job_add_gather(struct nvhost_job *job,
+ u32 mem_id, u32 words, u32 offset)
+{
+ struct nvmap_pinarray_elem *pin;
+ struct nvhost_channel_gather *cur_gather =
+ &job->gathers[job->num_gathers];
+
+ pin = &job->pinarray[job->num_pins++];
+ pin->patch_mem = (u32)nvmap_ref_to_handle(job->gather_mem);
+ pin->patch_offset = (void *)&(cur_gather->mem) - (void *)job->gathers;
+ pin->pin_mem = nvmap_convert_handle_u2k(mem_id);
+ pin->pin_offset = offset;
+ cur_gather->words = words;
+ cur_gather->mem_id = mem_id;
+ cur_gather->offset = offset;
+ job->num_gathers += 1;
+}
+
+int nvhost_job_pin(struct nvhost_job *job)
+{
+ int err = 0;
+
+ /* pin mem handles and patch physical addresses */
+ job->num_unpins = nvmap_pin_array(job->nvmap,
+ nvmap_ref_to_handle(job->gather_mem),
+ job->pinarray, job->num_pins,
+ job->unpins);
+ if (job->num_unpins < 0)
+ err = job->num_unpins;
+
+ return err;
+}
+
+void nvhost_job_unpin(struct nvhost_job *job)
+{
+ nvmap_unpin_handles(job->nvmap, job->unpins,
+ job->num_unpins);
+ memset(job->unpins, BAD_MAGIC,
+ job->num_unpins * sizeof(struct nvmap_handle *));
+}
+
+/**
+ * Debug routine used to dump job entries
+ */
+void nvhost_job_dump(struct device *dev, struct nvhost_job *job)
+{
+ dev_dbg(dev, " SYNCPT_ID %d\n",
+ job->syncpt_id);
+ dev_dbg(dev, " SYNCPT_VAL %d\n",
+ job->syncpt_end);
+ dev_dbg(dev, " FIRST_GET 0x%x\n",
+ job->first_get);
+ dev_dbg(dev, " TIMEOUT %d\n",
+ job->timeout);
+ dev_dbg(dev, " CTX 0x%p\n",
+ job->hwctx);
+ dev_dbg(dev, " NUM_SLOTS %d\n",
+ job->num_slots);
+ dev_dbg(dev, " NUM_HANDLES %d\n",
+ job->num_unpins);
+}
diff --git a/drivers/video/tegra/host/nvhost_job.h b/drivers/video/tegra/host/nvhost_job.h
new file mode 100644
index 000000000000..d00d60f2164a
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_job.h
@@ -0,0 +1,140 @@
+/*
+ * drivers/video/tegra/host/nvhost_job.h
+ *
+ * Tegra Graphics Host Job
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_JOB_H
+#define __NVHOST_JOB_H
+
+#include <linux/nvhost_ioctl.h>
+
+struct nvhost_channel;
+struct nvhost_hwctx;
+struct nvmap_client;
+struct nvhost_waitchk;
+struct nvmap_handle;
+
+/*
+ * Each submit is tracked as a nvhost_job.
+ */
+struct nvhost_job {
+ /* When refcount goes to zero, job can be freed */
+ struct kref ref;
+
+ /* Channel where job is submitted to */
+ struct nvhost_channel *ch;
+
+ /* Hardware context valid for this client */
+ struct nvhost_hwctx *hwctx;
+ int clientid;
+
+ /* Nvmap to be used for pinning & unpinning memory */
+ struct nvmap_client *nvmap;
+
+ /* Gathers and their memory */
+ struct nvmap_handle_ref *gather_mem;
+ struct nvhost_channel_gather *gathers;
+ int num_gathers;
+ int gather_mem_size;
+
+ /* Wait checks to be processed at submit time */
+ struct nvhost_waitchk *waitchk;
+ int num_waitchk;
+ u32 waitchk_mask;
+
+ /* Array of handles to be pinned & unpinned */
+ struct nvmap_pinarray_elem *pinarray;
+ int num_pins;
+ struct nvmap_handle **unpins;
+ int num_unpins;
+
+ /* Sync point id, number of increments and end related to the submit */
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 syncpt_end;
+
+ /* Priority of this submit. */
+ int priority;
+
+ /* Maximum time to wait for this job */
+ int timeout;
+
+ /* Null kickoff prevents submit from being sent to hardware */
+ bool null_kickoff;
+
+ /* Index and number of slots used in the push buffer */
+ int first_get;
+ int num_slots;
+};
+
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit announced in the submit header.
+ */
+struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
+ struct nvhost_hwctx *hwctx,
+ struct nvhost_submit_hdr_ext *hdr,
+ struct nvmap_client *nvmap,
+ int priority, int clientid);
+
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit announced in the submit header. Gather memory from
+ * oldjob will be reused, and nvhost_job_put() will be called on it.
+ */
+struct nvhost_job *nvhost_job_realloc(struct nvhost_job *oldjob,
+ struct nvhost_submit_hdr_ext *hdr,
+ struct nvmap_client *nvmap,
+ int priority, int clientid);
+
+/*
+ * Add a gather to a job.
+ */
+void nvhost_job_add_gather(struct nvhost_job *job,
+ u32 mem_id, u32 words, u32 offset);
+
+/*
+ * Increment reference going to nvhost_job.
+ */
+void nvhost_job_get(struct nvhost_job *job);
+
+/*
+ * Decrement reference job, free if goes to zero.
+ */
+void nvhost_job_put(struct nvhost_job *job);
+
+/*
+ * Pin memory related to job. This handles relocation of addresses to the
+ * host1x address space. Handles both the gather memory and any other memory
+ * referred to from the gather buffers.
+ */
+int nvhost_job_pin(struct nvhost_job *job);
+
+/*
+ * Unpin memory related to job.
+ */
+void nvhost_job_unpin(struct nvhost_job *job);
+
+/*
+ * Dump contents of job to debug output.
+ */
+void nvhost_job_dump(struct device *dev, struct nvhost_job *job);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c
new file mode 100644
index 000000000000..0fa6d3e1ce20
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include "nvhost_syncpt.h"
+#include "dev.h"
+
+#define MAX_STUCK_CHECK_COUNT 15
+
+/**
+ * Resets syncpoint and waitbase values to sw shadows
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base));
+
+ for (i = 0; i < sp->nb_pts; i++)
+ syncpt_op(sp).reset(sp, i);
+ for (i = 0; i < sp->nb_bases; i++)
+ syncpt_op(sp).reset_wait_base(sp, i);
+ wmb();
+}
+
+/**
+ * Updates sw shadow state for client managed registers
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));
+
+ for (i = 0; i < sp->nb_pts; i++) {
+ if (client_managed(i))
+ syncpt_op(sp).update_min(sp, i);
+ else
+ BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+ }
+
+ for (i = 0; i < sp->nb_bases; i++)
+ syncpt_op(sp).read_wait_base(sp, i);
+}
+
+/**
+ * Updates the last value read from hardware.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ BUG_ON(!syncpt_op(sp).update_min);
+
+ return syncpt_op(sp).update_min(sp, id);
+}
+
+/**
+ * Get the current syncpoint value
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+ BUG_ON(!syncpt_op(sp).update_min);
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ val = syncpt_op(sp).update_min(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return val;
+}
+
+/**
+ * Get the current syncpoint base
+ */
+u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+ BUG_ON(!syncpt_op(sp).read_wait_base);
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ syncpt_op(sp).read_wait_base(sp, id);
+ val = sp->base_val[id];
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ BUG_ON(!syncpt_op(sp).cpu_incr);
+ syncpt_op(sp).cpu_incr(sp, id);
+}
+
+/**
+ * Increment syncpoint value from cpu, updating cache
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ nvhost_syncpt_incr_max(sp, id, 1);
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ nvhost_syncpt_cpu_incr(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+}
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ */
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+ u32 thresh, u32 timeout, u32 *value)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ void *waiter;
+ int err = 0, check_count = 0, low_timeout = 0;
+
+ if (value)
+ *value = 0;
+
+ BUG_ON(!syncpt_op(sp).update_min);
+ if (!nvhost_syncpt_check_max(sp, id, thresh)) {
+ dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+ "wait %d (%s) for (%d) wouldn't be met (max %d)\n",
+ id, syncpt_op(sp).name(sp, id), thresh,
+ nvhost_syncpt_read_max(sp, id));
+ nvhost_debug_dump(syncpt_to_dev(sp));
+ return -EINVAL;
+ }
+
+ /* first check cache */
+ if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
+ if (value)
+ *value = nvhost_syncpt_read_min(sp, id);
+ return 0;
+ }
+
+ /* keep host alive */
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+
+ if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
+ /* try to read from register */
+ u32 val = syncpt_op(sp).update_min(sp, id);
+ if ((s32)(val - thresh) >= 0) {
+ if (value)
+ *value = val;
+ goto done;
+ }
+ }
+
+ if (!timeout) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* schedule a wakeup when the syncpoint value is reached */
+ waiter = nvhost_intr_alloc_waiter();
+ if (!waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
+ waiter,
+ &ref);
+ if (err)
+ goto done;
+
+ err = -EAGAIN;
+ /* wait for the syncpoint, or timeout, or signal */
+ while (timeout) {
+ u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+ int remain = wait_event_interruptible_timeout(wq,
+ nvhost_syncpt_min_cmp(sp, id, thresh),
+ check);
+ if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
+ if (value)
+ *value = nvhost_syncpt_read_min(sp, id);
+ err = 0;
+ break;
+ }
+ if (remain < 0) {
+ err = remain;
+ break;
+ }
+ if (timeout != NVHOST_NO_TIMEOUT) {
+ if (timeout < SYNCPT_CHECK_PERIOD) {
+ /* Caller-specified timeout may be impractically low */
+ low_timeout = timeout;
+ }
+ timeout -= check;
+ }
+ if (timeout) {
+ dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+ "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
+ current->comm, id, syncpt_op(sp).name(sp, id),
+ thresh, timeout);
+ syncpt_op(sp).debug(sp);
+ if (check_count > MAX_STUCK_CHECK_COUNT) {
+ if (low_timeout) {
+ dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+ "is timeout %d too low?\n",
+ low_timeout);
+ }
+ nvhost_debug_dump(syncpt_to_dev(sp));
+ BUG();
+ }
+ check_count++;
+ }
+ }
+ nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);
+
+done:
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return err;
+}
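+
+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this file's
+ * API): a client that has queued `incrs` increments against syncpoint `id`
+ * would typically reserve a threshold with nvhost_syncpt_incr_max() and
+ * then block on it, e.g.:
+ *
+ *     u32 thresh = nvhost_syncpt_incr_max(sp, id, incrs);
+ *     u32 reached;
+ *     int err = nvhost_syncpt_wait_timeout(sp, id, thresh,
+ *                                          2 * SYNCPT_CHECK_PERIOD, &reached);
+ *
+ * A zero timeout only polls (-EAGAIN if unmet); NVHOST_NO_TIMEOUT waits
+ * indefinitely, re-checking every SYNCPT_CHECK_PERIOD jiffies.
+ */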
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ syncpt_op(sp).debug(sp);
+}
+
+/* check for old WAITs to be removed (avoiding a wrap) */
+int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
+ struct nvmap_client *nvmap,
+ u32 waitchk_mask,
+ struct nvhost_waitchk *wait,
+ int num_waitchk)
+{
+ return syncpt_op(sp).wait_check(sp, nvmap,
+ waitchk_mask, wait, num_waitchk);
+}
diff --git a/drivers/video/tegra/host/nvhost_syncpt.h b/drivers/video/tegra/host/nvhost_syncpt.h
new file mode 100644
index 000000000000..0dfb11775980
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.h
@@ -0,0 +1,162 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/nvhost.h>
+#include <mach/nvmap.h>
+#include <linux/atomic.h>
+
+struct nvhost_syncpt;
+struct nvhost_waitchk;
+
+/* host managed and invalid syncpt id */
+#define NVSYNCPT_GRAPHICS_HOST (0)
+#define NVSYNCPT_INVALID (-1)
+
+struct nvhost_syncpt {
+ atomic_t *min_val;
+ atomic_t *max_val;
+ u32 *base_val;
+ u32 nb_pts;
+ u32 nb_bases;
+ u32 client_managed;
+};
+
+int nvhost_syncpt_init(struct nvhost_syncpt *);
+#define client_managed(id) (BIT(id) & sp->client_managed)
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
+#define syncpt_op(sp) (syncpt_to_dev(sp)->op.syncpt)
+#define SYNCPT_CHECK_PERIOD (2*HZ)
+
+/**
+ * Updates the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+ u32 id, u32 incrs)
+{
+ return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
+
+/**
+ * Sets the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+ u32 id, u32 val)
+{
+ atomic_set(&sp->max_val[id], val);
+ smp_wmb();
+ return val;
+}
+
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val[id]);
+}
+
+static inline u32 nvhost_syncpt_read_min(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->min_val[id]);
+}
+
+static inline bool nvhost_syncpt_check_max(struct nvhost_syncpt *sp,
+ u32 id, u32 real)
+{
+ u32 max;
+ if (client_managed(id))
+ return true;
+ max = nvhost_syncpt_read_max(sp, id);
+ return (s32)(max - real) >= 0;
+}
+
+/**
+ * Returns true if syncpoint has reached threshold
+ */
+static inline bool nvhost_syncpt_min_cmp(struct nvhost_syncpt *sp,
+ u32 id, u32 thresh)
+{
+ u32 cur;
+ smp_rmb();
+ cur = (u32)atomic_read(&sp->min_val[id]);
+ return ((s32)(cur - thresh) >= 0);
+}
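+
+/*
+ * The signed subtraction above keeps the comparison correct across 32-bit
+ * wrap-around. For example, with cur = 0x00000002 and thresh = 0xfffffffe,
+ * (s32)(cur - thresh) evaluates to 4, so a threshold set shortly before the
+ * counter wrapped is still reported as reached.
+ */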
+
+/**
+ * Returns true if syncpoint min == max
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+ int min, max;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ max = atomic_read(&sp->max_val[id]);
+ return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+ u32 timeout, u32 *value);
+
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+ return nvhost_syncpt_wait_timeout(sp, id, thresh,
+ MAX_SCHEDULE_TIMEOUT, NULL);
+}
+
+/*
+ * Check driver supplied waitchk structs for syncpt thresholds
+ * that have already been satisfied and NULL the comparison (to
+ * avoid a wrap condition in the HW).
+ *
+ * @param: sp - global shadowed syncpt struct
+ * @param: nvmap - needed to access command buffer
+ * @param: mask - bit mask of syncpt IDs referenced in WAITs
+ * @param: wait - start of filled in array of waitchk structs
+ * @param: num_waitchk - number of valid waitchk structs in the wait array
+ */
+int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
+ struct nvmap_client *nvmap,
+ u32 mask,
+ struct nvhost_waitchk *wait,
+ int num_waitchk);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+#endif
diff --git a/drivers/video/tegra/host/t20/Makefile b/drivers/video/tegra/host/t20/Makefile
new file mode 100644
index 000000000000..c2ade9bf925b
--- /dev/null
+++ b/drivers/video/tegra/host/t20/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-t20-objs = \
+ t20.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t20.o
diff --git a/drivers/video/tegra/host/t20/t20.c b/drivers/video/tegra/host/t20/t20.c
new file mode 100644
index 000000000000..510e0eb5f2df
--- /dev/null
+++ b/drivers/video/tegra/host/t20/t20.c
@@ -0,0 +1,221 @@
+/*
+ * drivers/video/tegra/host/t20/t20.c
+ *
+ * Tegra Graphics Init for T20 Architecture Chips
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <mach/powergate.h>
+#include "dev.h"
+#include "t20.h"
+#include "host1x/host1x_channel.h"
+#include "host1x/host1x_syncpt.h"
+#include "host1x/host1x_hardware.h"
+#include "host1x/host1x_cdma.h"
+#include "gr3d/gr3d.h"
+#include "gr3d/gr3d_t20.h"
+#include "mpe/mpe.h"
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+#define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1)
+
+static struct nvhost_device devices[] = {
+ {.name = "gr3d", .id = -1 },
+ {.name = "gr2d", .id = -1 },
+ {.name = "isp", .id = -1 },
+ {.name = "vi", .id = -1 },
+ {.name = "mpe", .id = -1 },
+ {.name = "dsi", .id = -1 }
+};
+
+const struct nvhost_channeldesc nvhost_t20_channelmap[] = {
+{
+ /* channel 0 */
+ .name = "display",
+ .syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
+ BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
+ BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+ .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 1 */
+ .name = "gr3d",
+ .syncpts = BIT(NVSYNCPT_3D),
+ .waitbases = BIT(NVWAITBASE_3D),
+ .modulemutexes = BIT(NVMODMUTEX_3D),
+ .class = NV_GRAPHICS_3D_CLASS_ID,
+ .module = {
+ .prepare_poweroff = nvhost_gr3d_prepare_power_off,
+ .clocks = {{"gr3d", UINT_MAX}, {"emc", UINT_MAX}, {} },
+ .powergate_ids = {TEGRA_POWERGATE_3D, -1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 2 */
+ .name = "gr2d",
+ .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+ .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+ .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+ BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+ .module = {
+ .clocks = {{"gr2d", UINT_MAX} ,
+ {"epp", UINT_MAX} ,
+ {"emc", UINT_MAX} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ .clockgate_delay = 0,
+ }
+},
+{
+ /* channel 3 */
+ .name = "isp",
+ .syncpts = 0,
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 4 */
+ .name = "vi",
+ .syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+ BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4),
+ .modulemutexes = BIT(NVMODMUTEX_VI),
+ .exclusive = true,
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ }
+},
+{
+ /* channel 5 */
+ .name = "mpe",
+ .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+ BIT(NVSYNCPT_MPE_WR_SAFE),
+ .waitbases = BIT(NVWAITBASE_MPE),
+ .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ .waitbasesync = true,
+ .keepalive = true,
+ .module = {
+ .prepare_poweroff = nvhost_mpe_prepare_power_off,
+ .clocks = {{"mpe", UINT_MAX}, {"emc", UINT_MAX}, {} },
+ .powergate_ids = {TEGRA_POWERGATE_MPE, -1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 6 */
+ .name = "dsi",
+ .syncpts = BIT(NVSYNCPT_DSI),
+ .modulemutexes = BIT(NVMODMUTEX_DSI),
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+} };
+
+static inline void __iomem *t20_channel_aperture(void __iomem *p, int ndx)
+{
+ p += NV_HOST1X_CHANNEL0_BASE;
+ p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+ return p;
+}
+
+static inline int t20_nvhost_hwctx_handler_init(
+ struct nvhost_hwctx_handler *h,
+ const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return nvhost_gr3d_t20_ctxhandler_init(h);
+ else if (strcmp(module, "mpe") == 0)
+ return nvhost_mpe_ctxhandler_init(h);
+ return 0;
+}
+
+static int t20_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index)
+{
+ ch->dev = dev;
+ ch->chid = index;
+ ch->desc = nvhost_t20_channelmap + index;
+ mutex_init(&ch->reflock);
+ mutex_init(&ch->submitlock);
+
+ ch->aperture = t20_channel_aperture(dev->aperture, index);
+
+ return t20_nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
+}
+
+int nvhost_init_t20_channel_support(struct nvhost_master *host)
+{
+ host->nb_mlocks = NV_HOST1X_SYNC_MLOCK_NUM;
+ host->nb_channels = NVHOST_NUMCHANNELS;
+
+ host->op.channel.init = t20_channel_init;
+ host->op.channel.submit = host1x_channel_submit;
+ host->op.channel.read3dreg = host1x_channel_read_3d_reg;
+
+ return 0;
+}
+
+int nvhost_init_t20_support(struct nvhost_master *host)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(devices); i++)
+ nvhost_device_register(&devices[i]);
+
+ /* don't worry about cleaning up on failure... "remove" does it. */
+ err = nvhost_init_t20_channel_support(host);
+ if (err)
+ return err;
+ err = host1x_init_cdma_support(host);
+ if (err)
+ return err;
+ err = nvhost_init_t20_debug_support(host);
+ if (err)
+ return err;
+ err = host1x_init_syncpt_support(host);
+ if (err)
+ return err;
+ err = nvhost_init_t20_intr_support(host);
+ if (err)
+ return err;
+ err = nvhost_init_t20_cpuaccess_support(host);
+ if (err)
+ return err;
+ return 0;
+}
diff --git a/drivers/video/tegra/host/t20/t20.h b/drivers/video/tegra/host/t20/t20.h
new file mode 100644
index 000000000000..c7eac39ba089
--- /dev/null
+++ b/drivers/video/tegra/host/t20/t20.h
@@ -0,0 +1,35 @@
+/*
+ * drivers/video/tegra/host/t20/t20.h
+ *
+ * Tegra Graphics Chip support for T20
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _NVHOST_T20_H_
+#define _NVHOST_T20_H_
+
+struct nvhost_master;
+struct nvhost_module;
+
+int nvhost_init_t20_channel_support(struct nvhost_master *);
+int nvhost_init_t20_debug_support(struct nvhost_master *);
+int nvhost_init_t20_syncpt_support(struct nvhost_master *);
+int nvhost_init_t20_intr_support(struct nvhost_master *);
+int nvhost_init_t20_cpuaccess_support(struct nvhost_master *);
+int nvhost_t20_save_context(struct nvhost_module *mod, u32 syncpt_id);
+
+#endif /* _NVHOST_T20_H_ */
diff --git a/drivers/video/tegra/host/t30/Makefile b/drivers/video/tegra/host/t30/Makefile
new file mode 100644
index 000000000000..b343eb4fc7cc
--- /dev/null
+++ b/drivers/video/tegra/host/t30/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+
+nvhost-t30-objs = \
+ t30.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t30.o
diff --git a/drivers/video/tegra/host/t30/t30.c b/drivers/video/tegra/host/t30/t30.c
new file mode 100644
index 000000000000..425b352a66a8
--- /dev/null
+++ b/drivers/video/tegra/host/t30/t30.c
@@ -0,0 +1,244 @@
+/*
+ * drivers/video/tegra/host/t30/t30.c
+ *
+ * Tegra Graphics Init for T30 Architecture Chips
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mutex.h>
+#include <mach/powergate.h>
+#include "dev.h"
+#include "t30.h"
+#include "gr3d/gr3d.h"
+#include "mpe/mpe.h"
+#include "gr3d/gr3d_t30.h"
+#include "gr3d/scale3d.h"
+#include "host1x/host1x_hardware.h"
+#include "host1x/host1x_cdma.h"
+#include "host1x/host1x_syncpt.h"
+#include "gr3d/scale3d.h"
+#include "../chip_support.h"
+
+static struct nvhost_device devices[] = {
+ {.name = "gr3d", .id = -1 },
+ {.name = "gr2d", .id = -1 },
+ {.name = "isp", .id = -1 },
+ {.name = "vi", .id = -1 },
+ {.name = "mpe", .id = -1 },
+ {.name = "dsi", .id = -1 },
+};
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+#ifndef TEGRA_POWERGATE_3D1
+#define TEGRA_POWERGATE_3D1 -1
+#endif
+
+const struct nvhost_channeldesc nvhost_t30_channelmap[] = {
+{
+ /* channel 0 */
+ .name = "display",
+ .syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
+ BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
+ BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+ .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 1 */
+ .name = "gr3d",
+ .syncpts = BIT(NVSYNCPT_3D),
+ .waitbases = BIT(NVWAITBASE_3D),
+ .modulemutexes = BIT(NVMODMUTEX_3D),
+ .class = NV_GRAPHICS_3D_CLASS_ID,
+ .module = {
+ .prepare_poweroff = nvhost_gr3d_prepare_power_off,
+ .busy = nvhost_scale3d_notify_busy,
+ .idle = nvhost_scale3d_notify_idle,
+ .init = nvhost_scale3d_init,
+ .deinit = nvhost_scale3d_deinit,
+ .suspend = nvhost_scale3d_suspend,
+ .clocks = {{"gr3d", UINT_MAX},
+ {"gr3d2", UINT_MAX},
+ {"emc", UINT_MAX} },
+ .powergate_ids = {TEGRA_POWERGATE_3D,
+ TEGRA_POWERGATE_3D1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .can_powergate = true,
+ .powergate_delay = 100,
+ },
+},
+{
+ /* channel 2 */
+ .name = "gr2d",
+ .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+ .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+ .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+ BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+ .module = {
+ .clocks = {{"gr2d", 0},
+ {"epp", 0},
+ {"emc", 300000000} },
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ .clockgate_delay = 0,
+ },
+},
+{
+ /* channel 3 */
+ .name = "isp",
+ .syncpts = 0,
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 4 */
+ .name = "vi",
+ .syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+ BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4),
+ .modulemutexes = BIT(NVMODMUTEX_VI),
+ .exclusive = true,
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+},
+{
+ /* channel 5 */
+ .name = "mpe",
+ .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+ BIT(NVSYNCPT_MPE_WR_SAFE),
+ .waitbases = BIT(NVWAITBASE_MPE),
+ .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ .waitbasesync = true,
+ .keepalive = true,
+ .module = {
+ .prepare_poweroff = nvhost_mpe_prepare_power_off,
+ .clocks = {{"mpe", UINT_MAX}, {"emc", UINT_MAX}, {} },
+ .powergate_ids = {TEGRA_POWERGATE_MPE, -1},
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ .can_powergate = true,
+ .powergate_delay = 100,
+ },
+},
+{
+ /* channel 6 */
+ .name = "dsi",
+ .syncpts = BIT(NVSYNCPT_DSI),
+ .modulemutexes = BIT(NVMODMUTEX_DSI),
+ .module = {
+ NVHOST_MODULE_NO_POWERGATE_IDS,
+ NVHOST_DEFAULT_CLOCKGATE_DELAY,
+ },
+} };
+
+#define NVHOST_CHANNEL_BASE 0
+
+static inline int t30_nvhost_hwctx_handler_init(
+ struct nvhost_hwctx_handler *h,
+ const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return nvhost_gr3d_t30_ctxhandler_init(h);
+ else if (strcmp(module, "mpe") == 0)
+ return nvhost_mpe_ctxhandler_init(h);
+
+ return 0;
+}
+
+static inline void __iomem *t30_channel_aperture(void __iomem *p, int ndx)
+{
+ ndx += NVHOST_CHANNEL_BASE;
+ p += NV_HOST1X_CHANNEL0_BASE;
+ p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+ return p;
+}
+
+static int t30_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index)
+{
+ ch->dev = dev;
+ ch->chid = index;
+ ch->desc = nvhost_t30_channelmap + index;
+ mutex_init(&ch->reflock);
+ mutex_init(&ch->submitlock);
+
+ ch->aperture = t30_channel_aperture(dev->aperture, index);
+
+ return t30_nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
+}
+
+int nvhost_init_t30_channel_support(struct nvhost_master *host)
+{
+ int result = nvhost_init_t20_channel_support(host);
+ host->op.channel.init = t30_channel_init;
+
+ return result;
+}
+
+int nvhost_init_t30_debug_support(struct nvhost_master *host)
+{
+ nvhost_init_t20_debug_support(host);
+ host->op.debug.debug_init = nvhost_scale3d_debug_init;
+
+ return 0;
+}
+
+int nvhost_init_t30_support(struct nvhost_master *host)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(devices); i++)
+ nvhost_device_register(&devices[i]);
+
+ /* don't worry about cleaning up on failure... "remove" does it. */
+ err = nvhost_init_t30_channel_support(host);
+ if (err)
+ return err;
+ err = host1x_init_cdma_support(host);
+ if (err)
+ return err;
+ err = nvhost_init_t30_debug_support(host);
+ if (err)
+ return err;
+ err = host1x_init_syncpt_support(host);
+ if (err)
+ return err;
+ err = nvhost_init_t20_intr_support(host);
+ if (err)
+ return err;
+ err = nvhost_init_t20_cpuaccess_support(host);
+ if (err)
+ return err;
+ return 0;
+}
diff --git a/drivers/video/tegra/host/t30/t30.h b/drivers/video/tegra/host/t30/t30.h
new file mode 100644
index 000000000000..4c0c2d175d73
--- /dev/null
+++ b/drivers/video/tegra/host/t30/t30.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/video/tegra/host/t30/t30.h
+ *
+ * Tegra Graphics Chip support for Tegra3
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _NVHOST_T30_H_
+#define _NVHOST_T30_H_
+
+#include "../t20/t20.h"
+
+int nvhost_init_t30_channel_support(struct nvhost_master *);
+int nvhost_init_t30_debug_support(struct nvhost_master *);
+
+#endif /* _NVHOST_T30_H_ */
diff --git a/drivers/video/tegra/nvmap/Makefile b/drivers/video/tegra/nvmap/Makefile
new file mode 100644
index 000000000000..95d7f68836af
--- /dev/null
+++ b/drivers/video/tegra/nvmap/Makefile
@@ -0,0 +1,7 @@
+GCOV_PROFILE := y
+obj-y += nvmap.o
+obj-y += nvmap_dev.o
+obj-y += nvmap_handle.o
+obj-y += nvmap_heap.o
+obj-y += nvmap_ioctl.o
+obj-${CONFIG_NVMAP_RECLAIM_UNPINNED_VM} += nvmap_mru.o
\ No newline at end of file
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
new file mode 100644
index 000000000000..5fcdee61b71c
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -0,0 +1,867 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.c
+ *
+ * Memory manager for Tegra GPU
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMAP_HANDLE_VISITED (0x1ul << 31)
+
+/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
+static void map_iovmm_area(struct nvmap_handle *h)
+{
+ tegra_iovmm_addr_t va;
+ unsigned long i;
+
+ BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
+ BUG_ON(h->size & ~PAGE_MASK);
+ WARN_ON(!h->pgalloc.dirty);
+
+ for (va = h->pgalloc.area->iovm_start, i = 0;
+ va < (h->pgalloc.area->iovm_start + h->size);
+ i++, va += PAGE_SIZE) {
+ BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
+ tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
+ page_to_pfn(h->pgalloc.pages[i]));
+ }
+ h->pgalloc.dirty = false;
+}
+
+/* must be called inside nvmap_pin_lock, to ensure that an entire stream
+ * of pins will complete without racing with a second stream. handle should
+ * have nvmap_handle_get (or nvmap_validate_get) called before calling
+ * this function. */
+static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ struct tegra_iovmm_area *area;
+ BUG_ON(!h->alloc);
+
+ nvmap_mru_lock(client->share);
+ if (atomic_inc_return(&h->pin) == 1) {
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ area = nvmap_handle_iovmm_locked(client, h);
+ if (!area) {
+ /* no race here, inside the pin mutex */
+ atomic_dec(&h->pin);
+ nvmap_mru_unlock(client->share);
+ return -ENOMEM;
+ }
+ if (area != h->pgalloc.area)
+ h->pgalloc.dirty = true;
+ h->pgalloc.area = area;
+ }
+ }
+ nvmap_mru_unlock(client->share);
+ return 0;
+}
+
+/* doesn't need to be called inside nvmap_pin_lock, since this will only
+ * expand the available VM area */
+static int handle_unpin(struct nvmap_client *client,
+ struct nvmap_handle *h, int free_vm)
+{
+ int ret = 0;
+ nvmap_mru_lock(client->share);
+
+ if (atomic_read(&h->pin) == 0) {
+ nvmap_err(client, "%s unpinning unpinned handle %p\n",
+ current->group_leader->comm, h);
+ nvmap_mru_unlock(client->share);
+ return 0;
+ }
+
+ BUG_ON(!h->alloc);
+
+ if (!atomic_dec_return(&h->pin)) {
+ if (h->heap_pgalloc && h->pgalloc.area) {
+ /* if a secure handle is clean (i.e., mapped into
+ * IOVMM), it needs to be zapped on unpin. */
+ if (h->secure && !h->pgalloc.dirty) {
+ tegra_iovmm_zap_vm(h->pgalloc.area);
+ h->pgalloc.dirty = true;
+ }
+ if (free_vm) {
+ tegra_iovmm_free_vm(h->pgalloc.area);
+ h->pgalloc.area = NULL;
+ } else
+ nvmap_mru_insert_locked(client->share, h);
+ ret = 1;
+ }
+ }
+
+ nvmap_mru_unlock(client->share);
+ nvmap_handle_put(h);
+ return ret;
+}
+
+static int pin_array_locked(struct nvmap_client *client,
+ struct nvmap_handle **h, int count)
+{
+ int pinned;
+ int i;
+ int err = 0;
+
+ for (pinned = 0; pinned < count; pinned++) {
+ err = pin_locked(client, h[pinned]);
+ if (err)
+ break;
+ }
+
+ if (err) {
+ /* unpin pinned handles */
+ for (i = 0; i < pinned; i++) {
+ /* inc ref counter, because
+ * handle_unpin decrements it */
+ nvmap_handle_get(h[i]);
+ /* unpin handles and free vm */
+ handle_unpin(client, h[i], true);
+ }
+ }
+
+ if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
+ client->iovm_limit) {
+ /* The first attempt to pin in an empty iovmm
+ * may still fail because of fragmentation caused by
+ * placing handles in MRU areas. After such a failure,
+ * all MRU areas are cleaned and their iovm space is freed.
+ *
+ * We have to do the pinning again here since there might be
+ * no more incoming pin_wait wakeup calls from unpin
+ * operations */
+ for (pinned = 0; pinned < count; pinned++) {
+ err = pin_locked(client, h[pinned]);
+ if (err)
+ break;
+ }
+ if (err) {
+ pr_err("Pinning in empty iovmm failed!!!\n");
+ BUG_ON(1);
+ }
+ }
+ return err;
+}
+
+static int wait_pin_array_locked(struct nvmap_client *client,
+ struct nvmap_handle **h, int count)
+{
+ int ret = 0;
+
+ ret = pin_array_locked(client, h, count);
+
+ if (ret) {
+ ret = wait_event_interruptible(client->share->pin_wait,
+ !pin_array_locked(client, h, count));
+ }
+ return ret ? -EINTR : 0;
+}
+
+static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle *h;
+ int w;
+
+ h = nvmap_validate_get(client, id);
+ if (unlikely(!h)) {
+ nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
+ current->group_leader->comm, (void *)id);
+ return 0;
+ }
+
+ nvmap_err(client, "%s unpinning unreferenced handle %p\n",
+ current->group_leader->comm, h);
+ WARN_ON(1);
+
+ w = handle_unpin(client, h, false);
+ nvmap_handle_put(h);
+ return w;
+}
+
+void nvmap_unpin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ unsigned int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (!ids[i])
+ continue;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ struct nvmap_handle *h = ref->handle;
+ int e = atomic_add_unless(&ref->pin, -1, 0);
+
+ nvmap_ref_unlock(client);
+
+ if (!e) {
+ nvmap_err(client, "%s unpinning unpinned "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ } else {
+ do_wake |= handle_unpin(client, h, false);
+ }
+ } else {
+ nvmap_ref_unlock(client);
+ if (client->super)
+ do_wake |= handle_unpin_noref(client, ids[i]);
+ else
+ nvmap_err(client, "%s unpinning invalid "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ }
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+/* pins a list of handle_ref objects; same conditions apply as to
+ * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ int ret = 0;
+ unsigned int i;
+ struct nvmap_handle **h = (struct nvmap_handle **)ids;
+ struct nvmap_handle_ref *ref;
+
+ /* to optimize for the common case (client provided valid handle
+ * references and the pin succeeds), increment the handle_ref pin
+ * count during validation. in error cases, the tree will need to
+ * be re-walked, since the handle_ref is discarded so that an
+ * allocation isn't required. if a handle_ref is not found,
+ * locally validate that the caller has permission to pin the handle;
+ * handle_refs are not created in this case, so it is possible that
+ * if the caller crashes after pinning a global handle, the handle
+ * will be permanently leaked. */
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr && !ret; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ atomic_inc(&ref->pin);
+ nvmap_handle_get(h[i]);
+ } else {
+ struct nvmap_handle *verify;
+ nvmap_ref_unlock(client);
+ verify = nvmap_validate_get(client, ids[i]);
+ if (verify)
+ nvmap_warn(client, "%s pinning unreferenced "
+ "handle %p\n",
+ current->group_leader->comm, h[i]);
+ else
+ ret = -EPERM;
+ nvmap_ref_lock(client);
+ }
+ }
+ nvmap_ref_unlock(client);
+
+ nr = i;
+
+ if (ret)
+ goto out;
+
+ ret = mutex_lock_interruptible(&client->share->pin_lock);
+ if (WARN_ON(ret))
+ goto out;
+
+ ret = wait_pin_array_locked(client, h, nr);
+
+ mutex_unlock(&client->share->pin_lock);
+
+ if (ret) {
+ ret = -EINTR;
+ } else {
+ for (i = 0; i < nr; i++) {
+ if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+ map_iovmm_area(h[i]);
+ }
+ }
+
+out:
+ if (ret) {
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (!ref) {
+ nvmap_warn(client, "%s freed handle %p "
+ "during pinning\n",
+ current->group_leader->comm,
+ (void *)ids[i]);
+ continue;
+ }
+ atomic_dec(&ref->pin);
+ }
+ nvmap_ref_unlock(client);
+
+ for (i = 0; i < nr; i++)
+ nvmap_handle_put(h[i]);
+ }
+
+ return ret;
+}
+
+static phys_addr_t handle_phys(struct nvmap_handle *h)
+{
+ phys_addr_t addr;
+
+ if (h->heap_pgalloc && h->pgalloc.contig) {
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ } else if (h->heap_pgalloc) {
+ BUG_ON(!h->pgalloc.area);
+ addr = h->pgalloc.area->iovm_start;
+ } else {
+ addr = h->carveout->base;
+ }
+
+ return addr;
+}
+
+/* stores the physical address (+offset) of each handle relocation entry
+ * into its output location. see nvmap_pin_array for more details.
+ *
+ * each entry in arr (i.e., each relocation request) specifies two handles:
+ * the handle to pin (pin), and the handle where the address of pin should be
+ * written (patch). in pseudocode, this loop basically looks like:
+ *
+ * for (i = 0; i < nr; i++) {
+ * (pin, pin_offset, patch, patch_offset) = arr[i];
+ * patch[patch_offset] = address_of(pin) + pin_offset;
+ * }
+ */
+static int nvmap_reloc_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle *gather)
+{
+ struct nvmap_handle *last_patch = NULL;
+ unsigned int last_pfn = 0;
+ pte_t **pte;
+ void *addr;
+ int i;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle *patch;
+ struct nvmap_handle *pin;
+ phys_addr_t reloc_addr;
+ phys_addr_t phys;
+ unsigned int pfn;
+
+ /* all of the handles are validated and get'ted prior to
+ * calling this function, so casting is safe here */
+ pin = (struct nvmap_handle *)arr[i].pin_mem;
+
+ if (arr[i].patch_mem == (unsigned long)last_patch) {
+ patch = last_patch;
+ } else if (arr[i].patch_mem == (unsigned long)gather) {
+ patch = gather;
+ } else {
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ patch = nvmap_get_handle_id(client, arr[i].patch_mem);
+ if (!patch) {
+ nvmap_free_pte(client->dev, pte);
+ return -EPERM;
+ }
+ last_patch = patch;
+ }
+
+ if (patch->heap_pgalloc) {
+ unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
+ phys = page_to_phys(patch->pgalloc.pages[page]);
+ phys += (arr[i].patch_offset & ~PAGE_MASK);
+ } else {
+ phys = patch->carveout->base + arr[i].patch_offset;
+ }
+
+ pfn = __phys_to_pfn(phys);
+ if (pfn != last_pfn) {
+ pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
+ phys_addr_t kaddr = (phys_addr_t)addr;
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(kaddr);
+ last_pfn = pfn;
+ }
+
+ reloc_addr = handle_phys(pin) + arr[i].pin_offset;
+ reloc_addr >>= arr[i].reloc_shift;
+ __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
+ }
+
+ nvmap_free_pte(client->dev, pte);
+
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ wmb();
+
+ return 0;
+}
+
+static int nvmap_validate_get_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle **h)
+{
+ int i;
+ int ret = 0;
+ int count = 0;
+
+ nvmap_ref_lock(client);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (need_resched()) {
+ nvmap_ref_unlock(client);
+ schedule();
+ nvmap_ref_lock(client);
+ }
+
+ ref = _nvmap_validate_id_locked(client, arr[i].pin_mem);
+
+ if (!ref)
+ nvmap_warn(client, "falied to validate id\n");
+ else if (!ref->handle)
+ nvmap_warn(client, "id had no associated handle\n");
+ else if (!ref->handle->alloc)
+ nvmap_warn(client, "handle had no allocation\n");
+
+ if (!ref || !ref->handle || !ref->handle->alloc) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* a handle may be referenced multiple times in arr, but
+ * it will only be pinned once; this ensures that the
+ * minimum number of sync-queue slots in the host driver
+ * are dedicated to storing unpin lists, which allows
+ * for greater parallelism between the CPU and graphics
+ * processor */
+ if (ref->handle->flags & NVMAP_HANDLE_VISITED)
+ continue;
+
+ ref->handle->flags |= NVMAP_HANDLE_VISITED;
+
+ h[count] = nvmap_handle_get(ref->handle);
+ BUG_ON(!h[count]);
+ count++;
+ }
+
+ nvmap_ref_unlock(client);
+
+ if (ret) {
+ for (i = 0; i < count; i++) {
+ h[i]->flags &= ~NVMAP_HANDLE_VISITED;
+ nvmap_handle_put(h[i]);
+ }
+ }
+
+ return ret ?: count;
+}
+
+/* a typical mechanism host1x clients use for using the Tegra graphics
+ * processor is to build a command buffer which contains relocatable
+ * memory handle commands, and rely on the kernel to convert these in-place
+ * to addresses which are understood by the GPU hardware.
+ *
+ * this is implemented by having clients provide a sideband array
+ * of relocatable handles (+ offsets) and the location in the command
+ * buffer handle to patch with the GPU address when the client submits
+ * its command buffer to the host1x driver.
+ *
+ * the host driver also uses this relocation mechanism internally to
+ * relocate the client's (unpinned) command buffers into host-addressable
+ * memory.
+ *
+ * @client: nvmap_client which should be used for validation; should be
+ * owned by the process which is submitting command buffers
+ * @gather: special handle for relocated command buffer outputs used
+ * internally by the host driver. if this handle is encountered
+ * as an output handle in the relocation array, it is assumed
+ * to be a known-good output and is not validated.
+ * @arr: array of ((relocatable handle, offset), (output handle, offset))
+ * tuples.
+ * @nr: number of entries in arr
+ * @unique_arr: list of nvmap_handle objects which were pinned by
+ * nvmap_pin_array. must be unpinned by the caller after the
+ * command buffers referenced in gather have completed.
+ */
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique_arr)
+{
+ int count = 0;
+ int ret = 0;
+ int i;
+
+ if (mutex_lock_interruptible(&client->share->pin_lock)) {
+ nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
+ current->group_leader->comm);
+ return -EINTR;
+ }
+
+ count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
+ if (count < 0) {
+ mutex_unlock(&client->share->pin_lock);
+ nvmap_warn(client, "failed to validate pin array\n");
+ return count;
+ }
+
+ for (i = 0; i < count; i++)
+ unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;
+
+ ret = wait_pin_array_locked(client, unique_arr, count);
+
+ mutex_unlock(&client->share->pin_lock);
+
+ if (!ret)
+ ret = nvmap_reloc_pin_array(client, arr, nr, gather);
+
+ if (WARN_ON(ret)) {
+ for (i = 0; i < count; i++)
+ nvmap_handle_put(unique_arr[i]);
+ return ret;
+ } else {
+ for (i = 0; i < count; i++) {
+ if (unique_arr[i]->heap_pgalloc &&
+ unique_arr[i]->pgalloc.dirty)
+ map_iovmm_area(unique_arr[i]);
+ }
+ }
+
+ return count;
+}
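+
+/*
+ * Illustrative flow (hypothetical host1x-side caller; the local names
+ * NUM_RELOCS, gather_handle, relocs and nr_relocs are assumptions): the
+ * submit path pins every handle referenced by a command buffer, lets the
+ * hardware consume the buffers, and unpins them once the job completes:
+ *
+ *     struct nvmap_handle *unique[NUM_RELOCS];
+ *     int count = nvmap_pin_array(client, gather_handle, relocs,
+ *                                 nr_relocs, unique);
+ *     if (count < 0)
+ *             return count;
+ *     ... submit work, wait for completion ...
+ *     nvmap_unpin_handles(client, unique, count);
+ */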
+
+phys_addr_t nvmap_pin(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ phys_addr_t phys;
+ int ret = 0;
+
+ h = nvmap_handle_get(ref->handle);
+ if (WARN_ON(!h))
+ return -EINVAL;
+
+ atomic_inc(&ref->pin);
+
+ if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
+ ret = -EINTR;
+ } else {
+ ret = wait_pin_array_locked(client, &h, 1);
+ mutex_unlock(&client->share->pin_lock);
+ }
+
+ if (ret) {
+ atomic_dec(&ref->pin);
+ nvmap_handle_put(h);
+ } else {
+ if (h->heap_pgalloc && h->pgalloc.dirty)
+ map_iovmm_area(h);
+ phys = handle_phys(h);
+ }
+
+ return ret ?: phys;
+}
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+ struct nvmap_handle *h;
+ phys_addr_t phys;
+
+ h = nvmap_get_handle_id(c, id);
+ if (!h)
+ return -EPERM;
+ mutex_lock(&h->lock);
+ phys = handle_phys(h);
+ mutex_unlock(&h->lock);
+ nvmap_handle_put(h);
+
+ return phys;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
+{
+ if (!ref)
+ return;
+
+ atomic_dec(&ref->pin);
+ if (handle_unpin(client, ref->handle, false))
+ wake_up(&client->share->pin_wait);
+}
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr)
+{
+ int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ if (WARN_ON(!h[i]))
+ continue;
+ do_wake |= handle_unpin(client, h[i], false);
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ pgprot_t prot;
+ unsigned long adj_size;
+ unsigned long offs;
+ struct vm_struct *v;
+ void *p;
+
+ h = nvmap_handle_get(ref->handle);
+ if (!h)
+ return NULL;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->heap_pgalloc)
+ return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
+ -1, prot);
+
+ /* carveout - explicitly map the pfns into a vmalloc area */
+
+ nvmap_usecount_inc(h);
+
+ adj_size = h->carveout->base & ~PAGE_MASK;
+ adj_size += h->size;
+ adj_size = PAGE_ALIGN(adj_size);
+
+ v = alloc_vm_area(adj_size);
+ if (!v) {
+ nvmap_usecount_dec(h);
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ p = v->addr + (h->carveout->base & ~PAGE_MASK);
+
+ for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
+ unsigned long addr = (unsigned long) v->addr + offs;
+ unsigned int pfn;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pfn = __phys_to_pfn(h->carveout->base + offs);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ break;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ break;
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(addr);
+ }
+
+ if (offs != adj_size) {
+ free_vm_area(v);
+ nvmap_usecount_dec(h);
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ /* leave the handle ref count incremented by 1, so that
+ * the handle will not be freed while the kernel mapping exists.
+ * nvmap_handle_put will be called when this address is unmapped */
+ return p;
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
+{
+ struct nvmap_handle *h;
+
+ if (!ref)
+ return;
+
+ h = ref->handle;
+
+ if (h->heap_pgalloc) {
+ vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
+ } else {
+ struct vm_struct *vm;
+ addr -= (h->carveout->base & ~PAGE_MASK);
+ vm = remove_vm_area(addr);
+ BUG_ON(!vm);
+ kfree(vm);
+ nvmap_usecount_dec(h);
+ }
+ nvmap_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags)
+{
+ const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
+ NVMAP_HEAP_CARVEOUT_GENERIC);
+ struct nvmap_handle_ref *r = NULL;
+ int err;
+
+ r = nvmap_create_handle(client, size);
+ if (IS_ERR(r))
+ return r;
+
+ err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+ default_heap, align, flags);
+
+ if (err) {
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+ return ERR_PTR(err);
+ }
+
+ return r;
+}
+
+/* allocates memory with a specified iovm_start address. */
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+ size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
+{
+ int err;
+ struct nvmap_handle *h;
+ struct nvmap_handle_ref *r;
+ const unsigned int default_heap = NVMAP_HEAP_IOVMM;
+
+ /* the size needs to be more than one page;
+ * otherwise the heap preference would change to the system heap.
+ */
+ if (size <= PAGE_SIZE)
+ size = PAGE_SIZE << 1;
+ r = nvmap_create_handle(client, size);
+ if (IS_ERR_OR_NULL(r))
+ return r;
+
+ h = r->handle;
+ h->pgalloc.iovm_addr = iovm_start;
+ err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+ default_heap, align, flags);
+ if (err)
+ goto fail;
+
+ err = mutex_lock_interruptible(&client->share->pin_lock);
+ if (WARN_ON(err))
+ goto fail;
+ err = pin_locked(client, h);
+ mutex_unlock(&client->share->pin_lock);
+ if (err)
+ goto fail;
+ return r;
+
+fail:
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+ return ERR_PTR(err);
+}
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ unsigned long ref_id = nvmap_ref_to_id(r);
+
+ nvmap_unpin_ids(client, 1, &ref_id);
+ nvmap_free_handle_id(client, ref_id);
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ if (!r)
+ return;
+
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+}
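+
+/*
+ * Illustrative kernel-side buffer lifecycle (hypothetical caller): allocate
+ * a buffer, map it for CPU access, then tear both down, e.g.:
+ *
+ *     struct nvmap_handle_ref *r = nvmap_alloc(client, size, PAGE_SIZE,
+ *                                              NVMAP_HANDLE_WRITE_COMBINE);
+ *     void *va;
+ *     if (IS_ERR(r))
+ *             return PTR_ERR(r);
+ *     va = nvmap_mmap(r);
+ *     ... fill the buffer through va ...
+ *     nvmap_munmap(r, va);
+ *     nvmap_free(client, r);
+ */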
+
+/*
+ * create a mapping to the user's buffer and write it
+ * (uses logic similar to nvmap_reloc_pin_array to map the cmdbuf)
+ */
+int nvmap_patch_word(struct nvmap_client *client,
+ struct nvmap_handle *patch,
+ u32 patch_offset, u32 patch_value)
+{
+ phys_addr_t phys;
+ unsigned long kaddr;
+ unsigned int pfn;
+ void *addr;
+ pte_t **pte;
+ pgprot_t prot;
+
+ if (patch_offset >= patch->size) {
+ nvmap_warn(client, "read/write outside of handle\n");
+ return -EFAULT;
+ }
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ /* derive physaddr of cmdbuf WAIT to patch */
+ if (patch->heap_pgalloc) {
+ unsigned int page = patch_offset >> PAGE_SHIFT;
+ phys = page_to_phys(patch->pgalloc.pages[page]);
+ phys += (patch_offset & ~PAGE_MASK);
+ } else {
+ phys = patch->carveout->base + patch_offset;
+ }
+
+ pfn = __phys_to_pfn(phys);
+ prot = nvmap_pgprot(patch, pgprot_kernel);
+ kaddr = (unsigned long)addr;
+
+ /* write PTE, so addr points to cmdbuf PFN */
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(kaddr);
+
+ /* write patch_value to addr + page offset */
+ __raw_writel(patch_value, addr + (phys & ~PAGE_MASK));
+
+ nvmap_free_pte(client->dev, pte);
+ wmb();
+ return 0;
+}
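+
+/*
+ * Illustrative call (hypothetical values): overwrite one 32-bit word of a
+ * command buffer handle, e.g. to neutralise a WAIT that is already satisfied:
+ *
+ *     err = nvmap_patch_word(client, cmdbuf_handle, wait_offset, new_thresh);
+ *
+ * cmdbuf_handle, wait_offset and new_thresh are placeholders for whatever
+ * the caller derived from its command stream.
+ */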
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
new file mode 100644
index 000000000000..63b3471ec141
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -0,0 +1,244 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
+#define __VIDEO_TEGRA_NVMAP_NVMAP_H
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <mach/nvmap.h>
+#include "nvmap_heap.h"
+
+struct nvmap_device;
+struct page;
+struct tegra_iovmm_area;
+
+#if defined(CONFIG_TEGRA_NVMAP)
+#define nvmap_err(_client, _fmt, ...) \
+ dev_err(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_warn(_client, _fmt, ...) \
+ dev_warn(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_debug(_client, _fmt, ...) \
+ dev_dbg(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle)
+
+/* handles allocated using shared system memory (either IOVMM or high-order
+ * page allocations) */
+struct nvmap_pgalloc {
+ struct page **pages;
+ struct tegra_iovmm_area *area;
+ struct list_head mru_list; /* MRU entry for IOVMM reclamation */
+ bool contig; /* contiguous system memory */
+ bool dirty; /* area is invalid and needs mapping */
+ u32 iovm_addr; /* non-zero if the client needs a specific iova mapping */
+};
+
+struct nvmap_handle {
+ struct rb_node node; /* entry on global handle tree */
+ atomic_t ref; /* reference count (i.e., # of duplications) */
+ atomic_t pin; /* pin count */
+ unsigned int usecount; /* how often it is used */
+ unsigned long flags;
+ size_t size; /* padded (as-allocated) size */
+ size_t orig_size; /* original (as-requested) size */
+ size_t align;
+ struct nvmap_client *owner;
+ struct nvmap_device *dev;
+ union {
+ struct nvmap_pgalloc pgalloc;
+ struct nvmap_heap_block *carveout;
+ };
+ bool global; /* handle may be duplicated by other clients */
+ bool secure; /* zap IOVMM area on unpin */
+ bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
+ bool alloc; /* handle has memory allocated */
+ unsigned int userflags; /* flags passed from userspace */
+ struct mutex lock;
+};
+
+struct nvmap_share {
+ struct tegra_iovmm_client *iovmm;
+ wait_queue_head_t pin_wait;
+ struct mutex pin_lock;
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ struct mutex mru_lock;
+ struct list_head *mru_lists;
+ int nr_mru;
+#endif
+};
+
+struct nvmap_carveout_commit {
+ size_t commit;
+ struct list_head list;
+};
+
+struct nvmap_client {
+ const char *name;
+ struct nvmap_device *dev;
+ struct nvmap_share *share;
+ struct rb_root handle_refs;
+ atomic_t iovm_commit;
+ size_t iovm_limit;
+ struct mutex ref_lock;
+ bool super;
+ atomic_t count;
+ struct task_struct *task;
+ struct list_head list;
+ struct nvmap_carveout_commit carveout_commit[0];
+};
+
+struct nvmap_vma_priv {
+ struct nvmap_handle *handle;
+ size_t offs;
+ atomic_t count; /* number of processes cloning the VMA */
+};
+
+static inline void nvmap_ref_lock(struct nvmap_client *priv)
+{
+ mutex_lock(&priv->ref_lock);
+}
+
+static inline void nvmap_ref_unlock(struct nvmap_client *priv)
+{
+ mutex_unlock(&priv->ref_lock);
+}
+#endif /* CONFIG_TEGRA_NVMAP */
+
+struct device *nvmap_client_to_device(struct nvmap_client *client);
+
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
+
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
+
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
+
+void nvmap_usecount_inc(struct nvmap_handle *h);
+void nvmap_usecount_dec(struct nvmap_handle *h);
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
+ struct nvmap_handle *handle,
+ unsigned long type);
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b);
+
+struct nvmap_carveout_node;
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+ struct nvmap_carveout_node *node, size_t len);
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len);
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);
+
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long handle);
+
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
+ unsigned long id);
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size);
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags);
+
+void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);
+
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids);
+
+void nvmap_unpin_ids(struct nvmap_client *priv,
+ unsigned int nr, const unsigned long *ids);
+
+void _nvmap_handle_free(struct nvmap_handle *h);
+
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
+
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
+
+#if defined(CONFIG_TEGRA_NVMAP)
+static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+ if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
+ pr_err("%s: %s getting a freed handle\n",
+ __func__, current->group_leader->comm);
+ if (atomic_read(&h->ref) <= 0)
+ return NULL;
+ }
+ return h;
+}
+
+static inline void nvmap_handle_put(struct nvmap_handle *h)
+{
+ int cnt = atomic_dec_return(&h->ref);
+
+ if (WARN_ON(cnt < 0)) {
+ pr_err("%s: %s put to negative references\n",
+ __func__, current->comm);
+ } else if (cnt == 0)
+ _nvmap_handle_free(h);
+}
+
+static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
+{
+ if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
+ return pgprot_noncached(prot);
+ else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+ return pgprot_writecombine(prot);
+ else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+ return pgprot_inner_writeback(prot);
+ return prot;
+}
+#else /* CONFIG_TEGRA_NVMAP */
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
+void nvmap_handle_put(struct nvmap_handle *h);
+pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);
+#endif /* !CONFIG_TEGRA_NVMAP */
+
+int is_nvmap_vma(struct vm_area_struct *vma);
+
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+ size_t size, size_t align, unsigned int flags, unsigned int iova_start);
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_common.h b/drivers/video/tegra/nvmap/nvmap_common.h
new file mode 100644
index 000000000000..6da010720bb2
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_common.h
@@ -0,0 +1,38 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_common.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+extern void v7_flush_kern_cache_all(void *);
+extern void v7_clean_kern_cache_all(void *);
+
+#define FLUSH_CLEAN_BY_SET_WAY_THRESHOLD (8 * PAGE_SIZE)
+
+static inline void inner_flush_cache_all(void)
+{
+ on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
+}
+
+static inline void inner_clean_cache_all(void)
+{
+ on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
+}
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
new file mode 100644
index 000000000000..c353505b9a05
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -0,0 +1,1423 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_dev.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/backing-dev.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_ioctl.h"
+#include "nvmap_mru.h"
+#include "nvmap_common.h"
+
+#define NVMAP_NUM_PTES 64
+#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
+
+#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
+static bool carveout_killer = true;
+#else
+static bool carveout_killer;
+#endif
+module_param(carveout_killer, bool, 0640);
+
+struct nvmap_carveout_node {
+ unsigned int heap_bit;
+ struct nvmap_heap *carveout;
+ int index;
+ struct list_head clients;
+ spinlock_t clients_lock;
+};
+
+struct nvmap_device {
+ struct vm_struct *vm_rgn;
+ pte_t *ptes[NVMAP_NUM_PTES];
+ unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
+ unsigned int lastpte;
+ spinlock_t ptelock;
+
+ struct rb_root handles;
+ spinlock_t handle_lock;
+ wait_queue_head_t pte_wait;
+ struct miscdevice dev_super;
+ struct miscdevice dev_user;
+ struct nvmap_carveout_node *heaps;
+ int nr_carveouts;
+ struct nvmap_share iovmm_master;
+ struct list_head clients;
+ spinlock_t clients_lock;
+};
+
+struct nvmap_device *nvmap_dev;
+
+static struct backing_dev_info nvmap_bdi = {
+ .ra_pages = 0,
+ .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
+};
+
+static int nvmap_open(struct inode *inode, struct file *filp);
+static int nvmap_release(struct inode *inode, struct file *filp);
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
+static void nvmap_vma_open(struct vm_area_struct *vma);
+static void nvmap_vma_close(struct vm_area_struct *vma);
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+static const struct file_operations nvmap_user_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static const struct file_operations nvmap_super_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static struct vm_operations_struct nvmap_vma_ops = {
+ .open = nvmap_vma_open,
+ .close = nvmap_vma_close,
+ .fault = nvmap_vma_fault,
+};
+
+int is_nvmap_vma(struct vm_area_struct *vma)
+{
+ return vma->vm_ops == &nvmap_vma_ops;
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client)
+{
+ if (client->super)
+ return client->dev->dev_super.this_device;
+ else
+ return client->dev->dev_user.this_device;
+}
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
+{
+ return &dev->iovmm_master;
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * an ERR_PTR-encoded errno. may be called from IRQ context */
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
+{
+ unsigned long flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&dev->ptelock, flags);
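+	/* round-robin scan: search from the most recently allocated slot to
+	 * the end of the pool, then wrap around to the slots before it */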
+ bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
+ if (bit == NVMAP_NUM_PTES) {
+ bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
+ if (bit == dev->lastpte)
+ bit = NVMAP_NUM_PTES;
+ }
+
+ if (bit == NVMAP_NUM_PTES) {
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev->lastpte = bit;
+ set_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+
+ *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
+ return &(dev->ptes[bit]);
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * an ERR_PTR-encoded errno. must be called from a sleepable context */
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
+{
+ int ret;
+ pte_t **pte;
+ ret = wait_event_interruptible(dev->pte_wait,
+ !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
+
+ if (ret == -ERESTARTSYS)
+ return ERR_PTR(-EINTR);
+
+ return pte;
+}
+
+/* frees a PTE */
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
+{
+ unsigned long addr;
+ unsigned int bit = pte - dev->ptes;
+ unsigned long flags;
+
+ if (WARN_ON(bit >= NVMAP_NUM_PTES))
+ return;
+
+ addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
+ set_pte_at(&init_mm, addr, *pte, 0);
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ clear_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ wake_up(&dev->pte_wait);
+}
+
+/* verifies that "id" refers to a valid handle reference owned by the
+ * client. caller must hold the client's ref_lock prior to calling this */
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
+ unsigned long id)
+{
+ struct rb_node *n = c->handle_refs.rb_node;
+
+ while (n) {
+ struct nvmap_handle_ref *ref;
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ if ((unsigned long)ref->handle == id)
+ return ref;
+ else if (id > (unsigned long)ref->handle)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+
+ return NULL;
+}
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle_ref *ref;
+ struct nvmap_handle *h = NULL;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, id);
+ if (ref)
+ h = ref->handle;
+ if (h)
+ h = nvmap_handle_get(h);
+ nvmap_ref_unlock(client);
+ return h;
+}
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b)
+{
+ struct nvmap_heap *h = nvmap_block_to_heap(b);
+ struct nvmap_carveout_node *n;
+ int i;
+
+ for (i = 0; i < c->dev->nr_carveouts; i++) {
+ n = &c->dev->heaps[i];
+ if (n->carveout == h)
+ return n->heap_bit;
+ }
+ return 0;
+}
+
+/*
+ * This routine flushes carveout memory from the CPU caches.
+ *
+ * Why is a cache flush needed for carveout memory? Consider a piece of
+ * carveout that is allocated as cached and later released. If the same
+ * memory is then handed out for an uncached request without being flushed,
+ * the client may pass it to a hardware engine, which starts modifying it.
+ * Because the region was cached earlier, some of it may still be resident
+ * in the cache; while the CPU services other requests, those stale lines
+ * can be written back to main memory, and if that happens after the
+ * hardware engine has written its data, the result is corruption.
+ *
+ * Blindly flushing the memory on every carveout allocation is redundant,
+ * however, so the following strategy is used to optimize carveout buffer
+ * cache flushes:
+ *
+ * The whole carveout is flushed from the cache during its initialization.
+ * During allocation, carveout buffers are not flushed from the cache.
+ * During deallocation, carveout buffers are flushed if they were allocated
+ * as cached; if they were allocated as uncached/write-combined, no cache
+ * flush is needed -- just draining the store buffers is enough.
+ */
+int nvmap_flush_heap_block(struct nvmap_client *client,
+ struct nvmap_heap_block *block, size_t len, unsigned int prot)
+{
+ pte_t **pte;
+ void *addr;
+ phys_addr_t kaddr;
+ phys_addr_t phys = block->base;
+ phys_addr_t end = block->base + len;
+
+ if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
+ goto out;
+
+ if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
+ inner_flush_cache_all();
+ if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
+ outer_flush_range(block->base, block->base + len);
+ goto out;
+ }
+
+ pte = nvmap_alloc_pte((client ? client->dev : nvmap_dev), &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ kaddr = (phys_addr_t)addr;
+
+ while (phys < end) {
+ phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys);
+ void *base = (void *)kaddr + (phys & ~PAGE_MASK);
+
+ next = min(next, end);
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
+ flush_tlb_kernel_page(kaddr);
+ __cpuc_flush_dcache_area(base, next - phys);
+ phys = next;
+ }
+
+ if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
+ outer_flush_range(block->base, block->base + len);
+
+ nvmap_free_pte((client ? client->dev : nvmap_dev), pte);
+out:
+ wmb();
+ return 0;
+}
+
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len)
+{
+ unsigned long flags;
+
+ nvmap_ref_lock(client);
+ spin_lock_irqsave(&node->clients_lock, flags);
+ BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
+ client->carveout_commit[node->index].commit != 0);
+
+ client->carveout_commit[node->index].commit += len;
+ /* if this client isn't already on the list of nodes for this heap,
+ add it */
+ if (list_empty(&client->carveout_commit[node->index].list)) {
+ list_add(&client->carveout_commit[node->index].list,
+ &node->clients);
+ }
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+ nvmap_ref_unlock(client);
+}
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len)
+{
+ unsigned long flags;
+
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ BUG_ON(client->carveout_commit[node->index].commit < len);
+ client->carveout_commit[node->index].commit -= len;
+ /* if no more allocation in this carveout for this node, delete it */
+ if (!client->carveout_commit[node->index].commit)
+ list_del_init(&client->carveout_commit[node->index].list);
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+}
+
+static struct nvmap_client *get_client_from_carveout_commit(
+ struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
+{
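+	/* carveout_commit[] is allocated inline at the tail of struct
+	 * nvmap_client (see nvmap_create_client), so stepping back
+	 * node->index entries yields element 0, and subtracting offsetof()
+	 * recovers the enclosing client -- an open-coded container_of() */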
+ struct nvmap_carveout_commit *first_commit = commit - node->index;
+ return (void *)first_commit - offsetof(struct nvmap_client,
+ carveout_commit);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
+static int wait_count;
+bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
+{
+ struct nvmap_carveout_commit *commit;
+ size_t selected_size = 0;
+ int selected_oom_adj = OOM_ADJUST_MIN;
+ struct task_struct *selected_task = NULL;
+ unsigned long flags;
+ bool wait = false;
+ int current_oom_adj = OOM_ADJUST_MIN;
+
+ task_lock(current);
+ if (current->signal)
+ current_oom_adj = current->signal->oom_adj;
+ task_unlock(current);
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ /* find the task with the smallest oom_adj (lowest priority)
+ * and largest carveout allocation -- ignore kernel allocations,
+ * there's no way to handle them */
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ size_t size = commit->commit;
+ struct task_struct *task = client->task;
+ struct signal_struct *sig;
+
+ if (!task)
+ continue;
+
+ task_lock(task);
+ sig = task->signal;
+ if (!task->mm || !sig)
+ goto end;
+ /* don't try to kill current */
+ if (task == current->group_leader)
+ goto end;
+ /* don't try to kill higher priority tasks */
+ if (sig->oom_adj < current_oom_adj)
+ goto end;
+ if (sig->oom_adj < selected_oom_adj)
+ goto end;
+ if (sig->oom_adj == selected_oom_adj &&
+ size <= selected_size)
+ goto end;
+ selected_oom_adj = sig->oom_adj;
+ selected_size = size;
+ selected_task = task;
+end:
+ task_unlock(task);
+ }
+ if (selected_task) {
+ wait = true;
+ if (fatal_signal_pending(selected_task)) {
+ pr_warning("carveout_killer: process %d dying "
+ "slowly\n", selected_task->pid);
+ goto out;
+ }
+ pr_info("carveout_killer: killing process %d with oom_adj %d "
+ "to reclaim %d (for process with oom_adj %d)\n",
+ selected_task->pid, selected_oom_adj,
+ selected_size, current_oom_adj);
+ force_sig(SIGKILL, selected_task);
+ }
+out:
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+ return wait;
+}
+
+static
+struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
+ struct nvmap_handle *handle,
+ unsigned long type)
+{
+ struct nvmap_carveout_node *co_heap;
+ struct nvmap_device *dev = client->dev;
+ int i;
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_heap_block *block;
+ co_heap = &dev->heaps[i];
+
+ if (!(co_heap->heap_bit & type))
+ continue;
+
+ block = nvmap_heap_alloc(co_heap->carveout, handle);
+ if (block)
+ return block;
+ }
+ return NULL;
+}
+
+static bool nvmap_carveout_freed(int count)
+{
+ smp_rmb();
+ return count != wait_count;
+}
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
+ struct nvmap_handle *handle,
+ unsigned long type)
+{
+ struct nvmap_heap_block *block;
+ struct nvmap_carveout_node *co_heap;
+ struct nvmap_device *dev = client->dev;
+ int i;
+ unsigned long end = jiffies +
+ msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
+ int count = 0;
+
+ do {
+ block = do_nvmap_carveout_alloc(client, handle, type);
+ if (!carveout_killer)
+ return block;
+
+ if (block)
+ return block;
+
+ if (!count++) {
+ char task_comm[TASK_COMM_LEN];
+ if (client->task)
+ get_task_comm(task_comm, client->task);
+ else
+ task_comm[0] = 0;
+ pr_info("%s: failed to allocate %u bytes for "
+ "process %s, firing carveout "
+ "killer!\n", __func__, handle->size, task_comm);
+
+ } else {
+ pr_info("%s: still can't allocate %u bytes, "
+ "attempt %d!\n", __func__, handle->size, count);
+ }
+
+ /* shrink carveouts that matter and try again */
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ int count;
+ co_heap = &dev->heaps[i];
+
+ if (!(co_heap->heap_bit & type))
+ continue;
+
+ count = wait_count;
+ /* indicates we didn't find anything to kill,
+ might as well stop trying */
+ if (!nvmap_shrink_carveout(co_heap))
+ return NULL;
+
+ if (time_is_after_jiffies(end))
+ wait_event_interruptible_timeout(wait_reclaim,
+ nvmap_carveout_freed(count),
+ end - jiffies);
+ }
+ } while (time_is_after_jiffies(end));
+
+ if (time_is_before_jiffies(end))
+ pr_info("carveout_killer: timeout expired without "
+ "allocation succeeding.\n");
+
+ return NULL;
+}
+
+/* remove a handle from the device's tree of all handles; called
+ * when freeing handles. */
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ spin_lock(&dev->handle_lock);
+
+ /* re-test inside the spinlock if the handle really has no clients;
+ * only remove the handle if it is unreferenced */
+ if (atomic_add_return(0, &h->ref) > 0) {
+ spin_unlock(&dev->handle_lock);
+ return -EBUSY;
+ }
+ smp_rmb();
+ BUG_ON(atomic_read(&h->ref) < 0);
+ BUG_ON(atomic_read(&h->pin) != 0);
+
+ rb_erase(&h->node, &dev->handles);
+
+ spin_unlock(&dev->handle_lock);
+ return 0;
+}
+
+/* adds a newly-created handle to the device master tree */
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ spin_lock(&dev->handle_lock);
+ p = &dev->handles.rb_node;
+ while (*p) {
+ struct nvmap_handle *b;
+
+ parent = *p;
+ b = rb_entry(parent, struct nvmap_handle, node);
+ if (h > b)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&h->node, parent, p);
+ rb_insert_color(&h->node, &dev->handles);
+ spin_unlock(&dev->handle_lock);
+}
+
+/* validates that a handle is in the device master tree, and that the
+ * client has permission to access it */
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle *h = NULL;
+ struct rb_node *n;
+
+ spin_lock(&client->dev->handle_lock);
+
+ n = client->dev->handles.rb_node;
+
+ while (n) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ if ((unsigned long)h == id) {
+ if (client->super || h->global || (h->owner == client))
+ h = nvmap_handle_get(h);
+ else
+ h = NULL;
+ spin_unlock(&client->dev->handle_lock);
+ return h;
+ }
+ if (id > (unsigned long)h)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+ spin_unlock(&client->dev->handle_lock);
+ return NULL;
+}
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name)
+{
+ struct nvmap_client *client;
+ struct task_struct *task;
+ int i;
+
+ if (WARN_ON(!dev))
+ return NULL;
+
+ client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
+ * dev->nr_carveouts), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->name = name;
+ client->super = true;
+ client->dev = dev;
+ /* TODO: allocate unique IOVMM client for each nvmap client */
+ client->share = &dev->iovmm_master;
+ client->handle_refs = RB_ROOT;
+
+ atomic_set(&client->iovm_commit, 0);
+
+ client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ INIT_LIST_HEAD(&client->carveout_commit[i].list);
+ client->carveout_commit[i].commit = 0;
+ }
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+ client->task = task;
+
+ mutex_init(&client->ref_lock);
+ atomic_set(&client->count, 1);
+
+ spin_lock(&dev->clients_lock);
+ list_add(&client->list, &dev->clients);
+ spin_unlock(&dev->clients_lock);
+ return client;
+}
+
+static void destroy_client(struct nvmap_client *client)
+{
+ struct rb_node *n;
+ int i;
+
+ if (!client)
+ return;
+
+ while ((n = rb_first(&client->handle_refs))) {
+ struct nvmap_handle_ref *ref;
+ int pins, dupes;
+
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+
+ if (ref->handle->owner == client)
+ ref->handle->owner = NULL;
+
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ dupes = atomic_read(&ref->dupes);
+ while (dupes--)
+ nvmap_handle_put(ref->handle);
+
+ kfree(ref);
+ }
+
+ if (carveout_killer) {
+ wait_count++;
+ smp_wmb();
+ wake_up_all(&wait_reclaim);
+ }
+
+ for (i = 0; i < client->dev->nr_carveouts; i++)
+ list_del(&client->carveout_commit[i].list);
+
+ if (client->task)
+ put_task_struct(client->task);
+
+ spin_lock(&client->dev->clients_lock);
+ list_del(&client->list);
+ spin_unlock(&client->dev->clients_lock);
+ kfree(client);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+ if (WARN_ON(!client))
+ return NULL;
+
+ if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
+ return NULL;
+
+ return client;
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+ struct nvmap_client *client = ERR_PTR(-EFAULT);
+ struct file *f = fget(fd);
+ if (!f)
+ return ERR_PTR(-EINVAL);
+
+ if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
+ client = f->private_data;
+ atomic_inc(&client->count);
+ }
+
+ fput(f);
+ return client;
+}
+
+void nvmap_client_put(struct nvmap_client *client)
+{
+ if (!client)
+ return;
+
+ if (!atomic_dec_return(&client->count))
+ destroy_client(client);
+}
+
+static int nvmap_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
+ struct nvmap_client *priv;
+ int ret;
+
+ ret = nonseekable_open(inode, filp);
+ if (unlikely(ret))
+ return ret;
+
+ BUG_ON(dev != nvmap_dev);
+ priv = nvmap_create_client(dev, "user");
+ if (!priv)
+ return -ENOMEM;
+
+ priv->super = (filp->f_op == &nvmap_super_fops);
+
+ filp->f_mapping->backing_dev_info = &nvmap_bdi;
+
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvmap_release(struct inode *inode, struct file *filp)
+{
+ nvmap_client_put(filp->private_data);
+ return 0;
+}
+
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
+ * will be stored in vm_private_data and faulted in. until the
+ * ioctl is made, the VMA is mapped no-access */
+ vma->vm_private_data = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->offs = 0;
+ priv->handle = NULL;
+ atomic_set(&priv->count, 1);
+
+ vma->vm_flags |= VM_SHARED;
+ vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
+ vma->vm_ops = &nvmap_vma_ops;
+ vma->vm_private_data = priv;
+
+ return 0;
+}
+
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ void __user *uarg = (void __user *)arg;
+
+ if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case NVMAP_IOC_CLAIM:
+		nvmap_warn(filp->private_data, "preserved handles not "
+			   "supported\n");
+ err = -ENODEV;
+ break;
+ case NVMAP_IOC_CREATE:
+ case NVMAP_IOC_FROM_ID:
+ err = nvmap_ioctl_create(filp, cmd, uarg);
+ break;
+
+ case NVMAP_IOC_GET_ID:
+ err = nvmap_ioctl_getid(filp, uarg);
+ break;
+
+ case NVMAP_IOC_PARAM:
+ err = nvmap_ioctl_get_param(filp, uarg);
+ break;
+
+ case NVMAP_IOC_UNPIN_MULT:
+ case NVMAP_IOC_PIN_MULT:
+ err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
+ break;
+
+ case NVMAP_IOC_ALLOC:
+ err = nvmap_ioctl_alloc(filp, uarg);
+ break;
+
+ case NVMAP_IOC_FREE:
+ err = nvmap_ioctl_free(filp, arg);
+ break;
+
+ case NVMAP_IOC_MMAP:
+ err = nvmap_map_into_caller_ptr(filp, uarg);
+ break;
+
+ case NVMAP_IOC_WRITE:
+ case NVMAP_IOC_READ:
+ err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
+ break;
+
+ case NVMAP_IOC_CACHE:
+ err = nvmap_ioctl_cache_maint(filp, uarg);
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+ return err;
+}
+
+/* to ensure that the backing store for the VMA isn't freed while a fork'd
+ * reference still exists, nvmap_vma_open increments the reference count on
+ * the handle, and nvmap_vma_close decrements it. alternatively, we could
+ * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
+*/
+static void nvmap_vma_open(struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ priv = vma->vm_private_data;
+
+ BUG_ON(!priv);
+
+ atomic_inc(&priv->count);
+}
+
+static void nvmap_vma_close(struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv = vma->vm_private_data;
+
+ if (priv) {
+ if (priv->handle) {
+ nvmap_usecount_dec(priv->handle);
+ BUG_ON(priv->handle->usecount < 0);
+ }
+ if (!atomic_dec_return(&priv->count)) {
+ if (priv->handle)
+ nvmap_handle_put(priv->handle);
+ kfree(priv);
+ }
+ }
+ vma->vm_private_data = NULL;
+}
+
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct nvmap_vma_priv *priv;
+ unsigned long offs;
+
+ offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
+ priv = vma->vm_private_data;
+ if (!priv || !priv->handle || !priv->handle->alloc)
+ return VM_FAULT_SIGBUS;
+
+ offs += priv->offs;
+ /* if the VMA was split for some reason, vm_pgoff will be the VMA's
+ * offset from the original VMA */
+ offs += (vma->vm_pgoff << PAGE_SHIFT);
+
+ if (offs >= priv->handle->size)
+ return VM_FAULT_SIGBUS;
+
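+	/* carveout-backed handles have no struct page, so map them by PFN;
+	 * page-allocated handles hand the backing struct page to the fault
+	 * handler instead */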
+ if (!priv->handle->heap_pgalloc) {
+ unsigned long pfn;
+ BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
+ pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
+ vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ return VM_FAULT_NOPAGE;
+ } else {
+ struct page *page;
+ offs >>= PAGE_SHIFT;
+ page = priv->handle->pgalloc.pages[offs];
+ if (page)
+ get_page(page);
+ vmf->page = page;
+ return (page) ? 0 : VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t attr_show_usage(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
+
+ return sprintf(buf, "%08x\n", node->heap_bit);
+}
+
+static struct device_attribute heap_attr_show_usage =
+ __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+
+static struct attribute *heap_extra_attrs[] = {
+ &heap_attr_show_usage.attr,
+ NULL,
+};
+
+static struct attribute_group heap_extra_attr_group = {
+ .attrs = heap_extra_attrs,
+};
+
+static void client_stringify(struct nvmap_client *client, struct seq_file *s)
+{
+ char task_comm[TASK_COMM_LEN];
+ if (!client->task) {
+ seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
+ return;
+ }
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
+ client->task->pid);
+}
+
+static void allocations_stringify(struct nvmap_client *client,
+ struct seq_file *s)
+{
+ unsigned long base = 0;
+ struct rb_node *n = rb_first(&client->handle_refs);
+
+ for (; n != NULL; n = rb_next(n)) {
+ struct nvmap_handle_ref *ref =
+ rb_entry(n, struct nvmap_handle_ref, node);
+ struct nvmap_handle *handle = ref->handle;
+ if (handle->alloc && !handle->heap_pgalloc) {
+ seq_printf(s, "%-18s %-18s %8lx %10u %8lx\n", "", "",
+ (unsigned long)(handle->carveout->base),
+ handle->size, handle->userflags);
+ } else if (handle->alloc && handle->heap_pgalloc) {
+ seq_printf(s, "%-18s %-18s %8lx %10u %8lx\n", "", "",
+ base, handle->size, handle->userflags);
+ }
+ }
+}
+
+static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
+{
+ struct nvmap_carveout_node *node = s->private;
+ struct nvmap_carveout_commit *commit;
+ unsigned long flags;
+ unsigned int total = 0;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
+ "SIZE", "FLAGS");
+ seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
+ "BASE", "SIZE");
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ client_stringify(client, s);
+ seq_printf(s, " %10u\n", commit->commit);
+ allocations_stringify(client, s);
+ seq_printf(s, "\n");
+ total += commit->commit;
+ }
+ seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+
+ return 0;
+}
+
+static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvmap_debug_allocations_show,
+ inode->i_private);
+}
+
+static const struct file_operations debug_allocations_fops = {
+ .open = nvmap_debug_allocations_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+{
+ struct nvmap_carveout_node *node = s->private;
+ struct nvmap_carveout_commit *commit;
+ unsigned long flags;
+ unsigned int total = 0;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+ "SIZE");
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ client_stringify(client, s);
+ seq_printf(s, " %10u\n", commit->commit);
+ total += commit->commit;
+ }
+ seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+
+ return 0;
+}
+
+static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvmap_debug_clients_show, inode->i_private);
+}
+
+static const struct file_operations debug_clients_fops = {
+ .open = nvmap_debug_clients_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ unsigned int total = 0;
+ struct nvmap_client *client;
+ struct nvmap_device *dev = s->private;
+
+ spin_lock_irqsave(&dev->clients_lock, flags);
+ seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+ "SIZE");
+ list_for_each_entry(client, &dev->clients, list) {
+ client_stringify(client, s);
+ seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
+ total += atomic_read(&client->iovm_commit);
+ }
+ seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
+ spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+ return 0;
+}
+
+static int nvmap_debug_iovmm_clients_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, nvmap_debug_iovmm_clients_show,
+ inode->i_private);
+}
+
+static const struct file_operations debug_iovmm_clients_fops = {
+ .open = nvmap_debug_iovmm_clients_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ unsigned int total = 0;
+ struct nvmap_client *client;
+ struct nvmap_device *dev = s->private;
+
+ spin_lock_irqsave(&dev->clients_lock, flags);
+ seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+ "SIZE");
+ seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
+ "BASE", "SIZE");
+ list_for_each_entry(client, &dev->clients, list) {
+ client_stringify(client, s);
+ seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
+ allocations_stringify(client, s);
+ seq_printf(s, "\n");
+ total += atomic_read(&client->iovm_commit);
+ }
+ seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
+ spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+ return 0;
+}
+
+static int nvmap_debug_iovmm_allocations_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, nvmap_debug_iovmm_allocations_show,
+ inode->i_private);
+}
+
+static const struct file_operations debug_iovmm_allocations_fops = {
+ .open = nvmap_debug_iovmm_allocations_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvmap_probe(struct platform_device *pdev)
+{
+ struct nvmap_platform_data *plat = pdev->dev.platform_data;
+ struct nvmap_device *dev;
+ struct dentry *nvmap_debug_root;
+ unsigned int i;
+ int e;
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(nvmap_dev != NULL)) {
+ dev_err(&pdev->dev, "only one nvmap device may be present\n");
+ return -ENODEV;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "out of memory for device\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_user.minor = MISC_DYNAMIC_MINOR;
+ dev->dev_user.name = "nvmap";
+ dev->dev_user.fops = &nvmap_user_fops;
+ dev->dev_user.parent = &pdev->dev;
+
+ dev->dev_super.minor = MISC_DYNAMIC_MINOR;
+ dev->dev_super.name = "knvmap";
+ dev->dev_super.fops = &nvmap_super_fops;
+ dev->dev_super.parent = &pdev->dev;
+
+ dev->handles = RB_ROOT;
+
+ init_waitqueue_head(&dev->pte_wait);
+
+ init_waitqueue_head(&dev->iovmm_master.pin_wait);
+ mutex_init(&dev->iovmm_master.pin_lock);
+ dev->iovmm_master.iovmm =
+ tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL,
+ &(dev->dev_user));
+#ifdef CONFIG_TEGRA_IOVMM
+ if (!dev->iovmm_master.iovmm) {
+ e = PTR_ERR(dev->iovmm_master.iovmm);
+ dev_err(&pdev->dev, "couldn't create iovmm client\n");
+ goto fail;
+ }
+#endif
+ dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
+ if (!dev->vm_rgn) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate remapping region\n");
+ goto fail;
+ }
+ e = nvmap_mru_init(&dev->iovmm_master);
+ if (e) {
+ dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
+ goto fail;
+ }
+
+ spin_lock_init(&dev->ptelock);
+ spin_lock_init(&dev->handle_lock);
+ INIT_LIST_HEAD(&dev->clients);
+ spin_lock_init(&dev->clients_lock);
+
+ for (i = 0; i < NVMAP_NUM_PTES; i++) {
+ unsigned long addr;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ dev->ptes[i] = pte_alloc_kernel(pmd, addr);
+ if (!dev->ptes[i]) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ }
+
+ e = misc_register(&dev->dev_user);
+ if (e) {
+ dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+ dev->dev_user.name);
+ goto fail;
+ }
+
+ e = misc_register(&dev->dev_super);
+ if (e) {
+ dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+ dev->dev_super.name);
+ goto fail;
+ }
+
+ dev->nr_carveouts = 0;
+ dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
+ plat->nr_carveouts, GFP_KERNEL);
+ if (!dev->heaps) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
+ goto fail;
+ }
+
+ nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
+ if (IS_ERR_OR_NULL(nvmap_debug_root))
+ dev_err(&pdev->dev, "couldn't create debug files\n");
+
+ for (i = 0; i < plat->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
+ const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+ if (!co->size)
+ continue;
+ node->carveout = nvmap_heap_create(dev->dev_user.this_device,
+ co->name, co->base, co->size,
+ co->buddy_size, node);
+ if (!node->carveout) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't create %s\n", co->name);
+ goto fail_heaps;
+ }
+ node->index = dev->nr_carveouts;
+ dev->nr_carveouts++;
+ spin_lock_init(&node->clients_lock);
+ INIT_LIST_HEAD(&node->clients);
+ node->heap_bit = co->usage_mask;
+ if (nvmap_heap_create_group(node->carveout,
+ &heap_extra_attr_group))
+ dev_warn(&pdev->dev, "couldn't add extra attributes\n");
+
+ dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
+ co->name, co->size / 1024);
+
+ if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+ struct dentry *heap_root =
+ debugfs_create_dir(co->name, nvmap_debug_root);
+ if (!IS_ERR_OR_NULL(heap_root)) {
+ debugfs_create_file("clients", 0664, heap_root,
+ node, &debug_clients_fops);
+ debugfs_create_file("allocations", 0664,
+ heap_root, node, &debug_allocations_fops);
+ }
+ }
+ }
+ if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+ struct dentry *iovmm_root =
+ debugfs_create_dir("iovmm", nvmap_debug_root);
+ if (!IS_ERR_OR_NULL(iovmm_root)) {
+ debugfs_create_file("clients", 0664, iovmm_root,
+ dev, &debug_iovmm_clients_fops);
+ debugfs_create_file("allocations", 0664, iovmm_root,
+ dev, &debug_iovmm_allocations_fops);
+ }
+ }
+
+ platform_set_drvdata(pdev, dev);
+ nvmap_dev = dev;
+
+ return 0;
+fail_heaps:
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node = &dev->heaps[i];
+ nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+ nvmap_heap_destroy(node->carveout);
+ }
+fail:
+ kfree(dev->heaps);
+ nvmap_mru_destroy(&dev->iovmm_master);
+ if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev_super);
+ if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev_user);
+ if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+ tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+ if (dev->vm_rgn)
+ free_vm_area(dev->vm_rgn);
+ kfree(dev);
+ nvmap_dev = NULL;
+ return e;
+}
+
+static int nvmap_remove(struct platform_device *pdev)
+{
+ struct nvmap_device *dev = platform_get_drvdata(pdev);
+ struct rb_node *n;
+ struct nvmap_handle *h;
+ int i;
+
+ misc_deregister(&dev->dev_super);
+ misc_deregister(&dev->dev_user);
+
+ while ((n = rb_first(&dev->handles))) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ rb_erase(&h->node, &dev->handles);
+ kfree(h);
+ }
+
+ if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+ tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+
+ nvmap_mru_destroy(&dev->iovmm_master);
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node = &dev->heaps[i];
+ nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+ nvmap_heap_destroy(node->carveout);
+ }
+ kfree(dev->heaps);
+
+ free_vm_area(dev->vm_rgn);
+ kfree(dev);
+ nvmap_dev = NULL;
+ return 0;
+}
+
+static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int nvmap_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver nvmap_driver = {
+ .probe = nvmap_probe,
+ .remove = nvmap_remove,
+ .suspend = nvmap_suspend,
+ .resume = nvmap_resume,
+
+ .driver = {
+ .name = "tegra-nvmap",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nvmap_init_driver(void)
+{
+ int e;
+
+ nvmap_dev = NULL;
+
+ e = nvmap_heap_init();
+ if (e)
+ goto fail;
+
+ e = platform_driver_register(&nvmap_driver);
+ if (e) {
+ nvmap_heap_deinit();
+ goto fail;
+ }
+
+fail:
+ return e;
+}
+fs_initcall(nvmap_init_driver);
+
+static void __exit nvmap_exit_driver(void)
+{
+ platform_driver_unregister(&nvmap_driver);
+ nvmap_heap_deinit();
+ nvmap_dev = NULL;
+}
+module_exit(nvmap_exit_driver);
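
nvmap_dev.c reserves NVMAP_NUM_PTES kernel PTEs in nvmap_probe() as a scratch mapping window, handed out by nvmap_alloc_pte()/nvmap_free_pte() and used, for example, by nvmap_flush_heap_block(). A minimal sketch of that borrow/map/use/return pattern; example_peek_word() is a hypothetical helper, not part of the driver:

	/* Illustrative sketch only: borrow a scratch PTE, point it at an
	 * arbitrary physical page, read through the temporary mapping and
	 * return the PTE to the pool (which wakes any waiter). */
	static int example_peek_word(phys_addr_t phys, u32 *out)
	{
		pte_t **pte;
		void *vaddr;

		pte = nvmap_alloc_pte(nvmap_dev, &vaddr);	/* may sleep on pte_wait */
		if (IS_ERR(pte))
			return PTR_ERR(pte);

		set_pte_at(&init_mm, (unsigned long)vaddr, *pte,
			   pfn_pte(__phys_to_pfn(phys), pgprot_kernel));
		flush_tlb_kernel_page((unsigned long)vaddr);

		*out = *(u32 *)(vaddr + (phys & ~PAGE_MASK));

		nvmap_free_pte(nvmap_dev, pte);
		return 0;
	}
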
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
new file mode 100644
index 000000000000..924952932c85
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -0,0 +1,626 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_handle.c
+ *
+ * Handle allocation and freeing routines for nvmap
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include <linux/vmstat.h>
+#include <linux/swap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+#include "nvmap_common.h"
+
+#define PRINT_CARVEOUT_CONVERSION 0
+#if PRINT_CARVEOUT_CONVERSION
+#define PR_INFO pr_info
+#else
+#define PR_INFO(...)
+#endif
+
+#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
+ NVMAP_HEAP_CARVEOUT_VPR)
+#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
+#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
+#else
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#endif
+/* handles may be arbitrarily large (16+MiB), and any handle allocated from
+ * the kernel (i.e., not a carveout handle) includes its array of pages. to
+ * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
+ * the array is allocated using vmalloc. */
+#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
+
+static inline void *altalloc(size_t len)
+{
+ if (len >= PAGELIST_VMALLOC_MIN)
+ return vmalloc(len);
+ else
+ return kmalloc(len, GFP_KERNEL);
+}
+
+static inline void altfree(void *ptr, size_t len)
+{
+ if (!ptr)
+ return;
+
+ if (len >= PAGELIST_VMALLOC_MIN)
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+ struct nvmap_device *dev = h->dev;
+ unsigned int i, nr_page;
+
+ if (nvmap_handle_remove(dev, h) != 0)
+ return;
+
+ if (!h->alloc)
+ goto out;
+
+ if (!h->heap_pgalloc) {
+ nvmap_usecount_inc(h);
+ nvmap_heap_free(h->carveout);
+ goto out;
+ }
+
+ nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
+
+ BUG_ON(h->size & ~PAGE_MASK);
+ BUG_ON(!h->pgalloc.pages);
+
+ nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);
+
+ /* Restore page attributes. */
+ if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
+ h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+ h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+ set_pages_array_wb(h->pgalloc.pages, nr_page);
+
+ if (h->pgalloc.area)
+ tegra_iovmm_free_vm(h->pgalloc.area);
+
+ for (i = 0; i < nr_page; i++)
+ __free_page(h->pgalloc.pages[i]);
+
+ altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
+
+out:
+ kfree(h);
+}
+
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
+{
+ struct page *page, *p, *e;
+ unsigned int order;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+ page = alloc_pages(gfp, order);
+
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+ e = page + (1 << order);
+ for (p = page + (size >> PAGE_SHIFT); p < e; p++)
+ __free_page(p);
+
+ return page;
+}
+
+static int handle_page_alloc(struct nvmap_client *client,
+ struct nvmap_handle *h, bool contiguous)
+{
+ size_t size = PAGE_ALIGN(h->size);
+ unsigned int nr_page = size >> PAGE_SHIFT;
+ pgprot_t prot;
+ unsigned int i = 0;
+ struct page **pages;
+ unsigned long base;
+
+ pages = altalloc(nr_page * sizeof(*pages));
+ if (!pages)
+ return -ENOMEM;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ if (nr_page == 1)
+ contiguous = true;
+#endif
+
+ h->pgalloc.area = NULL;
+ if (contiguous) {
+ struct page *page;
+ page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
+ if (!page)
+ goto fail;
+
+ for (i = 0; i < nr_page; i++)
+ pages[i] = nth_page(page, i);
+
+ } else {
+ for (i = 0; i < nr_page; i++) {
+ pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
+ PAGE_SIZE);
+ if (!pages[i])
+ goto fail;
+ }
+
+#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
+ NULL, size, h->align, prot,
+ h->pgalloc.iovm_addr);
+ if (!h->pgalloc.area)
+ goto fail;
+
+ h->pgalloc.dirty = true;
+#endif
+ }
+
+ /* Update the pages mapping in kernel page table. */
+ if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+ set_pages_array_wc(pages, nr_page);
+ else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
+ set_pages_array_uc(pages, nr_page);
+ else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+ set_pages_array_iwb(pages, nr_page);
+ else
+ goto skip_cache_flush;
+
+ /* Flush the cache for allocated high mem pages only */
+ for (i = 0; i < nr_page; i++) {
+ if (PageHighMem(pages[i])) {
+ __flush_dcache_page(page_mapping(pages[i]), pages[i]);
+ base = page_to_phys(pages[i]);
+ outer_flush_range(base, base + PAGE_SIZE);
+ }
+ }
+
+skip_cache_flush:
+ h->size = size;
+ h->pgalloc.pages = pages;
+ h->pgalloc.contig = contiguous;
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return 0;
+
+fail:
+ while (i--)
+ __free_page(pages[i]);
+ altfree(pages, nr_page * sizeof(*pages));
+ wmb();
+ return -ENOMEM;
+}
+
+static void alloc_handle(struct nvmap_client *client,
+ struct nvmap_handle *h, unsigned int type)
+{
+ BUG_ON(type & (type - 1));
+
+#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+#define __NVMAP_HEAP_CARVEOUT (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_CARVEOUT_VPR)
+#define __NVMAP_HEAP_IOVMM (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
+ if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ if (h->size <= PAGE_SIZE) {
+ PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
+ "0x%x bytes %s(%d)###\n",
+ h->size, current->comm, current->pid);
+ goto sysheap;
+ }
+#endif
+ PR_INFO("###CARVEOUT CONVERTED TO IOVM "
+ "0x%x bytes %s(%d)###\n",
+ h->size, current->comm, current->pid);
+ }
+#else
+#define __NVMAP_HEAP_CARVEOUT NVMAP_HEAP_CARVEOUT_MASK
+#define __NVMAP_HEAP_IOVMM NVMAP_HEAP_IOVMM
+#endif
+
+ if (type & __NVMAP_HEAP_CARVEOUT) {
+ struct nvmap_heap_block *b;
+#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+ PR_INFO("###IRAM REQUEST RETAINED "
+ "0x%x bytes %s(%d)###\n",
+ h->size, current->comm, current->pid);
+#endif
+ /* Protect handle from relocation */
+ nvmap_usecount_inc(h);
+
+ b = nvmap_carveout_alloc(client, h, type);
+ if (b) {
+ h->heap_pgalloc = false;
+ h->alloc = true;
+ nvmap_carveout_commit_add(client,
+ nvmap_heap_to_arg(nvmap_block_to_heap(b)),
+ h->size);
+ }
+ nvmap_usecount_dec(h);
+
+ } else if (type & __NVMAP_HEAP_IOVMM) {
+ size_t reserved = PAGE_ALIGN(h->size);
+ int commit = 0;
+ int ret;
+
+ /* increment the committed IOVM space prior to allocation
+ * to avoid race conditions with other threads simultaneously
+ * allocating. */
+ commit = atomic_add_return(reserved,
+ &client->iovm_commit);
+
+ if (commit < client->iovm_limit)
+ ret = handle_page_alloc(client, h, false);
+ else
+ ret = -ENOMEM;
+
+ if (!ret) {
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ } else {
+ atomic_sub(reserved, &client->iovm_commit);
+ }
+
+ } else if (type & NVMAP_HEAP_SYSMEM) {
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
+ defined(CONFIG_NVMAP_ALLOW_SYSMEM)
+sysheap:
+#endif
+ if (handle_page_alloc(client, h, true) == 0) {
+ BUG_ON(!h->pgalloc.contig);
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ }
+ }
+}
+
+/* small allocations will try to allocate from generic OS memory before
+ * any of the limited heaps, to increase the effective memory for graphics
+ * allocations, and to reduce fragmentation of the graphics heaps with
+ * sub-page splinters */
+static const unsigned int heap_policy_small[] = {
+ NVMAP_HEAP_CARVEOUT_VPR,
+ NVMAP_HEAP_CARVEOUT_IRAM,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ NVMAP_HEAP_SYSMEM,
+#endif
+ NVMAP_HEAP_CARVEOUT_MASK,
+ NVMAP_HEAP_IOVMM,
+ 0,
+};
+
+static const unsigned int heap_policy_large[] = {
+ NVMAP_HEAP_CARVEOUT_VPR,
+ NVMAP_HEAP_CARVEOUT_IRAM,
+ NVMAP_HEAP_IOVMM,
+ NVMAP_HEAP_CARVEOUT_MASK,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ NVMAP_HEAP_SYSMEM,
+#endif
+ 0,
+};
+
+/* Do not override the single-page policy when there is not much free
+ * memory, to avoid invoking the system OOM killer. */
+#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags)
+{
+ struct nvmap_handle *h = NULL;
+ const unsigned int *alloc_policy;
+ int nr_page;
+ int err = -ENOMEM;
+
+ h = nvmap_get_handle_id(client, id);
+
+ if (!h)
+ return -EINVAL;
+
+ if (h->alloc)
+ goto out;
+
+ h->userflags = flags;
+ nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ h->secure = !!(flags & NVMAP_HANDLE_SECURE);
+ h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
+ h->align = max_t(size_t, align, L1_CACHE_BYTES);
+
+#ifndef CONFIG_TEGRA_IOVMM
+ if (heap_mask & NVMAP_HEAP_IOVMM) {
+		heap_mask &= ~NVMAP_HEAP_IOVMM;
+ heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+ }
+#endif
+#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	/* Allow single-page allocations in system memory to save
+	 * carveout space and avoid extra iovm mappings */
+ if (nr_page == 1) {
+ if (heap_mask & NVMAP_HEAP_IOVMM)
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
+ /* Calculate size of free physical pages
+ * managed by kernel */
+ unsigned long freeMem =
+ (global_page_state(NR_FREE_PAGES) +
+ global_page_state(NR_FILE_PAGES) -
+ total_swapcache_pages) << PAGE_SHIFT;
+
+ if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ }
+ }
+#endif
+
+ /* This restriction is deprecated as alignments greater than
+ PAGE_SIZE are now correctly handled, but it is retained for
+ AP20 compatibility. */
+ if (h->align > PAGE_SIZE)
+ heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
+#endif
+ /* secure allocations can only be served from secure heaps */
+ if (h->secure)
+ heap_mask &= NVMAP_SECURE_HEAPS;
+
+ if (!heap_mask) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
+
+ while (!h->alloc && *alloc_policy) {
+ unsigned int heap_type;
+
+ heap_type = *alloc_policy++;
+ heap_type &= heap_mask;
+
+ if (!heap_type)
+ continue;
+
+ heap_mask &= ~heap_type;
+
+ while (heap_type && !h->alloc) {
+ unsigned int heap;
+
+ /* iterate possible heaps MSB-to-LSB, since higher-
+ * priority carveouts will have higher usage masks */
+ heap = 1 << __fls(heap_type);
+ alloc_handle(client, h, heap);
+ heap_type &= ~heap;
+ }
+ }
+
+out:
+ err = (h->alloc) ? 0 : err;
+ nvmap_handle_put(h);
+ return err;
+}
+
+void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle_ref *ref;
+ struct nvmap_handle *h;
+ int pins;
+
+ nvmap_ref_lock(client);
+
+ ref = _nvmap_validate_id_locked(client, id);
+ if (!ref) {
+ nvmap_ref_unlock(client);
+ return;
+ }
+
+ BUG_ON(!ref->handle);
+ h = ref->handle;
+
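+	/* every duplicate of this handle within the client holds one 'dupes'
+	 * count; only tear the reference down when the last duplicate goes */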
+ if (atomic_dec_return(&ref->dupes)) {
+ nvmap_ref_unlock(client);
+ goto out;
+ }
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
+ atomic_sub(h->size, &client->iovm_commit);
+
+ if (h->alloc && !h->heap_pgalloc) {
+ mutex_lock(&h->lock);
+ nvmap_carveout_commit_subtract(client,
+ nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+ h->size);
+ mutex_unlock(&h->lock);
+ }
+
+ nvmap_ref_unlock(client);
+
+ if (pins)
+ nvmap_err(client, "%s freeing pinned handle %p\n",
+ current->group_leader->comm, h);
+
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ if (h->owner == client)
+ h->owner = NULL;
+
+ kfree(ref);
+
+out:
+ BUG_ON(!atomic_read(&h->ref));
+ nvmap_handle_put(h);
+}
+
+static void add_handle_ref(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct rb_node **p, *parent = NULL;
+
+ nvmap_ref_lock(client);
+ p = &client->handle_refs.rb_node;
+ while (*p) {
+ struct nvmap_handle_ref *node;
+ parent = *p;
+ node = rb_entry(parent, struct nvmap_handle_ref, node);
+ if (ref->handle > node->handle)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&ref->node, parent, p);
+ rb_insert_color(&ref->node, &client->handle_refs);
+ nvmap_ref_unlock(client);
+}
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size)
+{
+ struct nvmap_handle *h;
+ struct nvmap_handle_ref *ref = NULL;
+
+ if (!client)
+ return ERR_PTR(-EINVAL);
+
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ kfree(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&h->ref, 1);
+ atomic_set(&h->pin, 0);
+ h->owner = client;
+ h->dev = client->dev;
+ BUG_ON(!h->owner);
+ h->size = h->orig_size = size;
+ h->flags = NVMAP_HANDLE_WRITE_COMBINE;
+ mutex_init(&h->lock);
+
+ nvmap_handle_add(client->dev, h);
+
+ atomic_set(&ref->dupes, 1);
+ ref->handle = h;
+ atomic_set(&ref->pin, 0);
+ add_handle_ref(client, ref);
+ return ref;
+}
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle_ref *ref = NULL;
+ struct nvmap_handle *h = NULL;
+
+ BUG_ON(!client || client->dev != nvmap_dev);
+ /* on success, the reference count for the handle should be
+ * incremented, so the success paths will not call nvmap_handle_put */
+ h = nvmap_validate_get(client, id);
+
+ if (!h) {
+ nvmap_debug(client, "%s duplicate handle failed\n",
+ current->group_leader->comm);
+ return ERR_PTR(-EPERM);
+ }
+
+ if (!h->alloc) {
+ nvmap_err(client, "%s duplicating unallocated handle\n",
+ current->group_leader->comm);
+ nvmap_handle_put(h);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, (unsigned long)h);
+
+ if (ref) {
+ /* handle already duplicated in client; just increment
+ * the reference count rather than re-duplicating it */
+ atomic_inc(&ref->dupes);
+ nvmap_ref_unlock(client);
+ return ref;
+ }
+
+ nvmap_ref_unlock(client);
+
+ /* verify that adding this handle to the process' access list
+ * won't exceed the IOVM limit */
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ int oc;
+ oc = atomic_add_return(h->size, &client->iovm_commit);
+ if (oc > client->iovm_limit && !client->super) {
+ atomic_sub(h->size, &client->iovm_commit);
+ nvmap_handle_put(h);
+ nvmap_err(client, "duplicating %p in %s over-commits"
+ " IOVMM space\n", (void *)id,
+ current->group_leader->comm);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ nvmap_handle_put(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (!h->heap_pgalloc) {
+ mutex_lock(&h->lock);
+ nvmap_carveout_commit_add(client,
+ nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+ h->size);
+ mutex_unlock(&h->lock);
+ }
+
+ atomic_set(&ref->dupes, 1);
+ ref->handle = h;
+ atomic_set(&ref->pin, 0);
+ add_handle_ref(client, ref);
+ return ref;
+}
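
nvmap_alloc_handle_id() above walks each policy entry MSB-to-LSB because higher usage-mask bits belong to higher-priority carveouts. A minimal sketch of that walk in isolation; example_walk_heap_mask() is a hypothetical helper and the pr_info() is only there to show the visiting order:

	/* Illustrative sketch only: visit the set bits of a heap mask from the
	 * most significant to the least significant, as the allocator does
	 * when it tries candidate heaps in priority order. */
	static void example_walk_heap_mask(unsigned int heap_type)
	{
		while (heap_type) {
			unsigned int heap = 1 << __fls(heap_type);	/* highest set bit */

			pr_info("trying heap 0x%08x\n", heap);
			heap_type &= ~heap;
		}
	}
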
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
new file mode 100644
index 000000000000..7474f31534ff
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -0,0 +1,1113 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.c
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <mach/nvmap.h>
+#include "nvmap.h"
+#include "nvmap_heap.h"
+#include "nvmap_common.h"
+
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * "carveouts" are platform-defined regions of physically contiguous memory
+ * which are not managed by the OS. a platform may specify multiple carveouts,
+ * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
+ * or reserved regions of main system memory.
+ *
+ * the carveout allocator returns allocations which are physically contiguous.
+ * to reduce external fragmentation, the allocation algorithm implemented in
+ * this file employs 3 strategies for keeping allocations of similar size
+ * grouped together inside the larger heap: the "small", "normal" and "huge"
+ * strategies. the size thresholds (in bytes) for determining which strategy
+ * to employ should be provided by the platform for each heap. it is possible
+ * for a platform to define a heap where only the "normal" strategy is used.
+ *
+ * o "normal" allocations use an address-order first-fit allocator (called
+ * BOTTOM_UP in the code below). each allocation is rounded up to be
+ * an integer multiple of the "small" allocation size.
+ *
+ * o "huge" allocations use an address-order last-fit allocator (called
+ * TOP_DOWN in the code below). like "normal" allocations, each allocation
+ * is rounded up to be an integer multiple of the "small" allocation size.
+ *
+ * o "small" allocations are treated differently: the heap manager maintains
+ * a pool of "small"-sized blocks internally from which allocations less
+ * than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
+ * is requested and none of the buddy sub-heaps is able to service it,
+ * the heap manager will try to allocate a new buddy-heap.
+ *
+ * this allocator is intended to keep "splinters" colocated in the carveout,
+ * and to ensure that the minimum free block size in the carveout (i.e., the
+ * "small" threshold) is still a meaningful size.
+ *
+ */
+
+#define MAX_BUDDY_NR 128 /* maximum buddies in a buddy allocator */
+
+enum direction {
+ TOP_DOWN,
+ BOTTOM_UP
+};
+
+enum block_type {
+ BLOCK_FIRST_FIT, /* block was allocated directly from the heap */
+ BLOCK_BUDDY, /* block was allocated from a buddy sub-heap */
+ BLOCK_EMPTY,
+};
+
+struct heap_stat {
+ size_t free; /* total free size */
+ size_t free_largest; /* largest free block */
+ size_t free_count; /* number of free blocks */
+ size_t total; /* total size */
+ size_t largest; /* largest unique block */
+ size_t count; /* total number of blocks */
+ /* fast compaction attempt counter */
+ unsigned int compaction_count_fast;
+ /* full compaction attempt counter */
+ unsigned int compaction_count_full;
+};
+
+struct buddy_heap;
+
+struct buddy_block {
+ struct nvmap_heap_block block;
+ struct buddy_heap *heap;
+};
+
+struct list_block {
+ struct nvmap_heap_block block;
+ struct list_head all_list;
+ unsigned int mem_prot;
+ unsigned long orig_addr;
+ size_t size;
+ size_t align;
+ struct nvmap_heap *heap;
+ struct list_head free_list;
+};
+
+struct combo_block {
+ union {
+ struct list_block lb;
+ struct buddy_block bb;
+ };
+};
+
+struct buddy_bits {
+ unsigned int alloc:1;
+ unsigned int order:7; /* log2(MAX_BUDDY_NR); */
+};
+
+struct buddy_heap {
+ struct list_block *heap_base;
+ unsigned int nr_buddies;
+ struct list_head buddy_list;
+ struct buddy_bits bitmap[MAX_BUDDY_NR];
+};
+
+struct nvmap_heap {
+ struct list_head all_list;
+ struct list_head free_list;
+ struct mutex lock;
+ struct list_head buddy_list;
+ unsigned int min_buddy_shift;
+ unsigned int buddy_heap_size;
+ unsigned int small_alloc;
+ const char *name;
+ void *arg;
+ struct device dev;
+};
+
+static struct kmem_cache *buddy_heap_cache;
+static struct kmem_cache *block_cache;
+
+static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
+{
+ return heap->heap_base->heap;
+}
+
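+/* returns the smallest buddy order whose block size (in units of
+ * 1 << min_shift bytes) is large enough to hold len bytes. */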
+static inline unsigned int order_of(size_t len, size_t min_shift)
+{
+ len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
+ return fls(len)-1;
+}
+
+/* accumulates block statistics for the buddy heap into *stat; must be
+ * called while holding the parent heap's lock. */
+static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
+{
+ unsigned int index;
+ unsigned int shift = parent_of(heap)->min_buddy_shift;
+
+ for (index = 0; index < heap->nr_buddies;
+ index += (1 << heap->bitmap[index].order)) {
+ size_t curr = 1 << (heap->bitmap[index].order + shift);
+
+ stat->largest = max(stat->largest, curr);
+ stat->total += curr;
+ stat->count++;
+
+ if (!heap->bitmap[index].alloc) {
+ stat->free += curr;
+ stat->free_largest = max(stat->free_largest, curr);
+ stat->free_count++;
+ }
+ }
+}
+
+/* fills *stat with usage statistics for the heap (including any free blocks
+ * in buddy-heap suballocators) and returns the lowest block base address;
+ * the heap lock is taken internally. */
+static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
+{
+ struct buddy_heap *bh;
+ struct list_block *l = NULL;
+ unsigned long base = -1ul;
+
+ memset(stat, 0, sizeof(*stat));
+ mutex_lock(&heap->lock);
+ list_for_each_entry(l, &heap->all_list, all_list) {
+ stat->total += l->size;
+ stat->largest = max(l->size, stat->largest);
+ stat->count++;
+ base = min(base, l->orig_addr);
+ }
+
+ list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
+ buddy_stat(bh, stat);
+ /* blocks backing buddy heaps also appear on all_list, so
+ * their size and count were added twice above; subtract
+ * the duplicated stats */
+ stat->total -= bh->heap_base->size;
+ stat->count--;
+ }
+
+ list_for_each_entry(l, &heap->free_list, free_list) {
+ stat->free += l->size;
+ stat->free_count++;
+ stat->free_largest = max(l->size, stat->free_largest);
+ }
+ mutex_unlock(&heap->lock);
+
+ return base;
+}
+
+static ssize_t heap_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t heap_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static struct device_attribute heap_stat_total_max =
+ __ATTR(total_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_count =
+ __ATTR(total_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_size =
+ __ATTR(total_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_max =
+ __ATTR(free_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_count =
+ __ATTR(free_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_size =
+ __ATTR(free_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_base =
+ __ATTR(base, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_attr_name =
+ __ATTR(name, S_IRUGO, heap_name_show, NULL);
+
+static struct attribute *heap_stat_attrs[] = {
+ &heap_stat_total_max.attr,
+ &heap_stat_total_count.attr,
+ &heap_stat_total_size.attr,
+ &heap_stat_free_max.attr,
+ &heap_stat_free_count.attr,
+ &heap_stat_free_size.attr,
+ &heap_stat_base.attr,
+ &heap_attr_name.attr,
+ NULL,
+};
+
+static struct attribute_group heap_stat_attr_group = {
+ .attrs = heap_stat_attrs,
+};
+
+static ssize_t heap_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ return sprintf(buf, "%s\n", heap->name);
+}
+
+static ssize_t heap_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ struct heap_stat stat;
+ unsigned long base;
+
+ base = heap_stat(heap, &stat);
+
+ if (attr == &heap_stat_total_max)
+ return sprintf(buf, "%zu\n", stat.largest);
+ else if (attr == &heap_stat_total_count)
+ return sprintf(buf, "%zu\n", stat.count);
+ else if (attr == &heap_stat_total_size)
+ return sprintf(buf, "%zu\n", stat.total);
+ else if (attr == &heap_stat_free_max)
+ return sprintf(buf, "%zu\n", stat.free_largest);
+ else if (attr == &heap_stat_free_count)
+ return sprintf(buf, "%zu\n", stat.free_count);
+ else if (attr == &heap_stat_free_size)
+ return sprintf(buf, "%zu\n", stat.free);
+ else if (attr == &heap_stat_base)
+ return sprintf(buf, "%08lx\n", base);
+ else
+ return -EINVAL;
+}
+#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
+ size_t size, size_t align,
+ unsigned int mem_prot)
+{
+ unsigned int index = 0;
+ unsigned int min_shift = parent_of(heap)->min_buddy_shift;
+ unsigned int order = order_of(size, min_shift);
+ unsigned int align_mask;
+ unsigned int best = heap->nr_buddies;
+ struct buddy_block *b;
+
+ if (heap->heap_base->mem_prot != mem_prot)
+ return NULL;
+
+ align = max(align, (size_t)(1 << min_shift));
+ align_mask = (align >> min_shift) - 1;
+
+ for (index = 0; index < heap->nr_buddies;
+ index += (1 << heap->bitmap[index].order)) {
+
+ if (heap->bitmap[index].alloc || (index & align_mask) ||
+ (heap->bitmap[index].order < order))
+ continue;
+
+ if (best == heap->nr_buddies ||
+ heap->bitmap[index].order < heap->bitmap[best].order)
+ best = index;
+
+ if (heap->bitmap[best].order == order)
+ break;
+ }
+
+ if (best == heap->nr_buddies)
+ return NULL;
+
+ b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
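+ /* split the chosen free block in half repeatedly until it matches the
+ * requested order; each split leaves its upper half as a free buddy */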
+ while (heap->bitmap[best].order != order) {
+ unsigned int buddy;
+ heap->bitmap[best].order--;
+ buddy = best ^ (1 << heap->bitmap[best].order);
+ heap->bitmap[buddy].order = heap->bitmap[best].order;
+ heap->bitmap[buddy].alloc = 0;
+ }
+ heap->bitmap[best].alloc = 1;
+ b->block.base = heap->heap_base->block.base + (best << min_shift);
+ b->heap = heap;
+ b->block.type = BLOCK_BUDDY;
+ return &b->block;
+}
+#endif
+
+static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
+{
+ struct buddy_block *b = container_of(block, struct buddy_block, block);
+ struct buddy_heap *h = b->heap;
+ unsigned int min_shift = parent_of(h)->min_buddy_shift;
+ unsigned int index;
+
+ index = (block->base - h->heap_base->block.base) >> min_shift;
+ h->bitmap[index].alloc = 0;
+
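+ /* coalesce upwards: while the freed block's buddy is also free and of
+ * the same order, merge the pair into a block of the next order */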
+ for (;;) {
+ unsigned int buddy = index ^ (1 << h->bitmap[index].order);
+ if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
+ h->bitmap[buddy].order != h->bitmap[index].order)
+ break;
+
+ h->bitmap[buddy].order++;
+ h->bitmap[index].order++;
+ index = min(buddy, index);
+ }
+
+ kmem_cache_free(block_cache, b);
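+ /* if the whole buddy heap is free again, return it so the caller can
+ * release the underlying list block back to the parent heap */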
+ if ((1 << h->bitmap[0].order) == h->nr_buddies)
+ return h;
+
+ return NULL;
+}
+
+
+/*
+ * base_max limits position of allocated chunk in memory.
+ * if base_max is 0 then there is no such limitation.
+ */
+static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
+ size_t len, size_t align,
+ unsigned int mem_prot,
+ unsigned long base_max)
+{
+ struct list_block *b = NULL;
+ struct list_block *i = NULL;
+ struct list_block *rem = NULL;
+ unsigned long fix_base;
+ enum direction dir;
+
+ /* since pages are only mappable with one cache attribute,
+ * and most allocations from carveout heaps are DMA coherent
+ * (i.e., non-cacheable), round cacheable allocations up to
+ * a page boundary to ensure that the physical pages will
+ * only be mapped one way. */
+ if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
+ mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
+ align = max_t(size_t, align, PAGE_SIZE);
+ len = PAGE_ALIGN(len);
+ }
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+ dir = BOTTOM_UP;
+#else
+ dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
+#endif
+
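+ /* BOTTOM_UP is an address-ordered first-fit scan; TOP_DOWN is a
+ * last-fit scan from the high end of the free list */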
+ if (dir == BOTTOM_UP) {
+ list_for_each_entry(i, &heap->free_list, free_list) {
+ size_t fix_size;
+ fix_base = ALIGN(i->block.base, align);
+ fix_size = i->size - (fix_base - i->block.base);
+
+ /* needed for compaction: a relocated chunk
+ * must never move to a higher address */
+ if (base_max && fix_base > base_max)
+ break;
+
+ if (fix_size >= len) {
+ b = i;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_reverse(i, &heap->free_list, free_list) {
+ if (i->size >= len) {
+ fix_base = i->block.base + i->size - len;
+ fix_base &= ~(align-1);
+ if (fix_base >= i->block.base) {
+ b = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!b)
+ return NULL;
+
+ if (dir == BOTTOM_UP)
+ b->block.type = BLOCK_FIRST_FIT;
+
+ /* split free block */
+ if (b->block.base != fix_base) {
+ /* insert a new free block before allocated */
+ rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!rem) {
+ b->orig_addr = b->block.base;
+ b->block.base = fix_base;
+ b->size -= (b->block.base - b->orig_addr);
+ goto out;
+ }
+
+ rem->block.type = BLOCK_EMPTY;
+ rem->block.base = b->block.base;
+ rem->orig_addr = rem->block.base;
+ rem->size = fix_base - rem->block.base;
+ b->block.base = fix_base;
+ b->orig_addr = fix_base;
+ b->size -= rem->size;
+ list_add_tail(&rem->all_list, &b->all_list);
+ list_add_tail(&rem->free_list, &b->free_list);
+ }
+
+ b->orig_addr = b->block.base;
+
+ if (b->size > len) {
+ /* insert a new free block after allocated */
+ rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!rem)
+ goto out;
+
+ rem->block.type = BLOCK_EMPTY;
+ rem->block.base = b->block.base + len;
+ rem->size = b->size - len;
+ BUG_ON(rem->size > b->size);
+ rem->orig_addr = rem->block.base;
+ b->size = len;
+ list_add(&rem->all_list, &b->all_list);
+ list_add(&rem->free_list, &b->free_list);
+ }
+
+out:
+ list_del(&b->free_list);
+ b->heap = heap;
+ b->mem_prot = mem_prot;
+ b->align = align;
+ return &b->block;
+}
+
+#ifdef DEBUG_FREE_LIST
+static void freelist_debug(struct nvmap_heap *heap, const char *title,
+ struct list_block *token)
+{
+ int i;
+ struct list_block *n;
+
+ dev_dbg(&heap->dev, "%s\n", title);
+ i = 0;
+ list_for_each_entry(n, &heap->free_list, free_list) {
+ dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i, (void *)n->orig_addr,
+ (void *)(n->orig_addr + n->size),
+ (n == token) ? "<--" : "");
+ i++;
+ }
+}
+#else
+#define freelist_debug(_heap, _title, _token) do { } while (0)
+#endif
+
+static struct list_block *do_heap_free(struct nvmap_heap_block *block)
+{
+ struct list_block *b = container_of(block, struct list_block, block);
+ struct list_block *n = NULL;
+ struct nvmap_heap *heap = b->heap;
+
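+ /* undo any alignment adjustment made at allocation time so the block's
+ * full original extent is returned to the free list */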
+ BUG_ON(b->block.base > b->orig_addr);
+ b->size += (b->block.base - b->orig_addr);
+ b->block.base = b->orig_addr;
+
+ freelist_debug(heap, "free list before", b);
+
+ /* Find position of first free block to the right of freed one */
+ list_for_each_entry(n, &heap->free_list, free_list) {
+ if (n->block.base > b->block.base)
+ break;
+ }
+
+ /* Add freed block before found free one */
+ list_add_tail(&b->free_list, &n->free_list);
+ BUG_ON(list_empty(&b->all_list));
+
+ freelist_debug(heap, "free list pre-merge", b);
+
+ /* merge freed block with next if they connect
+ * freed block becomes bigger, next one is destroyed */
+ if (!list_is_last(&b->free_list, &heap->free_list)) {
+ n = list_first_entry(&b->free_list, struct list_block, free_list);
+ if (n->block.base == b->block.base + b->size) {
+ list_del(&n->all_list);
+ list_del(&n->free_list);
+ BUG_ON(b->orig_addr >= n->orig_addr);
+ b->size += n->size;
+ kmem_cache_free(block_cache, n);
+ }
+ }
+
+ /* merge freed block with prev if they connect
+ * previous free block becomes bigger, freed one is destroyed */
+ if (b->free_list.prev != &heap->free_list) {
+ n = list_entry(b->free_list.prev, struct list_block, free_list);
+ if (n->block.base + n->size == b->block.base) {
+ list_del(&b->all_list);
+ list_del(&b->free_list);
+ BUG_ON(n->orig_addr >= b->orig_addr);
+ n->size += b->size;
+ kmem_cache_free(block_cache, b);
+ b = n;
+ }
+ }
+
+ freelist_debug(heap, "free list after", b);
+ b->block.type = BLOCK_EMPTY;
+ return b;
+}
+
+#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+
+static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
+ size_t len, size_t align,
+ unsigned int mem_prot)
+{
+ struct buddy_heap *bh;
+ struct nvmap_heap_block *b = NULL;
+
+ list_for_each_entry(bh, &h->buddy_list, buddy_list) {
+ b = buddy_alloc(bh, len, align, mem_prot);
+ if (b)
+ return b;
+ }
+
+ /* no buddy heaps could service this allocation: try to create a new
+ * buddy heap instead */
+ bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
+ if (!bh)
+ return NULL;
+
+ b = do_heap_alloc(h, h->buddy_heap_size,
+ h->buddy_heap_size, mem_prot, 0);
+ if (!b) {
+ kmem_cache_free(buddy_heap_cache, bh);
+ return NULL;
+ }
+
+ bh->heap_base = container_of(b, struct list_block, block);
+ bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
+ bh->bitmap[0].alloc = 0;
+ bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
+ list_add_tail(&bh->buddy_list, &h->buddy_list);
+ return buddy_alloc(bh, len, align, mem_prot);
+}
+
+#endif
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+
+static int do_heap_copy_listblock(struct nvmap_device *dev,
+ unsigned long dst_base, unsigned long src_base, size_t len)
+{
+ pte_t **pte_src = NULL;
+ pte_t **pte_dst = NULL;
+ void *addr_src = NULL;
+ void *addr_dst = NULL;
+ unsigned long kaddr_src;
+ unsigned long kaddr_dst;
+ unsigned long phys_src = src_base;
+ unsigned long phys_dst = dst_base;
+ unsigned long pfn_src;
+ unsigned long pfn_dst;
+ int error = 0;
+
+ pgprot_t prot = pgprot_writecombine(pgprot_kernel);
+
+ int page;
+
+ pte_src = nvmap_alloc_pte(dev, &addr_src);
+ if (IS_ERR(pte_src)) {
+ pr_err("Error when allocating pte_src\n");
+ pte_src = NULL;
+ error = -1;
+ goto fail;
+ }
+
+ pte_dst = nvmap_alloc_pte(dev, &addr_dst);
+ if (IS_ERR(pte_dst)) {
+ pr_err("Error while allocating pte_dst\n");
+ pte_dst = NULL;
+ error = -1;
+ goto fail;
+ }
+
+ kaddr_src = (unsigned long)addr_src;
+ kaddr_dst = (unsigned long)addr_dst;
+
+ BUG_ON(phys_dst > phys_src);
+ BUG_ON((phys_src & PAGE_MASK) != phys_src);
+ BUG_ON((phys_dst & PAGE_MASK) != phys_dst);
+ BUG_ON((len & PAGE_MASK) != len);
+
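+ /* map the source and destination through the two scratch PTEs one page
+ * at a time and copy the data via write-combined mappings */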
+ for (page = 0; page < (len >> PAGE_SHIFT) ; page++) {
+
+ pfn_src = __phys_to_pfn(phys_src) + page;
+ pfn_dst = __phys_to_pfn(phys_dst) + page;
+
+ set_pte_at(&init_mm, kaddr_src, *pte_src,
+ pfn_pte(pfn_src, prot));
+ flush_tlb_kernel_page(kaddr_src);
+
+ set_pte_at(&init_mm, kaddr_dst, *pte_dst,
+ pfn_pte(pfn_dst, prot));
+ flush_tlb_kernel_page(kaddr_dst);
+
+ memcpy(addr_dst, addr_src, PAGE_SIZE);
+ }
+
+fail:
+ if (pte_src)
+ nvmap_free_pte(dev, pte_src);
+ if (pte_dst)
+ nvmap_free_pte(dev, pte_dst);
+ return error;
+}
+
+
+static struct nvmap_heap_block *do_heap_relocate_listblock(
+ struct list_block *block, bool fast)
+{
+ struct nvmap_heap_block *heap_block = &block->block;
+ struct nvmap_heap_block *heap_block_new = NULL;
+ struct nvmap_heap *heap = block->heap;
+ struct nvmap_handle *handle = heap_block->handle;
+ unsigned long src_base = heap_block->base;
+ unsigned long dst_base;
+ size_t src_size = block->size;
+ size_t src_align = block->align;
+ unsigned int src_prot = block->mem_prot;
+ int error = 0;
+ struct nvmap_share *share;
+
+ if (!handle) {
+ pr_err("INVALID HANDLE!\n");
+ return NULL;
+ }
+
+ mutex_lock(&handle->lock);
+
+ share = nvmap_get_share_from_dev(handle->dev);
+
+ /* TODO: It is possible to use only handle lock and no share
+ * pin_lock, but then we'll need to lock every handle during
+ * each pinning operation. Need to estimate performance impact
+ * if we decide to simplify locking this way. */
+ mutex_lock(&share->pin_lock);
+
+ /* abort if block is pinned */
+ if (atomic_read(&handle->pin))
+ goto fail;
+ /* abort if block is mapped */
+ if (handle->usecount)
+ goto fail;
+
+ if (fast) {
+ /* Fast compaction path - first allocate, then free. */
+ heap_block_new = do_heap_alloc(heap, src_size, src_align,
+ src_prot, src_base);
+ if (heap_block_new)
+ do_heap_free(heap_block);
+ else
+ goto fail;
+ } else {
+ /* Full compaction path: first free, then allocate.
+ * It is slower but provides the best compaction results. */
+ do_heap_free(heap_block);
+ heap_block_new = do_heap_alloc(heap, src_size, src_align,
+ src_prot, src_base);
+ /* Allocation should always succeed */
+ BUG_ON(!heap_block_new);
+ }
+
+ /* update handle */
+ handle->carveout = heap_block_new;
+ heap_block_new->handle = handle;
+
+ /* copy source data to new block location */
+ dst_base = heap_block_new->base;
+
+ /* the new allocation should always land at a lower address */
+ BUG_ON(dst_base >= src_base);
+
+ error = do_heap_copy_listblock(handle->dev,
+ dst_base, src_base, src_size);
+ BUG_ON(error);
+
+fail:
+ mutex_unlock(&share->pin_lock);
+ mutex_unlock(&handle->lock);
+ return heap_block_new;
+}
+
+static void nvmap_heap_compact(struct nvmap_heap *heap,
+ size_t requested_size, bool fast)
+{
+ struct list_block *block_current = NULL;
+ struct list_block *block_prev = NULL;
+ struct list_block *block_next = NULL;
+
+ struct list_head *ptr, *ptr_prev, *ptr_next;
+ int relocation_count = 0;
+
+ ptr = heap->all_list.next;
+
+ /* walk all blocks in address order; for each free block, try to
+ * relocate its allocated neighbours so that free space merges and
+ * migrates toward the end of the heap */
+ while (ptr != &heap->all_list) {
+ block_current = list_entry(ptr, struct list_block, all_list);
+
+ ptr_prev = ptr->prev;
+ ptr_next = ptr->next;
+
+ if (block_current->block.type != BLOCK_EMPTY) {
+ ptr = ptr_next;
+ continue;
+ }
+
+ if (fast && block_current->size >= requested_size)
+ break;
+
+ /* relocate prev block */
+ if (ptr_prev != &heap->all_list) {
+
+ block_prev = list_entry(ptr_prev,
+ struct list_block, all_list);
+
+ BUG_ON(block_prev->block.type != BLOCK_FIRST_FIT);
+
+ if (do_heap_relocate_listblock(block_prev, true)) {
+
+ /* after relocation the current free block may be
+ * destroyed by merging with the previous free
+ * block; an up-to-date pointer to the new free
+ * block is recovered from the next block */
+ relocation_count++;
+ ptr = ptr_next->prev;
+ continue;
+ }
+ }
+
+ if (ptr_next != &heap->all_list) {
+
+ block_next = list_entry(ptr_next,
+ struct list_block, all_list);
+
+ BUG_ON(block_next->block.type != BLOCK_FIRST_FIT);
+
+ if (do_heap_relocate_listblock(block_next, fast)) {
+ ptr = ptr_prev->next;
+ relocation_count++;
+ continue;
+ }
+ }
+ ptr = ptr_next;
+ }
+ pr_err("Relocated %d chunks\n", relocation_count);
+}
+#endif
+
+void nvmap_usecount_inc(struct nvmap_handle *h)
+{
+ if (h->alloc && !h->heap_pgalloc) {
+ mutex_lock(&h->lock);
+ h->usecount++;
+ mutex_unlock(&h->lock);
+ } else {
+ h->usecount++;
+ }
+}
+
+
+void nvmap_usecount_dec(struct nvmap_handle *h)
+{
+ h->usecount--;
+}
+
+/* nvmap_heap_alloc: allocates a block for the given handle, sized and
+ * aligned according to handle->size and handle->align. */
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
+ struct nvmap_handle *handle)
+{
+ struct nvmap_heap_block *b;
+ size_t len = handle->size;
+ size_t align = handle->align;
+ unsigned int prot = handle->flags;
+
+ mutex_lock(&h->lock);
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+ /* Align to page size */
+ align = ALIGN(align, PAGE_SIZE);
+ len = ALIGN(len, PAGE_SIZE);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ if (!b) {
+ pr_err("Compaction triggered!\n");
+ nvmap_heap_compact(h, len, true);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ if (!b) {
+ pr_err("Full compaction triggered!\n");
+ nvmap_heap_compact(h, len, false);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ }
+ }
+#else
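+ /* without the compactor, requests no larger than half a buddy heap go
+ * to the buddy sub-allocator; larger requests use the list allocator */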
+ if (len <= h->buddy_heap_size / 2) {
+ b = do_buddy_alloc(h, len, align, prot);
+ } else {
+ if (h->buddy_heap_size)
+ len = ALIGN(len, h->buddy_heap_size);
+ align = max(align, (size_t)L1_CACHE_BYTES);
+ b = do_heap_alloc(h, len, align, prot, 0);
+ }
+#endif
+
+ if (b) {
+ b->handle = handle;
+ handle->carveout = b;
+ }
+ mutex_unlock(&h->lock);
+ return b;
+}
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
+{
+ if (b->type == BLOCK_BUDDY) {
+ struct buddy_block *bb;
+ bb = container_of(b, struct buddy_block, block);
+ return parent_of(bb->heap);
+ } else {
+ struct list_block *lb;
+ lb = container_of(b, struct list_block, block);
+ return lb->heap;
+ }
+}
+
+/* nvmap_heap_free: frees block b */
+void nvmap_heap_free(struct nvmap_heap_block *b)
+{
+ struct buddy_heap *bh = NULL;
+ struct nvmap_heap *h = nvmap_block_to_heap(b);
+ struct list_block *lb;
+
+ mutex_lock(&h->lock);
+ if (b->type == BLOCK_BUDDY)
+ bh = do_buddy_free(b);
+ else {
+ lb = container_of(b, struct list_block, block);
+ nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
+ do_heap_free(b);
+ }
+
+ if (bh) {
+ list_del(&bh->buddy_list);
+ mutex_unlock(&h->lock);
+ nvmap_heap_free(&bh->heap_base->block);
+ kmem_cache_free(buddy_heap_cache, bh);
+ } else
+ mutex_unlock(&h->lock);
+}
+
+
+static void heap_release(struct device *heap)
+{
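+ /* nothing to do: the heap itself is freed in nvmap_heap_destroy() */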
+}
+
+/* nvmap_heap_create: create a heap object of len bytes, starting from
+ * address base.
+ *
+ * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
+ * of the buddy heap size will use a buddy sub-allocator, where each buddy
+ * heap is buddy_size bytes (should be a power of 2). all other allocations
+ * will be rounded up to be a multiple of buddy_size bytes.
+ */
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+ phys_addr_t base, size_t len,
+ size_t buddy_size, void *arg)
+{
+ struct nvmap_heap *h = NULL;
+ struct list_block *l = NULL;
+
+ if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
+ dev_warn(parent, "%s: buddy_size %u too small\n", __func__,
+ buddy_size);
+ buddy_size = 0;
+ } else if (WARN_ON(buddy_size >= len)) {
+ dev_warn(parent, "%s: buddy_size %u too large\n", __func__,
+ buddy_size);
+ buddy_size = 0;
+ } else if (WARN_ON(buddy_size & (buddy_size - 1))) {
+ dev_warn(parent, "%s: buddy_size %u not a power of 2\n",
+ __func__, buddy_size);
+ buddy_size = 1 << (ilog2(buddy_size) + 1);
+ }
+
+ if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
+ unsigned long orig = base;
+ dev_warn(parent, "%s: base address %p not aligned to "
+ "buddy_size %u\n", __func__, (void *)base, buddy_size);
+ base = ALIGN(base, buddy_size);
+ len -= (base - orig);
+ }
+
+ if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
+ dev_warn(parent, "%s: length %u not aligned to "
+ "buddy_size %u\n", __func__, len, buddy_size);
+ len &= ~(buddy_size - 1);
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h) {
+ dev_err(parent, "%s: out of memory\n", __func__);
+ goto fail_alloc;
+ }
+
+ l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!l) {
+ dev_err(parent, "%s: out of memory\n", __func__);
+ goto fail_alloc;
+ }
+
+ dev_set_name(&h->dev, "heap-%s", name);
+ h->name = name;
+ h->arg = arg;
+ h->dev.parent = parent;
+ h->dev.driver = NULL;
+ h->dev.release = heap_release;
+ if (device_register(&h->dev)) {
+ dev_err(parent, "%s: failed to register %s\n", __func__,
+ dev_name(&h->dev));
+ goto fail_alloc;
+ }
+ if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
+ dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
+ goto fail_register;
+ }
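+ /* small_alloc is the bottom-up/top-down split threshold used by
+ * do_heap_alloc: twice the buddy heap size or 1/256 of the heap,
+ * whichever is larger */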
+ h->small_alloc = max(2 * buddy_size, len / 256);
+ h->buddy_heap_size = buddy_size;
+ if (buddy_size)
+ h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
+ INIT_LIST_HEAD(&h->free_list);
+ INIT_LIST_HEAD(&h->buddy_list);
+ INIT_LIST_HEAD(&h->all_list);
+ mutex_init(&h->lock);
+ l->block.base = base;
+ l->block.type = BLOCK_EMPTY;
+ l->size = len;
+ l->orig_addr = base;
+ list_add_tail(&l->free_list, &h->free_list);
+ list_add_tail(&l->all_list, &h->all_list);
+
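+ /* flush any stale cache lines covering the carveout range before it is
+ * handed out, since most allocations will be mapped uncached */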
+ inner_flush_cache_all();
+ outer_flush_range(base, base + len);
+ wmb();
+ return h;
+
+fail_register:
+ device_unregister(&h->dev);
+fail_alloc:
+ if (l)
+ kmem_cache_free(block_cache, l);
+ kfree(h);
+ return NULL;
+}
+
+void *nvmap_heap_device_to_arg(struct device *dev)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ return heap->arg;
+}
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap)
+{
+ return heap->arg;
+}
+
+/* nvmap_heap_destroy: frees all resources in heap */
+void nvmap_heap_destroy(struct nvmap_heap *heap)
+{
+ WARN_ON(!list_empty(&heap->buddy_list));
+
+ sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
+ device_unregister(&heap->dev);
+
+ while (!list_empty(&heap->buddy_list)) {
+ struct buddy_heap *b;
+ b = list_first_entry(&heap->buddy_list, struct buddy_heap,
+ buddy_list);
+ list_del(&b->buddy_list);
+ nvmap_heap_free(&b->heap_base->block);
+ kmem_cache_free(buddy_heap_cache, b);
+ }
+
+ WARN_ON(!list_is_singular(&heap->all_list));
+ while (!list_empty(&heap->all_list)) {
+ struct list_block *l;
+ l = list_first_entry(&heap->all_list, struct list_block,
+ all_list);
+ list_del(&l->all_list);
+ kmem_cache_free(block_cache, l);
+ }
+
+ kfree(heap);
+}
+
+/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp)
+{
+ return sysfs_create_group(&heap->dev.kobj, grp);
+}
+
+/* nvmap_heap_remove_group: removes the attribute_group grp */
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp)
+{
+ sysfs_remove_group(&heap->dev.kobj, grp);
+}
+
+int nvmap_heap_init(void)
+{
+ BUG_ON(buddy_heap_cache != NULL);
+ buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
+ if (!buddy_heap_cache) {
+ pr_err("%s: unable to create buddy heap cache\n", __func__);
+ return -ENOMEM;
+ }
+
+ block_cache = KMEM_CACHE(combo_block, 0);
+ if (!block_cache) {
+ kmem_cache_destroy(buddy_heap_cache);
+ pr_err("%s: unable to create block cache\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void nvmap_heap_deinit(void)
+{
+ if (buddy_heap_cache)
+ kmem_cache_destroy(buddy_heap_cache);
+ if (block_cache)
+ kmem_cache_destroy(block_cache);
+
+ block_cache = NULL;
+ buddy_heap_cache = NULL;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h
new file mode 100644
index 000000000000..158a1fa3d33c
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.h
@@ -0,0 +1,68 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.h
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVMAP_HEAP_H
+#define __NVMAP_HEAP_H
+
+struct device;
+struct nvmap_heap;
+struct attribute_group;
+
+struct nvmap_heap_block {
+ phys_addr_t base;
+ unsigned int type;
+ struct nvmap_handle *handle;
+};
+
+#define NVMAP_HEAP_MIN_BUDDY_SIZE 8192
+
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+ phys_addr_t base, size_t len,
+ unsigned int buddy_size, void *arg);
+
+void nvmap_heap_destroy(struct nvmap_heap *heap);
+
+void *nvmap_heap_device_to_arg(struct device *dev);
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap);
+
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap,
+ struct nvmap_handle *handle);
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
+
+void nvmap_heap_free(struct nvmap_heap_block *block);
+
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp);
+
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp);
+
+int __init nvmap_heap_init(void);
+
+void nvmap_heap_deinit(void);
+
+int nvmap_flush_heap_block(struct nvmap_client *client,
+ struct nvmap_heap_block *block, size_t len, unsigned int prot);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
new file mode 100644
index 000000000000..58bc71d50469
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -0,0 +1,749 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap_ioctl.h"
+#include "nvmap.h"
+#include "nvmap_common.h"
+
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+ int is_read, unsigned long h_offs,
+ unsigned long sys_addr, unsigned long h_stride,
+ unsigned long sys_stride, unsigned long elem_size,
+ unsigned long count);
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op);
+
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
+{
+ struct nvmap_pin_handle op;
+ struct nvmap_handle *h;
+ unsigned long on_stack[16];
+ unsigned long *refs;
+ unsigned long __user *output;
+ unsigned int i;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.count)
+ return -EINVAL;
+
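+ /* copy the handle list from user space; small lists reuse the on-stack
+ * buffer to avoid a kmalloc in the common case */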
+ if (op.count > 1) {
+ size_t bytes = op.count * sizeof(unsigned long *);
+
+ if (op.count > ARRAY_SIZE(on_stack))
+ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
+ else
+ refs = on_stack;
+
+ if (!refs)
+ return -ENOMEM;
+
+ if (copy_from_user(refs, (void *)op.handles, bytes)) {
+ err = -EFAULT;
+ goto out;
+ }
+ } else {
+ refs = on_stack;
+ on_stack[0] = (unsigned long)op.handles;
+ }
+
+ if (is_pin)
+ err = nvmap_pin_ids(filp->private_data, op.count, refs);
+ else
+ nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+ /* skip the output stage on unpin */
+ if (err || !is_pin)
+ goto out;
+
+ /* it is guaranteed that if nvmap_pin_ids returns 0 that
+ * all of the handle_ref objects are valid, so dereferencing
+ * directly here is safe */
+ if (op.count > 1)
+ output = (unsigned long __user *)op.addr;
+ else {
+ struct nvmap_pin_handle __user *tmp = arg;
+ output = (unsigned long __user *)&(tmp->addr);
+ }
+
+ if (!output)
+ goto out;
+
+ for (i = 0; i < op.count && !err; i++) {
+ unsigned long addr;
+
+ h = (struct nvmap_handle *)refs[i];
+
+ if (h->heap_pgalloc && h->pgalloc.contig)
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc)
+ addr = h->pgalloc.area->iovm_start;
+ else
+ addr = h->carveout->base;
+
+ err = put_user(addr, &output[i]);
+ }
+
+ if (err)
+ nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+out:
+ if (refs != on_stack)
+ kfree(refs);
+
+ return err;
+}
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_create_handle op;
+ struct nvmap_handle *h = NULL;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+
+ if (!h)
+ return -EPERM;
+
+ op.id = (__u32)h;
+ if (client == h->owner)
+ h->global = true;
+
+ nvmap_handle_put(h);
+
+ return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
+}
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
+{
+ struct nvmap_alloc_handle op;
+ struct nvmap_client *client = filp->private_data;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ if (op.align & (op.align - 1))
+ return -EINVAL;
+
+ /* user-space handles are aligned to page boundaries, to prevent
+ * data leakage. */
+ op.align = max_t(size_t, op.align, PAGE_SIZE);
+
+ return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
+ op.align, op.flags);
+}
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
+{
+ struct nvmap_create_handle op;
+ struct nvmap_handle_ref *ref = NULL;
+ struct nvmap_client *client = filp->private_data;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!client)
+ return -ENODEV;
+
+ if (cmd == NVMAP_IOC_CREATE) {
+ ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
+ if (!IS_ERR(ref))
+ ref->handle->orig_size = op.size;
+ } else if (cmd == NVMAP_IOC_FROM_ID) {
+ ref = nvmap_duplicate_handle_id(client, op.id);
+ } else {
+ return -EINVAL;
+ }
+
+ if (IS_ERR(ref))
+ return PTR_ERR(ref);
+
+ op.handle = nvmap_ref_to_id(ref);
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ err = -EFAULT;
+ nvmap_free_handle_id(client, op.handle);
+ }
+
+ return err;
+}
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_map_caller op;
+ struct nvmap_vma_priv *vpriv;
+ struct vm_area_struct *vma;
+ struct nvmap_handle *h = NULL;
+ unsigned int cache_flags;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+
+ if (!h)
+ return -EPERM;
+
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, op.addr);
+ if (!vma || !vma->vm_private_data) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (op.offset & ~PAGE_MASK) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if ((op.offset + op.length) > h->size) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ vpriv = vma->vm_private_data;
+ BUG_ON(!vpriv);
+
+ /* the VMA must exactly match the requested mapping operation, and the
+ * VMA that is targeted must have been created by this driver
+ */
+ if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
+ (vma->vm_end-vma->vm_start != op.length)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ /* verify that each mmap() system call creates a unique VMA */
+
+ if (vpriv->handle && (h == vpriv->handle)) {
+ goto out;
+ } else if (vpriv->handle) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ nvmap_usecount_inc(h);
+
+ if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
+ nvmap_usecount_dec(h);
+ err = -EFAULT;
+ goto out;
+ }
+
+ vpriv->handle = h;
+ vpriv->offs = op.offset;
+
+ cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
+ if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
+ cache_flags == NVMAP_HANDLE_CACHEABLE) &&
+ (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+ h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
+ if (h->size & ~PAGE_MASK) {
+ pr_err("%s: attempt to convert a buffer from uc/wc to"
+ " wb whose size is not a multiple of page size;"
+ " request ignored\n", __func__);
+ } else {
+ unsigned int nr_page = h->size >> PAGE_SHIFT;
+ wmb();
+ /* override allocation time cache coherency attributes. */
+ h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
+ h->flags |= cache_flags;
+
+ /* update page attributes if the memory was allocated
+ * from system heap pages
+ */
+ if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
+ h->heap_pgalloc)
+ set_pages_array_iwb(h->pgalloc.pages, nr_page);
+ else if (h->heap_pgalloc)
+ set_pages_array_wb(h->pgalloc.pages, nr_page);
+ }
+ }
+ vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
+
+out:
+ up_read(&current->mm->mmap_sem);
+
+ if (err)
+ nvmap_handle_put(h);
+ return err;
+}
+
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
+{
+ struct nvmap_handle_param op;
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_handle *h;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ h = nvmap_get_handle_id(client, op.handle);
+ if (!h)
+ return -EINVAL;
+
+ switch (op.param) {
+ case NVMAP_HANDLE_PARAM_SIZE:
+ op.result = h->orig_size;
+ break;
+ case NVMAP_HANDLE_PARAM_ALIGNMENT:
+ mutex_lock(&h->lock);
+ if (!h->alloc)
+ op.result = 0;
+ else if (h->heap_pgalloc)
+ op.result = PAGE_SIZE;
+ else if (h->carveout->base)
+ op.result = (h->carveout->base & -h->carveout->base);
+ else
+ op.result = SZ_4M;
+ mutex_unlock(&h->lock);
+ break;
+ case NVMAP_HANDLE_PARAM_BASE:
+ if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
+ op.result = -1ul;
+ else if (!h->heap_pgalloc) {
+ mutex_lock(&h->lock);
+ op.result = h->carveout->base;
+ mutex_unlock(&h->lock);
+ } else if (h->pgalloc.contig)
+ op.result = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->pgalloc.area)
+ op.result = h->pgalloc.area->iovm_start;
+ else
+ op.result = -1ul;
+ break;
+ case NVMAP_HANDLE_PARAM_HEAP:
+ if (!h->alloc)
+ op.result = 0;
+ else if (!h->heap_pgalloc) {
+ mutex_lock(&h->lock);
+ op.result = nvmap_carveout_usage(client, h->carveout);
+ mutex_unlock(&h->lock);
+ } else if (h->pgalloc.contig)
+ op.result = NVMAP_HEAP_SYSMEM;
+ else
+ op.result = NVMAP_HEAP_IOVMM;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (!err && copy_to_user(arg, &op, sizeof(op)))
+ err = -EFAULT;
+
+ nvmap_handle_put(h);
+ return err;
+}
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_rw_handle __user *uarg = arg;
+ struct nvmap_rw_handle op;
+ struct nvmap_handle *h;
+ ssize_t copied;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle || !op.addr || !op.count || !op.elem_size)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+ if (!h)
+ return -EPERM;
+
+ nvmap_usecount_inc(h);
+
+ copied = rw_handle(client, h, is_read, op.offset,
+ (unsigned long)op.addr, op.hmem_stride,
+ op.user_stride, op.elem_size, op.count);
+
+ if (copied < 0) {
+ err = copied;
+ copied = 0;
+ } else if (copied < (op.count * op.elem_size))
+ err = -EINTR;
+
+ __put_user(copied, &uarg->count);
+
+ nvmap_usecount_dec(h);
+
+ nvmap_handle_put(h);
+
+ return err;
+}
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_cache_op op;
+ struct vm_area_struct *vma;
+ struct nvmap_vma_priv *vpriv;
+ unsigned long start;
+ unsigned long end;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
+ op.op > NVMAP_CACHE_OP_WB_INV)
+ return -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->active_mm, (unsigned long)op.addr);
+ if (!vma || !is_nvmap_vma(vma) ||
+ (unsigned long)op.addr + op.len > vma->vm_end) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
+
+ if ((unsigned long)vpriv->handle != op.handle) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ start = (unsigned long)op.addr - vma->vm_start;
+ end = start + op.len;
+
+ err = cache_maint(client, vpriv->handle, start, end, op.op);
+out:
+ up_read(&current->mm->mmap_sem);
+ return err;
+}
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg)
+{
+ struct nvmap_client *client = filp->private_data;
+
+ if (!arg)
+ return 0;
+
+ nvmap_free_handle_id(client, arg);
+ return 0;
+}
+
+static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
+{
+ if (op == NVMAP_CACHE_OP_WB_INV)
+ dmac_flush_range(vaddr, vaddr + size);
+ else if (op == NVMAP_CACHE_OP_INV)
+ dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
+ else
+ dmac_map_area(vaddr, size, DMA_TO_DEVICE);
+}
+
+static void outer_cache_maint(unsigned int op, unsigned long paddr, size_t size)
+{
+ if (op == NVMAP_CACHE_OP_WB_INV)
+ outer_flush_range(paddr, paddr + size);
+ else if (op == NVMAP_CACHE_OP_INV)
+ outer_inv_range(paddr, paddr + size);
+ else
+ outer_clean_range(paddr, paddr + size);
+}
+
+static void heap_page_cache_maint(struct nvmap_client *client,
+ struct nvmap_handle *h, unsigned long start, unsigned long end,
+ unsigned int op, bool inner, bool outer, pte_t **pte,
+ unsigned long kaddr, pgprot_t prot)
+{
+ struct page *page;
+ unsigned long paddr;
+ unsigned long next;
+ unsigned long off;
+ size_t size;
+
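+ /* walk the handle one page at a time: map each page through the scratch
+ * PTE for inner-cache maintenance and use its physical address for the
+ * outer cache */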
+ while (start < end) {
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
+ off = start & ~PAGE_MASK;
+ size = next - start;
+ paddr = page_to_phys(page) + off;
+
+ if (inner) {
+ void *vaddr = (void *)kaddr + off;
+ BUG_ON(!pte);
+ BUG_ON(!kaddr);
+ set_pte_at(&init_mm, kaddr, *pte,
+ pfn_pte(__phys_to_pfn(paddr), prot));
+ flush_tlb_kernel_page(kaddr);
+ inner_cache_maint(op, vaddr, size);
+ }
+
+ if (outer)
+ outer_cache_maint(op, paddr, size);
+ start = next;
+ }
+}
+
+static bool fast_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op)
+{
+ int ret = false;
+
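+ /* fast path: for large clean/flush requests, clean or flush the whole
+ * inner cache by set/way instead of walking the range; invalidate-only
+ * operations always take the range-based path */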
+ if ((op == NVMAP_CACHE_OP_INV) ||
+ ((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD))
+ goto out;
+
+ if (op == NVMAP_CACHE_OP_WB_INV)
+ inner_flush_cache_all();
+ else if (op == NVMAP_CACHE_OP_WB)
+ inner_clean_cache_all();
+
+ if (h->heap_pgalloc && (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)) {
+ heap_page_cache_maint(client, h, start, end, op,
+ false, true, NULL, 0, 0);
+ } else if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
+ start += h->carveout->base;
+ end += h->carveout->base;
+ outer_cache_maint(op, start, end - start);
+ }
+ ret = true;
+out:
+ return ret;
+}
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op)
+{
+ pgprot_t prot;
+ pte_t **pte = NULL;
+ unsigned long kaddr;
+ unsigned long loop;
+ int err = 0;
+
+ h = nvmap_handle_get(h);
+ if (!h)
+ return -EFAULT;
+
+ if (!h->alloc) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ wmb();
+ if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+ h->flags == NVMAP_HANDLE_WRITE_COMBINE || start == end)
+ goto out;
+
+ if (fast_cache_maint(client, h, start, end, op))
+ goto out;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+ pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+ if (IS_ERR(pte)) {
+ err = PTR_ERR(pte);
+ pte = NULL;
+ goto out;
+ }
+
+ if (h->heap_pgalloc) {
+ heap_page_cache_maint(client, h, start, end, op, true,
+ (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ? false : true,
+ pte, kaddr, prot);
+ goto out;
+ }
+
+ if (start > h->size || end > h->size) {
+ nvmap_warn(client, "cache maintenance outside handle\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* lock carveout from relocation by mapcount */
+ nvmap_usecount_inc(h);
+
+ start += h->carveout->base;
+ end += h->carveout->base;
+
+ loop = start;
+
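+ /* carveout memory has no permanent kernel mapping, so map each page
+ * through the scratch PTE before doing inner-cache maintenance on it */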
+ while (loop < end) {
+ unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
+ void *base = (void *)kaddr + (loop & ~PAGE_MASK);
+ next = min(next, end);
+
+ set_pte_at(&init_mm, kaddr, *pte,
+ pfn_pte(__phys_to_pfn(loop), prot));
+ flush_tlb_kernel_page(kaddr);
+
+ inner_cache_maint(op, base, next - loop);
+ loop = next;
+ }
+
+ if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
+ outer_cache_maint(op, start, end - start);
+
+ /* unlock carveout */
+ nvmap_usecount_dec(h);
+
+out:
+ if (pte)
+ nvmap_free_pte(client->dev, pte);
+ nvmap_handle_put(h);
+ return err;
+}
+
+static int rw_handle_page(struct nvmap_handle *h, int is_read,
+ phys_addr_t start, unsigned long rw_addr,
+ unsigned long bytes, unsigned long kaddr, pte_t *pte)
+{
+ pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
+ unsigned long end = start + bytes;
+ int err = 0;
+
+ while (!err && start < end) {
+ struct page *page = NULL;
+ phys_addr_t phys;
+ size_t count;
+ void *src;
+
+ if (!h->heap_pgalloc) {
+ phys = h->carveout->base + start;
+ } else {
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ BUG_ON(!page);
+ get_page(page);
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
+ }
+
+ set_pte_at(&init_mm, kaddr, pte,
+ pfn_pte(__phys_to_pfn(phys), prot));
+ flush_tlb_kernel_page(kaddr);
+
+ src = (void *)kaddr + (phys & ~PAGE_MASK);
+ phys = PAGE_SIZE - (phys & ~PAGE_MASK);
+ count = min_t(size_t, end - start, phys);
+
+ if (is_read)
+ err = copy_to_user((void *)rw_addr, src, count);
+ else
+ err = copy_from_user(src, (void *)rw_addr, count);
+
+ if (err)
+ err = -EFAULT;
+
+ rw_addr += count;
+ start += count;
+
+ if (page)
+ put_page(page);
+ }
+
+ return err;
+}
+
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+ int is_read, unsigned long h_offs,
+ unsigned long sys_addr, unsigned long h_stride,
+ unsigned long sys_stride, unsigned long elem_size,
+ unsigned long count)
+{
+ ssize_t copied = 0;
+ pte_t **pte;
+ void *addr;
+ int ret = 0;
+
+ if (!elem_size)
+ return -EINVAL;
+
+ if (!h->alloc)
+ return -EFAULT;
+
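+ /* a transfer whose handle and user strides both equal the element size
+ * is contiguous; collapse it into a single large element to minimise
+ * per-element overhead */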
+ if (elem_size == h_stride && elem_size == sys_stride) {
+ elem_size *= count;
+ h_stride = elem_size;
+ sys_stride = elem_size;
+ count = 1;
+ }
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ while (count--) {
+ if (h_offs + elem_size > h->size) {
+ nvmap_warn(client, "read/write outside of handle\n");
+ ret = -EFAULT;
+ break;
+ }
+ if (is_read)
+ cache_maint(client, h, h_offs,
+ h_offs + elem_size, NVMAP_CACHE_OP_INV);
+
+ ret = rw_handle_page(h, is_read, h_offs, sys_addr,
+ elem_size, (unsigned long)addr, *pte);
+
+ if (ret)
+ break;
+
+ if (!is_read)
+ cache_maint(client, h, h_offs,
+ h_offs + elem_size, NVMAP_CACHE_OP_WB);
+
+ copied += elem_size;
+ sys_addr += sys_stride;
+ h_offs += h_stride;
+ }
+
+ nvmap_free_pte(client->dev, pte);
+ return ret ?: copied;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.h b/drivers/video/tegra/nvmap/nvmap_ioctl.h
new file mode 100644
index 000000000000..c802cd4dd7ae
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.h
@@ -0,0 +1,159 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.h
+ *
+ * ioctl declarations for nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H
+#define __VIDEO_TEGRA_NVMAP_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+
+#include <mach/nvmap.h>
+
+enum {
+ NVMAP_HANDLE_PARAM_SIZE = 1,
+ NVMAP_HANDLE_PARAM_ALIGNMENT,
+ NVMAP_HANDLE_PARAM_BASE,
+ NVMAP_HANDLE_PARAM_HEAP,
+};
+
+enum {
+ NVMAP_CACHE_OP_WB = 0,
+ NVMAP_CACHE_OP_INV,
+ NVMAP_CACHE_OP_WB_INV,
+};
+
+
+struct nvmap_create_handle {
+ union {
+ __u32 key; /* ClaimPreservedHandle */
+ __u32 id; /* FromId */
+ __u32 size; /* CreateHandle */
+ };
+ __u32 handle;
+};
+
+struct nvmap_alloc_handle {
+ __u32 handle;
+ __u32 heap_mask;
+ __u32 flags;
+ __u32 align;
+};
+
+struct nvmap_map_caller {
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem; should be page-aligned */
+ __u32 length; /* number of bytes to map */
+ __u32 flags;
+ unsigned long addr; /* user pointer */
+};
+
+struct nvmap_rw_handle {
+ unsigned long addr; /* user pointer */
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem */
+ __u32 elem_size; /* individual atom size */
+ __u32 hmem_stride; /* delta in bytes between atoms in hmem */
+ __u32 user_stride; /* delta in bytes between atoms in user */
+ __u32 count; /* number of atoms to copy */
+};
+
+struct nvmap_pin_handle {
+ unsigned long handles; /* array of handles to pin/unpin */
+ unsigned long addr; /* array of addresses to return */
+ __u32 count; /* number of entries in handles */
+};
+
+struct nvmap_handle_param {
+ __u32 handle;
+ __u32 param;
+ unsigned long result;
+};
+
+struct nvmap_cache_op {
+ unsigned long addr;
+ __u32 handle;
+ __u32 len;
+ __s32 op;
+};
+
+#define NVMAP_IOC_MAGIC 'N'
+
+/* Creates a new memory handle. On input, the argument is the size of the new
+ * handle; on return, the argument is the name of the new handle
+ */
+#define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
+#define NVMAP_IOC_CLAIM _IOWR(NVMAP_IOC_MAGIC, 1, struct nvmap_create_handle)
+#define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle)
+
+/* Actually allocates memory for the specified handle */
+#define NVMAP_IOC_ALLOC _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)
+
+/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
+ */
+#define NVMAP_IOC_FREE _IO(NVMAP_IOC_MAGIC, 4)
+
+/* Maps the region of the specified handle into a user-provided virtual address
+ * that was previously created via an mmap syscall on this fd */
+#define NVMAP_IOC_MMAP _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller)
+
+/* Reads/writes data (possibly strided) from a user-provided buffer into the
+ * hmem at the specified offset */
+#define NVMAP_IOC_WRITE _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle)
+#define NVMAP_IOC_READ _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle)
+
+#define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
+
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
+#define NVMAP_IOC_PIN_MULT _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle)
+#define NVMAP_IOC_UNPIN_MULT _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle)
+
+#define NVMAP_IOC_CACHE _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op)
+
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMAP_IOC_GET_ID _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)
+
+#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_ID))
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg);
+
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg);
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg);
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg);
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c
new file mode 100644
index 000000000000..f54d44923ebf
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.c
@@ -0,0 +1,187 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.c
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* if IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
+ * unpinned handles are placed onto a most-recently-used eviction list;
+ * multiple lists are maintained, segmented by size (sizes were chosen to
+ * roughly correspond with common sizes for graphics surfaces).
+ *
+ * if a handle is located on the MRU list, then the code below may
+ * steal its IOVMM area at any time to satisfy a pin operation if no
+ * free IOVMM space is available
+ */
+
+static const size_t mru_cutoff[] = {
+ 262144, 393216, 786432, 1048576, 1572864
+};
+
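+/* returns the MRU list whose size bin covers the requested size; sizes
+ * above the largest cutoff fall into a final catch-all list. */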
+static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
+{
+ unsigned int i;
+
+ BUG_ON(!share->mru_lists);
+ for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
+ if (size <= mru_cutoff[i])
+ break;
+
+ return &share->mru_lists[i];
+}
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
+{
+ size_t vm_size = tegra_iovmm_get_vm_size(iovmm);
+ return (vm_size >> 2) * 3;
+}
+
+/* nvmap_mru_vma_lock should be acquired by the caller before calling this */
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
+{
+ size_t len = h->pgalloc.area->iovm_length;
+ list_add(&h->pgalloc.mru_list, mru_list(share, len));
+}
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
+{
+ nvmap_mru_lock(s);
+ if (!list_empty(&h->pgalloc.mru_list))
+ list_del(&h->pgalloc.mru_list);
+ nvmap_mru_unlock(s);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+}
+
+/* returns a tegra_iovmm_area for a handle. if the handle already has
+ * an iovmm_area allocated, the handle is simply removed from its MRU list
+ * and the existing iovmm_area is returned.
+ *
+ * if no existing allocation exists, try to allocate a new IOVMM area.
+ *
+ * if a new area cannot be allocated, try to re-use the most-recently-unpinned
+ * handle's allocation.
+ *
+ * and if that fails, iteratively evict handles from the MRU lists and free
+ * their allocations, until the new allocation succeeds.
+ */
+struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
+ struct nvmap_handle *h)
+{
+ struct list_head *mru;
+ struct nvmap_handle *evict = NULL;
+ struct tegra_iovmm_area *vm = NULL;
+ unsigned int i, idx;
+ pgprot_t prot;
+
+ BUG_ON(!h || !c || !c->share);
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->pgalloc.area) {
+ BUG_ON(list_empty(&h->pgalloc.mru_list));
+ list_del(&h->pgalloc.mru_list);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return h->pgalloc.area;
+ }
+
+ vm = tegra_iovmm_create_vm(c->share->iovmm, NULL,
+ h->size, h->align, prot,
+ h->pgalloc.iovm_addr);
+
+ if (vm) {
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return vm;
+ }
+ /* if client is looking for specific iovm address, return from here. */
+ if ((vm == NULL) && (h->pgalloc.iovm_addr != 0))
+ return NULL;
+ /* attempt to re-use the most recently unpinned IOVMM area in the
+ * same size bin as the current handle. If that fails, iteratively
+ * evict handles (starting from the current bin) until an allocation
+ * succeeds or no more areas can be evicted */
+ mru = mru_list(c->share, h->size);
+ if (!list_empty(mru))
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ if (evict && evict->pgalloc.area->iovm_length >= h->size) {
+ list_del(&evict->pgalloc.mru_list);
+ vm = evict->pgalloc.area;
+ evict->pgalloc.area = NULL;
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ return vm;
+ }
+
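+ /* evict unpinned handles bin by bin, starting with the current size
+ * bin, retrying the allocation after each eviction until it succeeds or
+ * all MRU lists are exhausted */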
+ idx = mru - c->share->mru_lists;
+
+ for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
+ if (idx >= c->share->nr_mru)
+ idx = 0;
+ mru = &c->share->mru_lists[idx];
+ while (!list_empty(mru) && !vm) {
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ BUG_ON(atomic_read(&evict->pin) != 0);
+ BUG_ON(!evict->pgalloc.area);
+ list_del(&evict->pgalloc.mru_list);
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ tegra_iovmm_free_vm(evict->pgalloc.area);
+ evict->pgalloc.area = NULL;
+ vm = tegra_iovmm_create_vm(c->share->iovmm,
+ NULL, h->size, h->align,
+ prot, h->pgalloc.iovm_addr);
+ }
+ }
+ return vm;
+}
+
+int nvmap_mru_init(struct nvmap_share *share)
+{
+ int i;
+ mutex_init(&share->mru_lock);
+ share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;
+
+ share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru,
+ GFP_KERNEL);
+
+ if (!share->mru_lists)
+ return -ENOMEM;
+
+ for (i = 0; i < share->nr_mru; i++)
+ INIT_LIST_HEAD(&share->mru_lists[i]);
+
+ return 0;
+}
+
+void nvmap_mru_destroy(struct nvmap_share *share)
+{
+ kfree(share->mru_lists);
+ share->mru_lists = NULL;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.h b/drivers/video/tegra/nvmap/nvmap_mru.h
new file mode 100644
index 000000000000..6c94630bc3ef
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.h
@@ -0,0 +1,84 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.h
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef __VIDEO_TEGRA_NVMAP_MRU_H
+#define __VIDEO_TEGRA_NVMAP_MRU_H
+
+#include <linux/spinlock.h>
+
+#include "nvmap.h"
+
+struct tegra_iovmm_area;
+struct tegra_iovmm_client;
+
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+
+static inline void nvmap_mru_lock(struct nvmap_share *share)
+{
+ mutex_lock(&share->mru_lock);
+}
+
+static inline void nvmap_mru_unlock(struct nvmap_share *share)
+{
+ mutex_unlock(&share->mru_lock);
+}
+
+int nvmap_mru_init(struct nvmap_share *share);
+
+void nvmap_mru_destroy(struct nvmap_share *share);
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm);
+
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h);
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h);
+
+struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
+ struct nvmap_handle *h);
+
+#else
+
+#define nvmap_mru_lock(_s) do { } while (0)
+#define nvmap_mru_unlock(_s) do { } while (0)
+#define nvmap_mru_init(_s) 0
+#define nvmap_mru_destroy(_s) do { } while (0)
+#define nvmap_mru_vm_size(_a) tegra_iovmm_get_vm_size(_a)
+
+static inline void nvmap_mru_insert_locked(struct nvmap_share *share,
+ struct nvmap_handle *h)
+{ }
+
+static inline void nvmap_mru_remove(struct nvmap_share *s,
+ struct nvmap_handle *h)
+{ }
+
+static inline struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
+ struct nvmap_handle *h)
+{
+ BUG_ON(!h->pgalloc.area);
+ return h->pgalloc.area;
+}
+
+#endif
+
+#endif
diff --git a/drivers/w1/Makefile b/drivers/w1/Makefile
index 6bb0b54965f2..ae25ae5fdaf3 100644
--- a/drivers/w1/Makefile
+++ b/drivers/w1/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the Dallas's 1-wire bus.
#
+GCOV_PROFILE := y
obj-$(CONFIG_W1) += wire.o
wire-objs := w1.o w1_int.o w1_family.o w1_netlink.o w1_io.o
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 979d6eed9a0f..dd278e51ed88 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -58,6 +58,13 @@ config W1_MASTER_GPIO
This support is also available as a module. If so, the module
will be called w1-gpio.
+config W1_MASTER_TEGRA
+ tristate "NVidia Tegra SoC 1-wire busmaster"
+ depends on ARCH_TEGRA
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ the NVIDIA Tegra SoC one-wire interfaces.
+
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
depends on SOC_OMAP2430 || ARCH_OMAP3
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index c5a3e96fcbab..96499dce0b94 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for 1-wire bus master drivers.
#
+GCOV_PROFILE_tegra_w1.o := y
obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o
obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
@@ -10,3 +11,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
+obj-$(CONFIG_W1_MASTER_TEGRA) += tegra_w1.o
diff --git a/drivers/w1/masters/tegra_w1.c b/drivers/w1/masters/tegra_w1.c
new file mode 100644
index 000000000000..9443c4b1dbc6
--- /dev/null
+++ b/drivers/w1/masters/tegra_w1.c
@@ -0,0 +1,491 @@
+/*
+ * drivers/w1/masters/tegra_w1.c
+ *
+ * W1 master driver for internal OWR controllers in NVIDIA Tegra SoCs.
+ *
+ * Copyright (C) 2010 Motorola, Inc
+ * Author: Andrei Warkentin <andreiw@motorola.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <mach/w1.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_log.h"
+
+#define DRIVER_NAME "tegra_w1"
+
+/* OWR_CONTROL_0 is the main control register, and should be configured
+ last after configuring all other settings. */
+#define OWR_CONTROL (0x0)
+#define OC_RD_BIT (1 << 31)
+#define OC_WR0_BIT (1 << 30)
+#define OC_RD_SCLK_SHIFT (23)
+#define OC_RD_SCLK_MASK (0xF)
+#define OC_P_SCLK_SHIFT (15)
+#define OC_P_SCLK_MASK (0xFF)
+#define OC_BIT_XMODE (1 << 2)
+#define OC_GO (1 << 0)
+
+/* OWR_WR_RD_TCTL_0 controls read/write timings. */
+#define OWR_WR_RD_TCTL (0xc)
+#define ORWT_TSU_SHIFT (28)
+#define ORWT_TSU_MASK (0x3)
+#define ORWT_TRELEASE_SHIFT (22)
+#define ORWT_TRELEASE_MASK (0x3F)
+#define ORWT_TRDV_SHIFT (18)
+#define ORWT_TRDV_MASK (0xF)
+#define ORWT_TLOW0_SHIFT (11)
+#define ORWT_TLOW0_MASK (0x7F)
+#define ORWT_TLOW1_SHIFT (7)
+#define ORWT_TLOW1_MASK (0xF)
+#define ORWT_TSLOT_SHIFT (0)
+#define ORWT_TSLOT_MASK (0x7F)
+
+/* OWR_RST_PRES_TCTL_0 controls reset presence timings. */
+#define OWR_RST_PRES_TCTL (0x10)
+#define ORPT_TPDL_SHIFT (24)
+#define ORPT_TPDL_MASK (0xFF)
+#define ORPT_TPDH_SHIFT (18)
+#define ORPT_TPDH_MASK (0x3F)
+#define ORPT_TRSTL_SHIFT (9)
+#define ORPT_TRSTL_MASK (0x1FF)
+#define ORPT_TRSTH_SHIFT (0)
+#define ORPT_TRSTH_MASK (0x1FF)
+
+/* OWR_INTR_MASK_0 stores the masks for the interrupts. */
+#define OWR_INTR_MASK (0x24)
+#define OI_BIT_XFER_DONE (1 << 13)
+#define OI_PRESENCE_DONE (1 << 5)
+#define OI_PRESENCE_ERR (1 << 0)
+
+/* OWR_INTR_STATUS_0 is the interrupt status register. */
+#define OWR_INTR_STATUS (0x28)
+
+/* OWR_STATUS_0 is the status register. */
+#define OWR_STATUS (0x34)
+#define OS_READ_BIT_SHIFT (23)
+#define OS_RDY (1 << 0)
+
+/* Transfer completion wait time. */
+#define BIT_XFER_COMPLETION_TIMEOUT_MSEC (5000)
+
+/* Errors in the interrupt status register for bit
+ transfers. */
+#define BIT_XFER_ERRORS (OI_PRESENCE_ERR)
+
+/* OWR requires a 1 MHz clock. This value is in Hertz. */
+#define OWR_CLOCK (1000000)
+
+#define W1_ERR(format, ...) \
+ printk(KERN_ERR "(%s: line %d) " format, \
+ __func__, __LINE__, ## __VA_ARGS__)
+
+struct tegra_device {
+ bool ready;
+ struct w1_bus_master bus_master;
+ struct clk *clk;
+ void __iomem *ioaddr;
+ struct mutex mutex;
+ spinlock_t spinlock;
+ struct completion *transfer_completion;
+ unsigned long intr_status;
+ struct tegra_w1_timings *timings;
+};
+
+/* If debug_print & DEBUG_PRESENCE, print whether slaves were
+ detected in reset_bus. */
+#define DEBUG_PRESENCE (0x1)
+
+/* If debug_print & DEBUG_TIMEOUT, print when waiting for device
+ interrupts times out. */
+#define DEBUG_TIMEOUT (0x2)
+
+static uint32_t debug_print;
+module_param_named(debug, debug_print, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debugging output commands:\n"
+ "\tbit 0 - log reset_bus presence detects\n"
+ "\tbit 1 - log interrupt timeouts\n");
+
+/* Reads the OWR register specified by base offset in 'reg'. */
+static inline unsigned long w1_readl(struct tegra_device *dev,
+ unsigned long reg)
+{
+ return readl(dev->ioaddr + reg);
+}
+
+/* Writes 'val' into the OWR registers specified by base offset in 'reg'. */
+static inline void w1_writel(struct tegra_device *dev, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, dev->ioaddr + reg);
+}
+
+/* Sets the interrupt mask of the device. */
+static inline void w1_imask(struct tegra_device *dev, unsigned long mask)
+{
+ w1_writel(dev, mask, OWR_INTR_MASK);
+}
+
+/* Waits for completion of a bit transfer, checks intr_status against
+ BIT_XFER_ERRORS and an additional provided bit mask. */
+static inline int w1_wait(struct tegra_device *dev, unsigned long mask)
+{
+ int ret;
+ unsigned long irq_flags;
+ unsigned long intr_status;
+
+ ret = wait_for_completion_timeout(dev->transfer_completion,
+ msecs_to_jiffies(BIT_XFER_COMPLETION_TIMEOUT_MSEC));
+
+ if (unlikely(!ret)) {
+ if (debug_print & DEBUG_TIMEOUT)
+ W1_ERR("timeout\n");
+ return -ETIME;
+ }
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+ intr_status = dev->intr_status;
+ dev->intr_status = 0;
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+
+ if (unlikely(intr_status & BIT_XFER_ERRORS ||
+ !(intr_status & mask)))
+ return -EIO;
+ return 0;
+}
+
+/* Programs timing registers, and puts the device into a known state.
+ Interrupts are safe to enable past this point. */
+static int w1_setup(struct tegra_device *dev)
+{
+ unsigned long value;
+ clk_enable(dev->clk);
+
+ value =
+ ((dev->timings->tslot & ORWT_TSLOT_MASK) << ORWT_TSLOT_SHIFT) |
+ ((dev->timings->tlow1 & ORWT_TLOW1_MASK) << ORWT_TLOW1_SHIFT) |
+ ((dev->timings->tlow0 & ORWT_TLOW0_MASK) << ORWT_TLOW0_SHIFT) |
+ ((dev->timings->trdv & ORWT_TRDV_MASK) << ORWT_TRDV_SHIFT) |
+ ((dev->timings->trelease & ORWT_TRELEASE_MASK) <<
+ ORWT_TRELEASE_SHIFT) |
+ ((dev->timings->tsu & ORWT_TSU_MASK) << ORWT_TSU_SHIFT);
+ w1_writel(dev, value, OWR_WR_RD_TCTL);
+
+ value =
+ ((dev->timings->trsth & ORPT_TRSTH_MASK) << ORPT_TRSTH_SHIFT) |
+ ((dev->timings->trstl & ORPT_TRSTL_MASK) << ORPT_TRSTL_SHIFT) |
+ ((dev->timings->tpdh & ORPT_TPDH_MASK) << ORPT_TPDH_SHIFT) |
+ ((dev->timings->tpdl & ORPT_TPDL_MASK) << ORPT_TPDL_SHIFT);
+ w1_writel(dev, value, OWR_RST_PRES_TCTL);
+
+ /* Clear interrupt status/mask registers in case
+ anything was set in it. */
+ w1_imask(dev, 0);
+ w1_writel(dev, 0xFFFFFFFF, OWR_INTR_STATUS);
+ clk_disable(dev->clk);
+ return 0;
+}
+
+/* Interrupt handler for OWR communication. */
+static irqreturn_t tegra_w1_irq(int irq, void *cookie)
+{
+ unsigned long irq_flags;
+ unsigned long status;
+ struct tegra_device *dev = cookie;
+
+ status = w1_readl(dev, OWR_INTR_STATUS);
+ if (unlikely(!status)) {
+ /* Not for me if no status bits are set. */
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+
+ if (likely(dev->transfer_completion)) {
+ dev->intr_status = status;
+ w1_writel(dev, status, OWR_INTR_STATUS);
+ complete(dev->transfer_completion);
+ } else {
+ W1_ERR("spurious interrupt, status = 0x%lx\n", status);
+ }
+
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ return IRQ_HANDLED;
+}
+
+/* Perform a write-0 cycle if bit == 0, otherwise
+ perform a read cycle. */
+static u8 tegra_w1_touch_bit(void *data, u8 bit)
+{
+ int rc;
+ u8 return_bit;
+ unsigned long control;
+ DECLARE_COMPLETION_ONSTACK(touch_done);
+ struct tegra_device *dev = (struct tegra_device *) data;
+
+ return_bit = 0;
+ mutex_lock(&dev->mutex);
+ if (!dev->ready)
+ goto done;
+
+ clk_enable(dev->clk);
+ w1_imask(dev, OI_BIT_XFER_DONE);
+ dev->transfer_completion = &touch_done;
+ control =
+ ((dev->timings->rdsclk & OC_RD_SCLK_MASK) << OC_RD_SCLK_SHIFT) |
+ ((dev->timings->psclk & OC_P_SCLK_MASK) << OC_P_SCLK_SHIFT) |
+ OC_BIT_XMODE;
+
+ /* Read bit (well, writes a 1 to the bus as well). */
+ if (bit) {
+ w1_writel(dev, control | OC_RD_BIT, OWR_CONTROL);
+ rc = w1_wait(dev, OI_BIT_XFER_DONE);
+
+ if (rc) {
+ W1_ERR("write-1/read failed\n");
+ goto done;
+ }
+
+ return_bit =
+ (w1_readl(dev, OWR_STATUS) >> OS_READ_BIT_SHIFT) & 1;
+ } else {
+ /* Write 0. */
+ w1_writel(dev, control | OC_WR0_BIT, OWR_CONTROL);
+ rc = w1_wait(dev, OI_BIT_XFER_DONE);
+ if (rc) {
+ W1_ERR("write-0 failed\n");
+ goto done;
+ }
+ }
+
+done:
+
+ w1_imask(dev, 0);
+ dev->transfer_completion = NULL;
+ clk_disable(dev->clk);
+ mutex_unlock(&dev->mutex);
+ return return_bit;
+}
+
+/* Performs a bus reset cycle, and returns 0 if slaves present. */
+static u8 tegra_w1_reset_bus(void *data)
+{
+ int rc;
+ int presence;
+ unsigned long value;
+ DECLARE_COMPLETION_ONSTACK(reset_done);
+ struct tegra_device *dev = (struct tegra_device *) data;
+
+ presence = 1;
+ mutex_lock(&dev->mutex);
+ if (!dev->ready)
+ goto done;
+
+ clk_enable(dev->clk);
+ w1_imask(dev, OI_PRESENCE_DONE);
+ dev->transfer_completion = &reset_done;
+ value =
+ ((dev->timings->rdsclk & OC_RD_SCLK_MASK) << OC_RD_SCLK_SHIFT) |
+ ((dev->timings->psclk & OC_P_SCLK_MASK) << OC_P_SCLK_SHIFT) |
+ OC_BIT_XMODE | OC_GO;
+ w1_writel(dev, value, OWR_CONTROL);
+
+ rc = w1_wait(dev, OI_PRESENCE_DONE);
+ if (rc)
+ goto done;
+
+ presence = 0;
+done:
+
+ if (debug_print & DEBUG_PRESENCE) {
+ if (presence)
+ W1_ERR("no slaves present\n");
+ else
+ W1_ERR("slaves present\n");
+ }
+
+ w1_imask(dev, 0);
+ dev->transfer_completion = NULL;
+ clk_disable(dev->clk);
+ mutex_unlock(&dev->mutex);
+ return presence;
+}
+
+static int tegra_w1_probe(struct platform_device *pdev)
+{
+ int rc;
+ int irq;
+ struct resource *res;
+ struct tegra_device *dev;
+ struct tegra_w1_platform_data *plat = pdev->dev.platform_data;
+
+ printk(KERN_INFO "Driver for Tegra SoC 1-wire controller\n");
+
+ if (plat == NULL || plat->timings == NULL)
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ dev = kzalloc(sizeof(struct tegra_device), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+ dev->clk = clk_get(&pdev->dev, plat->clk_id);
+ if (IS_ERR(dev->clk)) {
+ rc = PTR_ERR(dev->clk);
+ goto cleanup_alloc;
+ }
+
+ /* OWR requires 1MHz clock. */
+ rc = clk_set_rate(dev->clk, OWR_CLOCK);
+ if (rc)
+ goto cleanup_clock;
+
+ if (!request_mem_region(res->start, res->end - res->start + 1,
+ dev_name(&pdev->dev))) {
+ rc = -EBUSY;
+ goto cleanup_clock;
+ }
+
+ dev->ioaddr = ioremap(res->start, res->end - res->start + 1);
+ if (!dev->ioaddr) {
+ rc = -ENOMEM;
+ goto cleanup_reqmem;
+ }
+
+ dev->timings = plat->timings;
+ dev->bus_master.data = dev;
+ dev->bus_master.touch_bit = tegra_w1_touch_bit;
+ dev->bus_master.reset_bus = tegra_w1_reset_bus;
+
+ spin_lock_init(&dev->spinlock);
+ mutex_init(&dev->mutex);
+
+ /* Program device into known state. */
+ w1_setup(dev);
+
+ rc = request_irq(irq, tegra_w1_irq, IRQF_SHARED, DRIVER_NAME, dev);
+ if (rc)
+ goto cleanup_ioremap;
+
+ rc = w1_add_master_device(&dev->bus_master);
+ if (rc)
+ goto cleanup_irq;
+
+ dev->ready = true;
+ return 0;
+
+cleanup_irq:
+ free_irq(irq, dev);
+cleanup_ioremap:
+ iounmap(dev->ioaddr);
+cleanup_reqmem:
+ release_mem_region(res->start,
+ res->end - res->start + 1);
+cleanup_clock:
+ clk_put(dev->clk);
+cleanup_alloc:
+ platform_set_drvdata(pdev, NULL);
+ kfree(dev);
+ return rc;
+}
+
+static int tegra_w1_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tegra_device *dev = platform_get_drvdata(pdev);
+
+ mutex_lock(&dev->mutex);
+ dev->ready = false;
+ mutex_unlock(&dev->mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ free_irq(res->start, dev);
+ iounmap(dev->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, res->end - res->start + 1);
+ clk_put(dev->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(dev);
+ return 0;
+}
+
+static int tegra_w1_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int tegra_w1_resume(struct platform_device *pdev)
+{
+ struct tegra_device *dev = platform_get_drvdata(pdev);
+
+ /* Re-run setup: timing and interrupt registers may have been reset across suspend. */
+ w1_setup(dev);
+ return 0;
+}
+
+static struct platform_driver tegra_w1_driver = {
+ .probe = tegra_w1_probe,
+ .remove = tegra_w1_remove,
+ .suspend = tegra_w1_suspend,
+ .resume = tegra_w1_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_w1_init(void)
+{
+ return platform_driver_register(&tegra_w1_driver);
+}
+
+static void __exit tegra_w1_exit(void)
+{
+ platform_driver_unregister(&tegra_w1_driver);
+}
+
+module_init(tegra_w1_init);
+module_exit(tegra_w1_exit);
+
+MODULE_DESCRIPTION("Tegra W1 master driver");
+MODULE_AUTHOR("Andrei Warkentin <andreiw@motorola.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 86b0735e6aa0..cb460109b95f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -212,6 +212,23 @@ config MPCORE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called mpcore_wdt.
+config TEGRA_WATCHDOG
+ tristate "Tegra watchdog"
+ depends on ARCH_TEGRA
+ help
+ Say Y here to include support for the watchdog timer
+ embedded in NVIDIA Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tegra_wdt.
+
+config TEGRA_WATCHDOG_ENABLE_ON_PROBE
+ tristate "Tegra watchdog"
+ depends on ARCH_TEGRA && TEGRA_WATCHDOG
+ help
+ Say Y here to enable the tegra watchdog at driver
+ probe time, rather than when the device is opened.
+
config EP93XX_WATCHDOG
tristate "EP93xx Watchdog"
depends on ARCH_EP93XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 55bd5740e910..dd35599a2612 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the WatchDog device drivers.
#
+GCOV_PROFILE_tegra_wdt.o := y
# The WatchDog Timer Driver Core.
watchdog-objs += watchdog_core.o watchdog_dev.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
+obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
new file mode 100644
index 000000000000..d873eccfcd4b
--- /dev/null
+++ b/drivers/watchdog/tegra_wdt.c
@@ -0,0 +1,444 @@
+/*
+ * drivers/watchdog/tegra_wdt.c
+ *
+ * watchdog driver for NVIDIA tegra internal watchdog
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * based on drivers/watchdog/softdog.c and drivers/watchdog/omap_wdt.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+/* minimum and maximum watchdog trigger periods, in seconds */
+#define MIN_WDT_PERIOD 5
+#define MAX_WDT_PERIOD 1000
+
+struct tegra_wdt {
+ struct miscdevice miscdev;
+ struct notifier_block notifier;
+ struct resource *res_src;
+ struct resource *res_wdt;
+ unsigned long users;
+ void __iomem *wdt_source;
+ void __iomem *wdt_timer;
+ int irq;
+ int timeout;
+ bool enabled;
+};
+
+static struct platform_device *tegra_wdt_dev;
+/*
+ * For spinlock lockup detection to work, the heartbeat should be at least
+ * twice the lockup timeout, to cover the case where the spinlock was taken
+ * with irqs disabled.
+ */
+static int heartbeat = 120; /* must lie between MIN_WDT_PERIOD and MAX_WDT_PERIOD */
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+
+#define TIMER_PTV 0x0
+ #define TIMER_EN (1 << 31)
+ #define TIMER_PERIODIC (1 << 30)
+#define TIMER_PCR 0x4
+ #define TIMER_PCR_INTR (1 << 30)
+#define WDT_EN (1 << 5)
+#define WDT_SEL_TMR1 (0 << 4)
+#define WDT_SYS_RST (1 << 2)
+
+static void tegra_wdt_enable(struct tegra_wdt *wdt)
+{
+ u32 val;
+
+ /* since the watchdog reset occurs when a second interrupt
+ * is asserted before the first is processed, program the
+ * timer period to one-half of the watchdog period */
+ val = wdt->timeout * 1000000ul / 2;
+ val |= (TIMER_EN | TIMER_PERIODIC);
+ writel(val, wdt->wdt_timer + TIMER_PTV);
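+ /* e.g. the default 120 s heartbeat programs a 60,000,000 us timer
+ * period; a hung system is reset on the second unserviced expiry,
+ * i.e. after roughly the full 120 s. */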
+
+ val = WDT_EN | WDT_SEL_TMR1 | WDT_SYS_RST;
+ writel(val, wdt->wdt_source);
+}
+
+static void tegra_wdt_disable(struct tegra_wdt *wdt)
+{
+ writel(0, wdt->wdt_source);
+ writel(0, wdt->wdt_timer + TIMER_PTV);
+}
+
+static irqreturn_t tegra_wdt_interrupt(int irq, void *dev_id)
+{
+ struct tegra_wdt *wdt = dev_id;
+
+ writel(TIMER_PCR_INTR, wdt->wdt_timer + TIMER_PCR);
+ return IRQ_HANDLED;
+}
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+
+#define TIMER_PTV 0
+ #define TIMER_EN (1 << 31)
+ #define TIMER_PERIODIC (1 << 30)
+#define TIMER_PCR 0x4
+ #define TIMER_PCR_INTR (1 << 30)
+#define WDT_CFG (0)
+ #define WDT_CFG_TMR_SRC (0 << 0) /* for TMR10. */
+ #define WDT_CFG_PERIOD (1 << 4)
+ #define WDT_CFG_INT_EN (1 << 12)
+ #define WDT_CFG_SYS_RST_EN (1 << 14)
+ #define WDT_CFG_PMC2CAR_RST_EN (1 << 15)
+#define WDT_CMD (8)
+ #define WDT_CMD_START_COUNTER (1 << 0)
+ #define WDT_CMD_DISABLE_COUNTER (1 << 1)
+#define WDT_UNLOCK (0xC)
+ #define WDT_UNLOCK_PATTERN (0xC45A << 0)
+
+static void tegra_wdt_set_timeout(struct tegra_wdt *wdt, int sec)
+{
+ u32 ptv;
+
+ ptv = readl(wdt->wdt_timer + TIMER_PTV);
+
+ wdt->timeout = clamp(sec, MIN_WDT_PERIOD, MAX_WDT_PERIOD);
+ if (ptv & TIMER_EN) {
+ /* since the watchdog reset occurs when a fourth interrupt
+ * is asserted before the first is processed, program the
+ * timer period to one-fourth of the watchdog period */
+ ptv = (wdt->timeout * 1000000ul) / 4;
+ ptv |= (TIMER_EN | TIMER_PERIODIC);
+ writel(ptv, wdt->wdt_timer + TIMER_PTV);
+ }
+}
+
+static void tegra_wdt_enable(struct tegra_wdt *wdt)
+{
+ u32 val;
+
+ val = (wdt->timeout * 1000000ul) / 4;
+ val |= (TIMER_EN | TIMER_PERIODIC);
+ writel(val, wdt->wdt_timer + TIMER_PTV);
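+ /* e.g. a 120 s timeout programs a 30,000,000 us timer period; the
+ * watchdog fires on the fourth unserviced expiry, ~120 s later. */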
+
+ val = WDT_CFG_TMR_SRC | WDT_CFG_PERIOD | WDT_CFG_INT_EN |
+ /*WDT_CFG_SYS_RST_EN |*/ WDT_CFG_PMC2CAR_RST_EN;
+ writel(val, wdt->wdt_source + WDT_CFG);
+ writel(WDT_CMD_START_COUNTER, wdt->wdt_source + WDT_CMD);
+}
+
+static void tegra_wdt_disable(struct tegra_wdt *wdt)
+{
+ writel(WDT_UNLOCK_PATTERN, wdt->wdt_source + WDT_UNLOCK);
+ writel(WDT_CMD_DISABLE_COUNTER, wdt->wdt_source + WDT_CMD);
+
+ writel(0, wdt->wdt_timer + TIMER_PTV);
+}
+
+static irqreturn_t tegra_wdt_interrupt(int irq, void *dev_id)
+{
+ struct tegra_wdt *wdt = dev_id;
+
+ writel(WDT_CMD_START_COUNTER, wdt->wdt_source + WDT_CMD);
+ return IRQ_HANDLED;
+}
+#endif
+
+static int tegra_wdt_notify(struct notifier_block *this,
+ unsigned long code, void *dev)
+{
+ struct tegra_wdt *wdt = container_of(this, struct tegra_wdt, notifier);
+
+ if (code == SYS_DOWN || code == SYS_HALT)
+ tegra_wdt_disable(wdt);
+ return NOTIFY_DONE;
+}
+
+static int tegra_wdt_open(struct inode *inode, struct file *file)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(tegra_wdt_dev);
+
+ if (test_and_set_bit(1, &wdt->users))
+ return -EBUSY;
+
+ wdt->enabled = true;
+ wdt->timeout = heartbeat;
+ tegra_wdt_enable(wdt);
+ file->private_data = wdt;
+ return nonseekable_open(inode, file);
+}
+
+static int tegra_wdt_release(struct inode *inode, struct file *file)
+{
+ struct tegra_wdt *wdt = file->private_data;
+
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ tegra_wdt_disable(wdt);
+ wdt->enabled = false;
+#endif
+ wdt->users = 0;
+ return 0;
+}
+
+static long tegra_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_wdt *wdt = file->private_data;
+ static DEFINE_SPINLOCK(lock);
+ int new_timeout;
+ static const struct watchdog_info ident = {
+ .identity = "Tegra Watchdog",
+ .options = WDIOF_SETTIMEOUT,
+ .firmware_version = 0,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user((struct watchdog_info __user *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int __user *)arg);
+
+ case WDIOC_KEEPALIVE:
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, (int __user *)arg))
+ return -EFAULT;
+ spin_lock(&lock);
+ tegra_wdt_disable(wdt);
+ wdt->timeout = clamp(new_timeout, MIN_WDT_PERIOD, MAX_WDT_PERIOD);
+ tegra_wdt_enable(wdt);
+ spin_unlock(&lock);
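+ /* fall through: return the newly programmed timeout */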
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt->timeout, (int __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static ssize_t tegra_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ return len;
+}
+
+static const struct file_operations tegra_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = tegra_wdt_write,
+ .unlocked_ioctl = tegra_wdt_ioctl,
+ .open = tegra_wdt_open,
+ .release = tegra_wdt_release,
+};
+
+static int tegra_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res_src, *res_wdt, *res_irq;
+ struct tegra_wdt *wdt;
+ u32 src;
+ int ret = 0;
+
+ if (pdev->id != -1) {
+ dev_err(&pdev->dev, "only id -1 supported\n");
+ return -ENODEV;
+ }
+
+ if (tegra_wdt_dev != NULL) {
+ dev_err(&pdev->dev, "watchdog already registered\n");
+ return -EIO;
+ }
+
+ res_src = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_wdt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!res_src || !res_wdt || !res_irq) {
+ dev_err(&pdev->dev, "incorrect resources\n");
+ return -ENOENT;
+ }
+
+ wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
+ if (!wdt) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ wdt->irq = -1;
+ wdt->miscdev.parent = &pdev->dev;
+ wdt->miscdev.minor = WATCHDOG_MINOR;
+ wdt->miscdev.name = "watchdog";
+ wdt->miscdev.fops = &tegra_wdt_fops;
+
+ wdt->notifier.notifier_call = tegra_wdt_notify;
+
+ res_src = request_mem_region(res_src->start, resource_size(res_src),
+ pdev->name);
+ res_wdt = request_mem_region(res_wdt->start, resource_size(res_wdt),
+ pdev->name);
+
+ if (!res_src || !res_wdt) {
+ dev_err(&pdev->dev, "unable to request memory resources\n");
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ wdt->wdt_source = ioremap(res_src->start, resource_size(res_src));
+ wdt->wdt_timer = ioremap(res_wdt->start, resource_size(res_wdt));
+ if (!wdt->wdt_source || !wdt->wdt_timer) {
+ dev_err(&pdev->dev, "unable to map registers\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ src = readl(wdt->wdt_source);
+ if (src & BIT(12))
+ dev_info(&pdev->dev, "last reset due to watchdog timeout\n");
+
+ tegra_wdt_disable(wdt);
+ writel(TIMER_PCR_INTR, wdt->wdt_timer + TIMER_PCR);
+
+ ret = request_irq(res_irq->start, tegra_wdt_interrupt, IRQF_DISABLED,
+ dev_name(&pdev->dev), wdt);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to configure IRQ\n");
+ goto fail;
+ }
+
+ wdt->irq = res_irq->start;
+ wdt->res_src = res_src;
+ wdt->res_wdt = res_wdt;
+
+ ret = register_reboot_notifier(&wdt->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register reboot notifier\n");
+ goto fail;
+ }
+
+ ret = misc_register(&wdt->miscdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register misc device\n");
+ unregister_reboot_notifier(&wdt->notifier);
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+ tegra_wdt_dev = pdev;
+#ifdef CONFIG_TEGRA_WATCHDOG_ENABLE_ON_PROBE
+ wdt->enabled = true;
+ wdt->timeout = heartbeat;
+ tegra_wdt_enable(wdt);
+#endif
+ return 0;
+fail:
+ if (wdt->irq != -1)
+ free_irq(wdt->irq, wdt);
+ if (wdt->wdt_source)
+ iounmap(wdt->wdt_source);
+ if (wdt->wdt_timer)
+ iounmap(wdt->wdt_timer);
+ if (res_src)
+ release_mem_region(res_src->start, resource_size(res_src));
+ if (res_wdt)
+ release_mem_region(res_wdt->start, resource_size(res_wdt));
+ kfree(wdt);
+ return ret;
+}
+
+static int tegra_wdt_remove(struct platform_device *pdev)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ tegra_wdt_disable(wdt);
+
+ unregister_reboot_notifier(&wdt->notifier);
+ misc_deregister(&wdt->miscdev);
+ free_irq(wdt->irq, wdt);
+ iounmap(wdt->wdt_source);
+ iounmap(wdt->wdt_timer);
+ release_mem_region(wdt->res_src->start, resource_size(wdt->res_src));
+ release_mem_region(wdt->res_wdt->start, resource_size(wdt->res_wdt));
+ kfree(wdt);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ tegra_wdt_disable(wdt);
+ return 0;
+}
+
+static int tegra_wdt_resume(struct platform_device *pdev)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt->enabled)
+ tegra_wdt_enable(wdt);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_wdt_driver = {
+ .probe = tegra_wdt_probe,
+ .remove = __devexit_p(tegra_wdt_remove),
+#ifdef CONFIG_PM
+ .suspend = tegra_wdt_suspend,
+ .resume = tegra_wdt_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tegra_wdt",
+ },
+};
+
+static int __init tegra_wdt_init(void)
+{
+ return platform_driver_register(&tegra_wdt_driver);
+}
+
+static void __exit tegra_wdt_exit(void)
+{
+ platform_driver_unregister(&tegra_wdt_driver);
+}
+
+module_init(tegra_wdt_init);
+module_exit(tegra_wdt_exit);
+
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Tegra Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:tegra_wdt");
+
diff --git a/fs/Kconfig b/fs/Kconfig
index 9fe0b349f4cd..99453badf455 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -199,6 +199,10 @@ source "fs/hfsplus/Kconfig"
source "fs/befs/Kconfig"
source "fs/bfs/Kconfig"
source "fs/efs/Kconfig"
+
+# Patched by YAFFS
+source "fs/yaffs2/Kconfig"
+
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index afc109691a9b..a8bbb3227014 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -123,3 +123,6 @@ obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_EXOFS_FS) += exofs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
+
+# Patched by YAFFS
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 065ff37aa3b0..59cef1123053 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3344,8 +3344,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
- sbi->s_itb_per_group = sbi->s_inodes_per_group /
- sbi->s_inodes_per_block;
+ sbi->s_itb_per_group = DIV_ROUND_UP(sbi->s_inodes_per_group,
+ sbi->s_inodes_per_block);
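+ /* e.g. 100 inodes per group at 16 per block needs 7 itable blocks, not 6 */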
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 5efbd5d7701a..a13aacea9641 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -754,6 +754,13 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
return ret;
}
+static int fat_ioctl_volume_id(struct inode *dir)
+{
+ struct super_block *sb = dir->i_sb;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ return sbi->vol_id;
+}
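+
+/*
+ * Userspace retrieves the ID as the return value of
+ * ioctl(dirfd, VFAT_IOCTL_GET_VOLUME_ID) on the volume's root directory;
+ * the ioctl number itself is assumed to be defined alongside the other
+ * VFAT ioctls in include/linux/msdos_fs.h.
+ */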
+
static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -770,6 +777,8 @@ static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
short_only = 0;
both = 1;
break;
+ case VFAT_IOCTL_GET_VOLUME_ID:
+ return fat_ioctl_volume_id(inode);
default:
return fat_generic_ioctl(filp, cmd, arg);
}
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index a5d3853822e0..ba4a156ae76e 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -78,6 +78,7 @@ struct msdos_sb_info {
const void *dir_ops; /* Opaque; default directory operations */
int dir_per_block; /* dir entries per block */
int dir_per_block_bits; /* log2(dir_per_block) */
+ unsigned long vol_id; /* volume ID */
int fatent_shift;
struct fatent_operations *fatent_ops;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 1726d7303047..b2eae577b91a 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1247,6 +1247,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
struct inode *root_inode = NULL, *fat_inode = NULL;
struct buffer_head *bh;
struct fat_boot_sector *b;
+ struct fat_boot_bsx *bsx;
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
@@ -1391,6 +1392,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
goto out_fail;
}
+ bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
if (!IS_FSINFO(fsinfo)) {
fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
@@ -1406,8 +1409,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
}
brelse(fsinfo_bh);
+ } else {
+ bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
}
+ /* interpret volume ID as a little endian 32 bit integer */
+ sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+ ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
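+ /* e.g. on-disk bytes 78 56 34 12 give vol_id 0x12345678 */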
+
sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 04cf3b91e501..2585571eff5c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1057,7 +1057,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if ((inode->i_state & flags) == flags)
return;
- if (unlikely(block_dump))
+ if (unlikely(block_dump > 1))
block_dump___mark_inode_dirty(inode);
spin_lock(&inode->i_lock);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 2aaf3eaaf13d..c858b5c83209 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,6 +19,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
+#include <linux/freezer.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -387,7 +388,10 @@ __acquires(fc->lock)
* Wait it out.
*/
spin_unlock(&fc->lock);
- wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+
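+ /*
+ * Freezable wait: it can return before the request is finished (e.g.
+ * when the task is being frozen), hence the enclosing loop.
+ */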
+ while (req->state != FUSE_REQ_FINISHED)
+ wait_event_freezable(req->waitq,
+ req->state == FUSE_REQ_FINISHED);
spin_lock(&fc->lock);
if (!req->aborted)
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index e3c63d1c5e13..b4095cc73436 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -366,10 +366,21 @@ static void part_release(struct device *dev)
kfree(p);
}
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct hd_struct *part = dev_to_part(dev);
+
+ add_uevent_var(env, "PARTN=%u", part->partno);
+ if (part->info && part->info->volname[0])
+ add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+ return 0;
+}
+
struct device_type part_type = {
.name = "partition",
.groups = part_attr_groups,
.release = part_release,
+ .uevent = part_uevent,
};
static void delete_partition_rcu_cb(struct rcu_head *head)
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 6296b403c67a..30546cc8d03f 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -105,6 +105,7 @@
* the partition tables happens after init too.
*/
static int force_gpt;
+static u64 force_gpt_sector;
static int __init
force_gpt_fn(char *str)
{
@@ -113,6 +114,13 @@ force_gpt_fn(char *str)
}
__setup("gpt", force_gpt_fn);
+static int __init force_gpt_sector_fn(char *str)
+{
+ force_gpt_sector = simple_strtoull(str, NULL, 0);
+ return 1;
+}
+__setup("gpt_sector=", force_gpt_sector_fn);
+
/**
* efi_crc32() - EFI version of crc32 function
@@ -556,6 +564,9 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
if (!good_agpt && force_gpt)
good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
+ if (!good_agpt && force_gpt && force_gpt_sector)
+ good_agpt = is_gpt_valid(state, force_gpt_sector, &agpt, &aptes);
+
/* The obviously unsuccessful case */
if (!good_pgpt && !good_agpt)
goto fail;
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 15af6222f8a4..ddb83a0e15e8 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -67,3 +67,15 @@ config PROC_PAGE_MONITOR
/proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
/proc/kpagecount, and /proc/kpageflags. Disabling these
interfaces will reduce the size of the kernel by approximately 4kb.
+
+config REPORT_PRESENT_CPUS
+ default n
+ depends on PROC_FS && SMP
+ bool "Report present cpus instead of online cpus"
+ help
+ This is a workaround to report present CPUs instead of online CPUs.
+ Some power-saving implementations use CPU hotplug for power domains.
+ It is a bug to enable this on a server or any other architecture that
+ uses CPU hotplug in the conventional way.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5eb02069e1b8..445777158609 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -133,6 +133,12 @@ struct pid_entry {
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+/* ANDROID is for special files in /proc. */
+#define ANDROID(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG|(MODE)), \
+ &proc_##OTYPE##_inode_operations, \
+ &proc_##OTYPE##_operations, {})
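+
+/*
+ * e.g. ANDROID("oom_adj", S_IRUGO|S_IWUSR, oom_adjust) below expands to an
+ * entry backed by proc_oom_adjust_inode_operations and
+ * proc_oom_adjust_operations.
+ */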
+
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
* and .. links.
@@ -263,7 +269,8 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, PTRACE_MODE_READ)) {
+ !ptrace_may_access(task, PTRACE_MODE_READ) &&
+ !capable(CAP_SYS_RESOURCE)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
@@ -1140,6 +1147,38 @@ out:
return err < 0 ? err : count;
}
+static int oom_adjust_permission(struct inode *inode, int mask)
+{
+ uid_t uid = 0;
+ struct task_struct *p;
+
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+
+ p = get_proc_task(inode);
+ if (p) {
+ uid = task_uid(p);
+ put_task_struct(p);
+ }
+
+ /*
+ * System Server (uid == 1000) is granted access to oom_adj of all
+ * android applications (uid > 10000) and services (uid >= 1000).
+ */
+ if (p && (current_fsuid() == 1000) && (uid >= 1000)) {
+ if (inode->i_mode >> 6 & mask) {
+ return 0;
+ }
+ }
+
+ /* Fall back to default. */
+ return generic_permission(inode, mask);
+}
+
+static const struct inode_operations proc_oom_adjust_inode_operations = {
+ .permission = oom_adjust_permission,
+};
+
static const struct file_operations proc_oom_adjust_operations = {
.read = oom_adjust_read,
.write = oom_adjust_write,
@@ -2846,7 +2885,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+ ANDROID("oom_adj",S_IRUGO|S_IWUSR, oom_adjust),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 9758b654a1bc..4b758ad5c831 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -75,7 +75,11 @@ static int show_stat(struct seq_file *p, void *v)
(unsigned long long)cputime64_to_clock_t(steal),
(unsigned long long)cputime64_to_clock_t(guest),
(unsigned long long)cputime64_to_clock_t(guest_nice));
+#if defined(CONFIG_REPORT_PRESENT_CPUS)
+ for_each_present_cpu(i) {
+#else
for_each_online_cpu(i) {
+#endif
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
user = kstat_cpu(i).cpustat.user;
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 000000000000..635414059997
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,161 @@
+#
+# YAFFS file system configurations
+#
+
+config YAFFS_FS
+ tristate "YAFFS2 file system support"
+ default n
+ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ YAFFS2, or Yet Another Flash Filing System, is a filing system
+ optimised for NAND Flash chips.
+
+ To compile the YAFFS2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on YAFFS2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ Older-style on-NAND data format has a "pageStatus" byte to record
+ chunk/page state. This byte is zero when the page is discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions
+ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets Yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables Yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+ Without this, you need to explicitly use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+ bool "Disable YAFFS from doing ECC on tags by default"
+ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+ If this is set, Yaffs does not perform its own ECC calculations on
+ tags and relies on the MTD instead.
+ This behavior can also be overridden with the tags_ecc_on and
+ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally YAFFS only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have resulted from power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+ Suggest setting Y during development and ironing out driver
+ issues etc. Suggest setting to N if you want faster writing.
+
+ If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+ bool "Empty lost and found on boot"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is enabled then the contents of lost and found is
+ automatically dumped at mount.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+ bool "Disable yaffs2 block refreshing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then block refreshing is disabled.
+ Block refreshing infrequently refreshes the oldest block in
+ a yaffs2 file system. This mechanism helps to refresh flash to
+ mitigate against data loss. This is particularly useful for MLC.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+ bool "Disable yaffs2 background processing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then background processing is disabled.
+ Background processing makes many foreground activities faster.
+
+ If unsure, say N.
+
+config YAFFS_XATTR
+ bool "Enable yaffs2 xattr support"
+ depends on YAFFS_FS
+ default y
+ help
+ If this is set then yaffs2 will provide xattr support.
+ If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 000000000000..e63a28aa3ed6
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 000000000000..f9cd5becd8f4
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,396 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+#ifdef CONFIG_YAFFS_KMALLOC_ALLOCATOR
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ return (struct yaffs_tnode *)kmalloc(dev->tnode_size, GFP_NOFS);
+}
+
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ dev = dev;
+ kfree(tn);
+}
+
+void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return (struct yaffs_obj *)kmalloc(sizeof(struct yaffs_obj), GFP_NOFS);
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ dev = dev;
+ kfree(obj);
+}
+
+#else
+
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct yaffs_obj *free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (allocator) {
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+ } else {
+ YBUG();
+ }
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* Chain the new tnodes into a free list through internal[0] (wide tnode layout) */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,"Tnodes added");
+
+ return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ YBUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (allocator) {
+ allocator->allocated_obj_list = NULL;
+ allocator->free_objs = NULL;
+ allocator->n_free_objects = 0;
+ } else {
+ YBUG();
+ }
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+
+ allocator->allocated_obj_list = tmp;
+ }
+
+ allocator->free_objs = NULL;
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ int i;
+ struct yaffs_obj *new_objs;
+ struct yaffs_obj_list *list;
+
+ if (!allocator) {
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_obj < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+ if (!new_objs || !list) {
+ if (new_objs) {
+ kfree(new_objs);
+ new_objs = NULL;
+ }
+ if (list) {
+ kfree(list);
+ list = NULL;
+ }
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Could not allocate more objects");
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < n_obj - 1; i++) {
+ new_objs[i].siblings.next =
+ (struct list_head *)(&new_objs[i + 1]);
+ }
+
+ new_objs[n_obj - 1].siblings.next = (void *)allocator->free_objs;
+ allocator->free_objs = new_objs;
+ allocator->n_free_objects += n_obj;
+ allocator->n_obj_created += n_obj;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = new_objs;
+ list->next = allocator->allocated_obj_list;
+ allocator->allocated_obj_list = list;
+
+ return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ YBUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_objs)
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (allocator->free_objs) {
+ obj = allocator->free_objs;
+ allocator->free_objs =
+ (struct yaffs_obj *)(allocator->free_objs->siblings.next);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator)
+ YBUG();
+ else {
+ /* Link into the free list. */
+ obj->siblings.next = (struct list_head *)(allocator->free_objs);
+ allocator->free_objs = obj;
+ allocator->n_free_objects++;
+ }
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ if (dev->allocator) {
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+ } else {
+ YBUG();
+ }
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (!dev->allocator) {
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+ } else {
+ YBUG();
+ }
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 000000000000..4d5f2aec89ff
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 000000000000..9b47d376310c
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+}
+
+loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->ia_uid;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->ia_gid;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->ia_uid = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->ia_gid = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 000000000000..33d541d69441
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 000000000000..7df42cd0066e
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,98 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ YBUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ YBUG();
+ }
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ for (i = 0; i < dev->chunk_bit_stride; i++) {
+ if (*blk_bits)
+ return 1;
+ blk_bits++;
+ }
+ return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ int n = 0;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+ n += hweight8(*blk_bits);
+
+ return n;
+}
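The convention used by the helpers above is that chunk c of a block lives in byte c / 8 of that block's bitmap slice, at bit position c & 7. A minimal round-trip sketch under that convention (the wrapper name below is illustrative only, not part of the patch):

    static int bitmap_roundtrip_example(struct yaffs_dev *dev, int blk)
    {
            /* Mark chunk 10 of block blk as in use, then read the bit back. */
            yaffs_set_chunk_bit(dev, blk, 10);
            return yaffs_check_chunk_bit(dev, blk, 10);     /* returns 1 */
    }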
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 000000000000..cf9ea58da0d9
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 000000000000..4e40f437e655
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,415 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpt blocks_avail = %d", blocks_avail);
+
+ return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->param.erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+ if (dev->param.
+ erase_fn(dev,
+ i - dev->block_offset /* realign */ )) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->param.bad_block_fn(dev, i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_ext_tags tags;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: start: blocks %d next %d",
+ dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ int chunk = i * dev->param.chunks_per_block;
+ int realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk,
+ NULL, &tags);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: search: block %d oid %d seq %d eccr %d",
+ i, tags.obj_id, tags.seq_number,
+ tags.ecc_result);
+
+ if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+ dev->checkpt_next_block = tags.obj_id;
+ dev->checkpt_cur_block = i;
+ dev->checkpt_block_list[dev->
+ blocks_in_checkpt] = i;
+ dev->blocks_in_checkpt++;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "found checkpt block %d", i);
+ return;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->param.write_chunk_tags_fn ||
+ !dev->param.read_chunk_tags_fn ||
+ !dev->param.erase_fn || !dev->param.bad_block_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ /* Erase all the blocks in the checkpoint area */
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ dev->checkpt_byte_offs = 0;
+ return yaffs_checkpt_erase(dev);
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+		/* A checkpoint block list of 1 checkpoint block per 16 blocks is (hopefully)
+ * going to be way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 +
+ 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+ }
+
+ return 1;
+}
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+ u32 composite_sum;
+ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
+ *sum = composite_sum;
+ return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+ int chunk;
+ int realigned_chunk;
+
+ struct yaffs_ext_tags tags;
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_erased_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+ tags.is_deleted = 0;
+ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+ tags.chunk_id = dev->checkpt_page_seq + 1;
+ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.n_bytes = dev->data_bytes_per_chunk;
+ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->checkpt_cur_block);
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocks_in_checkpt++;
+ }
+
+ chunk =
+ dev->checkpt_cur_block * dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
+ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+ tags.obj_id, tags.chunk_id);
+
+ realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->n_page_writes++;
+
+ dev->param.write_chunk_tags_fn(dev, realigned_chunk,
+ dev->checkpt_buffer, &tags);
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+ dev->checkpt_cur_chunk = 0;
+ dev->checkpt_cur_block = -1;
+ }
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ return 1;
+}
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+
+ int chunk;
+ int realigned_chunk;
+
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ ok = 0;
+ else {
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->param.read_chunk_tags_fn(dev,
+ realigned_chunk,
+ dev->
+ checkpt_buffer,
+ &tags);
+
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1)
+ || tags.ecc_result > YAFFS_ECC_RESULT_FIXED
+ || tags.seq_number !=
+ YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ ok = 0;
+
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+ }
+ }
+
+ if (ok) {
+ *data_bytes =
+ dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+ }
+
+ return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+
+ if (dev->checkpt_open_write) {
+ if (dev->checkpt_byte_offs != 0)
+ yaffs2_checkpt_flush_buffer(dev);
+ } else if (dev->checkpt_block_list) {
+ int i;
+ for (i = 0;
+ i < dev->blocks_in_checkpt
+ && dev->checkpt_block_list[i] >= 0; i++) {
+ int blk = dev->checkpt_block_list[i];
+ struct yaffs_block_info *bi = NULL;
+ if (dev->internal_start_block <= blk
+ && blk <= dev->internal_end_block)
+ bi = yaffs_get_block_info(dev, blk);
+ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+ /* Todo this looks odd... */
+ }
+ }
+ kfree(dev->checkpt_block_list);
+ dev->checkpt_block_list = NULL;
+ }
+
+ dev->n_free_chunks -=
+ dev->blocks_in_checkpt * dev->param.chunks_per_block;
+ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,"checkpoint byte count %d",
+ dev->checkpt_byte_count);
+
+ if (dev->checkpt_buffer) {
+ /* free the buffer */
+ kfree(dev->checkpt_buffer);
+ dev->checkpt_buffer = NULL;
+ return 1;
+ } else {
+ return 0;
+ }
+}
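Taken together, the open, write and close routines above behave like a simple byte stream over dedicated checkpoint blocks: bytes accumulate in checkpt_buffer and are flushed a chunk at a time. Below is a minimal sketch of how a caller might push a structure through that stream; the payload struct and wrapper name are hypothetical, the real checkpoint records are defined elsewhere in yaffs2.

    struct cp_example {                     /* hypothetical payload */
            u32 magic;
            u32 n_objects;
    };

    static int checkpt_save_example(struct yaffs_dev *dev)
    {
            struct cp_example cp = { 0x59414646, 0 };
            int wrote;

            if (!yaffs2_checkpt_open(dev, 1))       /* 1 = open for writing */
                    return YAFFS_FAIL;

            /* yaffs2_checkpt_wr() returns the number of bytes it accepted */
            wrote = yaffs2_checkpt_wr(dev, &cp, sizeof(cp));

            /* close flushes any partial chunk and accounts the blocks used */
            if (!yaffs_checkpt_close(dev) || wrote != (int)sizeof(cp))
                    return YAFFS_FAIL;

            return YAFFS_OK;
    }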
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 000000000000..361c6067717e
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 000000000000..e95a8069a8c5
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,298 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits 7..2.
+ * Bit 0 of each entry indicates whether the entry has odd or even parity, and therefore
+ * this byte's influence on the line parity.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ /* Swap the bytes into the wrong order */
+ t = ecc[0];
+ ecc[0] = ecc[1];
+ ecc[1] = t;
+#endif
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ /* swap the bytes to correct for the wrong order */
+ unsigned char t;
+
+ t = d0;
+ d0 = d1;
+ d1 = t;
+#endif
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+		/* Recoverable error in ECC */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
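A minimal round-trip sketch of how the two routines above are meant to pair up (the wrapper name and the simulated bit flip are illustrative only): the ECC computed at write time is kept, a fresh ECC is computed over the data read back, and yaffs_ecc_correct() repairs a single flipped bit in the 256-byte buffer.

    static int ecc_roundtrip_example(unsigned char page[256])
    {
            unsigned char ecc_stored[3];    /* as saved in the spare area at write time */
            unsigned char ecc_now[3];       /* recomputed over the data just read */

            yaffs_ecc_cacl(page, ecc_stored);

            page[10] ^= 0x04;               /* simulate a single bit flip in the data */

            yaffs_ecc_cacl(page, ecc_now);

            /* returns 0 = no error, 1 = corrected, -1 = unrecoverable */
            return yaffs_ecc_correct(page, ecc_stored, ecc_now);
    }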
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+	unsigned delta_line_prime;	/* line parity prime delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+		/* Recoverable error in ECC */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 000000000000..b0c461d699e6
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 000000000000..d87acbde997c
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ YBUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 000000000000..f4ae9deed727
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5164 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_tagsvalidity.h"
+#include "yaffs_getblockinfo.h"
+
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_nand.h"
+
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+
+#include "yaffs_attribs.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 * buffer, int n_bytes, int use_reserve);
+
+
+
+/* Function to calculate chunk and offset */
+
+static void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 * offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
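For the common power-of-two layout, say 2048-byte data chunks so that chunk_shift is 11 and chunk_mask is 0x7ff (values assumed here purely for illustration), the split is a plain shift and mask, worked by hand:

    /* addr 5000: chunk  = 5000 >> 11   = 2   */
    /*            offset = 5000 & 0x7ff = 904 */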
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number.
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
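Two values worked by hand as a sanity check of the rounding-up behaviour:

    /* calc_shifts_ceiling(2048) == 11, since 2^11 == 2048 exactly        */
    /* calc_shifts_ceiling(2100) == 12, since 2^11 < 2100 <= 2^12 == 4096 */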
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
+ u8 *buf = (u8 *) 1;
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].line = 0; /* not in use */
+ dev->temp_buffer[i].buffer = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev, int line_no)
+{
+ int i, j;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].line == 0) {
+ dev->temp_buffer[i].line = line_no;
+ if ((i + 1) > dev->max_temp) {
+ dev->max_temp = i + 1;
+ for (j = 0; j <= i; j++)
+ dev->temp_buffer[j].max_line =
+ dev->temp_buffer[j].line;
+ }
+
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+		"Out of temp buffers at line %d, others held by lines:",
+ line_no);
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ yaffs_trace(YAFFS_TRACE_BUFFERS," %d", dev->temp_buffer[i].line);
+
+ /*
+	 * If we got here then we have to allocate an unmanaged one.
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].line = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer in line %d",
+ line_no);
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Determine if we have a managed buffer.
+ */
+int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 * buffer)
+{
+ int i;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer)
+ return 1;
+ }
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].data == buffer)
+ return 1;
+ }
+
+ if (buffer == dev->checkpt_buffer)
+ return 1;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs: unmanaged buffer detected.");
+ return 0;
+}
+
+/*
+ * Functions for robustness handling (TODO)
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ data = data;
+ tags = tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ tags = tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+			bi->needs_retiring = 1;	/* Too many strikes, so retire this block */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ n = abs(n);
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 * buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xFF)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
+ struct yaffs_ext_tags tags;
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS, "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ ((reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block);
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks" );
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* Not enough space to allocate unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR, "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" );
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ if (dev->alloc_block > 0) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 * data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retired_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ yaffs_init_tags(&tags);
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->param.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR * name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ const YUCHAR *bname = (const YUCHAR *)name;
+ if (bname) {
+ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH / 2))) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*bname) & 0x1f) * i;
+ i++;
+ bname++;
+ }
+ }
+ return sum;
+}
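The 0x1f mask folds ASCII letter case before each character is weighted, for example:

    /* 'A' == 0x41 and 'a' == 0x61 both give 0x01 after "& 0x1f", */
    /* so "Foo" and "foo" end up with the same name sum.          */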
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+ if (name &&
+ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH)
+ strcpy(obj->short_name, name);
+ else
+ obj->short_name[0] = _Y('\0');
+#endif
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
+void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+ mask =
+ dev->tnode_mask >> ( /*dev->tnode_width - */ bit_in_word);
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ dev = dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
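Assuming the usual yaffs_guts.h sizing of 16 level-0 slots (YAFFS_TNODES_LEVEL0_BITS == 4) and 8 slots per internal tnode (YAFFS_TNODES_INTERNAL_BITS == 3), constants not shown in this hunk, the required_depth loop above resolves as follows:

    /* chunk_id 0x00..0x0f  -> required_depth 0 (level 0 tnode only) */
    /* chunk_id 0x10..0x7f  -> required_depth 1                      */
    /* chunk_id 0x80..0x3ff -> required_depth 2                      */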
+
+/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added; otherwise the specified tn will
+ * be plugged into the tree.
+ */
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn)
+{
+ int required_depth;
+ int i;
+ int l;
+ struct yaffs_tnode *tn;
+
+ u32 x;
+
+ /* Check sane level and page Id */
+ if (file_struct->top_level < 0
+ || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level) {
+ /* Not tall enough, gotta make the tree taller */
+ for (i = file_struct->top_level; i < required_depth; i++) {
+
+ tn = yaffs_get_tnode(dev);
+
+ if (tn) {
+ tn->internal[0] = file_struct->top;
+ file_struct->top = tn;
+ file_struct->top_level++;
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "yaffs: no more tnodes");
+ return NULL;
+ }
+ }
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = file_struct->top_level;
+ tn = file_struct->top;
+
+ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passed_tn) {
+ /* If we already have one, then release it. */
+ if (tn->internal[x])
+ yaffs_free_tnode(dev,
+ tn->
+ internal[x]);
+ tn->internal[x] = passed_tn;
+
+ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0 */
+ if (passed_tn) {
+ memcpy(tn, passed_tn,
+ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+ yaffs_free_tnode(dev, passed_tn);
+ }
+ }
+
+ return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+ int chunk_obj)
+{
+ return (tags->chunk_id == chunk_obj &&
+ tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
+
+}
+
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+ struct yaffs_ext_tags *tags, int obj_id,
+ int inode_chunk)
+{
+ int j;
+
+ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+ if (yaffs_check_chunk_bit
+ (dev, the_chunk / dev->param.chunks_per_block,
+ the_chunk % dev->param.chunks_per_block)) {
+
+ if (dev->chunk_grp_size == 1)
+ return the_chunk;
+ else {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ tags);
+ if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
+ /* found it; */
+ return the_chunk;
+ }
+ }
+ }
+ the_chunk++;
+ }
+ return -1;
+}
+
+static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+	/* Get the tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ int ret_val = -1;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (tn) {
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val =
+ yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+ }
+ return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+	/* Get the tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+ int ret_val = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (tn) {
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val =
+ yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (ret_val != -1)
+ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+ }
+
+ return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan)
+{
+ /* NB in_scan is zero unless scanning.
+ * For forward scanning, in_scan is > 0;
+ * for backward scanning in_scan is < 0
+ *
+ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+ */
+
+ struct yaffs_tnode *tn;
+ struct yaffs_dev *dev = in->my_dev;
+ int existing_cunk;
+ struct yaffs_ext_tags existing_tags;
+ struct yaffs_ext_tags new_tags;
+ unsigned existing_serial, new_serial;
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+		/* Just ignore an attempt at putting a chunk into a non-file during scanning.
+		 * If it is not during scanning then something went wrong!
+ */
+ if (!in_scan) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy:attempt to put data chunk into a non-file"
+ );
+ YBUG();
+ }
+
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_add_find_tnode_0(dev,
+ &in->variant.file_variant,
+ inode_chunk, NULL);
+ if (!tn)
+ return YAFFS_FAIL;
+
+ if (!nand_chunk)
+ /* Dummy insert, bail now */
+ return YAFFS_OK;
+
+ existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ if (in_scan != 0) {
+ /* If we're scanning then we need to test for duplicates.
+ * NB This does not need to be efficient since it should only ever
+ * happen when the power fails during a write, and then only one
+ * chunk should ever be affected.
+ *
+ * Correction for YAFFS2: this could happen quite a lot and we need to
+ * think about efficiency! TODO
+ * Update: for backward scanning we don't need to re-read tags, so this
+ * is quite cheap.
+ */
+
+ if (existing_chunk > 0) {
+ /* NB Right now existing_chunk will not be the real chunk_id if the
+ * chunk group size > 1, so we have to do a FindChunkInFile to get the
+ * real chunk id.
+ *
+ * We have a duplicate, so we need to decide which one to use:
+ *
+ * Backwards scanning YAFFS2: the old one is what we use, dump the new one.
+ * Forward scanning YAFFS2: the new one is what we use, dump the old one.
+ * YAFFS1: get both sets of tags and compare serial numbers.
+ */
+
+ if (in_scan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_rd_chunk_tags_nand(dev,
+ nand_chunk,
+ NULL, &new_tags);
+
+ /* Do a proper find */
+ existing_chunk =
+ yaffs_find_chunk_in_file(in, inode_chunk,
+ &existing_tags);
+ }
+
+ if (existing_chunk <= 0) {
+ /* Hoosterman - how did this happen? */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: existing chunk < 0 in scan"
+ );
+
+ }
+
+ /* NB The deleted flags should be false, otherwise the chunks will
+ * not be loaded during a scan
+ */
+
+ if (in_scan > 0) {
+ new_serial = new_tags.serial_number;
+ existing_serial = existing_tags.serial_number;
+ }
+
+ if ((in_scan > 0) &&
+ (existing_chunk <= 0 ||
+ ((existing_serial + 1) & 3) == new_serial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to update the tnode
+ */
+ yaffs_chunk_del(dev, existing_chunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the existing one
+ * Use existing.
+ * Delete the new one and return early so that the tnode isn't changed
+ */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existing_chunk == 0)
+ in->n_data_chunks++;
+
+ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+ return YAFFS_OK;
+}
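+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * the YAFFS1 duplicate resolution above compares 2-bit serial numbers modulo 4,
+ * so the chunk whose serial is one step ahead (mod 4) is treated as the newer
+ * copy. A minimal restatement of that rule, with hypothetical names:
+ */
+#if 0
+static int example_is_newer_serial(unsigned existing_serial, unsigned new_serial)
+{
+ /* 1 when new_serial is exactly one ahead of existing_serial mod 4,
+ * e.g. existing 3, new 0 -> newer; existing 2, new 0 -> not newer.
+ */
+ return ((existing_serial + 1) & 3) == (new_serial & 3);
+}
+#endif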
+
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+ struct yaffs_block_info *the_block;
+ unsigned block_no;
+
+ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+ block_no = chunk / dev->param.chunks_per_block;
+ the_block = yaffs_get_block_info(dev, block_no);
+ if (the_block) {
+ the_block->soft_del_pages++;
+ dev->n_free_chunks++;
+ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
+ * the chunks in the file.
+ * All soft deleting does is increment the block's soft-delete count and pull
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks
+ * are soft deleted.
+ */
+
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+ u32 level, int chunk_offset)
+{
+ int i;
+ int the_chunk;
+ int all_done = 1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = YAFFS_NTNODES_INTERNAL - 1; all_done && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ all_done =
+ yaffs_soft_del_worker(in, tn->internal[i],
+ level - 1,
+ (chunk_offset << YAFFS_TNODES_INTERNAL_BITS) + i);
+ if (all_done) {
+ yaffs_free_tnode(dev, tn->internal[i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Hoosterman... how could this happen? */
+ }
+ }
+ }
+ return (all_done) ? 1 : 0;
+ } else if (level == 0) {
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk) {
+ /* Note this does not find the real chunk, only the chunk group.
+ * We make an assumption that a chunk group is not larger than
+ * a block.
+ */
+ yaffs_soft_del_chunk(dev, the_chunk);
+ yaffs_load_tnode_0(dev, tn, i, 0);
+ }
+
+ }
+ return 1;
+
+ }
+
+ }
+
+ return 1;
+
+}
+
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_obj *parent;
+
+ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+ yaffs_verify_dir(parent);
+
+ if (dev && dev->param.remove_obj_fn)
+ dev->param.remove_obj_fn(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+
+ yaffs_verify_dir(parent);
+}
+
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a null pointer directory"
+ );
+ YBUG();
+ return;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a non-directory"
+ );
+ YBUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ YBUG();
+ }
+
+ yaffs_verify_dir(directory);
+
+ yaffs_remove_obj_from_dir(obj);
+
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+ if (directory == obj->my_dev->unlinked_dir
+ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+ obj->my_dev->n_unlinked_files++;
+ obj->rename_allowed = 0;
+ }
+
+ yaffs_verify_dir(directory);
+ yaffs_verify_obj_in_dir(obj);
+}
+
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+ struct yaffs_obj *new_dir,
+ const YCHAR * new_name, int force, int shadows)
+{
+ int unlink_op;
+ int del_op;
+
+ struct yaffs_obj *existing_target;
+
+ if (new_dir == NULL)
+ new_dir = obj->parent; /* use the old directory */
+
+ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
+ );
+ YBUG();
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+ if (obj->my_dev->param.is_yaffs2)
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+ else
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir
+ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
+
+ del_op = (new_dir == obj->my_dev->del_dir);
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are allowed.
+ * Otherwise only proceed if the new name does not exist and we're
+ * putting it into a directory.
+ */
+ if ((unlink_op ||
+ del_op ||
+ force ||
+ (shadows > 0) ||
+ !existing_target) &&
+ new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_set_obj_name(obj, new_name);
+ obj->dirty = 1;
+
+ yaffs_add_obj_to_dir(new_dir, obj);
+
+ if (unlink_op)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc purposes. */
+ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >=
+ 0)
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ----------------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes, e.g. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a limited number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
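+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * the short op cache is just a small array searched linearly. The hypothetical
+ * miniature below shows the basic (object, chunk) lookup; in yaffs itself this
+ * role is played by yaffs_find_chunk_cache()/yaffs_grab_chunk_cache() further
+ * down in this file.
+ */
+#if 0
+struct example_cache_entry {
+ int object_id;
+ int chunk_id;
+ int dirty;
+ unsigned last_use;
+};
+
+static struct example_cache_entry *example_find_entry(
+ struct example_cache_entry *cache, int n_entries,
+ int object_id, int chunk_id)
+{
+ int i;
+
+ for (i = 0; i < n_entries; i++) {
+ if (cache[i].object_id == object_id &&
+ cache[i].chunk_id == chunk_id)
+ return &cache[i];
+ }
+ return NULL; /* the caller then grabs or evicts an entry */
+}
+#endif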
+
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj && cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int lowest = -99; /* Stop compiler whining. */
+ int i;
+ struct yaffs_cache *cache;
+ int chunk_written = 0;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ if (n_caches > 0) {
+ do {
+ cache = NULL;
+
+ /* Find the dirty cache for this object with the lowest chunk id. */
+ for (i = 0; i < n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].dirty) {
+ if (!cache
+ || dev->cache[i].chunk_id <
+ lowest) {
+ cache = &dev->cache[i];
+ lowest = cache->chunk_id;
+ }
+ }
+ }
+
+ if (cache && !cache->locked) {
+ /* Write it out and free it up */
+
+ chunk_written =
+ yaffs_wr_data_obj(cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ cache->object = NULL;
+ }
+
+ } while (cache && chunk_written > 0);
+
+ if (cache)
+ /* Hoosterman, disk full while writing cache out. */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no space during cache write");
+
+ }
+
+}
+
+/* yaffs_flush_whole_cache(dev)
+ *
+ * Flush the dirty cache entries for every object on the device.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int n_caches = dev->param.n_caches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for (i = 0; i < n_caches && !obj; i++) {
+ if (dev->cache[i].object && dev->cache[i].dirty)
+ obj = dev->cache[i].object;
+
+ }
+ if (obj)
+ yaffs_flush_file_cache(obj);
+
+ } while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one, flush it and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (!dev->cache[i].object)
+ return &dev->cache[i];
+ }
+ }
+
+ return NULL;
+}
+
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_cache *cache;
+ struct yaffs_obj *the_obj;
+ int usage;
+ int i;
+ int pushout;
+
+ if (dev->param.n_caches > 0) {
+ /* Try to find a non-dirty one... */
+
+ cache = yaffs_grab_chunk_worker(dev);
+
+ if (!cache) {
+ /* They were all dirty, find the least recently used object and flush
+ * its cache, then look again.
+ * NB what's here is not very accurate: we actually flush the whole
+ * object that owns the least recently used page.
+ */
+
+ /* With locking we can't assume we can use entry zero */
+
+ the_obj = NULL;
+ usage = -1;
+ cache = NULL;
+ pushout = -1;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object &&
+ !dev->cache[i].locked &&
+ (dev->cache[i].last_use < usage
+ || !cache)) {
+ usage = dev->cache[i].last_use;
+ the_obj = dev->cache[i].object;
+ cache = &dev->cache[i];
+ pushout = i;
+ }
+ }
+
+ if (!cache || cache->dirty) {
+ /* Flush and try again */
+ yaffs_flush_file_cache(the_obj);
+ cache = yaffs_grab_chunk_worker(dev);
+ }
+
+ }
+ return cache;
+ } else {
+ return NULL;
+ }
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+ int chunk_id)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].chunk_id == chunk_id) {
+ dev->cache_hits++;
+
+ return &dev->cache[i];
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+ int is_write)
+{
+
+ if (dev->param.n_caches > 0) {
+ if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
+ /* Reset the cache usages */
+ int i;
+ for (i = 1; i < dev->param.n_caches; i++)
+ dev->cache[i].last_use = 0;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_last_use++;
+
+ cache->last_use = dev->cache_last_use;
+
+ if (is_write)
+ cache->dirty = 1;
+ }
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+ if (object->my_dev->param.n_caches > 0) {
+ struct yaffs_cache *cache =
+ yaffs_find_chunk_cache(object, chunk_id);
+
+ if (cache)
+ cache->object = NULL;
+ }
+}
+
+/* Invalidate all the cache pages associated with this object.
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.n_caches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == in)
+ dev->cache[i].object = NULL;
+ }
+ }
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+ int bucket;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&obj->hash_link)) {
+ list_del_init(&obj->hash_link);
+ bucket = yaffs_hash_fn(obj->obj_id);
+ dev->obj_bucket[bucket].count--;
+ }
+}
+
+/* FreeObject frees up an object and puts it back on the free list */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ /* Do the sanity check before dereferencing obj. */
+ if (!obj) {
+ YBUG();
+ return;
+ }
+
+ dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+ obj, obj->my_inode);
+
+ if (obj->parent)
+ YBUG();
+ if (!list_empty(&obj->siblings))
+ YBUG();
+
+ if (obj->my_inode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ obj->defered_free = 1;
+ return;
+ }
+
+ yaffs_unhash_obj(obj);
+
+ yaffs_free_raw_obj(dev, obj);
+ dev->n_obj--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+ if (obj->defered_free)
+ yaffs_free_obj(obj);
+}
+
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+
+ /* First off, invalidate the file's data in the cache, without flushing. */
+ yaffs_invalidate_whole_cache(in);
+
+ if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
+ /* Move to the deleted directory so we have a record that it was deleted. */
+ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+ 0);
+
+ }
+
+ yaffs_remove_obj_from_dir(in);
+ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+ in->hdr_chunk = 0;
+
+ yaffs_free_obj(in);
+ return YAFFS_OK;
+
+}
+
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+ if (obj->deleted &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
+ if (obj->n_data_chunks <= 0) {
+ /* Empty file with no duplicate object headers,
+ * just delete it immediately */
+ yaffs_free_tnode(obj->my_dev,
+ obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: Deleting empty file %d",
+ obj->obj_id);
+ yaffs_generic_obj_del(obj);
+ } else {
+ yaffs_soft_del_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.top_level, 0);
+ obj->soft_del = 1;
+ }
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+ struct yaffs_tnode *tn, u32 level,
+ int del0)
+{
+ int i;
+ int has_data;
+
+ if (tn) {
+ has_data = 0;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_prune_worker(dev,
+ tn->internal[i],
+ level - 1,
+ (i ==
+ 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ has_data++;
+ }
+ } else {
+ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+ u32 *map = (u32 *) tn;
+
+ for (i = 0; !has_data && i < tnode_size_u32; i++) {
+ if (map[i])
+ has_data++;
+ }
+ }
+
+ if (has_data == 0 && del0) {
+ /* Free and return NULL */
+
+ yaffs_free_tnode(dev, tn);
+ tn = NULL;
+ }
+
+ }
+
+ return tn;
+
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct)
+{
+ int i;
+ int has_data;
+ int done = 0;
+ struct yaffs_tnode *tn;
+
+ if (file_struct->top_level > 0) {
+ file_struct->top =
+ yaffs_prune_worker(dev, file_struct->top,
+ file_struct->top_level, 0);
+
+ /* Now we have a tree in which all the branches that hold no data are
+ * NULL, but the height is the same as it was.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element in the top tnode is in use
+ * (i.e. all the other elements are NULL).
+ */
+
+ while (file_struct->top_level && !done) {
+ tn = file_struct->top;
+
+ has_data = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i])
+ has_data++;
+ }
+
+ if (!has_data) {
+ file_struct->top = tn->internal[0];
+ file_struct->top_level--;
+ yaffs_free_tnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+ }
+
+ return YAFFS_OK;
+}
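+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * the trimming loop above drops the top tnode whenever only its 0th slot is
+ * populated, reducing the tree height by one each time. The hypothetical
+ * helper below restates the "can we trim?" test on its own.
+ */
+#if 0
+static int example_top_can_be_trimmed(struct yaffs_tnode *top)
+{
+ /* Trimming is allowed when every slot other than slot 0 is NULL. */
+ int i;
+
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++)
+ if (top->internal[i])
+ return 0;
+ return 1;
+}
+#endif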
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* AllocateEmptyObject gets us a clean object. Tries to allocate more if we run out. */
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+ if (obj) {
+ dev->n_obj++;
+
+ /* Now sweeten it up... */
+
+ memset(obj, 0, sizeof(struct yaffs_obj));
+ obj->being_created = 1;
+
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0;
+ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(obj->hard_links));
+ INIT_LIST_HEAD(&(obj->hash_link));
+ INIT_LIST_HEAD(&obj->siblings);
+
+ /* Now make the directory sane */
+ if (dev->root_dir) {
+ obj->parent = dev->root_dir;
+ list_add(&(obj->siblings),
+ &dev->root_dir->variant.dir_variant.children);
+ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lost-n-found in lost-n-found so
+ * check if lost-n-found exists first
+ */
+ if (dev->lost_n_found)
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+ obj->being_created = 0;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* Search for the shortest list or one that
+ * isn't too long.
+ */
+
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucket_finder++;
+ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+ lowest = dev->obj_bucket[dev->bucket_finder].count;
+ l = dev->bucket_finder;
+ }
+
+ }
+
+ return l;
+}
+
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+ int bucket = yaffs_find_nice_bucket(dev);
+
+ /* Now find an object id that has not already been taken
+ * by scanning the list.
+ */
+
+ int found = 0;
+ struct list_head *i;
+
+ u32 n = (u32) bucket;
+
+ /* yaffs_check_obj_hash_sane(); */
+
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->obj_bucket[bucket].count > 0) {
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i && list_entry(i, struct yaffs_obj,
+ hash_link)->obj_id == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+
+ return n;
+}
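+
+/* Editor's note with a sketch, not part of the original yaffs code (compiled
+ * out): object ids are handed out so that hashing the id lands back in the
+ * chosen bucket; starting from the bucket number and stepping by the bucket
+ * count preserves that invariant. The miniature below assumes a plain modulo
+ * hash and a hypothetical bucket count of 256.
+ */
+#if 0
+#define EXAMPLE_N_BUCKETS 256
+
+static unsigned example_next_id_in_bucket(unsigned bucket, unsigned last_id)
+{
+ /* Step by the bucket count so (id % EXAMPLE_N_BUCKETS) == bucket still
+ * holds, e.g. bucket 7 yields 263, 519, 775, ...
+ */
+ unsigned id = (last_id < bucket) ? bucket : last_id;
+
+ return id + EXAMPLE_N_BUCKETS;
+}
+#endif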
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+ int bucket = yaffs_hash_fn(in->obj_id);
+ struct yaffs_dev *dev = in->my_dev;
+
+ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+ dev->obj_bucket[bucket].count++;
+}
+
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+ int bucket = yaffs_hash_fn(number);
+ struct list_head *i;
+ struct yaffs_obj *in;
+
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* Look if it is in the list */
+ if (i) {
+ in = list_entry(i, struct yaffs_obj, hash_link);
+ if (in->obj_id == number) {
+
+ /* Don't tell the VFS about this one if it is defered free */
+ if (in->defered_free)
+ return NULL;
+
+ return in;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+ struct yaffs_tnode *tn = NULL;
+
+ if (number < 0)
+ number = yaffs_new_obj_id(dev);
+
+ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_get_tnode(dev);
+ if (!tn)
+ return NULL;
+ }
+
+ the_obj = yaffs_alloc_empty_obj(dev);
+ if (!the_obj) {
+ if (tn)
+ yaffs_free_tnode(dev, tn);
+ return NULL;
+ }
+
+ if (the_obj) {
+ the_obj->fake = 0;
+ the_obj->rename_allowed = 1;
+ the_obj->unlink_allowed = 1;
+ the_obj->obj_id = number;
+ yaffs_hash_obj(the_obj);
+ the_obj->variant_type = type;
+ yaffs_load_current_time(the_obj, 1, 1);
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ the_obj->variant.file_variant.file_size = 0;
+ the_obj->variant.file_variant.scanned_size = 0;
+ the_obj->variant.file_variant.shrink_size = ~0; /* max */
+ the_obj->variant.file_variant.top_level = 0;
+ the_obj->variant.file_variant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* todo this should not happen */
+ break;
+ }
+ }
+
+ return the_obj;
+}
+
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+ int number, u32 mode)
+{
+
+ struct yaffs_obj *obj =
+ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (obj) {
+ obj->fake = 1; /* it is fake so it might have no NAND presence... */
+ obj->rename_allowed = 0; /* ... and we're not allowed to rename it... */
+ obj->unlink_allowed = 0; /* ... or unlink it */
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0; /* Not a valid chunk. */
+ }
+
+ return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ int i;
+
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+
+ yaffs_init_raw_tnodes_and_objs(dev);
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+ dev->obj_bucket[i].count = 0;
+ }
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+
+ if (number > 0)
+ the_obj = yaffs_find_by_number(dev, number);
+
+ if (!the_obj)
+ the_obj = yaffs_new_obj(dev, number, type);
+
+ return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR * str)
+{
+ YCHAR *new_str = NULL;
+ int len;
+
+ if (!str)
+ str = _Y("");
+
+ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+ if (new_str) {
+ strncpy(new_str, str, len);
+ new_str[len] = 0;
+ }
+ return new_str;
+
+}
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
+ * link (i.e. name) is created or deleted in the directory.
+ *
+ * i.e.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the number
+ * of updates when many files in a directory are changed within a brief period.
+ *
+ * If the directory updating is deferred then yaffs_update_dirty_dirs() must be
+ * called periodically.
+ */
+
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ if (!obj)
+ return;
+ dev = obj->my_dev;
+ obj->dirty = 1;
+ yaffs_load_current_time(obj, 0, 1);
+ if (dev->param.defered_dir_update) {
+ struct list_head *link = &obj->variant.dir_variant.dirty;
+
+ if (list_empty(link)) {
+ list_add(link, &dev->dirty_dirs);
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Added object %d to dirty directories",
+ obj->obj_id);
+ }
+
+ } else {
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+ struct list_head *link;
+ struct yaffs_obj *obj;
+ struct yaffs_dir_var *d_s;
+ union yaffs_obj_var *o_v;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+ while (!list_empty(&dev->dirty_dirs)) {
+ link = dev->dirty_dirs.next;
+ list_del_init(link);
+
+ d_s = list_entry(link, struct yaffs_dir_var, dirty);
+ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+ obj = list_entry(o_v, struct yaffs_obj, variant);
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+ obj->obj_id);
+
+ if (obj->dirty)
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
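+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * when param.defered_dir_update is set, directory timestamp writes queue up on
+ * dev->dirty_dirs and something has to drain them now and then. A background
+ * loop could look roughly like the outline below; example_keep_running() and
+ * example_sleep_ms() are hypothetical stand-ins.
+ */
+#if 0
+static void example_background_loop(struct yaffs_dev *dev)
+{
+ /* Periodically write out deferred directory updates. */
+ while (example_keep_running()) { /* hypothetical */
+ yaffs_update_dirty_dirs(dev);
+ example_sleep_ms(1000); /* hypothetical */
+ }
+}
+#endif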
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+ struct yaffs_obj *parent,
+ const YCHAR * name,
+ u32 mode,
+ u32 uid,
+ u32 gid,
+ struct yaffs_obj *equiv_obj,
+ const YCHAR * alias_str, u32 rdev)
+{
+ struct yaffs_obj *in;
+ YCHAR *str = NULL;
+
+ struct yaffs_dev *dev = parent->my_dev;
+
+ /* Check if the entry exists. If it does then fail the call since we don't want a dup. */
+ if (yaffs_find_by_name(parent, name))
+ return NULL;
+
+ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_clone_str(alias_str);
+ if (!str)
+ return NULL;
+ }
+
+ in = yaffs_new_obj(dev, -1, type);
+
+ if (!in) {
+ if (str)
+ kfree(str);
+ return NULL;
+ }
+
+ if (in) {
+ in->hdr_chunk = 0;
+ in->valid = 1;
+ in->variant_type = type;
+
+ in->yst_mode = mode;
+
+ yaffs_attribs_init(in, gid, uid, rdev);
+
+ in->n_data_chunks = 0;
+
+ yaffs_set_obj_name(in, name);
+ in->dirty = 1;
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ in->my_dev = parent->my_dev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardlink_variant.equiv_obj = equiv_obj;
+ in->variant.hardlink_variant.equiv_id =
+ equiv_obj->obj_id;
+ list_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+ /* Could not create the object header, fail the creation */
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ yaffs_update_parent(parent);
+ }
+
+ return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+ u32 mode, u32 uid, u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, u32 rdev)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, const YCHAR * alias)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the equivalent object, or NULL on failure. */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj)
+{
+ /* Get the real object in case we were fed a hard link as an equivalent object */
+ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+ if (yaffs_create_obj
+ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0)) {
+ return equiv_obj;
+ } else {
+ return NULL;
+ }
+
+}
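+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * the wrappers above all funnel into yaffs_create_obj(). Typical use against a
+ * device's root directory might look like this; the names and mode/uid/gid
+ * values are placeholders and error checking is omitted.
+ */
+#if 0
+static void example_populate_root(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *dir, *file;
+
+ dir = yaffs_create_dir(dev->root_dir, _Y("docs"), 0755, 0, 0);
+ file = yaffs_create_file(dir, _Y("readme"), 0644, 0, 0);
+ yaffs_create_symlink(dir, _Y("readme-link"), 0777, 0, 0, _Y("readme"));
+ yaffs_link_obj(dir, _Y("readme-hardlink"), file);
+}
+#endif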
+
+
+
+/*------------------------- Block Management and Page Allocation ----------------*/
+
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+
+ dev->alloc_block = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (dev->block_info) {
+ /* Set up dynamic blockinfo stuff. Round up bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ }
+
+ if (dev->block_info && dev->chunk_bits) {
+ memset(dev->block_info, 0,
+ n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else if (dev->block_info)
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else if (dev->chunk_bits)
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
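+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * the pair above uses the usual "try kmalloc, fall back to vmalloc and
+ * remember which one succeeded" pattern. Stripped of the yaffs specifics it
+ * amounts to the following.
+ */
+#if 0
+static void *example_alloc_big(size_t size, int *used_vmalloc)
+{
+ void *p = kmalloc(size, GFP_NOFS);
+
+ *used_vmalloc = 0;
+ if (!p) {
+ p = vmalloc(size);
+ *used_vmalloc = 1;
+ }
+ return p;
+}
+
+static void example_free_big(void *p, int used_vmalloc)
+{
+ if (!p)
+ return;
+ if (used_vmalloc)
+ vfree(p);
+ else
+ kfree(p);
+}
+#endif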
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+
+ int erased_ok = 0;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing this block */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE)
+ || !yaffs_skip_verification(dev))) {
+ int i;
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased
+ (dev, block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (erased_ok) {
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE,
+ "Erased block %d", block_no);
+ } else {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ }
+}
+
+
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int new_chunk;
+ int mark_flash;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int matching_chunk;
+ int max_copies;
+
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+
+ struct yaffs_ext_tags tags;
+
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ struct yaffs_obj *object;
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d, in use %d, shrink %d, whole_block %d",
+ block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block);
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */
+
+ dev->gc_disable = 1;
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d that has no chunks in use",
+ block);
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ for ( /* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0; dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+ /* This page is in use and might need to be copied off */
+
+ max_copies--;
+
+ mark_flash = 1;
+
+ yaffs_init_tags(&tags);
+
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ matching_chunk = old_chunk; /* Defeat the test */
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file: throw it away.
+ * No need to copy this, just forget about it and
+ * fix up the object.
+ */
+
+ /* Free chunks already includes soft deleted chunks.
+ * However this chunk is soon going to be really deleted,
+ * which will increment free chunks.
+ * We have to decrement free chunks so this works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+
+ if (object->n_data_chunks <= 0) {
+ /* remember to clean up the object */
+ dev->gc_cleanup_list[dev->n_clean_ups] =
+ tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (0) {
+ /* Todo object && object->deleted && object->n_data_chunks == 0 */
+ /* Deleted object header with no data chunks.
+ * Can be discarded and the file deleted.
+ */
+ object->hdr_chunk = 0;
+ yaffs_free_tnode(object->my_dev,
+ object->variant.file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(object);
+
+ } else if (object) {
+ /* It's either a data chunk in a live file or
+ * an ObjectHeader, so we're interested in it.
+ * NB Need to keep the ObjectHeaders of deleted files
+ * until the whole file has been deleted off
+ */
+ tags.serial_number++;
+
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object header.
+ * We need to nuke the shrink-header flag first and
+ * also clean up shadowing.
+ * We no longer want the shrink-header flag since its work is done
+ * and if it is left in place it will mess up scanning.
+ */
+
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *)
+ buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ oh->file_size =
+ object->variant.file_variant.file_size;
+ tags.extra_length = oh->file_size;
+ }
+
+ yaffs_verify_oh(object, oh,
+ &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev,
+ (u8 *)
+ oh,
+ &tags,
+ 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev,
+ buffer,
+ &tags,
+ 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Ok, now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk =
+ new_chunk;
+ object->serial =
+ tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ int ok;
+ ok = yaffs_put_chunk_in_file(object, tags.chunk_id, new_chunk, 0);
+ }
+ }
+ }
+
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk,
+ mark_flash, __LINE__);
+
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ }
+
+ yaffs_verify_collected_blk(dev, bi, block);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ /*
+ * The gc did not complete. Set block state back to FULL
+ * because checkpointing does not restore gc.
+ */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ } else {
+ /* The gc completed. */
+ /* Do any required cleanups */
+ for (i = 0; i < dev->n_clean_ups; i++) {
+ /* Time to delete the file too */
+ object =
+ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+ if (object) {
+ yaffs_free_tnode(dev,
+ object->variant.file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: About to finally delete object %d",
+ object->obj_id);
+ yaffs_generic_obj_del(object);
+ object->my_dev->n_deleted_files--;
+ }
+
+ }
+
+ chunks_after = yaffs_get_erased_chunks(dev);
+ if (chunks_before >= chunks_after)
+ yaffs_trace(YAFFS_TRACE_GC,
+ "gc did not increase free chunks before %d after %d",
+ chunks_before, chunks_after);
+ dev->gc_block = 0;
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ dev->gc_disable = 0;
+
+ return ret_val;
+}
+
+/*
+ * yaffs_find_gc_block() is used to select the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+ int aggressive, int background)
+{
+ int i;
+ int iterations;
+ unsigned selected = 0;
+ int prioritised = 0;
+ int prioritised_exist = 0;
+ struct yaffs_block_info *bi;
+ int threshold;
+
+ /* First let's see if we need to grab a prioritised block */
+ if (dev->has_pending_prioritised_gc && !aggressive) {
+ dev->gc_dirtiest = 0;
+ bi = dev->block_info;
+ for (i = dev->internal_start_block;
+ i <= dev->internal_end_block && !selected; i++) {
+
+ if (bi->gc_prioritise) {
+ prioritised_exist = 1;
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ selected = i;
+ prioritised = 1;
+ }
+ }
+ bi++;
+ }
+
+ /*
+ * If there is a prioritised block and none was selected then
+ * this happened because there is at least one old dirty block gumming
+ * up the works. Let's gc the oldest dirty block.
+ */
+
+ if (prioritised_exist &&
+ !selected && dev->oldest_dirty_block > 0)
+ selected = dev->oldest_dirty_block;
+
+ if (!prioritised_exist) /* None found, so we can clear this */
+ dev->has_pending_prioritised_gc = 0;
+ }
+
+ /* If we're doing aggressive GC then we are happy to take a less-dirty block,
+ * and search harder.
+ * Otherwise (we're doing a leisurely gc) we only bother to do this if the
+ * block has only a few pages in use.
+ */
+
+ if (!selected) {
+ int pages_used;
+ int n_blocks =
+ dev->internal_end_block - dev->internal_start_block + 1;
+ if (aggressive) {
+ threshold = dev->param.chunks_per_block;
+ iterations = n_blocks;
+ } else {
+ int max_threshold;
+
+ if (background)
+ max_threshold = dev->param.chunks_per_block / 2;
+ else
+ max_threshold = dev->param.chunks_per_block / 8;
+
+ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+ if (threshold > max_threshold)
+ threshold = max_threshold;
+
+ iterations = n_blocks / 16 + 1;
+ if (iterations > 100)
+ iterations = 100;
+ }
+
+ for (i = 0;
+ i < iterations &&
+ (dev->gc_dirtiest < 1 ||
+ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); i++) {
+ dev->gc_block_finder++;
+ if (dev->gc_block_finder < dev->internal_start_block ||
+ dev->gc_block_finder > dev->internal_end_block)
+ dev->gc_block_finder =
+ dev->internal_start_block;
+
+ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+ pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ pages_used < dev->param.chunks_per_block &&
+ (dev->gc_dirtiest < 1
+ || pages_used < dev->gc_pages_in_use)
+ && yaffs_block_ok_for_gc(dev, bi)) {
+ dev->gc_dirtiest = dev->gc_block_finder;
+ dev->gc_pages_in_use = pages_used;
+ }
+ }
+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try selecting the oldest dirty
+ * block, because that's what is gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection,
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
+ * Passive gc only inspects smaller areas and will only accept dirtier blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable) {
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+ }
+
+ /* This loop should pass the first time.
+ * We'll only see looping here if the collection does not increase space.
+ */
+
+ do {
+ max_tries++;
+
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks)
+ && dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;
+}
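+
+/* Editor's note with a worked example, not part of the original yaffs code
+ * (compiled out): aggressive collection kicks in when the erased-block count
+ * drops below the reserve plus the checkpoint requirement plus one. For
+ * example, with n_reserved_blocks = 5 and a checkpoint needing 3 blocks,
+ * min_erased = 5 + 3 + 1 = 9, so 8 erased blocks makes the next pass
+ * aggressive.
+ */
+#if 0
+static int example_needs_aggressive_gc(int n_erased_blocks,
+ int n_reserved_blocks, int checkpt_blocks)
+{
+ int min_erased = n_reserved_blocks + checkpt_blocks + 1;
+
+ return n_erased_blocks < min_erased; /* e.g. 8 < 9 -> aggressive */
+}
+#endif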
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0;
+ }
+
+}
+
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn)
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ yaffs_init_tags(&tags);
+
+ tags.is_deleted = 1;
+
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+
+ yaffs_clear_chunk_bit(dev, block, page);
+
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ yaffs_block_became_dirty(dev, block);
+ }
+
+ }
+
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 * buffer, int n_bytes, int use_reserve)
+{
+ /* Find the old chunk (we need to do this to get the serial number).
+ * Write the new one and patch it into the tree.
+ * Invalidate the old tags.
+ */
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ yaffs_check_gc(dev, 0);
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ yaffs_init_tags(&new_tags);
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ YBUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);
+ }
+ return new_chunk_id;
+
+}
+
+
+
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+ const YCHAR * name, const void *value, int size,
+ int flags)
+{
+ struct yaffs_xattr_mod xmod;
+
+ int result;
+
+ xmod.set = set;
+ xmod.name = name;
+ xmod.data = value;
+ xmod.size = size;
+ xmod.flags = flags;
+ xmod.result = -ENOSPC;
+
+ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+ if (result > 0)
+ return xmod.result;
+ else
+ return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+ struct yaffs_xattr_mod *xmod)
+{
+ int retval = 0;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+ char *x_buffer = buffer + x_offs;
+
+ if (xmod->set)
+ retval =
+ nval_set(x_buffer, x_size, xmod->name, xmod->data,
+ xmod->size, xmod->flags);
+ else
+ retval = nval_del(x_buffer, x_size, xmod->name);
+
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+
+ xmod->result = retval;
+
+ return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR * name,
+ void *value, int size)
+{
+ char *buffer = NULL;
+ int result;
+ struct yaffs_ext_tags tags;
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+ char *x_buffer;
+
+ int retval = 0;
+
+ if (obj->hdr_chunk < 1)
+ return -ENODATA;
+
+ /* If we know that the object has no xattribs then don't do all the
+ * reading and parsing.
+ */
+ if (obj->xattr_known && !obj->has_xattr) {
+ if (name)
+ return -ENODATA;
+ else
+ return 0;
+ }
+
+ buffer = (char *)yaffs_get_temp_buffer(dev, __LINE__);
+ if (!buffer)
+ return -ENOMEM;
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+ if (result != YAFFS_OK)
+ retval = -ENOENT;
+ else {
+ x_buffer = buffer + x_offs;
+
+ if (!obj->xattr_known) {
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ }
+
+ if (name)
+ retval = nval_get(x_buffer, x_size, name, value, size);
+ else
+ retval = nval_list(x_buffer, x_size, value, size);
+ }
+ yaffs_release_temp_buffer(dev, (u8 *) buffer, __LINE__);
+ return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags)
+{
+ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size)
+{
+ return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
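+
+/* Editor's sketch, not part of the original yaffs code (compiled out):
+ * the xattrib calls above work purely on the spare space of the object header
+ * chunk. A typical set / get / list / remove round trip looks roughly like
+ * this; the attribute name and buffer sizes are arbitrary and return values
+ * (sizes or negative errnos) are ignored here.
+ */
+#if 0
+static void example_xattr_roundtrip(struct yaffs_obj *obj)
+{
+ char value[16];
+ char names[64];
+
+ yaffs_set_xattrib(obj, _Y("user.tag"), "blue", 4, 0);
+ yaffs_get_xattrib(obj, _Y("user.tag"), value, sizeof(value));
+ yaffs_list_xattrib(obj, names, sizeof(names));
+ yaffs_remove_xattrib(obj, _Y("user.tag"));
+}
+#endif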
+
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+ u8 *chunk_data;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+
+ if (!in)
+ return;
+
+ dev = in->my_dev;
+
+ if (in->lazy_loaded && in->hdr_chunk > 0) {
+ in->lazy_loaded = 0;
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunk_data,
+ &tags);
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ yaffs_set_obj_name_from_oh(in, oh);
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1; /* Not returned to caller */
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+ }
+}
+
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR * name,
+ const YCHAR * oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ {
+#endif
+ strncpy(name, oh_name, buff_size - 1);
+ }
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR * oh_name,
+ const YCHAR * name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so do a unicode to ascii conversion */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* It is a unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ strncpy(oh_name + 1, name,
+ YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+ {
+#endif
+ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name, int force,
+ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+ struct yaffs_block_info *bi;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ int prev_chunk_id;
+ int ret_val = 0;
+ int result = 0;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_ext_tags old_tags;
+ const YCHAR *alias = NULL;
+
+ u8 *buffer = NULL;
+ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ struct yaffs_obj_hdr *oh = NULL;
+
+ strcpy(old_name, _Y("silly old name"));
+
+ if (!in->fake || in == dev->root_dir ||
+ force || xmod) {
+
+ yaffs_check_gc(dev, 0);
+ yaffs_check_obj_details_loaded(in);
+
+ buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ prev_chunk_id = in->hdr_chunk;
+
+ if (prev_chunk_id > 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ buffer, &old_tags);
+
+ yaffs_verify_oh(in, oh, &old_tags, 0);
+
+ memcpy(old_name, oh->name, sizeof(oh->name));
+ memset(buffer, 0xFF, sizeof(struct yaffs_obj_hdr));
+ } else {
+ memset(buffer, 0xFF, dev->data_bytes_per_chunk);
+ }
+
+ oh->type = in->variant_type;
+ oh->yst_mode = in->yst_mode;
+ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+ yaffs_load_attribs_oh(oh, in);
+
+ if (in->parent)
+ oh->parent_obj_id = in->parent->obj_id;
+ else
+ oh->parent_obj_id = 0;
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_load_oh_from_name(dev, oh->name, name);
+ } else if (prev_chunk_id > 0) {
+ memcpy(oh->name, old_name, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->is_shrink = is_shrink;
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ oh->file_size =
+ (oh->parent_obj_id == YAFFS_OBJECTID_DELETED
+ || oh->parent_obj_id ==
+ YAFFS_OBJECTID_UNLINKED) ? 0 : in->
+ variant.file_variant.file_size;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = in->variant.symlink_variant.alias;
+ if (!alias)
+ alias = _Y("no alias");
+ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* process any xattrib modifications */
+ if (xmod)
+ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+ /* Tags */
+ yaffs_init_tags(&new_tags);
+ in->serial++;
+ new_tags.chunk_id = 0;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number = in->serial;
+
+ /* Add extra info for file header */
+
+ new_tags.extra_available = 1;
+ new_tags.extra_parent_id = oh->parent_obj_id;
+ new_tags.extra_length = oh->file_size;
+ new_tags.extra_is_shrink = oh->is_shrink;
+ new_tags.extra_equiv_id = oh->equiv_id;
+ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+ new_tags.extra_obj_type = in->variant_type;
+
+ yaffs_verify_oh(in, oh, &new_tags, 1);
+
+ /* Create new chunk in NAND */
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags,
+ (prev_chunk_id > 0) ? 1 : 0);
+
+ if (new_chunk_id >= 0) {
+
+ in->hdr_chunk = new_chunk_id;
+
+ if (prev_chunk_id > 0) {
+ yaffs_chunk_del(dev, prev_chunk_id, 1,
+ __LINE__);
+ }
+
+ if (!yaffs_obj_cache_dirty(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block that the chunk lives on */
+ if (is_shrink) {
+ bi = yaffs_get_block_info(in->my_dev,
+ new_chunk_id /
+ in->my_dev->param.
+ chunks_per_block);
+ bi->has_shrink_hdr = 1;
+ }
+
+ }
+
+ ret_val = new_chunk_id;
+
+ }
+
+ if (buffer)
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return ret_val;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
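+/* Illustrative sketch (numbers are hypothetical, not from the original code):
+ * with 2048-byte data chunks, a read of 5000 bytes at offset 1000 covers
+ * bytes 1000..5999 and splits into three pieces:
+ *   chunk 1: bytes 1000..2047 (1048 bytes, partial head, goes via the cache)
+ *   chunk 2: bytes 2048..4095 (2048 bytes, whole chunk, read directly)
+ *   chunk 3: bytes 4096..5999 (1904 bytes, partial tail, goes via the cache)
+ * The read and write loops below walk these pieces one chunk at a time.
+ */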
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ struct yaffs_cache *cache;
+
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0) {
+ /* chunk = offset / dev->data_bytes_per_chunk + 1; */
+ /* start = offset % dev->data_bytes_per_chunk; */
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->data_bytes_per_chunk)
+ n_copy = n;
+ else
+ n_copy = dev->data_bytes_per_chunk - start;
+
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than a whole chunk
+ * or we're using inband tags then use the cache (if there is caching)
+ * else bypass the cache.
+ */
+ if (cache || n_copy != dev->data_bytes_per_chunk
+ || dev->param.inband_tags) {
+ if (dev->param.n_caches > 0) {
+
+ /* If we can't find the data in the cache, then load it up. */
+
+ if (!cache) {
+ cache =
+ yaffs_grab_chunk_cache(in->my_dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ cache->n_bytes = 0;
+ }
+
+ yaffs_use_cache(dev, cache, 0);
+
+ cache->locked = 1;
+
+ memcpy(buffer, &cache->data[start], n_copy);
+
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy.. */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev, __LINE__);
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(buffer, &local_buffer[start], n_copy);
+
+ yaffs_release_temp_buffer(dev, local_buffer,
+ __LINE__);
+ }
+
+ } else {
+
+ /* A full chunk. Read directly into the supplied buffer. */
+ yaffs_rd_data_obj(in, chunk, buffer);
+
+ }
+
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+
+ }
+
+ return n_done;
+}
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ int n_writeback;
+ int start_write = offset;
+ int chunk_written = 0;
+ u32 n_bytes_read;
+ u32 chunk_start;
+
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0 && chunk_written >= 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+ if (chunk * dev->data_bytes_per_chunk + start != offset ||
+ start >= dev->data_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "AddrToChunk of offset %d gives chunk %d start %d",
+ (int)offset, chunk, start);
+ }
+ chunk++; /* File pos to chunk in file offset */
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+
+ if ((start + n) < dev->data_bytes_per_chunk) {
+ n_copy = n;
+
+ /* Now to calculate how many bytes to write back:
+ * if we're overwriting and not writing to the end of the file then
+ * we need to write back as much as was there before.
+ */
+
+ chunk_start = ((chunk - 1) * dev->data_bytes_per_chunk);
+
+ if (chunk_start > in->variant.file_variant.file_size)
+ n_bytes_read = 0; /* Past end of file */
+ else
+ n_bytes_read =
+ in->variant.file_variant.file_size -
+ chunk_start;
+
+ if (n_bytes_read > dev->data_bytes_per_chunk)
+ n_bytes_read = dev->data_bytes_per_chunk;
+
+ n_writeback =
+ (n_bytes_read >
+ (start + n)) ? n_bytes_read : (start + n);
+
+ if (n_writeback < 0
+ || n_writeback > dev->data_bytes_per_chunk)
+ YBUG();
+
+ } else {
+ n_copy = dev->data_bytes_per_chunk - start;
+ n_writeback = dev->data_bytes_per_chunk;
+ }
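+ /* Worked example (hypothetical numbers, 2048-byte chunks): writing
+ * 100 bytes at start = 500 into a chunk that already held 900 valid
+ * bytes gives n_copy = 100 and n_writeback = max(900, 600) = 900,
+ * preserving the old tail. The same write past the old end of file
+ * gives n_bytes_read = 0 and n_writeback = 600.
+ */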
+
+ if (n_copy != dev->data_bytes_per_chunk
+ || dev->param.inband_tags) {
+ /* An incomplete start or end chunk (or maybe both start and end chunk),
+ * or we're using inband tags, so we want to use the cache buffers.
+ */
+ if (dev->param.n_caches > 0) {
+ struct yaffs_cache *cache;
+ /* If we can't find the data in the cache, then load the cache */
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ if (!cache
+ && yaffs_check_alloc_available(dev, 1)) {
+ cache = yaffs_grab_chunk_cache(dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ } else if (cache &&
+ !cache->dirty &&
+ !yaffs_check_alloc_available(dev,
+ 1)) {
+ /* Drop the cache if it was a read cache item and
+ * no space check has been made for it.
+ */
+ cache = NULL;
+ }
+
+ if (cache) {
+ yaffs_use_cache(dev, cache, 1);
+ cache->locked = 1;
+
+ memcpy(&cache->data[start], buffer,
+ n_copy);
+
+ cache->locked = 0;
+ cache->n_bytes = n_writeback;
+
+ if (write_through) {
+ chunk_written =
+ yaffs_wr_data_obj
+ (cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ }
+
+ } else {
+ chunk_written = -1; /* fail the write */
+ }
+ } else {
+ /* An incomplete start or end chunk (or maybe both).
+ * Read the chunk into the local buffer, copy the new data over it, then write it back.
+ */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(&local_buffer[start], buffer, n_copy);
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk,
+ local_buffer,
+ n_writeback, 0);
+
+ yaffs_release_temp_buffer(dev, local_buffer,
+ __LINE__);
+
+ }
+
+ } else {
+ /* A full chunk. Write directly from the supplied buffer. */
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk, buffer,
+ dev->data_bytes_per_chunk, 0);
+
+ /* Since we've overwritten the cached data, we better invalidate it. */
+ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+ if (chunk_written >= 0) {
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+
+ }
+
+ /* Update file object */
+
+ if ((start_write + n_done) > in->variant.file_variant.file_size)
+ in->variant.file_variant.file_size = (start_write + n_done);
+
+ in->dirty = 1;
+
+ return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+ yaffs2_handle_hole(in, offset);
+ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_prune_chunks(struct yaffs_obj *in, int new_size)
+{
+
+ struct yaffs_dev *dev = in->my_dev;
+ int old_size = in->variant.file_variant.file_size;
+
+ int last_del = 1 + (old_size - 1) / dev->data_bytes_per_chunk;
+
+ int start_del = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
+ dev->data_bytes_per_chunk;
+ int i;
+ int chunk_id;
+
+ /* Delete backwards so that we don't end up with holes if
+ * power is lost part-way through the operation.
+ */
+ for (i = last_del; i >= start_del; i--) {
+ /* NB this could be optimised somewhat,
+ * eg. could retrieve the tags and write them without
+ * using yaffs_chunk_del
+ */
+
+ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+ if (chunk_id > 0) {
+ if (chunk_id <
+ (dev->internal_start_block *
+ dev->param.chunks_per_block)
+ || chunk_id >=
+ ((dev->internal_end_block +
+ 1) * dev->param.chunks_per_block)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Found daft chunk_id %d for %d",
+ chunk_id, i);
+ } else {
+ in->n_data_chunks--;
+ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+ }
+ }
+ }
+
+}
+
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+ int new_full;
+ u32 new_partial;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+ yaffs_prune_chunks(obj, new_size);
+
+ if (new_partial != 0) {
+ int last_chunk = 1 + new_full;
+ u8 *local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ /* Rewrite the last chunk with its new size and zero pad */
+ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+ memset(local_buffer + new_partial, 0,
+ dev->data_bytes_per_chunk - new_partial);
+
+ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+ new_partial, 1);
+
+ yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+ }
+
+ obj->variant.file_variant.file_size = new_size;
+
+ yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+ struct yaffs_dev *dev = in->my_dev;
+ int old_size = in->variant.file_variant.file_size;
+
+ yaffs_flush_file_cache(in);
+ yaffs_invalidate_whole_cache(in);
+
+ yaffs_check_gc(dev, 0);
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ if (new_size == old_size)
+ return YAFFS_OK;
+
+ if (new_size > old_size) {
+ yaffs2_handle_hole(in, new_size);
+ in->variant.file_variant.file_size = new_size;
+ } else {
+ /* new_size < old_size */
+ yaffs_resize_file_down(in, new_size);
+ }
+
+ /* Write a new object header to reflect the resize,
+ * showing that we've shrunk the file if need be.
+ * Do this only if the file is not in the deleted directories
+ * and is not shadowed.
+ */
+ if (in->parent &&
+ !in->is_shadowed &&
+ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+ return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+ int ret_val;
+ if (in->dirty) {
+ yaffs_flush_file_cache(in);
+ if (data_sync) /* Only sync data */
+ ret_val = YAFFS_OK;
+ else {
+ if (update_time)
+ yaffs_load_current_time(in, 0, 0);
+
+ ret_val = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
+ 0) ? YAFFS_OK : YAFFS_FAIL;
+ }
+ } else {
+ ret_val = YAFFS_OK;
+ }
+
+ return ret_val;
+
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+
+ int ret_val;
+ int del_now = 0;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!in->my_inode)
+ del_now = 1;
+
+ if (del_now) {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->del_dir,
+ _Y("deleted"), 0, 0);
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: immediate deletion of file %d",
+ in->obj_id);
+ in->deleted = 1;
+ in->my_dev->n_deleted_files++;
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+ yaffs_soft_del_file(in);
+ } else {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+
+ return ret_val;
+}
+
+int yaffs_del_file(struct yaffs_obj *in)
+{
+ int ret_val = YAFFS_OK;
+ int deleted; /* Need to cache value on stack if in is freed */
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+
+ if (in->n_data_chunks > 0) {
+ /* Use soft deletion if there is data in the file.
+ * That won't be the case if it has been resized to zero.
+ */
+ if (!in->unlinked)
+ ret_val = yaffs_unlink_file_if_needed(in);
+
+ deleted = in->deleted;
+
+ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ deleted = 1;
+ in->my_dev->n_deleted_files++;
+ yaffs_soft_del_file(in);
+ }
+ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+ in->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(in);
+
+ return YAFFS_OK;
+ }
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+ return (obj &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+ !(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+ /* First check that the directory is empty. */
+ if (yaffs_is_non_empty_dir(obj))
+ return YAFFS_FAIL;
+
+ return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+ if (in->variant.symlink_variant.alias)
+ kfree(in->variant.symlink_variant.alias);
+ in->variant.symlink_variant.alias = NULL;
+
+ return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+ /* Remove this hardlink from the list associated with the equivalent
+ * object
+ */
+ list_del_init(&in->hard_links);
+ return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+ int ret_val = -1;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ ret_val = yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!list_empty(&obj->variant.dir_variant.dirty)) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Remove object %d from dirty directories",
+ obj->obj_id);
+ list_del_init(&obj->variant.dir_variant.dirty);
+ }
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ ret_val = yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ ret_val = yaffs_del_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ ret_val = yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ ret_val = 0;
+ break; /* should not happen. */
+ }
+
+ return ret_val;
+}
+
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+
+ int del_now = 0;
+
+ if (!obj->my_inode)
+ del_now = 1;
+
+ if (obj)
+ yaffs_update_parent(obj->parent);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_del_link(obj);
+ } else if (!list_empty(&obj->hard_links)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+ * the Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Move it from its parent directory (so that the rename can work)
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
+
+ struct yaffs_obj *hl;
+ struct yaffs_obj *parent;
+ int ret_val;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+ hard_links);
+
+ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
+ list_del_init(&hl->hard_links);
+
+ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+ if (ret_val == YAFFS_OK)
+ ret_val = yaffs_generic_obj_del(hl);
+
+ return ret_val;
+
+ } else if (del_now) {
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ list_del_init(&obj->variant.dir_variant.dirty);
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ } else if (yaffs_is_non_empty_dir(obj)) {
+ return YAFFS_FAIL;
+ } else {
+ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+
+ if (obj && obj->unlink_allowed)
+ return yaffs_unlink_worker(obj);
+
+ return YAFFS_FAIL;
+
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name)
+{
+ struct yaffs_obj *obj;
+
+ obj = yaffs_find_by_name(dir, name);
+ return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *existing_target = NULL;
+ int force = 0;
+ int result;
+ struct yaffs_dev *dev;
+
+ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+
+ dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ /* Special case for case insensitive systems.
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+ if (old_dir == new_dir &&
+ old_name && new_name &&
+ strcmp(old_name, new_name) == 0)
+ force = 1;
+#endif
+
+ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if(old_name)
+ obj = yaffs_find_by_name(old_dir, old_name);
+ else{
+ obj = old_dir;
+ old_dir = obj->parent;
+ }
+
+
+ if (obj && obj->rename_allowed) {
+
+ /* Now do the handling for an existing target, if there is one */
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+ if (yaffs_is_non_empty_dir(existing_target)){
+ return YAFFS_FAIL; /* ENOTEMPTY */
+ } else if (existing_target && existing_target != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object.
+ *
+ * Note we must disable gc otherwise it can mess up the shadowing.
+ *
+ */
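+ /* Sketch of the sequence for renaming a onto an existing b
+ * (illustrative, based on the calls below): the new header written
+ * for a carries b's obj_id in its shadows field, so if power fails
+ * before b is unlinked, scanning sees the shadow and discards the
+ * stale b rather than keeping both.
+ */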
+ dev->gc_disable = 1;
+ yaffs_change_obj_name(obj, new_dir, new_name, force,
+ existing_target->obj_id);
+ existing_target->is_shadowed = 1;
+ yaffs_unlink_obj(existing_target);
+ dev->gc_disable = 0;
+ }
+
+ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+ yaffs_update_parent(old_dir);
+ if (new_dir != old_dir)
+ yaffs_update_parent(new_dir);
+
+ return result;
+ }
+ return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning)
+{
+ struct yaffs_obj *obj;
+
+ if (!backward_scanning) {
+ /* Handle YAFFS1 forward scanning case
+ * For YAFFS1 we always do the deletion
+ */
+
+ } else {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ obj = yaffs_find_by_number(dev, obj_id);
+ if (obj)
+ return;
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+ * We put it in the unlinked directory so it is cleaned up after the scanning.
+ */
+ obj =
+ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+ if (!obj)
+ return;
+ obj->is_shadowed = 1;
+ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+ obj->variant.file_variant.shrink_size = 0;
+ obj->valid = 1; /* So that we don't read any other info for this file */
+
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list)
+{
+ struct yaffs_obj *hl;
+ struct yaffs_obj *in;
+
+ while (hard_list) {
+ hl = hard_list;
+ hard_list = (struct yaffs_obj *)(hard_list->hard_links.next);
+
+ in = yaffs_find_by_number(dev,
+ hl->variant.
+ hardlink_variant.equiv_id);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardlink_variant.equiv_obj = in;
+ list_add(&hl->hard_links, &in->hard_links);
+ } else {
+ /* Todo: need to report/handle this better.
+ * Got a problem... hardlink to a non-existent object
+ */
+ hl->variant.hardlink_variant.equiv_obj = NULL;
+ INIT_LIST_HEAD(&hl->hard_links);
+
+ }
+ }
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+ /*
+ * Sort out state of unlinked and deleted objects after scanning.
+ */
+ struct list_head *i;
+ struct list_head *n;
+ struct yaffs_obj *l;
+
+ if (dev->read_only)
+ return;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinked_dir->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+ }
+
+ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+ }
+
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under the deleted or unlinked directories, or
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships between
+ * directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been deleted
+ * leaving the object "hanging" without being rooted in the directory tree.
+ */
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+ return (obj == dev->del_dir ||
+ obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *parent;
+ int i;
+ struct list_head *lh;
+ struct list_head *n;
+ int depth_limit;
+ int hanging;
+
+ if (dev->read_only)
+ return;
+
+ /* Iterate through the objects in each hash entry,
+ * looking at each object.
+ * Make sure it is rooted.
+ */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ parent = obj->parent;
+
+ if (yaffs_has_null_parent(dev, obj)) {
+ /* These directories are not hanging */
+ hanging = 0;
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ hanging = 1;
+ } else if (yaffs_has_null_parent(dev, parent)) {
+ hanging = 0;
+ } else {
+ /*
+ * Need to follow the parent chain to see if it is hanging.
+ */
+ hanging = 0;
+ depth_limit = 100;
+
+ while (parent != dev->root_dir &&
+ parent->parent &&
+ parent->parent->variant_type ==
+ YAFFS_OBJECT_TYPE_DIRECTORY
+ && depth_limit > 0) {
+ parent = parent->parent;
+ depth_limit--;
+ }
+ if (parent != dev->root_dir)
+ hanging = 1;
+ }
+ if (hanging) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Hanging object %d moved to lost and found",
+ obj->obj_id);
+ yaffs_add_obj_to_dir(dev->lost_n_found,
+ obj);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+
+ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+ if (lh) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffs_del_dir_contents(obj);
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Deleting lost_found object %d",
+ obj->obj_id);
+
+ /* Need to use UnlinkObject since Delete would not handle
+ * hardlinked objects correctly.
+ */
+ yaffs_unlink_obj(obj);
+ }
+ }
+
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+ yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+ const YCHAR * name)
+{
+ int sum;
+
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+ struct yaffs_obj *l;
+
+ if (!name)
+ return NULL;
+
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: null pointer directory"
+ );
+ YBUG();
+ return NULL;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: non-directory"
+ );
+ YBUG();
+ }
+
+ sum = yaffs_calc_name_sum(name);
+
+ list_for_each(i, &directory->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+
+ if (l->parent != directory)
+ YBUG();
+
+ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+ return l;
+ } else if (l->sum == sum
+ || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_get_obj_name(l, buffer,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ if (strncmp
+ (name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+ obj = obj->variant.hardlink_variant.equiv_obj;
+ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+}
+
+/*
+ * A note or two on object names.
+ * * If the object name is missing, we then make one up in the form objnnn
+ *
+ * * ASCII names are stored in the object header's name field from byte zero
+ * * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic Unicode names are stored slightly differently:
+ * - If the name can fit in the ASCII character space then they are saved as
+ * ascii names as per above.
+ * - If the name needs Unicode then the name is saved in Unicode
+ * starting at oh->name[1].
+
+ */
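+/* Illustrative example (not from the original code): with auto unicode
+ * enabled, a name such as "log.txt" fits in ASCII, so it is stored from
+ * oh->name[0] and read back by widening each byte to a YCHAR.  A name
+ * containing non-ASCII characters cannot, so oh->name[0] is set to 0 as a
+ * marker and the unicode string is stored from oh->name[1] onwards
+ * (see yaffs_load_oh_from_name/yaffs_load_name_from_oh above).
+ */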
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR * name,
+ int buffer_size)
+{
+ /* Create an object name if we could not find one. */
+ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+ YCHAR local_name[20];
+ YCHAR num_string[20];
+ YCHAR *x = &num_string[19];
+ unsigned v = obj->obj_id;
+ num_string[19] = 0;
+ while (v > 0) {
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+ strcat(local_name, x);
+ strncpy(name, local_name, buffer_size - 1);
+ }
+}
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size)
+{
+ memset(name, 0, buffer_size * sizeof(YCHAR));
+
+ yaffs_check_obj_details_loaded(obj);
+
+ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+ }
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ else if (obj->short_name[0]) {
+ strcpy(name, obj->short_name);
+ }
+#endif
+ else if (obj->hdr_chunk > 0) {
+ int result;
+ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
+
+ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+ if (obj->hdr_chunk > 0) {
+ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+ obj->hdr_chunk,
+ buffer, NULL);
+ }
+ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+ buffer_size);
+
+ yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
+ }
+
+ yaffs_fix_null_name(obj, name, buffer_size);
+
+ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+int yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+ /* Dereference any hard linking */
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.file_variant.file_size;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ if (!obj->variant.symlink_variant.alias)
+ return 0;
+ return strnlen(obj->variant.symlink_variant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->my_dev->data_bytes_per_chunk;
+ }
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+ list_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+ return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
+{
+
+ /* Common functions, gotta have */
+ if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+ return 0;
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+ if (dev->param.write_chunk_tags_fn &&
+ dev->param.read_chunk_tags_fn &&
+ !dev->param.write_chunk_fn &&
+ !dev->param.read_chunk_fn &&
+ dev->param.bad_block_fn && dev->param.query_block_fn)
+ return 1;
+#endif
+
+ /* Can use the "spare" style interface for yaffs1 */
+ if (!dev->param.is_yaffs2 &&
+ !dev->param.write_chunk_tags_fn &&
+ !dev->param.read_chunk_tags_fn &&
+ dev->param.write_chunk_fn &&
+ dev->param.read_chunk_fn &&
+ !dev->param.bad_block_fn && !dev->param.query_block_fn)
+ return 1;
+
+ return 0; /* bad */
+}
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost and found directories */
+
+ dev->lost_n_found = dev->root_dir = NULL;
+ dev->unlinked_dir = dev->del_dir = NULL;
+
+ dev->unlinked_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+ dev->del_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+ dev->root_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lost_n_found =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+ && dev->del_dir) {
+ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()" );
+
+ /* Check stuff that must be set */
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ /* Finished with most checks. One or two more checks happen later on too. */
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
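+ /* Worked example (a sketch, assuming calc_shifts() returns the number
+ * of low-order zero bits): for data_bytes_per_chunk == 2048 this gives
+ * chunk_shift = 11, chunk_div = 1 and chunk_mask = 0x7ff, so an offset
+ * splits as chunk = offset >> 11, start = offset & 0x7ff.  For a
+ * non-power-of-two chunk size such as 2032 (e.g. with inband tags),
+ * chunk_shift = 4 and chunk_div = 127, and the mask is not used.
+ */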
+
+ /*
+ * Calculate chunk_grp_bits.
+ * We need to find the next power of 2 greater than the total number of
+ * chunks, i.e. chunks_per_block * (internal_end_block + 1).
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunk_grp_size
+ */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
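+ /* Example with hypothetical geometry (assuming calc_shifts_ceiling()
+ * returns ceil(log2(x))): 4096 blocks of 64 chunks gives x = 262144,
+ * so bits = 18.  With wide tnodes enabled, tnode_width = 18 (already
+ * even and >= 16), chunk_grp_bits = 0, chunk_grp_size = 1 and each
+ * level-0 tnode takes 18 * 16 / 8 = 36 bytes on a 32-bit build.
+ */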
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* OK, we've finished verifying the device, lets continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1; /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted checkpoint load
+ * and scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retired_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+ if (dev->is_mounted) {
+ int i;
+
+ yaffs_deinit_blocks(dev);
+ yaffs_deinit_tnodes_and_objs(dev);
+ if (dev->param.n_caches > 0 && dev->cache) {
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].data)
+ kfree(dev->cache[i].data);
+ dev->cache[i].data = NULL;
+ }
+
+ kfree(dev->cache);
+ dev->cache = NULL;
+ }
+
+ kfree(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ kfree(dev->temp_buffer[i].buffer);
+
+ dev->is_mounted = 0;
+
+ if (dev->param.deinitialise_flash_fn)
+ dev->param.deinitialise_flash_fn(dev);
+ }
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+ int n_free = 0;
+ int b;
+
+ struct yaffs_block_info *blk;
+
+ blk = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ n_free +=
+ (dev->param.chunks_per_block - blk->pages_in_use +
+ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
+ blk++;
+ }
+
+ return n_free;
+}
+
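+/* Worked example for the free-space report below (hypothetical numbers,
+ * not from the original source): with 64 chunks per block,
+ * n_free_chunks = 10000, n_deleted_files = 200, 3 dirty cache entries,
+ * n_reserved_blocks = 5 and 4 blocks needed for the checkpoint:
+ *   10000 + 200 - 3 - (5 + 1) * 64 - 4 * 64 = 9557 chunks reported free.
+ */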
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+ /* This is what we report to the outside world */
+
+ int n_free;
+ int n_dirty_caches;
+ int blocks_for_checkpt;
+ int i;
+
+ n_free = dev->n_free_chunks;
+ n_free += dev->n_deleted_files;
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].dirty)
+ n_dirty_caches++;
+ }
+
+ n_free -= n_dirty_caches;
+
+ n_free -=
+ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
+
+ if (n_free < 0)
+ n_free = 0;
+
+ return n_free;
+
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 000000000000..307eba28676f
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,915 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xFF
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941FF53
+
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 6
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+#endif
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+#define YAFFS_MAX_CHUNK_ID 0x000FFFFF
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE -1)
+
+#define YAFFS_CHECKPOINT_VERSION 4
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_SB_HEADER 0x10
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00
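+/* (Roughly: 0xEFFFFF00 is about 4.0e9 sequence numbers, and at 8 block
+ * allocations per second that is 4.0e9 / 8 / 3.15e7 seconds-per-year,
+ * i.e. around 16 years, which is where the figure above comes from.)
+ */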
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+ struct yaffs_obj *object;
+ int chunk_id;
+ int last_use;
+ int dirty;
+ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+ u8 *data;
+};
+
+/* Tags structures in RAM
+ * NB This uses bitfields. Bitfields should not straddle a u32 boundary, otherwise
+ * the structure size will get blown out.
+ */
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+struct yaffs_tags {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes_lsb:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
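+/* Note: the bitfields above pack into exactly 64 bits
+ * (20 + 2 + 10 + 18 + 12 + 2), which is why as_bytes[] is 8 bytes; the
+ * union allows the tags to be copied to and from the NAND spare area as
+ * raw bytes.
+ */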
+
+#endif
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+
+ unsigned validity0;
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 then this is not part of an object (unused) */
+ unsigned chunk_id; /* If 0 then this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* There is extra info available if this is not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ unsigned extra_length; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object Id if it is a hard link */
+
+ unsigned validity1;
+
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+ u8 tb0;
+ u8 tb1;
+ u8 tb2;
+ u8 tb3;
+ u8 page_status; /* set to 0 to delete the chunk */
+ u8 block_status;
+ u8 tb4;
+ u8 tb5;
+ u8 ecc1[3];
+ u8 tb6;
+ u8 tb7;
+ u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+ /* The block might have something on it (ie it is allocating or full, perhaps empty)
+ * but it needs to be scanned to determine its true state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of deciding.
+ * However, if this state is returned on a YAFFS2 device, then we expect a sequence number.
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ int soft_del_pages:10; /* number of soft deleted pages */
+ int pages_in_use:10; /* number of pages in use */
+ unsigned block_state:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+ u32 needs_retiring:1; /* Data has failed on this block, need to get valid data off */
+ /* and retire the block. */
+ u32 skip_erased_check:1; /* If this is set we can skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ u32 has_shrink_hdr:1; /* This block has at least one shrink object header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+#endif
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to directories, files, symlinks - not hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ int file_size;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* device stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 reserved[2];
+ int shadows_obj; /* This object header shadows the specified object if > 0 */
+
+ /* is_shrink applies to object headers written when we shrink the file (ie resize) */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has children links)
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ u32 file_size;
+ u32 scanned_size;
+ u32 shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory. */
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects are not allowed to be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available (ie. file data records appear before the header).
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and is missing some detail */
+
+ u8 defered_free:1; /* For Linux kernel. Object is removed from NAND, but is
+ * still in the inode cache. Freeing of the object is deferred
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created so skip some checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way to being renamed. */
+
+ u8 xattr_known:1; /* We know whether or not this object has xattribs. */
+ u8 has_xattr:1; /* This object has xattribs. Valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND. Cached here */
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in this hash bucket */
+
+ struct list_head hard_links; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks attached to the file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+#endif
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ u32 size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int line; /* track from whence this buffer was allocated */
+ int max_line;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+ const YCHAR *name;
+
+ /*
+ * Entry parameters set up way early. Yaffs sets up the rest.
+ * The structure should be zeroed out before use so that unused
+ * and default values are zero.
+ */
+
+ int inband_tags; /* Use inband tags */
+ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to be a power of 2 */
+ int chunks_per_block; /* does not need to be a power of 2 */
+ int spare_bytes_per_chunk; /* spare area size */
+ int start_block; /* Start block we're allowed to use */
+ int end_block; /* End block we're allowed to use */
+ int n_reserved_blocks; /* We want this tuneable so that we can reduce */
+ /* reserved blocks on NOR and RAM. */
+
+ int n_caches; /* If <= 0, then short op caching is disabled, else
+ * the number of short op caches (don't use too many).
+ * 10 to 20 is a good bet.
+ */
+ int use_nand_ecc; /* Flag to decide whether or not to use NANDECC on data (yaffs1) */
+ int no_tags_ecc; /* Flag to decide whether or not to do ECC on packed tags (yaffs2) */
+
+ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+ int refresh_period; /* How often we should check to do a block refresh */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ u8 skip_checkpt_rd;
+ u8 skip_checkpt_wr;
+
+ int enable_xattr; /* Enable xattribs */
+
+ /* NAND access functions (Must be set before calling YAFFS) */
+
+ int (*write_chunk_fn) (struct yaffs_dev * dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_spare * spare);
+ int (*read_chunk_fn) (struct yaffs_dev * dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_spare * spare);
+ int (*erase_fn) (struct yaffs_dev * dev, int flash_block);
+ int (*initialise_flash_fn) (struct yaffs_dev * dev);
+ int (*deinitialise_flash_fn) (struct yaffs_dev * dev);
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ int (*write_chunk_tags_fn) (struct yaffs_dev * dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_ext_tags * tags);
+ int (*read_chunk_tags_fn) (struct yaffs_dev * dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_ext_tags * tags);
+ int (*bad_block_fn) (struct yaffs_dev * dev, int block_no);
+ int (*query_block_fn) (struct yaffs_dev * dev, int block_no,
+ enum yaffs_block_state * state,
+ u32 * seq_number);
+#endif
+
+ /* The remove_obj_fn function must be supplied by OS flavours that
+ * need it.
+ * yaffs direct uses it to implement the faster readdir.
+ * Linux uses it to protect the directory during unlocking.
+ */
+ void (*remove_obj_fn) (struct yaffs_obj * obj);
+
+ /* Callback to mark the superblock dirty */
+ void (*sb_dirty_fn) (struct yaffs_dev * dev);
+
+ /* Callback to control garbage collection. */
+ unsigned (*gc_control) (struct yaffs_dev * dev);
+
+ /* Debug control flags. Don't use unless you know what you're doing */
+ int use_header_file_size; /* Flag to determine if we should use file sizes from the header */
+ int disable_lazy_load; /* Disable lazy loading on this device */
+ int wide_tnodes_disabled; /* Set to disable wide tnodes */
+ int disable_soft_del; /* yaffs 1 only: Set to disable the use of softdeletion. */
+
+ int defered_dir_update; /* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ int auto_unicode;
+#endif
+ int always_check_erased; /* Force chunk erased check always on */
+};
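+
+/* Illustrative sketch (not part of the original YAFFS sources): an MTD
+ * glue layer might fill in the entry parameters and yaffs2 NAND access
+ * callbacks roughly like this before calling yaffs_guts_initialise().
+ * The geometry values (2048-byte chunks, 64 chunks per block) and the
+ * function example_setup() are assumptions for the example only; the
+ * nandmtd2_ and nandmtd_ callbacks are the ones added later in this patch.
+ *
+ * static void example_setup(struct yaffs_dev *dev, struct mtd_info *mtd)
+ * {
+ * struct yaffs_param *param = &dev->param;
+ *
+ * memset(dev, 0, sizeof(*dev)); // unused/default values must be zero
+ * param->name = "example";
+ * param->total_bytes_per_chunk = 2048;
+ * param->chunks_per_block = 64;
+ * param->start_block = 0;
+ * param->end_block = 1023;
+ * param->n_reserved_blocks = 5;
+ * param->is_yaffs2 = 1;
+ * param->no_tags_ecc = 1; // MTD does ECC on the OOB area
+ *
+ * param->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
+ * param->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
+ * param->bad_block_fn = nandmtd2_mark_block_bad;
+ * param->query_block_fn = nandmtd2_query_block;
+ * param->erase_fn = nandmtd_erase_block;
+ * param->initialise_flash_fn = nandmtd_initialise;
+ *
+ * dev->driver_context = mtd; // yaffs_dev_to_mtd() expects this
+ * }
+ */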
+
+struct yaffs_dev {
+ struct yaffs_param param;
+
+ /* Context storage. Holds extra OS specific data for this device */
+
+ void *os_context;
+ void *driver_context;
+
+ struct list_head dev_list;
+
+ /* Runtime parameters. Set up by YAFFS. */
+ int data_bytes_per_chunk;
+
+ /* Non-wide tnode stuff */
+ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ * the tnodes are not wide enough.
+ */
+ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+ u32 tnode_width;
+ u32 tnode_mask;
+ u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+ u32 chunk_shift; /* Shift value */
+ u32 chunk_div; /* Divisor after shifting: 1 for power-of-2 sizes */
+ u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+ int is_mounted;
+ int read_only;
+ int is_checkpointed;
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internal_start_block;
+ int internal_end_block;
+ int block_offset;
+ int chunk_offset;
+
+ /* Runtime checkpointing stuff */
+ int checkpt_page_seq; /* running sequence number of checkpoint pages */
+ int checkpt_byte_count;
+ int checkpt_byte_offs;
+ u8 *checkpt_buffer;
+ int checkpt_open_write;
+ int blocks_in_checkpt;
+ int checkpt_cur_chunk;
+ int checkpt_cur_block;
+ int checkpt_next_block;
+ int *checkpt_block_list;
+ int checkpt_max_blocks;
+ u32 checkpt_sum;
+ u32 checkpt_xor;
+
+ int checkpoint_blocks_required; /* Number of blocks needed to store current checkpoint set */
+
+ /* Block Info */
+ struct yaffs_block_info *block_info;
+ u8 *chunk_bits; /* bitmap of chunks in use */
+ unsigned block_info_alt:1; /* was allocated using alternative strategy */
+ unsigned chunk_bits_alt:1; /* was allocated using alternative strategy */
+ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ * Must be consistent with chunks_per_block.
+ */
+
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int alloc_block_finder; /* Used to search for next allocation block */
+
+ /* Object and Tnode memory management */
+ void *allocator;
+ int n_obj;
+ int n_tnodes;
+
+ int n_hardlinks;
+
+ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+ u32 bucket_finder;
+
+ int n_free_chunks;
+
+ /* Garbage collection control */
+ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
+ u32 n_clean_ups;
+
+ unsigned has_pending_prioritised_gc; /* We think this device might have pending prioritised gcs */
+ unsigned gc_disable;
+ unsigned gc_block_finder;
+ unsigned gc_dirtiest;
+ unsigned gc_pages_in_use;
+ unsigned gc_not_done;
+ unsigned gc_block;
+ unsigned gc_chunk;
+ unsigned gc_skip;
+
+ /* Special directories */
+ struct yaffs_obj *root_dir;
+ struct yaffs_obj *lost_n_found;
+
+ /* Buffer areas for storing data to recover from write failures TODO
+ * u8 buffered_data[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+ * struct yaffs_spare buffered_spare[YAFFS_CHUNKS_PER_BLOCK];
+ */
+
+ int buffered_block; /* Which block is buffered here? */
+ int doing_buffered_block_rewrite;
+
+ struct yaffs_cache *cache;
+ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files. */
+ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted files live. */
+ struct yaffs_obj *del_dir; /* Directory where deleted objects are sent to disappear. */
+ struct yaffs_obj *unlinked_deletion; /* Current file being background deleted. */
+ int n_deleted_files; /* Count of files awaiting deletion. */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+ int max_temp;
+ int temp_in_use;
+ int unmanaged_buffer_allocs;
+ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently allocating block */
+ unsigned oldest_dirty_seq;
+ unsigned oldest_dirty_block;
+
+ /* Block refreshing */
+ int refresh_skip; /* A skip down counter. Refresh happens when this gets to zero. */
+
+ /* Dirty directory handling */
+ struct list_head dirty_dirs; /* List of dirty directories */
+
+ /* Statistics */
+ u32 n_page_writes;
+ u32 n_page_reads;
+ u32 n_erasures;
+ u32 n_erase_failures;
+ u32 n_gc_copies;
+ u32 all_gcs;
+ u32 passive_gc_count;
+ u32 oldest_dirty_gc_count;
+ u32 n_gc_blocks;
+ u32 bg_gcs;
+ u32 n_retired_writes;
+ u32 n_retired_blocks;
+ u32 n_ecc_fixed;
+ u32 n_ecc_unfixed;
+ u32 n_tags_ecc_fixed;
+ u32 n_tags_ecc_unfixed;
+ u32 n_deletions;
+ u32 n_unmarked_deletions;
+ u32 refresh_count;
+ u32 cache_hits;
+
+};
+
+/* The yaffs_checkpt_dev structure holds the device information that changes at runtime and
+ * must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion. */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+int yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+ int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR * name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, const YCHAR * alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 * buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev, int line_no);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR * str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+#endif
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100644
index 000000000000..3b508cbc4e8a
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,41 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+ struct list_head context_list; /* List of these we have mounted */
+ struct yaffs_dev *dev;
+ struct super_block *super;
+ struct task_struct *bg_thread; /* Background thread for this device */
+ int bg_running;
+ struct mutex gross_lock; /* Gross locking mutex*/
+ u8 *spare_buffer; /* For mtdif2 use. Don't know the size of the buffer
+ * at compile time so we have to allocate it.
+ */
+ struct list_head search_contexts;
+ void (*put_super_fn) (struct super_block * sb);
+
+ struct task_struct *readdir_process;
+ unsigned mount_id;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 000000000000..7cf53b3d91be
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,54 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+
+#include "yaffs_linux.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ u32 addr =
+ ((loff_t) block_no) * dev->param.total_bytes_per_chunk
+ * dev->param.chunks_per_block;
+ struct erase_info ei;
+
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ retval = mtd->erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_initialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 000000000000..666507417fec
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no);
+int nandmtd_initialise(struct yaffs_dev *dev);
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif1.c b/fs/yaffs2/yaffs_mtdif1.c
new file mode 100644
index 000000000000..51083695eb33
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -0,0 +1,330 @@
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This module provides the interface between yaffs_nand.c and the
+ * MTD API. This version is used when the MTD interface supports the
+ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+ * and we have a small-page NAND device.
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+ * and the yaffs_tags compatibility functions in yaffs_tagscompat.c that are
+ * called in yaffs_mtdif.c when the function pointers are NULL.
+ * We assume the MTD layer is performing ECC (use_nand_ecc is true).
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_packedtags1.h"
+#include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */
+#include "yaffs_linux.h"
+
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#include "linux/mtd/mtd.h"
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+# define YTAG1_SIZE 8
+#else
+# define YTAG1_SIZE 9
+#endif
+
+/* Write a chunk (page) of data to NAND.
+ *
+ * Caller always provides ExtendedTags data which are converted to a more
+ * compact (packed) form for storage in NAND. A mini-ECC runs over the
+ * contents of the tags meta-data; it is used to validate the tags when read.
+ *
+ * - Pack ExtendedTags to packed_tags1 form
+ * - Compute mini-ECC for packed_tags1
+ * - Write data and packed tags to NAND.
+ *
+ * Note: Due to the use of the packed_tags1 meta-data which does not include
+ * a full sequence number (as found in the larger packed_tags2 form) it is
+ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+ * discarded and dirty. This is not ideal: newer NAND parts are supposed
+ * to be written just once. When Yaffs performs this operation, this
+ * function is called with a NULL data pointer -- calling MTD write_oob
+ * without data is valid usage (2.6.17).
+ *
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_ext_tags *etags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_bytes = dev->data_bytes_per_chunk;
+ loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+ struct mtd_oob_ops ops;
+ struct yaffs_packed_tags1 pt1;
+ int retval;
+
+ /* we assume that packed_tags1 and struct yaffs_tags are compatible */
+ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+ compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+ yaffs_pack_tags1(&pt1, etags);
+ yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * etags with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
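+ /* Explanatory note (not from the original source): NAND programming
+ * can only turn 1 bits into 0 bits, so writing the 8 tag bytes as
+ * all-0xff with only pt1.deleted cleared changes just that one bit
+ * on flash and leaves every other previously written tag bit intact.
+ */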
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+#else
+ ((u8 *) & pt1)[8] = 0xff;
+ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* zero page_status byte to indicate deleted */
+ ((u8 *) & pt1)[8] = 0;
+ }
+#endif
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunk_bytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (u8 *) & pt1;
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Return with empty ExtendedTags but add ecc_result.
+ */
+static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval)
+{
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+ etags->ecc_result = ecc_result;
+ }
+ return retval;
+}
+
+/* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+ * all members except ecc_result and block_bad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the packed_tags1 mini-ECC (correct if necessary/possible)
+ * - Convert packed_tags1 to ExtendedTags
+ * - Update ecc_result and block_bad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_ext_tags *etags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_bytes = dev->data_bytes_per_chunk;
+ loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ struct yaffs_packed_tags1 pt1;
+ int retval;
+ int deleted;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunk_bytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = data;
+ ops.oobbuf = (u8 *) & pt1;
+
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+ dev->n_ecc_unfixed++;
+ /* fall into... */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ etags->block_bad = (mtd->block_isbad) (mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+ if (yaffs_check_ff((u8 *) & pt1, 8)) {
+ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ /* Read deleted status (bit) then return it to its non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+#else
+ deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7);
+#endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->n_tags_ecc_fixed++;
+ if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->n_tags_ecc_unfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.should_be_ff = 0xFFFFFFFF;
+ yaffs_unpack_tags1(etags, &pt1);
+ etags->ecc_result = eccres;
+
+ /* Set deleted state */
+ etags->is_deleted = deleted;
+ return YAFFS_OK;
+}
+
+/* Mark a block bad.
+ *
+ * This is a persistent state.
+ * Use of this function should be rare.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "marking block %d bad", block_no);
+
+ retval = mtd->block_markbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Check any MTD prerequisites.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int nandmtd1_test_prerequists(struct mtd_info *mtd)
+{
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "mtd device has only %d bytes for tags, need %d",
+ oobavail, YTAG1_SIZE);
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+}
+
+/* Query for the current state of a specific block.
+ *
+ * Examine the tags of the first chunk of the block and return the state:
+ * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+ * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+ * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+ *
+ * Always returns YAFFS_OK.
+ */
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state_ptr, u32 * seq_ptr)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_num = block_no * dev->param.chunks_per_block;
+ loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk;
+ struct yaffs_ext_tags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+
+ /* We don't yet have a good place to test for MTD config prerequisites.
+ * Do it here as we are called during the initial scan.
+ */
+ if (nandmtd1_test_prerequists(mtd) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags);
+ etags.block_bad = (mtd->block_isbad) (mtd, addr);
+ if (etags.block_bad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is marked bad", block_no);
+ state = YAFFS_BLOCK_STATE_DEAD;
+ } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ /* bad tags, need to look more closely */
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else if (etags.chunk_used) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.seq_number;
+ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ *state_ptr = state;
+ *seq_ptr = seqnum;
+
+ /* query always succeeds */
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif1.h b/fs/yaffs2/yaffs_mtdif1.h
new file mode 100644
index 000000000000..07ce4524f0f6
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF1_H__
+#define __YAFFS_MTDIF1_H__
+
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags);
+
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no);
+
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif2.c b/fs/yaffs2/yaffs_mtdif2.c
new file mode 100644
index 000000000000..d1643df2c381
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -0,0 +1,225 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* mtd interface for YAFFS2 */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_mtdif2.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_linux.h"
+
+/* NB For use with inband tags....
+ * We assume that the data buffer is of size total_bytes_per_chunk so that we can also
+ * use it to load the tags.
+ */
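+/* Illustrative buffer layout when inband tags are enabled (added note,
+ * not from the original source); the packed tags-only structure is
+ * appended straight after the file data so a single MTD write covers
+ * both:
+ *
+ * |<------------ total_bytes_per_chunk ------------>|
+ * +----------------------------+---------------------+
+ * | data_bytes_per_chunk | yaffs_packed_tags2_ |
+ * | (file data) | tags_only |
+ * +----------------------------+---------------------+
+ */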
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+ int retval = 0;
+
+ loff_t addr;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_write_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
+ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+ retval = mtd->write_oob(mtd, addr, &ops);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+
+ size_t dummy;
+ int retval = 0;
+ int local_data = 0;
+
+ loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_read_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev, __LINE__);
+ }
+
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = packed_tags_size;
+ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+ ops.oobbuf = yaffs_dev_to_lc(dev)->spare_buffer;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)&data[dev->
+ data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else {
+ if (tags) {
+ memcpy(packed_tags_ptr,
+ yaffs_dev_to_lc(dev)->spare_buffer,
+ packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+ if (tags && retval == -EBADMSG
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+ if (tags && retval == -EUCLEAN
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_mark_block_bad %d", block_no);
+
+ retval =
+ mtd->block_markbad(mtd,
+ block_no * dev->param.chunks_per_block *
+ dev->param.total_bytes_per_chunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+
+}
+
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "nandmtd2_query_block %d", block_no);
+ retval =
+ mtd->block_isbad(mtd,
+ block_no * dev->param.chunks_per_block *
+ dev->param.total_bytes_per_chunk);
+
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *seq_number = 0;
+ } else {
+ struct yaffs_ext_tags t;
+ nandmtd2_read_chunk_tags(dev, block_no *
+ dev->param.chunks_per_block, NULL, &t);
+
+ if (t.chunk_used) {
+ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else {
+ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %d state %d", *seq_number, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
diff --git a/fs/yaffs2/yaffs_mtdif2.h b/fs/yaffs2/yaffs_mtdif2.h
new file mode 100644
index 000000000000..d82112610d04
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF2_H__
+#define __YAFFS_MTDIF2_H__
+
+#include "yaffs_guts.h"
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags);
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no);
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 000000000000..daa36f989d31
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,201 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small
+ * number of values that fit into a small, finite buffer.
+ *
+ * Each attribute is stored as a record:
+ * sizeof(int) bytes record size.
+ * strnlen+1 bytes null-terminated name.
+ * nbytes value.
+ * ----------
+ * total size stored in record size
+ *
+ * This code has not been tested with unicode yet.
+ */
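+/* Illustrative use (not from the original sources), assuming YCHAR is
+ * plain char (the non-unicode build). xb is the flat buffer described
+ * above; "user.foo" and the sizes are made up for the example.
+ *
+ * char xb[512] = { 0 };
+ * const char val[] = "bar";
+ * char out[16];
+ * int n;
+ *
+ * nval_set(xb, sizeof(xb), "user.foo", val, sizeof(val), 0);
+ * // buffer now holds: [record size][ "user.foo\0" ][ "bar\0" ]
+ * n = nval_get(xb, sizeof(xb), "user.foo", out, sizeof(out));
+ * // n == sizeof(val) on success, -ENODATA if the name is absent
+ * nval_del(xb, sizeof(xb), "user.foo");
+ */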
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR * name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (strncmp
+ ((YCHAR *) (xb + pos + sizeof(int)), name, size) == 0) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -1;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR * name)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+ /* Find size, shift rest over this record, then zero out the rest of buffer */
+ memcpy(&size, xb + pos, sizeof(int));
+ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+ memset(xb + (xb_size - size), 0, size);
+ return 0;
+ } else {
+ return -ENODATA;
+ }
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags)
+{
+ int pos;
+ int namelen = strnlen(name, xb_size);
+ int reclen;
+ int size_exist = 0;
+ int space;
+ int start;
+
+ pos = nval_find(xb, xb_size, name, &size_exist);
+
+ if (flags & XATTR_CREATE && pos >= 0)
+ return -EEXIST;
+ if (flags & XATTR_REPLACE && pos < 0)
+ return -ENODATA;
+
+ start = nval_used(xb, xb_size);
+ space = xb_size - start + size_exist;
+
+ reclen = (sizeof(int) + namelen + 1 + bsize);
+
+ if (reclen > space)
+ return -ENOSPC;
+
+ if (pos >= 0) {
+ nval_del(xb, xb_size, name);
+ start = nval_used(xb, xb_size);
+ }
+
+ pos = start;
+
+ memcpy(xb + pos, &reclen, sizeof(int));
+ pos += sizeof(int);
+ strncpy((YCHAR *) (xb + pos), name, reclen);
+ pos += (namelen + 1);
+ memcpy(xb + pos, buf, bsize);
+ return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /* Advance over NUL */
+ pos++;
+ size--;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+
+ }
+ if (pos >= 0)
+ return -ERANGE;
+ else
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) && size <= xb_size && (pos + size) < xb_size
+ && !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 000000000000..2bb02b627628
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 000000000000..e816cabf43f8
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,127 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsvalidity.h"
+
+#include "yaffs_getblockinfo.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 * buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+
+ int realigned_chunk = nand_chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+ if (!tags)
+ tags = &local_tags;
+
+ if (dev->param.read_chunk_tags_fn)
+ result =
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk, buffer,
+ tags);
+ else
+ result = yaffs_tags_compat_rd(dev,
+ realigned_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * buffer, struct yaffs_ext_tags *tags)
+{
+
+ dev->n_page_writes++;
+
+ nand_chunk -= dev->chunk_offset;
+
+ if (tags) {
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ if (!yaffs_validate_tags(tags)) {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing uninitialised tags");
+ YBUG();
+ }
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ YBUG();
+ }
+
+ if (dev->param.write_chunk_tags_fn)
+ return dev->param.write_chunk_tags_fn(dev, nand_chunk, buffer,
+ tags);
+ else
+ return yaffs_tags_compat_wr(dev, nand_chunk, buffer, tags);
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+
+ if (dev->param.bad_block_fn)
+ return dev->param.bad_block_fn(dev, block_no);
+ else
+ return yaffs_tags_compat_mark_bad(dev, block_no);
+}
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number)
+{
+ block_no -= dev->block_offset;
+
+ if (dev->param.query_block_fn)
+ return dev->param.query_block_fn(dev, block_no, state,
+ seq_number);
+ else
+ return yaffs_tags_compat_query_block(dev, block_no,
+ state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block)
+{
+ int result;
+
+ flash_block -= dev->block_offset;
+
+ dev->n_erasures++;
+
+ result = dev->param.erase_fn(dev, flash_block);
+
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->param.initialise_flash_fn)
+ return dev->param.initialise_flash_fn(dev);
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 000000000000..543f1987124e
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 000000000000..a77f0954fc13
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,53 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xFFFFFFFF;
+
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+ static const u8 all_ff[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff
+ };
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xFFFFFFFF)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 000000000000..d6861ff505e4
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unused_stuff:1;
+ unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 000000000000..8e7fea3d2860
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,196 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_tagsvalidity.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed up scanning.
+ * This is indicated by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xF0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
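+
+/* Worked example (explanatory note, not from the original source): for
+ * an object header chunk that carries extra info,
+ * yaffs_pack_tags2_tags_only() below stores
+ *
+ * chunk_id = EXTRA_HEADER_INFO_FLAG | extra_parent_id
+ * (plus EXTRA_SHRINK_FLAG / EXTRA_SHADOWS_FLAG when set)
+ * obj_id = obj_id | (extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT)
+ * n_bytes = equivalent object id for hardlinks, file length for
+ * files, 0 otherwise
+ *
+ * So the header of file object 0x123 whose parent is object 2 packs to
+ * chunk_id = 0x80000002 and obj_id = 0x10000123 (assuming
+ * YAFFS_OBJECT_TYPE_FILE == 1, as defined earlier in this patch).
+ */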
+
+static void yaffs_dump_packed_tags2_tags_only(const struct
+ yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ if (t->chunk_id == 0 && t->extra_available) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = t->extra_length;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ yaffs_init_tags(t);
+
+ if (ptt->seq_number != 0xFFFFFFFF) {
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id =
+ ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink =
+ (ptt->chunk_id & EXTRA_SHRINK_FLAG) ? 1 : 0;
+ t->extra_shadows =
+ (ptt->chunk_id & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+ t->extra_obj_type =
+ ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_length = ptt->n_bytes;
+ }
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xFFFFFFFF && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 000000000000..f3296697bc0c
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC) for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 000000000000..7578075d9ac1
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations *********/
+
+void yaffs_calc_ecc(const u8 * data, struct yaffs_spare *spare)
+{
+ yaffs_ecc_cacl(data, spare->ecc1);
+ yaffs_ecc_cacl(&data[256], spare->ecc2);
+}
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ecc */
+
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+
+ tags->ecc = ecc;
+
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+		/* Now recalculate the ecc */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+		/* Weird ecc failure value */
+		/* TODO: Need to do something here */
+ return -1; /* unrecovered error */
+ }
+
+ return 0;
+}
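+
+/*
+ * Worked example (illustrative): the tags ECC above is the XOR of the
+ * 1-based positions of all set bits in the 8 tag bytes.  If a single bit
+ * flips, e.g. bit 2 of byte 3 (position 3*8 + 2 + 1 = 27), the stored and
+ * recalculated ECC values differ by exactly 27, so yaffs_check_tags_ecc()
+ * toggles b[26 / 8] bit (26 & 7) back and reports a recovered error.
+ */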
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xFF, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 * data,
+ struct yaffs_spare *spare)
+{
+ if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d is not valid",
+ nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+
+ if (!spare && data) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+
+ if (!dev->param.use_nand_ecc) {
+ ret_val =
+ dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+ if (data && correct_errors) {
+ /* Do ECC correction */
+			/* TODO: handle any errors */
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ yaffs_ecc_cacl(data, calc_ecc);
+ ecc_result1 =
+ yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_cacl(&data[256], calc_ecc);
+ ecc_result2 =
+ yaffs_ecc_correct(&data[256], spare->ecc2,
+ calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_nand_spare nspare;
+
+ memset(&nspare, 0, sizeof(nspare));
+
+ ret_val = dev->param.read_chunk_fn(dev, nand_chunk, data,
+ (struct yaffs_spare *)
+ &nspare);
+ memcpy(spare, &nspare, sizeof(struct yaffs_spare));
+ if (data && correct_errors) {
+ if (nspare.eccres1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ } else if (nspare.eccres1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ }
+
+ if (nspare.eccres2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ } else if (nspare.eccres2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+ return ret_val;
+}
+
+/*
+ * Functions for robustness handling
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev,
+ flash_block + dev->block_offset)->needs_retiring =
+ 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & 0x3ff;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data)
+ yaffs_calc_ecc(data, &spare);
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+
+ }
+
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
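+
+/*
+ * Note (illustrative): for chunk sizes of 1 KiB or more, n_bytes is split
+ * into a 10-bit LSB and a 2-bit MSB field.  For example n_bytes == 1536 is
+ * stored as n_bytes_lsb == 512 and n_bytes_msb == 1, and is reassembled the
+ * same way in yaffs_tags_compat_rd() below.
+ */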
+
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *ext_tags)
+{
+
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+
+ static struct yaffs_spare spare_ff;
+ static int init;
+
+ if (!init) {
+ memset(&spare_ff, 0xFF, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (yaffs_rd_chunk_nand(dev, nand_chunk, data, &spare, &ecc_result, 1)) {
+ /* ext_tags may be NULL */
+ if (ext_tags) {
+
+ int deleted =
+ (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ (memcmp(&spare_ff, &spare, sizeof(spare_ff)) !=
+ 0) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.
+ n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+ }
+
+ return YAFFS_OK;
+ } else {
+ return YAFFS_FAIL;
+ }
+}
+
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+	/*
+	 * 'Y' has fewer than 7 bits set, so yaffs_tags_compat_query_block()
+	 * will report this block as dead.
+	 */
+	spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number)
+{
+
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xFF, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+ &spare0, &dummy, 1);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 1);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 000000000000..8cd35dcd3cab
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,36 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+#include "yaffs_guts.h"
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number);
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+int yaffs_count_bits(u8 byte);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsvalidity.c b/fs/yaffs2/yaffs_tagsvalidity.c
new file mode 100644
index 000000000000..4358d79d4bec
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.c
@@ -0,0 +1,27 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_tagsvalidity.h"
+
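+/*
+ * validity0/validity1 hold complementary alternating bit patterns
+ * (0xAAAAAAAA / 0x55555555); yaffs_validate_tags() below treats a tags
+ * structure whose markers no longer match as invalid, which presumably
+ * catches stale or corrupted tag data cheaply.
+ */
+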
+void yaffs_init_tags(struct yaffs_ext_tags *tags)
+{
+ memset(tags, 0, sizeof(struct yaffs_ext_tags));
+ tags->validity0 = 0xAAAAAAAA;
+ tags->validity1 = 0x55555555;
+}
+
+int yaffs_validate_tags(struct yaffs_ext_tags *tags)
+{
+ return (tags->validity0 == 0xAAAAAAAA && tags->validity1 == 0x55555555);
+
+}
diff --git a/fs/yaffs2/yaffs_tagsvalidity.h b/fs/yaffs2/yaffs_tagsvalidity.h
new file mode 100644
index 000000000000..36a021fc8fa8
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGS_VALIDITY_H__
+#define __YAFFS_TAGS_VALIDITY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_tags(struct yaffs_ext_tags *tags);
+int yaffs_validate_tags(struct yaffs_ext_tags *tags);
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 000000000000..6273dbf9f63f
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xF0000000
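+
+/*
+ * Example (illustrative): yaffs_trace_mask is exposed as a module
+ * parameter (see yaffs_vfs.c), so bad-block and scan tracing could be
+ * enabled with:
+ *
+ *	yaffs_trace_mask = YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS |
+ *			   YAFFS_TRACE_SCAN;	(= 0x40000018)
+ */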
+
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 000000000000..738c7f69a5ec
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,535 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+	dev = dev;	/* hush unused-parameter warnings */
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char *block_state_name[] = {
+ "Unknown",
+ "Needs scanning",
+ "Scanning",
+ "Empty",
+ "Allocating",
+ "Full",
+ "Dirty",
+ "Checkpoint",
+ "Collecting",
+ "Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0
+ || bi->pages_in_use > dev->param.chunks_per_block
+ || bi->soft_del_pages < 0
+ || bi->soft_del_pages > dev->param.chunks_per_block
+ || actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has illegal values pages_in_use %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+ int i;
+ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int illegal_states = 0;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ memset(state_count, 0, sizeof(state_count));
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ yaffs_verify_blk(dev, bi, i);
+
+ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+ state_count[bi->block_state]++;
+ else
+ illegal_states++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%d blocks have illegal states",
+ illegal_states);
+ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many allocating blocks");
+
+ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%s %d blocks",
+ block_state_name[i], state_count[i]);
+
+ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Checkpoint block count wrong dev %d count %d",
+ dev->blocks_in_checkpt,
+ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Erased block count wrong dev %d count %d",
+ dev->n_erased_blocks,
+ state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many collecting blocks %d (max is 1)",
+ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
+ * case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!(tags && obj && oh)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Verifying object header tags %p obj %p oh %p",
+ tags, obj, oh);
+ return;
+ }
+
+ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header type is illegal value 0x%x",
+ tags->obj_id, oh->type);
+
+ if (tags->obj_id != obj->obj_id)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch obj_id %d",
+ tags->obj_id, obj->obj_id);
+
+ /*
+ * Check that the object's parent ids match if parent_check requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if (parent_check && tags->obj_id > 1 && !obj->parent)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d obj->parent is NULL",
+ tags->obj_id, oh->parent_obj_id);
+
+ if (parent_check && obj->parent &&
+ oh->parent_obj_id != obj->parent->obj_id &&
+ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d parent_obj_id %d",
+ tags->obj_id, oh->parent_obj_id,
+ obj->parent->obj_id);
+
+ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is NULL",
+ obj->obj_id);
+
+ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Trashed name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is 0xFF",
+ obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+ int required_depth;
+ int actual_depth;
+ u32 last_chunk;
+ u32 x;
+ u32 i;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ struct yaffs_tnode *tn;
+ u32 obj_id;
+
+ if (!obj)
+ return;
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ dev = obj->my_dev;
+ obj_id = obj->obj_id;
+
+ /* Check file size is consistent with tnode depth */
+ last_chunk =
+ obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1;
+ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ actual_depth = obj->variant.file_variant.top_level;
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+	 * checking that the tags for every chunk match.
+ */
+
+ if (yaffs_skip_nand_verification(dev))
+ return;
+
+ for (i = 1; i <= last_chunk; i++) {
+ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+ if (tn) {
+ u32 the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ &tags);
+ if (tags.obj_id != obj_id || tags.chunk_id != i)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+ obj_id, i, the_chunk,
+ tags.obj_id, tags.chunk_id);
+ }
+ }
+ }
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ u32 chunk_min;
+ u32 chunk_max;
+
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent
+ && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has illegal type %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+ }
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ int count = 0;
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+ YBUG();
+ return;
+ }
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!obj->parent) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent" );
+ YBUG();
+ return;
+ }
+
+ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+ YBUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+ if (lh) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_verify_obj(list_obj);
+ if (obj == list_obj)
+ count++;
+ }
+ }
+
+ if (count != 1) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory %d times",
+ count);
+ YBUG();
+ }
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ if (!directory) {
+ YBUG();
+ return;
+ }
+
+ if (yaffs_skip_full_verification(directory->my_dev))
+ return;
+
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Directory has wrong type: %d",
+ directory->variant_type);
+ YBUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &directory->variant.dir_variant.children) {
+ if (lh) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (list_obj->parent != directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory list has wrong parent %p",
+ list_obj->parent);
+ YBUG();
+ }
+ yaffs_verify_obj_in_dir(list_obj);
+ }
+ }
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+ int counted;
+ int difference;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ counted = yaffs_count_free_chunks(dev);
+
+ difference = dev->n_free_chunks - counted;
+
+ if (difference) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Freechunks verification failure %d %d %d",
+ dev->n_free_chunks, counted, difference);
+ yaffs_free_verification_failures++;
+ }
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+ in = in;
+ return YAFFS_OK;
+}
+
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 000000000000..cc6f88999305
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100644
index 000000000000..d95875fe1e57
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,2790 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * NB There are two variants of Linux VFS glue code. This variant supports
+ * a single version and should not include any multi-version code.
+ */
+#include <linux/version.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include <linux/exportfs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+
+#include <asm/div64.h>
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#define YPROC_ROOT NULL
+
+#define Y_INIT_TIMER(a) init_timer_on_stack(a)
+
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+ uint64_t result = partition_size;
+ do_div(result, block_size);
+ return (uint32_t) result;
+}
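+
+/*
+ * For example (illustrative): a 512 MiB partition with 128 KiB erase blocks
+ * gives YCALCBLOCKS(512 * 1024 * 1024, 128 * 1024) == 4096.
+ */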
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_mtdif1.h"
+#include "yaffs_mtdif2.h"
+
+unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+
+/* Module Parameters */
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+
+
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#define yaffs_inode_to_obj(iptr) ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
+
+#define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+ } while(0)
+
+
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+ return yaffs_gc_control;
+}
+
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
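+
+/*
+ * Note: gross_lock is a single per-device mutex; the VFS entry points below
+ * take it around every core YAFFS call, so operations on one yaffs_dev are
+ * fully serialised.
+ */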
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj);
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ yaffs_gross_unlock(dev);
+
+ unlock_new_inode(inode);
+ return inode;
+}
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ struct yaffs_obj *obj)
+{
+ struct inode *inode;
+
+ if (!sb) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL super_block!!");
+ return NULL;
+
+ }
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL object!!");
+ return NULL;
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for object %d",
+ obj->obj_id);
+
+ inode = yaffs_iget(sb, obj->obj_id);
+ if (IS_ERR(inode))
+ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+ /* NB You can't be holding gross_lock or deadlock will happen! */
+
+ return inode;
+}
+
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+{
+ struct inode *inode;
+
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_dev *dev;
+
+ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+ int error = -ENOSPC;
+ uid_t uid = current->cred->fsuid;
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: parent object %d type %d",
+ parent->obj_id, parent->variant_type);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: could not get parent object");
+ return -EPERM;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_mknod: making object for %s, mode %x dev %x",
+ dentry->d_name.name, mode, rdev);
+
+ dev = parent->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, old_encode_dev(rdev));
+ break;
+ case S_IFREG: /* file */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+ gid);
+ break;
+ case S_IFDIR: /* directory */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+
+ /* Can not call yaffs_get_inode() with gross lock held */
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod created object %d count = %d",
+ obj->obj_id, atomic_read(&inode->i_count));
+ error = 0;
+ yaffs_fill_inode_from_obj(dir, parent);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+ error = -ENOMEM;
+ }
+
+ return error;
+}
+
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+}
+
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+{
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *link = NULL;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+ link =
+ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ obj);
+
+ if (link) {
+ old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_link link count %d i_count %d",
+ old_dentry->d_inode->i_nlink,
+ atomic_read(&old_dentry->d_inode->i_count));
+ }
+
+ yaffs_gross_unlock(dev);
+
+ if (link) {
+ update_dir_time(dir);
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ uid_t uid = current->cred->fsuid;
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+ dev = yaffs_inode_to_obj(dir)->my_dev;
+ yaffs_gross_lock(dev);
+ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+ return 0;
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+ }
+
+ return -ENOMEM;
+}
+
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n)
+{
+ struct yaffs_obj *obj;
+ struct inode *inode = NULL;
+
+ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup for %d:%s",
+ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
+
+ /* Can't hold gross lock when calling yaffs_get_inode() */
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup found %d", obj->obj_id);
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+
+ if (inode) {
+			yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup dentry");
+ d_add(dentry, inode);
+ /* return dentry; */
+ return NULL;
+ }
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+ }
+
+ d_add(dentry, inode);
+
+ return NULL;
+}
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int ret_val;
+
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_unlink %d:%s",
+ (int)(dir->i_ino), dentry->d_name.name);
+ obj = yaffs_inode_to_obj(dir);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+ if (ret_val == YAFFS_OK) {
+ dentry->d_inode->i_nlink--;
+ dir->i_version++;
+ yaffs_gross_unlock(dev);
+ mark_inode_dirty(dentry->d_inode);
+ update_dir_time(dir);
+ return 0;
+ }
+ yaffs_gross_unlock(dev);
+ return -ENOTEMPTY;
+}
+
+static int yaffs_sync_object(struct file *file, int datasync)
+{
+
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct dentry *dentry = file->f_path.dentry;
+
+ obj = yaffs_dentry_to_obj(dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, "yaffs_sync_object");
+ yaffs_gross_lock(dev);
+ yaffs_flush_file(obj, 1, datasync);
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct yaffs_dev *dev;
+ int ret_val = YAFFS_FAIL;
+ struct yaffs_obj *target;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+ dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+
+ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&target->variant.dir_variant.children)) {
+
+ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+ ret_val = YAFFS_FAIL;
+ } else {
+ /* Now does unlinking internally using shadowing mechanism */
+ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+ old_dentry->d_name.name,
+ yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+ }
+ yaffs_gross_unlock(dev);
+
+ if (ret_val == YAFFS_OK) {
+ if (target) {
+ new_dentry->d_inode->i_nlink--;
+ mark_inode_dirty(new_dentry->d_inode);
+ }
+
+ update_dir_time(old_dir);
+ if (old_dir != new_dir)
+ update_dir_time(new_dir);
+ return 0;
+ } else {
+ return -ENOTEMPTY;
+ }
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_setattr of object %d",
+ yaffs_inode_to_obj(inode)->obj_id);
+
+ /* Fail if a requested resize >= 2GB */
+ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+ error = -EINVAL;
+
+ if (error == 0)
+ error = inode_change_ok(inode, attr);
+ if (error == 0) {
+ int result;
+ if (!error) {
+ setattr_copy(inode, attr);
+ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+ if (attr->ia_valid & ATTR_SIZE) {
+ truncate_setsize(inode, attr->ia_size);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+ }
+ }
+ dev = yaffs_inode_to_obj(inode)->my_dev;
+ if (attr->ia_valid & ATTR_SIZE) {
+ yaffs_trace(YAFFS_TRACE_OS, "resize to %d(%x)",
+ (int)(attr->ia_size),
+ (int)(attr->ia_size));
+ }
+ yaffs_gross_lock(dev);
+ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
+ if (result == YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+ }
+ yaffs_gross_unlock(dev);
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+ return error;
+}
+
+#ifdef CONFIG_YAFFS_XATTR
+static int yaffs_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_set_xattrib(obj, name, value, size, flags);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name, void *buff,
+ size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_getxattr \"%s\" from object %d",
+ name, obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_get_xattrib(obj, name, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_remove_xattrib(obj, name);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_list_xattrib(obj, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr done returning %d", error);
+
+ return error;
+}
+
+#endif
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously, so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
+
+struct yaffs_search_context {
+ struct yaffs_dev *dev;
+ struct yaffs_obj *dir_obj;
+ struct yaffs_obj *next_return;
+ struct list_head others;
+};
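+
+/*
+ * Typical lifecycle (illustrative, matching yaffs_readdir() below):
+ *
+ *	sc = yaffs_new_search(dir_obj);
+ *	while (sc && sc->next_return) {
+ *		...emit sc->next_return...
+ *		yaffs_search_advance(sc);
+ *	}
+ *	yaffs_search_end(sc);
+ */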
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+ struct yaffs_dev *dev = dir->my_dev;
+ struct yaffs_search_context *sc =
+ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+ if (sc) {
+ sc->dir_obj = dir;
+ sc->dev = dev;
+ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else
+ sc->next_return =
+ list_entry(dir->variant.dir_variant.children.next,
+ struct yaffs_obj, siblings);
+ INIT_LIST_HEAD(&sc->others);
+ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+ }
+ return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+ if (sc) {
+ list_del(&sc->others);
+ kfree(sc);
+ }
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+ if (!sc)
+ return;
+
+ if (sc->next_return == NULL ||
+ list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else {
+ struct list_head *next = sc->next_return->siblings.next;
+
+ if (next == &sc->dir_obj->variant.dir_variant.children)
+ sc->next_return = NULL; /* end of list */
+ else
+ sc->next_return =
+ list_entry(next, struct yaffs_obj, siblings);
+ }
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+ struct list_head *i;
+ struct yaffs_search_context *sc;
+ struct list_head *search_contexts =
+ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+ /* Iterate through the directory search contexts.
+ * If any are currently on the object being removed, then advance
+	 * the search context to the next object to prevent a dangling pointer.
+ */
+ list_for_each(i, search_contexts) {
+ if (i) {
+ sc = list_entry(i, struct yaffs_search_context, others);
+ if (sc->next_return == obj)
+ yaffs_search_advance(sc);
+ }
+ }
+
+}
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = f->f_pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)f->f_dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, "..", 2, offset,
+ f->f_dentry->d_parent->d_inode->i_ino,
+ DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset, this_inode, this_type) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+
+static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+ .llseek = generic_file_llseek,
+};
+
+
+
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_flush object %d (%s)",
+ obj->obj_id, obj->dirty ? "dirty" : "clean");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_file(obj, 1, 0);
+
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+
+/* ExportFS support */
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ return yaffs_iget(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct dentry *parent = ERR_PTR(-ENOENT);
+ struct inode *inode;
+ unsigned long parent_ino;
+ struct yaffs_obj *d_obj;
+ struct yaffs_obj *parent_obj;
+
+ d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+ if (d_obj) {
+ parent_obj = d_obj->parent;
+ if (parent_obj) {
+ parent_ino = yaffs_get_obj_inode(parent_obj);
+ inode = yaffs_iget(sb, parent_ino);
+
+ if (IS_ERR(inode)) {
+ parent = ERR_CAST(inode);
+ } else {
+				/*
+				 * d_obtain_alias() returns an ERR_PTR and
+				 * drops the inode reference itself on
+				 * failure, so its result can be used
+				 * directly.
+				 */
+				parent = d_obtain_alias(inode);
+ }
+ }
+ }
+
+ return parent;
+}
+
+/* Operations left NULL here fall back to the default exportfs
+ * implementations, so only the functions YAFFS overrides are declared.
+ */
+
+static struct export_operations yaffs_export_ops = {
+ .fh_to_dentry = yaffs2_fh_to_dentry,
+ .fh_to_parent = yaffs2_fh_to_parent,
+ .get_parent = yaffs2_get_parent,
+};
+
+
+/*-----------------------------------------------------------------*/
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ unsigned char *alias;
+ int ret;
+
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+ ret = vfs_readlink(dentry, buffer, buflen, alias);
+ kfree(alias);
+ return ret;
+}
+
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ unsigned char *alias;
+ void *ret;
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+ yaffs_gross_unlock(dev);
+
+ if (!alias) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ nd_set_link(nd, alias);
+ ret = (void *)alias;
+out:
+ return ret;
+}
+
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+ kfree(alias);
+}
+
+
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+ /* Clear the association between the inode and
+ * the struct yaffs_obj.
+ */
+ obj->my_inode = NULL;
+ yaffs_inode_to_obj_lv(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+ yaffs_handle_defered_free(obj);
+}
+
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ int deleteme = 0;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_evict_inode: ino %d, count %d %s",
+ (int)inode->i_ino,
+ atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ deleteme = 1;
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+
+ if (deleteme && obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+
+}
+
+static void yaffs_touch_super(struct yaffs_dev *dev)
+{
+ struct super_block *sb = yaffs_dev_to_lc(dev)->super;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_touch_super() sb = %p", sb);
+ if (sb)
+ sb->s_dirt = 1;
+}
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ struct yaffs_obj *obj;
+ unsigned char *pg_buf;
+ int ret;
+
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readpage_nolock at %08x, size %08x",
+ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+ (unsigned)PAGE_CACHE_SIZE);
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ BUG_ON(!PageLocked(pg));
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_gross_lock(dev);
+
+ ret = yaffs_file_rd(obj, pg_buf,
+ pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+ yaffs_gross_unlock(dev);
+
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+ return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ int ret;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+ ret = yaffs_readpage_unlock(f, pg);
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+ return ret;
+}
+
+/* writepage inspired by/stolen from smbfs */
+
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct yaffs_dev *dev;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ struct yaffs_obj *obj;
+ int n_written = 0;
+ unsigned n_bytes;
+ loff_t i_size;
+
+ BUG_ON(!mapping);
+ inode = mapping->host;
+ BUG_ON(!inode);
+ i_size = i_size_read(inode);
+
+ end_index = i_size >> PAGE_CACHE_SHIFT;
+
+ if (page->index < end_index)
+ n_bytes = PAGE_CACHE_SIZE;
+ else {
+ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+ if (page->index > end_index || !n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %08x, inode size = %08x!!!",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT),
+ (unsigned)inode->i_size);
+ yaffs_trace(YAFFS_TRACE_OS,
+ " -> don't care!!");
+
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ return 0;
+ }
+ }
+
+ if (n_bytes != PAGE_CACHE_SIZE)
+ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %08x, size %08x",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT), n_bytes);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag0: obj = %05x, ino = %05x",
+ (int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+ n_written = yaffs_wr_file(obj, buffer,
+ page->index << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+ yaffs_touch_super(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag1: obj = %05x, ino = %05x",
+ (int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+ yaffs_gross_unlock(dev);
+
+ kunmap(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ put_page(page);
+
+ return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available for
+ * write_begin/end.
+ * For now we just assume few parallel writes and check against a small
+ * number.
+ * Todo: need to do this with a counter to handle parallel reads better.
+ */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ int n_free_chunks;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+ yaffs_gross_unlock(dev);
+
+ return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_gross_unlock(dev);
+}
+
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct page *pg = NULL;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+ int ret = 0;
+ int space_held = 0;
+
+ /* Get a page */
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+
+ *pagep = pg;
+ if (!pg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "start yaffs_write_begin index %d(%x) uptodate %d",
+ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+ /* Get fs space */
+ space_held = yaffs_hold_space(filp);
+
+ if (!space_held) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Update page if required */
+
+ if (!Page_Uptodate(pg))
+ ret = yaffs_readpage_nolock(filp, pg);
+
+ if (ret)
+ goto out;
+
+ /* Happy path return */
+ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+ return 0;
+
+out:
+ yaffs_trace(YAFFS_TRACE_OS,
+ "end yaffs_write_begin fail returning %d", ret);
+ if (space_held)
+ yaffs_release_space(filp);
+ if (pg) {
+ unlock_page(pg);
+ page_cache_release(pg);
+ }
+ return ret;
+}
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ struct yaffs_obj *obj;
+ int n_written, ipos;
+ struct inode *inode;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ inode = f->f_dentry->d_inode;
+
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+ else
+ ipos = *pos;
+
+ if (!obj)
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: hey obj is null!");
+ else
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write about to write writing %u(%x) bytes to object %d at %d(%x)",
+ (unsigned)n, (unsigned)n, obj->obj_id, ipos, ipos);
+
+ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+ yaffs_touch_super(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: %d(%x) bytes written",
+ (unsigned)n, (unsigned)n);
+
+ if (n_written > 0) {
+ ipos += n_written;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write size updated to %d bytes, %d blocks",
+ ipos, (int)(inode->i_blocks));
+ }
+
+ }
+ yaffs_gross_unlock(dev);
+ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *pg, void *fsdata)
+{
+ int ret = 0;
+ void *addr, *kva;
+ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+ kva = kmap(pg);
+ addr = kva + offset_into_page;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end addr %p pos %x n_bytes %d",
+ addr, (unsigned)pos, copied);
+
+ ret = yaffs_file_write(filp, addr, copied, &pos);
+
+ if (ret != copied) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end not same size ret %d copied %d",
+ ret, copied);
+ SetPageError(pg);
+ }
+
+ kunmap(pg);
+
+ yaffs_release_space(filp);
+ unlock_page(pg);
+ page_cache_release(pg);
+ return ret;
+}
+
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ struct super_block *sb = dentry->d_sb;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+ yaffs_gross_lock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+
+ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+ /* Do this if chunk size is not a power of 2 */
+
+ uint64_t bytes_in_dev;
+ uint64_t bytes_free;
+
+ bytes_in_dev =
+ ((uint64_t)
+ ((dev->param.end_block - dev->param.start_block +
+ 1))) * ((uint64_t) (dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk));
+
+ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
+ buf->f_blocks = bytes_in_dev;
+
+ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+ ((uint64_t) (dev->data_bytes_per_chunk));
+
+ do_div(bytes_free, sb->s_blocksize);
+
+ buf->f_bfree = bytes_free;
+
+ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ } else {
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+ }
+
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_gross_unlock(dev);
+ return 0;
+}
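+
+/* Worked example for the statfs arithmetic above (illustrative numbers
+ * only, not from any particular board): with 2048-byte data chunks,
+ * 64 chunks per block, blocks 0..1023 and a 4096-byte sb->s_blocksize,
+ * the "s_blocksize > data_bytes_per_chunk" branch applies, giving
+ * f_blocks = 1024 * 64 / (4096 / 2048) = 32768 and
+ * f_bfree = yaffs_get_n_free_chunks() / 2. The power-of-two test at
+ * the top catches odd chunk sizes (typically inband-tags layouts),
+ * where the 64-bit do_div() path is used instead.
+ */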
+
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+ struct inode *iptr;
+ struct yaffs_obj *obj;
+
+ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+ obj = yaffs_inode_to_obj(iptr);
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "flushing obj %d", obj->obj_id);
+ yaffs_flush_file(obj, 1, 0);
+ }
+ }
+}
+
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ if (!dev)
+ return;
+
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev);
+ if (do_checkpoint)
+ yaffs_checkpoint_save(dev);
+}
+
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+ unsigned erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned scattered = 0; /* Free chunks not in an erased block */
+
+ if (erased_chunks < dev->n_free_chunks)
+ scattered = (dev->n_free_chunks - erased_chunks);
+
+ if (!context->bg_running)
+ return 0;
+ else if (scattered < (dev->param.chunks_per_block * 2))
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 2)
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 4)
+ return 1;
+ else
+ return 2;
+}
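+
+/* How the urgency value above behaves, with illustrative numbers for a
+ * 64 chunk-per-block device: 0 is returned when the background thread
+ * is not running, when fewer than two blocks' worth of free chunks
+ * (128 here) sit scattered in partly-used blocks, or when more than
+ * half of the free space is already erased; 1 when the erased share is
+ * between a quarter and a half of the free space; 2 when it is below a
+ * quarter. yaffs_bg_thread_fn() below maps urgency 0/1/2 to gc polling
+ * intervals of roughly 2s, 100ms and 50ms respectively.
+ */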
+
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+ int do_checkpoint;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+ gc_urgent,
+ sb->s_dirt ? "dirty" : "clean",
+ request_checkpoint ? "checkpoint requested" : "no checkpoint",
+ oneshot_checkpoint ? " one-shot" : "");
+
+ yaffs_gross_lock(dev);
+ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+ oneshot_checkpoint) && !dev->is_checkpointed;
+
+ if (sb->s_dirt || do_checkpoint) {
+ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+ sb->s_dirt = 0;
+ if (oneshot_checkpoint)
+ yaffs_auto_checkpoint &= ~4;
+ }
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+/*
+ * yaffs background thread functions.
+ * yaffs_bg_thread_fn() is the thread function.
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after yaffs is initialised.
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is read-only.
+ */
+
+void yaffs_background_waker(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+static int yaffs_bg_thread_fn(void *data)
+{
+ struct yaffs_dev *dev = (struct yaffs_dev *)data;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned long now = jiffies;
+ unsigned long next_dir_update = now;
+ unsigned long next_gc = now;
+ unsigned long expires;
+ unsigned int urgency;
+
+ int gc_result;
+ struct timer_list timer;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "yaffs_background starting for dev %p", (void *)dev);
+
+ set_freezable();
+ while (context->bg_running) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ yaffs_gross_lock(dev);
+
+ now = jiffies;
+
+ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+ yaffs_update_dirty_dirs(dev);
+ next_dir_update = now + HZ;
+ }
+
+ if (time_after(now, next_gc) && yaffs_bg_enable) {
+ if (!dev->is_checkpointed) {
+ urgency = yaffs_bg_gc_urgency(dev);
+ gc_result = yaffs_bg_gc(dev, urgency);
+ if (urgency > 1)
+ next_gc = now + HZ / 20 + 1;
+ else if (urgency > 0)
+ next_gc = now + HZ / 10 + 1;
+ else
+ next_gc = now + HZ * 2;
+ } else {
+ /*
+ * gc not running so set to next_dir_update
+ * to cut down on wake ups
+ */
+ next_gc = next_dir_update;
+ }
+ }
+ yaffs_gross_unlock(dev);
+ expires = next_dir_update;
+ if (time_before(next_gc, expires))
+ expires = next_gc;
+ if (time_before(expires, now))
+ expires = now + HZ;
+
+ Y_INIT_TIMER(&timer);
+ timer.expires = expires + 1;
+ timer.data = (unsigned long)current;
+ timer.function = yaffs_background_waker;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+ }
+
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ int retval = 0;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+ if (dev->read_only)
+ return -1;
+
+ context->bg_running = 1;
+
+ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+ (void *)dev, "yaffs-bg-%d",
+ context->mount_id);
+
+ if (IS_ERR(context->bg_thread)) {
+ retval = PTR_ERR(context->bg_thread);
+ context->bg_thread = NULL;
+ context->bg_running = 0;
+ }
+ return retval;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+ ctxt->bg_running = 0;
+
+ if (ctxt->bg_thread) {
+ kthread_stop(ctxt->bg_thread);
+ ctxt->bg_thread = NULL;
+ }
+}
+
+static void yaffs_write_super(struct super_block *sb)
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_write_super%s",
+ request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+}
+
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+ return 0;
+}
+
+
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+
+
+struct yaffs_options {
+ int inband_tags;
+ int skip_checkpoint_read;
+ int skip_checkpoint_write;
+ int no_cache;
+ int tags_ecc_on;
+ int tags_ecc_overridden;
+ int lazy_loading_enabled;
+ int lazy_loading_overridden;
+ int empty_lost_and_found;
+ int empty_lost_and_found_overridden;
+};
+
+#define MAX_OPT_LEN 30
+static int yaffs_parse_options(struct yaffs_options *options,
+ const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+ /* Parse the options, which are a comma-separated list */
+
+ while (options_str && *options_str && !error) {
+ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+ while (*options_str == ',')
+ options_str++;
+
+ while (*options_str && *options_str != ',') {
+ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+ if (!strcmp(cur_opt, "inband-tags")) {
+ options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
+ options->tags_ecc_on = 0;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
+ options->tags_ecc_on = 1;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
+ options->lazy_loading_enabled = 0;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
+ options->lazy_loading_enabled = 1;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+ options->empty_lost_and_found = 0;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+ options->empty_lost_and_found = 1;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "no-cache")) {
+ options->no_cache = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+ options->skip_checkpoint_read = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+ options->skip_checkpoint_write = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+ cur_opt);
+ error = 1;
+ }
+ }
+
+ return error;
+}
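+
+/* Usage sketch for the parser above (the device and mount point below
+ * are made-up examples): the string parsed here is the -o argument
+ * passed to mount, so something like
+ *
+ * mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblock3 /data
+ *
+ * sets options.inband_tags, options.skip_checkpoint_read and
+ * options.skip_checkpoint_write, and leaves everything else at the
+ * defaults applied later in yaffs_internal_read_super().
+ */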
+
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+ .write_begin = yaffs_write_begin,
+ .write_end = yaffs_write_end,
+};
+
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+ .put_link = yaffs_put_link,
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj)
+{
+ if (inode && obj) {
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+ u32 mode = obj->yst_mode;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (!S_ISREG(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (!S_ISLNK(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!S_ISDIR(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->obj_id;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ inode->i_nlink = yaffs_get_obj_link_count(obj);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode mode %x uid %d gid %d size %d count %d",
+ inode->i_mode, inode->i_uid, inode->i_gid,
+ (int)inode->i_size, atomic_read(&inode->i_count));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ yaffs_inode_to_obj_lv(inode) = obj;
+
+ obj->my_inode = inode;
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode invalid parameters");
+ }
+}
+
+static void yaffs_put_super(struct super_block *sb)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_put_super");
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "Shutting down yaffs background thread");
+ yaffs_bg_stop(dev);
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "yaffs background thread shut down");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_super(sb, 1);
+
+ if (yaffs_dev_to_lc(dev)->put_super_fn)
+ yaffs_dev_to_lc(dev)->put_super_fn(sb);
+
+ yaffs_deinitialise(dev);
+
+ yaffs_gross_unlock(dev);
+ mutex_lock(&yaffs_context_lock);
+ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+ mutex_unlock(&yaffs_context_lock);
+
+ if (yaffs_dev_to_lc(dev)->spare_buffer) {
+ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+ }
+
+ kfree(dev);
+}
+
+static void yaffs_mtd_put_super(struct super_block *sb)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_super_to_dev(sb));
+
+ if (mtd->sync)
+ mtd->sync(mtd);
+
+ put_mtd_device(mtd);
+}
+
+static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
+ .put_super = yaffs_put_super,
+ .evict_inode = yaffs_evict_inode,
+ .sync_fs = yaffs_sync_fs,
+ .write_super = yaffs_write_super,
+};
+
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+ struct super_block *sb,
+ void *data, int silent)
+{
+ int n_blocks;
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct yaffs_dev *dev = NULL;
+ char devname_buf[BDEVNAME_SIZE + 1];
+ struct mtd_info *mtd;
+ int err;
+ char *data_str = (char *)data;
+ struct yaffs_linux_context *context = NULL;
+ struct yaffs_param *param;
+
+ int read_only = 0;
+
+ struct yaffs_options options;
+
+ unsigned mount_id;
+ int found;
+ struct yaffs_linux_context *context_iterator;
+ struct list_head *l;
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
+ sb->s_flags |= MS_NOATIME;
+
+ read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+ sb->s_export_op = &yaffs_export_ops;
+
+ if (!sb)
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+ else if (!sb->s_dev)
+ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+ else if (!yaffs_devname(sb, devname_buf))
+ printk(KERN_INFO "yaffs: devname is NULL\n");
+ else
+ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+ if (!data_str)
+ data_str = "";
+
+ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+ memset(&options, 0, sizeof(options));
+
+ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: Using yaffs%d", yaffs_version);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Attempting MTD mount of %u.%u,\"%s\"",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ yaffs_devname(sb, devname_buf));
+
+ /* Check it's an mtd device..... */
+ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+ return NULL; /* This isn't an mtd device */
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device #%u doesn't appear to exist",
+ MINOR(sb->s_dev));
+ return NULL;
+ }
+ /* Check it's NAND */
+ if (mtd->type != MTD_NANDFLASH) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device is not NAND it's type %d",
+ mtd->type);
+ return NULL;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, " erase %p", mtd->erase);
+ yaffs_trace(YAFFS_TRACE_OS, " read %p", mtd->read);
+ yaffs_trace(YAFFS_TRACE_OS, " write %p", mtd->write);
+ yaffs_trace(YAFFS_TRACE_OS, " readoob %p", mtd->read_oob);
+ yaffs_trace(YAFFS_TRACE_OS, " writeoob %p", mtd->write_oob);
+ yaffs_trace(YAFFS_TRACE_OS, " block_isbad %p", mtd->block_isbad);
+ yaffs_trace(YAFFS_TRACE_OS, " block_markbad %p", mtd->block_markbad);
+ yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+ yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+ yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+ yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+
+#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+ if (yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+ yaffs_version = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+ if (yaffs_version == 2 && !options.inband_tags
+ && WRITE_SIZE(mtd) == 512) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+ yaffs_version = 1;
+ }
+#endif
+
+ if (yaffs_version == 2) {
+ /* Check for version 2 style functions */
+ if (!mtd->erase ||
+ !mtd->block_isbad ||
+ !mtd->block_markbad ||
+ !mtd->read ||
+ !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support required functions");
+ return NULL;
+ }
+
+ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+ !options.inband_tags) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not have the right page sizes");
+ return NULL;
+ }
+ } else {
+ /* Check for V1 style functions */
+ if (!mtd->erase ||
+ !mtd->read ||
+ !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support required functions");
+ return NULL;
+ }
+
+ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support have the right page sizes");
+ return NULL;
+ }
+ }
+
+ /* OK, so if we got here, we have an MTD that's NAND and looks
+ * like it has the right capabilities
+ * Set the struct yaffs_dev up for mtd
+ */
+
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+ "yaffs: mtd is read only, setting superblock read only");
+ sb->s_flags |= MS_RDONLY;
+ }
+
+ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+ if (!dev || !context) {
+ if (dev)
+ kfree(dev);
+ if (context)
+ kfree(context);
+ dev = NULL;
+ context = NULL;
+ }
+
+ if (!dev) {
+ /* Could not allocate the yaffs device structure */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super failed trying to allocate yaffs_dev");
+ return NULL;
+ }
+ memset(dev, 0, sizeof(struct yaffs_dev));
+ param = &(dev->param);
+
+ memset(context, 0, sizeof(struct yaffs_linux_context));
+ dev->os_context = context;
+ INIT_LIST_HEAD(&(context->context_list));
+ context->dev = dev;
+ context->super = sb;
+
+ dev->read_only = read_only;
+
+ sb->s_fs_info = dev;
+
+ dev->driver_context = mtd;
+ param->name = mtd->name;
+
+ /* Set up the memory size parameters.... */
+
+ n_blocks =
+ YCALCBLOCKS(mtd->size,
+ (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+ param->n_reserved_blocks = 5;
+ param->n_caches = (options.no_cache) ? 0 : 10;
+ param->inband_tags = options.inband_tags;
+
+#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+ param->disable_lazy_load = 1;
+#endif
+#ifdef CONFIG_YAFFS_XATTR
+ param->enable_xattr = 1;
+#endif
+ if (options.lazy_loading_overridden)
+ param->disable_lazy_load = !options.lazy_loading_enabled;
+
+#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC
+ param->no_tags_ecc = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BACKGROUND
+#else
+ param->defered_dir_update = 1;
+#endif
+
+ if (options.tags_ecc_overridden)
+ param->no_tags_ecc = !options.tags_ecc_on;
+
+#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
+ param->empty_lost_n_found = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING
+ param->refresh_period = 0;
+#else
+ param->refresh_period = 500;
+#endif
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ param->always_check_erased = 1;
+#endif
+
+ if (options.empty_lost_and_found_overridden)
+ param->empty_lost_n_found = options.empty_lost_and_found;
+
+ /* ... and the functions. */
+ if (yaffs_version == 2) {
+ param->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
+ param->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
+ param->bad_block_fn = nandmtd2_mark_block_bad;
+ param->query_block_fn = nandmtd2_query_block;
+ yaffs_dev_to_lc(dev)->spare_buffer =
+ kmalloc(mtd->oobsize, GFP_NOFS);
+ param->is_yaffs2 = 1;
+ param->total_bytes_per_chunk = mtd->writesize;
+ param->chunks_per_block = mtd->erasesize / mtd->writesize;
+ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ } else {
+ /* use the MTD interface in yaffs_mtdif1.c */
+ param->write_chunk_tags_fn = nandmtd1_write_chunk_tags;
+ param->read_chunk_tags_fn = nandmtd1_read_chunk_tags;
+ param->bad_block_fn = nandmtd1_mark_block_bad;
+ param->query_block_fn = nandmtd1_query_block;
+ param->is_yaffs2 = 0;
+ }
+ /* ... and common functions */
+ param->erase_fn = nandmtd_erase_block;
+ param->initialise_flash_fn = nandmtd_initialise;
+
+ yaffs_dev_to_lc(dev)->put_super_fn = yaffs_mtd_put_super;
+
+ param->sb_dirty_fn = yaffs_touch_super;
+ param->gc_control = yaffs_gc_control_callback;
+
+ yaffs_dev_to_lc(dev)->super = sb;
+
+#ifndef CONFIG_YAFFS_DOES_ECC
+ param->use_nand_ecc = 1;
+#endif
+
+ param->skip_checkpt_rd = options.skip_checkpoint_read;
+ param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+ mutex_lock(&yaffs_context_lock);
+ /* Get a mount id */
+ found = 0;
+ for (mount_id = 0; !found; mount_id++) {
+ found = 1;
+ list_for_each(l, &yaffs_context_list) {
+ context_iterator =
+ list_entry(l, struct yaffs_linux_context,
+ context_list);
+ if (context_iterator->mount_id == mount_id)
+ found = 0;
+ }
+ }
+ context->mount_id = mount_id;
+
+ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+ &yaffs_context_list);
+ mutex_unlock(&yaffs_context_lock);
+
+ /* Directory search handling... */
+ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+ yaffs_gross_lock(dev);
+
+ err = yaffs_guts_initialise(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: guts initialised %s",
+ (err == YAFFS_OK) ? "OK" : "FAILED");
+
+ if (err == YAFFS_OK)
+ yaffs_bg_start(dev);
+
+ if (!context->bg_thread)
+ param->defered_dir_update = 0;
+
+ /* Release lock before yaffs_get_inode() */
+ yaffs_gross_unlock(dev);
+
+ /* Create root inode */
+ if (err == YAFFS_OK)
+ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+ if (!inode)
+ return NULL;
+
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+ root = d_alloc_root(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: d_alloc_root done");
+
+ if (!root) {
+ iput(inode);
+ return NULL;
+ }
+ sb->s_root = root;
+ sb->s_dirt = !dev->is_checkpointed;
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+ return sb;
+}
+
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+static struct dentry *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return mount_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+}
+
+static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+ .mount = yaffs_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+static struct dentry *yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd);
+}
+
+static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+ .mount = yaffs2_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#endif /* CONFIG_YAFFS_YAFFS2 */
+
+static struct proc_dir_entry *my_proc_entry;
+
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+ struct yaffs_param *param = &dev->param;
+ buf += sprintf(buf, "start_block........... %d\n", param->start_block);
+ buf += sprintf(buf, "end_block............. %d\n", param->end_block);
+ buf += sprintf(buf, "total_bytes_per_chunk. %d\n",
+ param->total_bytes_per_chunk);
+ buf += sprintf(buf, "use_nand_ecc.......... %d\n",
+ param->use_nand_ecc);
+ buf += sprintf(buf, "no_tags_ecc........... %d\n", param->no_tags_ecc);
+ buf += sprintf(buf, "is_yaffs2............. %d\n", param->is_yaffs2);
+ buf += sprintf(buf, "inband_tags........... %d\n", param->inband_tags);
+ buf += sprintf(buf, "empty_lost_n_found.... %d\n",
+ param->empty_lost_n_found);
+ buf += sprintf(buf, "disable_lazy_load..... %d\n",
+ param->disable_lazy_load);
+ buf += sprintf(buf, "refresh_period........ %d\n",
+ param->refresh_period);
+ buf += sprintf(buf, "n_caches.............. %d\n", param->n_caches);
+ buf += sprintf(buf, "n_reserved_blocks..... %d\n",
+ param->n_reserved_blocks);
+ buf += sprintf(buf, "always_check_erased... %d\n",
+ param->always_check_erased);
+
+ return buf;
+}
+
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+ buf +=
+ sprintf(buf, "data_bytes_per_chunk.. %d\n",
+ dev->data_bytes_per_chunk);
+ buf += sprintf(buf, "chunk_grp_bits........ %d\n", dev->chunk_grp_bits);
+ buf += sprintf(buf, "chunk_grp_size........ %d\n", dev->chunk_grp_size);
+ buf +=
+ sprintf(buf, "n_erased_blocks....... %d\n", dev->n_erased_blocks);
+ buf +=
+ sprintf(buf, "blocks_in_checkpt..... %d\n", dev->blocks_in_checkpt);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_tnodes.............. %d\n", dev->n_tnodes);
+ buf += sprintf(buf, "n_obj................. %d\n", dev->n_obj);
+ buf += sprintf(buf, "n_free_chunks......... %d\n", dev->n_free_chunks);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_page_writes......... %u\n", dev->n_page_writes);
+ buf += sprintf(buf, "n_page_reads.......... %u\n", dev->n_page_reads);
+ buf += sprintf(buf, "n_erasures............ %u\n", dev->n_erasures);
+ buf += sprintf(buf, "n_gc_copies........... %u\n", dev->n_gc_copies);
+ buf += sprintf(buf, "all_gcs............... %u\n", dev->all_gcs);
+ buf +=
+ sprintf(buf, "passive_gc_count...... %u\n", dev->passive_gc_count);
+ buf +=
+ sprintf(buf, "oldest_dirty_gc_count. %u\n",
+ dev->oldest_dirty_gc_count);
+ buf += sprintf(buf, "n_gc_blocks........... %u\n", dev->n_gc_blocks);
+ buf += sprintf(buf, "bg_gcs................ %u\n", dev->bg_gcs);
+ buf +=
+ sprintf(buf, "n_retired_writes...... %u\n", dev->n_retired_writes);
+ buf +=
+ sprintf(buf, "n_retired_blocks...... %u\n", dev->n_retired_blocks);
+ buf += sprintf(buf, "n_ecc_fixed........... %u\n", dev->n_ecc_fixed);
+ buf += sprintf(buf, "n_ecc_unfixed......... %u\n", dev->n_ecc_unfixed);
+ buf +=
+ sprintf(buf, "n_tags_ecc_fixed...... %u\n", dev->n_tags_ecc_fixed);
+ buf +=
+ sprintf(buf, "n_tags_ecc_unfixed.... %u\n",
+ dev->n_tags_ecc_unfixed);
+ buf += sprintf(buf, "cache_hits............ %u\n", dev->cache_hits);
+ buf +=
+ sprintf(buf, "n_deleted_files....... %u\n", dev->n_deleted_files);
+ buf +=
+ sprintf(buf, "n_unlinked_files...... %u\n", dev->n_unlinked_files);
+ buf += sprintf(buf, "refresh_count......... %u\n", dev->refresh_count);
+ buf += sprintf(buf, "n_bg_deletions........ %u\n", dev->n_bg_deletions);
+
+ return buf;
+}
+
+static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+{
+ struct list_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+
+ /* Get proc_file_read() to step 'offset' by one on each successive call.
+ * We use 'offset' (*ppos) to indicate where we are in dev_list.
+ * This also assumes the user has posted a read buffer large
+ * enough to hold the complete output; but that's life in /proc.
+ */
+
+ *(int *)start = 1;
+
+ /* Print header first */
+ if (step == 0)
+ buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__ "\n");
+ else if (step == 1)
+ buf += sprintf(buf, "\n");
+ else {
+ step -= 2;
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (n < (step & ~1)) {
+ n += 2;
+ continue;
+ }
+ if ((step & 1) == 0) {
+ buf +=
+ sprintf(buf, "\nDevice %d \"%s\"\n", n,
+ dev->param.name);
+ buf = yaffs_dump_dev_part0(buf, dev);
+ } else {
+ buf = yaffs_dump_dev_part1(buf, dev);
+ }
+
+ break;
+ }
+ mutex_unlock(&yaffs_context_lock);
+ }
+
+ return buf - page < count ? buf - page : count;
+}
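+
+/* Reading the yaffs proc entry created in init_yaffs_fs() (normally
+ * /proc/yaffs) therefore yields a build banner, a blank line, and
+ * then, for each mounted yaffs device, two successive chunks of
+ * output: the parameter dump from yaffs_dump_dev_part0() followed by
+ * the runtime counters from yaffs_dump_dev_part1().
+ */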
+
+
+/*
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
+
+static struct {
+ char *mask_name;
+ unsigned mask_bitfield;
+} mask_flags[] = {
+ {"allocate", YAFFS_TRACE_ALLOCATE},
+ {"always", YAFFS_TRACE_ALWAYS},
+ {"background", YAFFS_TRACE_BACKGROUND},
+ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+ {"buffers", YAFFS_TRACE_BUFFERS},
+ {"bug", YAFFS_TRACE_BUG},
+ {"checkpt", YAFFS_TRACE_CHECKPOINT},
+ {"deletion", YAFFS_TRACE_DELETION},
+ {"erase", YAFFS_TRACE_ERASE},
+ {"error", YAFFS_TRACE_ERROR},
+ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+ {"gc", YAFFS_TRACE_GC},
+ {"lock", YAFFS_TRACE_LOCK},
+ {"mtd", YAFFS_TRACE_MTD},
+ {"nandaccess", YAFFS_TRACE_NANDACCESS},
+ {"os", YAFFS_TRACE_OS},
+ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+ {"scan", YAFFS_TRACE_SCAN},
+ {"mount", YAFFS_TRACE_MOUNT},
+ {"tracing", YAFFS_TRACE_TRACING},
+ {"sync", YAFFS_TRACE_SYNC},
+ {"write", YAFFS_TRACE_WRITE},
+ {"verify", YAFFS_TRACE_VERIFY},
+ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+ {"all", 0xffffffff},
+ {"none", 0},
+ {NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ unsigned rg = 0, mask_bitfield;
+ char *end;
+ char *mask_name;
+ const char *x;
+ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+ int pos = 0;
+
+ rg = yaffs_trace_mask;
+
+ while (!done && (pos < count)) {
+ done = 1;
+ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+
+ switch (buf[pos]) {
+ case '+':
+ case '-':
+ case '=':
+ add = buf[pos];
+ pos++;
+ break;
+
+ default:
+ add = ' ';
+ break;
+ }
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+ for (x = buf + pos, i = 0;
+ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ if (strcmp(substring, mask_flags[i].mask_name)
+ == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield =
+ mask_flags[i].mask_bitfield;
+ done = 0;
+ break;
+ }
+ }
+ }
+
+ if (mask_name != NULL) {
+ done = 0;
+ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+ case '+':
+ rg |= mask_bitfield;
+ break;
+ case '=':
+ rg = mask_bitfield;
+ break;
+ default:
+ rg |= mask_bitfield;
+ break;
+ }
+ }
+ }
+
+ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) ==
+ mask_flags[i].mask_bitfield) ? '+' : '-';
+ printk(KERN_DEBUG "%c%s\n", flag,
+ mask_flags[i].mask_name);
+ }
+ }
+
+ return count;
+}
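+
+/* Usage sketch for the trace-mask parser above, assuming the proc
+ * entry created in init_yaffs_fs() below ends up at /proc/yaffs (the
+ * masks shown are examples only):
+ *
+ * echo "+os +scan" > /proc/yaffs adds YAFFS_TRACE_OS and
+ * YAFFS_TRACE_SCAN to the current mask,
+ * echo "-gc" > /proc/yaffs clears YAFFS_TRACE_GC, and
+ * echo "=0x10000" > /proc/yaffs replaces the whole mask with a
+ * numeric value. YAFFS_TRACE_ALWAYS is always or-ed back in.
+ */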
+
+static int yaffs_proc_write(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+ {&yaffs_fs_type, 0},
+ {&yaffs2_fs_type, 0},
+ {NULL, 0}
+};
+
+static int __init init_yaffs_fs(void)
+{
+ int error = 0;
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n");
+#endif
+
+ mutex_init(&yaffs_context_lock);
+
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG, YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+ } else {
+ return -ENOMEM;
+ }
+
+
+ /* Now add the file system entries */
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+ if (!error)
+ fsinst->installed = 1;
+ fsinst++;
+ }
+
+ /* Any errors? uninstall */
+ if (error) {
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+ }
+
+ return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " removing.");
+
+ remove_proc_entry("yaffs", YPROC_ROOT);
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 000000000000..9eb603082547
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,433 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
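+/* Overview of the scan below: yaffs1_scan() rebuilds the in-memory
+ * state by reading the tags of every chunk in every block. A deleted
+ * or ECC-broken chunk just bumps the free count; an unused chunk
+ * either marks the block empty (chunk 0) or identifies the block
+ * currently being allocated from; a chunk with chunk_id > 0 is file
+ * data and is hooked into its owning object; chunk_id == 0 is an
+ * object header, which creates or updates the object, resolves
+ * duplicate headers by serial number, and queues shadow and hardlink
+ * fix-ups that are applied once the scan completes.
+ */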
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int result;
+
+ int chunk;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ struct yaffs_obj *hard_list = NULL;
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+
+ int alloc_failed = 0;
+
+ struct yaffs_shadow_fixer *shadow_fixers = NULL;
+
+ u8 *chunk_data;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs1_scan starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ }
+ bi++;
+ }
+
+ /* For each block.... */
+ for (blk = dev->internal_start_block;
+ !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+ cond_resched();
+
+ bi = yaffs_get_block_info(dev, blk);
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ for (c = 0; !alloc_failed && c < dev->param.chunks_per_block &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED
+ || tags.is_deleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->n_free_chunks++;
+ /*T((" %d %d deleted\n",blk,c)); */
+ } else if (!tags.chunk_used) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ /* this is the block being allocated from */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+ /* Set block finder here to encourage the allocator to go forth from here. */
+
+ }
+
+ dev->n_free_chunks +=
+ (dev->param.chunks_per_block - c);
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash (two data chunks with
+ * the same chunk_id).
+ */
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in) {
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, 1))
+ alloc_failed = 1;
+ }
+
+ endpos =
+ (tags.chunk_id -
+ 1) * dev->data_bytes_per_chunk +
+ tags.n_bytes;
+ if (in
+ && in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE
+ && in->variant.file_variant.scanned_size <
+ endpos) {
+ in->variant.file_variant.scanned_size =
+ endpos;
+ if (!dev->param.use_header_file_size) {
+ in->variant.
+ file_variant.file_size =
+ in->variant.
+ file_variant.scanned_size;
+ }
+
+ }
+ /* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in = yaffs_find_by_number(dev, tags.obj_id);
+ if (in && in->variant_type != oh->type) {
+ /* This should not happen, but somehow
+ * we've ended up with an obj_id that has been reused but not yet
+ * deleted, and worse still it has changed type. Delete the old object.
+ */
+
+ yaffs_del_obj(in);
+
+ in = 0;
+ }
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ oh->type);
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadows_obj > 0) {
+
+ struct yaffs_shadow_fixer *fixer;
+ fixer =
+ kmalloc(sizeof
+ (struct yaffs_shadow_fixer),
+ GFP_NOFS);
+ if (fixer) {
+ fixer->next = shadow_fixers;
+ shadow_fixers = fixer;
+ fixer->obj_id = tags.obj_id;
+ fixer->shadowed_id =
+ oh->shadows_obj;
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Shadow fixer: %d shadows %d",
+ fixer->obj_id,
+ fixer->shadowed_id);
+
+ }
+
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one. We have a duplicate and need to resolve it. */
+
+ unsigned existing_serial = in->serial;
+ unsigned new_serial =
+ tags.serial_number;
+
+ if (((existing_serial + 1) & 3) ==
+ new_serial) {
+ /* Use the new one - destroy the existing one */
+ yaffs_chunk_del(dev,
+ in->hdr_chunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (!parent)
+ alloc_failed = 1;
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ if (0 && (parent == dev->del_dir ||
+ parent ==
+ dev->unlinked_dir)) {
+ in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
+ dev->n_deleted_files++;
+ }
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run through this
+ * list and fix up all the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->param.
+ use_header_file_size)
+
+ in->variant.
+ file_variant.file_size
+ = oh->file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.
+ hardlink_variant.equiv_id =
+ oh->equiv_id;
+ in->hard_links.next =
+ (struct list_head *)
+ hard_list;
+ hard_list = in;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.
+ alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.
+ symlink_variant.alias)
+ alloc_failed = 1;
+ break;
+ }
+
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then treat it as fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ }
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, so it's time to add
+ * these hardlinks.
+ */
+
+ yaffs_link_fixup(dev, hard_list);
+
+ /* Fix up any shadowed objects */
+ {
+ struct yaffs_shadow_fixer *fixer;
+ struct yaffs_obj *obj;
+
+ while (shadow_fixers) {
+ fixer = shadow_fixers;
+ shadow_fixers = fixer->next;
+ /* Complete the rename transaction by deleting the shadowed object
+ * then setting the object header to unshadowed.
+ */
+ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+ if (obj)
+ yaffs_del_obj(obj);
+
+ obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+ if (obj)
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ kfree(fixer);
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 000000000000..db23e04973ba
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 000000000000..33397af7003d
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1598 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+
+/*
+ * Checkpoints are really no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block && b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad. (ie. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this one that have
+ * discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
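+
+/*
+ * Illustrative example (figures assumed, not from a real device): if a block
+ * holding a shrink header has seq_number 100 while some dirty block still
+ * carries seq_number 90, oldest_dirty_seq is 90 and the comparison above
+ * returns 0, so the shrink-header block is not garbage collected until the
+ * older blocks have been cleaned.
+ */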
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev * dev)
+{
+ u32 b;
+
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
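+
+/*
+ * Worked example (figures assumed): with refresh_period set to 500 the
+ * countdown in refresh_skip is reloaded here and, assuming it is decremented
+ * once per garbage-collection pass elsewhere, roughly every 500th pass this
+ * function returns the full block with the lowest seq_number so the caller
+ * can rewrite its old data onto fresher flash.
+ */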
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks =
+ (dev->param.end_block - dev->param.start_block + 1);
+
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) +
+ sizeof(u32)) * (dev->n_obj);
+ n_bytes += (dev->tnode_size + sizeof(u32)) * (dev->n_tnodes);
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
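+
+/*
+ * Rough worked example (all figures assumed): for a device with 2048-byte
+ * data chunks, 64 chunks per block, 1000 blocks, 500 objects and 200 tnodes,
+ * n_bytes is dominated by
+ *   1000 * sizeof(struct yaffs_block_info)        block states
+ *   1000 * chunk_bit_stride                       chunk-in-use bitmaps
+ *   500 * (sizeof(struct yaffs_checkpt_obj) + 4)  object records
+ *   200 * (tnode_size + 4)                        tnode records
+ * which comes to a few tens of kilobytes, well under the 128 KiB held by one
+ * block, so n_blocks ends up as just the 3 added for rounding and spares.
+ */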
+
+/*--------------------- Checkpointing --------------------*/
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+ struct yaffs_dev *dev)
+{
+ cp->n_erased_blocks = dev->n_erased_blocks;
+ cp->alloc_block = dev->alloc_block;
+ cp->alloc_page = dev->alloc_page;
+ cp->n_free_chunks = dev->n_free_chunks;
+
+ cp->n_deleted_files = dev->n_deleted_files;
+ cp->n_unlinked_files = dev->n_unlinked_files;
+ cp->n_bg_deletions = dev->n_bg_deletions;
+ cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+ struct yaffs_checkpt_dev *cp)
+{
+ dev->n_erased_blocks = cp->n_erased_blocks;
+ dev->alloc_block = cp->alloc_block;
+ dev->alloc_page = cp->alloc_page;
+ dev->n_free_chunks = cp->n_free_chunks;
+
+ dev->n_deleted_files = cp->n_deleted_files;
+ dev->n_unlinked_files = cp->n_unlinked_files;
+ dev->n_bg_deletions = cp->n_bg_deletions;
+ dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ /* Write block info */
+ if (ok) {
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) ==
+ n_bytes);
+ }
+
+ /* Write chunk bits */
+ if (ok) {
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) ==
+ n_bytes);
+ }
+ return ok ? 1 : 0;
+
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+ struct yaffs_checkpt_obj *cp)
+{
+
+ struct yaffs_obj *parent;
+
+ if (obj->variant_type != cp->variant_type) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+ cp->obj_id, cp->variant_type, cp->hdr_chunk,
+ obj->variant_type);
+ return 0;
+ }
+
+ obj->obj_id = cp->obj_id;
+
+ if (cp->parent_id)
+ parent = yaffs_find_or_create_by_number(obj->my_dev,
+ cp->parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if (parent) {
+ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+ cp->obj_id, cp->parent_id,
+ cp->variant_type, cp->hdr_chunk,
+ parent->variant_type);
+ return 0;
+ }
+ yaffs_add_obj_to_dir(parent, obj);
+ }
+
+ obj->hdr_chunk = cp->hdr_chunk;
+ obj->variant_type = cp->variant_type;
+ obj->deleted = cp->deleted;
+ obj->soft_del = cp->soft_del;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->rename_allowed = cp->rename_allowed;
+ obj->unlink_allowed = cp->unlink_allowed;
+ obj->serial = cp->serial;
+ obj->n_data_chunks = cp->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+ if (obj->hdr_chunk > 0)
+ obj->lazy_loaded = 1;
+ return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset << YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ }
+ } else if (level == 0) {
+ u32 base_offset =
+ chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr
+ (dev, &base_offset,
+ sizeof(base_offset)) == sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr
+ (dev, tn,
+ dev->tnode_size) == dev->tnode_size);
+ }
+ }
+
+ return ok;
+
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr
+ (obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+ }
+
+ return ok ? 1 : 0;
+}
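+
+/*
+ * The level-0 tnodes of a file are checkpointed as a stream of records,
+ * each a u32 chunk offset followed by tnode_size bytes of tnode data,
+ * terminated by an all-ones (~0) offset; yaffs2_rd_checkpt_tnodes() below
+ * reads the stream back until it sees that terminator.
+ */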
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn) {
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ } else {
+ ok = 0;
+ }
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_struct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr
+ (dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok && obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes(obj);
+ }
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xFF, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ struct yaffs_obj *hard_list = NULL;
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj->hard_links.next =
+ (struct list_head *)hard_list;
+ hard_list = obj;
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!yaffs2_checkpt_required(dev)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint write");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 1);
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint device");
+ ok = yaffs2_wr_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint objects");
+ ok = yaffs2_wr_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok)
+ ok = yaffs2_wr_checkpt_sum(dev);
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return dev->is_checkpointed;
+}
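+
+/*
+ * On-flash checkpoint stream layout, as implied by the writer above:
+ * head validity marker, device record plus block info and chunk bits,
+ * one record per live object (files followed by their tnode stream),
+ * an all-0xFF end-of-objects record, tail validity marker, checksum.
+ * The reader below consumes the same sequence and rejects the checkpoint
+ * if any stage fails.
+ */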
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!dev->param.is_yaffs2)
+ ok = 0;
+
+ if (ok && dev->param.skip_checkpt_rd) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint read");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint device");
+ ok = yaffs2_rd_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint objects");
+ ok = yaffs2_rd_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok) {
+ ok = yaffs2_rd_checkpt_sum(dev);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint checksum %d", ok);
+ }
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return ok ? 1 : 0;
+
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs2_checkpt_save(struct yaffs_dev *dev)
+{
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* If new_size > old_file_size then we are going to be writing a hole.
+ * If the hole is small then write zeros, otherwise write a start-of-hole
+ * marker.
+ */
+
+ loff_t old_file_size;
+ int increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+
+ u8 *local_buffer = NULL;
+
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ int pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+
+ /* If we were out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+
+}
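+
+/*
+ * Example (assuming 2048-byte data chunks): YAFFS_SMALL_HOLE_THRESHOLD is 4,
+ * so growing a file by less than 8 KiB writes explicit zero-filled chunks
+ * (space permitting), while a larger extension just writes a hole-start
+ * object header and leaves the missing chunks unwritten until real data
+ * arrives.
+ */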
+
+struct yaffs_block_index {
+ int seq;
+ int block;
+};
+
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+ int aseq = ((struct yaffs_block_index *)a)->seq;
+ int bseq = ((struct yaffs_block_index *)b)->seq;
+ int ablock = ((struct yaffs_block_index *)a)->block;
+ int bblock = ((struct yaffs_block_index *)b)->block;
+ if (aseq == bseq)
+ return ablock - bblock;
+ else
+ return aseq - bseq;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+
+ int chunk;
+ int result;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ struct yaffs_obj *hard_list = NULL;
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int is_unlinked;
+ u8 *chunk_data;
+
+ int file_size;
+ int is_shrink;
+ int found_chunks;
+ int equiv_id;
+ int alloc_failed = 0;
+
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ block_index = kmalloc(n_blocks * sizeof(struct yaffs_block_index),
+ GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+
+ n_to_scan++;
+
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
+ for (block_iter = end_iter; !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+
+ bi = yaffs_get_block_info(dev, blk);
+
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ for (c = dev->param.chunks_per_block - 1;
+ !alloc_failed && c >= 0 &&
+ (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing the erased
+ * check. Just skip it so that it can be deleted.
+ * But, more typically, we get here when this is an unallocated
+ * chunk and this means that either the block is empty or
+ * this is the one being allocated from.
+ */
+
+ if (found_chunks) {
+ /* This is a chunk that was skipped due to failing the erased check */
+ } else if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number == bi->seq_number) {
+ /* this is the block being allocated from */
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+ } else {
+ /* This is a partially written block that is not
+ * the current allocation block.
+ */
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, c);
+
+ dev->n_free_chunks++;
+
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ (tags.chunk_id > 0
+ && tags.n_bytes > dev->data_bytes_per_chunk)
+ || tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, c, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ dev->n_free_chunks++;
+
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+ u32 chunk_base =
+ (tags.chunk_id - 1) * dev->data_bytes_per_chunk;
+
+ found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in) {
+ /* Out of memory */
+ alloc_failed = 1;
+ }
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ chunk_base < in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by a resize */
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, -1)) {
+ alloc_failed = 1;
+ }
+
+ /* File size is calculated by looking at the data chunks if we have not
+ * seen an object header yet. Stop this practice once we find an object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid && /* have not got an object header yet */
+ in->variant.file_variant.scanned_size < endpos) {
+ in->variant.file_variant.scanned_size = endpos;
+ in->variant.file_variant.file_size = endpos;
+ }
+
+ } else if (in) {
+ /* This chunk has been invalidated by a resize, or a past file deletion
+ * so delete the chunk*/
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ tags.extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load) ||
+ tags.extra_shadows ||
+ (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we need to read the chunk.
+ * TODO In future we can probably defer reading the chunk and
+ * live with invalid data until needed.
+ */
+
+ result = yaffs_rd_chunk_tags_nand(dev,
+ chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if it got corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ continue;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be discarded, but
+ * we first have to suck out resize info if it is a file.
+ */
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extra_available &&
+ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE))) {
+ u32 this_size =
+ (oh) ? oh->file_size : tags.extra_length;
+ u32 parent_obj_id =
+ (oh) ? oh->parent_obj_id : tags.extra_parent_id;
+
+ is_shrink =
+ (oh) ? oh->is_shrink : tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked at start also means deleted)
+ * we treat the file size as being zeroed at this point.
+ */
+ if (parent_obj_id ==
+ YAFFS_OBJECTID_DELETED
+ || parent_obj_id ==
+ YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink &&
+ in->variant.file_variant.shrink_size > this_size)
+ in->variant.file_variant.shrink_size = this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type))
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+ oh ?
+ oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+
+ if (oh) {
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj(dev,
+ oh->shadows_obj, 1);
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ parent = yaffs_find_or_create_by_number(dev,
+ oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ file_size = oh->file_size;
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+
+ } else {
+ in->variant_type = tags.extra_obj_type;
+ parent = yaffs_find_or_create_by_number(dev,
+ tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_length;
+ is_shrink = tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type = YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->variant.dir_variant.children);
+ } else if (!parent ||
+ parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir)
+ || (parent == dev->unlinked_dir);
+
+ if (is_shrink) {
+ /* Mark the block as having a shrink header */
+ bi->has_shrink_hdr = 1;
+ }
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run
+ * through this list and fix up all the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+
+ if (in->variant.file_variant.scanned_size <
+ file_size) {
+ /* This covers the case where the file size is greater
+ * than where the data is.
+ * This will happen if the file is resized to be larger
+ * than its current data extents.
+ */
+ in->variant.file_variant.file_size = file_size;
+ in->variant.file_variant.scanned_size = file_size;
+ }
+
+ if (in->variant.file_variant.shrink_size > file_size)
+ in->variant.file_variant.shrink_size = file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ if (!is_unlinked) {
+ in->variant.hardlink_variant.equiv_id = equiv_id;
+ in->hard_links.next =
+ (struct list_head *)hard_list;
+ hard_list = in;
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (oh) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+
+ }
+
+ }
+
+ } /* End of scanning for each chunk */
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, so it's time to add these
+ * hardlinks.
+ */
+ yaffs_link_fixup(dev, hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 000000000000..e1a9287fc506
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100644
index 000000000000..8183425448cd
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,70 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_LINUX_H__
+#define __YPORTENV_LINUX_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+
+#define YAFFS_ROOT_MODE 0755
+#define YAFFS_LOSTNFOUND_MODE 0700
+
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
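+
+/*
+ * Example (hypothetical use): compile_time_assertion(sizeof(u32) == 4) is a
+ * no-op, while a false condition makes __builtin_choose_expr select the
+ * (void)0 arm, which cannot initialise an int and so breaks the build.
+ */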
+
+
+#ifndef Y_DUMP_STACK
+#define Y_DUMP_STACK() dump_stack()
+#endif
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if(yaffs_trace_mask & (msk)) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while(0)
+
+#ifndef YBUG
+#define YBUG() do {\
+ yaffs_trace(YAFFS_TRACE_BUG,\
+ "bug " __FILE__ " %d",\
+ __LINE__);\
+ Y_DUMP_STACK();\
+} while (0)
+#endif
+
+#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db22d136ad08..ff4d370a4e89 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -473,7 +473,7 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ *(CONFIG_GCOV_CTORS) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 4a08a664ff1f..0ead502e17d2 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -37,6 +37,7 @@ typedef union dfixed {
#define dfixed_init(A) { .full = dfixed_const((A)) }
#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
#define dfixed_trunc(A) ((A).full >> 12)
+#define dfixed_frac(A) ((A).full & ((1 << 12) - 1))
static inline u32 dfixed_floor(fixed20_12 A)
{
diff --git a/include/linux/adt7461.h b/include/linux/adt7461.h
new file mode 100644
index 000000000000..7307395ba7c8
--- /dev/null
+++ b/include/linux/adt7461.h
@@ -0,0 +1,41 @@
+/*
+ * include/linux/adt7461.h
+ *
+ * ADT7461, temperature monitoring device from ON Semiconductors
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_ADT7461_H
+#define _LINUX_ADT7461_H
+
+#include <linux/types.h>
+
+struct adt7461_platform_data {
+ bool supported_hwrev;
+ bool ext_range;
+ bool therm2;
+ u8 conv_rate;
+ u8 offset;
+ u8 hysteresis;
+ u8 shutdown_ext_limit;
+ u8 shutdown_local_limit;
+ u8 throttling_ext_limit;
+ void (*alarm_fn)(bool raised);
+};
+
+#endif /* _LINUX_ADT7461_H */
diff --git a/include/linux/akm8975.h b/include/linux/akm8975.h
new file mode 100644
index 000000000000..6a7c43260018
--- /dev/null
+++ b/include/linux/akm8975.h
@@ -0,0 +1,87 @@
+/*
+ * Definitions for akm8975 compass chip.
+ */
+#ifndef AKM8975_H
+#define AKM8975_H
+
+#include <linux/ioctl.h>
+
+/*! \name AK8975 operation mode
+ \anchor AK8975_Mode
+ Defines an operation mode of the AK8975.*/
+/*! @{*/
+#define AK8975_MODE_SNG_MEASURE 0x01
+#define AK8975_MODE_SELF_TEST 0x08
+#define AK8975_MODE_FUSE_ACCESS 0x0F
+#define AK8975_MODE_POWER_DOWN 0x00
+/*! @}*/
+
+#define RBUFF_SIZE 8 /* Rx buffer size */
+
+/*! \name AK8975 register address
+\anchor AK8975_REG
+Defines a register address of the AK8975.*/
+/*! @{*/
+#define AK8975_REG_WIA 0x00
+#define AK8975_REG_INFO 0x01
+#define AK8975_REG_ST1 0x02
+#define AK8975_REG_HXL 0x03
+#define AK8975_REG_HXH 0x04
+#define AK8975_REG_HYL 0x05
+#define AK8975_REG_HYH 0x06
+#define AK8975_REG_HZL 0x07
+#define AK8975_REG_HZH 0x08
+#define AK8975_REG_ST2 0x09
+#define AK8975_REG_CNTL 0x0A
+#define AK8975_REG_RSV 0x0B
+#define AK8975_REG_ASTC 0x0C
+#define AK8975_REG_TS1 0x0D
+#define AK8975_REG_TS2 0x0E
+#define AK8975_REG_I2CDIS 0x0F
+/*! @}*/
+
+/*! \name AK8975 fuse-rom address
+\anchor AK8975_FUSE
+Defines a read-only address of the fuse ROM of the AK8975.*/
+/*! @{*/
+#define AK8975_FUSE_ASAX 0x10
+#define AK8975_FUSE_ASAY 0x11
+#define AK8975_FUSE_ASAZ 0x12
+/*! @}*/
+
+#define AKMIO 0xA1
+
+/* IOCTLs for AKM library */
+#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x02, char[5])
+#define ECS_IOCTL_READ _IOWR(AKMIO, 0x03, char[5])
+#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x08, char[RBUFF_SIZE])
+#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x0C, short[12])
+#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x0D, int)
+#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x0E, int)
+#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short)
+
+/* IOCTLs for APPs */
+#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short)
+#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short)
+#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short)
+#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short)
+#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short)
+#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY
+/* Set raw magnetic vector flag */
+#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short)
+/* Get raw magnetic vector flag */
+#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short)
+#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short)
+
+
+struct akm8975_platform_data {
+ int intr;
+
+ int (*init)(void);
+ void (*exit)(void);
+ int (*power_on)(void);
+ int (*power_off)(void);
+};
+
+#endif
+
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 21114810c7c0..60c737f7bda5 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -5,6 +5,15 @@
#define AMBA_MMCI_H
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+ struct sdio_cis cis;
+ struct sdio_cccr cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+};
/* Just some dummy forwarding */
struct dma_chan;
@@ -55,6 +64,9 @@ struct mmci_platform_data {
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
+ unsigned int status_irq;
+ struct embedded_sdio_data *embedded_sdio;
+ int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
};
#endif
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644
index 000000000000..0f904b3ba7f0
--- /dev/null
+++ b/include/linux/android_aid.h
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_NET_BT_ADMIN 3001
+#define AID_NET_BT 3002
+#define AID_INET 3003
+#define AID_NET_RAW 3004
+#define AID_NET_ADMIN 3005
+#define AID_NET_BW_STATS 3006 /* read bandwidth statistics */
+#define AID_NET_BW_ACCT 3007 /* change bandwidth statistics accounting */
+
+#endif
diff --git a/include/linux/android_alarm.h b/include/linux/android_alarm.h
new file mode 100644
index 000000000000..f8f14e793dbf
--- /dev/null
+++ b/include/linux/android_alarm.h
@@ -0,0 +1,106 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep running while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct alarm - the basic alarm structure
+ * @node: red black tree node for time ordered insertion
+ * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires: the absolute expiry time.
+ * @function: alarm expiry callback function
+ *
+ * The alarm structure must be initialized by alarm_init()
+ *
+ */
+
+struct alarm {
+ struct rb_node node;
+ enum android_alarm_type type;
+ ktime_t softexpires;
+ ktime_t expires;
+ void (*function)(struct alarm *);
+};
+
+void alarm_init(struct alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct alarm *));
+void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end);
+int alarm_try_to_cancel(struct alarm *alarm);
+int alarm_cancel(struct alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+int alarm_set_rtc(const struct timespec ts);
+
+#endif
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
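+
+/*
+ * The alarm type is packed into bits 4..7 of the ioctl number, so
+ * ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP) encodes command 2 with type 0
+ * and ANDROID_ALARM_IOCTL_TO_TYPE() recovers the type on the driver side.
+ * A typical (illustrative) user-space sequence, assuming the usual
+ * /dev/alarm device node:
+ *
+ *   int fd = open("/dev/alarm", O_RDWR);
+ *   struct timespec ts = { .tv_sec = when };
+ *   ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts);
+ *   ioctl(fd, ANDROID_ALARM_WAIT);   // blocks until an alarm fires
+ */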
+
+#endif
diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h
new file mode 100644
index 000000000000..f633621f5be3
--- /dev/null
+++ b/include/linux/android_pmem.h
@@ -0,0 +1,93 @@
+/* include/linux/android_pmem.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ANDROID_PMEM_H_
+#define _ANDROID_PMEM_H_
+
+#define PMEM_IOCTL_MAGIC 'p'
+#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
+#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
+#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
+#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
+/* This ioctl will allocate pmem space backing the file; it will fail
+ * if the file already has an allocation. Pass the length as the argument
+ * to the ioctl. */
+#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
+/* This will connect one pmem file to another; pass the file that is already
+ * backed in memory as the argument to the ioctl.
+ */
+#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
+/* Returns the total size of the pmem region it is sent to as a pmem_region
+ * struct (with offset set to 0).
+ */
+#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
+#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
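+
+/*
+ * Illustrative user-space use (device node name assumed, e.g. /dev/pmem):
+ *
+ *   struct pmem_region region;
+ *   int fd = open("/dev/pmem", O_RDWR);
+ *   ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);
+ *   void *p = mmap(NULL, region.len, PROT_READ | PROT_WRITE,
+ *                  MAP_SHARED, fd, 0);
+ *
+ * A second client may pass an already-backed file to PMEM_CONNECT to share
+ * the same physical region and then map a sub-range with PMEM_MAP.
+ */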
+
+struct android_pmem_platform_data
+{
+ const char* name;
+ /* starting physical address of memory region */
+ unsigned long start;
+ /* size of memory region */
+ unsigned long size;
+ /* set to indicate the region should not be managed with an allocator */
+ unsigned no_allocator;
+ /* set to indicate maps of this region should be cached; if a mix of
+ * cached and uncached is desired, set this and open the device with
+ * O_SYNC to get an uncached region */
+ unsigned cached;
+ /* The MSM7k has bits to enable a write buffer in the bus controller */
+ unsigned buffered;
+};
+
+struct pmem_region {
+ unsigned long offset;
+ unsigned long len;
+};
+
+#ifdef CONFIG_ANDROID_PMEM
+int is_pmem_file(struct file *file);
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+ unsigned long *end, struct file **filp);
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *end);
+void put_pmem_file(struct file* file);
+void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
+int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *));
+int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation);
+
+#else
+static inline int is_pmem_file(struct file *file) { return 0; }
+static inline int get_pmem_file(int fd, unsigned long *start,
+ unsigned long *vstart, unsigned long *end,
+ struct file **filp) { return -ENOSYS; }
+static inline int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *end) { return -ENOSYS; }
+static inline void put_pmem_file(struct file* file) { return; }
+static inline void flush_pmem_file(struct file *file, unsigned long start,
+ unsigned long len) { return; }
+static inline int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *)) { return -ENOSYS; }
+
+static inline int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation) { return -ENOSYS; }
+#endif
+
+#endif /* _ANDROID_PMEM_H_ */
+
diff --git a/include/linux/ashmem.h b/include/linux/ashmem.h
new file mode 100644
index 000000000000..1976b10ef93e
--- /dev/null
+++ b/include/linux/ashmem.h
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
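+
+/*
+ * Typical (illustrative) user-space sequence, assuming the usual /dev/ashmem
+ * device node:
+ *
+ *   int fd = open("/dev/ashmem", O_RDWR);
+ *   ioctl(fd, ASHMEM_SET_NAME, "my-region");   // optional, before mmap
+ *   ioctl(fd, ASHMEM_SET_SIZE, 4096);
+ *   void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * Pages can later be unpinned with ASHMEM_UNPIN; a subsequent ASHMEM_PIN
+ * returns ASHMEM_WAS_PURGED if the kernel reclaimed them in the meantime.
+ */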
+
+#endif /* _LINUX_ASHMEM_H */
diff --git a/include/linux/bq27x00.h b/include/linux/bq27x00.h
new file mode 100644
index 000000000000..b95cd2035238
--- /dev/null
+++ b/include/linux/bq27x00.h
@@ -0,0 +1,30 @@
+/*
+ * include/linux/bq27x00.h
+ *
+ * BQ27x00 battery driver
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_BQ27x00_H
+#define _LINUX_BQ27x00_H
+
+struct bq27x00_platform_data {
+ int ac_persent_gpio;
+};
+
+#endif /* _LINUX_BQ27x00_H */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index da7e4bc34e8c..cefedc8ced97 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -84,12 +84,6 @@ enum {
CSS_REMOVED, /* This CSS is dead */
};
-/* Caller must verify that the css is not for root cgroup */
-static inline void __css_get(struct cgroup_subsys_state *css, int count)
-{
- atomic_add(count, &css->refcnt);
-}
-
/*
* Call css_get() to hold a reference on the css; it can be used
* for a reference obtained via:
@@ -97,6 +91,7 @@ static inline void __css_get(struct cgroup_subsys_state *css, int count)
* - task->cgroups for a locked task
*/
+extern void __css_get(struct cgroup_subsys_state *css, int count);
static inline void css_get(struct cgroup_subsys_state *css)
{
/* We don't need to reference count the root state */
@@ -143,10 +138,7 @@ static inline void css_put(struct cgroup_subsys_state *css)
enum {
/* Control Group is dead */
CGRP_REMOVED,
- /*
- * Control Group has previously had a child cgroup or a task,
- * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
- */
+ /* Control Group has ever had a child cgroup or a task */
CGRP_RELEASABLE,
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
@@ -287,6 +279,7 @@ struct css_set {
/* For RCU-protected deletion */
struct rcu_head rcu_head;
+ struct work_struct work;
};
/*
@@ -466,6 +459,7 @@ struct cgroup_subsys {
struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+ int (*allow_attach)(struct cgroup *cgrp, struct task_struct *tsk);
int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk);
int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b1a635acf72a..54d948ec49ab 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -207,4 +207,11 @@ static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
#endif /* _LINUX_CPU_H_ */
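
A hedged sketch of a consumer of the idle notifier hooks added above; the callback name and the debug prints are illustrative, and the standard notifier_block calling convention is assumed.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_idle_notify(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	/* val is IDLE_START when a cpu enters idle, IDLE_END on exit */
	if (val == IDLE_START)
		pr_debug("entering idle\n");
	else if (val == IDLE_END)
		pr_debug("leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_notify,
};

/* from driver init: idle_notifier_register(&example_idle_nb); */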
diff --git a/include/linux/cpuacct.h b/include/linux/cpuacct.h
new file mode 100644
index 000000000000..8f68e733fe19
--- /dev/null
+++ b/include/linux/cpuacct.h
@@ -0,0 +1,43 @@
+/* include/linux/cpuacct.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _CPUACCT_H_
+#define _CPUACCT_H_
+
+#include <linux/cgroup.h>
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * Platform specific CPU frequency hooks for cpuacct. These functions are
+ * called from the scheduler.
+ */
+struct cpuacct_charge_calls {
+ /*
+ * Platforms can take advantage of this data and use
+ * per-cpu allocations if necessary.
+ */
+ void (*init) (void **cpuacct_data);
+ void (*charge) (void *cpuacct_data, u64 cputime, unsigned int cpu);
+ void (*cpufreq_show) (void *cpuacct_data, struct cgroup_map_cb *cb);
+ /* Returns power consumed in milliWatt seconds */
+ u64 (*power_usage) (void *cpuacct_data);
+};
+
+int cpuacct_charge_register(struct cpuacct_charge_calls *fn);
+
+#endif /* CONFIG_CGROUP_CPUACCT */
+
+#endif // _CPUACCT_H_
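
A hedged platform-side sketch of how the hooks above might be wired up. The single running total is a placeholder (a real platform would likely use per-cpu data and a real power model), and leaving cpufreq_show unset is an assumption about how the core treats NULL hooks.

#include <linux/cpuacct.h>
#include <linux/slab.h>

static void example_cpuacct_init(void **cpuacct_data)
{
	/* one running total per cgroup */
	*cpuacct_data = kzalloc(sizeof(u64), GFP_KERNEL);
}

static void example_cpuacct_charge(void *cpuacct_data, u64 cputime,
				   unsigned int cpu)
{
	if (cpuacct_data)
		*(u64 *)cpuacct_data += cputime;
}

static u64 example_cpuacct_power(void *cpuacct_data)
{
	/* placeholder: reports accumulated cputime, not real milliWatt seconds */
	return cpuacct_data ? *(u64 *)cpuacct_data : 0;
}

static struct cpuacct_charge_calls example_cpuacct_calls = {
	.init = example_cpuacct_init,
	.charge = example_cpuacct_charge,
	.power_usage = example_cpuacct_power,
};

/* from platform init: cpuacct_charge_register(&example_cpuacct_calls); */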
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6216115c7789..c6126b9fb7cf 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -363,6 +363,9 @@ extern struct cpufreq_governor cpufreq_gov_ondemand;
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
extern struct cpufreq_governor cpufreq_gov_conservative;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
#endif
diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h
new file mode 100644
index 000000000000..8343b817af31
--- /dev/null
+++ b/include/linux/earlysuspend.h
@@ -0,0 +1,56 @@
+/* include/linux/earlysuspend.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_EARLYSUSPEND_H
+#define _LINUX_EARLYSUSPEND_H
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/list.h>
+#endif
+
+/* The early_suspend structure defines suspend and resume hooks to be called
+ * when the user visible sleep state of the system changes, and a level to
+ * control the order. They can be used to turn off the screen and input
+ * devices that are not used for wakeup.
+ * Suspend handlers are called in low to high level order, resume handlers are
+ * called in the opposite order. If, when calling register_early_suspend,
+ * the suspend handlers have already been called without a matching call to the
+ * resume handlers, the suspend handler will be called directly from
+ * register_early_suspend. This direct call can violate the normal level order.
+ */
+enum {
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50,
+ EARLY_SUSPEND_LEVEL_STOP_DRAWING = 100,
+ EARLY_SUSPEND_LEVEL_DISABLE_FB = 150,
+};
+struct early_suspend {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct list_head link;
+ int level;
+ void (*suspend)(struct early_suspend *h);
+ void (*resume)(struct early_suspend *h);
+#endif
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void register_early_suspend(struct early_suspend *handler);
+void unregister_early_suspend(struct early_suspend *handler);
+#else
+#define register_early_suspend(handler) do { } while (0)
+#define unregister_early_suspend(handler) do { } while (0)
+#endif
+
+#endif
+
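A minimal sketch of a client of the interface above, assuming CONFIG_HAS_EARLYSUSPEND is set; the handler bodies are placeholders.

#include <linux/earlysuspend.h>

static void example_suspend(struct early_suspend *h)
{
	/* e.g. blank the panel, stop polling timers */
}

static void example_resume(struct early_suspend *h)
{
	/* undo whatever example_suspend did */
}

static struct early_suspend example_es = {
	.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
	.suspend = example_suspend,
	.resume = example_resume,
};

/* register_early_suspend(&example_es) in probe,
 * unregister_early_suspend(&example_es) in remove. */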
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 1d6836c498dd..bb565a4c2e1c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -226,6 +226,19 @@ struct fb_bitfield {
#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */
#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */
+#define FB_FLAG_RATIO_4_3 64
+#define FB_FLAG_RATIO_16_9 128
+#define FB_FLAG_PIXEL_REPEAT 256
+
+/*
+ * Stereo modes
+ */
+#define FB_VMODE_STEREO_NONE 0x00000000 /* not stereo */
+#define FB_VMODE_STEREO_FRAME_PACK 0x01000000 /* frame packing */
+#define FB_VMODE_STEREO_TOP_BOTTOM 0x02000000 /* top-bottom */
+#define FB_VMODE_STEREO_LEFT_RIGHT 0x04000000 /* left-right */
+#define FB_VMODE_STEREO_MASK 0xFF000000
+
/*
* Display rotation support
*/
@@ -439,6 +452,8 @@ struct file;
#define FB_MISC_PRIM_COLOR 1
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
+#define FB_MISC_HDMI 4 /* display supports HDMI signaling */
+
struct fb_chroma {
__u32 redx; /* in fraction of 1024 */
__u32 greenx;
@@ -1104,6 +1119,7 @@ extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
+#define CEA_MODEDB_SIZE 65
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
@@ -1156,7 +1172,7 @@ struct fb_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[64];
+extern const struct fb_videomode cea_modes[];
struct fb_modelist {
struct list_head list;
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index fffdf00f87b9..1962102d132e 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -58,6 +58,13 @@ enum fsl_usb2_phy_modes {
FSL_USB2_PHY_SERIAL,
};
+enum fsl_usb2_phy_types {
+ FSL_USB2_PHY_TYPE_UTMIP = 1,
+ FSL_USB2_PHY_TYPE_ULPI = 2,
+ FSL_USB2_PHY_TYPE_HSIC = 3,
+ FSL_USB2_PHY_TYPE_ICUSB = 4,
+};
+
struct clk;
struct platform_device;
@@ -72,7 +79,6 @@ struct fsl_usb2_platform_data {
void (*exit)(struct platform_device *);
void __iomem *regs; /* ioremap'd register base */
struct clk *clk;
- unsigned power_budget; /* hcd->power_budget */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned es:1; /* need USBMODE:ES */
@@ -95,6 +101,9 @@ struct fsl_usb2_platform_data {
u32 pm_configured_flag;
u32 pm_portsc;
u32 pm_usbgenctrl;
+
+ void *phy_config;
+ enum fsl_usb2_phy_types usb_phy_type;
};
/* Flags in fsl_usb2_mph_platform_data */
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644
index 000000000000..2613fc5e4a93
--- /dev/null
+++ b/include/linux/gpio_event.h
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+ int count;
+ struct input_dev *dev[];
+};
+enum {
+ GPIO_EVENT_FUNC_UNINIT = 0x0,
+ GPIO_EVENT_FUNC_INIT = 0x1,
+ GPIO_EVENT_FUNC_SUSPEND = 0x2,
+ GPIO_EVENT_FUNC_RESUME = 0x3,
+};
+struct gpio_event_info {
+ int (*func)(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info,
+ void **data, int func);
+ int (*event)(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info,
+ void **data, unsigned int dev, unsigned int type,
+ unsigned int code, int value); /* out events */
+ bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+ const char *name;
+ struct gpio_event_info **info;
+ size_t info_count;
+ int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+ const char *names[]; /* If name is NULL, names contain a NULL */
+ /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+ /* unset: drive active output low, set: drive active output high */
+ GPIOKPF_ACTIVE_HIGH = 1U << 0,
+ GPIOKPF_DEBOUNCE = 1U << 1,
+ GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+ GPIOKPF_REMOVE_PHANTOM_KEYS = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+ GPIOKPF_DEBOUNCE,
+ GPIOKPF_DRIVE_INACTIVE = 1U << 3,
+ GPIOKPF_LEVEL_TRIGGERED_IRQ = 1U << 4,
+ GPIOKPF_PRINT_UNMAPPED_KEYS = 1U << 16,
+ GPIOKPF_PRINT_MAPPED_KEYS = 1U << 17,
+ GPIOKPF_PRINT_PHANTOM_KEYS = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+ (((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+ /* initialize to gpio_event_matrix_func */
+ struct gpio_event_info info;
+ /* size must be ninputs * noutputs */
+ const unsigned short *keymap;
+ unsigned int *input_gpios;
+ unsigned int *output_gpios;
+ unsigned int ninputs;
+ unsigned int noutputs;
+ /* time to wait before reading inputs after driving each output */
+ ktime_t settle_time;
+ /* time to wait before scanning the keypad a second time */
+ ktime_t debounce_delay;
+ ktime_t poll_time;
+ unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+ GPIOEDF_ACTIVE_HIGH = 1U << 0,
+/* GPIOEDF_USE_DOWN_IRQ = 1U << 1, */
+/* GPIOEDF_USE_IRQ = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+ GPIOEDF_PRINT_KEYS = 1U << 8,
+ GPIOEDF_PRINT_KEY_DEBOUNCE = 1U << 9,
+ GPIOEDF_PRINT_KEY_UNSTABLE = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+ uint32_t gpio:16;
+ uint32_t code:10;
+ uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+ /* initialize to gpio_event_input_func */
+ struct gpio_event_info info;
+ ktime_t debounce_time;
+ ktime_t poll_time;
+ uint16_t flags;
+ uint16_t type;
+ const struct gpio_event_direct_entry *keymap;
+ size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data,
+ unsigned int dev, unsigned int type,
+ unsigned int code, int value);
+struct gpio_event_output_info {
+ /* initialize to gpio_event_output_func and gpio_event_output_event */
+ struct gpio_event_info info;
+ uint16_t flags;
+ uint16_t type;
+ const struct gpio_event_direct_entry *keymap;
+ size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+ GPIOEAF_PRINT_UNKNOWN_DIRECTION = 1U << 16,
+ GPIOEAF_PRINT_RAW = 1U << 17,
+ GPIOEAF_PRINT_EVENT = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+ /* initialize to gpio_event_axis_func */
+ struct gpio_event_info info;
+ uint8_t count; /* number of gpios for this axis */
+ uint8_t dev; /* device index when using multiple input devices */
+ uint8_t type; /* EV_REL or EV_ABS */
+ uint16_t code;
+ uint16_t decoded_size;
+ uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+ uint32_t *gpio;
+ uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+ struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+ struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
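
A hedged board-file sketch using the direct-input path declared above; the GPIO numbers and keycodes are made up, and the companion platform_device (named GPIO_EVENT_DEV_NAME) is only hinted at in the trailing comment.

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>

static const struct gpio_event_direct_entry example_keymap[] = {
	{ .gpio = 10, .code = KEY_VOLUMEUP },
	{ .gpio = 11, .code = KEY_VOLUMEDOWN },
};

static struct gpio_event_input_info example_input_info = {
	.info.func = gpio_event_input_func,
	.flags = GPIOEDF_PRINT_KEYS,	/* log events during board bring-up */
	.type = EV_KEY,
	.keymap = example_keymap,
	.keymap_size = ARRAY_SIZE(example_keymap),
};

static struct gpio_event_info *example_info[] = {
	&example_input_info.info,
};

static struct gpio_event_platform_data example_keypad_pdata = {
	.name = "example-keypad",
	.info = example_info,
	.info_count = ARRAY_SIZE(example_info),
};
/* registered as platform data of a device named GPIO_EVENT_DEV_NAME */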
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index b5ca4b2c08ec..d944b2ee3d3b 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -23,6 +23,7 @@ struct gpio_keys_platform_data {
int (*enable)(struct device *dev);
void (*disable)(struct device *dev);
const char *name; /* input device name */
+ int (*wakeup_key)(void);
};
#endif
diff --git a/include/linux/gpio_scrollwheel.h b/include/linux/gpio_scrollwheel.h
new file mode 100644
index 000000000000..33d17a0199ea
--- /dev/null
+++ b/include/linux/gpio_scrollwheel.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _GPIO_SCROLLWHEEL_H
+#define _GPIO_SCROLLWHEEL_H
+
+#define GPIO_SCROLLWHEEL_PIN_ONOFF 0
+#define GPIO_SCROLLWHEEL_PIN_PRESS 1
+#define GPIO_SCROLLWHEEL_PIN_ROT1 2
+#define GPIO_SCROLLWHEEL_PIN_ROT2 3
+#define GPIO_SCROLLWHEEL_PIN_MAX 4
+
+struct gpio_scrollwheel_button {
+ /* Configuration parameters */
+ int pinaction; /* GPIO_SCROLLWHEEL_PIN_* */
+ int gpio;
+ char *desc;
+ int active_low;
+ int debounce_interval; /* debounce ticks interval in msecs */
+};
+
+struct gpio_scrollwheel_platform_data {
+ struct gpio_scrollwheel_button *buttons;
+ int nbuttons;
+ unsigned int rep:1; /* enable input subsystem auto repeat */
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+};
+
+#endif
+
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9cf8e7ae7450..ab64d4768f65 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -598,6 +598,8 @@ struct hid_usage_id {
* @input_mapping: invoked on input registering before mapping an usage
* @input_mapped: invoked on input registering after mapping an usage
* @feature_mapping: invoked on feature registering
+ * @input_register: called just before input device is registered after reports
+ * are parsed.
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -644,6 +646,8 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
+ int (*input_register)(struct hid_device *hdev, struct hid_input
+ *hidinput);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
diff --git a/include/linux/i2c-slave.h b/include/linux/i2c-slave.h
new file mode 100755
index 000000000000..34df64f73f6f
--- /dev/null
+++ b/include/linux/i2c-slave.h
@@ -0,0 +1,259 @@
+/*
+ * i2c-slave.h - definitions for the i2c-slave-bus interface
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#ifndef _LINUX_I2C_SLAVE_H
+#define _LINUX_I2C_SLAVE_H
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+/* --- General options ------------------------------------------------ */
+
+struct i2c_client;
+struct i2c_slave_algorithm;
+struct i2c_slave_adapter;
+#if defined(CONFIG_I2C_SLAVE) && defined(CONFIG_I2C)
+
+/**
+ * i2c_slave_send - Send data to the master. When the master issues a read
+ * cycle, the data is sent by the slave.
+ * This function copies the client data into the slave tx buffer and returns
+ * to the caller; it is not a blocking call. The data is sent to the master
+ * later, once the master starts a read transfer.
+ * If there is not enough space for the whole client buffer, an error is
+ * returned; no partial data is written.
+ * @client: Handle to i2c-slave client.
+ * @buf: Data that will be written to the master
+ * @count: How many bytes to write.
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+extern int i2c_slave_send(struct i2c_client *client, const char *buf,
+ int count);
+
+/**
+ * i2c_slave_get_tx_status - Get the amount of data pending in the tx buffer.
+ * If data is still queued in the tx buffer, wait up to the given timeout for
+ * the transfer to complete.
+ * @client: Handle to i2c-slave client.
+ * @timeout_ms: Time to wait for transfer to complete.
+ *
+ * Returns negative errno, or else the number of bytes remaining in tx buffer.
+ */
+extern int i2c_slave_get_tx_status(struct i2c_client *client, int timeout_ms);
+
+/**
+ * i2c_slave_recv - Receive data from the master. Data received from the
+ * master is stored in the slave rx buffer. When this API is called, data is
+ * copied from the slave rx buffer into the client buffer. If the requested
+ * amount (count) of data is not available, the call waits for either
+ * min_count bytes to arrive or the timeout, whichever comes first.
+ *
+ * If timeout_ms is 0, wait until min_count bytes have been read.
+ * If timeout_ms is non-zero, wait for data until the timeout expires.
+ * @client: Handle to i2c-slave client.
+ * @buf: Data that will be read from the master
+ * @count: How many bytes to read.
+ * @min_count: Block till read min_count of data.
+ * @timeout_ms: Time to wait for read to be complete.
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+extern int i2c_slave_recv(struct i2c_client *client, char *buf, int count,
+ int min_count, int timeout_ms);
+
+/**
+ * i2c_slave_start - Start the i2c slave so it can receive/transmit data.
+ * After this call the i2c controller starts responding to the master.
+ * The dummy char is sent to the master whenever there is no data to send in
+ * the slave tx buffer.
+ * @client: Handle to i2c-slave client.
+ * @dummy_char: Data sent to the master when there is no data to send in the
+ * slave tx buffer.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+extern int i2c_slave_start(struct i2c_client *client, unsigned char dummy_char);
+
+/**
+ * i2c_slave_stop - Stop the slave from receiving/transmitting data.
+ * After this call the i2c controller stops responding to the master.
+ * @client: Handle to i2c-slave client.
+ * @is_buffer_clear: Whether to reset the slave tx and rx buffers.
+ */
+extern void i2c_slave_stop(struct i2c_client *client, int is_buffer_clear);
+
+/**
+ * i2c_slave_flush_buffer - Flush the receive and transmit buffer.
+ * @client: Handle to i2c-slave client.
+ * @is_flush_tx_buffer: Whether to flush the slave tx buffer.
+ * @is_flush_rx_buffer: Whether to flush the slave rx buffer.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+extern int i2c_slave_flush_buffer(struct i2c_client *client,
+ int is_flush_tx_buffer, int is_flush_rx_buffer);
+
+/**
+ * i2c_slave_get_nack_cycle - Get the number of master read cycles on which
+ * the dummy char was sent, i.e. how many cycles the slave effectively
+ * NACKed.
+ *
+ * @client: Handle to i2c-slave client.
+ * @is_cout_reset: Whether to reset the NACK count.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+extern int i2c_slave_get_nack_cycle(struct i2c_client *client,
+ int is_cout_reset);
+
+
+/**
+ * i2c_add_slave_adapter - Add slave adapter.
+ *
+ * @slv_adap: Slave adapter.
+ * @force_nr: Adapter number.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+extern int i2c_add_slave_adapter(struct i2c_slave_adapter *slv_adap,
+ bool force_nr);
+
+/**
+ * i2c_del_slave_adapter - Delete slave adapter.
+ *
+ * @slv_adap: Slave adapter.
+ *
+ * Returns negative errno, or else 0 for success.
+ */
+extern int i2c_del_slave_adapter(struct i2c_slave_adapter *slv_adap);
+
+#endif /* I2C_SLAVE */
+
+/*
+ * i2c_slave_adapter is the structure used to identify a physical i2c bus along
+ * with the access algorithms necessary to access it.
+ */
+struct i2c_slave_adapter {
+ struct module *owner;
+ unsigned int id;
+ unsigned int class; /* classes to allow probing for */
+ /* the algorithm to access the i2c-slave bus */
+ const struct i2c_slave_algorithm *slv_algo;
+ void *algo_data;
+ void *parent_data;
+
+ /* data fields that are valid for all devices */
+ u8 level; /* nesting level for lockdep */
+ struct mutex bus_lock;
+
+ int timeout; /* in jiffies */
+ int retries;
+ struct device *dev; /* the adapter device */
+ struct device *parent_dev; /* the adapter device */
+
+ int nr;
+ char name[48];
+ struct completion dev_released;
+};
+
+static inline void *i2c_get_slave_adapdata(const struct i2c_slave_adapter *dev)
+{
+ return dev_get_drvdata(dev->dev);
+}
+
+static inline void i2c_set_slave_adapdata(struct i2c_slave_adapter *dev,
+ void *data)
+{
+ dev_set_drvdata(dev->dev, data);
+}
+
+/*
+ * The following structs are for those who want to implement new i2c slave
+ * bus drivers:
+ * i2c_slave_algorithm is the interface to a class of hardware solutions which
+ * can be addressed using the same bus algorithms.
+ */
+struct i2c_slave_algorithm {
+ /* Start the slave to receive/transmit data.
+ * The dummy char is sent to the master if there is no data to send in
+ * the slave tx buffer.
+ */
+ int (*slave_start)(struct i2c_slave_adapter *slv_adap, int addr,
+ int is_ten_bit_addr, unsigned char dummy_char);
+
+ /* Stop the slave from receiving/transmitting data.
+ * The second argument indicates whether the slave rx and tx buffers
+ * should be reset.
+ */
+ void (*slave_stop)(struct i2c_slave_adapter *slv_adap,
+ int is_buffer_clear);
+
+ /*
+ * Send data to the master. The data is copied into the slave tx buffer
+ * and sent to the master once the master initiates a read cycle.
+ * The function returns as soon as the buffer has been copied into the
+ * slave tx buffer; the client does not wait for the data to reach the
+ * master.
+ * Data is never copied partially: if there is not enough space, an
+ * error is returned.
+ */
+ int (*slave_send)(struct i2c_slave_adapter *slv_adap, const char *buf,
+ int count);
+
+ /*
+ * Get the amount of data pending in the tx buffer. If data is still
+ * queued, wait up to the given time for the slave tx buffer to drain.
+ * Returns the number of bytes remaining in the slave tx buffer.
+ */
+ int (*slave_get_tx_status)(struct i2c_slave_adapter *slv_adap,
+ int timeout_ms);
+
+ /*
+ * Receive data from the master. Data received from the master is
+ * stored in the slave rx buffer. When this API is called, data is
+ * copied from the slave rx buffer into the client buffer. If the
+ * requested amount (count) of data is not available, the call waits
+ * for either min_count bytes to arrive or the timeout, whichever first.
+ *
+ * If timeout_ms is 0, wait until min_count bytes have been read.
+ * If timeout_ms is non-zero, wait for data until the timeout expires.
+ * Returns the number of bytes read as a positive integer, otherwise an error.
+ */
+ int (*slave_recv)(struct i2c_slave_adapter *slv_adap, char *buf,
+ int count, int min_count, int timeout_ms);
+
+ /* Flush the receive and transmit buffer.
+ */
+ int (*slave_flush_buffer)(struct i2c_slave_adapter *slv_adap,
+ int is_flush_tx_buffer, int is_flush_rx_buffer);
+
+ /* Get the number of dummy char cycles.
+ * That is, the number of master read cycles on which the dummy
+ * character was sent.
+ * These can be treated as NACK cycles from the slave side.
+ * The argument selects whether the count should be reset.
+ */
+ int (*slave_get_nack_cycle)(struct i2c_slave_adapter *slv_adap,
+ int is_cout_reset);
+};
+#endif /* __KERNEL__ */
+#endif /* _LINUX_I2C_SLAVE_H */
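
A hedged kernel-side sketch of the client API declared above, assuming CONFIG_I2C_SLAVE and an i2c_client obtained through normal driver binding; the dummy char, buffer size and timeout are arbitrary.

#include <linux/i2c.h>
#include <linux/i2c-slave.h>

static int example_slave_echo(struct i2c_client *client)
{
	char buf[16];
	int ret, n;

	/* start responding to the master, padding with 0xFF when idle */
	ret = i2c_slave_start(client, 0xFF);
	if (ret < 0)
		return ret;

	/* wait up to 100 ms for at least one byte from the master */
	n = i2c_slave_recv(client, buf, sizeof(buf), 1, 100);
	if (n > 0)
		n = i2c_slave_send(client, buf, n);	/* queue it back */

	i2c_slave_stop(client, 1 /* clear rx/tx buffers */);
	return n;
}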
diff --git a/include/linux/i2c-tegra.h b/include/linux/i2c-tegra.h
index 9c85da49857a..90dcddc52f29 100644
--- a/include/linux/i2c-tegra.h
+++ b/include/linux/i2c-tegra.h
@@ -4,6 +4,8 @@
* Copyright (C) 2010 Google, Inc.
* Author: Colin Cross <ccross@android.com>
*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -18,8 +20,33 @@
#ifndef _LINUX_I2C_TEGRA_H
#define _LINUX_I2C_TEGRA_H
+#include <mach/pinmux.h>
+
+#define TEGRA_I2C_MAX_BUS 3
+
struct tegra_i2c_platform_data {
+ int adapter_nr;
+ int bus_count;
+ const struct tegra_pingroup_config *bus_mux[TEGRA_I2C_MAX_BUS];
+ int bus_mux_len[TEGRA_I2C_MAX_BUS];
+ unsigned long bus_clk_rate[TEGRA_I2C_MAX_BUS];
+ bool is_dvc;
+ bool is_clkon_always;
+ int retries;
+ int timeout; /* in jiffies */
+ u16 slave_addr;
+ int scl_gpio[TEGRA_I2C_MAX_BUS];
+ int sda_gpio[TEGRA_I2C_MAX_BUS];
+ int (*arb_recovery)(int scl_gpio, int sda_gpio);
+};
+
+struct tegra_i2c_slave_platform_data {
+ int adapter_nr;
+ const struct tegra_pingroup_config *pinmux;
+ int bus_mux_len;
unsigned long bus_clk_rate;
+ int max_rx_buffer_size;
+ int max_tx_buffer_size;
};
#endif /* _LINUX_I2C_TEGRA_H */
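
A hedged board-file sketch of the expanded platform data; the clock rate and recovery GPIO values are placeholders, and a single, unmuxed bus (bus_count = 1) is assumed.

static struct tegra_i2c_platform_data example_i2c1_pdata = {
	.adapter_nr = 0,
	.bus_count = 1,
	.bus_clk_rate = { 400000 },		/* 400 kHz */
	.scl_gpio = { -1, -1, -1 },		/* no bus-clear GPIOs wired */
	.sda_gpio = { -1, -1, -1 },
	.arb_recovery = NULL,
};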
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a6c652ef516d..0bd9ea2b62ea 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -540,6 +540,7 @@ struct i2c_msg {
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
+#define I2C_FUNC_I2C_SLAVE_SUPPORT 0x10000000 /* i2c slave support */
#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
I2C_FUNC_SMBUS_WRITE_BYTE)
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
index f027f7a63511..29550c11f6dd 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/i2c/atmel_mxt_ts.h
@@ -2,6 +2,8 @@
* Atmel maXTouch Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Copyright (C) 2011 Atmel Corporation
+ * Copyright (C) 2011 NVIDIA Corporation
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -15,6 +17,16 @@
#include <linux/types.h>
+/*
+ * Atmel I2C addresses
+ */
+#define MXT224_I2C_ADDR1 0x4A
+#define MXT224_I2C_ADDR2 0x4B
+#define MXT1386_I2C_ADDR1 0x4C
+#define MXT1386_I2C_ADDR2 0x4D
+#define MXT1386_I2C_ADDR3 0x5A
+#define MXT1386_I2C_ADDR4 0x5B
+
/* Orient */
#define MXT_NORMAL 0x0
#define MXT_DIAGONAL 0x1
@@ -39,6 +51,10 @@ struct mxt_platform_data {
unsigned int voltage;
unsigned char orient;
unsigned long irqflags;
+ u8(*read_chg) (void);
+ unsigned long config_crc;
+ unsigned int actv_cycle_time;
+ unsigned int idle_cycle_time;
};
#endif /* __LINUX_ATMEL_MXT_TS_H */
diff --git a/include/linux/i2c/panjit_ts.h b/include/linux/i2c/panjit_ts.h
new file mode 100644
index 000000000000..1dd51e1ecae8
--- /dev/null
+++ b/include/linux/i2c/panjit_ts.h
@@ -0,0 +1,30 @@
+/*
+ * include/linux/i2c/panjit_ts.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_I2C_PANJIT_TS_H
+#define _LINUX_I2C_PANJIT_TS_H
+
+struct device;
+
+struct panjit_i2c_ts_platform_data {
+ int gpio_reset;
+};
+
+#endif
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 114c0f6fc63d..bb92f0b13288 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -82,6 +82,10 @@
#define TWL_MODULE_RTC TWL4030_MODULE_RTC
#define TWL_MODULE_PWM TWL4030_MODULE_PWM0
+#define TWL6030_MODULE_CHARGER TWL4030_MODULE_MAIN_CHARGE
+#define TWL6025_MODULE_CHARGER 0x18
+
+#define TWL6030_MODULE_GASGAUGE 0x0B
#define TWL6030_MODULE_ID0 0x0D
#define TWL6030_MODULE_ID1 0x0E
#define TWL6030_MODULE_ID2 0x0F
@@ -108,6 +112,7 @@
#define GASGAUGE_INTR_OFFSET 17
#define USBOTG_INTR_OFFSET 4
#define CHARGER_INTR_OFFSET 2
+#define GPADCSW_INTR_OFFSET 1
#define RSV_INTR_OFFSET 0
/* INT register offsets */
@@ -172,6 +177,14 @@ TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
#define TWL6025_SUBCLASS BIT(4) /* TWL6025 has changed registers */
+#define MPU80031_SUBCLASS BIT(5) /* MPU80031 has changed registers */
+
+/* So we can recover the features in other parts of twl stack */
+unsigned int twl_features(void);
+
+/* so we can get at the EPROM SMPS OFFSET/MULT stuff */
+u8 twl_get_smps_offset(void);
+u8 twl_get_smps_mult(void);
/*
* Read and write single 8-bit registers
@@ -179,6 +192,8 @@ TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+void twl_reg_dump(int module, int start, int end);
+
/*
* Read and write several 8-bit registers at once.
*
@@ -215,6 +230,10 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
return -EIO;
}
#endif
+
+int twl6030_set_usb_charge_enable(int enable);
+int twl6030_set_usb_in_current(int currentmA);
+
/*----------------------------------------------------------------------*/
/*
@@ -552,11 +571,27 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
struct twl4030_clock_init_data {
bool ck32k_lowpwr_enable;
+ bool clk32_active_state_on;
};
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
- unsigned int tblsize;
+ unsigned int battery_tmp_tblsize;
+ int *battery_volt_tbl;
+ unsigned int battery_volt_tblsize;
+ unsigned int monitoring_interval;
+
+ unsigned int max_charger_currentmA;
+ unsigned int max_charger_voltagemV;
+ unsigned int termination_currentmA;
+
+ unsigned int max_bat_voltagemV;
+ unsigned int low_bat_voltagemV;
+
+ /* twl6025 */
+ unsigned int use_hw_charger;
+ unsigned int use_eeprom_config;
+ unsigned int power_path;
};
/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
@@ -621,6 +656,7 @@ struct twl4030_usb_data {
int (*phy_set_clock)(struct device *dev, int on);
/* suspend/resume of phy */
int (*phy_suspend)(struct device *dev, int suspend);
+ int (*board_control_power)(struct device *dev, int on);
};
struct twl4030_ins {
@@ -696,6 +732,10 @@ struct twl4030_audio_data {
struct twl4030_platform_data {
unsigned irq_base, irq_end;
+
+ /* Callback for board regulator initialisation */
+ int (*init)(void);
+
struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
@@ -822,6 +862,22 @@ static inline int twl4030charger_usb_en(int enable) { return 0; }
#define TWL6030_REG_VDAC 45
#define TWL6030_REG_VUSB 46
+/* These are renamed in 6025 but same registers */
+#define TWL6025_REG_LDO2 48
+#define TWL6025_REG_LDO4 49
+#define TWL6025_REG_LDO3 50
+#define TWL6025_REG_LDO5 51
+#define TWL6025_REG_LDO1 52
+#define TWL6025_REG_LDO7 53
+#define TWL6025_REG_LDO6 54
+#define TWL6025_REG_LDOLN 55
+#define TWL6025_REG_LDOUSB 56
+
+/* 6025 DCDC supplies */
+#define TWL6025_REG_SMPS3 57
+#define TWL6025_REG_SMPS4 58
+#define TWL6025_REG_VIO 59
+
/* INTERNAL LDOs */
#define TWL6030_REG_VRTC 47
#define TWL6030_REG_CLK32KG 48
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644
index 000000000000..c06bd6c8ba26
--- /dev/null
+++ b/include/linux/if_pppolac.h
@@ -0,0 +1,33 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+ sa_family_t sa_family; /* AF_PPPOX */
+ unsigned int sa_protocol; /* PX_PROTO_OLAC */
+ int udp_socket;
+ struct __attribute__((packed)) {
+ __u16 tunnel, session;
+ } local, remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644
index 000000000000..0cf34b4d551f
--- /dev/null
+++ b/include/linux/if_pppopns.h
@@ -0,0 +1,32 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+ sa_family_t sa_family; /* AF_PPPOX */
+ unsigned int sa_protocol; /* PX_PROTO_OPNS */
+ int tcp_socket;
+ __u16 local;
+ __u16 remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 397921b09ef9..999ccd3fff37 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -27,6 +27,8 @@
#include <linux/ppp_channel.h>
#endif /* __KERNEL__ */
#include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
/* For user-space programs to pick up these definitions
* which they wouldn't get otherwise without defining __KERNEL__
@@ -60,7 +62,9 @@ struct pptp_addr {
#define PX_PROTO_OE 0 /* Currently just PPPoE */
#define PX_PROTO_OL2TP 1 /* Now L2TP also */
#define PX_PROTO_PPTP 2
-#define PX_MAX_PROTO 3
+#define PX_PROTO_OLAC 3
+#define PX_PROTO_OPNS 4
+#define PX_MAX_PROTO 5
struct sockaddr_pppox {
sa_family_t sa_family; /* address family, AF_PPPOX */
@@ -167,6 +171,25 @@ struct pptp_opt {
u32 seq_sent, seq_recv;
int ppp_flags;
};
+
+struct pppolac_opt {
+ __u32 local;
+ __u32 remote;
+ __u32 recv_sequence;
+ __u32 xmit_sequence;
+ atomic_t sequencing;
+ int (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+ __u16 local;
+ __u16 remote;
+ __u32 recv_sequence;
+ __u32 xmit_sequence;
+ void (*data_ready)(struct sock *sk_raw, int length);
+ int (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
#include <net/sock.h>
struct pppox_sock {
@@ -177,6 +200,8 @@ struct pppox_sock {
union {
struct pppoe_opt pppoe;
struct pptp_opt pptp;
+ struct pppolac_opt lac;
+ struct pppopns_opt pns;
} proto;
__be16 num;
};
diff --git a/include/linux/ina219.h b/include/linux/ina219.h
new file mode 100644
index 000000000000..c27fa26c5759
--- /dev/null
+++ b/include/linux/ina219.h
@@ -0,0 +1,34 @@
+/*
+ * linux/include/linux/ina219.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _INA219_H
+#define _INA219_H
+
+#include <linux/types.h>
+
+struct ina219_platform_data {
+ u8 divisor; /* divisor needed to get current value */
+ u32 calibration_data;
+ u32 power_lsb;
+ char rail_name[20];
+};
+
+#endif /* _LINUX_INA219_H */
+
diff --git a/include/linux/interrupt_keys.h b/include/linux/interrupt_keys.h
new file mode 100755
index 000000000000..8be6e9a6b0a0
--- /dev/null
+++ b/include/linux/interrupt_keys.h
@@ -0,0 +1,47 @@
+/*
+ * include/linux/interrupt_keys.h
+ *
+ * Key driver for keys directly connected to interrupt lines.
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _INTERRUPT_KEYS_H
+#define _INTERRUPT_KEYS_H
+
+struct interrupt_keys_button {
+ /* Configuration parameters */
+ int code; /* input event code (KEY_*, SW_*) */
+ int irq;
+ int active_low;
+ char *desc;
+ int type; /* input event type (EV_KEY, EV_SW) */
+ int wakeup; /* configure the interrupt source as a wake-up
+ * source */
+ int debounce_interval; /* debounce ticks interval in msecs */
+ bool can_disable;
+};
+
+struct interrupt_keys_platform_data {
+ struct interrupt_keys_button *int_buttons;
+ int nbuttons;
+ unsigned int rep:1; /* enable input subsystem auto repeat */
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+};
+
+#endif
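A hedged board-file sketch for the structures above; the IRQ number, key code and debounce value are made up.

#include <linux/input.h>
#include <linux/interrupt_keys.h>
#include <linux/kernel.h>

static struct interrupt_keys_button example_int_buttons[] = {
	{
		.code = KEY_POWER,
		.irq = 100,		/* platform specific */
		.type = EV_KEY,
		.desc = "power",
		.wakeup = 1,
		.debounce_interval = 10,
	},
};

static struct interrupt_keys_platform_data example_int_keys_pdata = {
	.int_buttons = example_int_buttons,
	.nbuttons = ARRAY_SIZE(example_int_buttons),
};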
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 000000000000..aed8349279ed
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,344 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+struct ion_handle;
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_NUM_HEAPS: helper for iterating over heaps
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+
+#ifdef __KERNEL__
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t's that would have to */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for the heap; when allocating, heaps with
+ * lower id values are tried first
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap heaps[];
+};
+
+/**
+ * ion_client_create() - allocate a client and returns it
+ * @dev: the global ion device
+ * @heap_mask: mask of heaps this client can allocate from
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ unsigned int heap_mask, const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks have
+ * alignment requirements of some kind
+ * @flags: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from lowest to highest order bit
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_map_dma should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_map_dma - create a dma mapping for a given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Return an sglist describing the given handle
+ */
+struct scatterlist *ion_map_dma(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_unmap_dma() - destroy a dma mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share() - given a handle, obtain a buffer to pass to other clients
+ * @client: the client
+ * @handle: the handle to share
+ *
+ * Given a handle, return a buffer, which exists in a global name
+ * space, and can be passed to other clients. Should be passed into ion_import
+ * to obtain a new handle for this buffer.
+ *
+ * NOTE: This function does not take an extra reference. The burden is on the
+ * caller to make sure the buffer doesn't go away while it's being passed to
+ * another client. That is, ion_free should not be called on this handle until
+ * the buffer has been imported into the other client.
+ */
+struct ion_buffer *ion_share(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_import() - given a buffer in another client, import it
+ * @client: the client importing the buffer
+ * @buffer: the buffer to import (as obtained from ion_share)
+ *
+ * Given a buffer, add it to the client and return the handle to use to refer
+ * to it further. This is called to share a handle from one kernel client to
+ * another.
+ */
+struct ion_handle *ion_import(struct ion_client *client,
+ struct ion_buffer *buffer);
+
+/**
+ * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
+ * @client: the client importing the buffer
+ * @fd: the fd
+ *
+ * A helper function for drivers that will be receiving ion buffers shared
+ * with them from userspace. These buffers are represented by a file
+ * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
+ * This function converts that fd into the underlying buffer, and returns
+ * the handle to use to refer to it further.
+ */
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+#endif /* __KERNEL__ */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to refer
+ * to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int flags;
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ struct ion_handle *handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * filed set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _LINUX_ION_H */
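
A hedged userspace sketch of the ioctl flow documented above; /dev/ion comes from the DOC comment, while the open mode, heap mask, alignment and the (leaky) error handling are assumptions of this sketch.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

static void *example_ion_alloc_map(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 4096,
		.flags = ION_HEAP_SYSTEM_MASK,
	};
	struct ion_fd_data map;
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return NULL;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return NULL;		/* a real client would close ion_fd */

	map.handle = alloc.handle;	/* cookie returned by the allocation */
	if (ioctl(ion_fd, ION_IOC_MAP, &map) < 0)
		return NULL;

	/* a shareable fd for another process would come from ION_IOC_SHARE */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map.fd, 0);
}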
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 46ac9a50528d..54bf5a471e1c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -744,4 +744,7 @@ struct sysinfo {
char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
};
+/* To identify board information in panic logs, set this */
+extern char *mach_panic_string;
+
#endif
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644
index 000000000000..856a5850217b
--- /dev/null
+++ b/include/linux/keychord.h
@@ -0,0 +1,52 @@
+/*
+ * Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION 1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed. A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+ /* should be KEYCHORD_VERSION */
+ __u16 version;
+ /*
+ * client specified ID, returned from read()
+ * when this keychord is pressed.
+ */
+ __u16 id;
+
+ /* number of keycodes in this keychord */
+ __u16 count;
+
+ /* variable length array of keycodes */
+ __u16 keycodes[];
+};
+
+#endif /* __LINUX_KEYCHORD_H_ */
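
A hedged userspace sketch of the protocol described in the comment above; the key codes are illustrative, and reading the id back as a single __u16 is an assumption consistent with the "returns the id" description.

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/input.h>
#include <linux/keychord.h>

static int example_wait_for_chord(void)
{
	size_t sz = sizeof(struct input_keychord) + 2 * sizeof(__u16);
	struct input_keychord *kc = calloc(1, sz);
	__u16 id = 0;
	int fd = open("/dev/keychord", O_RDWR);

	if (fd < 0 || !kc)
		return -1;

	kc->version = KEYCHORD_VERSION;
	kc->id = 1;
	kc->count = 2;
	kc->keycodes[0] = KEY_VOLUMEDOWN;
	kc->keycodes[1] = KEY_POWER;

	write(fd, kc, sz);		/* install the keychord list */
	read(fd, &id, sizeof(id));	/* blocks until the chord is pressed */

	free(kc);
	close(fd);
	return id;
}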
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 000000000000..a2ac49e5b684
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,28 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+ int (*reset_fn)(void);
+ int *keys_up;
+ int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
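A hedged board-side sketch of how the platform data above is typically wired up; the key pair is illustrative, .reset_fn is left for the board to fill in, and the static initialization of the trailing zero-terminated array relies on the usual GCC extension.

    #include <linux/input.h>
    #include <linux/keyreset.h>
    #include <linux/platform_device.h>

    static struct keyreset_platform_data board_keyreset_data = {
            /* .reset_fn: optional board hook, left NULL here */
            .keys_down = {
                    KEY_VOLUMEDOWN,
                    KEY_POWER,
                    0,              /* terminator, as required above */
            },
    };

    static struct platform_device board_keyreset_device = {
            .name = KEYRESET_NAME,
            .id   = -1,
            .dev  = {
                    .platform_data = &board_keyreset_data,
            },
    };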
diff --git a/include/linux/mfd/max77663-core.h b/include/linux/mfd/max77663-core.h
new file mode 100644
index 000000000000..1edaf7aaaf4d
--- /dev/null
+++ b/include/linux/mfd/max77663-core.h
@@ -0,0 +1,173 @@
+/*
+ * include/linux/mfd/max77663-core.h
+ *
+ * Copyright 2011 Maxim Integrated Products, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_MAX77663_CORE_H__
+#define __LINUX_MFD_MAX77663_CORE_H__
+
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+
+/*
+ * Interrupts
+ */
+enum {
+ MAX77663_IRQ_LBT_LB, /* Low-Battery */
+ MAX77663_IRQ_LBT_THERM_ALRM1, /* Thermal alarm status, > 120C */
+ MAX77663_IRQ_LBT_THERM_ALRM2, /* Thermal alarm status, > 140C */
+
+ MAX77663_IRQ_GPIO0, /* GPIO0 edge detection */
+ MAX77663_IRQ_GPIO1, /* GPIO1 edge detection */
+ MAX77663_IRQ_GPIO2, /* GPIO2 edge detection */
+ MAX77663_IRQ_GPIO3, /* GPIO3 edge detection */
+ MAX77663_IRQ_GPIO4, /* GPIO4 edge detection */
+ MAX77663_IRQ_GPIO5, /* GPIO5 edge detection */
+ MAX77663_IRQ_GPIO6, /* GPIO6 edge detection */
+ MAX77663_IRQ_GPIO7, /* GPIO7 edge detection */
+
+ MAX77663_IRQ_ONOFF_HRDPOWRN, /* Hard power off warning */
+ MAX77663_IRQ_ONOFF_EN0_1SEC, /* EN0 active for 1s */
+ MAX77663_IRQ_ONOFF_EN0_FALLING, /* EN0 falling */
+ MAX77663_IRQ_ONOFF_EN0_RISING, /* EN0 rising */
+ MAX77663_IRQ_ONOFF_LID_FALLING, /* LID falling */
+ MAX77663_IRQ_ONOFF_LID_RISING, /* LID rising */
+ MAX77663_IRQ_ONOFF_ACOK_FALLING,/* ACOK falling */
+ MAX77663_IRQ_ONOFF_ACOK_RISING, /* ACOK rising */
+
+ MAX77663_IRQ_RTC, /* RTC */
+ MAX77663_IRQ_SD_PF, /* SD power fail */
+ MAX77663_IRQ_LDO_PF, /* LDO power fail */
+ MAX77663_IRQ_32K, /* 32kHz oscillator */
+ MAX77663_IRQ_NVER, /* Non-Volatile Event Recorder */
+
+ MAX77663_IRQ_NR,
+};
+
+/*
+ * GPIOs
+ */
+enum {
+ MAX77663_GPIO0,
+ MAX77663_GPIO1,
+ MAX77663_GPIO2,
+ MAX77663_GPIO3,
+ MAX77663_GPIO4,
+ MAX77663_GPIO5,
+ MAX77663_GPIO6,
+ MAX77663_GPIO7,
+
+ MAX77663_GPIO_NR,
+};
+
+/* Direction */
+enum max77663_gpio_dir {
+ GPIO_DIR_DEF,
+ GPIO_DIR_IN,
+ GPIO_DIR_OUT,
+};
+
+/* Data output */
+enum max77663_gpio_data_out {
+ GPIO_DOUT_DEF,
+ GPIO_DOUT_HIGH,
+ GPIO_DOUT_LOW,
+};
+
+/* Output drive */
+enum max77663_gpio_out_drv {
+ GPIO_OUT_DRV_DEF,
+ GPIO_OUT_DRV_PUSH_PULL,
+ GPIO_OUT_DRV_OPEN_DRAIN,
+};
+
+/* Pull-up */
+enum max77663_gpio_pull_up {
+ GPIO_PU_DEF,
+ GPIO_PU_ENABLE,
+ GPIO_PU_DISABLE,
+};
+
+/* Pull-down */
+enum max77663_gpio_pull_down {
+ GPIO_PD_DEF,
+ GPIO_PD_ENABLE,
+ GPIO_PD_DISABLE,
+};
+
+/* Alternate */
+enum max77663_gpio_alt {
+ GPIO_ALT_DEF,
+ GPIO_ALT_ENABLE,
+ GPIO_ALT_DISABLE,
+};
+
+struct max77663_gpio_config {
+ int gpio; /* gpio number */
+ enum max77663_gpio_dir dir;
+ enum max77663_gpio_data_out dout;
+ enum max77663_gpio_out_drv out_drv;
+ enum max77663_gpio_pull_up pull_up;
+ enum max77663_gpio_pull_down pull_down;
+ enum max77663_gpio_alt alternate;
+};
+
+struct max77663_platform_data {
+ int irq_base;
+ int gpio_base;
+
+ int num_gpio_cfgs;
+ struct max77663_gpio_config *gpio_cfgs;
+
+ int num_subdevs;
+ struct mfd_cell *sub_devices;
+};
+
+#if defined(CONFIG_MFD_MAX77663)
+int max77663_read(struct device *dev, u8 addr, void *values, u32 len,
+ bool is_rtc);
+int max77663_write(struct device *dev, u8 addr, void *values, u32 len,
+ bool is_rtc);
+int max77663_set_bits(struct device *dev, u8 addr, u8 mask, u8 value,
+ bool is_rtc);
+int max77663_power_off(void);
+int max77663_gpio_set_alternate(int gpio, int alternate);
+#else
+static inline int max77663_read(struct device *dev, u8 addr, void *values,
+ u32 len, bool is_rtc)
+{
+ return 0;
+}
+
+static inline int max77663_write(struct device *dev, u8 addr, void *values,
+ u32 len, bool is_rtc)
+{
+ return 0;
+}
+
+static inline int max77663_set_bits(struct device *dev, u8 addr, u8 mask,
+ u8 value, bool is_rtc)
+{
+ return 0;
+}
+
+static inline int max77663_power_off(void)
+{
+ return 0;
+}
+
+static inline int max77663_gpio_set_alternate(int gpio, int alternate)
+{
+ return 0;
+}
+#endif /* defined(CONFIG_MFD_MAX77663) */
+
+#endif /* __LINUX_MFD_MAX77663_CORE_H__ */
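A hedged board-side sketch of the GPIO configuration and platform data declared above; the base numbers and the choice of GPIO are illustrative, and the sub-device (regulator, RTC, ...) cells are omitted.

    #include <linux/kernel.h>
    #include <linux/mfd/max77663-core.h>

    static struct max77663_gpio_config board_max77663_gpio_cfgs[] = {
            {       /* open-drain, pulled-up output driven high */
                    .gpio      = MAX77663_GPIO1,
                    .dir       = GPIO_DIR_OUT,
                    .dout      = GPIO_DOUT_HIGH,
                    .out_drv   = GPIO_OUT_DRV_OPEN_DRAIN,
                    .pull_up   = GPIO_PU_ENABLE,
                    .pull_down = GPIO_PD_DEF,
                    .alternate = GPIO_ALT_DISABLE,
            },
    };

    static struct max77663_platform_data board_max77663_pdata = {
            .irq_base      = 512,           /* illustrative */
            .gpio_base     = 256,           /* illustrative */
            .num_gpio_cfgs = ARRAY_SIZE(board_max77663_gpio_cfgs),
            .gpio_cfgs     = board_max77663_gpio_cfgs,
            /* .sub_devices / .num_subdevs: mfd_cell array, omitted */
    };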
diff --git a/include/linux/mfd/max8907c.h b/include/linux/mfd/max8907c.h
new file mode 100644
index 000000000000..76dbdcc03046
--- /dev/null
+++ b/include/linux/mfd/max8907c.h
@@ -0,0 +1,259 @@
+/* linux/mfd/max8907c.h
+ *
+ * Functions to access MAX8907C power management chip.
+ *
+ * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MAX8907C_H
+#define __LINUX_MFD_MAX8907C_H
+
+/* MAX8907C register map */
+#define MAX8907C_REG_SYSENSEL 0x00
+#define MAX8907C_REG_ON_OFF_IRQ1 0x01
+#define MAX8907C_REG_ON_OFF_IRQ1_MASK 0x02
+#define MAX8907C_REG_ON_OFF_STAT 0x03
+#define MAX8907C_REG_SDCTL1 0x04
+#define MAX8907C_REG_SDSEQCNT1 0x05
+#define MAX8907C_REG_SDV1 0x06
+#define MAX8907C_REG_SDCTL2 0x07
+#define MAX8907C_REG_SDSEQCNT2 0x08
+#define MAX8907C_REG_SDV2 0x09
+#define MAX8907C_REG_SDCTL3 0x0A
+#define MAX8907C_REG_SDSEQCNT3 0x0B
+#define MAX8907C_REG_SDV3 0x0C
+#define MAX8907C_REG_ON_OFF_IRQ2 0x0D
+#define MAX8907C_REG_ON_OFF_IRQ2_MASK 0x0E
+#define MAX8907C_REG_RESET_CNFG 0x0F
+#define MAX8907C_REG_LDOCTL16 0x10
+#define MAX8907C_REG_LDOSEQCNT16 0x11
+#define MAX8907C_REG_LDO16VOUT 0x12
+#define MAX8907C_REG_SDBYSEQCNT 0x13
+#define MAX8907C_REG_LDOCTL17 0x14
+#define MAX8907C_REG_LDOSEQCNT17 0x15
+#define MAX8907C_REG_LDO17VOUT 0x16
+#define MAX8907C_REG_LDOCTL1 0x18
+#define MAX8907C_REG_LDOSEQCNT1 0x19
+#define MAX8907C_REG_LDO1VOUT 0x1A
+#define MAX8907C_REG_LDOCTL2 0x1C
+#define MAX8907C_REG_LDOSEQCNT2 0x1D
+#define MAX8907C_REG_LDO2VOUT 0x1E
+#define MAX8907C_REG_LDOCTL3 0x20
+#define MAX8907C_REG_LDOSEQCNT3 0x21
+#define MAX8907C_REG_LDO3VOUT 0x22
+#define MAX8907C_REG_LDOCTL4 0x24
+#define MAX8907C_REG_LDOSEQCNT4 0x25
+#define MAX8907C_REG_LDO4VOUT 0x26
+#define MAX8907C_REG_LDOCTL5 0x28
+#define MAX8907C_REG_LDOSEQCNT5 0x29
+#define MAX8907C_REG_LDO5VOUT 0x2A
+#define MAX8907C_REG_LDOCTL6 0x2C
+#define MAX8907C_REG_LDOSEQCNT6 0x2D
+#define MAX8907C_REG_LDO6VOUT 0x2E
+#define MAX8907C_REG_LDOCTL7 0x30
+#define MAX8907C_REG_LDOSEQCNT7 0x31
+#define MAX8907C_REG_LDO7VOUT 0x32
+#define MAX8907C_REG_LDOCTL8 0x34
+#define MAX8907C_REG_LDOSEQCNT8 0x35
+#define MAX8907C_REG_LDO8VOUT 0x36
+#define MAX8907C_REG_LDOCTL9 0x38
+#define MAX8907C_REG_LDOSEQCNT9 0x39
+#define MAX8907C_REG_LDO9VOUT 0x3A
+#define MAX8907C_REG_LDOCTL10 0x3C
+#define MAX8907C_REG_LDOSEQCNT10 0x3D
+#define MAX8907C_REG_LDO10VOUT 0x3E
+#define MAX8907C_REG_LDOCTL11 0x40
+#define MAX8907C_REG_LDOSEQCNT11 0x41
+#define MAX8907C_REG_LDO11VOUT 0x42
+#define MAX8907C_REG_LDOCTL12 0x44
+#define MAX8907C_REG_LDOSEQCNT12 0x45
+#define MAX8907C_REG_LDO12VOUT 0x46
+#define MAX8907C_REG_LDOCTL13 0x48
+#define MAX8907C_REG_LDOSEQCNT13 0x49
+#define MAX8907C_REG_LDO13VOUT 0x4A
+#define MAX8907C_REG_LDOCTL14 0x4C
+#define MAX8907C_REG_LDOSEQCNT14 0x4D
+#define MAX8907C_REG_LDO14VOUT 0x4E
+#define MAX8907C_REG_LDOCTL15 0x50
+#define MAX8907C_REG_LDOSEQCNT15 0x51
+#define MAX8907C_REG_LDO15VOUT 0x52
+#define MAX8907C_REG_OUT5VEN 0x54
+#define MAX8907C_REG_OUT5VSEQ 0x55
+#define MAX8907C_REG_OUT33VEN 0x58
+#define MAX8907C_REG_OUT33VSEQ 0x59
+#define MAX8907C_REG_LDOCTL19 0x5C
+#define MAX8907C_REG_LDOSEQCNT19 0x5D
+#define MAX8907C_REG_LDO19VOUT 0x5E
+#define MAX8907C_REG_LBCNFG 0x60
+#define MAX8907C_REG_SEQ1CNFG 0x64
+#define MAX8907C_REG_SEQ2CNFG 0x65
+#define MAX8907C_REG_SEQ3CNFG 0x66
+#define MAX8907C_REG_SEQ4CNFG 0x67
+#define MAX8907C_REG_SEQ5CNFG 0x68
+#define MAX8907C_REG_SEQ6CNFG 0x69
+#define MAX8907C_REG_SEQ7CNFG 0x6A
+#define MAX8907C_REG_LDOCTL18 0x72
+#define MAX8907C_REG_LDOSEQCNT18 0x73
+#define MAX8907C_REG_LDO18VOUT 0x74
+#define MAX8907C_REG_BBAT_CNFG 0x78
+#define MAX8907C_REG_CHG_CNTL1 0x7C
+#define MAX8907C_REG_CHG_CNTL2 0x7D
+#define MAX8907C_REG_CHG_IRQ1 0x7E
+#define MAX8907C_REG_CHG_IRQ2 0x7F
+#define MAX8907C_REG_CHG_IRQ1_MASK 0x80
+#define MAX8907C_REG_CHG_IRQ2_MASK 0x81
+#define MAX8907C_REG_CHG_STAT 0x82
+#define MAX8907C_REG_WLED_MODE_CNTL 0x84
+#define MAX8907C_REG_ILED_CNTL 0x84
+#define MAX8907C_REG_II1RR 0x8E
+#define MAX8907C_REG_II2RR 0x8F
+#define MAX8907C_REG_LDOCTL20 0x9C
+#define MAX8907C_REG_LDOSEQCNT20 0x9D
+#define MAX8907C_REG_LDO20VOUT 0x9E
+
+/* RTC register */
+#define MAX8907C_REG_RTC_SEC 0x00
+#define MAX8907C_REG_RTC_MIN 0x01
+#define MAX8907C_REG_RTC_HOURS 0x02
+#define MAX8907C_REG_RTC_WEEKDAY 0x03
+#define MAX8907C_REG_RTC_DATE 0x04
+#define MAX8907C_REG_RTC_MONTH 0x05
+#define MAX8907C_REG_RTC_YEAR1 0x06
+#define MAX8907C_REG_RTC_YEAR2 0x07
+#define MAX8907C_REG_ALARM0_SEC 0x08
+#define MAX8907C_REG_ALARM0_MIN 0x09
+#define MAX8907C_REG_ALARM0_HOURS 0x0A
+#define MAX8907C_REG_ALARM0_WEEKDAY 0x0B
+#define MAX8907C_REG_ALARM0_DATE 0x0C
+#define MAX8907C_REG_ALARM0_MONTH 0x0D
+#define MAX8907C_REG_ALARM0_YEAR1 0x0E
+#define MAX8907C_REG_ALARM0_YEAR2 0x0F
+#define MAX8907C_REG_ALARM1_SEC 0x10
+#define MAX8907C_REG_ALARM1_MIN 0x11
+#define MAX8907C_REG_ALARM1_HOURS 0x12
+#define MAX8907C_REG_ALARM1_WEEKDAY 0x13
+#define MAX8907C_REG_ALARM1_DATE 0x14
+#define MAX8907C_REG_ALARM1_MONTH 0x15
+#define MAX8907C_REG_ALARM1_YEAR1 0x16
+#define MAX8907C_REG_ALARM1_YEAR2 0x17
+#define MAX8907C_REG_ALARM0_CNTL 0x18
+#define MAX8907C_REG_ALARM1_CNTL 0x19
+#define MAX8907C_REG_RTC_STATUS 0x1A
+#define MAX8907C_REG_RTC_CNTL 0x1B
+#define MAX8907C_REG_RTC_IRQ 0x1C
+#define MAX8907C_REG_RTC_IRQ_MASK 0x1D
+#define MAX8907C_REG_MPL_CNTL 0x1E
+
+/* ADC and Touch Screen Controller register map */
+
+#define MAX8907C_CTL 0
+#define MAX8907C_SEQCNT 1
+#define MAX8907C_VOUT 2
+
+/* mask bit fields */
+#define MAX8907C_MASK_LDO_SEQ 0x1C
+#define MAX8907C_MASK_LDO_EN 0x01
+#define MAX8907C_MASK_VBBATTCV 0x03
+#define MAX8907C_MASK_OUT5V_VINEN 0x10
+#define MAX8907C_MASK_OUT5V_ENSRC 0x0E
+#define MAX8907C_MASK_OUT5V_EN 0x01
+
+/* Power off bit in RESET_CNFG reg */
+#define MAX8907C_MASK_POWER_OFF 0x40
+
+#define MAX8907C_MASK_PWR_EN 0x80
+#define MAX8907C_MASK_CTL_SEQ 0x1C
+
+#define MAX8907C_PWR_EN 0x80
+#define MAX8907C_CTL_SEQ 0x04
+
+#define MAX8907C_SD_SEQ1 0x02
+#define MAX8907C_SD_SEQ2 0x06
+
+#define MAX8907C_DELAY_CNT0 0x00
+
+#define MAX8907C_POWER_UP_DELAY_CNT1 0x10
+#define MAX8907C_POWER_UP_DELAY_CNT12 0xC0
+
+#define MAX8907C_POWER_DOWN_DELAY_CNT12 0x0C
+
+#define RTC_I2C_ADDR 0x68
+
+/*
+ * MAX8907B revision requires s/w WAR to connect PWREN input to
+ * sequencer 2 because of the bug in the silicon.
+ */
+#define MAX8907B_II2RR_PWREN_WAR (0x12)
+
+/* Defines common for all supplies PWREN sequencer selection */
+#define MAX8907B_SEQSEL_PWREN_LXX 1 /* SEQ2 (PWREN) */
+
+/* IRQ definitions */
+enum {
+ MAX8907C_IRQ_VCHG_DC_OVP,
+ MAX8907C_IRQ_VCHG_DC_F,
+ MAX8907C_IRQ_VCHG_DC_R,
+ MAX8907C_IRQ_VCHG_THM_OK_R,
+ MAX8907C_IRQ_VCHG_THM_OK_F,
+ MAX8907C_IRQ_VCHG_MBATTLOW_F,
+ MAX8907C_IRQ_VCHG_MBATTLOW_R,
+ MAX8907C_IRQ_VCHG_RST,
+ MAX8907C_IRQ_VCHG_DONE,
+ MAX8907C_IRQ_VCHG_TOPOFF,
+ MAX8907C_IRQ_VCHG_TMR_FAULT,
+ MAX8907C_IRQ_GPM_RSTIN,
+ MAX8907C_IRQ_GPM_MPL,
+ MAX8907C_IRQ_GPM_SW_3SEC,
+ MAX8907C_IRQ_GPM_EXTON_F,
+ MAX8907C_IRQ_GPM_EXTON_R,
+ MAX8907C_IRQ_GPM_SW_1SEC,
+ MAX8907C_IRQ_GPM_SW_F,
+ MAX8907C_IRQ_GPM_SW_R,
+ MAX8907C_IRQ_GPM_SYSCKEN_F,
+ MAX8907C_IRQ_GPM_SYSCKEN_R,
+ MAX8907C_IRQ_RTC_ALARM1,
+ MAX8907C_IRQ_RTC_ALARM0,
+ MAX8907C_NR_IRQS,
+};
+
+struct max8907c {
+ struct device *dev;
+ struct mutex io_lock;
+ struct mutex irq_lock;
+ struct i2c_client *i2c_power;
+ struct i2c_client *i2c_rtc;
+ int irq_base;
+ int core_irq;
+
+ unsigned char cache_chg[2];
+ unsigned char cache_on[2];
+ unsigned char cache_rtc;
+
+};
+
+struct max8907c_platform_data {
+ int num_subdevs;
+ struct platform_device **subdevs;
+ int irq_base;
+ int (*max8907c_setup)(void);
+};
+
+int max8907c_reg_read(struct i2c_client *i2c, u8 reg);
+int max8907c_reg_bulk_read(struct i2c_client *i2c, u8 reg, u8 count, u8 *val);
+int max8907c_reg_write(struct i2c_client *i2c, u8 reg, u8 val);
+int max8907c_reg_bulk_write(struct i2c_client *i2c, u8 reg, u8 count, u8 *val);
+int max8907c_set_bits(struct i2c_client *i2c, u8 reg, u8 mask, u8 val);
+
+int max8907c_irq_init(struct max8907c *chip, int irq, int irq_base);
+void max8907c_irq_free(struct max8907c *chip);
+int max8907c_suspend(struct i2c_client *i2c, pm_message_t state);
+int max8907c_resume(struct i2c_client *i2c);
+int max8907c_power_off(void);
+void max8907c_deep_sleep(int enter);
+int max8907c_pwr_en_config(void);
+int max8907c_pwr_en_attach(void);
+#endif
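A hedged sketch of the register accessors declared above (presumably close to what max8907c_power_off() itself does, though that is an assumption): set the power-off bit in RESET_CNFG through the power-page I2C client kept in struct max8907c.

    #include <linux/mfd/max8907c.h>

    static int example_request_power_off(struct max8907c *chip)
    {
            /* write POWER_OFF (bit 6) of RESET_CNFG, leaving the rest intact */
            return max8907c_set_bits(chip->i2c_power, MAX8907C_REG_RESET_CNFG,
                                     MAX8907C_MASK_POWER_OFF,
                                     MAX8907C_MASK_POWER_OFF);
    }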
diff --git a/include/linux/mfd/ricoh583.h b/include/linux/mfd/ricoh583.h
new file mode 100644
index 000000000000..4e38cded0fa5
--- /dev/null
+++ b/include/linux/mfd/ricoh583.h
@@ -0,0 +1,164 @@
+/* include/linux/mfd/ricoh583.h
+ *
+ * Core driver interface to access RICOH583 power management chip.
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_RICOH583_H
+#define __LINUX_MFD_RICOH583_H
+
+#include <linux/rtc.h>
+/* RICOH583 IRQ definitions */
+enum {
+ RICOH583_IRQ_ONKEY,
+ RICOH583_IRQ_ACOK,
+ RICOH583_IRQ_LIDOPEN,
+ RICOH583_IRQ_PREOT,
+ RICOH583_IRQ_CLKSTP,
+ RICOH583_IRQ_ONKEY_OFF,
+ RICOH583_IRQ_WD,
+ RICOH583_IRQ_EN_PWRREQ1,
+ RICOH583_IRQ_EN_PWRREQ2,
+ RICOH583_IRQ_PRE_VINDET,
+
+ RICOH583_IRQ_DC0LIM,
+ RICOH583_IRQ_DC1LIM,
+ RICOH583_IRQ_DC2LIM,
+ RICOH583_IRQ_DC3LIM,
+
+ RICOH583_IRQ_CTC,
+ RICOH583_IRQ_YALE,
+ RICOH583_IRQ_DALE,
+ RICOH583_IRQ_WALE,
+
+ RICOH583_IRQ_AIN1L,
+ RICOH583_IRQ_AIN2L,
+ RICOH583_IRQ_AIN3L,
+ RICOH583_IRQ_VBATL,
+ RICOH583_IRQ_VIN3L,
+ RICOH583_IRQ_VIN8L,
+ RICOH583_IRQ_AIN1H,
+ RICOH583_IRQ_AIN2H,
+ RICOH583_IRQ_AIN3H,
+ RICOH583_IRQ_VBATH,
+ RICOH583_IRQ_VIN3H,
+ RICOH583_IRQ_VIN8H,
+ RICOH583_IRQ_ADCEND,
+
+ RICOH583_IRQ_GPIO0,
+ RICOH583_IRQ_GPIO1,
+ RICOH583_IRQ_GPIO2,
+ RICOH583_IRQ_GPIO3,
+ RICOH583_IRQ_GPIO4,
+ RICOH583_IRQ_GPIO5,
+ RICOH583_IRQ_GPIO6,
+ RICOH583_IRQ_GPIO7,
+ RICOH583_NR_IRQS,
+};
+
+/* Ricoh583 gpio definitions */
+enum {
+ RICOH583_GPIO0,
+ RICOH583_GPIO1,
+ RICOH583_GPIO2,
+ RICOH583_GPIO3,
+ RICOH583_GPIO4,
+ RICOH583_GPIO5,
+ RICOH583_GPIO6,
+ RICOH583_GPIO7,
+
+ RICOH583_NR_GPIO,
+};
+
+enum ricoh583_deepsleep_control_id {
+ RICOH583_DS_NONE,
+ RICOH583_DS_DC0,
+ RICOH583_DS_DC1,
+ RICOH583_DS_DC2,
+ RICOH583_DS_DC3,
+ RICOH583_DS_LDO0,
+ RICOH583_DS_LDO1,
+ RICOH583_DS_LDO2,
+ RICOH583_DS_LDO3,
+ RICOH583_DS_LDO4,
+ RICOH583_DS_LDO5,
+ RICOH583_DS_LDO6,
+ RICOH583_DS_LDO7,
+ RICOH583_DS_LDO8,
+ RICOH583_DS_LDO9,
+ RICOH583_DS_PSO0,
+ RICOH583_DS_PSO1,
+ RICOH583_DS_PSO2,
+ RICOH583_DS_PSO3,
+ RICOH583_DS_PSO4,
+ RICOH583_DS_PSO5,
+ RICOH583_DS_PSO6,
+ RICOH583_DS_PSO7,
+};
+enum ricoh583_ext_pwrreq_control {
+ RICOH583_EXT_PWRREQ1_CONTROL = 0x1,
+ RICOH583_EXT_PWRREQ2_CONTROL = 0x2,
+};
+
+struct ricoh583_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct ricoh583_rtc_platform_data {
+ int irq;
+ struct rtc_time time;
+};
+
+struct ricoh583_gpio_init_data {
+ unsigned pulldn_en:1; /* Enable pull down */
+ unsigned output_mode_en:1; /* Enable output mode during init */
+ unsigned output_val:1; /* Output value if it is in output mode */
+ unsigned init_apply:1; /* Apply init data on configuring gpios*/
+};
+
+struct ricoh583_platform_data {
+ int num_subdevs;
+ struct ricoh583_subdev_info *subdevs;
+ int gpio_base;
+ int irq_base;
+
+ struct ricoh583_gpio_init_data *gpio_init_data;
+ int num_gpioinit_data;
+ bool enable_shutdown_pin;
+};
+
+extern int ricoh583_read(struct device *dev, uint8_t reg, uint8_t *val);
+extern int ricoh583_bulk_reads(struct device *dev, u8 reg, u8 count,
+ uint8_t *val);
+extern int ricoh583_write(struct device *dev, u8 reg, uint8_t val);
+extern int ricoh583_bulk_writes(struct device *dev, u8 reg, u8 count,
+ uint8_t *val);
+extern int ricoh583_set_bits(struct device *dev, u8 reg, uint8_t bit_mask);
+extern int ricoh583_clr_bits(struct device *dev, u8 reg, uint8_t bit_mask);
+extern int ricoh583_update(struct device *dev, u8 reg, uint8_t val,
+ uint8_t mask);
+extern int ricoh583_ext_power_req_config(struct device *dev,
+ enum ricoh583_deepsleep_control_id control_id,
+ enum ricoh583_ext_pwrreq_control ext_pwr_req,
+ int deepsleep_slot_nr);
+extern int ricoh583_power_off(void);
+
+#endif
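A hedged sketch of the external power-request helper declared above: route DC1 deep-sleep control to the PWRREQ1 pin. The device pointer would be the ricoh583 core device and the slot number is illustrative.

    #include <linux/mfd/ricoh583.h>

    static int board_ricoh583_dc1_follows_pwrreq1(struct device *ricoh_dev)
    {
            return ricoh583_ext_power_req_config(ricoh_dev, RICOH583_DS_DC1,
                                                 RICOH583_EXT_PWRREQ1_CONTROL,
                                                 0 /* deepsleep slot */);
    }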
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index b6bab1b04e25..e43184a43201 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -1,6 +1,10 @@
#ifndef __LINUX_MFD_TPS6586X_H
#define __LINUX_MFD_TPS6586X_H
+#define SM0_PWM_BIT 0
+#define SM1_PWM_BIT 1
+#define SM2_PWM_BIT 2
+
enum {
TPS6586X_ID_SM_0,
TPS6586X_ID_SM_1,
@@ -48,12 +52,60 @@ enum {
TPS6586X_INT_RTC_ALM2,
};
+enum pwm_pfm_mode {
+ PWM_ONLY,
+ AUTO_PWM_PFM,
+ PWM_DEFAULT_VALUE,
+
+};
+
+enum slew_rate_settings {
+ SLEW_RATE_INSTANTLY = 0,
+ SLEW_RATE_0110UV_PER_SEC = 0x1,
+ SLEW_RATE_0220UV_PER_SEC = 0x2,
+ SLEW_RATE_0440UV_PER_SEC = 0x3,
+ SLEW_RATE_0880UV_PER_SEC = 0x4,
+ SLEW_RATE_1760UV_PER_SEC = 0x5,
+ SLEW_RATE_3520UV_PER_SEC = 0x6,
+ SLEW_RATE_7040UV_PER_SEC = 0x7,
+ SLEW_RATE_DEFAULT_VALUE,
+};
+
+struct tps6586x_settings {
+ /* SM0, SM1 and SM2 have PWM-only and auto PWM/PFM mode */
+ enum pwm_pfm_mode sm_pwm_mode;
+ /* SM0 and SM1 have slew rate settings */
+ enum slew_rate_settings slew_rate;
+};
+
+enum {
+ TPS6586X_RTC_CL_SEL_1_5PF = 0x0,
+ TPS6586X_RTC_CL_SEL_6_5PF = 0x1,
+ TPS6586X_RTC_CL_SEL_7_5PF = 0x2,
+ TPS6586X_RTC_CL_SEL_12_5PF = 0x3,
+};
+
struct tps6586x_subdev_info {
int id;
const char *name;
void *platform_data;
};
+struct tps6586x_epoch_start {
+ int year;
+ int month;
+ int day;
+ int hour;
+ int min;
+ int sec;
+};
+
+struct tps6586x_rtc_platform_data {
+ int irq;
+ struct tps6586x_epoch_start start;
+ int cl_sel; /* internal XTAL capacitance, see TPS6586X_RTC_CL_SEL* */
+};
+
struct tps6586x_platform_data {
int num_subdevs;
struct tps6586x_subdev_info *subdevs;
@@ -74,5 +126,6 @@ extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
extern int tps6586x_update(struct device *dev, int reg, uint8_t val,
uint8_t mask);
+extern int tps6586x_power_off(void);
#endif /*__LINUX_MFD_TPS6586X_H */
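Hedged board-side sketches of the structures this hunk adds; how the regulator and RTC cells consume them is implemented elsewhere in the patch, so the values and wiring here are illustrative.

    #include <linux/mfd/tps6586x.h>

    /* SM0 buck: force PWM-only mode with the 3520 uV slew-rate setting */
    static struct tps6586x_settings board_sm0_settings = {
            .sm_pwm_mode = PWM_ONLY,
            .slew_rate   = SLEW_RATE_3520UV_PER_SEC,
    };

    /* RTC cell: epoch start plus 6.5 pF internal crystal load capacitance */
    static struct tps6586x_rtc_platform_data board_rtc_pdata = {
            .irq    = -1,           /* illustrative: no dedicated IRQ */
            .start  = {
                    .year = 2009, .month = 1, .day = 1,
                    .hour = 0, .min = 0, .sec = 0,
            },
            .cl_sel = TPS6586X_RTC_CL_SEL_6_5PF,
    };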
diff --git a/include/linux/mfd/tps6591x.h b/include/linux/mfd/tps6591x.h
new file mode 100644
index 000000000000..525a8616c44b
--- /dev/null
+++ b/include/linux/mfd/tps6591x.h
@@ -0,0 +1,123 @@
+/*
+ * include/linux/mfd/tps6591x.h
+ * Core driver interface for TI TPS6591x PMIC family
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS6591X_H
+#define __LINUX_MFD_TPS6591X_H
+
+#include <linux/rtc.h>
+
+enum {
+ TPS6591X_INT_PWRHOLD_F,
+ TPS6591X_INT_VMBHI,
+ TPS6591X_INT_PWRON,
+ TPS6591X_INT_PWRON_LP,
+ TPS6591X_INT_PWRHOLD_R,
+ TPS6591X_INT_HOTDIE,
+ TPS6591X_INT_RTC_ALARM,
+ TPS6591X_INT_RTC_PERIOD,
+ TPS6591X_INT_GPIO0,
+ TPS6591X_INT_GPIO1,
+ TPS6591X_INT_GPIO2,
+ TPS6591X_INT_GPIO3,
+ TPS6591X_INT_GPIO4,
+ TPS6591X_INT_GPIO5,
+ TPS6591X_INT_WTCHDG,
+ TPS6591X_INT_VMBCH2_H,
+ TPS6591X_INT_VMBCH2_L,
+ TPS6591X_INT_PWRDN,
+
+ /* Last entry */
+ TPS6591X_INT_NR,
+};
+
+/* Gpio definitions */
+enum {
+ TPS6591X_GPIO_GP0 = 0,
+ TPS6591X_GPIO_GP1 = 1,
+ TPS6591X_GPIO_GP2 = 2,
+ TPS6591X_GPIO_GP3 = 3,
+ TPS6591X_GPIO_GP4 = 4,
+ TPS6591X_GPIO_GP5 = 5,
+ TPS6591X_GPIO_GP6 = 6,
+ TPS6591X_GPIO_GP7 = 7,
+ TPS6591X_GPIO_GP8 = 8,
+
+ /* Last entry */
+ TPS6591X_GPIO_NR,
+};
+
+struct tps6591x_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct tps6591x_rtc_platform_data {
+ int irq;
+ struct rtc_time time;
+};
+
+struct tps6591x_sleep_keepon_data {
+ /* set 1 to maintain the following on sleep mode */
+ unsigned therm_keepon:1; /* thermal monitoring */
+ unsigned clkout32k_keepon:1; /* CLK32KOUT */
+ unsigned vrtc_keepon:1; /* LD0 full load capability */
+ unsigned i2chs_keepon:1; /* high speed internal clock */
+};
+
+struct tps6591x_gpio_init_data {
+ unsigned sleep_en:1; /* Enable sleep mode */
+ unsigned pulldn_en:1; /* Enable pull down */
+ unsigned output_mode_en:1; /* Enable output mode during init */
+ unsigned output_val:1; /* Output value if it is in output mode */
+ unsigned init_apply:1; /* Apply init data on configuring gpios*/
+};
+
+struct tps6591x_platform_data {
+ int gpio_base;
+ int irq_base;
+
+ int num_subdevs;
+ struct tps6591x_subdev_info *subdevs;
+
+ bool dev_slp_en;
+ struct tps6591x_sleep_keepon_data *slp_keepon;
+
+ struct tps6591x_gpio_init_data *gpio_init_data;
+ int num_gpioinit_data;
+};
+
+/*
+ * NOTE: the functions below are not intended for use outside
+ * of the TPS6591X sub-device drivers
+ */
+extern int tps6591x_write(struct device *dev, int reg, uint8_t val);
+extern int tps6591x_writes(struct device *dev, int reg, int len, uint8_t *val);
+extern int tps6591x_read(struct device *dev, int reg, uint8_t *val);
+extern int tps6591x_reads(struct device *dev, int reg, int len, uint8_t *val);
+extern int tps6591x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int tps6591x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int tps6591x_update(struct device *dev, int reg, uint8_t val,
+ uint8_t mask);
+extern int tps6591x_power_off(void);
+
+#endif /*__LINUX_MFD_TPS6591X_H */
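A hedged board-side sketch of the platform data above: keep the thermal monitor and 32 kHz clock alive in sleep and pre-configure one GPIO as a driven-high output. Base numbers are illustrative, and the gpio_init_data entries are assumed to map to GPIOs by array index since the struct carries no GPIO number.

    #include <linux/kernel.h>
    #include <linux/mfd/tps6591x.h>

    static struct tps6591x_sleep_keepon_data board_tps6591x_keepon = {
            .therm_keepon     = 1,
            .clkout32k_keepon = 1,
    };

    static struct tps6591x_gpio_init_data board_tps6591x_gpio[] = {
            {       /* entry 0 -> GP0 (assumed index mapping) */
                    .output_mode_en = 1,
                    .output_val     = 1,
                    .init_apply     = 1,
            },
    };

    static struct tps6591x_platform_data board_tps6591x_pdata = {
            .gpio_base         = 256,       /* illustrative */
            .irq_base          = 512,       /* illustrative */
            .dev_slp_en        = true,
            .slp_keepon        = &board_tps6591x_keepon,
            .gpio_init_data    = board_tps6591x_gpio,
            .num_gpioinit_data = ARRAY_SIZE(board_tps6591x_gpio),
            /* .subdevs / .num_subdevs: regulator and RTC cells, omitted */
    };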
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
new file mode 100644
index 000000000000..c6aa7eadbedd
--- /dev/null
+++ b/include/linux/mfd/tps80031.h
@@ -0,0 +1,204 @@
+/*
+ * include/linux/mfd/tps80031.h
+ *
+ * Core driver interface for TI TPS80031 PMIC
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS80031_H
+#define __LINUX_MFD_TPS80031_H
+
+#include <linux/rtc.h>
+
+/* Supported chips */
+enum chips {
+ TPS80031 = 0x00000001,
+ TPS80032 = 0x00000002,
+};
+
+enum {
+ TPS80031_INT_PWRON,
+ TPS80031_INT_RPWRON,
+ TPS80031_INT_SYS_VLOW,
+ TPS80031_INT_RTC_ALARM,
+ TPS80031_INT_RTC_PERIOD,
+ TPS80031_INT_HOT_DIE,
+ TPS80031_INT_VXX_SHORT,
+ TPS80031_INT_SPDURATION,
+ TPS80031_INT_WATCHDOG,
+ TPS80031_INT_BAT,
+ TPS80031_INT_SIM,
+ TPS80031_INT_MMC,
+ TPS80031_INT_RES,
+ TPS80031_INT_GPADC_RT,
+ TPS80031_INT_GPADC_SW2_EOC,
+ TPS80031_INT_CC_AUTOCAL,
+ TPS80031_INT_ID_WKUP,
+ TPS80031_INT_VBUSS_WKUP,
+ TPS80031_INT_ID,
+ TPS80031_INT_VBUS,
+ TPS80031_INT_CHRG_CTRL,
+ TPS80031_INT_EXT_CHRG,
+ TPS80031_INT_INT_CHRG,
+ TPS80031_INT_RES2,
+ TPS80031_INT_BAT_TEMP_OVRANGE,
+ TPS80031_INT_BAT_REMOVED,
+ TPS80031_INT_VBUS_DET,
+ TPS80031_INT_VAC_DET,
+ TPS80031_INT_FAULT_WDG,
+ TPS80031_INT_LINCH_GATED,
+
+ /* Last interrupt id to get the end number */
+ TPS80031_INT_NR,
+};
+
+enum adc_channel {
+ BATTERY_TYPE = 0, /* External ADC */
+ BATTERY_TEMPERATURE = 1, /* External ADC */
+ AUDIO_ACCESSORY = 2, /* External ADC */
+ TEMPERATURE_EXTERNAL_DIODE = 3, /* External ADC */
+ TEMPERATURE_MEASUREMENT = 4, /* External ADC */
+ GENERAL_PURPOSE_1 = 5, /* External ADC */
+ GENERAL_PURPOSE_2 = 6, /* External ADC */
+ SYSTEM_SUPPLY = 7, /* Internal ADC */
+ BACKUP_BATTERY = 8, /* Internal ADC */
+ EXTERNAL_CHARGER_INPUT = 9, /* Internal ADC */
+ VBUS = 10, /* Internal ADC */
+ VBUS_DCDC_OUTPUT_CURRENT = 11, /* Internal ADC */
+ DIE_TEMPERATURE_1 = 12, /* Internal ADC */
+ DIE_TEMPERATURE_2 = 13, /* Internal ADC */
+ USB_ID_LINE = 14, /* Internal ADC */
+ TEST_NETWORK_1 = 15, /* Internal ADC */
+ TEST_NETWORK_2 = 16, /* Internal ADC */
+ BATTERY_CHARGING_CURRENT = 17, /* Internal ADC */
+ BATTERY_VOLTAGE = 18, /* Internal ADC */
+};
+
+enum TPS80031_GPIO {
+ TPS80031_GPIO_REGEN1,
+ TPS80031_GPIO_REGEN2,
+ TPS80031_GPIO_SYSEN,
+
+ /* Last entry */
+ TPS80031_GPIO_NR,
+};
+
+enum TPS80031_CLOCK32K {
+ TPS80031_CLOCK32K_AO,
+ TPS80031_CLOCK32K_G,
+ TPS80031_CLOCK32K_AUDIO,
+
+ /* Last entry */
+ TPS80031_CLOCK32K_NR,
+};
+
+enum {
+ SLAVE_ID0 = 0,
+ SLAVE_ID1 = 1,
+ SLAVE_ID2 = 2,
+ SLAVE_ID3 = 3,
+};
+
+enum {
+ I2C_ID0_ADDR = 0x12,
+ I2C_ID1_ADDR = 0x48,
+ I2C_ID2_ADDR = 0x49,
+ I2C_ID3_ADDR = 0x4A,
+};
+
+/* External controls requests */
+enum tps80031_ext_control {
+ PWR_REQ_INPUT_NONE = 0x00000000,
+ PWR_REQ_INPUT_PREQ1 = 0x00000001,
+ PWR_REQ_INPUT_PREQ2 = 0x00000002,
+ PWR_REQ_INPUT_PREQ3 = 0x00000004,
+ PWR_OFF_ON_SLEEP = 0x00000008,
+ PWR_ON_ON_SLEEP = 0x00000010,
+};
+
+struct tps80031_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct tps80031_rtc_platform_data {
+ int irq;
+ struct rtc_time time;
+};
+
+struct tps80031_clk32k_init_data {
+ int clk32k_nr;
+ bool enable;
+ unsigned long ext_ctrl_flag;
+};
+
+struct tps80031_gpio_init_data {
+ int gpio_nr;
+ unsigned long ext_ctrl_flag;
+};
+
+struct tps80031_platform_data {
+ int num_subdevs;
+ struct tps80031_subdev_info *subdevs;
+ int gpio_base;
+ int irq_base;
+ struct tps80031_32kclock_plat_data *clk32k_pdata;
+ struct tps80031_gpio_init_data *gpio_init_data;
+ int gpio_init_data_size;
+ struct tps80031_clk32k_init_data *clk32k_init_data;
+ int clk32k_init_data_size;
+};
+
+struct tps80031_bg_platform_data {
+ int irq_base;
+ int battery_present;
+};
+
+/*
+ * NOTE: the functions below are not intended for use outside
+ * of the TPS80031 sub-device drivers
+ */
+extern int tps80031_write(struct device *dev, int sid, int reg, uint8_t val);
+extern int tps80031_writes(struct device *dev, int sid, int reg, int len,
+ uint8_t *val);
+extern int tps80031_read(struct device *dev, int sid, int reg, uint8_t *val);
+extern int tps80031_reads(struct device *dev, int sid, int reg, int len,
+ uint8_t *val);
+extern int tps80031_set_bits(struct device *dev, int sid, int reg,
+ uint8_t bit_mask);
+extern int tps80031_clr_bits(struct device *dev, int sid, int reg,
+ uint8_t bit_mask);
+extern int tps80031_update(struct device *dev, int sid, int reg, uint8_t val,
+ uint8_t mask);
+extern int tps80031_force_update(struct device *dev, int sid, int reg,
+ uint8_t val, uint8_t mask);
+extern int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add);
+
+extern int tps80031_power_off(void);
+
+extern unsigned long tps80031_get_chip_info(struct device *dev);
+
+extern int tps80031_gpadc_conversion(int channel_no);
+
+extern int tps80031_get_pmu_version(struct device *dev);
+
+#endif /*__LINUX_MFD_TPS80031_H */
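A hedged sketch of the 32 kHz clock and GPIO init data declared above, using the external-control flags from enum tps80031_ext_control; the choice of clock, GPIO and flags is illustrative.

    #include <linux/mfd/tps80031.h>

    /* audio 32 kHz output enabled, gated by the external PREQ1 line */
    static struct tps80031_clk32k_init_data board_tps80031_clk32k[] = {
            {
                    .clk32k_nr     = TPS80031_CLOCK32K_AUDIO,
                    .enable        = true,
                    .ext_ctrl_flag = PWR_REQ_INPUT_PREQ1,
            },
    };

    /* REGEN1 follows PREQ1 and is turned off when the PMIC sleeps */
    static struct tps80031_gpio_init_data board_tps80031_gpio[] = {
            {
                    .gpio_nr       = TPS80031_GPIO_REGEN1,
                    .ext_ctrl_flag = PWR_REQ_INPUT_PREQ1 | PWR_OFF_ON_SLEEP,
            },
    };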
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fedc5f0e62ea..d1d9840093f2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -871,6 +871,7 @@ extern bool skip_free_areas_node(unsigned int flags, int nid);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
int shmem_zero_setup(struct vm_area_struct *);
extern int can_do_mlock(void);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1d09562ccf73..51128f8a5775 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -12,6 +12,7 @@
#include <linux/leds.h>
#include <linux/sched.h>
+#include <linux/wakelock.h>
#include <linux/mmc/core.h>
#include <linux/mmc/pm.h>
@@ -280,10 +281,15 @@ struct mmc_host {
int claim_cnt; /* "claim" nesting count */
struct delayed_work detect;
+ struct wake_lock detect_wake_lock;
const struct mmc_bus_ops *bus_ops; /* current bus driver */
unsigned int bus_refs; /* reference counter */
+ unsigned int bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
atomic_t sdio_irq_thread_abort;
@@ -302,6 +308,15 @@ struct mmc_host {
struct mmc_async_req *areq; /* active async req */
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ struct {
+ struct sdio_cis *cis;
+ struct sdio_cccr *cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+ } embedded_sdio_data;
+#endif
+
unsigned long private[0] ____cacheline_aligned;
};
@@ -310,6 +325,14 @@ extern int mmc_add_host(struct mmc_host *);
extern void mmc_remove_host(struct mmc_host *);
extern void mmc_free_host(struct mmc_host *);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs);
+#endif
+
static inline void *mmc_priv(struct mmc_host *host)
{
return (void *)host->private;
@@ -320,6 +343,18 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+ if (manual)
+ host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+ else
+ host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
extern int mmc_suspend_host(struct mmc_host *);
extern int mmc_resume_host(struct mmc_host *);
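A hedged sketch of the deferred ("manual") bus-resume interface added above, as a board's SDIO/WLAN glue might use it; the exact handshake with the core lives in drivers/mmc/core elsewhere in this patch, so the flow here is an assumption.

    #include <linux/mmc/host.h>

    /* at setup time: tell the core not to resume this bus on its own */
    static void board_wifi_defer_bus_resume(struct mmc_host *host)
    {
            mmc_set_bus_resume_policy(host, 1);     /* manual resume */
    }

    /* later, when the WLAN driver actually needs the card back */
    static int board_wifi_wake_bus(struct mmc_host *host)
    {
            if (mmc_bus_needs_resume(host))
                    return mmc_resume_bus(host);
            return 0;
    }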
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 4a139204c20c..6e2d6a135c7e 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -26,5 +26,6 @@ typedef unsigned int mmc_pm_flag_t;
#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY (1 << 2) /* ignore mmc pm notify */
#endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 5666f3abfab7..85e7850b88af 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -21,72 +21,74 @@ struct sdhci_host {
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
- unsigned int quirks; /* Deviations from spec. */
+ u64 quirks; /* Deviations from spec. */
/* Controller doesn't honor resets unless we touch the clock register */
-#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
+#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1ULL<<0)
/* Controller has bad caps bits, but really supports DMA */
-#define SDHCI_QUIRK_FORCE_DMA (1<<1)
+#define SDHCI_QUIRK_FORCE_DMA (1ULL<<1)
/* Controller doesn't like to be reset when there is no card inserted. */
-#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
+#define SDHCI_QUIRK_NO_CARD_NO_RESET (1ULL<<2)
/* Controller doesn't like clearing the power reg before a change */
-#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
+#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1ULL<<3)
/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
+#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1ULL<<4)
/* Controller has an unusable DMA engine */
-#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
+#define SDHCI_QUIRK_BROKEN_DMA (1ULL<<5)
/* Controller has an unusable ADMA engine */
-#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
+#define SDHCI_QUIRK_BROKEN_ADMA (1ULL<<6)
/* Controller can only DMA from 32-bit aligned addresses */
-#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
+#define SDHCI_QUIRK_32BIT_DMA_ADDR (1ULL<<7)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
+#define SDHCI_QUIRK_32BIT_DMA_SIZE (1ULL<<8)
/* Controller can only ADMA chunks that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
+#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1ULL<<9)
/* Controller needs to be reset after each request to stay stable */
-#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
+#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1ULL<<10)
/* Controller needs voltage and power writes to happen separately */
-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
+#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1ULL<<11)
/* Controller provides an incorrect timeout value for transfers */
-#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
+#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1ULL<<12)
/* Controller has an issue with buffer bits for small transfers */
-#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
+#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1ULL<<13)
/* Controller does not provide transfer-complete interrupt when not busy */
-#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
+#define SDHCI_QUIRK_NO_BUSY_IRQ (1ULL<<14)
/* Controller has unreliable card detection */
-#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
+#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1ULL<<15)
/* Controller reports inverted write-protect state */
-#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1ULL<<16)
/* Controller has nonstandard clock management */
-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
+#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1ULL<<17)
/* Controller does not like fast PIO transfers */
-#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
+#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1ULL<<18)
/* Controller losing signal/interrupt enable states after reset */
-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
+#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1ULL<<19)
/* Controller has to be forced to use block size of 2048 bytes */
-#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1ULL<<20)
/* Controller cannot do multi-block transfers */
-#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
+#define SDHCI_QUIRK_NO_MULTIBLOCK (1ULL<<21)
/* Controller can only handle 1-bit data transfers */
-#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
+#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1ULL<<22)
/* Controller needs 10ms delay between applying power and clock */
-#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
+#define SDHCI_QUIRK_DELAY_AFTER_POWER (1ULL<<23)
/* Controller uses SDCLK instead of TMCLK for data timeouts */
-#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
+#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1ULL<<24)
/* Controller reports wrong base clock capability */
-#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1ULL<<25)
/* Controller cannot support End Attribute in NOP ADMA descriptor */
-#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1ULL<<26)
/* Controller is missing device caps. Use caps provided by host */
-#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
+#define SDHCI_QUIRK_MISSING_CAPS (1ULL<<27)
/* Controller uses Auto CMD12 command to stop the transfer */
-#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
+#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1ULL<<28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
-#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
+#define SDHCI_QUIRK_NO_HISPD_BIT (1ULL<<29)
/* Controller treats ADMA descriptors with length 0000h incorrectly */
-#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1ULL<<30)
/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
-#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31)
+#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1ULL<<31)
+/* Controller cannot report the line status in present state register */
+#define SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING (1ULL<<32)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -145,6 +147,7 @@ struct sdhci_host {
struct tasklet_struct finish_tasklet;
struct timer_list timer; /* Timer for timeouts */
+ unsigned int card_int_set; /* card int status */
unsigned int caps; /* Alternative capabilities */
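Since quirks is now a u64, platform code can OR in quirk bits above bit 31 without truncation; a trivial sketch:

    #include <linux/mmc/sdhci.h>

    static void example_set_quirks(struct sdhci_host *host)
    {
            host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION |       /* bit 15 */
                            SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING;    /* bit 32 */
    }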
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 50f0bc952328..dc680c4b50d4 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -23,6 +23,14 @@ struct sdio_func;
typedef void (sdio_irq_handler_t)(struct sdio_func *);
/*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+ uint8_t f_class;
+ uint32_t f_maxblksize;
+};
+
+/*
* SDIO function CIS tuple (unknown to the core)
*/
struct sdio_func_tuple {
@@ -130,6 +138,8 @@ extern int sdio_release_irq(struct sdio_func *func);
extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+ unsigned in);
extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
diff --git a/include/linux/mpu.h b/include/linux/mpu.h
new file mode 100644
index 000000000000..fd66ba0db875
--- /dev/null
+++ b/include/linux/mpu.h
@@ -0,0 +1,366 @@
+/*
+ $License:
+ Copyright (C) 2011 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MPU_H_
+#define __MPU_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Number of axes on each sensor */
+#define GYRO_NUM_AXES (3)
+#define ACCEL_NUM_AXES (3)
+#define COMPASS_NUM_AXES (3)
+
+struct mpu_read_write {
+ /* Memory address or register address depending on ioctl */
+ __u16 address;
+ __u16 length;
+ __u8 *data;
+};
+
+enum mpuirq_data_type {
+ MPUIRQ_DATA_TYPE_MPU_IRQ,
+ MPUIRQ_DATA_TYPE_SLAVE_IRQ,
+ MPUIRQ_DATA_TYPE_PM_EVENT,
+ MPUIRQ_DATA_TYPE_NUM_TYPES,
+};
+
+/* User space PM event notification */
+#define MPU_PM_EVENT_SUSPEND_PREPARE (3)
+#define MPU_PM_EVENT_POST_SUSPEND (4)
+
+struct mpuirq_data {
+ __u32 interruptcount;
+ __u64 irqtime;
+ __u32 data_type;
+ __s32 data;
+};
+
+enum ext_slave_config_key {
+ MPU_SLAVE_CONFIG_ODR_SUSPEND,
+ MPU_SLAVE_CONFIG_ODR_RESUME,
+ MPU_SLAVE_CONFIG_FSR_SUSPEND,
+ MPU_SLAVE_CONFIG_FSR_RESUME,
+ MPU_SLAVE_CONFIG_MOT_THS,
+ MPU_SLAVE_CONFIG_NMOT_THS,
+ MPU_SLAVE_CONFIG_MOT_DUR,
+ MPU_SLAVE_CONFIG_NMOT_DUR,
+ MPU_SLAVE_CONFIG_IRQ_SUSPEND,
+ MPU_SLAVE_CONFIG_IRQ_RESUME,
+ MPU_SLAVE_WRITE_REGISTERS,
+ MPU_SLAVE_READ_REGISTERS,
+ MPU_SLAVE_CONFIG_INTERNAL_REFERENCE,
+ /* AMI 306 specific config keys */
+ MPU_SLAVE_PARAM,
+ MPU_SLAVE_WINDOW,
+ MPU_SLAVE_READWINPARAMS,
+ MPU_SLAVE_SEARCHOFFSET,
+ /* AKM specific config keys */
+ MPU_SLAVE_READ_SCALE,
+ /* MPU3050 and MPU6050 Keys */
+ MPU_SLAVE_INT_CONFIG,
+ MPU_SLAVE_EXT_SYNC,
+ MPU_SLAVE_FULL_SCALE,
+ MPU_SLAVE_LPF,
+ MPU_SLAVE_CLK_SRC,
+ MPU_SLAVE_DIVIDER,
+ MPU_SLAVE_DMP_ENABLE,
+ MPU_SLAVE_FIFO_ENABLE,
+ MPU_SLAVE_DMP_CFG1,
+ MPU_SLAVE_DMP_CFG2,
+ MPU_SLAVE_TC,
+ MPU_SLAVE_GYRO,
+ MPU_SLAVE_ADDR,
+ MPU_SLAVE_PRODUCT_REVISION,
+ MPU_SLAVE_SILICON_REVISION,
+ MPU_SLAVE_PRODUCT_ID,
+ MPU_SLAVE_GYRO_SENS_TRIM,
+ MPU_SLAVE_ACCEL_SENS_TRIM,
+ MPU_SLAVE_RAM,
+ /* -------------------------- */
+ MPU_SLAVE_CONFIG_NUM_CONFIG_KEYS
+};
+
+/* For the MPU_SLAVE_CONFIG_IRQ_SUSPEND and MPU_SLAVE_CONFIG_IRQ_RESUME */
+enum ext_slave_config_irq_type {
+ MPU_SLAVE_IRQ_TYPE_NONE,
+ MPU_SLAVE_IRQ_TYPE_MOTION,
+ MPU_SLAVE_IRQ_TYPE_DATA_READY,
+};
+
+/* Structure for the following IOCTLs:
+ * MPU_CONFIG_GYRO
+ * MPU_CONFIG_ACCEL
+ * MPU_CONFIG_COMPASS
+ * MPU_CONFIG_PRESSURE
+ * MPU_GET_CONFIG_GYRO
+ * MPU_GET_CONFIG_ACCEL
+ * MPU_GET_CONFIG_COMPASS
+ * MPU_GET_CONFIG_PRESSURE
+ *
+ * @key one of enum ext_slave_config_key
+ * @len length of the data pointed to by data
+ * @apply zero if communication with the chip is not necessary: the value is
+ * only cached, to be pushed to the chip later. Non-zero to apply it
+ * immediately. If non-zero and the slave is on the secondary bus, the
+ * MPU will first enter bypass mode before calling the slave's .config
+ * or .get_config function.
+ * @data pointer to the data to configure or get
+ */
+struct ext_slave_config {
+ __u8 key;
+ __u16 len;
+ __u8 apply;
+ void *data;
+};
+
+enum ext_slave_type {
+ EXT_SLAVE_TYPE_GYROSCOPE,
+ EXT_SLAVE_TYPE_ACCEL,
+ EXT_SLAVE_TYPE_COMPASS,
+ EXT_SLAVE_TYPE_PRESSURE,
+ /*EXT_SLAVE_TYPE_TEMPERATURE */
+
+ EXT_SLAVE_NUM_TYPES
+};
+
+enum ext_slave_id {
+ ID_INVALID = 0,
+
+ ACCEL_ID_LIS331,
+ ACCEL_ID_LSM303DLX,
+ ACCEL_ID_LIS3DH,
+ ACCEL_ID_KXSD9,
+ ACCEL_ID_KXTF9,
+ ACCEL_ID_BMA150,
+ ACCEL_ID_BMA222,
+ ACCEL_ID_BMA250,
+ ACCEL_ID_ADXL34X,
+ ACCEL_ID_MMA8450,
+ ACCEL_ID_MMA845X,
+ ACCEL_ID_MPU6050,
+
+ COMPASS_ID_AK8975,
+ COMPASS_ID_AK8972,
+ COMPASS_ID_AMI30X,
+ COMPASS_ID_AMI306,
+ COMPASS_ID_YAS529,
+ COMPASS_ID_YAS530,
+ COMPASS_ID_HMC5883,
+ COMPASS_ID_LSM303DLH,
+ COMPASS_ID_LSM303DLM,
+ COMPASS_ID_MMC314X,
+ COMPASS_ID_HSCDTD002B,
+ COMPASS_ID_HSCDTD004A,
+
+ PRESSURE_ID_BMA085,
+};
+
+enum ext_slave_endian {
+ EXT_SLAVE_BIG_ENDIAN,
+ EXT_SLAVE_LITTLE_ENDIAN,
+ EXT_SLAVE_FS8_BIG_ENDIAN,
+ EXT_SLAVE_FS16_BIG_ENDIAN,
+};
+
+enum ext_slave_bus {
+ EXT_SLAVE_BUS_INVALID = -1,
+ EXT_SLAVE_BUS_PRIMARY = 0,
+ EXT_SLAVE_BUS_SECONDARY = 1
+};
+
+
+/**
+ * struct ext_slave_platform_data - Platform data for mpu3050 and mpu6050
+ * slave devices
+ *
+ * @type: the type of slave device based on the enum ext_slave_type
+ * definitions.
+ * @irq: the irq number attached to the slave if any.
+ * @adapt_num: the I2C adapter number.
+ * @bus: the bus the slave is attached to: enum ext_slave_bus
+ * @address: the I2C slave address of the slave device.
+ * @orientation: the mounting matrix of the device relative to MPU.
+ * @irq_data: private data for the slave irq handler
+ * @private_data: additional data, user customizable. Not touched by the MPU
+ * driver.
+ *
+ * The orientation matrices are 3x3 rotation matrices
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation. The values must be one of 0, 1, or -1 and each row and
+ * column should have exactly 1 non-zero value.
+ */
+struct ext_slave_platform_data {
+ __u8 type;
+ __u32 irq;
+ __u32 adapt_num;
+ __u32 bus;
+ __u8 address;
+ __s8 orientation[9];
+ void *irq_data;
+ void *private_data;
+};
+
+struct fix_pnt_range {
+ __s32 mantissa;
+ __s32 fraction;
+};
+
+static inline long range_fixedpoint_to_long_mg(struct fix_pnt_range rng)
+{
+ return (long)(rng.mantissa * 1000 + rng.fraction / 10);
+}
+
+struct ext_slave_read_trigger {
+ __u8 reg;
+ __u8 value;
+};
+
+/**
+ * struct ext_slave_descr - Description of the slave device for programming.
+ *
+ * @suspend: function pointer to put the device in suspended state
+ * @resume: function pointer to put the device in running state
+ * @read: function that reads the device data
+ * @init: function used to preallocate memory used by the driver
+ * @exit: function used to free memory allocated for the driver
+ * @config: function used to configure the device
+ * @get_config:function used to get the device's configuration
+ *
+ * @name: text name of the device
+ * @type: device type. enum ext_slave_type
+ * @id: enum ext_slave_id
+ * @read_reg: starting register address to retrieve data.
+ * @read_len: length in bytes of the sensor data. Typically 6.
+ * @endian: byte order of the data. enum ext_slave_endian
+ * @range: full scale range of the slave output: struct fix_pnt_range
+ * @trigger: If reading data first requires writing a register this is the
+ * data to write.
+ *
+ * Defines the functions and information the mpu3050 and mpu6050 drivers
+ * need in order to use the slave device.
+ */
+struct ext_slave_descr {
+ int (*init) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata);
+ int (*exit) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata);
+ int (*suspend) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata);
+ int (*resume) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata);
+ int (*read) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ __u8 *data);
+ int (*config) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *config);
+ int (*get_config) (void *mlsl_handle,
+ struct ext_slave_descr *slave,
+ struct ext_slave_platform_data *pdata,
+ struct ext_slave_config *config);
+
+ char *name;
+ __u8 type;
+ __u8 id;
+ __u8 read_reg;
+ __u8 read_len;
+ __u8 endian;
+ struct fix_pnt_range range;
+ struct ext_slave_read_trigger *trigger;
+};
+
+/**
+ * struct mpu_platform_data - Platform data for the mpu driver
+ * @int_config: Bits [7:3] of the int config register.
+ * @level_shifter: 0: VLogic, 1: VDD
+ * @orientation: Orientation matrix of the gyroscope
+ *
+ * Contains platform specific information on how to configure the MPU3050 to
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation. The values must be one of 0, 1, or -1 and each row and
+ * column should have exactly 1 non-zero value.
+ */
+struct mpu_platform_data {
+ __u8 int_config;
+ __u8 level_shifter;
+ __s8 orientation[GYRO_NUM_AXES * GYRO_NUM_AXES];
+};
+
+#define MPU_IOCTL (0x81) /* Magic number for MPU ioctls */
+/* IOCTL commands for /dev/mpu */
+
+/*--------------------------------------------------------------------------
+ * Deprecated, debugging only
+ */
+#define MPU_SET_MPU_PLATFORM_DATA \
+ _IOWR(MPU_IOCTL, 0x01, struct mpu_platform_data)
+#define MPU_SET_EXT_SLAVE_PLATFORM_DATA \
+ _IOWR(MPU_IOCTL, 0x01, struct ext_slave_platform_data)
+/*--------------------------------------------------------------------------*/
+#define MPU_GET_EXT_SLAVE_PLATFORM_DATA \
+ _IOWR(MPU_IOCTL, 0x02, struct ext_slave_platform_data)
+#define MPU_GET_MPU_PLATFORM_DATA \
+ _IOWR(MPU_IOCTL, 0x02, struct mpu_platform_data)
+#define MPU_GET_EXT_SLAVE_DESCR \
+ _IOWR(MPU_IOCTL, 0x02, struct ext_slave_descr)
+
+#define MPU_READ _IOWR(MPU_IOCTL, 0x10, struct mpu_read_write)
+#define MPU_WRITE _IOW(MPU_IOCTL, 0x10, struct mpu_read_write)
+#define MPU_READ_MEM _IOWR(MPU_IOCTL, 0x11, struct mpu_read_write)
+#define MPU_WRITE_MEM _IOW(MPU_IOCTL, 0x11, struct mpu_read_write)
+#define MPU_READ_FIFO _IOWR(MPU_IOCTL, 0x12, struct mpu_read_write)
+#define MPU_WRITE_FIFO _IOW(MPU_IOCTL, 0x12, struct mpu_read_write)
+
+#define MPU_READ_COMPASS _IOR(MPU_IOCTL, 0x12, __u8)
+#define MPU_READ_ACCEL _IOR(MPU_IOCTL, 0x13, __u8)
+#define MPU_READ_PRESSURE _IOR(MPU_IOCTL, 0x14, __u8)
+
+#define MPU_CONFIG_GYRO _IOW(MPU_IOCTL, 0x20, struct ext_slave_config)
+#define MPU_CONFIG_ACCEL _IOW(MPU_IOCTL, 0x21, struct ext_slave_config)
+#define MPU_CONFIG_COMPASS _IOW(MPU_IOCTL, 0x22, struct ext_slave_config)
+#define MPU_CONFIG_PRESSURE _IOW(MPU_IOCTL, 0x23, struct ext_slave_config)
+
+#define MPU_GET_CONFIG_GYRO _IOWR(MPU_IOCTL, 0x20, struct ext_slave_config)
+#define MPU_GET_CONFIG_ACCEL _IOWR(MPU_IOCTL, 0x21, struct ext_slave_config)
+#define MPU_GET_CONFIG_COMPASS _IOWR(MPU_IOCTL, 0x22, struct ext_slave_config)
+#define MPU_GET_CONFIG_PRESSURE _IOWR(MPU_IOCTL, 0x23, struct ext_slave_config)
+
+#define MPU_SUSPEND _IOW(MPU_IOCTL, 0x30, __u32)
+#define MPU_RESUME _IOW(MPU_IOCTL, 0x31, __u32)
+/* Userspace PM Event response */
+#define MPU_PM_EVENT_HANDLED _IO(MPU_IOCTL, 0x32)
+
+#define MPU_GET_REQUESTED_SENSORS _IOR(MPU_IOCTL, 0x40, __u8)
+#define MPU_SET_REQUESTED_SENSORS _IOW(MPU_IOCTL, 0x40, __u8)
+#define MPU_GET_IGNORE_SYSTEM_SUSPEND _IOR(MPU_IOCTL, 0x41, __u8)
+#define MPU_SET_IGNORE_SYSTEM_SUSPEND _IOW(MPU_IOCTL, 0x41, __u8)
+#define MPU_GET_MLDL_STATUS _IOR(MPU_IOCTL, 0x42, __u8)
+#define MPU_GET_I2C_SLAVES_ENABLED _IOR(MPU_IOCTL, 0x43, __u8)
+
+
+#endif /* __MPU_H_ */
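A hedged userspace sketch of the /dev/mpu config interface above: push a new resume-time output data rate to the accelerometer slave through struct ext_slave_config. The units expected for the value are defined by the slave driver and not shown here.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mpu.h>

    static int set_accel_resume_odr(int odr)
    {
            struct ext_slave_config cfg = {
                    .key   = MPU_SLAVE_CONFIG_ODR_RESUME,
                    .len   = sizeof(odr),
                    .apply = 1,             /* push to the chip now */
                    .data  = &odr,
            };
            int fd = open("/dev/mpu", O_RDWR);
            int err;

            if (fd < 0)
                    return fd;
            err = ioctl(fd, MPU_CONFIG_ACCEL, &cfg);
            close(fd);
            return err;
    }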
diff --git a/include/linux/mpu3050.h b/include/linux/mpu3050.h
new file mode 100644
index 000000000000..a8dcd5a9473f
--- /dev/null
+++ b/include/linux/mpu3050.h
@@ -0,0 +1,255 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+#ifndef __MPU3050_H_
+#define __MPU3050_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#endif
+
+#ifdef M_HW
+#error MPU6000 build including MPU3050 header
+#endif
+
+#define MPU_NAME "mpu3050"
+#define DEFAULT_MPU_SLAVEADDR 0x68
+
+/*==== MPU REGISTER SET ====*/
+enum mpu_register {
+ MPUREG_WHO_AM_I = 0, /* 00 0x00 */
+ MPUREG_PRODUCT_ID, /* 01 0x01 */
+ MPUREG_02_RSVD, /* 02 0x02 */
+ MPUREG_03_RSVD, /* 03 0x03 */
+ MPUREG_04_RSVD, /* 04 0x04 */
+ MPUREG_XG_OFFS_TC, /* 05 0x05 */
+ MPUREG_06_RSVD, /* 06 0x06 */
+ MPUREG_07_RSVD, /* 07 0x07 */
+ MPUREG_YG_OFFS_TC, /* 08 0x08 */
+ MPUREG_09_RSVD, /* 09 0x09 */
+ MPUREG_0A_RSVD, /* 10 0x0a */
+ MPUREG_ZG_OFFS_TC, /* 11 0x0b */
+ MPUREG_X_OFFS_USRH, /* 12 0x0c */
+ MPUREG_X_OFFS_USRL, /* 13 0x0d */
+ MPUREG_Y_OFFS_USRH, /* 14 0x0e */
+ MPUREG_Y_OFFS_USRL, /* 15 0x0f */
+ MPUREG_Z_OFFS_USRH, /* 16 0x10 */
+ MPUREG_Z_OFFS_USRL, /* 17 0x11 */
+ MPUREG_FIFO_EN1, /* 18 0x12 */
+ MPUREG_FIFO_EN2, /* 19 0x13 */
+ MPUREG_AUX_SLV_ADDR, /* 20 0x14 */
+ MPUREG_SMPLRT_DIV, /* 21 0x15 */
+ MPUREG_DLPF_FS_SYNC, /* 22 0x16 */
+ MPUREG_INT_CFG, /* 23 0x17 */
+ MPUREG_ACCEL_BURST_ADDR,/* 24 0x18 */
+ MPUREG_19_RSVD, /* 25 0x19 */
+ MPUREG_INT_STATUS, /* 26 0x1a */
+ MPUREG_TEMP_OUT_H, /* 27 0x1b */
+ MPUREG_TEMP_OUT_L, /* 28 0x1c */
+ MPUREG_GYRO_XOUT_H, /* 29 0x1d */
+ MPUREG_GYRO_XOUT_L, /* 30 0x1e */
+ MPUREG_GYRO_YOUT_H, /* 31 0x1f */
+ MPUREG_GYRO_YOUT_L, /* 32 0x20 */
+ MPUREG_GYRO_ZOUT_H, /* 33 0x21 */
+ MPUREG_GYRO_ZOUT_L, /* 34 0x22 */
+ MPUREG_23_RSVD, /* 35 0x23 */
+ MPUREG_24_RSVD, /* 36 0x24 */
+ MPUREG_25_RSVD, /* 37 0x25 */
+ MPUREG_26_RSVD, /* 38 0x26 */
+ MPUREG_27_RSVD, /* 39 0x27 */
+ MPUREG_28_RSVD, /* 40 0x28 */
+ MPUREG_29_RSVD, /* 41 0x29 */
+ MPUREG_2A_RSVD, /* 42 0x2a */
+ MPUREG_2B_RSVD, /* 43 0x2b */
+ MPUREG_2C_RSVD, /* 44 0x2c */
+ MPUREG_2D_RSVD, /* 45 0x2d */
+ MPUREG_2E_RSVD, /* 46 0x2e */
+ MPUREG_2F_RSVD, /* 47 0x2f */
+ MPUREG_30_RSVD, /* 48 0x30 */
+ MPUREG_31_RSVD, /* 49 0x31 */
+ MPUREG_32_RSVD, /* 50 0x32 */
+ MPUREG_33_RSVD, /* 51 0x33 */
+ MPUREG_34_RSVD, /* 52 0x34 */
+ MPUREG_DMP_CFG_1, /* 53 0x35 */
+ MPUREG_DMP_CFG_2, /* 54 0x36 */
+ MPUREG_BANK_SEL, /* 55 0x37 */
+ MPUREG_MEM_START_ADDR, /* 56 0x38 */
+ MPUREG_MEM_R_W, /* 57 0x39 */
+ MPUREG_FIFO_COUNTH, /* 58 0x3a */
+ MPUREG_FIFO_COUNTL, /* 59 0x3b */
+ MPUREG_FIFO_R_W, /* 60 0x3c */
+ MPUREG_USER_CTRL, /* 61 0x3d */
+ MPUREG_PWR_MGM, /* 62 0x3e */
+ MPUREG_3F_RSVD, /* 63 0x3f */
+ NUM_OF_MPU_REGISTERS /* 64 0x40 */
+};
+
+/*==== BITS FOR MPU ====*/
+
+/*---- MPU 'FIFO_EN1' register (12) ----*/
+#define BIT_TEMP_OUT 0x80
+#define BIT_GYRO_XOUT 0x40
+#define BIT_GYRO_YOUT 0x20
+#define BIT_GYRO_ZOUT 0x10
+#define BIT_ACCEL_XOUT 0x08
+#define BIT_ACCEL_YOUT 0x04
+#define BIT_ACCEL_ZOUT 0x02
+#define BIT_AUX_1OUT 0x01
+/*---- MPU 'FIFO_EN2' register (13) ----*/
+#define BIT_AUX_2OUT 0x02
+#define BIT_AUX_3OUT 0x01
+/*---- MPU 'DLPF_FS_SYNC' register (16) ----*/
+#define BITS_EXT_SYNC_NONE 0x00
+#define BITS_EXT_SYNC_TEMP 0x20
+#define BITS_EXT_SYNC_GYROX 0x40
+#define BITS_EXT_SYNC_GYROY 0x60
+#define BITS_EXT_SYNC_GYROZ 0x80
+#define BITS_EXT_SYNC_ACCELX 0xA0
+#define BITS_EXT_SYNC_ACCELY 0xC0
+#define BITS_EXT_SYNC_ACCELZ 0xE0
+#define BITS_EXT_SYNC_MASK 0xE0
+#define BITS_FS_250DPS 0x00
+#define BITS_FS_500DPS 0x08
+#define BITS_FS_1000DPS 0x10
+#define BITS_FS_2000DPS 0x18
+#define BITS_FS_MASK 0x18
+#define BITS_DLPF_CFG_256HZ_NOLPF2 0x00
+#define BITS_DLPF_CFG_188HZ 0x01
+#define BITS_DLPF_CFG_98HZ 0x02
+#define BITS_DLPF_CFG_42HZ 0x03
+#define BITS_DLPF_CFG_20HZ 0x04
+#define BITS_DLPF_CFG_10HZ 0x05
+#define BITS_DLPF_CFG_5HZ 0x06
+#define BITS_DLPF_CFG_2100HZ_NOLPF 0x07
+#define BITS_DLPF_CFG_MASK 0x07
+/*---- MPU 'INT_CFG' register (17) ----*/
+#define BIT_ACTL 0x80
+#define BIT_ACTL_LOW 0x80
+#define BIT_ACTL_HIGH 0x00
+#define BIT_OPEN 0x40
+#define BIT_OPEN_DRAIN 0x40
+#define BIT_PUSH_PULL 0x00
+#define BIT_LATCH_INT_EN 0x20
+#define BIT_INT_PULSE_WIDTH_50US 0x00
+#define BIT_INT_ANYRD_2CLEAR 0x10
+#define BIT_INT_STAT_READ_2CLEAR 0x00
+#define BIT_MPU_RDY_EN 0x04
+#define BIT_DMP_INT_EN 0x02
+#define BIT_RAW_RDY_EN 0x01
+/*---- MPU 'INT_STATUS' register (1A) ----*/
+#define BIT_INT_STATUS_FIFO_OVERLOW 0x80
+#define BIT_MPU_RDY 0x04
+#define BIT_DMP_INT 0x02
+#define BIT_RAW_RDY 0x01
+/*---- MPU 'BANK_SEL' register (37) ----*/
+#define BIT_PRFTCH_EN 0x20
+#define BIT_CFG_USER_BANK 0x10
+#define BITS_MEM_SEL 0x0f
+/*---- MPU 'USER_CTRL' register (3D) ----*/
+#define BIT_DMP_EN 0x80
+#define BIT_FIFO_EN 0x40
+#define BIT_AUX_IF_EN 0x20
+#define BIT_AUX_RD_LENG 0x10
+#define BIT_AUX_IF_RST 0x08
+#define BIT_DMP_RST 0x04
+#define BIT_FIFO_RST 0x02
+#define BIT_GYRO_RST 0x01
+/*---- MPU 'PWR_MGM' register (3E) ----*/
+#define BIT_H_RESET 0x80
+#define BIT_SLEEP 0x40
+#define BIT_STBY_XG 0x20
+#define BIT_STBY_YG 0x10
+#define BIT_STBY_ZG 0x08
+#define BITS_CLKSEL 0x07
+
+/*---- MPU Silicon Revision ----*/
+#define MPU_SILICON_REV_A4 1 /* MPU A4 Device */
+#define MPU_SILICON_REV_B1 2 /* MPU B1 Device */
+#define MPU_SILICON_REV_B4 3 /* MPU B4 Device */
+#define MPU_SILICON_REV_B6 4 /* MPU B6 Device */
+
+/*---- MPU Memory ----*/
+#define MPU_MEM_BANK_SIZE (256)
+#define FIFO_HW_SIZE (512)
+
+enum MPU_MEMORY_BANKS {
+ MPU_MEM_RAM_BANK_0 = 0,
+ MPU_MEM_RAM_BANK_1,
+ MPU_MEM_RAM_BANK_2,
+ MPU_MEM_RAM_BANK_3,
+ MPU_MEM_NUM_RAM_BANKS,
+ MPU_MEM_OTP_BANK_0 = MPU_MEM_NUM_RAM_BANKS,
+ /* This one is always last */
+ MPU_MEM_NUM_BANKS
+};
+
+#define MPU_NUM_AXES (3)
+
+/*---- structure containing control variables used by MLDL ----*/
+/*---- MPU clock source settings ----*/
+/*---- MPU filter selections ----*/
+enum mpu_filter {
+ MPU_FILTER_256HZ_NOLPF2 = 0,
+ MPU_FILTER_188HZ,
+ MPU_FILTER_98HZ,
+ MPU_FILTER_42HZ,
+ MPU_FILTER_20HZ,
+ MPU_FILTER_10HZ,
+ MPU_FILTER_5HZ,
+ MPU_FILTER_2100HZ_NOLPF,
+ NUM_MPU_FILTER
+};
+
+enum mpu_fullscale {
+ MPU_FS_250DPS = 0,
+ MPU_FS_500DPS,
+ MPU_FS_1000DPS,
+ MPU_FS_2000DPS,
+ NUM_MPU_FS
+};
+
+enum mpu_clock_sel {
+ MPU_CLK_SEL_INTERNAL = 0,
+ MPU_CLK_SEL_PLLGYROX,
+ MPU_CLK_SEL_PLLGYROY,
+ MPU_CLK_SEL_PLLGYROZ,
+ MPU_CLK_SEL_PLLEXT32K,
+ MPU_CLK_SEL_PLLEXT19M,
+ MPU_CLK_SEL_RESERVED,
+ MPU_CLK_SEL_STOP,
+ NUM_CLK_SEL
+};
+
+enum mpu_ext_sync {
+ MPU_EXT_SYNC_NONE = 0,
+ MPU_EXT_SYNC_TEMP,
+ MPU_EXT_SYNC_GYROX,
+ MPU_EXT_SYNC_GYROY,
+ MPU_EXT_SYNC_GYROZ,
+ MPU_EXT_SYNC_ACCELX,
+ MPU_EXT_SYNC_ACCELY,
+ MPU_EXT_SYNC_ACCELZ,
+ NUM_MPU_EXT_SYNC
+};
+
+#define DLPF_FS_SYNC_VALUE(ext_sync, full_scale, lpf) \
+ ((ext_sync << 5) | (full_scale << 3) | lpf)
+
+#endif /* __MPU3050_H_ */
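
For reference, a minimal usage sketch of the DLPF_FS_SYNC_VALUE() helper defined above, using only enumerators from this header; mpu3050_write_reg() is a hypothetical I2C write helper, not part of this patch.

/* Sketch only: compose the DLPF_FS_SYNC register value from the enums above.
 * mpu3050_write_reg() is a hypothetical I2C helper, not defined in this header. */
#include <linux/mpu3050.h>

static int example_mpu3050_set_dlpf(void)
{
	/* no external sync, +/-2000 dps full scale, 42 Hz low-pass filter */
	unsigned char val = DLPF_FS_SYNC_VALUE(MPU_EXT_SYNC_NONE,
					       MPU_FS_2000DPS,
					       MPU_FILTER_42HZ);

	return mpu3050_write_reg(MPUREG_DLPF_FS_SYNC, val);
}
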
diff --git a/include/linux/mpu6000.h b/include/linux/mpu6000.h
new file mode 100644
index 000000000000..5a63c8f07b70
--- /dev/null
+++ b/include/linux/mpu6000.h
@@ -0,0 +1,406 @@
+/*
+ $License:
+ Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ $
+ */
+
+/**
+ * @defgroup
+ * @brief
+ *
+ * @{
+ * @file mpu6000.h
+ * @brief
+ */
+
+#ifndef __MPU6000_H_
+#define __MPU6000_H_
+
+#define MPU_NAME "mpu6000"
+#define DEFAULT_MPU_SLAVEADDR 0x68
+
+/*==== M_HW REGISTER SET ====*/
+enum {
+ MPUREG_XG_OFFS_TC = 0, /* 0x00 */
+ MPUREG_YG_OFFS_TC, /* 0x00 */
+ MPUREG_ZG_OFFS_TC, /* 0x00 */
+ MPUREG_X_FINE_GAIN, /* 0x00 */
+ MPUREG_Y_FINE_GAIN, /* 0x00 */
+ MPUREG_Z_FINE_GAIN, /* 0x00 */
+ MPUREG_XA_OFFS_H, /* 0x00 */
+ MPUREG_XA_OFFS_L_TC, /* 0x00 */
+ MPUREG_YA_OFFS_H, /* 0x00 */
+ MPUREG_YA_OFFS_L_TC, /* 0x00 */
+ MPUREG_ZA_OFFS_H, /* 0x00 */
+ MPUREG_ZA_OFFS_L_TC, /* 0xB */
+ MPUREG_0C_RSVD, /* 0x00 */
+ MPUREG_0D_RSVD, /* 0x00 */
+ MPUREG_0E_RSVD, /* 0x00 */
+ MPUREG_0F_RSVD, /* 0x00 */
+ MPUREG_10_RSVD, /* 0x00 */
+ MPUREG_11_RSVD, /* 0x00 */
+ MPUREG_12_RSVD, /* 0x00 */
+ MPUREG_XG_OFFS_USRH, /* 0x00 */
+ MPUREG_XG_OFFS_USRL, /* 0x00 */
+ MPUREG_YG_OFFS_USRH, /* 0x00 */
+ MPUREG_YG_OFFS_USRL, /* 0x00 */
+ MPUREG_ZG_OFFS_USRH, /* 0x00 */
+ MPUREG_ZG_OFFS_USRL, /* 0x00 */
+ MPUREG_SMPLRT_DIV, /* 0x19 */
+ MPUREG_CONFIG, /* 0x1A ==> DLPF_FS_SYNC */
+ MPUREG_GYRO_CONFIG, /* 0x00 */
+ MPUREG_ACCEL_CONFIG, /* 0x00 */
+ MPUREG_ACCEL_FF_THR, /* 0x00 */
+ MPUREG_ACCEL_FF_DUR, /* 0x00 */
+ MPUREG_ACCEL_MOT_THR, /* 0x00 */
+ MPUREG_ACCEL_MOT_DUR, /* 0x00 */
+ MPUREG_ACCEL_ZRMOT_THR, /* 0x00 */
+ MPUREG_ACCEL_ZRMOT_DUR, /* 0x00 */
+ MPUREG_FIFO_EN, /* 0x23 */
+ MPUREG_I2C_MST_CTRL, /* 0x00 */
+ MPUREG_I2C_SLV0_ADDR, /* 0x25 */
+ MPUREG_I2C_SLV0_REG, /* 0x00 */
+ MPUREG_I2C_SLV0_CTRL, /* 0x00 */
+ MPUREG_I2C_SLV1_ADDR, /* 0x28 */
+ MPUREG_I2C_SLV1_REG_PASSWORD, /* 0x00 */
+ MPUREG_I2C_SLV1_CTRL, /* 0x00 */
+ MPUREG_I2C_SLV2_ADDR, /* 0x2B */
+ MPUREG_I2C_SLV2_REG, /* 0x00 */
+ MPUREG_I2C_SLV2_CTRL, /* 0x00 */
+ MPUREG_I2C_SLV3_ADDR, /* 0x2E */
+ MPUREG_I2C_SLV3_REG, /* 0x00 */
+ MPUREG_I2C_SLV3_CTRL, /* 0x00 */
+ MPUREG_I2C_SLV4_ADDR, /* 0x31 */
+ MPUREG_I2C_SLV4_REG, /* 0x00 */
+ MPUREG_I2C_SLV4_DO, /* 0x00 */
+ MPUREG_I2C_SLV4_CTRL, /* 0x00 */
+ MPUREG_I2C_SLV4_DI, /* 0x00 */
+ MPUREG_I2C_MST_STATUS, /* 0x36 */
+ MPUREG_INT_PIN_CFG, /* 0x37 ==> -* INT_CFG */
+ MPUREG_INT_ENABLE, /* 0x38 ==> / */
+ MPUREG_DMP_INT_STATUS, /* 0x39 */
+ MPUREG_INT_STATUS, /* 0x3A */
+ MPUREG_ACCEL_XOUT_H, /* 0x3B */
+ MPUREG_ACCEL_XOUT_L, /* 0x00 */
+ MPUREG_ACCEL_YOUT_H, /* 0x00 */
+ MPUREG_ACCEL_YOUT_L, /* 0x00 */
+ MPUREG_ACCEL_ZOUT_H, /* 0x00 */
+ MPUREG_ACCEL_ZOUT_L, /* 0x00 */
+ MPUREG_TEMP_OUT_H, /* 0x41 */
+ MPUREG_TEMP_OUT_L, /* 0x00 */
+ MPUREG_GYRO_XOUT_H, /* 0x43 */
+ MPUREG_GYRO_XOUT_L, /* 0x00 */
+ MPUREG_GYRO_YOUT_H, /* 0x00 */
+ MPUREG_GYRO_YOUT_L, /* 0x00 */
+ MPUREG_GYRO_ZOUT_H, /* 0x00 */
+ MPUREG_GYRO_ZOUT_L, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_00, /* 0x49 */
+ MPUREG_EXT_SLV_SENS_DATA_01, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_02, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_03, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_04, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_05, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_06, /* 0x4F */
+ MPUREG_EXT_SLV_SENS_DATA_07, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_08, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_09, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_10, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_11, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_12, /* 0x55 */
+ MPUREG_EXT_SLV_SENS_DATA_13, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_14, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_15, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_16, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_17, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_18, /* 0x5B */
+ MPUREG_EXT_SLV_SENS_DATA_19, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_20, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_21, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_22, /* 0x00 */
+ MPUREG_EXT_SLV_SENS_DATA_23, /* 0x00 */
+ ACCEL_INTEL_STATUS, /* 0x61 */
+ MPUREG_62_RSVD, /* 0x00 */
+ MPUREG_63_RSVD, /* 0x00 */
+ MPUREG_64_RSVD, /* 0x00 */
+ MPUREG_65_RSVD, /* 0x00 */
+ MPUREG_66_RSVD, /* 0x00 */
+ MPUREG_67_RSVD, /* 0x00 */
+ SIGNAL_PATH_RESET, /* 0x68 */
+ ACCEL_INTEL_CTRL, /* 0x69 */
+ MPUREG_USER_CTRL, /* 0x6A */
+ MPUREG_PWR_MGMT_1, /* 0x6B */
+ MPUREG_PWR_MGMT_2, /* 0x00 */
+ MPUREG_BANK_SEL, /* 0x6D */
+ MPUREG_MEM_START_ADDR, /* 0x6E */
+ MPUREG_MEM_R_W, /* 0x6F */
+ MPUREG_PRGM_STRT_ADDRH, /* 0x00 */
+ MPUREG_PRGM_STRT_ADDRL, /* 0x00 */
+ MPUREG_FIFO_COUNTH, /* 0x72 */
+ MPUREG_FIFO_COUNTL, /* 0x00 */
+ MPUREG_FIFO_R_W, /* 0x74 */
+ MPUREG_WHOAMI, /* 0x75,117 */
+
+ NUM_OF_MPU_REGISTERS /* = 0x76,118 */
+};
+
+/*==== M_HW MEMORY ====*/
+enum MPU_MEMORY_BANKS {
+ MEM_RAM_BANK_0 = 0,
+ MEM_RAM_BANK_1,
+ MEM_RAM_BANK_2,
+ MEM_RAM_BANK_3,
+ MEM_RAM_BANK_4,
+ MEM_RAM_BANK_5,
+ MEM_RAM_BANK_6,
+ MEM_RAM_BANK_7,
+ MEM_RAM_BANK_8,
+ MEM_RAM_BANK_9,
+ MEM_RAM_BANK_10,
+ MEM_RAM_BANK_11,
+ MPU_MEM_NUM_RAM_BANKS,
+ MPU_MEM_OTP_BANK_0 = 16
+};
+
+
+/*==== M_HW parameters ====*/
+
+#define NUM_REGS (NUM_OF_MPU_REGISTERS)
+#define START_SENS_REGS (0x3B)
+#define NUM_SENS_REGS (0x60-START_SENS_REGS+1)
+
+/*---- MPU Memory ----*/
+#define NUM_BANKS (MPU_MEM_NUM_RAM_BANKS)
+#define BANK_SIZE (256)
+#define MEM_SIZE (NUM_BANKS*BANK_SIZE)
+#define MPU_MEM_BANK_SIZE (BANK_SIZE) /*alternative name */
+
+#define FIFO_HW_SIZE (1024)
+
+#define NUM_EXT_SLAVES (4)
+
+
+/*==== BITS FOR M_HW ====*/
+
+/*---- M_HW 'FIFO_EN' register (23) ----*/
+#define BIT_TEMP_OUT 0x80
+#define BIT_GYRO_XOUT 0x40
+#define BIT_GYRO_YOUT 0x20
+#define BIT_GYRO_ZOUT 0x10
+#define BIT_ACCEL 0x08
+#define BIT_SLV_2 0x04
+#define BIT_SLV_1 0x02
+#define BIT_SLV_0 0x01
+/*---- M_HW 'CONFIG' register (1A) ----*/
+/*NONE 0xC0 */
+#define BITS_EXT_SYNC_SET 0x38
+#define BITS_DLPF_CFG 0x07
+/*---- M_HW 'GYRO_CONFIG' register (1B) ----*/
+/* label deliberately renamed from BITS_FS_SEL to
+ * BITS_GYRO_FS_SEL to avoid confusion with the MPU3050 definition
+ */
+#define BITS_GYRO_FS_SEL 0x18
+/*NONE 0x07 */
+/*---- M_HW 'ACCEL_CONFIG' register (1C) ----*/
+#define BITS_ACCEL_FS_SEL 0x18
+#define BITS_ACCEL_HPF 0x07
+/*---- M_HW 'I2C_MST_CTRL' register (24) ----*/
+#define BIT_MULT_MST_DIS 0x80
+#define BIT_WAIT_FOR_ES 0x40
+#define BIT_I2C_MST_VDDIO 0x20
+/*NONE 0x10 */
+#define BITS_I2C_MST_CLK 0x0F
+/*---- M_HW 'I2C_SLV?_CTRL' register (27,2A,2D,30) ----*/
+#define BIT_SLV_ENABLE 0x80
+#define BIT_SLV_BYTE_SW 0x40
+/*NONE 0x20 */
+#define BIT_SLV_GRP 0x10
+#define BITS_SLV_LENG 0x0F
+/*---- M_HW 'I2C_SLV4_ADDR' register (31) ----*/
+#define BIT_I2C_SLV4_RNW 0x80
+/*---- M_HW 'I2C_SLV4_CTRL' register (34) ----*/
+#define BIT_I2C_SLV4_EN 0x80
+#define BIT_SLV4_DONE_INT_EN 0x40
+/*NONE 0x3F */
+/*---- M_HW 'I2C_MST_STATUS' register (36) ----*/
+#define BIT_PASSTHROUGH 0x80
+#define BIT_I2C_SLV4_DONE 0x40
+#define BIT_I2C_LOST_ARB 0x20
+#define BIT_I2C_SLV4_NACK 0x10
+#define BIT_I2C_SLV3_NACK 0x08
+#define BIT_I2C_SLV2_NACK 0x04
+#define BIT_I2C_SLV1_NACK 0x02
+#define BIT_I2C_SLV0_NACK 0x01
+/*---- M_HW 'INT_PIN_CFG' register (37) ----*/
+#define BIT_ACTL 0x80
+#define BIT_ACTL_LOW 0x80
+#define BIT_ACTL_HIGH 0x00
+#define BIT_OPEN 0x40
+#define BIT_LATCH_INT_EN 0x20
+#define BIT_INT_ANYRD_2CLEAR 0x10
+#define BIT_ACTL_FSYNC 0x08
+#define BIT_FSYNC_INT_EN 0x04
+#define BIT_BYPASS_EN 0x02
+#define BIT_CLKOUT_EN 0x01
+/*---- M_HW 'INT_ENABLE' register (38) ----*/
+#define BIT_FF_EN 0x80
+#define BIT_MOT_EN 0x40
+#define BIT_ZMOT_EN 0x20
+#define BIT_FIFO_OVERFLOW_EN 0x10
+#define BIT_I2C_MST_INT_EN 0x08
+#define BIT_PLL_RDY_EN 0x04
+#define BIT_DMP_INT_EN 0x02
+#define BIT_RAW_RDY_EN 0x01
+/*---- M_HW 'DMP_INT_STATUS' register (39) ----*/
+/*NONE 0x80 */
+/*NONE 0x40 */
+#define BIT_DMP_INT_5 0x20
+#define BIT_DMP_INT_4 0x10
+#define BIT_DMP_INT_3 0x08
+#define BIT_DMP_INT_2 0x04
+#define BIT_DMP_INT_1 0x02
+#define BIT_DMP_INT_0 0x01
+/*---- M_HW 'INT_STATUS' register (3A) ----*/
+#define BIT_FF_INT 0x80
+#define BIT_MOT_INT 0x40
+#define BIT_ZMOT_INT 0x20
+#define BIT_FIFO_OVERFLOW_INT 0x10
+#define BIT_I2C_MST_INT 0x08
+#define BIT_PLL_RDY_INT 0x04
+#define BIT_DMP_INT 0x02
+#define BIT_RAW_DATA_RDY_INT 0x01
+/*---- M_HW 'BANK_SEL' register (6D) ----*/
+#define BIT_PRFTCH_EN 0x40
+#define BIT_CFG_USER_BANK 0x20
+#define BITS_MEM_SEL 0x1f
+/*---- M_HW 'USER_CTRL' register (6A) ----*/
+#define BIT_DMP_EN 0x80
+#define BIT_FIFO_EN 0x40
+#define BIT_I2C_MST_EN 0x20
+#define BIT_I2C_IF_DIS 0x10
+#define BIT_DMP_RST 0x08
+#define BIT_FIFO_RST 0x04
+#define BIT_I2C_MST_RST 0x02
+#define BIT_SIG_COND_RST 0x01
+/*---- M_HW 'PWR_MGMT_1' register (6B) ----*/
+#define BIT_H_RESET 0x80
+#define BITS_PWRSEL 0x70
+#define BIT_WKUP_INT 0x08
+#define BITS_CLKSEL 0x07
+/*---- M_HW 'PWR_MGMT_2' register (6C) ----*/
+#define BITS_LPA_WAKE_CTRL 0xC0
+#define BIT_STBY_XA 0x20
+#define BIT_STBY_YA 0x10
+#define BIT_STBY_ZA 0x08
+#define BIT_STBY_XG 0x04
+#define BIT_STBY_YG 0x02
+#define BIT_STBY_ZG 0x01
+
+/* although the device has 6 axes, this count refers to the gyro axes only */
+#define MPU_NUM_AXES (3)
+
+#define ACCEL_MOT_THR_LSB (32) /* mg */
+#define ACCEL_MOT_DUR_LSB (1)
+#define ACCEL_ZRMOT_THR_LSB_CONVERSION(mg) (((mg) * 1000) / 255)
+#define ACCEL_ZRMOT_DUR_LSB (64)
+
+/*----------------------------------------------------------------------------*/
+/*---- Alternative names to take care of conflicts with current mpu3050.h ----*/
+/*----------------------------------------------------------------------------*/
+
+/*-- registers --*/
+#define MPUREG_DLPF_FS_SYNC MPUREG_CONFIG /* 0x1A */
+
+#define MPUREG_PRODUCT_ID MPUREG_WHOAMI /* 0x75 HACK! */
+#define MPUREG_PWR_MGM MPUREG_PWR_MGMT_1 /* 0x6B */
+#define MPUREG_FIFO_EN1 MPUREG_FIFO_EN /* 0x23 */
+#define MPUREG_DMP_CFG_1 MPUREG_PRGM_STRT_ADDRH /* 0x70 */
+#define MPUREG_DMP_CFG_2 MPUREG_PRGM_STRT_ADDRL /* 0x71 */
+#define MPUREG_INT_CFG MPUREG_INT_ENABLE /* 0x38 */
+#define MPUREG_X_OFFS_USRH MPUREG_XG_OFFS_USRH /* 0x13 */
+#define MPUREG_WHO_AM_I MPUREG_WHOAMI /* 0x75 */
+#define MPUREG_23_RSVD MPUREG_EXT_SLV_SENS_DATA_00 /* 0x49 */
+#define MPUREG_AUX_SLV_ADDR MPUREG_I2C_SLV0_ADDR /* 0x25 */
+#define MPUREG_ACCEL_BURST_ADDR MPUREG_I2C_SLV0_REG /* 0x26 */
+
+/*-- bits --*/
+/* 'USER_CTRL' register */
+#define BIT_AUX_IF_EN BIT_I2C_MST_EN
+#define BIT_AUX_RD_LENG BIT_I2C_MST_EN
+#define BIT_IME_IF_RST BIT_I2C_MST_RST
+#define BIT_GYRO_RST BIT_SIG_COND_RST
+/* 'INT_ENABLE' register */
+#define BIT_RAW_RDY BIT_RAW_DATA_RDY_INT
+#define BIT_MPU_RDY_EN BIT_PLL_RDY_EN
+/* 'INT_STATUS' register */
+#define BIT_INT_STATUS_FIFO_OVERLOW BIT_FIFO_OVERFLOW_INT
+
+
+
+/*---- M_HW Silicon Revisions ----*/
+#define MPU_SILICON_REV_A1 1 /* M_HW A1 Device */
+#define MPU_SILICON_REV_B1 2 /* M_HW B1 Device */
+
+/*---- structure containing control variables used by MLDL ----*/
+/*---- MPU clock source settings ----*/
+/*---- MPU filter selections ----*/
+enum mpu_filter {
+ MPU_FILTER_256HZ_NOLPF2 = 0,
+ MPU_FILTER_188HZ,
+ MPU_FILTER_98HZ,
+ MPU_FILTER_42HZ,
+ MPU_FILTER_20HZ,
+ MPU_FILTER_10HZ,
+ MPU_FILTER_5HZ,
+ MPU_FILTER_2100HZ_NOLPF,
+ NUM_MPU_FILTER
+};
+
+enum mpu_fullscale {
+ MPU_FS_250DPS = 0,
+ MPU_FS_500DPS,
+ MPU_FS_1000DPS,
+ MPU_FS_2000DPS,
+ NUM_MPU_FS
+};
+
+enum mpu_clock_sel {
+ MPU_CLK_SEL_INTERNAL = 0,
+ MPU_CLK_SEL_PLLGYROX,
+ MPU_CLK_SEL_PLLGYROY,
+ MPU_CLK_SEL_PLLGYROZ,
+ MPU_CLK_SEL_PLLEXT32K,
+ MPU_CLK_SEL_PLLEXT19M,
+ MPU_CLK_SEL_RESERVED,
+ MPU_CLK_SEL_STOP,
+ NUM_CLK_SEL
+};
+
+enum mpu_ext_sync {
+ MPU_EXT_SYNC_NONE = 0,
+ MPU_EXT_SYNC_TEMP,
+ MPU_EXT_SYNC_GYROX,
+ MPU_EXT_SYNC_GYROY,
+ MPU_EXT_SYNC_GYROZ,
+ MPU_EXT_SYNC_ACCELX,
+ MPU_EXT_SYNC_ACCELY,
+ MPU_EXT_SYNC_ACCELZ,
+ NUM_MPU_EXT_SYNC
+};
+
+#define DLPF_FS_SYNC_VALUE(ext_sync, full_scale, lpf) \
+ ((ext_sync << 5) | (full_scale << 3) | lpf)
+
+#endif /* __MPU6000_H_ */
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 34066e65fdeb..f38d4f0a5ae8 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -101,6 +101,7 @@ struct __fat_dirent {
/* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
#define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
#define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32)
+#define VFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
struct fat_boot_sector {
__u8 ignored[3]; /* Boot strap short or near jump */
@@ -138,6 +139,17 @@ struct fat_boot_fsinfo {
__le32 reserved2[4];
};
+struct fat_boot_bsx {
+ __u8 drive; /* drive number */
+ __u8 reserved1;
+ __u8 signature; /* extended boot signature */
+ __u8 vol_id[4]; /* volume ID */
+ __u8 vol_label[11]; /* volume label */
+ __u8 type[8]; /* file system type */
+};
+#define FAT16_BSX_OFFSET 36 /* offset of fat_boot_bsx in FAT12 and FAT16 */
+#define FAT32_BSX_OFFSET 64 /* offset of fat_boot_bsx in FAT32 */
+
struct msdos_dir_entry {
__u8 name[MSDOS_NAME];/* name and extension */
__u8 attr; /* attribute bits */
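
A hedged userspace sketch of the new VFAT_IOCTL_GET_VOLUME_ID ioctl; the mount point is illustrative, and returning the ID through the __u32 argument is assumed from the _IOR encoding above (the fat driver side of the patch is not shown here).

/* Sketch only: query the volume ID of a mounted FAT filesystem.
 * "/mnt/sdcard" is an illustrative mount point. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int main(void)
{
	__u32 vol_id = 0;
	int fd = open("/mnt/sdcard", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID, &vol_id) == 0)
		printf("volume id: %08X\n", (unsigned int)vol_id);
	close(fd);
	return 0;
}
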
diff --git a/include/linux/nct1008.h b/include/linux/nct1008.h
new file mode 100644
index 000000000000..d2be5dba1563
--- /dev/null
+++ b/include/linux/nct1008.h
@@ -0,0 +1,98 @@
+/*
+ * include/linux/nct1008.h
+ *
+ * NCT1008, temperature monitoring device from ON Semiconductors
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_NCT1008_H
+#define _LINUX_NCT1008_H
+
+#include <linux/types.h>
+
+#include <mach/edp.h>
+
+#define MAX_ZONES 16
+
+struct nct1008_data;
+
+struct nct1008_platform_data {
+ bool supported_hwrev;
+ bool ext_range;
+ u8 conv_rate;
+ u8 offset;
+ u8 hysteresis;
+ s8 shutdown_ext_limit;
+ s8 shutdown_local_limit;
+ s8 throttling_ext_limit;
+ s8 thermal_zones[MAX_ZONES];
+ u8 thermal_zones_sz;
+ void (*alarm_fn)(bool raised);
+ void (*probe_callback)(struct nct1008_data *);
+};
+
+struct nct1008_data {
+ struct work_struct work;
+ struct i2c_client *client;
+ struct nct1008_platform_data plat_data;
+ struct mutex mutex;
+ struct dentry *dent;
+ u8 config;
+ s8 *limits;
+ u8 limits_sz;
+ void (*alarm_fn)(bool raised);
+ struct regulator *nct_reg;
+ long current_lo_limit;
+ long current_hi_limit;
+
+ void (*alert_func)(void *);
+ void *alert_data;
+};
+
+#ifdef CONFIG_SENSORS_NCT1008
+int nct1008_thermal_get_temp(struct nct1008_data *data, long *temp);
+int nct1008_thermal_get_temp_low(struct nct1008_data *data, long *temp);
+int nct1008_thermal_set_limits(struct nct1008_data *data,
+ long lo_limit_milli,
+ long hi_limit_milli);
+int nct1008_thermal_set_alert(struct nct1008_data *data,
+ void (*alert_func)(void *),
+ void *alert_data);
+int nct1008_thermal_set_shutdown_temp(struct nct1008_data *data,
+ long shutdown_temp);
+#else
+static inline int nct1008_thermal_get_temp(struct nct1008_data *data,
+ long *temp)
+{ return -EINVAL; }
+static inline int nct1008_thermal_get_temp_low(struct nct1008_data *data,
+ long *temp)
+{ return -EINVAL; }
+static inline int nct1008_thermal_set_limits(struct nct1008_data *data,
+ long lo_limit_milli,
+ long hi_limit_milli)
+{ return -EINVAL; }
+static inline int nct1008_thermal_set_alert(struct nct1008_data *data,
+ void (*alert_func)(void *),
+ void *alert_data)
+{ return -EINVAL; }
+static inline int nct1008_thermal_set_shutdown_temp(struct nct1008_data *data,
+ long shutdown_temp)
+{ return -EINVAL; }
+#endif
+
+#endif /* _LINUX_NCT1008_H */
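
A brief kernel-side sketch of the thermal helpers declared above, assuming the struct nct1008_data pointer arrives via the probe_callback hook and that the limits are in millidegrees Celsius, as the *_milli parameter names suggest.

/* Sketch only: read the current temperature and re-arm the alert limits.
 * 'nct' is assumed to come from nct1008_platform_data.probe_callback();
 * units are assumed to be millidegrees Celsius. */
#include <linux/nct1008.h>

static void example_nct1008_configure(struct nct1008_data *nct)
{
	long temp;

	if (nct1008_thermal_get_temp(nct, &temp))
		return;

	/* alert if the external sensor drifts more than 5 C either way */
	nct1008_thermal_set_limits(nct, temp - 5000, temp + 5000);
}
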
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644
index 000000000000..ca60fbdec2f3
--- /dev/null
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID XT_OWNER_UID
+#define XT_QTAGUID_GID XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 000000000000..eadc6903314e
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+ XT_QUOTA_INVERT = 1 << 0,
+ XT_QUOTA_GROW = 1 << 1,
+ XT_QUOTA_PACKET = 1 << 2,
+ XT_QUOTA_NO_CHANGE = 1 << 3,
+ XT_QUOTA_MASK = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+ char name[15];
+ u_int8_t flags;
+
+ /* Comparison-invariant */
+ aligned_u64 quota;
+
+ /* Used internally by the kernel */
+ struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
index 26d7217bd4f1..63594564831c 100644
--- a/include/linux/netfilter/xt_socket.h
+++ b/include/linux/netfilter/xt_socket.h
@@ -11,4 +11,10 @@ struct xt_socket_mtinfo1 {
__u8 flags;
};
+void xt_socket_put_sk(struct sock *sk);
+struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
+ struct xt_action_param *par);
+struct sock *xt_socket_get6_sk(const struct sk_buff *skb,
+ struct xt_action_param *par);
+
#endif /* _XT_SOCKET_H */
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
index 7ab8521f2347..9285000dbb46 100644
--- a/include/linux/nfc/pn544.h
+++ b/include/linux/nfc/pn544.h
@@ -1,97 +1,33 @@
/*
- * Driver include for the PN544 NFC chip.
+ * Copyright (C) 2010 Trusted Logic S.A.
*
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltoenn <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _PN544_H_
-#define _PN544_H_
-
-#include <linux/i2c.h>
-
-#define PN544_DRIVER_NAME "pn544"
-#define PN544_MAXWINDOW_SIZE 7
-#define PN544_WINDOW_SIZE 4
-#define PN544_RETRIES 10
-#define PN544_MAX_I2C_TRANSFER 0x0400
-#define PN544_MSG_MAX_SIZE 0x21 /* at normal HCI mode */
-
-/* ioctl */
-#define PN544_CHAR_BASE 'P'
-#define PN544_IOR(num, dtype) _IOR(PN544_CHAR_BASE, num, dtype)
-#define PN544_IOW(num, dtype) _IOW(PN544_CHAR_BASE, num, dtype)
-#define PN544_GET_FW_MODE PN544_IOW(1, unsigned int)
-#define PN544_SET_FW_MODE PN544_IOW(2, unsigned int)
-#define PN544_GET_DEBUG PN544_IOW(3, unsigned int)
-#define PN544_SET_DEBUG PN544_IOW(4, unsigned int)
-
-/* Timing restrictions (ms) */
-#define PN544_RESETVEN_TIME 30 /* 7 */
-#define PN544_PVDDVEN_TIME 0
-#define PN544_VBATVEN_TIME 0
-#define PN544_GPIO4VEN_TIME 0
-#define PN544_WAKEUP_ACK 5
-#define PN544_WAKEUP_GUARD (PN544_WAKEUP_ACK + 1)
-#define PN544_INACTIVITY_TIME 1000
-#define PN544_INTERFRAME_DELAY 200 /* us */
-#define PN544_BAUDRATE_CHANGE 150 /* us */
-
-/* Debug bits */
-#define PN544_DEBUG_BUF 0x01
-#define PN544_DEBUG_READ 0x02
-#define PN544_DEBUG_WRITE 0x04
-#define PN544_DEBUG_IRQ 0x08
-#define PN544_DEBUG_CALLS 0x10
-#define PN544_DEBUG_MODE 0x20
-
-/* Normal (HCI) mode */
-#define PN544_LLC_HCI_OVERHEAD 3 /* header + crc (to length) */
-#define PN544_LLC_MIN_SIZE (1 + PN544_LLC_HCI_OVERHEAD) /* length + */
-#define PN544_LLC_MAX_DATA (PN544_MSG_MAX_SIZE - 2)
-#define PN544_LLC_MAX_HCI_SIZE (PN544_LLC_MAX_DATA - 2)
+#define PN544_MAGIC 0xE9
-struct pn544_llc_packet {
- unsigned char length; /* of rest of packet */
- unsigned char header;
- unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */
-};
-
-/* Firmware upgrade mode */
-#define PN544_FW_HEADER_SIZE 3
-/* max fw transfer is 1024bytes, but I2C limits it to 0xC0 */
-#define PN544_MAX_FW_DATA (PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE)
-
-struct pn544_fw_packet {
- unsigned char command; /* status in answer */
- unsigned char length[2]; /* big-endian order (msf) */
- unsigned char data[PN544_MAX_FW_DATA];
-};
+/*
+ * PN544 power control via ioctl
+ * PN544_SET_PWR(0): power off
+ * PN544_SET_PWR(1): power on
+ * PN544_SET_PWR(2): reset and power on with firmware download enabled
+ */
+#define PN544_SET_PWR _IOW(PN544_MAGIC, 0x01, unsigned int)
-#ifdef __KERNEL__
-/* board config */
-struct pn544_nfc_platform_data {
- int (*request_resources) (struct i2c_client *client);
- void (*free_resources) (void);
- void (*enable) (int fw);
- int (*test) (void);
- void (*disable) (void);
+struct pn544_i2c_platform_data {
+ unsigned int irq_gpio;
+ unsigned int ven_gpio;
+ unsigned int firm_gpio;
};
-#endif /* __KERNEL__ */
-
-#endif /* _PN544_H_ */
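
A hedged userspace sketch of the PN544_SET_PWR ioctl introduced above; "/dev/pn544" is an assumed device node name created by the driver side of this patch, not by this header.

/* Sketch only: power-cycle the PN544 into firmware-download mode, then back on.
 * "/dev/pn544" is an assumed device node name. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nfc/pn544.h>

int main(void)
{
	int fd = open("/dev/pn544", O_RDWR);

	if (fd < 0)
		return 1;
	ioctl(fd, PN544_SET_PWR, 2);	/* reset, firmware download enabled */
	ioctl(fd, PN544_SET_PWR, 1);	/* normal power on */
	close(fd);
	return 0;
}
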
diff --git a/include/linux/nvhost.h b/include/linux/nvhost.h
new file mode 100644
index 000000000000..a1d211de1ef1
--- /dev/null
+++ b/include/linux/nvhost.h
@@ -0,0 +1,73 @@
+/*
+ * include/linux/nvhost.h
+ *
+ * Tegra graphics host driver
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_NVHOST_H
+#define __LINUX_NVHOST_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct nvhost_master;
+
+struct nvhost_device {
+ const char *name;
+ struct device dev;
+ int id;
+ u32 num_resources;
+ struct resource *resource;
+
+ struct nvhost_master *host;
+};
+
+extern int nvhost_device_register(struct nvhost_device *);
+extern void nvhost_device_unregister(struct nvhost_device *);
+
+extern struct bus_type nvhost_bus_type;
+
+struct nvhost_driver {
+ int (*probe)(struct nvhost_device *);
+ int (*remove)(struct nvhost_device *);
+ void (*shutdown)(struct nvhost_device *);
+ int (*suspend)(struct nvhost_device *, pm_message_t state);
+ int (*resume)(struct nvhost_device *);
+ struct device_driver driver;
+};
+
+extern int nvhost_driver_register(struct nvhost_driver *);
+extern void nvhost_driver_unregister(struct nvhost_driver *);
+extern struct resource *nvhost_get_resource(struct nvhost_device *,
+ unsigned int, unsigned int);
+extern int nvhost_get_irq(struct nvhost_device *, unsigned int);
+extern struct resource *nvhost_get_resource_byname(struct nvhost_device *,
+ unsigned int, const char *);
+extern int nvhost_get_irq_byname(struct nvhost_device *, const char *);
+
+#define to_nvhost_device(x) container_of((x), struct nvhost_device, dev)
+#define to_nvhost_driver(drv) (container_of((drv), struct nvhost_driver, \
+ driver))
+
+#define nvhost_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
+#define nvhost_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data))
+
+int nvhost_bus_register(struct nvhost_master *host);
+
+#endif
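
A minimal sketch of registering a driver on the nvhost bus using the API declared above; the driver name and empty probe body are illustrative.

/* Sketch only: skeleton nvhost bus driver. The "example" name and the
 * empty probe are illustrative. */
#include <linux/module.h>
#include <linux/nvhost.h>

static int example_probe(struct nvhost_device *dev)
{
	return 0;
}

static struct nvhost_driver example_nvhost_driver = {
	.probe	= example_probe,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return nvhost_driver_register(&example_nvhost_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	nvhost_driver_unregister(&example_nvhost_driver);
}
module_exit(example_exit);
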
diff --git a/include/linux/nvhost_ioctl.h b/include/linux/nvhost_ioctl.h
new file mode 100644
index 000000000000..a1fc0b7cd247
--- /dev/null
+++ b/include/linux/nvhost_ioctl.h
@@ -0,0 +1,204 @@
+/*
+ * include/linux/nvhost_ioctl.h
+ *
+ * Tegra graphics host driver
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_NVHOST_IOCTL_H
+#define __LINUX_NVHOST_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#define NVHOST_INVALID_SYNCPOINT 0xFFFFFFFF
+#define NVHOST_NO_TIMEOUT (-1)
+#define NVHOST_NO_CONTEXT 0x0
+#define NVHOST_IOCTL_MAGIC 'H'
+#define NVHOST_PRIORITY_LOW 50
+#define NVHOST_PRIORITY_MEDIUM 100
+#define NVHOST_PRIORITY_HIGH 150
+
+/* version 0 header (used with write() submit interface) */
+struct nvhost_submit_hdr {
+ __u32 syncpt_id;
+ __u32 syncpt_incrs;
+ __u32 num_cmdbufs;
+ __u32 num_relocs;
+};
+
+#define NVHOST_SUBMIT_VERSION_V0 0x0
+#define NVHOST_SUBMIT_VERSION_V1 0x1
+#define NVHOST_SUBMIT_VERSION_V2 0x2
+#define NVHOST_SUBMIT_VERSION_MAX_SUPPORTED NVHOST_SUBMIT_VERSION_V2
+
+/* version 1 header (used with ioctl() submit interface) */
+struct nvhost_submit_hdr_ext {
+ __u32 syncpt_id; /* version 0 fields */
+ __u32 syncpt_incrs;
+ __u32 num_cmdbufs;
+ __u32 num_relocs;
+ __u32 submit_version; /* version 1 fields */
+ __u32 num_waitchks;
+ __u32 waitchk_mask;
+ __u32 pad[5]; /* future expansion */
+};
+
+struct nvhost_cmdbuf {
+ __u32 mem;
+ __u32 offset;
+ __u32 words;
+};
+
+struct nvhost_reloc {
+ __u32 cmdbuf_mem;
+ __u32 cmdbuf_offset;
+ __u32 target;
+ __u32 target_offset;
+};
+
+struct nvhost_reloc_shift {
+ __u32 shift;
+};
+
+struct nvhost_waitchk {
+ __u32 mem;
+ __u32 offset;
+ __u32 syncpt_id;
+ __u32 thresh;
+};
+
+struct nvhost_get_param_args {
+ __u32 value;
+};
+
+struct nvhost_set_nvmap_fd_args {
+ __u32 fd;
+};
+
+struct nvhost_read_3d_reg_args {
+ __u32 offset;
+ __u32 value;
+};
+
+struct nvhost_clk_rate_args {
+ __u64 rate;
+};
+
+struct nvhost_set_timeout_args {
+ __u32 timeout;
+};
+
+struct nvhost_set_priority_args {
+ __u32 priority;
+};
+
+#define NVHOST_IOCTL_CHANNEL_FLUSH \
+ _IOR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS \
+ _IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_WAITBASES \
+ _IOR(NVHOST_IOCTL_MAGIC, 3, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES \
+ _IOR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD \
+ _IOW(NVHOST_IOCTL_MAGIC, 5, struct nvhost_set_nvmap_fd_args)
+#define NVHOST_IOCTL_CHANNEL_NULL_KICKOFF \
+ _IOR(NVHOST_IOCTL_MAGIC, 6, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SUBMIT_EXT \
+ _IOW(NVHOST_IOCTL_MAGIC, 7, struct nvhost_submit_hdr_ext)
+#define NVHOST_IOCTL_CHANNEL_READ_3D_REG \
+ _IOWR(NVHOST_IOCTL_MAGIC, 8, struct nvhost_read_3d_reg_args)
+#define NVHOST_IOCTL_CHANNEL_GET_CLK_RATE \
+ _IOR(NVHOST_IOCTL_MAGIC, 9, struct nvhost_clk_rate_args)
+#define NVHOST_IOCTL_CHANNEL_SET_CLK_RATE \
+ _IOW(NVHOST_IOCTL_MAGIC, 10, struct nvhost_clk_rate_args)
+#define NVHOST_IOCTL_CHANNEL_SET_TIMEOUT \
+ _IOW(NVHOST_IOCTL_MAGIC, 11, struct nvhost_set_timeout_args)
+#define NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT \
+ _IOR(NVHOST_IOCTL_MAGIC, 12, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_PRIORITY \
+ _IOW(NVHOST_IOCTL_MAGIC, 13, struct nvhost_set_priority_args)
+#define NVHOST_IOCTL_CHANNEL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_PRIORITY)
+#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_submit_hdr_ext)
+
+struct nvhost_ctrl_syncpt_read_args {
+ __u32 id;
+ __u32 value;
+};
+
+struct nvhost_ctrl_syncpt_incr_args {
+ __u32 id;
+};
+
+struct nvhost_ctrl_syncpt_wait_args {
+ __u32 id;
+ __u32 thresh;
+ __s32 timeout;
+};
+
+struct nvhost_ctrl_syncpt_waitex_args {
+ __u32 id;
+ __u32 thresh;
+ __s32 timeout;
+ __u32 value;
+};
+
+struct nvhost_ctrl_module_mutex_args {
+ __u32 id;
+ __u32 lock;
+};
+
+struct nvhost_ctrl_module_regrdwr_args {
+ __u32 id;
+ __u32 num_offsets;
+ __u32 block_size;
+ __u32 *offsets;
+ __u32 *values;
+ __u32 write;
+};
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_READ \
+ _IOWR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_ctrl_syncpt_read_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_INCR \
+ _IOW(NVHOST_IOCTL_MAGIC, 2, struct nvhost_ctrl_syncpt_incr_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAIT \
+ _IOW(NVHOST_IOCTL_MAGIC, 3, struct nvhost_ctrl_syncpt_wait_args)
+
+#define NVHOST_IOCTL_CTRL_MODULE_MUTEX \
+ _IOWR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_ctrl_module_mutex_args)
+#define NVHOST_IOCTL_CTRL_MODULE_REGRDWR \
+ _IOWR(NVHOST_IOCTL_MAGIC, 5, struct nvhost_ctrl_module_regrdwr_args)
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAITEX \
+ _IOWR(NVHOST_IOCTL_MAGIC, 6, struct nvhost_ctrl_syncpt_waitex_args)
+
+#define NVHOST_IOCTL_CTRL_GET_VERSION \
+ _IOR(NVHOST_IOCTL_MAGIC, 7, struct nvhost_get_param_args)
+
+#define NVHOST_IOCTL_CTRL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CTRL_GET_VERSION)
+#define NVHOST_IOCTL_CTRL_MAX_ARG_SIZE \
+ sizeof(struct nvhost_ctrl_module_regrdwr_args)
+
+#endif
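
A hedged userspace sketch of the syncpoint wait control ioctl declared above; "/dev/nvhost-ctrl" is an assumed device node name, and the timeout is assumed to be in milliseconds.

/* Sketch only: wait up to 100 ms for syncpoint 'id' to reach 'thresh'.
 * "/dev/nvhost-ctrl" is an assumed device node name. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvhost_ioctl.h>

static int example_syncpt_wait(__u32 id, __u32 thresh)
{
	struct nvhost_ctrl_syncpt_wait_args args = {
		.id	 = id,
		.thresh	 = thresh,
		.timeout = 100,		/* assumed milliseconds */
	};
	int fd = open("/dev/nvhost-ctrl", O_RDWR);
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, NVHOST_IOCTL_CTRL_SYNCPT_WAIT, &args);
	close(fd);
	return err;
}
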
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
index c9e4d814ff77..2bb62bf296ac 100644
--- a/include/linux/pda_power.h
+++ b/include/linux/pda_power.h
@@ -35,6 +35,8 @@ struct pda_power_pdata {
unsigned int polling_interval; /* msecs, default is 2000 */
unsigned long ac_max_uA; /* current to draw when on AC */
+
+ bool use_otg_notifier;
};
#endif /* __PDA_POWER_H__ */
diff --git a/include/linux/platform_data/ina230.h b/include/linux/platform_data/ina230.h
new file mode 100644
index 000000000000..fb1ac28ff623
--- /dev/null
+++ b/include/linux/platform_data/ina230.h
@@ -0,0 +1,32 @@
+/*
+ * include/linux/platform_data/ina230.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _INA230_H
+#define _INA230_H
+
+#include <linux/types.h>
+
+struct ina230_platform_data {
+ const char *rail_name;
+ s32 current_threshold;
+ s32 resistor;
+ s32 min_cores_online;
+};
+
+#endif /* _INA230_H */
diff --git a/include/linux/platform_data/ram_console.h b/include/linux/platform_data/ram_console.h
new file mode 100644
index 000000000000..9f1125c11066
--- /dev/null
+++ b/include/linux/platform_data/ram_console.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+
+struct ram_console_platform_data {
+ const char *bootinfo;
+};
+
+#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */
diff --git a/include/linux/platform_data/tegra_bpc_mgmt.h b/include/linux/platform_data/tegra_bpc_mgmt.h
new file mode 100644
index 000000000000..bdd4862d63a3
--- /dev/null
+++ b/include/linux/platform_data/tegra_bpc_mgmt.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEGRA_BPC_MGMT_H
+#define __TEGRA_BPC_MGMT_H
+#include <linux/cpumask.h>
+
+struct tegra_bpc_mgmt_platform_data {
+ int gpio_trigger;
+ struct cpumask affinity_mask;
+ int bpc_mgmt_timeout;
+};
+
+#endif /*__TEGRA_BPC_MGMT_H*/
diff --git a/include/linux/platform_data/tegra_nor.h b/include/linux/platform_data/tegra_nor.h
new file mode 100644
index 000000000000..cd8faff2f1cd
--- /dev/null
+++ b/include/linux/platform_data/tegra_nor.h
@@ -0,0 +1,37 @@
+/*
+ * include/linux/platform_data/tegra_nor.h
+ *
+ * Copyright (C) 2010 - 2011 NVIDIA Corporation.
+ *
+ * Author:
+ * Raghavendra V K <rvk@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_NOR_PDATA_H
+#define __MACH_TEGRA_NOR_PDATA_H
+
+#include <asm/mach/flash.h>
+
+struct tegra_nor_chip_parms {
+ struct {
+ uint32_t timing0;
+ uint32_t timing1;
+ } timing_default, timing_read;
+};
+
+struct tegra_nor_platform_data {
+ struct tegra_nor_chip_parms chip_parms;
+ struct flash_platform_data flash;
+};
+
+#endif /* __MACH_TEGRA_NOR_PDATA_H */
diff --git a/include/linux/platform_data/tegra_usb.h b/include/linux/platform_data/tegra_usb.h
index 6bca5b569acb..09f32de5c671 100644
--- a/include/linux/platform_data/tegra_usb.h
+++ b/include/linux/platform_data/tegra_usb.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010-2011 NVIDIA Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -21,11 +22,26 @@ enum tegra_usb_operating_modes {
TEGRA_USB_OTG,
};
+enum tegra_usb_phy_type {
+ TEGRA_USB_PHY_TYPE_UTMIP = 0,
+ TEGRA_USB_PHY_TYPE_LINK_ULPI = 1,
+ TEGRA_USB_PHY_TYPE_NULL_ULPI = 2,
+ TEGRA_USB_PHY_TYPE_HSIC = 3,
+ TEGRA_USB_PHY_TYPE_ICUSB = 4,
+};
+
struct tegra_ehci_platform_data {
enum tegra_usb_operating_modes operating_mode;
/* power down the phy on bus suspend */
int power_down_on_bus_suspend;
+ int hotplug;
void *phy_config;
+ enum tegra_usb_phy_type phy_type;
+};
+
+struct tegra_otg_platform_data {
+ struct platform_device *ehci_device;
+ struct tegra_ehci_platform_data *ehci_pdata;
};
#endif /* _TEGRA_USB_H_ */
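
A minimal board-file sketch of the extended tegra_ehci_platform_data, showing the new hotplug and phy_type fields; all values are illustrative.

/* Sketch only: illustrative EHCI platform data for a board file. */
#include <linux/platform_data/tegra_usb.h>

static struct tegra_ehci_platform_data example_ehci_pdata = {
	.operating_mode			= TEGRA_USB_OTG,
	.power_down_on_bus_suspend	= 1,
	.hotplug			= 1,
	.phy_type			= TEGRA_USB_PHY_TYPE_UTMIP,
	.phy_config			= NULL,
};
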
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index a7d87f911cab..eee55bc0b667 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -8,17 +8,24 @@
#include <linux/notifier.h>
#include <linux/miscdevice.h>
-#define PM_QOS_RESERVED 0
-#define PM_QOS_CPU_DMA_LATENCY 1
-#define PM_QOS_NETWORK_LATENCY 2
-#define PM_QOS_NETWORK_THROUGHPUT 3
+enum {
+ PM_QOS_RESERVED = 0,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_NETWORK_LATENCY,
+ PM_QOS_NETWORK_THROUGHPUT,
+ PM_QOS_MAX_ONLINE_CPUS,
+
+ /* insert new class ID */
+
+ PM_QOS_NUM_CLASSES,
+};
-#define PM_QOS_NUM_CLASSES 4
#define PM_QOS_DEFAULT_VALUE -1
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+#define PM_QOS_MAX_ONLINE_CPUS_DEFAULT_VALUE LONG_MAX
struct pm_qos_request_list {
struct plist_node list;
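
A brief sketch of the new PM_QOS_MAX_ONLINE_CPUS class, assuming the pm_qos_add_request()/pm_qos_update_request() helpers that accompany struct pm_qos_request_list in this header.

/* Sketch only: cap the number of online CPUs via the new PM QoS class.
 * Assumes the pm_qos_add_request()/pm_qos_update_request() API. */
#include <linux/pm_qos_params.h>

static struct pm_qos_request_list example_cpu_req;

static void example_limit_online_cpus(void)
{
	pm_qos_add_request(&example_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);
	/* later: allow at most two CPUs online */
	pm_qos_update_request(&example_cpu_req, 2);
}
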
diff --git a/include/linux/power/max8907c-charger.h b/include/linux/power/max8907c-charger.h
new file mode 100644
index 000000000000..2cebad768b0d
--- /dev/null
+++ b/include/linux/power/max8907c-charger.h
@@ -0,0 +1,64 @@
+/* linux/power/max8907c-charger.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MAX8907C_CHARGER_H
+#define __LINUX_MAX8907C_CHARGER_H
+
+/* interrupt */
+#define MAX8907C_VCHG_OVP (1 << 0)
+#define MAX8907C_VCHG_F (1 << 1)
+#define MAX8907C_VCHG_R (1 << 2)
+#define MAX8907C_THM_OK_R (1 << 8)
+#define MAX8907C_THM_OK_F (1 << 9)
+#define MAX8907C_MBATTLOW_F (1 << 10)
+#define MAX8907C_MBATTLOW_R (1 << 11)
+#define MAX8907C_CHG_RST (1 << 12)
+#define MAX8907C_CHG_DONE (1 << 13)
+#define MAX8907C_CHG_TOPOFF (1 << 14)
+#define MAX8907C_CHK_TMR_FAULT (1 << 15)
+
+enum max8907c_charger_topoff_threshold {
+ MAX8907C_TOPOFF_5PERCENT = 0x00,
+ MAX8907C_TOPOFF_10PERCENT = 0x01,
+ MAX8907C_TOPOFF_15PERCENT = 0x02,
+ MAX8907C_TOPOFF_20PERCENT = 0x03,
+};
+
+enum max8907c_charger_restart_hysteresis {
+ MAX8907C_RESTART_100MV = 0x00,
+ MAX8907C_RESTART_150MV = 0x01,
+ MAX8907C_RESTART_200MV = 0x02,
+ MAX8907C_RESTART_FLOAT = 0x03,
+};
+
+enum max8907c_fast_charging_current {
+ MAX8907C_FASTCHARGE_90MA = 0x00,
+ MAX8907C_FASTCHARGE_300MA = 0x01,
+ MAX8907C_FASTCHARGE_460MA = 0x02,
+ MAX8907C_FASTCHARGE_600MA = 0x03,
+ MAX8907C_FASTCHARGE_700MA = 0x04,
+ MAX8907C_FASTCHARGE_800MA = 0x05,
+ MAX8907C_FASTCHARGE_900MA = 0x06,
+ MAX8907C_FASTCHARGE_1000MA = 0x07,
+};
+
+enum max8907c_fast_charger_time {
+ MAX8907C_FCHARGE_TM_8H = 0x00,
+ MAX8907C_FCHARGE_TM_12H = 0x01,
+ MAX8907C_FCHARGE_TM_16H = 0x02,
+ MAX8907C_FCHARGE_TM_OFF = 0x03,
+};
+
+struct max8907c_charger_pdata {
+ int irq;
+ enum max8907c_charger_topoff_threshold topoff_threshold;
+ enum max8907c_charger_restart_hysteresis restart_hysteresis;
+ enum max8907c_fast_charging_current fast_charging_current;
+ enum max8907c_fast_charger_time fast_charger_time;
+};
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 204c18dfdc9e..2287c3214138 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -14,6 +14,7 @@
#define __LINUX_POWER_SUPPLY_H__
#include <linux/device.h>
+#include <linux/wakelock.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
@@ -163,6 +164,9 @@ struct power_supply {
/* private */
struct device *dev;
struct work_struct changed_work;
+ spinlock_t changed_lock;
+ bool changed;
+ struct wake_lock work_wake_lock;
#ifdef CONFIG_LEDS_TRIGGERS
struct led_trigger *charging_full_trig;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 60a65cd7e1a0..04e59205dec9 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -78,5 +78,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_update_bits_lazy(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
#endif
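
A short sketch of the new regmap_update_bits_lazy() helper, which shares the regmap_update_bits() signature; the register offset and mask are illustrative, and the "lazy" behavior (presumably skipping redundant writes) is an assumption.

/* Sketch only: set an enable bit using the lazy variant; register 0x10
 * and the 0x01 mask/value are illustrative. */
#include <linux/regmap.h>

static int example_set_enable_bit(struct regmap *map)
{
	return regmap_update_bits_lazy(map, 0x10, 0x01, 0x01);
}
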
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index b47771aa5718..24aae2104c08 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -92,6 +92,10 @@
* FORCE_DISABLE Regulator forcibly shut down by software.
* VOLTAGE_CHANGE Regulator voltage changed.
* DISABLE Regulator was disabled.
+ * PRE_ENABLE Regulator is to be enabled
+ * POST_ENABLE Regulator was enabled
+ * OUT_PRECHANGE Regulator is enabled and its voltage is to be changed
+ * OUT_POSTCHANGE Regulator is enabled and its voltage was changed
*
* NOTE: These events can be OR'ed together when passed into handler.
*/
@@ -104,6 +108,10 @@
#define REGULATOR_EVENT_FORCE_DISABLE 0x20
#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_ENABLE 0x100
+#define REGULATOR_EVENT_POST_ENABLE 0x200
+#define REGULATOR_EVENT_OUT_PRECHANGE 0x400
+#define REGULATOR_EVENT_OUT_POSTCHANGE 0x800
struct regulator;
diff --git a/include/linux/regulator/fan53555-regulator.h b/include/linux/regulator/fan53555-regulator.h
new file mode 100644
index 000000000000..e2ed83bab4ed
--- /dev/null
+++ b/include/linux/regulator/fan53555-regulator.h
@@ -0,0 +1,63 @@
+/*
+ * include/linux/regulator/fan53555.h
+ *
+ * Interface for regulator driver for Fairchild FAN53555 processor power supply
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __REGULATOR_FAN53555_H
+#define __REGULATOR_FAN53555_H
+
+#include <linux/regulator/machine.h>
+
+/*
+ * struct fan53555_regulator_platform_data - fan53555 regulator platform data.
+ * When VSEL pin is low, VOUT is set by the VSEL0 register.
+ * When VSEL pin is high, VOUT is set by the VSEL1 register
+ *
+ * @reg_init_data: The regulator init data.
+ * @vsel_id: Select the voltage id register.
+ * @vsel0_buck_en: Software Buck enable when EN=high & VSEL=low
+ * @vsel0_mode: Force PWM mode(1) or Allow auto-PFM mode(0) during light load
+ * @init_vsel0_min_uV: initial micro volts when EN=high & VSEL=low
+ * @init_vsel0_max_uV: initial micro volts when EN=high & VSEL=low
+ * @vsel1_buck_en: Software Buck enable when EN=high & VSEL=high
+ * @vsel1_mode: Force PWM mode(1) or Allow auto-PFM mode(0) during light load
+ * @init_vsel1_min_uV: initial micro volts when EN=high & VSEL=high
+ * @init_vsel1_max_uV: initial micro volts when EN=high & VSEL=high
+ * @output_discharge: when the regulator is disabled, VOUT discharges
+ * @slew_rate: slew rate when transitioning from a low to a high voltage
+ */
+
+struct fan53555_regulator_platform_data {
+ struct regulator_init_data reg_init_data;
+ int vsel_id;
+ unsigned vsel0_buck_en:1;
+ unsigned vsel0_mode:1;
+ int init_vsel0_min_uV;
+ int init_vsel0_max_uV;
+ unsigned vsel1_buck_en:1;
+ unsigned vsel1_mode:1;
+ int init_vsel1_min_uV;
+ int init_vsel1_max_uV;
+ unsigned output_discharge:1;
+ unsigned slew_rate:3;
+};
+
+#endif /* __REGULATOR_FAN53555_H */
diff --git a/include/linux/regulator/gpio-switch-regulator.h b/include/linux/regulator/gpio-switch-regulator.h
new file mode 100644
index 000000000000..68776b93ef00
--- /dev/null
+++ b/include/linux/regulator/gpio-switch-regulator.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _GPIO_SWITCH_REGULATOR_H
+#define _GPIO_SWITCH_REGULATOR_H
+
+#include <linux/regulator/machine.h>
+
+/*
+ * struct gpio_switch_regulator_subdev_data - Gpio switch regulator subdevice
+ * data.
+ *
+ * Subdevice data to register a gpio regulator switch device driver.
+ *
+ * @regulator_name: The name of the regulator.
+ * @input_supply: Input supply name.
+ * @id: The id of the switch.
+ * @gpio_nr: GPIO number which controls this switch.
+ * @active_low: true if driving the GPIO low enables the voltage output.
+ * @init_state: 1 if the switch should be active after initialization.
+ * @voltages: Possible output voltages, in millivolts.
+ * @n_voltages: Number of voltages.
+ * @num_consumer_supplies: Number of consumer supplies.
+ * @consumer_supplies: List of consumer supplies.
+ */
+struct gpio_switch_regulator_subdev_data {
+ const char *regulator_name;
+ const char *input_supply;
+ int id;
+ int gpio_nr;
+ int active_low;
+ int pin_group;
+ int init_state;
+ int *voltages;
+ unsigned n_voltages;
+ struct regulator_consumer_supply *consumer_supplies;
+ int num_consumer_supplies;
+ struct regulation_constraints constraints;
+ int (*enable_rail)(struct gpio_switch_regulator_subdev_data *pdata);
+ int (*disable_rail)(struct gpio_switch_regulator_subdev_data *pdata);
+
+};
+
+/**
+ * gpio_switch_regulator_platform_data - platform data for gpio_switch_regulator
+ * @num_subdevs: number of regulators used
+ * @subdevs: pointer to regulators used
+ */
+struct gpio_switch_regulator_platform_data {
+ int num_subdevs;
+ struct gpio_switch_regulator_subdev_data **subdevs;
+};
+
+#endif
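
An illustrative board-file sketch of one gpio_switch_regulator_subdev_data entry described above; the names, GPIO number and voltage table are examples only.

/* Sketch only: one GPIO-controlled switch entry for a board file.
 * Names, GPIO number and voltage table are illustrative. */
#include <linux/kernel.h>
#include <linux/regulator/gpio-switch-regulator.h>

static int example_voltages[] = { 3300 };	/* millivolts */

static struct gpio_switch_regulator_subdev_data example_switch_subdev = {
	.regulator_name	= "vdd_example",
	.input_supply	= "vdd_3v3",
	.id		= 0,
	.gpio_nr	= 42,
	.active_low	= 0,
	.init_state	= 1,
	.voltages	= example_voltages,
	.n_voltages	= ARRAY_SIZE(example_voltages),
};
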
diff --git a/include/linux/regulator/max77663-regulator.h b/include/linux/regulator/max77663-regulator.h
new file mode 100644
index 000000000000..093ca42d4ca7
--- /dev/null
+++ b/include/linux/regulator/max77663-regulator.h
@@ -0,0 +1,125 @@
+/*
+ * include/linux/regulator/max77663-regulator.h
+ * Maxim LDO and Buck regulators driver
+ *
+ * Copyright 2011 Maxim Integrated Products, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_MAX77663_REGULATOR_H__
+#define __LINUX_REGULATOR_MAX77663_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define max77663_rails(_name) "max77663_"#_name
+
+enum max77663_regulator_id {
+ MAX77663_REGULATOR_ID_SD0,
+ MAX77663_REGULATOR_ID_DVSSD0,
+ MAX77663_REGULATOR_ID_SD1,
+ MAX77663_REGULATOR_ID_DVSSD1,
+ MAX77663_REGULATOR_ID_SD2,
+ MAX77663_REGULATOR_ID_SD3,
+ MAX77663_REGULATOR_ID_SD4,
+ MAX77663_REGULATOR_ID_LDO0,
+ MAX77663_REGULATOR_ID_LDO1,
+ MAX77663_REGULATOR_ID_LDO2,
+ MAX77663_REGULATOR_ID_LDO3,
+ MAX77663_REGULATOR_ID_LDO4,
+ MAX77663_REGULATOR_ID_LDO5,
+ MAX77663_REGULATOR_ID_LDO6,
+ MAX77663_REGULATOR_ID_LDO7,
+ MAX77663_REGULATOR_ID_LDO8,
+ MAX77663_REGULATOR_ID_NR,
+};
+
+/* FPS Power Up/Down Period */
+enum max77663_regulator_fps_power_period {
+ FPS_POWER_PERIOD_0,
+ FPS_POWER_PERIOD_1,
+ FPS_POWER_PERIOD_2,
+ FPS_POWER_PERIOD_3,
+ FPS_POWER_PERIOD_4,
+ FPS_POWER_PERIOD_5,
+ FPS_POWER_PERIOD_6,
+ FPS_POWER_PERIOD_7,
+ FPS_POWER_PERIOD_DEF = -1,
+};
+
+/* FPS Time Period */
+enum max77663_regulator_fps_time_period {
+ FPS_TIME_PERIOD_20US,
+ FPS_TIME_PERIOD_40US,
+ FPS_TIME_PERIOD_80US,
+ FPS_TIME_PERIOD_160US,
+ FPS_TIME_PERIOD_320US,
+ FPS_TIME_PERIOD_640US,
+ FPS_TIME_PERIOD_1280US,
+ FPS_TIME_PERIOD_2560US,
+ FPS_TIME_PERIOD_DEF = -1,
+};
+
+/* FPS Enable Source */
+enum max77663_regulator_fps_en_src {
+ FPS_EN_SRC_EN0,
+ FPS_EN_SRC_EN1,
+ FPS_EN_SRC_SW,
+ FPS_EN_SRC_RSVD,
+};
+
+/* FPS Source */
+enum max77663_regulator_fps_src {
+ FPS_SRC_0,
+ FPS_SRC_1,
+ FPS_SRC_2,
+ FPS_SRC_NONE,
+ FPS_SRC_DEF = -1,
+};
+
+/*
+ * Flags
+ */
+/* SD0 is controlled by EN2 */
+#define EN2_CTRL_SD0 0x01
+
+/* SD Slew Rate */
+#define SD_SLEW_RATE_SLOWEST 0x02 /* 13.75mV/us */
+#define SD_SLEW_RATE_SLOW 0x04 /* 27.50mV/us */
+#define SD_SLEW_RATE_FAST 0x08 /* 55.00mV/us */
+#define SD_SLEW_RATE_FASTEST 0x10 /* 100.00mV/us */
+#define SD_SLEW_RATE_MASK 0x1E
+
+/* SD Forced PWM Mode */
+#define SD_FORCED_PWM_MODE 0x20
+
+/* SD Falling Slew Rate Active-Discharge Mode */
+#define SD_FSRADE_DISABLE 0x40
+
+struct max77663_regulator_fps_cfg {
+ enum max77663_regulator_fps_src src;
+ enum max77663_regulator_fps_en_src en_src;
+ enum max77663_regulator_fps_time_period time_period;
+};
+
+struct max77663_regulator_platform_data {
+ struct regulator_init_data init_data;
+ bool init_apply;
+ bool init_enable;
+ int init_uV;
+ enum max77663_regulator_fps_src fps_src;
+ enum max77663_regulator_fps_power_period fps_pu_period;
+ enum max77663_regulator_fps_power_period fps_pd_period;
+
+ int num_fps_cfgs;
+ struct max77663_regulator_fps_cfg *fps_cfgs;
+
+ unsigned int flags;
+};
+
+#endif /* __LINUX_REGULATOR_MAX77663_REGULATOR_H__ */
diff --git a/include/linux/regulator/max8907c-regulator.h b/include/linux/regulator/max8907c-regulator.h
new file mode 100644
index 000000000000..ddc5f0a60339
--- /dev/null
+++ b/include/linux/regulator/max8907c-regulator.h
@@ -0,0 +1,46 @@
+/* linux/regulator/max8907c-regulator.h
+ *
+ * Functions to access MAX8907C power management chip.
+ *
+ * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8907C_H
+#define __LINUX_REGULATOR_MAX8907C_H
+
+/* IDs */
+#define MAX8907C_SD1 0
+#define MAX8907C_SD2 1
+#define MAX8907C_SD3 2
+#define MAX8907C_LDO1 3
+#define MAX8907C_LDO2 4
+#define MAX8907C_LDO3 5
+#define MAX8907C_LDO4 6
+#define MAX8907C_LDO5 7
+#define MAX8907C_LDO6 8
+#define MAX8907C_LDO7 9
+#define MAX8907C_LDO8 10
+#define MAX8907C_LDO9 11
+#define MAX8907C_LDO10 12
+#define MAX8907C_LDO11 13
+#define MAX8907C_LDO12 14
+#define MAX8907C_LDO13 15
+#define MAX8907C_LDO14 16
+#define MAX8907C_LDO15 17
+#define MAX8907C_LDO16 18
+#define MAX8907C_LDO17 19
+#define MAX8907C_LDO18 20
+#define MAX8907C_LDO19 21
+#define MAX8907C_LDO20 22
+#define MAX8907C_OUT5V 23
+#define MAX8907C_OUT33V 24
+#define MAX8907C_BBAT 25
+#define MAX8907C_SDBY 26
+#define MAX8907C_VRTC 27
+#define MAX8907C_WLED 27
+
+#endif
diff --git a/include/linux/regulator/ricoh583-regulator.h b/include/linux/regulator/ricoh583-regulator.h
new file mode 100644
index 000000000000..39fdb9e56e05
--- /dev/null
+++ b/include/linux/regulator/ricoh583-regulator.h
@@ -0,0 +1,63 @@
+/*
+ * linux/regulator/ricoh583-regulator.h
+ *
+ * Interface for regulator driver for RICOH583 power management chip.
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_RICOH583_H
+#define __LINUX_REGULATOR_RICOH583_H
+
+#include <linux/regulator/machine.h>
+
+
+#define ricoh583_rails(_name) "RICOH583_"#_name
+
+/* RICOH Regulator IDs */
+enum regulator_id {
+ RICOH583_ID_DC0,
+ RICOH583_ID_DC1,
+ RICOH583_ID_DC2,
+ RICOH583_ID_DC3,
+ RICOH583_ID_LDO0,
+ RICOH583_ID_LDO1,
+ RICOH583_ID_LDO2,
+ RICOH583_ID_LDO3,
+ RICOH583_ID_LDO4,
+ RICOH583_ID_LDO5,
+ RICOH583_ID_LDO6,
+ RICOH583_ID_LDO7,
+ RICOH583_ID_LDO8,
+ RICOH583_ID_LDO9,
+};
+
+struct ricoh583_regulator_platform_data {
+ struct regulator_init_data regulator;
+ int init_uV;
+ unsigned init_enable:1;
+ unsigned init_apply:1;
+ int deepsleep_uV;
+ int deepsleep_slots;
+ unsigned long ext_pwr_req;
+ unsigned long flags;
+};
+
+#endif
diff --git a/include/linux/regulator/tps6236x-regulator.h b/include/linux/regulator/tps6236x-regulator.h
new file mode 100644
index 000000000000..7e8a37529665
--- /dev/null
+++ b/include/linux/regulator/tps6236x-regulator.h
@@ -0,0 +1,54 @@
+/*
+ * include/linux/regulator/tps6236x.h
+ *
+ * Interface for regulator driver for TI TPS6236x Processor core supply
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __REGULATOR_TPS6236X_H
+#define __REGULATOR_TPS6236X_H
+
+#include <linux/regulator/machine.h>
+
+/*
+ * struct tps6236x_regulator_platform_data - tps6236x regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @internal_pd_enable: Enable the internal pull-down.
+ * @vsel: Select the voltage ID register.
+ * @init_uV: Initial voltage, in microvolts, to apply during initialization.
+ * @init_apply: Apply the init parameters during initialization.
+ * @is_force_pwm: Force PWM mode; otherwise the PFM mode configuration
+ * is used.
+ * @enable_discharge: Discharge the output capacitor via a typical 300 Ohm
+ * path.
+ */
+
+struct tps6236x_regulator_platform_data {
+ struct regulator_init_data reg_init_data;
+ int internal_pd_enable;
+ int vsel;
+ int init_uV;
+ unsigned init_apply:1;
+ bool is_force_pwm;
+ bool enable_discharge;
+};
+
+#endif /* __REGULATOR_TPS6236X_H */
diff --git a/include/linux/regulator/tps6591x-regulator.h b/include/linux/regulator/tps6591x-regulator.h
new file mode 100644
index 000000000000..7f7f647906d0
--- /dev/null
+++ b/include/linux/regulator/tps6591x-regulator.h
@@ -0,0 +1,77 @@
+/*
+ * include/linux/regulator/tps6591x-regulator.h
+ *
+ * Interface for regulator driver for TI TPS6591x PMIC family
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __REGULATOR_TPS6591X_H
+#define __REGULATOR_TPS6591X_H
+
+#include <linux/regulator/machine.h>
+
+#define tps6591x_rails(_name) "tps6591x_"#_name
+
+enum {
+ TPS6591X_ID_VIO,
+ TPS6591X_ID_VDD_1,
+ TPS6591X_ID_VDD_2,
+ TPS6591X_ID_VDDCTRL,
+ TPS6591X_ID_LDO_1,
+ TPS6591X_ID_LDO_2,
+ TPS6591X_ID_LDO_3,
+ TPS6591X_ID_LDO_4,
+ TPS6591X_ID_LDO_5,
+ TPS6591X_ID_LDO_6,
+ TPS6591X_ID_LDO_7,
+ TPS6591X_ID_LDO_8,
+};
+
+enum tps6591x_ext_control {
+ EXT_CTRL_NONE = 0x0,
+ EXT_CTRL_EN1,
+ EXT_CTRL_EN2,
+ EXT_CTRL_SLEEP_OFF,
+};
+
+enum tps6591x_config_flags {
+ LDO_LOW_POWER_ON_SUSPEND = 0x1,
+};
+
+/*
+ * struct tps6591x_regulator_platform_data - tps6591x regulator platform data.
+ *
+ * @regulator: The regulator init data.
+ * @init_uV: Initial voltage, in microvolts, to apply during initialization.
+ * @init_enable: Enable the rail during initialization.
+ * @init_apply: Apply the init parameters during initialization.
+ * @slew_rate_uV_per_us: Slew rate in microvolts per microsecond.
+ */
+
+struct tps6591x_regulator_platform_data {
+ struct regulator_init_data regulator;
+ int init_uV;
+ unsigned init_enable:1;
+ unsigned init_apply:1;
+ enum tps6591x_ext_control ectrl;
+ int slew_rate_uV_per_us;
+ unsigned int flags;
+};
+
+#endif /* __REGULATOR_TPS6591X_H */
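For context, a board file would normally wrap one of these IDs in a tps6591x_regulator_platform_data and hand it to the PMIC MFD cell. The sketch below is illustrative only: the "vdd_sensor" consumer, the 1.8 V constraints and the slew value are assumptions, not taken from this patch.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps6591x-regulator.h>

/* Hypothetical consumer of LDO_2; supply name and values are assumptions. */
static struct regulator_consumer_supply tps6591x_ldo2_supply[] = {
	REGULATOR_SUPPLY("vdd_sensor", NULL),
};

static struct tps6591x_regulator_platform_data pdata_ldo2 = {
	.regulator = {
		.constraints = {
			.min_uV = 1800000,
			.max_uV = 1800000,
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.num_consumer_supplies = ARRAY_SIZE(tps6591x_ldo2_supply),
		.consumer_supplies = tps6591x_ldo2_supply,
	},
	.init_uV = 1800000,		/* program 1.8 V at init */
	.init_enable = 1,		/* turn the rail on at init */
	.init_apply = 1,		/* apply the two settings above */
	.ectrl = EXT_CTRL_SLEEP_OFF,	/* tie the rail to the sleep request */
	.slew_rate_uV_per_us = 10000,
	.flags = LDO_LOW_POWER_ON_SUSPEND,
};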
diff --git a/include/linux/regulator/tps80031-regulator.h b/include/linux/regulator/tps80031-regulator.h
new file mode 100644
index 000000000000..1670d147fc3e
--- /dev/null
+++ b/include/linux/regulator/tps80031-regulator.h
@@ -0,0 +1,89 @@
+/*
+ * include/linux/regulator/tps80031-regulator.h
+ *
+ * Interface for regulator driver for TI TPS80031
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __REGULATOR_TPS80031_H
+#define __REGULATOR_TPS80031_H
+
+#include <linux/regulator/machine.h>
+
+#define tps80031_rails(_name) "tps80031_"#_name
+
+enum {
+ TPS80031_ID_VIO,
+ TPS80031_ID_SMPS1,
+ TPS80031_ID_SMPS2,
+ TPS80031_ID_SMPS3,
+ TPS80031_ID_SMPS4,
+ TPS80031_ID_VANA,
+ TPS80031_ID_LDO1,
+ TPS80031_ID_LDO2,
+ TPS80031_ID_LDO3,
+ TPS80031_ID_LDO4,
+ TPS80031_ID_LDO5,
+ TPS80031_ID_LDO6,
+ TPS80031_ID_LDO7,
+ TPS80031_ID_LDOLN,
+ TPS80031_ID_LDOUSB,
+ TPS80031_ID_VBUS,
+ TPS80031_ID_CHARGER,
+};
+
+
+enum {
+ /* USBLDO input selection */
+ USBLDO_INPUT_VSYS = 0x00000001,
+ USBLDO_INPUT_PMID = 0x00000002,
+
+ /* LDO3 output mode */
+ LDO3_OUTPUT_VIB = 0x00000004,
+
+ /* VBUS configuration */
+ VBUS_DISCHRG_EN_PDN = 0x00000004,
+ VBUS_SW_ONLY = 0x00000008,
+ VBUS_SW_N_ID = 0x00000010,
+};
+
+/*
+ * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
+ *
+ * @regulator: The regulator init data.
+ * @init_uV: Initial voltage, in microvolts, to apply during initialization.
+ * @init_enable: Enable the rail during initialization.
+ * @init_apply: Apply the init parameters during initialization.
+ * @ext_ctrl_flag: External control flag for sleep/power request control.
+ * @flags: Rail configuration flags; an OR of the enums above.
+ * @delay_us: Delay in microseconds after setting the desired voltage.
+ */
+
+struct tps80031_regulator_platform_data {
+ struct regulator_init_data regulator;
+ int init_uV;
+ unsigned init_enable:1;
+ unsigned init_apply:1;
+ unsigned int ext_ctrl_flag;
+ unsigned int flags;
+ int delay_us;
+};
+
+#endif /* __REGULATOR_TPS80031_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41d0237fd449..5bb4dd2e4c59 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1753,6 +1753,9 @@ static inline void put_task_struct(struct task_struct *t)
extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
/*
* Per process flags
*/
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a5c31146a337..0d239897b2f4 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -245,6 +245,7 @@ struct uart_ops {
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
int (*set_wake)(struct uart_port *, unsigned int state);
+ void (*wake_peer)(struct uart_port *);
/*
* Return a string describing the type of the port
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index c75bda37c18e..543e361b15c1 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -2,10 +2,10 @@
* include/linux/serial_reg.h
*
* Copyright (C) 1992, 1994 by Theodore Ts'o.
- *
- * Redistribution of this file is permitted under the terms of the GNU
+ *
+ * Redistribution of this file is permitted under the terms of the GNU
* Public License (GPL)
- *
+ *
* These are the UART port assignments, expressed as offsets from the base
* register. These assignments should hold for any serial port based on
* a 8250, 16450, or 16550(A).
@@ -86,7 +86,7 @@
#define UART_LCR 3 /* Out: Line Control Register */
/*
- * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting
+ * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting
* UART_LCR_STOP will select 1.5 stop bits, not 2 stop bits.
*/
#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
@@ -110,8 +110,10 @@
#define UART_MCR 4 /* Out: Modem Control Register */
#define UART_MCR_CLKSEL 0x80 /* Divide clock by 4 (TI16C752, EFR[4]=1) */
#define UART_MCR_TCRTLR 0x40 /* Access TCR/TLR (TI16C752, EFR[4]=1) */
+#define UART_MCR_HW_RTS 0x40 /* Enable hw control of RTS (Tegra UART) */
#define UART_MCR_XONANY 0x20 /* Enable Xon Any (TI16C752, EFR[4]=1) */
#define UART_MCR_AFE 0x20 /* Enable auto-RTS/CTS (TI16C550C/TI16C750) */
+#define UART_MCR_HW_CTS 0x20 /* Enable HW based CTS control (Tegra UART)*/
#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
#define UART_MCR_OUT2 0x08 /* Out2 complement */
#define UART_MCR_OUT1 0x04 /* Out1 complement */
diff --git a/include/linux/sockios.h b/include/linux/sockios.h
index 7997a506ad41..f7ffe36db03c 100644
--- a/include/linux/sockios.h
+++ b/include/linux/sockios.h
@@ -65,6 +65,7 @@
#define SIOCDIFADDR 0x8936 /* delete PA address */
#define SIOCSIFHWBROADCAST 0x8937 /* set hardware broadcast addr */
#define SIOCGIFCOUNT 0x8938 /* get number of devices */
+#define SIOCKILLADDR 0x8939 /* kill sockets with this local addr */
#define SIOCGIFBR 0x8940 /* Bridging support */
#define SIOCSIFBR 0x8941 /* Set bridging options */
diff --git a/include/linux/spi-tegra.h b/include/linux/spi-tegra.h
new file mode 100644
index 000000000000..380965005516
--- /dev/null
+++ b/include/linux/spi-tegra.h
@@ -0,0 +1,50 @@
+/*
+ * include/linux/spi-tegra.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_SPI_TEGRA_H
+#define _LINUX_SPI_TEGRA_H
+
+#include <linux/clk.h>
+
+struct spi_clk_parent {
+ const char *name;
+ struct clk *parent_clk;
+ unsigned long fixed_clk_rate;
+};
+
+struct tegra_spi_platform_data {
+ bool is_dma_based;
+ int max_dma_buffer;
+ bool is_clkon_always;
+ unsigned int max_rate;
+ struct spi_clk_parent *parent_clk_list;
+ int parent_clk_count;
+};
+
+/* Per-device controller data, used to tell the driver whether a
+ * hardware chip select can be used and, if so, the CS setup and
+ * hold times. */
+struct tegra_spi_device_controller_data {
+ bool is_hw_based_cs;
+ int cs_setup_clk_count;
+ int cs_hold_clk_count;
+};
+
+#endif /* _LINUX_SPI_TEGRA_H */
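The per-device timing block is meant to travel through the generic SPI board registration path. A minimal sketch, assuming a hypothetical spidev slave on bus 0 and illustrative clock counts:

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi-tegra.h>

/* Hypothetical slave using the hardware chip select (values assumed). */
static struct tegra_spi_device_controller_data spi_slave_cdata = {
	.is_hw_based_cs = true,
	.cs_setup_clk_count = 2,	/* CS setup time, in SPI clock cycles */
	.cs_hold_clk_count = 2,		/* CS hold time, in SPI clock cycles */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias = "spidev",
		.bus_num = 0,
		.chip_select = 1,
		.max_speed_hz = 12000000,
		.controller_data = &spi_slave_cdata,
	},
};

The array would then be passed to spi_register_board_info() from the machine init code.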
diff --git a/include/linux/switch.h b/include/linux/switch.h
new file mode 100644
index 000000000000..3e4c748e343a
--- /dev/null
+++ b/include/linux/switch.h
@@ -0,0 +1,53 @@
+/*
+ * Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+ const char *name;
+ struct device *dev;
+ int index;
+ int state;
+
+ ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+ ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+ const char *name;
+ unsigned gpio;
+
+ /* if NULL, switch_dev.name will be printed */
+ const char *name_on;
+ const char *name_off;
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+ return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
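To show how the registration and state-reporting calls fit together, here is a minimal driver-side sketch; the "h2w" switch name and the headset trigger are assumptions:

#include <linux/types.h>
#include <linux/switch.h>

static struct switch_dev headset_sdev = {
	.name = "h2w",		/* hypothetical switch name */
};

static int headset_switch_setup(void)
{
	/* Registers the switch with the switch class (sysfs state node). */
	return switch_dev_register(&headset_sdev);
}

static void headset_plug_event(bool plugged)
{
	/* Reports the new state; userspace is notified of the change. */
	switch_set_state(&headset_sdev, plugged ? 1 : 0);
}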
diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h
new file mode 100644
index 000000000000..5539cc520779
--- /dev/null
+++ b/include/linux/synaptics_i2c_rmi.h
@@ -0,0 +1,55 @@
+/*
+ * include/linux/synaptics_i2c_rmi.h - platform data structure for Synaptics RMI I2C touchscreens
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNAPTICS_I2C_RMI_H
+#define _LINUX_SYNAPTICS_I2C_RMI_H
+
+#define SYNAPTICS_I2C_RMI_NAME "synaptics-rmi-ts"
+
+enum {
+ SYNAPTICS_FLIP_X = 1UL << 0,
+ SYNAPTICS_FLIP_Y = 1UL << 1,
+ SYNAPTICS_SWAP_XY = 1UL << 2,
+ SYNAPTICS_SNAP_TO_INACTIVE_EDGE = 1UL << 3,
+};
+
+struct synaptics_i2c_rmi_platform_data {
+ uint32_t version; /* Use this entry for panels with */
+ /* (major << 8 | minor) version or above. */
+ /* If non-zero another array entry follows */
+ int (*power)(int on); /* Only valid in first array entry */
+ uint32_t flags;
+ unsigned long irqflags;
+ uint32_t inactive_left; /* 0x10000 = screen width */
+ uint32_t inactive_right; /* 0x10000 = screen width */
+ uint32_t inactive_top; /* 0x10000 = screen height */
+ uint32_t inactive_bottom; /* 0x10000 = screen height */
+ uint32_t snap_left_on; /* 0x10000 = screen width */
+ uint32_t snap_left_off; /* 0x10000 = screen width */
+ uint32_t snap_right_on; /* 0x10000 = screen width */
+ uint32_t snap_right_off; /* 0x10000 = screen width */
+ uint32_t snap_top_on; /* 0x10000 = screen height */
+ uint32_t snap_top_off; /* 0x10000 = screen height */
+ uint32_t snap_bottom_on; /* 0x10000 = screen height */
+ uint32_t snap_bottom_off; /* 0x10000 = screen height */
+ uint32_t fuzz_x; /* 0x10000 = screen width */
+ uint32_t fuzz_y; /* 0x10000 = screen height */
+ int fuzz_p;
+ int fuzz_w;
+ int8_t sensitivity_adjust;
+};
+
+#endif /* _LINUX_SYNAPTICS_I2C_RMI_H */
diff --git a/include/linux/tegra_audio.h b/include/linux/tegra_audio.h
new file mode 100644
index 000000000000..516b5a89c4e6
--- /dev/null
+++ b/include/linux/tegra_audio.h
@@ -0,0 +1,78 @@
+/* include/linux/tegra_audio.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _TEGRA_AUDIO_H
+#define _TEGRA_AUDIO_H
+
+#include <linux/ioctl.h>
+
+#define TEGRA_AUDIO_MAGIC 't'
+
+#define TEGRA_AUDIO_IN_START _IO(TEGRA_AUDIO_MAGIC, 0)
+#define TEGRA_AUDIO_IN_STOP _IO(TEGRA_AUDIO_MAGIC, 1)
+
+struct tegra_audio_in_config {
+ int rate;
+ int stereo;
+};
+
+struct dam_srate {
+ unsigned int in_sample_rate;
+ unsigned int out_sample_rate;
+ unsigned int audio_bits;
+ unsigned int client_bits;
+ unsigned int audio_channels;
+ unsigned int client_channels;
+ unsigned int apbif_chan;
+};
+
+#define TEGRA_AUDIO_IN_SET_CONFIG _IOW(TEGRA_AUDIO_MAGIC, 2, \
+ const struct tegra_audio_in_config *)
+#define TEGRA_AUDIO_IN_GET_CONFIG _IOR(TEGRA_AUDIO_MAGIC, 3, \
+ struct tegra_audio_in_config *)
+
+#define TEGRA_AUDIO_IN_SET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 4, \
+ const unsigned int *)
+#define TEGRA_AUDIO_IN_GET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 5, \
+ unsigned int *)
+#define TEGRA_AUDIO_OUT_SET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 6, \
+ const unsigned int *)
+#define TEGRA_AUDIO_OUT_GET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 7, \
+ unsigned int *)
+
+#define TEGRA_AUDIO_OUT_FLUSH _IO(TEGRA_AUDIO_MAGIC, 10)
+
+#define TEGRA_AUDIO_BIT_FORMAT_DEFAULT 0
+#define TEGRA_AUDIO_BIT_FORMAT_DSP 1
+#define TEGRA_AUDIO_SET_BIT_FORMAT _IOW(TEGRA_AUDIO_MAGIC, 11, \
+ const unsigned int *)
+#define TEGRA_AUDIO_GET_BIT_FORMAT _IOR(TEGRA_AUDIO_MAGIC, 12, \
+ unsigned int *)
+
+#define DAM_SRC_START _IOW(TEGRA_AUDIO_MAGIC, 13, struct dam_srate *)
+#define DAM_SRC_STOP _IO(TEGRA_AUDIO_MAGIC, 14)
+#define DAM_MIXING_START _IOW(TEGRA_AUDIO_MAGIC, 15, struct dam_srate *)
+#define DAM_MIXING_STOP _IO(TEGRA_AUDIO_MAGIC, 16)
+#define DAM_SET_MIXING_FLAG _IO(TEGRA_AUDIO_MAGIC, 17)
+
+#define I2S_START _IOW(TEGRA_AUDIO_MAGIC, 15, struct i2s_pcm_format *)
+#define I2S_STOP _IOW(TEGRA_AUDIO_MAGIC, 16, struct i2s_pcm_format *)
+#define I2S_LOOPBACK _IOW(TEGRA_AUDIO_MAGIC, 17, unsigned int *)
+#define I2S_MODE_I2S _IOW(TEGRA_AUDIO_MAGIC, 18, unsigned int *)
+
+#endif /* _TEGRA_AUDIO_H */
diff --git a/include/linux/tegra_avp.h b/include/linux/tegra_avp.h
new file mode 100644
index 000000000000..9dc92f821368
--- /dev/null
+++ b/include/linux/tegra_avp.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_AVP_H
+#define __LINUX_TEGRA_AVP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define TEGRA_AVP_LIB_MAX_NAME 32
+#define TEGRA_AVP_LIB_MAX_ARGS 220 /* DO NOT CHANGE THIS! */
+
+struct tegra_avp_lib {
+ char name[TEGRA_AVP_LIB_MAX_NAME];
+ void __user *args;
+ size_t args_len;
+ int greedy;
+ unsigned long handle;
+};
+
+struct tegra_avp_platform_data {
+ unsigned long emc_clk_rate;
+};
+#define TEGRA_AVP_IOCTL_MAGIC 'r'
+
+#define TEGRA_AVP_IOCTL_LOAD_LIB _IOWR(TEGRA_AVP_IOCTL_MAGIC, 0x40, struct tegra_avp_lib)
+#define TEGRA_AVP_IOCTL_UNLOAD_LIB _IOW(TEGRA_AVP_IOCTL_MAGIC, 0x41, unsigned long)
+
+#define TEGRA_AVP_IOCTL_MIN_NR _IOC_NR(TEGRA_AVP_IOCTL_LOAD_LIB)
+#define TEGRA_AVP_IOCTL_MAX_NR _IOC_NR(TEGRA_AVP_IOCTL_UNLOAD_LIB)
+
+#endif
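From userspace the load/unload pair is driven through ioctl(). A rough sketch, assuming the driver exposes a "/dev/tegra_avp" node and that the sanitized (headers_install) copy of this header is used:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tegra_avp.h>

/* Load a named AVP library and return its handle for a later unload. */
static int avp_load_lib(const char *name, unsigned long *handle)
{
	struct tegra_avp_lib lib;
	int fd, err;

	memset(&lib, 0, sizeof(lib));	/* no args, not greedy */
	strncpy(lib.name, name, TEGRA_AVP_LIB_MAX_NAME - 1);

	fd = open("/dev/tegra_avp", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return -1;

	err = ioctl(fd, TEGRA_AVP_IOCTL_LOAD_LIB, &lib);
	if (!err)
		*handle = lib.handle;	/* pass to TEGRA_AVP_IOCTL_UNLOAD_LIB */

	close(fd);
	return err;
}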
diff --git a/include/linux/tegra_caif.h b/include/linux/tegra_caif.h
new file mode 100644
index 000000000000..fed67499defc
--- /dev/null
+++ b/include/linux/tegra_caif.h
@@ -0,0 +1,34 @@
+/* include/linux/tegra_caif.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _TEGRA_CAIF_H_
+#define _TEGRA_CAIF_H_
+
+/* The GPIO details needed by the rainbow caif */
+struct tegra_caif_platform_data {
+ int reset;
+ int power;
+ int awr;
+ int cwr;
+ int spi_int;
+ int spi_ss;
+};
+
+#endif /* _TEGRA_CAIF_H_ */
+
diff --git a/include/linux/tegra_ion.h b/include/linux/tegra_ion.h
new file mode 100644
index 000000000000..43d3d5a1cfdc
--- /dev/null
+++ b/include/linux/tegra_ion.h
@@ -0,0 +1,98 @@
+/*
+ * include/linux/tegra_ion.h
+ *
+ * Copyright (C) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ion.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#ifndef _LINUX_TEGRA_ION_H
+#define _LINUX_TEGRA_ION_H
+
+struct tegra_ion_id_data {
+ struct ion_handle *handle;
+ unsigned long id;
+ size_t size;
+};
+
+struct tegra_ion_pin_data {
+ struct ion_handle **handles; /* array of handles to pin/unpin */
+ unsigned long *addr; /* array of addresses to return */
+ unsigned long count; /* number of entries in handles */
+};
+
+/* Cache operations. */
+enum {
+ TEGRA_ION_CACHE_OP_WB = 0,
+ TEGRA_ION_CACHE_OP_INV,
+ TEGRA_ION_CACHE_OP_WB_INV,
+};
+
+struct tegra_ion_cache_maint_data {
+ unsigned long addr;
+ struct ion_handle *handle;
+ size_t len;
+ unsigned int op;
+};
+
+struct tegra_ion_rw_data {
+ unsigned long addr; /* user pointer*/
+ struct ion_handle *handle;
+ unsigned int offset; /* offset into handle mem */
+ unsigned int elem_size; /* individual atom size */
+ unsigned int mem_stride; /* delta in bytes between atoms in handle mem */
+ unsigned int user_stride; /* delta in bytes between atoms in user */
+ unsigned int count; /* number of atoms to copy */
+};
+
+struct tegra_ion_get_params_data {
+ struct ion_handle *handle;
+ size_t size;
+ unsigned int align;
+ unsigned int heap;
+ unsigned long addr;
+};
+
+/* Custom Ioctl's. */
+enum {
+ TEGRA_ION_ALLOC_FROM_ID = 0,
+ TEGRA_ION_GET_ID,
+ TEGRA_ION_PIN,
+ TEGRA_ION_UNPIN,
+ TEGRA_ION_CACHE_MAINT,
+ TEGRA_ION_READ,
+ TEGRA_ION_WRITE,
+ TEGRA_ION_GET_PARAM,
+};
+
+/* List of heaps in the system. */
+enum {
+ TEGRA_ION_HEAP_CARVEOUT = 0,
+ TEGRA_ION_HEAP_IRAM,
+ TEGRA_ION_HEAP_VPR,
+ TEGRA_ION_HEAP_IOMMU
+};
+
+/* additional heap types used only on tegra */
+enum {
+ TEGRA_ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_CUSTOM + 1,
+};
+
+#define TEGRA_ION_HEAP_IOMMU_MASK (1 << TEGRA_ION_HEAP_TYPE_IOMMU)
+
+#endif /* _LINUX_TEGRA_ION_H */
diff --git a/include/linux/tegra_mediaserver.h b/include/linux/tegra_mediaserver.h
new file mode 100644
index 000000000000..f28473baf63e
--- /dev/null
+++ b/include/linux/tegra_mediaserver.h
@@ -0,0 +1,112 @@
+/* include/linux/tegra_mediaserver.h
+ *
+ * Media Server driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef _TEGRA_MEDIASERVER_H
+#define _TEGRA_MEDIASERVER_H
+
+#include <linux/ioctl.h>
+
+#define TEGRA_MEDIASERVER_MAGIC 'm'
+#define TEGRA_MEDIASERVER_IOCTL_ALLOC \
+ _IOWR(TEGRA_MEDIASERVER_MAGIC, 0x40, \
+ union tegra_mediaserver_alloc_info)
+
+enum tegra_mediaserver_resource_type {
+ TEGRA_MEDIASERVER_RESOURCE_BLOCK = 0,
+ TEGRA_MEDIASERVER_RESOURCE_IRAM,
+};
+
+enum tegra_mediaserver_block_type {
+ TEGRA_MEDIASERVER_BLOCK_AUDDEC = 0,
+ TEGRA_MEDIASERVER_BLOCK_VIDDEC,
+};
+
+enum tegra_mediaserver_iram_type {
+ TEGRA_MEDIASERVER_IRAM_SCRATCH = 0,
+ TEGRA_MEDIASERVER_IRAM_SHARED,
+};
+
+
+struct tegra_mediaserver_block_info {
+ int nvmm_block_handle;
+ int avp_block_handle;
+ int avp_block_library_handle;
+ int service_handle;
+ int service_library_handle;
+};
+
+struct tegra_mediaserver_iram_info {
+ unsigned long rm_handle;
+ int physical_address;
+};
+
+union tegra_mediaserver_alloc_info {
+ struct {
+ int tegra_mediaserver_resource_type;
+
+ union {
+ struct tegra_mediaserver_block_info block;
+
+ struct {
+ int tegra_mediaserver_iram_type;
+ int alignment;
+ size_t size;
+ } iram;
+ } u;
+ } in;
+
+ struct {
+ union {
+ struct {
+ int count;
+ } block;
+
+ struct tegra_mediaserver_iram_info iram;
+ } u;
+ } out;
+};
+
+
+#define TEGRA_MEDIASERVER_IOCTL_FREE \
+ _IOR(TEGRA_MEDIASERVER_MAGIC, 0x41, union tegra_mediaserver_free_info)
+
+union tegra_mediaserver_free_info {
+ struct {
+ int tegra_mediaserver_resource_type;
+
+ union {
+ int nvmm_block_handle;
+ int iram_rm_handle;
+ } u;
+ } in;
+};
+
+
+#define TEGRA_MEDIASERVER_IOCTL_UPDATE_BLOCK_INFO \
+ _IOR(TEGRA_MEDIASERVER_MAGIC, 0x45, \
+ union tegra_mediaserver_update_block_info)
+
+union tegra_mediaserver_update_block_info {
+ struct tegra_mediaserver_block_info in;
+};
+#endif
+
diff --git a/include/linux/tegra_nvavp.h b/include/linux/tegra_nvavp.h
new file mode 100644
index 000000000000..32dc4c62b4bd
--- /dev/null
+++ b/include/linux/tegra_nvavp.h
@@ -0,0 +1,84 @@
+/*
+ * include/linux/tegra_nvavp.h
+ *
+ * Copyright (C) 2011 NVIDIA Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __LINUX_TEGRA_NVAVP_H
+#define __LINUX_TEGRA_NVAVP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NVAVP_MAX_RELOCATION_COUNT 64
+
+/* avp submit flags */
+#define NVAVP_FLAG_NONE 0x00000000
+#define NVAVP_UCODE_EXT 0x00000001 /* use external ucode provided */
+
+enum {
+ NVAVP_MODULE_ID_AVP = 2,
+ NVAVP_MODULE_ID_VCP = 3,
+ NVAVP_MODULE_ID_BSEA = 27,
+ NVAVP_MODULE_ID_VDE = 28,
+ NVAVP_MODULE_ID_MPE = 29,
+ NVAVP_MODULE_ID_EMC = 75,
+};
+
+struct nvavp_cmdbuf {
+ __u32 mem;
+ __u32 offset;
+ __u32 words;
+};
+
+struct nvavp_reloc {
+ __u32 cmdbuf_mem;
+ __u32 cmdbuf_offset;
+ __u32 target;
+ __u32 target_offset;
+};
+
+struct nvavp_syncpt {
+ __u32 id;
+ __u32 value;
+};
+
+struct nvavp_pushbuffer_submit_hdr {
+ struct nvavp_cmdbuf cmdbuf;
+ struct nvavp_reloc *relocs;
+ __u32 num_relocs;
+ struct nvavp_syncpt *syncpt;
+ __u32 flags;
+};
+
+struct nvavp_set_nvmap_fd_args {
+ __u32 fd;
+};
+
+struct nvavp_clock_args {
+ __u32 id;
+ __u32 rate;
+};
+
+#define NVAVP_IOCTL_MAGIC 'n'
+
+#define NVAVP_IOCTL_SET_NVMAP_FD _IOW(NVAVP_IOCTL_MAGIC, 0x60, \
+ struct nvavp_set_nvmap_fd_args)
+#define NVAVP_IOCTL_GET_SYNCPOINT_ID _IOR(NVAVP_IOCTL_MAGIC, 0x61, \
+ __u32)
+#define NVAVP_IOCTL_PUSH_BUFFER_SUBMIT _IOWR(NVAVP_IOCTL_MAGIC, 0x63, \
+ struct nvavp_pushbuffer_submit_hdr)
+#define NVAVP_IOCTL_SET_CLOCK _IOWR(NVAVP_IOCTL_MAGIC, 0x64, \
+ struct nvavp_clock_args)
+#define NVAVP_IOCTL_GET_CLOCK _IOR(NVAVP_IOCTL_MAGIC, 0x65, \
+ struct nvavp_clock_args)
+
+
+#define NVAVP_IOCTL_MIN_NR _IOC_NR(NVAVP_IOCTL_SET_NVMAP_FD)
+#define NVAVP_IOCTL_MAX_NR _IOC_NR(NVAVP_IOCTL_GET_CLOCK)
+
+#endif /* __LINUX_TEGRA_NVAVP_H */
diff --git a/include/linux/tegra_overlay.h b/include/linux/tegra_overlay.h
new file mode 100644
index 000000000000..2a6025afdad7
--- /dev/null
+++ b/include/linux/tegra_overlay.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 NVIDIA Corporation
+ * Author: Dan Willemsen <dwillemsen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_OVERLAY_H
+#define __LINUX_TEGRA_OVERLAY_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <video/tegrafb.h>
+
+#define TEGRA_FB_WIN_BLEND_NONE 0
+#define TEGRA_FB_WIN_BLEND_PREMULT 1
+#define TEGRA_FB_WIN_BLEND_COVERAGE 2
+
+#define TEGRA_FB_WIN_FLAG_INVERT_H (1 << 0)
+#define TEGRA_FB_WIN_FLAG_INVERT_V (1 << 1)
+#define TEGRA_FB_WIN_FLAG_TILED (1 << 2)
+
+/* set index to -1 to ignore window data */
+struct tegra_overlay_windowattr {
+ __s32 index;
+ __u32 buff_id;
+ __u32 blend;
+ __u32 offset;
+ __u32 offset_u;
+ __u32 offset_v;
+ __u32 stride;
+ __u32 stride_uv;
+ __u32 pixformat;
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+ __u32 out_x;
+ __u32 out_y;
+ __u32 out_w;
+ __u32 out_h;
+ __u32 z;
+ __u32 pre_syncpt_id;
+ __u32 pre_syncpt_val;
+ __u32 hfilter;
+ __u32 vfilter;
+ __u32 do_not_use__tiled; /* compatibility */
+ __u32 flags;
+};
+
+#define TEGRA_OVERLAY_FLIP_FLAG_BLEND_REORDER (1 << 0)
+#define TEGRA_FB_FLIP_N_WINDOWS 3
+
+struct tegra_overlay_flip_args {
+ struct tegra_overlay_windowattr win[TEGRA_FB_FLIP_N_WINDOWS];
+ __u32 post_syncpt_id;
+ __u32 post_syncpt_val;
+ __u32 flags;
+};
+
+#define TEGRA_OVERLAY_IOCTL_MAGIC 'O'
+
+#define TEGRA_OVERLAY_IOCTL_OPEN_WINDOW _IOWR(TEGRA_OVERLAY_IOCTL_MAGIC, 0x40, __u32)
+#define TEGRA_OVERLAY_IOCTL_CLOSE_WINDOW _IOW(TEGRA_OVERLAY_IOCTL_MAGIC, 0x41, __u32)
+#define TEGRA_OVERLAY_IOCTL_FLIP _IOW(TEGRA_OVERLAY_IOCTL_MAGIC, 0x42, struct tegra_overlay_flip_args)
+#define TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD _IOW(TEGRA_OVERLAY_IOCTL_MAGIC, 0x43, __u32)
+
+#define TEGRA_OVERLAY_IOCTL_MIN_NR _IOC_NR(TEGRA_OVERLAY_IOCTL_OPEN_WINDOW)
+#define TEGRA_OVERLAY_IOCTL_MAX_NR _IOC_NR(TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD)
+
+#endif
diff --git a/include/linux/tegra_pwm_bl.h b/include/linux/tegra_pwm_bl.h
new file mode 100644
index 000000000000..71a81f2eda88
--- /dev/null
+++ b/include/linux/tegra_pwm_bl.h
@@ -0,0 +1,31 @@
+/* Tegra PWM backlight data
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ * Author: Renuka Apte <rapte@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef TEGRA_PWM_BL_H
+#define TEGRA_PWM_BL_H
+
+#include <linux/backlight.h>
+
+struct platform_tegra_pwm_backlight_data {
+ int which_dc;
+ int which_pwm;
+ void (*switch_to_sfio)(int);
+ int gpio_conf_to_sfio;
+ unsigned int dft_brightness;
+ unsigned int max_brightness;
+ unsigned int period;
+ unsigned int clk_div;
+ unsigned int clk_select;
+ int (*notify)(struct device *dev, int brightness);
+ int (*check_fb)(struct device *dev, struct fb_info *info);
+};
+
+#endif /* TEGRA_PWM_BL_H */
diff --git a/include/linux/tegra_rpc.h b/include/linux/tegra_rpc.h
new file mode 100644
index 000000000000..16e6367cf569
--- /dev/null
+++ b/include/linux/tegra_rpc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_RPC_H
+#define __LINUX_TEGRA_RPC_H
+
+#define TEGRA_RPC_MAX_MSG_LEN 256
+
+/* Note: the actual size of the name in the protocol message is 16 bytes,
+ * but that is because the name there is not NUL terminated, only NUL
+ * padded. */
+#define TEGRA_RPC_MAX_NAME_LEN 17
+
+struct tegra_rpc_port_desc {
+ char name[TEGRA_RPC_MAX_NAME_LEN];
+ int notify_fd; /* fd representing a trpc_sema to signal when a
+ * message has been received */
+};
+
+#define TEGRA_RPC_IOCTL_MAGIC 'r'
+
+#define TEGRA_RPC_IOCTL_PORT_CREATE _IOW(TEGRA_RPC_IOCTL_MAGIC, 0x20, struct tegra_rpc_port_desc)
+#define TEGRA_RPC_IOCTL_PORT_GET_NAME _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x21, char *)
+#define TEGRA_RPC_IOCTL_PORT_CONNECT _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x22, long)
+#define TEGRA_RPC_IOCTL_PORT_LISTEN _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x23, long)
+
+#define TEGRA_RPC_IOCTL_MIN_NR _IOC_NR(TEGRA_RPC_IOCTL_PORT_CREATE)
+#define TEGRA_RPC_IOCTL_MAX_NR _IOC_NR(TEGRA_RPC_IOCTL_PORT_LISTEN)
+
+#endif
diff --git a/include/linux/tegra_sema.h b/include/linux/tegra_sema.h
new file mode 100644
index 000000000000..7b423b6cb5c4
--- /dev/null
+++ b/include/linux/tegra_sema.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_SEMA_H
+#define __LINUX_TEGRA_SEMA_H
+
+/* this shares the magic with the tegra RPC and AVP drivers.
+ * See include/linux/tegra_avp.h and include/linux/tegra_rpc.h */
+#define TEGRA_SEMA_IOCTL_MAGIC 'r'
+
+/* If IOCTL_WAIT is interrupted by a signal and the timeout was not -1,
+ * then the value pointed to by the argument will be updated with the amount
+ * of time remaining for the wait. */
+#define TEGRA_SEMA_IOCTL_WAIT _IOW(TEGRA_SEMA_IOCTL_MAGIC, 0x30, long *)
+#define TEGRA_SEMA_IOCTL_SIGNAL _IO(TEGRA_SEMA_IOCTL_MAGIC, 0x31)
+
+#define TEGRA_SEMA_IOCTL_MIN_NR _IOC_NR(TEGRA_SEMA_IOCTL_WAIT)
+#define TEGRA_SEMA_IOCTL_MAX_NR _IOC_NR(TEGRA_SEMA_IOCTL_SIGNAL)
+
+#endif
diff --git a/include/linux/tegra_spdif.h b/include/linux/tegra_spdif.h
new file mode 100644
index 000000000000..8d7f6457a0d1
--- /dev/null
+++ b/include/linux/tegra_spdif.h
@@ -0,0 +1,56 @@
+/* include/linux/tegra_spdif.h
+ *
+ * SPDIF audio driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _TEGRA_SPDIF_H
+#define _TEGRA_SPDIF_H
+
+#include <linux/ioctl.h>
+
+#define TEGRA_SPDIF_MAGIC 's'
+
+
+
+struct tegra_audio_buf_config {
+ unsigned size; /* order */
+ unsigned threshold; /* order */
+ unsigned chunk; /* order */
+};
+
+
+
+#define TEGRA_AUDIO_OUT_SET_BUF_CONFIG _IOW(TEGRA_SPDIF_MAGIC, 0, \
+ const struct tegra_audio_buf_config *)
+#define TEGRA_AUDIO_OUT_GET_BUF_CONFIG _IOR(TEGRA_SPDIF_MAGIC, 1, \
+ struct tegra_audio_buf_config *)
+
+#define TEGRA_AUDIO_OUT_GET_ERROR_COUNT _IOR(TEGRA_SPDIF_MAGIC, 2, \
+ unsigned *)
+
+struct tegra_audio_out_preload {
+ void *data;
+ size_t len;
+ size_t len_written;
+};
+
+#define TEGRA_AUDIO_OUT_PRELOAD_FIFO _IOWR(TEGRA_SPDIF_MAGIC, 3, \
+ struct tegra_audio_out_preload *)
+
+#endif/*_TEGRA_SPDIF_H*/
diff --git a/include/linux/tegra_uart.h b/include/linux/tegra_uart.h
new file mode 100644
index 000000000000..3d35e217cbca
--- /dev/null
+++ b/include/linux/tegra_uart.h
@@ -0,0 +1,43 @@
+/* include/linux/tegra_uart.h
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _TEGRA_UART_H_
+#define _TEGRA_UART_H_
+
+#include <linux/clk.h>
+
+struct uart_clk_parent {
+ const char *name;
+ struct clk *parent_clk;
+ unsigned long fixed_clk_rate;
+};
+
+struct tegra_uart_platform_data {
+ void (*wake_peer)(struct uart_port *);
+ struct uart_clk_parent *parent_clk_list;
+ int parent_clk_count;
+};
+
+int tegra_uart_is_tx_empty(struct uart_port *);
+void tegra_uart_request_clock_on(struct uart_port *);
+void tegra_uart_set_mctrl(struct uart_port *, unsigned int);
+void tegra_uart_request_clock_off(struct uart_port *uport);
+
+#endif /* _TEGRA_UART_H_ */
+
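This platform data is the board-side counterpart of the wake_peer hook added to struct uart_ops earlier in this patch; a board would wire it up roughly as follows (the Bluetooth wake-up body is an assumption):

#include <linux/serial_core.h>
#include <linux/tegra_uart.h>

/* Hypothetical hook: wake the Bluetooth chip before transmitting. */
static void bt_uart_wake_peer(struct uart_port *uport)
{
	/* e.g. assert a BT_WAKE GPIO here */
}

static struct tegra_uart_platform_data bt_uart_pdata = {
	.wake_peer = bt_uart_wake_peer,
	.parent_clk_list = NULL,	/* keep the driver's default parents */
	.parent_clk_count = 0,
};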
diff --git a/include/linux/tps80031-charger.h b/include/linux/tps80031-charger.h
new file mode 100644
index 000000000000..26c228edc306
--- /dev/null
+++ b/include/linux/tps80031-charger.h
@@ -0,0 +1,62 @@
+/*
+ * include/linux/tps80031-charger.h
+ *
+ * Battery charger driver interface for TI TPS80031 PMIC
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_TPS80031_CHARGER_H
+#define __LINUX_TPS80031_CHARGER_H
+
+#include <linux/regulator/machine.h>
+
+enum charging_states {
+ charging_state_idle,
+ charging_state_charging_in_progress,
+ charging_state_charging_completed,
+ charging_state_charging_stopped,
+};
+
+/**
+ * Callback type definition; called whenever the charging state changes.
+ */
+typedef void (*charging_callback_t)(enum charging_states state, void *args);
+
+struct tps80031_charger_platform_data {
+ int regulator_id;
+ int max_charge_volt_mV;
+ int max_charge_current_mA;
+ int charging_term_current_mA;
+ int refresh_time;
+ int irq_base;
+ int watch_time_sec;
+ struct regulator_consumer_supply *consumer_supplies;
+ int num_consumer_supplies;
+ int (*board_init)(void *board_data);
+ void *board_data;
+};
+
+/**
+ * Register the callback function for the client. This callback gets called
+ * when there is any change in the charging states.
+ */
+extern int register_charging_state_callback(charging_callback_t cb, void *args);
+
+#endif /*__LINUX_TPS80031_CHARGER_H */
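A charger client (for example a battery monitor driver) would hook the state machine like this; the callback body is illustrative only:

#include <linux/tps80031-charger.h>

/* Hypothetical client reacting to charger state transitions. */
static void battery_charging_cb(enum charging_states state, void *args)
{
	switch (state) {
	case charging_state_charging_in_progress:
		/* e.g. turn on the charging LED */
		break;
	case charging_state_charging_completed:
	case charging_state_charging_stopped:
		/* e.g. refresh the battery gauge */
		break;
	default:
		break;
	}
}

static int battery_monitor_init(void)
{
	/* The args pointer is handed back verbatim to the callback. */
	return register_charging_state_callback(battery_charging_cb, NULL);
}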
diff --git a/include/linux/tracedump.h b/include/linux/tracedump.h
new file mode 100644
index 000000000000..9e86946e354e
--- /dev/null
+++ b/include/linux/tracedump.h
@@ -0,0 +1,43 @@
+/*
+ * include/linux/tracedump.h
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LINUX_KERNEL_TRACEDUMP_H
+#define _LINUX_KERNEL_TRACEDUMP_H
+
+/* tracedump
+ * This module provides additional mechanisms for retrieving tracing data.
+ * For details on configurations, parameters and usage, see tracedump.txt.
+ */
+
+#define TD_NO_PRINT 0
+#define TD_PRINT_CONSOLE 1
+#define TD_PRINT_USER 2
+
+/* Dump the tracer to console */
+int tracedump_dump(size_t max_out);
+
+/* Dumping functions */
+int tracedump_init(void);
+ssize_t tracedump_all(int print_to);
+ssize_t tracedump_next(size_t max_out, int print_to);
+int tracedump_reset(void);
+int tracedump_deinit(void);
+
+#endif /* _LINUX_KERNEL_TRACEDUMP_H */
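A plausible in-kernel caller would pair init/deinit around a full dump to the console; the call sequence is inferred from the declarations above (tracedump.txt is the authoritative reference), so treat it as a sketch:

#include <linux/tracedump.h>

/* Dump the entire trace buffer to the console, e.g. from an error path. */
static void dump_trace_to_console(void)
{
	if (tracedump_init())
		return;

	tracedump_all(TD_PRINT_CONSOLE);
	tracedump_deinit();
}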
diff --git a/include/linux/tracelevel.h b/include/linux/tracelevel.h
new file mode 100644
index 000000000000..ac3351c6ed85
--- /dev/null
+++ b/include/linux/tracelevel.h
@@ -0,0 +1,42 @@
+/*
+ * include/linux/tracelevel.h
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _TRACELEVEL_H
+#define _TRACELEVEL_H
+
+/* tracelevel allows a subsystem author to add priorities to
+ * trace_events. For usage details, see tracelevel.txt.
+ */
+
+#define TRACELEVEL_ERR 3
+#define TRACELEVEL_WARN 2
+#define TRACELEVEL_INFO 1
+#define TRACELEVEL_DEBUG 0
+
+#define TRACELEVEL_MAX TRACELEVEL_ERR
+#define TRACELEVEL_DEFAULT TRACELEVEL_ERR
+
+int __tracelevel_register(char *name, unsigned int level);
+int tracelevel_set_level(int level);
+
+#define tracelevel_register(name, level) \
+ __tracelevel_register(#name, level)
+
+#endif /* _TRACELEVEL_H */
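Because tracelevel_register() stringifies its first argument, callers pass the bare event name; a minimal sketch with a hypothetical event:

#include <linux/init.h>
#include <linux/tracelevel.h>

static int __init my_subsys_trace_init(void)
{
	int ret;

	/* Tag a (hypothetical) trace event with INFO priority. */
	ret = tracelevel_register(my_subsys_event, TRACELEVEL_INFO);
	if (ret)
		return ret;

	/* Only WARN and ERR priority events will now be traced. */
	return tracelevel_set_level(TRACELEVEL_WARN);
}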
diff --git a/include/linux/tty.h b/include/linux/tty.h
index d553ea4fe094..969daca21076 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -295,7 +295,7 @@ struct tty_struct {
void *driver_data;
struct list_head tty_files;
-#define N_TTY_BUF_SIZE 4096
+#define N_TTY_BUF_SIZE 32768
/*
* The following is data for the N_TTY line discipline. For
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644
index 000000000000..6bd6c4e52d17
--- /dev/null
+++ b/include/linux/uid_stat.h
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __uid_stat_h
+#define __uid_stat_h
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* __uid_stat_h */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index a316fba73518..6938a8608cf1 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -242,6 +242,9 @@ int usb_add_config(struct usb_composite_dev *,
struct usb_configuration *,
int (*)(struct usb_configuration *));
+int usb_remove_config(struct usb_composite_dev *,
+ struct usb_configuration *);
+
/**
* struct usb_composite_driver - groups configurations into a gadget
* @name: For diagnostics, identifies the driver.
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 000000000000..5b2dcf9728e1
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,83 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER 0
+#define ACCESSORY_STRING_MODEL 1
+#define ACCESSORY_STRING_DESCRIPTION 2
+#define ACCESSORY_STRING_VERSION 3
+#define ACCESSORY_STRING_URI 4
+#define ACCESSORY_STRING_SERIAL 5
+
+/* Control request for retrieving device's protocol version (currently 1)
+ *
+ * requestType: USB_DIR_IN | USB_TYPE_VENDOR
+ * request: ACCESSORY_GET_PROTOCOL
+ * value: 0
+ * index: 0
+ * data version number (16 bits little endian)
+ */
+#define ACCESSORY_GET_PROTOCOL 51
+
+/* Control request for host to send a string to the device
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SEND_STRING
+ * value: 0
+ * index: string ID
+ * data zero terminated UTF8 string
+ *
+ * The device can later retrieve these strings via the
+ * ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING 52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_START
+ * value: 0
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_START 53
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED _IO('M', 7)
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
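Once the gadget is in accessory mode, userspace reads the host-supplied strings back through the ioctls above; the "/dev/usb_accessory" node name is an assumption:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/f_accessory.h>

/* Callers must pass char[256] buffers, matching the ioctl definitions. */
static int accessory_read_strings(char *manufacturer, char *model)
{
	int fd = open("/dev/usb_accessory", O_RDWR);	/* assumed node */

	if (fd < 0)
		return -1;

	ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER, manufacturer);
	ioctl(fd, ACCESSORY_GET_STRING_MODEL, model);

	close(fd);
	return 0;
}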
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 000000000000..7422b17c6eb1
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,75 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+
+struct mtp_data_header {
+ /* length of packet, including this header */
+ uint32_t length;
+ /* container type (2 for data packet) */
+ uint16_t type;
+ /* MTP command code */
+ uint16_t command;
+ /* MTP transaction ID */
+ uint32_t transaction_id;
+};
+
+#endif /* __KERNEL__ */
+
+struct mtp_file_range {
+ /* file descriptor for file to transfer */
+ int fd;
+ /* offset in file for start of transfer */
+ loff_t offset;
+ /* number of bytes to transfer */
+ int64_t length;
+ /* MTP command ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint16_t command;
+ /* MTP transaction ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint32_t transaction_id;
+};
+
+struct mtp_event {
+ /* size of the event */
+ size_t length;
+ /* event data to send */
+ void *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, struct mtp_file_range)
+
+#endif /* __LINUX_USB_F_MTP_H */
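The MTP userspace stack drives the function through a character device; a rough sketch of sending one file, assuming an already-open "/dev/mtp_usb" descriptor and the sanitized copy of this header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>

/* Stream 'length' bytes of 'file_fd' to the host over the MTP endpoint. */
static int mtp_send_whole_file(int mtp_fd, int file_fd, int64_t length)
{
	struct mtp_file_range mfr = {
		.fd = file_fd,		/* file to read from */
		.offset = 0,		/* start of file */
		.length = length,	/* bytes to transfer */
	};

	return ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
}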
diff --git a/include/linux/usb/otg_id.h b/include/linux/usb/otg_id.h
new file mode 100644
index 000000000000..f9f5189a73b7
--- /dev/null
+++ b/include/linux/usb/otg_id.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_OTG_ID_H
+#define __LINUX_USB_OTG_ID_H
+
+#include <linux/notifier.h>
+#include <linux/plist.h>
+
+/**
+ * otg_id_notifier_block
+ *
+ * @priority: Order the notifications will be called in. Higher numbers
+ * get called first.
+ * @detect: Called during otg_id_notify. Return OTG_ID_HANDLED if the USB cable
+ * has been identified
+ * @proxy_wait: Called during otg_id_notify if a previous handler returns
+ * OTG_ID_PROXY_WAIT. This should wait on ID change then call otg_id_notify.
+ * This is used when a handler knows what's connected but can't detect
+ * the change itself.
+ * @cancel: Called after detect has returned OTG_ID_HANDLED to ask it to
+ * release detection resources to allow a new identification to occur.
+ */
+
+struct otg_id_notifier_block {
+ int priority;
+ int (*detect)(struct otg_id_notifier_block *otg_id_nb);
+ int (*proxy_wait)(struct otg_id_notifier_block *otg_id_nb);
+ void (*cancel)(struct otg_id_notifier_block *otg_id_nb);
+ struct plist_node p;
+};
+
+#define OTG_ID_PROXY_WAIT 2
+#define OTG_ID_HANDLED 1
+#define OTG_ID_UNHANDLED 0
+
+int otg_id_register_notifier(struct otg_id_notifier_block *otg_id_nb);
+void otg_id_unregister_notifier(struct otg_id_notifier_block *otg_id_nb);
+
+void otg_id_notify(void);
+int otg_id_suspend(void);
+void otg_id_resume(void);
+
+#endif /* __LINUX_USB_OTG_ID_H */
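A cable-detect driver would plug into the chain with a notifier block like the following; the priority value and the detect body are assumptions:

#include <linux/init.h>
#include <linux/usb/otg_id.h>

/* Hypothetical ID-pin handler. */
static int my_id_detect(struct otg_id_notifier_block *nb)
{
	/* Sample the ID pin; return OTG_ID_HANDLED once the cable is known. */
	return OTG_ID_UNHANDLED;
}

static struct otg_id_notifier_block my_id_nb = {
	.priority = 10,		/* higher priorities are consulted first */
	.detect = my_id_detect,
};

static int __init my_id_init(void)
{
	return otg_id_register_notifier(&my_id_nb);
}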
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644
index 000000000000..a096d24ada1d
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,91 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/list.h>
+#include <linux/ktime.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend. If the type is WAKE_LOCK_IDLE, low power
+ * states that cause large interrupt latencies or that disable a set of
+ * interrupts will not be entered from idle until the wake_locks are released.
+ */
+
+enum {
+ WAKE_LOCK_SUSPEND, /* Prevent suspend */
+ WAKE_LOCK_IDLE, /* Prevent low power idle */
+ WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+#ifdef CONFIG_HAS_WAKELOCK
+ struct list_head link;
+ int flags;
+ const char *name;
+ unsigned long expires;
+#ifdef CONFIG_WAKELOCK_STAT
+ struct {
+ int count;
+ int expire_count;
+ int wakeup_count;
+ ktime_t total_time;
+ ktime_t prevent_suspend_time;
+ ktime_t max_time;
+ ktime_t last_time;
+ } stat;
+#endif
+#endif
+};
+
+#ifdef CONFIG_HAS_WAKELOCK
+
+void wake_lock_init(struct wake_lock *lock, int type, const char *name);
+void wake_lock_destroy(struct wake_lock *lock);
+void wake_lock(struct wake_lock *lock);
+void wake_lock_timeout(struct wake_lock *lock, long timeout);
+void wake_unlock(struct wake_lock *lock);
+
+/* wake_lock_active returns a non-zero value if the wake_lock is currently
+ * locked. If the wake_lock has a timeout, it does not check the timeout
+ * but if the timeout had already been checked it will return 0.
+ */
+int wake_lock_active(struct wake_lock *lock);
+
+/* has_wake_lock returns 0 if no wake locks of the specified type are active,
+ * and non-zero if one or more wake locks are held. Specifically it returns
+ * -1 if one or more wake locks with no timeout are active or the
+ * number of jiffies until all active wake locks time out.
+ */
+long has_wake_lock(int type);
+
+#else
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+ const char *name) {}
+static inline void wake_lock_destroy(struct wake_lock *lock) {}
+static inline void wake_lock(struct wake_lock *lock) {}
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout) {}
+static inline void wake_unlock(struct wake_lock *lock) {}
+
+static inline int wake_lock_active(struct wake_lock *lock) { return 0; }
+static inline long has_wake_lock(int type) { return 0; }
+
+#endif
+
+#endif
+
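Typical driver usage is an init-time wake_lock_init() followed by timed holds around wakeup work; the lock name and the half-second timeout below are illustrative, and the timeout is assumed to be in jiffies:

#include <linux/jiffies.h>
#include <linux/wakelock.h>

static struct wake_lock rx_wake_lock;

static void rx_path_init(void)
{
	wake_lock_init(&rx_wake_lock, WAKE_LOCK_SUSPEND, "rx_burst");
}

static void rx_burst_event(void)
{
	/* Hold off suspend for ~0.5 s unless the lock is renewed. */
	wake_lock_timeout(&rx_wake_lock, HZ / 2);
}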
diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h
new file mode 100644
index 000000000000..f07e0679fb82
--- /dev/null
+++ b/include/linux/wifi_tiwlan.h
@@ -0,0 +1,27 @@
+/* include/linux/wifi_tiwlan.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+#include <linux/wlan_plat.h>
+
+#define WMPA_NUMBER_OF_SECTIONS 3
+#define WMPA_NUMBER_OF_BUFFERS 160
+#define WMPA_SECTION_HEADER 24
+#define WMPA_SECTION_SIZE_0 (WMPA_NUMBER_OF_BUFFERS * 64)
+#define WMPA_SECTION_SIZE_1 (WMPA_NUMBER_OF_BUFFERS * 256)
+#define WMPA_SECTION_SIZE_2 (WMPA_NUMBER_OF_BUFFERS * 2048)
+
+#endif
diff --git a/include/linux/wl127x-rfkill.h b/include/linux/wl127x-rfkill.h
new file mode 100644
index 000000000000..9057ec63d5d3
--- /dev/null
+++ b/include/linux/wl127x-rfkill.h
@@ -0,0 +1,35 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _LINUX_WL127X_RFKILL_H
+#define _LINUX_WL127X_RFKILL_H
+
+#include <linux/rfkill.h>
+
+struct wl127x_rfkill_platform_data {
+ int nshutdown_gpio;
+
+ struct rfkill *rfkill; /* for driver only */
+};
+
+#endif
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644
index 000000000000..40ec3482d1ef
--- /dev/null
+++ b/include/linux/wlan_plat.h
@@ -0,0 +1,27 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+struct wifi_platform_data {
+ int (*set_power)(int val);
+ int (*set_reset)(int val);
+ int (*set_carddetect)(int val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+};
+
+#endif
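
[Editor's note: a hedged sketch of how a board file might populate struct
wifi_platform_data. The GPIO number and function names are hypothetical; which
hooks may be left NULL depends on the WLAN driver that consumes this data.]

#include <linux/gpio.h>
#include <linux/wlan_plat.h>

#define DEMO_WLAN_PWR_GPIO	86	/* hypothetical board GPIO */

static int demo_wifi_set_power(int on)
{
	/* Drive the WLAN power-enable GPIO; 1 = powered, 0 = off. */
	gpio_set_value(DEMO_WLAN_PWR_GPIO, on);
	return 0;
}

static struct wifi_platform_data demo_wifi_pdata = {
	.set_power	= demo_wifi_set_power,
	/* Remaining hooks left NULL here for brevity (assumption: the
	 * consuming driver tolerates unset callbacks). */
	.set_reset		= NULL,
	.set_carddetect		= NULL,
	.mem_prealloc		= NULL,
	.get_mac_addr		= NULL,
	.get_country_code	= NULL,
};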
diff --git a/include/media/ad5820.h b/include/media/ad5820.h
new file mode 100644
index 000000000000..c3e710113402
--- /dev/null
+++ b/include/media/ad5820.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * Contributors:
+ * Sachin Nikam <snikam@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __AD5820_H__
+#define __AD5820_H__
+
+#include <linux/ioctl.h> /* For IOCTL macros */
+
+#define AD5820_IOCTL_GET_CONFIG _IOR('o', 1, struct ad5820_config)
+#define AD5820_IOCTL_SET_POSITION _IOW('o', 2, u32)
+
+struct ad5820_config {
+ __u32 settle_time;
+ __u32 actuator_range;
+ __u32 pos_low;
+ __u32 pos_high;
+ float focal_length;
+ float fnumber;
+ float max_aperture;
+};
+
+#endif /* __AD5820_H__ */
+
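[Editor's note: a hedged userspace sketch of driving the focuser through the two
ioctls above. The device node path is an assumption, and passing the position value
directly as the ioctl argument is assumed from the _IOW(u32) encoding.]

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <media/ad5820.h>	/* header path as installed is an assumption */

int main(void)
{
	struct ad5820_config cfg;
	int fd = open("/dev/ad5820", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;

	/* Read the focuser limits, then move halfway through the range. */
	if (ioctl(fd, AD5820_IOCTL_GET_CONFIG, &cfg) == 0) {
		__u32 pos = (cfg.pos_low + cfg.pos_high) / 2;
		ioctl(fd, AD5820_IOCTL_SET_POSITION, pos);
	}
	close(fd);
	return 0;
}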
diff --git a/include/media/ar0832_main.h b/include/media/ar0832_main.h
new file mode 100644
index 000000000000..f5e3713b46fb
--- /dev/null
+++ b/include/media/ar0832_main.h
@@ -0,0 +1,106 @@
+/*
+* ar0832_main.h
+*
+* Copyright (c) 2011, NVIDIA, All Rights Reserved.
+*
+* This file is licensed under the terms of the GNU General Public License
+* version 2. This program is licensed "as is" without any warranty of any
+* kind, whether express or implied.
+*/
+
+#ifndef __AR0832_MAIN_H__
+#define __AR0832_MAIN_H__
+
+#include <linux/ioctl.h> /* For IOCTL macros */
+
+#define AR0832_IOCTL_SET_MODE _IOW('o', 0x01, struct ar0832_mode)
+#define AR0832_IOCTL_SET_FRAME_LENGTH _IOW('o', 0x02, __u32)
+#define AR0832_IOCTL_SET_COARSE_TIME _IOW('o', 0x03, __u32)
+#define AR0832_IOCTL_SET_GAIN _IOW('o', 0x04, __u16)
+#define AR0832_IOCTL_GET_STATUS _IOR('o', 0x05, __u8)
+#define AR0832_IOCTL_GET_OTP _IOR('o', 0x06, struct ar0832_otp_data)
+#define AR0832_IOCTL_TEST_PATTERN _IOW('o', 0x07, enum ar0832_test_pattern)
+#define AR0832_IOCTL_SET_POWER_ON _IOW('o', 0x08, struct ar0832_mode)
+#define AR0832_IOCTL_SET_SENSOR_REGION _IOW('o', 0x09, struct ar0832_stereo_region)
+
+#define AR0832_FOCUSER_IOCTL_GET_CONFIG _IOR('o', 0x10, struct ar0832_focuser_config)
+#define AR0832_FOCUSER_IOCTL_SET_POSITION _IOW('o', 0x11, __u32)
+
+#define AR0832_IOCTL_GET_SENSOR_ID _IOR('o', 0x12, __u16)
+
+#define AR0832_SENSOR_ID_8141 0x1006
+#define AR0832_SENSOR_ID_8140 0x3006
+
+enum ar0832_test_pattern {
+ TEST_PATTERN_NONE,
+ TEST_PATTERN_COLORBARS,
+ TEST_PATTERN_CHECKERBOARD
+};
+
+struct ar0832_otp_data {
+ /* Only the first 5 bytes are actually used. */
+ __u8 sensor_serial_num[6];
+ __u8 part_num[8];
+ __u8 lens_id[1];
+ __u8 manufacture_id[2];
+ __u8 factory_id[2];
+ __u8 manufacture_date[9];
+ __u8 manufacture_line[2];
+
+ __u32 module_serial_num;
+ __u8 focuser_liftoff[2];
+ __u8 focuser_macro[2];
+ __u8 reserved1[12];
+ __u8 shutter_cal[16];
+ __u8 reserved2[183];
+
+ /* Big-endian. CRC16 over 0x00-0x41 (inclusive) */
+ __u16 crc;
+ __u8 reserved3[3];
+ __u8 auto_load[2];
+} __attribute__ ((packed));
+
+struct ar0832_mode {
+ int xres;
+ int yres;
+ __u32 frame_length;
+ __u32 coarse_time;
+ __u16 gain;
+ int stereo;
+};
+
+struct ar0832_point {
+ int x;
+ int y;
+};
+
+struct ar0832_reg {
+ __u16 addr;
+ __u16 val;
+};
+
+struct ar0832_stereo_region {
+ int camera_index;
+ struct ar0832_point image_start;
+ struct ar0832_point image_end;
+};
+
+struct ar0832_focuser_config {
+ __u32 settle_time;
+ __u32 actuator_range;
+ __u32 pos_low;
+ __u32 pos_high;
+ __u32 focal_length;
+ __u32 fnumber;
+ __u32 max_aperture;
+};
+
+#ifdef __KERNEL__
+struct ar0832_platform_data {
+ int (*power_on)(int is_stereo);
+ int (*power_off)(int is_stereo);
+ char *id;
+};
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/media/nvc.h b/include/media/nvc.h
new file mode 100644
index 000000000000..c1be3473ecf9
--- /dev/null
+++ b/include/media/nvc.h
@@ -0,0 +1,146 @@
+/* Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __NVC_H__
+#define __NVC_H__
+
+#include <linux/ioctl.h>
+
+struct nvc_param {
+ int param;
+ __u32 sizeofvalue;
+ void *p_value;
+} __packed;
+
+#define NVC_PARAM_EXPOSURE 0
+#define NVC_PARAM_GAIN 1
+#define NVC_PARAM_FRAMERATE 2
+#define NVC_PARAM_MAX_FRAMERATE 3
+#define NVC_PARAM_INPUT_CLOCK 4
+#define NVC_PARAM_LOCUS 5
+#define NVC_PARAM_FLASH_CAPS 6
+#define NVC_PARAM_FLASH_LEVEL 7
+#define NVC_PARAM_FLASH_PIN_STATE 8
+#define NVC_PARAM_TORCH_CAPS 9
+#define NVC_PARAM_TORCH_LEVEL 10
+#define NVC_PARAM_FOCAL_LEN 11
+#define NVC_PARAM_MAX_APERTURE 12
+#define NVC_PARAM_FNUMBER 13
+#define NVC_PARAM_EXPOSURE_LIMITS 14
+#define NVC_PARAM_GAIN_LIMITS 15
+#define NVC_PARAM_FRAMERATE_LIMITS 16
+#define NVC_PARAM_FRAME_RATES 17
+#define NVC_PARAM_EXP_LATCH_TIME 19
+#define NVC_PARAM_REGION_USED 20
+#define NVC_PARAM_SELF_TEST 23
+#define NVC_PARAM_STS 24
+#define NVC_PARAM_TESTMODE 25
+#define NVC_PARAM_EXPECTED_VALUES 26
+#define NVC_PARAM_RESET 27
+#define NVC_PARAM_OPTIMIZE_RES 28
+#define NVC_PARAM_LINES_PER_SEC 30
+#define NVC_PARAM_CAPS 31
+#define NVC_PARAM_STEREO_CAP 33
+#define NVC_PARAM_FOCUS_STEREO 34
+#define NVC_PARAM_STEREO 35
+#define NVC_PARAM_INHERENT_GAIN 36
+#define NVC_PARAM_VIEW_ANGLE_H 37
+#define NVC_PARAM_VIEW_ANGLE_V 38
+#define NVC_PARAM_DEV_ID 46
+#define NVC_PARAM_TEST_PATTERN 0x10000002
+#define NVC_PARAM_SENSOR_TYPE 0x10000006
+#define NVC_PARAM_I2C 1001
+
+/* sync off */
+#define NVC_SYNC_OFF 0
+/* use only this device (the one receiving the call) */
+#define NVC_SYNC_MASTER 1
+/* use only the synced device (the "other" device) */
+#define NVC_SYNC_SLAVE 2
+/* use both synced devices at the same time */
+#define NVC_SYNC_STEREO 3
+
+#define NVC_RESET_HARD 0
+#define NVC_RESET_SOFT 1
+
+#define NVC_IOCTL_PWR_WR _IOW('o', 102, int)
+#define NVC_IOCTL_PWR_RD _IOW('o', 103, int)
+#define NVC_IOCTL_PARAM_WR _IOW('o', 104, struct nvc_param)
+#define NVC_IOCTL_PARAM_RD _IOWR('o', 105, struct nvc_param)
+
+
+#ifdef __KERNEL__
+
+#include <linux/regulator/consumer.h>
+
+/* The NVC_CFG_ defines are for the .cfg entry in the
+ * platform data structure.
+ */
+/* Device not registered if not found */
+#define NVC_CFG_NODEV (1 << 0)
+/* Don't return errors */
+#define NVC_CFG_NOERR (1 << 1)
+/* Always go to _PWR_STDBY instead of _PWR_OFF */
+#define NVC_CFG_OFF2STDBY (1 << 2)
+/* Init device at sys boot */
+#define NVC_CFG_BOOT_INIT (1 << 3)
+/* Sync mode uses an I2C MUX to send at same time */
+#define NVC_CFG_SYNC_I2C_MUX (1 << 4)
+
+/* Expected higher level power calls are:
+ * 1 = OFF
+ * 2 = STANDBY
+ * 3 = ON
+ * These will be multiplied by 2 before given to the driver's PM code that
+ * uses the _PWR_ defines. This allows us to insert defines to give more power
+ * granularity and still remain linear with regards to the power usage and
+ * full power state transition latency for easy implementation of PM
+ * algorithms.
+ * The PM actions:
+ * _PWR_ERR = Non-valid state.
+ * _PWR_OFF_DELAYED = _PWR_OFF is called after a period of time.
+ * _PWR_OFF = Device, regulators, clocks, etc. are turned off. The longest
+ * transition time to _PWR_ON is from this state.
+ * _PWR_STDBY_OFF = Device is useless but powered. No communication possible.
+ * Device does not retain programming. Main purpose is for
+ * faster return to _PWR_ON without regulator delays.
+ * _PWR_STDBY = Device is in standby. Device retains programming.
+ * _PWR_COMM = Device is powered enough to communicate with the device.
+ * _PWR_ON = Device is at full power with active output.
+ *
+ * The kernel drivers treat these calls as guaranteed level of service.
+ */
+
+#define NVC_PWR_ERR 0
+#define NVC_PWR_OFF_DELAYED 1 /* obsolete - never used */
+#define NVC_PWR_OFF_FORCE 1
+#define NVC_PWR_OFF 2
+#define NVC_PWR_STDBY_OFF 3
+#define NVC_PWR_STDBY 4
+#define NVC_PWR_COMM 5
+#define NVC_PWR_ON 6
+
+struct nvc_regulator {
+ bool vreg_flag;
+ struct regulator *vreg;
+ const char *vreg_name;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __NVC_H__ */
+
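[Editor's note: a minimal sketch of the power-level translation described in the
comment block above (higher-level 1/2/3 calls multiplied by 2 to reach the NVC_PWR_
levels). The function name is illustrative and not part of the nvc interface.]

#include <media/nvc.h>	/* header path as installed is an assumption */

/* Illustrative only: maps the higher-level 1 (OFF) / 2 (STANDBY) / 3 (ON)
 * calls onto the finer-grained NVC_PWR_ levels by the x2 rule above. */
static int nvc_demo_pwr_level(int hi_level)
{
	switch (hi_level * 2) {
	case NVC_PWR_OFF:	/* 1 * 2 = 2 */
		return NVC_PWR_OFF;
	case NVC_PWR_STDBY:	/* 2 * 2 = 4 */
		return NVC_PWR_STDBY;
	case NVC_PWR_ON:	/* 3 * 2 = 6 */
		return NVC_PWR_ON;
	default:
		return NVC_PWR_ERR;
	}
}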
diff --git a/include/media/nvc_focus.h b/include/media/nvc_focus.h
new file mode 100644
index 000000000000..fd83258abab3
--- /dev/null
+++ b/include/media/nvc_focus.h
@@ -0,0 +1,48 @@
+/* Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __NVC_FOCUS_H__
+#define __NVC_FOCUS_H__
+
+enum nvc_focus_sts {
+ NVC_FOCUS_STS_UNKNOWN = 1,
+ NVC_FOCUS_STS_NO_DEVICE,
+ NVC_FOCUS_STS_INITIALIZING,
+ NVC_FOCUS_STS_INIT_ERR,
+ NVC_FOCUS_STS_WAIT_FOR_MOVE_END,
+ NVC_FOCUS_STS_WAIT_FOR_SETTLE,
+ NVC_FOCUS_STS_LENS_SETTLED,
+ NVC_FOCUS_STS_FORCE32 = 0x7FFFFFFF
+};
+
+struct nvc_focus_nvc {
+ __u32 focal_length;
+ __u32 fnumber;
+ __u32 max_aperature;
+} __packed;
+
+struct nvc_focus_cap {
+ __u32 version;
+ __u32 actuator_range;
+ __u32 settle_time;
+ __u32 focus_macro;
+ __u32 focus_hyper;
+ __u32 focus_infinity;
+} __packed;
+
+#endif /* __NVC_FOCUS_H__ */
+
diff --git a/include/media/nvc_torch.h b/include/media/nvc_torch.h
new file mode 100644
index 000000000000..7c88bc75af05
--- /dev/null
+++ b/include/media/nvc_torch.h
@@ -0,0 +1,43 @@
+/* Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __NVC_TORCH_H__
+#define __NVC_TORCH_H__
+
+struct nvc_torch_level_info {
+ __s32 guidenum;
+ __u32 sustaintime;
+ __s32 rechargefactor;
+} __packed;
+
+struct nvc_torch_pin_state {
+ __u16 mask;
+ __u16 values;
+} __packed;
+
+struct nvc_torch_flash_capabilities {
+ __u32 numberoflevels;
+ struct nvc_torch_level_info levels[];
+} __packed;
+
+struct nvc_torch_torch_capabilities {
+ __u32 numberoflevels;
+ __s32 guidenum[];
+} __packed;
+
+#endif /* __NVC_TORCH_H__ */
+
diff --git a/include/media/ov14810.h b/include/media/ov14810.h
new file mode 100644
index 000000000000..67a864959186
--- /dev/null
+++ b/include/media/ov14810.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __OV14810_H__
+#define __OV14810_H__
+
+#include <linux/ioctl.h> /* For IOCTL macros */
+
+#define OV14810_IOCTL_SET_MODE _IOW('o', 1, struct ov14810_mode)
+#define OV14810_IOCTL_SET_FRAME_LENGTH _IOW('o', 2, __u32)
+#define OV14810_IOCTL_SET_COARSE_TIME _IOW('o', 3, __u32)
+#define OV14810_IOCTL_SET_GAIN _IOW('o', 4, __u16)
+#define OV14810_IOCTL_GET_STATUS _IOR('o', 5, __u8)
+#define OV14810_IOCTL_SET_CAMERA_MODE _IOW('o', 10, __u32)
+#define OV14810_IOCTL_SYNC_SENSORS _IOW('o', 11, __u32)
+
+struct ov14810_mode {
+ int xres;
+ int yres;
+ __u32 frame_length;
+ __u32 coarse_time;
+ __u16 gain;
+};
+#ifdef __KERNEL__
+struct ov14810_platform_data {
+ int (*power_on)(void);
+ int (*power_off)(void);
+ void (*synchronize_sensors)(void);
+};
+#endif /* __KERNEL__ */
+
+#endif /* __OV14810_H__ */
diff --git a/include/media/ov2710.h b/include/media/ov2710.h
new file mode 100644
index 000000000000..e3d43056d700
--- /dev/null
+++ b/include/media/ov2710.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 Motorola, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __OV2710_H__
+#define __OV2710_H__
+
+#include <linux/ioctl.h> /* For IOCTL macros */
+
+#define OV2710_IOCTL_SET_MODE _IOW('o', 1, struct ov2710_mode)
+#define OV2710_IOCTL_SET_FRAME_LENGTH _IOW('o', 2, __u32)
+#define OV2710_IOCTL_SET_COARSE_TIME _IOW('o', 3, __u32)
+#define OV2710_IOCTL_SET_GAIN _IOW('o', 4, __u16)
+#define OV2710_IOCTL_GET_STATUS _IOR('o', 5, __u8)
+
+struct ov2710_mode {
+ int xres;
+ int yres;
+ __u32 frame_length;
+ __u32 coarse_time;
+ __u16 gain;
+};
+#ifdef __KERNEL__
+struct ov2710_platform_data {
+ int (*power_on)(void);
+ int (*power_off)(void);
+
+};
+#endif /* __KERNEL__ */
+
+#endif /* __OV2710_H__ */
+
diff --git a/include/media/ov5650.h b/include/media/ov5650.h
new file mode 100644
index 000000000000..00efcec61a5f
--- /dev/null
+++ b/include/media/ov5650.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 Motorola, Inc.
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __OV5650_H__
+#define __OV5650_H__
+
+#include <linux/ioctl.h> /* For IOCTL macros */
+
+#define OV5650_IOCTL_SET_MODE _IOW('o', 1, struct ov5650_mode)
+#define OV5650_IOCTL_SET_FRAME_LENGTH _IOW('o', 2, __u32)
+#define OV5650_IOCTL_SET_COARSE_TIME _IOW('o', 3, __u32)
+#define OV5650_IOCTL_SET_GAIN _IOW('o', 4, __u16)
+#define OV5650_IOCTL_GET_STATUS _IOR('o', 5, __u8)
+#define OV5650_IOCTL_SET_BINNING _IOW('o', 6, __u8)
+#define OV5650_IOCTL_TEST_PATTERN _IOW('o', 7, enum ov5650_test_pattern)
+#define OV5650_IOCTL_SET_CAMERA_MODE _IOW('o', 10, __u32)
+#define OV5650_IOCTL_SYNC_SENSORS _IOW('o', 11, __u32)
+
+/* OV5650 registers */
+#define OV5650_SRM_GRUP_ACCESS (0x3212)
+#define OV5650_ARRAY_CONTROL_01 (0x3621)
+#define OV5650_ANALOG_CONTROL_D (0x370D)
+#define OV5650_TIMING_TC_REG_18 (0x3818)
+#define OV5650_TIMING_CONTROL_HS_HIGH (0x3800)
+#define OV5650_TIMING_CONTROL_HS_LOW (0x3801)
+#define OV5650_TIMING_CONTROL_VS_HIGH (0x3802)
+#define OV5650_TIMING_CONTROL_VS_LOW (0x3803)
+#define OV5650_TIMING_HW_HIGH (0x3804)
+#define OV5650_TIMING_HW_LOW (0x3805)
+#define OV5650_TIMING_VH_HIGH (0x3806)
+#define OV5650_TIMING_VH_LOW (0x3807)
+#define OV5650_TIMING_TC_REG_18 (0x3818)
+#define OV5650_TIMING_HREFST_MAN_HIGH (0x3824)
+#define OV5650_TIMING_HREFST_MAN_LOW (0x3825)
+#define OV5650_H_BINNING_BIT (1 << 7)
+#define OV5650_H_SUBSAMPLING_BIT (1 << 6)
+#define OV5650_V_BINNING_BIT (1 << 6)
+#define OV5650_V_SUBSAMPLING_BIT (1 << 0)
+#define OV5650_GROUP_HOLD_BIT (1 << 7)
+#define OV5650_GROUP_LAUNCH_BIT (1 << 5)
+#define OV5650_GROUP_HOLD_END_BIT (1 << 4)
+#define OV5650_GROUP_ID(id) (id)
+
+enum ov5650_test_pattern {
+ TEST_PATTERN_NONE,
+ TEST_PATTERN_COLORBARS,
+ TEST_PATTERN_CHECKERBOARD
+};
+
+struct ov5650_mode {
+ int xres;
+ int yres;
+ __u32 frame_length;
+ __u32 coarse_time;
+ __u16 gain;
+};
+
+#ifdef __KERNEL__
+struct ov5650_platform_data {
+ int (*power_on)(void);
+ int (*power_off)(void);
+ void (*synchronize_sensors)(void);
+};
+#endif /* __KERNEL__ */
+
+#endif /* __OV5650_H__ */
+
diff --git a/include/media/ov9726.h b/include/media/ov9726.h
new file mode 100644
index 000000000000..b1e759ba583c
--- /dev/null
+++ b/include/media/ov9726.h
@@ -0,0 +1,62 @@
+/*
+* ov9726.h
+*
+* Copyright (c) 2011, NVIDIA, All Rights Reserved.
+*
+* This file is licensed under the terms of the GNU General Public License
+* version 2. This program is licensed "as is" without any warranty of any
+* kind, whether express or implied.
+*/
+
+#ifndef __OV9726_H__
+#define __OV9726_H__
+
+#include <linux/ioctl.h>
+
+#define OV9726_I2C_ADDR 0x20
+
+#define OV9726_IOCTL_SET_MODE _IOW('o', 1, struct ov9726_mode)
+#define OV9726_IOCTL_SET_FRAME_LENGTH _IOW('o', 2, __u32)
+#define OV9726_IOCTL_SET_COARSE_TIME _IOW('o', 3, __u32)
+#define OV9726_IOCTL_SET_GAIN _IOW('o', 4, __u16)
+#define OV9726_IOCTL_GET_STATUS _IOR('o', 5, __u8)
+
+struct ov9726_mode {
+ int mode_id;
+ int xres;
+ int yres;
+ __u32 frame_length;
+ __u32 coarse_time;
+ __u16 gain;
+};
+
+struct ov9726_reg {
+ __u16 addr;
+ __u16 val;
+};
+
+#ifdef __KERNEL__
+#define OV9726_REG_FRAME_LENGTH_HI 0x340
+#define OV9726_REG_FRAME_LENGTH_LO 0x341
+#define OV9726_REG_COARSE_TIME_HI 0x202
+#define OV9726_REG_COARSE_TIME_LO 0x203
+#define OV9726_REG_GAIN_HI 0x204
+#define OV9726_REG_GAIN_LO 0x205
+
+#define OV9726_MAX_RETRIES 3
+
+#define OV9726_TABLE_WAIT_MS 0
+#define OV9726_TABLE_END 1
+
+struct ov9726_platform_data {
+ int (*power_on)(void);
+ int (*power_off)(void);
+ unsigned gpio_rst;
+ bool rst_low_active;
+ unsigned gpio_pwdn;
+ bool pwdn_low_active;
+};
+#endif /* __KERNEL__ */
+
+#endif /* __OV9726_H__ */
+
diff --git a/include/media/sh532u.h b/include/media/sh532u.h
new file mode 100644
index 000000000000..19da2070b70f
--- /dev/null
+++ b/include/media/sh532u.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __SH532U_H__
+#define __SH532U_H__
+
+#include <media/nvc_focus.h>
+
+
+struct sh532u_platform_data {
+ int cfg;
+ int num;
+ int sync;
+ const char *dev_name;
+ struct nvc_focus_nvc (*nvc);
+ struct nvc_focus_cap (*cap);
+ struct sh532u_pdata_info (*info);
+ __u8 i2c_addr_rom;
+ unsigned gpio_reset;
+/* Due to a Linux limitation, a GPIO is defined to "enable" the device. This
+ * workaround is for when the device's power GPIOs are behind an I2C expander.
+ * The Linux limitation doesn't allow the I2C GPIO expander to be ready for
+ * use when this device is probed.
+ */
+ unsigned gpio_en;
+};
+
+struct sh532u_pdata_info {
+ __s16 pos_low;
+ __s16 pos_high;
+ __s16 limit_low;
+ __s16 limit_high;
+ int move_timeoutms;
+ __u32 focus_hyper_ratio;
+ __u32 focus_hyper_div;
+};
+
+
+/* Register Definition : Sany Driver IC */
+/* EEPROM addresses */
+#define addrHallOffset 0x10
+#define addrHallBias 0x11
+#define addrInf1 0x12
+#define addrMac1 0x13
+#define addrLoopGainH 0x14
+#define addrLoopGainL 0x15
+#define addrInf2 0x16
+#define addrMac2 0x17
+
+#define addrInf1_H 0x20 /* bottom mechanical limit of HVCA */
+#define addrInf1_L 0x21
+#define addrMac1_H 0x22 /* top mechanical limit of HVCA */
+#define addrMac1_L 0x23
+#define addrInf2_H 0x24 /* lens position when object is at 120cm */
+#define addrInf2_L 0x25
+#define addrMac2_H 0x26 /* lens position when object is at 10cm */
+#define addrMac2_L 0x27
+#define addrDacDeltaUp_H 0x28 /* difference between face up and down */
+#define addrDacDeltaUp_L 0x29
+#define addrAFoffset_H 0x2A /* best focus position subtract value */
+#define addrAFoffset_L 0x2B
+
+/* Convergence Judgement */
+#define INI_MSSET_211 0x00
+#define CHTGOKN_TIME 0x80
+#define CHTGOKN_WAIT 1
+#define CHTGOKN_TIMEOUT 50
+#define CHTGSTOKN_TOMEOUT 15
+
+/* StepMove */
+#define STMV_SIZE 0x0180
+
+#define STMCHTG_ON 0x08
+#define STMSV_ON 0x04
+#define STMLFF_ON 0x02
+#define STMVEN_ON 0x01
+#define STMCHTG_OFF 0x00
+#define STMSV_OFF 0x00
+#define STMLFF_OFF 0x00
+#define STMVEN_OFF 0x00
+
+#define STMCHTG_SET STMCHTG_ON
+#define STMSV_SET STMSV_ON
+#define STMLFF_SET STMLFF_OFF
+
+#define CHTGST_ON 0x01
+#define DEFAULT_DADAT 0x8040
+
+/* Delay RAM 00h ~ 3Fh */
+#define ADHXI_211H 0x00
+#define ADHXI_211L 0x01
+#define PIDZO_211H 0x02
+#define PIDZO_211L 0x03
+#define RZ_211H 0x04
+#define RZ_211L 0x05
+#define DZ1_211H 0x06
+#define DZ1_211L 0x07
+#define DZ2_211H 0x08
+#define DZ2_211L 0x09
+#define UZ1_211H 0x0A
+#define UZ1_211L 0x0B
+#define UZ2_211H 0x0C
+#define UZ2_211L 0x0D
+#define IZ1_211H 0x0E
+#define IZ1_211L 0x0F
+#define IZ2_211H 0x10
+#define IZ2_211L 0x11
+#define MS1Z01_211H 0x12
+#define MS1Z01_211L 0x13
+#define MS1Z11_211H 0x14
+#define MS1Z11_211L 0x15
+#define MS1Z12_211H 0x16
+#define MS1Z12_211L 0x17
+#define MS1Z22_211H 0x18
+#define MS1Z22_211L 0x19
+#define MS2Z01_211H 0x1A
+#define MS2Z01_211L 0x1B
+#define MS2Z11_211H 0x1C
+#define MS2Z11_211L 0x1D
+#define MS2Z12_211H 0x1E
+#define MS2Z12_211L 0x1F
+#define MS2Z22_211H 0x20
+#define MS2Z22_211L 0x21
+#define MS2Z23_211H 0x22
+#define MS2Z23_211L 0x23
+#define OZ1_211H 0x24
+#define OZ1_211L 0x25
+#define OZ2_211H 0x26
+#define OZ2_211L 0x27
+#define DAHLXO_211H 0x28
+#define DAHLXO_211L 0x29
+#define OZ3_211H 0x2A
+#define OZ3_211L 0x2B
+#define OZ4_211H 0x2C
+#define OZ4_211L 0x2D
+#define OZ5_211H 0x2E
+#define OZ5_211L 0x2F
+#define oe_211H 0x30
+#define oe_211L 0x31
+#define MSR1CMAX_211H 0x32
+#define MSR1CMAX_211L 0x33
+#define MSR1CMIN_211H 0x34
+#define MSR1CMIN_211L 0x35
+#define MSR2CMAX_211H 0x36
+#define MSR2CMAX_211L 0x37
+#define MSR2CMIN_211H 0x38
+#define MSR2CMIN_211L 0x39
+#define OFFSET_211H 0x3A
+#define OFFSET_211L 0x3B
+#define ADOFFSET_211H 0x3C
+#define ADOFFSET_211L 0x3D
+#define EZ_211H 0x3E
+#define EZ_211L 0x3F
+
+/* Coefficient RAM 40h ~ 7Fh */
+#define ag_211H 0x40
+#define ag_211L 0x41
+#define da_211H 0x42
+#define da_211L 0x43
+#define db_211H 0x44
+#define db_211L 0x45
+#define dc_211H 0x46
+#define dc_211L 0x47
+#define dg_211H 0x48
+#define dg_211L 0x49
+#define pg_211H 0x4A
+#define pg_211L 0x4B
+#define gain1_211H 0x4C
+#define gain1_211L 0x4D
+#define gain2_211H 0x4E
+#define gain2_211L 0x4F
+#define ua_211H 0x50
+#define ua_211L 0x51
+#define uc_211H 0x52
+#define uc_211L 0x53
+#define ia_211H 0x54
+#define ia_211L 0x55
+#define ib_211H 0x56
+#define ib_211L 0x57
+#define i_c_211H 0x58
+#define i_c_211L 0x59
+#define ms11a_211H 0x5A
+#define ms11a_211L 0x5B
+#define ms11c_211H 0x5C
+#define ms11c_211L 0x5D
+#define ms12a_211H 0x5E
+#define ms12a_211L 0x5F
+#define ms12c_211H 0x60
+#define ms12c_211L 0x61
+#define ms21a_211H 0x62
+#define ms21a_211L 0x63
+#define ms21b_211H 0x64
+#define ms21b_211L 0x65
+#define ms21c_211H 0x66
+#define ms21c_211L 0x67
+#define ms22a_211H 0x68
+#define ms22a_211L 0x69
+#define ms22c_211H 0x6A
+#define ms22c_211L 0x6B
+#define ms22d_211H 0x6C
+#define ms22d_211L 0x6D
+#define ms22e_211H 0x6E
+#define ms22e_211L 0x6F
+#define ms23p_211H 0x70
+#define ms23p_211L 0x71
+#define oa_211H 0x72
+#define oa_211L 0x73
+#define oc_211H 0x74
+#define oc_211L 0x75
+#define PX12_211H 0x76
+#define PX12_211L 0x77
+#define PX3_211H 0x78
+#define PX3_211L 0x79
+#define MS2X_211H 0x7A
+#define MS2X_211L 0x7B
+#define CHTGX_211H 0x7C
+#define CHTGX_211L 0x7D
+#define CHTGN_211H 0x7E
+#define CHTGN_211L 0x7F
+
+/* Register 80h ~ 9F */
+#define CLKSEL_211 0x80
+#define ADSET_211 0x81
+#define PWMSEL_211 0x82
+#define SWTCH_211 0x83
+#define STBY_211 0x84
+#define CLR_211 0x85
+#define DSSEL_211 0x86
+#define ENBL_211 0x87
+#define ANA1_211 0x88
+#define STMVEN_211 0x8A
+#define STPT_211 0x8B
+#define SWFC_211 0x8C
+#define SWEN_211 0x8D
+#define MSNUM_211 0x8E
+#define MSSET_211 0x8F
+#define DLYMON_211 0x90
+#define MONA_211 0x91
+#define PWMLIMIT_211 0x92
+#define PINSEL_211 0x93
+#define PWMSEL2_211 0x94
+#define SFTRST_211 0x95
+#define TEST_211 0x96
+#define PWMZONE2_211 0x97
+#define PWMZONE1_211 0x98
+#define PWMZONE0_211 0x99
+#define ZONE3_211 0x9A
+#define ZONE2_211 0x9B
+#define ZONE1_211 0x9C
+#define ZONE0_211 0x9D
+#define GCTIM_211 0x9E
+#define GCTIM_211NU 0x9F
+#define STMINT_211 0xA0
+#define STMVENDH_211 0xA1
+#define STMVENDL_211 0xA2
+#define MSNUMR_211 0xA3
+#define ANA2_211 0xA4
+
+/* Device ID of HVCA Drive IC */
+#define HVCA_DEVICE_ID 0xE4
+
+/* Device ID of E2P ROM */
+#define EEP_DEVICE_ID 0xA0
+#define EEP_PAGE0 0x00
+#define EEP_PAGE1 0x02
+#define EEP_PAGE2 0x04
+#define EEP_PAGE3 0x06
+/* E2P ROM has 1024 bytes, so there are 4 pages of memory */
+/* E2PROM Device ID = 1 0 1 0 0 P0 P1 0 */
+/*
+P0 P1
+0 0 : Page 0
+0 1 : Page 1
+1 0 : Page 2
+1 1 : Page 3
+*/
+/* Page 0: address 0x000~0x0FF, E2PROM Device ID = E2P_DEVICE_ID|E2P_PAGE0 */
+/* Page 1: address 0x100~0x1FF, E2PROM Device ID = E2P_DEVICE_ID|E2P_PAGE1 */
+/* Page 2: address 0x200~0x2FF, E2PROM Device ID = E2P_DEVICE_ID|E2P_PAGE2 */
+/* Page 3: address 0x300~0x3FF, E2PROM Device ID = E2P_DEVICE_ID|E2P_PAGE3 */
+/*
+*/
+
+/* E2P data type define of HVCA Initial Value Section */
+#define DIRECT_MODE 0x00
+#define INDIRECT_EEPROM 0x10
+#define INDIRECT_HVCA 0x20
+#define MASK_AND 0x70
+#define MASK_OR 0x80
+
+#define DATA_1BYTE 0x01
+#define DATA_2BYTE 0x02
+
+#define START_ADDR 0x0030
+#define END_ADDR 0x01BF
+
+/*Macro define*/
+#if !defined(abs)
+#define abs(a) (((a) > 0) ? (a) : -(a))
+#endif
+
+#endif
+/* __SH532U_H__ */
+
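[Editor's note: a hedged sketch of how the EEPROM page-select bits combine with the
base device ID, per the comment block above. The helper name is illustrative.]

#include <media/sh532u.h>	/* header path as installed is an assumption */

/* Illustrative only: compose the I2C device ID for a given E2PROM page,
 * following the "1 0 1 0 0 P0 P1 0" layout documented above. */
static inline unsigned char sh532u_demo_eeprom_dev_id(unsigned page)
{
	static const unsigned char page_sel[] = {
		EEP_PAGE0, EEP_PAGE1, EEP_PAGE2, EEP_PAGE3
	};

	if (page > 3)
		page = 0;
	return EEP_DEVICE_ID | page_sel[page];	/* e.g. page 2 -> 0xA4 */
}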
diff --git a/include/media/soc380.h b/include/media/soc380.h
new file mode 100644
index 000000000000..254625f74f82
--- /dev/null
+++ b/include/media/soc380.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SOC380_H__
+#define __SOC380_H__
+
+#include <linux/ioctl.h> /* For IOCTL macros */
+
+#define SOC380_IOCTL_SET_MODE _IOW('o', 1, struct soc380_mode)
+#define SOC380_IOCTL_GET_STATUS _IOR('o', 2, struct soc380_status)
+
+struct soc380_mode {
+ int xres;
+ int yres;
+};
+
+struct soc380_status {
+ int data;
+ int status;
+};
+
+#ifdef __KERNEL__
+struct soc380_platform_data {
+ int (*power_on)(void);
+ int (*power_off)(void);
+
+};
+#endif /* __KERNEL__ */
+
+#endif /* __SOC380_H__ */
+
diff --git a/include/media/ssl3250a.h b/include/media/ssl3250a.h
new file mode 100644
index 000000000000..c4e802fa2efc
--- /dev/null
+++ b/include/media/ssl3250a.h
@@ -0,0 +1,38 @@
+/* Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __SSL3250A_H__
+#define __SSL3250A_H__
+
+#include <media/nvc_torch.h>
+
+#define SSL3250A_MAX_TORCH_LEVEL 11
+#define SSL3250A_MAX_FLASH_LEVEL 20
+
+struct ssl3250a_platform_data {
+ unsigned cfg; /* use the NVC_CFG_ defines */
+ unsigned num; /* see implementation notes in driver */
+ unsigned sync; /* see implementation notes in driver */
+ const char *dev_name; /* see implementation notes in driver */
+ struct nvc_torch_pin_state (*pinstate); /* see notes in driver */
+ unsigned max_amp_torch; /* maximum torch value allowed */
+ unsigned max_amp_flash; /* maximum flash value allowed */
+ unsigned gpio_act; /* GPIO connected to the ACT signal */
+};
+
+#endif /* __SSL3250A_H__ */
+
diff --git a/include/media/tegra_camera.h b/include/media/tegra_camera.h
new file mode 100644
index 000000000000..d7d08bd9a99b
--- /dev/null
+++ b/include/media/tegra_camera.h
@@ -0,0 +1,55 @@
+/*
+ * include/linux/tegra_camera.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef TEGRA_CAMERA_H
+#define TEGRA_CAMERA_H
+
+/* this is to enable VI pattern generator (Null Sensor) */
+#define TEGRA_CAMERA_ENABLE_PD2VI_CLK 0x1
+
+enum {
+ TEGRA_CAMERA_MODULE_ISP = 0,
+ TEGRA_CAMERA_MODULE_VI,
+ TEGRA_CAMERA_MODULE_CSI,
+};
+
+enum {
+ TEGRA_CAMERA_VI_CLK,
+ TEGRA_CAMERA_VI_SENSOR_CLK,
+};
+
+struct tegra_camera_clk_info {
+ uint id;
+ uint clk_id;
+ unsigned long rate;
+ uint flag; /* to inform if any special bits need to be enabled/disabled */
+};
+
+enum StereoCameraMode {
+ Main = 0x0, /* Sets the default camera to Main */
+ StereoCameraMode_Left = 0x01, /* the left camera is on. */
+ StereoCameraMode_Right = 0x02, /* the right camera is on. */
+ StereoCameraMode_Stereo = 0x03, /* both cameras are on. */
+ StereoCameraMode_Force32 = 0x7FFFFFFF
+};
+
+
+#define TEGRA_CAMERA_IOCTL_ENABLE _IOWR('i', 1, uint)
+#define TEGRA_CAMERA_IOCTL_DISABLE _IOWR('i', 2, uint)
+#define TEGRA_CAMERA_IOCTL_CLK_SET_RATE \
+ _IOWR('i', 3, struct tegra_camera_clk_info)
+#define TEGRA_CAMERA_IOCTL_RESET _IOWR('i', 4, uint)
+
+#endif
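[Editor's note: a hedged userspace sketch of the clock-rate ioctl above. The device
node path and the function name are assumptions; error handling is abbreviated.]

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <media/tegra_camera.h>	/* header path as installed is an assumption */

/* Illustrative only: enable the VI module and set its sensor clock rate. */
int demo_set_vi_sensor_clk(unsigned long hz)
{
	struct tegra_camera_clk_info info = {
		.id	= TEGRA_CAMERA_MODULE_VI,
		.clk_id	= TEGRA_CAMERA_VI_SENSOR_CLK,
		.rate	= hz,
		.flag	= 0,	/* no special bits requested */
	};
	unsigned int module = TEGRA_CAMERA_MODULE_VI;
	int fd = open("/dev/tegra_camera", O_RDWR);	/* node name assumed */
	int err = -1;

	if (fd < 0)
		return -1;
	if (ioctl(fd, TEGRA_CAMERA_IOCTL_ENABLE, &module) == 0)
		err = ioctl(fd, TEGRA_CAMERA_IOCTL_CLK_SET_RATE, &info);
	close(fd);
	return err;
}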
diff --git a/include/media/tps61050.h b/include/media/tps61050.h
new file mode 100644
index 000000000000..1c289eb3616e
--- /dev/null
+++ b/include/media/tps61050.h
@@ -0,0 +1,36 @@
+/* Copyright (C) 2011 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __TPS61050_H__
+#define __TPS61050_H__
+
+#include <media/nvc_torch.h>
+
+#define TPS61050_MAX_TORCH_LEVEL 7
+#define TPS61050_MAX_FLASH_LEVEL 8
+
+struct tps61050_platform_data {
+ unsigned cfg; /* use the NVC_CFG_ defines */
+ unsigned num; /* see implementation notes in driver */
+ unsigned sync; /* see implementation notes in driver */
+ const char *dev_name; /* see implementation notes in driver */
+ struct nvc_torch_pin_state (*pinstate); /* see notes in driver */
+ unsigned max_amp_torch; /* see implementation notes in driver */
+ unsigned max_amp_flash; /* see implementation notes in driver */
+};
+
+#endif /* __TPS61050_H__ */
diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h
new file mode 100644
index 000000000000..10e4c1506eeb
--- /dev/null
+++ b/include/net/activity_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#ifndef __activity_stats_h
+#define __activity_stats_h
+
+#ifdef CONFIG_NET_ACTIVITY_STATS
+void activity_stats_update(void);
+#else
+#define activity_stats_update(void) {}
+#endif
+
+#endif /* __activity_stats_h */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 582e4ae70753..cbc6bb0a6838 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -8,7 +8,7 @@
#define TEMP_VALID_LIFETIME (7*86400)
#define TEMP_PREFERRED_LIFETIME (86400)
-#define REGEN_MAX_RETRY (5)
+#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
#define ADDR_CHECK_FREQUENCY (120*HZ)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index be30aabe7b88..915d7ae527b3 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -37,6 +37,7 @@
#define HCI_DEV_DOWN 4
#define HCI_DEV_SUSPEND 5
#define HCI_DEV_RESUME 6
+#define HCI_DEV_WRITE 7
/* HCI notify events */
#define HCI_NOTIFY_CONN_ADD 1
@@ -157,8 +158,10 @@ enum {
#define ESCO_2EV5 0x0100
#define ESCO_3EV5 0x0200
-#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
-#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define ALL_ESCO_MASK (SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
+ EDR_ESCO_MASK)
/* ACL flags */
#define ACL_START_NO_FLUSH 0x00
@@ -1261,6 +1264,9 @@ struct hci_conn_info {
__u8 out;
__u16 state;
__u32 link_mode;
+ __u32 mtu;
+ __u32 cnt;
+ __u32 pkts;
};
struct hci_dev_req {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f441b8b2963..626f8b0a6791 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -446,12 +446,14 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+ __u16 pkt_type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+ __u16 pkt_type, bdaddr_t *dst,
__u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
@@ -480,7 +482,7 @@ static inline void hci_conn_put(struct hci_conn *conn)
if (conn->state == BT_CONNECTED) {
timeo = msecs_to_jiffies(conn->disc_timeout);
if (!conn->out)
- timeo *= 2;
+ timeo *= 20;
} else {
timeo = msecs_to_jiffies(10);
}
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1e35c43657c8..6d1857ab8e5f 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -37,6 +37,7 @@
struct sockaddr_sco {
sa_family_t sco_family;
bdaddr_t sco_bdaddr;
+ __u16 sco_pkt_type;
};
/* SCO socket options */
@@ -72,7 +73,8 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
- __u32 flags;
+ __u16 pkt_type;
+
struct sco_conn *conn;
};
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 11cf373970a9..51a7031b4aa3 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -41,6 +41,7 @@ struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
+ /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
__u32 prefered_lft;
atomic_t refcnt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index acc620a4a45f..4fcd77af4055 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1431,6 +1431,8 @@ extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);
+extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
diff --git a/include/sound/max98088.h b/include/sound/max98088.h
index c3ba8239182d..9b4fceb360ac 100644
--- a/include/sound/max98088.h
+++ b/include/sound/max98088.h
@@ -31,6 +31,10 @@ struct max98088_pdata {
struct max98088_eq_cfg *eq_cfg;
unsigned int eq_cfgcnt;
+ /* has to be one of 25, 50, 100, or 200 ms
+ as per the data sheet */
+ unsigned int debounce_time_ms;
+
/* Receiver output can be configured as power amplifier or LINE out */
/* Set receiver_mode to:
* 0 = amplifier output, or
diff --git a/include/sound/tlv320aic326x.h b/include/sound/tlv320aic326x.h
new file mode 100644
index 000000000000..97e5841f9044
--- /dev/null
+++ b/include/sound/tlv320aic326x.h
@@ -0,0 +1,23 @@
+/*
+ * Platform data for Texas Instruments TLV320AIC326x codec
+ *
+ * Copyright 2010 TI Products
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_SND_TLV320AIC326x_H__
+#define __LINUX_SND_TLV320AIC326x_H__
+
+/* codec platform data */
+struct aic326x_pdata {
+
+ /* has to be one of 16, 32, 64, 128, 256, or 512 ms
+ as per the data sheet */
+ unsigned int debounce_time_ms;
+};
+
+#endif
diff --git a/include/trace/events/nvhost.h b/include/trace/events/nvhost.h
new file mode 100644
index 000000000000..6c266b9f2ea4
--- /dev/null
+++ b/include/trace/events/nvhost.h
@@ -0,0 +1,411 @@
+/*
+ * include/trace/events/nvhost.h
+ *
+ * Nvhost event logging to ftrace.
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvhost
+
+#if !defined(_TRACE_NVHOST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVHOST_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(nvhost,
+ TP_PROTO(const char *name),
+ TP_ARGS(name),
+ TP_STRUCT__entry(__field(const char *, name)),
+ TP_fast_assign(__entry->name = name;),
+ TP_printk("name=%s", __entry->name)
+);
+
+DEFINE_EVENT(nvhost, nvhost_channel_open,
+ TP_PROTO(const char *name),
+ TP_ARGS(name)
+);
+
+DEFINE_EVENT(nvhost, nvhost_channel_release,
+ TP_PROTO(const char *name),
+ TP_ARGS(name)
+);
+
+DEFINE_EVENT(nvhost, nvhost_ioctl_channel_flush,
+ TP_PROTO(const char *name),
+ TP_ARGS(name)
+);
+
+TRACE_EVENT(nvhost_channel_write_submit,
+ TP_PROTO(const char *name, ssize_t count, u32 cmdbufs, u32 relocs,
+ u32 syncpt_id, u32 syncpt_incrs),
+
+ TP_ARGS(name, count, cmdbufs, relocs, syncpt_id, syncpt_incrs),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(ssize_t, count)
+ __field(u32, cmdbufs)
+ __field(u32, relocs)
+ __field(u32, syncpt_id)
+ __field(u32, syncpt_incrs)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->count = count;
+ __entry->cmdbufs = cmdbufs;
+ __entry->relocs = relocs;
+ __entry->syncpt_id = syncpt_id;
+ __entry->syncpt_incrs = syncpt_incrs;
+ ),
+
+ TP_printk("name=%s, count=%d, cmdbufs=%u, relocs=%u, syncpt_id=%u, syncpt_incrs=%u",
+ __entry->name, __entry->count, __entry->cmdbufs, __entry->relocs,
+ __entry->syncpt_id, __entry->syncpt_incrs)
+);
+
+TRACE_EVENT(nvhost_ioctl_channel_submit,
+ TP_PROTO(const char *name, u32 version, u32 cmdbufs, u32 relocs,
+ u32 waitchks, u32 syncpt_id, u32 syncpt_incrs),
+
+ TP_ARGS(name, version, cmdbufs, relocs, waitchks,
+ syncpt_id, syncpt_incrs),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(u32, version)
+ __field(u32, cmdbufs)
+ __field(u32, relocs)
+ __field(u32, waitchks)
+ __field(u32, syncpt_id)
+ __field(u32, syncpt_incrs)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->version = version;
+ __entry->cmdbufs = cmdbufs;
+ __entry->relocs = relocs;
+ __entry->waitchks = waitchks;
+ __entry->syncpt_id = syncpt_id;
+ __entry->syncpt_incrs = syncpt_incrs;
+ ),
+
+ TP_printk("name=%s, version=%u, cmdbufs=%u, relocs=%u, waitchks=%u, syncpt_id=%u, syncpt_incrs=%u",
+ __entry->name, __entry->version, __entry->cmdbufs, __entry->relocs,
+ __entry->waitchks, __entry->syncpt_id, __entry->syncpt_incrs)
+);
+
+TRACE_EVENT(nvhost_channel_write_cmdbuf,
+ TP_PROTO(const char *name, u32 mem_id,
+ u32 words, u32 offset),
+
+ TP_ARGS(name, mem_id, words, offset),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(u32, mem_id)
+ __field(u32, words)
+ __field(u32, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->mem_id = mem_id;
+ __entry->words = words;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d",
+ __entry->name, __entry->mem_id,
+ __entry->words, __entry->offset)
+);
+
+TRACE_EVENT(nvhost_channel_write_cmdbuf_data,
+ TP_PROTO(const char *name, u32 mem_id,
+ u32 words, u32 offset, void *cmdbuf),
+
+ TP_ARGS(name, mem_id, words, offset, cmdbuf),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(u32, mem_id)
+ __field(u32, words)
+ __field(u32, offset)
+ __field(bool, cmdbuf)
+ __dynamic_array(u32, cmdbuf, words)
+ ),
+
+ TP_fast_assign(
+ if (cmdbuf) {
+ memcpy(__get_dynamic_array(cmdbuf), cmdbuf+offset,
+ words * sizeof(u32));
+ }
+ __entry->cmdbuf = cmdbuf;
+ __entry->name = name;
+ __entry->mem_id = mem_id;
+ __entry->words = words;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
+ __entry->name, __entry->mem_id,
+ __entry->words, __entry->offset,
+ __print_hex(__get_dynamic_array(cmdbuf),
+ __entry->cmdbuf ? __entry->words * 4 : 0))
+);
+
+TRACE_EVENT(nvhost_channel_write_reloc,
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ ),
+
+ TP_printk("name=%s",
+ __entry->name)
+);
+
+TRACE_EVENT(nvhost_channel_write_waitchks,
+ TP_PROTO(const char *name, u32 waitchks, u32 waitmask),
+
+ TP_ARGS(name, waitchks, waitmask),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(u32, waitchks)
+ __field(u32, waitmask)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->waitchks = waitchks;
+ __entry->waitmask = waitmask;
+ ),
+
+ TP_printk("name=%s, waitchks=%u, waitmask=%08x",
+ __entry->name, __entry->waitchks, __entry->waitmask)
+);
+
+TRACE_EVENT(nvhost_channel_context_switch,
+ TP_PROTO(const char *name, void *old_ctx, void *new_ctx),
+
+ TP_ARGS(name, old_ctx, new_ctx),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(void*, old_ctx)
+ __field(void*, new_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->old_ctx = old_ctx;
+ __entry->new_ctx = new_ctx;
+ ),
+
+ TP_printk("name=%s, old=%p, new=%p",
+ __entry->name, __entry->old_ctx, __entry->new_ctx)
+);
+
+TRACE_EVENT(nvhost_ctrlopen,
+ TP_PROTO(const char *name),
+ TP_ARGS(name),
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ ),
+ TP_fast_assign(
+ __entry->name = name
+ ),
+ TP_printk("name=%s", __entry->name)
+);
+
+TRACE_EVENT(nvhost_ctrlrelease,
+ TP_PROTO(const char *name),
+ TP_ARGS(name),
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ ),
+ TP_fast_assign(
+ __entry->name = name
+ ),
+ TP_printk("name=%s", __entry->name)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_module_mutex,
+ TP_PROTO(u32 lock, u32 id),
+
+ TP_ARGS(lock, id),
+
+ TP_STRUCT__entry(
+ __field(u32, lock);
+ __field(u32, id);
+ ),
+
+ TP_fast_assign(
+ __entry->lock = lock;
+ __entry->id = id;
+ ),
+
+ TP_printk("lock=%u, id=%d",
+ __entry->lock, __entry->id)
+ );
+
+TRACE_EVENT(nvhost_ioctl_ctrl_syncpt_incr,
+ TP_PROTO(u32 id),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __field(u32, id);
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ ),
+
+ TP_printk("id=%d", __entry->id)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_syncpt_read,
+ TP_PROTO(u32 id),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __field(u32, id);
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ ),
+
+ TP_printk("id=%d", __entry->id)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_syncpt_wait,
+ TP_PROTO(u32 id, u32 threshold, s32 timeout),
+
+ TP_ARGS(id, threshold, timeout),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, threshold)
+ __field(s32, timeout)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->threshold = threshold;
+ __entry->timeout = timeout;
+ ),
+
+ TP_printk("id=%u, threshold=%u, timeout=%d",
+ __entry->id, __entry->threshold, __entry->timeout)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_module_regrdwr,
+ TP_PROTO(u32 id, u32 num_offsets, bool write),
+
+ TP_ARGS(id, num_offsets, write),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, num_offsets)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->num_offsets = num_offsets;
+ __entry->write = write;
+ ),
+
+ TP_printk("id=%u, num_offsets=%u, write=%d",
+ __entry->id, __entry->num_offsets, __entry->write)
+);
+
+TRACE_EVENT(nvhost_channel_submitted,
+ TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max),
+
+ TP_ARGS(name, syncpt_base, syncpt_max),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(u32, syncpt_base)
+ __field(u32, syncpt_max)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->syncpt_base = syncpt_base;
+ __entry->syncpt_max = syncpt_max;
+ ),
+
+ TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d",
+ __entry->name, __entry->syncpt_base, __entry->syncpt_max)
+);
+
+TRACE_EVENT(nvhost_channel_submit_complete,
+ TP_PROTO(const char *name, int count),
+
+ TP_ARGS(name, count),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->count = count;
+ ),
+
+ TP_printk("name=%s, count=%d", __entry->name, __entry->count)
+);
+
+TRACE_EVENT(nvhost_wait_cdma,
+ TP_PROTO(const char *name, u32 eventid),
+
+ TP_ARGS(name, eventid),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(u32, eventid)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->eventid = eventid;
+ ),
+
+ TP_printk("name=%s, event=%d", __entry->name, __entry->eventid)
+);
+
+#endif /* _TRACE_NVHOST_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
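
[Editor's note: a minimal sketch of how a caller in the nvhost driver would emit one
of the events defined above, using the standard ftrace tracepoint pattern. The
function name is illustrative only.]

#include <linux/types.h>

/* Exactly one compilation unit defines CREATE_TRACE_POINTS before including
 * the event header; every other user just includes it. */
#define CREATE_TRACE_POINTS
#include <trace/events/nvhost.h>

static void demo_submit_done(const char *chname, u32 base, u32 max)
{
	/* Shows up in the trace buffer as
	 * "nvhost_channel_submitted: name=... syncpt_base=... syncpt_max=..." */
	trace_nvhost_channel_submitted(chname, base, max);
}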
diff --git a/include/video/nvhdcp.h b/include/video/nvhdcp.h
new file mode 100644
index 000000000000..f282ff8caa99
--- /dev/null
+++ b/include/video/nvhdcp.h
@@ -0,0 +1,91 @@
+/*
+ * include/video/nvhdcp.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_NVHDCP_H_
+#define _LINUX_NVHDCP_H_
+
+#include <linux/fb.h>
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+/* maximum receivers and repeaters connected at a time */
+#define TEGRA_NVHDCP_MAX_DEVS 127
+
+/* values for value_flags */
+#define TEGRA_NVHDCP_FLAG_AN 0x0001
+#define TEGRA_NVHDCP_FLAG_AKSV 0x0002
+#define TEGRA_NVHDCP_FLAG_BKSV 0x0004
+#define TEGRA_NVHDCP_FLAG_BSTATUS 0x0008 /* repeater status */
+#define TEGRA_NVHDCP_FLAG_CN 0x0010 /* c_n */
+#define TEGRA_NVHDCP_FLAG_CKSV 0x0020 /* c_ksv */
+#define TEGRA_NVHDCP_FLAG_DKSV 0x0040 /* d_ksv */
+#define TEGRA_NVHDCP_FLAG_KP 0x0080 /* k_prime */
+#define TEGRA_NVHDCP_FLAG_S 0x0100 /* hdcp_status */
+#define TEGRA_NVHDCP_FLAG_CS 0x0200 /* connection state */
+#define TEGRA_NVHDCP_FLAG_V 0x0400
+#define TEGRA_NVHDCP_FLAG_MP 0x0800
+#define TEGRA_NVHDCP_FLAG_BKSVLIST 0x1000
+
+/* values for packet_results */
+#define TEGRA_NVHDCP_RESULT_SUCCESS 0
+#define TEGRA_NVHDCP_RESULT_UNSUCCESSFUL 1
+#define TEGRA_NVHDCP_RESULT_PENDING 0x103
+#define TEGRA_NVHDCP_RESULT_LINK_FAILED 0xc0000013
+/* TODO: replace with -EINVAL */
+#define TEGRA_NVHDCP_RESULT_INVALID_PARAMETER 0xc000000d
+#define TEGRA_NVHDCP_RESULT_INVALID_PARAMETER_MIX 0xc0000030
+/* TODO: replace with -ENOMEM */
+#define TEGRA_NVHDCP_RESULT_NO_MEMORY 0xc0000017
+
+struct tegra_nvhdcp_packet {
+ __u32 value_flags; // (IN/OUT)
+ __u32 packet_results; // (OUT)
+
+ __u64 c_n; // (IN) upstream exchange number
+ __u64 c_ksv; // (IN)
+
+ __u32 b_status; // (OUT) link/repeater status
+ __u64 hdcp_status; // (OUT) READ_S
+ __u64 cs; // (OUT) Connection State
+
+ __u64 k_prime; // (OUT)
+ __u64 a_n; // (OUT)
+ __u64 a_ksv; // (OUT)
+ __u64 b_ksv; // (OUT)
+ __u64 d_ksv; // (OUT)
+ __u8 v_prime[20]; // (OUT) 160-bit
+ __u64 m_prime; // (OUT)
+
+ // (OUT) Valid KSVs in the bKsvList. Maximum is 127 devices
+ __u32 num_bksv_list;
+
+ // (OUT) Up to 127 receivers & repeaters
+ __u64 bksv_list[TEGRA_NVHDCP_MAX_DEVS];
+};
+
+/* parameters to TEGRAIO_NVHDCP_SET_POLICY */
+#define TEGRA_NVHDCP_POLICY_ON_DEMAND 0
+#define TEGRA_NVHDCP_POLICY_ALWAYS_ON 1
+
+/* ioctls */
+#define TEGRAIO_NVHDCP_ON _IO('F', 0x70)
+#define TEGRAIO_NVHDCP_OFF _IO('F', 0x71)
+#define TEGRAIO_NVHDCP_SET_POLICY _IOW('F', 0x72, __u32)
+#define TEGRAIO_NVHDCP_READ_M _IOWR('F', 0x73, struct tegra_nvhdcp_packet)
+#define TEGRAIO_NVHDCP_READ_S _IOWR('F', 0x74, struct tegra_nvhdcp_packet)
+#define TEGRAIO_NVHDCP_RENEGOTIATE _IO('F', 0x75)
+
+#endif
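A minimal user-space sketch of driving the argument-less ioctls above, assuming fd is an open handle on the Tegra HDMI/DC device that implements them (error handling mostly omitted):

    if (ioctl(fd, TEGRAIO_NVHDCP_ON) < 0)
            perror("TEGRAIO_NVHDCP_ON");
    /* ... later, force re-authentication of the HDCP link ... */
    ioctl(fd, TEGRAIO_NVHDCP_RENEGOTIATE);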
diff --git a/include/video/tegra_dc_ext.h b/include/video/tegra_dc_ext.h
new file mode 100644
index 000000000000..7f504be1346a
--- /dev/null
+++ b/include/video/tegra_dc_ext.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ * Some code based on fbdev extensions written by:
+ * Erik Gilling <konkers@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_DC_EXT_H
+#define __TEGRA_DC_EXT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#if defined(__KERNEL__)
+# include <linux/time.h>
+#else
+# include <time.h>
+# include <unistd.h>
+#endif
+
+#define TEGRA_DC_EXT_FMT_P1 0
+#define TEGRA_DC_EXT_FMT_P2 1
+#define TEGRA_DC_EXT_FMT_P4 2
+#define TEGRA_DC_EXT_FMT_P8 3
+#define TEGRA_DC_EXT_FMT_B4G4R4A4 4
+#define TEGRA_DC_EXT_FMT_B5G5R5A 5
+#define TEGRA_DC_EXT_FMT_B5G6R5 6
+#define TEGRA_DC_EXT_FMT_AB5G5R5 7
+#define TEGRA_DC_EXT_FMT_B8G8R8A8 12
+#define TEGRA_DC_EXT_FMT_R8G8B8A8 13
+#define TEGRA_DC_EXT_FMT_B6x2G6x2R6x2A8 14
+#define TEGRA_DC_EXT_FMT_R6x2G6x2B6x2A8 15
+#define TEGRA_DC_EXT_FMT_YCbCr422 16
+#define TEGRA_DC_EXT_FMT_YUV422 17
+#define TEGRA_DC_EXT_FMT_YCbCr420P 18
+#define TEGRA_DC_EXT_FMT_YUV420P 19
+#define TEGRA_DC_EXT_FMT_YCbCr422P 20
+#define TEGRA_DC_EXT_FMT_YUV422P 21
+#define TEGRA_DC_EXT_FMT_YCbCr422R 22
+#define TEGRA_DC_EXT_FMT_YUV422R 23
+#define TEGRA_DC_EXT_FMT_YCbCr422RA 24
+#define TEGRA_DC_EXT_FMT_YUV422RA 25
+
+#define TEGRA_DC_EXT_BLEND_NONE 0
+#define TEGRA_DC_EXT_BLEND_PREMULT 1
+#define TEGRA_DC_EXT_BLEND_COVERAGE 2
+
+#define TEGRA_DC_EXT_FLIP_FLAG_INVERT_H (1 << 0)
+#define TEGRA_DC_EXT_FLIP_FLAG_INVERT_V (1 << 1)
+#define TEGRA_DC_EXT_FLIP_FLAG_TILED (1 << 2)
+
+struct tegra_dc_ext_flip_windowattr {
+ __s32 index;
+ __u32 buff_id;
+ __u32 blend;
+ __u32 offset;
+ __u32 offset_u;
+ __u32 offset_v;
+ __u32 stride;
+ __u32 stride_uv;
+ __u32 pixformat;
+ /*
+ * x, y, w, h are fixed-point: 20 bits of integer (MSB) and 12 bits of
+ * fractional (LSB)
+ */
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+ __u32 out_x;
+ __u32 out_y;
+ __u32 out_w;
+ __u32 out_h;
+ __u32 z;
+ __u32 swap_interval;
+ struct timespec timestamp;
+ __u32 pre_syncpt_id;
+ __u32 pre_syncpt_val;
+ /* These two are optional; if zero, U and V are taken from buff_id */
+ __u32 buff_id_u;
+ __u32 buff_id_v;
+ __u32 flags;
+ /* Leave some wiggle room for future expansion */
+ __u32 pad[5];
+};
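As a worked example of the 20.12 encoding described above, a source rectangle width of 800 pixels is written as 800 << 12 = 0x320000, and half a pixel is 0x800.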
+
+#define TEGRA_DC_EXT_FLIP_N_WINDOWS 3
+
+struct tegra_dc_ext_flip {
+ struct tegra_dc_ext_flip_windowattr win[TEGRA_DC_EXT_FLIP_N_WINDOWS];
+ __u32 post_syncpt_id;
+ __u32 post_syncpt_val;
+};
+
+/*
+ * Cursor image format:
+ * - Tegra hardware supports two colors: foreground and background, specified
+ * by the client in RGB8.
+ * - The image should be specified as two 1bpp bitmaps immediately following
+ * each other in memory. Each pixel in the final cursor will be constructed
+ * from the bitmaps with the following logic:
+ * bitmap1 bitmap0
+ * (mask) (color)
+ * 1 0 transparent
+ * 1 1 inverted
+ * 0 0 background color
+ * 0 1 foreground color
+ * - Exactly one of the SIZE flags must be specified.
+ */
+#define TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 1
+#define TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64 2
+struct tegra_dc_ext_cursor_image {
+ struct {
+ __u8 r;
+ __u8 g;
+ __u8 b;
+ } foreground, background;
+ __u32 buff_id;
+ __u32 flags;
+};
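A sketch of building a fully opaque, foreground-colored 32x32 cursor under the format described above; buf and cursor_buf_id stand in for an nvmap allocation that is not shown, and the in-memory order of the two bitmaps is an assumption (the header only says they are contiguous):

    /* bitmap0 (color) = all 1s, bitmap1 (mask) = all 0s -> foreground color */
    __u8 *color = buf;                      /* 32*32/8 = 128 bytes */
    __u8 *mask  = buf + (32 * 32 / 8);      /* second bitmap follows the first */
    memset(color, 0xff, 32 * 32 / 8);
    memset(mask, 0x00, 32 * 32 / 8);

    struct tegra_dc_ext_cursor_image img = {
            .foreground = { .r = 255, .g = 255, .b = 255 },
            .background = { .r = 0, .g = 0, .b = 0 },
            .buff_id    = cursor_buf_id,
            .flags      = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32,
    };
    ioctl(dc_fd, TEGRA_DC_EXT_SET_CURSOR_IMAGE, &img);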
+
+/* Possible flags for struct nvdc_cursor's flags field */
+#define TEGRA_DC_EXT_CURSOR_FLAGS_VISIBLE 1
+
+struct tegra_dc_ext_cursor {
+ __s16 x;
+ __s16 y;
+ __u32 flags;
+};
+
+/*
+ * Color conversion is performed as follows:
+ *
+ * r = sat(kyrgb * sat(y + yof) + kur * u + kvr * v)
+ * g = sat(kyrgb * sat(y + yof) + kug * u + kvg * v)
+ * b = sat(kyrgb * sat(y + yof) + kub * u + kvb * v)
+ *
+ * Coefficients should be specified as fixed-point values; the exact format
+ * varies for each coefficient.
+ * The format for each coefficient is listed below with the syntax:
+ * - A "s." prefix means that the coefficient has a sign bit (twos complement).
+ * - The first number is the number of bits in the integer component (not
+ * including the optional sign bit).
+ * - The second number is the number of bits in the fractional component.
+ *
+ * All three fields should be tightly packed, justified to the LSB of the
+ * 16-bit value. For example, the "s.2.8" value should be packed as:
+ * (MSB) 5 bits of 0, 1 bit of sign, 2 bits of integer, 8 bits of frac (LSB)
+ */
+struct tegra_dc_ext_csc {
+ __u32 win_index;
+ __u16 yof; /* s.7.0 */
+ __u16 kyrgb; /* 2.8 */
+ __u16 kur; /* s.2.8 */
+ __u16 kvr; /* s.2.8 */
+ __u16 kug; /* s.1.8 */
+ __u16 kvg; /* s.1.8 */
+ __u16 kub; /* s.2.8 */
+ __u16 kvb; /* s.2.8 */
+};
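A minimal sketch of the packing rule, using an assumed helper that is not part of the ABI: scale the coefficient by 2^8 and keep the low 11 bits (sign + 2 integer + 8 fractional, two's complement, LSB-justified):

    static __u16 pack_s2_8(int scaled)      /* scaled = value * 256 */
    {
            return (__u16)(scaled & 0x7ff);
    }
    /* e.g. 1.0 -> pack_s2_8(256) == 0x100,  -0.5 -> pack_s2_8(-128) == 0x780 */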
+
+/*
+ * RGB Lookup table
+ *
+ * In true-color and YUV modes this is used for post-CSC RGB->RGB lookup, i.e.
+ * gamma-correction. In palette-indexed RGB modes, this table designates the
+ * mode's color palette.
+ *
+ * To convert 8-bit per channel RGB values to 16-bit, duplicate the 8 bits
+ * in low and high byte, e.g. r=r|(r<<8)
+ *
+ * To just update flags, set len to 0.
+ *
+ * Current Tegra DC hardware supports 8-bit per channel to 8-bit per channel,
+ * and each hardware window (overlay) uses its own lookup table.
+ *
+ */
+struct tegra_dc_ext_lut {
+ __u32 win_index; /* window index to set lut for */
+ __u32 flags; /* Flag bitmask, see TEGRA_DC_EXT_LUT_FLAGS_* */
+ __u32 start; /* start index to update lut from */
+ __u32 len; /* number of valid lut entries */
+ __u16 *r; /* array of 16-bit red values, 0 to reset */
+ __u16 *g; /* array of 16-bit green values, 0 to reset */
+ __u16 *b; /* array of 16-bit blue values, 0 to reset */
+};
+
+/* tegra_dc_ext_lut.flags - override global fb device lookup table.
+ * Default behaviour is double-lookup.
+ */
+#define TEGRA_DC_EXT_LUT_FLAGS_FBOVERRIDE 0x01
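A sketch of filling an identity lookup table for window 0 using the 8-to-16-bit expansion described above; dc_fd is an assumed open handle on the DC extension device:

    __u16 r[256], g[256], b[256];
    struct tegra_dc_ext_lut lut = {
            .win_index = 0,
            .flags = TEGRA_DC_EXT_LUT_FLAGS_FBOVERRIDE,
            .start = 0,
            .len   = 256,
            .r = r, .g = g, .b = b,
    };
    int i;

    for (i = 0; i < 256; i++)
            r[i] = g[i] = b[i] = i | (i << 8);
    ioctl(dc_fd, TEGRA_DC_EXT_SET_LUT, &lut);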
+
+#define TEGRA_DC_EXT_FLAGS_ENABLED 1
+struct tegra_dc_ext_status {
+ __u32 flags;
+ /* Leave some wiggle room for future expansion */
+ __u32 pad[3];
+};
+
+#define TEGRA_DC_EXT_SET_NVMAP_FD \
+ _IOW('D', 0x00, __s32)
+
+#define TEGRA_DC_EXT_GET_WINDOW \
+ _IOW('D', 0x01, __u32)
+#define TEGRA_DC_EXT_PUT_WINDOW \
+ _IOW('D', 0x02, __u32)
+
+#define TEGRA_DC_EXT_FLIP \
+ _IOWR('D', 0x03, struct tegra_dc_ext_flip)
+
+#define TEGRA_DC_EXT_GET_CURSOR \
+ _IO('D', 0x04)
+#define TEGRA_DC_EXT_PUT_CURSOR \
+ _IO('D', 0x05)
+#define TEGRA_DC_EXT_SET_CURSOR_IMAGE \
+ _IOW('D', 0x06, struct tegra_dc_ext_cursor_image)
+#define TEGRA_DC_EXT_SET_CURSOR \
+ _IOW('D', 0x07, struct tegra_dc_ext_cursor)
+
+#define TEGRA_DC_EXT_SET_CSC \
+ _IOW('D', 0x08, struct tegra_dc_ext_csc)
+
+#define TEGRA_DC_EXT_GET_STATUS \
+ _IOR('D', 0x09, struct tegra_dc_ext_status)
+
+/*
+ * Returns the auto-incrementing vblank syncpoint for the head associated with
+ * this device node
+ */
+#define TEGRA_DC_EXT_GET_VBLANK_SYNCPT \
+ _IOR('D', 0x09, __u32)
+
+#define TEGRA_DC_EXT_SET_LUT \
+ _IOW('D', 0x0A, struct tegra_dc_ext_lut)
+
+enum tegra_dc_ext_control_output_type {
+ TEGRA_DC_EXT_DSI,
+ TEGRA_DC_EXT_LVDS,
+ TEGRA_DC_EXT_VGA,
+ TEGRA_DC_EXT_HDMI,
+ TEGRA_DC_EXT_DVI,
+};
+
+/*
+ * Get the properties for a given output.
+ *
+ * handle (in): Which output to query
+ * type (out): Describes the type of the output
+ * connected (out): Non-zero iff the output is currently connected
+ * associated_head (out): The head number that the output is currently
+ * bound to. -1 iff the output is not associated with any head.
+ * head_mask (out): Bitmask of which heads the output may be bound to (some
+ * outputs are permanently bound to a single head).
+ */
+struct tegra_dc_ext_control_output_properties {
+ __u32 handle;
+ enum tegra_dc_ext_control_output_type type;
+ __u32 connected;
+ __s32 associated_head;
+ __u32 head_mask;
+};
+
+/*
+ * This allows userspace to query the raw EDID data for the specified output
+ * handle.
+ *
+ * Here, the size parameter is both an input and an output:
+ * 1. Userspace passes in the size of the buffer allocated for data.
+ * 2. If size is too small, the call fails with the error EFBIG; otherwise, the
+ * raw EDID data is written to the buffer pointed to by data. In both
+ * cases, size will be filled in with the size of the data.
+ */
+struct tegra_dc_ext_control_output_edid {
+ __u32 handle;
+ __u32 size;
+ void *data;
+};
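A sketch of the size negotiation described above; ctrl_fd and handle are assumed inputs, and the first zero-sized call is assumed to fail with EFBIG while still reporting the required size:

    struct tegra_dc_ext_control_output_edid edid = {
            .handle = handle, .size = 0, .data = NULL,
    };

    ioctl(ctrl_fd, TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID, &edid); /* fails, sets size */
    edid.data = malloc(edid.size);
    ioctl(ctrl_fd, TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID, &edid); /* raw EDID in data */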
+
+struct tegra_dc_ext_event {
+ __u32 type;
+ ssize_t data_size;
+ char data[0];
+};
+
+#define TEGRA_DC_EXT_EVENT_HOTPLUG 0x1
+struct tegra_dc_ext_control_event_hotplug {
+ __u32 handle;
+};
+
+#define TEGRA_DC_EXT_CONTROL_GET_NUM_OUTPUTS \
+ _IOR('C', 0x00, __u32)
+#define TEGRA_DC_EXT_CONTROL_GET_OUTPUT_PROPERTIES \
+ _IOWR('C', 0x01, struct tegra_dc_ext_control_output_properties)
+#define TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID \
+ _IOWR('C', 0x02, struct tegra_dc_ext_control_output_edid)
+#define TEGRA_DC_EXT_CONTROL_SET_EVENT_MASK \
+ _IOW('C', 0x03, __u32)
+
+#endif /* __TEGRA_DC_EXT_H */
diff --git a/include/video/tegrafb.h b/include/video/tegrafb.h
new file mode 100644
index 000000000000..919661b1a8e0
--- /dev/null
+++ b/include/video/tegrafb.h
@@ -0,0 +1,32 @@
+/*
+ * include/video/tegrafb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_TEGRAFB_H_
+#define _LINUX_TEGRAFB_H_
+
+#include <linux/fb.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct tegra_fb_modedb {
+ struct fb_var_screeninfo *modedb;
+ __u32 modedb_len;
+};
+
+#define FBIO_TEGRA_GET_MODEDB _IOWR('F', 0x42, struct tegra_fb_modedb)
+
+#endif
diff --git a/init/Kconfig b/init/Kconfig
index d62778390e55..6aad581f18e8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -915,6 +915,12 @@ config SYSCTL
config ANON_INODES
bool
+config PANIC_TIMEOUT
+ int "Default panic timeout"
+ default 0
+ help
+ Set the default panic timeout, in seconds. With the default of 0
+ the kernel will not reboot automatically after a panic.
+
menuconfig EXPERT
bool "Configure standard kernel features (expert users)"
# Unhide debug options, to make the on-by-default options visible
@@ -1088,6 +1094,15 @@ config SHMEM
option replaces shmem and tmpfs with the much simpler ramfs code,
which may be appropriate on small systems without swap.
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ default n
+ depends on SHMEM || TINY_SHMEM
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
config AIO
bool "Enable AIO support" if EXPERT
default y
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1d2b6ceea95d..bab5a7911b84 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -270,6 +270,33 @@ static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
+/*
+ * A queue for waiters to do rmdir() on a cgroup. A task will sleep when
+ * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
+ * reference to css->refcnt. In general, this refcnt is expected to go down
+ * to zero soon.
+ *
+ * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
+ */
+DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+
+static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
+{
+ if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+ wake_up_all(&cgroup_rmdir_waitq);
+}
+
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
+{
+ css_get(css);
+}
+
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
+{
+ cgroup_wakeup_rmdir_waiter(css->cgroup);
+ css_put(css);
+}
+
/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
/*
@@ -329,52 +356,43 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
return &css_set_table[index];
}
-/* We don't maintain the lists running through each css_set to its
- * task until after the first call to cgroup_iter_start(). This
- * reduces the fork()/exit() overhead for people who have cgroups
- * compiled into their kernel but not actually in use */
-static int use_task_css_set_links __read_mostly;
-
-static void __put_css_set(struct css_set *cg, int taskexit)
+static void free_css_set_work(struct work_struct *work)
{
+ struct css_set *cg = container_of(work, struct css_set, work);
struct cg_cgroup_link *link;
struct cg_cgroup_link *saved_link;
- /*
- * Ensure that the refcount doesn't hit zero while any readers
- * can see it. Similar to atomic_dec_and_lock(), but for an
- * rwlock
- */
- if (atomic_add_unless(&cg->refcount, -1, 1))
- return;
- write_lock(&css_set_lock);
- if (!atomic_dec_and_test(&cg->refcount)) {
- write_unlock(&css_set_lock);
- return;
- }
-
- /* This css_set is dead. unlink it and release cgroup refcounts */
- hlist_del(&cg->hlist);
- css_set_count--;
+ write_lock(&css_set_lock);
list_for_each_entry_safe(link, saved_link, &cg->cg_links,
cg_link_list) {
struct cgroup *cgrp = link->cgrp;
list_del(&link->cg_link_list);
list_del(&link->cgrp_link_list);
- if (atomic_dec_and_test(&cgrp->count) &&
- notify_on_release(cgrp)) {
- if (taskexit)
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ if (atomic_dec_and_test(&cgrp->count)) {
check_for_release(cgrp);
+ cgroup_wakeup_rmdir_waiter(cgrp);
}
-
kfree(link);
}
-
write_unlock(&css_set_lock);
- kfree_rcu(cg, rcu_head);
+
+ kfree(cg);
}
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+ struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+
+ INIT_WORK(&cg->work, free_css_set_work);
+ schedule_work(&cg->work);
+}
+
+/* We don't maintain the lists running through each css_set to its
+ * task until after the first call to cgroup_iter_start(). This
+ * reduces the fork()/exit() overhead for people who have cgroups
+ * compiled into their kernel but not actually in use */
+static int use_task_css_set_links __read_mostly;
+
/*
* refcounted get/put for css_set objects
*/
@@ -383,14 +401,26 @@ static inline void get_css_set(struct css_set *cg)
atomic_inc(&cg->refcount);
}
-static inline void put_css_set(struct css_set *cg)
+static void put_css_set(struct css_set *cg)
{
- __put_css_set(cg, 0);
-}
+ /*
+ * Ensure that the refcount doesn't hit zero while any readers
+ * can see it. Similar to atomic_dec_and_lock(), but for an
+ * rwlock
+ */
+ if (atomic_add_unless(&cg->refcount, -1, 1))
+ return;
+ write_lock(&css_set_lock);
+ if (!atomic_dec_and_test(&cg->refcount)) {
+ write_unlock(&css_set_lock);
+ return;
+ }
-static inline void put_css_set_taskexit(struct css_set *cg)
-{
- __put_css_set(cg, 1);
+ hlist_del(&cg->hlist);
+ css_set_count--;
+
+ write_unlock(&css_set_lock);
+ call_rcu(&cg->rcu_head, free_css_set_rcu);
}
/*
@@ -722,9 +752,9 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
* cgroup_attach_task(), which overwrites one tasks cgroup pointer with
* another. It does so using cgroup_mutex, however there are
* several performance critical places that need to reference
- * task->cgroup without the expense of grabbing a system global
+ * task->cgroups without the expense of grabbing a system global
* mutex. Therefore except as noted below, when dereferencing or, as
- * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use
+ * in cgroup_attach_task(), modifying a task's cgroups pointer we use
* task_lock(), which acts on a spinlock (task->alloc_lock) already in
* the task_struct routinely used for such matters.
*
@@ -914,33 +944,6 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
}
/*
- * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
- * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
- * reference to css->refcnt. In general, this refcnt is expected to goes down
- * to zero, soon.
- *
- * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
- */
-DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
-
-static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
-{
- if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
- wake_up_all(&cgroup_rmdir_waitq);
-}
-
-void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
-{
- css_get(css);
-}
-
-void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
-{
- cgroup_wakeup_rmdir_waiter(css->cgroup);
- css_put(css);
-}
-
-/*
* Call with cgroup_mutex held. Drops reference counts on modules, including
* any duplicate ones that parse_cgroupfs_options took. If this function
* returns an error, no reference counts are touched.
@@ -1823,6 +1826,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
struct cgroup_subsys *ss, *failed_ss = NULL;
struct cgroup *oldcgrp;
struct cgroupfs_root *root = cgrp->root;
+ struct css_set *cg;
/* Nothing to do if the task is already in that cgroup */
oldcgrp = task_cgroup_from_root(tsk, root);
@@ -1852,6 +1856,11 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
}
}
+ task_lock(tsk);
+ cg = tsk->cgroups;
+ get_css_set(cg);
+ task_unlock(tsk);
+
retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
if (retval)
goto out;
@@ -1864,8 +1873,9 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
if (ss->attach)
ss->attach(ss, cgrp, oldcgrp, tsk);
}
-
- synchronize_rcu();
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ /* put_css_set will not destroy cg until after an RCU grace period */
+ put_css_set(cg);
/*
* wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -2192,6 +2202,24 @@ out_free_group_list:
return retval;
}
+static int cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
+{
+ struct cgroup_subsys *ss;
+ int ret;
+
+ for_each_subsys(cgrp->root, ss) {
+ if (ss->allow_attach) {
+ ret = ss->allow_attach(cgrp, tsk);
+ if (ret)
+ return ret;
+ } else {
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
/*
* Find the task_struct of the task to attach by vpid and pass it along to the
* function to attach either it or all tasks in its threadgroup. Will take
@@ -2237,9 +2265,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
if (cred->euid &&
cred->euid != tcred->uid &&
cred->euid != tcred->suid) {
- rcu_read_unlock();
- cgroup_unlock();
- return -EACCES;
+ /*
+ * if the default permission check fails, give each
+ * cgroup a chance to extend the permission check
+ */
+ ret = cgroup_allow_attach(cgrp, tsk);
+ if (ret) {
+ rcu_read_unlock();
+ cgroup_unlock();
+ return ret;
+ }
}
get_task_struct(tsk);
rcu_read_unlock();
@@ -3814,6 +3849,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (err < 0)
goto err_remove;
+ set_bit(CGRP_RELEASABLE, &parent->flags);
+
/* The cgroup directory was pre-locked for us */
BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
@@ -3945,6 +3982,21 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
return !failed;
}
+/* checks if all of the css_sets attached to a cgroup have a refcount of 0.
+ * Must be called with css_set_lock held */
+static int cgroup_css_sets_empty(struct cgroup *cgrp)
+{
+ struct cg_cgroup_link *link;
+
+ list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
+ struct css_set *cg = link->cg;
+ if (atomic_read(&cg->refcount) > 0)
+ return 0;
+ }
+
+ return 1;
+}
+
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
struct cgroup *cgrp = dentry->d_fsdata;
@@ -3957,7 +4009,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
/* the vfs holds both inode->i_mutex already */
again:
mutex_lock(&cgroup_mutex);
- if (atomic_read(&cgrp->count) != 0) {
+ if (!cgroup_css_sets_empty(cgrp)) {
mutex_unlock(&cgroup_mutex);
return -EBUSY;
}
@@ -3990,7 +4042,7 @@ again:
mutex_lock(&cgroup_mutex);
parent = cgrp->parent;
- if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
+ if (!cgroup_css_sets_empty(cgrp) || !list_empty(&cgrp->children)) {
clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
mutex_unlock(&cgroup_mutex);
return -EBUSY;
@@ -4030,7 +4082,6 @@ again:
cgroup_d_remove_dir(d);
dput(d);
- set_bit(CGRP_RELEASABLE, &parent->flags);
check_for_release(parent);
/*
@@ -4630,7 +4681,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
task_unlock(tsk);
if (cg)
- put_css_set_taskexit(cg);
+ put_css_set(cg);
}
/**
@@ -4684,6 +4735,14 @@ static void check_for_release(struct cgroup *cgrp)
}
/* Caller must verify that the css is not for root cgroup */
+void __css_get(struct cgroup_subsys_state *css, int count)
+{
+ atomic_add(count, &css->refcnt);
+ set_bit(CGRP_RELEASABLE, &css->cgroup->flags);
+}
+EXPORT_SYMBOL_GPL(__css_get);
+
+/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css, int count)
{
struct cgroup *cgrp = css->cgroup;
@@ -4691,10 +4750,7 @@ void __css_put(struct cgroup_subsys_state *css, int count)
rcu_read_lock();
val = atomic_sub_return(count, &css->refcnt);
if (val == 1) {
- if (notify_on_release(cgrp)) {
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
- check_for_release(cgrp);
- }
+ check_for_release(cgrp);
cgroup_wakeup_rmdir_waiter(cgrp);
}
rcu_read_unlock();
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 12b7458f23b1..404770761a4e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -594,3 +594,23 @@ void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
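A hypothetical consumer of the notifier chain exported above; the names are illustrative only, not part of the patch:

    static int my_idle_notify(struct notifier_block *nb, unsigned long val,
                              void *data)
    {
            /* val is whatever the platform passes to idle_notifier_call_chain() */
            return NOTIFY_OK;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_notify,
    };

    /* in platform init:  idle_notifier_register(&my_idle_nb); */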
diff --git a/kernel/fork.c b/kernel/fork.c
index 8e6b6f4fb272..f65fa0627c04 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -153,6 +153,9 @@ struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
+/* Notifier list called when a task struct is freed */
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
@@ -184,6 +187,18 @@ static inline void put_signal_struct(struct signal_struct *sig)
free_signal_struct(sig);
}
+int task_free_register(struct notifier_block *n)
+{
+ return atomic_notifier_chain_register(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_register);
+
+int task_free_unregister(struct notifier_block *n)
+{
+ return atomic_notifier_chain_unregister(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_unregister);
+
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
@@ -194,6 +209,7 @@ void __put_task_struct(struct task_struct *tsk)
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
+ atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index a92028196cc1..824b741925bb 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -35,7 +35,7 @@ config GCOV_KERNEL
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
depends on GCOV_KERNEL
- depends on SUPERH || S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
+ depends on SUPERH || S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE || ARM
default n
---help---
This options activates profiling for the entire kernel.
@@ -46,4 +46,10 @@ config GCOV_PROFILE_ALL
larger and run slower. Also be sure to exclude files from profiling
which are not linked to the kernel image to prevent linker errors.
+config GCOV_CTORS
+ string
+ depends on CONSTRUCTORS
+ default ".init_array" if ARM && AEABI
+ default ".ctors"
+
endmenu
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
index ae5bb4260033..d753d1152b7b 100644
--- a/kernel/gcov/gcc_3_4.c
+++ b/kernel/gcov/gcc_3_4.c
@@ -297,16 +297,30 @@ void gcov_iter_start(struct gcov_iterator *iter)
}
/* Mapping of logical record number to actual file content. */
-#define RECORD_FILE_MAGIC 0
-#define RECORD_GCOV_VERSION 1
-#define RECORD_TIME_STAMP 2
-#define RECORD_FUNCTION_TAG 3
-#define RECORD_FUNCTON_TAG_LEN 4
-#define RECORD_FUNCTION_IDENT 5
-#define RECORD_FUNCTION_CHECK 6
-#define RECORD_COUNT_TAG 7
-#define RECORD_COUNT_LEN 8
-#define RECORD_COUNT 9
+#define RECORD_FILE_MAGIC 0
+#define RECORD_GCOV_VERSION 1
+#define RECORD_TIME_STAMP 2
+#define RECORD_FUNCTION_TAG 3
+#define RECORD_FUNCTON_TAG_LEN 4
+#define RECORD_FUNCTION_IDENT 5
+#define RECORD_FUNCTION_CHECK_LINE 6
+#define RECORD_FUNCTION_CHECK_CFG 7
+#define RECORD_FUNCTION_NAME_LEN 8
+#define RECORD_FUNCTION_NAME 9
+#define RECORD_COUNT_TAG 10
+#define RECORD_COUNT_LEN 11
+#define RECORD_COUNT 12
+
+/* Return length of string encoded in GCOV format. */
+static size_t
+sizeof_str(const char *str)
+{
+ size_t len;
+ len = (str) ? strlen(str) : 0;
+ if (len == 0)
+ return 1;
+ return 1 + ((len + 4) >> 2);
+}
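For example, sizeof_str("main") is 1 + ((4 + 4) >> 2) = 3: one 32-bit word for the length plus two words holding the NUL-terminated, zero-padded string, while a NULL or empty string costs a single word. RECORD_FUNCTION_NAME_LEN below accordingly writes sizeof_str(name) - 1, the number of payload words.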
/**
* gcov_iter_next - advance file iterator to next logical record
@@ -323,6 +337,9 @@ int gcov_iter_next(struct gcov_iterator *iter)
case RECORD_FUNCTON_TAG_LEN:
case RECORD_FUNCTION_IDENT:
case RECORD_COUNT_TAG:
+ case RECORD_FUNCTION_CHECK_LINE:
+ case RECORD_FUNCTION_CHECK_CFG:
+ case RECORD_FUNCTION_NAME_LEN:
/* Advance to next record */
iter->record++;
break;
@@ -332,7 +349,7 @@ int gcov_iter_next(struct gcov_iterator *iter)
/* fall through */
case RECORD_COUNT_LEN:
if (iter->count < get_func(iter)->n_ctrs[iter->type]) {
- iter->record = 9;
+ iter->record = 12;
break;
}
/* Advance to next counter type */
@@ -340,9 +357,9 @@ int gcov_iter_next(struct gcov_iterator *iter)
iter->count = 0;
iter->type++;
/* fall through */
- case RECORD_FUNCTION_CHECK:
+ case RECORD_FUNCTION_NAME:
if (iter->type < iter->num_types) {
- iter->record = 7;
+ iter->record = 10;
break;
}
/* Advance to next function */
@@ -395,6 +412,34 @@ static int seq_write_gcov_u64(struct seq_file *seq, u64 v)
data[1] = (v >> 32);
return seq_write(seq, data, sizeof(data));
}
+/**
+ * seq_write_gcov_str - write string in gcov format to seq_file
+ * @seq: seq_file handle
+ * @str: string to be stored
+ *
+ * String format defined by gcc: the string is stored as a sequence of 32 bit
+ * words in the endianness of the machine generating the file, NUL-terminated
+ * and zero-padded to the next word boundary. The leading length word is
+ * written separately (see RECORD_FUNCTION_NAME_LEN).
+ */
+static int seq_write_gcov_str(struct seq_file *seq, const char *str)
+{
+ if (str) {
+ size_t len;
+ int str_off;
+ u32 data;
+ len = strlen(str);
+ for (str_off = 0; str_off < (sizeof_str(str) - 2) ; str_off++) {
+ memcpy(&data, (str + str_off * 4), 4);
+ seq_write(seq, &data, sizeof(data));
+ }
+ data = 0;
+ memcpy(&data, (str + str_off * 4), (len - str_off * 4));
+ return seq_write(seq, &data, sizeof(data));
+ } else {
+ return 0;
+ }
+}
/**
* gcov_iter_write - write data for current pos to seq_file
@@ -421,13 +466,24 @@ int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION);
break;
case RECORD_FUNCTON_TAG_LEN:
- rc = seq_write_gcov_u32(seq, 2);
+ rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION_LENGTH +
+ (sizeof_str(get_func(iter)->name)));
break;
case RECORD_FUNCTION_IDENT:
rc = seq_write_gcov_u32(seq, get_func(iter)->ident);
break;
- case RECORD_FUNCTION_CHECK:
- rc = seq_write_gcov_u32(seq, get_func(iter)->checksum);
+ case RECORD_FUNCTION_CHECK_LINE:
+ rc = seq_write_gcov_u32(seq, get_func(iter)->lineno_checksum);
+ break;
+ case RECORD_FUNCTION_CHECK_CFG:
+ rc = seq_write_gcov_u32(seq, get_func(iter)->cfg_checksum);
+ break;
+ case RECORD_FUNCTION_NAME_LEN:
+ rc = seq_write_gcov_u32(seq,
+ (sizeof_str(get_func(iter)->name) - 1));
+ break;
+ case RECORD_FUNCTION_NAME:
+ rc = seq_write_gcov_str(seq, get_func(iter)->name);
break;
case RECORD_COUNT_TAG:
rc = seq_write_gcov_u32(seq,
diff --git a/kernel/gcov/gcov.h b/kernel/gcov/gcov.h
index 060073ebf7a6..040c6980df0d 100644
--- a/kernel/gcov/gcov.h
+++ b/kernel/gcov/gcov.h
@@ -21,9 +21,10 @@
* gcc and need to be kept as close to the original definition as possible to
* remain compatible.
*/
-#define GCOV_COUNTERS 5
+#define GCOV_COUNTERS 10
#define GCOV_DATA_MAGIC ((unsigned int) 0x67636461)
#define GCOV_TAG_FUNCTION ((unsigned int) 0x01000000)
+#define GCOV_TAG_FUNCTION_LENGTH 3
#define GCOV_TAG_COUNTER_BASE ((unsigned int) 0x01a10000)
#define GCOV_TAG_FOR_COUNTER(count) \
(GCOV_TAG_COUNTER_BASE + ((unsigned int) (count) << 17))
@@ -34,10 +35,38 @@ typedef long gcov_type;
typedef long long gcov_type;
#endif
+/*
+ * Source module info. The data structure is used in both runtime and
+ * profile-use phase.
+ */
+struct gcov_module_info {
+ unsigned int ident;
+/*
+ * This is overloaded to mean two things:
+ * (1) means FDO/LIPO in instrumented binary.
+ * (2) means IS_PRIMARY in persistent file or memory copy used in profile-use.
+ */
+ unsigned int is_primary;
+ unsigned int is_exported;
+ unsigned int lang;
+ char *da_filename;
+ char *source_filename;
+ unsigned int num_quote_paths;
+ unsigned int num_bracket_paths;
+ unsigned int num_cpp_defines;
+ unsigned int num_cpp_includes;
+ unsigned int num_cl_args;
+ char *string_array[1];
+};
+
+
/**
* struct gcov_fn_info - profiling meta data per function
* @ident: object file-unique function identifier
- * @checksum: function checksum
+ * @lineno_checksum: function lineno checksum
+ * @cfg_checksum: function cfg checksum
+ * @dc_offset: direct call offset
+ * @name: function name
* @n_ctrs: number of values per counter type belonging to this function
*
* This data is generated by gcc during compilation and doesn't change
@@ -45,7 +74,10 @@ typedef long long gcov_type;
*/
struct gcov_fn_info {
unsigned int ident;
- unsigned int checksum;
+ unsigned int lineno_checksum;
+ unsigned int cfg_checksum;
+ unsigned int dc_offset;
+ const char *name;
unsigned int n_ctrs[0];
};
@@ -67,9 +99,11 @@ struct gcov_ctr_info {
/**
* struct gcov_info - profiling data per object file
* @version: gcov version magic indicating the gcc version used for compilation
+ * @modinfo: additional module information
* @next: list head for a singly-linked list
* @stamp: time stamp
* @filename: name of the associated gcov data file
+ * @eof_pos: end position of profile data
* @n_functions: number of instrumented functions
* @functions: function data
* @ctr_mask: mask specifying which counter types are active
@@ -80,9 +114,11 @@ struct gcov_ctr_info {
*/
struct gcov_info {
unsigned int version;
+ struct gcov_module_info *mod_info;
struct gcov_info *next;
unsigned int stamp;
const char *filename;
+ unsigned int eof_pos;
unsigned int n_functions;
const struct gcov_fn_info *functions;
unsigned int ctr_mask;
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 15e53b1766a6..fe4b09cf829c 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -104,8 +104,13 @@ int check_wakeup_irqs(void)
for_each_irq_desc(irq, desc) {
if (irqd_is_wakeup_set(&desc->irq_data)) {
- if (desc->istate & IRQS_PENDING)
+ if (desc->istate & IRQS_PENDING) {
+ pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
+ irq,
+ desc->action && desc->action->name ?
+ desc->action->name : "");
return -EBUSY;
+ }
continue;
}
/*
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 14dd5761e8c9..ef60772d2feb 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -55,17 +55,18 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq)
{
- /*
- * We do not resend level type interrupts. Level type
- * interrupts are resent by hardware when they are still
- * active.
- */
- if (irq_settings_is_level(desc))
- return;
- if (desc->istate & IRQS_REPLAY)
- return;
if (desc->istate & IRQS_PENDING) {
desc->istate &= ~IRQS_PENDING;
+ /*
+ * We do not resend level type interrupts. Level type
+ * interrupts are resent by hardware when they are still
+ * active.
+ */
+ if (irq_settings_is_level(desc))
+ return;
+ if (desc->istate & IRQS_REPLAY)
+ return;
+
desc->istate |= IRQS_REPLAY;
if (!desc->irq_data.chip->irq_retrigger ||
diff --git a/kernel/module.c b/kernel/module.c
index 04379f92f843..e0ddcece2be4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2528,7 +2528,7 @@ static void find_module_sections(struct module *mod, struct load_info *info)
mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
- mod->ctors = section_objs(info, ".ctors",
+ mod->ctors = section_objs(info, CONFIG_GCOV_CTORS,
sizeof(*mod->ctors), &mod->num_ctors);
#endif
diff --git a/kernel/panic.c b/kernel/panic.c
index d7bb6974efb5..41fc78ea3db9 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,13 +27,19 @@
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
+/* Machine specific panic information string */
+char *mach_panic_string;
+
int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -344,6 +350,11 @@ late_initcall(init_oops_id);
void print_oops_end_marker(void)
{
init_oops_id();
+
+ if (mach_panic_string)
+ printk(KERN_WARNING "Board Information: %s\n",
+ mach_panic_string);
+
printk(KERN_WARNING "---[ end trace %016llx ]---\n",
(unsigned long long)oops_id);
}
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 37f05d0f0793..06e74202a8b9 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -103,12 +103,23 @@ static struct pm_qos_object network_throughput_pm_qos = {
.type = PM_QOS_MAX,
};
+static BLOCKING_NOTIFIER_HEAD(max_online_cpus_notifier);
+static struct pm_qos_object max_online_cpus_pm_qos = {
+ .requests = PLIST_HEAD_INIT(max_online_cpus_pm_qos.requests),
+ .notifiers = &max_online_cpus_notifier,
+ .name = "max_online_cpus",
+ .target_value = PM_QOS_MAX_ONLINE_CPUS_DEFAULT_VALUE,
+ .default_value = PM_QOS_MAX_ONLINE_CPUS_DEFAULT_VALUE,
+ .type = PM_QOS_MIN,
+};
+
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
&network_lat_pm_qos,
- &network_throughput_pm_qos
+ &network_throughput_pm_qos,
+ &max_online_cpus_pm_qos
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -459,21 +470,18 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
static int __init pm_qos_power_init(void)
{
int ret = 0;
+ int i;
- ret = register_pm_qos_misc(&cpu_dma_pm_qos);
- if (ret < 0) {
- printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
- return ret;
- }
- ret = register_pm_qos_misc(&network_lat_pm_qos);
- if (ret < 0) {
- printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
- return ret;
+ BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
+
+ for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
+ ret = register_pm_qos_misc(pm_qos_array[i]);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: %s setup failed\n",
+ pm_qos_array[i]->name);
+ return ret;
+ }
}
- ret = register_pm_qos_misc(&network_throughput_pm_qos);
- if (ret < 0)
- printk(KERN_ERR
- "pm_qos_param: network_throughput setup failed\n");
return ret;
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3744c594b19b..fcf5a834c4ec 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,73 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config HAS_WAKELOCK
+ bool
+
+config HAS_EARLYSUSPEND
+ bool
+
+config WAKELOCK
+ bool "Wake lock"
+ depends on PM && RTC_CLASS
+ default n
+ select HAS_WAKELOCK
+ ---help---
+ Enable wakelocks. When user space requests a sleep state, the
+ sleep request will be delayed until no wake locks are held.
+
+config WAKELOCK_STAT
+ bool "Wake lock stats"
+ depends on WAKELOCK
+ default y
+ ---help---
+ Report wake lock stats in /proc/wakelocks
+
+config USER_WAKELOCK
+ bool "Userspace wake locks"
+ depends on WAKELOCK
+ default y
+ ---help---
+ User-space wake lock API. Write "lockname" or "lockname timeout"
+ to /sys/power/wake_lock to lock and, if needed, create a wake lock.
+ Write "lockname" to /sys/power/wake_unlock to unlock a user wake
+ lock.
+
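A user-space sketch of the interface described above (error handling omitted; the optional timeout form is not shown):

    int lock_fd = open("/sys/power/wake_lock", O_WRONLY);
    int unlock_fd = open("/sys/power/wake_unlock", O_WRONLY);

    write(lock_fd, "mylock", 6);    /* create (if needed) and take "mylock" */
    /* ... work that must finish before the system may suspend ... */
    write(unlock_fd, "mylock", 6);  /* release it */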
+config EARLYSUSPEND
+ bool "Early suspend"
+ depends on WAKELOCK
+ default y
+ select HAS_EARLYSUSPEND
+ ---help---
+ Call early suspend handlers when the user-requested sleep state
+ changes.
+
+choice
+ prompt "User-space screen access"
+ default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
+ default CONSOLE_EARLYSUSPEND
+ depends on HAS_EARLYSUSPEND
+
+ config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
+ bool "None"
+
+ config CONSOLE_EARLYSUSPEND
+ bool "Console switch on early-suspend"
+ depends on HAS_EARLYSUSPEND && VT
+ ---help---
+ Register an early suspend handler that performs a console switch
+ when user-space should stop drawing to the screen, and switches
+ back when it should resume.
+
+ config FB_EARLYSUSPEND
+ bool "Sysfs interface"
+ depends on HAS_EARLYSUSPEND
+ ---help---
+ Register an early suspend handler that notifies user-space through
+ sysfs when it should stop drawing to the screen, waits for it to
+ acknowledge, and notifies it again when it should resume.
+endchoice
+
config HIBERNATE_CALLBACKS
bool
@@ -235,3 +302,10 @@ config PM_GENERIC_DOMAINS
config PM_GENERIC_DOMAINS_RUNTIME
def_bool y
depends on PM_RUNTIME && PM_GENERIC_DOMAINS
+
+config SUSPEND_TIME
+ bool "Log time spent in suspend"
+ ---help---
+ Prints the time spent in suspend in the kernel log, and
+ keeps statistics on the time spent in suspend in
+ /sys/kernel/debug/suspend_time
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c5ebc6a90643..9b224e16b191 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,5 +8,11 @@ obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
block_io.o
+obj-$(CONFIG_WAKELOCK) += wakelock.o
+obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o
+obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o
+obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o
+obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o
+obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c
new file mode 100644
index 000000000000..a3edcb267389
--- /dev/null
+++ b/kernel/power/consoleearlysuspend.c
@@ -0,0 +1,78 @@
+/* kernel/power/consoleearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/earlysuspend.h>
+#include <linux/kbd_kern.h>
+#include <linux/module.h>
+#include <linux/vt_kern.h>
+#include <linux/wait.h>
+
+#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
+
+static int orig_fgconsole;
+static void console_early_suspend(struct early_suspend *h)
+{
+ acquire_console_sem();
+ orig_fgconsole = fg_console;
+ if (vc_allocate(EARLY_SUSPEND_CONSOLE))
+ goto err;
+ if (set_console(EARLY_SUSPEND_CONSOLE))
+ goto err;
+ release_console_sem();
+
+ if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1))
+ pr_warning("console_early_suspend: Can't switch VCs.\n");
+ return;
+err:
+ pr_warning("console_early_suspend: Can't set console\n");
+ release_console_sem();
+}
+
+static void console_late_resume(struct early_suspend *h)
+{
+ int ret;
+ acquire_console_sem();
+ ret = set_console(orig_fgconsole);
+ release_console_sem();
+ if (ret) {
+ pr_warning("console_late_resume: Can't set console.\n");
+ return;
+ }
+
+ if (vt_waitactive(orig_fgconsole + 1))
+ pr_warning("console_late_resume: Can't switch VCs.\n");
+}
+
+static struct early_suspend console_early_suspend_desc = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = console_early_suspend,
+ .resume = console_late_resume,
+};
+
+static int __init console_early_suspend_init(void)
+{
+ register_early_suspend(&console_early_suspend_desc);
+ return 0;
+}
+
+static void __exit console_early_suspend_exit(void)
+{
+ unregister_early_suspend(&console_early_suspend_desc);
+}
+
+module_init(console_early_suspend_init);
+module_exit(console_early_suspend_exit);
+
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
new file mode 100644
index 000000000000..b15f02eba45c
--- /dev/null
+++ b/kernel/power/earlysuspend.c
@@ -0,0 +1,187 @@
+/* kernel/power/earlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/wakelock.h>
+#include <linux/workqueue.h>
+
+#include "power.h"
+
+enum {
+ DEBUG_USER_STATE = 1U << 0,
+ DEBUG_SUSPEND = 1U << 2,
+ DEBUG_VERBOSE = 1U << 3,
+};
+static int debug_mask = DEBUG_USER_STATE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(early_suspend_lock);
+static LIST_HEAD(early_suspend_handlers);
+static void early_suspend(struct work_struct *work);
+static void late_resume(struct work_struct *work);
+static DECLARE_WORK(early_suspend_work, early_suspend);
+static DECLARE_WORK(late_resume_work, late_resume);
+static DEFINE_SPINLOCK(state_lock);
+enum {
+ SUSPEND_REQUESTED = 0x1,
+ SUSPENDED = 0x2,
+ SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
+};
+static int state;
+
+void register_early_suspend(struct early_suspend *handler)
+{
+ struct list_head *pos;
+
+ mutex_lock(&early_suspend_lock);
+ list_for_each(pos, &early_suspend_handlers) {
+ struct early_suspend *e;
+ e = list_entry(pos, struct early_suspend, link);
+ if (e->level > handler->level)
+ break;
+ }
+ list_add_tail(&handler->link, pos);
+ if ((state & SUSPENDED) && handler->suspend)
+ handler->suspend(handler);
+ mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(register_early_suspend);
+
+void unregister_early_suspend(struct early_suspend *handler)
+{
+ mutex_lock(&early_suspend_lock);
+ list_del(&handler->link);
+ mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(unregister_early_suspend);
+
+static void early_suspend(struct work_struct *work)
+{
+ struct early_suspend *pos;
+ unsigned long irqflags;
+ int abort = 0;
+
+ mutex_lock(&early_suspend_lock);
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPEND_REQUESTED)
+ state |= SUSPENDED;
+ else
+ abort = 1;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ if (abort) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: abort, state %d\n", state);
+ mutex_unlock(&early_suspend_lock);
+ goto abort;
+ }
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: call handlers\n");
+ list_for_each_entry(pos, &early_suspend_handlers, link) {
+ if (pos->suspend != NULL) {
+ if (debug_mask & DEBUG_VERBOSE)
+ pr_info("early_suspend: calling %pf\n", pos->suspend);
+ pos->suspend(pos);
+ }
+ }
+ mutex_unlock(&early_suspend_lock);
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: sync\n");
+
+ sys_sync();
+abort:
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
+ wake_unlock(&main_wake_lock);
+ spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
+static void late_resume(struct work_struct *work)
+{
+ struct early_suspend *pos;
+ unsigned long irqflags;
+ int abort = 0;
+
+ mutex_lock(&early_suspend_lock);
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPENDED)
+ state &= ~SUSPENDED;
+ else
+ abort = 1;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ if (abort) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: abort, state %d\n", state);
+ goto abort;
+ }
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: call handlers\n");
+ list_for_each_entry_reverse(pos, &early_suspend_handlers, link) {
+ if (pos->resume != NULL) {
+ if (debug_mask & DEBUG_VERBOSE)
+ pr_info("late_resume: calling %pf\n", pos->resume);
+
+ pos->resume(pos);
+ }
+ }
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: done\n");
+abort:
+ mutex_unlock(&early_suspend_lock);
+}
+
+void request_suspend_state(suspend_state_t new_state)
+{
+ unsigned long irqflags;
+ int old_sleep;
+
+ spin_lock_irqsave(&state_lock, irqflags);
+ old_sleep = state & SUSPEND_REQUESTED;
+ if (debug_mask & DEBUG_USER_STATE) {
+ struct timespec ts;
+ struct rtc_time tm;
+ getnstimeofday(&ts);
+ rtc_time_to_tm(ts.tv_sec, &tm);
+ pr_info("request_suspend_state: %s (%d->%d) at %lld "
+ "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
+ new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
+ requested_suspend_state, new_state,
+ ktime_to_ns(ktime_get()),
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+ }
+ if (!old_sleep && new_state != PM_SUSPEND_ON) {
+ state |= SUSPEND_REQUESTED;
+ queue_work(suspend_work_queue, &early_suspend_work);
+ } else if (old_sleep && new_state == PM_SUSPEND_ON) {
+ state &= ~SUSPEND_REQUESTED;
+ wake_lock(&main_wake_lock);
+ queue_work(suspend_work_queue, &late_resume_work);
+ }
+ requested_suspend_state = new_state;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
+suspend_state_t get_suspend_state(void)
+{
+ return requested_suspend_state;
+}
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 000000000000..15137650149c
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,153 @@
+/* kernel/power/fbearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+
+#include "power.h"
+
+static wait_queue_head_t fb_state_wq;
+static DEFINE_SPINLOCK(fb_state_lock);
+static enum {
+ FB_STATE_STOPPED_DRAWING,
+ FB_STATE_REQUEST_STOP_DRAWING,
+ FB_STATE_DRAWING_OK,
+} fb_state;
+
+/* tell userspace to stop drawing, wait for it to stop */
+static void stop_drawing_early_suspend(struct early_suspend *h)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = FB_STATE_REQUEST_STOP_DRAWING;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ wake_up_all(&fb_state_wq);
+ ret = wait_event_timeout(fb_state_wq,
+ fb_state == FB_STATE_STOPPED_DRAWING,
+ HZ);
+ if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
+ pr_warning("stop_drawing_early_suspend: timeout waiting for "
+ "userspace to stop drawing\n");
+}
+
+/* tell userspace to start drawing */
+static void start_drawing_late_resume(struct early_suspend *h)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = FB_STATE_DRAWING_OK;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+ wake_up(&fb_state_wq);
+}
+
+static struct early_suspend stop_drawing_early_suspend_desc = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = stop_drawing_early_suspend,
+ .resume = start_drawing_late_resume,
+};
+
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state != FB_STATE_DRAWING_OK);
+ if (ret && fb_state == FB_STATE_DRAWING_OK)
+ return ret;
+ else
+ s += sprintf(buf, "sleeping");
+ return s - buf;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
+ fb_state = FB_STATE_STOPPED_DRAWING;
+ wake_up(&fb_state_wq);
+ }
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state == FB_STATE_DRAWING_OK);
+ if (ret && fb_state != FB_STATE_DRAWING_OK)
+ return ret;
+ else
+ s += sprintf(buf, "awake");
+
+ return s - buf;
+}
+
+#define power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0444, \
+ }, \
+ .show = _name##_show, \
+ .store = NULL, \
+}
+
+power_ro_attr(wait_for_fb_sleep);
+power_ro_attr(wait_for_fb_wake);
+
+static struct attribute *g[] = {
+ &wait_for_fb_sleep_attr.attr,
+ &wait_for_fb_wake_attr.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = g,
+};
+
+static int __init android_power_init(void)
+{
+ int ret;
+
+ init_waitqueue_head(&fb_state_wq);
+ fb_state = FB_STATE_DRAWING_OK;
+
+ ret = sysfs_create_group(power_kobj, &attr_group);
+ if (ret) {
+ pr_err("android_power_init: sysfs_create_group failed\n");
+ return ret;
+ }
+
+ register_early_suspend(&stop_drawing_early_suspend_desc);
+ return 0;
+}
+
+static void __exit android_power_exit(void)
+{
+ unregister_early_suspend(&stop_drawing_early_suspend_desc);
+ sysfs_remove_group(power_kobj, &attr_group);
+}
+
+module_init(android_power_init);
+module_exit(android_power_exit);
+
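A sketch of the user-space side of the two sysfs files created above, assuming they appear under /sys/power (power_kobj) and with error handling omitted:

    char buf[16];
    int fd;

    fd = open("/sys/power/wait_for_fb_sleep", O_RDONLY);
    read(fd, buf, sizeof(buf));     /* blocks until early suspend, returns "sleeping" */
    close(fd);
    /* ... stop drawing to the framebuffer ... */

    fd = open("/sys/power/wait_for_fb_wake", O_RDONLY);
    read(fd, buf, sizeof(buf));     /* acknowledges the stop, blocks until "awake" */
    close(fd);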
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6c601f871964..3304594553ce 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -171,7 +171,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_EARLYSUSPEND
+ suspend_state_t state = PM_SUSPEND_ON;
+#else
suspend_state_t state = PM_SUSPEND_STANDBY;
+#endif
const char * const *s;
#endif
char *p;
@@ -193,8 +197,15 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
break;
}
if (state < PM_SUSPEND_MAX && *s)
+#ifdef CONFIG_EARLYSUSPEND
+ if (state == PM_SUSPEND_ON || valid_state(state)) {
+ error = 0;
+ request_suspend_state(state);
+ }
+#else
error = enter_state(state);
#endif
+#endif
Exit:
return error ? error : n;
@@ -298,6 +309,11 @@ power_attr(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
+#ifdef CONFIG_USER_WAKELOCK
+power_attr(wake_lock);
+power_attr(wake_unlock);
+#endif
+
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
@@ -310,6 +326,10 @@ static struct attribute * g[] = {
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
+#ifdef CONFIG_USER_WAKELOCK
+ &wake_lock_attr.attr,
+ &wake_unlock_attr.attr,
+#endif
#endif
NULL,
};
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 9a00a0a26280..b6b9006480ff 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -245,3 +245,27 @@ static inline void suspend_thaw_processes(void)
{
}
#endif
+
+#ifdef CONFIG_WAKELOCK
+/* kernel/power/wakelock.c */
+extern struct workqueue_struct *suspend_work_queue;
+extern struct wake_lock main_wake_lock;
+extern suspend_state_t requested_suspend_state;
+#endif
+
+#ifdef CONFIG_USER_WAKELOCK
+ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n);
+ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n);
+#endif
+
+#ifdef CONFIG_EARLYSUSPEND
+/* kernel/power/earlysuspend.c */
+void request_suspend_state(suspend_state_t state);
+suspend_state_t get_suspend_state(void);
+#endif
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0cf3a27a6c9d..31338cdeafc4 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <linux/wakelock.h>
/*
* Timeout for stopping processes
@@ -82,6 +83,10 @@ static int try_to_freeze_tasks(bool sig_only)
todo += wq_busy;
}
+ if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
+ wakeup = 1;
+ break;
+ }
if (!todo || time_after(jiffies, end_time))
break;
@@ -108,19 +113,25 @@ static int try_to_freeze_tasks(bool sig_only)
* and caller must call thaw_processes() if something fails),
* but it cleans up leftover PF_FREEZE requests.
*/
- printk("\n");
- printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
- "(%d tasks refusing to freeze, wq_busy=%d):\n",
- wakeup ? "aborted" : "failed",
- elapsed_csecs / 100, elapsed_csecs % 100,
- todo - wq_busy, wq_busy);
-
+ if (wakeup) {
+ printk("\n");
+ printk(KERN_ERR "Freezing of %s aborted\n",
+ sig_only ? "user space " : "tasks ");
+ }
+ else {
+ printk("\n");
+ printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
+ "(%d tasks refusing to freeze, wq_busy=%d):\n",
+ elapsed_csecs / 100, elapsed_csecs % 100,
+ todo - wq_busy, wq_busy);
+ }
thaw_workqueues();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
- if (!wakeup && freezing(p) && !freezer_should_skip(p))
+ if (freezing(p) && !freezer_should_skip(p) &&
+ elapsed_csecs > 100)
sched_show_task(p);
cancel_freezing(p);
task_unlock(p);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index d3caa7634987..a6f6e3114a24 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -28,6 +28,9 @@
#include "power.h"
const char *const pm_states[PM_SUSPEND_MAX] = {
+#ifdef CONFIG_EARLYSUSPEND
+ [PM_SUSPEND_ON] = "on",
+#endif
[PM_SUSPEND_STANDBY] = "standby",
[PM_SUSPEND_MEM] = "mem",
};
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644
index 000000000000..d2a65da9f22c
--- /dev/null
+++ b/kernel/power/suspend_time.c
@@ -0,0 +1,111 @@
+/*
+ * debugfs file to track time spent in suspend
+ *
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+
+static struct timespec suspend_time_before;
+static unsigned int time_in_suspend_bins[32];
+
+#ifdef CONFIG_DEBUG_FS
+static int suspend_time_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ seq_printf(s, "time (secs) count\n");
+ seq_printf(s, "------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (time_in_suspend_bins[bin] == 0)
+ continue;
+ seq_printf(s, "%4d - %4d %4u\n",
+ bin ? 1 << (bin - 1) : 0, 1 << bin,
+ time_in_suspend_bins[bin]);
+ }
+ return 0;
+}
+
+static int suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_time_debug_show, NULL);
+}
+
+static const struct file_operations suspend_time_debug_fops = {
+ .open = suspend_time_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init suspend_time_debug_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
+ &suspend_time_debug_fops);
+ if (!d) {
+ pr_err("Failed to create suspend_time debug file\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+late_initcall(suspend_time_debug_init);
+#endif
+
+static int suspend_time_syscore_suspend(void)
+{
+ read_persistent_clock(&suspend_time_before);
+
+ return 0;
+}
+
+static void suspend_time_syscore_resume(void)
+{
+ struct timespec after;
+
+ read_persistent_clock(&after);
+
+ after = timespec_sub(after, suspend_time_before);
+
+ time_in_suspend_bins[fls(after.tv_sec)]++;
+
+ pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
+ after.tv_nsec / NSEC_PER_MSEC);
+}
+
+static struct syscore_ops suspend_time_syscore_ops = {
+ .suspend = suspend_time_syscore_suspend,
+ .resume = suspend_time_syscore_resume,
+};
+
+static int suspend_time_syscore_init(void)
+{
+ register_syscore_ops(&suspend_time_syscore_ops);
+
+ return 0;
+}
+
+static void suspend_time_syscore_exit(void)
+{
+ unregister_syscore_ops(&suspend_time_syscore_ops);
+}
+module_init(suspend_time_syscore_init);
+module_exit(suspend_time_syscore_exit);
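suspend_time_syscore_resume() measures each suspend interval with read_persistent_clock() and increments time_in_suspend_bins[fls(after.tv_sec)], so intervals are binned into power-of-two buckets of seconds: bin n counts suspends of roughly 2^(n-1) to 2^n seconds, with bin 0 covering 0 to 1 s. For example, a 37-second suspend has fls(37) = 6 and is counted in the "32 - 64" row. With CONFIG_DEBUG_FS, suspend_time_debug_show() prints one row per non-empty bucket through the suspend_time file at the debugfs root; the counts below are purely illustrative of the format:

time (secs) count
------------------
   0 -    1   12
   4 -    8    3
  32 -   64    1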
diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c
new file mode 100644
index 000000000000..a28a8db41468
--- /dev/null
+++ b/kernel/power/userwakelock.c
@@ -0,0 +1,219 @@
+/* kernel/power/userwakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/wakelock.h>
+#include <linux/slab.h>
+
+#include "power.h"
+
+enum {
+ DEBUG_FAILURE = BIT(0),
+ DEBUG_ERROR = BIT(1),
+ DEBUG_NEW = BIT(2),
+ DEBUG_ACCESS = BIT(3),
+ DEBUG_LOOKUP = BIT(4),
+};
+static int debug_mask = DEBUG_FAILURE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(tree_lock);
+
+struct user_wake_lock {
+ struct rb_node node;
+ struct wake_lock wake_lock;
+ char name[0];
+};
+struct rb_root user_wake_locks;
+
+static struct user_wake_lock *lookup_wake_lock_name(
+ const char *buf, int allocate, long *timeoutptr)
+{
+ struct rb_node **p = &user_wake_locks.rb_node;
+ struct rb_node *parent = NULL;
+ struct user_wake_lock *l;
+ int diff;
+ u64 timeout;
+ int name_len;
+ const char *arg;
+
+ /* Find length of lock name and start of optional timeout string */
+ arg = buf;
+ while (*arg && !isspace(*arg))
+ arg++;
+ name_len = arg - buf;
+ if (!name_len)
+ goto bad_arg;
+ while (isspace(*arg))
+ arg++;
+
+ /* Process timeout string */
+ if (timeoutptr && *arg) {
+ timeout = simple_strtoull(arg, (char **)&arg, 0);
+ while (isspace(*arg))
+ arg++;
+ if (*arg)
+ goto bad_arg;
+ /* convert timeout from nanoseconds to jiffies > 0 */
+ timeout += (NSEC_PER_SEC / HZ) - 1;
+ do_div(timeout, (NSEC_PER_SEC / HZ));
+ if (timeout <= 0)
+ timeout = 1;
+ *timeoutptr = timeout;
+ } else if (*arg)
+ goto bad_arg;
+ else if (timeoutptr)
+ *timeoutptr = 0;
+
+ /* Lookup wake lock in rbtree */
+ while (*p) {
+ parent = *p;
+ l = rb_entry(parent, struct user_wake_lock, node);
+ diff = strncmp(buf, l->name, name_len);
+ if (!diff && l->name[name_len])
+ diff = -1;
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: compare %.*s %s %d\n",
+ name_len, buf, l->name, diff);
+
+ if (diff < 0)
+ p = &(*p)->rb_left;
+ else if (diff > 0)
+ p = &(*p)->rb_right;
+ else
+ return l;
+ }
+
+ /* Allocate and add new wakelock to rbtree */
+ if (!allocate) {
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: %.*s not found\n",
+ name_len, buf);
+ return ERR_PTR(-EINVAL);
+ }
+ l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL);
+ if (l == NULL) {
+ if (debug_mask & DEBUG_FAILURE)
+ pr_err("lookup_wake_lock_name: failed to allocate "
+ "memory for %.*s\n", name_len, buf);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(l->name, buf, name_len);
+ if (debug_mask & DEBUG_NEW)
+ pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name);
+ wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name);
+ rb_link_node(&l->node, parent, p);
+ rb_insert_color(&l->node, &user_wake_locks);
+ return l;
+
+bad_arg:
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n",
+ name_len, buf, arg);
+ return ERR_PTR(-EINVAL);
+}
+
+ssize_t wake_lock_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ char *end = buf + PAGE_SIZE;
+ struct rb_node *n;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+
+ for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+ l = rb_entry(n, struct user_wake_lock, node);
+ if (wake_lock_active(&l->wake_lock))
+ s += scnprintf(s, end - s, "%s ", l->name);
+ }
+ s += scnprintf(s, end - s, "\n");
+
+ mutex_unlock(&tree_lock);
+ return (s - buf);
+}
+
+ssize_t wake_lock_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ long timeout;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+ l = lookup_wake_lock_name(buf, 1, &timeout);
+ if (IS_ERR(l)) {
+ n = PTR_ERR(l);
+ goto bad_name;
+ }
+
+ if (debug_mask & DEBUG_ACCESS)
+ pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout);
+
+ if (timeout)
+ wake_lock_timeout(&l->wake_lock, timeout);
+ else
+ wake_lock(&l->wake_lock);
+bad_name:
+ mutex_unlock(&tree_lock);
+ return n;
+}
+
+
+ssize_t wake_unlock_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ char *end = buf + PAGE_SIZE;
+ struct rb_node *n;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+
+ for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+ l = rb_entry(n, struct user_wake_lock, node);
+ if (!wake_lock_active(&l->wake_lock))
+ s += scnprintf(s, end - s, "%s ", l->name);
+ }
+ s += scnprintf(s, end - s, "\n");
+
+ mutex_unlock(&tree_lock);
+ return (s - buf);
+}
+
+ssize_t wake_unlock_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+ l = lookup_wake_lock_name(buf, 0, NULL);
+ if (IS_ERR(l)) {
+ n = PTR_ERR(l);
+ goto not_found;
+ }
+
+ if (debug_mask & DEBUG_ACCESS)
+ pr_info("wake_unlock_store: %s\n", l->name);
+
+ wake_unlock(&l->wake_lock);
+not_found:
+ mutex_unlock(&tree_lock);
+ return n;
+}
+
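From userspace, a wake lock is taken by writing "name" or "name timeout" to the wake_lock attribute and released by writing "name" to wake_unlock; lookup_wake_lock_name() above parses the optional timeout as nanoseconds and rounds it up to at least one jiffy. Reading wake_lock lists the currently active user wake locks, and reading wake_unlock lists the inactive ones. A hypothetical userspace sketch, assuming the attributes are exposed as /sys/power/wake_lock and /sys/power/wake_unlock (that wiring is not in this file):

/* Hypothetical userspace sketch: hold a user wake lock named "net_sync"
 * for 500 ms, then release it explicitly. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, s, strlen(s)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	/* "name timeout_ns": 500000000 ns is converted to jiffies by the store handler */
	write_str("/sys/power/wake_lock", "net_sync 500000000");
	/* ... work that must finish before the device is allowed to suspend ... */
	write_str("/sys/power/wake_unlock", "net_sync");
	return 0;
}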
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
new file mode 100644
index 000000000000..81e1b7c65ca1
--- /dev/null
+++ b/kernel/power/wakelock.c
@@ -0,0 +1,634 @@
+/* kernel/power/wakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/wakelock.h>
+#ifdef CONFIG_WAKELOCK_STAT
+#include <linux/proc_fs.h>
+#endif
+#include "power.h"
+
+enum {
+ DEBUG_EXIT_SUSPEND = 1U << 0,
+ DEBUG_WAKEUP = 1U << 1,
+ DEBUG_SUSPEND = 1U << 2,
+ DEBUG_EXPIRE = 1U << 3,
+ DEBUG_WAKE_LOCK = 1U << 4,
+};
+static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define WAKE_LOCK_TYPE_MASK (0x0f)
+#define WAKE_LOCK_INITIALIZED (1U << 8)
+#define WAKE_LOCK_ACTIVE (1U << 9)
+#define WAKE_LOCK_AUTO_EXPIRE (1U << 10)
+#define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11)
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(inactive_locks);
+static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT];
+static int current_event_num;
+struct workqueue_struct *suspend_work_queue;
+struct wake_lock main_wake_lock;
+suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;
+static struct wake_lock unknown_wakeup;
+static struct wake_lock suspend_backoff_lock;
+
+#define SUSPEND_BACKOFF_THRESHOLD 10
+#define SUSPEND_BACKOFF_INTERVAL 10000
+
+static unsigned suspend_short_count;
+
+#ifdef CONFIG_WAKELOCK_STAT
+static struct wake_lock deleted_wake_locks;
+static ktime_t last_sleep_time_update;
+static int wait_for_wakeup;
+
+int get_expired_time(struct wake_lock *lock, ktime_t *expire_time)
+{
+ struct timespec ts;
+ struct timespec kt;
+ struct timespec tomono;
+ struct timespec delta;
+ struct timespec sleep;
+ long timeout;
+
+ if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE))
+ return 0;
+ get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep);
+ timeout = lock->expires - jiffies;
+ if (timeout > 0)
+ return 0;
+ jiffies_to_timespec(-timeout, &delta);
+ set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec,
+ kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec);
+ *expire_time = timespec_to_ktime(ts);
+ return 1;
+}
+
+
+static int print_lock_stat(struct seq_file *m, struct wake_lock *lock)
+{
+ int lock_count = lock->stat.count;
+ int expire_count = lock->stat.expire_count;
+ ktime_t active_time = ktime_set(0, 0);
+ ktime_t total_time = lock->stat.total_time;
+ ktime_t max_time = lock->stat.max_time;
+
+ ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time;
+ if (lock->flags & WAKE_LOCK_ACTIVE) {
+ ktime_t now, add_time;
+ int expired = get_expired_time(lock, &now);
+ if (!expired)
+ now = ktime_get();
+ add_time = ktime_sub(now, lock->stat.last_time);
+ lock_count++;
+ if (!expired)
+ active_time = add_time;
+ else
+ expire_count++;
+ total_time = ktime_add(total_time, add_time);
+ if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND)
+ prevent_suspend_time = ktime_add(prevent_suspend_time,
+ ktime_sub(now, last_sleep_time_update));
+ if (add_time.tv64 > max_time.tv64)
+ max_time = add_time;
+ }
+
+ return seq_printf(m,
+ "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n",
+ lock->name, lock_count, expire_count,
+ lock->stat.wakeup_count, ktime_to_ns(active_time),
+ ktime_to_ns(total_time),
+ ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time),
+ ktime_to_ns(lock->stat.last_time));
+}
+
+static int wakelock_stats_show(struct seq_file *m, void *unused)
+{
+ unsigned long irqflags;
+ struct wake_lock *lock;
+ int ret;
+ int type;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+
+ ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since"
+ "\ttotal_time\tsleep_time\tmax_time\tlast_change\n");
+ list_for_each_entry(lock, &inactive_locks, link)
+ ret = print_lock_stat(m, lock);
+ for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) {
+ list_for_each_entry(lock, &active_wake_locks[type], link)
+ ret = print_lock_stat(m, lock);
+ }
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return 0;
+}
+
+static void wake_unlock_stat_locked(struct wake_lock *lock, int expired)
+{
+ ktime_t duration;
+ ktime_t now;
+ if (!(lock->flags & WAKE_LOCK_ACTIVE))
+ return;
+ if (get_expired_time(lock, &now))
+ expired = 1;
+ else
+ now = ktime_get();
+ lock->stat.count++;
+ if (expired)
+ lock->stat.expire_count++;
+ duration = ktime_sub(now, lock->stat.last_time);
+ lock->stat.total_time = ktime_add(lock->stat.total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time))
+ lock->stat.max_time = duration;
+ lock->stat.last_time = ktime_get();
+ if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
+ duration = ktime_sub(now, last_sleep_time_update);
+ lock->stat.prevent_suspend_time = ktime_add(
+ lock->stat.prevent_suspend_time, duration);
+ lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
+ }
+}
+
+static void update_sleep_wait_stats_locked(int done)
+{
+ struct wake_lock *lock;
+ ktime_t now, etime, elapsed, add;
+ int expired;
+
+ now = ktime_get();
+ elapsed = ktime_sub(now, last_sleep_time_update);
+ list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) {
+ expired = get_expired_time(lock, &etime);
+ if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
+ if (expired)
+ add = ktime_sub(etime, last_sleep_time_update);
+ else
+ add = elapsed;
+ lock->stat.prevent_suspend_time = ktime_add(
+ lock->stat.prevent_suspend_time, add);
+ }
+ if (done || expired)
+ lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
+ else
+ lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND;
+ }
+ last_sleep_time_update = now;
+}
+#endif
+
+
+static void expire_wake_lock(struct wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_unlock_stat_locked(lock, 1);
+#endif
+ lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
+ list_del(&lock->link);
+ list_add(&lock->link, &inactive_locks);
+ if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE))
+ pr_info("expired wake lock %s\n", lock->name);
+}
+
+/* Caller must acquire the list_lock spinlock */
+static void print_active_locks(int type)
+{
+ struct wake_lock *lock;
+ bool print_expired = true;
+
+ BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+ list_for_each_entry(lock, &active_wake_locks[type], link) {
+ if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
+ long timeout = lock->expires - jiffies;
+ if (timeout > 0)
+ pr_info("active wake lock %s, time left %ld\n",
+ lock->name, timeout);
+ else if (print_expired)
+ pr_info("wake lock %s, expired\n", lock->name);
+ } else {
+ pr_info("active wake lock %s\n", lock->name);
+ if (!(debug_mask & DEBUG_EXPIRE))
+ print_expired = false;
+ }
+ }
+}
+
+static long has_wake_lock_locked(int type)
+{
+ struct wake_lock *lock, *n;
+ long max_timeout = 0;
+
+ BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+ list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) {
+ if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
+ long timeout = lock->expires - jiffies;
+ if (timeout <= 0)
+ expire_wake_lock(lock);
+ else if (timeout > max_timeout)
+ max_timeout = timeout;
+ } else
+ return -1;
+ }
+ return max_timeout;
+}
+
+long has_wake_lock(int type)
+{
+ long ret;
+ unsigned long irqflags;
+ spin_lock_irqsave(&list_lock, irqflags);
+ ret = has_wake_lock_locked(type);
+ if (ret && (debug_mask & DEBUG_WAKEUP) && type == WAKE_LOCK_SUSPEND)
+ print_active_locks(type);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return ret;
+}
+
+static void suspend_backoff(void)
+{
+ pr_info("suspend: too many immediate wakeups, back off\n");
+ wake_lock_timeout(&suspend_backoff_lock,
+ msecs_to_jiffies(SUSPEND_BACKOFF_INTERVAL));
+}
+
+static void suspend(struct work_struct *work)
+{
+ int ret;
+ int entry_event_num;
+ struct timespec ts_entry, ts_exit;
+
+ if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("suspend: abort suspend\n");
+ return;
+ }
+
+ entry_event_num = current_event_num;
+ sys_sync();
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("suspend: enter suspend\n");
+ getnstimeofday(&ts_entry);
+ ret = pm_suspend(requested_suspend_state);
+ getnstimeofday(&ts_exit);
+
+ if (debug_mask & DEBUG_EXIT_SUSPEND) {
+ struct rtc_time tm;
+ rtc_time_to_tm(ts_exit.tv_sec, &tm);
+ pr_info("suspend: exit suspend, ret = %d "
+ "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret,
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec);
+ }
+
+ if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) {
+ ++suspend_short_count;
+
+ if (suspend_short_count == SUSPEND_BACKOFF_THRESHOLD) {
+ suspend_backoff();
+ suspend_short_count = 0;
+ }
+ } else {
+ suspend_short_count = 0;
+ }
+
+ if (current_event_num == entry_event_num) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("suspend: pm_suspend returned with no event\n");
+ wake_lock_timeout(&unknown_wakeup, HZ / 2);
+ }
+}
+static DECLARE_WORK(suspend_work, suspend);
+
+static void expire_wake_locks(unsigned long data)
+{
+ long has_lock;
+ unsigned long irqflags;
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("expire_wake_locks: start\n");
+ spin_lock_irqsave(&list_lock, irqflags);
+ if (debug_mask & DEBUG_SUSPEND)
+ print_active_locks(WAKE_LOCK_SUSPEND);
+ has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND);
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock);
+ if (has_lock == 0)
+ queue_work(suspend_work_queue, &suspend_work);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0);
+
+static int power_suspend_late(struct device *dev)
+{
+ int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0;
+#ifdef CONFIG_WAKELOCK_STAT
+ wait_for_wakeup = !ret;
+#endif
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("power_suspend_late return %d\n", ret);
+ return ret;
+}
+
+static struct dev_pm_ops power_driver_pm_ops = {
+ .suspend_noirq = power_suspend_late,
+};
+
+static struct platform_driver power_driver = {
+ .driver.name = "power",
+ .driver.pm = &power_driver_pm_ops,
+};
+static struct platform_device power_device = {
+ .name = "power",
+};
+
+void wake_lock_init(struct wake_lock *lock, int type, const char *name)
+{
+ unsigned long irqflags = 0;
+
+ if (name)
+ lock->name = name;
+ BUG_ON(!lock->name);
+
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock_init name=%s\n", lock->name);
+#ifdef CONFIG_WAKELOCK_STAT
+ lock->stat.count = 0;
+ lock->stat.expire_count = 0;
+ lock->stat.wakeup_count = 0;
+ lock->stat.total_time = ktime_set(0, 0);
+ lock->stat.prevent_suspend_time = ktime_set(0, 0);
+ lock->stat.max_time = ktime_set(0, 0);
+ lock->stat.last_time = ktime_set(0, 0);
+#endif
+ lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED;
+
+ INIT_LIST_HEAD(&lock->link);
+ spin_lock_irqsave(&list_lock, irqflags);
+ list_add(&lock->link, &inactive_locks);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_lock_init);
+
+void wake_lock_destroy(struct wake_lock *lock)
+{
+ unsigned long irqflags;
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock_destroy name=%s\n", lock->name);
+ spin_lock_irqsave(&list_lock, irqflags);
+ lock->flags &= ~WAKE_LOCK_INITIALIZED;
+#ifdef CONFIG_WAKELOCK_STAT
+ if (lock->stat.count) {
+ deleted_wake_locks.stat.count += lock->stat.count;
+ deleted_wake_locks.stat.expire_count += lock->stat.expire_count;
+ deleted_wake_locks.stat.total_time =
+ ktime_add(deleted_wake_locks.stat.total_time,
+ lock->stat.total_time);
+ deleted_wake_locks.stat.prevent_suspend_time =
+ ktime_add(deleted_wake_locks.stat.prevent_suspend_time,
+ lock->stat.prevent_suspend_time);
+ deleted_wake_locks.stat.max_time =
+ ktime_add(deleted_wake_locks.stat.max_time,
+ lock->stat.max_time);
+ }
+#endif
+ list_del(&lock->link);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_lock_destroy);
+
+static void wake_lock_internal(
+ struct wake_lock *lock, long timeout, int has_timeout)
+{
+ int type;
+ unsigned long irqflags;
+ long expire_in;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+ type = lock->flags & WAKE_LOCK_TYPE_MASK;
+ BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+ BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED));
+#ifdef CONFIG_WAKELOCK_STAT
+ if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) {
+ if (debug_mask & DEBUG_WAKEUP)
+ pr_info("wakeup wake lock: %s\n", lock->name);
+ wait_for_wakeup = 0;
+ lock->stat.wakeup_count++;
+ }
+ if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) &&
+ (long)(lock->expires - jiffies) <= 0) {
+ wake_unlock_stat_locked(lock, 0);
+ lock->stat.last_time = ktime_get();
+ }
+#endif
+ if (!(lock->flags & WAKE_LOCK_ACTIVE)) {
+ lock->flags |= WAKE_LOCK_ACTIVE;
+#ifdef CONFIG_WAKELOCK_STAT
+ lock->stat.last_time = ktime_get();
+#endif
+ }
+ list_del(&lock->link);
+ if (has_timeout) {
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n",
+ lock->name, type, timeout / HZ,
+ (timeout % HZ) * MSEC_PER_SEC / HZ);
+ lock->expires = jiffies + timeout;
+ lock->flags |= WAKE_LOCK_AUTO_EXPIRE;
+ list_add_tail(&lock->link, &active_wake_locks[type]);
+ } else {
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock: %s, type %d\n", lock->name, type);
+ lock->expires = LONG_MAX;
+ lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE;
+ list_add(&lock->link, &active_wake_locks[type]);
+ }
+ if (type == WAKE_LOCK_SUSPEND) {
+ current_event_num++;
+#ifdef CONFIG_WAKELOCK_STAT
+ if (lock == &main_wake_lock)
+ update_sleep_wait_stats_locked(1);
+ else if (!wake_lock_active(&main_wake_lock))
+ update_sleep_wait_stats_locked(0);
+#endif
+ if (has_timeout)
+ expire_in = has_wake_lock_locked(type);
+ else
+ expire_in = -1;
+ if (expire_in > 0) {
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_lock: %s, start expire timer, "
+ "%ld\n", lock->name, expire_in);
+ mod_timer(&expire_timer, jiffies + expire_in);
+ } else {
+ if (del_timer(&expire_timer))
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_lock: %s, stop expire timer\n",
+ lock->name);
+ if (expire_in == 0)
+ queue_work(suspend_work_queue, &suspend_work);
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+
+void wake_lock(struct wake_lock *lock)
+{
+ wake_lock_internal(lock, 0, 0);
+}
+EXPORT_SYMBOL(wake_lock);
+
+void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+ wake_lock_internal(lock, timeout, 1);
+}
+EXPORT_SYMBOL(wake_lock_timeout);
+
+void wake_unlock(struct wake_lock *lock)
+{
+ int type;
+ unsigned long irqflags;
+ spin_lock_irqsave(&list_lock, irqflags);
+ type = lock->flags & WAKE_LOCK_TYPE_MASK;
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_unlock_stat_locked(lock, 0);
+#endif
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_unlock: %s\n", lock->name);
+ lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
+ list_del(&lock->link);
+ list_add(&lock->link, &inactive_locks);
+ if (type == WAKE_LOCK_SUSPEND) {
+ long has_lock = has_wake_lock_locked(type);
+ if (has_lock > 0) {
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_unlock: %s, start expire timer, "
+ "%ld\n", lock->name, has_lock);
+ mod_timer(&expire_timer, jiffies + has_lock);
+ } else {
+ if (del_timer(&expire_timer))
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_unlock: %s, stop expire "
+ "timer\n", lock->name);
+ if (has_lock == 0)
+ queue_work(suspend_work_queue, &suspend_work);
+ }
+ if (lock == &main_wake_lock) {
+ if (debug_mask & DEBUG_SUSPEND)
+ print_active_locks(WAKE_LOCK_SUSPEND);
+#ifdef CONFIG_WAKELOCK_STAT
+ update_sleep_wait_stats_locked(0);
+#endif
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_unlock);
+
+int wake_lock_active(struct wake_lock *lock)
+{
+ return !!(lock->flags & WAKE_LOCK_ACTIVE);
+}
+EXPORT_SYMBOL(wake_lock_active);
+
+static int wakelock_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wakelock_stats_show, NULL);
+}
+
+static const struct file_operations wakelock_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = wakelock_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init wakelocks_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++)
+ INIT_LIST_HEAD(&active_wake_locks[i]);
+
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND,
+ "deleted_wake_locks");
+#endif
+ wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main");
+ wake_lock(&main_wake_lock);
+ wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups");
+ wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND,
+ "suspend_backoff");
+
+ ret = platform_device_register(&power_device);
+ if (ret) {
+ pr_err("wakelocks_init: platform_device_register failed\n");
+ goto err_platform_device_register;
+ }
+ ret = platform_driver_register(&power_driver);
+ if (ret) {
+ pr_err("wakelocks_init: platform_driver_register failed\n");
+ goto err_platform_driver_register;
+ }
+
+ suspend_work_queue = create_singlethread_workqueue("suspend");
+ if (suspend_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_suspend_work_queue;
+ }
+
+#ifdef CONFIG_WAKELOCK_STAT
+ proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops);
+#endif
+
+ return 0;
+
+err_suspend_work_queue:
+ platform_driver_unregister(&power_driver);
+err_platform_driver_register:
+ platform_device_unregister(&power_device);
+err_platform_device_register:
+ wake_lock_destroy(&suspend_backoff_lock);
+ wake_lock_destroy(&unknown_wakeup);
+ wake_lock_destroy(&main_wake_lock);
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_lock_destroy(&deleted_wake_locks);
+#endif
+ return ret;
+}
+
+static void __exit wakelocks_exit(void)
+{
+#ifdef CONFIG_WAKELOCK_STAT
+ remove_proc_entry("wakelocks", NULL);
+#endif
+ destroy_workqueue(suspend_work_queue);
+ platform_driver_unregister(&power_driver);
+ platform_device_unregister(&power_device);
+ wake_lock_destroy(&suspend_backoff_lock);
+ wake_lock_destroy(&unknown_wakeup);
+ wake_lock_destroy(&main_wake_lock);
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_lock_destroy(&deleted_wake_locks);
+#endif
+}
+
+core_initcall(wakelocks_init);
+module_exit(wakelocks_exit);
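For kernel drivers, the API exported above (via <linux/wakelock.h>) follows the usual init/lock/unlock/destroy pattern; wake_lock() holds the system out of suspend indefinitely, while wake_lock_timeout() arms the auto-expire path driven by expire_timer. The following is an illustrative sketch, not code from this patch:

/* Illustrative driver-side sketch using the wakelock API exported above. */
#include <linux/interrupt.h>
#include <linux/wakelock.h>

static struct wake_lock my_wake_lock;

static int my_driver_probe(void)
{
	wake_lock_init(&my_wake_lock, WAKE_LOCK_SUSPEND, "my_driver");
	return 0;
}

static irqreturn_t my_driver_irq(int irq, void *dev)
{
	/* Keep the system awake for up to 2 s while the event is processed;
	 * the lock auto-expires if my_driver_work_done() never runs. */
	wake_lock_timeout(&my_wake_lock, 2 * HZ);
	return IRQ_HANDLED;
}

static void my_driver_work_done(void)
{
	wake_unlock(&my_wake_lock);
}

static void my_driver_remove(void)
{
	wake_lock_destroy(&my_wake_lock);
}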
diff --git a/kernel/printk.c b/kernel/printk.c
index 28a40d8171b8..1baace7d8674 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -53,6 +53,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#ifdef CONFIG_DEBUG_LL
+extern void printascii(char *);
+#endif
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -290,6 +294,53 @@ static inline void boot_delay_msec(void)
}
#endif
+/*
+ * Return the number of unread characters in the log buffer.
+ */
+static int log_buf_get_len(void)
+{
+ return logged_chars;
+}
+
+/*
+ * Clears the ring-buffer
+ */
+void log_buf_clear(void)
+{
+ logged_chars = 0;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+ int ret, max;
+ bool took_lock = false;
+
+ if (!oops_in_progress) {
+ spin_lock_irq(&logbuf_lock);
+ took_lock = true;
+ }
+
+ max = log_buf_get_len();
+ if (idx < 0 || idx >= max) {
+ ret = -1;
+ } else {
+ if (len > max - idx)
+ len = max - idx;
+ ret = len;
+ idx += (log_end - max);
+ while (len-- > 0)
+ dest[len] = LOG_BUF(idx + len);
+ }
+
+ if (took_lock)
+ spin_unlock_irq(&logbuf_lock);
+
+ return ret;
+}
+
#ifdef CONFIG_SECURITY_DMESG_RESTRICT
int dmesg_restrict = 1;
#else
@@ -876,6 +927,10 @@ asmlinkage int vprintk(const char *fmt, va_list args)
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
+#ifdef CONFIG_DEBUG_LL
+ printascii(printk_buf);
+#endif
+
p = printk_buf;
/* Read log level and handle special printk prefix */
@@ -1150,7 +1205,6 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_ONLINE:
case CPU_DEAD:
- case CPU_DYING:
case CPU_DOWN_FAILED:
case CPU_UP_CANCELED:
console_lock();
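log_buf_copy() above copies up to len bytes of the kernel log starting at offset idx and returns the number copied, or -1 once idx is past the end; log_buf_clear() resets the logged-character count. A consumer (for example a crash or debugger facility) can drain the log in chunks; this is an illustrative in-kernel sketch, with the extern declarations repeated because the header change is not part of this hunk:

/* Illustrative sketch: drain the printk log through log_buf_copy().
 * Chunk size and the consume() callback are placeholders. */
extern int log_buf_copy(char *dest, int idx, int len);
extern void log_buf_clear(void);

static void drain_kernel_log(void (*consume)(const char *buf, int len))
{
	static char chunk[512];
	int idx = 0;
	int n;

	while ((n = log_buf_copy(chunk, idx, sizeof(chunk))) > 0) {
		consume(chunk, n);
		idx += n;
	}
	/* Optionally forget what was read so the next pass starts fresh. */
	log_buf_clear();
}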
diff --git a/kernel/sched.c b/kernel/sched.c
index b50b0f0c9aa9..5525f209ebdf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
+#include <linux/cpuacct.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
@@ -6510,7 +6511,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
+ case CPU_STARTING:
case CPU_DOWN_FAILED:
set_cpu_active((long)hcpu, true);
return NOTIFY_OK;
@@ -8202,12 +8203,23 @@ static inline int preempt_count_equals(int preempt_offset)
return (nested == preempt_offset);
}
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+ __might_sleep_init_called = 1;
+ return 0;
+}
+early_initcall(__might_sleep_init);
+
void __might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ oops_in_progress)
+ return;
+ if (system_state != SYSTEM_RUNNING &&
+ (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
@@ -8954,6 +8966,20 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
}
static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
+{
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(tsk);
+
+ if ((current != tsk) && !capable(CAP_SYS_NICE) &&
+ cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EACCES;
+
+ return 0;
+}
+
+static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
@@ -9058,6 +9084,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
.name = "cpu",
.create = cpu_cgroup_create,
.destroy = cpu_cgroup_destroy,
+ .allow_attach = cpu_cgroup_allow_attach,
.can_attach_task = cpu_cgroup_can_attach_task,
.attach_task = cpu_cgroup_attach_task,
.exit = cpu_cgroup_exit,
@@ -9084,8 +9111,30 @@ struct cpuacct {
u64 __percpu *cpuusage;
struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
+ struct cpuacct_charge_calls *cpufreq_fn;
+ void *cpuacct_data;
};
+static struct cpuacct *cpuacct_root;
+
+/* Default calls for cpufreq accounting */
+static struct cpuacct_charge_calls *cpuacct_cpufreq;
+int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn)
+{
+ cpuacct_cpufreq = fn;
+
+ /*
+ * Root node is created before platform can register callbacks,
+ * initialize here.
+ */
+ if (cpuacct_root && fn) {
+ cpuacct_root->cpufreq_fn = fn;
+ if (fn->init)
+ fn->init(&cpuacct_root->cpuacct_data);
+ }
+ return 0;
+}
+
struct cgroup_subsys cpuacct_subsys;
/* return cpu accounting group corresponding to this container */
@@ -9120,8 +9169,16 @@ static struct cgroup_subsys_state *cpuacct_create(
if (percpu_counter_init(&ca->cpustat[i], 0))
goto out_free_counters;
+ ca->cpufreq_fn = cpuacct_cpufreq;
+
+ /* If available, have platform code initialize the cpu frequency table */
+ if (ca->cpufreq_fn && ca->cpufreq_fn->init)
+ ca->cpufreq_fn->init(&ca->cpuacct_data);
+
if (cgrp->parent)
ca->parent = cgroup_ca(cgrp->parent);
+ else
+ cpuacct_root = ca;
return &ca->css;
@@ -9249,6 +9306,32 @@ static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
+static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft,
+ struct cgroup_map_cb *cb)
+{
+ struct cpuacct *ca = cgroup_ca(cgrp);
+ if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show)
+ ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb);
+
+ return 0;
+}
+
+/* return total cpu power usage (milliWatt second) of a group */
+static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ int i;
+ struct cpuacct *ca = cgroup_ca(cgrp);
+ u64 totalpower = 0;
+
+ if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage)
+ for_each_present_cpu(i) {
+ totalpower += ca->cpufreq_fn->power_usage(
+ ca->cpuacct_data);
+ }
+
+ return totalpower;
+}
+
static struct cftype files[] = {
{
.name = "usage",
@@ -9263,6 +9346,14 @@ static struct cftype files[] = {
.name = "stat",
.read_map = cpuacct_stats_show,
},
+ {
+ .name = "cpufreq",
+ .read_map = cpuacct_cpufreq_show,
+ },
+ {
+ .name = "power",
+ .read_u64 = cpuacct_powerusage_read
+ },
};
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -9292,6 +9383,10 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
for (; ca; ca = ca->parent) {
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
*cpuusage += cputime;
+
+ /* Call back into platform code to account for CPU speeds */
+ if (ca->cpufreq_fn && ca->cpufreq_fn->charge)
+ ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu);
}
rcu_read_unlock();
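The cpuacct hooks above route into a struct cpuacct_charge_calls registered through cpuacct_register_cpufreq(); the structure itself lives in <linux/cpuacct.h>, which is not part of this hunk, so the member names and signatures below are inferred from the call sites (init, charge, cpufreq_show, power_usage). A hypothetical platform-side registration could look like:

/* Hypothetical platform sketch; signatures inferred from the call sites
 * in kernel/sched.c above, not taken from <linux/cpuacct.h>. */
#include <linux/cpuacct.h>
#include <linux/slab.h>

static int my_cpufreq_stats_init(void **cpuacct_data)
{
	/* Allocate a per-cgroup time-in-state table and hand it back. */
	*cpuacct_data = kzalloc(1024, GFP_KERNEL);
	return *cpuacct_data ? 0 : -ENOMEM;
}

static void my_cpufreq_stats_charge(void *cpuacct_data, u64 cputime, int cpu)
{
	/* Attribute 'cputime' to the frequency that 'cpu' is running at. */
}

static struct cpuacct_charge_calls my_cpuacct_calls = {
	.init	= my_cpufreq_stats_init,
	.charge	= my_cpufreq_stats_charge,
	/* .cpufreq_show and .power_usage would back the "cpufreq" and
	 * "power" cgroup files added above. */
};

static int __init my_cpuacct_register(void)
{
	return cpuacct_register_cpufreq(&my_cpuacct_calls);
}
late_initcall(my_cpuacct_register);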
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 11d65b531e50..fd15163f360a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -96,6 +96,7 @@ extern char core_pattern[];
extern unsigned int core_pipe_limit;
extern int pid_max;
extern int min_free_kbytes;
+extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
@@ -1189,6 +1190,13 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
+ .procname = "min_free_order_shift",
+ .data = &min_free_order_shift,
+ .maxlen = sizeof(min_free_order_shift),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,
.maxlen = sizeof(percpu_pagelist_fraction),
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b8e8c2..cae2ad7491b0 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,5 +1,5 @@
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o posix-clock.o #alarmtimer.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cd3134510f3d..93168c0f9910 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -487,6 +487,39 @@ config RING_BUFFER_BENCHMARK
If unsure, say N.
+config TRACELEVEL
+ bool "Add capability to prioritize traces"
+ depends on EVENT_TRACING
+ help
+ This option allows subsystem programmers to add priorities to trace
+ events by calling tracelevel_register. Traces of high priority
+ will automatically be enabled on kernel boot, and users can change
+ the trace level with a kernel parameter.
+
+config TRACEDUMP
+ bool "Dumping functionality for ftrace"
+ depends on FUNCTION_TRACER
+ help
+ This option adds functionality to dump tracing data in several forms.
+ Data can be dumped in ascii form or as raw pages from the tracing
+ ring buffers, along with the saved cmdlines. The format is selected by
+ the module parameter format_ascii. Data will be compressed
+ using zlib.
+
+config TRACEDUMP_PANIC
+ bool "Tracedump to console on panic"
+ depends on TRACEDUMP
+ help
+ With this option, tracedump will automatically dump to the console
+ on a kernel panic.
+
+config TRACEDUMP_PROCFS
+ bool "Tracedump via proc file"
+ depends on TRACEDUMP
+ help
+ With this option, tracedump can be dumped from user space by reading
+ from /proc/tracedump.
+
endif # FTRACE
endif # TRACING_SUPPORT
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 761c510a06c5..1360a1a90d5d 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -56,5 +56,7 @@ obj-$(CONFIG_TRACEPOINTS) += power-traces.o
ifeq ($(CONFIG_TRACING),y)
obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
+obj-$(CONFIG_TRACELEVEL) += tracelevel.o
+obj-$(CONFIG_TRACEDUMP) += tracedump.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/tracedump.c b/kernel/trace/tracedump.c
new file mode 100644
index 000000000000..8d9589faad82
--- /dev/null
+++ b/kernel/trace/tracedump.c
@@ -0,0 +1,682 @@
+/*
+ * kernel/trace/tracedump.c
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/proc_fs.h>
+#include <linux/ring_buffer.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+#include <linux/tracedump.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+#define CPU_MAX (NR_CPUS-1)
+
+#define TRYM(fn, ...) do { \
+ int try_error = (fn); \
+ if (try_error < 0) { \
+ printk(__VA_ARGS__); \
+ return try_error; \
+ } \
+} while (0)
+
+#define TRY(fn) TRYM(fn, TAG "Caught error from %s in %s\n", #fn, __func__)
+
+/* Stolen from printk.c */
+#define for_each_console(con) \
+ for (con = console_drivers; con != NULL; con = con->next)
+
+#define TAG KERN_ERR "tracedump: "
+
+#define TD_MIN_CONSUME 2000
+#define TD_COMPRESS_CHUNK 0x8000
+
+static DEFINE_MUTEX(tracedump_proc_lock);
+
+static const char MAGIC_NUMBER[9] = "TRACEDUMP";
+static const char CPU_DELIM[7] = "CPU_END";
+#define CMDLINE_DELIM "|"
+
+/* Type of output */
+static bool current_format;
+static bool format_ascii;
+module_param(format_ascii, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(format_ascii, "Dump ascii or raw data");
+
+/* Max size of output */
+static uint panic_size = 0x80000;
+module_param(panic_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(panic_size, "Max dump size during kernel panic (bytes)");
+
+static uint compress_level = 9;
+module_param(compress_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(compress_level, "Level of compression to use. [0-9]");
+
+static char out_buf[TD_COMPRESS_CHUNK];
+static z_stream stream;
+static int compress_done;
+static int flush;
+
+static int old_trace_flags;
+
+static struct trace_iterator iter;
+static struct pager_s {
+ struct trace_array *tr;
+ void *spare;
+ int cpu;
+ int len;
+ char __user *ubuf;
+} pager;
+
+static char cmdline_buf[16+TASK_COMM_LEN];
+
+static int print_to_console(const char *buf, size_t len)
+{
+ struct console *con;
+
+ /* Stolen from printk.c */
+ for_each_console(con) {
+ if ((con->flags & CON_ENABLED) && con->write &&
+ (cpu_online(smp_processor_id()) ||
+ (con->flags & CON_ANYTIME)))
+ con->write(con, buf, len);
+ }
+ return 0;
+}
+
+static int print_to_user(const char *buf, size_t len)
+{
+ int size;
+ size = copy_to_user(pager.ubuf, buf, len);
+ if (size > 0) {
+ printk(TAG "Failed to copy to user %d bytes\n", size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int print(const char *buf, size_t len, int print_to)
+{
+ if (print_to == TD_PRINT_CONSOLE)
+ TRY(print_to_console(buf, len));
+ else if (print_to == TD_PRINT_USER)
+ TRY(print_to_user(buf, len));
+ return 0;
+}
+
+/* print_magic will print MAGIC_NUMBER using the
+ * print function selected by print_to.
+ */
+static inline ssize_t print_magic(int print_to)
+{
+ print(MAGIC_NUMBER, sizeof(MAGIC_NUMBER), print_to);
+ return sizeof(MAGIC_NUMBER);
+}
+
+static int iter_init(void)
+{
+ int cpu;
+
+ /* Make iter point to global ring buffer used in trace. */
+ trace_init_global_iter(&iter);
+
+ /* Disable tracing */
+ for_each_tracing_cpu(cpu) {
+ atomic_inc(&iter.tr->data[cpu]->disabled);
+ }
+
+ /* Save flags */
+ old_trace_flags = trace_flags;
+
+ /* Dont look at memory in panic mode. */
+ trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
+ /* Prepare ring buffer iter */
+ for_each_tracing_cpu(cpu) {
+ iter.buffer_iter[cpu] =
+ ring_buffer_read_prepare(iter.tr->buffer, cpu);
+ }
+ ring_buffer_read_prepare_sync();
+ for_each_tracing_cpu(cpu) {
+ ring_buffer_read_start(iter.buffer_iter[cpu]);
+ tracing_iter_reset(&iter, cpu);
+ }
+ return 0;
+}
+
+/* iter_next gets the next entry in the ring buffer, ordered by time.
+ * If there are no more entries, returns 0.
+ */
+static ssize_t iter_next(void)
+{
+ /* Zero out the iterator's seq */
+ memset(&iter.seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
+
+ while (!trace_empty(&iter)) {
+ if (trace_find_next_entry_inc(&iter) == NULL) {
+ printk(TAG "trace_find_next_entry failed!\n");
+ return -EINVAL;
+ }
+
+ /* Copy the ring buffer data to iterator's seq */
+ print_trace_line(&iter);
+ if (iter.seq.len != 0)
+ return iter.seq.len;
+ }
+ return 0;
+}
+
+static int iter_deinit(void)
+{
+ int cpu;
+ /* Enable tracing */
+ for_each_tracing_cpu(cpu) {
+ ring_buffer_read_finish(iter.buffer_iter[cpu]);
+ }
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
+ }
+
+ /* Restore flags */
+ trace_flags = old_trace_flags;
+ return 0;
+}
+
+static int pager_init(void)
+{
+ int cpu;
+
+ /* Need to do this to get a pointer to global_trace (iter.tr).
+ Lame, I know. */
+ trace_init_global_iter(&iter);
+
+ /* Turn off tracing */
+ for_each_tracing_cpu(cpu) {
+ atomic_inc(&iter.tr->data[cpu]->disabled);
+ }
+
+ memset(&pager, 0, sizeof(pager));
+ pager.tr = iter.tr;
+ pager.len = TD_COMPRESS_CHUNK;
+
+ return 0;
+}
+
+/* pager_next_cpu moves the pager to the next cpu.
+ * Returns 0 if pager is done, else 1.
+ */
+static ssize_t pager_next_cpu(void)
+{
+ if (pager.cpu <= CPU_MAX) {
+ pager.cpu += 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* pager_next gets the next page of data from the ring buffer
+ * of the current cpu. Returns page size or 0 if no more data.
+ */
+static ssize_t pager_next(void)
+{
+ int ret;
+
+ if (pager.cpu > CPU_MAX)
+ return 0;
+
+ if (!pager.spare)
+ pager.spare = ring_buffer_alloc_read_page(pager.tr->buffer);
+ if (!pager.spare) {
+ printk(TAG "ring_buffer_alloc_read_page failed!");
+ return -ENOMEM;
+ }
+
+ ret = ring_buffer_read_page(pager.tr->buffer,
+ &pager.spare,
+ pager.len,
+ pager.cpu, 0);
+ if (ret < 0)
+ return 0;
+
+ return PAGE_SIZE;
+}
+
+static int pager_deinit(void)
+{
+ int cpu;
+ if (pager.spare != NULL)
+ ring_buffer_free_read_page(pager.tr->buffer, pager.spare);
+
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
+ }
+ return 0;
+}
+
+/* cmdline_next gets the next saved cmdline from the trace and
+ * puts it in cmdline_buf. Returns the size of the cmdline, or 0 once all
+ * cmdlines have been returned; it will reset itself on a subsequent call.
+ */
+static ssize_t cmdline_next(void)
+{
+ static int pid;
+ ssize_t size = 0;
+
+ if (pid >= PID_MAX_DEFAULT)
+ pid = -1;
+
+ while (size == 0 && pid < PID_MAX_DEFAULT) {
+ pid++;
+ trace_find_cmdline(pid, cmdline_buf);
+ if (!strncmp(cmdline_buf, "<...>", 5))
+ continue;
+
+ sprintf(&cmdline_buf[strlen(cmdline_buf)], " %d"
+ CMDLINE_DELIM, pid);
+ size = strlen(cmdline_buf);
+ }
+ return size;
+}
+
+/* consume_events removes the first 'num' entries from the ring buffer. */
+static int consume_events(size_t num)
+{
+ TRY(iter_init());
+ for (; num > 0 && !trace_empty(&iter); num--) {
+ trace_find_next_entry_inc(&iter);
+ ring_buffer_consume(iter.tr->buffer, iter.cpu, &iter.ts,
+ &iter.lost_events);
+ }
+ TRY(iter_deinit());
+ return 0;
+}
+
+static int data_init(void)
+{
+ if (current_format)
+ TRY(iter_init());
+ else
+ TRY(pager_init());
+ return 0;
+}
+
+/* data_next will figure out the right 'next' function to
+ * call and will select the right buffer to pass back
+ * to compress_next.
+ *
+ * iter_next should be used to get data entry-by-entry, ordered
+ * by time, which is what we need in order to convert it to ascii.
+ *
+ * pager_next will return a full page of raw data at a time, one
+ * CPU at a time. pager_next_cpu must be called to get the next CPU.
+ * cmdline_next will get the next saved cmdline
+ */
+static ssize_t data_next(const char **buf)
+{
+ ssize_t size;
+
+ if (current_format) {
+ TRY(size = iter_next());
+ *buf = iter.seq.buffer;
+ } else {
+ TRY(size = pager_next());
+ *buf = pager.spare;
+ if (size == 0) {
+ if (pager_next_cpu()) {
+ size = sizeof(CPU_DELIM);
+ *buf = CPU_DELIM;
+ } else {
+ TRY(size = cmdline_next());
+ *buf = cmdline_buf;
+ }
+ }
+ }
+ return size;
+}
+
+static int data_deinit(void)
+{
+ if (current_format)
+ TRY(iter_deinit());
+ else
+ TRY(pager_deinit());
+ return 0;
+}
+
+static int compress_init(void)
+{
+ int workspacesize, ret;
+
+ compress_done = 0;
+ flush = Z_NO_FLUSH;
+ stream.data_type = current_format ? Z_ASCII : Z_BINARY;
+ workspacesize = zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL);
+ stream.workspace = vmalloc(workspacesize);
+ if (!stream.workspace) {
+ printk(TAG "Could not allocate "
+ "enough memory for zlib!\n");
+ return -ENOMEM;
+ }
+ memset(stream.workspace, 0, workspacesize);
+
+ ret = zlib_deflateInit(&stream, compress_level);
+ if (ret != Z_OK) {
+ printk(TAG "%s\n", stream.msg);
+ return ret;
+ }
+ stream.avail_in = 0;
+ stream.avail_out = 0;
+ TRY(data_init());
+ return 0;
+}
+
+/* compress_next will compress up to min(max_out, TD_COMPRESS_CHUNK) bytes
+ * of data into the output buffer. It gets the data by calling data_next.
+ * It will return the most data it possibly can. If it returns 0, then
+ * there is no more data.
+ *
+ * By the way that zlib works, each call to zlib_deflate will possibly
+ * consume up to avail_in bytes from next_in, and will fill up to
+ * avail_out bytes in next_out. Once flush == Z_FINISH, it can not take
+ * any more input. It will output until it is finished, and will return
+ * Z_STREAM_END.
+ */
+static ssize_t compress_next(size_t max_out)
+{
+ ssize_t ret;
+ max_out = min(max_out, (size_t)TD_COMPRESS_CHUNK);
+ stream.next_out = out_buf;
+ stream.avail_out = max_out;
+ while (stream.avail_out > 0 && !compress_done) {
+ if (stream.avail_in == 0 && flush != Z_FINISH) {
+ TRY(stream.avail_in =
+ data_next((const char **)&stream.next_in));
+ flush = (stream.avail_in == 0) ? Z_FINISH : Z_NO_FLUSH;
+ }
+ if (stream.next_in != NULL) {
+ TRYM((ret = zlib_deflate(&stream, flush)),
+ "zlib: %s\n", stream.msg);
+ compress_done = (ret == Z_STREAM_END);
+ }
+ }
+ ret = max_out - stream.avail_out;
+ return ret;
+}
+
+static int compress_deinit(void)
+{
+ TRY(data_deinit());
+
+ zlib_deflateEnd(&stream);
+ vfree(stream.workspace);
+
+ /* TODO: remove */
+ printk(TAG "Total in: %ld\n", stream.total_in);
+ printk(TAG "Total out: %ld\n", stream.total_out);
+ return stream.total_out;
+}
+
+static int compress_reset(void)
+{
+ TRY(compress_deinit());
+ TRY(compress_init());
+ return 0;
+}
+
+/* tracedump_init initializes all tracedump components.
+ * Call this before tracedump_next
+ */
+int tracedump_init(void)
+{
+ TRY(compress_init());
+ return 0;
+}
+
+/* tracedump_next will print up to max_out data from the tracing ring
+ * buffers using the print function selected by print_to. The data is
+ * compressed using zlib.
+ *
+ * The output type of the data is specified by the format_ascii module
+ * parameter. If format_ascii == 1, human-readable data will be output.
+ * Otherwise, it will output raw data from the ring buffer in cpu order,
+ * followed by the saved_cmdlines data.
+ */
+ssize_t tracedump_next(size_t max_out, int print_to)
+{
+ ssize_t size;
+ TRY(size = compress_next(max_out));
+ print(out_buf, size, print_to);
+ return size;
+}
+
+/* tracedump_all will print all data in the tracing ring buffers using
+ * the print function selected by print_to. The data is compressed using
+ * zlib, and is surrounded by MAGIC_NUMBER.
+ *
+ * The output type of the data is specified by the format_ascii module
+ * parameter. If format_ascii == 1, human-readable data will be output.
+ * Otherwise, it will output raw data from the ring buffer in cpu order,
+ * followed by the saved_cmdlines data.
+ */
+ssize_t tracedump_all(int print_to)
+{
+ ssize_t ret, size = 0;
+ TRY(size += print_magic(print_to));
+
+ do {
+ /* Here the size used doesn't really matter,
+ * since we're dumping everything. */
+ TRY(ret = tracedump_next(0xFFFFFFFF, print_to));
+ size += ret;
+ } while (ret > 0);
+
+ TRY(size += print_magic(print_to));
+
+ return size;
+}
+
+/* tracedump_deinit deinitializes all tracedump components.
+ * This must be called, even on error.
+ */
+int tracedump_deinit(void)
+{
+ TRY(compress_deinit());
+ return 0;
+}
+
+/* tracedump_reset reinitializes all tracedump components. */
+int tracedump_reset(void)
+{
+ TRY(compress_reset());
+ return 0;
+}
+
+
+
+/* tracedump_open opens the tracedump file for reading. */
+static int tracedump_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ mutex_lock(&tracedump_proc_lock);
+ current_format = format_ascii;
+ ret = tracedump_init();
+ if (ret < 0)
+ goto err;
+
+ ret = nonseekable_open(inode, file);
+ if (ret < 0)
+ goto err;
+ return ret;
+
+err:
+ mutex_unlock(&tracedump_proc_lock);
+ return ret;
+}
+
+/* tracedump_read reads data from tracedump_next and prints
+ * it to userspace. It will surround the data with MAGIC_NUMBER.
+ */
+static ssize_t tracedump_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ static int done;
+ ssize_t size = 0;
+
+ pager.ubuf = buf;
+
+ if (*offset == 0) {
+ done = 0;
+ TRY(size = print_magic(TD_PRINT_USER));
+ } else if (!done) {
+ TRY(size = tracedump_next(len, TD_PRINT_USER));
+ if (size == 0) {
+ TRY(size = print_magic(TD_PRINT_USER));
+ done = 1;
+ }
+ }
+
+ *offset += size;
+
+ return size;
+}
+
+static int tracedump_release(struct inode *inode, struct file *file)
+{
+ int ret;
+ ret = tracedump_deinit();
+ mutex_unlock(&tracedump_proc_lock);
+ return ret;
+}
+
+/* tracedump_dump dumps all tracing data from the tracing ring buffers
+ * to all consoles. For details about the output format, see
+ * tracedump_all.
+
+ * At most max_out bytes are dumped. To accomplish this,
+ * tracedump_dump calls tracedump_all several times without writing the data,
+ * each time tossing out old data until it reaches its goal.
+ *
+ * Note: dumping raw pages currently does NOT follow the size limit.
+ */
+
+int tracedump_dump(size_t max_out)
+{
+ ssize_t size;
+ size_t consume;
+
+ printk(TAG "\n");
+
+ tracedump_init();
+
+ if (format_ascii) {
+ size = tracedump_all(TD_NO_PRINT);
+ if (size < 0) {
+ printk(TAG "failed to dump\n");
+ goto out;
+ }
+ while (size > max_out) {
+ TRY(tracedump_deinit());
+ /* Events take more or less 60 ascii bytes each,
+ not counting compression */
+ consume = TD_MIN_CONSUME + (size - max_out) /
+ (60 / (compress_level + 1));
+ TRY(consume_events(consume));
+ TRY(tracedump_init());
+ size = tracedump_all(TD_NO_PRINT);
+ if (size < 0) {
+ printk(TAG "failed to dump\n");
+ goto out;
+ }
+ }
+
+ TRY(tracedump_reset());
+ }
+ size = tracedump_all(TD_PRINT_CONSOLE);
+ if (size < 0) {
+ printk(TAG "failed to dump\n");
+ goto out;
+ }
+
+out:
+ tracedump_deinit();
+ printk(KERN_INFO "\n" TAG " end\n");
+ return size;
+}
+
+static const struct file_operations tracedump_fops = {
+ .owner = THIS_MODULE,
+ .open = tracedump_open,
+ .read = tracedump_read,
+ .release = tracedump_release,
+};
+
+#ifdef CONFIG_TRACEDUMP_PANIC
+static int tracedump_panic_handler(struct notifier_block *this,
+ unsigned long event, void *unused)
+{
+ tracedump_dump(panic_size);
+ return 0;
+}
+
+static struct notifier_block tracedump_panic_notifier = {
+ .notifier_call = tracedump_panic_handler,
+ .next = NULL,
+ .priority = 150 /* priority: INT_MAX >= x >= 0 */
+};
+#endif
+
+static int __init tracedump_initcall(void)
+{
+#ifdef CONFIG_TRACEDUMP_PROCFS
+ struct proc_dir_entry *entry;
+
+ /* Create a procfs file for easy dumping */
+ entry = create_proc_entry("tracedump", S_IFREG | S_IRUGO, NULL);
+ if (!entry)
+ printk(TAG "failed to create proc entry\n");
+ else
+ entry->proc_fops = &tracedump_fops;
+#endif
+
+#ifdef CONFIG_TRACEDUMP_PANIC
+ /* Automatically dump to console on a kernel panic */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &tracedump_panic_notifier);
+#endif
+ return 0;
+}
+
+early_initcall(tracedump_initcall);
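Per tracedump_read() and tracedump_all(), a read of /proc/tracedump (CONFIG_TRACEDUMP_PROCFS) yields the 9-byte magic "TRACEDUMP", a zlib deflate stream, and then the magic again. A userspace consumer can strip the leading magic and inflate the rest; the sketch below is hypothetical, uses zlib, and writes the decompressed dump to stdout (buffer sizes are arbitrary):

/* Hypothetical userspace sketch: unpack a dump read from /proc/tracedump.
 * Layout: MAGIC ("TRACEDUMP", 9 bytes), zlib-compressed payload, MAGIC. */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define MAGIC_LEN 9

int main(void)
{
	static unsigned char in[1 << 15], out[1 << 16];
	FILE *f = fopen("/proc/tracedump", "rb");
	z_stream zs;
	size_t n;
	int first = 1, ret = Z_OK;

	if (!f)
		return 1;
	memset(&zs, 0, sizeof(zs));
	if (inflateInit(&zs) != Z_OK) {
		fclose(f);
		return 1;
	}

	while (ret != Z_STREAM_END && (n = fread(in, 1, sizeof(in), f)) > 0) {
		unsigned char *p = in;

		if (first && n > MAGIC_LEN) {	/* skip the leading magic */
			p += MAGIC_LEN;
			n -= MAGIC_LEN;
			first = 0;
		}
		zs.next_in = p;
		zs.avail_in = n;
		while (zs.avail_in > 0 && ret != Z_STREAM_END) {
			zs.next_out = out;
			zs.avail_out = sizeof(out);
			ret = inflate(&zs, Z_NO_FLUSH);
			if (ret != Z_OK && ret != Z_STREAM_END)
				break;
			fwrite(out, 1, sizeof(out) - zs.avail_out, stdout);
		}
	}
	inflateEnd(&zs);
	fclose(f);
	return ret == Z_STREAM_END ? 0 : 1;
}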
diff --git a/kernel/trace/tracelevel.c b/kernel/trace/tracelevel.c
new file mode 100644
index 000000000000..9f8b8eedbb58
--- /dev/null
+++ b/kernel/trace/tracelevel.c
@@ -0,0 +1,142 @@
+/*
+ * kernel/trace/tracelevel.c
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/ftrace_event.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/tracelevel.h>
+#include <linux/vmalloc.h>
+
+#include "trace.h"
+
+#define TAG KERN_ERR "tracelevel: "
+
+struct tracelevel_record {
+ struct list_head list;
+ char *name;
+ int level;
+};
+
+static LIST_HEAD(tracelevel_list);
+
+static bool started;
+static unsigned int tracelevel_level = TRACELEVEL_DEFAULT;
+
+static DEFINE_MUTEX(tracelevel_record_lock);
+
+/* tracelevel_set_event sets a single event if set = 1, or
+ * clears an event if set = 0.
+ */
+static int tracelevel_set_event(struct tracelevel_record *evt, bool set)
+{
+ if (trace_set_clr_event(NULL, evt->name, set) < 0) {
+ printk(TAG "failed to set event %s\n", evt->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Registers an event. If possible, it also sets it.
+ * If not, we'll set it in tracelevel_init.
+ */
+int __tracelevel_register(char *name, unsigned int level)
+{
+ struct tracelevel_record *evt = (struct tracelevel_record *)
+ vmalloc(sizeof(struct tracelevel_record));
+ if (!evt) {
+ printk(TAG "failed to allocate tracelevel_record for %s\n",
+ name);
+ return -ENOMEM;
+ }
+
+ evt->name = name;
+ evt->level = level;
+
+ mutex_lock(&tracelevel_record_lock);
+ list_add(&evt->list, &tracelevel_list);
+ mutex_unlock(&tracelevel_record_lock);
+
+ if (level >= tracelevel_level && started)
+ tracelevel_set_event(evt, 1);
+ return 0;
+}
+
+/* tracelevel_set_level sets the global level, clears events
+ * lower than that level, and enables events greater or equal.
+ */
+int tracelevel_set_level(int level)
+{
+ struct tracelevel_record *evt = NULL;
+
+ if (level < 0 || level > TRACELEVEL_MAX)
+ return -EINVAL;
+ tracelevel_level = level;
+
+ mutex_lock(&tracelevel_record_lock);
+ list_for_each_entry(evt, &tracelevel_list, list) {
+ if (evt->level >= level)
+ tracelevel_set_event(evt, 1);
+ else
+ tracelevel_set_event(evt, 0);
+ }
+ mutex_unlock(&tracelevel_record_lock);
+ return 0;
+}
+
+static int param_set_level(const char *val, const struct kernel_param *kp)
+{
+ int level, ret;
+ ret = strict_strtol(val, 0, &level);
+ if (ret < 0)
+ return ret;
+ return tracelevel_set_level(level);
+}
+
+static int param_get_level(char *buffer, const struct kernel_param *kp)
+{
+ return param_get_int(buffer, kp);
+}
+
+static struct kernel_param_ops tracelevel_level_ops = {
+ .set = param_set_level,
+ .get = param_get_level
+};
+
+module_param_cb(level, &tracelevel_level_ops, &tracelevel_level, 0644);
+
+/* Turn on the tracing that has been registered thus far. */
+static int __init tracelevel_init(void)
+{
+ int ret;
+ started = true;
+
+ /* Ring buffer is initialized to 1 page until the user sets a tracer.
+ * Since we're doing this manually, we need to ask for an expanded buffer.
+ */
+ ret = tracing_update_buffers();
+ if (ret < 0)
+ return ret;
+
+ return tracelevel_set_level(tracelevel_level);
+}
+
+/* Tracing mechanism is set up during fs_initcall. */
+fs_initcall_sync(tracelevel_init);
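
For illustration, a minimal sketch (not part of the patch) of how built-in code could hook a trace event into this facility. It assumes the tracelevel header exposes __tracelevel_register() and the level constants; only TRACELEVEL_DEFAULT and TRACELEVEL_MAX are visible in this hunk, so the event name and level below are placeholders.

#include <linux/init.h>
#include <linux/tracelevel.h>

/* "mydrv_event" is a hypothetical trace event name defined elsewhere via
 * TRACE_EVENT(); the level simply reuses TRACELEVEL_DEFAULT here. */
static char mydrv_event_name[] = "mydrv_event";

static int __init mydrv_tracelevel_init(void)
{
	return __tracelevel_register(mydrv_event_name, TRACELEVEL_DEFAULT);
}
/* device_initcall runs after the fs_initcall_sync(tracelevel_init) above. */
device_initcall(mydrv_tracelevel_init);

The global threshold can then be adjusted at runtime through the module parameter registered above, e.g. by writing to /sys/module/tracelevel/parameters/level.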
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c0cb9c4bc46d..36b60dbac3a1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -670,8 +670,9 @@ config DEBUG_LOCKING_API_SELFTESTS
mutexes and rwsems.
config STACKTRACE
- bool
+ bool "Stacktrace"
depends on STACKTRACE_SUPPORT
+ default y
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
diff --git a/mm/Makefile b/mm/Makefile
index 836e4163c1bf..2d00bf57ca42 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
+obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_COMPACTION) += compaction.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
diff --git a/mm/ashmem.c b/mm/ashmem.c
new file mode 100644
index 000000000000..66e3f23ee33c
--- /dev/null
+++ b/mm/ashmem.c
@@ -0,0 +1,748 @@
+/* mm/ashmem.c
+**
+** Anonymous Shared Memory Subsystem, ashmem
+**
+** Copyright (C) 2008 Google, Inc.
+**
+** Robert Love <rlove@google.com>
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/ashmem.h>
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/*
+ * ashmem_area - anonymous shared memory area
+ * Lifecycle: From our parent file's open() until its release()
+ * Locking: Protected by `ashmem_mutex'
+ * Big Note: Mappings do NOT pin this structure; it dies on close()
+ */
+struct ashmem_area {
+ char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */
+ struct list_head unpinned_list; /* this area's unpinned ranges */
+ struct file *file; /* the shmem-based backing file */
+ size_t size; /* size of the mapping, in bytes */
+ unsigned long prot_mask; /* allowed prot bits, as vm_flags */
+};
+
+/*
+ * ashmem_range - represents an interval of unpinned (evictable) pages
+ * Lifecycle: From unpin to pin
+ * Locking: Protected by `ashmem_mutex'
+ */
+struct ashmem_range {
+ struct list_head lru; /* entry in LRU list */
+ struct list_head unpinned; /* entry in its area's unpinned list */
+ struct ashmem_area *asma; /* associated area */
+ size_t pgstart; /* starting page, inclusive */
+ size_t pgend; /* ending page, inclusive */
+ unsigned int purged; /* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/* Count of pages on our LRU list, protected by ashmem_mutex */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+#define range_size(range) \
+ ((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+ ((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+ (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+ (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+ (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+ (page_in_range(range, start) || page_in_range(range, end) || \
+ page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+ ((range)->pgend < (page))
+
+#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
+
+static inline void lru_add(struct ashmem_range *range)
+{
+ list_add_tail(&range->lru, &ashmem_lru_list);
+ lru_count += range_size(range);
+}
+
+static inline void lru_del(struct ashmem_range *range)
+{
+ list_del(&range->lru);
+ lru_count -= range_size(range);
+}
+
+/*
+ * range_alloc - allocate and initialize a new ashmem_range structure
+ *
+ * 'asma' - associated ashmem_area
+ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
+ * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * 'start' - starting page, inclusive
+ * 'end' - ending page, inclusive
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end)
+{
+ struct ashmem_range *range;
+
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (unlikely(!range))
+ return -ENOMEM;
+
+ range->asma = asma;
+ range->pgstart = start;
+ range->pgend = end;
+ range->purged = purged;
+
+ list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+ if (range_on_lru(range))
+ lru_add(range);
+
+ return 0;
+}
+
+static void range_del(struct ashmem_range *range)
+{
+ list_del(&range->unpinned);
+ if (range_on_lru(range))
+ lru_del(range);
+ kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/*
+ * range_shrink - shrinks a range
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ size_t pre = range_size(range);
+
+ range->pgstart = start;
+ range->pgend = end;
+
+ if (range_on_lru(range))
+ lru_count -= pre - range_size(range);
+}
+
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+ struct ashmem_area *asma;
+ int ret;
+
+ ret = generic_file_open(inode, file);
+ if (unlikely(ret))
+ return ret;
+
+ asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+ if (unlikely(!asma))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&asma->unpinned_list);
+ memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+ asma->prot_mask = PROT_MASK;
+ file->private_data = asma;
+
+ return 0;
+}
+
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+ struct ashmem_range *range, *next;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+ range_del(range);
+ mutex_unlock(&ashmem_mutex);
+
+ if (asma->file)
+ fput(asma->file);
+ kmem_cache_free(ashmem_area_cachep, asma);
+
+ return 0;
+}
+
+static ssize_t ashmem_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* If size is not set, or set to 0, always return EOF. */
+ if (asma->size == 0) {
+ goto out;
+ }
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* Update the backing file's pos, since f_op->read() doesn't */
+ asma->file->f_pos = *pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->size == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->llseek(asma->file, offset, origin);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* Copy f_pos from the backing file, since f_op->llseek() updates it there */
+ file->f_pos = asma->file->f_pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static inline unsigned long
+calc_vm_may_flags(unsigned long prot)
+{
+ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) |
+ _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+}
+
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* user needs to SET_SIZE before mapping */
+ if (unlikely(!asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested protection bits must match our allowed protection mask */
+ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
+ calc_vm_prot_bits(PROT_MASK))) {
+ ret = -EPERM;
+ goto out;
+ }
+ vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
+
+ if (!asma->file) {
+ char *name = ASHMEM_NAME_DEF;
+ struct file *vmfile;
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ name = asma->name;
+
+ /* ... and allocate the backing shmem file */
+ vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
+ if (unlikely(IS_ERR(vmfile))) {
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
+ asma->file = vmfile;
+ }
+ get_file(asma->file);
+
+ if (vma->vm_flags & VM_SHARED)
+ shmem_set_file(vma, asma->file);
+ else {
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
+ }
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ *
+ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
+ * many objects (pages) we have in total.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct ashmem_range *range, *next;
+
+ /* We might recurse into filesystem code, so bail out if necessary */
+ if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+ return -1;
+ if (!sc->nr_to_scan)
+ return lru_count;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+ struct inode *inode = range->asma->file->f_dentry->d_inode;
+ loff_t start = range->pgstart * PAGE_SIZE;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+
+ vmtruncate_range(inode, start, end);
+ range->purged = ASHMEM_WAS_PURGED;
+ lru_del(range);
+
+ sc->nr_to_scan -= range_size(range);
+ if (sc->nr_to_scan <= 0)
+ break;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return lru_count;
+}
+
+static struct shrinker ashmem_shrinker = {
+ .shrink = ashmem_shrink,
+ .seeks = DEFAULT_SEEKS * 4,
+};
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* the user can only remove, not add, protection bits */
+ if (unlikely((asma->prot_mask & prot) != prot)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* does the application expect PROT_READ to imply PROT_EXEC? */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ asma->prot_mask = prot;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* cannot change an existing mapping's name */
+ if (unlikely(asma->file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
+ name, ASHMEM_NAME_LEN)))
+ ret = -EFAULT;
+ asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+
+out:
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+ size_t len;
+
+ /*
+ * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+ * prevents us from revealing one user's stack to another.
+ */
+ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+ if (unlikely(copy_to_user(name,
+ asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
+ ret = -EFAULT;
+ } else {
+ if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
+ sizeof(ASHMEM_NAME_DEF))))
+ ret = -EFAULT;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ int ret = ASHMEM_NOT_PURGED;
+
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* moved past last applicable page; we can short circuit */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to pin pages that span multiple ranges,
+ * or to pin pages that aren't even unpinned, so this is messy.
+ *
+ * Four cases:
+ * 1. The requested range subsumes an existing range, so we
+ * just remove the entire matching range.
+ * 2. The requested range overlaps the start of an existing
+ * range, so we just update that range.
+ * 3. The requested range overlaps the end of an existing
+ * range, so we just update that range.
+ * 4. The requested range punches a hole in an existing range,
+ * so we have to update one side of the range and then
+ * create a new range for the other side.
+ */
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret |= range->purged;
+
+ /* Case #1: Easy. Just nuke the whole thing. */
+ if (page_range_subsumes_range(range, pgstart, pgend)) {
+ range_del(range);
+ continue;
+ }
+
+ /* Case #2: We overlap from the start, so adjust it */
+ if (range->pgstart >= pgstart) {
+ range_shrink(range, pgend + 1, range->pgend);
+ continue;
+ }
+
+ /* Case #3: We overlap from the rear, so adjust it */
+ if (range->pgend <= pgend) {
+ range_shrink(range, range->pgstart, pgstart-1);
+ continue;
+ }
+
+ /*
+ * Case #4: We eat a chunk out of the middle. A bit
+ * more complicated, we allocate a new range for the
+ * second half and adjust the first chunk's endpoint.
+ */
+ range_alloc(asma, range, range->purged,
+ pgend + 1, range->pgend);
+ range_shrink(range, range->pgstart, pgstart - 1);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* short circuit: this is our insertion point */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to unpin pages that are already entirely
+ * or partially pinned. We handle those two cases here.
+ */
+ if (page_range_subsumed_by_range(range, pgstart, pgend))
+ return 0;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ pgstart = min_t(size_t, range->pgstart, pgstart),
+ pgend = max_t(size_t, range->pgend, pgend);
+ purged |= range->purged;
+ range_del(range);
+ goto restart;
+ }
+ }
+
+ return range_alloc(asma, range, purged, pgstart, pgend);
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+ size_t pgend)
+{
+ struct ashmem_range *range;
+ int ret = ASHMEM_IS_PINNED;
+
+ list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+ if (range_before_page(range, pgstart))
+ break;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret = ASHMEM_IS_UNPINNED;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ void __user *p)
+{
+ struct ashmem_pin pin;
+ size_t pgstart, pgend;
+ int ret = -EINVAL;
+
+ if (unlikely(!asma->file))
+ return -EINVAL;
+
+ if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+ return -EFAULT;
+
+ /* By convention, a len of zero means "everything from offset onward" */
+ if (!pin.len)
+ pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+ if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+ return -EINVAL;
+
+ if (unlikely(((__u32) -1) - pin.offset < pin.len))
+ return -EINVAL;
+
+ if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+ return -EINVAL;
+
+ pgstart = pin.offset / PAGE_SIZE;
+ pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+ mutex_lock(&ashmem_mutex);
+
+ switch (cmd) {
+ case ASHMEM_PIN:
+ ret = ashmem_pin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_UNPIN:
+ ret = ashmem_unpin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_get_pin_status(asma, pgstart, pgend);
+ break;
+ }
+
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ashmem_area *asma = file->private_data;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ASHMEM_SET_NAME:
+ ret = set_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_GET_NAME:
+ ret = get_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_SET_SIZE:
+ ret = -EINVAL;
+ if (!asma->file) {
+ ret = 0;
+ asma->size = (size_t) arg;
+ }
+ break;
+ case ASHMEM_GET_SIZE:
+ ret = asma->size;
+ break;
+ case ASHMEM_SET_PROT_MASK:
+ ret = set_prot_mask(asma, arg);
+ break;
+ case ASHMEM_GET_PROT_MASK:
+ ret = asma->prot_mask;
+ break;
+ case ASHMEM_PIN:
+ case ASHMEM_UNPIN:
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+ break;
+ case ASHMEM_PURGE_ALL_CACHES:
+ ret = -EPERM;
+ if (capable(CAP_SYS_ADMIN)) {
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .nr_to_scan = 0,
+ };
+ ret = ashmem_shrink(&ashmem_shrinker, &sc);
+ sc.nr_to_scan = ret;
+ ashmem_shrink(&ashmem_shrinker, &sc);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static struct file_operations ashmem_fops = {
+ .owner = THIS_MODULE,
+ .open = ashmem_open,
+ .release = ashmem_release,
+ .read = ashmem_read,
+ .llseek = ashmem_llseek,
+ .mmap = ashmem_mmap,
+ .unlocked_ioctl = ashmem_ioctl,
+ .compat_ioctl = ashmem_ioctl,
+};
+
+static struct miscdevice ashmem_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ashmem",
+ .fops = &ashmem_fops,
+};
+
+static int __init ashmem_init(void)
+{
+ int ret;
+
+ ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+ sizeof(struct ashmem_area),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_area_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+ sizeof(struct ashmem_range),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_range_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ret = misc_register(&ashmem_misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "ashmem: failed to register misc device!\n");
+ return ret;
+ }
+
+ register_shrinker(&ashmem_shrinker);
+
+ printk(KERN_INFO "ashmem: initialized\n");
+
+ return 0;
+}
+
+static void __exit ashmem_exit(void)
+{
+ int ret;
+
+ unregister_shrinker(&ashmem_shrinker);
+
+ ret = misc_deregister(&ashmem_misc);
+ if (unlikely(ret))
+ printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+
+ kmem_cache_destroy(ashmem_range_cachep);
+ kmem_cache_destroy(ashmem_area_cachep);
+
+ printk(KERN_INFO "ashmem: unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
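
For illustration, a minimal userspace sketch (not part of the patch) of the pin/unpin protocol implemented above, assuming the ASHMEM_* ioctls and struct ashmem_pin from <linux/ashmem.h>:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>

int main(void)
{
	struct ashmem_pin pin = { .offset = 0, .len = 0 };	/* len 0 == whole region */
	size_t size = 4096;
	void *p;
	int fd;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0)
		return 1;

	/* Name and size must be set before the first mmap(). */
	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, size);

	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memcpy(p, "hello", 6);

	/* Unpinned pages may be reclaimed by ashmem_shrink() under pressure. */
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* Re-pin before use; ASHMEM_WAS_PURGED means the data must be rebuilt. */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		printf("region was purged; regenerate its contents\n");

	munmap(p, size);
	close(fd);
	return 0;
}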
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e8fae15667fb..8859578e4bdc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -127,6 +127,20 @@ void pm_restrict_gfp_mask(void)
saved_gfp_mask = gfp_allowed_mask;
gfp_allowed_mask &= ~GFP_IOFS;
}
+
+static bool pm_suspending(void)
+{
+ if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+ return false;
+ return true;
+}
+
+#else
+
+static bool pm_suspending(void)
+{
+ return false;
+}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -176,6 +190,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
};
int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
@@ -1469,7 +1484,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
free_pages -= z->free_area[o].nr_free << o;
/* Require fewer higher order pages to be free */
- min >>= 1;
+ min >>= min_free_order_shift;
if (free_pages <= min)
return false;
@@ -2207,6 +2222,14 @@ rebalance:
goto restart;
}
+
+ /*
+ * Suspend converts GFP_KERNEL to __GFP_WAIT, which can
+ * prevent reclaim from making forward progress without
+ * invoking the OOM killer. Bail out if we are suspending.
+ */
+ if (pm_suspending())
+ goto nopage;
}
/* Check if we should retry the allocation */
diff --git a/mm/shmem.c b/mm/shmem.c
index 32f6763f16fb..fba53caba0d4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2520,6 +2520,15 @@ put_memory:
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = file;
+ vma->vm_ops = &shmem_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+}
+
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -2533,11 +2542,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
+ shmem_set_file(vma, file);
return 0;
}
diff --git a/net/Kconfig b/net/Kconfig
index a07314844238..a54855c66320 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -79,6 +79,20 @@ source "net/netlabel/Kconfig"
endif # if INET
+config ANDROID_PARANOID_NETWORK
+ bool "Only allow certain groups to create sockets"
+ default y
+ help
+ Restrict creation of network sockets to processes that belong to
+ the appropriate Android groups (for example AID_INET or
+ AID_NET_BT), mirroring Android's network permission model.
+
+config NET_ACTIVITY_STATS
+ bool "Network activity statistics tracking"
+ default y
+ help
+ Network activity statistics are useful for tracking wireless
+ modem activity on 2G, 3G and 4G networks. This counts the number
+ of transmissions and groups them into time buckets based on the
+ interval since the previous transmission.
+
config NETWORK_SECMARK
bool "Security Marking"
help
@@ -217,7 +231,7 @@ source "net/dns_resolver/Kconfig"
source "net/batman-adv/Kconfig"
config RPS
- boolean
+ boolean "RPS"
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
diff --git a/net/Makefile b/net/Makefile
index acdde4950de4..572712dd5b42 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
obj-$(CONFIG_CEPH_LIB) += ceph/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
+obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 000000000000..8a3e93470069
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in power-of-two buckets:
+ * 1, 2, 4, 8 ... 512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+ int i;
+ unsigned long flags;
+ ktime_t now;
+ s64 delta;
+
+ spin_lock_irqsave(&activity_lock, flags);
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+ for (i = BUCKET_MAX - 1; i >= 0; i--) {
+ /*
+ * Check whether the time since the last transmission is at least
+ * this bucket's minimum interval.
+ */
+ if (delta < (1000000000ULL << i))
+ continue;
+
+ activity_stats[i]++;
+ last_transmit = now;
+ break;
+ }
+ spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i;
+ int len;
+ char *p = page;
+
+ /* Only print if offset is 0 and there is enough buffer space */
+ if (off || count < (30 * BUCKET_MAX + 22))
+ return -ENOMEM;
+
+ len = snprintf(p, count, "Min Bucket(sec) Count\n");
+ count -= len;
+ p += len;
+
+ for (i = 0; i < BUCKET_MAX; i++) {
+ len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
+ count -= len;
+ p += len;
+ }
+ *eof = 1;
+
+ return p - page;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ suspend_time = ktime_get_real();
+ break;
+
+ case PM_POST_SUSPEND:
+ suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+ last_transmit = ktime_sub(last_transmit, suspend_time);
+ }
+
+ return 0;
+}
+
+static struct notifier_block activity_stats_notifier_block = {
+ .notifier_call = activity_stats_notifier,
+};
+
+static int __init activity_stats_init(void)
+{
+ create_proc_read_entry("activity", S_IRUGO,
+ init_net.proc_net_stat, activity_stats_read_proc, NULL);
+ return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
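For illustration, the counters registered above land in /proc/net/stat/activity (proc_net_stat plus the "activity" name); a minimal userspace sketch (not part of the patch) that dumps them:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/net/stat/activity", "r");

	if (!f)
		return 1;
	/* First line is the "Min Bucket(sec) Count" header printed by
	 * activity_stats_read_proc(), then one line per bucket. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}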
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 117e0d161780..7c73a10d7edc 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,6 +40,15 @@
#include <net/bluetooth/bluetooth.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef BT_DBG
+#define BT_DBG(D...)
+#endif
+
#define VERSION "2.16"
/* Bluetooth sockets */
@@ -125,11 +134,40 @@ int bt_sock_unregister(int proto)
}
EXPORT_SYMBOL(bt_sock_unregister);
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+ return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
+}
+
+static inline int current_has_bt(void)
+{
+ return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
+}
+# else
+static inline int current_has_bt_admin(void)
+{
+ return 1;
+}
+
+static inline int current_has_bt(void)
+{
+ return 1;
+}
+#endif
+
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
int kern)
{
int err;
+ if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+ proto == BTPROTO_L2CAP) {
+ if (!current_has_bt())
+ return -EPERM;
+ } else if (!current_has_bt_admin())
+ return -EPERM;
+
if (net != &init_net)
return -EAFNOSUPPORT;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ea7f031f3b04..33c4e0cd83b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -333,7 +333,8 @@ static void hci_conn_auto_accept(unsigned long arg)
hci_dev_unlock(hdev);
}
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+ __u16 pkt_type, bdaddr_t *dst)
{
struct hci_conn *conn;
@@ -361,14 +362,22 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
break;
case SCO_LINK:
- if (lmp_esco_capable(hdev))
- conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
- (hdev->esco_type & EDR_ESCO_MASK);
- else
- conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
- break;
+ if (!pkt_type)
+ pkt_type = SCO_ESCO_MASK;
case ESCO_LINK:
- conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+ if (!pkt_type)
+ pkt_type = ALL_ESCO_MASK;
+ if (lmp_esco_capable(hdev)) {
+ /* HCI Setup Synchronous Connection Command uses
+ * reverse logic on the EDR_ESCO_MASK bits */
+ conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
+ hdev->esco_type;
+ } else {
+ /* Legacy HCI Add Sco Connection Command uses a
+ * shifted bitmask */
+ conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
+ SCO_PTYPE_MASK;
+ }
break;
}
@@ -492,7 +501,9 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+ __u16 pkt_type, bdaddr_t *dst,
+ __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -511,7 +522,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
if (!entry)
return ERR_PTR(-EHOSTUNREACH);
- le = hci_conn_add(hdev, LE_LINK, dst);
+ le = hci_conn_add(hdev, LE_LINK, 0, dst);
if (!le)
return ERR_PTR(-ENOMEM);
@@ -526,7 +537,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
- acl = hci_conn_add(hdev, ACL_LINK, dst);
+ acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
if (!acl)
return NULL;
}
@@ -545,7 +556,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
sco = hci_conn_hash_lookup_ba(hdev, type, dst);
if (!sco) {
- sco = hci_conn_add(hdev, type, dst);
+ sco = hci_conn_add(hdev, type, pkt_type, dst);
if (!sco) {
hci_conn_put(acl);
return NULL;
@@ -608,6 +619,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_auth_requested cp;
+
+ /* encrypt must be pending if auth is also pending */
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
@@ -892,6 +907,15 @@ int hci_get_conn_list(void __user *arg)
(ci + n)->out = c->out;
(ci + n)->state = c->state;
(ci + n)->link_mode = c->link_mode;
+ if (c->type == SCO_LINK) {
+ (ci + n)->mtu = hdev->sco_mtu;
+ (ci + n)->cnt = hdev->sco_cnt;
+ (ci + n)->pkts = hdev->sco_pkts;
+ } else {
+ (ci + n)->mtu = hdev->acl_mtu;
+ (ci + n)->cnt = hdev->acl_cnt;
+ (ci + n)->pkts = hdev->acl_pkts;
+ }
if (++n >= req.conn_num)
break;
}
@@ -928,6 +952,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
ci.out = conn->out;
ci.state = conn->state;
ci.link_mode = conn->link_mode;
+ if (req.type == SCO_LINK) {
+ ci.mtu = hdev->sco_mtu;
+ ci.cnt = hdev->sco_cnt;
+ ci.pkts = hdev->sco_pkts;
+ } else {
+ ci.mtu = hdev->acl_mtu;
+ ci.cnt = hdev->acl_cnt;
+ ci.pkts = hdev->acl_pkts;
+ }
}
hci_dev_unlock_bh(hdev);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 56943add45cc..ac3a60a75b8e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1904,6 +1904,9 @@ static int hci_send_frame(struct sk_buff *skb)
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
+ /* Notify the registered devices about a new send */
+ hci_notify(hdev, HCI_DEV_WRITE);
+
return hdev->send(skb);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7ef4eb4435fb..5a7074a7b5b8 100644..100755
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -992,7 +992,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+ conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
if (conn) {
conn->out = 1;
conn->link_mode |= HCI_LM_MASTER;
@@ -1315,7 +1315,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
+ conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
if (conn) {
conn->dst_type = cp->peer_addr_type;
conn->out = 1;
@@ -1462,6 +1462,15 @@ unlock:
hci_conn_check_pending(hdev);
}
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+ if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+ (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+ BT_CONNECTED)))
+ return true;
+ return false;
+}
+
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
@@ -1486,7 +1495,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+ /* pkt_type not yet used for incoming connections */
+ conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
@@ -1504,7 +1514,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
bacpy(&cp.bdaddr, &ev->bdaddr);
- if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+ if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+ || is_sco_active(hdev)))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */
@@ -2479,6 +2490,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
hci_conn_add_sysfs(conn);
break;
+ case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
@@ -2798,7 +2810,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+ conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bdb482bbe6..12844fdac683 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1108,10 +1108,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
auth_type = l2cap_get_auth_type(chan);
if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, dst,
+ hcon = hci_connect(hdev, LE_LINK, 0, dst,
chan->sec_level, auth_type);
else
- hcon = hci_connect(hdev, ACL_LINK, dst,
+ hcon = hci_connect(hdev, ACL_LINK, 0, dst,
chan->sec_level, auth_type);
if (IS_ERR(hcon)) {
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 53e109eb043e..98327213d93d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1372,7 +1372,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
}
- conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
+ conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level, auth_type);
if (IS_ERR(conn)) {
err = PTR_ERR(conn);
goto unlock;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5ba3f6df665c..c2486a53714e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -464,7 +464,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
switch (d->state) {
case BT_CONNECT:
- case BT_CONFIG:
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
rfcomm_schedule();
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a324b009e34b..c0b9ad0524e5 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -178,6 +178,7 @@ static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
+ __u16 pkt_type = sco_pi(sk)->pkt_type;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -193,10 +194,12 @@ static int sco_connect(struct sock *sk)
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
- else
+ else {
type = SCO_LINK;
+ pkt_type &= SCO_ESCO_MASK;
+ }
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
@@ -463,18 +466,22 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
return 0;
}
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
- struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+ struct sockaddr_sco sa;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
- int err = 0;
+ bdaddr_t *src = &sa.sco_bdaddr;
+ int len, err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
+ BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
+ memset(&sa, 0, sizeof(sa));
+ len = min_t(unsigned int, sizeof(sa), alen);
+ memcpy(&sa, addr, len);
+
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
@@ -488,7 +495,8 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
sk->sk_state = BT_BOUND;
}
@@ -501,27 +509,34 @@ done:
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
- struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- int err = 0;
-
+ struct sockaddr_sco sa;
+ int len, err = 0;
BT_DBG("sk %p", sk);
- if (alen < sizeof(struct sockaddr_sco) ||
- addr->sa_family != AF_BLUETOOTH)
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
- if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
- return -EBADFD;
-
- if (sk->sk_type != SOCK_SEQPACKET)
- return -EINVAL;
+ memset(&sa, 0, sizeof(sa));
+ len = min_t(unsigned int, sizeof(sa), alen);
+ memcpy(&sa, addr, len);
lock_sock(sk);
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
/* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
err = sco_connect(sk);
if (err)
@@ -628,6 +643,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+ sa->sco_pkt_type = sco_pi(sk)->pkt_type;
return 0;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 391888b88a92..c45dd737cdde 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -270,6 +270,9 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
mod_timer(&conn->security_timer, jiffies +
msecs_to_jiffies(SMP_TIMEOUT));
+ mod_timer(&conn->security_timer, jiffies +
+ msecs_to_jiffies(SMP_TIMEOUT));
+
return 0;
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ff3ed6086ce1..dac6a2147467 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -38,16 +38,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
- u64_stats_update_begin(&brstats->syncp);
- brstats->tx_packets++;
- brstats->tx_bytes += skb->len;
- u64_stats_update_end(&brstats->syncp);
-
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
+ u64_stats_update_begin(&brstats->syncp);
+ brstats->tx_packets++;
+ /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+ brstats->tx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
+
rcu_read_lock();
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb);
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f2dc69cffb57..681084d76a93 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -14,6 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \
inet_fragment.o ping.o
obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd2b9478ddd1..bf488051a8de 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -118,6 +118,19 @@
#include <linux/mroute.h>
#endif
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
@@ -258,6 +271,7 @@ static inline int inet_netns_ok(struct net *net, int protocol)
return ipprot->netns_ok;
}
+
/*
* Create an inet socket.
*/
@@ -274,6 +288,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (unlikely(!inet_ehash_secret))
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
build_ehash_secret();
@@ -879,6 +896,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
+ case SIOCKILLADDR:
err = devinet_ioctl(net, cmd, (void __user *)arg);
break;
default:
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc19bd06dd00..80554bcfd979 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -59,6 +59,7 @@
#include <net/arp.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
@@ -735,6 +736,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ case SIOCKILLADDR: /* Nuke all sockets on this address */
ret = -EACCES;
if (!capable(CAP_NET_ADMIN))
goto out;
@@ -786,7 +788,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
}
ret = -EADDRNOTAVAIL;
- if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+ if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+ && cmd != SIOCKILLADDR)
goto done;
switch (cmd) {
@@ -912,6 +915,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
inet_insert_ifa(ifa);
}
break;
+ case SIOCKILLADDR: /* Nuke all connections on this address */
+ ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
+ break;
}
done:
rtnl_unlock();
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd4..73b4e91a87e7 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -113,6 +113,18 @@ config IP_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_REJECT_SKERR
+ bool "Force socket error when rejecting with icmp*"
+ depends on IP_NF_TARGET_REJECT
+ default n
+ help
+ This option additionally raises an error on the local socket when
+ a "--reject-with icmp*" rule matches its traffic.
+ The REJECT target normally only sends an ICMP message, leaving the
+ local socket unaware that its packets were rejected.
+
+ If unsure, say N.
+
config IP_NF_TARGET_LOG
tristate "LOG target support"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 51f13f8ec724..9dd754c7f2b6 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -128,6 +128,14 @@ static void send_reset(struct sk_buff *oldskb, int hook)
static inline void send_unreach(struct sk_buff *skb_in, int code)
{
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
+ if (skb_in->sk) {
+ skb_in->sk->sk_err = icmp_err_convert[code].errno;
+ skb_in->sk->sk_error_report(skb_in->sk);
+ pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
+ skb_in->sk->sk_err, skb_in, skb_in->sk);
+ }
+#endif
}
static unsigned int
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 000000000000..0cbbf10026a6
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val, ret; \
+ ret = sscanf(buf, "%d", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ if (val < 0) \
+ return -EINVAL; \
+ _var = val; \
+ return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+ &tcp_wmem_min_attr.attr,
+ &tcp_wmem_def_attr.attr,
+ &tcp_wmem_max_attr.attr,
+ &tcp_rmem_min_attr.attr,
+ &tcp_rmem_def_attr.attr,
+ &tcp_rmem_max_attr.attr,
+ NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+ .attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+ struct kobject *ipv4_kobject;
+ int ret;
+
+ ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+ if (!ipv4_kobject)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+ if (ret) {
+ kobject_put(ipv4_kobject);
+ return ret;
+ }
+
+ return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
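
For illustration, the attribute group above hangs off kernel_kobj, so the knobs appear as /sys/kernel/ipv4/tcp_{r,w}mem_{min,def,max}; a minimal userspace sketch (not part of the patch) that raises the TCP send-buffer ceiling:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/ipv4/tcp_wmem_max", "w");

	if (!f)
		return 1;
	/* Mirrors sysctl_tcp_wmem[2]; the value is in bytes. */
	fprintf(f, "%d\n", 4 * 1024 * 1024);
	return fclose(f) ? 1 : 0;
}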
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb729..09ced58e6a51 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -266,11 +266,15 @@
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/uid_stat.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
#include <net/netdma.h>
#include <net/sock.h>
@@ -1112,6 +1116,9 @@ out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
+
+ if (copied > 0)
+ uid_stat_tcp_snd(current_uid(), copied);
return copied;
do_fault:
@@ -1388,8 +1395,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
- if (copied > 0)
+ if (copied > 0) {
tcp_cleanup_rbuf(sk, copied);
+ uid_stat_tcp_rcv(current_uid(), copied);
+ }
+
return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
@@ -1771,6 +1781,9 @@ skip_copy:
tcp_cleanup_rbuf(sk, copied);
release_sock(sk);
+
+ if (copied > 0)
+ uid_stat_tcp_rcv(current_uid(), copied);
return copied;
out:
@@ -1779,6 +1792,8 @@ out:
recv_urg:
err = tcp_recv_urg(sk, msg, len, flags);
+ if (err > 0)
+ uid_stat_tcp_rcv(current_uid(), err);
goto out;
}
EXPORT_SYMBOL(tcp_recvmsg);
@@ -3310,3 +3325,107 @@ void __init tcp_init(void)
tcp_secret_retiring = &tcp_secret_two;
tcp_secret_secondary = &tcp_secret_two;
}
+
+static int tcp_is_local(struct net *net, __be32 addr) {
+ struct rtable *rt;
+ struct flowi4 fl4 = { .daddr = addr };
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR_OR_NULL(rt))
+ return 0;
+ return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
+ struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
+ return rt6 && rt6->rt6i_dev && (rt6->rt6i_dev->flags & IFF_LOOPBACK);
+}
+#endif
+
+/*
+ * tcp_nuke_addr - destroy all sockets bound to the given local address.
+ * If the local address is the unspecified address (0.0.0.0 or ::), destroy
+ * all sockets whose local addresses are no longer configured.
+ */
+int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
+{
+ int family = addr->sa_family;
+ unsigned int bucket;
+
+ struct in_addr *in;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct in6_addr *in6;
+#endif
+ if (family == AF_INET) {
+ in = &((struct sockaddr_in *)addr)->sin_addr;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ } else if (family == AF_INET6) {
+ in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
+#endif
+ } else {
+ return -EAFNOSUPPORT;
+ }
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if (family == AF_INET) {
+ __be32 s4 = inet->inet_rcv_saddr;
+ if (s4 == LOOPBACK4_IPV6)
+ continue;
+
+ if (in->s_addr != s4 &&
+ !(in->s_addr == INADDR_ANY &&
+ !tcp_is_local(net, s4)))
+ continue;
+ }
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ if (family == AF_INET6) {
+ struct in6_addr *s6;
+ if (!inet->pinet6)
+ continue;
+
+ s6 = &inet->pinet6->rcv_saddr;
+ if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
+ continue;
+
+ if (!ipv6_addr_equal(in6, s6) &&
+ !(ipv6_addr_equal(in6, &in6addr_any) &&
+ !tcp_is_local6(net, s6)))
+ continue;
+ }
+#endif
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+
+ return 0;
+}
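For illustration, a minimal userspace sketch (not part of the patch) of the SIOCKILLADDR path wired through devinet_ioctl() above. It assumes the SIOCKILLADDR constant is exported to userspace elsewhere in this series (the sockios.h hunk is not shown here) and requires CAP_NET_ADMIN:

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Reset every TCP connection bound to ipv4 on interface ifname, e.g. after
 * the interface loses that address. */
static int kill_addr(const char *ifname, const char *ipv4)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, ipv4, &sin->sin_addr);

	ret = ioctl(fd, SIOCKILLADDR, &ifr);
	close(fd);
	return ret;
}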
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12368c586068..1587d0d9295e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -824,12 +824,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
{
struct inet6_dev *idev = ifp->idev;
struct in6_addr addr, *tmpaddr;
- unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
+ unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
int max_addresses;
u32 addr_flags;
+ unsigned long now = jiffies;
write_lock(&idev->lock);
if (ift) {
@@ -874,7 +875,7 @@ retry:
goto out;
}
memcpy(&addr.s6_addr[8], idev->rndid, 8);
- age = (jiffies - ifp->tstamp) / HZ;
+ age = (now - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
@@ -884,7 +885,6 @@ retry:
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
- tmp_cstamp = ifp->cstamp;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
@@ -929,7 +929,7 @@ retry:
ift->ifpub = ifp;
ift->valid_lft = tmp_valid_lft;
ift->prefered_lft = tmp_prefered_lft;
- ift->cstamp = tmp_cstamp;
+ ift->cstamp = now;
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
@@ -1999,25 +1999,50 @@ ok:
#ifdef CONFIG_IPV6_PRIVACY
read_lock_bh(&in6_dev->lock);
/* update all temporary addresses in the list */
- list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
- /*
- * When adjusting the lifetimes of an existing
- * temporary address, only lower the lifetimes.
- * Implementations must not increase the
- * lifetimes of an existing temporary address
- * when processing a Prefix Information Option.
- */
+ list_for_each_entry(ift, &in6_dev->tempaddr_list,
+ tmp_list) {
+ int age, max_valid, max_prefered;
+
if (ifp != ift->ifpub)
continue;
+ /*
+ * RFC 4941 section 3.3:
+ * If a received option will extend the lifetime
+ * of a public address, the lifetimes of
+ * temporary addresses should be extended,
+ * subject to the overall constraint that no
+ * temporary addresses should ever remain
+ * "valid" or "preferred" for a time longer than
+ * (TEMP_VALID_LIFETIME) or
+ * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
+ * respectively.
+ */
+ age = (now - ift->cstamp) / HZ;
+ max_valid = in6_dev->cnf.temp_valid_lft - age;
+ if (max_valid < 0)
+ max_valid = 0;
+
+ max_prefered = in6_dev->cnf.temp_prefered_lft -
+ in6_dev->cnf.max_desync_factor -
+ age;
+ if (max_prefered < 0)
+ max_prefered = 0;
+
+ if (valid_lft > max_valid)
+ valid_lft = max_valid;
+
+ if (prefered_lft > max_prefered)
+ prefered_lft = max_prefered;
+
spin_lock(&ift->lock);
flags = ift->flags;
- if (ift->valid_lft > valid_lft &&
- ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ)
- ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ;
- if (ift->prefered_lft > prefered_lft &&
- ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ)
- ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ;
+ ift->valid_lft = valid_lft;
+ ift->prefered_lft = prefered_lft;
+ ift->tstamp = now;
+ if (prefered_lft > 0)
+ ift->flags &= ~IFA_F_DEPRECATED;
+
spin_unlock(&ift->lock);
if (!(flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ift);
@@ -2025,9 +2050,11 @@ ok:
if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
/*
- * When a new public address is created as described in [ADDRCONF],
- * also create a new temporary address. Also create a temporary
- * address if it's enabled but no temporary address currently exists.
+ * When a new public address is created as
+ * described in [ADDRCONF], also create a new
+ * temporary address. Also create a temporary
+ * address if it's enabled but no temporary
+ * address currently exists.
*/
read_unlock_bh(&in6_dev->lock);
ipv6_create_tempaddr(ifp, NULL);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d27c797f9f05..4252b3cc183d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -63,6 +63,20 @@
#include <asm/system.h>
#include <linux/mroute6.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
+
MODULE_AUTHOR("Cast of dozens");
MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
MODULE_LICENSE("GPL");
@@ -109,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (sock->type != SOCK_RAW &&
sock->type != SOCK_DGRAM &&
!inet_ehash_secret)
@@ -477,6 +494,21 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
EXPORT_SYMBOL(inet6_getname);
+int inet6_killaddr_ioctl(struct net *net, void __user *arg) {
+ struct in6_ifreq ireq;
+ struct sockaddr_in6 sin6;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
+ return -EFAULT;
+
+ sin6.sin6_family = AF_INET6;
+ ipv6_addr_copy(&sin6.sin6_addr, &ireq.ifr6_addr);
+ return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
+}
+
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
@@ -501,6 +533,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return addrconf_del_ifaddr(net, (void __user *) arg);
case SIOCSIFDSTADDR:
return addrconf_set_dstaddr(net, (void __user *) arg);
+ case SIOCKILLADDR:
+ return inet6_killaddr_ioctl(net, (void __user *) arg);
default:
if (!sk->sk_prot->ioctl)
return -ENOIOCTLCMD;
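The SIOCKILLADDR hunk above lets a privileged process force-reset TCP connections that use a given local IPv6 address (via tcp_nuke_addr()). A hedged userspace sketch of invoking it, assuming the patched tree's headers export SIOCKILLADDR and struct in6_ifreq as used above; header ordering between libc and kernel headers may need care on some systems:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ipv6.h>    /* struct in6_ifreq */
#include <linux/sockios.h> /* SIOCKILLADDR (defined by this Android patch set) */

/* Sketch: ask the kernel to reset TCP connections bound to 'addr'.
 * Requires CAP_NET_ADMIN, as checked in inet6_killaddr_ioctl(). */
static int kill_ipv6_addr(const struct in6_addr *addr)
{
	struct in6_ifreq ireq;
	int ret, fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ireq, 0, sizeof(ireq));
	memcpy(&ireq.ifr6_addr, addr, sizeof(ireq.ifr6_addr));
	ret = ioctl(fd, SIOCKILLADDR, &ireq);
	close(fd);
	return ret;
}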
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 448464844a25..5bbf53169202 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -174,6 +174,18 @@ config IP6_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_TARGET_REJECT_SKERR
+ bool "Force socket error when rejecting with icmp*"
+ depends on IP6_NF_TARGET_REJECT
+ default n
+ help
+ This option additionally turns a "--reject-with icmp*" into a matching
+ error on the local socket.
+ The REJECT target normally only sends an ICMP message and leaves the
+ local socket unaware that its traffic was rejected.
+
+ If unsure, say N.
+
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcdc..14cb310064f6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -2292,16 +2292,15 @@ static void __exit ip6_tables_fini(void)
* "No next header".
*
* If target header is found, its offset is set in *offset and return protocol
- * number. Otherwise, return -1.
+ * number. Otherwise, return -ENOENT or -EBADMSG.
*
* If the first fragment doesn't contain the final protocol header or
* NEXTHDR_NONE it is considered invalid.
*
* Note that non-1st fragment is special case that "the protocol number
* of last header" is "next header" field in Fragment header. In this case,
- * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
- * isn't NULL.
- *
+ * *offset is meaningless. If fragoff is not NULL, the fragment offset is
+ * stored in *fragoff; if it is NULL, return -EINVAL.
*/
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
int target, unsigned short *fragoff)
@@ -2342,9 +2341,12 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
if (target < 0 &&
((!ipv6_ext_hdr(hp->nexthdr)) ||
hp->nexthdr == NEXTHDR_NONE)) {
- if (fragoff)
+ if (fragoff) {
*fragoff = _frag_off;
- return hp->nexthdr;
+ return hp->nexthdr;
+ } else {
+ return -EINVAL;
+ }
}
return -ENOENT;
}
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5dd5396..09d30498c927 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -177,6 +177,15 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
skb_in->dev = net->loopback_dev;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
+ if (skb_in->sk) {
+ icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
+ &skb_in->sk->sk_err);
+ skb_in->sk->sk_error_report(skb_in->sk);
+ pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
+ skb_in->sk->sk_err, skb_in, skb_in->sk);
+ }
+#endif
}
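With CONFIG_IP6_NF_TARGET_REJECT_SKERR enabled, the block above also latches the ICMPv6 error into the originating socket (sk_err) and wakes it via sk_error_report(). A local sender whose packet hits a "--reject-with" rule can then observe the failure directly; the exact errno comes from icmpv6_err_convert() and depends on the reject code. A minimal userspace sketch (not from the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch: after a send that was rejected by an ip6tables REJECT rule,
 * the pending socket error can be read (and cleared) with SO_ERROR. */
static void report_reject_error(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
		printf("socket error after reject: %s\n", strerror(err));
}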
static unsigned int
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d86cb2..5bd5c612a9bf 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -902,6 +902,8 @@ config NETFILTER_XT_MATCH_OWNER
based on who created the socket: the user or group. It is also
possible to check whether a socket actually exists.
+ Conflicts with the '"quota, tag, owner" match' (NETFILTER_XT_MATCH_QTAGUID).
+
config NETFILTER_XT_MATCH_POLICY
tristate 'IPsec "policy" match support'
depends on XFRM
@@ -935,6 +937,22 @@ config NETFILTER_XT_MATCH_PKTTYPE
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_QTAGUID
+ bool '"quota, tag, owner" match and stats support'
+ depends on NETFILTER_XT_MATCH_SOCKET
+ depends on NETFILTER_XT_MATCH_OWNER=n
+ help
+ This option replaces the `owner' match. In addition to matching
+ on uid, it keeps stats based on a tag assigned to a socket.
+ The full tag is comprised of a UID and an accounting tag.
+ The tags are assignable to sockets from user space (e.g. a download
+ manager can assign the socket to another UID for accounting).
+ Stats and control are done via /proc/net/xt_qtaguid/.
+ It takes the same arguments as the `owner' match, but needs to be
+ recognized as a separate match by the iptables tool.
+
+ If unsure, say `N'.
+
config NETFILTER_XT_MATCH_QUOTA
tristate '"quota" match support'
depends on NETFILTER_ADVANCED
@@ -945,6 +963,30 @@ config NETFILTER_XT_MATCH_QUOTA
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_QUOTA2
+ tristate '"quota2" match support'
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `quota2' match, which matches against a byte
+ counter that is kept globally (correctly) rather than per CPU.
+ It also allows naming the quotas.
+ This is based on http://xtables-addons.git.sourceforge.net
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+ bool '"quota2" Netfilter LOG support'
+ depends on NETFILTER_XT_MATCH_QUOTA2
+ depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no
+ default n
+ help
+ This option allows `quota2' to log ONCE when a quota limit
+ is exceeded. It logs via NETLINK using the NETLINK_NFLOG family.
+ It logs similarly to how ipt_ULOG would, but without the packet data.
+
+ If unsure, say `N'.
+
config NETFILTER_XT_MATCH_RATEEST
tristate '"rateest" match support'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853df863..6d917176c3b8 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -95,7 +95,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 000000000000..08086d680c2c
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,2785 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+ ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+#else
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUSR;
+#endif
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+static gid_t proc_stats_readall_gid = AID_NET_BW_STATS;
+static gid_t proc_ctrl_write_gid = AID_NET_BW_ACCT;
+#else
+/* 0 means, don't limit anybody */
+static gid_t proc_stats_readall_gid;
+static gid_t proc_ctrl_write_gid;
+#endif
+module_param_named(stats_readall_gid, proc_stats_readall_gid, uint,
+ S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint,
+ S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ * - the iface stats handling will not act on notifications.
+ * - iptables matches will never match.
+ * - ctrl commands silently succeed.
+ * - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ * - don't create proc specific structs to track tags
+ * - don't check that active tag stats exceed some limits.
+ * - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+ S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+
+/*
+ * Ordering of locks:
+ * outer locks:
+ * iface_stat_list_lock
+ * sock_tag_list_lock
+ * inner locks:
+ * uid_tag_data_tree_lock
+ * tag_counter_set_list_lock
+ * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock
+ * is acquired.
+ *
+ * Call tree with all lock holders as of 2011-09-25:
+ *
+ * iface_stat_all_proc_read()
+ * iface_stat_list_lock
+ * (struct iface_stat)
+ *
+ * qtaguid_ctrl_proc_read()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * (struct proc_qtu_data->sock_tag_list)
+ * prdebug_full_state()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * uid_tag_data_tree_lock
+ * (uid_tag_data_tree)
+ * (proc_qtu_data_tree)
+ * iface_stat_list_lock
+ *
+ * qtaguid_stats_proc_read()
+ * iface_stat_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ *
+ * qtudev_open()
+ * uid_tag_data_tree_lock
+ *
+ * qtudev_release()
+ * sock_tag_data_list_lock
+ * uid_tag_data_tree_lock
+ * prdebug_full_state()
+ * sock_tag_list_lock
+ * uid_tag_data_tree_lock
+ * iface_stat_list_lock
+ *
+ * iface_netdev_event_handler()
+ * iface_stat_create()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * iface_inetaddr_event_handler()
+ * iface_stat_create()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * iface_inet6addr_event_handler()
+ * iface_stat_create_ipv6()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * qtaguid_mt()
+ * account_for_uid()
+ * if_tag_stat_update()
+ * get_sock_stat()
+ * sock_tag_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ * tag_stat_update()
+ * get_active_counter_set()
+ * tag_counter_set_list_lock
+ * tag_stat_update()
+ * get_active_counter_set()
+ * tag_counter_set_list_lock
+ *
+ *
+ * qtaguid_ctrl_parse()
+ * ctrl_cmd_delete()
+ * sock_tag_list_lock
+ * tag_counter_set_list_lock
+ * iface_stat_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ * uid_tag_data_tree_lock
+ * ctrl_cmd_counter_set()
+ * tag_counter_set_list_lock
+ * ctrl_cmd_tag()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * get_tag_ref()
+ * uid_tag_data_tree_lock
+ * (uid_tag_data_tree)
+ * uid_tag_data_tree_lock
+ * (proc_qtu_data_tree)
+ * ctrl_cmd_untag()
+ * sock_tag_list_lock
+ * uid_tag_data_tree_lock
+ *
+ */
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
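The ordering documented above boils down to: take sock_tag_list_lock before uid_tag_data_tree_lock whenever both trees are walked together (as prdebug_full_state() does further down). A minimal sketch of the nesting, not part of the patch:

static void example_locked_walk(void)
{
	spin_lock_bh(&sock_tag_list_lock);      /* outer: protects sock_tag_tree        */
	spin_lock_bh(&uid_tag_data_tree_lock);  /* inner: uid_tag_data/proc_qtu trees   */

	/* ... consistent walk of sock_tag_tree and uid_tag_data_tree ... */

	spin_unlock_bh(&uid_tag_data_tree_lock);
	spin_unlock_bh(&sock_tag_list_lock);
}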
+/*----------------------------------------------*/
+static bool can_manipulate_uids(void)
+{
+ /* root pwnd */
+ return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid)
+ || in_egroup_p(proc_ctrl_write_gid);
+}
+
+static bool can_impersonate_uid(uid_t uid)
+{
+ return uid == current_fsuid() || can_manipulate_uids();
+}
+
+static bool can_read_other_uid_stats(uid_t uid)
+{
+ /* root pwnd */
+ return unlikely(!current_fsuid()) || uid == current_fsuid()
+ || unlikely(!proc_stats_readall_gid)
+ || in_egroup_p(proc_stats_readall_gid);
+}
+
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+ enum ifs_tx_rx direction,
+ enum ifs_proto ifs_proto,
+ int bytes,
+ int packets)
+{
+ counters->bpc[set][direction][ifs_proto].bytes += bytes;
+ counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+ int set,
+ enum ifs_tx_rx direction)
+{
+ return counters->bpc[set][direction][IFS_TCP].bytes
+ + counters->bpc[set][direction][IFS_UDP].bytes
+ + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+}
+
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+ int set,
+ enum ifs_tx_rx direction)
+{
+ return counters->bpc[set][direction][IFS_TCP].packets
+ + counters->bpc[set][direction][IFS_UDP].packets
+ + counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+}
+
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct tag_node *data = rb_entry(node, struct tag_node, node);
+ int result;
+ RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+ " node=%p data=%p\n", tag, node, data);
+ result = tag_compare(tag, data->tag);
+ RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+ " data.tag=0x%llx (uid=%u) res=%d\n",
+ tag, data->tag, get_uid_from_tag(data->tag), result);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct tag_node *this = rb_entry(*new, struct tag_node,
+ node);
+ int result = tag_compare(data->tag, this->tag);
+ RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+ " (uid=%u)\n", __func__,
+ this->tag,
+ get_uid_from_tag(this->tag));
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+ tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+ struct tag_node *node = tag_node_tree_search(root, tag);
+ if (!node)
+ return NULL;
+ return rb_entry(&node->node, struct tag_stat, tn.node);
+}
+
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+ struct rb_root *root)
+{
+ tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+ tag_t tag)
+{
+ struct tag_node *node = tag_node_tree_search(root, tag);
+ if (!node)
+ return NULL;
+ return rb_entry(&node->node, struct tag_counter_set, tn.node);
+
+}
+
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+ tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+ struct tag_node *node = tag_node_tree_search(root, tag);
+ if (!node)
+ return NULL;
+ return rb_entry(&node->node, struct tag_ref, tn.node);
+}
+
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+ const struct sock *sk)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct sock_tag *data = rb_entry(node, struct sock_tag,
+ sock_node);
+ if (sk < data->sk)
+ node = node->rb_left;
+ else if (sk > data->sk)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct sock_tag *this = rb_entry(*new, struct sock_tag,
+ sock_node);
+ parent = *new;
+ if (data->sk < this->sk)
+ new = &((*new)->rb_left);
+ else if (data->sk > this->sk)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->sock_node, parent, new);
+ rb_insert_color(&data->sock_node, root);
+}
+
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+ struct rb_node *node;
+ struct sock_tag *st_entry;
+
+ node = rb_first(st_to_free_tree);
+ while (node) {
+ st_entry = rb_entry(node, struct sock_tag, sock_node);
+ node = rb_next(node);
+ CT_DEBUG("qtaguid: %s(): "
+ "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+ st_entry->sk,
+ st_entry->tag,
+ get_uid_from_tag(st_entry->tag));
+ rb_erase(&st_entry->sock_node, st_to_free_tree);
+ sockfd_put(st_entry->socket);
+ kfree(st_entry);
+ }
+}
+
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+ const pid_t pid)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct proc_qtu_data *data = rb_entry(node,
+ struct proc_qtu_data,
+ node);
+ if (pid < data->pid)
+ node = node->rb_left;
+ else if (pid > data->pid)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+ struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct proc_qtu_data *this = rb_entry(*new,
+ struct proc_qtu_data,
+ node);
+ parent = *new;
+ if (data->pid < this->pid)
+ new = &((*new)->rb_left);
+ else if (data->pid > this->pid)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+ struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct uid_tag_data *this = rb_entry(*new,
+ struct uid_tag_data,
+ node);
+ parent = *new;
+ if (data->uid < this->uid)
+ new = &((*new)->rb_left);
+ else if (data->uid > this->uid)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+ uid_t uid)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct uid_tag_data *data = rb_entry(node,
+ struct uid_tag_data,
+ node);
+ if (uid < data->uid)
+ node = node->rb_left;
+ else if (uid > data->uid)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+/*
+ * Looks up the uid_tag_data for the given UID, allocating a new entry
+ * if none exists.
+ * Returns a pointer to the found or newly allocated uid_tag_data, or a
+ * PTR_ERR() on allocation failure. No locking is done by this function.
+ * If found_res is not NULL:
+ * *found_res is set to true if an existing entry was found,
+ * and to false if a new entry had to be allocated.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+ struct uid_tag_data *utd_entry;
+
+ /* Look for top level uid_tag_data for the UID */
+ utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+ DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+ if (found_res)
+ *found_res = utd_entry;
+ if (utd_entry)
+ return utd_entry;
+
+ utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+ if (!utd_entry) {
+ pr_err("qtaguid: get_uid_data(%u): "
+ "tag data alloc failed\n", uid);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ utd_entry->uid = uid;
+ utd_entry->tag_ref_tree = RB_ROOT;
+ uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+ DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+ return utd_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+ struct uid_tag_data *utd_entry)
+{
+ struct tag_ref *tr_entry;
+ int res;
+
+ if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+ pr_info("qtaguid: new_tag_ref(0x%llx): "
+ "tag ref alloc quota exceeded. max=%d\n",
+ new_tag, max_sock_tags);
+ res = -EMFILE;
+ goto err_res;
+
+ }
+
+ tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+ if (!tr_entry) {
+ pr_err("qtaguid: new_tag_ref(0x%llx): "
+ "tag ref alloc failed\n",
+ new_tag);
+ res = -ENOMEM;
+ goto err_res;
+ }
+ tr_entry->tn.tag = new_tag;
+ /* tr_entry->num_sock_tags handled by caller */
+ utd_entry->num_active_tags++;
+ tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+ DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+ " inserted new tag ref %p\n",
+ new_tag, tr_entry);
+ return tr_entry;
+
+err_res:
+ return ERR_PTR(res);
+}
+
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+ struct uid_tag_data **utd_res)
+{
+ struct uid_tag_data *utd_entry;
+ struct tag_ref *tr_entry;
+ bool found_utd;
+ uid_t uid = get_uid_from_tag(full_tag);
+
+ DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+ full_tag, uid);
+
+ utd_entry = get_uid_data(uid, &found_utd);
+ if (IS_ERR_OR_NULL(utd_entry)) {
+ if (utd_res)
+ *utd_res = utd_entry;
+ return NULL;
+ }
+
+ tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+ if (utd_res)
+ *utd_res = utd_entry;
+ DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+ full_tag, utd_entry, tr_entry);
+ return tr_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+ struct uid_tag_data **utd_res)
+{
+ struct uid_tag_data *utd_entry;
+ struct tag_ref *tr_entry;
+
+ DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+ full_tag);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+ BUG_ON(IS_ERR_OR_NULL(utd_entry));
+ if (!tr_entry)
+ tr_entry = new_tag_ref(full_tag, utd_entry);
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ if (utd_res)
+ *utd_res = utd_entry;
+ DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+ full_tag, utd_entry, tr_entry);
+ return tr_entry;
+}
+
+/* Checks and maybe frees the UID Tag Data entry */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+ /* Are we done with the UID tag data entry? */
+ if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+ !utd_entry->num_pqd) {
+ DR_DEBUG("qtaguid: %s(): "
+ "erase utd_entry=%p uid=%u "
+ "by pid=%u tgid=%u uid=%u\n", __func__,
+ utd_entry, utd_entry->uid,
+ current->pid, current->tgid, current_fsuid());
+ BUG_ON(utd_entry->num_active_tags);
+ rb_erase(&utd_entry->node, &uid_tag_data_tree);
+ kfree(utd_entry);
+ } else {
+ DR_DEBUG("qtaguid: %s(): "
+ "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+ __func__, utd_entry, utd_entry->num_active_tags,
+ utd_entry->num_pqd);
+ BUG_ON(!(utd_entry->num_active_tags ||
+ utd_entry->num_pqd));
+ }
+}
+
+/*
+ * If no sock_tags are using this tag_ref,
+ * decrements utd_entry->num_active_tags, removes tr_entry
+ * from utd_entry->tag_ref_tree and frees it.
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+ struct uid_tag_data *utd_entry)
+{
+ DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+ tr_entry, tr_entry->tn.tag,
+ get_uid_from_tag(tr_entry->tn.tag));
+ if (!tr_entry->num_sock_tags) {
+ BUG_ON(!utd_entry->num_active_tags);
+ utd_entry->num_active_tags--;
+ rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+ DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+ kfree(tr_entry);
+ }
+}
+
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+ struct rb_node *node;
+ struct tag_ref *tr_entry;
+ tag_t acct_tag;
+
+ DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+ full_tag, get_uid_from_tag(full_tag));
+ acct_tag = get_atag_from_tag(full_tag);
+ node = rb_first(&utd_entry->tag_ref_tree);
+ while (node) {
+ tr_entry = rb_entry(node, struct tag_ref, tn.node);
+ node = rb_next(node);
+ if (!acct_tag || tr_entry->tn.tag == full_tag)
+ free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+ }
+}
+
+static int read_proc_u64(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ uint64_t value;
+ char *p = page;
+ uint64_t *iface_entry = data;
+
+ if (!data)
+ return 0;
+
+ value = *iface_entry;
+ p += sprintf(p, "%llu\n", value);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int read_proc_bool(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ bool value;
+ char *p = page;
+ bool *bool_entry = data;
+
+ if (!data)
+ return 0;
+
+ value = *bool_entry;
+ p += sprintf(p, "%u\n", value);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int get_active_counter_set(tag_t tag)
+{
+ int active_set = 0;
+ struct tag_counter_set *tcs;
+
+ MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+ " (uid=%u)\n",
+ tag, get_uid_from_tag(tag));
+ /* For now we only handle UID tags for active sets */
+ tag = get_utag_from_tag(tag);
+ spin_lock_bh(&tag_counter_set_list_lock);
+ tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (tcs)
+ active_set = tcs->active_set;
+ spin_unlock_bh(&tag_counter_set_list_lock);
+ return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+ struct iface_stat *iface_entry;
+
+ /* Can't track anything without a device name */
+ if (ifname == NULL) {
+ pr_info("qtaguid: iface_stat: get() NULL device name\n");
+ return NULL;
+ }
+
+ /* Iterate over interfaces */
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ if (!strcmp(ifname, iface_entry->ifname))
+ goto done;
+ }
+ iface_entry = NULL;
+done:
+ return iface_entry;
+}
+
+static int iface_stat_all_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count,
+ int *eof, void *data)
+{
+ char *outp = page;
+ int item_index = 0;
+ int len;
+ struct iface_stat *iface_entry;
+ struct rtnl_link_stats64 dev_stats, *stats;
+ struct rtnl_link_stats64 no_dev_stats = {0};
+
+ if (unlikely(module_passive)) {
+ *eof = 1;
+ return 0;
+ }
+
+ CT_DEBUG("qtaguid:proc iface_stat_all "
+ "page=%p *num_items_returned=%p off=%ld "
+ "char_count=%d *eof=%d\n", page, *num_items_returned,
+ items_to_skip, char_count, *eof);
+
+ if (*eof)
+ return 0;
+
+ /*
+ * This lock will prevent iface_stat_update() from changing active,
+ * and in turn prevent an interface from unregistering itself.
+ */
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ if (item_index++ < items_to_skip)
+ continue;
+
+ if (iface_entry->active) {
+ stats = dev_get_stats(iface_entry->net_dev,
+ &dev_stats);
+ } else {
+ stats = &no_dev_stats;
+ }
+ len = snprintf(outp, char_count,
+ "%s %d "
+ "%llu %llu %llu %llu "
+ "%llu %llu %llu %llu\n",
+ iface_entry->ifname,
+ iface_entry->active,
+ iface_entry->totals[IFS_RX].bytes,
+ iface_entry->totals[IFS_RX].packets,
+ iface_entry->totals[IFS_TX].bytes,
+ iface_entry->totals[IFS_TX].packets,
+ stats->rx_bytes, stats->rx_packets,
+ stats->tx_bytes, stats->tx_packets);
+ if (len >= char_count) {
+ spin_unlock_bh(&iface_stat_list_lock);
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ *eof = 1;
+ return outp - page;
+}
+
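Each line emitted above has the fixed layout "<ifname> <active> <rx_bytes> <rx_packets> <tx_bytes> <tx_packets> <dev_rx_bytes> <dev_rx_packets> <dev_tx_bytes> <dev_tx_packets>", where the first four counters are this module's accumulated totals and the last four come straight from dev_get_stats(). A small userspace parse sketch, assuming the file is exposed as /proc/net/xt_qtaguid/iface_stat_all per the Kconfig help (not part of the patch):

#include <inttypes.h>
#include <stdio.h>

/* Sketch: parse one line of iface_stat_all as formatted above. */
static void parse_iface_stat_all_line(const char *line)
{
	char ifname[64];
	int active;
	uint64_t rx_b, rx_p, tx_b, tx_p, dev_rx_b, dev_rx_p, dev_tx_b, dev_tx_p;

	if (sscanf(line,
		   "%63s %d %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
		   " %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64,
		   ifname, &active, &rx_b, &rx_p, &tx_b, &tx_p,
		   &dev_rx_b, &dev_rx_p, &dev_tx_b, &dev_tx_p) == 10)
		printf("%s: module rx/tx=%" PRIu64 "/%" PRIu64 " bytes\n",
		       ifname, rx_b, tx_b);
}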
+static void iface_create_proc_worker(struct work_struct *work)
+{
+ struct proc_dir_entry *proc_entry;
+ struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+ iface_work);
+ struct iface_stat *new_iface = isw->iface_entry;
+
+ /* iface_entries are not deleted, so safe to manipulate. */
+ proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+ if (IS_ERR_OR_NULL(proc_entry)) {
+ pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+ kfree(isw);
+ return;
+ }
+
+ new_iface->proc_ptr = proc_entry;
+
+ create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry,
+ read_proc_u64, &new_iface->totals[IFS_TX].bytes);
+ create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry,
+ read_proc_u64, &new_iface->totals[IFS_RX].bytes);
+ create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry,
+ read_proc_u64, &new_iface->totals[IFS_TX].packets);
+ create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry,
+ read_proc_u64, &new_iface->totals[IFS_RX].packets);
+ create_proc_read_entry("active", proc_iface_perms, proc_entry,
+ read_proc_bool, &new_iface->active);
+
+ IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+ "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+ kfree(isw);
+}
+
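The worker above exposes each interface's counters as individual read-only proc files. A short userspace sketch of reading one of them, assuming the /proc/net/xt_qtaguid/iface_stat/<ifname>/ layout implied by the procdir names and the Kconfig help (not part of the patch):

#include <stdio.h>

/* Sketch: read the module's rx byte counter for one interface. */
static long long read_iface_rx_bytes(const char *ifname)
{
	char path[128];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/net/xt_qtaguid/iface_stat/%s/rx_bytes", ifname);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%lld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}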
+/*
+ * Sets the entry's active state and updates its
+ * net_dev pointer accordingly.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+ struct net_device *net_dev,
+ bool activate)
+{
+ if (activate) {
+ entry->net_dev = net_dev;
+ entry->active = true;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "enable tracking. rfcnt=%d\n", __func__,
+ entry->ifname,
+ percpu_read(*net_dev->pcpu_refcnt));
+ } else {
+ entry->active = false;
+ entry->net_dev = NULL;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "disable tracking. rfcnt=%d\n", __func__,
+ entry->ifname,
+ percpu_read(*net_dev->pcpu_refcnt));
+
+ }
+}
+
+/* Caller must hold iface_stat_list_lock */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+ struct iface_stat *new_iface;
+ struct iface_stat_work *isw;
+
+ new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+ if (new_iface == NULL) {
+ pr_err("qtaguid: iface_stat: create(%s): "
+ "iface_stat alloc failed\n", net_dev->name);
+ return NULL;
+ }
+ new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+ if (new_iface->ifname == NULL) {
+ pr_err("qtaguid: iface_stat: create(%s): "
+ "ifname alloc failed\n", net_dev->name);
+ kfree(new_iface);
+ return NULL;
+ }
+ spin_lock_init(&new_iface->tag_stat_list_lock);
+ new_iface->tag_stat_tree = RB_ROOT;
+ _iface_stat_set_active(new_iface, net_dev, true);
+
+ /*
+ * ipv6 notifier chains are atomic :( so we cannot call
+ * create_proc_read_entry() here; defer it to a workqueue.
+ */
+ isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+ if (!isw) {
+ pr_err("qtaguid: iface_stat: create(%s): "
+ "work alloc failed\n", new_iface->ifname);
+ _iface_stat_set_active(new_iface, net_dev, false);
+ kfree(new_iface->ifname);
+ kfree(new_iface);
+ return NULL;
+ }
+ isw->iface_entry = new_iface;
+ INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+ schedule_work(&isw->iface_work);
+ list_add(&new_iface->list, &iface_stat_list);
+ return new_iface;
+}
+
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+ struct iface_stat *iface)
+{
+ struct rtnl_link_stats64 dev_stats, *stats;
+ bool stats_rewound;
+
+ stats = dev_get_stats(net_dev, &dev_stats);
+ /* Detect whether the device's byte counters went backwards (reset) */
+ stats_rewound =
+ (stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+ || (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+ IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+ "bytes rx/tx=%llu/%llu "
+ "active=%d last_known=%d "
+ "stats_rewound=%d\n", __func__,
+ net_dev ? net_dev->name : "?",
+ iface, net_dev,
+ stats->rx_bytes, stats->tx_bytes,
+ iface->active, iface->last_known_valid, stats_rewound);
+
+ if (iface->active && iface->last_known_valid && stats_rewound) {
+ pr_warn_once("qtaguid: iface_stat: %s(%s): "
+ "iface reset its stats unexpectedly\n", __func__,
+ net_dev->name);
+
+ iface->totals[IFS_TX].bytes += iface->last_known[IFS_TX].bytes;
+ iface->totals[IFS_TX].packets +=
+ iface->last_known[IFS_TX].packets;
+ iface->totals[IFS_RX].bytes += iface->last_known[IFS_RX].bytes;
+ iface->totals[IFS_RX].packets +=
+ iface->last_known[IFS_RX].packets;
+ iface->last_known_valid = false;
+ IF_DEBUG("qtaguid: %s(%s): iface=%p "
+ "used last known bytes rx/tx=%llu/%llu\n", __func__,
+ iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+ iface->last_known[IFS_TX].bytes);
+ }
+}
+
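Worked example of the adjustment above: suppose the stashed last_known tx bytes were 1,000,000 and, after a driver reset, dev_get_stats() now reports only 4,096. Since 4,096 < 1,000,000 the stats are considered rewound, so the 1,000,000 bytes already seen are folded into totals[IFS_TX].bytes and last_known_valid is cleared; the device's new, smaller counter is picked up again by iface_stat_update() later, so no traffic is lost from the totals.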
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+ struct in_ifaddr *ifa)
+{
+ struct in_device *in_dev = NULL;
+ const char *ifname;
+ struct iface_stat *entry;
+ __be32 ipaddr = 0;
+ struct iface_stat *new_iface;
+
+ IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+ net_dev ? net_dev->name : "?",
+ ifa, net_dev);
+ if (!net_dev) {
+ pr_err("qtaguid: iface_stat: create(): no net dev\n");
+ return;
+ }
+
+ ifname = net_dev->name;
+ if (!ifa) {
+ in_dev = in_dev_get(net_dev);
+ if (!in_dev) {
+ pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+ ifname);
+ return;
+ }
+ IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+ ifname, in_dev);
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+ IF_DEBUG("qtaguid: iface_stat: create(%s): "
+ "ifa=%p ifa_label=%s\n",
+ ifname, ifa,
+ ifa->ifa_label ? ifa->ifa_label : "(null)");
+ if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+ break;
+ }
+ }
+
+ if (!ifa) {
+ IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+ ifname);
+ goto done_put;
+ }
+ ipaddr = ifa->ifa_local;
+
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(ifname);
+ if (entry != NULL) {
+ bool activate = !ipv4_is_loopback(ipaddr);
+ IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+ ifname, entry);
+ iface_check_stats_reset_and_adjust(net_dev, entry);
+ _iface_stat_set_active(entry, net_dev, activate);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "tracking now %d on ip=%pI4\n", __func__,
+ entry->ifname, activate, &ipaddr);
+ goto done_unlock_put;
+ } else if (ipv4_is_loopback(ipaddr)) {
+ IF_DEBUG("qtaguid: iface_stat: create(%s): "
+ "ignore loopback dev. ip=%pI4\n", ifname, &ipaddr);
+ goto done_unlock_put;
+ }
+
+ new_iface = iface_alloc(net_dev);
+ IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+ "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+ spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+ if (in_dev)
+ in_dev_put(in_dev);
+}
+
+static void iface_stat_create_ipv6(struct net_device *net_dev,
+ struct inet6_ifaddr *ifa)
+{
+ struct in_device *in_dev;
+ const char *ifname;
+ struct iface_stat *entry;
+ struct iface_stat *new_iface;
+ int addr_type;
+
+ IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
+ ifa, net_dev, net_dev ? net_dev->name : "");
+ if (!net_dev) {
+ pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
+ return;
+ }
+ ifname = net_dev->name;
+
+ in_dev = in_dev_get(net_dev);
+ if (!in_dev) {
+ pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
+ ifname);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
+ ifname, in_dev);
+
+ if (!ifa) {
+ IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
+ ifname);
+ goto done_put;
+ }
+ addr_type = ipv6_addr_type(&ifa->addr);
+
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(ifname);
+ if (entry != NULL) {
+ bool activate = !(addr_type & IPV6_ADDR_LOOPBACK);
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ ifname, entry);
+ iface_check_stats_reset_and_adjust(net_dev, entry);
+ _iface_stat_set_active(entry, net_dev, activate);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "tracking now %d on ip=%pI6c\n", __func__,
+ entry->ifname, activate, &ifa->addr);
+ goto done_unlock_put;
+ } else if (addr_type & IPV6_ADDR_LOOPBACK) {
+ IF_DEBUG("qtaguid: %s(%s): "
+ "ignore loopback dev. ip=%pI6c\n", __func__,
+ ifname, &ifa->addr);
+ goto done_unlock_put;
+ }
+
+ new_iface = iface_alloc(net_dev);
+ IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
+ "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
+
+done_unlock_put:
+ spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+ in_dev_put(in_dev);
+}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+ MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+ return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+ struct sock_tag *sock_tag_entry;
+ MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+ if (!sk)
+ return NULL;
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(sk);
+ spin_unlock_bh(&sock_tag_list_lock);
+ return sock_tag_entry;
+}
+
+static void
+data_counters_update(struct data_counters *dc, int set,
+ enum ifs_tx_rx direction, int proto, int bytes)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+ break;
+ case IPPROTO_UDP:
+ dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+ break;
+ case IPPROTO_IP:
+ default:
+ dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+ 1);
+ break;
+ }
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is being unregistered.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+ struct rtnl_link_stats64 dev_stats, *stats;
+ struct iface_stat *entry;
+
+ stats = dev_get_stats(net_dev, &dev_stats);
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(net_dev->name);
+ if (entry == NULL) {
+ IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+ net_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ net_dev->name, entry);
+ if (!entry->active) {
+ IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+ net_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ if (stash_only) {
+ entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+ entry->last_known[IFS_TX].packets = stats->tx_packets;
+ entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+ entry->last_known[IFS_RX].packets = stats->rx_packets;
+ entry->last_known_valid = true;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+ net_dev->name, stats->rx_bytes, stats->tx_bytes);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+ entry->totals[IFS_TX].bytes += stats->tx_bytes;
+ entry->totals[IFS_TX].packets += stats->tx_packets;
+ entry->totals[IFS_RX].bytes += stats->rx_bytes;
+ entry->totals[IFS_RX].packets += stats->rx_packets;
+ /* We don't need the last_known[] anymore */
+ entry->last_known_valid = false;
+ _iface_stat_set_active(entry, net_dev, false);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "disable tracking. rx/tx=%llu/%llu\n", __func__,
+ net_dev->name, stats->rx_bytes, stats->tx_bytes);
+ spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static void tag_stat_update(struct tag_stat *tag_entry,
+ enum ifs_tx_rx direction, int proto, int bytes)
+{
+ int active_set;
+ active_set = get_active_counter_set(tag_entry->tn.tag);
+ MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+ "dir=%d proto=%d bytes=%d)\n",
+ tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+ active_set, direction, proto, bytes);
+ data_counters_update(&tag_entry->counters, active_set, direction,
+ proto, bytes);
+ if (tag_entry->parent_counters)
+ data_counters_update(tag_entry->parent_counters, active_set,
+ direction, proto, bytes);
+}
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+ tag_t tag)
+{
+ struct tag_stat *new_tag_stat_entry = NULL;
+ IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+ " (uid=%u)\n", __func__,
+ iface_entry, tag, get_uid_from_tag(tag));
+ new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+ if (!new_tag_stat_entry) {
+ pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+ goto done;
+ }
+ new_tag_stat_entry->tn.tag = tag;
+ tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+ return new_tag_stat_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+ const struct sock *sk, enum ifs_tx_rx direction,
+ int proto, int bytes)
+{
+ struct tag_stat *tag_stat_entry;
+ tag_t tag, acct_tag;
+ tag_t uid_tag;
+ struct data_counters *uid_tag_counters;
+ struct sock_tag *sock_tag_entry;
+ struct iface_stat *iface_entry;
+ struct tag_stat *new_tag_stat;
+ MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+ "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+ ifname, uid, sk, direction, proto, bytes);
+
+
+ iface_entry = get_iface_entry(ifname);
+ if (!iface_entry) {
+ pr_err("qtaguid: iface_stat: stat_update() %s not found\n",
+ ifname);
+ return;
+ }
+ /* It is ok to process data when an iface_entry is inactive */
+
+ MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+ ifname, iface_entry);
+
+ /*
+ * Look for a tagged sock.
+ * If found, it carries the full tag (acct_tag + uid).
+ */
+ sock_tag_entry = get_sock_stat(sk);
+ if (sock_tag_entry) {
+ tag = sock_tag_entry->tag;
+ acct_tag = get_atag_from_tag(tag);
+ uid_tag = get_utag_from_tag(tag);
+ } else {
+ acct_tag = make_atag_from_value(0);
+ tag = combine_atag_with_uid(acct_tag, uid);
+ uid_tag = make_tag_from_uid(uid);
+ }
+ MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+ " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+ tag, get_uid_from_tag(tag), iface_entry);
+ /* Loop over tag list under this interface for {acct_tag,uid_tag} */
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+ tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+ tag);
+ if (tag_stat_entry) {
+ /*
+ * Updating the {acct_tag, uid_tag} entry handles both stats:
+ * {0, uid_tag} will also get updated.
+ */
+ tag_stat_update(tag_stat_entry, direction, proto, bytes);
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ return;
+ }
+
+ /* Loop over tag list under this interface for {0,uid_tag} */
+ tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+ uid_tag);
+ if (!tag_stat_entry) {
+ /* Here: the base uid_tag did not exist */
+ /*
+ * No parent counters. So
+ * - no {0, uid_tag} stats and no {acct_tag, uid_tag} stats exist yet.
+ */
+ new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+ uid_tag_counters = &new_tag_stat->counters;
+ } else {
+ uid_tag_counters = &tag_stat_entry->counters;
+ }
+
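+ /*
+ * Note: if acct_tag == 0 then tag == uid_tag, so the search for "tag"
+ * above must have failed and new_tag_stat was just created in the
+ * !tag_stat_entry branch; otherwise it is created in the block below.
+ * Either way new_tag_stat is initialized before tag_stat_update().
+ */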
+ if (acct_tag) {
+ new_tag_stat = create_if_tag_stat(iface_entry, tag);
+ new_tag_stat->parent_counters = uid_tag_counters;
+ }
+ tag_stat_update(new_tag_stat, direction, proto, bytes);
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+}
+
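The net effect of if_tag_stat_update() is two-level accounting: traffic charged to a child {acct_tag, uid} entry is also charged, via parent_counters, to the {0, uid} entry, so per-tag numbers always sum up under the owning uid. A standalone model of that relationship (not from the patch; the tag layout — uid in the low 32 bits, accounting tag in the high 32 bits — is assumed from xt_qtaguid_internal.h, which is not shown in this diff):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t tag_t;

/* Assumed layout: high 32 bits = accounting tag, low 32 bits = uid. */
static tag_t combine_atag_with_uid_model(uint32_t acct_tag, uint32_t uid)
{
	return ((tag_t)acct_tag << 32) | uid;
}

struct counters { uint64_t bytes, packets; };

static void charge(struct counters *child, struct counters *parent, int bytes)
{
	child->bytes += bytes;  child->packets++;
	parent->bytes += bytes; parent->packets++;  /* parent_counters link */
}

int main(void)
{
	struct counters uid_total = {0, 0};  /* the {0, uid} entry            */
	struct counters tagged = {0, 0};     /* a {acct_tag, uid} child entry */

	charge(&tagged, &uid_total, 1500);
	charge(&tagged, &uid_total, 512);

	printf("tag=0x%llx tagged=%llu uid_total=%llu bytes\n",
	       (unsigned long long)combine_atag_with_uid_model(0x2a, 10000),
	       (unsigned long long)tagged.bytes,
	       (unsigned long long)uid_total.bytes);
	return 0;
}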
+static int iface_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+ "ev=0x%lx/%s netdev=%p->name=%s\n",
+ event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+ switch (event) {
+ case NETDEV_UP:
+ iface_stat_create(dev, NULL);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct net_device *dev;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+ "ev=0x%lx/%s ifa=%p\n",
+ event, netdev_evt_str(event), ifa);
+
+ switch (event) {
+ case NETDEV_UP:
+ BUG_ON(!ifa || !ifa->idev);
+ dev = (struct net_device *)ifa->idev->dev;
+ iface_stat_create_ipv6(dev, ifa);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ BUG_ON(!ifa || !ifa->idev);
+ dev = (struct net_device *)ifa->idev->dev;
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *dev;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+ "ev=0x%lx/%s ifa=%p\n",
+ event, netdev_evt_str(event), ifa);
+
+ switch (event) {
+ case NETDEV_UP:
+ BUG_ON(!ifa || !ifa->ifa_dev);
+ dev = ifa->ifa_dev->dev;
+ iface_stat_create(dev, ifa);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ BUG_ON(!ifa || !ifa->ifa_dev);
+ dev = ifa->ifa_dev->dev;
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+ .notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+ .notifier_call = iface_inetaddr_event_handler,
+};
+
+static struct notifier_block iface_inet6addr_notifier_blk = {
+ .notifier_call = iface_inet6addr_event_handler,
+};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+ int err;
+
+ iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+ if (!iface_stat_procdir) {
+ pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+ err = -1;
+ goto err;
+ }
+
+ iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename,
+ proc_iface_perms,
+ parent_procdir);
+ if (!iface_stat_all_procfile) {
+ pr_err("qtaguid: iface_stat: init "
+ " failed to create stat_all proc entry\n");
+ err = -1;
+ goto err_zap_entry;
+ }
+ iface_stat_all_procfile->read_proc = iface_stat_all_proc_read;
+
+
+ err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register dev event handler\n");
+ goto err_zap_all_stats_entry;
+ }
+ err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register ipv4 dev event handler\n");
+ goto err_unreg_nd;
+ }
+
+ err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register ipv6 dev event handler\n");
+ goto err_unreg_ip4_addr;
+ }
+ return 0;
+
+err_unreg_ip4_addr:
+ unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+ unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entry:
+ remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+ remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+ return err;
+}
+
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ struct sock *sk;
+ unsigned int hook_mask = (1 << par->hooknum);
+
+ MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+ par->hooknum, par->family);
+
+ /*
+ * Let's not abuse the xt_socket_get*_sk(), or else it will
+ * return garbage SKs.
+ */
+ if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+ return NULL;
+
+ switch (par->family) {
+ case NFPROTO_IPV6:
+ sk = xt_socket_get6_sk(skb, par);
+ break;
+ case NFPROTO_IPV4:
+ sk = xt_socket_get4_sk(skb, par);
+ break;
+ default:
+ return NULL;
+ }
+
+ /*
+ * There seem to be issues with the file ptr for TCP_TIME_WAIT SKs.
+ * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959
+ * Not fixed in 3.0-r3 :(
+ */
+ if (sk) {
+ MT_DEBUG("qtaguid: %p->sk_proto=%u "
+ "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+ if (sk->sk_state == TCP_TIME_WAIT) {
+ xt_socket_put_sk(sk);
+ sk = NULL;
+ }
+ }
+ return sk;
+}
+
+static void account_for_uid(const struct sk_buff *skb,
+ const struct sock *alternate_sk, uid_t uid,
+ struct xt_action_param *par)
+{
+ const struct net_device *el_dev;
+
+ if (!skb->dev) {
+ MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+ el_dev = par->in ? : par->out;
+ } else {
+ const struct net_device *other_dev;
+ el_dev = skb->dev;
+ other_dev = par->in ? : par->out;
+ if (el_dev != other_dev) {
+ MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+ "par->(in/out)=%p %s\n",
+ par->hooknum, el_dev, el_dev->name, other_dev,
+ other_dev->name);
+ }
+ }
+
+ if (unlikely(!el_dev)) {
+ pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+ } else if (unlikely(!el_dev->name)) {
+ pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+ } else {
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d\n",
+ par->hooknum,
+ el_dev->name,
+ el_dev->type);
+
+ if_tag_stat_update(el_dev->name, uid,
+ skb->sk ? skb->sk : alternate_sk,
+ par->in ? IFS_RX : IFS_TX,
+ ip_hdr(skb)->protocol, skb->len);
+ }
+}
+
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_qtaguid_match_info *info = par->matchinfo;
+ const struct file *filp;
+ bool got_sock = false;
+ struct sock *sk;
+ uid_t sock_uid;
+ bool res;
+
+ if (unlikely(module_passive))
+ return (info->match ^ info->invert) == 0;
+
+ MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+ par->hooknum, skb, par->in, par->out, par->family);
+
+ atomic64_inc(&qtu_events.match_calls);
+ if (skb == NULL) {
+ res = (info->match ^ info->invert) == 0;
+ goto ret_res;
+ }
+
+ sk = skb->sk;
+
+ if (sk == NULL) {
+ /*
+ * A missing skb->sk happens when packets are in-flight
+ * and the matching socket is already closed and gone.
+ */
+ sk = qtaguid_find_sk(skb, par);
+ /*
+ * If we got the socket from the find_sk(), we will need to put
+ * it back, as nf_tproxy_get_sock_v4() got it.
+ */
+ got_sock = sk;
+ if (sk)
+ atomic64_inc(&qtu_events.match_found_sk_in_ct);
+ else
+ atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
+ } else {
+ atomic64_inc(&qtu_events.match_found_sk);
+ }
+ MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d proto=%d\n",
+ par->hooknum, sk, got_sock, ip_hdr(skb)->protocol);
+ if (sk != NULL) {
+ MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+ MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+ par->hooknum, filp ? filp->f_cred->fsuid : -1);
+ }
+
+ if (sk == NULL || sk->sk_socket == NULL) {
+ /*
+ * Here, the qtaguid_find_sk() using connection tracking
+ * couldn't find the owner, so for now we just count them
+ * against the system.
+ */
+ /*
+ * TODO: unhack how to force just accounting.
+ * For now we only do iface stats when the uid-owner is not
+ * requested.
+ */
+ if (!(info->match & XT_QTAGUID_UID))
+ account_for_uid(skb, sk, 0, par);
+ MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+ par->hooknum,
+ sk ? sk->sk_socket : NULL);
+ res = (info->match ^ info->invert) == 0;
+ atomic64_inc(&qtu_events.match_no_sk);
+ goto put_sock_ret_res;
+ } else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+ res = false;
+ goto put_sock_ret_res;
+ }
+ filp = sk->sk_socket->file;
+ if (filp == NULL) {
+ MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+ account_for_uid(skb, sk, 0, par);
+ res = ((info->match ^ info->invert) &
+ (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+ atomic64_inc(&qtu_events.match_no_sk_file);
+ goto put_sock_ret_res;
+ }
+ sock_uid = filp->f_cred->fsuid;
+ /*
+ * TODO: unhack how to force just accounting.
+ * For now we only do iface stats when the uid-owner is not requested
+ */
+ if (!(info->match & XT_QTAGUID_UID))
+ account_for_uid(skb, sk, sock_uid, par);
+
+ /*
+ * The following two tests fail the match when:
+ * id not in range AND no inverted condition requested
+ * or id in range AND inverted condition requested
+ * Thus (!a && b) || (a && !b) == a ^ b
+ */
+ if (info->match & XT_QTAGUID_UID)
+ if ((filp->f_cred->fsuid >= info->uid_min &&
+ filp->f_cred->fsuid <= info->uid_max) ^
+ !(info->invert & XT_QTAGUID_UID)) {
+ MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
+ par->hooknum);
+ res = false;
+ goto put_sock_ret_res;
+ }
+ if (info->match & XT_QTAGUID_GID)
+ if ((filp->f_cred->fsgid >= info->gid_min &&
+ filp->f_cred->fsgid <= info->gid_max) ^
+ !(info->invert & XT_QTAGUID_GID)) {
+ MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
+ par->hooknum);
+ res = false;
+ goto put_sock_ret_res;
+ }
+
+ MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
+ res = true;
+
+put_sock_ret_res:
+ if (got_sock)
+ xt_socket_put_sk(sk);
+ret_res:
+ MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
+ return res;
+}
+
+#ifdef DDEBUG
+/* This function is not in xt_qtaguid_print.c because of lock visibility. */
+static void prdebug_full_state(int indent_level, const char *fmt, ...)
+{
+ va_list args;
+ char *fmt_buff;
+ char *buff;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ fmt_buff = kasprintf(GFP_ATOMIC,
+ "qtaguid: %s(): %s {\n", __func__, fmt);
+ BUG_ON(!fmt_buff);
+ va_start(args, fmt);
+ buff = kvasprintf(GFP_ATOMIC,
+ fmt_buff, args);
+ BUG_ON(!buff);
+ pr_debug("%s", buff);
+ kfree(fmt_buff);
+ kfree(buff);
+ va_end(args);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
+ prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ spin_lock_bh(&iface_stat_list_lock);
+ prdebug_iface_stat_list(indent_level, &iface_stat_list);
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ pr_debug("qtaguid: %s(): }\n", __func__);
+}
+#else
+static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+#endif
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count, int *eof,
+ void *data)
+{
+ char *outp = page;
+ int len;
+ uid_t uid;
+ struct rb_node *node;
+ struct sock_tag *sock_tag_entry;
+ int item_index = 0;
+ int indent_level = 0;
+ long f_count;
+
+ if (unlikely(module_passive)) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (*eof)
+ return 0;
+
+ CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n",
+ page, items_to_skip, char_count, *eof);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ for (node = rb_first(&sock_tag_tree);
+ node;
+ node = rb_next(node)) {
+ if (item_index++ < items_to_skip)
+ continue;
+ sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+ uid = get_uid_from_tag(sock_tag_entry->tag);
+ CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+ "pid=%u\n",
+ sock_tag_entry->sk,
+ sock_tag_entry->tag,
+ uid,
+ sock_tag_entry->pid
+ );
+ f_count = atomic_long_read(
+ &sock_tag_entry->socket->file->f_count);
+ len = snprintf(outp, char_count,
+ "sock=%p tag=0x%llx (uid=%u) pid=%u "
+ "f_count=%lu\n",
+ sock_tag_entry->sk,
+ sock_tag_entry->tag, uid,
+ sock_tag_entry->pid, f_count);
+ if (len >= char_count) {
+ spin_unlock_bh(&sock_tag_list_lock);
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ if (item_index++ >= items_to_skip) {
+ len = snprintf(outp, char_count,
+ "events: sockets_tagged=%llu "
+ "sockets_untagged=%llu "
+ "counter_set_changes=%llu "
+ "delete_cmds=%llu "
+ "iface_events=%llu "
+ "match_calls=%llu "
+ "match_found_sk=%llu "
+ "match_found_sk_in_ct=%llu "
+ "match_found_no_sk_in_ct=%llu "
+ "match_no_sk=%llu "
+ "match_no_sk_file=%llu\n",
+ atomic64_read(&qtu_events.sockets_tagged),
+ atomic64_read(&qtu_events.sockets_untagged),
+ atomic64_read(&qtu_events.counter_set_changes),
+ atomic64_read(&qtu_events.delete_cmds),
+ atomic64_read(&qtu_events.iface_events),
+ atomic64_read(&qtu_events.match_calls),
+ atomic64_read(&qtu_events.match_found_sk),
+ atomic64_read(&qtu_events.match_found_sk_in_ct),
+ atomic64_read(
+ &qtu_events.match_found_no_sk_in_ct),
+ atomic64_read(&qtu_events.match_no_sk),
+ atomic64_read(&qtu_events.match_no_sk_file));
+ if (len >= char_count) {
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ /* Count the following as part of the last item_index */
+ if (item_index > items_to_skip) {
+ prdebug_full_state(indent_level, "proc ctrl");
+ }
+
+ *eof = 1;
+ return outp - page;
+}
+
+/*
+ * Delete socket tags, and stat tags associated with a given
+ * accounting tag and uid.
+ */
+static int ctrl_cmd_delete(const char *input)
+{
+ char cmd;
+ uid_t uid;
+ uid_t entry_uid;
+ tag_t acct_tag;
+ tag_t tag;
+ int res, argc;
+ struct iface_stat *iface_entry;
+ struct rb_node *node;
+ struct sock_tag *st_entry;
+ struct rb_root st_to_free_tree = RB_ROOT;
+ struct tag_stat *ts_entry;
+ struct tag_counter_set *tcs_entry;
+ struct tag_ref *tr_entry;
+ struct uid_tag_data *utd_entry;
+
+ argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
+ CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+ "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+ acct_tag, uid);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ if (!valid_atag(acct_tag)) {
+ pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+ res = -EINVAL;
+ goto err;
+ }
+ if (argc < 3) {
+ uid = current_fsuid();
+ } else if (!can_impersonate_uid(uid)) {
+ pr_info("qtaguid: ctrl_delete(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err;
+ }
+
+ tag = combine_atag_with_uid(acct_tag, uid);
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "looking for tag=0x%llx (uid=%u)\n",
+ input, tag, uid);
+
+ /* Delete socket tags */
+ spin_lock_bh(&sock_tag_list_lock);
+ node = rb_first(&sock_tag_tree);
+ while (node) {
+ st_entry = rb_entry(node, struct sock_tag, sock_node);
+ entry_uid = get_uid_from_tag(st_entry->tag);
+ node = rb_next(node);
+ if (entry_uid != uid)
+ continue;
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+ input, st_entry->tag, entry_uid);
+
+ if (!acct_tag || st_entry->tag == tag) {
+ rb_erase(&st_entry->sock_node, &sock_tag_tree);
+ /* Can't sockfd_put() within spinlock, do it later. */
+ sock_tag_tree_insert(st_entry, &st_to_free_tree);
+ tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+ BUG_ON(tr_entry->num_sock_tags <= 0);
+ tr_entry->num_sock_tags--;
+ /*
+ * TODO: remove if, and start failing.
+ * This is a hack to work around the fact that in some
+			 * This is a hack, like the
+			 * "if (IS_ERR_OR_NULL(pqd_entry))" checks elsewhere,
+			 * to work around apps that did not open
+			 * /dev/xt_qtaguid.
+ if (st_entry->list.next && st_entry->list.prev)
+ list_del(&st_entry->list);
+ }
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ sock_tag_tree_erase(&st_to_free_tree);
+
+ /* Delete tag counter-sets */
+ spin_lock_bh(&tag_counter_set_list_lock);
+ /* Counter sets are only on the uid tag, not full tag */
+ tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (tcs_entry) {
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+ input,
+ tcs_entry->tn.tag,
+ get_uid_from_tag(tcs_entry->tn.tag),
+ tcs_entry->active_set);
+ rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+ kfree(tcs_entry);
+ }
+ spin_unlock_bh(&tag_counter_set_list_lock);
+
+ /*
+ * If acct_tag is 0, then all entries belonging to uid are
+ * erased.
+ */
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+ node = rb_first(&iface_entry->tag_stat_tree);
+ while (node) {
+ ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+ node = rb_next(node);
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "ts tag=0x%llx (uid=%u)\n",
+ input, ts_entry->tn.tag, entry_uid);
+
+ if (entry_uid != uid)
+ continue;
+ if (!acct_tag || ts_entry->tn.tag == tag) {
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "erase ts: %s 0x%llx %u\n",
+ input, iface_entry->ifname,
+ get_atag_from_tag(ts_entry->tn.tag),
+ entry_uid);
+ rb_erase(&ts_entry->tn.node,
+ &iface_entry->tag_stat_tree);
+ kfree(ts_entry);
+ }
+ }
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ /* Cleanup the uid_tag_data */
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ node = rb_first(&uid_tag_data_tree);
+ while (node) {
+ utd_entry = rb_entry(node, struct uid_tag_data, node);
+ entry_uid = utd_entry->uid;
+ node = rb_next(node);
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "utd uid=%u\n",
+ input, entry_uid);
+
+ if (entry_uid != uid)
+ continue;
+ /*
+ * Go over the tag_refs, and those that don't have
+ * sock_tags using them are freed.
+ */
+ put_tag_ref_tree(tag, utd_entry);
+ put_utd_entry(utd_entry);
+ }
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+
+ atomic64_inc(&qtu_events.delete_cmds);
+ res = 0;
+
+err:
+ return res;
+}
+
+static int ctrl_cmd_counter_set(const char *input)
+{
+ char cmd;
+ uid_t uid = 0;
+ tag_t tag;
+ int res, argc;
+ struct tag_counter_set *tcs;
+ int counter_set;
+
+ argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
+ CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
+ "set=%d uid=%u\n", input, argc, cmd,
+ counter_set, uid);
+ if (argc != 3) {
+ res = -EINVAL;
+ goto err;
+ }
+ if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
+ pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
+ input);
+ res = -EINVAL;
+ goto err;
+ }
+ if (!can_manipulate_uids()) {
+ pr_info("qtaguid: ctrl_counterset(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err;
+ }
+
+ tag = make_tag_from_uid(uid);
+ spin_lock_bh(&tag_counter_set_list_lock);
+ tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (!tcs) {
+ tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
+ if (!tcs) {
+ spin_unlock_bh(&tag_counter_set_list_lock);
+ pr_err("qtaguid: ctrl_counterset(%s): "
+ "failed to alloc counter set\n",
+ input);
+ res = -ENOMEM;
+ goto err;
+ }
+ tcs->tn.tag = tag;
+ tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
+ CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
+ "(uid=%u) set=%d\n",
+ input, tag, get_uid_from_tag(tag), counter_set);
+ }
+ tcs->active_set = counter_set;
+ spin_unlock_bh(&tag_counter_set_list_lock);
+ atomic64_inc(&qtu_events.counter_set_changes);
+ res = 0;
+
+err:
+ return res;
+}
+
+static int ctrl_cmd_tag(const char *input)
+{
+ char cmd;
+ int sock_fd = 0;
+ uid_t uid = 0;
+ tag_t acct_tag = make_atag_from_value(0);
+ tag_t full_tag;
+ struct socket *el_socket;
+ int res, argc;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *uid_tag_data_entry;
+ struct proc_qtu_data *pqd_entry;
+
+ /* Unassigned args will get defaulted later. */
+ argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
+ CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+ "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+ acct_tag, uid);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
+ if (!el_socket) {
+ pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+ " sock_fd=%d err=%d\n", input, sock_fd, res);
+ goto err;
+ }
+ CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+ input, atomic_long_read(&el_socket->file->f_count),
+ el_socket->sk);
+ if (argc < 3) {
+ acct_tag = make_atag_from_value(0);
+ } else if (!valid_atag(acct_tag)) {
+ pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+ res = -EINVAL;
+ goto err_put;
+ }
+ CT_DEBUG("qtaguid: ctrl_tag(%s): "
+ "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+ "in_group=%d in_egroup=%d\n",
+ input, current->pid, current->tgid, current_uid(),
+ current_euid(), current_fsuid(),
+ in_group_p(proc_ctrl_write_gid),
+ in_egroup_p(proc_ctrl_write_gid));
+ if (argc < 4) {
+ uid = current_fsuid();
+ } else if (!can_impersonate_uid(uid)) {
+ pr_info("qtaguid: ctrl_tag(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err_put;
+ }
+ full_tag = combine_atag_with_uid(acct_tag, uid);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+ tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+ if (IS_ERR(tag_ref_entry)) {
+ res = PTR_ERR(tag_ref_entry);
+ spin_unlock_bh(&sock_tag_list_lock);
+ goto err_put;
+ }
+ tag_ref_entry->num_sock_tags++;
+ if (sock_tag_entry) {
+ struct tag_ref *prev_tag_ref_entry;
+
+ CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+ "st@%p ...->f_count=%ld\n",
+ input, el_socket->sk, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count));
+ /*
+ * This is a re-tagging, so release the sock_fd that was
+ * locked at the time of the 1st tagging.
+ * There is still the ref from this call's sockfd_lookup() so
+ * it can be done within the spinlock.
+ */
+ sockfd_put(sock_tag_entry->socket);
+ prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+ &uid_tag_data_entry);
+ BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+ BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+ prev_tag_ref_entry->num_sock_tags--;
+ sock_tag_entry->tag = full_tag;
+ } else {
+ CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+ input, el_socket->sk);
+ sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+ GFP_ATOMIC);
+ if (!sock_tag_entry) {
+ pr_err("qtaguid: ctrl_tag(%s): "
+ "socket tag alloc failed\n",
+ input);
+ spin_unlock_bh(&sock_tag_list_lock);
+ res = -ENOMEM;
+ goto err_tag_unref_put;
+ }
+ sock_tag_entry->sk = el_socket->sk;
+ sock_tag_entry->socket = el_socket;
+ sock_tag_entry->pid = current->tgid;
+ sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+ uid);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ pqd_entry = proc_qtu_data_tree_search(
+ &proc_qtu_data_tree, current->tgid);
+ /*
+ * TODO: remove if, and start failing.
+ * At first, we want to catch user-space code that is not
+ * opening the /dev/xt_qtaguid.
+ */
+ if (IS_ERR_OR_NULL(pqd_entry))
+ pr_warn_once(
+ "qtaguid: %s(): "
+ "User space forgot to open /dev/xt_qtaguid? "
+ "pid=%u tgid=%u uid=%u\n", __func__,
+ current->pid, current->tgid,
+ current_fsuid());
+ else
+ list_add(&sock_tag_entry->list,
+ &pqd_entry->sock_tag_list);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+
+ sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+ atomic64_inc(&qtu_events.sockets_tagged);
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+ /* We keep the ref to the socket (file) until it is untagged */
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+ input, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count));
+ return 0;
+
+err_tag_unref_put:
+ BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+ tag_ref_entry->num_sock_tags--;
+ free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+ input, atomic_long_read(&el_socket->file->f_count) - 1);
+ /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+ sockfd_put(el_socket);
+ return res;
+
+err:
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+ return res;
+}
+
+static int ctrl_cmd_untag(const char *input)
+{
+ char cmd;
+ int sock_fd = 0;
+ struct socket *el_socket;
+ int res, argc;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+
+ argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+ CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+ input, argc, cmd, sock_fd);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
+ if (!el_socket) {
+ pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+ " sock_fd=%d err=%d\n", input, sock_fd, res);
+ goto err;
+ }
+ CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+ input, atomic_long_read(&el_socket->file->f_count),
+ el_socket->sk);
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+ if (!sock_tag_entry) {
+ spin_unlock_bh(&sock_tag_list_lock);
+ res = -EINVAL;
+ goto err_put;
+ }
+ /*
+ * The socket already belongs to the current process
+ * so it can do whatever it wants to it.
+ */
+ rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+ tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+ BUG_ON(!tag_ref_entry);
+ BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ pqd_entry = proc_qtu_data_tree_search(
+ &proc_qtu_data_tree, current->tgid);
+ /*
+ * TODO: remove if, and start failing.
+ * At first, we want to catch user-space code that is not
+ * opening the /dev/xt_qtaguid.
+ */
+ if (IS_ERR_OR_NULL(pqd_entry))
+ pr_warn_once("qtaguid: %s(): "
+ "User space forgot to open /dev/xt_qtaguid? "
+ "pid=%u tgid=%u uid=%u\n", __func__,
+ current->pid, current->tgid, current_fsuid());
+ else
+ list_del(&sock_tag_entry->list);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ /*
+ * We don't free tag_ref from the utd_entry here,
+ * only during a cmd_delete().
+ */
+ tag_ref_entry->num_sock_tags--;
+ spin_unlock_bh(&sock_tag_list_lock);
+ /*
+ * Release the sock_fd that was grabbed at tag time,
+ * and once more for the sockfd_lookup() here.
+ */
+ sockfd_put(sock_tag_entry->socket);
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+ input, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count) - 1);
+ sockfd_put(el_socket);
+
+ kfree(sock_tag_entry);
+ atomic64_inc(&qtu_events.sockets_untagged);
+
+ return 0;
+
+err_put:
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+ input, atomic_long_read(&el_socket->file->f_count) - 1);
+ /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+ sockfd_put(el_socket);
+ return res;
+
+err:
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+ return res;
+}
+
+static int qtaguid_ctrl_parse(const char *input, int count)
+{
+ char cmd;
+ int res;
+
+ cmd = input[0];
+ /* Collect params for commands */
+ switch (cmd) {
+ case 'd':
+ res = ctrl_cmd_delete(input);
+ break;
+
+ case 's':
+ res = ctrl_cmd_counter_set(input);
+ break;
+
+ case 't':
+ res = ctrl_cmd_tag(input);
+ break;
+
+ case 'u':
+ res = ctrl_cmd_untag(input);
+ break;
+
+ default:
+ res = -EINVAL;
+ goto err;
+ }
+ if (!res)
+ res = count;
+err:
+ CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res);
+ return res;
+}
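+
+/*
+ * Illustrative only, not part of the original change: user-space usage of the
+ * command parser above, based on the sscanf() formats in the ctrl_cmd_*()
+ * handlers. The path assumes the ctrl entry registered below ends up under
+ * proc/net/<module_procdirname>, i.e. /proc/net/xt_qtaguid/ctrl.
+ *
+ *   echo "t 12 4294967296 10003" > /proc/net/xt_qtaguid/ctrl
+ *       tag socket fd 12 with acct_tag value 1 on behalf of uid 10003
+ *   echo "u 12" > /proc/net/xt_qtaguid/ctrl
+ *       untag socket fd 12
+ *   echo "s 1 10003" > /proc/net/xt_qtaguid/ctrl
+ *       switch uid 10003 to counter set 1
+ *   echo "d 4294967296 10003" > /proc/net/xt_qtaguid/ctrl
+ *       delete socket tags and stats for that acct_tag/uid
+ *
+ * 4294967296 is make_atag_from_value(1): the value 1 shifted into the upper
+ * 32 bits, so that valid_atag() accepts it.
+ */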
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+ if (unlikely(module_passive))
+ return count;
+
+ if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+ return -EINVAL;
+
+ if (copy_from_user(input_buf, buffer, count))
+ return -EFAULT;
+
+ input_buf[count] = '\0';
+ return qtaguid_ctrl_parse(input_buf, count);
+}
+
+struct proc_print_info {
+ char *outp;
+ char **num_items_returned;
+ struct iface_stat *iface_entry;
+ struct tag_stat *ts_entry;
+ int item_index;
+ int items_to_skip;
+ int char_count;
+};
+
+static int pp_stats_line(struct proc_print_info *ppi, int cnt_set)
+{
+ int len;
+ struct data_counters *cnts;
+
+ if (!ppi->item_index) {
+ if (ppi->item_index++ < ppi->items_to_skip)
+ return 0;
+ len = snprintf(ppi->outp, ppi->char_count,
+ "idx iface acct_tag_hex uid_tag_int cnt_set "
+ "rx_bytes rx_packets "
+ "tx_bytes tx_packets "
+ "rx_tcp_bytes rx_tcp_packets "
+ "rx_udp_bytes rx_udp_packets "
+ "rx_other_bytes rx_other_packets "
+ "tx_tcp_bytes tx_tcp_packets "
+ "tx_udp_bytes tx_udp_packets "
+ "tx_other_bytes tx_other_packets\n");
+ } else {
+ tag_t tag = ppi->ts_entry->tn.tag;
+ uid_t stat_uid = get_uid_from_tag(tag);
+
+ if (!can_read_other_uid_stats(stat_uid)) {
+ CT_DEBUG("qtaguid: stats line: "
+ "%s 0x%llx %u: insufficient priv "
+ "from pid=%u tgid=%u uid=%u\n",
+ ppi->iface_entry->ifname,
+ get_atag_from_tag(tag), stat_uid,
+ current->pid, current->tgid, current_fsuid());
+ return 0;
+ }
+ if (ppi->item_index++ < ppi->items_to_skip)
+ return 0;
+ cnts = &ppi->ts_entry->counters;
+ len = snprintf(
+ ppi->outp, ppi->char_count,
+ "%d %s 0x%llx %u %u "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu\n",
+ ppi->item_index,
+ ppi->iface_entry->ifname,
+ get_atag_from_tag(tag),
+ stat_uid,
+ cnt_set,
+ dc_sum_bytes(cnts, cnt_set, IFS_RX),
+ dc_sum_packets(cnts, cnt_set, IFS_RX),
+ dc_sum_bytes(cnts, cnt_set, IFS_TX),
+ dc_sum_packets(cnts, cnt_set, IFS_TX),
+ cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+ cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+ cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+ }
+ return len;
+}
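+
+/*
+ * Illustrative only, not part of the original change: given the snprintf()
+ * formats above, a read of the stats file registered below produces a header
+ * followed by one line per (iface, tag, counter set), roughly (values made
+ * up):
+ *
+ *   idx iface acct_tag_hex uid_tag_int cnt_set rx_bytes rx_packets ...
+ *   2 wlan0 0x100000000 10003 0 1024 8 512 6 900 5 100 2 24 1 400 4 96 1 16 1
+ *
+ * where rx_bytes/rx_packets and tx_bytes/tx_packets are the per-protocol
+ * sums computed by dc_sum_bytes()/dc_sum_packets().
+ */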
+
+static bool pp_sets(struct proc_print_info *ppi)
+{
+ int len;
+ int counter_set;
+ for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+ counter_set++) {
+ len = pp_stats_line(ppi, counter_set);
+ if (len >= ppi->char_count) {
+ *ppi->outp = '\0';
+ return false;
+ }
+ if (len) {
+ ppi->outp += len;
+ ppi->char_count -= len;
+ (*ppi->num_items_returned)++;
+ }
+ }
+ return true;
+}
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups all protocols tx/rx bytes.
+ */
+static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count, int *eof,
+ void *data)
+{
+ struct proc_print_info ppi;
+ int len;
+
+ ppi.outp = page;
+ ppi.item_index = 0;
+ ppi.char_count = char_count;
+ ppi.num_items_returned = num_items_returned;
+ ppi.items_to_skip = items_to_skip;
+
+ if (unlikely(module_passive)) {
+ len = pp_stats_line(&ppi, 0);
+ /* The header should always be shorter than the buffer. */
+ BUG_ON(len >= ppi.char_count);
+ (*num_items_returned)++;
+ *eof = 1;
+ return len;
+ }
+
+ CT_DEBUG("qtaguid:proc stats page=%p *num_items_returned=%p off=%ld "
+ "char_count=%d *eof=%d\n", page, *num_items_returned,
+ items_to_skip, char_count, *eof);
+
+ if (*eof)
+ return 0;
+
+ /* The idx is there to help debug when things go belly up. */
+ len = pp_stats_line(&ppi, 0);
+ /* Don't advance the outp unless the whole line was printed */
+ if (len >= ppi.char_count) {
+ *ppi.outp = '\0';
+ return ppi.outp - page;
+ }
+ if (len) {
+ ppi.outp += len;
+ ppi.char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) {
+ struct rb_node *node;
+ spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock);
+ for (node = rb_first(&ppi.iface_entry->tag_stat_tree);
+ node;
+ node = rb_next(node)) {
+ ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ if (!pp_sets(&ppi)) {
+ spin_unlock_bh(
+ &ppi.iface_entry->tag_stat_list_lock);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return ppi.outp - page;
+ }
+ }
+ spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock);
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ *eof = 1;
+ return ppi.outp - page;
+}
+
+/*------------------------------------------*/
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+ struct proc_qtu_data *new_pqd_entry;
+ int res;
+ bool utd_entry_found;
+
+ if (unlikely(qtu_proc_handling_passive))
+ return 0;
+
+ DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+ current->pid, current->tgid, current_fsuid());
+
+ spin_lock_bh(&uid_tag_data_tree_lock);
+
+ /* Look for existing uid data, or alloc one. */
+ utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
+ if (IS_ERR_OR_NULL(utd_entry)) {
+ res = PTR_ERR(utd_entry);
+ goto err;
+ }
+
+ /* Look for existing PID based proc_data */
+ pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+ current->tgid);
+ if (pqd_entry) {
+ pr_err("qtaguid: qtudev_open(): %u/%u %u "
+ "%s already opened\n",
+ current->pid, current->tgid, current_fsuid(),
+ QTU_DEV_NAME);
+ res = -EBUSY;
+ goto err_unlock_free_utd;
+ }
+
+ new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+ if (!new_pqd_entry) {
+ pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+ "proc data alloc failed\n",
+ current->pid, current->tgid, current_fsuid());
+ res = -ENOMEM;
+ goto err_unlock_free_utd;
+ }
+ new_pqd_entry->pid = current->tgid;
+ INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+ new_pqd_entry->parent_tag_data = utd_entry;
+ utd_entry->num_pqd++;
+
+ proc_qtu_data_tree_insert(new_pqd_entry,
+ &proc_qtu_data_tree);
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+ current_fsuid(), new_pqd_entry);
+ file->private_data = new_pqd_entry;
+ return 0;
+
+err_unlock_free_utd:
+ if (!utd_entry_found) {
+ rb_erase(&utd_entry->node, &uid_tag_data_tree);
+ kfree(utd_entry);
+ }
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+err:
+ return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+ struct proc_qtu_data *pqd_entry = file->private_data;
+ struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data;
+ struct sock_tag *st_entry;
+ struct rb_root st_to_free_tree = RB_ROOT;
+ struct list_head *entry, *next;
+ struct tag_ref *tr;
+
+ if (unlikely(qtu_proc_handling_passive))
+ return 0;
+
+ /*
+ * Do not trust the current->pid, it might just be a kworker cleaning
+ * up after a dead proc.
+ */
+ DR_DEBUG("qtaguid: qtudev_release(): "
+ "pid=%u tgid=%u uid=%u "
+ "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+ current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+ pqd_entry, pqd_entry->pid, utd_entry,
+ utd_entry->num_active_tags);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+
+ list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+ st_entry = list_entry(entry, struct sock_tag, list);
+ DR_DEBUG("qtaguid: %s(): "
+ "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+ __func__,
+ st_entry, st_entry->sk,
+ current->pid, current->tgid,
+ pqd_entry->parent_tag_data->uid);
+
+ utd_entry = uid_tag_data_tree_search(
+ &uid_tag_data_tree,
+ get_uid_from_tag(st_entry->tag));
+ BUG_ON(IS_ERR_OR_NULL(utd_entry));
+ DR_DEBUG("qtaguid: %s(): "
+ "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+ st_entry->tag, utd_entry);
+ tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+ st_entry->tag);
+ BUG_ON(!tr);
+ BUG_ON(tr->num_sock_tags <= 0);
+ tr->num_sock_tags--;
+ free_tag_ref_from_utd_entry(tr, utd_entry);
+
+ rb_erase(&st_entry->sock_node, &sock_tag_tree);
+ list_del(&st_entry->list);
+ /* Can't sockfd_put() within spinlock, do it later. */
+ sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+ /*
+ * Try to free the utd_entry if no other proc_qtu_data is
+ * using it (num_pqd is 0) and it doesn't have active tags
+ * (num_active_tags is 0).
+ */
+ put_utd_entry(utd_entry);
+ }
+
+ rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+ BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+ pqd_entry->parent_tag_data->num_pqd--;
+ put_utd_entry(pqd_entry->parent_tag_data);
+ kfree(pqd_entry);
+ file->private_data = NULL;
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+
+ sock_tag_tree_erase(&st_to_free_tree);
+
+ prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+ current->pid, current->tgid);
+ return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+ .owner = THIS_MODULE,
+ .open = qtudev_open,
+ .release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = QTU_DEV_NAME,
+ .fops = &qtudev_fops,
+ /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+ int ret;
+ *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+ if (!*res_procdir) {
+ pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+ ret = -ENOMEM;
+ goto no_dir;
+ }
+
+ xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms,
+ *res_procdir);
+ if (!xt_qtaguid_ctrl_file) {
+ pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+ " file\n");
+ ret = -ENOMEM;
+ goto no_ctrl_entry;
+ }
+ xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read;
+ xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write;
+
+ xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms,
+ *res_procdir);
+ if (!xt_qtaguid_stats_file) {
+ pr_err("qtaguid: failed to create xt_qtaguid/stats "
+ "file\n");
+ ret = -ENOMEM;
+ goto no_stats_entry;
+ }
+ xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read;
+ /*
+ * TODO: add support counter hacking
+ * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+ */
+ return 0;
+
+no_stats_entry:
+ remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+ remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+ return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+ /*
+ * This module masquerades as the "owner" module so that iptables
+ * tools can deal with it.
+ */
+ .name = "owner",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .match = qtaguid_mt,
+ .matchsize = sizeof(struct xt_qtaguid_match_info),
+ .me = THIS_MODULE,
+};
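+
+/*
+ * Illustrative only, not part of the original change: since the match
+ * registers as "owner" revision 1, the standard iptables owner syntax selects
+ * it, e.g.:
+ *
+ *   iptables -A OUTPUT -m owner --uid-owner 10003 -j ACCEPT
+ *
+ * The uid/gid ranges land in xt_qtaguid_match_info's uid_min/uid_max and
+ * gid_min/gid_max, which qtaguid_mt() compares against the socket file's
+ * fsuid/fsgid.
+ */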
+
+static int __init qtaguid_mt_init(void)
+{
+ if (qtaguid_proc_register(&xt_qtaguid_procdir)
+ || iface_stat_init(xt_qtaguid_procdir)
+ || xt_register_match(&qtaguid_mt_reg)
+ || misc_register(&qtu_device))
+ return -1;
+ return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y/n' and never an 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 000000000000..02479d6d317d
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,330 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptable Matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g. (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do { \
+ if (unlikely(qtaguid_debug_mask & (mask))) \
+ pr_debug(__VA_ARGS__); \
+ } while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
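+
+/*
+ * Illustrative only, not part of the original change: qtaguid_debug_mask is
+ * expected to be exported as a module parameter by xt_qtaguid.c. Assuming it
+ * is named "debug_mask", the masks above can be enabled at runtime with e.g.:
+ *
+ *   echo 0x19 > /sys/module/xt_qtaguid/parameters/debug_mask
+ *
+ * 0x19 == IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK, matching the example above.
+ */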
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and can not be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a: {acct_tag=1, uid_tag=10003}
+ * b: {acct_tag=2, uid_tag=10003}
+ * c: {acct_tag=3, uid_tag=10003}
+ * d: {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t; /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+ return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+ return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+ return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+ return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+ return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+ return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+ return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+ return (uint64_t)value << 32;
+}
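+
+/*
+ * Illustrative only, not part of the original change: how the accessors above
+ * compose and decompose a tag, reusing uid 10003 from the example higher up:
+ *
+ *   tag_t t = combine_atag_with_uid(make_atag_from_value(2), 10003);
+ *   // t == 0x200002713: acct_tag 2 in the upper 32 bits,
+ *   //                   uid 10003 (0x2713) in the lower 32 bits.
+ *   // get_uid_from_tag(t)  == 10003
+ *   // get_atag_from_tag(t) == 0x200000000 == make_atag_from_value(2)
+ *   // valid_atag(get_atag_from_tag(t)) is true; valid_atag(t) is false.
+ */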
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
+
+enum ifs_tx_rx {
+ IFS_TX,
+ IFS_RX,
+ IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+ IFS_TCP,
+ IFS_UDP,
+ IFS_PROTO_OTHER,
+ IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+ uint64_t bytes;
+ uint64_t packets;
+};
+
+struct data_counters {
+ struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
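+
+/*
+ * Illustrative only, not part of the original change: a per-direction total
+ * is the sum of bpc[] across protocols. The dc_sum_bytes()/dc_sum_packets()
+ * helpers used by the stats reader in xt_qtaguid.c presumably reduce to:
+ *
+ *   static uint64_t dc_sum_bytes(struct data_counters *counters,
+ *                                int set, enum ifs_tx_rx direction)
+ *   {
+ *           return counters->bpc[set][direction][IFS_TCP].bytes
+ *                  + counters->bpc[set][direction][IFS_UDP].bytes
+ *                  + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+ *   }
+ */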
+
+/* Generic tag based node used as a base for rb_tree ops */
+struct tag_node {
+ struct rb_node node;
+ tag_t tag;
+};
+
+struct tag_stat {
+ struct tag_node tn;
+ struct data_counters counters;
+ /*
+ * If this tag is acct_tag based, we need to count against the
+ * matching parent uid_tag.
+ */
+ struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+ struct list_head list; /* in iface_stat_list */
+ char *ifname;
+ bool active;
+ /* net_dev is only valid for active iface_stat */
+ struct net_device *net_dev;
+
+ struct byte_packet_counters totals[IFS_MAX_DIRECTIONS];
+ /*
+ * We keep the last_known, because some devices reset their counters
+ * just before NETDEV_UP, while some will reset just before
+ * NETDEV_REGISTER (which is more normal).
+ * So now, if the device didn't do a NETDEV_UNREGISTER and we see
+ * its current dev stats smaller than what was previously known, we
+ * assume an UNREGISTER and just use the last_known.
+ */
+ struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+ /* last_known is usable when last_known_valid is true */
+ bool last_known_valid;
+
+ struct proc_dir_entry *proc_ptr;
+
+ struct rb_root tag_stat_tree;
+ spinlock_t tag_stat_list_lock;
+};
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+ struct work_struct iface_work;
+ struct iface_stat *iface_entry;
+};
+
+/*
+ * Track the tag that this socket is transferring data for, which is not
+ * necessarily based on the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+ struct rb_node sock_node;
+ struct sock *sk; /* Only used as a number, never dereferenced */
+ /* The socket is needed for sockfd_put() */
+ struct socket *socket;
+ /* Used to associate with a given pid */
+ struct list_head list; /* in proc_qtu_data.sock_tag_list */
+ pid_t pid;
+
+ tag_t tag;
+};
+
+struct qtaguid_event_counts {
+ /* Various successful events */
+ atomic64_t sockets_tagged;
+ atomic64_t sockets_untagged;
+ atomic64_t counter_set_changes;
+ atomic64_t delete_cmds;
+ atomic64_t iface_events; /* Number of NETDEV_* events handled */
+
+ atomic64_t match_calls; /* Number of times iptables called mt */
+ /*
+ * match_found_sk_*: numbers related to the netfilter matching
+ * function finding a sock for the sk_buff.
+ * Total skbs processed is sum(match_found*).
+ */
+ atomic64_t match_found_sk; /* An sk was already in the sk_buff. */
+ /* The connection tracker had or didn't have the sk. */
+ atomic64_t match_found_sk_in_ct;
+ atomic64_t match_found_no_sk_in_ct;
+ /*
+ * No sk could be found. No apparent owner. Could happen with
+ * unsolicited traffic.
+ */
+ atomic64_t match_no_sk;
+ /*
+ * The file ptr in the sk_socket wasn't there.
+ * This might happen for traffic while the socket is being closed.
+ */
+ atomic64_t match_no_sk_file;
+};
+
+/* Track the set active_set for the given tag. */
+struct tag_counter_set {
+ struct tag_node tn;
+ int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resources will be counted to prevent further rogue allocations,
+ * some will need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+ struct rb_node node;
+ uid_t uid;
+
+ /*
+ * For the uid, how many accounting tags have been set.
+ */
+ int num_active_tags;
+ /* Track the number of proc_qtu_data that reference it */
+ int num_pqd;
+ struct rb_root tag_ref_tree;
+ /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+ struct tag_node tn;
+
+ /*
+ * This tracks the number of active sockets that have a tag on them
+ * which matches this tag_ref.tn.tag.
+ * A tag ref can live on after the sockets are untagged.
+ * A tag ref can only be removed during a tag delete command.
+ */
+ int num_sock_tags;
+};
+
+struct proc_qtu_data {
+ struct rb_node node;
+ pid_t pid;
+
+ struct uid_tag_data *parent_tag_data;
+
+ /* Tracks the sock_tags that need freeing upon this proc's death */
+ struct list_head sock_tag_list;
+ /* No spinlock_t sock_tag_list_lock; use the global one. */
+};
+
+/*----------------------------------------------*/
+#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 000000000000..39176785c91f
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,556 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
+ * debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to
+ * hobble along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+ if (IS_ERR_OR_NULL(ptr)) {
+ pr_err("qtaguid: kmalloc failed\n");
+ BUG();
+ }
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+ char *res;
+
+ if (!tag)
+ res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC,
+ "tag_t@%p{tag=0x%llx, uid=%u}",
+ tag, *tag, get_uid_from_tag(*tag));
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+ char *res;
+
+ if (!dc)
+ res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+ else if (showValues)
+ res = kasprintf(
+ GFP_ATOMIC, "data_counters@%p{"
+ "set0{"
+ "rx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}, "
+ "tx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}}, "
+ "set1{"
+ "rx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}, "
+ "tx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}}}",
+ dc,
+ dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+ dc->bpc[0][IFS_RX][IFS_TCP].packets,
+ dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+ dc->bpc[0][IFS_RX][IFS_UDP].packets,
+ dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+ dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+ dc->bpc[0][IFS_TX][IFS_TCP].packets,
+ dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+ dc->bpc[0][IFS_TX][IFS_UDP].packets,
+ dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+ dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+ dc->bpc[1][IFS_RX][IFS_TCP].packets,
+ dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+ dc->bpc[1][IFS_RX][IFS_UDP].packets,
+ dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+ dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+ dc->bpc[1][IFS_TX][IFS_TCP].packets,
+ dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+ dc->bpc[1][IFS_TX][IFS_UDP].packets,
+ dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+ else
+ res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+ char *tag_str;
+ char *res;
+
+ if (!tn) {
+ res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tag_str = pp_tag_t(&tn->tag);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_node@%p{tag=%s}",
+ tn, tag_str);
+ _bug_on_err_or_null(res);
+ kfree(tag_str);
+ return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+ char *tn_str;
+ char *res;
+
+ if (!tr) {
+ res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tn_str = pp_tag_node(&tr->tn);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_ref@%p{%s, num_sock_tags=%d}",
+ tr, tn_str, tr->num_sock_tags);
+ _bug_on_err_or_null(res);
+ kfree(tn_str);
+ return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+ char *tn_str;
+ char *counters_str;
+ char *parent_counters_str;
+ char *res;
+
+ if (!ts) {
+ res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tn_str = pp_tag_node(&ts->tn);
+ counters_str = pp_data_counters(&ts->counters, true);
+ parent_counters_str = pp_data_counters(ts->parent_counters, false);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+ ts, tn_str, counters_str, parent_counters_str);
+ _bug_on_err_or_null(res);
+ kfree(tn_str);
+ kfree(counters_str);
+ kfree(parent_counters_str);
+ return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+ char *res;
+ if (!is)
+ res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+ "list=list_head{...}, "
+ "ifname=%s, "
+ "total={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "last_known_valid=%d, "
+ "last_known={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "active=%d, "
+ "net_dev=%p, "
+ "proc_ptr=%p, "
+ "tag_stat_tree=rb_root{...}}",
+ is,
+ is->ifname,
+ is->totals[IFS_RX].bytes,
+ is->totals[IFS_RX].packets,
+ is->totals[IFS_TX].bytes,
+ is->totals[IFS_TX].packets,
+ is->last_known_valid,
+ is->last_known[IFS_RX].bytes,
+ is->last_known[IFS_RX].packets,
+ is->last_known[IFS_TX].bytes,
+ is->last_known[IFS_TX].packets,
+ is->active,
+ is->net_dev,
+ is->proc_ptr);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+ char *tag_str;
+ char *res;
+
+ if (!st) {
+ res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tag_str = pp_tag_t(&st->tag);
+ res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+ "sock_node=rb_node{...}, "
+ "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+ "pid=%u, tag=%s}",
+ st, st->sk, st->socket, atomic_long_read(
+ &st->socket->file->f_count),
+ st->pid, tag_str);
+ _bug_on_err_or_null(res);
+ kfree(tag_str);
+ return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+ char *res;
+
+ if (!utd)
+ res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+ "uid=%u, num_active_acct_tags=%d, "
+ "num_pqd=%d, "
+ "tag_node_tree=rb_root{...}, "
+ "proc_qtu_data_tree=rb_root{...}}",
+ utd, utd->uid,
+ utd->num_active_tags, utd->num_pqd);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+ char *parent_tag_data_str;
+ char *res;
+
+ if (!pqd) {
+ res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+ res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+ "node=rb_node{...}, pid=%u, "
+ "parent_tag_data=%s, "
+ "sock_tag_list=list_head{...}}",
+ pqd, pqd->pid, parent_tag_data_str
+ );
+ _bug_on_err_or_null(res);
+ kfree(parent_tag_data_str);
+ return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree)
+{
+ struct rb_node *node;
+ struct sock_tag *sock_tag_entry;
+ char *str;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(sock_tag_tree)) {
+ str = "sock_tag_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "sock_tag_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(sock_tag_tree);
+ node;
+ node = rb_next(node)) {
+ sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+ str = pp_sock_tag(sock_tag_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list)
+{
+ struct sock_tag *sock_tag_entry;
+ char *str;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (list_empty(sock_tag_list)) {
+ str = "sock_tag_list=list_head{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "sock_tag_list=list_head{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+ str = pp_sock_tag(sock_tag_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct proc_qtu_data *proc_qtu_data_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+ str = "proc_qtu_data_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "proc_qtu_data_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(proc_qtu_data_tree);
+ node;
+ node = rb_next(node)) {
+ proc_qtu_data_entry = rb_entry(node,
+ struct proc_qtu_data,
+ node);
+ str = pp_proc_qtu_data(proc_qtu_data_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ indent_level++;
+ prdebug_sock_tag_list(indent_level,
+ &proc_qtu_data_entry->sock_tag_list);
+ indent_level--;
+
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct tag_ref *tag_ref_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(tag_ref_tree)) {
+ str = "tag_ref_tree{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "tag_ref_tree{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(tag_ref_tree);
+ node;
+ node = rb_next(node)) {
+ tag_ref_entry = rb_entry(node,
+ struct tag_ref,
+ tn.node);
+ str = pp_tag_ref(tag_ref_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct uid_tag_data *uid_tag_data_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+ str = "uid_tag_data_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "uid_tag_data_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(uid_tag_data_tree);
+ node;
+ node = rb_next(node)) {
+ uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+ node);
+ str = pp_uid_tag_data(uid_tag_data_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+ indent_level++;
+ prdebug_tag_ref_tree(indent_level,
+ &uid_tag_data_entry->tag_ref_tree);
+ indent_level--;
+ }
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct tag_stat *ts_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(tag_stat_tree)) {
+ str = "tag_stat_tree{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "tag_stat_tree{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(tag_stat_tree);
+ node;
+ node = rb_next(node)) {
+ ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ str = pp_tag_stat(ts_entry);
+ pr_debug("%*d: %s\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list)
+{
+ char *str;
+ struct iface_stat *iface_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (list_empty(iface_stat_list)) {
+ str = "iface_stat_list=list_head{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "iface_stat_list=list_head{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ list_for_each_entry(iface_entry, iface_stat_list, list) {
+ str = pp_iface_stat(iface_entry);
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ kfree(str);
+
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+ if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+ indent_level++;
+ prdebug_tag_stat_tree(indent_level,
+ &iface_entry->tag_stat_tree);
+ indent_level--;
+ }
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif /* ifdef DDEBUG */
+/*------------------------------------------*/
+static const char * const netdev_event_strings[] = {
+ "netdev_unknown",
+ "NETDEV_UP",
+ "NETDEV_DOWN",
+ "NETDEV_REBOOT",
+ "NETDEV_CHANGE",
+ "NETDEV_REGISTER",
+ "NETDEV_UNREGISTER",
+ "NETDEV_CHANGEMTU",
+ "NETDEV_CHANGEADDR",
+ "NETDEV_GOING_DOWN",
+ "NETDEV_CHANGENAME",
+ "NETDEV_FEAT_CHANGE",
+ "NETDEV_BONDING_FAILOVER",
+ "NETDEV_PRE_UP",
+ "NETDEV_PRE_TYPE_CHANGE",
+ "NETDEV_POST_TYPE_CHANGE",
+ "NETDEV_POST_INIT",
+ "NETDEV_UNREGISTER_BATCH",
+ "NETDEV_RELEASE",
+ "NETDEV_NOTIFY_PEERS",
+ "NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+ if (netdev_event < 0
+ || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+ return "bad event num";
+ return netdev_event_strings[netdev_event];
+}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 000000000000..b63871a0be5a
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+ return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+ return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+ return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+ return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+ return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+ return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+ return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+ return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+ return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 000000000000..3c72bea2dd69
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,381 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * netfilter module to enforce network quotas
+ * Sam Johnston <samj@samj.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either
+ * version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+#include <linux/netfilter_ipv4/ipt_ULOG.h>
+#endif
+
+/**
+ * @lock: lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+ u_int64_t quota;
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t ref;
+ char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+ struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.c */
+static int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+		 "Event number for NETLINK_NFLOG message. 0 disables log. "
+ "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static unsigned int quota_list_uid = 0;
+static unsigned int quota_list_gid = 0;
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+ ulog_packet_msg_t *pm;
+ struct sk_buff *log_skb;
+ size_t size;
+ struct nlmsghdr *nlh;
+
+ if (!qlog_nl_event)
+ return;
+
+ size = NLMSG_SPACE(sizeof(*pm));
+ size = max(size, (size_t)NLMSG_GOODSIZE);
+ log_skb = alloc_skb(size, GFP_ATOMIC);
+ if (!log_skb) {
+ pr_err("xt_quota2: cannot alloc skb for logging\n");
+ return;
+ }
+
+ /* NLMSG_PUT() uses "goto nlmsg_failure" */
+ nlh = NLMSG_PUT(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+ sizeof(*pm));
+ pm = NLMSG_DATA(nlh);
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ pm->data_len = 0;
+ pm->hook = hooknum;
+ if (prefix != NULL)
+ strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+ else
+ *(pm->prefix) = '\0';
+ if (in)
+ strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+ else
+ pm->indev_name[0] = '\0';
+
+ if (out)
+ strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+ else
+ pm->outdev_name[0] = '\0';
+
+ NETLINK_CB(log_skb).dst_group = 1;
+ pr_debug("throwing 1 packets to netlink group 1\n");
+ netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+
+nlmsg_failure: /* Used within NLMSG_PUT() */
+ pr_debug("xt_quota2: error during NLMSG_PUT\n");
+}
+#else
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+}
+#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static int quota_proc_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ struct xt_quota_counter *e = data;
+ int ret;
+
+ spin_lock_bh(&e->lock);
+ ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota);
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
+
+static int quota_proc_write(struct file *file, const char __user *input,
+ unsigned long size, void *data)
+{
+ struct xt_quota_counter *e = data;
+ char buf[sizeof("18446744073709551616")];
+
+ if (size > sizeof(buf) - 1)
+ size = sizeof(buf) - 1;
+ if (copy_from_user(buf, input, size) != 0)
+ return -EFAULT;
+ buf[size] = '\0';
+
+ spin_lock_bh(&e->lock);
+ e->quota = simple_strtoull(buf, NULL, 0);
+ spin_unlock_bh(&e->lock);
+ return size;
+}
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+ struct xt_quota_counter *e;
+ unsigned int size;
+
+ /* Do not need all the procfs things for anonymous counters. */
+ size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+ e = kmalloc(size, GFP_KERNEL);
+ if (e == NULL)
+ return NULL;
+
+ e->quota = q->quota;
+ spin_lock_init(&e->lock);
+ if (!anon) {
+ INIT_LIST_HEAD(&e->list);
+ atomic_set(&e->ref, 1);
+ strlcpy(e->name, q->name, sizeof(e->name));
+ }
+ return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @q: quota match info carrying the counter name
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+ struct proc_dir_entry *p;
+ struct xt_quota_counter *e = NULL;
+ struct xt_quota_counter *new_e;
+
+ if (*q->name == '\0')
+ return q2_new_counter(q, true);
+
+ /* No need to hold a lock while getting a new counter */
+ new_e = q2_new_counter(q, false);
+ if (new_e == NULL)
+ goto out;
+
+ spin_lock_bh(&counter_list_lock);
+ list_for_each_entry(e, &counter_list, list)
+ if (strcmp(e->name, q->name) == 0) {
+ atomic_inc(&e->ref);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(new_e);
+ pr_debug("xt_quota2: old counter name=%s", e->name);
+ return e;
+ }
+ e = new_e;
+ pr_debug("xt_quota2: new_counter name=%s", e->name);
+ list_add_tail(&e->list, &counter_list);
+ /* An entry with a refcount of 1 cannot be destroyed directly.
+ * This function has not yet returned the new entry, so iptables
+ * holds no reference it could use to destroy this entry.
+ * For another rule to destroy it, this function would first have to
+ * be re-invoked and acquire a new ref for the same named quota.
+ * Nobody will access e->procfs_entry either.
+ * So it is safe to release the lock here. */
+ spin_unlock_bh(&counter_list_lock);
+
+ /* create_proc_entry() is not spin_lock happy */
+ p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms,
+ proc_xt_quota);
+
+ if (IS_ERR_OR_NULL(p)) {
+ spin_lock_bh(&counter_list_lock);
+ list_del(&e->list);
+ spin_unlock_bh(&counter_list_lock);
+ goto out;
+ }
+ p->data = e;
+ p->read_proc = quota_proc_read;
+ p->write_proc = quota_proc_write;
+ p->uid = quota_list_uid;
+ p->gid = quota_list_gid;
+ return e;
+
+ out:
+ kfree(e);
+ return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+ pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+ if (q->flags & ~XT_QUOTA_MASK)
+ return -EINVAL;
+
+ q->name[sizeof(q->name)-1] = '\0';
+ if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+ printk(KERN_ERR "xt_quota.3: illegal name\n");
+ return -EINVAL;
+ }
+
+ q->master = q2_get_counter(q);
+ if (q->master == NULL) {
+ printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+
+ if (*q->name == '\0') {
+ kfree(e);
+ return;
+ }
+
+ spin_lock_bh(&counter_list_lock);
+ if (!atomic_dec_and_test(&e->ref)) {
+ spin_unlock_bh(&counter_list_lock);
+ return;
+ }
+
+ list_del(&e->list);
+ remove_proc_entry(e->name, proc_xt_quota);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+ bool ret = q->flags & XT_QUOTA_INVERT;
+
+ spin_lock_bh(&e->lock);
+ if (q->flags & XT_QUOTA_GROW) {
+ /*
+ * While no_change is pointless in "grow" mode, we will
+ * implement it here simply to have a consistent behavior.
+ */
+ if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+ e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ }
+ ret = true;
+ } else {
+ if (e->quota >= skb->len) {
+ if (!(q->flags & XT_QUOTA_NO_CHANGE))
+ e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ ret = !ret;
+ } else {
+ /* We are transitioning, log that fact. */
+ if (e->quota) {
+ quota2_log(par->hooknum,
+ skb,
+ par->in,
+ par->out,
+ q->name);
+ }
+ /* we do not allow even small packets from now on */
+ e->quota = 0;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV4,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV6,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init quota_mt2_init(void)
+{
+ int ret;
+ pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+ nflognl = netlink_kernel_create(&init_net,
+ NETLINK_NFLOG, 1, NULL,
+ NULL, THIS_MODULE);
+ if (!nflognl)
+ return -ENOMEM;
+#endif
+
+ proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+ if (proc_xt_quota == NULL)
+ return -EACCES;
+
+ ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ if (ret < 0)
+ remove_proc_entry("xt_quota", init_net.proc_net);
+ pr_debug("xt_quota2: init() %d", ret);
+ return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+ xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index fe39f7e913df..ddf5e0507f5f 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,7 +35,7 @@
#include <net/netfilter/nf_conntrack.h>
#endif
-static void
+void
xt_socket_put_sk(struct sock *sk)
{
if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@ xt_socket_put_sk(struct sock *sk)
else
sock_put(sk);
}
+EXPORT_SYMBOL(xt_socket_put_sk);
static int
extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb,
return 0;
}
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
- const struct xt_socket_mtinfo1 *info)
+struct sock*
+xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct iphdr *iph = ip_hdr(skb);
struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
hp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(_hdr), &_hdr);
if (hp == NULL)
- return false;
+ return NULL;
protocol = iph->protocol;
saddr = iph->saddr;
@@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
} else if (iph->protocol == IPPROTO_ICMP) {
if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
&sport, &dport))
- return false;
+ return NULL;
} else {
- return false;
+ return NULL;
}
#ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+
+ pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
+ protocol, &saddr, ntohs(sport),
+ &daddr, ntohs(dport),
+ &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+
+ return sk;
+}
+EXPORT_SYMBOL(xt_socket_get4_sk);
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+ const struct xt_socket_mtinfo1 *info)
+{
+ struct sock *sk;
+
+ sk = xt_socket_get4_sk(skb, par);
if (sk != NULL) {
bool wildcard;
bool transparent = true;
@@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
sk = NULL;
}
- pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
- protocol, &saddr, ntohs(sport),
- &daddr, ntohs(dport),
- &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
return (sk != NULL);
}
@@ -253,8 +265,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
return 0;
}
-static bool
-socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+struct sock*
+xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
struct udphdr _hdr, *hp = NULL;
@@ -262,7 +274,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
struct in6_addr *daddr, *saddr;
__be16 dport, sport;
int thoff, tproto;
- const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
if (tproto < 0) {
@@ -274,7 +285,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
hp = skb_header_pointer(skb, thoff,
sizeof(_hdr), &_hdr);
if (hp == NULL)
- return false;
+ return NULL;
saddr = &iph->saddr;
sport = hp->source;
@@ -284,13 +295,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
} else if (tproto == IPPROTO_ICMPV6) {
if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
&sport, &dport))
- return false;
+ return NULL;
} else {
- return false;
+ return NULL;
}
sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+ pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
+ "(orig %pI6:%hu) sock %p\n",
+ tproto, saddr, ntohs(sport),
+ daddr, ntohs(dport),
+ &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+ return sk;
+}
+EXPORT_SYMBOL(xt_socket_get6_sk);
+
+static bool
+socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct sock *sk;
+ const struct xt_socket_mtinfo1 *info;
+
+ info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+ sk = xt_socket_get6_sk(skb, par);
if (sk != NULL) {
bool wildcard;
bool transparent = true;
@@ -313,12 +341,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
sk = NULL;
}
- pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
- "(orig %pI6:%hu) sock %p\n",
- tproto, saddr, ntohs(sport),
- daddr, ntohs(dport),
- &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
return (sk != NULL);
}
#endif
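The hunk above splits the socket lookup out of socket_match() and socket_mt6_v1() and exports xt_socket_get4_sk(), xt_socket_get6_sk() and xt_socket_put_sk(), so other netfilter code (notably the xt_qtaguid match added earlier in this patch) can resolve an skb to its owning socket. A minimal sketch of such a caller — only the three exported symbols come from this patch, the rest is illustrative — might look like:

/* Sketch of a hypothetical in-kernel user of the exported helpers. */
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <net/sock.h>
#include <net/tcp_states.h>

extern struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
                                      struct xt_action_param *par);
extern void xt_socket_put_sk(struct sock *sk);

static bool example_owner_match(const struct sk_buff *skb,
                                struct xt_action_param *par)
{
        struct sock *sk = xt_socket_get4_sk(skb, par);
        bool has_owner = false;

        if (sk != NULL) {
                /* sk_socket is only valid for full (non-timewait) sockets */
                has_owner = sk->sk_state != TCP_TIME_WAIT &&
                            sk->sk_socket != NULL;
                xt_socket_put_sk(sk);   /* drop the ref taken by the lookup */
        }
        return has_owner;
}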
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe895b663..8e12c8a2b82b 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@ menuconfig RFKILL
To compile this driver as a module, choose M here: the
module will be called rfkill.
+config RFKILL_PM
+ bool "Power off on suspend"
+ depends on RFKILL && PM
+ default y
+
# LED trigger support
config RFKILL_LEDS
bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640a2774..df2dae6b2723 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -769,6 +769,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_pause_polling);
+#ifdef CONFIG_RFKILL_PM
void rfkill_resume_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
@@ -803,14 +804,17 @@ static int rfkill_resume(struct device *dev)
return 0;
}
+#endif
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_attrs = rfkill_dev_attrs,
.dev_uevent = rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
.suspend = rfkill_suspend,
.resume = rfkill_resume,
+#endif
};
bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1f1ef70f34f2..8e2a668c9230 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -159,3 +159,14 @@ config LIB80211_DEBUG
from lib80211.
If unsure, say N.
+
+config CFG80211_ALLOW_RECONNECT
+ bool "Allow reconnect while already connected"
+ depends on CFG80211
+ default n
+ help
+ The cfg80211 stack does not allow connecting while already
+ connected. This option permits a new connection in that case.
+
+ Select this option ONLY for wlan drivers that are specifically
+ built for such purposes.
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2936cb809152..1c100d331e11 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -17,7 +17,7 @@
#include "nl80211.h"
#include "wext-compat.h"
-#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ)
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
{
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dec0fa28372e..c0fe1a8a6ee1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -661,8 +661,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
if (wdev->sme_state != CFG80211_SME_CONNECTED)
return;
+#endif
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
@@ -760,10 +762,14 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
if (wdev->sme_state != CFG80211_SME_IDLE)
return -EALREADY;
if (WARN_ON(wdev->connect_keys)) {
+#else
+ if (wdev->connect_keys) {
+#endif
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
}
diff --git a/security/Kconfig b/security/Kconfig
index e0f08b52e4ab..85923b649d17 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -185,6 +185,7 @@ source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
+source security/tf_driver/Kconfig
source security/integrity/ima/Kconfig
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9e1ca9..9962092f0655 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -7,6 +7,7 @@ subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
+subdir-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver
# always enable default capabilities
obj-y += commoncap.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_AUDIT) += lsm_audit.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver/built-in.o
# Object integrity file lists
subdir-$(CONFIG_IMA) += integrity/ima
diff --git a/security/commoncap.c b/security/commoncap.c
index a93b3b733079..e508e2b170a8 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -29,6 +29,10 @@
#include <linux/securebits.h>
#include <linux/user_namespace.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -83,6 +87,12 @@ EXPORT_SYMBOL(cap_netlink_recv);
int cap_capable(struct task_struct *tsk, const struct cred *cred,
struct user_namespace *targ_ns, int cap, int audit)
{
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+ if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+ return 0;
+ if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+ return 0;
+#endif
for (;;) {
/* The creator of the user namespace has all caps. */
if (targ_ns != &init_user_ns && targ_ns->creator == cred->user)
diff --git a/security/tf_driver/Kconfig b/security/tf_driver/Kconfig
new file mode 100644
index 000000000000..2a980c5ade43
--- /dev/null
+++ b/security/tf_driver/Kconfig
@@ -0,0 +1,8 @@
+config TRUSTED_FOUNDATIONS
+ bool "Enable TF Driver"
+ default n
+ select CRYPTO_SHA1
+ help
+ This option adds kernel support for communication with the Trusted Foundations.
+ If you are unsure how to answer this question, answer N.
+
diff --git a/security/tf_driver/Makefile b/security/tf_driver/Makefile
new file mode 100644
index 000000000000..dfadb7d97406
--- /dev/null
+++ b/security/tf_driver/Makefile
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+# debug options
+#EXTRA_CFLAGS += -O0 -DDEBUG -D_DEBUG -DCONFIG_TF_DRIVER_DEBUG_SUPPORT
+EXTRA_CFLAGS += -DNDEBUG
+EXTRA_CFLAGS += -DLINUX -DCONFIG_TF_TRUSTZONE -DCONFIG_TFN
+
+ifdef S_VERSION_BUILD
+EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
+
+tf_driver-objs += tf_util.o
+tf_driver-objs += tf_conn.o
+tf_driver-objs += tf_device.o
+tf_driver-objs += tf_comm.o
+tf_driver-objs += tf_comm_tz.o
+
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver.o
diff --git a/security/tf_driver/s_version.h b/security/tf_driver/s_version.h
new file mode 100644
index 000000000000..6244d3fe7e8d
--- /dev/null
+++ b/security/tf_driver/s_version.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __S_VERSION_H__
+#define __S_VERSION_H__
+
+/*
+ * Usage: define S_VERSION_BUILD on the compiler's command line.
+ *
+ * Then set:
+ * - S_VERSION_OS
+ * - S_VERSION_PLATFORM
+ * - S_VERSION_MAIN
+ * - S_VERSION_ENG is optional
+ * - S_VERSION_PATCH is optional
+ * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
+ */
+
+#define S_VERSION_OS "A" /* "A" for all Android */
+#define S_VERSION_PLATFORM "B" /* "B" for Tegra3 */
+
+/*
+ * This version number must be updated for each new release
+ */
+#define S_VERSION_MAIN "01.03"
+
+/*
+ * If this is a patch or engineering version, use the following
+ * defines to set the version number. Otherwise set these values to 0.
+ */
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+
+#ifdef S_VERSION_BUILD
+/* TRICK: detect if S_VERSION_BUILD is defined but empty */
+#if 0 == S_VERSION_BUILD-0
+#undef S_VERSION_BUILD
+#define S_VERSION_BUILD 0
+#endif
+#else
+/* S_VERSION_BUILD is not defined */
+#define S_VERSION_BUILD 0
+#endif
+
+#define __STRINGIFY(X) #X
+#define __STRINGIFY2(X) __STRINGIFY(X)
+
+#if S_VERSION_ENG != 0
+#define _S_VERSION_ENG "e" __STRINGIFY2(S_VERSION_ENG)
+#else
+#define _S_VERSION_ENG ""
+#endif
+
+#if S_VERSION_PATCH != 0
+#define _S_VERSION_PATCH "p" __STRINGIFY2(S_VERSION_PATCH)
+#else
+#define _S_VERSION_PATCH ""
+#endif
+
+#if !defined(NDEBUG) || defined(_DEBUG)
+#define S_VERSION_VARIANT "D "
+#else
+#define S_VERSION_VARIANT " "
+#endif
+
+#define S_VERSION_STRING \
+ "TFN" \
+ S_VERSION_OS \
+ S_VERSION_PLATFORM \
+ S_VERSION_MAIN \
+ _S_VERSION_ENG \
+ _S_VERSION_PATCH \
+ "." __STRINGIFY2(S_VERSION_BUILD) " " \
+ S_VERSION_VARIANT
+
+#endif /* __S_VERSION_H__ */
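Because the version string is assembled entirely by the preprocessor, its final shape can be checked with a trivial host-side program; the file name, include path and build number below are illustrative, and the exact trailing padding depends on whether NDEBUG is set (the driver Makefile sets it):

/* Host-side sketch: print the assembled version string.
 * Build e.g. with: cc -DNDEBUG -DS_VERSION_BUILD=42 -I. show_version.c
 * With the defaults above this prints something like "TFNAB01.03.42"
 * followed by the variant padding. */
#include <stdio.h>
#include "s_version.h"

int main(void)
{
        printf("%s\n", S_VERSION_STRING);
        return 0;
}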
diff --git a/security/tf_driver/tf_comm.c b/security/tf_driver/tf_comm.c
new file mode 100644
index 000000000000..8b12f293eabf
--- /dev/null
+++ b/security/tf_driver/tf_comm.c
@@ -0,0 +1,1745 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/freezer.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_zebra.h"
+#endif
+
+/*---------------------------------------------------------------------------
+ * Internal Constants
+ *---------------------------------------------------------------------------*/
+
+/*
+ * shared memories descriptor constants
+ */
+#define DESCRIPTOR_B_MASK (1 << 2)
+#define DESCRIPTOR_C_MASK (1 << 3)
+#define DESCRIPTOR_S_MASK (1 << 10)
+
+#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
+#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
+#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
+
+#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+/*
+ * Reject attempts to share strongly-ordered or device memory:
+ * Strongly-Ordered: TEX=0b000, C=0, B=0
+ * Shared Device: TEX=0b000, C=0, B=1
+ * Non-Shared Device: TEX=0b010, C=0, B=0
+ */
+#define L2_TEX_C_B_MASK \
+ ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
+#define L2_TEX_C_B_STRONGLY_ORDERED \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
+#define L2_TEX_C_B_SHARED_DEVICE \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
+#define L2_TEX_C_B_NON_SHARED_DEVICE \
+ ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
+
+#define CACHE_S(x) ((x) & (1 << 24))
+#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
+
+#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
+#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
+
+/*---------------------------------------------------------------------------
+ * atomic operation definitions
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Atomically updates the sync_serial_n and time_n registers.
+ * sync_serial_n and time_n modifications are thread safe.
+ */
+void tf_set_current_time(struct tf_comm *comm)
+{
+ u32 new_sync_serial;
+ struct timeval now;
+ u64 time64;
+
+ /*
+ * lock the structure while updating the L1 shared memory fields
+ */
+ spin_lock(&comm->lock);
+
+ /* read sync_serial_n and change the TimeSlot bit field */
+ new_sync_serial =
+ tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* Write the new time64 and nSyncSerial into shared memory */
+ tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
+ TF_SYNC_SERIAL_TIMESLOT_N], time64);
+ tf_write_reg32(&comm->l1_buffer->sync_serial_n,
+ new_sync_serial);
+
+ spin_unlock(&comm->lock);
+}
+
+/*
+ * Reads the S-timeout published by the Secure World.
+ * The difficulty here is to atomically read two u32
+ * values from the L1 shared buffer.
+ * This is guaranteed by reading the timeslot given by the
+ * Secure World before and after the operation.
+ */
+static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
+{
+ u32 sync_serial_s_initial = 0;
+ u32 sync_serial_s_final = 1;
+ u64 time64;
+
+ spin_lock(&comm->lock);
+
+ while (sync_serial_s_initial != sync_serial_s_final) {
+ sync_serial_s_initial = tf_read_reg32(
+ &comm->l1_buffer->sync_serial_s);
+ time64 = tf_read_reg64(
+ &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
+
+ sync_serial_s_final = tf_read_reg32(
+ &comm->l1_buffer->sync_serial_s);
+ }
+
+ spin_unlock(&comm->lock);
+
+ *time = time64;
+}
+
+/*----------------------------------------------------------------------------
+ * SIGKILL signal handling
+ *----------------------------------------------------------------------------*/
+
+static bool sigkill_pending(void)
+{
+ if (signal_pending(current)) {
+ dprintk(KERN_INFO "A signal is pending\n");
+ if (sigismember(&current->pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending\n");
+ return true;
+ } else if (sigismember(
+ &current->signal->shared_pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type)
+{
+ struct tf_coarse_page_table *coarse_pg_table = NULL;
+
+ spin_lock(&(alloc_context->lock));
+
+ if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
+ /*
+ * The free list can provide us a coarse page table
+ * descriptor
+ */
+ coarse_pg_table = list_first_entry(
+ &alloc_context->free_coarse_page_tables,
+ struct tf_coarse_page_table, list);
+ list_del(&(coarse_pg_table->list));
+
+ coarse_pg_table->parent->ref_count++;
+ } else {
+ /* no array of coarse page tables, create a new one */
+ struct tf_coarse_page_table_array *array;
+ void *page;
+ int i;
+
+ spin_unlock(&(alloc_context->lock));
+
+ /* first allocate a new page descriptor */
+ array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
+ if (array == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+ " failed to allocate a table array\n",
+ alloc_context);
+ return NULL;
+ }
+
+ array->type = type;
+ INIT_LIST_HEAD(&(array->list));
+
+ /* now allocate the actual page the page descriptor describes */
+ page = (void *) internal_get_zeroed_page(GFP_KERNEL);
+ if (page == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+ " failed allocate a page\n",
+ alloc_context);
+ internal_kfree(array);
+ return NULL;
+ }
+
+ spin_lock(&(alloc_context->lock));
+
+ /* initialize the coarse page table descriptors */
+ for (i = 0; i < 4; i++) {
+ INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
+ array->coarse_page_tables[i].descriptors =
+ page + (i * SIZE_1KB);
+ array->coarse_page_tables[i].parent = array;
+
+ if (i == 0) {
+ /*
+ * the first element is kept for the current
+ * coarse page table allocation
+ */
+ coarse_pg_table =
+ &(array->coarse_page_tables[i]);
+ array->ref_count++;
+ } else {
+ /*
+ * The other elements are added to the free list
+ */
+ list_add(&(array->coarse_page_tables[i].list),
+ &(alloc_context->
+ free_coarse_page_tables));
+ }
+ }
+
+ list_add(&(array->list),
+ &(alloc_context->coarse_page_table_arrays));
+ }
+ spin_unlock(&(alloc_context->lock));
+
+ return coarse_pg_table;
+}
+
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force)
+{
+ struct tf_coarse_page_table_array *array;
+
+ spin_lock(&(alloc_context->lock));
+
+ array = coarse_pg_table->parent;
+
+ (array->ref_count)--;
+
+ if (array->ref_count == 0) {
+ /*
+ * no coarse page table descriptor is used
+ * check if we should free the whole page
+ */
+
+ if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
+ && (force == 0))
+ /*
+ * This is a preallocated page,
+ * add the page back to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ else {
+ /*
+ * None of the page's coarse page table descriptors
+ * are in use, free the whole page
+ */
+ int i;
+ u32 *descriptors;
+
+ /*
+ * remove the page's associated coarse page table
+ * descriptors from the free list
+ */
+ for (i = 0; i < 4; i++)
+ if (&(array->coarse_page_tables[i]) !=
+ coarse_pg_table)
+ list_del(&(array->
+ coarse_page_tables[i].list));
+
+ descriptors =
+ array->coarse_page_tables[0].descriptors;
+ array->coarse_page_tables[0].descriptors = NULL;
+
+ /* remove the coarse page table from the array */
+ list_del(&(array->list));
+
+ spin_unlock(&(alloc_context->lock));
+ /*
+ * Free the page.
+ * The address of the page is contained in the first
+ * element
+ */
+ internal_free_page((unsigned long) descriptors);
+ /* finally free the array */
+ internal_kfree(array);
+
+ spin_lock(&(alloc_context->lock));
+ }
+ } else {
+ /*
+ * Some coarse page table descriptors are in use.
+ * Add the descriptor to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock_init(&(alloc_context->lock));
+ INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
+ INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
+}
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock(&(alloc_context->lock));
+
+ /* now clean up the list of page descriptors */
+ while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
+ struct tf_coarse_page_table_array *page_desc;
+ u32 *descriptors;
+
+ page_desc = list_first_entry(
+ &alloc_context->coarse_page_table_arrays,
+ struct tf_coarse_page_table_array, list);
+
+ descriptors = page_desc->coarse_page_tables[0].descriptors;
+ list_del(&(page_desc->list));
+
+ spin_unlock(&(alloc_context->lock));
+
+ if (descriptors != NULL)
+ internal_free_page((unsigned long)descriptors);
+
+ internal_kfree(page_desc);
+
+ spin_lock(&(alloc_context->lock));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+/*
+ * Returns the L1 coarse page descriptor for
+ * a coarse page table located at address coarse_pg_table_descriptors
+ */
+u32 tf_get_l1_coarse_descriptor(
+ u32 coarse_pg_table_descriptors[256])
+{
+ u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
+ & L1_COARSE_DESCRIPTOR_ADDR_MASK);
+
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
+ dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
+ "V31-12 added to descriptor\n");
+ /* the 16k alignment restriction applies */
+ descriptor |= (DESCRIPTOR_V13_12_GET(
+ (u32)coarse_pg_table_descriptors) <<
+ L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
+ }
+
+ return descriptor;
+}
+
+
+#define dprintk_desc(...)
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ u32 *hwpte;
+ u32 tex = 0;
+ u32 descriptor = 0;
+
+ dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
+ pgd = pgd_offset(mm, vaddr);
+ dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
+ (unsigned int) *pgd);
+ if (pgd_none(*pgd))
+ goto error;
+ pud = pud_offset(pgd, vaddr);
+ dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
+ (unsigned int) *pud);
+ if (pud_none(*pud))
+ goto error;
+ pmd = pmd_offset(pud, vaddr);
+ dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
+ (unsigned int) *pmd);
+ if (pmd_none(*pmd))
+ goto error;
+
+ if (PMD_TYPE_SECT&(*pmd)) {
+ /* We have a section */
+ dprintk_desc(KERN_INFO "Section descr=%x\n",
+ (unsigned int)*pmd);
+ if ((*pmd) & PMD_SECT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*pmd) & PMD_SECT_CACHEABLE)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*pmd) & PMD_SECT_S)
+ descriptor |= DESCRIPTOR_S_MASK;
+ tex = ((*pmd) >> 12) & 7;
+ } else {
+ /* We have a table */
+ ptep = pte_offset_map(pmd, vaddr);
+ if (pte_present(*ptep)) {
+ dprintk_desc(KERN_INFO "L2 descr=%x\n",
+ (unsigned int) *ptep);
+ if ((*ptep) & L_PTE_MT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*ptep) & L_PTE_MT_WRITETHROUGH)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*ptep) & L_PTE_MT_DEV_SHARED)
+ descriptor |= DESCRIPTOR_S_MASK;
+
+ /*
+ * Linux's pte doesn't keep track of TEX value.
+ * Have to jump to hwpte see include/asm/pgtable.h
+ * (-2k before 2.6.38, then +2k)
+ */
+#ifdef PTE_HWTABLE_SIZE
+ hwpte = (u32 *) (ptep+PTE_HWTABLE_PTRS);
+#else
+ hwpte = (u32 *) (ptep-PTRS_PER_PTE);
+#endif
+ if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
+ ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
+ goto error;
+ dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
+ tex = ((*hwpte) >> 6) & 7;
+ pte_unmap(ptep);
+ } else {
+ pte_unmap(ptep);
+ goto error;
+ }
+ }
+
+ descriptor |= (tex << 6);
+
+ return descriptor;
+
+error:
+ dprintk(KERN_ERR "Error occured in %s\n", __func__);
+ return 0;
+}
+
+
+/*
+ * Changes an L2 page descriptor back to a pointer to a physical page
+ */
+inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
+{
+ return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
+}
+
+
+/*
+ * Converts the struct page pointer stored in *l2_page_descriptor into
+ * the corresponding L2 page descriptor, with access rights from flags.
+ */
+static void tf_get_l2_page_descriptor(
+ u32 *l2_page_descriptor,
+ u32 flags, struct mm_struct *mm)
+{
+ unsigned long page_vaddr;
+ u32 descriptor;
+ struct page *page;
+ bool unmap_page = false;
+
+#if 0
+ dprintk(KERN_INFO
+ "tf_get_l2_page_descriptor():"
+ "*l2_page_descriptor=%x\n",
+ *l2_page_descriptor);
+#endif
+
+ if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
+ return;
+
+ page = (struct page *) (*l2_page_descriptor);
+
+ page_vaddr = (unsigned long) page_address(page);
+ if (page_vaddr == 0) {
+ dprintk(KERN_INFO "page_address returned 0\n");
+ /* Should we use kmap_atomic(page, KM_USER0) instead ? */
+ page_vaddr = (unsigned long) kmap(page);
+ if (page_vaddr == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ dprintk(KERN_ERR "kmap returned 0\n");
+ return;
+ }
+ unmap_page = true;
+ }
+
+ descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
+ if (descriptor == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ return;
+ }
+ descriptor |= L2_PAGE_DESCRIPTOR_BASE;
+
+ descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
+
+ if (!(flags & TF_SHMEM_TYPE_WRITE))
+ /* only read access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
+ else
+ /* read and write access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
+
+ if (unmap_page)
+ kunmap(page);
+
+ *l2_page_descriptor = descriptor;
+}
+
+
+/*
+ * Unlocks the physical memory pages
+ * and frees the coarse page tables that need to be freed.
+ */
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ u32 coarse_page_index;
+
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
+ shmem_desc);
+
+#ifdef DEBUG_COARSE_TABLES
+ printk(KERN_DEBUG "tf_cleanup_shared_memory "
+ "- number of coarse page tables=%d\n",
+ shmem_desc->coarse_pg_table_count);
+
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ shmem_desc->coarse_pg_table[coarse_page_index]->
+ descriptors,
+ coarse_page_index);
+ if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+ printk(KERN_DEBUG "%p ",
+ shmem_desc->coarse_pg_table[
+ coarse_page_index]->
+ descriptors);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+ }
+ printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
+#endif
+
+ /* Parse the coarse page descriptors */
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ u32 found = 0;
+
+ /* parse the page descriptors of the coarse page */
+ for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
+ u32 l2_page_descriptor = (u32) (shmem_desc->
+ coarse_pg_table[coarse_page_index]->
+ descriptors[j]);
+
+ if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
+ struct page *page =
+ tf_l2_page_descriptor_to_page(
+ l2_page_descriptor);
+
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ internal_page_cache_release(page);
+
+ found = 1;
+ } else if (found == 1) {
+ break;
+ }
+ }
+
+ /*
+ * Only free the coarse pages of descriptors not preallocated
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0))
+ tf_free_coarse_page_table(alloc_context,
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ 0);
+ }
+
+ shmem_desc->coarse_pg_table_count = 0;
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
+ shmem_desc);
+}
+
+/*
+ * Make sure the coarse pages are allocated. If not allocated, do it.
+ * Locks down the physical memory pages.
+ * Verifies the memory attributes depending on flags.
+ */
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count)
+{
+ u32 coarse_page_index;
+ u32 coarse_page_count;
+ u32 page_count;
+ u32 page_shift = 0;
+ int ret = 0;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ dprintk(KERN_INFO "tf_fill_descriptor_table"
+ "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
+ "flags = 0x%08x)\n",
+ shmem_desc,
+ buffer,
+ buffer_size,
+ in_user_space,
+ flags);
+
+ /*
+ * Compute the number of pages
+ * Compute the number of coarse pages
+ * Compute the page offset
+ */
+ page_count = ((buffer & ~PAGE_MASK) +
+ buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
+
+ /* check whether the 16k alignment restriction applies */
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
+ /*
+ * The 16k alignment restriction applies.
+ * Shift data to get them 16k aligned
+ */
+ page_shift = DESCRIPTOR_V13_12_GET(buffer);
+ page_count += page_shift;
+
+
+ /*
+ * Check that the number of pages fits in the coarse page tables
+ */
+ if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
+ TF_MAX_COARSE_PAGES)) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
+ "%u pages required to map shared memory!\n",
+ shmem_desc, page_count);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* each coarse page table describes 256 pages */
+ coarse_page_count = ((page_count +
+ TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
+
+ /*
+ * Compute the buffer offset
+ */
+ *buffer_start_offset = (buffer & ~PAGE_MASK) |
+ (page_shift << PAGE_SHIFT);
+
+ /* map each coarse page */
+ for (coarse_page_index = 0;
+ coarse_page_index < coarse_page_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ /* compute a virtual address with appropriate offset */
+ u32 buffer_offset_vaddr = buffer +
+ (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
+ u32 pages_to_get;
+
+ /*
+ * Compute the number of pages left for this coarse page.
+ * Decrement page_count each time
+ */
+ pages_to_get = (page_count >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
+ TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
+ page_count -= pages_to_get;
+
+ /*
+ * Check if the coarse page has already been allocated
+ * If not, do it now
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
+ || (shmem_desc->type ==
+ TF_SHMEM_TYPE_PM_HIBERNATE)) {
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ alloc_context,
+ TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
+
+ if (coarse_pg_table == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table(%p): "
+ "tf_alloc_coarse_page_table "
+ "failed for coarse page %d\n",
+ shmem_desc, coarse_page_index);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ } else {
+ coarse_pg_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+ }
+
+ /*
+ * The page is not necessarily filled with zeroes.
+ * Set the fault descriptors (each descriptor is 4 bytes long)
+ */
+ memset(coarse_pg_table->descriptors, 0x00,
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+
+ if (in_user_space) {
+ int pages;
+
+ /*
+ * TRICK: use coarse_pg_table->descriptors to
+ * hold the (struct page*) items before getting their
+ * physical address
+ */
+ down_read(&(current->mm->mmap_sem));
+ pages = internal_get_user_pages(
+ current,
+ current->mm,
+ buffer_offset_vaddr,
+ /*
+ * page_shift is cleared after retrieving first
+ * coarse page
+ */
+ (pages_to_get - page_shift),
+ (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
+ 0,
+ (struct page **) (coarse_pg_table->descriptors
+ + page_shift),
+ vmas);
+ up_read(&(current->mm->mmap_sem));
+
+ if ((pages <= 0) ||
+ (pages != (pages_to_get - page_shift))) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table:"
+ " get_user_pages got %d pages while "
+ "trying to get %d pages!\n",
+ pages, pages_to_get - page_shift);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ for (j = page_shift;
+ j < page_shift + pages;
+ j++) {
+ /* Get the actual L2 descriptors */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ current->mm);
+ /*
+ * Reject Strongly-Ordered or Device Memory
+ */
+#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
+ ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
+
+ if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
+ coarse_pg_table->
+ descriptors[j])) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table:"
+ " descriptor 0x%08X use "
+ "strongly-ordered or device "
+ "memory. Rejecting!\n",
+ coarse_pg_table->
+ descriptors[j]);
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+ } else if (is_vmalloc_addr((void *)buffer_offset_vaddr)) {
+ /* Kernel-space memory obtained through vmalloc */
+ dprintk(KERN_INFO
+ "tf_fill_descriptor_table: "
+ "vmalloc'ed buffer starting at %p\n",
+ (void *)buffer_offset_vaddr);
+ for (j = page_shift; j < pages_to_get; j++) {
+ struct page *page;
+ void *addr =
+ (void *)(buffer_offset_vaddr +
+ (j - page_shift) * PAGE_SIZE);
+ page = vmalloc_to_page(addr);
+ if (page == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table: "
+ "cannot map %p (vmalloc) "
+ "to page\n",
+ addr);
+ ret = -EFAULT;
+ goto error;
+ }
+ coarse_pg_table->descriptors[j] = (u32)page;
+ get_page(page);
+
+ /* change coarse page "page address" */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ &init_mm);
+ }
+ } else {
+ /* Kernel-space memory given by a virtual address */
+ dprintk(KERN_INFO
+ "tf_fill_descriptor_table: "
+ "buffer starting at virtual address %p\n",
+ (void *)buffer_offset_vaddr);
+ for (j = page_shift; j < pages_to_get; j++) {
+ struct page *page;
+ void *addr =
+ (void *)(buffer_offset_vaddr +
+ (j - page_shift) * PAGE_SIZE);
+ page = virt_to_page(addr);
+ if (page == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table: "
+ "cannot map %p (virtual) "
+ "to page\n",
+ addr);
+ ret = -EFAULT;
+ goto error;
+ }
+ coarse_pg_table->descriptors[j] = (u32)page;
+ get_page(page);
+
+ /* change coarse page "page address" */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ &init_mm);
+ }
+ }
+
+ dmac_flush_range((void *)coarse_pg_table->descriptors,
+ (void *)(((u32)(coarse_pg_table->descriptors)) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
+
+ outer_clean_range(
+ __pa(coarse_pg_table->descriptors),
+ __pa(coarse_pg_table->descriptors) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+ wmb();
+
+ /* Update the coarse page table address */
+ descriptors[coarse_page_index] =
+ tf_get_l1_coarse_descriptor(
+ coarse_pg_table->descriptors);
+
+ /*
+ * The next coarse page has no page shift, reset the
+ * page_shift
+ */
+ page_shift = 0;
+ }
+
+ *descriptor_count = coarse_page_count;
+ shmem_desc->coarse_pg_table_count = coarse_page_count;
+
+#ifdef DEBUG_COARSE_TABLES
+ printk(KERN_DEBUG "ntf_fill_descriptor_table - size=0x%08X "
+ "numberOfCoarsePages=%d\n", buffer_size,
+ shmem_desc->coarse_pg_table_count);
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_page_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ coarse_page_table,
+ coarse_page_table->descriptors,
+ coarse_page_index);
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+ printk(KERN_DEBUG "0x%08X ",
+ coarse_page_table->descriptors[k]);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+ printk(KERN_DEBUG "ntf_fill_descriptor_table() - done\n\n");
+#endif
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memory(
+ alloc_context,
+ shmem_desc,
+ 0);
+
+ return ret;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+u8 *tf_get_description(struct tf_comm *comm)
+{
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ return comm->l1_buffer->version_description;
+
+ return NULL;
+}
+
+/*
+ * Returns a non-zero value if the specified S-timeout has expired, zero
+ * otherwise.
+ *
+ * The value pointed to by relative_timeout_jiffies gives the relative
+ * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
+ * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
+ */
+static int tf_test_s_timeout(
+ u64 timeout,
+ signed long *relative_timeout_jiffies)
+{
+ struct timeval now;
+ u64 time64;
+
+ *relative_timeout_jiffies = 0;
+
+ /* immediate timeout */
+ if (timeout == TIME_IMMEDIATE)
+ return 1;
+
+ /* infinite timeout */
+ if (timeout == TIME_INFINITE) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: "
+ "timeout is infinite\n");
+ *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+ return 0;
+ }
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ /* will not overflow as operations are done on 64bit values */
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* timeout expired */
+ if (time64 >= timeout) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
+ return 1;
+ }
+
+ /*
+ * finite timeout, compute relative_timeout_jiffies
+ */
+ /* will not overflow as time64 < timeout */
+ timeout -= time64;
+
+ /* guarantee *relative_timeout_jiffies is a valid timeout */
+ if ((timeout >> 32) != 0)
+ *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
+ else
+ *relative_timeout_jiffies =
+ msecs_to_jiffies((unsigned int) timeout);
+
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
+ *relative_timeout_jiffies);
+ return 0;
+}
+
+static void tf_copy_answers(struct tf_comm *comm)
+{
+ u32 first_answer;
+ u32 first_free_answer;
+ struct tf_answer_struct *answerStructureTemp;
+
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ spin_lock(&comm->lock);
+ first_free_answer = tf_read_reg32(
+ &comm->l1_buffer->first_free_answer);
+ first_answer = tf_read_reg32(
+ &comm->l1_buffer->first_answer);
+
+ while (first_answer != first_free_answer) {
+ /* answer queue not empty */
+ union tf_answer sComAnswer;
+ struct tf_answer_header header;
+
+ /*
+ * the size of the answer in 32-bit words, not in
+ * bytes
+ */
+ u32 command_size;
+ u32 i;
+ u32 *temp = (uint32_t *) &header;
+
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_answers(%p): "
+ "Read answers from L1\n",
+ current->pid, comm);
+
+ /* Read the answer header */
+ for (i = 0;
+ i < sizeof(struct tf_answer_header)/sizeof(u32);
+ i++)
+ temp[i] = comm->l1_buffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ /* Read the answer from the L1_Buffer*/
+ command_size = header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ temp = (uint32_t *) &sComAnswer;
+ for (i = 0; i < command_size; i++)
+ temp[i] = comm->l1_buffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ answerStructureTemp = (struct tf_answer_struct *)
+ sComAnswer.header.operation_id;
+
+ tf_dump_answer(&sComAnswer);
+
+ memcpy(answerStructureTemp->answer, &sComAnswer,
+ command_size * sizeof(u32));
+ answerStructureTemp->answer_copied = true;
+
+ first_answer += command_size;
+ tf_write_reg32(&comm->l1_buffer->first_answer,
+ first_answer);
+ }
+ spin_unlock(&(comm->lock));
+ }
+}
+
+static void tf_copy_command(
+ struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_connection *connection,
+ enum TF_COMMAND_STATE *command_status)
+{
+ if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ && (command != NULL)) {
+ /*
+ * Write the message in the message queue.
+ */
+
+ if (*command_status == TF_COMMAND_STATE_PENDING) {
+ u32 command_size;
+ u32 queue_words_count;
+ u32 i;
+ u32 first_free_command;
+ u32 first_command;
+
+ spin_lock(&comm->lock);
+
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+
+ queue_words_count = first_free_command - first_command;
+ command_size = command->header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if ((queue_words_count + command_size) <
+ TF_N_MESSAGE_QUEUE_CAPACITY) {
+ /*
+ * The command queue is not full.
+ * If the command queue were full,
+ * the command would be copied on
+ * another iteration
+ * of the current function.
+ */
+
+ /*
+ * Change the conn state
+ */
+ if (connection == NULL)
+ goto copy;
+
+ spin_lock(&(connection->state_lock));
+
+ if ((connection->state ==
+ TF_CONN_STATE_NO_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+
+ dprintk(KERN_INFO
+ "tf_copy_command(%p):"
+ "Conn state is DEVICE_CONTEXT_SENT\n",
+ connection);
+ connection->state =
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
+ } else if ((connection->state !=
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type !=
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+ /* The connection
+ * is no longer valid.
+ * We may not send any command on it,
+ * not even another
+ * DESTROY_DEVICE_CONTEXT.
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Connection no longer valid."
+ "ABORT\n",
+ current->pid, connection);
+ *command_status =
+ TF_COMMAND_STATE_ABORTED;
+ spin_unlock(
+ &(connection->state_lock));
+ spin_unlock(
+ &comm->lock);
+ return;
+ } else if (
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
+ (connection->state ==
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ ) {
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Conn state is "
+ "DESTROY_DEVICE_CONTEXT_SENT\n",
+ current->pid, connection);
+ connection->state =
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
+ }
+ spin_unlock(&(connection->state_lock));
+copy:
+ /*
+ * Copy the command to L1 Buffer
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Write Message in the queue\n",
+ current->pid, command);
+ tf_dump_command(command);
+
+ for (i = 0; i < command_size; i++)
+ comm->l1_buffer->command_queue[
+ (first_free_command + i) %
+ TF_N_MESSAGE_QUEUE_CAPACITY] =
+ ((uint32_t *) command)[i];
+
+ *command_status =
+ TF_COMMAND_STATE_SENT;
+ first_free_command += command_size;
+
+ tf_write_reg32(
+ &comm->
+ l1_buffer->first_free_command,
+ first_free_command);
+ }
+ spin_unlock(&comm->lock);
+ }
+ }
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the command and waits for the answer
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_send_recv(struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_answer_struct *answerStruct,
+ struct tf_connection *connection,
+ int bKillable
+ )
+{
+ int result;
+ u64 timeout;
+ signed long nRelativeTimeoutJiffies;
+ bool wait_prepared = false;
+ enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
+ DEFINE_WAIT(wait);
+#ifdef CONFIG_FREEZER
+ unsigned long saved_flags;
+#endif
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
+ current->pid, command);
+
+#ifdef CONFIG_TF_ZEBRA
+ tf_clock_timer_start();
+#endif
+
+#ifdef CONFIG_FREEZER
+ saved_flags = current->flags;
+ current->flags |= PF_FREEZER_NOSIG;
+#endif
+
+ /*
+ * Read all answers from the answer queue
+ */
+copy_answers:
+ tf_copy_answers(comm);
+
+ tf_copy_command(comm, command, connection, &command_status);
+
+ /*
+ * Notify all waiting threads
+ */
+ wake_up(&(comm->wait_queue));
+
+#ifdef CONFIG_FREEZER
+ if (unlikely(freezing(current))) {
+
+ dprintk(KERN_INFO
+ "Entering refrigerator.\n");
+ refrigerator();
+ dprintk(KERN_INFO
+ "Left refrigerator.\n");
+ goto copy_answers;
+ }
+#endif
+
+#ifndef CONFIG_PREEMPT
+ if (need_resched())
+ schedule();
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Handle RPC (if any)
+ */
+ if (tf_rpc_execute(comm) == RPC_NON_YIELD)
+ goto schedule_secure_world;
+#endif
+
+ /*
+ * Join wait queue
+ */
+ /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
+ current->pid, command);*/
+ prepare_to_wait(&comm->wait_queue, &wait,
+ bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ wait_prepared = true;
+
+ /*
+ * Check if our answer is available
+ */
+ if (command_status == TF_COMMAND_STATE_ABORTED) {
+ /* Not waiting for an answer, return error code */
+ result = -EINTR;
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Command status is ABORTED."
+ "Exit with 0x%x\n",
+ current->pid, result);
+ goto exit;
+ }
+ if (answerStruct->answer_copied) {
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+ "Received answer (type 0x%02X)\n",
+ current->pid,
+ answerStruct->answer->header.message_type);
+ result = 0;
+ goto exit;
+ }
+
+ /*
+ * Check if a signal is pending
+ */
+ if (bKillable && (sigkill_pending())) {
+ if (command_status == TF_COMMAND_STATE_PENDING)
+ /*Command was not sent. */
+ result = -EINTR;
+ else
+ /* Command was sent but no answer was received yet. */
+ result = -EIO;
+
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Signal Pending. Return error %d\n",
+ current->pid, result);
+ goto exit;
+ }
+
+ /*
+ * Check if secure world is schedulable. It is schedulable if at
+ * least one of the following conditions holds:
+ * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
+ * is not set);
+ * + there is a command in the queue;
+ * + the secure world timeout is zero.
+ */
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ u32 first_free_command;
+ u32 first_command;
+ spin_lock(&comm->lock);
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+ spin_unlock(&comm->lock);
+ tf_read_timeout(comm, &timeout);
+ if ((first_free_command == first_command) &&
+ (tf_test_s_timeout(timeout,
+ &nRelativeTimeoutJiffies) == 0))
+ /*
+ * If the command queue is empty and the timeout has not
+ * expired, the Secure World is not schedulable
+ */
+ goto wait;
+ }
+
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+
+ /*
+ * Yield to the Secure World
+ */
+#ifdef CONFIG_TF_ZEBRA
+schedule_secure_world:
+#endif
+
+ result = tf_schedule_secure_world(comm);
+ if (result < 0)
+ goto exit;
+ goto copy_answers;
+
+wait:
+ if (bKillable && (sigkill_pending())) {
+ if (command_status == TF_COMMAND_STATE_PENDING)
+ result = -EINTR; /* Command was not sent. */
+ else
+ /* Command was sent but no answer was received yet. */
+ result = -EIO;
+
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Signal Pending while waiting. Return error %d\n",
+ current->pid, result);
+ goto exit;
+ }
+
+ if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+ "prepare to sleep infinitely\n", current->pid);
+ else
+ dprintk(KERN_INFO "tf_send_recv: "
+ "prepare to sleep 0x%lx jiffies\n",
+ nRelativeTimeoutJiffies);
+
+ /* go to sleep */
+ if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
+ dprintk(KERN_INFO
+ "tf_send_recv: timeout expired\n");
+ else
+ dprintk(KERN_INFO
+ "tf_send_recv: signal delivered\n");
+
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ goto copy_answers;
+
+exit:
+ if (wait_prepared) {
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ }
+
+#ifdef CONFIG_FREEZER
+ current->flags &= ~(PF_FREEZER_NOSIG);
+ current->flags |= (saved_flags & PF_FREEZER_NOSIG);
+#endif
+
+ return result;
+}
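+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * Answers are routed back to their sender by passing a kernel pointer
+ * through the protocol: the caller stores the address of a local
+ * struct tf_answer_struct in command->header.operation_id, and the answer
+ * dispatch code (see, for example, the dispatch loop in tf_pm_hibernate()
+ * in tf_comm_tz.c below) casts that field back and completes it:
+ *
+ *   struct tf_answer_struct answer_struct = {
+ *           .answer        = &answer,
+ *           .answer_copied = false,
+ *   };
+ *   command.header.operation_id = (u32) &answer_struct;
+ *   // ... later, when the matching answer is dequeued ...
+ *   memcpy(answer_struct.answer, &tmp_answer, command_size * sizeof(u32));
+ *   answer_struct.answer_copied = true;
+ */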
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the message and waits for the corresponding answer.
+ * It may return early if a signal needs to be delivered.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_send_receive(struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
+ bool bKillable)
+{
+ int error;
+ struct tf_answer_struct answerStructure;
+#ifdef CONFIG_SMP
+ long ret_affinity;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
+
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ if (command != NULL)
+ command->header.operation_id = (u32) &answerStructure;
+
+ dprintk(KERN_INFO "tf_send_receive\n");
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_ERR "tf_send_receive(%p): "
+ "Secure world not started\n", comm);
+
+ return -EFAULT;
+ }
+#endif
+
+ if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
+ dprintk(KERN_DEBUG
+ "tf_send_receive: Flag Terminating is set\n");
+ return 0;
+ }
+
+#ifdef CONFIG_SMP
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+
+ /*
+ * Send the command
+ */
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, bKillable);
+
+ if (!bKillable && sigkill_pending()) {
+ if ((command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
+ (answer->create_device_context.error_code ==
+ S_SUCCESS)) {
+
+ /*
+ * CREATE_DEVICE_CONTEXT was interrupted.
+ */
+ dprintk(KERN_INFO "tf_send_receive: "
+ "sending DESTROY_DEVICE_CONTEXT\n");
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ command->header.message_type =
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command->header.message_size =
+ (sizeof(struct
+ tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ command->header.operation_id =
+ (u32) &answerStructure;
+ command->destroy_device_context.device_context =
+ answer->create_device_context.
+ device_context;
+
+ goto destroy_context;
+ }
+ }
+
+ if (error == 0) {
+ /*
+ * tf_send_recv returned Success.
+ */
+ if (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ } else if (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ }
+ } else if (error == -EINTR) {
+ /*
+ * No command was sent, return failure.
+ */
+ dprintk(KERN_ERR
+ "tf_send_receive: "
+ "tf_send_recv failed (error %d) !\n",
+ error);
+ } else if (error == -EIO) {
+ /*
+ * A command was sent but its answer is still pending.
+ */
+
+ /* means bKillable is true */
+ dprintk(KERN_ERR
+ "tf_send_receive: "
+ "tf_send_recv interrupted (error %d)."
+ "Send DESTROY_DEVICE_CONTEXT.\n", error);
+
+ /* Send the DESTROY_DEVICE_CONTEXT. */
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ command->header.message_type =
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command->header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ command->header.operation_id =
+ (u32) &answerStructure;
+ command->destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, false);
+ if (error == -EINTR) {
+ /*
+ * Another thread already sent
+ * DESTROY_DEVICE_CONTEXT.
+ * We must still wait for the answer
+ * to the original command.
+ */
+ command = NULL;
+ goto destroy_context;
+ } else {
+ /* An answer was received.
+ * Check if it is the answer
+ * to the DESTROY_DEVICE_CONTEXT.
+ */
+ spin_lock(&comm->lock);
+ if (answer->header.message_type !=
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+ answerStructure.answer_copied = false;
+ }
+ spin_unlock(&comm->lock);
+ if (!answerStructure.answer_copied) {
+ /* Answer to DESTROY_DEVICE_CONTEXT
+ * was not yet received.
+ * Wait for the answer.
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_send_receive:"
+ "Answer to DESTROY_DEVICE_CONTEXT"
+ "not yet received.Retry\n",
+ current->pid);
+ command = NULL;
+ goto destroy_context;
+ }
+ }
+ }
+
+ dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
+ goto exit;
+
+destroy_context:
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, false);
+
+ /*
+ * tf_send_recv cannot return an error because
+ * it's not killable and not within a connection
+ */
+ BUG_ON(error != 0);
+
+ /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+exit:
+
+#ifdef CONFIG_SMP
+ ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+ return error;
+}
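+
+/*
+ * Editor's note -- descriptive comment added for clarity, not part of the
+ * original patch. Note that, on SMP kernels, tf_send_receive() pins the
+ * calling thread to CPU #0 for the duration of the exchange and restores
+ * the saved affinity mask on exit, presumably because the Secure World
+ * monitor is only entered from CPU #0 (the same pinning is done around the
+ * raw SMC in tf_smc_generic_call() in tf_comm_tz.c below).
+ */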
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+
+/*
+ * Handles all the power management calls.
+ * The operation is the type of power management
+ * operation to be performed.
+ *
+ * This routine only returns if a failure occurred or if
+ * the requested power management operation is of type "resume".
+ * "Hibernate" and "Shutdown" should block while doing the
+ * corresponding SMC to the Secure World.
+ */
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation)
+{
+ u32 status;
+ int error = 0;
+
+ dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_INFO "tf_power_management(%p): "
+ "succeeded (not started)\n", comm);
+
+ return 0;
+ }
+#endif
+
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ switch (operation) {
+ case TF_POWER_OPERATION_SHUTDOWN:
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ error = tf_pm_shutdown(comm);
+
+ if (error) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+
+ default:
+ goto not_allowed;
+ }
+ break;
+
+ case TF_POWER_OPERATION_HIBERNATE:
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ error = tf_pm_hibernate(comm);
+
+ if (error) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+
+ default:
+ goto not_allowed;
+ }
+ break;
+
+ case TF_POWER_OPERATION_RESUME:
+ error = tf_pm_resume(comm);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+ }
+
+ dprintk(KERN_INFO "tf_power_management(): succeeded\n");
+ return 0;
+
+not_allowed:
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Power command not allowed in current "
+ "Secure World state %d\n", status);
+ error = -ENOTTY;
+error:
+ return error;
+}
+
diff --git a/security/tf_driver/tf_comm.h b/security/tf_driver/tf_comm.h
new file mode 100644
index 000000000000..8921dc1d1be0
--- /dev/null
+++ b/security/tf_driver/tf_comm.h
@@ -0,0 +1,202 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_COMM_H__
+#define __TF_COMM_H__
+
+#include "tf_defs.h"
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------
+ * Misc
+ *----------------------------------------------------------------------------*/
+
+void tf_set_current_time(struct tf_comm *comm);
+
+/*
+ * Atomic accesses to 32-bit variables in the L1 Shared buffer
+ */
+static inline u32 tf_read_reg32(const u32 *comm_buffer)
+{
+ u32 result;
+
+ __asm__ __volatile__("@ tf_read_reg32\n"
+ "ldrex %0, [%1]\n"
+ : "=&r" (result)
+ : "r" (comm_buffer)
+ );
+
+ return result;
+}
+
+static inline void tf_write_reg32(void *comm_buffer, u32 value)
+{
+ u32 tmp;
+
+ __asm__ __volatile__("@ tf_write_reg32\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %0, %1, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r" (tmp)
+ : "r" (value), "r" (comm_buffer)
+ : "cc"
+ );
+}
+
+/*
+ * Atomic accesses to 64-bit variables in the L1 Shared buffer
+ */
+static inline u64 tf_read_reg64(void *comm_buffer)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ tf_read_reg64\n"
+ "ldrexd %0, [%1]\n"
+ : "=&r" (result)
+ : "r" (comm_buffer)
+ );
+
+ return result;
+}
+
+static inline void tf_write_reg64(void *comm_buffer, u64 value)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ tf_write_reg64\n"
+ "1: ldrexd %0, [%2]\n"
+ " strexd %0, %1, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r" (tmp)
+ : "r" (value), "r" (comm_buffer)
+ : "cc"
+ );
+}
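+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * The helpers above rely on ldrex/strex (and ldrexd/strexd for 64-bit
+ * values) so that words of the L1 shared buffer are read and written as
+ * single atomic accesses. On a toolchain that provides the GCC __atomic
+ * builtins, a roughly equivalent, though less explicit, formulation of the
+ * 32-bit accessors would be:
+ *
+ *   static inline u32 reg32_read_sketch(const u32 *p)
+ *   {
+ *           return __atomic_load_n(p, __ATOMIC_RELAXED);
+ *   }
+ *
+ *   static inline void reg32_write_sketch(u32 *p, u32 value)
+ *   {
+ *           __atomic_store_n(p, value, __ATOMIC_RELAXED);
+ *   }
+ */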
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/* RPC return values */
+#define RPC_NO 0x00 /* No RPC to execute */
+#define RPC_YIELD 0x01 /* Yield RPC */
+#define RPC_NON_YIELD 0x02 /* non-Yield RPC */
+
+int tf_rpc_execute(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+#define L1_DESCRIPTOR_FAULT (0x00000000)
+#define L2_DESCRIPTOR_FAULT (0x00000000)
+
+#define L2_DESCRIPTOR_ADDR_MASK (0xFFFFF000)
+
+#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT)
+#define DESCRIPTOR_V13_12_GET(a) ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
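+
+/*
+ * Editor's note -- illustrative worked example, not part of the original
+ * patch. With the usual PAGE_SHIFT of 12, DESCRIPTOR_V13_12_MASK is 0x3000,
+ * so DESCRIPTOR_V13_12_GET() extracts bits [13:12] of a virtual address:
+ * for a hypothetical a == 0x40003afc it yields (0x40003afc & 0x3000) >> 12,
+ * i.e. 3.
+ */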
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type);
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force);
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
+
+struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor);
+
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm);
+
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
+
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count);
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+int tf_schedule_secure_world(struct tf_comm *comm);
+
+int tf_send_receive(
+ struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
+ bool bKillable);
+
+
+/**
+ * Get a pointer to the Secure World description.
+ * This points directly into the L1 shared buffer
+ * and is valid only once the communication has
+ * been initialized.
+ **/
+u8 *tf_get_description(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+enum TF_POWER_OPERATION {
+ TF_POWER_OPERATION_HIBERNATE = 1,
+ TF_POWER_OPERATION_SHUTDOWN = 2,
+ TF_POWER_OPERATION_RESUME = 3,
+};
+
+int tf_pm_hibernate(struct tf_comm *comm);
+int tf_pm_resume(struct tf_comm *comm);
+int tf_pm_shutdown(struct tf_comm *comm);
+
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation);
+
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+int tf_init(struct tf_comm *comm);
+
+void tf_terminate(struct tf_comm *comm);
+
+
+#endif /* __TF_COMM_H__ */
diff --git a/security/tf_driver/tf_comm_tz.c b/security/tf_driver/tf_comm_tz.c
new file mode 100644
index 000000000000..4c89de84accf
--- /dev/null
+++ b/security/tf_driver/tf_comm_tz.c
@@ -0,0 +1,885 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+/*
+ * Structure common to all SMC operations
+ */
+struct tf_generic_smc {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+ u32 reg3;
+ u32 reg4;
+};
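+
+/*
+ * Editor's note -- descriptive comment added for clarity, not part of the
+ * original patch. Judging from the callers below, reg0 carries the SMC
+ * function identifier on entry and the status code on return, reg1 carries
+ * the first parameter on entry and the first result on return (e.g. the
+ * protocol version), and reg2 to reg4 carry additional parameters.
+ */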
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+static inline void tf_smc_generic_call(
+ struct tf_generic_smc *generic_smc)
+{
+#ifdef CONFIG_SMP
+ long ret;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret = sched_setaffinity(0, &local_cpu_mask);
+ if (ret != 0)
+ dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+#endif
+
+ __asm__ volatile(
+ "mov r0, %2\n"
+ "mov r1, %3\n"
+ "mov r2, %4\n"
+ "mov r3, %5\n"
+ "mov r4, %6\n"
+ ".word 0xe1600070 @ SMC 0\n"
+ "mov %0, r0\n"
+ "mov %1, r1\n"
+ : "=r" (generic_smc->reg0), "=r" (generic_smc->reg1)
+ : "r" (generic_smc->reg0), "r" (generic_smc->reg1),
+ "r" (generic_smc->reg2), "r" (generic_smc->reg3),
+ "r" (generic_smc->reg4)
+ : "r0", "r1", "r2", "r3", "r4");
+
+#ifdef CONFIG_SMP
+ ret = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret != 0)
+ dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+#endif
+}
+
+/*
+ * Calls the get protocol version SMC.
+ * Fills the parameter pProtocolVersion with the version number returned by the
+ * SMC
+ */
+static inline void tf_smc_get_protocol_version(u32 *protocol_version)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_GET_PROTOCOL_VERSION;
+ generic_smc.reg1 = 0;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+ *protocol_version = generic_smc.reg1;
+}
+
+
+/*
+ * Calls the init SMC with the specified parameters.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_smc_init(u32 shared_page_descriptor)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_INIT;
+ /* Descriptor for the layer 1 shared buffer */
+ generic_smc.reg1 = shared_page_descriptor;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+ if (generic_smc.reg0 != S_SUCCESS)
+ printk(KERN_ERR "tf_smc_init:"
+ " r0=0x%08X upon return (expected 0x%08X)!\n",
+ generic_smc.reg0,
+ S_SUCCESS);
+
+ return generic_smc.reg0;
+}
+
+
+/*
+ * Calls the reset irq SMC.
+ */
+static inline void tf_smc_reset_irq(void)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_RESET_IRQ;
+ generic_smc.reg1 = 0;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+}
+
+
+/*
+ * Calls the WAKE_UP SMC.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_smc_wake_up(u32 l1_shared_buffer_descriptor,
+ u32 shared_mem_start_offset,
+ u32 shared_mem_size)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_WAKE_UP;
+ generic_smc.reg1 = shared_mem_start_offset;
+ /* long form command */
+ generic_smc.reg2 = shared_mem_size | 0x80000000;
+ generic_smc.reg3 = l1_shared_buffer_descriptor;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+
+ if (generic_smc.reg0 != S_SUCCESS)
+ printk(KERN_ERR "tf_smc_wake_up:"
+ " r0=0x%08X upon return (expected 0x%08X)!\n",
+ generic_smc.reg0,
+ S_SUCCESS);
+
+ return generic_smc.reg0;
+}
+
+/*
+ * Calls the N-Yield SMC.
+ */
+static inline void tf_smc_nyield(void)
+{
+ struct tf_generic_smc generic_smc;
+
+ generic_smc.reg0 = TF_SMC_N_YIELD;
+ generic_smc.reg1 = 0;
+ generic_smc.reg2 = 0;
+ generic_smc.reg3 = 0;
+ generic_smc.reg4 = 0;
+
+ tf_smc_generic_call(&generic_smc);
+}
+
+/* Yields the Secure World */
+int tf_schedule_secure_world(struct tf_comm *comm)
+{
+ tf_set_current_time(comm);
+
+ /* yield to the Secure World */
+ tf_smc_nyield();
+
+ return 0;
+}
+
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+static u32 tf_get_l2init_descriptor(u32 vaddr)
+{
+ struct page *page;
+ u32 paddr;
+ u32 descriptor;
+
+ descriptor = L2_INIT_DESCRIPTOR_BASE;
+
+ /* get physical address and add to descriptor */
+ page = virt_to_page(vaddr);
+ paddr = page_to_phys(page);
+ descriptor |= (paddr & L2_DESCRIPTOR_ADDR_MASK);
+
+ /* Add virtual address v[13:12] bits to descriptor */
+ descriptor |= (DESCRIPTOR_V13_12_GET(vaddr)
+ << L2_INIT_DESCRIPTOR_V13_12_SHIFT);
+
+ descriptor |= tf_get_l2_descriptor_common(vaddr, &init_mm);
+
+
+ return descriptor;
+}
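+
+/*
+ * Editor's note -- illustrative worked example, not part of the original
+ * patch. For a hypothetical kernel virtual address 0xc0003000 whose page
+ * maps to physical address 0x80003000, the descriptor built above is
+ * 0x00000003 (L2_INIT_DESCRIPTOR_BASE) | 0x80003000 (page-aligned physical
+ * address) | (0x3 << 4) (virtual address bits [13:12]) = 0x80003033,
+ * OR-ed with the common attribute bits returned by
+ * tf_get_l2_descriptor_common().
+ */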
+
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Free the memory used by the W3B buffer for the specified comm.
+ * This function does nothing if no W3B buffer is allocated for the device.
+ */
+static inline void tf_free_w3b(struct tf_comm *comm)
+{
+ tf_cleanup_shared_memory(
+ &(comm->w3b_cpt_alloc_context),
+ &(comm->w3b_shmem_desc),
+ 0);
+
+ tf_release_coarse_page_table_allocator(&(comm->w3b_cpt_alloc_context));
+
+ internal_vfree((void *)comm->w3b);
+ comm->w3b = 0;
+ comm->w3b_shmem_size = 0;
+ clear_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+}
+
+
+/*
+ * Allocates the W3B buffer for the specified comm.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_allocate_w3b(struct tf_comm *comm)
+{
+ int error;
+ u32 flags;
+ u32 config_flag_s;
+ u32 *w3b_descriptors;
+ u32 w3b_descriptor_count;
+ u32 w3b_current_size;
+
+ config_flag_s = tf_read_reg32(&comm->l1_buffer->config_flag_s);
+
+retry:
+ if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags))) == 0) {
+ /*
+ * Initialize the shared memory for the W3B
+ */
+ tf_init_coarse_page_table_allocator(
+ &comm->w3b_cpt_alloc_context);
+ } else {
+ /*
+ * The W3B is allocated but do we have to reallocate a bigger
+ * one?
+ */
+ /* Check H bit */
+ if ((config_flag_s & (1<<4)) != 0) {
+ /* The size of the W3B may change after SMC_INIT */
+ /* Read the current value */
+ w3b_current_size = tf_read_reg32(
+ &comm->l1_buffer->w3b_size_current_s);
+ if (comm->w3b_shmem_size > w3b_current_size)
+ return 0;
+
+ tf_free_w3b(comm);
+ goto retry;
+ } else {
+ return 0;
+ }
+ }
+
+ /* check H bit */
+ if ((config_flag_s & (1<<4)) != 0)
+ /* The size of the W3B may change after SMC_INIT */
+ /* Read the current value */
+ comm->w3b_shmem_size = tf_read_reg32(
+ &comm->l1_buffer->w3b_size_current_s);
+ else
+ comm->w3b_shmem_size = tf_read_reg32(
+ &comm->l1_buffer->w3b_size_max_s);
+
+ comm->w3b = (u32) internal_vmalloc(comm->w3b_shmem_size);
+ if (comm->w3b == 0) {
+ printk(KERN_ERR "tf_allocate_w3b():"
+ " Out of memory for W3B buffer (%u bytes)!\n",
+ (unsigned int)(comm->w3b_shmem_size));
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* initialize the w3b_shmem_desc structure */
+ comm->w3b_shmem_desc.type = TF_SHMEM_TYPE_PM_HIBERNATE;
+ INIT_LIST_HEAD(&(comm->w3b_shmem_desc.list));
+
+ flags = (TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);
+
+ /* directly point to the L1 shared buffer W3B descriptors */
+ w3b_descriptors = comm->l1_buffer->w3b_descriptors;
+
+ /*
+ * tf_fill_descriptor_table uses the following parameter as an
+ * IN/OUT
+ */
+
+ error = tf_fill_descriptor_table(
+ &(comm->w3b_cpt_alloc_context),
+ &(comm->w3b_shmem_desc),
+ comm->w3b,
+ NULL,
+ w3b_descriptors,
+ comm->w3b_shmem_size,
+ &(comm->w3b_shmem_offset),
+ false,
+ flags,
+ &w3b_descriptor_count);
+ if (error != 0) {
+ printk(KERN_ERR "tf_allocate_w3b():"
+ " tf_fill_descriptor_table failed with "
+ "error code 0x%08x!\n",
+ error);
+ goto error;
+ }
+
+ set_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+
+ /* successful completion */
+ return 0;
+
+error:
+ tf_free_w3b(comm);
+
+ return error;
+}
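+
+/*
+ * Editor's note -- descriptive comment added for clarity, not part of the
+ * original patch. In tf_allocate_w3b() above, the "H bit" is bit 4 of
+ * config_flag_s: when it is set, the W3B size requested by the Secure
+ * World may change after SMC_INIT, so the current value
+ * (w3b_size_current_s) is used and an already-allocated W3B is kept only
+ * if it is larger than that value (it is freed and reallocated otherwise);
+ * when the bit is clear, the maximum size (w3b_size_max_s) is used once
+ * and for all.
+ */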
+
+/*
+ * Perform a Secure World shutdown operation.
+ * The routine does not return if the operation succeeds;
+ * it returns an appropriate error code if
+ * the operation fails.
+ */
+int tf_pm_shutdown(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+ /* this function is useless for the TEGRA product */
+ return 0;
+#else
+ int error;
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_pm_shutdown()\n");
+
+ memset(&command, 0, sizeof(command));
+
+ command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+ command.header.message_size =
+ (sizeof(struct tf_command_management) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ command.management.command = TF_MANAGEMENT_SHUTDOWN;
+
+ error = tf_send_receive(
+ comm,
+ &command,
+ &answer,
+ NULL,
+ false);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_shutdown(): "
+ "tf_send_receive failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ if (answer.header.error_code != 0)
+ dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+ else
+ dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+#endif
+
+ return answer.header.error_code;
+#endif
+}
+
+
+/*
+ * Perform a Secure World hibernate operation.
+ * The routine does not return if the operation succeeds;
+ * it returns an appropriate error code if
+ * the operation fails.
+ */
+int tf_pm_hibernate(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+ /* this function is useless for the TEGRA product */
+ return 0;
+#else
+ int error;
+ union tf_command command;
+ union tf_answer answer;
+ u32 first_command;
+ u32 first_free_command;
+
+ dprintk(KERN_INFO "tf_pm_hibernate()\n");
+
+ error = tf_allocate_w3b(comm);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_hibernate(): "
+ "tf_allocate_w3b failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+ /*
+ * As the polling thread is already hibernating, we
+ * should send the message and receive the answer ourselves
+ */
+
+ /* build the "prepare to hibernate" message */
+ command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+ command.management.command = TF_MANAGEMENT_HIBERNATE;
+ /* Long Form Command */
+ command.management.shared_mem_descriptors[0] = 0;
+ command.management.shared_mem_descriptors[1] = 0;
+ command.management.w3b_size =
+ comm->w3b_shmem_size | 0x80000000;
+ command.management.w3b_start_offset =
+ comm->w3b_shmem_offset;
+ command.header.operation_id = (u32) &answer;
+
+ tf_dump_command(&command);
+
+ /* find a slot to send the message in */
+
+ /* AFY: why not use the function tf_send_receive?? We are
+ * duplicating a lot of subtle code here. And it's not going to be
+ * tested because power management is currently not supported by the
+ * secure world. */
+ for (;;) {
+ int queue_words_count, command_size;
+
+ spin_lock(&(comm->lock));
+
+ first_command = tf_read_reg32(
+ &comm->l1_buffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->l1_buffer->first_free_command);
+
+ queue_words_count = first_free_command - first_command;
+ command_size = command.header.message_size
+ + sizeof(struct tf_command_header);
+ if ((queue_words_count + command_size) <
+ TF_N_MESSAGE_QUEUE_CAPACITY) {
+ /* Command queue is not full */
+ memcpy(&comm->l1_buffer->command_queue[
+ first_free_command %
+ TF_N_MESSAGE_QUEUE_CAPACITY],
+ &command,
+ command_size * sizeof(u32));
+
+ tf_write_reg32(&comm->l1_buffer->first_free_command,
+ first_free_command + command_size);
+
+ spin_unlock(&(comm->lock));
+ break;
+ }
+
+ spin_unlock(&(comm->lock));
+ (void)tf_schedule_secure_world(comm);
+ }
+
+ /* now wait for the answer, dispatching other answers */
+ while (1) {
+ u32 first_answer;
+ u32 first_free_answer;
+
+ /* check all the answers */
+ first_free_answer = tf_read_reg32(
+ &comm->l1_buffer->first_free_answer);
+ first_answer = tf_read_reg32(
+ &comm->l1_buffer->first_answer);
+
+ if (first_answer != first_free_answer) {
+ int bFoundAnswer = 0;
+
+ do {
+ /* answer queue not empty */
+ union tf_answer tmp_answer;
+ struct tf_answer_header header;
+ /* size of the command in 32-bit words */
+ int command_size;
+
+ /* get the message_size */
+ memcpy(&header,
+ &comm->l1_buffer->answer_queue[
+ first_answer %
+ TF_S_ANSWER_QUEUE_CAPACITY],
+ sizeof(struct tf_answer_header));
+ command_size = header.message_size +
+ sizeof(struct tf_answer_header);
+
+ /*
+ * NOTE: message_size is the number of words
+ * following the first word
+ */
+ memcpy(&tmp_answer,
+ &comm->l1_buffer->answer_queue[
+ first_answer %
+ TF_S_ANSWER_QUEUE_CAPACITY],
+ command_size * sizeof(u32));
+
+ tf_dump_answer(&tmp_answer);
+
+ if (tmp_answer.header.operation_id ==
+ (u32) &answer) {
+ /*
+ * this is the answer to the "prepare to
+ * hibernate" message
+ */
+ memcpy(&answer,
+ &tmp_answer,
+ command_size * sizeof(u32));
+
+ bFoundAnswer = 1;
+ tf_write_reg32(
+ &comm->l1_buffer->first_answer,
+ first_answer + command_size);
+ break;
+ } else {
+ /*
+ * this is a standard message answer,
+ * dispatch it
+ */
+ struct tf_answer_struct
+ *answerStructure;
+
+ answerStructure =
+ (struct tf_answer_struct *)
+ tmp_answer.header.operation_id;
+
+ memcpy(answerStructure->answer,
+ &tmp_answer,
+ command_size * sizeof(u32));
+
+ answerStructure->answer_copied = true;
+ }
+
+ tf_write_reg32(
+ &comm->l1_buffer->first_answer,
+ first_answer + command_size);
+ } while (first_answer != first_free_answer);
+
+ if (bFoundAnswer)
+ break;
+ }
+
+ /*
+ * Since the Secure World is at least running the "prepare to
+ * hibernate" message, its timeout must be immediate. So there is
+ * no need to check its timeout and schedule() the current
+ * thread
+ */
+ (void)tf_schedule_secure_world(comm);
+ } /* while (1) */
+
+ printk(KERN_INFO "tf_driver: hibernate.\n");
+ return 0;
+#endif
+}
+
+
+/*
+ * Perform a Secure World resume operation.
+ * The routine returns once the Secure World is active again,
+ * or if an error occurs during the "resume" process.
+ */
+int tf_pm_resume(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+ /* this function is useless for the TEGRA product */
+ return 0;
+#else
+ int error;
+ u32 status;
+
+ dprintk(KERN_INFO "tf_pm_resume()\n");
+
+ error = tf_smc_wake_up(
+ tf_get_l2init_descriptor((u32)comm->l1_buffer),
+ comm->w3b_shmem_offset,
+ comm->w3b_shmem_size);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_resume(): "
+ "tf_smc_wake_up failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ while ((status != TF_POWER_MODE_ACTIVE)
+ && (status != TF_POWER_MODE_PANIC)) {
+ tf_smc_nyield();
+
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ /*
+ * As this may last quite a while, call the kernel scheduler to
+ * hand over CPU for other operations
+ */
+ schedule();
+ }
+
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ break;
+
+ case TF_POWER_MODE_PANIC:
+ dprintk(KERN_ERR "tf_pm_resume(): "
+ "Secure World POWER_MODE_PANIC!\n");
+ return -EINVAL;
+
+ default:
+ dprintk(KERN_ERR "tf_pm_resume(): "
+ "unexpected Secure World POWER_MODE (%d)!\n", status);
+ return -EINVAL;
+ }
+
+ dprintk(KERN_INFO "tf_pm_resume() succeeded\n");
+ return 0;
+#endif
+}
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Handles the software interrupts issued by the Secure World.
+ */
+static irqreturn_t tf_soft_int_handler(int irq, void *dev_id)
+{
+ struct tf_comm *comm = (struct tf_comm *) dev_id;
+
+ if (comm->l1_buffer == NULL)
+ return IRQ_NONE;
+
+ if ((tf_read_reg32(&comm->l1_buffer->status_s) &
+ TF_STATUS_P_MASK) == 0)
+ /* interrupt not issued by the Trusted Foundations Software */
+ return IRQ_NONE;
+
+ tf_smc_reset_irq();
+
+ /* signal N_SM_EVENT */
+ wake_up(&comm->wait_queue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initializes the communication with the Secure World.
+ * The L1 shared buffer is allocated and the Secure World
+ * is yielded for the first time.
+ * It returns successfully once the communication with
+ * the Secure World is up and running.
+ *
+ * Returns 0 upon success or an appropriate error code
+ * upon failure.
+ */
+int tf_init(struct tf_comm *comm)
+{
+ int error;
+ struct page *buffer_page;
+ u32 protocol_version;
+
+ dprintk(KERN_INFO "tf_init()\n");
+
+ spin_lock_init(&(comm->lock));
+ comm->flags = 0;
+ comm->l1_buffer = NULL;
+ init_waitqueue_head(&(comm->wait_queue));
+
+ /*
+ * Check the Secure World protocol version is the expected one.
+ */
+ tf_smc_get_protocol_version(&protocol_version);
+
+ if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
+ != TF_S_PROTOCOL_MAJOR_VERSION) {
+ printk(KERN_ERR "tf_init():"
+ " Unsupported Secure World Major Version "
+ "(0x%02X, expected 0x%02X)!\n",
+ GET_PROTOCOL_MAJOR_VERSION(protocol_version),
+ TF_S_PROTOCOL_MAJOR_VERSION);
+ error = -EIO;
+ goto error;
+ }
+
+ /*
+ * Register the software interrupt handler if required to.
+ */
+ if (comm->soft_int_irq != -1) {
+ dprintk(KERN_INFO "tf_init(): "
+ "Registering software interrupt handler (IRQ %d)\n",
+ comm->soft_int_irq);
+
+ error = request_irq(comm->soft_int_irq,
+ tf_soft_int_handler,
+ IRQF_SHARED,
+ TF_DEVICE_BASE_NAME,
+ comm);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_init(): "
+ "request_irq failed for irq %d (error %d)\n",
+ comm->soft_int_irq, error);
+ goto error;
+ }
+ set_bit(TF_COMM_FLAG_IRQ_REQUESTED, &(comm->flags));
+ }
+
+ /*
+ * Allocate and initialize the L1 shared buffer.
+ */
+ comm->l1_buffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
+ if (comm->l1_buffer == NULL) {
+ printk(KERN_ERR "tf_init():"
+ " get_zeroed_page failed for L1 shared buffer!\n");
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * Ensure the page storing the L1 shared buffer is mapped.
+ */
+ buffer_page = virt_to_page(comm->l1_buffer);
+ trylock_page(buffer_page);
+
+ dprintk(KERN_INFO "tf_init(): "
+ "L1 shared buffer allocated at virtual:%p, "
+ "physical:%p (page:%p)\n",
+ comm->l1_buffer,
+ (void *)virt_to_phys(comm->l1_buffer),
+ buffer_page);
+
+ set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags));
+
+ /*
+ * Init SMC
+ */
+ error = tf_smc_init(
+ tf_get_l2init_descriptor((u32)comm->l1_buffer));
+ if (error != S_SUCCESS) {
+ dprintk(KERN_ERR "tf_init(): "
+ "tf_smc_init failed (error 0x%08X)!\n",
+ error);
+ goto error;
+ }
+
+ /*
+ * Check whether the interrupts are actually enabled.
+ * If not, remove the IRQ handler
+ */
+ if ((tf_read_reg32(&comm->l1_buffer->config_flag_s) &
+ TF_CONFIG_FLAG_S) == 0) {
+ if (test_and_clear_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+ &(comm->flags)) != 0) {
+ dprintk(KERN_INFO "tf_init(): "
+ "Interrupts not used, unregistering "
+ "softint (IRQ %d)\n",
+ comm->soft_int_irq);
+
+ free_irq(comm->soft_int_irq, comm);
+ }
+ } else {
+ if (test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+ &(comm->flags)) == 0) {
+ /*
+ * Interrupts are enabled in the Secure World, but not
+ * handled by the driver
+ */
+ dprintk(KERN_ERR "tf_init(): "
+ "soft_interrupt argument not provided\n");
+ error = -EINVAL;
+ goto error;
+ }
+ }
+
+ /*
+ * Successful completion.
+ */
+
+ /* yield for the first time */
+ (void)tf_schedule_secure_world(comm);
+
+ dprintk(KERN_INFO "tf_init(): Success\n");
+ return S_SUCCESS;
+
+error:
+ /*
+ * Error handling.
+ */
+ dprintk(KERN_INFO "tf_init(): Failure (error %d)\n",
+ error);
+ tf_terminate(comm);
+ return error;
+}
+
+
+/*
+ * Attempt to terminate the communication with the Secure World.
+ * The L1 shared buffer is freed.
+ * Calling this routine definitively terminates the communication
+ * with the Secure World: there is no way to inform the Secure World of a new
+ * L1 shared buffer to be used once it has been initialized.
+ */
+void tf_terminate(struct tf_comm *comm)
+{
+ dprintk(KERN_INFO "tf_terminate()\n");
+
+ set_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags));
+
+ if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED,
+ &(comm->flags))) != 0) {
+ dprintk(KERN_INFO "tf_terminate(): "
+ "Freeing the W3B buffer...\n");
+ tf_free_w3b(comm);
+ }
+
+ if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
+ &(comm->flags))) != 0) {
+ __clear_page_locked(virt_to_page(comm->l1_buffer));
+ internal_free_page((unsigned long) comm->l1_buffer);
+ }
+
+ if ((test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+ &(comm->flags))) != 0) {
+ dprintk(KERN_INFO "tf_terminate(): "
+ "Unregistering softint (IRQ %d)\n",
+ comm->soft_int_irq);
+ free_irq(comm->soft_int_irq, comm);
+ }
+}
diff --git a/security/tf_driver/tf_conn.c b/security/tf_driver/tf_conn.c
new file mode 100644
index 000000000000..3148fec46358
--- /dev/null
+++ b/security/tf_driver/tf_conn.c
@@ -0,0 +1,1574 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "s_version.h"
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_comm.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_crypto.h"
+#endif
+
+#ifdef CONFIG_ANDROID
+#define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
+#else
+#define TF_PRIVILEGED_UID_GID 0
+#endif
+
+/*----------------------------------------------------------------------------
+ * Management of the shared memory blocks.
+ *
+ * Shared memory blocks are the blocks registered through
+ * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
+ *----------------------------------------------------------------------------*/
+
+/**
+ * Unmaps a shared memory block.
+ **/
+void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ /* check shmem_desc contains a descriptor */
+ if (shmem_desc == NULL)
+ return;
+
+ dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
+
+retry:
+ mutex_lock(&(connection->shmem_mutex));
+ if (atomic_read(&shmem_desc->ref_count) > 1) {
+ /*
+ * Shared mem still in use, wait for other operations to complete
+ * before actually unmapping it.
+ */
+ dprintk(KERN_INFO "Descriptor in use\n");
+ mutex_unlock(&(connection->shmem_mutex));
+ schedule();
+ goto retry;
+ }
+
+ tf_cleanup_shared_memory(
+ &(connection->cpt_alloc_context),
+ shmem_desc,
+ full_cleanup);
+
+ list_del(&(shmem_desc->list));
+
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0)) {
+ internal_kfree(shmem_desc);
+
+ atomic_dec(&(connection->shmem_count));
+ } else {
+ /*
+ * This is a preallocated shared memory; add it to the free list.
+ * Since the device context is unmapped last, it is
+ * always the first element of the free list if no
+ * device context has been created
+ */
+ shmem_desc->block_identifier = 0;
+ list_add(&(shmem_desc->list), &(connection->free_shmem_list));
+ }
+
+ mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/**
+ * Find the first available slot for a new block of shared memory
+ * and map the user buffer.
+ * Update the descriptors to L1 descriptors.
+ * Update the buffer_start_offset and buffer_size fields.
+ * shmem_desc is updated to point to the mapped shared memory descriptor.
+ **/
+int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count)
+{
+ struct tf_shmem_desc *desc = NULL;
+ int error;
+
+ dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
+ connection,
+ (void *) buffer,
+ flags);
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ /*
+ * Check that the list of free shared memory
+ * descriptors is not empty
+ */
+ if (list_empty(&(connection->free_shmem_list))) {
+ if (atomic_read(&(connection->shmem_count)) ==
+ TF_SHMEM_MAX_COUNT) {
+ printk(KERN_ERR "tf_map_shmem(%p):"
+ " maximum shared memories already registered\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* no descriptor available, allocate a new one */
+
+ desc = (struct tf_shmem_desc *) internal_kmalloc(
+ sizeof(*desc), GFP_KERNEL);
+ if (desc == NULL) {
+ printk(KERN_ERR "tf_map_shmem(%p):"
+ " failed to allocate descriptor\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the structure */
+ desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
+ atomic_set(&desc->ref_count, 1);
+ INIT_LIST_HEAD(&(desc->list));
+
+ atomic_inc(&(connection->shmem_count));
+ } else {
+ /* take the first free shared memory descriptor */
+ desc = list_first_entry(&(connection->free_shmem_list),
+ struct tf_shmem_desc, list);
+ list_del(&(desc->list));
+ }
+
+ /* Add the descriptor to the used list */
+ list_add(&(desc->list), &(connection->used_shmem_list));
+
+ error = tf_fill_descriptor_table(
+ &(connection->cpt_alloc_context),
+ desc,
+ buffer,
+ connection->vmas,
+ descriptors,
+ buffer_size,
+ buffer_start_offset,
+ in_user_space,
+ flags,
+ descriptor_count);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_map_shmem(%p):"
+ " tf_fill_descriptor_table failed with error "
+ "code %d!\n",
+ connection,
+ error);
+ goto error;
+ }
+ desc->client_buffer = (u8 *) buffer;
+
+ /*
+ * Successful completion.
+ */
+ *shmem_desc = desc;
+ mutex_unlock(&(connection->shmem_mutex));
+ dprintk(KERN_DEBUG "tf_map_shmem: success\n");
+ return 0;
+
+
+ /*
+ * Error handling.
+ */
+error:
+ mutex_unlock(&(connection->shmem_mutex));
+ dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
+ error);
+
+ tf_unmap_shmem(
+ connection,
+ desc,
+ 0);
+
+ return error;
+}
+
+
+
+/* This function is a copy of the find_vma() function
+from Linux kernel 2.6.15, with some fixes:
+ - the memory block may end on vm_end
+ - check that the full memory block is in the memory area
+ - guarantee that NULL is returned if no memory area is found */
+struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
+ unsigned long addr, unsigned long size)
+{
+ struct vm_area_struct *vma = NULL;
+
+ dprintk(KERN_INFO
+ "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
+
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end >= (addr+size) &&
+ vma->vm_start <= addr)) {
+ struct rb_node *rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct *vma_tmp;
+
+ vma_tmp = rb_entry(rb_node,
+ struct vm_area_struct, vm_rb);
+
+ dprintk(KERN_INFO
+ "vma_tmp->vm_start=0x%lX"
+ "vma_tmp->vm_end=0x%lX\n",
+ vma_tmp->vm_start,
+ vma_tmp->vm_end);
+
+ if (vma_tmp->vm_end >= (addr+size)) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+
+ rb_node = rb_node->rb_left;
+ } else {
+ rb_node = rb_node->rb_right;
+ }
+ }
+
+ if (vma)
+ mm->mmap_cache = vma;
+ if (rb_node == NULL)
+ vma = NULL;
+ }
+ }
+ return vma;
+}
+
+int tf_validate_shmem_and_flags(
+ u32 shmem,
+ u32 shmem_size,
+ u32 flags)
+{
+ struct vm_area_struct *vma;
+ u32 chunk;
+
+ if (shmem_size == 0)
+ /* This is always valid */
+ return 0;
+
+ if ((shmem + shmem_size) < shmem)
+ /* Overflow */
+ return -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+ /*
+ * When looking up memory addresses, split the buffer into chunks of
+ * at most PAGE_SIZE bytes.
+ */
+ chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
+ if (chunk > shmem_size)
+ chunk = shmem_size;
+
+ do {
+ vma = tf_find_vma(current->mm, shmem, chunk);
+
+ if (vma == NULL) {
+ dprintk(KERN_ERR "%s: area not found\n", __func__);
+ goto error;
+ }
+
+ if (flags & TF_SHMEM_TYPE_READ)
+ if (!(vma->vm_flags & VM_READ)) {
+ dprintk(KERN_ERR "%s: no read permission\n",
+ __func__);
+ goto error;
+ }
+ if (flags & TF_SHMEM_TYPE_WRITE)
+ if (!(vma->vm_flags & VM_WRITE)) {
+ dprintk(KERN_ERR "%s: no write permission\n",
+ __func__);
+ goto error;
+ }
+
+ shmem_size -= chunk;
+ shmem += chunk;
+ chunk = (shmem_size <= PAGE_SIZE ?
+ shmem_size : PAGE_SIZE);
+ } while (shmem_size != 0);
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+
+error:
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+}
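+
+/*
+ * Editor's note -- illustrative worked example, not part of the original
+ * patch. For a hypothetical buffer at shmem == 0x40001e00 with
+ * shmem_size == 0x2400 and PAGE_SIZE == 0x1000, the loop above looks up
+ * the VMAs covering chunks of 0x200, 0x1000, 0x1000 and 0x200 bytes in
+ * turn, i.e. every page touched by the buffer, and returns -EFAULT as soon
+ * as a chunk is not covered by a VMA with the requested access rights.
+ */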
+
+
+static int tf_map_temp_shmem(struct tf_connection *connection,
+ struct tf_command_param_temp_memref *temp_memref,
+ u32 param_type,
+ struct tf_shmem_desc **shmem_desc)
+{
+ u32 flags;
+ u32 error = S_SUCCESS;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+
+ dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
+ "0x%08x[size=0x%08x], offset=0x%08x)\n",
+ connection,
+ temp_memref->descriptor,
+ temp_memref->size,
+ temp_memref->offset);
+
+ switch (param_type) {
+ case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
+ flags = TF_SHMEM_TYPE_READ;
+ break;
+ case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+ flags = TF_SHMEM_TYPE_WRITE;
+ break;
+ case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
+ flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
+ break;
+ default:
+ error = -EINVAL;
+ goto error;
+ }
+
+ if (temp_memref->descriptor == 0) {
+ /* NULL tmpref */
+ temp_memref->offset = 0;
+ *shmem_desc = NULL;
+ } else if ((temp_memref->descriptor != 0) &&
+ (temp_memref->size == 0)) {
+ /* Empty tmpref */
+ temp_memref->offset = temp_memref->descriptor;
+ temp_memref->descriptor = 0;
+ temp_memref->size = 0;
+ *shmem_desc = NULL;
+ } else {
+ /* Map the temp shmem block */
+
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+ u32 descriptor_count;
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ temp_memref->descriptor,
+ temp_memref->size,
+ flags);
+ if (error != 0)
+ goto error;
+ }
+
+ error = tf_map_shmem(
+ connection,
+ temp_memref->descriptor,
+ flags,
+ in_user_space,
+ shared_mem_descriptors,
+ &(temp_memref->offset),
+ temp_memref->size,
+ shmem_desc,
+ &descriptor_count);
+ temp_memref->descriptor = shared_mem_descriptors[0];
+ }
+
+error:
+ return error;
+}
+
+/*
+ * Clean up a list of shared memory descriptors.
+ */
+static void tf_shared_memory_cleanup_list(
+ struct tf_connection *connection,
+ struct list_head *shmem_desc_list)
+{
+ while (!list_empty(shmem_desc_list)) {
+ struct tf_shmem_desc *shmem_desc;
+
+ shmem_desc = list_first_entry(shmem_desc_list,
+ struct tf_shmem_desc, list);
+
+ tf_unmap_shmem(connection, shmem_desc, 1);
+ }
+}
+
+
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ */
+static void tf_cleanup_shared_memories(struct tf_connection *connection)
+{
+ /* Clean up the lists of used and free descriptors.
+ * This is done outside the mutex, because tf_unmap_shmem
+ * already takes it
+ */
+ tf_shared_memory_cleanup_list(connection,
+ &connection->used_shmem_list);
+ tf_shared_memory_cleanup_list(connection,
+ &connection->free_shmem_list);
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ /* Free the Vmas page */
+ if (connection->vmas) {
+ internal_free_page((unsigned long) connection->vmas);
+ connection->vmas = NULL;
+ }
+
+ tf_release_coarse_page_table_allocator(
+ &(connection->cpt_alloc_context));
+
+ mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ */
+int tf_init_shared_memory(struct tf_connection *connection)
+{
+ int error;
+ int i;
+ int coarse_page_index;
+
+ /*
+ * We only need to initialize a few special elements and attempt to
+ * allocate the minimum number of shared memory descriptors we want to
+ * support
+ */
+
+ mutex_init(&(connection->shmem_mutex));
+ INIT_LIST_HEAD(&(connection->free_shmem_list));
+ INIT_LIST_HEAD(&(connection->used_shmem_list));
+ atomic_set(&(connection->shmem_count), 0);
+
+ tf_init_coarse_page_table_allocator(
+ &(connection->cpt_alloc_context));
+
+
+ /*
+ * Preallocate 3 shared memory descriptors to increase the chances
+ * that a connection succeeds in allocating shared memory
+ */
+ for (i = 0;
+ i < 3;
+ i++) {
+ struct tf_shmem_desc *shmem_desc =
+ (struct tf_shmem_desc *) internal_kmalloc(
+ sizeof(*shmem_desc), GFP_KERNEL);
+
+ if (shmem_desc == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p):"
+ " failed to pre allocate descriptor %d\n",
+ connection,
+ i);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ for (coarse_page_index = 0;
+ coarse_page_index < TF_MAX_COARSE_PAGES;
+ coarse_page_index++) {
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ &(connection->cpt_alloc_context),
+ TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
+
+ if (coarse_pg_table == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p)"
+ ": descriptor %d coarse page %d - "
+ "tf_alloc_coarse_page_table() "
+ "failed\n",
+ connection,
+ i,
+ coarse_page_index);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ }
+ shmem_desc->coarse_pg_table_count = 0;
+
+ shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
+ atomic_set(&shmem_desc->ref_count, 1);
+
+ /*
+ * Add this preallocated descriptor to the list of free
+ * descriptors. Keep the device-context-specific one at the
+ * beginning of the list
+ */
+ INIT_LIST_HEAD(&(shmem_desc->list));
+ list_add_tail(&(shmem_desc->list),
+ &(connection->free_shmem_list));
+ }
+
+ /* allocate memory for the vmas structure */
+ connection->vmas =
+ (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
+ if (connection->vmas == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p):"
+ " vmas - failed to get_zeroed_page\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memories(connection);
+ return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection)
+{
+ union tf_command command;
+ union tf_answer answer;
+ int error = 0;
+
+ dprintk(KERN_INFO "tf_create_device_context(%p)\n",
+ connection);
+
+ command.create_device_context.message_type =
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+ command.create_device_context.message_size =
+ (sizeof(struct tf_command_create_device_context)
+ - sizeof(struct tf_command_header))/sizeof(u32);
+ command.create_device_context.operation_id = (u32) &answer;
+ command.create_device_context.device_context_id = (u32) connection;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer.create_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ /*
+ * CREATE_DEVICE_CONTEXT succeeded,
+ * store the device context handle and update the connection status
+ */
+ connection->device_context =
+ answer.create_device_context.device_context;
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_create_device_context(%p):"
+ " device_context=0x%08x\n",
+ connection,
+ answer.create_device_context.device_context);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_create_device_context failed with "
+ "error %d\n", error);
+ } else {
+ /*
+ * We sent a DeviceCreateContext. The state is now
+ * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT. It has to be
+ * reset if we ever want to send a DeviceCreateContext again
+ */
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ dprintk(KERN_ERR "tf_create_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.create_device_context.error_code);
+ if (answer.create_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+/* Check that the current application belongs to the
+ * requested GID */
+static bool tf_check_gid(gid_t requested_gid)
+{
+ if (requested_gid == current_egid()) {
+ return true;
+ } else {
+ u32 size;
+ u32 i;
+ /* Look in the supplementary GIDs */
+ get_group_info(GROUP_INFO);
+ size = GROUP_INFO->ngroups;
+ for (i = 0; i < size; i++)
+ if (requested_gid == GROUP_AT(GROUP_INFO , i))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Opens a client session to the Secure World
+ */
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ u32 i;
+
+ dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
+
+ /*
+ * Initialize the message size with no login data. This will be
+ * adjusted later in the cases below
+ */
+ command->open_client_session.message_size =
+ (sizeof(struct tf_command_open_client_session) - 20
+ - sizeof(struct tf_command_header))/4;
+
+ switch (command->open_client_session.login_type) {
+ case TF_LOGIN_PUBLIC:
+ /* Nothing to do */
+ break;
+
+ case TF_LOGIN_USER:
+ /*
+ * Send the EUID of the calling application in the login data.
+ * Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_euid();
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_LINUX_EUID;
+#else
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_ANDROID_EUID;
+#endif
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+
+ case TF_LOGIN_GROUP: {
+ /* Check requested GID */
+ gid_t requested_gid =
+ *(u32 *) command->open_client_session.login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_GROUP: requested GID (0x%x) does "
+ "not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_LINUX_GID;
+#else
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_ANDROID_GID;
+#endif
+
+ command->open_client_session.message_size += 1; /* GID */
+ break;
+ }
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION: {
+ /*
+ * Compute SHA-1 hash of the application fully-qualified path
+ * name. Truncate the hash to 16 bytes and send it as login
+ * data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ NULL, 0);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION:
+ /*
+ * Send the real UID of the calling application in the login
+ * data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_USER: {
+ /*
+ * Compute SHA-1 hash of the concatenation of the application
+ * fully-qualified path name and the EUID of the calling
+ * application. Truncate the hash to 16 bytes and send it as
+ * login data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ (u8 *) &(current_euid()), sizeof(current_euid()));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_USER:
+ /*
+ * Send the real UID and the EUID of the calling application in
+ * the login data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ current_euid();
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Compute SHA-1 hash of the concatenation
+ * of the application fully-qualified path name and the
+ * requested GID. Update message size
+ */
+ gid_t requested_gid;
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ &requested_gid, sizeof(u32));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Send the real UID and the requested GID
+ * in the login data. Update message size.
+ */
+ gid_t requested_gid;
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ requested_gid;
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+
+ break;
+ }
+#endif
+
+ case TF_LOGIN_PRIVILEGED:
+ /* A privileged login may be performed only on behalf of the
+ kernel itself or on behalf of a process with euid=0 or
+ egid=0 or euid=system or egid=system. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for kernel API\n");
+ } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
+ (current_egid() != TF_PRIVILEGED_UID_GID) &&
+ (current_euid() != 0) && (current_egid() != 0)) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ } else {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for %u:%u\n",
+ current_euid(), current_egid());
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED;
+ break;
+
+ case TF_LOGIN_AUTHENTICATION: {
+ /*
+ * Compute SHA-1 hash of the application binary
+ * Send this hash as the login data (20 bytes)
+ */
+
+ u8 *hash;
+ hash = &(command->open_client_session.login_data[0]);
+
+ error = tf_get_current_process_hash(hash);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_get_current_process_hash\n");
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
+
+ /* 20 bytes */
+ command->open_client_session.message_size += 5;
+ break;
+ }
+
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ /* A kernel login may be performed only on behalf of the
+ kernel itself. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
+ } else {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED_KERNEL;
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unknown login_type(%08X)\n",
+ command->open_client_session.login_type);
+ error = -EOPNOTSUPP;
+ goto error;
+ }
+
+ /* Map the temporary memory references */
+ for (i = 0; i < 4; i++) {
+ int param_type;
+ param_type = TF_GET_PARAM_TYPE(
+ command->open_client_session.param_types, i);
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* Map temp mem ref */
+ error = tf_map_temp_shmem(connection,
+ &command->open_client_session.
+ params[i].temp_memref,
+ param_type,
+ &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unable to map temporary memory block "
+ "(%08X)\n", error);
+ goto error;
+ }
+ }
+ }
+
+ /* Fill the handle of the Device Context */
+ command->open_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+error:
+ /* Unmap the temporary memory references */
+ for (i = 0; i < 4; i++)
+ if (shmem_desc[i] != NULL)
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_open_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_open_client_session returns "
+ "error_code 0x%08X\n",
+ answer->open_client_session.error_code);
+
+ return error;
+}
+
+
+/*
+ * Closes a client session from the Secure World
+ */
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
+
+ command->close_client_session.message_size =
+ (sizeof(struct tf_command_close_client_session) -
+ sizeof(struct tf_command_header)) / 4;
+ command->close_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_close_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_close_client_session returns "
+ "error 0x%08X\n",
+ answer->close_client_session.error_code);
+
+ return error;
+}
+
+
+/*
+ * Registers a shared memory to the Secure World
+ */
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc = NULL;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+ struct tf_command_register_shared_memory *msg =
+ &command->register_shared_memory;
+
+ dprintk(KERN_INFO "tf_register_shared_memory(%p) "
+ "%p[0x%08X][0x%08x]\n",
+ connection,
+ (void *)msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+ if (error != 0)
+ goto error;
+ }
+
+ /* Initialize message_size with no descriptors */
+ msg->message_size
+ = (offsetof(struct tf_command_register_shared_memory,
+ shared_mem_descriptors) -
+ sizeof(struct tf_command_header)) / 4;
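+ /*
+ * message_size is counted in 32-bit words after the command header;
+ * each coarse page descriptor added by tf_map_shmem below contributes
+ * one word.
+ */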
+
+ /* Map the shmem block and update the message */
+ if (msg->shared_mem_size == 0) {
+ /* Empty shared mem */
+ msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
+ } else {
+ u32 descriptor_count;
+ error = tf_map_shmem(
+ connection,
+ msg->shared_mem_descriptors[0],
+ msg->memory_flags,
+ in_user_space,
+ msg->shared_mem_descriptors,
+ &(msg->shared_mem_start_offset),
+ msg->shared_mem_size,
+ &shmem_desc,
+ &descriptor_count);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "unable to map shared memory block\n");
+ goto error;
+ }
+ msg->message_size += descriptor_count;
+ }
+
+ /*
+ * Write the device context handle and the address of the shared
+ * memory descriptor into the message.
+ */
+ msg->device_context = connection->device_context;
+ msg->block_id = (u32)shmem_desc;
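+ /*
+ * block_id is an opaque handle for the Secure World: it is echoed back
+ * in the RELEASE_SHARED_MEMORY answer, where tf_release_shared_memory
+ * casts it back to the original tf_shmem_desc pointer.
+ */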
+
+ /* Send the updated message */
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->register_shared_memory.error_code
+ != S_SUCCESS)) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "operation failed. Unmap block\n");
+ goto error;
+ }
+
+ /* Save the block handle returned by the Secure World */
+ if (shmem_desc != NULL)
+ shmem_desc->block_identifier =
+ answer->register_shared_memory.block;
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_register_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, msg->block_id,
+ answer->register_shared_memory.block);
+ return 0;
+
+ /* error completion */
+error:
+ tf_unmap_shmem(
+ connection,
+ shmem_desc,
+ 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_register_shared_memory returns "
+ "error_code 0x%08X\n",
+ answer->register_shared_memory.error_code);
+
+ return error;
+}
+
+
+/*
+ * Releases a shared memory from the Secure World
+ */
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
+
+ command->release_shared_memory.message_size =
+ (sizeof(struct tf_command_release_shared_memory) -
+ sizeof(struct tf_command_header)) / 4;
+ command->release_shared_memory.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->release_shared_memory.error_code != S_SUCCESS))
+ goto error;
+
+ /* Use block_id to get back the pointer to shmem_desc */
+ tf_unmap_shmem(
+ connection,
+ (struct tf_shmem_desc *)
+ answer->release_shared_memory.block_id,
+ 0);
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_release_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, answer->release_shared_memory.block_id,
+ command->release_shared_memory.block);
+ return 0;
+
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_release_shared_memory returns "
+ "nChannelStatus 0x%08X\n",
+ answer->release_shared_memory.error_code);
+
+ return error;
+
+}
+
+
+/*
+ * Invokes a client command to the Secure World
+ */
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ int i;
+
+ dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
+
+ command->invoke_client_command.message_size =
+ (sizeof(struct tf_command_invoke_client_command) -
+ sizeof(struct tf_command_header)) / 4;
+
+#ifdef CONFIG_TF_ZEBRA
+ error = tf_crypto_try_shortcuted_update(connection,
+ (struct tf_command_invoke_client_command *) command,
+ (struct tf_answer_invoke_client_command *) answer);
+ if (error == 0)
+ return error;
+#endif
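+ /*
+ * If CONFIG_TF_ZEBRA is set and the shortcut path handled the command,
+ * the function has already returned; otherwise fall through to the
+ * generic exchange with the Secure World.
+ */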
+
+ /* Map the tmprefs */
+ for (i = 0; i < 4; i++) {
+ int param_type = TF_GET_PARAM_TYPE(
+ command->invoke_client_command.param_types, i);
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* A temporary memref: map it */
+ error = tf_map_temp_shmem(connection,
+ &command->invoke_client_command.
+ params[i].temp_memref,
+ param_type, &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR
+ "tf_invoke_client_command: "
+ "unable to map temporary memory "
+ "block\n (%08X)", error);
+ goto error;
+ }
+ }
+ }
+
+ command->invoke_client_command.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(&connection->dev->sm, command,
+ answer, connection, true);
+
+error:
+ /* Unmap the temporary memory references */
+ for (i = 0; i < 4; i++) {
+ if (shmem_desc[i] != NULL) {
+ dprintk(KERN_INFO "tf_invoke_client_command: "
+ "UnMatemp_memref %d\n ", i);
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+ }
+ }
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_invoke_client_command returns "
+ "error_code 0x%08X\n",
+ answer->invoke_client_command.error_code);
+
+ return error;
+}
+
+
+/*
+ * Cancels a client command from the Secure World
+ */
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
+
+ command->cancel_client_operation.device_context =
+ connection->device_context;
+ command->cancel_client_operation.message_size =
+ (sizeof(struct tf_command_cancel_client_operation) -
+ sizeof(struct tf_command_header)) / 4;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->cancel_client_operation.error_code != S_SUCCESS))
+ goto error;
+
+
+ /* successful completion */
+ return 0;
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_cancel_client_command returns "
+ "nChannelStatus 0x%08X\n",
+ answer->cancel_client_operation.error_code);
+
+ return error;
+}
+
+
+
+/*
+ * Destroys a device context from the Secure World
+ */
+int tf_destroy_device_context(
+ struct tf_connection *connection)
+{
+ int error;
+ /*
+ * AFY: better use the specialized tf_command_destroy_device_context
+ * structure: this will save stack
+ */
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
+
+ BUG_ON(connection == NULL);
+
+ command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command.header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ /*
+ * Fill in the device context handle.
+ * It is guaranteed that the first shared memory descriptor describes
+ * the device context.
+ */
+ command.destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ false);
+
+ if ((error != 0) ||
+ (answer.destroy_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
+ connection);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error %d\n", error);
+ } else {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.destroy_device_context.error_code);
+ if (answer.destroy_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by connection is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection)
+{
+ int error;
+ struct tf_connection *conn = NULL;
+
+ dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
+
+ /*
+ * Allocate and initialize the conn.
+ * kmalloc only allocates sizeof(*conn) virtual memory
+ */
+ conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
+ GFP_KERNEL);
+ if (conn == NULL) {
+ printk(KERN_ERR "tf_open(): "
+ "Out of memory for conn!\n");
+ error = -ENOMEM;
+ goto error;
+ }
+
+ memset(conn, 0, sizeof(*conn));
+
+ conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ conn->dev = dev;
+ spin_lock_init(&(conn->state_lock));
+ atomic_set(&(conn->pending_op_count), 0);
+ INIT_LIST_HEAD(&(conn->list));
+
+ /*
+ * Initialize the shared memory
+ */
+ error = tf_init_shared_memory(conn);
+ if (error != 0)
+ goto error;
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initialize CUS specifics
+ */
+ tf_crypto_init_cus(conn);
+#endif
+
+ /*
+ * Attach the conn to the device.
+ */
+ spin_lock(&(dev->connection_list_lock));
+ list_add(&(conn->list), &(dev->connection_list));
+ spin_unlock(&(dev->connection_list_lock));
+
+ /*
+ * Successful completion.
+ */
+
+ *connection = conn;
+
+ dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error:
+ dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
+ /* Deallocate the descriptor pages if necessary */
+ internal_kfree(conn);
+ *connection = NULL;
+ return error;
+}
+
+
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection has been destroyed and cannot be used anymore.
+ *
+ * This function does nothing if connection is set to NULL.
+ */
+void tf_close(struct tf_connection *connection)
+{
+ int error;
+ enum TF_CONN_STATE state;
+
+ dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
+
+ if (connection == NULL)
+ return;
+
+ /*
+ * Assumption: Linux guarantees that no other operation is in progress
+ * and that no other operation will be started when close is called
+ */
+ BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
+
+ /*
+ * Exchange a Destroy Device Context message if needed.
+ */
+ spin_lock(&(connection->state_lock));
+ state = connection->state;
+ spin_unlock(&(connection->state_lock));
+ if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
+ /*
+ * A DestroyDeviceContext operation was not performed. Do it
+ * now.
+ */
+ error = tf_destroy_device_context(connection);
+ if (error != 0)
+ /* avoid cleanup if destroy device context fails */
+ goto error;
+ }
+
+ /*
+ * Clean up the shared memory
+ */
+ tf_cleanup_shared_memories(connection);
+
+ spin_lock(&(connection->dev->connection_list_lock));
+ list_del(&(connection->list));
+ spin_unlock(&(connection->dev->connection_list_lock));
+
+ internal_kfree(connection);
+
+ return;
+
+error:
+ dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
+ connection, error);
+}
+
diff --git a/security/tf_driver/tf_conn.h b/security/tf_driver/tf_conn.h
new file mode 100644
index 000000000000..8bed16f19d5f
--- /dev/null
+++ b/security/tf_driver/tf_conn.h
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_CONN_H__
+#define __TF_CONN_H__
+
+#include "tf_defs.h"
+
+/*
+ * Returns a pointer to the connection referenced by the
+ * specified file.
+ */
+static inline struct tf_connection *tf_conn_from_file(
+ struct file *file)
+{
+ return file->private_data;
+}
+
+int tf_validate_shmem_and_flags(u32 shmem, u32 shmem_size, u32 flags);
+
+int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count);
+
+void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection);
+
+int tf_destroy_device_context(
+ struct tf_connection *connection);
+
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection);
+
+void tf_close(
+ struct tf_connection *connection);
+
+
+#endif /* !defined(__TF_CONN_H__) */
diff --git a/security/tf_driver/tf_defs.h b/security/tf_driver/tf_defs.h
new file mode 100644
index 000000000000..ac209370c55d
--- /dev/null
+++ b/security/tf_driver/tf_defs.h
@@ -0,0 +1,538 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_DEFS_H__
+#define __TF_DEFS_H__
+
+#include <linux/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define TF_SHMEM_MAX_COUNT (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are preallocated when initializing the
+ * connection
+ * TF_SHMEM_TYPE_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are not preallocated
+ * TF_SHMEM_TYPE_PM_HIBERNATE :
+ * The descriptor describes a power management shared memory.
+ */
+enum TF_SHMEM_TYPE {
+ TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ TF_SHMEM_TYPE_REGISTERED_SHMEM,
+ TF_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer on a coarse page table
+ */
+struct tf_coarse_page_table {
+ /*
+ * Identifies the coarse page table descriptor in
+ * free_coarse_page_tables list
+ */
+ struct list_head list;
+
+ /*
+ * The address of the coarse page table
+ */
+ u32 *descriptors;
+
+ /*
+ * The address of the array containing this coarse page table
+ */
+ struct tf_coarse_page_table_array *parent;
+};
+
+
+#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct tf_coarse_page_table_array {
+ /*
+ * identifies the element in the coarse_page_table_arrays list
+ */
+ struct list_head list;
+
+ /*
+ * Type of page descriptor
+ * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value
+ */
+ u32 type;
+
+ struct tf_coarse_page_table coarse_page_tables[4];
+
+ /*
+ * A counter of the number of coarse pages currently used
+ * the max value should be 4 (one coarse page table is 1KB while one
+ * page is 4KB)
+ */
+ u8 ref_count;
+};
+
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct tf_coarse_page_table_allocation_context {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * The list of allocated coarse page table arrays
+ */
+ struct list_head coarse_page_table_arrays;
+
+ /*
+ * The list of free coarse page tables
+ */
+ struct list_head free_coarse_page_tables;
+};
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct tf_shmem_desc {
+ /*
+ * Identifies the shared memory descriptor in the list of free shared
+ * memory descriptors
+ */
+ struct list_head list;
+
+ /*
+ * Identifies the type of shared memory descriptor
+ */
+ enum TF_SHMEM_TYPE type;
+
+ /*
+ * The identifier of the block of shared memory, as returned by the
+ * Secure World.
+ * This identifier is the block field of a REGISTER_SHARED_MEMORY answer.
+ */
+ u32 block_identifier;
+
+ /* Client buffer */
+ u8 *client_buffer;
+
+ /* Up to TF_MAX_COARSE_PAGES coarse page tables */
+ struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
+
+ u32 coarse_pg_table_count;
+
+ /* Reference counter */
+ atomic_t ref_count;
+};
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct tf_comm {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * Bit vector with the following possible flags:
+ * - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+ * the IRQ has been successfully requested.
+ * - TF_COMM_FLAG_TERMINATING: If set, indicates that the
+ * communication with the Secure World is being terminated.
+ * Transmissions to the Secure World are not permitted
+ * - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * W3B buffer has been allocated.
+ *
+ * This bit vector must be accessed with the kernel's atomic bitwise
+ * operations.
+ */
+ unsigned long flags;
+
+ /*
+ * The virtual address of the L1 shared buffer.
+ */
+ struct tf_l1_shared_buffer *l1_buffer;
+
+ /*
+ * The wait queue the client threads are waiting on.
+ */
+ wait_queue_head_t wait_queue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+ /*
+ * The interrupt line used by the Secure World.
+ */
+ int soft_int_irq;
+
+ /* ----- W3B ----- */
+ /* shared memory descriptor to identify the W3B */
+ struct tf_shmem_desc w3b_shmem_desc;
+
+ /* Virtual address of the kernel allocated shared memory */
+ u32 w3b;
+
+ /* offset of data in shared memory coarse pages */
+ u32 w3b_shmem_offset;
+
+ u32 w3b_shmem_size;
+
+ struct tf_coarse_page_table_allocation_context
+ w3b_cpt_alloc_context;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * The SE SDP can only be initialized once...
+ */
+ int se_initialized;
+
+ /*
+ * Lock to be held by a client when executing an RPC
+ */
+ struct mutex rpc_mutex;
+
+ /*
+ * Lock to protect concurrent accesses to DMA channels
+ */
+ struct mutex dma_mutex;
+#endif
+};
+
+
+#define TF_COMM_FLAG_IRQ_REQUESTED (0)
+#define TF_COMM_FLAG_PA_AVAILABLE (1)
+#define TF_COMM_FLAG_TERMINATING (2)
+#define TF_COMM_FLAG_W3B_ALLOCATED (3)
+#define TF_COMM_FLAG_L1_SHARED_ALLOCATED (4)
+
+/*----------------------------------------------------------------------------*/
+
+struct tf_device_stats {
+ atomic_t stat_pages_allocated;
+ atomic_t stat_memories_allocated;
+ atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device; see the
+ * global variable g_tf_dev.
+ */
+struct tf_device {
+ /*
+ * The kernel object for the device
+ */
+ struct kobject kobj;
+
+ /*
+ * The device number for the device.
+ */
+ dev_t dev_number;
+
+ /*
+ * Interfaces the char device with the kernel.
+ */
+ struct cdev cdev;
+
+#ifdef CONFIG_TF_TEEC
+ struct cdev cdev_teec;
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ struct cdev cdev_ctrl;
+
+ /*
+ * Globals for CUS
+ */
+ /* Current key handles loaded in HWAs */
+ u32 aes1_key_context;
+ u32 des_key_context;
+ bool sham1_is_public;
+
+ /* Object used to serialize HWA accesses */
+ struct semaphore aes1_sema;
+ struct semaphore des_sema;
+ struct semaphore sha_sema;
+
+ /*
+ * An aligned and correctly shaped pre-allocated buffer used for DMA
+ * transfers
+ */
+ u32 dma_buffer_length;
+ u8 *dma_buffer;
+ dma_addr_t dma_buffer_phys;
+
+ /* Workspace allocated at boot time and reserved to the Secure World */
+ u32 workspace_addr;
+ u32 workspace_size;
+
+ /*
+ * A Mutex to provide exclusive locking of the ioctl()
+ */
+ struct mutex dev_mutex;
+#endif
+
+ /*
+ * Communications with the SM.
+ */
+ struct tf_comm sm;
+
+ /*
+ * Lists the connections attached to this device. A connection is
+ * created each time a user space application "opens" a file descriptor
+ * on the driver
+ */
+ struct list_head connection_list;
+
+ /*
+ * The spin lock used to protect concurrent access to the connection
+ * list.
+ */
+ spinlock_t connection_list_lock;
+
+ struct tf_device_stats stats;
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * TF_CONN_STATE_NO_DEVICE_CONTEXT :
+ * The connection has no DEVICE_CONTEXT created and no
+ * CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ * World
+ * TF_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * The connection has a DEVICE_CONTEXT created and no
+ * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ * World
+ */
+enum TF_CONN_STATE {
+ TF_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT,
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
+
+
+/*
+ * This type describes the status of the command.
+ *
+ * PENDING:
+ * The initial state; the command has not been sent yet.
+ * SENT:
+ * The command has been sent, we are waiting for an answer.
+ * ABORTED:
+ * The command cannot be sent because the device context is invalid.
+ * Note that this only covers the case where some other thread
+ * sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum TF_COMMAND_STATE {
+ TF_COMMAND_STATE_PENDING = 0,
+ TF_COMMAND_STATE_SENT,
+ TF_COMMAND_STATE_ABORTED
+};
+
+/*
+ * The origin of connection parameters such as login data and
+ * memory reference pointers.
+ *
+ * PROCESS: the calling process. All arguments must be validated.
+ * KERNEL: kernel code. All arguments can be trusted by this driver.
+ */
+enum TF_CONNECTION_OWNER {
+ TF_CONNECTION_OWNER_PROCESS = 0,
+ TF_CONNECTION_OWNER_KERNEL,
+};
+
+
+/*
+ * This structure describes a connection to the driver
+ * A connection is created each time an application opens a file descriptor on
+ * the driver
+ */
+struct tf_connection {
+ /*
+ * Identifies the connection in the list of the connections attached to
+ * the same device.
+ */
+ struct list_head list;
+
+ /*
+ * State of the connection.
+ */
+ enum TF_CONN_STATE state;
+
+ /*
+ * A pointer to the corresponding device structure
+ */
+ struct tf_device *dev;
+
+ /*
+ * A spinlock to use to access state
+ */
+ spinlock_t state_lock;
+
+ /*
+ * Counts the number of operations currently pending on the connection.
+ * (for debug only)
+ */
+ atomic_t pending_op_count;
+
+ /*
+ * A handle for the device context
+ */
+ u32 device_context;
+
+ /*
+ * Lists the used shared memory descriptors
+ */
+ struct list_head used_shmem_list;
+
+ /*
+ * Lists the free shared memory descriptors
+ */
+ struct list_head free_shmem_list;
+
+ /*
+ * A mutex to use to access this structure
+ */
+ struct mutex shmem_mutex;
+
+ /*
+ * Counts the number of shared memories registered.
+ */
+ atomic_t shmem_count;
+
+ /*
+ * Page of VMA pointers used to retrieve memory properties when
+ * registering shared memory through REGISTER_SHARED_MEMORY
+ * messages
+ */
+ struct vm_area_struct **vmas;
+
+ /*
+ * coarse page table allocation context
+ */
+ struct tf_coarse_page_table_allocation_context cpt_alloc_context;
+
+ /* The origin of connection parameters such as login data and
+ memory reference pointers. */
+ enum TF_CONNECTION_OWNER owner;
+
+#ifdef CONFIG_TF_ZEBRA
+ /* Lists all the Cryptoki Update Shortcuts */
+ struct list_head shortcut_list;
+
+ /* Lock to protect concurrent accesses to shortcut_list */
+ spinlock_t shortcut_list_lock;
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The operation_id field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission
+ * Whoever reads an answer can wake up that thread using the completion event
+ */
+struct tf_answer_struct {
+ bool answer_copied;
+ union tf_answer *answer;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The ASCII-C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define TF_DEVICE_BASE_NAME "tf_driver"
+
+
+/**
+ * The major and minor numbers of the registered character device driver.
+ * Only 1 instance of the driver is supported.
+ */
+#define TF_DEVICE_MINOR_NUMBER (0)
+
+struct tf_device *tf_get_device(void);
+
+#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO get_current_groups()
+#else
+#define GROUP_INFO (current->group_info)
+#endif
+
+#endif /* !defined(__TF_DEFS_H__) */
diff --git a/security/tf_driver/tf_device.c b/security/tf_driver/tf_device.c
new file mode 100644
index 000000000000..ad44b46c2067
--- /dev/null
+++ b/security/tf_driver/tf_device.c
@@ -0,0 +1,796 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/syscore_ops.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_comm.h"
+#ifdef CONFIG_TF_ZEBRA
+#include <plat/cpu.h>
+#include "tf_zebra.h"
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+#include "tf_crypto.h"
+#endif
+
+#include "s_version.h"
+
+/*----------------------------------------------------------------------------
+ * Forward Declarations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Creates and registers the device to be managed by the specified driver.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_device_register(void);
+
+
+/*
+ * Implements the device Open callback.
+ */
+static int tf_device_open(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device Release callback.
+ */
+static int tf_device_release(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device ioctl callback.
+ */
+static long tf_device_ioctl(
+ struct file *file,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param);
+
+
+/*
+ * Implements the device shutdown callback.
+ */
+static int tf_device_shutdown(void);
+
+
+/*
+ * Implements the device suspend callback.
+ */
+static int tf_device_suspend(void);
+
+
+/*
+ * Implements the device resume callback.
+ */
+static int tf_device_resume(void);
+
+
+/*---------------------------------------------------------------------------
+ * Module Parameters
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The device major number used to register a unique character device driver.
+ * Let the default value be 122
+ */
+static int device_major_number = 122;
+
+module_param(device_major_number, int, 0000);
+MODULE_PARM_DESC(device_major_number,
+ "The device major number used to register a unique character "
+ "device driver");
+
+#ifdef CONFIG_TF_TRUSTZONE
+/**
+ * The softint interrupt line used by the Secure World.
+ */
+static int soft_interrupt = -1;
+
+module_param(soft_interrupt, int, 0000);
+MODULE_PARM_DESC(soft_interrupt,
+ "The softint interrupt line used by the Secure world");
+#endif
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+unsigned tf_debug_level = UINT_MAX;
+module_param_named(debug, tf_debug_level, uint, 0644);
+#endif
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+char *tf_integrity_hmac_sha256_expected_value;
+module_param_named(hmac_sha256, tf_integrity_hmac_sha256_expected_value,
+ charp, 0444);
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+unsigned tf_fault_injection_mask;
+module_param_named(fault, tf_fault_injection_mask, uint, 0644);
+#endif
+
+int tf_self_test_blkcipher_align;
+module_param_named(post_align, tf_self_test_blkcipher_align, int, 0644);
+int tf_self_test_blkcipher_use_vmalloc;
+module_param_named(post_vmalloc, tf_self_test_blkcipher_use_vmalloc, int, 0644);
+#endif
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_class;
+#endif
+
+/*----------------------------------------------------------------------------
+ * Global Variables
+ *----------------------------------------------------------------------------*/
+
+/*
+ * tf_driver character device definitions.
+ * read and write methods are not defined
+ * and will return an error if used by user space
+ */
+static const struct file_operations g_tf_device_file_ops = {
+ .owner = THIS_MODULE,
+ .open = tf_device_open,
+ .release = tf_device_release,
+ .unlocked_ioctl = tf_device_ioctl,
+ .llseek = no_llseek,
+};
+
+
+static struct syscore_ops g_tf_device_syscore_ops = {
+ .shutdown = tf_device_shutdown,
+ .suspend = tf_device_suspend,
+ .resume = tf_device_resume,
+};
+
+/* The single device supported by this driver */
+static struct tf_device g_tf_dev;
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct tf_device *tf_get_device(void)
+{
+ return &g_tf_dev;
+}
+
+/*
+ * sysfs entries
+ */
+struct tf_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct tf_device *, char *);
+ ssize_t (*store)(struct tf_device *, const char *, size_t);
+};
+
+/*
+ * sysfs entry showing allocation stats
+ */
+static ssize_t info_show(struct tf_device *dev, char *buf)
+{
+ struct tf_device_stats *dev_stats = &dev->stats;
+
+ return snprintf(buf, PAGE_SIZE,
+ "stat.memories.allocated: %d\n"
+ "stat.pages.allocated: %d\n"
+ "stat.pages.locked: %d\n",
+ atomic_read(&dev_stats->stat_memories_allocated),
+ atomic_read(&dev_stats->stat_pages_allocated),
+ atomic_read(&dev_stats->stat_pages_locked));
+}
+static struct tf_sysfs_entry tf_info_entry = __ATTR_RO(info);
+
+#ifdef CONFIG_TF_ZEBRA
+/*
+ * sysfs entry showing whether secure world is up and running
+ */
+static ssize_t tf_started_show(struct tf_device *dev, char *buf)
+{
+ int tf_started = test_bit(TF_COMM_FLAG_PA_AVAILABLE,
+ &dev->sm.flags);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", tf_started ? "yes" : "no");
+}
+static struct tf_sysfs_entry tf_started_entry =
+ __ATTR_RO(tf_started);
+
+static ssize_t workspace_addr_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_addr);
+}
+static struct tf_sysfs_entry tf_workspace_addr_entry =
+ __ATTR_RO(workspace_addr);
+
+static ssize_t workspace_size_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_size);
+}
+static struct tf_sysfs_entry tf_workspace_size_entry =
+ __ATTR_RO(workspace_size);
+#endif
+
+static ssize_t tf_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(dev, page);
+}
+
+static ssize_t tf_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(dev, page, length);
+}
+
+static void tf_kobj_release(struct kobject *kobj) {}
+
+static struct attribute *tf_default_attrs[] = {
+ &tf_info_entry.attr,
+#ifdef CONFIG_TF_ZEBRA
+ &tf_started_entry.attr,
+ &tf_workspace_addr_entry.attr,
+ &tf_workspace_size_entry.attr,
+#endif
+ NULL,
+};
+static const struct sysfs_ops tf_sysfs_ops = {
+ .show = tf_attr_show,
+ .store = tf_attr_store,
+};
+static struct kobj_type tf_ktype = {
+ .release = tf_kobj_release,
+ .sysfs_ops = &tf_sysfs_ops,
+ .default_attrs = tf_default_attrs
+};
+
+/*----------------------------------------------------------------------------*/
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+static char *smc_mem;
+module_param(smc_mem, charp, S_IRUGO);
+#endif
+
+/*
+ * First routine called when the kernel module is loaded
+ */
+static int __init tf_device_register(void)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+
+ dprintk(KERN_INFO "tf_device_register()\n");
+
+ /*
+ * Initialize the device
+ */
+ dev->dev_number = MKDEV(device_major_number,
+ TF_DEVICE_MINOR_NUMBER);
+ cdev_init(&dev->cdev, &g_tf_device_file_ops);
+ dev->cdev.owner = THIS_MODULE;
+
+ INIT_LIST_HEAD(&dev->connection_list);
+ spin_lock_init(&dev->connection_list_lock);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+ error = (*tf_comm_early_init)();
+ if (error)
+ goto module_early_init_failed;
+
+ error = tf_device_mshield_init(smc_mem);
+ if (error)
+ goto mshield_init_failed;
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ error = tf_crypto_hmac_module_init();
+ if (error)
+ goto hmac_init_failed;
+
+ error = tf_self_test_register_device();
+ if (error)
+ goto self_test_register_device_failed;
+#endif
+#endif
+
+ /* register the sysfs object driver stats */
+ error = kobject_init_and_add(&dev->kobj, &tf_ktype, NULL, "%s",
+ TF_DEVICE_BASE_NAME);
+ if (error) {
+ printk(KERN_ERR "tf_device_register(): "
+ "kobject_init_and_add failed (error %d)!\n", error);
+ kobject_put(&dev->kobj);
+ goto kobject_init_and_add_failed;
+ }
+
+ /*
+ * Register the system device.
+ */
+ register_syscore_ops(&g_tf_device_syscore_ops);
+
+ /*
+ * Register the char device.
+ */
+ printk(KERN_INFO "Registering char device %s (%u:%u)\n",
+ TF_DEVICE_BASE_NAME,
+ MAJOR(dev->dev_number),
+ MINOR(dev->dev_number));
+ error = register_chrdev_region(dev->dev_number, 1,
+ TF_DEVICE_BASE_NAME);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register():"
+ " register_chrdev_region failed (error %d)!\n",
+ error);
+ goto register_chrdev_region_failed;
+ }
+
+ error = cdev_add(&dev->cdev, dev->dev_number, 1);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register(): "
+ "cdev_add failed (error %d)!\n",
+ error);
+ goto cdev_add_failed;
+ }
+
+ /*
+ * Initialize the communication with the Secure World.
+ */
+#ifdef CONFIG_TF_TRUSTZONE
+ dev->sm.soft_int_irq = soft_interrupt;
+#endif
+ error = tf_init(&g_tf_dev.sm);
+ if (error != S_SUCCESS) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_init failed (error %d)!\n",
+ error);
+ goto init_failed;
+ }
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ error = tf_self_test_post_init(&(dev_stats->kobj));
+ /* N.B. error > 0 indicates a POST failure, which will not
+ prevent the module from loading. */
+ if (error < 0) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_self_test_post_vectors failed (error %d)!\n",
+ error);
+ goto post_failed;
+ }
+#endif
+
+#ifdef CONFIG_ANDROID
+ tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
+ device_create(tf_class, NULL,
+ dev->dev_number,
+ NULL, TF_DEVICE_BASE_NAME);
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initializes the /dev/tf_ctrl device node.
+ */
+ error = tf_ctrl_device_register();
+ if (error)
+ goto ctrl_failed;
+#endif
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ address_cache_property((unsigned long) &tf_device_register);
+#endif
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_register(): Success\n");
+ return 0;
+
+ /*
+ * Error: undo all operations in the reverse order
+ */
+#ifdef CONFIG_TF_ZEBRA
+ctrl_failed:
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ tf_self_test_post_exit();
+post_failed:
+#endif
+init_failed:
+ cdev_del(&dev->cdev);
+cdev_add_failed:
+ unregister_chrdev_region(dev->dev_number, 1);
+register_chrdev_region_failed:
+ unregister_syscore_ops(&g_tf_device_syscore_ops);
+kobject_init_and_add_failed:
+ kobject_del(&g_tf_dev.kobj);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ tf_self_test_unregister_device();
+self_test_register_device_failed:
+ tf_crypto_hmac_module_exit();
+hmac_init_failed:
+#endif
+ tf_device_mshield_exit();
+mshield_init_failed:
+module_early_init_failed:
+#endif
+ dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
+ error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_open(struct inode *inode, struct file *file)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+ struct tf_connection *connection = NULL;
+
+ dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ /* Mark the file as non-seekable */
+ error = nonseekable_open(inode, file);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "nonseekable_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+#ifndef CONFIG_ANDROID
+ /*
+ * Check the file flags. Only the O_RDWR access mode is authorized.
+ */
+ if (file->f_flags != O_RDWR) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "Invalid access mode %u\n",
+ file, file->f_flags);
+ error = -EACCES;
+ goto error;
+ }
+#endif
+
+ /*
+ * Open a new connection.
+ */
+
+ error = tf_open(dev, file, &connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+ file->private_data = connection;
+
+ /*
+ * Send the CreateDeviceContext command to the Secure World.
+ */
+ error = tf_create_device_context(connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_create_device_context failed (error %d)!\n",
+ file, error);
+ goto error1;
+ }
+
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n",
+ file, connection);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error1:
+ tf_close(connection);
+error:
+ dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n",
+ file, error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_release(struct inode *inode, struct file *file)
+{
+ struct tf_connection *connection;
+
+ dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ connection = tf_conn_from_file(file);
+ tf_close(connection);
+
+ dprintk(KERN_INFO "tf_device_release(%p): Success\n", file);
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int result = S_SUCCESS;
+ struct tf_connection *connection;
+ union tf_command command;
+ struct tf_command_header header;
+ union tf_answer answer;
+ u32 command_size;
+ u32 answer_size;
+ void *user_answer;
+
+ dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ switch (ioctl_num) {
+ case IOCTL_TF_GET_VERSION:
+ /* ioctl is asking for the driver interface version */
+ result = TF_DRIVER_INTERFACE_VERSION;
+ goto exit;
+
+ case IOCTL_TF_EXCHANGE:
+ /*
+ * ioctl is asking to perform a message exchange with the Secure
+ * Module
+ */
+
+ /*
+ * Make a local copy of the data from the user application
+ * This routine checks the data is readable
+ *
+ * Get the header first.
+ */
+ if (copy_from_user(&header,
+ (struct tf_command_header *)ioctl_param,
+ sizeof(struct tf_command_header))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* Command size, in 32-bit words, header included */
+ command_size = header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if (command_size > sizeof(command)/sizeof(u32)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Buffer overflow: too many bytes to copy %d\n",
+ file, command_size);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if (copy_from_user(&command,
+ (union tf_command *)ioctl_param,
+ command_size * sizeof(u32))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ connection = tf_conn_from_file(file);
+ BUG_ON(connection == NULL);
+
+ /*
+ * The answer memory space address is in the operation_id field
+ */
+ user_answer = (void *) command.header.operation_id;
+
+ atomic_inc(&(connection->pending_op_count));
+
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Sending message type 0x%08x\n",
+ file, command.header.message_type);
+
+ switch (command.header.message_type) {
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ result = tf_open_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ result = tf_close_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ result = tf_register_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ result = tf_release_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ result = tf_invoke_client_command(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ result = tf_cancel_client_command(connection,
+ &command, &answer);
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Incorrect message type (0x%08x)!\n",
+ connection, command.header.message_type);
+ result = -EOPNOTSUPP;
+ break;
+ }
+
+ atomic_dec(&(connection->pending_op_count));
+
+ if (result != 0) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Operation returning error code 0x%08x)!\n",
+ file, result);
+ goto exit;
+ }
+
+ /*
+ * Copy the answer back to the user space application.
+ * The driver does not check this field, only copy back to user
+ * space the data handed over by Secure World
+ */
+ answer_size = answer.header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
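+ /*
+ * answer_size is in 32-bit words, answer header included; the
+ * destination pointer was taken from the operation_id field of the
+ * command earlier in this function.
+ */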
+ if (copy_to_user(user_answer,
+ &answer, answer_size * sizeof(u32))) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the full command "
+ "answer to %p\n", file, user_answer);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
+ break;
+
+ case IOCTL_TF_GET_DESCRIPTION: {
+ /* ioctl asking for the version information buffer */
+ struct tf_version_information_buffer *pInfoBuffer;
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ pInfoBuffer =
+ ((struct tf_version_information_buffer *) ioctl_param);
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
+ "driver_description=\"%64s\"\n", S_VERSION_STRING);
+
+ if (copy_to_user(pInfoBuffer->driver_description,
+ S_VERSION_STRING,
+ strlen(S_VERSION_STRING) + 1)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Fail to copy back the driver description "
+ "to %p\n",
+ file, pInfoBuffer->driver_description);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
+ "secure_world_description=\"%64s\"\n",
+ tf_get_description(&g_tf_dev.sm));
+
+ if (copy_to_user(pInfoBuffer->secure_world_description,
+ tf_get_description(&g_tf_dev.sm),
+ TF_DESCRIPTION_BUFFER_LENGTH)) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the secure world "
+ "description to %p\n",
+ file, pInfoBuffer->secure_world_description);
+ result = -EFAULT;
+ goto exit;
+ }
+ break;
+ }
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Unknown IOCTL code 0x%08x!\n",
+ file, ioctl_num);
+ result = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+ return result;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_shutdown(void)
+{
+
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_SHUTDOWN);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_suspend(void)
+{
+ dprintk(KERN_INFO "tf_device_suspend: Enter\n");
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_HIBERNATE);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_resume(void)
+{
+ return tf_power_management(&g_tf_dev.sm,
+ TF_POWER_OPERATION_RESUME);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+module_init(tf_device_register);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/tf_protocol.h b/security/tf_driver/tf_protocol.h
new file mode 100644
index 000000000000..403df8ec8ef5
--- /dev/null
+++ b/security/tf_driver/tf_protocol.h
@@ -0,0 +1,690 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PROTOCOL_H__
+#define __TF_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structures used in the SChannel protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define TF_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define TF_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) ((a) >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) (((a) >> 16) & 0xFF)
+
+/*
+ * The S flag of the config_flag_s register.
+ */
+#define TF_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the sync_serial_n register.
+ */
+#define TF_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * status_s related defines.
+ */
+#define TF_STATUS_P_MASK			(0x00000001)
+#define TF_STATUS_POWER_STATE_SHIFT (3)
+#define TF_STATUS_POWER_STATE_MASK (0x1F << TF_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the status_s register
+ */
+#define TF_POWER_MODE_COLD_BOOT (0)
+#define TF_POWER_MODE_WARM_BOOT (1)
+#define TF_POWER_MODE_ACTIVE (3)
+#define TF_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define TF_POWER_MODE_READY_TO_HIBERNATE (7)
+#define TF_POWER_MODE_WAKEUP (8)
+#define TF_POWER_MODE_PANIC (15)
+
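For illustration, the POWER_STATE field is recovered from a status_s value by masking and shifting with the constants above; a minimal sketch (the helper name is hypothetical, not part of the protocol header):

/* Hypothetical helper: extract the POWER_STATE field (one of the
 * TF_POWER_MODE_* values above) from a status_s register value. */
static inline u32 tf_status_power_state(u32 status_s)
{
	return (status_s & TF_STATUS_POWER_STATE_MASK) >>
		TF_STATUS_POWER_STATE_SHIFT;
}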
+/*
+ * Possible command values for MANAGEMENT commands
+ */
+#define TF_MANAGEMENT_HIBERNATE (1)
+#define TF_MANAGEMENT_SHUTDOWN (2)
+#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal World message queue, in number of slots.
+ */
+#define TF_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define TF_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define TF_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define TF_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define TF_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define TF_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define TF_SMC_GET_PROTOCOL_VERSION (0xFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define TF_SMC_INIT (0xFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define TF_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the wake-up (SET_W3B) SMC.
+ */
+#define TF_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define TF_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define TF_SMC_N_YIELD (0x00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct tf_uuid {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct tf_command_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_command_param_temp_memref {
+ u32 descriptor; /* data pointer for exchange message.*/
+ u32 size;
+ u32 offset;
+};
+
+struct tf_command_param_memref {
+ u32 block;
+ u32 size;
+ u32 offset;
+};
+
+union tf_command_param {
+ struct tf_command_param_value value;
+ struct tf_command_param_temp_memref temp_memref;
+ struct tf_command_param_memref memref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct tf_answer_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_answer_param_size {
+ u32 _ignored;
+ u32 size;
+};
+
+union tf_answer_param {
+ struct tf_answer_param_size size;
+ struct tf_answer_param_value value;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define TF_MAX_W3B_COARSE_PAGES (2)
+/* TF_MAX_COARSE_PAGES is the number of level 1 descriptors (describing
+ * 1MB each) that can be shared with the secure world in a single registered
+ * shared memory block. It must be kept in sync with
+ * SCHANNEL6_MAX_DESCRIPTORS_PER_REGISTERED_SHARED_MEM in the SChannel
+ * protocol spec. */
+#define TF_MAX_COARSE_PAGES 128
+#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define TF_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (TF_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Shared memories coarse pages can map up to 1MB */
+#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define TF_MAX_SHMEM_SIZE \
+ (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
+
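As a worked example of the sizing above: with 4 KB pages a descriptor table holds TF_DESCRIPTOR_TABLE_CAPACITY (256) entries, so one coarse page maps 1 MB and a single registered block is capped at 8 MB. A hedged sketch of the descriptor-count computation (illustrative only; it ignores the start offset, which the real driver must also account for):

/* Illustrative only: number of coarse page tables needed to map `size'
 * bytes of shared memory, using the capacity constants above. */
static u32 tf_coarse_pages_needed(u32 size)
{
	return (size + TF_MAX_COARSE_PAGE_MAPPED_SIZE - 1) /
		TF_MAX_COARSE_PAGE_MAPPED_SIZE;
}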
+/*
+ * Buffer size for version description fields
+ */
+#define TF_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define TF_SHMEM_TYPE_READ (0x00000001)
+#define TF_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define TF_SHARED_MEM_FLAG_INPUT 1
+#define TF_SHARED_MEM_FLAG_OUTPUT 2
+#define TF_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define TF_PARAM_TYPE_NONE 0x0
+#define TF_PARAM_TYPE_VALUE_INPUT 0x1
+#define TF_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define TF_PARAM_TYPE_VALUE_INOUT 0x3
+#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define TF_PARAM_TYPE_MEMREF_INPUT 0xD
+#define TF_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define TF_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define TF_PARAM_TYPE_MEMREF_FLAG 0x4
+#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
+
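A brief usage sketch of the packing macros above; the parameter types chosen here are purely illustrative:

/* Illustration: pack four parameter types into the 16-bit param_types
 * field and read one of them back. */
u16 param_types = TF_MAKE_PARAM_TYPES(
	TF_PARAM_TYPE_VALUE_INPUT,		/* param 0 */
	TF_PARAM_TYPE_MEMREF_TEMP_INOUT,	/* param 1 */
	TF_PARAM_TYPE_NONE,			/* param 2 */
	TF_PARAM_TYPE_NONE);			/* param 3 */

/* TF_GET_PARAM_TYPE(param_types, 1) == TF_PARAM_TYPE_MEMREF_TEMP_INOUT */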
+/*
+ * Login types.
+ */
+#define TF_LOGIN_PUBLIC 0x00000000
+#define TF_LOGIN_USER 0x00000001
+#define TF_LOGIN_GROUP 0x00000002
+#define TF_LOGIN_APPLICATION 0x00000004
+#define TF_LOGIN_APPLICATION_USER 0x00000005
+#define TF_LOGIN_APPLICATION_GROUP 0x00000006
+#define TF_LOGIN_AUTHENTICATION 0x80000000
+#define TF_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+#define TF_LOGIN_VARIANT(main_type, os, variant) \
+ ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define TF_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define TF_LOGIN_OS_ANY 0x00
+#define TF_LOGIN_OS_LINUX 0x01
+#define TF_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define TF_LOGIN_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_GROUP_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_APPLICATION_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01)
+#define TF_LOGIN_PRIVILEGED_KERNEL \
+ TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define TF_LOGIN_USER_LINUX_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_GROUP_LINUX_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define TF_LOGIN_USER_ANDROID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_GROUP_ANDROID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_ANDROID_UID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+
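To make the encoding concrete: TF_LOGIN_VARIANT sets bit 27 and places the OS and variant codes in bits 23:16 and 15:8, so the expansion below follows directly from the definitions above:

/*
 * Worked expansion of one variant:
 *   TF_LOGIN_USER_LINUX_EUID
 *     = TF_LOGIN_USER | (1 << 27) | (TF_LOGIN_OS_LINUX << 16) | (0x01 << 8)
 *     = 0x00000001 | 0x08000000 | 0x00010000 | 0x00000100
 *     = 0x08010101
 * and TF_LOGIN_GET_MAIN_TYPE(0x08010101) strips the OS/variant bits
 * again, yielding TF_LOGIN_USER (0x00000001).
 */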
+/*
+ * return origins
+ */
+#define TF_ORIGIN_COMMS 2
+#define TF_ORIGIN_TEE 3
+#define TF_ORIGIN_TRUSTED_APP 4
+/*
+ * The message types.
+ */
+#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define TF_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The SChannel error codes.
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct tf_command_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+};
+
+struct tf_answer_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct tf_command_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context_id;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ /* an opaque Normal World identifier for the device context */
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct tf_command_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 device_context_id;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct tf_command_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 cancellation_id;
+ u64 timeout;
+ struct tf_uuid destination_uuid;
+ union tf_command_param params[4];
+ u32 login_type;
+ /*
+ * Size = 0 for public, [16] for group identification, [20] for
+ * authentication
+ */
+ u8 login_data[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct tf_answer_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 client_session;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct tf_command_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct tf_answer_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct tf_command_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 memory_flags;
+ u32 operation_id;
+ u32 device_context;
+ u32 block_id;
+ u32 shared_mem_size;
+ u32 shared_mem_start_offset;
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct tf_answer_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct tf_command_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct tf_answer_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+ u32 block_id;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct tf_command_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u64 timeout;
+ u32 cancellation_id;
+ u32 client_command_identifier;
+ union tf_command_param params[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct tf_answer_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ u32 operation_id;
+ u32 error_code;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct tf_command_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u32 cancellation_id;
+};
+
+struct tf_answer_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct tf_command_management {
+ u8 message_size;
+ u8 message_type;
+ u16 command;
+ u32 operation_id;
+ u32 w3b_size;
+ u32 w3b_start_offset;
+ u32 shared_mem_descriptors[1];
+};
+
+/*
+ * MANAGEMENT answer message.
+ * The answer carries no message-specific parameters, so no dedicated
+ * answer structure is defined.
+ */
+
+/*
+ * Union of all possible L2 command messages.
+ */
+union tf_command {
+ struct tf_command_header header;
+ struct tf_command_create_device_context create_device_context;
+ struct tf_command_destroy_device_context destroy_device_context;
+ struct tf_command_open_client_session open_client_session;
+ struct tf_command_close_client_session close_client_session;
+ struct tf_command_register_shared_memory register_shared_memory;
+ struct tf_command_release_shared_memory release_shared_memory;
+ struct tf_command_invoke_client_command invoke_client_command;
+ struct tf_command_cancel_client_operation cancel_client_operation;
+ struct tf_command_management management;
+};
+
+/*
+ * Union of all possible L2 answer messages.
+ */
+
+union tf_answer {
+ struct tf_answer_header header;
+ struct tf_answer_create_device_context create_device_context;
+ struct tf_answer_open_client_session open_client_session;
+ struct tf_answer_close_client_session close_client_session;
+ struct tf_answer_register_shared_memory register_shared_memory;
+ struct tf_answer_release_shared_memory release_shared_memory;
+ struct tf_answer_invoke_client_command invoke_client_command;
+ struct tf_answer_destroy_device_context destroy_device_context;
+ struct tf_answer_cancel_client_operation cancel_client_operation;
+};
+
+/* Structure of the Communication Buffer */
+struct tf_l1_shared_buffer {
+ #ifdef CONFIG_TF_ZEBRA
+ u32 exit_code;
+ u32 l1_shared_buffer_descr;
+ u32 backing_store_addr;
+ u32 backext_storage_addr;
+ u32 workspace_addr;
+ u32 workspace_size;
+ u32 conf_descriptor;
+ u32 conf_size;
+ u32 conf_offset;
+ u32 protocol_version;
+ u32 rpc_command;
+ u32 rpc_status;
+ u8 reserved1[16];
+ #else
+ u32 config_flag_s;
+ u32 w3b_size_max_s;
+ u32 reserved0;
+ u32 w3b_size_current_s;
+ u8 reserved1[48];
+ #endif
+ u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH];
+ u32 status_s;
+ u32 reserved2;
+ u32 sync_serial_n;
+ u32 sync_serial_s;
+ u64 time_n[2];
+ u64 timeout_s[2];
+ u32 first_command;
+ u32 first_free_command;
+ u32 first_answer;
+ u32 first_free_answer;
+ u32 w3b_descriptors[128];
+ #ifdef CONFIG_TF_ZEBRA
+ u8 rpc_trace_buffer[140];
+ u8 rpc_cus_buffer[180];
+ #else
+ u8 reserved3[320];
+ #endif
+ u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY];
+ u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY];
+};
+
+
+/*
+ * tf_version_information_buffer structure description
+ * Buffer handed over from user space to kernel space.  The driver fills
+ * it during the IOCTL_TF_GET_DESCRIPTION ioctl and hands it back to
+ * user space.
+ */
+struct tf_version_information_buffer {
+ u8 driver_description[65];
+ u8 secure_world_description[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+#define IOCTL_TF_GET_VERSION _IO('z', 0)
+#define IOCTL_TF_EXCHANGE _IOWR('z', 1, union tf_command)
+#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \
+ struct tf_version_information_buffer)
+
+#endif /* !defined(__TF_PROTOCOL_H__) */
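For reference, a minimal user-space sketch of driving IOCTL_TF_GET_DESCRIPTION; the device node path is an assumption made for illustration, and the u8/u32 typedefs are supplied locally because the header uses kernel-style type names:

/* Hypothetical user-space example (not part of the driver). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

typedef __u8  u8;	/* tf_protocol.h expects kernel-style type names */
typedef __u16 u16;
typedef __u32 u32;
typedef __u64 u64;
#include "tf_protocol.h"

int main(void)
{
	struct tf_version_information_buffer info;
	int fd = open("/dev/tf_driver", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	if (ioctl(fd, IOCTL_TF_GET_DESCRIPTION, &info) == 0)
		printf("driver: %s\nsecure world: %s\n",
		       (char *)info.driver_description,
		       (char *)info.secure_world_description);
	close(fd);
	return 0;
}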
diff --git a/security/tf_driver/tf_util.c b/security/tf_driver/tf_util.c
new file mode 100644
index 000000000000..78f90bf677e0
--- /dev/null
+++ b/security/tf_driver/tf_util.c
@@ -0,0 +1,1143 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/mman.h>
+#include "tf_util.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void tf_trace_array(const char *fun, const char *msg,
+ const void *ptr, size_t len)
+{
+	char hex[511] = "";
+ bool ell = (len > sizeof(hex)/2);
+ unsigned lim = (len > sizeof(hex)/2 ? sizeof(hex)/2 : len);
+ unsigned i;
+ for (i = 0; i < lim; i++)
+ sprintf(hex + 2 * i, "%02x", ((unsigned char *)ptr)[i]);
+	pr_info("%s: %s[%zu] = %s%s\n",
+		fun, msg, len, hex, ell ? "..." : "");
+}
+
+void address_cache_property(unsigned long va)
+{
+ unsigned long pa;
+ unsigned long inner;
+ unsigned long outer;
+
+ asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
+
+ dprintk(KERN_INFO "VA:%x, PA:%x\n",
+ (unsigned int) va,
+ (unsigned int) pa);
+
+ if (pa & 1) {
+ dprintk(KERN_INFO "Prop Error\n");
+ return;
+ }
+
+ outer = (pa >> 2) & 3;
+ dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
+
+ switch (outer) {
+ case 3:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 2:
+ dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ inner = (pa >> 4) & 7;
+ dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
+
+ switch (inner) {
+ case 7:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 6:
+ dprintk(KERN_INFO "Write-Through.\n");
+ break;
+ case 5:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 3:
+ dprintk(KERN_INFO "Device.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Strongly-ordered.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ if (pa & 0x00000002)
+ dprintk(KERN_INFO "SuperSection.\n");
+ if (pa & 0x00000080)
+ dprintk(KERN_INFO "Memory is shareable.\n");
+ else
+ dprintk(KERN_INFO "Memory is non-shareable.\n");
+
+ if (pa & 0x00000200)
+ dprintk(KERN_INFO "Non-secure.\n");
+}
+
+/*
+ * Dump the L1 shared buffer.
+ */
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
+{
+ dprintk(KERN_INFO
+ "buffer@%p:\n"
+ #ifndef CONFIG_TF_ZEBRA
+ " config_flag_s=%08X\n"
+ #endif
+ " version_description=%64s\n"
+ " status_s=%08X\n"
+ " sync_serial_n=%08X\n"
+ " sync_serial_s=%08X\n"
+ " time_n[0]=%016llX\n"
+ " time_n[1]=%016llX\n"
+ " timeout_s[0]=%016llX\n"
+ " timeout_s[1]=%016llX\n"
+ " first_command=%08X\n"
+ " first_free_command=%08X\n"
+ " first_answer=%08X\n"
+ " first_free_answer=%08X\n\n",
+ buffer,
+ #ifndef CONFIG_TF_ZEBRA
+ buffer->config_flag_s,
+ #endif
+ buffer->version_description,
+ buffer->status_s,
+ buffer->sync_serial_n,
+ buffer->sync_serial_s,
+ buffer->time_n[0],
+ buffer->time_n[1],
+ buffer->timeout_s[0],
+ buffer->timeout_s[1],
+ buffer->first_command,
+ buffer->first_free_command,
+ buffer->first_answer,
+ buffer->first_free_answer);
+}
+
+
+/*
+ * Dump the specified SChannel message using dprintk.
+ */
+void tf_dump_command(union tf_command *command)
+{
+ u32 i;
+
+ dprintk(KERN_INFO "message@%p:\n", command);
+
+ switch (command->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->create_device_context.device_context_id
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->destroy_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " cancellation_id = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " destination_uuid = "
+ "%08X-%04X-%04X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->open_client_session.param_types,
+ command->header.operation_id,
+ command->open_client_session.device_context,
+ command->open_client_session.cancellation_id,
+ command->open_client_session.timeout,
+ command->open_client_session.destination_uuid.
+ time_low,
+ command->open_client_session.destination_uuid.
+ time_mid,
+ command->open_client_session.destination_uuid.
+ time_hi_and_version,
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[0],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[1],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[2],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[3],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[4],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[5],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[6],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[7]
+ );
+
+ for (i = 0; i < 4; i++) {
+ uint32_t *param = (uint32_t *) &command->
+ open_client_session.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n",
+ i, param[0], param[1], param[2]);
+ }
+
+ switch (TF_LOGIN_GET_MAIN_TYPE(
+ command->open_client_session.login_type)) {
+ case TF_LOGIN_PUBLIC:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PUBLIC\n");
+ break;
+ case TF_LOGIN_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_USER\n");
+ break;
+ case TF_LOGIN_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_GROUP\n");
+ break;
+ case TF_LOGIN_APPLICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION\n");
+ break;
+ case TF_LOGIN_APPLICATION_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_USER\n");
+ break;
+ case TF_LOGIN_APPLICATION_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_GROUP\n");
+ break;
+ case TF_LOGIN_AUTHENTICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_AUTHENTICATION\n");
+ break;
+ case TF_LOGIN_PRIVILEGED:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED\n");
+ break;
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED_KERNEL\n");
+ break;
+ default:
+ dprintk(
+ KERN_ERR " login_type = "
+ "0x%08X (Unknown login type)\n",
+ command->open_client_session.login_type);
+ break;
+ }
+
+ dprintk(
+ KERN_INFO " login_data = ");
+ for (i = 0; i < 20; i++)
+ dprintk(
+ KERN_INFO "%d",
+ command->open_client_session.
+ login_data[i]);
+ dprintk("\n");
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->close_client_session.device_context,
+ command->close_client_session.client_session
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
+ " memory_flags = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block_id = 0x%08X\n"
+ " shared_mem_size = 0x%08X\n"
+ " shared_mem_start_offset = 0x%08X\n"
+ " shared_mem_descriptors[0] = 0x%08X\n"
+ " shared_mem_descriptors[1] = 0x%08X\n"
+ " shared_mem_descriptors[2] = 0x%08X\n"
+ " shared_mem_descriptors[3] = 0x%08X\n"
+ " shared_mem_descriptors[4] = 0x%08X\n"
+ " shared_mem_descriptors[5] = 0x%08X\n"
+ " shared_mem_descriptors[6] = 0x%08X\n"
+ " shared_mem_descriptors[7] = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->register_shared_memory.memory_flags,
+ command->header.operation_id,
+ command->register_shared_memory.device_context,
+ command->register_shared_memory.block_id,
+ command->register_shared_memory.shared_mem_size,
+ command->register_shared_memory.
+ shared_mem_start_offset,
+ command->register_shared_memory.
+ shared_mem_descriptors[0],
+ command->register_shared_memory.
+ shared_mem_descriptors[1],
+ command->register_shared_memory.
+ shared_mem_descriptors[2],
+ command->register_shared_memory.
+ shared_mem_descriptors[3],
+ command->register_shared_memory.
+ shared_mem_descriptors[4],
+ command->register_shared_memory.
+ shared_mem_descriptors[5],
+ command->register_shared_memory.
+ shared_mem_descriptors[6],
+ command->register_shared_memory.
+ shared_mem_descriptors[7]);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->release_shared_memory.device_context,
+ command->release_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " cancellation_id = 0x%08X\n"
+ " client_command_identifier = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->invoke_client_command.param_types,
+ command->header.operation_id,
+ command->invoke_client_command.device_context,
+ command->invoke_client_command.client_session,
+ command->invoke_client_command.timeout,
+ command->invoke_client_command.cancellation_id,
+ command->invoke_client_command.
+ client_command_identifier
+ );
+
+ for (i = 0; i < 4; i++) {
+ uint32_t *param = (uint32_t *) &command->
+ open_client_session.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n", i,
+ param[0], param[1], param[2]);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->cancel_client_operation.device_context,
+ command->cancel_client_operation.client_session);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " command = 0x%08X\n"
+ " w3b_size = 0x%08X\n"
+ " w3b_start_offset = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->management.command,
+ command->management.w3b_size,
+ command->management.w3b_start_offset);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%08X "
+ "(Unknown message type)\n",
+ command->header.message_type);
+ break;
+ }
+}
+
+
+/*
+ * Dump the specified SChannel answer using dprintk.
+ */
+void tf_dump_answer(union tf_answer *answer)
+{
+ u32 i;
+ dprintk(
+ KERN_INFO "answer@%p:\n",
+ answer);
+
+ switch (answer->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_create_device_context\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->create_device_context.error_code,
+ answer->create_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->destroy_device_context.error_code,
+ answer->destroy_device_context.device_context_id);
+ break;
+
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_open_client_session\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->open_client_session.error_origin,
+ answer->header.operation_id,
+ answer->open_client_session.error_code,
+ answer->open_client_session.client_session);
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->open_client_session.answers[i].
+ value.a,
+ answer->open_client_session.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->close_client_session.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_register_shared_memory\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->register_shared_memory.error_code,
+ answer->register_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->release_shared_memory.error_code,
+ answer->release_shared_memory.block_id);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_invoke_client_command\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->invoke_client_command.error_origin,
+ answer->header.operation_id,
+ answer->invoke_client_command.error_code
+ );
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->invoke_client_command.answers[i].
+ value.a,
+ answer->invoke_client_command.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_ANSWER_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->cancel_client_operation.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->header.error_code);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%02X "
+ "(Unknown message type)\n",
+ answer->header.message_type);
+ break;
+
+ }
+}
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+/*----------------------------------------------------------------------------
+ * SHA-1 implementation
+ * This is taken from the Linux kernel source crypto/sha1.c
+ *----------------------------------------------------------------------------*/
+
+struct sha1_ctx {
+ u64 count;
+ u32 state[5];
+ u8 buffer[64];
+};
+
+static inline u32 rol(u32 value, u32 bits)
+{
+ return ((value) << (bits)) | ((value) >> (32 - (bits)));
+}
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) block32[i]
+
+#define blk(i) (block32[i & 15] = rol( \
+ block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
+ block32[(i + 2) & 15] ^ block32[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R1(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R2(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R3(v, w, x, y, z, i) do { \
+ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R4(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void sha1_transform(u32 *state, const u8 *in)
+{
+ u32 a, b, c, d, e;
+ u32 block32[16];
+
+ /* convert/copy data to workspace */
+ for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
+ block32[a] = ((u32) in[4 * a]) << 24 |
+ ((u32) in[4 * a + 1]) << 16 |
+ ((u32) in[4 * a + 2]) << 8 |
+ ((u32) in[4 * a + 3]);
+
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
+ R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
+ R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
+ R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
+ R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
+ R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
+ R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
+ R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
+
+ R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
+ R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
+
+ R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
+ R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
+ R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
+ R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
+ R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
+ R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
+ R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
+ R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
+ R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
+ R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
+
+ R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
+ R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
+ R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
+ R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
+ R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
+ R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
+ R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
+ R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
+ R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
+ R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
+
+ R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
+ R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
+ R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
+ R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
+ R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
+ R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
+ R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
+ R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
+ R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
+ R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
+
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+ memset(block32, 0x00, sizeof(block32));
+}
+
+
+static void sha1_init(void *ctx)
+{
+ struct sha1_ctx *sctx = ctx;
+ static const struct sha1_ctx initstate = {
+ 0,
+ { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
+ { 0, }
+ };
+
+ *sctx = initstate;
+}
+
+
+static void sha1_update(void *ctx, const u8 *data, unsigned int len)
+{
+ struct sha1_ctx *sctx = ctx;
+ unsigned int i, j;
+
+ j = (sctx->count >> 3) & 0x3f;
+ sctx->count += len << 3;
+
+ if ((j + len) > 63) {
+ memcpy(&sctx->buffer[j], data, (i = 64 - j));
+ sha1_transform(sctx->state, sctx->buffer);
+ for ( ; i + 63 < len; i += 64)
+ sha1_transform(sctx->state, &data[i]);
+ j = 0;
+ } else
+ i = 0;
+ memcpy(&sctx->buffer[j], &data[i], len - i);
+}
+
+
+/* Add padding and return the message digest. */
+static void sha1_final(void *ctx, u8 *out)
+{
+ struct sha1_ctx *sctx = ctx;
+ u32 i, j, index, padlen;
+ u64 t;
+ u8 bits[8] = { 0, };
+ static const u8 padding[64] = { 0x80, };
+
+ t = sctx->count;
+ bits[7] = 0xff & t; t >>= 8;
+ bits[6] = 0xff & t; t >>= 8;
+ bits[5] = 0xff & t; t >>= 8;
+ bits[4] = 0xff & t; t >>= 8;
+ bits[3] = 0xff & t; t >>= 8;
+ bits[2] = 0xff & t; t >>= 8;
+ bits[1] = 0xff & t; t >>= 8;
+ bits[0] = 0xff & t;
+
+ /* Pad out to 56 mod 64 */
+ index = (sctx->count >> 3) & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ sha1_update(sctx, padding, padlen);
+
+ /* Append length */
+ sha1_update(sctx, bits, sizeof(bits));
+
+ /* Store state in digest */
+ for (i = j = 0; i < 5; i++, j += 4) {
+ u32 t2 = sctx->state[i];
+ out[j+3] = t2 & 0xff; t2 >>= 8;
+ out[j+2] = t2 & 0xff; t2 >>= 8;
+ out[j+1] = t2 & 0xff; t2 >>= 8;
+ out[j] = t2 & 0xff;
+ }
+
+ /* Wipe context */
+ memset(sctx, 0, sizeof(*sctx));
+}
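The three static helpers above follow the usual init/update/final pattern; tf_get_current_process_hash() below drives them in exactly this way over the mapped executable. A minimal sketch over a plain buffer (the wrapper name is hypothetical):

/* Illustration: hash an in-memory buffer with the local SHA-1 helpers;
 * the digest is SHA1_DIGEST_SIZE (20) bytes, as defined in tf_util.h. */
static void example_sha1(const u8 *data, unsigned int len,
			 u8 digest[SHA1_DIGEST_SIZE])
{
	struct sha1_ctx ctx;

	sha1_init(&ctx);
	sha1_update(&ctx, data, len);
	sha1_final(&ctx, digest);
}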
+
+
+
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+/* This function computes a SHA-1 hash of the current process's
+ * executable, used for authentication. */
+int tf_get_current_process_hash(void *hash)
+{
+ int result = 0;
+ void *buffer;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+
+ buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buffer == NULL) {
+ dprintk(
+ KERN_ERR "tf_get_current_process_hash:"
+ " Out of memory for buffer!\n");
+ return -ENOMEM;
+ }
+
+ mm = current->mm;
+
+ down_read(&(mm->mmap_sem));
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
+ != NULL) {
+ struct dentry *dentry;
+ unsigned long start;
+ unsigned long cur;
+ unsigned long end;
+ struct sha1_ctx sha1;
+
+ dentry = dget(vma->vm_file->f_dentry);
+
+ dprintk(
+ KERN_DEBUG "tf_get_current_process_hash: "
+ "Found executable VMA for inode %lu "
+ "(%lu bytes).\n",
+ dentry->d_inode->i_ino,
+ (unsigned long) (dentry->d_inode->
+ i_size));
+
+ start = do_mmap(vma->vm_file, 0,
+ dentry->d_inode->i_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE, 0);
+			if (IS_ERR_VALUE(start)) {
+ dprintk(
+				KERN_ERR "tf_get_current_process_hash: "
+				"do_mmap failed (error %d)!\n",
+ (int) start);
+ dput(dentry);
+ result = -EFAULT;
+ goto vma_out;
+ }
+
+ end = start + dentry->d_inode->i_size;
+
+ sha1_init(&sha1);
+ cur = start;
+ while (cur < end) {
+ unsigned long chunk;
+
+ chunk = end - cur;
+ if (chunk > PAGE_SIZE)
+ chunk = PAGE_SIZE;
+ if (copy_from_user(buffer, (const void *) cur,
+ chunk) != 0) {
+ dprintk(
+ KERN_ERR "tf_get_current_"
+ "process_hash: copy_from_user "
+ "failed!\n");
+ result = -EINVAL;
+ (void) do_munmap(mm, start,
+ dentry->d_inode->i_size);
+ dput(dentry);
+ goto vma_out;
+ }
+ sha1_update(&sha1, buffer, chunk);
+ cur += chunk;
+ }
+ sha1_final(&sha1, hash);
+ result = 0;
+
+ (void) do_munmap(mm, start, dentry->d_inode->i_size);
+ dput(dentry);
+ break;
+ }
+ }
+vma_out:
+ up_read(&(mm->mmap_sem));
+
+ internal_kfree(buffer);
+
+ if (result == -ENOENT)
+ dprintk(
+ KERN_ERR "tf_get_current_process_hash: "
+ "No executable VMA found for process!\n");
+ return result;
+}
+
+#ifndef CONFIG_ANDROID
+/* This function hashes the path of the current application.
+ * If data is NULL, nothing else is added to the hash;
+ * otherwise data is hashed in as well.
+ */
+int tf_hash_application_path_and_data(char *buffer, void *data,
+ u32 data_len)
+{
+ int result = -ENOENT;
+ char *tmp = NULL;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+
+ tmp = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (tmp == NULL) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ mm = current->mm;
+
+ down_read(&(mm->mmap_sem));
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ if ((vma->vm_flags & VM_EXECUTABLE) != 0
+ && vma->vm_file != NULL) {
+ struct path *path;
+ char *endpath;
+ size_t pathlen;
+ struct sha1_ctx sha1;
+ u8 hash[SHA1_DIGEST_SIZE];
+
+ path = &vma->vm_file->f_path;
+
+ endpath = d_path(path, tmp, PAGE_SIZE);
+			if (IS_ERR(endpath)) {
+ result = PTR_ERR(endpath);
+ up_read(&(mm->mmap_sem));
+ goto end;
+ }
+ pathlen = (tmp + PAGE_SIZE) - endpath;
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ {
+ char *c;
+ dprintk(KERN_DEBUG "current process path = ");
+ for (c = endpath;
+ c < tmp + PAGE_SIZE;
+ c++)
+ dprintk("%c", *c);
+
+ dprintk(", uid=%d, euid=%d\n", current_uid(),
+ current_euid());
+ }
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+ sha1_init(&sha1);
+ sha1_update(&sha1, endpath, pathlen);
+ if (data != NULL) {
+ dprintk(KERN_INFO "current process path: "
+ "Hashing additional data\n");
+ sha1_update(&sha1, data, data_len);
+ }
+ sha1_final(&sha1, hash);
+ memcpy(buffer, hash, sizeof(hash));
+
+ result = 0;
+
+ break;
+ }
+ }
+ up_read(&(mm->mmap_sem));
+
+end:
+ if (tmp != NULL)
+ internal_kfree(tmp);
+
+ return result;
+}
+#endif /* !CONFIG_ANDROID */
+
+void *internal_kmalloc(size_t size, int priority)
+{
+ void *ptr;
+ struct tf_device *dev = tf_get_device();
+
+ ptr = kmalloc(size, priority);
+
+ if (ptr != NULL)
+ atomic_inc(
+ &dev->stats.stat_memories_allocated);
+
+ return ptr;
+}
+
+void internal_kfree(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+	kfree(ptr);
+}
+
+void internal_vunmap(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+
+ vunmap((void *) (((unsigned int)ptr) & 0xFFFFF000));
+}
+
+void *internal_vmalloc(size_t size)
+{
+ void *ptr;
+ struct tf_device *dev = tf_get_device();
+
+ ptr = vmalloc(size);
+
+ if (ptr != NULL)
+ atomic_inc(
+ &dev->stats.stat_memories_allocated);
+
+ return ptr;
+}
+
+void internal_vfree(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+	vfree(ptr);
+}
+
+unsigned long internal_get_zeroed_page(int priority)
+{
+ unsigned long result;
+ struct tf_device *dev = tf_get_device();
+
+ result = get_zeroed_page(priority);
+
+ if (result != 0)
+ atomic_inc(&dev->stats.
+ stat_pages_allocated);
+
+ return result;
+}
+
+void internal_free_page(unsigned long addr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (addr != 0)
+ atomic_dec(
+ &dev->stats.stat_pages_allocated);
+	free_page(addr);
+}
+
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ int result;
+ struct tf_device *dev = tf_get_device();
+
+ result = get_user_pages(
+ tsk,
+ mm,
+ start,
+ len,
+ write,
+ force,
+ pages,
+ vmas);
+
+ if (result > 0)
+ atomic_add(result,
+ &dev->stats.stat_pages_locked);
+
+ return result;
+}
+
+void internal_get_page(struct page *page)
+{
+ struct tf_device *dev = tf_get_device();
+
+ atomic_inc(&dev->stats.stat_pages_locked);
+
+ get_page(page);
+}
+
+void internal_page_cache_release(struct page *page)
+{
+ struct tf_device *dev = tf_get_device();
+
+ atomic_dec(&dev->stats.stat_pages_locked);
+
+ page_cache_release(page);
+}
+
+
diff --git a/security/tf_driver/tf_util.h b/security/tf_driver/tf_util.h
new file mode 100644
index 000000000000..14bc78952d86
--- /dev/null
+++ b/security/tf_driver/tf_util.h
@@ -0,0 +1,122 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_UTIL_H__
+#define __TF_UTIL_H__
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+extern unsigned tf_debug_level;
+
+void address_cache_property(unsigned long va);
+
+#define dprintk(args...) ((void)(tf_debug_level >= 6 ? printk(args) : 0))
+#define dpr_info(args...) ((void)(tf_debug_level >= 3 ? pr_info(args) : 0))
+#define dpr_err(args...) ((void)(tf_debug_level >= 1 ? pr_err(args) : 0))
+#define INFO(fmt, args...) \
+ (void)dprintk(KERN_INFO "%s: " fmt "\n", __func__, ## args)
+#define WARNING(fmt, args...) \
+ (tf_debug_level >= 3 ? \
+ printk(KERN_WARNING "%s: " fmt "\n", __func__, ## args) : \
+ (void)0)
+#define ERROR(fmt, args...) \
+ (tf_debug_level >= 1 ? \
+ printk(KERN_ERR "%s: " fmt "\n", __func__, ## args) : \
+ (void)0)
+void tf_trace_array(const char *fun, const char *msg,
+ const void *ptr, size_t len);
+#define TF_TRACE_ARRAY(ptr, len) \
+ (tf_debug_level >= 7 ? \
+ tf_trace_array(__func__, #ptr "/" #len, ptr, len) : \
+ 0)
+
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer);
+
+void tf_dump_command(union tf_command *command);
+
+void tf_dump_answer(union tf_answer *answer);
+
+#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define dprintk(args...) do { ; } while (0)
+#define dpr_info(args...) do { ; } while (0)
+#define dpr_err(args...) do { ; } while (0)
+#define INFO(fmt, args...) ((void)0)
+#define WARNING(fmt, args...) ((void)0)
+#define ERROR(fmt, args...) ((void)0)
+#define TF_TRACE_ARRAY(ptr, len) ((void)(ptr), (void)(len))
+#define tf_dump_l1_shared_buffer(buffer) ((void) 0)
+#define tf_dump_command(command) ((void) 0)
+#define tf_dump_answer(answer) ((void) 0)
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
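A short usage sketch of the macros above (function and buffer names are illustrative); with CONFIG_TF_DRIVER_DEBUG_SUPPORT disabled every call below compiles away:

/* Illustration only: typical call sites for the tracing macros. */
static void example_trace(const u8 *payload, size_t len)
{
	dpr_info("%s: processing %zu bytes\n", __func__, len);
	TF_TRACE_ARRAY(payload, len);	/* hex dump at tf_debug_level >= 7 */
	if (len == 0)
		dpr_err("%s: empty payload\n", __func__);
}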
+
+#define SHA1_DIGEST_SIZE 20
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+int tf_get_current_process_hash(void *hash);
+
+#ifndef CONFIG_ANDROID
+int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len);
+#endif /* !CONFIG_ANDROID */
+
+/*----------------------------------------------------------------------------
+ * Statistic computation
+ *----------------------------------------------------------------------------*/
+
+void *internal_kmalloc(size_t size, int priority);
+void internal_kfree(void *ptr);
+void internal_vunmap(void *ptr);
+void *internal_vmalloc(size_t size);
+void internal_vfree(void *ptr);
+unsigned long internal_get_zeroed_page(int priority);
+void internal_free_page(unsigned long addr);
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas);
+void internal_get_page(struct page *page);
+void internal_page_cache_release(struct page *page);
+#endif /* __TF_UTIL_H__ */
+
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index bb7e102d6726..522c4fd958dc 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -84,6 +84,20 @@ config SND_HDA_PATCH_LOADER
This option turns on hwdep and reconfig features automatically.
+config SND_HDA_PLATFORM_DRIVER
+ bool "Build platform driver interface for HD-audio driver"
+ help
+	  Say Y here to build a platform driver interface for the HD-audio
+	  driver.  This interface can be used by platforms whose Azalia
+	  controller is not attached to a PCI bus.
+
+config SND_HDA_PLATFORM_NVIDIA_TEGRA
+ bool "Build NVIDIA Tegra platform driver interface for HD-audio driver"
+ depends on SND_HDA_PLATFORM_DRIVER
+ help
+	  Say Y here to build the NVIDIA Tegra platform driver interface for
+	  the HD-audio driver.
+
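A board that wants the Tegra HD-audio path selects both new symbols, e.g. through a defconfig fragment like the following (shown for illustration only):

CONFIG_SND_HDA_PLATFORM_DRIVER=y
CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA=y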
config SND_HDA_CODEC_REALTEK
bool "Build Realtek HD-audio codec support"
default y
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 755f2b0f9d8e..8f801ae14a15 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -21,6 +21,8 @@
#ifndef __SOUND_HDA_CODEC_H
#define __SOUND_HDA_CODEC_H
+#include <linux/platform_device.h>
+
#include <sound/info.h>
#include <sound/control.h>
#include <sound/pcm.h>
@@ -617,6 +619,7 @@ struct hda_bus_ops {
struct hda_bus_template {
void *private_data;
struct pci_dev *pci;
+ struct platform_device *pdev;
const char *modelname;
int *power_save;
struct hda_bus_ops ops;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f6659751f0c9..79af22905195 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -46,6 +46,7 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/reboot.h>
+#include <linux/clk.h>
#include <sound/core.h>
#include <sound/initval.h>
#include "hda_codec.h"
@@ -321,6 +322,32 @@ enum {
#define NVIDIA_HDA_OSTRM_COH 0x4c
#define NVIDIA_HDA_ENABLE_COHBIT 0x01
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+/* Defines for Nvidia Tegra HDA support */
+#define NVIDIA_TEGRA_HDA_BAR0_OFFSET 0x8000
+
+#define NVIDIA_TEGRA_HDA_CFG_CMD_OFFSET 0x1004
+#define NVIDIA_TEGRA_HDA_CFG_BAR0_OFFSET 0x1010
+
+#define NVIDIA_TEGRA_HDA_ENABLE_IO_SPACE (1 << 0)
+#define NVIDIA_TEGRA_HDA_ENABLE_MEM_SPACE (1 << 1)
+#define NVIDIA_TEGRA_HDA_ENABLE_BUS_MASTER (1 << 2)
+#define NVIDIA_TEGRA_HDA_ENABLE_SERR (1 << 8)
+#define NVIDIA_TEGRA_HDA_DISABLE_INTR (1 << 10)
+#define NVIDIA_TEGRA_HDA_BAR0_INIT_PROGRAM 0xFFFFFFFF
+#define NVIDIA_TEGRA_HDA_BAR0_FINAL_PROGRAM (1 << 14)
+
+/* IPFS */
+#define NVIDIA_TEGRA_HDA_IPFS_CONFIG 0x180
+#define NVIDIA_TEGRA_HDA_IPFS_EN_FPCI 0x1
+
+#define NVIDIA_TEGRA_HDA_IPFS_FPCI_BAR0 0x80
+#define NVIDIA_TEGRA_HDA_FPCI_BAR0_START 0x40
+
+#define NVIDIA_TEGRA_HDA_IPFS_INTR_MASK 0x188
+#define NVIDIA_TEGRA_HDA_IPFS_EN_INTR (1 << 16)
+#endif /* CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA */
+
/* Defines for Intel SCH HDA snoop control */
#define INTEL_SCH_HDA_DEVC 0x78
#define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11)
@@ -388,6 +415,9 @@ struct azx_rb {
struct azx {
struct snd_card *card;
struct pci_dev *pci;
+ struct platform_device *pdev;
+ struct device *dev;
+ int irq_id;
int dev_index;
/* chip type specific */
@@ -402,8 +432,18 @@ struct azx {
/* pci resources */
unsigned long addr;
void __iomem *remap_addr;
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+ void __iomem *remap_config_addr;
+#endif
int irq;
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ /* platform driver clocks */
+ struct clk **platform_clks;
+ int platform_clk_count;
+ int platform_clk_enable;
+#endif
+
/* locks */
spinlock_t reg_lock;
struct mutex open_mutex;
@@ -460,6 +500,7 @@ enum {
AZX_DRIVER_SIS,
AZX_DRIVER_ULI,
AZX_DRIVER_NVIDIA,
+ AZX_DRIVER_NVIDIA_TEGRA,
AZX_DRIVER_TERA,
AZX_DRIVER_CTX,
AZX_DRIVER_GENERIC,
@@ -505,6 +546,7 @@ static char *driver_short_names[] __devinitdata = {
[AZX_DRIVER_SIS] = "HDA SIS966",
[AZX_DRIVER_ULI] = "HDA ULI M5461",
[AZX_DRIVER_NVIDIA] = "HDA NVidia",
+ [AZX_DRIVER_NVIDIA_TEGRA] = "HDA NVIDIA Tegra",
[AZX_DRIVER_TERA] = "HDA Teradici",
[AZX_DRIVER_CTX] = "HDA Creative",
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
@@ -513,6 +555,48 @@ static char *driver_short_names[] __devinitdata = {
/*
* macros for easy use
*/
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+#define MASK_LONG_ALIGN 0x3UL
+#define SHIFT_BYTE 3
+#define SHIFT_BITS(reg) ((reg & MASK_LONG_ALIGN) << SHIFT_BYTE)
+#define ADDR_ALIGN_L(base, reg) (base + (reg & ~MASK_LONG_ALIGN))
+#define MASK(bits) (BIT(bits) - 1)
+#define MASK_REG(reg, bits) (MASK(bits) << SHIFT_BITS(reg))
+
+#define tegra_write(base, reg, val, bits) \
+ writel((readl(ADDR_ALIGN_L(base, reg)) & ~MASK_REG(reg, bits)) | \
+ ((val) << SHIFT_BITS(reg)), ADDR_ALIGN_L(base, reg))
+
+#define tegra_read(base, reg, bits) \
+ ((readl(ADDR_ALIGN_L(base, reg)) >> SHIFT_BITS(reg)) & MASK(bits))
+
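To make the address math concrete: sub-word HDA register accesses are turned into a read-modify-write of the aligned 32-bit word. A worked expansion for an 8-bit write at a hypothetical register offset of 0x4D:

/*
 * tegra_write(base, 0x4D, val, 8) expands as follows:
 *   SHIFT_BITS(0x4D)         = (0x4D & 0x3) << 3 = 8
 *   ADDR_ALIGN_L(base, 0x4D) = base + 0x4C
 *   MASK_REG(0x4D, 8)        = 0xFF << 8 = 0x0000FF00
 * i.e. the word at base + 0x4C is read, bits 15:8 are cleared, and
 * (val << 8) is or-ed in before the word is written back.
 */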
+#define azx_writel(chip, reg, value) \
+ writel(value, (chip)->remap_addr + ICH6_REG_##reg)
+#define azx_readl(chip, reg) \
+ readl((chip)->remap_addr + ICH6_REG_##reg)
+#define azx_writew(chip, reg, value) \
+ tegra_write((chip)->remap_addr, ICH6_REG_##reg, value, 16)
+#define azx_readw(chip, reg) \
+ tegra_read((chip)->remap_addr, ICH6_REG_##reg, 16)
+#define azx_writeb(chip, reg, value) \
+ tegra_write((chip)->remap_addr, ICH6_REG_##reg, value, 8)
+#define azx_readb(chip, reg) \
+ tegra_read((chip)->remap_addr, ICH6_REG_##reg, 8)
+
+#define azx_sd_writel(dev, reg, value) \
+ writel(value, (dev)->sd_addr + ICH6_REG_##reg)
+#define azx_sd_readl(dev, reg) \
+ readl((dev)->sd_addr + ICH6_REG_##reg)
+#define azx_sd_writew(dev, reg, value) \
+ tegra_write((dev)->sd_addr, ICH6_REG_##reg, value, 16)
+#define azx_sd_readw(dev, reg) \
+ tegra_read((dev)->sd_addr, ICH6_REG_##reg, 16)
+#define azx_sd_writeb(dev, reg, value) \
+ tegra_write((dev)->sd_addr, ICH6_REG_##reg, value, 8)
+#define azx_sd_readb(dev, reg) \
+ tegra_read((dev)->sd_addr, ICH6_REG_##reg, 8)
+
+#else /* CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA */
#define azx_writel(chip,reg,value) \
writel(value, (chip)->remap_addr + ICH6_REG_##reg)
#define azx_readl(chip,reg) \
@@ -539,6 +623,8 @@ static char *driver_short_names[] __devinitdata = {
#define azx_sd_readb(dev,reg) \
readb((dev)->sd_addr + ICH6_REG_##reg)
+#endif /* CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA */
+
/* for pcm support */
#define get_azx_dev(substream) (substream->runtime->private_data)
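The Tegra-specific azx_read*/azx_write* wrappers above emulate 8- and 16-bit register accesses with aligned 32-bit MMIO, on the assumption that the Tegra HDA register window only tolerates word-sized accesses. A minimal stand-alone sketch of the same arithmetic, in plain C with hypothetical helper names (real MMIO code would go through readl()/writel() as the macros do):

#include <stdint.h>

/* Bit offset of a byte/halfword register inside its enclosing 32-bit word
 * (mirrors SHIFT_BITS() above). */
static inline unsigned int subword_shift(uint32_t reg)
{
	return (reg & 0x3u) * 8u;
}

/* Read a field 'bits' wide at byte offset 'reg' using only an aligned
 * 32-bit load, mirroring tegra_read(). */
static inline uint32_t subword_read(const volatile uint32_t *base,
				    uint32_t reg, unsigned int bits)
{
	uint32_t mask = (bits >= 32) ? 0xFFFFFFFFu : ((1u << bits) - 1u);

	return (base[reg >> 2] >> subword_shift(reg)) & mask;
}

/* Read-modify-write the enclosing aligned word, mirroring tegra_write(). */
static inline void subword_write(volatile uint32_t *base, uint32_t reg,
				 uint32_t val, unsigned int bits)
{
	uint32_t mask = ((bits >= 32) ? 0xFFFFFFFFu : ((1u << bits) - 1u))
			<< subword_shift(reg);

	base[reg >> 2] = (base[reg >> 2] & ~mask)
			 | ((val << subword_shift(reg)) & mask);
}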
@@ -557,7 +643,7 @@ static int azx_alloc_cmd_io(struct azx *chip)
/* single page (at least 4096 bytes) must suffice for both ringbuffers */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ chip->dev,
PAGE_SIZE, &chip->rb);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate CORB/RIRB\n");
@@ -1131,6 +1217,83 @@ static void azx_init_pci(struct azx *chip)
}
}
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+/*
+ * initialize the platform specific registers
+ */
+static void reg_update_bits(void __iomem *base, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ unsigned int data;
+
+ data = readl(base + reg);
+ data &= ~mask;
+ data |= (val & mask);
+ writel(data, base + reg);
+}
+
+static void azx_init_platform(struct azx *chip)
+{
+ switch (chip->driver_type) {
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+ case AZX_DRIVER_NVIDIA_TEGRA:
+ /* Enable the PCI access */
+ reg_update_bits(chip->remap_config_addr,
+ NVIDIA_TEGRA_HDA_IPFS_CONFIG,
+ NVIDIA_TEGRA_HDA_IPFS_EN_FPCI,
+ NVIDIA_TEGRA_HDA_IPFS_EN_FPCI);
+ /* Enable MEM/IO space and bus master */
+ reg_update_bits(chip->remap_config_addr,
+ NVIDIA_TEGRA_HDA_CFG_CMD_OFFSET, 0x507,
+ NVIDIA_TEGRA_HDA_ENABLE_MEM_SPACE |
+ NVIDIA_TEGRA_HDA_ENABLE_IO_SPACE |
+ NVIDIA_TEGRA_HDA_ENABLE_BUS_MASTER |
+ NVIDIA_TEGRA_HDA_ENABLE_SERR);
+ reg_update_bits(chip->remap_config_addr,
+ NVIDIA_TEGRA_HDA_CFG_BAR0_OFFSET, 0xFFFFFFFF,
+ NVIDIA_TEGRA_HDA_BAR0_INIT_PROGRAM);
+ reg_update_bits(chip->remap_config_addr,
+ NVIDIA_TEGRA_HDA_CFG_BAR0_OFFSET, 0xFFFFFFFF,
+ NVIDIA_TEGRA_HDA_BAR0_FINAL_PROGRAM);
+ reg_update_bits(chip->remap_config_addr,
+ NVIDIA_TEGRA_HDA_IPFS_FPCI_BAR0, 0xFFFFFFFF,
+ NVIDIA_TEGRA_HDA_FPCI_BAR0_START);
+ reg_update_bits(chip->remap_config_addr,
+ NVIDIA_TEGRA_HDA_IPFS_INTR_MASK,
+ NVIDIA_TEGRA_HDA_IPFS_EN_INTR,
+ NVIDIA_TEGRA_HDA_IPFS_EN_INTR);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ return;
+}
+
+static void azx_platform_enable_clocks(struct azx *chip)
+{
+ int i;
+
+ for (i = 0; i < chip->platform_clk_count; i++)
+ clk_enable(chip->platform_clks[i]);
+
+ chip->platform_clk_enable++;
+}
+
+static void azx_platform_disable_clocks(struct azx *chip)
+{
+ int i;
+
+ if (!chip->platform_clk_enable)
+ return;
+
+ for (i = 0; i < chip->platform_clk_count; i++)
+ clk_disable(chip->platform_clks[i]);
+
+ chip->platform_clk_enable--;
+}
+#endif /* CONFIG_SND_HDA_PLATFORM_DRIVER */
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev);
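platform_clk_enable above is a nesting counter rather than a per-clock refcount: each azx_platform_enable_clocks() call turns on every controller clock and bumps the counter, and azx_platform_disable_clocks() becomes a no-op once it reaches zero. A hedged sketch of the intended balanced usage (the callers below are hypothetical; the suspend path in this patch drains the counter the same way):

/* Hypothetical caller: bracket register access with the clock helpers. */
static void example_touch_registers(struct azx *chip)
{
	azx_platform_enable_clocks(chip);	/* counter: n -> n + 1 */

	/* ... MMIO via chip->remap_addr is safe while the counter is > 0 ... */

	azx_platform_disable_clocks(chip);	/* counter: n + 1 -> n */
}

/* Drop every outstanding reference so the clocks really stop, as
 * azx_suspend() below does with its while loop. */
static void example_power_down(struct azx *chip)
{
	while (chip->platform_clk_enable)
		azx_platform_disable_clocks(chip);
}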
@@ -1437,6 +1600,7 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
bus_temp.private_data = chip;
bus_temp.modelname = model;
bus_temp.pci = chip->pci;
+ bus_temp.pdev = chip->pdev;
bus_temp.ops.command = azx_send_cmd;
bus_temp.ops.get_response = azx_get_response;
bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
@@ -2126,7 +2290,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ chip->dev,
size, MAX_PREALLOC_SIZE);
return 0;
}
@@ -2168,17 +2332,19 @@ static int __devinit azx_init_stream(struct azx *chip)
static int azx_acquire_irq(struct azx *chip, int do_disconnect)
{
- if (request_irq(chip->pci->irq, azx_interrupt,
+ if (request_irq(chip->irq_id, azx_interrupt,
chip->msi ? 0 : IRQF_SHARED,
KBUILD_MODNAME, chip)) {
printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
- "disabling device\n", chip->pci->irq);
+ "disabling device\n", chip->irq_id);
if (do_disconnect)
snd_card_disconnect(chip->card);
return -1;
}
- chip->irq = chip->pci->irq;
- pci_intx(chip->pci, !chip->msi);
+ chip->irq = chip->irq_id;
+ if (chip->pci)
+ pci_intx(chip->pci, !chip->msi);
+
return 0;
}
@@ -2216,11 +2382,19 @@ static void azx_power_notify(struct hda_bus *bus)
break;
}
}
- if (power_on)
+ if (power_on) {
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ azx_platform_enable_clocks(chip);
+#endif
azx_init_chip(chip, 1);
+ }
else if (chip->running && power_save_controller &&
- !bus->power_keep_link_on)
+ !bus->power_keep_link_on) {
azx_stop_chip(chip);
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ azx_platform_disable_clocks(chip);
+#endif
+ }
}
#endif /* CONFIG_SND_HDA_POWER_SAVE */
@@ -2240,12 +2414,17 @@ static int snd_hda_codecs_inuse(struct hda_bus *bus)
return 0;
}
-static int azx_suspend(struct pci_dev *pci, pm_message_t state)
+static int azx_suspend(struct azx *chip, pm_message_t state)
{
- struct snd_card *card = pci_get_drvdata(pci);
- struct azx *chip = card->private_data;
+ struct snd_card *card = chip->card;
int i;
+#if defined(CONFIG_SND_HDA_PLATFORM_DRIVER) && \
+ defined(CONFIG_SND_HDA_POWER_SAVE)
+ if (chip->pdev)
+ azx_platform_enable_clocks(chip);
+#endif
+
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_clear_irq_pending(chip);
for (i = 0; i < HDA_MAX_PCMS; i++)
@@ -2257,42 +2436,123 @@ static int azx_suspend(struct pci_dev *pci, pm_message_t state)
free_irq(chip->irq, chip);
chip->irq = -1;
}
- if (chip->msi)
- pci_disable_msi(chip->pci);
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+
+ if (chip->pci) {
+ if (chip->msi)
+ pci_disable_msi(chip->pci);
+ pci_disable_device(chip->pci);
+ pci_save_state(chip->pci);
+ pci_set_power_state(chip->pci,
+ pci_choose_state(chip->pci, state));
+ }
+
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ if (chip->pdev) {
+ /* Disable all clk references */
+ while (chip->platform_clk_enable)
+ azx_platform_disable_clocks(chip);
+ }
+#endif
+
return 0;
}
-static int azx_resume(struct pci_dev *pci)
+static int azx_resume(struct azx *chip)
{
- struct snd_card *card = pci_get_drvdata(pci);
- struct azx *chip = card->private_data;
+ struct snd_card *card = chip->card;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- printk(KERN_ERR "hda-intel: pci_enable_device failed, "
- "disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ if (chip->pdev)
+ azx_platform_enable_clocks(chip);
+#endif
+
+ if (chip->pci) {
+ pci_set_power_state(chip->pci, PCI_D0);
+ pci_restore_state(chip->pci);
+ if (pci_enable_device(chip->pci) < 0) {
+ printk(KERN_ERR "hda-intel: pci_enable_device failed, "
+ "disabling device\n");
+ snd_card_disconnect(card);
+ return -EIO;
+ }
+ pci_set_master(chip->pci);
+ if (chip->msi)
+ if (pci_enable_msi(chip->pci) < 0)
+ chip->msi = 0;
}
- pci_set_master(pci);
- if (chip->msi)
- if (pci_enable_msi(pci) < 0)
- chip->msi = 0;
+
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
- azx_init_pci(chip);
+
+ if (chip->pci)
+ azx_init_pci(chip);
+
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ if (chip->pdev)
+ azx_init_platform(chip);
+#endif
if (snd_hda_codecs_inuse(chip->bus))
azx_init_chip(chip, 1);
+#if defined(CONFIG_SND_HDA_PLATFORM_DRIVER) && \
+ defined(CONFIG_SND_HDA_POWER_SAVE)
+ else if (chip->driver_type == AZX_DRIVER_NVIDIA_TEGRA) {
+ struct hda_bus *bus = chip->bus;
+ struct hda_codec *c;
+
+ list_for_each_entry(c, &bus->codec_list, list) {
+ snd_hda_power_up(c);
+ snd_hda_power_down(c);
+ }
+ }
+#endif
snd_hda_resume(chip->bus);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+#if defined(CONFIG_SND_HDA_PLATFORM_DRIVER) && \
+ defined(CONFIG_SND_HDA_POWER_SAVE)
+ if (chip->pdev)
+ azx_platform_disable_clocks(chip);
+#endif
+
return 0;
}
+
+static int azx_suspend_pci(struct pci_dev *pci, pm_message_t state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+
+ return azx_suspend(chip, state);
+}
+
+static int azx_resume_pci(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+
+ return azx_resume(chip);
+}
+
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+static int azx_suspend_platform(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct snd_card *card = dev_get_drvdata(&pdev->dev);
+ struct azx *chip = card->private_data;
+
+ return azx_suspend(chip, state);
+}
+
+static int azx_resume_platform(struct platform_device *pdev)
+{
+ struct snd_card *card = dev_get_drvdata(&pdev->dev);
+ struct azx *chip = card->private_data;
+
+ return azx_resume(chip);
+}
+#endif /* CONFIG_SND_HDA_PLATFORM_DRIVER */
#endif /* CONFIG_PM */
@@ -2302,8 +2562,22 @@ static int azx_resume(struct pci_dev *pci)
static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
struct azx *chip = container_of(nb, struct azx, reboot_notifier);
+
+#if defined(CONFIG_SND_HDA_PLATFORM_DRIVER) && \
+ defined(CONFIG_SND_HDA_POWER_SAVE)
+ if (chip->pdev)
+ azx_platform_enable_clocks(chip);
+#endif
+
snd_hda_bus_reboot_notify(chip->bus);
azx_stop_chip(chip);
+
+#if defined(CONFIG_SND_HDA_PLATFORM_DRIVER) && \
+ defined(CONFIG_SND_HDA_POWER_SAVE)
+ if (chip->pdev)
+ azx_platform_disable_clocks(chip);
+#endif
+
return NOTIFY_OK;
}
@@ -2335,9 +2609,15 @@ static int azx_free(struct azx *chip)
azx_stop_chip(chip);
}
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ azx_platform_disable_clocks(chip);
+ for (i = 0; i < chip->platform_clk_count; i++)
+ clk_put(chip->platform_clks[i]);
+#endif
+
if (chip->irq >= 0)
free_irq(chip->irq, (void*)chip);
- if (chip->msi)
+ if (chip->pci && chip->msi)
pci_disable_msi(chip->pci);
if (chip->remap_addr)
iounmap(chip->remap_addr);
@@ -2351,8 +2631,10 @@ static int azx_free(struct azx *chip)
snd_dma_free_pages(&chip->rb);
if (chip->posbuf.area)
snd_dma_free_pages(&chip->posbuf);
- pci_release_regions(chip->pci);
- pci_disable_device(chip->pci);
+ if (chip->pci) {
+ pci_release_regions(chip->pci);
+ pci_disable_device(chip->pci);
+ }
kfree(chip->azx_dev);
kfree(chip);
@@ -2397,13 +2679,15 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
return fix;
}
- q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
- if (q) {
- printk(KERN_INFO
- "hda_intel: position_fix set to %d "
- "for device %04x:%04x\n",
- q->value, q->subvendor, q->subdevice);
- return q->value;
+ if (chip->pci) {
+ q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
+ if (q) {
+ printk(KERN_INFO
+ "hda_intel: position_fix set to %d "
+ "for device %04x:%04x\n",
+ q->value, q->subvendor, q->subdevice);
+ return q->value;
+ }
}
/* Check VIA/ATI HD Audio Controller exist */
@@ -2445,7 +2729,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
const struct snd_pci_quirk *q;
chip->codec_probe_mask = probe_mask[dev];
- if (chip->codec_probe_mask == -1) {
+ if (chip->pci && (chip->codec_probe_mask == -1)) {
q = snd_pci_quirk_lookup(chip->pci, probe_mask_list);
if (q) {
printk(KERN_INFO
@@ -2481,6 +2765,12 @@ static void __devinit check_msi(struct azx *chip)
{
const struct snd_pci_quirk *q;
+ /* Disable MSI if chip is not a pci device */
+ if (!chip->pci) {
+ chip->msi = 0;
+ return;
+ }
+
if (enable_msi >= 0) {
chip->msi = !!enable_msi;
return;
@@ -2502,11 +2792,20 @@ static void __devinit check_msi(struct azx *chip)
}
}
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+static const char *tegra_clk_names[] __devinitdata = {
+ "hda",
+ "hda2codec",
+ "hda2hdmi",
+};
+static struct clk *tegra_clks[ARRAY_SIZE(tegra_clk_names)];
+#endif
/*
* constructor
*/
static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
+ struct platform_device *pdev,
int dev, unsigned int driver_caps,
struct azx **rchip)
{
@@ -2519,14 +2818,17 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
*rchip = NULL;
- err = pci_enable_device(pci);
- if (err < 0)
- return err;
+ if (pci) {
+ err = pci_enable_device(pci);
+ if (err < 0)
+ return err;
+ }
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip) {
snd_printk(KERN_ERR SFX "cannot allocate chip\n");
- pci_disable_device(pci);
+ if (pci)
+ pci_disable_device(pci);
return -ENOMEM;
}
@@ -2534,6 +2836,9 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
mutex_init(&chip->open_mutex);
chip->card = card;
chip->pci = pci;
+ chip->pdev = pdev;
+ chip->dev = pci ? snd_dma_pci_data(pci) : &pdev->dev;
+ chip->irq_id = pci ? pci->irq : platform_get_irq(pdev, 0);
chip->irq = -1;
chip->driver_caps = driver_caps;
chip->driver_type = driver_caps & 0xff;
@@ -2569,38 +2874,105 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
}
#endif
- err = pci_request_regions(pci, "ICH HD audio");
- if (err < 0) {
- kfree(chip);
- pci_disable_device(pci);
- return err;
- }
+ if (chip->pci) {
+ err = pci_request_regions(pci, "ICH HD audio");
+ if (err < 0) {
+ kfree(chip);
+ pci_disable_device(pci);
+ return err;
+ }
- chip->addr = pci_resource_start(pci, 0);
- chip->remap_addr = pci_ioremap_bar(pci, 0);
- if (chip->remap_addr == NULL) {
- snd_printk(KERN_ERR SFX "ioremap error\n");
- err = -ENXIO;
- goto errout;
+ chip->addr = pci_resource_start(pci, 0);
+ chip->remap_addr = pci_ioremap_bar(pci, 0);
+ if (chip->remap_addr == NULL) {
+ snd_printk(KERN_ERR SFX "ioremap error\n");
+ err = -ENXIO;
+ goto errout;
+ }
+
+ if (chip->msi)
+ if (pci_enable_msi(pci) < 0)
+ chip->msi = 0;
}
- if (chip->msi)
- if (pci_enable_msi(pci) < 0)
- chip->msi = 0;
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ if (chip->pdev) {
+ struct resource *res, *region;
+
+ /* Do platform specific initialization */
+ switch (chip->driver_type) {
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+ case AZX_DRIVER_NVIDIA_TEGRA:
+ chip->platform_clk_count = ARRAY_SIZE(tegra_clk_names);
+ for (i = 0; i < chip->platform_clk_count; i++) {
+ tegra_clks[i] = clk_get(&pdev->dev,
+ tegra_clk_names[i]);
+ if (IS_ERR_OR_NULL(tegra_clks[i])) {
+ err = PTR_ERR(tegra_clks[i]) ?: -ENODEV;
+ goto errout;
+ }
+ }
+ chip->platform_clks = tegra_clks;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ azx_platform_enable_clocks(chip);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ region = devm_request_mem_region(chip->dev, res->start,
+ resource_size(res),
+ pdev->name);
+ if (!region) {
+ snd_printk(KERN_ERR SFX "Mem region already claimed\n");
+ err = -EINVAL;
+ goto errout;
+ }
+
+ chip->addr = res->start;
+ chip->remap_addr = devm_ioremap(chip->dev,
+ res->start,
+ resource_size(res));
+ if (chip->remap_addr == NULL) {
+ snd_printk(KERN_ERR SFX "ioremap error\n");
+ err = -ENXIO;
+ goto errout;
+ }
+
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+ if (chip->driver_type == AZX_DRIVER_NVIDIA_TEGRA) {
+ chip->remap_config_addr = chip->remap_addr;
+ chip->remap_addr += NVIDIA_TEGRA_HDA_BAR0_OFFSET;
+ chip->addr += NVIDIA_TEGRA_HDA_BAR0_OFFSET;
+ }
+#endif
+
+ azx_init_platform(chip);
+ }
+#endif /* CONFIG_SND_HDA_PLATFORM_DRIVER */
if (azx_acquire_irq(chip, 0) < 0) {
err = -EBUSY;
goto errout;
}
- pci_set_master(pci);
+ if (chip->pci)
+ pci_set_master(pci);
+
synchronize_irq(chip->irq);
gcap = azx_readw(chip, GCAP);
snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
/* disable SB600 64bit support for safety */
- if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
+ if (chip->pci && chip->pci->vendor == PCI_VENDOR_ID_ATI) {
struct pci_dev *p_smbus;
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
@@ -2618,12 +2990,15 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
gcap &= ~ICH6_GCAP_64OK;
}
- /* allow 64bit DMA address if supported by H/W */
- if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
- pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
- else {
- pci_set_dma_mask(pci, DMA_BIT_MASK(32));
- pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
+ if (chip->pci) {
+ /* allow 64bit DMA address if supported by H/W */
+ if ((gcap & ICH6_GCAP_64OK) &&
+ !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
+ else {
+ pci_set_dma_mask(pci, DMA_BIT_MASK(32));
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
+ }
}
/* read number of streams from GCAP register instead of using
@@ -2663,7 +3038,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
for (i = 0; i < chip->num_streams; i++) {
/* allocate memory for the BDL for each stream */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ chip->dev,
BDL_SIZE, &chip->azx_dev[i].bdl);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
@@ -2672,7 +3047,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
}
/* allocate memory for the position buffer */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ chip->dev,
chip->num_streams * 8, &chip->posbuf);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
@@ -2687,7 +3062,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
azx_init_stream(chip);
/* initialize chip */
- azx_init_pci(chip);
+ if (chip->pci)
+ azx_init_pci(chip);
azx_init_chip(chip, (probe_only[dev] & 2) == 0);
/* codec detection */
@@ -2732,11 +3108,13 @@ static void power_down_all_codecs(struct azx *chip)
}
static int __devinit azx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+ struct platform_device *pdev,
+ int driver_data)
{
static int dev;
struct snd_card *card;
struct azx *chip;
+ struct device *azx_dev = pci ? &pci->dev : &pdev->dev;
int err;
if (dev >= SNDRV_CARDS)
@@ -2753,9 +3131,9 @@ static int __devinit azx_probe(struct pci_dev *pci,
}
/* set this here since it's referred in snd_hda_load_patch() */
- snd_card_set_dev(card, &pci->dev);
+ snd_card_set_dev(card, azx_dev);
- err = azx_create(card, pci, dev, pci_id->driver_data, &chip);
+ err = azx_create(card, pci, pdev, dev, driver_data, &chip);
if (err < 0)
goto out_free;
card->private_data = chip;
@@ -2797,7 +3175,11 @@ static int __devinit azx_probe(struct pci_dev *pci,
if (err < 0)
goto out_free;
- pci_set_drvdata(pci, card);
+ if (pci)
+ pci_set_drvdata(pci, card);
+ else
+ dev_set_drvdata(&pdev->dev, card);
+
chip->running = 1;
power_down_all_codecs(chip);
azx_notifier_register(chip);
@@ -2809,14 +3191,20 @@ out_free:
return err;
}
-static void __devexit azx_remove(struct pci_dev *pci)
+static int __devinit azx_probe_pci(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return azx_probe(pci, NULL, pci_id->driver_data);
+}
+
+static void __devexit azx_remove_pci(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
}
/* PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(azx_pci_ids) = {
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
@@ -2934,27 +3322,85 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, azx_ids);
+MODULE_DEVICE_TABLE(pci, azx_pci_ids);
/* pci_driver definition */
static struct pci_driver driver = {
.name = KBUILD_MODNAME,
- .id_table = azx_ids,
- .probe = azx_probe,
- .remove = __devexit_p(azx_remove),
+ .id_table = azx_pci_ids,
+ .probe = azx_probe_pci,
+ .remove = __devexit_p(azx_remove_pci),
#ifdef CONFIG_PM
- .suspend = azx_suspend,
- .resume = azx_resume,
+ .suspend = azx_suspend_pci,
+ .resume = azx_resume_pci,
#endif
};
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+static int __devinit azx_probe_platform(struct platform_device *pdev)
+{
+ const struct platform_device_id *pdev_id = platform_get_device_id(pdev);
+
+ return azx_probe(NULL, pdev, pdev_id->driver_data);
+}
+
+static int __devexit azx_remove_platform(struct platform_device *pdev)
+{
+ return snd_card_free(dev_get_drvdata(&pdev->dev));
+}
+
+static const struct platform_device_id azx_platform_ids[] = {
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+ { "tegra30-hda",
+ .driver_data = AZX_DRIVER_NVIDIA_TEGRA | AZX_DCAPS_RIRB_DELAY },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(platform, azx_platform_ids);
+
+/* platform_driver definition */
+static struct platform_driver driver_platform = {
+ .driver = {
+ .name = "hda-platform"
+ },
+ .probe = azx_probe_platform,
+ .remove = __devexit_p(azx_remove_platform),
+ .id_table = azx_platform_ids,
+#ifdef CONFIG_PM
+ .suspend = azx_suspend_platform,
+ .resume = azx_resume_platform,
+#endif
+};
+#endif /* CONFIG_SND_HDA_PLATFORM_DRIVER */
+
static int __init alsa_card_azx_init(void)
{
- return pci_register_driver(&driver);
+ int err = 0;
+
+ err = pci_register_driver(&driver);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX "Failed to register pci driver\n");
+ return err;
+ }
+
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ err = platform_driver_register(&driver_platform);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX "Failed to register platform driver\n");
+ pci_unregister_driver(&driver);
+ return err;
+ }
+#endif
+
+ return 0;
}
static void __exit alsa_card_azx_exit(void)
{
+#ifdef CONFIG_SND_HDA_PLATFORM_DRIVER
+ platform_driver_unregister(&driver_platform);
+#endif
+
pci_unregister_driver(&driver);
}
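For the new platform path to bind, the SoC/board code has to register a platform device whose name matches the "tegra30-hda" entry in azx_platform_ids. That registration is not part of this patch; the sketch below is only illustrative and uses placeholder MMIO/IRQ resources, not verified Tegra30 values:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource tegra_hda_resources[] = {
	{
		.start	= 0x70030000,			/* placeholder MMIO base */
		.end	= 0x70030000 + 0x10000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 81,				/* placeholder IRQ */
		.end	= 81,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tegra_hda_device = {
	.name		= "tegra30-hda",
	.id		= -1,
	.resource	= tegra_hda_resources,
	.num_resources	= ARRAY_SIZE(tegra_hda_resources),
};

/* e.g. from the board init code: platform_device_register(&tegra_hda_device); */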
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index e287015cbbb3..b03efbc17132 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -34,6 +34,11 @@
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/jack.h>
+
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+#include <mach/hdmi-audio.h>
+#endif
+
#include "hda_codec.h"
#include "hda_local.h"
@@ -865,6 +870,34 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
hinfo->formats = per_cvt->formats;
hinfo->maxbps = per_cvt->maxbps;
+#ifdef CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA
+ if ((codec->preset->id == 0x10de0020) &&
+ (!eld->eld_valid || !eld->sad_count)) {
+ int err = 0;
+ unsigned long timeout;
+
+ if (!eld->eld_valid) {
+ err = tegra_hdmi_setup_hda_presence();
+ if (err < 0) {
+ snd_printk(KERN_WARNING
+ "HDMI: No HDMI device connected\n");
+ return -ENODEV;
+ }
+ }
+
+ timeout = jiffies + msecs_to_jiffies(5000);
+ for (;;) {
+ if (eld->eld_valid && eld->sad_count)
+ break;
+
+ if (time_after(jiffies, timeout))
+ break;
+
+ msleep(10);
+ }
+ }
+#endif
+
/* Restrict capabilities by ELD if this isn't disabled */
if (!static_hdmi_pcm && eld->eld_valid) {
snd_hdmi_eld_update_pcm_info(eld, hinfo);
@@ -1057,8 +1090,8 @@ static int hdmi_parse_codec(struct hda_codec *codec)
* HDA link is powered off at hot plug or hw initialization time.
*/
#ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!(snd_hda_param_read(codec, codec->afg, AC_PAR_POWER_STATE) &
- AC_PWRST_EPSS))
+ if ((!(snd_hda_param_read(codec, codec->afg, AC_PAR_POWER_STATE) &
+ AC_PWRST_EPSS)) && (codec->preset->id != 0x10de0020))
codec->bus->power_keep_link_on = 1;
#endif
@@ -1089,6 +1122,21 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
int pin_idx = hinfo_to_pin_index(spec, hinfo);
hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
+#if defined(CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA) && defined(CONFIG_TEGRA_DC)
+ if (codec->preset->id == 0x10de0020) {
+ int err = 0;
+ /* Set HDMI audio freq and source selection */
+ err = tegra_hdmi_setup_audio_freq_source(
+ substream->runtime->rate, HDA);
+ if (err < 0) {
+ snd_printk(KERN_ERR
+ "Unable to set hdmi audio freq to %d\n",
+ substream->runtime->rate);
+ return err;
+ }
+ }
+#endif
+
hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
hdmi_setup_audio_infoframe(codec, pin_idx, substream);
@@ -1189,6 +1237,14 @@ static int generic_hdmi_init(struct hda_codec *codec)
struct hdmi_spec *spec = codec->spec;
int pin_idx;
+ switch (codec->preset->id) {
+ case 0x10de0020:
+ snd_hda_codec_write(codec, 4, 0,
+ AC_VERB_SET_DIGI_CONVERT_1, 0x11);
+ default:
+ break;
+ }
+
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
hda_nid_t pin_nid = per_pin->pin_nid;
@@ -1803,6 +1859,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0020, .name = "Tegra30 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
@@ -1848,6 +1905,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0019");
MODULE_ALIAS("snd-hda-codec-id:10de001a");
MODULE_ALIAS("snd-hda-codec-id:10de001b");
MODULE_ALIAS("snd-hda-codec-id:10de001c");
+MODULE_ALIAS("snd-hda-codec-id:10de0020");
MODULE_ALIAS("snd-hda-codec-id:10de0040");
MODULE_ALIAS("snd-hda-codec-id:10de0041");
MODULE_ALIAS("snd-hda-codec-id:10de0042");
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 665d9240c4ae..31fe04ddf19c 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -39,6 +39,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9877 if I2C
select SND_SOC_PCM3008
+ select SND_SOC_RT5640 if I2C
select SND_SOC_SGTL5000 if I2C
select SND_SOC_SN95031 if INTEL_SCU_IPC
select SND_SOC_SPDIF
@@ -48,6 +49,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TLV320AIC23 if I2C
select SND_SOC_TLV320AIC26 if SPI_MASTER
select SND_SOC_TVL320AIC32X4 if I2C
+ select SND_SOC_TLV320AIC326X if I2C
select SND_SOC_TLV320AIC3X if I2C
select SND_SOC_TPA6130A2 if I2C
select SND_SOC_TLV320DAC33 if I2C
@@ -214,6 +216,9 @@ config SND_SOC_MAX9850
config SND_SOC_PCM3008
tristate
+config SND_SOC_RT5640
+ tristate
+
#Freescale sgtl5000 codec
config SND_SOC_SGTL5000
tristate
@@ -246,6 +251,9 @@ config SND_SOC_TVL320AIC32X4
config SND_SOC_TLV320AIC3X
tristate
+config SND_SOC_TLV320AIC326X
+ tristate "TI AIC326x Codec"
+
config SND_SOC_TLV320DAC33
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 5119a7e2c1a8..e797b1717680 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -35,6 +35,8 @@ snd-soc-stac9766-objs := stac9766.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic26-objs := tlv320aic26.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
+snd-soc-tlv320aic326x-objs := tlv320aic326x.o tlv320aic326x_minidsp_config.o
+snd-soc-tlv320aic326x-objs += tlv320aic326x_mini-dsp.o
snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
snd-soc-twl4030-objs := twl4030.o
@@ -86,6 +88,7 @@ snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
snd-soc-jz4740-codec-objs := jz4740.o
+snd-soc-rt5640-objs := rt5640.o
# Amp
snd-soc-lm4857-objs := lm4857.o
@@ -133,6 +136,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TVL320AIC32X4) += snd-soc-tlv320aic32x4.o
+obj-$(CONFIG_SND_SOC_TLV320AIC326X) += snd-soc-tlv320aic326x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
@@ -182,6 +186,7 @@ obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o
obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o
obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
+obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
# Amp
obj-$(CONFIG_SND_SOC_LM4857) += snd-soc-lm4857.o
diff --git a/sound/soc/codecs/Patch_base_jazz_Rate48_pps_driver.h b/sound/soc/codecs/Patch_base_jazz_Rate48_pps_driver.h
new file mode 100644
index 000000000000..bbea174e3d24
--- /dev/null
+++ b/sound/soc/codecs/Patch_base_jazz_Rate48_pps_driver.h
@@ -0,0 +1,96 @@
+/*
+ * linux/sound/soc/codecs/Patch_base_jazz_Rate48_pps_driver.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+*/
+
+reg_value jazz_A_reg_values[] = {
+};
+#define miniDSP_A_reg_values_COEFF_START 0
+#define miniDSP_A_reg_values_COEFF_SIZE 0
+#define miniDSP_A_reg_values_INST_START 0
+#define miniDSP_A_reg_values_INST_SIZE 0
+
+reg_value jazz_D_reg_values[] = {
+ {0, 0x0},
+ {0x7F, 0x50},
+ { 0, 0x01},
+ { 36, 0xB0},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xE0},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x08},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x40},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x58},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 0, 0x09},
+ {100, 0xB0},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0xE0},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x08},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x40},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x58},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x3C},
+ { 0, 0x01},
+ { 24, 0xB0},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xE0},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x08},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x40},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x58},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+};
+#define miniDSP_D_reg_values_COEFF_START 0
+#define miniDSP_D_reg_values_COEFF_SIZE 67
+#define miniDSP_D_reg_values_INST_START 67
+#define miniDSP_D_reg_values_INST_SIZE 0
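These PPS-generated tables are plain ordered (register, value) pairs; the entries that write register 0 appear to switch the AIC326x page and the 0x7F entries the book, so a consumer can replay them verbatim. A hedged sketch of such a replay loop (the helper name and the use of snd_soc_write() are assumptions, not something this patch defines):

/* Replay a PPS-exported reg_value table in order; page (reg 0) and book
 * (reg 0x7F) select entries are ordinary writes, so no special-casing is
 * needed as long as the codec write path accepts raw 7-bit offsets. */
static int aic326x_replay_reg_table(struct snd_soc_codec *codec,
				    const reg_value *table, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		int ret = snd_soc_write(codec, table[i].reg_off,
					table[i].reg_val);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* e.g.: aic326x_replay_reg_table(codec, jazz_D_reg_values,
 *				  ARRAY_SIZE(jazz_D_reg_values)); */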
diff --git a/sound/soc/codecs/Patch_base_main_Rate48_pps_driver.h b/sound/soc/codecs/Patch_base_main_Rate48_pps_driver.h
new file mode 100644
index 000000000000..80cc3416c344
--- /dev/null
+++ b/sound/soc/codecs/Patch_base_main_Rate48_pps_driver.h
@@ -0,0 +1,96 @@
+/*
+ * linux/sound/soc/codecs/Patch_base_main_Rate48_pps_driver.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+*/
+
+reg_value main_A_reg_values[] = {
+};
+#define miniDSP_A_reg_values_COEFF_START 0
+#define miniDSP_A_reg_values_COEFF_SIZE 0
+#define miniDSP_A_reg_values_INST_START 0
+#define miniDSP_A_reg_values_INST_SIZE 0
+
+reg_value main_D_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x50},
+ { 0, 0x01},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 0, 0x09},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x3C},
+ { 0, 0x01},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+};
+#define miniDSP_D_reg_values_COEFF_START 0
+#define miniDSP_D_reg_values_COEFF_SIZE 67
+#define miniDSP_D_reg_values_INST_START 67
+#define miniDSP_D_reg_values_INST_SIZE 0
diff --git a/sound/soc/codecs/Patch_base_pop_Rate48_pps_driver.h b/sound/soc/codecs/Patch_base_pop_Rate48_pps_driver.h
new file mode 100644
index 000000000000..c2c3d0a1df84
--- /dev/null
+++ b/sound/soc/codecs/Patch_base_pop_Rate48_pps_driver.h
@@ -0,0 +1,96 @@
+/*
+ * linux/sound/soc/codecs/Patch_base_pop_Rate48_pps_driver.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+*/
+
+reg_value pop_A_reg_values[] = {
+};
+#define miniDSP_A_reg_values_COEFF_START 0
+#define miniDSP_A_reg_values_COEFF_SIZE 0
+#define miniDSP_A_reg_values_INST_START 0
+#define miniDSP_A_reg_values_INST_SIZE 0
+
+reg_value pop_D_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x50},
+ { 0, 0x01},
+ { 36, 0x40},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x20},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xD8},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0xB0},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 0, 0x09},
+ {100, 0x40},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x20},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xD8},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0xB0},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x3C},
+ { 0, 0x01},
+ { 24, 0x40},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x20},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xD8},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xB0},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+};
+#define miniDSP_D_reg_values_COEFF_START 0
+#define miniDSP_D_reg_values_COEFF_SIZE 67
+#define miniDSP_D_reg_values_INST_START 67
+#define miniDSP_D_reg_values_INST_SIZE 0
diff --git a/sound/soc/codecs/Patch_base_rock_Rate48_pps_driver.h b/sound/soc/codecs/Patch_base_rock_Rate48_pps_driver.h
new file mode 100644
index 000000000000..954c7c37478e
--- /dev/null
+++ b/sound/soc/codecs/Patch_base_rock_Rate48_pps_driver.h
@@ -0,0 +1,96 @@
+/*
+ * linux/sound/soc/codecs/Patch_base_rock_Rate48_pps_driver.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+*/
+
+reg_value rock_A_reg_values[] = {
+};
+#define miniDSP_A_reg_values_COEFF_START 0
+#define miniDSP_A_reg_values_COEFF_SIZE 0
+#define miniDSP_A_reg_values_INST_START 0
+#define miniDSP_A_reg_values_INST_SIZE 0
+
+reg_value rock_D_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x50},
+ { 0, 0x01},
+ { 36, 0x58},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x30},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0xD8},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xF0},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x60},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 0, 0x09},
+ {100, 0x58},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x30},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xD8},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xF0},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x60},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x3C},
+ { 0, 0x01},
+ { 24, 0x58},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x30},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xD8},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xF0},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x60},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+};
+#define miniDSP_D_reg_values_COEFF_START 0
+#define miniDSP_D_reg_values_COEFF_SIZE 67
+#define miniDSP_D_reg_values_INST_START 67
+#define miniDSP_D_reg_values_INST_SIZE 0
diff --git a/sound/soc/codecs/first_rate_pps_driver.h b/sound/soc/codecs/first_rate_pps_driver.h
new file mode 100644
index 000000000000..c24a1ca5553d
--- /dev/null
+++ b/sound/soc/codecs/first_rate_pps_driver.h
@@ -0,0 +1,6011 @@
+/*
+ * linux/sound/soc/codecs/first_rate_pps_driver.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+*/
+
+static control main44_MUX_controls[] = {
+ {80, 1, 28, 1, 0},
+ {40, 3, 120, 1, 1}
+};
+
+static char *main44_MUX_control_names[] = {
+ "Stereo_Mux_TwoToOne_1",
+ "Mono_Mux_1_1"
+};
+
+static control main44_VOLUME_controls[] = {
+ /*{80, 1, 32, 0, 0}*/
+};
+
+static char *main44_VOLUME_control_names[] = {
+ /*"Volume_1"*/
+};
+
+/*INSTRUCTIONS & COEFFICIENTS*/
+/*typedef struct {
+ u8 reg_off;
+ u8 reg_val;
+} reg_value;*/
+
+static char *REG_Section_names[] = {
+ "main44_miniDSP_A_reg_values",
+ "main44_miniDSP_D_reg_values",
+};
+
+reg_value main44_REG_Section_init_program[] = {
+ { 0, 0x0},
+ { 0x7F, 0x00},
+/* # Set AutoINC */
+ {121, 0x01},
+/*# reg[0][1][122] = 0x01; reg(0)(1)(0x7A => 122) Vref charge time - 40 ms.*/
+ {122, 0x01},
+};
+
+
+reg_value main44_REG_Section_post_program[] = {
+ { 0, 0x0},
+ { 0x7F, 0x28},
+/*# reg[40][0][1] = 0x04; adaptive mode for ADC */
+ { 1, 0x04},
+ { 0x7F, 0x50},
+/*# reg[80][0][1] = 0x04; adaptive mode for DAC */
+ { 1, 0x04},
+ { 0x7F, 0x64},
+/*# reg[100][0][48] = 4; IDAC = MDAC*DOSR and IADC = MADC*AOSR, for
+IDAC/IADC values of 256, 512, 1024, 1536, 2048, 3072, 4096 or 6144 */
+ { 48, 0x04},
+/* # reg[100][0][49] = 0*/
+ { 49, 0x00},
+ { 0x7F, 0x78},
+/*# reg[120][0][48] = 4 */
+ { 48, 0x04},
+/*# reg[120][0][49] = 0*/
+ { 49, 0x00},
+ { 0x7F, 0x00},
+};
+
+
+reg_value main44_miniDSP_A_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x28},
+ { 0, 0x01},
+ { 8, 0x00},
+ { 9, 0xA7},
+ { 10, 0xF8},
+ { 11, 0x00},
+ { 12, 0x7E},
+ { 13, 0xB0},
+ { 14, 0x10},
+ { 15, 0x00},
+ { 16, 0x7F},
+ { 17, 0xFF},
+ { 18, 0xFF},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x20},
+ { 29, 0x26},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x20},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x01},
+ { 37, 0x58},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xFE},
+ { 41, 0xA8},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x7E},
+ { 45, 0x89},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x82},
+ { 49, 0xB2},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x01},
+ { 53, 0xE5},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0xFE},
+ { 57, 0x1B},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x7D},
+ { 61, 0xDF},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x83},
+ { 65, 0xCC},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x02},
+ { 69, 0xAA},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFD},
+ { 73, 0x56},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x7C},
+ { 77, 0xE0},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x85},
+ { 81, 0x55},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x03},
+ { 85, 0xBB},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0xFC},
+ { 89, 0x45},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x7B},
+ { 93, 0x5B},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x87},
+ { 97, 0x78},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x05},
+ {101, 0x34},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0xFA},
+ {105, 0xCC},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x78},
+ {109, 0xFF},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x8A},
+ {113, 0x6A},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x07},
+ {117, 0x35},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0xF8},
+ {121, 0xCB},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x75},
+ {125, 0x44},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0x8E},
+ { 9, 0x6C},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x09},
+ { 13, 0xE0},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xF6},
+ { 17, 0x20},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x6F},
+ { 21, 0x43},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x93},
+ { 25, 0xC1},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x0D},
+ { 29, 0x4C},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xF2},
+ { 33, 0xB4},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x65},
+ { 37, 0x81},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x9A},
+ { 41, 0x9A},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0xFF},
+ { 45, 0xFF},
+ { 46, 0xFF},
+ { 47, 0x00},
+ { 48, 0x80},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x7F},
+ { 53, 0xFF},
+ { 54, 0xFF},
+ { 55, 0x00},
+ { 56, 0x40},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0xFF},
+ { 65, 0x9E},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xF7},
+ { 69, 0x10},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x26},
+ { 73, 0xF0},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x02},
+ { 77, 0x61},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x40},
+ { 81, 0x02},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xFF},
+ { 85, 0xFC},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0xFF},
+ { 89, 0xFD},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFF},
+ { 93, 0xE5},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0xFF},
+ { 97, 0xA7},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0xFF},
+ {101, 0xC7},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0xFF},
+ {105, 0xCE},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xFF},
+ {109, 0xFE},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xFE},
+ {113, 0xF7},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0xFF},
+ {117, 0x46},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0xFF},
+ {121, 0x22},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0xFF},
+ {125, 0x0F},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x03},
+ { 8, 0xFA},
+ { 9, 0x8C},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xF5},
+ { 13, 0x08},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xFD},
+ { 17, 0x92},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0xF3},
+ { 21, 0xA3},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x03},
+ { 25, 0xB3},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x33},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x71},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x40},
+ { 37, 0x60},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0xE2},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0xC6},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x87},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x0F},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x0A},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x02},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x01},
+ { 65, 0x81},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x18},
+ { 69, 0x89},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x07},
+ { 73, 0xFB},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x03},
+ { 77, 0xBE},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xC0},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x80},
+ { 85, 0x04},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x7F},
+ { 93, 0x12},
+ { 94, 0x3A},
+ { 95, 0x00},
+ { 96, 0x40},
+ { 97, 0x00},
+ { 98, 0x04},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x08},
+ {103, 0x00},
+ {104, 0x0C},
+ {105, 0xCC},
+ {106, 0xCD},
+ {107, 0x00},
+ {108, 0x01},
+ {109, 0x47},
+ {110, 0xAE},
+ {111, 0x00},
+ {112, 0x02},
+ {113, 0x52},
+ {114, 0x6E},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x08},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x01},
+ {123, 0x00},
+ {124, 0x7F},
+ {125, 0xF8},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x04},
+ { 8, 0x0C},
+ { 9, 0xCC},
+ { 10, 0xCD},
+ { 11, 0x00},
+ { 12, 0x73},
+ { 13, 0x33},
+ { 14, 0x33},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0xA3},
+ { 18, 0xD7},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ { 0, 0x09},
+ { 72, 0x00},
+ { 73, 0xA7},
+ { 74, 0xF8},
+ { 75, 0x00},
+ { 76, 0x7E},
+ { 77, 0xB0},
+ { 78, 0x10},
+ { 79, 0x00},
+ { 80, 0x7F},
+ { 81, 0xFF},
+ { 82, 0xFF},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x20},
+ { 93, 0x26},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x20},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x01},
+ {101, 0x58},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0xFE},
+ {105, 0xA8},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x7E},
+ {109, 0x89},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x82},
+ {113, 0xB2},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x01},
+ {117, 0xE5},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0xFE},
+ {121, 0x1B},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x7D},
+ {125, 0xDF},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0A},
+ { 8, 0x83},
+ { 9, 0xCC},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x02},
+ { 13, 0xAA},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xFD},
+ { 17, 0x56},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x7C},
+ { 21, 0xE0},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x85},
+ { 25, 0x55},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x03},
+ { 29, 0xBB},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xFC},
+ { 33, 0x45},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x7B},
+ { 37, 0x5B},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x87},
+ { 41, 0x78},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x05},
+ { 45, 0x34},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xFA},
+ { 49, 0xCC},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x78},
+ { 53, 0xFF},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x8A},
+ { 57, 0x6A},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x07},
+ { 61, 0x35},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0xF8},
+ { 65, 0xCB},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x75},
+ { 69, 0x44},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x8E},
+ { 73, 0x6C},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x09},
+ { 77, 0xE0},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xF6},
+ { 81, 0x20},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x6F},
+ { 85, 0x43},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x93},
+ { 89, 0xC1},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x0D},
+ { 93, 0x4C},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0xF2},
+ { 97, 0xB4},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x65},
+ {101, 0x81},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x9A},
+ {105, 0x9A},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xFF},
+ {109, 0xFF},
+ {110, 0xFF},
+ {111, 0x00},
+ {112, 0x80},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x7F},
+ {117, 0xFF},
+ {118, 0xFF},
+ {119, 0x00},
+ {120, 0x40},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0B},
+ { 8, 0xFF},
+ { 9, 0x9E},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xF7},
+ { 13, 0x10},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x26},
+ { 17, 0xF0},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x02},
+ { 21, 0x61},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x40},
+ { 25, 0x02},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFF},
+ { 29, 0xFC},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xFF},
+ { 33, 0xFD},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xFF},
+ { 37, 0xE5},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xFF},
+ { 41, 0xA7},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0xFF},
+ { 45, 0xC7},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xFF},
+ { 49, 0xCE},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0xFF},
+ { 53, 0xFE},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0xFE},
+ { 57, 0xF7},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0xFF},
+ { 61, 0x46},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0xFF},
+ { 65, 0x22},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xFF},
+ { 69, 0x0F},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFA},
+ { 73, 0x8C},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0xF5},
+ { 77, 0x08},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xFD},
+ { 81, 0x92},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xF3},
+ { 85, 0xA3},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x03},
+ { 89, 0xB3},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x33},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x71},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x40},
+ {101, 0x60},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0xE2},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0xC6},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x87},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x0F},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x0A},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x02},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0C},
+ { 8, 0x01},
+ { 9, 0x81},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x18},
+ { 13, 0x89},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x07},
+ { 17, 0xFB},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x03},
+ { 21, 0xBE},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0xC0},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+
+ { 28, 0x80},
+ { 29, 0x04},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x7F},
+ { 37, 0x12},
+ { 38, 0x3A},
+ { 39, 0x00},
+ { 40, 0x40},
+ { 41, 0x00},
+ { 42, 0x04},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x08},
+ { 47, 0x00},
+ { 48, 0x0C},
+ { 49, 0xCC},
+ { 50, 0xCD},
+ { 51, 0x00},
+ { 52, 0x01},
+ { 53, 0x47},
+ { 54, 0xAE},
+ { 55, 0x00},
+ { 56, 0x02},
+ { 57, 0x52},
+ { 58, 0x6E},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x08},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x01},
+ { 67, 0x00},
+ { 68, 0x7F},
+ { 69, 0xF8},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x0C},
+ { 73, 0xCC},
+ { 74, 0xCD},
+ { 75, 0x00},
+ { 76, 0x73},
+ { 77, 0x33},
+ { 78, 0x33},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0xA3},
+ { 82, 0xD7},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0D},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x14},
+ { 0, 0x01},
+ { 8, 0xC0},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xC0},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x64},
+ { 0, 0x01},
+ { 8, 0x58},
+ { 9, 0x60},
+ { 10, 0x08},
+ { 11, 0x01},
+ { 12, 0x60},
+ { 13, 0x60},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x58},
+ { 17, 0x60},
+ { 18, 0x00},
+ { 19, 0x2B},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x44},
+ { 25, 0x00},
+ { 26, 0xC0},
+ { 27, 0x07},
+ { 28, 0x04},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x04},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x0D},
+ { 36, 0x04},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x08},
+ { 40, 0x04},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x04},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x44},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x07},
+ { 56, 0x21},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0x00},
+ { 60, 0x4B},
+ { 61, 0x00},
+ { 62, 0xC0},
+ { 63, 0x00},
+ { 64, 0x4B},
+ { 65, 0x00},
+ { 66, 0xE0},
+ { 67, 0x00},
+ { 68, 0x10},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x10},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x0D},
+ { 76, 0x10},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x08},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x04},
+ { 84, 0x18},
+ { 85, 0x06},
+ { 86, 0x00},
+ { 87, 0x0F},
+ { 88, 0x1C},
+ { 89, 0x05},
+ { 90, 0xE0},
+ { 91, 0x09},
+ { 92, 0x1C},
+ { 93, 0x05},
+ { 94, 0xE0},
+ { 95, 0x03},
+ { 96, 0x1C},
+ { 97, 0x05},
+ { 98, 0xC0},
+ { 99, 0x0A},
+ {100, 0x1C},
+ {101, 0x05},
+ {102, 0xC0},
+ {103, 0x02},
+ {104, 0x1C},
+ {105, 0x05},
+ {106, 0x80},
+ {107, 0x0C},
+ {108, 0x1C},
+ {109, 0x05},
+ {110, 0x80},
+ {111, 0x00},
+ {112, 0x1C},
+ {113, 0x05},
+ {114, 0xA0},
+ {115, 0x01},
+ {116, 0x1C},
+ {117, 0x05},
+ {118, 0xA0},
+ {119, 0x0B},
+ {120, 0x18},
+ {121, 0x05},
+ {122, 0x80},
+ {123, 0x03},
+ {124, 0x1C},
+ {125, 0x05},
+ {126, 0x80},
+ {127, 0x08},
+ { 0, 0x02},
+ { 8, 0x1C},
+ { 9, 0x05},
+ { 10, 0xA0},
+ { 11, 0x09},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x2D},
+ { 16, 0x1C},
+ { 17, 0x05},
+ { 18, 0xA0},
+ { 19, 0x02},
+ { 20, 0x1C},
+ { 21, 0x05},
+ { 22, 0xC0},
+ { 23, 0x01},
+ { 24, 0x1C},
+ { 25, 0x05},
+ { 26, 0xC0},
+ { 27, 0x0A},
+ { 28, 0x1C},
+ { 29, 0x05},
+ { 30, 0xE0},
+ { 31, 0x00},
+ { 32, 0x1C},
+ { 33, 0x05},
+ { 34, 0xE0},
+ { 35, 0x0B},
+ { 36, 0x1C},
+ { 37, 0x06},
+ { 38, 0x00},
+ { 39, 0x06},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x0F},
+ { 56, 0x18},
+ { 57, 0x06},
+ { 58, 0x20},
+ { 59, 0x2C},
+ { 60, 0x1C},
+ { 61, 0x06},
+ { 62, 0x40},
+ { 63, 0x11},
+ { 64, 0x1C},
+ { 65, 0x06},
+ { 66, 0x40},
+ { 67, 0x48},
+ { 68, 0x1C},
+ { 69, 0x06},
+ { 70, 0x20},
+ { 71, 0x2D},
+ { 72, 0x1C},
+ { 73, 0x06},
+ { 74, 0x60},
+ { 75, 0x2F},
+ { 76, 0x1C},
+ { 77, 0x06},
+ { 78, 0x60},
+ { 79, 0x2A},
+ { 80, 0x1C},
+ { 81, 0x06},
+ { 82, 0x80},
+ { 83, 0x31},
+ { 84, 0x1C},
+ { 85, 0x06},
+ { 86, 0x80},
+ { 87, 0x28},
+ { 88, 0x1C},
+ { 89, 0x06},
+ { 90, 0xA0},
+ { 91, 0x37},
+ { 92, 0x1C},
+ { 93, 0x06},
+ { 94, 0xA0},
+ { 95, 0x22},
+ { 96, 0x1C},
+ { 97, 0x06},
+ { 98, 0xC0},
+ { 99, 0x14},
+ {100, 0x1C},
+ {101, 0x06},
+ {102, 0xC0},
+ {103, 0x45},
+ {104, 0x1C},
+ {105, 0x06},
+ {106, 0xE0},
+ {107, 0x12},
+ {108, 0x1C},
+ {109, 0x06},
+ {110, 0xE0},
+ {111, 0x47},
+ {112, 0x1C},
+ {113, 0x07},
+ {114, 0x00},
+ {115, 0x38},
+ {116, 0x1C},
+ {117, 0x07},
+ {118, 0x00},
+ {119, 0x21},
+ {120, 0x1C},
+ {121, 0x07},
+ {122, 0x20},
+ {123, 0x33},
+ {124, 0x1C},
+ {125, 0x07},
+ {126, 0x20},
+ {127, 0x26},
+ { 0, 0x03},
+ { 8, 0x1C},
+ { 9, 0x07},
+ { 10, 0x40},
+ { 11, 0x16},
+ { 12, 0x1C},
+ { 13, 0x07},
+ { 14, 0x40},
+ { 15, 0x43},
+ { 16, 0x1C},
+ { 17, 0x07},
+ { 18, 0x60},
+ { 19, 0x35},
+ { 20, 0x1C},
+ { 21, 0x07},
+ { 22, 0x60},
+ { 23, 0x24},
+ { 24, 0x1C},
+ { 25, 0x07},
+ { 26, 0x80},
+ { 27, 0x1A},
+ { 28, 0x1C},
+ { 29, 0x07},
+ { 30, 0x80},
+ { 31, 0x3F},
+ { 32, 0x1C},
+ { 33, 0x07},
+ { 34, 0xA0},
+ { 35, 0x3A},
+ { 36, 0x1C},
+ { 37, 0x07},
+ { 38, 0xA0},
+ { 39, 0x1F},
+ { 40, 0x1C},
+ { 41, 0x07},
+ { 42, 0xC0},
+ { 43, 0x18},
+ { 44, 0x1C},
+ { 45, 0x07},
+ { 46, 0xC0},
+ { 47, 0x41},
+ { 48, 0x1C},
+ { 49, 0x07},
+ { 50, 0xE0},
+ { 51, 0x1C},
+ { 52, 0x1C},
+ { 53, 0x07},
+ { 54, 0xE0},
+ { 55, 0x3D},
+ { 56, 0x1C},
+ { 57, 0x08},
+ { 58, 0x00},
+ { 59, 0x19},
+ { 60, 0x1C},
+ { 61, 0x08},
+ { 62, 0x00},
+ { 63, 0x40},
+ { 64, 0x1C},
+ { 65, 0x08},
+ { 66, 0x20},
+ { 67, 0x30},
+ { 68, 0x1C},
+ { 69, 0x08},
+ { 70, 0x20},
+ { 71, 0x29},
+ { 72, 0x1C},
+ { 73, 0x08},
+ { 74, 0x40},
+ { 75, 0x15},
+ { 76, 0x1C},
+ { 77, 0x08},
+ { 78, 0x40},
+ { 79, 0x44},
+ { 80, 0x1C},
+ { 81, 0x08},
+ { 82, 0x60},
+ { 83, 0x3B},
+ { 84, 0x1C},
+ { 85, 0x08},
+ { 86, 0x60},
+ { 87, 0x1E},
+ { 88, 0x1C},
+ { 89, 0x08},
+ { 90, 0x80},
+ { 91, 0x34},
+ { 92, 0x1C},
+ { 93, 0x08},
+ { 94, 0x80},
+ { 95, 0x25},
+ { 96, 0x1C},
+ { 97, 0x08},
+ { 98, 0xA0},
+ { 99, 0x36},
+ {100, 0x1C},
+ {101, 0x08},
+ {102, 0xA0},
+ {103, 0x23},
+ {104, 0x1C},
+ {105, 0x08},
+ {106, 0xC0},
+ {107, 0x32},
+ {108, 0x1C},
+ {109, 0x08},
+ {110, 0xC0},
+ {111, 0x27},
+ {112, 0x1C},
+ {113, 0x08},
+ {114, 0xE0},
+ {115, 0x13},
+ {116, 0x1C},
+ {117, 0x08},
+ {118, 0xE0},
+ {119, 0x46},
+ {120, 0x1C},
+ {121, 0x09},
+ {122, 0x00},
+ {123, 0x2E},
+ {124, 0x1C},
+ {125, 0x09},
+ {126, 0x00},
+ {127, 0x2B},
+ { 0, 0x04},
+ { 8, 0x1C},
+ { 9, 0x09},
+ { 10, 0x20},
+ { 11, 0x10},
+ { 12, 0x1C},
+ { 13, 0x09},
+ { 14, 0x20},
+ { 15, 0x49},
+ { 16, 0x1C},
+ { 17, 0x09},
+ { 18, 0x40},
+ { 19, 0x17},
+ { 20, 0x1C},
+ { 21, 0x09},
+ { 22, 0x40},
+ { 23, 0x42},
+ { 24, 0x1C},
+ { 25, 0x09},
+ { 26, 0x60},
+ { 27, 0x1D},
+ { 28, 0x1C},
+ { 29, 0x09},
+ { 30, 0x60},
+ { 31, 0x3C},
+ { 32, 0x1C},
+ { 33, 0x09},
+ { 34, 0x80},
+ { 35, 0x1B},
+ { 36, 0x1C},
+ { 37, 0x09},
+ { 38, 0x80},
+ { 39, 0x3E},
+ { 40, 0x1C},
+ { 41, 0x09},
+ { 42, 0xA0},
+ { 43, 0x39},
+ { 44, 0x1C},
+ { 45, 0x09},
+ { 46, 0xA0},
+ { 47, 0x20},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x5C},
+ { 53, 0x90},
+ { 54, 0x40},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x5C},
+ { 61, 0x90},
+ { 62, 0x20},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x10},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x49},
+ { 80, 0x18},
+ { 81, 0x00},
+ { 82, 0x60},
+ { 83, 0x4A},
+ { 84, 0x1C},
+ { 85, 0x00},
+ { 86, 0x80},
+ { 87, 0x4C},
+ { 88, 0x1C},
+ { 89, 0x00},
+ { 90, 0x40},
+ { 91, 0x49},
+ { 92, 0x30},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x4C},
+ { 96, 0x1C},
+ { 97, 0x00},
+ { 98, 0x20},
+ { 99, 0x4E},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x10},
+ {105, 0x30},
+ {106, 0x00},
+ {107, 0x4B},
+ {108, 0x34},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x4B},
+ {112, 0x5C},
+ {113, 0x60},
+ {114, 0xA0},
+ {115, 0x4B},
+ {116, 0x0C},
+ {117, 0x00},
+ {118, 0x40},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x10},
+ {125, 0x30},
+ {126, 0x00},
+ {127, 0x4D},
+ { 0, 0x05},
+ { 8, 0x10},
+ { 9, 0x13},
+ { 10, 0xE0},
+ { 11, 0x50},
+ { 12, 0x18},
+ { 13, 0x05},
+ { 14, 0x20},
+ { 15, 0x50},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x18},
+ { 25, 0x05},
+ { 26, 0x00},
+ { 27, 0x58},
+ { 28, 0x4C},
+ { 29, 0x05},
+ { 30, 0x20},
+ { 31, 0x56},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x10},
+ { 45, 0x13},
+ { 46, 0xC0},
+ { 47, 0x59},
+ { 48, 0x18},
+ { 49, 0x00},
+ { 50, 0xE0},
+ { 51, 0x59},
+ { 52, 0x6C},
+ { 53, 0x01},
+ { 54, 0x20},
+ { 55, 0x5B},
+ { 56, 0x1C},
+ { 57, 0x01},
+ { 58, 0x40},
+ { 59, 0x5C},
+ { 60, 0x18},
+ { 61, 0x01},
+ { 62, 0x60},
+ { 63, 0x59},
+ { 64, 0x6C},
+ { 65, 0x01},
+ { 66, 0xA0},
+ { 67, 0x61},
+ { 68, 0x1C},
+ { 69, 0x01},
+ { 70, 0xC0},
+ { 71, 0x62},
+ { 72, 0x10},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x5A},
+ { 76, 0x18},
+ { 77, 0x01},
+ { 78, 0xE0},
+ { 79, 0x59},
+ { 80, 0x6C},
+ { 81, 0x02},
+ { 82, 0x20},
+ { 83, 0x67},
+ { 84, 0x1C},
+ { 85, 0x02},
+ { 86, 0x40},
+ { 87, 0x68},
+ { 88, 0x10},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x60},
+ { 92, 0x18},
+ { 93, 0x02},
+ { 94, 0x60},
+ { 95, 0x59},
+ { 96, 0x6C},
+ { 97, 0x02},
+ { 98, 0xA0},
+ { 99, 0x6D},
+ {100, 0x1C},
+ {101, 0x02},
+ {102, 0xC0},
+ {103, 0x6E},
+ {104, 0x10},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x66},
+ {108, 0x18},
+ {109, 0x02},
+ {110, 0xE0},
+ {111, 0x59},
+ {112, 0x6C},
+ {113, 0x03},
+ {114, 0x20},
+ {115, 0x73},
+ {116, 0x1C},
+ {117, 0x03},
+ {118, 0x40},
+ {119, 0x74},
+ {120, 0x10},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x6C},
+ {124, 0x18},
+ {125, 0x03},
+ {126, 0x60},
+ {127, 0x59},
+ { 0, 0x06},
+ { 8, 0x6C},
+ { 9, 0x03},
+ { 10, 0xA0},
+ { 11, 0x79},
+ { 12, 0x1C},
+ { 13, 0x03},
+ { 14, 0xC0},
+ { 15, 0x7A},
+ { 16, 0x10},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x72},
+ { 20, 0x18},
+ { 21, 0x03},
+ { 22, 0xE0},
+ { 23, 0x59},
+ { 24, 0x6C},
+ { 25, 0x04},
+ { 26, 0x20},
+ { 27, 0x7F},
+ { 28, 0x1C},
+ { 29, 0x04},
+ { 30, 0x40},
+ { 31, 0x80},
+ { 32, 0x10},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x78},
+ { 36, 0x18},
+ { 37, 0x04},
+ { 38, 0x60},
+ { 39, 0x59},
+ { 40, 0x6C},
+ { 41, 0x04},
+ { 42, 0xA0},
+ { 43, 0x85},
+ { 44, 0x1C},
+ { 45, 0x04},
+ { 46, 0xC0},
+ { 47, 0x86},
+ { 48, 0x10},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x7E},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x10},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x84},
+ { 64, 0x18},
+ { 65, 0x01},
+ { 66, 0x00},
+ { 67, 0x5A},
+ { 68, 0x1C},
+ { 69, 0x00},
+ { 70, 0xE0},
+ { 71, 0x5C},
+ { 72, 0x6C},
+ { 73, 0x01},
+ { 74, 0x20},
+ { 75, 0x5E},
+ { 76, 0x1C},
+ { 77, 0x01},
+ { 78, 0x40},
+ { 79, 0x5F},
+ { 80, 0x18},
+ { 81, 0x01},
+ { 82, 0x60},
+ { 83, 0x60},
+ { 84, 0x1C},
+ { 85, 0x01},
+ { 86, 0x80},
+ { 87, 0x62},
+ { 88, 0x6C},
+ { 89, 0x01},
+ { 90, 0xA0},
+ { 91, 0x64},
+ { 92, 0x10},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x5D},
+ { 96, 0x1C},
+ { 97, 0x01},
+ { 98, 0xC0},
+ { 99, 0x65},
+ {100, 0x18},
+ {101, 0x02},
+ {102, 0x00},
+ {103, 0x66},
+ {104, 0x1C},
+ {105, 0x01},
+ {106, 0xE0},
+ {107, 0x68},
+ {108, 0x6C},
+ {109, 0x02},
+ {110, 0x20},
+ {111, 0x6A},
+ {112, 0x10},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x63},
+ {116, 0x1C},
+ {117, 0x02},
+ {118, 0x40},
+ {119, 0x6B},
+ {120, 0x18},
+ {121, 0x02},
+ {122, 0x60},
+ {123, 0x6C},
+ {124, 0x1C},
+ {125, 0x02},
+ {126, 0x80},
+ {127, 0x6E},
+ { 0, 0x07},
+ { 8, 0x6C},
+ { 9, 0x02},
+ { 10, 0xA0},
+ { 11, 0x70},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x69},
+ { 16, 0x1C},
+ { 17, 0x02},
+ { 18, 0xC0},
+ { 19, 0x71},
+ { 20, 0x18},
+ { 21, 0x03},
+ { 22, 0x00},
+ { 23, 0x72},
+ { 24, 0x1C},
+ { 25, 0x02},
+ { 26, 0xE0},
+ { 27, 0x74},
+ { 28, 0x6C},
+ { 29, 0x03},
+ { 30, 0x20},
+ { 31, 0x76},
+ { 32, 0x10},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x6F},
+ { 36, 0x1C},
+ { 37, 0x03},
+ { 38, 0x40},
+ { 39, 0x77},
+ { 40, 0x18},
+ { 41, 0x03},
+ { 42, 0x60},
+ { 43, 0x78},
+ { 44, 0x1C},
+ { 45, 0x03},
+ { 46, 0x80},
+ { 47, 0x7A},
+ { 48, 0x6C},
+ { 49, 0x03},
+ { 50, 0xA0},
+ { 51, 0x7C},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x75},
+ { 56, 0x1C},
+ { 57, 0x03},
+ { 58, 0xC0},
+ { 59, 0x7D},
+ { 60, 0x18},
+ { 61, 0x04},
+ { 62, 0x00},
+ { 63, 0x7E},
+ { 64, 0x1C},
+ { 65, 0x03},
+ { 66, 0xE0},
+ { 67, 0x80},
+ { 68, 0x6C},
+ { 69, 0x04},
+ { 70, 0x20},
+ { 71, 0x82},
+ { 72, 0x10},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x7B},
+ { 76, 0x1C},
+ { 77, 0x04},
+ { 78, 0x40},
+ { 79, 0x83},
+ { 80, 0x18},
+ { 81, 0x04},
+ { 82, 0x60},
+ { 83, 0x84},
+ { 84, 0x1C},
+ { 85, 0x04},
+ { 86, 0x80},
+ { 87, 0x86},
+ { 88, 0x6C},
+ { 89, 0x04},
+ { 90, 0xA0},
+ { 91, 0x88},
+ { 92, 0x10},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x81},
+ { 96, 0x1C},
+ { 97, 0x04},
+ { 98, 0xC0},
+ { 99, 0x89},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x10},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x87},
+ {116, 0x18},
+ {117, 0x0A},
+ {118, 0x20},
+ {119, 0x94},
+ {120, 0x34},
+ {121, 0x09},
+ {122, 0xE0},
+ {123, 0x5D},
+ {124, 0x18},
+ {125, 0x0A},
+ {126, 0x20},
+ {127, 0x94},
+ { 0, 0x08},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x30},
+ { 13, 0x05},
+ { 14, 0x20},
+ { 15, 0x5D},
+ { 16, 0x44},
+ { 17, 0x00},
+ { 18, 0xC0},
+ { 19, 0x04},
+ { 20, 0x10},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x93},
+ { 24, 0x20},
+ { 25, 0x0B},
+ { 26, 0xA0},
+ { 27, 0x02},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x44},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x04},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x10},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x93},
+ { 48, 0x20},
+ { 49, 0x0B},
+ { 50, 0xA0},
+ { 51, 0x02},
+ { 52, 0x18},
+ { 53, 0x0A},
+ { 54, 0x20},
+ { 55, 0x96},
+ { 56, 0x34},
+ { 57, 0x09},
+ { 58, 0xE0},
+ { 59, 0x63},
+ { 60, 0x18},
+ { 61, 0x0A},
+ { 62, 0x20},
+ { 63, 0x96},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x30},
+ { 69, 0x05},
+ { 70, 0x20},
+ { 71, 0x63},
+ { 72, 0x44},
+ { 73, 0x00},
+ { 74, 0xC0},
+ { 75, 0x04},
+ { 76, 0x10},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x95},
+ { 80, 0x20},
+ { 81, 0x0C},
+ { 82, 0x00},
+ { 83, 0x02},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x44},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x04},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x10},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x95},
+ {104, 0x20},
+ {105, 0x0C},
+ {106, 0x00},
+ {107, 0x02},
+ {108, 0x18},
+ {109, 0x0A},
+ {110, 0x20},
+ {111, 0x98},
+ {112, 0x34},
+ {113, 0x09},
+ {114, 0xE0},
+ {115, 0x69},
+ {116, 0x18},
+ {117, 0x0A},
+ {118, 0x20},
+ {119, 0x98},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x30},
+ {125, 0x05},
+ {126, 0x20},
+ {127, 0x69},
+ { 0, 0x09},
+ { 8, 0x44},
+ { 9, 0x00},
+ { 10, 0xC0},
+ { 11, 0x04},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x97},
+ { 16, 0x20},
+ { 17, 0x0C},
+ { 18, 0x60},
+ { 19, 0x02},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x44},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x04},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x10},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x97},
+ { 40, 0x20},
+ { 41, 0x0C},
+ { 42, 0x60},
+ { 43, 0x02},
+ { 44, 0x18},
+ { 45, 0x0A},
+ { 46, 0x20},
+ { 47, 0x9A},
+ { 48, 0x34},
+ { 49, 0x09},
+ { 50, 0xE0},
+ { 51, 0x6F},
+ { 52, 0x18},
+ { 53, 0x0A},
+ { 54, 0x20},
+ { 55, 0x9A},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x30},
+ { 61, 0x05},
+ { 62, 0x20},
+ { 63, 0x6F},
+ { 64, 0x44},
+ { 65, 0x00},
+ { 66, 0xC0},
+ { 67, 0x04},
+ { 68, 0x10},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x99},
+ { 72, 0x20},
+ { 73, 0x0C},
+ { 74, 0xC0},
+ { 75, 0x02},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x44},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x04},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x10},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x99},
+ { 96, 0x20},
+ { 97, 0x0C},
+ { 98, 0xC0},
+ { 99, 0x02},
+ {100, 0x18},
+ {101, 0x0A},
+ {102, 0x20},
+ {103, 0x9C},
+ {104, 0x34},
+ {105, 0x09},
+ {106, 0xE0},
+ {107, 0x75},
+ {108, 0x18},
+ {109, 0x0A},
+ {110, 0x20},
+ {111, 0x9C},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x30},
+ {117, 0x05},
+ {118, 0x20},
+ {119, 0x75},
+ {120, 0x44},
+ {121, 0x00},
+ {122, 0xC0},
+ {123, 0x04},
+ {124, 0x10},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x9B},
+ { 0, 0x0A},
+ { 8, 0x20},
+ { 9, 0x0D},
+ { 10, 0x20},
+ { 11, 0x02},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x44},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x04},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x9B},
+ { 32, 0x20},
+ { 33, 0x0D},
+ { 34, 0x20},
+ { 35, 0x02},
+ { 36, 0x18},
+ { 37, 0x0A},
+ { 38, 0x20},
+ { 39, 0x9E},
+ { 40, 0x34},
+ { 41, 0x09},
+ { 42, 0xE0},
+ { 43, 0x7B},
+ { 44, 0x18},
+ { 45, 0x0A},
+ { 46, 0x20},
+ { 47, 0x9E},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x30},
+ { 53, 0x05},
+ { 54, 0x20},
+ { 55, 0x7B},
+ { 56, 0x44},
+ { 57, 0x00},
+ { 58, 0xC0},
+ { 59, 0x04},
+ { 60, 0x10},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x9D},
+ { 64, 0x20},
+ { 65, 0x0D},
+ { 66, 0x80},
+ { 67, 0x02},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x44},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x04},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x10},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x9D},
+ { 88, 0x20},
+ { 89, 0x0D},
+ { 90, 0x80},
+ { 91, 0x02},
+ { 92, 0x18},
+ { 93, 0x0A},
+ { 94, 0x20},
+ { 95, 0xA0},
+ { 96, 0x34},
+ { 97, 0x09},
+ { 98, 0xE0},
+ { 99, 0x81},
+ {100, 0x18},
+ {101, 0x0A},
+ {102, 0x20},
+ {103, 0xA0},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x30},
+ {109, 0x05},
+ {110, 0x20},
+ {111, 0x81},
+ {112, 0x44},
+ {113, 0x00},
+ {114, 0xC0},
+ {115, 0x04},
+ {116, 0x10},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x9F},
+ {120, 0x20},
+ {121, 0x0D},
+ {122, 0xE0},
+ {123, 0x02},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0B},
+ { 8, 0x44},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x04},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x10},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x9F},
+ { 24, 0x20},
+ { 25, 0x0D},
+ { 26, 0xE0},
+ { 27, 0x02},
+ { 28, 0x18},
+ { 29, 0x0A},
+ { 30, 0x20},
+ { 31, 0xA2},
+ { 32, 0x34},
+ { 33, 0x09},
+ { 34, 0xE0},
+ { 35, 0x87},
+ { 36, 0x18},
+ { 37, 0x0A},
+ { 38, 0x20},
+ { 39, 0xA2},
+ { 40, 0x00},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x30},
+ { 45, 0x05},
+ { 46, 0x20},
+ { 47, 0x87},
+ { 48, 0x44},
+ { 49, 0x00},
+ { 50, 0xC0},
+ { 51, 0x04},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0xA1},
+ { 56, 0x20},
+ { 57, 0x0E},
+ { 58, 0x40},
+ { 59, 0x02},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x44},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x04},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x10},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0xA1},
+ { 80, 0x20},
+ { 81, 0x0E},
+ { 82, 0x40},
+ { 83, 0x02},
+ { 84, 0x58},
+ { 85, 0x60},
+ { 86, 0x00},
+ { 87, 0x2A},
+ { 88, 0x1C},
+ { 89, 0x09},
+ { 90, 0xC0},
+ { 91, 0xBB},
+ { 92, 0x1C},
+ { 93, 0x09},
+ { 94, 0xC0},
+ { 95, 0xBC},
+ { 96, 0x1C},
+ { 97, 0x09},
+ { 98, 0xC0},
+ { 99, 0xBD},
+ {100, 0x1C},
+ {101, 0x09},
+ {102, 0xC0},
+ {103, 0xBE},
+ {104, 0x1C},
+ {105, 0x09},
+ {106, 0xC0},
+ {107, 0xBF},
+ {108, 0x1C},
+ {109, 0x09},
+ {110, 0xC0},
+ {111, 0xC0},
+ {112, 0x1C},
+ {113, 0x09},
+ {114, 0xC0},
+ {115, 0xC1},
+ {116, 0x18},
+ {117, 0x09},
+ {118, 0xC0},
+ {119, 0xBB},
+ {120, 0x58},
+ {121, 0x70},
+ {122, 0x00},
+ {123, 0x2A},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0C},
+ { 8, 0x10},
+ { 9, 0x00},
+ { 10, 0x20},
+ { 11, 0xBA},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x10},
+ { 17, 0x00},
+ { 18, 0x20},
+ { 19, 0xC3},
+ { 20, 0x18},
+ { 21, 0x0B},
+ { 22, 0xA0},
+ { 23, 0xBB},
+ { 24, 0x1C},
+ { 25, 0x0C},
+ { 26, 0x00},
+ { 27, 0xBC},
+ { 28, 0x1C},
+ { 29, 0x0C},
+ { 30, 0x60},
+ { 31, 0xBD},
+ { 32, 0x1C},
+ { 33, 0x0C},
+ { 34, 0xC0},
+ { 35, 0xBE},
+ { 36, 0x1C},
+ { 37, 0x0D},
+ { 38, 0x20},
+ { 39, 0xBF},
+ { 40, 0x1C},
+ { 41, 0x0D},
+ { 42, 0x80},
+ { 43, 0xC0},
+ { 44, 0x1C},
+ { 45, 0x0D},
+ { 46, 0xE0},
+ { 47, 0xC1},
+ { 48, 0x1C},
+ { 49, 0x0E},
+ { 50, 0x40},
+ { 51, 0xC2},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x10},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0xA4},
+ { 68, 0x58},
+ { 69, 0x60},
+ { 70, 0x00},
+ { 71, 0x53},
+ { 72, 0x6C},
+ { 73, 0x0A},
+ { 74, 0x40},
+ { 75, 0xAD},
+ { 76, 0x1C},
+ { 77, 0x09},
+ { 78, 0xE0},
+ { 79, 0xA4},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x10},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x54},
+ { 92, 0x44},
+ { 93, 0x00},
+ { 94, 0xC0},
+ { 95, 0x02},
+ { 96, 0x18},
+ { 97, 0x05},
+ { 98, 0x20},
+ { 99, 0xA4},
+ {100, 0x44},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x02},
+ {104, 0x18},
+ {105, 0x05},
+ {106, 0x20},
+ {107, 0x54},
+ {108, 0x44},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x10},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0xA5},
+ {124, 0x18},
+ {125, 0x09},
+ {126, 0xE0},
+ {127, 0xA5},
+ { 0, 0x0D},
+ { 8, 0x58},
+ { 9, 0x70},
+ { 10, 0x00},
+ { 11, 0x5C},
+ { 12, 0x58},
+ { 13, 0x60},
+ { 14, 0x00},
+ { 15, 0x5C},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x44},
+ { 25, 0x00},
+ { 26, 0xA0},
+ { 27, 0x01},
+ { 28, 0x44},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x01},
+ { 32, 0x10},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0xA5},
+ { 36, 0x18},
+ { 37, 0x0A},
+ { 38, 0xE0},
+ { 39, 0xA5},
+ { 40, 0x1C},
+ { 41, 0x0B},
+ { 42, 0x20},
+ { 43, 0x92},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x8A},
+ { 60, 0x18},
+ { 61, 0x09},
+ { 62, 0xE0},
+ { 63, 0x8A},
+ { 64, 0x1C},
+ { 65, 0x00},
+ { 66, 0xA0},
+ { 67, 0xA4},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0xA3},
+ { 84, 0x44},
+ { 85, 0x00},
+ { 86, 0xA0},
+ { 87, 0x10},
+ { 88, 0x18},
+ { 89, 0x09},
+ { 90, 0xE0},
+ { 91, 0xB8},
+ { 92, 0x58},
+ { 93, 0x70},
+ { 94, 0x00},
+ { 95, 0x06},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x10},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x54},
+ {112, 0x18},
+ {113, 0x0A},
+ {114, 0xC0},
+ {115, 0x54},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0E},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x44},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x15},
+ { 32, 0x18},
+ { 33, 0x05},
+ { 34, 0x20},
+ { 35, 0xA3},
+ { 36, 0x59},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x55},
+ { 40, 0x59},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x54},
+ { 44, 0x18},
+ { 45, 0x05},
+ { 46, 0x20},
+ { 47, 0xA3},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x44},
+ { 53, 0x00},
+ { 54, 0x60},
+ { 55, 0x03},
+ { 56, 0x44},
+ { 57, 0x00},
+ { 58, 0x80},
+ { 59, 0x05},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x44},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x05},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x58},
+ { 73, 0x60},
+ { 74, 0x00},
+ { 75, 0x55},
+ { 76, 0x44},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x02},
+ { 80, 0x58},
+ { 81, 0x60},
+ { 82, 0x00},
+ { 83, 0x54},
+ { 84, 0x44},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x18},
+ { 89, 0x09},
+ { 90, 0xE0},
+ { 91, 0xB8},
+ { 92, 0x58},
+ { 93, 0x70},
+ { 94, 0x00},
+ { 95, 0x29},
+ { 96, 0x20},
+ { 97, 0x0A},
+ { 98, 0x00},
+ { 99, 0x02},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x10},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x54},
+ {112, 0x18},
+ {113, 0x0A},
+ {114, 0x00},
+ {115, 0x54},
+ {116, 0x1C},
+ {117, 0x05},
+ {118, 0x20},
+ {119, 0xB8},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0F},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0xB0},
+ { 16, 0x18},
+ { 17, 0x05},
+ { 18, 0x20},
+ { 19, 0xB0},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x20},
+ { 33, 0x0A},
+ { 34, 0x00},
+ { 35, 0x02},
+ { 36, 0x18},
+ { 37, 0x0B},
+ { 38, 0xC0},
+ { 39, 0xC3},
+ { 40, 0x1C},
+ { 41, 0x0A},
+ { 42, 0x00},
+ { 43, 0xBB},
+ { 44, 0x18},
+ { 45, 0x0C},
+ { 46, 0x20},
+ { 47, 0xC4},
+ { 48, 0x1C},
+ { 49, 0x0A},
+ { 50, 0x00},
+ { 51, 0xBC},
+ { 52, 0x18},
+ { 53, 0x0C},
+ { 54, 0x80},
+ { 55, 0xC5},
+ { 56, 0x20},
+ { 57, 0x0B},
+ { 58, 0xC0},
+ { 59, 0x02},
+ { 60, 0x1C},
+ { 61, 0x0A},
+ { 62, 0x00},
+ { 63, 0xBD},
+ { 64, 0x20},
+ { 65, 0x0C},
+ { 66, 0x20},
+ { 67, 0x02},
+ { 68, 0x18},
+ { 69, 0x0C},
+ { 70, 0xE0},
+ { 71, 0xC6},
+ { 72, 0x1C},
+ { 73, 0x0A},
+ { 74, 0x00},
+ { 75, 0xBE},
+ { 76, 0x20},
+ { 77, 0x0C},
+ { 78, 0x80},
+ { 79, 0x02},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x20},
+ { 89, 0x0C},
+ { 90, 0xE0},
+ { 91, 0x02},
+ { 92, 0x18},
+ { 93, 0x0D},
+ { 94, 0x40},
+ { 95, 0xC7},
+ { 96, 0x1C},
+ { 97, 0x0A},
+ { 98, 0x00},
+ { 99, 0xBF},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x20},
+ {113, 0x0D},
+ {114, 0x40},
+ {115, 0x02},
+ {116, 0x18},
+ {117, 0x0D},
+ {118, 0xA0},
+ {119, 0xC8},
+ {120, 0x1C},
+ {121, 0x0A},
+ {122, 0x00},
+ {123, 0xC0},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x10},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x20},
+ { 17, 0x0D},
+ { 18, 0xA0},
+ { 19, 0x02},
+ { 20, 0x18},
+ { 21, 0x0E},
+ { 22, 0x00},
+ { 23, 0xC9},
+ { 24, 0x1C},
+ { 25, 0x0A},
+ { 26, 0x00},
+ { 27, 0xC1},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x20},
+ { 41, 0x0E},
+ { 42, 0x00},
+ { 43, 0x02},
+ { 44, 0x18},
+ { 45, 0x0E},
+ { 46, 0x60},
+ { 47, 0xCA},
+ { 48, 0x1C},
+ { 49, 0x0A},
+ { 50, 0x00},
+ { 51, 0xC2},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x20},
+ { 65, 0x0E},
+ { 66, 0x60},
+ { 67, 0x02},
+ { 68, 0x58},
+ { 69, 0x60},
+ { 70, 0x00},
+ { 71, 0x5A},
+ { 72, 0x58},
+ { 73, 0x60},
+ { 74, 0x00},
+ { 75, 0x5B},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x10},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x51},
+ { 88, 0x10},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x52},
+ { 92, 0x18},
+ { 93, 0x0B},
+ { 94, 0xC0},
+ { 95, 0x51},
+ { 96, 0x1C},
+ { 97, 0x0B},
+ { 98, 0xE0},
+ { 99, 0x52},
+ {100, 0x18},
+ {101, 0x0C},
+ {102, 0x20},
+ {103, 0x51},
+ {104, 0x1C},
+ {105, 0x0C},
+ {106, 0x40},
+ {107, 0x52},
+ {108, 0x18},
+ {109, 0x0C},
+ {110, 0x80},
+ {111, 0x51},
+ {112, 0x20},
+ {113, 0x0B},
+ {114, 0xE0},
+ {115, 0x02},
+ {116, 0x1C},
+ {117, 0x0C},
+ {118, 0xA0},
+ {119, 0x52},
+ {120, 0x20},
+ {121, 0x0C},
+ {122, 0x40},
+ {123, 0x02},
+ {124, 0x18},
+ {125, 0x0C},
+ {126, 0xE0},
+ {127, 0x51},
+ { 0, 0x11},
+ { 8, 0x1C},
+ { 9, 0x0D},
+ { 10, 0x00},
+ { 11, 0x52},
+ { 12, 0x20},
+ { 13, 0x0C},
+ { 14, 0xA0},
+ { 15, 0x02},
+ { 16, 0x18},
+ { 17, 0x0D},
+ { 18, 0x40},
+ { 19, 0x51},
+ { 20, 0x1C},
+ { 21, 0x0D},
+ { 22, 0x60},
+ { 23, 0x52},
+ { 24, 0x20},
+ { 25, 0x0D},
+ { 26, 0x00},
+ { 27, 0x02},
+ { 28, 0x18},
+ { 29, 0x0D},
+ { 30, 0xA0},
+ { 31, 0x51},
+ { 32, 0x1C},
+ { 33, 0x0D},
+ { 34, 0xC0},
+ { 35, 0x52},
+ { 36, 0x20},
+ { 37, 0x0D},
+ { 38, 0x60},
+ { 39, 0x02},
+ { 40, 0x18},
+ { 41, 0x0E},
+ { 42, 0x00},
+ { 43, 0x51},
+ { 44, 0x1C},
+ { 45, 0x0E},
+ { 46, 0x20},
+ { 47, 0x52},
+ { 48, 0x20},
+ { 49, 0x0D},
+ { 50, 0xC0},
+ { 51, 0x02},
+ { 52, 0x18},
+ { 53, 0x0E},
+ { 54, 0x60},
+ { 55, 0x51},
+ { 56, 0x1C},
+ { 57, 0x0E},
+ { 58, 0x80},
+ { 59, 0x52},
+ { 60, 0x20},
+ { 61, 0x0E},
+ { 62, 0x20},
+ { 63, 0x02},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x20},
+ { 73, 0x0E},
+ { 74, 0x80},
+ { 75, 0x02},
+ { 76, 0x18},
+ { 77, 0x0B},
+ { 78, 0xE0},
+ { 79, 0x5D},
+ { 80, 0x1C},
+ { 81, 0x0C},
+ { 82, 0x40},
+ { 83, 0x63},
+ { 84, 0x1C},
+ { 85, 0x0C},
+ { 86, 0xA0},
+ { 87, 0x69},
+ { 88, 0x1C},
+ { 89, 0x0D},
+ { 90, 0x00},
+ { 91, 0x6F},
+ { 92, 0x1C},
+ { 93, 0x0D},
+ { 94, 0x60},
+ { 95, 0x75},
+ { 96, 0x1C},
+ { 97, 0x0D},
+ { 98, 0xC0},
+ { 99, 0x7B},
+ {100, 0x1C},
+ {101, 0x0E},
+ {102, 0x20},
+ {103, 0x81},
+ {104, 0x1C},
+ {105, 0x0E},
+ {106, 0x80},
+ {107, 0x87},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x10},
+ {121, 0x00},
+ {122, 0x40},
+ {123, 0x53},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x12},
+ { 8, 0x18},
+ { 9, 0x05},
+ { 10, 0x40},
+ { 11, 0x53},
+ { 12, 0x18},
+ { 13, 0x05},
+ { 14, 0x40},
+ { 15, 0x50},
+ { 16, 0x58},
+ { 17, 0x60},
+ { 18, 0x00},
+ { 19, 0x2B},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x10},
+ { 25, 0x00},
+ { 26, 0x20},
+ { 27, 0xCB},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x20},
+ { 31, 0xCC},
+ { 32, 0x10},
+ { 33, 0x00},
+ { 34, 0x20},
+ { 35, 0xCD},
+ { 36, 0x10},
+ { 37, 0x00},
+ { 38, 0x20},
+ { 39, 0xCE},
+ { 40, 0x58},
+ { 41, 0x60},
+ { 42, 0x00},
+ { 43, 0x58},
+ { 44, 0x58},
+ { 45, 0x70},
+ { 46, 0x00},
+ { 47, 0x27},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x0C},
+ { 53, 0x07},
+ { 54, 0x80},
+ { 55, 0x0D},
+ { 56, 0x48},
+ { 57, 0x03},
+ { 58, 0xE0},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x18},
+ { 65, 0x05},
+ { 66, 0x40},
+ { 67, 0xCB},
+ { 68, 0x49},
+ { 69, 0x03},
+ { 70, 0xE0},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x20},
+ { 83, 0xCF},
+ { 84, 0x18},
+ { 85, 0x05},
+ { 86, 0x40},
+ { 87, 0xCF},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x0C},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x03},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x02},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+};
+#define main44_miniDSP_A_reg_values_COEFF_START 0
+#define main44_miniDSP_A_reg_values_COEFF_SIZE 958
+#define main44_miniDSP_A_reg_values_INST_START 958
+#define main44_miniDSP_A_reg_values_INST_SIZE 2160
+
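The COEFF_*/INST_* defines above split the preceding table into a coefficient region and an instruction region of the miniDSP memory. As a rough illustration only (not taken from this patch), a table slice like that could be streamed to the codec with plain SMBus byte writes; the write_reg_table helper and the reg_value layout shown below are assumptions for the example, and the real driver's I2C plumbing may differ. Entries whose register is 0 or 0x7F act as page/book selects on TI miniDSP codecs, so writing the entries strictly in order is what retargets the later writes.

#include <linux/i2c.h>
#include <linux/types.h>

/* Assumed layout for this sketch; the driver defines its own reg_value. */
typedef struct {
	u8 reg;		/* register offset within the currently selected page */
	u8 value;	/* byte to write; reg 0 / 0x7F entries switch page/book */
} reg_value;

/* Hypothetical helper: stream one slice of a table (for instance the
 * coefficient region, COEFF_START .. COEFF_START + COEFF_SIZE - 1). */
static int write_reg_table(struct i2c_client *client,
			   const reg_value *tbl,
			   unsigned int start, unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = start; i < start + count; i++) {
		/* Each entry is a single-byte register write, issued in
		 * table order so page/book select entries take effect
		 * before the writes that follow them. */
		ret = i2c_smbus_write_byte_data(client, tbl[i].reg,
						tbl[i].value);
		if (ret < 0)
			return ret;
	}
	return 0;
}

Usage would then look like
write_reg_table(client, main44_miniDSP_A_reg_values, main44_miniDSP_A_reg_values_COEFF_START, main44_miniDSP_A_reg_values_COEFF_SIZE);
letting the caller download only the coefficient or only the instruction portion of the table.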
+reg_value main44_miniDSP_D_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x50},
+ { 0, 0x01},
+ { 8, 0xFF},
+ { 9, 0xFF},
+ { 10, 0xFF},
+ { 11, 0x00},
+ { 12, 0x80},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x7F},
+ { 17, 0xF7},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x80},
+ { 21, 0x09},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x7F},
+ { 25, 0xEF},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFF}, /*changed*/
+ { 29, 0xFF},
+ { 30, 0xFF},
+ { 31, 0x00},
+ { 32, 0x16},
+ { 33, 0xB5},
+ { 34, 0x43},
+ { 35, 0x00},
+ { 36, 0x40},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x20},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xD8},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0xB0},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x40},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x7F},
+ { 61, 0xFF},
+ { 62, 0xFF},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x80},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xC0},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x0D},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x1C},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x3E},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x78},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x02},
+ {101, 0x4C},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0xD5},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x01},
+ {109, 0x65},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x03},
+ {113, 0xE9},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x07},
+ {117, 0xCA},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0xFF},
+ {121, 0xEF},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0xFE},
+ {125, 0xEB},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0xFF},
+ { 9, 0xA8},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xFD},
+ { 13, 0x08},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xFF},
+ { 17, 0x5E},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0xFF},
+ { 21, 0xD5},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0xFE},
+ { 25, 0x36},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFA},
+ { 29, 0xAC},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xF2},
+ { 33, 0xA3},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x28},
+ { 37, 0xAB},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x3F},
+ { 41, 0xFF},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0xFF},
+ { 45, 0x98},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xF6},
+ { 49, 0xF9},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x26},
+ { 53, 0xFB},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x02},
+ { 57, 0x72},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x40},
+ { 61, 0x02},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0xFB},
+ { 65, 0xCB},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x20},
+ { 69, 0xA7},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFF},
+ { 73, 0x6A},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x3A},
+ { 77, 0x0F},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 0, 0x09},
+ { 72, 0xFF},
+ { 73, 0xFF},
+ { 74, 0xFF},
+ { 75, 0x00},
+ { 76, 0x80},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x7F},
+ { 81, 0xF7},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x80},
+ { 85, 0x09},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x7F},
+ { 89, 0xEF},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFF},
+ { 93, 0xFF},
+ { 94, 0xFF},
+ { 95, 0x00},
+ { 96, 0x16},
+ { 97, 0xB5},
+ { 98, 0x43},
+ { 99, 0x00},
+ {100, 0x40},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x20},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xD8},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0xB0},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x40},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x7F},
+ {125, 0xFF},
+ {126, 0xFF},
+ {127, 0x00},
+ { 0, 0x0A},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x80},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0xC0},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x0D},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x1C},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x3E},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x78},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x02},
+ { 45, 0x4C},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0xD5},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x01},
+ { 53, 0x65},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x03},
+ { 57, 0xE9},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x07},
+ { 61, 0xCA},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0xFF},
+ { 65, 0xEF},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xFE},
+ { 69, 0xEB},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFF},
+ { 73, 0xA8},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0xFD},
+ { 77, 0x08},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xFF},
+ { 81, 0x5E},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xFF},
+ { 85, 0xD5},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0xFE},
+ { 89, 0x36},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFA},
+ { 93, 0xAC},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0xF2},
+ { 97, 0xA3},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x28},
+ {101, 0xAB},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x3F},
+ {105, 0xFF},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xFF},
+ {109, 0x98},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xF6},
+ {113, 0xF9},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x26},
+ {117, 0xFB},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x02},
+ {121, 0x72},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x40},
+ {125, 0x02},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0B},
+ { 8, 0xFB},
+ { 9, 0xCB},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x20},
+ { 13, 0xA7},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xFF},
+ { 17, 0x6A},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x3A},
+ { 21, 0x0F},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x3C},
+ { 0, 0x01},
+ { 8, 0xC0},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xFF},
+ { 13, 0x80},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x20},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x40},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x20},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xD8},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xB0},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x01},
+ { 45, 0x99},
+ { 46, 0x9A},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0xCC},
+ { 50, 0xCD},
+ { 51, 0x00},
+ { 52, 0xB8},
+ { 53, 0x8C},
+ { 54, 0x96},
+ { 55, 0x00},
+ { 56, 0x6B},
+ { 57, 0x2D},
+ { 58, 0x1E},
+ { 59, 0x00},
+ { 60, 0x5C},
+ { 61, 0x46},
+ { 62, 0x4B},
+ { 63, 0x00},
+ { 64, 0x15},
+ { 65, 0x55},
+ { 66, 0x55},
+ { 67, 0x00},
+ { 68, 0x40},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xE0},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x48},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x60},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x20},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x20},
+ {101, 0xC8},
+ {102, 0xB6},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xC0},
+ {109, 0x88},
+ {110, 0x0C},
+ {111, 0x00},
+ {112, 0x1E},
+ {113, 0xB0},
+ {114, 0xA3},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x3F},
+ {125, 0x77},
+ {126, 0xF4},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0xE0},
+ { 9, 0x86},
+ { 10, 0xA8},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x22},
+ { 17, 0x58},
+ { 18, 0x30},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0xC1},
+ { 25, 0xA1},
+ { 26, 0x93},
+ { 27, 0x00},
+ { 28, 0x1C},
+ { 29, 0x15},
+ { 30, 0x25},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x3E},
+ { 41, 0x5E},
+ { 42, 0x6D},
+ { 43, 0x00},
+ { 44, 0xE1},
+ { 45, 0x92},
+ { 46, 0xAB},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x27},
+ { 53, 0x50},
+ { 54, 0xC7},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0xC5},
+ { 61, 0x69},
+ { 62, 0xDD},
+ { 63, 0x00},
+ { 64, 0x13},
+ { 65, 0xC6},
+ { 66, 0xC7},
+ { 67, 0x00},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x3A},
+ { 77, 0x96},
+ { 78, 0x23},
+ { 79, 0x00},
+ { 80, 0xE4},
+ { 81, 0xE8},
+ { 82, 0x72},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x33},
+ { 89, 0x86},
+ { 90, 0xC5},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0xD1},
+ { 97, 0xC6},
+ { 98, 0x99},
+ { 99, 0x00},
+ {100, 0xFF},
+ {101, 0x5F},
+ {102, 0x8F},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x2E},
+ {113, 0x39},
+ {114, 0x67},
+ {115, 0x00},
+ {116, 0xED},
+ {117, 0x19},
+ {118, 0xAC},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x4E},
+ {125, 0xA5},
+ {126, 0x44},
+ {127, 0x00},
+ { 0, 0x03},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xF7},
+ { 13, 0x89},
+ { 14, 0x02},
+ { 15, 0x00},
+ { 16, 0xD2},
+ { 17, 0x0F},
+ { 18, 0x57},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x08},
+ { 29, 0x76},
+ { 30, 0xFE},
+ { 31, 0x00},
+ { 32, 0xFF},
+ { 33, 0x4B},
+ { 34, 0x64},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xC0},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x78},
+ { 0, 0x01},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x04},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x04},
+ { 21, 0x00},
+ { 22, 0x20},
+ { 23, 0x01},
+ { 24, 0x58},
+ { 25, 0x60},
+ { 26, 0x08},
+ { 27, 0x44},
+ { 28, 0x60},
+ { 29, 0x60},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x58},
+ { 33, 0x60},
+ { 34, 0x00},
+ { 35, 0x0E},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x44},
+ { 41, 0x00},
+ { 42, 0xC0},
+ { 43, 0x16},
+ { 44, 0x08},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x75},
+ { 48, 0x08},
+ { 49, 0x00},
+ { 50, 0x20},
+ { 51, 0xB5},
+ { 52, 0x08},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x74},
+ { 56, 0x08},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0xB4},
+ { 60, 0x08},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x73},
+ { 64, 0x08},
+ { 65, 0x00},
+ { 66, 0x20},
+ { 67, 0xB3},
+ { 68, 0x08},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x72},
+ { 72, 0x08},
+ { 73, 0x00},
+ { 74, 0x20},
+ { 75, 0xB2},
+ { 76, 0x08},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x71},
+ { 80, 0x08},
+ { 81, 0x00},
+ { 82, 0x20},
+ { 83, 0xB1},
+ { 84, 0x08},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x70},
+ { 88, 0x08},
+ { 89, 0x00},
+ { 90, 0x20},
+ { 91, 0xB0},
+ { 92, 0x08},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x6F},
+ { 96, 0x08},
+ { 97, 0x00},
+ { 98, 0x20},
+ { 99, 0xAF},
+ {100, 0x08},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x6E},
+ {104, 0x08},
+ {105, 0x00},
+ {106, 0x20},
+ {107, 0xAE},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0x44},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x06},
+ { 12, 0x21},
+ { 13, 0x08},
+ { 14, 0x80},
+ { 15, 0x00},
+ { 16, 0x4B},
+ { 17, 0x00},
+ { 18, 0xC0},
+ { 19, 0x00},
+ { 20, 0x4B},
+ { 21, 0x00},
+ { 22, 0xE0},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x40},
+ { 27, 0x48},
+ { 28, 0x0C},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x0C},
+ { 33, 0x00},
+ { 34, 0x20},
+ { 35, 0x00},
+ { 36, 0x58},
+ { 37, 0x60},
+ { 38, 0x08},
+ { 39, 0x03},
+ { 40, 0x5C},
+ { 41, 0x60},
+ { 42, 0x00},
+ { 43, 0x27},
+ { 44, 0x59},
+ { 45, 0x00},
+ { 46, 0x08},
+ { 47, 0x01},
+ { 48, 0x58},
+ { 49, 0x70},
+ { 50, 0x08},
+ { 51, 0x02},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x26},
+ { 56, 0x08},
+ { 57, 0x07},
+ { 58, 0x60},
+ { 59, 0x27},
+ { 60, 0x44},
+ { 61, 0x00},
+ { 62, 0x80},
+ { 63, 0x01},
+ { 64, 0x44},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x01},
+ { 68, 0x10},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x26},
+ { 72, 0x58},
+ { 73, 0x60},
+ { 74, 0x08},
+ { 75, 0x09},
+ { 76, 0x58},
+ { 77, 0x60},
+ { 78, 0x08},
+ { 79, 0x0A},
+ { 80, 0x48},
+ { 81, 0x03},
+ { 82, 0xC0},
+ { 83, 0x00},
+ { 84, 0x58},
+ { 85, 0x70},
+ { 86, 0x08},
+ { 87, 0x04},
+ { 88, 0x10},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x28},
+ { 92, 0x59},
+ { 93, 0xA0},
+ { 94, 0x00},
+ { 95, 0x07},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x5D},
+ {101, 0xA0},
+ {102, 0x00},
+ {103, 0x28},
+ {104, 0x58},
+ {105, 0x70},
+ {106, 0x00},
+ {107, 0x07},
+ {108, 0x44},
+ {109, 0x00},
+ {110, 0xC0},
+ {111, 0x0D},
+ {112, 0x58},
+ {113, 0x60},
+ {114, 0x00},
+ {115, 0x07},
+ {116, 0x44},
+ {117, 0x00},
+ {118, 0xC0},
+ {119, 0x03},
+ {120, 0x21},
+ {121, 0x00},
+ {122, 0x80},
+ {123, 0x02},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x03},
+ { 8, 0x44},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x0D},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x21},
+ { 21, 0x00},
+ { 22, 0x80},
+ { 23, 0x02},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x44},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x0A},
+ { 44, 0x5C},
+ { 45, 0x70},
+ { 46, 0x00},
+ { 47, 0x28},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x21},
+ { 61, 0x00},
+ { 62, 0x80},
+ { 63, 0x02},
+ { 64, 0x5C},
+ { 65, 0x60},
+ { 66, 0x00},
+ { 67, 0x27},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x26},
+ { 84, 0x64},
+ { 85, 0x50},
+ { 86, 0x28},
+ { 87, 0x04},
+ { 88, 0x49},
+ { 89, 0x03},
+ { 90, 0xC0},
+ { 91, 0x00},
+ { 92, 0x64},
+ { 93, 0x40},
+ { 94, 0xA8},
+ { 95, 0x0E},
+ { 96, 0x58},
+ { 97, 0x70},
+ { 98, 0x08},
+ { 99, 0x0F},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x58},
+ {105, 0x60},
+ {106, 0x08},
+ {107, 0x0C},
+ {108, 0x39},
+ {109, 0x01},
+ {110, 0x60},
+ {111, 0x28},
+ {112, 0x21},
+ {113, 0x02},
+ {114, 0x00},
+ {115, 0x02},
+ {116, 0x25},
+ {117, 0x02},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x58},
+ {121, 0x70},
+ {122, 0x08},
+ {123, 0x0D},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x04},
+ { 8, 0x25},
+ { 9, 0x02},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x58},
+ { 13, 0x70},
+ { 14, 0x00},
+ { 15, 0x0F},
+ { 16, 0x19},
+ { 17, 0x02},
+ { 18, 0x80},
+ { 19, 0x27},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x10},
+ { 25, 0x00},
+ { 26, 0x40},
+ { 27, 0x2A},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x40},
+ { 31, 0x2B},
+ { 32, 0x10},
+ { 33, 0x00},
+ { 34, 0x80},
+ { 35, 0x29},
+ { 36, 0x19},
+ { 37, 0x02},
+ { 38, 0x60},
+ { 39, 0x2A},
+ { 40, 0x08},
+ { 41, 0x07},
+ { 42, 0x60},
+ { 43, 0x29},
+ { 44, 0x19},
+ { 45, 0x02},
+ { 46, 0x60},
+ { 47, 0x2B},
+ { 48, 0x48},
+ { 49, 0x03},
+ { 50, 0xC0},
+ { 51, 0x00},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x28},
+ { 56, 0x1D},
+ { 57, 0x02},
+ { 58, 0xE0},
+ { 59, 0x2A},
+ { 60, 0x19},
+ { 61, 0x03},
+ { 62, 0x40},
+ { 63, 0x2A},
+ { 64, 0x1D},
+ { 65, 0x03},
+ { 66, 0xC0},
+ { 67, 0x2B},
+ { 68, 0x5C},
+ { 69, 0x60},
+ { 70, 0x00},
+ { 71, 0x28},
+ { 72, 0x21},
+ { 73, 0x03},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x1D},
+ { 77, 0x02},
+ { 78, 0xE0},
+ { 79, 0x2B},
+ { 80, 0x21},
+ { 81, 0x03},
+ { 82, 0x60},
+ { 83, 0x00},
+ { 84, 0x19},
+ { 85, 0x03},
+ { 86, 0xC0},
+ { 87, 0x2A},
+ { 88, 0x1D},
+ { 89, 0x03},
+ { 90, 0x40},
+ { 91, 0x2B},
+ { 92, 0x21},
+ { 93, 0x03},
+ { 94, 0x80},
+ { 95, 0x00},
+ { 96, 0x58},
+ { 97, 0x60},
+ { 98, 0x08},
+ { 99, 0x1C},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x21},
+ {105, 0x03},
+ {106, 0xE0},
+ {107, 0x00},
+ {108, 0x49},
+ {109, 0x03},
+ {110, 0xC0},
+ {111, 0x00},
+ {112, 0x21},
+ {113, 0x02},
+ {114, 0x20},
+ {115, 0x02},
+ {116, 0x58},
+ {117, 0x70},
+ {118, 0x08},
+ {119, 0x15},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0xA0},
+ {123, 0x18},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x05},
+ { 8, 0x58},
+ { 9, 0x60},
+ { 10, 0x08},
+ { 11, 0x16},
+ { 12, 0x39},
+ { 13, 0x02},
+ { 14, 0x20},
+ { 15, 0x28},
+ { 16, 0x21},
+ { 17, 0x02},
+ { 18, 0x40},
+ { 19, 0x01},
+ { 20, 0x25},
+ { 21, 0x02},
+ { 22, 0x40},
+ { 23, 0x04},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x48},
+ { 33, 0x03},
+ { 34, 0xC0},
+ { 35, 0x00},
+ { 36, 0x21},
+ { 37, 0x03},
+ { 38, 0x80},
+ { 39, 0x03},
+ { 40, 0x49},
+ { 41, 0x03},
+ { 42, 0xC0},
+ { 43, 0x00},
+ { 44, 0x5C},
+ { 45, 0x60},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x19},
+ { 49, 0x03},
+ { 50, 0x20},
+ { 51, 0x03},
+ { 52, 0x1D},
+ { 53, 0x03},
+ { 54, 0x60},
+ { 55, 0x04},
+ { 56, 0x1D},
+ { 57, 0x03},
+ { 58, 0xA0},
+ { 59, 0x06},
+ { 60, 0x4D},
+ { 61, 0x03},
+ { 62, 0x00},
+ { 63, 0x02},
+ { 64, 0x1D},
+ { 65, 0x03},
+ { 66, 0xE0},
+ { 67, 0x07},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x25},
+ { 73, 0x03},
+ { 74, 0x80},
+ { 75, 0x02},
+ { 76, 0x19},
+ { 77, 0x04},
+ { 78, 0x40},
+ { 79, 0x06},
+ { 80, 0x1D},
+ { 81, 0x04},
+ { 82, 0x80},
+ { 83, 0x07},
+ { 84, 0x1D},
+ { 85, 0x04},
+ { 86, 0xC0},
+ { 87, 0x09},
+ { 88, 0x4D},
+ { 89, 0x04},
+ { 90, 0x20},
+ { 91, 0x05},
+ { 92, 0x1D},
+ { 93, 0x05},
+ { 94, 0x00},
+ { 95, 0x0A},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x25},
+ {101, 0x04},
+ {102, 0xA0},
+ {103, 0x02},
+ {104, 0x19},
+ {105, 0x05},
+ {106, 0x60},
+ {107, 0x09},
+ {108, 0x1D},
+ {109, 0x05},
+ {110, 0xA0},
+ {111, 0x0A},
+ {112, 0x1D},
+ {113, 0x05},
+ {114, 0xE0},
+ {115, 0x0C},
+ {116, 0x4D},
+ {117, 0x05},
+ {118, 0x40},
+ {119, 0x08},
+ {120, 0x1D},
+ {121, 0x06},
+ {122, 0x20},
+ {123, 0x0D},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x06},
+ { 8, 0x25},
+ { 9, 0x05},
+ { 10, 0xC0},
+ { 11, 0x02},
+ { 12, 0x19},
+ { 13, 0x06},
+ { 14, 0x80},
+ { 15, 0x0C},
+ { 16, 0x1D},
+ { 17, 0x06},
+ { 18, 0xC0},
+ { 19, 0x0D},
+ { 20, 0x1D},
+ { 21, 0x07},
+ { 22, 0x00},
+ { 23, 0x0F},
+ { 24, 0x4D},
+ { 25, 0x06},
+ { 26, 0x60},
+ { 27, 0x0B},
+ { 28, 0x1D},
+ { 29, 0x07},
+ { 30, 0x40},
+ { 31, 0x10},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x25},
+ { 37, 0x06},
+ { 38, 0xE0},
+ { 39, 0x02},
+ { 40, 0x19},
+ { 41, 0x07},
+ { 42, 0xA0},
+ { 43, 0x0F},
+ { 44, 0x1D},
+ { 45, 0x07},
+ { 46, 0xE0},
+ { 47, 0x10},
+ { 48, 0x1D},
+ { 49, 0x08},
+ { 50, 0x20},
+ { 51, 0x12},
+ { 52, 0x4D},
+ { 53, 0x07},
+ { 54, 0x80},
+ { 55, 0x0E},
+ { 56, 0x1D},
+ { 57, 0x08},
+ { 58, 0x60},
+ { 59, 0x13},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x25},
+ { 65, 0x08},
+ { 66, 0x00},
+ { 67, 0x02},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x11},
+ { 84, 0x5C},
+ { 85, 0x60},
+ { 86, 0x00},
+ { 87, 0x01},
+ { 88, 0x19},
+ { 89, 0x03},
+ { 90, 0x20},
+ { 91, 0x15},
+ { 92, 0x1D},
+ { 93, 0x03},
+ { 94, 0x60},
+ { 95, 0x16},
+ { 96, 0x1D},
+ { 97, 0x03},
+ { 98, 0xA0},
+ { 99, 0x18},
+ {100, 0x4D},
+ {101, 0x03},
+ {102, 0x00},
+ {103, 0x14},
+ {104, 0x1D},
+ {105, 0x03},
+ {106, 0xE0},
+ {107, 0x19},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x25},
+ {113, 0x03},
+ {114, 0x80},
+ {115, 0x02},
+ {116, 0x19},
+ {117, 0x04},
+ {118, 0x40},
+ {119, 0x18},
+ {120, 0x1D},
+ {121, 0x04},
+ {122, 0x80},
+ {123, 0x19},
+ {124, 0x1D},
+ {125, 0x04},
+ {126, 0xC0},
+ {127, 0x1B},
+ { 0, 0x07},
+ { 8, 0x4D},
+ { 9, 0x04},
+ { 10, 0x20},
+ { 11, 0x17},
+ { 12, 0x1D},
+ { 13, 0x05},
+ { 14, 0x00},
+ { 15, 0x1C},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x25},
+ { 21, 0x04},
+ { 22, 0xA0},
+ { 23, 0x02},
+ { 24, 0x19},
+ { 25, 0x05},
+ { 26, 0x60},
+ { 27, 0x1B},
+ { 28, 0x1D},
+ { 29, 0x05},
+ { 30, 0xA0},
+ { 31, 0x1C},
+ { 32, 0x1D},
+ { 33, 0x05},
+ { 34, 0xE0},
+ { 35, 0x1E},
+ { 36, 0x4D},
+ { 37, 0x05},
+ { 38, 0x40},
+ { 39, 0x1A},
+ { 40, 0x1D},
+ { 41, 0x06},
+ { 42, 0x20},
+ { 43, 0x1F},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x25},
+ { 49, 0x05},
+ { 50, 0xC0},
+ { 51, 0x02},
+ { 52, 0x19},
+ { 53, 0x06},
+ { 54, 0x80},
+ { 55, 0x1E},
+ { 56, 0x1D},
+ { 57, 0x06},
+ { 58, 0xC0},
+ { 59, 0x1F},
+ { 60, 0x1D},
+ { 61, 0x07},
+ { 62, 0x00},
+ { 63, 0x21},
+ { 64, 0x4D},
+ { 65, 0x06},
+ { 66, 0x60},
+ { 67, 0x1D},
+ { 68, 0x1D},
+ { 69, 0x07},
+ { 70, 0x40},
+ { 71, 0x22},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x25},
+ { 77, 0x06},
+ { 78, 0xE0},
+ { 79, 0x02},
+ { 80, 0x19},
+ { 81, 0x07},
+ { 82, 0xA0},
+ { 83, 0x21},
+ { 84, 0x1D},
+ { 85, 0x07},
+ { 86, 0xE0},
+ { 87, 0x22},
+ { 88, 0x1D},
+ { 89, 0x08},
+ { 90, 0x20},
+ { 91, 0x24},
+ { 92, 0x4D},
+ { 93, 0x07},
+ { 94, 0x80},
+ { 95, 0x20},
+ { 96, 0x1D},
+ { 97, 0x08},
+ { 98, 0x60},
+ { 99, 0x25},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x25},
+ {105, 0x08},
+ {106, 0x00},
+ {107, 0x02},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x10},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x23},
+ {124, 0x58},
+ {125, 0x60},
+ {126, 0x00},
+ {127, 0x05},
+ { 0, 0x08},
+ { 8, 0x18},
+ { 9, 0x01},
+ { 10, 0x80},
+ { 11, 0x11},
+ { 12, 0x18},
+ { 13, 0x01},
+ { 14, 0x80},
+ { 15, 0x23},
+ { 16, 0x18},
+ { 17, 0x01},
+ { 18, 0x80},
+ { 19, 0x00},
+ { 20, 0x44},
+ { 21, 0x00},
+ { 22, 0xC0},
+ { 23, 0x05},
+ { 24, 0x10},
+ { 25, 0x00},
+ { 26, 0x20},
+ { 27, 0x2C},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x20},
+ { 31, 0x2D},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x44},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x05},
+ { 44, 0x18},
+ { 45, 0x01},
+ { 46, 0x80},
+ { 47, 0x01},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x20},
+ { 55, 0x2C},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x10},
+ { 61, 0x00},
+ { 62, 0x20},
+ { 63, 0x2D},
+ { 64, 0x58},
+ { 65, 0x60},
+ { 66, 0x00},
+ { 67, 0x06},
+ { 68, 0x40},
+ { 69, 0x02},
+ { 70, 0x40},
+ { 71, 0x35},
+ { 72, 0x1C},
+ { 73, 0x02},
+ { 74, 0x40},
+ { 75, 0x33},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x20},
+ { 83, 0x34},
+ { 84, 0x44},
+ { 85, 0x00},
+ { 86, 0x40},
+ { 87, 0x06},
+ { 88, 0x10},
+ { 89, 0x12},
+ { 90, 0xE0},
+ { 91, 0x31},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x18},
+ {101, 0x01},
+ {102, 0x80},
+ {103, 0x33},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x44},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x19},
+ {112, 0x18},
+ {113, 0x01},
+ {114, 0x80},
+ {115, 0x31},
+ {116, 0x59},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x0E},
+ {120, 0x1C},
+ {121, 0x01},
+ {122, 0x80},
+ {123, 0x33},
+ {124, 0x40},
+ {125, 0x02},
+ {126, 0x40},
+ {127, 0x34},
+ { 0, 0x09},
+ { 8, 0x44},
+ { 9, 0x00},
+ { 10, 0xC0},
+ { 11, 0x0A},
+ { 12, 0x44},
+ { 13, 0x00},
+ { 14, 0x40},
+ { 15, 0x13},
+ { 16, 0x10},
+ { 17, 0x00},
+ { 18, 0x20},
+ { 19, 0x32},
+ { 20, 0x44},
+ { 21, 0x00},
+ { 22, 0x80},
+ { 23, 0x13},
+ { 24, 0x18},
+ { 25, 0x01},
+ { 26, 0x80},
+ { 27, 0x34},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x58},
+ { 37, 0x60},
+ { 38, 0x00},
+ { 39, 0x0E},
+ { 40, 0x10},
+ { 41, 0x00},
+ { 42, 0x20},
+ { 43, 0x32},
+ { 44, 0x20},
+ { 45, 0x02},
+ { 46, 0x00},
+ { 47, 0x02},
+ { 48, 0x44},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x13},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0x32},
+ { 60, 0x44},
+ { 61, 0x00},
+ { 62, 0x60},
+ { 63, 0x09},
+ { 64, 0x18},
+ { 65, 0x01},
+ { 66, 0x80},
+ { 67, 0x34},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x58},
+ { 77, 0x60},
+ { 78, 0x00},
+ { 79, 0x0E},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x20},
+ { 83, 0x32},
+ { 84, 0x20},
+ { 85, 0x02},
+ { 86, 0x00},
+ { 87, 0x02},
+ { 88, 0x44},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x09},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x10},
+ { 97, 0x00},
+ { 98, 0x20},
+ { 99, 0x32},
+ {100, 0x20},
+ {101, 0x02},
+ {102, 0x00},
+ {103, 0x02},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x18},
+ {113, 0x01},
+ {114, 0x80},
+ {115, 0x31},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0A},
+ { 8, 0x10},
+ { 9, 0x00},
+ { 10, 0x20},
+ { 11, 0x30},
+ { 12, 0x18},
+ { 13, 0x02},
+ { 14, 0x00},
+ { 15, 0x2C},
+ { 16, 0x18},
+ { 17, 0x02},
+ { 18, 0x00},
+ { 19, 0x2D},
+ { 20, 0x58},
+ { 21, 0x60},
+ { 22, 0x00},
+ { 23, 0x10},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x20},
+ { 31, 0x2E},
+ { 32, 0x10},
+ { 33, 0x00},
+ { 34, 0x20},
+ { 35, 0x2F},
+ { 36, 0x20},
+ { 37, 0x02},
+ { 38, 0x20},
+ { 39, 0x9F},
+ { 40, 0x18},
+ { 41, 0x01},
+ { 42, 0x80},
+ { 43, 0x2E},
+ { 44, 0x18},
+ { 45, 0x01},
+ { 46, 0x80},
+ { 47, 0x2F},
+ { 48, 0x18},
+ { 49, 0x00},
+ { 50, 0x60},
+ { 51, 0x39},
+ { 52, 0x1C},
+ { 53, 0x00},
+ { 54, 0x80},
+ { 55, 0x3B},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0x38},
+ { 60, 0x10},
+ { 61, 0x00},
+ { 62, 0x20},
+ { 63, 0x76},
+ { 64, 0x1C},
+ { 65, 0x00},
+ { 66, 0x40},
+ { 67, 0x38},
+ { 68, 0x18},
+ { 69, 0x00},
+ { 70, 0x60},
+ { 71, 0x77},
+ { 72, 0x1C},
+ { 73, 0x00},
+ { 74, 0x80},
+ { 75, 0x7B},
+ { 76, 0x1C},
+ { 77, 0x00},
+ { 78, 0x40},
+ { 79, 0x76},
+ { 80, 0x10},
+ { 81, 0x30},
+ { 82, 0x00},
+ { 83, 0x3A},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x10},
+ { 93, 0x30},
+ { 94, 0x00},
+ { 95, 0x7A},
+ { 96, 0x18},
+ { 97, 0x02},
+ { 98, 0x60},
+ { 99, 0x5F},
+ {100, 0x1C},
+ {101, 0x02},
+ {102, 0x80},
+ {103, 0x3C},
+ {104, 0x1C},
+ {105, 0x02},
+ {106, 0x60},
+ {107, 0x3A},
+ {108, 0x1C},
+ {109, 0x02},
+ {110, 0x80},
+ {111, 0x5D},
+ {112, 0x1C},
+ {113, 0x02},
+ {114, 0xA0},
+ {115, 0x3E},
+ {116, 0x1C},
+ {117, 0x02},
+ {118, 0xA0},
+ {119, 0x5B},
+ {120, 0x1C},
+ {121, 0x02},
+ {122, 0xC0},
+ {123, 0x40},
+ {124, 0x1C},
+ {125, 0x02},
+ {126, 0xC0},
+ {127, 0x59},
+ { 0, 0x0B},
+ { 8, 0x1C},
+ { 9, 0x02},
+ { 10, 0xE0},
+ { 11, 0x46},
+ { 12, 0x1C},
+ { 13, 0x02},
+ { 14, 0xE0},
+ { 15, 0x53},
+ { 16, 0x1C},
+ { 17, 0x03},
+ { 18, 0x00},
+ { 19, 0x42},
+ { 20, 0x1C},
+ { 21, 0x03},
+ { 22, 0x00},
+ { 23, 0x57},
+ { 24, 0x1C},
+ { 25, 0x03},
+ { 26, 0x20},
+ { 27, 0x44},
+ { 28, 0x1C},
+ { 29, 0x03},
+ { 30, 0x20},
+ { 31, 0x55},
+ { 32, 0x1C},
+ { 33, 0x03},
+ { 34, 0x40},
+ { 35, 0x48},
+ { 36, 0x1C},
+ { 37, 0x03},
+ { 38, 0x40},
+ { 39, 0x51},
+ { 40, 0x1C},
+ { 41, 0x03},
+ { 42, 0x60},
+ { 43, 0x4A},
+ { 44, 0x1C},
+ { 45, 0x03},
+ { 46, 0x60},
+ { 47, 0x4F},
+ { 48, 0x1C},
+ { 49, 0x03},
+ { 50, 0x80},
+ { 51, 0x3B},
+ { 52, 0x1C},
+ { 53, 0x03},
+ { 54, 0x80},
+ { 55, 0x5E},
+ { 56, 0x1C},
+ { 57, 0x03},
+ { 58, 0xA0},
+ { 59, 0x43},
+ { 60, 0x1C},
+ { 61, 0x03},
+ { 62, 0xA0},
+ { 63, 0x56},
+ { 64, 0x1C},
+ { 65, 0x03},
+ { 66, 0xC0},
+ { 67, 0x3F},
+ { 68, 0x1C},
+ { 69, 0x03},
+ { 70, 0xC0},
+ { 71, 0x5A},
+ { 72, 0x1C},
+ { 73, 0x03},
+ { 74, 0xE0},
+ { 75, 0x47},
+ { 76, 0x1C},
+ { 77, 0x03},
+ { 78, 0xE0},
+ { 79, 0x52},
+ { 80, 0x1C},
+ { 81, 0x04},
+ { 82, 0x00},
+ { 83, 0x41},
+ { 84, 0x1C},
+ { 85, 0x04},
+ { 86, 0x00},
+ { 87, 0x58},
+ { 88, 0x1C},
+ { 89, 0x04},
+ { 90, 0x20},
+ { 91, 0x3D},
+ { 92, 0x1C},
+ { 93, 0x04},
+ { 94, 0x20},
+ { 95, 0x5C},
+ { 96, 0x1C},
+ { 97, 0x04},
+ { 98, 0x40},
+ { 99, 0x45},
+ {100, 0x1C},
+ {101, 0x04},
+ {102, 0x40},
+ {103, 0x54},
+ {104, 0x1C},
+ {105, 0x04},
+ {106, 0x60},
+ {107, 0x49},
+ {108, 0x1C},
+ {109, 0x04},
+ {110, 0x60},
+ {111, 0x50},
+ {112, 0x1C},
+ {113, 0x04},
+ {114, 0x80},
+ {115, 0x4B},
+ {116, 0x1C},
+ {117, 0x04},
+ {118, 0x80},
+ {119, 0x4E},
+ {120, 0x1C},
+ {121, 0x04},
+ {122, 0xA0},
+ {123, 0x4C},
+ {124, 0x1C},
+ {125, 0x04},
+ {126, 0xA0},
+ {127, 0x4D},
+ { 0, 0x0C},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x10},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x64},
+ { 24, 0x18},
+ { 25, 0x02},
+ { 26, 0x60},
+ { 27, 0x9F},
+ { 28, 0x1C},
+ { 29, 0x02},
+ { 30, 0x80},
+ { 31, 0x7C},
+ { 32, 0x1C},
+ { 33, 0x02},
+ { 34, 0x60},
+ { 35, 0x7A},
+ { 36, 0x1C},
+ { 37, 0x02},
+ { 38, 0x80},
+ { 39, 0x9D},
+ { 40, 0x1C},
+ { 41, 0x02},
+ { 42, 0xA0},
+ { 43, 0x7E},
+ { 44, 0x1C},
+ { 45, 0x02},
+ { 46, 0xA0},
+ { 47, 0x9B},
+ { 48, 0x1C},
+ { 49, 0x02},
+ { 50, 0xC0},
+ { 51, 0x80},
+ { 52, 0x1C},
+ { 53, 0x02},
+ { 54, 0xC0},
+ { 55, 0x99},
+ { 56, 0x1C},
+ { 57, 0x02},
+ { 58, 0xE0},
+ { 59, 0x86},
+ { 60, 0x1C},
+ { 61, 0x02},
+ { 62, 0xE0},
+ { 63, 0x93},
+ { 64, 0x1C},
+ { 65, 0x03},
+ { 66, 0x00},
+ { 67, 0x82},
+ { 68, 0x1C},
+ { 69, 0x03},
+ { 70, 0x00},
+ { 71, 0x97},
+ { 72, 0x1C},
+ { 73, 0x03},
+ { 74, 0x20},
+ { 75, 0x84},
+ { 76, 0x1C},
+ { 77, 0x03},
+ { 78, 0x20},
+ { 79, 0x95},
+ { 80, 0x1C},
+ { 81, 0x03},
+ { 82, 0x40},
+ { 83, 0x88},
+ { 84, 0x1C},
+ { 85, 0x03},
+ { 86, 0x40},
+ { 87, 0x91},
+ { 88, 0x1C},
+ { 89, 0x03},
+ { 90, 0x60},
+ { 91, 0x8A},
+ { 92, 0x1C},
+ { 93, 0x03},
+ { 94, 0x60},
+ { 95, 0x8F},
+ { 96, 0x1C},
+ { 97, 0x03},
+ { 98, 0x80},
+ { 99, 0x7B},
+ {100, 0x1C},
+ {101, 0x03},
+ {102, 0x80},
+ {103, 0x9E},
+ {104, 0x1C},
+ {105, 0x03},
+ {106, 0xA0},
+ {107, 0x83},
+ {108, 0x1C},
+ {109, 0x03},
+ {110, 0xA0},
+ {111, 0x96},
+ {112, 0x1C},
+ {113, 0x03},
+ {114, 0xC0},
+ {115, 0x7F},
+ {116, 0x1C},
+ {117, 0x03},
+ {118, 0xC0},
+ {119, 0x9A},
+ {120, 0x1C},
+ {121, 0x03},
+ {122, 0xE0},
+ {123, 0x87},
+ {124, 0x1C},
+ {125, 0x03},
+ {126, 0xE0},
+ {127, 0x92},
+ { 0, 0x0D},
+ { 8, 0x1C},
+ { 9, 0x04},
+ { 10, 0x00},
+ { 11, 0x81},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0x00},
+ { 15, 0x98},
+ { 16, 0x1C},
+ { 17, 0x04},
+ { 18, 0x20},
+ { 19, 0x7D},
+ { 20, 0x1C},
+ { 21, 0x04},
+ { 22, 0x20},
+ { 23, 0x9C},
+ { 24, 0x1C},
+ { 25, 0x04},
+ { 26, 0x40},
+ { 27, 0x85},
+ { 28, 0x1C},
+ { 29, 0x04},
+ { 30, 0x40},
+ { 31, 0x94},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0x60},
+ { 35, 0x89},
+ { 36, 0x1C},
+ { 37, 0x04},
+ { 38, 0x60},
+ { 39, 0x90},
+ { 40, 0x1C},
+ { 41, 0x04},
+ { 42, 0x80},
+ { 43, 0x8B},
+ { 44, 0x1C},
+ { 45, 0x04},
+ { 46, 0x80},
+ { 47, 0x8E},
+ { 48, 0x1C},
+ { 49, 0x04},
+ { 50, 0xA0},
+ { 51, 0x8C},
+ { 52, 0x1C},
+ { 53, 0x04},
+ { 54, 0xA0},
+ { 55, 0x8D},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x10},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0xA4},
+ { 72, 0x18},
+ { 73, 0x04},
+ { 74, 0xE0},
+ { 75, 0xA3},
+ { 76, 0x1C},
+ { 77, 0x05},
+ { 78, 0x00},
+ { 79, 0xA5},
+ { 80, 0x1C},
+ { 81, 0x05},
+ { 82, 0x00},
+ { 83, 0xA2},
+ { 84, 0x1C},
+ { 85, 0x04},
+ { 86, 0xE0},
+ { 87, 0xA4},
+ { 88, 0x1C},
+ { 89, 0x05},
+ { 90, 0x20},
+ { 91, 0xA1},
+ { 92, 0x1C},
+ { 93, 0x05},
+ { 94, 0x20},
+ { 95, 0xA6},
+ { 96, 0x1C},
+ { 97, 0x05},
+ { 98, 0x40},
+ { 99, 0xA0},
+ {100, 0x1C},
+ {101, 0x05},
+ {102, 0x40},
+ {103, 0xA7},
+ {104, 0x18},
+ {105, 0x05},
+ {106, 0xA0},
+ {107, 0xA8},
+ {108, 0x1C},
+ {109, 0x05},
+ {110, 0xA0},
+ {111, 0xAA},
+ {112, 0x1C},
+ {113, 0x05},
+ {114, 0x80},
+ {115, 0xAD},
+ {116, 0x4C},
+ {117, 0x05},
+ {118, 0x80},
+ {119, 0xAB},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x5C},
+ {125, 0x90},
+ {126, 0x60},
+ {127, 0x03},
+ { 0, 0x0E},
+ { 8, 0x18},
+ { 9, 0x04},
+ { 10, 0xE0},
+ { 11, 0x63},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0xE0},
+ { 15, 0x64},
+ { 16, 0x1C},
+ { 17, 0x05},
+ { 18, 0x00},
+ { 19, 0x65},
+ { 20, 0x10},
+ { 21, 0x00},
+ { 22, 0x40},
+ { 23, 0xB4},
+ { 24, 0x1C},
+ { 25, 0x05},
+ { 26, 0x00},
+ { 27, 0x62},
+ { 28, 0x1C},
+ { 29, 0x05},
+ { 30, 0x20},
+ { 31, 0x61},
+ { 32, 0x1C},
+ { 33, 0x05},
+ { 34, 0x20},
+ { 35, 0x66},
+ { 36, 0x1C},
+ { 37, 0x05},
+ { 38, 0x40},
+ { 39, 0x60},
+ { 40, 0x1C},
+ { 41, 0x05},
+ { 42, 0x40},
+ { 43, 0x67},
+ { 44, 0x18},
+ { 45, 0x05},
+ { 46, 0xA0},
+ { 47, 0x68},
+ { 48, 0x1C},
+ { 49, 0x05},
+ { 50, 0xA0},
+ { 51, 0x6A},
+ { 52, 0x1C},
+ { 53, 0x05},
+ { 54, 0x80},
+ { 55, 0x6D},
+ { 56, 0x4C},
+ { 57, 0x05},
+ { 58, 0x80},
+ { 59, 0x6B},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x5C},
+ { 65, 0x90},
+ { 66, 0x40},
+ { 67, 0x03},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x40},
+ { 83, 0x74},
+ { 84, 0x18},
+ { 85, 0x05},
+ { 86, 0xC0},
+ { 87, 0xAB},
+ { 88, 0x1C},
+ { 89, 0x05},
+ { 90, 0xC0},
+ { 91, 0xAA},
+ { 92, 0x1C},
+ { 93, 0x05},
+ { 94, 0xE0},
+ { 95, 0xA8},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x5C},
+ {101, 0x90},
+ {102, 0x60},
+ {103, 0x03},
+ {104, 0x18},
+ {105, 0x05},
+ {106, 0xC0},
+ {107, 0x6B},
+ {108, 0x1C},
+ {109, 0x05},
+ {110, 0xC0},
+ {111, 0x6A},
+ {112, 0x1C},
+ {113, 0x05},
+ {114, 0xE0},
+ {115, 0x68},
+ {116, 0x10},
+ {117, 0x00},
+ {118, 0x40},
+ {119, 0xB3},
+ {120, 0x18},
+ {121, 0x05},
+ {122, 0x60},
+ {123, 0xA1},
+ {124, 0x5C},
+ {125, 0x90},
+ {126, 0x40},
+ {127, 0x03},
+ { 0, 0x0F},
+ { 8, 0x18},
+ { 9, 0x05},
+ { 10, 0x80},
+ { 11, 0xAA},
+ { 12, 0x1C},
+ { 13, 0x05},
+ { 14, 0xA0},
+ { 15, 0xAB},
+ { 16, 0x4C},
+ { 17, 0x05},
+ { 18, 0x80},
+ { 19, 0xAC},
+ { 20, 0x10},
+ { 21, 0x00},
+ { 22, 0x40},
+ { 23, 0x73},
+ { 24, 0x1C},
+ { 25, 0x05},
+ { 26, 0xA0},
+ { 27, 0xA8},
+ { 28, 0x18},
+ { 29, 0x05},
+ { 30, 0x60},
+ { 31, 0x61},
+ { 32, 0x5C},
+ { 33, 0x90},
+ { 34, 0x60},
+ { 35, 0x03},
+ { 36, 0x18},
+ { 37, 0x05},
+ { 38, 0x80},
+ { 39, 0x6A},
+ { 40, 0x1C},
+ { 41, 0x05},
+ { 42, 0xA0},
+ { 43, 0x6B},
+ { 44, 0x4C},
+ { 45, 0x05},
+ { 46, 0x80},
+ { 47, 0x6C},
+ { 48, 0x10},
+ { 49, 0x00},
+ { 50, 0x40},
+ { 51, 0xB2},
+ { 52, 0x1C},
+ { 53, 0x05},
+ { 54, 0xA0},
+ { 55, 0x68},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x5C},
+ { 61, 0x90},
+ { 62, 0x40},
+ { 63, 0x03},
+ { 64, 0x18},
+ { 65, 0x05},
+ { 66, 0xC0},
+ { 67, 0xAC},
+ { 68, 0x1C},
+ { 69, 0x05},
+ { 70, 0xC0},
+ { 71, 0xA8},
+ { 72, 0x1C},
+ { 73, 0x05},
+ { 74, 0xE0},
+ { 75, 0xAB},
+ { 76, 0x10},
+ { 77, 0x00},
+ { 78, 0x40},
+ { 79, 0x72},
+ { 80, 0x5C},
+ { 81, 0x90},
+ { 82, 0x60},
+ { 83, 0x03},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x10},
+ { 97, 0x00},
+ { 98, 0x40},
+ { 99, 0xB1},
+ {100, 0x18},
+ {101, 0x05},
+ {102, 0xC0},
+ {103, 0x6C},
+ {104, 0x1C},
+ {105, 0x05},
+ {106, 0xC0},
+ {107, 0x68},
+ {108, 0x1C},
+ {109, 0x05},
+ {110, 0xE0},
+ {111, 0x6B},
+ {112, 0x18},
+ {113, 0x04},
+ {114, 0xC0},
+ {115, 0x8C},
+ {116, 0x5C},
+ {117, 0x90},
+ {118, 0x40},
+ {119, 0x03},
+ {120, 0x18},
+ {121, 0x04},
+ {122, 0xE0},
+ {123, 0xA7},
+ {124, 0x1C},
+ {125, 0x05},
+ {126, 0x00},
+ {127, 0xA0},
+ { 0, 0x10},
+ { 8, 0x4C},
+ { 9, 0x04},
+ { 10, 0xE0},
+ { 11, 0x9F},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x40},
+ { 15, 0x71},
+ { 16, 0x1C},
+ { 17, 0x05},
+ { 18, 0x00},
+ { 19, 0xA6},
+ { 20, 0x1C},
+ { 21, 0x05},
+ { 22, 0x20},
+ { 23, 0xA5},
+ { 24, 0x1C},
+ { 25, 0x05},
+ { 26, 0x20},
+ { 27, 0xA1},
+ { 28, 0x1C},
+ { 29, 0x05},
+ { 30, 0x40},
+ { 31, 0xA4},
+ { 32, 0x1C},
+ { 33, 0x05},
+ { 34, 0x40},
+ { 35, 0xA2},
+ { 36, 0x18},
+ { 37, 0x05},
+ { 38, 0xA0},
+ { 39, 0xAC},
+ { 40, 0x1C},
+ { 41, 0x05},
+ { 42, 0xA0},
+ { 43, 0xAB},
+ { 44, 0x1C},
+ { 45, 0x05},
+ { 46, 0x80},
+ { 47, 0xA8},
+ { 48, 0x4C},
+ { 49, 0x05},
+ { 50, 0x80},
+ { 51, 0xA9},
+ { 52, 0x18},
+ { 53, 0x04},
+ { 54, 0xC0},
+ { 55, 0x4C},
+ { 56, 0x5C},
+ { 57, 0x90},
+ { 58, 0x60},
+ { 59, 0x03},
+ { 60, 0x18},
+ { 61, 0x04},
+ { 62, 0xE0},
+ { 63, 0x67},
+ { 64, 0x1C},
+ { 65, 0x05},
+ { 66, 0x00},
+ { 67, 0x60},
+ { 68, 0x4C},
+ { 69, 0x04},
+ { 70, 0xE0},
+ { 71, 0x5F},
+ { 72, 0x10},
+ { 73, 0x00},
+ { 74, 0x40},
+ { 75, 0xB0},
+ { 76, 0x1C},
+ { 77, 0x05},
+ { 78, 0x00},
+ { 79, 0x66},
+ { 80, 0x1C},
+ { 81, 0x05},
+ { 82, 0x20},
+ { 83, 0x65},
+ { 84, 0x1C},
+ { 85, 0x05},
+ { 86, 0x20},
+ { 87, 0x61},
+ { 88, 0x1C},
+ { 89, 0x05},
+ { 90, 0x40},
+ { 91, 0x64},
+ { 92, 0x1C},
+ { 93, 0x05},
+ { 94, 0x40},
+ { 95, 0x62},
+ { 96, 0x18},
+ { 97, 0x05},
+ { 98, 0xA0},
+ { 99, 0x6C},
+ {100, 0x1C},
+ {101, 0x05},
+ {102, 0xA0},
+ {103, 0x6B},
+ {104, 0x1C},
+ {105, 0x05},
+ {106, 0x80},
+ {107, 0x68},
+ {108, 0x4C},
+ {109, 0x05},
+ {110, 0x80},
+ {111, 0x69},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x5C},
+ {117, 0x90},
+ {118, 0x40},
+ {119, 0x03},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x00},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x11},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x40},
+ { 15, 0x6F},
+ { 16, 0x18},
+ { 17, 0x05},
+ { 18, 0xC0},
+ { 19, 0xA9},
+ { 20, 0x1C},
+ { 21, 0x05},
+ { 22, 0xC0},
+ { 23, 0xAB},
+ { 24, 0x1C},
+ { 25, 0x05},
+ { 26, 0xE0},
+ { 27, 0xAC},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x40},
+ { 31, 0x70},
+ { 32, 0x00},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x5C},
+ { 37, 0x90},
+ { 38, 0x60},
+ { 39, 0x03},
+ { 40, 0x18},
+ { 41, 0x05},
+ { 42, 0xC0},
+ { 43, 0x69},
+ { 44, 0x1C},
+ { 45, 0x05},
+ { 46, 0xC0},
+ { 47, 0x6B},
+ { 48, 0x1C},
+ { 49, 0x05},
+ { 50, 0xE0},
+ { 51, 0x6C},
+ { 52, 0x10},
+ { 53, 0x00},
+ { 54, 0x40},
+ { 55, 0xAF},
+ { 56, 0x18},
+ { 57, 0x05},
+ { 58, 0x60},
+ { 59, 0xA5},
+ { 60, 0x5C},
+ { 61, 0x90},
+ { 62, 0x40},
+ { 63, 0x03},
+ { 64, 0x18},
+ { 65, 0x05},
+ { 66, 0x80},
+ { 67, 0xAB},
+ { 68, 0x1C},
+ { 69, 0x05},
+ { 70, 0xA0},
+ { 71, 0xA9},
+ { 72, 0x4C},
+ { 73, 0x05},
+ { 74, 0x80},
+ { 75, 0xA7},
+ { 76, 0x10},
+ { 77, 0x00},
+ { 78, 0x40},
+ { 79, 0x6F},
+ { 80, 0x1C},
+ { 81, 0x05},
+ { 82, 0xA0},
+ { 83, 0xAC},
+ { 84, 0x18},
+ { 85, 0x05},
+ { 86, 0x60},
+ { 87, 0x65},
+ { 88, 0x5C},
+ { 89, 0x90},
+ { 90, 0x60},
+ { 91, 0x03},
+ { 92, 0x18},
+ { 93, 0x05},
+ { 94, 0x80},
+ { 95, 0x6B},
+ { 96, 0x1C},
+ { 97, 0x05},
+ { 98, 0xA0},
+ { 99, 0x69},
+ {100, 0x4C},
+ {101, 0x05},
+ {102, 0x80},
+ {103, 0x67},
+ {104, 0x10},
+ {105, 0x00},
+ {106, 0x40},
+ {107, 0xAE},
+ {108, 0x1C},
+ {109, 0x05},
+ {110, 0xA0},
+ {111, 0x6C},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x5C},
+ {117, 0x90},
+ {118, 0x40},
+ {119, 0x03},
+ {120, 0x18},
+ {121, 0x05},
+ {122, 0xC0},
+ {123, 0xA7},
+ {124, 0x1C},
+ {125, 0x05},
+ {126, 0xC0},
+ {127, 0xAC},
+ { 0, 0x12},
+ { 8, 0x1C},
+ { 9, 0x05},
+ { 10, 0xE0},
+ { 11, 0xA9},
+ { 12, 0x10},
+ { 13, 0x00},
+ { 14, 0x40},
+ { 15, 0x6E},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x5C},
+ { 21, 0x90},
+ { 22, 0x60},
+ { 23, 0x03},
+ { 24, 0x18},
+ { 25, 0x05},
+ { 26, 0xC0},
+ { 27, 0x67},
+ { 28, 0x1C},
+ { 29, 0x05},
+ { 30, 0xC0},
+ { 31, 0x6C},
+ { 32, 0x1C},
+ { 33, 0x05},
+ { 34, 0xE0},
+ { 35, 0x69},
+ { 36, 0x10},
+ { 37, 0x00},
+ { 38, 0x40},
+ { 39, 0xAD},
+ { 40, 0x5C},
+ { 41, 0x90},
+ { 42, 0x40},
+ { 43, 0x03},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x40},
+ { 59, 0x6D},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x02},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+};
+#define main44_miniDSP_D_reg_values_COEFF_START 0
+#define main44_miniDSP_D_reg_values_COEFF_SIZE 672
+#define main44_miniDSP_D_reg_values_INST_START 672
+#define main44_miniDSP_D_reg_values_INST_SIZE 2124
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index ac65a2d36408..cee6354656e7 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <asm/div64.h>
#include <sound/max98088.h>
+#include <sound/jack.h>
#include "max98088.h"
enum max98088_type {
@@ -54,6 +55,8 @@ struct max98088_priv {
unsigned int mic1pre;
unsigned int mic2pre;
unsigned int extmic_mode;
+ int irq;
+ struct snd_soc_jack *headset_jack;
};
static const u8 max98088_reg[M98088_REG_CNT] = {
@@ -803,6 +806,7 @@ static const struct snd_kcontrol_new max98088_snd_controls[] = {
SOC_SINGLE("THD Limiter Threshold", M98088_REG_46_THDLMT_CFG, 4, 15, 0),
SOC_SINGLE("THD Limiter Time", M98088_REG_46_THDLMT_CFG, 0, 7, 0),
+ SOC_SINGLE("Digital Mic Enable", M98088_REG_48_CFG_MIC, 4, 3, 0),
};
/* Left speaker mixer switch */
@@ -1596,7 +1600,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
static void max98088_sync_cache(struct snd_soc_codec *codec)
{
- u16 *reg_cache = codec->reg_cache;
+ u8 *reg_cache = codec->reg_cache;
int i;
if (!codec->cache_sync)
@@ -1634,6 +1638,9 @@ static int max98088_set_bias_level(struct snd_soc_codec *codec,
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
max98088_sync_cache(codec);
+ snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS,
+ M98088_SHDNRUN, M98088_SHDNRUN);
+
snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
M98088_MBEN, M98088_MBEN);
break;
@@ -1641,6 +1648,8 @@ static int max98088_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_OFF:
snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
M98088_MBEN, 0);
+ snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS,
+ M98088_SHDNRUN, 0);
codec->cache_sync = 1;
break;
}
@@ -1908,6 +1917,7 @@ static void max98088_handle_pdata(struct snd_soc_codec *codec)
struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
struct max98088_pdata *pdata = max98088->pdata;
u8 regval = 0;
+ unsigned int debounce_time;
if (!pdata) {
dev_dbg(codec->dev, "No platform data\n");
@@ -1933,26 +1943,98 @@ static void max98088_handle_pdata(struct snd_soc_codec *codec)
/* Configure equalizers */
if (pdata->eq_cfgcnt)
max98088_handle_eq_pdata(codec);
+
+ /* Configure the debounce time */
+ if (max98088->irq) {
+ switch (pdata->debounce_time_ms) {
+ case 25:
+ debounce_time = M98088_JDEB_25;
+ break;
+ case 50:
+ debounce_time = M98088_JDEB_50;
+ break;
+ case 100:
+ debounce_time = M98088_JDEB_100;
+ break;
+ case 200:
+ default:
+ debounce_time = M98088_JDEB_200;
+ }
+ snd_soc_update_bits(codec, M98088_REG_4B_CFG_JACKDET,
+ M98088_JDEB, debounce_time);
+ }
}
-#ifdef CONFIG_PM
-static int max98088_suspend(struct snd_soc_codec *codec, pm_message_t state)
+int max98088_report_jack(struct snd_soc_codec *codec)
{
- max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+ int jack_report = 0;
+
+ /* Read the jack status register */
+ value = snd_soc_read(codec, M98088_REG_02_JACK_STAUS);
+
+ if ((value & M98088_JKSNS_7) == 0)
+ jack_report |= SND_JACK_HEADPHONE;
+ if (value & M98088_JKSNS_6)
+ jack_report |= SND_JACK_MICROPHONE;
+
+ snd_soc_jack_report(max98088->headset_jack,
+ jack_report, SND_JACK_HEADSET);
return 0;
}
-static int max98088_resume(struct snd_soc_codec *codec)
+static irqreturn_t max98088_jack_handler(int irq, void *data)
{
- max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ struct snd_soc_codec *codec = data;
+
+ /* Clear the interrupt by reading the status register */
+ snd_soc_read(codec, M98088_REG_00_IRQ_STATUS);
+ max98088_report_jack(codec);
+
+ return IRQ_HANDLED;
+}
+
+int max98088_headset_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, enum snd_jack_types type)
+{
+ struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+ max98088->headset_jack = jack;
+
+ if (max98088->irq) {
+ if (type & SND_JACK_HEADSET) {
+ /* headphone + microphone detection */
+ snd_soc_update_bits(codec, M98088_REG_4E_BIAS_CNTL,
+ M98088_JDWK, 0);
+ } else {
+ /* headphone detection only */
+ snd_soc_update_bits(codec, M98088_REG_4E_BIAS_CNTL,
+ M98088_JDWK, 1);
+ }
+ /* Enable the Jack Detection Circuitry */
+ snd_soc_update_bits(codec, M98088_REG_4B_CFG_JACKDET,
+ M98088_JDETEN, M98088_JDETEN);
+
+ /* JDET is always set the first time JDETEN is set, so clear it */
+ snd_soc_read(codec, M98088_REG_00_IRQ_STATUS);
+
+ /* After setting JDETEN, JKSNS is only updated after the hardware
+ debounce time, so wait before reading the status */
+ msleep(max98088->pdata->debounce_time_ms);
+
+ /* Report the jack status at boot-up */
+ max98088_report_jack(codec);
+
+ /* Enable the jack detection interrupt */
+ snd_soc_update_bits(codec, M98088_REG_0F_IRQ_ENABLE,
+ M98088_IJDET, M98088_IJDET);
+ }
return 0;
}
-#else
-#define max98088_suspend NULL
-#define max98088_resume NULL
-#endif
+EXPORT_SYMBOL_GPL(max98088_headset_detect);
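+
+/*
+ * Illustrative usage sketch (the names below are hypothetical): a machine
+ * driver creates a jack and hands it to this codec driver, e.g.
+ *
+ * struct snd_soc_jack hs_jack;
+ *
+ * snd_soc_jack_new(codec, "Headset", SND_JACK_HEADSET, &hs_jack);
+ * max98088_headset_detect(codec, &hs_jack, SND_JACK_HEADSET);
+ *
+ * Insertion and removal events are then reported through hs_jack.
+ */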
static int max98088_probe(struct snd_soc_codec *codec)
{
@@ -1961,6 +2043,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
int ret = 0;
codec->cache_sync = 1;
+ codec->dapm.idle_bias_off = 1;
ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
if (ret != 0) {
@@ -1990,6 +2073,18 @@ static int max98088_probe(struct snd_soc_codec *codec)
max98088->mic1pre = 0;
max98088->mic2pre = 0;
+ if (max98088->irq) {
+ /* register an audio interrupt */
+ ret = request_threaded_irq(max98088->irq, NULL,
+ max98088_jack_handler,
+ IRQF_TRIGGER_FALLING,
+ "max98088", codec);
+ if (ret) {
+ dev_err(codec->dev, "Failed to request IRQ: %d\n", ret);
+ goto err_access;
+ }
+ }
+
ret = snd_soc_read(codec, M98088_REG_FF_REV_ID);
if (ret < 0) {
dev_err(codec->dev, "Failed to read device revision: %d\n",
@@ -2037,6 +2132,32 @@ static int max98088_remove(struct snd_soc_codec *codec)
return 0;
}
+#ifdef CONFIG_PM
+static int max98088_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+
+ disable_irq(max98088->irq);
+ max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int max98088_resume(struct snd_soc_codec *codec)
+{
+ struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+
+ max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ max98088_report_jack(codec);
+ enable_irq(max98088->irq);
+
+ return 0;
+}
+#else
+#define max98088_suspend NULL
+#define max98088_resume NULL
+#endif
+
static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
.probe = max98088_probe,
.remove = max98088_remove,
@@ -2068,6 +2189,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, max98088);
max98088->control_data = i2c;
max98088->pdata = i2c->dev.platform_data;
+ max98088->irq = i2c->irq;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_max98088, &max98088_dai[0], 2);
diff --git a/sound/soc/codecs/max98088.h b/sound/soc/codecs/max98088.h
index be89a4f4aab8..cf4b04d2d07a 100644
--- a/sound/soc/codecs/max98088.h
+++ b/sound/soc/codecs/max98088.h
@@ -194,6 +194,25 @@
#define M98088_PWRSV8K (1<<1)
#define M98088_PWRSV (1<<0)
+/* M98088_REG_4E_BIAS_CNTL */
+ #define M98088_JDWK (1<<1)
+
+/* M98088_REG_4B_CFG_JACKDET */
+ #define M98088_JDETEN (1<<7)
+ #define M98088_JDEB (3<<0)
+ #define M98088_JDEB_25 (0<<0)
+ #define M98088_JDEB_50 (1<<0)
+ #define M98088_JDEB_100 (2<<0)
+ #define M98088_JDEB_200 (3<<0)
+
+
+/* M98088_REG_0F_IRQ_ENABLE */
+ #define M98088_IJDET (1<<1)
+
+/* M98088_REG_02_JACK_STAUS */
+ #define M98088_JKSNS_7 (1<<7)
+ #define M98088_JKSNS_6 (1<<6)
+
/* Line inputs */
#define LINE_INA 0
#define LINE_INB 1
@@ -203,4 +222,7 @@
#define M98088_BYTE1(w) ((w >> 8) & 0xff)
#define M98088_BYTE0(w) (w & 0xff)
+int max98088_headset_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, enum snd_jack_types type);
+
#endif
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 668434d44303..3f873b6d75a3 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -618,14 +618,13 @@ static int max98095_volatile(struct snd_soc_codec *codec, unsigned int reg)
static int max98095_hw_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- u8 data[2];
+ int ret;
- data[0] = reg;
- data[1] = value;
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
+ codec->cache_bypass = 1;
+ ret = snd_soc_write(codec, reg, value);
+ codec->cache_bypass = 0;
+
+ return ret ? -EIO : 0;
}
/*
@@ -1992,12 +1991,19 @@ static void max98095_handle_eq_pdata(struct snd_soc_codec *codec)
dev_err(codec->dev, "Failed to add EQ control: %d\n", ret);
}
-static int max98095_get_bq_channel(const char *name)
+static const char *bq_mode_name[] = {"Biquad1 Mode", "Biquad2 Mode"};
+
+static int max98095_get_bq_channel(struct snd_soc_codec *codec,
+ const char *name)
{
- if (strcmp(name, "Biquad1 Mode") == 0)
- return 0;
- if (strcmp(name, "Biquad2 Mode") == 0)
- return 1;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bq_mode_name); i++)
+ if (strcmp(name, bq_mode_name[i]) == 0)
+ return i;
+
+ /* Shouldn't happen */
+ dev_err(codec->dev, "Bad biquad channel name '%s'\n", name);
return -EINVAL;
}
@@ -2007,14 +2013,15 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct max98095_pdata *pdata = max98095->pdata;
- int channel = max98095_get_bq_channel(kcontrol->id.name);
+ int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
struct max98095_cdata *cdata;
int sel = ucontrol->value.integer.value[0];
struct max98095_biquad_cfg *coef_set;
int fs, best, best_val, i;
int regmask, regsave;
- BUG_ON(channel > 1);
+ if (channel < 0)
+ return channel;
if (!pdata || !max98095->bq_textcnt)
return 0;
@@ -2066,9 +2073,12 @@ static int max98095_get_bq_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
- int channel = max98095_get_bq_channel(kcontrol->id.name);
+ int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
struct max98095_cdata *cdata;
+ if (channel < 0)
+ return channel;
+
cdata = &max98095->dai[channel];
ucontrol->value.enumerated.item[0] = cdata->bq_sel;
@@ -2086,15 +2096,16 @@ static void max98095_handle_bq_pdata(struct snd_soc_codec *codec)
int ret;
struct snd_kcontrol_new controls[] = {
- SOC_ENUM_EXT("Biquad1 Mode",
+ SOC_ENUM_EXT((char *)bq_mode_name[0],
max98095->bq_enum,
max98095_get_bq_enum,
max98095_put_bq_enum),
- SOC_ENUM_EXT("Biquad2 Mode",
+ SOC_ENUM_EXT((char *)bq_mode_name[1],
max98095->bq_enum,
max98095_get_bq_enum,
max98095_put_bq_enum),
};
+ BUILD_BUG_ON(ARRAY_SIZE(controls) != ARRAY_SIZE(bq_mode_name));
cfg = pdata->bq_cfg;
cfgcnt = pdata->bq_cfgcnt;
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
new file mode 100644
index 000000000000..c6f0ad011a67
--- /dev/null
+++ b/sound/soc/codecs/rt5640.c
@@ -0,0 +1,2509 @@
+/*
+ * rt5640.c -- RT5640 ALSA SoC audio codec driver
+ *
+ * Copyright 2011 Realtek Semiconductor Corp.
+ * Author: Johnny Hsu <johnnyhsu@realtek.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rt5640.h"
+#if defined(CONFIG_SND_SOC_RT5642) || defined(CONFIG_SND_SOC_RT5642_MODULE)
+#include "rt5640-dsp.h"
+#endif
+
+#define RT5640_DEMO 1
+#define RT5640_REG_RW 1
+#define RT5640_DET_EXT_MIC 0
+
+#ifdef RT5640_DEMO
+struct rt5640_init_reg {
+ u8 reg;
+ u16 val;
+};
+
+static struct rt5640_init_reg init_list[] = {
+ {RT5640_DUMMY1 , 0x3701},/*fa[12:13] = 1'b;fa[8~10]=1;fa[0]=1*/
+ {RT5640_DEPOP_M1 , 0x0019},/* 8e[4:3] = 11'b; 8e[0] = 1'b */
+ {RT5640_DEPOP_M2 , 0x3100},/* 8f[13] = 1'b */
+ {RT5640_ADDA_CLK1 , 0x1114},/* 73[2] = 1'b */
+ {RT5640_MICBIAS , 0x3030},/* 93[5:4] = 11'b */
+ {RT5640_PRIV_INDEX , 0x003d},/* PR3d[12] = 1'b */
+ {RT5640_PRIV_DATA , 0x3600},
+ {RT5640_CLS_D_OUT , 0xa000},/* 8d[11] = 0'b */
+ {RT5640_PRIV_INDEX , 0x001c},/* PR1c = 0D21'h */
+ {RT5640_PRIV_DATA , 0x0D21},
+ {RT5640_PRIV_INDEX , 0x001b},/* PR1B = 0D21'h */
+ {RT5640_PRIV_DATA , 0x0000},
+ {RT5640_PRIV_INDEX , 0x0012},/* PR12 = 0aa8'h */
+ {RT5640_PRIV_DATA , 0x0aa8},
+ {RT5640_PRIV_INDEX , 0x0014},/* PR14 = 0aaa'h */
+ {RT5640_PRIV_DATA , 0x0aaa},
+ {RT5640_PRIV_INDEX , 0x0020},/* PR20 = 6110'h */
+ {RT5640_PRIV_DATA , 0x6110},
+ {RT5640_PRIV_INDEX , 0x0021},/* PR21 = e0e0'h */
+ {RT5640_PRIV_DATA , 0xe0e0},
+ {RT5640_PRIV_INDEX , 0x0023},/* PR23 = 1804'h */
+ {RT5640_PRIV_DATA , 0x1804},
+ /*playback*/
+ {RT5640_STO_DAC_MIXER , 0x1414},/*Dig inf 1 -> Sto DAC mixer -> DACL*/
+ {RT5640_OUT_L3_MIXER , 0x01fe},/*DACL1 -> OUTMIXL*/
+ {RT5640_OUT_R3_MIXER , 0x01fe},/*DACR1 -> OUTMIXR */
+ {RT5640_HP_VOL , 0x8888},/* OUTMIX -> HPVOL */
+ {RT5640_HPO_MIXER , 0xc000},/* HPVOL -> HPOLMIX */
+/* {RT5640_HPO_MIXER , 0xa000},// DAC1 -> HPOLMIX */
+ {RT5640_SPK_L_MIXER , 0x0036},/* DACL1 -> SPKMIXL */
+ {RT5640_SPK_R_MIXER , 0x0036},/* DACR1 -> SPKMIXR */
+ {RT5640_SPK_VOL , 0x8888},/* SPKMIX -> SPKVOL */
+ {RT5640_SPO_L_MIXER , 0xe800},/* SPKVOLL -> SPOLMIX */
+ {RT5640_SPO_R_MIXER , 0x2800},/* SPKVOLR -> SPORMIX */
+/* {RT5640_SPO_L_MIXER , 0xb800},//DAC -> SPOLMIX */
+/* {RT5640_SPO_R_MIXER , 0x1800},//DAC -> SPORMIX */
+ {RT5640_I2S1_SDP , 0xD000},/*change IIS1 and IIS2 */
+ /*record*/
+ {RT5640_IN1_IN2 , 0x5080},/* IN1 boost 40dB & differential mode */
+ {RT5640_IN3_IN4 , 0x0500},/* IN2 boost 40dB & single-ended mode */
+ {RT5640_REC_L2_MIXER , 0x005f},/* enable Mic1 -> RECMIXL */
+ {RT5640_REC_R2_MIXER , 0x005f},/* enable Mic1 -> RECMIXR */
+/* {RT5640_REC_L2_MIXER , 0x006f},//Mic2 -> RECMIXL */
+/* {RT5640_REC_R2_MIXER , 0x006f},//Mic2 -> RECMIXR */
+ {RT5640_STO_ADC_MIXER , 0x3020},/* ADC -> Sto ADC mixer */
+
+#if RT5640_DET_EXT_MIC
+ {RT5640_MICBIAS , 0x3800},/* enable MICBIAS short current */
+ {RT5640_GPIO_CTRL1 , 0x8400},/* set GPIO1 to IRQ */
+ {RT5640_GPIO_CTRL3 , 0x0004},/* set GPIO1 output */
+ {RT5640_IRQ_CTRL2 , 0x8000},/*set MICBIAS short current to IRQ */
+ /*( if sticky set regBE : 8800 ) */
+#endif
+
+};
+#define RT5640_INIT_REG_LEN ARRAY_SIZE(init_list)
+
+static int rt5640_reg_init(struct snd_soc_codec *codec)
+{
+ int i;
+ for (i = 0; i < RT5640_INIT_REG_LEN; i++)
+ snd_soc_write(codec, init_list[i].reg, init_list[i].val);
+ return 0;
+}
+#endif
+
+static const u16 rt5640_reg[RT5640_VENDOR_ID2 + 1] = {
+ [RT5640_RESET] = 0x000c,
+ [RT5640_SPK_VOL] = 0xc8c8,
+ [RT5640_HP_VOL] = 0xc8c8,
+ [RT5640_OUTPUT] = 0xc8c8,
+ [RT5640_MONO_OUT] = 0x8000,
+ [RT5640_INL_INR_VOL] = 0x0808,
+ [RT5640_DAC1_DIG_VOL] = 0xafaf,
+ [RT5640_DAC2_DIG_VOL] = 0xafaf,
+ [RT5640_ADC_DIG_VOL] = 0x2f2f,
+ [RT5640_ADC_DATA] = 0x2f2f,
+ [RT5640_STO_ADC_MIXER] = 0x7060,
+ [RT5640_MONO_ADC_MIXER] = 0x7070,
+ [RT5640_AD_DA_MIXER] = 0x8080,
+ [RT5640_STO_DAC_MIXER] = 0x5454,
+ [RT5640_MONO_DAC_MIXER] = 0x5454,
+ [RT5640_DIG_MIXER] = 0xaa00,
+ [RT5640_DSP_PATH2] = 0xa000,
+ [RT5640_REC_L2_MIXER] = 0x007f,
+ [RT5640_REC_R2_MIXER] = 0x007f,
+ [RT5640_HPO_MIXER] = 0xe000,
+ [RT5640_SPK_L_MIXER] = 0x003e,
+ [RT5640_SPK_R_MIXER] = 0x003e,
+ [RT5640_SPO_L_MIXER] = 0xf800,
+ [RT5640_SPO_R_MIXER] = 0x3800,
+ [RT5640_SPO_CLSD_RATIO] = 0x0004,
+ [RT5640_MONO_MIXER] = 0xfc00,
+ [RT5640_OUT_L3_MIXER] = 0x01ff,
+ [RT5640_OUT_R3_MIXER] = 0x01ff,
+ [RT5640_LOUT_MIXER] = 0xf000,
+ [RT5640_PWR_ANLG1] = 0x00c0,
+ [RT5640_I2S1_SDP] = 0x8000,
+ [RT5640_I2S2_SDP] = 0x8000,
+ [RT5640_I2S3_SDP] = 0x8000,
+ [RT5640_ADDA_CLK1] = 0x1110,
+ [RT5640_ADDA_CLK2] = 0x0c00,
+ [RT5640_DMIC] = 0x1d00,
+ [RT5640_ASRC_3] = 0x0008,
+ [RT5640_HP_OVCD] = 0x0600,
+ [RT5640_CLS_D_OVCD] = 0x0228,
+ [RT5640_CLS_D_OUT] = 0xa800,
+ [RT5640_DEPOP_M1] = 0x0004,
+ [RT5640_DEPOP_M2] = 0x1100,
+ [RT5640_DEPOP_M3] = 0x0646,
+ [RT5640_CHARGE_PUMP] = 0x0c00,
+ [RT5640_MICBIAS] = 0x3000,
+ [RT5640_EQ_CTRL1] = 0x2080,
+ [RT5640_DRC_AGC_1] = 0x2206,
+ [RT5640_DRC_AGC_2] = 0x1f00,
+ [RT5640_ANC_CTRL1] = 0x034b,
+ [RT5640_ANC_CTRL2] = 0x0066,
+ [RT5640_ANC_CTRL3] = 0x000b,
+ [RT5640_GPIO_CTRL1] = 0x0400,
+ [RT5640_DSP_CTRL3] = 0x2000,
+ [RT5640_BASE_BACK] = 0x0013,
+ [RT5640_MP3_PLUS1] = 0x0680,
+ [RT5640_MP3_PLUS2] = 0x1c17,
+ [RT5640_3D_HP] = 0x8c00,
+ [RT5640_ADJ_HPF] = 0x2a20,
+ [RT5640_HP_CALIB_AMP_DET] = 0x0400,
+ [RT5640_SV_ZCD1] = 0x0809,
+ [RT5640_VENDOR_ID1] = 0x10ec,
+ [RT5640_VENDOR_ID2] = 0x6231,
+};
+
+static int rt5640_reset(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, RT5640_RESET, 0);
+}
+
+/**
+ * rt5640_index_write - Write private register.
+ * @codec: SoC audio codec device.
+ * @reg: Private register index.
+ * @value: Private register data.
+ *
+ * Modify a private register for advanced settings. Private registers are
+ * written through the index (0x6a) and data (0x6c) registers.
+ *
+ * Returns 0 for success or negative error code.
+ */
+static int rt5640_index_write(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
+{
+ int ret;
+
+ ret = snd_soc_write(codec, RT5640_PRIV_INDEX, reg);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set private addr: %d\n", ret);
+ goto err;
+ }
+ ret = snd_soc_write(codec, RT5640_PRIV_DATA, value);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set private value: %d\n", ret);
+ goto err;
+ }
+ return 0;
+
+err:
+ return ret;
+}
+
+/**
+ * rt5640_index_read - Read private register.
+ * @codec: SoC audio codec device.
+ * @reg: Private register index.
+ *
+ * Read an advanced setting from a private register. Private registers are
+ * read through the index (0x6a) and data (0x6c) registers.
+ *
+ * Returns private register value or negative error code.
+ */
+static unsigned int rt5640_index_read(
+ struct snd_soc_codec *codec, unsigned int reg)
+{
+ int ret;
+
+ ret = snd_soc_write(codec, RT5640_PRIV_INDEX, reg);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set private addr: %d\n", ret);
+ return ret;
+ }
+ return snd_soc_read(codec, RT5640_PRIV_DATA);
+}
+
+/**
+ * rt5640_index_update_bits - update private register bits
+ * @codec: audio codec
+ * @reg: Private register index.
+ * @mask: register mask
+ * @value: new value
+ *
+ * Writes new register value.
+ *
+ * Returns 1 for change, 0 for no change, or negative error code.
+ */
+static int rt5640_index_update_bits(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int mask, unsigned int value)
+{
+ unsigned int old, new;
+ int change, ret;
+
+ ret = rt5640_index_read(codec, reg);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read private reg: %d\n", ret);
+ goto err;
+ }
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+ change = old != new;
+ if (change) {
+ ret = rt5640_index_write(codec, reg, new);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to write private reg: %d\n", ret);
+ goto err;
+ }
+ }
+ return change;
+
+err:
+ return ret;
+}
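+
+/*
+ * Illustrative example, using the PR3d[12] setting from init_list above:
+ *
+ * rt5640_index_update_bits(codec, 0x3d, 0x1000, 0x1000);
+ *
+ * is equivalent to writing 0x3d to RT5640_PRIV_INDEX (0x6a) and then
+ * read-modify-writing bit 12 of RT5640_PRIV_DATA (0x6c).
+ */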
+
+static int rt5640_volatile_register(
+ struct snd_soc_codec *codec, unsigned int reg)
+{
+ switch (reg) {
+ case RT5640_RESET:
+ case RT5640_PRIV_DATA:
+ case RT5640_ASRC_5:
+ case RT5640_EQ_CTRL1:
+ case RT5640_DRC_AGC_1:
+ case RT5640_ANC_CTRL1:
+ case RT5640_IRQ_CTRL2:
+ case RT5640_INT_IRQ_ST:
+ case RT5640_DSP_CTRL2:
+ case RT5640_DSP_CTRL3:
+ case RT5640_PGM_REG_ARR1:
+ case RT5640_PGM_REG_ARR3:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int rt5640_readable_register(
+ struct snd_soc_codec *codec, unsigned int reg)
+{
+ switch (reg) {
+ case RT5640_RESET:
+ case RT5640_SPK_VOL:
+ case RT5640_HP_VOL:
+ case RT5640_OUTPUT:
+ case RT5640_MONO_OUT:
+ case RT5640_IN1_IN2:
+ case RT5640_IN3_IN4:
+ case RT5640_INL_INR_VOL:
+ case RT5640_DAC1_DIG_VOL:
+ case RT5640_DAC2_DIG_VOL:
+ case RT5640_DAC2_CTRL:
+ case RT5640_ADC_DIG_VOL:
+ case RT5640_ADC_DATA:
+ case RT5640_ADC_BST_VOL:
+ case RT5640_STO_ADC_MIXER:
+ case RT5640_MONO_ADC_MIXER:
+ case RT5640_AD_DA_MIXER:
+ case RT5640_STO_DAC_MIXER:
+ case RT5640_MONO_DAC_MIXER:
+ case RT5640_DIG_MIXER:
+ case RT5640_DSP_PATH1:
+ case RT5640_DSP_PATH2:
+ case RT5640_DIG_INF_DATA:
+ case RT5640_REC_L1_MIXER:
+ case RT5640_REC_L2_MIXER:
+ case RT5640_REC_R1_MIXER:
+ case RT5640_REC_R2_MIXER:
+ case RT5640_HPO_MIXER:
+ case RT5640_SPK_L_MIXER:
+ case RT5640_SPK_R_MIXER:
+ case RT5640_SPO_L_MIXER:
+ case RT5640_SPO_R_MIXER:
+ case RT5640_SPO_CLSD_RATIO:
+ case RT5640_MONO_MIXER:
+ case RT5640_OUT_L1_MIXER:
+ case RT5640_OUT_L2_MIXER:
+ case RT5640_OUT_L3_MIXER:
+ case RT5640_OUT_R1_MIXER:
+ case RT5640_OUT_R2_MIXER:
+ case RT5640_OUT_R3_MIXER:
+ case RT5640_LOUT_MIXER:
+ case RT5640_PWR_DIG1:
+ case RT5640_PWR_DIG2:
+ case RT5640_PWR_ANLG1:
+ case RT5640_PWR_ANLG2:
+ case RT5640_PWR_MIXER:
+ case RT5640_PWR_VOL:
+ case RT5640_PRIV_INDEX:
+ case RT5640_PRIV_DATA:
+ case RT5640_I2S1_SDP:
+ case RT5640_I2S2_SDP:
+ case RT5640_I2S3_SDP:
+ case RT5640_ADDA_CLK1:
+ case RT5640_ADDA_CLK2:
+ case RT5640_DMIC:
+ case RT5640_GLB_CLK:
+ case RT5640_PLL_CTRL1:
+ case RT5640_PLL_CTRL2:
+ case RT5640_ASRC_1:
+ case RT5640_ASRC_2:
+ case RT5640_ASRC_3:
+ case RT5640_ASRC_4:
+ case RT5640_ASRC_5:
+ case RT5640_HP_OVCD:
+ case RT5640_CLS_D_OVCD:
+ case RT5640_CLS_D_OUT:
+ case RT5640_DEPOP_M1:
+ case RT5640_DEPOP_M2:
+ case RT5640_DEPOP_M3:
+ case RT5640_CHARGE_PUMP:
+ case RT5640_PV_DET_SPK_G:
+ case RT5640_MICBIAS:
+ case RT5640_EQ_CTRL1:
+ case RT5640_EQ_CTRL2:
+ case RT5640_WIND_FILTER:
+ case RT5640_DRC_AGC_1:
+ case RT5640_DRC_AGC_2:
+ case RT5640_DRC_AGC_3:
+ case RT5640_SVOL_ZC:
+ case RT5640_ANC_CTRL1:
+ case RT5640_ANC_CTRL2:
+ case RT5640_ANC_CTRL3:
+ case RT5640_JD_CTRL:
+ case RT5640_ANC_JD:
+ case RT5640_IRQ_CTRL1:
+ case RT5640_IRQ_CTRL2:
+ case RT5640_INT_IRQ_ST:
+ case RT5640_GPIO_CTRL1:
+ case RT5640_GPIO_CTRL2:
+ case RT5640_GPIO_CTRL3:
+ case RT5640_DSP_CTRL1:
+ case RT5640_DSP_CTRL2:
+ case RT5640_DSP_CTRL3:
+ case RT5640_DSP_CTRL4:
+ case RT5640_PGM_REG_ARR1:
+ case RT5640_PGM_REG_ARR2:
+ case RT5640_PGM_REG_ARR3:
+ case RT5640_PGM_REG_ARR4:
+ case RT5640_PGM_REG_ARR5:
+ case RT5640_SCB_FUNC:
+ case RT5640_SCB_CTRL:
+ case RT5640_BASE_BACK:
+ case RT5640_MP3_PLUS1:
+ case RT5640_MP3_PLUS2:
+ case RT5640_3D_HP:
+ case RT5640_ADJ_HPF:
+ case RT5640_HP_CALIB_AMP_DET:
+ case RT5640_HP_CALIB2:
+ case RT5640_SV_ZCD1:
+ case RT5640_SV_ZCD2:
+ case RT5640_DUMMY1:
+ case RT5640_DUMMY2:
+ case RT5640_DUMMY3:
+ case RT5640_VENDOR_ID:
+ case RT5640_VENDOR_ID1:
+ case RT5640_VENDOR_ID2:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+
+/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+static unsigned int bst_tlv[] = {
+ TLV_DB_RANGE_HEAD(7),
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
+ 3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
+ 6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
+ 8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
+};
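+
+/*
+ * Worked example for the boost table above: index 4 falls in the 3..5
+ * range, so its gain is 3000 + (4 - 3) * 500 = 3500 (0.01 dB units),
+ * i.e. +35 dB, matching the list in the comment.
+ */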
+
+static int rt5640_dmic_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = rt5640->dmic_en;
+
+ return 0;
+}
+
+static int rt5640_dmic_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ if (rt5640->dmic_en == ucontrol->value.integer.value[0])
+ return 0;
+
+ rt5640->dmic_en = ucontrol->value.integer.value[0];
+ switch (rt5640->dmic_en) {
+ case RT5640_DMIC_DIS:
+ snd_soc_update_bits(codec, RT5640_GPIO_CTRL1,
+ RT5640_GP2_PIN_MASK | RT5640_GP3_PIN_MASK |
+ RT5640_GP4_PIN_MASK,
+ RT5640_GP2_PIN_GPIO2 | RT5640_GP3_PIN_GPIO3 |
+ RT5640_GP4_PIN_GPIO4);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_1_DP_MASK | RT5640_DMIC_2_DP_MASK,
+ RT5640_DMIC_1_DP_GPIO3 | RT5640_DMIC_2_DP_GPIO4);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_1_EN_MASK | RT5640_DMIC_2_EN_MASK,
+ RT5640_DMIC_1_DIS | RT5640_DMIC_2_DIS);
+ break;
+
+ case RT5640_DMIC1:
+ snd_soc_update_bits(codec, RT5640_GPIO_CTRL1,
+ RT5640_GP2_PIN_MASK | RT5640_GP3_PIN_MASK,
+ RT5640_GP2_PIN_DMIC1_SCL | RT5640_GP3_PIN_DMIC1_SDA);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_1L_LH_MASK | RT5640_DMIC_1R_LH_MASK |
+ RT5640_DMIC_1_DP_MASK,
+ RT5640_DMIC_1L_LH_FALLING | RT5640_DMIC_1R_LH_RISING |
+ RT5640_DMIC_1_DP_IN1P);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_1_EN_MASK, RT5640_DMIC_1_EN);
+ break;
+
+ case RT5640_DMIC2:
+ snd_soc_update_bits(codec, RT5640_GPIO_CTRL1,
+ RT5640_GP2_PIN_MASK | RT5640_GP4_PIN_MASK,
+ RT5640_GP2_PIN_DMIC1_SCL | RT5640_GP4_PIN_DMIC2_SDA);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_2L_LH_MASK | RT5640_DMIC_2R_LH_MASK |
+ RT5640_DMIC_2_DP_MASK,
+ RT5640_DMIC_2L_LH_FALLING | RT5640_DMIC_2R_LH_RISING |
+ RT5640_DMIC_2_DP_IN1N);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_2_EN_MASK, RT5640_DMIC_2_EN);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/* IN1/IN2 Input Type */
+static const char *rt5640_input_mode[] = {
+ "Single ended", "Differential"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_in1_mode_enum, RT5640_IN1_IN2,
+ RT5640_IN_SFT1, rt5640_input_mode);
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_in2_mode_enum, RT5640_IN3_IN4,
+ RT5640_IN_SFT2, rt5640_input_mode);
+
+/* Interface data select */
+static const char *rt5640_data_select[] = {
+ "Normal", "left copy to right", "right copy to left", "Swap"};
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if1_adc_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF1_ADC_SEL_SFT, rt5640_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if2_dac_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF2_DAC_SEL_SFT, rt5640_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if2_adc_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF2_ADC_SEL_SFT, rt5640_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if3_dac_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF3_DAC_SEL_SFT, rt5640_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if3_adc_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF3_ADC_SEL_SFT, rt5640_data_select);
+
+/* Class D speaker gain ratio */
+static const char *rt5640_clsd_spk_ratio[] = {"1.66x", "1.83x", "1.94x", "2x",
+ "2.11x", "2.22x", "2.33x", "2.44x", "2.55x", "2.66x", "2.77x"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_clsd_spk_ratio_enum, RT5640_CLS_D_OUT,
+ RT5640_CLSD_RATIO_SFT, rt5640_clsd_spk_ratio);
+
+/* DMIC */
+static const char *rt5640_dmic_mode[] = {"Disable", "DMIC1", "DMIC2"};
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_dmic_enum, 0, 0, rt5640_dmic_mode);
+
+
+
+#ifdef RT5640_REG_RW
+#define REGVAL_MAX 0xffff
+static unsigned int regctl_addr;
+static int rt5640_regctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = REGVAL_MAX;
+ return 0;
+}
+
+static int rt5640_regctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ ucontrol->value.integer.value[0] = regctl_addr;
+ ucontrol->value.integer.value[1] = snd_soc_read(codec, regctl_addr);
+ return 0;
+}
+
+static int rt5640_regctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ regctl_addr = ucontrol->value.integer.value[0];
+ if (ucontrol->value.integer.value[1] <= REGVAL_MAX)
+ snd_soc_write(codec, regctl_addr,
+ ucontrol->value.integer.value[1]);
+ return 0;
+}
+#endif
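+
+/*
+ * The "Register Control" above is a debug helper: value[0] selects the
+ * register address and value[1] is the value written to it; reads of the
+ * control return the address and the register's current contents. An
+ * illustrative invocation from user space (placeholders, not a recipe):
+ *
+ * amixer cset name='Register Control' <reg>,<val>
+ */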
+
+
+#define VOL_RESCALE_MAX_VOL 0x27 /* 39 */
+#define VOL_RESCALE_MIX_RANGE 0x1F /* 31 */
+
+static int rt5640_vol_rescale_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned int val = snd_soc_read(codec, mc->reg);
+
+ ucontrol->value.integer.value[0] = VOL_RESCALE_MAX_VOL -
+ ((val & RT5640_L_VOL_MASK) >> mc->shift);
+ ucontrol->value.integer.value[1] = VOL_RESCALE_MAX_VOL -
+ (val & RT5640_R_VOL_MASK);
+
+ return 0;
+}
+
+static int rt5640_vol_rescale_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned int val, val2;
+
+ val = VOL_RESCALE_MAX_VOL - ucontrol->value.integer.value[0];
+ val2 = VOL_RESCALE_MAX_VOL - ucontrol->value.integer.value[1];
+ return snd_soc_update_bits_locked(codec, mc->reg, RT5640_L_VOL_MASK |
+ RT5640_R_VOL_MASK, val << mc->shift | val2);
+}
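+
+/*
+ * Rescale arithmetic, for reference: user-space values run 0..31
+ * (VOL_RESCALE_MIX_RANGE) and map to register values 0x27 - v, i.e.
+ * 39 down to 8, so register values 0..7 are not reachable through
+ * these rescaled controls.
+ */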
+
+
+static const struct snd_kcontrol_new rt5640_snd_controls[] = {
+ /* Speaker Output Volume */
+ SOC_DOUBLE("Speaker Playback Switch", RT5640_SPK_VOL,
+ RT5640_L_MUTE_SFT, RT5640_R_MUTE_SFT, 1, 1),
+
+ SOC_DOUBLE_EXT_TLV("Speaker Playback Volume", RT5640_SPK_VOL,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT, VOL_RESCALE_MIX_RANGE, 0,
+ rt5640_vol_rescale_get, rt5640_vol_rescale_put, out_vol_tlv),
+
+ /* Headphone Output Volume */
+ SOC_DOUBLE("HP Playback Switch", RT5640_HP_VOL,
+ RT5640_L_MUTE_SFT, RT5640_R_MUTE_SFT, 1, 1),
+
+ SOC_DOUBLE_EXT_TLV("HP Playback Volume", RT5640_HP_VOL,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT, VOL_RESCALE_MIX_RANGE, 0,
+ rt5640_vol_rescale_get, rt5640_vol_rescale_put, out_vol_tlv),
+
+ /* OUTPUT Control */
+ SOC_DOUBLE("OUT Playback Switch", RT5640_OUTPUT,
+ RT5640_L_MUTE_SFT, RT5640_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE("OUT Channel Switch", RT5640_OUTPUT,
+ RT5640_VOL_L_SFT, RT5640_VOL_R_SFT, 1, 1),
+ SOC_DOUBLE_TLV("OUT Playback Volume", RT5640_OUTPUT,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT, 39, 1, out_vol_tlv),
+ /* MONO Output Control */
+ SOC_SINGLE("Mono Playback Switch", RT5640_MONO_OUT,
+ RT5640_L_MUTE_SFT, 1, 1),
+ /* DAC Digital Volume */
+ SOC_DOUBLE("DAC2 Playback Switch", RT5640_DAC2_CTRL,
+ RT5640_M_DAC_L2_VOL_SFT, RT5640_M_DAC_R2_VOL_SFT, 1, 1),
+ SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5640_DAC1_DIG_VOL,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
+ 175, 0, dac_vol_tlv),
+ SOC_DOUBLE_TLV("Mono DAC Playback Volume", RT5640_DAC2_DIG_VOL,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
+ 175, 0, dac_vol_tlv),
+ /* IN1/IN2 Control */
+ SOC_ENUM("IN1 Mode Control", rt5640_in1_mode_enum),
+ SOC_SINGLE_TLV("IN1 Boost", RT5640_IN1_IN2,
+ RT5640_BST_SFT1, 8, 0, bst_tlv),
+ SOC_ENUM("IN2 Mode Control", rt5640_in2_mode_enum),
+ SOC_SINGLE_TLV("IN2 Boost", RT5640_IN3_IN4,
+ RT5640_BST_SFT2, 8, 0, bst_tlv),
+ /* INL/INR Volume Control */
+ SOC_DOUBLE_TLV("IN Capture Volume", RT5640_INL_INR_VOL,
+ RT5640_INL_VOL_SFT, RT5640_INR_VOL_SFT,
+ 31, 1, in_vol_tlv),
+ /* ADC Digital Volume Control */
+ SOC_DOUBLE("ADC Capture Switch", RT5640_ADC_DIG_VOL,
+ RT5640_L_MUTE_SFT, RT5640_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("ADC Capture Volume", RT5640_ADC_DIG_VOL,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
+ 127, 0, adc_vol_tlv),
+ SOC_DOUBLE_TLV("Mono ADC Capture Volume", RT5640_ADC_DATA,
+ RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
+ 127, 0, adc_vol_tlv),
+ /* ADC Boost Volume Control */
+ SOC_DOUBLE_TLV("ADC Boost Gain", RT5640_ADC_BST_VOL,
+ RT5640_ADC_L_BST_SFT, RT5640_ADC_R_BST_SFT,
+ 3, 0, adc_bst_tlv),
+ /* Class D speaker gain ratio */
+ SOC_ENUM("Class D SPK Ratio Control", rt5640_clsd_spk_ratio_enum),
+ /* DMIC */
+ SOC_ENUM_EXT("DMIC Switch", rt5640_dmic_enum,
+ rt5640_dmic_get, rt5640_dmic_put),
+
+#ifdef RT5640_REG_RW
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Register Control",
+ .info = rt5640_regctl_info,
+ .get = rt5640_regctl_get,
+ .put = rt5640_regctl_put,
+ },
+#endif
+};
+
+/**
+ * set_dmic_clk - Set the DMIC clock divider.
+ *
+ * @w: DAPM widget.
+ * @kcontrol: The kcontrol of this widget.
+ * @event: Event id.
+ *
+ * Choose a DMIC clock between 1 MHz and 3 MHz, as close to 3 MHz as the
+ * available dividers allow.
+ */
+static int set_dmic_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+ int div[] = {2, 3, 4, 6, 12}, idx = -EINVAL, i, rate, red, bound, temp;
+
+ rate = rt5640->lrck[rt5640->aif_pu] << 8;
+ red = 3000000 * 12;
+ for (i = 0; i < ARRAY_SIZE(div); i++) {
+ bound = div[i] * 3000000;
+ if (rate > bound)
+ continue;
+ temp = bound - rate;
+ if (temp < red) {
+ red = temp;
+ idx = i;
+ }
+ }
+ if (idx < 0)
+ dev_err(codec->dev, "Failed to set DMIC clock\n");
+ else
+ snd_soc_update_bits(codec, RT5640_DMIC, RT5640_DMIC_CLK_MASK,
+ idx << RT5640_DMIC_CLK_SFT);
+ return idx;
+}
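+
+/*
+ * Example of the divider selection above: with a 48 kHz LRCK the reference
+ * rate is 48000 << 8 = 12.288 MHz. Dividers 2, 3 and 4 would put the DMIC
+ * clock above 3 MHz, so the first acceptable divider is 6, giving
+ * 12.288 MHz / 6 = 2.048 MHz, the closest value not exceeding 3 MHz.
+ */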
+
+static int check_sysclk1_source(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int val;
+
+ val = snd_soc_read(source->codec, RT5640_GLB_CLK);
+ val &= RT5640_SCLK_SRC_MASK;
+ if (val == RT5640_SCLK_SRC_PLL1 || val == RT5640_SCLK_SRC_PLL1T)
+ return 1;
+ else
+ return 0;
+}
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt5640_sto_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5640_STO_ADC_MIXER,
+ RT5640_M_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5640_STO_ADC_MIXER,
+ RT5640_M_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_sto_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5640_STO_ADC_MIXER,
+ RT5640_M_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5640_STO_ADC_MIXER,
+ RT5640_M_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_mono_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5640_MONO_ADC_MIXER,
+ RT5640_M_MONO_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5640_MONO_ADC_MIXER,
+ RT5640_M_MONO_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_mono_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5640_MONO_ADC_MIXER,
+ RT5640_M_MONO_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5640_MONO_ADC_MIXER,
+ RT5640_M_MONO_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5640_AD_DA_MIXER,
+ RT5640_M_ADCMIX_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INF1 Switch", RT5640_AD_DA_MIXER,
+ RT5640_M_IF1_DAC_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5640_AD_DA_MIXER,
+ RT5640_M_ADCMIX_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INF1 Switch", RT5640_AD_DA_MIXER,
+ RT5640_M_IF1_DAC_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_sto_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_STO_DAC_MIXER,
+ RT5640_M_DAC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_STO_DAC_MIXER,
+ RT5640_M_DAC_L2_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ANC Switch", RT5640_STO_DAC_MIXER,
+ RT5640_M_ANC_DAC_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_sto_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_STO_DAC_MIXER,
+ RT5640_M_DAC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_STO_DAC_MIXER,
+ RT5640_M_DAC_R2_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ANC Switch", RT5640_STO_DAC_MIXER,
+ RT5640_M_ANC_DAC_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_mono_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_MONO_DAC_MIXER,
+ RT5640_M_DAC_L1_MONO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_MONO_DAC_MIXER,
+ RT5640_M_DAC_L2_MONO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_MONO_DAC_MIXER,
+ RT5640_M_DAC_R2_MONO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_mono_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_MONO_DAC_MIXER,
+ RT5640_M_DAC_R1_MONO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_MONO_DAC_MIXER,
+ RT5640_M_DAC_R2_MONO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_MONO_DAC_MIXER,
+ RT5640_M_DAC_L2_MONO_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_dig_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_DIG_MIXER,
+ RT5640_M_STO_L_DAC_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_DIG_MIXER,
+ RT5640_M_DAC_L2_DAC_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_dig_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_DIG_MIXER,
+ RT5640_M_STO_R_DAC_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_DIG_MIXER,
+ RT5640_M_DAC_R2_DAC_R_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt5640_rec_l_mix[] = {
+ SOC_DAPM_SINGLE("HPOL Switch", RT5640_REC_L2_MIXER,
+ RT5640_M_HP_L_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INL Switch", RT5640_REC_L2_MIXER,
+ RT5640_M_IN_L_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5640_REC_L2_MIXER,
+ RT5640_M_BST4_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_REC_L2_MIXER,
+ RT5640_M_BST1_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUT MIXL Switch", RT5640_REC_L2_MIXER,
+ RT5640_M_OM_L_RM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_rec_r_mix[] = {
+ SOC_DAPM_SINGLE("HPOR Switch", RT5640_REC_R2_MIXER,
+ RT5640_M_HP_R_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5640_REC_R2_MIXER,
+ RT5640_M_IN_R_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5640_REC_R2_MIXER,
+ RT5640_M_BST4_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_REC_R2_MIXER,
+ RT5640_M_BST1_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUT MIXR Switch", RT5640_REC_R2_MIXER,
+ RT5640_M_OM_R_RM_R_SFT, 1, 1),
+};
+
+/* Analog Output Mixer */
+static const struct snd_kcontrol_new rt5640_spk_l_mix[] = {
+ SOC_DAPM_SINGLE("REC MIXL Switch", RT5640_SPK_L_MIXER,
+ RT5640_M_RM_L_SM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INL Switch", RT5640_SPK_L_MIXER,
+ RT5640_M_IN_L_SM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_SPK_L_MIXER,
+ RT5640_M_DAC_L1_SM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_SPK_L_MIXER,
+ RT5640_M_DAC_L2_SM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUT MIXL Switch", RT5640_SPK_L_MIXER,
+ RT5640_M_OM_L_SM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_spk_r_mix[] = {
+ SOC_DAPM_SINGLE("REC MIXR Switch", RT5640_SPK_R_MIXER,
+ RT5640_M_RM_R_SM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5640_SPK_R_MIXER,
+ RT5640_M_IN_R_SM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_SPK_R_MIXER,
+ RT5640_M_DAC_R1_SM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_SPK_R_MIXER,
+ RT5640_M_DAC_R2_SM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUT MIXR Switch", RT5640_SPK_R_MIXER,
+ RT5640_M_OM_R_SM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_out_l_mix[] = {
+ SOC_DAPM_SINGLE("SPK MIXL Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_SM_L_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_BST1_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INL Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_IN_L_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("REC MIXL Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_RM_L_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_DAC_R2_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_DAC_L2_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_OUT_L3_MIXER,
+ RT5640_M_DAC_L1_OM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_out_r_mix[] = {
+ SOC_DAPM_SINGLE("SPK MIXR Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_SM_L_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_BST4_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_BST1_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("INR Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_IN_R_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("REC MIXR Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_RM_R_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_DAC_L2_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_DAC_R2_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_OUT_R3_MIXER,
+ RT5640_M_DAC_R1_OM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_spo_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_SPO_L_MIXER,
+ RT5640_M_DAC_R1_SPM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_SPO_L_MIXER,
+ RT5640_M_DAC_L1_SPM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("SPKVOL R Switch", RT5640_SPO_L_MIXER,
+ RT5640_M_SV_R_SPM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("SPKVOL L Switch", RT5640_SPO_L_MIXER,
+ RT5640_M_SV_L_SPM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_SPO_L_MIXER,
+ RT5640_M_BST1_SPM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_spo_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_SPO_R_MIXER,
+ RT5640_M_DAC_R1_SPM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("SPKVOL R Switch", RT5640_SPO_R_MIXER,
+ RT5640_M_SV_R_SPM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_SPO_R_MIXER,
+ RT5640_M_BST1_SPM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_hpo_mix[] = {
+ SOC_DAPM_SINGLE("DAC2 Switch", RT5640_HPO_MIXER,
+ RT5640_M_DAC2_HM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5640_HPO_MIXER,
+ RT5640_M_DAC1_HM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("HPVOL Switch", RT5640_HPO_MIXER,
+ RT5640_M_HPVOL_HM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_lout_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5640_LOUT_MIXER,
+ RT5640_M_DAC_L1_LM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5640_LOUT_MIXER,
+ RT5640_M_DAC_R1_LM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTVOL L Switch", RT5640_LOUT_MIXER,
+ RT5640_M_OV_L_LM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTVOL R Switch", RT5640_LOUT_MIXER,
+ RT5640_M_OV_R_LM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5640_mono_mix[] = {
+ SOC_DAPM_SINGLE("DAC R2 Switch", RT5640_MONO_MIXER,
+ RT5640_M_DAC_R2_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L2 Switch", RT5640_MONO_MIXER,
+ RT5640_M_DAC_L2_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTVOL R Switch", RT5640_MONO_MIXER,
+ RT5640_M_OV_R_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTVOL L Switch", RT5640_MONO_MIXER,
+ RT5640_M_OV_L_MM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5640_MONO_MIXER,
+ RT5640_M_BST1_MM_SFT, 1, 1),
+};
+
+/* INL/R source */
+static const char *rt5640_inl_src[] = {"IN2P", "MonoP"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_inl_enum, RT5640_INL_INR_VOL,
+ RT5640_INL_SEL_SFT, rt5640_inl_src);
+
+static const struct snd_kcontrol_new rt5640_inl_mux =
+ SOC_DAPM_ENUM("INL source", rt5640_inl_enum);
+
+static const char *rt5640_inr_src[] = {"IN2N", "MonoN"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_inr_enum, RT5640_INL_INR_VOL,
+ RT5640_INR_SEL_SFT, rt5640_inr_src);
+
+static const struct snd_kcontrol_new rt5640_inr_mux =
+ SOC_DAPM_ENUM("INR source", rt5640_inr_enum);
+
+/* Stereo ADC source */
+static const char *rt5640_stereo_adc1_src[] = {"DIG MIX", "ADC"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_stereo_adc1_enum, RT5640_STO_ADC_MIXER,
+ RT5640_ADC_1_SRC_SFT, rt5640_stereo_adc1_src);
+
+static const struct snd_kcontrol_new rt5640_sto_adc_l1_mux =
+ SOC_DAPM_ENUM("Stereo ADC L1 source", rt5640_stereo_adc1_enum);
+
+static const struct snd_kcontrol_new rt5640_sto_adc_r1_mux =
+ SOC_DAPM_ENUM("Stereo ADC R1 source", rt5640_stereo_adc1_enum);
+
+static const char *rt5640_stereo_adc2_src[] = {"DMIC1", "DMIC2", "DIG MIX"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_stereo_adc2_enum, RT5640_STO_ADC_MIXER,
+ RT5640_ADC_2_SRC_SFT, rt5640_stereo_adc2_src);
+
+static const struct snd_kcontrol_new rt5640_sto_adc_l2_mux =
+ SOC_DAPM_ENUM("Stereo ADC L2 source", rt5640_stereo_adc2_enum);
+
+static const struct snd_kcontrol_new rt5640_sto_adc_r2_mux =
+ SOC_DAPM_ENUM("Stereo ADC R2 source", rt5640_stereo_adc2_enum);
+
+/* Mono ADC source */
+static const char *rt5640_mono_adc_l1_src[] = {"Mono DAC MIXL", "ADCL"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_mono_adc_l1_enum, RT5640_MONO_ADC_MIXER,
+ RT5640_MONO_ADC_L1_SRC_SFT, rt5640_mono_adc_l1_src);
+
+static const struct snd_kcontrol_new rt5640_mono_adc_l1_mux =
+ SOC_DAPM_ENUM("Mono ADC1 left source", rt5640_mono_adc_l1_enum);
+
+static const char *rt5640_mono_adc_l2_src[] = {
+ "DMIC L1", "DMIC L2", "Mono DAC MIXL"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_mono_adc_l2_enum, RT5640_MONO_ADC_MIXER,
+ RT5640_MONO_ADC_L2_SRC_SFT, rt5640_mono_adc_l2_src);
+
+static const struct snd_kcontrol_new rt5640_mono_adc_l2_mux =
+ SOC_DAPM_ENUM("Mono ADC2 left source", rt5640_mono_adc_l2_enum);
+
+static const char *rt5640_mono_adc_r1_src[] = {"Mono DAC MIXR", "ADCR"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_mono_adc_r1_enum, RT5640_MONO_ADC_MIXER,
+ RT5640_MONO_ADC_R1_SRC_SFT, rt5640_mono_adc_r1_src);
+
+static const struct snd_kcontrol_new rt5640_mono_adc_r1_mux =
+ SOC_DAPM_ENUM("Mono ADC1 right source", rt5640_mono_adc_r1_enum);
+
+static const char *rt5640_mono_adc_r2_src[] = {
+ "DMIC R1", "DMIC R2", "Mono DAC MIXR"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_mono_adc_r2_enum, RT5640_MONO_ADC_MIXER,
+ RT5640_MONO_ADC_R2_SRC_SFT, rt5640_mono_adc_r2_src);
+
+static const struct snd_kcontrol_new rt5640_mono_adc_r2_mux =
+ SOC_DAPM_ENUM("Mono ADC2 right source", rt5640_mono_adc_r2_enum);
+
+/* DAC2 channel source */
+static const char *rt5640_dac_l2_src[] = {"IF2", "IF3", "TxDC", "Base L/R"};
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_dac_l2_enum, RT5640_DSP_PATH2,
+ RT5640_DAC_L2_SEL_SFT, rt5640_dac_l2_src);
+
+static const struct snd_kcontrol_new rt5640_dac_l2_mux =
+ SOC_DAPM_ENUM("DAC2 left channel source", rt5640_dac_l2_enum);
+
+static const char *rt5640_dac_r2_src[] = {"IF2", "IF3", "TxDC"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_dac_r2_enum, RT5640_DSP_PATH2,
+ RT5640_DAC_R2_SEL_SFT, rt5640_dac_r2_src);
+
+static const struct snd_kcontrol_new rt5640_dac_r2_mux =
+ SOC_DAPM_ENUM("DAC2 right channel source", rt5640_dac_r2_enum);
+
+/* Interface 2 ADC channel source */
+static const char *rt5640_if2_adc_l_src[] = {"TxDP", "Mono ADC MIXL"};
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if2_adc_l_enum, RT5640_DSP_PATH2,
+ RT5640_IF2_ADC_L_SEL_SFT, rt5640_if2_adc_l_src);
+
+static const struct snd_kcontrol_new rt5640_if2_adc_l_mux =
+ SOC_DAPM_ENUM("IF2 ADC left channel source", rt5640_if2_adc_l_enum);
+
+static const char *rt5640_if2_adc_r_src[] = {"TxDP", "Mono ADC MIXR"};
+
+static const SOC_ENUM_SINGLE_DECL(rt5640_if2_adc_r_enum, RT5640_DSP_PATH2,
+ RT5640_IF2_ADC_R_SEL_SFT, rt5640_if2_adc_r_src);
+
+static const struct snd_kcontrol_new rt5640_if2_adc_r_mux =
+ SOC_DAPM_ENUM("IF2 ADC right channel source", rt5640_if2_adc_r_enum);
+
+/* Digital interface (DAI) to I2S port mapping */
+static const char *rt5640_dai_iis_map[] = {"1:1|2:2|3:3", "1:1|2:3|3:2",
+ "1:3|2:1|3:2", "1:3|2:2|3:1", "1:2|2:3|3:1",
+ "1:2|2:1|3:3", "1:1|2:1|3:3", "1:2|2:2|3:3"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_dai_iis_map_enum, RT5640_I2S1_SDP,
+ RT5640_I2S_IF_SFT, rt5640_dai_iis_map);
+
+static const struct snd_kcontrol_new rt5640_dai_mux =
+ SOC_DAPM_ENUM("DAI select", rt5640_dai_iis_map_enum);
+
+/* SDI select */
+static const char *rt5640_sdi_sel[] = {"IF1", "IF2"};
+
+static const SOC_ENUM_SINGLE_DECL(
+ rt5640_sdi_sel_enum, RT5640_I2S2_SDP,
+ RT5640_I2S2_SDI_SFT, rt5640_sdi_sel);
+
+static const struct snd_kcontrol_new rt5640_sdi_mux =
+ SOC_DAPM_ENUM("SDI select", rt5640_sdi_sel_enum);
+
+static int spk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ pr_info("spk_event --SND_SOC_DAPM_POST_PMU\n");
+ snd_soc_update_bits(codec, RT5640_PWR_DIG1, 0x0001, 0x0001);
+ rt5640_index_update_bits(codec, 0x1c, 0xf000, 0xf000);
+ /* rt5640_index_write(codec, 0x1c, 0xfd21); */
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ pr_info("spk_event --SND_SOC_DAPM_POST_PMD\n");
+ /* rt5640_index_write(codec, 0x1c, 0xfd00); */
+ rt5640_index_update_bits(codec, 0x1c, 0xf000, 0x0000);
+ snd_soc_update_bits(codec, RT5640_PWR_DIG1, 0x0001, 0x0000);
+ break;
+
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static int hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ pr_info("hp_event --SND_SOC_DAPM_POST_PMU\n");
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ pr_info("hp_event --SND_SOC_DAPM_POST_PMD\n");
+ break;
+
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("PLL1", RT5640_PWR_ANLG2,
+ RT5640_PWR_PLL_BIT, 0, NULL, 0),
+ /* Input Side */
+ /* micbias */
+ SND_SOC_DAPM_SUPPLY("LDO2", RT5640_PWR_ANLG1,
+ RT5640_PWR_LDO2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MICBIAS("micbias1", RT5640_PWR_ANLG2,
+ RT5640_PWR_MB1_BIT, 0),
+ SND_SOC_DAPM_MICBIAS("micbias2", RT5640_PWR_ANLG2,
+ RT5640_PWR_MB2_BIT, 0),
+ /* Input Lines */
+
+ SND_SOC_DAPM_INPUT("MIC1"),
+ SND_SOC_DAPM_INPUT("MIC2"),
+ SND_SOC_DAPM_INPUT("DMIC1"),
+ SND_SOC_DAPM_INPUT("DMIC2"),
+ SND_SOC_DAPM_INPUT("IN1P"),
+ SND_SOC_DAPM_INPUT("IN1N"),
+ SND_SOC_DAPM_INPUT("IN2P"),
+ SND_SOC_DAPM_INPUT("IN2N"),
+ SND_SOC_DAPM_INPUT("DMIC L1"),
+ SND_SOC_DAPM_INPUT("DMIC R1"),
+ SND_SOC_DAPM_INPUT("DMIC L2"),
+ SND_SOC_DAPM_INPUT("DMIC R2"),
+ SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+ set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
+ /* Boost */
+ SND_SOC_DAPM_PGA("BST1", RT5640_PWR_ANLG2,
+ RT5640_PWR_BST1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("BST2", RT5640_PWR_ANLG2,
+ RT5640_PWR_BST4_BIT, 0, NULL, 0),
+ /* Input Volume */
+ SND_SOC_DAPM_PGA("INL VOL", RT5640_PWR_VOL,
+ RT5640_PWR_IN_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("INR VOL", RT5640_PWR_VOL,
+ RT5640_PWR_IN_R_BIT, 0, NULL, 0),
+ /* IN Mux */
+ SND_SOC_DAPM_MUX("INL Mux", SND_SOC_NOPM, 0, 0, &rt5640_inl_mux),
+ SND_SOC_DAPM_MUX("INR Mux", SND_SOC_NOPM, 0, 0, &rt5640_inr_mux),
+ /* REC Mixer */
+ SND_SOC_DAPM_MIXER("RECMIXL", RT5640_PWR_MIXER, RT5640_PWR_RM_L_BIT, 0,
+ rt5640_rec_l_mix, ARRAY_SIZE(rt5640_rec_l_mix)),
+ SND_SOC_DAPM_MIXER("RECMIXR", RT5640_PWR_MIXER, RT5640_PWR_RM_R_BIT, 0,
+ rt5640_rec_r_mix, ARRAY_SIZE(rt5640_rec_r_mix)),
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC L", NULL, RT5640_PWR_DIG1,
+ RT5640_PWR_ADC_L_BIT, 0),
+ SND_SOC_DAPM_ADC("ADC R", NULL, RT5640_PWR_DIG1,
+ RT5640_PWR_ADC_R_BIT, 0),
+ /* ADC Mux */
+ SND_SOC_DAPM_MUX("Stereo ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_sto_adc_l2_mux),
+ SND_SOC_DAPM_MUX("Stereo ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_sto_adc_r2_mux),
+ SND_SOC_DAPM_MUX("Stereo ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_sto_adc_l1_mux),
+ SND_SOC_DAPM_MUX("Stereo ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_sto_adc_r1_mux),
+ SND_SOC_DAPM_MUX("Mono ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_mono_adc_l2_mux),
+ SND_SOC_DAPM_MUX("Mono ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_mono_adc_l1_mux),
+ SND_SOC_DAPM_MUX("Mono ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_mono_adc_r1_mux),
+ SND_SOC_DAPM_MUX("Mono ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_mono_adc_r2_mux),
+ /* ADC Mixer */
+ SND_SOC_DAPM_SUPPLY("stereo filter", RT5640_PWR_DIG2,
+ RT5640_PWR_ADC_SF_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Stereo ADC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5640_sto_adc_l_mix, ARRAY_SIZE(rt5640_sto_adc_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo ADC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5640_sto_adc_r_mix, ARRAY_SIZE(rt5640_sto_adc_r_mix)),
+ SND_SOC_DAPM_SUPPLY("mono left filter", RT5640_PWR_DIG2,
+ RT5640_PWR_ADC_MF_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Mono ADC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5640_mono_adc_l_mix, ARRAY_SIZE(rt5640_mono_adc_l_mix)),
+ SND_SOC_DAPM_SUPPLY("mono right filter", RT5640_PWR_DIG2,
+ RT5640_PWR_ADC_MF_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Mono ADC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5640_mono_adc_r_mix, ARRAY_SIZE(rt5640_mono_adc_r_mix)),
+
+ /* IF2 Mux */
+ SND_SOC_DAPM_MUX("IF2 ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_if2_adc_l_mux),
+ SND_SOC_DAPM_MUX("IF2 ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_if2_adc_r_mux),
+
+ /* Digital Interface */
+ SND_SOC_DAPM_SUPPLY("I2S1", RT5640_PWR_DIG1,
+ RT5640_PWR_I2S1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S2", RT5640_PWR_DIG1,
+ RT5640_PWR_I2S2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S3", RT5640_PWR_DIG1,
+ RT5640_PWR_I2S3_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF3 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Digital Interface Select */
+ SND_SOC_DAPM_MUX("DAI1 RX Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI1 TX Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI1 IF1 Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI1 IF2 Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("SDI1 TX Mux", SND_SOC_NOPM, 0, 0, &rt5640_sdi_mux),
+
+ SND_SOC_DAPM_MUX("DAI2 RX Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI2 TX Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI2 IF1 Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI2 IF2 Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("SDI2 TX Mux", SND_SOC_NOPM, 0, 0, &rt5640_sdi_mux),
+
+ SND_SOC_DAPM_MUX("DAI3 RX Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+ SND_SOC_DAPM_MUX("DAI3 TX Mux", SND_SOC_NOPM, 0, 0, &rt5640_dai_mux),
+
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF2RX", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF3RX", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF3TX", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+ /* Audio DSP */
+ SND_SOC_DAPM_PGA("Audio DSP", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* ANC */
+ SND_SOC_DAPM_PGA("ANC", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Output Side */
+ /* DAC mixer before sound effect */
+ SND_SOC_DAPM_MIXER("DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5640_dac_l_mix, ARRAY_SIZE(rt5640_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5640_dac_r_mix, ARRAY_SIZE(rt5640_dac_r_mix)),
+
+ /* DAC2 channel Mux */
+ SND_SOC_DAPM_MUX("DAC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_dac_l2_mux),
+ SND_SOC_DAPM_MUX("DAC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5640_dac_r2_mux),
+
+ /* DAC Mixer */
+ SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5640_sto_dac_l_mix, ARRAY_SIZE(rt5640_sto_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5640_sto_dac_r_mix, ARRAY_SIZE(rt5640_sto_dac_r_mix)),
+ SND_SOC_DAPM_MIXER("Mono DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5640_mono_dac_l_mix, ARRAY_SIZE(rt5640_mono_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Mono DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5640_mono_dac_r_mix, ARRAY_SIZE(rt5640_mono_dac_r_mix)),
+ SND_SOC_DAPM_MIXER("DIG MIXL", SND_SOC_NOPM, 0, 0,
+ rt5640_dig_l_mix, ARRAY_SIZE(rt5640_dig_l_mix)),
+ SND_SOC_DAPM_MIXER("DIG MIXR", SND_SOC_NOPM, 0, 0,
+ rt5640_dig_r_mix, ARRAY_SIZE(rt5640_dig_r_mix)),
+ /* DACs */
+ SND_SOC_DAPM_DAC("DAC L1", NULL, RT5640_PWR_DIG1,
+ RT5640_PWR_DAC_L1_BIT, 0),
+ SND_SOC_DAPM_DAC("DAC L2", NULL, RT5640_PWR_DIG1,
+ RT5640_PWR_DAC_L2_BIT, 0),
+ SND_SOC_DAPM_DAC("DAC R1", NULL, RT5640_PWR_DIG1,
+ RT5640_PWR_DAC_R1_BIT, 0),
+ SND_SOC_DAPM_DAC("DAC R2", NULL, RT5640_PWR_DIG1,
+ RT5640_PWR_DAC_R2_BIT, 0),
+ /* SPK/OUT Mixer */
+ SND_SOC_DAPM_MIXER("SPK MIXL", RT5640_PWR_MIXER, RT5640_PWR_SM_L_BIT,
+ 0, rt5640_spk_l_mix, ARRAY_SIZE(rt5640_spk_l_mix)),
+ SND_SOC_DAPM_MIXER("SPK MIXR", RT5640_PWR_MIXER, RT5640_PWR_SM_R_BIT,
+ 0, rt5640_spk_r_mix, ARRAY_SIZE(rt5640_spk_r_mix)),
+ SND_SOC_DAPM_MIXER("OUT MIXL", RT5640_PWR_MIXER, RT5640_PWR_OM_L_BIT,
+ 0, rt5640_out_l_mix, ARRAY_SIZE(rt5640_out_l_mix)),
+ SND_SOC_DAPM_MIXER("OUT MIXR", RT5640_PWR_MIXER, RT5640_PWR_OM_R_BIT,
+ 0, rt5640_out_r_mix, ARRAY_SIZE(rt5640_out_r_mix)),
+ /* Output Volume */
+ SND_SOC_DAPM_PGA("SPKVOL L", RT5640_PWR_VOL,
+ RT5640_PWR_SV_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SPKVOL R", RT5640_PWR_VOL,
+ RT5640_PWR_SV_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("OUTVOL L", RT5640_PWR_VOL,
+ RT5640_PWR_OV_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("OUTVOL R", RT5640_PWR_VOL,
+ RT5640_PWR_OV_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HPOVOL L", RT5640_PWR_VOL,
+ RT5640_PWR_HV_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HPOVOL R", RT5640_PWR_VOL,
+ RT5640_PWR_HV_R_BIT, 0, NULL, 0),
+ /* SPO/HPO/LOUT/Mono Mixer */
+ SND_SOC_DAPM_MIXER("SPOL MIX", SND_SOC_NOPM, 0,
+ 0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
+ SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
+ 0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
+
+ SND_SOC_DAPM_MIXER("HPOL MIX", SND_SOC_NOPM, 0, 0,
+ rt5640_hpo_mix, ARRAY_SIZE(rt5640_hpo_mix)),
+ SND_SOC_DAPM_MIXER("HPOR MIX", SND_SOC_NOPM, 0, 0,
+ rt5640_hpo_mix, ARRAY_SIZE(rt5640_hpo_mix)),
+ SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0,
+ rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
+ SND_SOC_DAPM_MIXER("Mono MIX", RT5640_PWR_ANLG1, RT5640_PWR_MM_BIT, 0,
+ rt5640_mono_mix, ARRAY_SIZE(rt5640_mono_mix)),
+
+ SND_SOC_DAPM_SUPPLY("Improve mono amp drv", RT5640_PWR_ANLG1,
+ RT5640_PWR_MA_BIT, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Improve HP amp drv", RT5640_PWR_ANLG1,
+ SND_SOC_NOPM, 0, hp_event, SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMU),
+
+ SND_SOC_DAPM_PGA("HP L amp", RT5640_PWR_ANLG1,
+ RT5640_PWR_HP_L_BIT, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("HP R amp", RT5640_PWR_ANLG1,
+ RT5640_PWR_HP_R_BIT, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Improve SPK amp drv", RT5640_PWR_DIG1,
+ SND_SOC_NOPM, 0, spk_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+ /* Output Lines */
+ SND_SOC_DAPM_OUTPUT("SPOLP"),
+ SND_SOC_DAPM_OUTPUT("SPOLN"),
+ SND_SOC_DAPM_OUTPUT("SPORP"),
+ SND_SOC_DAPM_OUTPUT("SPORN"),
+ SND_SOC_DAPM_OUTPUT("HPOL"),
+ SND_SOC_DAPM_OUTPUT("HPOR"),
+ SND_SOC_DAPM_OUTPUT("LOUTL"),
+ SND_SOC_DAPM_OUTPUT("LOUTR"),
+ SND_SOC_DAPM_OUTPUT("MonoP"),
+ SND_SOC_DAPM_OUTPUT("MonoN"),
+};
+
+static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
+ {"IN1P", NULL, "LDO2"},
+ {"IN2P", NULL, "LDO2"},
+
+ {"IN1P", NULL, "MIC1"},
+ {"IN1N", NULL, "MIC1"},
+ {"IN2P", NULL, "MIC2"},
+ {"IN2N", NULL, "MIC2"},
+
+ {"DMIC L1", NULL, "DMIC1"},
+ {"DMIC R1", NULL, "DMIC1"},
+ {"DMIC L2", NULL, "DMIC2"},
+ {"DMIC R2", NULL, "DMIC2"},
+
+ {"BST1", NULL, "IN1P"},
+ {"BST1", NULL, "IN1N"},
+ {"BST2", NULL, "IN2P"},
+ {"BST2", NULL, "IN2N"},
+
+ {"INL VOL", NULL, "IN2P"},
+ {"INR VOL", NULL, "IN2N"},
+
+ {"RECMIXL", "HPOL Switch", "HPOL"},
+ {"RECMIXL", "INL Switch", "INL VOL"},
+ {"RECMIXL", "BST2 Switch", "BST2"},
+ {"RECMIXL", "BST1 Switch", "BST1"},
+ {"RECMIXL", "OUT MIXL Switch", "OUT MIXL"},
+
+ {"RECMIXR", "HPOR Switch", "HPOR"},
+ {"RECMIXR", "INR Switch", "INR VOL"},
+ {"RECMIXR", "BST2 Switch", "BST2"},
+ {"RECMIXR", "BST1 Switch", "BST1"},
+ {"RECMIXR", "OUT MIXR Switch", "OUT MIXR"},
+
+ {"ADC L", NULL, "RECMIXL"},
+ {"ADC R", NULL, "RECMIXR"},
+
+ {"DMIC L1", NULL, "DMIC CLK"},
+ {"DMIC L2", NULL, "DMIC CLK"},
+
+ {"Stereo ADC L2 Mux", "DMIC1", "DMIC L1"},
+ {"Stereo ADC L2 Mux", "DMIC2", "DMIC L2"},
+ {"Stereo ADC L2 Mux", "DIG MIX", "DIG MIXL"},
+ {"Stereo ADC L1 Mux", "ADC", "ADC L"},
+ {"Stereo ADC L1 Mux", "DIG MIX", "DIG MIXL"},
+
+ {"Stereo ADC R1 Mux", "ADC", "ADC R"},
+ {"Stereo ADC R1 Mux", "DIG MIX", "DIG MIXR"},
+ {"Stereo ADC R2 Mux", "DMIC1", "DMIC R1"},
+ {"Stereo ADC R2 Mux", "DMIC2", "DMIC R2"},
+ {"Stereo ADC R2 Mux", "DIG MIX", "DIG MIXR"},
+
+ {"Mono ADC L2 Mux", "DMIC L1", "DMIC L1"},
+ {"Mono ADC L2 Mux", "DMIC L2", "DMIC L2"},
+ {"Mono ADC L2 Mux", "Mono DAC MIXL", "Mono DAC MIXL"},
+ {"Mono ADC L1 Mux", "Mono DAC MIXL", "Mono DAC MIXL"},
+ {"Mono ADC L1 Mux", "ADCL", "ADC L"},
+
+ {"Mono ADC R1 Mux", "Mono DAC MIXR", "Mono DAC MIXR"},
+ {"Mono ADC R1 Mux", "ADCR", "ADC R"},
+ {"Mono ADC R2 Mux", "DMIC R1", "DMIC R1"},
+ {"Mono ADC R2 Mux", "DMIC R2", "DMIC R2"},
+ {"Mono ADC R2 Mux", "Mono DAC MIXR", "Mono DAC MIXR"},
+
+ {"Stereo ADC MIXL", "ADC1 Switch", "Stereo ADC L1 Mux"},
+ {"Stereo ADC MIXL", "ADC2 Switch", "Stereo ADC L2 Mux"},
+ {"Stereo ADC MIXL", NULL, "stereo filter"},
+ {"stereo filter", NULL, "PLL1", check_sysclk1_source},
+
+ {"Stereo ADC MIXR", "ADC1 Switch", "Stereo ADC R1 Mux"},
+ {"Stereo ADC MIXR", "ADC2 Switch", "Stereo ADC R2 Mux"},
+ {"Stereo ADC MIXR", NULL, "stereo filter"},
+ {"stereo filter", NULL, "PLL1", check_sysclk1_source},
+
+ {"Mono ADC MIXL", "ADC1 Switch", "Mono ADC L1 Mux"},
+ {"Mono ADC MIXL", "ADC2 Switch", "Mono ADC L2 Mux"},
+ {"Mono ADC MIXL", NULL, "mono left filter"},
+ {"mono left filter", NULL, "PLL1", check_sysclk1_source},
+
+ {"Mono ADC MIXR", "ADC1 Switch", "Mono ADC R1 Mux"},
+ {"Mono ADC MIXR", "ADC2 Switch", "Mono ADC R2 Mux"},
+ {"Mono ADC MIXR", NULL, "mono right filter"},
+ {"mono right filter", NULL, "PLL1", check_sysclk1_source},
+
+ {"IF2 ADC L Mux", "Mono ADC MIXL", "Mono ADC MIXL"},
+ {"IF2 ADC R Mux", "Mono ADC MIXR", "Mono ADC MIXR"},
+
+ {"IF2 ADC L", NULL, "IF2 ADC L Mux"},
+ {"IF2 ADC R", NULL, "IF2 ADC R Mux"},
+ {"IF3 ADC L", NULL, "Mono ADC MIXL"},
+ {"IF3 ADC R", NULL, "Mono ADC MIXR"},
+ {"IF1 ADC L", NULL, "Stereo ADC MIXL"},
+ {"IF1 ADC R", NULL, "Stereo ADC MIXR"},
+
+ {"IF1 ADC", NULL, "I2S1"},
+ {"IF1 ADC", NULL, "IF1 ADC L"},
+ {"IF1 ADC", NULL, "IF1 ADC R"},
+ {"IF2 ADC", NULL, "I2S2"},
+ {"IF2 ADC", NULL, "IF2 ADC L"},
+ {"IF2 ADC", NULL, "IF2 ADC R"},
+ {"IF3 ADC", NULL, "I2S3"},
+ {"IF3 ADC", NULL, "IF3 ADC L"},
+ {"IF3 ADC", NULL, "IF3 ADC R"},
+
+ {"DAI1 TX Mux", "1:1|2:2|3:3", "IF1 ADC"},
+ {"DAI1 TX Mux", "1:1|2:3|3:2", "IF1 ADC"},
+ {"DAI1 TX Mux", "1:3|2:1|3:2", "IF2 ADC"},
+ {"DAI1 TX Mux", "1:2|2:1|3:3", "IF2 ADC"},
+ {"DAI1 TX Mux", "1:3|2:2|3:1", "IF3 ADC"},
+ {"DAI1 TX Mux", "1:2|2:3|3:1", "IF3 ADC"},
+ {"DAI1 IF1 Mux", "1:1|2:1|3:3", "IF1 ADC"},
+ {"DAI1 IF2 Mux", "1:1|2:1|3:3", "IF2 ADC"},
+ {"SDI1 TX Mux", "IF1", "DAI1 IF1 Mux"},
+ {"SDI1 TX Mux", "IF2", "DAI1 IF2 Mux"},
+
+ {"DAI2 TX Mux", "1:2|2:3|3:1", "IF1 ADC"},
+ {"DAI2 TX Mux", "1:2|2:1|3:3", "IF1 ADC"},
+ {"DAI2 TX Mux", "1:1|2:2|3:3", "IF2 ADC"},
+ {"DAI2 TX Mux", "1:3|2:2|3:1", "IF2 ADC"},
+ {"DAI2 TX Mux", "1:1|2:3|3:2", "IF3 ADC"},
+ {"DAI2 TX Mux", "1:3|2:1|3:2", "IF3 ADC"},
+ {"DAI2 IF1 Mux", "1:2|2:2|3:3", "IF1 ADC"},
+ {"DAI2 IF2 Mux", "1:2|2:2|3:3", "IF2 ADC"},
+ {"SDI2 TX Mux", "IF1", "DAI2 IF1 Mux"},
+ {"SDI2 TX Mux", "IF2", "DAI2 IF2 Mux"},
+
+ {"DAI3 TX Mux", "1:3|2:1|3:2", "IF1 ADC"},
+ {"DAI3 TX Mux", "1:3|2:2|3:1", "IF1 ADC"},
+ {"DAI3 TX Mux", "1:1|2:3|3:2", "IF2 ADC"},
+ {"DAI3 TX Mux", "1:2|2:3|3:1", "IF2 ADC"},
+ {"DAI3 TX Mux", "1:1|2:2|3:3", "IF3 ADC"},
+ {"DAI3 TX Mux", "1:2|2:1|3:3", "IF3 ADC"},
+ {"DAI3 TX Mux", "1:1|2:1|3:3", "IF3 ADC"},
+ {"DAI3 TX Mux", "1:2|2:2|3:3", "IF3 ADC"},
+
+ {"AIF1TX", NULL, "DAI1 TX Mux"},
+ {"AIF1TX", NULL, "SDI1 TX Mux"},
+ {"AIF2TX", NULL, "DAI2 TX Mux"},
+ {"AIF2TX", NULL, "SDI2 TX Mux"},
+ {"AIF3TX", NULL, "DAI3 TX Mux"},
+
+ {"DAI1 RX Mux", "1:1|2:2|3:3", "AIF1RX"},
+ {"DAI1 RX Mux", "1:1|2:3|3:2", "AIF1RX"},
+ {"DAI1 RX Mux", "1:1|2:1|3:3", "AIF1RX"},
+ {"DAI1 RX Mux", "1:2|2:3|3:1", "AIF2RX"},
+ {"DAI1 RX Mux", "1:2|2:1|3:3", "AIF2RX"},
+ {"DAI1 RX Mux", "1:2|2:2|3:3", "AIF2RX"},
+ {"DAI1 RX Mux", "1:3|2:1|3:2", "AIF3RX"},
+ {"DAI1 RX Mux", "1:3|2:2|3:1", "AIF3RX"},
+
+ {"DAI2 RX Mux", "1:3|2:1|3:2", "AIF1RX"},
+ {"DAI2 RX Mux", "1:2|2:1|3:3", "AIF1RX"},
+ {"DAI2 RX Mux", "1:1|2:1|3:3", "AIF1RX"},
+ {"DAI2 RX Mux", "1:1|2:2|3:3", "AIF2RX"},
+ {"DAI2 RX Mux", "1:3|2:2|3:1", "AIF2RX"},
+ {"DAI2 RX Mux", "1:2|2:2|3:3", "AIF2RX"},
+ {"DAI2 RX Mux", "1:1|2:3|3:2", "AIF3RX"},
+ {"DAI2 RX Mux", "1:2|2:3|3:1", "AIF3RX"},
+
+ {"DAI3 RX Mux", "1:3|2:2|3:1", "AIF1RX"},
+ {"DAI3 RX Mux", "1:2|2:3|3:1", "AIF1RX"},
+ {"DAI3 RX Mux", "1:1|2:3|3:2", "AIF2RX"},
+ {"DAI3 RX Mux", "1:3|2:1|3:2", "AIF2RX"},
+ {"DAI3 RX Mux", "1:1|2:2|3:3", "AIF3RX"},
+ {"DAI3 RX Mux", "1:2|2:1|3:3", "AIF3RX"},
+ {"DAI3 RX Mux", "1:1|2:1|3:3", "AIF3RX"},
+ {"DAI3 RX Mux", "1:2|2:2|3:3", "AIF3RX"},
+
+ {"IF1 DAC", NULL, "I2S1"},
+ {"IF1 DAC", NULL, "DAI1 RX Mux"},
+ {"IF2 DAC", NULL, "I2S2"},
+ {"IF2 DAC", NULL, "DAI2 RX Mux"},
+ {"IF3 DAC", NULL, "I2S3"},
+ {"IF3 DAC", NULL, "DAI3 RX Mux"},
+
+ {"IF1 DAC L", NULL, "IF1 DAC"},
+ {"IF1 DAC R", NULL, "IF1 DAC"},
+ {"IF2 DAC L", NULL, "IF2 DAC"},
+ {"IF2 DAC R", NULL, "IF2 DAC"},
+ {"IF3 DAC L", NULL, "IF3 DAC"},
+ {"IF3 DAC R", NULL, "IF3 DAC"},
+
+ {"DAC MIXL", "Stereo ADC Switch", "Stereo ADC MIXL"},
+ {"DAC MIXL", "INF1 Switch", "IF1 DAC L"},
+ {"DAC MIXR", "Stereo ADC Switch", "Stereo ADC MIXR"},
+ {"DAC MIXR", "INF1 Switch", "IF1 DAC R"},
+
+ {"ANC", NULL, "Stereo ADC MIXL"},
+ {"ANC", NULL, "Stereo ADC MIXR"},
+
+ {"Audio DSP", NULL, "DAC MIXL"},
+ {"Audio DSP", NULL, "DAC MIXR"},
+
+ {"DAC L2 Mux", "IF2", "IF2 DAC L"},
+ {"DAC L2 Mux", "IF3", "IF3 DAC L"},
+ {"DAC L2 Mux", "Base L/R", "Audio DSP"},
+
+ {"DAC R2 Mux", "IF2", "IF2 DAC R"},
+ {"DAC R2 Mux", "IF3", "IF3 DAC R"},
+
+ {"Stereo DAC MIXL", "DAC L1 Switch", "DAC MIXL"},
+ {"Stereo DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Stereo DAC MIXL", "ANC Switch", "ANC"},
+ {"Stereo DAC MIXR", "DAC R1 Switch", "DAC MIXR"},
+ {"Stereo DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+ {"Stereo DAC MIXR", "ANC Switch", "ANC"},
+
+ {"Mono DAC MIXL", "DAC L1 Switch", "DAC MIXL"},
+ {"Mono DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+ {"Mono DAC MIXL", "DAC R2 Switch", "DAC R2 Mux"},
+ {"Mono DAC MIXR", "DAC R1 Switch", "DAC MIXR"},
+ {"Mono DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+ {"Mono DAC MIXR", "DAC L2 Switch", "DAC L2 Mux"},
+
+ {"DIG MIXL", "DAC L1 Switch", "DAC MIXL"},
+ {"DIG MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+ {"DIG MIXR", "DAC R1 Switch", "DAC MIXR"},
+ {"DIG MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+
+ {"DAC L1", NULL, "Stereo DAC MIXL"},
+ {"DAC L1", NULL, "PLL1", check_sysclk1_source},
+ {"DAC R1", NULL, "Stereo DAC MIXR"},
+ {"DAC R1", NULL, "PLL1", check_sysclk1_source},
+ {"DAC L2", NULL, "Mono DAC MIXL"},
+ {"DAC L2", NULL, "PLL1", check_sysclk1_source},
+ {"DAC R2", NULL, "Mono DAC MIXR"},
+ {"DAC R2", NULL, "PLL1", check_sysclk1_source},
+
+ {"SPK MIXL", "REC MIXL Switch", "RECMIXL"},
+ {"SPK MIXL", "INL Switch", "INL VOL"},
+ {"SPK MIXL", "DAC L1 Switch", "DAC L1"},
+ {"SPK MIXL", "DAC L2 Switch", "DAC L2"},
+ {"SPK MIXL", "OUT MIXL Switch", "OUT MIXL"},
+ {"SPK MIXR", "REC MIXR Switch", "RECMIXR"},
+ {"SPK MIXR", "INR Switch", "INR VOL"},
+ {"SPK MIXR", "DAC R1 Switch", "DAC R1"},
+ {"SPK MIXR", "DAC R2 Switch", "DAC R2"},
+ {"SPK MIXR", "OUT MIXR Switch", "OUT MIXR"},
+
+ {"OUT MIXL", "SPK MIXL Switch", "SPK MIXL"},
+ {"OUT MIXL", "BST1 Switch", "BST1"},
+ {"OUT MIXL", "INL Switch", "INL VOL"},
+ {"OUT MIXL", "REC MIXL Switch", "RECMIXL"},
+ {"OUT MIXL", "DAC R2 Switch", "DAC R2"},
+ {"OUT MIXL", "DAC L2 Switch", "DAC L2"},
+ {"OUT MIXL", "DAC L1 Switch", "DAC L1"},
+
+ {"OUT MIXR", "SPK MIXR Switch", "SPK MIXR"},
+ {"OUT MIXR", "BST2 Switch", "BST2"},
+ {"OUT MIXR", "BST1 Switch", "BST1"},
+ {"OUT MIXR", "INR Switch", "INR VOL"},
+ {"OUT MIXR", "REC MIXR Switch", "RECMIXR"},
+ {"OUT MIXR", "DAC L2 Switch", "DAC L2"},
+ {"OUT MIXR", "DAC R2 Switch", "DAC R2"},
+ {"OUT MIXR", "DAC R1 Switch", "DAC R1"},
+
+ {"SPKVOL L", NULL, "SPK MIXL"},
+ {"SPKVOL R", NULL, "SPK MIXR"},
+ {"HPOVOL L", NULL, "OUT MIXL"},
+ {"HPOVOL R", NULL, "OUT MIXR"},
+ {"OUTVOL L", NULL, "OUT MIXL"},
+ {"OUTVOL R", NULL, "OUT MIXR"},
+
+ {"SPOL MIX", "DAC R1 Switch", "DAC R1"},
+ {"SPOL MIX", "DAC L1 Switch", "DAC L1"},
+ {"SPOL MIX", "SPKVOL R Switch", "SPKVOL R"},
+ {"SPOL MIX", "SPKVOL L Switch", "SPKVOL L"},
+ {"SPOL MIX", "BST1 Switch", "BST1"},
+ {"SPOR MIX", "DAC R1 Switch", "DAC R1"},
+ {"SPOR MIX", "SPKVOL R Switch", "SPKVOL R"},
+ {"SPOR MIX", "BST1 Switch", "BST1"},
+
+ {"HPOL MIX", "DAC2 Switch", "DAC L2"},
+ {"HPOL MIX", "DAC1 Switch", "DAC L1"},
+ {"HPOL MIX", "HPVOL Switch", "HPOVOL L"},
+ {"HPOR MIX", "DAC2 Switch", "DAC R2"},
+ {"HPOR MIX", "DAC1 Switch", "DAC R1"},
+ {"HPOR MIX", "HPVOL Switch", "HPOVOL R"},
+
+ {"LOUT MIX", "DAC L1 Switch", "DAC L1"},
+ {"LOUT MIX", "DAC R1 Switch", "DAC R1"},
+ {"LOUT MIX", "OUTVOL L Switch", "OUTVOL L"},
+ {"LOUT MIX", "OUTVOL R Switch", "OUTVOL R"},
+
+ {"Mono MIX", "DAC R2 Switch", "DAC R2"},
+ {"Mono MIX", "DAC L2 Switch", "DAC L2"},
+ {"Mono MIX", "OUTVOL R Switch", "OUTVOL R"},
+ {"Mono MIX", "OUTVOL L Switch", "OUTVOL L"},
+ {"Mono MIX", "BST1 Switch", "BST1"},
+
+ {"HP L amp", NULL, "HPOL MIX"},
+ {"HP R amp", NULL, "HPOR MIX"},
+
+/* {"HP L amp", NULL, "Improve HP amp drv"},
+ {"HP R amp", NULL, "Improve HP amp drv"}, */
+
+ {"SPOLP", NULL, "SPOL MIX"},
+ {"SPOLN", NULL, "SPOL MIX"},
+ {"SPORP", NULL, "SPOR MIX"},
+ {"SPORN", NULL, "SPOR MIX"},
+
+ {"SPOLP", NULL, "Improve SPK amp drv"},
+ {"SPOLN", NULL, "Improve SPK amp drv"},
+ {"SPORP", NULL, "Improve SPK amp drv"},
+ {"SPORN", NULL, "Improve SPK amp drv"},
+
+ {"HPOL", NULL, "Improve HP amp drv"},
+ {"HPOR", NULL, "Improve HP amp drv"},
+
+ {"HPOL", NULL, "HP L amp"},
+ {"HPOR", NULL, "HP R amp"},
+ {"LOUTL", NULL, "LOUT MIX"},
+ {"LOUTR", NULL, "LOUT MIX"},
+ {"MonoP", NULL, "Mono MIX"},
+ {"MonoN", NULL, "Mono MIX"},
+ {"MonoP", NULL, "Improve mono amp drv"},
+};
+
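+/*
+ * Return a bitmask of RT5640_U_IF1/2/3 indicating which physical serial
+ * data port(s) the given DAI is currently routed to, based on the I2S
+ * interface mapping field in RT5640_I2S1_SDP.
+ */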
+static int get_sdp_info(struct snd_soc_codec *codec, int dai_id)
+{
+ int ret = 0, val;
+
+ if (codec == NULL)
+ return -EINVAL;
+
+ val = snd_soc_read(codec, RT5640_I2S1_SDP);
+ val = (val & RT5640_I2S_IF_MASK) >> RT5640_I2S_IF_SFT;
+ switch (dai_id) {
+ case RT5640_AIF1:
+ if (val == RT5640_IF_123 || val == RT5640_IF_132 ||
+ val == RT5640_IF_113)
+ ret |= RT5640_U_IF1;
+ if (val == RT5640_IF_312 || val == RT5640_IF_213 ||
+ val == RT5640_IF_113)
+ ret |= RT5640_U_IF2;
+ if (val == RT5640_IF_321 || val == RT5640_IF_231)
+ ret |= RT5640_U_IF3;
+ break;
+
+ case RT5640_AIF2:
+ if (val == RT5640_IF_231 || val == RT5640_IF_213 ||
+ val == RT5640_IF_223)
+ ret |= RT5640_U_IF1;
+ if (val == RT5640_IF_123 || val == RT5640_IF_321 ||
+ val == RT5640_IF_223)
+ ret |= RT5640_U_IF2;
+ if (val == RT5640_IF_132 || val == RT5640_IF_312)
+ ret |= RT5640_U_IF3;
+ break;
+
+#if (CONFIG_SND_SOC_RT5643_MODULE | CONFIG_SND_SOC_RT5643 | \
+ CONFIG_SND_SOC_RT5646_MODULE | CONFIG_SND_SOC_RT5646)
+
+ case RT5640_AIF3:
+ if (val == RT5640_IF_312 || val == RT5640_IF_321)
+ ret |= RT5640_U_IF1;
+ if (val == RT5640_IF_132 || val == RT5640_IF_231)
+ ret |= RT5640_U_IF2;
+ if (val == RT5640_IF_123 || val == RT5640_IF_213 ||
+ val == RT5640_IF_113 || val == RT5640_IF_223)
+ ret |= RT5640_U_IF3;
+ break;
+#endif
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
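+/*
+ * Find the system-clock pre-divider index such that
+ * sclk == rate * 256 * pd[i]; returns -EINVAL if no divider matches.
+ */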
+static int get_clk_info(int sclk, int rate)
+{
+ int i, pd[] = {1, 2, 3, 4, 6, 8, 12, 16};
+
+ if (sclk <= 0 || rate <= 0)
+ return -EINVAL;
+
+ rate = rate << 8;
+ for (i = 0; i < ARRAY_SIZE(pd); i++)
+ if (sclk == rate * pd[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int rt5640_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val_len = 0, val_clk, mask_clk;
+ int pre_div, bclk_ms, frame_size, dai_sel;
+
+ rt5640->lrck[dai->id] = params_rate(params);
+ pre_div = get_clk_info(rt5640->sysclk, rt5640->lrck[dai->id]);
+ if (pre_div < 0) {
+ dev_err(codec->dev, "Unsupported clock setting\n");
+ return -EINVAL;
+ }
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0) {
+ dev_err(codec->dev, "Unsupported frame size: %d\n", frame_size);
+ return -EINVAL;
+ }
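+ /* Use a 64-bit frame when more than 32 bits per LRCK period are
+ * needed, otherwise 32-bit: BCLK = LRCK * (32 << bclk_ms). */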
+ bclk_ms = frame_size > 32 ? 1 : 0;
+ rt5640->bclk[dai->id] = rt5640->lrck[dai->id] * (32 << bclk_ms);
+
+ dev_dbg(dai->dev, "bclk is %dHz and lrck is %dHz\n",
+ rt5640->bclk[dai->id], rt5640->lrck[dai->id]);
+ dev_dbg(dai->dev, "bclk_ms is %d and pre_div is %d for iis %d\n",
+ bclk_ms, pre_div, dai->id);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ val_len |= RT5640_I2S_DL_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val_len |= RT5640_I2S_DL_24;
+ break;
+ case SNDRV_PCM_FORMAT_S8:
+ val_len |= RT5640_I2S_DL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dai_sel = get_sdp_info(codec, dai->id);
+ if (dai_sel < 0) {
+ dev_err(codec->dev, "Failed to get sdp info: %d\n", dai_sel);
+ return -EINVAL;
+ }
+ if (dai_sel & RT5640_U_IF1) {
+ mask_clk = RT5640_I2S_BCLK_MS1_MASK | RT5640_I2S_PD1_MASK;
+ val_clk = bclk_ms << RT5640_I2S_BCLK_MS1_SFT |
+ pre_div << RT5640_I2S_PD1_SFT;
+ snd_soc_update_bits(codec, RT5640_I2S1_SDP,
+ RT5640_I2S_DL_MASK, val_len);
+ snd_soc_update_bits(codec, RT5640_ADDA_CLK1, mask_clk, val_clk);
+ }
+ if (dai_sel & RT5640_U_IF2) {
+ mask_clk = RT5640_I2S_BCLK_MS2_MASK | RT5640_I2S_PD2_MASK;
+ val_clk = bclk_ms << RT5640_I2S_BCLK_MS2_SFT |
+ pre_div << RT5640_I2S_PD2_SFT;
+ snd_soc_update_bits(codec, RT5640_I2S2_SDP,
+ RT5640_I2S_DL_MASK, val_len);
+ snd_soc_update_bits(codec, RT5640_ADDA_CLK1, mask_clk, val_clk);
+ }
+#if (CONFIG_SND_SOC_RT5643_MODULE | CONFIG_SND_SOC_RT5643 | \
+ CONFIG_SND_SOC_RT5646_MODULE | CONFIG_SND_SOC_RT5646)
+ if (dai_sel & RT5640_U_IF3) {
+ mask_clk = RT5640_I2S_BCLK_MS3_MASK | RT5640_I2S_PD3_MASK;
+ val_clk = bclk_ms << RT5640_I2S_BCLK_MS3_SFT |
+ pre_div << RT5640_I2S_PD3_SFT;
+ snd_soc_update_bits(codec, RT5640_I2S3_SDP,
+ RT5640_I2S_DL_MASK, val_len);
+ snd_soc_update_bits(codec, RT5640_ADDA_CLK1, mask_clk, val_clk);
+ }
+#endif
+ return 0;
+}
+
+static int rt5640_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ rt5640->aif_pu = dai->id;
+ return 0;
+}
+
+static int rt5640_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+ int dai_sel;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ rt5640->master[dai->id] = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ reg_val |= RT5640_I2S_MS_S;
+ rt5640->master[dai->id] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ reg_val |= RT5640_I2S_BP_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ reg_val |= RT5640_I2S_DF_LEFT;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ reg_val |= RT5640_I2S_DF_PCM_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ reg_val |= RT5640_I2S_DF_PCM_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dai_sel = get_sdp_info(codec, dai->id);
+ if (dai_sel < 0) {
+ dev_err(codec->dev, "Failed to get sdp info: %d\n", dai_sel);
+ return -EINVAL;
+ }
+ if (dai_sel & RT5640_U_IF1) {
+ snd_soc_update_bits(codec, RT5640_I2S1_SDP,
+ RT5640_I2S_MS_MASK | RT5640_I2S_BP_MASK |
+ RT5640_I2S_DF_MASK, reg_val);
+ }
+ if (dai_sel & RT5640_U_IF2) {
+ snd_soc_update_bits(codec, RT5640_I2S2_SDP,
+ RT5640_I2S_MS_MASK | RT5640_I2S_BP_MASK |
+ RT5640_I2S_DF_MASK, reg_val);
+ }
+#if (CONFIG_SND_SOC_RT5643_MODULE | CONFIG_SND_SOC_RT5643 | \
+ CONFIG_SND_SOC_RT5646_MODULE | CONFIG_SND_SOC_RT5646)
+ if (dai_sel & RT5640_U_IF3) {
+ snd_soc_update_bits(codec, RT5640_I2S3_SDP,
+ RT5640_I2S_MS_MASK | RT5640_I2S_BP_MASK |
+ RT5640_I2S_DF_MASK, reg_val);
+ }
+#endif
+ return 0;
+}
+
+static int rt5640_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+
+ if (freq == rt5640->sysclk && clk_id == rt5640->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT5640_SCLK_S_MCLK:
+ reg_val |= RT5640_SCLK_SRC_MCLK;
+ break;
+ case RT5640_SCLK_S_PLL1:
+ reg_val |= RT5640_SCLK_SRC_PLL1;
+ break;
+ case RT5640_SCLK_S_PLL1_TK:
+ reg_val |= RT5640_SCLK_SRC_PLL1T;
+ break;
+ case RT5640_SCLK_S_RCCLK:
+ reg_val |= RT5640_SCLK_SRC_RCCLK;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, RT5640_GLB_CLK,
+ RT5640_SCLK_SRC_MASK, reg_val);
+ rt5640->sysclk = freq;
+ rt5640->sysclk_src = clk_id;
+
+ dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+ return 0;
+}
+
+/**
+ * rt5640_pll_calc - Calculate PLL M/N/K code.
+ * @freq_in: external clock provided to codec.
+ * @freq_out: target clock which codec works on.
+ * @pll_code: Pointer to structure with M, N, K and bypass flag.
+ *
+ * Calculate the M/N/K code to configure the PLL for the codec. K is fixed
+ * at 2, which makes the calculation more efficient.
+ *
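+ * With K fixed at 2, the search looks for N and M such that
+ * freq_out ~= freq_in * (N + 2) / (4 * (M + 2)); the M divider is
+ * bypassed when freq_in * (N + 2) / 4 matches freq_out exactly.
+ *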
+ * Returns 0 for success or negative error code.
+ */
+static int rt5640_pll_calc(const unsigned int freq_in,
+ const unsigned int freq_out, struct rt5640_pll_code *pll_code)
+{
+ int max_n = RT5640_PLL_N_MAX, max_m = RT5640_PLL_M_MAX;
+ int n = 0, m = 0, red, n_t, m_t, in_t, out_t, red_t = abs(freq_out - freq_in);
+ bool bypass = false;
+
+ if (RT5640_PLL_INP_MAX < freq_in || RT5640_PLL_INP_MIN > freq_in)
+ return -EINVAL;
+
+ for (n_t = 0; n_t <= max_n; n_t++) {
+ in_t = (freq_in >> 1) + (freq_in >> 2) * n_t;
+ if (in_t < 0)
+ continue;
+ if (in_t == freq_out) {
+ bypass = true;
+ n = n_t;
+ goto code_find;
+ }
+ for (m_t = 0; m_t <= max_m; m_t++) {
+ out_t = in_t / (m_t + 2);
+ red = abs(out_t - freq_out);
+ if (red < red_t) {
+ n = n_t;
+ m = m_t;
+ if (red == 0)
+ goto code_find;
+ red_t = red;
+ }
+ }
+ }
+ pr_debug("Only get approximation about PLL\n");
+
+code_find:
+
+ pll_code->m_bp = bypass;
+ pll_code->m_code = m;
+ pll_code->n_code = n;
+ pll_code->k_code = 2;
+ return 0;
+}
+
+static int rt5640_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+ struct rt5640_pll_code pll_code;
+ int ret, dai_sel;
+
+ if (source == rt5640->pll_src && freq_in == rt5640->pll_in &&
+ freq_out == rt5640->pll_out)
+ return 0;
+
+ if (!freq_in || !freq_out) {
+ dev_dbg(codec->dev, "PLL disabled\n");
+
+ rt5640->pll_in = 0;
+ rt5640->pll_out = 0;
+ snd_soc_update_bits(codec, RT5640_GLB_CLK,
+ RT5640_SCLK_SRC_MASK, RT5640_SCLK_SRC_MCLK);
+ return 0;
+ }
+
+ switch (source) {
+ case RT5640_PLL1_S_MCLK:
+ snd_soc_update_bits(codec, RT5640_GLB_CLK,
+ RT5640_PLL1_SRC_MASK, RT5640_PLL1_SRC_MCLK);
+ break;
+ case RT5640_PLL1_S_BCLK1:
+ case RT5640_PLL1_S_BCLK2:
+
+#if (CONFIG_SND_SOC_RT5643_MODULE | CONFIG_SND_SOC_RT5643 | \
+ CONFIG_SND_SOC_RT5646_MODULE | CONFIG_SND_SOC_RT5646)
+
+ case RT5640_PLL1_S_BCLK3:
+
+#endif
+ dai_sel = get_sdp_info(codec, dai->id);
+ if (dai_sel < 0) {
+ dev_err(codec->dev,
+ "Failed to get sdp info: %d\n", dai_sel);
+ return -EINVAL;
+ }
+ if (dai_sel & RT5640_U_IF1) {
+ snd_soc_update_bits(codec, RT5640_GLB_CLK,
+ RT5640_PLL1_SRC_MASK, RT5640_PLL1_SRC_BCLK1);
+ }
+ if (dai_sel & RT5640_U_IF2) {
+ snd_soc_update_bits(codec, RT5640_GLB_CLK,
+ RT5640_PLL1_SRC_MASK, RT5640_PLL1_SRC_BCLK2);
+ }
+ if (dai_sel & RT5640_U_IF3) {
+ snd_soc_update_bits(codec, RT5640_GLB_CLK,
+ RT5640_PLL1_SRC_MASK, RT5640_PLL1_SRC_BCLK3);
+ }
+ break;
+ default:
+ dev_err(codec->dev, "Unknown PLL source %d\n", source);
+ return -EINVAL;
+ }
+
+ ret = rt5640_pll_calc(freq_in, freq_out, &pll_code);
+ if (ret < 0) {
+ dev_err(codec->dev, "Unsupport input clock %d\n", freq_in);
+ return ret;
+ }
+
+ dev_dbg(codec->dev, "bypass=%d m=%d n=%d k=2\n", pll_code.m_bp,
+ (pll_code.m_bp ? 0 : pll_code.m_code), pll_code.n_code);
+
+ snd_soc_write(codec, RT5640_PLL_CTRL1,
+ pll_code.n_code << RT5640_PLL_N_SFT | pll_code.k_code);
+ snd_soc_write(codec, RT5640_PLL_CTRL2,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5640_PLL_M_SFT |
+ pll_code.m_bp << RT5640_PLL_M_BP_SFT);
+
+ rt5640->pll_in = freq_in;
+ rt5640->pll_out = freq_out;
+ rt5640->pll_src = source;
+
+ return 0;
+}
+
+/**
+ * rt5640_index_show - Dump private registers.
+ * @dev: codec device.
+ * @attr: device attribute.
+ * @buf: buffer for display.
+ *
+ * Show the non-zero values of all private registers.
+ *
+ * Returns buffer length.
+ */
+static ssize_t rt5640_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rt5640_priv *rt5640 = i2c_get_clientdata(client);
+ struct snd_soc_codec *codec = rt5640->codec;
+ unsigned int val;
+ int cnt = 0, i;
+
+ cnt += sprintf(buf, "RT5640 index register\n");
+ for (i = 0; i < 0xb4; i++) {
+ if (cnt + 9 >= PAGE_SIZE - 1)
+ break;
+ val = rt5640_index_read(codec, i);
+ if (!val)
+ continue;
+ cnt += snprintf(buf + cnt, 10, "%02x: %04x\n", i, val);
+ }
+
+ if (cnt >= PAGE_SIZE)
+ cnt = PAGE_SIZE - 1;
+
+ return cnt;
+}
+static DEVICE_ATTR(index_reg, 0444, rt5640_index_show, NULL);
+
+static int rt5640_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+#ifdef RT5640_DEMO
+ snd_soc_update_bits(codec, RT5640_SPK_VOL,
+ RT5640_L_MUTE | RT5640_R_MUTE, 0);
+ snd_soc_update_bits(codec, RT5640_HP_VOL,
+ RT5640_L_MUTE | RT5640_R_MUTE, 0);
+ break;
+#endif
+ case SND_SOC_BIAS_PREPARE:
+#ifdef RT5640_DEMO
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+ RT5640_PWR_VREF1 | RT5640_PWR_MB |
+ RT5640_PWR_BG | RT5640_PWR_VREF2,
+ RT5640_PWR_VREF1 | RT5640_PWR_MB |
+ RT5640_PWR_BG | RT5640_PWR_VREF2);
+ msleep(100);
+
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2);
+
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG2,
+ RT5640_PWR_MB1 | RT5640_PWR_MB2,
+ RT5640_PWR_MB1 | RT5640_PWR_MB2);
+#endif
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+#ifdef RT5640_DEMO
+ snd_soc_update_bits(codec, RT5640_SPK_VOL, RT5640_L_MUTE |
+ RT5640_R_MUTE, RT5640_L_MUTE | RT5640_R_MUTE);
+ snd_soc_update_bits(codec, RT5640_HP_VOL, RT5640_L_MUTE |
+ RT5640_R_MUTE, RT5640_L_MUTE | RT5640_R_MUTE);
+
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG2,
+ RT5640_PWR_MB1 | RT5640_PWR_MB2,
+ 0);
+#endif
+ if (SND_SOC_BIAS_OFF == codec->dapm.bias_level) {
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+ RT5640_PWR_VREF1 | RT5640_PWR_MB |
+ RT5640_PWR_BG | RT5640_PWR_VREF2,
+ RT5640_PWR_VREF1 | RT5640_PWR_MB |
+ RT5640_PWR_BG | RT5640_PWR_VREF2);
+ msleep(10);
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2);
+ codec->cache_only = false;
+ snd_soc_cache_sync(codec);
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+#ifdef RT5640_DEMO
+ snd_soc_update_bits(codec, RT5640_SPK_VOL, RT5640_L_MUTE |
+ RT5640_R_MUTE, RT5640_L_MUTE | RT5640_R_MUTE);
+ snd_soc_update_bits(codec, RT5640_HP_VOL, RT5640_L_MUTE |
+ RT5640_R_MUTE, RT5640_L_MUTE | RT5640_R_MUTE);
+ snd_soc_update_bits(codec, RT5640_OUTPUT, RT5640_L_MUTE |
+ RT5640_R_MUTE, RT5640_L_MUTE | RT5640_R_MUTE);
+ snd_soc_update_bits(codec, RT5640_MONO_OUT,
+ RT5640_L_MUTE, RT5640_L_MUTE);
+#endif
+ snd_soc_write(codec, RT5640_PWR_DIG1, 0x0000);
+ snd_soc_write(codec, RT5640_PWR_DIG2, 0x0000);
+ snd_soc_write(codec, RT5640_PWR_VOL, 0x0000);
+ snd_soc_write(codec, RT5640_PWR_MIXER, 0x0000);
+ snd_soc_write(codec, RT5640_PWR_ANLG1, 0x0000);
+ snd_soc_write(codec, RT5640_PWR_ANLG2, 0x0000);
+ break;
+
+ default:
+ break;
+ }
+ codec->dapm.bias_level = level;
+
+ return 0;
+}
+
+static int rt5640_probe(struct snd_soc_codec *codec)
+{
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+ u16 val;
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
+ val = snd_soc_read(codec, RT5640_RESET);
+ if (val != rt5640_reg[RT5640_RESET]) {
+ dev_err(codec->dev,
+ "Device with ID register %x is not a rt5640\n", val);
+ return -ENODEV;
+ }
+
+ rt5640_reset(codec);
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+ RT5640_PWR_VREF1 | RT5640_PWR_MB |
+ RT5640_PWR_BG | RT5640_PWR_VREF2,
+ RT5640_PWR_VREF1 | RT5640_PWR_MB |
+ RT5640_PWR_BG | RT5640_PWR_VREF2);
+ msleep(100);
+ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2);
+ /* DMIC */
+ if (rt5640->dmic_en == RT5640_DMIC1) {
+ snd_soc_update_bits(codec, RT5640_GPIO_CTRL1,
+ RT5640_GP2_PIN_MASK, RT5640_GP2_PIN_DMIC1_SCL);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_1L_LH_MASK | RT5640_DMIC_1R_LH_MASK,
+ RT5640_DMIC_1L_LH_FALLING | RT5640_DMIC_1R_LH_RISING);
+ } else if (rt5640->dmic_en == RT5640_DMIC2) {
+ snd_soc_update_bits(codec, RT5640_GPIO_CTRL1,
+ RT5640_GP2_PIN_MASK, RT5640_GP2_PIN_DMIC1_SCL);
+ snd_soc_update_bits(codec, RT5640_DMIC,
+ RT5640_DMIC_2L_LH_MASK | RT5640_DMIC_2R_LH_MASK,
+ RT5640_DMIC_2L_LH_FALLING | RT5640_DMIC_2R_LH_RISING);
+ }
+
+#ifdef RT5640_DEMO
+ rt5640_reg_init(codec);
+#endif
+
+
+#if (CONFIG_SND_SOC_RT5642_MODULE | CONFIG_SND_SOC_RT5642)
+ rt5640_register_dsp(codec);
+#endif
+
+ codec->dapm.bias_level = SND_SOC_BIAS_STANDBY;
+
+ snd_soc_add_controls(codec, rt5640_snd_controls,
+ ARRAY_SIZE(rt5640_snd_controls));
+
+ rt5640->codec = codec;
+ ret = device_create_file(codec->dev, &dev_attr_index_reg);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to create index_reg sysfs files: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rt5640_remove(struct snd_soc_codec *codec)
+{
+ rt5640_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt5640_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ rt5640_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static int rt5640_resume(struct snd_soc_codec *codec)
+{
+ rt5640_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ return 0;
+}
+#else
+#define rt5640_suspend NULL
+#define rt5640_resume NULL
+#endif
+
+#define RT5640_STEREO_RATES SNDRV_PCM_RATE_8000_96000
+#define RT5640_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+struct snd_soc_dai_ops rt5640_aif_dai_ops = {
+ .hw_params = rt5640_hw_params,
+ .prepare = rt5640_prepare,
+ .set_fmt = rt5640_set_dai_fmt,
+ .set_sysclk = rt5640_set_dai_sysclk,
+ .set_pll = rt5640_set_dai_pll,
+};
+
+struct snd_soc_dai_driver rt5640_dai[] = {
+ {
+ .name = "rt5640-aif1",
+ .id = RT5640_AIF1,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5640_STEREO_RATES,
+ .formats = RT5640_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5640_STEREO_RATES,
+ .formats = RT5640_FORMATS,
+ },
+ .ops = &rt5640_aif_dai_ops,
+ },
+ {
+ .name = "rt5640-aif2",
+ .id = RT5640_AIF2,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5640_STEREO_RATES,
+ .formats = RT5640_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5640_STEREO_RATES,
+ .formats = RT5640_FORMATS,
+ },
+ .ops = &rt5640_aif_dai_ops,
+ },
+#if (CONFIG_SND_SOC_RT5643_MODULE | CONFIG_SND_SOC_RT5643 | \
+ CONFIG_SND_SOC_RT5646_MODULE | CONFIG_SND_SOC_RT5646)
+ {
+ .name = "rt5640-aif3",
+ .id = RT5640_AIF3,
+ .playback = {
+ .stream_name = "AIF3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5640_STEREO_RATES,
+ .formats = RT5640_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5640_STEREO_RATES,
+ .formats = RT5640_FORMATS,
+ },
+ .ops = &rt5640_aif_dai_ops,
+ },
+#endif
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
+ .probe = rt5640_probe,
+ .remove = rt5640_remove,
+ .suspend = rt5640_suspend,
+ .resume = rt5640_resume,
+ .set_bias_level = rt5640_set_bias_level,
+ .reg_cache_size = RT5640_VENDOR_ID2 + 1,
+ .reg_word_size = sizeof(u16),
+ .reg_cache_default = rt5640_reg,
+ .volatile_register = rt5640_volatile_register,
+ .readable_register = rt5640_readable_register,
+ .reg_cache_step = 1,
+ .dapm_widgets = rt5640_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5640_dapm_widgets),
+ .dapm_routes = rt5640_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5640_dapm_routes),
+};
+
+static const struct i2c_device_id rt5640_i2c_id[] = {
+ { "rt5640", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
+
+static int rt5640_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt5640_priv *rt5640;
+ int ret;
+
+ rt5640 = kzalloc(sizeof(struct rt5640_priv), GFP_KERNEL);
+ if (rt5640 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rt5640);
+
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5640,
+ rt5640_dai, ARRAY_SIZE(rt5640_dai));
+ if (ret < 0)
+ kfree(rt5640);
+
+ return ret;
+}
+
+static __devexit int rt5640_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_codec(&i2c->dev);
+ kfree(i2c_get_clientdata(i2c));
+ return 0;
+}
+
+struct i2c_driver rt5640_i2c_driver = {
+ .driver = {
+ .name = "rt5640",
+ .owner = THIS_MODULE,
+ },
+ .probe = rt5640_i2c_probe,
+ .remove = __devexit_p(rt5640_i2c_remove),
+ .id_table = rt5640_i2c_id,
+};
+
+static int __init rt5640_modinit(void)
+{
+ return i2c_add_driver(&rt5640_i2c_driver);
+}
+module_init(rt5640_modinit);
+
+static void __exit rt5640_modexit(void)
+{
+ i2c_del_driver(&rt5640_i2c_driver);
+}
+module_exit(rt5640_modexit);
+
+MODULE_DESCRIPTION("ASoC RT5640 driver");
+MODULE_AUTHOR("Johnny Hsu <johnnyhsu@realtek.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
new file mode 100644
index 000000000000..ba9ac7f78b20
--- /dev/null
+++ b/sound/soc/codecs/rt5640.h
@@ -0,0 +1,2094 @@
+/*
+ * rt5640.h -- RT5640 ALSA SoC audio driver
+ *
+ * Copyright 2011 Realtek Microelectronics
+ * Author: Johnny Hsu <johnnyhsu@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5640_H__
+#define __RT5640_H__
+
+/* Info */
+#define RT5640_RESET 0x00
+#define RT5640_VENDOR_ID 0xfd
+#define RT5640_VENDOR_ID1 0xfe
+#define RT5640_VENDOR_ID2 0xff
+/* I/O - Output */
+#define RT5640_SPK_VOL 0x01
+#define RT5640_HP_VOL 0x02
+#define RT5640_OUTPUT 0x03
+#define RT5640_MONO_OUT 0x04
+/* I/O - Input */
+#define RT5640_IN1_IN2 0x0d
+#define RT5640_IN3_IN4 0x0e
+#define RT5640_INL_INR_VOL 0x0f
+/* I/O - ADC/DAC/DMIC */
+#define RT5640_DAC1_DIG_VOL 0x19
+#define RT5640_DAC2_DIG_VOL 0x1a
+#define RT5640_DAC2_CTRL 0x1b
+#define RT5640_ADC_DIG_VOL 0x1c
+#define RT5640_ADC_DATA 0x1d
+#define RT5640_ADC_BST_VOL 0x1e
+/* Mixer - D-D */
+#define RT5640_STO_ADC_MIXER 0x27
+#define RT5640_MONO_ADC_MIXER 0x28
+#define RT5640_AD_DA_MIXER 0x29
+#define RT5640_STO_DAC_MIXER 0x2a
+#define RT5640_MONO_DAC_MIXER 0x2b
+#define RT5640_DIG_MIXER 0x2c
+#define RT5640_DSP_PATH1 0x2d
+#define RT5640_DSP_PATH2 0x2e
+#define RT5640_DIG_INF_DATA 0x2f
+/* Mixer - ADC */
+#define RT5640_REC_L1_MIXER 0x3b
+#define RT5640_REC_L2_MIXER 0x3c
+#define RT5640_REC_R1_MIXER 0x3d
+#define RT5640_REC_R2_MIXER 0x3e
+/* Mixer - DAC */
+#define RT5640_HPO_MIXER 0x45
+#define RT5640_SPK_L_MIXER 0x46
+#define RT5640_SPK_R_MIXER 0x47
+#define RT5640_SPO_L_MIXER 0x48
+#define RT5640_SPO_R_MIXER 0x49
+#define RT5640_SPO_CLSD_RATIO 0x4a
+#define RT5640_MONO_MIXER 0x4c
+#define RT5640_OUT_L1_MIXER 0x4d
+#define RT5640_OUT_L2_MIXER 0x4e
+#define RT5640_OUT_L3_MIXER 0x4f
+#define RT5640_OUT_R1_MIXER 0x50
+#define RT5640_OUT_R2_MIXER 0x51
+#define RT5640_OUT_R3_MIXER 0x52
+#define RT5640_LOUT_MIXER 0x53
+/* Power */
+#define RT5640_PWR_DIG1 0x61
+#define RT5640_PWR_DIG2 0x62
+#define RT5640_PWR_ANLG1 0x63
+#define RT5640_PWR_ANLG2 0x64
+#define RT5640_PWR_MIXER 0x65
+#define RT5640_PWR_VOL 0x66
+/* Private Register Control */
+#define RT5640_PRIV_INDEX 0x6a
+#define RT5640_PRIV_DATA 0x6c
+/* Format - ADC/DAC */
+#define RT5640_I2S1_SDP 0x70
+#define RT5640_I2S2_SDP 0x71
+#define RT5640_I2S3_SDP 0x72
+#define RT5640_ADDA_CLK1 0x73
+#define RT5640_ADDA_CLK2 0x74
+#define RT5640_DMIC 0x75
+/* Function - Analog */
+#define RT5640_GLB_CLK 0x80
+#define RT5640_PLL_CTRL1 0x81
+#define RT5640_PLL_CTRL2 0x82
+#define RT5640_ASRC_1 0x83
+#define RT5640_ASRC_2 0x84
+#define RT5640_ASRC_3 0x85
+#define RT5640_ASRC_4 0x89
+#define RT5640_ASRC_5 0x8a
+#define RT5640_HP_OVCD 0x8b
+#define RT5640_CLS_D_OVCD 0x8c
+#define RT5640_CLS_D_OUT 0x8d
+#define RT5640_DEPOP_M1 0x8e
+#define RT5640_DEPOP_M2 0x8f
+#define RT5640_DEPOP_M3 0x90
+#define RT5640_CHARGE_PUMP 0x91
+#define RT5640_PV_DET_SPK_G 0x92
+#define RT5640_MICBIAS 0x93
+/* Function - Digital */
+#define RT5640_EQ_CTRL1 0xb0
+#define RT5640_EQ_CTRL2 0xb1
+#define RT5640_WIND_FILTER 0xb2
+#define RT5640_DRC_AGC_1 0xb4
+#define RT5640_DRC_AGC_2 0xb5
+#define RT5640_DRC_AGC_3 0xb6
+#define RT5640_SVOL_ZC 0xb7
+#define RT5640_ANC_CTRL1 0xb8
+#define RT5640_ANC_CTRL2 0xb9
+#define RT5640_ANC_CTRL3 0xba
+#define RT5640_JD_CTRL 0xbb
+#define RT5640_ANC_JD 0xbc
+#define RT5640_IRQ_CTRL1 0xbd
+#define RT5640_IRQ_CTRL2 0xbe
+#define RT5640_INT_IRQ_ST 0xbf
+#define RT5640_GPIO_CTRL1 0xc0
+#define RT5640_GPIO_CTRL2 0xc1
+#define RT5640_GPIO_CTRL3 0xc2
+#define RT5640_DSP_CTRL1 0xc4
+#define RT5640_DSP_CTRL2 0xc5
+#define RT5640_DSP_CTRL3 0xc6
+#define RT5640_DSP_CTRL4 0xc7
+#define RT5640_PGM_REG_ARR1 0xc8
+#define RT5640_PGM_REG_ARR2 0xc9
+#define RT5640_PGM_REG_ARR3 0xca
+#define RT5640_PGM_REG_ARR4 0xcb
+#define RT5640_PGM_REG_ARR5 0xcc
+#define RT5640_SCB_FUNC 0xcd
+#define RT5640_SCB_CTRL 0xce
+#define RT5640_BASE_BACK 0xcf
+#define RT5640_MP3_PLUS1 0xd0
+#define RT5640_MP3_PLUS2 0xd1
+#define RT5640_3D_HP 0xd2
+#define RT5640_ADJ_HPF 0xd3
+#define RT5640_HP_CALIB_AMP_DET 0xd6
+#define RT5640_HP_CALIB2 0xd7
+#define RT5640_SV_ZCD1 0xd9
+#define RT5640_SV_ZCD2 0xda
+/* Dummy Register */
+#define RT5640_DUMMY1 0xfa
+#define RT5640_DUMMY2 0xfb
+#define RT5640_DUMMY3 0xfc
+
+
+/* Index of Codec Private Register definition */
+#define RT5640_3D_SPK 0x63
+#define RT5640_WND_1 0x6c
+#define RT5640_WND_2 0x6d
+#define RT5640_WND_3 0x6e
+#define RT5640_WND_4 0x6f
+#define RT5640_WND_5 0x70
+#define RT5640_WND_8 0x73
+#define RT5640_DIP_SPK_INF 0x75
+#define RT5640_EQ_BW_LOP 0xa0
+#define RT5640_EQ_GN_LOP 0xa1
+#define RT5640_EQ_FC_BP1 0xa2
+#define RT5640_EQ_BW_BP1 0xa3
+#define RT5640_EQ_GN_BP1 0xa4
+#define RT5640_EQ_FC_BP2 0xa5
+#define RT5640_EQ_BW_BP2 0xa6
+#define RT5640_EQ_GN_BP2 0xa7
+#define RT5640_EQ_FC_BP3 0xa8
+#define RT5640_EQ_BW_BP3 0xa9
+#define RT5640_EQ_GN_BP3 0xaa
+#define RT5640_EQ_FC_BP4 0xab
+#define RT5640_EQ_BW_BP4 0xac
+#define RT5640_EQ_GN_BP4 0xad
+#define RT5640_EQ_FC_HIP1 0xae
+#define RT5640_EQ_GN_HIP1 0xaf
+#define RT5640_EQ_FC_HIP2 0xb0
+#define RT5640_EQ_BW_HIP2 0xb1
+#define RT5640_EQ_GN_HIP2 0xb2
+#define RT5640_EQ_PRE_VOL 0xb3
+#define RT5640_EQ_PST_VOL 0xb4
+
+
+/* global definition */
+#define RT5640_L_MUTE (0x1 << 15)
+#define RT5640_L_MUTE_SFT 15
+#define RT5640_VOL_L_MUTE (0x1 << 14)
+#define RT5640_VOL_L_SFT 14
+#define RT5640_R_MUTE (0x1 << 7)
+#define RT5640_R_MUTE_SFT 7
+#define RT5640_VOL_R_MUTE (0x1 << 6)
+#define RT5640_VOL_R_SFT 6
+#define RT5640_L_VOL_MASK (0x3f << 8)
+#define RT5640_L_VOL_SFT 8
+#define RT5640_R_VOL_MASK (0x3f)
+#define RT5640_R_VOL_SFT 0
+
+/* IN1 and IN2 Control (0x0d) */
+/* IN3 and IN4 Control (0x0e) */
+#define RT5640_BST_SFT1 12
+#define RT5640_BST_SFT2 8
+#define RT5640_IN_DF1 (0x1 << 7)
+#define RT5640_IN_SFT1 7
+#define RT5640_IN_DF2 (0x1 << 6)
+#define RT5640_IN_SFT2 6
+
+/* INL and INR Volume Control (0x0f) */
+#define RT5640_INL_SEL_MASK (0x1 << 15)
+#define RT5640_INL_SEL_SFT 15
+#define RT5640_INL_SEL_IN4P (0x0 << 15)
+#define RT5640_INL_SEL_MONOP (0x1 << 15)
+#define RT5640_INL_VOL_MASK (0x1f << 8)
+#define RT5640_INL_VOL_SFT 8
+#define RT5640_INR_SEL_MASK (0x1 << 7)
+#define RT5640_INR_SEL_SFT 7
+#define RT5640_INR_SEL_IN4N (0x0 << 7)
+#define RT5640_INR_SEL_MONON (0x1 << 7)
+#define RT5640_INR_VOL_MASK (0x1f)
+#define RT5640_INR_VOL_SFT 0
+
+/* DAC1 Digital Volume (0x19) */
+#define RT5640_DAC_L1_VOL_MASK (0xff << 8)
+#define RT5640_DAC_L1_VOL_SFT 8
+#define RT5640_DAC_R1_VOL_MASK (0xff)
+#define RT5640_DAC_R1_VOL_SFT 0
+
+/* DAC2 Digital Volume (0x1a) */
+#define RT5640_DAC_L2_VOL_MASK (0xff << 8)
+#define RT5640_DAC_L2_VOL_SFT 8
+#define RT5640_DAC_R2_VOL_MASK (0xff)
+#define RT5640_DAC_R2_VOL_SFT 0
+
+/* DAC2 Control (0x1b) */
+#define RT5640_M_DAC_L2_VOL (0x1 << 13)
+#define RT5640_M_DAC_L2_VOL_SFT 13
+#define RT5640_M_DAC_R2_VOL (0x1 << 12)
+#define RT5640_M_DAC_R2_VOL_SFT 12
+
+/* ADC Digital Volume Control (0x1c) */
+#define RT5640_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5640_ADC_L_VOL_SFT 8
+#define RT5640_ADC_R_VOL_MASK (0x7f)
+#define RT5640_ADC_R_VOL_SFT 0
+
+/* Mono ADC Digital Volume Control (0x1d) */
+#define RT5640_MONO_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5640_MONO_ADC_L_VOL_SFT 8
+#define RT5640_MONO_ADC_R_VOL_MASK (0x7f)
+#define RT5640_MONO_ADC_R_VOL_SFT 0
+
+/* ADC Boost Volume Control (0x1e) */
+#define RT5640_ADC_L_BST_MASK (0x3 << 14)
+#define RT5640_ADC_L_BST_SFT 14
+#define RT5640_ADC_R_BST_MASK (0x3 << 12)
+#define RT5640_ADC_R_BST_SFT 12
+#define RT5640_ADC_COMP_MASK (0x3 << 10)
+#define RT5640_ADC_COMP_SFT 10
+
+/* Stereo ADC Mixer Control (0x27) */
+#define RT5640_M_ADC_L1 (0x1 << 14)
+#define RT5640_M_ADC_L1_SFT 14
+#define RT5640_M_ADC_L2 (0x1 << 13)
+#define RT5640_M_ADC_L2_SFT 13
+#define RT5640_ADC_1_SRC_MASK (0x1 << 12)
+#define RT5640_ADC_1_SRC_SFT 12
+#define RT5640_ADC_1_SRC_ADC (0x1 << 12)
+#define RT5640_ADC_1_SRC_DACMIX (0x0 << 12)
+#define RT5640_ADC_2_SRC_MASK (0x3 << 10)
+#define RT5640_ADC_2_SRC_SFT 10
+#define RT5640_ADC_2_SRC_DMIC1 (0x0 << 10)
+#define RT5640_ADC_2_SRC_DMIC2 (0x1 << 10)
+#define RT5640_ADC_2_SRC_DACMIX (0x2 << 10)
+#define RT5640_M_ADC_R1 (0x1 << 6)
+#define RT5640_M_ADC_R1_SFT 6
+#define RT5640_M_ADC_R2 (0x1 << 5)
+#define RT5640_M_ADC_R2_SFT 5
+
+/* Mono ADC Mixer Control (0x28) */
+#define RT5640_M_MONO_ADC_L1 (0x1 << 14)
+#define RT5640_M_MONO_ADC_L1_SFT 14
+#define RT5640_M_MONO_ADC_L2 (0x1 << 13)
+#define RT5640_M_MONO_ADC_L2_SFT 13
+#define RT5640_MONO_ADC_L1_SRC_MASK (0x1 << 12)
+#define RT5640_MONO_ADC_L1_SRC_SFT 12
+#define RT5640_MONO_ADC_L1_SRC_DACMIXL (0x0 << 12)
+#define RT5640_MONO_ADC_L1_SRC_ADCL (0x1 << 12)
+#define RT5640_MONO_ADC_L2_SRC_MASK (0x3 << 10)
+#define RT5640_MONO_ADC_L2_SRC_SFT 10
+#define RT5640_MONO_ADC_L2_SRC_DMIC_L1 (0x0 << 10)
+#define RT5640_MONO_ADC_L2_SRC_DMIC_L2 (0x1 << 10)
+#define RT5640_MONO_ADC_L2_SRC_DACMIXL (0x2 << 10)
+#define RT5640_M_MONO_ADC_R1 (0x1 << 6)
+#define RT5640_M_MONO_ADC_R1_SFT 6
+#define RT5640_M_MONO_ADC_R2 (0x1 << 5)
+#define RT5640_M_MONO_ADC_R2_SFT 5
+#define RT5640_MONO_ADC_R1_SRC_MASK (0x1 << 4)
+#define RT5640_MONO_ADC_R1_SRC_SFT 4
+#define RT5640_MONO_ADC_R1_SRC_ADCR (0x1 << 4)
+#define RT5640_MONO_ADC_R1_SRC_DACMIXR (0x0 << 4)
+#define RT5640_MONO_ADC_R2_SRC_MASK (0x3 << 2)
+#define RT5640_MONO_ADC_R2_SRC_SFT 2
+#define RT5640_MONO_ADC_R2_SRC_DMIC_R1 (0x0 << 2)
+#define RT5640_MONO_ADC_R2_SRC_DMIC_R2 (0x1 << 2)
+#define RT5640_MONO_ADC_R2_SRC_DACMIXR (0x2 << 2)
+
+/* ADC Mixer to DAC Mixer Control (0x29) */
+#define RT5640_M_ADCMIX_L (0x1 << 15)
+#define RT5640_M_ADCMIX_L_SFT 15
+#define RT5640_M_IF1_DAC_L (0x1 << 14)
+#define RT5640_M_IF1_DAC_L_SFT 14
+#define RT5640_M_ADCMIX_R (0x1 << 7)
+#define RT5640_M_ADCMIX_R_SFT 7
+#define RT5640_M_IF1_DAC_R (0x1 << 6)
+#define RT5640_M_IF1_DAC_R_SFT 6
+
+/* Stereo DAC Mixer Control (0x2a) */
+#define RT5640_M_DAC_L1 (0x1 << 14)
+#define RT5640_M_DAC_L1_SFT 14
+#define RT5640_DAC_L1_STO_L_VOL_MASK (0x1 << 13)
+#define RT5640_DAC_L1_STO_L_VOL_SFT 13
+#define RT5640_M_DAC_L2 (0x1 << 12)
+#define RT5640_M_DAC_L2_SFT 12
+#define RT5640_DAC_L2_STO_L_VOL_MASK (0x1 << 11)
+#define RT5640_DAC_L2_STO_L_VOL_SFT 11
+#define RT5640_M_ANC_DAC_L (0x1 << 10)
+#define RT5640_M_ANC_DAC_L_SFT 10
+#define RT5640_M_DAC_R1 (0x1 << 6)
+#define RT5640_M_DAC_R1_SFT 6
+#define RT5640_DAC_R1_STO_R_VOL_MASK (0x1 << 5)
+#define RT5640_DAC_R1_STO_R_VOL_SFT 5
+#define RT5640_M_DAC_R2 (0x1 << 4)
+#define RT5640_M_DAC_R2_SFT 4
+#define RT5640_DAC_R2_STO_R_VOL_MASK (0x1 << 3)
+#define RT5640_DAC_R2_STO_R_VOL_SFT 3
+#define RT5640_M_ANC_DAC_R (0x1 << 2)
+#define RT5640_M_ANC_DAC_R_SFT 2
+
+/* Mono DAC Mixer Control (0x2b) */
+#define RT5640_M_DAC_L1_MONO_L (0x1 << 14)
+#define RT5640_M_DAC_L1_MONO_L_SFT 14
+#define RT5640_DAC_L1_MONO_L_VOL_MASK (0x1 << 13)
+#define RT5640_DAC_L1_MONO_L_VOL_SFT 13
+#define RT5640_M_DAC_L2_MONO_L (0x1 << 12)
+#define RT5640_M_DAC_L2_MONO_L_SFT 12
+#define RT5640_DAC_L2_MONO_L_VOL_MASK (0x1 << 11)
+#define RT5640_DAC_L2_MONO_L_VOL_SFT 11
+#define RT5640_M_DAC_R2_MONO_L (0x1 << 10)
+#define RT5640_M_DAC_R2_MONO_L_SFT 10
+#define RT5640_DAC_R2_MONO_L_VOL_MASK (0x1 << 9)
+#define RT5640_DAC_R2_MONO_L_VOL_SFT 9
+#define RT5640_M_DAC_R1_MONO_R (0x1 << 6)
+#define RT5640_M_DAC_R1_MONO_R_SFT 6
+#define RT5640_DAC_R1_MONO_R_VOL_MASK (0x1 << 5)
+#define RT5640_DAC_R1_MONO_R_VOL_SFT 5
+#define RT5640_M_DAC_R2_MONO_R (0x1 << 4)
+#define RT5640_M_DAC_R2_MONO_R_SFT 4
+#define RT5640_DAC_R2_MONO_R_VOL_MASK (0x1 << 3)
+#define RT5640_DAC_R2_MONO_R_VOL_SFT 3
+#define RT5640_M_DAC_L2_MONO_R (0x1 << 2)
+#define RT5640_M_DAC_L2_MONO_R_SFT 2
+#define RT5640_DAC_L2_MONO_R_VOL_MASK (0x1 << 1)
+#define RT5640_DAC_L2_MONO_R_VOL_SFT 1
+
+/* Digital Mixer Control (0x2c) */
+#define RT5640_M_STO_L_DAC_L (0x1 << 15)
+#define RT5640_M_STO_L_DAC_L_SFT 15
+#define RT5640_STO_L_DAC_L_VOL_MASK (0x1 << 14)
+#define RT5640_STO_L_DAC_L_VOL_SFT 14
+#define RT5640_M_DAC_L2_DAC_L (0x1 << 13)
+#define RT5640_M_DAC_L2_DAC_L_SFT 13
+#define RT5640_DAC_L2_DAC_L_VOL_MASK (0x1 << 12)
+#define RT5640_DAC_L2_DAC_L_VOL_SFT 12
+#define RT5640_M_STO_R_DAC_R (0x1 << 11)
+#define RT5640_M_STO_R_DAC_R_SFT 11
+#define RT5640_STO_R_DAC_R_VOL_MASK (0x1 << 10)
+#define RT5640_STO_R_DAC_R_VOL_SFT 10
+#define RT5640_M_DAC_R2_DAC_R (0x1 << 9)
+#define RT5640_M_DAC_R2_DAC_R_SFT 9
+#define RT5640_DAC_R2_DAC_R_VOL_MASK (0x1 << 8)
+#define RT5640_DAC_R2_DAC_R_VOL_SFT 8
+
+/* DSP Path Control 1 (0x2d) */
+#define RT5640_RXDP_SRC_MASK (0x1 << 15)
+#define RT5640_RXDP_SRC_SFT 15
+#define RT5640_RXDP_SRC_NOR (0x0 << 15)
+#define RT5640_RXDP_SRC_DIV3 (0x1 << 15)
+#define RT5640_TXDP_SRC_MASK (0x1 << 14)
+#define RT5640_TXDP_SRC_SFT 14
+#define RT5640_TXDP_SRC_NOR (0x0 << 14)
+#define RT5640_TXDP_SRC_DIV3 (0x1 << 14)
+
+/* DSP Path Control 2 (0x2e) */
+#define RT5640_DAC_L2_SEL_MASK (0x3 << 14)
+#define RT5640_DAC_L2_SEL_SFT 14
+#define RT5640_DAC_L2_SEL_IF2 (0x0 << 14)
+#define RT5640_DAC_L2_SEL_IF3 (0x1 << 14)
+#define RT5640_DAC_L2_SEL_TXDC (0x2 << 14)
+#define RT5640_DAC_L2_SEL_BASS (0x3 << 14)
+#define RT5640_DAC_R2_SEL_MASK (0x3 << 12)
+#define RT5640_DAC_R2_SEL_SFT 12
+#define RT5640_DAC_R2_SEL_IF2 (0x0 << 12)
+#define RT5640_DAC_R2_SEL_IF3 (0x1 << 12)
+#define RT5640_DAC_R2_SEL_TXDC (0x2 << 12)
+#define RT5640_IF2_ADC_L_SEL_MASK (0x1 << 11)
+#define RT5640_IF2_ADC_L_SEL_SFT 11
+#define RT5640_IF2_ADC_L_SEL_TXDP (0x0 << 11)
+#define RT5640_IF2_ADC_L_SEL_PASS (0x1 << 11)
+#define RT5640_IF2_ADC_R_SEL_MASK (0x1 << 10)
+#define RT5640_IF2_ADC_R_SEL_SFT 10
+#define RT5640_IF2_ADC_R_SEL_TXDP (0x0 << 10)
+#define RT5640_IF2_ADC_R_SEL_PASS (0x1 << 10)
+#define RT5640_RXDC_SEL_MASK (0x3 << 8)
+#define RT5640_RXDC_SEL_SFT 8
+#define RT5640_RXDC_SEL_NOR (0x0 << 8)
+#define RT5640_RXDC_SEL_L2R (0x1 << 8)
+#define RT5640_RXDC_SEL_R2L (0x2 << 8)
+#define RT5640_RXDC_SEL_SWAP (0x3 << 8)
+#define RT5640_RXDP_SEL_MASK (0x3 << 6)
+#define RT5640_RXDP_SEL_SFT 6
+#define RT5640_RXDP_SEL_NOR (0x0 << 6)
+#define RT5640_RXDP_SEL_L2R (0x1 << 6)
+#define RT5640_RXDP_SEL_R2L (0x2 << 6)
+#define RT5640_RXDP_SEL_SWAP (0x3 << 6)
+#define RT5640_TXDC_SEL_MASK (0x3 << 4)
+#define RT5640_TXDC_SEL_SFT 4
+#define RT5640_TXDC_SEL_NOR (0x0 << 4)
+#define RT5640_TXDC_SEL_L2R (0x1 << 4)
+#define RT5640_TXDC_SEL_R2L (0x2 << 4)
+#define RT5640_TXDC_SEL_SWAP (0x3 << 4)
+#define RT5640_TXDP_SEL_MASK (0x3 << 2)
+#define RT5640_TXDP_SEL_SFT 2
+#define RT5640_TXDP_SEL_NOR (0x0 << 2)
+#define RT5640_TXDP_SEL_L2R (0x1 << 2)
+#define RT5640_TXDP_SEL_R2L (0x2 << 2)
+#define RT5640_TRXDP_SEL_SWAP (0x3 << 2)
+
+/* Digital Interface Data Control (0x2f) */
+#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SFT 14
+#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
+#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
+#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SFT 12
+#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
+#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
+#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SFT 10
+#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
+#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
+#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SFT 8
+#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
+#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
+#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SFT 6
+#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
+#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
+#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SFT 4
+#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
+#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
+
+/* REC Left Mixer Control 1 (0x3b) */
+#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
+#define RT5640_G_HP_L_RM_L_SFT 13
+#define RT5640_G_IN_L_RM_L_MASK (0x7 << 10)
+#define RT5640_G_IN_L_RM_L_SFT 10
+#define RT5640_G_BST4_RM_L_MASK (0x7 << 7)
+#define RT5640_G_BST4_RM_L_SFT 7
+#define RT5640_G_BST3_RM_L_MASK (0x7 << 4)
+#define RT5640_G_BST3_RM_L_SFT 4
+#define RT5640_G_BST2_RM_L_MASK (0x7 << 1)
+#define RT5640_G_BST2_RM_L_SFT 1
+
+/* REC Left Mixer Control 2 (0x3c) */
+#define RT5640_G_BST1_RM_L_MASK (0x7 << 13)
+#define RT5640_G_BST1_RM_L_SFT 13
+#define RT5640_G_OM_L_RM_L_MASK (0x7 << 10)
+#define RT5640_G_OM_L_RM_L_SFT 10
+#define RT5640_M_HP_L_RM_L (0x1 << 6)
+#define RT5640_M_HP_L_RM_L_SFT 6
+#define RT5640_M_IN_L_RM_L (0x1 << 5)
+#define RT5640_M_IN_L_RM_L_SFT 5
+#define RT5640_M_BST4_RM_L (0x1 << 4)
+#define RT5640_M_BST4_RM_L_SFT 4
+#define RT5640_M_BST3_RM_L (0x1 << 3)
+#define RT5640_M_BST3_RM_L_SFT 3
+#define RT5640_M_BST2_RM_L (0x1 << 2)
+#define RT5640_M_BST2_RM_L_SFT 2
+#define RT5640_M_BST1_RM_L (0x1 << 1)
+#define RT5640_M_BST1_RM_L_SFT 1
+#define RT5640_M_OM_L_RM_L (0x1)
+#define RT5640_M_OM_L_RM_L_SFT 0
+
+/* REC Right Mixer Control 1 (0x3d) */
+#define RT5640_G_HP_R_RM_R_MASK (0x7 << 13)
+#define RT5640_G_HP_R_RM_R_SFT 13
+#define RT5640_G_IN_R_RM_R_MASK (0x7 << 10)
+#define RT5640_G_IN_R_RM_R_SFT 10
+#define RT5640_G_BST4_RM_R_MASK (0x7 << 7)
+#define RT5640_G_BST4_RM_R_SFT 7
+#define RT5640_G_BST3_RM_R_MASK (0x7 << 4)
+#define RT5640_G_BST3_RM_R_SFT 4
+#define RT5640_G_BST2_RM_R_MASK (0x7 << 1)
+#define RT5640_G_BST2_RM_R_SFT 1
+
+/* REC Right Mixer Control 2 (0x3e) */
+#define RT5640_G_BST1_RM_R_MASK (0x7 << 13)
+#define RT5640_G_BST1_RM_R_SFT 13
+#define RT5640_G_OM_R_RM_R_MASK (0x7 << 10)
+#define RT5640_G_OM_R_RM_R_SFT 10
+#define RT5640_M_HP_R_RM_R (0x1 << 6)
+#define RT5640_M_HP_R_RM_R_SFT 6
+#define RT5640_M_IN_R_RM_R (0x1 << 5)
+#define RT5640_M_IN_R_RM_R_SFT 5
+#define RT5640_M_BST4_RM_R (0x1 << 4)
+#define RT5640_M_BST4_RM_R_SFT 4
+#define RT5640_M_BST3_RM_R (0x1 << 3)
+#define RT5640_M_BST3_RM_R_SFT 3
+#define RT5640_M_BST2_RM_R (0x1 << 2)
+#define RT5640_M_BST2_RM_R_SFT 2
+#define RT5640_M_BST1_RM_R (0x1 << 1)
+#define RT5640_M_BST1_RM_R_SFT 1
+#define RT5640_M_OM_R_RM_R (0x1)
+#define RT5640_M_OM_R_RM_R_SFT 0
+
+/* HPMIX Control (0x45) */
+#define RT5640_M_DAC2_HM (0x1 << 15)
+#define RT5640_M_DAC2_HM_SFT 15
+#define RT5640_M_DAC1_HM (0x1 << 14)
+#define RT5640_M_DAC1_HM_SFT 14
+#define RT5640_M_HPVOL_HM (0x1 << 13)
+#define RT5640_M_HPVOL_HM_SFT 13
+#define RT5640_G_HPOMIX_MASK (0x1 << 12)
+#define RT5640_G_HPOMIX_SFT 12
+
+/* SPK Left Mixer Control (0x46) */
+#define RT5640_G_RM_L_SM_L_MASK (0x3 << 14)
+#define RT5640_G_RM_L_SM_L_SFT 14
+#define RT5640_G_IN_L_SM_L_MASK (0x3 << 12)
+#define RT5640_G_IN_L_SM_L_SFT 12
+#define RT5640_G_DAC_L1_SM_L_MASK (0x3 << 10)
+#define RT5640_G_DAC_L1_SM_L_SFT 10
+#define RT5640_G_DAC_L2_SM_L_MASK (0x3 << 8)
+#define RT5640_G_DAC_L2_SM_L_SFT 8
+#define RT5640_G_OM_L_SM_L_MASK (0x3 << 6)
+#define RT5640_G_OM_L_SM_L_SFT 6
+#define RT5640_M_RM_L_SM_L (0x1 << 5)
+#define RT5640_M_RM_L_SM_L_SFT 5
+#define RT5640_M_IN_L_SM_L (0x1 << 4)
+#define RT5640_M_IN_L_SM_L_SFT 4
+#define RT5640_M_DAC_L1_SM_L (0x1 << 3)
+#define RT5640_M_DAC_L1_SM_L_SFT 3
+#define RT5640_M_DAC_L2_SM_L (0x1 << 2)
+#define RT5640_M_DAC_L2_SM_L_SFT 2
+#define RT5640_M_OM_L_SM_L (0x1 << 1)
+#define RT5640_M_OM_L_SM_L_SFT 1
+
+/* SPK Right Mixer Control (0x47) */
+#define RT5640_G_RM_R_SM_R_MASK (0x3 << 14)
+#define RT5640_G_RM_R_SM_R_SFT 14
+#define RT5640_G_IN_R_SM_R_MASK (0x3 << 12)
+#define RT5640_G_IN_R_SM_R_SFT 12
+#define RT5640_G_DAC_R1_SM_R_MASK (0x3 << 10)
+#define RT5640_G_DAC_R1_SM_R_SFT 10
+#define RT5640_G_DAC_R2_SM_R_MASK (0x3 << 8)
+#define RT5640_G_DAC_R2_SM_R_SFT 8
+#define RT5640_G_OM_R_SM_R_MASK (0x3 << 6)
+#define RT5640_G_OM_R_SM_R_SFT 6
+#define RT5640_M_RM_R_SM_R (0x1 << 5)
+#define RT5640_M_RM_R_SM_R_SFT 5
+#define RT5640_M_IN_R_SM_R (0x1 << 4)
+#define RT5640_M_IN_R_SM_R_SFT 4
+#define RT5640_M_DAC_R1_SM_R (0x1 << 3)
+#define RT5640_M_DAC_R1_SM_R_SFT 3
+#define RT5640_M_DAC_R2_SM_R (0x1 << 2)
+#define RT5640_M_DAC_R2_SM_R_SFT 2
+#define RT5640_M_OM_R_SM_R (0x1 << 1)
+#define RT5640_M_OM_R_SM_R_SFT 1
+
+/* SPOLMIX Control (0x48) */
+#define RT5640_M_DAC_R1_SPM_L (0x1 << 15)
+#define RT5640_M_DAC_R1_SPM_L_SFT 15
+#define RT5640_M_DAC_L1_SPM_L (0x1 << 14)
+#define RT5640_M_DAC_L1_SPM_L_SFT 14
+#define RT5640_M_SV_R_SPM_L (0x1 << 13)
+#define RT5640_M_SV_R_SPM_L_SFT 13
+#define RT5640_M_SV_L_SPM_L (0x1 << 12)
+#define RT5640_M_SV_L_SPM_L_SFT 12
+#define RT5640_M_BST1_SPM_L (0x1 << 11)
+#define RT5640_M_BST1_SPM_L_SFT 11
+
+/* SPORMIX Control (0x49) */
+#define RT5640_M_DAC_R1_SPM_R (0x1 << 13)
+#define RT5640_M_DAC_R1_SPM_R_SFT 13
+#define RT5640_M_SV_R_SPM_R (0x1 << 12)
+#define RT5640_M_SV_R_SPM_R_SFT 12
+#define RT5640_M_BST1_SPM_R (0x1 << 11)
+#define RT5640_M_BST1_SPM_R_SFT 11
+
+/* SPOLMIX / SPORMIX Ratio Control (0x4a) */
+#define RT5640_SPO_CLSD_RATIO_MASK (0x7)
+#define RT5640_SPO_CLSD_RATIO_SFT 0
+
+/* Mono Output Mixer Control (0x4c) */
+#define RT5640_M_DAC_R2_MM (0x1 << 15)
+#define RT5640_M_DAC_R2_MM_SFT 15
+#define RT5640_M_DAC_L2_MM (0x1 << 14)
+#define RT5640_M_DAC_L2_MM_SFT 14
+#define RT5640_M_OV_R_MM (0x1 << 13)
+#define RT5640_M_OV_R_MM_SFT 13
+#define RT5640_M_OV_L_MM (0x1 << 12)
+#define RT5640_M_OV_L_MM_SFT 12
+#define RT5640_M_BST1_MM (0x1 << 11)
+#define RT5640_M_BST1_MM_SFT 11
+#define RT5640_G_MONOMIX_MASK (0x1 << 10)
+#define RT5640_G_MONOMIX_SFT 10
+
+/* Output Left Mixer Control 1 (0x4d) */
+#define RT5640_G_BST3_OM_L_MASK (0x7 << 13)
+#define RT5640_G_BST3_OM_L_SFT 13
+#define RT5640_G_BST2_OM_L_MASK (0x7 << 10)
+#define RT5640_G_BST2_OM_L_SFT 10
+#define RT5640_G_BST1_OM_L_MASK (0x7 << 7)
+#define RT5640_G_BST1_OM_L_SFT 7
+#define RT5640_G_IN_L_OM_L_MASK (0x7 << 4)
+#define RT5640_G_IN_L_OM_L_SFT 4
+#define RT5640_G_RM_L_OM_L_MASK (0x7 << 1)
+#define RT5640_G_RM_L_OM_L_SFT 1
+
+/* Output Left Mixer Control 2 (0x4e) */
+#define RT5640_G_DAC_R2_OM_L_MASK (0x7 << 13)
+#define RT5640_G_DAC_R2_OM_L_SFT 13
+#define RT5640_G_DAC_L2_OM_L_MASK (0x7 << 10)
+#define RT5640_G_DAC_L2_OM_L_SFT 10
+#define RT5640_G_DAC_L1_OM_L_MASK (0x7 << 7)
+#define RT5640_G_DAC_L1_OM_L_SFT 7
+
+/* Output Left Mixer Control 3 (0x4f) */
+#define RT5640_M_SM_L_OM_L (0x1 << 8)
+#define RT5640_M_SM_L_OM_L_SFT 8
+#define RT5640_M_BST3_OM_L (0x1 << 7)
+#define RT5640_M_BST3_OM_L_SFT 7
+#define RT5640_M_BST2_OM_L (0x1 << 6)
+#define RT5640_M_BST2_OM_L_SFT 6
+#define RT5640_M_BST1_OM_L (0x1 << 5)
+#define RT5640_M_BST1_OM_L_SFT 5
+#define RT5640_M_IN_L_OM_L (0x1 << 4)
+#define RT5640_M_IN_L_OM_L_SFT 4
+#define RT5640_M_RM_L_OM_L (0x1 << 3)
+#define RT5640_M_RM_L_OM_L_SFT 3
+#define RT5640_M_DAC_R2_OM_L (0x1 << 2)
+#define RT5640_M_DAC_R2_OM_L_SFT 2
+#define RT5640_M_DAC_L2_OM_L (0x1 << 1)
+#define RT5640_M_DAC_L2_OM_L_SFT 1
+#define RT5640_M_DAC_L1_OM_L (0x1)
+#define RT5640_M_DAC_L1_OM_L_SFT 0
+
+/* Output Right Mixer Control 1 (0x50) */
+#define RT5640_G_BST4_OM_R_MASK (0x7 << 13)
+#define RT5640_G_BST4_OM_R_SFT 13
+#define RT5640_G_BST2_OM_R_MASK (0x7 << 10)
+#define RT5640_G_BST2_OM_R_SFT 10
+#define RT5640_G_BST1_OM_R_MASK (0x7 << 7)
+#define RT5640_G_BST1_OM_R_SFT 7
+#define RT5640_G_IN_R_OM_R_MASK (0x7 << 4)
+#define RT5640_G_IN_R_OM_R_SFT 4
+#define RT5640_G_RM_R_OM_R_MASK (0x7 << 1)
+#define RT5640_G_RM_R_OM_R_SFT 1
+
+/* Output Right Mixer Control 2 (0x51) */
+#define RT5640_G_DAC_L2_OM_R_MASK (0x7 << 13)
+#define RT5640_G_DAC_L2_OM_R_SFT 13
+#define RT5640_G_DAC_R2_OM_R_MASK (0x7 << 10)
+#define RT5640_G_DAC_R2_OM_R_SFT 10
+#define RT5640_G_DAC_R1_OM_R_MASK (0x7 << 7)
+#define RT5640_G_DAC_R1_OM_R_SFT 7
+
+/* Output Right Mixer Control 3 (0x52) */
+#define RT5640_M_SM_L_OM_R (0x1 << 8)
+#define RT5640_M_SM_L_OM_R_SFT 8
+#define RT5640_M_BST4_OM_R (0x1 << 7)
+#define RT5640_M_BST4_OM_R_SFT 7
+#define RT5640_M_BST2_OM_R (0x1 << 6)
+#define RT5640_M_BST2_OM_R_SFT 6
+#define RT5640_M_BST1_OM_R (0x1 << 5)
+#define RT5640_M_BST1_OM_R_SFT 5
+#define RT5640_M_IN_R_OM_R (0x1 << 4)
+#define RT5640_M_IN_R_OM_R_SFT 4
+#define RT5640_M_RM_R_OM_R (0x1 << 3)
+#define RT5640_M_RM_R_OM_R_SFT 3
+#define RT5640_M_DAC_L2_OM_R (0x1 << 2)
+#define RT5640_M_DAC_L2_OM_R_SFT 2
+#define RT5640_M_DAC_R2_OM_R (0x1 << 1)
+#define RT5640_M_DAC_R2_OM_R_SFT 1
+#define RT5640_M_DAC_R1_OM_R (0x1)
+#define RT5640_M_DAC_R1_OM_R_SFT 0
+
+/* LOUT Mixer Control (0x53) */
+#define RT5640_M_DAC_L1_LM (0x1 << 15)
+#define RT5640_M_DAC_L1_LM_SFT 15
+#define RT5640_M_DAC_R1_LM (0x1 << 14)
+#define RT5640_M_DAC_R1_LM_SFT 14
+#define RT5640_M_OV_L_LM (0x1 << 13)
+#define RT5640_M_OV_L_LM_SFT 13
+#define RT5640_M_OV_R_LM (0x1 << 12)
+#define RT5640_M_OV_R_LM_SFT 12
+#define RT5640_G_LOUTMIX_MASK (0x1 << 11)
+#define RT5640_G_LOUTMIX_SFT 11
+
+/* Power Management for Digital 1 (0x61) */
+#define RT5640_PWR_I2S1 (0x1 << 15)
+#define RT5640_PWR_I2S1_BIT 15
+#define RT5640_PWR_I2S2 (0x1 << 14)
+#define RT5640_PWR_I2S2_BIT 14
+#define RT5640_PWR_I2S3 (0x1 << 13)
+#define RT5640_PWR_I2S3_BIT 13
+#define RT5640_PWR_DAC_L1 (0x1 << 12)
+#define RT5640_PWR_DAC_L1_BIT 12
+#define RT5640_PWR_DAC_R1 (0x1 << 11)
+#define RT5640_PWR_DAC_R1_BIT 11
+#define RT5640_PWR_DAC_L2 (0x1 << 7)
+#define RT5640_PWR_DAC_L2_BIT 7
+#define RT5640_PWR_DAC_R2 (0x1 << 6)
+#define RT5640_PWR_DAC_R2_BIT 6
+#define RT5640_PWR_ADC_L (0x1 << 2)
+#define RT5640_PWR_ADC_L_BIT 2
+#define RT5640_PWR_ADC_R (0x1 << 1)
+#define RT5640_PWR_ADC_R_BIT 1
+#define RT5640_PWR_CLS_D (0x1)
+#define RT5640_PWR_CLS_D_BIT 0
+
+/* Power Management for Digital 2 (0x62) */
+#define RT5640_PWR_ADC_SF (0x1 << 15)
+#define RT5640_PWR_ADC_SF_BIT 15
+#define RT5640_PWR_ADC_MF_L (0x1 << 14)
+#define RT5640_PWR_ADC_MF_L_BIT 14
+#define RT5640_PWR_ADC_MF_R (0x1 << 13)
+#define RT5640_PWR_ADC_MF_R_BIT 13
+#define RT5640_PWR_I2S_DSP (0x1 << 12)
+#define RT5640_PWR_I2S_DSP_BIT 12
+
+/* Power Management for Analog 1 (0x63) */
+#define RT5640_PWR_VREF1 (0x1 << 15)
+#define RT5640_PWR_VREF1_BIT 15
+#define RT5640_PWR_FV1 (0x1 << 14)
+#define RT5640_PWR_FV1_BIT 14
+#define RT5640_PWR_MB (0x1 << 13)
+#define RT5640_PWR_MB_BIT 13
+#define RT5640_PWR_LM (0x1 << 12)
+#define RT5640_PWR_LM_BIT 12
+#define RT5640_PWR_BG (0x1 << 11)
+#define RT5640_PWR_BG_BIT 11
+#define RT5640_PWR_MM (0x1 << 10)
+#define RT5640_PWR_MM_BIT 10
+#define RT5640_PWR_MA (0x1 << 8)
+#define RT5640_PWR_MA_BIT 8
+#define RT5640_PWR_HP_L (0x1 << 7)
+#define RT5640_PWR_HP_L_BIT 7
+#define RT5640_PWR_HP_R (0x1 << 6)
+#define RT5640_PWR_HP_R_BIT 6
+#define RT5640_PWR_HA (0x1 << 5)
+#define RT5640_PWR_HA_BIT 5
+#define RT5640_PWR_VREF2 (0x1 << 4)
+#define RT5640_PWR_VREF2_BIT 4
+#define RT5640_PWR_FV2 (0x1 << 3)
+#define RT5640_PWR_FV2_BIT 3
+#define RT5640_PWR_LDO2 (0x1 << 2)
+#define RT5640_PWR_LDO2_BIT 2
+
+/* Power Management for Analog 2 (0x64) */
+#define RT5640_PWR_BST1 (0x1 << 15)
+#define RT5640_PWR_BST1_BIT 15
+#define RT5640_PWR_BST2 (0x1 << 14)
+#define RT5640_PWR_BST2_BIT 14
+#define RT5640_PWR_BST3 (0x1 << 13)
+#define RT5640_PWR_BST3_BIT 13
+#define RT5640_PWR_BST4 (0x1 << 12)
+#define RT5640_PWR_BST4_BIT 12
+#define RT5640_PWR_MB1 (0x1 << 11)
+#define RT5640_PWR_MB1_BIT 11
+#define RT5640_PWR_MB2 (0x1 << 10)
+#define RT5640_PWR_MB2_BIT 10
+#define RT5640_PWR_PLL (0x1 << 9)
+#define RT5640_PWR_PLL_BIT 9
+
+/* Power Management for Mixer (0x65) */
+#define RT5640_PWR_OM_L (0x1 << 15)
+#define RT5640_PWR_OM_L_BIT 15
+#define RT5640_PWR_OM_R (0x1 << 14)
+#define RT5640_PWR_OM_R_BIT 14
+#define RT5640_PWR_SM_L (0x1 << 13)
+#define RT5640_PWR_SM_L_BIT 13
+#define RT5640_PWR_SM_R (0x1 << 12)
+#define RT5640_PWR_SM_R_BIT 12
+#define RT5640_PWR_RM_L (0x1 << 11)
+#define RT5640_PWR_RM_L_BIT 11
+#define RT5640_PWR_RM_R (0x1 << 10)
+#define RT5640_PWR_RM_R_BIT 10
+
+/* Power Management for Volume (0x66) */
+#define RT5640_PWR_SV_L (0x1 << 15)
+#define RT5640_PWR_SV_L_BIT 15
+#define RT5640_PWR_SV_R (0x1 << 14)
+#define RT5640_PWR_SV_R_BIT 14
+#define RT5640_PWR_OV_L (0x1 << 13)
+#define RT5640_PWR_OV_L_BIT 13
+#define RT5640_PWR_OV_R (0x1 << 12)
+#define RT5640_PWR_OV_R_BIT 12
+#define RT5640_PWR_HV_L (0x1 << 11)
+#define RT5640_PWR_HV_L_BIT 11
+#define RT5640_PWR_HV_R (0x1 << 10)
+#define RT5640_PWR_HV_R_BIT 10
+#define RT5640_PWR_IN_L (0x1 << 9)
+#define RT5640_PWR_IN_L_BIT 9
+#define RT5640_PWR_IN_R (0x1 << 8)
+#define RT5640_PWR_IN_R_BIT 8
+
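Unlike the mixer fields above, the power-management definitions carry a *_BIT suffix (a bare bit number) because they are normally consumed by DAPM widgets, which take a register index plus a bit position and switch the supply on only while an audio path that needs it is active. A hedged sketch of that pattern follows, assuming the RT5640_PWR_ANLG2 register index (Analog 2, 0x64) defined earlier in this header; the driver's real widget table lives in its .c file.

#include <sound/soc-dapm.h>

/* Sketch only, not patch content: expose the PLL power bit as a DAPM
 * supply.  RT5640_PWR_ANLG2 is assumed to be the "Power Management for
 * Analog 2" register index defined earlier in this header.
 */
static const struct snd_soc_dapm_widget rt5640_pll_supply_sketch[] = {
	SND_SOC_DAPM_SUPPLY("PLL1", RT5640_PWR_ANLG2,
			    RT5640_PWR_PLL_BIT, 0, NULL, 0),
};
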
+/* I2S1/2/3 Audio Serial Data Port Control (0x70 0x71 0x72) */
+#define RT5640_I2S_MS_MASK (0x1 << 15)
+#define RT5640_I2S_MS_SFT 15
+#define RT5640_I2S_MS_M (0x0 << 15)
+#define RT5640_I2S_MS_S (0x1 << 15)
+#define RT5640_I2S_IF_MASK (0x7 << 12)
+#define RT5640_I2S_IF_SFT 12
+#define RT5640_I2S_O_CP_MASK (0x3 << 10)
+#define RT5640_I2S_O_CP_SFT 10
+#define RT5640_I2S_O_CP_OFF (0x0 << 10)
+#define RT5640_I2S_O_CP_U_LAW (0x1 << 10)
+#define RT5640_I2S_O_CP_A_LAW (0x2 << 10)
+#define RT5640_I2S_I_CP_MASK (0x3 << 8)
+#define RT5640_I2S_I_CP_SFT 8
+#define RT5640_I2S_I_CP_OFF (0x0 << 8)
+#define RT5640_I2S_I_CP_U_LAW (0x1 << 8)
+#define RT5640_I2S_I_CP_A_LAW (0x2 << 8)
+#define RT5640_I2S_BP_MASK (0x1 << 7)
+#define RT5640_I2S_BP_SFT 7
+#define RT5640_I2S_BP_NOR (0x0 << 7)
+#define RT5640_I2S_BP_INV (0x1 << 7)
+#define RT5640_I2S_DL_MASK (0x3 << 2)
+#define RT5640_I2S_DL_SFT 2
+#define RT5640_I2S_DL_16 (0x0 << 2)
+#define RT5640_I2S_DL_20 (0x1 << 2)
+#define RT5640_I2S_DL_24 (0x2 << 2)
+#define RT5640_I2S_DL_8 (0x3 << 2)
+#define RT5640_I2S_DF_MASK (0x3)
+#define RT5640_I2S_DF_SFT 0
+#define RT5640_I2S_DF_I2S (0x0)
+#define RT5640_I2S_DF_LEFT (0x1)
+#define RT5640_I2S_DF_PCM_A (0x2)
+#define RT5640_I2S_DF_PCM_B (0x3)
+
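The port-control fields map directly onto an ASoC DAI format callback: master/slave, BCLK polarity, data format and word length are OR'ed together and written to the port register in one update. A minimal sketch follows, assuming the RT5640_I2S1_SDP register index (port 1, 0x70) defined earlier in this header; in the real driver the word length would come from hw_params rather than being hard-coded.

#include <sound/soc.h>

/* Sketch only, not patch content: configure I2S port 1 as clock slave,
 * normal BCLK polarity, standard I2S framing, 24-bit data.
 * RT5640_I2S1_SDP is assumed from earlier in this header.
 */
static void rt5640_i2s1_fmt_sketch(struct snd_soc_codec *codec)
{
	unsigned int val = RT5640_I2S_MS_S | RT5640_I2S_BP_NOR |
			   RT5640_I2S_DF_I2S | RT5640_I2S_DL_24;

	snd_soc_update_bits(codec, RT5640_I2S1_SDP,
			    RT5640_I2S_MS_MASK | RT5640_I2S_BP_MASK |
			    RT5640_I2S_DF_MASK | RT5640_I2S_DL_MASK, val);
}
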
+/* I2S2 Audio Serial Data Port Control (0x71) */
+#define RT5640_I2S2_SDI_MASK (0x1 << 6)
+#define RT5640_I2S2_SDI_SFT 6
+#define RT5640_I2S2_SDI_I2S1 (0x0 << 6)
+#define RT5640_I2S2_SDI_I2S2 (0x1 << 6)
+
+/* ADC/DAC Clock Control 1 (0x73) */
+#define RT5640_I2S_BCLK_MS1_MASK (0x1 << 15)
+#define RT5640_I2S_BCLK_MS1_SFT 15
+#define RT5640_I2S_BCLK_MS1_32 (0x0 << 15)
+#define RT5640_I2S_BCLK_MS1_64 (0x1 << 15)
+#define RT5640_I2S_PD1_MASK (0x7 << 12)
+#define RT5640_I2S_PD1_SFT 12
+#define RT5640_I2S_PD1_1 (0x0 << 12)
+#define RT5640_I2S_PD1_2 (0x1 << 12)
+#define RT5640_I2S_PD1_3 (0x2 << 12)
+#define RT5640_I2S_PD1_4 (0x3 << 12)
+#define RT5640_I2S_PD1_6 (0x4 << 12)
+#define RT5640_I2S_PD1_8 (0x5 << 12)
+#define RT5640_I2S_PD1_12 (0x6 << 12)
+#define RT5640_I2S_PD1_16 (0x7 << 12)
+#define RT5640_I2S_BCLK_MS2_MASK (0x1 << 11)
+#define RT5640_I2S_BCLK_MS2_SFT 11
+#define RT5640_I2S_BCLK_MS2_32 (0x0 << 11)
+#define RT5640_I2S_BCLK_MS2_64 (0x1 << 11)
+#define RT5640_I2S_PD2_MASK (0x7 << 8)
+#define RT5640_I2S_PD2_SFT 8
+#define RT5640_I2S_PD2_1 (0x0 << 8)
+#define RT5640_I2S_PD2_2 (0x1 << 8)
+#define RT5640_I2S_PD2_3 (0x2 << 8)
+#define RT5640_I2S_PD2_4 (0x3 << 8)
+#define RT5640_I2S_PD2_6 (0x4 << 8)
+#define RT5640_I2S_PD2_8 (0x5 << 8)
+#define RT5640_I2S_PD2_12 (0x6 << 8)
+#define RT5640_I2S_PD2_16 (0x7 << 8)
+#define RT5640_I2S_BCLK_MS3_MASK (0x1 << 7)
+#define RT5640_I2S_BCLK_MS3_SFT 7
+#define RT5640_I2S_BCLK_MS3_32 (0x0 << 7)
+#define RT5640_I2S_BCLK_MS3_64 (0x1 << 7)
+#define RT5640_I2S_PD3_MASK (0x7 << 4)
+#define RT5640_I2S_PD3_SFT 4
+#define RT5640_I2S_PD3_1 (0x0 << 4)
+#define RT5640_I2S_PD3_2 (0x1 << 4)
+#define RT5640_I2S_PD3_3 (0x2 << 4)
+#define RT5640_I2S_PD3_4 (0x3 << 4)
+#define RT5640_I2S_PD3_6 (0x4 << 4)
+#define RT5640_I2S_PD3_8 (0x5 << 4)
+#define RT5640_I2S_PD3_12 (0x6 << 4)
+#define RT5640_I2S_PD3_16 (0x7 << 4)
+#define RT5640_DAC_OSR_MASK (0x3 << 2)
+#define RT5640_DAC_OSR_SFT 2
+#define RT5640_DAC_OSR_128 (0x0 << 2)
+#define RT5640_DAC_OSR_64 (0x1 << 2)
+#define RT5640_DAC_OSR_32 (0x2 << 2)
+#define RT5640_DAC_OSR_16 (0x3 << 2)
+#define RT5640_ADC_OSR_MASK (0x3)
+#define RT5640_ADC_OSR_SFT 0
+#define RT5640_ADC_OSR_128 (0x0)
+#define RT5640_ADC_OSR_64 (0x1)
+#define RT5640_ADC_OSR_32 (0x2)
+#define RT5640_ADC_OSR_16 (0x3)
+
+/* ADC/DAC Clock Control 2 (0x74) */
+#define RT5640_DAC_L_OSR_MASK (0x3 << 14)
+#define RT5640_DAC_L_OSR_SFT 14
+#define RT5640_DAC_L_OSR_128 (0x0 << 14)
+#define RT5640_DAC_L_OSR_64 (0x1 << 14)
+#define RT5640_DAC_L_OSR_32 (0x2 << 14)
+#define RT5640_DAC_L_OSR_16 (0x3 << 14)
+#define RT5640_ADC_R_OSR_MASK (0x3 << 12)
+#define RT5640_ADC_R_OSR_SFT 12
+#define RT5640_ADC_R_OSR_128 (0x0 << 12)
+#define RT5640_ADC_R_OSR_64 (0x1 << 12)
+#define RT5640_ADC_R_OSR_32 (0x2 << 12)
+#define RT5640_ADC_R_OSR_16 (0x3 << 12)
+#define RT5640_DAHPF_EN (0x1 << 11)
+#define RT5640_DAHPF_EN_SFT 11
+#define RT5640_ADHPF_EN (0x1 << 10)
+#define RT5640_ADHPF_EN_SFT 10
+
+/* Digital Microphone Control (0x75) */
+#define RT5640_DMIC_1_EN_MASK (0x1 << 15)
+#define RT5640_DMIC_1_EN_SFT 15
+#define RT5640_DMIC_1_DIS (0x0 << 15)
+#define RT5640_DMIC_1_EN (0x1 << 15)
+#define RT5640_DMIC_2_EN_MASK (0x1 << 14)
+#define RT5640_DMIC_2_EN_SFT 14
+#define RT5640_DMIC_2_DIS (0x0 << 14)
+#define RT5640_DMIC_2_EN (0x1 << 14)
+#define RT5640_DMIC_1L_LH_MASK (0x1 << 13)
+#define RT5640_DMIC_1L_LH_SFT 13
+#define RT5640_DMIC_1L_LH_FALLING (0x0 << 13)
+#define RT5640_DMIC_1L_LH_RISING (0x1 << 13)
+#define RT5640_DMIC_1R_LH_MASK (0x1 << 12)
+#define RT5640_DMIC_1R_LH_SFT 12
+#define RT5640_DMIC_1R_LH_FALLING (0x0 << 12)
+#define RT5640_DMIC_1R_LH_RISING (0x1 << 12)
+#define RT5640_DMIC_1_DP_MASK (0x1 << 11)
+#define RT5640_DMIC_1_DP_SFT 11
+#define RT5640_DMIC_1_DP_GPIO3 (0x0 << 11)
+#define RT5640_DMIC_1_DP_IN1P (0x1 << 11)
+#define RT5640_DMIC_2_DP_MASK (0x1 << 10)
+#define RT5640_DMIC_2_DP_SFT 10
+#define RT5640_DMIC_2_DP_GPIO4 (0x0 << 10)
+#define RT5640_DMIC_2_DP_IN1N (0x1 << 10)
+#define RT5640_DMIC_2L_LH_MASK (0x1 << 9)
+#define RT5640_DMIC_2L_LH_SFT 9
+#define RT5640_DMIC_2L_LH_FALLING (0x0 << 9)
+#define RT5640_DMIC_2L_LH_RISING (0x1 << 9)
+#define RT5640_DMIC_2R_LH_MASK (0x1 << 8)
+#define RT5640_DMIC_2R_LH_SFT 8
+#define RT5640_DMIC_2R_LH_FALLING (0x0 << 8)
+#define RT5640_DMIC_2R_LH_RISING (0x1 << 8)
+#define RT5640_DMIC_CLK_MASK (0x7 << 5)
+#define RT5640_DMIC_CLK_SFT 5
+
+/* Global Clock Control (0x80) */
+#define RT5640_SCLK_SRC_MASK (0x3 << 14)
+#define RT5640_SCLK_SRC_SFT 14
+#define RT5640_SCLK_SRC_MCLK (0x0 << 14)
+#define RT5640_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5640_SCLK_SRC_PLL1T (0x2 << 14)
+#define RT5640_SCLK_SRC_RCCLK (0x3 << 14) /* 15MHz */
+#define RT5640_PLL1_SRC_MASK (0x3 << 12)
+#define RT5640_PLL1_SRC_SFT 12
+#define RT5640_PLL1_SRC_MCLK (0x0 << 12)
+#define RT5640_PLL1_SRC_BCLK1 (0x1 << 12)
+#define RT5640_PLL1_SRC_BCLK2 (0x2 << 12)
+#define RT5640_PLL1_SRC_BCLK3 (0x3 << 12)
+#define RT5640_PLL1_PD_MASK (0x1 << 3)
+#define RT5640_PLL1_PD_SFT 3
+#define RT5640_PLL1_PD_1 (0x0 << 3)
+#define RT5640_PLL1_PD_2 (0x1 << 3)
+
+#define RT5640_PLL_INP_MAX 40000000
+#define RT5640_PLL_INP_MIN 256000
+/* PLL M/N/K Code Control 1 (0x81) */
+#define RT5640_PLL_N_MAX 0x1ff
+#define RT5640_PLL_N_MASK (RT5640_PLL_N_MAX << 7)
+#define RT5640_PLL_N_SFT 7
+#define RT5640_PLL_K_MAX 0x1f
+#define RT5640_PLL_K_MASK (RT5640_PLL_K_MAX)
+#define RT5640_PLL_K_SFT 0
+
+/* PLL M/N/K Code Control 2 (0x82) */
+#define RT5640_PLL_M_MAX 0xf
+#define RT5640_PLL_M_MASK (RT5640_PLL_M_MAX << 12)
+#define RT5640_PLL_M_SFT 12
+#define RT5640_PLL_M_BP (0x1 << 11)
+#define RT5640_PLL_M_BP_SFT 11
+
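The M, N and K fields above parameterize the PLL. For this family of Realtek codecs the output is commonly documented as Fout = Fin * (N + 2) / ((M + 2) * (K + 2)), with RT5640_PLL_M_BP bypassing the M divider; the helper below is a hedged sketch of that relation, not a claim about this driver's exact coefficient search.

#include <linux/types.h>

/* Sketch only, not patch content: evaluate the commonly documented PLL
 * relation for given divider codes.  64-bit math avoids overflow for
 * inputs up to RT5640_PLL_INP_MAX (40 MHz) and N up to RT5640_PLL_N_MAX.
 */
static unsigned long long rt5640_pll_out_sketch(unsigned long fin,
						unsigned int m,
						unsigned int n,
						unsigned int k,
						bool m_bypass)
{
	unsigned long long out = (unsigned long long)fin * (n + 2);

	out /= k + 2;
	if (!m_bypass)
		out /= m + 2;
	return out;
}
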
+/* ASRC Control 1 (0x83) */
+#define RT5640_STO_T_MASK (0x1 << 15)
+#define RT5640_STO_T_SFT 15
+#define RT5640_STO_T_SCLK (0x0 << 15)
+#define RT5640_STO_T_LRCK1 (0x1 << 15)
+#define RT5640_M1_T_MASK (0x1 << 14)
+#define RT5640_M1_T_SFT 14
+#define RT5640_M1_T_I2S2 (0x0 << 14)
+#define RT5640_M1_T_I2S2_D3 (0x1 << 14)
+#define RT5640_I2S2_F_MASK (0x1 << 12)
+#define RT5640_I2S2_F_SFT 12
+#define RT5640_I2S2_F_I2S2_D2 (0x0 << 12)
+#define RT5640_I2S2_F_I2S1_TCLK (0x1 << 12)
+#define RT5640_DMIC_1_M_MASK (0x1 << 9)
+#define RT5640_DMIC_1_M_SFT 9
+#define RT5640_DMIC_1_M_NOR (0x0 << 9)
+#define RT5640_DMIC_1_M_ASYN (0x1 << 9)
+#define RT5640_DMIC_2_M_MASK (0x1 << 8)
+#define RT5640_DMIC_2_M_SFT 8
+#define RT5640_DMIC_2_M_NOR (0x0 << 8)
+#define RT5640_DMIC_2_M_ASYN (0x1 << 8)
+
+/* ASRC Control 2 (0x84) */
+#define RT5640_MDA_L_M_MASK (0x1 << 15)
+#define RT5640_MDA_L_M_SFT 15
+#define RT5640_MDA_L_M_NOR (0x0 << 15)
+#define RT5640_MDA_L_M_ASYN (0x1 << 15)
+#define RT5640_MDA_R_M_MASK (0x1 << 14)
+#define RT5640_MDA_R_M_SFT 14
+#define RT5640_MDA_R_M_NOR (0x0 << 14)
+#define RT5640_MDA_R_M_ASYN (0x1 << 14)
+#define RT5640_MAD_L_M_MASK (0x1 << 13)
+#define RT5640_MAD_L_M_SFT 13
+#define RT5640_MAD_L_M_NOR (0x0 << 13)
+#define RT5640_MAD_L_M_ASYN (0x1 << 13)
+#define RT5640_MAD_R_M_MASK (0x1 << 12)
+#define RT5640_MAD_R_M_SFT 12
+#define RT5640_MAD_R_M_NOR (0x0 << 12)
+#define RT5640_MAD_R_M_ASYN (0x1 << 12)
+#define RT5640_ADC_M_MASK (0x1 << 11)
+#define RT5640_ADC_M_SFT 11
+#define RT5640_ADC_M_NOR (0x0 << 11)
+#define RT5640_ADC_M_ASYN (0x1 << 11)
+#define RT5640_STO_DAC_M_MASK (0x1 << 5)
+#define RT5640_STO_DAC_M_SFT 5
+#define RT5640_STO_DAC_M_NOR (0x0 << 5)
+#define RT5640_STO_DAC_M_ASYN (0x1 << 5)
+#define RT5640_I2S1_R_D_MASK (0x1 << 4)
+#define RT5640_I2S1_R_D_SFT 4
+#define RT5640_I2S1_R_D_DIS (0x0 << 4)
+#define RT5640_I2S1_R_D_EN (0x1 << 4)
+#define RT5640_I2S2_R_D_MASK (0x1 << 3)
+#define RT5640_I2S2_R_D_SFT 3
+#define RT5640_I2S2_R_D_DIS (0x0 << 3)
+#define RT5640_I2S2_R_D_EN (0x1 << 3)
+#define RT5640_PRE_SCLK_MASK (0x3)
+#define RT5640_PRE_SCLK_SFT 0
+#define RT5640_PRE_SCLK_512 (0x0)
+#define RT5640_PRE_SCLK_1024 (0x1)
+#define RT5640_PRE_SCLK_2048 (0x2)
+
+/* ASRC Control 3 (0x85) */
+#define RT5640_I2S1_RATE_MASK (0xf << 12)
+#define RT5640_I2S1_RATE_SFT 12
+#define RT5640_I2S2_RATE_MASK (0xf << 8)
+#define RT5640_I2S2_RATE_SFT 8
+
+/* ASRC Control 4 (0x89) */
+#define RT5640_I2S1_PD_MASK (0x7 << 12)
+#define RT5640_I2S1_PD_SFT 12
+#define RT5640_I2S2_PD_MASK (0x7 << 8)
+#define RT5640_I2S2_PD_SFT 8
+
+/* HPOUT Over Current Detection (0x8b) */
+#define RT5640_HP_OVCD_MASK (0x1 << 10)
+#define RT5640_HP_OVCD_SFT 10
+#define RT5640_HP_OVCD_DIS (0x0 << 10)
+#define RT5640_HP_OVCD_EN (0x1 << 10)
+#define RT5640_HP_OC_TH_MASK (0x3 << 8)
+#define RT5640_HP_OC_TH_SFT 8
+#define RT5640_HP_OC_TH_90 (0x0 << 8)
+#define RT5640_HP_OC_TH_105 (0x1 << 8)
+#define RT5640_HP_OC_TH_120 (0x2 << 8)
+#define RT5640_HP_OC_TH_135 (0x3 << 8)
+
+/* Class D Over Current Control (0x8c) */
+#define RT5640_CLSD_OC_MASK (0x1 << 9)
+#define RT5640_CLSD_OC_SFT 9
+#define RT5640_CLSD_OC_PU (0x0 << 9)
+#define RT5640_CLSD_OC_PD (0x1 << 9)
+#define RT5640_AUTO_PD_MASK (0x1 << 8)
+#define RT5640_AUTO_PD_SFT 8
+#define RT5640_AUTO_PD_DIS (0x0 << 8)
+#define RT5640_AUTO_PD_EN (0x1 << 8)
+#define RT5640_CLSD_OC_TH_MASK (0x3f)
+#define RT5640_CLSD_OC_TH_SFT 0
+
+/* Class D Output Control (0x8d) */
+#define RT5640_CLSD_RATIO_MASK (0xf << 12)
+#define RT5640_CLSD_RATIO_SFT 12
+#define RT5640_CLSD_OM_MASK (0x1 << 11)
+#define RT5640_CLSD_OM_SFT 11
+#define RT5640_CLSD_OM_MONO (0x0 << 11)
+#define RT5640_CLSD_OM_STO (0x1 << 11)
+#define RT5640_CLSD_SCH_MASK (0x1 << 10)
+#define RT5640_CLSD_SCH_SFT 10
+#define RT5640_CLSD_SCH_L (0x0 << 10)
+#define RT5640_CLSD_SCH_S (0x1 << 10)
+
+/* Depop Mode Control 1 (0x8e) */
+#define RT5640_SMT_TRIG_MASK (0x1 << 15)
+#define RT5640_SMT_TRIG_SFT 15
+#define RT5640_SMT_TRIG_DIS (0x0 << 15)
+#define RT5640_SMT_TRIG_EN (0x1 << 15)
+#define RT5640_HP_L_SMT_MASK (0x1 << 9)
+#define RT5640_HP_L_SMT_SFT 9
+#define RT5640_HP_L_SMT_DIS (0x0 << 9)
+#define RT5640_HP_L_SMT_EN (0x1 << 9)
+#define RT5640_HP_R_SMT_MASK (0x1 << 8)
+#define RT5640_HP_R_SMT_SFT 8
+#define RT5640_HP_R_SMT_DIS (0x0 << 8)
+#define RT5640_HP_R_SMT_EN (0x1 << 8)
+#define RT5640_HP_CD_PD_MASK (0x1 << 7)
+#define RT5640_HP_CD_PD_SFT 7
+#define RT5640_HP_CD_PD_DIS (0x0 << 7)
+#define RT5640_HP_CD_PD_EN (0x1 << 7)
+#define RT5640_RSTN_MASK (0x1 << 6)
+#define RT5640_RSTN_SFT 6
+#define RT5640_RSTN_DIS (0x0 << 6)
+#define RT5640_RSTN_EN (0x1 << 6)
+#define RT5640_RSTP_MASK (0x1 << 5)
+#define RT5640_RSTP_SFT 5
+#define RT5640_RSTP_DIS (0x0 << 5)
+#define RT5640_RSTP_EN (0x1 << 5)
+#define RT5640_HP_CO_MASK (0x1 << 4)
+#define RT5640_HP_CO_SFT 4
+#define RT5640_HP_CO_DIS (0x0 << 4)
+#define RT5640_HP_CO_EN (0x1 << 4)
+#define RT5640_HP_CP_MASK (0x1 << 3)
+#define RT5640_HP_CP_SFT 3
+#define RT5640_HP_CP_PD (0x0 << 3)
+#define RT5640_HP_CP_PU (0x1 << 3)
+#define RT5640_HP_SG_MASK (0x1 << 2)
+#define RT5640_HP_SG_SFT 2
+#define RT5640_HP_SG_DIS (0x0 << 2)
+#define RT5640_HP_SG_EN (0x1 << 2)
+#define RT5640_HP_DP_MASK (0x1 << 1)
+#define RT5640_HP_DP_SFT 1
+#define RT5640_HP_DP_PD (0x0 << 1)
+#define RT5640_HP_DP_PU (0x1 << 1)
+#define RT5640_HP_CB_MASK (0x1)
+#define RT5640_HP_CB_SFT 0
+#define RT5640_HP_CB_PD (0x0)
+#define RT5640_HP_CB_PU (0x1)
+
+/* Depop Mode Control 2 (0x8f) */
+#define RT5640_DEPOP_MASK (0x1 << 13)
+#define RT5640_DEPOP_SFT 13
+#define RT5640_DEPOP_AUTO (0x0 << 13)
+#define RT5640_DEPOP_MAN (0x1 << 13)
+#define RT5640_RAMP_MASK (0x1 << 12)
+#define RT5640_RAMP_SFT 12
+#define RT5640_RAMP_DIS (0x0 << 12)
+#define RT5640_RAMP_EN (0x1 << 12)
+#define RT5640_BPS_MASK (0x1 << 11)
+#define RT5640_BPS_SFT 11
+#define RT5640_BPS_DIS (0x0 << 11)
+#define RT5640_BPS_EN (0x1 << 11)
+#define RT5640_FAST_UPDN_MASK (0x1 << 10)
+#define RT5640_FAST_UPDN_SFT 10
+#define RT5640_FAST_UPDN_DIS (0x0 << 10)
+#define RT5640_FAST_UPDN_EN (0x1 << 10)
+#define RT5640_MRES_MASK (0x3 << 8)
+#define RT5640_MRES_SFT 8
+#define RT5640_MRES_15MO (0x0 << 8)
+#define RT5640_MRES_25MO (0x1 << 8)
+#define RT5640_MRES_35MO (0x2 << 8)
+#define RT5640_MRES_45MO (0x3 << 8)
+#define RT5640_VLO_MASK (0x1 << 7)
+#define RT5640_VLO_SFT 7
+#define RT5640_VLO_3V (0x0 << 7)
+#define RT5640_VLO_32V (0x1 << 7)
+#define RT5640_DIG_DP_MASK (0x1 << 6)
+#define RT5640_DIG_DP_SFT 6
+#define RT5640_DIG_DP_DIS (0x0 << 6)
+#define RT5640_DIG_DP_EN (0x1 << 6)
+#define RT5640_DP_TH_MASK (0x3 << 4)
+#define RT5640_DP_TH_SFT 4
+
+/* Depop Mode Control 3 (0x90) */
+#define RT5640_CP_SYS_MASK (0x7 << 12)
+#define RT5640_CP_SYS_SFT 12
+#define RT5640_CP_FQ1_MASK (0x7 << 8)
+#define RT5640_CP_FQ1_SFT 8
+#define RT5640_CP_FQ2_MASK (0x7 << 4)
+#define RT5640_CP_FQ2_SFT 4
+#define RT5640_CP_FQ3_MASK (0x7)
+#define RT5640_CP_FQ3_SFT 0
+
+/* HPOUT charge pump (0x91) */
+#define RT5640_OSW_L_MASK (0x1 << 11)
+#define RT5640_OSW_L_SFT 11
+#define RT5640_OSW_L_DIS (0x0 << 11)
+#define RT5640_OSW_L_EN (0x1 << 11)
+#define RT5640_OSW_R_MASK (0x1 << 10)
+#define RT5640_OSW_R_SFT 10
+#define RT5640_OSW_R_DIS (0x0 << 10)
+#define RT5640_OSW_R_EN (0x1 << 10)
+#define RT5640_PM_HP_MASK (0x3 << 8)
+#define RT5640_PM_HP_SFT 8
+#define RT5640_PM_HP_LV (0x0 << 8)
+#define RT5640_PM_HP_MV (0x1 << 8)
+#define RT5640_PM_HP_HV (0x2 << 8)
+#define RT5640_IB_HP_MASK (0x3 << 6)
+#define RT5640_IB_HP_SFT 6
+#define RT5640_IB_HP_125IL (0x0 << 6)
+#define RT5640_IB_HP_25IL (0x1 << 6)
+#define RT5640_IB_HP_5IL (0x2 << 6)
+#define RT5640_IB_HP_1IL (0x3 << 6)
+
+/* PV detection and SPK gain control (0x92) */
+#define RT5640_PVDD_DET_MASK (0x1 << 15)
+#define RT5640_PVDD_DET_SFT 15
+#define RT5640_PVDD_DET_DIS (0x0 << 15)
+#define RT5640_PVDD_DET_EN (0x1 << 15)
+#define RT5640_SPK_AG_MASK (0x1 << 14)
+#define RT5640_SPK_AG_SFT 14
+#define RT5640_SPK_AG_DIS (0x0 << 14)
+#define RT5640_SPK_AG_EN (0x1 << 14)
+
+/* Micbias Control (0x93) */
+#define RT5640_MIC1_BS_MASK (0x1 << 15)
+#define RT5640_MIC1_BS_SFT 15
+#define RT5640_MIC1_BS_9AV (0x0 << 15)
+#define RT5640_MIC1_BS_75AV (0x1 << 15)
+#define RT5640_MIC2_BS_MASK (0x1 << 14)
+#define RT5640_MIC2_BS_SFT 14
+#define RT5640_MIC2_BS_9AV (0x0 << 14)
+#define RT5640_MIC2_BS_75AV (0x1 << 14)
+#define RT5640_MIC1_CLK_MASK (0x1 << 13)
+#define RT5640_MIC1_CLK_SFT 13
+#define RT5640_MIC1_CLK_DIS (0x0 << 13)
+#define RT5640_MIC1_CLK_EN (0x1 << 13)
+#define RT5640_MIC2_CLK_MASK (0x1 << 12)
+#define RT5640_MIC2_CLK_SFT 12
+#define RT5640_MIC2_CLK_DIS (0x0 << 12)
+#define RT5640_MIC2_CLK_EN (0x1 << 12)
+#define RT5640_MIC1_OVCD_MASK (0x1 << 11)
+#define RT5640_MIC1_OVCD_SFT 11
+#define RT5640_MIC1_OVCD_DIS (0x0 << 11)
+#define RT5640_MIC1_OVCD_EN (0x1 << 11)
+#define RT5640_MIC1_OVTH_MASK (0x3 << 9)
+#define RT5640_MIC1_OVTH_SFT 9
+#define RT5640_MIC1_OVTH_600UA (0x0 << 9)
+#define RT5640_MIC1_OVTH_1500UA (0x1 << 9)
+#define RT5640_MIC1_OVTH_2000UA (0x2 << 9)
+#define RT5640_MIC2_OVCD_MASK (0x1 << 8)
+#define RT5640_MIC2_OVCD_SFT 8
+#define RT5640_MIC2_OVCD_DIS (0x0 << 8)
+#define RT5640_MIC2_OVCD_EN (0x1 << 8)
+#define RT5640_MIC2_OVTH_MASK (0x3 << 6)
+#define RT5640_MIC2_OVTH_SFT 6
+#define RT5640_MIC2_OVTH_600UA (0x0 << 6)
+#define RT5640_MIC2_OVTH_1500UA (0x1 << 6)
+#define RT5640_MIC2_OVTH_2000UA (0x2 << 6)
+#define RT5640_PWR_MB_MASK (0x1 << 5)
+#define RT5640_PWR_MB_SFT 5
+#define RT5640_PWR_MB_PD (0x0 << 5)
+#define RT5640_PWR_MB_PU (0x1 << 5)
+#define RT5640_PWR_CLK25M_MASK (0x1 << 4)
+#define RT5640_PWR_CLK25M_SFT 4
+#define RT5640_PWR_CLK25M_PD (0x0 << 4)
+#define RT5640_PWR_CLK25M_PU (0x1 << 4)
+
+/* EQ Control 1 (0xb0) */
+#define RT5640_EQ_SRC_MASK (0x1 << 15)
+#define RT5640_EQ_SRC_SFT 15
+#define RT5640_EQ_SRC_DAC (0x0 << 15)
+#define RT5640_EQ_SRC_ADC (0x1 << 15)
+#define RT5640_EQ_UPD (0x1 << 14)
+#define RT5640_EQ_UPD_BIT 14
+#define RT5640_EQ_CD_MASK (0x1 << 13)
+#define RT5640_EQ_CD_SFT 13
+#define RT5640_EQ_CD_DIS (0x0 << 13)
+#define RT5640_EQ_CD_EN (0x1 << 13)
+#define RT5640_EQ_DITH_MASK (0x3 << 8)
+#define RT5640_EQ_DITH_SFT 8
+#define RT5640_EQ_DITH_NOR (0x0 << 8)
+#define RT5640_EQ_DITH_LSB (0x1 << 8)
+#define RT5640_EQ_DITH_LSB_1 (0x2 << 8)
+#define RT5640_EQ_DITH_LSB_2 (0x3 << 8)
+
+/* EQ Control 2 (0xb1) */
+#define RT5640_EQ_HPF1_M_MASK (0x1 << 8)
+#define RT5640_EQ_HPF1_M_SFT 8
+#define RT5640_EQ_HPF1_M_HI (0x0 << 8)
+#define RT5640_EQ_HPF1_M_1ST (0x1 << 8)
+#define RT5640_EQ_LPF1_M_MASK (0x1 << 7)
+#define RT5640_EQ_LPF1_M_SFT 7
+#define RT5640_EQ_LPF1_M_LO (0x0 << 7)
+#define RT5640_EQ_LPF1_M_1ST (0x1 << 7)
+#define RT5640_EQ_HPF2_MASK (0x1 << 6)
+#define RT5640_EQ_HPF2_SFT 6
+#define RT5640_EQ_HPF2_DIS (0x0 << 6)
+#define RT5640_EQ_HPF2_EN (0x1 << 6)
+#define RT5640_EQ_HPF1_MASK (0x1 << 5)
+#define RT5640_EQ_HPF1_SFT 5
+#define RT5640_EQ_HPF1_DIS (0x0 << 5)
+#define RT5640_EQ_HPF1_EN (0x1 << 5)
+#define RT5640_EQ_BPF4_MASK (0x1 << 4)
+#define RT5640_EQ_BPF4_SFT 4
+#define RT5640_EQ_BPF4_DIS (0x0 << 4)
+#define RT5640_EQ_BPF4_EN (0x1 << 4)
+#define RT5640_EQ_BPF3_MASK (0x1 << 3)
+#define RT5640_EQ_BPF3_SFT 3
+#define RT5640_EQ_BPF3_DIS (0x0 << 3)
+#define RT5640_EQ_BPF3_EN (0x1 << 3)
+#define RT5640_EQ_BPF2_MASK (0x1 << 2)
+#define RT5640_EQ_BPF2_SFT 2
+#define RT5640_EQ_BPF2_DIS (0x0 << 2)
+#define RT5640_EQ_BPF2_EN (0x1 << 2)
+#define RT5640_EQ_BPF1_MASK (0x1 << 1)
+#define RT5640_EQ_BPF1_SFT 1
+#define RT5640_EQ_BPF1_DIS (0x0 << 1)
+#define RT5640_EQ_BPF1_EN (0x1 << 1)
+#define RT5640_EQ_LPF_MASK (0x1)
+#define RT5640_EQ_LPF_SFT 0
+#define RT5640_EQ_LPF_DIS (0x0)
+#define RT5640_EQ_LPF_EN (0x1)
+
+/* Memory Test (0xb2) */
+#define RT5640_MT_MASK (0x1 << 15)
+#define RT5640_MT_SFT 15
+#define RT5640_MT_DIS (0x0 << 15)
+#define RT5640_MT_EN (0x1 << 15)
+
+/* DRC/AGC Control 1 (0xb4) */
+#define RT5640_DRC_AGC_P_MASK (0x1 << 15)
+#define RT5640_DRC_AGC_P_SFT 15
+#define RT5640_DRC_AGC_P_DAC (0x0 << 15)
+#define RT5640_DRC_AGC_P_ADC (0x1 << 15)
+#define RT5640_DRC_AGC_MASK (0x1 << 14)
+#define RT5640_DRC_AGC_SFT 14
+#define RT5640_DRC_AGC_DIS (0x0 << 14)
+#define RT5640_DRC_AGC_EN (0x1 << 14)
+#define RT5640_DRC_AGC_UPD (0x1 << 13)
+#define RT5640_DRC_AGC_UPD_BIT 13
+#define RT5640_DRC_AGC_AR_MASK (0x1f << 8)
+#define RT5640_DRC_AGC_AR_SFT 8
+#define RT5640_DRC_AGC_R_MASK (0x7 << 5)
+#define RT5640_DRC_AGC_R_SFT 5
+#define RT5640_DRC_AGC_R_48K (0x1 << 5)
+#define RT5640_DRC_AGC_R_96K (0x2 << 5)
+#define RT5640_DRC_AGC_R_192K (0x3 << 5)
+#define RT5640_DRC_AGC_R_441K (0x5 << 5)
+#define RT5640_DRC_AGC_R_882K (0x6 << 5)
+#define RT5640_DRC_AGC_R_1764K (0x7 << 5)
+#define RT5640_DRC_AGC_RC_MASK (0x1f)
+#define RT5640_DRC_AGC_RC_SFT 0
+
+/* DRC/AGC Control 2 (0xb5) */
+#define RT5640_DRC_AGC_POB_MASK (0x3f << 8)
+#define RT5640_DRC_AGC_POB_SFT 8
+#define RT5640_DRC_AGC_CP_MASK (0x1 << 7)
+#define RT5640_DRC_AGC_CP_SFT 7
+#define RT5640_DRC_AGC_CP_DIS (0x0 << 7)
+#define RT5640_DRC_AGC_CP_EN (0x1 << 7)
+#define RT5640_DRC_AGC_CPR_MASK (0x3 << 5)
+#define RT5640_DRC_AGC_CPR_SFT 5
+#define RT5640_DRC_AGC_CPR_1_1 (0x0 << 5)
+#define RT5640_DRC_AGC_CPR_1_2 (0x1 << 5)
+#define RT5640_DRC_AGC_CPR_1_3 (0x2 << 5)
+#define RT5640_DRC_AGC_CPR_1_4 (0x3 << 5)
+#define RT5640_DRC_AGC_PRB_MASK (0x1f)
+#define RT5640_DRC_AGC_PRB_SFT 0
+
+/* DRC/AGC Control 3 (0xb6) */
+#define RT5640_DRC_AGC_NGB_MASK (0xf << 12)
+#define RT5640_DRC_AGC_NGB_SFT 12
+#define RT5640_DRC_AGC_TAR_MASK (0x1f << 7)
+#define RT5640_DRC_AGC_TAR_SFT 7
+#define RT5640_DRC_AGC_NG_MASK (0x1 << 6)
+#define RT5640_DRC_AGC_NG_SFT 6
+#define RT5640_DRC_AGC_NG_DIS (0x0 << 6)
+#define RT5640_DRC_AGC_NG_EN (0x1 << 6)
+#define RT5640_DRC_AGC_NGH_MASK (0x1 << 5)
+#define RT5640_DRC_AGC_NGH_SFT 5
+#define RT5640_DRC_AGC_NGH_DIS (0x0 << 5)
+#define RT5640_DRC_AGC_NGH_EN (0x1 << 5)
+#define RT5640_DRC_AGC_NGT_MASK (0x1f)
+#define RT5640_DRC_AGC_NGT_SFT 0
+
+/* ANC Control 1 (0xb8) */
+#define RT5640_ANC_M_MASK (0x1 << 15)
+#define RT5640_ANC_M_SFT 15
+#define RT5640_ANC_M_NOR (0x0 << 15)
+#define RT5640_ANC_M_REV (0x1 << 15)
+#define RT5640_ANC_MASK (0x1 << 14)
+#define RT5640_ANC_SFT 14
+#define RT5640_ANC_DIS (0x0 << 14)
+#define RT5640_ANC_EN (0x1 << 14)
+#define RT5640_ANC_MD_MASK (0x3 << 12)
+#define RT5640_ANC_MD_SFT 12
+#define RT5640_ANC_MD_DIS (0x0 << 12)
+#define RT5640_ANC_MD_67MS (0x1 << 12)
+#define RT5640_ANC_MD_267MS (0x2 << 12)
+#define RT5640_ANC_MD_1067MS (0x3 << 12)
+#define RT5640_ANC_SN_MASK (0x1 << 11)
+#define RT5640_ANC_SN_SFT 11
+#define RT5640_ANC_SN_DIS (0x0 << 11)
+#define RT5640_ANC_SN_EN (0x1 << 11)
+#define RT5640_ANC_CLK_MASK (0x1 << 10)
+#define RT5640_ANC_CLK_SFT 10
+#define RT5640_ANC_CLK_ANC (0x0 << 10)
+#define RT5640_ANC_CLK_REG (0x1 << 10)
+#define RT5640_ANC_ZCD_MASK (0x3 << 8)
+#define RT5640_ANC_ZCD_SFT 8
+#define RT5640_ANC_ZCD_DIS (0x0 << 8)
+#define RT5640_ANC_ZCD_T1 (0x1 << 8)
+#define RT5640_ANC_ZCD_T2 (0x2 << 8)
+#define RT5640_ANC_ZCD_WT (0x3 << 8)
+#define RT5640_ANC_CS_MASK (0x1 << 7)
+#define RT5640_ANC_CS_SFT 7
+#define RT5640_ANC_CS_DIS (0x0 << 7)
+#define RT5640_ANC_CS_EN (0x1 << 7)
+#define RT5640_ANC_SW_MASK (0x1 << 6)
+#define RT5640_ANC_SW_SFT 6
+#define RT5640_ANC_SW_NOR (0x0 << 6)
+#define RT5640_ANC_SW_AUTO (0x1 << 6)
+#define RT5640_ANC_CO_L_MASK (0x3f)
+#define RT5640_ANC_CO_L_SFT 0
+
+/* ANC Control 2 (0xb9) */
+#define RT5640_ANC_FG_R_MASK (0xf << 12)
+#define RT5640_ANC_FG_R_SFT 12
+#define RT5640_ANC_FG_L_MASK (0xf << 8)
+#define RT5640_ANC_FG_L_SFT 8
+#define RT5640_ANC_CG_R_MASK (0xf << 4)
+#define RT5640_ANC_CG_R_SFT 4
+#define RT5640_ANC_CG_L_MASK (0xf)
+#define RT5640_ANC_CG_L_SFT 0
+
+/* ANC Control 3 (0xba) */
+#define RT5640_ANC_CD_MASK (0x1 << 6)
+#define RT5640_ANC_CD_SFT 6
+#define RT5640_ANC_CD_BOTH (0x0 << 6)
+#define RT5640_ANC_CD_IND (0x1 << 6)
+#define RT5640_ANC_CO_R_MASK (0x3f)
+#define RT5640_ANC_CO_R_SFT 0
+
+/* Jack Detect Control (0xbb) */
+#define RT5640_JD_MASK (0x7 << 13)
+#define RT5640_JD_SFT 13
+#define RT5640_JD_DIS (0x0 << 13)
+#define RT5640_JD_GPIO1 (0x1 << 13)
+#define RT5640_JD_JD1_IN4P (0x2 << 13)
+#define RT5640_JD_JD2_IN4N (0x3 << 13)
+#define RT5640_JD_GPIO2 (0x4 << 13)
+#define RT5640_JD_GPIO3 (0x5 << 13)
+#define RT5640_JD_GPIO4 (0x6 << 13)
+#define RT5640_JD_HP_MASK (0x1 << 11)
+#define RT5640_JD_HP_SFT 11
+#define RT5640_JD_HP_DIS (0x0 << 11)
+#define RT5640_JD_HP_EN (0x1 << 11)
+#define RT5640_JD_HP_TRG_MASK (0x1 << 10)
+#define RT5640_JD_HP_TRG_SFT 10
+#define RT5640_JD_HP_TRG_LO (0x0 << 10)
+#define RT5640_JD_HP_TRG_HI (0x1 << 10)
+#define RT5640_JD_SPL_MASK (0x1 << 9)
+#define RT5640_JD_SPL_SFT 9
+#define RT5640_JD_SPL_DIS (0x0 << 9)
+#define RT5640_JD_SPL_EN (0x1 << 9)
+#define RT5640_JD_SPL_TRG_MASK (0x1 << 8)
+#define RT5640_JD_SPL_TRG_SFT 8
+#define RT5640_JD_SPL_TRG_LO (0x0 << 8)
+#define RT5640_JD_SPL_TRG_HI (0x1 << 8)
+#define RT5640_JD_SPR_MASK (0x1 << 7)
+#define RT5640_JD_SPR_SFT 7
+#define RT5640_JD_SPR_DIS (0x0 << 7)
+#define RT5640_JD_SPR_EN (0x1 << 7)
+#define RT5640_JD_SPR_TRG_MASK (0x1 << 6)
+#define RT5640_JD_SPR_TRG_SFT 6
+#define RT5640_JD_SPR_TRG_LO (0x0 << 6)
+#define RT5640_JD_SPR_TRG_HI (0x1 << 6)
+#define RT5640_JD_MO_MASK (0x1 << 5)
+#define RT5640_JD_MO_SFT 5
+#define RT5640_JD_MO_DIS (0x0 << 5)
+#define RT5640_JD_MO_EN (0x1 << 5)
+#define RT5640_JD_MO_TRG_MASK (0x1 << 4)
+#define RT5640_JD_MO_TRG_SFT 4
+#define RT5640_JD_MO_TRG_LO (0x0 << 4)
+#define RT5640_JD_MO_TRG_HI (0x1 << 4)
+#define RT5640_JD_LO_MASK (0x1 << 3)
+#define RT5640_JD_LO_SFT 3
+#define RT5640_JD_LO_DIS (0x0 << 3)
+#define RT5640_JD_LO_EN (0x1 << 3)
+#define RT5640_JD_LO_TRG_MASK (0x1 << 2)
+#define RT5640_JD_LO_TRG_SFT 2
+#define RT5640_JD_LO_TRG_LO (0x0 << 2)
+#define RT5640_JD_LO_TRG_HI (0x1 << 2)
+#define RT5640_JD1_IN4P_MASK (0x1 << 1)
+#define RT5640_JD1_IN4P_SFT 1
+#define RT5640_JD1_IN4P_DIS (0x0 << 1)
+#define RT5640_JD1_IN4P_EN (0x1 << 1)
+#define RT5640_JD2_IN4N_MASK (0x1)
+#define RT5640_JD2_IN4N_SFT 0
+#define RT5640_JD2_IN4N_DIS (0x0)
+#define RT5640_JD2_IN4N_EN (0x1)
+
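Which pin reports jack insertion is a board-level choice, so the JD source field is normally programmed once during initialization, typically from platform data. A hedged sketch of that single write follows, assuming the RT5640_JD_CTRL register index (0xbb) defined earlier in this header.

#include <sound/soc.h>

/* Sketch only, not patch content: route jack detection to the
 * JD2/IN4N pin.  RT5640_JD_CTRL is assumed to be the jack-detect
 * control register index defined earlier in this header.
 */
static void rt5640_jd_source_sketch(struct snd_soc_codec *codec)
{
	snd_soc_update_bits(codec, RT5640_JD_CTRL,
			    RT5640_JD_MASK, RT5640_JD_JD2_IN4N);
}
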
+/* Jack detect for ANC (0xbc) */
+#define RT5640_ANC_DET_MASK (0x3 << 4)
+#define RT5640_ANC_DET_SFT 4
+#define RT5640_ANC_DET_DIS (0x0 << 4)
+#define RT5640_ANC_DET_MB1 (0x1 << 4)
+#define RT5640_ANC_DET_MB2 (0x2 << 4)
+#define RT5640_ANC_DET_JD (0x3 << 4)
+#define RT5640_AD_TRG_MASK (0x1 << 3)
+#define RT5640_AD_TRG_SFT 3
+#define RT5640_AD_TRG_LO (0x0 << 3)
+#define RT5640_AD_TRG_HI (0x1 << 3)
+#define RT5640_ANCM_DET_MASK (0x3 << 4)
+#define RT5640_ANCM_DET_SFT 4
+#define RT5640_ANCM_DET_DIS (0x0 << 4)
+#define RT5640_ANCM_DET_MB1 (0x1 << 4)
+#define RT5640_ANCM_DET_MB2 (0x2 << 4)
+#define RT5640_ANCM_DET_JD (0x3 << 4)
+#define RT5640_AMD_TRG_MASK (0x1 << 3)
+#define RT5640_AMD_TRG_SFT 3
+#define RT5640_AMD_TRG_LO (0x0 << 3)
+#define RT5640_AMD_TRG_HI (0x1 << 3)
+
+/* IRQ Control 1 (0xbd) */
+#define RT5640_IRQ_JD_MASK (0x1 << 15)
+#define RT5640_IRQ_JD_SFT 15
+#define RT5640_IRQ_JD_BP (0x0 << 15)
+#define RT5640_IRQ_JD_NOR (0x1 << 15)
+#define RT5640_IRQ_OT_MASK (0x1 << 14)
+#define RT5640_IRQ_OT_SFT 14
+#define RT5640_IRQ_OT_BP (0x0 << 14)
+#define RT5640_IRQ_OT_NOR (0x1 << 14)
+#define RT5640_JD_STKY_MASK (0x1 << 13)
+#define RT5640_JD_STKY_SFT 13
+#define RT5640_JD_STKY_DIS (0x0 << 13)
+#define RT5640_JD_STKY_EN (0x1 << 13)
+#define RT5640_OT_STKY_MASK (0x1 << 12)
+#define RT5640_OT_STKY_SFT 12
+#define RT5640_OT_STKY_DIS (0x0 << 12)
+#define RT5640_OT_STKY_EN (0x1 << 12)
+#define RT5640_JD_P_MASK (0x1 << 11)
+#define RT5640_JD_P_SFT 11
+#define RT5640_JD_P_NOR (0x0 << 11)
+#define RT5640_JD_P_INV (0x1 << 11)
+#define RT5640_OT_P_MASK (0x1 << 10)
+#define RT5640_OT_P_SFT 10
+#define RT5640_OT_P_NOR (0x0 << 10)
+#define RT5640_OT_P_INV (0x1 << 10)
+
+/* IRQ Control 2 (0xbe) */
+#define RT5640_IRQ_MB1_OC_MASK (0x1 << 15)
+#define RT5640_IRQ_MB1_OC_SFT 15
+#define RT5640_IRQ_MB1_OC_BP (0x0 << 15)
+#define RT5640_IRQ_MB1_OC_NOR (0x1 << 15)
+#define RT5640_IRQ_MB2_OC_MASK (0x1 << 14)
+#define RT5640_IRQ_MB2_OC_SFT 14
+#define RT5640_IRQ_MB2_OC_BP (0x0 << 14)
+#define RT5640_IRQ_MB2_OC_NOR (0x1 << 14)
+#define RT5640_MB1_OC_STKY_MASK (0x1 << 11)
+#define RT5640_MB1_OC_STKY_SFT 11
+#define RT5640_MB1_OC_STKY_DIS (0x0 << 11)
+#define RT5640_MB1_OC_STKY_EN (0x1 << 11)
+#define RT5640_MB2_OC_STKY_MASK (0x1 << 10)
+#define RT5640_MB2_OC_STKY_SFT 10
+#define RT5640_MB2_OC_STKY_DIS (0x0 << 10)
+#define RT5640_MB2_OC_STKY_EN (0x1 << 10)
+#define RT5640_MB1_OC_P_MASK (0x1 << 7)
+#define RT5640_MB1_OC_P_SFT 7
+#define RT5640_MB1_OC_P_NOR (0x0 << 7)
+#define RT5640_MB1_OC_P_INV (0x1 << 7)
+#define RT5640_MB2_OC_P_MASK (0x1 << 6)
+#define RT5640_MB2_OC_P_SFT 6
+#define RT5640_MB2_OC_P_NOR (0x0 << 6)
+#define RT5640_MB2_OC_P_INV (0x1 << 6)
+#define RT5640_MB1_OC_CLR (0x1 << 3)
+#define RT5640_MB1_OC_CLR_SFT 3
+#define RT5640_MB2_OC_CLR (0x1 << 2)
+#define RT5640_MB2_OC_CLR_SFT 2
+
+/* GPIO Control 1 (0xc0) */
+#define RT5640_GP1_PIN_MASK (0x1 << 15)
+#define RT5640_GP1_PIN_SFT 15
+#define RT5640_GP1_PIN_GPIO1 (0x0 << 15)
+#define RT5640_GP1_PIN_IRQ (0x1 << 15)
+#define RT5640_GP2_PIN_MASK (0x1 << 14)
+#define RT5640_GP2_PIN_SFT 14
+#define RT5640_GP2_PIN_GPIO2 (0x0 << 14)
+#define RT5640_GP2_PIN_DMIC1_SCL (0x1 << 14)
+#define RT5640_GP3_PIN_MASK (0x3 << 12)
+#define RT5640_GP3_PIN_SFT 12
+#define RT5640_GP3_PIN_GPIO3 (0x0 << 12)
+#define RT5640_GP3_PIN_DMIC1_SDA (0x1 << 12)
+#define RT5640_GP3_PIN_IRQ (0x2 << 12)
+#define RT5640_GP4_PIN_MASK (0x1 << 11)
+#define RT5640_GP4_PIN_SFT 11
+#define RT5640_GP4_PIN_GPIO4 (0x0 << 11)
+#define RT5640_GP4_PIN_DMIC2_SDA (0x1 << 11)
+#define RT5640_DP_SIG_MASK (0x1 << 10)
+#define RT5640_DP_SIG_SFT 10
+#define RT5640_DP_SIG_TEST (0x0 << 10)
+#define RT5640_DP_SIG_AP (0x1 << 10)
+#define RT5640_GPIO_M_MASK (0x1 << 9)
+#define RT5640_GPIO_M_SFT 9
+#define RT5640_GPIO_M_FLT (0x0 << 9)
+#define RT5640_GPIO_M_PH (0x1 << 9)
+
+/* GPIO Control 3 (0xc2) */
+#define RT5640_GP4_PF_MASK (0x1 << 11)
+#define RT5640_GP4_PF_SFT 11
+#define RT5640_GP4_PF_IN (0x0 << 11)
+#define RT5640_GP4_PF_OUT (0x1 << 11)
+#define RT5640_GP4_OUT_MASK (0x1 << 10)
+#define RT5640_GP4_OUT_SFT 10
+#define RT5640_GP4_OUT_LO (0x0 << 10)
+#define RT5640_GP4_OUT_HI (0x1 << 10)
+#define RT5640_GP4_P_MASK (0x1 << 9)
+#define RT5640_GP4_P_SFT 9
+#define RT5640_GP4_P_NOR (0x0 << 9)
+#define RT5640_GP4_P_INV (0x1 << 9)
+#define RT5640_GP3_PF_MASK (0x1 << 8)
+#define RT5640_GP3_PF_SFT 8
+#define RT5640_GP3_PF_IN (0x0 << 8)
+#define RT5640_GP3_PF_OUT (0x1 << 8)
+#define RT5640_GP3_OUT_MASK (0x1 << 7)
+#define RT5640_GP3_OUT_SFT 7
+#define RT5640_GP3_OUT_LO (0x0 << 7)
+#define RT5640_GP3_OUT_HI (0x1 << 7)
+#define RT5640_GP3_P_MASK (0x1 << 6)
+#define RT5640_GP3_P_SFT 6
+#define RT5640_GP3_P_NOR (0x0 << 6)
+#define RT5640_GP3_P_INV (0x1 << 6)
+#define RT5640_GP2_PF_MASK (0x1 << 5)
+#define RT5640_GP2_PF_SFT 5
+#define RT5640_GP2_PF_IN (0x0 << 5)
+#define RT5640_GP2_PF_OUT (0x1 << 5)
+#define RT5640_GP2_OUT_MASK (0x1 << 4)
+#define RT5640_GP2_OUT_SFT 4
+#define RT5640_GP2_OUT_LO (0x0 << 4)
+#define RT5640_GP2_OUT_HI (0x1 << 4)
+#define RT5640_GP2_P_MASK (0x1 << 3)
+#define RT5640_GP2_P_SFT 3
+#define RT5640_GP2_P_NOR (0x0 << 3)
+#define RT5640_GP2_P_INV (0x1 << 3)
+#define RT5640_GP1_PF_MASK (0x1 << 2)
+#define RT5640_GP1_PF_SFT 2
+#define RT5640_GP1_PF_IN (0x0 << 2)
+#define RT5640_GP1_PF_OUT (0x1 << 2)
+#define RT5640_GP1_OUT_MASK (0x1 << 1)
+#define RT5640_GP1_OUT_SFT 1
+#define RT5640_GP1_OUT_LO (0x0 << 1)
+#define RT5640_GP1_OUT_HI (0x1 << 1)
+#define RT5640_GP1_P_MASK (0x1)
+#define RT5640_GP1_P_SFT 0
+#define RT5640_GP1_P_NOR (0x0)
+#define RT5640_GP1_P_INV (0x1)
+
+/* FM34-500 Register Control 1 (0xc4) */
+#define RT5640_DSP_ADD_SFT 0
+
+/* FM34-500 Register Control 2 (0xc5) */
+#define RT5640_DSP_DAT_SFT 0
+
+/* FM34-500 Register Control 3 (0xc6) */
+#define RT5640_DSP_BUSY_MASK (0x1 << 15)
+#define RT5640_DSP_BUSY_BIT 15
+#define RT5640_DSP_DS_MASK (0x1 << 14)
+#define RT5640_DSP_DS_SFT 14
+#define RT5640_DSP_DS_FM3010 (0x1 << 14)
+#define RT5640_DSP_DS_TEMP (0x1 << 14)
+#define RT5640_DSP_CLK_MASK (0x3 << 12)
+#define RT5640_DSP_CLK_SFT 12
+#define RT5640_DSP_CLK_384K (0x0 << 12)
+#define RT5640_DSP_CLK_192K (0x1 << 12)
+#define RT5640_DSP_CLK_96K (0x2 << 12)
+#define RT5640_DSP_CLK_64K (0x3 << 12)
+#define RT5640_DSP_PD_PIN_MASK (0x1 << 11)
+#define RT5640_DSP_PD_PIN_SFT 11
+#define RT5640_DSP_PD_PIN_LO (0x0 << 11)
+#define RT5640_DSP_PD_PIN_HI (0x1 << 11)
+#define RT5640_DSP_RST_PIN_MASK (0x1 << 10)
+#define RT5640_DSP_RST_PIN_SFT 10
+#define RT5640_DSP_RST_PIN_LO (0x0 << 10)
+#define RT5640_DSP_RST_PIN_HI (0x1 << 10)
+#define RT5640_DSP_R_EN (0x1 << 9)
+#define RT5640_DSP_R_EN_BIT 9
+#define RT5640_DSP_W_EN (0x1 << 8)
+#define RT5640_DSP_W_EN_BIT 8
+#define RT5640_DSP_CMD_MASK (0xff)
+#define RT5640_DSP_CMD_SFT 0
+#define RT5640_DSP_CMD_MW (0x3B) /* Memory Write */
+#define RT5640_DSP_CMD_MR (0x37) /* Memory Read */
+#define RT5640_DSP_CMD_RR (0x60) /* Register Read */
+#define RT5640_DSP_CMD_RW (0x68) /* Register Write */
+
+/* Programmable Register Array Control 1 (0xc8) */
+#define RT5640_REG_SEQ_MASK (0xf << 12)
+#define RT5640_REG_SEQ_SFT 12
+#define RT5640_SEQ1_ST_MASK (0x1 << 11) /*RO*/
+#define RT5640_SEQ1_ST_SFT 11
+#define RT5640_SEQ1_ST_RUN (0x0 << 11)
+#define RT5640_SEQ1_ST_FIN (0x1 << 11)
+#define RT5640_SEQ2_ST_MASK (0x1 << 10) /*RO*/
+#define RT5640_SEQ2_ST_SFT 10
+#define RT5640_SEQ2_ST_RUN (0x0 << 10)
+#define RT5640_SEQ2_ST_FIN (0x1 << 10)
+#define RT5640_REG_LV_MASK (0x1 << 9)
+#define RT5640_REG_LV_SFT 9
+#define RT5640_REG_LV_MX (0x0 << 9)
+#define RT5640_REG_LV_PR (0x1 << 9)
+#define RT5640_SEQ_2_PT_MASK (0x1 << 8)
+#define RT5640_SEQ_2_PT_BIT 8
+#define RT5640_REG_IDX_MASK (0xff)
+#define RT5640_REG_IDX_SFT 0
+
+/* Programmable Register Array Control 2 (0xc9) */
+#define RT5640_REG_DAT_MASK (0xffff)
+#define RT5640_REG_DAT_SFT 0
+
+/* Programmable Register Array Control 3 (0xca) */
+#define RT5640_SEQ_DLY_MASK (0xff << 8)
+#define RT5640_SEQ_DLY_SFT 8
+#define RT5640_PROG_MASK (0x1 << 7)
+#define RT5640_PROG_SFT 7
+#define RT5640_PROG_DIS (0x0 << 7)
+#define RT5640_PROG_EN (0x1 << 7)
+#define RT5640_SEQ1_PT_RUN (0x1 << 6)
+#define RT5640_SEQ1_PT_RUN_BIT 6
+#define RT5640_SEQ2_PT_RUN (0x1 << 5)
+#define RT5640_SEQ2_PT_RUN_BIT 5
+
+/* Programmable Register Array Control 4 (0xcb) */
+#define RT5640_SEQ1_START_MASK (0xf << 8)
+#define RT5640_SEQ1_START_SFT 8
+#define RT5640_SEQ1_END_MASK (0xf)
+#define RT5640_SEQ1_END_SFT 0
+
+/* Programmable Register Array Control 5 (0xcc) */
+#define RT5640_SEQ2_START_MASK (0xf << 8)
+#define RT5640_SEQ2_START_SFT 8
+#define RT5640_SEQ2_END_MASK (0xf)
+#define RT5640_SEQ2_END_SFT 0
+
+/* Scramble Function (0xcd) */
+#define RT5640_SCB_KEY_MASK (0xff)
+#define RT5640_SCB_KEY_SFT 0
+
+/* Scramble Control (0xce) */
+#define RT5640_SCB_SWAP_MASK (0x1 << 15)
+#define RT5640_SCB_SWAP_SFT 15
+#define RT5640_SCB_SWAP_DIS (0x0 << 15)
+#define RT5640_SCB_SWAP_EN (0x1 << 15)
+#define RT5640_SCB_MASK (0x1 << 14)
+#define RT5640_SCB_SFT 14
+#define RT5640_SCB_DIS (0x0 << 14)
+#define RT5640_SCB_EN (0x1 << 14)
+
+/* Baseback Control (0xcf) */
+#define RT5640_BB_MASK (0x1 << 15)
+#define RT5640_BB_SFT 15
+#define RT5640_BB_DIS (0x0 << 15)
+#define RT5640_BB_EN (0x1 << 15)
+#define RT5640_BB_CT_MASK (0x7 << 12)
+#define RT5640_BB_CT_SFT 12
+#define RT5640_BB_CT_A (0x0 << 12)
+#define RT5640_BB_CT_B (0x1 << 12)
+#define RT5640_BB_CT_C (0x2 << 12)
+#define RT5640_BB_CT_D (0x3 << 12)
+#define RT5640_M_BB_L_MASK (0x1 << 9)
+#define RT5640_M_BB_L_SFT 9
+#define RT5640_M_BB_R_MASK (0x1 << 8)
+#define RT5640_M_BB_R_SFT 8
+#define RT5640_M_BB_HPF_L_MASK (0x1 << 7)
+#define RT5640_M_BB_HPF_L_SFT 7
+#define RT5640_M_BB_HPF_R_MASK (0x1 << 6)
+#define RT5640_M_BB_HPF_R_SFT 6
+#define RT5640_G_BB_BST_MASK (0x3f)
+#define RT5640_G_BB_BST_SFT 0
+
+/* MP3 Plus Control 1 (0xd0) */
+#define RT5640_M_MP3_L_MASK (0x1 << 15)
+#define RT5640_M_MP3_L_SFT 15
+#define RT5640_M_MP3_R_MASK (0x1 << 14)
+#define RT5640_M_MP3_R_SFT 14
+#define RT5640_M_MP3_MASK (0x1 << 13)
+#define RT5640_M_MP3_SFT 13
+#define RT5640_M_MP3_DIS (0x0 << 13)
+#define RT5640_M_MP3_EN (0x1 << 13)
+#define RT5640_EG_MP3_MASK (0x1f << 8)
+#define RT5640_EG_MP3_SFT 8
+#define RT5640_MP3_HLP_MASK (0x1 << 7)
+#define RT5640_MP3_HLP_SFT 7
+#define RT5640_MP3_HLP_DIS (0x0 << 7)
+#define RT5640_MP3_HLP_EN (0x1 << 7)
+#define RT5640_M_MP3_ORG_L_MASK (0x1 << 6)
+#define RT5640_M_MP3_ORG_L_SFT 6
+#define RT5640_M_MP3_ORG_R_MASK (0x1 << 5)
+#define RT5640_M_MP3_ORG_R_SFT 5
+
+/* MP3 Plus Control 2 (0xd1) */
+#define RT5640_MP3_WT_MASK (0x1 << 13)
+#define RT5640_MP3_WT_SFT 13
+#define RT5640_MP3_WT_1_4 (0x0 << 13)
+#define RT5640_MP3_WT_1_2 (0x1 << 13)
+#define RT5640_OG_MP3_MASK (0x1f << 8)
+#define RT5640_OG_MP3_SFT 8
+#define RT5640_HG_MP3_MASK (0x3f)
+#define RT5640_HG_MP3_SFT 0
+
+/* 3D HP Control 1 (0xd2) */
+#define RT5640_3D_CF_MASK (0x1 << 15)
+#define RT5640_3D_CF_SFT 15
+#define RT5640_3D_CF_DIS (0x0 << 15)
+#define RT5640_3D_CF_EN (0x1 << 15)
+#define RT5640_3D_HP_MASK (0x1 << 14)
+#define RT5640_3D_HP_SFT 14
+#define RT5640_3D_HP_DIS (0x0 << 14)
+#define RT5640_3D_HP_EN (0x1 << 14)
+#define RT5640_3D_BT_MASK (0x1 << 13)
+#define RT5640_3D_BT_SFT 13
+#define RT5640_3D_BT_DIS (0x0 << 13)
+#define RT5640_3D_BT_EN (0x1 << 13)
+#define RT5640_3D_1F_MIX_MASK (0x3 << 11)
+#define RT5640_3D_1F_MIX_SFT 11
+#define RT5640_3D_HP_M_MASK (0x1 << 10)
+#define RT5640_3D_HP_M_SFT 10
+#define RT5640_3D_HP_M_SUR (0x0 << 10)
+#define RT5640_3D_HP_M_FRO (0x1 << 10)
+#define RT5640_M_3D_HRTF_MASK (0x1 << 9)
+#define RT5640_M_3D_HRTF_SFT 9
+#define RT5640_M_3D_D2H_MASK (0x1 << 8)
+#define RT5640_M_3D_D2H_SFT 8
+#define RT5640_M_3D_D2R_MASK (0x1 << 7)
+#define RT5640_M_3D_D2R_SFT 7
+#define RT5640_M_3D_REVB_MASK (0x1 << 6)
+#define RT5640_M_3D_REVB_SFT 6
+
+/* Adjustable high pass filter control 1 (0xd3) */
+#define RT5640_2ND_HPF_MASK (0x1 << 15)
+#define RT5640_2ND_HPF_SFT 15
+#define RT5640_2ND_HPF_DIS (0x0 << 15)
+#define RT5640_2ND_HPF_EN (0x1 << 15)
+#define RT5640_HPF_CF_L_MASK (0x7 << 12)
+#define RT5640_HPF_CF_L_SFT 12
+#define RT5640_1ST_HPF_MASK (0x1 << 11)
+#define RT5640_1ST_HPF_SFT 11
+#define RT5640_1ST_HPF_DIS (0x0 << 11)
+#define RT5640_1ST_HPF_EN (0x1 << 11)
+#define RT5640_HPF_CF_R_MASK (0x7 << 8)
+#define RT5640_HPF_CF_R_SFT 8
+#define RT5640_ZD_T_MASK (0x3 << 6)
+#define RT5640_ZD_T_SFT 6
+#define RT5640_ZD_F_MASK (0x3 << 4)
+#define RT5640_ZD_F_SFT 4
+#define RT5640_ZD_F_IM (0x0 << 4)
+#define RT5640_ZD_F_ZC_IM (0x1 << 4)
+#define RT5640_ZD_F_ZC_IOD (0x2 << 4)
+#define RT5640_ZD_F_UN (0x3 << 4)
+
+/* HP calibration control and Amp detection (0xd6) */
+#define RT5640_SI_DAC_MASK (0x1 << 11)
+#define RT5640_SI_DAC_SFT 11
+#define RT5640_SI_DAC_AUTO (0x0 << 11)
+#define RT5640_SI_DAC_TEST (0x1 << 11)
+#define RT5640_DC_CAL_M_MASK (0x1 << 10)
+#define RT5640_DC_CAL_M_SFT 10
+#define RT5640_DC_CAL_M_CAL (0x0 << 10)
+#define RT5640_DC_CAL_M_NOR (0x1 << 10)
+#define RT5640_DC_CAL_MASK (0x1 << 9)
+#define RT5640_DC_CAL_SFT 9
+#define RT5640_DC_CAL_DIS (0x0 << 9)
+#define RT5640_DC_CAL_EN (0x1 << 9)
+#define RT5640_HPD_RCV_MASK (0x7 << 6)
+#define RT5640_HPD_RCV_SFT 6
+#define RT5640_HPD_PS_MASK (0x1 << 5)
+#define RT5640_HPD_PS_SFT 5
+#define RT5640_HPD_PS_DIS (0x0 << 5)
+#define RT5640_HPD_PS_EN (0x1 << 5)
+#define RT5640_CAL_M_MASK (0x1 << 4)
+#define RT5640_CAL_M_SFT 4
+#define RT5640_CAL_M_DEP (0x0 << 4)
+#define RT5640_CAL_M_CAL (0x1 << 4)
+#define RT5640_CAL_MASK (0x1 << 3)
+#define RT5640_CAL_SFT 3
+#define RT5640_CAL_DIS (0x0 << 3)
+#define RT5640_CAL_EN (0x1 << 3)
+#define RT5640_CAL_TEST_MASK (0x1 << 2)
+#define RT5640_CAL_TEST_SFT 2
+#define RT5640_CAL_TEST_DIS (0x0 << 2)
+#define RT5640_CAL_TEST_EN (0x1 << 2)
+#define RT5640_CAL_P_MASK (0x3)
+#define RT5640_CAL_P_SFT 0
+#define RT5640_CAL_P_NONE (0x0)
+#define RT5640_CAL_P_CAL (0x1)
+#define RT5640_CAL_P_DAC_CAL (0x2)
+
+/* Soft volume and zero cross control 1 (0xd9) */
+#define RT5640_SV_MASK (0x1 << 15)
+#define RT5640_SV_SFT 15
+#define RT5640_SV_DIS (0x0 << 15)
+#define RT5640_SV_EN (0x1 << 15)
+#define RT5640_SPO_SV_MASK (0x1 << 14)
+#define RT5640_SPO_SV_SFT 14
+#define RT5640_SPO_SV_DIS (0x0 << 14)
+#define RT5640_SPO_SV_EN (0x1 << 14)
+#define RT5640_OUT_SV_MASK (0x1 << 13)
+#define RT5640_OUT_SV_SFT 13
+#define RT5640_OUT_SV_DIS (0x0 << 13)
+#define RT5640_OUT_SV_EN (0x1 << 13)
+#define RT5640_HP_SV_MASK (0x1 << 12)
+#define RT5640_HP_SV_SFT 12
+#define RT5640_HP_SV_DIS (0x0 << 12)
+#define RT5640_HP_SV_EN (0x1 << 12)
+#define RT5640_ZCD_DIG_MASK (0x1 << 11)
+#define RT5640_ZCD_DIG_SFT 11
+#define RT5640_ZCD_DIG_DIS (0x0 << 11)
+#define RT5640_ZCD_DIG_EN (0x1 << 11)
+#define RT5640_ZCD_MASK (0x1 << 10)
+#define RT5640_ZCD_SFT 10
+#define RT5640_ZCD_PD (0x0 << 10)
+#define RT5640_ZCD_PU (0x1 << 10)
+#define RT5640_M_ZCD_MASK (0x3f << 4)
+#define RT5640_M_ZCD_SFT 4
+#define RT5640_M_ZCD_RM_L (0x1 << 9)
+#define RT5640_M_ZCD_RM_R (0x1 << 8)
+#define RT5640_M_ZCD_SM_L (0x1 << 7)
+#define RT5640_M_ZCD_SM_R (0x1 << 6)
+#define RT5640_M_ZCD_OM_L (0x1 << 5)
+#define RT5640_M_ZCD_OM_R (0x1 << 4)
+#define RT5640_SV_DLY_MASK (0xf)
+#define RT5640_SV_DLY_SFT 0
+
+/* Soft volume and zero cross control 2 (0xda) */
+#define RT5640_ZCD_HP_MASK (0x1 << 15)
+#define RT5640_ZCD_HP_SFT 15
+#define RT5640_ZCD_HP_DIS (0x0 << 15)
+#define RT5640_ZCD_HP_EN (0x1 << 15)
+
+
+/* Codec Private Register definition */
+/* 3D Speaker Control (0x63) */
+#define RT5640_3D_SPK_MASK (0x1 << 15)
+#define RT5640_3D_SPK_SFT 15
+#define RT5640_3D_SPK_DIS (0x0 << 15)
+#define RT5640_3D_SPK_EN (0x1 << 15)
+#define RT5640_3D_SPK_M_MASK (0x3 << 13)
+#define RT5640_3D_SPK_M_SFT 13
+#define RT5640_3D_SPK_CG_MASK (0x1f << 8)
+#define RT5640_3D_SPK_CG_SFT 8
+#define RT5640_3D_SPK_SG_MASK (0x1f)
+#define RT5640_3D_SPK_SG_SFT 0
+
+/* Wind Noise Detection Control 1 (0x6c) */
+#define RT5640_WND_MASK (0x1 << 15)
+#define RT5640_WND_SFT 15
+#define RT5640_WND_DIS (0x0 << 15)
+#define RT5640_WND_EN (0x1 << 15)
+
+/* Wind Noise Detection Control 2 (0x6d) */
+#define RT5640_WND_FC_NW_MASK (0x3f << 10)
+#define RT5640_WND_FC_NW_SFT 10
+#define RT5640_WND_FC_WK_MASK (0x3f << 4)
+#define RT5640_WND_FC_WK_SFT 4
+
+/* Wind Noise Detection Control 3 (0x6e) */
+#define RT5640_HPF_FC_MASK (0x3f << 6)
+#define RT5640_HPF_FC_SFT 6
+#define RT5640_WND_FC_ST_MASK (0x3f)
+#define RT5640_WND_FC_ST_SFT 0
+
+/* Wind Noise Detection Control 4 (0x6f) */
+#define RT5640_WND_TH_LO_MASK (0x3ff)
+#define RT5640_WND_TH_LO_SFT 0
+
+/* Wind Noise Detection Control 5 (0x70) */
+#define RT5640_WND_TH_HI_MASK (0x3ff)
+#define RT5640_WND_TH_HI_SFT 0
+
+/* Wind Noise Detection Control 8 (0x73) */
+#define RT5640_WND_WIND_MASK (0x1 << 13) /* Read-Only */
+#define RT5640_WND_WIND_SFT 13
+#define RT5640_WND_STRONG_MASK (0x1 << 12) /* Read-Only */
+#define RT5640_WND_STRONG_SFT 12
+enum {
+ RT5640_NO_WIND,
+ RT5640_BREEZE,
+ RT5640_STORM,
+};
+
+/* Dipole Speaker Interface (0x75) */
+#define RT5640_DP_ATT_MASK (0x3 << 14)
+#define RT5640_DP_ATT_SFT 14
+#define RT5640_DP_SPK_MASK (0x1 << 10)
+#define RT5640_DP_SPK_SFT 10
+#define RT5640_DP_SPK_DIS (0x0 << 10)
+#define RT5640_DP_SPK_EN (0x1 << 10)
+
+/* EQ Pre Volume Control (0xb3) */
+#define RT5640_EQ_PRE_VOL_MASK (0xffff)
+#define RT5640_EQ_PRE_VOL_SFT 0
+
+/* EQ Post Volume Control (0xb4) */
+#define RT5640_EQ_PST_VOL_MASK (0xffff)
+#define RT5640_EQ_PST_VOL_SFT 0
+
+
+
+/* System Clock Source */
+#define RT5640_SCLK_S_MCLK 0
+#define RT5640_SCLK_S_PLL1 1
+#define RT5640_SCLK_S_PLL1_TK 2
+#define RT5640_SCLK_S_RCCLK 3
+
+/* PLL1 Source */
+#define RT5640_PLL1_S_MCLK 0
+#define RT5640_PLL1_S_BCLK1 1
+#define RT5640_PLL1_S_BCLK2 2
+#define RT5640_PLL1_S_BCLK3 3
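The SCLK_S_* and PLL1_S_* selectors above are the IDs a machine driver hands to the DAI clocking callbacks. As a minimal sketch (not taken from this patch), assuming a 12.288 MHz MCLK, a 256*fs system clock, and the generic snd_soc_dai_set_pll()/snd_soc_dai_set_sysclk() helpers, selecting PLL1 as the system clock source might look like this; the function name hypothetical_hw_params and the clock numbers are illustrative assumptions.

/* Sketch only: feed PLL1 from MCLK, run it at 256*fs, then use PLL1
 * as the codec system clock.  Rates are assumptions for illustration. */
static int hypothetical_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	unsigned int sysclk = params_rate(params) * 256;
	int ret;

	ret = snd_soc_dai_set_pll(codec_dai, 0, RT5640_PLL1_S_MCLK,
				  12288000, sysclk);
	if (ret < 0)
		return ret;

	return snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_PLL1,
				      sysclk, SND_SOC_CLOCK_IN);
}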
+
+
+enum {
+ RT5640_AIF1,
+ RT5640_AIF2,
+ RT5640_AIF3,
+ RT5640_AIFS,
+};
+
+enum {
+ RT5640_U_IF1 = 0x1,
+ RT5640_U_IF2 = 0x2,
+ RT5640_U_IF3 = 0x4,
+};
+
+enum {
+ RT5640_IF_123,
+ RT5640_IF_132,
+ RT5640_IF_312,
+ RT5640_IF_321,
+ RT5640_IF_231,
+ RT5640_IF_213,
+ RT5640_IF_113,
+ RT5640_IF_223,
+ RT5640_IF_ALL,
+};
+
+enum {
+ RT5640_DMIC_DIS,
+ RT5640_DMIC1,
+ RT5640_DMIC2,
+};
+
+struct rt5640_pll_code {
+ bool m_bp; /* Indicates bypass m code or not. */
+ int m_code;
+ int n_code;
+ int k_code;
+};
+
+struct rt5640_priv {
+ struct snd_soc_codec *codec;
+
+ int aif_pu;
+ int sysclk;
+ int sysclk_src;
+ int lrck[RT5640_AIFS];
+ int bclk[RT5640_AIFS];
+ int master[RT5640_AIFS];
+
+ int pll_src;
+ int pll_in;
+ int pll_out;
+
+ int dmic_en;
+ int dsp_sw;
+};
+
+
+#endif /* __RT5640_H__ */
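Every *_MASK/*_SFT pair in this header describes a bit field inside a 16-bit codec register, so fields are normally rewritten read-modify-write style rather than with raw writes. A minimal sketch, assuming the generic snd_soc_update_bits() helper and the Bass Back Control register (0xcf) documented above; the codec pointer is whatever struct snd_soc_codec the driver already has in scope.

/* Sketch: enable the bass-back effect and select characteristic B,
 * leaving the other bits of register 0xcf untouched. */
snd_soc_update_bits(codec, 0xcf,
		    RT5640_BB_MASK | RT5640_BB_CT_MASK,
		    RT5640_BB_EN | RT5640_BB_CT_B);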
diff --git a/sound/soc/codecs/second_rate_pps_driver.h b/sound/soc/codecs/second_rate_pps_driver.h
new file mode 100644
index 000000000000..118a775f8a4d
--- /dev/null
+++ b/sound/soc/codecs/second_rate_pps_driver.h
@@ -0,0 +1,3153 @@
+/*
+ * linux/sound/soc/codecs/second_rate_pps_driver.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+*/
+
+static control Second_Rate_MUX_controls[] = {
+};
+
+static char *Second_Rate_MUX_control_names[] = {
+};
+
+static control Second_Rate_VOLUME_controls[] = {
+};
+
+static char *Second_Rate_VOLUME_control_names[] = {
+};
+
+
+
+static char *Second_Rate_REG_Section_names[] = {
+ "miniDSP_A_reg_values",
+ "miniDSP_D_reg_values",
+};
+
+reg_value Second_Rate_REG_Section_init_program[] = {
+ { 0, 0x0},
+ { 0x7F, 0x00},
+ {122, 0x01},
+ { 0, 0x0},
+ { 0x7F, 0x78},
+ { 24, 0x80}
+};
+
+reg_value Second_Rate_REG_Section_post_program[] = {
+ { 0, 0x0},
+ { 0x7F, 0x28},
+ { 1, 0x04},
+ { 0x7F, 0x50},
+ { 1, 0x04},
+ { 0x7F, 0x64},
+ { 48, 0x04},
+ { 49, 0x00},
+ { 0x7F, 0x78},
+ { 48, 0x04},
+ { 49, 0x00}
+};
+
+reg_value Second_Rate_miniDSP_A_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x28},
+ { 0, 0x01},
+ { 8, 0x00},
+ { 9, 0xB7},
+ { 10, 0x98},
+ { 11, 0x00},
+ { 12, 0x7E},
+ { 13, 0x90},
+ { 14, 0xD0},
+ { 15, 0x00},
+ { 16, 0x7F},
+ { 17, 0xFF},
+ { 18, 0xFF},
+ { 19, 0x00},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0x00},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFF},
+ { 29, 0xFF},
+ { 30, 0xFF},
+ { 31, 0x00},
+ { 32, 0x80},
+ { 33, 0x00},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x7F},
+ { 37, 0xFF},
+ { 38, 0xFF},
+ { 39, 0x00},
+ { 40, 0x40},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xFF},
+ { 49, 0x9E},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0xF7},
+ { 53, 0x10},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x26},
+ { 57, 0xF0},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x02},
+ { 61, 0x61},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x40},
+ { 65, 0x02},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xFF},
+ { 69, 0xFC},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFF},
+ { 73, 0xFD},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0xFF},
+ { 77, 0xE5},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xFF},
+ { 81, 0xA7},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xFF},
+ { 85, 0xC7},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0xFF},
+ { 89, 0xCE},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFF},
+ { 93, 0xFE},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0xFE},
+ { 97, 0xF7},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0xFF},
+ {101, 0x46},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0xFF},
+ {105, 0x22},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xFF},
+ {109, 0x0F},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xFA},
+ {113, 0x8C},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0xF5},
+ {117, 0x08},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0xFD},
+ {121, 0x92},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0xF3},
+ {125, 0xA3},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0x03},
+ { 9, 0xB3},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x33},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x71},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x40},
+ { 21, 0x60},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x00},
+ { 25, 0xE2},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x00},
+ { 29, 0xC6},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x00},
+ { 33, 0x87},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x0F},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x00},
+ { 41, 0x0A},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x02},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x01},
+ { 49, 0x81},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x18},
+ { 53, 0x89},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x07},
+ { 57, 0xFB},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x03},
+ { 61, 0xBE},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x49},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xFF},
+ { 69, 0xF2},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xF9},
+ { 73, 0xBA},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0xF9},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xFF},
+ { 81, 0x68},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xFE},
+ { 85, 0x2C},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0xEB},
+ { 89, 0x8D},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x03},
+ { 93, 0x83},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x01},
+ { 97, 0xC3},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0xD3},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x33},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x02},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x32},
+ {113, 0x08},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x0A},
+ {117, 0xFA},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x54},
+ {121, 0x7B},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0xFF},
+ {125, 0x6B},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x03},
+ { 8, 0xFF},
+ { 9, 0x03},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xFC},
+ { 13, 0xC6},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xF5},
+ { 17, 0x54},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0xF9},
+ { 21, 0x64},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x39},
+ { 25, 0x80},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x03},
+ { 29, 0x1C},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x03},
+ { 33, 0x2F},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x67},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x0F},
+ { 41, 0x73},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x2C},
+ { 45, 0x2B},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 0, 0x09},
+ { 72, 0x00},
+ { 73, 0xB7},
+ { 74, 0x98},
+ { 75, 0x00},
+ { 76, 0x7E},
+ { 77, 0x90},
+ { 78, 0xD0},
+ { 79, 0x00},
+ { 80, 0x7F},
+ { 81, 0xFF},
+ { 82, 0xFF},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFF},
+ { 93, 0xFF},
+ { 94, 0xFF},
+ { 95, 0x00},
+ { 96, 0x80},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x7F},
+ {101, 0xFF},
+ {102, 0xFF},
+ {103, 0x00},
+ {104, 0x40},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xFF},
+ {113, 0x9E},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0xF7},
+ {117, 0x10},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x26},
+ {121, 0xF0},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x02},
+ {125, 0x61},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0A},
+ { 8, 0x40},
+ { 9, 0x02},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xFF},
+ { 13, 0xFC},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xFF},
+ { 17, 0xFD},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0xFF},
+ { 21, 0xE5},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0xFF},
+ { 25, 0xA7},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFF},
+ { 29, 0xC7},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xFF},
+ { 33, 0xCE},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xFF},
+ { 37, 0xFE},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xFE},
+ { 41, 0xF7},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0xFF},
+ { 45, 0x46},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xFF},
+ { 49, 0x22},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0xFF},
+ { 53, 0x0F},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0xFA},
+ { 57, 0x8C},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0xF5},
+ { 61, 0x08},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0xFD},
+ { 65, 0x92},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xF3},
+ { 69, 0xA3},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x03},
+ { 73, 0xB3},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x33},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0x71},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x40},
+ { 85, 0x60},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0xE2},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0xC6},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x87},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x0F},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x0A},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x02},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x01},
+ {113, 0x81},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x18},
+ {117, 0x89},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x07},
+ {121, 0xFB},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x03},
+ {125, 0xBE},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0B},
+ { 8, 0x00},
+ { 9, 0x49},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xFF},
+ { 13, 0xF2},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0xF9},
+ { 17, 0xBA},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0xF9},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0xFF},
+ { 25, 0x68},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFE},
+ { 29, 0x2C},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xEB},
+ { 33, 0x8D},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0x03},
+ { 37, 0x83},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x01},
+ { 41, 0xC3},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0xD3},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x33},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x02},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x32},
+ { 57, 0x08},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x0A},
+ { 61, 0xFA},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x54},
+ { 65, 0x7B},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0xFF},
+ { 69, 0x6B},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFF},
+ { 73, 0x03},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0xFC},
+ { 77, 0xC6},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0xF5},
+ { 81, 0x54},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xF9},
+ { 85, 0x64},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x39},
+ { 89, 0x80},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x03},
+ { 93, 0x1C},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x03},
+ { 97, 0x2F},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x67},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x0F},
+ {105, 0x73},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x2C},
+ {109, 0x2B},
+ {110, 0x00},
+ {111, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x14},
+ { 0, 0x01},
+ { 8, 0xC0},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xC0},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x64},
+ { 0, 0x01},
+ { 8, 0x58},
+ { 9, 0x60},
+ { 10, 0x08},
+ { 11, 0x01},
+ { 12, 0x60},
+ { 13, 0x60},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x58},
+ { 17, 0x60},
+ { 18, 0x00},
+ { 19, 0x09},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x44},
+ { 25, 0x00},
+ { 26, 0xC0},
+ { 27, 0x0B},
+ { 28, 0x04},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x04},
+ { 33, 0x00},
+ { 34, 0x20},
+ { 35, 0x51},
+ { 36, 0x04},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x0D},
+ { 40, 0x04},
+ { 41, 0x00},
+ { 42, 0x20},
+ { 43, 0x5E},
+ { 44, 0x04},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x08},
+ { 48, 0x04},
+ { 49, 0x00},
+ { 50, 0x20},
+ { 51, 0x59},
+ { 52, 0x04},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x04},
+ { 56, 0x04},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0x55},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x00},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x44},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x0B},
+ { 72, 0x21},
+ { 73, 0x00},
+ { 74, 0x20},
+ { 75, 0x00},
+ { 76, 0x4B},
+ { 77, 0x00},
+ { 78, 0xC0},
+ { 79, 0x00},
+ { 80, 0x4B},
+ { 81, 0x00},
+ { 82, 0xE0},
+ { 83, 0x00},
+ { 84, 0x10},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x10},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x51},
+ { 92, 0x10},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x0D},
+ { 96, 0x10},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x5E},
+ {100, 0x10},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x08},
+ {104, 0x10},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x59},
+ {108, 0x10},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x04},
+ {112, 0x10},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x55},
+ {116, 0x18},
+ {117, 0x01},
+ {118, 0xC0},
+ {119, 0x0F},
+ {120, 0x1C},
+ {121, 0x01},
+ {122, 0xA0},
+ {123, 0x09},
+ {124, 0x1C},
+ {125, 0x01},
+ {126, 0xA0},
+ {127, 0x03},
+ { 0, 0x02},
+ { 8, 0x1C},
+ { 9, 0x01},
+ { 10, 0x80},
+ { 11, 0x0A},
+ { 12, 0x1C},
+ { 13, 0x01},
+ { 14, 0x80},
+ { 15, 0x02},
+ { 16, 0x1C},
+ { 17, 0x01},
+ { 18, 0x60},
+ { 19, 0x01},
+ { 20, 0x1C},
+ { 21, 0x01},
+ { 22, 0x60},
+ { 23, 0x0B},
+ { 24, 0x1C},
+ { 25, 0x01},
+ { 26, 0x40},
+ { 27, 0x0C},
+ { 28, 0x1C},
+ { 29, 0x01},
+ { 30, 0x40},
+ { 31, 0x00},
+ { 32, 0x18},
+ { 33, 0x01},
+ { 34, 0x40},
+ { 35, 0x5D},
+ { 36, 0x1C},
+ { 37, 0x01},
+ { 38, 0x40},
+ { 39, 0x51},
+ { 40, 0x1C},
+ { 41, 0x01},
+ { 42, 0x60},
+ { 43, 0x52},
+ { 44, 0x10},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x2D},
+ { 48, 0x1C},
+ { 49, 0x01},
+ { 50, 0x60},
+ { 51, 0x5C},
+ { 52, 0x1C},
+ { 53, 0x01},
+ { 54, 0x80},
+ { 55, 0x5B},
+ { 56, 0x1C},
+ { 57, 0x01},
+ { 58, 0x80},
+ { 59, 0x53},
+ { 60, 0x1C},
+ { 61, 0x01},
+ { 62, 0xA0},
+ { 63, 0x5A},
+ { 64, 0x1C},
+ { 65, 0x01},
+ { 66, 0xA0},
+ { 67, 0x54},
+ { 68, 0x1C},
+ { 69, 0x01},
+ { 70, 0xC0},
+ { 71, 0x60},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x10},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x7E},
+ { 88, 0x18},
+ { 89, 0x01},
+ { 90, 0xE0},
+ { 91, 0x2C},
+ { 92, 0x1C},
+ { 93, 0x01},
+ { 94, 0xE0},
+ { 95, 0x2D},
+ { 96, 0x1C},
+ { 97, 0x02},
+ { 98, 0x00},
+ { 99, 0x11},
+ {100, 0x1C},
+ {101, 0x02},
+ {102, 0x00},
+ {103, 0x48},
+ {104, 0x1C},
+ {105, 0x02},
+ {106, 0x20},
+ {107, 0x2F},
+ {108, 0x1C},
+ {109, 0x02},
+ {110, 0x20},
+ {111, 0x2A},
+ {112, 0x1C},
+ {113, 0x02},
+ {114, 0x40},
+ {115, 0x31},
+ {116, 0x1C},
+ {117, 0x02},
+ {118, 0x40},
+ {119, 0x28},
+ {120, 0x1C},
+ {121, 0x02},
+ {122, 0x60},
+ {123, 0x37},
+ {124, 0x1C},
+ {125, 0x02},
+ {126, 0x60},
+ {127, 0x22},
+ { 0, 0x03},
+ { 8, 0x1C},
+ { 9, 0x02},
+ { 10, 0x80},
+ { 11, 0x14},
+ { 12, 0x1C},
+ { 13, 0x02},
+ { 14, 0x80},
+ { 15, 0x45},
+ { 16, 0x1C},
+ { 17, 0x02},
+ { 18, 0xA0},
+ { 19, 0x12},
+ { 20, 0x1C},
+ { 21, 0x02},
+ { 22, 0xA0},
+ { 23, 0x47},
+ { 24, 0x1C},
+ { 25, 0x02},
+ { 26, 0xC0},
+ { 27, 0x38},
+ { 28, 0x1C},
+ { 29, 0x02},
+ { 30, 0xC0},
+ { 31, 0x21},
+ { 32, 0x1C},
+ { 33, 0x02},
+ { 34, 0xE0},
+ { 35, 0x33},
+ { 36, 0x1C},
+ { 37, 0x02},
+ { 38, 0xE0},
+ { 39, 0x26},
+ { 40, 0x1C},
+ { 41, 0x03},
+ { 42, 0x00},
+ { 43, 0x16},
+ { 44, 0x1C},
+ { 45, 0x03},
+ { 46, 0x00},
+ { 47, 0x43},
+ { 48, 0x1C},
+ { 49, 0x03},
+ { 50, 0x20},
+ { 51, 0x35},
+ { 52, 0x1C},
+ { 53, 0x03},
+ { 54, 0x20},
+ { 55, 0x24},
+ { 56, 0x1C},
+ { 57, 0x03},
+ { 58, 0x40},
+ { 59, 0x1A},
+ { 60, 0x1C},
+ { 61, 0x03},
+ { 62, 0x40},
+ { 63, 0x3F},
+ { 64, 0x1C},
+ { 65, 0x03},
+ { 66, 0x60},
+ { 67, 0x3A},
+ { 68, 0x1C},
+ { 69, 0x03},
+ { 70, 0x60},
+ { 71, 0x1F},
+ { 72, 0x1C},
+ { 73, 0x03},
+ { 74, 0x80},
+ { 75, 0x18},
+ { 76, 0x1C},
+ { 77, 0x03},
+ { 78, 0x80},
+ { 79, 0x41},
+ { 80, 0x1C},
+ { 81, 0x03},
+ { 82, 0xA0},
+ { 83, 0x1C},
+ { 84, 0x1C},
+ { 85, 0x03},
+ { 86, 0xA0},
+ { 87, 0x3D},
+ { 88, 0x1C},
+ { 89, 0x03},
+ { 90, 0xC0},
+ { 91, 0x19},
+ { 92, 0x1C},
+ { 93, 0x03},
+ { 94, 0xC0},
+ { 95, 0x40},
+ { 96, 0x1C},
+ { 97, 0x03},
+ { 98, 0xE0},
+ { 99, 0x30},
+ {100, 0x1C},
+ {101, 0x03},
+ {102, 0xE0},
+ {103, 0x29},
+ {104, 0x1C},
+ {105, 0x04},
+ {106, 0x00},
+ {107, 0x15},
+ {108, 0x1C},
+ {109, 0x04},
+ {110, 0x00},
+ {111, 0x44},
+ {112, 0x1C},
+ {113, 0x04},
+ {114, 0x20},
+ {115, 0x3B},
+ {116, 0x1C},
+ {117, 0x04},
+ {118, 0x20},
+ {119, 0x1E},
+ {120, 0x1C},
+ {121, 0x04},
+ {122, 0x40},
+ {123, 0x34},
+ {124, 0x1C},
+ {125, 0x04},
+ {126, 0x40},
+ {127, 0x25},
+ { 0, 0x04},
+ { 8, 0x1C},
+ { 9, 0x04},
+ { 10, 0x60},
+ { 11, 0x36},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0x60},
+ { 15, 0x23},
+ { 16, 0x1C},
+ { 17, 0x04},
+ { 18, 0x80},
+ { 19, 0x32},
+ { 20, 0x1C},
+ { 21, 0x04},
+ { 22, 0x80},
+ { 23, 0x27},
+ { 24, 0x1C},
+ { 25, 0x04},
+ { 26, 0xA0},
+ { 27, 0x13},
+ { 28, 0x1C},
+ { 29, 0x04},
+ { 30, 0xA0},
+ { 31, 0x46},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0xC0},
+ { 35, 0x2E},
+ { 36, 0x1C},
+ { 37, 0x04},
+ { 38, 0xC0},
+ { 39, 0x2B},
+ { 40, 0x1C},
+ { 41, 0x04},
+ { 42, 0xE0},
+ { 43, 0x10},
+ { 44, 0x1C},
+ { 45, 0x04},
+ { 46, 0xE0},
+ { 47, 0x49},
+ { 48, 0x1C},
+ { 49, 0x05},
+ { 50, 0x00},
+ { 51, 0x17},
+ { 52, 0x1C},
+ { 53, 0x05},
+ { 54, 0x00},
+ { 55, 0x42},
+ { 56, 0x1C},
+ { 57, 0x05},
+ { 58, 0x20},
+ { 59, 0x1D},
+ { 60, 0x1C},
+ { 61, 0x05},
+ { 62, 0x20},
+ { 63, 0x3C},
+ { 64, 0x1C},
+ { 65, 0x05},
+ { 66, 0x40},
+ { 67, 0x1B},
+ { 68, 0x1C},
+ { 69, 0x05},
+ { 70, 0x40},
+ { 71, 0x3E},
+ { 72, 0x1C},
+ { 73, 0x05},
+ { 74, 0x60},
+ { 75, 0x39},
+ { 76, 0x1C},
+ { 77, 0x05},
+ { 78, 0x60},
+ { 79, 0x20},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x5C},
+ { 85, 0x90},
+ { 86, 0x40},
+ { 87, 0x00},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x5C},
+ { 93, 0x90},
+ { 94, 0x20},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x10},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x49},
+ {112, 0x18},
+ {113, 0x01},
+ {114, 0xC0},
+ {115, 0x57},
+ {116, 0x1C},
+ {117, 0x01},
+ {118, 0xA0},
+ {119, 0x51},
+ {120, 0x1C},
+ {121, 0x01},
+ {122, 0xA0},
+ {123, 0x5C},
+ {124, 0x1C},
+ {125, 0x01},
+ {126, 0x80},
+ {127, 0x52},
+ { 0, 0x05},
+ { 8, 0x1C},
+ { 9, 0x01},
+ { 10, 0x80},
+ { 11, 0x5B},
+ { 12, 0x1C},
+ { 13, 0x01},
+ { 14, 0x60},
+ { 15, 0x5A},
+ { 16, 0x1C},
+ { 17, 0x01},
+ { 18, 0x60},
+ { 19, 0x53},
+ { 20, 0x1C},
+ { 21, 0x01},
+ { 22, 0x40},
+ { 23, 0x54},
+ { 24, 0x1C},
+ { 25, 0x01},
+ { 26, 0x40},
+ { 27, 0x59},
+ { 28, 0x18},
+ { 29, 0x01},
+ { 30, 0x40},
+ { 31, 0x03},
+ { 32, 0x1C},
+ { 33, 0x01},
+ { 34, 0x40},
+ { 35, 0x08},
+ { 36, 0x1C},
+ { 37, 0x01},
+ { 38, 0x60},
+ { 39, 0x09},
+ { 40, 0x10},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x60},
+ { 44, 0x1C},
+ { 45, 0x01},
+ { 46, 0x60},
+ { 47, 0x02},
+ { 48, 0x1C},
+ { 49, 0x01},
+ { 50, 0x80},
+ { 51, 0x01},
+ { 52, 0x1C},
+ { 53, 0x01},
+ { 54, 0x80},
+ { 55, 0x0A},
+ { 56, 0x1C},
+ { 57, 0x01},
+ { 58, 0xA0},
+ { 59, 0x00},
+ { 60, 0x1C},
+ { 61, 0x01},
+ { 62, 0xA0},
+ { 63, 0x0B},
+ { 64, 0x1C},
+ { 65, 0x01},
+ { 66, 0xC0},
+ { 67, 0x06},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x0F},
+ { 84, 0x18},
+ { 85, 0x01},
+ { 86, 0xE0},
+ { 87, 0x7D},
+ { 88, 0x1C},
+ { 89, 0x01},
+ { 90, 0xE0},
+ { 91, 0x7E},
+ { 92, 0x1C},
+ { 93, 0x02},
+ { 94, 0x00},
+ { 95, 0x62},
+ { 96, 0x1C},
+ { 97, 0x02},
+ { 98, 0x00},
+ { 99, 0x99},
+ {100, 0x1C},
+ {101, 0x02},
+ {102, 0x20},
+ {103, 0x80},
+ {104, 0x1C},
+ {105, 0x02},
+ {106, 0x20},
+ {107, 0x7B},
+ {108, 0x1C},
+ {109, 0x02},
+ {110, 0x40},
+ {111, 0x82},
+ {112, 0x1C},
+ {113, 0x02},
+ {114, 0x40},
+ {115, 0x79},
+ {116, 0x1C},
+ {117, 0x02},
+ {118, 0x60},
+ {119, 0x88},
+ {120, 0x1C},
+ {121, 0x02},
+ {122, 0x60},
+ {123, 0x73},
+ {124, 0x1C},
+ {125, 0x02},
+ {126, 0x80},
+ {127, 0x65},
+ { 0, 0x06},
+ { 8, 0x1C},
+ { 9, 0x02},
+ { 10, 0x80},
+ { 11, 0x96},
+ { 12, 0x1C},
+ { 13, 0x02},
+ { 14, 0xA0},
+ { 15, 0x63},
+ { 16, 0x1C},
+ { 17, 0x02},
+ { 18, 0xA0},
+ { 19, 0x98},
+ { 20, 0x1C},
+ { 21, 0x02},
+ { 22, 0xC0},
+ { 23, 0x89},
+ { 24, 0x1C},
+ { 25, 0x02},
+ { 26, 0xC0},
+ { 27, 0x72},
+ { 28, 0x1C},
+ { 29, 0x02},
+ { 30, 0xE0},
+ { 31, 0x84},
+ { 32, 0x1C},
+ { 33, 0x02},
+ { 34, 0xE0},
+ { 35, 0x77},
+ { 36, 0x1C},
+ { 37, 0x03},
+ { 38, 0x00},
+ { 39, 0x67},
+ { 40, 0x1C},
+ { 41, 0x03},
+ { 42, 0x00},
+ { 43, 0x94},
+ { 44, 0x1C},
+ { 45, 0x03},
+ { 46, 0x20},
+ { 47, 0x86},
+ { 48, 0x1C},
+ { 49, 0x03},
+ { 50, 0x20},
+ { 51, 0x75},
+ { 52, 0x1C},
+ { 53, 0x03},
+ { 54, 0x40},
+ { 55, 0x6B},
+ { 56, 0x1C},
+ { 57, 0x03},
+ { 58, 0x40},
+ { 59, 0x90},
+ { 60, 0x1C},
+ { 61, 0x03},
+ { 62, 0x60},
+ { 63, 0x8B},
+ { 64, 0x1C},
+ { 65, 0x03},
+ { 66, 0x60},
+ { 67, 0x70},
+ { 68, 0x1C},
+ { 69, 0x03},
+ { 70, 0x80},
+ { 71, 0x69},
+ { 72, 0x1C},
+ { 73, 0x03},
+ { 74, 0x80},
+ { 75, 0x92},
+ { 76, 0x1C},
+ { 77, 0x03},
+ { 78, 0xA0},
+ { 79, 0x6D},
+ { 80, 0x1C},
+ { 81, 0x03},
+ { 82, 0xA0},
+ { 83, 0x8E},
+ { 84, 0x1C},
+ { 85, 0x03},
+ { 86, 0xC0},
+ { 87, 0x6A},
+ { 88, 0x1C},
+ { 89, 0x03},
+ { 90, 0xC0},
+ { 91, 0x91},
+ { 92, 0x1C},
+ { 93, 0x03},
+ { 94, 0xE0},
+ { 95, 0x81},
+ { 96, 0x1C},
+ { 97, 0x03},
+ { 98, 0xE0},
+ { 99, 0x7A},
+ {100, 0x1C},
+ {101, 0x04},
+ {102, 0x00},
+ {103, 0x66},
+ {104, 0x1C},
+ {105, 0x04},
+ {106, 0x00},
+ {107, 0x95},
+ {108, 0x1C},
+ {109, 0x04},
+ {110, 0x20},
+ {111, 0x8C},
+ {112, 0x1C},
+ {113, 0x04},
+ {114, 0x20},
+ {115, 0x6F},
+ {116, 0x1C},
+ {117, 0x04},
+ {118, 0x40},
+ {119, 0x85},
+ {120, 0x1C},
+ {121, 0x04},
+ {122, 0x40},
+ {123, 0x76},
+ {124, 0x1C},
+ {125, 0x04},
+ {126, 0x60},
+ {127, 0x87},
+ { 0, 0x07},
+ { 8, 0x1C},
+ { 9, 0x04},
+ { 10, 0x60},
+ { 11, 0x74},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0x80},
+ { 15, 0x83},
+ { 16, 0x1C},
+ { 17, 0x04},
+ { 18, 0x80},
+ { 19, 0x78},
+ { 20, 0x1C},
+ { 21, 0x04},
+ { 22, 0xA0},
+ { 23, 0x64},
+ { 24, 0x1C},
+ { 25, 0x04},
+ { 26, 0xA0},
+ { 27, 0x97},
+ { 28, 0x1C},
+ { 29, 0x04},
+ { 30, 0xC0},
+ { 31, 0x7F},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0xC0},
+ { 35, 0x7C},
+ { 36, 0x1C},
+ { 37, 0x04},
+ { 38, 0xE0},
+ { 39, 0x61},
+ { 40, 0x1C},
+ { 41, 0x04},
+ { 42, 0xE0},
+ { 43, 0x9A},
+ { 44, 0x1C},
+ { 45, 0x05},
+ { 46, 0x00},
+ { 47, 0x68},
+ { 48, 0x1C},
+ { 49, 0x05},
+ { 50, 0x00},
+ { 51, 0x93},
+ { 52, 0x1C},
+ { 53, 0x05},
+ { 54, 0x20},
+ { 55, 0x6E},
+ { 56, 0x1C},
+ { 57, 0x05},
+ { 58, 0x20},
+ { 59, 0x8D},
+ { 60, 0x1C},
+ { 61, 0x05},
+ { 62, 0x40},
+ { 63, 0x6C},
+ { 64, 0x1C},
+ { 65, 0x05},
+ { 66, 0x40},
+ { 67, 0x8F},
+ { 68, 0x1C},
+ { 69, 0x05},
+ { 70, 0x60},
+ { 71, 0x8A},
+ { 72, 0x1C},
+ { 73, 0x05},
+ { 74, 0x60},
+ { 75, 0x71},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x5C},
+ { 81, 0x90},
+ { 82, 0x80},
+ { 83, 0x00},
+ { 84, 0x00},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x5C},
+ { 89, 0x90},
+ { 90, 0x60},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x10},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x9A},
+ {108, 0x18},
+ {109, 0x00},
+ {110, 0x60},
+ {111, 0x4A},
+ {112, 0x1C},
+ {113, 0x00},
+ {114, 0x80},
+ {115, 0x4C},
+ {116, 0x1C},
+ {117, 0x00},
+ {118, 0x40},
+ {119, 0x49},
+ {120, 0x30},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x4C},
+ {124, 0x1C},
+ {125, 0x00},
+ {126, 0x20},
+ {127, 0x4E},
+ { 0, 0x08},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x10},
+ { 13, 0x30},
+ { 14, 0x00},
+ { 15, 0x4B},
+ { 16, 0x34},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x4B},
+ { 20, 0x18},
+ { 21, 0x00},
+ { 22, 0x60},
+ { 23, 0x9B},
+ { 24, 0x0C},
+ { 25, 0x00},
+ { 26, 0x40},
+ { 27, 0x00},
+ { 28, 0x1C},
+ { 29, 0x00},
+ { 30, 0x80},
+ { 31, 0x9D},
+ { 32, 0x10},
+ { 33, 0x30},
+ { 34, 0x00},
+ { 35, 0x4D},
+ { 36, 0x1C},
+ { 37, 0x00},
+ { 38, 0x40},
+ { 39, 0x9A},
+ { 40, 0x30},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x9D},
+ { 44, 0x1C},
+ { 45, 0x00},
+ { 46, 0x20},
+ { 47, 0x9F},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x10},
+ { 53, 0x30},
+ { 54, 0x00},
+ { 55, 0x9C},
+ { 56, 0x34},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x9C},
+ { 60, 0x5C},
+ { 61, 0x60},
+ { 62, 0xC0},
+ { 63, 0x9C},
+ { 64, 0x0C},
+ { 65, 0x00},
+ { 66, 0x60},
+ { 67, 0x00},
+ { 68, 0x5C},
+ { 69, 0x60},
+ { 70, 0xA0},
+ { 71, 0x4B},
+ { 72, 0x10},
+ { 73, 0x30},
+ { 74, 0x00},
+ { 75, 0x9E},
+ { 76, 0x10},
+ { 77, 0x13},
+ { 78, 0xE0},
+ { 79, 0xA1},
+ { 80, 0x00},
+ { 81, 0x00},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x10},
+ { 85, 0x13},
+ { 86, 0xE0},
+ { 87, 0x50},
+ { 88, 0x18},
+ { 89, 0x01},
+ { 90, 0x00},
+ { 91, 0x50},
+ { 92, 0x18},
+ { 93, 0x01},
+ { 94, 0x00},
+ { 95, 0xA1},
+ { 96, 0x0C},
+ { 97, 0x02},
+ { 98, 0x00},
+ { 99, 0x03},
+ {100, 0x0C},
+ {101, 0x02},
+ {102, 0x20},
+ {103, 0x03},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x02},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+};
+#define Second_Rate_miniDSP_A_reg_values_COEFF_START 0
+#define Second_Rate_miniDSP_A_reg_values_COEFF_SIZE 579
+#define Second_Rate_miniDSP_A_reg_values_INST_START 579
+#define Second_Rate_miniDSP_A_reg_values_INST_SIZE 966
+
+reg_value Second_Rate_miniDSP_D_reg_values[] = {
+ { 0, 0x0},
+ { 0x7F, 0x50},
+ { 0, 0x01},
+ { 8, 0xFF},
+ { 9, 0xFF},
+ { 10, 0xFF},
+ { 11, 0x00},
+ { 12, 0x80},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x7F},
+ { 17, 0xF7},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x80},
+ { 21, 0x09},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x7F},
+ { 25, 0xEF},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0x40},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x7F},
+ { 33, 0xFF},
+ { 34, 0xFF},
+ { 35, 0x00},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x80},
+ { 41, 0x00},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0x80},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x0D},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x1C},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x3E},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x78},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x02},
+ { 65, 0x4C},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x00},
+ { 69, 0xD5},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x01},
+ { 73, 0x65},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x03},
+ { 77, 0xE9},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x07},
+ { 81, 0xCA},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0xFF},
+ { 85, 0xEF},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0xFE},
+ { 89, 0xEB},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFF},
+ { 93, 0xA8},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0xFD},
+ { 97, 0x08},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0xFF},
+ {101, 0x5E},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0xFF},
+ {105, 0xD5},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0xFE},
+ {109, 0x36},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0xFA},
+ {113, 0xAC},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0xF2},
+ {117, 0xA3},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x28},
+ {121, 0xAB},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x3F},
+ {125, 0xFF},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0xFF},
+ { 9, 0x98},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xF6},
+ { 13, 0xF9},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x26},
+ { 17, 0xFB},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x02},
+ { 21, 0x72},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x40},
+ { 25, 0x02},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFB},
+ { 29, 0xCB},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x20},
+ { 33, 0xA7},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xFF},
+ { 37, 0x6A},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x3A},
+ { 41, 0x0F},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 0, 0x09},
+ { 72, 0xFF},
+ { 73, 0xFF},
+ { 74, 0xFF},
+ { 75, 0x00},
+ { 76, 0x80},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x7F},
+ { 81, 0xF7},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x80},
+ { 85, 0x09},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x7F},
+ { 89, 0xEF},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x40},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x7F},
+ { 97, 0xFF},
+ { 98, 0xFF},
+ { 99, 0x00},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x80},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x80},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x0D},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x1C},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x3E},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x00},
+ {125, 0x78},
+ {126, 0x00},
+ {127, 0x00},
+ { 0, 0x0A},
+ { 8, 0x02},
+ { 9, 0x4C},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0xD5},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x01},
+ { 17, 0x65},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x03},
+ { 21, 0xE9},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x07},
+ { 25, 0xCA},
+ { 26, 0x00},
+ { 27, 0x00},
+ { 28, 0xFF},
+ { 29, 0xEF},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0xFE},
+ { 33, 0xEB},
+ { 34, 0x00},
+ { 35, 0x00},
+ { 36, 0xFF},
+ { 37, 0xA8},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0xFD},
+ { 41, 0x08},
+ { 42, 0x00},
+ { 43, 0x00},
+ { 44, 0xFF},
+ { 45, 0x5E},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0xFF},
+ { 49, 0xD5},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0xFE},
+ { 53, 0x36},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0xFA},
+ { 57, 0xAC},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0xF2},
+ { 61, 0xA3},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x28},
+ { 65, 0xAB},
+ { 66, 0x00},
+ { 67, 0x00},
+ { 68, 0x3F},
+ { 69, 0xFF},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0xFF},
+ { 73, 0x98},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0xF6},
+ { 77, 0xF9},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x26},
+ { 81, 0xFB},
+ { 82, 0x00},
+ { 83, 0x00},
+ { 84, 0x02},
+ { 85, 0x72},
+ { 86, 0x00},
+ { 87, 0x00},
+ { 88, 0x40},
+ { 89, 0x02},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0xFB},
+ { 93, 0xCB},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x20},
+ { 97, 0xA7},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0xFF},
+ {101, 0x6A},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x3A},
+ {105, 0x0F},
+ {106, 0x00},
+ {107, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x3C},
+ { 0, 0x01},
+ { 8, 0xC0},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0xC0},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 0, 0x0},
+ { 0x7F, 0x78},
+ { 0, 0x01},
+ { 8, 0x04},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x02},
+ { 12, 0x04},
+ { 13, 0x00},
+ { 14, 0x20},
+ { 15, 0x03},
+ { 16, 0x58},
+ { 17, 0x60},
+ { 18, 0x08},
+ { 19, 0x01},
+ { 20, 0x60},
+ { 21, 0x60},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x58},
+ { 25, 0x60},
+ { 26, 0x00},
+ { 27, 0x07},
+ { 28, 0x00},
+ { 29, 0x00},
+ { 30, 0x00},
+ { 31, 0x00},
+ { 32, 0x44},
+ { 33, 0x00},
+ { 34, 0xC0},
+ { 35, 0x16},
+ { 36, 0x08},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x45},
+ { 40, 0x08},
+ { 41, 0x00},
+ { 42, 0x20},
+ { 43, 0x85},
+ { 44, 0x08},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x44},
+ { 48, 0x08},
+ { 49, 0x00},
+ { 50, 0x20},
+ { 51, 0x84},
+ { 52, 0x08},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x43},
+ { 56, 0x08},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0x83},
+ { 60, 0x08},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x42},
+ { 64, 0x08},
+ { 65, 0x00},
+ { 66, 0x20},
+ { 67, 0x82},
+ { 68, 0x08},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x41},
+ { 72, 0x08},
+ { 73, 0x00},
+ { 74, 0x20},
+ { 75, 0x81},
+ { 76, 0x08},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x40},
+ { 80, 0x08},
+ { 81, 0x00},
+ { 82, 0x20},
+ { 83, 0x80},
+ { 84, 0x08},
+ { 85, 0x00},
+ { 86, 0x00},
+ { 87, 0x3F},
+ { 88, 0x08},
+ { 89, 0x00},
+ { 90, 0x20},
+ { 91, 0x7F},
+ { 92, 0x08},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x3E},
+ { 96, 0x08},
+ { 97, 0x00},
+ { 98, 0x20},
+ { 99, 0x7E},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x44},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x06},
+ {124, 0x21},
+ {125, 0x00},
+ {126, 0x20},
+ {127, 0x00},
+ { 0, 0x02},
+ { 8, 0x4B},
+ { 9, 0x00},
+ { 10, 0xC0},
+ { 11, 0x00},
+ { 12, 0x4B},
+ { 13, 0x00},
+ { 14, 0xE0},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x40},
+ { 19, 0x48},
+ { 20, 0x0C},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x0C},
+ { 25, 0x00},
+ { 26, 0x20},
+ { 27, 0x00},
+ { 28, 0x04},
+ { 29, 0x02},
+ { 30, 0xC0},
+ { 31, 0x00},
+ { 32, 0x04},
+ { 33, 0x02},
+ { 34, 0xE0},
+ { 35, 0x01},
+ { 36, 0x18},
+ { 37, 0x00},
+ { 38, 0xA0},
+ { 39, 0x02},
+ { 40, 0x1C},
+ { 41, 0x00},
+ { 42, 0xA0},
+ { 43, 0x00},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x20},
+ { 59, 0x04},
+ { 60, 0x18},
+ { 61, 0x00},
+ { 62, 0xA0},
+ { 63, 0x03},
+ { 64, 0x1C},
+ { 65, 0x00},
+ { 66, 0xA0},
+ { 67, 0x01},
+ { 68, 0x00},
+ { 69, 0x00},
+ { 70, 0x00},
+ { 71, 0x00},
+ { 72, 0x00},
+ { 73, 0x00},
+ { 74, 0x00},
+ { 75, 0x00},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x20},
+ { 83, 0x05},
+ { 84, 0x18},
+ { 85, 0x00},
+ { 86, 0xA0},
+ { 87, 0x04},
+ { 88, 0x18},
+ { 89, 0x00},
+ { 90, 0xA0},
+ { 91, 0x05},
+ { 92, 0x18},
+ { 93, 0x00},
+ { 94, 0x60},
+ { 95, 0x09},
+ { 96, 0x1C},
+ { 97, 0x00},
+ { 98, 0x80},
+ { 99, 0x0B},
+ {100, 0x10},
+ {101, 0x00},
+ {102, 0x20},
+ {103, 0x08},
+ {104, 0x10},
+ {105, 0x00},
+ {106, 0x20},
+ {107, 0x46},
+ {108, 0x1C},
+ {109, 0x00},
+ {110, 0x40},
+ {111, 0x08},
+ {112, 0x18},
+ {113, 0x00},
+ {114, 0x60},
+ {115, 0x47},
+ {116, 0x1C},
+ {117, 0x00},
+ {118, 0x80},
+ {119, 0x4B},
+ {120, 0x1C},
+ {121, 0x00},
+ {122, 0x40},
+ {123, 0x46},
+ {124, 0x10},
+ {125, 0x30},
+ {126, 0x00},
+ {127, 0x0A},
+ { 0, 0x03},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x10},
+ { 17, 0x30},
+ { 18, 0x00},
+ { 19, 0x4A},
+ { 20, 0x18},
+ { 21, 0x01},
+ { 22, 0x40},
+ { 23, 0x2F},
+ { 24, 0x1C},
+ { 25, 0x01},
+ { 26, 0x60},
+ { 27, 0x0C},
+ { 28, 0x1C},
+ { 29, 0x01},
+ { 30, 0x40},
+ { 31, 0x0A},
+ { 32, 0x1C},
+ { 33, 0x01},
+ { 34, 0x60},
+ { 35, 0x2D},
+ { 36, 0x1C},
+ { 37, 0x01},
+ { 38, 0x80},
+ { 39, 0x0E},
+ { 40, 0x1C},
+ { 41, 0x01},
+ { 42, 0x80},
+ { 43, 0x2B},
+ { 44, 0x1C},
+ { 45, 0x01},
+ { 46, 0xA0},
+ { 47, 0x10},
+ { 48, 0x1C},
+ { 49, 0x01},
+ { 50, 0xA0},
+ { 51, 0x29},
+ { 52, 0x1C},
+ { 53, 0x01},
+ { 54, 0xC0},
+ { 55, 0x16},
+ { 56, 0x1C},
+ { 57, 0x01},
+ { 58, 0xC0},
+ { 59, 0x23},
+ { 60, 0x1C},
+ { 61, 0x01},
+ { 62, 0xE0},
+ { 63, 0x12},
+ { 64, 0x1C},
+ { 65, 0x01},
+ { 66, 0xE0},
+ { 67, 0x27},
+ { 68, 0x1C},
+ { 69, 0x02},
+ { 70, 0x00},
+ { 71, 0x14},
+ { 72, 0x1C},
+ { 73, 0x02},
+ { 74, 0x00},
+ { 75, 0x25},
+ { 76, 0x1C},
+ { 77, 0x02},
+ { 78, 0x20},
+ { 79, 0x18},
+ { 80, 0x1C},
+ { 81, 0x02},
+ { 82, 0x20},
+ { 83, 0x21},
+ { 84, 0x1C},
+ { 85, 0x02},
+ { 86, 0x40},
+ { 87, 0x1A},
+ { 88, 0x1C},
+ { 89, 0x02},
+ { 90, 0x40},
+ { 91, 0x1F},
+ { 92, 0x1C},
+ { 93, 0x02},
+ { 94, 0x60},
+ { 95, 0x0B},
+ { 96, 0x1C},
+ { 97, 0x02},
+ { 98, 0x60},
+ { 99, 0x2E},
+ {100, 0x1C},
+ {101, 0x02},
+ {102, 0x80},
+ {103, 0x13},
+ {104, 0x1C},
+ {105, 0x02},
+ {106, 0x80},
+ {107, 0x26},
+ {108, 0x1C},
+ {109, 0x02},
+ {110, 0xA0},
+ {111, 0x0F},
+ {112, 0x1C},
+ {113, 0x02},
+ {114, 0xA0},
+ {115, 0x2A},
+ {116, 0x1C},
+ {117, 0x02},
+ {118, 0xC0},
+ {119, 0x17},
+ {120, 0x1C},
+ {121, 0x02},
+ {122, 0xC0},
+ {123, 0x22},
+ {124, 0x1C},
+ {125, 0x02},
+ {126, 0xE0},
+ {127, 0x11},
+ { 0, 0x04},
+ { 8, 0x1C},
+ { 9, 0x02},
+ { 10, 0xE0},
+ { 11, 0x28},
+ { 12, 0x1C},
+ { 13, 0x03},
+ { 14, 0x00},
+ { 15, 0x0D},
+ { 16, 0x1C},
+ { 17, 0x03},
+ { 18, 0x00},
+ { 19, 0x2C},
+ { 20, 0x1C},
+ { 21, 0x03},
+ { 22, 0x20},
+ { 23, 0x15},
+ { 24, 0x1C},
+ { 25, 0x03},
+ { 26, 0x20},
+ { 27, 0x24},
+ { 28, 0x1C},
+ { 29, 0x03},
+ { 30, 0x40},
+ { 31, 0x19},
+ { 32, 0x1C},
+ { 33, 0x03},
+ { 34, 0x40},
+ { 35, 0x20},
+ { 36, 0x1C},
+ { 37, 0x03},
+ { 38, 0x60},
+ { 39, 0x1B},
+ { 40, 0x1C},
+ { 41, 0x03},
+ { 42, 0x60},
+ { 43, 0x1E},
+ { 44, 0x1C},
+ { 45, 0x03},
+ { 46, 0x80},
+ { 47, 0x1C},
+ { 48, 0x1C},
+ { 49, 0x03},
+ { 50, 0x80},
+ { 51, 0x1D},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x00},
+ { 57, 0x00},
+ { 58, 0x00},
+ { 59, 0x00},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x10},
+ { 65, 0x00},
+ { 66, 0x00},
+ { 67, 0x34},
+ { 68, 0x18},
+ { 69, 0x01},
+ { 70, 0x40},
+ { 71, 0x6F},
+ { 72, 0x1C},
+ { 73, 0x01},
+ { 74, 0x60},
+ { 75, 0x4C},
+ { 76, 0x1C},
+ { 77, 0x01},
+ { 78, 0x40},
+ { 79, 0x4A},
+ { 80, 0x1C},
+ { 81, 0x01},
+ { 82, 0x60},
+ { 83, 0x6D},
+ { 84, 0x1C},
+ { 85, 0x01},
+ { 86, 0x80},
+ { 87, 0x4E},
+ { 88, 0x1C},
+ { 89, 0x01},
+ { 90, 0x80},
+ { 91, 0x6B},
+ { 92, 0x1C},
+ { 93, 0x01},
+ { 94, 0xA0},
+ { 95, 0x50},
+ { 96, 0x1C},
+ { 97, 0x01},
+ { 98, 0xA0},
+ { 99, 0x69},
+ {100, 0x1C},
+ {101, 0x01},
+ {102, 0xC0},
+ {103, 0x56},
+ {104, 0x1C},
+ {105, 0x01},
+ {106, 0xC0},
+ {107, 0x63},
+ {108, 0x1C},
+ {109, 0x01},
+ {110, 0xE0},
+ {111, 0x52},
+ {112, 0x1C},
+ {113, 0x01},
+ {114, 0xE0},
+ {115, 0x67},
+ {116, 0x1C},
+ {117, 0x02},
+ {118, 0x00},
+ {119, 0x54},
+ {120, 0x1C},
+ {121, 0x02},
+ {122, 0x00},
+ {123, 0x65},
+ {124, 0x1C},
+ {125, 0x02},
+ {126, 0x20},
+ {127, 0x58},
+ { 0, 0x05},
+ { 8, 0x1C},
+ { 9, 0x02},
+ { 10, 0x20},
+ { 11, 0x61},
+ { 12, 0x1C},
+ { 13, 0x02},
+ { 14, 0x40},
+ { 15, 0x5A},
+ { 16, 0x1C},
+ { 17, 0x02},
+ { 18, 0x40},
+ { 19, 0x5F},
+ { 20, 0x1C},
+ { 21, 0x02},
+ { 22, 0x60},
+ { 23, 0x4B},
+ { 24, 0x1C},
+ { 25, 0x02},
+ { 26, 0x60},
+ { 27, 0x6E},
+ { 28, 0x1C},
+ { 29, 0x02},
+ { 30, 0x80},
+ { 31, 0x53},
+ { 32, 0x1C},
+ { 33, 0x02},
+ { 34, 0x80},
+ { 35, 0x66},
+ { 36, 0x1C},
+ { 37, 0x02},
+ { 38, 0xA0},
+ { 39, 0x4F},
+ { 40, 0x1C},
+ { 41, 0x02},
+ { 42, 0xA0},
+ { 43, 0x6A},
+ { 44, 0x1C},
+ { 45, 0x02},
+ { 46, 0xC0},
+ { 47, 0x57},
+ { 48, 0x1C},
+ { 49, 0x02},
+ { 50, 0xC0},
+ { 51, 0x62},
+ { 52, 0x1C},
+ { 53, 0x02},
+ { 54, 0xE0},
+ { 55, 0x51},
+ { 56, 0x1C},
+ { 57, 0x02},
+ { 58, 0xE0},
+ { 59, 0x68},
+ { 60, 0x1C},
+ { 61, 0x03},
+ { 62, 0x00},
+ { 63, 0x4D},
+ { 64, 0x1C},
+ { 65, 0x03},
+ { 66, 0x00},
+ { 67, 0x6C},
+ { 68, 0x1C},
+ { 69, 0x03},
+ { 70, 0x20},
+ { 71, 0x55},
+ { 72, 0x1C},
+ { 73, 0x03},
+ { 74, 0x20},
+ { 75, 0x64},
+ { 76, 0x1C},
+ { 77, 0x03},
+ { 78, 0x40},
+ { 79, 0x59},
+ { 80, 0x1C},
+ { 81, 0x03},
+ { 82, 0x40},
+ { 83, 0x60},
+ { 84, 0x1C},
+ { 85, 0x03},
+ { 86, 0x60},
+ { 87, 0x5B},
+ { 88, 0x1C},
+ { 89, 0x03},
+ { 90, 0x60},
+ { 91, 0x5E},
+ { 92, 0x1C},
+ { 93, 0x03},
+ { 94, 0x80},
+ { 95, 0x5C},
+ { 96, 0x1C},
+ { 97, 0x03},
+ { 98, 0x80},
+ { 99, 0x5D},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x10},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x74},
+ {116, 0x18},
+ {117, 0x03},
+ {118, 0xC0},
+ {119, 0x73},
+ {120, 0x1C},
+ {121, 0x03},
+ {122, 0xE0},
+ {123, 0x75},
+ {124, 0x1C},
+ {125, 0x03},
+ {126, 0xE0},
+ {127, 0x72},
+ { 0, 0x06},
+ { 8, 0x1C},
+ { 9, 0x03},
+ { 10, 0xC0},
+ { 11, 0x74},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0x00},
+ { 15, 0x71},
+ { 16, 0x1C},
+ { 17, 0x04},
+ { 18, 0x00},
+ { 19, 0x76},
+ { 20, 0x1C},
+ { 21, 0x04},
+ { 22, 0x20},
+ { 23, 0x70},
+ { 24, 0x1C},
+ { 25, 0x04},
+ { 26, 0x20},
+ { 27, 0x77},
+ { 28, 0x18},
+ { 29, 0x04},
+ { 30, 0x80},
+ { 31, 0x78},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0x80},
+ { 35, 0x7A},
+ { 36, 0x1C},
+ { 37, 0x04},
+ { 38, 0x60},
+ { 39, 0x7D},
+ { 40, 0x4C},
+ { 41, 0x04},
+ { 42, 0x60},
+ { 43, 0x7B},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x5C},
+ { 49, 0x90},
+ { 50, 0x60},
+ { 51, 0x03},
+ { 52, 0x18},
+ { 53, 0x03},
+ { 54, 0xC0},
+ { 55, 0x33},
+ { 56, 0x1C},
+ { 57, 0x03},
+ { 58, 0xC0},
+ { 59, 0x34},
+ { 60, 0x1C},
+ { 61, 0x03},
+ { 62, 0xE0},
+ { 63, 0x35},
+ { 64, 0x10},
+ { 65, 0x00},
+ { 66, 0x40},
+ { 67, 0x84},
+ { 68, 0x1C},
+ { 69, 0x03},
+ { 70, 0xE0},
+ { 71, 0x32},
+ { 72, 0x1C},
+ { 73, 0x04},
+ { 74, 0x00},
+ { 75, 0x31},
+ { 76, 0x1C},
+ { 77, 0x04},
+ { 78, 0x00},
+ { 79, 0x36},
+ { 80, 0x1C},
+ { 81, 0x04},
+ { 82, 0x20},
+ { 83, 0x30},
+ { 84, 0x1C},
+ { 85, 0x04},
+ { 86, 0x20},
+ { 87, 0x37},
+ { 88, 0x18},
+ { 89, 0x04},
+ { 90, 0x80},
+ { 91, 0x38},
+ { 92, 0x1C},
+ { 93, 0x04},
+ { 94, 0x80},
+ { 95, 0x3A},
+ { 96, 0x1C},
+ { 97, 0x04},
+ { 98, 0x60},
+ { 99, 0x3D},
+ {100, 0x4C},
+ {101, 0x04},
+ {102, 0x60},
+ {103, 0x3B},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x5C},
+ {109, 0x90},
+ {110, 0x40},
+ {111, 0x03},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x00},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+ {124, 0x10},
+ {125, 0x00},
+ {126, 0x40},
+ {127, 0x44},
+ { 0, 0x07},
+ { 8, 0x18},
+ { 9, 0x04},
+ { 10, 0xA0},
+ { 11, 0x7B},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0xA0},
+ { 15, 0x7A},
+ { 16, 0x1C},
+ { 17, 0x04},
+ { 18, 0xC0},
+ { 19, 0x78},
+ { 20, 0x00},
+ { 21, 0x00},
+ { 22, 0x00},
+ { 23, 0x00},
+ { 24, 0x5C},
+ { 25, 0x90},
+ { 26, 0x60},
+ { 27, 0x03},
+ { 28, 0x18},
+ { 29, 0x04},
+ { 30, 0xA0},
+ { 31, 0x3B},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0xA0},
+ { 35, 0x3A},
+ { 36, 0x1C},
+ { 37, 0x04},
+ { 38, 0xC0},
+ { 39, 0x38},
+ { 40, 0x10},
+ { 41, 0x00},
+ { 42, 0x40},
+ { 43, 0x83},
+ { 44, 0x18},
+ { 45, 0x04},
+ { 46, 0x40},
+ { 47, 0x71},
+ { 48, 0x5C},
+ { 49, 0x90},
+ { 50, 0x40},
+ { 51, 0x03},
+ { 52, 0x18},
+ { 53, 0x04},
+ { 54, 0x60},
+ { 55, 0x7A},
+ { 56, 0x1C},
+ { 57, 0x04},
+ { 58, 0x80},
+ { 59, 0x7B},
+ { 60, 0x4C},
+ { 61, 0x04},
+ { 62, 0x60},
+ { 63, 0x7C},
+ { 64, 0x10},
+ { 65, 0x00},
+ { 66, 0x40},
+ { 67, 0x43},
+ { 68, 0x1C},
+ { 69, 0x04},
+ { 70, 0x80},
+ { 71, 0x78},
+ { 72, 0x18},
+ { 73, 0x04},
+ { 74, 0x40},
+ { 75, 0x31},
+ { 76, 0x5C},
+ { 77, 0x90},
+ { 78, 0x60},
+ { 79, 0x03},
+ { 80, 0x18},
+ { 81, 0x04},
+ { 82, 0x60},
+ { 83, 0x3A},
+ { 84, 0x1C},
+ { 85, 0x04},
+ { 86, 0x80},
+ { 87, 0x3B},
+ { 88, 0x4C},
+ { 89, 0x04},
+ { 90, 0x60},
+ { 91, 0x3C},
+ { 92, 0x10},
+ { 93, 0x00},
+ { 94, 0x40},
+ { 95, 0x82},
+ { 96, 0x1C},
+ { 97, 0x04},
+ { 98, 0x80},
+ { 99, 0x38},
+ {100, 0x00},
+ {101, 0x00},
+ {102, 0x00},
+ {103, 0x00},
+ {104, 0x5C},
+ {105, 0x90},
+ {106, 0x40},
+ {107, 0x03},
+ {108, 0x18},
+ {109, 0x04},
+ {110, 0xA0},
+ {111, 0x7C},
+ {112, 0x1C},
+ {113, 0x04},
+ {114, 0xA0},
+ {115, 0x78},
+ {116, 0x1C},
+ {117, 0x04},
+ {118, 0xC0},
+ {119, 0x7B},
+ {120, 0x10},
+ {121, 0x00},
+ {122, 0x40},
+ {123, 0x42},
+ {124, 0x5C},
+ {125, 0x90},
+ {126, 0x60},
+ {127, 0x03},
+ { 0, 0x08},
+ { 8, 0x00},
+ { 9, 0x00},
+ { 10, 0x00},
+ { 11, 0x00},
+ { 12, 0x00},
+ { 13, 0x00},
+ { 14, 0x00},
+ { 15, 0x00},
+ { 16, 0x00},
+ { 17, 0x00},
+ { 18, 0x00},
+ { 19, 0x00},
+ { 20, 0x10},
+ { 21, 0x00},
+ { 22, 0x40},
+ { 23, 0x81},
+ { 24, 0x18},
+ { 25, 0x04},
+ { 26, 0xA0},
+ { 27, 0x3C},
+ { 28, 0x1C},
+ { 29, 0x04},
+ { 30, 0xA0},
+ { 31, 0x38},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0xC0},
+ { 35, 0x3B},
+ { 36, 0x18},
+ { 37, 0x03},
+ { 38, 0xA0},
+ { 39, 0x5C},
+ { 40, 0x5C},
+ { 41, 0x90},
+ { 42, 0x40},
+ { 43, 0x03},
+ { 44, 0x18},
+ { 45, 0x03},
+ { 46, 0xC0},
+ { 47, 0x77},
+ { 48, 0x1C},
+ { 49, 0x03},
+ { 50, 0xE0},
+ { 51, 0x70},
+ { 52, 0x4C},
+ { 53, 0x03},
+ { 54, 0xC0},
+ { 55, 0x6F},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x40},
+ { 59, 0x41},
+ { 60, 0x1C},
+ { 61, 0x03},
+ { 62, 0xE0},
+ { 63, 0x76},
+ { 64, 0x1C},
+ { 65, 0x04},
+ { 66, 0x00},
+ { 67, 0x75},
+ { 68, 0x1C},
+ { 69, 0x04},
+ { 70, 0x00},
+ { 71, 0x71},
+ { 72, 0x1C},
+ { 73, 0x04},
+ { 74, 0x20},
+ { 75, 0x74},
+ { 76, 0x1C},
+ { 77, 0x04},
+ { 78, 0x20},
+ { 79, 0x72},
+ { 80, 0x18},
+ { 81, 0x04},
+ { 82, 0x80},
+ { 83, 0x7C},
+ { 84, 0x1C},
+ { 85, 0x04},
+ { 86, 0x80},
+ { 87, 0x7B},
+ { 88, 0x1C},
+ { 89, 0x04},
+ { 90, 0x60},
+ { 91, 0x78},
+ { 92, 0x4C},
+ { 93, 0x04},
+ { 94, 0x60},
+ { 95, 0x79},
+ { 96, 0x18},
+ { 97, 0x03},
+ { 98, 0xA0},
+ { 99, 0x1C},
+ {100, 0x5C},
+ {101, 0x90},
+ {102, 0x60},
+ {103, 0x03},
+ {104, 0x18},
+ {105, 0x03},
+ {106, 0xC0},
+ {107, 0x37},
+ {108, 0x1C},
+ {109, 0x03},
+ {110, 0xE0},
+ {111, 0x30},
+ {112, 0x4C},
+ {113, 0x03},
+ {114, 0xC0},
+ {115, 0x2F},
+ {116, 0x10},
+ {117, 0x00},
+ {118, 0x40},
+ {119, 0x80},
+ {120, 0x1C},
+ {121, 0x03},
+ {122, 0xE0},
+ {123, 0x36},
+ {124, 0x1C},
+ {125, 0x04},
+ {126, 0x00},
+ {127, 0x35},
+ { 0, 0x09},
+ { 8, 0x1C},
+ { 9, 0x04},
+ { 10, 0x00},
+ { 11, 0x31},
+ { 12, 0x1C},
+ { 13, 0x04},
+ { 14, 0x20},
+ { 15, 0x34},
+ { 16, 0x1C},
+ { 17, 0x04},
+ { 18, 0x20},
+ { 19, 0x32},
+ { 20, 0x18},
+ { 21, 0x04},
+ { 22, 0x80},
+ { 23, 0x3C},
+ { 24, 0x1C},
+ { 25, 0x04},
+ { 26, 0x80},
+ { 27, 0x3B},
+ { 28, 0x1C},
+ { 29, 0x04},
+ { 30, 0x60},
+ { 31, 0x38},
+ { 32, 0x4C},
+ { 33, 0x04},
+ { 34, 0x60},
+ { 35, 0x39},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x5C},
+ { 41, 0x90},
+ { 42, 0x40},
+ { 43, 0x03},
+ { 44, 0x00},
+ { 45, 0x00},
+ { 46, 0x00},
+ { 47, 0x00},
+ { 48, 0x00},
+ { 49, 0x00},
+ { 50, 0x00},
+ { 51, 0x00},
+ { 52, 0x00},
+ { 53, 0x00},
+ { 54, 0x00},
+ { 55, 0x00},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x40},
+ { 59, 0x3F},
+ { 60, 0x18},
+ { 61, 0x04},
+ { 62, 0xA0},
+ { 63, 0x79},
+ { 64, 0x1C},
+ { 65, 0x04},
+ { 66, 0xA0},
+ { 67, 0x7B},
+ { 68, 0x1C},
+ { 69, 0x04},
+ { 70, 0xC0},
+ { 71, 0x7C},
+ { 72, 0x10},
+ { 73, 0x00},
+ { 74, 0x40},
+ { 75, 0x40},
+ { 76, 0x00},
+ { 77, 0x00},
+ { 78, 0x00},
+ { 79, 0x00},
+ { 80, 0x5C},
+ { 81, 0x90},
+ { 82, 0x60},
+ { 83, 0x03},
+ { 84, 0x18},
+ { 85, 0x04},
+ { 86, 0xA0},
+ { 87, 0x39},
+ { 88, 0x1C},
+ { 89, 0x04},
+ { 90, 0xA0},
+ { 91, 0x3B},
+ { 92, 0x1C},
+ { 93, 0x04},
+ { 94, 0xC0},
+ { 95, 0x3C},
+ { 96, 0x10},
+ { 97, 0x00},
+ { 98, 0x40},
+ { 99, 0x7F},
+ {100, 0x18},
+ {101, 0x04},
+ {102, 0x40},
+ {103, 0x75},
+ {104, 0x5C},
+ {105, 0x90},
+ {106, 0x40},
+ {107, 0x03},
+ {108, 0x18},
+ {109, 0x04},
+ {110, 0x60},
+ {111, 0x7B},
+ {112, 0x1C},
+ {113, 0x04},
+ {114, 0x80},
+ {115, 0x79},
+ {116, 0x4C},
+ {117, 0x04},
+ {118, 0x60},
+ {119, 0x77},
+ {120, 0x10},
+ {121, 0x00},
+ {122, 0x40},
+ {123, 0x3F},
+ {124, 0x1C},
+ {125, 0x04},
+ {126, 0x80},
+ {127, 0x7C},
+ { 0, 0x0A},
+ { 8, 0x18},
+ { 9, 0x04},
+ { 10, 0x40},
+ { 11, 0x35},
+ { 12, 0x5C},
+ { 13, 0x90},
+ { 14, 0x60},
+ { 15, 0x03},
+ { 16, 0x18},
+ { 17, 0x04},
+ { 18, 0x60},
+ { 19, 0x3B},
+ { 20, 0x1C},
+ { 21, 0x04},
+ { 22, 0x80},
+ { 23, 0x39},
+ { 24, 0x4C},
+ { 25, 0x04},
+ { 26, 0x60},
+ { 27, 0x37},
+ { 28, 0x10},
+ { 29, 0x00},
+ { 30, 0x40},
+ { 31, 0x7E},
+ { 32, 0x1C},
+ { 33, 0x04},
+ { 34, 0x80},
+ { 35, 0x3C},
+ { 36, 0x00},
+ { 37, 0x00},
+ { 38, 0x00},
+ { 39, 0x00},
+ { 40, 0x5C},
+ { 41, 0x90},
+ { 42, 0x40},
+ { 43, 0x03},
+ { 44, 0x18},
+ { 45, 0x04},
+ { 46, 0xA0},
+ { 47, 0x77},
+ { 48, 0x1C},
+ { 49, 0x04},
+ { 50, 0xA0},
+ { 51, 0x7C},
+ { 52, 0x1C},
+ { 53, 0x04},
+ { 54, 0xC0},
+ { 55, 0x79},
+ { 56, 0x10},
+ { 57, 0x00},
+ { 58, 0x40},
+ { 59, 0x3E},
+ { 60, 0x00},
+ { 61, 0x00},
+ { 62, 0x00},
+ { 63, 0x00},
+ { 64, 0x5C},
+ { 65, 0x90},
+ { 66, 0x60},
+ { 67, 0x03},
+ { 68, 0x18},
+ { 69, 0x04},
+ { 70, 0xA0},
+ { 71, 0x37},
+ { 72, 0x1C},
+ { 73, 0x04},
+ { 74, 0xA0},
+ { 75, 0x3C},
+ { 76, 0x1C},
+ { 77, 0x04},
+ { 78, 0xC0},
+ { 79, 0x39},
+ { 80, 0x10},
+ { 81, 0x00},
+ { 82, 0x40},
+ { 83, 0x7D},
+ { 84, 0x5C},
+ { 85, 0x90},
+ { 86, 0x40},
+ { 87, 0x03},
+ { 88, 0x00},
+ { 89, 0x00},
+ { 90, 0x00},
+ { 91, 0x00},
+ { 92, 0x00},
+ { 93, 0x00},
+ { 94, 0x00},
+ { 95, 0x00},
+ { 96, 0x00},
+ { 97, 0x00},
+ { 98, 0x00},
+ { 99, 0x00},
+ {100, 0x10},
+ {101, 0x00},
+ {102, 0x40},
+ {103, 0x3D},
+ {104, 0x00},
+ {105, 0x00},
+ {106, 0x00},
+ {107, 0x00},
+ {108, 0x00},
+ {109, 0x00},
+ {110, 0x00},
+ {111, 0x00},
+ {112, 0x00},
+ {113, 0x00},
+ {114, 0x00},
+ {115, 0x00},
+ {116, 0x02},
+ {117, 0x00},
+ {118, 0x00},
+ {119, 0x00},
+ {120, 0x00},
+ {121, 0x00},
+ {122, 0x00},
+ {123, 0x00},
+};
+#define Second_Rate_miniDSP_D_reg_values_COEFF_START 0
+#define Second_Rate_miniDSP_D_reg_values_COEFF_SIZE 329
+#define Second_Rate_miniDSP_D_reg_values_INST_START 329
+#define Second_Rate_miniDSP_D_reg_values_INST_SIZE 1208
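The *_COEFF_START/SIZE and *_INST_START/SIZE constants split each table into a coefficient slice followed by a miniDSP instruction slice, and the tables themselves are plain {register, value} pairs in which writes to register 0x00 (page select) and 0x7F (book select) steer the writes that follow. Below is a minimal sketch of a loader for such a table, assuming the conventional reg_off/reg_val field names for reg_value and the generic snd_soc_write() accessor; the helper name minidsp_load() is made up for illustration and is not part of this patch.

/* Sketch only: replay one slice of a reg_value table in order, so the
 * embedded page/book select entries take effect before the data writes. */
static void minidsp_load(struct snd_soc_codec *codec,
			 const reg_value *table, int len)
{
	int i;

	for (i = 0; i < len; i++)
		snd_soc_write(codec, table[i].reg_off, table[i].reg_val);
}

/* e.g., load only the instruction slice of the miniDSP_D image:
 *	minidsp_load(codec,
 *		     &Second_Rate_miniDSP_D_reg_values[
 *				Second_Rate_miniDSP_D_reg_values_INST_START],
 *		     Second_Rate_miniDSP_D_reg_values_INST_SIZE);
 */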
diff --git a/sound/soc/codecs/spdif_transciever.c b/sound/soc/codecs/spdif_transciever.c
index 6a1a7e705cd7..7c1bf5c04eb9 100644
--- a/sound/soc/codecs/spdif_transciever.c
+++ b/sound/soc/codecs/spdif_transciever.c
@@ -38,6 +38,13 @@ static struct snd_soc_dai_driver dit_stub_dai = {
.rates = STUB_RATES,
.formats = STUB_FORMATS,
},
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 384,
+ .rates = STUB_RATES,
+ .formats = STUB_FORMATS,
+ },
};
static int spdif_dit_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/tlv320aic326x.c b/sound/soc/codecs/tlv320aic326x.c
new file mode 100644
index 000000000000..9f1874328b28
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic326x.c
@@ -0,0 +1,3689 @@
+/*
+* linux/sound/soc/codecs/tlv320aic3262.c
+*
+* Copyright (C) 2011 Mistral Solutions Pvt Ltd.
+*
+* Based on sound/soc/codecs/tlv320aic3262.c
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+*
+* The TLV320AIC3262 is a flexible, low-power, low-voltage stereo audio
+* codec with digital microphone inputs and programmable outputs.
+*
+* History:
+*
+* Rev 0.1 ASoC driver support Mistral 20-01-2011
+*
+* The AIC325x ASoC driver is ported for the codec AIC3262.
+* Rev 0.2 ASoC driver support Mistral 21-03-2011
+* The AIC326x ASoC driver is updated for ABE changes.
+*
+* Rev 0.3 ASoC driver support Mistral 12.09.2011
+* fixed the compilation issues for Whistler support
+*
+* Rev 0.4 ASoC driver support Mistral 27.09.2011
+* The AIC326x driver ported for Nvidia cardhu.
+*
+* Rev 0.5 Modified to support Multiple ASI Ports 08-Nov-2011
+* Driver updated to support ASI Ports of AIC3262
+*
+* Modified by Nvidia 23-Nov-2011 for K39 ASoC changes.
+*/
+
+/*
+ *****************************************************************************
+ * INCLUDES
+ *****************************************************************************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <asm/div64.h>
+#include <sound/tlv320aic326x.h>
+#include <sound/jack.h>
+
+#include "tlv320aic326x.h"
+
+/*
+ *****************************************************************************
+ * Global Variable
+ *****************************************************************************
+ */
+static u8 aic3262_reg_ctl;
+
+u8 dac_reg = 0, adc_gain = 0, hpl = 0, hpr = 0;
+u8 rec_amp = 0, rampr = 0, spk_amp = 0;
+/* Whenever aplay/arecord is run, the aic3262_hw_params() function gets called.
+ * That function reprograms the clock dividers etc.; this flag can be used to
+ * skip that when the clock dividers are already programmed by the PPS config
+ * file.
+ */
+static int soc_static_freq_config = 1;
+
+/*
+ *****************************************************************************
+ * Macros
+ *****************************************************************************
+ */
+
+/* ASoC Widget Control definition for a single Register based Control */
+#define SOC_SINGLE_AIC3262(xname) \
+{\
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = __new_control_info, .get = __new_control_get,\
+ .put = __new_control_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+}
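+/* Backs the "Program Registers" control defined further below; together with
+ * __new_control_get()/__new_control_put() it lets user space read back and
+ * write arbitrary codec registers for debugging.
+ */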
+#define SOC_SINGLE_N(xname, xreg, xshift, xmax, xinvert) \
+{\
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = n_control_info, .get = n_control_get,\
+ .put = n_control_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .shift = xshift, .rshift = xshift, .max = xmax, \
+ .invert = xinvert} }
+
+/* ASoC Widget Control definition for a Double Register based Control */
+
+#define SOC_DOUBLE_R_N(xname, reg_left, reg_right, xshift, xmax, xinvert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .info = snd_soc_info_volsw_2r_n, \
+ .get = snd_soc_get_volsw_2r_n, .put = snd_soc_put_volsw_2r_n, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = reg_left, .rreg = reg_right, .shift = xshift, \
+ .max = xmax, .invert = xinvert} }
+
+#define SND_SOC_DAPM_SWITCH_N(wname, wreg, wshift, winvert) \
+{ .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift,\
+ .invert = winvert, .kcontrols = NULL, .num_kcontrols = 0}
+/*
+ *****************************************************************************
+ * Function Prototype
+ *****************************************************************************
+ */
+static int aic3262_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level);
+
+static int __new_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+
+static int __new_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+static int __new_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+static inline int aic3262_get_divs(int mclk, int rate);
+
+static int aic3262_multi_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+
+static int aic3262_multi_i2s_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir);
+static int aic3262_multi_i2s_set_dai_pll(struct snd_soc_dai *codec_dai,
+ int pll_id, int source, unsigned int freq_in,
+ unsigned int freq_out);
+
+static int aic3262_multi_i2s_asi1_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt);
+
+static int aic3262_multi_i2s_asi2_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt);
+
+static int aic3262_multi_i2s_asi3_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt);
+
+static int aic3262_multi_i2s_asi1_mute(struct snd_soc_dai *dai, int mute);
+
+static int aic3262_multi_i2s_asi2_mute(struct snd_soc_dai *dai, int mute);
+
+static int aic3262_multi_i2s_asi3_mute(struct snd_soc_dai *dai, int mute);
+
+static const char *wclk1_pincontrol[] = {
+ "ASI1 Word Clock Input/Output", "CLKOUT output"};
+static const char *dout1_pincontrol[] = {
+ "disabled", "ASI1 data output", "gpio", "clock out",
+ "INT1", "INT2", "SAR ADC interrupt"};
+
+static const char *din1_pincontrol[] = {"disabled", "enabled"};
+
+static const char *wclk2_pincontrol[] = {
+ "diabled", "ASI1 secondary wclk", "general purpose input",
+ "general purpose output", "clkout", "INT1 interrupt",
+ "IN2 interrupt", "output digital microphone",
+ "SAR ADC interrupt", "data output for ASI1"};
+
+static const char *bclk2_pincontrol[] = {
+ "diabled", "ASI1 secondary wclk", "general purpose input",
+ "general purpose output", "clkout", "INT1 interrupt",
+ "IN2 interrupt", "output digital microphone",
+ "SAR ADC interrupt", "data output for ASI1"};
+
+static const char *dout2_pincontrol[] = {
+ "disabled", "ASI2 Data Output", "General Purpose Output",
+ "INT1 Interrupt", "INT2 Interrupt", "SAR ADC interrupt",
+ "Output for digital microphone", "Data Output for ASI1"};
+
+static const char *din2_pincontrol[] = {"disabled", "enabled"};
+
+static const char *wclk3_pincontrol[] = {
+ "Disabled", "ASI3 WCLK", "General Purpose Input",
+ "General Purpose output", "Data Output for ASI1"};
+
+static const char *bclk3_pincontrol[] = {
+ "Disabled", "ASI3 BCLK", "General Purpose Input",
+ "General Purpose output", "Data Output for ASI1"};
+
+static const char *dout3_pincontrol[] = {
+ "disabled", "ASI3 data ooutput", "General Purpose Output",
+ "ASI1 Word Clock Output", "Data Output for ASI1"};
+
+static const char *din3_pincontrol[] = {"disabled", "enabled"};
+
+static const char *clkin[] = {
+ "mclk1", "bclk1", "gpio1", "pll_clk", "bclk2", "gpi1",
+ "hf_ref_clk", "hf_osc_clk", "mclk2", "gpio2", "gpi2"};
+
+/* List of SOC_ENUM structures for the AIC3262 PIN Control Amixer Controls */
+static const struct soc_enum aic326x_enum[] = {
+ SOC_ENUM_SINGLE(WCLK1_PIN_CNTL_REG, 2, 2, wclk1_pincontrol),
+ SOC_ENUM_SINGLE(DOUT1_PIN_CNTL_REG, 1, 7, dout1_pincontrol),
+ SOC_ENUM_SINGLE(DIN1_PIN_CNTL_REG, 5, 2, din1_pincontrol),
+ SOC_ENUM_SINGLE(WCLK2_PIN_CNTL_REG, 2, 10, wclk2_pincontrol),
+ SOC_ENUM_SINGLE(BCLK2_PIN_CNTL_REG, 2, 10, bclk2_pincontrol),
+ SOC_ENUM_SINGLE(DOUT2_PIN_CNTL_REG, 1, 8, dout2_pincontrol),
+ SOC_ENUM_SINGLE(DIN2_PIN_CNTL_REG, 5, 2, din2_pincontrol),
+ SOC_ENUM_SINGLE(WCLK3_PIN_CNTL_REG, 2, 5, wclk3_pincontrol),
+ SOC_ENUM_SINGLE(BCLK3_PIN_CNTL_REG, 2, 5, bclk3_pincontrol),
+ SOC_ENUM_SINGLE(DOUT3_PIN_CNTL_REG, 1, 5, dout3_pincontrol),
+ SOC_ENUM_SINGLE(DIN3_PIN_CNTL_REG, 5, 2, din3_pincontrol),
+ SOC_ENUM_DOUBLE(DAC_ADC_CLKIN_REG, 0, 4, 11, clkin),
+};
+
+#ifdef DAC_INDEPENDENT_VOL
+/*
+ *----------------------------------------------------------------------------
+ * Function : n_control_info
+ * Purpose : This function initializes the element info for the new control
+ * required to program the AIC3262 registers.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int n_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int max = mc->max;
+ unsigned int shift = mc->shift;
+ unsigned int rshift = mc->rshift;
+
+ if (max == 1)
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = shift == rshift ? 1 : 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = max;
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : n_control_get
+ * Purpose : This function reads back the value of the new control used to
+ * program the AIC3262 registers.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int n_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ u32 val;
+ unsigned short mask, shift;
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
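+ /* The DAC volume register holds a signed 8-bit value; the mapping below
+ * folds the signed range 0x81..0x30 (-127..+48) onto the 0..175 scale
+ * exposed by the "Left/Right DAC Volume" controls (max 0xAF).
+ */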
+ if (!strcmp(kcontrol->id.name, "Left DAC Volume")) {
+ mask = AIC3262_8BITS_MASK;
+ shift = 0;
+ val = aic3262_read(codec, mc->reg);
+ ucontrol->value.integer.value[0] =
+ (val <= 48) ? (val + 127) : (val - 129);
+ }
+ if (!strcmp(kcontrol->id.name, "Right DAC Volume")) {
+ mask = AIC3262_8BITS_MASK;
+ shift = 0;
+ val = aic3262_read(codec, mc->reg);
+ ucontrol->value.integer.value[0] =
+ (val <= 48) ? (val + 127) : (val - 129);
+ }
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : n_control_put
+ * Purpose : n_control_put is called to pass data from user/application to
+ * the driver.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int n_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u8 val, val_mask;
+ int reg, err;
+ unsigned int invert = mc->invert;
+ int max = mc->max;
+ DBG("n_control_put\n");
+ reg = mc->reg;
+ val = ucontrol->value.integer.value[0];
+ if (invert)
+ val = max - val;
+ if (!strcmp(kcontrol->id.name, "Left DAC Volume")) {
+ DBG("LDAC\n");
+ val = (val >= 127) ? (val - 127) : (val + 129);
+ val_mask = AIC3262_8BITS_MASK;
+ }
+ if (!strcmp(kcontrol->id.name, "Right DAC Volume")) {
+ DBG("RDAC\n");
+ val = (val >= 127) ? (val - 127) : (val + 129);
+ val_mask = AIC3262_8BITS_MASK;
+ }
+
+ err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+ if (err < 0) {
+ printk(KERN_ERR "Error while updating bits\n");
+ return err;
+ }
+
+ return 0;
+}
+#endif /*#ifdef DAC_INDEPENDENT_VOL*/
+/*
+ *------------------------------------------------------------------------------
+ * snd_soc_info_volsw_2r_n - double mixer info callback
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a double mixer control that
+ * spans 2 codec registers.
+ *
+ * Returns 0 for success.
+ *------------------------------------------------------------------------------
+ */
+int snd_soc_info_volsw_2r_n(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int max = mc->max;
+
+ if (max == 1)
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = max;
+ return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * snd_soc_get_volsw_2r_n - double mixer get callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value of a double mixer control that spans 2 registers.
+ *
+ * Returns 0 for success.
+ *------------------------------------------------------------------------------
+ */
+int snd_soc_get_volsw_2r_n(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ int max = mc->max;
+ unsigned int mask;
+ unsigned int invert = mc->invert;
+ unsigned short val, val2;
+
+ if (!strcmp(kcontrol->id.name, "PCM Playback Volume")) {
+ mask = AIC3262_8BITS_MASK;
+ shift = 0;
+ } else if (!strcmp(kcontrol->id.name, "HP Driver Gain")) {
+ mask = 0x3F;
+ shift = 0;
+ } else if (!strcmp(kcontrol->id.name, "PGA Capture Volume")) {
+ mask = 0x7F;
+ shift = 0;
+ } else if (!strcmp(kcontrol->id.name, "REC Driver Volume")) {
+ mask = 0x3F;
+ shift = 0;
+ } else if (!strcmp(kcontrol->id.name, "LO to HP Volume")) {
+ mask = 0x7F;
+ shift = 0;
+ } else if (!strcmp(kcontrol->id.name, "MA Volume")) {
+ mask = 0x7F;
+ shift = 0;
+ } else {
+ printk(KERN_ERR "Invalid kcontrol name\n");
+ return -1;
+ }
+
+ /* Read, update the corresponding Registers */
+ val = (aic3262_read(codec, reg) >> shift) & mask;
+ val2 = (aic3262_read(codec, reg2) >> shift) & mask;
+
+ if (!strcmp(kcontrol->id.name, "PCM Playback Volume")) {
+ ucontrol->value.integer.value[0] =
+ (val <= 48) ? (val + 127) : (val - 129);
+ ucontrol->value.integer.value[1] =
+ (val2 <= 48) ? (val2 + 127) : (val2 - 129);
+ } else if (!strcmp(kcontrol->id.name, "HP Driver Gain")) {
+ ucontrol->value.integer.value[0] =
+ (val >= 57) ? (val - 57) : (val + 7);
+ ucontrol->value.integer.value[1] =
+ (val2 >= 57) ? (val2 - 57) : (val2 + 7);
+ } else if (!strcmp(kcontrol->id.name, "PGA Capture Volume")) {
+ ucontrol->value.integer.value[0] =
+ (val <= 40) ? (val + 24) : (val - 104);
+ ucontrol->value.integer.value[1] =
+ (val2 <= 40) ? (val2 + 24) : (val2 - 104);
+ } else if (!strcmp(kcontrol->id.name, "REC Driver Volume")) {
+ ucontrol->value.integer.value[0] = ((val >= 0) & (val <= 29)) ?
+ (val + 7) : (val - 57);
+ ucontrol->value.integer.value[1] = ((val2 >= 0) &
+ (val2 <= 29)) ? (val2 + 7) : (val2 - 57);
+
+ } else if (!strcmp(kcontrol->id.name, "LO to HP Volume")) {
+ ucontrol->value.integer.value[0] = ((val >= 0) & (val <= 116)) ?
+ (val + 1) : ((val == 127) ? (0) : (117));
+ ucontrol->value.integer.value[1] = ((val2 >= 0) & (val2 <= 116))
+ ? (val2 + 1) : ((val2 == 127) ? (0) : (117));
+
+ } else if (!strcmp(kcontrol->id.name, "MA Volume")) {
+ ucontrol->value.integer.value[0] = (val <= 40) ?
+ (41 - val) : (val = 0);
+ ucontrol->value.integer.value[1] = (val2 <= 40) ?
+ (41 - val2) : (val2 = 0);
+ }
+
+ if (invert) {
+ ucontrol->value.integer.value[0] =
+ max - ucontrol->value.integer.value[0];
+ ucontrol->value.integer.value[1] =
+ max - ucontrol->value.integer.value[1];
+ }
+
+ return 0;
+}
+/*
+*-------------------------------------------------------------------------------
+* snd_soc_put_volsw_2r_n - double mixer set callback
+* @kcontrol: mixer control
+* @ucontrol: control element information
+*
+* Callback to set the value of a double mixer control that spans 2 registers.
+*
+* Returns 0 for success.
+*-------------------------------------------------------------------------------
+*/
+int snd_soc_put_volsw_2r_n(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ int max = mc->max;
+ unsigned int mask;
+ unsigned int invert = mc->invert;
+ int err;
+ unsigned short val, val2, val_mask;
+
+ mask = 0x00FF;
+
+ val = (ucontrol->value.integer.value[0] & mask);
+ val2 = (ucontrol->value.integer.value[1] & mask);
+ if (invert) {
+ val = max - val;
+ val2 = max - val2;
+ }
+
+ /* Check for the string name of the kcontrol */
+ if (!strcmp(kcontrol->id.name, "PCM Playback Volume")) {
+ val = (val >= 127) ? (val - 127) : (val + 129);
+ val2 = (val2 >= 127) ? (val2 - 127) : (val2 + 129);
+ val_mask = AIC3262_8BITS_MASK; /* 8 bits */
+ } else if ((!strcmp(kcontrol->id.name, "HP Driver Gain")) ||
+ (!strcmp(kcontrol->id.name, "LO Driver Gain"))) {
+ val = (val <= 6) ? (val + 57) : (val - 7);
+ val2 = (val2 <= 6) ? (val2 + 57) : (val2 - 7);
+ val_mask = 0x3F; /* 6 bits */
+ DBG("val=%d, val2=%d", val, val2);
+ } else if (!strcmp(kcontrol->id.name, "PGA Capture Volume")) {
+ val = (val >= 24) ? ((val <= 64) ?
+ (val-24) : (40)) : (val + 104);
+ val2 = (val2 >= 24) ?
+ ((val2 <= 64) ? (val2 - 24) : (40)) : (val2 + 104);
+ val_mask = 0x7F; /* 7 bits */
+ } else if (!strcmp(kcontrol->id.name, "LO to REC Volume")) {
+
+ val = (val <= 116) ?
+ (val % 116) : ((val == 117) ? (127) : (117));
+ val2 = (val2 <= 116) ?
+ (val2 % 116) : ((val2 == 117) ? (127) : (117));
+ val_mask = 0x7F;
+ } else if (!strcmp(kcontrol->id.name, "REC Driver Volume")) {
+
+ val = (val <= 7) ? (val + 57) : ((val < 36) ? (val - 7) : (29));
+ val2 = (val2 <= 7) ?
+ (val2 + 57) : ((val2 < 36) ? (val2 - 7) : (29));
+ val_mask = 0x3F;
+ } else if (!strcmp(kcontrol->id.name, "LO to HP Volume")) {
+
+ val = ((val > 0) & (val <= 117)) ?
+ (val - 1) : ((val == 0) ? (127) : (116));
+ val2 = ((val2 > 0) & (val2 <= 117)) ?
+ (val2 - 1) : ((val2 == 0) ? (127) : (116));
+ val_mask = 0x7F;
+ } else if (!strcmp(kcontrol->id.name, "MA Volume")) {
+
+ val = ((val <= 41) & (val > 0)) ?
+ (41 - val) : ((val > 41) ? (val = 41) : (63));
+ val2 = ((val2 <= 41) & (val2 > 0)) ?
+ (41 - val2) : ((val2 > 41) ? (val2 = 41) : (63));
+ val_mask = 0x7F;
+ } else {
+ printk(KERN_ERR "Invalid control name\n");
+ return -1;
+ }
+
+ val = val << shift;
+ val2 = val2 << shift;
+
+ err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+ if (err < 0)
+ return err;
+
+ err = snd_soc_update_bits_locked(codec, reg2, val_mask, val2);
+ return err;
+}
+
+static int __new_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 65535;
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_get
+ * Purpose : This function reads back the value of the new control used to
+ * program the AIC3262 registers.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int __new_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ u32 val;
+ val = aic3262_read(codec, aic3262_reg_ctl);
+ ucontrol->value.integer.value[0] = val;
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_put
+ * Purpose : __new_control_put is called to pass data from user/application to
+ * the driver.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int __new_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 data[2];
+ int ret = 0;
+
+ u32 data_from_user = ucontrol->value.integer.value[0];
+
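+ /* The 16-bit control value packs the target register in the upper byte and
+ * the value to write in the lower byte; a register byte of zero is a write
+ * to the page-select register, so the new page number is cached as well.
+ */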
+ aic3262_change_book(codec, 0);
+ aic3262_reg_ctl = data[0] = (u8) ((data_from_user & 0xFF00) >> 8);
+ data[1] = (u8) ((data_from_user & 0x00FF));
+
+ if (!data[0])
+ aic3262->page_no = data[1];
+
+ DBG("reg = %d val = %x\n", data[0], data[1]);
+#if defined(LOCAL_REG_ACCESS)
+ if (codec->hw_write(codec->control_data, data, 2) != 2)
+ ret = -EIO;
+#else
+ ret = snd_soc_write(codec, data[0], data[1]);
+#endif
+ if (ret)
+ printk(KERN_ERR "Error in i2c write\n");
+
+ return ret;
+}
+
+
+/*
+ *****************************************************************************
+ * Structure Initialization
+ *****************************************************************************
+ */
+static const struct snd_kcontrol_new aic3262_snd_controls[] = {
+ /* Output */
+ #ifndef DAC_INDEPENDENT_VOL
+ /* sound new kcontrol for PCM Playback volume control */
+ SOC_DOUBLE_R_N("PCM Playback Volume", DAC_LVOL, DAC_RVOL, 0, 0xAf, 0),
+ #endif
+ /* sound new kcontrol for HP driver gain */
+ SOC_DOUBLE_R_N("HP Driver Gain", HPL_VOL, HPR_VOL, 0, 21, 0),
+ /* sound new kcontrol for SPK driver gain*/
+ SOC_DOUBLE("SPK Driver Gain", SPK_AMP_CNTL_R4, 0, 4, 5, 0),
+ /* Receiver driver gain volume */
+ SOC_DOUBLE_R_N("REC Driver Volume",
+ REC_AMP_CNTL_R5, RAMPR_VOL, 0, 36, 0),
+ /* sound new kcontrol for PGA capture volume */
+ SOC_DOUBLE_R_N("PGA Capture Volume", LADC_VOL, RADC_VOL, 0, 0x3F,
+ 0),
+ SOC_DOUBLE("ADC Fine Gain ", ADC_FINE_GAIN, 4, 0, 4, 1),
+ SOC_DOUBLE_R("PGA MIC Volume", MICL_PGA, MICR_PGA, 0, 95, 0),
+
+ SOC_DOUBLE_R("LO to REC Volume", RAMP_CNTL_R1, RAMP_CNTL_R2, 0, 116, 1),
+ SOC_DOUBLE_R_N("LO to HP Volume",
+ HP_AMP_CNTL_R2, HP_AMP_CNTL_R3, 0, 117, 0),
+ SOC_DOUBLE_R("LO to SPK Volume",
+ SPK_AMP_CNTL_R2, SPK_AMP_CNTL_R3, 0, 116, 1),
+ SOC_DOUBLE("ADC channel mute", ADC_FINE_GAIN, 7, 3, 1, 0),
+
+ SOC_DOUBLE("DAC MUTE", DAC_MVOL_CONF, 2, 3, 1, 1),
+
+ /* sound new kcontrol for Programming the registers from user space */
+ SOC_SINGLE_AIC3262("Program Registers"),
+
+ SOC_SINGLE("RESET", RESET_REG, 0 , 1, 0),
+
+ SOC_SINGLE("DAC VOL SOFT STEPPING", DAC_MVOL_CONF, 0, 2, 0),
+
+ #ifdef DAC_INDEPENDENT_VOL
+ SOC_SINGLE_N("Left DAC Volume", DAC_LVOL, 0, 0xAF, 0),
+ SOC_SINGLE_N("Right DAC Volume", DAC_RVOL, 0, 0xAF, 0),
+ #endif
+
+ SOC_SINGLE("DAC AUTO MUTE CONTROL", DAC_MVOL_CONF, 4, 7, 0),
+ SOC_SINGLE("RIGHT MODULATOR SETUP", DAC_MVOL_CONF, 7, 1, 0),
+
+ SOC_SINGLE("ADC Volume soft stepping", ADC_CHANNEL_POW, 0, 3, 0),
+
+ SOC_DOUBLE_R_N("MA Volume",
+ LADC_PGA_MAL_VOL, RADC_PGA_MAR_VOL, 0, 42, 0),
+
+ SOC_SINGLE("Mic Bias ext independent enable", MIC_BIAS_CNTL, 7, 1, 0),
+ SOC_SINGLE("MICBIAS_EXT ON", MIC_BIAS_CNTL, 6, 1, 0),
+ SOC_SINGLE("MICBIAS EXT Power Level", MIC_BIAS_CNTL, 4, 3, 0),
+
+ SOC_SINGLE("MICBIAS_INT ON", MIC_BIAS_CNTL, 2, 1, 0),
+ SOC_SINGLE("MICBIAS INT Power Level", MIC_BIAS_CNTL, 0, 3, 0),
+
+ SOC_DOUBLE("DRC_EN_CTL", DRC_CNTL_R1, 6, 5, 1, 0),
+ SOC_SINGLE("DRC_THRESHOLD_LEVEL", DRC_CNTL_R1, 2, 7, 1),
+ SOC_SINGLE("DRC_HYSTERISIS_LEVEL", DRC_CNTL_R1, 0, 7, 0),
+
+ SOC_SINGLE("DRC_HOLD_LEVEL", DRC_CNTL_R2, 3, 0x0F, 0),
+ SOC_SINGLE("DRC_GAIN_RATE", DRC_CNTL_R2, 0, 4, 0),
+ SOC_SINGLE("DRC_ATTACK_RATE", DRC_CNTL_R3, 4, 0x0F, 1),
+ SOC_SINGLE("DRC_DECAY_RATE", DRC_CNTL_R3, 0, 0x0F, 1),
+
+ SOC_SINGLE("BEEP_GEN_EN", BEEP_CNTL_R1, 7, 1, 0),
+ SOC_DOUBLE_R("BEEP_VOL_CNTL", BEEP_CNTL_R1, BEEP_CNTL_R2, 0, 0x0F, 1),
+ SOC_SINGLE("BEEP_MAS_VOL", BEEP_CNTL_R2, 6, 3, 0),
+
+ SOC_DOUBLE_R("AGC_EN", LAGC_CNTL, RAGC_CNTL, 7, 1, 0),
+ SOC_DOUBLE_R("AGC_TARGET_LEVEL", LAGC_CNTL, RAGC_CNTL, 4, 7, 1),
+
+ SOC_DOUBLE_R("AGC_GAIN_HYSTERESIS", LAGC_CNTL, RAGC_CNTL, 0, 3, 0),
+ SOC_DOUBLE_R("AGC_HYSTERESIS", LAGC_CNTL_R2, RAGC_CNTL_R2, 6, 3, 0),
+ SOC_DOUBLE_R("AGC_NOISE_THRESHOLD",
+ LAGC_CNTL_R2, RAGC_CNTL_R2, 1, 31, 1),
+
+ SOC_DOUBLE_R("AGC_MAX_GAIN", LAGC_CNTL_R3, RAGC_CNTL_R3, 0, 116, 0),
+ SOC_DOUBLE_R("AGC_ATCK_TIME", LAGC_CNTL_R4, RAGC_CNTL_R4, 3, 31, 0),
+ SOC_DOUBLE_R("AGC_ATCK_SCALE_FACTOR",
+ LAGC_CNTL_R4, RAGC_CNTL_R4, 0, 7, 0),
+
+ SOC_DOUBLE_R("AGC_DECAY_TIME", LAGC_CNTL_R5, RAGC_CNTL_R5, 3, 31, 0),
+ SOC_DOUBLE_R("AGC_DECAY_SCALE_FACTOR",
+ LAGC_CNTL_R5, RAGC_CNTL_R5, 0, 7, 0),
+ SOC_DOUBLE_R("AGC_NOISE_DEB_TIME",
+ LAGC_CNTL_R6, RAGC_CNTL_R6, 0, 31, 0),
+
+ SOC_DOUBLE_R("AGC_SGL_DEB_TIME",
+ LAGC_CNTL_R7, RAGC_CNTL_R7, 0, 0x0F, 0),
+ SOC_SINGLE("DAC PRB Selection", DAC_PRB, 0, 25, 0),
+
+ SOC_SINGLE("INTERRUPT FLAG - Read only", 46, 0, 255, 0),
+ SOC_SINGLE("INTERRUPT STICKY FLAG - Read only", 44, 0, 255, 0),
+ SOC_SINGLE("INT1 CONTROL", 48, 0, 255, 0),
+ SOC_SINGLE("GPIO1 CONTROL", (PAGE_4 + 86), 0, 255, 0),
+ SOC_SINGLE("HP_DEPOP", HP_DEPOP, 0, 255, 0),
+
+ #if defined(FULL_IN_CNTL)
+
+ SOC_SINGLE("IN1L_2_LMPGA_P_CTL", LMIC_PGA_PIN, 6, 3, 0),
+ SOC_SINGLE("IN2L_2_LMPGA_P_CTL", LMIC_PGA_PIN, 4, 3, 0),
+ SOC_SINGLE("IN3L_2_LMPGA_P_CTL", LMIC_PGA_PIN, 2, 3, 0),
+ SOC_SINGLE("IN1R_2_LMPGA_P_CTL", LMIC_PGA_PIN, 0, 3, 0),
+
+ SOC_SINGLE("IN4L_2_LMPGA_P_CTL", LMIC_PGA_PM_IN4, 5, 1, 0),
+ SOC_SINGLE("IN4R_2_LMPGA_M_CTL", LMIC_PGA_PM_IN4, 4, 1, 0),
+
+ SOC_SINGLE("CM1_2_LMPGA_M_CTL", LMIC_PGA_MIN, 6, 3, 0),
+ SOC_SINGLE("IN2R_2_LMPGA_M_CTL", LMIC_PGA_MIN, 4, 3, 0),
+ SOC_SINGLE("IN3R_2_LMPGA_M_CTL", LMIC_PGA_MIN, 2, 3, 0),
+ SOC_SINGLE("CM2_2_LMPGA_M_CTL", LMIC_PGA_MIN, 0, 3, 0),
+
+ SOC_SINGLE("IN1R_2_RMPGA_P_CTL", RMIC_PGA_PIN, 6, 3, 0),
+ SOC_SINGLE("IN2R_2_RMPGA_P_CTL", RMIC_PGA_PIN, 4, 3, 0),
+ SOC_SINGLE("IN3R_2_RMPGA_P_CTL", RMIC_PGA_PIN, 2, 3, 0),
+ SOC_SINGLE("IN2L_2_RMPGA_P_CTL", RMIC_PGA_PIN, 0, 3, 0),
+
+ SOC_SINGLE("IN4R_2_RMPGA_P_CTL", RMIC_PGA_PM_IN4, 5, 1, 0),
+ SOC_SINGLE("IN4L_2_RMPGA_M_CTL", RMIC_PGA_PM_IN4, 4, 1, 0),
+
+ SOC_SINGLE("CM1_2_RMPGA_M_CTL", RMIC_PGA_MIN, 6, 3, 0),
+ SOC_SINGLE("IN1L_2_RMPGA_M_CTL", RMIC_PGA_MIN, 4, 3, 0),
+ SOC_SINGLE("IN3L_2_RMPGA_M_CTL", RMIC_PGA_MIN, 2, 3, 0),
+ SOC_SINGLE("CM2_2_RMPGA_M_CTL", RMIC_PGA_MIN, 0, 3, 0),
+
+ #endif
+
+ SOC_DOUBLE("MA_EN_CNTL", MA_CNTL, 3, 2, 1, 0),
+ SOC_DOUBLE("IN1 LO DIRECT BYPASS", MA_CNTL, 5, 4, 1, 0),
+ SOC_DOUBLE("IN1 LO BYPASS VOLUME" , LINE_AMP_CNTL_R2, 3, 0, 3, 1),
+ SOC_DOUBLE("MA LO BYPASS EN", LINE_AMP_CNTL_R2, 7, 6, 1, 0),
+
+ /* Pin control Macros */
+ SOC_ENUM("DOUT1 Pin Control", aic326x_enum[DOUT1_ENUM]),
+ SOC_ENUM("DIN1 Pin Control", aic326x_enum[DIN1_ENUM]),
+ SOC_ENUM("WCLK2 Pin Control", aic326x_enum[WCLK2_ENUM]),
+ SOC_ENUM("BCLK2 Pin Control", aic326x_enum[BCLK2_ENUM]),
+ SOC_ENUM("DOUT2 Pin Control", aic326x_enum[DOUT2_ENUM]),
+ SOC_ENUM("DIN2 Pin Control", aic326x_enum[DIN2_ENUM]),
+ SOC_ENUM("WCLK3 Pin Control", aic326x_enum[WCLK3_ENUM]),
+ SOC_ENUM("BCLK3 Pin Control", aic326x_enum[BCLK3_ENUM]),
+ SOC_ENUM("DOUT3 Pin Control", aic326x_enum[DOUT3_ENUM]),
+ SOC_ENUM("DIN3 Pin Control", aic326x_enum[DIN3_ENUM]),
+ SOC_ENUM("DAC CLK IN", aic326x_enum[CLKIN_ENUM]),
+ SOC_ENUM("ADC CLK IN", aic326x_enum[CLKIN_ENUM]),
+
+};
+
+/* the structure contains the divider values for the different mclk rates */
+static const struct aic3262_rate_divs aic3262_divs[] = {
+/*
+ * mclk, rate, p_val, pll_j, pll_d, dosr, ndac, mdac, aosr, nadc, madc, blck_N,
+ * codec_specific_initializations
+ */
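+/*
+ * Quick sanity check on the 12 MHz / 8 kHz rows, assuming the usual
+ * TLV320AIC3xxx clock tree with PLL R = 1: PLL_CLK = 12 MHz * 8.1920
+ * = 98.304 MHz, and in both rows NDAC * MDAC * DOSR = 12288, so
+ * DAC_FS = 98.304 MHz / 12288 = 8 kHz.
+ */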
+ /* 8k rate */
+#ifdef CONFIG_MINI_DSP
+ {12000000, 8000, 1, 8, 1920, 768, 8, 2, 128, 8, 12, 4,
+ {{0, 60, 0}, {0, 61, 0} } },
+#else
+ {12000000, 8000, 1, 8, 1920, 128, 12, 8, 128, 8, 6, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 8000, 1, 1, 3333, 128, 12, 8, 128, 8, 6, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 8000, 1, 4, 96, 128, 12, 8, 128, 12, 8, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+#endif
+ /* 11.025k rate */
+ {12000000, 11025, 1, 1, 8816, 1024, 8, 2, 128, 8, 2, 48,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 11025, 1, 1, 8375, 1024, 8, 2, 128, 8, 2, 48,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 11025, 1, 3, 7632, 128, 8, 8, 128, 8, 8, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+
+ /* 16k rate */
+#ifdef CONFIG_MINI_DSP
+ {12000000, 16000, 1, 8, 1920, 384, 4, 4, 128, 4, 12, 12,
+ {{0, 60, 0}, {0, 61, 0} } },
+#else
+ {12000000, 16000, 1, 8, 1920, 128, 8, 6, 128, 8, 6, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 16000, 1, 2, 6667, 128, 8, 6, 128, 8, 6, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 16000, 1, 4, 96, 128, 8, 6, 128, 8, 6, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+#endif
+ /* 22.05k rate */
+ {12000000, 22050, 1, 3, 7632, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 22050, 1, 3, 675, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 22050, 1, 3, 7632, 128, 8, 3, 128, 8, 3, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ /* 32k rate */
+ {12000000, 32000, 1, 5, 4613, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 32000, 1, 5, 3333, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 32000, 1, 4, 96, 128, 6, 4, 128, 6, 4, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+
+#ifdef CONFIG_MINI_DSP
+ {12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4,
+ {{0, 60, 0}, {0, 61, 0} } },
+ {12288000, 44100, 1, 7, 3548, 128, 2, 8, 128, 8, 2, 4,
+ {{0, 60, 0}, {0, 61, 0} } },
+#else
+ /* 44.1k rate */
+ {12000000, 44100, 1, 7, 5264, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 44100, 1, 7, 3548, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 44100, 1, 3, 7632, 128, 4, 4, 64, 4, 4, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+#endif
+
+#ifdef CONFIG_MINI_DSP
+ {12288000, 48000, 1, 8, 52, 128, 2, 8, 128, 2, 8, 4,
+ {{0, 60, 0}, {0, 61, 0} } },
+#else
+ /* 48k rate */
+ {12000000, 48000, 1, 8, 1920, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {12288000, 48000, 1, 8, 52, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+ {24000000, 48000, 1, 4, 960, 128, 4, 4, 128, 4, 4, 4,
+ {{0, 60, 1}, {0, 61, 1} } },
+#endif
+
+ /*96k rate */
+ {12000000, 96000, 1, 16, 3840, 128, 8, 2, 128, 8, 2 , 4,
+ {{0, 60, 7}, {0, 61, 7} } },
+ {24000000, 96000, 1, 4, 960, 128, 4, 2, 128, 4, 2, 2,
+ {{0, 60, 7}, {0, 61, 7} } },
+ /*192k */
+ {12000000, 192000, 1, 32, 7680, 128, 8, 2, 128, 8, 2, 4,
+ {{0, 60, 17}, {0, 61, 13} } },
+ {24000000, 192000, 1, 4, 960, 128, 2, 2, 128, 2, 2, 4,
+ {{0, 60, 17}, {0, 61, 13} } },
+};
+
+
+
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_dump_regs
+* Purpose : This function dumps the Page 0, Page 1 and Page 4 registers
+*           along with the per-ASI configuration, for debugging
+*
+*----------------------------------------------------------------------------
+*/
+static void aic3262_multi_i2s_dump_regs(struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ unsigned int counter;
+
+ DBG(KERN_INFO "#%s: Dai Active %d ASI%d REGS DUMP\n",
+ __func__, aic3262->active_count, dai->id);
+
+ aic3262_change_page(codec, 0);
+ aic3262_change_book(codec, 0);
+
+ DBG(KERN_INFO "#Page0 REGS..\n");
+ for (counter = 0; counter < 85; counter++) {
+ DBG(KERN_INFO "#%2d -> 0x%x\n", counter,
+ aic3262_read(codec, counter));
+ }
+
+ DBG(KERN_INFO "#Page1 REGS..\n");
+ for (counter = 128; counter < 176; counter++) {
+ DBG(KERN_INFO "#%2d -> 0x%x\n", (counter % 128),
+ aic3262_read(codec, counter));
+ }
+
+ DBG(KERN_INFO "#Page4 REGS..\n");
+ for (counter = 512; counter < 631; counter++) {
+ DBG(KERN_INFO "#%2d -> 0x%x\n",
+ (counter % 128), aic3262_read(codec, counter));
+ }
+
+ for (counter = 0; counter < MAX_ASI_COUNT; counter++) {
+ DBG(KERN_INFO "#ASI%d Frame %s @ %dHz Playback %d Record %d\n",
+ (counter + 1),
+ (aic3262->asiCtxt[counter].master == 1) ? "Master" : "Slave",
+ aic3262->asiCtxt[counter].sampling_rate,
+ aic3262->asiCtxt[counter].playback_mode,
+ aic3262->asiCtxt[counter].capture_mode);
+ DBG(KERN_INFO "#DAC Option [%d,%d] ADC Option %d WLEN %d\n\n",
+ aic3262->asiCtxt[counter].left_dac_output,
+ aic3262->asiCtxt[counter].right_dac_output,
+ aic3262->asiCtxt[counter].adc_input,
+ aic3262->asiCtxt[counter].word_len);
+ }
+ return;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_multi_i2s_mute
+ * Purpose : This function mutes or unmutes the codec by dispatching to the
+ *           per-ASI mute handler selected by the DAI id
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_multi_i2s_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s : mute entered with %d\n", __func__, mute);
+
+ /* If we are doing both Recording and Playback on this DAI interface,
+ * do not MUTE the Codec.
+ */
+ if (mute && (aic3262->asiCtxt[dai->id - 1].asi_active > 1)) {
+ DBG("#%s Cannot Mute the ASI%d Now..\n",
+ __func__, dai->id);
+ } else {
+ switch (dai->id) {
+ case 1:
+ aic3262_multi_i2s_asi1_mute(dai, mute);
+ break;
+ case 2:
+ aic3262_multi_i2s_asi2_mute(dai, mute);
+ break;
+ case 3:
+ aic3262_multi_i2s_asi3_mute(dai, mute);
+ break;
+ default:
+ printk(KERN_ERR "#%s: Invalid DAI id\n", __func__);
+ return -EINVAL;
+ }
+ }
+ DBG(KERN_INFO "#%s : mute ended\n", __func__);
+ return 0;
+}
+
+
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_asi1_mute
+* Purpose : This function mutes or unmutes the playback and capture paths
+*           used by the ASI1 port
+*
+*----------------------------------------------------------------------------
+*/
+static int aic3262_multi_i2s_asi1_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s : mute %d started\n", __func__, mute);
+
+ if (mute) {
+ DBG(KERN_INFO "Mute if part\n");
+
+ if (!aic3262->asiCtxt[0].port_muted) {
+ dac_reg = aic3262_read(codec, DAC_MVOL_CONF);
+ adc_gain = aic3262_read(codec, ADC_FINE_GAIN);
+ hpl = aic3262_read(codec, HPL_VOL);
+ hpr = aic3262_read(codec, HPR_VOL);
+ rec_amp = aic3262_read(codec, REC_AMP_CNTL_R5);
+ rampr = aic3262_read(codec, RAMPR_VOL);
+ spk_amp = aic3262_read(codec, SPK_AMP_CNTL_R4);
+ }
+ DBG(KERN_INFO "spk_reg = %2x\n\n", spk_amp);
+
+ /* First check whether both Playback and Recording are active on
+ * this interface.
+ */
+ if (aic3262->asiCtxt[0].asi_active > 1) {
+ DBG("#%s Cannot Mute the ASI Now..\n", __func__);
+ } else if (!(aic3262->asiCtxt[1].playback_mode) &&
+ !(aic3262->asiCtxt[2].playback_mode)) {
+ /* Before muting, check whether any other
+ * ASI is active; if so, we cannot simply mute the
+ * DAC and ADC registers.
+ */
+ aic3262_write(codec, DAC_MVOL_CONF,
+ ((dac_reg & 0xF3) | 0x0C));
+ aic3262_write(codec, ADC_FINE_GAIN,
+ ((adc_gain & 0x77) | 0x88));
+ aic3262_write(codec, HPL_VOL, 0xB9);
+ aic3262_write(codec, HPR_VOL, 0xB9);
+ aic3262_write(codec, REC_AMP_CNTL_R5, 0x39);
+ aic3262_write(codec, RAMPR_VOL, 0x39);
+ aic3262_write(codec, SPK_AMP_CNTL_R4, 0x00);
+ aic3262->asiCtxt[0].port_muted = 1;
+ } else {
+ DBG(KERN_INFO
+ "#%s: Other ASI Active. Cannot MUTE Codec..\n",
+ __func__);
+ }
+ } else {
+ DBG(KERN_INFO "Mute else part\n");
+ aic3262_write(codec, DAC_MVOL_CONF, (dac_reg & 0xF3));
+ mdelay(5);
+ aic3262_write(codec, ADC_FINE_GAIN, (adc_gain & 0x77));
+ mdelay(5);
+ aic3262_write(codec, HPL_VOL, hpl);
+ mdelay(5);
+ aic3262_write(codec, HPR_VOL, hpr);
+ mdelay(5);
+ aic3262_write(codec, REC_AMP_CNTL_R5, rec_amp);
+ mdelay(5);
+ aic3262_write(codec, RAMPR_VOL, rampr);
+ mdelay(5);
+ aic3262_write(codec, SPK_AMP_CNTL_R4, spk_amp);
+ mdelay(5);
+
+ /* Basic hack */
+ aic3262_write(codec, LINE_AMP_CNTL_R1, 0xc3);
+ aic3262_write(codec, HP_AMP_CNTL_R1, 0x33);
+
+ aic3262->asiCtxt[0].port_muted = 0;
+ aic3262_multi_i2s_dump_regs(dai);
+ }
+
+ DBG(KERN_INFO "#%s : mute %d ended\n", __func__, mute);
+
+ return 0;
+}
+
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_asi2_mute
+* Purpose : This function mutes or unmutes the playback and capture paths
+*           used by the ASI2 port
+*
+*----------------------------------------------------------------------------
+*/
+static int aic3262_multi_i2s_asi2_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s : mute %d started\n", __func__, mute);
+
+ if (mute) {
+ DBG(KERN_INFO "Mute if part\n");
+
+ if (!aic3262->asiCtxt[1].port_muted) {
+ dac_reg = aic3262_read(codec, DAC_MVOL_CONF);
+ adc_gain = aic3262_read(codec, ADC_FINE_GAIN);
+ hpl = aic3262_read(codec, HPL_VOL);
+ hpr = aic3262_read(codec, HPR_VOL);
+ rec_amp = aic3262_read(codec, REC_AMP_CNTL_R5);
+ rampr = aic3262_read(codec, RAMPR_VOL);
+ spk_amp = aic3262_read(codec, SPK_AMP_CNTL_R4);
+ DBG(KERN_INFO "spk_reg = %2x\n\n", spk_amp);
+ }
+
+ /* First check whether both Playback and Recording are active on
+ * this interface.
+ */
+ if (aic3262->asiCtxt[1].asi_active > 1) {
+ DBG("#%s Cannot Mute the ASI Now..\n", __func__);
+ } else if (!(aic3262->asiCtxt[0].playback_mode) &&
+ !(aic3262->asiCtxt[2].playback_mode)) {
+ /* Before muting, check whether any other
+ * ASI is active; if so, we cannot simply mute the
+ * DAC and ADC registers.
+ */
+ aic3262_write(codec, DAC_MVOL_CONF,
+ ((dac_reg & 0xF3) | 0x0C));
+ aic3262_write(codec, ADC_FINE_GAIN,
+ ((adc_gain & 0x77) | 0x88));
+ aic3262_write(codec, HPL_VOL, 0xB9);
+ aic3262_write(codec, HPR_VOL, 0xB9);
+ aic3262_write(codec, REC_AMP_CNTL_R5, 0x39);
+ aic3262_write(codec, RAMPR_VOL, 0x39);
+ aic3262_write(codec, SPK_AMP_CNTL_R4, 0x00);
+ aic3262->asiCtxt[1].port_muted = 1;
+ } else {
+ DBG("#%s: Other ASI Active. Cannot MUTE Codec..\n",
+ __func__);
+ }
+ } else {
+ DBG(KERN_INFO "Mute else part\n");
+ aic3262_write(codec, DAC_MVOL_CONF, (dac_reg & 0xF3));
+ mdelay(5);
+ aic3262_write(codec, ADC_FINE_GAIN, (adc_gain & 0x77));
+ mdelay(5);
+ aic3262_write(codec, HPL_VOL, hpl);
+ mdelay(5);
+ aic3262_write(codec, HPR_VOL, hpr);
+ mdelay(5);
+ aic3262_write(codec, REC_AMP_CNTL_R5, rec_amp);
+ mdelay(5);
+ aic3262_write(codec, RAMPR_VOL, rampr);
+ mdelay(5);
+ aic3262_write(codec, SPK_AMP_CNTL_R4, spk_amp);
+ mdelay(5);
+
+ /* Basic hack */
+ aic3262_write(codec, LINE_AMP_CNTL_R1, 0xc3);
+ aic3262_write(codec, HP_AMP_CNTL_R1, 0x33);
+
+ aic3262->asiCtxt[1].port_muted = 0;
+ aic3262_multi_i2s_dump_regs(dai);
+ }
+
+ DBG(KERN_INFO "#%s : mute %d ended\n", __func__, mute);
+
+ return 0;
+}
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_asi3_mute
+* Purpose : This function mutes or unmutes the playback and capture paths
+*           used by the ASI3 port
+*
+*----------------------------------------------------------------------------
+*/
+static int aic3262_multi_i2s_asi3_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s : mute %d started\n", __func__, mute);
+
+ if (mute) {
+ DBG("Mute if part\n");
+
+ if (!aic3262->asiCtxt[2].port_muted) {
+ dac_reg = aic3262_read(codec, DAC_MVOL_CONF);
+ adc_gain = aic3262_read(codec, ADC_FINE_GAIN);
+ hpl = aic3262_read(codec, HPL_VOL);
+ hpr = aic3262_read(codec, HPR_VOL);
+ rec_amp = aic3262_read(codec, REC_AMP_CNTL_R5);
+ rampr = aic3262_read(codec, RAMPR_VOL);
+ spk_amp = aic3262_read(codec, SPK_AMP_CNTL_R4);
+ }
+ DBG("spk_reg = %2x\n\n", spk_amp);
+
+ /* First check whether both Playback and Recording are active on
+ * this interface.
+ */
+ if (aic3262->asiCtxt[2].asi_active > 1) {
+ DBG("#%s Cannot Mute the ASI Now..\n", __func__);
+ } else if (!(aic3262->asiCtxt[0].playback_mode) &&
+ !(aic3262->asiCtxt[1].playback_mode)) {
+ /* Before muting, check whether any other
+ * ASI is active; if so, we cannot simply mute the
+ * DAC and ADC registers.
+ */
+ aic3262_write(codec, DAC_MVOL_CONF,
+ ((dac_reg & 0xF3) | 0x0C));
+ aic3262_write(codec, ADC_FINE_GAIN,
+ ((adc_gain & 0x77) | 0x88));
+ aic3262_write(codec, HPL_VOL, 0xB9);
+ aic3262_write(codec, HPR_VOL, 0xB9);
+ aic3262_write(codec, REC_AMP_CNTL_R5, 0x39);
+ aic3262_write(codec, RAMPR_VOL, 0x39);
+ aic3262_write(codec, SPK_AMP_CNTL_R4, 0x00);
+ aic3262->asiCtxt[2].port_muted = 1;
+ } else {
+ DBG("#%s: Other ASI Active. Cannot MUTE Codec..\n",
+ __func__);
+ }
+ } else {
+ DBG("Mute else part\n");
+ aic3262_write(codec, DAC_MVOL_CONF, (dac_reg & 0xF3));
+ mdelay(5);
+ aic3262_write(codec, ADC_FINE_GAIN, (adc_gain & 0x77));
+ mdelay(5);
+ aic3262_write(codec, HPL_VOL, hpl);
+ mdelay(5);
+ aic3262_write(codec, HPR_VOL, hpr);
+ mdelay(5);
+ aic3262_write(codec, REC_AMP_CNTL_R5, rec_amp);
+ mdelay(5);
+ aic3262_write(codec, RAMPR_VOL, rampr);
+ mdelay(5);
+ aic3262_write(codec, SPK_AMP_CNTL_R4, spk_amp);
+ mdelay(5);
+
+ /* Basic hack */
+ aic3262_write(codec, LINE_AMP_CNTL_R1, 0xc3);
+ aic3262_write(codec, HP_AMP_CNTL_R1, 0x33);
+
+ aic3262->asiCtxt[2].port_muted = 0;
+ aic3262_multi_i2s_dump_regs(dai);
+ }
+
+ DBG(KERN_INFO "#%s : mute %d ended\n", __func__, mute);
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_multi_i2s_set_dai_fmt
+ * Purpose : This function is to set the DAI format
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_multi_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ /* Check the DAI Id and based on that switch the configuration for
+ * the Individual ASI Port.
+ */
+ switch (codec_dai->id) {
+ case 1:
+ aic3262_multi_i2s_asi1_set_dai_fmt(codec_dai, fmt);
+ break;
+ case 2:
+ aic3262_multi_i2s_asi2_set_dai_fmt(codec_dai, fmt);
+ break;
+ case 3:
+ aic3262_multi_i2s_asi3_set_dai_fmt(codec_dai, fmt);
+ break;
+ default:
+ printk(KERN_ERR
+ "#%s: Invalid DAI interface format\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_asi1_set_dai_fmt
+* Purpose : This function is to set the DAI format for ASI1 Port
+*
+*----------------------------------------------------------------------------
+*/
+static int aic3262_multi_i2s_asi1_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 iface_reg, clk_reg;
+ u8 regvalue;
+
+ DBG(KERN_INFO "%s: DAI_ID %d fmt %d\n",
+ __func__, codec_dai->id, fmt);
+
+ /* Read the B0_P4_R4 and B0_P4_R10 Registers to configure the
+ * ASI1 Bus and Clock Formats depending on the PCM Format.
+ */
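+ /* Per the comments in the switch below, D5 of ASI1_BWCLK_CNTL_REG makes
+ * WCLK1 an output and D2 makes BCLK1 an output: full master sets both,
+ * CBS_CFM sets only D5 (codec drives just the word clock), and slave
+ * mode clears D[7:2].
+ */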
+ iface_reg = aic3262_read(codec, ASI1_BUS_FMT);
+ clk_reg = aic3262_read(codec, ASI1_BWCLK_CNTL_REG);
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ DBG(KERN_INFO "#%s: Configuring ASI%d as Frame Master..\n",
+ __func__, codec_dai->id);
+ aic3262->asiCtxt[0].master = 1;
+ clk_reg |= (BIT5 | BIT2); /* Codec Interface as Master */
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ DBG(KERN_INFO "#%s: Configuring ASI%d as Frame Slave..\n",
+ __func__, codec_dai->id);
+ clk_reg &= ~0xFC; /* Reset bits D[7:5] and D[4:2] to zero */
+ aic3262->asiCtxt[0].master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ /* new case..just for debugging */
+ DBG(KERN_INFO "%s: SND_SOC_DAIFMT_CBS_CFM\n", __func__);
+ aic3262->asiCtxt[0].master = 0;
+ clk_reg |= BIT5; /* Only WCLK1 Output from Codec */
+ clk_reg &= ~0x1C; /* BCLK1 Input to Codec */
+ break;
+ default:
+ printk(KERN_ERR "#%s: Invalid DAI master/slave interface\n",
+ __func__);
+ return -EINVAL;
+ }
+ aic3262->asiCtxt[0].pcm_format = (fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for I2s Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f);
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for DSP_A Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f) | 0x20;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ iface_reg = (iface_reg & 0x1f) | 0x40;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface_reg = (iface_reg & 0x1f) | 0x60;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for DSP_B Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f) | 0x80;
+ /* voice calls need a data offset of 1 bit clock */
+ aic3262_write(codec, ASI1_LCH_OFFSET, 1);
+ break;
+ default:
+ printk(KERN_ERR
+ "#%s: Invalid DAI interface format\n", __func__);
+ return -EINVAL;
+ }
+ /* Also Configure the Pin Control Registers before writing into
+ * the ASI specific Clock Control and Format Registers
+ */
+
+ /* Configure B0_P4_R65_D[5:2] to 001 This configures the
+ * WCLK1 Pin to ASI1
+ */
+ regvalue = aic3262_read(codec, WCLK1_PIN_CNTL_REG);
+ aic3262_write(codec, WCLK1_PIN_CNTL_REG, (regvalue | BIT2));
+
+ /* Configure B0_P4_R68_d[6:5] = 01 and B0_P4_R67_D[4:1] to 0001
+ * to ensure that the DIN1 and DOUT1 Pins are configured
+ * correctly
+ */
+ regvalue = aic3262_read(codec, DIN1_PIN_CNTL_REG);
+ aic3262_write(codec, DIN1_PIN_CNTL_REG, (regvalue | BIT5));
+ regvalue = aic3262_read(codec, DOUT1_PIN_CNTL_REG);
+ aic3262_write(codec, DOUT1_PIN_CNTL_REG, (regvalue | BIT1));
+
+ aic3262_write(codec, ASI1_BWCLK_CNTL_REG, clk_reg);
+
+ aic3262_write(codec, ASI1_BUS_FMT, iface_reg);
+
+ return 0;
+}
+
+
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_asi2_set_dai_fmt
+* Purpose : This function is to set the DAI format for ASI2 Port
+*
+*----------------------------------------------------------------------------
+*/
+static int aic3262_multi_i2s_asi2_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 iface_reg, clk_reg;
+ u8 regvalue;
+
+ DBG(KERN_INFO "%s: DAI_ID %d fmt %d\n",
+ __func__, codec_dai->id, fmt);
+
+ /* Read the B0_P4_R17 and B0_P4_R26 Registers to configure the
+ * ASI2 Bus and Clock Formats depending on the PCM Format.
+ */
+ iface_reg = aic3262_read(codec, ASI2_BUS_FMT);
+ clk_reg = aic3262_read(codec, ASI2_BWCLK_CNTL_REG);
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ DBG(KERN_INFO "#%s: Configuring ASI%d as Frame Master..\n",
+ __func__, codec_dai->id);
+ aic3262->asiCtxt[1].master = 1;
+ clk_reg |= (BIT5 | BIT2);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ DBG(KERN_INFO "#%s: Configuring ASI%d as Frame Slave..\n",
+ __func__, codec_dai->id);
+
+ clk_reg &= ~0xFC;
+ aic3262->asiCtxt[1].master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ /*new case..just for debugging */
+ DBG(KERN_INFO "%s: SND_SOC_DAIFMT_CBS_CFM\n", __func__);
+ aic3262->asiCtxt[1].master = 0;
+ clk_reg |= BIT5;
+ clk_reg &= ~0x1C;
+ break;
+ default:
+ printk(KERN_ERR "#%s:Invalid DAI master/slave interface\n",
+ __func__);
+ return -EINVAL;
+ }
+ aic3262->asiCtxt[1].pcm_format = (fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for I2S Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f);
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for DSP_A Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f) | 0x20;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ iface_reg = (iface_reg & 0x1f) | 0x40;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface_reg = (iface_reg & 0x1f) | 0x60;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for DSP Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f) | 0x80;
+ /* voice calls need a data offset of 1 bit clock */
+ aic3262_write(codec, ASI2_LCH_OFFSET, 1);
+ break;
+ default:
+ printk(KERN_ERR "#%s:Invalid DAI interface format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Also Configure the Pin Control Registers before writing into
+ * the ASI2 specific Clock Control and Format Registers
+ */
+
+ /* Configure B0_P4_R69_D[5:2] to 001 This configures the
+ * WCLK2 Pin to ASI2
+ */
+
+ regvalue = aic3262_read(codec, WCLK2_PIN_CNTL_REG);
+ aic3262_write(codec, WCLK2_PIN_CNTL_REG, (regvalue | BIT2));
+
+ regvalue = aic3262_read(codec, BCLK2_PIN_CNTL_REG);
+ aic3262_write(codec, BCLK2_PIN_CNTL_REG, (regvalue | BIT2));
+
+ /* Configure B0_P4_R72_d[6:5] = 01 and B0_P4_R71_D[4:1] to 0001
+ * to ensure that the DIN2 and DOUT2 Pins are configured
+ * correctly
+ */
+ regvalue = aic3262_read(codec, DIN2_PIN_CNTL_REG);
+ aic3262_write(codec, DIN2_PIN_CNTL_REG, (regvalue | BIT5));
+
+ regvalue = aic3262_read(codec, DOUT2_PIN_CNTL_REG);
+ aic3262_write(codec, DOUT2_PIN_CNTL_REG, (regvalue | BIT5 | BIT1));
+
+ aic3262_write(codec, ASI2_BWCLK_CNTL_REG, clk_reg);
+
+ aic3262_write(codec, ASI2_BUS_FMT, iface_reg);
+
+ return 0;
+}
+
+/*
+*----------------------------------------------------------------------------
+* Function : aic3262_multi_i2s_asi3_set_dai_fmt
+* Purpose : This function is to set the DAI format for ASI3 Port
+*
+*----------------------------------------------------------------------------
+*/
+static int aic3262_multi_i2s_asi3_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 iface_reg, clk_reg;
+ u8 regvalue;
+
+ DBG(KERN_INFO "%s: DAI_ID %d fmt %d\n",
+ __func__, codec_dai->id, fmt);
+
+ /* Read the B0_P4_R33 and B0_P4_R42 Registers to configure the
+ * ASI3 Bus and Clock Formats depending on the PCM Format.
+ */
+ iface_reg = aic3262_read(codec, ASI3_BUS_FMT);
+ clk_reg = aic3262_read(codec, ASI3_BWCLK_CNTL_REG);
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ DBG(KERN_INFO "#%s: Configuring ASI%d as Frame Master..\n",
+ __func__, codec_dai->id);
+ aic3262->asiCtxt[2].master = 1;
+ clk_reg |= (BIT5 | BIT2);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ DBG(KERN_INFO "#%s: Configuring ASI%d as Frame Slave..\n",
+ __func__, codec_dai->id);
+ clk_reg &= ~0xFC;
+ aic3262->asiCtxt[2].master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ /* new case..just for debugging */
+ DBG(KERN_INFO "%s: SND_SOC_DAIFMT_CBS_CFM\n", __func__);
+ aic3262->asiCtxt[2].master = 0;
+ clk_reg |= BIT5;
+ clk_reg &= ~0x1C;
+ break;
+ default:
+ printk(KERN_ERR "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+ aic3262->asiCtxt[2].pcm_format = (fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for I2S Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f);
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for DSP_A Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f) | 0x20;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ iface_reg = (iface_reg & 0x1f) | 0x40;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface_reg = (iface_reg & 0x1f) | 0x60;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ DBG(KERN_INFO "#%s: Configuring ASI%d for DSP Mode..\n",
+ __func__, codec_dai->id);
+ iface_reg = (iface_reg & 0x1f) | 0x80;
+ /* voice calls need a data offset of 1 bit clock */
+ aic3262_write(codec, ASI3_LCH_OFFSET, 1);
+ break;
+ default:
+ printk(KERN_ERR
+ "#%s: Invalid DAI interface format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Also Configure the Pin Control Registers before writing into
+ * the ASI specific Clock Control and Format Registers
+ */
+ /* Configure B0_P4_R73_D[5:2] to 0001 This configures the
+ * WCLK3 Pin to ASI3
+ */
+ regvalue = aic3262_read(codec, WCLK3_PIN_CNTL_REG);
+ aic3262_write(codec, WCLK3_PIN_CNTL_REG, (regvalue | BIT2));
+
+ regvalue = aic3262_read(codec, BCLK3_PIN_CNTL_REG);
+ aic3262_write(codec, BCLK3_PIN_CNTL_REG, (regvalue | BIT2));
+
+ /* Configure B0_P4_R76_d[6:5] = 01 and B0_P4_R75_D[4:1] to 0001
+ * to ensure that the DIN3 and DOUT3 Pins are configured
+ * correctly
+ */
+ regvalue = aic3262_read(codec, DIN3_PIN_CNTL_REG);
+ aic3262_write(codec, DIN3_PIN_CNTL_REG, (regvalue | BIT5));
+ regvalue = aic3262_read(codec, DOUT3_PIN_CNTL_REG);
+ aic3262_write(codec, DOUT3_PIN_CNTL_REG, (regvalue | BIT1));
+
+ aic3262_write(codec, ASI3_BWCLK_CNTL_REG, clk_reg);
+
+ aic3262_write(codec, ASI3_BUS_FMT, iface_reg);
+
+ return 0;
+}
+
+/*
+ * Clock after PLL and dividers
+ */
+static int aic3262_multi_i2s_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s: DAI ID %d Freq %d Direction %d\n",
+ __func__, codec_dai->id, freq, dir);
+ switch (freq) {
+ case AIC3262_FREQ_12000000:
+ case AIC3262_FREQ_12288000:
+ case AIC3262_FREQ_24000000:
+ aic3262->sysclk = freq;
+ return 0;
+ break;
+ }
+ printk(KERN_ERR "Invalid frequency to set DAI system clock\n");
+ return -EINVAL;
+}
+
+/*
+* aic3262_multi_i2s_set_dai_pll
+*
+* This function is invoked as part of the PLL call-back
+* handler from the ALSA layer.
+*/
+static int aic3262_multi_i2s_set_dai_pll(struct snd_soc_dai *codec_dai,
+ int pll_id, int source, unsigned int freq_in,
+ unsigned int freq_out)
+{
+ /*u16 reg, enable;
+ int offset;
+ struct snd_soc_codec *codec = codec_dai->codec;*/
+
+ printk(KERN_INFO "%s: DAI ID %d PLL_ID %d InFreq %d OutFreq %d\n",
+ __func__, codec_dai->id, pll_id, freq_in, freq_out);
+
+ return 0;
+}
+
+/*
+* aic3262_asi1_clk_config
+*
+* This function is used to configure the BCLK1 and WCLK1 pins which
+* are specific to the ASI1 interface. It just enables the
+* BCLK and WCLK dividers along with the miniDSP Port Control Registers.
+* However, depending on the user requirement, this function can also be
+* extended to configure the source for the BCLK and WCLK on a per-ASI basis.
+*/
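+/* Note: 0x80 OR'd into ASI1_BCLK_N and 0xA0 OR'd into ASI1_WCLK_N below set
+ * D7 (and D5 for the WCLK divider); D7 is assumed to be the divider power-up
+ * bit, following the usual AIC3xxx register layout; see tlv320aic326x.h and
+ * the datasheet for the exact bit definitions.
+ */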
+static int aic3262_asi1_clk_config(struct snd_soc_codec *codec,
+ struct snd_pcm_hw_params *params)
+{
+ u8 bclk_N_value, wclk_N_value;
+ u8 minidspD_data, minidspA_data;
+ u8 regval;
+
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "%s: Invoked\n", __func__);
+
+ if (aic3262->asiCtxt[0].master == 1) {
+ DBG(KERN_INFO
+ "#%s: Codec Master on ASI1 Port. Enabling BCLK WCLK Divider.\n",
+ __func__);
+ bclk_N_value = aic3262->asiCtxt[0].bclk_div;
+ aic3262_write(codec, ASI1_BCLK_N, (bclk_N_value | 0x80));
+
+ wclk_N_value = aic3262_read(codec, ASI1_WCLK_N);
+ aic3262_write(codec, ASI1_WCLK_N, (wclk_N_value | 0xA0));
+ }
+ /* Configure the BCLK and WCLK Output Mux Options */
+ regval = aic3262_read(codec, ASI1_BWCLK_OUT_CNTL);
+ regval &= ~(AIC3262_ASI_BCLK_MUX_MASK | AIC3262_ASI_WCLK_MUX_MASK);
+
+ regval |= (aic3262->asiCtxt[0].bclk_output <<
+ AIC3262_ASI_BCLK_MUX_SHIFT);
+ regval |= aic3262->asiCtxt[0].wclk_output;
+ aic3262_write(codec, ASI1_BWCLK_OUT_CNTL, regval);
+
+ /* Configure the corresponding miniDSP Data Ports */
+ minidspD_data = aic3262_read(codec, MINIDSP_PORT_CNTL_REG);
+ minidspD_data &= ~(BIT5 | BIT4);
+ aic3262_write(codec, MINIDSP_PORT_CNTL_REG, minidspD_data);
+
+ minidspA_data = aic3262_read(codec, ASI1_ADC_INPUT_CNTL);
+ minidspA_data &= ~(BIT2 | BIT1 | BIT0);
+ minidspA_data |= aic3262->asiCtxt[0].adc_input;
+ aic3262_write(codec, ASI1_ADC_INPUT_CNTL, minidspA_data);
+
+ return 0;
+
+}
+
+/*
+* aic3262_asi2_clk_config
+*
+* This function is used to configure the BCLK2 and WCLK2 pins which
+* are specific to the ASI2 interface. It just enables the
+* BCLK and WCLK dividers along with the miniDSP Port Control Registers.
+* However, depending on the user requirement, this function can also be
+* extended to configure the source for the BCLK and WCLK on a per-ASI basis.
+*/
+static int aic3262_asi2_clk_config(struct snd_soc_codec *codec,
+ struct snd_pcm_hw_params *params)
+{
+ u8 bclk_N_value, wclk_N_value, minidspD_data, minidspA_data;
+ u8 regval;
+
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "%s: Invoked\n", __func__);
+
+ if (aic3262->asiCtxt[1].master == 1) {
+ DBG(KERN_INFO
+ "#%s: Codec Master on ASI2 Port. Enabling BCLK WCLK Divider.\n",
+ __func__);
+ bclk_N_value = aic3262->asiCtxt[1].bclk_div;
+ aic3262_write(codec, ASI2_BCLK_N, (bclk_N_value | 0x80));
+
+ wclk_N_value = aic3262_read(codec, ASI2_WCLK_N);
+ aic3262_write(codec, ASI2_WCLK_N, (wclk_N_value | 0xA0));
+ }
+ /* Configure the BCLK and WCLK Output Mux Options */
+ regval = aic3262_read(codec, ASI2_BWCLK_OUT_CNTL);
+ regval &= ~(AIC3262_ASI_BCLK_MUX_MASK | AIC3262_ASI_WCLK_MUX_MASK);
+ regval |= (aic3262->asiCtxt[1].bclk_output <<
+ AIC3262_ASI_BCLK_MUX_SHIFT);
+ regval |= aic3262->asiCtxt[1].wclk_output;
+
+ aic3262_write(codec, ASI2_BWCLK_OUT_CNTL, regval);
+ /* Configure the corresponding miniDSP Data Ports */
+ minidspD_data = aic3262_read(codec, MINIDSP_PORT_CNTL_REG);
+ minidspD_data |= (BIT2);
+ aic3262_write(codec, MINIDSP_PORT_CNTL_REG, minidspD_data);
+
+ minidspA_data = aic3262_read(codec, ASI2_ADC_INPUT_CNTL);
+ minidspA_data &= ~(BIT2 | BIT1 | BIT0);
+ minidspA_data |= aic3262->asiCtxt[1].adc_input;
+ aic3262_write(codec, ASI2_ADC_INPUT_CNTL, minidspA_data);
+
+ return 0;
+
+}
+
+/*
+* aic3262_asi3_clk_config
+*
+* This function is used to configure the BCLK3 and WCLK3 pins, which
+* are specific to the ASI3 interface. It enables the BCLK and WCLK
+* dividers along with the miniDSP Port Control Registers.
+* However, depending on the user requirement, it can also be extended
+* to configure the source for the BCLK and WCLK on a per-ASI basis.
+*/
+static int aic3262_asi3_clk_config(struct snd_soc_codec *codec,
+ struct snd_pcm_hw_params *params)
+{
+ u8 bclk_N_value, wclk_N_value, minidspD_data, minidspA_data;
+ u8 regval;
+
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "%s:\n", __func__);
+
+ if (aic3262->asiCtxt[2].master == 1) {
+ DBG(KERN_INFO
+ "#%s: Codec Master on ASI3 Port. Enabling BCLK WCLK Divider.\n",
+ __func__);
+ bclk_N_value = aic3262->asiCtxt[2].bclk_div;
+ aic3262_write(codec, ASI3_BCLK_N, (bclk_N_value | 0x80));
+
+ wclk_N_value = aic3262_read(codec, ASI3_WCLK_N);
+ aic3262_write(codec, ASI3_WCLK_N, (wclk_N_value | 0xA0));
+ }
+
+ /* Configure the BCLK and WCLK Output Mux Options */
+ regval = aic3262_read(codec, ASI3_BWCLK_OUT_CNTL);
+ regval &= ~(AIC3262_ASI_BCLK_MUX_MASK | AIC3262_ASI_WCLK_MUX_MASK);
+ regval |= (aic3262->asiCtxt[2].bclk_output <<
+ AIC3262_ASI_BCLK_MUX_SHIFT);
+ regval |= aic3262->asiCtxt[2].wclk_output;
+ aic3262_write(codec, ASI3_BWCLK_OUT_CNTL, regval);
+
+ minidspD_data = aic3262_read(codec, MINIDSP_PORT_CNTL_REG);
+ minidspD_data |= (BIT1);
+ aic3262_write(codec, MINIDSP_PORT_CNTL_REG, minidspD_data);
+
+ minidspA_data = aic3262_read(codec, ASI3_ADC_INPUT_CNTL);
+ minidspA_data &= ~(BIT2 | BIT1 | BIT0);
+ minidspA_data |= aic3262->asiCtxt[2].adc_input;
+ aic3262_write(codec, ASI3_ADC_INPUT_CNTL, minidspA_data);
+
+ return 0;
+
+}
+
+/*
+* aic3262_multi_i2s_hw_params
+*
+* This function is used to configure the individual ASI port registers
+* depending on the configuration passed in through the snd_pcm_hw_params
+* structure.
+* It internally configures the ASI-specific pins and the clock control
+* registers.
+*/
+static int aic3262_multi_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int i, j;
+ u8 data;
+ u16 regoffset = 0;
+ u8 dacpath = 0;
+ u8 adcpath = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s: Invoked for ASI%d Port for %s Mode\n",
+ __func__, dai->id,
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ? "Playback" : "Record");
+
+ i = aic3262_get_divs(aic3262->sysclk, params_rate(params));
+
+ i2c_verify_book0(codec);
+
+ if (i < 0) {
+ printk(KERN_ERR "#%s: Sampling rate %d not supported\n",
+ __func__, params_rate(params));
+ return i;
+ }
+
+ aic3262_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Configure the PLL J and D values only if none of the ASI
+ * interfaces are active.
+ */
+ /*if (!(AIC3262_MULTI_ASI_ACTIVE(aic3262))) {*/
+ if (1) {
+ DBG(KERN_INFO "#%s: None of the ASIs active yet...\n",
+ __func__);
+ /* P and R are fixed to 1; J.D is the variable term. */
+ /* P and R are both programmed to 1 at init. */
+
+ /* J value */
+ aic3262_write(codec, PLL_J_REG, aic3262_divs[i].pll_j);
+
+ /* MSB & LSB for D value */
+
+ aic3262_write(codec, PLL_D_MSB, (aic3262_divs[i].pll_d >> 8));
+ aic3262_write(codec, PLL_D_LSB,
+ (aic3262_divs[i].pll_d & AIC3262_8BITS_MASK));
+
+ /* NDAC divider value */
+ data = aic3262_read(codec, NDAC_DIV_POW_REG);
+ DBG(KERN_INFO "# reading NDAC = %d , NDAC_DIV_POW_REG = %x\n",
+ aic3262_divs[i].ndac, data);
+ aic3262_write(codec, NDAC_DIV_POW_REG,
+ ((data & 0x80)|(aic3262_divs[i].ndac)));
+ DBG(KERN_INFO "# writing NDAC = %d , NDAC_DIV_POW_REG = %x\n",
+ aic3262_divs[i].ndac,
+ ((data & 0x80)|(aic3262_divs[i].ndac)));
+
+ /* MDAC divider value */
+ data = aic3262_read(codec, MDAC_DIV_POW_REG);
+ DBG(KERN_INFO "# reading MDAC = %d , MDAC_DIV_POW_REG = %x\n",
+ aic3262_divs[i].mdac, data);
+ aic3262_write(codec, MDAC_DIV_POW_REG,
+ ((data & 0x80)|(aic3262_divs[i].mdac)));
+ DBG(KERN_INFO "# writing MDAC = %d , MDAC_DIV_POW_REG = %x\n",
+ aic3262_divs[i].mdac, ((data & 0x80)|(aic3262_divs[i].mdac)));
+
+ /* DOSR MSB & LSB values */
+ aic3262_write(codec, DOSR_MSB_REG, aic3262_divs[i].dosr >> 8);
+ DBG(KERN_INFO "# writing DOSR_MSB_REG = %d\n",
+ (aic3262_divs[i].dosr >> 8));
+ aic3262_write(codec, DOSR_LSB_REG,
+ aic3262_divs[i].dosr & AIC3262_8BITS_MASK);
+ DBG(KERN_INFO "# writing DOSR_LSB_REG = %d\n",
+ (aic3262_divs[i].dosr & AIC3262_8BITS_MASK));
+
+ /* NADC divider value */
+ data = aic3262_read(codec, NADC_DIV_POW_REG);
+ aic3262_write(codec, NADC_DIV_POW_REG,
+ ((data & 0x80)|(aic3262_divs[i].nadc)));
+ DBG(KERN_INFO "# writing NADC_DIV_POW_REG = %d\n",
+ aic3262_divs[i].nadc);
+
+ /* MADC divider value */
+ data = aic3262_read(codec, MADC_DIV_POW_REG);
+ aic3262_write(codec, MADC_DIV_POW_REG,
+ ((data & 0x80)|(aic3262_divs[i].madc)));
+ DBG(KERN_INFO "# writing MADC_DIV_POW_REG = %d\n",
+ aic3262_divs[i].madc);
+
+ /* AOSR value */
+ aic3262_write(codec, AOSR_REG, aic3262_divs[i].aosr);
+ DBG(KERN_INFO "# writing AOSR = %d\n", aic3262_divs[i].aosr);
+ } else {
+ DBG(KERN_INFO "#Atleast 1 ASI Active. Cannot Program PLL..\n");
+ }
+ /* Check for the DAI ID to know which ASI needs
+ * Configuration.
+ */
+ switch (dai->id) {
+ case 1:
+ regoffset = ASI1_BUS_FMT;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ DBG(KERN_INFO "#%s: ASI1 DAC Inputs enabled..\n",
+ __func__);
+ /* Read the DAC Control Register and configure it
+ * as per the ASIContext Structure Settings.
+ */
+ dacpath = aic3262_read(codec, ASI1_DAC_OUT_CNTL);
+ dacpath &= ~(AIC3262_ASI_LDAC_PATH_MASK |
+ AIC3262_ASI_RDAC_PATH_MASK);
+ dacpath |= (aic3262->asiCtxt[0].left_dac_output
+ << AIC3262_ASI_LDAC_PATH_SHIFT);
+
+ dacpath |= (aic3262->asiCtxt[0].right_dac_output
+ << AIC3262_ASI_RDAC_PATH_SHIFT);
+ aic3262_write(codec, ASI1_DAC_OUT_CNTL, dacpath);
+
+ aic3262->asiCtxt[0].playback_mode = 1;
+ aic3262->asiCtxt[0].bclk_div =
+ aic3262_divs[i].blck_N;
+ } else {
+ /* For Recording, Configure the DOUT Pin as per
+ * ASIContext Structure Settings.
+ */
+ adcpath = aic3262_read(codec, ASI1_DATA_OUT);
+ adcpath &= ~(AIC3262_ASI_DOUT_MASK);
+
+ adcpath |= aic3262->asiCtxt[0].dout_option;
+ aic3262_write(codec, ASI1_DATA_OUT, adcpath);
+
+ aic3262->asiCtxt[0].capture_mode = 1;
+ }
+ break;
+ case 2:
+ regoffset = ASI2_BUS_FMT;
+
+ /* Since we are configuring ASI2, check whether playback is
+ * expected. If so, enable the ASI2 inputs to the left and
+ * right DACs.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ DBG(KERN_INFO "#%s: ASI2 DAC Inputs enabled..\n",
+ __func__);
+ /* Read the DAC Control Register and configure it
+ * as per the ASIContext Structure Settings.
+ */
+ dacpath = aic3262_read(codec, ASI2_DAC_OUT_CNTL);
+ dacpath &= ~(AIC3262_ASI_LDAC_PATH_MASK |
+ AIC3262_ASI_RDAC_PATH_MASK);
+ dacpath |= (aic3262->asiCtxt[1].left_dac_output
+ << AIC3262_ASI_LDAC_PATH_SHIFT);
+
+ dacpath |= (aic3262->asiCtxt[1].right_dac_output
+ << AIC3262_ASI_RDAC_PATH_SHIFT);
+ aic3262_write(codec, ASI2_DAC_OUT_CNTL, dacpath);
+ aic3262->asiCtxt[1].playback_mode = 1;
+
+ aic3262->asiCtxt[1].bclk_div =
+ aic3262_divs[i].blck_N;
+ } else {
+ /* For Recording, Configure the DOUT Pin as per
+ * ASIContext Structure Settings.
+ */
+ adcpath = aic3262_read(codec, ASI2_DATA_OUT);
+ adcpath &= ~(AIC3262_ASI_DOUT_MASK);
+ adcpath |= aic3262->asiCtxt[1].dout_option;
+ aic3262_write(codec, ASI2_DATA_OUT, adcpath);
+
+ aic3262->asiCtxt[1].capture_mode = 1;
+ }
+ break;
+ case 3:
+ regoffset = ASI3_BUS_FMT;
+ /* Since we are configuring ASI3, check whether playback is
+ * expected. If so, enable the ASI3 inputs to the left and
+ * right DACs.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ DBG(KERN_INFO "#%s:ASI3 DAC Inputs enabled.\n",
+ __func__);
+ /* Read the DAC Control Register and configure
+ * it as per the ASIContext Structure Settings.
+ */
+ dacpath = aic3262_read(codec, ASI3_DAC_OUT_CNTL);
+ dacpath &= ~(AIC3262_ASI_LDAC_PATH_MASK |
+ AIC3262_ASI_RDAC_PATH_MASK);
+ dacpath |= (aic3262->asiCtxt[2].left_dac_output
+ << AIC3262_ASI_LDAC_PATH_SHIFT);
+ dacpath |= (aic3262->asiCtxt[2].right_dac_output
+ << AIC3262_ASI_RDAC_PATH_SHIFT);
+ aic3262_write(codec,
+ ASI3_DAC_OUT_CNTL, dacpath);
+
+ aic3262->asiCtxt[2].playback_mode = 1;
+
+ aic3262->asiCtxt[2].bclk_div =
+ aic3262_divs[i].blck_N;
+ } else {
+ /* For Recording, Configure the DOUT Pin as per
+ * ASIContext Structure Settings.
+ */
+ adcpath &= ~(AIC3262_ASI_DOUT_MASK);
+ adcpath |= aic3262->asiCtxt[2].dout_option;
+ aic3262_write(codec, ASI3_DATA_OUT, adcpath);
+
+ aic3262->asiCtxt[2].capture_mode = 1;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Invalid Dai ID %d in %s",
+ dai->id, __func__);
+ break;
+ }
+ DBG(KERN_INFO "#%s: Reading Pg %d Reg %d for Bus Format Control.\n",
+ __func__, (regoffset/128), (regoffset % 128));
+
+ /* Read the corresponding ASI DAI Interface Register */
+ data = aic3262_read(codec, regoffset);
+
+ data = data & 0xe7;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ DBG(KERN_INFO "#%s: Configuring ASI%d S16_LE Fmt..\n",
+ __func__, dai->id);
+ data = data | 0x00;
+ aic3262->asiCtxt[dai->id - 1].word_len = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ data |= (0x08);
+ aic3262->asiCtxt[dai->id - 1].word_len = 20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ DBG(KERN_INFO "#%s: Configuring ASI%d S24_LE Fmt..\n",
+ __func__, dai->id);
+ data |= (0x10);
+ aic3262->asiCtxt[dai->id - 1].word_len = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ DBG(KERN_INFO "#%s: Configuring ASI%d S32_LE Fmt..\n",
+ __func__, dai->id);
+ data |= (0x18);
+ aic3262->asiCtxt[dai->id - 1].word_len = 32;
+ break;
+ }
+
+ /* configure the respective Registers for the above configuration */
+ aic3262_write(codec, regoffset, data);
+
+ for (j = 0; j < NO_FEATURE_REGS; j++) {
+ aic3262_write(codec,
+ aic3262_divs[i].codec_specific_regs[j].reg_offset,
+ aic3262_divs[i].codec_specific_regs[j].reg_val);
+ }
+
+ /* Enable the PLL, MDAC, NDAC, NADC, MADC and BCLK Dividers */
+ aic3262_set_bias_level(codec, SND_SOC_BIAS_ON);
+
+ /* Based on the DAI ID we enable the corresponding pins related to the
+ * ASI Port.
+ */
+ switch (dai->id) {
+ case 1:
+ aic3262_asi1_clk_config(codec, params);
+ break;
+ case 2:
+ aic3262_asi2_clk_config(codec, params);
+ break;
+ case 3:
+ aic3262_asi3_clk_config(codec, params);
+ break;
+ default:
+ printk(KERN_ERR "Invalid Dai ID %d in %s",
+ dai->id, __func__);
+ break;
+ }
+ /* Depending on the DAI->ID update the local Flags */
+ aic3262->asiCtxt[dai->id - 1].asi_active++;
+ aic3262->asiCtxt[dai->id - 1].sampling_rate = params_rate(params);
+ /* Update the active_count flag */
+ aic3262->active_count++;
+
+ return 0;
+}
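+
+/*
+* Illustrative sketch, for reference only (not used by the driver):
+* with P and R programmed to 1 at init, the aic3262_divs[] entries are
+* expected to follow the usual TI AIC3xxx clock tree, assuming D is the
+* fractional part of J scaled by 10000:
+*
+* PLL_CLK = MCLK * (J + D/10000)
+* DAC_FS = PLL_CLK / (NDAC * MDAC * DOSR)
+* ADC_FS = PLL_CLK / (NADC * MADC * AOSR)
+*
+* A hypothetical sanity-check helper (needs <linux/math64.h> for div_u64):
+*/
+#if 0 /* example only */
+static unsigned long aic3262_example_dac_fs(int i)
+{
+ /* i indexes aic3262_divs[], as returned by aic3262_get_divs() */
+ u64 pll_clk = div_u64((u64)aic3262_divs[i].mclk *
+ (aic3262_divs[i].pll_j * 10000 + aic3262_divs[i].pll_d), 10000);
+
+ return div_u64(pll_clk, aic3262_divs[i].ndac *
+ aic3262_divs[i].mdac * aic3262_divs[i].dosr);
+}
+#endif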
+
+/*
+*
+* aic3262_multi_i2s_hw_free
+*
+* This function is used to configure the Codec after the usage is completed.
+* We can use this function to disable the DAC and ADC specific inputs from the
+* individual ASI Ports of the Audio Codec.
+*/
+static int aic3262_multi_i2s_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ u8 value;
+ u8 dacpath;
+ u8 adcpath;
+ u16 dacregoffset = 0;
+ u16 adcregoffset = 0;
+
+ DBG(KERN_INFO "#%s: ASI%d Port for %s Mode\n",
+ __func__, dai->id,
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ "Playback" : "Record");
+
+ /* Check if this function was already executed earlier for the same
+ * ASI Port
+ */
+ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
+ (aic3262->asiCtxt[dai->id - 1].playback_mode == 0)) {
+ DBG(KERN_INFO "#%s: Function Already Executed. Exiting..\n",
+ __func__);
+ goto err;
+ } else if ((substream->stream != SNDRV_PCM_STREAM_PLAYBACK) &&
+ (aic3262->asiCtxt[dai->id - 1].capture_mode == 0)) {
+ DBG(KERN_INFO "#%s: Function Already Executed. Exiting..\n",
+ __func__);
+ goto err;
+ }
+
+ switch (dai->id) {
+ case 1:
+ /* In case we are Frame Master on this Interface, Switch off
+ * the Bit Clock Divider and Word Clock Dividers
+ */
+ if (aic3262->asiCtxt[0].master == 1) {
+ /* Also check if either Playback or Recording is still
+ * going on this ASI Interface
+ */
+
+ value = aic3262_read(codec, ASI1_BCLK_N);
+ aic3262_write(codec, ASI1_BCLK_N, (value & 0x7f));
+
+ value = aic3262_read(codec, ASI1_WCLK_N);
+ aic3262_write(codec, ASI1_WCLK_N, (value & 0x7f));
+ }
+
+ dacregoffset = ASI1_DAC_OUT_CNTL;
+ adcregoffset = ASI1_ADC_INPUT_CNTL;
+ break;
+ case 2:
+ /* In case we are Frame Master on this Interface, Switch off
+ * the Bit Clock Divider and Word Clock Dividers
+ */
+ if (aic3262->asiCtxt[1].master == 1) {
+ value = aic3262_read(codec, ASI2_BCLK_N);
+ aic3262_write(codec, ASI2_BCLK_N, (value & 0x7f));
+
+ value = aic3262_read(codec, ASI2_WCLK_N);
+ aic3262_write(codec, ASI2_WCLK_N, (value & 0x7f));
+ }
+ dacregoffset = ASI2_DAC_OUT_CNTL;
+ adcregoffset = ASI2_ADC_INPUT_CNTL;
+ break;
+ case 3:
+ /* In case we are Frame Master on this Interface, Switch off
+ * the Bit Clock Divider and Word Clock Dividers
+ */
+ if (aic3262->asiCtxt[2].master == 1) {
+ value = aic3262_read(codec, ASI3_BCLK_N);
+ aic3262_write(codec, ASI3_BCLK_N, (value & 0x7f));
+
+ value = aic3262_read(codec, ASI3_WCLK_N);
+ aic3262_write(codec, ASI3_WCLK_N, (value & 0x7f));
+ }
+ dacregoffset = ASI3_DAC_OUT_CNTL;
+ adcregoffset = ASI3_ADC_INPUT_CNTL;
+ break;
+ default:
+ printk(KERN_ERR "#%s: Invalid dai id\n", __func__);
+ }
+ /* If this was a Playback Stream Stop, then only
+ * switch off the DAC Inputs
+ */
+ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
+ (dacregoffset != 0)) {
+ DBG(KERN_INFO "#%s: Disabling Pg %d Reg %d DAC Inputs ..\n",
+ __func__, (dacregoffset/128), (dacregoffset % 128));
+
+ dacpath = aic3262_read(codec, dacregoffset);
+ aic3262_write(codec, dacregoffset, (dacpath & ~(BIT6 | BIT4)));
+
+ aic3262->asiCtxt[dai->id - 1].playback_mode = 0;
+ } else {
+ /* Switch off the ADC Input Control Registers here */
+ DBG(KERN_INFO "#%s: Disabling Pg %d Reg %d for ADC Inputs..\n",
+ __func__, (adcregoffset/128), (adcregoffset % 128));
+
+ adcpath = aic3262_read(codec, adcregoffset);
+ aic3262_write(codec, adcregoffset,
+ (adcpath & ~(BIT2 | BIT1 | BIT0)));
+
+ aic3262->asiCtxt[dai->id - 1].capture_mode = 0;
+ }
+
+ /* If we were configured in mono PCM Mode earlier, then reset the
+ * Left Channel and Right Channel offset Registers here.
+ */
+ switch (dai->id) {
+ case 1:
+ if (aic3262->asiCtxt[0].pcm_format == SND_SOC_DAIFMT_DSP_B) {
+ aic3262_write(codec, ASI1_LCH_OFFSET, 0x00);
+ aic3262_write(codec, ASI1_RCH_OFFSET, 0x00);
+ }
+ break;
+ case 2:
+ if (aic3262->asiCtxt[1].pcm_format == SND_SOC_DAIFMT_DSP_B) {
+ aic3262_write(codec, ASI2_LCH_OFFSET, 0x00);
+ aic3262_write(codec, ASI2_RCH_OFFSET, 0x00);
+ }
+
+ break;
+ case 3:
+ if (aic3262->asiCtxt[2].pcm_format == SND_SOC_DAIFMT_DSP_B) {
+ aic3262_write(codec, ASI3_LCH_OFFSET, 0x00);
+ aic3262_write(codec, ASI3_RCH_OFFSET, 0x00);
+ }
+ break;
+ }
+ /* Depending on the DAI->ID update the asi_active Flags */
+ if (aic3262->asiCtxt[dai->id - 1].asi_active) {
+ aic3262->asiCtxt[dai->id - 1].asi_active--;
+
+ /* Update the active_count flag */
+ if (aic3262->active_count)
+ aic3262->active_count--;
+ }
+err:
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * @struct snd_soc_codec_dai |
+ * This is the SoC Codec DAI structure, which holds the DAI capabilities
+ * (playback and capture), the DAI runtime information (DAI state and
+ * pop wait state) and the DAI private data.
+ * The AIC3262 supports rates from 8k to 192k.
+ * The supported PCM bit formats are 16, 20, 24 and 32 bits.
+ *----------------------------------------------------------------------------
+ */
+struct snd_soc_dai_ops aic3262_multi_i2s_dai_ops = {
+ .hw_params = aic3262_multi_i2s_hw_params,
+ .digital_mute = aic3262_multi_i2s_mute,
+ .set_fmt = aic3262_multi_i2s_set_dai_fmt,
+ .set_pll = aic3262_multi_i2s_set_dai_pll,
+ .set_sysclk = aic3262_multi_i2s_set_dai_sysclk,
+ .hw_free = aic3262_multi_i2s_hw_free,
+};
+
+
+static struct snd_soc_dai_driver tlv320aic3262_dai[] = {
+/* AIC3262 ASI1 DAI */
+{
+ .name = "aic3262-asi1",
+ .id = 1,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AIC3262_RATES,
+ .formats = AIC3262_FORMATS},
+ .capture = { /* dummy for fast DAI switching */
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AIC3262_RATES,
+ .formats = AIC3262_FORMATS},
+ .ops = &aic3262_multi_i2s_dai_ops,
+},
+/* AIC3262 ASI2 DAI */
+{
+ .name = "aic3262-asi2",
+ .id = 2,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AIC3262_RATES,
+ .formats = AIC3262_FORMATS,},
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AIC3262_RATES,
+ .formats = AIC3262_FORMATS,},
+ .ops = &aic3262_multi_i2s_dai_ops,
+
+},
+/* AIC3262 ASI3 DAI */
+{
+ .name = "aic3262-asi3",
+ .id = 3,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AIC3262_RATES,
+ .formats = AIC3262_FORMATS, },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AIC3262_RATES,
+ .formats = AIC3262_FORMATS, },
+ .ops = &aic3262_multi_i2s_dai_ops,
+
+},
+};
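+
+/*
+* Illustrative sketch, for reference only (not used by the driver): a
+* machine driver would typically bind one of the ASI DAIs above through
+* a snd_soc_dai_link. The CPU DAI and platform names below are
+* placeholders; the codec_name assumes the codec sits on I2C bus 0 at
+* address 0x18.
+*/
+#if 0 /* example only */
+static struct snd_soc_dai_link example_aic3262_dai_link = {
+ .name = "AIC3262 ASI1",
+ .stream_name = "ASI1 Playback/Capture",
+ .codec_dai_name = "aic3262-asi1", /* matches tlv320aic3262_dai[0].name */
+ .codec_name = "aic3262-codec.0-0018", /* assumed bus/address */
+ .cpu_dai_name = "example-cpu-dai", /* platform specific */
+ .platform_name = "example-pcm-audio", /* platform specific */
+};
+#endif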
+
+/*
+ *****************************************************************************
+ * Initializations
+ *****************************************************************************
+ */
+/*
+ * AIC3262 register cache
+ * We are caching the registers here.
+ * There is no point in caching the reset register.
+ *
+ * NOTE: In the AIC3262, registers 0 to 127 of each page are cached.
+ * The following table contains the default values for the page 0
+ * through page 4 registers.
+ */
+static const u8 aic3262_reg[AIC3262_CACHEREGNUM] = {
+ 0x00, 0x00, 0x10, 0x00, /* 0 */
+ 0x03, 0x40, 0x11, 0x08, /* 4 */
+ 0x00, 0x00, 0x00, 0x82, /* 8 */
+ 0x88, 0x00, 0x80, 0x02, /* 12 */
+ 0x00, 0x08, 0x01, 0x01, /* 16 */
+ 0x80, 0x01, 0x00, 0x04, /* 20 */
+ 0x00, 0x00, 0x01, 0x00, /* 24 */
+ 0x00, 0x00, 0x01, 0x00, /* 28 */
+ 0x00, 0x00, 0x00, 0x00, /* 32 */
+ 0x00, 0x00, 0x00, 0x00, /* 36 */
+ 0x00, 0x00, 0x00, 0x00, /* 40 */
+ 0x00, 0x00, 0x00, 0x00, /* 44 */
+ 0x00, 0x00, 0x00, 0x00, /* 48 */
+ 0x00, 0x42, 0x02, 0x02, /* 52 */
+ 0x42, 0x02, 0x02, 0x02, /* 56 */
+ 0x00, 0x00, 0x00, 0x01, /* 60 */
+ 0x01, 0x00, 0x14, 0x00, /* 64 */
+ 0x0C, 0x00, 0x00, 0x00, /* 68 */
+ 0x00, 0x00, 0x00, 0xEE, /* 72 */
+ 0x10, 0xD8, 0x10, 0xD8, /* 76 */
+ 0x00, 0x00, 0x88, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, /* 84 */
+ 0x7F, 0x00, 0x00, 0x00, /* 88 */
+ 0x00, 0x00, 0x00, 0x00, /* 92 */
+ 0x7F, 0x00, 0x00, 0x00, /* 96 */
+ 0x00, 0x00, 0x00, 0x00, /* 100 */
+ 0x00, 0x00, 0x00, 0x00, /* 104 */
+ 0x00, 0x00, 0x00, 0x00, /* 108 */
+ 0x00, 0x00, 0x00, 0x00, /* 112 */
+ 0x00, 0x00, 0x00, 0x00, /* 116 */
+ 0x00, 0x00, 0x00, 0x00, /* 120 */
+ 0x00, 0x00, 0x00, 0x00, /* 124 - PAGE0 Registers(127) ends here */
+ 0x01, 0x00, 0x08, 0x00, /* 128, PAGE1-0 */
+ 0x00, 0x00, 0x00, 0x00, /* 132, PAGE1-4 */
+ 0x00, 0x00, 0x00, 0x10, /* 136, PAGE1-8 */
+ 0x00, 0x00, 0x00, 0x00, /* 140, PAGE1-12 */
+ 0x40, 0x40, 0x40, 0x40, /* 144, PAGE1-16 */
+ 0x00, 0x00, 0x00, 0x00, /* 148, PAGE1-20 */
+ 0x00, 0x00, 0x00, 0x00, /* 152, PAGE1-24 */
+ 0x00, 0x00, 0x00, 0x00, /* 156, PAGE1-28 */
+ 0x00, 0x00, 0x00, 0x00, /* 160, PAGE1-32 */
+ 0x00, 0x00, 0x00, 0x00, /* 164, PAGE1-36 */
+ 0x00, 0x00, 0x00, 0x00, /* 168, PAGE1-40 */
+ 0x00, 0x00, 0x00, 0x00, /* 172, PAGE1-44 */
+ 0x00, 0x00, 0x00, 0x00, /* 176, PAGE1-48 */
+ 0x00, 0x00, 0x00, 0x00, /* 180, PAGE1-52 */
+ 0x00, 0x00, 0x00, 0x80, /* 184, PAGE1-56 */
+ 0x80, 0x00, 0x00, 0x00, /* 188, PAGE1-60 */
+ 0x00, 0x00, 0x00, 0x00, /* 192, PAGE1-64 */
+ 0x00, 0x00, 0x00, 0x00, /* 196, PAGE1-68 */
+ 0x00, 0x00, 0x00, 0x00, /* 200, PAGE1-72 */
+ 0x00, 0x00, 0x00, 0x00, /* 204, PAGE1-76 */
+ 0x00, 0x00, 0x00, 0x00, /* 208, PAGE1-80 */
+ 0x00, 0x00, 0x00, 0x00, /* 212, PAGE1-84 */
+ 0x00, 0x00, 0x00, 0x00, /* 216, PAGE1-88 */
+ 0x00, 0x00, 0x00, 0x00, /* 220, PAGE1-92 */
+ 0x00, 0x00, 0x00, 0x00, /* 224, PAGE1-96 */
+ 0x00, 0x00, 0x00, 0x00, /* 228, PAGE1-100 */
+ 0x00, 0x00, 0x00, 0x00, /* 232, PAGE1-104 */
+ 0x00, 0x00, 0x00, 0x00, /* 236, PAGE1-108 */
+ 0x00, 0x00, 0x00, 0x00, /* 240, PAGE1-112 */
+ 0x00, 0x00, 0x00, 0x00, /* 244, PAGE1-116 */
+ 0x00, 0x00, 0x00, 0x00, /* 248, PAGE1-120 */
+ 0x00, 0x00, 0x00, 0x00, /* 252, PAGE1-124 Page 1 Registers Ends Here */
+ 0x00, 0x00, 0x00, 0x00, /* 256, PAGE2-0 */
+ 0x00, 0x00, 0x00, 0x00, /* 260, PAGE2-4 */
+ 0x00, 0x00, 0x00, 0x00, /* 264, PAGE2-8 */
+ 0x00, 0x00, 0x00, 0x00, /* 268, PAGE2-12 */
+ 0x00, 0x00, 0x00, 0x00, /* 272, PAGE2-16 */
+ 0x00, 0x00, 0x00, 0x00, /* 276, PAGE2-20 */
+ 0x00, 0x00, 0x00, 0x00, /* 280, PAGE2-24 */
+ 0x00, 0x00, 0x00, 0x00, /* 284, PAGE2-28 */
+ 0x00, 0x00, 0x00, 0x00, /* 288, PAGE2-32 */
+ 0x00, 0x00, 0x00, 0x00, /* 292, PAGE2-36 */
+ 0x00, 0x00, 0x00, 0x00, /* 296, PAGE2-40 */
+ 0x00, 0x00, 0x00, 0x00, /* 300, PAGE2-44 */
+ 0x00, 0x00, 0x00, 0x00, /* 304, PAGE2-48 */
+ 0x00, 0x00, 0x00, 0x00, /* 308, PAGE2-52 */
+ 0x00, 0x00, 0x00, 0x00, /* 312, PAGE2-56 */
+ 0x00, 0x00, 0x00, 0x00, /* 316, PAGE2-60 */
+ 0x00, 0x00, 0x00, 0x00, /* 320, PAGE2-64 */
+ 0x00, 0x00, 0x00, 0x00, /* 324, PAGE2-68 */
+ 0x00, 0x00, 0x00, 0x00, /* 328, PAGE2-72 */
+ 0x00, 0x00, 0x00, 0x00, /* 332, PAGE2-76 */
+ 0x00, 0x00, 0x00, 0x00, /* 336, PAGE2-80 */
+ 0x00, 0x00, 0x00, 0x00, /* 340, PAGE2-84 */
+ 0x00, 0x00, 0x00, 0x00, /* 344, PAGE2-88 */
+ 0x00, 0x00, 0x00, 0x00, /* 348, PAGE2-92 */
+ 0x00, 0x00, 0x00, 0x00, /* 352, PAGE2-96 */
+ 0x00, 0x00, 0x00, 0x00, /* 356, PAGE2-100 */
+ 0x00, 0x00, 0x00, 0x00, /* 360, PAGE2-104 */
+ 0x00, 0x00, 0x00, 0x00, /* 364, PAGE2-108 */
+ 0x00, 0x00, 0x00, 0x00, /* 368, PAGE2-112*/
+ 0x00, 0x00, 0x00, 0x00, /* 372, PAGE2-116*/
+ 0x00, 0x00, 0x00, 0x00, /* 376, PAGE2-120*/
+ 0x00, 0x00, 0x00, 0x00, /* 380, PAGE2-124 Page 2 Registers Ends Here */
+ 0x00, 0x00, 0x00, 0x00, /* 384, PAGE3-0 */
+ 0x00, 0x00, 0x00, 0x00, /* 388, PAGE3-4 */
+ 0x00, 0x00, 0x00, 0x00, /* 392, PAGE3-8 */
+ 0x00, 0x00, 0x00, 0x00, /* 396, PAGE3-12 */
+ 0x00, 0x00, 0x00, 0x00, /* 400, PAGE3-16 */
+ 0x00, 0x00, 0x00, 0x00, /* 404, PAGE3-20 */
+ 0x00, 0x00, 0x00, 0x00, /* 408, PAGE3-24 */
+ 0x00, 0x00, 0x00, 0x00, /* 412, PAGE3-28 */
+ 0x00, 0x00, 0x00, 0x00, /* 416, PAGE3-32 */
+ 0x00, 0x00, 0x00, 0x00, /* 420, PAGE3-36 */
+ 0x00, 0x00, 0x00, 0x00, /* 424, PAGE3-40 */
+ 0x00, 0x00, 0x00, 0x00, /* 428, PAGE3-44 */
+ 0x00, 0x00, 0x00, 0x00, /* 432, PAGE3-48 */
+ 0x00, 0x00, 0x00, 0x00, /* 436, PAGE3-52 */
+ 0x00, 0x00, 0x00, 0x00, /* 440, PAGE3-56 */
+ 0x00, 0x00, 0x00, 0x00, /* 444, PAGE3-60 */
+ 0x00, 0x00, 0x00, 0x00, /* 448, PAGE3-64 */
+ 0x00, 0x00, 0x00, 0x00, /* 452, PAGE3-68 */
+ 0x00, 0x00, 0x00, 0x00, /* 456, PAGE3-72 */
+ 0x00, 0x00, 0x00, 0x00, /* 460, PAGE3-76 */
+ 0x00, 0x00, 0x00, 0x00, /* 464, PAGE3-80 */
+ 0x00, 0x00, 0x00, 0x00, /* 468, PAGE3-84 */
+ 0x00, 0x00, 0x00, 0x00, /* 472, PAGE3-88 */
+ 0x00, 0x00, 0x00, 0x00, /* 476, PAGE3-92 */
+ 0x00, 0x00, 0x00, 0x00, /* 480, PAGE3-96 */
+ 0x00, 0x00, 0x00, 0x00, /* 484, PAGE3-100 */
+ 0x00, 0x00, 0x00, 0x00, /* 488, PAGE3-104 */
+ 0x00, 0x00, 0x00, 0x00, /* 492, PAGE3-108 */
+ 0x00, 0x00, 0x00, 0x00, /* 496, PAGE3-112 */
+ 0x00, 0x00, 0x00, 0x00, /* 500, PAGE3-116 */
+ 0x00, 0x00, 0x00, 0x00, /* 504, PAGE3-120 */
+ 0x00, 0x00, 0x00, 0x00, /* 508, PAGE3-124 Page 3 Registers Ends Here */
+ 0x00, 0x00, 0x00, 0x00, /* 512, PAGE4-0 */
+ 0x00, 0x00, 0x00, 0x00, /* 516, PAGE4-4 */
+ 0x00, 0x00, 0x00, 0x00, /* 520, PAGE4-8 */
+ 0x00, 0x00, 0x00, 0x00, /* 524, PAGE4-12 */
+ 0x00, 0x00, 0x00, 0x00, /* 528, PAGE4-16 */
+ 0x00, 0x00, 0x00, 0x00, /* 532, PAGE4-20 */
+ 0x00, 0x00, 0x00, 0x00, /* 536, PAGE4-24 */
+ 0x00, 0x00, 0x00, 0x00, /* 540, PAGE4-28 */
+ 0x00, 0x00, 0x00, 0x00, /* 544, PAGE4-32 */
+ 0x00, 0x00, 0x00, 0x00, /* 548, PAGE4-36 */
+ 0x00, 0x00, 0x00, 0x00, /* 552, PAGE4-40 */
+ 0x00, 0x00, 0x00, 0x00, /* 556, PAGE4-44 */
+ 0x00, 0x00, 0x00, 0x00, /* 560, PAGE4-48 */
+ 0x00, 0x00, 0x00, 0x00, /* 564, PAGE4-52 */
+ 0x00, 0x00, 0x00, 0x00, /* 568, PAGE4-56 */
+ 0x00, 0x00, 0x00, 0x00, /* 572, PAGE4-60 */
+ 0x00, 0x00, 0x00, 0x00, /* 576, PAGE4-64 */
+ 0x00, 0x00, 0x00, 0x00, /* 580, PAGE4-68 */
+ 0x00, 0x00, 0x00, 0x00, /* 584, PAGE4-72 */
+ 0x00, 0x00, 0x00, 0x00, /* 588, PAGE4-76 */
+ 0x00, 0x00, 0x00, 0x00, /* 592, PAGE4-80 */
+ 0x00, 0x00, 0x00, 0x00, /* 596, PAGE4-84 */
+ 0x00, 0x00, 0x00, 0x00, /* 600, PAGE4-88 */
+ 0x00, 0x00, 0x00, 0x00, /* 604, PAGE4-92 */
+ 0x00, 0x00, 0x00, 0x00, /* 608, PAGE4-96 */
+ 0x00, 0x00, 0x00, 0x00, /* 612, PAGE4-100 */
+ 0x00, 0x00, 0x00, 0x00, /* 616, PAGE4-104 */
+ 0x00, 0x00, 0x00, 0x00, /* 620, PAGE4-108 */
+ 0x00, 0x00, 0x00, 0x00, /* 624, PAGE4-112 */
+ 0x00, 0x00, 0x00, 0x00, /* 628, PAGE4-116 */
+ 0x00, 0x00, 0x00, 0x00, /* 632, PAGE4-120 */
+ 0x00, 0x00, 0x00, 0x00, /* 636, PAGE4-124 Page 4 Registers Ends Here */
+
+};
+
+/*
+ *------------------------------------------------------------------------------
+ * aic3262 initialization data
+ * This table contains the register initialization values required for
+ * the AIC326x.
+ * These registers values (reg_val) are written into the respective AIC3262
+ * register offset (reg_offset) to initialize AIC326x.
+ * These values are used in aic3262_init() function only.
+ *------------------------------------------------------------------------------
+ */
+static const struct aic3262_configs aic3262_reg_init[] = {
+ /* CLOCKING */
+
+ {0, RESET_REG, 1},
+ {0, RESET_REG, 0},
+
+ {0, PASI_DAC_DP_SETUP, 0xc0}, /*DAC */
+ {0, DAC_MVOL_CONF, 0x00}, /*DAC un-muted*/
+ /* set default volumes */
+ {0, DAC_LVOL, 0x01},
+ {0, DAC_RVOL, 0x01},
+ {0, HPL_VOL, 0x3a},
+ {0, HPR_VOL, 0x3a},
+ {0, SPK_AMP_CNTL_R2, 0x14},
+ {0, SPK_AMP_CNTL_R3, 0x14},
+ {0, SPK_AMP_CNTL_R4, 0x33},
+ {0, REC_AMP_CNTL_R5, 0x82},
+ {0, RAMPR_VOL, 20},
+ {0, RAMP_CNTL_R1, 70},
+ {0, RAMP_CNTL_R2, 70},
+
+ /* DRC Defaults */
+ {0, DRC_CNTL_R1, 0x6c},
+ {0, DRC_CNTL_R2, 16},
+
+ /* DEPOP SETTINGS */
+ {0, HP_DEPOP, 0x14},
+ {0, RECV_DEPOP, 0x14},
+
+ {0, POWER_CONF, 0x00}, /* Disconnecting AVDD-DVD weak link*/
+ {0, REF_PWR_DLY, 0x01},
+ {0, CM_REG, 0x00}, /*CM - default*/
+ {0, LDAC_PTM, 0}, /*LDAC_PTM - default*/
+ {0, RDAC_PTM, 0}, /*RDAC_PTM - default*/
+ {0, HP_CTL, 0x30}, /*HP output percentage - at 75%*/
+ {0, LADC_VOL, 0x01}, /*LADC volume*/
+ {0, RADC_VOL, 0x01}, /*RADC volume*/
+
+ {0, DAC_ADC_CLKIN_REG, 0x33}, /*DAC ADC CLKIN*/
+ {0, PLL_CLKIN_REG, 0x00}, /*PLL CLKIN*/
+ {0, PLL_PR_POW_REG, 0x11}, /*PLL Power=0-down, P=1, R=1 vals*/
+ {0, 0x3d, 1},
+
+ {0, LMIC_PGA_PIN, 0x55}, /*IN1_L select - - 10k -LMICPGA_P*/
+ {0, LMIC_PGA_MIN, 0x40}, /*CM to LMICPGA-M*/
+ {0, RMIC_PGA_PIN, 0x55}, /*IN1_R select - - 10k -RMIC_PGA_P*/
+ {0, RMIC_PGA_MIN, 0x40}, /*CM to RMICPGA_M*/
+ {0, (PAGE_1 + 0x79), 33}, /*LMIC-PGA-POWERUP-DELAY - default*/
+ {0, (PAGE_1 + 0x7a), 1}, /*FIXMELATER*/
+
+
+ {0, ADC_CHANNEL_POW, 0xc2}, /*ladc, radc ON , SOFT STEP disabled*/
+ {0, ADC_FINE_GAIN, 0x00}, /*ladc - unmute, radc - unmute*/
+ {0, MICL_PGA, 0x4f},
+ {0, MICR_PGA, 0x4f},
+ {0, MIC_BIAS_CNTL, 0xFC},
+ /* ASI1 Configuration */
+ {0, ASI1_BUS_FMT, 0},
+ {0, ASI1_BWCLK_CNTL_REG, 0x00}, /* originally 0x24 */
+ {0, ASI1_BCLK_N_CNTL, 1},
+ {0, ASI1_BCLK_N, 0x84},
+
+ {0, MA_CNTL, 0}, /* Mixer Amp disabled */
+ {0, LINE_AMP_CNTL_R2, 0x00}, /* Line Amp Cntl disabled */
+
+ /* ASI2 Configuration */
+ {0, ASI2_BUS_FMT, 0},
+ {0, ASI2_BCLK_N_CNTL, 1},
+ {0, ASI2_BCLK_N, 0x84},
+ {0, ASI2_BWCLK_OUT_CNTL, 0x20},
+
+ {0, BEEP_CNTL_R1, 0x05},
+ {0, BEEP_CNTL_R2, 0x04},
+
+ /* Interrupt config for headset detection */
+ {0, INT1_CNTL, 0x80},
+ {0, INT_FMT, 0x40},
+ {0, GPIO1_IO_CNTL, 0x14},
+ {0, HP_DETECT, 0x94},
+
+#if defined(CONFIG_MINI_DSP)
+ {0, 60, 0},
+ {0, 61, 0},
+ /* Added the below set of values after consulting the miniDSP
+ * Program Section Array
+ */
+ {0, MINIDSP_ACCESS_CTRL, 0x00},
+#endif
+
+
+};
+
+static int reg_init_size =
+ sizeof(aic3262_reg_init) / sizeof(struct aic3262_configs);
+
+static const struct snd_kcontrol_new aic3262_snd_controls2[] = {
+
+ /* One control per input-routing switch; the DAPM switches below
+ * index this array from 0 (IN1L) through 7 (IN4R).
+ */
+ SOC_SINGLE("IN1L MPGA Route", LMIC_PGA_PIN, 6, 3, 0),
+ SOC_SINGLE("IN2L MPGA Route", LMIC_PGA_PIN, 4, 3, 0),
+ SOC_SINGLE("IN3L MPGA Route", LMIC_PGA_PIN, 2, 3, 0),
+ SOC_SINGLE("IN4L MPGA Route", LMIC_PGA_PM_IN4, 5, 1, 0),
+ SOC_SINGLE("IN1R MPGA Route", RMIC_PGA_PIN, 6, 3, 0),
+ SOC_SINGLE("IN2R MPGA Route", RMIC_PGA_PIN, 4, 3, 0),
+ SOC_SINGLE("IN3R MPGA Route", RMIC_PGA_PIN, 2, 3, 0),
+ SOC_SINGLE("IN4R MPGA Route", RMIC_PGA_PM_IN4, 5, 1, 0),
+};
+static const struct snd_soc_dapm_widget aic3262_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("Left DAC", "Playback", PASI_DAC_DP_SETUP, 7, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Playback", PASI_DAC_DP_SETUP, 6, 0),
+
+ SND_SOC_DAPM_SWITCH_N("LDAC_2_HPL", HP_AMP_CNTL_R1, 5, 0),
+ SND_SOC_DAPM_SWITCH_N("RDAC_2_HPR", HP_AMP_CNTL_R1, 4, 0),
+
+ SND_SOC_DAPM_PGA("HPL Driver", HP_AMP_CNTL_R1, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HPR Driver", HP_AMP_CNTL_R1, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH_N("LDAC_2_LOL", LINE_AMP_CNTL_R1, 7, 0),
+ SND_SOC_DAPM_SWITCH_N("RDAC_2_LOR", LINE_AMP_CNTL_R1, 6, 0),
+
+ SND_SOC_DAPM_PGA("LOL Driver", LINE_AMP_CNTL_R1, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("LOR Driver", LINE_AMP_CNTL_R1, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("SPKL Driver", SPK_AMP_CNTL_R1, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SPKR Driver", SPK_AMP_CNTL_R1, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("RECL Driver", REC_AMP_CNTL_R5, 7, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("RECR Driver", REC_AMP_CNTL_R5, 6, 0, NULL, 0),
+
+ SND_SOC_DAPM_ADC("Left ADC", "Capture", ADC_CHANNEL_POW, 7, 0),
+ SND_SOC_DAPM_ADC("Right ADC", "Capture", ADC_CHANNEL_POW, 6, 0),
+
+ SND_SOC_DAPM_SWITCH("IN1L Route",
+ LMIC_PGA_PIN, 6, 0, &aic3262_snd_controls2[0]),
+ SND_SOC_DAPM_SWITCH("IN2L Route",
+ LMIC_PGA_PIN, 4, 0, &aic3262_snd_controls2[1]),
+ SND_SOC_DAPM_SWITCH("IN3L Route",
+ LMIC_PGA_PIN, 2, 0, &aic3262_snd_controls2[2]),
+ SND_SOC_DAPM_SWITCH("IN4L Route",
+ LMIC_PGA_PM_IN4, 5, 0, &aic3262_snd_controls2[3]),
+ SND_SOC_DAPM_SWITCH("IN1R Route",
+ RMIC_PGA_PIN, 6, 0, &aic3262_snd_controls2[4]),
+ SND_SOC_DAPM_SWITCH("IN2R Route",
+ RMIC_PGA_PIN, 4, 0, &aic3262_snd_controls2[5]),
+ SND_SOC_DAPM_SWITCH("IN3R Route",
+ RMIC_PGA_PIN, 2, 0, &aic3262_snd_controls2[6]),
+ SND_SOC_DAPM_SWITCH("IN4R Route",
+ RMIC_PGA_PM_IN4, 5, 0, &aic3262_snd_controls2[7]),
+
+ SND_SOC_DAPM_OUTPUT("HPL"),
+ SND_SOC_DAPM_OUTPUT("HPR"),
+ SND_SOC_DAPM_OUTPUT("LOL"),
+ SND_SOC_DAPM_OUTPUT("LOR"),
+ SND_SOC_DAPM_OUTPUT("SPKL"),
+ SND_SOC_DAPM_OUTPUT("SPKR"),
+ SND_SOC_DAPM_OUTPUT("RECL"),
+ SND_SOC_DAPM_OUTPUT("RECR"),
+
+ SND_SOC_DAPM_INPUT("IN1L"),
+ SND_SOC_DAPM_INPUT("IN2L"),
+ SND_SOC_DAPM_INPUT("IN3L"),
+ SND_SOC_DAPM_INPUT("IN4L"),
+
+ SND_SOC_DAPM_INPUT("IN1R"),
+ SND_SOC_DAPM_INPUT("IN2R"),
+ SND_SOC_DAPM_INPUT("IN3R"),
+ SND_SOC_DAPM_INPUT("IN4R"),
+};
+
+static const struct snd_soc_dapm_route aic3262_dapm_routes[] = {
+ {"LDAC_2_HPL", NULL, "Left DAC"},
+ {"HPL Driver", NULL, "LDAC_2_HPL"},
+ {"HPL", NULL, "HPL Driver"},
+ {"RDAC_2_HPR", NULL, "Right DAC"},
+ {"HPR Driver", NULL, "RDAC_2_HPR"},
+ {"HPR", NULL, "HPR Driver"},
+
+ {"LDAC_2_LOL", NULL, "Left DAC"},
+ {"LOL Driver", NULL, "LDAC_2_LOL"},
+ {"LOL", NULL, "LOL Driver"},
+ {"RDAC_2_LOR", NULL, "Right DAC"},
+ {"LOR Driver", NULL, "RDAC_2_LOR"},
+ {"LOR", NULL, "LOR Driver"},
+
+ {"SPKL Driver", NULL, "LOL"},
+ {"SPKL", NULL, "SPKL Driver"},
+ {"SPKR Driver", NULL, "LOR"},
+ {"SPKR", NULL, "SPKR Driver"},
+
+ {"RECL Driver", NULL, "LOL"},
+ {"RECL", NULL, "RECL Driver"},
+ {"RECR Driver", NULL, "LOR"},
+ {"RECR", NULL, "RECR Driver"},
+
+ {"Left ADC", "IN1L Route", "IN1L"},
+ {"Left ADC", "IN2L Route", "IN2L"},
+ {"Left ADC", "IN3L Route", "IN3L"},
+ {"Left ADC", "IN4L Route", "IN4L"},
+
+ {"Right ADC", "IN1R Route", "IN1R"},
+ {"Right ADC", "IN2R Route", "IN2R"},
+ {"Right ADC", "IN3R Route", "IN3R"},
+ {"Right ADC", "IN4R Route", "IN4R"},
+/*
+ {"LOL Driver", NULL, "IN1L"},
+ {"LOR Driver", NULL, "IN1R"},
+*/
+};
+
+/*
+ *****************************************************************************
+ * Function Definitions
+ *****************************************************************************
+ */
+
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_change_page
+ * Purpose : This function is used to switch between the codec register pages.
+ *
+ *----------------------------------------------------------------------------
+ */
+int aic3262_change_page(struct snd_soc_codec *codec, u8 new_page)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 data[2];
+ int ret = 0;
+
+ data[0] = 0;
+ data[1] = new_page;
+ aic3262->page_no = new_page;
+
+#if defined(LOCAL_REG_ACCESS)
+ if (codec->hw_write(codec->control_data, data, 2) != 2)
+ ret = -EIO;
+#else
+ ret = snd_soc_write(codec, data[0], data[1]);
+#endif
+ if (ret)
+ printk(KERN_ERR "Error in changing page to %d\n", new_page);
+
+ /*DBG("# Changing page to %d\r\n", new_page);*/
+
+ return ret;
+}
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_change_book
+ * Purpose : This function is to switch between books
+ *
+ *----------------------------------------------------------------------------
+ */
+int aic3262_change_book(struct snd_soc_codec *codec, u8 new_book)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 data[2];
+ int ret = 0;
+
+ data[0] = 0x7F;
+ data[1] = new_book;
+ aic3262->book_no = new_book;
+
+ ret = aic3262_change_page(codec, 0);
+ if (ret)
+ return ret;
+
+#if defined(LOCAL_REG_ACCESS)
+ if (codec->hw_write(codec->control_data, data, 2) != 2)
+ ret = -EIO;
+#else
+ ret = snd_soc_write(codec, data[0], data[1]);
+#endif
+ if (ret)
+ printk(KERN_ERR "Error in changing Book\n");
+
+ /*DBG("# Changing book to %d\r\n", new_book);*/
+
+ return ret;
+}
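+
+/*
+* Illustrative note: reaching a register in another book therefore takes
+* two select writes before the access itself, e.g. for a hypothetical
+* register at book B, page P:
+*
+* aic3262_change_book(codec, B); writes book-select (page 0, reg 0x7F)
+* aic3262_change_page(codec, P); writes page-select (reg 0x00)
+* ...ordinary register reads/writes on page P of book B...
+*
+* aic3262_change_book() already switches back to page 0 internally before
+* writing the book-select register.
+*/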
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_write_reg_cache
+ * Purpose : This function is to write aic3262 register cache
+ *
+ *----------------------------------------------------------------------------
+ */
+void aic3262_write_reg_cache(struct snd_soc_codec *codec,
+ u16 reg, u8 value)
+{
+#if defined(EN_REG_CACHE)
+ u8 *cache = codec->reg_cache;
+
+ if (reg >= AIC3262_CACHEREGNUM)
+ return;
+
+ if (cache)
+ cache[reg] = value;
+#endif
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_read
+ * Purpose : This function is to read the aic3262 register space.
+ *
+ *----------------------------------------------------------------------------
+ */
+u8 aic3262_read(struct snd_soc_codec *codec, u16 reg)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 value;
+ u8 page = reg / 128;
+
+ reg = reg % 128;
+
+ if (aic3262->page_no != page)
+ aic3262_change_page(codec, page);
+
+#if defined(LOCAL_REG_ACCESS)
+ i2c_master_send(codec->control_data, (char *)&reg, 1);
+ i2c_master_recv(codec->control_data, &value, 1);
+#else
+ value = snd_soc_read(codec, reg);
+#endif
+ /*DBG("r %2x %02x\r\n", reg, value);*/
+ return value;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_write
+ * Purpose : This function is to write to the aic3262 register space.
+ *
+ *----------------------------------------------------------------------------
+ */
+int aic3262_write(struct snd_soc_codec *codec, u16 reg, u8 value)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 data[2];
+ u8 page;
+ int ret = 0;
+
+ page = reg / 128;
+ data[AIC3262_REG_OFFSET_INDEX] = reg % 128;
+ if (aic3262->page_no != page)
+ aic3262_change_page(codec, page);
+
+ /* data is
+ * D15..D8 aic3262 register offset
+ * D7...D0 register data
+ */
+ data[AIC3262_REG_DATA_INDEX] = value & AIC3262_8BITS_MASK;
+#if defined(EN_REG_CACHE)
+ if ((page >= 0) & (page <= 4))
+ aic3262_write_reg_cache(codec, reg, value);
+
+#endif
+ if (!data[AIC3262_REG_OFFSET_INDEX]) {
+ /* if the write is to reg0 update aic3262->page_no */
+ aic3262->page_no = value;
+ }
+
+ /*DBG("w %2x %02x\r\n",
+ data[AIC3262_REG_OFFSET_INDEX], data[AIC3262_REG_DATA_INDEX]);*/
+
+#if defined(LOCAL_REG_ACCESS)
+ if (codec->hw_write(codec->control_data, data, 2) != 2)
+ ret = -EIO;
+#else
+ ret = snd_soc_write(codec, data[AIC3262_REG_OFFSET_INDEX],
+ data[AIC3262_REG_DATA_INDEX]);
+#endif
+ if (ret)
+ printk(KERN_ERR "Error in i2c write\n");
+
+ return ret;
+}
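+
+/*
+* Illustrative sketch, for reference only (not used by the driver):
+* callers encode a register as a flat index, page * 128 + offset, which
+* aic3262_read() and aic3262_write() above split back into a page number
+* and a 7-bit offset. A hypothetical helper macro making the encoding
+* explicit:
+*/
+#if 0 /* example only */
+#define AIC3262_MAKE_REG(page, offset) (((page) * 128) + ((offset) & 0x7f))
+/* e.g. AIC3262_MAKE_REG(1, 0x79) equals the (PAGE_1 + 0x79) used above */
+#endif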
+
+/*
+ *------------------------------------------------------------------------------
+ * Function : aic3262_write__
+ * Purpose : This function is to write to the aic3262 register space.
+ * (low level).
+ *------------------------------------------------------------------------------
+ */
+
+int aic3262_write__(struct i2c_client *client, const char *buf, int count)
+{
+ u8 data[2];
+ int ret;
+
+ /* only two bytes (register offset and value) are ever transmitted */
+ data[0] = *buf;
+ data[1] = *(buf+1);
+ /*DBG("w %2x %02x\r\n",
+ data[AIC3262_REG_OFFSET_INDEX], data[AIC3262_REG_DATA_INDEX]);*/
+ ret = i2c_master_send(client, data, 2);
+ if (ret < 2) {
+ printk(
+ KERN_ERR "I2C write Error : bytes written = %d\n\n", ret);
+ return -EIO;
+ }
+
+ return ret;
+}
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_reset_cache
+ * Purpose : This function is to reset the cache.
+ *----------------------------------------------------------------------------
+ */
+int aic3262_reset_cache(struct snd_soc_codec *codec)
+{
+#if defined(EN_REG_CACHE)
+ if (codec->reg_cache) {
+ memcpy(codec->reg_cache, aic3262_reg, sizeof(aic3262_reg));
+ return 0;
+ }
+
+ codec->reg_cache = kmemdup(aic3262_reg,
+ sizeof(aic3262_reg), GFP_KERNEL);
+ if (!codec->reg_cache) {
+ printk(KERN_ERR "aic32x4: kmemdup failed\n");
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_get_divs
+ * Purpose : This function is to get required divisor from the "aic3262_divs"
+ * table.
+ *
+ *----------------------------------------------------------------------------
+ */
+static inline int aic3262_get_divs(int mclk, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aic3262_divs); i++) {
+ if ((aic3262_divs[i].rate == rate)
+ && (aic3262_divs[i].mclk == mclk)) {
+ DBG(KERN_INFO "#%s: Found Entry %d in Clock_Array\n",
+ __func__, i);
+ return i;
+ }
+ }
+ printk(KERN_ERR "Master clock and sample rate is not supported\n");
+ return -EINVAL;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_add_controls
+ * Purpose : This function is to add non dapm kcontrols. The different
+ * controls are in "aic3262_snd_controls" table.
+ * The following different controls are supported
+ * # PCM Playback volume control
+ * # PCM Playback Volume
+ * # HP Driver Gain
+ * # HP DAC Playback Switch
+ * # PGA Capture Volume
+ * # Program Registers
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_add_controls(struct snd_soc_codec *codec)
+{
+ int err;
+
+ DBG("%s++\n", __func__);
+
+ err = snd_soc_add_controls(codec, aic3262_snd_controls,
+ ARRAY_SIZE(aic3262_snd_controls));
+ if (err < 0) {
+ printk(KERN_ERR "Invalid control\n");
+ return err;
+ }
+
+ err = snd_soc_add_controls(codec, aic3262_snd_controls2,
+ ARRAY_SIZE(aic3262_snd_controls2));
+ if (err < 0) {
+ printk(KERN_ERR "Invalid control\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_add_widgets
+ * Purpose : This function is to add the dapm widgets
+ * The following are the main widgets supported
+ * # Left DAC to Left Outputs
+ * # Right DAC to Right Outputs
+ * # Left Inputs to Left ADC
+ * # Right Inputs to Right ADC
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_add_widgets(struct snd_soc_codec *codec)
+{
+ int ret;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+#ifndef AIC3262_MULTI_I2S
+ int i;
+ for (i = 0; i < ARRAY_SIZE(aic3262_dapm_widgets); i++)
+ ret = snd_soc_dapm_new_control(dapm, &aic3262_dapm_widgets[i]);
+#else
+ ret = snd_soc_dapm_new_controls(dapm, aic3262_dapm_widgets,
+ ARRAY_SIZE(aic3262_dapm_widgets));
+ if (ret != 0) {
+ printk(KERN_ERR "#%s: Unable to add DAPM Controls. Err %d\n",
+ __func__, ret);
+ }
+#endif
+ /* set up audio path interconnects */
+ DBG("#Completed adding new dapm widget controls size=%d\n",
+ ARRAY_SIZE(aic3262_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, aic3262_dapm_routes,
+ ARRAY_SIZE(aic3262_dapm_routes));
+ DBG("#Completed adding DAPM routes\n");
+ /*snd_soc_dapm_new_widgets(codec);*/
+ DBG("#Completed updating dapm\n");
+ return 0;
+}
+/*
+ *----------------------------------------------------------------------------
+ * Function : reg_def_conf
+ * Purpose : This function programs the default values into the codec book 0 registers.
+ *
+ *----------------------------------------------------------------------------
+ */
+int reg_def_conf(struct snd_soc_codec *codec)
+{
+ int i = 0, ret;
+ DBG(KERN_INFO "#%s: Invoked..\n", __func__);
+
+ ret = aic3262_change_page(codec, 0);
+ if (ret != 0)
+ return ret;
+
+ ret = aic3262_change_book(codec, 0);
+ if (ret != 0)
+ return ret;
+
+ /* Configure the Codec with the default Initialization Values */
+ for (i = 0; i < reg_init_size; i++) {
+ ret = aic3262_write(codec, aic3262_reg_init[i].reg_offset,
+ aic3262_reg_init[i].reg_val);
+ if (ret)
+ break;
+ }
+ DBG(KERN_INFO "#%s: Done..\n", __func__);
+ return ret;
+}
+
+/*
+ * i2c_verify_book0
+ *
+ * This function is used to dump the values of the Book 0 Pages.
+ */
+int i2c_verify_book0(struct snd_soc_codec *codec)
+{
+ int i, j, k = 0;
+ u8 val1;
+
+ DBG("starting i2c_verify\n");
+ DBG("Resetting page to 0\n");
+ aic3262_change_book(codec, 0);
+ for (j = 0; j < 3; j++) {
+ if (j == 0) {
+ aic3262_change_page(codec, 0);
+ k = 0;
+ }
+ if (j == 1) {
+ aic3262_change_page(codec, 1);
+ k = 1;
+ }
+ /*
+ if (j == 2) {
+ aic3262_change_page(codec, 4);
+ k = 4;
+ }*/
+ for (i = 0; i <= 127; i++) {
+#if defined(LOCAL_REG_ACCESS)
+ val1 = i2c_smbus_read_byte_data(codec->control_data, i);
+#else
+ val1 = snd_soc_read(codec, i);
+#endif
+ /* printk("[%d][%d]=[0x%2x]\n",k,i,val1); */
+ }
+ }
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_set_bias_level
+ * Purpose : This function is triggered when DAPM events occur.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ u8 value;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ DBG(KERN_INFO "#%s: Codec Active %d[%d]\n",
+ __func__, codec->active, aic3262->active_count);
+ switch (level) {
+ /* full On */
+ case SND_SOC_BIAS_ON:
+ DBG(KERN_INFO "#aic3262 codec : set_bias_on started\n");
+ case SND_SOC_BIAS_PREPARE:
+ /* all power is driven by DAPM system */
+ DBG(KERN_INFO "#aic3262 codec : set_bias_prepare started\n");
+
+ /* Switch on PLL */
+ value = aic3262_read(codec, PLL_PR_POW_REG);
+ aic3262_write(codec, PLL_PR_POW_REG, ((value | 0x80)));
+
+ /* Switch on NDAC Divider */
+ value = aic3262_read(codec, NDAC_DIV_POW_REG);
+ aic3262_write(codec, NDAC_DIV_POW_REG,
+ ((value & 0x7f) | (0x80)));
+
+ /* Switch on MDAC Divider */
+ value = aic3262_read(codec, MDAC_DIV_POW_REG);
+ aic3262_write(codec, MDAC_DIV_POW_REG,
+ ((value & 0x7f) | (0x80)));
+
+ /* Switch on NADC Divider */
+ value = aic3262_read(codec, NADC_DIV_POW_REG);
+ aic3262_write(codec, NADC_DIV_POW_REG,
+ ((value & 0x7f) | (0x80)));
+
+ /* Switch on MADC Divider */
+ value = aic3262_read(codec, MADC_DIV_POW_REG);
+ aic3262_write(codec, MADC_DIV_POW_REG,
+ ((value & 0x7f) | (0x80)));
+
+
+ aic3262_write(codec, ADC_CHANNEL_POW, 0xc2);
+ aic3262_write(codec, ADC_FINE_GAIN, 0x00);
+
+ DBG("#aic3262 codec : set_bias_on complete\n");
+
+ break;
+
+
+ /* Off, with power */
+ case SND_SOC_BIAS_STANDBY:
+ /*
+ * all power is driven by DAPM system,
+ * so output power is safe if bypass was set
+ */
+
+ DBG("#aic3262 codec : set_bias_stby inside if condn\n");
+
+ if (!aic3262->active_count) {
+ /* Switch off NDAC Divider */
+ value = aic3262_read(codec, NDAC_DIV_POW_REG);
+ aic3262_write(codec, NDAC_DIV_POW_REG,
+ (value & 0x7f));
+
+ /* Switch off MDAC Divider */
+ value = aic3262_read(codec, MDAC_DIV_POW_REG);
+ aic3262_write(codec, MDAC_DIV_POW_REG,
+ (value & 0x7f));
+
+ /* Switch off NADC Divider */
+ value = aic3262_read(codec, NADC_DIV_POW_REG);
+ aic3262_write(codec, NADC_DIV_POW_REG,
+ (value & 0x7f));
+
+ /* Switch off MADC Divider */
+ value = aic3262_read(codec, MADC_DIV_POW_REG);
+ aic3262_write(codec, MADC_DIV_POW_REG,
+ (value & 0x7f));
+
+ /* Switch off PLL */
+ value = aic3262_read(codec, PLL_PR_POW_REG);
+ aic3262_write(codec, PLL_PR_POW_REG, (value & 0x7f));
+
+ DBG("#%s: set_bias_stby complete\n", __func__);
+ } else
+ DBG(KERN_INFO
+ "#%s: Another Stream Active. No STANDBY\n", __func__);
+ break;
+
+ /* Off, without power */
+ case SND_SOC_BIAS_OFF:
+ /* force all power off */
+
+ /* Switch off PLL */
+ value = aic3262_read(codec, PLL_PR_POW_REG);
+ aic3262_write(codec,
+ PLL_PR_POW_REG, (value & ~(0x01 << 7)));
+
+ /* Switch off NDAC Divider */
+ value = aic3262_read(codec, NDAC_DIV_POW_REG);
+ aic3262_write(codec, NDAC_DIV_POW_REG,
+ (value & ~(0x01 << 7)));
+
+ /* Switch off MDAC Divider */
+ value = aic3262_read(codec, MDAC_DIV_POW_REG);
+ aic3262_write(codec, MDAC_DIV_POW_REG,
+ (value & ~(0x01 << 7)));
+
+ /* Switch off NADC Divider */
+ value = aic3262_read(codec, NADC_DIV_POW_REG);
+ aic3262_write(codec, NADC_DIV_POW_REG,
+ (value & ~(0x01 << 7)));
+
+ /* Switch off MADC Divider */
+ value = aic3262_read(codec, MADC_DIV_POW_REG);
+ aic3262_write(codec, MADC_DIV_POW_REG,
+ (value & ~(0x01 << 7)));
+ value = aic3262_read(codec, ASI1_BCLK_N);
+
+ break;
+ }
+ codec->dapm.bias_level = level;
+ DBG(KERN_INFO "#aic3262 codec : set_bias exiting\n");
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_suspend
+ * Purpose : This function is to suspend the AIC3262 driver.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ DBG(KERN_INFO "#%s: Invoked..\n", __func__);
+ if (aic3262)
+ disable_irq(aic3262->irq);
+
+ aic3262_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_resume
+ * Purpose : This function is to resume the AIC3262 driver
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_resume(struct snd_soc_codec *codec)
+{
+ int i;
+ u8 data[2];
+ int ret = 0;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u8 *cache = codec->reg_cache;
+ DBG(KERN_INFO "#%s: Invoked..\n", __func__);
+
+ ret = aic3262_change_page(codec, 0);
+ if (ret)
+ return ret;
+#if defined(EN_REG_CACHE)
+ /* Sync reg_cache with the hardware */
+ for (i = 0; i < ARRAY_SIZE(aic3262_reg); i++) {
+ data[0] = i % 128;
+ data[1] = cache[i];
+#if defined(LOCAL_REG_ACCESS)
+ codec->hw_write(codec->control_data, data, 2);
+#else
+ ret = snd_soc_write(codec, data[0], data[1]);
+ if (ret)
+ break;
+#endif
+ }
+#endif
+ if (!ret) {
+ aic3262_change_page(codec, 0);
+ aic3262_set_bias_level(codec, SND_SOC_BIAS_ON);
+
+ if (aic3262)
+ enable_irq(aic3262->irq);
+ }
+ return ret;
+}
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_hw_read
+ * Purpose : This is a low level hardware read function.
+ *
+ *----------------------------------------------------------------------------
+ */
+unsigned int aic3262_hw_read(struct snd_soc_codec *codec, unsigned int count)
+{
+ struct i2c_client *client = codec->control_data;
+ unsigned int buf;
+
+ if (count > (sizeof(unsigned int)))
+ return 0;
+
+ i2c_master_recv(client, (char *)&buf, count);
+ return buf;
+}
+
+/*
+* aic3262_jack_handler
+*
+* This function is called from the Interrupt Handler
+* to check the status of the AIC3262 Registers related to Headset Detection
+*/
+static irqreturn_t aic3262_jack_handler(int irq, void *data)
+{
+ struct snd_soc_codec *codec = data;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+ unsigned int micbits, hsbits = 0;
+
+ DBG("%s++\n", __func__);
+
+ aic3262_change_page(codec, 0);
+
+ /* Read the Jack Status Register*/
+ value = aic3262_read(codec, STICKY_FLAG2);
+ DBG(KERN_INFO "reg44 0x%x\n", value);
+
+ value = aic3262_read(codec, INT_FLAG2);
+ DBG("reg46 0x%x\n", value);
+
+ value = aic3262_read(codec, DAC_FLAG_R1);
+ DBG("reg37 0x%x\n", value);
+
+ micbits = value & DAC_FLAG_MIC_MASKBITS;
+ DBG("micbits 0x%x\n", micbits);
+
+ hsbits = value & DAC_FLAG_HS_MASKBITS;
+ DBG("hsbits 0x%x\n", hsbits);
+
+ /* sleep for debounce time */
+ /*msleep(aic3262->pdata->debounce_time_ms);*/
+
+ /* No Headphone or Headset*/
+ if (!micbits && !hsbits) {
+ DBG("no headset/headphone\n");
+ snd_soc_jack_report(aic3262->headset_jack,
+ 0, SND_JACK_HEADSET);
+ }
+
+ /* Headphone Detected */
+ if ((micbits == DAC_FLAG_R1_NOMIC) || (hsbits)) {
+ DBG("headphone\n");
+ snd_soc_jack_report(aic3262->headset_jack,
+ SND_JACK_HEADPHONE, SND_JACK_HEADSET);
+ }
+
+ /* Headset Detected - only with capless */
+ if (micbits == DAC_FLAG_R1_MIC) {
+ DBG("headset\n");
+ snd_soc_jack_report(aic3262->headset_jack,
+ SND_JACK_HEADSET, SND_JACK_HEADSET);
+ }
+
+ DBG("%s--\n", __func__);
+ return IRQ_HANDLED;
+}
+
+/*
+* aic326x_headset_detect
+*
+* Call-back function called to check the status of Headset Pin.
+*/
+int aic326x_headset_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, int jack_type)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ aic3262->headset_jack = jack;
+
+ /*TODO*/
+ aic3262_jack_handler(aic3262->irq, codec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aic326x_headset_detect);
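+
+/*
+* Illustrative sketch, for reference only (not used by the driver): a
+* machine driver would normally create a snd_soc_jack and hand it to the
+* codec through aic326x_headset_detect() from its dai_link .init()
+* callback. The jack and function names below are placeholders.
+*/
+#if 0 /* example only */
+static struct snd_soc_jack example_hs_jack;
+
+static int example_aic3262_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+
+ ret = snd_soc_jack_new(rtd->codec, "Headset Jack",
+ SND_JACK_HEADSET, &example_hs_jack);
+ if (ret)
+ return ret;
+
+ return aic326x_headset_detect(rtd->codec, &example_hs_jack,
+ SND_JACK_HEADSET);
+}
+#endif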
+
+
+#ifdef AIC3262_MULTI_I2S
+/*
+* aic3262_asi_default_config
+*
+* This function is used to perform the default pin configurations for
+* the functionalities which are specific to each ASI Port of the AIC3262
+* Audio Codec Chipset. The user is encouraged to change these values
+* if required on their platforms.
+*/
+static void aic3262_asi_default_config(struct snd_soc_codec *codec)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u16 counter;
+
+ DBG(KERN_INFO
+ "#%s: Invoked. Will Config ASI Registers to Defaults..\n",
+ __func__);
+ for (counter = 0; counter < MAX_ASI_COUNT; counter++) {
+ aic3262->asiCtxt[counter].asi_active = 0;
+ aic3262->asiCtxt[counter].bclk_div = 1;
+ aic3262->asiCtxt[counter].wclk_div = 1;
+ aic3262->asiCtxt[counter].port_muted = 1;
+ aic3262->asiCtxt[counter].bclk_div_option =
+ BDIV_CLKIN_DAC_MOD_CLK;
+ aic3262->asiCtxt[counter].offset1 = 0;
+ aic3262->asiCtxt[counter].offset2 = 0;
+ }
+ /* ASI1 Defaults */
+ aic3262->asiCtxt[0].bclk_output = ASI1_BCLK_DIVIDER_OUTPUT;
+ aic3262->asiCtxt[0].wclk_output = GENERATED_DAC_FS;
+ aic3262->asiCtxt[0].left_dac_output = DAC_PATH_LEFT;
+ aic3262->asiCtxt[0].right_dac_output = DAC_PATH_LEFT;
+ aic3262->asiCtxt[0].adc_input = ADC_PATH_MINIDSP_1;
+ aic3262->asiCtxt[0].dout_option = ASI_OUTPUT;
+
+ /* ASI2 Defaults */
+ aic3262->asiCtxt[1].bclk_output = ASI2_BCLK_DIVIDER_OUTPUT;
+ aic3262->asiCtxt[1].wclk_output = GENERATED_DAC_FS;
+ aic3262->asiCtxt[1].left_dac_output = DAC_PATH_LEFT;
+ aic3262->asiCtxt[1].right_dac_output = DAC_PATH_LEFT;
+ aic3262->asiCtxt[1].adc_input = ADC_PATH_MINIDSP_2;
+ aic3262->asiCtxt[1].dout_option = ASI_OUTPUT;
+
+ /* ASI3 Defaults */
+ aic3262->asiCtxt[2].bclk_output = ASI3_BCLK_DIVIDER_OUTPUT;
+ aic3262->asiCtxt[2].wclk_output = GENERATED_DAC_FS;
+ aic3262->asiCtxt[2].left_dac_output = DAC_PATH_LEFT;
+ aic3262->asiCtxt[2].right_dac_output = DAC_PATH_LEFT;
+ aic3262->asiCtxt[2].adc_input = ADC_PATH_MINIDSP_3;
+ aic3262->asiCtxt[2].dout_option = ASI2_INPUT;
+
+ return;
+}
+
+#endif /* #ifdef AIC3262_MULTI_I2S */
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_probe
+ * Purpose : This is the first driver function called by the SoC core driver.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_probe(struct snd_soc_codec *codec)
+{
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ DBG(KERN_INFO "#%s: Invoked..\n", __func__);
+
+#if defined(EN_REG_CACHE)
+ codec->reg_cache =
+ kmemdup(aic3262_reg, sizeof(aic3262_reg), GFP_KERNEL);
+
+ if (!codec->reg_cache) {
+ printk(KERN_ERR "aic3262: kmemdup failed\n");
+ return -ENOMEM;
+ }
+#else
+ /* Set cache bypass so that the cached registers are not overwritten.
+ * The codec registers span several pages, which the common cache
+ * code does not handle properly, so bypass it on writes and save
+ * the values through a separate call.
+ */
+ codec->cache_bypass = 1;
+#endif
+
+#if defined(LOCAL_REG_ACCESS)
+ codec->control_data = aic3262->control_data;
+ codec->hw_write = (hw_write_t) aic3262_write__;
+ codec->hw_read = aic3262_hw_read;
+#else
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+#endif
+ ret = reg_def_conf(codec);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to init TI codec: %d\n", ret);
+ return ret;
+ }
+
+ if (aic3262->irq) {
+ /* audio interrupt */
+ ret = request_threaded_irq(aic3262->irq, NULL,
+ aic3262_jack_handler,
+ IRQF_TRIGGER_FALLING,
+ "tlv320aic3262", codec);
+ if (ret) {
+ printk(KERN_INFO "#%s: IRQ Registration failed..[%d]",
+ __func__, ret);
+ dev_err(codec->dev, "Failed to request IRQ: %d\n", ret);
+ return ret;
+ } else
+ DBG(KERN_INFO
+ "#%s: irq Registration for IRQ %d done..\n",
+ __func__, aic3262->irq);
+ } else {
+ DBG(KERN_INFO "#%s: I2C IRQ Configuration is Wrong. \
+ Please check it..\n", __func__);
+ }
+
+ aic3262_asi_default_config(codec);
+
+ /* off, with power on */
+ aic3262_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ aic3262_add_controls(codec);
+ aic3262_add_widgets(codec);
+ /*TODO*/
+ aic3262_write(codec, MIC_BIAS_CNTL, 0x66);
+
+#ifdef CONFIG_MINI_DSP
+ /* Program MINI DSP for ADC and DAC */
+ aic3262_minidsp_program(codec);
+ aic3262_add_minidsp_controls(codec);
+ aic3262_change_book(codec, 0x0);
+#endif
+
+#ifdef MULTIBYTE_CONFIG_SUPPORT
+ aic3262_add_multiconfig_controls(codec);
+#endif
+
+ DBG(KERN_INFO "#%s: done..\n", __func__);
+ return ret;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_remove
+ * Purpose : to remove aic3262 soc device
+ *
+ *----------------------------------------------------------------------------
+ */
+static int aic3262_remove(struct snd_soc_codec *codec)
+{
+
+ /* power down chip */
+ aic3262_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+
+/*
+ *----------------------------------------------------------------------------
+ * @struct snd_soc_codec_device |
+ * This is the SoC audio codec driver structure, which points to the
+ * basic functions aic3262_probe(), aic3262_remove(),
+ * aic3262_suspend() and aic3262_resume().
+ *----------------------------------------------------------------------------
+ */
+static struct snd_soc_codec_driver soc_codec_dev_aic3262 = {
+ .probe = aic3262_probe,
+ .remove = aic3262_remove,
+ .suspend = aic3262_suspend,
+ .resume = aic3262_resume,
+ .set_bias_level = aic3262_set_bias_level,
+#if defined(LOCAL_REG_ACCESS)
+ .read = aic3262_read,
+ .write = aic3262_write,
+#endif
+#if !defined(EN_REG_CACHE)
+ .reg_cache_size = ARRAY_SIZE(aic3262_reg),
+ .reg_word_size = sizeof(u8),
+ .reg_cache_default = aic3262_reg,
+#endif
+};
+
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_codec_probe
+ * Purpose : This function attaches the i2c client and initializes
+ * AIC3262 CODEC.
+ * NOTE:
+ * This function is called from i2c core when the I2C address is
+ * valid.
+ * If the i2c layer weren't so broken, we could pass this kind of
+ * data around
+ *
+ *----------------------------------------------------------------------------
+ */
+static __devinit int aic3262_codec_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret;
+
+ struct aic3262_priv *aic3262;
+
+ DBG(KERN_INFO "#%s: Entered\n", __func__);
+
+ aic3262 = kzalloc(sizeof(struct aic3262_priv), GFP_KERNEL);
+
+ if (!aic3262) {
+ printk(KERN_ERR "#%s: Unable to Allocate Priv struct..\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(i2c, aic3262);
+#if defined(LOCAL_REG_ACCESS)
+ aic3262->control_data = i2c;
+#endif
+ aic3262->control_type = SND_SOC_I2C;
+ aic3262->irq = i2c->irq;
+ aic3262->pdata = i2c->dev.platform_data;
+
+ /* The configuration defaults to 3, which holds the MAIN patch
+ * configuration.
+ */
+ aic3262->current_dac_config[0] = -1;
+ aic3262->current_dac_config[1] = -1;
+ aic3262->current_adc_config[0] = -1;
+ aic3262->current_adc_config[1] = -1;
+
+ aic3262->mute_codec = 1;
+
+ aic3262->page_no = 0;
+ aic3262->book_no = 0;
+ aic3262->active_count = 0;
+ aic3262->dac_clkin_option = 3;
+ aic3262->adc_clkin_option = 3;
+
+ ret = snd_soc_register_codec(&i2c->dev,
+ &soc_codec_dev_aic3262,
+ tlv320aic3262_dai, ARRAY_SIZE(tlv320aic3262_dai));
+
+ if (ret < 0)
+ kfree(aic3262);
+ DBG(KERN_INFO "#%s: Done ret %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_i2c_remove
+ * Purpose : This function removes the i2c client and uninitializes
+ * AIC3262 CODEC.
+ * NOTE:
+ * This function is called from i2c core
+ * If the i2c layer weren't so broken, we could pass this kind of
+ * data around
+ *
+ *----------------------------------------------------------------------------
+ */
+static __devexit int aic3262_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_codec(&i2c->dev);
+ kfree(i2c_get_clientdata(i2c));
+ return 0;
+}
+
+static const struct i2c_device_id tlv320aic3262_id[] = {
+ {"tlv320aic3262", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tlv320aic3262_id);
+
+static struct i2c_driver tlv320aic3262_i2c_driver = {
+ .driver = {
+ .name = "aic3262-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = aic3262_codec_probe,
+ .remove = __devexit_p(aic3262_i2c_remove),
+ .id_table = tlv320aic3262_id,
+};
+#endif /*#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)*/
+
+static int __init tlv320aic3262_modinit(void)
+{
+ int ret = 0;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&tlv320aic3262_i2c_driver);
+ if (ret != 0)
+ printk(KERN_ERR "Failed to register aic326x i2c driver %d\n",
+ ret);
+#endif
+ return ret;
+
+}
+module_init(tlv320aic3262_modinit);
+
+static void __exit tlv320aic3262_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&tlv320aic3262_i2c_driver);
+#endif
+}
+module_exit(tlv320aic3262_exit);
+
+MODULE_DESCRIPTION("ASoC TLV320AIC3262 codec driver");
+MODULE_AUTHOR("Barani Prashanth<gvbarani@mistralsolutions.com>");
+MODULE_AUTHOR("Ravindra<ravindra@mistralsolutions.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic326x.h b/sound/soc/codecs/tlv320aic326x.h
new file mode 100644
index 000000000000..259ecb110787
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic326x.h
@@ -0,0 +1,638 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic3262.h
+ *
+ *
+ * Copyright (C) 2011 Mistral Solutions Pvt Ltd.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * History:
+ * Rev 0.1 ASoC driver support Mistral 20-01-2011
+ *
+ * The AIC3262 ASoC driver is ported for the codec AIC3262.
+ *
+ */
+
+#ifndef _TLV320AIC3262_H
+#define _TLV320AIC3262_H
+
+#define AUDIO_NAME "aic3262"
+#define AIC3262_VERSION "1.1"
+
+/* Enabling this macro allows for different ASI formats */
+/*#define ASI_MULTI_FMT*/
+#undef ASI_MULTI_FMT
+
+/* Enable register caching on write */
+#define EN_REG_CACHE 1
+
+#define MULTIBYTE_CONFIG_SUPPORT
+
+/* Handle all codec register reads/writes locally */
+/* This definition is needed because the snd_soc_* direct calls cause
+problems with the register cache: the common code does not support
+paged registers, so fix that before removing this definition */
+#define LOCAL_REG_ACCESS 1
+
+/* Macro enables or disables support for miniDSP in the driver */
+/* Enable the AIC3262_TiLoad macro first before enabling these macros */
+#define CONFIG_MINI_DSP
+/*#undef CONFIG_MINI_DSP*/
+
+/* Enable or disable controls for input routing */
+/*#define FULL_IN_CNTL */
+#undef FULL_IN_CNTL
+/* AIC3262 supported sample rate are 8k to 192k */
+#define AIC3262_RATES SNDRV_PCM_RATE_8000_192000
+
+/* AIC3262 supports the word formats 16bits, 20bits, 24bits and 32 bits */
+#define AIC3262_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
+ | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+#define AIC3262_FREQ_12000000 12000000
+#define AIC3262_FREQ_12288000 12288000
+#define AIC3262_FREQ_24000000 24000000
+
+/* Macro for enabling the Multi_I2S Support in Driver */
+#define AIC3262_MULTI_I2S 1
+
+/* Driver Debug Messages Enabled */
+/*#define DEBUG*/
+
+#ifdef DEBUG
+ #define DBG(x...) printk(x)
+#else
+ #define DBG(x...)
+#endif
+
+/* Select the macro below to choose the DAC master volume controls:
+ * two independent controls or one combined control
+ */
+/*#define DAC_INDEPENDENT_VOL*/
+#undef DAC_INDEPENDENT_VOL
+
+/* Audio data word length = 16-bits (default setting) */
+#define AIC3262_WORD_LEN_16BITS 0x00
+#define AIC3262_WORD_LEN_20BITS 0x01
+#define AIC3262_WORD_LEN_24BITS 0x02
+#define AIC3262_WORD_LEN_32BITS 0x03
+
+/* sink: name of target widget */
+#define AIC3262_WIDGET_NAME 0
+/* control: mixer control name */
+#define AIC3262_CONTROL_NAME 1
+/* source: name of source widget */
+#define AIC3262_SOURCE_NAME 2
+
+/* D15..D8 aic3262 register offset */
+#define AIC3262_REG_OFFSET_INDEX 0
+/* D7...D0 register data */
+#define AIC3262_REG_DATA_INDEX 1
+
+/* Serial data bus uses I2S mode (Default mode) */
+#define AIC3262_I2S_MODE 0x00
+#define AIC3262_DSP_MODE 0x01
+#define AIC3262_RIGHT_JUSTIFIED_MODE 0x02
+#define AIC3262_LEFT_JUSTIFIED_MODE 0x03
+
+/* 8 bit mask value */
+#define AIC3262_8BITS_MASK 0xFF
+
+/* shift value for CLK_REG_3 register */
+#define CLK_REG_3_SHIFT 6
+/* shift value for DAC_OSR_MSB register */
+#define DAC_OSR_MSB_SHIFT 4
+
+/* number of codec specific register for configuration */
+#define NO_FEATURE_REGS 2
+
+/* Total number of ASI Ports */
+#define MAX_ASI_COUNT 3
+
+/* AIC3262 register space */
+/* Updated from 256 to support Page 3 registers */
+#define AIC3262_CACHEREGNUM 1024
+#define BIT7 (0x01 << 7)
+#define BIT6 (0x01 << 6)
+#define BIT5 (0x01 << 5)
+#define BIT4 (0x01 << 4)
+#define BIT3 (0x01 << 3)
+#define BIT2 (0x01 << 2)
+#define BIT1 (0x01 << 1)
+#define BIT0 (0x01 << 0)
+
+#define DAC_FLAG_MIC_MASKBITS 0x30
+#define DAC_FLAG_HS_MASKBITS 0x03
+#define DAC_FLAG_R1_NOJACK 0
+#define DAC_FLAG_R1_NOMIC (0x1 << 4)
+#define DAC_FLAG_R1_MIC (0x3 << 4)
+#define DAC_FLAG_R1_NOHS 0
+#define DAC_FLAG_R1_MONOHS 1
+#define DAC_FLAG_R1_STEREOHS 2
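+/* Illustrative decode of DAC_FLAG_R1 using the masks above (sketch only;
+ * the actual jack-detect handling lives in the driver code):
+ *   mic = dac_flag_r1 & DAC_FLAG_MIC_MASKBITS;  e.g. mic == DAC_FLAG_R1_MIC
+ *   hs  = dac_flag_r1 & DAC_FLAG_HS_MASKBITS;   e.g. hs == DAC_FLAG_R1_STEREOHS
+ */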
+/* ****************** Book 0 Registers **************************************/
+
+/* ****************** Page 0 Registers **************************************/
+
+#define PAGE_SEL_REG 0
+#define RESET_REG 1
+#define DAC_ADC_CLKIN_REG 4
+#define PLL_CLKIN_REG 5
+#define PLL_CLK_RANGE_REG 5
+#define PLL_PR_POW_REG 6
+#define PLL_J_REG 7
+#define PLL_D_MSB 8
+#define PLL_D_LSB 9
+#define PLL_CKIN_DIV 10
+
+#define NDAC_DIV_POW_REG 11
+#define MDAC_DIV_POW_REG 12
+#define DOSR_MSB_REG 13
+#define DOSR_LSB_REG 14
+
+#define NADC_DIV_POW_REG 18
+#define MADC_DIV_POW_REG 19
+#define AOSR_REG 20
+#define CLKOUT_MUX 21
+#define CLKOUT_MDIV_VAL 22
+#define TIMER_REG 23
+
+#define LF_CLK_CNTL 24
+#define HF_CLK_CNTL_R1 25
+#define HF_CLK_CNTL_R2 26
+#define HF_CLK_CNTL_R3 27
+#define HF_CLK_CNTL_R4 28
+#define HF_CLK_TRIM_R1 29
+#define HF_CLK_TRIM_R2 30
+#define HF_CLK_TRIM_R3 31
+#define HF_CLK_TRIM_R4 32
+#define DAC_FLAG_R1 37
+#define DAC_FLAG_R2 38
+
+#define STICKY_FLAG1 42
+#define INT_FLAG1 43
+#define STICKY_FLAG2 44
+#define INT_FLAG2 46
+#define INT1_CNTL 48
+#define INT2_CNTL 49
+#define INT_FMT 51
+
+#define DAC_PRB 60
+#define ADC_PRB 61
+#define PASI_DAC_DP_SETUP 63
+#define DAC_MVOL_CONF 64
+#define DAC_LVOL 65
+#define DAC_RVOL 66
+#define HP_DETECT 67
+#define DRC_CNTL_R1 68
+#define DRC_CNTL_R2 69
+#define DRC_CNTL_R3 70
+#define BEEP_CNTL_R1 71
+#define BEEP_CNTL_R2 72
+
+#define ADC_CHANNEL_POW 81
+#define ADC_FINE_GAIN 82
+#define LADC_VOL 83
+#define RADC_VOL 84
+#define ADC_PHASE 85
+
+#define LAGC_CNTL 86
+#define LAGC_CNTL_R2 87
+#define LAGC_CNTL_R3 88
+#define LAGC_CNTL_R4 89
+#define LAGC_CNTL_R5 90
+#define LAGC_CNTL_R6 91
+#define LAGC_CNTL_R7 92
+#define LAGC_CNTL_R8 93
+
+#define RAGC_CNTL 94
+#define RAGC_CNTL_R2 95
+#define RAGC_CNTL_R3 96
+#define RAGC_CNTL_R4 97
+#define RAGC_CNTL_R5 98
+#define RAGC_CNTL_R6 99
+#define RAGC_CNTL_R7 100
+#define RAGC_CNTL_R8 101
+#define MINIDSP_ACCESS_CTRL 121
+/* ****************** Page 1 Registers **************************************/
+#define PAGE_1 128
+
+#define POWER_CONF (PAGE_1 + 1)
+#define LDAC_PTM (PAGE_1 + 3)
+#define RDAC_PTM (PAGE_1 + 4)
+#define CM_REG (PAGE_1 + 8)
+#define HP_CTL (PAGE_1 + 9)
+#define HP_DEPOP (PAGE_1 + 11)
+#define RECV_DEPOP (PAGE_1 + 12)
+#define MA_CNTL (PAGE_1 + 17)
+#define LADC_PGA_MAL_VOL (PAGE_1 + 18)
+#define RADC_PGA_MAR_VOL (PAGE_1 + 19)
+
+
+#define LINE_AMP_CNTL_R1 (PAGE_1 + 22)
+#define LINE_AMP_CNTL_R2 (PAGE_1 + 23)
+
+#define HP_AMP_CNTL_R1 (PAGE_1 + 27)
+#define HP_AMP_CNTL_R2 (PAGE_1 + 28)
+#define HP_AMP_CNTL_R3 (PAGE_1 + 29)
+
+#define HPL_VOL (PAGE_1 + 31)
+#define HPR_VOL (PAGE_1 + 32)
+#define INT1_SEL_L (PAGE_1 + 34)
+#define RAMP_CNTL_R1 (PAGE_1 + 36)
+#define RAMP_CNTL_R2 (PAGE_1 + 37)
+#define INT1_SEL_RM (PAGE_1 + 39)
+#define REC_AMP_CNTL_R5 (PAGE_1 + 40)
+#define RAMPR_VOL (PAGE_1 + 41)
+#define RAMP_TIME_CNTL (PAGE_1 + 42)
+#define SPK_AMP_CNTL_R1 (PAGE_1 + 45)
+#define SPK_AMP_CNTL_R2 (PAGE_1 + 46)
+#define SPK_AMP_CNTL_R3 (PAGE_1 + 47)
+#define SPK_AMP_CNTL_R4 (PAGE_1 + 48)
+#define MIC_BIAS_CNTL (PAGE_1 + 51)
+
+#define LMIC_PGA_PIN (PAGE_1 + 52)
+#define LMIC_PGA_PM_IN4 (PAGE_1 + 53)
+#define LMIC_PGA_MIN (PAGE_1 + 54)
+#define RMIC_PGA_PIN (PAGE_1 + 55)
+#define RMIC_PGA_PM_IN4 (PAGE_1 + 56)
+#define RMIC_PGA_MIN (PAGE_1 + 57)
+/* MIC PGA Gain Registers */
+#define MICL_PGA (PAGE_1 + 59)
+#define MICR_PGA (PAGE_1 + 60)
+#define HEADSET_TUNING1_REG (PAGE_1 + 119)
+#define HEADSET_TUNING2_REG (PAGE_1 + 120)
+#define MIC_PWR_DLY (PAGE_1 + 121)
+#define REF_PWR_DLY (PAGE_1 + 122)
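+/* Note: register addresses in this header are flattened as
+ * (page * 128) + offset, e.g. MIC_BIAS_CNTL = PAGE_1 + 51 = 179.
+ * The driver's read/write helpers are expected to split this back into
+ * page (reg / 128) and offset (reg % 128) before accessing the hardware.
+ */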
+
+/* ****************** Page 4 Registers **************************************/
+#define PAGE_4 512
+#define ASI1_BUS_FMT (PAGE_4 + 1)
+#define ASI1_LCH_OFFSET (PAGE_4 + 2)
+#define ASI1_RCH_OFFSET (PAGE_4 + 3)
+#define ASI1_CHNL_SETUP (PAGE_4 + 4)
+#define ASI1_MULTI_CH_SETUP_R1 (PAGE_4 + 5)
+#define ASI1_MULTI_CH_SETUP_R2 (PAGE_4 + 6)
+#define ASI1_ADC_INPUT_CNTL (PAGE_4 + 7)
+#define ASI1_DAC_OUT_CNTL (PAGE_4 + 8)
+#define ASI1_ADC_OUT_TRISTATE (PAGE_4 + 9)
+#define ASI1_BWCLK_CNTL_REG (PAGE_4 + 10)
+#define ASI1_BCLK_N_CNTL (PAGE_4 + 11)
+#define ASI1_BCLK_N (PAGE_4 + 12)
+#define ASI1_WCLK_N (PAGE_4 + 13)
+#define ASI1_BWCLK_OUT_CNTL (PAGE_4 + 14)
+#define ASI1_DATA_OUT (PAGE_4 + 15)
+#define ASI2_BUS_FMT (PAGE_4 + 17)
+#define ASI2_LCH_OFFSET (PAGE_4 + 18)
+#define ASI2_RCH_OFFSET (PAGE_4 + 19)
+#define ASI2_ADC_INPUT_CNTL (PAGE_4 + 23)
+#define ASI2_DAC_OUT_CNTL (PAGE_4 + 24)
+#define ASI2_BWCLK_CNTL_REG (PAGE_4 + 26)
+#define ASI2_BCLK_N_CNTL (PAGE_4 + 27)
+#define ASI2_BCLK_N (PAGE_4 + 28)
+#define ASI2_WCLK_N (PAGE_4 + 29)
+#define ASI2_BWCLK_OUT_CNTL (PAGE_4 + 30)
+#define ASI2_DATA_OUT (PAGE_4 + 31)
+#define ASI3_BUS_FMT (PAGE_4 + 33)
+#define ASI3_LCH_OFFSET (PAGE_4 + 34)
+#define ASI3_RCH_OFFSET (PAGE_4 + 35)
+#define ASI3_ADC_INPUT_CNTL (PAGE_4 + 39)
+#define ASI3_DAC_OUT_CNTL (PAGE_4 + 40)
+#define ASI3_BWCLK_CNTL_REG (PAGE_4 + 42)
+#define ASI3_BCLK_N (PAGE_4 + 44)
+#define ASI3_WCLK_N (PAGE_4 + 45)
+#define ASI3_BWCLK_OUT_CNTL (PAGE_4 + 46)
+#define ASI3_DATA_OUT (PAGE_4 + 47)
+#define WCLK1_PIN_CNTL_REG (PAGE_4 + 65)
+#define DOUT1_PIN_CNTL_REG (PAGE_4 + 67)
+#define DIN1_PIN_CNTL_REG (PAGE_4 + 68)
+#define WCLK2_PIN_CNTL_REG (PAGE_4 + 69)
+#define BCLK2_PIN_CNTL_REG (PAGE_4 + 70)
+#define DOUT2_PIN_CNTL_REG (PAGE_4 + 71)
+#define DIN2_PIN_CNTL_REG (PAGE_4 + 72)
+#define WCLK3_PIN_CNTL_REG (PAGE_4 + 73)
+#define BCLK3_PIN_CNTL_REG (PAGE_4 + 74)
+#define DOUT3_PIN_CNTL_REG (PAGE_4 + 75)
+#define DIN3_PIN_CNTL_REG (PAGE_4 + 76)
+#define MCLK2_PIN_CNTL_REG (PAGE_4 + 82)
+#define GPIO1_IO_CNTL (PAGE_4 + 86)
+#define GPIO2_IO_CNTL (PAGE_4 + 87)
+#define GPI1_EN (PAGE_4 + 91)
+#define GPO2_EN (PAGE_4 + 92)
+#define GPO1_PIN_CNTL (PAGE_4 + 96)
+#define MINIDSP_PORT_CNTL_REG (PAGE_4 + 118)
+
+/****************************************************************************
+* Mixer control related #defines
+***************************************************************************
+*/
+#define WCLK1_ENUM 0
+#define DOUT1_ENUM 1
+#define DIN1_ENUM 2
+#define WCLK2_ENUM 3
+#define BCLK2_ENUM 4
+#define DOUT2_ENUM 5
+#define DIN2_ENUM 6
+#define WCLK3_ENUM 7
+#define BCLK3_ENUM 8
+#define DOUT3_ENUM 9
+#define DIN3_ENUM 10
+#define CLKIN_ENUM 11
+/*
+*****************************************************************************
+* Enumeration Definitions
+*****************************************************************************
+*/
+/* The enumeration below lists all the possible inputs to the
+* PLL of the AIC3262. The private structure holds a member
+* of this enumeration type.
+*/
+enum AIC3262_PLL_OPTION {
+ PLL_CLKIN_MCLK1 = 0, /* 0000: (Device Pin) */
+ PLL_CLKIN_BLKC1, /* 0001: (Device Pin) */
+ PLL_CLKIN_GPIO1, /* 0010: (Device Pin)*/
+ PLL_CLKIN_DIN1, /* 0011: (Device Pin)*/
+ PLL_CLKIN_BCLK2, /* 0100: (Device Pin)*/
+ PLL_CLKIN_GPI1, /* 0101: (Device Pin)*/
+ PLL_CLKIN_HF_REF_CLK, /* 0110: (Device Pin)*/
+ PLL_CLKIN_GPIO2, /* 0111: (Device Pin)*/
+ PLL_CLKIN_GPI2, /* 1000: (Device Pin)*/
+ PLL_CLKIN_MCLK2 /* 1001: (Device Pin)*/
+};
+
+/* ASI Specific Bit Clock Divider Input Options.
+* Please refer to Page 4 Reg 11, Reg 27 and Reg 43
+*/
+enum ASI_BDIV_CLKIN_OPTION {
+ BDIV_CLKIN_DAC_CLK = 0, /* 00 DAC_CLK */
+ BDIV_CLKIN_DAC_MOD_CLK, /* 01 DAC_MOD_CLK */
+ BDIV_CLKIN_ADC_CLK, /* 02 ADC_CLK */
+ BDIV_CLKIN_ADC_MOD_CLK /* 03 ADC_MOD_CLK */
+};
+
+/* ASI Specific Bit Clock Output Mux Options.
+* Please refer to Page 4 Reg 14, Reg 30 and Reg 46
+* Please note that we are not handling the Reserved
+* cases here.
+*/
+enum ASI_BCLK_OPTION {
+ ASI1_BCLK_DIVIDER_OUTPUT = 0, /* 00 ASI1 Bit Clock Divider Output */
+ ASI1_BCLK_INPUT, /* 01 ASI1 Bit Clock Input */
+ ASI2_BCLK_DIVIDER_OUTPUT, /* 02 ASI2 Bit Clock Divider Output */
+ ASI2_BCLK_INPUT, /* 03 ASI2 Bit Clock Input */
+ ASI3_BCLK_DIVIDER_OUTPUT, /* 04 ASI3 Bit Clock Divider Output */
+ ASI3_BBCLK_INPUT /* 05 ASI3 Bit Clock Input */
+};
+
+/* Above bits are to be configured after Shifting 4 bits */
+#define AIC3262_ASI_BCLK_MUX_SHIFT 4
+#define AIC3262_ASI_BCLK_MUX_MASK (BIT6 | BIT5 | BIT4)
+#define AIC3262_ASI_WCLK_MUX_MASK (BIT2 | BIT1 | BIT0)
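+/* Illustration (not part of the original source): composing the BCLK
+ * output mux field of an ASI BWCLK output control register from the
+ * options above:
+ *   val = (val & ~AIC3262_ASI_BCLK_MUX_MASK) |
+ *         ((ASI1_BCLK_INPUT << AIC3262_ASI_BCLK_MUX_SHIFT) &
+ *          AIC3262_ASI_BCLK_MUX_MASK);
+ */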
+
+/* ASI Specific Word Clock Output Mux Options */
+enum ASI_WCLK_OPTION {
+ GENERATED_DAC_FS = 0, /* 00 WCLK = DAC_FS */
+ GENERATED_ADC_FS = 1, /* 01 WCLK = ADC_FS */
+ ASI1_WCLK_DIV_OUTPUT = 2, /* 02 WCLK = ASI1 WCLK_DIV_OUT */
+ ASI1_WCLK_INPUT = 3, /* 03 WCLK = ASI1 WCLK Input */
+ ASI2_WCLK_DIV_OUTPUT = 4, /* 04 WCLK = ASI2 WCLK_DIV_OUT */
+ ASI2_WCLK_INPUT = 5, /* 05 WCLK = ASI2 WCLK Input */
+ ASI3_WCLK_DIV_OUTPUT = 6, /* 06 WCLK = ASI3 WCLK_DIV_OUT */
+ ASI3_WCLK_INPUT = 7 /* 07 WCLK = ASI3 WCLK Input */
+};
+
+/* ASI DAC Output Control Options */
+enum ASI_DAC_OUTPUT_OPTION {
+ DAC_PATH_OFF = 0, /* 00 DAC Datapath Off */
+ DAC_PATH_LEFT, /* 01 DAC Datapath left Data */
+ DAC_PATH_RIGHT, /* 02 DAC Datapath Right Data */
+};
+
+/* Shift/mask values for placing the above DAC path options in the
+ * ASI DAC output control register */
+#define AIC3262_ASI_LDAC_PATH_SHIFT 6
+#define AIC3262_ASI_LDAC_PATH_MASK (BIT7 | BIT6)
+#define AIC3262_ASI_RDAC_PATH_SHIFT 4
+#define AIC3262_ASI_RDAC_PATH_MASK (BIT5 | BIT4)
+
+/* ASI specific ADC Input Control Options */
+enum ASI_ADC_INPUT_OPTION {
+ ADC_PATH_OFF = 0, /* 00 ASI Digital Output Disabled */
+ ADC_PATH_MINIDSP_1, /* 01 ASI Digital O/P from miniDSP_A(L1,R1) */
+ ADC_PATH_ASI1, /* 02 ASI Digital Output from ASI1 */
+ ADC_PATH_ASI2, /* 03 ASI Digital Output from ASI2 */
+ ADC_PATH_ASI3, /* 04 ASI Digital Output from ASI3 */
+ ADC_PATH_MINIDSP_2, /* 05 ASI Digital O/P from miniDSP_A(L2,R2) */
+ ADC_PATH_MINIDSP_3 /* 06 ASI Digital O/P from miniDSP_A(L3,R3) */
+};
+
+/* ASI Specific DOUT Pin Options */
+enum ASI_DOUT_OPTION {
+ ASI_OUTPUT = 0, /* 00 Default ASI Output */
+ ASI1_INPUT, /* 01 ASI1 Data Input */
+ ASI2_INPUT, /* 02 ASI2 Data Input */
+ ASI3_INPUT /* 03 ASI3 Data Input */
+};
+
+#define AIC3262_ASI_DOUT_MASK (BIT1 | BIT0)
+
+/*
+ *****************************************************************************
+ * Structures Definitions
+ *****************************************************************************
+ */
+#define AIC3262_MULTI_ASI_ACTIVE(x) (((x)->asiCtxt[0].asi_active) || \
+ ((x)->asiCtxt[1].asi_active) || \
+ ((x)->asiCtxt[2].asi_active))
+
+/*
+*----------------------------------------------------------------------------
+* @struct aic3262_setup_data |
+* i2c specific data setup for AIC3262.
+* @field unsigned short |i2c_address |
+* Unsigned short for i2c address.
+*----------------------------------------------------------------------------
+*/
+struct aic3262_setup_data {
+ unsigned short i2c_address;
+};
+
+/*
+*----------------------------------------------------------------------------
+* @struct aic3262_asi_data
+* ASI specific data stored for each ASI Interface
+*
+*
+*---------------------------------------------------------------------------
+*/
+struct aic3262_asi_data {
+ u8 asi_active; /* ASI Active Flag */
+ u8 master; /* Frame Master */
+ u32 sampling_rate; /* Sampling Rate */
+ enum ASI_BDIV_CLKIN_OPTION bclk_div_option; /* BCLK DIV Mux Option*/
+ enum ASI_BCLK_OPTION bclk_output; /* BCLK Output Option*/
+ enum ASI_WCLK_OPTION wclk_output; /* WCLK Output Option*/
+ u8 bclk_div; /* BCLK Divider */
+ u8 wclk_div; /* WCLK Divider */
+ enum ASI_DAC_OUTPUT_OPTION left_dac_output; /* LDAC Path */
+ enum ASI_DAC_OUTPUT_OPTION right_dac_output; /* RDAC Path */
+ enum ASI_ADC_INPUT_OPTION adc_input; /* ADC Input Control */
+ enum ASI_DOUT_OPTION dout_option; /* DOUT Option */
+ u8 playback_mode; /* Playback Selected */
+ u8 capture_mode; /* Record Selected */
+ u8 port_muted; /* ASI Muted */
+ u8 pcm_format; /* PCM Format */
+ u8 word_len; /* Word Length */
+ u8 offset1; /* Left Ch offset */
+ u8 offset2; /* Right Ch Offset */
+};
+
+/*
+*----------------------------------------------------------------------------
+* @struct aic3262_priv |
+* AIC3262 private data structure to hold the system clock, mode and
+* page number.
+* @field u32 | sysclk |
+* system clock
+* @field s32 | master |
+* master/slave mode setting for AIC3262
+* @field u8 | book_no |
+* book number.
+* @field u8 | page_no |
+* page number. Here, page 0 and page 1 are used.
+*----------------------------------------------------------------------------
+*/
+struct aic3262_priv {
+ enum snd_soc_control_type control_type;
+ struct aic326x_pdata *pdata;
+ u32 sysclk;
+ s32 master;
+ u8 book_no;
+ u8 page_no;
+ u8 process_flow;
+ u8 mute_codec;
+ u8 stream_status;
+ u32 active_count;
+ int current_dac_config[MAX_ASI_COUNT];
+ int current_adc_config[MAX_ASI_COUNT];
+ struct aic3262_asi_data asiCtxt[MAX_ASI_COUNT];
+ enum AIC3262_PLL_OPTION aic3262_pllclkin_option;
+ u8 dac_clkin_option;
+ u8 adc_clkin_option;
+ int irq;
+ struct snd_soc_jack *headset_jack;
+#if defined(LOCAL_REG_ACCESS)
+ void *control_data;
+#endif
+};
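+/* Note: asiCtxt[] is indexed 0..MAX_ASI_COUNT-1 for ASI1..ASI3, and the
+ * current_dac_config[]/current_adc_config[] entries are initialised to -1
+ * (unconfigured) by the probe routine.
+ */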
+
+/*
+ *----------------------------------------------------------------------------
+ * @struct aic3262_configs |
+ * AIC3262 initialization data which has register offset and register
+ * value.
+ * @field u8 | book_no |
+ * AIC3262 Book Number Offsets required for initialization..
+ * @field u16 | reg_offset |
+ * AIC3262 Register offsets required for initialization..
+ * @field u8 | reg_val |
+ * value to set the AIC3262 register to initialize the AIC3262.
+ *----------------------------------------------------------------------------
+ */
+struct aic3262_configs {
+ u8 book_no;
+ u16 reg_offset;
+ u8 reg_val;
+};
+
+/*
+ *----------------------------------------------------------------------------
+ * @struct aic3262_rate_divs |
+ * Setting up the values to get different frequencies
+ * @field u32 | mclk |
+ * Master clock
+ * @field u32 | rate |
+ * sample rate
+ * @field u8 | p_val |
+ * value of p in PLL
+ * @field u8 | pll_j |
+ * value for pll_j
+ * @field u16 | pll_d |
+ * value for pll_d
+ * @field u16 | dosr |
+ * value for dosr
+ * @field u8 | ndac |
+ * value for ndac
+ * @field u8 | mdac |
+ * value for mdac
+ * @field u8 | aosr |
+ * value for aosr
+ * @field u8 | nadc |
+ * value for nadc
+ * @field u8 | madc |
+ * value for madc
+ * @field u8 | blck_N |
+ * value for the BCLK N divider
+ * @field struct aic3262_configs | codec_specific_regs |
+ * codec-specific register configurations for this rate
+ *----------------------------------------------------------------------------
+ */
+struct aic3262_rate_divs {
+ u32 mclk;
+ u32 rate;
+ u8 p_val;
+ u8 pll_j;
+ u16 pll_d;
+ u16 dosr;
+ u8 ndac;
+ u8 mdac;
+ u8 aosr;
+ u8 nadc;
+ u8 madc;
+ u8 blck_N;
+ struct aic3262_configs codec_specific_regs[NO_FEATURE_REGS];
+};
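+/* Assumed clock-tree relationship between the fields above (the usual TI
+ * PLL/divider scheme; not restated elsewhere in this header):
+ *   PLL_CLK = mclk * (pll_j + pll_d / 10000) / p_val
+ *   DAC_FS  = PLL_CLK / (ndac * mdac * dosr)
+ *   ADC_FS  = PLL_CLK / (nadc * madc * aosr)
+ *   BCLK    = divided down from the DAC/ADC clock by blck_N
+ */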
+
+/*
+*****************************************************************************
+* EXTERN DECLARATIONS
+*****************************************************************************
+*/
+/*
+ *----------------------------------------------------------------------------
+ * @func aic326x_headset_detect
+ * This function helps set up the registers needed to
+ * enable headset detection
+ *
+ */
+extern int aic326x_headset_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, int jack_type);
+
+
+extern u8 aic3262_read(struct snd_soc_codec *codec, u16 reg);
+extern u16 aic3262_read_2byte(struct snd_soc_codec *codec, u16 reg);
+extern int aic3262_reset_cache(struct snd_soc_codec *codec);
+extern int aic3262_change_page(struct snd_soc_codec *codec, u8 new_page);
+extern int aic3262_write(struct snd_soc_codec *codec, u16 reg, u8 value);
+extern void aic3262_write_reg_cache(struct snd_soc_codec *codec,
+ u16 reg, u8 value);
+extern int aic3262_change_book(struct snd_soc_codec *codec, u8 new_book);
+extern int reg_def_conf(struct snd_soc_codec *codec);
+extern int i2c_verify_book0(struct snd_soc_codec *codec);
+
+#ifdef CONFIG_MINI_DSP
+extern int aic3262_minidsp_program(struct snd_soc_codec *codec);
+extern int aic3262_add_minidsp_controls(struct snd_soc_codec *codec);
+#endif
+
+
+#ifdef MULTIBYTE_CONFIG_SUPPORT
+extern int aic3262_add_multiconfig_controls(struct snd_soc_codec *codec);
+#endif
+
+#endif /* _TLV320AIC3262_H */
+
diff --git a/sound/soc/codecs/tlv320aic326x_mini-dsp.c b/sound/soc/codecs/tlv320aic326x_mini-dsp.c
new file mode 100644
index 000000000000..4d9c4de7e59a
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic326x_mini-dsp.c
@@ -0,0 +1,1587 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic326x_mini-dsp.c
+ *
+ * Copyright (C) 2011 Mistral Solutions Pvt Ltd.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * The TLV320AIC3262 is a flexible, low-power, low-voltage stereo audio
+ * codec with digital microphone inputs and programmable outputs.
+ *
+ * History:
+ *
+ * Rev 0.1 Added the miniDSP Support Mistral 01-03-2011
+ *
+ * Rev 0.2 Updated the code-base for miniDSP switching and
+ * mux control update. Mistral 21-03-2011
+ *
+ * Rev 0.3 Updated the code-base to support Multi-Configuration feature
+ * of PPS GDE
+ */
+
+/*
+ *****************************************************************************
+ * INCLUDES
+ *****************************************************************************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/core.h>
+#include <sound/soc-dapm.h>
+#include <sound/control.h>
+#include <linux/time.h> /* For timing computations */
+#include "tlv320aic326x.h"
+#include "tlv320aic326x_mini-dsp.h"
+
+#include "first_rate_pps_driver.h"
+#include "second_rate_pps_driver.h"
+
+#ifdef CONFIG_MINI_DSP
+
+#ifdef REG_DUMP_MINIDSP
+static void aic3262_dump_page(struct i2c_client *i2c, u8 page);
+#endif
+
+/*
+ *****************************************************************************
+ * LOCAL STATIC DECLARATIONS
+ *****************************************************************************
+ */
+static int m_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int m_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+static int m_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+/*
+ *****************************************************************************
+ * MINIDSP RELATED GLOBALS
+ *****************************************************************************
+ */
+/* The below variable is used to maintain the I2C Transactions
+ * to be carried out during miniDSP switching.
+ */
+minidsp_parser_data dsp_parse_data[MINIDSP_PARSER_ARRAY_SIZE*2];
+
+struct i2c_msg i2c_transaction[MINIDSP_PARSER_ARRAY_SIZE * 2];
+/* Total count of I2C Messages are stored in the i2c_count */
+int i2c_count;
+
+/* The below array is used to store the burst array for I2C Multibyte
+ * Operations
+ */
+minidsp_i2c_page i2c_page_array[MINIDSP_PARSER_ARRAY_SIZE];
+int i2c_page_count;
+
+/* kcontrol structure used to register with ALSA Core layer */
+static struct snd_kcontrol_new snd_mux_controls[MAX_MUX_CONTROLS];
+
+/* mode variables */
+static int amode;
+static int dmode;
+
+/* k-control macros used for miniDSP related Kcontrols */
+#define SOC_SINGLE_VALUE_M(xmax, xinvert) \
+ ((unsigned long)&(struct soc_mixer_control) \
+ {.max = xmax, \
+ .invert = xinvert})
+#define SOC_SINGLE_M(xname, max, invert) \
+{\
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = m_control_info, .get = m_control_get,\
+ .put = m_control_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .private_value = SOC_SINGLE_VALUE_M(max, invert) }
+#define SOC_SINGLE_AIC3262_M(xname) \
+{\
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = m_control_info, .get = m_control_get,\
+ .put = m_control_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+}
+
+/*
+ * aic3262_minidsp_controls
+ *
+ * Contains the list of the Kcontrol macros required for modifying the
+ * miniDSP behavior at run-time.
+ */
+static const struct snd_kcontrol_new aic3262_minidsp_controls[] = {
+ SOC_SINGLE_AIC3262_M("Minidsp mode") ,
+ SOC_SINGLE_AIC3262_M("ADC Adaptive mode Enable") ,
+ SOC_SINGLE_AIC3262_M("DAC Adaptive mode Enable") ,
+ SOC_SINGLE_AIC3262_M("Dump Regs Book0") ,
+ SOC_SINGLE_AIC3262_M("Verify minidsp program") ,
+};
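+/* These controls are exposed to user space as ALSA mixer controls; for
+ * example (illustrative command only):
+ *   amixer -c <card> cset name='Minidsp mode' 1
+ * switches the process flow, which m_control_put() only allows while the
+ * codec is muted (no playback in progress).
+ */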
+
+#ifdef REG_DUMP_MINIDSP
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_dump_page
+ * Purpose : Read and display one codec register page, for debugging purpose
+ *----------------------------------------------------------------------------
+ */
+static void aic3262_dump_page(struct i2c_client *i2c, u8 page)
+{
+ int i;
+ u8 data;
+ u8 test_page_array[256];
+
+ aic3262_change_page(codec, page);
+
+ data = 0x0;
+
+ i2c_master_send(i2c, &data, 1);
+ i2c_master_recv(i2c, test_page_array, 128);
+
+ DBG("\n------- MINI_DSP PAGE %d DUMP --------\n", page);
+ for (i = 0; i < 128; i++)
+ DBG(KERN_INFO " [ %d ] = 0x%x\n", i, test_page_array[i]);
+
+}
+#endif
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : update_kcontrols
+ * Purpose : Given the miniDSP process flow, this function reads the
+ * corresponding Page Numbers and then performs I2C Read for those
+ * Pages.
+ *----------------------------------------------------------------------------
+ */
+void update_kcontrols(struct snd_soc_codec *codec, int process_flow)
+{
+ int i, val1, array_size;
+ char **knames;
+ control *cntl;
+
+ if (process_flow == 1) {
+ knames = Second_Rate_MUX_control_names;
+ cntl = Second_Rate_MUX_controls;
+ array_size = ARRAY_SIZE(Second_Rate_MUX_controls);
+ } else {
+ knames = main44_MUX_control_names;
+ cntl = main44_MUX_controls;
+ array_size = ARRAY_SIZE(main44_MUX_controls);
+ }
+
+ DBG(KERN_INFO "%s: ARRAY_SIZE = %d\tmode=%d\n", __func__,
+ array_size, process_flow);
+ for (i = 0; i < array_size; i++) {
+ aic3262_change_book(codec, cntl[i].control_book);
+ aic3262_change_page(codec, cntl[i].control_page);
+ val1 = i2c_smbus_read_byte_data(codec->control_data,
+ cntl[i].control_base);
+ snd_mux_controls[i].private_value = 0;
+ }
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : byte_i2c_array_transfer
+ * Purpose : Used while switching miniDSP modes register by register.
+ * Intended only for debugging during development.
+ *-----------------------------------------------------------------------------
+ */
+int byte_i2c_array_transfer(struct snd_soc_codec *codec,
+ reg_value *program_ptr,
+ int size)
+{
+ int j;
+ u8 buf[3];
+
+ for (j = 0; j < size; j++) {
+ /* Check if current Reg offset is zero */
+ if (program_ptr[j].reg_off == 0) {
+ /* Check for the Book Change Request */
+ if ((j < (size - 1)) &&
+ (program_ptr[j+1].reg_off == 127)) {
+ aic3262_change_book(codec,
+ program_ptr[j+1].reg_val);
+ /* Increment for loop counter across Book Change */
+ j++;
+ continue;
+ }
+ /* Check for the Page Change Request in Current book */
+ aic3262_change_page(codec, program_ptr[j].reg_val);
+ continue;
+ }
+
+ buf[AIC3262_REG_OFFSET_INDEX] = program_ptr[j].reg_off % 128;
+ buf[AIC3262_REG_DATA_INDEX] =
+ program_ptr[j].reg_val & AIC3262_8BITS_MASK;
+
+ if (codec->hw_write(codec->control_data, buf, 2) != 2) {
+ printk(KERN_ERR "Error in i2c write\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : byte_i2c_array_read
+ * Purpose : This function is used to perform Byte I2C Read. This is used
+ * only for debugging purposes to read back the Codec Page
+ * Registers after miniDSP Configuration.
+ *----------------------------------------------------------------------------
+ */
+int byte_i2c_array_read(struct snd_soc_codec *codec,
+ reg_value *program_ptr, int size)
+{
+ int j;
+ int val1; /* signed, so the smbus error check below works */
+ u8 cur_page = 0;
+ u8 cur_book = 0;
+ for (j = 0; j < size; j++) {
+ /* Check if current Reg offset is zero */
+ if (program_ptr[j].reg_off == 0) {
+ /* Check for the Book Change Request */
+ if ((j < (size - 1)) &&
+ (program_ptr[j+1].reg_off == 127)) {
+ aic3262_change_book(codec,
+ program_ptr[j+1].reg_val);
+ cur_book = program_ptr[j+1].reg_val;
+ /* Increment for loop counter across Book Change */
+ j++;
+ continue;
+ }
+ /* Check for the Page Change Request in Current book */
+ aic3262_change_page(codec, program_ptr[j].reg_val);
+ cur_page = program_ptr[j].reg_val;
+ continue;
+ }
+
+ val1 = i2c_smbus_read_byte_data(codec->control_data,
+ program_ptr[j].reg_off);
+ if (val1 < 0)
+ printk(KERN_ERR "Error in smbus read\n");
+
+ DBG(KERN_INFO "[%d][%d][%d]= %x\n",
+ cur_book, cur_page, program_ptr[j].reg_off, val1);
+ }
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : minidsp_get_burst
+ * Purpose : Format one I2C burst for transfer from mini dsp program array.
+ * This function will parse the program array and get next burst
+ * data for doing an I2C bulk transfer.
+ *----------------------------------------------------------------------------
+ */
+static void
+minidsp_get_burst(reg_value *program_ptr,
+ int program_size,
+ minidsp_parser_data *parse_data)
+{
+ int index = parse_data->current_loc;
+ int burst_write_count = 0;
+
+ /*DBG("GET_BURST: start\n");*/
+ /* check if first location is page register, and populate page addr */
+ if (program_ptr[index].reg_off == 0) {
+ if ((index < (program_size - 1)) &&
+ (program_ptr[index+1].reg_off == 127)) {
+ parse_data->book_change = 1;
+ parse_data->book_no = program_ptr[index+1].reg_val;
+ index += 2;
+ goto finish_out;
+
+ }
+ parse_data->page_num = program_ptr[index].reg_val;
+ parse_data->burst_array[burst_write_count++] =
+ program_ptr[index].reg_off;
+ parse_data->burst_array[burst_write_count++] =
+ program_ptr[index].reg_val;
+ index++;
+ goto finish_out;
+ }
+
+ parse_data->burst_array[burst_write_count++] =
+ program_ptr[index].reg_off;
+ parse_data->burst_array[burst_write_count++] =
+ program_ptr[index].reg_val;
+ index++;
+
+ for (; index < program_size; index++) {
+ if (program_ptr[index].reg_off !=
+ (program_ptr[index - 1].reg_off + 1))
+ break;
+ else
+ parse_data->burst_array[burst_write_count++] =
+ program_ptr[index].reg_val;
+
+ }
+finish_out:
+ parse_data->burst_size = burst_write_count;
+ if (index == program_size)
+ /* parsing completed */
+ parse_data->current_loc = MINIDSP_PARSING_END;
+ else
+ parse_data->current_loc = index;
+ /*DBG("GET_BURST: end\n");*/
+}
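+/* Burst layout produced above, for reference: burst_array[0] holds the
+ * starting register offset and the following bytes hold values for
+ * consecutive registers; parsing stops at a non-consecutive offset or at
+ * a page/book change request in the program array.
+ */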
+/*
+ *----------------------------------------------------------------------------
+ * Function : minidsp_i2c_multibyte_transfer
+ * Purpose : Function used to perform multi-byte I2C Writes. Used to configure
+ * the miniDSP Pages.
+ *----------------------------------------------------------------------------
+ */
+int
+minidsp_i2c_multibyte_transfer(struct snd_soc_codec *codec,
+ reg_value *program_ptr,
+ int program_size)
+{
+ struct i2c_client *client = codec->control_data;
+
+ minidsp_parser_data parse_data;
+ int count = 0;
+
+#ifdef DEBUG_MINIDSP_LOADING
+ int i = 0, j = 0;
+#endif
+ /* point the current location to start of program array */
+ parse_data.current_loc = 0;
+ parse_data.page_num = 0;
+ parse_data.book_change = 0;
+ parse_data.book_no = 0;
+
+ DBG(KERN_INFO "size is : %d", program_size);
+ do {
+ do {
+ /* Get first burst data */
+ minidsp_get_burst(program_ptr, program_size,
+ &parse_data);
+ if (parse_data.book_change == 1)
+ break;
+ dsp_parse_data[count] = parse_data;
+
+ i2c_transaction[count].addr = client->addr;
+ i2c_transaction[count].flags =
+ client->flags & I2C_M_TEN;
+ i2c_transaction[count].len =
+ dsp_parse_data[count].burst_size;
+ i2c_transaction[count].buf =
+ dsp_parse_data[count].burst_array;
+
+#ifdef DEBUG_MINIDSP_LOADING
+ DBG(KERN_INFO
+ "i: %d\taddr: %d\tflags: %d\tlen: %d\tbuf:",
+ i, client->addr, client->flags & I2C_M_TEN,
+ dsp_parse_data[count].burst_size);
+
+ for (j = 0; j <= dsp_parse_data[count].burst_size; j++)
+ DBG(KERN_INFO "%x ",
+ dsp_parse_data[i].burst_array[j]);
+
+ DBG(KERN_INFO "\n\n");
+ i++;
+#endif
+
+ count++;
+ /* Proceed to the next burst sequence */
+ } while (parse_data.current_loc != MINIDSP_PARSING_END);
+
+ if (count > 0) {
+ if (i2c_transfer(client->adapter,
+ i2c_transaction, count) != count) {
+ printk(KERN_ERR "Write burst i2c data error!\n");
+ }
+ }
+ if (parse_data.book_change == 1) {
+ aic3262_change_book(codec, parse_data.book_no);
+ parse_data.book_change = 0;
+ }
+ } while (parse_data.current_loc != MINIDSP_PARSING_END);
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : set_minidsp_mode
+ * Purpose : Switch the miniDSP to the requested processing mode.
+ *----------------------------------------------------------------------------
+ */
+int
+set_minidsp_mode(struct snd_soc_codec *codec, int new_mode)
+{
+ struct aic3262_priv *aic326x = snd_soc_codec_get_drvdata(codec);
+
+ DBG("%s: switch mode start\n", __func__);
+ aic3262_reset_cache(codec);
+ reg_def_conf(codec);
+
+ if (new_mode == 0) {
+
+ /* General Programming */
+ DBG(KERN_INFO "$Writing reg_section_init_program\n");
+ if (ARRAY_SIZE(main44_REG_Section_init_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_REG_Section_init_program,
+ ARRAY_SIZE(main44_REG_Section_init_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_REG_Section_init_program,
+ ARRAY_SIZE(main44_REG_Section_init_program));
+#endif
+
+ } else {
+ printk(KERN_ERR
+ "_CODEC_REGS: Insufficient data for programming\n");
+ }
+
+ /* minidsp A programming */
+ DBG(KERN_INFO "#Writing minidsp_A_reg_values\n");
+
+ if ((main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_miniDSP_A_reg_values,
+ (main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_miniDSP_A_reg_values,
+ (main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE));
+#endif
+ } else {
+ printk(KERN_ERR
+ "MINI_DSP_A_second: Insufficient data for programming\n");
+ }
+ /* minidsp D programming */
+ DBG(KERN_INFO "#Writing minidsp_D_reg_values\n");
+ if ((main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE) > 0) {
+
+#ifdef MULTIBYTE_CONFIG_SUPPORT
+ /*Multibyte for DAC */
+#endif
+
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_miniDSP_D_reg_values,
+ (main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_miniDSP_D_reg_values,
+ (main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE));
+#endif
+
+ } else {
+ printk(KERN_ERR
+ "MINI_DSP_D_second: Insufficient data for programming\n");
+ }
+ DBG(KERN_INFO "#Writing reg_section_post_program\n");
+ if (ARRAY_SIZE(main44_REG_Section_post_program) > 0) {
+ #ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_REG_Section_post_program,
+ ARRAY_SIZE(main44_REG_Section_post_program));
+ #else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_REG_Section_post_program,
+ ARRAY_SIZE(main44_REG_Section_post_program));
+ #endif
+ } else {
+ printk(KERN_ERR
+ "second_CODEC_REGS: Insufficient data for programming\n");
+ }
+ }
+
+ if (new_mode == 1) {
+ /* General Programming */
+ DBG(KERN_INFO "#Writing reg_section_init_program\n");
+ if (ARRAY_SIZE(Second_Rate_REG_Section_init_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_REG_Section_init_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_init_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_REG_Section_init_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_init_program));
+#endif
+
+ } else {
+ printk(KERN_ERR
+ "_CODEC_REGS: Insufficient data for programming\n");
+ }
+ /* minidsp A programming */
+ DBG(KERN_INFO "#Writing minidsp_A_reg_values\n");
+
+ if ((Second_Rate_miniDSP_A_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_A_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_miniDSP_A_reg_values,
+ (Second_Rate_miniDSP_A_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_A_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_miniDSP_A_reg_values,
+ (Second_Rate_miniDSP_A_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_A_reg_values_INST_SIZE));
+#endif
+ } else {
+ printk(KERN_ERR
+ "MINI_DSP_A_second: Insufficient data for programming\n");
+ }
+
+ /* minidsp D programming */
+ DBG(KERN_INFO "#Writing minidsp_D_reg_values\n");
+
+ if ((Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_miniDSP_D_reg_values,
+ (Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_miniDSP_D_reg_values,
+ (Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE));
+#endif
+ } else {
+ printk(KERN_ERR
+ "MINI_DSP_D_second: Insufficient data for programming\n");
+ }
+ DBG(KERN_INFO "Writing reg_section_post_program\n");
+ if (ARRAY_SIZE(Second_Rate_REG_Section_post_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_REG_Section_post_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_post_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_REG_Section_post_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_post_program));
+#endif
+ } else {
+ printk(KERN_ERR
+ "second_CODEC_REGS: Insufficient data for programming\n");
+ }
+ }
+
+#ifdef MULTIBYTE_CONFIG_SUPPORT
+ /*Multibyte for DAC */
+ aic326x->process_flow = new_mode;
+ config_multibyte_for_mode(codec, new_mode);
+#endif
+ DBG("%s: switch mode finished\n", __func__);
+ return 0;
+}
+
+/*
+ * i2c_verify
+ *
+ * Function used to validate the contents written into the miniDSP
+ * pages after miniDSP Configuration.
+*/
+int i2c_verify(struct snd_soc_codec *codec)
+{
+
+ DBG(KERN_INFO "#%s: Invoked.. Resetting to page 0\n", __func__);
+
+ aic3262_change_book(codec, 0);
+ DBG(KERN_INFO "#Reading reg_section_init_program\n");
+
+ byte_i2c_array_read(codec, main44_REG_Section_init_program,
+ ARRAY_SIZE(main44_REG_Section_init_program));
+
+ DBG(KERN_INFO "#Reading minidsp_A_reg_values\n");
+ byte_i2c_array_read(codec, main44_miniDSP_A_reg_values,
+ (main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE));
+
+ DBG(KERN_INFO "#Reading minidsp_D_reg_values\n");
+ byte_i2c_array_read(codec, main44_miniDSP_D_reg_values,
+ (main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE));
+
+ DBG(KERN_INFO "#Reading reg_section_post_program\n");
+ byte_i2c_array_read(codec, main44_REG_Section_post_program,
+ ARRAY_SIZE(main44_REG_Section_post_program));
+
+ DBG(KERN_INFO "i2c_verify completed\n");
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : set_minidsp_mode1
+ * Purpose : Load the default miniDSP mode for the first time.
+ *----------------------------------------------------------------------------
+ */
+int
+set_minidsp_mode1(struct snd_soc_codec *codec, int new_mode)
+{
+ DBG("#%s: switch mode start\n", __func__);
+ aic3262_reset_cache(codec);
+
+ if (new_mode == 0) {
+ /* General Programming */
+ DBG(KERN_INFO "#Writing reg_section_init_program\n");
+ if (ARRAY_SIZE(main44_REG_Section_init_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_REG_Section_init_program,
+ ARRAY_SIZE(main44_REG_Section_init_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_REG_Section_init_program,
+ ARRAY_SIZE(main44_REG_Section_init_program));
+#endif
+
+ } else {
+ printk(KERN_ERR
+ "_CODEC_REGS: Insufficient data for programming\n");
+ }
+ /* minidsp A programming */
+ DBG(KERN_INFO "#Writing minidsp_A_reg_values\n");
+ if ((main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_miniDSP_A_reg_values,
+ (main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_miniDSP_A_reg_values,
+ (main44_miniDSP_A_reg_values_COEFF_SIZE +
+ main44_miniDSP_A_reg_values_INST_SIZE));
+#endif
+ } else {
+ printk(KERN_ERR
+ "MINI_DSP_A_second: Insufficient data for programming\n");
+ }
+
+ /* minidsp D programming */
+ DBG(KERN_INFO "#Writing minidsp_D_reg_values\n");
+ if ((main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_miniDSP_D_reg_values,
+ (main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_miniDSP_D_reg_values,
+ (main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE));
+#endif
+ } else {
+ printk(KERN_ERR
+ "MINI_DSP_D_second: Insufficient data for programming\n");
+ }
+
+ DBG(KERN_INFO "#Writing reg_section_post_program\n");
+ if (ARRAY_SIZE(main44_REG_Section_post_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ main44_REG_Section_post_program,
+ ARRAY_SIZE(main44_REG_Section_post_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ main44_REG_Section_post_program,
+ ARRAY_SIZE(main44_REG_Section_post_program));
+#endif
+ } else {
+ printk(KERN_ERR
+ "second_CODEC_REGS: Insufficient data for programming\n");
+ }
+ }
+
+ if (new_mode == 1) {
+ /* General Programming */
+ DBG(KERN_INFO "#Writing reg_section_init_program\n");
+ if (ARRAY_SIZE(Second_Rate_REG_Section_init_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_REG_Section_init_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_init_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_REG_Section_init_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_init_program));
+#endif
+ } else {
+ printk(KERN_ERR
+ "_CODEC_REGS: Insufficient data for programming\n");
+ }
+ /* minidsp A programming */
+ DBG(KERN_INFO "#Writing minidsp_A_reg_values\n");
+ if ((Second_Rate_miniDSP_A_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_A_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_miniDSP_A_reg_values,
+ (Second_Rate_miniDSP_A_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_A_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_miniDSP_A_reg_values,
+ (Second_Rate_miniDSP_A_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_A_reg_values_INST_SIZE));
+#endif
+ } else {
+ printk(KERN_ERR\
+ "MINI_DSP_A_second: Insufficient data for programming\n");
+ }
+ /* minidsp D programming */
+ DBG(KERN_INFO "#Writing minidsp_D_reg_values\n");
+ if ((Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_miniDSP_D_reg_values,
+ (Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_miniDSP_D_reg_values,
+ (Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE));
+#endif
+ } else
+ printk(KERN_ERR "MINI_DSP_D_second: Insufficient data for programming\n");
+
+ DBG(KERN_INFO "#Writing reg_section_post_program\n");
+ if (ARRAY_SIZE(Second_Rate_REG_Section_post_program) > 0) {
+#ifndef MULTIBYTE_I2C
+ byte_i2c_array_transfer(codec,
+ Second_Rate_REG_Section_post_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_post_program));
+#else
+ minidsp_i2c_multibyte_transfer(codec,
+ Second_Rate_REG_Section_post_program,
+ ARRAY_SIZE(Second_Rate_REG_Section_post_program));
+#endif
+ } else
+ printk(KERN_ERR\
+ "second_CODEC_REGS: Insufficient data for programming\n");
+
+ }
+
+ DBG("#%s: switch mode completed\n", __func__);
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : aic3262_minidsp_program
+ * Purpose : Program mini dsp for AIC3262 codec chip. This routine is
+ * called from the aic3262 codec driver, if mini dsp programming
+ * is enabled.
+ *----------------------------------------------------------------------------
+ */
+int aic3262_minidsp_program(struct snd_soc_codec *codec)
+{
+ DBG(KERN_INFO "#AIC3262: programming mini dsp\n");
+
+#if defined(PROGRAM_MINI_DSP_first)
+ #ifdef DEBUG
+ DBG("#Verifying book 0\n");
+ i2c_verify_book0(codec);
+#endif
+ aic3262_change_book(codec, 0);
+ set_minidsp_mode1(codec, 0);
+ aic3262_change_book(codec, 0);
+#ifdef DEBUG
+ DBG("#verifying book 0\n");
+ i2c_verify_book0(codec);
+#endif
+#endif
+#if defined(PROGRAM_MINI_DSP_second)
+#ifdef DEBUG
+ DBG("#Verifying book 0\n");
+ i2c_verify_book0(codec);
+#endif
+ set_minidsp_mode1(codec, 1);
+#ifdef DEBUG
+ DBG("#verifying book 0\n");
+ i2c_verify_book0(codec);
+#endif
+#endif
+ return 0;
+}
+/*
+ *----------------------------------------------------------------------------
+ * Function : m_control_info
+ * Purpose : Initialize the data for the new controls used to
+ * program the AIC3262 registers.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int m_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->count = 1;
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : m_control_get
+ * Purpose : Read the data of the new controls used to program
+ * the AIC3262 registers.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int m_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+ u32 val;
+ u8 val1;
+
+ if (!strcmp(kcontrol->id.name, "Minidsp mode")) {
+ val = aic3262->process_flow;
+ ucontrol->value.integer.value[0] = val;
+ DBG(KERN_INFO "control get : mode=%d\n", aic3262->process_flow);
+ }
+ if (!strcmp(kcontrol->id.name, "DAC Adaptive mode Enable")) {
+ aic3262_change_book(codec, 80);
+ val1 = i2c_smbus_read_byte_data(codec->control_data, 1);
+ ucontrol->value.integer.value[0] = ((val1>>1)&0x01);
+ DBG(KERN_INFO "control get : mode=%d\n", aic3262->process_flow);
+ }
+ if (!strcmp(kcontrol->id.name, "ADC Adaptive mode Enable")) {
+ aic3262_change_book(codec, 40);
+ val1 = i2c_smbus_read_byte_data(codec->control_data, 1);
+ ucontrol->value.integer.value[0] = ((val1>>1)&0x01);
+ DBG(KERN_INFO "control get : mode=%d\n", dmode);
+ }
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : m_control_put
+ * Purpose : m_control_put is called to pass data from user/application to
+ * the driver.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int m_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ u32 val;
+ u8 val1;
+ int mode = aic3262->process_flow;
+
+ DBG("n_control_put\n");
+ val = ucontrol->value.integer.value[0];
+ if (!strcmp(kcontrol->id.name, "Minidsp mode")) {
+ DBG(KERN_INFO "\nMini dsp put\n mode = %d, val=%d\n",
+ aic3262->process_flow, val);
+ if (val != mode) {
+ if (aic3262->mute_codec == 1) {
+ i2c_verify_book0(codec);
+ aic3262_change_book(codec, 0);
+ set_minidsp_mode(codec, val);
+
+ aic3262_change_book(codec, 0);
+ i2c_verify_book0(codec);
+ /* update_kcontrols(codec, val);*/
+ } else {
+ printk(KERN_ERR
+ " Cant Switch Processflows, Playback in progress");
+ }
+ }
+ }
+
+ if (!strcmp(kcontrol->id.name, "DAC Adaptive mode Enable")) {
+ DBG(KERN_INFO "\nMini dsp put\n mode = %d, val=%d\n",
+ aic3262->process_flow, val);
+ if (val != amode) {
+ aic3262_change_book(codec, 80);
+ val1 = i2c_smbus_read_byte_data(codec->control_data, 1);
+ aic3262_write(codec, 1, (val1&0xfb)|(val<<1));
+ }
+ amode = val;
+ }
+
+ if (!strcmp(kcontrol->id.name, "ADC Adaptive mode Enable")) {
+ DBG(KERN_INFO "\nMini dsp put\n mode = %d, val=%d\n",
+ aic3262->process_flow, val);
+ if (val != dmode) {
+ aic3262_change_book(codec, 40);
+ val1 = i2c_smbus_read_byte_data(codec->control_data, 1);
+ aic3262_write(codec, 1, (val1&0xfb)|(val<<1));
+ }
+ dmode = val;
+ }
+
+ if (!strcmp(kcontrol->id.name, "Dump Regs Book0"))
+ i2c_verify_book0(codec);
+
+
+ if (!strcmp(kcontrol->id.name, "Verify minidsp program")) {
+ if (mode == 0) {
+ DBG("Current mod=%d\nVerifying minidsp_D_regs", mode);
+ byte_i2c_array_read(codec, main44_miniDSP_D_reg_values,
+ (main44_miniDSP_D_reg_values_COEFF_SIZE +
+ main44_miniDSP_D_reg_values_INST_SIZE));
+ } else {
+ byte_i2c_array_read(codec,
+ Second_Rate_miniDSP_D_reg_values,
+ (Second_Rate_miniDSP_D_reg_values_COEFF_SIZE +
+ Second_Rate_miniDSP_D_reg_values_INST_SIZE));
+ }
+ }
+ DBG("\nmode = %d\n", mode);
+ return mode;
+}
+
+/************************** MUX CONTROL section *****************************/
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_info_minidsp_mux
+ * Purpose : info routine for mini dsp mux control amixer kcontrols
+ *----------------------------------------------------------------------------
+ */
+static int __new_control_info_minidsp_mux(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int index;
+ int ret_val = -1;
+
+ for (index = 0; index < ARRAY_SIZE(main44_MUX_controls); index++) {
+ if (strstr(kcontrol->id.name, main44_MUX_control_names[index]))
+ break;
+ }
+
+ if (index < ARRAY_SIZE(main44_MUX_controls)) {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = MIN_MUX_CTRL;
+ uinfo->value.integer.max = MAX_MUX_CTRL;
+ ret_val = 0;
+ }
+ return ret_val;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_get_minidsp_mux
+ *
+ * Purpose : get routine for mux control amixer kcontrols,
+ * read current register values to user.
+ * Used for the mini dsp 'MUX control' amixer controls.
+ *----------------------------------------------------------------------------
+ */
+static int __new_control_get_minidsp_mux(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = kcontrol->private_value;
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_put_minidsp_mux
+ *
+ * Purpose : put routine for amixer kcontrols, write user values to registers
+ * values. Used for the mini dsp 'MUX control' amixer controls.
+ *----------------------------------------------------------------------------
+ */
+static int __new_control_put_minidsp_mux(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 data[MUX_CTRL_REG_SIZE + 1];
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int index = 1;
+ int user_value = ucontrol->value.integer.value[0];
+ struct i2c_client *i2c;
+ u8 value[2], swap_reg_pre, swap_reg_post;
+ u8 page;
+ int ret_val = -1, array_size;
+ control *array;
+ char **array_names;
+ char *control_name, *control_name1;
+ struct aic3262_priv *aic326x = snd_soc_codec_get_drvdata(codec);
+ i2c = codec->control_data;
+
+ if (aic326x->process_flow == 0) {
+ DBG("#the current process flow is %d", aic326x->process_flow);
+ array = main44_MUX_controls;
+ array_size = ARRAY_SIZE(main44_MUX_controls);
+ array_names = main44_MUX_control_names;
+ control_name = "Stereo_Mux_TwoToOne_1";
+ control_name1 = "Mono_Mux_1_1";
+
+ /* Configure only for process flow 1 controls */
+ if (strcmp(kcontrol->id.name, control_name) &&
+ strcmp(kcontrol->id.name, control_name1))
+ return 0;
+ } else {
+ array = Second_Rate_MUX_controls;
+ array_size = ARRAY_SIZE(Second_Rate_MUX_controls);
+ array_names = Second_Rate_MUX_control_names;
+ control_name = "Stereo_Mux_TwoToOne_1_Second";
+ control_name1 = "Mono_Mux_1_1_Second";
+
+ /* Configure only for process flow 2 controls */
+ if (strcmp(kcontrol->id.name, control_name) &&
+ strcmp(kcontrol->id.name, control_name1))
+ return 0;
+ }
+
+ page = array[index].control_page;
+
+ DBG("#user value = 0x%x\n", user_value);
+ for (index = 0; index < array_size; index++) {
+ if (strstr(kcontrol->id.name, array_names[index]))
+ break;
+ }
+ if (index < array_size) {
+ DBG(KERN_INFO "#Index %d Changing to Page %d\n", index,
+ array[index].control_page);
+
+ aic3262_change_book(codec,
+ array[index].control_book);
+ aic3262_change_page(codec,
+ array[index].control_page);
+
+ if (!strcmp(array_names[index], control_name)) {
+ if (user_value > 0) {
+ data[1] = 0x00;
+ data[2] = 0x00;
+ data[3] = 0x00;
+ } else {
+ data[1] = 0xFF;
+ data[2] = 0xFF;
+ data[3] = 0xFF;
+ }
+ } else {
+ if (user_value > 0) {
+ data[1] =
+ (u8) ((user_value >> 16) &
+ AIC3262_8BITS_MASK);
+ data[2] =
+ (u8) ((user_value >> 8) &
+ AIC3262_8BITS_MASK);
+ data[3] =
+ (u8)((user_value) & AIC3262_8BITS_MASK);
+ }
+ }
+ /* start register address */
+ data[0] = array[index].control_base;
+
+ DBG(KERN_INFO
+ "#Writing %d %d %d %d\n", data[0], data[1], data[2], data[3]);
+
+ ret_val = i2c_master_send(i2c, data, MUX_CTRL_REG_SIZE + 1);
+
+ if (ret_val != MUX_CTRL_REG_SIZE + 1)
+ printk(KERN_ERR "i2c_master_send transfer failed\n");
+ else {
+ /* store the current level */
+ kcontrol->private_value = user_value;
+ ret_val = 0;
+ /* Enable adaptive filtering for ADC/DAC */
+ }
+
+ /* Perform a buffer-swap command: point the register pointer at
+ * register 1 of the currently selected book/page (the adaptive
+ * switching control register), set the buffer-switch bit, and
+ * then read the register back to verify that the swap took place.
+ */
+
+ value[0] = 1;
+
+ if (i2c_master_send(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not write register address\n");
+
+ /* Read the Value of the Page 8 Register 1 which controls the
+ Adaptive Switching Mode */
+ if (i2c_master_recv(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not read codec registers\n");
+
+ swap_reg_pre = value[0];
+ /* Write the Register bit updates */
+ value[1] = value[0] | 1;
+ value[0] = 1;
+
+ if (i2c_master_send(i2c, value, 2) != 2)
+ printk(KERN_ERR "Can not write register address\n");
+
+ value[0] = 1;
+ /* verify buffer swap */
+ if (i2c_master_send(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not write register address\n");
+
+ /* Read the Value of the Page 8 Register 1 which controls the
+ Adaptive Switching Mode */
+ if (i2c_master_recv(i2c, &swap_reg_post, 1) != 1)
+ printk(KERN_ERR "Can not read codec registers\n");
+
+ if ((swap_reg_pre == 4 && swap_reg_post == 6)
+ || (swap_reg_pre == 6 && swap_reg_post == 4))
+ DBG("Buffer swap success\n");
+ else
+ printk(KERN_ERR
+ "Buffer swap...FAILED\nswap_reg_pre=%x, swap_reg_post=%x\n",
+ swap_reg_pre, swap_reg_post);
+
+ /* update the new buffer value in the old, just swapped
+ * out buffer
+ */
+ aic3262_change_book(codec, array[index].control_book);
+ aic3262_change_page(codec, array[index].control_page);
+ ret_val = i2c_master_send(i2c, data, MUX_CTRL_REG_SIZE + 1);
+ if (ret_val != MUX_CTRL_REG_SIZE + 1)
+ printk(KERN_ERR "i2c_master_send transfer failed\n");
+ ret_val = 0;
+ }
+
+ aic3262_change_book(codec, 0);
+ return ret_val;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : minidsp_mux_ctrl_mixer_controls
+ *
+ * Purpose : Add amixer kcontrols for mini dsp mux controls.
+ *----------------------------------------------------------------------------
+ */
+static int minidsp_mux_ctrl_mixer_controls(struct snd_soc_codec *codec,
+ int size, control *cntl,
+ char **name)
+{
+ int i, err;
+ int val1;
+
+ /* DBG("%d mixer controls for mini dsp MUX\n", no_mux_controls);*/
+
+ if (size) {
+ for (i = 0; i < size; i++) {
+
+ snd_mux_controls[i].name = name[i];
+ snd_mux_controls[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ snd_mux_controls[i].access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_mux_controls[i].info =
+ __new_control_info_minidsp_mux;
+ snd_mux_controls[i].get = __new_control_get_minidsp_mux;
+ snd_mux_controls[i].put = __new_control_put_minidsp_mux;
+ /*
+ * TBD: read volume reg and update the index number
+ */
+ aic3262_change_book(codec, cntl[i].control_book);
+ aic3262_change_page(codec, cntl[i].control_page);
+ val1 = i2c_smbus_read_byte_data(codec->control_data,
+ cntl[i].control_base);
+ DBG(KERN_INFO "Control data %x\n", val1);
+ DBG(KERN_INFO
+ "the value of amixer control mux=%d", val1);
+ if (val1 >= 0 && val1 != 255)
+ snd_mux_controls[i].private_value = val1;
+ else
+ snd_mux_controls[i].private_value = 0;
+
+ snd_mux_controls[i].count = 0;
+
+ err = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&snd_mux_controls[i],
+ codec));
+ if (err < 0)
+ printk(KERN_ERR
+ "%s:Invalid control %s\n", __FILE__,
+ snd_mux_controls[i].name);
+ }
+ }
+ return 0;
+}
+
+/*------------------------- Volume Controls -----------------------*/
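+/*
+ * Note (descriptive comment added for clarity; the interpretation is
+ * inferred from the values, not taken from the original sources):
+ * volume_lite_table[] holds the 24-bit linear gain coefficients written to
+ * the miniDSP volume registers. The entries appear to be spaced in 0.5 dB
+ * steps (two table entries per 1 dB user step, hence the 'user_value << 1'
+ * indexing in the put handler below), with 0x400000 corresponding to 0 dB
+ * and 0x7FB260 to roughly +6 dB.
+ */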
+static int volume_lite_table[] = {
+
+ 0x00000D, 0x00000E, 0x00000E, 0x00000F,
+ 0x000010, 0x000011, 0x000012, 0x000013,
+ 0x000015, 0x000016, 0x000017, 0x000018,
+ 0x00001A, 0x00001C, 0x00001D, 0x00001F,
+ 0x000021, 0x000023, 0x000025, 0x000027,
+ 0x000029, 0x00002C, 0x00002F, 0x000031,
+ 0x000034, 0x000037, 0x00003B, 0x00003E,
+ 0x000042, 0x000046, 0x00004A, 0x00004F,
+ 0x000053, 0x000058, 0x00005D, 0x000063,
+ 0x000069, 0x00006F, 0x000076, 0x00007D,
+ 0x000084, 0x00008C, 0x000094, 0x00009D,
+ 0x0000A6, 0x0000B0, 0x0000BB, 0x0000C6,
+ 0x0000D2, 0x0000DE, 0x0000EB, 0x0000F9,
+ 0x000108, 0x000118, 0x000128, 0x00013A,
+ 0x00014D, 0x000160, 0x000175, 0x00018B,
+ 0x0001A3, 0x0001BC, 0x0001D6, 0x0001F2,
+ 0x000210, 0x00022F, 0x000250, 0x000273,
+ 0x000298, 0x0002C0, 0x0002E9, 0x000316,
+ 0x000344, 0x000376, 0x0003AA, 0x0003E2,
+ 0x00041D, 0x00045B, 0x00049E, 0x0004E4,
+ 0x00052E, 0x00057C, 0x0005D0, 0x000628,
+ 0x000685, 0x0006E8, 0x000751, 0x0007C0,
+ 0x000836, 0x0008B2, 0x000936, 0x0009C2,
+ 0x000A56, 0x000AF3, 0x000B99, 0x000C49,
+ 0x000D03, 0x000DC9, 0x000E9A, 0x000F77,
+ 0x001062, 0x00115A, 0x001262, 0x001378,
+ 0x0014A0, 0x0015D9, 0x001724, 0x001883,
+ 0x0019F7, 0x001B81, 0x001D22, 0x001EDC,
+ 0x0020B0, 0x0022A0, 0x0024AD, 0x0026DA,
+ 0x002927, 0x002B97, 0x002E2D, 0x0030E9,
+ 0x0033CF, 0x0036E1, 0x003A21, 0x003D93,
+ 0x004139, 0x004517, 0x00492F, 0x004D85,
+ 0x00521D, 0x0056FA, 0x005C22, 0x006197,
+ 0x006760, 0x006D80, 0x0073FD, 0x007ADC,
+ 0x008224, 0x0089DA, 0x009205, 0x009AAC,
+ 0x00A3D7, 0x00AD8C, 0x00B7D4, 0x00C2B9,
+ 0x00CE43, 0x00DA7B, 0x00E76E, 0x00F524,
+ 0x0103AB, 0x01130E, 0x01235A, 0x01349D,
+ 0x0146E7, 0x015A46, 0x016ECA, 0x018486,
+ 0x019B8C, 0x01B3EE, 0x01CDC3, 0x01E920,
+ 0x02061B, 0x0224CE, 0x024553, 0x0267C5,
+ 0x028C42, 0x02B2E8, 0x02DBD8, 0x030736,
+ 0x033525, 0x0365CD, 0x039957, 0x03CFEE,
+ 0x0409C2, 0x044703, 0x0487E5, 0x04CCA0,
+ 0x05156D, 0x05628A, 0x05B439, 0x060ABF,
+ 0x066666, 0x06C77B, 0x072E50, 0x079B3D,
+ 0x080E9F, 0x0888D7, 0x090A4D, 0x09936E,
+ 0x0A24B0, 0x0ABE8D, 0x0B6188, 0x0C0E2B,
+ 0x0CC509, 0x0D86BD, 0x0E53EB, 0x0F2D42,
+ 0x101379, 0x110754, 0x1209A3, 0x131B40,
+ 0x143D13, 0x157012, 0x16B543, 0x180DB8,
+ 0x197A96, 0x1AFD13, 0x1C9676, 0x1E481C,
+ 0x201373, 0x21FA02, 0x23FD66, 0x261F54,
+ 0x28619A, 0x2AC625, 0x2D4EFB, 0x2FFE44,
+ 0x32D646, 0x35D96B, 0x390A41, 0x3C6B7E,
+ 0x400000, 0x43CAD0, 0x47CF26, 0x4C106B,
+ 0x50923B, 0x55586A, 0x5A6703, 0x5FC253,
+ 0x656EE3, 0x6B7186, 0x71CF54, 0x788DB4,
+ 0x7FB260,
+};
+
+static struct snd_kcontrol_new snd_vol_controls[MAX_VOLUME_CONTROLS];
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_info_minidsp_volume
+ * Purpose : info routine for volumeLite amixer kcontrols
+ *----------------------------------------------------------------------------
+ */
+
+static int
+__new_control_info_minidsp_volume(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int index, index8;
+ int ret_val = -1;
+
+ for (index = 0; index < ARRAY_SIZE(main44_VOLUME_controls); index++) {
+ if (strstr
+ (kcontrol->id.name, main44_VOLUME_control_names[index]))
+ break;
+ }
+ for (index8 = 0; index8 < ARRAY_SIZE(Second_Rate_VOLUME_controls);
+ index8++) {
+ if (strstr
+ (kcontrol->id.name,
+ Second_Rate_VOLUME_control_names[index8]))
+ break;
+ }
+ if ((index < ARRAY_SIZE(main44_VOLUME_controls))
+ || (index8 < ARRAY_SIZE(Second_Rate_VOLUME_controls))) {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = MIN_VOLUME;
+ uinfo->value.integer.max = MAX_VOLUME;
+ ret_val = 0;
+ }
+ return ret_val;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_get_minidsp_volume
+ * Purpose : get routine for amixer kcontrols, read current register
+ * values. Used for mini dsp 'VolumeLite' amixer controls.
+ *----------------------------------------------------------------------------
+ */
+static int
+__new_control_get_minidsp_volume(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = kcontrol->private_value;
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : __new_control_put_minidsp_volume
+ * Purpose : put routine for amixer kcontrols, write user values to the
+ * registers. Used for mini dsp 'VolumeLite' amixer controls.
+ *----------------------------------------------------------------------------
+ */
+static int
+__new_control_put_minidsp_volume(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 data[4];
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int user_value = ucontrol->value.integer.value[0];
+ struct i2c_client *i2c = codec->control_data;
+ int ret_val = -1;
+ int coeff;
+ u8 value[2], swap_reg_pre, swap_reg_post;
+ struct aic3262_priv *aic3262 = snd_soc_codec_get_drvdata(codec);
+
+ control *volume_controls = NULL;
+ printk(KERN_INFO "user value = 0x%x\n", user_value);
+
+ if (aic3262->process_flow == 0)
+ volume_controls = main44_VOLUME_controls;
+ else
+ volume_controls = Second_Rate_VOLUME_controls;
+
+ /* reject out-of-range values before indexing volume_lite_table */
+ if (user_value < MIN_VOLUME || user_value > MAX_VOLUME)
+ return -EINVAL;
+
+ aic3262_change_book(codec, volume_controls->control_book);
+ aic3262_change_page(codec, volume_controls->control_page);
+
+ coeff = volume_lite_table[user_value << 1];
+
+ data[1] = (u8) ((coeff >> 16) & AIC3262_8BITS_MASK);
+ data[2] = (u8) ((coeff >> 8) & AIC3262_8BITS_MASK);
+ data[3] = (u8) ((coeff) & AIC3262_8BITS_MASK);
+
+ /* Start register address */
+ data[0] = volume_controls->control_base;
+ ret_val = i2c_master_send(i2c, data, VOLUME_REG_SIZE + 1);
+ if (ret_val != VOLUME_REG_SIZE + 1)
+ printk(KERN_ERR "i2c_master_send transfer failed\n");
+ else {
+ /* store the current level */
+ kcontrol->private_value = user_value;
+ ret_val = 0;
+ }
+ /* Initiate buffer swap */
+ value[0] = 1;
+
+ if (i2c_master_send(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not write register address\n");
+
+ /* Read the Value of the Page 8 Register 1 which controls the
+ Adaptive Switching Mode */
+ if (i2c_master_recv(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not read codec registers\n");
+
+ swap_reg_pre = value[0];
+ /* Write the Register bit updates */
+ value[1] = value[0] | 1;
+ value[0] = 1;
+ if (i2c_master_send(i2c, value, 2) != 2)
+ printk(KERN_ERR "Can not write register address\n");
+
+ value[0] = 1;
+ /* verify buffer swap */
+ if (i2c_master_send(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not write register address\n");
+
+ /* Read the Value of the Page 8 Register 1 which controls the
+ Adaptive Switching Mode */
+ if (i2c_master_recv(i2c, &swap_reg_post, 1) != 1)
+ printk(KERN_ERR "Can not read codec registers\n");
+
+ if ((swap_reg_pre == 4 && swap_reg_post == 6)
+ || (swap_reg_pre == 6 && swap_reg_post == 4))
+ DBG("Buffer swap success\n");
+ else
+ DBG("Buffer swap...FAILED\nswap_reg_pre=%x, swap_reg_post=%x\n",
+ swap_reg_pre, swap_reg_post);
+
+ /* update the new buffer value in the old, just swapped out buffer */
+ aic3262_change_book(codec, volume_controls->control_book);
+ aic3262_change_page(codec, volume_controls->control_page);
+ i2c_master_send(i2c, data, VOLUME_REG_SIZE + 1);
+
+ aic3262_change_book(codec, 0);
+
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : minidsp_volume_mixer_controls
+ * Purpose : Add amixer kcontrols for mini dsp volume Lite controls.
+ *----------------------------------------------------------------------------
+ */
+static int minidsp_volume_mixer_controls(struct snd_soc_codec *codec)
+{
+ int i, err, no_volume_controls;
+ static char volume_control_name[MAX_VOLUME_CONTROLS][40];
+
+ /* ADD first process volume controls */
+ no_volume_controls = ARRAY_SIZE(main44_VOLUME_controls);
+
+ printk(KERN_INFO " %d mixer controls for mini dsp 'volumeLite'\n",
+ no_volume_controls);
+
+ if (no_volume_controls) {
+
+ for (i = 0; i < no_volume_controls; i++) {
+ strcpy(volume_control_name[i],
+ main44_VOLUME_control_names[i]);
+ strcat(volume_control_name[i], VOLUME_KCONTROL_NAME);
+
+ printk(KERN_ERR "Volume controls: %s\n",
+ volume_control_name[i]);
+
+ snd_vol_controls[i].name = volume_control_name[i];
+ snd_vol_controls[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ snd_vol_controls[i].access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_vol_controls[i].info =
+ __new_control_info_minidsp_volume;
+ snd_vol_controls[i].get =
+ __new_control_get_minidsp_volume;
+ snd_vol_controls[i].put =
+ __new_control_put_minidsp_volume;
+ /*
+ * TBD: read volume reg and update the index number
+ */
+ snd_vol_controls[i].private_value = 0;
+ snd_vol_controls[i].count = 0;
+
+ err = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&snd_vol_controls[i],
+ codec));
+ if (err < 0) {
+ printk(KERN_ERR
+ "%s:Invalid control %s\n", __FILE__,
+ snd_vol_controls[i].name);
+ }
+ }
+ }
+ /* ADD second process volume controls */
+ no_volume_controls = ARRAY_SIZE(Second_Rate_VOLUME_controls);
+
+ printk(KERN_INFO " %d mixer controls for mini dsp 'volumeLite'\n",
+ no_volume_controls);
+
+ if (no_volume_controls) {
+
+ for (i = 0; i < no_volume_controls; i++) {
+ strcpy(volume_control_name[i],
+ Second_Rate_VOLUME_control_names[i]);
+ strcat(volume_control_name[i], VOLUME_KCONTROL_NAME);
+
+ printk(KERN_ERR "Volume controls: %s\n",
+ volume_control_name[i]);
+
+ snd_vol_controls[i].name = volume_control_name[i];
+ snd_vol_controls[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ snd_vol_controls[i].access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_vol_controls[i].info =
+ __new_control_info_minidsp_volume;
+ snd_vol_controls[i].get =
+ __new_control_get_minidsp_volume;
+ snd_vol_controls[i].put =
+ __new_control_put_minidsp_volume;
+ /*
+ * TBD: read volume reg and update the index number
+ */
+ snd_vol_controls[i].private_value = 0;
+ snd_vol_controls[i].count = 0;
+
+ err = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&snd_vol_controls[i],
+ codec));
+ if (err < 0) {
+ printk(KERN_ERR
+ "%s:Invalid control %s\n", __FILE__,
+ snd_vol_controls[i].name);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ *--------------------------------------------------------------------------
+ * Function : aic3262_add_minidsp_controls
+ * Purpose : Configures the AMIXER control interfaces that can be exercised
+ * by the user at run-time. Registers the aic3262_minidsp_controls[]
+ * array along with the MUX and volume run-time controls.
+ *---------------------------------------------------------------------------
+ */
+int aic3262_add_minidsp_controls(struct snd_soc_codec *codec)
+{
+#ifdef ADD_MINI_DSP_CONTROLS
+ int i, err, no_mux_controls;
+ /* add mode k control */
+ for (i = 0; i < ARRAY_SIZE(aic3262_minidsp_controls); i++) {
+ err = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&aic3262_minidsp_controls[i], codec));
+ if (err < 0) {
+ printk(KERN_ERR "Invalid control\n");
+ return err;
+ }
+ }
+
+ /* add mux controls */
+ no_mux_controls = ARRAY_SIZE(main44_MUX_controls);
+ minidsp_mux_ctrl_mixer_controls(codec, no_mux_controls,
+ main44_MUX_controls, main44_MUX_control_names);
+
+ no_mux_controls = ARRAY_SIZE(Second_Rate_MUX_controls);
+ minidsp_mux_ctrl_mixer_controls(codec, no_mux_controls,
+ Second_Rate_MUX_controls, Second_Rate_MUX_control_names);
+
+ /* add volume controls*/
+ minidsp_volume_mixer_controls(codec);
+#endif /* ADD_MINI_DSP_CONTROLS */
+ return 0;
+}
+
+MODULE_DESCRIPTION("ASoC TLV320AIC3262 miniDSP driver");
+MODULE_AUTHOR("Y Preetam Sashank Reddy <preetam@mistralsolutions.com>");
+MODULE_LICENSE("GPL");
+#endif /* End of CONFIG_MINI_DSP */
diff --git a/sound/soc/codecs/tlv320aic326x_mini-dsp.h b/sound/soc/codecs/tlv320aic326x_mini-dsp.h
new file mode 100644
index 000000000000..394fa5954dde
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic326x_mini-dsp.h
@@ -0,0 +1,127 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic3262_mini-dsp.h
+ *
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * History:
+ * Rev 0.1 Added the multiconfig support Mistral 17-08-2011
+ *
+ * Rev 0.2 Migrated for aic3262 nVidia
+ * Mistral 21-10-2011
+ */
+
+#ifndef _TLV320AIC3262_MINI_DSP_H
+#define _TLV320AIC3262_MINI_DSP_H
+
+
+#define MULTIBYTE_CONFIG_SUPPORT
+/*#undef MULTIBYTE_CONFIG_SUPPORT*/
+
+
+/* defines */
+
+/* Select the functionalities to be used in mini dsp module */
+/*#define PROGRAM_MINI_DSP_first*/
+#define PROGRAM_MINI_DSP_second
+#define PROGRAM_CODEC_REG_SECTIONS
+#define ADD_MINI_DSP_CONTROLS
+
+/* use the following macros to select between burst and byte mode of i2c
+ * Byte mode uses standard read & write and provides debugging information
+ * if enabled.
+ * Multibyte (burst) mode should be used for production code where
+ * performance is the priority.
+ */
+#define MULTIBYTE_I2C
+/*#undef MULTIBYTE_I2C*/
+
+typedef struct {
+ u8 reg_off;
+ u8 reg_val;
+} reg_value;
+
+/*CONTROL LOCATIONS*/
+typedef struct {
+ u8 control_book; /*coefficient book location*/
+ u8 control_page; /*coefficient page location*/
+ u8 control_base; /*coefficient base address within page*/
+ u8 control_mute_flag; /*non-zero means muting required*/
+ u8 control_string_index; /*string table index*/
+} control;
+
+/* Volume ranges from -110 dB to +6 dB.
+ * amixer controls do not accept negative values, so the range is
+ * normalized to start from 0: value 0 corresponds to -110 dB and
+ * value 116 to +6 dB.
+ */
+#define MAX_VOLUME_CONTROLS 2
+#define MIN_VOLUME 0
+#define MAX_VOLUME 116
+#define VOLUME_REG_SIZE 3 /* 3 bytes */
+#define VOLUME_KCONTROL_NAME "(0=-110dB, 116=+6dB)"
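+/*
+ * Example of the normalization above: the amixer value maps linearly to
+ * gain in dB as (value - 110) dB, so value 0 -> -110 dB, value 110 -> 0 dB
+ * and value 116 -> +6 dB.
+ */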
+
+#define FILT_CTL_NAME_ADC "ADC adaptive filter(0=Disable, 1=Enable)"
+#define FILT_CTL_NAME_DAC "DAC adaptive filter(0=Disable, 1=Enable)"
+#define COEFF_CTL_NAME_ADC "ADC coeff Buffer(0=Buffer A, 1=Buffer B)"
+#define COEFF_CTL_NAME_DAC "DAC coeff Buffer(0=Buffer A, 1=Buffer B)"
+
+#define BUFFER_PAGE_ADC 0x8
+#define BUFFER_PAGE_DAC 0x2c
+
+#define ADAPTIVE_MAX_CONTROLS 4
+
+/*
+ * MUX controls, 3 bytes of control data.
+ */
+#define MAX_MUX_CONTROLS 2
+#define MIN_MUX_CTRL 0
+#define MAX_MUX_CTRL 2
+#define MUX_CTRL_REG_SIZE 3 /* 3 bytes */
+
+#define MINIDSP_PARSING_START 0
+#define MINIDSP_PARSING_END (-1)
+
+#define CODEC_REG_DONT_IGNORE 0
+#define CODEC_REG_IGNORE 1
+
+#define CODEC_REG_PRE_INIT 0
+#define CODEC_REG_POST_INIT 1
+#define INIT_SEQ_DELIMITER 255 /* Delimiter register */
+#define DELIMITER_COUNT 2 /* 2 delimiter entries */
+
+/* Parser info structure */
+typedef struct {
+ char page_num;
+ char burst_array[129];
+ int burst_size;
+ int current_loc;
+ int book_change;
+ u8 book_no;
+} minidsp_parser_data;
+
+/* I2c Page Change Structure */
+typedef struct {
+ char burst_array[4];
+} minidsp_i2c_page;
+
+/* This macro defines the total size of the miniDSP parser arrays
+ * that the driver will maintain as a data backup.
+ * The total memory requirement is approximately
+ * sizeof(minidsp_parser_data) * MINIDSP_PARSER_ARRAY_SIZE bytes.
+ */
+#define MINIDSP_PARSER_ARRAY_SIZE 80
+
+extern int
+minidsp_i2c_multibyte_transfer(struct snd_soc_codec *, reg_value *, int);
+extern int byte_i2c_array_transfer(struct snd_soc_codec *, reg_value *, int);
+extern void config_multibyte_for_mode(struct snd_soc_codec *, int);
+#endif
diff --git a/sound/soc/codecs/tlv320aic326x_minidsp_config.c b/sound/soc/codecs/tlv320aic326x_minidsp_config.c
new file mode 100644
index 000000000000..07b762aeedbc
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic326x_minidsp_config.c
@@ -0,0 +1,410 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic326x_minidsp_config.c
+ *
+ * Copyright (C) 2011 Mistral Solutions Pvt Ltd.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * The TLV320AIC3262 is a flexible, low-power, low-voltage stereo audio
+ * codec with digital microphone inputs and programmable outputs.
+ *
+ * History:
+ *
+ * Rev 0.1 Added the multiconfig support Mistral 17-08-2011
+ *
+ * Rev 0.2 Migrated for aic3262 nVidia
+ * Mistral 21-10-2011
+ */
+
+/*
+ *****************************************************************************
+ * INCLUDES
+ *****************************************************************************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/core.h>
+#include <sound/soc-dapm.h>
+#include <sound/control.h>
+#include <linux/time.h> /* For timing computations */
+#include "tlv320aic326x.h"
+#include "tlv320aic326x_mini-dsp.h"
+
+#include "Patch_base_jazz_Rate48_pps_driver.h"
+#include "Patch_base_main_Rate48_pps_driver.h"
+#include "Patch_base_pop_Rate48_pps_driver.h"
+#include "Patch_base_rock_Rate48_pps_driver.h"
+
+#ifdef CONFIG_MINI_DSP
+
+#define MAX_CONFIG_D_ARRAYS 4
+#define MAX_CONFIG_A_ARRAYS 0
+#define MAX_CONFIG_ARRAYS 4
+#define MINIDSP_DMODE 0
+#define MINIDSP_AMODE 1
+
+/*
+ *****************************************************************************
+ * LOCAL STATIC DECLARATIONS
+ *****************************************************************************
+ */
+static int multibyte_coeff_change(struct snd_soc_codec *codec, int);
+
+static int m_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+static int m_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+/* k-control macros used for miniDSP related Kcontrols */
+#define SOC_SINGLE_VALUE_M(xmax, xinvert) \
+ ((unsigned long)&(struct soc_mixer_control) \
+ {.max = xmax, \
+ .invert = xinvert})
+#define SOC_SINGLE_M(xname, max, invert) \
+{\
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = m_control_info, .get = m_control_get,\
+ .put = m_control_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .private_value = SOC_SINGLE_VALUE_M(max, invert) }
+#define SOC_SINGLE_AIC3262_M(xname) \
+{\
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = m_control_info, .get = m_control_get,\
+ .put = m_control_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+}
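+
+/*
+ * Illustrative use of the macros above (not part of the original driver):
+ * a hypothetical on/off style miniDSP control could be declared as
+ *
+ *	SOC_SINGLE_M("MiniDSP Example Switch", 1, 0)
+ *
+ * inside a struct snd_kcontrol_new array, giving a read/write control with
+ * a maximum value of 1, no inversion, and the m_control_* handlers wired in
+ * by the macro.
+ */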
+
+
+/* The Multi-Configurations generated through the PPS GDE have been
+ * named Rock, Pop, Jazz and Main. These were the configurations
+ * used while testing this audio driver. If the user creates
+ * process-flows with different names, the names in the array below
+ * should be updated accordingly.
+ */
+static const char *multi_config_support_DAC[] = {
+ "ROCK",
+ "POP",
+ "JAZZ",
+ "MAIN",
+};
+
+/* SOC_ENUM Declaration and kControl for switching Configurations
+ * at run-time.
+ */
+static const struct soc_enum aic3262_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(multi_config_support_DAC),
+ multi_config_support_DAC);
+
+static const struct snd_kcontrol_new aic3262_minidsp_controls1[] = {
+
+ SOC_ENUM_EXT("Multiconfig support for DAC",
+ aic3262_enum, m_control_get, m_control_put),
+
+};
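+
+/*
+ * Example (illustrative, not from the original sources): once the control
+ * above is registered, the active DAC configuration can be switched from
+ * user space with amixer, e.g.
+ *
+ *	amixer -c 0 cset name='Multiconfig support for DAC' JAZZ
+ *
+ * where the card index 0 is an assumption and the item name must match one
+ * of the entries in multi_config_support_DAC[].
+ */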
+
+
+/*
+ * multibyte_config
+ *
+ * This structure has been devised to maintain information about each
+ * configuration provided with the PPS GDE Processflow. For each
+ * configuration, the Driver needs to know where the starting offset
+ * of the Coefficient change, Total Size of Coefficients being affected,
+ * and the Instruction Sizes.
+ * This has to be replicated for both miniDSP_A and miniDSP_D
+ */
+struct multibyte_config {
+ reg_value *regs;
+ unsigned int d_coeff_start;
+ unsigned int d_coeff_size;
+ unsigned int d_inst_start;
+ unsigned int d_inst_size;
+ unsigned int a_coeff_start;
+ unsigned int a_coeff_size;
+ unsigned int a_inst_start;
+ unsigned int a_inst_size;
+} config_array[][2][MAX_CONFIG_ARRAYS] = {
+ /* Process flow 1 */
+ {
+ {
+ /* DAC */
+ {rock_D_reg_values, 0, 67, 67, 0, 0, 0, 0, 0},
+ {pop_D_reg_values, 0, 67, 67, 0, 0, 0, 0, 0},
+ {jazz_D_reg_values, 0, 67, 67, 0, 0, 0, 0, 0},
+ {main_D_reg_values, 0, 67, 67, 0, 0, 0, 0, 0},
+ },
+ /* ADC */
+ {},
+ },
+
+ /* Process flow 2 */
+ {
+#if 0
+ {
+ {main, 0, 0, 0, 0, 0, 0, 0, 0},
+ {pop, 0, 0, 0, 0, 0, 0, 0, 0},
+ {jazz, 0, 0, 0, 0, 0, 0, 0, 0},
+ {rock, 0, 0, 0, 0, 0, 0, 0, 0},
+ },
+ /* ADC */
+ {},
+#endif
+ },
+};
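+
+/*
+ * Illustrative sketch (not from the original sources): a new PPS GDE
+ * configuration, say "CLASSICAL", would be hooked up by adding its name to
+ * multi_config_support_DAC[], bumping MAX_CONFIG_D_ARRAYS/MAX_CONFIG_ARRAYS,
+ * and appending an entry such as
+ *
+ *	{classical_D_reg_values, 0, 67, 67, 0, 0, 0, 0, 0},
+ *
+ * to the DAC block of the relevant process flow, where
+ * classical_D_reg_values is a hypothetical reg_value array generated by the
+ * PPS GDE tool.
+ */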
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : m_control_get
+ * Purpose : get routine for the multi-configuration kcontrol; reports the
+ * currently selected configuration for the active process flow.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int m_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct aic3262_priv *aic326x = snd_soc_codec_get_drvdata(codec);
+ u32 val = 0;
+ u32 mode = aic326x->process_flow;
+
+
+ if (!strcmp(kcontrol->id.name, "Multiconfig support for DAC"))
+ val = aic326x->current_dac_config[mode];
+ else if (!strcmp(kcontrol->id.name, "Multiconfig support for ADC"))
+ val = aic326x->current_adc_config[mode];
+
+
+ ucontrol->value.integer.value[0] = val;
+ return 0;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ * Function : m_control_put
+ * Purpose : put routine called to pass the configuration selected by the
+ * user/application down to the driver.
+ *
+ *----------------------------------------------------------------------------
+ */
+static int m_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct aic3262_priv *aic326x = snd_soc_codec_get_drvdata(codec);
+ u32 val;
+ u8 value;
+ int pf = aic326x->process_flow;
+ struct multibyte_config *array;
+
+ val = ucontrol->value.integer.value[0];
+ if (!strcmp(kcontrol->id.name, "Multiconfig support for DAC")) {
+ if (aic326x->process_flow == MINIDSP_DMODE) {
+ if (val >= MAX_CONFIG_D_ARRAYS) {
+ dev_err(codec->dev, "Value not in range\n");
+ return -EINVAL;
+ }
+
+ value = aic3262_read(codec, ADC_CHANNEL_POW);
+ value = aic3262_read(codec, PASI_DAC_DP_SETUP);
+
+ array = &config_array[pf][MINIDSP_DMODE][val];
+ minidsp_i2c_multibyte_transfer(codec,
+ array->regs,
+ (array->d_inst_size + array->d_coeff_size));
+
+
+ /* coefficient buffer change */
+ multibyte_coeff_change(codec, 0x50);
+
+ minidsp_i2c_multibyte_transfer(codec,
+ array->regs,
+ (array->d_inst_size + array->d_coeff_size));
+
+
+
+ value = aic3262_read(codec, ADC_CHANNEL_POW);
+ value = aic3262_read(codec, PASI_DAC_DP_SETUP);
+ }
+ aic326x->current_dac_config[pf] = val;
+
+ } else {
+ if (aic326x->process_flow == MINIDSP_AMODE) {
+ if (val >= MAX_CONFIG_A_ARRAYS) {
+ dev_err(codec->dev, "Value not in range\n");
+ return -EINVAL;
+ }
+ array = &config_array[pf][MINIDSP_AMODE][val];
+ minidsp_i2c_multibyte_transfer(codec,
+ array->regs,
+ (array->a_inst_size + array->a_coeff_size));
+ }
+ aic326x->current_adc_config[pf] = val;
+ }
+ return val;
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ * Function : aic3262_add_multiconfig_controls
+ * Purpose : Configures the AMIXER control interfaces that can be exercised
+ * by the user at run-time. Registers the aic3262_minidsp_controls1[]
+ * array to add the multi-configuration run-time control.
+ *---------------------------------------------------------------------------
+ */
+int aic3262_add_multiconfig_controls(struct snd_soc_codec *codec)
+{
+ int i, err;
+
+ DBG(KERN_INFO
+ "#%s: Invoked to add controls for Multi-Configuration\n",
+ __func__);
+
+ /* add mode k control */
+ for (i = 0; i < ARRAY_SIZE(aic3262_minidsp_controls1); i++) {
+ err = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&aic3262_minidsp_controls1[i],
+ codec));
+ if (err < 0) {
+ printk(KERN_ERR
+ "Cannot add controls for multibyte configuration\n");
+ return err;
+ }
+ }
+ DBG(KERN_INFO "#%s: Completed control addition.\n", __func__);
+ return 0;
+}
+
+/*
+ *--------------------------------------------------------------------------
+ * Function : config_multibyte_for_mode
+ * Purpose : Function which is invoked when user changes the configuration
+ * at run-time. Internally configures/switches both
+ * miniDSP_D and miniDSP_A Coefficient arrays.
+ *---------------------------------------------------------------------------
+ */
+void config_multibyte_for_mode(struct snd_soc_codec *codec, int mode)
+{
+ int val;
+ int pf = mode;
+ struct aic3262_priv *aic326x = snd_soc_codec_get_drvdata(codec);
+ struct multibyte_config *array;
+
+ DBG(KERN_INFO "#%s: Invoked for miniDSP Mode %d\n", __func__, mode);
+
+ array = config_array[pf][MINIDSP_DMODE];
+ if ((aic326x->current_dac_config[pf] >= 0) &&
+ (aic326x->current_dac_config[pf] < MAX_CONFIG_ARRAYS)) {
+ val = aic326x->current_dac_config[pf];
+ array = &config_array[pf][MINIDSP_DMODE][val];
+ byte_i2c_array_transfer(codec,
+ array->regs,
+ (array->d_inst_size +
+ array->d_coeff_size));
+ } else {
+ DBG(KERN_INFO "#%s: Invalid Configuration ID %d specified.\n",
+ __func__, aic326x->current_dac_config[pf]);
+ }
+
+ array = config_array[pf][MINIDSP_AMODE];
+ if ((aic326x->current_adc_config[pf] >= 0) &&
+ (aic326x->current_adc_config[pf] < MAX_CONFIG_ARRAYS)) {
+ val = aic326x->current_adc_config[pf];
+ minidsp_i2c_multibyte_transfer(codec,
+ array[val].regs,
+ array[val].a_inst_size +
+ array[val].a_coeff_size);
+ } else {
+ DBG(KERN_INFO "#%s: Invalid Configuration ID %d specified.\n",
+ __func__, aic326x->current_adc_config[pf]);
+ }
+ return;
+}
+
+/*
+ *--------------------------------------------------------------------------
+ * Function : multibyte_coeff_change
+ * Purpose : Performs the adaptive-filter buffer swap on the given book so
+ * that newly written coefficients take effect.
+ *---------------------------------------------------------------------------
+ */
+static int multibyte_coeff_change(struct snd_soc_codec *codec, int bk)
+{
+
+ u8 value[2], swap_reg_pre, swap_reg_post;
+ struct i2c_client *i2c;
+ i2c = codec->control_data;
+
+ aic3262_change_book(codec, bk);
+
+ value[0] = 1;
+
+ if (i2c_master_send(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not write register address\n");
+ else {
+ /* Read the Value of the Page 8 Register 1 which controls the
+ Adaptive Switching Mode */
+ if (i2c_master_recv(i2c, value, 1) != 1) {
+ printk(KERN_ERR "Can not read codec registers\n");
+ goto err;
+ }
+ swap_reg_pre = value[0];
+
+ /* Write the Register bit updates */
+ value[1] = value[0] | 1;
+ value[0] = 1;
+
+ if (i2c_master_send(i2c, value, 2) != 2) {
+ printk(KERN_ERR "Can not write register address\n");
+ goto err;
+ }
+ value[0] = 1;
+ /* verify buffer swap */
+ if (i2c_master_send(i2c, value, 1) != 1)
+ printk(KERN_ERR "Can not write register address\n");
+
+ /* Read the Value of the Page 8 Register 1 which controls the
+ Adaptive Switching Mode */
+ if (i2c_master_recv(i2c, &swap_reg_post, 1) != 1)
+ printk(KERN_ERR "Can not read codec registers\n");
+
+ if ((swap_reg_pre == 4 && swap_reg_post == 6)
+ || (swap_reg_pre == 6 && swap_reg_post == 4))
+ DBG(KERN_INFO "Buffer swap success\n");
+ else
+ printk(KERN_ERR
+ "Buffer swap...FAILED\nswap_reg_pre=%x, swap_reg_post=%x\n",
+ swap_reg_pre, swap_reg_post);
+ }
+
+err:
+ return 0;
+}
+
+#endif
+
+MODULE_DESCRIPTION("ASoC AIC3262 miniDSP multi-configuration");
+MODULE_AUTHOR("Barani Prashanth <gvbarani@mistralsolutions.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 66d18a3e57f6..c822e119555f 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -48,6 +48,7 @@
#include <sound/initval.h>
#include <sound/tlv.h>
#include <asm/div64.h>
+#include <sound/jack.h>
#include "wm8753.h"
@@ -94,6 +95,9 @@ struct wm8753_priv {
unsigned int hifi_fmt;
int dai_func;
+ int irq;
+ struct snd_soc_jack *headset_jack;
+ unsigned int debounce_time_hp;
};
#define wm8753_reset(c) snd_soc_write(c, WM8753_RESET, 0)
@@ -1393,13 +1397,18 @@ static void wm8753_work(struct work_struct *work)
static int wm8753_suspend(struct snd_soc_codec *codec, pm_message_t state)
{
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+
wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ disable_irq(wm8753->irq);
+
return 0;
}
static int wm8753_resume(struct snd_soc_codec *codec)
{
u16 *reg_cache = codec->reg_cache;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
int i;
/* Sync reg_cache with the hardware */
@@ -1416,6 +1425,8 @@ static int wm8753_resume(struct snd_soc_codec *codec)
wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ enable_irq(wm8753->irq);
+
/* charge wm8753 caps */
if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
@@ -1427,6 +1438,70 @@ static int wm8753_resume(struct snd_soc_codec *codec)
return 0;
}
+static irqreturn_t wm8753_jack_handler(int irq, void *data)
+{
+ struct snd_soc_codec *codec = data;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+
+ /* GPIO4 interrupt disable */
+ snd_soc_update_bits(codec, WM8753_INTEN, WM8753_GPIO4IEN_MASK,
+ WM8753_GPIO4IEN_DIS);
+
+ /* sleep for debounce time */
+ msleep(wm8753->debounce_time_hp);
+
+ /* Invert GPIO4 interrupt polarity */
+ value = snd_soc_read(codec, WM8753_INTPOL);
+ if (value & WM8753_GPIO4IPOL_LOW) {
+ snd_soc_jack_report(wm8753->headset_jack, SND_JACK_HEADPHONE,
+ SND_JACK_HEADPHONE);
+ /* interrupt when high, i.e. headphone disconnected */
+ snd_soc_update_bits(codec, WM8753_INTPOL, WM8753_GPIO4IPOL_MASK,
+ WM8753_GPIO4IPOL_HIGH);
+ } else {
+ snd_soc_jack_report(wm8753->headset_jack, 0,
+ SND_JACK_HEADPHONE);
+ /* interrupt when low, i.e. headphone connected */
+ snd_soc_update_bits(codec, WM8753_INTPOL, WM8753_GPIO4IPOL_MASK,
+ WM8753_GPIO4IPOL_LOW);
+ }
+
+ /* GPIO4 interrupt enable */
+ snd_soc_update_bits(codec, WM8753_INTEN, WM8753_GPIO4IEN_MASK,
+ WM8753_GPIO4IEN_EN);
+
+ return IRQ_HANDLED;
+}
+
+int wm8753_headphone_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, enum snd_jack_types type,
+ unsigned int debounce_time_hp)
+{
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
+
+ wm8753->headset_jack = jack;
+ wm8753->debounce_time_hp = debounce_time_hp;
+
+ if (wm8753->irq && (type & SND_JACK_HEADPHONE)) {
+ /* Configure GPIO2 pin to generate the interrupt */
+ snd_soc_update_bits(codec, WM8753_GPIO2, WM8753_GP2M_MASK,
+ WM8753_GP2M_INT);
+ /* Active low Interrupt */
+ snd_soc_update_bits(codec, WM8753_GPIO1, WM8753_INTCON_MASK,
+ WM8753_INTCON_AL);
+ /* interrupt when low, i.e. headphone connected */
+ snd_soc_update_bits(codec, WM8753_INTPOL, WM8753_GPIO4IPOL_MASK,
+ WM8753_GPIO4IPOL_LOW);
+ /* GPIO4 interrupt enable */
+ snd_soc_update_bits(codec, WM8753_INTEN, WM8753_GPIO4IEN_MASK,
+ WM8753_GPIO4IEN_EN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8753_headphone_detect);
+
static int wm8753_probe(struct snd_soc_codec *codec)
{
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
@@ -1446,6 +1521,19 @@ static int wm8753_probe(struct snd_soc_codec *codec)
return ret;
}
+ if (wm8753->irq) {
+ /* register an audio interrupt */
+ ret = request_threaded_irq(wm8753->irq, NULL,
+ wm8753_jack_handler,
+ IRQF_TRIGGER_FALLING,
+ "wm8753", codec);
+
+ if (ret) {
+ dev_err(codec->dev, "Failed to request IRQ: %d\n", ret);
+ return ret;
+ }
+ }
+
wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
wm8753->dai_func = 0;
@@ -1543,6 +1631,7 @@ static __devinit int wm8753_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8753);
wm8753->control_type = SND_SOC_I2C;
+ wm8753->irq = i2c->irq;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8753, wm8753_dai, ARRAY_SIZE(wm8753_dai));
diff --git a/sound/soc/codecs/wm8753.h b/sound/soc/codecs/wm8753.h
index 94edac144bcb..550af291d3fb 100644
--- a/sound/soc/codecs/wm8753.h
+++ b/sound/soc/codecs/wm8753.h
@@ -112,7 +112,28 @@
#define WM8753_VXCLK_DIV_8 (3 << 6)
#define WM8753_VXCLK_DIV_16 (4 << 6)
-#define WM8753_DAI_HIFI 0
-#define WM8753_DAI_VOICE 1
+/* GPIO Control 2 */
+#define WM8753_GP2M_MASK (7 << 3)
+#define WM8753_GP2M_INT (3 << 3)
+/* GPIO Control 1 */
+#define WM8753_INTCON_MASK (3 << 7)
+#define WM8753_INTCON_AL (3 << 7)
+
+/* Interrupt Polarity*/
+#define WM8753_GPIO4IPOL_MASK (1 << 4)
+#define WM8753_GPIO4IPOL_LOW (1 << 4)
+#define WM8753_GPIO4IPOL_HIGH (0 << 4)
+
+/* Interrupt Enable*/
+#define WM8753_GPIO4IEN_MASK (1 << 4)
+#define WM8753_GPIO4IEN_EN (1 << 4)
+#define WM8753_GPIO4IEN_DIS (0 << 4)
+
+#define WM8753_DAI_HIFI 0
+#define WM8753_DAI_VOICE 1
+
+int wm8753_headphone_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, enum snd_jack_types type,
+ unsigned int debounce_time_hp);
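+
+/*
+ * Illustrative call from a machine driver (the jack name, variable name and
+ * the 200 ms debounce value are assumptions, not part of the original
+ * sources):
+ *
+ *	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+ *			 &hp_jack);
+ *	wm8753_headphone_detect(codec, &hp_jack, SND_JACK_HEADPHONE, 200);
+ */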
#endif
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 4ad8ebd290e3..4f418fc2c324 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1761,6 +1761,11 @@ static struct snd_soc_dai_driver wm8903_dai = {
static int wm8903_suspend(struct snd_soc_codec *codec, pm_message_t state)
{
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+
+ if (wm8903->irq)
+ disable_irq(wm8903->irq);
+
wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
@@ -1768,11 +1773,15 @@ static int wm8903_suspend(struct snd_soc_codec *codec, pm_message_t state)
static int wm8903_resume(struct snd_soc_codec *codec)
{
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
int i;
u16 *reg_cache = codec->reg_cache;
u16 *tmp_cache = kmemdup(reg_cache, sizeof(wm8903_reg_defaults),
GFP_KERNEL);
+ if (wm8903->irq)
+ enable_irq(wm8903->irq);
+
/* Bring the codec back up to standby first to minimise pop/clicks */
wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index c6af1fd707f5..b1f6bf4ce99a 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -1,23 +1,56 @@
config SND_SOC_TEGRA
tristate "SoC Audio for the Tegra System-on-Chip"
depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
+ depends on !TEGRA_LEGACY_AUDIO
help
Say Y or M here if you want support for SoC audio on Tegra.
-config SND_SOC_TEGRA_I2S
- tristate
- depends on SND_SOC_TEGRA
+config SND_SOC_TEGRA20_DAS
+ tristate "Tegra 20 Digital Audio Switch driver"
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC
+
+config SND_SOC_TEGRA20_I2S
+ tristate "Tegra 20 I2S driver"
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA20_DAS
+ help
+ Say Y or M if you want to add support for codecs attached to the
+ Tegra I2S interface. You will also need to select the individual
+ machine drivers to support below.
+
+config SND_SOC_TEGRA30_AHUB
+ tristate "Tegra 30 Audio Hub driver"
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_3x_SOC
+
+config SND_SOC_TEGRA30_DAM
+ tristate "Tegra 30 Audio Dam driver"
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_3x_SOC
+ select SND_SOC_TEGRA30_AHUB
+
+config SND_SOC_TEGRA30_I2S
+ tristate "Tegra 30 I2S driver"
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_3x_SOC
+ select SND_SOC_TEGRA30_AHUB
help
Say Y or M if you want to add support for codecs attached to the
Tegra I2S interface. You will also need to select the individual
machine drivers to support below.
-config SND_SOC_TEGRA_SPDIF
+config SND_SOC_TEGRA20_SPDIF
tristate
- depends on SND_SOC_TEGRA
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC && TEGRA_DC
default m
help
- Say Y or M if you want to add support for the SPDIF interface.
+ Say Y or M if you want to add support for the TEGRA20 SPDIF interface.
+ You will also need to select the individual machine drivers to support
+ below.
+
+config SND_SOC_TEGRA30_SPDIF
+ tristate
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_3x_SOC && TEGRA_DC
+ select SND_SOC_TEGRA30_AHUB
+ help
+ Say Y or M if you want to add support for the TEGRA30 SPDIF interface.
You will also need to select the individual machine drivers to support
below.
@@ -30,10 +63,15 @@ config MACH_HAS_SND_SOC_TEGRA_WM8903
config SND_SOC_TEGRA_WM8903
tristate "SoC Audio support for Tegra boards using a WM8903 codec"
- depends on SND_SOC_TEGRA && I2C
+ depends on SND_SOC_TEGRA && I2C && TEGRA_DC
depends on MACH_HAS_SND_SOC_TEGRA_WM8903
- select SND_SOC_TEGRA_I2S
+ select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
+ select SND_SOC_TEGRA20_SPDIF if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_SPDIF if ARCH_TEGRA_3x_SOC
select SND_SOC_WM8903
+ select SND_SOC_SPDIF
+ select SND_SOC_TEGRA30_DAM if ARCH_TEGRA_3x_SOC
help
Say Y or M here if you want to add support for SoC audio on Tegra
boards using the WM8093 codec. Currently, the supported boards are
@@ -47,3 +85,68 @@ config SND_SOC_TEGRA_TRIMSLICE
help
Say Y or M here if you want to add support for SoC audio on the
TrimSlice platform.
+
+config MACH_HAS_SND_SOC_TEGRA_WM8753
+ bool
+ help
+ Machines that use the SND_SOC_TEGRA_WM8753 driver should select
+ this config option, in order to allow the user to enable
+ SND_SOC_TEGRA_WM8753.
+
+config SND_SOC_TEGRA_WM8753
+ tristate "SoC Audio support for Tegra boards using a WM8753 codec"
+ depends on SND_SOC_TEGRA && I2C && TEGRA_DC
+ depends on MACH_HAS_SND_SOC_TEGRA_WM8753
+ select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
+ select SND_SOC_TEGRA20_SPDIF if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_SPDIF if ARCH_TEGRA_3x_SOC
+ select SND_SOC_WM8753
+ select SND_SOC_SPDIF
+ help
+ Say Y or M here if you want to add support for SoC audio on Tegra
+ boards using the WM8753 codec. Currently, only supported board is
+ Whistler.
+
+config MACH_HAS_SND_SOC_TEGRA_MAX98088
+ bool
+ help
+ Machines that use the SND_SOC_TEGRA_MAX98088 driver should select
+ this config option, in order to allow the user to enable
+ SND_SOC_TEGRA_MAX98088.
+
+config SND_SOC_TEGRA_MAX98088
+ tristate "SoC Audio support for Tegra boards using a MAX98088 codec"
+ depends on SND_SOC_TEGRA && I2C && TEGRA_DC
+ depends on MACH_HAS_SND_SOC_TEGRA_MAX98088
+ select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
+ select SND_SOC_TEGRA30_SPDIF if ARCH_TEGRA_3x_SOC
+ select SND_SOC_MAX98088
+ select SND_SOC_SPDIF
+ select SND_SOC_TEGRA30_DAM if ARCH_TEGRA_3x_SOC
+ help
+ Say Y or M here if you want to add support for SoC audio on Tegra
+ boards using the MAX98088 codec. Currently, only supported board is
+ Enterprise.
+
+config MACH_HAS_SND_SOC_TEGRA_TLV320AIC326X
+ bool
+ help
+ Machines that use the SND_SOC_TEGRA_TLV320AIC326X driver should select
+ this config option, in order to allow the user to enable
+ SND_SOC_TEGRA_TLV320AIC326X.
+
+config SND_SOC_TEGRA_TLV320AIC326X
+ tristate "SoC Audio support for Tegra boards using a TI AIC326x codec"
+ depends on SND_SOC_TEGRA && I2C && TEGRA_DC
+ depends on MACH_HAS_SND_SOC_TEGRA_TLV320AIC326X
+ select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
+ select SND_SOC_TEGRA30_SPDIF if ARCH_TEGRA_3x_SOC
+ select SND_SOC_TLV320AIC326X
+ select SND_SOC_SPDIF
+ select SND_SOC_TEGRA30_DAM if ARCH_TEGRA_3x_SOC
+ help
+ Say Y or M here if you want to add support for SoC audio on Tegra
+ boards using the TI AIC326X codec.
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
index 4d943b3fe150..821e9b06a6f5 100644
--- a/sound/soc/tegra/Makefile
+++ b/sound/soc/tegra/Makefile
@@ -1,19 +1,35 @@
+GCOV_PROFILE := y
+
# Tegra platform Support
-snd-soc-tegra-das-objs := tegra_das.o
snd-soc-tegra-pcm-objs := tegra_pcm.o
-snd-soc-tegra-i2s-objs := tegra_i2s.o
-snd-soc-tegra-spdif-objs := tegra_spdif.o
+snd-soc-tegra20-spdif-objs := tegra20_spdif.o
snd-soc-tegra-utils-objs += tegra_asoc_utils.o
+snd-soc-tegra20-das-objs := tegra20_das.o
+snd-soc-tegra20-i2s-objs := tegra20_i2s.o
+snd-soc-tegra30-ahub-objs := tegra30_ahub.o
+snd-soc-tegra30-i2s-objs := tegra30_i2s.o
+snd-soc-tegra30-spdif-objs := tegra30_spdif.o
+snd-soc-tegra30-dam-objs := tegra30_dam.o
-obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-utils.o
-obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-das.o
obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-pcm.o
-obj-$(CONFIG_SND_SOC_TEGRA_I2S) += snd-soc-tegra-i2s.o
-obj-$(CONFIG_SND_SOC_TEGRA_SPDIF) += snd-soc-tegra-spdif.o
+obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-utils.o
+obj-$(CONFIG_SND_SOC_TEGRA20_DAS) += snd-soc-tegra20-das.o
+obj-$(CONFIG_SND_SOC_TEGRA20_I2S) += snd-soc-tegra20-i2s.o
+obj-$(CONFIG_SND_SOC_TEGRA30_AHUB) += snd-soc-tegra30-ahub.o
+obj-$(CONFIG_SND_SOC_TEGRA30_DAM) += snd-soc-tegra30-dam.o
+obj-$(CONFIG_SND_SOC_TEGRA30_I2S) += snd-soc-tegra30-i2s.o
+obj-$(CONFIG_SND_SOC_TEGRA20_SPDIF) += snd-soc-tegra20-spdif.o
+obj-$(CONFIG_SND_SOC_TEGRA30_SPDIF) += snd-soc-tegra30-spdif.o
# Tegra machine Support
snd-soc-tegra-wm8903-objs := tegra_wm8903.o
snd-soc-tegra-trimslice-objs := trimslice.o
+snd-soc-tegra-wm8753-objs := tegra_wm8753.o
+snd-soc-tegra-max98088-objs := tegra_max98088.o
+snd-soc-tegra-aic326x-objs := tegra_aic326x.o
obj-$(CONFIG_SND_SOC_TEGRA_WM8903) += snd-soc-tegra-wm8903.o
obj-$(CONFIG_SND_SOC_TEGRA_TRIMSLICE) += snd-soc-tegra-trimslice.o
+obj-$(CONFIG_SND_SOC_TEGRA_WM8753) += snd-soc-tegra-wm8753.o
+obj-$(CONFIG_SND_SOC_TEGRA_MAX98088) += snd-soc-tegra-max98088.o
+obj-$(CONFIG_SND_SOC_TEGRA_TLV320AIC326X) += snd-soc-tegra-aic326x.o
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
new file mode 100644
index 000000000000..29ce3166052c
--- /dev/null
+++ b/sound/soc/tegra/tegra20_das.c
@@ -0,0 +1,301 @@
+/*
+ * tegra20_das.c - Tegra20 DAS driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <sound/soc.h>
+#include "tegra20_das.h"
+
+#define DRV_NAME "tegra20-das"
+
+static struct tegra20_das *das;
+
+static inline void tegra20_das_write(u32 reg, u32 val)
+{
+#ifdef CONFIG_PM
+ das->reg_cache[reg >> 2] = val;
+#endif
+ __raw_writel(val, das->regs + reg);
+}
+
+static inline u32 tegra20_das_read(u32 reg)
+{
+ return __raw_readl(das->regs + reg);
+}
+
+#ifdef CONFIG_PM
+int tegra20_das_resume(void)
+{
+ int i, reg;
+
+ for (i = 0; i <= TEGRA20_DAS_DAP_ID_5; i++)
+ tegra20_das_write(i << 2, das->reg_cache[i]);
+
+ for (i = 0; i <= TEGRA20_DAS_DAC_ID_3; i++) {
+ reg = TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL +
+ (i * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
+ tegra20_das_write(reg, das->reg_cache[reg >> 2]);
+ }
+
+ return 0;
+}
+#endif
+
+int tegra20_das_connect_dap_to_dac(int dap, int dac)
+{
+ u32 addr;
+ u32 reg;
+
+ if (!das)
+ return -ENODEV;
+
+ addr = TEGRA20_DAS_DAP_CTRL_SEL +
+ (dap * TEGRA20_DAS_DAP_CTRL_SEL_STRIDE);
+ reg = dac << TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P;
+
+ tegra20_das_write(addr, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra20_das_connect_dap_to_dac);
+
+int tegra20_das_connect_dap_to_dap(int dap, int otherdap, int master,
+ int sdata1rx, int sdata2rx)
+{
+ u32 addr;
+ u32 reg;
+
+ if (!das)
+ return -ENODEV;
+
+ addr = TEGRA20_DAS_DAP_CTRL_SEL +
+ (dap * TEGRA20_DAS_DAP_CTRL_SEL_STRIDE);
+ reg = otherdap << TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P |
+ !!sdata2rx << TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P |
+ !!sdata1rx << TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P |
+ !!master << TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P;
+
+ tegra20_das_write(addr, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra20_das_connect_dap_to_dap);
+
+int tegra20_das_connect_dac_to_dap(int dac, int dap)
+{
+ u32 addr;
+ u32 reg;
+
+ if (!das)
+ return -ENODEV;
+
+ addr = TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL +
+ (dac * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
+ reg = dap << TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P |
+ dap << TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P |
+ dap << TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P;
+
+ tegra20_das_write(addr, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra20_das_connect_dac_to_dap);
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra20_das_show(struct seq_file *s, void *unused)
+{
+ int i;
+ u32 addr;
+ u32 reg;
+
+ for (i = 0; i < TEGRA20_DAS_DAP_CTRL_SEL_COUNT; i++) {
+ addr = TEGRA20_DAS_DAP_CTRL_SEL +
+ (i * TEGRA20_DAS_DAP_CTRL_SEL_STRIDE);
+ reg = tegra20_das_read(addr);
+ seq_printf(s, "TEGRA20_DAS_DAP_CTRL_SEL[%d] = %08x\n", i, reg);
+ }
+
+ for (i = 0; i < TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT; i++) {
+ addr = TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL +
+ (i * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
+ reg = tegra20_das_read(addr);
+ seq_printf(s, "TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL[%d] = %08x\n",
+ i, reg);
+ }
+
+ return 0;
+}
+
+static int tegra20_das_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra20_das_show, inode->i_private);
+}
+
+static const struct file_operations tegra20_das_debug_fops = {
+ .open = tegra20_das_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra20_das_debug_add(struct tegra20_das *das)
+{
+ das->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
+ snd_soc_debugfs_root, das,
+ &tegra20_das_debug_fops);
+}
+
+static void tegra20_das_debug_remove(struct tegra20_das *das)
+{
+ if (das->debug)
+ debugfs_remove(das->debug);
+}
+#else
+static inline void tegra20_das_debug_add(struct tegra20_das *das)
+{
+}
+
+static inline void tegra20_das_debug_remove(struct tegra20_das *das)
+{
+}
+#endif
+
+static int __devinit tegra20_das_probe(struct platform_device *pdev)
+{
+ struct resource *res, *region;
+ int ret = 0;
+#ifdef CONFIG_PM
+ int i, reg;
+#endif
+
+ if (das)
+ return -ENODEV;
+
+ das = kzalloc(sizeof(struct tegra20_das), GFP_KERNEL);
+ if (!das) {
+ dev_err(&pdev->dev, "Can't allocate tegra20_das\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ das->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ region = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ das->regs = ioremap(res->start, resource_size(res));
+ if (!das->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+#ifdef CONFIG_PM
+ /* populate the das reg cache with POR values*/
+ for (i = 0; i <= TEGRA20_DAS_DAP_ID_5; i++)
+ das->reg_cache[i] = tegra20_das_read(i << 2);
+
+ for (i = 0; i <= TEGRA20_DAS_DAC_ID_3; i++) {
+ reg = TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL +
+ (i * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
+ das->reg_cache[reg >> 2] = tegra20_das_read(reg);
+ }
+#endif
+
+ tegra20_das_debug_add(das);
+
+ platform_set_drvdata(pdev, das);
+
+ return 0;
+
+err_release:
+ release_mem_region(res->start, resource_size(res));
+err_free:
+ kfree(das);
+ das = 0;
+exit:
+ return ret;
+}
+
+static int __devexit tegra20_das_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ if (!das)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, NULL);
+
+ tegra20_das_debug_remove(das);
+
+ iounmap(das->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(das);
+ das = 0;
+
+ return 0;
+}
+
+static struct platform_driver tegra20_das_driver = {
+ .probe = tegra20_das_probe,
+ .remove = __devexit_p(tegra20_das_remove),
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init tegra20_das_modinit(void)
+{
+ return platform_driver_register(&tegra20_das_driver);
+}
+module_init(tegra20_das_modinit);
+
+static void __exit tegra20_das_modexit(void)
+{
+ platform_driver_unregister(&tegra20_das_driver);
+}
+module_exit(tegra20_das_modexit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra DAS driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra20_das.h b/sound/soc/tegra/tegra20_das.h
new file mode 100644
index 000000000000..1d7c57fd0092
--- /dev/null
+++ b/sound/soc/tegra/tegra20_das.h
@@ -0,0 +1,146 @@
+/*
+ * tegra20_das.h - Definitions for Tegra20 DAS driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA20_DAS_H__
+#define __TEGRA20_DAS_H__
+
+/* Register TEGRA20_DAS_DAP_CTRL_SEL */
+#define TEGRA20_DAS_DAP_CTRL_SEL 0x00
+#define TEGRA20_DAS_DAP_CTRL_SEL_COUNT 5
+#define TEGRA20_DAS_DAP_CTRL_SEL_STRIDE 4
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P 31
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_S 1
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P 30
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_S 1
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P 29
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_S 1
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P 0
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_S 5
+
+/* Values for field TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL */
+#define TEGRA20_DAS_DAP_SEL_DAC1 0
+#define TEGRA20_DAS_DAP_SEL_DAC2 1
+#define TEGRA20_DAS_DAP_SEL_DAC3 2
+#define TEGRA20_DAS_DAP_SEL_DAP1 16
+#define TEGRA20_DAS_DAP_SEL_DAP2 17
+#define TEGRA20_DAS_DAP_SEL_DAP3 18
+#define TEGRA20_DAS_DAP_SEL_DAP4 19
+#define TEGRA20_DAS_DAP_SEL_DAP5 20
+
+/* Register TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL */
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL 0x40
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT 3
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE 4
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P 28
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_S 4
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P 24
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_S 4
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P 0
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_S 4
+
+/*
+ * Values for:
+ * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL
+ * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL
+ * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL
+ */
+#define TEGRA20_DAS_DAC_SEL_DAP1 0
+#define TEGRA20_DAS_DAC_SEL_DAP2 1
+#define TEGRA20_DAS_DAC_SEL_DAP3 2
+#define TEGRA20_DAS_DAC_SEL_DAP4 3
+#define TEGRA20_DAS_DAC_SEL_DAP5 4
+
+/*
+ * Names/IDs of the DACs/DAPs.
+ */
+
+#define TEGRA20_DAS_DAP_ID_1 0
+#define TEGRA20_DAS_DAP_ID_2 1
+#define TEGRA20_DAS_DAP_ID_3 2
+#define TEGRA20_DAS_DAP_ID_4 3
+#define TEGRA20_DAS_DAP_ID_5 4
+
+#define TEGRA20_DAS_DAC_ID_1 0
+#define TEGRA20_DAS_DAC_ID_2 1
+#define TEGRA20_DAS_DAC_ID_3 2
+
+#ifdef CONFIG_PM
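+/*
+ * One 32-bit cache word per register, covering offset 0x00 up to and
+ * including the last DAC_INPUT_DATA_CLK_SEL register (0x40 + 2 * 4 = 0x48),
+ * i.e. 19 words.
+ */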
+#define TEGRA20_DAS_CACHE_SIZE \
+ ((((TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL) + \
+ (TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE * TEGRA20_DAS_DAC_ID_3)) >> 2) + 1)
+#endif
+
+struct tegra20_das {
+ struct device *dev;
+ void __iomem *regs;
+ struct dentry *debug;
+#ifdef CONFIG_PM
+ u32 reg_cache[TEGRA20_DAS_CACHE_SIZE];
+#endif
+};
+
+#ifdef CONFIG_PM
+/* Restores the DAS registers from the cache */
+extern int tegra20_das_resume(void);
+#endif
+/*
+ * Terminology:
+ * DAS: Digital audio switch (HW module controlled by this driver)
+ * DAP: Digital audio port (port/pins on Tegra device)
+ * DAC: Digital audio controller (e.g. I2S or AC97 controller elsewhere)
+ *
+ * The Tegra DAS is a mux/cross-bar which can connect each DAP to a specific
+ * DAC, or another DAP. When DAPs are connected, one must be the master and
+ * one the slave. Each DAC allows selection of a specific DAP for input, to
+ * cater for the case where N DAPs are connected to 1 DAC for broadcast
+ * output.
+ *
+ * This driver is dumb; no attempt is made to ensure that a valid routing
+ * configuration is programmed.
+ */
+
+/*
+ * Connect a DAP to a DAC
+ * dap_id: DAP to connect: TEGRA20_DAS_DAP_ID_*
+ * dac_sel: DAC to connect to: TEGRA20_DAS_DAP_SEL_DAC*
+ */
+extern int tegra20_das_connect_dap_to_dac(int dap_id, int dac_sel);
+
+/*
+ * Connect a DAP to another DAP
+ * dap_id: DAP to connect: TEGRA20_DAS_DAP_ID_*
+ * other_dap_sel: DAP to connect to: TEGRA20_DAS_DAP_SEL_DAP*
+ * master: Is this DAP the master (1) or slave (0)
+ * sdata1rx: Is this DAP's SDATA1 pin RX (1) or TX (0)
+ * sdata2rx: Is this DAP's SDATA2 pin RX (1) or TX (0)
+ */
+extern int tegra20_das_connect_dap_to_dap(int dap_id, int other_dap_sel,
+ int master, int sdata1rx,
+ int sdata2rx);
+
+/*
+ * Connect a DAC's input to a DAP
+ * (DAC outputs are selected by the DAP)
+ * dac_id: DAC ID to connect: TEGRA20_DAS_DAC_ID_*
+ * dap_sel: DAP to receive input from: TEGRA20_DAS_DAC_SEL_DAP*
+ */
+extern int tegra20_das_connect_dac_to_dap(int dac_id, int dap_sel);
+
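+/*
+ * Example usage (a sketch only, based on the declarations above): a machine
+ * driver that wants DAC1 and DAP1 cross-connected could call:
+ *
+ * tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_1, TEGRA20_DAS_DAP_SEL_DAC1);
+ * tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAC_ID_1, TEGRA20_DAS_DAC_SEL_DAP1);
+ */
+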
+#endif
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
new file mode 100644
index 000000000000..c8682302b4aa
--- /dev/null
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -0,0 +1,576 @@
+/*
+ * tegra20_i2s.c - Tegra20 I2S driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra20_das.h"
+#include "tegra20_i2s.h"
+
+#define DRV_NAME "tegra20-i2s"
+
+static inline void tegra20_i2s_write(struct tegra20_i2s *i2s, u32 reg, u32 val)
+{
+#ifdef CONFIG_PM
+ i2s->reg_cache[reg >> 2] = val;
+#endif
+ __raw_writel(val, i2s->regs + reg);
+}
+
+static inline u32 tegra20_i2s_read(struct tegra20_i2s *i2s, u32 reg)
+{
+ return __raw_readl(i2s->regs + reg);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra20_i2s_show(struct seq_file *s, void *unused)
+{
+#define REG(r) { r, #r }
+ static const struct {
+ int offset;
+ const char *name;
+ } regs[] = {
+ REG(TEGRA20_I2S_CTRL),
+ REG(TEGRA20_I2S_STATUS),
+ REG(TEGRA20_I2S_TIMING),
+ REG(TEGRA20_I2S_FIFO_SCR),
+ REG(TEGRA20_I2S_PCM_CTRL),
+ REG(TEGRA20_I2S_NW_CTRL),
+ REG(TEGRA20_I2S_TDM_CTRL),
+ REG(TEGRA20_I2S_TDM_TX_RX_CTRL),
+ };
+#undef REG
+
+ struct tegra20_i2s *i2s = s->private;
+ int i;
+
+ clk_enable(i2s->clk_i2s);
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ u32 val = tegra20_i2s_read(i2s, regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+
+ clk_disable(i2s->clk_i2s);
+
+ return 0;
+}
+
+static int tegra20_i2s_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra20_i2s_show, inode->i_private);
+}
+
+static const struct file_operations tegra20_i2s_debug_fops = {
+ .open = tegra20_i2s_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra20_i2s_debug_add(struct tegra20_i2s *i2s, int id)
+{
+ char name[] = DRV_NAME ".0";
+
+ snprintf(name, sizeof(name), DRV_NAME".%1d", id);
+ i2s->debug = debugfs_create_file(name, S_IRUGO, snd_soc_debugfs_root,
+ i2s, &tegra20_i2s_debug_fops);
+}
+
+static void tegra20_i2s_debug_remove(struct tegra20_i2s *i2s)
+{
+ if (i2s->debug)
+ debugfs_remove(i2s->debug);
+}
+#else
+static inline void tegra20_i2s_debug_add(struct tegra20_i2s *i2s, int id)
+{
+}
+
+static inline void tegra20_i2s_debug_remove(struct tegra20_i2s *i2s)
+{
+}
+#endif
+
+static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~(TEGRA20_I2S_CTRL_BIT_FORMAT_MASK |
+ TEGRA20_I2S_CTRL_LRCK_MASK);
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_DSP;
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_DSP;
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_R_LOW;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_I2S;
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_RJM;
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_LJM;
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra20_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct device *dev = substream->pcm->card->dev;
+ struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ u32 reg;
+ int ret, sample_size, srate, i2sclock, bitcnt, i2sclk_div;
+
+ if ((i2s->reg_ctrl & TEGRA20_I2S_CTRL_BIT_FORMAT_I2S) &&
+ (params_channels(params) != 2)) {
+ dev_err(dev, "Only Stereo is supported in I2s mode\n");
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_BIT_SIZE_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_SIZE_16;
+ sample_size = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_SIZE_24;
+ sample_size = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_SIZE_32;
+ sample_size = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+
+ /* Final "* 2" required by Tegra hardware */
+ i2sclock = srate * params_channels(params) * sample_size * 2;
+
+ /* Additional "* 2" is needed for DSP mode */
+ if (i2s->reg_ctrl & TEGRA20_I2S_CTRL_BIT_FORMAT_DSP)
+ i2sclock *= 2;
+
+ ret = clk_set_rate(i2s->clk_i2s, i2sclock);
+ if (ret) {
+ dev_err(dev, "Can't set I2S clock rate: %d\n", ret);
+ return ret;
+ }
+
+ if (i2s->reg_ctrl & TEGRA20_I2S_CTRL_BIT_FORMAT_DSP)
+ i2sclk_div = srate;
+ else
+ i2sclk_div = params_channels(params) * srate;
+
+ bitcnt = (i2sclock / i2sclk_div) - 1;
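+ /*
+ * Worked example (for illustration): 44.1 kHz stereo S16 in I2S mode
+ * gives i2sclock = 44100 * 2 * 16 * 2 = 2822400 Hz, i2sclk_div = 88200,
+ * and bitcnt = (2822400 / 88200) - 1 = 31.
+ */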
+
+ if (bitcnt < 0 || bitcnt > TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US)
+ return -EINVAL;
+ reg = bitcnt << TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
+
+ if (i2sclock % i2sclk_div)
+ reg |= TEGRA20_I2S_TIMING_NON_SYM_ENABLE;
+
+ clk_enable(i2s->clk_i2s);
+
+ tegra20_i2s_write(i2s, TEGRA20_I2S_TIMING, reg);
+
+ tegra20_i2s_write(i2s, TEGRA20_I2S_FIFO_SCR,
+ TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
+ TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
+
+ i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_FIFO_FORMAT_MASK;
+ reg = tegra20_i2s_read(i2s, TEGRA20_I2S_PCM_CTRL);
+ if (i2s->reg_ctrl & TEGRA20_I2S_CTRL_BIT_FORMAT_DSP) {
+ if (sample_size == 16)
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO_FORMAT_16_LSB;
+ else if (sample_size == 24)
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO_FORMAT_24_LSB;
+ else
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO_FORMAT_32;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ reg |= TEGRA20_I2S_PCM_CTRL_TRM_MODE_EN;
+ else
+ reg |= TEGRA20_I2S_PCM_CTRL_RCV_MODE_EN;
+ } else {
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO_FORMAT_PACKED;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ reg &= ~TEGRA20_I2S_PCM_CTRL_TRM_MODE_EN;
+ else
+ reg &= ~TEGRA20_I2S_PCM_CTRL_RCV_MODE_EN;
+ }
+ tegra20_i2s_write(i2s, TEGRA20_I2S_PCM_CTRL, reg);
+
+ clk_disable(i2s->clk_i2s);
+
+ return 0;
+}
+
+static void tegra20_i2s_start_playback(struct tegra20_i2s *i2s)
+{
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO1_ENABLE;
+ tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static void tegra20_i2s_stop_playback(struct tegra20_i2s *i2s)
+{
+ i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_FIFO1_ENABLE;
+ tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static void tegra20_i2s_start_capture(struct tegra20_i2s *i2s)
+{
+ i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO2_ENABLE;
+ tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static void tegra20_i2s_stop_capture(struct tegra20_i2s *i2s)
+{
+ i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_FIFO2_ENABLE;
+ tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+}
+
+static int tegra20_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ clk_enable(i2s->clk_i2s);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra20_i2s_start_playback(i2s);
+ else
+ tegra20_i2s_start_capture(i2s);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra20_i2s_stop_playback(i2s);
+ else
+ tegra20_i2s_stop_capture(i2s);
+ clk_disable(i2s->clk_i2s);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra20_i2s_probe(struct snd_soc_dai *dai)
+{
+ struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+#ifdef CONFIG_PM
+ int i;
+#endif
+
+ dai->capture_dma_data = &i2s->capture_dma_data;
+ dai->playback_dma_data = &i2s->playback_dma_data;
+
+#ifdef CONFIG_PM
+ /* Populate the I2S register cache with power-on reset (POR) values */
+ clk_enable(i2s->clk_i2s);
+
+ for (i = 0; i < ((TEGRA20_I2S_TDM_TX_RX_CTRL >> 2) + 1); i++) {
+ if ((i == TEGRA20_I2S_CACHE_RSVD_6) ||
+ (i == TEGRA20_I2S_CACHE_RSVD_7))
+ continue;
+
+ i2s->reg_cache[i] = tegra20_i2s_read(i2s, i << 2);
+ }
+
+ clk_disable(i2s->clk_i2s);
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int tegra20_i2s_resume(struct snd_soc_dai *cpu_dai)
+{
+ struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ int i;
+
+ clk_enable(i2s->clk_i2s);
+
+ /* Restore the I2S registers */
+ for (i = 0; i < ((TEGRA20_I2S_TDM_TX_RX_CTRL >> 2) + 1); i++) {
+ if ((i == TEGRA20_I2S_CACHE_RSVD_6) ||
+ (i == TEGRA20_I2S_CACHE_RSVD_7))
+ continue;
+
+ tegra20_i2s_write(i2s, i << 2, i2s->reg_cache[i]);
+ }
+
+ /* Restore the DAS registers */
+ tegra20_das_resume();
+
+ clk_disable(i2s->clk_i2s);
+
+ return 0;
+}
+#else
+#define tegra20_i2s_resume NULL
+#endif
+
+static struct snd_soc_dai_ops tegra20_i2s_dai_ops = {
+ .set_fmt = tegra20_i2s_set_fmt,
+ .hw_params = tegra20_i2s_hw_params,
+ .trigger = tegra20_i2s_trigger,
+};
+
+struct snd_soc_dai_driver tegra20_i2s_dai[] = {
+ {
+ .name = DRV_NAME ".0",
+ .probe = tegra20_i2s_probe,
+ .resume = tegra20_i2s_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &tegra20_i2s_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = DRV_NAME ".1",
+ .probe = tegra20_i2s_probe,
+ .resume = tegra20_i2s_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &tegra20_i2s_dai_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static __devinit int tegra20_i2s_platform_probe(struct platform_device *pdev)
+{
+ struct tegra20_i2s *i2s;
+ struct resource *mem, *memregion, *dmareq;
+ int ret;
+
+ if ((pdev->id < 0) ||
+ (pdev->id >= ARRAY_SIZE(tegra20_i2s_dai))) {
+ dev_err(&pdev->dev, "ID %d out of range\n", pdev->id);
+ return -EINVAL;
+ }
+
+ i2s = kzalloc(sizeof(struct tegra20_i2s), GFP_KERNEL);
+ if (!i2s) {
+ dev_err(&pdev->dev, "Can't allocate tegra20_i2s\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ dev_set_drvdata(&pdev->dev, i2s);
+
+ i2s->clk_i2s = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2s->clk_i2s)) {
+ dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
+ ret = PTR_ERR(i2s->clk_i2s);
+ goto err_free;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_clk_put;
+ }
+
+ dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!dmareq) {
+ dev_err(&pdev->dev, "No DMA resource\n");
+ ret = -ENODEV;
+ goto err_clk_put;
+ }
+
+ memregion = request_mem_region(mem->start, resource_size(mem),
+ DRV_NAME);
+ if (!memregion) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_clk_put;
+ }
+
+ i2s->regs = ioremap(mem->start, resource_size(mem));
+ if (!i2s->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ i2s->capture_dma_data.addr = mem->start + TEGRA20_I2S_FIFO2;
+ i2s->capture_dma_data.wrap = 4;
+ i2s->capture_dma_data.width = 32;
+ i2s->capture_dma_data.req_sel = dmareq->start;
+
+ i2s->playback_dma_data.addr = mem->start + TEGRA20_I2S_FIFO1;
+ i2s->playback_dma_data.wrap = 4;
+ i2s->playback_dma_data.width = 32;
+ i2s->playback_dma_data.req_sel = dmareq->start;
+
+ ret = snd_soc_register_dai(&pdev->dev, &tegra20_i2s_dai[pdev->id]);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ tegra20_i2s_debug_add(i2s, pdev->id);
+
+ return 0;
+
+err_unmap:
+ iounmap(i2s->regs);
+err_release:
+ release_mem_region(mem->start, resource_size(mem));
+err_clk_put:
+ clk_put(i2s->clk_i2s);
+err_free:
+ kfree(i2s);
+exit:
+ return ret;
+}
+
+static int __devexit tegra20_i2s_platform_remove(struct platform_device *pdev)
+{
+ struct tegra20_i2s *i2s = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+
+ snd_soc_unregister_dai(&pdev->dev);
+
+ tegra20_i2s_debug_remove(i2s);
+
+ iounmap(i2s->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(i2s->clk_i2s);
+
+ kfree(i2s);
+
+ return 0;
+}
+
+static struct platform_driver tegra20_i2s_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra20_i2s_platform_probe,
+ .remove = __devexit_p(tegra20_i2s_platform_remove),
+};
+
+static int __init snd_tegra20_i2s_init(void)
+{
+ return platform_driver_register(&tegra20_i2s_driver);
+}
+module_init(snd_tegra20_i2s_init);
+
+static void __exit snd_tegra20_i2s_exit(void)
+{
+ platform_driver_unregister(&tegra20_i2s_driver);
+}
+module_exit(snd_tegra20_i2s_exit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra I2S ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra20_i2s.h b/sound/soc/tegra/tegra20_i2s.h
new file mode 100644
index 000000000000..ded0c8dee023
--- /dev/null
+++ b/sound/soc/tegra/tegra20_i2s.h
@@ -0,0 +1,198 @@
+/*
+ * tegra20_i2s.h - Definitions for Tegra20 I2S driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2010 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA20_I2S_H__
+#define __TEGRA20_I2S_H__
+
+#include "tegra_pcm.h"
+
+/* Register offsets from TEGRA20_I2S1_BASE and TEGRA20_I2S2_BASE */
+
+#define TEGRA20_I2S_CTRL 0x00
+#define TEGRA20_I2S_STATUS 0x04
+#define TEGRA20_I2S_TIMING 0x08
+#define TEGRA20_I2S_FIFO_SCR 0x0c
+#define TEGRA20_I2S_PCM_CTRL 0x10
+#define TEGRA20_I2S_NW_CTRL 0x14
+#define TEGRA20_I2S_TDM_CTRL 0x20
+#define TEGRA20_I2S_TDM_TX_RX_CTRL 0x24
+#define TEGRA20_I2S_FIFO1 0x40
+#define TEGRA20_I2S_FIFO2 0x80
+
+/* Fields in TEGRA20_I2S_CTRL */
+
+#define TEGRA20_I2S_CTRL_FIFO2_TX_ENABLE (1 << 30)
+#define TEGRA20_I2S_CTRL_FIFO1_ENABLE (1 << 29)
+#define TEGRA20_I2S_CTRL_FIFO2_ENABLE (1 << 28)
+#define TEGRA20_I2S_CTRL_FIFO1_RX_ENABLE (1 << 27)
+#define TEGRA20_I2S_CTRL_FIFO_LPBK_ENABLE (1 << 26)
+#define TEGRA20_I2S_CTRL_MASTER_ENABLE (1 << 25)
+
+#define TEGRA20_I2S_LRCK_LEFT_LOW 0
+#define TEGRA20_I2S_LRCK_RIGHT_LOW 1
+
+#define TEGRA20_I2S_CTRL_LRCK_SHIFT 24
+#define TEGRA20_I2S_CTRL_LRCK_MASK (1 << TEGRA20_I2S_CTRL_LRCK_SHIFT)
+#define TEGRA20_I2S_CTRL_LRCK_L_LOW (TEGRA20_I2S_LRCK_LEFT_LOW << TEGRA20_I2S_CTRL_LRCK_SHIFT)
+#define TEGRA20_I2S_CTRL_LRCK_R_LOW (TEGRA20_I2S_LRCK_RIGHT_LOW << TEGRA20_I2S_CTRL_LRCK_SHIFT)
+
+#define TEGRA20_I2S_BIT_FORMAT_I2S 0
+#define TEGRA20_I2S_BIT_FORMAT_RJM 1
+#define TEGRA20_I2S_BIT_FORMAT_LJM 2
+#define TEGRA20_I2S_BIT_FORMAT_DSP 3
+
+#define TEGRA20_I2S_CTRL_BIT_FORMAT_SHIFT 10
+#define TEGRA20_I2S_CTRL_BIT_FORMAT_MASK (3 << TEGRA20_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_FORMAT_I2S (TEGRA20_I2S_BIT_FORMAT_I2S << TEGRA20_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_FORMAT_RJM (TEGRA20_I2S_BIT_FORMAT_RJM << TEGRA20_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_FORMAT_LJM (TEGRA20_I2S_BIT_FORMAT_LJM << TEGRA20_I2S_CTRL_BIT_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_FORMAT_DSP (TEGRA20_I2S_BIT_FORMAT_DSP << TEGRA20_I2S_CTRL_BIT_FORMAT_SHIFT)
+
+#define TEGRA20_I2S_BIT_SIZE_16 0
+#define TEGRA20_I2S_BIT_SIZE_20 1
+#define TEGRA20_I2S_BIT_SIZE_24 2
+#define TEGRA20_I2S_BIT_SIZE_32 3
+
+#define TEGRA20_I2S_CTRL_BIT_SIZE_SHIFT 8
+#define TEGRA20_I2S_CTRL_BIT_SIZE_MASK (3 << TEGRA20_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_SIZE_16 (TEGRA20_I2S_BIT_SIZE_16 << TEGRA20_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_SIZE_20 (TEGRA20_I2S_BIT_SIZE_20 << TEGRA20_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_SIZE_24 (TEGRA20_I2S_BIT_SIZE_24 << TEGRA20_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA20_I2S_CTRL_BIT_SIZE_32 (TEGRA20_I2S_BIT_SIZE_32 << TEGRA20_I2S_CTRL_BIT_SIZE_SHIFT)
+
+#define TEGRA20_I2S_FIFO_16_LSB 0
+#define TEGRA20_I2S_FIFO_20_LSB 1
+#define TEGRA20_I2S_FIFO_24_LSB 2
+#define TEGRA20_I2S_FIFO_32 3
+#define TEGRA20_I2S_FIFO_PACKED 7
+
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT 4
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_MASK (7 << TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_16_LSB (TEGRA20_I2S_FIFO_16_LSB << TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_20_LSB (TEGRA20_I2S_FIFO_20_LSB << TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_24_LSB (TEGRA20_I2S_FIFO_24_LSB << TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_32 (TEGRA20_I2S_FIFO_32 << TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT)
+#define TEGRA20_I2S_CTRL_FIFO_FORMAT_PACKED (TEGRA20_I2S_FIFO_PACKED << TEGRA20_I2S_CTRL_FIFO_FORMAT_SHIFT)
+
+#define TEGRA20_I2S_CTRL_IE_FIFO1_ERR (1 << 3)
+#define TEGRA20_I2S_CTRL_IE_FIFO2_ERR (1 << 2)
+#define TEGRA20_I2S_CTRL_QE_FIFO1 (1 << 1)
+#define TEGRA20_I2S_CTRL_QE_FIFO2 (1 << 0)
+
+/* Fields in TEGRA20_I2S_STATUS */
+
+#define TEGRA20_I2S_STATUS_FIFO1_RDY (1 << 31)
+#define TEGRA20_I2S_STATUS_FIFO2_RDY (1 << 30)
+#define TEGRA20_I2S_STATUS_FIFO1_BSY (1 << 29)
+#define TEGRA20_I2S_STATUS_FIFO2_BSY (1 << 28)
+#define TEGRA20_I2S_STATUS_FIFO1_ERR (1 << 3)
+#define TEGRA20_I2S_STATUS_FIFO2_ERR (1 << 2)
+#define TEGRA20_I2S_STATUS_QS_FIFO1 (1 << 1)
+#define TEGRA20_I2S_STATUS_QS_FIFO2 (1 << 0)
+
+/* Fields in TEGRA20_I2S_TIMING */
+
+#define TEGRA20_I2S_TIMING_NON_SYM_ENABLE (1 << 12)
+#define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT 0
+#define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US 0x7fff
+#define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK (TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US << TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT)
+
+/* Fields in TEGRA20_I2S_FIFO_SCR */
+
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT 24
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT 16
+#define TEGRA20_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK 0x3f
+
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_CLR (1 << 12)
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_CLR (1 << 8)
+
+#define TEGRA20_I2S_FIFO_ATN_LVL_ONE_SLOT 0
+#define TEGRA20_I2S_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define TEGRA20_I2S_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define TEGRA20_I2S_FIFO_ATN_LVL_TWELVE_SLOTS 3
+
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT 4
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK (3 << TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_ONE_SLOT (TEGRA20_I2S_FIFO_ATN_LVL_ONE_SLOT << TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS (TEGRA20_I2S_FIFO_ATN_LVL_FOUR_SLOTS << TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_EIGHT_SLOTS (TEGRA20_I2S_FIFO_ATN_LVL_EIGHT_SLOTS << TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_TWELVE_SLOTS (TEGRA20_I2S_FIFO_ATN_LVL_TWELVE_SLOTS << TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
+
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT 0
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK (3 << TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_ONE_SLOT (TEGRA20_I2S_FIFO_ATN_LVL_ONE_SLOT << TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS (TEGRA20_I2S_FIFO_ATN_LVL_FOUR_SLOTS << TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_EIGHT_SLOTS (TEGRA20_I2S_FIFO_ATN_LVL_EIGHT_SLOTS << TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+#define TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_TWELVE_SLOTS (TEGRA20_I2S_FIFO_ATN_LVL_TWELVE_SLOTS << TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
+
+/* Fields in TEGRA20_I2S_PCM_CTRL */
+
+#define TEGRA20_I2S_PCM_TX_POS_EDGE_NO_HIGHZ 0
+#define TEGRA20_I2S_PCM_TX_POS_EDGE_HIGHZ 1
+#define TEGRA20_I2S_PCM_TX_NEG_EDGE_NO_HIGHZ 2
+#define TEGRA20_I2S_PCM_TX_NEG_EDGE_HIGHZ 3
+
+#define TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_SHIFT 9
+#define TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_MASK (0x3 << TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_SHIFT)
+#define TEGRA20_I2S_PCM_CTRL_TX_POS_EDGE_NO_HIGHZ (TEGRA20_I2S_PCM_TX_POS_EDGE_NO_HIGHZ << TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_SHIFT)
+#define TEGRA20_I2S_PCM_CTRL_TX_POS_EDGE_HIGHZ (TEGRA20_I2S_PCM_TX_POS_EDGE_HIGHZ << TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_SHIFT)
+#define TEGRA20_I2S_PCM_CTRL_TX_NEG_EDGE_NO_HIGHZ (TEGRA20_I2S_PCM_TX_NEG_EDGE_NO_HIGHZ << TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_SHIFT)
+#define TEGRA20_I2S_PCM_CTRL_TX_NEG_EDGE_HIGHZ (TEGRA20_I2S_PCM_TX_NEG_EDGE_HIGHZ << TEGRA20_I2S_PCM_CTRL_TX_EDGE_CTRL_SHIFT)
+
+#define TEGRA20_I2S_PCM_CTRL_TX_MASK_BITS_SHIFT 6
+#define TEGRA20_I2S_PCM_CTRL_TX_MASK_BITS_MASK (0x7 << TEGRA20_I2S_PCM_CTRL_TX_MASK_BITS_SHIFT)
+
+#define TEGRA20_I2S_PCM_CTRL_FSYNC_LONG (1 << 5)
+#define TEGRA20_I2S_PCM_CTRL_TRM_MODE_EN (1 << 4)
+
+#define TEGRA20_I2S_PCM_CTRL_RX_MASK_BITS_SHIFT 1
+#define TEGRA20_I2S_PCM_CTRL_RX_MASK_BITS_MASK (0x7 << TEGRA20_I2S_PCM_CTRL_RX_MASK_BITS_SHIFT)
+
+#define TEGRA20_I2S_PCM_CTRL_RCV_MODE_EN (1 << 0)
+
+#ifdef CONFIG_PM
+/* unused cache locations for i2s reg cache */
+#define TEGRA20_I2S_CACHE_RSVD_6 ((TEGRA20_I2S_NW_CTRL>>2) + 1)
+#define TEGRA20_I2S_CACHE_RSVD_7 (TEGRA20_I2S_CACHE_RSVD_6 + 1)
+#endif
+
+struct tegra20_i2s {
+ struct clk *clk_i2s;
+ struct tegra_pcm_dma_params capture_dma_data;
+ struct tegra_pcm_dma_params playback_dma_data;
+ void __iomem *regs;
+ struct dentry *debug;
+ u32 reg_ctrl;
+#ifdef CONFIG_PM
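+ /* One cache word per 32-bit register, offsets 0x00..TEGRA20_I2S_TDM_TX_RX_CTRL */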
+ u32 reg_cache[(TEGRA20_I2S_TDM_TX_RX_CTRL >> 2) + 1];
+#endif
+};
+
+#endif
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
new file mode 100644
index 000000000000..3e747b5e1931
--- /dev/null
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -0,0 +1,463 @@
+/*
+ * tegra20_spdif.c - Tegra20 SPDIF driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <mach/hdmi-audio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra20_spdif.h"
+
+#define DRV_NAME "tegra20-spdif"
+
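+/*
+ * Under CONFIG_PM, writes are mirrored into two caches: control/status
+ * registers below TEGRA20_SPDIF_CH_STA_TX_A go into reg_ctrl_cache and the
+ * TX channel-status words go into reg_tx_cache, so tegra20_spdif_resume()
+ * can restore both.
+ */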
+static inline void tegra20_spdif_write(struct tegra20_spdif *spdif, u32 reg,
+ u32 val)
+{
+#ifdef CONFIG_PM
+ if (reg < TEGRA20_SPDIF_CH_STA_TX_A)
+ spdif->reg_ctrl_cache[reg >> 2] = val;
+ else
+ spdif->reg_tx_cache[((reg - TEGRA20_SPDIF_CH_STA_TX_A) >> 2)]
+ = val;
+#endif
+ __raw_writel(val, spdif->regs + reg);
+}
+
+static inline u32 tegra20_spdif_read(struct tegra20_spdif *spdif, u32 reg)
+{
+ return __raw_readl(spdif->regs + reg);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra20_spdif_show(struct seq_file *s, void *unused)
+{
+#define REG(r) { r, #r }
+ static const struct {
+ int offset;
+ const char *name;
+ } regs[] = {
+ REG(TEGRA20_SPDIF_CTRL),
+ REG(TEGRA20_SPDIF_STATUS),
+ REG(TEGRA20_SPDIF_STROBE_CTRL),
+ REG(TEGRA20_SPDIF_DATA_FIFO_CSR),
+ REG(TEGRA20_SPDIF_CH_STA_RX_A),
+ REG(TEGRA20_SPDIF_CH_STA_RX_B),
+ REG(TEGRA20_SPDIF_CH_STA_RX_C),
+ REG(TEGRA20_SPDIF_CH_STA_RX_D),
+ REG(TEGRA20_SPDIF_CH_STA_RX_E),
+ REG(TEGRA20_SPDIF_CH_STA_RX_F),
+ REG(TEGRA20_SPDIF_CH_STA_TX_A),
+ REG(TEGRA20_SPDIF_CH_STA_TX_B),
+ REG(TEGRA20_SPDIF_CH_STA_TX_C),
+ REG(TEGRA20_SPDIF_CH_STA_TX_D),
+ REG(TEGRA20_SPDIF_CH_STA_TX_E),
+ REG(TEGRA20_SPDIF_CH_STA_TX_F),
+ };
+#undef REG
+
+ struct tegra20_spdif *spdif = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ u32 val = tegra20_spdif_read(spdif, regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+
+ return 0;
+}
+
+static int tegra20_spdif_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra20_spdif_show, inode->i_private);
+}
+
+static const struct file_operations tegra20_spdif_debug_fops = {
+ .open = tegra20_spdif_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra20_spdif_debug_add(struct tegra20_spdif *spdif)
+{
+ spdif->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
+ snd_soc_debugfs_root, spdif,
+ &tegra20_spdif_debug_fops);
+}
+
+static void tegra20_spdif_debug_remove(struct tegra20_spdif *spdif)
+{
+ if (spdif->debug)
+ debugfs_remove(spdif->debug);
+}
+#else
+static inline void tegra20_spdif_debug_add(struct tegra20_spdif *spdif)
+{
+}
+
+static inline void tegra20_spdif_debug_remove(struct tegra20_spdif *spdif)
+{
+}
+#endif
+
+static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct device *dev = substream->pcm->card->dev;
+ struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+ int ret, srate, spdifclock;
+ u32 ch_sta[2] = {0, 0};
+
+ spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_PACK;
+ spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_PACK;
+ spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+ ch_sta[0] = tegra20_spdif_read(spdif, TEGRA20_SPDIF_CH_STA_TX_A);
+ ch_sta[0] &= ~TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_MASK;
+ ch_sta[1] = tegra20_spdif_read(spdif, TEGRA20_SPDIF_CH_STA_TX_B);
+ ch_sta[1] &= ~TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_MASK;
+ switch (params_rate(params)) {
+ case 32000:
+ spdifclock = 4096000;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_32000;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_32000;
+ break;
+ case 44100:
+ spdifclock = 5644800;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_44100;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_44100;
+ break;
+ case 48000:
+ spdifclock = 6144000;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_48000;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_48000;
+ break;
+ case 88200:
+ spdifclock = 11289600;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_88200;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_88200;
+ break;
+ case 96000:
+ spdifclock = 12288000;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_96000;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_96000;
+ break;
+ case 176400:
+ spdifclock = 22579200;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_176400;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_176400;
+ break;
+ case 192000:
+ spdifclock = 24576000;
+ ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_192000;
+ ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(spdif->clk_spdif_out, spdifclock);
+ if (ret) {
+ dev_err(dev, "Can't set SPDIF clock rate: %d\n", ret);
+ return ret;
+ }
+
+ clk_enable(spdif->clk_spdif_out);
+
+ tegra20_spdif_write(spdif, TEGRA20_SPDIF_CH_STA_TX_A, ch_sta[0]);
+ tegra20_spdif_write(spdif, TEGRA20_SPDIF_CH_STA_TX_B, ch_sta[1]);
+
+ clk_disable(spdif->clk_spdif_out);
+
+ ret = tegra_hdmi_setup_audio_freq_source(srate, SPDIF);
+ if (ret) {
+ dev_err(dev, "Can't set HDMI audio freq source: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tegra20_spdif_start_playback(struct tegra20_spdif *spdif)
+{
+ spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_TC_EN | TEGRA20_SPDIF_CTRL_TX_EN;
+ tegra20_spdif_write(spdif, TEGRA20_SPDIF_CTRL, spdif->reg_ctrl);
+}
+
+static void tegra20_spdif_stop_playback(struct tegra20_spdif *spdif)
+{
+ spdif->reg_ctrl &= ~(TEGRA20_SPDIF_CTRL_TX_EN |
+ TEGRA20_SPDIF_CTRL_TC_EN);
+ tegra20_spdif_write(spdif, TEGRA20_SPDIF_CTRL, spdif->reg_ctrl);
+}
+
+static int tegra20_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ clk_enable(spdif->clk_spdif_out);
+ tegra20_spdif_start_playback(spdif);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ tegra20_spdif_stop_playback(spdif);
+ clk_disable(spdif->clk_spdif_out);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra20_spdif_probe(struct snd_soc_dai *dai)
+{
+ struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+#ifdef CONFIG_PM
+ int i, reg;
+#endif
+
+ dai->capture_dma_data = NULL;
+ dai->playback_dma_data = &spdif->playback_dma_data;
+
+#ifdef CONFIG_PM
+ clk_enable(spdif->clk_spdif_out);
+
+ /* Populate the SPDIF register cache with power-on reset (POR) values */
+ for (i = 0; i < TEGRA20_SPDIF_CTRL_CACHE_SIZE; i++)
+ spdif->reg_ctrl_cache[i] = tegra20_spdif_read(spdif, i << 2);
+
+ for (i = 0; i < TEGRA20_SPDIF_TX_CACHE_SIZE; i++) {
+ reg = (TEGRA20_SPDIF_CH_STA_TX_A) + (i << 2);
+ spdif->reg_tx_cache[i] = tegra20_spdif_read(spdif, reg);
+ }
+
+ clk_disable(spdif->clk_spdif_out);
+
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int tegra20_spdif_resume(struct snd_soc_dai *cpu_dai)
+{
+ struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(cpu_dai);
+ int i, reg;
+
+ clk_enable(spdif->clk_spdif_out);
+
+ /* Restore the SPDIF registers */
+ for (i = 0; i < TEGRA20_SPDIF_CTRL_CACHE_SIZE; i++)
+ tegra20_spdif_write(spdif, i << 2, spdif->reg_ctrl_cache[i]);
+
+ for (i = 0; i < TEGRA20_SPDIF_TX_CACHE_SIZE; i++) {
+ reg = (TEGRA20_SPDIF_CH_STA_TX_A) + (i << 2);
+ tegra20_spdif_write(spdif, reg, spdif->reg_tx_cache[i]);
+ }
+
+ clk_disable(spdif->clk_spdif_out);
+
+ return 0;
+}
+#else
+#define tegra20_spdif_resume NULL
+#endif
+
+static struct snd_soc_dai_ops tegra20_spdif_dai_ops = {
+ .hw_params = tegra20_spdif_hw_params,
+ .trigger = tegra20_spdif_trigger,
+};
+
+struct snd_soc_dai_driver tegra20_spdif_dai = {
+ .name = DRV_NAME,
+ .probe = tegra20_spdif_probe,
+ .resume = tegra20_spdif_resume,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &tegra20_spdif_dai_ops,
+};
+
+static __devinit int tegra20_spdif_platform_probe(struct platform_device *pdev)
+{
+ struct tegra20_spdif *spdif;
+ struct resource *mem, *memregion, *dmareq;
+ int ret;
+ u32 reg_val;
+
+ spdif = kzalloc(sizeof(struct tegra20_spdif), GFP_KERNEL);
+ if (!spdif) {
+ dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ dev_set_drvdata(&pdev->dev, spdif);
+
+ spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
+ if (IS_ERR(spdif->clk_spdif_out)) {
+ pr_err("Can't retrieve spdif clock\n");
+ ret = PTR_ERR(spdif->clk_spdif_out);
+ goto err_free;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_clk_put;
+ }
+
+ dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!dmareq) {
+ dev_err(&pdev->dev, "No DMA resource\n");
+ ret = -ENODEV;
+ goto err_clk_put;
+ }
+
+ memregion = request_mem_region(mem->start, resource_size(mem),
+ DRV_NAME);
+ if (!memregion) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_clk_put;
+ }
+
+ spdif->regs = ioremap(mem->start, resource_size(mem));
+ if (!spdif->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
+ spdif->playback_dma_data.wrap = 4;
+ spdif->playback_dma_data.width = 32;
+ spdif->playback_dma_data.req_sel = dmareq->start;
+
+ clk_enable(spdif->clk_spdif_out);
+
+ reg_val = tegra20_spdif_read(spdif, TEGRA20_SPDIF_DATA_FIFO_CSR);
+ reg_val &= ~TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_MASK;
+ reg_val |= TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU4_WORD_FULL;
+ tegra20_spdif_write(spdif, TEGRA20_SPDIF_DATA_FIFO_CSR, reg_val);
+
+ clk_disable(spdif->clk_spdif_out);
+
+ ret = snd_soc_register_dai(&pdev->dev, &tegra20_spdif_dai);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ tegra20_spdif_debug_add(spdif);
+
+ return 0;
+
+err_unmap:
+ iounmap(spdif->regs);
+err_release:
+ release_mem_region(mem->start, resource_size(mem));
+err_clk_put:
+ clk_put(spdif->clk_spdif_out);
+err_free:
+ kfree(spdif);
+exit:
+ return ret;
+}
+
+static int __devexit tegra20_spdif_platform_remove(struct platform_device *pdev)
+{
+ struct tegra20_spdif *spdif = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+
+ snd_soc_unregister_dai(&pdev->dev);
+
+ tegra20_spdif_debug_remove(spdif);
+
+ iounmap(spdif->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(spdif->clk_spdif_out);
+
+ kfree(spdif);
+
+ return 0;
+}
+
+static struct platform_driver tegra20_spdif_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra20_spdif_platform_probe,
+ .remove = __devexit_p(tegra20_spdif_platform_remove),
+};
+
+static int __init snd_tegra20_spdif_init(void)
+{
+ return platform_driver_register(&tegra20_spdif_driver);
+}
+module_init(snd_tegra20_spdif_init);
+
+static void __exit snd_tegra20_spdif_exit(void)
+{
+ platform_driver_unregister(&tegra20_spdif_driver);
+}
+module_exit(snd_tegra20_spdif_exit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra SPDIF ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra20_spdif.h b/sound/soc/tegra/tegra20_spdif.h
new file mode 100644
index 000000000000..c1fb6ed2a79f
--- /dev/null
+++ b/sound/soc/tegra/tegra20_spdif.h
@@ -0,0 +1,556 @@
+/*
+ * tegra20_spdif.h - Definitions for Tegra20 SPDIF driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ * Copyright (c) 2008-2009, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA20_SPDIF_H__
+#define __TEGRA20_SPDIF_H__
+
+#include "tegra_pcm.h"
+
+/* Offsets from TEGRA_SPDIF_BASE */
+
+#define TEGRA20_SPDIF_CTRL 0x0
+#define TEGRA20_SPDIF_STATUS 0x4
+#define TEGRA20_SPDIF_STROBE_CTRL 0x8
+#define TEGRA20_SPDIF_DATA_FIFO_CSR 0x0C
+#define TEGRA20_SPDIF_DATA_OUT 0x40
+#define TEGRA20_SPDIF_DATA_IN 0x80
+#define TEGRA20_SPDIF_CH_STA_RX_A 0x100
+#define TEGRA20_SPDIF_CH_STA_RX_B 0x104
+#define TEGRA20_SPDIF_CH_STA_RX_C 0x108
+#define TEGRA20_SPDIF_CH_STA_RX_D 0x10C
+#define TEGRA20_SPDIF_CH_STA_RX_E 0x110
+#define TEGRA20_SPDIF_CH_STA_RX_F 0x114
+#define TEGRA20_SPDIF_CH_STA_TX_A 0x140
+#define TEGRA20_SPDIF_CH_STA_TX_B 0x144
+#define TEGRA20_SPDIF_CH_STA_TX_C 0x148
+#define TEGRA20_SPDIF_CH_STA_TX_D 0x14C
+#define TEGRA20_SPDIF_CH_STA_TX_E 0x150
+#define TEGRA20_SPDIF_CH_STA_TX_F 0x154
+#define TEGRA20_SPDIF_USR_STA_RX_A 0x180
+#define TEGRA20_SPDIF_USR_DAT_TX_A 0x1C0
+
+/* Fields in TEGRA20_SPDIF_CTRL */
+
+/* Start capturing from 0=right, 1=left channel */
+#define TEGRA20_SPDIF_CTRL_CAP_LC (1 << 30)
+
+/* SPDIF receiver(RX) enable */
+#define TEGRA20_SPDIF_CTRL_RX_EN (1 << 29)
+
+/* SPDIF Transmitter(TX) enable */
+#define TEGRA20_SPDIF_CTRL_TX_EN (1 << 28)
+
+/* Transmit Channel status */
+#define TEGRA20_SPDIF_CTRL_TC_EN (1 << 27)
+
+/* Transmit user Data */
+#define TEGRA20_SPDIF_CTRL_TU_EN (1 << 26)
+
+/* Interrupt on transmit error */
+#define TEGRA20_SPDIF_CTRL_IE_TXE (1 << 25)
+
+/* Interrupt on receive error */
+#define TEGRA20_SPDIF_CTRL_IE_RXE (1 << 24)
+
+/* Interrupt on invalid preamble */
+#define TEGRA20_SPDIF_CTRL_IE_P (1 << 23)
+
+/* Interrupt on "B" preamble */
+#define TEGRA20_SPDIF_CTRL_IE_B (1 << 22)
+
+/* Interrupt when block of channel status received */
+#define TEGRA20_SPDIF_CTRL_IE_C (1 << 21)
+
+/* Interrupt when a valid information unit (IU) is received */
+#define TEGRA20_SPDIF_CTRL_IE_U (1 << 20)
+
+/* Interrupt when RX user FIFO attention level is reached */
+#define TEGRA20_SPDIF_CTRL_QE_RU (1 << 19)
+
+/* Interrupt when TX user FIFO attention level is reached */
+#define TEGRA20_SPDIF_CTRL_QE_TU (1 << 18)
+
+/* Interrupt when RX data FIFO attention level is reached */
+#define TEGRA20_SPDIF_CTRL_QE_RX (1 << 17)
+
+/* Interrupt when TX data FIFO attention level is reached */
+#define TEGRA20_SPDIF_CTRL_QE_TX (1 << 16)
+
+/* Loopback test mode enable */
+#define TEGRA20_SPDIF_CTRL_LBK_EN (1 << 15)
+
+/*
+ * Pack data mode:
+ * 0 = Single data (16 bit needs to be padded to match the
+ * interface data bit size).
+ * 1 = Packed left/right channel data into a single word.
+ */
+#define TEGRA20_SPDIF_CTRL_PACK (1 << 14)
+
+/*
+ * 00 = 16bit data
+ * 01 = 20bit data
+ * 10 = 24bit data
+ * 11 = raw data
+ */
+#define TEGRA20_SPDIF_BIT_MODE_16BIT 0
+#define TEGRA20_SPDIF_BIT_MODE_20BIT 1
+#define TEGRA20_SPDIF_BIT_MODE_24BIT 2
+#define TEGRA20_SPDIF_BIT_MODE_RAW 3
+
+#define TEGRA20_SPDIF_CTRL_BIT_MODE_SHIFT 12
+#define TEGRA20_SPDIF_CTRL_BIT_MODE_MASK (3 << TEGRA20_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT (TEGRA20_SPDIF_BIT_MODE_16BIT << TEGRA20_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA20_SPDIF_CTRL_BIT_MODE_20BIT (TEGRA20_SPDIF_BIT_MODE_20BIT << TEGRA20_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA20_SPDIF_CTRL_BIT_MODE_24BIT (TEGRA20_SPDIF_BIT_MODE_24BIT << TEGRA20_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA20_SPDIF_CTRL_BIT_MODE_RAW (TEGRA20_SPDIF_BIT_MODE_RAW << TEGRA20_SPDIF_CTRL_BIT_MODE_SHIFT)
+
+/* Fields in TEGRA20_SPDIF_STATUS */
+
+/*
+ * Note: IS_P, IS_B, IS_C, and IS_U are sticky bits. Software must
+ * write a 1 to the corresponding bit location to clear the status.
+ */
+
+/*
+ * Receiver(RX) shifter is busy receiving data.
+ * This bit is asserted when the receiver first locked onto the
+ * preamble of the data stream after RX_EN is asserted. This bit is
+ * deasserted when either,
+ * (a) the end of a frame is reached after RX_EN is deasserted, or
+ * (b) the SPDIF data stream becomes inactive.
+ */
+#define TEGRA20_SPDIF_STATUS_RX_BSY (1 << 29)
+
+/*
+ * Transmitter(TX) shifter is busy transmitting data.
+ * This bit is asserted when TX_EN is asserted.
+ * This bit is deasserted when the end of a frame is reached after
+ * TX_EN is deasserted.
+ */
+#define TEGRA20_SPDIF_STATUS_TX_BSY (1 << 28)
+
+/*
+ * TX is busy shifting out channel status.
+ * This bit is asserted when both TX_EN and TC_EN are asserted and
+ * data from CH_STA_TX_A register is loaded into the internal shifter.
+ * This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) CH_STA_TX_F register is loaded into the internal shifter.
+ */
+#define TEGRA20_SPDIF_STATUS_TC_BSY (1 << 27)
+
+/*
+ * TX User data FIFO busy.
+ * This bit is asserted when TX_EN and TXU_EN are asserted and
+ * there's data in the TX user FIFO. This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) there's no data left in the TX user FIFO.
+ */
+#define TEGRA20_SPDIF_STATUS_TU_BSY (1 << 26)
+
+/* TX FIFO Underrun error status */
+#define TEGRA20_SPDIF_STATUS_TX_ERR (1 << 25)
+
+/* RX FIFO Overrun error status */
+#define TEGRA20_SPDIF_STATUS_RX_ERR (1 << 24)
+
+/* Preamble status: 0=Preamble OK, 1=bad/missing preamble */
+#define TEGRA20_SPDIF_STATUS_IS_P (1 << 23)
+
+/* B-preamble detection status: 0=not detected, 1=B-preamble detected */
+#define TEGRA20_SPDIF_STATUS_IS_B (1 << 22)
+
+/*
+ * RX channel block data receive status:
+ * 0=entire block not received yet,
+ * 1=entire block of channel status received.
+ */
+#define TEGRA20_SPDIF_STATUS_IS_C (1 << 21)
+
+/* RX User Data Valid flag: 1=valid IU detected, 0 = no IU detected. */
+#define TEGRA20_SPDIF_STATUS_IS_U (1 << 20)
+
+/*
+ * RX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define TEGRA20_SPDIF_STATUS_QS_RU (1 << 19)
+
+/*
+ * TX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define TEGRA20_SPDIF_STATUS_QS_TU (1 << 18)
+
+/*
+ * RX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define TEGRA20_SPDIF_STATUS_QS_RX (1 << 17)
+
+/*
+ * TX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define TEGRA20_SPDIF_STATUS_QS_TX (1 << 16)
+
+/* Fields in TEGRA20_SPDIF_STROBE_CTRL */
+
+/*
+ * Indicates the approximate number of detected SPDIFIN clocks within a
+ * bi-phase period.
+ */
+#define TEGRA20_SPDIF_STROBE_CTRL_PERIOD_SHIFT 16
+#define TEGRA20_SPDIF_STROBE_CTRL_PERIOD_MASK (0xff << TEGRA20_SPDIF_STROBE_CTRL_PERIOD_SHIFT)
+
+/* Data strobe mode: 0=Auto-locked 1=Manual locked */
+#define TEGRA20_SPDIF_STROBE_CTRL_STROBE (1 << 15)
+
+/*
+ * Manual data strobe time within the bi-phase clock period (in terms of
+ * the number of over-sampling clocks).
+ */
+#define TEGRA20_SPDIF_STROBE_CTRL_DATA_STROBES_SHIFT 8
+#define TEGRA20_SPDIF_STROBE_CTRL_DATA_STROBES_MASK (0x1f << TEGRA20_SPDIF_STROBE_CTRL_DATA_STROBES_SHIFT)
+
+/*
+ * Manual SPDIFIN bi-phase clock period (in terms of the number of
+ * over-sampling clocks).
+ */
+#define TEGRA20_SPDIF_STROBE_CTRL_CLOCK_PERIOD_SHIFT 0
+#define TEGRA20_SPDIF_STROBE_CTRL_CLOCK_PERIOD_MASK (0x3f << TEGRA20_SPDIF_STROBE_CTRL_CLOCK_PERIOD_SHIFT)
+
+/* Fields in SPDIF_DATA_FIFO_CSR */
+
+/* Clear Receiver User FIFO (RX USR.FIFO) */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_CLR (1 << 31)
+
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_U_ONE_SLOT 0
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_U_TWO_SLOTS 1
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_U_THREE_SLOTS 2
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_U_FOUR_SLOTS 3
+
+/* RU FIFO attention level */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT 29
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_MASK \
+ (0x3 << TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU1_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_ONE_SLOT << TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU2_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_TWO_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU3_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_THREE_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU4_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_FOUR_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
+
+/* Number of RX USR.FIFO levels with valid data. */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_FULL_COUNT_SHIFT 24
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RU_FULL_COUNT_MASK (0x1f << TEGRA20_SPDIF_DATA_FIFO_CSR_RU_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter User FIFO (TX USR.FIFO) */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_CLR (1 << 23)
+
+/* TU FIFO attention level */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT 21
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_MASK \
+ (0x3 << TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU1_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_ONE_SLOT << TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU2_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_TWO_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU3_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_THREE_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU4_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_U_FOUR_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
+
+/* Number of TX USR.FIFO levels that could be filled. */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_EMPTY_COUNT_SHIFT 16
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TU_EMPTY_COUNT_MASK (0x1f << TEGRA20_SPDIF_DATA_FIFO_CSR_TU_EMPTY_COUNT_SHIFT)
+
+/* Clear Receiver Data FIFO (RX DATA.FIFO) */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_CLR (1 << 15)
+
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_D_ONE_SLOT 0
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_D_FOUR_SLOTS 1
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_D_EIGHT_SLOTS 2
+#define TEGRA20_SPDIF_FIFO_ATN_LVL_D_TWELVE_SLOTS 3
+
+/* RX data FIFO attention level */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT 13
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_MASK \
+ (0x3 << TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU1_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_ONE_SLOT << TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU4_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_FOUR_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU8_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_EIGHT_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU12_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_TWELVE_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
+
+/* Number of RX DATA.FIFO levels with valid data. */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_FULL_COUNT_SHIFT 8
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_RX_FULL_COUNT_MASK (0x1f << TEGRA20_SPDIF_DATA_FIFO_CSR_RX_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter Data FIFO (TX DATA.FIFO) */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_CLR (1 << 7)
+
+/* TX data FIFO attention level */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT 5
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_MASK \
+ (0x3 << TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU1_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_ONE_SLOT << TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU4_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_FOUR_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU8_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_EIGHT_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU12_WORD_FULL \
+ (TEGRA20_SPDIF_FIFO_ATN_LVL_D_TWELVE_SLOTS << TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
+
+/* Number of TX DATA.FIFO levels that could be filled. */
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_EMPTY_COUNT_SHIFT 0
+#define TEGRA20_SPDIF_DATA_FIFO_CSR_TX_EMPTY_COUNT_MASK (0x1f << TEGRA20_SPDIF_DATA_FIFO_CSR_TX_EMPTY_COUNT_SHIFT)
+
+/* Fields in TEGRA20_SPDIF_DATA_OUT */
+
+/*
+ * This register has 5 different formats:
+ * 16-bit (BIT_MODE=00, PACK=0)
+ * 20-bit (BIT_MODE=01, PACK=0)
+ * 24-bit (BIT_MODE=10, PACK=0)
+ * raw (BIT_MODE=11, PACK=0)
+ * 16-bit packed (BIT_MODE=00, PACK=1)
+ */
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_16_SHIFT 0
+#define TEGRA20_SPDIF_DATA_OUT_DATA_16_MASK (0xffff << TEGRA20_SPDIF_DATA_OUT_DATA_16_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_20_SHIFT 0
+#define TEGRA20_SPDIF_DATA_OUT_DATA_20_MASK (0xfffff << TEGRA20_SPDIF_DATA_OUT_DATA_20_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_24_SHIFT 0
+#define TEGRA20_SPDIF_DATA_OUT_DATA_24_MASK (0xffffff << TEGRA20_SPDIF_DATA_OUT_DATA_24_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_P (1 << 31)
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_C (1 << 30)
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_U (1 << 29)
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_V (1 << 28)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_DATA_SHIFT 8
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_DATA_MASK (0xfffff << TEGRA20_SPDIF_DATA_OUT_DATA_RAW_DATA_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_AUX_SHIFT 4
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_AUX_MASK (0xf << TEGRA20_SPDIF_DATA_OUT_DATA_RAW_AUX_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_PREAMBLE_SHIFT 0
+#define TEGRA20_SPDIF_DATA_OUT_DATA_RAW_PREAMBLE_MASK (0xf << TEGRA20_SPDIF_DATA_OUT_DATA_RAW_PREAMBLE_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_16_PACKED_RIGHT_SHIFT 16
+#define TEGRA20_SPDIF_DATA_OUT_DATA_16_PACKED_RIGHT_MASK (0xffff << TEGRA20_SPDIF_DATA_OUT_DATA_16_PACKED_RIGHT_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_OUT_DATA_16_PACKED_LEFT_SHIFT 0
+#define TEGRA20_SPDIF_DATA_OUT_DATA_16_PACKED_LEFT_MASK (0xffff << TEGRA20_SPDIF_DATA_OUT_DATA_16_PACKED_LEFT_SHIFT)
+
+/* Fields in TEGRA20_SPDIF_DATA_IN */
+
+/*
+ * This register has 5 different formats:
+ * 16-bit (BIT_MODE=00, PACK=0)
+ * 20-bit (BIT_MODE=01, PACK=0)
+ * 24-bit (BIT_MODE=10, PACK=0)
+ * raw (BIT_MODE=11, PACK=0)
+ * 16-bit packed (BIT_MODE=00, PACK=1)
+ *
+ * Bits 31:24 are common to all modes except 16-bit packed
+ */
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_P (1 << 31)
+#define TEGRA20_SPDIF_DATA_IN_DATA_C (1 << 30)
+#define TEGRA20_SPDIF_DATA_IN_DATA_U (1 << 29)
+#define TEGRA20_SPDIF_DATA_IN_DATA_V (1 << 28)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_PREAMBLE_SHIFT 24
+#define TEGRA20_SPDIF_DATA_IN_DATA_PREAMBLE_MASK (0xf << TEGRA20_SPDIF_DATA_IN_DATA_PREAMBLE_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_16_SHIFT 0
+#define TEGRA20_SPDIF_DATA_IN_DATA_16_MASK (0xffff << TEGRA20_SPDIF_DATA_IN_DATA_16_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_20_SHIFT 0
+#define TEGRA20_SPDIF_DATA_IN_DATA_20_MASK (0xfffff << TEGRA20_SPDIF_DATA_IN_DATA_20_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_24_SHIFT 0
+#define TEGRA20_SPDIF_DATA_IN_DATA_24_MASK (0xffffff << TEGRA20_SPDIF_DATA_IN_DATA_24_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_RAW_DATA_SHIFT 8
+#define TEGRA20_SPDIF_DATA_IN_DATA_RAW_DATA_MASK (0xfffff << TEGRA20_SPDIF_DATA_IN_DATA_RAW_DATA_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_RAW_AUX_SHIFT 4
+#define TEGRA20_SPDIF_DATA_IN_DATA_RAW_AUX_MASK (0xf << TEGRA20_SPDIF_DATA_IN_DATA_RAW_AUX_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_RAW_PREAMBLE_SHIFT 0
+#define TEGRA20_SPDIF_DATA_IN_DATA_RAW_PREAMBLE_MASK (0xf << TEGRA20_SPDIF_DATA_IN_DATA_RAW_PREAMBLE_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_16_PACKED_RIGHT_SHIFT 16
+#define TEGRA20_SPDIF_DATA_IN_DATA_16_PACKED_RIGHT_MASK (0xffff << TEGRA20_SPDIF_DATA_IN_DATA_16_PACKED_RIGHT_SHIFT)
+
+#define TEGRA20_SPDIF_DATA_IN_DATA_16_PACKED_LEFT_SHIFT 0
+#define TEGRA20_SPDIF_DATA_IN_DATA_16_PACKED_LEFT_MASK (0xffff << TEGRA20_SPDIF_DATA_IN_DATA_16_PACKED_LEFT_SHIFT)
+
+/* Fields in TEGRA20_SPDIF_CH_STA_RX_A */
+/* Fields in TEGRA20_SPDIF_CH_STA_RX_B */
+/* Fields in TEGRA20_SPDIF_CH_STA_RX_C */
+/* Fields in TEGRA20_SPDIF_CH_STA_RX_D */
+/* Fields in TEGRA20_SPDIF_CH_STA_RX_E */
+/* Fields in TEGRA20_SPDIF_CH_STA_RX_F */
+
+/*
+ * The 6-word receive channel data page buffer holds a block (192 frames) of
+ * channel status information. Bits are received LSB first within each word,
+ * and the words are filled from CH_STA_RX_A through CH_STA_RX_F, then back
+ * to CH_STA_RX_A.
+ */
+
+/* Fields in TEGRA20_SPDIF_CH_STA_TX_A */
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_22050 0x4
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_24000 0x6
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_32000 0x3
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_44100 0x0
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_48000 0x2
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_88200 0x8
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_96000 0xA
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_176400 0xC
+#define TEGRA20_SPDIF_CH_STA_TX_A_SF_192000 0xE
+
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT 24
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_MASK \
+ (0xF << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_22050 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_22050 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_24000 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_24000 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_32000 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_32000 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_44100 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_44100 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_48000 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_48000 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_88200 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_88200 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_96000 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_96000 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_176400 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_176400 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_192000 \
+ (TEGRA20_SPDIF_CH_STA_TX_A_SF_192000 << TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+
+/* Fields in TEGRA20_SPDIF_CH_STA_TX_B */
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_8000 0x6
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_11025 0xA
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_12000 0x2
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_16000 0x8
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_22050 0xB
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_24000 0x9
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_32000 0xC
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_44100 0xF
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_48000 0xD
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_88200 0x7
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_96000 0x5
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_176400 0x3
+#define TEGRA20_SPDIF_CH_STA_TX_B_SF_192000 0x1
+
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT 4
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_MASK \
+ (0xF << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_8000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_8000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_11025 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_11025 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_12000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_12000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_16000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_16000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_22050 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_22050 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_24000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_24000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_32000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_32000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_44100 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_44100 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_48000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_48000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_88200 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_88200 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_96000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_96000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_176400 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_176400 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_192000 \
+ (TEGRA20_SPDIF_CH_STA_TX_B_SF_192000 << TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+
+/* Fields in TEGRA20_SPDIF_CH_STA_TX_C */
+/* Fields in TEGRA20_SPDIF_CH_STA_TX_D */
+/* Fields in TEGRA20_SPDIF_CH_STA_TX_E */
+/* Fields in TEGRA20_SPDIF_CH_STA_TX_F */
+
+/*
+ * The 6-word transmit channel data page buffer holds a block (192 frames) of
+ * channel status information. Bits are transmitted LSB first within each
+ * word, and the words are sent from CH_STA_TX_A through CH_STA_TX_F, then
+ * back to CH_STA_TX_A.
+ */
+
+/* Fields in TEGRA20_SPDIF_USR_STA_RX_A */
+
+/*
+ * This 4-word-deep FIFO receives user field information. Bits are received
+ * LSB first.
+ */
+
+/* Fields in TEGRA20_SPDIF_USR_DAT_TX_A */
+
+/*
+ * This 4-word-deep FIFO transmits user field information. Bits are
+ * transmitted LSB first.
+ */
+#ifdef CONFIG_PM
+#define TEGRA20_SPDIF_CTRL_CACHE_SIZE ((TEGRA20_SPDIF_DATA_FIFO_CSR >> 2) + 1)
+#define TEGRA20_SPDIF_TX_CACHE_SIZE (((TEGRA20_SPDIF_CH_STA_TX_F - TEGRA20_SPDIF_CH_STA_TX_A) >> 2) + 1)
+#endif
+
+struct tegra20_spdif {
+ struct clk *clk_spdif_out;
+ struct tegra_pcm_dma_params capture_dma_data;
+ struct tegra_pcm_dma_params playback_dma_data;
+ void __iomem *regs;
+ struct dentry *debug;
+ u32 reg_ctrl;
+#ifdef CONFIG_PM
+ u32 reg_ctrl_cache[TEGRA20_SPDIF_CTRL_CACHE_SIZE];
+ u32 reg_tx_cache[TEGRA20_SPDIF_TX_CACHE_SIZE];
+#endif
+};
+
+#endif
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
new file mode 100644
index 000000000000..3fceda143da8
--- /dev/null
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -0,0 +1,659 @@
+/*
+ * tegra30_ahub.c - Tegra 30 AHUB driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/dma.h>
+#include <mach/iomap.h>
+#include <sound/soc.h>
+#include "tegra30_ahub.h"
+
+#define DRV_NAME "tegra30-ahub"
+
+static struct tegra30_ahub *ahub;
+
+static inline void tegra30_apbif_write(u32 reg, u32 val)
+{
+#ifdef CONFIG_PM
+ ahub->apbif_reg_cache[reg >> 2] = val;
+#endif
+ __raw_writel(val, ahub->apbif_regs + reg);
+}
+
+static inline u32 tegra30_apbif_read(u32 reg)
+{
+ return __raw_readl(ahub->apbif_regs + reg);
+}
+
+static inline void tegra30_audio_write(u32 reg, u32 val)
+{
+#ifdef CONFIG_PM
+ ahub->ahub_reg_cache[reg >> 2] = val;
+#endif
+ __raw_writel(val, ahub->audio_regs + reg);
+}
+
+static inline u32 tegra30_audio_read(u32 reg)
+{
+ return __raw_readl(ahub->audio_regs + reg);
+}
+
+#ifdef CONFIG_PM
+int tegra30_ahub_apbif_resume(void)
+{
+ int i = 0;
+ int cache_idx_rsvd;
+
+ tegra30_ahub_enable_clocks();
+
+ /* restore ahub regs */
+ for (i = 0; i < TEGRA30_AHUB_AUDIO_RX_COUNT; i++)
+ tegra30_audio_write(i<<2, ahub->ahub_reg_cache[i]);
+
+ /* restore apbif regs */
+ cache_idx_rsvd = TEGRA30_APBIF_CACHE_REG_INDEX_RSVD;
+ for (i = 0; i < TEGRA30_APBIF_CACHE_REG_COUNT; i++) {
+ if (i == cache_idx_rsvd) {
+ cache_idx_rsvd +=
+ TEGRA30_APBIF_CACHE_REG_INDEX_RSVD_STRIDE;
+ continue;
+ }
+
+ tegra30_apbif_write(i<<2, ahub->apbif_reg_cache[i]);
+ }
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+#endif
+
+/*
+ * clk_apbif isn't required for a theoretical I2S<->I2S configuration where
+ * no PCM data is read from or sent to memory. However, that's an unlikely
+ * use-case, and not something the rest of the driver supports right now, so
+ * we'll just treat the two clocks as one for now.
+ *
+ * Ideally, clock management here would not be a simple on/off: each active
+ * stream contributes some requirement to the minimum clock rate, so starting
+ * or stopping streams should dynamically adjust the clock rate as required.
+ * However, this is not yet implemented.
+ */
+void tegra30_ahub_enable_clocks(void)
+{
+ clk_enable(ahub->clk_d_audio);
+ clk_enable(ahub->clk_apbif);
+}
+
+void tegra30_ahub_disable_clocks(void)
+{
+ clk_disable(ahub->clk_apbif);
+ clk_disable(ahub->clk_d_audio);
+}
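+
+/*
+ * A minimal sketch of the rate-aggregation scheme described in the comment
+ * above (purely illustrative; the function and variable names below are
+ * hypothetical and nothing like this exists in the driver yet):
+ *
+ *     static unsigned long ahub_required_rate;
+ *
+ *     void tegra30_ahub_request_rate(unsigned long rate)
+ *     {
+ *             if (rate > ahub_required_rate) {
+ *                     ahub_required_rate = rate;
+ *                     clk_set_rate(ahub->clk_d_audio, rate);
+ *             }
+ *     }
+ *
+ * A complete version would also track per-stream requirements so the rate
+ * could drop again when streams stop.
+ */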
+
+#ifdef CONFIG_DEBUG_FS
+static inline u32 tegra30_ahub_read(u32 space, u32 reg)
+{
+ if (space == 0)
+ return tegra30_apbif_read(reg);
+ else
+ return tegra30_audio_read(reg);
+}
+
+static int tegra30_ahub_show(struct seq_file *s, void *unused)
+{
+#define REG(space, r) { space, r, 0, 1, #r }
+#define ARR(space, r) { space, r, r##_STRIDE, r##_COUNT, #r }
+ static const struct {
+ int space;
+ u32 offset;
+ u32 stride;
+ u32 count;
+ const char *name;
+ } regs[] = {
+ ARR(0, TEGRA30_AHUB_CHANNEL_CTRL),
+ ARR(0, TEGRA30_AHUB_CHANNEL_CLEAR),
+ ARR(0, TEGRA30_AHUB_CHANNEL_STATUS),
+ ARR(0, TEGRA30_AHUB_CIF_TX_CTRL),
+ ARR(0, TEGRA30_AHUB_CIF_RX_CTRL),
+ REG(0, TEGRA30_AHUB_CONFIG_LINK_CTRL),
+ REG(0, TEGRA30_AHUB_MISC_CTRL),
+ REG(0, TEGRA30_AHUB_APBDMA_LIVE_STATUS),
+ REG(0, TEGRA30_AHUB_I2S_LIVE_STATUS),
+ ARR(0, TEGRA30_AHUB_DAM_LIVE_STATUS),
+ REG(0, TEGRA30_AHUB_SPDIF_LIVE_STATUS),
+ REG(0, TEGRA30_AHUB_I2S_INT_MASK),
+ REG(0, TEGRA30_AHUB_DAM_INT_MASK),
+ REG(0, TEGRA30_AHUB_SPDIF_INT_MASK),
+ REG(0, TEGRA30_AHUB_APBIF_INT_MASK),
+ REG(0, TEGRA30_AHUB_I2S_INT_STATUS),
+ REG(0, TEGRA30_AHUB_DAM_INT_STATUS),
+ REG(0, TEGRA30_AHUB_SPDIF_INT_STATUS),
+ REG(0, TEGRA30_AHUB_APBIF_INT_STATUS),
+ REG(0, TEGRA30_AHUB_I2S_INT_SOURCE),
+ REG(0, TEGRA30_AHUB_DAM_INT_SOURCE),
+ REG(0, TEGRA30_AHUB_SPDIF_INT_SOURCE),
+ REG(0, TEGRA30_AHUB_APBIF_INT_SOURCE),
+ REG(0, TEGRA30_AHUB_I2S_INT_SET),
+ REG(0, TEGRA30_AHUB_DAM_INT_SET),
+ REG(0, TEGRA30_AHUB_SPDIF_INT_SET),
+ REG(0, TEGRA30_AHUB_APBIF_INT_SET),
+ ARR(1, TEGRA30_AHUB_AUDIO_RX),
+ };
+#undef ARR
+#undef REG
+
+ int i, j;
+
+ tegra30_ahub_enable_clocks();
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i].count > 1) {
+ for (j = 0; j < regs[i].count; j++) {
+ u32 reg = regs[i].offset + (j * regs[i].stride);
+ u32 val = tegra30_ahub_read(regs[i].space, reg);
+ seq_printf(s, "%s[%d] = %08x\n", regs[i].name,
+ j, val);
+ }
+ } else {
+ u32 val = tegra30_ahub_read(regs[i].space,
+ regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+ }
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+static int tegra30_ahub_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra30_ahub_show, inode->i_private);
+}
+
+static const struct file_operations tegra30_ahub_debug_fops = {
+ .open = tegra30_ahub_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra30_ahub_debug_add(struct tegra30_ahub *ahub)
+{
+ ahub->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
+ snd_soc_debugfs_root, ahub,
+ &tegra30_ahub_debug_fops);
+}
+
+static void tegra30_ahub_debug_remove(struct tegra30_ahub *ahub)
+{
+ if (ahub->debug)
+ debugfs_remove(ahub->debug);
+}
+#else
+static inline void tegra30_ahub_debug_add(struct tegra30_ahub *ahub)
+{
+}
+
+static inline void tegra30_ahub_debug_remove(struct tegra30_ahub *ahub)
+{
+}
+#endif
+
+int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
+ unsigned long *fiforeg,
+ unsigned long *reqsel)
+{
+ int channel;
+ u32 reg, val;
+
+ channel = find_first_zero_bit(ahub->rx_usage,
+ TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
+ if (channel >= TEGRA30_AHUB_CHANNEL_CTRL_COUNT)
+ return -EBUSY;
+
+ __set_bit(channel, ahub->rx_usage);
+
+ *rxcif = TEGRA30_AHUB_RXCIF_APBIF_RX0 + channel;
+ *fiforeg = ahub->apbif_addr + TEGRA30_AHUB_CHANNEL_RXFIFO +
+ (channel * TEGRA30_AHUB_CHANNEL_RXFIFO_STRIDE);
+ *reqsel = TEGRA_DMA_REQ_SEL_APBIF_CH0 + channel;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_CHANNEL_CTRL +
+ (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val &= ~(TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_MASK |
+ TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_MASK);
+ val |= (7 << TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_SHIFT) |
+ TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_EN |
+ TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_16;
+ tegra30_apbif_write(reg, val);
+
+ reg = TEGRA30_AHUB_CIF_RX_CTRL +
+ (channel * TEGRA30_AHUB_CIF_RX_CTRL_STRIDE);
+ val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+ (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 |
+ TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
+ tegra30_apbif_write(reg, val);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_enable_rx_fifo(enum tegra30_ahub_rxcif rxcif)
+{
+ int channel = rxcif - TEGRA30_AHUB_RXCIF_APBIF_RX0;
+ int reg, val;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_CHANNEL_CTRL +
+ (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val |= TEGRA30_AHUB_CHANNEL_CTRL_RX_EN;
+ tegra30_apbif_write(reg, val);
+
+ return 0;
+}
+
+int tegra30_ahub_disable_rx_fifo(enum tegra30_ahub_rxcif rxcif)
+{
+ int channel = rxcif - TEGRA30_AHUB_RXCIF_APBIF_RX0;
+ int reg, val;
+
+ reg = TEGRA30_AHUB_CHANNEL_CTRL +
+ (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val &= ~TEGRA30_AHUB_CHANNEL_CTRL_RX_EN;
+ tegra30_apbif_write(reg, val);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_free_rx_fifo(enum tegra30_ahub_rxcif rxcif)
+{
+ int channel = rxcif - TEGRA30_AHUB_RXCIF_APBIF_RX0;
+
+ __clear_bit(channel, ahub->rx_usage);
+
+ return 0;
+}
+
+int tegra30_ahub_allocate_tx_fifo(enum tegra30_ahub_txcif *txcif,
+ unsigned long *fiforeg,
+ unsigned long *reqsel)
+{
+ int channel;
+ u32 reg, val;
+
+ channel = find_first_zero_bit(ahub->tx_usage,
+ TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
+ if (channel >= TEGRA30_AHUB_CHANNEL_CTRL_COUNT)
+ return -EBUSY;
+
+ __set_bit(channel, ahub->tx_usage);
+
+ *txcif = TEGRA30_AHUB_TXCIF_APBIF_TX0 + channel;
+ *fiforeg = ahub->apbif_addr + TEGRA30_AHUB_CHANNEL_TXFIFO +
+ (channel * TEGRA30_AHUB_CHANNEL_TXFIFO_STRIDE);
+ *reqsel = TEGRA_DMA_REQ_SEL_APBIF_CH0 + channel;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_CHANNEL_CTRL +
+ (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val &= ~(TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_MASK |
+ TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_MASK);
+ val |= (7 << TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_SHIFT) |
+ TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_EN |
+ TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_16;
+ tegra30_apbif_write(reg, val);
+
+ reg = TEGRA30_AHUB_CIF_TX_CTRL +
+ (channel * TEGRA30_AHUB_CIF_TX_CTRL_STRIDE);
+ val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+ (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 |
+ TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
+ tegra30_apbif_write(reg, val);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_enable_tx_fifo(enum tegra30_ahub_txcif txcif)
+{
+ int channel = txcif - TEGRA30_AHUB_TXCIF_APBIF_TX0;
+ int reg, val;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_CHANNEL_CTRL +
+ (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val |= TEGRA30_AHUB_CHANNEL_CTRL_TX_EN;
+ tegra30_apbif_write(reg, val);
+
+ return 0;
+}
+
+int tegra30_ahub_disable_tx_fifo(enum tegra30_ahub_txcif txcif)
+{
+ int channel = txcif - TEGRA30_AHUB_TXCIF_APBIF_TX0;
+ int reg, val;
+
+ reg = TEGRA30_AHUB_CHANNEL_CTRL +
+ (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val &= ~TEGRA30_AHUB_CHANNEL_CTRL_TX_EN;
+ tegra30_apbif_write(reg, val);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_free_tx_fifo(enum tegra30_ahub_txcif txcif)
+{
+ int channel = txcif - TEGRA30_AHUB_TXCIF_APBIF_TX0;
+
+ __clear_bit(channel, ahub->tx_usage);
+
+ return 0;
+}
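+
+/*
+ * Typical call sequence for the FIFO helpers above (illustrative only):
+ *
+ *     enum tegra30_ahub_txcif txcif;
+ *     unsigned long fiforeg, reqsel;
+ *
+ *     tegra30_ahub_allocate_tx_fifo(&txcif, &fiforeg, &reqsel);
+ *     tegra30_ahub_enable_tx_fifo(txcif);
+ *     ... run DMA to/from fiforeg using reqsel ...
+ *     tegra30_ahub_disable_tx_fifo(txcif);
+ *     tegra30_ahub_free_tx_fifo(txcif);
+ */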
+
+int tegra30_ahub_set_rx_cif_source(enum tegra30_ahub_rxcif rxcif,
+ enum tegra30_ahub_txcif txcif)
+{
+ int channel = rxcif - TEGRA30_AHUB_RXCIF_APBIF_RX0;
+ int reg;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_AUDIO_RX +
+ (channel * TEGRA30_AHUB_AUDIO_RX_STRIDE);
+ tegra30_audio_write(reg, 1 << txcif);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_unset_rx_cif_source(enum tegra30_ahub_rxcif rxcif)
+{
+ int channel = rxcif - TEGRA30_AHUB_RXCIF_APBIF_RX0;
+ int reg;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_AUDIO_RX +
+ (channel * TEGRA30_AHUB_AUDIO_RX_STRIDE);
+ tegra30_audio_write(reg, 0);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_set_rx_cif_channels(enum tegra30_ahub_rxcif rxcif,
+ unsigned int audio_ch,
+ unsigned int client_ch)
+{
+ int channel = rxcif - TEGRA30_AHUB_RXCIF_APBIF_RX0;
+ unsigned int reg, val;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_CIF_RX_CTRL +
+ (channel * TEGRA30_AHUB_CIF_RX_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val &= ~(TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK |
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK);
+ val |= ((audio_ch - 1) << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ ((client_ch - 1) << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT);
+ tegra30_apbif_write(reg, val);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+int tegra30_ahub_set_tx_cif_channels(enum tegra30_ahub_txcif txcif,
+ unsigned int audio_ch,
+ unsigned int client_ch)
+{
+ int channel = txcif - TEGRA30_AHUB_TXCIF_APBIF_TX0;
+ unsigned int reg, val;
+
+ tegra30_ahub_enable_clocks();
+
+ reg = TEGRA30_AHUB_CIF_TX_CTRL +
+ (channel * TEGRA30_AHUB_CIF_TX_CTRL_STRIDE);
+ val = tegra30_apbif_read(reg);
+ val &= ~(TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK |
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK);
+ val |= ((audio_ch - 1) << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ ((client_ch - 1) << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT);
+
+ tegra30_apbif_write(reg, val);
+
+ tegra30_ahub_disable_clocks();
+
+ return 0;
+}
+
+static int __devinit tegra30_ahub_probe(struct platform_device *pdev)
+{
+ struct resource *res0, *res1, *region;
+ int ret = 0;
+#ifdef CONFIG_PM
+ int i = 0, cache_idx_rsvd;
+#endif
+
+ if (ahub)
+ return -ENODEV;
+
+ ahub = kzalloc(sizeof(struct tegra30_ahub), GFP_KERNEL);
+ if (!ahub) {
+ dev_err(&pdev->dev, "Can't allocate tegra30_ahub\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ahub->dev = &pdev->dev;
+
+ ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio");
+ if (IS_ERR(ahub->clk_d_audio)) {
+ dev_err(&pdev->dev, "Can't retrieve ahub d_audio clock\n");
+ ret = PTR_ERR(ahub->clk_d_audio);
+ goto err_free;
+ }
+
+ ahub->clk_apbif = clk_get(&pdev->dev, "apbif");
+ if (IS_ERR(ahub->clk_apbif)) {
+ dev_err(&pdev->dev, "Can't retrieve ahub apbif clock\n");
+ ret = PTR_ERR(ahub->clk_apbif);
+ goto err_clk_put_d_audio;
+ }
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res0) {
+ dev_err(&pdev->dev, "No memory 0 resource\n");
+ ret = -ENODEV;
+ goto err_clk_put_apbif;
+ }
+
+ region = request_mem_region(res0->start, resource_size(res0),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "Memory region 0 already claimed\n");
+ ret = -EBUSY;
+ goto err_clk_put_apbif;
+ }
+
+ ahub->apbif_regs = ioremap(res0->start, resource_size(res0));
+ if (!ahub->apbif_regs) {
+ dev_err(&pdev->dev, "ioremap 0 failed\n");
+ ret = -ENOMEM;
+ goto err_release0;
+ }
+
+ ahub->apbif_addr = res0->start;
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1) {
+ dev_err(&pdev->dev, "No memory 1 resource\n");
+ ret = -ENODEV;
+ goto err_unmap0;
+ }
+
+ region = request_mem_region(res1->start, resource_size(res1),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "Memory region 1 already claimed\n");
+ ret = -EBUSY;
+ goto err_unmap0;
+ }
+
+ ahub->audio_regs = ioremap(res1->start, resource_size(res1));
+ if (!ahub->audio_regs) {
+ dev_err(&pdev->dev, "ioremap 1 failed\n");
+ ret = -ENOMEM;
+ goto err_release1;
+ }
+
+#ifdef CONFIG_PM
+ /* cache the POR values of ahub/apbif regs */
+ tegra30_ahub_enable_clocks();
+
+ for (i = 0; i < TEGRA30_AHUB_AUDIO_RX_COUNT; i++)
+ ahub->ahub_reg_cache[i] = tegra30_audio_read(i<<2);
+
+ cache_idx_rsvd = TEGRA30_APBIF_CACHE_REG_INDEX_RSVD;
+ for (i = 0; i < TEGRA30_APBIF_CACHE_REG_COUNT; i++) {
+ if (i == cache_idx_rsvd) {
+ cache_idx_rsvd +=
+ TEGRA30_APBIF_CACHE_REG_INDEX_RSVD_STRIDE;
+ continue;
+ }
+
+ ahub->apbif_reg_cache[i] = tegra30_apbif_read(i<<2);
+ }
+
+ tegra30_ahub_disable_clocks();
+#endif
+
+ tegra30_ahub_debug_add(ahub);
+
+ platform_set_drvdata(pdev, ahub);
+
+ return 0;
+
+err_release1:
+ release_mem_region(res1->start, resource_size(res1));
+err_unmap0:
+ iounmap(ahub->apbif_regs);
+err_release0:
+ release_mem_region(res0->start, resource_size(res0));
+err_clk_put_apbif:
+ clk_put(ahub->clk_apbif);
+err_clk_put_d_audio:
+ clk_put(ahub->clk_d_audio);
+err_free:
+ kfree(ahub);
+ ahub = NULL;
+exit:
+ return ret;
+}
+
+static int __devexit tegra30_ahub_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ if (!ahub)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, NULL);
+
+ tegra30_ahub_debug_remove(ahub);
+
+ iounmap(ahub->audio_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(res->start, resource_size(res));
+
+ iounmap(ahub->apbif_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(ahub->clk_apbif);
+ clk_put(ahub->clk_d_audio);
+
+ kfree(ahub);
+ ahub = NULL;
+
+ return 0;
+}
+
+static struct platform_driver tegra30_ahub_driver = {
+ .probe = tegra30_ahub_probe,
+ .remove = __devexit_p(tegra30_ahub_remove),
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init tegra30_ahub_modinit(void)
+{
+ return platform_driver_register(&tegra30_ahub_driver);
+}
+module_init(tegra30_ahub_modinit);
+
+static void __exit tegra30_ahub_modexit(void)
+{
+ platform_driver_unregister(&tegra30_ahub_driver);
+}
+module_exit(tegra30_ahub_modexit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra 30 AHUB driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra30_ahub.h b/sound/soc/tegra/tegra30_ahub.h
new file mode 100644
index 000000000000..7de1b7c86c7f
--- /dev/null
+++ b/sound/soc/tegra/tegra30_ahub.h
@@ -0,0 +1,512 @@
+/*
+ * tegra30_ahub.h - Definitions for Tegra 30 AHUB driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA30_AHUB_H__
+#define __TEGRA30_AHUB_H__
+
+/* Fields in *_CIF_RX/TX_CTRL; used by the AHUB FIFOs and all other audio modules */
+
+#define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT 28
+#define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US 0xf
+#define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK (TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT)
+
+/* Channel count minus 1 */
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT 24
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US 7
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK (TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT)
+
+/* Channel count minus 1 */
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT 16
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US 7
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK (TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT)
+
+#define TEGRA30_AUDIOCIF_BITS_4 0
+#define TEGRA30_AUDIOCIF_BITS_8 1
+#define TEGRA30_AUDIOCIF_BITS_12 2
+#define TEGRA30_AUDIOCIF_BITS_16 3
+#define TEGRA30_AUDIOCIF_BITS_20 4
+#define TEGRA30_AUDIOCIF_BITS_24 5
+#define TEGRA30_AUDIOCIF_BITS_28 6
+#define TEGRA30_AUDIOCIF_BITS_32 7
+
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT 12
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_MASK (7 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_4 (TEGRA30_AUDIOCIF_BITS_4 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_8 (TEGRA30_AUDIOCIF_BITS_8 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_12 (TEGRA30_AUDIOCIF_BITS_12 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 (TEGRA30_AUDIOCIF_BITS_16 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_20 (TEGRA30_AUDIOCIF_BITS_20 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_24 (TEGRA30_AUDIOCIF_BITS_24 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_28 (TEGRA30_AUDIOCIF_BITS_28 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_32 (TEGRA30_AUDIOCIF_BITS_32 << TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT)
+
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT 8
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_MASK (7 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_4 (TEGRA30_AUDIOCIF_BITS_4 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_8 (TEGRA30_AUDIOCIF_BITS_8 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_12 (TEGRA30_AUDIOCIF_BITS_12 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 (TEGRA30_AUDIOCIF_BITS_16 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_20 (TEGRA30_AUDIOCIF_BITS_20 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_24 (TEGRA30_AUDIOCIF_BITS_24 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_28 (TEGRA30_AUDIOCIF_BITS_28 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_32 (TEGRA30_AUDIOCIF_BITS_32 << TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT)
+
+#define TEGRA30_AUDIOCIF_EXPAND_ZERO 0
+#define TEGRA30_AUDIOCIF_EXPAND_ONE 1
+#define TEGRA30_AUDIOCIF_EXPAND_LFSR 2
+
+#define TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT 6
+#define TEGRA30_AUDIOCIF_CTRL_EXPAND_MASK (3 << TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_EXPAND_ZERO (TEGRA30_AUDIOCIF_EXPAND_ZERO << TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_EXPAND_ONE (TEGRA30_AUDIOCIF_EXPAND_ONE << TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_EXPAND_LFSR (TEGRA30_AUDIOCIF_EXPAND_LFSR << TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT)
+
+#define TEGRA30_AUDIOCIF_STEREO_CONV_CH0 0
+#define TEGRA30_AUDIOCIF_STEREO_CONV_CH1 1
+#define TEGRA30_AUDIOCIF_STEREO_CONV_AVG 2
+
+#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT 4
+#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_MASK (3 << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_CH0 (TEGRA30_AUDIOCIF_STEREO_CONV_CH0 << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_CH1 (TEGRA30_AUDIOCIF_STEREO_CONV_CH1 << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_AVG (TEGRA30_AUDIOCIF_STEREO_CONV_AVG << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
+
+#define TEGRA30_AUDIOCIF_CTRL_REPLICATE 3
+
+#define TEGRA30_AUDIOCIF_DIRECTION_TX 0
+#define TEGRA30_AUDIOCIF_DIRECTION_RX 1
+
+#define TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT 2
+#define TEGRA30_AUDIOCIF_CTRL_DIRECTION_MASK (1 << TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX (TEGRA30_AUDIOCIF_DIRECTION_TX << TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX (TEGRA30_AUDIOCIF_DIRECTION_RX << TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT)
+
+#define TEGRA30_AUDIOCIF_TRUNCATE_ROUND 0
+#define TEGRA30_AUDIOCIF_TRUNCATE_CHOP 1
+
+#define TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT 1
+#define TEGRA30_AUDIOCIF_CTRL_TRUNCATE_MASK (1 << TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_TRUNCATE_ROUND (TEGRA30_AUDIOCIF_TRUNCATE_ROUND << TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_TRUNCATE_CHOP (TEGRA30_AUDIOCIF_TRUNCATE_CHOP << TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT)
+
+#define TEGRA30_AUDIOCIF_MONO_CONV_ZERO 0
+#define TEGRA30_AUDIOCIF_MONO_CONV_COPY 1
+
+#define TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT 0
+#define TEGRA30_AUDIOCIF_CTRL_MONO_CONV_MASK (1 << TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_MONO_CONV_ZERO (TEGRA30_AUDIOCIF_MONO_CONV_ZERO << TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT)
+#define TEGRA30_AUDIOCIF_CTRL_MONO_CONV_COPY (TEGRA30_AUDIOCIF_MONO_CONV_COPY << TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT)
+
+/* Registers within TEGRA30_AUDIO_CLUSTER_BASE */
+
+/* TEGRA30_AHUB_CHANNEL_CTRL */
+
+#define TEGRA30_AHUB_CHANNEL_CTRL 0x0
+#define TEGRA30_AHUB_CHANNEL_CTRL_STRIDE 0x20
+#define TEGRA30_AHUB_CHANNEL_CTRL_COUNT 4
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_EN (1 << 31)
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_EN (1 << 30)
+#define TEGRA30_AHUB_CHANNEL_CTRL_LOOPBACK (1 << 29)
+
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_SHIFT 16
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_MASK_US 0xff
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_MASK (TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_MASK_US << TEGRA30_AHUB_CHANNEL_CTRL_TX_THRESHOLD_SHIFT)
+
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_SHIFT 8
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_MASK_US 0xff
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_MASK (TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_MASK_US << TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_SHIFT)
+
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_EN (1 << 6)
+
+#define TEGRA30_PACK_8_4 2
+#define TEGRA30_PACK_16 3
+
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_SHIFT 4
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_MASK_US 3
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_MASK (TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_MASK_US << TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_SHIFT)
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_8_4 (TEGRA30_PACK_8_4 << TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_SHIFT)
+#define TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_16 (TEGRA30_PACK_16 << TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_SHIFT)
+
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_EN (1 << 2)
+
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_SHIFT 0
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_MASK_US 3
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_MASK (TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_MASK_US << TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_SHIFT)
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_8_4 (TEGRA30_PACK_8_4 << TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_SHIFT)
+#define TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_16 (TEGRA30_PACK_16 << TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_SHIFT)
+
+/* TEGRA30_AHUB_CHANNEL_CLEAR */
+
+#define TEGRA30_AHUB_CHANNEL_CLEAR 0x4
+#define TEGRA30_AHUB_CHANNEL_CLEAR_STRIDE 0x20
+#define TEGRA30_AHUB_CHANNEL_CLEAR_COUNT 4
+#define TEGRA30_AHUB_CHANNEL_CLEAR_TX_SOFT_RESET (1 << 31)
+#define TEGRA30_AHUB_CHANNEL_CLEAR_RX_SOFT_RESET (1 << 30)
+
+/* TEGRA30_AHUB_CHANNEL_STATUS */
+
+#define TEGRA30_AHUB_CHANNEL_STATUS 0x8
+#define TEGRA30_AHUB_CHANNEL_STATUS_STRIDE 0x20
+#define TEGRA30_AHUB_CHANNEL_STATUS_COUNT 4
+#define TEGRA30_AHUB_CHANNEL_STATUS_TX_FREE_SHIFT 24
+#define TEGRA30_AHUB_CHANNEL_STATUS_TX_FREE_MASK_US 0xff
+#define TEGRA30_AHUB_CHANNEL_STATUS_TX_FREE_MASK (TEGRA30_AHUB_CHANNEL_STATUS_TX_FREE_MASK_US << TEGRA30_AHUB_CHANNEL_STATUS_TX_FREE_SHIFT)
+#define TEGRA30_AHUB_CHANNEL_STATUS_RX_FREE_SHIFT 16
+#define TEGRA30_AHUB_CHANNEL_STATUS_RX_FREE_MASK_US 0xff
+#define TEGRA30_AHUB_CHANNEL_STATUS_RX_FREE_MASK (TEGRA30_AHUB_CHANNEL_STATUS_RX_FREE_MASK_US << TEGRA30_AHUB_CHANNEL_STATUS_RX_FREE_SHIFT)
+#define TEGRA30_AHUB_CHANNEL_STATUS_TX_TRIG (1 << 1)
+#define TEGRA30_AHUB_CHANNEL_STATUS_RX_TRIG (1 << 0)
+
+/* TEGRA30_AHUB_CHANNEL_TXFIFO */
+
+#define TEGRA30_AHUB_CHANNEL_TXFIFO 0xc
+#define TEGRA30_AHUB_CHANNEL_TXFIFO_STRIDE 0x20
+#define TEGRA30_AHUB_CHANNEL_TXFIFO_COUNT 4
+
+/* TEGRA30_AHUB_CHANNEL_RXFIFO */
+
+#define TEGRA30_AHUB_CHANNEL_RXFIFO 0x10
+#define TEGRA30_AHUB_CHANNEL_RXFIFO_STRIDE 0x20
+#define TEGRA30_AHUB_CHANNEL_RXFIFO_COUNT 4
+
+/* TEGRA30_AHUB_CIF_TX_CTRL */
+
+#define TEGRA30_AHUB_CIF_TX_CTRL 0x14
+#define TEGRA30_AHUB_CIF_TX_CTRL_STRIDE 0x20
+#define TEGRA30_AHUB_CIF_TX_CTRL_COUNT 4
+/* Uses fields from TEGRA30_AUDIOCIF_CTRL_* */
+
+/* TEGRA30_AHUB_CIF_RX_CTRL */
+
+#define TEGRA30_AHUB_CIF_RX_CTRL 0x18
+#define TEGRA30_AHUB_CIF_RX_CTRL_STRIDE 0x20
+#define TEGRA30_AHUB_CIF_RX_CTRL_COUNT 4
+/* Uses fields from TEGRA30_AUDIOCIF_CTRL_* */
+
+/* TEGRA30_AHUB_CONFIG_LINK_CTRL */
+
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL 0x80
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_MASTER_FIFO_FULL_CNT_SHIFT 28
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_MASTER_FIFO_FULL_CNT_MASK_US 0xf
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_MASTER_FIFO_FULL_CNT_MASK (TEGRA30_AHUB_CONFIG_LINK_CTRL_MASTER_FIFO_FULL_CNT_MASK_US << TEGRA30_AHUB_CONFIG_LINK_CTRL_MASTER_FIFO_FULL_CNT_SHIFT)
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_TIMEOUT_CNT_SHIFT 16
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_TIMEOUT_CNT_MASK_US 0xfff
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_TIMEOUT_CNT_MASK (TEGRA30_AHUB_CONFIG_LINK_CTRL_TIMEOUT_CNT_MASK_US << TEGRA30_AHUB_CONFIG_LINK_CTRL_TIMEOUT_CNT_SHIFT)
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_IDLE_CNT_SHIFT 5
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_IDLE_CNT_MASK_US 0xfff
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_IDLE_CNT_MASK (TEGRA30_AHUB_CONFIG_LINK_CTRL_IDLE_CNT_MASK_US << TEGRA30_AHUB_CONFIG_LINK_CTRL_IDLE_CNT_SHIFT)
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_CG_EN (1 << 2)
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_CLEAR_TIMEOUT_CNTR (1 << 1)
+#define TEGRA30_AHUB_CONFIG_LINK_CTRL_SOFT_RESET (1 << 0)
+
+/* TEGRA30_AHUB_MISC_CTRL */
+
+#define TEGRA30_AHUB_MISC_CTRL 0x84
+#define TEGRA30_AHUB_MISC_CTRL_AUDIO_ACTIVE (1 << 31)
+#define TEGRA30_AHUB_MISC_CTRL_AUDIO_CG_EN (1 << 9)
+#define TEGRA30_AHUB_MISC_CTRL_AUDIO_OBS_SEL_SHIFT 0
+#define TEGRA30_AHUB_MISC_CTRL_AUDIO_OBS_SEL_MASK (0x1f << TEGRA30_AHUB_MISC_CTRL_AUDIO_OBS_SEL_SHIFT)
+
+/* TEGRA30_AHUB_APBDMA_LIVE_STATUS */
+
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS 0x88
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_RX_CIF_FIFO_FULL (1 << 31)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_TX_CIF_FIFO_FULL (1 << 30)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_RX_CIF_FIFO_FULL (1 << 29)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_TX_CIF_FIFO_FULL (1 << 28)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_RX_CIF_FIFO_FULL (1 << 27)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_TX_CIF_FIFO_FULL (1 << 26)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_RX_CIF_FIFO_FULL (1 << 25)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_TX_CIF_FIFO_FULL (1 << 24)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_RX_CIF_FIFO_EMPTY (1 << 23)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_TX_CIF_FIFO_EMPTY (1 << 22)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_RX_CIF_FIFO_EMPTY (1 << 21)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_TX_CIF_FIFO_EMPTY (1 << 20)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_RX_CIF_FIFO_EMPTY (1 << 19)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_TX_CIF_FIFO_EMPTY (1 << 18)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_RX_CIF_FIFO_EMPTY (1 << 17)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_TX_CIF_FIFO_EMPTY (1 << 16)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_RX_DMA_FIFO_FULL (1 << 15)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_TX_DMA_FIFO_FULL (1 << 14)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_RX_DMA_FIFO_FULL (1 << 13)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_TX_DMA_FIFO_FULL (1 << 12)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_RX_DMA_FIFO_FULL (1 << 11)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_TX_DMA_FIFO_FULL (1 << 10)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_RX_DMA_FIFO_FULL (1 << 9)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_TX_DMA_FIFO_FULL (1 << 8)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_RX_DMA_FIFO_EMPTY (1 << 7)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH3_TX_DMA_FIFO_EMPTY (1 << 6)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_RX_DMA_FIFO_EMPTY (1 << 5)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH2_TX_DMA_FIFO_EMPTY (1 << 4)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_RX_DMA_FIFO_EMPTY (1 << 3)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH1_TX_DMA_FIFO_EMPTY (1 << 2)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_RX_DMA_FIFO_EMPTY (1 << 1)
+#define TEGRA30_AHUB_APBDMA_LIVE_STATUS_CH0_TX_DMA_FIFO_EMPTY (1 << 0)
+
+/* TEGRA30_AHUB_I2S_LIVE_STATUS */
+
+#define TEGRA30_AHUB_I2S_LIVE_STATUS 0x8c
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S4_RX_FIFO_FULL (1 << 29)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S4_TX_FIFO_FULL (1 << 28)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S3_RX_FIFO_FULL (1 << 27)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S3_TX_FIFO_FULL (1 << 26)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S2_RX_FIFO_FULL (1 << 25)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S2_TX_FIFO_FULL (1 << 24)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S1_RX_FIFO_FULL (1 << 23)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S1_TX_FIFO_FULL (1 << 22)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S0_RX_FIFO_FULL (1 << 21)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S0_TX_FIFO_FULL (1 << 20)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S4_RX_FIFO_ENABLED (1 << 19)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S4_TX_FIFO_ENABLED (1 << 18)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S3_RX_FIFO_ENABLED (1 << 17)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S3_TX_FIFO_ENABLED (1 << 16)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S2_RX_FIFO_ENABLED (1 << 15)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S2_TX_FIFO_ENABLED (1 << 14)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S1_RX_FIFO_ENABLED (1 << 13)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S1_TX_FIFO_ENABLED (1 << 12)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S0_RX_FIFO_ENABLED (1 << 11)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S0_TX_FIFO_ENABLED (1 << 10)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S4_RX_FIFO_EMPTY (1 << 9)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S4_TX_FIFO_EMPTY (1 << 8)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S3_RX_FIFO_EMPTY (1 << 7)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S3_TX_FIFO_EMPTY (1 << 6)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S2_RX_FIFO_EMPTY (1 << 5)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S2_TX_FIFO_EMPTY (1 << 4)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S1_RX_FIFO_EMPTY (1 << 3)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S1_TX_FIFO_EMPTY (1 << 2)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S0_RX_FIFO_EMPTY (1 << 1)
+#define TEGRA30_AHUB_I2S_LIVE_STATUS_I2S0_TX_FIFO_EMPTY (1 << 0)
+
+/* TEGRA30_AHUB_DAM0_LIVE_STATUS */
+
+#define TEGRA30_AHUB_DAM_LIVE_STATUS 0x90
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_STRIDE 0x8
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_COUNT 3
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_TX_ENABLED (1 << 26)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_RX1_ENABLED (1 << 25)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_RX0_ENABLED (1 << 24)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_TXFIFO_FULL (1 << 15)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_RX1FIFO_FULL (1 << 9)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_RX0FIFO_FULL (1 << 8)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_TXFIFO_EMPTY (1 << 7)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_RX1FIFO_EMPTY (1 << 1)
+#define TEGRA30_AHUB_DAM_LIVE_STATUS_RX0FIFO_EMPTY (1 << 0)
+
+/* TEGRA30_AHUB_SPDIF_LIVE_STATUS */
+
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS 0xa8
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_USER_TX_ENABLED (1 << 11)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_USER_RX_ENABLED (1 << 10)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_DATA_TX_ENABLED (1 << 9)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_DATA_RX_ENABLED (1 << 8)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_USER_TXFIFO_FULL (1 << 7)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_USER_RXFIFO_FULL (1 << 6)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_DATA_TXFIFO_FULL (1 << 5)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_DATA_RXFIFO_FULL (1 << 4)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_USER_TXFIFO_EMPTY (1 << 3)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_USER_RXFIFO_EMPTY (1 << 2)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_DATA_TXFIFO_EMPTY (1 << 1)
+#define TEGRA30_AHUB_SPDIF_LIVE_STATUS_DATA_RXFIFO_EMPTY (1 << 0)
+
+/* TEGRA30_AHUB_I2S_INT_MASK */
+
+#define TEGRA30_AHUB_I2S_INT_MASK 0xb0
+
+/* TEGRA30_AHUB_DAM_INT_MASK */
+
+#define TEGRA30_AHUB_DAM_INT_MASK 0xb4
+
+/* TEGRA30_AHUB_SPDIF_INT_MASK */
+
+#define TEGRA30_AHUB_SPDIF_INT_MASK 0xbc
+
+/* TEGRA30_AHUB_APBIF_INT_MASK */
+
+#define TEGRA30_AHUB_APBIF_INT_MASK 0xc0
+
+/* TEGRA30_AHUB_I2S_INT_STATUS */
+
+#define TEGRA30_AHUB_I2S_INT_STATUS 0xc8
+
+/* TEGRA30_AHUB_DAM_INT_STATUS */
+
+#define TEGRA30_AHUB_DAM_INT_STATUS 0xcc
+
+/* TEGRA30_AHUB_SPDIF_INT_STATUS */
+
+#define TEGRA30_AHUB_SPDIF_INT_STATUS 0xd4
+
+/* TEGRA30_AHUB_APBIF_INT_STATUS */
+
+#define TEGRA30_AHUB_APBIF_INT_STATUS 0xd8
+
+/* TEGRA30_AHUB_I2S_INT_SOURCE */
+
+#define TEGRA30_AHUB_I2S_INT_SOURCE 0xe0
+
+/* TEGRA30_AHUB_DAM_INT_SOURCE */
+
+#define TEGRA30_AHUB_DAM_INT_SOURCE 0xe4
+
+/* TEGRA30_AHUB_SPDIF_INT_SOURCE */
+
+#define TEGRA30_AHUB_SPDIF_INT_SOURCE 0xec
+
+/* TEGRA30_AHUB_APBIF_INT_SOURCE */
+
+#define TEGRA30_AHUB_APBIF_INT_SOURCE 0xf0
+
+/* TEGRA30_AHUB_I2S_INT_SET */
+
+#define TEGRA30_AHUB_I2S_INT_SET 0xf8
+
+/* TEGRA30_AHUB_DAM_INT_SET */
+
+#define TEGRA30_AHUB_DAM_INT_SET 0xfc
+
+/* TEGRA30_AHUB_SPDIF_INT_SET */
+
+#define TEGRA30_AHUB_SPDIF_INT_SET 0x100
+
+/* TEGRA30_AHUB_APBIF_INT_SET */
+
+#define TEGRA30_AHUB_APBIF_INT_SET 0x104
+
+/* Registers within TEGRA30_AHUB_BASE */
+
+#define TEGRA30_AHUB_AUDIO_RX 0x0
+#define TEGRA30_AHUB_AUDIO_RX_STRIDE 0x4
+#define TEGRA30_AHUB_AUDIO_RX_COUNT 17
+/* This register repeats once for each entry in enum tegra30_ahub_rxcif */
+/* The fields in this register are 1 bit per entry in tegra30_ahub_txcif */
+
+/* apbif register count */
+#define TEGRA30_APBIF_CACHE_REG_COUNT_PER_CHANNEL ((TEGRA30_AHUB_CIF_RX_CTRL>>2) + 1)
+#define TEGRA30_APBIF_CACHE_REG_COUNT ((TEGRA30_APBIF_CACHE_REG_COUNT_PER_CHANNEL + 1) * TEGRA30_AHUB_CHANNEL_CTRL_COUNT)
+
+/* cache index to be skipped */
+#define TEGRA30_APBIF_CACHE_REG_INDEX_RSVD TEGRA30_APBIF_CACHE_REG_COUNT_PER_CHANNEL
+#define TEGRA30_APBIF_CACHE_REG_INDEX_RSVD_STRIDE (TEGRA30_APBIF_CACHE_REG_COUNT_PER_CHANNEL + 1)
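+
+/*
+ * With TEGRA30_AHUB_CIF_RX_CTRL at 0x18, each channel contributes
+ * (0x18 >> 2) + 1 = 7 cached words, so the cache holds (7 + 1) * 4 = 32
+ * words in total and every 8th index (7, 15, 23, 31) is reserved and
+ * skipped when the cache is saved or restored.
+ */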
+
+/*
+ * Terminology:
+ * AHUB: Audio Hub; a cross-bar switch between the audio devices: DMA FIFOs,
+ * I2S controllers, SPDIF controllers, and DAMs.
+ * XBAR: The core cross-bar component of the AHUB.
+ * CIF: Client Interface; the HW module connecting an audio device to the
+ * XBAR.
+ * DAM: Digital Audio Mixer: A HW module that mixes multiple audio streams,
+ * possibly including sample-rate conversion.
+ *
+ * Each TX CIF transmits data into the XBAR. Each RX CIF can receive audio
+ * transmitted by a particular TX CIF.
+ *
+ * This driver is currently very simplistic; many HW features are not
+ * exposed (for example, DAMs are not supported and only 16-bit stereo
+ * audio is supported).
+ */
+
+enum tegra30_ahub_txcif {
+ TEGRA30_AHUB_TXCIF_APBIF_TX0,
+ TEGRA30_AHUB_TXCIF_APBIF_TX1,
+ TEGRA30_AHUB_TXCIF_APBIF_TX2,
+ TEGRA30_AHUB_TXCIF_APBIF_TX3,
+ TEGRA30_AHUB_TXCIF_I2S0_TX0,
+ TEGRA30_AHUB_TXCIF_I2S1_TX0,
+ TEGRA30_AHUB_TXCIF_I2S2_TX0,
+ TEGRA30_AHUB_TXCIF_I2S3_TX0,
+ TEGRA30_AHUB_TXCIF_I2S4_TX0,
+ TEGRA30_AHUB_TXCIF_DAM0_TX0,
+ TEGRA30_AHUB_TXCIF_DAM1_TX0,
+ TEGRA30_AHUB_TXCIF_DAM2_TX0,
+ TEGRA30_AHUB_TXCIF_SPDIF_TX0,
+ TEGRA30_AHUB_TXCIF_SPDIF_TX1,
+};
+
+enum tegra30_ahub_rxcif {
+ TEGRA30_AHUB_RXCIF_APBIF_RX0,
+ TEGRA30_AHUB_RXCIF_APBIF_RX1,
+ TEGRA30_AHUB_RXCIF_APBIF_RX2,
+ TEGRA30_AHUB_RXCIF_APBIF_RX3,
+ TEGRA30_AHUB_RXCIF_I2S0_RX0,
+ TEGRA30_AHUB_RXCIF_I2S1_RX0,
+ TEGRA30_AHUB_RXCIF_I2S2_RX0,
+ TEGRA30_AHUB_RXCIF_I2S3_RX0,
+ TEGRA30_AHUB_RXCIF_I2S4_RX0,
+ TEGRA30_AHUB_RXCIF_DAM0_RX0,
+ TEGRA30_AHUB_RXCIF_DAM0_RX1,
+ TEGRA30_AHUB_RXCIF_DAM1_RX0,
+ TEGRA30_AHUB_RXCIF_DAM1_RX1,
+ TEGRA30_AHUB_RXCIF_DAM2_RX0,
+ TEGRA30_AHUB_RXCIF_DAM2_RX1,
+ TEGRA30_AHUB_RXCIF_SPDIF_RX0,
+ TEGRA30_AHUB_RXCIF_SPDIF_RX1,
+};
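+
+/*
+ * Example (illustrative usage only): routing the first APBIF DMA FIFO into
+ * I2S1, i.e. a playback path from memory towards I2S1:
+ *
+ *     tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_I2S1_RX0,
+ *                                    TEGRA30_AHUB_TXCIF_APBIF_TX0);
+ */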
+
+extern void tegra30_ahub_enable_clocks(void);
+extern void tegra30_ahub_disable_clocks(void);
+
+extern int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
+ unsigned long *fiforeg,
+ unsigned long *reqsel);
+extern int tegra30_ahub_set_rx_cif_channels(enum tegra30_ahub_rxcif rxcif,
+ unsigned int audio_ch,
+ unsigned int client_ch);
+extern int tegra30_ahub_enable_rx_fifo(enum tegra30_ahub_rxcif rxcif);
+extern int tegra30_ahub_disable_rx_fifo(enum tegra30_ahub_rxcif rxcif);
+extern int tegra30_ahub_free_rx_fifo(enum tegra30_ahub_rxcif rxcif);
+
+extern int tegra30_ahub_allocate_tx_fifo(enum tegra30_ahub_txcif *txcif,
+ unsigned long *fiforeg,
+ unsigned long *reqsel);
+extern int tegra30_ahub_set_tx_cif_channels(enum tegra30_ahub_txcif txcif,
+ unsigned int audio_ch,
+ unsigned int client_ch);
+extern int tegra30_ahub_enable_tx_fifo(enum tegra30_ahub_txcif txcif);
+extern int tegra30_ahub_disable_tx_fifo(enum tegra30_ahub_txcif txcif);
+extern int tegra30_ahub_free_tx_fifo(enum tegra30_ahub_txcif txcif);
+
+extern int tegra30_ahub_set_rx_cif_source(enum tegra30_ahub_rxcif rxcif,
+ enum tegra30_ahub_txcif txcif);
+extern int tegra30_ahub_unset_rx_cif_source(enum tegra30_ahub_rxcif rxcif);
+
+#ifdef CONFIG_PM
+extern int tegra30_ahub_apbif_resume(void);
+#endif
+
+struct tegra30_ahub {
+ struct device *dev;
+ struct clk *clk_d_audio;
+ struct clk *clk_apbif;
+ resource_size_t apbif_addr;
+ void __iomem *apbif_regs;
+ void __iomem *audio_regs;
+ DECLARE_BITMAP(rx_usage, TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
+ DECLARE_BITMAP(tx_usage, TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
+ struct dentry *debug;
+#ifdef CONFIG_PM
+ u32 ahub_reg_cache[TEGRA30_AHUB_AUDIO_RX_COUNT];
+ u32 apbif_reg_cache[TEGRA30_APBIF_CACHE_REG_COUNT];
+#endif
+};
+
+#endif
diff --git a/sound/soc/tegra/tegra30_dam.c b/sound/soc/tegra/tegra30_dam.c
new file mode 100644
index 000000000000..4ac81266e7cf
--- /dev/null
+++ b/sound/soc/tegra/tegra30_dam.c
@@ -0,0 +1,644 @@
+/*
+ * tegra30_dam.c - Tegra 30 DAM driver
+ *
+ * Author: Nikesh Oswal <noswal@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <sound/soc.h>
+#include "tegra30_dam.h"
+#include "tegra30_ahub.h"
+
+#define DRV_NAME "tegra30-dam"
+
+static struct tegra30_dam_context *dams_cont_info[TEGRA30_NR_DAM_IFC];
+
+enum {
+ dam_ch_in0 = 0x0,
+ dam_ch_in1,
+ dam_ch_out,
+ dam_ch_maxnum
+} tegra30_dam_chtype;
+
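+/*
+ * Maps an (input rate, output rate) pair to the CH0 STEP value programmed by
+ * tegra30_dam_set_step_reset(); pairs not listed here fall back to 0.
+ */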
+struct tegra30_dam_src_step_table step_table[] = {
+ { 8000, 44100, 80 },
+ { 8000, 48000, 1 },
+ { 16000, 44100, 160 },
+ { 16000, 48000, 1 },
+ { 44100, 8000, 441 },
+ { 48000, 8000, 0 },
+ { 44100, 16000, 441 },
+ { 48000, 16000, 0 },
+};
+
+static void tegra30_dam_set_output_samplerate(struct tegra30_dam_context *dam,
+ int fsout);
+static void tegra30_dam_set_input_samplerate(struct tegra30_dam_context *dam,
+ int fsin);
+static int tegra30_dam_set_step_reset(struct tegra30_dam_context *dam,
+ int insample, int outsample);
+static void tegra30_dam_ch0_set_step(struct tegra30_dam_context *dam, int step);
+
+static inline void tegra30_dam_writel(struct tegra30_dam_context *dam,
+ u32 val, u32 reg)
+{
+#ifdef CONFIG_PM
+ dam->reg_cache[reg >> 2] = val;
+#endif
+ __raw_writel(val, dam->damregs + reg);
+}
+
+static inline u32 tegra30_dam_readl(struct tegra30_dam_context *dam, u32 reg)
+{
+ u32 val = __raw_readl(dam->damregs + reg);
+
+ return val;
+}
+
+#ifdef CONFIG_PM
+int tegra30_dam_resume(int ifc)
+{
+ int i = 0;
+ struct tegra30_dam_context *dam;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ dam = dams_cont_info[ifc];
+
+ if (dam->in_use) {
+ tegra30_dam_enable_clock(ifc);
+
+ for (i = 0; i <= TEGRA30_DAM_CTRL_REGINDEX; i++) {
+ if ((i == TEGRA30_DAM_CTRL_RSVD_6) ||
+ (i == TEGRA30_DAM_CTRL_RSVD_10))
+ continue;
+
+ tegra30_dam_writel(dam, dam->reg_cache[i],
+ (i << 2));
+ }
+
+ tegra30_dam_disable_clock(ifc);
+ }
+
+ return 0;
+}
+#endif
+
+void tegra30_dam_disable_clock(int ifc)
+{
+ struct tegra30_dam_context *dam;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return;
+
+ dam = dams_cont_info[ifc];
+ clk_disable(dam->dam_clk);
+ tegra30_ahub_disable_clocks();
+}
+
+int tegra30_dam_enable_clock(int ifc)
+{
+ struct tegra30_dam_context *dam;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ dam = dams_cont_info[ifc];
+ tegra30_ahub_enable_clocks();
+ clk_enable(dam->dam_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra30_dam_show(struct seq_file *s, void *unused)
+{
+#define REG(r) { r, #r }
+ static const struct {
+ int offset;
+ const char *name;
+ } regs[] = {
+ REG(TEGRA30_DAM_CTRL),
+ REG(TEGRA30_DAM_CLIP),
+ REG(TEGRA30_DAM_CLIP_THRESHOLD),
+ REG(TEGRA30_DAM_AUDIOCIF_OUT_CTRL),
+ REG(TEGRA30_DAM_CH0_CTRL),
+ REG(TEGRA30_DAM_CH0_CONV),
+ REG(TEGRA30_DAM_AUDIOCIF_CH0_CTRL),
+ REG(TEGRA30_DAM_CH1_CTRL),
+ REG(TEGRA30_DAM_CH1_CONV),
+ REG(TEGRA30_DAM_AUDIOCIF_CH1_CTRL),
+ };
+#undef REG
+
+ struct tegra30_dam_context *dam = s->private;
+ int i;
+
+ clk_enable(dam->dam_clk);
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ u32 val = tegra30_dam_readl(dam, regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+
+ clk_disable(dam->dam_clk);
+
+ return 0;
+}
+
+static int tegra30_dam_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra30_dam_show, inode->i_private);
+}
+
+static const struct file_operations tegra30_dam_debug_fops = {
+ .open = tegra30_dam_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra30_dam_debug_add(struct tegra30_dam_context *dam, int id)
+{
+ char name[] = DRV_NAME ".0";
+
+ snprintf(name, sizeof(name), DRV_NAME".%1d", id);
+ dam->debug = debugfs_create_file(name, S_IRUGO, snd_soc_debugfs_root,
+ dam, &tegra30_dam_debug_fops);
+}
+
+static void tegra30_dam_debug_remove(struct tegra30_dam_context *dam)
+{
+ if (dam->debug)
+ debugfs_remove(dam->debug);
+}
+#else
+static inline void tegra30_dam_debug_add(struct tegra30_dam_context *dam,
+ int id)
+{
+}
+
+static inline void tegra30_dam_debug_remove(struct tegra30_dam_context *dam)
+{
+}
+#endif
+
+int tegra30_dam_allocate_controller(void)
+{
+ int i = 0;
+ struct tegra30_dam_context *dam = NULL;
+
+ for (i = 0; i < TEGRA30_NR_DAM_IFC; i++) {
+
+ dam = dams_cont_info[i];
+
+ if (!dam->in_use) {
+ dam->in_use = true;
+ return i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int tegra30_dam_allocate_channel(int ifc, int chid)
+{
+ struct tegra30_dam_context *dam = NULL;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ dam = dams_cont_info[ifc];
+
+ if (!dam->ch_alloc[chid]) {
+ dam->ch_alloc[chid] = true;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int tegra30_dam_free_channel(int ifc, int chid)
+{
+ struct tegra30_dam_context *dam = NULL;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ dam = dams_cont_info[ifc];
+
+ if (dam->ch_alloc[chid]) {
+ dam->ch_alloc[chid] = false;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int tegra30_dam_free_controller(int ifc)
+{
+ struct tegra30_dam_context *dam = NULL;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ dam = dams_cont_info[ifc];
+
+ if (!dam->ch_alloc[dam_ch_in0] &&
+ !dam->ch_alloc[dam_ch_in1]) {
+ dam->in_use = false;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void tegra30_dam_set_samplerate(int ifc, int chid, int samplerate)
+{
+ struct tegra30_dam_context *dam;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return;
+
+ dam = dams_cont_info[ifc];
+
+ switch (chid) {
+ case dam_ch_in0:
+ tegra30_dam_set_input_samplerate(dam, samplerate);
+ dam->ch_insamplerate[dam_ch_in0] = samplerate;
+ tegra30_dam_set_step_reset(dam, samplerate, dam->outsamplerate);
+ break;
+ case dam_ch_in1:
+ if (samplerate != dam->outsamplerate)
+ return;
+ dam->ch_insamplerate[dam_ch_in1] = samplerate;
+ break;
+ case dam_ch_out:
+ tegra30_dam_set_output_samplerate(dam, samplerate);
+ dam->outsamplerate = samplerate;
+ break;
+ default:
+ break;
+ }
+}
+
+void tegra30_dam_set_output_samplerate(struct tegra30_dam_context *dam,
+ int fsout)
+{
+ u32 val;
+
+ val = tegra30_dam_readl(dam, TEGRA30_DAM_CTRL);
+ val &= ~TEGRA30_DAM_CTRL_FSOUT_MASK;
+
+ switch (fsout) {
+ case TEGRA30_AUDIO_SAMPLERATE_8000:
+ val |= TEGRA30_DAM_CTRL_FSOUT_FS8;
+ break;
+ case TEGRA30_AUDIO_SAMPLERATE_16000:
+ val |= TEGRA30_DAM_CTRL_FSOUT_FS16;
+ break;
+ case TEGRA30_AUDIO_SAMPLERATE_44100:
+ val |= TEGRA30_DAM_CTRL_FSOUT_FS44;
+ break;
+ case TEGRA30_AUDIO_SAMPLERATE_48000:
+ val |= TEGRA30_DAM_CTRL_FSOUT_FS48;
+ break;
+ default:
+ break;
+ }
+
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CTRL);
+}
+
+void tegra30_dam_set_input_samplerate(struct tegra30_dam_context *dam, int fsin)
+{
+ u32 val;
+
+ val = tegra30_dam_readl(dam, TEGRA30_DAM_CH0_CTRL);
+ val &= ~TEGRA30_DAM_CH0_CTRL_FSIN_MASK;
+
+ switch (fsin) {
+ case TEGRA30_AUDIO_SAMPLERATE_8000:
+ val |= TEGRA30_DAM_CH0_CTRL_FSIN_FS8;
+ break;
+ case TEGRA30_AUDIO_SAMPLERATE_16000:
+ val |= TEGRA30_DAM_CH0_CTRL_FSIN_FS16;
+ break;
+ case TEGRA30_AUDIO_SAMPLERATE_44100:
+ val |= TEGRA30_DAM_CH0_CTRL_FSIN_FS44;
+ break;
+ case TEGRA30_AUDIO_SAMPLERATE_48000:
+ val |= TEGRA30_DAM_CH0_CTRL_FSIN_FS48;
+ break;
+ default:
+ break;
+ }
+
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CH0_CTRL);
+}
+
+int tegra30_dam_set_step_reset(struct tegra30_dam_context *dam,
+ int insample, int outsample)
+{
+ int step_reset = 0;
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(step_table); i++) {
+ if ((insample == step_table[i].insample) &&
+ (outsample == step_table[i].outsample))
+ step_reset = step_table[i].stepreset;
+ }
+
+ tegra30_dam_ch0_set_step(dam, step_reset);
+
+ return 0;
+}
+
+void tegra30_dam_ch0_set_step(struct tegra30_dam_context *dam, int step)
+{
+ u32 val;
+
+ val = tegra30_dam_readl(dam, TEGRA30_DAM_CH0_CTRL);
+ val &= ~TEGRA30_DAM_CH0_CTRL_STEP_MASK;
+ val |= step << TEGRA30_DAM_CH0_CTRL_STEP_SHIFT;
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CH0_CTRL);
+}
+
+int tegra30_dam_set_gain(int ifc, int chid, int gain)
+{
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ switch (chid) {
+ case dam_ch_in0:
+ tegra30_dam_writel(dams_cont_info[ifc], gain,
+ TEGRA30_DAM_CH0_CONV);
+ break;
+ case dam_ch_in1:
+ tegra30_dam_writel(dams_cont_info[ifc], gain,
+ TEGRA30_DAM_CH1_CONV);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
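+/*
+ * The AUDIOCIF fields are programmed as (channels - 1) and (bits / 4 - 1).
+ * For example, a stereo 16-bit audio stream feeding the mono 16-bit ch0
+ * input is encoded as (2 - 1) << 24 | (16 / 4 - 1) << 12 |
+ * (1 - 1) << 16 | (16 / 4 - 1) << 8, plus the mono/stereo conversion and
+ * direction bits set below.
+ */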
+int tegra30_dam_set_acif(int ifc, int chid, unsigned int audio_channels,
+ unsigned int audio_bits, unsigned int client_channels,
+ unsigned int client_bits)
+{
+ unsigned int reg;
+ unsigned int value = 0;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return -EINVAL;
+
+ /* ch0 always takes mono/16-bit input */
+ if ((chid == dam_ch_in0) &&
+ ((client_channels != 1) || (client_bits != 16)))
+ return -EINVAL;
+
+ value |= TEGRA30_CIF_MONOCONV_COPY;
+ value |= TEGRA30_CIF_STEREOCONV_CH0;
+ value |= (audio_channels-1) << TEGRA30_AUDIO_CHANNELS_SHIFT;
+ value |= (((audio_bits>>2)-1)<<TEGRA30_AUDIO_BITS_SHIFT);
+ value |= (client_channels-1) << TEGRA30_CLIENT_CHANNELS_SHIFT;
+ value |= (((client_bits>>2)-1)<<TEGRA30_CLIENT_BITS_SHIFT);
+
+ switch (chid) {
+ case dam_ch_out:
+ value |= TEGRA30_CIF_DIRECTION_TX;
+ reg = TEGRA30_DAM_AUDIOCIF_OUT_CTRL;
+ break;
+ case dam_ch_in0:
+ value |= TEGRA30_CIF_DIRECTION_RX;
+ reg = TEGRA30_DAM_AUDIOCIF_CH0_CTRL;
+ break;
+ case dam_ch_in1:
+ value |= TEGRA30_CIF_DIRECTION_RX;
+ reg = TEGRA30_DAM_AUDIOCIF_CH1_CTRL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tegra30_dam_writel(dams_cont_info[ifc], value, reg);
+
+ return 0;
+}
+
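+/*
+ * Channel enables are reference counted per input channel; the DAM block
+ * itself (DAM_EN) stays enabled as long as either input channel still has
+ * at least one user.
+ */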
+void tegra30_dam_enable(int ifc, int on, int chid)
+{
+ u32 old_val, val, enreg;
+ struct tegra30_dam_context *dam;
+
+ if (ifc >= TEGRA30_NR_DAM_IFC)
+ return;
+
+ dam = dams_cont_info[ifc];
+
+ if (chid == dam_ch_in0)
+ enreg = TEGRA30_DAM_CH0_CTRL;
+ else
+ enreg = TEGRA30_DAM_CH1_CTRL;
+
+ old_val = val = tegra30_dam_readl(dam, enreg);
+
+ if (on) {
+ if (!dam->ch_enable_refcnt[chid]++)
+ val |= TEGRA30_DAM_CH0_CTRL_EN;
+ } else if (dam->ch_enable_refcnt[chid]) {
+ dam->ch_enable_refcnt[chid]--;
+ if (!dam->ch_enable_refcnt[chid])
+ val &= ~TEGRA30_DAM_CH0_CTRL_EN;
+ }
+
+ if (val != old_val)
+ tegra30_dam_writel(dam, val, enreg);
+
+ old_val = val = tegra30_dam_readl(dam, TEGRA30_DAM_CTRL);
+
+ if (dam->ch_enable_refcnt[dam_ch_in0] ||
+ dam->ch_enable_refcnt[dam_ch_in1])
+ val |= TEGRA30_DAM_CTRL_DAM_EN;
+ else
+ val &= ~TEGRA30_DAM_CTRL_DAM_EN;
+
+ if (old_val != val)
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CTRL);
+}
+
+void tegra30_dam_ch0_set_datasync(struct tegra30_dam_context *dam, int datasync)
+{
+ u32 val;
+
+ val = tegra30_dam_readl(dam, TEGRA30_DAM_CH0_CTRL);
+ val &= ~TEGRA30_DAM_CH0_CTRL_DATA_SYNC_MASK;
+ val |= datasync << TEGRA30_DAM_DATA_SYNC_SHIFT;
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CH0_CTRL);
+}
+
+void tegra30_dam_ch1_set_datasync(struct tegra30_dam_context *dam, int datasync)
+{
+ u32 val;
+
+ val = tegra30_dam_readl(dam, TEGRA30_DAM_CH1_CTRL);
+ val &= ~TEGRA30_DAM_CH1_CTRL_DATA_SYNC_MASK;
+ val |= datasync << TEGRA30_DAM_DATA_SYNC_SHIFT;
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CH1_CTRL);
+}
+
+void tegra30_dam_enable_clip_counter(struct tegra30_dam_context *dam, int on)
+{
+ u32 val;
+
+ val = tegra30_dam_readl(dam, TEGRA30_DAM_CLIP);
+ val &= ~TEGRA30_DAM_CLIP_COUNTER_ENABLE;
+ val |= on ? TEGRA30_DAM_CLIP_COUNTER_ENABLE : 0;
+ tegra30_dam_writel(dam, val, TEGRA30_DAM_CLIP);
+}
+
+static int __devinit tegra30_dam_probe(struct platform_device *pdev)
+{
+ struct resource *res, *region;
+ struct tegra30_dam_context *dam;
+ int ret = 0;
+#ifdef CONFIG_PM
+ int i;
+#endif
+
+ if ((pdev->id < 0) ||
+ (pdev->id >= TEGRA30_NR_DAM_IFC)) {
+ dev_err(&pdev->dev, "ID %d out of range\n", pdev->id);
+ return -EINVAL;
+ }
+
+ dams_cont_info[pdev->id] = devm_kzalloc(&pdev->dev,
+ sizeof(struct tegra30_dam_context),
+ GFP_KERNEL);
+ if (!dams_cont_info[pdev->id]) {
+ dev_err(&pdev->dev, "Can't allocate dam context\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ dam = dams_cont_info[pdev->id];
+
+ dam->dam_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dam->dam_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve dam clock\n");
+ ret = PTR_ERR(dam->dam_clk);
+ goto err_free;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory 0 resource\n");
+ ret = -ENODEV;
+ goto err_clk_put_dam;
+ }
+
+ region = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "Memory region 0 already claimed\n");
+ ret = -EBUSY;
+ goto err_clk_put_dam;
+ }
+
+ dam->damregs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dam->damregs) {
+ dev_err(&pdev->dev, "ioremap 0 failed\n");
+ ret = -ENOMEM;
+ goto err_clk_put_dam;
+ }
+
+#ifdef CONFIG_PM
+ /* cache the POR values of the DAM registers */
+ tegra30_dam_enable_clock(pdev->id);
+
+ for (i = 0; i <= TEGRA30_DAM_CTRL_REGINDEX; i++) {
+ if ((i == TEGRA30_DAM_CTRL_RSVD_6) ||
+ (i == TEGRA30_DAM_CTRL_RSVD_10))
+ continue;
+
+ dam->reg_cache[i] =
+ tegra30_dam_readl(dam, i << 2);
+ }
+
+ tegra30_dam_disable_clock(pdev->id);
+#endif
+
+ platform_set_drvdata(pdev, dam);
+
+ tegra30_dam_debug_add(dam, pdev->id);
+
+ return 0;
+
+err_clk_put_dam:
+ clk_put(dam->dam_clk);
+err_free:
+ dams_cont_info[pdev->id] = NULL;
+exit:
+ return ret;
+}
+
+static int __devexit tegra30_dam_remove(struct platform_device *pdev)
+{
+ struct tegra30_dam_context *dam;
+
+ dam = platform_get_drvdata(pdev);
+ clk_put(dam->dam_clk);
+ tegra30_dam_debug_remove(dam);
+ dams_cont_info[pdev->id] = NULL;
+
+ return 0;
+}
+
+static struct platform_driver tegra30_dam_driver = {
+ .probe = tegra30_dam_probe,
+ .remove = __devexit_p(tegra30_dam_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra30_dam_modinit(void)
+{
+ return platform_driver_register(&tegra30_dam_driver);
+}
+module_init(tegra30_dam_modinit);
+
+static void __exit tegra30_dam_modexit(void)
+{
+ platform_driver_unregister(&tegra30_dam_driver);
+}
+module_exit(tegra30_dam_modexit);
+
+MODULE_AUTHOR("Nikesh Oswal <noswal@nvidia.com>");
+MODULE_DESCRIPTION("Tegra 30 DAM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra30_dam.h b/sound/soc/tegra/tegra30_dam.h
new file mode 100644
index 000000000000..371e8139eec7
--- /dev/null
+++ b/sound/soc/tegra/tegra30_dam.h
@@ -0,0 +1,163 @@
+/*
+ * tegra30_dam.h - Tegra 30 DAM driver.
+ *
+ * Author: Nikesh Oswal <noswal@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA30_DAM_H
+#define __TEGRA30_DAM_H
+
+/* Register offsets from TEGRA30_DAM*_BASE */
+#define TEGRA30_DAM_CTRL 0
+#define TEGRA30_DAM_CLIP 4
+#define TEGRA30_DAM_CLIP_THRESHOLD 8
+#define TEGRA30_DAM_AUDIOCIF_OUT_CTRL 0x0C
+#define TEGRA30_DAM_CH0_CTRL 0x10
+#define TEGRA30_DAM_CH0_CONV 0x14
+#define TEGRA30_DAM_AUDIOCIF_CH0_CTRL 0x1C
+#define TEGRA30_DAM_CH1_CTRL 0x20
+#define TEGRA30_DAM_CH1_CONV 0x24
+#define TEGRA30_DAM_AUDIOCIF_CH1_CTRL 0x2C
+#define TEGRA30_DAM_CTRL_REGINDEX (TEGRA30_DAM_AUDIOCIF_CH1_CTRL >> 2)
+#define TEGRA30_DAM_CTRL_RSVD_6 6
+#define TEGRA30_DAM_CTRL_RSVD_10 10
+
+#define TEGRA30_NR_DAM_IFC 3
+
+#define TEGRA30_DAM_NUM_INPUT_CHANNELS 2
+
+/* Fields in TEGRA30_DAM_CTRL */
+#define TEGRA30_DAM_CTRL_SOFT_RESET_ENABLE (1 << 31)
+#define TEGRA30_DAM_CTRL_FSOUT_SHIFT 4
+#define TEGRA30_DAM_CTRL_FSOUT_MASK (0xf << TEGRA30_DAM_CTRL_FSOUT_SHIFT)
+#define TEGRA30_DAM_FS_8KHZ 0
+#define TEGRA30_DAM_FS_16KHZ 1
+#define TEGRA30_DAM_FS_44KHZ 2
+#define TEGRA30_DAM_FS_48KHZ 3
+#define TEGRA30_DAM_CTRL_FSOUT_FS8 (TEGRA30_DAM_FS_8KHZ << TEGRA30_DAM_CTRL_FSOUT_SHIFT)
+#define TEGRA30_DAM_CTRL_FSOUT_FS16 (TEGRA30_DAM_FS_16KHZ << TEGRA30_DAM_CTRL_FSOUT_SHIFT)
+#define TEGRA30_DAM_CTRL_FSOUT_FS44 (TEGRA30_DAM_FS_44KHZ << TEGRA30_DAM_CTRL_FSOUT_SHIFT)
+#define TEGRA30_DAM_CTRL_FSOUT_FS48 (TEGRA30_DAM_FS_48KHZ << TEGRA30_DAM_CTRL_FSOUT_SHIFT)
+#define TEGRA30_DAM_CTRL_CG_EN (1 << 1)
+#define TEGRA30_DAM_CTRL_DAM_EN (1 << 0)
+
+
+/* Fields in TEGRA30_DAM_CLIP */
+#define TEGRA30_DAM_CLIP_COUNTER_ENABLE (1 << 31)
+#define TEGRA30_DAM_CLIP_COUNT_MASK 0x7fffffff
+
+
+/* Fields in TEGRA30_DAM_CH0_CTRL */
+#define TEGRA30_STEP_RESET 1
+#define TEGRA30_DAM_DATA_SYNC 1
+#define TEGRA30_DAM_DATA_SYNC_SHIFT 4
+#define TEGRA30_DAM_CH0_CTRL_FSIN_SHIFT 8
+#define TEGRA30_DAM_CH0_CTRL_STEP_SHIFT 16
+#define TEGRA30_DAM_CH0_CTRL_STEP_MASK (0xffff << 16)
+#define TEGRA30_DAM_CH0_CTRL_STEP_RESET (TEGRA30_STEP_RESET << 16)
+#define TEGRA30_DAM_CH0_CTRL_FSIN_MASK (0xf << 8)
+#define TEGRA30_DAM_CH0_CTRL_FSIN_FS8 (TEGRA30_DAM_FS_8KHZ << 8)
+#define TEGRA30_DAM_CH0_CTRL_FSIN_FS16 (TEGRA30_DAM_FS_16KHZ << 8)
+#define TEGRA30_DAM_CH0_CTRL_FSIN_FS44 (TEGRA30_DAM_FS_44KHZ << 8)
+#define TEGRA30_DAM_CH0_CTRL_FSIN_FS48 (TEGRA30_DAM_FS_48KHZ << 8)
+#define TEGRA30_DAM_CH0_CTRL_DATA_SYNC_MASK (0xf << TEGRA30_DAM_DATA_SYNC_SHIFT)
+#define TEGRA30_DAM_CH0_CTRL_DATA_SYNC (TEGRA30_DAM_DATA_SYNC << TEGRA30_DAM_DATA_SYNC_SHIFT)
+#define TEGRA30_DAM_CH0_CTRL_EN (1 << 0)
+
+
+/* Fields in TEGRA30_DAM_CH0_CONV */
+#define TEGRA30_DAM_GAIN 1
+#define TEGRA30_DAM_GAIN_SHIFT 0
+#define TEGRA30_DAM_CH0_CONV_GAIN (TEGRA30_DAM_GAIN << TEGRA30_DAM_GAIN_SHIFT)
+
+/* Fields in TEGRA30_DAM_CH1_CTRL */
+#define TEGRA30_DAM_CH1_CTRL_DATA_SYNC_MASK (0xf << TEGRA30_DAM_DATA_SYNC_SHIFT)
+#define TEGRA30_DAM_CH1_CTRL_DATA_SYNC (TEGRA30_DAM_DATA_SYNC << TEGRA30_DAM_DATA_SYNC_SHIFT)
+#define TEGRA30_DAM_CH1_CTRL_EN (1 << 0)
+
+/* Fields in TEGRA30_DAM_CH1_CONV */
+#define TEGRA30_DAM_CH1_CONV_GAIN (TEGRA30_DAM_GAIN << TEGRA30_DAM_GAIN_SHIFT)
+
+#define TEGRA30_AUDIO_CHANNELS_SHIFT 24
+#define TEGRA30_AUDIO_CHANNELS_MASK (7 << TEGRA30_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_CLIENT_CHANNELS_SHIFT 16
+#define TEGRA30_CLIENT_CHANNELS_MASK (7 << TEGRA30_CLIENT_CHANNELS_SHIFT)
+#define TEGRA30_AUDIO_BITS_SHIFT 12
+#define TEGRA30_AUDIO_BITS_MASK (7 << TEGRA30_AUDIO_BITS_SHIFT)
+#define TEGRA30_CLIENT_BITS_SHIFT 8
+#define TEGRA30_CLIENT_BITS_MASK (7 << TEGRA30_CLIENT_BITS_SHIFT)
+#define TEGRA30_CIF_DIRECTION_TX (0 << 2)
+#define TEGRA30_CIF_DIRECTION_RX (1 << 2)
+#define TEGRA30_CIF_BIT24 5
+#define TEGRA30_CIF_BIT16 3
+#define TEGRA30_CIF_CH1 0
+#define TEGRA30_CIF_MONOCONV_COPY (1<<0)
+#define TEGRA30_CIF_STEREOCONV_CH0 (0<<4)
+
+/*
+ * Audio sample rates
+ */
+#define TEGRA30_AUDIO_SAMPLERATE_8000 8000
+#define TEGRA30_AUDIO_SAMPLERATE_16000 16000
+#define TEGRA30_AUDIO_SAMPLERATE_44100 44100
+#define TEGRA30_AUDIO_SAMPLERATE_48000 48000
+
+#define TEGRA30_DAM_CHIN0_SRC 0
+#define TEGRA30_DAM_CHIN1 1
+#define TEGRA30_DAM_CHOUT 2
+#define TEGRA30_DAM_ENABLE 1
+#define TEGRA30_DAM_DISABLE 0
+
+struct tegra30_dam_context {
+ int outsamplerate;
+ bool ch_alloc[TEGRA30_DAM_NUM_INPUT_CHANNELS];
+ int ch_enable_refcnt[TEGRA30_DAM_NUM_INPUT_CHANNELS];
+ int ch_insamplerate[TEGRA30_DAM_NUM_INPUT_CHANNELS];
+#ifdef CONFIG_PM
+ int reg_cache[TEGRA30_DAM_CTRL_REGINDEX + 1];
+#endif
+ struct clk *dam_clk;
+ bool in_use;
+ void __iomem *damregs;
+ struct dentry *debug;
+};
+
+struct tegra30_dam_src_step_table {
+ int insample;
+ int outsample;
+ int stepreset;
+};
+
+#ifdef CONFIG_PM
+int tegra30_dam_resume(int ifc);
+#endif
+void tegra30_dam_disable_clock(int ifc);
+int tegra30_dam_enable_clock(int ifc);
+int tegra30_dam_allocate_controller(void);
+int tegra30_dam_allocate_channel(int ifc, int chid);
+int tegra30_dam_free_channel(int ifc, int chid);
+int tegra30_dam_free_controller(int ifc);
+void tegra30_dam_set_samplerate(int ifc, int chtype, int samplerate);
+int tegra30_dam_set_gain(int ifc, int chtype, int gain);
+int tegra30_dam_set_acif(int ifc, int chtype, unsigned int audio_channels,
+ unsigned int audio_bits, unsigned int client_channels,
+ unsigned int client_bits);
+void tegra30_dam_enable(int ifc, int on, int chtype);
+
+#endif
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
new file mode 100644
index 000000000000..c1de635765a5
--- /dev/null
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -0,0 +1,947 @@
+/*
+ * tegra30_i2s.c - Tegra 30 I2S driver
+ *
+ * Author: Stephen Warren <swarren@nvidia.com>
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra30_ahub.h"
+#include "tegra30_dam.h"
+#include "tegra30_i2s.h"
+
+#define DRV_NAME "tegra30-i2s"
+
+static struct tegra30_i2s i2scont[TEGRA30_NR_I2S_IFC];
+
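+/*
+ * Register writes are shadowed in reg_cache (when CONFIG_PM is set) so that
+ * tegra30_i2s_resume() can restore the state after a power cycle; reads
+ * always go straight to the hardware.
+ */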
+static inline void tegra30_i2s_write(struct tegra30_i2s *i2s, u32 reg, u32 val)
+{
+#ifdef CONFIG_PM
+ i2s->reg_cache[reg >> 2] = val;
+#endif
+ __raw_writel(val, i2s->regs + reg);
+}
+
+static inline u32 tegra30_i2s_read(struct tegra30_i2s *i2s, u32 reg)
+{
+ return __raw_readl(i2s->regs + reg);
+}
+
+static void tegra30_i2s_enable_clocks(struct tegra30_i2s *i2s)
+{
+ tegra30_ahub_enable_clocks();
+ clk_enable(i2s->clk_i2s);
+}
+
+static void tegra30_i2s_disable_clocks(struct tegra30_i2s *i2s)
+{
+ clk_disable(i2s->clk_i2s);
+ tegra30_ahub_disable_clocks();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra30_i2s_show(struct seq_file *s, void *unused)
+{
+#define REG(r) { r, #r }
+ static const struct {
+ int offset;
+ const char *name;
+ } regs[] = {
+ REG(TEGRA30_I2S_CTRL),
+ REG(TEGRA30_I2S_TIMING),
+ REG(TEGRA30_I2S_OFFSET),
+ REG(TEGRA30_I2S_CH_CTRL),
+ REG(TEGRA30_I2S_SLOT_CTRL),
+ REG(TEGRA30_I2S_CIF_TX_CTRL),
+ REG(TEGRA30_I2S_CIF_RX_CTRL),
+ REG(TEGRA30_I2S_FLOWCTL),
+ REG(TEGRA30_I2S_TX_STEP),
+ REG(TEGRA30_I2S_FLOW_STATUS),
+ REG(TEGRA30_I2S_FLOW_TOTAL),
+ REG(TEGRA30_I2S_FLOW_OVER),
+ REG(TEGRA30_I2S_FLOW_UNDER),
+ REG(TEGRA30_I2S_LCOEF_1_4_0),
+ REG(TEGRA30_I2S_LCOEF_1_4_1),
+ REG(TEGRA30_I2S_LCOEF_1_4_2),
+ REG(TEGRA30_I2S_LCOEF_1_4_3),
+ REG(TEGRA30_I2S_LCOEF_1_4_4),
+ REG(TEGRA30_I2S_LCOEF_1_4_5),
+ REG(TEGRA30_I2S_LCOEF_2_4_0),
+ REG(TEGRA30_I2S_LCOEF_2_4_1),
+ REG(TEGRA30_I2S_LCOEF_2_4_2),
+ };
+#undef REG
+
+ struct tegra30_i2s *i2s = s->private;
+ int i;
+
+ tegra30_i2s_enable_clocks(i2s);
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ u32 val = tegra30_i2s_read(i2s, regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+
+ tegra30_i2s_disable_clocks(i2s);
+
+ return 0;
+}
+
+static int tegra30_i2s_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra30_i2s_show, inode->i_private);
+}
+
+static const struct file_operations tegra30_i2s_debug_fops = {
+ .open = tegra30_i2s_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra30_i2s_debug_add(struct tegra30_i2s *i2s, int id)
+{
+ char name[] = DRV_NAME ".0";
+
+ snprintf(name, sizeof(name), DRV_NAME".%1d", id);
+ i2s->debug = debugfs_create_file(name, S_IRUGO, snd_soc_debugfs_root,
+ i2s, &tegra30_i2s_debug_fops);
+}
+
+static void tegra30_i2s_debug_remove(struct tegra30_i2s *i2s)
+{
+ if (i2s->debug)
+ debugfs_remove(i2s->debug);
+}
+#else
+static inline void tegra30_i2s_debug_add(struct tegra30_i2s *i2s, int id)
+{
+}
+
+static inline void tegra30_i2s_debug_remove(struct tegra30_i2s *i2s)
+{
+}
+#endif
+
+int tegra30_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ tegra30_i2s_enable_clocks(i2s);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* increment the playback ref count */
+ i2s->playback_ref_count++;
+
+ ret = tegra30_ahub_allocate_tx_fifo(&i2s->txcif,
+ &i2s->playback_dma_data.addr,
+ &i2s->playback_dma_data.req_sel);
+ i2s->playback_dma_data.wrap = 4;
+ i2s->playback_dma_data.width = 32;
+
+ if (!i2s->is_dam_used)
+ tegra30_ahub_set_rx_cif_source(
+ TEGRA30_AHUB_RXCIF_I2S0_RX0 + i2s->id,
+ i2s->txcif);
+ } else {
+ ret = tegra30_ahub_allocate_rx_fifo(&i2s->rxcif,
+ &i2s->capture_dma_data.addr,
+ &i2s->capture_dma_data.req_sel);
+ i2s->capture_dma_data.wrap = 4;
+ i2s->capture_dma_data.width = 32;
+ tegra30_ahub_set_rx_cif_source(i2s->rxcif,
+ TEGRA30_AHUB_TXCIF_I2S0_TX0 + i2s->id);
+ }
+
+ tegra30_i2s_disable_clocks(i2s);
+
+ return ret;
+}
+
+void tegra30_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ tegra30_i2s_enable_clocks(i2s);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (i2s->playback_ref_count == 1)
+ tegra30_ahub_unset_rx_cif_source(
+ TEGRA30_AHUB_RXCIF_I2S0_RX0 + i2s->id);
+
+ /* free the APBIF DMA channel */
+ tegra30_ahub_free_tx_fifo(i2s->txcif);
+
+ /* decrement the playback ref count */
+ i2s->playback_ref_count--;
+ } else {
+ tegra30_ahub_unset_rx_cif_source(i2s->rxcif);
+ tegra30_ahub_free_rx_fifo(i2s->rxcif);
+ }
+
+ tegra30_i2s_disable_clocks(i2s);
+}
+
+static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2s->reg_ctrl &= ~(TEGRA30_I2S_CTRL_FRAME_FORMAT_MASK |
+ TEGRA30_I2S_CTRL_LRCK_MASK);
+ i2s->reg_ch_ctrl &= ~TEGRA30_I2S_CH_CTRL_EGDE_CTRL_MASK;
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_NEG_EDGE;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_POS_EDGE;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_POS_EDGE;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_POS_EDGE;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_POS_EDGE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct device *dev = substream->pcm->card->dev;
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ u32 val;
+ int ret, sample_size, srate, i2sclock, bitcnt, sym_bitclk;
+ int i2s_client_ch;
+
+ i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_BIT_SIZE_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_BIT_SIZE_16;
+ sample_size = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+
+ if (i2s->reg_ctrl & TEGRA30_I2S_CTRL_MASTER_ENABLE) {
+ /* Final "* 2" required by Tegra hardware */
+ i2sclock = srate * params_channels(params) * sample_size * 2;
+
+ /* Additional "* 2" is needed for FSYNC mode */
+ if (i2s->reg_ctrl & TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC)
+ i2sclock *= 2;
+
+ ret = clk_set_parent(i2s->clk_i2s, i2s->clk_pll_a_out0);
+ if (ret) {
+ dev_err(dev, "Can't set parent of I2S clock\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(i2s->clk_i2s, i2sclock);
+ if (ret) {
+ dev_err(dev, "Can't set I2S clock rate: %d\n", ret);
+ return ret;
+ }
+
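+ /*
+  * For example, 44.1 kHz stereo S16 in LRCK mode gives
+  * i2sclock = 44100 * 2 * 16 * 2 = 2822400 Hz and
+  * bitcnt = 2822400 / (2 * 44100) - 1 = 31 with a symmetric bit clock.
+  */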
+ if (i2s->reg_ctrl & TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC) {
+ bitcnt = (i2sclock / srate) - 1;
+ sym_bitclk = !(i2sclock % srate);
+ } else {
+ bitcnt = (i2sclock / (2 * srate)) - 1;
+ sym_bitclk = !(i2sclock % (2 * srate));
+ }
+ val = bitcnt << TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
+
+ if (!sym_bitclk)
+ val |= TEGRA30_I2S_TIMING_NON_SYM_ENABLE;
+
+ tegra30_i2s_enable_clocks(i2s);
+
+ tegra30_i2s_write(i2s, TEGRA30_I2S_TIMING, val);
+ } else {
+ i2sclock = srate * params_channels(params) * sample_size;
+
+ ret = clk_set_rate(i2s->clk_i2s_sync, i2sclock);
+ if (ret) {
+ dev_err(dev, "Can't set I2S sync clock rate\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(i2s->clk_audio_2x, i2sclock);
+ if (ret) {
+ dev_err(dev, "Can't set I2S sync clock rate\n");
+ return ret;
+ }
+
+ ret = clk_set_parent(i2s->clk_i2s, i2s->clk_audio_2x);
+ if (ret) {
+ dev_err(dev, "Can't set parent of audio2x clock\n");
+ return ret;
+ }
+
+ tegra30_i2s_enable_clocks(i2s);
+ }
+
+ i2s_client_ch = (i2s->reg_ctrl & TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC) ?
+ params_channels(params) : 2;
+
+ val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+ ((params_channels(params) - 1) <<
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ ((i2s_client_ch - 1) <<
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CIF_RX_CTRL, val);
+
+ tegra30_ahub_set_tx_cif_channels(i2s->txcif,
+ params_channels(params),
+ params_channels(params));
+ } else {
+ val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CIF_TX_CTRL, val);
+
+ tegra30_ahub_set_rx_cif_channels(i2s->rxcif,
+ params_channels(params),
+ params_channels(params));
+ }
+
+ val = (1 << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT) |
+ (1 << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT);
+ tegra30_i2s_write(i2s, TEGRA30_I2S_OFFSET, val);
+
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CH_CTRL, i2s->reg_ch_ctrl);
+
+ val = tegra30_i2s_read(i2s, TEGRA30_I2S_SLOT_CTRL);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ val &= ~TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_MASK;
+ val |= (1 << TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_SHIFT);
+ } else {
+ val &= ~TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_MASK;
+ val |= (1 << TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT);
+ }
+ tegra30_i2s_write(i2s, TEGRA30_I2S_SLOT_CTRL, val);
+
+ tegra30_i2s_disable_clocks(i2s);
+
+ return 0;
+}
+
+static void tegra30_i2s_start_playback(struct tegra30_i2s *i2s)
+{
+ tegra30_ahub_enable_tx_fifo(i2s->txcif);
+ /* if this is the only user of the I2S TX path, enable it */
+ if (i2s->playback_ref_count == 1) {
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_TX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ }
+}
+
+static void tegra30_i2s_stop_playback(struct tegra30_i2s *i2s)
+{
+ tegra30_ahub_disable_tx_fifo(i2s->txcif);
+ /* if this is the only user of the I2S TX path, disable it */
+ if (i2s->playback_ref_count == 1) {
+ i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_TX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ }
+}
+
+static void tegra30_i2s_start_capture(struct tegra30_i2s *i2s)
+{
+ tegra30_ahub_enable_rx_fifo(i2s->rxcif);
+ if (!i2s->is_call_mode_rec) {
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_RX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ }
+}
+
+static void tegra30_i2s_stop_capture(struct tegra30_i2s *i2s)
+{
+ tegra30_ahub_disable_rx_fifo(i2s->rxcif);
+ if (!i2s->is_call_mode_rec) {
+ i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_RX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ }
+}
+
+static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ tegra30_i2s_enable_clocks(i2s);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra30_i2s_start_playback(i2s);
+ else
+ tegra30_i2s_start_capture(i2s);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra30_i2s_stop_playback(i2s);
+ else
+ tegra30_i2s_stop_capture(i2s);
+ tegra30_i2s_disable_clocks(i2s);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra30_i2s_probe(struct snd_soc_dai *dai)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+#ifdef CONFIG_PM
+ int i;
+#endif
+
+ dai->capture_dma_data = &i2s->capture_dma_data;
+ dai->playback_dma_data = &i2s->playback_dma_data;
+
+#ifdef CONFIG_PM
+ tegra30_i2s_enable_clocks(i2s);
+
+ /* cache the POR values of the I2S registers */
+ for (i = 0; i < ((TEGRA30_I2S_CIF_TX_CTRL>>2) + 1); i++)
+ i2s->reg_cache[i] = tegra30_i2s_read(i2s, i<<2);
+
+ tegra30_i2s_disable_clocks(i2s);
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int tegra30_i2s_resume(struct snd_soc_dai *cpu_dai)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ int i, ret = 0;
+
+ tegra30_i2s_enable_clocks(i2s);
+
+ /* restore the I2S registers */
+ for (i = 0; i < ((TEGRA30_I2S_CIF_TX_CTRL>>2) + 1); i++)
+ tegra30_i2s_write(i2s, i<<2, i2s->reg_cache[i]);
+
+ tegra30_ahub_apbif_resume();
+
+ tegra30_i2s_disable_clocks(i2s);
+
+ if (i2s->dam_ch_refcount)
+ ret = tegra30_dam_resume(i2s->dam_ifc);
+
+ return ret;
+}
+#else
+#define tegra30_i2s_resume NULL
+#endif
+
+static struct snd_soc_dai_ops tegra30_i2s_dai_ops = {
+ .startup = tegra30_i2s_startup,
+ .shutdown = tegra30_i2s_shutdown,
+ .set_fmt = tegra30_i2s_set_fmt,
+ .hw_params = tegra30_i2s_hw_params,
+ .trigger = tegra30_i2s_trigger,
+};
+
+#define TEGRA30_I2S_DAI(id) \
+ { \
+ .name = DRV_NAME "." #id, \
+ .probe = tegra30_i2s_probe, \
+ .resume = tegra30_i2s_resume, \
+ .playback = { \
+ .channels_min = 1, \
+ .channels_max = 2, \
+ .rates = SNDRV_PCM_RATE_8000_96000, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE, \
+ }, \
+ .capture = { \
+ .channels_min = 1, \
+ .channels_max = 2, \
+ .rates = SNDRV_PCM_RATE_8000_96000, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE, \
+ }, \
+ .ops = &tegra30_i2s_dai_ops, \
+ .symmetric_rates = 1, \
+ }
+
+struct snd_soc_dai_driver tegra30_i2s_dai[] = {
+ TEGRA30_I2S_DAI(0),
+ TEGRA30_I2S_DAI(1),
+ TEGRA30_I2S_DAI(2),
+ TEGRA30_I2S_DAI(3),
+ TEGRA30_I2S_DAI(4),
+};
+
+static int configure_baseband_i2s(struct tegra30_i2s *i2s, int is_i2smaster,
+ int is_formatdsp, int channels, int rate, int bitsize)
+{
+ u32 val;
+ int i2sclock, bitcnt;
+
+ i2s->reg_ctrl &= ~(TEGRA30_I2S_CTRL_FRAME_FORMAT_MASK |
+ TEGRA30_I2S_CTRL_LRCK_MASK |
+ TEGRA30_I2S_CTRL_MASTER_ENABLE);
+ i2s->reg_ch_ctrl &= ~TEGRA30_I2S_CH_CTRL_EGDE_CTRL_MASK;
+
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_BIT_SIZE_16;
+
+ if (is_i2smaster)
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
+
+ if (is_formatdsp) {
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_NEG_EDGE;
+ } else {
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
+ i2s->reg_ch_ctrl |= TEGRA30_I2S_CH_CTRL_EGDE_CTRL_POS_EDGE;
+ }
+
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CH_CTRL, i2s->reg_ch_ctrl);
+
+ val = tegra30_i2s_read(i2s, TEGRA30_I2S_SLOT_CTRL);
+ val &= ~(TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_MASK |
+ TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_MASK);
+ val |= (1 << TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_SHIFT |
+ 1 << TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT);
+ tegra30_i2s_write(i2s, TEGRA30_I2S_SLOT_CTRL, val);
+
+ val = (1 << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT) |
+ (1 << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT);
+ tegra30_i2s_write(i2s, TEGRA30_I2S_OFFSET, val);
+
+ i2sclock = rate * channels * bitsize * 2;
+
+ /* the baseband link needs an additional 8x clock in DSP mode */
+ if (is_formatdsp)
+ i2sclock *= 8;
+
+ clk_set_rate(i2s->clk_i2s, i2sclock);
+
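+ /*
+  * For example, an 8 kHz mono 16-bit DSP-mode baseband link gives
+  * i2sclock = 8000 * 1 * 16 * 2 * 8 = 2048000 Hz and
+  * bitcnt = 2048000 / 8000 - 1 = 255.
+  */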
+ if (is_formatdsp) {
+ bitcnt = (i2sclock/rate) - 1;
+ val = bitcnt << TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
+ if (i2sclock % (rate))
+ val |= TEGRA30_I2S_TIMING_NON_SYM_ENABLE;
+ } else {
+ bitcnt = (i2sclock/(2*rate)) - 1;
+ val = bitcnt << TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
+ if (i2sclock % (2*rate))
+ val |= TEGRA30_I2S_TIMING_NON_SYM_ENABLE;
+ }
+
+ tegra30_i2s_write(i2s, TEGRA30_I2S_TIMING, val);
+
+ /* configure the I2S CIF */
+ val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+ ((channels - 1) << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ ((channels - 1) << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16;
+ val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CIF_RX_CTRL, val);
+
+ val &= ~TEGRA30_AUDIOCIF_CTRL_DIRECTION_MASK;
+ val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
+ tegra30_i2s_write(i2s, TEGRA30_I2S_CIF_TX_CTRL, val);
+
+ return 0;
+}
+
+static int configure_dam(struct tegra30_i2s *i2s, int out_channel,
+ int out_rate, int out_bitsize, int in_channels,
+ int in_rate, int in_bitsize)
+{
+ if (!i2s->dam_ch_refcount)
+ i2s->dam_ifc = tegra30_dam_allocate_controller();
+
+ tegra30_dam_allocate_channel(i2s->dam_ifc, TEGRA30_DAM_CHIN0_SRC);
+ i2s->dam_ch_refcount++;
+ tegra30_dam_enable_clock(i2s->dam_ifc);
+ tegra30_dam_set_samplerate(i2s->dam_ifc, TEGRA30_DAM_CHOUT, out_rate);
+ tegra30_dam_set_samplerate(i2s->dam_ifc, TEGRA30_DAM_CHIN0_SRC,
+ in_rate);
+ tegra30_dam_set_gain(i2s->dam_ifc, TEGRA30_DAM_CHIN0_SRC, 0x1000);
+ tegra30_dam_set_acif(i2s->dam_ifc, TEGRA30_DAM_CHIN0_SRC,
+ in_channels, in_bitsize, 1, 16);
+ tegra30_dam_set_acif(i2s->dam_ifc, TEGRA30_DAM_CHOUT,
+ out_channel, out_bitsize, out_channel, out_bitsize);
+
+ return 0;
+}
+
+
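+/*
+ * Voice call routing: each I2S controller gets its own DAM, whose input
+ * channel 0 sample-rate converts the far side's stream to the local rate.
+ * The AHUB is wired so that the codec I2S TX feeds the baseband DAM and the
+ * baseband I2S TX feeds the codec DAM, while each DAM output feeds the
+ * corresponding I2S RX. The in0 RX CIF of DAM n is
+ * TEGRA30_AHUB_RXCIF_DAM0_RX0 + 2 * n, hence the dam_ifc * 2 below.
+ */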
+int tegra30_make_voice_call_connections(struct codec_config *codec_info,
+ struct codec_config *bb_info)
+{
+ struct tegra30_i2s *codec_i2s;
+ struct tegra30_i2s *bb_i2s;
+
+ codec_i2s = &i2scont[codec_info->i2s_id];
+ bb_i2s = &i2scont[bb_info->i2s_id];
+ tegra30_i2s_enable_clocks(codec_i2s);
+ tegra30_i2s_enable_clocks(bb_i2s);
+
+ /* increment the codec i2s playback ref count */
+ codec_i2s->playback_ref_count++;
+ bb_i2s->playback_ref_count++;
+
+ /* configure the codec I2S */
+ configure_baseband_i2s(codec_i2s, codec_info->is_i2smaster,
+ codec_info->is_format_dsp, codec_info->channels,
+ codec_info->rate, codec_info->bitsize);
+
+ /* configure the baseband I2S */
+ configure_baseband_i2s(bb_i2s, bb_info->is_i2smaster,
+ bb_info->is_format_dsp, bb_info->channels,
+ bb_info->rate, bb_info->bitsize);
+
+ /* configure the codec DAM */
+ configure_dam(codec_i2s, codec_info->channels,
+ codec_info->rate, codec_info->bitsize, bb_info->channels,
+ bb_info->rate, bb_info->bitsize);
+
+ /* configure the baseband DAM */
+ configure_dam(bb_i2s, bb_info->channels,
+ bb_info->rate, bb_info->bitsize, codec_info->channels,
+ codec_info->rate, codec_info->bitsize);
+
+ /* make the AHUB connections */
+
+ /* if this is the only user of the codec I2S TX, make the AHUB I2S RX connection */
+ if (codec_i2s->playback_ref_count == 1) {
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_I2S0_RX0 +
+ codec_info->i2s_id, TEGRA30_AHUB_TXCIF_DAM0_TX0 +
+ codec_i2s->dam_ifc);
+ }
+
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_I2S0_RX0 +
+ bb_info->i2s_id, TEGRA30_AHUB_TXCIF_DAM0_TX0 +
+ bb_i2s->dam_ifc);
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX0 +
+ (codec_i2s->dam_ifc*2), TEGRA30_AHUB_TXCIF_I2S0_TX0 +
+ bb_info->i2s_id);
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX0 +
+ (bb_i2s->dam_ifc*2), TEGRA30_AHUB_TXCIF_I2S0_TX0 +
+ codec_info->i2s_id);
+
+ /* enable the DAMs and I2S controllers */
+ tegra30_dam_enable(codec_i2s->dam_ifc, TEGRA30_DAM_ENABLE,
+ TEGRA30_DAM_CHIN0_SRC);
+ tegra30_dam_enable(bb_i2s->dam_ifc, TEGRA30_DAM_ENABLE,
+ TEGRA30_DAM_CHIN0_SRC);
+
+ /* if this is the only user of the codec I2S TX, enable it */
+ if (codec_i2s->playback_ref_count == 1)
+ codec_i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_TX;
+
+ codec_i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_RX;
+ tegra30_i2s_write(codec_i2s, TEGRA30_I2S_CTRL,
+ codec_i2s->reg_ctrl);
+ bb_i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_TX;
+ bb_i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_RX;
+ tegra30_i2s_write(bb_i2s, TEGRA30_I2S_CTRL,
+ bb_i2s->reg_ctrl);
+
+ return 0;
+}
+
+int tegra30_break_voice_call_connections(struct codec_config *codec_info,
+ struct codec_config *bb_info)
+{
+ struct tegra30_i2s *codec_i2s;
+ struct tegra30_i2s *bb_i2s;
+
+ codec_i2s = &i2scont[codec_info->i2s_id];
+ bb_i2s = &i2scont[bb_info->i2s_id];
+
+ /* disconnect the ahub connections */
+
+ /* if this is the only user of the codec I2S TX, break the AHUB I2S RX connection */
+ if (codec_i2s->playback_ref_count == 1)
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_I2S0_RX0
+ + codec_info->i2s_id);
+
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_I2S0_RX0
+ + bb_info->i2s_id);
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX0
+ + (codec_i2s->dam_ifc*2));
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX0
+ + (bb_i2s->dam_ifc*2));
+
+ /* disable the i2s */
+
+ /* if this is the only user of the codec I2S TX, disable it */
+ if (codec_i2s->playback_ref_count == 1)
+ codec_i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_TX;
+
+ codec_i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_RX;
+ tegra30_i2s_write(codec_i2s, TEGRA30_I2S_CTRL, codec_i2s->reg_ctrl);
+ bb_i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_TX;
+ bb_i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_RX;
+ tegra30_i2s_write(bb_i2s, TEGRA30_I2S_CTRL, bb_i2s->reg_ctrl);
+ tegra30_i2s_disable_clocks(codec_i2s);
+ tegra30_i2s_disable_clocks(bb_i2s);
+
+ /* decrement the codec i2s playback ref count */
+ codec_i2s->playback_ref_count--;
+ bb_i2s->playback_ref_count--;
+
+ /* disable the codec dam */
+ tegra30_dam_enable(codec_i2s->dam_ifc,
+ TEGRA30_DAM_DISABLE, TEGRA30_DAM_CHIN0_SRC);
+ tegra30_dam_disable_clock(codec_i2s->dam_ifc);
+ tegra30_dam_free_channel(codec_i2s->dam_ifc,
+ TEGRA30_DAM_CHIN0_SRC);
+ codec_i2s->dam_ch_refcount--;
+ if (!codec_i2s->dam_ch_refcount)
+ tegra30_dam_free_controller(codec_i2s->dam_ifc);
+
+ /* disable the bb dam */
+ tegra30_dam_enable(bb_i2s->dam_ifc, TEGRA30_DAM_DISABLE,
+ TEGRA30_DAM_CHIN0_SRC);
+ tegra30_dam_disable_clock(bb_i2s->dam_ifc);
+ tegra30_dam_free_channel(bb_i2s->dam_ifc, TEGRA30_DAM_CHIN0_SRC);
+ bb_i2s->dam_ch_refcount--;
+ if (!bb_i2s->dam_ch_refcount)
+ tegra30_dam_free_controller(bb_i2s->dam_ifc);
+
+ return 0;
+}
+
+static __devinit int tegra30_i2s_platform_probe(struct platform_device *pdev)
+{
+ struct tegra30_i2s *i2s;
+ struct resource *mem, *memregion;
+ int ret;
+
+ if ((pdev->id < 0) ||
+ (pdev->id >= ARRAY_SIZE(tegra30_i2s_dai))) {
+ dev_err(&pdev->dev, "ID %d out of range\n", pdev->id);
+ return -EINVAL;
+ }
+
+ i2s = &i2scont[pdev->id];
+ dev_set_drvdata(&pdev->dev, i2s);
+ i2s->id = pdev->id;
+
+ i2s->clk_i2s = clk_get(&pdev->dev, "i2s");
+ if (IS_ERR(i2s->clk_i2s)) {
+ dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
+ ret = PTR_ERR(i2s->clk_i2s);
+ goto exit;
+ }
+
+ i2s->clk_i2s_sync = clk_get(&pdev->dev, "ext_audio_sync");
+ if (IS_ERR(i2s->clk_i2s_sync)) {
+ dev_err(&pdev->dev, "Can't retrieve i2s_sync clock\n");
+ ret = PTR_ERR(i2s->clk_i2s_sync);
+ goto err_i2s_clk_put;
+ }
+
+ i2s->clk_audio_2x = clk_get(&pdev->dev, "audio_sync_2x");
+ if (IS_ERR(i2s->clk_audio_2x)) {
+ dev_err(&pdev->dev, "Can't retrieve audio 2x clock\n");
+ ret = PTR_ERR(i2s->clk_audio_2x);
+ goto err_i2s_sync_clk_put;
+ }
+
+ i2s->clk_pll_a_out0 = clk_get_sys(NULL, "pll_a_out0");
+ if (IS_ERR(i2s->clk_pll_a_out0)) {
+ dev_err(&pdev->dev, "Can't retrieve pll_a_out0 clock\n");
+ ret = PTR_ERR(i2s->clk_pll_a_out0);
+ goto err_audio_2x_clk_put;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_pll_a_out0_clk_put;
+ }
+
+ memregion = request_mem_region(mem->start, resource_size(mem),
+ DRV_NAME);
+ if (!memregion) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_pll_a_out0_clk_put;
+ }
+
+ i2s->regs = ioremap(mem->start, resource_size(mem));
+ if (!i2s->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = snd_soc_register_dai(&pdev->dev, &tegra30_i2s_dai[pdev->id]);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
+ goto err_unmap;
+ }
+
+ tegra30_i2s_debug_add(i2s, pdev->id);
+
+ return 0;
+
+err_unmap:
+ iounmap(i2s->regs);
+err_release:
+ release_mem_region(mem->start, resource_size(mem));
+err_pll_a_out0_clk_put:
+ clk_put(i2s->clk_pll_a_out0);
+err_audio_2x_clk_put:
+ clk_put(i2s->clk_audio_2x);
+err_i2s_sync_clk_put:
+ clk_put(i2s->clk_i2s_sync);
+err_i2s_clk_put:
+ clk_put(i2s->clk_i2s);
+exit:
+ return ret;
+}
+
+static int __devexit tegra30_i2s_platform_remove(struct platform_device *pdev)
+{
+ struct tegra30_i2s *i2s = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+
+ snd_soc_unregister_dai(&pdev->dev);
+
+ tegra30_i2s_debug_remove(i2s);
+
+ iounmap(i2s->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(i2s->clk_pll_a_out0);
+ clk_put(i2s->clk_audio_2x);
+ clk_put(i2s->clk_i2s_sync);
+ clk_put(i2s->clk_i2s);
+
+ /* i2s points into the static i2scont[] array; nothing to free */
+
+ return 0;
+}
+
+static struct platform_driver tegra30_i2s_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra30_i2s_platform_probe,
+ .remove = __devexit_p(tegra30_i2s_platform_remove),
+};
+
+static int __init snd_tegra30_i2s_init(void)
+{
+ return platform_driver_register(&tegra30_i2s_driver);
+}
+module_init(snd_tegra30_i2s_init);
+
+static void __exit snd_tegra30_i2s_exit(void)
+{
+ platform_driver_unregister(&tegra30_i2s_driver);
+}
+module_exit(snd_tegra30_i2s_exit);
+
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_DESCRIPTION("Tegra 30 I2S ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra30_i2s.h b/sound/soc/tegra/tegra30_i2s.h
new file mode 100644
index 000000000000..b9baddd5db8e
--- /dev/null
+++ b/sound/soc/tegra/tegra30_i2s.h
@@ -0,0 +1,274 @@
+/*
+ * tegra30_i2s.h - Definitions for Tegra 30 I2S driver
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA30_I2S_H__
+#define __TEGRA30_I2S_H__
+
+#include "tegra_pcm.h"
+
+/* Register offsets from TEGRA30_I2S*_BASE */
+
+#define TEGRA30_I2S_CTRL 0x0
+#define TEGRA30_I2S_TIMING 0x4
+#define TEGRA30_I2S_OFFSET 0x08
+#define TEGRA30_I2S_CH_CTRL 0x0c
+#define TEGRA30_I2S_SLOT_CTRL 0x10
+#define TEGRA30_I2S_CIF_RX_CTRL 0x14
+#define TEGRA30_I2S_CIF_TX_CTRL 0x18
+#define TEGRA30_I2S_FLOWCTL 0x1c
+#define TEGRA30_I2S_TX_STEP 0x20
+#define TEGRA30_I2S_FLOW_STATUS 0x24
+#define TEGRA30_I2S_FLOW_TOTAL 0x28
+#define TEGRA30_I2S_FLOW_OVER 0x2c
+#define TEGRA30_I2S_FLOW_UNDER 0x30
+#define TEGRA30_I2S_LCOEF_1_4_0 0x34
+#define TEGRA30_I2S_LCOEF_1_4_1 0x38
+#define TEGRA30_I2S_LCOEF_1_4_2 0x3c
+#define TEGRA30_I2S_LCOEF_1_4_3 0x40
+#define TEGRA30_I2S_LCOEF_1_4_4 0x44
+#define TEGRA30_I2S_LCOEF_1_4_5 0x48
+#define TEGRA30_I2S_LCOEF_2_4_0 0x4c
+#define TEGRA30_I2S_LCOEF_2_4_1 0x50
+#define TEGRA30_I2S_LCOEF_2_4_2 0x54
+
+/* Fields in TEGRA30_I2S_CTRL */
+
+#define TEGRA30_I2S_CTRL_XFER_EN_TX (1 << 31)
+#define TEGRA30_I2S_CTRL_XFER_EN_RX (1 << 30)
+#define TEGRA30_I2S_CTRL_CG_EN (1 << 29)
+#define TEGRA30_I2S_CTRL_SOFT_RESET (1 << 28)
+#define TEGRA30_I2S_CTRL_TX_FLOWCTL_EN (1 << 27)
+
+#define TEGRA30_I2S_CTRL_OBS_SEL_SHIFT 24
+#define TEGRA30_I2S_CTRL_OBS_SEL_MASK (7 << TEGRA30_I2S_CTRL_OBS_SEL_SHIFT)
+
+#define TEGRA30_I2S_FRAME_FORMAT_LRCK 0
+#define TEGRA30_I2S_FRAME_FORMAT_FSYNC 1
+
+#define TEGRA30_I2S_CTRL_FRAME_FORMAT_SHIFT 12
+#define TEGRA30_I2S_CTRL_FRAME_FORMAT_MASK (7 << TEGRA30_I2S_CTRL_FRAME_FORMAT_SHIFT)
+#define TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK (TEGRA30_I2S_FRAME_FORMAT_LRCK << TEGRA30_I2S_CTRL_FRAME_FORMAT_SHIFT)
+#define TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC (TEGRA30_I2S_FRAME_FORMAT_FSYNC << TEGRA30_I2S_CTRL_FRAME_FORMAT_SHIFT)
+
+#define TEGRA30_I2S_CTRL_MASTER_ENABLE (1 << 10)
+
+#define TEGRA30_I2S_LRCK_LEFT_LOW 0
+#define TEGRA30_I2S_LRCK_RIGHT_LOW 1
+
+#define TEGRA30_I2S_CTRL_LRCK_SHIFT 9
+#define TEGRA30_I2S_CTRL_LRCK_MASK (1 << TEGRA30_I2S_CTRL_LRCK_SHIFT)
+#define TEGRA30_I2S_CTRL_LRCK_L_LOW (TEGRA30_I2S_LRCK_LEFT_LOW << TEGRA30_I2S_CTRL_LRCK_SHIFT)
+#define TEGRA30_I2S_CTRL_LRCK_R_LOW (TEGRA30_I2S_LRCK_RIGHT_LOW << TEGRA30_I2S_CTRL_LRCK_SHIFT)
+
+#define TEGRA30_I2S_CTRL_LPBK_ENABLE (1 << 8)
+
+#define TEGRA30_I2S_BIT_CODE_LINEAR 0
+#define TEGRA30_I2S_BIT_CODE_ULAW 1
+#define TEGRA30_I2S_BIT_CODE_ALAW 2
+
+#define TEGRA30_I2S_CTRL_BIT_CODE_SHIFT 4
+#define TEGRA30_I2S_CTRL_BIT_CODE_MASK (3 << TEGRA30_I2S_CTRL_BIT_CODE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_CODE_LINEAR (TEGRA30_I2S_BIT_CODE_LINEAR << TEGRA30_I2S_CTRL_BIT_CODE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_CODE_ULAW (TEGRA30_I2S_BIT_CODE_ULAW << TEGRA30_I2S_CTRL_BIT_CODE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_CODE_ALAW (TEGRA30_I2S_BIT_CODE_ALAW << TEGRA30_I2S_CTRL_BIT_CODE_SHIFT)
+
+#define TEGRA30_I2S_BITS_8 1
+#define TEGRA30_I2S_BITS_12 2
+#define TEGRA30_I2S_BITS_16 3
+#define TEGRA30_I2S_BITS_20 4
+#define TEGRA30_I2S_BITS_24 5
+#define TEGRA30_I2S_BITS_28 6
+#define TEGRA30_I2S_BITS_32 7
+
+/* Sample container size; see {RX,TX}_MASK field in CH_CTRL below */
+#define TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT 0
+#define TEGRA30_I2S_CTRL_BIT_SIZE_MASK (7 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_8 (TEGRA30_I2S_BITS_8 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_12 (TEGRA30_I2S_BITS_12 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_16 (TEGRA30_I2S_BITS_16 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_20 (TEGRA30_I2S_BITS_20 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_24 (TEGRA30_I2S_BITS_24 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_28 (TEGRA30_I2S_BITS_28 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+#define TEGRA30_I2S_CTRL_BIT_SIZE_32 (TEGRA30_I2S_BITS_32 << TEGRA30_I2S_CTRL_BIT_SIZE_SHIFT)
+
+/* Fields in TEGRA30_I2S_TIMING */
+
+#define TEGRA30_I2S_TIMING_NON_SYM_ENABLE (1 << 12)
+#define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT 0
+#define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US 0x7fff
+#define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK (TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US << TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT)
+
+/* Fields in TEGRA30_I2S_OFFSET */
+
+#define TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT 16
+#define TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_MASK_US 0x7ff
+#define TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_MASK (TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_MASK_US << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT)
+#define TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT 0
+#define TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_MASK_US 0x7ff
+#define TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_MASK (TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_MASK_US << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT)
+
+/* Fields in TEGRA30_I2S_CH_CTRL */
+
+/* (FSYNC width - 1) in bit clocks */
+#define TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_SHIFT 24
+#define TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_MASK_US 0xff
+#define TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_MASK (TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_MASK_US << TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_SHIFT)
+
+#define TEGRA30_I2S_HIGHZ_NO 0
+#define TEGRA30_I2S_HIGHZ_YES 1
+#define TEGRA30_I2S_HIGHZ_ON_HALF_BIT_CLK 2
+
+#define TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_SHIFT 12
+#define TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_MASK (3 << TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_NO (TEGRA30_I2S_HIGHZ_NO << TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_YES (TEGRA30_I2S_HIGHZ_YES << TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_ON_HALF_BIT_CLK (TEGRA30_I2S_HIGHZ_ON_HALF_BIT_CLK << TEGRA30_I2S_CH_CTRL_HIGHZ_CTRL_SHIFT)
+
+#define TEGRA30_I2S_MSB_FIRST 0
+#define TEGRA30_I2S_LSB_FIRST 1
+
+#define TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_SHIFT 10
+#define TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_MASK (1 << TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_MSB_FIRST (TEGRA30_I2S_MSB_FIRST << TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_LSB_FIRST (TEGRA30_I2S_LSB_FIRST << TEGRA30_I2S_CH_CTRL_RX_BIT_ORDER_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_SHIFT 9
+#define TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_MASK (1 << TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_MSB_FIRST (TEGRA30_I2S_MSB_FIRST << TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_LSB_FIRST (TEGRA30_I2S_LSB_FIRST << TEGRA30_I2S_CH_CTRL_TX_BIT_ORDER_SHIFT)
+
+#define TEGRA30_I2S_POS_EDGE 0
+#define TEGRA30_I2S_NEG_EDGE 1
+
+#define TEGRA30_I2S_CH_CTRL_EGDE_CTRL_SHIFT 8
+#define TEGRA30_I2S_CH_CTRL_EGDE_CTRL_MASK (1 << TEGRA30_I2S_CH_CTRL_EGDE_CTRL_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_EGDE_CTRL_POS_EDGE (TEGRA30_I2S_POS_EDGE << TEGRA30_I2S_CH_CTRL_EGDE_CTRL_SHIFT)
+#define TEGRA30_I2S_CH_CTRL_EGDE_CTRL_NEG_EDGE (TEGRA30_I2S_NEG_EDGE << TEGRA30_I2S_CH_CTRL_EGDE_CTRL_SHIFT)
+
+/* Sample size is # bits from BIT_SIZE minus this field */
+#define TEGRA30_I2S_CH_CTRL_RX_MASK_BITS_SHIFT 4
+#define TEGRA30_I2S_CH_CTRL_RX_MASK_BITS_MASK_US 7
+#define TEGRA30_I2S_CH_CTRL_RX_MASK_BITS_MASK (TEGRA30_I2S_CH_CTRL_RX_MASK_BITS_MASK_US << TEGRA30_I2S_CH_CTRL_RX_MASK_BITS_SHIFT)
+
+#define TEGRA30_I2S_CH_CTRL_TX_MASK_BITS_SHIFT 0
+#define TEGRA30_I2S_CH_CTRL_TX_MASK_BITS_MASK_US 7
+#define TEGRA30_I2S_CH_CTRL_TX_MASK_BITS_MASK (TEGRA30_I2S_CH_CTRL_TX_MASK_BITS_MASK_US << TEGRA30_I2S_CH_CTRL_TX_MASK_BITS_SHIFT)
+
+/* Fields in TEGRA30_I2S_SLOT_CTRL */
+
+/* Number of slots in frame, minus 1 */
+#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_SHIFT 16
+#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK_US 7
+#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK (TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK_US << TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_SHIFT)
+
+/* TDM mode slot enable bitmask */
+#define TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT 8
+#define TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_MASK (0xff << TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT)
+
+#define TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_SHIFT 0
+#define TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_MASK (0xff << TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_SHIFT)
+
+/* Fields in TEGRA30_I2S_CIF_RX_CTRL */
+/* Uses the TEGRA30_AUDIOCIF_CTRL_* fields from tegra30_ahub.h */
+
+/* Fields in TEGRA30_I2S_CIF_TX_CTRL */
+/* Uses the TEGRA30_AUDIOCIF_CTRL_* fields from tegra30_ahub.h */
+
+/* Fields in TEGRA30_I2S_FLOWCTL */
+
+#define TEGRA30_I2S_FILTER_LINEAR 0
+#define TEGRA30_I2S_FILTER_QUAD 1
+
+#define TEGRA30_I2S_FLOWCTL_FILTER_SHIFT 31
+#define TEGRA30_I2S_FLOWCTL_FILTER_MASK (1 << TEGRA30_I2S_FLOWCTL_FILTER_SHIFT)
+#define TEGRA30_I2S_FLOWCTL_FILTER_LINEAR (TEGRA30_I2S_FILTER_LINEAR << TEGRA30_I2S_FLOWCTL_FILTER_SHIFT)
+#define TEGRA30_I2S_FLOWCTL_FILTER_QUAD (TEGRA30_I2S_FILTER_QUAD << TEGRA30_I2S_FLOWCTL_FILTER_SHIFT)
+
+/* Fields in TEGRA30_I2S_TX_STEP */
+
+#define TEGRA30_I2S_TX_STEP_SHIFT 0
+#define TEGRA30_I2S_TX_STEP_MASK_US 0xffff
+#define TEGRA30_I2S_TX_STEP_MASK (TEGRA30_I2S_TX_STEP_MASK_US << TEGRA30_I2S_TX_STEP_SHIFT)
+
+/* Fields in TEGRA30_I2S_FLOW_STATUS */
+
+#define TEGRA30_I2S_FLOW_STATUS_UNDERFLOW (1 << 31)
+#define TEGRA30_I2S_FLOW_STATUS_OVERFLOW (1 << 30)
+#define TEGRA30_I2S_FLOW_STATUS_MONITOR_INT_EN (1 << 4)
+#define TEGRA30_I2S_FLOW_STATUS_COUNTER_CLR (1 << 3)
+#define TEGRA30_I2S_FLOW_STATUS_MONITOR_CLR (1 << 2)
+#define TEGRA30_I2S_FLOW_STATUS_COUNTER_EN (1 << 1)
+#define TEGRA30_I2S_FLOW_STATUS_MONITOR_EN (1 << 0)
+
+/*
+ * There are no fields in TEGRA30_I2S_FLOW_TOTAL, TEGRA30_I2S_FLOW_OVER,
+ * TEGRA30_I2S_FLOW_UNDER; they are counters taking the whole register.
+ */
+
+/* Fields in TEGRA30_I2S_LCOEF_* */
+
+#define TEGRA30_I2S_LCOEF_COEF_SHIFT 0
+#define TEGRA30_I2S_LCOEF_COEF_MASK_US 0xffff
+#define TEGRA30_I2S_LCOEF_COEF_MASK (TEGRA30_I2S_LCOEF_COEF_MASK_US << TEGRA30_I2S_LCOEF_COEF_SHIFT)
+
+/* Number of I2S controllers */
+#define TEGRA30_NR_I2S_IFC 5
+
+struct tegra30_i2s {
+ int id;
+ struct clk *clk_i2s;
+ struct clk *clk_i2s_sync;
+ struct clk *clk_audio_2x;
+ struct clk *clk_pll_a_out0;
+ enum tegra30_ahub_rxcif rxcif;
+ struct tegra_pcm_dma_params capture_dma_data;
+ enum tegra30_ahub_txcif txcif;
+ struct tegra_pcm_dma_params playback_dma_data;
+ void __iomem *regs;
+ struct dentry *debug;
+ u32 reg_ctrl;
+ u32 reg_ch_ctrl;
+ int dam_ifc;
+ int dam_ch_refcount;
+ int playback_ref_count;
+ bool is_dam_used;
+#ifdef CONFIG_PM
+ u32 reg_cache[(TEGRA30_I2S_CIF_TX_CTRL >> 2) + 1];
+#endif
+ int call_record_dam_ifc;
+ int is_call_mode_rec;
+};
+
+struct codec_config {
+ int i2s_id;
+ int rate;
+ int channels;
+ int bitsize;
+ int is_i2smaster;
+ int is_format_dsp;
+};
+
+int tegra30_make_voice_call_connections(struct codec_config *codec_info,
+ struct codec_config *bb_info);
+
+int tegra30_break_voice_call_connections(struct codec_config *codec_info,
+ struct codec_config *bb_info);
+
+#endif
diff --git a/sound/soc/tegra/tegra30_spdif.c b/sound/soc/tegra/tegra30_spdif.c
new file mode 100644
index 000000000000..038127c0afb0
--- /dev/null
+++ b/sound/soc/tegra/tegra30_spdif.c
@@ -0,0 +1,505 @@
+/*
+ * tegra30_spdif.c - Tegra30 SPDIF driver
+ *
+ * Author: Sumit Bhattacharya <sumitb@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <mach/hdmi-audio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra30_spdif.h"
+
+#define DRV_NAME "tegra30-spdif"
+
+static inline void tegra30_spdif_write(struct tegra30_spdif *spdif,
+ u32 reg, u32 val)
+{
+ __raw_writel(val, spdif->regs + reg);
+}
+
+static inline u32 tegra30_spdif_read(struct tegra30_spdif *spdif, u32 reg)
+{
+ return __raw_readl(spdif->regs + reg);
+}
+
+static void tegra30_spdif_enable_clocks(struct tegra30_spdif *spdif)
+{
+ clk_enable(spdif->clk_spdif_out);
+ tegra30_ahub_enable_clocks();
+}
+
+static void tegra30_spdif_disable_clocks(struct tegra30_spdif *spdif)
+{
+ tegra30_ahub_disable_clocks();
+ clk_disable(spdif->clk_spdif_out);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra30_spdif_show(struct seq_file *s, void *unused)
+{
+#define REG(r) { r, #r }
+ static const struct {
+ int offset;
+ const char *name;
+ } regs[] = {
+ REG(TEGRA30_SPDIF_CTRL),
+ REG(TEGRA30_SPDIF_STROBE_CTRL),
+ REG(TEGRA30_SPDIF_CIF_TXD_CTRL),
+ REG(TEGRA30_SPDIF_CIF_RXD_CTRL),
+ REG(TEGRA30_SPDIF_CIF_TXU_CTRL),
+ REG(TEGRA30_SPDIF_CIF_RXU_CTRL),
+ REG(TEGRA30_SPDIF_CH_STA_RX_A),
+ REG(TEGRA30_SPDIF_CH_STA_RX_B),
+ REG(TEGRA30_SPDIF_CH_STA_RX_C),
+ REG(TEGRA30_SPDIF_CH_STA_RX_D),
+ REG(TEGRA30_SPDIF_CH_STA_RX_E),
+ REG(TEGRA30_SPDIF_CH_STA_RX_F),
+ REG(TEGRA30_SPDIF_CH_STA_TX_A),
+ REG(TEGRA30_SPDIF_CH_STA_TX_B),
+ REG(TEGRA30_SPDIF_CH_STA_TX_C),
+ REG(TEGRA30_SPDIF_CH_STA_TX_D),
+ REG(TEGRA30_SPDIF_CH_STA_TX_E),
+ REG(TEGRA30_SPDIF_CH_STA_TX_F),
+ REG(TEGRA30_SPDIF_FLOWCTL_CTRL),
+ REG(TEGRA30_SPDIF_TX_STEP),
+ REG(TEGRA30_SPDIF_FLOW_STATUS),
+ REG(TEGRA30_SPDIF_FLOW_TOTAL),
+ REG(TEGRA30_SPDIF_FLOW_OVER),
+ REG(TEGRA30_SPDIF_FLOW_UNDER),
+ REG(TEGRA30_SPDIF_LCOEF_1_4_0),
+ REG(TEGRA30_SPDIF_LCOEF_1_4_1),
+ REG(TEGRA30_SPDIF_LCOEF_1_4_2),
+ REG(TEGRA30_SPDIF_LCOEF_1_4_3),
+ REG(TEGRA30_SPDIF_LCOEF_1_4_4),
+ REG(TEGRA30_SPDIF_LCOEF_1_4_5),
+ REG(TEGRA30_SPDIF_LCOEF_2_4_0),
+ REG(TEGRA30_SPDIF_LCOEF_2_4_1),
+ REG(TEGRA30_SPDIF_LCOEF_2_4_2),
+ };
+#undef REG
+
+ struct tegra30_spdif *spdif = s->private;
+ int i;
+
+ tegra30_spdif_enable_clocks(spdif);
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ u32 val = tegra30_spdif_read(spdif, regs[i].offset);
+ seq_printf(s, "%s = %08x\n", regs[i].name, val);
+ }
+
+ tegra30_spdif_disable_clocks(spdif);
+
+ return 0;
+}
+
+static int tegra30_spdif_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra30_spdif_show, inode->i_private);
+}
+
+static const struct file_operations tegra30_spdif_debug_fops = {
+ .open = tegra30_spdif_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra30_spdif_debug_add(struct tegra30_spdif *spdif)
+{
+ char name[] = DRV_NAME;
+
+ spdif->debug = debugfs_create_file(name, S_IRUGO, snd_soc_debugfs_root,
+ spdif, &tegra30_spdif_debug_fops);
+}
+
+static void tegra30_spdif_debug_remove(struct tegra30_spdif *spdif)
+{
+ if (spdif->debug)
+ debugfs_remove(spdif->debug);
+}
+#else
+static inline void tegra30_spdif_debug_add(struct tegra30_spdif *spdif)
+{
+}
+
+static inline void tegra30_spdif_debug_remove(struct tegra30_spdif *spdif)
+{
+}
+#endif
+
+int tegra30_spdif_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct tegra30_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ tegra30_spdif_enable_clocks(spdif);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = tegra30_ahub_allocate_tx_fifo(&spdif->txcif,
+ &spdif->playback_dma_data.addr,
+ &spdif->playback_dma_data.req_sel);
+ spdif->playback_dma_data.wrap = 4;
+ spdif->playback_dma_data.width = 32;
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_SPDIF_RX0,
+ spdif->txcif);
+ }
+
+ tegra30_spdif_disable_clocks(spdif);
+
+ return ret;
+}
+
+void tegra30_spdif_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct tegra30_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+
+ tegra30_spdif_enable_clocks(spdif);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_SPDIF_RX0);
+ tegra30_ahub_free_tx_fifo(spdif->txcif);
+ }
+
+ tegra30_spdif_disable_clocks(spdif);
+}
+
+static int tegra30_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct device *dev = substream->pcm->card->dev;
+ struct tegra30_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+ int ret, srate, spdifclock;
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) {
+ dev_err(dev, "spdif capture is not supported\n");
+ return -EINVAL;
+ }
+
+ spdif->reg_ctrl &= ~TEGRA30_SPDIF_CTRL_BIT_MODE_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ spdif->reg_ctrl |= TEGRA30_SPDIF_CTRL_PACK_ENABLE;
+ spdif->reg_ctrl |= TEGRA30_SPDIF_CTRL_BIT_MODE_16BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+ spdif->reg_ch_sta_a &= ~TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_MASK;
+ spdif->reg_ch_sta_b &= ~TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_MASK;
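+ /*
+  * The spdif_out clock is always 128x the sample rate (one S/PDIF
+  * frame is two 32-bit subframes, biphase-mark coded).
+  */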
+ switch (srate) {
+ case 32000:
+ spdifclock = 4096000;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_32000;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_32000;
+ break;
+ case 44100:
+ spdifclock = 5644800;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_44100;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_44100;
+ break;
+ case 48000:
+ spdifclock = 6144000;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_48000;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_48000;
+ break;
+ case 88200:
+ spdifclock = 11289600;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_88200;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_88200;
+ break;
+ case 96000:
+ spdifclock = 12288000;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_96000;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_96000;
+ break;
+ case 176400:
+ spdifclock = 22579200;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_176400;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_176400;
+ break;
+ case 192000:
+ spdifclock = 24576000;
+ spdif->reg_ch_sta_a |=
+ TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_192000;
+ spdif->reg_ch_sta_b |=
+ TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(spdif->clk_spdif_out, spdifclock);
+ if (ret) {
+ dev_err(dev, "Can't set SPDIF clock rate: %d\n", ret);
+ return ret;
+ }
+
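+ /* Latch the updated channel status words into the hardware */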
+ tegra30_spdif_enable_clocks(spdif);
+
+ tegra30_spdif_write(spdif, TEGRA30_SPDIF_CH_STA_TX_A,
+ spdif->reg_ch_sta_a);
+ tegra30_spdif_write(spdif, TEGRA30_SPDIF_CH_STA_TX_B,
+ spdif->reg_ch_sta_b);
+
+ tegra30_spdif_disable_clocks(spdif);
+
+ ret = tegra_hdmi_setup_audio_freq_source(srate, SPDIF);
+ if (ret) {
+ dev_err(dev, "Can't set HDMI audio freq source: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tegra30_spdif_start_playback(struct tegra30_spdif *spdif)
+{
+ tegra30_ahub_enable_tx_fifo(spdif->txcif);
+ spdif->reg_ctrl |= TEGRA30_SPDIF_CTRL_TX_EN_ENABLE |
+ TEGRA30_SPDIF_CTRL_TC_EN_ENABLE;
+ tegra30_spdif_write(spdif, TEGRA30_SPDIF_CTRL, spdif->reg_ctrl);
+}
+
+static void tegra30_spdif_stop_playback(struct tegra30_spdif *spdif)
+{
+ tegra30_ahub_disable_tx_fifo(spdif->txcif);
+ spdif->reg_ctrl &= ~(TEGRA30_SPDIF_CTRL_TX_EN_ENABLE |
+ TEGRA30_SPDIF_CTRL_TC_EN_ENABLE);
+ tegra30_spdif_write(spdif, TEGRA30_SPDIF_CTRL, spdif->reg_ctrl);
+}
+
+static int tegra30_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct tegra30_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+
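+ /* Clocks enabled on START/RESUME stay on until the matching STOP/SUSPEND */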
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ tegra30_spdif_enable_clocks(spdif);
+ tegra30_spdif_start_playback(spdif);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ tegra30_spdif_stop_playback(spdif);
+ tegra30_spdif_disable_clocks(spdif);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra30_spdif_probe(struct snd_soc_dai *dai)
+{
+ struct tegra30_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+
+ dai->playback_dma_data = &spdif->playback_dma_data;
+ dai->capture_dma_data = NULL;
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops tegra30_spdif_dai_ops = {
+ .startup = tegra30_spdif_startup,
+ .shutdown = tegra30_spdif_shutdown,
+ .hw_params = tegra30_spdif_hw_params,
+ .trigger = tegra30_spdif_trigger,
+};
+
+struct snd_soc_dai_driver tegra30_spdif_dai = {
+ .name = DRV_NAME,
+ .probe = tegra30_spdif_probe,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &tegra30_spdif_dai_ops,
+};
+
+static __devinit int tegra30_spdif_platform_probe(struct platform_device *pdev)
+{
+ struct tegra30_spdif *spdif;
+ struct resource *mem, *memregion;
+ int ret;
+ u32 reg_val;
+
+ spdif = kzalloc(sizeof(struct tegra30_spdif), GFP_KERNEL);
+ if (!spdif) {
+ dev_err(&pdev->dev, "Can't allocate tegra30_spdif\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ dev_set_drvdata(&pdev->dev, spdif);
+
+ spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
+ if (IS_ERR(spdif->clk_spdif_out)) {
+ dev_err(&pdev->dev, "Can't retrieve spdif clock\n");
+ ret = PTR_ERR(spdif->clk_spdif_out);
+ goto err_free;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err_clk_put_spdif;
+ }
+
+ memregion = request_mem_region(mem->start, resource_size(mem),
+ DRV_NAME);
+ if (!memregion) {
+ dev_err(&pdev->dev, "Memory region already claimed\n");
+ ret = -EBUSY;
+ goto err_clk_put_spdif;
+ }
+
+ spdif->regs = ioremap(mem->start, resource_size(mem));
+ if (!spdif->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ tegra30_spdif_enable_clocks(spdif);
+
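+ /*
+  * Default TX data CIF: 16-bit, 2-channel audio and client formats,
+  * receive direction (the CIF takes samples from the AHUB for
+  * transmit), FIFO threshold of 3.
+  */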
+ reg_val = TEGRA30_SPDIF_CIF_TXD_CTRL_DIRECTION_RXCIF |
+ TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT16 |
+ TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT16 |
+ TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH2 |
+ TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH2 |
+ (3 << TEGRA30_SPDIF_CIF_TXD_CTRL_FIFO_TH_SHIFT);
+
+ tegra30_spdif_write(spdif, TEGRA30_SPDIF_CIF_TXD_CTRL, reg_val);
+
+ tegra30_spdif_disable_clocks(spdif);
+
+ ret = snd_soc_register_dai(&pdev->dev, &tegra30_spdif_dai);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ tegra30_spdif_debug_add(spdif);
+
+ return 0;
+
+err_unmap:
+ iounmap(spdif->regs);
+err_release:
+ release_mem_region(mem->start, resource_size(mem));
+err_clk_put_spdif:
+ clk_put(spdif->clk_spdif_out);
+err_free:
+ kfree(spdif);
+exit:
+ return ret;
+}
+
+static int __devexit tegra30_spdif_platform_remove(struct platform_device *pdev)
+{
+ struct tegra30_spdif *spdif = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+
+ snd_soc_unregister_dai(&pdev->dev);
+
+ tegra30_spdif_debug_remove(spdif);
+
+ iounmap(spdif->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(spdif->clk_spdif_out);
+
+ kfree(spdif);
+
+ return 0;
+}
+
+static struct platform_driver tegra30_spdif_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra30_spdif_platform_probe,
+ .remove = __devexit_p(tegra30_spdif_platform_remove),
+};
+
+static int __init snd_tegra30_spdif_init(void)
+{
+ return platform_driver_register(&tegra30_spdif_driver);
+}
+module_init(snd_tegra30_spdif_init);
+
+static void __exit snd_tegra30_spdif_exit(void)
+{
+ platform_driver_unregister(&tegra30_spdif_driver);
+}
+module_exit(snd_tegra30_spdif_exit);
+
+MODULE_AUTHOR("Sumit Bhattacharya <sumitb@nvidia.com>");
+MODULE_DESCRIPTION("Tegra30 SPDIF ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra30_spdif.h b/sound/soc/tegra/tegra30_spdif.h
new file mode 100644
index 000000000000..c4763c31b257
--- /dev/null
+++ b/sound/soc/tegra/tegra30_spdif.h
@@ -0,0 +1,777 @@
+/*
+ * tegra30_spdif.h - Definitions for Tegra30 SPDIF driver
+ *
+ * Author: Sumit Bhattacharya <sumitb@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ * Scott Peterson <speterson@nvidia.com>
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TEGRA30_SPDIF_H__
+#define __TEGRA30_SPDIF_H__
+
+#include "tegra_pcm.h"
+#include "tegra30_ahub.h"
+
+/* Register offsets from TEGRA_SPDIF_BASE */
+
+#define TEGRA30_SPDIF_CTRL 0x0
+#define TEGRA30_SPDIF_STROBE_CTRL 0x4
+#define TEGRA30_SPDIF_CIF_TXD_CTRL 0x08
+#define TEGRA30_SPDIF_CIF_RXD_CTRL 0x0C
+#define TEGRA30_SPDIF_CIF_TXU_CTRL 0x10
+#define TEGRA30_SPDIF_CIF_RXU_CTRL 0x14
+#define TEGRA30_SPDIF_CH_STA_RX_A 0x18
+#define TEGRA30_SPDIF_CH_STA_RX_B 0x1C
+#define TEGRA30_SPDIF_CH_STA_RX_C 0x20
+#define TEGRA30_SPDIF_CH_STA_RX_D 0x24
+#define TEGRA30_SPDIF_CH_STA_RX_E 0x28
+#define TEGRA30_SPDIF_CH_STA_RX_F 0x2C
+#define TEGRA30_SPDIF_CH_STA_TX_A 0x30
+#define TEGRA30_SPDIF_CH_STA_TX_B 0x34
+#define TEGRA30_SPDIF_CH_STA_TX_C 0x38
+#define TEGRA30_SPDIF_CH_STA_TX_D 0x3C
+#define TEGRA30_SPDIF_CH_STA_TX_E 0x40
+#define TEGRA30_SPDIF_CH_STA_TX_F 0x44
+#define TEGRA30_SPDIF_FLOWCTL_CTRL 0x70
+#define TEGRA30_SPDIF_TX_STEP 0x74
+#define TEGRA30_SPDIF_FLOW_STATUS 0x78
+#define TEGRA30_SPDIF_FLOW_TOTAL 0x7c
+#define TEGRA30_SPDIF_FLOW_OVER 0x80
+#define TEGRA30_SPDIF_FLOW_UNDER 0x84
+#define TEGRA30_SPDIF_LCOEF_1_4_0 0x88
+#define TEGRA30_SPDIF_LCOEF_1_4_1 0x8c
+#define TEGRA30_SPDIF_LCOEF_1_4_2 0x90
+#define TEGRA30_SPDIF_LCOEF_1_4_3 0x94
+#define TEGRA30_SPDIF_LCOEF_1_4_4 0x98
+#define TEGRA30_SPDIF_LCOEF_1_4_5 0x9c
+#define TEGRA30_SPDIF_LCOEF_2_4_0 0xa0
+#define TEGRA30_SPDIF_LCOEF_2_4_1 0xa4
+#define TEGRA30_SPDIF_LCOEF_2_4_2 0xa8
+
+/* Fields in TEGRA30_SPDIF_CTRL */
+#define TEGRA30_SPDIF_CTRL_FLOWCTL_EN_ENABLE (1<<31)
+#define TEGRA30_SPDIF_CTRL_CAP_LC_LEFT_CH (1<<30)
+#define TEGRA30_SPDIF_CTRL_RX_EN_ENABLE (1<<29)
+#define TEGRA30_SPDIF_CTRL_TX_EN_ENABLE (1<<28)
+#define TEGRA30_SPDIF_CTRL_TC_EN_ENABLE (1<<27)
+#define TEGRA30_SPDIF_CTRL_TU_EN_ENABLE (1<<26)
+#define TEGRA30_SPDIF_CTRL_IE_P_RSVD_ENABLE (1<<23)
+#define TEGRA30_SPDIF_CTRL_IE_B_RSVD_ENABLE (1<<22)
+#define TEGRA30_SPDIF_CTRL_IE_C_RSVD_ENABLE (1<<21)
+#define TEGRA30_SPDIF_CTRL_IE_U_RSVD_ENABLE (1<<20)
+#define TEGRA30_SPDIF_CTRL_LBK_EN_ENABLE (1<<15)
+#define TEGRA30_SPDIF_CTRL_PACK_ENABLE (1<<14)
+
+#define TEGRA30_SPDIF_BIT_MODE16 0
+#define TEGRA30_SPDIF_BIT_MODE20 1
+#define TEGRA30_SPDIF_BIT_MODE24 2
+#define TEGRA30_SPDIF_BIT_MODERAW 3
+
+#define TEGRA30_SPDIF_CTRL_BIT_MODE_SHIFT 12
+#define TEGRA30_SPDIF_CTRL_BIT_MODE_MASK (3 << TEGRA30_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA30_SPDIF_CTRL_BIT_MODE_16BIT (TEGRA30_SPDIF_BIT_MODE16 << TEGRA30_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA30_SPDIF_CTRL_BIT_MODE_20BIT (TEGRA30_SPDIF_BIT_MODE20 << TEGRA30_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA30_SPDIF_CTRL_BIT_MODE_24BIT (TEGRA30_SPDIF_BIT_MODE24 << TEGRA30_SPDIF_CTRL_BIT_MODE_SHIFT)
+#define TEGRA30_SPDIF_CTRL_BIT_MODE_RAW (TEGRA30_SPDIF_BIT_MODERAW << TEGRA30_SPDIF_CTRL_BIT_MODE_SHIFT)
+
+#define TEGRA30_SPDIF_CTRL_CG_EN_ENABLE (1<<11)
+
+#define TEGRA30_SPDIF_CTRL_OBS_SEL_SHIFT 8
+#define TEGRA30_SPDIF_CTRL_OBS_SEL_MASK (0x7 << TEGRA30_SPDIF_CTRL_OBS_SEL_SHIFT)
+
+#define TEGRA30_SPDIF_CTRL_SOFT_RESET_ENABLE (1<<7)
+
+/* Fields in TEGRA30_SPDIF_STROBE_CTRL */
+#define TEGRA30_SPDIF_STROBE_CTRL_PERIOD_SHIFT 16
+#define TEGRA30_SPDIF_STROBE_CTRL_PERIOD_MASK (0xff << TEGRA30_SPDIF_STROBE_CTRL_PERIOD_SHIFT)
+
+#define TEGRA30_SPDIF_STROBE_CTRL_STROBE (1<<15)
+
+#define TEGRA30_SPDIF_STROBE_CTRL_DATA_STROBES_SHIFT 8
+#define TEGRA30_SPDIF_STROBE_CTRL_DATA_STROBES_MASK (0x1f << TEGRA30_SPDIF_STROBE_CTRL_DATA_STROBES_SHIFT)
+
+#define TEGRA30_SPDIF_STROBE_CTRL_CLOCK_PERIOD_SHIFT 0
+#define TEGRA30_SPDIF_STROBE_CTRL_CLOCK_PERIOD_MASK (0x3f << TEGRA30_SPDIF_STROBE_CTRL_CLOCK_PERIOD_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CIF_TXD_CTRL */
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_MONO_CONV_COPY (1<<0)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_TRUNCATE_CHOP (1<<1)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_DIRECTION_RXCIF (1<<2)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_REPLICATE_ENABLE (1<<3)
+
+#define TEGRA30_SPDIF_CIF_STEREO_CH0 0
+#define TEGRA30_SPDIF_CIF_STEREO_CH1 1
+#define TEGRA30_SPDIF_CIF_STEREO_AVG 2
+#define TEGRA30_SPDIF_CIF_STEREO_RSVD 3
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_SHIFT 4
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_CH0 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH0 << TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_CH1 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH1 << TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_AVG \
+ (TEGRA30_SPDIF_CIF_STEREO_AVG << TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_RSVD \
+ (TEGRA30_SPDIF_CIF_STEREO_RSVD << TEGRA30_SPDIF_CIF_TXD_CTRL_STEREO_CONV_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_EXPAND_ZERO 0
+#define TEGRA30_SPDIF_CIF_EXPAND_ONE 1
+#define TEGRA30_SPDIF_CIF_EXPAND_LFSR 2
+#define TEGRA30_SPDIF_CIF_EXPAND_RSVD 3
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_SHIFT 6
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_ZERO \
+ (TEGRA30_SPDIF_CIF_EXPAND_ZERO << TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_ONE \
+ (TEGRA30_SPDIF_CIF_EXPAND_ONE << TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_LFSR \
+ (TEGRA30_SPDIF_CIF_EXPAND_LFSR << TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_RSVD \
+ (TEGRA30_SPDIF_CIF_EXPAND_RSVD << TEGRA30_SPDIF_CIF_TXD_CTRL_EXPAND_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_BIT4 0
+#define TEGRA30_SPDIF_CIF_BIT8 1
+#define TEGRA30_SPDIF_CIF_BIT12 2
+#define TEGRA30_SPDIF_CIF_BIT16 3
+#define TEGRA30_SPDIF_CIF_BIT20 4
+#define TEGRA30_SPDIF_CIF_BIT24 5
+#define TEGRA30_SPDIF_CIF_BIT28 6
+#define TEGRA30_SPDIF_CIF_BIT32 7
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT 8
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_BITS_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT 12
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_BITS_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_CH1 0
+#define TEGRA30_SPDIF_CIF_CH2 1
+#define TEGRA30_SPDIF_CIF_CH3 2
+#define TEGRA30_SPDIF_CIF_CH4 3
+#define TEGRA30_SPDIF_CIF_CH5 4
+#define TEGRA30_SPDIF_CIF_CH6 5
+#define TEGRA30_SPDIF_CIF_CH7 6
+#define TEGRA30_SPDIF_CIF_CH8 7
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT 16
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_TXD_CTRL_CLIENT_CH_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT 24
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_TXD_CTRL_AUDIO_CH_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_FIFO_TH_SHIFT 28
+#define TEGRA30_SPDIF_CIF_TXD_CTRL_FIFO_TH_MASK (0x7 << TEGRA30_SPDIF_CIF_TXD_CTRL_FIFO_TH_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CIF_RXD_CTRL */
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_MONO_CONV_COPY (1<<0)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_TRUNCATE_CHOP (1<<1)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_DIRECTION_RXCIF (1<<2)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_REPLICATE_ENABLE (1<<3)
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_SHIFT 4
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_CH0 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH0 << TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_CH1 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH1 << TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_AVG \
+ (TEGRA30_SPDIF_CIF_STEREO_AVG << TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_RSVD \
+ (TEGRA30_SPDIF_CIF_STEREO_RSVD << TEGRA30_SPDIF_CIF_RXD_CTRL_STEREO_CONV_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_SHIFT 6
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_ZERO \
+ (TEGRA30_SPDIF_CIF_EXPAND_ZERO << TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_ONE \
+ (TEGRA30_SPDIF_CIF_EXPAND_ONE << TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_LFSR \
+ (TEGRA30_SPDIF_CIF_EXPAND_LFSR << TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_RSVD \
+ (TEGRA30_SPDIF_CIF_EXPAND_RSVD << TEGRA30_SPDIF_CIF_RXD_CTRL_EXPAND_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT 8
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_BITS_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT 12
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_BITS_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT 16
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_RXD_CTRL_CLIENT_CH_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT 24
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_RXD_CTRL_AUDIO_CHANNELS_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_FIFO_TH_SHIFT 28
+#define TEGRA30_SPDIF_CIF_RXD_CTRL_FIFO_TH_MASK (0x7 << TEGRA30_SPDIF_CIF_RXD_CTRL_FIFO_TH_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CIF_TXU_CTRL */
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_MONO_CONV_COPY (1<<0)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_TRUNCATE_CHOP (1<<1)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_DIRECTION_RXCIF (1<<2)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_REPLICATE_ENABLE (1<<3)
+
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_SHIFT 4
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_CH0 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH0 << TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_CH1 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH1 << TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_AVG \
+ (TEGRA30_SPDIF_CIF_STEREO_AVG << TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_RSVD \
+ (TEGRA30_SPDIF_CIF_STEREO_RSVD << TEGRA30_SPDIF_CIF_TXU_CTRL_STEREO_CONV_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_SHIFT 6
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_ZERO \
+ (TEGRA30_SPDIF_CIF_EXPAND_ZERO << TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_ONE \
+ (TEGRA30_SPDIF_CIF_EXPAND_ONE << TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_LFSR \
+ (TEGRA30_SPDIF_CIF_EXPAND_LFSR << TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_RSVD \
+ (TEGRA30_SPDIF_CIF_EXPAND_RSVD << TEGRA30_SPDIF_CIF_TXU_CTRL_EXPAND_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT 8
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_BITS_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT 12
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_BITS_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT 16
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_TXU_CTRL_CLIENT_CH_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT 24
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_TXU_CTRL_AUDIO_CH_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_FIFO_TH_SHIFT 28
+#define TEGRA30_SPDIF_CIF_TXU_CTRL_FIFO_TH_MASK (0x7 << TEGRA30_SPDIF_CIF_TXU_CTRL_FIFO_TH_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CIF_RXU_CTRL */
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_MONO_CONV_COPY (1<<0)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_TRUNCATE_CHOP (1<<1)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_DIRECTION_RXCIF (1<<2)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_REPLICATE_ENABLE (1<<3)
+
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_SHIFT 4
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_CH0 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH0 << TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_CH1 \
+ (TEGRA30_SPDIF_CIF_STEREO_CH1 << TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_AVG \
+ (TEGRA30_SPDIF_CIF_STEREO_AVG << TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_RSVD \
+ (TEGRA30_SPDIF_CIF_STEREO_RSVD << TEGRA30_SPDIF_CIF_RXU_CTRL_STEREO_CONV_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_SHIFT 6
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_MASK \
+ (0x3 << TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_ZERO \
+ (TEGRA30_SPDIF_CIF_EXPAND_ZERO << TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_ONE \
+ (TEGRA30_SPDIF_CIF_EXPAND_ONE << TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_LFSR \
+ (TEGRA30_SPDIF_CIF_EXPAND_LFSR << TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_RSVD \
+ (TEGRA30_SPDIF_CIF_EXPAND_RSVD << TEGRA30_SPDIF_CIF_RXU_CTRL_EXPAND_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT 8
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_BITS_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT 12
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT4 \
+ (TEGRA30_SPDIF_CIF_BIT4 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT8 \
+ (TEGRA30_SPDIF_CIF_BIT8 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT12 \
+ (TEGRA30_SPDIF_CIF_BIT12 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT16 \
+ (TEGRA30_SPDIF_CIF_BIT16 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT20 \
+ (TEGRA30_SPDIF_CIF_BIT20 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT24 \
+ (TEGRA30_SPDIF_CIF_BIT24 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT28 \
+ (TEGRA30_SPDIF_CIF_BIT28 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BIT32 \
+ (TEGRA30_SPDIF_CIF_BIT32 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_BITS_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT 16
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_RXU_CTRL_CLIENT_CH_SHIFT)
+
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT 24
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_MASK \
+ (0x7 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH1 \
+ (TEGRA30_SPDIF_CIF_CH1 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH2 \
+ (TEGRA30_SPDIF_CIF_CH2 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH3 \
+ (TEGRA30_SPDIF_CIF_CH3 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH4 \
+ (TEGRA30_SPDIF_CIF_CH4 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH5 \
+ (TEGRA30_SPDIF_CIF_CH5 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH6 \
+ (TEGRA30_SPDIF_CIF_CH6 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH7 \
+ (TEGRA30_SPDIF_CIF_CH7 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH8 \
+ (TEGRA30_SPDIF_CIF_CH8 << TEGRA30_SPDIF_CIF_RXU_CTRL_AUDIO_CH_SHIFT)
+
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_FIFO_TH_SHIFT 28
+#define TEGRA30_SPDIF_CIF_RXU_CTRL_FIFO_TH_MASK (0x7 << TEGRA30_SPDIF_CIF_RXU_CTRL_FIFO_TH_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CH_STA_RX_A */
+/* Fields in TEGRA30_SPDIF_CH_STA_RX_B */
+/* Fields in TEGRA30_SPDIF_CH_STA_RX_C */
+/* Fields in TEGRA30_SPDIF_CH_STA_RX_D */
+/* Fields in TEGRA30_SPDIF_CH_STA_RX_E */
+/* Fields in TEGRA30_SPDIF_CH_STA_RX_F */
+
+/*
+ * The 6-word receive channel data page buffer holds a block (192 frames) of
+ * channel status information. Bits are received LSB first, filling
+ * CH_STA_RX_A through CH_STA_RX_F and then wrapping back to CH_STA_RX_A.
+ */
+
+/* Fields in TEGRA30_SPDIF_CH_STA_TX_A */
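+/* Channel status sampling frequency codes, written to bits 27:24 of word A */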
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_22050 0x4
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_24000 0x6
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_32000 0x3
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_44100 0x0
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_48000 0x2
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_88200 0x8
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_96000 0xA
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_176400 0xC
+#define TEGRA30_SPDIF_CH_STA_TX_A_SF_192000 0xE
+
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT 24
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_MASK \
+ (0xF << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_22050 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_22050 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_24000 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_24000 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_32000 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_32000 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_44100 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_44100 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_48000 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_48000 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_88200 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_88200 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_96000 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_96000 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_176400 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_176400 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_192000 \
+ (TEGRA30_SPDIF_CH_STA_TX_A_SF_192000 << TEGRA30_SPDIF_CH_STA_TX_A_SAMP_FREQ_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CH_STA_TX_B */
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_8000 0x6
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_11025 0xA
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_12000 0x2
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_16000 0x8
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_22050 0xB
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_24000 0x9
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_32000 0xC
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_44100 0xF
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_48000 0xD
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_88200 0x7
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_96000 0x5
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_176400 0x3
+#define TEGRA30_SPDIF_CH_STA_TX_B_SF_192000 0x1
+
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT 4
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_MASK \
+ (0xF << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_8000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_8000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_11025 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_11025 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_12000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_12000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_16000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_16000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_22050 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_22050 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_24000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_24000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_32000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_32000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_44100 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_44100 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_48000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_48000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_88200 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_88200 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_96000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_96000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_176400 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_176400 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+#define TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_192000 \
+ (TEGRA30_SPDIF_CH_STA_TX_B_SF_192000 << TEGRA30_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_CH_STA_TX_C */
+/* Fields in TEGRA30_SPDIF_CH_STA_TX_D */
+/* Fields in TEGRA30_SPDIF_CH_STA_TX_E */
+/* Fields in TEGRA30_SPDIF_CH_STA_TX_F */
+
+/* Fields in TEGRA30_SPDIF_FLOWCTL_CTRL */
+#define TEGRA30_SPDIF_FLOWCTL_CTRL_FILTER_QUAD (1<<31)
+
+/* Fields in TEGRA30_SPDIF_TX_STEP */
+#define TEGRA30_SPDIF_TX_STEP_STEP_SIZE_SHIFT 0
+#define TEGRA30_SPDIF_TX_STEP_STEP_SIZE_MASK (0xffff << TEGRA30_SPDIF_TX_STEP_STEP_SIZE_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_FLOW_STATUS */
+#define TEGRA30_SPDIF_FLOW_STATUS_COUNTER_EN_ENABLE (1<<1)
+#define TEGRA30_SPDIF_FLOW_STATUS_MONITOR_CLR_CLEAR (1<<2)
+#define TEGRA30_SPDIF_FLOW_STATUS_COUNTER_CLR_CLEAR (1<<3)
+#define TEGRA30_SPDIF_FLOW_STATUS_MONITOR_INT_EN_ENABLE (1<<4)
+#define TEGRA30_SPDIF_FLOW_STATUS_FLOW_OVERFLOW_OVER (1<<30)
+#define TEGRA30_SPDIF_FLOW_STATUS_FLOW_UNDERFLOW_UNDER (1<<31)
+
+/* Fields in TEGRA30_SPDIF_FLOW_TOTAL */
+/* Fields in TEGRA30_SPDIF_FLOW_OVER */
+/* Fields in TEGRA30_SPDIF_FLOW_UNDER */
+
+/* Fields in TEGRA30_SPDIF_LCOEF_1_4_0 */
+#define TEGRA30_SPDIF_LCOEF_1_4_0_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_1_4_0_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_1_4_0_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_1_4_1 */
+#define TEGRA30_SPDIF_LCOEF_1_4_1_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_1_4_1_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_1_4_1_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_1_4_2 */
+#define TEGRA30_SPDIF_LCOEF_1_4_2_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_1_4_2_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_1_4_2_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_1_4_3 */
+#define TEGRA30_SPDIF_LCOEF_1_4_3_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_1_4_3_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_1_4_3_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_1_4_4 */
+#define TEGRA30_SPDIF_LCOEF_1_4_4_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_1_4_4_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_1_4_4_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_1_4_5 */
+#define TEGRA30_SPDIF_LCOEF_1_4_5_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_1_4_5_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_1_4_5_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_2_4_0 */
+#define TEGRA30_SPDIF_LCOEF_2_4_0_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_2_4_0_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_2_4_0_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_2_4_1 */
+#define TEGRA30_SPDIF_LCOEF_2_4_1_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_2_4_1_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_2_4_1_COEF_SHIFT)
+
+/* Fields in TEGRA30_SPDIF_LCOEF_2_4_2 */
+#define TEGRA30_SPDIF_LCOEF_2_4_2_COEF_SHIFT 0
+#define TEGRA30_SPDIF_LCOEF_2_4_2_COEF_MASK (0xffff << TEGRA30_SPDIF_LCOEF_2_4_2_COEF_SHIFT)
+
+struct tegra30_spdif {
+ struct clk *clk_spdif_out;
+ enum tegra30_ahub_txcif txcif;
+ struct tegra_pcm_dma_params playback_dma_data;
+ void __iomem *regs;
+ struct dentry *debug;
+ u32 reg_ctrl;
+ u32 reg_ch_sta_a;
+ u32 reg_ch_sta_b;
+};
+
+#endif
diff --git a/sound/soc/tegra/tegra_aic326x.c b/sound/soc/tegra/tegra_aic326x.c
new file mode 100644
index 000000000000..a7297a54233a
--- /dev/null
+++ b/sound/soc/tegra/tegra_aic326x.c
@@ -0,0 +1,1075 @@
+/*
+ * tegra_aic326x.c - Tegra machine ASoC driver for boards using TI 3262 codec.
+ *
+ * Author: Vinod G. <vinodg@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * (c) 2010, 2011 Nvidia Graphics Pvt. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <asm/mach-types.h>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+
+#include <mach/tegra_aic326x_pdata.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/tlv320aic326x.h"
+
+#include "tegra_pcm.h"
+#include "tegra_asoc_utils.h"
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#include "tegra20_das.h"
+#else
+#include "tegra30_ahub.h"
+#include "tegra30_i2s.h"
+#include "tegra30_dam.h"
+#endif
+
+
+#define DRV_NAME "tegra-snd-aic326x"
+
+#define GPIO_SPKR_EN BIT(0)
+#define GPIO_HP_MUTE BIT(1)
+#define GPIO_INT_MIC_EN BIT(2)
+#define GPIO_EXT_MIC_EN BIT(3)
+
+#define DAI_LINK_HIFI 0
+#define DAI_LINK_SPDIF 1
+#define DAI_LINK_BTSCO 2
+#define DAI_LINK_VOICE_CALL 3
+#define DAI_LINK_BT_VOICE_CALL 4
+#define NUM_DAI_LINKS 5
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+const char *tegra_aic326x_i2s_dai_name[TEGRA30_NR_I2S_IFC] = {
+ "tegra30-i2s.0",
+ "tegra30-i2s.1",
+ "tegra30-i2s.2",
+ "tegra30-i2s.3",
+ "tegra30-i2s.4",
+};
+#endif
+
+struct tegra_aic326x {
+ struct tegra_asoc_utils_data util_data;
+ struct tegra_aic326x_platform_data *pdata;
+ struct regulator *audio_reg;
+ int gpio_requested;
+ bool init_done;
+ int is_call_mode;
+ int is_device_bt;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct codec_config codec_info[NUM_I2S_DEVICES];
+#endif
+};
+
+static int tegra_aic326x_call_mode_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+}
+
+static int tegra_aic326x_call_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_aic326x *machine = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = machine->is_call_mode;
+
+ return 0;
+}
+
+static int tegra_aic326x_call_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_aic326x *machine = snd_kcontrol_chip(kcontrol);
+ int is_call_mode_new = ucontrol->value.integer.value[0];
+ int codec_index;
+
+ if (machine->is_call_mode == is_call_mode_new)
+ return 0;
+
+ if (machine->is_device_bt)
+ codec_index = BT_SCO;
+ else
+ codec_index = HIFI_CODEC;
+
+ if (is_call_mode_new) {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (machine->codec_info[codec_index].rate == 0 ||
+ machine->codec_info[codec_index].channels == 0)
+ return -EINVAL;
+
+ tegra30_make_voice_call_connections(
+ &machine->codec_info[codec_index],
+ &machine->codec_info[BASEBAND]);
+#endif
+ } else {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra30_break_voice_call_connections(
+ &machine->codec_info[codec_index],
+ &machine->codec_info[BASEBAND]);
+#endif
+ }
+
+ machine->is_call_mode = is_call_mode_new;
+
+ return 1;
+}
+
+struct snd_kcontrol_new tegra_aic326x_call_mode_control = {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Call Mode Switch",
+ .private_value = 0xffff,
+ .info = tegra_aic326x_call_mode_info,
+ .get = tegra_aic326x_call_mode_get,
+ .put = tegra_aic326x_call_mode_put
+};
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static int tegra_aic326x_set_dam_cif(int dam_ifc, int srate,
+ int channels, int bit_size)
+{
+ tegra30_dam_set_samplerate(dam_ifc, TEGRA30_DAM_CHOUT,
+ srate);
+ tegra30_dam_set_samplerate(dam_ifc, TEGRA30_DAM_CHIN1,
+ srate);
+ tegra30_dam_set_acif(dam_ifc, TEGRA30_DAM_CHIN1,
+ channels, bit_size, channels,
+ bit_size);
+ tegra30_dam_set_acif(dam_ifc, TEGRA30_DAM_CHOUT,
+ channels, bit_size, channels,
+ bit_size);
+
+ return 0;
+}
+#endif
+
+static int tegra_aic326x_get_mclk(int srate)
+{
+ int mclk = 0;
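+ /* 12.288MHz covers the 8kHz-based rates, 11.2896MHz the 11.025kHz-based ones */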
+ switch (srate) {
+ case 8000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ mclk = -EINVAL;
+ break;
+ }
+
+ return mclk;
+}
+
+static int tegra_aic326x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+#endif
+ int srate, mclk, sample_size, daifmt;
+ int err;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sample_size = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+
+ mclk = tegra_aic326x_get_mclk(srate);
+ if (mclk < 0)
+ return mclk;
+
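+ /*
+  * Ask for the required mclk; if the rate cannot be changed, fall back
+  * to the currently locked rate when it is an exact multiple of mclk.
+  */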
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ daifmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS;
+
+ err = snd_soc_dai_set_fmt(codec_dai, daifmt);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai, daifmt);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ err = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAP_SEL_DAC1,
+ TEGRA20_DAS_DAP_ID_1);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dap-dac path\n");
+ return err;
+ }
+
+ err = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_1,
+ TEGRA20_DAS_DAP_SEL_DAC1);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+#else
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra_aic326x_set_dam_cif(i2s->dam_ifc, srate,
+ params_channels(params), sample_size);
+#endif
+
+ return 0;
+}
+
+static int tegra_aic326x_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+
+ mclk = tegra_aic326x_get_mclk(srate);
+ if (mclk < 0)
+ return mclk;
+
+ min_mclk = 128 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ return 0;
+}
+
+static int tegra_aic326x_bt_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+#endif
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ int err, srate, mclk, min_mclk, sample_size;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sample_size = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+
+ mclk = tegra_aic326x_get_mclk(srate);
+ if (mclk < 0)
+ return mclk;
+
+ min_mclk = 64 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra_aic326x_set_dam_cif(i2s->dam_ifc, params_rate(params),
+ params_channels(params), sample_size);
+#endif
+
+ return 0;
+}
+
+static int tegra_aic326x_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(rtd->card);
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 0);
+
+ return 0;
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static int tegra_aic326x_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if ((substream->stream != SNDRV_PCM_STREAM_PLAYBACK) ||
+ !(i2s->is_dam_used))
+ return 0;
+
+ /* DAM configuration */
+ if (!i2s->dam_ch_refcount)
+ i2s->dam_ifc = tegra30_dam_allocate_controller();
+
+ tegra30_dam_allocate_channel(i2s->dam_ifc, TEGRA30_DAM_CHIN1);
+ i2s->dam_ch_refcount++;
+ tegra30_dam_enable_clock(i2s->dam_ifc);
+ tegra30_dam_set_gain(i2s->dam_ifc, TEGRA30_DAM_CHIN1, 0x1000);
+
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX1 +
+ (i2s->dam_ifc*2), i2s->txcif);
+
+ /*
+  * Make the DAM TX to I2S RX connection if this is the only client
+  * using I2S for playback.
+  */
+ if (i2s->playback_ref_count == 1)
+ tegra30_ahub_set_rx_cif_source(
+ TEGRA30_AHUB_RXCIF_I2S0_RX0 + i2s->id,
+ TEGRA30_AHUB_TXCIF_DAM0_TX0 + i2s->dam_ifc);
+
+ /* enable the DAM */
+ tegra30_dam_enable(i2s->dam_ifc, TEGRA30_DAM_ENABLE,
+ TEGRA30_DAM_CHIN1);
+
+ return 0;
+}
+
+static void tegra_aic326x_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if ((substream->stream != SNDRV_PCM_STREAM_PLAYBACK) ||
+ !(i2s->is_dam_used))
+ return;
+
+ /* disable the DAM */
+ tegra30_dam_enable(i2s->dam_ifc, TEGRA30_DAM_DISABLE,
+ TEGRA30_DAM_CHIN1);
+
+ /* disconnect the AHUB connections */
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX1 +
+ (i2s->dam_ifc*2));
+
+ /* disable the DAM clock and free the channel/controller */
+ tegra30_dam_disable_clock(i2s->dam_ifc);
+ tegra30_dam_free_channel(i2s->dam_ifc, TEGRA30_DAM_CHIN1);
+ i2s->dam_ch_refcount--;
+ if (!i2s->dam_ch_refcount)
+ tegra30_dam_free_controller(i2s->dam_ifc);
+}
+#endif
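For reference, the AHUB routing established by tegra_aic326x_startup() above is: the I2S playback CIF (i2s->txcif) feeds DAM<dam_ifc> RXCIF1, and DAM<dam_ifc> TXCIF0 feeds I2S<id> RXCIF (the latter only when this stream is the sole playback client of that I2S). Playback data is therefore looped through the DAM, where the 0x1000 gain programmed above is applied and other clients (such as the voice-call path) can presumably be mixed in, before it reaches the I2S interface; tegra_aic326x_shutdown() tears the same links down and releases the DAM controller once its channel refcount drops to zero.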
+
+static int tegra_aic326x_voice_call_hw_params(
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk;
+ int err;
+
+ srate = params_rate(params);
+ mclk = tegra_aic326x_get_mclk(srate);
+ if (mclk < 0)
+ return mclk;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* codec configuration */
+ machine->codec_info[HIFI_CODEC].rate = params_rate(params);
+ machine->codec_info[HIFI_CODEC].channels = params_channels(params);
+ machine->codec_info[HIFI_CODEC].bitsize = 16;
+ machine->codec_info[HIFI_CODEC].is_i2smaster = 1;
+ machine->codec_info[HIFI_CODEC].is_format_dsp = 0;
+
+ /* baseband configuration */
+ machine->codec_info[BASEBAND].bitsize = 16;
+ machine->codec_info[BASEBAND].is_i2smaster = 1;
+ machine->codec_info[BASEBAND].is_format_dsp = 1;
+#endif
+
+ machine->is_device_bt = 0;
+
+ return 0;
+}
+
+static void tegra_aic326x_voice_call_shutdown(
+ struct snd_pcm_substream *substream)
+{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_aic326x *machine =
+ snd_soc_card_get_drvdata(rtd->codec->card);
+
+ machine->codec_info[HIFI_CODEC].rate = 0;
+ machine->codec_info[HIFI_CODEC].channels = 0;
+#endif
+}
+
+static int tegra_aic326x_bt_voice_call_hw_params(
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ int err, srate, mclk, min_mclk;
+
+ srate = params_rate(params);
+
+ mclk = tegra_aic326x_get_mclk(srate);
+ if (mclk < 0)
+ return mclk;
+
+ min_mclk = 64 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* codec configuration */
+ machine->codec_info[BT_SCO].rate = params_rate(params);
+ machine->codec_info[BT_SCO].channels = params_channels(params);
+ machine->codec_info[BT_SCO].bitsize = 16;
+ machine->codec_info[BT_SCO].is_i2smaster = 1;
+ machine->codec_info[BT_SCO].is_format_dsp = 1;
+
+ /* baseband configuration */
+ machine->codec_info[BASEBAND].bitsize = 16;
+ machine->codec_info[BASEBAND].is_i2smaster = 1;
+ machine->codec_info[BASEBAND].is_format_dsp = 1;
+#endif
+
+ machine->is_device_bt = 1;
+
+ return 0;
+}
+
+static void tegra_aic326x_bt_voice_call_shutdown(
+ struct snd_pcm_substream *substream)
+{
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_aic326x *machine =
+ snd_soc_card_get_drvdata(rtd->codec->card);
+
+ machine->codec_info[BT_SCO].rate = 0;
+ machine->codec_info[BT_SCO].channels = 0;
+#endif
+}
+
+static struct snd_soc_ops tegra_aic326x_hifi_ops = {
+ .hw_params = tegra_aic326x_hw_params,
+ .hw_free = tegra_aic326x_hw_free,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ .startup = tegra_aic326x_startup,
+ .shutdown = tegra_aic326x_shutdown,
+#endif
+};
+
+static struct snd_soc_ops tegra_aic326x_spdif_ops = {
+ .hw_params = tegra_aic326x_spdif_hw_params,
+ .hw_free = tegra_aic326x_hw_free,
+};
+
+static struct snd_soc_ops tegra_aic326x_voice_call_ops = {
+ .hw_params = tegra_aic326x_voice_call_hw_params,
+ .shutdown = tegra_aic326x_voice_call_shutdown,
+ .hw_free = tegra_aic326x_hw_free,
+};
+
+static struct snd_soc_ops tegra_aic326x_bt_voice_call_ops = {
+ .hw_params = tegra_aic326x_bt_voice_call_hw_params,
+ .shutdown = tegra_aic326x_bt_voice_call_shutdown,
+ .hw_free = tegra_aic326x_hw_free,
+};
+
+static struct snd_soc_ops tegra_aic326x_bt_ops = {
+ .hw_params = tegra_aic326x_bt_hw_params,
+ .hw_free = tegra_aic326x_hw_free,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ .startup = tegra_aic326x_startup,
+ .shutdown = tegra_aic326x_shutdown,
+#endif
+};
+
+static struct snd_soc_jack tegra_aic326x_hp_jack;
+
+#ifdef CONFIG_SWITCH
+static struct switch_dev aic326x_wired_switch_dev = {
+ .name = "h2w",
+};
+
+/* These values are copied from WiredAccessoryObserver */
+enum headset_state {
+ BIT_NO_HEADSET = 0,
+ BIT_HEADSET = (1 << 0),
+ BIT_HEADSET_NO_MIC = (1 << 1),
+};
+
+static int aic326x_headset_switch_notify(struct notifier_block *self,
+ unsigned long action, void *dev)
+{
+ int state = 0;
+
+ switch (action) {
+ case SND_JACK_HEADPHONE:
+ state |= BIT_HEADSET_NO_MIC;
+ break;
+ case SND_JACK_HEADSET:
+ state |= BIT_HEADSET;
+ break;
+ default:
+ state |= BIT_NO_HEADSET;
+ }
+
+ switch_set_state(&aic326x_wired_switch_dev, state);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block aic326x_headset_switch_nb = {
+ .notifier_call = aic326x_headset_switch_notify,
+};
+#else
+static struct snd_soc_jack_pin tegra_aic326x_hp_jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+#endif
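With CONFIG_SWITCH, the codec's jack-detect path ends up driving the Android "h2w" switch through the notifier above; without it, a plain DAPM jack pin is used instead. A hedged usage sketch (snd_soc_jack_report() is the standard ASoC call; the surrounding detect wiring is assumed):

/* reporting a 4-pole headset ... */
snd_soc_jack_report(&tegra_aic326x_hp_jack, SND_JACK_HEADSET,
		    SND_JACK_HEADSET);
/*
 * ... runs aic326x_headset_switch_notify(), which maps the jack bits to
 * BIT_HEADSET and publishes the result via /sys/class/switch/h2w/state.
 */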
+
+static int tegra_aic326x_event_int_spk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_aic326x_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_SPKR_EN))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_spkr_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static int tegra_aic326x_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_aic326x_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_HP_MUTE))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_hp_mute,
+ !SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget tegra_aic326x_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Int Spk", tegra_aic326x_event_int_spk),
+ SND_SOC_DAPM_HP("Earpiece", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", tegra_aic326x_event_hp),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_INPUT("Ext Mic"),
+ SND_SOC_DAPM_LINE("Linein", NULL),
+};
+
+static const struct snd_soc_dapm_route aic326x_audio_map[] = {
+ {"Int Spk", NULL, "SPKL"},
+ {"Int Spk", NULL, "SPKR"},
+ {"Earpiece", NULL, "RECL"},
+ {"Earpiece", NULL, "RECR"},
+ {"Headphone Jack", NULL, "HPL"},
+ {"Headphone Jack", NULL, "HPR"},
+ {"IN2L", NULL, "Mic Jack"},
+
+ /* TODO: correct these connections */
+ /* the external mic is stereo */
+ {"IN2L", NULL, "Ext Mic"},
+ {"IN2R", NULL, "Ext Mic"},
+
+ /* Line in */
+ {"IN2L", NULL, "Linein"},
+ {"IN2R", NULL, "Linein"},
+};
+
+static const struct snd_kcontrol_new tegra_aic326x_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Int Spk"),
+ SOC_DAPM_PIN_SWITCH("Earpiece"),
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Ext Mic"),
+ SOC_DAPM_PIN_SWITCH("Linein"),
+};
+
+static int tegra_aic326x_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_aic326x_platform_data *pdata = machine->pdata;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+#endif
+ int ret;
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (machine->codec_info[BASEBAND].i2s_id != -1)
+ i2s->is_dam_used = true;
+#endif
+
+ if (machine->init_done)
+ return 0;
+
+ machine->init_done = true;
+
+ if (machine_is_whistler()) {
+ machine->audio_reg = regulator_get(NULL, "avddio_audio");
+ if (IS_ERR(machine->audio_reg)) {
+ dev_err(card->dev, "cannot get avddio_audio reg\n");
+ ret = PTR_ERR(machine->audio_reg);
+ return ret;
+ }
+
+ ret = regulator_enable(machine->audio_reg);
+ if (ret) {
+ dev_err(card->dev, "cannot enable avddio_audio reg\n");
+ regulator_put(machine->audio_reg);
+ machine->audio_reg = NULL;
+ return ret;
+ }
+ }
+
+ if (gpio_is_valid(pdata->gpio_spkr_en)) {
+ ret = gpio_request(pdata->gpio_spkr_en, "spkr_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get spkr_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_SPKR_EN;
+
+ gpio_direction_output(pdata->gpio_spkr_en, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_hp_mute)) {
+ ret = gpio_request(pdata->gpio_hp_mute, "hp_mute");
+ if (ret) {
+ dev_err(card->dev, "cannot get hp_mute gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_HP_MUTE;
+
+ gpio_direction_output(pdata->gpio_hp_mute, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_int_mic_en)) {
+ ret = gpio_request(pdata->gpio_int_mic_en, "int_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get int_mic_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_INT_MIC_EN;
+
+ /* Disable int mic; enable signal is active-high */
+ gpio_direction_output(pdata->gpio_int_mic_en, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_ext_mic_en)) {
+ ret = gpio_request(pdata->gpio_ext_mic_en, "ext_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get ext_mic_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_EXT_MIC_EN;
+
+ /* Enable ext mic; enable signal is active-low */
+ gpio_direction_output(pdata->gpio_ext_mic_en, 0);
+ }
+
+ ret = snd_soc_add_controls(codec, tegra_aic326x_controls,
+ ARRAY_SIZE(tegra_aic326x_controls));
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_new_controls(dapm, tegra_aic326x_dapm_widgets,
+ ARRAY_SIZE(tegra_aic326x_dapm_widgets));
+
+ snd_soc_dapm_add_routes(dapm, aic326x_audio_map,
+ ARRAY_SIZE(aic326x_audio_map));
+
+ ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
+ &tegra_aic326x_hp_jack);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_SWITCH
+ snd_soc_jack_notifier_register(&tegra_aic326x_hp_jack,
+ &aic326x_headset_switch_nb);
+#else /* GPIO-based headset detection */
+ snd_soc_jack_add_pins(&tegra_aic326x_hp_jack,
+ ARRAY_SIZE(tegra_aic326x_hp_jack_pins),
+ tegra_aic326x_hp_jack_pins);
+#endif
+
+ aic326x_headset_detect(codec, &tegra_aic326x_hp_jack,
+ SND_JACK_HEADSET);
+
+ /* Add call mode switch control */
+ ret = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&tegra_aic326x_call_mode_control,
+ machine));
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_nc_pin(dapm, "IN2L");
+ snd_soc_dapm_nc_pin(dapm, "IN2R");
+ snd_soc_dapm_sync(dapm);
+
+ return 0;
+}
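The call-mode control registered above is defined earlier in this file, outside this hunk; it follows the same info/get/put pattern as the MAX98088 machine driver added later in this patch. A rough sketch, with the handler names assumed rather than quoted:

static struct snd_kcontrol_new tegra_aic326x_call_mode_control = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Call Mode Switch",
	.info = tegra_aic326x_call_mode_info,	/* integer 0..1 */
	.get = tegra_aic326x_call_mode_get,
	.put = tegra_aic326x_call_mode_put,	/* toggles the voice-call routing */
};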
+
+static struct snd_soc_dai_link tegra_aic326x_dai[] = {
+ [DAI_LINK_HIFI] = {
+ .name = "AIC3262",
+ .stream_name = "AIC3262 PCM HIFI",
+ .codec_name = "aic3262-codec.4-0018",
+ .platform_name = "tegra-pcm-audio",
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ .cpu_dai_name = "tegra20-i2s.0",
+#endif
+ .codec_dai_name = "aic3262-asi1",
+ .init = tegra_aic326x_init,
+ .ops = &tegra_aic326x_hifi_ops,
+ },
+ [DAI_LINK_SPDIF] = {
+ .name = "SPDIF",
+ .stream_name = "SPDIF PCM",
+ .codec_name = "spdif-dit.0",
+ .platform_name = "tegra-pcm-audio",
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ .cpu_dai_name = "tegra20-spdif",
+#else
+ .cpu_dai_name = "tegra30-spdif",
+#endif
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_aic326x_spdif_ops,
+ },
+ [DAI_LINK_BTSCO] = {
+ .name = "BT-SCO",
+ .stream_name = "BT SCO PCM",
+ .codec_name = "spdif-dit.1",
+ .platform_name = "tegra-pcm-audio",
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ .cpu_dai_name = "tegra20-i2s.1",
+#endif
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_aic326x_bt_ops,
+ },
+ [DAI_LINK_VOICE_CALL] = {
+ .name = "VOICE CALL",
+ .stream_name = "VOICE CALL PCM",
+ .codec_name = "aic3262-codec.4-0018",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "dit-hifi",
+ .codec_dai_name = "aic3262-asi2",
+ .ops = &tegra_aic326x_voice_call_ops,
+ },
+ [DAI_LINK_BT_VOICE_CALL] = {
+ .name = "BT VOICE CALL",
+ .stream_name = "BT VOICE CALL PCM",
+ .codec_name = "spdif-dit.2",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "dit-hifi",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_aic326x_bt_voice_call_ops,
+ },
+};
+
+static struct snd_soc_card snd_soc_tegra_aic326x = {
+ .name = "tegra-aic326x",
+ .dai_link = tegra_aic326x_dai,
+ .num_links = ARRAY_SIZE(tegra_aic326x_dai),
+};
+
+static __devinit int tegra_aic326x_driver_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_tegra_aic326x;
+ struct tegra_aic326x *machine;
+ struct tegra_aic326x_platform_data *pdata;
+ int ret, i;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ machine = kzalloc(sizeof(struct tegra_aic326x), GFP_KERNEL);
+ if (!machine) {
+ dev_err(&pdev->dev, "Can't allocate tegra_aic326x struct\n");
+ return -ENOMEM;
+ }
+
+ machine->pdata = pdata;
+
+ ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
+ if (ret)
+ goto err_free_machine;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ for (i = 0; i < NUM_I2S_DEVICES; i++)
+ machine->codec_info[i].i2s_id = pdata->audio_port_id[i];
+
+ machine->codec_info[BASEBAND].rate = pdata->baseband_param.rate;
+ machine->codec_info[BASEBAND].channels = pdata->baseband_param.channels;
+
+ tegra_aic326x_dai[DAI_LINK_HIFI].cpu_dai_name =
+ tegra_aic326x_i2s_dai_name[machine->codec_info[HIFI_CODEC].i2s_id];
+
+ tegra_aic326x_dai[DAI_LINK_BTSCO].cpu_dai_name =
+ tegra_aic326x_i2s_dai_name[machine->codec_info[BT_SCO].i2s_id];
+#endif
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
+ goto err_fini_utils;
+ }
+
+ if (!card->instantiated) {
+ dev_err(&pdev->dev, "No TI AIC3262 codec\n");
+ ret = -ENODEV;
+ goto err_unregister_card;
+ }
+
+#ifdef CONFIG_SWITCH
+ /* Add h2w switch class support */
+ ret = switch_dev_register(&aic326x_wired_switch_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "not able to register switch device %d\n",
+ ret);
+ goto err_unregister_card;
+ }
+#endif
+
+ return 0;
+
+err_unregister_card:
+ snd_soc_unregister_card(card);
+err_fini_utils:
+ tegra_asoc_utils_fini(&machine->util_data);
+err_free_machine:
+ kfree(machine);
+ return ret;
+}
+
+static int __devexit tegra_aic326x_driver_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tegra_aic326x *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_aic326x_platform_data *pdata = machine->pdata;
+
+ snd_soc_unregister_card(card);
+
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&aic326x_wired_switch_dev);
+#endif
+
+ tegra_asoc_utils_fini(&machine->util_data);
+
+ if (machine->gpio_requested & GPIO_EXT_MIC_EN)
+ gpio_free(pdata->gpio_ext_mic_en);
+ if (machine->gpio_requested & GPIO_INT_MIC_EN)
+ gpio_free(pdata->gpio_int_mic_en);
+ if (machine->gpio_requested & GPIO_HP_MUTE)
+ gpio_free(pdata->gpio_hp_mute);
+ if (machine->gpio_requested & GPIO_SPKR_EN)
+ gpio_free(pdata->gpio_spkr_en);
+
+ kfree(machine);
+
+ return 0;
+}
+
+static struct platform_driver tegra_aic326x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = tegra_aic326x_driver_probe,
+ .remove = __devexit_p(tegra_aic326x_driver_remove),
+};
+
+static int __init tegra_aic326x_modinit(void)
+{
+ return platform_driver_register(&tegra_aic326x_driver);
+}
+module_init(tegra_aic326x_modinit);
+
+static void __exit tegra_aic326x_modexit(void)
+{
+ platform_driver_unregister(&tegra_aic326x_driver);
+}
+module_exit(tegra_aic326x_modexit);
+
+/* Module information */
+MODULE_AUTHOR("Vinod G. <vinodg@nvidia.com>");
+MODULE_DESCRIPTION("Tegra+AIC3262 machine ASoC driver");
+MODULE_DESCRIPTION("Tegra ALSA SoC");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/tegra/tegra_asoc_utils.c b/sound/soc/tegra/tegra_asoc_utils.c
index dfa85cbb05c8..c517bd04e2da 100644
--- a/sound/soc/tegra/tegra_asoc_utils.c
+++ b/sound/soc/tegra/tegra_asoc_utils.c
@@ -39,7 +39,11 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
case 22050:
case 44100:
case 88200:
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
new_baseclock = 56448000;
+#else
+ new_baseclock = 564480000;
+#endif
break;
case 8000:
case 16000:
@@ -47,7 +51,11 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
case 48000:
case 64000:
case 96000:
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
new_baseclock = 73728000;
+#else
+ new_baseclock = 552960000;
+#endif
break;
default:
return -EINVAL;
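As a quick sanity check of the new Tegra30 base clocks: with a 256*fs MCLK, the 44.1 kHz family needs 256 * 44100 = 11289600 Hz, and 564480000 / 11289600 = 50 (versus 56448000 / 11289600 = 5 on Tegra20); the 48 kHz family needs 256 * 48000 = 12288000 Hz, and 552960000 / 12288000 = 45 (versus 73728000 / 12288000 = 6). In both families pll_a_out0 therefore remains an integer division of pll_a.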
@@ -58,6 +66,10 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
if (!clk_change)
return 0;
+ /* Don't change the rate while a DAI link has it locked */
+ if (data->lock_count)
+ return -EINVAL;
+
data->set_baseclock = 0;
data->set_mclk = 0;
@@ -77,7 +89,7 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
return err;
}
- /* Don't set cdev1 rate; its locked to pll_a_out0 */
+ /* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
err = clk_enable(data->clk_pll_a);
if (err) {
@@ -104,6 +116,51 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
}
EXPORT_SYMBOL_GPL(tegra_asoc_utils_set_rate);
+void tegra_asoc_utils_lock_clk_rate(struct tegra_asoc_utils_data *data,
+ int lock)
+{
+ if (lock)
+ data->lock_count++;
+ else if (data->lock_count)
+ data->lock_count--;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_utils_lock_clk_rate);
+
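The lock is a bare counter, not a mutex: a stream takes it in hw_params once its rate is configured (or acceptably reused) and drops it in hw_free, and tegra_asoc_utils_set_rate() above refuses to re-program pll_a while the count is non-zero. Usage sketch, mirroring the machine drivers in this patch:

/* hw_params */
err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
if (err < 0)
	return err;	/* or fall back to the already-set MCLK, as above */
tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);

/* hw_free */
tegra_asoc_utils_lock_clk_rate(&machine->util_data, 0);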
+int tegra_asoc_utils_clk_enable(struct tegra_asoc_utils_data *data)
+{
+ int err;
+
+ err = clk_enable(data->clk_pll_a);
+ if (err) {
+ dev_err(data->dev, "Can't enable pll_a: %d\n", err);
+ return err;
+ }
+
+ err = clk_enable(data->clk_pll_a_out0);
+ if (err) {
+ dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err);
+ return err;
+ }
+
+ err = clk_enable(data->clk_cdev1);
+ if (err) {
+ dev_err(data->dev, "Can't enable cdev1: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_utils_clk_enable);
+
+int tegra_asoc_utils_clk_disable(struct tegra_asoc_utils_data *data)
+{
+ clk_disable(data->clk_cdev1);
+ clk_disable(data->clk_pll_a_out0);
+ clk_disable(data->clk_pll_a);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_utils_clk_disable);
+
int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
struct device *dev)
{
@@ -125,15 +182,75 @@ int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
goto err_put_pll_a;
}
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
data->clk_cdev1 = clk_get_sys(NULL, "cdev1");
+#else
+ data->clk_cdev1 = clk_get_sys("extern1", NULL);
+#endif
if (IS_ERR(data->clk_cdev1)) {
dev_err(data->dev, "Can't retrieve clk cdev1\n");
ret = PTR_ERR(data->clk_cdev1);
goto err_put_pll_a_out0;
}
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ data->clk_out1 = ERR_PTR(-ENOENT);
+#else
+ data->clk_out1 = clk_get_sys("clk_out_1", "extern1");
+ if (IS_ERR(data->clk_out1)) {
+ dev_err(data->dev, "Can't retrieve clk out1\n");
+ ret = PTR_ERR(data->clk_out1);
+ goto err_put_cdev1;
+ }
+#endif
+
+ ret = clk_enable(data->clk_pll_a);
+ if (ret) {
+ dev_err(data->dev, "Can't enable clk pll_a");
+ goto err_put_out1;
+ }
+
+ ret = clk_enable(data->clk_pll_a_out0);
+ if (ret) {
+ dev_err(data->dev, "Can't enable clk pll_a_out0");
+ goto err_put_out1;
+ }
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ ret = clk_set_parent(data->clk_cdev1, data->clk_pll_a_out0);
+ if (ret) {
+ dev_err(data->dev, "Can't set clk cdev1/extern1 parent");
+ goto err_put_out1;
+ }
+#endif
+
+ ret = clk_enable(data->clk_cdev1);
+ if (ret) {
+ dev_err(data->dev, "Can't enable clk cdev1/extern1");
+ goto err_put_out1;
+ }
+
+ if (!IS_ERR(data->clk_out1)) {
+ ret = clk_enable(data->clk_out1);
+ if (ret) {
+ dev_err(data->dev, "Can't enable clk out1");
+ goto err_put_out1;
+ }
+ }
+
+ ret = tegra_asoc_utils_set_rate(data, 48000, 256 * 48000);
+ if (ret)
+ goto err_put_out1;
+
return 0;
+err_put_out1:
+ if (!IS_ERR(data->clk_out1))
+ clk_put(data->clk_out1);
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+err_put_cdev1:
+#endif
+ clk_put(data->clk_cdev1);
err_put_pll_a_out0:
clk_put(data->clk_pll_a_out0);
err_put_pll_a:
@@ -145,6 +262,8 @@ EXPORT_SYMBOL_GPL(tegra_asoc_utils_init);
void tegra_asoc_utils_fini(struct tegra_asoc_utils_data *data)
{
+ if (!IS_ERR(data->clk_out1))
+ clk_put(data->clk_out1);
clk_put(data->clk_cdev1);
clk_put(data->clk_pll_a_out0);
clk_put(data->clk_pll_a);
diff --git a/sound/soc/tegra/tegra_asoc_utils.h b/sound/soc/tegra/tegra_asoc_utils.h
index 4818195da25c..72d3994a935f 100644
--- a/sound/soc/tegra/tegra_asoc_utils.h
+++ b/sound/soc/tegra/tegra_asoc_utils.h
@@ -31,15 +31,21 @@ struct tegra_asoc_utils_data {
struct clk *clk_pll_a;
struct clk *clk_pll_a_out0;
struct clk *clk_cdev1;
+ struct clk *clk_out1;
int set_baseclock;
int set_mclk;
+ int lock_count;
};
int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
int mclk);
+void tegra_asoc_utils_lock_clk_rate(struct tegra_asoc_utils_data *data,
+ int lock);
int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
struct device *dev);
void tegra_asoc_utils_fini(struct tegra_asoc_utils_data *data);
+int tegra_asoc_utils_clk_enable(struct tegra_asoc_utils_data *data);
+int tegra_asoc_utils_clk_disable(struct tegra_asoc_utils_data *data);
#endif
diff --git a/sound/soc/tegra/tegra_das.c b/sound/soc/tegra/tegra_das.c
deleted file mode 100644
index 9f24ef73f2cb..000000000000
--- a/sound/soc/tegra/tegra_das.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * tegra_das.c - Tegra DAS driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010 - NVIDIA, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <mach/iomap.h>
-#include <sound/soc.h>
-#include "tegra_das.h"
-
-#define DRV_NAME "tegra-das"
-
-static struct tegra_das *das;
-
-static inline void tegra_das_write(u32 reg, u32 val)
-{
- __raw_writel(val, das->regs + reg);
-}
-
-static inline u32 tegra_das_read(u32 reg)
-{
- return __raw_readl(das->regs + reg);
-}
-
-int tegra_das_connect_dap_to_dac(int dap, int dac)
-{
- u32 addr;
- u32 reg;
-
- if (!das)
- return -ENODEV;
-
- addr = TEGRA_DAS_DAP_CTRL_SEL +
- (dap * TEGRA_DAS_DAP_CTRL_SEL_STRIDE);
- reg = dac << TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P;
-
- tegra_das_write(addr, reg);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_das_connect_dap_to_dac);
-
-int tegra_das_connect_dap_to_dap(int dap, int otherdap, int master,
- int sdata1rx, int sdata2rx)
-{
- u32 addr;
- u32 reg;
-
- if (!das)
- return -ENODEV;
-
- addr = TEGRA_DAS_DAP_CTRL_SEL +
- (dap * TEGRA_DAS_DAP_CTRL_SEL_STRIDE);
- reg = otherdap << TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P |
- !!sdata2rx << TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P |
- !!sdata1rx << TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P |
- !!master << TEGRA_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P;
-
- tegra_das_write(addr, reg);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_das_connect_dap_to_dap);
-
-int tegra_das_connect_dac_to_dap(int dac, int dap)
-{
- u32 addr;
- u32 reg;
-
- if (!das)
- return -ENODEV;
-
- addr = TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL +
- (dac * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
- reg = dap << TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P |
- dap << TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P |
- dap << TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P;
-
- tegra_das_write(addr, reg);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_das_connect_dac_to_dap);
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_das_show(struct seq_file *s, void *unused)
-{
- int i;
- u32 addr;
- u32 reg;
-
- for (i = 0; i < TEGRA_DAS_DAP_CTRL_SEL_COUNT; i++) {
- addr = TEGRA_DAS_DAP_CTRL_SEL +
- (i * TEGRA_DAS_DAP_CTRL_SEL_STRIDE);
- reg = tegra_das_read(addr);
- seq_printf(s, "TEGRA_DAS_DAP_CTRL_SEL[%d] = %08x\n", i, reg);
- }
-
- for (i = 0; i < TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT; i++) {
- addr = TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL +
- (i * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
- reg = tegra_das_read(addr);
- seq_printf(s, "TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL[%d] = %08x\n",
- i, reg);
- }
-
- return 0;
-}
-
-static int tegra_das_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, tegra_das_show, inode->i_private);
-}
-
-static const struct file_operations tegra_das_debug_fops = {
- .open = tegra_das_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void tegra_das_debug_add(struct tegra_das *das)
-{
- das->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
- snd_soc_debugfs_root, das,
- &tegra_das_debug_fops);
-}
-
-static void tegra_das_debug_remove(struct tegra_das *das)
-{
- if (das->debug)
- debugfs_remove(das->debug);
-}
-#else
-static inline void tegra_das_debug_add(struct tegra_das *das)
-{
-}
-
-static inline void tegra_das_debug_remove(struct tegra_das *das)
-{
-}
-#endif
-
-static int __devinit tegra_das_probe(struct platform_device *pdev)
-{
- struct resource *res, *region;
- int ret = 0;
-
- if (das)
- return -ENODEV;
-
- das = kzalloc(sizeof(struct tegra_das), GFP_KERNEL);
- if (!das) {
- dev_err(&pdev->dev, "Can't allocate tegra_das\n");
- ret = -ENOMEM;
- goto exit;
- }
- das->dev = &pdev->dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "No memory resource\n");
- ret = -ENODEV;
- goto err_free;
- }
-
- region = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!region) {
- dev_err(&pdev->dev, "Memory region already claimed\n");
- ret = -EBUSY;
- goto err_free;
- }
-
- das->regs = ioremap(res->start, resource_size(res));
- if (!das->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err_release;
- }
-
- tegra_das_debug_add(das);
-
- platform_set_drvdata(pdev, das);
-
- return 0;
-
-err_release:
- release_mem_region(res->start, resource_size(res));
-err_free:
- kfree(das);
- das = 0;
-exit:
- return ret;
-}
-
-static int __devexit tegra_das_remove(struct platform_device *pdev)
-{
- struct resource *res;
-
- if (!das)
- return -ENODEV;
-
- platform_set_drvdata(pdev, NULL);
-
- tegra_das_debug_remove(das);
-
- iounmap(das->regs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(das);
- das = 0;
-
- return 0;
-}
-
-static struct platform_driver tegra_das_driver = {
- .probe = tegra_das_probe,
- .remove = __devexit_p(tegra_das_remove),
- .driver = {
- .name = DRV_NAME,
- },
-};
-
-static int __init tegra_das_modinit(void)
-{
- return platform_driver_register(&tegra_das_driver);
-}
-module_init(tegra_das_modinit);
-
-static void __exit tegra_das_modexit(void)
-{
- platform_driver_unregister(&tegra_das_driver);
-}
-module_exit(tegra_das_modexit);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra DAS driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_das.h b/sound/soc/tegra/tegra_das.h
deleted file mode 100644
index 2c96c7b3c459..000000000000
--- a/sound/soc/tegra/tegra_das.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * tegra_das.h - Definitions for Tegra DAS driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010 - NVIDIA, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __TEGRA_DAS_H__
-#define __TEGRA_DAS_H__
-
-/* Register TEGRA_DAS_DAP_CTRL_SEL */
-#define TEGRA_DAS_DAP_CTRL_SEL 0x00
-#define TEGRA_DAS_DAP_CTRL_SEL_COUNT 5
-#define TEGRA_DAS_DAP_CTRL_SEL_STRIDE 4
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P 31
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_MS_SEL_S 1
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P 30
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_S 1
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P 29
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_S 1
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P 0
-#define TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_S 5
-
-/* Values for field TEGRA_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL */
-#define TEGRA_DAS_DAP_SEL_DAC1 0
-#define TEGRA_DAS_DAP_SEL_DAC2 1
-#define TEGRA_DAS_DAP_SEL_DAC3 2
-#define TEGRA_DAS_DAP_SEL_DAP1 16
-#define TEGRA_DAS_DAP_SEL_DAP2 17
-#define TEGRA_DAS_DAP_SEL_DAP3 18
-#define TEGRA_DAS_DAP_SEL_DAP4 19
-#define TEGRA_DAS_DAP_SEL_DAP5 20
-
-/* Register TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL */
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL 0x40
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT 3
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE 4
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P 28
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_S 4
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P 24
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_S 4
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P 0
-#define TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_S 4
-
-/*
- * Values for:
- * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL
- * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL
- * TEGRA_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL
- */
-#define TEGRA_DAS_DAC_SEL_DAP1 0
-#define TEGRA_DAS_DAC_SEL_DAP2 1
-#define TEGRA_DAS_DAC_SEL_DAP3 2
-#define TEGRA_DAS_DAC_SEL_DAP4 3
-#define TEGRA_DAS_DAC_SEL_DAP5 4
-
-/*
- * Names/IDs of the DACs/DAPs.
- */
-
-#define TEGRA_DAS_DAP_ID_1 0
-#define TEGRA_DAS_DAP_ID_2 1
-#define TEGRA_DAS_DAP_ID_3 2
-#define TEGRA_DAS_DAP_ID_4 3
-#define TEGRA_DAS_DAP_ID_5 4
-
-#define TEGRA_DAS_DAC_ID_1 0
-#define TEGRA_DAS_DAC_ID_2 1
-#define TEGRA_DAS_DAC_ID_3 2
-
-struct tegra_das {
- struct device *dev;
- void __iomem *regs;
- struct dentry *debug;
-};
-
-/*
- * Terminology:
- * DAS: Digital audio switch (HW module controlled by this driver)
- * DAP: Digital audio port (port/pins on Tegra device)
- * DAC: Digital audio controller (e.g. I2S or AC97 controller elsewhere)
- *
- * The Tegra DAS is a mux/cross-bar which can connect each DAP to a specific
- * DAC, or another DAP. When DAPs are connected, one must be the master and
- * one the slave. Each DAC allows selection of a specific DAP for input, to
- * cater for the case where N DAPs are connected to 1 DAC for broadcast
- * output.
- *
- * This driver is dumb; no attempt is made to ensure that a valid routing
- * configuration is programmed.
- */
-
-/*
- * Connect a DAP to to a DAC
- * dap_id: DAP to connect: TEGRA_DAS_DAP_ID_*
- * dac_sel: DAC to connect to: TEGRA_DAS_DAP_SEL_DAC*
- */
-extern int tegra_das_connect_dap_to_dac(int dap_id, int dac_sel);
-
-/*
- * Connect a DAP to to another DAP
- * dap_id: DAP to connect: TEGRA_DAS_DAP_ID_*
- * other_dap_sel: DAP to connect to: TEGRA_DAS_DAP_SEL_DAP*
- * master: Is this DAP the master (1) or slave (0)
- * sdata1rx: Is this DAP's SDATA1 pin RX (1) or TX (0)
- * sdata2rx: Is this DAP's SDATA2 pin RX (1) or TX (0)
- */
-extern int tegra_das_connect_dap_to_dap(int dap_id, int other_dap_sel,
- int master, int sdata1rx,
- int sdata2rx);
-
-/*
- * Connect a DAC's input to a DAP
- * (DAC outputs are selected by the DAP)
- * dac_id: DAC ID to connect: TEGRA_DAS_DAC_ID_*
- * dap_sel: DAP to receive input from: TEGRA_DAS_DAC_SEL_DAP*
- */
-extern int tegra_das_connect_dac_to_dap(int dac_id, int dap_sel);
-
-#endif
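One caller of this API visible in this same patch is the 1:1 DAP-to-DAC mapping set up by the (also removed) tegra_i2s.c probe, condensed here for reference:

ret = tegra_das_connect_dap_to_dac(TEGRA_DAS_DAP_ID_1 + pdev->id,
				   TEGRA_DAS_DAP_SEL_DAC1 + pdev->id);
if (!ret)
	ret = tegra_das_connect_dac_to_dap(TEGRA_DAS_DAC_ID_1 + pdev->id,
					   TEGRA_DAS_DAC_SEL_DAP1 + pdev->id);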
diff --git a/sound/soc/tegra/tegra_i2s.c b/sound/soc/tegra/tegra_i2s.c
deleted file mode 100644
index f36b9969cfec..000000000000
--- a/sound/soc/tegra/tegra_i2s.c
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- * tegra_i2s.c - Tegra I2S driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010 - NVIDIA, Inc.
- *
- * Based on code copyright/by:
- *
- * Copyright (c) 2009-2010, NVIDIA Corporation.
- * Scott Peterson <speterson@nvidia.com>
- *
- * Copyright (C) 2010 Google, Inc.
- * Iliyan Malchev <malchev@google.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <mach/iomap.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "tegra_das.h"
-#include "tegra_i2s.h"
-
-#define DRV_NAME "tegra-i2s"
-
-static inline void tegra_i2s_write(struct tegra_i2s *i2s, u32 reg, u32 val)
-{
- __raw_writel(val, i2s->regs + reg);
-}
-
-static inline u32 tegra_i2s_read(struct tegra_i2s *i2s, u32 reg)
-{
- return __raw_readl(i2s->regs + reg);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_i2s_show(struct seq_file *s, void *unused)
-{
-#define REG(r) { r, #r }
- static const struct {
- int offset;
- const char *name;
- } regs[] = {
- REG(TEGRA_I2S_CTRL),
- REG(TEGRA_I2S_STATUS),
- REG(TEGRA_I2S_TIMING),
- REG(TEGRA_I2S_FIFO_SCR),
- REG(TEGRA_I2S_PCM_CTRL),
- REG(TEGRA_I2S_NW_CTRL),
- REG(TEGRA_I2S_TDM_CTRL),
- REG(TEGRA_I2S_TDM_TX_RX_CTRL),
- };
-#undef REG
-
- struct tegra_i2s *i2s = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(regs); i++) {
- u32 val = tegra_i2s_read(i2s, regs[i].offset);
- seq_printf(s, "%s = %08x\n", regs[i].name, val);
- }
-
- return 0;
-}
-
-static int tegra_i2s_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, tegra_i2s_show, inode->i_private);
-}
-
-static const struct file_operations tegra_i2s_debug_fops = {
- .open = tegra_i2s_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id)
-{
- char name[] = DRV_NAME ".0";
-
- snprintf(name, sizeof(name), DRV_NAME".%1d", id);
- i2s->debug = debugfs_create_file(name, S_IRUGO, snd_soc_debugfs_root,
- i2s, &tegra_i2s_debug_fops);
-}
-
-static void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
-{
- if (i2s->debug)
- debugfs_remove(i2s->debug);
-}
-#else
-static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id)
-{
-}
-
-static inline void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
-{
-}
-#endif
-
-static int tegra_i2s_set_fmt(struct snd_soc_dai *dai,
- unsigned int fmt)
-{
- struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- break;
- default:
- return -EINVAL;
- }
-
- i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_MASTER_ENABLE;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_MASTER_ENABLE;
- break;
- case SND_SOC_DAIFMT_CBM_CFM:
- break;
- default:
- return -EINVAL;
- }
-
- i2s->reg_ctrl &= ~(TEGRA_I2S_CTRL_BIT_FORMAT_MASK |
- TEGRA_I2S_CTRL_LRCK_MASK);
- switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
- case SND_SOC_DAIFMT_DSP_A:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_DSP;
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
- break;
- case SND_SOC_DAIFMT_DSP_B:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_DSP;
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_R_LOW;
- break;
- case SND_SOC_DAIFMT_I2S:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_I2S;
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
- break;
- case SND_SOC_DAIFMT_RIGHT_J:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_RJM;
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
- break;
- case SND_SOC_DAIFMT_LEFT_J:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_LJM;
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tegra_i2s_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct device *dev = substream->pcm->card->dev;
- struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- u32 reg;
- int ret, sample_size, srate, i2sclock, bitcnt;
-
- i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_BIT_SIZE_MASK;
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_16;
- sample_size = 16;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_24;
- sample_size = 24;
- break;
- case SNDRV_PCM_FORMAT_S32_LE:
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_32;
- sample_size = 32;
- break;
- default:
- return -EINVAL;
- }
-
- srate = params_rate(params);
-
- /* Final "* 2" required by Tegra hardware */
- i2sclock = srate * params_channels(params) * sample_size * 2;
-
- ret = clk_set_rate(i2s->clk_i2s, i2sclock);
- if (ret) {
- dev_err(dev, "Can't set I2S clock rate: %d\n", ret);
- return ret;
- }
-
- bitcnt = (i2sclock / (2 * srate)) - 1;
- if (bitcnt < 0 || bitcnt > TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US)
- return -EINVAL;
- reg = bitcnt << TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
-
- if (i2sclock % (2 * srate))
- reg |= TEGRA_I2S_TIMING_NON_SYM_ENABLE;
-
- if (!i2s->clk_refs)
- clk_enable(i2s->clk_i2s);
-
- tegra_i2s_write(i2s, TEGRA_I2S_TIMING, reg);
-
- tegra_i2s_write(i2s, TEGRA_I2S_FIFO_SCR,
- TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
- TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
-
- if (!i2s->clk_refs)
- clk_disable(i2s->clk_i2s);
-
- return 0;
-}
-
-static void tegra_i2s_start_playback(struct tegra_i2s *i2s)
-{
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_FIFO1_ENABLE;
- tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
-}
-
-static void tegra_i2s_stop_playback(struct tegra_i2s *i2s)
-{
- i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_FIFO1_ENABLE;
- tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
-}
-
-static void tegra_i2s_start_capture(struct tegra_i2s *i2s)
-{
- i2s->reg_ctrl |= TEGRA_I2S_CTRL_FIFO2_ENABLE;
- tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
-}
-
-static void tegra_i2s_stop_capture(struct tegra_i2s *i2s)
-{
- i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_FIFO2_ENABLE;
- tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
-}
-
-static int tegra_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- case SNDRV_PCM_TRIGGER_RESUME:
- if (!i2s->clk_refs)
- clk_enable(i2s->clk_i2s);
- i2s->clk_refs++;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- tegra_i2s_start_playback(i2s);
- else
- tegra_i2s_start_capture(i2s);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- tegra_i2s_stop_playback(i2s);
- else
- tegra_i2s_stop_capture(i2s);
- i2s->clk_refs--;
- if (!i2s->clk_refs)
- clk_disable(i2s->clk_i2s);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tegra_i2s_probe(struct snd_soc_dai *dai)
-{
- struct tegra_i2s * i2s = snd_soc_dai_get_drvdata(dai);
-
- dai->capture_dma_data = &i2s->capture_dma_data;
- dai->playback_dma_data = &i2s->playback_dma_data;
-
- return 0;
-}
-
-static struct snd_soc_dai_ops tegra_i2s_dai_ops = {
- .set_fmt = tegra_i2s_set_fmt,
- .hw_params = tegra_i2s_hw_params,
- .trigger = tegra_i2s_trigger,
-};
-
-struct snd_soc_dai_driver tegra_i2s_dai[] = {
- {
- .name = DRV_NAME ".0",
- .probe = tegra_i2s_probe,
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .capture = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &tegra_i2s_dai_ops,
- .symmetric_rates = 1,
- },
- {
- .name = DRV_NAME ".1",
- .probe = tegra_i2s_probe,
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .capture = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &tegra_i2s_dai_ops,
- .symmetric_rates = 1,
- },
-};
-
-static __devinit int tegra_i2s_platform_probe(struct platform_device *pdev)
-{
- struct tegra_i2s * i2s;
- struct resource *mem, *memregion, *dmareq;
- int ret;
-
- if ((pdev->id < 0) ||
- (pdev->id >= ARRAY_SIZE(tegra_i2s_dai))) {
- dev_err(&pdev->dev, "ID %d out of range\n", pdev->id);
- return -EINVAL;
- }
-
- /*
- * FIXME: Until a codec driver exists for the tegra DAS, hard-code a
- * 1:1 mapping between audio controllers and audio ports.
- */
- ret = tegra_das_connect_dap_to_dac(TEGRA_DAS_DAP_ID_1 + pdev->id,
- TEGRA_DAS_DAP_SEL_DAC1 + pdev->id);
- if (ret) {
- dev_err(&pdev->dev, "Can't set up DAP connection\n");
- return ret;
- }
- ret = tegra_das_connect_dac_to_dap(TEGRA_DAS_DAC_ID_1 + pdev->id,
- TEGRA_DAS_DAC_SEL_DAP1 + pdev->id);
- if (ret) {
- dev_err(&pdev->dev, "Can't set up DAC connection\n");
- return ret;
- }
-
- i2s = kzalloc(sizeof(struct tegra_i2s), GFP_KERNEL);
- if (!i2s) {
- dev_err(&pdev->dev, "Can't allocate tegra_i2s\n");
- ret = -ENOMEM;
- goto exit;
- }
- dev_set_drvdata(&pdev->dev, i2s);
-
- i2s->clk_i2s = clk_get(&pdev->dev, NULL);
- if (IS_ERR(i2s->clk_i2s)) {
- dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
- ret = PTR_ERR(i2s->clk_i2s);
- goto err_free;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No memory resource\n");
- ret = -ENODEV;
- goto err_clk_put;
- }
-
- dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmareq) {
- dev_err(&pdev->dev, "No DMA resource\n");
- ret = -ENODEV;
- goto err_clk_put;
- }
-
- memregion = request_mem_region(mem->start, resource_size(mem),
- DRV_NAME);
- if (!memregion) {
- dev_err(&pdev->dev, "Memory region already claimed\n");
- ret = -EBUSY;
- goto err_clk_put;
- }
-
- i2s->regs = ioremap(mem->start, resource_size(mem));
- if (!i2s->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err_release;
- }
-
- i2s->capture_dma_data.addr = mem->start + TEGRA_I2S_FIFO2;
- i2s->capture_dma_data.wrap = 4;
- i2s->capture_dma_data.width = 32;
- i2s->capture_dma_data.req_sel = dmareq->start;
-
- i2s->playback_dma_data.addr = mem->start + TEGRA_I2S_FIFO1;
- i2s->playback_dma_data.wrap = 4;
- i2s->playback_dma_data.width = 32;
- i2s->playback_dma_data.req_sel = dmareq->start;
-
- i2s->reg_ctrl = TEGRA_I2S_CTRL_FIFO_FORMAT_PACKED;
-
- ret = snd_soc_register_dai(&pdev->dev, &tegra_i2s_dai[pdev->id]);
- if (ret) {
- dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- tegra_i2s_debug_add(i2s, pdev->id);
-
- return 0;
-
-err_unmap:
- iounmap(i2s->regs);
-err_release:
- release_mem_region(mem->start, resource_size(mem));
-err_clk_put:
- clk_put(i2s->clk_i2s);
-err_free:
- kfree(i2s);
-exit:
- return ret;
-}
-
-static int __devexit tegra_i2s_platform_remove(struct platform_device *pdev)
-{
- struct tegra_i2s *i2s = dev_get_drvdata(&pdev->dev);
- struct resource *res;
-
- snd_soc_unregister_dai(&pdev->dev);
-
- tegra_i2s_debug_remove(i2s);
-
- iounmap(i2s->regs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- clk_put(i2s->clk_i2s);
-
- kfree(i2s);
-
- return 0;
-}
-
-static struct platform_driver tegra_i2s_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = tegra_i2s_platform_probe,
- .remove = __devexit_p(tegra_i2s_platform_remove),
-};
-
-static int __init snd_tegra_i2s_init(void)
-{
- return platform_driver_register(&tegra_i2s_driver);
-}
-module_init(snd_tegra_i2s_init);
-
-static void __exit snd_tegra_i2s_exit(void)
-{
- platform_driver_unregister(&tegra_i2s_driver);
-}
-module_exit(snd_tegra_i2s_exit);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra I2S ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_i2s.h b/sound/soc/tegra/tegra_i2s.h
deleted file mode 100644
index 2b38a096f46c..000000000000
--- a/sound/soc/tegra/tegra_i2s.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * tegra_i2s.h - Definitions for Tegra I2S driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010 - NVIDIA, Inc.
- *
- * Based on code copyright/by:
- *
- * Copyright (c) 2009-2010, NVIDIA Corporation.
- * Scott Peterson <speterson@nvidia.com>
- *
- * Copyright (C) 2010 Google, Inc.
- * Iliyan Malchev <malchev@google.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __TEGRA_I2S_H__
-#define __TEGRA_I2S_H__
-
-#include "tegra_pcm.h"
-
-/* Register offsets from TEGRA_I2S1_BASE and TEGRA_I2S2_BASE */
-
-#define TEGRA_I2S_CTRL 0x00
-#define TEGRA_I2S_STATUS 0x04
-#define TEGRA_I2S_TIMING 0x08
-#define TEGRA_I2S_FIFO_SCR 0x0c
-#define TEGRA_I2S_PCM_CTRL 0x10
-#define TEGRA_I2S_NW_CTRL 0x14
-#define TEGRA_I2S_TDM_CTRL 0x20
-#define TEGRA_I2S_TDM_TX_RX_CTRL 0x24
-#define TEGRA_I2S_FIFO1 0x40
-#define TEGRA_I2S_FIFO2 0x80
-
-/* Fields in TEGRA_I2S_CTRL */
-
-#define TEGRA_I2S_CTRL_FIFO2_TX_ENABLE (1 << 30)
-#define TEGRA_I2S_CTRL_FIFO1_ENABLE (1 << 29)
-#define TEGRA_I2S_CTRL_FIFO2_ENABLE (1 << 28)
-#define TEGRA_I2S_CTRL_FIFO1_RX_ENABLE (1 << 27)
-#define TEGRA_I2S_CTRL_FIFO_LPBK_ENABLE (1 << 26)
-#define TEGRA_I2S_CTRL_MASTER_ENABLE (1 << 25)
-
-#define TEGRA_I2S_LRCK_LEFT_LOW 0
-#define TEGRA_I2S_LRCK_RIGHT_LOW 1
-
-#define TEGRA_I2S_CTRL_LRCK_SHIFT 24
-#define TEGRA_I2S_CTRL_LRCK_MASK (1 << TEGRA_I2S_CTRL_LRCK_SHIFT)
-#define TEGRA_I2S_CTRL_LRCK_L_LOW (TEGRA_I2S_LRCK_LEFT_LOW << TEGRA_I2S_CTRL_LRCK_SHIFT)
-#define TEGRA_I2S_CTRL_LRCK_R_LOW (TEGRA_I2S_LRCK_RIGHT_LOW << TEGRA_I2S_CTRL_LRCK_SHIFT)
-
-#define TEGRA_I2S_BIT_FORMAT_I2S 0
-#define TEGRA_I2S_BIT_FORMAT_RJM 1
-#define TEGRA_I2S_BIT_FORMAT_LJM 2
-#define TEGRA_I2S_BIT_FORMAT_DSP 3
-
-#define TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT 10
-#define TEGRA_I2S_CTRL_BIT_FORMAT_MASK (3 << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_FORMAT_I2S (TEGRA_I2S_BIT_FORMAT_I2S << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_FORMAT_RJM (TEGRA_I2S_BIT_FORMAT_RJM << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_FORMAT_LJM (TEGRA_I2S_BIT_FORMAT_LJM << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_FORMAT_DSP (TEGRA_I2S_BIT_FORMAT_DSP << TEGRA_I2S_CTRL_BIT_FORMAT_SHIFT)
-
-#define TEGRA_I2S_BIT_SIZE_16 0
-#define TEGRA_I2S_BIT_SIZE_20 1
-#define TEGRA_I2S_BIT_SIZE_24 2
-#define TEGRA_I2S_BIT_SIZE_32 3
-
-#define TEGRA_I2S_CTRL_BIT_SIZE_SHIFT 8
-#define TEGRA_I2S_CTRL_BIT_SIZE_MASK (3 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_SIZE_16 (TEGRA_I2S_BIT_SIZE_16 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_SIZE_20 (TEGRA_I2S_BIT_SIZE_20 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_SIZE_24 (TEGRA_I2S_BIT_SIZE_24 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
-#define TEGRA_I2S_CTRL_BIT_SIZE_32 (TEGRA_I2S_BIT_SIZE_32 << TEGRA_I2S_CTRL_BIT_SIZE_SHIFT)
-
-#define TEGRA_I2S_FIFO_16_LSB 0
-#define TEGRA_I2S_FIFO_20_LSB 1
-#define TEGRA_I2S_FIFO_24_LSB 2
-#define TEGRA_I2S_FIFO_32 3
-#define TEGRA_I2S_FIFO_PACKED 7
-
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT 4
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_MASK (7 << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_16_LSB (TEGRA_I2S_FIFO_16_LSB << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_20_LSB (TEGRA_I2S_FIFO_20_LSB << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_24_LSB (TEGRA_I2S_FIFO_24_LSB << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_32 (TEGRA_I2S_FIFO_32 << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
-#define TEGRA_I2S_CTRL_FIFO_FORMAT_PACKED (TEGRA_I2S_FIFO_PACKED << TEGRA_I2S_CTRL_FIFO_FORMAT_SHIFT)
-
-#define TEGRA_I2S_CTRL_IE_FIFO1_ERR (1 << 3)
-#define TEGRA_I2S_CTRL_IE_FIFO2_ERR (1 << 2)
-#define TEGRA_I2S_CTRL_QE_FIFO1 (1 << 1)
-#define TEGRA_I2S_CTRL_QE_FIFO2 (1 << 0)
-
-/* Fields in TEGRA_I2S_STATUS */
-
-#define TEGRA_I2S_STATUS_FIFO1_RDY (1 << 31)
-#define TEGRA_I2S_STATUS_FIFO2_RDY (1 << 30)
-#define TEGRA_I2S_STATUS_FIFO1_BSY (1 << 29)
-#define TEGRA_I2S_STATUS_FIFO2_BSY (1 << 28)
-#define TEGRA_I2S_STATUS_FIFO1_ERR (1 << 3)
-#define TEGRA_I2S_STATUS_FIFO2_ERR (1 << 2)
-#define TEGRA_I2S_STATUS_QS_FIFO1 (1 << 1)
-#define TEGRA_I2S_STATUS_QS_FIFO2 (1 << 0)
-
-/* Fields in TEGRA_I2S_TIMING */
-
-#define TEGRA_I2S_TIMING_NON_SYM_ENABLE (1 << 12)
-#define TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT 0
-#define TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US 0x7fff
-#define TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK (TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US << TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT)
-
-/* Fields in TEGRA_I2S_FIFO_SCR */
-
-#define TEGRA_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT 24
-#define TEGRA_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT 16
-#define TEGRA_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK 0x3f
-
-#define TEGRA_I2S_FIFO_SCR_FIFO2_CLR (1 << 12)
-#define TEGRA_I2S_FIFO_SCR_FIFO1_CLR (1 << 8)
-
-#define TEGRA_I2S_FIFO_ATN_LVL_ONE_SLOT 0
-#define TEGRA_I2S_FIFO_ATN_LVL_FOUR_SLOTS 1
-#define TEGRA_I2S_FIFO_ATN_LVL_EIGHT_SLOTS 2
-#define TEGRA_I2S_FIFO_ATN_LVL_TWELVE_SLOTS 3
-
-#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT 4
-#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK (3 << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_ONE_SLOT (TEGRA_I2S_FIFO_ATN_LVL_ONE_SLOT << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_FOUR_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_EIGHT_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_EIGHT_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_TWELVE_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_TWELVE_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_SHIFT)
-
-#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT 0
-#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK (3 << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_ONE_SLOT (TEGRA_I2S_FIFO_ATN_LVL_ONE_SLOT << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_FOUR_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_EIGHT_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_EIGHT_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
-#define TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_TWELVE_SLOTS (TEGRA_I2S_FIFO_ATN_LVL_TWELVE_SLOTS << TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_SHIFT)
-
-struct tegra_i2s {
- struct clk *clk_i2s;
- int clk_refs;
- struct tegra_pcm_dma_params capture_dma_data;
- struct tegra_pcm_dma_params playback_dma_data;
- void __iomem *regs;
- struct dentry *debug;
- u32 reg_ctrl;
-};
-
-#endif
diff --git a/sound/soc/tegra/tegra_max98088.c b/sound/soc/tegra/tegra_max98088.c
new file mode 100644
index 000000000000..7748d0e071cb
--- /dev/null
+++ b/sound/soc/tegra/tegra_max98088.c
@@ -0,0 +1,1199 @@
+/*
+ * tegra_max98088.c - Tegra machine ASoC driver for boards using MAX98088 codec.
+ *
+ * Author: Sumit Bhattacharya <sumitb@nvidia.com>
+ * Copyright (C) 2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * (c) 2010, 2011 Nvidia Graphics Pvt. Ltd.
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <asm/mach-types.h>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+
+#include <mach/tegra_max98088_pdata.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/max98088.h"
+
+#include "tegra_pcm.h"
+#include "tegra_asoc_utils.h"
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+#include "tegra30_ahub.h"
+#include "tegra30_i2s.h"
+#include "tegra30_dam.h"
+#endif
+
+#define DRV_NAME "tegra-snd-max98088"
+
+#define GPIO_SPKR_EN BIT(0)
+#define GPIO_HP_MUTE BIT(1)
+#define GPIO_INT_MIC_EN BIT(2)
+#define GPIO_EXT_MIC_EN BIT(3)
+
+#define DAI_LINK_HIFI 0
+#define DAI_LINK_SPDIF 1
+#define DAI_LINK_BTSCO 2
+#define DAI_LINK_VOICE_CALL 3
+#define DAI_LINK_BT_VOICE_CALL 4
+#define NUM_DAI_LINKS 5
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+const char *tegra_max98088_i2s_dai_name[TEGRA30_NR_I2S_IFC] = {
+ "tegra30-i2s.0",
+ "tegra30-i2s.1",
+ "tegra30-i2s.2",
+ "tegra30-i2s.3",
+ "tegra30-i2s.4",
+};
+#endif
+
+struct tegra_max98088 {
+ struct tegra_asoc_utils_data util_data;
+ struct tegra_max98088_platform_data *pdata;
+ int gpio_requested;
+ bool init_done;
+ int is_call_mode;
+ int is_device_bt;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct codec_config codec_info[NUM_I2S_DEVICES];
+#endif
+ enum snd_soc_bias_level bias_level;
+ struct snd_soc_card *pcard;
+};
+
+static int tegra_call_mode_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+}
+
+static int tegra_call_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_max98088 *machine = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = machine->is_call_mode;
+
+ return 0;
+}
+
+static int tegra_call_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_max98088 *machine = snd_kcontrol_chip(kcontrol);
+ int is_call_mode_new = ucontrol->value.integer.value[0];
+ int codec_index;
+ unsigned int i;
+
+ if (machine->is_call_mode == is_call_mode_new)
+ return 0;
+
+ if (machine->is_device_bt)
+ codec_index = BT_SCO;
+ else
+ codec_index = HIFI_CODEC;
+
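+ /*
+  * Entering call mode connects the selected codec I2S to the baseband
+  * I2S and marks every DAI link ignore_suspend so the call keeps
+  * running across system suspend; leaving call mode reverses both.
+  */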
+ if (is_call_mode_new) {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (machine->codec_info[codec_index].rate == 0 ||
+ machine->codec_info[codec_index].channels == 0)
+ return -EINVAL;
+
+ for (i = 0; i < machine->pcard->num_links; i++)
+ machine->pcard->dai_link[i].ignore_suspend = 1;
+
+ tegra30_make_voice_call_connections(
+ &machine->codec_info[codec_index],
+ &machine->codec_info[BASEBAND]);
+#endif
+ } else {
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ tegra30_break_voice_call_connections(
+ &machine->codec_info[codec_index],
+ &machine->codec_info[BASEBAND]);
+
+ for (i = 0; i < machine->pcard->num_links; i++)
+ machine->pcard->dai_link[i].ignore_suspend = 0;
+#endif
+ }
+
+ machine->is_call_mode = is_call_mode_new;
+
+ return 1;
+}
+
+struct snd_kcontrol_new tegra_call_mode_control = {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Call Mode Switch",
+ .private_value = 0xffff,
+ .info = tegra_call_mode_info,
+ .get = tegra_call_mode_get,
+ .put = tegra_call_mode_put
+};
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
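+/*
+ * Program a DAM for this stream: channel 1 passes audio through at the
+ * stream rate and format, while channel 0, when src_on is set, carries a
+ * sample-rate-converted source path described by src_srate, src_channels
+ * and src_bit_size.
+ */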
+static int tegra_max98088_set_dam_cif(int dam_ifc, int srate,
+ int channels, int bit_size, int src_on, int src_srate,
+ int src_channels, int src_bit_size)
+{
+ tegra30_dam_set_gain(dam_ifc, TEGRA30_DAM_CHIN1, 0x1000);
+ tegra30_dam_set_samplerate(dam_ifc, TEGRA30_DAM_CHOUT,
+ srate);
+ tegra30_dam_set_samplerate(dam_ifc, TEGRA30_DAM_CHIN1,
+ srate);
+ tegra30_dam_set_acif(dam_ifc, TEGRA30_DAM_CHIN1,
+ channels, bit_size, channels,
+ bit_size);
+ tegra30_dam_set_acif(dam_ifc, TEGRA30_DAM_CHOUT,
+ channels, bit_size, channels,
+ bit_size);
+
+ if (src_on) {
+ tegra30_dam_set_gain(dam_ifc, TEGRA30_DAM_CHIN0_SRC, 0x1000);
+ tegra30_dam_set_samplerate(dam_ifc, TEGRA30_DAM_CHIN0_SRC,
+ src_srate);
+ tegra30_dam_set_acif(dam_ifc, TEGRA30_DAM_CHIN0_SRC,
+ src_channels, src_bit_size, 1, 16);
+ }
+
+ return 0;
+}
+#endif
+
+static int tegra_max98088_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+#endif
+ int srate, mclk, sample_size;
+ int err;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sample_size = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
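+ /*
+  * Choose an MCLK from the 12.288 MHz family for 8 kHz-multiple rates
+  * and 11.2896 MHz for the 44.1 kHz family; anything else falls back
+  * to 12 MHz.
+  */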
+ srate = params_rate(params);
+ switch (srate) {
+ case 8000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ mclk = 12000000;
+ break;
+ }
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
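+ /*
+  * If the requested rate could not be set, fall back to the MCLK that
+  * is currently configured, provided it is an exact multiple of the
+  * one we wanted.
+  */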
+ if (!(machine->util_data.set_mclk % mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra_max98088_set_dam_cif(i2s->dam_ifc, srate,
+ params_channels(params), sample_size, 0, 0, 0, 0);
+#endif
+
+ return 0;
+}
+
+static int tegra_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
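+ /*
+  * An already-configured MCLK is acceptable for S/PDIF as long as it
+  * is an exact multiple of 128*fs.
+  */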
+ min_mclk = 128 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ return 0;
+}
+
+static int tegra_bt_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+#endif
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ int err, srate, mclk, min_mclk, sample_size;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sample_size = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ min_mclk = 64 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(rtd->codec->card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tegra_max98088_set_dam_cif(i2s->dam_ifc, params_rate(params),
+ params_channels(params), sample_size, 0, 0, 0, 0);
+#endif
+
+ return 0;
+}
+
+static int tegra_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(rtd->card);
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 0);
+
+ return 0;
+}
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+static int tegra_max98088_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(rtd->card);
+ struct codec_config *codec_info;
+ struct codec_config *bb_info;
+ int codec_index;
+
+ if (!i2s->is_dam_used)
+ return 0;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* DAM configuration */
+ if (!i2s->dam_ch_refcount)
+ i2s->dam_ifc = tegra30_dam_allocate_controller();
+
+ tegra30_dam_allocate_channel(i2s->dam_ifc, TEGRA30_DAM_CHIN1);
+ i2s->dam_ch_refcount++;
+ tegra30_dam_enable_clock(i2s->dam_ifc);
+
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX1 +
+ (i2s->dam_ifc*2), i2s->txcif);
+
+ /*
+  * Make the DAM TX to I2S RX connection if this is the only client
+  * using the I2S for playback.
+  */
+ if (i2s->playback_ref_count == 1)
+ tegra30_ahub_set_rx_cif_source(
+ TEGRA30_AHUB_RXCIF_I2S0_RX0 + i2s->id,
+ TEGRA30_AHUB_TXCIF_DAM0_TX0 + i2s->dam_ifc);
+
+ /* enable the dam*/
+ tegra30_dam_enable(i2s->dam_ifc, TEGRA30_DAM_ENABLE,
+ TEGRA30_DAM_CHIN1);
+ } else {
+
+ i2s->is_call_mode_rec = machine->is_call_mode;
+
+ if (!i2s->is_call_mode_rec)
+ return 0;
+
+ if (machine->is_device_bt)
+ codec_index = BT_SCO;
+ else
+ codec_index = HIFI_CODEC;
+
+ codec_info = &machine->codec_info[codec_index];
+ bb_info = &machine->codec_info[BASEBAND];
+
+ /* allocate a dam for voice call recording */
+
+ i2s->call_record_dam_ifc = tegra30_dam_allocate_controller();
+ tegra30_dam_allocate_channel(i2s->call_record_dam_ifc,
+ TEGRA30_DAM_CHIN0_SRC);
+ tegra30_dam_allocate_channel(i2s->call_record_dam_ifc,
+ TEGRA30_DAM_CHIN1);
+ tegra30_dam_enable_clock(i2s->call_record_dam_ifc);
+
+ /* configure the dam */
+ tegra_max98088_set_dam_cif(i2s->call_record_dam_ifc,
+ codec_info->rate, codec_info->channels,
+ codec_info->bitsize, 1, bb_info->rate,
+ bb_info->channels, bb_info->bitsize);
+
+ /* setup the connections for voice call record */
+
+ tegra30_ahub_unset_rx_cif_source(i2s->rxcif);
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX0 +
+ (i2s->call_record_dam_ifc*2),
+ TEGRA30_AHUB_TXCIF_I2S0_TX0 + bb_info->i2s_id);
+ tegra30_ahub_set_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX1 +
+ (i2s->call_record_dam_ifc*2),
+ TEGRA30_AHUB_TXCIF_I2S0_TX0 + codec_info->i2s_id);
+ tegra30_ahub_set_rx_cif_source(i2s->rxcif,
+ TEGRA30_AHUB_TXCIF_DAM0_TX0 + i2s->call_record_dam_ifc);
+
+ /* enable the dam*/
+
+ tegra30_dam_enable(i2s->call_record_dam_ifc, TEGRA30_DAM_ENABLE,
+ TEGRA30_DAM_CHIN1);
+ tegra30_dam_enable(i2s->call_record_dam_ifc, TEGRA30_DAM_ENABLE,
+ TEGRA30_DAM_CHIN0_SRC);
+ }
+
+ return 0;
+}
+
+static void tegra_max98088_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if (!i2s->is_dam_used)
+ return;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* disable the dam*/
+ tegra30_dam_enable(i2s->dam_ifc, TEGRA30_DAM_DISABLE,
+ TEGRA30_DAM_CHIN1);
+
+ /* disconnect the ahub connections*/
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX1 +
+ (i2s->dam_ifc*2));
+
+ /* disable the dam and free the controller */
+ tegra30_dam_disable_clock(i2s->dam_ifc);
+ tegra30_dam_free_channel(i2s->dam_ifc, TEGRA30_DAM_CHIN1);
+ i2s->dam_ch_refcount--;
+ if (!i2s->dam_ch_refcount)
+ tegra30_dam_free_controller(i2s->dam_ifc);
+ } else {
+
+ if (!i2s->is_call_mode_rec)
+ return;
+
+ i2s->is_call_mode_rec = 0;
+
+ /* disable the dam*/
+ tegra30_dam_enable(i2s->call_record_dam_ifc,
+ TEGRA30_DAM_DISABLE, TEGRA30_DAM_CHIN1);
+ tegra30_dam_enable(i2s->call_record_dam_ifc,
+ TEGRA30_DAM_DISABLE, TEGRA30_DAM_CHIN0_SRC);
+
+ /* disconnect the ahub connections*/
+ tegra30_ahub_unset_rx_cif_source(i2s->rxcif);
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX0 +
+ (i2s->call_record_dam_ifc*2));
+ tegra30_ahub_unset_rx_cif_source(TEGRA30_AHUB_RXCIF_DAM0_RX1 +
+ (i2s->call_record_dam_ifc*2));
+
+ /* free the dam channels and dam controller */
+ tegra30_dam_disable_clock(i2s->call_record_dam_ifc);
+ tegra30_dam_free_channel(i2s->call_record_dam_ifc,
+ TEGRA30_DAM_CHIN1);
+ tegra30_dam_free_channel(i2s->call_record_dam_ifc,
+ TEGRA30_DAM_CHIN0_SRC);
+ tegra30_dam_free_controller(i2s->call_record_dam_ifc);
+ }
+
+ return;
+}
+#endif
+
+static int tegra_voice_call_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 8000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* codec configuration */
+ machine->codec_info[HIFI_CODEC].rate = params_rate(params);
+ machine->codec_info[HIFI_CODEC].channels = params_channels(params);
+ machine->codec_info[HIFI_CODEC].bitsize = 16;
+ machine->codec_info[HIFI_CODEC].is_i2smaster = 1;
+ machine->codec_info[HIFI_CODEC].is_format_dsp = 0;
+
+ /* baseband configuration */
+ machine->codec_info[BASEBAND].bitsize = 16;
+ machine->codec_info[BASEBAND].is_i2smaster = 1;
+ machine->codec_info[BASEBAND].is_format_dsp = 1;
+#endif
+
+ machine->is_device_bt = 0;
+
+ return 0;
+}
+
+static void tegra_voice_call_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_max98088 *machine =
+ snd_soc_card_get_drvdata(rtd->codec->card);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ machine->codec_info[HIFI_CODEC].rate = 0;
+ machine->codec_info[HIFI_CODEC].channels = 0;
+#endif
+
+ return;
+}
+
+static int tegra_bt_voice_call_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ int err, srate, mclk, min_mclk;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ min_mclk = 64 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ /* codec configuration */
+ machine->codec_info[BT_SCO].rate = params_rate(params);
+ machine->codec_info[BT_SCO].channels = params_channels(params);
+ machine->codec_info[BT_SCO].bitsize = 16;
+ machine->codec_info[BT_SCO].is_i2smaster = 1;
+ machine->codec_info[BT_SCO].is_format_dsp = 1;
+
+ /* baseband configuration */
+ machine->codec_info[BASEBAND].bitsize = 16;
+ machine->codec_info[BASEBAND].is_i2smaster = 1;
+ machine->codec_info[BASEBAND].is_format_dsp = 1;
+#endif
+
+ machine->is_device_bt = 1;
+
+ return 0;
+}
+
+static void tegra_bt_voice_call_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_max98088 *machine =
+ snd_soc_card_get_drvdata(rtd->codec->card);
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ machine->codec_info[BT_SCO].rate = 0;
+ machine->codec_info[BT_SCO].channels = 0;
+#endif
+
+ return;
+}
+
+static struct snd_soc_ops tegra_max98088_ops = {
+ .hw_params = tegra_max98088_hw_params,
+ .hw_free = tegra_hw_free,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ .startup = tegra_max98088_startup,
+ .shutdown = tegra_max98088_shutdown,
+#endif
+};
+
+static struct snd_soc_ops tegra_spdif_ops = {
+ .hw_params = tegra_spdif_hw_params,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_ops tegra_voice_call_ops = {
+ .hw_params = tegra_voice_call_hw_params,
+ .shutdown = tegra_voice_call_shutdown,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_ops tegra_bt_voice_call_ops = {
+ .hw_params = tegra_bt_voice_call_hw_params,
+ .shutdown = tegra_bt_voice_call_shutdown,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_ops tegra_bt_ops = {
+ .hw_params = tegra_bt_hw_params,
+ .hw_free = tegra_hw_free,
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ .startup = tegra_max98088_startup,
+ .shutdown = tegra_max98088_shutdown,
+#endif
+};
+
+static struct snd_soc_jack tegra_max98088_hp_jack;
+
+#ifdef CONFIG_SWITCH
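+/* Report headset plug state to userspace through the "h2w" switch device. */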
+static struct switch_dev wired_switch_dev = {
+ .name = "h2w",
+};
+
+/* These values are copied from WiredAccessoryObserver */
+enum headset_state {
+ BIT_NO_HEADSET = 0,
+ BIT_HEADSET = (1 << 0),
+ BIT_HEADSET_NO_MIC = (1 << 1),
+};
+
+static int headset_switch_notify(struct notifier_block *self,
+ unsigned long action, void *dev)
+{
+ int state = 0;
+
+ switch (action) {
+ case SND_JACK_HEADPHONE:
+ state |= BIT_HEADSET_NO_MIC;
+ break;
+ case SND_JACK_HEADSET:
+ state |= BIT_HEADSET;
+ break;
+ default:
+ state |= BIT_NO_HEADSET;
+ }
+
+ switch_set_state(&wired_switch_dev, state);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block headset_switch_nb = {
+ .notifier_call = headset_switch_notify,
+};
+#else
+static struct snd_soc_jack_pin tegra_max98088_hp_jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+#endif
+
+static int tegra_max98088_event_int_spk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_max98088_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_SPKR_EN))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_spkr_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static int tegra_max98088_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_max98088_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_HP_MUTE))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_hp_mute,
+ !SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget tegra_max98088_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Int Spk", tegra_max98088_event_int_spk),
+ SND_SOC_DAPM_OUTPUT("Earpiece"),
+ SND_SOC_DAPM_HP("Headphone Jack", tegra_max98088_event_hp),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_INPUT("Int Mic"),
+};
+
+static const struct snd_soc_dapm_route enterprise_audio_map[] = {
+ {"Int Spk", NULL, "SPKL"},
+ {"Int Spk", NULL, "SPKR"},
+ {"Earpiece", NULL, "RECL"},
+ {"Earpiece", NULL, "RECR"},
+ {"Headphone Jack", NULL, "HPL"},
+ {"Headphone Jack", NULL, "HPR"},
+ {"MICBIAS", NULL, "Mic Jack"},
+ {"MIC2", NULL, "MICBIAS"},
+ {"MICBIAS", NULL, "Int Mic"},
+ {"MIC1", NULL, "MICBIAS"},
+};
+
+static const struct snd_kcontrol_new tegra_max98088_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Int Spk"),
+ SOC_DAPM_PIN_SWITCH("Earpiece"),
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+};
+
+static int tegra_max98088_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_max98088_platform_data *pdata = machine->pdata;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+#endif
+ int ret;
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ if (machine->codec_info[BASEBAND].i2s_id != -1)
+ i2s->is_dam_used = true;
+#endif
+
+ if (machine->init_done)
+ return 0;
+
+ machine->init_done = true;
+
+ machine->pcard = card;
+
+ if (gpio_is_valid(pdata->gpio_spkr_en)) {
+ ret = gpio_request(pdata->gpio_spkr_en, "spkr_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get spkr_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_SPKR_EN;
+
+ gpio_direction_output(pdata->gpio_spkr_en, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_hp_mute)) {
+ ret = gpio_request(pdata->gpio_hp_mute, "hp_mute");
+ if (ret) {
+ dev_err(card->dev, "cannot get hp_mute gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_HP_MUTE;
+
+ gpio_direction_output(pdata->gpio_hp_mute, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_int_mic_en)) {
+ ret = gpio_request(pdata->gpio_int_mic_en, "int_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get int_mic_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_INT_MIC_EN;
+
+ /* Disable int mic; enable signal is active-high */
+ gpio_direction_output(pdata->gpio_int_mic_en, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_ext_mic_en)) {
+ ret = gpio_request(pdata->gpio_ext_mic_en, "ext_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get ext_mic_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_EXT_MIC_EN;
+
+ /* Enable ext mic; enable signal is active-low */
+ gpio_direction_output(pdata->gpio_ext_mic_en, 0);
+ }
+
+ ret = snd_soc_add_controls(codec, tegra_max98088_controls,
+ ARRAY_SIZE(tegra_max98088_controls));
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_new_controls(dapm, tegra_max98088_dapm_widgets,
+ ARRAY_SIZE(tegra_max98088_dapm_widgets));
+
+ snd_soc_dapm_add_routes(dapm, enterprise_audio_map,
+ ARRAY_SIZE(enterprise_audio_map));
+
+ ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
+ &tegra_max98088_hp_jack);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_SWITCH
+ snd_soc_jack_notifier_register(&tegra_max98088_hp_jack,
+ &headset_switch_nb);
+#else /* GPIO-based headset detection */
+ snd_soc_jack_add_pins(&tegra_max98088_hp_jack,
+ ARRAY_SIZE(tegra_max98088_hp_jack_pins),
+ tegra_max98088_hp_jack_pins);
+#endif
+
+ max98088_headset_detect(codec, &tegra_max98088_hp_jack,
+ SND_JACK_HEADSET);
+
+ /* Add call mode switch control */
+ ret = snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&tegra_call_mode_control, machine));
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_nc_pin(dapm, "INA1");
+ snd_soc_dapm_nc_pin(dapm, "INA2");
+ snd_soc_dapm_nc_pin(dapm, "INB1");
+ snd_soc_dapm_nc_pin(dapm, "INB2");
+ snd_soc_dapm_sync(dapm);
+
+ return 0;
+}
+
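+/*
+ * Five DAI links: HiFi audio to the MAX98088, S/PDIF out, BT SCO PCM,
+ * and two voice-call links (codec and Bluetooth) used to set up the
+ * call signal path.
+ */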
+static struct snd_soc_dai_link tegra_max98088_dai[NUM_DAI_LINKS] = {
+ [DAI_LINK_HIFI] = {
+ .name = "MAX98088",
+ .stream_name = "MAX98088 HIFI",
+ .codec_name = "max98088.0-0010",
+ .platform_name = "tegra-pcm-audio",
+ .codec_dai_name = "HiFi",
+ .init = tegra_max98088_init,
+ .ops = &tegra_max98088_ops,
+ },
+ [DAI_LINK_SPDIF] = {
+ .name = "SPDIF",
+ .stream_name = "SPDIF PCM",
+ .codec_name = "spdif-dit.0",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra30-spdif",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_spdif_ops,
+ },
+ [DAI_LINK_BTSCO] = {
+ .name = "BT SCO",
+ .stream_name = "BT SCO PCM",
+ .codec_name = "spdif-dit.1",
+ .platform_name = "tegra-pcm-audio",
+ .codec_dai_name = "dit-hifi",
+ .init = tegra_max98088_init,
+ .ops = &tegra_bt_ops,
+ },
+ [DAI_LINK_VOICE_CALL] = {
+ .name = "VOICE CALL",
+ .stream_name = "VOICE CALL PCM",
+ .codec_name = "max98088.0-0010",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "dit-hifi",
+ .codec_dai_name = "HiFi",
+ .ops = &tegra_voice_call_ops,
+ },
+ [DAI_LINK_BT_VOICE_CALL] = {
+ .name = "BT VOICE CALL",
+ .stream_name = "BT VOICE CALL PCM",
+ .codec_name = "spdif-dit.2",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "dit-hifi",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_bt_voice_call_ops,
+ },
+};
+
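+/*
+ * Keep the audio clocks gated with DAPM bias: enable them on the first
+ * transition out of BIAS_OFF and disable them again once the card has
+ * returned to BIAS_OFF.
+ */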
+static int tegra30_soc_set_bias_level(struct snd_soc_card *card,
+ enum snd_soc_bias_level level)
+{
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+
+ if (machine->bias_level == SND_SOC_BIAS_OFF &&
+ level != SND_SOC_BIAS_OFF)
+ tegra_asoc_utils_clk_enable(&machine->util_data);
+
+ return 0;
+}
+
+static int tegra30_soc_set_bias_level_post(struct snd_soc_card *card,
+ enum snd_soc_bias_level level)
+{
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+
+ if (machine->bias_level != SND_SOC_BIAS_OFF &&
+ level == SND_SOC_BIAS_OFF)
+ tegra_asoc_utils_clk_disable(&machine->util_data);
+
+ machine->bias_level = level;
+
+ return 0;
+}
+
+static struct snd_soc_card snd_soc_tegra_max98088 = {
+ .name = "tegra-max98088",
+ .dai_link = tegra_max98088_dai,
+ .num_links = ARRAY_SIZE(tegra_max98088_dai),
+ .set_bias_level = tegra30_soc_set_bias_level,
+ .set_bias_level_post = tegra30_soc_set_bias_level_post,
+};
+
+static __devinit int tegra_max98088_driver_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_tegra_max98088;
+ struct tegra_max98088 *machine;
+ struct tegra_max98088_platform_data *pdata;
+ int ret, i;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ machine = kzalloc(sizeof(struct tegra_max98088), GFP_KERNEL);
+ if (!machine) {
+ dev_err(&pdev->dev, "Can't allocate tegra_max98088 struct\n");
+ return -ENOMEM;
+ }
+
+ machine->pdata = pdata;
+
+ ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
+ if (ret)
+ goto err_free_machine;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+#ifdef CONFIG_SWITCH
+ /* Add h2w switch class support */
+ ret = switch_dev_register(&wired_switch_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "not able to register switch device\n");
+ goto err_fini_utils;
+ }
+#endif
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
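+ /*
+  * Map the board-specified I2S port numbers onto the corresponding
+  * tegra30-i2s DAI names for the HiFi and BT SCO links.
+  */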
+ for (i = 0; i < NUM_I2S_DEVICES ; i++)
+ machine->codec_info[i].i2s_id = pdata->audio_port_id[i];
+
+ machine->codec_info[BASEBAND].rate = pdata->baseband_param.rate;
+ machine->codec_info[BASEBAND].channels = pdata->baseband_param.channels;
+
+ tegra_max98088_dai[DAI_LINK_HIFI].cpu_dai_name =
+ tegra_max98088_i2s_dai_name[machine->codec_info[HIFI_CODEC].i2s_id];
+
+ tegra_max98088_dai[DAI_LINK_BTSCO].cpu_dai_name =
+ tegra_max98088_i2s_dai_name[machine->codec_info[BT_SCO].i2s_id];
+#endif
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
+ goto err_switch_unregister;
+ }
+
+ return 0;
+
+err_switch_unregister:
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&wired_switch_dev);
+#endif
+err_fini_utils:
+ tegra_asoc_utils_fini(&machine->util_data);
+err_free_machine:
+ kfree(machine);
+ return ret;
+}
+
+static int __devexit tegra_max98088_driver_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tegra_max98088 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_max98088_platform_data *pdata = machine->pdata;
+
+ snd_soc_unregister_card(card);
+
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&wired_switch_dev);
+#endif
+
+ tegra_asoc_utils_fini(&machine->util_data);
+
+ if (machine->gpio_requested & GPIO_EXT_MIC_EN)
+ gpio_free(pdata->gpio_ext_mic_en);
+ if (machine->gpio_requested & GPIO_INT_MIC_EN)
+ gpio_free(pdata->gpio_int_mic_en);
+ if (machine->gpio_requested & GPIO_HP_MUTE)
+ gpio_free(pdata->gpio_hp_mute);
+ if (machine->gpio_requested & GPIO_SPKR_EN)
+ gpio_free(pdata->gpio_spkr_en);
+
+ kfree(machine);
+
+ return 0;
+}
+
+static struct platform_driver tegra_max98088_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = tegra_max98088_driver_probe,
+ .remove = __devexit_p(tegra_max98088_driver_remove),
+};
+
+static int __init tegra_max98088_modinit(void)
+{
+ return platform_driver_register(&tegra_max98088_driver);
+}
+module_init(tegra_max98088_modinit);
+
+static void __exit tegra_max98088_modexit(void)
+{
+ platform_driver_unregister(&tegra_max98088_driver);
+}
+module_exit(tegra_max98088_modexit);
+
+MODULE_AUTHOR("Sumit Bhattacharya <sumitb@nvidia.com>");
+MODULE_DESCRIPTION("Tegra+MAX98088 machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index c7cfd96e991e..a27f65f68325 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -48,9 +48,9 @@ static const struct snd_pcm_hardware tegra_pcm_hardware = {
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 2,
+ .channels_min = 1,
.channels_max = 2,
- .period_bytes_min = 1024,
+ .period_bytes_min = 128,
.period_bytes_max = PAGE_SIZE,
.periods_min = 2,
.periods_max = 8,
@@ -147,34 +147,53 @@ static int tegra_pcm_open(struct snd_pcm_substream *substream)
spin_lock_init(&prtd->lock);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- setup_dma_tx_request(&prtd->dma_req[0], dmap);
- setup_dma_tx_request(&prtd->dma_req[1], dmap);
- } else {
- dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- setup_dma_rx_request(&prtd->dma_req[0], dmap);
- setup_dma_rx_request(&prtd->dma_req[1], dmap);
- }
-
- prtd->dma_req[0].dev = prtd;
- prtd->dma_req[1].dev = prtd;
-
- prtd->dma_chan = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
- if (prtd->dma_chan == NULL) {
- ret = -ENOMEM;
- goto err;
+ dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ if (dmap) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ setup_dma_tx_request(&prtd->dma_req[0], dmap);
+ setup_dma_tx_request(&prtd->dma_req[1], dmap);
+ } else {
+ setup_dma_rx_request(&prtd->dma_req[0], dmap);
+ setup_dma_rx_request(&prtd->dma_req[1], dmap);
+ }
+
+ prtd->dma_req[0].dev = prtd;
+ prtd->dma_req[1].dev = prtd;
+
+ prtd->dma_chan = tegra_dma_allocate_channel(
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
+ "pcm");
+ if (prtd->dma_chan == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
/* Set HW params now that initialization is complete */
snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);
+ /* Ensure the period size is a multiple of 8 bytes */
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 0x8);
+ if (ret < 0)
+ goto err;
+
/* Ensure that buffer size is a multiple of period size */
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto err;
+#ifdef CONFIG_HAS_WAKELOCK
+ snprintf(prtd->tegra_wake_lock_name, sizeof(prtd->tegra_wake_lock_name),
+ "tegra-pcm-%s-%d",
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? "out" : "in",
+ substream->pcm->device);
+ wake_lock_init(&prtd->tegra_wake_lock, WAKE_LOCK_SUSPEND,
+ prtd->tegra_wake_lock_name);
+#endif
+
return 0;
err:
@@ -192,7 +211,12 @@ static int tegra_pcm_close(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct tegra_runtime_data *prtd = runtime->private_data;
- tegra_dma_free_channel(prtd->dma_chan);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&prtd->tegra_wake_lock);
+#endif
+
+ if (prtd->dma_chan)
+ tegra_dma_free_channel(prtd->dma_chan);
kfree(prtd);
@@ -235,6 +259,9 @@ static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
/* Fall-through */
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+#ifdef CONFIG_HAS_WAKELOCK
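+ /* Hold a suspend wake lock while the stream is running. */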
+ wake_lock(&prtd->tegra_wake_lock);
+#endif
spin_lock_irqsave(&prtd->lock, flags);
prtd->running = 1;
spin_unlock_irqrestore(&prtd->lock, flags);
@@ -249,6 +276,10 @@ static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
spin_unlock_irqrestore(&prtd->lock, flags);
tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[0]);
tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[1]);
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&prtd->tegra_wake_lock);
+#endif
break;
default:
return -EINVAL;
@@ -261,10 +292,15 @@ static snd_pcm_uframes_t tegra_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct tegra_runtime_data *prtd = runtime->private_data;
+ int dma_transfer_count;
- return prtd->period_index * runtime->period_size;
-}
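+ /*
+  * Refine the position beyond whole periods by adding the amount of
+  * data the DMA engine has already moved within the current request.
+  */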
+ dma_transfer_count = tegra_dma_get_transfer_count(prtd->dma_chan,
+ &prtd->dma_req[prtd->dma_req_idx],
+ false);
+ return prtd->period_index * runtime->period_size +
+ bytes_to_frames(runtime, dma_transfer_count);
+}
static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
diff --git a/sound/soc/tegra/tegra_pcm.h b/sound/soc/tegra/tegra_pcm.h
index dbb90339fe0d..883c979268de 100644
--- a/sound/soc/tegra/tegra_pcm.h
+++ b/sound/soc/tegra/tegra_pcm.h
@@ -33,6 +33,10 @@
#include <mach/dma.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
struct tegra_pcm_dma_params {
unsigned long addr;
unsigned long wrap;
@@ -50,6 +54,10 @@ struct tegra_runtime_data {
int dma_req_idx;
struct tegra_dma_req dma_req[2];
struct tegra_dma_channel *dma_chan;
+#ifdef CONFIG_HAS_WAKELOCK
+ struct wake_lock tegra_wake_lock;
+ char tegra_wake_lock_name[32];
+#endif
};
#endif
diff --git a/sound/soc/tegra/tegra_spdif.c b/sound/soc/tegra/tegra_spdif.c
deleted file mode 100644
index abe606b0a29e..000000000000
--- a/sound/soc/tegra/tegra_spdif.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * tegra_spdif.c - Tegra SPDIF driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2011 - NVIDIA, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <mach/iomap.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "tegra_spdif.h"
-
-#define DRV_NAME "tegra-spdif"
-
-static inline void tegra_spdif_write(struct tegra_spdif *spdif, u32 reg,
- u32 val)
-{
- __raw_writel(val, spdif->regs + reg);
-}
-
-static inline u32 tegra_spdif_read(struct tegra_spdif *spdif, u32 reg)
-{
- return __raw_readl(spdif->regs + reg);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_spdif_show(struct seq_file *s, void *unused)
-{
-#define REG(r) { r, #r }
- static const struct {
- int offset;
- const char *name;
- } regs[] = {
- REG(TEGRA_SPDIF_CTRL),
- REG(TEGRA_SPDIF_STATUS),
- REG(TEGRA_SPDIF_STROBE_CTRL),
- REG(TEGRA_SPDIF_DATA_FIFO_CSR),
- REG(TEGRA_SPDIF_CH_STA_RX_A),
- REG(TEGRA_SPDIF_CH_STA_RX_B),
- REG(TEGRA_SPDIF_CH_STA_RX_C),
- REG(TEGRA_SPDIF_CH_STA_RX_D),
- REG(TEGRA_SPDIF_CH_STA_RX_E),
- REG(TEGRA_SPDIF_CH_STA_RX_F),
- REG(TEGRA_SPDIF_CH_STA_TX_A),
- REG(TEGRA_SPDIF_CH_STA_TX_B),
- REG(TEGRA_SPDIF_CH_STA_TX_C),
- REG(TEGRA_SPDIF_CH_STA_TX_D),
- REG(TEGRA_SPDIF_CH_STA_TX_E),
- REG(TEGRA_SPDIF_CH_STA_TX_F),
- };
-#undef REG
-
- struct tegra_spdif *spdif = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(regs); i++) {
- u32 val = tegra_spdif_read(spdif, regs[i].offset);
- seq_printf(s, "%s = %08x\n", regs[i].name, val);
- }
-
- return 0;
-}
-
-static int tegra_spdif_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, tegra_spdif_show, inode->i_private);
-}
-
-static const struct file_operations tegra_spdif_debug_fops = {
- .open = tegra_spdif_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void tegra_spdif_debug_add(struct tegra_spdif *spdif)
-{
- spdif->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
- snd_soc_debugfs_root, spdif,
- &tegra_spdif_debug_fops);
-}
-
-static void tegra_spdif_debug_remove(struct tegra_spdif *spdif)
-{
- if (spdif->debug)
- debugfs_remove(spdif->debug);
-}
-#else
-static inline void tegra_spdif_debug_add(struct tegra_spdif *spdif)
-{
-}
-
-static inline void tegra_spdif_debug_remove(struct tegra_spdif *spdif)
-{
-}
-#endif
-
-static int tegra_spdif_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct device *dev = substream->pcm->card->dev;
- struct tegra_spdif *spdif = snd_soc_dai_get_drvdata(dai);
- int ret, srate, spdifclock;
-
- spdif->reg_ctrl &= ~TEGRA_SPDIF_CTRL_PACK;
- spdif->reg_ctrl &= ~TEGRA_SPDIF_CTRL_BIT_MODE_MASK;
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- spdif->reg_ctrl |= TEGRA_SPDIF_CTRL_PACK;
- spdif->reg_ctrl |= TEGRA_SPDIF_CTRL_BIT_MODE_16BIT;
- break;
- default:
- return -EINVAL;
- }
-
- srate = params_rate(params);
- switch (params_rate(params)) {
- case 32000:
- spdifclock = 4096000;
- break;
- case 44100:
- spdifclock = 5644800;
- break;
- case 48000:
- spdifclock = 6144000;
- break;
- case 88200:
- spdifclock = 11289600;
- break;
- case 96000:
- spdifclock = 12288000;
- break;
- case 176400:
- spdifclock = 22579200;
- break;
- case 192000:
- spdifclock = 24576000;
- break;
- default:
- return -EINVAL;
- }
-
- ret = clk_set_rate(spdif->clk_spdif_out, spdifclock);
- if (ret) {
- dev_err(dev, "Can't set SPDIF clock rate: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void tegra_spdif_start_playback(struct tegra_spdif *spdif)
-{
- spdif->reg_ctrl |= TEGRA_SPDIF_CTRL_TX_EN;
- tegra_spdif_write(spdif, TEGRA_SPDIF_CTRL, spdif->reg_ctrl);
-}
-
-static void tegra_spdif_stop_playback(struct tegra_spdif *spdif)
-{
- spdif->reg_ctrl &= ~TEGRA_SPDIF_CTRL_TX_EN;
- tegra_spdif_write(spdif, TEGRA_SPDIF_CTRL, spdif->reg_ctrl);
-}
-
-static int tegra_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct tegra_spdif *spdif = snd_soc_dai_get_drvdata(dai);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- case SNDRV_PCM_TRIGGER_RESUME:
- if (!spdif->clk_refs)
- clk_enable(spdif->clk_spdif_out);
- spdif->clk_refs++;
- tegra_spdif_start_playback(spdif);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- tegra_spdif_stop_playback(spdif);
- spdif->clk_refs--;
- if (!spdif->clk_refs)
- clk_disable(spdif->clk_spdif_out);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tegra_spdif_probe(struct snd_soc_dai *dai)
-{
- struct tegra_spdif *spdif = snd_soc_dai_get_drvdata(dai);
-
- dai->capture_dma_data = NULL;
- dai->playback_dma_data = &spdif->playback_dma_data;
-
- return 0;
-}
-
-static struct snd_soc_dai_ops tegra_spdif_dai_ops = {
- .hw_params = tegra_spdif_hw_params,
- .trigger = tegra_spdif_trigger,
-};
-
-struct snd_soc_dai_driver tegra_spdif_dai = {
- .name = DRV_NAME,
- .probe = tegra_spdif_probe,
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &tegra_spdif_dai_ops,
-};
-
-static __devinit int tegra_spdif_platform_probe(struct platform_device *pdev)
-{
- struct tegra_spdif *spdif;
- struct resource *mem, *memregion, *dmareq;
- int ret;
-
- spdif = kzalloc(sizeof(struct tegra_spdif), GFP_KERNEL);
- if (!spdif) {
- dev_err(&pdev->dev, "Can't allocate tegra_spdif\n");
- ret = -ENOMEM;
- goto exit;
- }
- dev_set_drvdata(&pdev->dev, spdif);
-
- spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
- if (IS_ERR(spdif->clk_spdif_out)) {
- pr_err("Can't retrieve spdif clock\n");
- ret = PTR_ERR(spdif->clk_spdif_out);
- goto err_free;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No memory resource\n");
- ret = -ENODEV;
- goto err_clk_put;
- }
-
- dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmareq) {
- dev_err(&pdev->dev, "No DMA resource\n");
- ret = -ENODEV;
- goto err_clk_put;
- }
-
- memregion = request_mem_region(mem->start, resource_size(mem),
- DRV_NAME);
- if (!memregion) {
- dev_err(&pdev->dev, "Memory region already claimed\n");
- ret = -EBUSY;
- goto err_clk_put;
- }
-
- spdif->regs = ioremap(mem->start, resource_size(mem));
- if (!spdif->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err_release;
- }
-
- spdif->playback_dma_data.addr = mem->start + TEGRA_SPDIF_DATA_OUT;
- spdif->playback_dma_data.wrap = 4;
- spdif->playback_dma_data.width = 32;
- spdif->playback_dma_data.req_sel = dmareq->start;
-
- ret = snd_soc_register_dai(&pdev->dev, &tegra_spdif_dai);
- if (ret) {
- dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- tegra_spdif_debug_add(spdif);
-
- return 0;
-
-err_unmap:
- iounmap(spdif->regs);
-err_release:
- release_mem_region(mem->start, resource_size(mem));
-err_clk_put:
- clk_put(spdif->clk_spdif_out);
-err_free:
- kfree(spdif);
-exit:
- return ret;
-}
-
-static int __devexit tegra_spdif_platform_remove(struct platform_device *pdev)
-{
- struct tegra_spdif *spdif = dev_get_drvdata(&pdev->dev);
- struct resource *res;
-
- snd_soc_unregister_dai(&pdev->dev);
-
- tegra_spdif_debug_remove(spdif);
-
- iounmap(spdif->regs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- clk_put(spdif->clk_spdif_out);
-
- kfree(spdif);
-
- return 0;
-}
-
-static struct platform_driver tegra_spdif_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = tegra_spdif_platform_probe,
- .remove = __devexit_p(tegra_spdif_platform_remove),
-};
-
-static int __init snd_tegra_spdif_init(void)
-{
- return platform_driver_register(&tegra_spdif_driver);
-}
-module_init(snd_tegra_spdif_init);
-
-static void __exit snd_tegra_spdif_exit(void)
-{
- platform_driver_unregister(&tegra_spdif_driver);
-}
-module_exit(snd_tegra_spdif_exit);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra SPDIF ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_spdif.h b/sound/soc/tegra/tegra_spdif.h
deleted file mode 100644
index 2e03db430279..000000000000
--- a/sound/soc/tegra/tegra_spdif.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- * tegra_spdif.h - Definitions for Tegra SPDIF driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2011 - NVIDIA, Inc.
- *
- * Based on code copyright/by:
- * Copyright (c) 2008-2009, NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __TEGRA_SPDIF_H__
-#define __TEGRA_SPDIF_H__
-
-#include "tegra_pcm.h"
-
-/* Offsets from TEGRA_SPDIF_BASE */
-
-#define TEGRA_SPDIF_CTRL 0x0
-#define TEGRA_SPDIF_STATUS 0x4
-#define TEGRA_SPDIF_STROBE_CTRL 0x8
-#define TEGRA_SPDIF_DATA_FIFO_CSR 0x0C
-#define TEGRA_SPDIF_DATA_OUT 0x40
-#define TEGRA_SPDIF_DATA_IN 0x80
-#define TEGRA_SPDIF_CH_STA_RX_A 0x100
-#define TEGRA_SPDIF_CH_STA_RX_B 0x104
-#define TEGRA_SPDIF_CH_STA_RX_C 0x108
-#define TEGRA_SPDIF_CH_STA_RX_D 0x10C
-#define TEGRA_SPDIF_CH_STA_RX_E 0x110
-#define TEGRA_SPDIF_CH_STA_RX_F 0x114
-#define TEGRA_SPDIF_CH_STA_TX_A 0x140
-#define TEGRA_SPDIF_CH_STA_TX_B 0x144
-#define TEGRA_SPDIF_CH_STA_TX_C 0x148
-#define TEGRA_SPDIF_CH_STA_TX_D 0x14C
-#define TEGRA_SPDIF_CH_STA_TX_E 0x150
-#define TEGRA_SPDIF_CH_STA_TX_F 0x154
-#define TEGRA_SPDIF_USR_STA_RX_A 0x180
-#define TEGRA_SPDIF_USR_DAT_TX_A 0x1C0
-
-/* Fields in TEGRA_SPDIF_CTRL */
-
-/* Start capturing from 0=right, 1=left channel */
-#define TEGRA_SPDIF_CTRL_CAP_LC (1 << 30)
-
-/* SPDIF receiver(RX) enable */
-#define TEGRA_SPDIF_CTRL_RX_EN (1 << 29)
-
-/* SPDIF Transmitter(TX) enable */
-#define TEGRA_SPDIF_CTRL_TX_EN (1 << 28)
-
-/* Transmit Channel status */
-#define TEGRA_SPDIF_CTRL_TC_EN (1 << 27)
-
-/* Transmit user Data */
-#define TEGRA_SPDIF_CTRL_TU_EN (1 << 26)
-
-/* Interrupt on transmit error */
-#define TEGRA_SPDIF_CTRL_IE_TXE (1 << 25)
-
-/* Interrupt on receive error */
-#define TEGRA_SPDIF_CTRL_IE_RXE (1 << 24)
-
-/* Interrupt on invalid preamble */
-#define TEGRA_SPDIF_CTRL_IE_P (1 << 23)
-
-/* Interrupt on "B" preamble */
-#define TEGRA_SPDIF_CTRL_IE_B (1 << 22)
-
-/* Interrupt when block of channel status received */
-#define TEGRA_SPDIF_CTRL_IE_C (1 << 21)
-
-/* Interrupt when a valid information unit (IU) is received */
-#define TEGRA_SPDIF_CTRL_IE_U (1 << 20)
-
-/* Interrupt when RX user FIFO attention level is reached */
-#define TEGRA_SPDIF_CTRL_QE_RU (1 << 19)
-
-/* Interrupt when TX user FIFO attention level is reached */
-#define TEGRA_SPDIF_CTRL_QE_TU (1 << 18)
-
-/* Interrupt when RX data FIFO attention level is reached */
-#define TEGRA_SPDIF_CTRL_QE_RX (1 << 17)
-
-/* Interrupt when TX data FIFO attention level is reached */
-#define TEGRA_SPDIF_CTRL_QE_TX (1 << 16)
-
-/* Loopback test mode enable */
-#define TEGRA_SPDIF_CTRL_LBK_EN (1 << 15)
-
-/*
- * Pack data mode:
- * 0 = Single data (16 bit needs to be padded to match the
- * interface data bit size).
- * 1 = Packed left/right channel data into a single word.
- */
-#define TEGRA_SPDIF_CTRL_PACK (1 << 14)
-
-/*
- * 00 = 16bit data
- * 01 = 20bit data
- * 10 = 24bit data
- * 11 = raw data
- */
-#define TEGRA_SPDIF_BIT_MODE_16BIT 0
-#define TEGRA_SPDIF_BIT_MODE_20BIT 1
-#define TEGRA_SPDIF_BIT_MODE_24BIT 2
-#define TEGRA_SPDIF_BIT_MODE_RAW 3
-
-#define TEGRA_SPDIF_CTRL_BIT_MODE_SHIFT 12
-#define TEGRA_SPDIF_CTRL_BIT_MODE_MASK (3 << TEGRA_SPDIF_CTRL_BIT_MODE_SHIFT)
-#define TEGRA_SPDIF_CTRL_BIT_MODE_16BIT (TEGRA_SPDIF_BIT_MODE_16BIT << TEGRA_SPDIF_CTRL_BIT_MODE_SHIFT)
-#define TEGRA_SPDIF_CTRL_BIT_MODE_20BIT (TEGRA_SPDIF_BIT_MODE_20BIT << TEGRA_SPDIF_CTRL_BIT_MODE_SHIFT)
-#define TEGRA_SPDIF_CTRL_BIT_MODE_24BIT (TEGRA_SPDIF_BIT_MODE_24BIT << TEGRA_SPDIF_CTRL_BIT_MODE_SHIFT)
-#define TEGRA_SPDIF_CTRL_BIT_MODE_RAW (TEGRA_SPDIF_BIT_MODE_RAW << TEGRA_SPDIF_CTRL_BIT_MODE_SHIFT)
-
-/* Fields in TEGRA_SPDIF_STATUS */
-
-/*
- * Note: IS_P, IS_B, IS_C, and IS_U are sticky bits. Software must
- * write a 1 to the corresponding bit location to clear the status.
- */
-
-/*
- * Receiver(RX) shifter is busy receiving data.
- * This bit is asserted when the receiver first locked onto the
- * preamble of the data stream after RX_EN is asserted. This bit is
- * deasserted when either,
- * (a) the end of a frame is reached after RX_EN is deasserted, or
- * (b) the SPDIF data stream becomes inactive.
- */
-#define TEGRA_SPDIF_STATUS_RX_BSY (1 << 29)
-
-/*
- * Transmitter(TX) shifter is busy transmitting data.
- * This bit is asserted when TX_EN is asserted.
- * This bit is deasserted when the end of a frame is reached after
- * TX_EN is deasserted.
- */
-#define TEGRA_SPDIF_STATUS_TX_BSY (1 << 28)
-
-/*
- * TX is busy shifting out channel status.
- * This bit is asserted when both TX_EN and TC_EN are asserted and
- * data from CH_STA_TX_A register is loaded into the internal shifter.
- * This bit is deasserted when either,
- * (a) the end of a frame is reached after TX_EN is deasserted, or
- * (b) CH_STA_TX_F register is loaded into the internal shifter.
- */
-#define TEGRA_SPDIF_STATUS_TC_BSY (1 << 27)
-
-/*
- * TX User data FIFO busy.
- * This bit is asserted when TX_EN and TXU_EN are asserted and
- * there's data in the TX user FIFO. This bit is deasserted when either,
- * (a) the end of a frame is reached after TX_EN is deasserted, or
- * (b) there's no data left in the TX user FIFO.
- */
-#define TEGRA_SPDIF_STATUS_TU_BSY (1 << 26)
-
-/* TX FIFO Underrun error status */
-#define TEGRA_SPDIF_STATUS_TX_ERR (1 << 25)
-
-/* RX FIFO Overrun error status */
-#define TEGRA_SPDIF_STATUS_RX_ERR (1 << 24)
-
-/* Preamble status: 0=Preamble OK, 1=bad/missing preamble */
-#define TEGRA_SPDIF_STATUS_IS_P (1 << 23)
-
-/* B-preamble detection status: 0=not detected, 1=B-preamble detected */
-#define TEGRA_SPDIF_STATUS_IS_B (1 << 22)
-
-/*
- * RX channel block data receive status:
- * 0=entire block not received yet.
- * 1=received entire block of channel status,
- */
-#define TEGRA_SPDIF_STATUS_IS_C (1 << 21)
-
-/* RX User Data Valid flag: 1=valid IU detected, 0 = no IU detected. */
-#define TEGRA_SPDIF_STATUS_IS_U (1 << 20)
-
-/*
- * RX User FIFO Status:
- * 1=attention level reached, 0=attention level not reached.
- */
-#define TEGRA_SPDIF_STATUS_QS_RU (1 << 19)
-
-/*
- * TX User FIFO Status:
- * 1=attention level reached, 0=attention level not reached.
- */
-#define TEGRA_SPDIF_STATUS_QS_TU (1 << 18)
-
-/*
- * RX Data FIFO Status:
- * 1=attention level reached, 0=attention level not reached.
- */
-#define TEGRA_SPDIF_STATUS_QS_RX (1 << 17)
-
-/*
- * TX Data FIFO Status:
- * 1=attention level reached, 0=attention level not reached.
- */
-#define TEGRA_SPDIF_STATUS_QS_TX (1 << 16)
-
-/* Fields in TEGRA_SPDIF_STROBE_CTRL */
-
-/*
- * Indicates the approximate number of detected SPDIFIN clocks within a
- * bi-phase period.
- */
-#define TEGRA_SPDIF_STROBE_CTRL_PERIOD_SHIFT 16
-#define TEGRA_SPDIF_STROBE_CTRL_PERIOD_MASK (0xff << TEGRA_SPDIF_STROBE_CTRL_PERIOD_SHIFT)
-
-/* Data strobe mode: 0=Auto-locked 1=Manual locked */
-#define TEGRA_SPDIF_STROBE_CTRL_STROBE (1 << 15)
-
-/*
- * Manual data strobe time within the bi-phase clock period (in terms of
- * the number of over-sampling clocks).
- */
-#define TEGRA_SPDIF_STROBE_CTRL_DATA_STROBES_SHIFT 8
-#define TEGRA_SPDIF_STROBE_CTRL_DATA_STROBES_MASK (0x1f << TEGRA_SPDIF_STROBE_CTRL_DATA_STROBES_SHIFT)
-
-/*
- * Manual SPDIFIN bi-phase clock period (in terms of the number of
- * over-sampling clocks).
- */
-#define TEGRA_SPDIF_STROBE_CTRL_CLOCK_PERIOD_SHIFT 0
-#define TEGRA_SPDIF_STROBE_CTRL_CLOCK_PERIOD_MASK (0x3f << TEGRA_SPDIF_STROBE_CTRL_CLOCK_PERIOD_SHIFT)
-
-/* Fields in SPDIF_DATA_FIFO_CSR */
-
-/* Clear Receiver User FIFO (RX USR.FIFO) */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_CLR (1 << 31)
-
-#define TEGRA_SPDIF_FIFO_ATN_LVL_U_ONE_SLOT 0
-#define TEGRA_SPDIF_FIFO_ATN_LVL_U_TWO_SLOTS 1
-#define TEGRA_SPDIF_FIFO_ATN_LVL_U_THREE_SLOTS 2
-#define TEGRA_SPDIF_FIFO_ATN_LVL_U_FOUR_SLOTS 3
-
-/* RU FIFO attention level */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT 29
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_MASK \
- (0x3 << TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU1_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_ONE_SLOT << TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU2_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_TWO_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU3_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_THREE_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_RU4_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_FOUR_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_RU_ATN_LVL_SHIFT)
-
-/* Number of RX USR.FIFO levels with valid data. */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_FULL_COUNT_SHIFT 24
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RU_FULL_COUNT_MASK (0x1f << TEGRA_SPDIF_DATA_FIFO_CSR_RU_FULL_COUNT_SHIFT)
-
-/* Clear Transmitter User FIFO (TX USR.FIFO) */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_CLR (1 << 23)
-
-/* TU FIFO attention level */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT 21
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_MASK \
- (0x3 << TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU1_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_ONE_SLOT << TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU2_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_TWO_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU3_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_THREE_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_TU4_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_U_FOUR_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_TU_ATN_LVL_SHIFT)
-
-/* Number of TX USR.FIFO levels that could be filled. */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_EMPTY_COUNT_SHIFT 16
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TU_EMPTY_COUNT_MASK (0x1f << SPDIF_DATA_FIFO_CSR_TU_EMPTY_COUNT_SHIFT)
-
-/* Clear Receiver Data FIFO (RX DATA.FIFO) */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_CLR (1 << 15)
-
-#define TEGRA_SPDIF_FIFO_ATN_LVL_D_ONE_SLOT 0
-#define TEGRA_SPDIF_FIFO_ATN_LVL_D_FOUR_SLOTS 1
-#define TEGRA_SPDIF_FIFO_ATN_LVL_D_EIGHT_SLOTS 2
-#define TEGRA_SPDIF_FIFO_ATN_LVL_D_TWELVE_SLOTS 3
-
-/* RU FIFO attention level */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT 13
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_MASK \
- (0x3 << TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU1_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_ONE_SLOT << TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU4_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_FOUR_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU8_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_EIGHT_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_RU12_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_TWELVE_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_RX_ATN_LVL_SHIFT)
-
-/* Number of RX DATA.FIFO levels with valid data. */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_FULL_COUNT_SHIFT 8
-#define TEGRA_SPDIF_DATA_FIFO_CSR_RX_FULL_COUNT_MASK (0x1f << TEGRA_SPDIF_DATA_FIFO_CSR_RX_FULL_COUNT_SHIFT)
-
-/* Clear Transmitter Data FIFO (TX DATA.FIFO) */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_CLR (1 << 7)
-
-/* TX DATA.FIFO attention level */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT 5
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_MASK \
- (0x3 << TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU1_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_ONE_SLOT << TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU4_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_FOUR_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU8_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_EIGHT_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU12_WORD_FULL \
- (TEGRA_SPDIF_FIFO_ATN_LVL_D_TWELVE_SLOTS << TEGRA_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_SHIFT)
-
-/* Number of TX DATA.FIFO levels that could be filled. */
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_EMPTY_COUNT_SHIFT 0
-#define TEGRA_SPDIF_DATA_FIFO_CSR_TX_EMPTY_COUNT_MASK (0x1f << TEGRA_SPDIF_DATA_FIFO_CSR_TX_EMPTY_COUNT_SHIFT)
-
-/* Fields in TEGRA_SPDIF_DATA_OUT */
-
-/*
- * This register has 5 different formats:
- * 16-bit (BIT_MODE=00, PACK=0)
- * 20-bit (BIT_MODE=01, PACK=0)
- * 24-bit (BIT_MODE=10, PACK=0)
- * raw (BIT_MODE=11, PACK=0)
- * 16-bit packed (BIT_MODE=00, PACK=1)
- */
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_16_SHIFT 0
-#define TEGRA_SPDIF_DATA_OUT_DATA_16_MASK (0xffff << TEGRA_SPDIF_DATA_OUT_DATA_16_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_20_SHIFT 0
-#define TEGRA_SPDIF_DATA_OUT_DATA_20_MASK (0xfffff << TEGRA_SPDIF_DATA_OUT_DATA_20_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_24_SHIFT 0
-#define TEGRA_SPDIF_DATA_OUT_DATA_24_MASK (0xffffff << TEGRA_SPDIF_DATA_OUT_DATA_24_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_P (1 << 31)
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_C (1 << 30)
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_U (1 << 29)
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_V (1 << 28)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_DATA_SHIFT 8
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_DATA_MASK (0xfffff << TEGRA_SPDIF_DATA_OUT_DATA_RAW_DATA_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_AUX_SHIFT 4
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_AUX_MASK (0xf << TEGRA_SPDIF_DATA_OUT_DATA_RAW_AUX_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_PREAMBLE_SHIFT 0
-#define TEGRA_SPDIF_DATA_OUT_DATA_RAW_PREAMBLE_MASK (0xf << TEGRA_SPDIF_DATA_OUT_DATA_RAW_PREAMBLE_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_16_PACKED_RIGHT_SHIFT 16
-#define TEGRA_SPDIF_DATA_OUT_DATA_16_PACKED_RIGHT_MASK (0xffff << TEGRA_SPDIF_DATA_OUT_DATA_16_PACKED_RIGHT_SHIFT)
-
-#define TEGRA_SPDIF_DATA_OUT_DATA_16_PACKED_LEFT_SHIFT 0
-#define TEGRA_SPDIF_DATA_OUT_DATA_16_PACKED_LEFT_MASK (0xffff << TEGRA_SPDIF_DATA_OUT_DATA_16_PACKED_LEFT_SHIFT)
-
-/* Fields in TEGRA_SPDIF_DATA_IN */
-
-/*
- * This register has 5 different formats:
- * 16-bit (BIT_MODE=00, PACK=0)
- * 20-bit (BIT_MODE=01, PACK=0)
- * 24-bit (BIT_MODE=10, PACK=0)
- * raw (BIT_MODE=11, PACK=0)
- * 16-bit packed (BIT_MODE=00, PACK=1)
- *
- * Bits 31:24 are common to all modes except 16-bit packed
- */
-
-#define TEGRA_SPDIF_DATA_IN_DATA_P (1 << 31)
-#define TEGRA_SPDIF_DATA_IN_DATA_C (1 << 30)
-#define TEGRA_SPDIF_DATA_IN_DATA_U (1 << 29)
-#define TEGRA_SPDIF_DATA_IN_DATA_V (1 << 28)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_PREAMBLE_SHIFT 24
-#define TEGRA_SPDIF_DATA_IN_DATA_PREAMBLE_MASK (0xf << TEGRA_SPDIF_DATA_IN_DATA_PREAMBLE_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_16_SHIFT 0
-#define TEGRA_SPDIF_DATA_IN_DATA_16_MASK (0xffff << TEGRA_SPDIF_DATA_IN_DATA_16_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_20_SHIFT 0
-#define TEGRA_SPDIF_DATA_IN_DATA_20_MASK (0xfffff << TEGRA_SPDIF_DATA_IN_DATA_20_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_24_SHIFT 0
-#define TEGRA_SPDIF_DATA_IN_DATA_24_MASK (0xffffff << TEGRA_SPDIF_DATA_IN_DATA_24_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_RAW_DATA_SHIFT 8
-#define TEGRA_SPDIF_DATA_IN_DATA_RAW_DATA_MASK (0xfffff << TEGRA_SPDIF_DATA_IN_DATA_RAW_DATA_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_RAW_AUX_SHIFT 4
-#define TEGRA_SPDIF_DATA_IN_DATA_RAW_AUX_MASK (0xf << TEGRA_SPDIF_DATA_IN_DATA_RAW_AUX_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_RAW_PREAMBLE_SHIFT 0
-#define TEGRA_SPDIF_DATA_IN_DATA_RAW_PREAMBLE_MASK (0xf << TEGRA_SPDIF_DATA_IN_DATA_RAW_PREAMBLE_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_16_PACKED_RIGHT_SHIFT 16
-#define TEGRA_SPDIF_DATA_IN_DATA_16_PACKED_RIGHT_MASK (0xffff << TEGRA_SPDIF_DATA_IN_DATA_16_PACKED_RIGHT_SHIFT)
-
-#define TEGRA_SPDIF_DATA_IN_DATA_16_PACKED_LEFT_SHIFT 0
-#define TEGRA_SPDIF_DATA_IN_DATA_16_PACKED_LEFT_MASK (0xffff << TEGRA_SPDIF_DATA_IN_DATA_16_PACKED_LEFT_SHIFT)
-
-/* Fields in TEGRA_SPDIF_CH_STA_RX_A */
-/* Fields in TEGRA_SPDIF_CH_STA_RX_B */
-/* Fields in TEGRA_SPDIF_CH_STA_RX_C */
-/* Fields in TEGRA_SPDIF_CH_STA_RX_D */
-/* Fields in TEGRA_SPDIF_CH_STA_RX_E */
-/* Fields in TEGRA_SPDIF_CH_STA_RX_F */
-
-/*
- * The 6-word receive channel data page buffer holds a block (192 frames) of
- * channel status information. The order of receive is from LSB to MSB
- * bit, and from CH_STA_RX_A to CH_STA_RX_F then back to CH_STA_RX_A.
- */
-
-/* Fields in TEGRA_SPDIF_CH_STA_TX_A */
-/* Fields in TEGRA_SPDIF_CH_STA_TX_B */
-/* Fields in TEGRA_SPDIF_CH_STA_TX_C */
-/* Fields in TEGRA_SPDIF_CH_STA_TX_D */
-/* Fields in TEGRA_SPDIF_CH_STA_TX_E */
-/* Fields in TEGRA_SPDIF_CH_STA_TX_F */
-
-/*
- * The 6-word transmit channel data page buffer holds a block (192 frames) of
- * channel status information. The order of transmission is from LSB to MSB
- * bit, and from CH_STA_TX_A to CH_STA_TX_F then back to CH_STA_TX_A.
- */
-
-/* Fields in TEGRA_SPDIF_USR_STA_RX_A */
-
-/*
- * This 4-word deep FIFO receives user FIFO field information. The order of
- * receive is from LSB to MSB bit.
- */
-
-/* Fields in TEGRA_SPDIF_USR_DAT_TX_A */
-
-/*
- * This 4-word deep FIFO transmits user FIFO field information. The order of
- * transmission is from LSB to MSB bit.
- */
-
-struct tegra_spdif {
- struct clk *clk_spdif_out;
- int clk_refs;
- struct tegra_pcm_dma_params capture_dma_data;
- struct tegra_pcm_dma_params playback_dma_data;
- void __iomem *regs;
- struct dentry *debug;
- u32 reg_ctrl;
-};
-
-#endif
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
new file mode 100644
index 000000000000..0b2a431d500e
--- /dev/null
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -0,0 +1,685 @@
+/*
+ * tegra_wm8753.c - Tegra machine ASoC driver for boards using WM8753 codec.
+ *
+ * Author: Sumit Bhattacharya <sumitb@nvidia.com>
+ * Copyright (C) 2010-2011 - NVIDIA, Inc.
+ *
+ * Based on code copyright/by:
+ *
+ * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <asm/mach-types.h>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+
+#include <mach/tegra_wm8753_pdata.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/wm8753.h"
+
+#include "tegra_pcm.h"
+#include "tegra_asoc_utils.h"
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#include "tegra20_das.h"
+#endif
+
+#define DRV_NAME "tegra-snd-wm8753"
+
+#define GPIO_SPKR_EN BIT(0)
+#define GPIO_HP_MUTE BIT(1)
+#define GPIO_INT_MIC_EN BIT(2)
+#define GPIO_EXT_MIC_EN BIT(3)
+
+struct tegra_wm8753 {
+ struct tegra_asoc_utils_data util_data;
+ struct tegra_wm8753_platform_data *pdata;
+ struct regulator *audio_reg;
+ int gpio_requested;
+};
+
+static int tegra_wm8753_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, i2s_daifmt;
+ int err;
+ srate = params_rate(params);
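+	/*
+	 * Choose the codec MCLK from the sample-rate family: 12.288 MHz for
+	 * the 8/16/24/32/48/64/96 kHz rates, 11.2896 MHz for the
+	 * 11.025/22.05/44.1/88.2 kHz rates, and a fixed 12 MHz fallback
+	 * for anything else.
+	 */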
+ switch (srate) {
+ case 8000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ mclk = 12000000;
+ break;
+ }
+
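+	/*
+	 * If the requested MCLK cannot be programmed, keep the currently set
+	 * MCLK as long as it is an exact multiple of the rate we asked for;
+	 * otherwise bail out.
+	 */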
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ i2s_daifmt = SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS;
+
+ /* Use DSP mode for mono on Tegra20 */
+ if ((params_channels(params) != 2) && machine_is_whistler())
+ i2s_daifmt |= SND_SOC_DAIFMT_DSP_A;
+ else
+ i2s_daifmt |= SND_SOC_DAIFMT_I2S;
+
+ err = snd_soc_dai_set_fmt(codec_dai, i2s_daifmt);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai, i2s_daifmt);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
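+	/* On Tegra20, route DAC1 to DAP1 (and back) through the DAS crossbar. */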
+ err = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAP_SEL_DAC1,
+ TEGRA20_DAS_DAP_ID_1);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dap-dac path\n");
+ return err;
+ }
+
+ err = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_1,
+ TEGRA20_DAS_DAP_SEL_DAC1);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+#endif
+ return 0;
+}
+
+static int tegra_bt_sco_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
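+	/* Reuse the current MCLK below only if it is a multiple of 64*fs. */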
+ min_mclk = 64 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
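+	/* On Tegra20, route DAC2 to DAP4 (and back) for the Bluetooth SCO link. */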
+ err = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAP_SEL_DAC2,
+ TEGRA20_DAS_DAP_ID_4);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+
+ err = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_4,
+ TEGRA20_DAS_DAP_SEL_DAC2);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+#endif
+ return 0;
+}
+
+static int tegra_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
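+	/*
+	 * S/PDIF carries two 32-bit subframes per frame and biphase-mark
+	 * coding doubles the line rate, so the clock feeding it must be a
+	 * multiple of 128*fs.
+	 */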
+ min_mclk = 128 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ return 0;
+}
+
+static int tegra_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(rtd->card);
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 0);
+
+ return 0;
+}
+
+static struct snd_soc_ops tegra_wm8753_ops = {
+ .hw_params = tegra_wm8753_hw_params,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_ops tegra_bt_sco_ops = {
+ .hw_params = tegra_bt_sco_hw_params,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_ops tegra_spdif_ops = {
+ .hw_params = tegra_spdif_hw_params,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_jack tegra_wm8753_hp_jack;
+
+#ifdef CONFIG_SWITCH
+static struct switch_dev wired_switch_dev = {
+ .name = "h2w",
+};
+
+/* These values are copied from WiredAccessoryObserver */
+enum headset_state {
+ BIT_NO_HEADSET = 0,
+ BIT_HEADSET = (1 << 0),
+ BIT_HEADSET_NO_MIC = (1 << 1),
+};
+
+static int headset_switch_notify(struct notifier_block *self,
+ unsigned long action, void *dev)
+{
+ switch (action) {
+ case SND_JACK_HEADPHONE:
+ switch_set_state(&wired_switch_dev, BIT_HEADSET_NO_MIC);
+ break;
+ case SND_JACK_HEADSET:
+ switch_set_state(&wired_switch_dev, BIT_HEADSET);
+ break;
+ default:
+ switch_set_state(&wired_switch_dev, BIT_NO_HEADSET);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block headset_switch_nb = {
+ .notifier_call = headset_switch_notify,
+};
+#else
+static struct snd_soc_jack_pin tegra_wm8753_hp_jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+#endif
+
+static int tegra_wm8753_event_int_spk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_wm8753_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_SPKR_EN))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_spkr_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static int tegra_wm8753_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_wm8753_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_HP_MUTE))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_hp_mute,
+ !SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget tegra_wm8753_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Int Spk", tegra_wm8753_event_int_spk),
+ SND_SOC_DAPM_HP("Earpiece", NULL),
+ SND_SOC_DAPM_OUTPUT("Mono Out"),
+ SND_SOC_DAPM_HP("Headphone Jack", tegra_wm8753_event_hp),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_INPUT("Int Mic"),
+ SND_SOC_DAPM_LINE("LineIn Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route whistler_audio_map[] = {
+ {"Int Spk", NULL, "ROUT2"},
+ {"Int Spk", NULL, "LOUT2"},
+ {"Earpiece", NULL, "OUT3"},
+ {"Earpiece", NULL, "LOUT1"},
+ {"Mono Out", NULL, "MONO1"},
+ {"Mono Out", NULL, "MONO2"},
+ {"Headphone Jack", NULL, "ROUT1"},
+ {"Headphone Jack", NULL, "LOUT1"},
+ {"Headphone Jack", NULL, "OUT4"},
+ {"Mic Bias", NULL, "Mic Jack"},
+ {"MIC1", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Int Mic"},
+ {"MIC2", NULL, "Mic Bias"},
+ {"MIC2N", NULL, "Mic Bias"},
+ {"LINE1", NULL, "LineIn Jack"},
+ {"LINE2", NULL, "LineIn Jack"},
+};
+
+static const struct snd_kcontrol_new tegra_wm8753_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Int Spk"),
+ SOC_DAPM_PIN_SWITCH("Earpiece"),
+ SOC_DAPM_PIN_SWITCH("Mono Out"),
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("LineIn Jack"),
+};
+
+static int tegra_wm8753_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_wm8753_platform_data *pdata = machine->pdata;
+ int ret;
+
+ if (machine_is_whistler()) {
+ machine->audio_reg = regulator_get(NULL, "avddio_audio");
+ if (IS_ERR(machine->audio_reg)) {
+ dev_err(card->dev, "cannot get avddio_audio reg\n");
+ ret = PTR_ERR(machine->audio_reg);
+ return ret;
+ }
+
+ ret = regulator_enable(machine->audio_reg);
+ if (ret) {
+ dev_err(card->dev, "cannot enable avddio_audio reg\n");
+ regulator_put(machine->audio_reg);
+ machine->audio_reg = NULL;
+ return ret;
+ }
+ }
+
+ if (gpio_is_valid(pdata->gpio_spkr_en)) {
+ ret = gpio_request(pdata->gpio_spkr_en, "spkr_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get spkr_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_SPKR_EN;
+
+ gpio_direction_output(pdata->gpio_spkr_en, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_hp_mute)) {
+ ret = gpio_request(pdata->gpio_hp_mute, "hp_mute");
+ if (ret) {
+ dev_err(card->dev, "cannot get hp_mute gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_HP_MUTE;
+
+ gpio_direction_output(pdata->gpio_hp_mute, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_int_mic_en)) {
+ ret = gpio_request(pdata->gpio_int_mic_en, "int_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get int_mic_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_INT_MIC_EN;
+
+ /* Disable int mic; enable signal is active-high */
+ gpio_direction_output(pdata->gpio_int_mic_en, 0);
+ }
+
+ if (gpio_is_valid(pdata->gpio_ext_mic_en)) {
+ ret = gpio_request(pdata->gpio_ext_mic_en, "ext_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get ext_mic_en gpio\n");
+ return ret;
+ }
+ machine->gpio_requested |= GPIO_EXT_MIC_EN;
+
+ /* Enable ext mic; enable signal is active-low */
+ gpio_direction_output(pdata->gpio_ext_mic_en, 0);
+ }
+
+ ret = snd_soc_add_controls(codec, tegra_wm8753_controls,
+ ARRAY_SIZE(tegra_wm8753_controls));
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_new_controls(dapm, tegra_wm8753_dapm_widgets,
+ ARRAY_SIZE(tegra_wm8753_dapm_widgets));
+
+ snd_soc_dapm_add_routes(dapm, whistler_audio_map,
+ ARRAY_SIZE(whistler_audio_map));
+
+ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+ &tegra_wm8753_hp_jack);
+ wm8753_headphone_detect(codec, &tegra_wm8753_hp_jack,
+ SND_JACK_HEADPHONE, pdata->debounce_time_hp);
+#ifdef CONFIG_SWITCH
+ snd_soc_jack_notifier_register(&tegra_wm8753_hp_jack,
+ &headset_switch_nb);
+#else
+ snd_soc_jack_add_pins(&tegra_wm8753_hp_jack,
+ ARRAY_SIZE(tegra_wm8753_hp_jack_pins),
+ tegra_wm8753_hp_jack_pins);
+#endif
+
+ snd_soc_dapm_nc_pin(dapm, "ACIN");
+ snd_soc_dapm_nc_pin(dapm, "ACOP");
+ snd_soc_dapm_nc_pin(dapm, "OUT3");
+ snd_soc_dapm_nc_pin(dapm, "OUT4");
+
+ snd_soc_dapm_sync(dapm);
+
+ return 0;
+}
+
+static struct snd_soc_dai_link tegra_wm8753_dai[] = {
+ {
+ .name = "WM8753",
+ .stream_name = "WM8753 PCM HIFI",
+ .codec_name = "wm8753-codec.4-001a",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra20-i2s.0",
+ .codec_dai_name = "wm8753-hifi",
+ .init = tegra_wm8753_init,
+ .ops = &tegra_wm8753_ops,
+ },
+ {
+ .name = "SPDIF",
+ .stream_name = "SPDIF PCM",
+ .codec_name = "spdif-dit.0",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra20-spdif",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_spdif_ops,
+ },
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ {
+ .name = "BT-SCO",
+ .stream_name = "BT SCO PCM",
+ .codec_name = "spdif-dit.1",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra20-i2s.1",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_bt_sco_ops,
+ },
+#endif
+};
+
+static struct snd_soc_card snd_soc_tegra_wm8753 = {
+ .name = "tegra-wm8753",
+ .dai_link = tegra_wm8753_dai,
+ .num_links = ARRAY_SIZE(tegra_wm8753_dai),
+};
+
+static __devinit int tegra_wm8753_driver_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_tegra_wm8753;
+ struct tegra_wm8753 *machine;
+ struct tegra_wm8753_platform_data *pdata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ machine = kzalloc(sizeof(struct tegra_wm8753), GFP_KERNEL);
+ if (!machine) {
+ dev_err(&pdev->dev, "Can't allocate tegra_wm8753 struct\n");
+ return -ENOMEM;
+ }
+
+ machine->pdata = pdata;
+
+ ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
+ if (ret)
+ goto err_free_machine;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
+ goto err_fini_utils;
+ }
+
+ if (!card->instantiated) {
+		dev_err(&pdev->dev, "No WM8753 codec\n");
+		ret = -ENODEV;
+		goto err_unregister_card;
+ }
+
+#ifdef CONFIG_SWITCH
+	/* Add h2w switch class support */
+ ret = switch_dev_register(&wired_switch_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "not able to register switch device\n");
+ goto err_unregister_card;
+ }
+#endif
+
+ return 0;
+
+err_unregister_card:
+ snd_soc_unregister_card(card);
+err_fini_utils:
+ tegra_asoc_utils_fini(&machine->util_data);
+err_free_machine:
+ kfree(machine);
+ return ret;
+}
+
+static int __devexit tegra_wm8753_driver_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_wm8753_platform_data *pdata = machine->pdata;
+
+ snd_soc_unregister_card(card);
+
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&wired_switch_dev);
+#endif
+
+ tegra_asoc_utils_fini(&machine->util_data);
+
+ if (machine->gpio_requested & GPIO_EXT_MIC_EN)
+ gpio_free(pdata->gpio_ext_mic_en);
+ if (machine->gpio_requested & GPIO_INT_MIC_EN)
+ gpio_free(pdata->gpio_int_mic_en);
+ if (machine->gpio_requested & GPIO_HP_MUTE)
+ gpio_free(pdata->gpio_hp_mute);
+ if (machine->gpio_requested & GPIO_SPKR_EN)
+ gpio_free(pdata->gpio_spkr_en);
+ if (machine->audio_reg) {
+ regulator_disable(machine->audio_reg);
+ regulator_put(machine->audio_reg);
+ }
+
+ kfree(machine);
+
+ return 0;
+}
+
+static struct platform_driver tegra_wm8753_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = tegra_wm8753_driver_probe,
+ .remove = __devexit_p(tegra_wm8753_driver_remove),
+};
+
+static int __init tegra_wm8753_modinit(void)
+{
+ return platform_driver_register(&tegra_wm8753_driver);
+}
+module_init(tegra_wm8753_modinit);
+
+static void __exit tegra_wm8753_modexit(void)
+{
+ platform_driver_unregister(&tegra_wm8753_driver);
+}
+module_exit(tegra_wm8753_modexit);
+
+MODULE_AUTHOR("Sumit Bhattacharya <sumitb@nvidia.com>");
+MODULE_DESCRIPTION("Tegra+WM8753 machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index be27f1d229af..67cb126876f7 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -34,6 +34,10 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
#include <mach/tegra_wm8903_pdata.h>
@@ -45,11 +49,13 @@
#include "../codecs/wm8903.h"
-#include "tegra_das.h"
-#include "tegra_i2s.h"
#include "tegra_pcm.h"
#include "tegra_asoc_utils.h"
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#include "tegra20_das.h"
+#endif
+
#define DRV_NAME "tegra-snd-wm8903"
#define GPIO_SPKR_EN BIT(0)
@@ -61,7 +67,12 @@
struct tegra_wm8903 {
struct tegra_asoc_utils_data util_data;
struct tegra_wm8903_platform_data *pdata;
+ struct regulator *spk_reg;
+ struct regulator *dmic_reg;
int gpio_requested;
+#ifdef CONFIG_SWITCH
+ int jack_status;
+#endif
};
static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
@@ -73,7 +84,7 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_card *card = codec->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk;
+ int srate, mclk, i2s_daifmt;
int err;
srate = params_rate(params);
@@ -93,23 +104,34 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
+ if (!(machine->util_data.set_mclk % mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
}
- err = snd_soc_dai_set_fmt(codec_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ i2s_daifmt = SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS;
+
+ /* Use DSP mode for mono on Tegra20 */
+ if ((params_channels(params) != 2) &&
+ (machine_is_ventana() || machine_is_harmony() ||
+ machine_is_kaen() || machine_is_aebl()))
+ i2s_daifmt |= SND_SOC_DAIFMT_DSP_A;
+ else
+ i2s_daifmt |= SND_SOC_DAIFMT_I2S;
+
+ err = snd_soc_dai_set_fmt(codec_dai, i2s_daifmt);
if (err < 0) {
dev_err(card->dev, "codec_dai fmt not set\n");
return err;
}
- err = snd_soc_dai_set_fmt(cpu_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
+ err = snd_soc_dai_set_fmt(cpu_dai, i2s_daifmt);
if (err < 0) {
dev_err(card->dev, "cpu_dai fmt not set\n");
return err;
@@ -122,22 +144,167 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
return err;
}
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ err = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAP_SEL_DAC1,
+ TEGRA20_DAS_DAP_ID_1);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dap-dac path\n");
+ return err;
+ }
+
+ err = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_1,
+ TEGRA20_DAS_DAP_SEL_DAC1);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+#endif
+ return 0;
+}
+
+static int tegra_bt_sco_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ min_mclk = 64 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ err = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAP_SEL_DAC2,
+ TEGRA20_DAS_DAP_ID_4);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+
+ err = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_4,
+ TEGRA20_DAS_DAP_SEL_DAC2);
+ if (err < 0) {
+ dev_err(card->dev, "failed to set dac-dap path\n");
+ return err;
+ }
+#endif
+ return 0;
+}
+
+static int tegra_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ min_mclk = 128 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ return 0;
+}
+
+static int tegra_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(rtd->card);
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 0);
+
return 0;
}
static struct snd_soc_ops tegra_wm8903_ops = {
.hw_params = tegra_wm8903_hw_params,
+ .hw_free = tegra_hw_free,
};
-static struct snd_soc_jack tegra_wm8903_hp_jack;
+static struct snd_soc_ops tegra_wm8903_bt_sco_ops = {
+ .hw_params = tegra_bt_sco_hw_params,
+ .hw_free = tegra_hw_free,
+};
-static struct snd_soc_jack_pin tegra_wm8903_hp_jack_pins[] = {
- {
- .pin = "Headphone Jack",
- .mask = SND_JACK_HEADPHONE,
- },
+static struct snd_soc_ops tegra_spdif_ops = {
+ .hw_params = tegra_spdif_hw_params,
+ .hw_free = tegra_hw_free,
};
+static struct snd_soc_jack tegra_wm8903_hp_jack;
+static struct snd_soc_jack tegra_wm8903_mic_jack;
+
static struct snd_soc_jack_gpio tegra_wm8903_hp_jack_gpio = {
.name = "headphone detect",
.report = SND_JACK_HEADPHONE,
@@ -145,7 +312,63 @@ static struct snd_soc_jack_gpio tegra_wm8903_hp_jack_gpio = {
.invert = 1,
};
-static struct snd_soc_jack tegra_wm8903_mic_jack;
+#ifdef CONFIG_SWITCH
+/* These values are copied from Android WiredAccessoryObserver */
+enum headset_state {
+ BIT_NO_HEADSET = 0,
+ BIT_HEADSET = (1 << 0),
+ BIT_HEADSET_NO_MIC = (1 << 1),
+};
+
+static struct switch_dev tegra_wm8903_headset_switch = {
+ .name = "h2w",
+};
+
+static int tegra_wm8903_jack_notifier(struct notifier_block *self,
+ unsigned long action, void *dev)
+{
+ struct snd_soc_jack *jack = dev;
+ struct snd_soc_codec *codec = jack->codec;
+ struct snd_soc_card *card = codec->card;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ enum headset_state state = BIT_NO_HEADSET;
+
+ if (jack == &tegra_wm8903_hp_jack) {
+ machine->jack_status &= ~SND_JACK_HEADPHONE;
+ machine->jack_status |= (action & SND_JACK_HEADPHONE);
+ } else {
+ machine->jack_status &= ~SND_JACK_MICROPHONE;
+ machine->jack_status |= (action & SND_JACK_MICROPHONE);
+ }
+
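+	/* Map the combined headphone/mic status onto the Android h2w states. */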
+ switch (machine->jack_status) {
+ case SND_JACK_HEADPHONE:
+ state = BIT_HEADSET_NO_MIC;
+ break;
+ case SND_JACK_HEADSET:
+ state = BIT_HEADSET;
+ break;
+ case SND_JACK_MICROPHONE:
+		/* A microphone alone is not reported; fall through to no headset */
+ default:
+ state = BIT_NO_HEADSET;
+ }
+
+ switch_set_state(&tegra_wm8903_headset_switch, state);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_wm8903_jack_detect_nb = {
+ .notifier_call = tegra_wm8903_jack_notifier,
+};
+#else
+static struct snd_soc_jack_pin tegra_wm8903_hp_jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
static struct snd_soc_jack_pin tegra_wm8903_mic_jack_pins[] = {
{
@@ -153,6 +376,7 @@ static struct snd_soc_jack_pin tegra_wm8903_mic_jack_pins[] = {
.mask = SND_JACK_MICROPHONE,
},
};
+#endif
static int tegra_wm8903_event_int_spk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
@@ -162,6 +386,13 @@ static int tegra_wm8903_event_int_spk(struct snd_soc_dapm_widget *w,
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
struct tegra_wm8903_platform_data *pdata = machine->pdata;
+ if (machine->spk_reg) {
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ regulator_enable(machine->spk_reg);
+ else
+ regulator_disable(machine->spk_reg);
+ }
+
if (!(machine->gpio_requested & GPIO_SPKR_EN))
return 0;
@@ -188,7 +419,57 @@ static int tegra_wm8903_event_hp(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget tegra_wm8903_dapm_widgets[] = {
+static int tegra_wm8903_event_int_mic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_wm8903_platform_data *pdata = machine->pdata;
+
+ if (machine->dmic_reg) {
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ regulator_enable(machine->dmic_reg);
+ else
+ regulator_disable(machine->dmic_reg);
+ }
+
+ if (!(machine->gpio_requested & GPIO_INT_MIC_EN))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_int_mic_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static int tegra_wm8903_event_ext_mic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_wm8903_platform_data *pdata = machine->pdata;
+
+ if (!(machine->gpio_requested & GPIO_EXT_MIC_EN))
+ return 0;
+
+ gpio_set_value_cansleep(pdata->gpio_ext_mic_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cardhu_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Int Spk", tegra_wm8903_event_int_spk),
+ SND_SOC_DAPM_HP("Headphone Jack", tegra_wm8903_event_hp),
+ SND_SOC_DAPM_LINE("Line Out", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", tegra_wm8903_event_ext_mic),
+ SND_SOC_DAPM_MIC("Int Mic", tegra_wm8903_event_int_mic),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+};
+
+static const struct snd_soc_dapm_widget tegra_wm8903_default_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Int Spk", tegra_wm8903_event_int_spk),
SND_SOC_DAPM_HP("Headphone Jack", tegra_wm8903_event_hp),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
@@ -205,6 +486,24 @@ static const struct snd_soc_dapm_route harmony_audio_map[] = {
{"IN1L", NULL, "Mic Bias"},
};
+static const struct snd_soc_dapm_route cardhu_audio_map[] = {
+ {"Headphone Jack", NULL, "HPOUTR"},
+ {"Headphone Jack", NULL, "HPOUTL"},
+ {"Int Spk", NULL, "ROP"},
+ {"Int Spk", NULL, "RON"},
+ {"Int Spk", NULL, "LOP"},
+ {"Int Spk", NULL, "LON"},
+ {"Line Out", NULL, "LINEOUTL"},
+ {"Line Out", NULL, "LINEOUTR"},
+ {"Mic Bias", NULL, "Mic Jack"},
+ {"IN1L", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Int Mic"},
+ {"IN1L", NULL, "Mic Bias"},
+ {"IN1R", NULL, "Mic Bias"},
+ {"IN3L", NULL, "Line In"},
+ {"IN3R", NULL, "Line In"},
+};
+
static const struct snd_soc_dapm_route seaboard_audio_map[] = {
{"Headphone Jack", NULL, "HPOUTR"},
{"Headphone Jack", NULL, "HPOUTL"},
@@ -236,7 +535,16 @@ static const struct snd_soc_dapm_route aebl_audio_map[] = {
{"IN1R", NULL, "Mic Bias"},
};
-static const struct snd_kcontrol_new tegra_wm8903_controls[] = {
+static const struct snd_kcontrol_new cardhu_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Int Spk"),
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("LineOut"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("LineIn"),
+};
+
+static const struct snd_kcontrol_new tegra_wm8903_default_controls[] = {
SOC_DAPM_PIN_SWITCH("Int Spk"),
};
@@ -299,9 +607,14 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
tegra_wm8903_hp_jack_gpio.gpio = pdata->gpio_hp_det;
snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
&tegra_wm8903_hp_jack);
+#ifndef CONFIG_SWITCH
snd_soc_jack_add_pins(&tegra_wm8903_hp_jack,
ARRAY_SIZE(tegra_wm8903_hp_jack_pins),
tegra_wm8903_hp_jack_pins);
+#else
+ snd_soc_jack_notifier_register(&tegra_wm8903_hp_jack,
+ &tegra_wm8903_jack_detect_nb);
+#endif
snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack,
1,
&tegra_wm8903_hp_jack_gpio);
@@ -310,18 +623,25 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
&tegra_wm8903_mic_jack);
+#ifndef CONFIG_SWITCH
snd_soc_jack_add_pins(&tegra_wm8903_mic_jack,
ARRAY_SIZE(tegra_wm8903_mic_jack_pins),
tegra_wm8903_mic_jack_pins);
+#else
+ snd_soc_jack_notifier_register(&tegra_wm8903_mic_jack,
+ &tegra_wm8903_jack_detect_nb);
+#endif
wm8903_mic_detect(codec, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE,
- 0);
+ machine_is_cardhu() ? SND_JACK_MICROPHONE : 0);
snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
/* FIXME: Calculate automatically based on DAPM routes? */
- if (!machine_is_harmony())
+ if (!machine_is_harmony() && !machine_is_ventana() &&
+ !machine_is_cardhu())
snd_soc_dapm_nc_pin(dapm, "IN1L");
- if (!machine_is_seaboard() && !machine_is_aebl())
+ if (!machine_is_seaboard() && !machine_is_aebl() &&
+ !machine_is_cardhu())
snd_soc_dapm_nc_pin(dapm, "IN1R");
snd_soc_dapm_nc_pin(dapm, "IN2L");
if (!machine_is_kaen())
@@ -344,26 +664,41 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static struct snd_soc_dai_link tegra_wm8903_dai = {
- .name = "WM8903",
- .stream_name = "WM8903 PCM",
- .codec_name = "wm8903.0-001a",
- .platform_name = "tegra-pcm-audio",
- .cpu_dai_name = "tegra-i2s.0",
- .codec_dai_name = "wm8903-hifi",
- .init = tegra_wm8903_init,
- .ops = &tegra_wm8903_ops,
+static struct snd_soc_dai_link tegra_wm8903_dai[] = {
+ {
+ .name = "WM8903",
+ .stream_name = "WM8903 PCM",
+ .codec_name = "wm8903.0-001a",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra20-i2s.0",
+ .codec_dai_name = "wm8903-hifi",
+ .init = tegra_wm8903_init,
+ .ops = &tegra_wm8903_ops,
+ },
+ {
+ .name = "SPDIF",
+ .stream_name = "SPDIF PCM",
+ .codec_name = "spdif-dit.0",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra20-spdif",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_spdif_ops,
+ },
+ {
+ .name = "BT-SCO",
+ .stream_name = "BT SCO PCM",
+ .codec_name = "spdif-dit.1",
+ .platform_name = "tegra-pcm-audio",
+ .cpu_dai_name = "tegra20-i2s.1",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_wm8903_bt_sco_ops,
+ },
};
static struct snd_soc_card snd_soc_tegra_wm8903 = {
.name = "tegra-wm8903",
- .dai_link = &tegra_wm8903_dai,
- .num_links = 1,
-
- .controls = tegra_wm8903_controls,
- .num_controls = ARRAY_SIZE(tegra_wm8903_controls),
- .dapm_widgets = tegra_wm8903_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_wm8903_dapm_widgets),
+ .dai_link = tegra_wm8903_dai,
+ .num_links = ARRAY_SIZE(tegra_wm8903_dai),
};
static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
@@ -391,13 +726,58 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
if (ret)
goto err_free_machine;
+ machine->spk_reg = regulator_get(&pdev->dev, "vdd_spk_amp");
+ if (IS_ERR(machine->spk_reg)) {
+ dev_info(&pdev->dev, "No speaker regulator found\n");
+ machine->spk_reg = 0;
+ }
+
+ machine->dmic_reg = regulator_get(&pdev->dev, "vdd_dmic");
+ if (IS_ERR(machine->dmic_reg)) {
+ dev_info(&pdev->dev, "No digital mic regulator found\n");
+ machine->dmic_reg = 0;
+ }
+
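+	/*
+	 * Cardhu has the WM8903 on I2C bus 4 (address 0x1a) and uses the
+	 * Tegra30 I2S/SPDIF controllers, so patch the default DAI links.
+	 */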
+ if (machine_is_cardhu()) {
+		tegra_wm8903_dai[0].codec_name = "wm8903.4-001a";
+ tegra_wm8903_dai[0].cpu_dai_name = "tegra30-i2s.1";
+
+ tegra_wm8903_dai[1].cpu_dai_name = "tegra30-spdif";
+
+ tegra_wm8903_dai[2].cpu_dai_name = "tegra30-i2s.3";
+ }
+
+#ifdef CONFIG_SWITCH
+	/* Add h2w switch class support */
+ ret = switch_dev_register(&tegra_wm8903_headset_switch);
+ if (ret < 0)
+ goto err_fini_utils;
+#endif
+
card->dev = &pdev->dev;
platform_set_drvdata(pdev, card);
snd_soc_card_set_drvdata(card, machine);
+ if (machine_is_cardhu() || machine_is_ventana()) {
+ card->controls = cardhu_controls;
+ card->num_controls = ARRAY_SIZE(cardhu_controls);
+
+ card->dapm_widgets = cardhu_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(cardhu_dapm_widgets);
+ } else {
+ card->controls = tegra_wm8903_default_controls;
+ card->num_controls = ARRAY_SIZE(tegra_wm8903_default_controls);
+
+ card->dapm_widgets = tegra_wm8903_default_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(tegra_wm8903_default_dapm_widgets);
+ }
+
if (machine_is_harmony()) {
card->dapm_routes = harmony_audio_map;
card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map);
+ } else if (machine_is_ventana() || machine_is_cardhu()) {
+ card->dapm_routes = cardhu_audio_map;
+ card->num_dapm_routes = ARRAY_SIZE(cardhu_audio_map);
} else if (machine_is_seaboard()) {
card->dapm_routes = seaboard_audio_map;
card->num_dapm_routes = ARRAY_SIZE(seaboard_audio_map);
@@ -413,11 +793,15 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ goto err_unregister_switch;
}
return 0;
+err_unregister_switch:
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&tegra_wm8903_headset_switch);
+#endif
err_fini_utils:
tegra_asoc_utils_fini(&machine->util_data);
err_free_machine:
@@ -445,10 +829,18 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev)
gpio_free(pdata->gpio_spkr_en);
machine->gpio_requested = 0;
+ if (machine->spk_reg)
+ regulator_put(machine->spk_reg);
+ if (machine->dmic_reg)
+ regulator_put(machine->dmic_reg);
+
snd_soc_unregister_card(card);
tegra_asoc_utils_fini(&machine->util_data);
+#ifdef CONFIG_SWITCH
+ switch_dev_unregister(&tegra_wm8903_headset_switch);
+#endif
kfree(machine);
return 0;